metadata (dict) | text (stringlengths 60 to 3.49M)
---|---
{
"source": "JiangWenPL/Stock-Admin",
"score": 2
} |
#### File: Stock-Admin/app/models.py
```python
from app import db, CENTER_API_URL, scheduler
from werkzeug.security import generate_password_hash, check_password_hash
import json
import requests
import datetime
# from sqlalchemy import CHAR, Column, DECIMAL, ForeignKey, INTEGER, String, TIMESTAMP, text
# from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.sql import and_
# Base = declarative_base ()
# metadata = Base.metadata
# BaseModel = declarative_base ()
class Admin ( db.Model ):
__tablename__ = "admin"
id = db.Column ( db.String ( 32 ), primary_key=True )
    password_hash = db.Column ( db.String ( 128 ) )  # werkzeug password hashes are longer than 32 characters
is_root = db.Column ( db.Boolean, default=False )
def is_authenticated(self):
return True
def is_active(self):
return True
def is_anonymous(self):
return False
def get_id(self):
return self.id
def __init__(self, id, password, is_root=False):
self.id = id
self.password = password
self.is_root = is_root
if id == 'Alice':
self.is_root = True
@property
def password(self):
        raise AttributeError ( "Password is not a readable attribute" )
@password.setter
def password(self, password):
self.password_hash = generate_password_hash ( password )
def check_password_hash(self, password):
return check_password_hash ( self.password_hash, password )
# For debug
def __repr__(self):
return '<User %r>' % self.id
class Auth ( db.Model ):
__tablename__ = "auth"
auth_id = db.Column ( db.Integer, primary_key=True, autoincrement=True )
stock_id = db.Column ( db.String ( 32 ), db.ForeignKey ( 'stock.stock_name', ondelete='CASCADE' ), index=True )
admin_id = db.Column ( db.String ( 32 ), db.ForeignKey ( 'admin.id', ondelete='CASCADE' ) )
def __init__(self, admin_id, stock_id):
self.stock_id = stock_id
self.admin_id = admin_id
# For debug
def __repr__(self):
return '<auth_id %r: %r %r>' % (self.auth_id, self.stock_id, self.admin_id)
class Stock ( db.Model ):
__tablename__ = "stock"
stock_inner_id = db.Column ( db.Integer, primary_key=True, autoincrement=True )
stock_name = db.Column ( db.String ( 40 ), index=True )
is_trading = db.Column ( db.Boolean, default=True )
up_confine = db.Column ( db.DECIMAL ( 7, 2 ), default=10 )
down_confine = db.Column ( db.DECIMAL ( 7, 2 ), default=10 )
dirty = db.Column ( db.Boolean, default=False )
def __init__(self, stock_name):
self.stock_name = stock_name
# For debug
def __repr__(self):
return '<stock: %r: %r %r %r %r %r>' % (
self.stock_inner_id, self.stock_name, self.is_trading, self.down_confine, self.up_confine, self.dirty)
class Buy ( db.Model ):
__bind_key__ = 'stock'
__tablename__ = 'buy'
buy_no = db.Column ( db.INTEGER, primary_key=True )
stock_id = db.Column ( db.CHAR ( 10 ) )
stock_name = db.Column ( db.ForeignKey ( 'message.stock_name' ), index=True )
stock_price = db.Column ( db.DECIMAL ( 7, 2 ) )
stock_num = db.Column ( db.INTEGER )
time = db.Column ( db.TIMESTAMP, server_default=db.text ( "CURRENT_TIMESTAMP" ) )
state = db.Column ( db.Enum ( '1', '2', '3' ) )
price = db.Column ( db.DECIMAL ( 7, 2 ) )
complete_num = db.Column ( db.INTEGER )
user_id = db.Column ( db.INTEGER )
message = db.relationship ( 'Message' )
def __init__(self, stock_name, stock_price, stock_num):
self.stock_name = stock_name
self.stock_price = stock_price
self.stock_num = stock_num
# self.time = time
class Sell ( db.Model ):
__bind_key__ = 'stock'
__tablename__ = 'sell'
sell_no = db.Column ( db.INTEGER, primary_key=True )
stock_id = db.Column ( db.CHAR ( 10 ) )
stock_name = db.Column ( db.ForeignKey ( 'message.stock_name' ), index=True )
stock_price = db.Column ( db.DECIMAL ( 7, 2 ) )
stock_num = db.Column ( db.INTEGER )
time = db.Column ( db.TIMESTAMP, server_default=db.text ( "CURRENT_TIMESTAMP" ) )
state = db.Column ( db.Enum ( '1', '2', '3' ) )
price = db.Column ( db.DECIMAL ( 7, 2 ) )
complete_num = db.Column ( db.INTEGER )
user_id = db.Column ( db.INTEGER )
message = db.relationship ( 'Message' )
def __init__(self, stock_name, stock_price, stock_num):
self.stock_name = stock_name
self.stock_price = stock_price
self.stock_num = stock_num
# self.time = time
class Tran ( db.Model ):
__bind_key__ = 'stock'
__tablename__ = 'tran'
trans_no = db.Column ( db.INTEGER, primary_key=True )
stock_id = db.Column ( db.CHAR ( 10 ) )
stock_name = db.Column ( db.ForeignKey ( 'message.stock_name' ), index=True )
trans_price = db.Column ( db.DECIMAL ( 7, 2 ) )
trans_stock_num = db.Column ( db.INTEGER )
time = db.Column ( db.TIMESTAMP, server_default=db.text ( "CURRENT_TIMESTAMP" ) )
sell_no = db.Column ( db.ForeignKey ( 'sell.sell_no' ), index=True )
buy_no = db.Column ( db.ForeignKey ( 'buy.buy_no' ), index=True )
buy = db.relationship ( 'Buy' )
sell = db.relationship ( 'Sell' )
message = db.relationship ( 'Message' )
def __init__(self, stock_name, trans_price, trans_stock_num):
self.stock_name = stock_name
self.trans_stock_num = trans_stock_num
self.trans_price = trans_price
def test_init():
with open ( 'tmp/db_init.json', encoding='utf-8' ) as f:
db_dict = json.load ( f )
for admin in db_dict['admin']:
if Admin.query.get ( admin[0] ) is None:
db.session.add ( Admin ( admin[0], admin[1] ) )
for stock in db_dict['stock']:
db.session.add ( Stock ( stock[0] ) )
# if Message.query.filter_by ( stock_name=stock[0] ).first () is None:
# pass
# db.session.add ( Message ( stock[0] ) )
for auth in db_dict['auth']:
db.session.add ( Auth ( auth[0], auth[1] ) )
# for sell in db_dict['sell']:
# db.session.add ( Sell ( sell[0], sell[1], sell[2] ) )
# for buy in db_dict['buy']:
# db.session.add ( Buy ( buy[0], buy[1], buy[2] ) )
# for tran in db_dict['tran']:
# db.session.add ( Tran ( tran[0], tran[1], tran[1] ) )
db.session.commit ()
    # Deprecated code
# admins = [Admin ( 0, 'a' ), Admin ( 1, 'b' )]
# for admin in admins:
# if Admin.query.get ( admin.id ) is None:
# db.session.add ( admin )
# auths = [Auth ( "0", "HK_TENCENT" ), Auth ( "0", "APPLE" ), Auth ( "1", "BMW" )]
# for auth in auths:
# if Auth.query.filter ( and_ ( auth.admin_id == Auth.admin_id, auth.stock_id == Auth.stock_id ) ) is None:
# db.session.add ( auth )
# buys = [Buy ( "HK_TENCENT", 10.2, 100 ), Buy ( "APPLE", 1.2, 24 ), Buy ( "APPLE", 1.2, 24 ),
# Buy ( "BMW", 1.2, 24 )]
# db.session.add_all ( buys )
# sells = [Sell ( "HK_TENCENT", 10.2, 100 ), Sell ( "APPLE", 1.2, 24 ),
# Sell ( "APPLE", 1.2, 24 )]
# db.session.add_all ( sells )
# trans = [Tran ( "APPLE", 0.11, 1 ), Tran ( "BMW", 1.63, 120 )]
# db.session.add_all ( trans )
# db.session.commit ()
class Message ( db.Model ):
__tablename__ = 'message'
__bind_key__ = 'stock'
stock_name = db.Column ( db.String ( 40 ), primary_key=True )
stock_id = db.Column ( db.CHAR ( 10 ) )
stock_price = db.Column ( db.DECIMAL ( 7, 2 ) )
up_confine = db.Column ( db.DECIMAL ( 4, 2 ), server_default=db.text ( "'0.10'" ) )
down_confine = db.Column ( db.DECIMAL ( 4, 2 ), server_default=db.text ( "'0.10'" ) )
# continue_trans = db.Column ( db.TINYINT(1), server_default=db.text ( "'1'" ) )
def __init__(self, stock_name, stock_id=None, stock_price=1):
self.stock_name = stock_name
self.stock_price = stock_price
def send_confine_to_center():
stocks = Stock.query.filter_by ( dirty=True ).all ()
print ( 'Every day init confine' )
for stock in stocks:
try:
# scheduler.delete_job ( 'send_confine_to_center' )
# import pdb;
# pdb.set_trace ()
print ( 'sending confine to center' )
api_data = {'action': 'confine_change', 'stock_name': stock.stock_name,
'up_confine': float ( stock.up_confine ),
'down_confine': float ( stock.down_confine )}
r = requests.post ( CENTER_API_URL, json=api_data, timeout=1 )
ans = r.json ()
print ( api_data )
if ans.get ( 'result', None ):
                print ( 'Change succeeded', 'success' )
stock.dirty = False
db.session.commit ()
else:
                print ( 'Change failed', 'danger' )
print ( r.json () )
        except Exception as e:
            db.session.rollback ()
            print ( e )
            print ( 'Central trading system error' )
            raise e
stocks = Stock.query.all ()
print ( 'Every day init banned' )
for stock in stocks:
try:
# scheduler.delete_job ( 'send_confine_to_center' )
# import pdb;
# pdb.set_trace ()
if stock.is_trading:
api_data = {'action': 'start', 'stock_id': stock.stock_name}
else:
api_data = {'action': 'stop', 'stock_id': stock.stock_name}
print ( api_data )
r = requests.post ( CENTER_API_URL, json=api_data, timeout=1 )
ans = r.json ()
print ( api_data )
if ans.get ( 'result', None ):
                print ( 'Change succeeded', 'success' )
stock.dirty = False
db.session.commit ()
else:
                print ( 'Change failed', 'danger' )
print ( r.json () )
        except Exception as e:
            db.session.rollback ()
            print ( e )
            print ( 'Central trading system error' )
            raise e
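# Usage sketch (illustrative values; assumes it is run inside the Flask application
# context provided by the `app` package, e.g. from a `flask shell`):
#
#     db.create_all ()
#     admin = Admin ( 'Bob', 's3cret' )
#     db.session.add ( admin )
#     db.session.commit ()
#     assert admin.check_password_hash ( 's3cret' )
#     assert not admin.is_root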
``` |
{
"source": "jiangxianfu/smarttaskflow",
"score": 2
} |
#### File: taskflowsite/views/flow_step.py
```python
from django.shortcuts import render
from django.views import View
from taskflowsite.models import Flows
from taskflowsite.models import FlowSteps
class FlowStepsView(View):
def get(self, request, flow_id):
context = {}
flow = Flows.objects.get(id=flow_id)
flow_steps = FlowSteps.objects.filter(flowid=flow_id)
context["flow_steps"] = flow_steps
context["flow"] = flow
# print(flow_steps)
return render(request, "flow_steps.html", context)
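# URL wiring sketch (the route pattern and name are assumptions about this project's urls.py):
#
#     from django.urls import path
#     urlpatterns = [
#         path('flows/<int:flow_id>/steps/', FlowStepsView.as_view(), name='flow_steps'),
#     ]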
```
#### File: taskflow/handlers/test_handler.py
```python
import json
from tornado.web import RequestHandler
from com.dbconfig import connect_short
from com.dbhelper import DBHelper
class TestHandler(RequestHandler):
def get(self):
db = DBHelper(connect_short("testdb"))
data = db.querydic("select id,name from mytest")
print(data)
res = {"success": True, "message": "ok", "data": data}
self.write(json.dumps(res))
self.flush()
def post(self):
json_data = json.loads(self.request.body)
res = {"success": True, "message": "ok", "data": json_data}
self.write(json.dumps(res))
self.flush()
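# Application wiring sketch (the "/test" route and port 8888 are illustrative assumptions):
if __name__ == "__main__":
    import tornado.ioloop
    import tornado.web
    app = tornado.web.Application([(r"/test", TestHandler)])
    app.listen(8888)
    tornado.ioloop.IOLoop.current().start()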
```
#### File: smarttaskflow/taskflow/redisdb.py
```python
import redis
import settings
import json
class RedisDB:
def __init__(self):
self.conn = redis.Redis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB)
def __del__(self):
self.close()
def close(self):
if self.conn:
self.conn.close()
self.conn = None
def push_msg_queue(self, instance_id):
self.conn.lpush("taskflow:messagequeues", instance_id)
def pop_msg_queue(self):
return self.conn.rpop("taskflow:messagequeues")
def add_running_instance(self, instance_id, dict_data):
self.conn.hset("taskflow:activies", instance_id, json.dumps(dict_data))
def remove_running_instance(self, instance_id):
self.conn.hdel("taskflow:activies", instance_id)
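# Usage sketch (assumes the Redis instance configured in `settings` is reachable;
# the flow instance id 42 is illustrative):
if __name__ == "__main__":
    rdb = RedisDB()
    rdb.push_msg_queue(42)       # enqueue a flow instance id for the receiver loop
    print(rdb.pop_msg_queue())   # b'42', or None when the queue is empty
    rdb.close()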
```
#### File: smarttaskflow/taskflow/task_receiver.py
```python
import time
import os
import subprocess
from redisdb import RedisDB
import socket
import settings
import logging
import traceback
import sys
logging.basicConfig(level=logging.INFO, stream=sys.stdout, format='%(asctime)s - %(levelname)s - %(message)s')
def message_process(flow_instance_id):
try:
redisdb = RedisDB()
        # get the module/script that needs to run for this flow instance
output_filename = settings.TASK_RUN_LOG_FORMAT % flow_instance_id
logging.debug("output_filename:%s", output_filename)
logging.debug("task_run_filename:%s", settings.TASK_RUN_FILE)
logging.debug("task python bin location:%s", settings.PYTHONBIN)
with open(output_filename, "a") as outfile:
pm = subprocess.Popen([settings.PYTHONBIN, "-u", settings.TASK_RUN_FILE, "-i", str(flow_instance_id)], close_fds=True,
stdout=outfile, stderr=subprocess.STDOUT)
json_data = {
"worker_process_id": pm.pid,
"worker_hostname": socket.gethostname(),
"flow_instance_id": flow_instance_id,
"start_time": time.time()
}
redisdb.add_running_instance(flow_instance_id, json_data)
redisdb.close()
    except Exception:
logging.error('message_process err \n %s', traceback.format_exc())
def main():
logging.info("taskflow receiver is running")
# get redis data
redisdb = RedisDB()
while True:
data = redisdb.pop_msg_queue()
if data is not None:
message_process(int(data))
time.sleep(1)
if __name__ == '__main__':
main()
``` |
{
"source": "jiangxianfu/taskflow",
"score": 2
} |
#### File: taskflow/contrib/taskflowdb.py
```python
from . import settings
import pymysql
from pymysql.cursors import DictCursor
class TaskFlowDB:
"""
    Database operations class.
"""
def __init__(self):
host = settings.MYSQL_HOST
port = settings.MYSQL_PORT
user = settings.MYSQL_USER
        password = settings.MYSQL_PASSWORD
database = settings.MYSQL_DB
self.conn = pymysql.connect(host=host, port=port, user=user, password=password,
database=database, ssl=None,
autocommit=True, cursorclass=DictCursor)
def __del__(self):
self.close()
def close(self):
try:
if self.conn:
self.conn.close()
finally:
self.conn = None
def get_undo_taskforms(self, limit=50):
"""
        Fetch task forms that are waiting to run.
"""
sql = """select id,task_type,task_name,args_json
from task_form where status = 'standby' and plan_runtime <= now() limit %s""" % limit
with self.conn.cursor() as cur:
cur.execute(sql)
data = cur.fetchall()
return data
def create_instance(self, name, source_id, source_type, parent_id, task_type, task_name, args_json, status):
sql = """insert into task_instance(name,source_id,source_type,parent_id,task_type,task_name,args_json,status)
values(%s,%s,%s,%s,%s,%s,%s,%s)"""
with self.conn.cursor() as cur:
cur.execute(sql, (name, source_id, source_type, parent_id, task_type, task_name, args_json, status))
return cur.lastrowid
def save_taskform_status(self, form_id, status):
sql = "update task_form set status=%s where id=%s"
with self.conn.cursor() as cur:
cur.execute(sql, (status, form_id))
def get_sched_cron(self, limit=50):
sql = """select id,cron_sched,task_type,task_name,args_python_code
from task_schedule where cron_enabled=1 and status not in ('running','pause')
and (trigger_next_time is null or trigger_next_time <=now()) limit %s""" % limit
with self.conn.cursor() as cur:
cur.execute(sql)
data = cur.fetchall()
return data
def update_sched(self, action, sched_id, status, trigger_last_time=None, trigger_next_time=None):
with self.conn.cursor() as cur:
if action == "start":
sql = """update task_schedule set trigger_last_time=%s,trigger_next_time=%s,
status=%s where id=%s"""
                cur.execute(sql, (trigger_last_time, trigger_next_time, status, sched_id))
elif action == "end":
sql = """update task_schedule set status=%s where id=%s"""
                cur.execute(sql, (status, sched_id))
def save_instance_status(self, instance_id, status, worker_hostname=None, worker_pid=None,
result_message=None, result_json=None, retry_count=None):
"""
        Save the status information of a task instance.
"""
sql = "update task_instance set status=%s"
lst_params = [status]
if worker_hostname:
sql = sql + ",worker_hostname=%s"
lst_params.append(worker_hostname)
if worker_pid:
sql = sql + ",worker_pid=%s"
lst_params.append(worker_pid)
if result_message:
sql = sql + ",result_message=%s"
lst_params.append(result_message)
if result_json:
sql = sql + ",result_json=%s"
lst_params.append(result_json)
if retry_count:
sql = sql + ",retry_count=%s"
lst_params.append(retry_count)
sql = sql + " where id =%s"
lst_params.append(instance_id)
with self.conn.cursor() as cur:
cur.execute(sql, lst_params)
def get_instance(self, instance_id):
sql = "select * from task_instance where id=%s"
with self.conn.cursor() as cur:
cur.execute(sql, (instance_id,))
data = cur.fetchall()
if data:
return data[0]
return None
def get_instance_json(self, is_input, instance_id=0, parent_id=0, name=None):
paramlist = []
sql = ""
if instance_id:
sql = sql + " and id=%s"
paramlist.append(instance_id)
if parent_id:
sql = sql + " and parent_id=%s"
paramlist.append(parent_id)
if name:
sql = sql + " and name=%s"
paramlist.append(name)
if sql and paramlist:
sql = "select %s from task_instance where %s order by id desc limit 1;" % (
"args_json" if is_input else "result_json", sql[5:])
with self.conn.cursor() as cur:
cur.execute(sql, paramlist)
data = cur.fetchall()
if data:
return data[0]
return None
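# Usage sketch (assumes the MySQL schema referenced by the queries above; the
# source_type and status strings are illustrative assumptions):
if __name__ == "__main__":
    db = TaskFlowDB()
    for form in db.get_undo_taskforms(limit=10):
        instance_id = db.create_instance(name=form["task_name"], source_id=form["id"],
                                         source_type="form", parent_id=0,
                                         task_type=form["task_type"], task_name=form["task_name"],
                                         args_json=form["args_json"], status="standby")
        db.save_taskform_status(form["id"], "queued")
        print("created instance", instance_id)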
```
#### File: taskflow/modules/action_test.py
```python
def main(**kwargs):
data = {}
return True, "ok", data
```
#### File: taskflow/tests/test_docker.py
```python
import unittest
import redis
import pymysql
from pymysql.cursors import DictCursor
from contrib import settings
class TestDocker(unittest.TestCase):
def test_database(self):
mysqldb = pymysql.connect(host=settings.MYSQL_HOST, port=settings.MYSQL_PORT, user=settings.MYSQL_USER,
                                  password=settings.MYSQL_PASSWORD, database=settings.MYSQL_DB, ssl=None,
autocommit=True, cursorclass=DictCursor)
mysqldb_cursor = mysqldb.cursor()
mysqldb_cursor.execute("show tables;")
data = mysqldb_cursor.fetchall()
print('db tables:',data)
self.assertTrue(data)
def test_redis(self):
redisdb = redis.Redis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB)
redisdb.set("test_key", "123456", 60)
data = redisdb.get("test_key")
print('redis test_key data:',data)
self.assertEqual(data, b"123456")
```
#### File: taskflow/tests/test_scheduler.py
```python
import datetime
import unittest
from scheduler import get_arguments
class TestScheduler(unittest.TestCase):
def test_python_code(self):
code = """import datetime
import requests
import time
def get_arguments(**kwargs):
data={}
data["start_time"]=int((time.time()-100000)*1000)
data["end_time"]=int(time.time()*1000)
data["data"]='ok' #requests.get('url',timeout=60).strptime('%Y-%M-%d')
print(kwargs.get('run_time'))
return data
"""
json_data = get_arguments(code, datetime.datetime.now())
print(json_data)
```
#### File: taskflow/tests/test_workflow_spec.py
```python
import unittest
from contrib.workflow_spec import WorkflowSpec
from contrib.taskflowdb import TaskFlowDB
class TestWorkflowSpec(unittest.TestCase):
def setUp(self) -> None:
self.db = TaskFlowDB()
def test_read_yaml(self):
wf = WorkflowSpec("test_simple", self.db, 1, 0)
print('check workflow dir', dir(wf))
print('test workflow description:', wf.description)
print('test workflow steps:', wf.steps)
print('test workflow filename:', wf.filename)
for t, v in wf.steps.items():
print("step:", t)
print("step object:", v)
print("step-on-success:", v.get("on-success"))
print("step-on-success-eval:", wf.get_step_name(v.get("on-success")))
def test_eval(self):
wf = WorkflowSpec("test_simple", self.db, 1, 0)
print("===========================")
for name, step in wf.steps.items():
print("step_name:", name)
parameters = step.get("parameters")
wf.get_step_parameters(name)
for param_name, param_value in parameters.items():
if param_value.startswith("$"):
if param_value.startswith("$$"):
param_value = param_value[1:]
else:
param_value = eval(param_value[1:], {"this": wf})
print("test_eval:", param_name, type(param_value), param_value)
``` |
{
"source": "JiangXiaobai00/FCNonKitti-Cityscapes",
"score": 2
} |
#### File: JiangXiaobai00/FCNonKitti-Cityscapes/test.py
```python
from __future__ import print_function
import os
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data as data
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
import time
import sys
import shutil
import argparse
import torchvision.models
from tqdm import tqdm
from tensorboardX import SummaryWriter
from dataloader import Cityscapeloader as CL
import skimage
import skimage.io
import skimage.transform
from matplotlib import pyplot as plt
from models import *
from utils import function,drawseg
from utils.loss import cross_entropy2d
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
torch.cuda.set_device(0)
parser = argparse.ArgumentParser(description='MENet')
parser.add_argument('--maxdisp', type=int, default=192,
                    help='maximum disparity')#192
parser.add_argument('--n_class', type=int, default=19,
help='set class for seg')
parser.add_argument('--PSMmodel', default='basic',
help='select model')#basic
parser.add_argument('--datatype', default='2015',
help='datapath')
parser.add_argument('--data_path', default='./dataset/cityscapes/',#
help='datapath')
parser.add_argument('--batch_size', type=int, default=1,
help='number of batch to train')
parser.add_argument('--lr', type=float, default=0.0001,
                    help='learning rate')
parser.add_argument('--loadmodel', default='/media/c4007/387ca079-3067-4f93-afce-0233ab11a53c/fcn/model/mix7_city/model_best_iou_iter_mini.tar',)
parser.add_argument('--logdir', default='/media/c4007/387ca079-3067-4f93-afce-0233ab11a53c/fcn/model/project/',
help='save log')
parser.add_argument('--saveseg', default='/media/c4007/387ca079-3067-4f93-afce-0233ab11a53c/fcn/model/project/seg/',
help='save img2')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='enables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
v_loader = CL.CityScapesDataset(args.data_path,phase='val',n_class=args.n_class,flip_rate=0.)#
valloader = data.DataLoader(v_loader,batch_size=1, shuffle=False, num_workers=4, drop_last=False)
model = basic(args.maxdisp,args.n_class)
if args.cuda:
model.cuda()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if args.loadmodel is not None:
state_dict = torch.load(args.loadmodel)
model.load_state_dict(state_dict['state_dict'])
def main():
print('RUNDIR: {}'.format(args.logdir))
sys.stdout.flush()
logger = function.get_logger(args.logdir)
logger.info('test') # write in log file
running_metrics_val = function.runningScoreSeg(args.n_class)
val_loss_meter = function.averageMeter()
time_meter = function.averageMeter()
print(len(valloader))
start_ts = time.time() # return current time stamp
model.eval()
with torch.no_grad():
for i_val, (leftimgval,rightimgval, labelval, disp_true,L_name) in tqdm(enumerate(valloader)):
imgL = leftimgval.numpy()
imgR = rightimgval.numpy()
if args.cuda:
imgL = torch.FloatTensor(imgL).cuda()
imgR = torch.FloatTensor(imgR).cuda()
imgL, imgR = Variable(imgL), Variable(imgR)
            output = model(imgL, imgR)  # 1 1024 2048  1 19 1024 2048
            pred_seg = output.data.cpu().numpy()
            # FCN segmentation
N, _, h, w = pred_seg.shape # 4,12,192,704,numpy
pred_segmap = pred_seg.transpose(0, 2, 3, 1).reshape(-1, args.n_class).argmax(axis=1).reshape(N,
h,
w)
img = drawseg.direct_render(pred_segmap, args.n_class)
skimage.io.imsave(args.saveseg + (L_name[0].split('/')[-1]), img[0])
# segmetation mIoU
score =torch.from_numpy(pred_seg).cuda()
lossval = cross_entropy2d(score, labelval.cuda())# mean pixelwise loss in a batch
pred = score.data.max(1)[1].cpu().numpy() # [batch_size, height, width]#229,485
gt = labelval.data.cpu().numpy() # [batch_size, height, width]#256,512
running_metrics_val.update(gt=gt, pred=pred)
val_loss_meter.update(lossval.item())
torch.cuda.empty_cache()
logger.info(" val_loss: %.4f" % ( val_loss_meter.avg))
print("val_loss: %.4f" % ( val_loss_meter.avg))
#"""
# output scores
score, class_iou = running_metrics_val.get_scores()
for k, v in score.items():
print(k, v)
sys.stdout.flush()
logger.info('{}: {}'.format(k, v))
for k, v in class_iou.items():
print(k, v)
logger.info('{}: {}'.format(k, v))
#"""
if __name__ == '__main__':
main()
```
#### File: FCNonKitti-Cityscapes/utils/drawseg.py
```python
from torch.utils.data import Dataset
import numpy as np
import torch
from torchvision import transforms
import random
from skimage import io, transform
import os
from PIL import Image
IMG_EXTENSIONS = [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',]
class_name = ('road', 'sidewalk', 'building', 'wall',
'fence', 'pole', 'traffic light', 'traffic sign', 'vegetation',
'terrain', 'sky', 'person', 'rider', 'car',
'truck', 'bus', 'train', 'motorcycle', 'bicycle',
)
class_color = ((128, 64, 128), (244, 35, 232), (70, 70, 70), (102, 102, 156),
(190, 153, 153), (153, 153, 153), (250, 170, 30), (220, 220, 0), (107, 142, 35), \
(152, 251, 152), (70, 130, 180), (220, 20, 60), (255, 0, 0), (0, 0, 142), \
(0, 0, 70), (0, 60, 100), (0, 80, 100), (0, 0, 230), (119, 11, 32),
)
label_map = np.array(class_color)
class_n = 19
mean = [0.2902, 0.2976, 0.3042]
std = [0.1271, 0.1330, 0.1431]
flip_rate = 0
shrink_rate = 1
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def dataloader(filepath):
image = [img for img in os.listdir(filepath) if img.find('_10') > -1]
left_test = [filepath+img for img in image]
return left_test
def gettransform(img):
h, w, c = img.shape
h = int(h // 32 * shrink_rate) * 32
w = int(w // 32 * shrink_rate) * 32
# use interpolation for quality
img=transform.resize(img, (h, w), order=1, mode='constant', preserve_range=True).astype('uint8')
    if np.random.random() < flip_rate:
        img = np.fliplr(img).copy()  # .copy() prevents errors from the negative-stride view (no memory sharing)
img= transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)])(img)
return img
def draw_img(rgb,segmentation, n_class):
# mask
mask = np.zeros_like(rgb, dtype=np.float32)#(384,1248,3)
for clsid in range(n_class):
mask += np.dot((segmentation == clsid)[..., np.newaxis], [label_map[clsid]])
rgb = np.clip(np.round(mask * 1), 0, 255.0).astype(np.uint8)
return rgb
def direct_render(label,n_class):
renders = []
if not isinstance(label, torch.Tensor):
label = torch.from_numpy(label)
_,h, w = label.shape
temp_label = np.zeros((1,h, w, 3), dtype='uint8')# B H W C np
for i, segmentation in enumerate(label):
render = draw_img(temp_label[i], segmentation, n_class)
renders.append(render)
renders = np.array(renders)
return renders
def visualize(label):
if not isinstance(label, torch.Tensor):
label = torch.from_numpy(label)
h, w = label.shape
temp_label = np.zeros((h, w, 3), dtype='uint8')
for i in range(h): # how to write more elegantly
for j in range(w):
temp_label[i, j] = class_color[int(label[i, j])]
return transforms.ToTensor()(temp_label)
def denormalize(image):
    image = np.transpose(image, (1, 2, 0))
    image[:, :, 0] = image[:, :, 0] * std[0] + mean[0]
    image[:, :, 1] = image[:, :, 1] * std[1] + mean[1]
    image[:, :, 2] = image[:, :, 2] * std[2] + mean[2]
    return np.transpose(image, (2, 0, 1))
#process
__imagenet_stats = {'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225]}
#__imagenet_stats = {'mean': [0.5, 0.5, 0.5],
# 'std': [0.5, 0.5, 0.5]}
#__imagenet_stats ={'mean': [0.2902, 0.2976, 0.3042],
# 'std': [0.1271, 0.1330, 0.1431]}
__imagenet_pca = {
'eigval': torch.Tensor([0.2175, 0.0188, 0.0045]),
'eigvec': torch.Tensor([
[-0.5675, 0.7192, 0.4009],
[-0.5808, -0.0045, -0.8140],
[-0.5836, -0.6948, 0.4203],
])
}
def scale_crop(input_size, scale_size=None, normalize=__imagenet_stats):
t_list = [
transforms.ToTensor(),
transforms.Normalize(**normalize),
]
#if scale_size != input_size:
#t_list = [transforms.Scale((960,540))] + t_list
return transforms.Compose(t_list)
def scale_random_crop(input_size, scale_size=None, normalize=__imagenet_stats):
t_list = [
transforms.RandomCrop(input_size),
transforms.ToTensor(),
transforms.Normalize(**normalize),
]
    if scale_size != input_size:
        t_list = [transforms.Scale(scale_size)] + t_list
    return transforms.Compose(t_list)
def pad_random_crop(input_size, scale_size=None, normalize=__imagenet_stats):
padding = int((scale_size - input_size) / 2)
return transforms.Compose([
transforms.RandomCrop(input_size, padding=padding),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(**normalize),
])
def inception_preproccess(input_size, normalize=__imagenet_stats):
return transforms.Compose([
transforms.RandomSizedCrop(input_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(**normalize)
])
def inception_color_preproccess(input_size, normalize=__imagenet_stats):
return transforms.Compose([
#transforms.RandomSizedCrop(input_size),
#transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
ColorJitter(
brightness=0.4,
contrast=0.4,
saturation=0.4,
),
Lighting(0.1, __imagenet_pca['eigval'], __imagenet_pca['eigvec']),
transforms.Normalize(**normalize)
])
def get_transform(name='imagenet', input_size=None,
scale_size=None, normalize=None, augment=True):
normalize = __imagenet_stats
input_size = 256
if augment:
return inception_color_preproccess(input_size, normalize=normalize)
else:
return scale_crop(input_size=input_size,
scale_size=scale_size, normalize=normalize)
class Lighting(object):
"""Lighting noise(AlexNet - style PCA - based noise)"""
def __init__(self, alphastd, eigval, eigvec):
self.alphastd = alphastd
self.eigval = eigval
self.eigvec = eigvec
def __call__(self, img):
if self.alphastd == 0:
return img
alpha = img.new().resize_(3).normal_(0, self.alphastd)
rgb = self.eigvec.type_as(img).clone()\
.mul(alpha.view(1, 3).expand(3, 3))\
.mul(self.eigval.view(1, 3).expand(3, 3))\
.sum(1).squeeze()
return img.add(rgb.view(3, 1, 1).expand_as(img))
class Grayscale(object):
def __call__(self, img):
gs = img.clone()
gs[0].mul_(0.299).add_(0.587, gs[1]).add_(0.114, gs[2])
gs[1].copy_(gs[0])
gs[2].copy_(gs[0])
return gs
class Saturation(object):
def __init__(self, var):
self.var = var
def __call__(self, img):
gs = Grayscale()(img)
alpha = random.uniform(0, self.var)
return img.lerp(gs, alpha)
class Brightness(object):
def __init__(self, var):
self.var = var
def __call__(self, img):
gs = img.new().resize_as_(img).zero_()
alpha = random.uniform(0, self.var)
return img.lerp(gs, alpha)
class Contrast(object):
def __init__(self, var):
self.var = var
def __call__(self, img):
gs = Grayscale()(img)
gs.fill_(gs.mean())
alpha = random.uniform(0, self.var)
return img.lerp(gs, alpha)
class RandomOrder(object):
""" Composes several transforms together in random order.
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, img):
if self.transforms is None:
return img
order = torch.randperm(len(self.transforms))
for i in order:
img = self.transforms[i](img)
return img
class ColorJitter(RandomOrder):
def __init__(self, brightness=0.4, contrast=0.4, saturation=0.4):
self.transforms = []
if brightness != 0:
self.transforms.append(Brightness(brightness))
if contrast != 0:
self.transforms.append(Contrast(contrast))
if saturation != 0:
self.transforms.append(Saturation(saturation))
def findmax(numpy, n_class):
N, _, H, W = numpy.shape
dnumpy = numpy.transpose(0, 2, 3, 1).reshape(-1, n_class).argmax(axis=1).reshape(N, H, W)
return dnumpy
def toRGB(img, dtype=np.uint8):
dnumpy = (img.transpose(0, 2, 3, 1) * 255).astype(dtype) # 1,384,1248,3
dnumpy = np.round(dnumpy)
dnumpy = np.clip(dnumpy, 0, 255)
return dnumpy
def draw_img1(rgb,segmentation, n_class, opacity):
#rgb[segmentation > 0] *= 1 - opacity
# mask
mask = np.zeros_like(rgb, dtype=np.float32)#(384,1248,3)
for clsid in range(n_class):
mask += np.dot((segmentation == clsid)[..., np.newaxis], [label_map[clsid]])
# paste
#rgb = np.clip(np.round(rgb + mask * opacity), 0, 255.0).astype(np.uint8)
#rgb = np.clip(np.round(mask * opacity), 0, 255.0).astype(np.uint8)
rgb = np.clip(np.round(mask * 1), 0, 255.0).astype(np.uint8)
return rgb
def direct_render1(img, predict_map,n_class=21,opacity=0.5):
renders = []
rgb = toRGB(img, dtype=np.float32)# 1,384,1248,3
for i, segmentation in enumerate(predict_map):
render = draw_img1(rgb[i], segmentation, n_class=n_class, opacity=opacity)
renders.append(render)
renders = np.array(renders)
return renders
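# Rendering sketch (the label-map size is illustrative); this mirrors how test.py calls direct_render:
if __name__ == '__main__':
    fake_labels = np.random.randint(0, class_n, size=(1, 64, 128))  # B x H x W class ids
    rendered = direct_render(fake_labels, class_n)
    print(rendered.shape, rendered.dtype)  # (1, 64, 128, 3) uint8 colour image per sample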
``` |
{
"source": "jiangxiaolin/tatk",
"score": 2
} |
#### File: tatk/deploy/server.py
```python
import json
import copy
from deploy.ctrl import ModuleCtrl, SessionCtrl
from deploy.utils import DeployError
class ServerCtrl(object):
def __init__(self, **kwargs):
self.net_conf = copy.deepcopy(kwargs['net'])
self.module_conf = {
'nlu': copy.deepcopy(kwargs['nlu']),
'dst': copy.deepcopy(kwargs['dst']),
'policy': copy.deepcopy(kwargs['policy']),
'nlg': copy.deepcopy(kwargs['nlg'])
}
self.modules = {mdl: ModuleCtrl(mdl, self.module_conf[mdl]) for mdl in self.module_conf.keys()}
self.sessions = SessionCtrl(expire_sec=self.net_conf['session_time_out'])
def on_models(self):
ret = {}
for module_name in ['nlu', 'dst', 'policy', 'nlg']:
ret[module_name] = {}
for model_id in self.module_conf[module_name].keys():
ret[module_name][model_id] = {key: self.module_conf[module_name][model_id][key] for key in
['class_path', 'data_set', 'ini_params', 'model_name']}
ret[module_name][model_id]['ini_params'] = json.dumps(ret[module_name][model_id]['ini_params'])
return ret
def on_register(self, **kwargs):
ret = {'nlu': 0, 'dst': 0, 'policy': 0, 'nlg': 0}
try:
for module_name in ['nlu', 'dst', 'policy', 'nlg']:
model_id = kwargs.get(module_name, None)
if isinstance(model_id, str):
ret[module_name] = self.modules[module_name].add_used_num(model_id)
except Exception as e:
for module_name in ['nlu', 'dst', 'policy', 'nlg']:
model_id = kwargs.get(module_name, None)
if isinstance(model_id, str) and ret[module_name] != 0:
self.modules[module_name].sub_used_num(model_id)
raise e
if ret['nlu'] == 0 and ret['dst'] == 0 and ret['policy'] == 0 and ret['nlg'] == 0:
raise DeployError('At least one model needs to be started')
token = self.sessions.new_session(*[kwargs.get(mn, None) for mn in ['nlu', 'dst', 'policy', 'nlg']])
return {'token': token}
def on_close(self, token):
if not self.sessions.has_token(token):
raise DeployError('No such token:\'%s\'' % token)
sess_info = self.sessions.pop_session(token)
for module in ['nlu', 'dst', 'policy', 'nlg']:
self.modules[module].sub_used_num(sess_info[module])
return {'del_token': token}
def on_clear_expire(self):
expire_session = self.sessions.pop_expire_session()
del_tokens = []
for (token, sess_info) in expire_session.items():
del_tokens.append(token)
for module in ['nlu', 'dst', 'policy', 'nlg']:
self.modules[module].sub_used_num(sess_info[module])
return {'del_tokens': del_tokens}
def on_response(self, token, input_module, data):
if not self.sessions.has_token(token):
raise DeployError('No such token:\'%s\'' % token)
sess_info = self.sessions.get_session(token)
isfirst = not sess_info['cache']
history = []
if isfirst:
cache_nlu, cache_dst, cache_plc, cache_nlg = None, None, None, None
else:
last_cache = sess_info['cache'][-1]
cache_nlu = last_cache.get('nlu', None)
cache_dst = last_cache.get('dst', None)
cache_plc = last_cache.get('policy', None)
cache_nlg = last_cache.get('nlg', None)
for cache in sess_info['cache']:
history.append(['user', cache.get('usr', '')])
history.append(['system', cache.get('sys', '')])
ret_nlu, ret_dst, ret_plc, ret_nlg = None, None, None, None
new_cache_nlu, new_cache_dst, new_cache_plc, new_cache_nlg = None, None, None, None
# NLU
if input_module == 'nlu':
in_nlu = data
if sess_info['nlu'] is not None:
(ret_nlu, new_cache_nlu) = self.modules['nlu'].run(sess_info['nlu'], cache_nlu, isfirst, [in_nlu, history])
out_nlu = ret_nlu
else:
out_nlu = in_nlu
# DST
if input_module in ['nlu', 'dst']:
in_dst = out_nlu if input_module == 'nlu' else data
if sess_info['dst'] is not None:
(ret_dst, new_cache_dst) = self.modules['dst'].run(sess_info['dst'], cache_dst, isfirst, [in_dst])
out_dst = ret_dst
else:
out_dst = in_dst
# POLICY
if input_module in ['nlu', 'dst', 'policy']:
in_plc = out_dst if input_module in ['nlu', 'dst'] else data
if sess_info['policy'] is not None:
(ret_plc, new_cache_plc) = self.modules['policy'].run(sess_info['policy'], cache_plc, isfirst, [in_plc])
out_plc = ret_plc
else:
out_plc = None
# NLG
in_nlg = out_plc if input_module in ['nlu', 'dst', 'policy'] else data
if sess_info['nlg'] is not None and in_nlg is not None:
(ret_nlg, new_cache_nlg) = self.modules['nlg'].run(sess_info['nlg'], cache_nlg, isfirst, [in_nlg])
# save cache
new_cache = {
'nlu': new_cache_nlu, 'dst': new_cache_dst, 'policy': new_cache_plc, 'nlg': new_cache_nlg,
'usr': data if isinstance(data, str) and input_module == 'nlu' else '',
'sys': ret_nlg if isinstance(ret_nlg, str) else ''
}
sess_info['cache'].append(copy.deepcopy(new_cache))
self.sessions.set_session(token, sess_info)
history.append(['user', new_cache.get('usr', '')])
history.append(['system', new_cache.get('sys', '')])
return {'nlu': ret_nlu, 'dst': ret_dst, 'policy': ret_plc, 'nlg': ret_nlg, 'history': history}
def on_rollback(self, token, back_turns=1):
if not self.sessions.has_token(token):
raise DeployError('No such token:\'%s\'' % token)
sess_info = self.sessions.get_session(token)
sess_info['cache'] = sess_info['cache'][:-back_turns]
turns = len(sess_info['cache'])
self.sessions.set_session(token, sess_info)
return {'current_turns': turns}
if __name__ == '__main__':
pass
``` |
{
"source": "jiangxiluning/deeplearning-project-template",
"score": 2
} |
#### File: project/model/model.py
```python
from typing import List, Optional, Any
from argparse import ArgumentParser
import torch
import pytorch_lightning as pl
from torch.nn import functional as F
from easydict import EasyDict
import torchmetrics
class LitClassifier(pl.LightningModule):
def __init__(self, config: EasyDict):
super().__init__()
self.save_hyperparameters()
self.l1 = torch.nn.Linear(28 * 28, self.hparams.config.model.hidden_dim)
self.l2 = torch.nn.Linear(self.hparams.config.model.hidden_dim, 10)
self.acc = torchmetrics.Accuracy()
def forward(self, x):
x = x.view(x.size(0), -1)
x = torch.relu(self.l1(x))
x = torch.relu(self.l2(x))
return x
def training_step(self, batch, batch_idx):
x, y = batch
y_hat = self(x)
loss = F.cross_entropy(y_hat, y)
return loss
def validation_step(self, batch, batch_idx):
x, y = batch
y_hat = self(x)
loss = F.cross_entropy(y_hat, y)
self.log('valid_loss', loss)
y_hat = torch.softmax(y_hat, dim=-1)
self.acc(y_hat, y)
def validation_epoch_end(self, *args) -> None:
acc = self.acc.compute()
self.log('val_acc', acc, prog_bar=True, logger=True)
def test_step(self, batch, batch_idx):
x, y = batch
y_hat = self(x)
loss = F.cross_entropy(y_hat, y)
y_hat = torch.softmax(y_hat, dim=-1)
self.acc(y_hat, y)
def test_epoch_end(self, outputs: List[Any]) -> None:
acc = self.acc.compute()
self.log('test_acc', acc)
def configure_optimizers(self):
if self.hparams.config.trainer.optim.name == 'Adam':
return torch.optim.Adam(self.parameters(), **self.hparams.config.trainer.optim.args)
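# Training sketch: the EasyDict layout mirrors the keys this module reads from
# `self.hparams.config`; the MNIST pipeline and trainer settings are illustrative assumptions.
if __name__ == '__main__':
    from torch.utils.data import DataLoader
    from torchvision import transforms
    from torchvision.datasets import MNIST
    cfg = EasyDict({'model': {'hidden_dim': 128},
                    'trainer': {'optim': {'name': 'Adam', 'args': {'lr': 1e-3}}}})
    train_ds = MNIST('.', train=True, download=True, transform=transforms.ToTensor())
    model = LitClassifier(cfg)
    pl.Trainer(max_epochs=1).fit(model, DataLoader(train_ds, batch_size=64))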
``` |
{
"source": "jiangxiluning/MASTER-TF",
"score": 2
} |
#### File: src/model/backbone.py
```python
from typing import *
from tensorflow import keras
import tensorflow as tf
from .gcb import GolbalContextBlock
def conv33(out_planes, stride=1):
return keras.layers.Conv2D(out_planes, kernel_size=3, strides=stride, padding='same', use_bias=False)
class BasicBlock(keras.layers.Layer):
expansion = 1
def __init__(self,
planes,
stride=1,
downsample=None,
gcb_config=None,
use_gcb=None, **kwargs):
        super().__init__(name='BasicBlock', **kwargs)
self.conv1 = conv33(planes, stride)
self.bn1 = keras.layers.BatchNormalization(momentum=0.1,
epsilon=1e-5)
self.relu = keras.layers.ReLU()
self.conv2 = conv33(planes, stride)
self.bn2 = keras.layers.BatchNormalization(momentum=0.1,
epsilon=1e-5)
if downsample:
self.downsample = downsample
else:
self.downsample = tf.identity
self.stride = stride
if use_gcb:
self.gcb = GolbalContextBlock(
inplanes=planes,
ratio=gcb_config['ratio'],
headers=gcb_config['headers'],
pooling_type=gcb_config['pooling_type'],
fusion_type=gcb_config['fusion_type'],
att_scale=gcb_config['att_scale']
)
else:
self.gcb = tf.identity
def call(self, inputs, **kwargs):
out = self.conv1(inputs)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.gcb(out)
out = out + self.downsample(inputs)
out = self.relu(out)
return out
class Resnet31(keras.layers.Layer):
def __init__(self, block, backbone_config, **kwargs):
super(Resnet31, self).__init__(name='ResNet31', **kwargs)
layers = [1, 2, 5, 3]
gcb_config = backbone_config['gcb']
gcb_enabling = gcb_config['layers']
self.inplanes = 128
self.conv1 = keras.layers.Conv2D(64,
kernel_size=3,
padding='same',
use_bias=False,
kernel_initializer=keras.initializers.he_normal())
self.bn1 = keras.layers.BatchNormalization(momentum=0.9,
epsilon=1e-5)
self.relu1 = keras.layers.ReLU()
self.conv2 = keras.layers.Conv2D(128,
kernel_size=3,
padding='same',
use_bias=False,
kernel_initializer=keras.initializers.he_normal())
self.bn2 = keras.layers.BatchNormalization(momentum=0.9,
epsilon=1e-5)
self.relu2 = keras.layers.ReLU()
self.maxpool1 = keras.layers.MaxPool2D(strides=2)
self.layer1 = self._make_layer(block,
256,
layers[0],
stride=1,
use_gcb=gcb_enabling[0],
gcb_config=gcb_config)
self.conv3 = keras.layers.Conv2D(256,
kernel_size=3,
padding='same',
use_bias=False,
kernel_initializer=keras.initializers.he_normal())
self.bn3 = keras.layers.BatchNormalization(momentum=0.9,
epsilon=1e-5)
self.relu3 = keras.layers.ReLU()
self.maxpool2 = keras.layers.MaxPool2D(strides=2)
self.layer2 = self._make_layer(block,
256,
layers[1],
stride=1,
use_gcb=gcb_enabling[1],
gcb_config=gcb_config)
self.conv4 = keras.layers.Conv2D(256,
kernel_size=3,
padding='same',
use_bias=False,
kernel_initializer=keras.initializers.he_normal())
self.bn4 = keras.layers.BatchNormalization(momentum=0.9,
epsilon=1e-5)
self.relu4 = keras.layers.ReLU()
self.maxpool3 = keras.layers.MaxPool2D(pool_size=(2,1), strides=(2,1))
self.layer3 = self._make_layer(block,
512,
layers[2],
stride=1,
use_gcb=gcb_enabling[2],
gcb_config=gcb_config)
self.conv5 = keras.layers.Conv2D(512,
kernel_size=3,
padding='same',
use_bias=False,
kernel_initializer=keras.initializers.he_normal())
self.bn5 = keras.layers.BatchNormalization(momentum=0.9,
epsilon=1e-5)
self.relu5 = keras.layers.ReLU()
self.layer4 = self._make_layer(block,
512,
layers[3],
stride=1,
use_gcb=gcb_enabling[3],
gcb_config=gcb_config)
self.conv6 = keras.layers.Conv2D(512,
kernel_size=3,
padding='same',
use_bias=False,
kernel_initializer=keras.initializers.he_normal())
self.bn6 = keras.layers.BatchNormalization(momentum=0.9,
epsilon=1e-5)
self.relu6 = keras.layers.ReLU()
def _make_layer(self, block, planes, blocks, stride=1, gcb_config=None, use_gcb=False):
downsample =None
if stride!=1 or self.inplanes != planes * block.expansion:
downsample = keras.Sequential(
[keras.layers.Conv2D(planes * block.expansion,
kernel_size=(1,1),
strides=stride,
use_bias=False,
kernel_initializer=keras.initializers.he_normal()),
keras.layers.BatchNormalization(momentum=0.9,
epsilon=1e-5)],
name='downsample'
)
layers = []
layers.append(block(planes, stride, downsample, gcb_config, use_gcb))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(planes))
return keras.Sequential(layers, name='make_layer')
def call(self, x, **kwargs):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu1(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu2(x)
x = self.maxpool1(x)
x = self.layer1(x)
x = self.conv3(x)
x = self.bn3(x)
x = self.relu3(x)
x = self.maxpool2(x)
x = self.layer2(x)
x = self.conv4(x)
x = self.bn4(x)
x = self.relu4(x)
x = self.maxpool3(x)
x = self.layer3(x)
x = self.conv5(x)
x = self.bn5(x)
x = self.relu5(x)
x = self.layer4(x)
x = self.conv6(x)
x = self.bn6(x)
x = self.relu6(x)
return x
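# Build sketch: the config keys mirror what this layer reads; the values (and the 48x160 input
# size) are illustrative assumptions, with the global-context blocks disabled for simplicity.
if __name__ == '__main__':
    cfg = {'gcb': {'ratio': 0.0625, 'headers': 1, 'pooling_type': 'att',
                   'fusion_type': 'channel_add', 'att_scale': False,
                   'layers': [False, False, False, False]}}
    backbone = Resnet31(BasicBlock, cfg)
    feats = backbone(tf.zeros((1, 48, 160, 3)))
    print(feats.shape)  # (1, 6, 40, 512): H/8, W/4, 512 channels after the three pooling stages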
``` |
{
"source": "jiangxiluning/TCPN",
"score": 2
} |
#### File: project/model/decoder.py
```python
import typing
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from data_util import config
from numpy import random
from easydict import EasyDict
import einops
from ..data_module import vocabs
use_cuda = config.use_gpu and torch.cuda.is_available()
random.seed(123)
torch.manual_seed(123)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(123)
def init_lstm_wt(lstm):
for names in lstm._all_weights:
for name in names:
if name.startswith('weight_'):
wt = getattr(lstm, name)
wt.data.uniform_(-config.rand_unif_init_mag, config.rand_unif_init_mag)
elif name.startswith('bias_'):
# set forget bias to 1
bias = getattr(lstm, name)
n = bias.size(0)
start, end = n // 4, n // 2
bias.data.fill_(0.)
bias.data[start:end].fill_(1.)
def init_linear_wt(linear):
linear.weight.data.normal_(std=config.trunc_norm_init_std)
if linear.bias is not None:
linear.bias.data.normal_(std=config.trunc_norm_init_std)
def init_wt_normal(wt):
wt.data.normal_(std=config.trunc_norm_init_std)
def init_wt_unif(wt):
wt.data.uniform_(-config.rand_unif_init_mag, config.rand_unif_init_mag)
class Encoder(nn.Module):
def __init__(self):
super(Encoder, self).__init__()
self.embedding = nn.Embedding(config.vocab_size, config.emb_dim)
init_wt_normal(self.embedding.weight)
self.lstm = nn.LSTM(config.emb_dim, config.hidden_dim, num_layers=1, batch_first=True, bidirectional=True)
init_lstm_wt(self.lstm)
self.W_h = nn.Linear(config.hidden_dim * 2, config.hidden_dim * 2, bias=False)
# seq_lens should be in descending order
def forward(self, input, seq_lens):
embedded = self.embedding(input)
packed = pack_padded_sequence(embedded, seq_lens, batch_first=True)
output, hidden = self.lstm(packed)
encoder_outputs, _ = pad_packed_sequence(output, batch_first=True) # h dim = B x t_k x n
encoder_outputs = encoder_outputs.contiguous()
encoder_feature = encoder_outputs.view(-1, 2 * config.hidden_dim) # B * t_k x 2*hidden_dim
encoder_feature = self.W_h(encoder_feature)
return encoder_outputs, encoder_feature, hidden
class ReduceState(nn.Module):
def __init__(self):
super(ReduceState, self).__init__()
self.reduce_h = nn.Linear(config.hidden_dim * 2, config.hidden_dim)
init_linear_wt(self.reduce_h)
self.reduce_c = nn.Linear(config.hidden_dim * 2, config.hidden_dim)
init_linear_wt(self.reduce_c)
def forward(self, hidden):
h, c = hidden # h, c dim = 2 x b x hidden_dim
h_in = h.transpose(0, 1).contiguous().view(-1, config.hidden_dim * 2)
hidden_reduced_h = F.relu(self.reduce_h(h_in))
c_in = c.transpose(0, 1).contiguous().view(-1, config.hidden_dim * 2)
hidden_reduced_c = F.relu(self.reduce_c(c_in))
return (hidden_reduced_h.unsqueeze(0), hidden_reduced_c.unsqueeze(0)) # h, c dim = 1 x b x hidden_dim
class Attention(nn.Module):
def __init__(self):
super(Attention, self).__init__()
# attention
if config.is_coverage:
self.W_c = nn.Linear(1, config.hidden_dim * 2, bias=False)
self.decode_proj = nn.Linear(config.hidden_dim * 2, config.hidden_dim * 2)
self.v = nn.Linear(config.hidden_dim * 2, 1, bias=False)
def forward(self, s_t_hat, encoder_outputs, encoder_feature, enc_padding_mask, coverage):
b, t_k, n = list(encoder_outputs.size())
dec_fea = self.decode_proj(s_t_hat) # B x 2*hidden_dim
dec_fea_expanded = dec_fea.unsqueeze(1).expand(b, t_k, n).contiguous() # B x t_k x 2*hidden_dim
dec_fea_expanded = dec_fea_expanded.view(-1, n) # B * t_k x 2*hidden_dim
att_features = encoder_feature + dec_fea_expanded # B * t_k x 2*hidden_dim
if config.is_coverage:
coverage_input = coverage.view(-1, 1) # B * t_k x 1
coverage_feature = self.W_c(coverage_input) # B * t_k x 2*hidden_dim
att_features = att_features + coverage_feature
e = F.tanh(att_features) # B * t_k x 2*hidden_dim
scores = self.v(e) # B * t_k x 1
scores = scores.view(-1, t_k) # B x t_k
attn_dist_ = F.softmax(scores, dim=1) * enc_padding_mask # B x t_k
normalization_factor = attn_dist_.sum(1, keepdim=True)
attn_dist = attn_dist_ / normalization_factor
attn_dist = attn_dist.unsqueeze(1) # B x 1 x t_k
c_t = torch.bmm(attn_dist, encoder_outputs) # B x 1 x n
c_t = c_t.view(-1, config.hidden_dim * 2) # B x 2*hidden_dim
attn_dist = attn_dist.view(-1, t_k) # B x t_k
if config.is_coverage:
coverage = coverage.view(-1, t_k)
coverage = coverage + attn_dist
return c_t, attn_dist, coverage
class Decoder(nn.Module):
def __init__(self, config: EasyDict):
super(Decoder, self).__init__()
self.embedding_dim = config.model.embedding_dim
self.hidden_dim = config.model.hidden_dim
self.pointer_gen = config.model.pointer_gen
self.attention_network = Attention()
# decoder
self.embedding = nn.Embedding(len(vocabs.key), self.embedding_dim)
init_wt_normal(self.embedding.weight)
self.x_context = nn.Linear(self.hidden_dim * 2 + self.embedding_dim, self.embedding_dim)
self.lstm = nn.LSTM(self.embedding_dim, self.embedding_dim, num_layers=1, batch_first=True, bidirectional=False)
init_lstm_wt(self.lstm)
self.p_copy_linear = nn.Linear(self.hidden_dim * 4 + self.embedding_dim, 1)
# p_vocab
self.out1 = nn.Linear(self.hidden_dim * 3, self.hidden_dim)
self.out2 = nn.Linear(self.hidden_dim, len(vocabs.key))
init_linear_wt(self.out2)
def forward(self,
y_t_1,
s_t_1_forward,
s_t_1_backward,
class_emb,
encoder_outputs_forward,
encoder_outputs_backward,
encoder_inputs,
enc_padding_mask,
c_t_1_forward,
c_t_1_backward,
coverage_forward,
coverage_backward,
step):
"""
Args:
y_t_1: B * num_entities
s_t_1_forward: (hidden, context) hidden: B * num_entities * hidden_dim,
context: B * num_entities * hidden_dim
s_t_1_backward: (hidden, context) hidden: B * num_entities * hidden_dim,
context: B * num_entities * hidden_dim
class_emb: B * num_entities * class_embedding_dim
encoder_outputs_forward: B * max_len_encoder
encoder_outputs_backward: B * max_len_encoder
encoder_inputs: B * max_len_encoder
enc_padding_mask:
c_t_1_forward: B * num_entities * embedding_dim
c_t_1_backward: B * num_entities * embedding_dim
coverage_forward: B * num_entities * max_len_encoder
coverage_backward: B * num_entities * max_len_encoder
step:
Returns:
"""
if not self.training and step == 0:
h_decoder_forward, c_decoder_forward = s_t_1_forward
s_t_hat_forward = torch.cat((h_decoder_forward.view(-1, self.hidden_dim),
c_decoder_forward.view(-1, self.hidden_dim)), 1) # B x 2*hidden_dim
c_t_forward, _, coverage_next_forward = self.attention_network(s_t_hat_forward,
encoder_outputs_forward,
encoder_feature,
enc_padding_mask,
coverage_forward)
coverage_forward = coverage_next_forward
h_decoder_backward, c_decoder_backward = s_t_1_backward
s_t_hat_backward = torch.cat((h_decoder_backward.view(-1, self.hidden_dim),
c_decoder_backward.view(-1, self.hidden_dim)), 1) # B x 2*hidden_dim
c_t_backward, _, coverage_next_backward = self.attention_network(s_t_hat_backward,
encoder_outputs_backward,
encoder_feature,
enc_padding_mask,
                                                                             coverage_backward)
coverage_backward = coverage_next_backward
y_t_1_embd = self.embedding(y_t_1)
x_forward = self.x_context(torch.cat((c_t_1_forward, y_t_1_embd), 1))
x_backward = self.x_context(torch.cat((c_t_1_backward, y_t_1_embd), 1))
_, s_t_forward = self.lstm(x_forward.unsqueeze(1), s_t_1_forward)
_, s_t_backward = self.lstm(x_backward.unsqueeze(1), s_t_1_backward)
h_decoder_forward, c_decoder_forward = s_t_forward
s_t_hat_forward = torch.cat((h_decoder_forward.view(-1, self.hidden_dim),
c_decoder_forward.view(-1, self.hidden_dim)), 1) # B x 2*hidden_dim
c_t_forward, attn_dist_forward, coverage_next_forward = self.attention_network(s_t_hat_forward,
encoder_outputs_forward,
encoder_feature,
enc_padding_mask,
coverage_forward)
h_decoder_backward, c_decoder_backward = s_t_backward
s_t_hat_backward = torch.cat((h_decoder_backward.view(-1, self.hidden_dim),
c_decoder_backward.view(-1, self.hidden_dim)), 1) # B x 2*hidden_dim
c_t_backward, attn_dist_backward, coverage_next_backward = self.attention_network(s_t_hat_backward,
encoder_outputs_backward,
encoder_feature,
enc_padding_mask,
coverage_backward)
if self.training or step > 0:
coverage_forward = coverage_next_forward
coverage_backward = coverage_next_backward
F_aggregated = torch.cat((c_t_forward, c_t_backward))
s_t_aggregated = torch.cat((s_t_hat_forward, s_t_hat_backward))
p_copy_input = torch.cat((F_aggregated, s_t_aggregated, x), 1) # B x (2*2*hidden_dim + emb_dim)
        p_copy = self.p_copy_linear(p_copy_input)
p_copy = F.sigmoid(p_copy)
output = torch.cat((F_aggregated, s_t_aggregated), 1) # B x hidden_dim * 3
output = self.out1(output) # B x hidden_dim
output = self.out2(output) # B x vocab_size
p_pred = torch.softmax(output, dim=1)
index = torch.where((encoder_inputs == y_t_1))
selected_attn_forward = attn_dist_forward[index]
selected_attn_backward = attn_dist_backward[index]
selected_attn = (selected_attn_forward + selected_attn_backward) / 2
p_output = p_copy * selected_attn + (1 - p_copy) * p_pred[y_t_1]
return p_output, s_t_forward, s_t_backward, c_t_forward, c_t_backward, coverage_forward, coverage_backward
```
#### File: project/model/feature_encoding.py
```python
import typing
import torch
import torch.nn.functional as F
import torch.nn as nn
import torchvision
from fastai.vision.models.unet import DynamicUnet
import easydict
from .unet import UNetWithResnetEncoder
class AddCoords(nn.Module):
def __init__(self, with_r=False):
super().__init__()
self.with_r = with_r
def forward(self, input_tensor):
"""
Args:
input_tensor: shape(batch, channel, x_dim, y_dim)
"""
batch_size, _, x_dim, y_dim = input_tensor.size()
xx_channel = torch.arange(x_dim).repeat(1, y_dim, 1)
yy_channel = torch.arange(y_dim).repeat(1, x_dim, 1).transpose(1, 2)
xx_channel = xx_channel.float() / (x_dim - 1)
yy_channel = yy_channel.float() / (y_dim - 1)
xx_channel = xx_channel * 2 - 1
yy_channel = yy_channel * 2 - 1
xx_channel = xx_channel.repeat(batch_size, 1, 1, 1).transpose(2, 3)
yy_channel = yy_channel.repeat(batch_size, 1, 1, 1).transpose(2, 3)
ret = torch.cat([
input_tensor,
xx_channel.type_as(input_tensor),
yy_channel.type_as(input_tensor)], dim=1)
if self.with_r:
rr = torch.sqrt(
torch.pow(xx_channel.type_as(input_tensor) - 0.5, 2) + torch.pow(yy_channel.type_as(input_tensor) - 0.5,
2))
ret = torch.cat([ret, rr], dim=1)
return ret
class FeatureEncoding(nn.Module):
def __init__(self, config: easydict.EasyDict):
super(FeatureEncoding, self).__init__()
self.lattice_dim = config.model.lattice_dim
resnet = torchvision.models.resnet18(pretrained=True)
resnet.conv1 = nn.Conv2d(self.lattice_dim + 2,
                                 resnet.inplanes,
kernel_size=3, stride=1, padding=1, bias=False)
resnet.maxpool = nn.Identity()
self.unet = UNetWithResnetEncoder(resnet)
self.add_coords = AddCoords(with_r=False)
def forward(self, lattice, b_t, *args, **kwargs):
"""
Args:
lattice: B C H W
b_t: list of tensors [[N1, 2], [N2, 2], ..., [N_(B), 2]]
*args:
**kwargs:
Returns:
"""
x = self.add_coords(lattice)
output = lattice + self.unet(x)
output = output.permute((0, 2, 3, 1)).contiguous()
F_features = []
lengths = []
for i, center in enumerate(b_t):
# N_i, C
f = output[i][center]
lengths.append(f.shape[0])
F_features.append(f)
max_len = max(lengths)
sequences = torch.zeros((len(F_features), max_len, output.shape[-1]), dtype=output.dtype).to(output.device)
for i, feature in enumerate(F_features):
sequences[i][:feature.shape[0]] = feature
lengths = torch.tensor(lengths, dtype=torch.long, device=sequences.device)
return sequences, lengths
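# Shape-check sketch for AddCoords (the tensor sizes are illustrative):
if __name__ == '__main__':
    coords = AddCoords(with_r=False)
    dummy = torch.zeros(2, 8, 32, 32)
    print(coords(dummy).shape)  # torch.Size([2, 10, 32, 32]): input channels plus x/y coordinate maps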
```
#### File: project/model/unet.py
```python
import typing
import torch
from torchvision.models import ResNet
import torch.nn as nn
class ConvBlock(nn.Module):
"""
Helper module that consists of a Conv -> BN -> ReLU
"""
def __init__(self, in_channels, out_channels, padding=1, kernel_size=3, stride=1, with_nonlinearity=True):
super().__init__()
self.conv = nn.Conv2d(in_channels, out_channels, padding=padding, kernel_size=kernel_size, stride=stride)
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU()
self.with_nonlinearity = with_nonlinearity
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
if self.with_nonlinearity:
x = self.relu(x)
return x
class Bridge(nn.Module):
"""
This is the middle layer of the UNet which just consists of some
"""
def __init__(self, in_channels, out_channels):
super().__init__()
self.bridge = nn.Sequential(
ConvBlock(in_channels, out_channels),
ConvBlock(out_channels, out_channels)
)
def forward(self, x):
return self.bridge(x)
class UpBlockForUNetWithResNet(nn.Module):
"""
Up block that encapsulates one up-sampling step which consists of Upsample -> ConvBlock -> ConvBlock
"""
def __init__(self, in_channels, out_channels, up_conv_in_channels=None, up_conv_out_channels=None,
upsampling_method="conv_transpose"):
super().__init__()
if up_conv_in_channels == None:
up_conv_in_channels = in_channels
if up_conv_out_channels == None:
up_conv_out_channels = out_channels
if upsampling_method == "conv_transpose":
self.upsample = nn.ConvTranspose2d(up_conv_in_channels, up_conv_out_channels, kernel_size=2, stride=2)
elif upsampling_method == "bilinear":
self.upsample = nn.Sequential(
nn.Upsample(mode='bilinear', scale_factor=2),
nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1)
)
self.conv_block_1 = ConvBlock(in_channels, out_channels)
self.conv_block_2 = ConvBlock(out_channels, out_channels)
def forward(self, up_x, down_x):
"""
:param up_x: this is the output from the previous up block
:param down_x: this is the output from the down block
:return: upsampled feature map
"""
x = self.upsample(up_x)
x = torch.cat([x, down_x], 1)
x = self.conv_block_1(x)
x = self.conv_block_2(x)
return x
class UNetWithResnetEncoder(nn.Module):
DEPTH = 5
def __init__(self, encoder: ResNet):
super().__init__()
resnet = encoder
down_blocks = []
up_blocks = []
self.input_block = nn.Sequential(*list(resnet.children()))[:3]
self.input_pool = list(resnet.children())[3]
for bottleneck in list(resnet.children()):
if isinstance(bottleneck, nn.Sequential):
down_blocks.append(bottleneck)
self.down_blocks = nn.ModuleList(down_blocks)
layer_dims = []
for name, children in resnet.named_children():
if name == 'layer1':
convs = []
for name_i, children_i in children[-1].named_children():
if 'conv' in name_i:
convs.append(children_i)
layer_dims.append(convs[-1].out_channels)
self.bridge = Bridge(layer_dims[-1], layer_dims[-1])
up_blocks.append(UpBlockForUNetWithResNet(layer_dims[-1], layer_dims[-2]))
up_blocks.append(UpBlockForUNetWithResNet(layer_dims[-2], layer_dims[-3]))
up_blocks.append(UpBlockForUNetWithResNet(layer_dims[-3], layer_dims[-4]))
up_blocks.append(UpBlockForUNetWithResNet(in_channels=layer_dims[-4], out_channels=64))
        self.up_blocks = nn.ModuleList(up_blocks)
        # Assumption: forward() calls self.out, which this snippet never defines;
        # a 1x1 convolution that keeps the 64 up-block output channels is used as a placeholder.
        self.out = nn.Conv2d(64, 64, kernel_size=1, stride=1)
def forward(self, x, with_output_feature_map=False):
pre_pools = dict()
pre_pools[f"layer_0"] = x
x = self.input_block(x)
pre_pools[f"layer_1"] = x
x = self.input_pool(x)
for i, block in enumerate(self.down_blocks, 2):
x = block(x)
if i == (UNetWithResnetEncoder.DEPTH - 1):
continue
pre_pools[f"layer_{i}"] = x
x = self.bridge(x)
for i, block in enumerate(self.up_blocks, 1):
key = f"layer_{UNetWithResnetEncoder.DEPTH - 1 - i}"
x = block(x, pre_pools[key])
output_feature_map = x
x = self.out(x)
del pre_pools
if with_output_feature_map:
return x, output_feature_map
else:
return x
``` |
{
"source": "Jiangxinz/Paddle-bot",
"score": 3
} |
#### File: Paddle-bot/gitee/handler.py
```python
import requests
import logging
logging.basicConfig(
level=logging.INFO,
filename='./logs/pr.log',
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
class GiteePROperation():
def __init__(self):
self.prUrl = 'https://gitee.com/api/v5/repos/{owner}/{repo}/pulls'
self.prMergeUrl = self.prUrl + '/{number}/merge'
self.access_token = "xxxxx"
def merge(self, owner, repo, number):
prMergeUrl = self.prMergeUrl.format(
owner=owner, repo=repo, number=number)
payload = {
"access_token": self.access_token,
"merge_method": "squash",
"prune_source_branch": "true"
}
r = requests.request(
"PUT",
prMergeUrl,
params=payload,
headers={'Content-Type': 'application/json'})
print(r.text)
return r.status_code
def getPRListWithOpenStatus(self, owner, repo):
PRList = []
prUrl = self.prUrl.format(owner=owner, repo=repo)
payload = {
"access_token": self.access_token,
"per_page": 100,
"state": "open"
}
r = requests.request(
"GET",
prUrl,
params=payload,
headers={'Content-Type': 'application/json'})
for item in r.json():
PR = item['number']
PRList.append(PR)
return PRList
```
#### File: Paddle-bot/gitee/pr_merge.py
```python
from gitee.handler import GiteePROperation
import json
import time
from webservice.utils.mail_163 import Mail
from Singleton import MySingleton
def gitee_merge_pr():
"""merge pr"""
merge_pr_list = GiteePROperation().getPRListWithOpenStatus('paddlepaddle',
'Paddle')
merge_pr_list.sort()
print(merge_pr_list)
merge_pr_info = ""
count = 0
singleton = MySingleton()
for PR in merge_pr_list:
print("PR: %s" % PR)
merge_status = GiteePROperation().merge('paddlepaddle', 'Paddle', PR)
while merge_status not in [200, 201]:
time.sleep(10)
merge_status = GiteePROperation().merge('paddlepaddle', 'Paddle',
PR)
count += 1
if count >= 3:
break
if merge_status in [200, 201]:
merge_pr_info = merge_pr_info + "<tr align=center><td>PR</td><td>{}</td><td>merged succeed</td></tr>".format(
PR)
pr_state = singleton.get_github_pr_by_gitee_pr(PR)
singleton.set_pr_merge_state(pr_state.github_pr, '已合入')
else:
merge_pr_info = merge_pr_info + "<tr align=center><td>PR</td><td>{}</td><td>merged failed</td></tr>".format(
PR)
def sendMail(title, content, receivers):
mail = Mail()
mail.set_sender('<EMAIL>')
mail.set_receivers(receivers)
mail.set_title(title)
mail.set_message(content, messageType='html', encoding='gb2312')
mail.send()
# gitee_merge_pr()
```
#### File: Paddle-bot/statistics/get_PR_count.py
```python
import pandas as pd
import datetime
import requests
import argparse
import time
import re
def BJtime(mergeTime):
"""
utc时间转换北京时间
"""
mergeTime = mergeTime.replace('T', ' ').replace('Z', '')
mergeTime = datetime.datetime.strptime(mergeTime, '%Y-%m-%d %H:%M:%S')
mergeTime = mergeTime + datetime.timedelta(hours=8)
mergeTime = datetime.datetime.strftime(mergeTime, '%Y-%m')
return mergeTime
def getPersonnel(user):
"""
判断是否为内部员工
"""
personnel_api = ''
# 部门员工信息平台api--->按名字查询
isname = requests.get(personnel_api + '?github_name=' + user).json()
# 部门员工信息平台api--->按ID查询
isID = requests.get(personnel_api + '?github_id=' + user).json()
if isname:
return [isname[0]['name'], isname[0]['email'], isname[0]['team']]
elif isID:
return [isID[0]['name'], isID[0]['email'], isID[0]['team']]
return False
def get_page(url, headers):
    """
    Get the total number of result pages from the Link response header.
    """
    page_num = 0
    response = requests.get(url, headers=headers, stream=True)
    try:
        if "Link" in response.headers.keys():
            # read the Link content from the response headers
            header_info = response.headers["Link"]
            # strip <, > and spaces
            header_replace = re.sub('<|>| ', '', header_info)
            # split into a list on , and ;
            header_split = re.split(',|;', header_replace)
            # index of the rel="last" entry in the list
            last_index = header_split.index('rel=\"last\"')
            # URL of the last page
            num = header_split[last_index - 1]
            # page number inside the last-page URL
            page_num = int(re.search(r'&page=(\d+)', num).group(1))
    except BaseException as e:
        print(url)
    if not page_num:
        page_num = 1
    return page_num
def toFile(path, msg):
with open(path, "w+", encoding='utf-8') as f:
f.write(msg)
def get_info(url, headers, page_num, date):
user_dict = {}
for page in range(page_num):
page += 1
page_url = url + '&page=' + str(page)
res = requests.get(page_url, headers=headers, stream=True).json()
for info in res:
if 'merged_at' in info.keys() and info['merged_at']:
mergeTime = BJtime(info['merged_at'])
if mergeTime == date:
user_info = getPersonnel(info['user']['login'])
if user_info:
user = user_info[0]
email = user_info[1]
team = user_info[2]
pr_num = info['number']
pr_url = info['url']
pr_res = requests.get(pr_url, headers=headers).json()
if email not in user_dict.keys():
user_dict[email] = [
user, email, team, 1, pr_res['additions'],
pr_res['deletions']
]
else:
user_dict[email][3] += 1
user_dict[email][4] += pr_res['additions']
user_dict[email][5] += pr_res['deletions']
print(user_dict)
df = pd.DataFrame(
user_dict.values(),
columns=['name', 'email', 'team', 'PR数量', 'additions', 'deletions'])
file_path = pd.ExcelWriter('./%s_contribution.xlsx' % date)
df.fillna(' ', inplace=True)
df.to_excel(file_path, encoding='utf-8', index=False, sheet_name="个人贡献量统计")
file_path.save()
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--date', help='年-月', default='2021-07')
args = parser.parse_args()
url = 'https://api.github.com/repos/PaddlePaddle/Paddle/pulls?state=closed&per_page=100'
headers = {
'User-Agent': 'Mozilla/5.0',
'Authorization': 'token i',
'Content-Type': 'application/json',
'Accept': 'application/json'
}
page_num = get_page(url, headers)
res = get_info(url, headers, page_num, args.date)
```
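For reference, `requests` already parses the `Link` header into `response.links`, so the page count that `get_page` extracts with regexes can also be read as below; this is a sketch, not part of the original script, and it assumes the API response actually carries a `Link` header:
```python
import requests
from urllib.parse import urlparse, parse_qs
def last_page(url, headers):
    """Sketch: read the rel="last" page number from requests' parsed Link header."""
    resp = requests.get(url, headers=headers)
    last = resp.links.get('last')
    if not last:
        return 1
    # the page number travels in the ?page= query parameter of the last-page URL
    return int(parse_qs(urlparse(last['url']).query)['page'][0])
```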
#### File: Paddle-bot/webservice/__main__.py
```python
import os
import aiohttp
from gidgethub.aiohttp import GitHubAPI
from aiohttp import web
from gidgethub import routing, sansio
from gidgethub import aiohttp as gh_aiohttp
from utils.auth import get_jwt, get_installation, get_installation_access_token
import event
import json
routes = web.RouteTableDef()
router = routing.Router(event.router)
@routes.post("/")
async def main(request):
body = await request.read()
user = json.loads(body.decode('utf8'))['repository']['owner']['login']
repo = json.loads(body.decode('utf8'))['repository']['full_name']
secret = os.environ.get("GH_SECRET")
event = sansio.Event.from_http(request.headers, body, secret=secret)
async with aiohttp.ClientSession() as session:
app_id = os.getenv("GH_APP_ID")
jwt = get_jwt(app_id)
gh = gh_aiohttp.GitHubAPI(session, user)
try:
installation = await get_installation(gh, jwt, user)
except ValueError as ve:
print(ve)
else:
access_token = await get_installation_access_token(
gh, jwt=jwt, installation_id=installation["id"])
# treat access_token as if a personal access token
gh = gh_aiohttp.GitHubAPI(
session, user, oauth_token=access_token["token"])
await router.dispatch(event, gh, repo)
return web.Response(status=200)
if __name__ == "__main__":
app = web.Application()
app.add_routes(routes)
port = os.environ.get("PORT")
if port is not None:
port = int(port)
web.run_app(app, port=port)
```
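For context, the `router.dispatch(event, gh, repo)` call above only has an effect if handlers are registered on the router defined in `event.py`; a minimal, hypothetical handler is sketched below (the event name and comment text are illustrative, not taken from the repo):
```python
# Hypothetical handler module built on gidgethub's routing.Router.
from gidgethub import routing
router = routing.Router()
@router.register("issues", action="opened")
async def issue_opened(event, gh, repo, *args, **kwargs):
    # greet the author of every newly opened issue
    comments_url = event.data["issue"]["comments_url"]
    await gh.post(comments_url, data={"body": "Thanks for the report!"})
```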
#### File: webservice/monitor/autoMarkTimeOutPR.py
```python
import os
import aiohttp
import asyncio
import json
import time
import datetime
import logging
import gidgethub
import requests
from gidgethub import aiohttp as gh_aiohttp
import sys
import pandas as pd
sys.path.append("..")
from utils.auth import get_jwt, get_installation, get_installation_access_token
from utils.test_auth_ipipe import xlyOpenApiRequest
from utils.readConfig import ReadConfig
logging.basicConfig(
level=logging.INFO,
filename='../logs/regularMark.log',
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
localConfig = ReadConfig(path='../conf/config.ini')
class MarkTimeoutCI(object):
def __init__(self, user, repo, gh):
self.pr_url = 'https://api.github.com/repos/%s/%s/pulls?per_page=100&page=1&q=addClass' % (
user, repo)
self.gh = gh
self.user = user
self.repo = repo
self.mark_url = 'https://xly.bce.baidu.com/open-api/ipipe/rest/v1/job-builds/{}/mark'
self.rerun_url = 'http://www.cipaddlepaddle.cn:8081/%s/%s/{}/{}' % (
user, repo)
self.comment_url = 'https://api.github.com/repos/%s/%s/issues/{}/comments' % (
user, repo)
    def getNextUrl(self, link):
        """Return the next-page URL parsed from the Link header, or None."""
        next_str = None
        for i in link.split(','):
            if 'rel="next"' in i:
                next_str = i
                break
        if next_str is not None:
            start_index = next_str.index('<')
            end_index = next_str.index('>')
            url = next_str[start_index + 1:end_index]
        else:
            url = None
        return url
async def getBeforeSevenDaysPRList(self):
"""
1. 获取距离现在7天-30天创建的PR列表:只获取,不做处理
2. 30天之前的暂不处理: 默认认为GitHub已经设它们为code conflicts. 如有需要,后续在处理。
return : [{PR, commit, status_url}]
"""
today = datetime.date.today()
seven_Days_ago = str(today - datetime.timedelta(days=7))
month_Days_ago = str(today - datetime.timedelta(days=30))
overduelist = []
        while self.pr_url is not None:
(code, header, body) = await self.gh._request(
"GET", self.pr_url,
{'accept': 'application/vnd.github.antiope-preview+json'})
res = json.loads(body.decode('utf8'))
for item in res:
if item['created_at'] < seven_Days_ago and item[
'created_at'] > month_Days_ago:
item_dic = {}
item_dic['PR'] = item['number']
item_dic['commit'] = item['head']['sha']
item_dic['status_url'] = item['statuses_url']
overduelist.append(item_dic)
self.pr_url = self.getNextUrl(header['link'])
print("before %s's PRs: %s" % (seven_Days_ago, overduelist))
logger.info("before %s's PRs: %s" % (seven_Days_ago, overduelist))
return overduelist
async def getCIstatus(self):
"""
获取符合条件的PR的CI列表:
1. 获取PR最新的commit url
2. 获取1的commit的最近的CI(去除一些GitHub的脏数据(eg. pending状态的))
3. 判断最近的CI是否是7天之前的,只要有一条CI是7天之前的就需要标记
4. 只标记成功的CI为失败
"""
PRList = await self.getBeforeSevenDaysPRList()
today = datetime.date.today()
seven_Days_ago = str(today - datetime.timedelta(days=7))
CI_STATUS_LIST = []
for item in PRList:
commit_ci_status = {}
commit_ci_status['PR'] = item['PR']
commit_ci_status['commit'] = item['commit']
status_url = item['status_url']
res = requests.get(status_url,
headers={'authorization': "token xxx"},
timeout=15).json()
commit_ci_status['CI'] = []
            if_before_seven_day = []  # flags whether the CI runs are older than 7 days
for ci in res:
already_exit = False
if ci['context'] != 'license/cla':
for i in commit_ci_status['CI']:
                        if ci['context'] == i['ciName'] and i['time'] > ci[
                                'created_at']:  # drop stale duplicate entries returned by the GitHub API
                            already_exit = True
                            break
                    if not already_exit:
item_dic = {}
item_dic['time'] = ci['created_at']
item_dic['ciName'] = ci['context']
item_dic['status'] = ci['state']
item_dic['markId'] = ci['target_url'].split('/')[-1]
commit_ci_status['CI'].append(item_dic)
                        if item_dic['time'] > seven_Days_ago:  # the latest run of this CI is within the last 7 days
                            if_before_seven_day.append(False)
                        else:
                            if_before_seven_day.append(True)  # True: older than 7 days
            if True in if_before_seven_day:  # one CI run older than 7 days is enough to require marking
print('%s is 7 ago..........' % item['PR'])
CI_STATUS_LIST.append(commit_ci_status)
else:
print('%s not 7 ago' % item['PR'])
logger.info("need to mark ci list: %s" % CI_STATUS_LIST)
return CI_STATUS_LIST
async def markCIFailed(self):
"""
mark success/pending ci to failed
"""
CIStatusList = await self.getCIstatus()
REQUIRED_CI = localConfig.cf.get('%s/%s' % (self.user, self.repo),
'REQUIRED_CI')
DATA = {"data": "FAIL", "message": "Paddle-bot", "type": "MARK"}
json_str = json.dumps(DATA)
headers = {
"Content-Type": "application/json",
"IPIPE-UID": "Paddle-bot"
}
for item in CIStatusList:
PR = item['PR']
commit = item['commit']
ci_list = item['CI']
mark_ci_list = []
for ci in ci_list:
if ci['ciName'] in REQUIRED_CI and ci[
'status'] in ['success', 'pending']:
markId = ci['markId']
mark_url = self.mark_url.format(markId)
res = xlyOpenApiRequest().post_method(
mark_url, json_str, headers=headers)
if res.status_code == 200 or res.status_code == 201:
mark_ci_list.append(ci['ciName'])
print('%s_%s_%s mark success!' %
(PR, commit, ci['ciName']))
logger.info('%s_%s_%s mark success!' %
(PR, commit, ci['ciName']))
else:
print('%s_%s_%s mark failed!' %
(PR, commit, ci['ciName']))
logger.error('%s_%s_%s mark failed!' %
(PR, commit, ci['ciName']))
            if len(mark_ci_list) > 0:
                marked = self.queryIfHasMark(PR, commit)
                if not marked:
                    # inform() is a coroutine, so it must be awaited
                    await self.inform(item)
                else:
                    print('%s_%s has marked!!!!' % (PR, commit))
                    logger.info('%s_%s has marked!!!!' % (PR, commit))
data = {
'TIME': time.strftime("%Y%m%d %H:%M:%S", time.localtime()),
'PR': PR,
'COMMITID': commit,
'CINAME': mark_ci_list
}
self.save_markci_job(data)
def queryIfHasMark(self, PR, commitid):
"""marked 是否已经标记过"""
marked = True
df = pd.read_csv('../buildLog/mark_timeout_ci.csv')
queryKey = df[(df['PR'] == PR) & (df['COMMITID'] == commitid)]
if queryKey.empty:
marked = False
return marked
def create_markci_csv(self, filename):
"""创建存储文件"""
df = pd.DataFrame(columns=['TIME', 'PR', 'COMMITID', 'CINAME'])
df.to_csv(filename)
def save_markci_job(self, data):
"""将kill的任务存到"""
filename = '../buildLog/mark_timeout_ci.csv'
if os.path.exists(filename) == False:
self.create_markci_csv(filename)
write_data = pd.DataFrame(data)
write_data.to_csv(filename, mode='a', header=False)
async def inform(self, item):
"""Paddle-bot发出评论"""
#POST /repos/:owner/:repo/issues/:issue_number/comments
rerun_ci_link = self.rerun_url.format(item['PR'], item['commit'])
comment_url = self.comment_url.format(item['PR'])
shortId = item['commit'][0:7]
message = "Sorry to inform you that %s's CIs have passed for more than 7 days. To prevent PR conflicts, you need to re-run all CIs manually. " % shortId
await self.gh.post(comment_url, data={"body": message})
async def main(user, repo):
async with aiohttp.ClientSession() as session:
app_id = os.getenv("GH_APP_ID")
jwt = get_jwt(app_id)
gh = gh_aiohttp.GitHubAPI(session, user)
try:
installation = await get_installation(gh, jwt, user)
except ValueError as ve:
print(ve)
else:
access_token = await get_installation_access_token(
gh, jwt=jwt, installation_id=installation["id"])
# treat access_token as if a personal access token
gh = gh_aiohttp.GitHubAPI(
session, user, oauth_token=access_token["token"])
markCIObject = MarkTimeoutCI(user, repo, gh)
await markCIObject.markCIFailed()
loop = asyncio.get_event_loop()
loop.run_until_complete(main('PaddlePaddle', 'Paddle'))
```
#### File: webservice/monitor/exceptionQueueCIMonitor.py
```python
import json
import time
import sys
sys.path.append("..")
from utils.resource import Resource
from utils.mail import Mail
class ExceptionWaitingJob():
"""异常排队作业"""
def __init__(self):
self.required_labels = [
'nTeslaV100-16', 'nTeslaP4', 'Paddle-mac', 'Paddle-mac-py3',
'Paddle-windows', 'Paddle-windows-cpu', 'Paddle-approval-cpu',
'Paddle-benchmark-P40', 'Paddle-Kunlun', 'Paddle-musl'
]
self.__resource = self.getEachResource()
self.__longest_waiting_default = 30
def getEachResourceDict(self):
ResourceDict = {}
for label in self.required_labels:
if label not in ['nTeslaV100-16', 'nTeslaP4']:
ResourceDict[label] = self.__resource[label]
ResourceDict['nTeslaV100-16'] = 17
ResourceDict['nTeslaP4'] = 5
return ResourceDict
def classifyTaskByCardType(self, task_list, cardType):
"""
this function will classify container tasks. eg nTeslaV100, nTeslaP4
Args:
container_task_list(list):
cardType(str): gpu card type.
Returns:
cardType_task_list: .
"""
print("cardType: %s" % cardType)
task_list_by_card = []
for task in task_list:
if task['label'] == cardType:
task_list_by_card.append(task)
print(task_list_by_card)
return len(task_list_by_card)
def getRunningJobSize(self):
"""
        this function will get the number of running jobs for each resource label.
"""
running_job_size = {}
xly_container_running_task_list = self.getJobList('running')
sa_container_running_task_list = self.getJobList('sarunning')
all_running_task = xly_container_running_task_list + sa_container_running_task_list
for label in self.required_labels:
running_job_size[label] = self.classifyTaskByCardType(
all_running_task, label)
print(running_job_size)
return running_job_size
def getExceptionWaitingJob(self):
"""
        this function will collect abnormally waiting jobs (formatted as mail table rows).
"""
running_job_size = self.getRunningJobSize()
ResourceDict = self.getEachResourceDict()
with open("../buildLog/wait_task.json", 'r') as load_f:
all_waiting_task = json.load(load_f)
load_f.close()
mailContent = ''
for task in all_waiting_task:
if task['waiting'] > self.__longest_waiting_default:
for label in self.required_labels:
if task['cardType'] == label:
real_use_count = running_job_size[label]
resource_count = ResourceDict[label]
isAbnormal = self.getIsAbnormal(resource_count,
real_use_count)
if isAbnormal == True:
mailContent += "<tr align=center><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>" % (
task['PR'], task['CIName'], task['waiting'],
task['cardType'], real_use_count,
resource_count, task['repoName'])
return mailContent
def exactExceptionAlarm(self):
count = 1
mailContent = self.getExceptionWaitingJob()
mailBeginning = "<html><body><p>Hi, ALL:</p> <p>以下任务已等待超过60min, 且对应的资源并不是全在使用, 请及时查看.</p><table border='1' align=center> <caption><font size='3'><b>等待超过60min的任务列表</b></font></caption><tr align=center><td bgcolor='#d0d0d0'>PR</td><td bgcolor='#d0d0d0'>CIName</td><td bgcolor='#d0d0d0'>已等待时间/min</td><td bgcolor='#d0d0d0'>使用资源</td><td bgcolor='#d0d0d0'>实际使用资源个数/个</td><td bgcolor='#d0d0d0'>资源全量/个</td><td bgcolor='#d0d0d0'>repo</td></tr>"
while count < 4 and mailContent != '':
print("count: %s" % count)
print("mailContent: %s" % mailContent)
time.sleep(60)
mailContent = self.getExceptionWaitingJob()
            count += 1  # re-check at most 3 times
if mailContent != '':
mailDetails = mailBeginning + mailContent + '</body></html>'
self.sendMail(mailDetails)
else:
print("资源正常!")
def getIsAbnormal(self, default_count, running_count):
"""
        this function will decide whether a waiting job is abnormal.
Returns:
isAbnormal(bool): True/False
"""
isAbnormal = False
ratio = (default_count - running_count) / default_count
print('ratio: %s' % ratio)
if ratio > 0.25:
isAbnormal = True
return isAbnormal
def sendMail(self, mailContent):
"""
this function will send alarm mail.
"""
mail = Mail()
mail.set_sender('<EMAIL>')
mail.set_receivers(['<EMAIL>'])
mail.set_title('[告警]任务等待超时')
mail.set_message(mailContent, messageType='html', encoding='gb2312')
mail.send()
ExceptionWaitingJob().exactExceptionAlarm()
```
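A worked example of the threshold used in `getIsAbnormal` above (the numbers are illustrative, not taken from production):
```python
# If 17 slots exist for a card type but only 10 jobs are actually running,
# the idle ratio exceeds 0.25, so a job waiting longer than the default is reported.
default_count, running_count = 17, 10
ratio = (default_count - running_count) / default_count   # ~0.41
print(ratio > 0.25)                                        # True -> abnormal, goes into the alarm mail
```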
#### File: webservice/utils/db.py
```python
from influxdb import InfluxDBClient
import time
import os
INFLUXDB_IP = os.getenv("INFLUXDB_IP")
INFLUXDB_PORT = 8086
INFLUXDB_DATABASE = os.getenv("INFLUXDB_DATABASE")
class Database:
"""Database"""
def __init__(self):
self._client = InfluxDBClient(
host=INFLUXDB_IP,
            port=INFLUXDB_PORT,
username='xxx',
password='<PASSWORD>',
database=INFLUXDB_DATABASE)
def insert(self, table, index_dict):
data_points = [{"measurement": table, "fields": index_dict}]
result = self._client.write_points(data_points)
return result
def query(self, query_stat):
result = self._client.query(query_stat)
return result
``` |
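A minimal usage sketch for the `Database` wrapper above, assuming `INFLUXDB_IP` and `INFLUXDB_DATABASE` point at a reachable InfluxDB 1.x instance; the measurement and field names are illustrative only:
```python
# Hypothetical example: write one point, then read recent points back.
db = Database()
db.insert("ci_wait_time", {"pr": 12345, "wait_minutes": 42.0})
result = db.query("SELECT * FROM ci_wait_time ORDER BY time DESC LIMIT 5")
for point in result.get_points():
    print(point)
```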
{
"source": "jiangxkjohn/coding_tutorial",
"score": 3
} |
#### File: coding_tutorial/leetcode/LengthOfLongestSubstring.py
```python
class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
len_map = [[0 for _ in range(len(s)+1)] for _ in range(len(s)+1)]
for i in range(len(s)+1):
len_map[i][i] = 0
if i < len(s):
len_map[i][i+1] = 1
max_value = 1
for i in range(2, len(s)+1):
for j in range(len(s)-i+1):
if len_map[j-1][i+j] == len_map[j][i+j-1] and len_map[j][i+j-1] > len_map[j-1][i+j-1]:
print("here")
if s[j] == s[i+j-1]:
len_map[j][i+j] = len_map[j-1][i+j]
else:
len_map[j][i+j] = len_map[j-1][i+j] + 1
else:
len_map[j][i+j] = max(len_map[j-1][i+j], len_map[j][i+j-1])
max_value = max(max_value, len_map[j][i+j])
print(len_map)
return max_value
if __name__ == "__main__":
s = Solution()
s.lengthOfLongestSubstring("asdf")
``` |
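For comparison, the standard O(n) sliding-window solution to the same problem; this is an alternative sketch, not a rewrite of the file above:
```python
def length_of_longest_substring(s: str) -> int:
    """Length of the longest substring of s without repeating characters."""
    last_seen = {}     # character -> index of its most recent occurrence
    best = 0
    window_start = 0   # left edge of the current repeat-free window
    for i, ch in enumerate(s):
        # if ch already occurs inside the current window, slide the window past it
        if ch in last_seen and last_seen[ch] >= window_start:
            window_start = last_seen[ch] + 1
        last_seen[ch] = i
        best = max(best, i - window_start + 1)
    return best
assert length_of_longest_substring("abcabcbb") == 3
assert length_of_longest_substring("pwwkew") == 3
```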
{
"source": "jiangxuda/tristan-and-isolde",
"score": 3
} |
#### File: jiangxuda/tristan-and-isolde/heart.py
```python
import numpy as np
import matplotlib.pyplot as plt
def plot_heart():
    fig = plt.figure()
    # the top-level scipy aliases (scipy.linspace, scipy.sqrt) are deprecated; use numpy directly
    x = np.linspace(-2, 2, 1000)
    y1 = np.sqrt(1 - (abs(x) - 1) ** 2)
    y2 = -3 * np.sqrt(1 - (abs(x) / 2) ** 0.5)
plt.fill_between(x, y1, color='yellow')
plt.fill_between(x, y2, color='yellow')
plt.xlim([-2.5, 2.5])
plt.text(
0,
-0.4,
'BVB',
fontsize=40,
fontweight='bold',
color='white',
horizontalalignment='center'
)
return fig
if __name__ == '__main__':
heart_fig = plot_heart()
heart_fig.show()
plt.savefig('heart.pdf')
``` |
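Written out, the two branches that `plot_heart` fills between are the standard heart-curve pair:
```latex
y_1(x) = \sqrt{1 - (|x| - 1)^2}, \qquad
y_2(x) = -3\sqrt{1 - \sqrt{|x|/2}}, \qquad x \in [-2, 2]
```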
{
"source": "Jiangxuejian/spectral-cube",
"score": 3
} |
#### File: spectral_cube/io/casa_dask.py
```python
from __future__ import print_function, absolute_import, division
import os
from math import ceil, floor
import uuid
import numpy as np
import dask.array
from .casa_low_level_io import getdminfo
__all__ = ['casa_image_dask_reader']
class CASAArrayWrapper:
"""
A wrapper class for dask that accesses chunks from a CASA file on request.
It is assumed that this wrapper will be used to construct a dask array that
has chunks aligned with the CASA file chunks.
Having a single wrapper object such as this is far more efficient than
having one array wrapper per chunk. This is because the dask graph gets
very large if we end up with one dask array per chunk and slows everything
down.
"""
def __init__(self, filename, totalshape, chunkshape, dtype=None, itemsize=None, memmap=False):
self._filename = filename
self._totalshape = totalshape[::-1]
self._chunkshape = chunkshape[::-1]
self.shape = totalshape[::-1]
self.dtype = dtype
self.ndim = len(self.shape)
self._stacks = np.ceil(np.array(totalshape) / np.array(chunkshape)).astype(int)
        self._chunksize = np.prod(chunkshape)
self._itemsize = itemsize
self._memmap = memmap
if not memmap:
if self._itemsize == 1:
self._array = np.unpackbits(np.fromfile(filename, dtype='uint8'), bitorder='little').astype(np.bool_)
else:
self._array = np.fromfile(filename, dtype=dtype)
def __getitem__(self, item):
# TODO: potentially normalize item, for now assume it is a list of slice objects
indices = []
for dim in range(self.ndim):
if isinstance(item[dim], slice):
indices.append(item[dim].start // self._chunkshape[dim])
else:
indices.append(item[dim] // self._chunkshape[dim])
chunk_number = indices[0]
for dim in range(1, self.ndim):
chunk_number = chunk_number * self._stacks[::-1][dim] + indices[dim]
offset = chunk_number * self._chunksize * self._itemsize
item_in_chunk = []
for dim in range(self.ndim):
if isinstance(item[dim], slice):
item_in_chunk.append(slice(item[dim].start - indices[dim] * self._chunkshape[dim],
item[dim].stop - indices[dim] * self._chunkshape[dim],
item[dim].step))
else:
item_in_chunk.append(item[dim] - indices[dim] * self._chunkshape[dim])
item_in_chunk = tuple(item_in_chunk)
if self._itemsize == 1:
if self._memmap:
offset = offset // self._chunksize * ceil(self._chunksize / 8) * 8
start = floor(offset / 8)
end = ceil((offset + self._chunksize) / 8)
array_uint8 = np.fromfile(self._filename, dtype=np.uint8,
offset=start, count=end - start)
array_bits = np.unpackbits(array_uint8, bitorder='little')
chunk = array_bits[offset - start * 8:offset + self._chunksize - start * 8]
return chunk.reshape(self._chunkshape[::-1], order='F').T[item_in_chunk].astype(np.bool_)
else:
ceil_chunksize = int(ceil(self._chunksize / 8)) * 8
return (self._array[chunk_number*ceil_chunksize:(chunk_number+1)*ceil_chunksize][:self._chunksize]
.reshape(self._chunkshape[::-1], order='F').T[item_in_chunk])
else:
if self._memmap:
return np.fromfile(self._filename, dtype=self.dtype,
offset=offset,
count=self._chunksize).reshape(self._chunkshape[::-1], order='F').T[item_in_chunk]
else:
return (self._array[chunk_number*self._chunksize:(chunk_number+1)*self._chunksize]
.reshape(self._chunkshape[::-1], order='F').T[item_in_chunk])
def from_array_fast(arrays, asarray=False, lock=False):
"""
This is a more efficient alternative to doing::
[dask.array.from_array(array) for array in arrays]
that avoids a lot of the overhead in from_array by using the Array
initializer directly.
"""
slices = tuple(slice(0, size) for size in arrays[0].shape)
chunk = tuple((size,) for size in arrays[0].shape)
meta = np.zeros((0,), dtype=arrays[0].dtype)
dask_arrays = []
for array in arrays:
name1 = str(uuid.uuid4())
name2 = str(uuid.uuid4())
dsk = {(name1,) + (0,) * array.ndim: (dask.array.core.getter, name2,
slices, asarray, lock),
name2: array}
dask_arrays.append(dask.array.Array(dsk, name1, chunk, meta=meta, dtype=array.dtype))
return dask_arrays
def casa_image_dask_reader(imagename, memmap=True, mask=False):
"""
Read a CASA image (a folder containing a ``table.f0_TSM0`` file) into a
numpy array.
"""
# the data is stored in the following binary file
# each of the chunks is stored on disk in fortran-order
if mask:
if mask is True:
mask = 'mask0'
imagename = os.path.join(str(imagename), mask)
if not os.path.exists(imagename):
raise FileNotFoundError(imagename)
# the data is stored in the following binary file
# each of the chunks is stored on disk in fortran-order
img_fn = os.path.join(str(imagename), 'table.f0_TSM0')
# load the metadata from the image table. Note that this uses our own
# implementation of getdminfo, which is equivalent to
# from casatools import table
# tb = table()
# tb.open(str(imagename))
# dminfo = tb.getdminfo()
# tb.close()
dminfo = getdminfo(str(imagename))
# Determine whether file is big endian
big_endian = dminfo['*1']['BIGENDIAN']
# chunkshape defines how the chunks (array subsets) are written to disk
chunkshape = tuple(dminfo['*1']['SPEC']['DEFAULTTILESHAPE'])
    chunksize = np.prod(chunkshape)
# the total shape defines the final output array shape
totalshape = dminfo['*1']['SPEC']['HYPERCUBES']['*1']['CubeShape']
# we expect that the total size of the array will be determined by finding
# the number of chunks along each dimension rounded up
    totalsize = np.prod(np.ceil(totalshape / chunkshape)) * chunksize
# the file size helps us figure out what the dtype of the array is
filesize = os.stat(img_fn).st_size
# the ratio between these tells you how many chunks must be combined
# to create a final stack
stacks = np.ceil(totalshape / chunkshape).astype(int)
    nchunks = int(np.prod(stacks))
# check that the file size is as expected and determine the data dtype
if mask:
expected = nchunks * ceil(chunksize / 8)
if filesize != expected:
raise ValueError("Unexpected file size for mask, found {0} but "
"expected {1}".format(filesize, expected))
dtype = bool
itemsize = 1
else:
if filesize == totalsize * 4:
if big_endian:
dtype = '>f4'
else:
dtype = '<f4'
itemsize = 4
elif filesize == totalsize * 8:
if big_endian:
dtype = '>f8'
else:
dtype = '<f8'
itemsize = 8
else:
raise ValueError("Unexpected file size for data, found {0} but "
"expected {1} or {2}".format(filesize, totalsize * 4, totalsize * 8))
# CASA does not like numpy ints!
chunkshape = tuple(int(x) for x in chunkshape)
totalshape = tuple(int(x) for x in totalshape)
# Create a wrapper that takes slices and returns the appropriate CASA data
wrapper = CASAArrayWrapper(img_fn, totalshape, chunkshape, dtype=dtype, itemsize=itemsize, memmap=memmap)
# Convert to a dask array
dask_array = dask.array.from_array(wrapper, name='CASA Data ' + str(uuid.uuid4()), chunks=chunkshape[::-1])
# Since the chunks may not divide the array exactly, all the chunks put
# together may be larger than the array, so we need to get rid of any
# extraneous padding.
final_slice = tuple([slice(dim) for dim in totalshape[::-1]])
return dask_array[final_slice]
```
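A minimal usage sketch for `casa_image_dask_reader`, assuming a CASA image directory (the folder containing `table.f0_TSM0`) exists at the placeholder path below:
```python
# Hypothetical driver: open the cube lazily, then force a reduction.
data = casa_image_dask_reader('my_cube.image', memmap=True)
print(data.shape, data.dtype)         # nothing has been read from disk yet
print(float(data.mean().compute()))   # .compute() triggers the chunked reads
mask = casa_image_dask_reader('my_cube.image', mask=True)
print(int(mask.sum().compute()))      # count of True (unmasked) pixels
```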
#### File: io/tests/test_casa_wcs.py
```python
from __future__ import print_function, absolute_import, division
import tempfile
import pytest
import numpy as np
from astropy.wcs import WCS
from astropy.io import fits
from numpy.testing import assert_allclose
from ..casa_low_level_io import getdesc
from ..casa_wcs import wcs_casa2astropy
from ...tests.test_casafuncs import make_casa_testimage
from .test_casa_low_level_io import ALL_DATA_FIXTURES
from ...conftest import HEADER_FILENAME
try:
from casatools import image
CASATOOLS_INSTALLED = True
except ImportError:
CASATOOLS_INSTALLED = False
@pytest.fixture
def filename(request):
return request.getfixturevalue(request.param)
def assert_header_correct(casa_filename):
fits_filename = tempfile.mktemp()
# Use CASA to convert back to FITS and use that header as the reference
ia = image()
ia.open(casa_filename)
ia.tofits(fits_filename, stokeslast=False)
ia.done()
ia.close()
# Parse header with WCS - for the purposes of this function
# we are not interested in keywords/values not in WCS
reference_wcs = WCS(fits_filename)
reference_header = reference_wcs.to_header()
# Now use our wcs_casa2astropy function to create the header and compare
# the results.
desc = getdesc(casa_filename)
actual_wcs = wcs_casa2astropy(desc['_keywords_']['coords'])
actual_header = actual_wcs.to_header()
assert sorted(actual_header) == sorted(reference_header)
for key in reference_header:
if isinstance(actual_header[key], str):
assert actual_header[key] == reference_header[key]
else:
assert_allclose(actual_header[key], reference_header[key])
@pytest.mark.skipif('not CASATOOLS_INSTALLED')
@pytest.mark.parametrize('filename', ALL_DATA_FIXTURES, indirect=['filename'])
def test_wcs_casa2astropy(tmp_path, filename):
casa_filename = str(tmp_path / 'casa.image')
make_casa_testimage(filename, casa_filename)
assert_header_correct(casa_filename)
@pytest.mark.skipif('not CASATOOLS_INSTALLED')
def test_wcs_casa2astropy_linear(tmp_path):
# Test that things work properly when the WCS coordinates aren't set
casa_filename = str(tmp_path / 'test.image')
data = np.random.random((3, 4, 5, 6, 7))
ia = image()
ia.fromarray(outfile=casa_filename, pixels=data, log=False)
ia.close()
assert_header_correct(casa_filename)
def header_copy_with(**kwargs):
header = fits.Header.fromtextfile(HEADER_FILENAME).copy()
header.update(kwargs)
return header
ALL_HEADERS = [
header_copy_with(),
header_copy_with(CTYPE1='GLON-TAN', CTYPE2='GLAT-TAN'),
header_copy_with(CTYPE1='SLON-TAN', CTYPE2='SLAT-TAN'),
header_copy_with(CTYPE1='ELON-TAN', CTYPE2='ELAT-TAN'),
header_copy_with(CTYPE1='HLON-TAN', CTYPE2='HLAT-TAN'),
header_copy_with(SPECSYS=''),
header_copy_with(SPECSYS='TOPOCENT'),
header_copy_with(SPECSYS='GEOCENTR'),
header_copy_with(SPECSYS='BARYCENT'),
header_copy_with(SPECSYS='HELIOCEN'),
header_copy_with(SPECSYS='LSRK'),
header_copy_with(SPECSYS='LSRD'),
header_copy_with(SPECSYS='GALACTOC'),
header_copy_with(SPECSYS='LOCALGRP'),
header_copy_with(SPECSYS='CMBDIPOL'),
header_copy_with(SPECSYS='SOURCE'),
header_copy_with(RADESYS='FK4'),
header_copy_with(RADESYS='FK4-NO-E'),
header_copy_with(RADESYS='FK5'),
header_copy_with(RADESYS='ICRS'),
header_copy_with(EQUINOX=1950.),
header_copy_with(EQUINOX=1979.9),
header_copy_with(EQUINOX=2000),
header_copy_with(EQUINOX=2010),
header_copy_with(CTYPE3='FREQ', CUNIT3='GHz', CRVAL3=100., CDELT3=1.),
header_copy_with(CTYPE3='WAVE', CUNIT3='m', CRVAL3=1e-6, CDELT3=1e-8),
header_copy_with(CTYPE3='VOPT'),
header_copy_with(CTYPE3='VRAD')
]
@pytest.mark.skipif('not CASATOOLS_INSTALLED')
@pytest.mark.parametrize('header', ALL_HEADERS)
def test_wcs_casa2astropy_additional(tmp_path, header):
# More cases to improve coverage
casa_filename = str(tmp_path / 'casa.image')
fits_filename = str(tmp_path / 'casa.fits')
fits.writeto(fits_filename, np.ones((2, 3, 4, 5)), header)
make_casa_testimage(fits_filename, casa_filename)
assert_header_correct(casa_filename)
``` |
{
"source": "jiangxuewen16/hq-crawler",
"score": 2
} |
#### File: api/model/spot.py
```python
import datetime
import json
import re
import queue
from django.utils.autoreload import logger
from apps.api.common.helper.helper import getDayList
from spiders.common import OTA
from spiders.items.association import douyin
from spiders.items.spot import spot
spot_queue = queue.Queue()
spot_queue_list = queue.Queue()
def get_yesterday():
today = datetime.date.today()
one_day = datetime.timedelta(days=1)
yesterday = today - one_day
return yesterday
def get_three_type(spot_city_s):
L = []
for p in spot_city_s:
L.append(dict(p))
count_down = 0
count_up = 0
for m in L:
score = m['_id']['c_score']
count = m['count']
if not score:
count_down = count + count_down
else:
count_up = count + count_up
K = {'total': count_down + count_up, 'count_up': count_up, 'count_down': count_down}
return K
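# Example of the aggregate output shape get_three_type consumes (values illustrative):
#   [{'_id': {'c_score': True}, 'count': 40}, {'_id': {'c_score': False}, 'count': 10}]
# which it folds into {'total': 50, 'count_up': 40, 'count_down': 10}.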
# class SpotModel(Spot):
# pass
class SpotComment:
@classmethod
def today_total_comment(cls):
pipeline = [
{
'$match': {
'create_at': {
'$gte': str(datetime.date.today())
# '$lt': '2018-12-05'
}
}
},
{'$group':
{'_id': {'c_score': {'$gt': ['$c_score', 3]}},
'count': {'$sum': 1}
}
}]
spot_city_s = spot.SpotComment.objects.aggregate(*pipeline)
L = []
for p in spot_city_s:
L.append(dict(p))
return L
# return get_three_type(spot_city_s)
@classmethod
def yesterday_total_comment(cls):
pipeline = [
{
'$match': {
'create_at': {
'$lt': str(datetime.date.today()),
'$gte': str(get_yesterday())
}
}
},
{'$group':
{'_id': {'c_score': {'$gt': ['$c_score', 3]}},
'count': {'$sum': 1}
}
}]
spot_city_s = spot.SpotComment.objects.aggregate(*pipeline)
L = []
for p in spot_city_s:
L.append(dict(p))
return L
# return get_three_type(spot_city_s)
@classmethod
def ota_list_comment(cls, condition, skip, limit, sort):
pipeline = [
{
'$lookup': {
'from': "spot",
'localField': "ota_spot_id",
'foreignField': "ota_spot_id",
'as': "spot"
}
},
{
'$unwind': {
'path': "$spot",
'preserveNullAndEmptyArrays': True
}
},
{
'$project': {
'_id': "$_id",
'ota_id': "$ota_id",
'ota_spot_id': "$ota_spot_id",
'u_id': "$u_id",
'u_avatar': "$u_avatar",
'u_level': "$u_level",
'u_name': "$u_name",
'c_tag': "$c_tag",
'c_id': "$c_id",
'c_score': "$c_score",
'c_content': "$c_content",
'c_img': "$c_img",
'c_from': "$c_from",
'create_at': "$create_at",
'update_at': "$update_at",
'spot_name': "$spot.spot_name"
}
},
{
'$sort': {sort: -1}
},
{
'$match': {
'$and': [
{'$or': [
{'u_name': {'$regex': '.*' + condition['check_name'] + '.*'}},
{'_id': {'$regex': '.*' + condition['check_name'] + '.*'}},
]},
{
'create_at': {
'$gte': condition['begin_date'],
'$lt': condition['end_date']
}
},
{
'c_score': {
'$gt': condition['down_score'],
'$lte': condition['up_score']
}
},
{
'spot_name': condition['spot_name']
},
{
'ota_id': {'$in': condition['ota_id']}
}
]
}
},
{
'$skip': skip
},
{
'$limit': limit
}
]
spot_city_s = spot.SpotComment.objects.aggregate(*pipeline)
L = []
for p in spot_city_s:
p['_id'] = str(p['_id'])
L.append(dict(p))
return L
@classmethod
def list_comment(cls, condition, skip, limit, sort):
pipeline = [
{
'$lookup': {
'from': "spot",
'localField': "ota_spot_id",
'foreignField': "ota_spot_id",
'as': "spot"
}
},
{
'$unwind': {
'path': "$spot",
'preserveNullAndEmptyArrays': True
}
},
{
'$project': {
'_id': "$_id",
'ota_id': "$ota_id",
'ota_spot_id': "$ota_spot_id",
'u_id': "$u_id",
'u_avatar': "$u_avatar",
'u_level': "$u_level",
'u_name': "$u_name",
'c_tag': "$c_tag",
'c_id': "$c_id",
'c_score': "$c_score",
'c_content': "$c_content",
'c_img': "$c_img",
'c_from': "$c_from",
'create_at': "$create_at",
'update_at': "$update_at",
'spot_name': "$spot.spot_name"
}
},
{
'$sort': {sort: -1}
},
{
'$match': {
'$and': [
{'$or': [
{'u_name': {'$regex': '.*' + condition['check_name'] + '.*'}},
{'_id': {'$regex': '.*' + condition['check_name'] + '.*'}},
]},
{
'create_at': {
'$gte': condition['begin_date'],
'$lt': condition['end_date']
}
},
{
'c_score': {
'$gt': condition['down_score'],
'$lte': condition['up_score']
}
},
{
'ota_id': {'$in': condition['ota_id']}
}
]
}
},
{
'$skip': skip
},
{
'$limit': limit
}
]
spot_city_s = spot.SpotComment.objects.aggregate(*pipeline)
L = []
for p in spot_city_s:
p['_id'] = str(p['_id'])
L.append(dict(p))
return L
@classmethod
def total_comment(cls, condition):
pipeline = [
{
'$match': {
'$and': [
{'$or': [
{'u_name': {'$regex': '.*' + condition['check_name'] + '.*'}},
{'_id': {'$regex': '.*' + condition['check_name'] + '.*'}},
]},
{
'create_at': {
'$gte': condition['begin_date'],
'$lt': condition['end_date']
}
},
{
'c_score': {
'$gt': condition['down_score'],
'$lte': condition['up_score']
}
},
# {
# 'ota_id': condition['ota_id']
# }
{
'ota_id': {'$in': condition['ota_id']}
}
]
}
},
{
'$count': "count"
}
]
count = spot.SpotComment.objects().aggregate(*pipeline)
L = []
for p in count:
L.append(dict(p))
if len(L) < 1:
return 0
else:
return L[0]['count']
class Spot:
@classmethod
def list_spot_array(cls):
pipeline = [
{
'$project': {
'_id': 0,
'ota_spot_id': 1,
}
}
]
spot_city_s = spot.Spot.objects.aggregate(*pipeline)
L = []
for p in spot_city_s:
L.append(dict(p)['ota_spot_id'])
return L
@classmethod
def list_spot_select(cls):
pipeline = [
{
'$project': {
'_id': 0,
'spot_name': 1,
'ota_spot_id': 1,
}
}
]
spot_city_s = spot.Spot.objects.aggregate(*pipeline)
L = []
for p in spot_city_s:
L.append(dict(p))
return L
@classmethod
def today_spot_comment(cls):
pipeline = [
{
'$lookup': {
'from': 'spot_comment',
'localField': "ota_spot_id",
'foreignField': "ota_spot_id",
'as': "spot_comments"
}
},
{
'$unwind': {
'path': "$spot_comments",
'preserveNullAndEmptyArrays': True
}
},
{
'$project': {
'_id': 0,
'ota_spot_id': '$spot_comments.ota_spot_id',
'c_score': '$spot_comments.c_score',
'score_true': {'$cond': [{'$gt': ['$spot_comments.c_score', 3]}, 1, 0]},
'score_false': {'$cond': [{'$lte': ['$spot_comments.c_score', 3]}, 1, 0]},
'spot_name': '$spot_name',
'create_at': '$spot_comments.create_at'
}
},
{
'$match': {
'create_at': {'$gte': str(datetime.date.today())},
# 'spot_name': {'$ne': None}
}
},
{
'$group': {
'_id': {'spot_name': '$spot_name', 'ota_spot_id': '$ota_spot_id'},
'score_true_total': {'$sum': '$score_true'},
'score_false_total': {'$sum': '$score_false'}
}
}
]
spot_city_s = spot.Spot.objects.aggregate(*pipeline)
L = []
for p in spot_city_s:
L.append(dict(p))
return L
@classmethod
def yesterday_spot_comment(cls):
pipeline = [
{
'$lookup': {
'from': 'spot_comment',
'localField': "ota_spot_id",
'foreignField': "ota_spot_id",
'as': "spot_comments"
}
},
{
'$unwind': {
'path': "$spot_comments",
'preserveNullAndEmptyArrays': True
}
},
{
'$project': {
'_id': 0,
'ota_spot_id': '$ota_spot_id',
'c_score': '$spot_comments.c_score',
'score_true': {'$cond': [{'$gt': ['$spot_comments.c_score', 3]}, 1, 0]},
'score_false': {'$cond': [{'$lte': ['$spot_comments.c_score', 3]}, 1, 0]},
'spot_name': '$spot_name',
'create_at': '$spot_comments.create_at'
}
},
{
'$match': {
'create_at': {'$lt': str(datetime.date.today()),
'$gte': str(get_yesterday())},
# 'spot_name': {'$ne': None}
}
},
{
'$group': {
'_id': {'spot_name': '$spot_name', 'ota_spot_id': '$ota_spot_id'},
'score_true_total': {'$sum': '$score_true'},
'score_false_total': {'$sum': '$score_false'}
}
}
]
spot_city_s = spot.Spot.objects.aggregate(*pipeline)
L = []
for p in spot_city_s:
L.append(dict(p))
return L
#
@classmethod
def list_spot(cls, s_name, skip, limit, sort):
pipeline = [
{
'$sort': {sort: -1}
},
{
'$match': {
's_name': {'$regex': '.*' + s_name + '.*'}
}
},
{
'$project': {
'_id': 0,
'city_id': 1,
'city_name': 1,
'ota_spot_id': 1,
's_name': 1,
's_addr': 1,
's_level': 1,
's_score': 1,
's_comment_num': 1,
's_sale_num': 1,
's_ticket_num': 1,
's_img': 1,
'year': {'$year': "$create_at"},
'month': {'$month': "$create_at"},
'day': {'$dayOfMonth': "$create_at"},
'create_at': {'$toString': {'$toDate': "$create_at"}},
'update_at': {'$toString': {'$toDate': "$update_at"}}
}
},
{
'$skip': skip
},
{
'$limit': limit
},
]
spot_city_s = spot.SpotCity.objects.aggregate(*pipeline)
L = []
for p in spot_city_s:
L.append(dict(p))
return L
@classmethod
def total_spot(cls, s_name):
pipeline = [
{
'$match': {
's_name': {'$regex': '.*' + s_name + '.*'}
}
},
{
'$count': "count"
}
]
count = spot.SpotCity.objects().aggregate(*pipeline)
# return dict(count)
L = []
for p in count:
L.append(dict(p))
if len(L) < 1:
return 0
else:
return L[0]['count']
@classmethod
def spot_comment_group(cls):
pipeline = [
{
'$lookup': {
'from': "spot_comment",
'localField': "ota_spot_id",
'foreignField': "ota_spot_id",
'as': "spot_comment"
}
},
{
'$project': {
'_id': 0,
'spot_name': "$spot_name",
'ota_spot_id': "$ota_spot_id",
'c_score': "$spot_comment.c_score",
'comment_create_at': "$spot_comment.create_at",
}
}
,
{
'$match': {
'$and': [
{
'$or': [
{'ota_spot_id': {'$eq': 103113}},
{'ota_spot_id': {'$eq': 100025}},
{'ota_spot_id': {'$eq': 5427075}},
{'ota_spot_id': {'$eq': 339}},
]
},
{
'comment_create_at': {
'$gte': '2017-12-04',
# '$lt': '2019-12-05'
}
}
]
}
}
]
spot_city_s = spot.Spot.objects.aggregate(*pipeline)
L = []
# sum(i > 5 for i in j)
for p in spot_city_s:
p['total_up_score'] = sum(int(i) > 3 for i in p['c_score'])
p['total_down_score'] = sum(int(i) <= 3 for i in p['c_score'])
del p['c_score']
L.append(dict(p))
return L
@classmethod
def last_spot_comment(cls):
pipeline = [
{
'$lookup': {
'from': 'spot_comment',
'localField': 'ota_spot_id',
'foreignField': 'ota_spot_id',
'as': 'spot_comments'
}
},
{
'$unwind': {
'path': "$spot_comments",
'preserveNullAndEmptyArrays': True
}
},
{
'$match': {
'spot_name': {'$ne': None}
}
},
{
'$group': {
'_id': {'ota_spot_id': '$ota_spot_id', 'spot_name': '$spot_name', 'ota_id': '$ota_id'},
'c_score': {'$first': "$spot_comments.c_score"},
'create_at': {'$first': "$spot_comments.create_at"}
}
},
{
'$sort': {'create_at': -1}
},
{
'$group': {
'_id': {'ota_spot_id': '$_id.ota_spot_id', 'spot_name': '$_id.spot_name'},
'ota_10001_score': {'$sum': {'$cond': [{'$eq': ['$_id.ota_id', 10001]}, '$c_score', 0]}},
'ota_10002_score': {'$sum': {'$cond': [{'$eq': ['$_id.ota_id', 10002]}, '$c_score', 0]}},
'ota_10003_score': {'$sum': {'$cond': [{'$eq': ['$_id.ota_id', 10003]}, '$c_score', 0]}},
'ota_10004_score': {'$sum': {'$cond': [{'$eq': ['$_id.ota_id', 10004]}, '$c_score', 0]}},
'ota_10005_score': {'$sum': {'$cond': [{'$eq': ['$_id.ota_id', 10005]}, '$c_score', 0]}},
}
}
]
spot_city_s = spot.Spot.objects.aggregate(*pipeline)
L = []
for p in spot_city_s:
L.append(dict(p))
return L
@classmethod
def count_comment(cls, condition):
pipeline = [
{
'$lookup': {
'from': "spot_comment",
'localField': "ota_spot_id",
'foreignField': "ota_spot_id",
'as': "spot_comments"
}
},
{
'$unwind': {
'path': "$spot_comments",
'preserveNullAndEmptyArrays': True
}
},
{
'$project': {
'_id': 0,
'ota_id': '$spot_comments.ota_id',
'ota_spot_id': '$ota_spot_id',
'spot_name': '$spot_name',
'c_score': '$spot_comments.c_score',
'score_true': {'$cond': [{'$gt': ['$spot_comments.c_score', 3]}, 1, 0]},
'score_false': {'$cond': [{'$lte': ['$spot_comments.c_score', 3]}, 1, 0]},
'score_total': {'$cond': [{'$lte': ['$spot_comments.c_score', 3]}, 1, 1]},
'create_at': '$spot_comments.create_at'
}
},
{
'$match': {
'$and': [
{
'create_at': {
'$gte': condition['begin_date'],
'$lt': condition['end_date']
}
},
{
'c_score': {
'$gt': condition['down_score'],
'$lte': condition['up_score']
}
},
{
'ota_id': {'$in': condition['ota_id']}
},
{
'ota_spot_id': {'$in': condition['ota_spot_id']}
}
]
}
},
{
'$group': {
'_id': {'spot_name': '$spot_name', 'ota_spot_id': '$ota_spot_id', 'ota_id': '$ota_id'},
'score_true_total': {'$sum': '$score_true'},
'score_false_total': {'$sum': '$score_false'},
'score_total': {'$sum': '$score_total'}
}
}
]
spot_city_s = spot.Spot.objects.aggregate(*pipeline)
L = []
for p in spot_city_s:
L.append(dict(p))
return L
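    # Illustrative helper (not used by the original methods): the all_comment()
    # pipeline below hard-codes the same $sum/$cond pair for every OTA id and for
    # both the current and the previous date window; those group-stage fields
    # could be generated programmatically, e.g.:
    @staticmethod
    def _ota_group_fields(ota_ids, begin, end, suffix=''):
        fields = {}
        for ota_id in ota_ids:
            in_window = {'$and': [
                {'$eq': ['$ota_id', ota_id]},
                {'$lte': ['$create_at', end + ' 23:59:59']},
                {'$gte': ['$create_at', begin + ' 00:00:00']},
            ]}
            fields['sum_score_ota_id_%d%s' % (ota_id, suffix)] = {
                '$sum': {'$cond': [in_window, '$c_score', 0]}}
            fields['count_score_ota_id_%d%s' % (ota_id, suffix)] = {
                '$sum': {'$cond': [in_window, 1, 0]}}
        return fields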
@classmethod
def all_comment(cls, condition, skip, limit):
pipeline = [
{
"$lookup": {
"from": "spot_comment",
"localField": "ota_spot_id",
"foreignField": "ota_spot_id",
"as": "spot_comment"
}
},
{
"$unwind": {
"path": "$spot_comment",
"preserveNullAndEmptyArrays": True
}
},
{
"$project": {
"ota_spot_id": "$ota_spot_id",
"spot_name": "$spot_name",
"create_at": "$spot_comment.create_at",
"ota_id": "$spot_comment.ota_id",
"c_score": "$spot_comment.c_score"
}
},
{
"$match": {
"$and": [
{
"spot_name": {
"$exists": True
}
},
{
"ota_spot_id": {
"$in": condition['ota_spot_id']
}
},
# {
# "c_score": {
# "$ne": None
# }
#
# }
# {
# "create_at": {
# "$gte": condition['begin_date'] + " 00:00:00"
# }
# },
# {
# "create_at": {
# "$lte": condition['end_date'] + " 23:59:59"
# }
# }
]
}
},
{
"$group": {
"_id": {
"ota_spot_id": "$ota_spot_id",
"spot_name": "$spot_name"
},
"sum_score_ota_id_10000": {
"$sum": {
"$cond": [{
"$and": [{
"$eq": ["$ota_id", 10000]
}, {
"$lte": ["$create_at", condition['end_date'] + " 23:59:59"]
}, {
"$gte": ["$create_at", condition['begin_date'] + " 00:00:00"]
}]
}, "$c_score", 0]
}
},
"count_score_ota_id_10000": {
"$sum": {
"$cond": [{
"$and": [{
"$eq": ["$ota_id", 10000]
}, {
"$lte": ["$create_at", condition['end_date'] + " 23:59:59"]
}, {
"$gte": ["$create_at", condition['begin_date'] + " 00:00:00"]
}]
}, 1, 0]
}
},
"sum_score_ota_id_10000_pre": {
"$sum": {
"$cond": [{
"$and": [{
"$eq": ["$ota_id", 10000]
}, {
"$lte": ["$create_at", condition['end_date_pre'] + " 23:59:59"]
}, {
"$gte": ["$create_at", condition['begin_date_pre'] + " 00:00:00"]
}]
}, "$c_score", 0]
}
},
"count_score_ota_id_10000_pre": {
"$sum": {
"$cond": [{
"$and": [{
"$eq": ["$ota_id", 10000]
}, {
"$lte": ["$create_at", condition['end_date_pre'] + " 23:59:59"]
}, {
"$gte": ["$create_at", condition['begin_date_pre'] + " 00:00:00"]
}]
}, 1, 0]
}
},
"sum_score_ota_id_10001": {
"$sum": {
"$cond": [{
"$and": [{
"$eq": ["$ota_id", 10001]
}, {
"$lte": ["$create_at", condition['end_date'] + " 23:59:59"]
}, {
"$gte": ["$create_at", condition['begin_date'] + " 00:00:00"]
}]
}, "$c_score", 0]
}
},
"count_score_ota_id_10001": {
"$sum": {
"$cond": [{
"$and": [{
"$eq": ["$ota_id", 10001]
}, {
"$lte": ["$create_at", condition['end_date'] + " 23:59:59"]
}, {
"$gte": ["$create_at", condition['begin_date'] + " 00:00:00"]
}]
}, 1, 0]
}
},
"sum_score_ota_id_10001_pre": {
"$sum": {
"$cond": [{
"$and": [{
"$eq": ["$ota_id", 10001]
}, {
"$lte": ["$create_at", condition['end_date_pre'] + " 23:59:59"]
}, {
"$gte": ["$create_at", condition['begin_date_pre'] + " 00:00:00"]
}]
}, "$c_score", 0]
}
},
"count_score_ota_id_10001_pre": {
"$sum": {
"$cond": [{
"$and": [{
"$eq": ["$ota_id", 10001]
}, {
"$lte": ["$create_at", condition['end_date_pre'] + " 23:59:59"]
}, {
"$gte": ["$create_at", condition['begin_date_pre'] + " 00:00:00"]
}]
}, 1, 0]
}
},
"sum_score_ota_id_10002": {
"$sum": {
"$cond": [{
"$and": [{
"$eq": ["$ota_id", 10002]
}, {
"$lte": ["$create_at", condition['end_date'] + " 23:59:59"]
}, {
"$gte": ["$create_at", condition['begin_date'] + " 00:00:00"]
}]
}, "$c_score", 0]
}
},
"count_score_ota_id_10002": {
"$sum": {
"$cond": [{
"$and": [{
"$eq": ["$ota_id", 10002]
}, {
"$lte": ["$create_at", condition['end_date'] + " 23:59:59"]
}, {
"$gte": ["$create_at", condition['begin_date'] + " 00:00:00"]
}]
}, 1, 0]
}
},
"sum_score_ota_id_10002_pre": {
"$sum": {
"$cond": [{
"$and": [{
"$eq": ["$ota_id", 10002]
}, {
"$lte": ["$create_at", condition['end_date_pre'] + " 23:59:59"]
}, {
"$gte": ["$create_at", condition['begin_date_pre'] + " 00:00:00"]
}]
}, "$c_score", 0]
}
},
"count_score_ota_id_10002_pre": {
"$sum": {
"$cond": [{
"$and": [{
"$eq": ["$ota_id", 10002]
}, {
"$lte": ["$create_at", condition['end_date_pre'] + " 23:59:59"]
}, {
"$gte": ["$create_at", condition['begin_date_pre'] + " 00:00:00"]
}]
}, 1, 0]
}
},
"sum_score_ota_id_10003": {
"$sum": {
"$cond": [{
"$and": [{
"$eq": ["$ota_id", 10003]
}, {
"$lte": ["$create_at", condition['end_date'] + " 23:59:59"]
}, {
"$gte": ["$create_at", condition['begin_date'] + " 00:00:00"]
}]
}, "$c_score", 0]
}
},
"count_score_ota_id_10003": {
"$sum": {
"$cond": [{
"$and": [{
"$eq": ["$ota_id", 10003]
}, {
"$lte": ["$create_at", condition['end_date'] + " 23:59:59"]
}, {
"$gte": ["$create_at", condition['begin_date'] + " 00:00:00"]
}]
}, 1, 0]
}
},
"sum_score_ota_id_10003_pre": {
"$sum": {
"$cond": [{
"$and": [{
"$eq": ["$ota_id", 10003]
}, {
"$lte": ["$create_at", condition['end_date_pre'] + " 23:59:59"]
}, {
"$gte": ["$create_at", condition['begin_date_pre'] + " 00:00:00"]
}]
}, "$c_score", 0]
}
},
"count_score_ota_id_10003_pre": {
"$sum": {
"$cond": [{
"$and": [{
"$eq": ["$ota_id", 10003]
}, {
"$lte": ["$create_at", condition['end_date_pre'] + " 23:59:59"]
}, {
"$gte": ["$create_at", condition['begin_date_pre'] + " 00:00:00"]
}]
}, 1, 0]
}
},
"sum_score_ota_id_10004": {
"$sum": {
"$cond": [{
"$and": [{
"$eq": ["$ota_id", 10004]
}, {
"$lte": ["$create_at", condition['end_date'] + " 23:59:59"]
}, {
"$gte": ["$create_at", condition['begin_date'] + " 00:00:00"]
}]
}, "$c_score", 0]
}
},
"count_score_ota_id_10004": {
"$sum": {
"$cond": [{
"$and": [{
"$eq": ["$ota_id", 10004]
}, {
"$lte": ["$create_at", condition['end_date'] + " 23:59:59"]
}, {
"$gte": ["$create_at", condition['begin_date'] + " 00:00:00"]
}]
}, 1, 0]
}
},
"sum_score_ota_id_10004_pre": {
"$sum": {
"$cond": [{
"$and": [{
"$eq": ["$ota_id", 10004]
}, {
"$lte": ["$create_at", condition['end_date_pre'] + " 23:59:59"]
}, {
"$gte": ["$create_at", condition['begin_date_pre'] + " 00:00:00"]
}]
}, "$c_score", 0]
}
},
"count_score_ota_id_10004_pre": {
"$sum": {
"$cond": [{
"$and": [{
"$eq": ["$ota_id", 10004]
}, {
"$lte": ["$create_at", condition['end_date_pre'] + " 23:59:59"]
}, {
"$gte": ["$create_at", condition['begin_date_pre'] + " 00:00:00"]
}]
}, 1, 0]
}
},
"sum_score_ota_id_10005": {
"$sum": {
"$cond": [{
"$and": [{
"$eq": ["$ota_id", 10005]
}, {
"$lte": ["$create_at", condition['end_date'] + " 23:59:59"]
}, {
"$gte": ["$create_at", condition['begin_date'] + " 00:00:00"]
}]
}, "$c_score", 0]
}
},
"count_score_ota_id_10005": {
"$sum": {
"$cond": [{
"$and": [{
"$eq": ["$ota_id", 10005]
}, {
"$lte": ["$create_at", condition['end_date'] + " 23:59:59"]
}, {
"$gte": ["$create_at", condition['begin_date'] + " 00:00:00"]
}]
}, 1, 0]
}
},
"sum_score_ota_id_10005_pre": {
"$sum": {
"$cond": [{
"$and": [{
"$eq": ["$ota_id", 10005]
}, {
"$lte": ["$create_at", condition['end_date_pre'] + " 23:59:59"]
}, {
"$gte": ["$create_at", condition['begin_date_pre'] + " 00:00:00"]
}]
}, "$c_score", 0]
}
},
"count_score_ota_id_10005_pre": {
"$sum": {
"$cond": [{
"$and": [{
"$eq": ["$ota_id", 10005]
}, {
"$lte": ["$create_at", condition['end_date_pre'] + " 23:59:59"]
}, {
"$gte": ["$create_at", condition['begin_date_pre'] + " 00:00:00"]
}]
}, 1, 0]
}
},
"sum_score_ota_id_10006": {
"$sum": {
"$cond": [{
"$and": [{
"$eq": ["$ota_id", 10006]
}, {
"$lte": ["$create_at", condition['end_date'] + " 23:59:59"]
}, {
"$gte": ["$create_at", condition['begin_date'] + " 00:00:00"]
}]
}, "$c_score", 0]
}
},
"count_score_ota_id_10006": {
"$sum": {
"$cond": [{
"$and": [{
"$eq": ["$ota_id", 10006]
}, {
"$lte": ["$create_at", condition['end_date'] + " 23:59:59"]
}, {
"$gte": ["$create_at", condition['begin_date'] + " 00:00:00"]
}]
}, 1, 0]
}
},
"sum_score_ota_id_10006_pre": {
"$sum": {
"$cond": [{
"$and": [{
"$eq": ["$ota_id", 10006]
}, {
"$lte": ["$create_at", condition['end_date_pre'] + " 23:59:59"]
}, {
"$gte": ["$create_at", condition['begin_date_pre'] + " 00:00:00"]
}]
}, "$c_score", 0]
}
},
"count_score_ota_id_10006_pre": {
"$sum": {
"$cond": [{
"$and": [{
"$eq": ["$ota_id", 10006]
}, {
"$lte": ["$create_at", condition['end_date_pre'] + " 23:59:59"]
}, {
"$gte": ["$create_at", condition['begin_date_pre'] + " 00:00:00"]
}]
}, 1, 0]
}
},
"sum_score_ota_id_10007": {
"$sum": {
"$cond": [{
"$and": [{
"$eq": ["$ota_id", 10007]
}, {
"$lte": ["$create_at", condition['end_date'] + " 23:59:59"]
}, {
"$gte": ["$create_at", condition['begin_date'] + " 00:00:00"]
}]
}, "$c_score", 0]
}
},
"count_score_ota_id_10007": {
"$sum": {
"$cond": [{
"$and": [{
"$eq": ["$ota_id", 10007]
}, {
"$lte": ["$create_at", condition['end_date'] + " 23:59:59"]
}, {
"$gte": ["$create_at", condition['begin_date'] + " 00:00:00"]
}]
}, 1, 0]
}
},
"sum_score_ota_id_10007_pre": {
"$sum": {
"$cond": [{
"$and": [{
"$eq": ["$ota_id", 10007]
}, {
"$lte": ["$create_at", condition['end_date_pre'] + " 23:59:59"]
}, {
"$gte": ["$create_at", condition['begin_date_pre'] + " 00:00:00"]
}]
}, "$c_score", 0]
}
},
"count_score_ota_id_10007_pre": {
"$sum": {
"$cond": [{
"$and": [{
"$eq": ["$ota_id", 10007]
}, {
"$lte": ["$create_at", condition['end_date_pre'] + " 23:59:59"]
}, {
"$gte": ["$create_at", condition['begin_date_pre'] + " 00:00:00"]
}]
}, 1, 0]
}
},
"count_start_5": {
"$sum": {
"$cond": [{
"$and": [{
"$gte": ["$c_score", 5]
}, {
"$lte": ["$create_at", condition['end_date'] + " 23:59:59"]
}, {
"$gte": ["$create_at", condition['begin_date'] + " 00:00:00"]
}]
}, 1, 0]
}
},
"count_start_4": {
"$sum": {
"$cond": [{
"$and": [{
"$gte": ["$c_score", 4]
}, {
"$lt": ["$c_score", 5]
}, {
"$lte": ["$create_at", condition['end_date'] + " 23:59:59"]
}, {
"$gte": ["$create_at", condition['begin_date'] + " 00:00:00"]
}]
}, 1, 0]
}
},
"count_start_3": {
"$sum": {
"$cond": [{
"$and": [{
"$gte": ["$c_score", 3]
}, {
"$lt": ["$c_score", 4]
}, {
"$lte": ["$create_at", condition['end_date'] + " 23:59:59"]
}, {
"$gte": ["$create_at", condition['begin_date'] + " 00:00:00"]
}]
}, 1, 0]
}
},
"count_start_2": {
"$sum": {
"$cond": [{
"$and": [{
"$gte": ["$c_score", 2]
}, {
"$lt": ["$c_score", 3]
}, {
"$lte": ["$create_at", condition['end_date'] + " 23:59:59"]
}, {
"$gte": ["$create_at", condition['begin_date'] + " 00:00:00"]
}]
}, 1, 0]
}
},
"count_start_1": {
"$sum": {
"$cond": [{
"$and": [{
"$gte": ["$c_score", 1]
}, {
"$lt": ["$c_score", 2]
}, {
"$lte": ["$create_at", condition['end_date'] + " 23:59:59"]
}, {
"$gte": ["$create_at", condition['begin_date'] + " 00:00:00"]
}]
}, 1, 0]
}
},
"sum_score": {
"$sum": {
"$cond": [{
"$and": [{
"$lte": ["$create_at", condition['end_date'] + " 23:59:59"]
}, {
"$gte": ["$create_at", condition['begin_date'] + " 00:00:00"]
}]
}, "$c_score", 0]
}
},
"count_comment": {
"$sum": {
"$cond": [{
"$and": [{
"$lte": ["$create_at", condition['end_date'] + " 23:59:59"]
}, {
"$gte": ["$create_at", condition['begin_date'] + " 00:00:00"]
}]
}, 1, 0]
}
},
"sum_score_pre": {
"$sum": {
"$cond": [{
"$and": [{
"$lte": ["$create_at", condition['end_date'] + " 23:59:59"]
}, {
"$gte": ["$create_at", condition['begin_date'] + " 00:00:00"]
}]
}, "$c_score", 0]
}
},
"count_comment_pre": {
"$sum": {
"$cond": [{
"$and": [{
"$lte": ["$create_at", condition['end_date'] + " 23:59:59"]
}, {
"$gte": ["$create_at", condition['begin_date'] + " 00:00:00"]
}]
}, 1, 0]
}
}
}
},
{
"$project": {
"avg_total": {
"$cond": [{
"$eq": ["$count_comment", 0]
}, 0, {
"$divide": ["$sum_score", "$count_comment"]
}]
},
"avg_total_pre": {
"$cond": [{
"$eq": ["$count_comment_pre", 0]
}, 0, {
"$divide": ["$sum_score_pre", "$count_comment_pre"]
}]
},
"avg_10000": {
"$cond": [{
"$eq": ["$count_score_ota_id_10000", 0]
}, 0, {
"$divide": ["$sum_score_ota_id_10000", "$count_score_ota_id_10000"]
}]
},
"avg_10000_pre": {
"$cond": [{
"$eq": ["$count_score_ota_id_10000_pre", 0]
}, 0, {
"$divide": ["$sum_score_ota_id_10000_pre", "$count_score_ota_id_10000_pre"]
}]
},
"avg_10001": {
"$cond": [{
"$eq": ["$count_score_ota_id_10001", 0]
}, 0, {
"$divide": ["$sum_score_ota_id_10001", "$count_score_ota_id_10001"]
}]
},
"avg_10001_pre": {
"$cond": [{
"$eq": ["$count_score_ota_id_10001_pre", 0]
}, 0, {
"$divide": ["$sum_score_ota_id_10001_pre", "$count_score_ota_id_10001_pre"]
}]
},
"avg_10002": {
"$cond": [{
"$eq": ["$count_score_ota_id_10002", 0]
}, 0, {
"$divide": ["$sum_score_ota_id_10002", "$count_score_ota_id_10002"]
}]
},
"avg_10002_pre": {
"$cond": [{
"$eq": ["$count_score_ota_id_10002_pre", 0]
}, 0, {
"$divide": ["$sum_score_ota_id_10002_pre", "$count_score_ota_id_10002_pre"]
}]
},
"avg_10003": {
"$cond": [{
"$eq": ["$count_score_ota_id_10003", 0]
}, 0, {
"$divide": ["$sum_score_ota_id_10003", "$count_score_ota_id_10003"]
}]
},
"avg_10003_pre": {
"$cond": [{
"$eq": ["$count_score_ota_id_10003_pre", 0]
}, 0, {
"$divide": ["$sum_score_ota_id_10003_pre", "$count_score_ota_id_10003_pre"]
}]
},
"avg_10004": {
"$cond": [{
"$eq": ["$count_score_ota_id_10004", 0]
}, 0, {
"$divide": ["$sum_score_ota_id_10004", "$count_score_ota_id_10004"]
}]
},
"avg_10004_pre": {
"$cond": [{
"$eq": ["$count_score_ota_id_10004_pre", 0]
}, 0, {
"$divide": ["$sum_score_ota_id_10004_pre", "$count_score_ota_id_10004_pre"]
}]
},
"avg_10005": {
"$cond": [{
"$eq": ["$count_score_ota_id_10005", 0]
}, 0, {
"$divide": ["$sum_score_ota_id_10005", "$count_score_ota_id_10005"]
}]
},
"avg_10005_pre": {
"$cond": [{
"$eq": ["$count_score_ota_id_10005_pre", 0]
}, 0, {
"$divide": ["$sum_score_ota_id_10005_pre", "$count_score_ota_id_10005_pre"]
}]
},
"avg_10006": {
"$cond": [{
"$eq": ["$count_score_ota_id_10006", 0]
}, 0, {
"$divide": ["$sum_score_ota_id_10006", "$count_score_ota_id_10006"]
}]
},
"avg_10006_pre": {
"$cond": [{
"$eq": ["$count_score_ota_id_10006_pre", 0]
}, 0, {
"$divide": ["$sum_score_ota_id_10006_pre", "$count_score_ota_id_10006_pre"]
}]
},
"avg_10007": {
"$cond": [{
"$eq": ["$count_score_ota_id_10007", 0]
}, 0, {
"$divide": ["$sum_score_ota_id_10007", "$count_score_ota_id_10007"]
}]
},
"avg_10007_pre": {
"$cond": [{
"$eq": ["$count_score_ota_id_10007_pre", 0]
}, 0, {
"$divide": ["$sum_score_ota_id_10007_pre", "$count_score_ota_id_10007_pre"]
}]
},
"count_start_5": "$count_start_5",
"count_start_4": "$count_start_4",
"count_start_3": "$count_start_3",
"count_start_2": "$count_start_2",
"count_start_1": "$count_start_1",
"count_comment": "$count_comment",
}
},
{
"$project": {
"avg_total": "$avg_total",
"avg_total_percent": {
"$cond": [{
"$eq": ["$avg_total_pre", 0]
}, 0, {
"$subtract": ["$avg_total", "$avg_total_pre"]
}]
},
"avg_10000": "$avg_10000",
"avg_10000_percent": {
"$cond": [{
"$eq": ["$avg_10000_pre", 0]
}, 0, {
"$subtract": ["$avg_10000", "$avg_10000_pre"]
}]
},
"avg_10001": "$avg_10001",
"avg_10001_percent": {
"$cond": [{
"$eq": ["$avg_10001_pre", 0]
}, 0, {
"$subtract": ["$avg_10001", "$avg_10001_pre"]
}]
},
"avg_10002": "$avg_10002",
"avg_10002_percent": {
"$cond": [{
"$eq": ["$avg_10002_pre", 0]
}, 0, {
"$subtract": ["$avg_10002", "$avg_10002_pre"]
}]
},
"avg_10003": "$avg_10003",
"avg_10003_percent": {
"$cond": [{
"$eq": ["$avg_10003_pre", 0]
}, 0, {
"$subtract": ["$avg_10003", "$avg_10003_pre"]
}]
},
"avg_10004": "$avg_10004",
"avg_10004_percent": {
"$cond": [{
"$eq": ["$avg_10004_pre", 0]
}, 0, {
"$subtract": ["$avg_10004", "$avg_10004_pre"]
}]
},
"avg_10005": "$avg_10005",
"avg_10005_percent": {
"$cond": [{
"$eq": ["$avg_10005_pre", 0]
}, 0, {
"$subtract": ["$avg_10005", "$avg_10005_pre"]
}]
},
"avg_10006": "$avg_10006",
"avg_10006_percent": {
"$cond": [{
"$eq": ["$avg_10006_pre", 0]
}, 0, {
"$subtract": ["$avg_10006", "$avg_10006_pre"]
}]
},
"avg_10007": "$avg_10007",
"avg_10007_percent": {
"$cond": [{
"$eq": ["$avg_10006_pre", 0]
}, 0, {
"$subtract": ["$avg_10007", "$avg_10007_pre"]
}]
},
"count_start_5": "$count_start_5",
"count_start_4": "$count_start_4",
"count_start_3": "$count_start_3",
"count_start_2": "$count_start_2",
"count_start_1": "$count_start_1",
"count_comment": "$count_comment",
}
},
{
"$sort": {
"avg_total": -1
}
},
{
'$skip': skip
},
{
'$limit': limit
}
]
spot_comment_s = spot.Spot.objects.aggregate(*pipeline)
L = []
i = 1
for p in spot_comment_s:
p['sort'] = i
p['avg_total'] = round(p['avg_total'], 1)
p['avg_total_percent'] = round(p['avg_total_percent'], 1)
p['avg_10000'] = round(p['avg_10000'], 1)
p['avg_10000_percent'] = round(p['avg_10000_percent'], 1)
p['avg_10001'] = round(p['avg_10001'], 1)
p['avg_10001_percent'] = round(p['avg_10001_percent'], 1)
p['avg_10002'] = round(p['avg_10002'], 1)
p['avg_10002_percent'] = round(p['avg_10002_percent'], 1)
p['avg_10003'] = round(p['avg_10003'], 1)
p['avg_10003_percent'] = round(p['avg_10003_percent'], 1)
p['avg_10004'] = round(p['avg_10004'], 1)
p['avg_10004_percent'] = round(p['avg_10004_percent'], 1)
p['avg_10005'] = round(p['avg_10005'], 1)
p['avg_10005_percent'] = round(p['avg_10005_percent'], 1)
p['avg_10006'] = round(p['avg_10006'], 1)
p['avg_10006_percent'] = round(p['avg_10006_percent'], 1)
p['avg_10007'] = round(p['avg_10007'], 1)
p['avg_10007_percent'] = round(p['avg_10007_percent'], 1)
if p['avg_total'] >= 4.5:
p['tags'] = '优秀'
elif p['avg_total'] == 0:
p['tags'] = '未知'
elif p['avg_total'] < 4.0:
p['tags'] = '不及格'
else:
p['tags'] = '合格'
L.append(dict(p))
i = i + 1
return L
@classmethod
def spot_count(cls, condition):
pipeline = [
{
"$match": {
"$and": [
{
"spot_name": {
"$exists": True
}
},
{
"ota_spot_id": {
"$in": condition['ota_spot_id']
}
}
]
}
},
{
"$group": {
"_id": None,
"count": {
"$sum": 1
}
}
}
]
spot_count = spot.Spot.objects.aggregate(*pipeline)
L = list(spot_count)
return L[0]['count']
@classmethod
def spot_score_count(cls, condition):
pipeline = [
{
"$lookup": {
"from": "spot_comment",
"localField": "ota_spot_id",
"foreignField": "ota_spot_id",
"as": "spot_comment"
}
},
{
"$unwind": {
"path": "$spot_comment",
"preserveNullAndEmptyArrays": True
}
},
{
"$project": {
"ota_spot_id": "$ota_spot_id",
"spot_name": "$spot_name",
"create_at": "$spot_comment.create_at",
"c_score": "$spot_comment.c_score"
}
},
{
"$match": {
"$and": [
{
"spot_name": {
"$exists": True
}
},
{
"spot_name": {
"$regex": re.compile(condition['spot_name'])
}
},
{
"create_at": {
"$gte": condition['begin_date'] + " 00:00:00"
}
},
{
"create_at": {
"$lte": condition['end_date'] + " 23:59:59"
}
}
]
}
},
{
"$group": {
"_id": None,
"score_True": {
"$sum": {
"$cond": [{
"$gt": ["$c_score", 3]
}, 1, 0]
}
},
"score_false": {
"$sum": {
"$cond": [{
"$lte": ["$c_score", 3]
}, 1, 0]
}
},
"score_total": {
"$sum": 1
}
}
}
]
spot_comment_s = spot.Spot.objects.aggregate(*pipeline)
L = []
for p in spot_comment_s:
L.append(dict(p))
return L
@classmethod
def spot_complex(cls, condition):
pipeline = [
{
"$group": {
"_id": None,
"now_month_total": {
"$sum": {
"$cond": [{
"$and": [{
"$lte": ["$create_at", condition['now'] + ' 23:59:59']
}, {
"$gte": ["$create_at", condition['now_month'] + ' 00:00:00']
}]
}, 1, 0]
}
},
"now_month_score": {
"$sum": {
"$cond": [{
"$and": [{
"$lte": ["$create_at", condition['now'] + ' 23:59:59']
}, {
"$gte": ["$create_at", condition['now_month'] + ' 00:00:00']
}]
}, "$c_score", 0]
}
},
"last_month_total": {
"$sum": {
"$cond": [{
"$and": [{
"$lte": ["$create_at", condition['last_month_first'] + ' 23:59:59']
}, {
"$gte": ["$create_at", condition['last_month_last'] + ' 00:00:00']
}]
}, 1, 0]
}
},
"last_month_score": {
"$sum": {
"$cond": [{
"$and": [{
"$lte": ["$create_at", condition['last_month_first'] + ' 23:59:59']
}, {
"$gte": ["$create_at", condition['last_month_last'] + ' 00:00:00']
}]
}, "$c_score", 0]
}
},
"lastyear_month_total": {
"$sum": {
"$cond": [{
"$and": [{
"$lte": ["$create_at", condition['last_year_month_first'] + ' 23:59:59']
}, {
"$gte": ["$create_at", condition['last_year_month_last'] + ' 00:00:00']
}]
}, 1, 0]
}
},
"lastyear_month_score": {
"$sum": {
"$cond": [{
"$and": [{
"$lte": ["$create_at", condition['last_year_month_first'] + ' 23:59:59']
}, {
"$gte": ["$create_at", condition['last_year_month_last'] + ' 00:00:00']
}]
}, "$c_score", 0]
}
}
}
},
{
"$project": {
"_id": 0,
"now_month_per_score": {
'$cond': [{
'$eq': ['$now_month_total', 0]
}, 4.7, {
'$multiply': [{
"$divide": ["$now_month_score", "$now_month_total"]
}, 1]
}]
},
"last_month_per_score": {
'$cond': [{
'$eq': ['$last_month_total', 0]
}, 4.7, {
'$multiply': [{
"$divide": ["$last_month_score", "$last_month_total"]
}, 1]
}]
},
"lastyear_month_per_score": {
'$cond': [{
'$eq': ['$lastyear_month_total', 0]
}, 4.7, {
'$multiply': [{
"$divide": ["$lastyear_month_score", "$lastyear_month_total"]
}, 1]
}]
}
}
}
]
spot_complex = spot.SpotComment.objects.aggregate(*pipeline)
L = list(spot_complex)
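# tongbi (同比) = year-over-year: this month vs. the same month last year;
# huanbi (环比) = month-over-month: this month vs. the previous month.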
tongbi_num = round(L[0]['now_month_per_score'] - L[0]['lastyear_month_per_score'], 2)
huanbi_num = round(L[0]['now_month_per_score'] - L[0]['last_month_per_score'], 2)
if L[0]['lastyear_month_per_score']:
tongbi_per = round(
(L[0]['now_month_per_score'] - L[0]['lastyear_month_per_score']) * 100 / L[0][
'lastyear_month_per_score'], 1)
else:
tongbi_per = None
if L[0]['last_month_per_score']:
huanbi_per = round(
(L[0]['now_month_per_score'] - L[0]['last_month_per_score']) * 100 / L[0]['last_month_per_score'], 1)
else:
huanbi_per = None
dic = {"now_month_per_score": round(L[0]['now_month_per_score'], 1),
"tongbi_num": tongbi_num,
"huanbi_num": huanbi_num,
"tongbi_per": tongbi_per,
"huanbi_per": huanbi_per
}
return dic
@classmethod
def comment_num(cls, condition):
pipeline = [
{
"$unwind": {
"path": "$spot_comment",
"preserveNullAndEmptyArrays": True
}
},
{
"$group": {
"_id": True,
"up_score_count": {
"$sum": {
"$cond": {
"if": {
"$gt": ["$c_score", 3]
},
"then": 1,
"else": 0
}
}
},
"down_score_count": {
"$sum": {
"$cond": {
"if": {
"$lte": ["$c_score", 3]
},
"then": 1,
"else": 0
}
}
},
}
}
]
spot_complex = spot.SpotComment.objects.aggregate(*pipeline)
L = list(spot_complex)
up_score_count = L[0]['up_score_count']
down_score_count = L[0]['down_score_count']
up_score_per = round(up_score_count * 100 / (up_score_count + down_score_count), 1)
down_score_per = round(down_score_count * 100 / (up_score_count + down_score_count), 1)
dic = {"up_score_count": up_score_count, "down_score_count": down_score_count, "up_score_per": up_score_per,
"down_score_per": down_score_per, "total": up_score_count + down_score_count}
# spot_queue.put((dic, 'comment_num'))
return dic
@classmethod
def now_month(cls, condition):
pipeline = [
{
"$project": {
"create_at": "$create_at",
"day_substring": {
"$substr": ["$create_at", 8, 2]
},
"c_score": "$c_score"
}
},
{
"$match": {
"create_at": {
"$gte": condition['now_month']
}
}
},
{
"$group": {
"_id": "$day_substring",
"avg_score": {
"$avg": "$c_score"
}
}
},
{
"$sort": {
"_id": 1
}
}
]
now_month_bak = getDayList()
now_month = spot.SpotComment.objects.aggregate(*pipeline)
for p in now_month:
for b in now_month_bak:
if b['_id'] == p['_id']:
b['avg_score'] = p['avg_score']
else:
pass
L = []
for p in now_month_bak:
p['avg_score'] = round(p['avg_score'], 1)
L.append(dict(p))
return L
@classmethod
def star_percent(cls, condition):
pipeline = [
{
"$project": {
"c_score": {
"$floor": "$c_score"
}
}
},
{
"$match": {
"c_score": {
"$ne": None
}
}
},
{
"$group": {
"_id": "$c_score",
"count": {
"$sum": 1
}
}
},
{
"$sort": {
"_id": -1
}
}]
star_percent = spot.SpotComment.objects.aggregate(*pipeline)
L = []
total = 0
for p in star_percent:
L.append(dict(p))
total = total + dict(p)['count']
result = []
for m in L:
m['percent'] = round(m['count'] * 100 / total, 1)
result.append(m)
# spot_queue.put((L, 'star_percent'))
return result # 15221
@classmethod
def comment_tags(cls, condition):
pipeline = [{
"$unwind": {
"path": "$tag_list",
"preserveNullAndEmptyArrays": True
}
},
{
"$match": {
# "tag_list.tag_type": 1
'tag_list.tag_type': {'$in': condition}
}
},
{
"$group": {
"_id": "$tag_list.tag_name",
"count": {
"$sum": "$tag_list.tag_num"
}
}
},
{
"$sort": {
"count": - 1
}
}
]
comment_tags = spot.Spot.objects.aggregate(*pipeline)
L = []
for p in comment_tags:
L.append(dict(p))
# return spot_queue.put((L, topic))
return L
@classmethod
def get_param(cls, param, in_name, default):
if in_name in param and param[in_name]:
return param[in_name]
else:
return default
@classmethod
def group_true_false(cls):
return 1
class SpotCity:
@classmethod
def detail_spot(cls, ota_spot_id):
# spot_city = spot.SpotCity.objects(ota_spot_id=ota_spot_id).fields(slice__comments=[5, 10]).first()
# logger.info(spot_city.to_mongo())
pipeline = [
{
'$match': {
'ota_spot_id': {
'$eq': ota_spot_id
}
}
},
{
'$project': {
'_id': 0
}
}
]
spot_city_s = spot.SpotCity.objects.aggregate(*pipeline)
L = {}
for p in spot_city_s:
# logger.info(p)
p['create_at'] = p['create_at'].strftime("%Y-%m-%d %H:%M:%S")
p['update_at'] = p['update_at'].strftime("%Y-%m-%d %H:%M:%S")
if p['ota_id'] == OTA.OtaCode.MEITUAN.value.id:
p['s_notes'] = [item['text'] for note in p['s_notes'] for item in note['contents']]
elif p['ota_id'] == OTA.OtaCode.CTRIP.value.id:
p['s_notes'] = [note['subtitle'] + ':' + item['desc'] for note in p['s_notes'] for item in
note['desclist']]
if p['ota_id'] == OTA.OtaCode.MEITUAN.value.id:
s_desc = ''
for desc in p['s_desc']:
contents = desc['contents']
for content in contents:
if 'type' not in content:
if 'text' in content:
logger.info('=' * 20 + ' %s', content)
s_desc += '<p>' + content['text'] + '</p>'
else:
logger.info(content)
elif content['type'] == 'text':
s_desc += '<p>' + content['content']['text'] + '</p>'
elif content['type'] == 'img':
s_desc += '<img src="' + content['content'] + '"/>'
p['s_desc'] = s_desc
# p['s_ticket'] =
if p['ota_id'] == OTA.OtaCode.MEITUAN.value.id:
ticket_info = {}
for ticket in p['s_ticket']:
ticket_info[ticket['productType'].lower()] = []
for product in ticket['productModels']:
if 'title5' in product:
info = {'name': product['title5'], 'price': product['price'],
'sale': product['newSoldsString']}
ticket_info[ticket['productType'].lower()].append(info)
# append(ticket_info)
p['s_ticket'] = ticket_info
elif p['ota_id'] == OTA.OtaCode.CTRIP.value.id:
ticket_info = {}
if 'spot_hotel' in p['s_ticket']:
ticket_info['tc'] = []
for item in p['s_ticket']['spot_hotel']:
ticket_info['tc'].append({'name': item['productname'], 'price': '未知', 'sale': '已售未知'})
if 'spot_ticket' in p['s_ticket']:
ticket_info['mp'] = []
for item in p['s_ticket']['spot_ticket']:
ticket_info['mp'].append({'name': item['name'], 'price': '未知', 'sale': '已售未知'})
p['s_ticket'] = ticket_info
elif p['ota_id'] == OTA.OtaCode.LVMAMA.value.id:
ticket_info = {}
if 'ADULT' in p['s_ticket']:
ticket_info['mp'] = []
for item in p['s_ticket']['ADULT']:
ticket_info['mp'].append({'name': item['name'], 'price': item['pay_price'], 'sale': '已售未知'})
if 'TC' in p['s_ticket']:
ticket_info['tc'] = []
for item in p['s_ticket']['TC']:
ticket_info['tc'].append({'name': item['name'], 'price': item['pay_price'], 'sale': '已售未知'})
p['s_ticket'] = ticket_info
L = p
logger.info(p)
# L.append(dict(p))
return L
class MediaDetail:
@classmethod
def dou_yin_new(cls, condition):
pipeline = [{
"$unwind": {
"path": "$tag_list",
"preserveNullAndEmptyArrays": True
}
},
{
'$match': {
'create_at': {
'$eq': condition['create_at']
}
}
},
{
"$group": {
"_id": {
"create_at": "$create_at"
},
"now_fans_count": { # 今日总粉丝量
"$sum": "$fans_num"
},
"now_total_like_count": { # 今日总获攒数
"$sum": "$total_like"
},
"now_comment_count": { # 今日总评论数
"$sum": "$comment_num"
},
"now_broadcast_count": { # 今日总直播数
"$sum": "$broadcast_num"
}
}
},
{
"$project": {
"_id": 0,
"now_fans_count": 1,
"now_total_like_count": 1,
"now_comment_count": 1,
"now_broadcast_count": 1,
}
}
]
comment_tags = douyin.MediaDetail.objects.aggregate(*pipeline)
L = []
for p in comment_tags:
L.append(dict(p))
return L
@classmethod
def dou_yin_is_official(cls, condition):
pipeline = [{
"$unwind": {
"path": "$tag_list",
"preserveNullAndEmptyArrays": True
}
},
{
'$match': {
'is_official': {
'$eq': condition['is_official']
}
}
},
{
"$group": {
"_id": None,
"fans_count": { # 总粉丝量
"$sum": "$fans_count"
},
"total_like_count": { # 今日总获攒数
"$sum": "$total_like"
},
"broadcast_count": { # 今日总直播数
"$sum": "$broadcast_num"
}
}
},
{
"$project": {
"_id": 0,
"fans_count": 1,
"total_like_count": 1,
"count": 1,
"broadcast_count": 1,
}
}
]
comment_tags = douyin.MediaDetail.objects.aggregate(*pipeline)
L = []
for p in comment_tags:
L.append(dict(p))
return L
@classmethod
def dou_yin_user(cls, condition):
pipeline = [{
"$unwind": {
"path": "$tag_list",
"preserveNullAndEmptyArrays": True
}
},
{
'$match': {
'create_at': {
'$eq': condition['create_at']
}
}
},
{
"$group": {
"_id": {
"create_at": "$create_at"
},
"fans_count": { # 今日总粉丝量
"$sum": "$fans_num"
},
"total_like_count": { # 今日总获攒数
"$sum": "$total_like"
},
"comment_count": { # 今日总评论数
"$sum": "$comment_num"
},
"broadcast_count": { # 今日总直播数
"$sum": "$broadcast_num"
}
}
},
{
"$project": {
"_id": 0,
"fans_count": 1,
"total_like_count": 1,
"comment_count": 1,
"broadcast_count": 1,
}
}
]
comment_tags = douyin.DouYinUser.objects.aggregate(*pipeline)
print(comment_tags)
L = []
for p in comment_tags:
L.append(dict(p))
return L
```
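Every average in the ratings pipeline above is wrapped in the same `$cond` guard so that a spot with zero comments yields 0 instead of a division-by-zero error. Below is a minimal standalone sketch of that pattern with pymongo; the connection string and the `demo.comments` collection are illustrative assumptions, not part of this project.

```python
# Minimal sketch of the $cond divide-by-zero guard used throughout the pipeline above.
from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017")
comments = client["demo"]["comments"]

pipeline = [
    {"$group": {
        "_id": "$ota_spot_id",
        "sum_score": {"$sum": "$c_score"},
        "count_comment": {"$sum": 1},
    }},
    {"$project": {
        # Emit 0 instead of dividing by zero when a spot has no comments.
        "avg_score": {
            "$cond": [
                {"$eq": ["$count_comment", 0]},
                0,
                {"$divide": ["$sum_score", "$count_comment"]},
            ]
        },
        "count_comment": 1,
    }},
    {"$sort": {"avg_score": -1}},
]

for row in comments.aggregate(pipeline):
    print(row["_id"], round(row["avg_score"], 1))
```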
#### File: api/views/home.py
```python
from django.core.cache import cache
from django.http import FileResponse
from django.utils.autoreload import logger
from core.common.helper import get_scrapyd_cli
from core.lib.view import BaseView
from core.lib.route import Route
@Route.route(path='api/home/')
class Home(BaseView):
@Route.route(path='index')
def index(self):
# a = add.delay()
file = open('README.md', 'rb')
logger.info(type(file))
return self.file_response(file)
@Route.route(path='home/1')
def home(self):
# crawler.spot_comment()
for spider in ('ctrip_comment', 'lvmama_comment', 'mafengwo_comment', 'meituan_comment',
'fliggy_comment', 'qunar_comment', 'ly_comment'):
jobid = get_scrapyd_cli().schedule('spiders', spider)
logger.info('=' * 30 + f' crawler job ::: spot comments ::: {spider} ::: {jobid}')
return self.success({})
@Route.route(path='home')
def home1(self):
print(self.request.FILES.get('fafafa').name)
return self.success({})
```
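`Home.home` fires each spider through `get_scrapyd_cli().schedule(...)`. That helper is not shown in this dump, but scheduling a spider on scrapyd ultimately reduces to a POST against its `schedule.json` endpoint. The sketch below assumes a default scrapyd instance on localhost:6800 and only illustrates that call; it is not the project's actual helper.

```python
# Hedged sketch of scheduling a spider via scrapyd's HTTP API.
import requests

SCRAPYD_URL = "http://localhost:6800/schedule.json"  # assumed default scrapyd address

def schedule_spider(project: str, spider: str) -> str:
    resp = requests.post(SCRAPYD_URL, data={"project": project, "spider": spider}, timeout=10)
    resp.raise_for_status()
    payload = resp.json()
    # scrapyd answers {"status": "ok", "jobid": "..."} on success
    return payload.get("jobid", "")

if __name__ == "__main__":
    print(schedule_spider("spiders", "ctrip_comment"))
```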
#### File: views/pub_opinion/dy_fack.py
```python
import json
import os
import re
import requests
from fontTools.ttLib import TTFont
from core.lib.route import Route
from core.lib.view import BaseView
from django.core.cache import cache
from bs4 import BeautifulSoup
@Route.route(path='api/dy/fk')
class PublicOpinion(BaseView):
# Download the Douyin digit glyph font
@Route.route(path='/get/download')
def download(self):
url = 'https://s3.pstatp.com/ies/resource/falcon/douyin_falcon/static/font/iconfont_9eb9a50.woff'
r = requests.get(url)
with open("demo.woff", "wb") as code:
code.write(r.content)
font = TTFont(r'demo.woff')
ss = font.saveXML('demo.xml')
return self.success(ss)
# Decode each of the obfuscated counters
@Route.route(path='/get/count')
def get_count(self):
str_num = {'post_num': '', 'like_num': '', 'focus_num': '', 'follower_num': '', 'liked_num': ''}
num_list = self.get_str_list()
print(num_list)
str_num['post_num'] = self.use_fk(num_list['post_str'])
str_num['like_num'] = self.use_fk(num_list['like_str'])
str_num['focus_num'] = self.use_fk(num_list['focus_str'])
str_num['follower_num'] = self.use_fk(num_list['follower_str'])
str_num['liked_num'] = self.use_fk(num_list['liked_str'])
return self.success(str_num)
@staticmethod
def use_fk(pre_code_s):
#  num_;  num_1;  num_5;
out_num = 0
out_str = ''
# pre_code_s = pre_code_s
for pre_code in pre_code_s:
input_code = pre_code.replace("&#", "0")
glyphID = {
'x': '',
'num_': 1,
'num_1': 0,
'num_2': 3,
'num_3': 2,
'num_4': 4,
'num_5': 5,
'num_6': 6,
'num_7': 9,
'num_8': 7,
'num_9': 8,
}
html = open("demo.xml") # 返回一个文件对象
page = html.read() # 调用文件的 readline()方法
soup = BeautifulSoup(page, "html.parser")
for link in soup.find_all('map'):
code = link.get('code')
if code == input_code:
name = link.get('name')
out_num = glyphID[name]
elif input_code == '.':
out_num = '.'
out_str = out_str + str(out_num)
print('out_str-----------------------------')
print(out_str)
if out_str.find('.') > 0:
out_str = float(out_str) * 10000
return int(out_str)
def get_str_list(self):
num_list = self.get_num_list()
str_list = {'post_str': '', 'like_str': '', 'focus_str': '', 'follower_str': '', 'liked_str': ''}
with open("test.html", "r", encoding="utf-8") as f:
# print(num_list)
pre_line = f.read()
# line = pre_line.replace('>.<', '><i class="icon iconfont follow-num"> .; </i><')
line = pre_line.replace('>.<', '><i class="icon iconfont follow-num"> .; </i><',
(pre_line.count('>.<') - 1))
# print(line)
# print('作品数、喜欢数-------------------------------------------------')
like_all = re.findall('class="icon iconfont tab-num"> (.*?);', line) # split into posts vs. likes below
str_list['post_str'] = like_all[0:num_list['post_len']] # post_len: number of posts
str_list['like_str'] = like_all[-num_list['like_len']:] # like_len: number of likes
# print(like_all)
# print('关注数、粉丝数、赞数-------------------------------------------------')
flower_all = re.findall('class="icon iconfont follow-num"> (.*?);', line) # following / followers / likes received
str_list['focus_str'] = flower_all[0:num_list['focus_len']] # focus_len: following count
str_list['follower_str'] = flower_all[num_list['focus_len']:num_list['focus_len'] + num_list[
'follower_len']] # follower_len: follower count
str_list['liked_str'] = flower_all[-num_list['liked_len']:] # liked_len: total likes received
print('flower_all--------------------------')
print(flower_all)
return str_list
def get_num_list(self):
num_list = {'post_len': 0, 'like_len': 0, 'focus_len': 0, 'follower_len': 0, 'liked_len': 0}
with open("test.html", "r", encoding="utf-8") as f:
pre_line = f.read()
# line = pre_line.replace('>.<', '><i class="icon iconfont follow-num"> .; </i><')
line = pre_line.replace('>.<', '><i class="icon iconfont follow-num"> .; </i><',
(pre_line.count('>.<') - 1))
print('xxxxxxxxxxxxxxxxxxxx')
print(line)
'''
Check whether decoy characters are present in the markup
'''
soup = BeautifulSoup(line, "html.parser")
ss = soup.find("div", {"class": "tab-wrap"}).find("i", {"class": "icon iconfont follow-num"})
print('sssssssssssssssss')
print(ss)
# line = ss.replace('>.<', '><i class="icon iconfont follow-num"> .; </i><')
# soup = BeautifulSoup(line, "html.parser")
# print('作品数--------------------------------------')
post_len = self.get_num_len(soup, 'user-tab active tab get-list', 'icon iconfont tab-num')
num_list['post_len'] = post_len
# print('喜欢数--------------------------------------')
like_len = self.get_num_len(soup, 'like-tab tab get-list', 'icon iconfont tab-num')
num_list['like_len'] = like_len
# print('关注数--------------------------------------')
focus_len = self.get_num_len(soup, 'focus block', 'icon iconfont follow-num')
num_list['focus_len'] = focus_len
# print('粉丝数--------------------------------------')
follower_len = self.get_num_len(soup, 'follower block', 'icon iconfont follow-num')
num_list['follower_len'] = follower_len
# print('点赞数--------------------------------------')
liked_len = self.get_num_len(soup, 'liked-num block', 'icon iconfont follow-num')
num_list['liked_len'] = liked_len
return num_list
@staticmethod
def get_num_len(soup, class_1, class_2):
liked_list = soup.find(class_=class_1)
liked_len = liked_list.find_all(class_=class_2)
return len(liked_len)
# Try to kill the process listening on port 8080
@Route.route(path='/get/pid')
def pid(self):
from psutil import process_iter
from signal import SIGTERM # or SIGKILL
for proc in process_iter():
for conns in proc.connections(kind='inet'):
if conns.laddr.port == 8080:
proc.send_signal(SIGTERM) # or SIGKILL
return self.success(1)
```
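`use_fk` decodes Douyin's obfuscated digits by re-parsing the XML dump of the woff font with BeautifulSoup for every character. The same mapping can be read straight from the font's cmap table with fontTools. The sketch below is an assumption-laden alternative: it assumes the codes arrive as `&#x....;` entities and reuses the glyph-name table from `use_fk`.

```python
# Alternative sketch: map obfuscated &#x....; codes to digits via the woff cmap.
from fontTools.ttLib import TTFont

# Glyph-name -> digit table copied from use_fk above.
GLYPH_TO_DIGIT = {
    'x': '', 'num_': '1', 'num_1': '0', 'num_2': '3', 'num_3': '2', 'num_4': '4',
    'num_5': '5', 'num_6': '6', 'num_7': '9', 'num_8': '7', 'num_9': '8',
}

def decode(codes, font_path="demo.woff"):
    font = TTFont(font_path)
    cmap = font["cmap"].getBestCmap()  # codepoint -> glyph name
    out = []
    for code in codes:
        if code == ".":
            out.append(".")
            continue
        codepoint = int(code.replace("&#x", "").rstrip(";"), 16)
        out.append(GLYPH_TO_DIGIT.get(cmap.get(codepoint, ""), ""))
    return "".join(out)

# e.g. decode(["&#xe603;", ".", "&#xe604;"]) returns a digit string like "1.0"
```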
#### File: scheduler/task/crawler.py
```python
from django_apscheduler.jobstores import register_job
from core.common.helper import get_scrapyd_cli
from core.lib.task import scheduler
from apps.api.model.exception import ExcLog
from django.utils.autoreload import logger
"""
Register the scheduled crawl jobs. 'interval' mode runs every fixed number of seconds (10s in the original note);
the alternative is a fixed daily time via 'cron', for example:
@register_job(scheduler, 'cron', day_of_week='mon-fri', hour='9', minute='30', second='10',id='task_time')
"""
# @register_job(scheduler, "cron", hour='02', minute='10', id='spot_info')
# def spot_info():
# jobid = get_scrapyd_cli().schedule('spiders', 'ctrip_spot')
# logger.info('=' * 30, '爬虫定时任务:::', '景区信息:::', jobid)
# jobid = get_scrapyd_cli().schedule('spiders', 'lvmama_spot')
# logger.info('=' * 30, '爬虫定时任务:::', '景区信息:::', jobid)
# jobid = get_scrapyd_cli().schedule('spiders', 'mafengwo_spot')
# logger.info('=' * 30, '爬虫定时任务:::', '景区信息:::', jobid)
# jobid = get_scrapyd_cli().schedule('spiders', 'meituan_spot')
# logger.info('=' * 30, '爬虫定时任务:::', '景区信息:::', jobid)
@register_job(scheduler, "cron", hour='02', minute='10', id='spot_comment')
def spot_comment():
logger.info("==========【景区评论:开启】==========")
jobid = get_scrapyd_cli().schedule('spiders', 'ctrip_comment')
jobid = get_scrapyd_cli().schedule('spiders', 'lvmama_comment')
jobid = get_scrapyd_cli().schedule('spiders', 'mafengwo_comment')
jobid = get_scrapyd_cli().schedule('spiders', 'meituan_comment')
jobid = get_scrapyd_cli().schedule('spiders', 'fliggy_comment')
jobid = get_scrapyd_cli().schedule('spiders', 'qunar_comment')
jobid = get_scrapyd_cli().schedule('spiders', 'ly_comment')
logger.info("==========【景区评论:结束】==========")
@register_job(scheduler, "cron", hour='03', minute='10', id='spot_price')
def spot_price():
logger.info("==========【价格监控:开启】==========")
jobid = get_scrapyd_cli().schedule('spiders', 'ctrip_price')
jobid = get_scrapyd_cli().schedule('spiders', 'fliggy_spot_price')
jobid = get_scrapyd_cli().schedule('spiders', 'jd_spot_price')
jobid = get_scrapyd_cli().schedule('spiders', 'lvmama_spot_price')
jobid = get_scrapyd_cli().schedule('spiders', 'ly_price')
jobid = get_scrapyd_cli().schedule('spiders', 'Mafengwo_price')
jobid = get_scrapyd_cli().schedule('spiders', 'meituan_price')
jobid = get_scrapyd_cli().schedule('spiders', 'qunar_price')
logger.info("==========【价格监控:结束】==========")
@register_job(scheduler, "interval", seconds=2 * 60 * 60, id='association')
def association():
logger.info("==========【社群监控:开启】==========")
jobid = get_scrapyd_cli().schedule('spiders', 'crm')
jobid = get_scrapyd_cli().schedule('spiders', 'crm_contactor')
jobid = get_scrapyd_cli().schedule('spiders', 'wetool_list_member')
jobid = get_scrapyd_cli().schedule('spiders', 'dy_page')
logger.info("==========【社群监控:结束】==========")
# Order API check (interval job, currently commented out)
# @register_job(scheduler, "interval", seconds=5 * 60 * 60)
# def order_check():
# jobid = get_scrapyd_cli().schedule('spiders', 'hqlx_order')
# logger.info('=' * 30, 'test:::', 'test:::', jobid)
# @register_job(scheduler, "interval", seconds=5)
# def order_check():
# logger.info('=' * 30 + "测试定时任务")
@register_job(scheduler, "interval", seconds=2 * 60 * 60, id='udesk')
def udesk():
logger.info("==========【客服数据:开启】==========")
jobid = get_scrapyd_cli().schedule('spiders', 'udesk')
jobid = get_scrapyd_cli().schedule('spiders', 'customer_daily_report')
jobid = get_scrapyd_cli().schedule('spiders', 'wetool_daily_wechat')
logger.info("==========【客服数据:结束】==========")
```
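These jobs register against the `scheduler` imported from `core.lib.task`, which is not included in this dump. The block below is a hedged sketch of how such a scheduler is typically wired with django_apscheduler; the module layout and names are assumptions, not the project's actual code.

```python
# Hypothetical core/lib/task.py: a background APScheduler backed by Django's database job store.
from apscheduler.schedulers.background import BackgroundScheduler
from django_apscheduler.jobstores import DjangoJobStore

scheduler = BackgroundScheduler()
scheduler.add_jobstore(DjangoJobStore(), "default")


def start():
    # Call once at startup (e.g. from an AppConfig.ready hook); after this the
    # @register_job cron/interval jobs above begin firing.
    scheduler.start()
```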
#### File: core/lib/command.py
```python
import os
class Command(object):
name: str
key: str
command: str
pid: str
path: str
file_name = "pid"
def run(self):
pass
def write_pid(self, pid: str):
filename = self.path + self.file_name
if not os.path.isfile(filename): # 无文件时创建
with open(filename, mode="w", encoding="utf-8") as fd:
fd.write(pid)
```
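`Command.write_pid` only creates the pid file when it does not already exist. A small usage sketch follows; the subclass, path, and pid value are made up for illustration.

```python
from core.lib.command import Command


class CrawlCommand(Command):
    name = "crawl"
    key = "ctrip_comment"
    command = "scrapy crawl ctrip_comment"
    path = "/tmp/"

    def run(self):
        # A real implementation would spawn the process and capture its pid;
        # here we only record a placeholder value.
        self.write_pid("12345")


CrawlCommand().run()  # writes /tmp/pid containing "12345" unless the file already exists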
#### File: core/lib/route.py
```python
import importlib
import json
from collections import namedtuple
from types import FunctionType
# from django.urls import re_path
from django.urls import re_path
from hq_crawler import settings
from django.utils.autoreload import logger
"""
Core class for annotation-based routing
"""
class Route:
routeViewPath = namedtuple('classPath', 'path module class_name func_name') # method route tuple (path, module, class name, method name)
classRouteTuple = namedtuple('classRoute', 'module class_name path') # class route tuple (module, class name, path)
ROUTER: list = [] # method routes collected by the decorator
classRoute: list = [] # class-level routes
routeList: dict = {} # full path -> method name mapping
@classmethod
def route(cls, path):
def my_decorator(func):
# logger.info('调用的方法列表:', func)
# 类的路由
if not isinstance(func, FunctionType):
cls.classRoute.append(cls.classRouteTuple(func.__module__, func.__qualname__, path))
return func
cls.ROUTER.append(cls.routeViewPath(path, func.__module__, func.__qualname__[:func.__qualname__.index('.')],
func.__name__))
def wrapper(self, *args, **kwargs):
return func(self, *args, **kwargs)
return wrapper
return my_decorator
@classmethod
def register(cls, urlpatterns: list):
routeKeyList = []
for classItem in Route.classRoute: # class routes
module = importlib.import_module(classItem.module)
routeClass = getattr(module, classItem.class_name)
for routeItem in Route.ROUTER: # method routes
if routeItem.module + routeItem.class_name == classItem.module + classItem.class_name: # same class?
path = classItem.path + routeItem.path # full route path
if path in Route.routeList:
exceptionStr = f'Duplicate route: {routeItem.module + routeItem.class_name} -> {routeItem.func_name}, path: {path}'
raise Exception(exceptionStr)
Route.routeList[path] = routeItem.func_name
if classItem.path in routeKeyList:
continue
path_str = '^' + settings.BASE_URL
urlpatterns.append(re_path(path_str + classItem.path, routeClass.as_view())),
routeKeyList.append(classItem.path)
# logger.info('路由列表:', json.dumps(urlpatterns))
```
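The annotation router collects one class-level path plus per-method sub-paths, then `register` joins them into Django `re_path` entries. Usage mirrors `apps/api/views/home.py`; the demo view below is illustrative only.

```python
from core.lib.route import Route
from core.lib.view import BaseView


@Route.route(path='api/demo/')
class Demo(BaseView):
    @Route.route(path='ping')
    def ping(self):
        return self.success({'pong': True})


urlpatterns = []
Route.register(urlpatterns)
# urlpatterns now holds a re_path for '^<BASE_URL>api/demo/' dispatching to Demo.as_view(),
# and Route.routeList maps 'api/demo/ping' -> 'ping'.
```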
#### File: core/middleware/scheduler.py
```python
from django.utils.deprecation import MiddlewareMixin
from django.utils.autoreload import logger
class Scheduler(MiddlewareMixin):
def process_request(self, request):
pass
# logger.info(request)
def process_response(self, request, response):
return response
```
#### File: spiders/common/OTA.py
```python
from collections import namedtuple
from enum import Enum, unique
ota = namedtuple('ota_info', 'name id') # OTA definition: name and id
sp_map = namedtuple('sp_map', 'ota ota_spot_id') # a spot's id on a given OTA platform
"""
OTA id definitions
"""
@unique
class OtaCode(Enum):
HUIQULX = ota('惠趣旅行', 10000)
MAFENGWO = ota('马蜂窝', 10001) # no need to crawl
CTRIP = ota('携程', 10002)
FLIGGY = ota('飞猪', 10003) # no need to crawl
MEITUAN = ota('美团', 10004)
LVMAMA = ota('驴妈妈', 10005)
QUNAR = ota('去哪儿', 10006)
LY = ota('同程', 10007)
JD = ota('京东', 10008)
"""
Mapping from each spot to its id on every OTA
"""
@unique
class OtaSpotIdMap(Enum):
# 石燕湖
SHI_YAN_HU = [sp_map(OtaCode.HUIQULX, 10001),
sp_map(OtaCode.MAFENGWO, 339),
sp_map(OtaCode.MEITUAN, 1515791),
sp_map(OtaCode.LVMAMA, 100025),
sp_map(OtaCode.LY, 9513),
sp_map(OtaCode.CTRIP, 62931),
sp_map(OtaCode.QUNAR, 706176810),
sp_map(OtaCode.FLIGGY, 11481),
]
# 石牛寨
SHI_NIU_ZHAI = [sp_map(OtaCode.HUIQULX, 10002),
sp_map(OtaCode.MAFENGWO, 5427075),
sp_map(OtaCode.MEITUAN, 30067),
sp_map(OtaCode.LVMAMA, 103113),
sp_map(OtaCode.LY, 25196),
sp_map(OtaCode.CTRIP, 127339),
sp_map(OtaCode.QUNAR, 1915618311),
sp_map(OtaCode.FLIGGY, 33966)
]
# 益阳嘉年华
YI_YANG_JIA_NIAN_HUA = [
sp_map(OtaCode.HUIQULX, 10003),
sp_map(OtaCode.MAFENGWO, 34944996),
sp_map(OtaCode.MEITUAN, 179283431),
sp_map(OtaCode.LVMAMA, 11367356),
sp_map(OtaCode.CTRIP, 4741361),
sp_map(OtaCode.QUNAR, 2877753081),
]
# 花田溪谷
HUA_TIAN_XI_GU = [
sp_map(OtaCode.HUIQULX, 10004),
sp_map(OtaCode.MAFENGWO, 71460244),
sp_map(OtaCode.MEITUAN, 188085997),
sp_map(OtaCode.CTRIP, 5060343),
sp_map(OtaCode.QUNAR, 2554926827),
sp_map(OtaCode.FLIGGY, 140975087),
]
# 东浒寨
DONG_HU_ZHAI = [
sp_map(OtaCode.HUIQULX, 10005),
sp_map(OtaCode.MAFENGWO, 33665644),
sp_map(OtaCode.MEITUAN, 115915971),
sp_map(OtaCode.LVMAMA, 10829578),
sp_map(OtaCode.CTRIP, 1979030),
sp_map(OtaCode.LY, 229768),
sp_map(OtaCode.QUNAR, 225118749),
sp_map(OtaCode.FLIGGY, 32659156),
]
# 马仁奇峰
MA_REN_QI_FENG = [
sp_map(OtaCode.HUIQULX, 10006),
sp_map(OtaCode.MAFENGWO, 5436442),
sp_map(OtaCode.MEITUAN, 1451152),
sp_map(OtaCode.LVMAMA, 103177),
sp_map(OtaCode.CTRIP, 65169),
sp_map(OtaCode.LY, 5808),
sp_map(OtaCode.QUNAR, 3821817759),
sp_map(OtaCode.FLIGGY, 103590),
]
# 大茅山
DA_MAO_SHAN = [
sp_map(OtaCode.HUIQULX, 10007),
sp_map(OtaCode.MAFENGWO, 7689642),
sp_map(OtaCode.MEITUAN, 41614694),
sp_map(OtaCode.CTRIP, 1493248),
sp_map(OtaCode.QUNAR, 420237024),
sp_map(OtaCode.LY, 231854),
sp_map(OtaCode.FLIGGY, 61484),
]
# 九龙江
JIU_LONG_JIANG = [
sp_map(OtaCode.HUIQULX, 10008),
sp_map(OtaCode.MEITUAN, 41164719),
sp_map(OtaCode.LVMAMA, 160416),
sp_map(OtaCode.CTRIP, 140900),
sp_map(OtaCode.LY, 29283),
sp_map(OtaCode.QUNAR, 4123349957),
sp_map(OtaCode.FLIGGY, 191470),
]
# 天空之城
TIAN_KONG_ZHI_CHENG = [
sp_map(OtaCode.HUIQULX, 10009),
sp_map(OtaCode.MEITUAN, 182573099),
sp_map(OtaCode.LVMAMA, 11945662),
sp_map(OtaCode.CTRIP, 5058354),
sp_map(OtaCode.FLIGGY, 140626417),
]
# 连云山
LIAN_YUN_SHAN = [
sp_map(OtaCode.HUIQULX, 10010),
sp_map(OtaCode.MAFENGWO, 33673148),
sp_map(OtaCode.MEITUAN, 5464367),
sp_map(OtaCode.LVMAMA, 102525),
sp_map(OtaCode.CTRIP, 1411376),
]
# 侠天下
XIA_TIAN_XIA = [
sp_map(OtaCode.HUIQULX, 10011),
sp_map(OtaCode.MAFENGWO, 24960734),
sp_map(OtaCode.MEITUAN, 51575391),
sp_map(OtaCode.LVMAMA, 10650528),
sp_map(OtaCode.CTRIP, 1415157),
sp_map(OtaCode.LY, 182563),
sp_map(OtaCode.QUNAR, 2333288470),
sp_map(OtaCode.FLIGGY, 17165564),
]
# 三翁花园
SAN_FENG_HUA_YUAN = [
sp_map(OtaCode.HUIQULX, 10012),
sp_map(OtaCode.MAFENGWO, 70048608),
sp_map(OtaCode.MEITUAN, 158907227),
sp_map(OtaCode.LVMAMA, 12210014),
sp_map(OtaCode.LY, 672767),
sp_map(OtaCode.CTRIP, 3989530),
sp_map(OtaCode.QUNAR, 3333064220),
sp_map(OtaCode.FLIGGY, 33559796),
]
#乌金山
WU_JIN_SHAN = [
sp_map(OtaCode.HUIQULX, 10013),
sp_map(OtaCode.MAFENGWO, 964195),
sp_map(OtaCode.MEITUAN, 2498352),
sp_map(OtaCode.LVMAMA, 162027),
sp_map(OtaCode.CTRIP, 3264963),
]
# 恺之峰
KAI_ZHI_FENG = [
sp_map(OtaCode.CTRIP, 1410449),
sp_map(OtaCode.QUNAR, 63919496),
sp_map(OtaCode.LY, 190966),
]
# 上海迪斯尼
DI_SI_NI = [
sp_map(OtaCode.FLIGGY, 15968),
]
@classmethod
def get_ota_spot_id(cls, spot_name: str, ota_code: OtaCode) -> int:
if spot_name not in cls.__members__:
raise Exception('Spot name not defined!')
for map_item in cls[spot_name].value:
if map_item.ota == ota_code:
return map_item.ota_spot_id
@classmethod
def get_ota_spot_list(cls, ota_code: OtaCode) -> list:
return [item.ota_spot_id for _, member in cls.__members__.items() for item in member.value if
item.ota == ota_code]
```
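A quick usage sketch for the two classmethods above, with values taken directly from the mappings in this file:

```python
from spiders.common.OTA import OtaCode, OtaSpotIdMap

# Resolve one spot's id on a specific OTA.
print(OtaSpotIdMap.get_ota_spot_id('SHI_YAN_HU', OtaCode.CTRIP))  # -> 62931

# List every spot id known for one OTA.
print(OtaSpotIdMap.get_ota_spot_list(OtaCode.MAFENGWO))  # -> [339, 5427075, ...]
```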
#### File: spiders/association/wetool.py
```python
import json
import re
import time
import datetime
from urllib import parse
import scrapy
from scrapy.http import HtmlResponse
from spiders.items.association.wetool import TWetool, TWetoolDailyWechat
from spiders.items.association.association import TAssociation
from spiders.items.distributor.distributor import CDistributor
class WeToolListMemberSpider(scrapy.Spider):
name = "wetool_list_member"
allowed_domains = ['wp.wxb.com', 'account.wxb.com']
start_urls = ['http://account.wxb.com/index2/login']
wx_list = {
'中惠旅内购客服': 'wxid_kqc0n5mpeivp22',
'中惠旅内购客服1': 'wxid_ogz10j91aix112',
'中惠旅内购客服2': 'wxid_08ey7r0i9dvz12',
'中惠旅内购客服3': 'wxid_bqi9qznshsmy12',
'中惠旅内购客服4': 'wxid_1yya6xpk3yre22',
'中惠旅内购客服6': 'wxid_y6loz86fbkxo22',
'中惠旅内购客服7': 'wxid_708w2ttyaz412',
'趣哥': 'wxid_39iil8tclrdb22',
}
crm_list = []
def start_requests(self):
"""
Log in.
Scrapy issues GET requests by default, so override start_requests to POST the login form.
:return:
"""
self.get_all_crm()
if not self.crm_list:
return
url = self.start_urls[0]
post_data = parse.urlencode({
'captcha': '',
'email': '<EMAIL>',
'from': 'https://wp.wxb.com/',
'password': '<PASSWORD>ei',
'remember': 'on'
})
headers = {
'Accept': 'application/json, text/plain, */*',
'Sec-Fetch-Dest': 'empty',
'X-Requested-With': 'XMLHttpRequest',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_2) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/80.0.3987.132 Safari/537.36',
'Content-Type': 'application/x-www-form-urlencoded',
'Origin': 'https://account.wxb.com',
'Sec-Fetch-Site': 'same-origin',
'Sec-Fetch-Mode': 'cors',
'Referer': 'https://account.wxb.com/page/login?from=https%3A%2F%2Fwp.wxb.com',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9'
}
yield scrapy.FormRequest(url=url, body=post_data, method='POST', headers=headers)
def parse(self, response):
"""
Fetch the account's chat groups.
:param response: HtmlResponse
:return:
"""
response_str = response.body.decode('utf-8')
json_data = json.loads(response_str)
if json_data['errcode'] == 0:
request_cookie = self.detail_cookie(response)
headers = {
'Accept': 'application/json, text/plain, */*',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Connection': 'keep-alive',
'Host': 'wp-api.wxb.com',
'Origin': 'https://wp.wxb.com',
'Referer': 'https://wp.wxb.com/cloud',
'Sec-Fetch-Dest': 'empty',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Site': 'same-site',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_2) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/80.0.3987.132 Safari/537.36',
'X-Requested-With': 'XMLHttpRequest',
'Cookie': request_cookie
}
for name in self.wx_list:
yield scrapy.Request(url='https://wp-api.wxb.com/chat/listMember?wxid=' + self.wx_list[name] +
'&member_type=2', headers=headers, method='GET',
callback=self.parse_wx, dont_filter=True, meta={'account': self.wx_list[name]})
@staticmethod
def detail_cookie(response: HtmlResponse):
"""
Convert the Set-Cookie headers into a single string for the request header.
:param response:
:return: string
"""
request_cookie = ''
cookie_list = response.headers.getlist('Set-Cookie')
for cookie in cookie_list:
request_cookie += bytes.decode(cookie) + '; '
return request_cookie
@staticmethod
def parse_wx(response):
"""
Process the chat-group member info.
:param response:
:return:
"""
response_str = response.body.decode('utf-8')
list_member = json.loads(response_str)
num_list = {}
if list_member['errcode'] == 0:
account = response.meta['account']
for chat_info in list_member['data']:
match = re.search(r'.*?(\d{4,10})', chat_info['nickname'])
if match:
wetool = TWetool.objects(chat_room_id=chat_info['wxid']).order_by('-update_at').first()
association = TAssociation.objects(team_group_id=match.group(1)).first()
cal_member_count = original_member_count = member_count = int(chat_info['member_count'])
if wetool:
if 500 > wetool.chat_room_member_count > 0 and (member_count > 500 or member_count <= 0):
cal_member_count = member_count = wetool.chat_room_member_count
if member_count > 500 or member_count < 0:
cal_member_count = 0
distributor_id = channel_id = "0"
cd = CDistributor.objects(team_group_id=match.group(1)).first()
if cd is not None:
channel_id = cd.channel_id
distributor_id = cd.distributor_id
if association:
if match.group(1) in num_list:
num_list[match.group(1)] = [num_list[match.group(1)][0] + cal_member_count,
num_list[match.group(1)][1] + 1]
else:
num_list[match.group(1)] = [cal_member_count, 1]
association.chat_room_id = chat_info['wxid']
association.chat_room_member_count = num_list[match.group(1)][0]
association.chat_room_nickname = chat_info['nickname']
association.chat_room_owner_wxid = chat_info['owner_wxid']
association.char_room_sum = num_list[match.group(1)][1]
association.chat_room_avatar = 'http' + chat_info['avatar']
association.update_at = time.strftime("%Y-%m-%d", time.localtime())
association.channel_id = channel_id
yield association
if not wetool:
wetool = TWetool()
wetool.chat_room_id = chat_info['wxid']
wetool.create_at = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
wetool.create_date = time.strftime("%Y-%m-%d", time.localtime())
wetool.chat_room_member_count = original_member_count
wetool.account = account
else:
if time.strftime("%Y-%m-%d", time.localtime()) != wetool.create_date:
wetool = TWetool()
wetool.chat_room_id = chat_info['wxid']
wetool.create_at = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
wetool.create_date = time.strftime("%Y-%m-%d", time.localtime())
wetool.account = account
wetool.chat_room_member_count = member_count
wetool.team_group_id = match.group(1)
wetool.chat_room_nickname = chat_info['nickname']
wetool.chat_room_owner_wxid = chat_info['owner_wxid']
wetool.chat_room_avatar = 'http:' + chat_info['avatar']
wetool.channel_id = channel_id
wetool.distributor_id = distributor_id
wetool.update_at = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
yield wetool
def get_all_crm(self):
"""
Fetch the group codes of all CRM group leaders.
:return:
"""
association = TAssociation.objects().all()
for ass in association:
self.crm_list.append(ass.team_group_id)
class WeToolDailyWechatSpider(scrapy.Spider):
name = "wetool_daily_wechat"
allowed_domains = ['wp.wxb.com', 'account.wxb.com']
start_urls = ['http://account.wxb.com/index2/login']
crm_list = []
def start_requests(self):
"""
Log in.
Scrapy issues GET requests by default, so override start_requests to POST the login form.
:return:
"""
url = self.start_urls[0]
post_data = parse.urlencode({
'captcha': '',
'email': '<EMAIL>88<EMAIL>',
'from': 'https://wp.wxb.com/',
'password': '<PASSWORD>',
'remember': 'on'
})
headers = {
'Accept': 'application/json, text/plain, */*',
'Sec-Fetch-Dest': 'empty',
'X-Requested-With': 'XMLHttpRequest',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_2) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/80.0.3987.132 Safari/537.36',
'Content-Type': 'application/x-www-form-urlencoded',
'Origin': 'https://account.wxb.com',
'Sec-Fetch-Site': 'same-origin',
'Sec-Fetch-Mode': 'cors',
'Referer': 'https://account.wxb.com/page/login?from=https%3A%2F%2Fwp.wxb.com',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9'
}
yield scrapy.FormRequest(url=url, body=post_data, method='POST', headers=headers)
def parse(self, response):
"""
Fetch the account's chat groups.
:param response: HtmlResponse
:return:
"""
start_time = self.get_time('startTime')
end_time = self.get_time('endTime')
response_str = response.body.decode('utf-8')
json_data = json.loads(response_str)
if json_data['errcode'] == 0:
request_cookie = self.detail_cookie(response)
headers = {
'Accept': 'application/json, text/plain, */*',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Connection': 'keep-alive',
'Host': 'wp-api.wxb.com',
'Origin': 'https://wp.wxb.com',
'Referer': 'https://wp.wxb.com/report',
'Sec-Fetch-Dest': 'empty',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Site': 'same-site',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_2) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/80.0.3987.132 Safari/537.36',
'X-Requested-With': 'XMLHttpRequest',
'Cookie': request_cookie
}
yield scrapy.Request(url='https://wp-api.wxb.com/stat/dailyWechat?s_date=' + start_time + '&e_date=' + end_time,
headers=headers, method='GET',
callback=self.daily_wechat, dont_filter=True)
def daily_wechat(self, response: HtmlResponse):
json_data = json.loads(response.body.decode('utf-8'))
if json_data['errcode'] == 0:
list_data = json_data['list']
today_data = json_data['today']
data = [list_data[0], today_data]
for item in data:
wetool_daily_report = TWetoolDailyWechat.objects(
create_date=item['date_key']).first()
if not wetool_daily_report:
wetool_daily_report = TWetoolDailyWechat()
wetool_daily_report.create_date = item['date_key']
wetool_daily_report.create_at = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
wetool_daily_report.group_send_msg_num = str(item['group_send_msg_num'])
wetool_daily_report.single_send_msg_num = str(item['single_send_msg_num'])
wetool_daily_report.update_at = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
yield wetool_daily_report
@staticmethod
def detail_cookie(response: HtmlResponse):
"""
Convert the Set-Cookie headers into a single string for the request header.
:param response:
:return: string
"""
request_cookie = ''
cookie_list = response.headers.getlist('Set-Cookie')
for cookie in cookie_list:
request_cookie += bytes.decode(cookie) + '; '
return request_cookie
@staticmethod
def get_time(tip='startTime'):
today = datetime.date.today()
oneday = datetime.timedelta(days=1)
yesterday = today - oneday
if tip == 'startTime':
# return datetime.datetime.now().strftime('%Y-%m-%d 00:00:00')
return yesterday.strftime('%Y-%m-%d')
else:
return datetime.datetime.now().strftime('%Y-%m-%d')
```
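Both spiders above rebuild a `Cookie` header from the login response's `Set-Cookie` headers via `detail_cookie`. A standalone sketch of that conversion, with made-up cookie values:

```python
def detail_cookie(set_cookie_values):
    # Mirror of WeToolListMemberSpider.detail_cookie, minus the HtmlResponse wrapper.
    request_cookie = ''
    for cookie in set_cookie_values:
        request_cookie += bytes.decode(cookie) + '; '
    return request_cookie


print(detail_cookie([b'sessionid=abc123', b'csrftoken=xyz789']))
# -> "sessionid=abc123; csrftoken=xyz789; "
```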
#### File: spiders/spiders/mafengwo.py
```python
import json
import time
from collections import namedtuple
import scrapy
from scrapy import Request, Selector
from scrapy.http import HtmlResponse
from spiders.common import OTA
from spiders.items.price import price
from spiders.items.spot import spot
from spiders.items.spot.spot import Spot
"""
Mafengwo. TODO: do not crawl for now.
"""
class MafengwoSpider(scrapy.Spider):
name = 'mafengwo'
allowed_domains = ['www.mafengwo.cn']
start_urls = ['https://www.mafengwo.cn/poi/339.html']
ota_spot_ids = OTA.OtaSpotIdMap.get_ota_spot_list(OTA.OtaCode.MAFENGWO) # ota 景区id列表
base_score_url = r'https://m.mafengwo.cn/poi/comment_{spot_ota_id}.html'
@classmethod
def build_headers(cls, referer: str) -> dict:
return {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:68.0) Gecko/20100101 Firefox/68.0',
'Accept': '*/*',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
'Connection': 'keep-alive',
'Referer': referer
}
def parse(self, response: HtmlResponse):
pass
"""
Mafengwo spot data
"""
# TODO: take spot info from the city-spot spider instead
# class MafengwoSpotSpider(scrapy.Spider):
# name = 'mafengwo_spot'
# allowed_domains = ['www.mafengwo.cn']
# base_url = r'https://www.mafengwo.cn/poi/{ota_spot_id}.html'
# start_urls = ['https://www.mafengwo.cn/poi/339.html']
#
# def parse(self, response: HtmlResponse):
# for ota_spot_id in MafengwoSpider.ota_spot_ids:
# start_page = 1
# url = self.base_url.format(ota_spot_id=ota_spot_id)
# referer = 'https://www.mafengwo.cn/poi/339.html' # 这里随便马蜂窝任何url
# yield Request(url=url, headers=MafengwoSpider.build_headers(referer), cookies={},
# callback=self.parse_item,
# dont_filter=True, meta={'page': start_page, 'ota_spot_id': ota_spot_id})
#
# def parse_item(self, response: HtmlResponse):
# spot_data = spot.Spot.objects(ota_id=OTA.OtaCode.MAFENGWO.value.id,
# ota_spot_id=response.meta['ota_spot_id']).first()
# # 不存在数据则新增数据
# if not spot_data:
# spot_data = Spot()
# spot_data.create_at = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
#
# # spot_data.spot_id = OTA.OtaSpotIdMap.get_ota_spot_id(OTA.OtaSpotIdMap.SHI_YAN_HU.name, OTA.OtaCode.HUIQULX) 这里没什么用,到时候给及其学习来做匹配
# spot_data.ota_spot_id = response.meta['ota_spot_id']
#
# spot_data.ota_id = OTA.OtaCode.MAFENGWO.value.id
# spot_data.spot_name = response.xpath('/html/body/div[2]/div[2]/div/div[3]/h1/text()').extract_first()
# desc = response.xpath('/html/body/div[2]/div[3]/div[2]/div[1]/text()').extract_first()
# spot_data.desc = desc.strip() if desc else ''
# spot_data.tel = response.xpath('/html/body/div[2]/div[3]/div[2]/ul/li[1]/div[2]/text()').extract_first()
# spot_data.traffic = response.xpath('/html/body/div[2]/div[3]/div[2]/dl[1]/dd/div[1]/text()').extract_first()
# spot_data.ticket_num = 1
# spot_data.open_time = response.xpath('/html/body/div[2]/div[3]/div[2]/dl[3]/dd/text()').extract_first()
# spot_data.update_at = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
# #spot_data.comment_num = \
# # response.xpath('//*[@id="poi-navbar"]/ul/li[3]/a/span/text()').extract_first().split('条')[0].strip('(')
#
# yield spot_data
"""
Crawl Mafengwo comments.
Note: Mafengwo only shows 5 pages of comments (15 per page, 75 in total) and cannot sort by time,
so incremental crawling is impossible; a full crawl is recommended.
"""
class MafengwoCommentSpider(scrapy.Spider):
spot_page = namedtuple('spot_page', 'page ota_spot_id ')
name = 'mafengwo_comment'
allowed_domains = ['www.mafengwo.cn']
time = int(time.time() * 1000)
base_url = r'http://pagelet.mafengwo.cn/poi/pagelet/poiCommentListApi?callback=jQuery181022435556804711854_{time}&¶ms=%7B%22poi_id%22%3A%22{spot_id}%22%2C%22page%22%3A{page}%2C%22just_comment%22%3A1%7D&_ts=1565663067492&_sn=a23eb0cba2&_=1565663067493'
start_urls = ['https://www.mafengwo.cn/poi/339.html']
base_referer = r'https://www.mafengwo.cn/poi/{ota_spot_id}.html'
cookies = {}
def start_requests(self):
# Request the detail page and set the callback; dont_filter=True skips the dupe filter, meta passes data to the callback
referer = 'https://www.mafengwo.cn/poi/339.html'
yield Request(url=self.start_urls[0], headers=MafengwoSpider.build_headers(referer), cookies=self.cookies,
callback=self.parse,
dont_filter=True)
def parse(self, response: HtmlResponse):
# Crawl each spot in turn
for ota_spot_id in MafengwoSpider.ota_spot_ids:
# Update the spot's score
url = MafengwoSpider.base_score_url.format(spot_ota_id=ota_spot_id)
yield Request(url=url, callback=self.set_spot_score, dont_filter=True,
meta={'ota_spot_id': ota_spot_id})
# Update the spot's comment count
url = self.base_referer.format(ota_spot_id=str(ota_spot_id))
yield Request(url=url, callback=self.parse_count, dont_filter=True,
meta={'offset': 0, 'ota_spot_id': ota_spot_id})
start_page = 1
url = self.base_url.format(time=self.time, spot_id=ota_spot_id, page=start_page)
referer = self.base_referer.format(ota_spot_id=ota_spot_id)
yield Request(url=url, headers=MafengwoSpider.build_headers(referer), cookies=self.cookies,
callback=self.parse_page,
dont_filter=True, meta={'page': start_page, 'ota_spot_id': ota_spot_id})
def parse_page(self, response: HtmlResponse):
response_str = response.body.decode('utf-8')
response_str = response_str.split('(', 1)[1].rstrip(');')
json_data = json.loads(response_str)
# comment_count = json_data['data']['controller_data']['comment_count']
selector = Selector(text=json_data['data']['html'])
items = selector.css('.rev-list > ul li.comment-item')
for item in items:
c_id = item.css('.useful::attr(data-id)').extract_first() # 评论id
spot_comment = spot.SpotComment.objects(ota_id=OTA.OtaCode.MAFENGWO.value.id,
ota_spot_id=response.meta['ota_spot_id'],
c_id=c_id).first()
# Already stored, no need to insert again
if spot_comment:
continue
spot_comment = spot.SpotComment()
spot_comment.c_id = c_id # 评论id
spot_comment.ota_id = OTA.OtaCode.MAFENGWO.value.id
spot_comment.ota_spot_id = response.meta['ota_spot_id']
spot_comment.u_url = item.css('.avatar::attr(href)').extract_first()
spot_comment.u_id = int(spot_comment.u_url.lstrip('/u/').rstrip('.html'))
spot_comment.u_avatar = item.css('.avatar img::attr(src)').extract_first()
spot_comment.u_level = item.css('.level::text').extract_first()
spot_comment.u_name = item.css('.name::text').extract_first()
score = item.css('.s-star::attr(class)').extract_first()
spot_comment.c_score = float(score.split()[1][-1])
spot_comment.c_useful_num = item.css('.useful-num::text').extract_first()
spot_comment.c_content = item.css('.rev-txt::text').extract_first()
spot_comment.c_img = item.css('.rev-img img::attr(src)').extract()
spot_comment.c_from = item.css('.from a::text').extract_first()
spot_comment.create_at = item.css('.time::text').extract_first()
# print('=====================', response.meta['ota_spot_id'])
yield spot_comment
# Paginate through the current spot's comments
page_num = selector.css('.count span:nth-child(1)::text').extract_first()
page = response.meta['page']
ota_spot_id = response.meta['ota_spot_id']
if page_num and page < int(page_num):
page += 1
url = self.base_url.format(time=self.time, spot_id=ota_spot_id, page=page)
referer = self.base_referer.format(ota_spot_id=ota_spot_id)
yield Request(url=url, headers=MafengwoSpider.build_headers(referer), cookies=self.cookies,
callback=self.parse_page, dont_filter=True,
meta={'page': page, 'ota_spot_id': ota_spot_id})
# Update the spot's comment count
def parse_count(self, response: HtmlResponse):
comment_count = response.xpath('//*[@id="poi-navbar"]/ul/li[3]/a/span/text()').extract_first().split('条')[
0].strip('(')
spot.Spot.objects(ota_id=OTA.OtaCode.MAFENGWO.value.id,
ota_spot_id=response.meta['ota_spot_id']).update(
set__comment_num=comment_count)
# Update the spot's score
def set_spot_score(self, response: HtmlResponse):
score = response.xpath('/html/body/div[2]/section[1]/div[1]/div[1]/div[1]/strong/text()').extract_first()
spot_data = spot.Spot.objects(ota_id=OTA.OtaCode.MAFENGWO.value.id,
ota_spot_id=response.meta['ota_spot_id']).first()
spot_data.spot_score = float(score) if score else 0
yield spot_data
class MafengwoCitySpot(scrapy.Spider):
name = 'Mafengwo_price'
allowed_domains = ['www.mafengwo.cn']
base_url = r'http://www.mafengwo.cn/sales/{ota_spot_id}.html'
start_urls = ['http://www.mafengwo.cn/sales/2272257.html']
base_referer = r'http://www.mafengwo.cn/sales/{ota_spot_id}.html'
spot_ota_list = [2272257, 7431278, 2287661, 2387328, 2672069, 6379176]
cookies = {}
def start_requests(self):
# Request the detail page and set the callback; dont_filter=True skips the dupe filter, meta passes data to the callback
referer = 'https://www.mafengwo.cn/poi/339.html'
yield Request(url=self.start_urls[0], headers=MafengwoSpider.build_headers(referer), cookies=self.cookies,
callback=self.parse,
dont_filter=True)
def parse(self, response: HtmlResponse):
for ota_spot_id in self.spot_ota_list:
url = self.base_url.format(ota_spot_id=ota_spot_id)
yield Request(url=url, dont_filter=True, callback=self.parse_spot, meta={'ota_spot_id': ota_spot_id})
def parse_spot(self, response: HtmlResponse):
# print(response.body.decode('utf-8'))
ota_spot_id = response.meta['ota_spot_id']
# spot_city = spot.SpotCity.objects(ota_id=OTA.OtaCode.MAFENGWO.value.id, ota_spot_id=ota_spot_id).first()
# if not spot_city:
# spot_city = spot.SpotCity()
# spot_city.ota_id = OTA.OtaCode.MAFENGWO.value.id
# spot_city.ota_spot_id = ota_spot_id
# spot_city.create_at = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
#
# spot_city.s_name = response.css('.sales-title > h1::text').extract_first()
# spot_city.city_name = response.css(
# 'div.container > div.wrapper > div.crumb > div:nth-child(2) > a::text').extract_first()
o_price = price.OPrice.objects(ota_spot_id=ota_spot_id, ota_id=OTA.OtaCode.MAFENGWO.value.id).first()
# print(response.css('div.sales-title > h1::text').extract_first(),'%%%'*20)
# Create a new record if none exists yet
if not o_price:
o_price = price.OPrice()
o_price.ota_id = OTA.OtaCode.MAFENGWO.value.id
o_price.ota_spot_id = ota_spot_id
o_price.ota_spot_name = response.css('div.sales-title > h1::text').extract_first()
o_price.create_at = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
o_price.update_at = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
ota_product = []
ticket_dom = response.css('.ticket-info > tbody')
sold_text = response.css('li.item-sold::text').extract_first()
# .strip() never returns None, so the old check could not detect a missing node; treat missing/empty as zero
if not sold_text or not sold_text.strip():
sale_num = 0
else:
sale_num = sold_text.strip()[2:]
for item in ticket_dom:
product_item = {}
type_key = item.css('td.ticket-type.adult-ticket.folded::text').extract_first()
tr_list = item.css('.ticket-item')
for tr in tr_list:
product_item['type_id'] = tr.css('.tobuy-btn > span::attr(data-id)').extract_first()
product_item['type_key'] = type_key
product_item['type_name'] = item.css('td.ticket-name::text').extract_first()
product_item['normal_price'] = item.css('td.ticket-price::text').extract_first().strip('¥起')
# 'tickets' was never initialised, so item assignment would raise KeyError; build the one-element list directly
product_item['tickets'] = [{'price_id': product_item['type_id'], 'title': product_item['type_name'],
'seller_nick': product_item['type_name'],
'price': product_item['normal_price'], 'cash_back': 0, 'cut_price': 0,
'sale_num': sale_num,
'url': self.base_url.format(ota_spot_id=response.meta['ota_spot_id'])}]
ota_product.append(product_item)
price_calendar = price.OPriceCalendar()
price_calendar.ota_id = OTA.OtaCode.MAFENGWO.value.id  # was MEITUAN by mistake; this spider targets Mafengwo
price_calendar.ota_spot_id = response.meta['ota_spot_id']
price_calendar.type_key = type_key
price_calendar.type_id = tr.css('.tobuy-btn > span::attr(data-id)').extract_first()
price_calendar.type_name = item.css('td.ticket-name::text').extract_first()
price_calendar.pre_price = product_item['normal_price']
price_calendar.ota_spot_name = response.css('div.sales-title > h1::text').extract_first()
price_calendar.create_at = time.strftime("%Y-%m-%d", time.localtime())
yield price_calendar
print('Adding price calendar for', response.css('div.sales-title > h1::text').extract_first(), "*" * 20)
o_price.ota_product = ota_product
print('Adding ticket details for', response.css('div.sales-title > h1::text').extract_first(),
'OTA id', OTA.OtaCode.MAFENGWO.value.id, "*" * 20)
yield o_price
```
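`MafengwoCommentSpider.parse_page` receives a JSONP payload and strips the jQuery callback wrapper before parsing. A tiny sketch of that unwrapping, with an illustrative payload:

```python
import json

raw = 'jQuery18102_123456789({"data": {"html": "<ul></ul>"}});'
body = raw.split('(', 1)[1].rstrip(');')   # drop the callback name and trailing ");"
print(json.loads(body)['data']['html'])    # -> "<ul></ul>"
```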
#### File: spiders/markting/jinritoutiao.py
```python
import datetime
import json
import math
import re
import scrapy
from scrapy import Request
from scrapy.http import HtmlResponse
from spiders.common import helper
from spiders.common.marketing import WeMedia, WeMediaType
from spiders.items import marketing
from spiders.items.marketing import Article
from spiders.items.spot import spot
class JinritoutiaoSpider(scrapy.Spider):
name = 'toutiao'
allowed_domains = ['www.toutiao.com/']
start_urls = ['https://mp.toutiao.com']
base_url = 'https://www.toutiao.com/'
we_media_id = WeMedia.TOU_TIAO.value.id
we_media_type = WeMediaType.WE_MEDIA.value.id
cookie_list = {}
def parse(self, response):
result = helper.get_media_account(self)
for detail in result:
[account, cookie_list] = detail
user_url = 'https://mp.toutiao.com/user_login_status_api/'
yield Request(url=user_url, callback=self.parse_user, dont_filter=True, cookies=cookie_list,
meta={'account': account})
def parse_user(self, response: HtmlResponse):
account = response.meta['account']
response_str = response.body.decode('utf-8')
user_detail = json.loads(response_str)['reason']['media']['media_info']
account.account_id = user_detail['user_id']
start = end = helper.get_yesterday()
url = r'https://mp.toutiao.com/mp/agw/statistic/content/content_overview?start_date={start}&end_date={end}' \
.format(start=start, end=end)
yield Request(url=url, callback=self.parse_data, dont_filter=True, cookies=self.cookie_list,
meta={'account': account})
def parse_data(self, response: HtmlResponse):
response_str = response.body.decode('utf-8')
content_daily_detail = json.loads(json.loads(response_str)['data']['total_stat'])
account = response.meta['account']
# exposure count
exposure_num = account.exposure_num if account.exposure_num is not None else 0
account.exposure_num = exposure_num + content_daily_detail['impression_count'] + content_daily_detail[
'go_detail_count']
# read count
read_num = account.read_num if account.read_num is not None else 0
account.read_num = read_num + content_daily_detail['go_detail_count']
# recommendation count
recommend_num = account.recommend_num if account.recommend_num is not None else 0
account.recommend_num = recommend_num + content_daily_detail['impression_count']
# shares
forward_num = account.forward_num if account.forward_num is not None else 0
account.forward_num = forward_num + content_daily_detail['share_count']
# published articles
publish_num = account.publish_num if account.publish_num is not None else 0
account.publish_num = publish_num + content_daily_detail['publish_num']
# comment count
comment_count = account.comment_num if account.comment_num is not None else 0
account.comment_num = comment_count + content_daily_detail['comment_count']
marketing_daily_report = self.handle_marketing_daily_report(account, content_daily_detail)
yield Request(url='https://mp.toutiao.com/statistic/profile_stat/', callback=self.parse_fans_profile,
dont_filter=True, cookies=self.cookie_list,
meta={'account': account, 'marketing_daily_report': marketing_daily_report})
def handle_marketing_daily_report(self, account: marketing.Account, content_daily_detail):
"""
处理文章日报表文章数据
:param account: marketing.Account 账号信息
:param content_daily_detail: dict 文章基础数据
:return:
"""
marketing_daily_report = marketing.MarketingDailyReport()
marketing_daily_report.type = self.we_media_type
marketing_daily_report.platform = self.we_media_id
marketing_daily_report.account_id = account.id
marketing_daily_report.account_name = account.account_name
marketing_daily_report.admin_id = account.admin_id
# exposure count
marketing_daily_report.exposure_num = content_daily_detail['impression_count'] + content_daily_detail[
'go_detail_count']
# read count
marketing_daily_report.read_num = account.read_num
marketing_daily_report.day_read_num = content_daily_detail['go_detail_count']
# recommendation count
marketing_daily_report.recommend_num = content_daily_detail['impression_count']
# share count
marketing_daily_report.forward_num = content_daily_detail['share_count']
# publish count
marketing_daily_report.publish_num = account.publish_num
marketing_daily_report.day_publish_num = content_daily_detail['publish_num']
# comment count
account.comment_count = content_daily_detail['comment_count']
marketing_daily_report.day_time = datetime.datetime.now().strftime('%Y-%m-%d')
return marketing_daily_report
def parse_fans_profile(self, response: HtmlResponse):
response_str = response.body.decode('utf-8')
profile_detail = json.loads(response_str)['data']
account = response.meta['account']
marketing_daily_report = response.meta['marketing_daily_report']
# total followers
marketing_daily_report.follow_num = account.follow_num = profile_detail['total_subscribe_count']
# total income
account.total_income = profile_detail['total_income']
# total withdrawals
account.drawing = profile_detail['total_withdraw']
# current balance (real-time)
account.balance = profile_detail['actual_income']
yield Request(url='https://mp.toutiao.com/mp/agw/statistic/fans/property', callback=self.parse_property,
dont_filter=True, cookies=self.cookie_list,
meta={'account': account, 'marketing_daily_report': marketing_daily_report})
def parse_property(self, response: HtmlResponse):
response_str = response.body.decode('utf-8')
account = response.meta['account']
marketing_daily_report = response.meta['marketing_daily_report']
fans_property_data = json.loads(response_str)['fans_property_data']
fans_age_total = 0
for count in fans_property_data['fansage'].values():
fans_age_total += count
marketing_daily_report.age_proportion = account.age_proportion = {
'<24': round(fans_property_data['fansage']['18-23'] / fans_age_total, 4) * 100,
'25-39': round((fans_property_data['fansage']['24-30'] + fans_property_data['fansage']['31-40']) /
fans_age_total, 4) * 100,
'>40': round((fans_property_data['fansage']['41-50'] + fans_property_data['fansage']['50-']) /
fans_age_total, 4) * 100,
'unknown': 0.00 * 100
}
fans_gender_total = fans_property_data['fansgender']['female'] + fans_property_data['fansgender']['male']
marketing_daily_report.sex_proportion = account.sex_proportion = {
'man': round(fans_property_data['fansgender']['male'] / fans_gender_total, 4) * 100,
'women': round(fans_property_data['fansgender']['female'] / fans_gender_total, 4) * 100,
'unknown': 0.00 * 100
}
account.update_at = datetime.datetime.now().strftime('%Y-%m-%d')
yield account
fans_url = r'https://mp.toutiao.com/mp/agw/statistic/fans/count_trend/?start_date={start}&end_date={end}'
start = end = helper.get_yesterday()
yield Request(url=fans_url.format(start=start, end=end), callback=self.daily_fans,
dont_filter=True, cookies=self.cookie_list,
meta={'marketing_daily_report': response.meta['marketing_daily_report']})
def daily_fans(self, response: HtmlResponse):
response_str = response.body.decode('utf-8')
marketing_daily_report = response.meta['marketing_daily_report']
fans_count_trend = json.loads(response_str)['fans_count_trend']['data_list'][0]
# unfollow count
marketing_daily_report.day_unfollow_num = fans_count_trend['new_dislike_count']
# net new followers
marketing_daily_report.day_add_follow_num = fans_count_trend['new_growth_count']
# new follows for the day
marketing_daily_report.day_follow_num = fans_count_trend['new_like_count']
yield Request(url='https://mp.toutiao.com/pgc/mp/income/withdraw_info?page_no=1&page_size=10',
callback=self.daily_income, dont_filter=True, cookies=self.cookie_list,
meta={'marketing_daily_report': response.meta['marketing_daily_report']})
@staticmethod
def daily_income(response: HtmlResponse):
response_str = response.body.decode('utf-8')
income_statement = json.loads(response_str)['data']['overview']
marketing_daily_report = response.meta['marketing_daily_report']
marketing_daily_report.income = income_statement[0]['amount']
marketing_daily_report.drawing = income_statement[2]['amount']
marketing_daily_report.balance = income_statement[1]['can_withdraw']
marketing_daily_report.create_at = datetime.datetime.now().strftime('%Y-%m-%d')
marketing_daily_report.update_at = datetime.datetime.now().strftime('%Y-%m-%d')
yield marketing_daily_report
class JinritoutiaoArticleSpider(scrapy.Spider):
name = 'toutiao_article'
start_urls = [
'https://mp.toutiao.com/mp/agw/article/list?size=20&status=all&from_time=0&start_time=0&end_time=0'
'&search_word=&page=1&feature=0&source=all']
allowed_domains = ['www.toutiao.com/']
base_url = 'https://www.toutiao.com/'
cookie_list = {}
page_size = 20
page_url = r'https://mp.toutiao.com/mp/agw/article/list?size=20&status=all&from_time=0&start_time=0&end_time=0' \
r'&search_word=&page={page}&feature=0&source=all'
content_url = r'https://www.toutiao.com/i{article_id}/'
account = marketing.Account()
spot_list = []
we_media_id = WeMedia.TOU_TIAO.value.id
we_media_type = WeMediaType.WE_MEDIA.value.id
def parse(self, response: HtmlResponse):
[self.account, self.cookie_list] = helper.get_media_account(self)
yield Request(url=self.start_urls[0], callback=self.parse_article, dont_filter=True, cookies=self.cookie_list,
meta={'page': 1})
def parse_article(self, response: HtmlResponse):
response_str = response.body.decode('utf-8')
article_data = json.loads(response_str)['data']
article_list = article_data['content']
page = article_data['page'] + 1
for article_detail in article_list:
yield self.handle_article(article_detail)
if page <= math.ceil(article_data['total'] / self.page_size):
yield Request(url=self.page_url.format(page=page), callback=self.parse_article,
dont_filter=True, cookies=self.cookie_list, meta={'page': page})
def handle_article(self, article_detail):
actual_article = article = Article.objects(platform_type=self.account.type, platform=self.account.platform
, article_id=article_detail['id']).first()
article = Article() if article is None else article
article.exposure_num = article_detail['impression_count'] + article_detail['go_detail_count']
article.recommend_num = article_detail['impression_count']
article.read_num = article_detail['go_detail_count']
article.forward_num = article_detail['share_count']
article.like_num = 0
article.comment_num = article_detail['comment_count']
if actual_article is None:
article.platform_type = self.account.type
article.platform = self.account.platform
article.article_id = article_detail['id']
article.title = article_detail['title']
article.account_id = self.account.id
article.create_at = datetime.datetime.now().strftime('%Y-%m-%d')
article.admin_id = self.account.admin_id
# article.admin_name = self.account.admin_name
headers = {
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_2) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/79.0.3945.88 Safari/537.36',
'referer': self.content_url.format(article_id=article.article_id)
}
return Request(url=self.content_url.format(article_id=article.article_id), callback=self.parse_content
, dont_filter=True, cookies=self.cookie_list, headers=headers, meta={'article': article})
else:
article.update_at = datetime.datetime.now().strftime('%Y-%m-%d')
return article
def parse_content(self, response: HtmlResponse):
article = response.meta['article']
response_str = response.body.decode('utf-8')
content = re.search(r'content: \'"(.*)"\'', response_str)
if content is None:
content = re.search(r'gallery: JSON.parse\(\"(.*)\"\)', response_str)
content = content.group(1).encode('latin-1').decode('unicode-escape')
article.content = content
article.keyword_list = article.spot_id_list = []
for spot_keywords in self.get_spot_list():
if spot_keywords['abbreviation'] in str(content):
article.keyword_list.append(spot_keywords['abbreviation'])
article.spot_id_list.append(spot_keywords['spot_id'])
article.update_at = datetime.datetime.now().strftime('%Y-%m-%d')
return article
def get_spot_list(self):
if not len(self.spot_list):
spo_list = spot.CSpot.objects(self_employed=True).fields(spot_id=1, abbreviation=1)
for spot_detail in spo_list:
self.spot_list.append({'spot_id': spot_detail.spot_id, 'abbreviation': spot_detail.abbreviation})
return self.spot_list
```
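Both Toutiao spiders above build one report across several HTTP calls by threading a partially filled object through `response.meta` and only yielding it in the last callback. Below is a minimal, self-contained sketch of that request-chaining pattern; the endpoint URLs and JSON fields are placeholders, not the real Toutiao APIs.
```python
# Minimal sketch of the request-chaining pattern used above: an item dict is
# threaded through response.meta and only yielded by the final callback.
# The URLs and field names are placeholders.
import json
import scrapy
from scrapy import Request


class ChainedStatsSpider(scrapy.Spider):
    name = 'chained_stats_demo'
    start_urls = ['https://example.com/api/overview']  # placeholder endpoint

    def parse(self, response):
        report = {'reads': json.loads(response.text).get('reads', 0)}
        # Carry the partially built report to the next callback via meta.
        yield Request(url='https://example.com/api/fans', callback=self.parse_fans,
                      dont_filter=True, meta={'report': report})

    def parse_fans(self, response):
        report = response.meta['report']
        report['fans'] = json.loads(response.text).get('fans', 0)
        yield report  # only the last callback emits the finished item
```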
#### File: spiders/markting/sougou.py
```python
import datetime
import json
import scrapy
from scrapy import Request
from scrapy.http import HtmlResponse
from spiders.common import helper
from spiders.common.marketing import WeMedia, WeMediaType
class SougouSpider(scrapy.Spider):
name = 'sougou'
allowed_domains = ['mp.sogou.com']
start_urls = ['http://mp.sogou.com/']
base_url = 'https://www.toutiao.com/'
we_media_id = WeMedia.SOGOU.value.id
we_media_type = WeMediaType.WE_MEDIA.value.id
cookie_list = {}
def parse(self, response):
result = helper.get_media_account(self)
for detail in result:
[account, cookie_list] = detail
start = end = helper.get_yesterday()
article_url = r'http://mp.sogou.com/api/statistics/arti-analysis/sum?startDate={start}&endDate={end}'
yield Request(url=article_url.format(start=start, end=end), callback=self.article_analysis,
dont_filter=True, cookies=cookie_list, meta={'account': account})
def article_analysis(self, response: HtmlResponse):
response_str = response.body.decode('utf-8')
total_info = json.loads(response_str)
account = response.meta['account']
account.exposure_num = (0 if account.exposure_num is None else account.exposure_num) + \
(total_info['recommendedNum'] + total_info['readingNum'])
account.recommend_num = (0 if account.recommend_num is None else account.recommend_num) + \
(total_info['recommendedNum'])
account.read_num = (0 if account.read_num is None else account.read_num) + total_info['readingNum']
account.forward_num = (0 if account.forward_num is None else account.forward_num) + total_info['sharedNum']
account.like_num = 0
account.comment_num = (0 if account.comment_num is None else account.comment_num) + total_info['commentsNum']
account.publish_num = (0 if account.publish_num is None else account.publish_num) + total_info['articleNum']
account.sex_proportion = {
'man': 0,
'women': 0,
'unknown': 100
}
account.age_proportion = {
'<24': 0,
'25-39': 0,
'>40': 0,
'unknown': 100
}
fans_analysis_url = r'http://mp.sogou.com/api/statistics/fans-analysis/{day}'
yield Request(url=fans_analysis_url.format(day=helper.get_yesterday()), callback=self.fans_analysis,
dont_filter=True, cookies=self.cookie_list, meta={'account': account})
def fans_analysis(self, response: HtmlResponse):
response_str = response.body.decode('utf-8')
fans_info = json.loads(response_str)
account = response.meta['account']
account.follow_num = (0 if account.follow_num is None else account.follow_num) + fans_info['subscribe']
start = end = helper.get_yesterday(2)
yield Request(url='http://mp.sogou.com/api/income/withdraw/sum'.format(start=start, end=end),
callback=self.income_analysis, dont_filter=True, cookies=self.cookie_list,
meta={'account': account})
@staticmethod
def income_analysis(response: HtmlResponse):
response_str = response.body.decode('utf-8')
income_info = json.loads(response_str)
account = response.meta['account']
account.total_income = income_info['totalAmount']
account.drawing = income_info['paidAmount']
account.balance = income_info['withdrawableAmount']
account.account_home = 'http://mp.sogou.com/dashboard'
account.update_at = datetime.datetime.now().strftime('%Y-%m-%d')
account.create_at = datetime.datetime.now().strftime('%Y-%m-%d')
yield account
```
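The Toutiao and Sogou spiders both depend on `spiders.common.helper`, which is not included in this dump. The sketch below is a hypothetical stand-in for the two helper calls they use (`get_media_account` and `get_yesterday`), written only to make the assumed interface concrete: an iterable of `(account, cookie_dict)` pairs and a `YYYY-MM-DD` date string.
```python
# Hypothetical stand-in for spiders.common.helper (not shown in this dump).
# It only mirrors the interface the spiders above rely on.
import datetime


def get_yesterday(days: int = 1) -> str:
    """Return the date `days` ago as YYYY-MM-DD (defaults to yesterday)."""
    return (datetime.date.today() - datetime.timedelta(days=days)).strftime('%Y-%m-%d')


def get_media_account(spider):
    """Yield (account, cookie_dict) pairs for every stored account of this platform."""
    # The real project presumably queries its account store for rows whose
    # platform matches spider.we_media_id and deserializes their cookies.
    stored = [
        {'account_name': 'demo-account', 'cookies': {'sessionid': 'xxx'}},  # placeholder row
    ]
    for row in stored:
        account = {'account_name': row['account_name']}  # real code builds a marketing.Account()
        yield account, row['cookies']
```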
#### File: spiders/spiders/qunar.py
```python
import json
import math
import random
import time
import requests
import scrapy
from scrapy.http import HtmlResponse
from scrapy import Request
from spiders.common import OTA
from spiders.items.spot import spot
from spiders.items.price import price
class QunarSpider(scrapy.Spider):
# tag type: 0 = system tag, 1 = user tag
sys_tags = 0
user_tags_true = 1
user_tags_false = 2
name = 'qunar'
allowed_domains = ['www.qunar.com']
start_urls = ['http://www.qunar.com/']
ota_spot_ids = OTA.OtaSpotIdMap.get_ota_spot_list(OTA.OtaCode.QUNAR)  # list of OTA spot ids
def parse(self, response):
pass
class QunarTagSpider(scrapy.Spider):
name = 'qunar_tag'
allowed_domains = ['www.qunar.com']
total_num = 0  # total number of comments
page_size = 20  # comments fetched per page
base_url = r'https://touch.piao.qunar.com/touch/queryCommentsAndTravelTips.json?type=mp&pageSize={page_size}&fromType=SIGHT&pageNum={page_num}&sightId={ota_spot_id}&tagType=44&tagName=%E6%9C%80%E6%96%B0'
start_urls = [
'https://touch.piao.qunar.com/touch/queryCommentsAndTravelTips.json?type=mp&pageSize=1&fromType=SIGHT&pageNum=1&sightId=706176810']
def parse(self, response: HtmlResponse):
# crawl data for each spot in the list
for ota_spot_id in QunarSpider.ota_spot_ids:
# update the comment count of this spot
url = self.base_url.format(ota_spot_id=ota_spot_id, page_num=1, page_size=1)
yield Request(url=url, callback=self.spot_tag, dont_filter=True,
meta={'page_num': 1, 'ota_spot_id': ota_spot_id})
"""获取景区用户点评标签"""
def spot_tag(self, response: HtmlResponse):
response_str = response.body.decode('utf-8')
comment = json.loads(response_str)
if 'data' in comment and 'tagList' in comment['data']:
spot_tag = []
for key, value in enumerate(comment['data']['tagList']):
# print(value['tagName'])
if value['tagType'] in [0, 1, 41, 43, 44]:
tag_type = QunarSpider.sys_tags  # system tag
else:
tag_type = QunarSpider.user_tags_true  # user tag
tag = {'tag_name': value['tagName'], 'tag_num': value['tagNum'], 'tag_score': value['tagScore'],
'tag_type': tag_type}
spot_tag.append(tag)
print(spot_tag, "#" * 20)
print('-' * 20, 'ota_id', OTA.OtaCode.QUNAR.value.id, 'ota_spot_id', response.meta['ota_spot_id'])
spot.Spot.objects(ota_id=OTA.OtaCode.QUNAR.value.id,
ota_spot_id=response.meta['ota_spot_id']).update(
set__tag_list=spot_tag)
pass
class CommentSpider(scrapy.Spider):
name = 'qunar_comment'
allowed_domains = ['www.qunar.com']
total_num = 0  # total number of comments
page_size = 10  # comments fetched per page
base_url = r'https://touch.piao.qunar.com/touch/queryCommentsAndTravelTips.json?type=mp&pageSize={page_size}&fromType=SIGHT&pageNum={page_num}&sightId={ota_spot_id}&tagType=44&tagName=%E6%9C%80%E6%96%B0'
start_urls = [
'https://touch.piao.qunar.com/touch/queryCommentsAndTravelTips.json?type=mp&pageSize=1&fromType=SIGHT&pageNum=1&sightId=706176810']
def parse(self, response: HtmlResponse):
headers = {'content-type': 'application/json'}
# crawl comment data for each spot in the list
for ota_spot_id in QunarSpider.ota_spot_ids:
# update the comment count of this spot
url = self.base_url.format(ota_spot_id=ota_spot_id, page_num=1, page_size=10)
# headers = {'content-type': 'application/json'}
data = requests.get(url, headers=headers)
comment = data.json()
print(ota_spot_id, "共", comment['data']['total'], "条", "*" * 20)
page_size = 10
# total number of comments reported by the site
total_page = comment['data']['total']
# number of comments already stored in the database
now_total = spot.SpotComment.objects(ota_id=OTA.OtaCode.QUNAR.value.id,
ota_spot_id=ota_spot_id).count()
# number of new comments that still need to be saved
to_save_total = total_page - now_total
# number of pages to crawl for the new comments
total_page = math.ceil(to_save_total / page_size)
for page_num in range(1, total_page + 1):
if page_num == total_page:
page_size = to_save_total - (page_num - 1) * page_size
else:
page_size = page_size
url = self.base_url.format(ota_spot_id=ota_spot_id, page_num=page_num, page_size=page_size)
print("-" * 30)
print(url)
print("+" * 30)
# headers = {'content-type': 'application/json'}
data = requests.get(url, headers=headers)
try:
comment = data.json()
except Exception:
try:
data = requests.get(url, headers=headers)
comment = data.json()
except Exception:
print("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx")
print(ota_spot_id, " 第", page_num, "页: ", "共", page_size, "条 ", "*" * 20)
if 'data' in comment and 'commentList' in comment['data']:
for key, value in enumerate(comment['data']['commentList']):
print('正在添加 ', value['author'], ' 的评论', "*" * 20)
spot_comment = spot.SpotComment.objects(ota_id=10004).first()
spot_comment.ota_id = OTA.OtaCode.QUNAR.value.id
spot_comment.ota_spot_id = ota_spot_id
spot_comment.goods_name = value['sightName']
# spot_comment.u_avatar = value['headImg']
spot_comment.u_name = value['author']
spot_comment.c_tag = value['tagList']
spot_comment.c_id = value['commentId']
spot_comment.c_score = value['score']
spot_comment.c_content = value['content']
# spot_comment.c_img = value['imgs']
spot_comment.c_img = [item['small'] for item in value['imgs']]
spot_comment.create_at = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
yield spot_comment
'''
Crawl review data together with its tags.
'''
class CommentAndTagSpider(scrapy.Spider):
name = 'comment_and_tag'
allowed_domains = ['touch.piao.qunar.com']
def start_requests(self):
for ota_spot_id in QunarSpider.ota_spot_ids:
print(ota_spot_id, 'ota' * 20)
yield scrapy.FormRequest(
'https://touch.piao.qunar.com/touch/queryCommentsAndTravelTips.json?type=mp&pageSize=10&fromType=SIGHT&pageNum=0&sightId=' + str(
ota_spot_id)
, method='GET'
, meta={'ota_spot_id': ota_spot_id}
, callback=self.after_login)
def after_login(self, response):
print('-' * 20)
result = json.loads(response.body)
if 'data' in result and 'tagList' in result['data']:
spot_tag = []
for key, value in enumerate(result['data']['tagList']):
if value['tagType'] in [0, 1, 41, 43, 44]:
tag_type = QunarSpider.sys_tags  # system tag
else:
tag_type = QunarSpider.user_tags_true  # user tag
tag = {'tag_name': value['tagName'], 'tag_num': value['tagNum'], 'tag_score': value['tagScore'],
'tag_type': tag_type}
spot_tag.append(tag)
print(spot_tag, "#" * 20)
print('-' * 20, 'ota_id', OTA.OtaCode.QUNAR.value.id, 'ota_spot_id', response.meta['ota_spot_id'])
spot.Spot.objects(ota_id=OTA.OtaCode.QUNAR.value.id,
ota_spot_id=response.meta['ota_spot_id']).update_one(
set__tag_list=spot_tag, upsert=True)
if 'data' in result and 'total' in result['data']:
print('共', result['data']['total'], '条', '*' * 20)
for pag_num in range(1, math.ceil(result['data']['total'] / 10)):
# for pag_num in range(1, 5):
print('第', pag_num, '页', '+' * 20)
yield scrapy.FormRequest(
'https://touch.piao.qunar.com/touch/queryCommentsAndTravelTips.json?type=mp&pageSize=10&fromType=SIGHT&pageNum=' + str(
pag_num) + '&sightId=' + str(response.meta['ota_spot_id'])
, method='GET'
, meta={'page': pag_num, 'ota_spot_id': response.meta['ota_spot_id']}
, callback=self.each_page)
def each_page(self, response):
print('-' * 20)
result = json.loads(response.body)
if 'data' in result and 'commentList' in result['data']:
for key, value in enumerate(result['data']['commentList']):
print(value['author'], '第', response.meta['page'], '页', '+' * 20)
if 'headImg' in value:
headImg = value['headImg']
else:
headImg = ''
yield spot.SpotComment.objects(c_id=value['commentId']).update_one(
set__ota_id=OTA.OtaCode.QUNAR.value.id,
set__ota_spot_id=response.meta['ota_spot_id'],
set__goods_name=value['sightName'],
set__u_avatar=headImg,
set__u_name=value['author'],
set__c_tag=value['tagList'],
set__c_score=value['score'],
set__c_content=value['content'],
set__c_img=[item['small'] for item in value['imgs']],
set__create_at=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
upsert=True)
class PriceSpider(scrapy.Spider):
ota_map = [{'ota_spot_id': 706176810, 'sightId': '14407', 'sightName': '石燕湖'} # 石燕湖
, {'ota_spot_id': 1915618311, 'sightId': '187730', 'sightName': '石牛寨'} # 石牛寨
, {'ota_spot_id': 2877753081, 'sightId': '469141', 'sightName': '益阳嘉年华'} # 益阳嘉年华
, {'ota_spot_id': 2554926827, 'sightId': '470541', 'sightName': '花田溪谷'} # 花田溪谷
, {'ota_spot_id': 225118749, 'sightId': '461232', 'sightName': '东浒寨'} # 东浒寨
, {'ota_spot_id': 3821817759, 'sightId': '11829', 'sightName': '马仁奇峰'} # 马仁奇峰
, {'ota_spot_id': 420237024, 'sightId': '39499', 'sightName': '大茅山'} # 大茅山
, {'ota_spot_id': 4123349957, 'sightId': '35473', 'sightName': '九龙江'} # 九龙江
, {'ota_spot_id': 2333288470, 'sightId': '196586', 'sightName': '侠天下'} # 侠天下
, {'ota_spot_id': 3333064220, 'sightId': '461903', 'sightName': '三翁花园'} # 三翁花园
]
name = 'qunar_price'
allowed_domains = ['piao.qunar.com']
login_url = 'http://piao.qunar.com/ticket/detail/getTickets.json'
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
def start_requests(self):
price.OPrice.objects(ota_id=10006).delete()
price.OPriceCalendar.objects(ota_id=10006, create_at=time.strftime("%Y-%m-%d", time.localtime())).delete()
print('start_request')
for value in self.ota_map:
# print(value['sightId'], "*" * 20)
yield scrapy.FormRequest(self.login_url
, formdata={'sightId': value['sightId'], 'from': 'detail'}
, meta={'ota_spot_id': value['ota_spot_id'], 'sight_name': value['sightName']}
, callback=self.after_login)
def after_login(self, response):
print('-' * 20)
result = json.loads(response.body)
if 'data' in result and 'groups' in result['data']:
for k1, v1 in enumerate(result['data']['groups']):  # ticket groups for this sightId
ota_product = []
for k2, v2 in enumerate(v1):  # ticket-type data
tickets = []
typeId = str(v2['typeId'])
ota_spot_name = response.meta['sight_name']
typeKey = ota_spot_name + v2['ticketZoneName']
ticketZoneName = v2['typeName']
total_count = v2['totalCount']  # total number of tickets
total_price = 0  # accumulated price over all tickets
normal_price = v2['qunarPrice']
if 'tickets' in v2:
print(v2['qunarPrice'])
for k3, v3 in enumerate(v2['tickets']):
tickets_list = {'price_id': str(v3['priceId'])
, 'title': v3['title']
, 'seller_nick': v3['supplierName']
, 'price': v3['qunarPrice']
, 'cash_back': v3['cashBack']
, 'cut_price': v3['cutPrice']
, 'sale_num': 0
, 'url': 'http://touch.piao.qunar.com/touch/detail_' + str(response.meta[
'ota_spot_id']) + '.html?st=a3clM0QlRTclOUYlQjMlRTclODclOTUlRTYlQjklOTYlMjZpZCUzRDE0NDA3JTI2dHlwZSUzRDAlMjZpZHglM0QxJTI2cXQlM0RuYW1lJTI2YXBrJTNEMiUyNnNjJTNEV1dXJTI2YWJ0cmFjZSUzRGJ3ZCU0MCVFNiU5QyVBQyVFNSU5QyVCMCUyNnVyJTNEJUU5JTk1JUJGJUU<KEY>TI2bHIlM0QlRTklOTUlQkYlRTYlQjIlOTklMjZmdCUzRCU3QiU3RA%3D%3D#from=mpl_search_suggest'
}
tickets.append(tickets_list)
total_price = total_price + v3['qunarPrice']
# print(v3['title']) # priceId qunarPrice cashBack cutPrice supplierId supplierName
ota_product_list = {'type_id': typeId, 'type_key': typeKey, 'type_name': ticketZoneName,
'normal_price': normal_price,
'tickets': tickets}
ota_product.append(ota_product_list)
pre_price = round(total_price / total_count, 2)
print(pre_price, "+" * 20)
# print(ota_product)
'''
Save the price calendar entry.
'''
price_calendar = price.OPriceCalendar()
price_calendar.ota_id = OTA.OtaCode.QUNAR.value.id
price_calendar.ota_spot_id = response.meta['ota_spot_id']
price_calendar.ota_spot_name = response.meta['sight_name']
price_calendar.pre_price = pre_price
price_calendar.type_id = typeId
price_calendar.type_key = typeKey
price_calendar.type_name = ticketZoneName
price_calendar.create_at = time.strftime("%Y-%m-%d", time.localtime())
o_price = price.OPrice()
o_price.ota_id = OTA.OtaCode.QUNAR.value.id
o_price.ota_spot_id = response.meta['ota_spot_id']
o_price.ota_spot_name = ota_spot_name
o_price.ota_product = ota_product # typeId typeName qunarPrice
price_calendar.save(force_insert=False, validate=False, clean=True)
yield o_price
```
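CommentSpider above only fetches the comments that are missing from the database and splits them into pages with `math.ceil`. The following standalone sketch isolates that delta-pagination arithmetic so it can be checked on its own.
```python
# Minimal sketch of the delta-pagination logic used by CommentSpider above:
# only comments that are not yet in the database are fetched, page by page.
import math


def plan_pages(site_total: int, stored_total: int, page_size: int = 10):
    """Yield (page_num, page_size) pairs covering only the missing comments."""
    to_save = max(site_total - stored_total, 0)
    total_pages = math.ceil(to_save / page_size)
    for page_num in range(1, total_pages + 1):
        if page_num == total_pages:
            size = to_save - (page_num - 1) * page_size  # the last page may be partial
        else:
            size = page_size
        yield page_num, size


# e.g. 105 comments on the site, 60 already stored -> pages of 10, 10, 10, 10, 5
print(list(plan_pages(105, 60)))
```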
#### File: spiders/test/PerformanceTest.py
```python
import json
import time
import scrapy
from scrapy import Request
from scrapy.http import HtmlResponse
from spiders.common import OTA
from spiders.items.price import price
from spiders.items.spot import spot
class OrderCreateSpider(scrapy.Spider):
name = 'performance-test-order-create'
allowed_domains = ['www.ctrip.com']
start_urls = ['https://www.ctrip.com/']
ota_spot_ids = OTA.OtaSpotIdMap.get_ota_spot_list(OTA.OtaCode.CTRIP) # ota 景区id列表
def parse(self, response: HtmlResponse):
pass
``` |
{
"source": "jiangxuewen16/py-micro",
"score": 2
} |
#### File: example/client/user.py
```python
from example.proto import user_pb2_grpc
from py_micro.py_grpc import MicroService
@MicroService.consume('user.user', user_pb2_grpc.UserStub)
def GetUser():
pass
```
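Under `@MicroService.consume`, the decorated stub function never runs its own body: the wrapper resolves `user.user` from the registry, checks that the method name is among the registered tags, and forwards the first argument to the generated gRPC stub. A hedged usage sketch follows; the request message type `user_pb2.UserRequest` and its `id` field are assumptions, since the `.proto` file is not part of this dump.
```python
# Hypothetical client-side usage; the request message name and fields are
# assumptions because the user.proto definition is not included here.
from example.client.user import GetUser
from example.proto import user_pb2

if __name__ == '__main__':
    request = user_pb2.UserRequest(id=1)   # assumed message type / field
    reply = GetUser(request)               # resolved via the registry, dispatched to the gRPC stub
    print(reply)
```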
#### File: py-micro/py_micro/py_grpc.py
```python
import time
from operator import methodcaller
from types import FunctionType
import grpc
from concurrent import futures
from py_micro.py_consul import ConsulMicroServer
class MicroService(object):
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
"""配置"""
START_SERVER = True
MAX_WORKERS = 4
HOST = '127.0.0.1'
PORT = 8111
APP_NAME = ''
REG_HOST = '127.0.0.1'
REG_PORT = 8500
_INSECURE_CHANNEL_LIST = []  # gRPC connection pool
_SERVICE_LIST = []
_GRPC_SERVER = grpc.server(futures.ThreadPoolExecutor(MAX_WORKERS))
_CONSUL_SERVER = ConsulMicroServer(REG_HOST, REG_PORT)
"""注册服务->装饰器方法"""
@classmethod
def register(cls, servicer_func: FunctionType, service_name: str = None):
def my_decorator(func):
# when server-side registration is enabled, register the service automatically
if cls.START_SERVER:
if not isinstance(servicer_func, FunctionType):
raise Exception("微服务注册,必须是方法!")
ob = func()
# attach the servicer instance to the gRPC server
servicer_func(ob, cls._GRPC_SERVER)
# register every public method of the servicer as a tag
tags = list(
filter(lambda m: not m.startswith("__") and not m.endswith("__") and callable(getattr(ob, m)),
dir(ob)))
cls._CONSUL_SERVER.reg_service(service_name, cls.HOST, cls.PORT, tags)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
return my_decorator
"""服务消费逻辑"""
@classmethod
def consume(cls, service_id, stub, server_name: str = None):
def my_decorator(func):
def wrapper(*args, **kwargs):
service_addr = cls._CONSUL_SERVER.get_service(service_id)
host = service_addr[0]
port = service_addr[1]
tags = service_addr[2]
method = server_name or func.__name__
if method not in tags:
raise Exception('服务方法不存在')
conn = grpc.insecure_channel("{0}:{1}".format(host, port))  # TODO: this could be backed by a connection pool
client = stub(channel=conn)
if args:
param = args[0]
elif kwargs:
param = list(kwargs.values())[0]
else:
raise Exception('参数不存在')
return methodcaller(method, param)(client)  # invoke the stub method by name
return wrapper
return my_decorator
"""启动微服务"""
@classmethod
def start(cls):
cls._GRPC_SERVER.add_insecure_port("{0}:{1}".format(cls.HOST, cls.PORT))
cls._GRPC_SERVER.start()
try:
while True:
time.sleep(cls._ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
cls._GRPC_SERVER.stop(0)
# class GrpcConnPool(object):
# _POOL: dict = []
#
# @classmethod
# def pop(cls, host, port):
# addr = "{0}:{1}".format(host, port)
# if addr in cls._POOL:
#
# conn = grpc.insecure_channel("{0}:{1}".format(host, port)) # todo:这个可以维护一个连接池
# pass
#
# @classmethod
# def push(cls, host, port):
``` |
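For the server side, `MicroService.register` expects the proto-generated `add_*Servicer_to_server` function plus a service name, and `MicroService.start()` then serves on `HOST:PORT`. The sketch below shows how a service might be wired up; the generated names (`UserServicer`, `add_UserServicer_to_server`, `UserReply`) are assumptions, and a Consul-style registry is assumed to be reachable at `REG_HOST:REG_PORT`.
```python
# Hypothetical server-side usage of MicroService; the proto-generated names and
# message fields are assumptions, and a local service registry is assumed.
from example.proto import user_pb2, user_pb2_grpc
from py_micro.py_grpc import MicroService


@MicroService.register(user_pb2_grpc.add_UserServicer_to_server, 'user.user')
class UserService(user_pb2_grpc.UserServicer):
    def GetUser(self, request, context):
        return user_pb2.UserReply(name='demo')  # assumed reply message


if __name__ == '__main__':
    MicroService.start()  # serves on MicroService.HOST:MicroService.PORT and blocks
```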
{
"source": "jiangyangby/DRDSC",
"score": 2
} |
#### File: jiangyangby/DRDSC/DRDSC-L2-COIL20.py
```python
from __future__ import division, print_function, absolute_import
import numpy as np
import scipy.io as sio
from models import RSCConvAE
from utils import thrC, post_proC, err_rate, get_ar, get_fpr, get_nmi, get_purity
def train(iteration, X, y, CAE, lr, alpha, max_step):
CAE.initlization()
CAE.restore() # restore from pre-trained model
# fine-tune network
# last_cost = 0
for epoch in range(max_step):
cost, Coef, z_diff, x_diff = CAE.partial_fit(X, lr)
cost = cost / X.shape[0]
if epoch % 5 == 0:
print("epoch: %d" % epoch, "cost: %.8f" % cost)
# last_cost = cost
# if cost < 10 and abs(cost - last_cost) < last_cost * 1e-5: # early stopping
# break
Coef = thrC(Coef, alpha)
d, a = 11, 10
y_pred, _ = post_proC(Coef, y.max(), d, a)
err, y_new = err_rate(y, y_pred)
ar = get_ar(y, y_pred)
nmi = get_nmi(y, y_pred)
f, p, r = get_fpr(y, y_pred)
purity = get_purity(y, y_pred)
print('metrics: %.2f%%, %.2f%%, %.2f%%, %.2f%%, %.2f%%, %.2f%%, %.2f%%' %
(err * 100, ar * 100, nmi * 100, f * 100, p * 100, r * 100, purity * 100))
return Coef
if __name__ == '__main__':
data = sio.loadmat('./data/COIL20.mat')
X = data['fea'].astype(float)
y = data['gnd']
X = np.reshape(X, (X.shape[0], 32, 32, 1))
y = np.squeeze(y)
n_input = [32, 32]
kernel_size = [3]
n_hidden = [15]
save_path = './models/model-COIL20.ckpt'
restore_path = './models/model-COIL20.ckpt'
logs_path = './logs/'
num_class = 20 # how many classes we sample
num_sa = 72
batch_size = num_sa * num_class
z_dim = 3840
max_step = 34
alpha = 0.04
lr = 5.5e-4
reg1 = 1.0
reg2 = 150.0
CAE = RSCConvAE(n_input=n_input, n_hidden=n_hidden, z_dim=z_dim, lamda1=reg1,
lamda2=reg2, eta1=10, eta2=10, kernel_size=kernel_size,
batch_size=batch_size, save_path=save_path,
restore_path=restore_path, logs_path=logs_path)
train(0, X, y, CAE, lr, alpha, max_step)
``` |
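`thrC`, `post_proC`, `err_rate` and the other metrics come from the repo's `utils` module, which is not shown here. As a point of reference, the snippet below is a generic reconstruction of a clustering error rate with optimal label matching (Hungarian algorithm); it illustrates what such an `err_rate` typically computes, not necessarily the repo's exact implementation.
```python
# Generic illustration of a clustering error rate with optimal label matching,
# in the spirit of the err_rate imported from utils (whose code is not shown).
import numpy as np
from scipy.optimize import linear_sum_assignment


def clustering_error(y_true, y_pred):
    y_true, y_pred = np.asarray(y_true), np.asarray(y_pred)
    n_classes = int(max(y_true.max(), y_pred.max())) + 1
    # Contingency matrix between predicted clusters and true labels.
    cost = np.zeros((n_classes, n_classes), dtype=np.int64)
    for t, p in zip(y_true, y_pred):
        cost[p, t] += 1
    row, col = linear_sum_assignment(-cost)   # maximize the number of matched samples
    matched = cost[row, col].sum()
    return 1.0 - matched / len(y_true)


print(clustering_error([0, 0, 1, 1], [1, 1, 0, 0]))  # 0.0 after optimal relabeling
```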
{
"source": "JiangYangJie/Embedded",
"score": 3
} |
#### File: esp8266/clock/printf.py
```python
from dictionary import dicts
class Printf:
def __init__(self,oled):
self.oled=oled
self.clear()
def clear(self):  # clear the screen
self.oled.fill(0)
self.oled.show()
def en(self, String, x, y):  # draw ASCII text; oled: display object, String: text to draw, x, y: screen coordinates
self.oled.text(String, x, y)
self.oled.show()
def printf(self,char,x_axis,y_axis,line=30):  # line: glyph height in pixel rows
offset_ = 0
a=['','','','','']
for k in char:  # Chinese glyphs and ASCII digits use bitmaps with different column counts
if k in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9',' ',':']:
code = (hex(ord(k))[2:] + " ").replace(' ', '')
byte_data = dicts[code]
else:
code = 0x00  # build the lookup key for a Chinese character from its UTF-8 bytes
data_code = k.encode("utf-8")
code |= data_code[0] << 16
code |= data_code[1] << 8
code |= data_code[2]
byte_data = dicts[code]
for y in range(0, line):  # iterate over glyph rows (y axis)
for i in range(0,int((len(byte_data)/line))):  # number of byte columns along the x axis
a[i] = bin(byte_data[y+i*line]).replace('0b', '')  # byte -> binary string
while len(a[i]) < 8:  # left-pad to 8 bits
a[i] = '0' + a[i]
for x in range(0, 8):  # draw the 8 pixels of this byte
# pass
self.oled.pixel(x_axis + offset_ + x+i*8, y + y_axis, int(a[i][x]))  # set each of the 8 pixels
if k in [':','.']:
offset_ += 6  # narrow horizontal advance for punctuation
else:
offset_ += line
self.oled.show()
#
# p1=Printf('oled')
# p1.printf('123 ',1,1)
```
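`Printf` expects an already-initialized SSD1306 display object and a `dictionary.dicts` table containing the bitmap for every character it is asked to draw. A hypothetical MicroPython setup for an ESP8266 with a 128x64 I2C OLED is sketched below; the SCL/SDA pin numbers are assumptions.
```python
# Hypothetical MicroPython setup for the Printf class; the SCL/SDA pins are
# assumptions and dictionary.dicts must provide bitmaps for every character used.
from machine import Pin, I2C
import ssd1306
from printf import Printf

i2c = I2C(scl=Pin(5), sda=Pin(4))          # typical ESP8266 I2C pins (assumed)
oled = ssd1306.SSD1306_I2C(128, 64, i2c)   # 128x64 OLED over I2C
p = Printf(oled)
p.printf('12:30', 0, 0)                    # digits/colon take the narrow-glyph path
```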
#### File: esp8266/net_alarm/get_time.py
```python
import requests
class TIME:
def __init__(self):
self.headers={
'api-key':'you api key',
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36",
}
self.now_time=('','','','','')
self.alarm_time=('','','','','')
def get_now_time(self):
response=requests.get('http://quan.suning.com/getSysTime.do',headers=self.headers)
time=response.json()['sysTime1']
year=time[:4]
month=time[4:6]
day=time[6:8]
hours=time[8:10]
minute=time[10:12]
return year,month,day,hours,minute
def get_alarm_time(self,year,month,day,hours,minute):
try:  # fetch the alarm time from the OneNET cloud
Time = requests.get('http://api.heclouds.com/devices/<your device id>/datastreams/time', headers=self.headers)
print(Time)
Year=Time.json()['data']['current_value'][:4]
Month = Time.json()['data']['current_value'][5:7]
Day=Time.json()['data']['current_value'][8:10]
Hours=Time.json()['data']['current_value'][11:13]
Minute=Time.json()['data']['current_value'][14:]
print(Year,Month,Day,Hours,Minute)
if self.judge_time(year,month,day,hours,minute,Year,Month,Day,Hours,Minute,flag=1):
self.alarm_time=Year,Month,Day,Hours,Minute
else:
self.alarm_time = '1 ',' ',' ',' ',' '
except:
print('aa')
self.alarm_time = ' ',' ',' ',' ',' '
def judge_time(self,year,month,day,hours,minute,Year,Month,Day,Hours,Minute,flag=1):
# lowercase arguments are the current time, capitalized arguments are the alarm time
if flag==1:  # check whether the alarm time is still reachable (not yet passed)
if year==Year:
if month==Month:
if day==Day:
if hours==Hours:
if minute<=Minute:
return True
else:
return False
elif hours<Hours:
return True
else:
return False
elif day<Day:
return True
else:
return False
elif month<Month:
return True
else:
return True
else:
return False
else:  # check whether the alarm should go off right now
if minute==Minute and hours==Hours and day==Day and month==Month and year==Year:
return True
else:
return False
def Now_time(self):
try:
self.get_now_time()
self.now_time =self.get_now_time()
return self.now_time
except:
return ' ',' ',' ',' ',' '
def Alarm_time(self):
now=self.get_now_time()
self.get_alarm_time(now[0],now[1],now[2],now[3],now[4])
if self.alarm_time[1]!=' ':
return self.alarm_time
else:
return ' ',' ',' ',' ',' '
#
# Time=TIME()
# # print(Time.Now_time(),
# # Time.Alarm_time())
# print(Time.judge_time('2019','07','11','08','39','2019','07','11','08','39',2))
# year,month,day,hours,minute=Time.get_now_time()
# print(Time.get_alarm_time(year,month,day,hours,minute))
# headers={
# 'api-key':'<KEY>
# "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36",
# }
# Time = requests.get('http://api.heclouds.com/devices/532724988/datastreams/time', headers=headers)
# print(Time)
``` |
{
"source": "JiangYangJie/project_python",
"score": 3
} |
#### File: HousePrice/map/second_hand_house.py
```python
import os
from Mysql import mysql
from pyecharts import options as opts
from pyecharts.charts import Map, Page, Pie
if not os.path.exists('./html'):
os.makedirs('./html')
def Renovation():
with mysql() as sql:
info=sql.read("select Renovation,count(*) from second_hand "
"group by Renovation ORDER BY count(*) DESC limit 30"
)
Sum = sum([_[1] for _ in info])
ave = [100*_[1]/Sum for _ in info]
Info = [_[0] for _ in info]
c = (
Pie()
.add("", [list(z) for z in zip(Info,ave)])
.set_colors(["blue", "green", "red", "pink", "orange", "purple"])
.set_global_opts(title_opts=opts.TitleOpts(title="成都二手房装修情况"))
.set_series_opts(label_opts=opts.LabelOpts(formatter="{b}: {c}"))
)
Page().add(c).render(path='./html/renovation_second.html')  # Page must be instantiated before adding charts
Renovation()
```
#### File: HousePrice/scipy/get_headers.py
```python
from Mysql import mysql
remakes=[['淘宝浏览器2.0 on Windows 7 x64'],['猎豹浏览器2.0.10.3198 急速模式on Windows 7 x64'],['猎豹浏览器2.0.10.3198 兼容模式on Windows 7 x64'],
['猎豹浏览器2.0.10.3198 兼容模式on Windows XP x86 IE6'],['猎豹浏览器1.5.9.2888 急速模式on Windows 7 x64'],
['QQ浏览器7.0 on Windows 7 x64 IE9'],
['360安全浏览器5.0自带IE8内核版 on Windows XP x86 IE6'],['360安全浏览器5.0 on Windows XP x86 IE6'],
['360安全浏览器5.0 on Windows 7 x64 IE9'],['360急速浏览器6.0 急速模式 on Windows XP x86'],
['360急速浏览器6.0 急速模式 on Windows 7 x64'],
['360急速浏览器6.0 兼容模式 on Windows 7 x64 IE9'],['360急速浏览器6.0 IE9/IE10模式 on Windows 7 x64 IE9'],
['搜狗浏览器4.0 高速模式 on Windows XP x86'],['搜狗浏览器4.0 兼容模式 on Windows XP x86 IE6'],
['Waterfox 16.0 on Windows 7 x64'],['Firefox x64 on Ubuntu 12.04.1 x64'],
['Chrome x64 on Ubuntu 12.04.1 x64'],['Chrome x86 23.0.1271.64 on Windows 7 x64'],
['Chrome x86 10.0.648.133 on Windows 7 x64'],['IE9 x64 9.0.8112.16421 on Windows 7 x64'],
['IE9 x86 9.0.8112.16421 on Windows 7 x64'],['Firefox x64 3.6.10 on ubuntu 10.10 x64'],
['Chrome x64']
]
headers = [['Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.11 TaoBrowser/2.0 Safari/536.11'],
['Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER'],
['Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; LBBROWSER)'],
['Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E; LBBROWSER)'],
['Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.84 Safari/535.11 LBBROWSER'],
['Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)'],
['Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; QQBrowser/7.0.3698.400)'],
['Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SV1; QQDownload 732; .NET4.0C; .NET4.0E; 360SE)'],
['Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)'],
['Mozilla/5.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)'],
['Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1'],
['Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1'],
['Mozilla/5.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)'],
['Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)'],
['Mozilla/5.0 (Windows NT 5.1) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.84 Safari/535.11 SE 2.X MetaSr 1.0'],
['Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SV1; QQDownload 732; .NET4.0C; .NET4.0E; SE 2.X MetaSr 1.0)'],
['Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:16.0) Gecko/20121026 Firefox/16.0'],
['Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11'],
['Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11'],
['Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.133 Safari/534.16'],
['Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0)'],
['Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0)'],
['Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10'],
["Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko)Chrome/69.0.3497.100 Safari/537.36"]
]
def into_sql(host='localhost', password='<PASSWORD>', db='scipy', port=3306):
L = []
for _ in range(len(headers)):
L.append([headers[_][0], remakes[_][0]])
with mysql(host=host, password=password, db=db, port=port) as sql:  # use the function arguments instead of hard-coded values
sql.write("insert into headers(header,Remarks) values (%s,%s)", L)
def get_headers(number=5, host='localhost', password='<PASSWORD>', db='scipy', port=3306):
l = []
with mysql(host=host, password=password, db=db, port=port) as sql:  # use the function arguments instead of hard-coded values
L = sql.read("select * from headers order by rand() limit %s", number)
for _ in range(number):
l.append(L[_][0])
return l
``` |
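Both scripts in this project import a `mysql` context manager from a local `Mysql` module that is not part of this dump. The class below is a hypothetical stand-in built on `pymysql`, written only to mirror the `read()`/`write()` interface the code above assumes.
```python
# Hypothetical stand-in for the local Mysql.mysql context manager (not included
# in this dump); it only mirrors the read()/write() calls used above.
import pymysql


class mysql:
    def __init__(self, host='localhost', user='root', password='', db='scipy', port=3306):
        self.conn = pymysql.connect(host=host, user=user, password=password,
                                    db=db, port=port, charset='utf8mb4')

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.conn.commit()
        self.conn.close()

    def read(self, sql, args=None):
        with self.conn.cursor() as cur:
            cur.execute(sql, args)
            return cur.fetchall()

    def write(self, sql, rows):
        with self.conn.cursor() as cur:
            cur.executemany(sql, rows)
```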
{
"source": "jiangyangzhou/Non-targeted-Attack-IJCAI2019-ColdRiver",
"score": 2
} |
#### File: jiangyangzhou/Non-targeted-Attack-IJCAI2019-ColdRiver/data_attack.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
import numpy as np
import types
from numpy import random
from models.vgg import vgg16_bn
from models.inception import inception_v3
from models.resnet import resnet50
from models.googleNet import googlenet
from models.densenet import densenet121, densenet161
from models.incept_resnet_v2 import InceptionResNetV2
from models.inception_v4 import InceptionV4
import imp
import glob
import os
import PIL
from torch.utils.data import Dataset, DataLoader
import torch.multiprocessing as multiprocessing
#multiprocessing.set_start_method('spawn')
def load_model(model,pth_file, device):
model = model.to(device)
#model = torch.nn.DataParallel(model)
print('loading weights from : ', pth_file)
model.load_state_dict(torch.load(pth_file))
return model
def get_model_dic(device):
models = {}
#densenet_121 = densenet121(num_classes=110)
#load_model(densenet_121,"./pre_weights/ep_38_densenet121_val_acc_0.6527.pth",device)
densenet_161 = densenet161(num_classes=110)
load_model(densenet_161,"./pre_weights/ep_30_densenet161_val_acc_0.6990.pth",device)
resnet_50 = resnet50(num_classes=110)
load_model(resnet_50,"./pre_weights/ep_41_resnet50_val_acc_0.6900.pth",device)
incept_v3 = inception_v3(num_classes=110)
load_model(incept_v3,"./pre_weights/ep_36_inception_v3_val_acc_0.6668.pth",device)
#incept_v1 = googlenet(num_classes=110)
#load_model(incept_v1,"./pre_weights/ep_33_googlenet_val_acc_0.7091.pth",device)
#vgg16 = vgg16_bn(num_classes=110)
#load_model(vgg16, "./pre_weights/ep_30_vgg16_bn_val_acc_0.7282.pth",device)
incept_resnet_v2_adv = InceptionResNetV2(num_classes=110)
load_model(incept_resnet_v2_adv, "./pre_weights/ep_22_InceptionResNetV2_val_acc_0.8214.pth",device)
incept_v4_adv = InceptionV4(num_classes=110)
load_model(incept_v4_adv,"./pre_weights/ep_37_InceptionV4_val_acc_0.7119.pth",device)
MainModel = imp.load_source('MainModel', "./models_old/tf_to_pytorch_resnet_v1_50.py")
resnet_model = torch.load('./models_old/tf_to_pytorch_resnet_v1_50.pth').to(device)
MainModel = imp.load_source('MainModel', "./models_old/tf_to_pytorch_vgg16.py")
vgg_model = torch.load('./models_old/tf_to_pytorch_vgg16.pth').to(device)
MainModel = imp.load_source('MainModel', "./models_old/tf_to_pytorch_inception_v1.py")
inception_model = torch.load('./models_old/tf_to_pytorch_inception_v1.pth').to(device)
models={#"densenet121":densenet_121,
"densenet161":densenet_161,
"resnet_50":resnet_50,
# "incept_v1":incept_v1,
"incept_v3":incept_v3,
"incept_resnet_v2_adv": incept_resnet_v2_adv,
"incept_v4_adv": incept_v4_adv,
#"vgg16":vgg16
"old_incept":inception_model,
"old_res":resnet_model,
"old_vgg":vgg_model
}
return models
def input_diversity(image, prob, low, high):
if random.random()<prob:
return image
rnd = random.randint(low, high)
rescaled = F.upsample(image, size=[rnd, rnd], mode='bilinear')
h_rem = high - rnd
w_rem = high - rnd
pad_top = random.randint( 0, h_rem)
pad_bottom = h_rem - pad_top
pad_left = random.randint(0, w_rem)
pad_right = w_rem - pad_left
padded = F.pad(rescaled, [pad_left, pad_right, pad_top, pad_bottom], 'constant', 0)  # F.pad takes (left, right, top, bottom) for the last two dims
return padded
def preprocess(image,model_name, prob):
if model_name=="incept_v3" or 'incept_v4'in model_name or 'incept_resnet_v2' in model_name:
return input_diversity(image,prob,270,299)
else:
image = F.upsample(image, size=(224, 224), mode='bilinear')
if model_name=="old_res" or model_name=="old_vgg":
image = ((image/2.0)+0.5)*255.0
_R_MEAN = 123.68
_G_MEAN = 116.78
_B_MEAN = 103.94
image[:, 0,:, :] = image[:, 0,:, :] - _R_MEAN
image[:, 1,:, :] = image[:, 1,:, :] - _G_MEAN
image[:, 2,:, :] = image[:, 2,:, :] - _B_MEAN
return input_diversity(image,prob,200,224)
else:
return input_diversity(image,prob,200,224)
class EnsembleNet(nn.Module):
def __init__(self,device,ablation='',prob=0.5):
super(EnsembleNet, self).__init__()
self.models = get_model_dic(device)
self.preprocess = preprocess
self.ablation = ablation
self.prob=prob
self.models_list = []
def forward(self,x):
i=0
for model in self.models.keys():
if model==self.ablation:
continue
if random.random()<self.prob:
continue
self.models_list.append(model)
pre_x = self.preprocess(x,model, 0.3)
if model=='incept_v3':
out = 0.5*self.models[model](pre_x)[0]+0.5*self.models[model](pre_x)[1]
elif model=='incept_v1':
out = 0.4*self.models[model](pre_x)[0]+0.4*self.models[model](pre_x)[1] + \
0.4*self.models[model](pre_x)[2]
else:
out = self.models[model](pre_x)
out_sum = out if i==0 else out_sum + out
i=i+1
if i==0:
model = random.choice(list(self.models.keys()))
pre_x = self.preprocess(x, model, 0.3)
out_sum = self.models[model](pre_x)
out_sum=sum(out_sum)/len(out_sum) if model=="incept_v1" or model=="incept_v3" else out_sum
else:
out_sum = out_sum/i
return out_sum
def gkern(kernlen=21, nsig=3):
"""Returns a 2D Gaussian kernel array."""
import scipy.stats as st
x = np.linspace(-nsig, nsig, kernlen)
kern1d = st.norm.pdf(x)
kernel_raw = np.outer(kern1d, kern1d)
kernel = kernel_raw / kernel_raw.sum()
return kernel
def get_kernel(kernel_size):
kernel = gkern(kernel_size, 3).astype(np.float32)
stack_kernel = np.stack([kernel, kernel, kernel])
stack_kernel = np.expand_dims(stack_kernel, 0)
stack_kernel = torch.Tensor(stack_kernel)
return stack_kernel
class Attack(object):
def __init__(self, gpu_ids, prob1=0.7,prob2=0.7, prob3=0.5, prob4=0.5):
self.prob1=prob1
self.prob3=prob3
self.prob4=prob4
print(gpu_ids)
if len(gpu_ids)==1:
self.device=torch.device('cuda:%d'%gpu_ids[0])
self.ens_model = EnsembleNet(self.device)
else:
self.device=torch.device('cuda:%d'%gpu_ids[0])
self.ens_model = EnsembleNet(self.device)
self.ens_model = torch.nn.DataParallel(self.ens_model, device_ids=gpu_ids, output_device=gpu_ids[0])
self.kernels = {9: get_kernel(9), 11: get_kernel(11), 13: get_kernel(13), 15: get_kernel(15), 17: get_kernel(17)}
self.kernel_size=[9,11,13,15,17]
def __call__(self,image, label):
if random.random() > self.prob1:
return image
else:
max_epsilon = random.randint(5,30)
eps = 2.0 * max_epsilon / 255.0
num_iter = 1 if random.random()<self.prob3 else random.randint(2,10)
alpha = eps / num_iter
momentum = 0.8+0.2*random.random()
image.requires_grad = True
image = image.to(self.device)
label = label.to(self.device)
for iter in range(num_iter):
self.ens_model.zero_grad()
out = self.ens_model(image)
loss = nn.CrossEntropyLoss()(out, label)
loss.backward()
data_grad = image.grad.data
if random.random()<self.prob4:
kernel_size = self.kernel_size[random.randint(len(self.kernels))]
stack_kernel = self.kernels[kernel_size].to(self.device)
data_grad = F.conv2d(data_grad, stack_kernel, padding=(kernel_size-1)//2)
for i in range(data_grad.shape[0]):
data_grad[i] = data_grad[i]/torch.mean(data_grad[i].abs())
if iter==0:
noise = data_grad
else:
noise = noise*momentum + data_grad
if random.random()<0.5:
image_adv = image.data + noise*alpha/(iter+1)
else:
image_adv = image.data + noise.sign()*alpha
image_adv = torch.clamp(image_adv,-1.0,1.0)
image.data = image_adv
image.grad.zero_()
return image.cpu()
class ImageAugmentation(object):
def __init__(self, device, size=224):
self.size = size
self.ens_model = EnsembleNet(device)
self.transformer_train = transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomResizedCrop(size, (0.7, 1), interpolation=PIL.Image.BILINEAR),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5, 0.5, 0.5],
std=[0.5, 0.5, 0.5]),
])
def __call__(self, img):
return self.transformer_train(img)
```
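The Gaussian kernel built by `gkern`/`get_kernel` implements the translation-invariant smoothing that `Attack` optionally applies to the gradient before the momentum update. The short check below (assuming it is run from this repo so `data_attack` imports cleanly) shows the kernel's shape and how the `conv2d` call collapses the RGB gradient into a single smoothed map, which later broadcasts back over the three channels when added to the image.
```python
# Quick check of the translation-invariant smoothing kernel defined above.
# Assumes this runs from the repo root so `data_attack` and its model modules import.
import torch
import torch.nn.functional as F
from data_attack import gkern, get_kernel

kernel_size = 11
stack_kernel = get_kernel(kernel_size)                       # shape [1, 3, 11, 11]
print(stack_kernel.shape, float(gkern(kernel_size).sum()))   # the 2D Gaussian sums to ~1.0

grad = torch.randn(2, 3, 299, 299)                           # a fake gradient batch
smoothed = F.conv2d(grad, stack_kernel, padding=(kernel_size - 1) // 2)
print(smoothed.shape)  # [2, 1, 299, 299]: RGB channels are merged into one smoothed map
```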
#### File: jiangyangzhou/Non-targeted-Attack-IJCAI2019-ColdRiver/data_util.py
```python
from PIL import Image
import torch
import torch.nn.functional as F
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader
from sklearn.model_selection import train_test_split
import glob
import os
import PIL
import pandas as pd
import numpy as np
class ImageSet(Dataset):
def __init__(self, df, transformer):
self.df = df
self.transformer = transformer
def __len__(self):
return len(self.df)
def __getitem__(self, item):
image_path = self.df.iloc[item]['image_path']
image = self.transformer(Image.open(image_path))#.convert('RGB'))
label_idx = self.df.iloc[item]['label_idx']
sample = {
'dataset_idx': item,
'image': image,
'label_idx': label_idx,
'filename':os.path.basename(image_path)
}
return sample
def load_data_for_training_cnn(dataset_dir, img_size, batch_size=16):
all_imgs = glob.glob(os.path.join(dataset_dir, './*/*.jpg'))
all_labels = [int(img_path.split('/')[-2]) for img_path in all_imgs]
train = pd.DataFrame({'image_path':all_imgs,'label_idx':all_labels})
train_data, val_data = train_test_split(train,
stratify=train['label_idx'].values, train_size=0.9, test_size=0.1)
transformer_train = transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomResizedCrop(img_size, (0.7, 1), interpolation=PIL.Image.BILINEAR),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5, 0.5, 0.5],
std=[0.5, 0.5, 0.5]),
])
transformer = transforms.Compose([
transforms.Resize([img_size, img_size], interpolation=PIL.Image.BILINEAR),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5, 0.5, 0.5],
std=[0.5, 0.5, 0.5]),
])
datasets = {
'train_data': ImageSet(train_data, transformer_train),
'val_data': ImageSet(val_data, transformer)
}
dataloaders = {
ds: DataLoader(datasets[ds],
batch_size=batch_size,
num_workers=8,
shuffle=True) for ds in datasets.keys()
}
return dataloaders
def load_data_for_defense(input_dir, img_size, batch_size=16):
all_img_paths = glob.glob(os.path.join(input_dir, '*.png'))
all_labels = [-1 for i in range(len(all_img_paths))]
dev_data = pd.DataFrame({'image_path':all_img_paths, 'label_idx':all_labels})
transformer = transforms.Compose([
transforms.Resize([img_size, img_size], interpolation=PIL.Image.BILINEAR),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5, 0.5, 0.5],
std=[0.5, 0.5, 0.5]),
])
datasets = {
'dev_data': ImageSet(dev_data, transformer)
}
dataloaders = {
ds: DataLoader(datasets[ds],
batch_size=batch_size,
num_workers=0,
shuffle=False) for ds in datasets.keys()
}
return dataloaders
``` |
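`load_data_for_defense` returns a dict of `DataLoader`s whose batches are the `sample` dicts built by `ImageSet`. A short consumption sketch follows; the input directory is a placeholder for a folder of `.png` images.
```python
# Sketch of consuming the defense dataloader defined above; the directory path
# is a placeholder for a folder of .png images.
from data_util import load_data_for_defense

loaders = load_data_for_defense('./dev_data', img_size=299, batch_size=4)
for batch in loaders['dev_data']:
    images = batch['image']          # float tensor in [-1, 1], shape [B, 3, 299, 299]
    filenames = batch['filename']    # original file names, useful when writing outputs
    print(images.shape, filenames)
    break
```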
{
"source": "jiangycTarheel/Adversarial-MultiHopQA",
"score": 3
} |
#### File: nectar/corenlp/client.py
```python
import json
import os
import requests
from server import CoreNLPServer
class CoreNLPClient(object):
"""A client that interacts with the CoreNLPServer."""
def __init__(self, hostname='http://localhost', port=7000,
start_server=False, server_flags=None, server_log=None,
cache_file=None,):
"""Create the client.
Args:
hostname: hostname of server.
port: port of server.
start_server: start the server on first cache miss.
server_flags: passed to CoreNLPServer.__init__()
server_log: passed to CoreNLPServer.__init__()
cache_file: load and save cache to this file.
"""
self.hostname = hostname
self.port = port
self.start_server = start_server
self.server_flags = server_flags
self.server_log = server_log
self.server = None
self.cache_file = cache_file
self.has_cache_misses = False
if cache_file:
if os.path.exists(cache_file):
with open(cache_file) as f:
self.cache = json.load(f)
else:
self.cache = {}
else:
self.cache = None
def save_cache(self):
if self.cache_file and self.has_cache_misses:
with open(self.cache_file, 'w') as f:
json.dump(self.cache, f)
self.has_cache_misses = False
def query(self, sents, properties):
"""Most general way to query the server.
Args:
sents: Either a string or a list of strings.
properties: CoreNLP properties to send as part of the request.
"""
url = '%s:%d' % (self.hostname, self.port)
params = {'properties': str(properties)}
if isinstance(sents, list):
data = '\n'.join(sents)
else:
data = sents
key = '%s\t%s' % (data, str(properties))
if self.cache and key in self.cache:
return self.cache[key]
self.has_cache_misses = True
if self.start_server and not self.server:
self.server = CoreNLPServer(port=self.port, flags=self.server_flags,
logfile=self.server_log)
self.server.start()
r = requests.post(url, params=params, data=data.encode('utf-8'))
r.encoding = 'utf-8'
json_response = json.loads(r.text, strict=False)
if self.cache is not None:
self.cache[key] = json_response
return json_response
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if self.server:
self.server.stop()
if self.cache_file:
self.save_cache()
def query_pos(self, sents):
"""Standard query for getting POS tags."""
properties = {
'ssplit.newlineIsSentenceBreak': 'always',
'annotators': 'tokenize,ssplit,pos',
'outputFormat':'json'
}
return self.query(sents, properties)
def query_ner(self, paragraphs):
"""Standard query for getting NERs on raw paragraphs."""
annotators = 'tokenize,ssplit,pos,ner,entitymentions'
properties = {
'ssplit.newlineIsSentenceBreak': 'always',
'annotators': annotators,
'outputFormat':'json'
}
return self.query(paragraphs, properties)
def query_depparse_ptb(self, sents, use_sd=False):
"""Standard query for getting dependency parses on PTB-tokenized input."""
annotators = 'tokenize,ssplit,pos,depparse'
properties = {
'tokenize.whitespace': True,
'ssplit.eolonly': True,
'ssplit.newlineIsSentenceBreak': 'always',
'annotators': annotators,
'outputFormat':'json'
}
if use_sd:
# Use Stanford Dependencies trained on PTB
# Default is Universal Dependencies
properties['depparse.model'] = 'edu/stanford/nlp/models/parser/nndep/english_SD.gz'
return self.query(sents, properties)
def query_depparse(self, sents, use_sd=False, add_ner=False):
"""Standard query for getting dependency parses on raw sentences."""
annotators = 'tokenize,ssplit,pos,depparse'
if add_ner:
annotators += ',ner'
properties = {
'ssplit.eolonly': True,
'ssplit.newlineIsSentenceBreak': 'always',
'annotators': annotators,
'outputFormat':'json'
}
if use_sd:
# Use Stanford Dependencies trained on PTB
# Default is Universal Dependencies
properties['depparse.model'] = 'edu/stanford/nlp/models/parser/nndep/english_SD.gz'
return self.query(sents, properties)
def query_const_parse(self, sents, add_ner=False):
"""Standard query for getting constituency parses on raw sentences."""
annotators = 'tokenize,ssplit,pos,parse'
if add_ner:
annotators += ',ner'
properties = {
'ssplit.eolonly': True,
'ssplit.newlineIsSentenceBreak': 'always',
'annotators': annotators,
'outputFormat':'json'
}
return self.query(sents, properties)
``` |
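Typical usage is as a context manager, so that an auto-started server is shut down and the cache is flushed on exit. The example below is a sketch: the import path is an assumption (the module lives at `nectar/corenlp/client.py`), and a CoreNLP server must already be listening on the port unless `start_server=True` is passed with the server jars available to `CoreNLPServer`.
```python
# Usage sketch for CoreNLPClient as a context manager (import path assumed).
from client import CoreNLPClient

with CoreNLPClient(port=7000, cache_file='corenlp_cache.json') as client:
    response = client.query_pos(['The quick brown fox jumps over the lazy dog.'])
    tokens = response['sentences'][0]['tokens']
    print([(tok['word'], tok['pos']) for tok in tokens])
```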
{
"source": "jiangycTarheel/Compositional-Auxseq",
"score": 2
} |
#### File: jiangycTarheel/Compositional-Auxseq/utils.py
```python
import os
import json
import gzip
from copy import deepcopy, copy
import numpy as np
import csv
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader, RandomSampler
from transformers.tokenization_utils import trim_batch
class LabelSmoothingLoss(nn.Module):
def __init__(self, label_smooth, tgt_vocab_size, ignore_index=-100):
assert 0. < label_smooth <= 1.
self.ignore_index = ignore_index
super(LabelSmoothingLoss, self).__init__()
smoothing_value = label_smooth / (tgt_vocab_size - 2)
one_hot = torch.full((tgt_vocab_size,), smoothing_value)
one_hot[self.ignore_index] = 0
self.register_buffer('one_hot', one_hot.unsqueeze(0).unsqueeze(0))
self.confidence = 1.0 - label_smooth
self.lossfct = torch.nn.KLDivLoss(reduction='none')
def forward(self, pred, target):
"""
Args:
pred: [bsz, seq_len, vocab_size]
target: [bsz, seq_len]
Returns:
"""
model_prob = self.one_hot.repeat(target.size(0), target.size(1), 1) # [bsz, seq_len, vocab_size]
model_prob.scatter_(2, target.unsqueeze(2), self.confidence)
model_prob.masked_fill_((target == self.ignore_index).unsqueeze(2), 0)
pred_prob = F.log_softmax(pred, dim=2)
#return F.kl_div(pred_prob, model_prob, reduction='mean')
loss = self.lossfct(pred_prob, model_prob)
loss = torch.sum(loss, dim=2).masked_fill_((target == self.ignore_index), 0)
avg_loss = torch.sum(loss) / torch.sum((target != self.ignore_index).to(torch.float))
return avg_loss
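# --- usage sketch (not part of the original file) ----------------------------
# LabelSmoothingLoss expects logits of shape [bsz, seq_len, vocab_size] and
# integer targets of shape [bsz, seq_len]; positions equal to ignore_index are
# masked out of the average. For example:
#   criterion = LabelSmoothingLoss(label_smooth=0.1, tgt_vocab_size=8, ignore_index=0)
#   logits = torch.randn(2, 5, 8)
#   target = torch.randint(1, 8, (2, 5))
#   loss = criterion(logits, target)  # scalar averaged over non-ignored tokens
# ------------------------------------------------------------------------------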
# Special symbols
SOS_token = "<SOS>" # start of sentence
EOS_token = "<EOS>" # end of sentence
PAD_token = SOS_token # padding symbol
INPUT_TOKENS_SCAN = ['jump', 'opposite', 'right', 'twice', 'and', 'turn', 'thrice', 'run', 'after', 'around', 'left', 'walk', 'look']
OUTPUT_TOKENS_SCAN = ['I_TURN_RIGHT', 'I_JUMP', 'I_TURN_LEFT', 'I_RUN', 'I_WALK', 'I_LOOK']
# ACTION_TO_TEXT = {'I_TURN_RIGHT': 'right', 'I_JUMP': 'jump', 'I_TURN_LEFT': 'left', 'I_RUN': 'run', 'I_WALK': 'walk', 'I_LOOK': 'look'}
class Lang:
# Class for converting strings/words to numerical indices, and vice versa.
# Should use separate class for input language (English) and output language (actions)
#
def __init__(self, symbols, io_type):
# symbols : list of all possible symbols
n = len(symbols)
self.symbols = [_s.strip('\n') for _s in symbols]
self.io_type = io_type
if SOS_token not in self.symbols:
assert EOS_token not in self.symbols
self.index2symbol = {n: SOS_token, n+1: EOS_token}
self.symbol2index = {SOS_token: n, EOS_token: n + 1}
self.sos_id, self.eos_id = n, n + 1
else:
self.index2symbol = {}
self.symbol2index = {}
self.sos_id, self.eos_id = 0, 1
self.pad_token_id = self.sos_id
for idx,s in enumerate(self.symbols):
self.index2symbol[idx] = s
self.symbol2index[s] = idx
self.n_symbols = len(self.index2symbol)
def variableFromSymbols(self, mylist, add_eos=True):
# Convert a list of symbols to a tensor of indices (adding a EOS token at end)
#
# Input
# mylist : list of m symbols
# add_eos : true/false, if true add the EOS symbol at end
#
# Output
# output : [m or m+1 LongTensor] indices of each symbol (plus EOS if appropriate)
mylist = copy(mylist)
if add_eos:
mylist.append(EOS_token)
indices = [self.symbol2index[s] for s in mylist]
output = torch.LongTensor(indices)
#if USE_CUDA:
output = output.cuda()
return output
def symbolsFromVector(self, v):
# Convert indices to symbols, breaking where we get a EOS token
#
# Input
# v : list of m indices
#
# Output
# mylist : list of m or m-1 symbols (excluding EOS)
mylist = []
for x in v:
s = self.index2symbol[x]
if s == EOS_token:
break
mylist.append(s)
return mylist
def encode_scan_file(self, data, max_length):
encoded_data = []
for dp in data:
input, output = dp[0], dp[1]
if self.io_type == 'input':
raw = input
else:
assert self.io_type == 'output'
raw = output
encoded = self.variableFromSymbols(raw.split(' '))
encoded_data.append(encoded)
return encoded_data
def encode_scan_file_2_seg(self, data, max_length, cutoffs):
encoded_data_1, encoded_data_2 = [], []
for _id, dp in enumerate(data):
input, output, cutoff = dp[0], dp[1], cutoffs[_id]
assert self.io_type == 'output'
raw = output
encoded_1 = self.variableFromSymbols(raw.split(' ')[:cutoff])
encoded_2 = self.variableFromSymbols(raw.split(' ')[cutoff:])
encoded_data_1.append(encoded_1)
encoded_data_2.append(encoded_2)
return encoded_data_1, encoded_data_2
def encode_cfq_file(self, data, max_length):
encoded_data = []
for dp in data:
input, output = dp['query_ids'], dp['sparql_ids']
if self.io_type == 'input':
raw = input
else:
assert self.io_type == 'output'
raw = output + [self.eos_id]
encoded = torch.LongTensor(raw).cuda()
encoded_data.append(encoded)
return encoded_data
def encode_cogs_file(self, data, max_length):
encoded_data = []
for dp in data:
input, output = dp['src'], dp['trg']
if self.io_type == 'input':
raw = input
else:
assert self.io_type == 'output'
raw = output
encoded = self.variableFromSymbols(raw.split(' '))
encoded_data.append(encoded)
return encoded_data
def decode(self, ids):
out = self.symbolsFromVector(ids.cpu().numpy())
if out == []:
return out
if out[0] in ['<SOS>', '<SOS_2>']:
out = out[1:]
return out
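# --- Illustrative usage sketch (editor's addition; assumes a CUDA device because
# variableFromSymbols moves tensors to the GPU) ---
#
#   in_lang = Lang(INPUT_TOKENS_SCAN, io_type='input')     # sos_id=13, eos_id=14
#   out_lang = Lang(OUTPUT_TOKENS_SCAN, io_type='output')
#   ids = in_lang.variableFromSymbols(['jump', 'twice'])   # LongTensor([0, 3, 14]) on GPU
#   out_lang.symbolsFromVector([1, 1, out_lang.eos_id])    # -> ['I_JUMP', 'I_JUMP']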
def calculate_accuracy(preds, gts):
assert len(preds) == len(gts)
match = 0
for pred, gt in zip(preds, gts):
if pred == gt:
match += 1
return match / len(preds)
def encode_file(tokenizer, data_path, max_length, pad_to_max_length=True, return_tensors="pt", max_examples=None):
examples = []
if data_path[-3:] == '.gz':
print('Data file is gzipped')
f = gzip.open(data_path, "rt")
else:
print('Data file is plain text')
print(data_path)
f = open(data_path, "r", encoding='utf-8')
for i, text in enumerate(f.readlines()):
tokenized = tokenizer.batch_encode_plus( [text + ' </s>'], max_length=max_length,
pad_to_max_length=pad_to_max_length, return_tensors=return_tensors )
if max_examples and i >= max_examples:
break
examples.append(tokenized)
f.close()
return examples
# def encode_file_iterator(tokenizer, data_path, max_length, pad_to_max_length=True, return_tensors="pt", max_examples=None):
# '''
# This provides a low-memory usage way of iterating thru all of the source/target lines for processing by JIT loader.
# '''
# if data_path[-3:] == '.gz':
# print('Data file is gzipped')
# f = gzip.open(data_path, "rt")
# else:
# print('Data file is plain text')
# f = open(data_path, "r", encoding='utf-8')
#
# for i, text in enumerate(f):
#
# tokenized = tokenizer.batch_encode_plus( [text + ' </s>'], max_length=max_length,
# pad_to_max_length=pad_to_max_length, return_tensors=return_tensors )
#
# yield tokenized
#
# if max_examples and i >= max_examples:
# break
#
# f.close()
# def convert_scan_actions_to_text(actions):
# return ' '.join([ACTION_TO_TEXT[_action] for _action in actions.split(' ')])
# def encode_scan_file(tokenizer, data, io_type, max_length, pad_to_max_length=True, return_tensors="pt", max_examples=None):
# examples = []
# # a = tokenizer.batch_encode_plus( ['right jump left run walk look' + ' <s> </s>'], max_length=max_length,
# # pad_to_max_length=pad_to_max_length, return_tensors=return_tensors )
# # print(a)
# # exit()
# for dp in data:
# input, output = dp[0], dp[1]
# if io_type == 'input':
# raw = input
# else:
# assert io_type == 'output'
# raw = convert_scan_actions_to_text(output)
#
# tokenized = tokenizer.batch_encode_plus( [raw + ' </s>'], max_length=max_length,
# pad_to_max_length=pad_to_max_length, return_tensors=return_tensors )
#
# if max_examples and i >= max_examples:
# break
# examples.append(tokenized)
#
# return examples
def load_scan_file(mytype, split):
# Load SCAN dataset from file
#
# Input
# mytype : type of SCAN experiment
# split : 'train' or 'test'
#
# Output
# commands : list of input/output strings (as tuples)
assert mytype in ['simple', 'addprim_jump', 'length', 'addprim_turn_left', 'all', 'template_around_right', 'viz',
'examine', 'template_jump_around_right', 'template_right', 'template_around_right',
'mcd1', 'mcd2', 'mcd3', 'mcd1.1', 'mcd1.2', 'debug', 'attn_vis']
assert split in ['train', 'test', 'val']
if split == 'val' and mytype not in ['mcd1', 'mcd2', 'mcd3', 'mcd1.1', 'mcd1.2']:
split = 'test'
fn = 'data/scan/tasks_' + split + '_' + mytype + '.txt'
fid = open(fn, 'r')
lines = fid.readlines()
fid.close()
lines = [l.strip() for l in lines]
lines = [l.lstrip('IN: ') for l in lines]
commands = [l.split(' OUT: ') for l in lines]
return commands
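# --- Example (editor's note, not part of the original file) ---
# Each line of a SCAN task file has the form
#     IN: jump twice OUT: I_JUMP I_JUMP
# so load_scan_file returns pairs such as ('jump twice', 'I_JUMP I_JUMP').
# Note that lstrip('IN: ') strips any leading run of the characters
# {'I', 'N', ':', ' '}; that is safe here only because SCAN commands never
# start with those characters.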
class CompositionDataset(Dataset):
def __init__(
self,
src_lang,
trg_lang,
data_dir,
type_path,
sub_task,
max_source_length=20,
max_target_length=20,
tokenized=False,
):
super().__init__()
self.max_source_length = max_source_length
self.max_target_length = max_target_length
self.tokenized = tokenized
self.src_lang = src_lang
self.trg_lang = trg_lang
def __len__(self):
if self.tokenized:
return len(self.dataset)
else:
return len(self.source)
def __getitem__(self, index):
if self.tokenized:
dp = self.dataset[index]
source_ids, src_mask, target_ids = dp[0], dp[1], dp[2]
source_ids = source_ids[:self.max_source_length]
#src_mask = src_mask[:self.max_source_length]
target_ids = target_ids[:self.max_target_length]
else:
source_ids = self.source[index]
target_ids = self.target[index]
return {"source_ids": source_ids, "target_ids": target_ids}
@staticmethod
def trim_seq2seq_batch(batch, src_pad_token_id, trg_pad_token_id, trim_y=True):
if trim_y:
y = trim_batch(batch["target_ids"], trg_pad_token_id)
else:
y = batch["target_ids"]
source_ids, source_mask = trim_batch(batch["source_ids"], src_pad_token_id, attention_mask=batch["source_mask"])
return source_ids, source_mask, y
def pad_to_max_len(self, ids, max_len, pad_token_id):
ids_length = ids.size(0)
if ids_length == max_len:
return ids
pad_tokens = torch.tensor([pad_token_id] * (max_len - ids_length))
# if ids.type() == 'torch.cuda.FloatTensor':
# print(ids)
# exit()
padded_ids = torch.cat([ids, pad_tokens.cuda()])
return padded_ids
def create_mask(self, ids, max_len):
ids_length = ids.size(0)
mask = torch.tensor([1] * ids_length + [0] * (max_len - ids_length)).cuda()
return mask
def collate_fn(self, batch):
max_src_len = max(map(len, [x["source_ids"] for x in batch]))
max_trg_len = max(map(len, [x["target_ids"] for x in batch]))
src_mask = torch.stack([self.create_mask(x["source_ids"], max_src_len) for x in batch])
src_ids = torch.stack([self.pad_to_max_len(x["source_ids"], max_src_len, self.src_lang.pad_token_id) for x in batch])
#masks = torch.stack([x["source_mask"] for x in batch])
trg_ids = torch.stack([self.pad_to_max_len(x["target_ids"], max_trg_len, self.trg_lang.pad_token_id) for x in batch])
y = trim_batch(trg_ids, self.trg_lang.pad_token_id)
src_ids, src_mask = trim_batch(src_ids, self.src_lang.pad_token_id, attention_mask=src_mask)
return {"source_ids": src_ids, "source_mask": src_mask, "target_ids": y}
class ScanDataset(CompositionDataset):
def __init__(
self,
src_lang,
trg_lang,
data_dir="./data/scan/",
type_path="train",
sub_task="addprim_jump",
max_source_length=20,
max_target_length=20,
tokenized=False,
):
super().__init__(src_lang, trg_lang, data_dir, type_path, sub_task, max_source_length,
max_target_length, tokenized)
scan_data = load_scan_file(sub_task, type_path)
print(len(scan_data))
all_scan_dict = self.convert_to_dict(load_scan_file('all', 'train'))
self.action_count_labels, self.action_group_labels, self.action_type_labels = self.construct_count_label(scan_data, all_scan_dict)
if not tokenized:
self.source = self.src_lang.encode_scan_file(scan_data, max_source_length)
self.target = self.trg_lang.encode_scan_file(scan_data, max_target_length)
else:
self.dataset = torch.load(os.path.join(data_dir, type_path))
def construct_count_label(self, raw_data, all_data_dict):
all_count_labels = []
count_label_scheme = "v1"
group_label_scheme = "v2"
type_label_scheme = "v2"
all_action_group_labels, all_action_type_labels = [], []
# Group 1: single prim (jump), Group 2: prim + direction (jump left), Group 3: prim opposite, Group 4: prim around
#no_skip_id = np.random.randint(0, len(raw_data), int(len(raw_data)*0.05))
#no_skip_id = np.random.choice(range(len(raw_data)), int(len(raw_data)*0.07), replace=False)
# no_skip_id = np.random.choice(range(len(raw_data)), 10, replace=False)
skip_cnt, sup_cnt = 0, 0
for _id, dp in enumerate(raw_data):
input_text, output_text = dp[0], dp[1]
input_tok, output_tok = input_text.split(' '), output_text.split(' ')
count_labels, group_labels, type_labels = [], [], []
first_part_output_text, second_part_output_text = '', ''
if 'and' in input_tok:
first_part_input_tok = input_tok[:input_tok.index('and')]
second_part_input_tok = input_tok[input_tok.index('and')+1:]
first_part_output_text = all_data_dict[' '.join(first_part_input_tok)]
second_part_output_text = all_data_dict[' '.join(second_part_input_tok)]
elif 'after' in input_tok:
second_part_input_tok = input_tok[:input_tok.index('after')]
first_part_input_tok = input_tok[input_tok.index('after') + 1:]
first_part_output_text = all_data_dict[' '.join(first_part_input_tok)]
second_part_output_text = all_data_dict[' '.join(second_part_input_tok)]
else:
first_part_input_tok, second_part_input_tok = input_tok, []
first_part_output_text = output_text
first_part_output_tok, second_part_output_tok = first_part_output_text.split(' '), second_part_output_text.split(' ')
if second_part_output_text == '':
second_part_output_tok = []
assert len(first_part_output_tok) + len(second_part_output_tok) == len(output_tok), \
(len(first_part_output_tok), len(second_part_output_tok), len(output_tok), first_part_output_text, second_part_output_text, output_text)
### 1. Build the action count labels ###
if count_label_scheme == 'v1':
### For the first part output
if 'twice' in first_part_input_tok:
if 'after' in input_tok:
count_labels += ([4] * int(len(first_part_output_tok) / 2) + [3] * int(len(first_part_output_tok) / 2))
else:
count_labels += ([1] * int(len(first_part_output_tok) / 2) + [0] * int(len(first_part_output_tok) / 2))
# count_labels += ([1] + [0] * (int(len(first_part_output_tok) / 2) - 1)) * 2
elif 'thrice' in first_part_input_tok:
if 'after' in input_tok:
count_labels += ([5] * int(len(first_part_output_tok) / 3) + [4] * int(len(first_part_output_tok) / 3) + \
[3] * int(len(first_part_output_tok) / 3))
else:
count_labels += ([2] * int(len(first_part_output_tok) / 3) + [1] * int(len(first_part_output_tok) / 3) + \
[0] * int(len(first_part_output_tok) / 3))
# count_labels += ([1] + [0] * (int(len(first_part_output_tok) / 3) - 1)) * 3
else:
if 'after' in input_tok:
count_labels += ([3] * len(first_part_output_tok))
else:
count_labels += ([0] * len(first_part_output_tok))
# count_labels += ([1] + [0] * (int(len(first_part_output_tok)) - 1))
### For the second part output
if len(second_part_output_tok) > 0:
if 'twice' in second_part_input_tok:
if 'after' in input_tok:
count_labels += ([1] * int(len(second_part_output_tok) / 2) + [0] * int(len(second_part_output_tok) / 2))
else:
count_labels += ([4] * int(len(second_part_output_tok) / 2) + [3] * int(len(second_part_output_tok) / 2))
# count_labels += ([1] + [0] * (int(len(second_part_output_tok) / 2) - 1)) * 2
elif 'thrice' in second_part_input_tok:
if 'after' in input_tok:
count_labels += ([2] * int(len(second_part_output_tok) / 3) + [1] * int(len(second_part_output_tok) / 3) + \
[0] * int(len(second_part_output_tok) / 3))
else:
count_labels += ([5] * int(len(second_part_output_tok) / 3) + [4] * int(len(second_part_output_tok) / 3) + \
[3] * int(len(second_part_output_tok) / 3))
# count_labels += ([1] + [0] * (int(len(second_part_output_tok) / 3) - 1)) * 3
else:
if 'after' in input_tok:
count_labels += ([0] * len(second_part_output_tok))
else:
count_labels += ([3] * len(second_part_output_tok))
# count_labels += ([1] + [0] * (int(len(second_part_output_tok)) - 1))
elif count_label_scheme == 'v2':
### For the first part output
if 'twice' in first_part_input_tok:
count_labels += ([1] * int(len(first_part_output_tok) / 2) + [0] * int(
len(first_part_output_tok) / 2))
elif 'thrice' in first_part_input_tok:
count_labels += ([2] * int(len(first_part_output_tok) / 3) + [1] * int(
len(first_part_output_tok) / 3) + \
[0] * int(len(first_part_output_tok) / 3))
else:
count_labels += ([0] * len(first_part_output_tok))
### For the second part output
if len(second_part_output_tok) > 0:
if 'twice' in second_part_input_tok:
count_labels += ([1] * int(len(second_part_output_tok) / 2) + [0] * int(
len(second_part_output_tok) / 2))
elif 'thrice' in second_part_input_tok:
count_labels += ([2] * int(len(second_part_output_tok) / 3) + [1] * int(
len(second_part_output_tok) / 3) + [0] * int(len(second_part_output_tok) / 3))
else:
count_labels += ([0] * len(second_part_output_tok))
elif count_label_scheme == 'v3':
### For the first part output
if 'thrice' in first_part_input_tok and 'thrice' in second_part_input_tok:
start_count = 5
elif ('thrice' in first_part_input_tok and 'twice' in second_part_input_tok) or \
('twice' in first_part_input_tok and 'thrice' in second_part_input_tok):
start_count = 4
elif ('twice' in first_part_input_tok and 'twice' in second_part_input_tok) or \
('thrice' in first_part_input_tok) or ('thrice' in second_part_input_tok):
start_count = 3
elif 'twice' in first_part_input_tok or 'twice' in second_part_input_tok:
start_count = 2
else:
start_count = 1
if 'twice' in first_part_input_tok:
if 'after' in input_tok:
count_labels += ([start_count] * int(len(first_part_output_tok) / 2) + [start_count-1] * int(len(first_part_output_tok) / 2))
else:
count_labels += ([1] * int(len(first_part_output_tok) / 2) + [0] * int(len(first_part_output_tok) / 2))
# count_labels += ([1] + [0] * (int(len(first_part_output_tok) / 2) - 1)) * 2
elif 'thrice' in first_part_input_tok:
if 'after' in input_tok:
count_labels += ([start_count] * int(len(first_part_output_tok) / 3) + [start_count-1] * int(len(first_part_output_tok) / 3) + \
[start_count-2] * int(len(first_part_output_tok) / 3))
else:
count_labels += ([2] * int(len(first_part_output_tok) / 3) + [1] * int(len(first_part_output_tok) / 3) + \
[0] * int(len(first_part_output_tok) / 3))
# count_labels += ([1] + [0] * (int(len(first_part_output_tok) / 3) - 1)) * 3
else:
if 'after' in input_tok:
count_labels += ([start_count] * len(first_part_output_tok))
else:
count_labels += ([0] * len(first_part_output_tok))
# count_labels += ([1] + [0] * (int(len(first_part_output_tok)) - 1))
### For the second part output
if len(second_part_output_tok) > 0:
if 'twice' in second_part_input_tok:
if 'after' in input_tok:
count_labels += ([1] * int(len(second_part_output_tok) / 2) + [0] * int(len(second_part_output_tok) / 2))
else:
count_labels += ([start_count] * int(len(second_part_output_tok) / 2) + [start_count-1] * int(len(second_part_output_tok) / 2))
# count_labels += ([1] + [0] * (int(len(second_part_output_tok) / 2) - 1)) * 2
elif 'thrice' in second_part_input_tok:
if 'after' in input_tok:
count_labels += ([2] * int(len(second_part_output_tok) / 3) + [1] * int(len(second_part_output_tok) / 3) + \
[0] * int(len(second_part_output_tok) / 3))
else:
count_labels += ([start_count] * int(len(second_part_output_tok) / 3) + [start_count-1] * int(len(second_part_output_tok) / 3) + \
[start_count-2] * int(len(second_part_output_tok) / 3))
# count_labels += ([1] + [0] * (int(len(second_part_output_tok) / 3) - 1)) * 3
else:
if 'after' in input_tok:
count_labels += ([0] * len(second_part_output_tok))
else:
count_labels += ([start_count] * len(second_part_output_tok))
# count_labels += ([1] + [0] * (int(len(second_part_output_tok)) - 1))
elif count_label_scheme == 'v3.1':
### For the first part output
if 'thrice' in first_part_input_tok and 'thrice' in second_part_input_tok:
start_count = 5
elif ('thrice' in first_part_input_tok and 'twice' in second_part_input_tok) or \
('twice' in first_part_input_tok and 'thrice' in second_part_input_tok):
start_count = 4
elif ('twice' in first_part_input_tok and 'twice' in second_part_input_tok) or \
('thrice' in first_part_input_tok) or ('thrice' in second_part_input_tok):
start_count = 3
elif 'twice' in first_part_input_tok or 'twice' in second_part_input_tok:
start_count = 2
else:
start_count = 1
if 'twice' in first_part_input_tok:
count_labels += ([start_count] * int(len(first_part_output_tok) / 2) + [start_count - 1] * int(
len(first_part_output_tok) / 2))
# count_labels += ([1] + [0] * (int(len(first_part_output_tok) / 2) - 1)) * 2
elif 'thrice' in first_part_input_tok:
count_labels += ([start_count] * int(len(first_part_output_tok) / 3) + [start_count - 1] * int(
len(first_part_output_tok) / 3) + \
[start_count - 2] * int(len(first_part_output_tok) / 3))
else:
count_labels += ([start_count] * len(first_part_output_tok))
### For the second part output
if len(second_part_output_tok) > 0:
if 'twice' in second_part_input_tok:
count_labels += ([1] * int(len(second_part_output_tok) / 2) + [0] * int(
len(second_part_output_tok) / 2))
# count_labels += ([1] + [0] * (int(len(second_part_output_tok) / 2) - 1)) * 2
elif 'thrice' in second_part_input_tok:
count_labels += ([2] * int(len(second_part_output_tok) / 3) + [1] * int(
len(second_part_output_tok) / 3) + \
[0] * int(len(second_part_output_tok) / 3))
else:
count_labels += ([0] * len(second_part_output_tok))
else:
### For the first part output
if 'twice' in first_part_input_tok:
if 'after' in input_tok:
new_count_labels = list(range(int(len(first_part_output_tok) / 2)))[::-1] * 2
else:
new_count_labels = list(range(int(len(first_part_output_tok) / 2)))[::-1] * 2
elif 'thrice' in first_part_input_tok:
if 'after' in input_tok:
new_count_labels = list(range(int(len(first_part_output_tok) / 3)))[::-1] * 3
else:
new_count_labels = list(range(int(len(first_part_output_tok) / 3)))[::-1] * 3
else:
if 'after' in input_tok:
new_count_labels = list(range(len(first_part_output_tok)))[::-1]
else:
new_count_labels = list(range(len(first_part_output_tok)))[::-1]
count_labels += new_count_labels
### For the second part output
if len(second_part_output_tok) > 0:
if 'twice' in second_part_input_tok:
if 'after' in input_tok:
new_count_labels = list(range(int(len(second_part_output_tok) / 2)))[::-1] * 2
new_count_labels = [_c + 8 for _c in new_count_labels]
else:
new_count_labels = list(range(int(len(second_part_output_tok) / 2)))[::-1] * 2
new_count_labels = [_c + 8 for _c in new_count_labels]
elif 'thrice' in second_part_input_tok:
if 'after' in input_tok:
new_count_labels = list(range(int(len(second_part_output_tok) / 3)))[::-1] * 3
new_count_labels = [_c + 8 for _c in new_count_labels]
else:
new_count_labels = list(range(int(len(second_part_output_tok) / 3)))[::-1] * 3
new_count_labels = [_c + 8 for _c in new_count_labels]
else:
if 'after' in input_tok:
new_count_labels = list(range(len(second_part_output_tok)))[::-1]
new_count_labels = [_c + 8 for _c in new_count_labels]
else:
new_count_labels = list(range(len(second_part_output_tok)))[::-1]
new_count_labels = [_c + 8 for _c in new_count_labels]
count_labels += new_count_labels
# count_labels = []
# count_labels += list(range(len(first_part_output_tok)))[::-1]
# count_labels += list(range(len(second_part_output_tok)))[::-1]
assert len(count_labels) == len(output_tok), (len(count_labels), len(output_tok), input_text, first_part_input_tok, count_labels, output_tok,
first_part_output_text, first_part_output_tok, second_part_output_text, second_part_output_tok)
count_labels.append(-1) # For the EOS token
# count_labels.append(7) # For the EOS token
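            # Editor's note: worked example of the 'v1' scheme selected above.
            # For 'jump twice after walk' (output 'I_WALK I_JUMP I_JUMP'), the
            # clause after 'after' ('walk') is executed first and is labelled 3,
            # while the 'jump twice' clause gets [1, 0]; with the EOS label the
            # final count_labels is [3, 1, 0, -1].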
### 2. Build the action group labels ###
if group_label_scheme == 'v1': ## As used in exp 9.0-9.4
if 'around' in first_part_input_tok:
if 'after' in input_tok:
group_labels += ([4] * len(first_part_output_tok))
else:
group_labels += ([0] * len(first_part_output_tok))
elif 'opposite' in first_part_input_tok:
if 'after' in input_tok:
group_labels += ([5] * len(first_part_output_tok))
else:
group_labels += ([1] * len(first_part_output_tok))
elif 'left' in first_part_input_tok or 'right' in first_part_input_tok:
if 'after' in input_tok:
group_labels += ([6] * len(first_part_output_tok))
else:
group_labels += ([2] * len(first_part_output_tok))
else:
if 'after' in input_tok:
group_labels += ([7] * len(first_part_output_tok))
else:
group_labels += ([3] * len(first_part_output_tok))
if 'around' in second_part_input_tok:
if 'after' in input_tok:
group_labels += ([0] * len(second_part_output_tok))
else:
group_labels += ([4] * len(second_part_output_tok))
elif 'opposite' in second_part_input_tok:
if 'after' in input_tok:
group_labels += ([1] * len(second_part_output_tok))
else:
group_labels += ([5] * len(second_part_output_tok))
elif 'left' in second_part_input_tok or 'right' in second_part_input_tok:
if 'after' in input_tok:
group_labels += ([2] * len(second_part_output_tok))
else:
group_labels += ([6] * len(second_part_output_tok))
else:
if 'after' in input_tok:
group_labels += ([3] * len(second_part_output_tok))
else:
group_labels += ([7] * len(second_part_output_tok))
else:
### For the first part output
if 'twice' in first_part_input_tok:
if 'after' in input_tok:
new_group_labels = list(range(int(len(first_part_output_tok) / 2)))[::-1] * 2
new_group_labels = [_c + 8 for _c in new_group_labels]
else:
new_group_labels = list(range(int(len(first_part_output_tok) / 2)))[::-1] * 2
elif 'thrice' in first_part_input_tok:
if 'after' in input_tok:
new_group_labels = list(range(int(len(first_part_output_tok) / 3)))[::-1] * 3
new_group_labels = [_c + 8 for _c in new_group_labels]
else:
new_group_labels = list(range(int(len(first_part_output_tok) / 3)))[::-1] * 3
else:
if 'after' in input_tok:
new_group_labels = list(range(len(first_part_output_tok)))[::-1]
new_group_labels = [_c + 8 for _c in new_group_labels]
else:
new_group_labels = list(range(len(first_part_output_tok)))[::-1]
group_labels += new_group_labels
### For the second part output
if len(second_part_output_tok) > 0:
if 'twice' in second_part_input_tok:
if 'after' in input_tok:
new_group_labels = list(range(int(len(second_part_output_tok) / 2)))[::-1] * 2
else:
new_group_labels = list(range(int(len(second_part_output_tok) / 2)))[::-1] * 2
new_group_labels = [_c + 8 for _c in new_group_labels]
elif 'thrice' in second_part_input_tok:
if 'after' in input_tok:
new_group_labels = list(range(int(len(second_part_output_tok) / 3)))[::-1] * 3
else:
new_group_labels = list(range(int(len(second_part_output_tok) / 3)))[::-1] * 3
new_group_labels = [_c + 8 for _c in new_group_labels]
else:
if 'after' in input_tok:
new_group_labels = list(range(len(second_part_output_tok)))[::-1]
else:
new_group_labels = list(range(len(second_part_output_tok)))[::-1]
new_group_labels = [_c + 8 for _c in new_group_labels]
group_labels += new_group_labels
assert len(group_labels) == len(output_tok)
group_labels.append(-1) # For the EOS token
# group_labels.append(17) # For the EOS token
### 3. Build the action type labels ###
### For the first part output
if type_label_scheme == 'v1':
if 'around' in first_part_input_tok:
new_type_labels = [3] * len(first_part_output_tok)
elif 'opposite' in first_part_input_tok:
new_type_labels = [2] * len(first_part_output_tok)
elif 'left' in first_part_input_tok or 'right' in first_part_input_tok:
new_type_labels = [1] * len(first_part_output_tok)
else:
new_type_labels = [0] * len(first_part_output_tok)
# if 'after' in input_tok:
# new_type_labels = [_c + 4 for _c in new_type_labels]
type_labels += new_type_labels
### For the second part output
if len(second_part_output_tok) > 0:
if 'around' in second_part_input_tok:
new_type_labels = [3] * len(second_part_output_tok)
elif 'opposite' in second_part_input_tok:
new_type_labels = [2] * len(second_part_output_tok)
elif 'left' in second_part_input_tok or 'right' in second_part_input_tok:
new_type_labels = [1] * len(second_part_output_tok)
else:
new_type_labels = [0] * len(second_part_output_tok)
# if 'after' not in input_tok:
# new_type_labels = [_c + 4 for _c in new_type_labels]
type_labels += new_type_labels
elif type_label_scheme == 'v2':
if 'twice' in first_part_input_tok:
type_labels += ([1] * int(len(first_part_output_tok) / 2) + [0] * int(
len(first_part_output_tok) / 2))
elif 'thrice' in first_part_input_tok:
type_labels += ([2] * int(len(first_part_output_tok) / 3) + [1] * int(
len(first_part_output_tok) / 3) + \
[0] * int(len(first_part_output_tok) / 3))
else:
type_labels += ([0] * len(first_part_output_tok))
### For the second part output
if len(second_part_output_tok) > 0:
if 'twice' in second_part_input_tok:
type_labels += ([1] * int(len(second_part_output_tok) / 2) + [0] * int(
len(second_part_output_tok) / 2))
elif 'thrice' in second_part_input_tok:
type_labels += ([2] * int(len(second_part_output_tok) / 3) + [1] * int(
len(second_part_output_tok) / 3) + [0] * int(len(second_part_output_tok) / 3))
else:
type_labels += ([0] * len(second_part_output_tok))
assert len(type_labels) == len(output_tok)
type_labels.append(-1) # For the EOS token
# group_labels.append(17) # For the EOS token
# if _id not in no_skip_id:
# count_labels = [-1] * len(count_labels)
# group_labels = [-1] * len(group_labels)
# skip_cnt += 1
# else:
# sup_cnt += 1
all_action_type_labels.append(torch.tensor(type_labels).cuda())
all_count_labels.append(torch.tensor(count_labels).cuda())
all_action_group_labels.append(torch.tensor(group_labels).cuda())
print(skip_cnt, sup_cnt)
return all_count_labels, all_action_group_labels, all_action_type_labels
def convert_to_dict(self, raw_data):
dict_data = {}
for dp in raw_data:
input, output = dp[0], dp[1]
assert input not in dict_data
dict_data[input] = output
return dict_data
def __getitem__(self, index):
if self.tokenized:
dp = self.dataset[index]
source_ids, src_mask, target_ids = dp[0], dp[1], dp[2]
source_ids = source_ids[:self.max_source_length]
#src_mask = src_mask[:self.max_source_length]
target_ids = target_ids[:self.max_target_length]
else:
source_ids = self.source[index]
target_ids = self.target[index]
count_labels = self.action_count_labels[index]
group_labels = self.action_group_labels[index]
type_labels = self.action_type_labels[index]
return {"source_ids": source_ids, "target_ids": target_ids, "action_count_labels": count_labels,
"action_group_labels": group_labels, "action_type_labels": type_labels}
@staticmethod
def trim_seq2seq_batch(batch, src_pad_token_id, trg_pad_token_id, trim_y=True):
if trim_y:
y = trim_batch(batch["target_ids"], trg_pad_token_id)
else:
y = batch["target_ids"]
source_ids, source_mask = trim_batch(batch["source_ids"], src_pad_token_id, attention_mask=batch["source_mask"])
return source_ids, source_mask, y
def collate_fn(self, batch):
max_src_len = max(map(len, [x["source_ids"] for x in batch]))
max_trg_len = max(map(len, [x["target_ids"] for x in batch]))
src_mask = torch.stack([self.create_mask(x["source_ids"], max_src_len) for x in batch])
trg_mask = torch.stack([self.create_mask(x["target_ids"], max_trg_len) for x in batch])
src_ids = torch.stack([self.pad_to_max_len(x["source_ids"], max_src_len, self.src_lang.pad_token_id) for x in batch])
#masks = torch.stack([x["source_mask"] for x in batch])
trg_ids = torch.stack([self.pad_to_max_len(x["target_ids"], max_trg_len, self.trg_lang.pad_token_id) for x in batch])
action_count_labels = torch.stack([self.pad_to_max_len(x["action_count_labels"], max_trg_len, -1) for x in batch])
action_group_labels = torch.stack([self.pad_to_max_len(x["action_group_labels"], max_trg_len, -1) for x in batch])
action_type_labels = torch.stack(
[self.pad_to_max_len(x["action_type_labels"], max_trg_len, -1) for x in batch])
y = trim_batch(trg_ids, self.trg_lang.pad_token_id)
#action_count_labels = trim_batch(action_count_labels, -1)
# _src_ids, src_mask = trim_batch(src_ids, self.src_lang.pad_token_id, attention_mask=src_mask)
# print(_src_ids.size(), src_ids.size())
return {"source_ids": src_ids, "source_mask": src_mask, "target_ids": y, "target_mask": trg_mask,
"action_count_labels": action_count_labels, "action_group_labels": action_group_labels,
"action_type_labels": action_type_labels}
```
{
"source": "jiangycTarheel/EPAr",
"score": 2
}
#### File: EPAr/basic/model.py
```python
import random
import os
import itertools
import numpy as np
import tensorflow as tf
from tensorflow.contrib.rnn import BasicLSTMCell
from basic.read_data import DataSet
from basic.batcher import get_feed_dict as _get_feed_dict
from my.tensorflow import get_initializer
from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, reconstruct_batches, span_to_avg_emb, reconstruct_batchesV2, select_topn_doc_idx, reconstruct_batchesV3
from my.tensorflow.rnn import bidirectional_dynamic_rnn
from my.tensorflow.rnn_cell import SwitchableDropoutWrapper, AttentionCell
from reasoning_layers.mac_layer import MACRnn, dynamic_mac_rnn, HierarchicalAttnMACRnn
from reasoning_layers.assembler import BiAttnAssembler
from my.tensorflow.ops import bi_cudnn_rnn_encoder
def get_multi_gpu_models(config, emb_mat=None):
models = []
with tf.variable_scope(tf.get_variable_scope()) as vscope:
for gpu_idx in range(config.num_gpus):
with tf.name_scope("model_{}".format(gpu_idx)) as scope, tf.device("/{}:{}".format(config.device_type, gpu_idx)):
if gpu_idx > 0:
tf.get_variable_scope().reuse_variables()
model = Model(config, scope, emb_mat, rep=gpu_idx == 0)
models.append(model)
return models
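# --- Editor's note (not part of the original file) ---
# get_multi_gpu_models builds one Model replica per GPU inside a single shared
# variable scope: every tower after the first calls reuse_variables(), so all
# replicas share weights while their ops are pinned to different devices.
# A hypothetical training driver would look roughly like:
#
#   models = get_multi_gpu_models(config, emb_mat=emb_mat)
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       # feed one batch per tower via the batcher's get_feed_dict(...)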
class Model(object):
def __init__(self, config, scope, emb_mat, rep=True):
self.scope = scope
self.config = config
self.emb_mat = emb_mat
self.global_step = tf.get_variable('global_step', shape=[], dtype='int32',
initializer=tf.constant_initializer(0), trainable=False)
if config.split_supports is True:
N, M, JX, JQ, VW, VC, W = \
config.batch_size, 1, config.max_para_size, \
config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.max_word_size
self.x = tf.placeholder('int32', [None, None, None], name='x')
self.cx = tf.placeholder('int32', [None, None, None, W], name='cx')
self.x_mask = tf.placeholder('bool', [None, None, None], name='x_mask')
self.x_sents_len = tf.placeholder('int32', [None, M, 10], name='x_sents_len')
else:
# Define forward inputs here
N, M, JX, JQ, VW, VC, W = \
config.batch_size, config.max_num_sents, config.max_sent_size, \
config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.max_word_size
self.x = tf.placeholder('int32', [N, None, None], name='x')
self.cx = tf.placeholder('int32', [N, None, None, W], name='cx')
self.x_mask = tf.placeholder('bool', [N, None, None], name='x_mask')
self.q = tf.placeholder('int32', [N, None], name='q')
self.cq = tf.placeholder('int32', [N, None, W], name='cq')
self.q_sub = tf.placeholder('int32', [N, None], name='q_sub')
self.cq_sub = tf.placeholder('int32', [N, None, W], name='cq_sub')
self.q_mask = tf.placeholder('bool', [N, None], name='q_mask')
self.q_sub_mask = tf.placeholder('bool', [N, None], name='q_sub_mask')
self.y = tf.placeholder('bool', [N, None, None], name='y')
self.y2 = tf.placeholder('bool', [N, None, None], name='y2')
self.wy = tf.placeholder('bool', [N, None, None], name='wy')
self.is_train = tf.placeholder('bool', [], name='is_train')
self.new_emb_mat = tf.placeholder('float', [None, config.word_emb_size], name='new_emb_mat')
self.na = tf.placeholder('bool', [N], name='na')
if (config.reasoning_layer is not None and config.mac_prediction == 'candidates'):
self.candidate_spans = tf.placeholder('int32', [N, None, None, 2], name='cand_spans')
self.candidate_span_y = tf.placeholder('int32', [N, None], name='cand_span_y')
self.num_exceed_cand = tf.placeholder('int32', [N, None, None], name='num_exceed_cand')
self.x_group = tf.placeholder('int32', [N], name='x_group') # Define how sentences could be grouped into batch
if config.supervise_first_doc:
self.first_doc_ids = tf.placeholder('int32', [N], name='first_doc_ids')
if config.use_assembler:
self.selected_sent_ids = tf.placeholder('int32', [config.batch_size, config.num_hops], name='selected_sent_ids')
self.answer_doc_ids = tf.placeholder('int32', [N, None], name='answer_doc_ids')
self.answer_word_ids = tf.placeholder('int32', [N, None], name='answer_word_ids')
self.period_id = None
# Define misc
self.tensor_dict = {}
# Forward outputs / loss inputs
self.logits = None
self.yp = None
self.var_list = None
self.na_prob = None
# Loss outputs
self.loss = None
self._build_forward()
self._build_loss()
self.var_ema = None
if rep:
self._build_var_ema()
if config.mode == 'train':
self._build_ema()
self.summary = tf.summary.merge_all()
self.summary = tf.summary.merge(tf.get_collection("summaries", scope=self.scope))
def _build_forward(self):
config = self.config
N, M, JX, JQ, VW, VC, d, W = \
config.batch_size, config.max_num_sents, config.max_sent_size, \
config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.hidden_size, \
config.max_word_size
if config.split_supports:
M = 1
JX = tf.shape(self.x)[2]
JQ = tf.shape(self.q)[1]
JQ_sub = tf.shape(self.q_sub)[1]
M = tf.shape(self.x)[1]
dc, dw, dco = config.char_emb_size, config.word_emb_size, config.char_out_size
with tf.variable_scope("emb"):
if config.use_char_emb:
with tf.variable_scope("emb_var"), tf.device("/cpu:0"):
char_emb_mat = tf.get_variable("char_emb_mat", shape=[VC, dc], dtype='float')
with tf.variable_scope("char"):
Acx = tf.nn.embedding_lookup(char_emb_mat, self.cx) # [N, M, JX, W, dc]
Acq = tf.nn.embedding_lookup(char_emb_mat, self.cq) # [N, JQ, W, dc]
Acx = tf.reshape(Acx, [-1, JX, W, dc])
Acq = tf.reshape(Acq, [-1, JQ, W, dc])
if config.get_query_subject:
Acq_sub = tf.nn.embedding_lookup(char_emb_mat, self.cq_sub) # [N, JQ, W, dc]
Acq_sub = tf.reshape(Acq_sub, [-1, JQ_sub, W, dc])
filter_sizes = list(map(int, config.out_channel_dims.split(',')))
heights = list(map(int, config.filter_heights.split(',')))
assert sum(filter_sizes) == dco, (filter_sizes, dco)
with tf.variable_scope("conv"):
xx = multi_conv1d(Acx, filter_sizes, heights, "VALID", self.is_train, config.keep_prob, scope="xx")
if config.share_cnn_weights:
tf.get_variable_scope().reuse_variables()
qq = multi_conv1d(Acq, filter_sizes, heights, "VALID", self.is_train, config.keep_prob, scope="xx")
if config.get_query_subject:
qq_sub = multi_conv1d(Acq_sub, filter_sizes, heights, "VALID", self.is_train, config.keep_prob, scope="xx")
else:
qq = multi_conv1d(Acq, filter_sizes, heights, "VALID", self.is_train, config.keep_prob, scope="qq")
if config.get_query_subject:
qq_sub = multi_conv1d(Acq_sub, filter_sizes, heights, "VALID", self.is_train, config.keep_prob, scope="qq")
xx = tf.reshape(xx, [-1, M, JX, dco])
qq = tf.reshape(qq, [-1, JQ, dco])
if config.get_query_subject:
qq_sub = tf.reshape(qq_sub, [-1, JQ_sub, dco])
if config.use_word_emb:
with tf.variable_scope("emb_var"), tf.device("/cpu:0"):
if config.mode == 'train':
word_emb_mat = tf.get_variable("word_emb_mat", dtype='float', shape=[VW, dw], initializer=get_initializer(self.emb_mat))
else:
word_emb_mat = tf.get_variable("word_emb_mat", shape=[VW, dw], dtype='float')
if config.use_glove_for_unk:
word_emb_mat = tf.concat(axis=0, values=[word_emb_mat, self.new_emb_mat])
with tf.name_scope("word"):
Ax = tf.nn.embedding_lookup(word_emb_mat, self.x) # [N, M, JX, d]
Aq = tf.nn.embedding_lookup(word_emb_mat, self.q) # [N, JQ, d]
if config.get_query_subject:
Aq_sub = tf.nn.embedding_lookup(word_emb_mat, self.q_sub)
self.tensor_dict['q_sub'] = Aq_sub
self.tensor_dict['x'] = Ax
self.tensor_dict['q'] = Aq
if config.use_char_emb:
xx = tf.concat(axis=3, values=[xx, Ax]) # [N, M, JX, di]
qq = tf.concat(axis=2, values=[qq, Aq]) # [N, JQ, di]
if config.get_query_subject:
qq_sub = tf.concat(axis=2, values=[qq_sub, Aq_sub])
else:
xx = Ax
qq = Aq
if config.get_query_subject:
qq_sub = Aq_sub
# highway network
if config.highway:
with tf.variable_scope("highway"):
xx = highway_network(xx, config.highway_num_layers, True, wd=config.wd, is_train=self.is_train, input_keep_prob=config.highway_keep_prob)
tf.get_variable_scope().reuse_variables()
qq = highway_network(qq, config.highway_num_layers, True, wd=config.wd, is_train=self.is_train, input_keep_prob=config.highway_keep_prob)
if config.get_query_subject:
qq_sub = highway_network(qq_sub, config.highway_num_layers, True, wd=config.wd, is_train=self.is_train, input_keep_prob=config.highway_keep_prob)
self.tensor_dict['xx'] = xx
self.tensor_dict['qq'] = qq
cell_fw = BasicLSTMCell(d, state_is_tuple=True)
cell_bw = BasicLSTMCell(d, state_is_tuple=True)
d_cell_fw = SwitchableDropoutWrapper(cell_fw, self.is_train, input_keep_prob=config.input_keep_prob)
d_cell_bw = SwitchableDropoutWrapper(cell_bw, self.is_train, input_keep_prob=config.input_keep_prob)
cell2_fw = BasicLSTMCell(d, state_is_tuple=True)
cell2_bw = BasicLSTMCell(d, state_is_tuple=True)
d_cell2_fw = SwitchableDropoutWrapper(cell2_fw, self.is_train, input_keep_prob=config.input_keep_prob)
d_cell2_bw = SwitchableDropoutWrapper(cell2_bw, self.is_train, input_keep_prob=config.input_keep_prob)
cell3_fw = BasicLSTMCell(d, state_is_tuple=True)
cell3_bw = BasicLSTMCell(d, state_is_tuple=True)
d_cell3_fw = SwitchableDropoutWrapper(cell3_fw, self.is_train, input_keep_prob=config.input_keep_prob)
d_cell3_bw = SwitchableDropoutWrapper(cell3_bw, self.is_train, input_keep_prob=config.input_keep_prob)
cell4_fw = BasicLSTMCell(d, state_is_tuple=True)
cell4_bw = BasicLSTMCell(d, state_is_tuple=True)
d_cell4_fw = SwitchableDropoutWrapper(cell4_fw, self.is_train, input_keep_prob=config.input_keep_prob)
d_cell4_bw = SwitchableDropoutWrapper(cell4_bw, self.is_train, input_keep_prob=config.input_keep_prob)
x_len = tf.reduce_sum(tf.cast(self.x_mask, 'int32'), 2) # [N, M]
q_len = tf.reduce_sum(tf.cast(self.q_mask, 'int32'), 1) # [N]
q_sub_len = tf.reduce_sum(tf.cast(self.q_sub_mask, 'int32'), 1) # [N]
with tf.variable_scope("prepro"):
if config.cudnn_rnn:
if config.reasoning_layer == 'mac_rnn' and config.use_control_unit is False:
with tf.variable_scope('u1'):
u_bod, _ = bi_cudnn_rnn_encoder('lstm', config.hidden_size, 1, 1-config.input_keep_prob, qq, q_len-q_sub_len, self.is_train)
u_st = zhong_selfatt(tf.expand_dims(u_bod, axis=1), config.hidden_size*2, seq_len=q_len-q_sub_len, transform='squeeze')
tf.get_variable_scope().reuse_variables()
u, _ = bi_cudnn_rnn_encoder('lstm', config.hidden_size, 1, 1-config.input_keep_prob, qq, q_len, self.is_train)
else: # go to this case if answer_state_update_rule == 'bi-attn'
with tf.variable_scope('u1'):
u, _ = bi_cudnn_rnn_encoder('lstm', config.hidden_size, 1, 1-config.input_keep_prob, qq, q_len, self.is_train)
if config.reasoning_layer == 'mac_rnn':
u_st = zhong_selfatt(tf.expand_dims(u, axis=1), config.hidden_size*2, seq_len=q_len, transform='squeeze')
q_sub_st = None
if config.share_lstm_weights:
with tf.variable_scope('u1', reuse=True):
h, _ = bi_cudnn_rnn_encoder('lstm', config.hidden_size, 1, 1-config.input_keep_prob, tf.squeeze(xx, axis=1), tf.squeeze(x_len, axis=1), self.is_train)
h = tf.expand_dims(h, axis=1)
if config.reasoning_layer == 'mac_rnn':
h_st = zhong_selfatt(h, config.hidden_size*2, seq_len=tf.squeeze(x_len, axis=1), transform='squeeze')
else: # Need a dumy h_st
h_st = tf.reduce_mean(tf.squeeze(h, axis=1), axis=1)
if config.get_query_subject:
q_sub, _ = bi_cudnn_rnn_encoder('lstm', config.hidden_size, 1, 1-config.input_keep_prob, qq_sub, q_sub_len, self.is_train)
q_sub_st = zhong_selfatt(tf.expand_dims(q_sub, axis=1), config.hidden_size*2, seq_len=q_sub_len, transform='squeeze')
else:
if config.reasoning_layer == 'mac_rnn' and config.use_control_unit is False:
# If control_unit is False, only encode the query body
(fw_u, bw_u), (fw_u_f_st, bw_u_f_st) = bidirectional_dynamic_rnn(d_cell_fw, d_cell_bw, qq, q_len - q_sub_len, dtype='float', scope='u1') # [N, J, d], [N, d]
u_st = tf.concat(axis=1, values=[fw_u_f_st.c, bw_u_f_st.c])
#if config.bidaf:
(fw_u, bw_u), _ = bidirectional_dynamic_rnn(d_cell_fw, d_cell_bw, qq, q_len, dtype='float', scope='u1') # [N, J, d], [N, d]
u = tf.concat(axis=2, values=[fw_u, bw_u])
else: # go to this case if answer_state_update_rule == 'bi-attn'
(fw_u, bw_u), (fw_u_f_st, bw_u_f_st) = bidirectional_dynamic_rnn(d_cell_fw, d_cell_bw, qq, q_len, dtype='float', scope='u1') # [N, J, d], [N, d]
u = tf.concat(axis=2, values=[fw_u, bw_u])
if config.share_lstm_weights:
tf.get_variable_scope().reuse_variables()
(fw_h, bw_h), (fw_h_f_st, bw_h_f_st) = bidirectional_dynamic_rnn(cell_fw, cell_bw, xx, x_len, dtype='float', scope='u1') # [N, M, JX, 2d]
h = tf.concat(axis=3, values=[fw_h, bw_h]) # [N, M, JX, 2d]
h_st = tf.concat(axis=1, values=[fw_h_f_st.c, bw_h_f_st.c]) # [N, M, 2d]
if config.get_query_subject:
_, (fw_u2_f_st, bw_u2_f_st) = bidirectional_dynamic_rnn(cell_fw, cell_bw, qq_sub, q_sub_len, dtype='float', scope='u1') # [N, M, JX, 2d]
q_sub_st = tf.concat(axis=1, values=[fw_u2_f_st.c, bw_u2_f_st.c]) # [N, M, 2d]
else:
q_sub_st = None
else:
(fw_h, bw_h), (fw_h_f_st, bw_h_f_st) = bidirectional_dynamic_rnn(cell_fw, cell_bw, xx, x_len, dtype='float', scope='h1') # [N, M, JX, 2d]
h = tf.concat(axis=3, values=[fw_h, bw_h]) # [N, M, JX, 2d]
h_st = tf.concat(axis=1, values=[fw_h_f_st.c, bw_h_f_st.c]) # [N, M, 2d]
if config.get_query_subject:
tf.get_variable_scope().reuse_variables()
_, (fw_u2_f_st, bw_u2_f_st) = bidirectional_dynamic_rnn(cell_fw, cell_bw, qq_sub, q_sub_len, dtype='float', scope='u1') # [N, M, JX, 2d]
q_sub_st = tf.concat(axis=2, values=[fw_u2_f_st.c, bw_u2_f_st.c]) # [N, M, 2d]
else:
q_sub_st = None
self.tensor_dict['u'] = u
self.tensor_dict['h'] = h
with tf.variable_scope("main"):
context_dim = config.hidden_size * 2
# Reconstruct before bidaf because otherwise we need to build a larger query tensor.
if config.split_supports: # Reconstruct batches into [N, M, JX, 2d]
if config.select_top_n_doc > 0:
first_n_doc_idx = select_topn_doc_idx(N, config.select_top_n_doc, self.x_group)
h_plus_one = tf.concat([h, tf.expand_dims(tf.zeros_like(h[0], tf.float32), axis=0)], axis=0)
h_st_plus_one = tf.concat([h_st, tf.expand_dims(tf.zeros_like(h_st[0], tf.float32), axis=0)], axis=0)
x_len_plus_one = tf.concat([x_len, tf.expand_dims(tf.zeros_like(x_len[0], tf.int32), axis=0)], axis=0)
x_mask_plus_one = tf.concat([self.x_mask, tf.expand_dims(tf.zeros_like(self.x_mask[0], tf.bool), axis=0)], axis=0)
top_n_h = tf.gather(h_plus_one, first_n_doc_idx)
top_n_h_st = tf.gather(h_st_plus_one, first_n_doc_idx)
top_n_x_len = tf.gather(x_len_plus_one, first_n_doc_idx)
top_n_x_mask = tf.gather(x_mask_plus_one, first_n_doc_idx)
if config.hierarchical_attn is False:
h, x_len, x_mask = reconstruct_batches(h, x_len, self.x_group, target_batch_size=N, \
max_para_size=config.max_para_size, model=self)
else:
if config.bidaf:
context_dim = config.hidden_size * 4
# Augment query to match
batch_nums = []
for i in range(config.batch_size):
batch_nums = tf.concat([batch_nums, tf.tile([i], [self.x_group[i]])], axis=0)
u_tiled = tf.gather(u, batch_nums)
q_mask_tiled = tf.gather(self.q_mask, batch_nums)
h = attention_layer(config, self.is_train, h, u_tiled, h_mask=self.x_mask, u_mask=q_mask_tiled, scope="p0", tensor_dict=self.tensor_dict)
W = tf.get_variable('W', [160, 80])
b = tf.get_variable('b', [80])
h = tf.einsum('ijkl,lm->ijkm',h,W) + b
h_reconstruct, _, _ = reconstruct_batches(h, x_len, self.x_group, target_batch_size=N, \
max_para_size=config.max_para_size, model=self, emb_dim=context_dim)
if config.select_top_n_doc > 1:
top_n_x_group = []
for i in range(N):
to_append = tf.cond(self.x_group[i] > config.select_top_n_doc, lambda: config.select_top_n_doc, lambda: self.x_group[i])
top_n_x_group.append(to_append)
top_n_x_group = tf.stack(top_n_x_group)
h, p_st, x_mask, pdoc_mask, self.x_sents_len_reconstruct = reconstruct_batchesV2(top_n_h, top_n_h_st, top_n_x_mask, top_n_x_group, self.x_sents_len, target_batch_size=N, \
max_para_size=config.max_para_size, model=self)
else:
h, p_st, x_mask, pdoc_mask, self.x_sents_len_reconstruct = reconstruct_batchesV2(h, h_st, self.x_mask, self.x_group, self.x_sents_len, target_batch_size=N, \
max_para_size=config.max_para_size, model=self)
if config.select_top_n_doc > 0:
x_len = top_n_x_len
else:
x_mask = self.x_mask
if config.bidaf and config.hierarchical_attn is False:
context_dim = config.hidden_size * 8
if config.use_control_unit is False and config.reasoning_layer == 'mac_rnn':
if config.select_top_n_doc > 0:
p0 = attention_layer(config, self.is_train, top_n_h, u, h_mask=top_n_x_mask, u_mask=self.q_mask, scope="p0", tensor_dict=self.tensor_dict)
else:
p0 = attention_layer(config, self.is_train, h, u, h_mask=x_mask, u_mask=self.q_mask, scope="p0", tensor_dict=self.tensor_dict)
else:
if config.select_top_n_doc > 0:
p0 = attention_layer(config, self.is_train, top_n_h, u, h_mask=top_n_x_mask, u_mask=self.q_mask, scope="p0", tensor_dict=self.tensor_dict)
else:
p0 = attention_layer(config, self.is_train, h, u, h_mask=x_mask, u_mask=self.q_mask, scope="p0", tensor_dict=self.tensor_dict)
else:
p0 = h
first_cell_fw = d_cell2_fw
second_cell_fw = d_cell3_fw
first_cell_bw = d_cell2_bw
second_cell_bw = d_cell3_bw
if config.reasoning_layer == 'mac_rnn':
query_dim = config.hidden_size * 2
if config.hierarchical_attn:
mac_rnn_cell = HierarchicalAttnMACRnn(config.batch_size, context_dim, query_dim, num_hops=config.num_hops, reuse_cell=config.reuse_cell, \
is_train=self.is_train, use_control_unit=config.use_control_unit, mode=config.mode, read_strategy=config.mac_read_strategy, \
output_unit_type=config.mac_output_unit, answer_state_update_rule=config.mac_answer_state_update_rule, reasoning_unit=config.mac_reasoning_unit, \
memory_state_update_rule=config.mac_memory_state_update_rule, \
answer_doc_ids=self.answer_doc_ids if config.supervise_final_doc or (config.oracle is not None) else None, \
sents_len=self.x_sents_len_reconstruct, oracle=config.oracle, \
input_keep_prob=config.input_keep_prob, \
attention_cell_dropout=config.attention_cell_dropout, read_topk_docs=config.read_topk_docs)
self.mac_rnn_cell = mac_rnn_cell
if config.mac_prediction == 'candidates':
cand_emb, cand_mask = span_to_avg_emb(self.candidate_spans, h_reconstruct, config.batch_size, self)
g1 = dynamic_mac_rnn(mac_rnn_cell, p0, u, q_len, x_mask, self.q_mask, q_sub_st=q_sub_st, context_st=p_st, query_st=u_st, cdoc_mask=pdoc_mask, candidates=cand_emb, cand_mask=cand_mask)
self.doc_attn_logits = mac_rnn_cell.doc_attn_logits_lst
self.word_attn_logits = mac_rnn_cell.word_attn_logits_lst
self.doc_labels = mac_rnn_cell.doc_attn
self.g1 = g1
self.cand_mask = cand_mask
self.cand_emb = cand_emb
self.pdoc_mask = pdoc_mask
self.p_st = p_st
logits = get_logits([g1], d, True, wd=config.wd, input_keep_prob=config.input_keep_prob, mask=cand_mask, \
is_train=self.is_train, func=config.answer_func, scope='logits1')
JX = tf.shape(g1)[2]
self.JX = JX
self.g1_shape=tf.shape(g1)
flat_logits = tf.reshape(logits, [config.batch_size, M * JX])
flat_yp = tf.nn.softmax(flat_logits) # [-1, M*JX]
yp = tf.reshape(flat_yp, [config.batch_size, M, JX])
self.logits = flat_logits
self.yp = yp
if config.use_assembler or config.attn_visualization:
self.yp_list = []
self.logits_list = []
for i in range(config.num_hops):
logits = get_logits([mac_rnn_cell.answer_list[i]], d, True, wd=config.wd, input_keep_prob=config.input_keep_prob, mask=cand_mask, \
is_train=self.is_train, func=config.answer_func, scope='logits1', reuse=True)
flat_logits = tf.reshape(logits, [config.batch_size, M * JX])
flat_yp = tf.nn.softmax(flat_logits)
yp = tf.reshape(flat_yp, [config.batch_size, M, JX])
self.yp_list.append(yp)
self.logits_list.append(flat_logits)
if config.use_assembler:
if config.assembler_type == 'BiAttn':
self.assembler = BiAttnAssembler(config, self.is_train, self, context_dim=context_dim)
self.assembler.build_forward(p0, x_mask, u, u_st, self.q_mask, cand_emb, cand_mask)
else:
raise NotImplementedError
return
else:
raise NotImplementedError
else:
mac_rnn_cell = MACRnn(config.batch_size, p0.get_shape()[-1], u.get_shape()[-1], num_hops=config.num_hops, prediction=config.mac_prediction, \
reuse_cell=config.reuse_cell, is_train=self.is_train, use_control_unit=config.use_control_unit, mode=config.mode)
if config.mac_prediction == 'candidates':
cand_emb, cand_mask = span_to_avg_emb(self.candidate_spans, p0, config.batch_size, self)
g1 = dynamic_mac_rnn(mac_rnn_cell, p0, u, q_len, x_mask, self.q_mask, candidates=cand_emb, cand_mask=cand_mask, q_sub_st=q_sub_st)
self.g1 = g1
self.cand_mask = cand_mask
self.cand_emb = cand_emb
logits = get_logits([g1], d, True, wd=config.wd, input_keep_prob=config.input_keep_prob, mask=cand_mask, \
is_train=self.is_train, func=config.answer_func, scope='logits1')
JX = tf.shape(g1)[2]
flat_logits = tf.reshape(logits, [config.batch_size, M * JX])
flat_yp = tf.nn.softmax(flat_logits) # [-1, M*JX]
yp = tf.reshape(flat_yp, [config.batch_size, M, JX])
self.logits = flat_logits
self.yp = yp
return
elif config.mac_prediction == 'span-dual':
g1, g2 = dynamic_mac_rnn(mac_rnn_cell, p0, qq, q_len)
if config.split_supports is True:
M=1
JX=config.max_para_size
N=config.batch_size
logits = get_logits([g1], d, True, wd=config.wd, input_keep_prob=config.input_keep_prob,
mask=x_mask, is_train=self.is_train, func=config.answer_func, scope='logits1')
logits2 = get_logits([g2], d, True, wd=config.wd, input_keep_prob=config.input_keep_prob,
mask=x_mask, is_train=self.is_train, func=config.answer_func, scope='logits2')
else:
assert config.mac_prediction == 'span-single'
g1, logits = dynamic_mac_rnn(mac_rnn_cell, p0, qq, q_len, x_mask, self.q_mask)
if config.split_supports is True:
M=1
JX=config.max_para_size
N=config.batch_size
a1i = softsel(tf.reshape(g1, [N, M * JX, 80]), tf.reshape(logits, [N, M * JX]))
a1i = tf.tile(tf.expand_dims(tf.expand_dims(a1i, 1), 1), [1, M, JX, 1])
else:
if config.cudnn_rnn:
with tf.variable_scope('g0'):
g0, _ = bi_cudnn_rnn_encoder('lstm', config.hidden_size, 1, 1-config.input_keep_prob, tf.squeeze(p0, axis=1), tf.squeeze(x_len, axis=1), self.is_train)
g0 = tf.expand_dims(g0, axis=1)
else:
(fw_g0, bw_g0), _ = bidirectional_dynamic_rnn(first_cell_fw, first_cell_bw, p0, x_len, dtype='float', scope='g0') # [N, M, JX, 2d]
g0 = tf.concat(axis=3, values=[fw_g0, bw_g0])
if config.cudnn_rnn:
with tf.variable_scope('g1'):
g1, _ = bi_cudnn_rnn_encoder('lstm', config.hidden_size, 1, 1-config.input_keep_prob, tf.squeeze(g0, axis=1), tf.squeeze(x_len, axis=1), self.is_train)
g1 = tf.expand_dims(g1, axis=1)
else:
(fw_g1, bw_g1), (fw_g1_f_st, bw_g1_f_st) = bidirectional_dynamic_rnn(second_cell_fw, second_cell_bw, g0, x_len, dtype='float', scope='g1') # [N, M, JX, 2d]
g1 = tf.concat(axis=3, values=[fw_g1, bw_g1])
if config.reasoning_layer == 'bidaf' and config.mac_prediction == 'candidates':
logits = get_logits([g1], d, True, wd=config.wd, input_keep_prob=config.input_keep_prob, mask=x_mask, is_train=self.is_train, scope='a_state_logits')
probs = tf.nn.softmax(logits)
a_state = tf.einsum('ijkl,ijk->ijl', h, probs)
a_state = tf.squeeze(a_state, axis=1)
cand_emb, cand_mask = span_to_avg_emb(self.candidate_spans, h, config.batch_size, self)
cand_emb = tf.squeeze(cand_emb, axis=1)
cand_dim = config.hidden_size * 2
with tf.variable_scope('output_unit'):
num_cand = tf.shape(cand_emb)[1]
similarity = tf.einsum('ik,ijk->ijk', a_state, cand_emb)
M = tf.tile(tf.expand_dims(a_state, axis=1), [1, num_cand, 1])
W1 = tf.get_variable('W1', [3*cand_dim, 2*cand_dim])
b1 = tf.get_variable('b1', [2*cand_dim])
W2 = tf.get_variable('W2', [2*cand_dim, cand_dim])
b2 = tf.get_variable('b2', [cand_dim])
concat_in = tf.concat(axis=-1, values=[tf.reshape(M, [-1, cand_dim]), tf.reshape(cand_emb, [-1, cand_dim]), tf.reshape(similarity, [-1, cand_dim])])
output = tf.matmul(tf.nn.relu(tf.matmul(concat_in, W1) + b1), W2) + b2
g1 = tf.expand_dims(tf.reshape(output, [self.config.batch_size, -1, 40]), axis=1)
logits = get_logits([g1], d, True, wd=config.wd, input_keep_prob=config.input_keep_prob, mask=cand_mask, \
is_train=self.is_train, func=config.answer_func, scope='logits1')
JX = tf.shape(g1)[2]
flat_logits = tf.reshape(logits, [config.batch_size, JX])
flat_yp = tf.nn.softmax(flat_logits) # [-1, M*JX]
yp = tf.reshape(flat_yp, [config.batch_size, 1, JX])
self.logits = flat_logits
self.yp = yp
return
logits = get_logits([g1, p0], d, True, wd=config.wd, input_keep_prob=config.input_keep_prob,
mask=x_mask, is_train=self.is_train, func=config.answer_func, scope='logits1')
if config.split_supports is True:
M=1
JX=config.max_para_size
N=config.batch_size
a1i = softsel(tf.reshape(g1, [N, M * JX, 2 * d]), tf.reshape(logits, [N, M * JX]))
a1i = tf.tile(tf.expand_dims(tf.expand_dims(a1i, 1), 1), [1, M, JX, 1])
if config.reasoning_layer is None or config.mac_prediction == 'span-single':
if config.cudnn_rnn:
with tf.variable_scope('g2'):
g2_in = tf.squeeze(tf.concat(axis=3, values=[p0, g1, a1i, g1 * a1i]), axis=1)
g2, _ = bi_cudnn_rnn_encoder('lstm', config.hidden_size, 1, 1-config.input_keep_prob, g2_in, tf.squeeze(x_len, axis=1), self.is_train)
g2 = tf.expand_dims(g2, axis=1)
else:
(fw_g2, bw_g2), _ = bidirectional_dynamic_rnn(d_cell4_fw, d_cell4_bw, tf.concat(axis=3, values=[p0, g1, a1i, g1 * a1i]),
x_len, dtype='float', scope='g2') # [N, M, JX, 2d]
g2 = tf.concat(axis=3, values=[fw_g2, bw_g2])
logits2 = get_logits([g2, p0], d, True, wd=config.wd, input_keep_prob=config.input_keep_prob,
mask=x_mask,
is_train=self.is_train, func=config.answer_func, scope='logits2')
flat_logits = tf.reshape(logits, [-1, M * JX])
flat_yp = tf.nn.softmax(flat_logits) # [-1, M*JX]
flat_logits2 = tf.reshape(logits2, [-1, M * JX])
flat_yp2 = tf.nn.softmax(flat_logits2)
if config.na:
na_bias = tf.get_variable("na_bias", shape=[], dtype='float')
na_bias_tiled = tf.tile(tf.reshape(na_bias, [1, 1]), [N, 1]) # [N, 1]
concat_flat_logits = tf.concat(axis=1, values=[na_bias_tiled, flat_logits])
concat_flat_yp = tf.nn.softmax(concat_flat_logits)
na_prob = tf.squeeze(tf.slice(concat_flat_yp, [0, 0], [-1, 1]), [1])
flat_yp = tf.slice(concat_flat_yp, [0, 1], [-1, -1])
concat_flat_logits2 = tf.concat(axis=1, values=[na_bias_tiled, flat_logits2])
concat_flat_yp2 = tf.nn.softmax(concat_flat_logits2)
na_prob2 = tf.squeeze(tf.slice(concat_flat_yp2, [0, 0], [-1, 1]), [1]) # [N]
flat_yp2 = tf.slice(concat_flat_yp2, [0, 1], [-1, -1])
self.concat_logits = concat_flat_logits
self.concat_logits2 = concat_flat_logits2
self.na_prob = na_prob * na_prob2
yp = tf.reshape(flat_yp, [-1, M, JX])
yp2 = tf.reshape(flat_yp2, [-1, M, JX])
wyp = tf.nn.sigmoid(logits2)
self.logits = flat_logits
self.logits2 = flat_logits2
self.yp = yp
self.yp2 = yp2
self.wyp = wyp
def _build_loss(self):
config = self.config
JX = tf.shape(self.x)[2]
#
N = config.batch_size
if config.split_supports is True:
M = 1
JX = config.max_para_size
else:
M = tf.shape(self.x)[1]
JQ = tf.shape(self.q)[1]
loss_mask = tf.reduce_max(tf.cast(self.q_mask, 'float'), 1)
if config.wy:
losses = tf.nn.sigmoid_cross_entropy_with_logits(
logits=tf.reshape(self.logits2, [-1, M, JX]), labels=tf.cast(self.wy, 'float')) # [N, M, JX]
num_pos = tf.reduce_sum(tf.cast(self.wy, 'float'))
num_neg = tf.reduce_sum(tf.cast(self.x_mask, 'float')) - num_pos
damp_ratio = num_pos / num_neg
dampened_losses = losses * (
(tf.cast(self.x_mask, 'float') - tf.cast(self.wy, 'float')) * damp_ratio + tf.cast(self.wy, 'float'))
new_losses = tf.reduce_sum(dampened_losses, [1, 2])
ce_loss = tf.reduce_mean(loss_mask * new_losses)
tf.add_to_collection('losses', ce_loss)
else:
if config.reasoning_layer is not None and config.mac_prediction == 'candidates':
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=self.logits, labels=tf.cast(tf.reshape(self.candidate_span_y, [config.batch_size]), 'int32'))
ce_loss = tf.reduce_mean(loss_mask * losses)
tf.add_to_collection('losses', ce_loss)
else:
if config.na:
na = tf.reshape(self.na, [-1, 1])
concat_y = tf.concat(axis=1, values=[na, tf.reshape(self.y, [-1, M * JX])])
losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.concat_logits, labels=tf.cast(concat_y, 'float'))
concat_y2 = tf.concat(axis=1, values=[na, tf.reshape(self.y2, [-1, M * JX])])
losses2 = tf.nn.softmax_cross_entropy_with_logits(logits=self.concat_logits2, labels=tf.cast(concat_y2, 'float'))
else:
losses = tf.nn.softmax_cross_entropy_with_logits(
logits=self.logits, labels=tf.cast(tf.reshape(self.y, [-1, M * JX]), 'float'))
losses2 = tf.nn.softmax_cross_entropy_with_logits(
logits=self.logits2, labels=tf.cast(tf.reshape(self.y2, [-1, M * JX]), 'float'))
ce_loss = tf.reduce_mean(loss_mask * losses)
ce_loss2 = tf.reduce_mean(loss_mask * losses2)
tf.add_to_collection('losses', ce_loss)
tf.add_to_collection("losses", ce_loss2)
self.loss = tf.add_n(tf.get_collection('losses', scope=self.scope), name='loss')
self.ansProp_loss = tf.add_n(tf.get_collection('losses', scope=self.scope), name='ansProp_loss')
self.docExpl_ansProp_loss = self.ansProp_loss
tf.summary.scalar(self.loss.op.name, self.loss)
tf.add_to_collection('ema/scalar', self.loss)
if config.supervise_first_doc:
doc_first_attn_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=self.doc_attn_logits[0], labels=self.first_doc_ids)
doc_first_attn_loss = tf.reduce_mean(doc_first_attn_loss, name='doc_first_attn_loss')
tf.summary.scalar('doc_first_attn_loss', doc_first_attn_loss)
tf.add_to_collection('ema/scalar', doc_first_attn_loss)
self.loss = self.loss + config.first_attn_loss_coeff * doc_first_attn_loss
self.docExpl_loss = config.first_attn_loss_coeff * doc_first_attn_loss
else:
self.docExpl_loss = 0.
if config.supervise_final_doc:
answer_doc_ids = tf.squeeze(tf.slice(self.answer_doc_ids, [0, 0], [-1, 1]), axis=1)
answer_word_ids = tf.squeeze(tf.slice(self.answer_word_ids, [0, 0], [-1, 1]), axis=1)
if config.mac_read_strategy=='one_doc_per_it_and_repeat_2nd_step':
doc_attn_logits = self.doc_attn_logits[1]
if config.mac_memory_state_update_rule is None:
batch_nums = tf.range(0, limit=N)
doc_indices = tf.stack([batch_nums, answer_doc_ids], axis=1)
word_attn_logits = tf.gather_nd(self.word_attn_logits[1], doc_indices)
else:
word_attn_logits = self.word_attn_logits[1]
else:
doc_attn_logits = self.doc_attn_logits[-1]
if config.mac_memory_state_update_rule is None:
batch_nums = tf.range(0, limit=N)
doc_indices = tf.stack([batch_nums, answer_doc_ids], axis=1)
word_attn_logits = tf.gather_nd(self.word_attn_logits[-1], doc_indices)
else:
word_attn_logits = self.word_attn_logits[-1]
doc_final_attn_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=doc_attn_logits, labels=answer_doc_ids)
word_attn_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=word_attn_logits, labels=answer_word_ids)
doc_final_attn_loss = tf.reduce_mean(doc_final_attn_loss, name='doc_final_attn_loss')
word_attn_loss = tf.reduce_mean(word_attn_loss, name='word_attn_loss')
tf.summary.scalar('doc_final_attn_loss', doc_final_attn_loss)
tf.summary.scalar('word_attn_loss', word_attn_loss)
tf.add_to_collection('ema/scalar', word_attn_loss)
tf.add_to_collection('ema/scalar', doc_final_attn_loss)
self.docExpl_loss += config.attn_loss_coeff * (doc_final_attn_loss + word_attn_loss)
self.loss = self.loss + config.attn_loss_coeff * doc_final_attn_loss + config.attn_loss_coeff * word_attn_loss
self.docExpl_ansProp_loss += self.docExpl_loss
tf.summary.scalar('total_loss', self.loss)
if config.use_assembler:
assembler_losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=self.assembler.logits, labels=tf.cast(tf.reshape(self.candidate_span_y, [config.batch_size]), 'int32'))
self.assembler_loss = tf.reduce_mean(loss_mask * assembler_losses, name='assembler_loss')
self.loss += config.assembler_loss_coeff * self.assembler_loss
tf.summary.scalar('assembler_loss', self.assembler_loss)
tf.add_to_collection('ema/scalar', self.assembler_loss)
def _build_ema(self):
self.ema = tf.train.ExponentialMovingAverage(self.config.decay)
ema = self.ema
tensors = tf.get_collection("ema/scalar", scope=self.scope) + tf.get_collection("ema/vector", scope=self.scope)
ema_op = ema.apply(tensors)
for var in tf.get_collection("ema/scalar", scope=self.scope):
ema_var = ema.average(var)
tf.summary.scalar(ema_var.op.name, ema_var)
for var in tf.get_collection("ema/vector", scope=self.scope):
ema_var = ema.average(var)
tf.summary.histogram(ema_var.op.name, ema_var)
with tf.control_dependencies([ema_op]):
self.loss = tf.identity(self.loss)
def _build_var_ema(self):
self.var_ema = tf.train.ExponentialMovingAverage(self.config.var_decay)
ema = self.var_ema
ema_op = ema.apply(tf.trainable_variables())
with tf.control_dependencies([ema_op]):
self.loss = tf.identity(self.loss)
def get_loss(self):
return self.loss
def get_global_step(self):
return self.global_step
def get_var_list(self, model_name):
if model_name == 'expl+prop':
self.var_list = [var for var in tf.trainable_variables() if 'assembler' not in var.name]
elif model_name == 'expl+prop_only':
self.var_list = [var for var in tf.trainable_variables() if 'MACRnn' in var.name or 'main/logits1' in var.name]
elif model_name == 'assembler':
self.var_list = [var for var in tf.trainable_variables() if 'MACRnn' not in var.name \
and 'main/logits1' not in var.name]
elif model_name == 'assembler_only':
self.var_list = [var for var in tf.trainable_variables() if 'assembler' in var.name]
elif model_name == 'model_network' or model_name == 'all':
self.var_list = [var for var in tf.trainable_variables()]
else:
raise NotImplementedError
assert len(self.var_list) > 0
return self.var_list
def get_feed_dict(self, batch, is_train, supervised=True):
return _get_feed_dict(self, batch, is_train, supervised)
def bi_attention(config, is_train, h, u, h_mask=None, u_mask=None, scope=None, tensor_dict=None):
with tf.variable_scope(scope or "bi_attention"):
JX = tf.shape(h)[2]
M = tf.shape(h)[1]
JQ = tf.shape(u)[1]
h_aug = tf.tile(tf.expand_dims(h, 3), [1, 1, 1, JQ, 1])
u_aug = tf.tile(tf.expand_dims(tf.expand_dims(u, 1), 1), [1, M, JX, 1, 1])
if h_mask is None:
hu_mask = None
else:
h_mask_aug = tf.tile(tf.expand_dims(h_mask, 3), [1, 1, 1, JQ])
u_mask_aug = tf.tile(tf.expand_dims(tf.expand_dims(u_mask, 1), 1), [1, M, JX, 1])
hu_mask = h_mask_aug & u_mask_aug
u_logits = get_logits([h_aug, u_aug], None, True, wd=config.wd, mask=hu_mask,
is_train=is_train, func=config.logit_func, scope='u_logits') # [N, M, JX, JQ]
u_a = softsel(u_aug, u_logits) # [N, M, JX, d]
h_a = softsel(h, tf.reduce_max(u_logits, 3)) # [N, M, d]
h_a = tf.tile(tf.expand_dims(h_a, 2), [1, 1, JX, 1])
if tensor_dict is not None:
a_u = tf.nn.softmax(u_logits) # [N, M, JX, JQ]
a_h = tf.nn.softmax(tf.reduce_max(u_logits, 3))
tensor_dict['a_u'] = a_u
tensor_dict['a_h'] = a_h
variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=tf.get_variable_scope().name)
for var in variables:
tensor_dict[var.name] = var
return u_a, h_a
def attention_layer(config, is_train, h, u, h_mask=None, u_mask=None, scope=None, tensor_dict=None):
with tf.variable_scope(scope or "attention_layer"):
JX = tf.shape(h)[2]
M = tf.shape(h)[1]
JQ = tf.shape(u)[1]
if config.q2c_att or config.c2q_att:
u_a, h_a = bi_attention(config, is_train, h, u, h_mask=h_mask, u_mask=u_mask, tensor_dict=tensor_dict)
if not config.c2q_att:
u_a = tf.tile(tf.expand_dims(tf.expand_dims(tf.reduce_mean(u, 1), 1), 1), [1, M, JX, 1])
if config.q2c_att:
p0 = tf.concat(axis=3, values=[h, u_a, h * u_a, h * h_a])
else:
p0 = tf.concat(axis=3, values=[h, u_a, h * u_a])
return p0
def zhong_selfatt(U, dim, mask=None, seq_len=None, transform=None, scope=None, reuse=None):
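    # Self-attention pooling over the token axis of a 4-D tensor U [N, M, JX, dim]:
    # a two-layer scorer produces one weight per token, and the weighted sum gives
    # an output of shape [N, M, dim] (optionally expanded / squeezed via `transform`).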
if mask is None:
assert seq_len is not None
mask = tf.expand_dims(tf.sequence_mask(seq_len, tf.shape(U)[1]), axis=1)
with tf.variable_scope(scope or 'zhong_selfAttention', reuse=reuse):
W1 = tf.get_variable("W1", [dim, dim])
b1 = tf.get_variable("b1", [dim,])
W2 = tf.get_variable("W2", [dim, 1])
b2 = tf.get_variable("b2", [1,])
layer1_output = tf.nn.tanh(tf.einsum('ijkl,lt->ijkt', U, W1) + b1)
logits = tf.nn.tanh(tf.squeeze(tf.einsum('ijkl,lt->ijkt', layer1_output, W2) + b2, axis=-1))
masked_logits = logits * tf.cast(mask, dtype='float')
att = tf.nn.softmax(masked_logits)
output = tf.einsum("ijkl,ijk->ijl", U, att)
if transform == 'expand':
output = tf.expand_dims(output, axis=1)
elif transform == 'squeeze':
output = tf.squeeze(output, axis=1)
return output
```
#### File: EPAr/reasoning_layers/mac_layer.py
```python
import tensorflow as tf
from tensorflow.contrib.layers import fully_connected
from my.tensorflow.nn import linear_logits, get_logits, softsel
from tensorflow.python.ops import tensor_array_ops, control_flow_ops
from my.tensorflow.rnn import bidirectional_dynamic_rnn
from my.tensorflow.rnn_cell import SwitchableDropoutWrapper, AttentionCell
from tensorflow.contrib.rnn import BasicLSTMCell
from reasoning_layers.utils import biattention_layer
def dynamic_mac_rnn(cell, context, query, q_len, c_mask, q_mask, q_sub_st=None, context_st=None, query_st=None, cdoc_mask=None, candidates=None, cand_mask=None, greedy_read=False):
if cdoc_mask is None:
assert context_st is None
return cell.apply(context, query, q_len, c_mask, q_mask, q_sub_st=q_sub_st, candidates=candidates, cand_mask=cand_mask)
else:
assert context_st is not None and q_sub_st is not None
assert isinstance(cell, HierarchicalAttnMACRnn)
return cell.apply(context, context_st, query, query_st, q_sub_st, q_len, c_mask, cdoc_mask, q_mask, candidates=candidates, cand_mask=cand_mask, greedy_read=greedy_read)
class MACRnn(object):
"""
    This class implements a standard MAC RNN (https://arxiv.org/abs/1803.03067) adapted for multi-hop QA.
"""
def __init__(self, batch_size, context_dim, query_dim, hidden_dim=80, num_hops=6, bidirectional_input_unit=False, prediction='span-single', \
reuse_cell=True, is_train=None, use_control_unit=True, mode="train", output_unit_type='similarity', reasoning_unit='answer_unit', \
answer_state_update_rule='mlp'):
"""
        num_hops: the number of MAC cells chained together, i.e. the number of reasoning steps.
        bidirectional_input_unit: use a bi-LSTM for the input unit. Defaults to False to save memory.
        prediction: prediction layer. One of 'span-single', 'span-dual', or 'candidates'.
        reuse_cell: use a single cell for all reasoning steps. (Not sure what Hudson and Manning did.)
"""
self.batch_size = batch_size
self.hidden_dim = hidden_dim
self.context_dim = context_dim
self.query_dim = query_dim
self.num_hops = num_hops
self.bidirectional_input_unit = bidirectional_input_unit
self.prediction = prediction
self.reuse_cell = reuse_cell
self.is_train = is_train
self.use_control_unit = use_control_unit
self.mode = mode
self.output_unit_type = output_unit_type
self.reasoning_unit = reasoning_unit
self.answer_state_update_rule = answer_state_update_rule
self.top_attn = []
def apply(self, context, query, q_len, c_mask, q_mask, candidates=None, cand_mask=None, q_sub_st=None):
batch_size = self.batch_size
hidden_dim = self.hidden_dim
query_dim = self.query_dim
reuse_cell = self.reuse_cell
context = tf.squeeze(context, axis=1)
if candidates is not None:
candidates = tf.squeeze(candidates, axis=1)
c_state = tf.zeros((batch_size, hidden_dim))
m_state = tf.zeros((batch_size, hidden_dim))
with tf.variable_scope('MACRnn'):
query, q_rep = self.MACInputUnit(query, q_len)
c_history = []
m_history = []
for i in range(self.num_hops):
if reuse_cell:
scope_str = 'MACRnn-layer-%d' % 0
c_state, m_state = self.MACCell(i, query, q_rep, context, c_mask, q_mask, c_history, m_history, \
c_state, m_state, scope_str, reuse=(i!=0))
else:
scope_str = 'MACRnn-layer-%d' % i
c_state, m_state = self.MACCell(i, query, q_rep, context, c_mask, q_mask, c_history, m_history, \
c_state, m_state, scope_str, reuse=False)
c_history.append(c_state)
m_history.append(m_state)
if self.prediction == 'candidates':
g1 = self.MACOutputUnit(m_state, context, candidates)
return tf.expand_dims(g1, axis=1)
elif self.prediction == 'span-dual':
g1, g2 = self.MACOutputUnit(m_state, context)
return tf.expand_dims(g1, axis=1), tf.expand_dims(g2, axis=1)
else:
assert self.prediction == 'span-single'
g1, logits = self.MACOutputUnit(m_state, context)
return tf.expand_dims(g1, axis=1), logits
def MACInputUnit(self, query, query_len, reuse=False):
"""
        Inputs: encoded query and its length.
        Outputs: the query re-encoded by another LSTM, and the final state of that LSTM as
        a fixed-size representation of the query.
"""
with tf.variable_scope('input_unit', initializer=tf.random_uniform_initializer, reuse=reuse):
hidden_dim = self.hidden_dim
if self.bidirectional_input_unit is True:
cell_fw = tf.contrib.rnn.LSTMCell(hidden_dim, state_is_tuple=True)
cell_bw = tf.contrib.rnn.LSTMCell(hidden_dim, state_is_tuple=True)
(encoder_outputs, (fw_st, bw_st)) = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, query, \
dtype=tf.float32, sequence_length=query_len, swap_memory=True)
query_embed = tf.concat(axis=2, values=encoder_outputs)
query_rep = tf.concat([fw_st.c, bw_st.c], axis=1)
W_emb = tf.get_variable('W_emb', [2*hidden_dim, hidden_dim])
b_emb = tf.get_variable('b_emb', [hidden_dim])
W_rep = tf.get_variable('W_rep', [2*hidden_dim, hidden_dim])
b_rep = tf.get_variable('b_rep', [hidden_dim])
query_embed = tf.einsum('ijk,kl->ijl', query_embed, W_emb) + b_emb
query_rep = tf.matmul(query_rep, W_rep) + b_rep
else:
cell_fw = tf.contrib.rnn.LSTMCell(hidden_dim, state_is_tuple=True)
query_embed, final_st = tf.nn.dynamic_rnn(cell_fw, query, dtype=tf.float32, \
sequence_length=query_len)
query_rep = final_st.c
return query_embed, query_rep
def MACCell(self, layer: int, cw, q, k, c_mask, q_mask, c_history, m_history, c_state, m_state, scope_str, reuse=False):
hidden_dim = self.hidden_dim
context_dim = self.context_dim
query_dim = self.query_dim
def control_unit():
with tf.variable_scope('control_unit'):
W_cq = tf.get_variable('W_cq', [2*hidden_dim, hidden_dim])
b_cq = tf.get_variable('b_cq', [hidden_dim])
cq = tf.matmul(tf.concat([c_state, q], axis=1), W_cq) + b_cq
W_ca = tf.get_variable('W_ca', [hidden_dim, 1])
b_ca = tf.get_variable('b_ca', [1])
ca = tf.squeeze(tf.einsum('ijk,kl->ijl', tf.einsum('ik,ijk->ijk', cq, cw), W_ca), axis=2) + b_ca
cv = tf.nn.softmax(ca)
return tf.einsum('ijk,ij->ik', cw, cv)
def read_unit(new_c_state):
"""
            Does not include the I' term from the original MAC paper.
"""
with tf.variable_scope('read_unit'):
W_m = tf.get_variable('W_m', [hidden_dim, hidden_dim])
b_m = tf.get_variable('b_m', [hidden_dim])
W_k = tf.get_variable('W_k', [context_dim, hidden_dim])
b_k = tf.get_variable('b_k', [hidden_dim])
I = tf.einsum('il,ijl->ijl', tf.matmul(m_state, W_m) + b_m, tf.einsum('ijk,kl->ijl', k, W_k) + b_k) # [batch_size, context_len, hidden_dim]
W_ra = tf.get_variable('W_ra', [hidden_dim, 1])
b_ra = tf.get_variable('b_ra', [1])
ra = tf.squeeze(tf.einsum('ijk,kl->ijl', tf.einsum('ik,ijk->ijk', new_c_state, I), W_ra), axis=2) + b_ra
rv = tf.nn.softmax(ra)
return tf.einsum('ijk,ij->ik', k, rv)
def write_unit(r, new_c_state):
with tf.variable_scope('write_unit'):
W_m = tf.get_variable('W_m', [context_dim + hidden_dim, hidden_dim])
b_m = tf.get_variable('b_m', [hidden_dim])
m_prev = tf.matmul(tf.concat([r, m_state], axis=1), W_m) + b_m
if layer > 0 or self.reuse_cell:
W_c = tf.get_variable('W_c', [hidden_dim, 1])
b_c = tf.get_variable('b_c', [1])
#sa = tf.nn.softmax(tf.squeeze(tf.einsum('ijk,kl->ijl', tf.multiply(new_c_state, c_history), W_c), axis=2))
W_s = tf.get_variable('W_s', [hidden_dim, hidden_dim])
W_p = tf.get_variable('W_p', [hidden_dim, hidden_dim])
b = tf.get_variable('b', [hidden_dim])
if layer > 0:
sa = tf.nn.softmax(tf.squeeze(tf.einsum('ijk,kl->ijl', tf.einsum('ik,ijk->ijk', new_c_state, c_history), W_c) + b_c, axis=2))
m_sa = tf.einsum('ijk,ij->ik', m_history, sa)
m_prime = tf.matmul(m_sa, W_s) + tf.matmul(m_prev, W_p) + b
else:
m_prime = tf.matmul(m_prev, W_p) + b
W_c_2 = tf.get_variable('W_c_2', [hidden_dim, 1])
b_c_2 = tf.get_variable('b_c_2', [1])
c_prime = tf.matmul(new_c_state, W_c_2) + b_c_2
return tf.nn.sigmoid(c_prime) * m_state + (1 - tf.nn.sigmoid(c_prime)) * m_prime
if layer > 0:
c_history = tf.stack(c_history, axis=1)
m_history = tf.stack(m_history, axis=1)
with tf.variable_scope(scope_str, reuse=reuse) as scope:
new_c_state = control_unit()
new_m_state = write_unit(read_unit(new_c_state), new_c_state)
return new_c_state, new_m_state
def MACOutputUnit(self, m_state, context, candidates=None, query=None, reuse=False):
hidden_dim = self.hidden_dim
context_dim = self.context_dim
with tf.variable_scope('output_unit', reuse=reuse):
if self.prediction == 'candidates':
assert candidates is not None
cand_dim = context_dim
#cand_dim = candidates.get_shape()[-1]
if self.output_unit_type == 'similarity':
W_m = tf.get_variable('W_m', [hidden_dim, hidden_dim])
b_m = tf.get_variable('b_m', [hidden_dim])
M = tf.matmul(m_state, W_m) + b_m
W_k = tf.get_variable('W_k', [cand_dim, hidden_dim])
b_k = tf.get_variable('b_k', [hidden_dim])
I = tf.einsum('ijk,kl->ijl', candidates, W_k) + b_k
g1 = tf.einsum('ik,ijk->ijk', M, I)
elif self.output_unit_type == 'nested-triplet-mlp':
num_cand = tf.shape(candidates)[1]
if self.reasoning_unit == 'bi-attn' or self.reasoning_unit == 'attention-lstm' or self.reasoning_unit == 'concat_first_sent' or self.reasoning_unit == 'concat_full_doc':
similarity = tf.einsum('ik,ijk->ijk', m_state, candidates)
M = tf.tile(tf.expand_dims(m_state, axis=1), [1, num_cand, 1])
W1 = tf.get_variable('W1', [3*cand_dim, 2*cand_dim])
b1 = tf.get_variable('b1', [2*cand_dim])
W2 = tf.get_variable('W2', [2*cand_dim, cand_dim])
b2 = tf.get_variable('b2', [cand_dim])
concat_in = tf.concat(axis=-1, values=[tf.reshape(M, [-1, cand_dim]), tf.reshape(candidates, [-1, cand_dim]), tf.reshape(similarity, [-1, cand_dim])])
output = tf.matmul(tf.nn.relu(tf.matmul(concat_in, W1) + b1), W2) + b2
else:
W_k = tf.get_variable('W_k', [cand_dim, hidden_dim])
b_k = tf.get_variable('b_k', [hidden_dim])
similarity = tf.einsum('ik,ijk->ijk', m_state, tf.einsum('ijk,kl->ijl', candidates, W_k)) + b_k
M = tf.tile(tf.expand_dims(m_state, axis=1), [1, num_cand, 1])
W1 = tf.get_variable('W1', [2*hidden_dim + cand_dim, hidden_dim])
b1 = tf.get_variable('b1', [hidden_dim])
W2 = tf.get_variable('W2', [hidden_dim, 40])
b2 = tf.get_variable('b2', [40])
concat_in = tf.concat(axis=-1, values=[tf.reshape(M, [-1, hidden_dim]), tf.reshape(candidates, [-1, cand_dim]), tf.reshape(similarity, [-1, hidden_dim])])
output = tf.matmul(tf.nn.relu(tf.matmul(concat_in, W1) + b1), W2) + b2
g1 = tf.reshape(output, [self.batch_size, -1, context_dim])
elif self.output_unit_type == 'triplet-mlp':
assert query is not None
assert self.reasoning_unit == 'None' or self.reasoning_unit is None
num_cand = tf.shape(candidates)[1]
query_dim = self.query_dim
W_q = tf.get_variable('W_q', [query_dim, hidden_dim])
b_q = tf.get_variable('b_q', [hidden_dim])
query = tf.matmul(query, W_q) + b_q
query = tf.tile(tf.expand_dims(query, axis=1), [1, num_cand, 1])
W_k = tf.get_variable('W_k', [cand_dim, hidden_dim])
b_k = tf.get_variable('b_k', [hidden_dim])
similarity = tf.einsum('ik,ijk->ijk', m_state, tf.einsum('ijk,kl->ijl', candidates, W_k)) + b_k
M = tf.tile(tf.expand_dims(m_state, axis=1), [1, num_cand, 1])
W1 = tf.get_variable('W1', [3*hidden_dim + cand_dim, hidden_dim])
b1 = tf.get_variable('b1', [hidden_dim])
W2 = tf.get_variable('W2', [hidden_dim, 40])
b2 = tf.get_variable('b2', [40])
concat_in = tf.concat(axis=-1, values=[tf.reshape(query, [-1, hidden_dim]), tf.reshape(M, [-1, hidden_dim]), tf.reshape(candidates, [-1, cand_dim]), tf.reshape(similarity, [-1, hidden_dim])])
output = tf.matmul(tf.nn.relu(tf.matmul(concat_in, W1) + b1), W2) + b2
g1 = tf.reshape(output, [self.batch_size, -1, 40])
else:
raise NotImplementedError
return g1
else:
W_m = tf.get_variable('W_m', [hidden_dim, hidden_dim])
b_m = tf.get_variable('b_m', [hidden_dim])
W_k = tf.get_variable('W_k', [context_dim, hidden_dim])
b_k = tf.get_variable('b_k', [hidden_dim])
I = tf.einsum('ijk,kl->ijl', context, W_k) + b_k
M = tf.matmul(m_state, W_m) + b_m
g1 = tf.einsum('ik,ijk->ijk', M, I)
if self.prediction == 'span-dual':
p2 = tf.concat([I, g1], axis=2)
W_p = tf.get_variable('W_p', [2*hidden_dim, hidden_dim])
b_p = tf.get_variable('b_p', [hidden_dim])
I_prime = tf.einsum('ijk,kl->ijl', p2, W_p) + b_p
g2 = tf.einsum('ik,ijk->ijk', M, I_prime)
return g1, g2
else:
W_ra = tf.get_variable('W_ra', [hidden_dim, 1])
b_ra = tf.get_variable('b_ra', [1])
ra = tf.squeeze(tf.einsum('ijk,kl->ijl', g1, W_ra), axis=2) + b_ra
return g1, ra
class HierarchicalAttnMACRnn(MACRnn):
def __init__(self, batch_size, context_dim, query_dim, hidden_dim=80, num_hops=6, bidirectional_input_unit=False, prediction='candidates', input_keep_prob=0.8, reuse_cell=True, \
is_train=None, use_control_unit=True, mode="train", read_strategy='full', output_unit_type='similarity', reasoning_unit='answer_unit', \
memory_state_update_rule=None, answer_state_update_rule='mlp', attention_style='similarity', \
answer_doc_ids=None, sents_len=None, oracle=None, reinforce=False, attention_cell_dropout=False, \
read_topk_docs=0):
"""
        num_hops: the number of MAC cells chained together, i.e. the number of reasoning steps.
        bidirectional_input_unit: use a bi-LSTM for the input unit. Defaults to False to save memory.
        prediction: prediction layer. One of 'span-single', 'span-dual', or 'candidates'.
        reuse_cell: use a single cell for all reasoning steps. (Not sure what Hudson and Manning did.)
"""
assert prediction == "candidates"
assert reuse_cell == True
super(HierarchicalAttnMACRnn, self).__init__(batch_size, context_dim, query_dim, hidden_dim, num_hops, \
bidirectional_input_unit, prediction, reuse_cell, is_train, use_control_unit, mode, output_unit_type, \
reasoning_unit, answer_state_update_rule)
self.input_keep_prob = input_keep_prob
self.top_doc_attn = []
self.top_attn_prob = []
self.doc_attn = []
self.read_strategy = read_strategy
self.rv_doc_history = []
self.doc_indices_history = []
self.attention_style = attention_style
self.memory_state_update_rule = memory_state_update_rule
self.oracle = oracle
if self.oracle is not None:
assert answer_doc_ids is not None
self.answer_doc_ids = answer_doc_ids
self.sents_len = sents_len
self.answer_list = []
self._c_state = tf.placeholder('float', [batch_size, query_dim], name='_c_state')
self._m_state = tf.placeholder('float', [batch_size, hidden_dim], name='_m_state')
self._a_state = tf.placeholder('float', [batch_size, hidden_dim], name='_a_state')
self._c_history = tf.placeholder('float', [batch_size, None, query_dim], name='_c_history')
self._m_history = tf.placeholder('float', [batch_size, None, hidden_dim], name='_m_history')
self.reinforce = reinforce
self.attention_cell_dropout = attention_cell_dropout
self.read_topk_docs = read_topk_docs
def apply(self, context, context_st, query, query_st, q_sub_st, q_len, c_mask, cdoc_mask, q_mask, candidates, cand_mask, greedy_read=False, reuse=False):
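        # Overview: project q_sub_st into the initial memory state, run `num_hops`
        # HierarchicalAttnMACCell steps (each step attends over documents, then words,
        # optionally committing to one document per hop), and finally score the answer
        # candidates with MACOutputUnit using the final (and per-hop) answer states.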
batch_size = self.batch_size
hidden_dim = self.hidden_dim
query_dim = self.query_dim
self.docs_len = tf.reduce_sum(tf.cast(c_mask, 'int32'), 2)
candidates = tf.squeeze(candidates, axis=1)
c_state = tf.zeros((batch_size, query_dim))
m_state = tf.zeros((batch_size, hidden_dim))
a_state = tf.zeros((batch_size, hidden_dim))
with tf.variable_scope('MACRnn'):
with tf.variable_scope('q_sub_proj'):
W = tf.get_variable('W', [query_dim, hidden_dim])
b = tf.get_variable('b', [hidden_dim])
m_state = tf.matmul(q_sub_st, W) + b
self.c_history = []
self.m_history = []
self.a_history = []
self.doc_attn_logits_lst = []
self.word_attn_logits_lst = []
self.doc_attn_weights_lst = []
cell = tf.contrib.rnn.GRUCell(hidden_dim)
self.cell = cell
for i in range(self.num_hops):
scope_str = 'MACRnn-layer-%d' % 0
if self.read_strategy == 'one_doc_per_it_and_repeat_2nd_step' and i > 1:
m_state = self.m_history[0]
a_state = self.a_history[0]
c_state, m_state, a_state, doc_attn_logits, doc_attn_weights, word_attn_logits = self.HierarchicalAttnMACCell(i, cell, query, query_st, q_sub_st, context, context_st, c_mask, cdoc_mask, \
q_mask, self.c_history, self.m_history, c_state, m_state, a_state, scope_str, reuse=(reuse or i!=0), greedy_read=greedy_read)
self.doc_attn_logits_lst.append(doc_attn_logits)
self.word_attn_logits_lst.append(word_attn_logits)
self.doc_attn_weights_lst.append(doc_attn_weights)
self.c_history.append(c_state)
self.m_history.append(m_state)
if (self.reasoning_unit == 'concat_first_sent' or self.reasoning_unit == 'concat_full_doc') and i == self.num_hops - 1:
with tf.variable_scope("concat_read_lstm", reuse=False):
max_len = tf.reduce_max(self.concat_selected_doc_len)
self.concat_selected_doc_mask = []
for k in range(self.batch_size):
concat_selected_doc_mask_k = tf.concat(values=[tf.ones([self.concat_selected_doc_len[k]]), tf.zeros([max_len-self.concat_selected_doc_len[k]])], axis=0)
self.concat_selected_doc_mask.append(concat_selected_doc_mask_k)
self.concat_selected_doc = tf.stack(self.concat_selected_doc, axis=0)
self.concat_selected_doc_mask = tf.cast(tf.stack(self.concat_selected_doc_mask, axis=0), 'bool')
p0 = biattention_layer(self.is_train, self.concat_selected_doc, query, h_mask=self.concat_selected_doc_mask, u_mask=q_mask)
p0 = tf.squeeze(p0, axis=1)
cell_fw = BasicLSTMCell(40, state_is_tuple=True)
cell_bw = BasicLSTMCell(40, state_is_tuple=True)
cell_fw = SwitchableDropoutWrapper(cell_fw, self.is_train, input_keep_prob=self.input_keep_prob)
cell_bw = SwitchableDropoutWrapper(cell_bw, self.is_train, input_keep_prob=self.input_keep_prob)
(fw_h, bw_h), _ = bidirectional_dynamic_rnn(cell_fw, cell_bw, p0, self.concat_selected_doc_len, dtype='float')
x = tf.concat(axis=2, values=[fw_h, bw_h])
logits = linear_logits([x], True, input_keep_prob=self.input_keep_prob, mask=self.concat_selected_doc_mask, is_train=self.is_train, scope='logits1')
probs = tf.nn.softmax(logits)
doc_rep = tf.einsum('ijk,ij->ik', self.concat_selected_doc, probs)
a_state = doc_rep
self.a_history.append(a_state)
if self.oracle == 'extra':
scope_str = 'MACRnn-layer-%d' % 0
if self.read_strategy == 'one_doc_per_it_and_repeat_2nd_step' and i > 1:
m_state = self.m_history[0]
a_state = self.a_history[0]
_, _, a_state, _, _, _ = self.HierarchicalAttnMACCell(self.num_hops, cell, query, query_st, q_sub_st, context, context_st, c_mask, cdoc_mask, \
q_mask, self.c_history, self.m_history, c_state, m_state, a_state, scope_str, reuse=True, greedy_read=greedy_read)
if self.prediction == 'candidates':
if self.output_unit_type == 'triplet-mlp':
g1 = self.MACOutputUnit(a_state, context, candidates, query=query)
if (self.reasoning_unit != 'concat_first_sent' and self.reasoning_unit != 'concat_full_doc') and (self.reasoning_unit != 'attention-lstm' or self.read_strategy != 'one_doc_per_it'):
for i in range(self.num_hops):
gi = self.MACOutputUnit(self.a_history[i], context, candidates, query=query)
self.answer_list.append(tf.expand_dims(gi, axis=1))
else:
g1 = self.MACOutputUnit(a_state, context, candidates)
if (self.reasoning_unit != 'concat_first_sent' and self.reasoning_unit != 'concat_full_doc') and (self.reasoning_unit != 'attention-lstm' or self.read_strategy != 'one_doc_per_it'):
for i in range(self.num_hops):
gi = self.MACOutputUnit(self.a_history[i], context, candidates, reuse=True)
self.answer_list.append(tf.expand_dims(gi, axis=1))
return tf.expand_dims(g1, axis=1)
else:
raise NotImplementedError
def initialize_state(self, q_sub):
with tf.variable_scope('initial_m'):
W = tf.get_variable('W', [self.hidden_dim*2, self.hidden_dim])
b = tf.get_variable('b', [self.hidden_dim])
new_state = tf.matmul(q_sub, W) + b
return new_state
def HierarchicalAttnMACCell(self, layer: int, cell, cw, cw_st, q_sub_st, k, k_st, c_mask, cdoc_mask, q_mask, c_history, m_history, c_state, m_state, a_state, scope_str, \
reuse=False, out_of_graph=False, greedy_read=False):
"""
        The second implementation, based on the MAC cell with hierarchical attention.
        The read unit no longer depends on c_state.
        Adds an answer state a_state.
Input: k [N, M, JX, context_dim]
"""
hidden_dim = self.hidden_dim
context_dim = self.context_dim
query_dim = self.query_dim
def control_unit():
with tf.variable_scope('control_unit'):
W_cq = tf.get_variable('W_cq', [query_dim + hidden_dim, query_dim])
b_cq = tf.get_variable('b_cq', [query_dim])
cq = tf.matmul(tf.concat([c_state, m_state], axis=1), W_cq) + b_cq
pre_ca = tf.einsum('ik,ijk->ijk', cq, cw)
ca = linear_logits([pre_ca], True, input_keep_prob=self.input_keep_prob, is_train=self.is_train, mask=q_mask)
cv = tf.nn.softmax(ca)
return tf.einsum('ijk,ij->ik', cw, cv)
def read_unit(m_state):
with tf.variable_scope('read_unit'):
W_cm = tf.get_variable('W_cm', [hidden_dim, hidden_dim])
b_cm = tf.get_variable('b_cm', [hidden_dim])
cm_state = tf.matmul(m_state, W_cm) + b_cm
if layer > 1 and self.read_strategy == 'one_doc_per_it_and_repeat_2nd_step':
ra_doc = self.doc_attn_logits_lst[1]
rv_doc = self.doc_attn_weights_lst[1]
else:
W_k2 = tf.get_variable('W_k2', [query_dim, hidden_dim])
b_k2 = tf.get_variable('b_k2', [hidden_dim])
I_doc = tf.einsum('ijk,kl->ijl', k_st, W_k2) + b_k2 # [N, M, hidden_dim]
pre_ra_doc = tf.einsum('ik,ijk->ijk', cm_state, I_doc)
if self.attention_style == 'Bahdanau':
W_b2 = tf.get_variable('W_b2', [hidden_dim, hidden_dim])
b_b2 = tf.get_variable('b_b2', [hidden_dim])
shape_1 = tf.shape(I_doc)[1]
tiled_cm_state = tf.tile(tf.expand_dims(cm_state, axis=1), [1, shape_1, 1])
concat_in = tf.reshape(tiled_cm_state, [-1, hidden_dim]) + tf.reshape(I_doc, [-1, hidden_dim]) + tf.reshape(pre_ra_doc, [-1, hidden_dim])
pre_ra_doc = tf.matmul(concat_in, W_b2) + b_b2
pre_ra_doc = tf.reshape(pre_ra_doc, [-1, shape_1, hidden_dim])
ra_doc = linear_logits([pre_ra_doc], True, is_train=self.is_train, input_keep_prob=self.input_keep_prob, mask=cdoc_mask, scope='logits2')
rv_doc = tf.nn.softmax(ra_doc) # document-level attention weight
# Word-level attention
if self.memory_state_update_rule is None:
W_k = tf.get_variable('W_k', [context_dim, hidden_dim])
b_k = tf.get_variable('b_k', [hidden_dim])
I_word = tf.einsum('ijkl,lm->ijkm', k, W_k) + b_k
pre_ra_word = tf.einsum('il,ijkl->ijkl', cm_state, I_word)
if self.attention_style == 'Bahdanau':
W_b = tf.get_variable('W_b', [hidden_dim, hidden_dim])
b_b = tf.get_variable('b_b', [hidden_dim])
shape_1 = tf.shape(I_word)[1]
shape_2 = tf.shape(I_word)[2]
tiled_cm_state = tf.tile(tf.expand_dims(tf.expand_dims(cm_state, axis=1), axis=1), [1, shape_1, shape_2, 1])
concat_in = tf.reshape(tiled_cm_state, [-1, hidden_dim]) + tf.reshape(I_word, [-1, hidden_dim]) + tf.reshape(pre_ra_word, [-1, hidden_dim])
pre_ra_word = tf.matmul(concat_in, W_b) + b_b
pre_ra_word = tf.reshape(pre_ra_word, [-1, shape_1, shape_2, hidden_dim])
ra_word = linear_logits([pre_ra_word], True, is_train=self.is_train, input_keep_prob=self.input_keep_prob, mask=c_mask, scope='logits1')
rv_word = tf.nn.softmax(ra_word) # word-level attention weight
r_doc = tf.einsum('ijkl,ijk->ijl', k, rv_word) # [N, M, context_dim]
doc_indices = None
if self.read_strategy == 'one_doc_per_it' or self.read_strategy == 'one_doc_per_it_and_mask_all_read' or self.read_strategy == 'one_doc_per_it_and_mask_read_pairs' \
or self.read_strategy == 'one_doc_per_it_and_repeat_2nd_step':
if out_of_graph or layer > 0:
if self.read_strategy == 'one_doc_per_it_and_mask_read_pairs':
prev_read = self.doc_attn[layer-1]
doc_idx = tf.expand_dims(tf.stack(self.doc_attn, axis=1), axis=2)
shape = tf.shape(rv_doc)
updates = tf.negative(tf.ones([self.batch_size, layer]))
batch_nums = tf.tile(tf.expand_dims(tf.expand_dims(tf.range(0, limit=self.batch_size), axis=1), axis=1), [1, layer, 1])
indices = tf.concat([batch_nums, doc_idx], axis=2) # [batch_size, layer, 2]
elif self.read_strategy == 'one_doc_per_it':
if out_of_graph:
doc_idx = tf.stack(self.doc_attn, axis=1)[:, layer-1]
else:
doc_idx = self.doc_attn[layer-1]
shape = tf.shape(rv_doc)
updates = tf.negative(tf.ones([self.batch_size]))
batch_nums = tf.expand_dims(tf.range(0, limit=self.batch_size), axis=1)
indices = tf.concat([batch_nums, tf.reshape(doc_idx, [self.batch_size, 1])], axis=1)
elif self.read_strategy == 'one_doc_per_it_and_mask_all_read' or self.read_strategy == 'one_doc_per_it_and_repeat_2nd_step':
#if self.mode == 'train':
doc_idx = tf.stack(self.doc_attn, axis=1)
# else:
# doc_idx = tf.expand_dims(tf.stack(self.doc_attn, axis=1), axis=2)
shape = tf.shape(rv_doc)
updates = tf.negative(tf.ones([self.batch_size, layer]))
batch_nums = tf.tile(tf.expand_dims(tf.expand_dims(tf.range(0, limit=self.batch_size), axis=1), axis=1), [1, layer, 1])
indices = tf.concat([batch_nums, doc_idx], axis=2) # [batch_size, layer, 2]
updates_2 = tf.ones([self.batch_size, layer]) * 1e-30
very_small_number = tf.scatter_nd(indices, updates_2, shape)
mask = tf.scatter_nd(indices, updates, shape)
mask = mask + 1
rv_doc = rv_doc * mask
if self.read_strategy == 'one_doc_per_it_and_mask_all_read' or self.read_strategy == 'one_doc_per_it_and_repeat_2nd_step':
rv_doc = rv_doc + very_small_number
if self.mode == 'test':
if self.oracle == 'final' and layer == self.num_hops - 1:
new_doc_idx = tf.slice(self.answer_doc_ids, [0, 0], [-1, 1])
else:
new_doc_idx = tf.expand_dims(tf.argmax(tf.log(rv_doc), axis=1), axis=-1)
elif self.mode == 'train':
if (self.oracle == 'final' and layer == self.num_hops - 1) or (self.oracle == 'extra' and layer == self.num_hops):
new_doc_idx = tf.slice(self.answer_doc_ids, [0, 0], [-1, 1])
else:
if self.read_topk_docs > 0:
topk_doc_mask_1 = tf.ones([self.batch_size, tf.minimum(tf.shape(rv_doc)[1], self.read_topk_docs)])
topk_doc_mask_0 = tf.zeros([self.batch_size, tf.maximum(tf.shape(rv_doc)[1]-self.read_topk_docs, 0)])
topk_doc_mask = tf.concat([topk_doc_mask_1, topk_doc_mask_0], axis=1)
rv_doc = rv_doc * topk_doc_mask
if (greedy_read or self.read_strategy == 'one_doc_per_it_and_repeat_2nd_step') and \
self.reinforce is False:
new_doc_idx = tf.expand_dims(tf.argmax(tf.log(rv_doc), axis=1), axis=-1)
else:
new_doc_idx = tf.multinomial(tf.log(rv_doc), 1)
#new_doc_idx = tf.argmax(tf.log(rv_doc), axis=1)
else:
raise NotImplementedError
new_doc_idx = tf.cast(new_doc_idx, 'int32')
shape = tf.shape(rv_doc)
updates = tf.ones([self.batch_size])
batch_nums = tf.expand_dims(tf.range(0, limit=self.batch_size), axis=1)
doc_indices = tf.concat([batch_nums, tf.cast(tf.reshape(new_doc_idx, [self.batch_size, 1]), 'int32')], axis=1)
if self.memory_state_update_rule == 'bi-attn':
selected_doc = tf.gather_nd(k, indices)
selected_mask = tf.gather_nd(c_mask, indices)
p0 = biattention_layer(self.is_train, selected_doc, cw, h_mask=selected_mask, u_mask=q_mask)
p0 = tf.squeeze(p0, axis=1)
W_p0 = tf.get_variable('W_p0', [hidden_dim*2, hidden_dim])
b_p0 = tf.get_variable('b_p0', [hidden_dim])
I_word = tf.einsum('ijk,km->ijm', p0, W_p0) + b_p0
pre_ra_word = tf.einsum('ik,ijk->ijk', cm_state, I_word)
ra_word = linear_logits([pre_ra_word], True, is_train=self.is_train, input_keep_prob=self.input_keep_prob, mask=selected_mask, scope='logits1')
rv_word = tf.nn.softmax(ra_word) # word-level attention weight
r_doc = tf.einsum('ikl,ik->il', p0, rv_word) # [N, M, context_dim]
r = r_doc # No need to apply doc_mask again.
else:
r = tf.gather_nd(r_doc, doc_indices)
print('one_doc_per_it')
elif self.read_strategy == 'mask_previous_max':
if layer > 0:
doc_idx = self.doc_attn[layer-1]
shape = tf.shape(rv_doc)
updates = tf.negative(tf.ones([self.batch_size]))
batch_nums = tf.expand_dims(tf.range(0, limit=self.batch_size), axis=1)
indices = tf.concat([batch_nums, tf.cast(tf.reshape(doc_idx, [self.batch_size, 1]), 'int32')], axis=1)
mask = tf.scatter_nd(indices, updates, shape)
mask = mask + 1
#self.mask = mask
rv_doc = rv_doc * mask
new_doc_idx = tf.argmax(tf.log(rv_doc), axis=1)
r = tf.einsum('ijk,ij->ik', r_doc, rv_doc)
else:
assert self.read_strategy == 'full'
new_doc_idx = tf.argmax(tf.log(rv_doc), axis=1)
r = tf.einsum('ijk,ij->ik', r_doc, rv_doc)
if out_of_graph is False:
self.doc_attn.append(new_doc_idx)
self.rv_doc_history.append(rv_doc)
self.doc_indices_history.append(doc_indices)
_, topk_docs = tf.nn.top_k(rv_doc, 3)
topk_words_prob, topk_words = tf.nn.top_k(rv_word[:,topk_docs[0, 0]], 20)
if out_of_graph is False:
self.top_doc_attn.append(topk_docs)
self.top_attn.append(topk_words)
self.top_attn_prob.append(topk_words_prob)
return r, ra_doc, rv_doc, ra_word
def write_unit(r, new_c_state, c_history, m_history, query=None):
with tf.variable_scope('write_unit'):
doc_indices = self.doc_indices_history[layer]
new_m_state, output = cell(r, m_state)
if self.reasoning_unit == 'answer_unit':
W_c = tf.get_variable('W_c', [query_dim, hidden_dim])
b_c = tf.get_variable('b_c', [hidden_dim])
c_proj = tf.matmul(new_c_state, W_c) + b_c
W1 = tf.get_variable('W1', [3*hidden_dim, 2*hidden_dim])
b1 = tf.get_variable('b1', [2*hidden_dim])
W2 = tf.get_variable('W2', [2*hidden_dim, hidden_dim])
b2 = tf.get_variable('b2', [hidden_dim])
concat_in = tf.concat(axis=-1, values=[output, c_proj, output*c_proj])
new_ans = tf.matmul(tf.nn.relu(tf.matmul(concat_in, W1) + b1), W2) + b2
if self.answer_state_update_rule == 'bi-attn':
assert query is not None
selected_doc = tf.einsum('ijkl,ij->ikl', k, doc_mask)
selected_mask = tf.cast(tf.einsum('ijk,ij->ik', tf.cast(c_mask, 'float32'), doc_mask), 'bool')
p0 = biattention_layer(self.is_train, selected_doc, query, h_mask=selected_mask, u_mask=q_mask)
p0 = tf.squeeze(p0, axis=1)
logits = linear_logits([selected_doc, p0], True, is_train=self.is_train, input_keep_prob=self.input_keep_prob, mask=selected_mask)
weights = tf.nn.softmax(logits)
new_ans_2 = tf.einsum('ijk,ij->ik', selected_doc, weights)
W_a = tf.get_variable('W_a', [self.context_dim, hidden_dim])
b_a = tf.get_variable('b_a', [hidden_dim])
new_ans_2 = tf.matmul(new_ans_2, W_a) + b_a
new_ans = tf.concat([new_ans, new_ans_2], axis=-1)
W_a2 = tf.get_variable('W_a2', [hidden_dim * 2, hidden_dim])
b_a2 = tf.get_variable('b_a2', [hidden_dim])
new_ans = tf.matmul(new_ans, W_a2) + b_a2
else:
assert self.answer_state_update_rule == 'mlp'
W_g = tf.get_variable('W_g', [hidden_dim, 1])
b_g = tf.get_variable('b_g', [1])
gate = tf.matmul(output*c_proj, W_g) + b_g
new_a_state = tf.sigmoid(gate) * new_ans + (1-tf.sigmoid(gate)) * a_state
elif self.reasoning_unit == 'mlp':
c_proj = new_c_state
W1 = tf.get_variable('W1', [3*query_dim, 3*query_dim])
b1 = tf.get_variable('b1', [3*query_dim])
W2 = tf.get_variable('W2', [3*query_dim, hidden_dim])
b2 = tf.get_variable('b2', [hidden_dim])
# concat_in = tf.concat(axis=-1, values=[output, c_proj, output*c_proj])
concat_in = tf.concat(axis=-1, values=[r, c_proj, r*c_proj])
new_a_state = tf.matmul(tf.nn.relu(tf.matmul(concat_in, W1) + b1), W2) + b2
elif self.reasoning_unit == 'bi-attn':
c_proj = new_c_state
#selected_doc = tf.einsum('ijkl,ij->ikl', k, doc_mask)
selected_doc = tf.gather_nd(k, doc_indices)
#selected_mask = tf.cast(tf.einsum('ijk,ij->ik', tf.cast(c_mask, 'float32'), doc_mask), 'bool')
selected_mask = tf.cast(tf.gather_nd(tf.cast(c_mask, 'float32'), doc_indices), 'bool')
p0 = biattention_layer(self.is_train, selected_doc, query, h_mask=selected_mask, u_mask=q_mask)
p0 = tf.squeeze(p0, axis=1)
logits = linear_logits([selected_doc, p0], True, is_train=self.is_train, input_keep_prob=self.input_keep_prob, mask=selected_mask)
weights = tf.nn.softmax(logits)
new_a_state = tf.einsum('ijk,ij->ik', selected_doc, weights)
elif self.reasoning_unit == 'concat_first_sent' or self.reasoning_unit == 'concat_full_doc':
doc2 = tf.gather_nd(k, doc_indices)
doc2_mask = tf.cast(tf.gather_nd(tf.cast(c_mask, 'float32'), doc_indices), 'bool')
if self.reasoning_unit == 'concat_first_sent':
doc2_first_sent_len = tf.gather_nd(self.sents_len[:, :, 0], doc_indices)
else:
doc2_first_sent_len = tf.gather_nd(self.docs_len, doc_indices)
if layer == 0:
print(doc2.get_shape())
print(tf.reshape(tf.slice(doc2, [0, 0, 0], [-1, tf.reduce_max(doc2_first_sent_len), -1]), [self.batch_size, -1, context_dim]).get_shape())
self.concat_selected_doc = tf.unstack(tf.reshape(tf.slice(doc2, [0, 0, 0], [-1, tf.reduce_max(doc2_first_sent_len), -1]), [self.batch_size, -1, context_dim]), axis=0)
assert len(self.concat_selected_doc) == self.batch_size, (len(self.concat_selected_doc))
self.concat_selected_doc_len = doc2_first_sent_len
else:
for i in range(self.batch_size):
prev_doc = tf.slice(self.concat_selected_doc[i], [0, 0], [self.concat_selected_doc_len[i], -1])
new_doc = tf.slice(doc2[i], [0, 0], [doc2_first_sent_len[i], -1])
padding_len = tf.reduce_max(self.concat_selected_doc_len + doc2_first_sent_len) - self.concat_selected_doc_len[i] - doc2_first_sent_len[i]
padding = tf.zeros([padding_len, context_dim])
self.concat_selected_doc[i] = tf.concat([prev_doc, new_doc, padding], axis=0)
self.concat_selected_doc_len += doc2_first_sent_len
new_a_state = None
elif self.reasoning_unit == 'attention-lstm':
if layer > 0:
if self.read_strategy == 'one_doc_per_it_and_repeat_2nd_step':
doc1_indices = self.doc_indices_history[0]
else:
doc1_indices = self.doc_indices_history[layer-1]
doc1 = tf.gather_nd(k, doc1_indices)
doc1_mask = tf.cast(tf.gather_nd(tf.cast(c_mask, 'float32'), doc1_indices), 'bool')
else:
doc1 = cw
doc1_mask = q_mask
if self.read_strategy == 'one_doc_per_it' and (layer < self.num_hops - 1 and layer > 0):
new_a_state = None
else:
doc1_len = tf.reduce_sum(tf.cast(doc1_mask, 'int32'), axis=-1)
doc2 = tf.gather_nd(k, doc_indices)
doc2_mask = tf.cast(tf.gather_nd(tf.cast(c_mask, 'float32'), doc_indices), 'bool')
doc2_len = tf.reduce_sum(tf.cast(doc2_mask, 'int32'), axis=-1)
lstm_cell = BasicLSTMCell(hidden_dim, state_is_tuple=True)
attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(
num_units=hidden_dim,
memory=doc1,
memory_sequence_length=doc1_len)
attention_cell = tf.contrib.seq2seq.AttentionWrapper(lstm_cell, attention_mechanism, output_attention=False)
if self.attention_cell_dropout:
attention_cell = tf.contrib.rnn.DropoutWrapper(attention_cell, input_keep_prob=self.input_keep_prob)
decoder_initial_state = attention_cell.zero_state(
dtype=tf.float32, batch_size=self.batch_size)
lstm_output, _ = tf.nn.dynamic_rnn( cell=attention_cell,
inputs=doc2,
sequence_length=doc2_len,
initial_state=decoder_initial_state,
dtype=tf.float32)
W_x = tf.get_variable('W_x', [hidden_dim, context_dim])
b_x = tf.get_variable('b_x', [context_dim])
#x = tf.reshape(tf.einsum('ijk,kl->ijl', lstm_output, W_x) + b_x, [config.batch_size, self.tree_width, -1, d])
x = tf.einsum('ijk,kl->ijl', lstm_output, W_x) + b_x
similarity_with_q_sub = tf.einsum('ijk,ik->ijk', x, q_sub_st)
similarity_with_q_bod = tf.einsum('ijk,ik->ijk', x, cw_st)
doc2_mask = tf.reshape(doc2_mask, [self.batch_size, -1])
logits_q_sub = linear_logits([similarity_with_q_sub], True, input_keep_prob=self.input_keep_prob, mask=doc2_mask, \
is_train=self.is_train, scope='logits1')
logits_q_bod = linear_logits([similarity_with_q_bod], True, input_keep_prob=self.input_keep_prob, mask=doc2_mask, \
is_train=self.is_train, scope='logits2')
similarity_w_qsub_probs = tf.nn.softmax(logits_q_sub)
similarity_w_qbod_probs = tf.nn.softmax(logits_q_bod)
similarity_probs = (similarity_w_qsub_probs + similarity_w_qbod_probs) / 2
doc_rep = tf.einsum('ijk,ij->ik', doc2, similarity_probs)
new_a_state = doc_rep
qsub_topk_probs, qsub_topk_ids = tf.nn.top_k(similarity_w_qsub_probs, 10)
qbod_topk_probs, qbod_topk_ids = tf.nn.top_k(similarity_w_qbod_probs, 10)
if layer > 0:
self.qsub_topk_ids.append(qsub_topk_ids)
self.qsub_topk_probs.append(qsub_topk_probs)
self.qbod_topk_ids.append(qbod_topk_ids)
self.qbod_topk_probs.append(qbod_topk_probs)
self.qsub_all_probs.append(similarity_w_qsub_probs)
else:
self.qsub_topk_ids = [qsub_topk_ids]
self.qsub_topk_probs = [qsub_topk_probs]
self.qbod_topk_ids = [qbod_topk_ids]
self.qbod_topk_probs = [qbod_topk_probs]
self.qsub_all_probs = [similarity_w_qsub_probs]
elif self.reasoning_unit == 'None' or self.reasoning_unit is None:
new_a_state = output
else:
raise NotImplementedError
return new_m_state, new_a_state
if out_of_graph is False and layer > 0:
c_history = tf.stack(c_history, axis=1)
m_history = tf.stack(m_history, axis=1)
with tf.variable_scope(scope_str, reuse=reuse) as scope:
if self.use_control_unit:
new_c_state = control_unit()
else:
new_c_state = cw_st
# Read unit
r, ra_doc, rv_doc, ra_word = read_unit(m_state)
# Write unit
new_m_state, new_a_state = write_unit(r, new_c_state, c_history, m_history, cw)
return new_c_state, new_m_state, new_a_state, ra_doc, rv_doc, ra_word
```
#### File: EPAr/reasoning_layers/utils.py
```python
import tensorflow as tf
from my.tensorflow.rnn import bidirectional_dynamic_rnn
from my.tensorflow.nn import softsel, get_logits
def biattention_layer(is_train, h, u, h_mask=None, u_mask=None, scope=None, tensor_dict=None):
with tf.variable_scope(scope or "attention_layer"):
h = tf.expand_dims(h, 1)
h_mask = tf.expand_dims(h_mask, 1)
# JX = tf.shape(h)[2]
# M = tf.shape(h)[1]
# JQ = tf.shape(u)[1]
#if config.q2c_att or config.c2q_att:
u_a, h_a = bi_attention(is_train, h, u, h_mask=h_mask, u_mask=u_mask, tensor_dict=tensor_dict)
# if not config.c2q_att:
# u_a = tf.tile(tf.expand_dims(tf.expand_dims(tf.reduce_mean(u, 1), 1), 1), [1, M, JX, 1])
#if config.q2c_att:
p0 = tf.concat(axis=3, values=[h, u_a, h * u_a, h * h_a])
#else:
# p0 = tf.concat(axis=3, values=[h, u_a, h * u_a])
return p0
def bi_attention(is_train, h, u, h_mask=None, u_mask=None, scope=None, tensor_dict=None):
with tf.variable_scope(scope or "bi_attention"):
JX = tf.shape(h)[2]
M = tf.shape(h)[1]
JQ = tf.shape(u)[1]
h_aug = tf.tile(tf.expand_dims(h, 3), [1, 1, 1, JQ, 1])
u_aug = tf.tile(tf.expand_dims(tf.expand_dims(u, 1), 1), [1, M, JX, 1, 1])
if h_mask is None:
hu_mask = None
else:
h_mask_aug = tf.tile(tf.expand_dims(h_mask, 3), [1, 1, 1, JQ])
u_mask_aug = tf.tile(tf.expand_dims(tf.expand_dims(u_mask, 1), 1), [1, M, JX, 1])
hu_mask = h_mask_aug & u_mask_aug
u_logits = get_logits([h_aug, u_aug], None, True, wd=0., mask=hu_mask,
is_train=is_train, func='tri_linear', scope='u_logits') # [N, M, JX, JQ]
u_a = softsel(u_aug, u_logits) # [N, M, JX, d]
h_a = softsel(h, tf.reduce_max(u_logits, 3)) # [N, M, d]
h_a = tf.tile(tf.expand_dims(h_a, 2), [1, 1, JX, 1])
if tensor_dict is not None:
a_u = tf.nn.softmax(u_logits) # [N, M, JX, JQ]
a_h = tf.nn.softmax(tf.reduce_max(u_logits, 3))
tensor_dict['a_u'] = a_u
tensor_dict['a_h'] = a_h
variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=tf.get_variable_scope().name)
for var in variables:
tensor_dict[var.name] = var
return u_a, h_a
``` |
{
"source": "jiangycTarheel/NMN-MultiHopQA",
"score": 2
} |
#### File: NMN-MultiHopQA/basic/model.py
```python
import random
import os
import itertools
import tensorflow as tf
from tensorflow import newaxis as ax
from basic.attention_modules import hotpot_biattention, zhong_selfatt
from basic.batcher import get_batch_feed_dict
from my.tensorflow import get_initializer
from my.tensorflow.nn import softsel, get_logits, linear_logits, highway_network, multi_conv1d, dense
from my.tensorflow.ops import bi_cudnn_rnn_encoder
from snmn.nmn_model import NMN_Model
def get_multi_gpu_models(config, emb_mat=None):
models = []
with tf.variable_scope(tf.get_variable_scope()) as vscope:
for gpu_idx in range(config.num_gpus):
with tf.name_scope("model_{}".format(gpu_idx)) as scope, tf.device("/{}:{}".format(config.device_type, gpu_idx)):
if gpu_idx > 0:
tf.get_variable_scope().reuse_variables()
model = Model(config, scope, emb_mat, rep=gpu_idx == 0)
models.append(model)
return models
class Model(object):
def __init__(self, config, scope, emb_mat, rep=True):
self.scope = scope
self.config = config
self.emb_mat = emb_mat
self.global_step = tf.get_variable('global_step', shape=[], dtype='int32',
initializer=tf.constant_initializer(0), trainable=False)
N, M, JX, JQ, VW, VC, W = \
config.batch_size, config.max_num_sents, config.max_sent_size, \
config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.max_word_size
self.x = tf.placeholder('int32', [N, None, None], name='x')
self.cx = tf.placeholder('int32', [N, None, None, W], name='cx')
self.x_mask = tf.placeholder('bool', [N, None, None], name='x_mask')
if config.dataset == 'hotpotqa':
self.q_type_labels = tf.placeholder('int32', [N, None], name='q_type_labels')
self.q_yesno_labels = tf.placeholder('int32', [N, None], name='q_yesno_labels')
self.yes_no = tf.placeholder('bool', [N], name='yes_no')
self.max_para_size = tf.placeholder('int32', [], name='max_para_size')
self.q = tf.placeholder('int32', [N, None], name='q')
self.cq = tf.placeholder('int32', [N, None, W], name='cq')
self.q_mask = tf.placeholder('bool', [N, None], name='q_mask')
self.y = tf.placeholder('bool', [N, None, None], name='y')
self.y2 = tf.placeholder('bool', [N, None, None], name='y2')
self.wy = tf.placeholder('bool', [N, None, None], name='wy')
self.is_train = tf.placeholder('bool', [], name='is_train')
self.new_emb_mat = tf.placeholder('float', [None, config.word_emb_size], name='new_emb_mat')
self.na = tf.placeholder('bool', [N], name='na')
if config.supervise_bridge_entity:
self.bridge_word_in_context_ids = tf.placeholder('int32', [N, None], name='bridge_word_in_context_ids')
self.bridge_na = tf.placeholder('bool', [N], name='bridge_na')
# if config.reasoning_layer == 'snmn':
# self.module_prob_feed = tf.placeholder('float32', [3, N, 4], name='module_prob_feed')
# Define misc
self.tensor_dict = {}
# Forward outputs / loss inputs
self.logits = None
self.yp = None
self.var_list = None
self.na_prob = None
# Loss outputs
self.loss = None
self._build_forward()
self._build_loss()
self.var_ema = None
if rep:
self._build_var_ema()
if config.mode == 'train':
self._build_ema()
self.summary = tf.summary.merge_all()
self.summary = tf.summary.merge(tf.get_collection("summaries", scope=self.scope))
def _build_forward(self):
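        # Forward pass: character / word embeddings (+ highway network), BiLSTM encoders for the
        # question and context, the SNMN reasoning layer (when config.reasoning_layer == 'snmn'),
        # optional self-attention, and finally the start / end span logits plus the HotpotQA
        # question-type and yes/no heads.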
config = self.config
x_len = tf.reduce_sum(tf.cast(self.x_mask, 'int32'), 2) # [N, M]
q_len = tf.reduce_sum(tf.cast(self.q_mask, 'int32'), 1) # [N]
N, M, JX, JQ, VW, VC, d, W = \
config.batch_size, config.max_num_sents, config.max_sent_size, \
config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.hidden_size, \
config.max_word_size
JX = tf.shape(self.x)[2]
JQ = tf.shape(self.q)[1]
M = tf.shape(self.x)[1]
dc, dw, dco = config.char_emb_size, config.word_emb_size, config.char_out_size
with tf.variable_scope("emb"):
if config.use_char_emb:
with tf.variable_scope("emb_var"), tf.device("/cpu:0"):
char_emb_mat = tf.get_variable("char_emb_mat", shape=[VC, dc], dtype='float')
with tf.variable_scope("char"):
Acx = tf.nn.embedding_lookup(char_emb_mat, self.cx) # [N, M, JX, W, dc]
Acq = tf.nn.embedding_lookup(char_emb_mat, self.cq) # [N, JQ, W, dc]
Acx = tf.reshape(Acx, [-1, JX, W, dc])
Acq = tf.reshape(Acq, [-1, JQ, W, dc])
filter_sizes = list(map(int, config.out_channel_dims.split(',')))
heights = list(map(int, config.filter_heights.split(',')))
assert sum(filter_sizes) == dco, (filter_sizes, dco)
with tf.variable_scope("conv"):
xx = multi_conv1d(Acx, filter_sizes, heights, "VALID", self.is_train, config.keep_prob, scope="xx")
if config.share_cnn_weights:
tf.get_variable_scope().reuse_variables()
qq = multi_conv1d(Acq, filter_sizes, heights, "VALID", self.is_train, config.keep_prob, scope="xx")
else:
qq = multi_conv1d(Acq, filter_sizes, heights, "VALID", self.is_train, config.keep_prob, scope="qq")
xx = tf.reshape(xx, [-1, M, JX, dco])
qq = tf.reshape(qq, [-1, JQ, dco])
if config.use_word_emb:
with tf.variable_scope("emb_var"), tf.device("/cpu:0"):
if config.mode == 'train':
word_emb_mat = tf.get_variable("word_emb_mat", dtype='float', shape=[VW, dw], initializer=get_initializer(self.emb_mat))
else:
word_emb_mat = tf.get_variable("word_emb_mat", shape=[VW, dw], dtype='float')
if config.use_glove_for_unk:
word_emb_mat = tf.concat(axis=0, values=[word_emb_mat, self.new_emb_mat])
with tf.name_scope("word"):
Ax = tf.nn.embedding_lookup(word_emb_mat, self.x) # [N, M, JX, d]
Aq = tf.nn.embedding_lookup(word_emb_mat, self.q) # [N, JQ, d]
self.tensor_dict['x'] = Ax
self.tensor_dict['q'] = Aq
if config.use_char_emb:
xx = tf.concat(axis=3, values=[xx, Ax]) # [N, M, JX, di]
qq = tf.concat(axis=2, values=[qq, Aq]) # [N, JQ, di]
else:
xx = Ax
qq = Aq
# highway network
if config.highway:
with tf.variable_scope("highway"):
xx = highway_network(xx, config.highway_num_layers, True, wd=config.wd, is_train=self.is_train, input_keep_prob=config.highway_keep_prob)
tf.get_variable_scope().reuse_variables()
qq = highway_network(qq, config.highway_num_layers, True, wd=config.wd, is_train=self.is_train, input_keep_prob=config.highway_keep_prob)
self.tensor_dict['xx'] = xx
self.tensor_dict['qq'] = qq
with tf.variable_scope("prepro"):
with tf.variable_scope('u1'):
u, _ = bi_cudnn_rnn_encoder('lstm', config.hidden_size, 1, 1-config.input_keep_prob, qq, q_len, self.is_train)
if config.reasoning_layer == 'snmn':
u_st = zhong_selfatt(u[:, ax, :, :], config.hidden_size*2, seq_len=q_len, transform='squeeze')
if config.share_lstm_weights:
with tf.variable_scope('u1', reuse=True):
h, _ = bi_cudnn_rnn_encoder('lstm', config.hidden_size, 1, 1-config.input_keep_prob, tf.squeeze(xx, axis=1), tf.squeeze(x_len, axis=1), self.is_train)
h = h[:, ax, :, :]
else:
with tf.variable_scope('h1'):
h, _ = bi_cudnn_rnn_encoder('lstm', config.hidden_size, 1, 1-config.input_keep_prob, tf.squeeze(xx, axis=1), tf.squeeze(x_len, axis=1), self.is_train)
h = h[:, ax, :, :]
self.tensor_dict['u'] = u
self.tensor_dict['h'] = h
with tf.variable_scope("main"):
context_dim = config.hidden_size * 2
### Reconstruct before bidaf because otherwise we need to build a larger query tensor.
x_mask = self.x_mask
x_len_squeeze = tf.squeeze(x_len, axis=1)
p0 = h
### Main model
if config.reasoning_layer == 'snmn':
module_names = ['_Find', '_Compare', '_Relocate', '_NoOp']
self.snmn = NMN_Model(config, u, qq, u_st, self.q_mask, q_len, p0, x_mask, x_len, module_names, \
self.is_train)
self.u_weights = self.snmn.cv_list # question word distribution at each step
self.module_prob_list = self.snmn.module_prob_list # module probability at each step
g0 = tf.squeeze(self.snmn.att_second, axis=-1)
if config.supervise_bridge_entity:
self.hop0_logits = self.snmn.bridge_logits
if config.self_att:
with tf.variable_scope('g0'):
g0, _ = bi_cudnn_rnn_encoder('lstm', config.hidden_size, 1, 1-config.input_keep_prob, tf.squeeze(g0, axis=1), x_len_squeeze, self.is_train)
g0 = g0[:, ax, :, :]
g0 = hotpot_biattention(config, self.is_train, g0, tf.squeeze(g0, axis=1), h_mask=x_mask, u_mask=tf.squeeze(x_mask, axis=1), scope="self_att", tensor_dict=self.tensor_dict)
g0 = tf.layers.dense(g0, config.hidden_size*2)
with tf.variable_scope('g1'):
g1, _ = bi_cudnn_rnn_encoder('lstm', config.hidden_size, 1, 1-config.input_keep_prob, tf.squeeze(g0, axis=1), tf.squeeze(x_len, axis=1), self.is_train)
g1 = g1[:, ax, :, :]
logits = get_logits([g1, g0], d, True, wd=config.wd, input_keep_prob=config.input_keep_prob,
mask=x_mask, is_train=self.is_train, func=config.answer_func, scope='logits1')
with tf.variable_scope('g2'):
a1i = softsel(tf.reshape(g1, [N, M * JX, 2 * d]), tf.reshape(logits, [N, M * JX]))
a1i = tf.tile(a1i[:, ax, ax, :], [1, M, JX, 1])
g2, _ = bi_cudnn_rnn_encoder('lstm', config.hidden_size, 1, 1-config.input_keep_prob,
tf.squeeze(tf.concat(axis=3, values=[g0, g1, a1i, g0 * a1i]), axis=1),
x_len_squeeze, self.is_train)
g2 = g2[:, ax, :, :]
logits2 = get_logits([g2, g1], d, True, wd=config.wd, input_keep_prob=config.input_keep_prob,
mask=x_mask, is_train=self.is_train, func=config.answer_func,
scope='logits2')
if config.dataset == 'hotpotqa':
with tf.variable_scope('g3'):
if config.nmn_qtype_class == 'mem_last':
g3 = tf.concat([self.snmn.mem_last[:, ax, :], u_st[:, ax, :]], axis=-1)
elif config.nmn_qtype_class == 'ctrl_st':
g3 = self.snmn.c_st_list[0][:, ax, :]
else:
raise NotImplementedError
self.predict_type = dense(g3, 2, scope='predict_type')
g3_1 = self.snmn.mem_last[:, ax, :]
self.predict_yesno = dense(g3_1, 2, scope='predict_yesno')
flat_logits = tf.reshape(logits, [-1, M * JX])
flat_yp = tf.nn.softmax(flat_logits) # [-1, M * JX]
flat_logits2 = tf.reshape(logits2, [-1, M * JX])
flat_yp2 = tf.nn.softmax(flat_logits2)
yp = tf.reshape(flat_yp, [-1, M, JX])
yp2 = tf.reshape(flat_yp2, [-1, M, JX])
wyp = tf.nn.sigmoid(logits2)
self.logits = flat_logits
self.logits2 = flat_logits2
self.yp = yp
self.yp2 = yp2
self.wyp = wyp
if config.dataset == 'hotpotqa':
flat_predict_type = tf.reshape(self.predict_type, [-1, 2])
flat_yp3 = tf.nn.softmax(flat_predict_type)
self.yp3 = tf.reshape(flat_yp3, [-1, 1, 2])
flat_predict_yesno = tf.reshape(self.predict_yesno, [-1, 2])
flat_yp3_yesno = tf.nn.softmax(flat_predict_yesno)
self.yp3_yesno = tf.reshape(flat_yp3_yesno, [-1, 1, 2])
def _build_loss(self):
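        # Span cross-entropy over start / end logits, masked to exclude no-answer and yes/no
        # examples; on HotpotQA, adds question-type and yes/no classification losses, and
        # optionally a bridge-entity attention loss when config.supervise_bridge_entity is set.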
config = self.config
M = tf.shape(self.x)[1]
JX = tf.shape(self.x)[2]
# loss_mask will mask out hotpotqa examples with yes/no type answer.
loss_mask = tf.logical_and(tf.cast(tf.reduce_max(tf.cast(self.q_mask, 'float'), 1), 'bool'), tf.logical_not(self.na))
if config.supervise_bridge_entity:
bridge_loss_mask = tf.cast(tf.logical_and(loss_mask, tf.logical_not(self.bridge_na)), 'float')
if config.dataset == 'hotpotqa':
yesno_mask = tf.cast(tf.logical_and(loss_mask, self.yes_no), 'float')
loss_mask = tf.logical_and(loss_mask, tf.logical_not(self.yes_no))
loss_mask = tf.cast(loss_mask, 'float')
q_loss_mask = tf.reduce_max(tf.cast(self.q_mask, 'float'), 1)
losses = tf.nn.softmax_cross_entropy_with_logits(
logits=self.logits, labels=tf.cast(tf.reshape(self.y, [-1, M * JX]), 'float'))
losses2 = tf.nn.softmax_cross_entropy_with_logits(
logits=self.logits2, labels=tf.cast(tf.reshape(self.y2, [-1, M * JX]), 'float'))
if config.dataset == 'hotpotqa':
losses_type = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.predict_type, labels=self.q_type_labels)
ce_loss_type = tf.reduce_mean(q_loss_mask * losses_type, name='loss_q_type')
tf.summary.scalar(ce_loss_type.op.name, ce_loss_type)
tf.add_to_collection('ema/scalar', ce_loss_type)
tf.add_to_collection("losses", ce_loss_type)
losses_yesno = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.predict_yesno, labels=self.q_yesno_labels)
ce_loss_yesno = tf.reduce_mean(yesno_mask * losses_yesno, name='loss_q_yesno') * config.yesno_loss_coeff
tf.summary.scalar(ce_loss_yesno.op.name, ce_loss_yesno)
tf.add_to_collection('ema/scalar', ce_loss_yesno)
tf.add_to_collection("losses", ce_loss_yesno)
ce_loss = tf.reduce_mean(loss_mask * losses)
ce_loss2 = tf.reduce_mean(loss_mask * losses2)
tf.add_to_collection('losses', ce_loss)
tf.add_to_collection("losses", ce_loss2)
self.loss = tf.add_n(tf.get_collection('losses', scope=self.scope), name='loss')
tf.summary.scalar(self.loss.op.name, self.loss)
tf.add_to_collection('ema/scalar', self.loss)
if config.supervise_bridge_entity:
bridge_word_ids = tf.squeeze(tf.slice(self.bridge_word_in_context_ids, [0, 0], [-1, 1]), axis=1)
hop0_attn_losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=self.hop0_logits, labels=bridge_word_ids)
hop0_attn_loss = tf.reduce_mean(hop0_attn_losses * bridge_loss_mask, name='hop0_attn_loss')
tf.summary.scalar('hop0_attn_loss', hop0_attn_loss)
tf.add_to_collection('ema/scalar', hop0_attn_loss)
self.loss += config.hop0_attn_loss_coeff * hop0_attn_loss
tf.summary.scalar('total_loss', self.loss)
def _build_ema(self):
self.ema = tf.train.ExponentialMovingAverage(self.config.decay)
ema = self.ema
tensors = tf.get_collection("ema/scalar", scope=self.scope) + tf.get_collection("ema/vector", scope=self.scope)
ema_op = ema.apply(tensors)
for var in tf.get_collection("ema/scalar", scope=self.scope):
ema_var = ema.average(var)
tf.summary.scalar(ema_var.op.name, ema_var)
for var in tf.get_collection("ema/vector", scope=self.scope):
ema_var = ema.average(var)
tf.summary.histogram(ema_var.op.name, ema_var)
with tf.control_dependencies([ema_op]):
self.loss = tf.identity(self.loss)
def _build_var_ema(self):
self.var_ema = tf.train.ExponentialMovingAverage(self.config.var_decay)
ema = self.var_ema
ema_op = ema.apply(tf.trainable_variables())
with tf.control_dependencies([ema_op]):
self.loss = tf.identity(self.loss)
def get_loss(self):
return self.loss
def get_global_step(self):
return self.global_step
def get_var_list(self, model_name):
if model_name == 'model_network':
var_list = [var for var in tf.trainable_variables() if 'reward_network' not in var.name and 'ranker' not in var.name and 'controller' not in var.name]
elif model_name == 'controller':
var_list = [var for var in tf.trainable_variables() if 'module_controller' in var.name]
elif model_name == 'nmn':
var_list = [var for var in tf.trainable_variables() if 'module_controller' not in var.name]
elif model_name == 'all':
var_list = [var for var in tf.trainable_variables()]
else:
raise NotImplementedError
assert len(var_list) > 0
return var_list
def get_feed_dict(model, batch, is_train, supervised=True):
return get_batch_feed_dict(model, batch, is_train, supervised=True)
```
#### File: NMN-MultiHopQA/basic/trainer.py
```python
import tensorflow as tf
import math
from basic.model import Model
from my.tensorflow import average_gradients
import numpy as np
class Trainer(object):
def __init__(self, config, model):
assert isinstance(model, Model)
self.config = config
self.model = model
self.opt = tf.train.AdamOptimizer(config.init_lr)
self.loss = model.get_loss()
        self.var_list = model.get_var_list('all')
self.global_step = model.get_global_step()
self.summary = model.summary
self.grads = self.opt.compute_gradients(self.loss, var_list=self.var_list)
self.train_op = self.opt.apply_gradients(self.grads, global_step=self.global_step)
def get_train_op(self):
return self.train_op
def step(self, sess, batch, get_summary=False):
assert isinstance(sess, tf.Session)
_, ds = batch
feed_dict = self.model.get_feed_dict(ds, True)
if get_summary:
loss, summary, train_op = \
sess.run([self.loss, self.summary, self.train_op], feed_dict=feed_dict)
else:
loss, train_op = sess.run([self.loss, self.train_op], feed_dict=feed_dict)
summary = None
return loss, summary, train_op
class MultiGPUTrainer(object):
def __init__(self, config, models):
model = models[0]
assert isinstance(model, Model)
self.config = config
self.model = model
self.global_step = model.get_global_step()
self.opt = tf.train.AdamOptimizer(config.init_lr)
if config.train_nmn_ctrl_separately:
self.var_list = model.get_var_list('nmn')
self.controller_var_list = model.get_var_list('controller')
controller_grads_list = []
else:
self.var_list = model.get_var_list('all')
self.summary = model.summary
self.models = models
losses, grads_list = [], []
for gpu_idx, model in enumerate(models):
with tf.name_scope("grads_{}".format(gpu_idx)), tf.device("/{}:{}".format(config.device_type, gpu_idx)):
loss = model.get_loss()
grads = self.opt.compute_gradients(loss, var_list=self.var_list)
losses.append(loss)
grads_list.append(grads)
if config.train_nmn_ctrl_separately:
controller_grads = self.opt.compute_gradients(loss, var_list=self.controller_var_list)
controller_grads_list.append(controller_grads)
self.loss = tf.add_n(losses)/len(losses)
self.grads = average_gradients(grads_list)
if config.train_nmn_ctrl_separately:
self.controller_grads = average_gradients(controller_grads_list)
controller_grad_vars = [x[1] for x in self.controller_grads]
controller_gradients = [x[0] for x in self.controller_grads]
controller_clipped, _ = tf.clip_by_global_norm(controller_gradients, 2)
ctrl_accum_vars = [tf.Variable(tf.zeros_like(tv.initialized_value()), trainable=False) for tv in self.controller_var_list]
self.ctrl_zero_ops = [tv.assign(tf.zeros_like(tv)) for tv in ctrl_accum_vars]
self.ctrl_accum_ops = [ctrl_accum_vars[i].assign_add(gv) for i, gv in enumerate(controller_clipped)]
if config.gradient_accum_steps == 1:
self.controller_train_op = self.opt.apply_gradients(zip(controller_clipped, controller_grad_vars), global_step=self.global_step)
else:
self.controller_train_op = self.opt.apply_gradients([(ctrl_accum_vars[i], gv[1]) for i, gv in enumerate(self.controller_grads)], global_step=self.global_step)
#self.grads, global_norm = tf.clip_by_global_norm(self.grads, 2)
grad_vars = [x[1] for x in self.grads]
gradients = [x[0] for x in self.grads]
clipped, _ = tf.clip_by_global_norm(gradients, 2)
accum_vars = [tf.Variable(tf.zeros_like(tv.initialized_value()), trainable=False) for tv in self.var_list]
self.zero_ops = [tv.assign(tf.zeros_like(tv)) for tv in accum_vars]
self.accum_ops = [accum_vars[i].assign_add(gv) for i, gv in enumerate(clipped)]
if config.gradient_accum_steps == 1:
self.train_op = self.opt.apply_gradients(zip(clipped, grad_vars), global_step=self.global_step)
else:
self.train_op = self.opt.apply_gradients([(accum_vars[i], gv[1]) for i, gv in enumerate(self.grads)], global_step=self.global_step)
with tf.control_dependencies([self.train_op]):
self.dummy = tf.constant(0, name='dummy')
def step(self, sess, batches, get_summary=False, lr=None, train_controller=False, accum_gradients=False):
config = self.config
assert isinstance(sess, tf.Session)
feed_dict = {}
if config.gradient_accum_steps == 1 or accum_gradients:
assert batches is not None
for batch, model in zip(batches, self.models):
_, ds = batch
feed_dict.update(model.get_feed_dict(ds, True, sess))
if accum_gradients:
accum_ops = self.accum_ops
if train_controller and config.train_nmn_ctrl_separately:
accum_ops = self.ctrl_accum_ops
if get_summary:
loss, summary, _train_op = \
sess.run([self.loss, self.summary, accum_ops], feed_dict=feed_dict)
else:
loss, _train_op = \
sess.run([self.loss, accum_ops], feed_dict=feed_dict)
summary = None
else:
train_op = self.train_op
if train_controller and config.train_nmn_ctrl_separately:
train_op = self.controller_train_op
if config.gradient_accum_steps == 1:
if get_summary:
loss, summary, _train_op = \
sess.run([self.loss, self.summary, train_op], feed_dict=feed_dict)
else:
loss, _train_op = \
sess.run([self.loss, train_op], feed_dict=feed_dict)
summary = None
else:
_train_op = sess.run(train_op)
summary, loss = None, 0
if math.isnan(loss):
logits, g1, cand_mask, cand_emb = \
sess.run([self.model.logits, self.model.g1, self.model.cand_mask, self.model.cand_emb], feed_dict)
print(logits)
            # candidate_spans and candidate_span_y are not defined in this scope,
            # so they are left out of the debug dump
            # print(candidate_spans[0])
            # print(candidate_span_y)
print("mask: ")
print(cand_mask[0])
print("cand_emb: ")
print(cand_emb[0])
print(feed_dict[self.model.answer_doc_ids])
print(feed_dict[self.model.first_doc_ids])
print(batches[0][1].data['ids'])
#print(feed_dict[self.model.second_doc_ids])
exit()
return loss, summary, _train_op
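# --- Usage sketch (not part of the original file) ---
# A minimal, hypothetical outline of how the accumulation ops above are meant to be
# driven when config.gradient_accum_steps > 1: accumulate clipped gradients over
# several batches, then apply them in a single optimizer step. The names `trainer`,
# `sess` and `batch_iter` are assumptions for illustration only.
#
#   sess.run(trainer.zero_ops)                              # reset the accumulators
#   for _ in range(config.gradient_accum_steps):
#       batches = next(batch_iter)
#       trainer.step(sess, batches, accum_gradients=True)   # runs accum_ops on each batch
#   trainer.step(sess, None, accum_gradients=False)         # applies the accumulated gradients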
```
#### File: NMN-MultiHopQA/snmn/nmn.py
```python
import numpy as np
import tensorflow as tf
from tensorflow import convert_to_tensor as to_T, newaxis as ax
from tensorflow.python.ops import tensor_array_ops, control_flow_ops
from my.tensorflow.ops import bi_cudnn_rnn_encoder
from snmn.config import cfg
from snmn.util.cnn import fc_layer as fc, conv_layer as conv
from basic.attention_modules import weighted_biattention
from my.tensorflow.nn import dense
from my.tensorflow.nn import linear_logits
MODULE_INPUT_NUM = {
'_NoOp': 2,
'_Find': 0,
'_Relocate': 1,
'_Compare': 2,
}
MODULE_OUTPUT_NUM = {
'_NoOp': 2,
'_Find': 1,
'_Relocate': 1,
'_Compare': 1,
}
class NMN:
def __init__(self, config, kb_batch, kb_mask_batch, kb_len_batch, c_list,
cv_list, q, q_mask, module_names, module_prob_list, is_train,
q_len, max_para_size=None, scope='NMN', reuse=None):
"""
NMN v4 with an attention stack
"""
with tf.variable_scope(scope, reuse=reuse):
self.is_train = is_train
self.main_config = config
self.kb_batch = kb_batch
self.kb_mask_batch = kb_mask_batch
self.kb_len_batch = kb_len_batch
self.c_list = c_list
self.cv_list = cv_list
self.q = q
self.q_len = q_len
self.q_mask = q_mask
self.module_prob_list = module_prob_list
self.kb_dim = cfg.MODEL.KB_DIM
if self.main_config.nmn_relocate_move_ptr:
MODULE_OUTPUT_NUM['_Relocate'] = MODULE_INPUT_NUM['_Relocate'] + 1
else:
MODULE_OUTPUT_NUM['_Relocate'] = MODULE_INPUT_NUM['_Relocate']
self.T_ctrl = cfg.MODEL.T_CTRL
self.mem_dim = cfg.MODEL.NMN.MEM_DIM
self.N = tf.shape(kb_batch)[0]
self.M = tf.shape(kb_batch)[1]
self.JX = tf.shape(kb_batch)[2]
self.att_shape = to_T([self.N, self.M, self.JX, 1])
self.max_para_size = max_para_size
self.stack_len = cfg.MODEL.NMN.STACK.LENGTH
            # The initial stack values are all zeros everywhere
self.att_stack_init = tf.zeros(
to_T([self.N, self.M, self.JX, self.kb_dim, self.stack_len]))
# The initial stack pointer points to the stack bottom
self.stack_ptr_init = tf.one_hot(
tf.zeros(to_T([self.N]), tf.int32), self.stack_len)
self.mem_init = tf.zeros(to_T([self.N, self.mem_dim]))
# zero-outputs that can be easily used by the modules
self.att_zero = tf.zeros(self.att_shape, tf.float32)
if config.nmn_mem_init == 'zero':
self.mem_zero = tf.zeros(to_T([self.N, self.mem_dim]), tf.float32)
elif config.nmn_mem_init == 'random':
self.mem_zero = tf.random.uniform(to_T([self.N, self.mem_dim]), minval=-100, maxval=100, dtype=tf.float32)
else:
raise NotImplementedError
self.score_zero = tf.zeros(to_T([self.N, 1]), tf.float32)
# the set of modules and functions (e.g. "_Find" -> Find)
self.module_names = module_names
self.module_funcs = [getattr(self, m[1:]) for m in module_names]
self.num_module = len(module_names)
self.module_validity_mat = _build_module_validity_mat(module_names)
# unroll the modules with a fixed number of timestep T_ctrl
self.att_list = []
self.att_stack_list = []
self.stack_ptr_list = []
self.mem_list = []
self.score_list = []
self.att_in_1, self.att_in_2 = None, None
att_stack_prev = self.att_stack_init
stack_ptr_prev = self.stack_ptr_init
mem_prev = self.mem_init
self.module_validity = []
for t in range(self.T_ctrl):
c_i = self.c_list[t]
cv_i = self.cv_list[t]
module_prob = self.module_prob_list[t]
                # only keep the prob of valid modules (i.e. those that won't cause
                # stack underflow or overflow, e.g. _Compare can't be run at
# t = 0 since the stack is empty).
if cfg.MODEL.NMN.VALIDATE_MODULES:
module_validity = tf.matmul(
stack_ptr_prev, self.module_validity_mat)
if cfg.MODEL.NMN.HARD_MODULE_VALIDATION:
module_validity = tf.round(module_validity)
self.module_validity.append(module_validity)
module_prob *= module_validity
module_prob /= tf.reduce_sum(
module_prob, axis=1, keepdims=True)
self.module_prob_list[t] = module_prob
# run all the modules, and average their results wrt module_w
res = [f(att_stack_prev, stack_ptr_prev, mem_prev, c_i, cv_i,
t, reuse=(t > 0)) for f in self.module_funcs]
att_stack_avg = tf.reduce_sum(
module_prob[:, ax, ax, ax, ax, :] *
tf.stack([r[0] for r in res], axis=-1), axis=-1)
stack_ptr_avg = _sharpen_ptr(tf.reduce_sum(
module_prob[:, ax, :] *
tf.stack([r[1] for r in res], axis=2), axis=-1))
mem_avg = tf.reduce_sum(
module_prob[:, ax, :] *
tf.stack([r[2] for r in res], axis=2), axis=-1)
score_avg = tf.reduce_sum(
module_prob[:, ax, :] *
tf.stack([r[3] for r in res], axis=2), axis=-1)
self.att_list.append(_read_from_stack(att_stack_avg, stack_ptr_avg))
self.att_stack_list.append(att_stack_avg)
self.stack_ptr_list.append(stack_ptr_avg)
self.mem_list.append(mem_avg)
self.score_list.append(score_avg)
att_stack_prev = att_stack_avg
stack_ptr_prev = stack_ptr_avg
mem_prev = mem_avg
self.att_last = self.att_list[-1]
self.att_first = self.att_list[0]
self.att_second = self.att_list[1]
self.mem_last = self.mem_list[-1]
self.score_last = self.score_list[-1]
def NoOp(self, att_stack, stack_ptr, mem_in, c_i, cv_i, t, scope='NoOp', reuse=None):
"""
Does nothing. It leaves the stack pointer, the stack and mem vector
as-is.
"""
return att_stack, stack_ptr, mem_in, self.score_zero
def Find(self, att_stack, stack_ptr, mem_in, c_i, cv_i, t, scope='Find', reuse=None):
"""
Performs localization, and updates memory vector.
"""
with tf.variable_scope(scope, reuse=reuse):
# Get attention
# 1) linearly map the controller vectors to the KB dimension
# 2) elementwise product with KB
# 3) 1x1 convolution to get attention logits
c_mapped = fc('fc_c_mapped', c_i, output_dim=cfg.MODEL.KB_DIM, \
is_train=self.is_train, dropout=self.main_config.nmn_dropout)
kb_batch = self.kb_batch
elt_prod = tf.nn.l2_normalize(
kb_batch * c_mapped[:, ax, ax, :], axis=-1)
att_out = self.apply_attention(elt_prod, cv_i, t)
# Push to stack
stack_ptr = _move_ptr_fw(stack_ptr)
att_stack = _write_to_stack(att_stack, stack_ptr, att_out)
# Find bridge entity
if t == 0 and (self.main_config.supervise_bridge_entity):
g1, _ = bi_cudnn_rnn_encoder('lstm', self.main_config.hidden_size, 1, 1-self.main_config.input_keep_prob, \
tf.squeeze(att_out, axis=1), tf.squeeze(self.kb_len_batch, axis=1), self.is_train)
g1 = g1[:, ax, :, :]
bridge_logits = linear_logits([g1, att_out], True, input_keep_prob=self.main_config.input_keep_prob, \
mask=self.kb_mask_batch, is_train=self.is_train, scope='logits_bridge')
self.bridge_logits = tf.squeeze(bridge_logits, axis=1)
bridge_attn = tf.nn.softmax(self.bridge_logits)
self.bridge_attn = bridge_attn
new_mem = tf.einsum('ijk,ij->ik', tf.squeeze(kb_batch, axis=1), bridge_attn)
if t == 0:
self.inferred_bridge = new_mem
return att_stack, stack_ptr, self.mem_zero, self.score_zero
def Compare(self, att_stack, stack_ptr, mem_in, c_i, cv_i, t, scope='Compare', reuse=None):
# Pop from stack
att_in_2 = tf.squeeze(_read_from_stack(att_stack, stack_ptr), axis=-1)
stack_ptr = _move_ptr_bw(stack_ptr)
att_in_1 = tf.squeeze(_read_from_stack(att_stack, stack_ptr), axis=-1)
#stack_ptr = _move_ptr_bw(stack_ptr)
stack_ptr = _move_ptr_fw(stack_ptr)
with tf.variable_scope(scope, reuse=reuse):
att_prob_in_1 = linear_logits([att_in_1], True, input_keep_prob=self.main_config.input_keep_prob, \
mask=self.kb_mask_batch, is_train=self.is_train)
att_prob_in_2 = linear_logits([att_in_2], True, input_keep_prob=self.main_config.input_keep_prob, \
mask=self.kb_mask_batch, is_train=self.is_train, reuse=True)
att_prob_in_1, att_prob_in_2 = tf.squeeze(att_prob_in_1, axis=1), tf.squeeze(att_prob_in_2, axis=1)
self.att_in_1 = att_prob_in_1
self.att_in_2 = att_prob_in_2
c_mapped = fc('fc_c_mapped', c_i, output_dim=cfg.MODEL.KB_DIM, is_train=self.is_train, dropout=self.main_config.nmn_dropout)
kb_att_in_1 = _extract_softmax_avg(self.kb_batch, att_prob_in_1[:, ax, :])
kb_att_in_2 = _extract_softmax_avg(self.kb_batch, att_prob_in_2[:, ax, :])
fc_in_1 = tf.concat([c_mapped, c_mapped * kb_att_in_1, c_mapped*kb_att_in_2, kb_att_in_1-kb_att_in_2], axis=1)
mem_out = tf.nn.tanh(fc('fc_mem_out_1', fc_in_1, output_dim=self.mem_dim, is_train=self.is_train, dropout=self.main_config.nmn_dropout))
return att_stack, stack_ptr, mem_out, self.score_zero
def Relocate(self, att_stack, stack_ptr, mem_in, c_i, cv_i, t, scope='Transform', reuse=None):
"""
Relocates the previous attention, and updates memory vector.
"""
kb_batch = self.kb_batch
with tf.variable_scope(scope, reuse=reuse):
c_mapped = fc('fc_c_mapped', c_i, output_dim=cfg.MODEL.KB_DIM, is_train=self.is_train, dropout=self.main_config.nmn_dropout)
            # Get attention
            # 1) linearly map the controller vectors to the KB dimension
            # 2) extract attended features from the input attention
            # 3) elementwise product with KB
            # 4) 1x1 convolution to get attention logits
if t == 0:
elt_prod = tf.nn.l2_normalize(
kb_batch * c_mapped[:, ax, ax, :], axis=-1)
else:
elt_prod = tf.nn.l2_normalize(
kb_batch * c_mapped[:, ax, ax, :] *
self.inferred_bridge[:, ax, ax, :], axis=-1)
att_out = self.apply_attention(elt_prod, cv_i, t, module='relocate')
# Push to stack
if self.main_config.nmn_relocate_move_ptr:
stack_ptr = _move_ptr_fw(stack_ptr) # cancel-out above
att_stack = _write_to_stack(att_stack, stack_ptr, att_out)
return att_stack, stack_ptr, self.mem_zero, self.score_zero
def apply_attention(self, h, u_weights, t, module='find'):
if self.main_config.nmn_attention_type == 'conv':
            out = _1x1conv('conv_att_out', h, output_dim=1)
            # mask out padded positions before returning the attention logits
            out = apply_mask(out, self.kb_mask_batch)
elif self.main_config.nmn_attention_type == 'biattn':
out = self.bi_attention(h, u_weights, t, module=module)
else:
raise NotImplementedError
return out
def bi_attention(self, h, u_weights, t, module='find'):
u_weights = tf.transpose(tf.squeeze(u_weights, axis=-1), perm=[1, 0])
q, q_mask = self.q, self.q_mask
p0, context_dim, weight_one = weighted_biattention(
h, q, self.kb_dim, h_mask=self.kb_mask_batch, u_mask=tf.cast(q_mask, 'bool'),
u_weights=u_weights, scope='biattn')
if module == 'find' and t == 1:
self.weight_one = weight_one
p0 = dense(p0, self.main_config.hidden_size*2, scope='biattn_dense')
return p0
def _move_ptr_fw(stack_ptr):
"""
Move the stack pointer forward (i.e. to push to stack).
"""
# Note: in TF, conv1d is implemented as auto-correlation (instead of
    # mathematical convolution), so no flipping of the filter.
filter_fw = to_T(np.array([1, 0, 0], np.float32).reshape((3, 1, 1)))
new_stack_ptr = tf.squeeze(
tf.nn.conv1d(stack_ptr[..., ax], filter_fw, 1, 'SAME'), axis=[2])
# when the stack pointer is already at the stack top, keep
# the pointer in the same location (otherwise the pointer will be all zero)
if cfg.MODEL.NMN.STACK.GUARD_STACK_PTR:
stack_len = cfg.MODEL.NMN.STACK.LENGTH
stack_top_mask = tf.one_hot(stack_len - 1, stack_len)
new_stack_ptr += stack_top_mask * stack_ptr
return new_stack_ptr
def _move_ptr_bw(stack_ptr):
"""
Move the stack pointer backward (i.e. to pop from stack).
"""
# Note: in TF, conv1d is implemented as auto-correlation (instead of
    # mathematical convolution), so no flipping of the filter.
filter_fw = to_T(np.array([0, 0, 1], np.float32).reshape((3, 1, 1)))
new_stack_ptr = tf.squeeze(
tf.nn.conv1d(stack_ptr[..., ax], filter_fw, 1, 'SAME'), axis=[2])
# when the stack pointer is already at the stack bottom, keep
# the pointer in the same location (otherwise the pointer will be all zero)
if cfg.MODEL.NMN.STACK.GUARD_STACK_PTR:
stack_len = cfg.MODEL.NMN.STACK.LENGTH
stack_bottom_mask = tf.one_hot(0, stack_len)
new_stack_ptr += stack_bottom_mask * stack_ptr
return new_stack_ptr
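# --- Worked example (illustration only, not in the original file) ---
# With stack_len = 3, the 'SAME' conv1d with kernel [1, 0, 0] copies each position
# from its left neighbour, which shifts a one-hot pointer up by one slot, while the
# [0, 0, 1] kernel shifts it down by one slot:
#   _move_ptr_fw([0, 1, 0]) -> [0, 0, 1]   (push: pointer moves toward the stack top)
#   _move_ptr_bw([0, 1, 0]) -> [1, 0, 0]   (pop:  pointer moves toward the stack bottom)
# With GUARD_STACK_PTR enabled, a pointer already at the top (or bottom) stays where
# it is instead of being shifted out to the all-zero vector.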
def _read_from_stack(att_stack, stack_ptr):
"""
Read the value at the given stack pointer.
"""
stack_ptr_expand = stack_ptr[:, ax, ax, ax, :]
# The stack pointer is a one-hot vector, so just do dot product
att = tf.reduce_sum(att_stack * stack_ptr_expand, axis=-1, keepdims=True)
return att
def _write_to_stack(att_stack, stack_ptr, att):
"""
Write value 'att' into the stack at the given stack pointer. Note that the
result needs to be assigned back to att_stack
"""
stack_ptr_expand = stack_ptr[:, ax, ax, ax, :]
att_stack = att[:, :, :, :, ax] * stack_ptr_expand + att_stack * (1 - stack_ptr_expand)
return att_stack
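# --- Note (illustration only, not in the original file) ---
# Because stack_ptr is (approximately) one-hot, the write above is a soft,
# differentiable overwrite: the slot selected by the pointer receives `att` and
# every other slot keeps its previous value,
#   new_stack[..., s] = ptr[s] * att + (1 - ptr[s]) * old_stack[..., s]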
def _sharpen_ptr(stack_ptr):
"""
Sharpen the stack pointers into (nearly) one-hot vectors, using argmax
or softmax. The stack values should always sum up to one for each instance.
"""
hard = cfg.MODEL.NMN.STACK.USE_HARD_SHARPEN
if hard:
# hard (non-differentiable) sharpening with argmax
new_stack_ptr = tf.one_hot(
tf.argmax(stack_ptr, axis=1), tf.shape(stack_ptr)[1])
else:
# soft (differentiable) sharpening with softmax
temperature = cfg.MODEL.NMN.STACK.SOFT_SHARPEN_TEMP
new_stack_ptr = tf.nn.softmax(stack_ptr / temperature)
return new_stack_ptr
def _1x1conv(name, bottom, output_dim, reuse=None):
return conv(name, bottom, kernel_size=1, stride=1, output_dim=output_dim,
reuse=reuse)
def apply_mask(x, mask):
return x - 1e30 * (1. - tf.cast(mask[:, :, :, ax], 'float32'))
def _spatial_softmax(att_raw):
att_shape = tf.shape(att_raw)
N = att_shape[0]
att_softmax = tf.nn.softmax(tf.reshape(att_raw, to_T([N, -1])), axis=1)
att_softmax = tf.reshape(att_softmax, att_shape)
return att_softmax
def _extract_softmax_avg(kb_batch, att_raw):
att_softmax = _spatial_softmax(att_raw)[:, :, :, ax]
return tf.reduce_sum(kb_batch * att_softmax, axis=[1, 2])
def _build_module_validity_mat(module_names):
"""
Build a module validity matrix, ensuring that only valid modules will have
    non-zero probabilities. A module is only valid to run if the stack holds
    enough attentions for it to pop and has enough space for it to push into
    (e.g. _Find), so that the stack will not underflow or overflow by design.
module_validity_mat is a stack_len x num_module matrix, and is used to
multiply with stack_ptr to get validity boolean vector for the modules.
"""
stack_len = cfg.MODEL.NMN.STACK.LENGTH
module_validity_mat = np.zeros((stack_len, len(module_names)), np.float32)
for n_m, m in enumerate(module_names):
# a module can be run only when stack ptr position satisfies
# (min_ptr_pos <= ptr <= max_ptr_pos), where max_ptr_pos is inclusive
# 1) minimum position:
# stack need to have MODULE_INPUT_NUM[m] things to pop from
min_ptr_pos = MODULE_INPUT_NUM[m]
# the stack ptr diff=(MODULE_OUTPUT_NUM[m] - MODULE_INPUT_NUM[m])
# ensure that ptr + diff <= stack_len - 1 (stack top)
max_ptr_pos = (
stack_len - 1 + MODULE_INPUT_NUM[m] - MODULE_OUTPUT_NUM[m])
module_validity_mat[min_ptr_pos:max_ptr_pos+1, n_m] = 1.
return to_T(module_validity_mat)
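# --- Worked example (illustration only, not in the original file) ---
# Assuming cfg.MODEL.NMN.STACK.LENGTH == 3 and MODULE_OUTPUT_NUM['_Relocate'] == 1
# (i.e. nmn_relocate_move_ptr disabled), the matrix built above has one row per
# stack-pointer position and one column per module:
#
#   ptr pos   _NoOp  _Find  _Relocate  _Compare
#      0        0      1        0         0      (empty stack: only _Find may push)
#      1        0      1        1         0
#      2        1      0        1         1      (full stack: another _Find would overflow)
#
# Multiplying a (soft) one-hot stack pointer with this matrix yields the validity
# vector that masks the module probabilities at each timestep.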
``` |
{
"source": "jiangycTarheel/Undirected-Generation",
"score": 2
} |
#### File: Undirected-Generation/src/utils.py
```python
import os
import re
import sys
import pickle
import random
import inspect
import getpass
import argparse
import subprocess
import numpy as np
import torch
from torch import optim
from .optim_utils import Adafactor
from src.data.dictionary import Dictionary, BOS_WORD, EOS_WORD, PAD_WORD, UNK_WORD, MASK_WORD
from src.model.transformer import TransformerModel, TransformerOrderPredModel
from .logger import create_logger
FALSY_STRINGS = {'off', 'false', '0'}
TRUTHY_STRINGS = {'on', 'true', '1'}
#DUMP_PATH = '/checkpoint/%s/dumped' % getpass.getuser()
DUMP_PATH = None
DYNAMIC_COEFF = ['lambda_clm', 'lambda_mlm', 'lambda_pc', 'lambda_ae', 'lambda_mt', 'lambda_bt']
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
def reload_checkpoint(path):
""" Reload params, dictionary, model from a given path """
# Load dictionary/model/datasets first
reloaded = torch.load(path)
params = AttrDict(reloaded['params'])
print("Supported languages: %s" % ", ".join(params.lang2id.keys()))
# build dictionary / update parameters
dico = Dictionary(reloaded['dico_id2word'], reloaded['dico_word2id'], reloaded['dico_counts'])
params.n_words = len(dico)
params.bos_index = dico.index(BOS_WORD)
params.eos_index = dico.index(EOS_WORD)
params.pad_index = dico.index(PAD_WORD)
params.unk_index = dico.index(UNK_WORD)
params.mask_index = dico.index(MASK_WORD)
# build model / reload weights
model = TransformerModel(params, dico, True, True)
model.load_state_dict(reloaded['model'])
return params, dico, model
def reload_order_checkpoint(path):
""" Reload params, dictionary, model from a given path """
# Load dictionary/model/datasets first
reloaded = torch.load(path)
params = AttrDict(reloaded['params'])
print("Supported languages: %s" % ", ".join(params.lang2id.keys()))
# build dictionary / update parameters
dico = Dictionary(reloaded['dico_id2word'], reloaded['dico_word2id'], reloaded['dico_counts'])
params.n_words = len(dico)
params.bos_index = dico.index(BOS_WORD)
params.eos_index = dico.index(EOS_WORD)
params.pad_index = dico.index(PAD_WORD)
params.unk_index = dico.index(UNK_WORD)
params.mask_index = dico.index(MASK_WORD)
# build model / reload weights
# model = TransformerModel(params, dico, True, True)
# model.load_state_dict(reloaded['model'])
order_model = TransformerOrderPredModel(params, dico, True, True)
order_model.load_state_dict(reloaded['order_model'])
return params, dico, order_model
def bool_flag(s):
"""
Parse boolean arguments from the command line.
"""
if s.lower() in FALSY_STRINGS:
return False
elif s.lower() in TRUTHY_STRINGS:
return True
else:
raise argparse.ArgumentTypeError("Invalid value for a boolean flag!")
def initialize_exp(params):
"""
    Initialize the experiment:
- dump parameters
- create a logger
"""
# dump parameters
get_dump_path(params)
pickle.dump(params, open(os.path.join(params.dump_path, 'params.pkl'), 'wb'))
# get running command
command = ["python", sys.argv[0]]
for x in sys.argv[1:]:
if x.startswith('--'):
assert '"' not in x and "'" not in x
command.append(x)
else:
assert "'" not in x
if re.match('^[a-zA-Z0-9_]+$', x):
command.append("%s" % x)
else:
command.append("'%s'" % x)
command = ' '.join(command)
params.command = command + ' --exp_id "%s"' % params.exp_id
# check experiment name
assert len(params.exp_name.strip()) > 0
# create a logger
logger = create_logger(os.path.join(params.dump_path, 'train.log'), rank=getattr(params, 'global_rank', 0))
logger.info("============ Initialized logger ============")
logger.info("\n".join("%s: %s" % (k, str(v))
for k, v in sorted(dict(vars(params)).items())))
logger.info("The experiment will be stored in %s\n" % params.dump_path)
logger.info("Running command: %s" % command)
logger.info("")
return logger
def get_dump_path(params):
"""
Create a directory to store the experiment.
"""
dump_path = DUMP_PATH if params.dump_path == '' else params.dump_path
assert len(params.exp_name) > 0
# create the sweep path if it does not exist
sweep_path = os.path.join(dump_path, params.exp_name, params.run_id+'_maxlen'+str(params.max_len)+'_minlen'+str(params.min_len)+'_bsz'+str(params.batch_size))
if not os.path.exists(sweep_path):
subprocess.Popen("mkdir -p %s" % sweep_path, shell=True).wait()
# create an ID for the job if it is not given in the parameters.
# if we run on the cluster, the job ID is the one of Chronos.
# otherwise, it is randomly generated
if params.exp_id == '':
chronos_job_id = os.environ.get('CHRONOS_JOB_ID')
slurm_job_id = os.environ.get('SLURM_JOB_ID')
assert chronos_job_id is None or slurm_job_id is None
exp_id = chronos_job_id if chronos_job_id is not None else slurm_job_id
if exp_id is None:
chars = 'abcdefghijklmnopqrstuvwxyz0123456789'
while True:
exp_id = ''.join(random.choice(chars) for _ in range(10))
if not os.path.isdir(os.path.join(sweep_path, exp_id)):
break
else:
assert exp_id.isdigit()
params.exp_id = exp_id
# create the dump folder / update parameters
params.dump_path = os.path.join(sweep_path, params.exp_id)
if not os.path.isdir(params.dump_path):
subprocess.Popen("mkdir -p %s" % params.dump_path, shell=True).wait()
class AdamWithWarmup(optim.Adam):
"""
Decay the LR based on the inverse square root of the update number.
We also support a warmup phase where we linearly increase the learning rate
from some initial learning rate (`warmup-init-lr`) until the configured
learning rate (`lr`). Thereafter we decay proportional to the number of
updates, with a decay factor set to align with the configured learning rate.
During warmup:
lrs = torch.linspace(warmup_init_lr, lr, warmup_updates)
lr = lrs[update_num]
After warmup:
lr = decay_factor / sqrt(update_num)
where
decay_factor = lr * sqrt(warmup_updates)
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=0, warmup_updates=1000, warmup_init_lr=1e-7):
super().__init__(
params,
lr=warmup_init_lr,
betas=betas,
eps=eps,
weight_decay=weight_decay,
)
#warmup_updates = 1
print(warmup_updates)
self.warmup_updates = warmup_updates
self.warmup_init_lr = warmup_init_lr
# linearly warmup for the first warmup_updates
warmup_end_lr = lr
self.lr_step = (warmup_end_lr - warmup_init_lr) / warmup_updates
self.lr = lr
# then, decay prop. to the inverse square root of the update number
#self.decay_factor = warmup_end_lr * warmup_updates ** 0.5
for param_group in self.param_groups:
param_group['num_updates'] = 0
def get_lr_for_step(self, num_updates):
# update learning rate
if num_updates < self.warmup_updates:
return self.warmup_init_lr + num_updates * self.lr_step
else:
return self.lr
def step(self, closure=None):
super().step(closure)
for param_group in self.param_groups:
param_group['num_updates'] += 1
param_group['lr'] = self.get_lr_for_step(param_group['num_updates'])
class AdamInverseSqrtWithWarmup(optim.Adam):
"""
Decay the LR based on the inverse square root of the update number.
We also support a warmup phase where we linearly increase the learning rate
from some initial learning rate (`warmup-init-lr`) until the configured
learning rate (`lr`). Thereafter we decay proportional to the number of
updates, with a decay factor set to align with the configured learning rate.
During warmup:
lrs = torch.linspace(warmup_init_lr, lr, warmup_updates)
lr = lrs[update_num]
After warmup:
lr = decay_factor / sqrt(update_num)
where
decay_factor = lr * sqrt(warmup_updates)
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=0, warmup_updates=4000, warmup_init_lr=1e-7):
super().__init__(
params,
lr=warmup_init_lr,
betas=betas,
eps=eps,
weight_decay=weight_decay,
)
#warmup_updates = 1
print(warmup_updates)
self.warmup_updates = warmup_updates
self.warmup_init_lr = warmup_init_lr
# linearly warmup for the first warmup_updates
warmup_end_lr = lr
self.lr_step = (warmup_end_lr - warmup_init_lr) / warmup_updates
# then, decay prop. to the inverse square root of the update number
self.decay_factor = warmup_end_lr * warmup_updates ** 0.5
for param_group in self.param_groups:
param_group['num_updates'] = 0
def get_lr_for_step(self, num_updates):
# update learning rate
if num_updates < self.warmup_updates:
return self.warmup_init_lr + num_updates * self.lr_step
else:
return self.decay_factor * (num_updates ** -0.5)
def step(self, closure=None):
super().step(closure)
for param_group in self.param_groups:
param_group['num_updates'] += 1
param_group['lr'] = self.get_lr_for_step(param_group['num_updates'])
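# --- Worked example (illustration only, not in the original file) ---
# With the defaults lr=1e-3, warmup_updates=4000, warmup_init_lr=1e-7:
#   update      0: lr = 1e-7                       (start of the linear warmup)
#   update   2000: lr ~= 5.0e-4                    (halfway through the warmup)
#   update   4000: lr = decay_factor / sqrt(4000)  = 1e-3
#   update  16000: lr = decay_factor / sqrt(16000) = 5.0e-4
# where decay_factor = 1e-3 * sqrt(4000), so the schedule is continuous at the end
# of the warmup and then decays with the inverse square root of the update count.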
def get_optimizer(parameters, s):
"""
Parse optimizer parameters.
Input should be of the form:
- "sgd,lr=0.01"
- "adagrad,lr=0.1,lr_decay=0.05"
"""
if "," in s:
method = s[:s.find(',')]
optim_params = {}
for x in s[s.find(',') + 1:].split(','):
split = x.split('=')
assert len(split) == 2
            assert re.match(r"^[+-]?(\d+(\.\d*)?|\.\d+)$", split[1]) is not None
optim_params[split[0]] = float(split[1])
else:
method = s
optim_params = {}
if method == 'adafactor':
optim_fn = Adafactor
optim_params.pop('beta1', None)
optim_params.pop('beta2', None)
elif method == 'adadelta':
optim_fn = optim.Adadelta
elif method == 'adagrad':
optim_fn = optim.Adagrad
elif method == 'adam':
optim_fn = optim.Adam
optim_params['betas'] = (optim_params.get('beta1', 0.9), optim_params.get('beta2', 0.999))
optim_params.pop('beta1', None)
optim_params.pop('beta2', None)
elif method == 'adam_warmup':
optim_fn = AdamWithWarmup
optim_params['betas'] = (optim_params.get('beta1', 0.9), optim_params.get('beta2', 0.999))
optim_params.pop('beta1', None)
optim_params.pop('beta2', None)
elif method == 'adam_inverse_sqrt':
optim_fn = AdamInverseSqrtWithWarmup
optim_params['betas'] = (optim_params.get('beta1', 0.9), optim_params.get('beta2', 0.999))
optim_params.pop('beta1', None)
optim_params.pop('beta2', None)
elif method == 'adamax':
optim_fn = optim.Adamax
elif method == 'asgd':
optim_fn = optim.ASGD
elif method == 'rmsprop':
optim_fn = optim.RMSprop
elif method == 'rprop':
optim_fn = optim.Rprop
elif method == 'sgd':
optim_fn = optim.SGD
assert 'lr' in optim_params
else:
raise Exception('Unknown optimization method: "%s"' % method)
# check that we give good parameters to the optimizer
    expected_args = inspect.getfullargspec(optim_fn.__init__)[0]
assert expected_args[:2] == ['self', 'params']
if method != 'adafactor':
if not all(k in expected_args[2:] for k in optim_params.keys()):
raise Exception('Unexpected parameters: expected "%s", got "%s"' % (
str(expected_args[2:]), str(optim_params.keys())))
return optim_fn(parameters, **optim_params)
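# --- Usage sketch (illustration only, not in the original file) ---
# The optimizer spec is a single string: a method name, optionally followed by
# comma-separated float hyper-parameters. `model` is a placeholder for any
# torch.nn.Module.
#   get_optimizer(model.parameters(), 'sgd,lr=0.01')
#   get_optimizer(model.parameters(), 'adam,lr=0.0001,beta1=0.9,beta2=0.98')
#   get_optimizer(model.parameters(), 'adam_inverse_sqrt,lr=0.0005,warmup_updates=4000')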
def to_cuda(*args):
"""
Move tensors to CUDA.
"""
return [None if x is None else x.cuda() for x in args]
def restore_segmentation(path):
"""
Take a file segmented with BPE and restore it to its original segmentation.
"""
assert os.path.isfile(path)
restore_cmd = "sed -i -r 's/(@@ )|(@@ ?$)//g' %s"
subprocess.Popen(restore_cmd % path, shell=True).wait()
def parse_lambda_config(params):
"""
Parse the configuration of lambda coefficient (for scheduling).
x = "3" # lambda will be a constant equal to x
x = "0:1,1000:0" # lambda will start from 1 and linearly decrease to 0 during the first 1000 iterations
x = "0:0,1000:0,2000:1" # lambda will be equal to 0 for the first 1000 iterations, then will linearly increase to 1 until iteration 2000
"""
for name in DYNAMIC_COEFF:
x = getattr(params, name)
split = x.split(',')
if len(split) == 1:
setattr(params, name, float(x))
setattr(params, name + '_config', None)
else:
split = [s.split(':') for s in split]
assert all(len(s) == 2 for s in split)
assert all(k.isdigit() for k, _ in split)
assert all(int(split[i][0]) < int(split[i + 1][0]) for i in range(len(split) - 1))
setattr(params, name, float(split[0][1]))
setattr(params, name + '_config', [(int(k), float(v)) for k, v in split])
def get_lambda_value(config, n_iter):
"""
Compute a lambda value according to its schedule configuration.
"""
ranges = [i for i in range(len(config) - 1) if config[i][0] <= n_iter < config[i + 1][0]]
if len(ranges) == 0:
assert n_iter >= config[-1][0]
return config[-1][1]
assert len(ranges) == 1
i = ranges[0]
x_a, y_a = config[i]
x_b, y_b = config[i + 1]
return y_a + (n_iter - x_a) * float(y_b - y_a) / float(x_b - x_a)
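# --- Worked example (illustration only, not in the original file) ---
# With the schedule string "0:0,1000:0,2000:1", parse_lambda_config stores
# config = [(0, 0.0), (1000, 0.0), (2000, 1.0)], and then:
#   get_lambda_value(config, 500)  -> 0.0                                   (flat segment)
#   get_lambda_value(config, 1500) -> 0.0 + 500 * (1.0 - 0.0) / 1000 = 0.5  (linear ramp)
#   get_lambda_value(config, 3000) -> 1.0                                   (past the last point)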
def update_lambdas(params, n_iter):
"""
Update all lambda coefficients.
"""
for name in DYNAMIC_COEFF:
config = getattr(params, name + '_config')
if config is not None:
setattr(params, name, get_lambda_value(config, n_iter))
def set_sampling_probs(data, params):
"""
Set the probability of sampling specific languages / language pairs during training.
"""
coeff = params.lg_sampling_factor
if coeff == -1:
return
assert coeff > 0
# monolingual data
params.mono_list = [k for k, v in data['mono_stream'].items() if 'train' in v]
if len(params.mono_list) > 0:
probs = np.array([1.0 * len(data['mono_stream'][lang]['train']) for lang in params.mono_list])
probs /= probs.sum()
probs = np.array([p ** coeff for p in probs])
probs /= probs.sum()
params.mono_probs = probs
# parallel data
params.para_list = [k for k, v in data['para'].items() if 'train' in v]
if len(params.para_list) > 0:
probs = np.array([1.0 * len(data['para'][(l1, l2)]['train']) for (l1, l2) in params.para_list])
probs /= probs.sum()
probs = np.array([p ** coeff for p in probs])
probs /= probs.sum()
params.para_probs = probs
def concat_batches(x1, len1, lang1_id, x2, len2, lang2_id, pad_idx, eos_idx, reset_positions, assert_eos=True):
"""
Concat batches with different languages.
"""
assert reset_positions is False or lang1_id != lang2_id
lengths = len1 + len2
if not reset_positions:
lengths -= 1
slen, bs = lengths.max().item(), lengths.size(0)
x = x1.new(slen, bs).fill_(pad_idx)
x[:len1.max().item()].copy_(x1)
positions = torch.arange(slen)[:, None].repeat(1, bs).to(x1.device)
langs = x1.new(slen, bs).fill_(lang1_id)
for i in range(bs):
l1 = len1[i] if reset_positions else len1[i] - 1
x[l1:l1 + len2[i], i].copy_(x2[:len2[i], i])
if reset_positions:
positions[l1:, i] -= len1[i].cpu()
langs[l1:, i] = lang2_id
if assert_eos:
assert (x == eos_idx).long().sum().item() == (4 if reset_positions else 3) * bs
return x, lengths, positions, langs
def truncate(x, lengths, max_len, eos_index):
"""
Truncate long sentences.
"""
if lengths.max().item() > max_len:
x = x[:max_len].clone()
lengths = lengths.clone()
for i in range(len(lengths)):
if lengths[i] > max_len:
lengths[i] = max_len
x[max_len - 1, i] = eos_index
return x, lengths
def create_batch(sentences, params, dico):
""" Convert a list of tokenized sentences into a Pytorch batch
args:
sentences: list of sentences
params: attribute params of the loaded model
dico: dictionary
returns:
word_ids: indices of the tokens
lengths: lengths of each sentence in the batch
"""
bs = len(sentences)
slen = max([len(sent) for sent in sentences])
word_ids = torch.LongTensor(slen, bs).fill_(params.pad_index)
for i in range(len(sentences)):
sent = torch.LongTensor([dico.index(w) for w in sentences[i]])
word_ids[:len(sent), i] = sent
lengths = torch.LongTensor([len(sent) for sent in sentences])
return word_ids, lengths
def create_masked_batch(lens, params, dico):
""" Create a batch of all mask tokens of specified lengths.
    The first and last token of every sequence are set to EOS_WORD; everything in between is MASK_WORD.
args:
lens (torch.Tensor): batch of sequences lengths of size (seq_len,)
params: attribute params of the loaded model
dico: dictionary
returns:
batch (torch.Tensor): batch of (seq_len, batch_size)
"""
sents = []
for _len in lens:
sents.append([EOS_WORD] + ([MASK_WORD] * (_len.item() - 2)) + [EOS_WORD])
return create_batch(sents, params, dico)[0]
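# --- Worked example (illustration only, not in the original file) ---
# For lens = torch.LongTensor([5, 3]) the batch built above has shape (5, 2)
# (max length x batch size); column by column it reads
#   column 0: EOS MASK MASK MASK EOS
#   column 1: EOS MASK EOS  PAD  PAD    (the shorter sequence, padded to length 5)
# where each symbol stands for the corresponding index in the dictionary.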
def generate_step(logits, topk=1, temperature=1, return_list=True):
""" Generate a word from from out[gen_idx]
args:
- out (torch.Tensor): tensor of logits of size batch_size x seq_len x vocab_size
- gen_idx (int): location for which to generate for
- top_k (int): if >0, only sample from the top k most probable words
- sample (Bool): if True, sample from full distribution. Overridden by top_k
"""
if temperature is not None:
logits /= temperature
if isinstance(topk, str) and topk == "all":
dist = torch.distributions.categorical.Categorical(logits=logits)
idx = dist.sample().squeeze(-1)
else:
kth_vals, kth_idx = logits.topk(topk, dim=-1)
dist = torch.distributions.categorical.Categorical(logits=kth_vals)
idx = kth_idx.gather(dim=1, index=dist.sample().unsqueeze(-1)).squeeze(-1)
return idx.tolist() if return_list else idx
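# --- Usage sketch (illustration only, not in the original file) ---
# `scores` below is a hypothetical tensor of shape (batch_size, vocab_size):
#   ids = generate_step(scores, topk=10, temperature=0.7)   # list of batch_size sampled ids
# With topk=1 the categorical distribution has a single outcome, so this reduces to
# taking the argmax of every row.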
def mask_batch_seq(batch, src_lens, trg_lens, params, n_masks_per_step=1,
start_idxs=None, finished_gen=None, right2left=False, gen_type="src2tgt"):
""" Create a prediction mask over a given batch
by sampling for each target position,
where the batch is concatenated source and target sentences
args:
batch (torch.Tensor):
n_masks_per_step (int): number of elements to mask out
start_idxs (int): if provided and there are no masks, indexes from which to start
predicting n_preds_per_step consecutive tokens per example
Assumes the indexes are in [0, {src/trg}_len] (i.e., don't add src len for trg)
right2left (bool): if True, go right to left
returns:
"""
pred_mask = np.zeros(batch.size())
mask_mask = (batch == params.mask_index)
if mask_mask.is_cuda:
mask_elts = mask_mask.nonzero().cpu().numpy()
src_lens = src_lens.cpu()
trg_lens = trg_lens.cpu()
else:
mask_elts = mask_mask.nonzero().numpy()
for batch_idx, (src_len, trg_len) in enumerate(zip(src_lens, trg_lens)):
        if finished_gen is not None and finished_gen[batch_idx]:
continue
# not clear about that part
'''
if mask_elts.size > 0 and mask_elts[np.where(mask_elts[:,1] == batch_idx)][:, 0].size > 0:
row_masks = mask_elts[np.where(mask_elts[:,1] == batch_idx)][:, 0]
start_idx = row_masks[-1] + 1 if right2left else row_masks[0]
elif start_idxs is not None:
start_idx = start_idxs[batch_idx].item()
if gen_type == "src2tgt":
start_idx += src_len
else: # mask_elts is empty, so make row_masks empty too
raise ValueError("No masks found and no starting index provided!")
'''
if start_idxs is not None:
start_idx = start_idxs[batch_idx].item()
if gen_type == "src2tgt":
start_idx += src_len
else: # mask_elts is empty, so make row_masks empty too
raise ValueError("No masks found and no starting index provided!")
        assert 'start_idx' in locals(), "start_idx was never set: no masks found and no starting index provided"
if right2left: # right to left
if gen_type == "src2tgt":
end_idx = max(src_len, start_idx - n_masks_per_step)
else:
end_idx = max(0, start_idx - n_masks_per_step)
pred_mask[end_idx:start_idx, batch_idx] = 1
else: # left to right
if gen_type == "src2tgt":
end_idx = min(src_len + trg_len, start_idx + n_masks_per_step)
else:
end_idx = min(src_len, start_idx + n_masks_per_step)
pred_mask[start_idx:end_idx, batch_idx] = 1
#import pdb; pdb.set_trace()
pred_mask = torch.from_numpy(pred_mask.astype(np.uint8))
if mask_mask.is_cuda:
pred_mask = pred_mask.cuda()
pred_mask[batch == params.pad_index] = 0
pred_mask[batch == params.eos_index] = 0 # TODO: remove
pred_mask[batch == params.bos_index] = 0
# targets
targs = batch[pred_mask]
# update input by filling with masks
all_masks = targs.clone().fill_(params.mask_index)
masked_batch = batch.masked_scatter(pred_mask, all_masks)
return pred_mask, masked_batch, targs
def shuf_order(langs, params=None, n=5):
"""
Randomize training order.
"""
if len(langs) == 0:
return []
if params is None:
return [langs[i] for i in np.random.permutation(len(langs))]
# sample monolingual and parallel languages separately
mono = [l1 for l1, l2 in langs if l2 is None]
para = [(l1, l2) for l1, l2 in langs if l2 is not None]
# uniform / weighted sampling
if params.lg_sampling_factor == -1:
p_mono = None
p_para = None
else:
p_mono = np.array([params.mono_probs[params.mono_list.index(k)] for k in mono])
p_para = np.array([params.para_probs[params.para_list.index(tuple(sorted(k)))] for k in para])
p_mono = p_mono / p_mono.sum()
p_para = p_para / p_para.sum()
s_mono = [mono[i] for i in np.random.choice(len(mono), size=min(n, len(mono)), p=p_mono, replace=True)] if len(mono) > 0 else []
s_para = [para[i] for i in np.random.choice(len(para), size=min(n, len(para)), p=p_para, replace=True)] if len(para) > 0 else []
assert len(s_mono) + len(s_para) > 0
return [(lang, None) for lang in s_mono] + s_para
``` |
{
"source": "jiangyd/dtest",
"score": 3
} |
#### File: dtest/dtest/report.py
```python
import pymongo
from collections import defaultdict
mymongodb = pymongo.MongoClient("mongodb://172.16.5.9:27017/test")
class report(object):
def __init__(self, project=None, module=None, suite=None, case=None, msg=None, url=None, headers=None, body=None,
method=None, validate=None, resp_code=None, resp_body=None,error=None):
self.url = url
self.headers = headers
self.body = body
self.method = method
self.project = project
self.result = True if msg is None else False
self.module = module
self.suite = suite
self.case = case
self.msg = msg
self.validate = validate
self.resp_code = resp_code
self.resp_body = resp_body
self.error=error
def save(self):
data = lambda: defaultdict(data)
result_data = data()
result_data["request"]["url"] = self.url
result_data["assert"]["validate"] = self.validate
result_data["assert"]["msg"] = self.msg
result_data["request"]["headers"] = self.headers
result_data["request"]["method"] = self.method
result_data["request"]["body"] = self.body
result_data["response"]["body"] = self.resp_body
result_data["response"]["status_code"] = self.resp_code
result_data["project"] = self.project
result_data["module"] = self.module
result_data["suite"] = self.suite
result_data["case"] = self.case
result_data["error"]=self.error
mymongodb.list_database_names()
db = mymongodb["jiangyd"]
post = db.cccc
print(post.insert_one(result_data).inserted_id)
if __name__ == "__main__":
t = report(project="test", module="sc", suite="b", case="test", msg="ttt")
t.save()
``` |
{
"source": "jiangydev/bifrost-faucet",
"score": 2
} |
#### File: jiangydev/bifrost-faucet/main.py
```python
import asyncio
import logging
import random
import re
import time
from threading import Thread
import schedule as schedule
from telethon import TelegramClient, events
from telethon.network import connection
# Use your own values from my.telegram.org
api_id = 0
api_hash = 'api_hash'
proxy_custom = ('1.2.3.4', 443, 'secret')
client = TelegramClient('anon', api_id, api_hash
# , connection=connection.ConnectionTcpMTProxyRandomizedIntermediate
# , proxy=proxy_custom
)
# Logging output
logging.basicConfig(format='[%(levelname) 5s/%(asctime)s] %(name)s: %(message)s',
level=logging.INFO)
handling_bnc = []
bot_reply_time = 180
# Bifrost Faucet has ID -1001352638541
# @bifrost_faucet2_bot: 1173124754
@client.on(events.NewMessage(chats=[-1001352638541], from_users=[1173124754]))
async def my_event_handler(event):
global bot_reply_time
bot_reply_time = 0
msg = event.message.message
    print(f'[listener] Received message: {msg}')
    # use a regex to look for a bnc address
    bnc_obj = re.search('(5\\w{47})|([a-h]\\w{46})', msg)
    if bnc_obj:
        # a bnc address was found
        bnc = bnc_obj.group()
        print(f'[listener] bnc address matched by the regex: {bnc}')
        if bnc in handling_bnc:
            if 'successful' in msg:
                print(f'[listener] Drip successful: {bnc}')
            if 'has already dripped' in msg:
                print(f'[listener] Already dripped: {bnc}')
            print(f'[listener] Removing candidate bnc address: {bnc}')
            # remove the bnc address whether it succeeded or not
            handling_bnc.remove(bnc)
# Send a Telegram message
async def send_msg(user, bnc_address):
async with TelegramClient('tmp', api_id, api_hash
# , connection=connection.ConnectionTcpMTProxyRandomizedIntermediate
# , proxy=proxy_custom
) as tmp_client:
await tmp_client.send_message(user, f'/want {bnc_address}')
client.run_until_disconnected()
    print(f'[job] Message sent, user: {user}, bnc_address: {bnc_address}')
# First read the addresses from the file and put them into the candidate list;
def load_bnc_job():
    print(f'[job] Starting')
    with open('./bnc_waiting.txt', 'r', encoding='UTF-8') as lines:
        # every line read ends with a newline \n, which has to be stripped
        array = lines.readlines()
        for i in array:
            i = i.strip('\n')
            i = i.strip('!')
            # skip blank lines and comment lines; if the address already exists, do not add it as a candidate either
            if len(i) > 0 and not i.startswith('#') and i not in handling_bnc:
                handling_bnc.append(i)
    print(f'[job] Finished reading the file, number of addresses read: {len(handling_bnc)}')
    print(f'[job] Done')
# Task initialization: first read the addresses from the file and put them into the candidate list;
def load_bnc_init():
    print(f'[init] Starting')
    with open('./bnc_waiting.txt', 'r', encoding='UTF-8') as lines:
        # every line read ends with a newline \n, which has to be stripped
        array = lines.readlines()
        for i in array:
            i = i.strip('\n')
            # skip blank lines and comment lines; if the address already exists, do not add it as a candidate either
            if len(i) > 0 and not i.startswith('#') and not i.startswith('!') and i not in handling_bnc:
                handling_bnc.append(i)
    print(f'[init] Finished reading the file, number of candidate addresses: {len(handling_bnc)}')
    print(f'[init] Done')
# Check how many elements are in the candidate list, keep taking the 0th one, and send a "/want bnc" message;
def send_msg_job():
global bot_reply_time
if bot_reply_time >= 180:
        print('[send job] Bot reply timed out, not sending a message')
        return None
    print('[send job] Bot is responsive')
    sleep_seconds = random.randint(1, 10)
    print(f'[send job] Starting a random sleep of {sleep_seconds}s')
    time.sleep(sleep_seconds)
    print(f'[send job] Random sleep finished')
if len(handling_bnc) > 0:
current_bnc = handling_bnc[0]
        print(f'[send job] Starting: {current_bnc}')
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try:
loop.run_until_complete(send_msg(-1001352638541, current_bnc))
except:
bot_reply_time = bot_reply_time + 180
            print(f'[send job] Exception occurred: {current_bnc}, timer reset to {bot_reply_time}')
finally:
loop.close()
        print(f'[send job] Done')
# Scheduled job 1: load bnc addresses (every 24 h)
schedule.every(24).hours.do(load_bnc_job)
# Scheduled job 2: send a Telegram message (every 60 s)
schedule.every(60).seconds.do(send_msg_job)
def job_start():
while True:
schedule.run_pending()
time.sleep(1)
def time_start():
global bot_reply_time
while True:
time.sleep(1)
bot_reply_time = bot_reply_time + 1
if __name__ == '__main__':
    # Initialize the bnc addresses once first
    load_bnc_init()
    # Start the scheduler threads
    Thread(target=job_start).start()
    Thread(target=time_start).start()
    # Run the Telegram client
client.start()
client.run_until_disconnected()
``` |
{
"source": "jiangydev/python-bookshopping",
"score": 3
} |
#### File: jiangydev/python-bookshopping/DbUtil.py
```python
import mysql.connector
from mysql.connector import errorcode
def close_db(cursor, cnx):
cursor.close()
cnx.close()
def open_db():
config = {
'user': 'root',
'password': '<PASSWORD>',
'host': '127.0.0.1',
'database': 'pythontest',
'raise_on_warnings': True
}
try:
return mysql.connector.connect(**config)
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
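# --- Usage sketch (illustration only, not in the original file) ---
# cnx = open_db()                # returns None if the connection attempt fails
# if cnx is not None:
#     cursor = cnx.cursor()
#     cursor.execute("SELECT 1")
#     print(cursor.fetchone())
#     close_db(cursor, cnx)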
```
#### File: jiangydev/python-bookshopping/run.py
```python
import time
import openpyxl
import tkinter as tk
from tkinter import ttk
import tkinter.messagebox
import re
import DbUtil
def init():
    # Window
window = tk.Tk()
window.title('图书管理系统')
window.geometry('450x300')
    # Canvas holding the background image
canvas = tk.Canvas(window, height=300, width=500)
imagefile = tk.PhotoImage(file='background.png')
image = canvas.create_image(0, 0, anchor='nw', image=imagefile)
canvas.pack(side='top')
    # Labels for username and password
tk.Label(window, text='用户名').place(x=120, y=115)
tk.Label(window, text='密 码').place(x=120, y=155)
    # Username entry field
var_usr_name = tk.StringVar()
entry_usr_name = tk.Entry(window, textvariable=var_usr_name)
entry_usr_name.place(x=180, y=115)
    # Password entry field
var_usr_pwd = tk.StringVar()
entry_usr_pwd = tk.Entry(window, textvariable=var_usr_pwd, show='*')
entry_usr_pwd.place(x=180, y=155)
    # Login / sign-up buttons
bt_login = tk.Button(window, text='登录', command=lambda: usr_log_in(window, var_usr_name, var_usr_pwd))
bt_login.place(x=140, y=230)
bt_logup = tk.Button(window, text='注册', command=lambda: usr_sign_up(window))
bt_logup.place(x=210, y=230)
bt_logquit = tk.Button(window, text='退出', command=lambda: usr_quit(window))
bt_logquit.place(x=280, y=230)
window.mainloop()
def manager_main(window, usr_name, is_manager):
usr_quit(window)
manager = tk.Tk()
manager.title("书籍管理")
    # Create the menu object and attach it to the window (fixed menu bar)
menubar = tk.Menu(manager)
    # Add five main menus to the fixed menu bar: category, book info, buy books, about us, help
    menuType = tk.Menu(menubar, tearoff=0)  # tearoff=0: the menu cannot be torn off the window
menuBook = tk.Menu(menubar, tearoff=0)
menuShopping = tk.Menu(menubar, tearoff=0)
menuAbout = tk.Menu(menubar, tearoff=0)
menuHelp = tk.Menu(menubar, tearoff=0)
menubar.add_cascade(label="类别", menu=menuType) # 含有下拉子菜单
menubar.add_cascade(label="书信息", menu=menuBook)
menubar.add_cascade(label="购买图书", menu=menuShopping)
menubar.add_cascade(label="关于我们", menu=menuAbout)
menubar.add_cascade(label="帮助", menu=menuHelp)
frame_root = tk.Frame(manager, width=500, height=300)
frame_root.pack()
frame_type = tk.Frame(frame_root, width=500, height=300)
frame_book = tk.Frame(frame_root, width=500, height=300)
frame_shopping = tk.Frame(frame_root, width=500, height=300)
    # "About us" page
frame_about = tk.Frame(frame_root, width=500, height=300)
tk.Label(frame_about, text="作者:jiangydev(王江雨)\n学号:202150827", width=40, height=10).pack()
    # Add a drop-down submenu under "category"
menuType.add_command(label="查看类别",
command=lambda: on_type_r_selected(frame_root, frame_type, frame_book, frame_shopping,
frame_about))
menuType.add_command(label="增加类别", command=lambda: on_type_c_selected(manager, frame_type))
menuType.add_command(label="删除类别", command=lambda: on_type_d_selected(manager, frame_type))
menuType.add_command(label="修改类别", command=lambda: on_type_u_selected(manager, frame_type))
    # Add a drop-down submenu under "book info"
menuBook.add_command(label="查看书信息",
command=lambda: on_book_r_selected(frame_root, frame_type, frame_book, frame_shopping,
frame_about))
menuBook.add_command(label="增加书信息", command=lambda: on_book_c_selected(manager, frame_book))
menuBook.add_command(label="删除书信息", command=lambda: on_book_d_selected(manager, frame_book))
menuBook.add_command(label="修改书信息", command=lambda: on_book_u_selected(manager, frame_book))
menuBook.add_command(label="导出书信息", command=lambda: on_book_e_selected(manager))
    # Add a drop-down submenu under "buy books"
menuShopping.add_command(label="查看购物车",
command=lambda: on_shopping_r_selected(usr_name, frame_root, frame_type, frame_book,
frame_shopping,
frame_about))
menuShopping.add_command(label="查看图书列表",
command=lambda: on_shopping_c_selected(usr_name, frame_root, frame_type, frame_book,
frame_shopping,
frame_about))
menuShopping.add_command(label="导出购物信息", command=lambda: on_shopping_e_selected(manager))
    # Add a drop-down submenu under "about us"
menuAbout.add_command(label="作者信息",
command=lambda: on_about_selected(frame_root, frame_type, frame_book, frame_shopping,
frame_about))
    # Add a drop-down submenu under "help"
menuHelp.add_command(label="暂无帮助文档")
    # If not an administrator, hide "category" and "book info"
if not is_manager:
menubar.delete(0, 2)
    # If an administrator, hide "buy books"
else:
menubar.delete(3)
manager.config(menu=menubar)
    # Event loop
manager.mainloop()
pass
# Treeview, column names, sort order
def treeview_sort_column(tv, col, reverse):
l = [(tv.set(k, col), k) for k in tv.get_children('')]
print(tv.get_children(''))
    l.sort(reverse=reverse)  # sort order
# rearrange items in sorted positions
    for index, (val, k) in enumerate(l):  # move rows according to the sorted index
tv.move(k, '', index)
# print(k)
    # Rewrite the heading so that clicking it again sorts in the reverse order
tv.heading(col, command=lambda: treeview_sort_column(tv, col, not reverse))
def build_type(frame_type):
for widget in frame_type.winfo_children():
widget.destroy()
columns = ("类别编号", "类别名")
treeview = ttk.Treeview(frame_type, columns=columns, show='headings')
treeview.column("类别编号", width=100, anchor='center') # 表示列,不显示
treeview.column("类别名", width=250, anchor='center')
treeview.heading('类别编号', text='类别编号')
treeview.heading('类别名', text='类别名')
treeview.grid()
query = ("SELECT tid, tname FROM `booktype`")
cnx = DbUtil.open_db()
cursor = cnx.cursor()
cursor.execute(query)
i = 1
for (tid, tname) in cursor:
treeview.insert('', i, values=(tid, tname))
i = i + 1
DbUtil.close_db(cursor, cnx)
    for col in columns:  # attach sorting to every heading (loop version of the "manual" setup above)
treeview.heading(col, text=col, command=lambda _col=col: treeview_sort_column(treeview, _col, False))
# View categories
def on_type_r_selected(frame_root, frame_type, frame_book, frame_shopping, frame_about):
build_type(frame_type)
frame_type.pack()
frame_book.forget()
frame_shopping.forget()
frame_about.forget()
# Add a category
def on_type_c_selected(manager, frame_type):
def inserttodb():
        # Get the contents of the entry fields
tid = t_id.get().strip()
tname = t_name.get().strip()
if tid == '' or tname == '':
tk.messagebox.showerror(message='类别编号或名称为空')
else:
query = ("INSERT INTO `booktype`(tid, tname) VALUES ('%s', '%s')" % (tid, tname))
try:
cnx = DbUtil.open_db()
cursor = cnx.cursor()
cursor.execute(query)
cnx.commit()
DbUtil.close_db(cursor, cnx)
tk.messagebox.showinfo('成功', '新增类别成功')
type_c.destroy()
build_type(frame_type)
except:
tk.messagebox.showerror('错误', '新增类别失败')
type_c = tk.Toplevel(manager)
type_c.title('新增类别')
type_c.geometry('350x150')
    # Category id variable, label and entry field
t_id = tk.StringVar()
tk.Label(type_c, text='请输入类别编号:').place(x=10, y=10)
tk.Entry(type_c, textvariable=t_id).place(x=150, y=10)
    # Category name variable, label and entry field
t_name = tk.StringVar()
tk.Label(type_c, text='请输入类别名:').place(x=10, y=50)
tk.Entry(type_c, textvariable=t_name).place(x=150, y=50)
    # Confirm button and its position
bt_confirm_c = tk.Button(type_c, text='新增', command=inserttodb)
bt_confirm_c.place(x=150, y=90)
# Delete category information
def on_type_d_selected(manager, frame_type):
def delete_type_by_id():
# 获取输入框内的内容
tid = t_id.get().strip()
if tid == '':
tk.messagebox.showerror(message='类别编号为空')
else:
query = ("SELECT COUNT(*) FROM `booktype` WHERE tid='%s'" % (tid))
cnx = DbUtil.open_db()
cursor = cnx.cursor()
cursor.execute(query)
if cursor.fetchone()[0] == 1:
query = ("DELETE FROM `booktype` WHERE tid='%s'" % (tid))
try:
cursor.execute(query)
cnx.commit()
DbUtil.close_db(cursor, cnx)
tk.messagebox.showinfo('成功', '删除类别成功')
type_d.destroy()
build_type(frame_type)
except:
tk.messagebox.showerror('错误', '删除类别失败')
else:
tk.messagebox.showerror('错误', '删除类别失败, 该类别编号不存在')
type_d = tk.Toplevel(manager)
type_d.title('删除类别信息')
type_d.geometry('350x150')
    # Category id variable, label and entry field
t_id = tk.StringVar()
tk.Label(type_d, text='请输入类别编号:').place(x=10, y=50)
tk.Entry(type_d, textvariable=t_id).place(x=150, y=50)
    # Confirm-delete button and its position
bt_confirm_d = tk.Button(type_d, text='确认删除', command=delete_type_by_id)
bt_confirm_d.place(x=150, y=90)
# Update a category
def on_type_u_selected(manager, frame_type):
def inserttodb():
        # Get the contents of the entry fields
tid = t_id.get().strip()
tidn = t_id_n.get().strip()
tnamen = t_name_n.get().strip()
if tid == '' or tidn == '' or tnamen == '':
tk.messagebox.showerror(message='类别编号或名称为空')
else:
query = ("SELECT COUNT(*) FROM `booktype` WHERE tid='%s'" % (tid))
cnx = DbUtil.open_db()
cursor = cnx.cursor()
cursor.execute(query)
if cursor.fetchone()[0] == 1:
query = ("UPDATE `booktype` SET tid='%s', tname='%s' WHERE tid='%s'" % (tidn, tnamen, tid))
try:
cursor.execute(query)
cnx.commit()
DbUtil.close_db(cursor, cnx)
tk.messagebox.showinfo('成功', '修改类别成功')
type_u.destroy()
build_type(frame_type)
except:
tk.messagebox.showerror('错误', '修改类别失败')
else:
tk.messagebox.showerror('错误', '修改类别失败, 该类别编号不存在')
type_u = tk.Toplevel(manager)
type_u.title('修改类别')
type_u.geometry('380x200')
    # Category id variable, label and entry field
t_id = tk.StringVar()
tk.Label(type_u, text='请输入需要修改的类别编号:').place(x=10, y=10)
tk.Entry(type_u, textvariable=t_id).place(x=180, y=10)
# 类别编号变量及标签、输入框
t_id_n = tk.StringVar()
tk.Label(type_u, text='请输入新的类别编号:').place(x=10, y=50)
tk.Entry(type_u, textvariable=t_id_n).place(x=180, y=50)
# 类别名变量及标签、输入框
t_name_n = tk.StringVar()
tk.Label(type_u, text='请输入新的类别名:').place(x=10, y=90)
tk.Entry(type_u, textvariable=t_name_n).place(x=180, y=90)
# 确认注册按钮及位置
bt_confirm_c = tk.Button(type_u, text='修改', command=inserttodb)
bt_confirm_c.place(x=180, y=130)
def build_shopping(frame_shopping, username, option):
def on_treeview_click(event):
cnx = DbUtil.open_db()
cursor = cnx.cursor()
for item in treeview.selection():
item_text = treeview.item(item, "values")
bid = item_text[0]
query = ("SELECT bcount FROM `bookinfo` WHERE `bookinfo`.bid = '%s'" % (bid))
cursor.execute(query)
book_balance = cursor.fetchone()[0]
query = ("SELECT scount FROM `shoplist` WHERE bid = '%s' AND username = '%s'" % (bid, username))
cursor.execute(query)
result = cursor.fetchone()
shop_balance = 0 if result is None else result[0]
if item_text[5] == '双击从购物车删除':
if shop_balance > 1:
query_update_shop = (
"UPDATE `shoplist` SET scount = '%s' WHERE username = '%s' AND bid = '%s'" % (
shop_balance - 1, username, bid))
query_update = ("UPDATE `bookinfo` SET bcount = '%s' WHERE bid = '%s'" % (book_balance + 1, bid))
try:
cursor.execute(query_update_shop)
cursor.execute(query_update)
cnx.commit()
tk.messagebox.showinfo(message='从购物车删除成功!')
except:
cnx.rollback()
tk.messagebox.showerror(message='从购物车删除失败!')
elif shop_balance == 1:
query_delete = (
"DELETE FROM `shoplist` WHERE username = '%s' AND bid = '%s'" % (username, bid))
query_update = ("UPDATE `bookinfo` SET bcount = '%s' WHERE bid = '%s'" % (book_balance + 1, bid))
try:
cursor.execute(query_delete)
cursor.execute(query_update)
cnx.commit()
tk.messagebox.showinfo(message='从购物车删除成功!')
except:
cnx.rollback()
tk.messagebox.showerror(message='从购物车删除失败!')
else:
tk.messagebox.showerror(message='无法从购物车删除,数量异常!')
elif item_text[5] == '双击加入购物车':
if shop_balance > 0:
query_update_shop = (
"UPDATE `shoplist` SET scount = '%s' WHERE username = '%s' AND bid = '%s'" % (
shop_balance + 1, username, bid))
query_update = ("UPDATE `bookinfo` SET bcount = '%s' WHERE bid = '%s'" % (book_balance - 1, bid))
try:
cursor.execute(query_update_shop)
cursor.execute(query_update)
cnx.commit()
tk.messagebox.showinfo(message='加入购物车成功!')
                    except:
                        cnx.rollback()
                        tk.messagebox.showerror(message='加入购物车失败!')
elif shop_balance == 0:
query_insert = (
"INSERT INTO `shoplist`(username, bid, scount) VALUES ('%s', '%s', '%s')" % (username, bid, 1))
query_update = ("UPDATE `bookinfo` SET bcount = '%s' WHERE bid = '%s'" % (book_balance - 1, bid))
try:
cursor.execute(query_insert)
cursor.execute(query_update)
cnx.commit()
tk.messagebox.showinfo(message='加入购物车成功!')
                    except:
                        cnx.rollback()
                        tk.messagebox.showerror(message='加入购物车失败!')
else:
tk.messagebox.showerror(message='无库存,无法加入购物车!')
elif item_text[5] == '去付款':
tk.messagebox.showerror(message='支付功能开发中......')
else:
tk.messagebox.showerror(message='非法操作!')
DbUtil.close_db(cursor, cnx)
build_shopping(frame_shopping, username, option)
    # Shopping-cart view: double-click removes one copy; when the count reaches 0 the row is deleted
if option == 'shop':
query = (
"SELECT i.bid bid, i.bname bname, i.bprice bprice, b.tname tname, i.bcount bcount, l.scount scount FROM `booktype` b, `bookinfo` i, `shoplist` l WHERE b.tid = i.tid AND i.bid = l.bid AND l.username = '%s'" % (
username))
null_remind = '温馨提示:购物车暂无物品'
book_opt = '双击从购物车删除'
table_4 = '已购数量'
    # Book-list view: double-click adds one copy to the cart; books already in a cart are not shown in the list
elif option == 'book':
query = (
"SELECT i.bid bid, i.bname bname, i.bprice bprice, b.tname tname, i.bcount bcount FROM `booktype` b, `bookinfo` i WHERE b.tid = i.tid AND i.bid not in (SELECT DISTINCT bid FROM shoplist l WHERE l.username != '%s')" % (
username))
null_remind = '温馨提示:系统中暂无可购物品'
book_opt = '双击加入购物车'
table_4 = '库存'
else:
tk.messagebox.showerror(message='非法选择!')
return False
for widget in frame_shopping.winfo_children():
widget.destroy()
columns = ("书编号", "书名", "书价", "类别", table_4, "操作")
treeview = ttk.Treeview(frame_shopping, columns=columns, show='headings')
treeview.column("书编号", width=100, anchor='center') # 表示列,不显示
treeview.column("书名", width=150, anchor='center')
treeview.column("书价", width=90, anchor='center')
treeview.column("类别", width=100, anchor='center')
treeview.column(table_4, width=90, anchor='center')
treeview.column("操作", width=200, anchor='center')
treeview.heading('书编号', text='书编号')
treeview.heading('书名', text='书名')
treeview.heading('书价', text='书价')
treeview.heading('类别', text='类别')
treeview.heading(table_4, text=table_4)
treeview.heading('操作', text='操作')
cnx = DbUtil.open_db()
cursor = cnx.cursor()
cursor.execute(query)
result = cursor.fetchall()
    # if the query returns rows show them, otherwise show an "empty cart" hint
if cursor.rowcount == 0:
for widget in frame_shopping.winfo_children():
widget.destroy()
tk.Label(frame_shopping, text=null_remind, width=80, height=5).pack()
else:
i = 0
for k in result:
treeview.insert('', i, values=(k[0], k[1], k[2], k[3], k[4] if option == 'book' else k[5], book_opt))
i = i + 1
if option == 'shop':
query = (
"SELECT SUM(bprice*scount) total FROM `bookinfo` i, `shoplist` l WHERE i.bid = l.bid AND l.username = '%s'" % (
username))
cursor.execute(query)
result = cursor.fetchone()
treeview.insert('', i, values=('金额合计:', 'RMB ¥', 0 if result is None else result[0], '', '', '去付款'))
treeview.grid()
    for col in columns:  # attach a sort command to every column heading
treeview.heading(col, text=col, command=lambda _col=col: treeview_sort_column(treeview, _col, False))
treeview.bind("<Double-1>", on_treeview_click)
DbUtil.close_db(cursor, cnx)
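# Note: the SQL above is built with string interpolation; a minimal sketch of the
# safer parameterized form (assuming the cursor behind DbUtil is a DB-API cursor,
# e.g. mysql-connector, whose placeholder style is %s):
#     cursor.execute("SELECT scount FROM `shoplist` WHERE bid = %s AND username = %s",
#                    (bid, username))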
# Shopping cart view
def on_shopping_r_selected(usr_name, frame_root, frame_type, frame_book, frame_shopping, frame_about):
build_shopping(frame_shopping, usr_name, 'shop')
frame_type.forget()
frame_book.forget()
frame_shopping.pack()
frame_about.forget()
# Buy books
def on_shopping_c_selected(usr_name, frame_root, frame_type, frame_book, frame_shopping, frame_about):
build_shopping(frame_shopping, usr_name, 'book')
frame_type.forget()
frame_book.forget()
frame_shopping.pack()
frame_about.forget()
# Export shopping cart info
def on_shopping_e_selected(manager):
pass
def build_book(frame_book):
for widget in frame_book.winfo_children():
widget.destroy()
columns = ("书编号", "书名", "书价", "类别", "库存")
treeview = ttk.Treeview(frame_book, columns=columns, show='headings')
treeview.column("书编号", width=100, anchor='center') # 表示列,不显示
treeview.column("书名", width=100, anchor='center')
treeview.column("书价", width=100, anchor='center')
treeview.column("类别", width=100, anchor='center')
treeview.column("库存", width=100, anchor='center')
treeview.heading('书编号', text='书编号')
treeview.heading('书名', text='书名')
treeview.heading('书价', text='书价')
treeview.heading('类别', text='类别')
treeview.heading('库存', text='库存')
treeview.grid()
query = (
"SELECT bid, bname, bprice, tname, bcount FROM `booktype`, `bookinfo` WHERE `booktype`.tid=`bookinfo`.tid")
cnx = DbUtil.open_db()
cursor = cnx.cursor()
cursor.execute(query)
i = 1
for (bid, bname, bprice, tname, bcount) in cursor:
treeview.insert('', i, values=(bid, bname, bprice, tname, bcount))
i = i + 1
DbUtil.close_db(cursor, cnx)
    for col in columns:  # attach a sort command to every column heading
treeview.heading(col, text=col, command=lambda _col=col: treeview_sort_column(treeview, _col, False))
# View book info
def on_book_r_selected(frame_root, frame_type, frame_book, frame_shopping, frame_about):
build_book(frame_book)
frame_type.forget()
frame_book.pack()
frame_shopping.forget()
frame_about.forget()
# Add a book
def on_book_c_selected(manager, frame_book):
def inserttodb():
        # read the contents of the entry boxes
bid = b_id.get().strip()
bname = b_name.get().strip()
bprice = b_price.get().strip()
tid = t_id.get().strip()
bcount = b_count.get().strip()
if bid == '' or bname == '' or bprice == '' or tid == '' or bcount == '':
tk.messagebox.showerror(message='输入框不能为空')
else:
query = ("INSERT INTO `bookinfo`(bid, bname, bprice, tid, bcount) VALUES ('%s', '%s', '%s', '%s', '%s')" % (
bid, bname, bprice, tid, bcount))
try:
cnx = DbUtil.open_db()
cursor = cnx.cursor()
cursor.execute(query)
cnx.commit()
DbUtil.close_db(cursor, cnx)
tk.messagebox.showinfo('成功', '新增图书信息成功')
book_c.destroy()
build_book(frame_book)
except:
tk.messagebox.showerror('错误', '新增图书信息失败')
book_c = tk.Toplevel(manager)
book_c.title('新增书信息')
book_c.geometry('350x280')
# 书编号变量及标签、输入框
b_id = tk.StringVar()
tk.Label(book_c, text='请输入书编号:').place(x=10, y=10)
tk.Entry(book_c, textvariable=b_id).place(x=150, y=10)
# 书名变量及标签、输入框
b_name = tk.StringVar()
tk.Label(book_c, text='请输入书名:').place(x=10, y=50)
tk.Entry(book_c, textvariable=b_name).place(x=150, y=50)
# 书价变量及标签、输入框
b_price = tk.StringVar()
tk.Label(book_c, text='请输入书价:').place(x=10, y=90)
tk.Entry(book_c, textvariable=b_price).place(x=150, y=90)
# 类别编号变量及标签、输入框
t_id = tk.StringVar()
tk.Label(book_c, text='请输入书类别编号:').place(x=10, y=130)
tk.Entry(book_c, textvariable=t_id).place(x=150, y=130)
# 类别名变量及标签、输入框
b_count = tk.StringVar()
tk.Label(book_c, text='请输入书数量:').place(x=10, y=170)
tk.Entry(book_c, textvariable=b_count).place(x=150, y=170)
# 确认注册按钮及位置
bt_confirm_c = tk.Button(book_c, text='新增', command=inserttodb)
bt_confirm_c.place(x=150, y=210)
# Delete a book
def on_book_d_selected(manager, frame_book):
def delete_book_by_id():
        # read the contents of the entry boxes
bid = b_id.get().strip()
if bid == '':
tk.messagebox.showerror(message='图书编号为空')
else:
query = ("SELECT COUNT(*) FROM `bookinfo` WHERE bid='%s'" % (bid))
cnx = DbUtil.open_db()
cursor = cnx.cursor()
cursor.execute(query)
if cursor.fetchone()[0] == 1:
query = ("DELETE FROM `bookinfo` WHERE bid='%s'" % (bid))
try:
cursor.execute(query)
cnx.commit()
DbUtil.close_db(cursor, cnx)
tk.messagebox.showinfo('成功', '删除图书成功')
book_d.destroy()
build_book(frame_book)
except:
tk.messagebox.showerror('错误', '删除图书失败')
else:
tk.messagebox.showerror('错误', '删除图书失败, 该图书编号不存在')
book_d = tk.Toplevel(manager)
book_d.title('删除图书信息')
book_d.geometry('350x150')
# 类别编号变量及标签、输入框
b_id = tk.StringVar()
tk.Label(book_d, text='请输入图书编号:').place(x=10, y=50)
tk.Entry(book_d, textvariable=b_id).place(x=150, y=50)
# 确认删除按钮及位置
bt_confirm_d = tk.Button(book_d, text='确认删除', command=delete_book_by_id)
bt_confirm_d.place(x=150, y=90)
# Edit a book
def on_book_u_selected(manager, frame_book):
def inserttodb():
        # read the contents of the entry boxes
bido = b_id_o.get().strip()
bid = b_id.get().strip()
bname = b_name.get().strip()
bprice = b_price.get().strip()
tid = t_id.get().strip()
bcount = b_count.get().strip()
if bido == '' or bid == '' or bname == '' or bprice == '' or tid == '' or bcount == '':
tk.messagebox.showerror(message='输入框不能为空')
else:
query = ("SELECT COUNT(*) FROM `bookinfo` WHERE bid='%s'" % (bido))
cnx = DbUtil.open_db()
cursor = cnx.cursor()
cursor.execute(query)
if cursor.fetchone()[0] == 1:
query = (
"UPDATE `bookinfo` SET bid='%s', bname='%s', bprice='%s', tid='%s', bcount='%s' WHERE tid='%s'" % (
bid, bname, bprice, tid, bcount, bido))
try:
cursor.execute(query)
cnx.commit()
DbUtil.close_db(cursor, cnx)
tk.messagebox.showinfo('成功', '修改图书信息成功')
book_u.destroy()
build_book(frame_book)
except:
tk.messagebox.showerror('错误', '修改图书信息失败')
else:
tk.messagebox.showerror('错误', '修改图书信息失败, 该图书编号不存在')
book_u = tk.Toplevel(manager)
book_u.title('修改图书信息')
book_u.geometry('380x320')
# 类别编号变量及标签、输入框
b_id_o = tk.StringVar()
tk.Label(book_u, text='请输入需要修改的图书编号:').place(x=10, y=10)
tk.Entry(book_u, textvariable=b_id_o).place(x=180, y=10)
# 书编号变量及标签、输入框
b_id = tk.StringVar()
tk.Label(book_u, text='请输入新的书编号:').place(x=10, y=50)
tk.Entry(book_u, textvariable=b_id).place(x=180, y=50)
# 书名变量及标签、输入框
b_name = tk.StringVar()
tk.Label(book_u, text='请输入新的书名:').place(x=10, y=90)
tk.Entry(book_u, textvariable=b_name).place(x=180, y=90)
# 书价变量及标签、输入框
b_price = tk.StringVar()
tk.Label(book_u, text='请输入新的书价:').place(x=10, y=130)
tk.Entry(book_u, textvariable=b_price).place(x=180, y=130)
# 类别编号变量及标签、输入框
t_id = tk.StringVar()
tk.Label(book_u, text='请输入新的书类别编号:').place(x=10, y=170)
tk.Entry(book_u, textvariable=t_id).place(x=180, y=170)
# 类别名变量及标签、输入框
b_count = tk.StringVar()
tk.Label(book_u, text='请输入新的书数量:').place(x=10, y=210)
tk.Entry(book_u, textvariable=b_count).place(x=180, y=210)
# 确认注册按钮及位置
bt_confirm_c = tk.Button(book_u, text='修改', command=inserttodb)
bt_confirm_c.place(x=150, y=250)
# Export book info
def on_book_e_selected(manager):
def export():
file_name = f_name.get().strip()
if file_name == '':
tk.messagebox.showerror(message='文件名不能为空')
else:
wb = openpyxl.Workbook()
sheet = wb.active
sheet['A1'] = '图书编号'
sheet['B1'] = '图书名称'
sheet['C1'] = '图书价格'
sheet['D1'] = '图书类别'
sheet['E1'] = '图书库存'
query = (
"SELECT bid, bname, bprice, tname, bcount FROM `booktype`, `bookinfo` WHERE `booktype`.tid=`bookinfo`.tid")
cnx = DbUtil.open_db()
cursor = cnx.cursor()
cursor.execute(query)
i = 2
for (bid, bname, bprice, tname, bcount) in cursor:
sheet['A%s' % i] = bid
sheet['B%s' % i] = bname
sheet['C%s' % i] = bprice
sheet['D%s' % i] = tname
sheet['E%s' % i] = bcount
i = i + 1
DbUtil.close_db(cursor, cnx)
time_file = time.strftime("%Y-%m-%d-%H-%M", time.localtime())
file_name = file_name + '-' + time_file + '.xlsx'
wb.save(file_name)
tk.messagebox.showinfo('成功', '导出图书信息成功')
book_e.destroy()
book_e = tk.Toplevel(manager)
book_e.title('导出图书信息')
book_e.geometry('350x150')
# 类别编号变量及标签、输入框
f_name = tk.StringVar()
tk.Label(book_e, text='请输入文件名(不需要后缀):').place(x=10, y=50)
tk.Entry(book_e, textvariable=f_name).place(x=180, y=50)
# 确认导出按钮及位置
bt_confirm_d = tk.Button(book_e, text='确认导出', command=export)
bt_confirm_d.place(x=180, y=90)
# About us
def on_about_selected(frame_root, frame_type, frame_book, frame_shopping, frame_about):
frame_type.forget()
frame_book.forget()
frame_shopping.forget()
frame_about.pack()
# Login handler
def usr_log_in(window, var_usr_name, var_usr_pwd):
    # read the username and password from the entry boxes
usr_name = var_usr_name.get().strip()
usr_pwd = var_usr_pwd.get().strip()
    # username and password must not be empty
if usr_name == '' or usr_pwd == '':
tk.messagebox.showerror(message='用户名或密码不能为空!')
else:
        # look the user up in the database
query = ("SELECT COUNT(*) FROM `user` WHERE username = '%s'" % usr_name)
cnx = DbUtil.open_db()
cursor = cnx.cursor()
cursor.execute(query)
if cursor.fetchone()[0] == 1:
            # check whether the username and password match
query = ("SELECT username, password, is_manager FROM `user` WHERE username = '%s' AND password = '%s'" % (
usr_name, usr_pwd))
cursor.execute(query)
result = cursor.fetchone()
DbUtil.close_db(cursor, cnx)
if result is not None:
# tk.messagebox.showinfo(title='welcome', message='欢迎您:' + usr_name)
# 进入主界面
is_manger = False if (result[2] == 0) else True
manager_main(window, usr_name, is_manger)
else:
tk.messagebox.showerror(message='密码错误')
        # unknown user: ask whether to sign up now
else:
is_signup = tk.messagebox.askyesno('欢迎', '您还没有注册,是否现在注册')
if is_signup:
                usr_sign_up(window)
# Sign-up handler
def usr_sign_up(window):
    # callback for the confirm-sign-up button
def signtowcg():
        # read the contents of the entry boxes
nun = new_username.get().strip()
np = new_pwd.get().strip()
npf = new_pwd_confirm.get().strip()
nn = new_name.get().strip()
ng = new_gender.get().strip()
ne = new_email.get().strip()
nt = new_telephone.get().strip()
nm = new_manager.get().strip()
if np == '' or nun == '' or npf == '' or nn == '' or ng == '' or ne == '' or nt == '' or nm == '':
tk.messagebox.showerror('错误', '输入框不能为空!')
elif np != npf:
tk.messagebox.showerror('错误', '密码前后不一致')
elif re.match(r'^[a-zA-Z0-9_.-]+@[a-zA-Z0-9-]+(\.[a-zA-Z0-9-]+)*\.[a-zA-Z0-9]{2,6}$', ne) is None:
tk.messagebox.showerror('错误', '邮箱格式不正确')
elif re.match(r'^1([38][0-9]|4[579]|5[0-3,5-9]|6[6]|7[0135678]|9[89])\d{8}$', nt) is None:
tk.messagebox.showerror('错误', '手机号格式不正确')
else:
query = ("SELECT COUNT(*) FROM `user` WHERE username = '%s'" % (nun))
cnx = DbUtil.open_db()
cursor = cnx.cursor()
cursor.execute(query)
if cursor.fetchone()[0] != 0:
tk.messagebox.showerror('错误', '用户名已存在')
else:
query = ("INSERT INTO `user`(username, password, is_manager) VALUES ('%s', '%s', '%s')" % (nun, np, nm))
query1 = (
"INSERT INTO `userinfo`(username, password, `name`, gender, email, telephone) VALUES ('%s', '%s', '%s', '%s', '%s', '%s')" % (
nun, np, nn, ng, ne, nt))
try:
cursor.execute(query)
cursor.execute(query1)
cnx.commit()
DbUtil.close_db(cursor, cnx)
tk.messagebox.showinfo('欢迎', '注册成功')
                    # close the sign-up window after a successful registration
window_sign_up.destroy()
                except:
                    cnx.rollback()
                    tk.messagebox.showerror('错误', '注册失败')
    # build the sign-up window
window_sign_up = tk.Toplevel(window)
window_sign_up.geometry('350x400')
window_sign_up.title('注册')
# 用户名变量及标签、输入框
new_username = tk.StringVar()
tk.Label(window_sign_up, text='请输入用户名:').place(x=10, y=10)
tk.Entry(window_sign_up, textvariable=new_username).place(x=150, y=10)
# 密码变量及标签、输入框
new_pwd = tk.StringVar()
tk.Label(window_sign_up, text='请输入密码:').place(x=10, y=50)
tk.Entry(window_sign_up, textvariable=new_pwd, show='*').place(x=150, y=50)
# 重复密码变量及标签、输入框
new_pwd_confirm = tk.StringVar()
tk.Label(window_sign_up, text='请再次输入密码:').place(x=10, y=90)
tk.Entry(window_sign_up, textvariable=new_pwd_confirm, show='*').place(x=150, y=90)
# 真实姓名变量及标签、输入框
new_name = tk.StringVar()
tk.Label(window_sign_up, text='请输入真实姓名:').place(x=10, y=130)
tk.Entry(window_sign_up, textvariable=new_name).place(x=150, y=130)
# 性别变量及标签、输入框
new_gender = tk.StringVar()
new_gender.set('男')
tk.Label(window_sign_up, text='请输入性别:').place(x=10, y=170)
tk.Radiobutton(window_sign_up, text='男', variable=new_gender, value='男').place(x=150, y=170)
tk.Radiobutton(window_sign_up, text='女', variable=new_gender, value='女').place(x=220, y=170)
# 邮箱变量及标签、输入框
new_email = tk.StringVar()
tk.Label(window_sign_up, text='请输入邮箱:').place(x=10, y=210)
tk.Entry(window_sign_up, textvariable=new_email).place(x=150, y=210)
# 电话变量及标签、输入框
new_telephone = tk.StringVar()
tk.Label(window_sign_up, text='请输入电话:').place(x=10, y=250)
tk.Entry(window_sign_up, textvariable=new_telephone).place(x=150, y=250)
# 是否为管理员变量及标签、输入框
new_manager = tk.StringVar()
new_manager.set(0)
tk.Label(window_sign_up, text='是否注册为管理员:').place(x=10, y=290)
tk.Radiobutton(window_sign_up, text='否', variable=new_manager, value=0).place(x=150, y=290)
tk.Radiobutton(window_sign_up, text='是', variable=new_manager, value=1).place(x=220, y=290)
# 确认注册按钮及位置
bt_confirm_sign_up = tk.Button(window_sign_up, text='确认注册', command=signtowcg)
bt_confirm_sign_up.place(x=150, y=340)
# Quit handler
def usr_quit(window):
window.destroy()
if __name__ == "__main__":
init()
``` |
{
"source": "JiangYeap/ninedld",
"score": 3
} |
#### File: JiangYeap/ninedld/ninedld.py
```python
from __future__ import print_function
import re
from lxml.html import fromstring
from sdld import SimpleDownloader
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
class NineDownloader(SimpleDownloader):
    DL_URL_REGEX = re.compile(r"file: '(.*?)'.*?label: '(\d*?) P'")
def __init__(self, ep_url, quality, file_path):
super(NineDownloader, self).__init__(None, file_path)
self.url = self._get_url(ep_url, quality)
def start(self):
super(NineDownloader, self).start()
def _get_url(self, ep_url, quality):
ep_str = self.scraper.get(ep_url).content.decode('utf-8')
ep_page = fromstring(ep_str)
iframe_url = ep_page.xpath('//iframe')[0].xpath('./@src')[0].strip()
iframe_url = urljoin(ep_url, iframe_url)
iframe_str = self.scraper.get(iframe_url).content.decode('utf-8')
for dl_url, dl_quality in self.DL_URL_REGEX.findall(iframe_str):
if dl_quality == quality:
return dl_url
print('Quality not found for %s. URL: %s.' % (self.file_name, ep_url))
raise ValueError
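# Minimal usage sketch (the episode URL and quality label below are hypothetical;
# the download itself is delegated to SimpleDownloader):
#     dld = NineDownloader('https://example.com/anime/ep-1', '720', 'ep-1.mp4')
#     dld.start()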
``` |
{
"source": "JiangYee/key_phrase_extract",
"score": 3
} |
#### File: key_phrase_extract/static_count/preprocess.py
```python
import json
import nltk
from nltk.tokenize import WordPunctTokenizer
import pickle
import pandas as pd
import numpy as np
import re
def load_json(path):
with open(path, 'r', encoding='utf8') as fin:
json_line = fin.readline() # just one line
json_obj = json.loads(json_line)
return json_obj
def get_info(json_obj):
abstract_list =[]
keyword_list =[]
title_list =[]
for one in json_obj:
keyword = one['keyword'].lower().split(';')
        if len(keyword) > 10:  # drop records with more than 10 keywords
            continue
        abstract_temp = re.sub(r'[^\u4e00-\u9fa5a-zA-Z0-9-°]', ' ', one['abstract']).lower()  # strip punctuation, but keep '-'
        abstract = (re.sub(r'\s{2,}', ' ', abstract_temp)).strip()  # collapse the extra spaces produced above
title = one['title'].lower()
abstract_list.append(abstract)
keyword_list.append(keyword)
title_list.append(title)
return abstract_list, keyword_list, title_list
def get_keywords(json_obj):
keyword_list =[]
keyword_len_list = []
for one in json_obj:
keyword = one['keyword'].lower().split(';')
keyword_len_list.append(len(keyword))
keyword_list.append(keyword)
return keyword_list, keyword_len_list
# stemming for a string, tokenized with str.split(' ') (stopwords are not removed)
# return: string
def stemming_str(str):
# str = re.sub(r'[’!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~]+]', '', str) #去掉标点
# str = re.sub(r'[^\u4e00-\u9fa5a-zA-Z0-9~!@#$%^&*()_+<>?:,./;’,。、‘:“《》?~!@#¥%……()]', ' ', str) #去掉标点
# str = re.sub(r'[,.:;?()[]&!*@#$%]', '', str) # 去除标点 不包含 '-'
stemmer = nltk.stem.PorterStemmer()
str_stem = ''
for term in str.split(' '):
        if '-' in term:  # treat hyphenated phrases like 'soft-in-soft-out' as a single keyword
term_stem = ''
for sub_term in term.split('-'):
term_stem = term_stem + '-' + stemmer.stem(sub_term)
term_stem = term_stem.strip('-')
else:
term_stem = stemmer.stem(term)
str_stem = str_stem + ' ' + term_stem
return str_stem.strip()
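# Illustrative example (the exact stems depend on the Porter stemmer implementation):
#     stemming_str('soft-in-soft-out decoding algorithms')
#     -> 'soft-in-soft-out decod algorithm'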
# stemming for a string, tokenized with str.split(' ') (stopwords are not removed)
# return: list
def stemming_list(str):
stem_list = []
stemmer = nltk.stem.PorterStemmer()
# words = str.split(' ')
# for word in words:
# word_stem = stemmer.stem(word)
# stem_list.append(word_stem)
for term in str.split(' '):
        if '-' in term:  # treat hyphenated phrases like 'soft-in-soft-out' as a single keyword
term_stem = ''
for sub_term in term.split('-'):
term_stem = term_stem + '-' + stemmer.stem(sub_term)
term_stem = term_stem.strip('-')
else:
term_stem = stemmer.stem(term)
stem_list.append(term_stem)
return stem_list
# stem every keyword in every keyword list
def stemming_all_keyword_list(keyword_lists):
stemming_results = []
for keyword_list in keyword_lists:
stemming_results.append([stemming_str(keyword) for keyword in keyword_list])
print(stemming_results)
return stemming_results
# # stemming for a string, use tokenizer() (not remove stopwords)
# # return: list
# def stemming_tokenizer(str):
# stem_list = []
# stemmer = nltk.stem.PorterStemmer()
# tokenzer = WordPunctTokenizer()
# words = tokenzer.tokenize(str)
# # english_punctuations = [',', '.', ':', ';', '?', '(', ')', '[', ']', '&', '!', '*', '@', '#', '$', '%']
# for word in words:
# # if word in english_punctuations: # 去除标点 不包含 '-'
# # continue
# word_stem = stemmer.stem(word)
# stem_list.append(word_stem)
# return stem_list
# load data with pickle
def read(data_path):
fr = open(data_path,'rb')
data = pickle.load(fr)
fr.close()
return data
# save data with pickle
def save(data, save_path):
# fp = open(file=save_dir, mode='w', encoding='utf-8')
fw = open(save_path, 'wb')
pickle.dump(data, fw)
fw.close()
# read the in/out counts stored in an Excel file (used for plotting)
def read_excel_count(data_path):
df = pd.read_excel(data_path, usecols=[1, 2], header=0)
in_data = df.get('in')
out_data = df.get('out')
return [in_data, out_data]
if __name__ == '__main__':
in_out_dir = './resulte_data/count.xlsx'
in_out_data = read_excel_count(in_out_dir)
in_data = np.array(in_out_data[0])
out_data = np.array(in_out_data[1])
total_data = in_data + out_data
in_persents = []
for i in range(len(in_data)):
in_num = in_data[i]
total = total_data[i]
in_persent = in_num / total
in_persents.append(in_persent)
print(in_persents)
print(in_out_data)
``` |
{
"source": "jiangyi15/tf-pwa",
"score": 2
} |
#### File: tf_pwa/config_loader/decay_config.py
```python
import copy
import functools
import random
import yaml
from tf_pwa.amp import (
DecayChain,
DecayGroup,
HelicityDecay,
get_decay,
get_particle,
split_particle_type,
)
from .base_config import BaseConfig
def set_min_max(dic, name, name_min, name_max):
if name not in dic and name_min in dic and name_max in dic:
dic[name] = (
random.random() * (dic[name_max] - dic[name_min]) + dic[name_min]
)
def decay_chain_cut_ls(decay):
for i in decay:
if isinstance(i, HelicityDecay):
if len(i.get_ls_list()) == 0:
return False, f"{i} ls not aviable {i.get_ls_list()}"
return True, ""
def decay_chain_cut_mass(decay):
for i in decay:
if isinstance(i, HelicityDecay):
if i.core.mass is None or any([j.mass is None for j in i.outs]):
continue
# print(i, i.core.mass, [j.mass for j in i.outs])
if i.core.mass < sum([j.mass for j in i.outs]):
return (
False,
f"{i} mass break {i.core.mass} < {[j.mass for j in i.outs]}",
)
return True, ""
class DecayConfig(BaseConfig):
decay_chain_cut_list = {
"ls_cut": decay_chain_cut_ls,
"mass_cut": decay_chain_cut_mass,
}
def __init__(self, dic, share_dict={}):
self.config = dic
self.decay_chain_config = dic.get("decay_chain", {})
self.share_dict = share_dict
self.particle_key_map = {
"Par": "P",
"m0": "mass",
"g0": "width",
"J": "J",
"P": "P",
"spins": "spins",
"bw": "model",
"model": "model",
"bw_l": "bw_l",
"running_width": "running_width",
}
self.cut_list = self.config["data"].get("decay_chain_cut", ["ls_cut"])
self.decay_key_map = {"model": "model"}
self.dec = self.decay_item(self.config["decay"])
(
self.particle_map,
self.particle_property,
self.top,
self.finals,
) = self.particle_item(self.config["particle"], share_dict)
self.full_decay = DecayGroup(
self.get_decay_struct(
self.dec,
self.particle_map,
self.particle_property,
self.top,
self.finals,
self.decay_chain_config,
)
)
if self.config["data"].get("cp_trans", True):
self.disable_allow_cc(self.full_decay)
self.decay_struct = DecayGroup(
self.get_decay_struct(self.dec, process_cut=False)
)
identical_particles = self.config["data"].get(
"identical_particles", None
)
if identical_particles is not None:
self.decay_struct.identical_particles = identical_particles
self.full_decay.identical_particles = identical_particles
@staticmethod
def load_config(file_name, share_dict={}):
if isinstance(file_name, dict):
return copy.deepcopy(file_name)
if isinstance(file_name, str):
if file_name in share_dict:
return DecayConfig.load_config(share_dict[file_name])
with open(file_name) as f:
ret = yaml.safe_load(f)
if ret is None:
ret = {}
return ret
raise TypeError("not support config {}".format(type(file_name)))
def get_decay(self, full=True):
if full:
return self.full_decay
else:
return self.decay_struct
@staticmethod
def _list2decay(core, outs):
parts = []
params = {}
for j in outs:
if isinstance(j, dict):
for k, v in j.items():
params[k] = v
else:
parts.append(j)
dec = {"core": core, "outs": parts, "params": params}
return dec
@staticmethod
def decay_item(decay_dict):
decs = []
for core, outs in decay_dict.items():
is_list = [isinstance(i, list) for i in outs]
if all(is_list):
for i in outs:
dec = DecayConfig._list2decay(core, i)
decs.append(dec)
else:
dec = DecayConfig._list2decay(core, outs)
decs.append(dec)
return decs
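    # For illustration (a sketch, not taken from the project docs): a decay section
    # such as {"A": [["B", "C"], ["D", "E", {"model": "x"}]]} is flattened here into
    #     [{"core": "A", "outs": ["B", "C"], "params": {}},
    #      {"core": "A", "outs": ["D", "E"], "params": {"model": "x"}}]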
@staticmethod
def _do_include_dict(d, o, share_dict={}):
s = DecayConfig.load_config(o, share_dict)
for i in s:
if i in d:
if isinstance(d[i], dict):
s[i].update(d[i])
d[i] = s[i]
else:
d[i] = s[i]
@staticmethod
def particle_item_list(particle_list):
particle_map = {}
particle_property = {}
for particle, candidate in particle_list.items():
if isinstance(candidate, list): # particle map
if len(candidate) == 0:
particle_map[particle] = []
for i in candidate:
if isinstance(i, str):
particle_map[particle] = particle_map.get(
particle, []
) + [i]
elif isinstance(i, dict):
map_i, pro_i = DecayConfig.particle_item_list(i)
for k, v in map_i.items():
particle_map[k] = particle_map.get(k, []) + v
particle_property.update(pro_i)
else:
raise ValueError(
"value of particle map {} is {}".format(i, type(i))
)
elif isinstance(candidate, dict):
particle_property[particle] = candidate
else:
raise ValueError(
"value of particle {} is {}".format(
particle, type(candidate)
)
)
return particle_map, particle_property
@staticmethod
def particle_item(particle_list, share_dict={}):
top = particle_list.pop("$top", None)
finals = particle_list.pop("$finals", None)
includes = particle_list.pop("$include", None)
if includes:
if isinstance(includes, list):
for i in includes:
DecayConfig._do_include_dict(
particle_list, i, share_dict=share_dict
)
elif isinstance(includes, str):
DecayConfig._do_include_dict(
particle_list, includes, share_dict=share_dict
)
else:
raise ValueError(
"$include must be string or list of string not {}".format(
type(includes)
)
)
particle_map, particle_property = DecayConfig.particle_item_list(
particle_list
)
if isinstance(top, dict):
particle_property.update(top)
if isinstance(finals, dict):
particle_property.update(finals)
return particle_map, particle_property, top, finals
def rename_params(self, params, is_particle=True):
ret = {}
if is_particle:
key_map = self.particle_key_map
else:
key_map = self.decay_key_map
for k, v in params.items():
ret[key_map.get(k, k)] = v
return ret
def decay_chain_cut(self, decays):
ret = []
for i in decays:
flag = True
for name in self.cut_list:
f = DecayConfig.decay_chain_cut_list[name]
new_flag, msg = f(i)
flag = flag and new_flag
if not flag:
print(
"remove decay chain",
i,
"by",
name,
"\n\tbecause of",
msg,
)
break
if flag:
ret.append(i)
return ret
def get_decay_struct(
self,
decay,
particle_map=None,
particle_params=None,
top=None,
finals=None,
chain_params={},
process_cut=True,
):
""" get decay structure for decay dict"""
particle_map = particle_map if particle_map is not None else {}
particle_params = (
particle_params if particle_params is not None else {}
)
particle_set = {}
def add_particle(name):
if name in particle_set:
return particle_set[name]
params = particle_params.get(name, {})
params = self.rename_params(params)
set_min_max(params, "mass", "m_min", "m_max")
set_min_max(params, "width", "g_min", "g_max")
part = get_particle(name, **params)
particle_set[name] = part
return part
def wrap_particle(name):
name_list = particle_map.get(name, [name])
return [add_particle(i) for i in name_list]
def all_combine(out):
if len(out) < 1:
yield []
else:
for i in out[0]:
for j in all_combine(out[1:]):
yield [i] + j
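        # e.g. all_combine([[a, b], [c]]) yields [a, c] and then [b, c],
        # i.e. every way of picking one candidate particle per slot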
decs = []
for dec in decay:
core = wrap_particle(dec["core"])
outs = [wrap_particle(j) for j in dec["outs"]]
for i in core:
for j in all_combine(outs):
dec_i = get_decay(i, j, **dec["params"])
decs.append(dec_i)
top_tmp, finals_tmp = set(), set()
if top is None or finals is None:
top_tmp, res, finals_tmp = split_particle_type(decs)
if top is None:
top_tmp = list(top_tmp)
assert len(top_tmp) == 1, "not only one top particle"
top = list(top_tmp)[0]
else:
if isinstance(top, str):
top = particle_set[top]
elif isinstance(top, dict):
keys = list(top.keys())
assert len(keys) == 1
top = particle_set[keys.pop()]
else:
return particle_set[str(top)]
if finals is None:
finals = list(finals_tmp)
elif isinstance(finals, (list, dict)):
finals = [particle_set[i] for i in finals]
else:
raise TypeError("{}: {}".format(finals, type(finals)))
dec_chain = top.chain_decay()
ret = []
for i in dec_chain:
if sorted(DecayChain(i).outs) == sorted(finals):
all_params = chain_params.get("$all", {})
ret.append(DecayChain(i, **all_params))
if process_cut:
return self.decay_chain_cut(ret)
return ret
def disable_allow_cc(self, decay_group):
for decay_chain in decay_group:
for decay in decay_chain:
if hasattr(decay, "allow_cc"):
decay.allow_cc = False
``` |
{
"source": "jiangyifan123/EngLearner",
"score": 2
} |
#### File: EngLearner/Forum/views.py
```python
from django.shortcuts import render, redirect
from django.views.generic import View
from .models import *
from .forms import *
from django.http import JsonResponse
from users.models import UserProfile
import json
# Create your views here.
class forumView(View):
def get(self, request):
pages = Pages.objects.all()[:20]
return render(request, 'Forum/forum.html', {"pages": pages})
def post(self, request):
return render(request, 'Forum/forum.html')
class detailView(View):
def get(self, request, page_id):
try:
page = Pages.objects.get(id = page_id)
page.views += 1
count = len(page.likes_set.all())
commends = page.commends_set.all()[:20]
like_up = 0
like_down = 0
page.comment_num = commends.count()
page.save()
for like in page.likes_set.all():
if like.operation == 1:
like_up += 1
else:
like_down += 1
commend_like = {
"like_up": 0,
"like_down": 0,
}
return render(request, 'Forum/detail_page.html', {"page": page, "count": count, "commends": commends, "like_up": like_up, "like_down": like_down})
except Exception as e:
print(e.args)
return render(request, 'Forum/detail_page.html')
def post(self, request, page_id):
try:
page = Pages.objects.get(id = page_id)
username = request.POST.get('username')
content = request.POST.get('content')
commend = Commends()
commend.content = content
commend.username = username
commend.page = page
commend.save()
return JsonResponse({"status": "success"})
except Exception as e:
print(e.args)
return JsonResponse({"commend": "", "status": "Fail"})
class new_page_View(View):
def get(self, request):
return render(request, 'Forum/set_new_page.html')
def post(self, request):
new_page_form = New_page_form(request.POST)
if new_page_form.is_valid():
try:
title = request.POST.get('title')
area = request.POST.get('area')
page = Pages()
page.title = title
page.content = area
page.save()
return redirect('forum_index')
except Exception as e:
print(e.args)
else:
error = ""
print(new_page_form.errors.items())
for key, errors in new_page_form.errors.items():
if key == "title":
error += "标题: %s " % errors[0]
else:
error += "内容: %s" % errors[0]
return render(request, 'Forum/set_new_page.html', {'error': error})
def page_up_View(request):
if request.method == 'GET' or not request.user.is_authenticated:
return JsonResponse({'status': 'Fail'})
else:
try:
page_id = request.POST.get('page_id')
operation = request.POST.get('operation')
page = Pages.objects.get(id = page_id)
if page.likes_set.filter(user = request.user).exists():
like = page.likes_set.get(user = request.user)
print(operation == "0", like.operation)
if operation == "1" and like.operation == False:
like.operation = True
like.save()
elif operation == "0" and like.operation == True:
like.operation = False
like.save()
print(like.operation)
else:
return JsonResponse({'status': 'Fail'})
return JsonResponse({"status": "change"})
else:
like = likes()
like.user = request.user
like.page = page
if operation == "1" and like.operation == False:
like.operation = True
like.save()
elif operation == "0" and like.operation == True:
like.operation = False
like.save()
else:
return JsonResponse({'status': 'Fail'})
return JsonResponse({'status': 'success'})
except Exception as e:
print(e.args)
return JsonResponse({'status': 'Fail'})
```
#### File: EngLearner/mainsys/listen_spider.py
```python
import requests
from lxml import etree
from bs4 import BeautifulSoup
import os
import re
class LSpider:
def __init__(self):
self.url = 'https://toefl.kmf.com/listen/ets/order/'
def get_html(self, url):
try:
# print(url)
html = requests.get(url=url)
html.encoding = 'utf-8'
if html.status_code == 200:
# print(html.text)
return html.text
else:
return None
except Exception as e:
print(e.args)
def get_all(self, url):
html = self.get_html(url)
soup = BeautifulSoup(html, 'lxml')
div = soup.find('div', class_="origin-hidden-page original-cont")
all_eng = div.find_all('p', class_="sentence-content")
all_chinese = div.find_all('div', class_ = "translation-box")
all_time = div.select('div .orgin-content.js-orgin-content.original-en')
if len(all_time) != 0:
all_time = [x.get('data-time') for x in all_time]
else:
all_time = []
if len(all_chinese) != 0:
all_chinese = [x.text for x in all_chinese]
else:
all_chinese = []
if len(all_eng) != 0:
all_eng = [x.text for x in all_eng]
else:
all_eng = []
return (all_eng, all_chinese, all_time)
def get_url(self):
menu_url = []
for i in range(12):
url = self.url + '%d/0' % i
menu_url.append(url)
return menu_url
def get_listen_url(self, menu_url):
html = self.get_html(menu_url)
soup = BeautifulSoup(html, 'lxml')
urls = soup.select("a.listen-exam-link.button-style.js-listen-link")
texts = soup.select("a.practice-title.js-practice-title")
if len(urls) != 0:
urls = [(t.text, 'https://toefl.kmf.com' + x.get('href')) for t, x in zip(texts, urls)]
return urls
if __name__ == "__main__":
spider = LSpider()
# all_eng, all_chinese, all_time = spider.get_all(spider.url)
menu = spider.get_url()
urls = spider.get_listen_url(menu[0])
all_eng, all_chinese, all_time = spider.get_all(urls[0][1])
print(all_time)
```
#### File: EngLearner/mainsys/readSpider.py
```python
import requests
from lxml import etree
from bs4 import BeautifulSoup
import os
import re
class RSpider:
def __init__(self):
self.url = 'http://www.kekenet.com/Article/media/economist/'
def get_html(self, url):
try:
# print(url)
html = requests.get(url=url)
html.encoding = 'utf-8'
if html.status_code == 200:
# print(html.text)
return html.text
else:
return None
except Exception as e:
print(e.args)
def get_list_text(self, url):
html = self.get_html(url)
tree = etree.HTML(html)
xp = '//*[@id="menu-list"]/li'
ul = tree.xpath(xp)
ans = []
for li in ul:
li = li.xpath('h2/a[2]')[0]
url = li.get('href')
title = li.get('title')
ans.append({"url": url, "title": title})
return ans
def get_mp3(self, url):
        url = re.sub(r'/\w+/', '/mp3/', url)
# print(url)
html = requests.get(url)
html.encoding = 'utf-8'
if html.status_code == 200:
soup = BeautifulSoup(html.text, 'lxml')
ans = soup.find_all('a', href = re.compile('http://k6.kekenet.com/Sound/'))
if len(ans) != 0:
return ans[0].get('href')
else:
return None
else:
return None
def get_text(self, url):
html = self.get_html(url)
tree = etree.HTML(html)
xp = '//*[@id="article_eng"]/div//text()'
div = tree.xpath(xp)
eng = ""
chinese = ""
if len(div) != 0:
for value in div:
if value == '' or value == '\n':
continue
if isChinese(value):
chinese += value + '\n'
else:
eng += value + '\n'
return (chinese, eng)
def get_all(self, url):
return (self.get_text(url), self.get_mp3(url))
def isChinese(s):
flag = 0
for i in s:
if '\u4e00' <= i <= '\u9fff':
flag = 1
break
return flag
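# e.g. isChinese('中文 translation') -> 1, isChinese('english only') -> 0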
if __name__ == "__main__":
spider = RSpider()
ans = spider.get_list_text(spider.url)
for i in ans:
print(spider.get_all(i['url'])[1])
```
#### File: EngLearner/myadmin/models.py
```python
from django.db import models
# Create your models here.
class Task(models.Model):
taskname = models.CharField(max_length = 30, null = True, blank = True, verbose_name = "任务名称")
taskpoints = models.IntegerField(null = True, blank = True, verbose_name = "任务积分")
class Meta:
verbose_name = "任务"
verbose_name_plural = verbose_name
def __str__(self):
return "<Task> taskname: %s, taskpoints: %d" % (self.taskname, self.taskpoints)
class product(models.Model):
product_name = models.CharField(max_length = 30, null = True, blank = True, verbose_name = "产品名", unique = True)
product_price = models.CharField(max_length = 20, null = True, blank = True, verbose_name = "产品价格")
product_description = models.CharField(max_length = 200, null = True, blank = True, verbose_name = "产品说明")
product_type = models.CharField(max_length = 20, null = True, blank = True, verbose_name = "语言考试类别")
product_style = models.CharField(max_length = 20, null = True, blank = True, verbose_name = "类别", default = "单词")
class Meta:
verbose_name = "产品"
verbose_name_plural = verbose_name
def __str__(self):
return "<product> name: %s, price: %s, desc: %s" % (self.product_name, self.product_price, self.product_description )
```
#### File: site-packages/django_notifications/admin.py
```python
from django.contrib import admin
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.contenttypes.models import ContentType
from models import Subscription, SubscriptionMap, \
Message
class SubscriptionMapInline(admin.StackedInline):
model = SubscriptionMap
class SubscriptionAdmin(admin.ModelAdmin):
list_display = ('label', 'model_content_type', 'model_instance',
'action', 'date_subscribed', \
'active', 'notification_count_all', \
'notification_count_active')
list_filter = ('model_content_type',)
date_hierarchy = 'date_subscribed'
search_fields = ('subscriptionmap__message__label',)
inlines = [ SubscriptionMapInline ]
def model_instance(self, obj):
try:
content_type = ContentType.objects.get(model = obj.model_content_type)
model_instance = content_type.get_object_for_this_type(pk = \
obj.model_object_id)
# Not exactly sure, why "obj.model_content_type.get_object_for_this_type()"
# returns None here
return str(model_instance)
except ObjectDoesNotExist:
return str(obj.model_object_id)
def notification_count_all(self, obj):
return obj.subscriptionmap_set.count()
notification_count_all.short_description = 'Notification count (all)'
def notification_count_active(self, obj):
return obj.subscriptionmap_set.filter(active = True).count()
notification_count_active.short_description = 'Notification count (active)'
class MessageAdmin(admin.ModelAdmin):
model = Message
admin.site.register(Subscription, SubscriptionAdmin)
admin.site.register(Message, MessageAdmin)
```
#### File: django_notifications/backends/__init__.py
```python
import sys
from django.conf import settings
BACKEND_CLASSES = {
'email': 'django_notifications.backends.email.EmailBackend',
'xmpp': 'django_notifications.backends.xmpp.XMPPBackend',
'sms_mobitel': 'django_notifications.backends.sms_mobitel.SMSMobitelBackend',
'postmark': 'django_notifications.backends.postmark.PostmarkBackend',
}
def get_available_backends(configured_only = False):
"""
Returns a list of all the available backends.
If configured_only = True only those backends which are
properly configured are returned.
"""
available_backends = []
for key in BACKEND_CLASSES.keys():
module_name = get_module_and_class_name(BACKEND_CLASSES[key])[0]
class_instance = get_class_instance_by_key(key)
module = sys.modules[module_name]
try:
not_available = getattr(module, 'not_available')
except AttributeError:
not_available = None
is_configured = getattr(class_instance, 'is_configured', False)()
meta = getattr(class_instance, 'meta', None)
if not meta or (configured_only and not is_configured) \
or (configured_only and not_available):
continue
name = meta['NAME']
description = meta['DESCRIPTION']
available_backends.append((key, name, description))
return available_backends
def get_settings(backend_key, use_default = True):
"""
Returns all the settings for the provided backend key.
"""
notification_settings = getattr(settings, 'NOTIFICATIONS', None)
if not BACKEND_CLASSES.get(backend_key, None):
raise EnvironmentError('Invalid backend: %s' % (backend_key))
if not notification_settings:
raise EnvironmentError('NOTIFICATIONS was not found.')
# Default backend settings
if use_default:
module_name = get_module_and_class_name(BACKEND_CLASSES[backend_key])[0]
__import__(module_name)
module = sys.modules[module_name]
backend_settings = getattr(module, 'SETTINGS')['required']
else:
backend_settings = {}
try:
backend_settings.update(notification_settings[backend_key])
except KeyError:
pass
return backend_settings
# "Helper" methods
def get_module_and_class_name(class_path):
module_name = class_path[:class_path.rfind('.')]
class_name = class_path[class_path.rfind('.') + 1:]
return module_name, class_name
def get_class_instance_by_key(key):
try:
class_path = BACKEND_CLASSES[key]
except KeyError:
return None
module_name, class_name = get_module_and_class_name(class_path)
__import__(module_name)
module = sys.modules[module_name]
class_instance = getattr(module, class_name)()
return class_instance
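# Usage sketch (assumes an 'xmpp' section is present in settings.NOTIFICATIONS):
#     backend = get_class_instance_by_key('xmpp')
#     if backend is not None and backend.is_configured():
#         backend.send('user@example.org', 'hello')  # recipient JID and text are placeholders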
```
#### File: django_notifications/backends/xmpp.py
```python
from __init__ import get_settings
from base import BaseBackend
import time
try:
from ..xmpp import *
except ImportError:
not_available = True
# Default settings
SETTINGS = {
'meta': {
'NAME': 'XMPP Backend',
'DESCRIPTION': 'Backend which sends notifications using XMPP (Jabber) protocol',
},
'required': {
'JID': '',
'PASSWORD': '',
'SERVER': 'talk.google.com',
'PORT': 5222,
},
'optional': {
}
}
class XMPPBackend(BaseBackend):
def __init__(self):
self.meta = SETTINGS['meta']
self.settings = get_settings('xmpp')
def is_configured(self):
return super(XMPPBackend, self).is_configured(self.settings, \
SETTINGS['required'].keys())
def send(self, recipient_jid, message):
client = self.__authenticate_and_get_client()
if not client:
return None
client.send(protocol.Message(recipient_jid, message))
time.sleep(1)
client.disconnect()
def __authenticate_and_get_client(self):
jid = protocol.JID(self.settings['JID'])
client = Client(jid.getDomain(), debug = [])
connection = client.connect(server = (self.settings['SERVER'], \
self.settings['PORT']))
if not connection:
return None
auth = client.auth(jid.getNode(), self.settings['PASSWORD'], \
resource = jid.getResource())
if not auth:
return None
return client
```
#### File: management/commands/color.py
```python
from django.core.management import color
from django.utils import termcolors
def color_style():
style = color.color_style()
style.BOLD = termcolors.make_style(opts = ('bold',))
style.GREEN = termcolors.make_style(fg = 'green', opts = ('bold',))
style.YELLOW = termcolors.make_style(fg = 'yellow')
style.BLUE = termcolors.make_style(fg = 'blue', opts = ('bold',))
style.RED = termcolors.make_style(fg = 'red')
return style
style = color_style()
``` |
{
"source": "jiangyigithub/demos",
"score": 2
} |
#### File: pointnet_training/models/pointnet_classifier_extra_skinny_and_short.py
```python
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
import torch.autograd as grad
# removed the second Linear layer and its batch_norm, and the remaining Linears are skinnier
class STN3d(nn.Module):
def __init__(self):
super(STN3d, self).__init__()
self.conv1 = torch.nn.Conv1d(3, 64, 1)
self.conv2 = torch.nn.Conv1d(64, 128, 1)
self.conv3 = torch.nn.Conv1d(128, 512, 1)
self.fc1 = nn.Linear(512, 128)
self.fc3 = nn.Linear(128, 9)
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(512)
self.bn4 = nn.BatchNorm1d(128)
def forward(self, x):
batchsize = x.size()[0]
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
x = torch.max(x, 2, keepdim=True)[0]
x = x.view(-1, 512) #x.view(-1, 1024)
x = F.relu(self.bn4(self.fc1(x)))
x = self.fc3(x)
iden = Variable(torch.from_numpy(np.array([1,0,0,0,1,0,0,0,1]).astype(np.float32))).view(1,9).repeat(batchsize,1)
if x.is_cuda:
iden = iden.cuda()
x = x + iden
x = x.view(-1, 3, 3)
return x
class PointNetfeat(nn.Module):
def __init__(self, global_feat = True):
super(PointNetfeat, self).__init__()
self.stn = STN3d()
self.conv1 = torch.nn.Conv1d(3, 64, 1)
self.conv2 = torch.nn.Conv1d(64, 128, 1)
self.conv3 = torch.nn.Conv1d(128, 512, 1) #torch.nn.Conv1d(128, 1024, 1)
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(512) #nn.BatchNorm1d(1024)
self.global_feat = global_feat
def forward(self, x):
batchsize = x.size()[0]
n_pts = x.size()[2]
trans = self.stn(x)
x = x.transpose(2,1)
x = torch.bmm(x, trans)
x = x.transpose(2,1)
x = F.relu(self.bn1(self.conv1(x)))
pointfeat = x
x = F.relu(self.bn2(self.conv2(x)))
x = self.bn3(self.conv3(x))
x = torch.max(x, 2, keepdim=True)[0]
x = x.view(-1, 512) #x.view(-1, 1024)
if self.global_feat:
return x, trans
else:
x = x.view(-1, 512, 1).repeat(1, 1, n_pts) # x.view(-1, 1024, 1).repeat(1, 1, n_pts)
return torch.cat([x, pointfeat], 1), trans
# removed the second linear layer and its batch_norm, and the remaining two are skinnier
class PointNetClassifier(nn.Module):
def __init__(self, k = 3):
super(PointNetClassifier, self).__init__()
self.num_classes = k
self.feat = PointNetfeat(global_feat=True)
self.fc1 = nn.Linear(512, 128)
self.bn1 = nn.BatchNorm1d(128)
self.fc3 = nn.Linear(128, k)
def forward(self, x):
x, trans = self.feat(x)
x = F.relu(self.bn1(self.fc1(x)))
x = self.fc3(x)
return x, trans
def run_train(self, points, target, loss_fn, optimizer):
self.train()
points = grad.Variable(points).float().cuda()
target = grad.Variable(target).cuda()
# Zero out the gradients
optimizer.zero_grad()
# Forward pass
outp, _ = self(points)
# compute my error
error = loss_fn(outp, target)
# Backpropagate
error.backward()
# Update the weights
optimizer.step()
return error
def run_eval(self, points, target, loss_fn):
self.eval()
points = grad.Variable(points).float().cuda()
target = grad.Variable(target).cuda()
# Forward pass
outp, _ = self(points)
# compute error and acc
error = loss_fn(outp, target)
_, preds = torch.max(F.softmax(outp, dim=1), 1)
batch_acc = torch.sum(preds == target.data).item() / target.size()[0]
conf_matrix = np.zeros((self.num_classes, self.num_classes), dtype=np.int)
for i in range(self.num_classes):
for j in range(self.num_classes):
# predicted to be i
pred_i = preds == i
# is actually j
gt_j = target.data == j
for k in range(pred_i.size()[0]):
if pred_i[k].item() == 1 and gt_j[k].item() == 1:
conf_matrix[i, j] += 1
return error, batch_acc, conf_matrix
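    # conf_matrix[i][j] counts samples predicted as class i whose ground truth is class j;
    # an equivalent vectorized per-batch update (sketch) would be:
    #     np.add.at(conf_matrix, (preds.cpu().numpy(), target.data.cpu().numpy()), 1)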
```
#### File: models/pointnet_training/training.py
```python
import argparse
import os
from neurAD.pytorch.examples.utils import save_params, get_timestamp
from NN import Optimus
import torch
from losses import loss
from datasets import PointCloudDataset
from torch.utils.data import DataLoader
from models import pointnet_classifier_git0, pointnet_classifier_git1, pointnet_classifier_skinny
from models import pointnet_classifier_extra_skinny, pointnet_classifier_extra_skinny_and_short
from models import pointnet_classifier_anorexic
def get_data(args):
# Instantiate a dataset loader
model_net_train = PointCloudDataset(args.kitti_data, args.stanford_data, args.carla_data, args.modelnet_data,
test=False, num_points=args.num_points, jitter=args.jitter,
stretch=args.stretch, slide=args.slide, dropout=args.dropout,
rotate=args.train_rotate)
model_net_val = PointCloudDataset(args.kitti_data, args.stanford_data, args.carla_data, args.modelnet_data,
test=True, num_points=args.num_points, jitter=0, stretch=(), slide=(),
dropout=0, rotate=args.test_rotate)
    # The neurAD always needs both the test and validation datasets.
    # To keep it simple, the test and validation sets are the same here.
loaders = {}
loaders['train'] = torch.utils.data.DataLoader(model_net_train, batch_size=args.train_batch_size,
shuffle=True, num_workers=args.num_workers)
loaders['validation'] = torch.utils.data.DataLoader(model_net_val, batch_size=args.test_batch_size,
shuffle=True, num_workers=args.num_workers)
loaders['test'] = loaders['validation']
return loaders
def get_model(args):
dims = 3
num_classes = 4
if args.model == "git0":
print("git0 model")
model = pointnet_classifier_git0.PointNetClassifier(num_points=args.num_points, K=dims, num_classes=num_classes).train().cuda()
elif args.model == "git1":
print("git1 model")
model = pointnet_classifier_git1.PointNetClassifier(num_classes).train().cuda()
elif args.model == "skinny":
print("skinny model")
model = pointnet_classifier_skinny.PointNetClassifier(num_classes).train().cuda()
elif args.model == "extra_skinny":
print("extra skinny model")
model = pointnet_classifier_extra_skinny.PointNetClassifier(num_classes).train().cuda()
elif args.model == "extra_skinny_and_short":
print("extra skinny and short model")
model = pointnet_classifier_extra_skinny_and_short.PointNetClassifier(num_classes).train().cuda()
elif args.model == "anorexic":
print("anorexic model")
model = pointnet_classifier_anorexic.PointNetClassifier(num_classes).train().cuda()
return model
def get_loss(args):
if args.weighted_loss and args.reg_loss:
loss_fn = loss.Weighted_CrossEntropyLoss_with_Reg()
elif args.weighted_loss and not args.reg_loss:
loss_fn = loss.Weighted_CrossEntropyLoss()
elif not args.weighted_loss and args.reg_loss:
loss_fn = loss.CrossEntropyLoss_with_Reg()
elif not args.weighted_loss and not args.reg_loss:
loss_fn = loss.CrossEntropyLoss()
print(loss_fn)
return loss_fn
def train_new(args=None):
logdir = os.path.join(args.log_dir, args.run_name + get_timestamp())
if not os.path.isdir(logdir):
os.makedirs(logdir)
save_params(logdir, args)
# Create model based on
model = get_model(args)
# Get loaders
loaders = get_data(args)
# Loss
loss_fn = get_loss(args)
# Evaluation freqs
batch_num = len(loaders["train"])
evaluation_frequencies = {
"validation": max(int(batch_num / args.valid_freq), 1),
"save": max(int(batch_num / args.save_freq), 1),
"log": max(int(batch_num / args.log_freq), 1)
}
# Init trainer
deep = Optimus(model=model, data_loaders=loaders, log_dir=logdir, optimizer=args.optimizer,
learning_rate=args.learning_rate, loss_fn=loss_fn, eval_freq=evaluation_frequencies,
stopper_patience=args.stopper_patience, histogram_save_freq=None, multi_gpu=False, easy_mode=False)
# Train NN
deep.train(epoch=args.num_epochs)
if __name__ == '__main__':
parser = argparse.ArgumentParser('Train Pointnet')
parser.add_argument("--run_name", default="medium_augs_alldata_celoss_anorexic_pointnet_256pts")
# model
parser.add_argument("--model", default="anorexic")
# optimizer
parser.add_argument("--optimizer", default='Adam')
parser.add_argument("--learning_rate", type=float, default=0.001)
# datasets and dataloader
parser.add_argument('--kitti_data', default='/home/hsk2bp/datasets/lidar_data/kitti')
parser.add_argument('--stanford_data', default='/home/hsk2bp/datasets/stanford_pcds')
parser.add_argument('--carla_data', default='/home/hsk2bp/datasets/carla_and_modelnet40_pcds/Carla')
parser.add_argument('--modelnet_data', default='/home/hsk2bp/datasets/carla_and_modelnet40_pcds/ModelNet')
parser.add_argument("--num_workers", type=int, default=4)
parser.add_argument('--train_batch_size', default=32, type=int)
parser.add_argument("--test_batch_size", default=160, type=int)
parser.add_argument('--num_epochs', default=60, type=int)
parser.add_argument("--num_points", type=int, default=256)
parser.add_argument("--jitter", type=float, default=0.01)
parser.add_argument("--stretch", default=(0.85, 1.15))
parser.add_argument("--slide", default=(-0.1, 0.1))
parser.add_argument("--dropout", type=float, default=0.875)
parser.add_argument("--train_rotate", default=True)
parser.add_argument("--test_rotate", default=False)
# loss function
parser.add_argument("--reg_loss", default = False)
parser.add_argument("--weighted_loss", default=False)
parser.add_argument("--comment", default='')
# logs and saving
parser.add_argument("--log_dir", default='./new_logs')
parser.add_argument("--stopper_patience", type=int, default=900)
parser.add_argument("--log_freq", type=int, default=900)
parser.add_argument("--valid_freq", type=int, default=20)
parser.add_argument("--save_freq", type=int, default=20)
args = parser.parse_args()
train_new(args)
``` |
{
"source": "jiangyihan7/sayhello",
"score": 3
} |
#### File: jiangyihan7/sayhello/commands.py
```python
import click
from sayhello import app,db
from sayhello.models import Message
@app.cli.command()
@click.option('--drop', is_flag=True, help='Create after drop.')
def initdb(drop):
"""Initialize the database."""
if drop:
click.confirm('This operation will delete the database, do you want to continue?', abort=True)
db.drop_all()
click.echo('Drop tables.')
db.create_all()
click.echo('Initialized database.')
@app.cli.command()
@click.option('--count',default=20,help='Quantity of messages,default is 20.')
# generate fake messages
def forge(count):
"""Generate fake messages."""
from faker import Faker
db.drop_all()
db.create_all()
fake = Faker()
click.echo('Working...')
for i in range(count):
message = Message(name=fake.name(),body=fake.sentence(),timestamp=fake.date_time_this_year())
db.session.add(message)
db.session.commit()
    click.echo('Created %d fake messages.' % count)
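# Usage sketch from the command line (assuming FLASK_APP points at this app):
#     flask initdb --drop
#     flask forge --count 50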
``` |
{
"source": "jiangyongitcast/word_cet4",
"score": 3
} |
#### File: word_cet4/words/cet4.py
```python
import xlwt
# 1、创建工作本类似数据库
# 2、创建工作表
workbook = xlwt.Workbook(encoding='utf8')
worksheet = workbook.add_sheet('英语单词四级')
def write_xls():
with open('./res/cet4.text', encoding='utf8') as f:
dict_ = eval(f.read())
print(len(dict_))
length = len(dict_)
i = 0
for word in dict_:
# for i in range(length):
worksheet.write(i, 0, word)
worksheet.write(i, 2, dict_[word])
print(i)
i += 1
print(word)
# if i>=10:
# break
workbook.save('./cet4.xls')
# 2. classify words by part of speech
def classify():
with open('./res/cet4.text', encoding='utf8') as f:
dict_ = eval(f.read())
i = 0
for word in dict_:
chinese = dict_[word]
if 'v' in chinese:
print(i)
print(word)
print(dict_[word])
i += 1
if __name__ == '__main__':
classify()
```
#### File: word_cet4/words/findroot.py
```python
def find_word():
with open('./res/词根.txt', encoding='utf8') as f:
list1 = f.read().split('-------------')
print(len(list1))
new_list = list()
for word_area in list1[:10]:
word_list = word_area.split(' ')
i = 0
root_list = list()
for word in word_list:
if word.strip().encode('utf8').isalpha():
print(i)
print(word.strip())
root_list.append(word.strip())
i += 1
new_list.append(root_list)
# if not word.strip().isalpha():
# word_list.remove(word.strip())
# print(word.strip())
return new_list
def find_root(list1):
list1 = ['Abuse', 'Abduct', 'Aberrant']
i = 0
j = 2
root = list1[0][i:j].lower()
# 下面开始找最大词根,先找最小的词根,分两种情况
# 1、找到了
# 2、没找到
count = 0
for word in list1[1:]:
if root in word:
count +=1
if count== len(list1)-1:
        print('found the root')
else:
print('j-1')
# return root
if __name__ == '__main__':
print(find_word())
``` |
{
"source": "jiangyt2112/NetworkMonitor",
"score": 2
} |
#### File: src/agent/experiment.py
```python
import __init__
from func import ping_test
from libvirt_func import get_nic_netstats
from libvirt_func import get_vm_port_netstats
from my_ovs.bridge import get_port_netstats
from func import is_network_node
import time
import json
import commands
import psutil
def exe(cmd):
ret, result = commands.getstatusoutput(cmd)
if ret == 0:
return True, result
else:
return False, result
def get_vm_port(dev):
all_ = get_vm_port_netstats()
dev['perf'] = all_[dev['name']]
return dev
def get_nic_port(dev, nic_port_bandwidth):
dev['perf'] = nic_port_bandwidth[dev['name']]
return dev
def get_ovs_port(dev):
old_stat = get_port_netstats(dev['name'])
time.sleep(1)
new_stat = get_port_netstats(dev['name'])
dev['perf'] = {
"rx": {"packets": new_stat["rx"]['packets'] - old_stat["rx"]['packets'],
"bytes": new_stat["rx"]['bytes'] - old_stat["rx"]['bytes'],
"drop": new_stat["rx"]['drop'] - old_stat["rx"]['drop'],
"err": new_stat["rx"]['err'] - old_stat["rx"]['err']
},
"tx": {"packets": new_stat["tx"]['packets'] - old_stat["tx"]['packets'],
"bytes": new_stat["tx"]['bytes'] - old_stat["tx"]['bytes'],
"drop": new_stat["tx"]['drop'] - old_stat["tx"]['drop'],
"err": new_stat["tx"]['err'] - old_stat["tx"]['err']
}
}
return dev
def get_delay(dev):
cmd = "ip netns exec %s python test_delay.py %s" %(dev['name'], dev['addr'])
ret, info = exe(cmd)
dev['perf'] = None
if ret == False:
print "can't get delay info."
else:
info = info[1: -1].split(',')
try:
info[0] = int(info[0])
info[1] = float(info[1])
info[2] = float(info[2])
except Exception, e:
print "ERROR: " + info
print str(e)
else:
dev['perf'] = info
return dev
def experiment_once():
dev_list = None
nic_port_bandwidth = get_nic_netstats()
if is_network_node():
dev_list = [
{'type': 'vm_port', 'name': 'fa:16:3e:03:f1:60'},
{'type': 'nic', 'name': 'tape5abd65e-b4'},
{'type': 'nic', 'name': 'qvbe5abd65e-b4'},
{'type': 'nic', 'name': 'qvoe5abd65e-b4'},
{'type': 'ovs', 'name': 'qvoe5abd65e-b4'},
{'type': 'ovs', 'name': 'patch-tun'},
{'type': 'ovs', 'name': 'patch-int'},
{'type': 'ovs', 'name': 'vxlan-c0a89062'},
{'type': 'nic', 'name': 'vxlan_sys_4789'},
{'type': 'nic', 'name': 'ens4'},
{'type': 'delay', 'name': 'qdhcp-44609914-f133-4f66-bc6e-e16ecce7beec', 'addr': '192.168.1.3'}
]
else:
dev_list = [
{'type': 'nic', 'name': 'ens4'},
{'type': 'nic', 'name': 'vxlan_sys_4789'},
{'type': 'ovs', 'name': 'vxlan-c0a89009'},
{'type': 'ovs', 'name': 'patch-int'},
{'type': 'ovs', 'name': 'patch-tun'},
{'type': 'ovs', 'name': 'qvoaddfe2a4-7c'},
{'type': 'nic', 'name': 'qvoaddfe2a4-7c'},
{'type': 'nic', 'name': 'qvbaddfe2a4-7c'},
{'type': 'nic', 'name': 'tapaddfe2a4-7c'},
{'type': 'vm_port', 'name': 'fa:16:3e:7b:e3:9d'}
]
result = []
for dev in dev_list:
if dev['type'] == 'vm_port':
ret = get_vm_port(dev)
elif dev['type'] == 'nic':
ret = get_nic_port(dev, nic_port_bandwidth)
elif dev['type'] == 'ovs':
ret = get_ovs_port(dev)
elif dev['type'] == 'delay':
ret = get_delay(dev)
else:
print "error dev type: %s" %(dev['type'])
continue
result.append(ret)
return result
def add_once(result_list, once):
for i in range(len(once)):
if once[i]['type'] != 'delay':
result_list[i]['perf']['rx']['packets'] += once[i]['perf']['rx']['packets']
result_list[i]['perf']['rx']['bytes'] += once[i]['perf']['rx']['bytes']
result_list[i]['perf']['rx']['drop'] += once[i]['perf']['rx']['drop']
result_list[i]['perf']['rx']['err'] += once[i]['perf']['rx']['err']
result_list[i]['perf']['tx']['packets'] += once[i]['perf']['tx']['packets']
result_list[i]['perf']['tx']['bytes'] += once[i]['perf']['tx']['bytes']
result_list[i]['perf']['tx']['drop'] += once[i]['perf']['tx']['drop']
result_list[i]['perf']['tx']['err'] += once[i]['perf']['tx']['err']
else:
            # percent of lost packets, max round-trip time, avg round-trip time
result_list[i]['perf'][0] += once[i]['perf'][0]
result_list[i]['perf'][1] += once[i]['perf'][1]
result_list[i]['perf'][2] += once[i]['perf'][2]
def avg_result(result_list, times):
for i in result_list:
if i['type'] != 'delay':
i['perf']['rx']['packets'] /= times
i['perf']['rx']['bytes'] /= times
i['perf']['rx']['drop'] /= times
i['perf']['rx']['err'] /= times
i['perf']['tx']['packets'] /= times
i['perf']['tx']['bytes'] /= times
i['perf']['tx']['drop'] /= times
i['perf']['tx']['err'] /= times
else:
            # percent of lost packets, max round-trip time, avg round-trip time
i['perf'][0] /= times
i['perf'][1] /= float(times)
i['perf'][2] /= float(times)
def memory_usage():
phymem = psutil.virtual_memory()
line = "Memory: %5s%% %6s/%s" %(
phymem.percent,
str(int(phymem.used/1024/1024))+"M",
str(int(phymem.total/1024/1024))+"M"
)
return line
def getProcess(pName):
all_pids = psutil.pids()
process = None
for pid in all_pids:
p = psutil.Process(pid)
if (p.name() == pName):
process = p
return process
def experiment(bond, times = 30):
print "all iteration:%d" %(times)
start = time.time()
ovs = getProcess("ovs-vswitchd")
qemu = getProcess("qemu-kvm")
psutil.cpu_percent(None)
ovs.cpu_percent(None)
qemu.cpu_percent(None)
name = bond + ".txt"
fp = open(name, 'w')
fp.write("iperf test: occur %s times.\n" %(times))
result_list = []
once = experiment_once()
for i in once:
result_list.append(i)
for i in range(times - 1):
if (i + 1) % 10 == 0:
print "%d iteration" %(i + 1)
once = experiment_once()
add_once(result_list, once)
avg_result(result_list, times)
stop = time.time()
    cpu_per = psutil.cpu_percent(None)
mem_usg = memory_usage()
ovs_usg = "%d%%(%dM)" %(ovs.memory_percent(), ovs.memory_info().rss / 1024 / 1024)
qemu_usg = "%d%%(%dM)" %(qemu.memory_percent(), qemu.memory_info().rss / 1024 / 1024)
ovs_cpu = ovs.cpu_percent(None)
qemu_cpu = qemu.cpu_percent(None)
last = stop - start
fp.write("start:%s\t" %(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(start))))
fp.write("stop:%s\t" %(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(stop))))
fp.write("last:%d:%d:%d\n" %(last / 3600, (last % 3600) / 60, last % 60))
fp.write("CPU: %d%%\t%s\tOVS CPU: %d%%\t OVS Memory: %s\t QEMU CPU: %d%%\tQEMU Memory: %s\n"
        %(cpu_per, mem_usg, ovs_cpu, ovs_usg, qemu_cpu, qemu_usg))
for result in result_list:
result_str = "%-8s%-20s%-s\n" %(result['type'], result['name'][:20], json.dumps(result['perf']))
fp.write(result_str)
fp.close()
import sys
if __name__ == '__main__':
if len(sys.argv) == 1:
experiment("1M", 60)
elif len(sys.argv) == 2:
experiment(sys.argv[1], 60)
else:
experiment(sys.argv[1], int(sys.argv[2]))
#resource_usage()
#getProcess("ovs-vswitchd")
# iperf -f m -i 1 -p 5001 -u -b 1M -c -t 100 (-d)
# iperf -f m -i 1 -p 5001 -u -s
```
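For reference, the delta-then-average pattern used by `get_ovs_port`, `add_once` and `avg_result` above can be summarized in a small standalone sketch. This is not part of the repository; `read_counters` is a hypothetical callable standing in for something like `get_port_netstats`.
```python
import time

def snapshot_delta(read_counters, interval=1.0):
    """read_counters is a hypothetical callable returning {'rx': {...}, 'tx': {...}} of monotonic counters."""
    old = read_counters()
    time.sleep(interval)
    new = read_counters()
    # per-interval rate = difference between two cumulative counter snapshots
    return {d: {k: new[d][k] - old[d][k] for k in new[d]} for d in ('rx', 'tx')}

def average_samples(samples):
    # element-wise average of several delta samples, mirroring add_once/avg_result above
    n = float(len(samples))
    avg = {d: {k: 0 for k in samples[0][d]} for d in ('rx', 'tx')}
    for s in samples:
        for d in ('rx', 'tx'):
            for k in s[d]:
                avg[d][k] += s[d][k]
    return {d: {k: avg[d][k] / n for k in avg[d]} for d in ('rx', 'tx')}
```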
#### File: src/api/func.py
```python
import __init__
from comm.client import api_to_server_msg
from utils.conf import CONF
from openstackapi.auth import check_auth as os_check_auth
from comm.client import api_to_server_msg
from database.manager import Manager
def check_auth(msg):
auth_url = CONF.openstack_conf['auth_url']
return os_check_auth(auth_url, msg['token'], msg['project_name'])
def check_msg(msg):
response = {
'task_type': 'check',
'exe_result': False,
'req_id': msg['req_id'],
'project': msg['project_name'],
'result': None,
'error_msg': None
}
# check auth
if check_auth(msg) == False:
response['error_msg'] = 'no authorization'
return response
# call database api to create a check task entry
db_manager = Manager()
ret, result = db_manager.create_task(msg['project_name'], msg['req_id'])
if ret == False:
response['error_msg'] = result
return response
# dispatch task to back end
task = {'type': 'check', 'req_id': msg['req_id'], 'project': msg['project_name'], 'token': msg['token']}
ret, result = api_to_server_msg(task)
if ret == False:
response['error_msg'] = result
else:
response['exe_result'] = True
response['result'] = 'run check task'
return response
def get_status(msg):
response = {
'task_type': 'get_status',
'exe_result': False,
'req_id': msg['req_id'],
'project': msg['project_name'],
'result': None,
'error_msg': None
}
# check authority
if check_auth(msg) == False:
response['error_msg'] = 'no authorization'
return response
# call database api to obtain check task status
db_manager = Manager()
ret, result = db_manager.get_status(msg['project_name'], msg['req_id'])
if ret == False:
response['error_msg'] = result
else:
response['exe_result'] = True
response['result'] = result
return response
def get_result(msg):
response = {
'task_type': 'get_result',
'exe_result': False,
'req_id': msg['req_id'],
'project': msg['project_name'],
'result': None,
'error_msg': None
}
# check authority
if check_auth(msg) == False:
response['error_msg'] = 'no authorization'
return response
db_manager = Manager()
ret, result = db_manager.get_result(msg['project_name'], msg['req_id'])
if ret == False:
response['error_msg'] = result
else:
response['exe_result'] = True
response['result'] = result
return response
def get_history(msg):
response = {
'task_type': 'get_history',
'exe_result': False,
'req_id': msg['req_id'],
'project': msg['project_name'],
'result': None,
'error_msg': None
}
# check authority
if check_auth(msg) == False:
response['error_msg'] = 'no authorization'
return response
# call database api to obtain project check task history
db_manager = Manager()
ret, result = db_manager.get_history(msg['project_name'], msg['req_id'])
if ret == False:
response['error_msg'] = result
else:
response['exe_result'] = True
response['result'] = result
return response
```
#### File: src/api/start_api.py
```python
import __init__
from api_server import main
import sys
import os
import signal
from database import manager as dm
# from rpc import rpc_check
from utils.log import SELOG
#def test(signum, frame):
# f = open("test.txt", "a")
# f.writelines("receive:%s\n" %signum)
# #print("receive:", signum)
# f.close()
# sys.exit(0)
def start():
print "start service"
pid = os.getpid()
#print "pid:%d" %pid
#print "__file__"
#print __file__
#print os.path.split(os.path.realpath(__file__))
path = os.path.split(os.path.realpath(__file__))[0]
f = os.path.join(path, "api.pid")
fp = open(f, "w")
fp.writelines(str(pid))
fp.close()
print "pid in " + f
#os.system("/root/pre-online/pre_online/pre_online_api/api_server.py &")
main()
def check():
SELOG.info("----------------------------------------------------------------------")
SELOG.info("[api] check [start]")
db = dm.Manager()
db.api_check()
# rpc_check.start_check()
SELOG.info("[api] check [end]")
if __name__ == "__main__":
check()
start()
```
#### File: src/comm/server.py
```python
import __init__
from utils.log import FALOG
import time
import pika
import sys
import json
class Server(object):
def __init__(self, exchange, binding_keys, exchange_type, username = 'network_monitor', passwd = '<PASSWORD>', vhost = 'network_monitor',
host = '192.168.122.1', port = 5672):
self.exchange_type = exchange_type
credentials = pika.PlainCredentials(username, passwd)
self.connection = pika.BlockingConnection(pika.ConnectionParameters(host, port, vhost, credentials))
self.channel = self.connection.channel()
self.channel.exchange_declare(exchange= exchange,
exchange_type= exchange_type)
result = self.channel.queue_declare('', exclusive=True)
queue_name = result.method.queue
if exchange_type == "topic":
for binding_key in binding_keys:
self.channel.queue_bind(exchange=exchange,
queue=queue_name,
routing_key=binding_key)
elif exchange_type == "fanout":
self.channel.queue_bind(exchange = exchange, queue = queue_name)
else:
FALOG.error("exchange type error.")
sys.exit(-1)
print(' [*] Waiting for logs. To exit press CTRL+C')
#self.callback()
if exchange_type == "topic":
self.channel.basic_qos(prefetch_count = 1)
self.channel.basic_consume(on_message_callback = self.callback,
queue=queue_name)
else:
self.channel.basic_consume(on_message_callback = self.callback, queue = queue_name, auto_ack = False)
def run(self):
try:
self.channel.start_consuming()
except Exception, e:
print str(e) + "aaaa"
FALOG.error("network-monitor service down:%s" %(str(e)))
#sys.exit(1)
def callback(self, ch, method, props, body):
"""
body:message
properties:prop.reply_to
"""
# fanout type process
if self.exchange_type == "fanout":
pass
return
# topic type process
msg_type = method.routing_key.split(".")
if len(msg_type) < 3:
FALOG.error("receive msg routing_key with wrong format.")
if msg_type[0] == "api_to_server":
# other process
if msg_type[1] == "rpc":
response = {
"task_type": "start_re",
"exe_res": True,
"error_msg": "",
}
ch.basic_publish(exchange='',
routing_key=props.reply_to,
properties=pika.BasicProperties(correlation_id = \
props.correlation_id),
body = json.dumps(response))
elif msg_type[1] == "msg":
print "receive msg"
else:
FALOG.error("receive msg routing_key with wrong format[part 2].")
elif msg_type[0] == "agent_to_server":
if msg_type[1] == "rpc":
response = {
"task_type": "end_re",
"exe_res": True,
"error_msg": "",
}
ch.basic_publish(exchange='',
routing_key=props.reply_to,
properties=pika.BasicProperties(correlation_id = \
props.correlation_id),
body=json.dumps(response))
elif msg_type[1] == "msg":
print "receive msg"
else:
FALOG.error("receive msg routing_key with wrong format[part 2].")
# other process
else:
FALOG.error("receive msg routing_key with wrong format[part 1].")
ch.basic_ack(delivery_tag = method.delivery_tag)
print(" [x] %r:%r" % (method.routing_key, body))
class T(Server):
def __init__(self, exchange, binding_keys):
super(T, self).__init__(exchange, binding_keys)
#def callback(self):
# print "aaaaa"
# override callback
if __name__ == "__main__":
server = Server("server", ["api_to_server.*", "agent_to_server.*"], 'topic')
server.run()
#t = T("top", ["bbb"])
```
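The other side of this consumer lives in comm/client.py (not shown here). As a rough sketch of what a compatible publisher looks like with pika, the host, credentials and routing key below are placeholders rather than values taken from the repository:
```python
import json
import pika

def publish_to_server(routing_key, payload, host='192.168.122.1', port=5672,
                      vhost='network_monitor', username='network_monitor', passwd='example-password'):
    # Placeholder credentials; the real ones are configured elsewhere in the project.
    credentials = pika.PlainCredentials(username, passwd)
    connection = pika.BlockingConnection(pika.ConnectionParameters(host, port, vhost, credentials))
    channel = connection.channel()
    channel.exchange_declare(exchange='server', exchange_type='topic')
    channel.basic_publish(exchange='server', routing_key=routing_key, body=json.dumps(payload))
    connection.close()

# e.g. publish_to_server('api_to_server.msg.task', {'req_id': '1', 'project': 'admin'})
```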
#### File: src/database/manager.py
```python
import __init__
import connection
from utils.conf import CONF
import sys
from utils.log import DBLOG
from utils.log import SELOG
from utils.ftime import format_time
import time
import json
import datetime
import MySQLdb
class Manager(object):
def __init__(self, conf = CONF):
"""
conf:the conf object that have database connection params
"""
self.db_conf = CONF.db_conf
def any_operation(self):
conn = connection.Connection(conf_dict = self.db_conf)
sql = "show tables;"
conn.execute(sql)
#res = conn.fetchone()
#print res
# remember the ";"!!!!
res = conn.fetchall()
print res
conn.commit()
conn.close()
def exist_item(self, project_name, req_id):
conn = connection.Connection(conf_dict = self.db_conf)
sql = "select 1 from task where project = '%s' limit 1;" %project_name
ret = None
try:
conn.execute(sql)
ret = conn.fetchone()
except MySQLdb.Error, e:
conn.close()
DBLOG.error("database.exist_item - project-%s req-%s query item exist fail:%s" %(project_name, req_id, str(e)))
raise e
result = None
if ret == None:
result = False
else:
result = True
conn.close()
DBLOG.info("database.exist_item - project-%s req-%s query item exist:%s" %(project_name, req_id, str(result)))
return result
def create_task(self, project_name, req_id):
receive_time = format_time(time.time())
update_sql = ("update task set req_id = '%s', status = 'RECEIVED', receive_time = '%s',"
"start_time = NULL, stop_time = NULL, network_info = NULL, vm_info = NULL, vm_num = NULL,"
"receive_vm_num = 0, network_num = NULL, receive_network_num = 0, result = NULL"
" where project = '%s';") %(req_id, receive_time, project_name)
insert_sql = ("insert into task set project = '%s', req_id = '%s', status = 'RECEIVED',"
"receive_time= '%s', start_time = NULL, stop_time = NULL, network_info = NULL,"
"vm_info = NULL, vm_num = NULL, receive_vm_num = 0, network_num = NULL, receive_network_num = 0,"
"result = NULL;") %(project_name, req_id, receive_time)
conn = None
try:
conn = connection.Connection(conf_dict = self.db_conf)
except MySQLdb.Error, e:
DBLOG.error("database.create_task - project-%s req-%s task_start fail:%s" %(project_name, req_id, str(e)))
return False, str(e)
try:
if self.exist_item(project_name, req_id):
# update
ret, status = self.get_status(project_name, req_id)
if ret == False:
conn.close()
DBLOG.error("database.create_task - project-%s req-%s create start fail:%s" %(project_name, req_id, status))
return False, status
else:
if status != "END" and status != "EXPIRED":
conn.close()
DBLOG.error("database.create_task - project-%s req-%s create start fail:\
the project status is %s" %(project_name, req_id, status))
return False, "task create fail, the project task is running."
conn.execute(update_sql)
else:
# insert
conn.execute(insert_sql)
except MySQLdb.Error, e:
conn.close()
DBLOG.error("database.create_task - project-%s req-%s create task fail:%s" %(project_name, req_id, str(e)))
return False, str(e)
else:
conn.commit()
conn.close()
# DBLOG.info("vm-" + vm_id + " " + "req-" + req_id + " " + "task_start:" + str_time)
DBLOG.info("database.create_task - project-%s req-%s create task:%s" %(project_name, req_id, receive_time))
return True, None
def get_status(self, project_name, req_id):
status = None
conn = None
try:
conn = connection.Connection(conf_dict = self.db_conf)
except MySQLdb.Error, e:
DBLOG.error("databtase.get_status - project-%s req-%s get status fail: %s" %(project_name, req_id, str(e)))
return False, str(e)
# query task status for project
sql = "select status from task where project = '%s';" %(project_name)
ret = None
try:
conn.execute(sql)
ret = conn.fetchone()
except MySQLdb.Error, e:
conn.close()
DBLOG.error("databtase.get_status - project-%s req-%s get status fail: %s" %(project_name, req_id, str(e)))
return False, str(e)
conn.close()
state = False
if ret != None:
if ret[0] == "RECEIVED" or ret[0] == "START" or ret[0] == "RUNNING" or ret[0] == "END" or ret[0] == "EXPIRED":
DBLOG.info("databtase.get_status - project-%s req-%s status: %s" %(project_name, req_id, ret[0]))
state = True
result = ret[0]
else:
DBLOG.error("databtase.get_status - project-%s req-%s unkown status: %s" %(project_name, req_id, ret[0]))
result = "unkown status:" + ret[0]
else:
DBLOG.info("database.get_status - project--%s req-%s status: there is no the task" %(project_name, req_id))
result = "no task"
return state, result
def get_result(self, project_name, req_id):
ret, status = self.get_status(project_name, req_id)
if ret == False:
DBLOG.error("databtase.get_result - project-%s req-%s get result fail:%s" %(project_name, req_id, status))
return ret, status
if status != "END" and status != "EXPIRED":
DBLOG.error("databtase.get_result - project-%s req-%s get result fail:task not end(%s)" %(project_name, req_id, status))
return False, "task not end:" + status
conn = None
try:
conn = connection.Connection(conf_dict = self.db_conf)
except MySQLdb.Error, e:
DBLOG.error("databtase.get_result - project-%s req-%s get status fail: %s" %(project_name, req_id, str(e)))
return False, str(e)
# query task status for project
sql = "select result from task where project = '%s';" %(project_name)
ret = None
try:
conn.execute(sql)
ret = conn.fetchone()
except MySQLdb.Error, e:
conn.close()
DBLOG.error("databtase.get_result - project-%s req-%s get result fail: %s" %(project_name, req_id, str(e)))
return False, str(e)
conn.close()
DBLOG.info("database.get_result - vm-%s req-%s result:%s" %(project_name, req_id, ret[0]))
return True, ret[0]
def get_history(self, project_name, req_id):
conn = None
try:
conn = connection.Connection(conf_dict = self.db_conf)
except MySQLdb.Error, e:
DBLOG.error("database.get_history - project-%s req-%s get result fail:%s" %(project_name, req_id, str(e)))
return False, str(e)
sql = "select * from history where project = '%s' order by id desc;" %(project_name)
try:
conn.execute(sql)
all_res = conn.fetchall()
except MySQLdb.Error, e:
conn.close()
DBLOG.error("database.get_history - project-%s req-%s get result fail:%s" %(project_name, req_id, str(e)))
return False, str(e)
conn.close()
history_num = len(all_res)
# vm item column index map
history_index_map = {
'id': 0,
'project': 1,
'req_id': 2,
'status': 3,
'receive_time': 4,
'start_time': 5,
'stop_time': 6,
'network_info': 7,
'vm_info': 8,
'result': 9
}
history_result = {
'history_num': history_num,
'history_info': []
}
for i in range(history_num):
res = all_res[i]
result = {"index": i, "result": res[history_index_map['result']]}
history_result['history_info'].append(result)
DBLOG.info("database.get_history - project-%s req-%s get result success:%d items"
%(project_name, req_id, history_result['history_num']))
return True, history_result
def start_task(self, project_name, req_id, start_time, network_info, vm_info, network_num, vm_num):
# start_time = format_time(time.time())
update_sql = ("update task set status = 'START', start_time = '%s',"
"network_info = '%s', vm_info = '%s', vm_num = %d, network_num = %d "
"where project = '%s';") %(start_time, network_info, vm_info, vm_num, network_num, project_name)
#print update_sql
conn = None
try:
conn = connection.Connection(conf_dict = self.db_conf)
conn.execute(update_sql)
conn.commit()
conn.close()
except MySQLdb.Error, e:
conn.close()
DBLOG.error("database.start_task - project-%s req-%s start task fail:%s" %(project_name, req_id, str(e)))
return False, str(e)
else:
DBLOG.info("database.start_task - project-%s req-%s start task:%s" %(project_name, req_id, start_time))
return True, None
def receive_item(self, project_name, req_id, receive_vm_num, receive_network_num, info):
receive_time = format_time(time.time())
get_id_sql = ("select id from task where project = '%s';") %(project_name)
conn = None
task_id = None
try:
conn = connection.Connection(conf_dict = self.db_conf)
conn.execute(get_id_sql)
ret = conn.fetchone()
task_id = ret[0]
except MySQLdb.Error, e:
conn.close()
DBLOG.error("database.receive_item - project-%s req-%s receive item fail, "
"can't get task id:%s" %(project_name, req_id, str(e)))
return False, str(e)
set_task_sql = ("update task set status = 'RUNNING', "
"receive_vm_num = %d, receive_network_num = %d "
"where project = '%s';") %(receive_vm_num, receive_network_num, project_name)
store_item_sql = ("insert into item set task_id = %d, receive_time = '%s', "
"info = '%s';") %(task_id, receive_time, info)
try:
conn.execute(set_task_sql)
conn.execute(store_item_sql)
conn.commit()
conn.close()
except MySQLdb.Error, e:
conn.close()
DBLOG.error("database.receive_item - project-%s req-%s receive item fail:%s" %(project_name, req_id, str(e)))
return False, str(e)
else:
DBLOG.info("database.receive_item - project-%s req-%s receive item:%s" %(project_name, req_id, receive_time))
return True, None
def stop_task(self, project_name, req_id, status, result):
stop_time = format_time(time.time())
get_task_sql = ("select * from task where project = '%s';") %(project_name)
conn = None
task_info = None
try:
conn = connection.Connection(conf_dict = self.db_conf)
conn.execute(get_task_sql)
task_info = conn.fetchone()
except MySQLdb.Error, e:
conn.close()
DBLOG.error("database.stop_task - project-%s req-%s stop task fail, "
"can't get task info:%s" %(project_name, req_id, str(e)))
return False, str(e)
task_index_map = {
'id': 0,
'project': 1,
'req_id': 2,
'status': 3,
'receive_time': 4,
'start_time': 5,
'stop_time': 6,
'network_info': 7,
'vm_info': 8,
'vm_num': 9,
'receive_vm_num': 10,
'network_num': 11,
'receive_network_num': 12,
'result': 13
}
set_task_sql = ("update task set status = '%s', stop_time = '%s', result = '%s'"
"where project = '%s';") %(status, stop_time, result, project_name)
store_history_sql = ("insert into history set project = '%s', req_id = '%s', status = '%s',"
"receive_time = '%s', start_time = '%s', stop_time = '%s', network_info = '%s',"
"vm_info = '%s', result = '%s';") %(task_info[task_index_map['project']],
task_info[task_index_map['req_id']], status, task_info[task_index_map['receive_time']],
task_info[task_index_map['start_time']], stop_time, task_info[task_index_map['network_info']],
task_info[task_index_map['vm_info']], result)
delete_item_sql = ("delete from item where task_id = %d;" %(task_info[task_index_map['id']]))
try:
conn.execute(set_task_sql)
conn.execute(store_history_sql)
conn.execute(delete_item_sql)
conn.commit()
conn.close()
except MySQLdb.Error, e:
conn.close()
DBLOG.error("database.stop_task - project-%s req-%s stop task fail:%s" %(project_name, req_id, str(e)))
return False, str(e)
else:
DBLOG.error("database.stop_task - project-%s req-%s stop task item:%s" %(project_name, req_id, stop_time))
return True, None
def get_items(self, project_name, req_id):
task_id_sql = ("select id from task where project = '%s';") %(project_name)
conn = None
task_id = None
try:
conn = connection.Connection(conf_dict = self.db_conf)
conn.execute(task_id_sql)
ret = conn.fetchone()
task_id = ret[0]
except MySQLdb.Error, e:
conn.close()
DBLOG.error("database.get_items - project-%s req-%s get items fail, "
"can't get task id:%s" %(project_name, req_id, str(e)))
return False, str(e)
get_items_sql = ("select * from item where task_id = %d;") %(task_id)
try:
conn.execute(get_items_sql)
ret = conn.fetchall()
conn.commit()
conn.close()
except MySQLdb.Error, e:
conn.close()
DBLOG.error("database.get_items - project-%s req-%s get items fail:%s" %(project_name, req_id, str(e)))
return False, str(e)
else:
# process data
item_num = len(ret)
item_index_map = {
'id': 0,
'task_id': 1,
'receive_time': 2,
'info': 3
}
result = {
'item_num': item_num,
'item_info': []
}
for item in ret:
result['item_info'].append(item[item_index_map['info']])
DBLOG.error("database.get_items - project-%s req-%s get items:%d" %(project_name, req_id, item_num))
return True, result
def api_check(self):
SELOG.info("[database] check [start]")
conn = None
try:
conn = connection.Connection(conf_dict = self.db_conf)
conn.close()
except MySQLdb.Error, e:
print "database error:%s" %(str(e))
SELOG.error("database connection fail:%s" %(str(e)))
SELOG.info("[database] check [end]")
sys.exit(1)
SELOG.info("[database] check [end]")
def start_check(self):
# check the database consistence when service start
# the task status must be end or auto_end, change running to stop
SELOG.info("[database] check [start]")
conn = None
try:
conn = connection.Connection(conf_dict = self.db_conf)
except MySQLdb.Error, e:
print "database error:%s" %(str(e))
SELOG.error("database connection fail:%s" %(str(e)))
SELOG.info("[database] check [end]")
sys.exit(1)
SELOG.info("database connection success")
sql = "select count(*) from task where status = 'RUNNING' or status = 'START' or status = 'RECEIVED';"
n = 0
try:
conn.execute(sql)
# res = conn.fetchone()
n = conn.fetchone()[0]
# print res
except MySQLdb.Error, e:
conn.close()
print "database error:%s" %(str(e))
SELOG.error("database qeury task fail:%s" %(str(e)))
SELOG.info("[database] check [end]")
sys.exit(1)
if n == 0:
SELOG.info("database items consistence")
else:
SELOG.info("database items inconsistence:%d items" %(n))
sql = "update task set status = 'ERROR' where status = 'RUNNING' or status = 'START' or status = 'RECEIVED';"
try:
conn.execute(sql)
conn.commit()
conn.close()
except MySQLdb.Error, e:
conn.close()
SELOG.error("database update task status fail:%s" %(str(e)))
SELOG.info("[database] check [end]")
print "database error:%s" %(str(e))
sys.exit(1)
SELOG.info("[database] check [end]")
if __name__ == "__main__":
manager = Manager()
manager.any_operation()
print manager.exist_item("admin", "1")
print manager.create_task("root", "2")
#print manager.get_status("root", "2")
#print manager.get_result("root", "2")
#print manager.get_history("root", "2")
#print manager.start_task("root", "2", "2018-10-09 16:43:00", "none", "none", 0, 0)
print manager.receive_item("root", "2", 2, 3, "info")
#print manager.stop_task("root", "2", "END", "result")
print manager.get_items("root", "2")
```
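manager.py leans on a small `connection.Connection` wrapper from database/connection.py, which is not included here. Based purely on how it is used above (an execute/fetchone/fetchall/commit/close interface built from a conf dict), it is presumably something like the following sketch; the conf key names are assumptions:
```python
import MySQLdb

class Connection(object):
    def __init__(self, conf_dict):
        # Key names below are assumptions; the real conf comes from CONF.db_conf.
        self.conn = MySQLdb.connect(host=conf_dict['host'], port=conf_dict['port'],
                                    user=conf_dict['user'], passwd=conf_dict['passwd'],
                                    db=conf_dict['db'])
        self.cursor = self.conn.cursor()

    def execute(self, sql):
        self.cursor.execute(sql)

    def fetchone(self):
        return self.cursor.fetchone()

    def fetchall(self):
        return self.cursor.fetchall()

    def commit(self):
        self.conn.commit()

    def close(self):
        self.cursor.close()
        self.conn.close()
```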
#### File: src/openstackapi/nova.py
```python
from novaclient import client as nvclient
from auth import get_token
def get_project_server_info(auth_token, auth_url, project_name):
kw = {}
kw['project_domain_id'] = 'default'
nova = nvclient.Client("2", auth_token = auth_token, auth_url = auth_url, project_name = project_name, **kw)
# print nova.servers.list()
s = nova.servers.list()
#h = nova.hypervisors.list()
# print h[0].to_dict()
attributes = ['OS-EXT-STS:task_state', 'addresses', 'OS-EXT-STS:vm_state', 'OS-EXT-SRV-ATTR:instance_name',
'OS-SRV-USG:launched_at', 'id', 'security_groups', 'user_id', 'OS-EXT-STS:power_state',
'OS-EXT-AZ:availability_zone', 'status', 'updated', 'hostId', 'OS-EXT-SRV-ATTR:host',
'OS-SRV-USG:terminated_at', 'OS-EXT-SRV-ATTR:hypervisor_hostname', 'name', 'created', 'tenant_id'
]
info = []
for server in s:
s_info = {}
server_info = server.to_dict()
for key in attributes:
s_info[key] = server_info[key]
info.append(s_info)
return info
if __name__ == '__main__':
auth_url = 'http://192.168.122.2:5000/v3'
username = 'admin'
password = '<PASSWORD>'
project_name = 'admin'
auth_token = get_token(username, password, auth_url, project_name)
if auth_token == None:
print 'auth fail.'
exit(0)
print get_project_server_info(auth_token, auth_url, project_name)
```
#### File: src/server/func.py
```python
import __init__
import json
import commands
from utils.log import SELOG, SERVERLOG
def exe(cmd):
ret, result = commands.getstatusoutput(cmd)
if ret == 0:
return True, result
else:
return False, result
def addr_delay(addr):
#print type(addr)
#print addr
if addr['dhcp_netns'] != None:
cmd = "ip netns exec %s python test_delay.py %s" %(addr['dhcp_netns'], addr['addr'])
ret, info = exe(cmd)
else:
cmd = "python test_delay.py %s" %(addr['addr'])
ret, info = exe(cmd)
if ret == False:
SERVERLOG.info("server.func.addr_delay - cmd:%s return error, %s." %(cmd, info))
addr['performance']['error_msg'] = "can't get delay info."
else:
info = info[1: -1].split(',')
try:
info[0] = int(info[0])
info[1] = float(info[1])
info[2] = float(info[2])
except Exception, e:
print "ERROR: " + info
print str(e)
addr['performance']['error_msg'] = "data formate error."
return
else:
addr['performance']['delay'] = info
#print info
def check_delay(result):
#result = {
# "project": self.project,
# "req_id": self.req_id,
# "item_num": items["item_num"],
# "info": None
# }
# info = [info]
# info struct
# {
# "vm_num": 0,
# "host": "ip_addr",
# "is_network_node": False,
# "topo": "topo_struct"
# }
node = result['info'][0]
# for key in node:
# print key + " "
for node in result['info']:
for dev in node['topo']['device']:
if dev['check']['result'] == True:
if dev['type'] == "virtual host" :
for net in dev['addresses']:
for addr in dev['addresses'][net]:
addr_delay(addr)
else:
for addr in dev['addresses']:
addr_delay(addr)
def evaluate_performance(result):
rules = ["compute_node_cpu_rate >= 90% => current network performance is low, CPU is the bottleneck."]
for node in result['info']:
if node['check']['cpu_rate'] >= 90:
print "node[%s] satisfys the rule: %s" %(node['hostname'], rules[0])
```
#### File: test/d2/t2.py
```python
from d1 import t1
def f():
print "in t2"
t1
#ff()
``` |
{
"source": "jiangyu53231323/deeplab-xception",
"score": 3
} |
#### File: dataloaders/datasets/pascal.py
```python
from __future__ import print_function, division
import os
from PIL import Image
import numpy as np
from torch.utils.data import Dataset
from mypath import Path
from torchvision import transforms
from dataloaders import custom_transforms as tr
class VOCSegmentation(Dataset):
"""
PascalVoc dataset
"""
#NUM_CLASSES = 21
NUM_CLASSES = 8
def __init__(self,
args,
                 base_dir=Path.db_root_dir('pascal'), # set in mypath.py; the dataset root path, e.g. 'G:\\LoveDA\\'
split='train',
):
"""
:param base_dir: path to VOC dataset directory
:param split: train/val
:param transform: transform to apply
"""
super().__init__()
        self._base_dir = base_dir # dataset root directory, e.g. 'G:\\LoveDA\\'
        self._image_dir = os.path.join(self._base_dir, 'JPEGImages') # 'G:\\LoveDA\\JPEGImages'
        self._cat_dir = os.path.join(self._base_dir, 'SegmentationClass') # 'G:\\LoveDA\\SegmentationClass'
        if isinstance(split, str): # split is 'train' for the training set or 'val' for the validation set; isinstance() checks the type
            self.split = [split]
        else:
            split.sort() # sort the list in place
            self.split = split
        self.args = args
        _splits_dir = os.path.join(self._base_dir, 'ImageSets', 'Segmentation') # folder holding the split files with the image ids
        # 'G:\\LoveDA\\ImageSets\\Segmentation'
        self.im_ids = [] # list of image ids
        self.images = [] # list of image paths
        self.categories = [] # list of label (mask) image paths
        for splt in self.split: # read the image information into the lists
            with open(os.path.join(os.path.join(_splits_dir, splt + '.txt')), "r") as f: # 'G:\\LoveDA\\ImageSets\\Segmentation\\train.txt'
                lines = f.read().splitlines() # f.readlines() keeps the trailing \n, f.read().splitlines() does not
            for ii, line in enumerate(lines): # (ii=0, line='1366'); enumerate() returns an enumerate object
                _image = os.path.join(self._image_dir, line + ".jpg") # 'G:\\LoveDA\\JPEGImages\\1366.jpg'
                _cat = os.path.join(self._cat_dir, line + ".png") # 'G:\\LoveDA\\SegmentationClass\\1366.png'
                # assert checks a condition and aborts the program if it does not hold
                assert os.path.isfile(_image) # os.path.isfile checks whether the given (absolute) path is a file
                assert os.path.isfile(_cat)
                self.im_ids.append(line) # store the image id, e.g. 1366
                self.images.append(_image) # store the image path 'G:\\LoveDA\\JPEGImages\\1366.jpg'
                self.categories.append(_cat) # store the label path 'G:\\LoveDA\\SegmentationClass\\1366.png'
        assert (len(self.images) == len(self.categories)) # the number of images must match the number of labels
# Display stats
print('Number of images in {}: {:d}'.format(split, len(self.images))) # 1156
def __len__(self):
return len(self.images)
def __getitem__(self, index):
        _img, _target = self._make_img_gt_point_pair(index) # read the image pair; img is RGB, size (1024, 1024)
        sample = {'image': _img, 'label': _target} # a dict holding one key-value pair per item
        for split in self.split:
            if split == "train":
                return self.transform_tr(sample) # apply the training-set transforms
            elif split == 'val':
                return self.transform_val(sample) # apply the validation-set transforms
    def _make_img_gt_point_pair(self, index): # reading the label this way already yields class ids 0-7
        # Image.open() only opens the image lazily; the actual pixel data is not read yet
        # Image.open() reads color images with channel order RGB by default
        # without .convert('RGB') the image is read as 4-channel RGBA; the alpha channel is not needed for training, so convert('RGB') drops it
        _img = Image.open(self.images[index]).convert('RGB') # original image, RGB: 3x8-bit pixels, true color
        _target = Image.open(self.categories[index]) # annotation (label) image
        # _img.show()
        # _target.show()
        return _img, _target
    def transform_tr(self, sample): # training-set pipeline: flip - crop - blur - normalize - to tensor; values end up roughly in [-1.5, 0.5]
        composed_transforms = transforms.Compose([
            tr.RandomHorizontalFlip(), # randomly flip the PIL image horizontally with a given probability
            tr.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size), # random scale then crop of the original image
            tr.RandomGaussianBlur(), # smooth the image with a Gaussian filter
            tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)), # normalize so the values are comparable
            tr.ToTensor()]) # tensors store images in (c, h, w) format
        return composed_transforms(sample)
    def transform_val(self, sample): # validation-set pipeline: crop - normalize - to tensor; labels stay in [0, 7] since there are 8 classes
composed_transforms = transforms.Compose([
tr.FixScaleCrop(crop_size=self.args.crop_size),
tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
tr.ToTensor()])
return composed_transforms(sample)
def __str__(self):
return 'VOC2012(split=' + str(self.split) + ')'
if __name__ == '__main__':
from dataloaders.utils import decode_segmap
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import argparse
parser = argparse.ArgumentParser()
args = parser.parse_args()
args.base_size = 513
args.crop_size = 513
    voc_train = VOCSegmentation(args, split='train') # training-set dataset (original images)
    dataloader = DataLoader(voc_train, batch_size=5, shuffle=True, num_workers=0) # num_workers=0: only the main process loads batches
    for ii, sample in enumerate(dataloader):
        for jj in range(sample["image"].size()[0]): # sample is a dict; jj runs over the 5 items in the batch
            img = sample['image'].numpy() # (5, 3, 513, 513) color images, values roughly in [-1.5, 0.5], e.g. [[[[-1.1075435 -1.1760424 -1.1246682 ...
            gt = sample['label'].numpy() # (5, 513, 513) float label maps, one (513, 513) map per batch item; the mask is single-channel, not color
            plt.show()
            tmp = np.array(gt[jj]).astype(np.uint8) # uint8: unsigned 8-bit integers in [0, 255]; tmp is 2-D (513, 513), e.g. [[1 1 1 ... 0 0 0]
            segmap = decode_segmap(tmp, dataset='pascal') # (513, 513, 3): render the label map in color, e.g. [[[1. 0. 0.],
            img_tmp = np.transpose(img[jj], axes=[1, 2, 0]) # (513, 513, 3); np.transpose reorders the axes to HWC, e.g. [[[-1.1075435 -0.897759 -0.688976 ],
            # inverse of the normalization: recover color values in the 0-255 range
            img_tmp *= (0.229, 0.224, 0.225) # before training, the per-channel mean is subtracted from the image as a kind of normalization.
            img_tmp += (0.485, 0.456, 0.406) # images are roughly stationary distributions; subtracting the per-channel mean removes the common part
            img_tmp *= 255.0 # so that individual differences and features stand out.
            img_tmp = img_tmp.astype(np.uint8) # unsigned 8-bit integers in [0, 255]
            plt.figure()
            plt.title('display')
            plt.subplot(211)
            plt.imshow(img_tmp) # show the original image
            plt.subplot(212)
            plt.imshow(segmap) # show the colored label map
# print(segmap)
if ii == 1:
break
plt.show(block=True)
``` |
{
"source": "jiangyuang/ModelPruningLibrary",
"score": 3
} |
#### File: mpl/models/alexnet.py
```python
import torch
import torchvision.models
from typing import Any
from .base_model import BaseModel
__all__ = ['AlexNet', 'alexnet']
class AlexNet(BaseModel):
def __init__(self, model: torchvision.models.AlexNet):
super(AlexNet, self).__init__()
self.clone_from_model(model)
self.process_layers()
def process_layers(self):
self.collect_prunable_layers()
self.convert_eligible_layers()
self.collect_prunable_layers()
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.features(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
def alexnet(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> AlexNet:
return AlexNet(torchvision.models.alexnet(pretrained, progress, **kwargs))
```
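A hypothetical usage sketch for the wrapper above, assuming the `mpl` package is importable and that `BaseModel.clone_from_model`/`convert_eligible_layers` (defined elsewhere in the repository) behave as their names suggest:
```python
import torch
from mpl.models.alexnet import alexnet

model = alexnet(pretrained=False)         # wraps torchvision's AlexNet with prunable layers
out = model(torch.randn(1, 3, 224, 224))  # standard AlexNet input size
print(out.shape)                          # expected: torch.Size([1, 1000])
```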
#### File: mpl/models/leaf.py
```python
from torch import nn as nn
from .base_model import BaseModel
from ..nn.conv2d import DenseConv2d
from ..nn.linear import DenseLinear
__all__ = ["Conv2", "conv2", "Conv4", "conv4"]
class Conv2(BaseModel):
def __init__(self):
super(Conv2, self).__init__()
self.features = nn.Sequential(DenseConv2d(1, 32, kernel_size=5, padding=2), # 32x28x28
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2), # 32x14x14
DenseConv2d(32, 64, kernel_size=5, padding=2), # 64x14x14
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2)) # 64x7x7
self.classifier = nn.Sequential(DenseLinear(64 * 7 * 7, 2048),
nn.ReLU(inplace=True),
DenseLinear(2048, 62))
self.collect_prunable_layers()
def forward(self, inp):
out = self.features(inp)
out = out.view(out.size(0), -1)
out = self.classifier(out)
return out
class Conv4(BaseModel):
def __init__(self):
super(Conv4, self).__init__()
self.features = nn.Sequential(DenseConv2d(3, 32, kernel_size=3, padding=1),
nn.BatchNorm2d(32),
nn.MaxPool2d(2),
DenseConv2d(32, 32, kernel_size=3, padding=1),
nn.BatchNorm2d(32),
nn.MaxPool2d(2),
DenseConv2d(32, 32, kernel_size=3, padding=2),
nn.BatchNorm2d(32),
nn.MaxPool2d(2),
DenseConv2d(32, 32, kernel_size=3, padding=2),
nn.BatchNorm2d(32),
nn.MaxPool2d(2))
self.classifier = DenseLinear(in_features=32 * 6 * 6, out_features=2)
def forward(self, inp):
out = self.features(inp)
out = out.view(out.size(0), -1)
out = self.classifier(out)
return out
def conv2() -> Conv2:
return Conv2()
def conv4() -> Conv4:
return Conv4()
# TODO: define pretrain etc.
```
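A quick forward-pass sketch for `Conv2` (assuming the `mpl` package is on the path): the single-channel 28x28 input matches the FEMNIST/MNIST-style data this model is shaped for, and the classifier emits 62 classes:
```python
import torch
from mpl.models.leaf import conv2

model = conv2()
logits = model(torch.randn(4, 1, 28, 28))  # batch of 4 single-channel 28x28 images
print(logits.shape)                        # expected: torch.Size([4, 62])
```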
#### File: mpl/models/utils.py
```python
import torch.nn as nn
from ..nn.conv2d import DenseConv2d, SparseConv2d
from ..nn.linear import DenseLinear, SparseLinear
from typing import Callable
def is_prunable_fc(layer):
return isinstance(layer, DenseLinear) or isinstance(layer, SparseLinear)
def is_prunable_conv(layer):
return isinstance(layer, DenseConv2d) or isinstance(layer, SparseConv2d)
def is_prunable(layer):
return is_prunable_fc(layer) or is_prunable_conv(layer)
def is_parameterized(layer):
return is_prunable(layer) or isinstance(layer, nn.Linear) or isinstance(layer, nn.Conv2d)
def collect_leaf_modules(module, criterion: Callable, layers: list, names: list, prefix: str = ""):
for key, submodule in module._modules.items():
new_prefix = prefix
if prefix != "":
new_prefix += '.'
new_prefix += key
# is leaf and satisfies criterion
if submodule is not None:
if len(submodule._modules.keys()) == 0 and criterion(submodule):
layers.append(submodule)
names.append(new_prefix)
collect_leaf_modules(submodule, criterion, layers, names, prefix=new_prefix)
```
#### File: mpl/models/vgg.py
```python
import torch
import torch.nn as nn
import torchvision.models
from typing import Any
from .base_model import BaseModel
__all__ = [
'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
'vgg19_bn', 'vgg19',
]
class VGG(BaseModel):
def __init__(self, model: torchvision.models.VGG):
super(VGG, self).__init__()
self.clone_from_model(model)
self.process_layers()
def process_layers(self):
self.collect_prunable_layers()
self.convert_eligible_layers()
self.collect_prunable_layers()
def forward(self, x):
x = self.features(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
def vgg11(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:
return VGG(torchvision.models.vgg11(pretrained, progress, **kwargs))
def vgg11_bn(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:
return VGG(torchvision.models.vgg11_bn(pretrained, progress, **kwargs))
def vgg13(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:
return VGG(torchvision.models.vgg13(pretrained, progress, **kwargs))
def vgg13_bn(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:
return VGG(torchvision.models.vgg13_bn(pretrained, progress, **kwargs))
def vgg16(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:
return VGG(torchvision.models.vgg16(pretrained, progress, **kwargs))
def vgg16_bn(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:
return VGG(torchvision.models.vgg16_bn(pretrained, progress, **kwargs))
def vgg19(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:
return VGG(torchvision.models.vgg19(pretrained, progress, **kwargs))
def vgg19_bn(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:
return VGG(torchvision.models.vgg19_bn(pretrained, progress, **kwargs))
```
#### File: mpl/nn/linear.py
```python
import torch
import torch.nn as nn
import torch.sparse as sparse
from ..autograd.functions import AddmmFunction
__all__ = ["SparseLinear", "DenseLinear"]
class SparseLinear(nn.Module):
__constants__ = ['in_features', 'out_features']
def __init__(self, weight: sparse.FloatTensor, bias, mask):
super(SparseLinear, self).__init__()
if not weight.is_sparse:
raise ValueError("Weight must be sparse")
elif weight._nnz() > 0 and not weight.is_coalesced():
raise ValueError("Weight must be coalesced")
self.in_features = weight.size(1)
self.out_features = weight.size(0)
# in order to add to optimizer
self.weight = nn.Parameter(weight.data.clone(), requires_grad=False)
self.mask = mask.clone()
# Don't move after creation to make it a leaf
self.dense_weight_placeholder = nn.Parameter(torch.empty(size=self.weight.size(), device=self.weight.device))
self.dense_weight_placeholder.is_placeholder = True
# create links
self.weight.dense = self.dense_weight_placeholder
self.weight.mask = self.mask
self.weight.is_sparse_param = True
if bias is None:
self.register_parameter('bias', None)
else:
assert bias.size() == torch.Size((weight.size(0), 1))
self.bias = nn.Parameter(bias.data.clone())
def _sparse_masked_select_abs(self, sparse_tensor: sparse.FloatTensor, thr):
indices = sparse_tensor._indices()
values = sparse_tensor._values()
prune_mask = torch.abs(values) >= thr
return torch.sparse_coo_tensor(indices=indices.masked_select(prune_mask).reshape(2, -1),
values=values.masked_select(prune_mask),
size=[self.out_features, self.in_features]).coalesce()
def prune_by_threshold(self, thr):
self.weight = nn.Parameter(self._sparse_masked_select_abs(self.weight, thr))
def prune_by_rank(self, rank):
weight_val = self.weight._values()
sorted_abs_weight = torch.sort(torch.abs(weight_val))[0]
thr = sorted_abs_weight[rank]
self.prune_by_threshold(thr)
def prune_by_pct(self, pct):
if pct == 0:
return
prune_idx = int(self.weight._nnz() * pct)
self.prune_by_rank(prune_idx)
def move_data(self, device: torch.device):
self.weight = self.weight.to(device)
def forward(self, inp: torch.Tensor):
return AddmmFunction.apply(self.bias, self.weight, self.dense_weight_placeholder, inp.t()).t()
@property
def num_weight(self) -> int:
return self.weight._nnz()
def __repr__(self):
return "SparseLinear(in_features={}, out_features={}, bias={})".format(self.in_features, self.out_features,
self.bias is not None)
def __str__(self):
return self.__repr__()
class DenseLinear(nn.Linear):
def __init__(self, in_features: int, out_features: int, bias: bool = True) -> None:
super(DenseLinear, self).__init__(in_features, out_features, bias)
self.mask = torch.ones_like(self.weight, dtype=torch.bool, device=self.weight.device)
def forward(self, inp: torch.Tensor):
return nn.functional.linear(inp, self.weight * self.mask, self.bias)
def prune_by_threshold(self, thr):
self.mask *= (self.weight.abs() >= thr)
def prune_by_rank(self, rank):
if rank == 0:
return
weight_val = self.weight[self.mask == 1.]
sorted_abs_weight = weight_val.abs().sort()[0]
thr = sorted_abs_weight[rank]
self.prune_by_threshold(thr)
def prune_by_pct(self, pct):
prune_idx = int(self.num_weight * pct)
self.prune_by_rank(prune_idx)
def random_prune_by_pct(self, pct):
prune_idx = int(self.num_weight * pct)
rand = torch.rand(size=self.mask.size(), device=self.mask.device)
rand_val = rand[self.mask == 1]
sorted_abs_rand = rand_val.sort()[0]
thr = sorted_abs_rand[prune_idx]
self.mask *= (rand >= thr)
@classmethod
def from_linear(cls, linear_module: nn.Linear):
new_linear = cls(linear_module.in_features, linear_module.out_features,
bias=linear_module.bias is not None)
new_linear.weight = nn.Parameter(linear_module.weight.clone())
if linear_module.bias is not None:
new_linear.bias = nn.Parameter(linear_module.bias.clone())
return new_linear
# This method will always remove zero elements, even if you wish to keep zeros in the sparse form
def to_sparse(self) -> SparseLinear:
sparse_bias = None if self.bias is None else self.bias.reshape((-1, 1))
masked_weight = self.weight * self.mask
mask = masked_weight != 0.
return SparseLinear(masked_weight.to_sparse(), sparse_bias, mask)
def move_data(self, device: torch.device):
self.mask = self.mask.to(device)
def to(self, *args, **kwargs):
device = torch._C._nn._parse_to(*args, **kwargs)[0]
if device is not None:
self.move_data(device)
return super(DenseLinear, self).to(*args, **kwargs)
@property
def num_weight(self) -> int:
return self.mask.sum().item()
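
# Hypothetical usage sketch (not part of the original module): prune half of a DenseLinear's
# weights by magnitude, then convert the layer to its sparse representation.
if __name__ == '__main__':
    demo = DenseLinear(8, 4)
    demo.prune_by_pct(0.5)             # masks out the smallest-magnitude half of the weights
    print(demo.num_weight)             # roughly 16 of the original 32 weights remain
    demo_out = demo(torch.rand(2, 8))  # forward still works; masked weights act as zeros
    sparse_demo = demo.to_sparse()     # SparseLinear holding only the surviving entries
    print(sparse_demo.num_weight)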
``` |
{
"source": "jiangyueczr/algorithm",
"score": 4
} |
#### File: algorithm/questions/string_sort.py
```python
def special_sort(string):
length = len(string)
i = 0
list_ = list(string)
while i < length:
j = 0
while j < length - 1 - i:
k = j + 1
if 90 < ord(list_[j]) < 97 or ord(list_[j]) > 122 or ord(list_[j]) < 65:
j += 1
k += 1
elif 90 < ord(list_[k]) < 97 or ord(list_[k]) > 122 or ord(list_[k]) < 65:
k += 1
if 65 <= ord(list_[j]) <= 90:
if 65 <= ord(list_[k]) <= 90:
if ord(list_[j]) > ord(list_[k]):
list_[j], list_[k] = list_[k], list_[j]
j = k
continue
elif 97 <= ord(list_[k]) <= 122:
if ord(list_[j]) + 32 > ord(list_[k]):
list_[j], list_[k] = list_[k], list_[j]
j = k
continue
elif 97 <= ord(list_[j]) <= 122:
if 97 <= ord(list_[k]) <= 122:
if ord(list_[j]) > ord(list_[k]):
list_[j], list_[k] = list_[k], list_[j]
j = k
continue
elif 65 <= ord(list_[k]) <= 90:
if ord(list_[j]) - 32 > ord(list_[k]):
list_[j], list_[k] = list_[k], list_[j]
j = k
continue
j = k
i += 1
return "".join(list_)
if __name__ == '__main__':
print(special_sort("Bab?A"))
print(special_sort("Ba?bA"))
print(special_sort("ZcB.ab/A"))
```
#### File: algorithm/sorts/quick_sort.py
```python
def quick_sort(array):
"""
    Quick sort implementation.
    :param array: a list
    :return: the list sorted in ascending order
"""
if len(array) < 2:
return array
pivot = array.pop()
big_elements = list()
small_elements = list()
for element in array:
(big_elements if element > pivot else small_elements).append(element)
return quick_sort(small_elements) + [pivot] + quick_sort(big_elements)
def partition(array, left_index, right_index):
"""
    Partition function.
    :param array: a list
    :param left_index: left boundary of the array, usually 0 initially
    :param right_index: right boundary of the array, usually len(array) - 1 initially
    :return: the final position of the pivot element
"""
pivot = array[left_index]
while left_index < right_index:
while left_index < right_index and pivot <= array[right_index]:
right_index -= 1
if left_index < right_index:
array[left_index] = array[right_index]
left_index += 1
while left_index < right_index and pivot >= array[left_index]:
left_index += 1
if left_index < right_index:
array[right_index] = array[left_index]
right_index -= 1
array[left_index] = pivot
return left_index
def quick_sort2(array, left_index, right_index):
"""
    Quick sort implementation #2 (in-place).
    :param array: a list
    :param left_index: left boundary of the array, usually 0 initially
    :param right_index: right boundary of the array, usually len(array) - 1 initially
    :return: the list sorted in ascending order
"""
if left_index < right_index:
pivot_index = partition(array, left_index, right_index)
quick_sort2(array, left_index, pivot_index - 1)
quick_sort2(array, pivot_index + 1, right_index)
return array
return array
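
# Quick sanity check (not part of the original file): both implementations should give the same ascending order.
if __name__ == '__main__':
    data = [5, 3, 8, 1, 9, 2]
    print(quick_sort(list(data)))                     # [1, 2, 3, 5, 8, 9]
    print(quick_sort2(list(data), 0, len(data) - 1))  # [1, 2, 3, 5, 8, 9]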
``` |
{
"source": "jiangyx3915/-graph-bed",
"score": 2
} |
#### File: apps/apis/plugin.py
```python
from flask import Blueprint
plugin = Blueprint(__name__, 'plugin')
@plugin.route('/plugins')
def plugins():
"""
    Get the list of plugins.
:return:
"""
```
#### File: apps/utils/paths.py
```python
from application.config import base_path
import fnmatch
import os
def get_plugin_list(path):
"""
    Get the paths of all plugins.
:param path:
:return:
"""
paths = []
for f_name in os.listdir(os.path.join(base_path, *path.split('.'))):
if fnmatch.fnmatch(f_name, '*.py') and f_name != '__init__.py':
paths.append(f'{path}.{f_name[0:-3]}')
return paths
if __name__ == '__main__':
print(get_plugin_list('application.plugins'))
pass
```
#### File: jiangyx3915/-graph-bed/manage.py
```python
import os
import click
from apps import create_app, db
from apps.models import User, Image
app = create_app(os.getenv('DEVELOPMENT', 'default'))
@click.group()
def cli():
...
@cli.command()
@click.argument('plugin_name')
def startplugins(plugin_name):
print(plugin_name)
@app.shell_context_processor
def make_shell_context():
return dict(app=app, db=db, User=User, Image=Image)
if __name__ == '__main__':
cli()
``` |
{
"source": "jiangyx3915/IPProxyPool",
"score": 3
} |
#### File: IPProxyPool/IPProxyPool/getter.py
```python
import sys
from IPProxyPool.logger import logger
from IPProxyPool.store import RedisClient
from IPProxyPool.crawler import ProxyCrawl
from IPProxyPool.settings import POOL_MAX_THRESHOLD
class Getter:
def __init__(self):
self.client = RedisClient()
self.crawler = ProxyCrawl()
def is_over_threshold(self):
"""判断是否达到了代理池容量的限制"""
if self.client.count() >= POOL_MAX_THRESHOLD:
return True
return False
def run(self):
logger.info("代理池获取器开始运行")
if not self.is_over_threshold():
for callback in self.crawler.__CrawlFunc__:
proxies = self.crawler.get_proxies(callback)
for proxy in proxies:
self.client.add(proxy)
``` |
{
"source": "jiangyx3915/leetcode",
"score": 4
} |
#### File: leetcode/algorithms/design-linked-list.py
```python
class MyLinkedList:
def __init__(self):
"""
Initialize your data structure here.
"""
self.link = list()
def get(self, index):
"""
Get the value of the index-th node in the linked list. If the index is invalid, return -1.
:type index: int
:rtype: int
"""
if index < 0 or index >= len(self.link):
return -1
return self.link[index]
def addAtHead(self, val):
"""
Add a node of value val before the first element of the linked list. After the insertion, the new node will be the first node of the linked list.
:type val: int
:rtype: void
"""
self.link.insert(0, val)
def addAtTail(self, val):
"""
Append a node of value val to the last element of the linked list.
:type val: int
:rtype: void
"""
self.link.append(val)
def addAtIndex(self, index, val):
"""
Add a node of value val before the index-th node in the linked list. If index equals to the length of linked list, the node will be appended to the end of linked list. If index is greater than the length, the node will not be inserted.
:type index: int
:type val: int
:rtype: void
"""
if index == len(self.link):
self.link.append(val)
elif index < len(self.link):
self.link.insert(index, val)
def deleteAtIndex(self, index):
"""
Delete the index-th node in the linked list, if the index is valid.
:type index: int
:rtype: void
"""
if index < len(self.link) and index >= 0:
self.link.pop(index)
# Your MyLinkedList object will be instantiated and called as such:
# obj = MyLinkedList()
# param_1 = obj.get(index)
# obj.addAtHead(val)
# obj.addAtTail(val)
# obj.addAtIndex(index,val)
# obj.deleteAtIndex(index)
```
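A short usage sketch for the `MyLinkedList` class above, mirroring the classic LeetCode example:
```python
obj = MyLinkedList()
obj.addAtHead(1)
obj.addAtTail(3)
obj.addAtIndex(1, 2)   # the list is now 1 -> 2 -> 3
print(obj.get(1))      # 2
obj.deleteAtIndex(1)   # the list is now 1 -> 3
print(obj.get(1))      # 3
```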
#### File: leetcode/algorithms/reverse-linked-list.py
```python
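# Definition for singly-linked list (normally provided by the LeetCode harness; shown here for context):
# class ListNode:
#     def __init__(self, x):
#         self.val = x
#         self.next = None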
class Solution:
def reverseList(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
header = ListNode(0)
while head:
node = ListNode(head.val)
node.next = header.next
header.next = node
head = head.next
return header.next
``` |
{
"source": "jiangyx3915/oh-my-email",
"score": 2
} |
#### File: oh-my-email/oh_my_email/vo.py
```python
import requests
from abc import abstractmethod
from dataclasses import dataclass
from email.utils import formataddr
from email.mime.image import MIMEImage
from email.mime.application import MIMEApplication
from oh_my_email.utils import analyze_html_img, img2base64
@dataclass()
class OhMyEmailContact:
email: str
name: str = ""
def render(self):
return formataddr([self.name, self.email])
@dataclass()
class OhMyEmailConfig:
mail_host: str
mail_port: int
mail_user: str
mail_pass: str
class OhMyEmailBaseContent:
def __init__(self, content, content_type, extra):
self.content = content
self.content_type = content_type
self.extra = extra
@abstractmethod
def dispatch_content(self, message):
"""
:param message:
:return:
"""
class OhMyEmailPlainContent(OhMyEmailBaseContent):
def __init__(self, content, extra=None):
super().__init__(content, 'plain', extra)
def dispatch_content(self, message):
return self.content
class OhMyEmailHtmlContent(OhMyEmailBaseContent):
def __init__(self, content, extra=None):
super().__init__(content, 'html', extra)
def dispatch_content(self, message):
result = analyze_html_img(self.content)
for item in result:
data = img2base64(item)
self.content = self.content.replace(item, f'data:image/jpg;base64,{data}')
return self.content
class BaseAttachment:
@abstractmethod
def patch(self):
raise NotImplemented()
class UrlAttachment(BaseAttachment):
def __init__(self, url, filename):
self.url = url
self.filename = filename
def patch(self):
raw = requests.get(self.url).content
part = MIMEApplication(raw)
part.add_header('Content-Disposition', 'attachment', filename=self.filename)
return part
class FileAttachment(BaseAttachment):
def __init__(self, filepath, filename):
self.filepath = filepath
self.filename = filename
def patch(self):
with open(self.filepath, 'rb') as fp:
part = MIMEApplication(fp.read())
part.add_header('Content-Disposition', 'attachment', filename=self.filename)
return part
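
# Hypothetical usage sketch (not part of the original module): exercising the value objects defined above.
if __name__ == '__main__':
    sender = OhMyEmailContact(email='alice@example.com', name='Alice')
    print(sender.render())    # 'Alice <alice@example.com>'
    body = OhMyEmailPlainContent('hello world')
    print(body.content_type)  # 'plain'
    report = FileAttachment('./report.pdf', 'report.pdf')  # path is a placeholder; patch() reads it lazily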
``` |
{
"source": "jiangyy12/application-tracking-system",
"score": 3
} |
#### File: backend/api/routes.py
```python
from flask import Flask, request, jsonify, url_for, Blueprint
from api.models import db, User
from api.utils import generate_sitemap, APIException
from flask_jwt_extended import create_access_token
from flask_jwt_extended import get_jwt_identity
from flask_jwt_extended import jwt_required
import os
api = Blueprint('api', __name__)
# Create a route to authenticate your users and return JWTs. The
# create_access_token() function is used to actually generate the JWT.
@app.route("/token", methods=["POST"])
def create_token():
email = request.json.get("email", None)
password = request.json.get("password", None)
if email != "test" or password != "<PASSWORD>":
return jsonify({"msg": "Bad username or password"}), 401
access_token = create_access_token(identity=email)
return jsonify(access_token=access_token)
```
#### File: application-tracking-system/backend/app.py
```python
from flask import Flask, jsonify, request
from flask_cors import CORS, cross_origin
from selenium import webdriver
from bs4 import BeautifulSoup
from itertools import islice
from webdriver_manager.chrome import ChromeDriverManager
from data.connection import query, insert, count, querySchool, countProgram, queryItem, query_groupByCompany
import pandas as pd
import json
import os
import csv
from flask_jwt_extended import create_access_token
from flask_jwt_extended import get_jwt_identity
from flask_jwt_extended import jwt_required
from flask_jwt_extended import JWTManager
app = Flask(__name__)
app.config["JWT_SECRET_KEY"] = os.environ.get('JWT_SECRET') # Change this!
jwt = JWTManager(app)
# make flask support CORS
CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
# testing API, you can try to access http://localhost:5000/ on your browser after starting the server
# params:
# -name: string
@app.route("/")
@cross_origin()
def hello():
name = request.args.get('name') if request.args.get('name') else ''
obj = {
"str": "Hello World!"+name
}
return jsonify(obj)
# search function
# params:
# -keywords: string
@app.route("/search")
def search():
keywords = request.args.get('keywords')
keywords = keywords.replace(' ', '+')
# create a url for a crawler to fetch job information
url = "https://www.google.com/search?q=" + keywords + "&ibp=htl;jobs"
driver = webdriver.Chrome(ChromeDriverManager().install())
driver.get(url)
content = driver.page_source
driver.close()
    soup = BeautifulSoup(content, 'html.parser')
# parsing searching results to DataFrame and return
df = pd.DataFrame(columns=["jobTitle", "companyName", "location"])
mydivs = soup.find_all("div", {"class": "PwjeAc"})
for i, div in enumerate(mydivs):
df.at[i, "jobTitle"] = div.find("div", {"class": "BjJfJf PUpOsf"}).text
df.at[i, "companyName"] = div.find("div", {"class": "vNEEBe"}).text
df.at[i, "location"] = div.find("div", {"class": "Qk80Jf"}).text
return jsonify(df.to_dict('records'))
# get data from the CSV file for rendering root page
@app.route("/application", methods=['GET'])
def getDataFromCSV():
try:
results = query()
result = []
for row in results:
if (len(row) == 5):
dic = {}
dic['jobTitle'] = row[0]
dic['companyName'] = row[1]
dic['date'] = row[2].strftime("%Y-%m-%d")
dic['class'] = str(row[3])
dic['id'] = str(row[4])
result.append(dic)
json_str = json.dumps(result)
return json_str
except Exception as e:
print(e)
exit(1)
@app.route("/applicationSummaryPage", methods=['GET'])
def getCompanySummaryPage():
try:
results = query_groupByCompany()
result = []
for row in results:
if (len(row) == 4):
dic = {}
dic['companyName'] = row[0]
dic['Waiting'] = row[1]
dic['Offer'] = row[2]
dic['Rejected'] = row[3]
result.append(dic)
# json_str = json.dumps(result)
json_str = jsonify(result)
return json_str
except Exception as e:
print(e)
exit(1)
# write a new record to the CSV file
@app.route("/application", methods=['POST'])
def editcsv():
    # todo: implement database
csvTitle = ['jobTitle', 'companyName', 'date', 'class', 'id']
tables = ['application', 'job']
application = request.get_json()['application']
data = {}
for t in csvTitle:
        if t == 'jobTitle':
            data['jobName'] = application[t]
        if t == 'companyName':
            data['jobCompany'] = application[t]
        if t == 'date':
            data['jobReleaseDate'] = application[t]
            data['updateTime'] = application[t]
        if t == 'class':
            data['applyStatus'] = application[t]
            data['jobClass'] = application[t]
        if t == 'id':
            data['jobId'] = application[t]
# newLine.append(application[t] if t in application else None)
try:
for table in tables:
insert(table, data)
except Exception as e:
print(e)
exit(1)
    return jsonify('Created an application successfully!')
@app.route("/school", methods=['GET'])
def getDataFromDB():
try:
results = querySchool()
result = []
for row in results:
if (len(row) == 5):
dic = {}
dic['programTitle'] = row[0]
dic['schoolName'] = row[1]
dic['date'] = row[2].strftime("%Y-%m-%d")
dic['class'] = str(row[3])
dic['id'] = str(row[4])
result.append(dic)
json_str = json.dumps(result)
return json_str
except Exception as e:
print(e)
exit(1)
# write a new record to the CSV file
@app.route("/school", methods=['POST'])
def editDB():
    # todo: implement database
# path = "./data/applications.csv"
csvTitle = ['programTitle', 'schoolName', 'date', 'class', 'id']
tables = ['school', 'program']
application = request.get_json()['school']
data = {}
for t in csvTitle:
        if t == 'programTitle':
            data['programName'] = application[t]
        if t == 'schoolName':
            data['programSchool'] = application[t]
        if t == 'date':
            data['jobReleaseDate'] = application[t]
            data['updateTime'] = application[t]
        if t == 'class':
            data['applyStatus'] = application[t]
            data['jobClass'] = application[t]
        if t == 'id':
            data['programId'] = application[t]
# newLine.append(application[t] if t in application else None)
try:
for table in tables:
insert(table, data)
except Exception as e:
print(e)
exit(1)
    return jsonify('Created a school application successfully!')
@app.route("/note", methods=['GET'])
def getDataFromDB2():
try:
results = queryItem()
result = []
for row in results:
if (len(row) == 5):
dic = {}
dic['jobName'] = row[0]
dic['jobCompany'] = row[1]
dic['commentTime'] = row[2].strftime("%Y-%m-%d")
dic['class'] = str(row[3])
dic['id'] = str(row[4])
result.append(dic)
json_str = json.dumps(result)
return json_str
except Exception as e:
print(e)
exit(1)
# get the biggest id in the CSV for creating a new application
@app.route("/getNewId", methods=['GET'])
def getNewId():
try:
i = count() + 1
return jsonify(i)
except Exception as e:
print(e)
exit(1)
@app.route("/getNewProgramId", methods=['GET'])
def getNewProgramId():
path = "./data/applications.csv"
try:
i = countProgram() + 1
return jsonify(i)
except Exception as e:
print(e)
exit(1)
@app.route("/token", methods=['POST'])
def create_token():
# data = json.loads(request.get_data())
# print(data)
email = request.json.get("email", None)
password = request.json.get("password", None)
if email != "test" or password != "<PASSWORD>":
return jsonify({"msg": "Bad username or password"}), 401
access_token = create_access_token(identity=email)
return jsonify(access_token=access_token)
if __name__ == "__main__":
app.run()
```
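For reference, a minimal client sketch that exercises the `/token` route above and then calls `/application`; it assumes the Flask server is running on `localhost:5000` and that the credentials match the hard-coded check (the password placeholder is kept as-is):
```python
import requests

resp = requests.post("http://localhost:5000/token",
                     json={"email": "test", "password": "<PASSWORD>"})
resp.raise_for_status()
token = resp.json()["access_token"]

# The token can be sent as a Bearer header to other endpoints.
headers = {"Authorization": f"Bearer {token}"}
print(requests.get("http://localhost:5000/application", headers=headers).json())
```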
#### File: backend/data/connection.py
```python
import mysql.connector as conn
from mysql.connector import errorcode
Connection = conn.connect(
host="localhost",
port="3306",
user="root",
password="",
database="applicationtrackingsystem"
)
print("Connect to the local database outside method success!")
def connect():
try:
Connection = conn.connect(
host="localhost",
port="3306",
user="root",
password="",
database="applicationtrackingsystem"
)
print("Connect to the local database success!")
return Connection
except conn.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
def query():
try:
# Connection = connect()
# try:
# with open('../database/SET_DATABASE.sql', 'r') as f:
# with Connection.cursor() as cursor:
# cursor.execute(f.read(), multi=True)
# Connection.commit()
# print("Sourcing .sql file succeed!")
# except:
# print("Sourcing .sql file failed!")
query = "SELECT jobName, jobCompany, updateTime, applyStatus, job.jobId " \
"FROM job, users, application " \
"WHERE users.userId=application.userId AND job.jobId=application.jobId;"
cursor = Connection.cursor()
cursor.execute(query)
results = cursor.fetchall()
for row in results:
jobName = row[0]
jobCompany = row[1]
updateTime = row[2]
applyStatus = row[3]
jobId = row[4]
print("jobName=%s, jobCompany=%s, updateTime=%s, applyStatus=%s, jobId=%s"
% (jobName, jobCompany, updateTime, applyStatus, jobId))
return results
except conn.Error as err:
print("Query failed! Error number is: %s" %err.errno)
Connection.close()
def query_groupByCompany():
try:
# Connection = connect()
# try:
# with open('../database/SET_DATABASE.sql', 'r') as f:
# with Connection.cursor() as cursor:
# cursor.execute(f.read(), multi=True)
# Connection.commit()
# print("Sourcing .sql file succeed!")
# except:
# print("Sourcing .sql file failed!")
query = "SELECT jobCompany, count(case when applyStatus = 2 then 1 end) as Waiting," \
"count(case when applyStatus = 3 then 1 end) as Offer," \
"count(case when applyStatus = 4 then 1 end) as Rejected " \
"FROM job, application " \
"WHERE job.jobId = application.jobId " \
"GROUP BY jobCompany;"
cursor = Connection.cursor()
cursor.execute(query)
results = cursor.fetchall()
for row in results:
companyName = row[0]
Waiting = row[1]
Offer = row[2]
Rejected = row[3]
print("companyName=%s, Waiting=%s, Offer=%s, Rejected=%s"
% (companyName, Waiting, Offer, Rejected))
return results
except conn.Error as err:
print("Query failed! Error number is: %s" %err.errno)
Connection.close()
def querySchool():
try:
# Connection = connect()
# try:
# with open('../database/SET_DATABASE.sql', 'r') as f:
# with Connection.cursor() as cursor:
# cursor.execute(f.read(), multi=True)
# Connection.commit()
# print("Sourcing .sql file succeed!")
# except:
# print("Sourcing .sql file failed!")
query = "SELECT programName, programSchool, updateTime, applyStatus, program.programId " \
"FROM program, users, school " \
"WHERE users.userId=school.userId AND program.programId=school.programId;"
cursor = Connection.cursor()
cursor.execute(query)
results = cursor.fetchall()
for row in results:
programName = row[0]
programSchool = row[1]
updateTime = row[2]
applyStatus = row[3]
programId = row[4]
print("programName=%s, programSchool=%s, updateTime=%s, applyStatus=%s, programId=%s"
% (programName, programSchool, updateTime, applyStatus, programId))
return results
except conn.Error as err:
print("School Query failed! Error number is: %s" %err.errno)
Connection.close()
def queryItem():
try:
query = "SELECT jobName, jobCompany, commentTime, itemContent, job.jobId " \
"FROM job, users, item " \
"WHERE users.userId=item.userId AND job.jobId=item.jobId;"
cursor = Connection.cursor()
cursor.execute(query)
results = cursor.fetchall()
for row in results:
jobName = row[0]
jobCompany = row[1]
commentTime = row[2]
itemContent = row[3]
jobId = row[4]
print("jobName=%s, jobCompany=%s, commentTime=%s, itemContent=%s, jobId=%s"
% (jobName, jobCompany, commentTime, itemContent, jobId))
return results
except conn.Error as err:
print("Item Query failed! Error number is: %s" %err.errno)
Connection.close()
def count():
try:
query = "SELECT COUNT(*) FROM job;"
cursor = Connection.cursor()
cursor.execute(query)
result = cursor.fetchall()
for row in result:
count = row[0] + 1
return count
except conn.Error as err:
print("Query failed! Error number is: %s" % err.errno)
def countProgram():
try:
query = "SELECT COUNT(*) FROM program;"
cursor = Connection.cursor()
cursor.execute(query)
result = cursor.fetchall()
for row in result:
count = row[0] + 1
return count
except conn.Error as err:
print("Query failed! Error number is: %s" % err.errno)
def insert(tableName, data):
try:
if (tableName == 'application'):
query = "INSERT INTO application (userId, jobId, applyStatus, updateTime) " \
"VALUES (%s, %s, %s, %s);"
            # Gaolin: Since there was no user management before, a placeholder userId of '1' is used when inserting into the application table.
# todo: record real userId
value = ('1', data['jobId'], data['applyStatus'], data['updateTime'])
cursor = Connection.cursor()
cursor.execute(query, value)
elif (tableName == 'job'):
query = "INSERT INTO job (jobId, jobName, jobCompany, jobReleaseDate, jobClass) " \
"VALUES (%s, %s, %s, %s, %s);"
value = (data['jobId'], data['jobName'], data['jobCompany'], data['jobReleaseDate'], data['jobClass'])
cursor = Connection.cursor()
cursor.execute(query, value)
elif (tableName == 'school'):
query = "INSERT INTO school (userId, programId, applyStatus, updateTime) " \
"VALUES (%s, %s, %s, %s);"
value = ('1', data['programId'], data['applyStatus'], data['updateTime'])
cursor = Connection.cursor()
cursor.execute(query, value)
elif (tableName == 'program'):
query = "INSERT INTO program (programId, programName, programSchool, programReleaseDate, programClass) " \
"VALUES (%s, %s, %s, %s, %s);"
value = (data['programId'], data['programName'], data['programSchool'], data['programReleaseDate'], data['programClass'])
cursor = Connection.cursor()
cursor.execute(query, value)
Connection.commit()
print("Insert table %s succeed!" % tableName)
except:
print("Insert table %s failed!" % tableName)
query()
```
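For reference, a minimal sketch of how the `insert()` helper above is driven by the route handlers; the record values here are illustrative, and the local `applicationtrackingsystem` database is assumed to be reachable:
```python
from data.connection import insert  # same import path used by backend/app.py

# Hypothetical record; keys mirror the columns insert() expects for the
# 'job' and 'application' tables.
record = {
    'jobId': '42',
    'jobName': 'Software Engineer',
    'jobCompany': 'ExampleCorp',
    'jobReleaseDate': '2022-10-01',
    'jobClass': '1',
    'applyStatus': '2',
    'updateTime': '2022-10-01',
}
for table in ('job', 'application'):  # insert the job row before the application row
    insert(table, record)
```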
#### File: application-tracking-system/backend/test_api.py
```python
import json
import unittest
import sys
import app
import requests
import pytest
sys.path.append(".")
from data.connection import connect
from data import connection
class Api(unittest.TestCase):
# def setUp(self):
# self.db = connect()
# self.connection = connection
#
# def test_hello(self):
# response = requests.get('http://localhost:5000')
# self.assertEqual(response.status_code, 200)
def setUp(self):
self.app = app.app.test_client()
self.app.testing = True
def test_hello(self):
home = self.app.get('/')
self.assertIn('Hello World!', str(home.data))
def test_search(self):
search = self.app.get('/search')
# self.assertEqual(search.status_code, 200)
def test_application_get(self):
application = self.app.get('/application')
self.assertEqual(application.status_code, 200)
    def test_application_post(self):
application = self.app.post('/application')
# self.assertEqual(application.status_code, 200)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jiangyy12/Simplii",
"score": 2
} |
#### File: jiangyy12/Simplii/app.py
```python
from src.error_handler.error import handle_err
from flask import Flask, render_template, redirect, session
import src.models.task_model as task_model
import src.models.category_model as category_model
from src.controller.task_controller import tasks
from src.login.login import login
import src.models.project_model as project_model
import src.models.employee_model as employee_model
from flask import request
app = Flask(__name__)
app.register_blueprint(handle_err)
app.register_blueprint(tasks)
app.register_blueprint(login)
app.config['SECRET_KEY'] = 'SECRET_KEY'
@app.route("/")
def homePage():
    """This function renders the home page."""
    this_week_tasks = task_model.task_model.get_this_week_tasks()
    backlog_tasks = task_model.task_model.get_backlog()
    future_tasks = task_model.task_model.get_future_tasks()
    categories = category_model.category_model.get_category()
    projects = project_model.project_model.get_project()
    employees = employee_model.employee_model.get_employee()
    return render_template("home.html", this_week_tasks=this_week_tasks,
                           backlog_tasks=backlog_tasks, future_tasks=future_tasks,
                           categories=categories, projects=projects, employees=employees)
@app.route("/edit_task")
def edit_task():
"""This function renders the edit task page."""
return render_template("edit_task.html")
# @app.route("/edit_task", methods=["DELETE"])
# def delete_task():
# """This function renders the """
@app.route("/view_all_tasks")
def view_all_tasks():
    """This function renders the page listing all tasks."""
    all_tasks = task_model.task_model.get_all_taks()
    return render_template("view_all_tasks.html", all_tasks=all_tasks)
@app.route("/view_all_projects")
def view_all_projects():
    """This function renders the page listing all projects."""
    all_projects = project_model.project_model.get_project()
    return render_template("view_all_project.html", all_projects=all_projects)
@app.route("/user_details")
def user_details():
"""This function renders the edit task page."""
return render_template("view_user_details.html")
@app.route("/view_task_employee")
def view_tasks_employee():
    try:
all_tasks = task_model.task_model.get_all_taks_with_employee()
return render_template("view_task_employee.html", all_tasks=all_tasks)
except Exception as e:
print(e)
exit(1)
@app.route("/view_all_employees")
def view_all_employees():
"""This function renders the edit task page."""
all_employees = employee_model.employee_model.get_employee()
backlog_tasks = task_model.task_model.get_backlog()
return render_template("view_all_employees.html", all_employees=all_employees, backlog_tasks=backlog_tasks)
@app.route("/project", methods=['POST'])
def create_project():
try:
data = request.form
project_model.project_model().create_project(data)
return redirect('/view_all_projects')
except Exception as e:
print(e)
exit(1)
@app.route("/employee", methods=['POST'])
def create_employee():
try:
data = request.form
employee_model.employee_model().create_employee(data=data)
return redirect('/view_all_employees')
except Exception as e:
print(e)
exit(1)
if __name__ == "__main__":
app.run(debug=True)
```
#### File: src/controller/project_controller.py
```python
from flask import Blueprint, request, redirect
from src.controller.task_controller import task, tasks
from src.models.project_model import project_model
# project = Blueprint('project', __name__, url_prefix='/project')
# project = project_model()
# def get_project():
# return project.get_project()
# @project_route('', methods=['POST'])
# def create_project():
# data = request.form
# project.create_project(data)
# return redirect('/view_all_projects')
projects = Blueprint('projects', __name__, url_prefix='/projects')
project = project_model()
def get_project():
return project.get_project()
@projects.route('', methods=['POST'])
def create_project():
data = request.form
project.create_project(data)
return redirect('/view_all_projects')
@tasks.route('', methods=['DELETE'])
def delete_task():
taskid = request.form['taskid']
task.delete_task(taskid)
return 'Task Deleted', 200
@tasks.route('/update', methods=['POST'])
def update_task():
data = request.form
task.update_task(data)
    return 'Task updated successfully!', 200
```
#### File: src/models/project_model.py
```python
import pandas as pd
from src.models.sql_helper import sql_helper
con = sql_helper()
class project_model:
def create_project(self, data):
columns = ''
values = ''
for key, value in data.items():
columns += str(key)+', '
values += "'"+str(value)+"', "
query = "INSERT INTO Project ("+columns[:-2]+" ) VALUES (" + values[:-2]+" );"
print(query)
con.run_query(query)
return
    @staticmethod
    def get_project():
query = "SELECT ProjectID, ProjectName, Description, Technology FROM Project;"
result = con.run_query(query)
result = pd.DataFrame(list(result))
return result.to_dict('records')
# def update_category(self, data):
# values = ''
# for key, value in data.items():
# values += str(key)+"= '"+str(value)+"', "
# query = "UPDATE tasks SET "+values[:-2]+";"
# con.run_query(query)
# return
```
#### File: Simplii/UnitTests/sql_helper.py
```python
import pymysql
import os
class sql_helper:
def __init__(self):
self.connection_obj = None
def connect_database(self):
try:
# self.connection_obj = pymysql.connect(
# host='localhost',
# port = 3306,
# user = 'root',
# password = '',
# db = "simpli",
# autocommit=True
# )
self.connection_obj = pymysql.connect(
host='simpli.cx7psuz6gzvw.us-east-1.rds.amazonaws.com',
port = 3306,
user = 'admin',
password = '<PASSWORD>',
db = "simpli",
autocommit=True
)
except:
pass
#Need to import error handling class
def disconnect_database(self):
try:
self.connection_obj.close()
except:
pass
#Need to import error handling class
def run_query(self, query):
self.connect_database()
tempCursor = self.connection_obj.cursor()
tempCursor.execute(query)
output = tempCursor.fetchall()
self.disconnect_database()
return output
if __name__=='__main__':
sql = sql_helper()
    output = sql.run_query("show tables")
print(output)
print("Hello")
``` |
{
"source": "jiangz17THU/ColossalAI",
"score": 2
} |
#### File: amp/torch_amp/torch_amp.py
```python
import torch.nn as nn
import torch.cuda.amp as torch_amp
from torch import Tensor
from torch.nn.modules.loss import _Loss
from torch.optim import Optimizer
from ._grad_scaler import GradScaler
from colossalai.nn.optimizer import ColossalaiOptimizer
from colossalai.utils import clip_grad_norm_fp32
class TorchAMPOptimizer(ColossalaiOptimizer):
"""A wrapper class which integrate Pytorch AMP with an optimizer
Args:
optim (torch.optim.Optimizer): A normal optimizer like Adam or SGD.
init_scale (float, optional, default=2.**16): Initial scale factor.
growth_factor (float, optional, default=2.0): Factor by which the scale is multiplied during
:meth:`update` if no inf/NaN gradients occur for ``growth_interval`` consecutive iterations.
backoff_factor (float, optional, default=0.5): Factor by which the scale is multiplied during
:meth:`update` if inf/NaN gradients occur in an iteration.
growth_interval (int, optional, default=2000): Number of consecutive iterations without inf/NaN gradients
that must occur for the scale to be multiplied by ``growth_factor``.
enabled (bool, optional, default=True): If ``False``, disables gradient scaling. :meth:`step` simply
invokes the underlying ``optimizer.step()``, and other methods become no-ops.
"""
def __init__(self, optim: Optimizer, *args, **kwargs):
super().__init__(optim)
self.scaler = GradScaler(*args, **kwargs)
def backward(self, loss: Tensor):
"""Backward with torch amp gradient scaler
Args:
loss (torch.Tensor): Loss computed by a loss function
"""
self.scaler.scale(loss).backward()
def step(self):
"""Update the parameters of the model
"""
self.scaler.step(self.optim)
self.scaler.update()
def clip_grad_norm(self, model: nn.Module, max_norm: float):
"""Apply gradient clipping to the model parameters
Args:
model (torch.nn.Module): Your model object
max_norm (float): Max norm value for gradient clipping
"""
if max_norm > 0.0:
self.scaler.unscale_(self.optim)
clip_grad_norm_fp32(model.parameters(), max_norm)
class TorchAMPModel(nn.Module):
"""A wrapper class for a model object which executes forward with values automatically
cast to fp16
"""
def __init__(self, model: nn.Module) -> None:
super().__init__()
self.model = model
@torch_amp.autocast()
def forward(self, *args, **kwargs):
return self.model(*args, **kwargs)
class TorchAMPLoss(nn.Module):
"""A wrapper class for a criterion object which computes the loss in mixed-precision context
Args:
loss (torch.nn.modules.loss._Loss): A loss function object
"""
def __init__(self, loss: _Loss):
super().__init__()
self.loss = loss
@torch_amp.autocast()
def forward(self, *args, **kwargs):
return self.loss(*args, **kwargs)
```
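A minimal sketch of how the three wrappers above fit together in one training step; the model, data, and hyperparameters are illustrative, the import path follows the file header above, and a CUDA device is assumed to be available:
```python
import torch
import torch.nn as nn
from colossalai.amp.torch_amp.torch_amp import TorchAMPModel, TorchAMPLoss, TorchAMPOptimizer

model = TorchAMPModel(nn.Linear(16, 4).cuda())
criterion = TorchAMPLoss(nn.CrossEntropyLoss())
optimizer = TorchAMPOptimizer(torch.optim.SGD(model.parameters(), lr=0.1))

data = torch.randn(8, 16, device='cuda')
target = torch.randint(0, 4, (8,), device='cuda')

optimizer.zero_grad()
loss = criterion(model(data), target)   # forward runs under torch.cuda.amp.autocast
optimizer.backward(loss)                # scaled backward pass via the GradScaler
optimizer.clip_grad_norm(model, max_norm=1.0)
optimizer.step()                        # unscales gradients and updates parameters
```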
#### File: colossalai/communication/ring.py
```python
import torch
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.utils import get_current_device, synchronize
def ring_forward(tensor_send_next: torch.Tensor, parallel_mode: ParallelMode):
"""Sends a tensor to the next member and receives a tensor from the previous member.
This function returns the received tensor from the previous member.
Args:
tensor_send_next: Tensor sent to next member
parallel_mode: Parallel group mode used in this communication
Returns:
        :class:`torch.Tensor`: The tensor received from the previous member.
Note:
The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
"""
buffer_shape = tensor_send_next.size()
ops = []
current_rank = gpc.get_global_rank()
tensor_recv_prev = torch.empty(buffer_shape,
requires_grad=True,
device=get_current_device(),
dtype=tensor_send_next.dtype)
# send to next rank
send_next_op = torch.distributed.P2POp(
torch.distributed.isend, tensor_send_next,
gpc.get_next_global_rank(parallel_mode))
ops.append(send_next_op)
# receive from prev rank
recv_prev_op = torch.distributed.P2POp(
torch.distributed.irecv, tensor_recv_prev,
gpc.get_prev_global_rank(parallel_mode))
ops.append(recv_prev_op)
if current_rank % 2 == 0:
ops = ops[::-1]
reqs = torch.distributed.batch_isend_irecv(ops)
for req in reqs:
req.wait()
# To protect against race condition when using batch_isend_irecv().
synchronize()
return tensor_recv_prev
```
#### File: engine/gradient_handler/_zero_gradient_handler.py
```python
from colossalai.registry import GRADIENT_HANDLER
from ._base_gradient_handler import BaseGradientHandler
@GRADIENT_HANDLER.register_module
class ZeROGradientHandler(BaseGradientHandler):
"""A helper class to handle all-reduce operations in a data parallel group.
    An all-reduce collective communication will be performed in
:func:`handle_gradient` among a data parallel group.
This class is specialized with ZeRO optimization.
"""
def handle_gradient(self):
"""A method running a all-reduce operation in a data parallel group.
"""
self._optimizer.sync_grad()
```
#### File: engine/ophooks/_base_ophook.py
```python
from abc import ABC, abstractmethod
import torch
class BaseOpHook(ABC):
"""This class allows users to add customized operations
before and after the execution of a PyTorch submodule"""
def __init__(self):
pass
@abstractmethod
def pre_fwd_exec(self, module: torch.nn.Module, *args):
pass
@abstractmethod
def post_fwd_exec(self, module: torch.nn.Module, *args):
pass
@abstractmethod
def pre_bwd_exec(self, module: torch.nn.Module, input, output):
pass
@abstractmethod
def post_bwd_exec(self, module: torch.nn.Module, input):
pass
@abstractmethod
def post_iter(self):
pass
```
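As an illustration, a minimal subclass that simply logs module names at each hook point; how a hook gets attached to a training engine is outside the scope of this file, and the import path is inferred from the file header above:
```python
import torch
from colossalai.engine.ophooks._base_ophook import BaseOpHook  # assumed import path

class PrintNameOpHook(BaseOpHook):
    """Logs the module class name around forward and backward execution."""
    def pre_fwd_exec(self, module: torch.nn.Module, *args):
        print(f"[pre-forward]  {module.__class__.__name__}")
    def post_fwd_exec(self, module: torch.nn.Module, *args):
        print(f"[post-forward] {module.__class__.__name__}")
    def pre_bwd_exec(self, module: torch.nn.Module, input, output):
        print(f"[pre-backward] {module.__class__.__name__}")
    def post_bwd_exec(self, module: torch.nn.Module, input):
        print(f"[post-backward] {module.__class__.__name__}")
    def post_iter(self):
        print("[iteration done]")
```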
#### File: engine/paramhooks/_param_hookmgr.py
```python
from typing import Callable, List
import torch
import functools
class BaseParamHookMgr(object):
def __init__(self, param_list: List[torch.nn.Parameter]) -> None:
r"""
        Register a backward hook on every parameter of the module.
"""
self._param_list = param_list
self._hook_list = []
def register_backward_hooks(self, hook_call: Callable) -> None:
r"""
        The hook_call will be called every time a gradient with respect to a param in self._param_list
is computed.
The hook should have the following signature:
```
hook(param, grad) -> Tensor or None
```
"""
if not torch.is_grad_enabled():
return # don't register grad hooks if grad isn't enabled
for p in self._param_list:
if p.requires_grad and not hasattr(p, '_base_param_hook'):
handle = p.register_hook(functools.partial(hook_call, p))
p._base_param_hook = handle
def remove_hooks(self):
for p in self._param_list:
if p.requires_grad and hasattr(p, '_base_param_hook'):
p._base_param_hook.remove()
```
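A minimal usage sketch: the manager registers a per-parameter hook whose first argument is the parameter itself (bound via `functools.partial`) and whose second is the incoming gradient; returning `None` leaves the gradient unchanged. The toy model below is purely illustrative, and the import path is inferred from the file header above:
```python
import torch
import torch.nn as nn
from colossalai.engine.paramhooks._param_hookmgr import BaseParamHookMgr  # assumed import path

model = nn.Linear(4, 2)
mgr = BaseParamHookMgr(list(model.parameters()))

def report_grad(param, grad):
    print(f"grad norm for parameter of shape {tuple(param.shape)}: {grad.norm().item():.4f}")
    return None  # do not modify the gradient

mgr.register_backward_hooks(report_grad)
model(torch.randn(8, 4)).sum().backward()  # hooks fire as each parameter's gradient is computed
mgr.remove_hooks()
```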
#### File: layer/colossalai_layer/_utils.py
```python
import torch.nn as nn
from torch import Tensor
from ..parallel_2d._operation import split_tensor_2d
from ..parallel_2p5d._operation import split_tensor_2p5d
from ..parallel_3d._operation import split_batch_3d
from ..utils import get_tensor_parallel_mode
_parallel_split_batch = {'2d': split_tensor_2d, '2.5d': split_tensor_2p5d, '3d': split_batch_3d}
def partition_batch(input_) -> Tensor:
tensor_parallel_mode = get_tensor_parallel_mode()
if tensor_parallel_mode in _parallel_split_batch:
if isinstance(input_, dict):
return {k: _parallel_split_batch[tensor_parallel_mode](v) for k, v in input_.items()}
else:
return _parallel_split_batch[tensor_parallel_mode](input_)
else:
return input_
class ColossalaiModule(nn.Module):
def __init__(self, module: nn.Module, **kwargs):
super().__init__()
# copy values
self.__dict__ = module.__dict__.copy()
# copy methods
for name, attr in module.__class__.__dict__.items():
if name not in ['__init__', 'forward'] and callable(attr):
setattr(self, name, getattr(module, name))
self._forward_func = module.forward
for k, v in kwargs.items():
setattr(self, k, v)
def forward(self, *args):
return self._forward_func(*args)
```
#### File: layer/parallel_1d/layers.py
```python
import math
from collections import OrderedDict
from typing import Callable, Tuple
import torch
import torch.nn.functional as F
from colossalai.communication import broadcast
from colossalai.context import ParallelMode, seed
from colossalai.core import global_context as gpc
from colossalai.global_variables import tensor_parallel_env as env
from colossalai.kernel import LayerNorm
from colossalai.nn import init as init
from colossalai.registry import LAYERS
from colossalai.utils.checkpointing import (broadcast_state_dict, gather_tensor_parallel_state_dict,
partition_tensor_parallel_state_dict)
from colossalai.utils.cuda import get_current_device
from torch import Tensor
from torch.nn.parameter import Parameter
from ..vanilla import VanillaPatchEmbedding
from ..base_layer import ParallelLayer
from ..colossalai_layer._utils import ColossalaiModule
from ..utils import divide, set_tensor_parallel_attribute_by_partition
from ._utils import (gather_forward_split_backward, get_parallel_input, reduce_grad, reduce_input, set_parallel_input,
split_forward_gather_backward)
@LAYERS.register_module
class Linear1D(ColossalaiModule):
r"""Linear layer for 1D parallelism.
Args:
in_features (int): size of each input sample.
out_features (int): size of each output sample.
bias (bool, optional): If set to ``False``, the layer will not learn an additive bias, defaults to ``True``.
dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None.
gather_output (bool, optional): Whether to call all-gather on output, defaults to False.
skip_bias_add (bool, optional): If set to ``True``, it will skip bias add for linear layer,
which is preserved for kernel fusion, defaults to False
weight_initializer (:class:`typing.Callable`, optional):
The initializer of weight, defaults to kaiming uniform initializer.
bias_initializer (:class:`typing.Callable`, optional):
The initializer of bias, defaults to xavier uniform initializer.
More details about ``initializer`` please refer to
`init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_.
"""
def __init__(self,
in_features: int,
out_features: int,
bias: bool = True,
dtype: torch.dtype = None,
gather_output: bool = False,
skip_bias_add: bool = False,
weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)),
bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1)):
parallel_input = get_parallel_input()
if not parallel_input:
layer = Linear1D_Col(in_features,
out_features,
bias=bias,
dtype=dtype,
gather_output=gather_output,
skip_bias_add=skip_bias_add,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer)
else:
layer = Linear1D_Row(in_features,
out_features,
bias=bias,
dtype=dtype,
parallel_input=parallel_input,
skip_bias_add=skip_bias_add,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer)
super().__init__(layer)
@LAYERS.register_module
class LayerNorm1D(ColossalaiModule):
r"""
Layer Normalization for colossalai
:param normalized_shape: input shape from an expected input
of size. :math:`[* \times \text{normalized_shape}[0] \times \text{normalized_shape}[1]
\times \ldots \times \text{normalized_shape}[-1]]`
If a single integer is used, it is treated as a singleton list, and this module will
normalize over the last dimension which is expected to be of that specific size.
:type normalized_shape: int
:param eps: a value added to the denominator for numerical stability, defaults to 1e-05
:type eps: float, optional
:param dtype: The dtype of parameters, defaults to None
:type dtype: torch.dtype, optional
"""
def __init__(self, normalized_shape: int, eps=1e-05, dtype=None):
norm = LayerNorm(normalized_shape, eps=eps, device=get_current_device(), dtype=dtype)
super().__init__(norm)
def _load_from_state_dict(self, state_dict, prefix, *args):
local_state = OrderedDict()
weight_key = prefix + 'weight'
bias_key = prefix + 'bias'
if gpc.get_local_rank(ParallelMode.TENSOR) == 0:
# weight
weight = state_dict.pop(weight_key, None)
if weight is not None:
local_state[weight_key] = weight
# bias
bias = state_dict.pop(bias_key, None)
if bias is not None:
local_state[bias_key] = bias
local_state = broadcast_state_dict(local_state, ParallelMode.PARALLEL_1D)
super()._load_from_state_dict(local_state, prefix, *args)
def _save_to_state_dict(self, destination, prefix, keep_vars):
if gpc.get_local_rank(ParallelMode.TENSOR) == 0:
super()._save_to_state_dict(destination, prefix, keep_vars)
@LAYERS.register_module
class Classifier1D(ParallelLayer):
r"""RowLinear with given weight. Classifier of 1D parallelism.
Args:
in_features (int): size of each input sample.
num_classes (int): number of classes.
weight (:class:`torch.nn.Parameter`, optional): weight of the classifier, defaults to None.
bias (bool, optional): If set to ``False``, the layer will not learn an additive bias, defaults to ``True``.
dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None.
weight_initializer (:class:`typing.Callable`, optional):
The initializer of weight, defaults to kaiming uniform initializer.
bias_initializer (:class:`typing.Callable`, optional):
The initializer of bias, defaults to xavier uniform initializer.
More details about ``initializer`` please refer to
`init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_.
"""
def __init__(self,
in_features: int,
num_classes: int,
weight: Parameter = None,
bias: bool = True,
dtype: torch.dtype = None,
weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)),
bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1)):
super().__init__()
self.in_features = in_features
self.num_classes = num_classes
self.parallel_input = get_parallel_input()
# Divide the weight matrix along the last dimension.
self.input_size_per_partition = divide(in_features, gpc.tensor_parallel_size)
# Parameters.
# Initialize weight.
factory_kwargs = {'device': get_current_device(), 'dtype': dtype}
if weight is not None:
self.weight = weight
self.has_weight = False
else:
self.weight = Parameter(torch.empty(self.num_classes, self.input_size_per_partition, **factory_kwargs))
self.has_weight = True
if bias:
self.bias = Parameter(torch.empty(self.num_classes, **factory_kwargs))
else:
self.bias = None
with seed(ParallelMode.TENSOR):
self.reset_parameters(weight_initializer, bias_initializer)
self._set_tensor_parallel_attributes()
set_parallel_input(False)
env.vocab_parallel = False
def reset_parameters(self, weight_initializer, bias_initializer) -> None:
fan_in, fan_out = self.in_features, self.num_classes
if self.has_weight:
weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out)
if self.bias is not None:
bias_initializer(self.bias, fan_in=fan_in)
broadcast(self.bias, gpc.get_ranks_in_group(ParallelMode.PARALLEL_1D)[0], ParallelMode.PARALLEL_1D)
def _set_tensor_parallel_attributes(self):
if self.has_weight:
num_partition = gpc.get_world_size(ParallelMode.TENSOR)
set_tensor_parallel_attribute_by_partition(self.weight, num_partition)
def _load_from_state_dict(self, state_dict, prefix, *args):
local_state = OrderedDict()
weight_key = prefix + 'weight'
bias_key = prefix + 'bias'
if gpc.get_local_rank(ParallelMode.TENSOR) == 0:
# weight
if self.has_weight:
weight = state_dict.pop(weight_key, None)
if weight is not None:
local_state[weight_key] = weight
# bias
if self.bias is not None:
bias = state_dict.pop(bias_key, None)
if bias is not None:
local_state[bias_key] = bias
local_state = partition_tensor_parallel_state_dict(local_state,
ParallelMode.PARALLEL_1D,
dims={
weight_key: -1,
bias_key: 0
},
partition_states={
weight_key: True,
bias_key: False
})
super()._load_from_state_dict(local_state, prefix, *args)
def _save_to_state_dict(self, destination, prefix, keep_vars):
weight_key = prefix + 'weight'
bias_key = prefix + 'bias'
local_state = OrderedDict()
if self.has_weight:
local_state[weight_key] = self.weight
if self.bias is not None:
local_state[bias_key] = self.bias
local_state = gather_tensor_parallel_state_dict(local_state,
ParallelMode.PARALLEL_1D,
dims={
weight_key: -1,
bias_key: 0
},
partition_states={
weight_key: True,
bias_key: False
},
keep_vars=keep_vars)
destination.update(local_state)
def forward(self, input_: Tensor) -> Tensor:
# Set up backprop all-reduce.
if self.parallel_input:
assert input_.shape[-1] == self.weight.shape[-1], \
'Invalid shapes in Classifier1D forward: input={}, weight={}. Expected last dim of input {}.'.format(
input_.shape, self.weight.shape, self.weight.shape[-1])
input_ = input_
else:
assert divide(input_.shape[-1], gpc.tensor_parallel_size) == self.weight.shape[-1], \
'Invalid shapes in Classifier1D forward: input={}, weight={}. Expected last dim of input {}.'.format(
input_.shape, self.weight.shape, self.weight.shape[-1] * gpc.tensor_parallel_size)
input_ = split_forward_gather_backward(input_, ParallelMode.PARALLEL_1D, dim=-1)
output_parallel = F.linear(input_, self.weight)
output = reduce_input(output_parallel, ParallelMode.PARALLEL_1D)
if self.bias is not None:
output = output + self.bias
return output
@LAYERS.register_module
class VocabParallelClassifier1D(ParallelLayer):
r"""ColLinear with given weight. Classifier of 1D parallelism.
Args:
in_features (int): size of each input sample.
num_classes (int): number of classes.
weight (:class:`torch.nn.Parameter`, optional): weight of the classifier, defaults to None.
bias (bool, optional): If set to ``False``, the layer will not learn an additive bias, defaults to ``True``.
dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None.
weight_initializer (:class:`typing.Callable`, optional):
The initializer of weight, defaults to kaiming uniform initializer.
bias_initializer (:class:`typing.Callable`, optional):
The initializer of bias, defaults to xavier uniform initializer.
More details about ``initializer`` please refer to
`init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_.
"""
def __init__(self,
in_features: int,
num_classes: int,
weight: Parameter = None,
bias: bool = True,
dtype: torch.dtype = None,
weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)),
bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1)):
super().__init__()
self.in_features = in_features
self.num_classes = num_classes
self.parallel_input = get_parallel_input()
# Divide the weight matrix along the last dimension.
self.num_classes_per_partition = divide(num_classes, gpc.tensor_parallel_size)
# Parameters.
# Initialize weight.
factory_kwargs = {'device': get_current_device(), 'dtype': dtype}
if weight is not None:
self.weight = weight
self.has_weight = False
else:
self.weight = Parameter(torch.empty(self.num_classes_per_partition, self.in_features, **factory_kwargs))
self.has_weight = True
if bias:
self.bias = Parameter(torch.empty(self.num_classes_per_partition, **factory_kwargs))
else:
self.bias = None
with seed(ParallelMode.TENSOR):
self.reset_parameters(weight_initializer, bias_initializer)
self._set_tensor_parallel_attributes()
set_parallel_input(False)
env.vocab_parallel = True
def reset_parameters(self, weight_initializer, bias_initializer) -> None:
fan_in, fan_out = self.in_features, self.num_classes
if self.has_weight:
weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out)
if self.bias is not None:
bias_initializer(self.bias, fan_in=fan_in)
def _set_tensor_parallel_attributes(self):
num_partition = gpc.get_world_size(ParallelMode.TENSOR)
if self.has_weight:
set_tensor_parallel_attribute_by_partition(self.weight, num_partition)
if self.bias is not None:
set_tensor_parallel_attribute_by_partition(self.bias, num_partition)
def _load_from_state_dict(self, state_dict, prefix, *args):
local_state = OrderedDict()
weight_key = prefix + 'weight'
bias_key = prefix + 'bias'
if gpc.get_local_rank(ParallelMode.TENSOR) == 0:
# weight
if self.has_weight:
weight = state_dict.pop(weight_key, None)
if weight is not None:
local_state[weight_key] = weight
# bias
if self.bias is not None:
bias = state_dict.pop(bias_key, None)
if bias is not None:
local_state[bias_key] = bias
local_state = partition_tensor_parallel_state_dict(local_state,
ParallelMode.PARALLEL_1D,
dims={
weight_key: 0,
bias_key: 0
},
partition_states={
weight_key: True,
bias_key: True
})
super()._load_from_state_dict(local_state, prefix, *args)
def _save_to_state_dict(self, destination, prefix, keep_vars):
weight_key = prefix + 'weight'
bias_key = prefix + 'bias'
local_state = OrderedDict()
if self.has_weight:
local_state[weight_key] = self.weight
if self.bias is not None:
local_state[bias_key] = self.bias
local_state = gather_tensor_parallel_state_dict(local_state,
ParallelMode.PARALLEL_1D,
dims={
weight_key: 0,
bias_key: 0
},
partition_states={
weight_key: True,
bias_key: True
},
keep_vars=keep_vars)
destination.update(local_state)
def forward(self, input_: Tensor) -> Tensor:
assert input_.shape[-1] == self.weight.shape[-1], \
'Invalid shapes in VocabParallelClassifier1D forward: input={}, weight={}. Expected last dim of input {}.'.format(
input_.shape, self.weight.shape, self.weight.shape[-1])
# Set up backprop all-reduce.
input_parallel = reduce_grad(input_, ParallelMode.PARALLEL_1D)
# Matrix multiply.
output = F.linear(input_parallel, self.weight, self.bias)
return output
@LAYERS.register_module
class Linear1D_Col(ParallelLayer):
r"""Linear layer with column parallelism.
The linear layer is defined as :math:`Y = XA + b`. A is parallelized along
its second dimension as :math:`A = [A_1, ..., A_p]`.
Args:
in_features (int): size of each input sample.
out_features (int): size of each output sample.
bias (bool, optional): If set to ``False``, the layer will not learn an additive bias, defaults to ``True``.
dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None.
gather_output (bool, optional): If true, call all-gather on output and make Y available
to all GPUs, otherwise, every GPU will have its output
which is :math:`Y_i = XA_i`, defaults to False
skip_bias_add (bool, optional): If set to ``True``, it will skip bias add for linear layer,
            which is preserved for kernel fusion, defaults to False
weight_initializer (:class:`typing.Callable`, optional):
The initializer of weight, defaults to kaiming uniform initializer.
bias_initializer (:class:`typing.Callable`, optional):
The initializer of bias, defaults to xavier uniform initializer.
More details about ``initializer`` please refer to
`init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_.
"""
def __init__(self,
in_features: int,
out_features: int,
bias: bool = True,
dtype: torch.dtype = None,
gather_output: bool = False,
skip_bias_add: bool = False,
weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)),
bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1)):
super().__init__()
# Keep input parameters
self.in_features = in_features
self.out_features = out_features
self.gather_output = gather_output
self.skip_bias_add = skip_bias_add
if skip_bias_add and not bias:
raise ValueError('cannot skip bias addition if bias is None')
self.out_features_per_partition = divide(out_features, gpc.tensor_parallel_size)
# Parameters.
# Initialize weight.
factory_kwargs = {'device': get_current_device(), 'dtype': dtype}
self.weight = Parameter(torch.empty(self.out_features_per_partition, self.in_features, **factory_kwargs))
if bias:
self.bias = Parameter(torch.empty(self.out_features_per_partition, **factory_kwargs))
else:
self.bias = None
with seed(ParallelMode.TENSOR):
self.reset_parameters(weight_initializer, bias_initializer)
self._set_tensor_parallel_attributes()
is_parallel_output = not self.gather_output
set_parallel_input(is_parallel_output)
def reset_parameters(self, weight_initializer, bias_initializer) -> None:
fan_in, fan_out = self.in_features, self.out_features
weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out)
if self.bias is not None:
bias_initializer(self.bias, fan_in=fan_in)
def _set_tensor_parallel_attributes(self):
num_partition = gpc.get_world_size(ParallelMode.TENSOR)
set_tensor_parallel_attribute_by_partition(self.weight, num_partition)
if self.bias is not None:
set_tensor_parallel_attribute_by_partition(self.bias, num_partition)
def _load_from_state_dict(self, state_dict, prefix, *args):
local_state = OrderedDict()
weight_key = prefix + 'weight'
bias_key = prefix + 'bias'
if gpc.get_local_rank(ParallelMode.TENSOR) == 0:
# weight
weight = state_dict.pop(weight_key, None)
if weight is not None:
local_state[weight_key] = weight
# bias
if self.bias is not None:
bias = state_dict.pop(bias_key, None)
if bias is not None:
local_state[bias_key] = bias
local_state = partition_tensor_parallel_state_dict(local_state,
ParallelMode.PARALLEL_1D,
dims={
weight_key: 0,
bias_key: 0
},
partition_states={
weight_key: True,
bias_key: True
})
super()._load_from_state_dict(local_state, prefix, *args)
def _save_to_state_dict(self, destination, prefix, keep_vars):
weight_key = prefix + 'weight'
bias_key = prefix + 'bias'
local_state = OrderedDict({weight_key: self.weight})
if self.bias is not None:
local_state[bias_key] = self.bias
local_state = gather_tensor_parallel_state_dict(local_state,
ParallelMode.PARALLEL_1D,
dims={
weight_key: 0,
bias_key: 0
},
partition_states={
weight_key: True,
bias_key: True
},
keep_vars=keep_vars)
destination.update(local_state)
def forward(self, input_: Tensor) -> Tuple[Tensor, Tensor]:
assert input_.shape[-1] == self.weight.shape[-1], \
'Invalid shapes in Linear1D_Col forward: input={}, weight={}. Expected last dim of input {}.'.format(
input_.shape, self.weight.shape, self.weight.shape[-1])
# Set up backprop all-reduce.
input_parallel = reduce_grad(input_, ParallelMode.PARALLEL_1D)
# Matrix multiply.
bias = self.bias if not self.skip_bias_add else None
output_parallel = F.linear(input_parallel, self.weight, bias)
if self.gather_output:
# All-gather across the partitions.
output = gather_forward_split_backward(output_parallel, ParallelMode.PARALLEL_1D, dim=-1)
else:
output = output_parallel
if self.skip_bias_add:
return output, self.bias
else:
return output
@LAYERS.register_module
class Linear1D_Row(ParallelLayer):
r""" Linear layer with row parallelism
Args:
in_features (int): size of each input sample.
out_features (int): size of each output sample.
bias (bool, optional): If set to ``False``, the layer will not learn an additive bias, defaults to ``True``.
dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None.
parallel_input (bool, optional): If set to ``True``, it's assumed that the input is split, defaults to False.
skip_bias_add (bool, optional): If set to ``True``, it will skip bias add for linear layer,
            which is preserved for kernel fusion, defaults to False
weight_initializer (:class:`typing.Callable`, optional):
The initializer of weight, defaults to kaiming uniform initializer.
bias_initializer (:class:`typing.Callable`, optional):
The initializer of bias, defaults to xavier uniform initializer.
More details about ``initializer`` please refer to
`init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_.
"""
def __init__(self,
in_features: int,
out_features: int,
bias: bool = True,
dtype: torch.dtype = None,
parallel_input: bool = True,
skip_bias_add: bool = False,
weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)),
bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1)):
super().__init__()
# Keep input parameters
self.in_features = in_features
self.out_features = out_features
self.parallel_input = parallel_input
self.skip_bias_add = skip_bias_add
if skip_bias_add and not bias:
raise ValueError('cannot skip bias addition if bias is None')
# Divide the weight matrix along the last dimension.
self.input_size_per_partition = divide(in_features, gpc.tensor_parallel_size)
# Parameters.
# Initialize weight.
factory_kwargs = {'device': get_current_device(), 'dtype': dtype}
self.weight = Parameter(torch.empty(self.out_features, self.input_size_per_partition, **factory_kwargs))
if bias:
self.bias = Parameter(torch.empty(self.out_features, **factory_kwargs))
else:
self.bias = None
with seed(ParallelMode.TENSOR):
self.reset_parameters(weight_initializer, bias_initializer)
self._set_tensor_parallel_attributes()
set_parallel_input(False)
def reset_parameters(self, weight_initializer, bias_initializer) -> None:
fan_in, fan_out = self.in_features, self.out_features
weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out)
if self.bias is not None:
bias_initializer(self.bias, fan_in=fan_in)
broadcast(self.bias, gpc.get_ranks_in_group(ParallelMode.PARALLEL_1D)[0], ParallelMode.PARALLEL_1D)
def _set_tensor_parallel_attributes(self):
num_partition = gpc.get_world_size(ParallelMode.TENSOR)
set_tensor_parallel_attribute_by_partition(self.weight, num_partition)
def _load_from_state_dict(self, state_dict, prefix, *args):
local_state = OrderedDict()
weight_key = prefix + 'weight'
bias_key = prefix + 'bias'
if gpc.get_local_rank(ParallelMode.TENSOR) == 0:
# weight
weight = state_dict.pop(weight_key, None)
if weight is not None:
local_state[weight_key] = weight
# bias
if self.bias is not None:
bias = state_dict.pop(bias_key, None)
if bias is not None:
local_state[bias_key] = bias
local_state = partition_tensor_parallel_state_dict(local_state,
ParallelMode.PARALLEL_1D,
dims={
weight_key: -1,
bias_key: 0
},
partition_states={
weight_key: True,
bias_key: False
})
super()._load_from_state_dict(local_state, prefix, *args)
def _save_to_state_dict(self, destination, prefix, keep_vars):
weight_key = prefix + 'weight'
bias_key = prefix + 'bias'
local_state = OrderedDict({weight_key: self.weight})
if self.bias is not None:
local_state[bias_key] = self.bias
local_state = gather_tensor_parallel_state_dict(local_state,
ParallelMode.PARALLEL_1D,
dims={
weight_key: -1,
bias_key: 0
},
partition_states={
weight_key: True,
bias_key: False
},
keep_vars=keep_vars)
destination.update(local_state)
def forward(self, input_: Tensor) -> Tensor:
# Set up backprop all-reduce.
if self.parallel_input:
assert input_.shape[-1] == self.weight.shape[-1], \
'Invalid shapes in Linear1D_Row forward: input={}, weight={}. Expected last dim of input {}.'.format(
input_.shape, self.weight.shape, self.weight.shape[-1])
input_ = input_
else:
assert divide(input_.shape[-1], gpc.tensor_parallel_size) == self.weight.shape[-1], \
'Invalid shapes in Linear1D_Row forward: input={}, weight={}. Expected last dim of input {}.'.format(
input_.shape, self.weight.shape, self.weight.shape[-1] * gpc.tensor_parallel_size)
input_ = split_forward_gather_backward(input_, ParallelMode.PARALLEL_1D, dim=-1)
output_parallel = F.linear(input_, self.weight)
output = reduce_input(output_parallel, ParallelMode.PARALLEL_1D)
if not self.skip_bias_add:
if self.bias is not None:
output = output + self.bias
return output
else:
return output, self.bias
@LAYERS.register_module
class Embedding1D(ParallelLayer):
r"""Embedding for 1D parallelism.
Args:
num_embeddings (int): number of embeddings.
embedding_dim (int): dimension of embedding.
padding_idx (int, optional): If specified, the entries at padding_idx do not contribute to the gradient;
therefore, the embedding vector at padding_idx is not updated during training,
i.e. it remains as a fixed “pad”, defaults to None.
dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None.
weight_initializer (:class:`typing.Callable`, optional):
            The initializer of weight, defaults to normal initializer.
The ``args`` and ``kwargs`` used in :class:`torch.nn.functional.embedding` should contain:
::
max_norm (float, optional): If given, each embedding vector with norm larger than max_norm is
renormalized to have norm max_norm. Note: this will modify weight in-place.
norm_type (float, optional): The p of the p-norm to compute for the max_norm option. Default 2.
scale_grad_by_freq (bool, optional): If given, this will scale gradients by the inverse
of frequency of the words in the mini-batch. Default False.
sparse (bool, optional): If True, gradient w.r.t. weight will be a sparse tensor. Default False.
More details about ``args`` and ``kwargs`` could be found in
`Embedding <https://pytorch.org/docs/stable/generated/torch.nn.functional.embedding.html#torch.nn.functional.embedding>`_.
More details about ``initializer`` please refer to
`init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_
"""
def __init__(self,
num_embeddings: int,
embedding_dim: int,
padding_idx: int = None,
dtype: torch.dtype = None,
weight_initializer: Callable = init.normal_(),
*args,
**kwargs):
super().__init__()
self.num_embeddings = num_embeddings
self.embed_dim = embedding_dim
embed_dim_per_partition = divide(embedding_dim, gpc.tensor_parallel_size)
self.padding_idx = padding_idx
self.embed_args = args
self.embed_kwargs = kwargs
self.weight = Parameter(
torch.empty((num_embeddings, embed_dim_per_partition), device=get_current_device(), dtype=dtype))
self.reset_parameters(weight_initializer)
self._set_tensor_parallel_attributes()
set_parallel_input(False)
def _set_tensor_parallel_attributes(self):
set_tensor_parallel_attribute_by_partition(self.weight, gpc.tensor_parallel_size)
def reset_parameters(self, weight_initializer) -> None:
with seed(ParallelMode.TENSOR):
fan_in, fan_out = self.num_embeddings, self.embed_dim
weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out)
self._fill_padding_idx_with_zero()
def _fill_padding_idx_with_zero(self) -> None:
if self.padding_idx is not None:
with torch.no_grad():
self.weight[self.padding_idx].fill_(0)
def _load_from_state_dict(self, state_dict, prefix, *args):
local_state = OrderedDict()
weight_key = prefix + 'weight'
if gpc.get_local_rank(ParallelMode.TENSOR) == 0:
# weight
weight = state_dict.pop(weight_key, None)
if weight is not None:
local_state[weight_key] = weight
local_state = partition_tensor_parallel_state_dict(local_state,
ParallelMode.PARALLEL_1D,
dims={weight_key: -1},
partition_states={weight_key: True})
super()._load_from_state_dict(local_state, prefix, *args)
def _save_to_state_dict(self, destination, prefix, keep_vars):
weight_key = prefix + 'weight'
local_state = OrderedDict({weight_key: self.weight})
local_state = gather_tensor_parallel_state_dict(local_state,
ParallelMode.PARALLEL_1D,
dims={weight_key: -1},
partition_states={weight_key: True},
keep_vars=keep_vars)
destination.update(local_state)
def forward(self, input_: Tensor) -> Tensor:
output_parallel = F.embedding(input_, self.weight, self.padding_idx, *self.embed_args, **self.embed_kwargs)
output = gather_forward_split_backward(output_parallel, ParallelMode.PARALLEL_1D, dim=-1)
return output
@LAYERS.register_module
class VocabParallelEmbedding1D(torch.nn.Module):
r"""Embedding parallelized in the vocabulary dimension.
Args:
num_embeddings (int): number of embeddings.
embedding_dim (int): dimension of embedding.
padding_idx (int, optional): If specified, the entries at padding_idx do not contribute to the gradient;
therefore, the embedding vector at padding_idx is not updated during training,
i.e. it remains as a fixed “pad”, defaults to None.
dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None.
weight_initializer (:class:`typing.Callable`, optional):
            The initializer of weight, defaults to normal initializer.
    The ``args`` and ``kwargs`` used in :class:`torch.nn.functional.embedding` should contain:
::
max_norm (float, optional): If given, each embedding vector with norm larger than max_norm is
renormalized to have norm max_norm. Note: this will modify weight in-place.
norm_type (float, optional): The p of the p-norm to compute for the max_norm option. Default 2.
scale_grad_by_freq (bool, optional): If given, this will scale gradients by the inverse
of frequency of the words in the mini-batch. Default False.
sparse (bool, optional): If True, gradient w.r.t. weight will be a sparse tensor. Default False.
More details about ``args`` and ``kwargs`` could be found in
`Embedding <https://pytorch.org/docs/stable/generated/torch.nn.functional.embedding.html#torch.nn.functional.embedding>`_.
More details about initializer please refer to
`init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_.
"""
def __init__(self,
num_embeddings: int,
embedding_dim: int,
padding_idx: int = None,
dtype: torch.dtype = None,
weight_initializer: Callable = init.normal_(),
*args,
**kwargs):
super().__init__()
self.num_embeddings = num_embeddings
self.embed_dim = embedding_dim
self.padding_idx = padding_idx
self.embed_args = args
self.embed_kwargs = kwargs
tensor_parallel_size = gpc.get_world_size(ParallelMode.PARALLEL_1D)
tensor_parallel_rank = gpc.get_local_rank(ParallelMode.PARALLEL_1D)
self.num_embeddings_per_partition = divide(num_embeddings, tensor_parallel_size)
self.vocab_start_index = tensor_parallel_rank * self.num_embeddings_per_partition
self.vocab_end_index = self.vocab_start_index + self.num_embeddings_per_partition
self.weight = Parameter(
torch.empty((self.num_embeddings_per_partition, self.embed_dim), device=get_current_device(), dtype=dtype))
self.reset_parameters(weight_initializer)
self._set_tensor_parallel_attributes()
set_parallel_input(False)
env.vocab_parallel = True
def _set_tensor_parallel_attributes(self):
set_tensor_parallel_attribute_by_partition(self.weight, gpc.tensor_parallel_size)
def reset_parameters(self, weight_initializer) -> None:
with seed(ParallelMode.TENSOR):
fan_in, fan_out = self.num_embeddings, self.embed_dim
weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out)
self._fill_padding_idx_with_zero()
def _fill_padding_idx_with_zero(self) -> None:
if self.padding_idx is not None and \
self.padding_idx >= self.vocab_start_index and self.padding_idx < self.vocab_end_index:
with torch.no_grad():
self.weight[self.padding_idx - self.vocab_start_index].fill_(0)
def _load_from_state_dict(self, state_dict, prefix, *args):
local_state = OrderedDict()
weight_key = prefix + 'weight'
if gpc.get_local_rank(ParallelMode.TENSOR) == 0:
# weight
weight = state_dict.pop(weight_key, None)
if weight is not None:
local_state[weight_key] = weight
local_state = partition_tensor_parallel_state_dict(local_state,
ParallelMode.PARALLEL_1D,
dims={weight_key: 0},
partition_states={weight_key: True})
super()._load_from_state_dict(local_state, prefix, *args)
def _save_to_state_dict(self, destination, prefix, keep_vars):
weight_key = prefix + 'weight'
local_state = OrderedDict({weight_key: self.weight})
local_state = gather_tensor_parallel_state_dict(local_state,
ParallelMode.PARALLEL_1D,
dims={weight_key: 0},
partition_states={weight_key: True},
keep_vars=keep_vars)
destination.update(local_state)
def forward(self, input_: Tensor) -> Tensor:
# Build the mask.
input_mask = (input_ < self.vocab_start_index) | (input_ >= self.vocab_end_index)
# Mask the input.
masked_input = input_.clone() - self.vocab_start_index
masked_input[input_mask] = 0
output_parallel = F.embedding(masked_input, self.weight, self.padding_idx, *self.embed_args,
**self.embed_kwargs)
# Mask the output embedding.
output_parallel[input_mask, :] = 0.
# Reduce across all the model parallel GPUs.
output = reduce_input(output_parallel, ParallelMode.PARALLEL_1D)
return output
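# --- Illustrative sketch (hypothetical, single-process) ---
# The forward pass above only embeds token ids that fall inside this rank's vocab slice
# [vocab_start_index, vocab_end_index); out-of-range ids are remapped to 0, their rows are
# zeroed, and the cross-rank all-reduce fills them in. A single-process view of that
# masking logic for a hypothetical slice [4, 8) of the vocabulary (no communication):
def _sketch_vocab_parallel_masking():
    import torch
    import torch.nn.functional as F
    weight = torch.randn(4, 16)                     # this rank holds embedding rows 4..7
    vocab_start_index, vocab_end_index = 4, 8
    input_ = torch.tensor([[2, 5, 7, 9]])
    input_mask = (input_ < vocab_start_index) | (input_ >= vocab_end_index)
    masked_input = input_.clone() - vocab_start_index
    masked_input[input_mask] = 0
    local = F.embedding(masked_input, weight)
    local[input_mask, :] = 0.                       # ids 2 and 9 are contributed by other ranks
    assert local.shape == (1, 4, 16)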
@LAYERS.register_module
class Dropout1D(ParallelLayer):
"""Dropout layer of 1D parallelism.
Args:
        p (float, optional): probability of an element to be zeroed, defaults to 0.5.
        inplace (bool, optional): whether to do dropout in-place, defaults to False.
"""
def __init__(self, p: float = 0.5, inplace: bool = False):
super().__init__()
self.parallel_input = get_parallel_input()
self.p = p
self.inplace = inplace
def forward(self, input_: Tensor) -> Tensor:
if self.parallel_input:
with seed(ParallelMode.TENSOR):
output = F.dropout(input_, self.p, self.training, self.inplace)
else:
output = F.dropout(input_, self.p, self.training, self.inplace)
return output
@LAYERS.register_module
class PatchEmbedding1D(ColossalaiModule):
"""
2D Image to Patch Embedding
:param img_size: image size
:type img_size: int
:param patch_size: patch size
:type patch_size: int
:param in_chans: number of channels of input image
:type in_chans: int
:param embed_size: size of embedding
:type embed_size: int
:param dtype: The dtype of parameters, defaults to None
:type dtype: torch.dtype, optional
:param flatten: whether to flatten output tensor, defaults to True
:type flatten: bool, optional
    :param weight_initializer: The initializer of weight, defaults to kaiming uniform initializer
    :type weight_initializer: typing.Callable, optional
    :param bias_initializer: The initializer of bias, defaults to xavier uniform initializer
    :type bias_initializer: typing.Callable, optional
    :param position_embed_initializer: The initializer of position embedding, defaults to zero
:type position_embed_initializer: typing.Callable, optional
"""
def __init__(self,
img_size: int,
patch_size: int,
in_chans: int,
embed_size: int,
dtype: torch.dtype = None,
flatten: bool = True,
weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)),
bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1),
position_embed_initializer: Callable = init.zeros_()):
embed = VanillaPatchEmbedding(img_size,
patch_size,
in_chans,
embed_size,
dtype=dtype,
flatten=flatten,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
position_embed_initializer=position_embed_initializer)
super().__init__(embed)
def _load_from_state_dict(self, state_dict, prefix, *args):
local_state = OrderedDict()
param_keys = [prefix + 'weight', prefix + 'bias', prefix + 'cls_token', prefix + 'pos_embed']
if gpc.get_local_rank(ParallelMode.TENSOR) == 0:
for key in param_keys:
param = state_dict.pop(key, None)
if param is not None:
local_state[key] = param
local_state = broadcast_state_dict(local_state, ParallelMode.PARALLEL_1D)
super()._load_from_state_dict(local_state, prefix, *args)
def _save_to_state_dict(self, destination, prefix, keep_vars):
if gpc.get_local_rank(ParallelMode.TENSOR) == 0:
super()._save_to_state_dict(destination, prefix, keep_vars)
```
#### File: layer/parallel_2p5d/_operation.py
```python
from typing import Any, Tuple
import torch
import torch.distributed as dist
from colossalai.communication.collective import (all_gather, all_reduce, reduce_scatter)
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.utils import get_current_device
from torch import Tensor
from torch.cuda.amp import custom_bwd, custom_fwd
def get_parallel_group(parallel_mode: ParallelMode):
return gpc.get_group(parallel_mode)
def get_global_rank():
return gpc.get_global_rank()
def get_parallel_rank(parallel_mode: ParallelMode):
return gpc.get_local_rank(parallel_mode)
class _Classifier2p5D(torch.autograd.Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(
ctx: Any,
A: Tensor,
B: Tensor,
bias,
tesseract_dim: int,
out_shape: Tuple[int, ...],
row_rank: int,
col_rank: int,
row_parallel_mode: ParallelMode,
col_parallel_mode: ParallelMode,
data_parallel_rank: int,
pipeline_parallel_rank: int,
pipeline_parallel_size: int,
tensor_parallel_size: int,
) -> Tensor:
A_shape = A.shape
A = A.reshape((-1, A_shape[-1]))
B_shape = B.shape
B = B.reshape((-1, B_shape[-1]))
B_temp = all_gather(B, -1, col_parallel_mode)
if ctx:
ctx.save_for_backward(A, B_temp)
C = torch.matmul(A, B_temp.transpose(0, 1))
C = all_reduce(C, row_parallel_mode)
ctx.use_bias = bias is not None
if bias is not None:
C = C + bias
out = C.reshape(out_shape)
if ctx:
ctx.tesseract_dim = tesseract_dim
ctx.row_rank = row_rank
ctx.col_rank = col_rank
ctx.row_parallel_mode = row_parallel_mode
ctx.col_parallel_mode = col_parallel_mode
ctx.A_shape = A_shape
ctx.B_shape = B_shape
ctx.data_parallel_rank = data_parallel_rank
ctx.pipeline_parallel_rank = pipeline_parallel_rank
ctx.pipeline_parallel_size = pipeline_parallel_size
ctx.tensor_parallel_size = tensor_parallel_size
return out
@staticmethod
@custom_bwd
def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
A, B = ctx.saved_tensors
with torch.no_grad():
A_grad = torch.matmul(output_grad, B)
A_grad = A_grad.reshape(ctx.A_shape)
B_grad = torch.matmul(output_grad.reshape(-1, output_grad.shape[-1]).transpose(0, 1), A)
B_grad = reduce_scatter(B_grad, -1, ctx.col_parallel_mode)
B_grad = B_grad.reshape(ctx.B_shape)
if ctx.use_bias:
bias_grad = torch.sum(output_grad, dim=tuple(range(output_grad.ndim - 1)))
bias_grad = all_reduce(bias_grad, ctx.col_parallel_mode)
else:
bias_grad = None
return A_grad, B_grad, bias_grad, None, None, None, None, None, None, None, None, None, None
def classifier_2p5d(A: Tensor, B: Tensor, bias, tesseract_dim: int, out_shape: Tuple[int,
...], row_rank: int, col_rank: int,
row_parallel_mode: ParallelMode, col_parallel_mode: ParallelMode, data_parallel_rank: int,
pipeline_parallel_rank: int, pipeline_parallel_size: int, tensor_parallel_size: int) -> Tensor:
r"""Classifier.
Args:
A (:class:`torch.tensor`): matrix :math:`A`.
B (:class:`torch.tensor`): matrix :math:`B`.
bias (:class:`torch.tensor`): matrix of bias.
        tesseract_dim (int): dimension of TESSERACT for 2.5D parallelism.
out_shape (:class:`torch.size`): shape of output tensor.
row_rank (int): the rank of row.
col_rank (int): the rank of column.
row_parallel_mode (:class:`colossalai.context.ParallelMode`): row parallel mode.
col_parallel_mode (:class:`colossalai.context.ParallelMode`): column parallel mode.
data_parallel_rank (int): data parallel rank.
        pipeline_parallel_rank (int): pipeline parallel rank.
pipeline_parallel_size (int): pipeline parallel size.
tensor_parallel_size (int): tensor parallel size.
Note:
        The parallel_mode should be included in ``ParallelMode``. More details about ``ParallelMode`` can be found
in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_
"""
return _Classifier2p5D.apply(A, B, bias, tesseract_dim, out_shape, row_rank, col_rank, row_parallel_mode,
col_parallel_mode, data_parallel_rank, pipeline_parallel_rank, pipeline_parallel_size,
tensor_parallel_size)
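# --- Illustrative sketch (hypothetical, single-process) ---
# _Classifier2p5D computes C = A @ B^T (+ bias): each rank all-gathers the column
# partitions of B, multiplies its local slice of A by the gathered B^T, and all-reduces
# the partial product over the row group. The local math, with the collectives replaced
# by a simple concatenation over a fake group of size 2:
def _sketch_classifier_2p5d_local_math():
    A = torch.randn(6, 8)                                 # local activation slice
    B_shards = list(torch.randn(4, 8).chunk(2, dim=-1))   # B column-partitioned over 2 ranks
    B_full = torch.cat(B_shards, dim=-1)                  # what all_gather(B, -1, col_parallel_mode) yields
    C_partial = torch.matmul(A, B_full.transpose(0, 1))   # partial result before the row all-reduce
    assert C_partial.shape == (6, 4)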
class Matmul_AB_2p5D(torch.autograd.Function):
r"""Matrix multiplication for :math:`C = AB`.
Args:
A (:class:`torch.tensor`): matrix :math:`A`.
B (:class:`torch.tensor`): matrix :math:`B`.
        tesseract_dim (int): dimension of TESSERACT for 2.5D parallelism.
out_shape (:class:`torch.size`): shape of output tensor.
row_rank (int): the rank of row.
col_rank (int): the rank of column.
dep_rank (int): the rank of depth.
row_parallel_mode (:class:`colossalai.context.ParallelMode`): row parallel mode.
col_parallel_mode (:class:`colossalai.context.ParallelMode`): column parallel mode.
data_parallel_rank (int): data parallel rank.
        pipeline_parallel_rank (int): pipeline parallel rank.
pipeline_parallel_size (int): pipeline parallel size.
tensor_parallel_size (int): tensor parallel size.
Note:
        The parallel_mode should be included in ``ParallelMode``. More details about ``ParallelMode`` can be found
in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_
"""
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(ctx: Any, A: Tensor, B: Tensor, tesseract_dim: int, out_shape: Tuple[int, ...], row_rank: int,
col_rank: int, dep_rank: int, row_parallel_mode: ParallelMode, col_parallel_mode: ParallelMode,
data_parallel_rank: int, pipeline_parallel_rank: int, pipeline_parallel_size: int,
tensor_parallel_size: int) -> Tensor:
# A: [b / dq, s, h / q] -> [(b * s) / dq, h / q]
# B: [h / dq, s / q]
# C: [b / dq, s, s / q] -> [(b * s) / dq, s / q]
assert A.shape[-1] == B.shape[-2], \
'Invalid shapes: A={}, B={} for AB.'.format(A.shape, B.shape)
if ctx:
ctx.save_for_backward(A, B)
A_shape = A.shape
A = A.reshape((-1, A_shape[-1]))
B_shape = B.shape
B = B.reshape((-1, B_shape[-1]))
C_shape = (A.shape[0], B.shape[-1])
C = torch.zeros(C_shape, dtype=A.dtype, device=get_current_device())
# use circular buffer to store the communication tensor
# 2 is enough for all cases
A_list = [torch.empty_like(A) for _ in range(2)]
B_list = [torch.empty_like(B) for _ in range(2)]
row_group = gpc.get_group(row_parallel_mode)
col_group = gpc.get_group(col_parallel_mode)
src_a = \
tesseract_dim * row_rank + tesseract_dim ** 2 * dep_rank + \
data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \
pipeline_parallel_rank * tensor_parallel_size
src_b = \
col_rank + tesseract_dim ** 2 * dep_rank + \
data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \
pipeline_parallel_rank * tensor_parallel_size
opa = [None] * 2
opb = [None] * 2
A_list[0].copy_(A)
B_list[0].copy_(B)
opa[0] = dist.broadcast(A_list[0], src=src_a, group=row_group, async_op=True)
opb[0] = dist.broadcast(B_list[0], src=src_b, group=col_group, async_op=True)
cur = 0
for i in range(tesseract_dim):
if i != tesseract_dim - 1:
A_list[1 - cur].copy_(A)
opa[1 - cur] = dist.broadcast(A_list[1 - cur], src=src_a + 1, group=row_group, async_op=True)
B_list[1 - cur].copy_(B)
opb[1 - cur] = dist.broadcast(B_list[1 - cur],
src=src_b + tesseract_dim,
group=col_group,
async_op=True)
if opa[cur] is not None:
opa[cur].wait()
if opb[cur] is not None:
opb[cur].wait()
torch.addmm(C, A_list[cur], B_list[cur], out=C)
cur = 1 - cur
src_a += 1
src_b += tesseract_dim
out = C.reshape(out_shape)
if ctx:
ctx.tesseract_dim = tesseract_dim
ctx.row_rank = row_rank
ctx.col_rank = col_rank
ctx.dep_rank = dep_rank
ctx.row_parallel_mode = row_parallel_mode
ctx.col_parallel_mode = col_parallel_mode
ctx.A_shape = A_shape
ctx.B_shape = B_shape
ctx.data_parallel_rank = data_parallel_rank
ctx.pipeline_parallel_rank = pipeline_parallel_rank
ctx.pipeline_parallel_size = pipeline_parallel_size
ctx.tensor_parallel_size = tensor_parallel_size
return out
@staticmethod
@custom_bwd
def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
A, B = ctx.saved_tensors
with torch.no_grad():
A_grad = Matmul_ABT_2p5D.apply(output_grad, B, ctx.tesseract_dim, ctx.A_shape, ctx.row_rank, ctx.col_rank,
ctx.dep_rank, ctx.row_parallel_mode, ctx.col_parallel_mode,
ctx.data_parallel_rank, ctx.pipeline_parallel_rank,
ctx.pipeline_parallel_size, ctx.tensor_parallel_size)
B_grad = Matmul_ATB_2p5D.apply(A, output_grad, ctx.tesseract_dim, ctx.B_shape, ctx.row_rank, ctx.col_rank,
ctx.dep_rank, ctx.row_parallel_mode, ctx.col_parallel_mode,
ctx.data_parallel_rank, ctx.pipeline_parallel_rank,
ctx.pipeline_parallel_size, ctx.tensor_parallel_size)
return A_grad, B_grad, None, None, None, None, None, None, None, None, None, None, None, None, None
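# --- Illustrative sketch (hypothetical, single-process) ---
# The forward loop above is a SUMMA-style pipeline: at step i the row group broadcasts its
# i-th block of A, the column group broadcasts its i-th block of B, and every rank
# accumulates A_i @ B_i into C via torch.addmm while the next broadcasts are in flight.
# The same accumulation without any communication, for a fake tesseract_dim of 2:
def _sketch_matmul_ab_2p5d_accumulation():
    tesseract_dim = 2
    A, B = torch.randn(4, 6), torch.randn(6, 5)
    A_blocks = A.chunk(tesseract_dim, dim=-1)   # stand-ins for the row-group broadcasts
    B_blocks = B.chunk(tesseract_dim, dim=0)    # stand-ins for the column-group broadcasts
    C = torch.zeros(4, 5)
    for A_i, B_i in zip(A_blocks, B_blocks):
        C = torch.addmm(C, A_i, B_i)            # same accumulation as the broadcast loop
    assert torch.allclose(C, A @ B, atol=1e-5)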
class Matmul_ABT_2p5D(torch.autograd.Function):
r"""Matrix multiplication for :math:`C = AB^T`.
Args:
A (:class:`torch.tensor`): matrix :math:`A`.
B (:class:`torch.tensor`): matrix :math:`B`.
        tesseract_dim (int): dimension of TESSERACT for 2.5D parallelism.
out_shape (:class:`torch.size`): shape of output tensor.
row_rank (int): the rank of row.
col_rank (int): the rank of column.
dep_rank (int): the rank of depth.
row_parallel_mode (:class:`colossalai.context.ParallelMode`): row parallel mode.
col_parallel_mode (:class:`colossalai.context.ParallelMode`): column parallel mode.
data_parallel_rank (int): data parallel rank.
        pipeline_parallel_rank (int): pipeline parallel rank.
pipeline_parallel_size (int): pipeline parallel size.
tensor_parallel_size (int): tensor parallel size.
Note:
        The parallel_mode should be included in ``ParallelMode``. More details about ``ParallelMode`` can be found
in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_
"""
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(ctx: Any, A: Tensor, B: Tensor, tesseract_dim: int, out_shape: Tuple[int, ...], row_rank: int,
col_rank: int, dep_rank: int, row_parallel_mode: ParallelMode, col_parallel_mode: ParallelMode,
data_parallel_rank: int, pipeline_parallel_rank: int, pipeline_parallel_size: int,
tensor_parallel_size: int) -> Tensor:
assert A.shape[-1] == B.shape[-1], \
'Invalid shapes: A={}, B={} for ABT.'.format(A.shape, B.shape)
if ctx:
ctx.save_for_backward(A, B)
A_shape = A.shape
A = A.reshape((-1, A_shape[-1]))
B_shape = B.shape
B = B.reshape((-1, B_shape[-1]))
C_shape = (A.shape[0], B.shape[0])
C = torch.empty(C_shape, dtype=A.dtype, device=get_current_device())
# use circular buffer to store the communication tensor
# 2 is enough for all cases
B_list = [torch.empty_like(B) for _ in range(2)]
C_list = [torch.empty_like(C) for _ in range(2)]
row_group = gpc.get_group(row_parallel_mode)
col_group = gpc.get_group(col_parallel_mode)
src_b = \
col_rank + tesseract_dim ** 2 * dep_rank + \
data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \
pipeline_parallel_rank * tensor_parallel_size
src_c = \
tesseract_dim * row_rank + tesseract_dim ** 2 * dep_rank + \
data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \
pipeline_parallel_rank * tensor_parallel_size
opb = [None] * 2
opr = [None] * 2
B_list[0].copy_(B)
opb[0] = dist.broadcast(B_list[0], src=src_b, group=col_group, async_op=True)
cur = 0
for i in range(tesseract_dim):
if i != tesseract_dim - 1:
B_list[1 - cur].copy_(B)
opb[1 - cur] = dist.broadcast(B_list[1 - cur],
src=src_b + tesseract_dim,
group=col_group,
async_op=True)
if opr[cur] is not None:
opr[cur].wait()
if i - 2 == col_rank:
C.copy_(C_list[cur])
if opb[cur] is not None:
opb[cur].wait()
torch.matmul(A, B_list[cur].transpose(0, 1), out=C_list[cur])
opr[cur] = dist.reduce(C_list[cur], dst=src_c, group=row_group, async_op=True)
cur = 1 - cur
src_b += tesseract_dim
src_c += 1
for op in opr:
op.wait()
if tesseract_dim - 2 == col_rank:
C.copy_(C_list[cur])
if tesseract_dim - 1 == col_rank:
C.copy_(C_list[1 - cur])
out = C.reshape(out_shape)
if ctx:
ctx.tesseract_dim = tesseract_dim
ctx.row_rank = row_rank
ctx.col_rank = col_rank
ctx.dep_rank = dep_rank
ctx.row_parallel_mode = row_parallel_mode
ctx.col_parallel_mode = col_parallel_mode
ctx.A_shape = A_shape
ctx.B_shape = B_shape
ctx.data_parallel_rank = data_parallel_rank
ctx.pipeline_parallel_rank = pipeline_parallel_rank
ctx.pipeline_parallel_size = pipeline_parallel_size
ctx.tensor_parallel_size = tensor_parallel_size
return out
@staticmethod
@custom_bwd
def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
A, B = ctx.saved_tensors
with torch.no_grad():
A_grad = Matmul_AB_2p5D.apply(output_grad, B, ctx.tesseract_dim, ctx.A_shape, ctx.row_rank, ctx.col_rank,
ctx.dep_rank, ctx.row_parallel_mode, ctx.col_parallel_mode,
ctx.data_parallel_rank, ctx.pipeline_parallel_rank,
ctx.pipeline_parallel_size, ctx.tensor_parallel_size)
B_grad = Matmul_ATB_2p5D.apply(output_grad, A, ctx.tesseract_dim, ctx.B_shape, ctx.row_rank, ctx.col_rank,
ctx.dep_rank, ctx.row_parallel_mode, ctx.col_parallel_mode,
ctx.data_parallel_rank, ctx.pipeline_parallel_rank,
ctx.pipeline_parallel_size, ctx.tensor_parallel_size)
return A_grad, B_grad, None, None, None, None, None, None, None, None, None, None, None, None, None
class Matmul_ATB_2p5D(torch.autograd.Function):
r"""Matrix multiplication for :math:`C = A^TB`
Args:
A (:class:`torch.tensor`): matrix :math:`A`.
B (:class:`torch.tensor`): matrix :math:`B`.
        tesseract_dim (int): dimension of TESSERACT for 2.5D parallelism.
out_shape (:class:`torch.size`): shape of output tensor.
row_rank (int): the rank of row.
col_rank (int): the rank of column.
dep_rank (int): the rank of depth.
row_parallel_mode (:class:`colossalai.context.ParallelMode`): row parallel mode.
col_parallel_mode (:class:`colossalai.context.ParallelMode`): column parallel mode.
data_parallel_rank (int): data parallel rank.
        pipeline_parallel_rank (int): pipeline parallel rank.
pipeline_parallel_size (int): pipeline parallel size.
tensor_parallel_size (int): tensor parallel size.
Note:
        The parallel_mode should be included in ``ParallelMode``. More details about ``ParallelMode`` can be found
in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_
"""
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(ctx: Any, A: Tensor, B: Tensor, tesseract_dim: int, out_shape: Tuple[int, ...], row_rank: int,
col_rank: int, dep_rank: int, row_parallel_mode: ParallelMode, col_parallel_mode: ParallelMode,
data_parallel_rank: int, pipeline_parallel_rank: int, pipeline_parallel_size: int,
tensor_parallel_size: int):
assert A.shape[-2] == B.shape[-2], \
'Invalid shapes: A={}, B={} for ATB.'.format(A.shape, B.shape)
if ctx:
ctx.save_for_backward(A, B)
A_shape = A.shape
A = A.reshape((-1, A_shape[-1]))
B_shape = B.shape
B = B.reshape((-1, B_shape[-1]))
C_shape = (A.shape[-1], B.shape[-1])
C = torch.empty(C_shape, dtype=A.dtype, device=get_current_device())
# use circular buffer to store the communication tensor
# 2 is enough for all cases
A_list = [torch.empty_like(A) for _ in range(2)]
C_list = [torch.empty_like(C) for _ in range(2)]
row_group = gpc.get_group(row_parallel_mode)
col_group = gpc.get_group(col_parallel_mode)
src_a = \
tesseract_dim * row_rank + tesseract_dim ** 2 * dep_rank + \
data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \
pipeline_parallel_rank * tensor_parallel_size
src_c = \
col_rank + tesseract_dim ** 2 * dep_rank + \
data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \
pipeline_parallel_rank * tensor_parallel_size
opa = [None] * 2
opr = [None] * 2
A_list[0].copy_(A)
opa[0] = dist.broadcast(A_list[0], src=src_a, group=row_group, async_op=True)
cur = 0
for i in range(tesseract_dim):
if i != tesseract_dim - 1:
A_list[1 - cur].copy_(A)
opa[1 - cur] = dist.broadcast(A_list[1 - cur], src=src_a + 1, group=row_group, async_op=True)
if opr[cur] is not None:
opr[cur].wait()
if i - 2 == row_rank:
C.copy_(C_list[cur])
if opa[cur] is not None:
opa[cur].wait()
torch.matmul(A_list[cur].transpose(0, 1), B, out=C_list[cur])
opr[cur] = dist.reduce(C_list[cur], dst=src_c, group=col_group, async_op=True)
cur = 1 - cur
src_a += 1
src_c += tesseract_dim
for op in opr:
op.wait()
if tesseract_dim - 2 == row_rank:
C.copy_(C_list[cur])
if tesseract_dim - 1 == row_rank:
C.copy_(C_list[1 - cur])
out = C.reshape(out_shape)
if ctx:
ctx.tesseract_dim = tesseract_dim
ctx.row_rank = row_rank
ctx.col_rank = col_rank
ctx.dep_rank = dep_rank
ctx.row_parallel_mode = row_parallel_mode
ctx.col_parallel_mode = col_parallel_mode
ctx.A_shape = A_shape
ctx.B_shape = B_shape
ctx.data_parallel_rank = data_parallel_rank
ctx.pipeline_parallel_rank = pipeline_parallel_rank
ctx.pipeline_parallel_size = pipeline_parallel_size
ctx.tensor_parallel_size = tensor_parallel_size
return out
@staticmethod
@custom_bwd
def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
A, B = ctx.saved_tensors
with torch.no_grad():
A_grad = Matmul_ABT_2p5D.apply(B, output_grad, ctx.tesseract_dim, ctx.A_shape, ctx.row_rank, ctx.col_rank,
ctx.dep_rank, ctx.row_parallel_mode, ctx.col_parallel_mode,
ctx.data_parallel_rank, ctx.pipeline_parallel_rank,
ctx.pipeline_parallel_size, ctx.tensor_parallel_size)
B_grad = Matmul_AB_2p5D.apply(A, output_grad, ctx.tesseract_dim, ctx.B_shape, ctx.row_rank, ctx.col_rank,
ctx.dep_rank, ctx.row_parallel_mode, ctx.col_parallel_mode,
ctx.data_parallel_rank, ctx.pipeline_parallel_rank,
ctx.pipeline_parallel_size, ctx.tensor_parallel_size)
return A_grad, B_grad, None, None, None, None, None, None, None, None, None, None, None, None, None
class _Add_Bias_2p5D(torch.autograd.Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(ctx: Any, input: Tensor, bias: Tensor, output_size_per_partition: int, tesseract_dim: int,
row_rank: int, col_rank: int, dep_rank: int, col_parallel_mode: ParallelMode, skip_bias_add: bool,
data_parallel_rank: int, pipeline_parallel_rank: int, pipeline_parallel_size: int,
tensor_parallel_size: int) -> Tensor:
if row_rank == 0:
bias_temp = bias.clone()
else:
bias_temp = torch.zeros(output_size_per_partition, dtype=bias.dtype, device=get_current_device())
src_rank = \
col_rank + dep_rank * tesseract_dim ** 2 + \
data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \
pipeline_parallel_rank * tensor_parallel_size
dist.broadcast(bias_temp, src=src_rank, group=get_parallel_group(col_parallel_mode))
ctx.row_rank = row_rank
ctx.col_rank = col_rank
ctx.dep_rank = dep_rank
ctx.tesseract_dim = tesseract_dim
ctx.col_parallel_mode = col_parallel_mode
ctx.bias = skip_bias_add
ctx.data_parallel_rank = data_parallel_rank
ctx.pipeline_parallel_rank = pipeline_parallel_rank
ctx.pipeline_parallel_size = pipeline_parallel_size
ctx.tensor_parallel_size = tensor_parallel_size
if skip_bias_add:
return bias_temp
else:
output = input + bias_temp
return output
@staticmethod
@custom_bwd
def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
row_rank = ctx.row_rank
col_rank = ctx.col_rank
dep_rank = ctx.dep_rank
tesseract_dim = ctx.tesseract_dim
col_parallel_mode = ctx.col_parallel_mode
data_parallel_rank = ctx.data_parallel_rank
pipeline_parallel_rank = ctx.pipeline_parallel_rank
pipeline_parallel_size = ctx.pipeline_parallel_size
tensor_parallel_size = ctx.tensor_parallel_size
if ctx.bias:
dst_rank = \
col_rank + dep_rank * (tesseract_dim ** 2) + \
data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \
pipeline_parallel_rank * tensor_parallel_size
dist.reduce(output_grad, dst=dst_rank, group=get_parallel_group(col_parallel_mode))
if row_rank == 0:
return \
None, output_grad, None, None, None, None, None, None, \
None, None, None, None, None, None, None, None
else:
grad_tmp = torch.zeros_like(output_grad)
return \
None, grad_tmp, None, None, None, None, None, None, \
None, None, None, None, None, None, None, None
else:
reduce_dim = tuple(range(output_grad.ndim - 1))
reduce = torch.sum(output_grad, dim=reduce_dim)
dst_rank = \
col_rank + dep_rank * (tesseract_dim ** 2) + \
data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \
pipeline_parallel_rank * tensor_parallel_size
dist.reduce(reduce, dst=dst_rank, group=get_parallel_group(col_parallel_mode))
if row_rank == 0:
return \
output_grad, reduce, None, None, None, None, None, None, None, \
None, None, None, None, None, None, None, None
else:
reduce_tmp = torch.zeros_like(reduce)
return \
output_grad, reduce_tmp, None, None, None, None, None, None, \
None, None, None, None, None, None, None, None, None
def add_bias_2p5d(input: Tensor, bias: Tensor, output_size_per_partition: int, tesseract_dim: int, row_rank: int,
col_rank: int, dep_rank: int, col_parallel_mode: ParallelMode, skip_bias_add: bool,
data_parallel_rank: int, pipeline_parallel_rank: int, pipeline_parallel_size: int,
tensor_parallel_size: int) -> Tensor:
r"""Matrix add bias: :math:`C = A + b`.
Args:
input (:class:`torch.tensor`): matrix :math:`A`.
bias (:class:`torch.tensor`): matrix :math:`B`.
        tesseract_dim (int): dimension of TESSERACT for 2.5D parallelism.
output_size_per_partition (int): output size in each partition.
row_rank (int): the rank of row.
col_rank (int): the rank of column.
dep_rank (int): the rank of depth.
col_parallel_mode (:class:`colossalai.context.ParallelMode`): column parallel mode.
skip_bias_add (bool): If set to ``True``, it will skip bias add for linear layer,
which is preserved for kernel fusion.
data_parallel_rank (int): data parallel rank.
        pipeline_parallel_rank (int): pipeline parallel rank.
pipeline_parallel_size (int): pipeline parallel size.
tensor_parallel_size (int): tensor parallel size.
Note:
        The parallel_mode should be included in ``ParallelMode``. More details about ``ParallelMode`` can be found
in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_
"""
return _Add_Bias_2p5D.apply(input, bias, output_size_per_partition, tesseract_dim, row_rank, col_rank, dep_rank,
col_parallel_mode, skip_bias_add, data_parallel_rank, pipeline_parallel_rank,
pipeline_parallel_size, tensor_parallel_size)
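# --- Illustrative sketch (hypothetical, single-process) ---
# _Add_Bias_2p5D first broadcasts the bias shard from the row_rank == 0 ranks down each
# column group; with skip_bias_add=False it returns input + bias, with skip_bias_add=True
# it returns just the bias so a later fused kernel can apply it. Locally that amounts to:
def _sketch_add_bias_2p5d_modes():
    x, bias = torch.randn(2, 4), torch.randn(4)
    fused_now = x + bias        # skip_bias_add=False
    deferred = bias             # skip_bias_add=True
    assert fused_now.shape == (2, 4) and deferred.shape == (4,)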
class _Layernorm2p5D(torch.autograd.Function):
r"""Layernorm.
Args:
input (:class:`torch.tensor`): input matrix.
E_x (:class:`torch.tensor`): mean.
        Var_x (:class:`torch.tensor`): inverse standard deviation, i.e. :math:`1 / \sqrt{\mathrm{Var}[x] + \epsilon}`.
hidden_size (int): hidden size.
row_parallel_mode (:class:`colossalai.context.ParallelMode`): row parallel mode.
Note:
        The parallel_mode should be included in ``ParallelMode``. More details about ``ParallelMode`` can be found
in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_
"""
@staticmethod
@custom_fwd(cast_inputs=torch.float32)
def forward(ctx: Any, input: Tensor, E_x: Tensor, Var_x: Tensor, hidden_size: int,
row_parallel_mode: ParallelMode) -> Tensor:
input = input - E_x
# in here, input = x - E[x], Var_x = 1 / sqrt(Var[x] + eps)
ctx.hidden_size = hidden_size
output = input * Var_x
ctx.save_for_backward(output, Var_x)
ctx.row_parallel_mode = row_parallel_mode
return output
@staticmethod
@custom_bwd
def backward(ctx, output_grad):
row_parallel_mode = ctx.row_parallel_mode
x, Var_x = ctx.saved_tensors
# in here, Var_x = 1 / sqrt(Var[x] + eps), x = (x - E[x]) * Var_x
with torch.no_grad():
output_grad_sum = torch.sum(output_grad, dim=-1, keepdim=True)
torch.distributed.all_reduce(output_grad_sum, group=get_parallel_group(row_parallel_mode))
output_grad_sum /= ctx.hidden_size
output_grad_mul_x_sum = torch.sum(output_grad * x, dim=-1, keepdim=True)
torch.distributed.all_reduce(output_grad_mul_x_sum, group=get_parallel_group(row_parallel_mode))
output_grad_mul_x_sum /= ctx.hidden_size
input_grad = output_grad.clone()
input_grad -= x * output_grad_mul_x_sum
input_grad -= output_grad_sum
input_grad *= Var_x
return input_grad, None, None, None, None, None, None
def layernorm_2p5d(input: Tensor, E_x: Tensor, Var_x: Tensor, hidden_size: int,
row_parallel_mode: ParallelMode) -> Tensor:
r"""Layernorm.
Args:
input (:class:`torch.tensor`): input matrix.
E_x (:class:`torch.tensor`): mean.
        Var_x (:class:`torch.tensor`): inverse standard deviation, i.e. :math:`1 / \sqrt{\mathrm{Var}[x] + \epsilon}`.
hidden_size (int): hidden size.
row_parallel_mode (:class:`colossalai.context.ParallelMode`): row parallel mode.
Note:
        The parallel_mode should be included in ``ParallelMode``. More details about ``ParallelMode`` can be found
in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
"""
return _Layernorm2p5D.apply(input, E_x, Var_x, hidden_size, row_parallel_mode)
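# --- Illustrative sketch (hypothetical, single-process) ---
# _Layernorm2p5D expects E_x to be the (already all-reduced) row mean and Var_x to be
# 1 / sqrt(Var[x] + eps); its forward then simply computes (input - E_x) * Var_x.
# A local check against torch.nn.functional.layer_norm without the affine transform:
def _sketch_layernorm_2p5d_math():
    import torch.nn.functional as F
    x, eps = torch.randn(3, 8), 1e-5
    E_x = x.mean(dim=-1, keepdim=True)
    Var_x = 1.0 / torch.sqrt(x.var(dim=-1, unbiased=False, keepdim=True) + eps)
    out = (x - E_x) * Var_x
    assert torch.allclose(out, F.layer_norm(x, (8,), eps=eps), atol=1e-5)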
class _AllGatherTensor2p5D(torch.autograd.Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(ctx: Any, inputs: Tensor, dim: int, col_parallel_mode: ParallelMode) -> Tensor:
ctx.dim = dim
ctx.col_parallel_mode = col_parallel_mode
outputs = all_gather(inputs, dim, col_parallel_mode)
return outputs
@staticmethod
@custom_bwd
def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
grad = reduce_scatter(output_grad, ctx.dim, ctx.col_parallel_mode)
return grad.contiguous(), None, None
def all_gather_tensor_2p5d(inputs: Tensor, dim: int, col_parallel_mode: ParallelMode) -> Tensor:
r"""all gather the weight of 2.5D parallelism.
Args:
inputs (:class:`torch.tensor`): input tensor.
dim (int): dimension of all-gather.
col_parallel_mode (:class:`colossalai.context.ParallelMode`): column parallel mode.
Note:
        The parallel_mode should be included in ``ParallelMode``. More details about ``ParallelMode`` can be found
in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
"""
return _AllGatherTensor2p5D.apply(inputs, dim, col_parallel_mode)
class SplitFirst(torch.autograd.Function):
r"""
Args:
inputs (:class:`torch.tensor`): input tensor.
        tesseract_dim (int): dimension of TESSERACT for 2.5D parallelism.
col_parallel_mode (:class:`colossalai.context.ParallelMode`): column parallel mode.
Note:
        The parallel_mode should be included in ``ParallelMode``. More details about ``ParallelMode`` can be found
in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
"""
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(ctx: Any, inputs: Tensor, tesseract_dim: int, col_parallel_mode: ParallelMode) -> Tensor:
ctx.tesseract_dim = tesseract_dim
ctx.batch_size = inputs.size(0)
ctx.para_mode = col_parallel_mode
row_rank = gpc.get_local_rank(col_parallel_mode)
outputs = inputs.chunk(tesseract_dim, dim=0)[row_rank]
return outputs
@staticmethod
@custom_bwd
def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
grad_shape = (ctx.batch_size,) + output_grad.shape[1:]
grad = torch.empty(grad_shape, dtype=output_grad.dtype, device=get_current_device())
dist.all_gather(list(grad.chunk(ctx.tesseract_dim, dim=0)),
output_grad.contiguous(),
group=gpc.get_group(ctx.para_mode))
return grad, None, None
def split_tensor_2p5d(input_: Tensor, dim: int = 0) -> Tensor:
"""Splits 2P5D tensor in specified dimension across cols.
Args:
input_ (:class:`torch.tensor`): Input tensor.
dim (int): Specified dimension in which to split.
Returns:
        :class:`torch.tensor`: The split tensor.
"""
if input_.size(dim) <= 1:
return input_
return torch.chunk(input_, gpc.get_world_size(ParallelMode.PARALLEL_2P5D_COL),
dim=dim)[gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL)].contiguous()
class _ReduceTensor2p5D(torch.autograd.Function):
@staticmethod
def forward(ctx, input_, parallel_mode):
return all_reduce(input_, parallel_mode)
@staticmethod
def backward(ctx, output_grad):
return output_grad, None
def reduce_tensor_2p5d(input_: Tensor, parallel_mode: ParallelMode) -> Tensor:
r"""All-reduce the input.
Args:
input_ (:class:`torch.tensor`): Input tensor.
parallel_mode (:class:`colossalai.context.ParallelMode`): The parallel mode tensor used.
Note:
        The parallel_mode should be included in ``ParallelMode``. More details about ``ParallelMode`` can be found
in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_
"""
return _ReduceTensor2p5D.apply(input_, parallel_mode)
class _ReduceScatterTensor2p5D(torch.autograd.Function):
@staticmethod
def forward(ctx, input_, dim, parallel_mode):
ctx.dim = dim
ctx.parallel_mode = parallel_mode
return reduce_scatter(input_, dim, parallel_mode)
@staticmethod
def backward(ctx, output_grad):
return all_gather(output_grad, ctx.dim, ctx.parallel_mode), None, None
def reduce_scatter_tensor_2p5d(input_: Tensor, dim: int, parallel_mode: ParallelMode) -> Tensor:
r"""Reduce-scatter the input.
Args:
input_ (:class:`torch.tensor`): Input tensor.
dim (int): Dimension to reduce.
parallel_mode (:class:`colossalai.context.ParallelMode`): The parallel mode tensor used.
Note:
        The parallel_mode should be included in ``ParallelMode``. More details about ``ParallelMode`` can be found
in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_
"""
return _ReduceScatterTensor2p5D.apply(input_, dim, parallel_mode)
class _ReduceByBatch2p5D(torch.autograd.Function):
@staticmethod
def symbolic(graph, input_, reduce_mean: bool = False):
output = all_reduce(input_, ParallelMode.PARALLEL_2P5D_COL)
if reduce_mean:
reduce_size = gpc.get_world_size(ParallelMode.PARALLEL_2P5D_COL)
return output / reduce_size
return output
@staticmethod
@custom_fwd(cast_inputs=torch.float32)
def forward(ctx, input_, reduce_mean: bool = False):
output = all_reduce(input_, ParallelMode.PARALLEL_2P5D_COL)
ctx.reduce_mean = reduce_mean
if reduce_mean:
reduce_size = gpc.get_world_size(ParallelMode.PARALLEL_2P5D_COL)
ctx.reduce_size = reduce_size
return output.clone() / reduce_size
return output.clone()
@staticmethod
@custom_bwd
def backward(ctx, output_grad):
if ctx.reduce_mean:
return output_grad / ctx.reduce_size, None
else:
return output_grad, None
def reduce_by_batch_2p5d(input_, reduce_mean: bool = False) -> Tensor:
r"""All-reduce the input from the model parallel region.
Args:
input_ (:class:`torch.tensor`): input matrix.
reduce_mean (bool, optional):
            If set to ``True``, it will divide the output by the column parallel size, defaults to False.
"""
    return _ReduceByBatch2p5D.apply(input_, reduce_mean)
```
#### File: layer/parallel_3d/_operation.py
```python
from typing import Optional, Tuple
import torch
from colossalai.communication import (all_gather, all_reduce, broadcast, reduce, reduce_scatter)
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from torch import Tensor
from torch.cuda.amp import custom_bwd, custom_fwd
from ._utils import get_parallel_mode_from_env
from colossalai.constants import INPUT_GROUP_3D, WEIGHT_GROUP_3D
class _Linear3D(torch.autograd.Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(ctx,
input_: Tensor,
weight: Tensor,
bias: Optional[Tensor],
input_parallel_mode: ParallelMode,
weight_parallel_mode: ParallelMode,
output_parallel_mode: ParallelMode,
input_dim: int = 0,
weight_dim: int = -1,
output_dim: int = 0) -> Tensor:
ctx.use_bias = bias is not None
input_ = all_gather(input_, input_dim, input_parallel_mode)
weight = all_gather(weight, weight_dim, weight_parallel_mode)
ctx.save_for_backward(input_, weight)
output = torch.matmul(input_, weight)
output = reduce_scatter(output, output_dim, output_parallel_mode)
if bias is not None:
output += bias
ctx.input_parallel_mode = input_parallel_mode
ctx.weight_parallel_mode = weight_parallel_mode
ctx.output_parallel_mode = output_parallel_mode
ctx.input_dim = input_dim
ctx.weight_dim = weight_dim
ctx.output_dim = output_dim
return output
@staticmethod
@custom_bwd
def backward(ctx, output_grad: Tensor) -> Tuple[Tensor, ...]:
input_, weight = ctx.saved_tensors
with torch.no_grad():
output_grad = all_gather(output_grad, ctx.output_dim, ctx.output_parallel_mode)
async_ops = list()
input_grad = torch.matmul(output_grad, weight.transpose(0, 1))
input_grad, op = reduce_scatter(input_grad, ctx.input_dim, ctx.input_parallel_mode, async_op=True)
async_ops.append(op)
weight_grad = torch.matmul(
input_.reshape(-1, input_.shape[-1]).transpose(0, 1), output_grad.reshape(-1, output_grad.shape[-1]))
weight_grad, op = reduce_scatter(weight_grad, ctx.weight_dim, ctx.weight_parallel_mode, async_op=True)
async_ops.append(op)
if ctx.use_bias:
bias_grad = torch.sum(output_grad, dim=tuple(range(len(output_grad.shape))[:-1]))
bias_grad, op = all_reduce(bias_grad, ctx.weight_parallel_mode, async_op=True)
async_ops.append(op)
else:
bias_grad = None
for op in async_ops:
if op is not None:
op.wait()
return input_grad, weight_grad, bias_grad, None, None, None, None, None, None
def linear_3d(input_: Tensor,
weight: Tensor,
bias: Optional[Tensor],
input_parallel_mode: ParallelMode,
weight_parallel_mode: ParallelMode,
output_parallel_mode: ParallelMode,
input_dim: int = 0,
weight_dim: int = -1,
output_dim: int = 0) -> Tensor:
r"""Linear layer for 3D parallelism.
Args:
input_ (:class:`torch.tensor`): input matrix.
weight (:class:`torch.tensor`): matrix of weight.
bias (:class:`torch.tensor`): matrix of bias.
input_parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`): input parallel mode.
weight_parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`): weight parallel mode.
output_parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`): output parallel mode.
input_dim (int, optional): dimension of input, defaults to 0.
weight_dim (int, optional): dimension of weight, defaults to -1.
output_dim (int, optional): dimension of output, defaults to 0.
Note:
        The parallel_mode should be included in ``ParallelMode``. More details about ``ParallelMode`` can be found
in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_
"""
return _Linear3D.apply(input_, weight, bias, input_parallel_mode, weight_parallel_mode, output_parallel_mode,
input_dim, weight_dim, output_dim)
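# --- Illustrative sketch (hypothetical, single-process) ---
# _Linear3D all-gathers the input along input_dim and the weight along its last dim,
# performs a plain matmul, then reduce-scatters the output along output_dim. The shape
# flow, with the collectives replaced by concatenation / chunking over a fake group of 2:
def _sketch_linear_3d_shape_flow():
    q = 2
    local_input = torch.randn(2, 8)       # [batch / q, in_features]
    local_weight = torch.randn(8, 4)      # [in_features, out_features / q]
    gathered_input = torch.cat([local_input] * q, dim=0)       # stand-in for all_gather(input_, 0, ...)
    gathered_weight = torch.cat([local_weight] * q, dim=-1)    # stand-in for all_gather(weight, -1, ...)
    full_output = torch.matmul(gathered_input, gathered_weight)
    local_output = full_output.chunk(q, dim=0)[0]              # stand-in for reduce_scatter(output, 0, ...)
    assert local_output.shape == (2, 8)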
class _Classifier3D(torch.autograd.Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(ctx, input_: Tensor, weight: Tensor, bias: Optional[Tensor], input_parallel_mode: ParallelMode,
weight_parallel_mode: ParallelMode, output_parallel_mode: ParallelMode) -> Tensor:
ctx.use_bias = bias is not None
ranks_in_group = gpc.get_ranks_in_group(input_parallel_mode)
src_rank = ranks_in_group[gpc.get_local_rank(output_parallel_mode)]
weight = broadcast(weight, src_rank, input_parallel_mode)
ctx.save_for_backward(input_, weight)
output = torch.matmul(input_, weight.transpose(0, 1))
output = all_reduce(output, output_parallel_mode)
if bias is not None:
output += bias
ctx.src_rank = src_rank
ctx.input_parallel_mode = input_parallel_mode
ctx.weight_parallel_mode = weight_parallel_mode
ctx.output_parallel_mode = output_parallel_mode
return output
@staticmethod
@custom_bwd
def backward(ctx, output_grad: Tensor) -> Tuple[Tensor, ...]:
input_, weight = ctx.saved_tensors
with torch.no_grad():
async_ops = list()
weight_grad = torch.matmul(
output_grad.reshape(-1, output_grad.shape[-1]).transpose(0, 1), input_.reshape(-1, input_.shape[-1]))
weight_grad = reduce(weight_grad, ctx.src_rank, ctx.input_parallel_mode)
if gpc.get_local_rank(ctx.input_parallel_mode) == gpc.get_local_rank(ctx.output_parallel_mode):
weight_grad, op = all_reduce(weight_grad, ctx.weight_parallel_mode, async_op=True)
async_ops.append(op)
else:
weight_grad = None
if ctx.use_bias:
bias_grad = torch.sum(output_grad, dim=tuple(range(len(output_grad.shape))[:-1]))
bias_grad = all_reduce(bias_grad, ctx.input_parallel_mode)
bias_grad, op = all_reduce(bias_grad, ctx.weight_parallel_mode, async_op=True)
async_ops.append(op)
else:
bias_grad = None
input_grad = torch.matmul(output_grad, weight)
for op in async_ops:
if op is not None:
op.wait()
return input_grad, weight_grad, bias_grad, None, None, None, None, None, None
def classifier_3d(input_: Tensor, weight: Tensor, bias: Optional[Tensor], input_parallel_mode: ParallelMode,
weight_parallel_mode: ParallelMode, output_parallel_mode: ParallelMode) -> Tensor:
r"""3D parallel classifier.
Args:
input_ (:class:`torch.tensor`): input matrix.
weight (:class:`torch.tensor`): matrix of weight.
bias (:class:`torch.tensor`): matrix of bias.
input_parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`): input parallel mode.
weight_parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`): weight parallel mode.
output_parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`): output parallel mode.
Note:
        The parallel_mode should be included in ``ParallelMode``. More details about ``ParallelMode`` can be found
in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_
"""
return _Classifier3D.apply(input_, weight, bias, input_parallel_mode, weight_parallel_mode, output_parallel_mode)
class _Layernorm3D(torch.autograd.Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float32)
def forward(ctx, input_: Tensor, weight: Tensor, bias: Tensor, normalized_shape: int, eps: float,
input_parallel_mode: ParallelMode, weight_parallel_mode: ParallelMode,
output_parallel_mode: ParallelMode) -> Tensor:
mean = all_reduce(torch.sum(input_, dim=-1, keepdim=True), output_parallel_mode) / normalized_shape
mu = input_ - mean
var = all_reduce(torch.sum(mu**2, dim=-1, keepdim=True), output_parallel_mode) / normalized_shape
sigma = torch.sqrt(var + eps)
ctx.save_for_backward(mu, sigma, weight)
z = mu / sigma
output = weight * z + bias
ctx.normalized_shape = normalized_shape
ctx.input_parallel_mode = input_parallel_mode
ctx.weight_parallel_mode = weight_parallel_mode
ctx.output_parallel_mode = output_parallel_mode
return output
@staticmethod
@custom_bwd
def backward(ctx, output_grad: Tensor) -> Tuple[Tensor, ...]:
mu, sigma, weight = ctx.saved_tensors
with torch.no_grad():
bias_grad, weight_grad = output_grad, output_grad * mu / sigma
grads = torch.stack([bias_grad, weight_grad]).contiguous()
grads = torch.sum(grads, dim=tuple(range(len(grads.shape))[1:-1]))
grads = all_reduce(grads, ctx.weight_parallel_mode)
grads = all_reduce(grads, ctx.input_parallel_mode)
bias_grad, weight_grad = grads[0], grads[1]
dz = output_grad * weight
dvar = dz * mu * (-0.5) * sigma**(-3)
dvar = all_reduce(torch.sum(dvar, dim=-1, keepdim=True), ctx.output_parallel_mode)
dmean = dz * (-1 / sigma) + dvar * -2 * mu / ctx.normalized_shape
dmean = all_reduce(torch.sum(dmean, dim=-1, keepdim=True), ctx.output_parallel_mode)
input_grad = dz / sigma + dvar * 2 * mu / \
ctx.normalized_shape + dmean / ctx.normalized_shape
return input_grad, weight_grad, bias_grad, None, None, None, None, None
def layernorm_3d(input_: Tensor, weight: Tensor, bias: Tensor, normalized_shape: int, eps: float,
input_parallel_mode: ParallelMode, weight_parallel_mode: ParallelMode,
output_parallel_mode: ParallelMode) -> Tensor:
r"""3D parallel Layernorm.
Args:
input_ (:class:`torch.tensor`): input matrix.
weight (:class:`torch.tensor`): matrix of weight.
bias (:class:`torch.tensor`): matrix of bias.
normalized_shape (int): input shape from an expected input of size.
:math:`[* \times \text{normalized_shape}[0] \times \text{normalized_shape}[1]
\times \ldots \times \text{normalized_shape}[-1]]`
If a single integer is used, it is treated as a singleton list, and this module will
normalize over the last dimension which is expected to be of that specific size.
eps (float): a value added to the denominator for numerical stability
input_parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`): input parallel mode.
weight_parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`): weight parallel mode.
output_parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`): output parallel mode.
Note:
        The parallel_mode should be included in ``ParallelMode``. More details about ``ParallelMode`` can be found
in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_
"""
return _Layernorm3D.apply(input_, weight, bias, normalized_shape, eps, input_parallel_mode, weight_parallel_mode,
output_parallel_mode)
def split_tensor_3d(tensor: Tensor, dim: int, parallel_mode: ParallelMode) -> Tensor:
r"""Splits 3D parallel tensor in specified dimension.
Args:
tensor (:class:`torch.tensor`): Input tensor.
dim (int): Specified dimension in which to split.
parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`, optional): Parallel mode.
Returns:
        :class:`torch.tensor`: The split tensor.
Note:
        The parallel_mode should be included in ``ParallelMode``. More details about ``ParallelMode`` can be found
in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
"""
if tensor.size(dim) <= 1:
return tensor
output = torch.chunk(tensor, gpc.get_world_size(parallel_mode),
dim=dim)[gpc.get_local_rank(parallel_mode)].contiguous()
return output
def split_batch_3d(input_: Tensor,
dim: int = 0,
input_parallel_mode: ParallelMode = ParallelMode.PARALLEL_3D_INPUT,
weight_parallel_mode: ParallelMode = ParallelMode.PARALLEL_3D_WEIGHT) -> Tensor:
r"""Splits 3D tensor in batch.
Args:
input_ (:class:`torch.tensor`): Input tensor.
dim (int): Specified dimension in which to split.
input_parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`, optional): input parallel mode.
weight_parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`, optional): weight parallel mode.
Returns:
        :class:`torch.tensor`: The split tensor.
Note:
        The parallel_mode should be included in ``ParallelMode``. More details about ``ParallelMode`` can be found
in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
"""
if input_.size(dim) <= 1:
return input_
weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D)
input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D)
output = torch.chunk(input_, gpc.get_world_size(weight_parallel_mode),
dim=dim)[gpc.get_local_rank(weight_parallel_mode)].contiguous()
output = torch.chunk(output, gpc.get_world_size(input_parallel_mode),
dim=dim)[gpc.get_local_rank(input_parallel_mode)].contiguous()
return output
class _ReduceTensor3D(torch.autograd.Function):
@staticmethod
def forward(ctx, input_, parallel_mode):
return all_reduce(input_, parallel_mode)
@staticmethod
def backward(ctx, output_grad):
return output_grad, None
def reduce_tensor_3d(tensor: Tensor, parallel_mode: ParallelMode) -> Tensor:
r"""All-reduce the input
Args:
tensor (:class:`torch.tensor`): Input tensor.
parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`): Parallel mode.
Note:
        The parallel_mode should be included in ``ParallelMode``. More details about ``ParallelMode`` can be found
in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
"""
return _ReduceTensor3D.apply(tensor, parallel_mode)
class _AllGatherTensor3D(torch.autograd.Function):
@staticmethod
def forward(ctx, input_, dim, parallel_mode):
ctx.dim = dim
ctx.parallel_mode = parallel_mode
output = all_gather(input_, dim, parallel_mode)
return output
@staticmethod
def backward(ctx, output_grad):
input_grad = reduce_scatter(output_grad, ctx.dim, ctx.parallel_mode)
return input_grad, None, None
def all_gather_tensor_3d(tensor: Tensor, dim: int, parallel_mode: ParallelMode) -> Tensor:
r"""All-reduce the gradient in backward pass.
Args:
tensor (:class:`torch.tensor`): Input tensor.
dim (int): Dimension to gather.
parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`): Parallel mode.
Note:
        The parallel_mode should be included in ``ParallelMode``. More details about ``ParallelMode`` can be found
in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
"""
return _AllGatherTensor3D.apply(tensor, dim, parallel_mode)
class _ReduceScatterTensor3D(torch.autograd.Function):
@staticmethod
def forward(ctx, input_, dim, parallel_mode):
ctx.dim = dim
ctx.parallel_mode = parallel_mode
return reduce_scatter(input_, dim, parallel_mode)
@staticmethod
def backward(ctx, output_grad):
input_grad = all_gather(output_grad, ctx.dim, ctx.parallel_mode)
return input_grad, None, None
def reduce_scatter_tensor_3d(tensor: Tensor, dim: int, parallel_mode: ParallelMode) -> Tensor:
r"""Reduce-scatter the input.
Args:
tensor (:class:`torch.tensor`): Input tensor.
dim (int): Dimension to scatter.
parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`): Parallel mode.
Note:
        The parallel_mode should be included in ``ParallelMode``. More details about ``ParallelMode`` can be found
in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_
"""
return _ReduceScatterTensor3D.apply(tensor, dim, parallel_mode)
class _ReduceByBatch3D(torch.autograd.Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float32)
def forward(ctx,
input_: Tensor,
input_parallel_mode: ParallelMode,
weight_parallel_mode: ParallelMode,
reduce_mean: bool = False) -> Tensor:
output = all_reduce(input_, input_parallel_mode)
output = all_reduce(output, weight_parallel_mode)
ctx.reduce_mean = reduce_mean
if reduce_mean:
reduce_size = gpc.get_world_size(input_parallel_mode) * gpc.get_world_size(weight_parallel_mode)
ctx.reduce_size = reduce_size
return output.clone() / reduce_size
return output.clone()
@staticmethod
@custom_bwd
def backward(ctx, output_grad: Tensor) -> Tuple[Tensor, ...]:
if ctx.reduce_mean:
return output_grad / ctx.reduce_size, None, None, None
else:
return output_grad, None, None, None
def reduce_by_batch_3d(tensor: Tensor,
input_parallel_mode: ParallelMode,
weight_parallel_mode: ParallelMode,
reduce_mean: bool = False) -> Tensor:
r"""All-reduce the input from the model parallel region.
    Args:
        tensor (:class:`torch.tensor`): input tensor.
        input_parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`): input parallel mode.
weight_parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`): weight parallel mode.
reduce_mean (bool, optional): If set to ``True``, it will divide the output by
            (input parallel size * weight parallel size), defaults to False.
Note:
        The parallel_mode should be included in ``ParallelMode``. More details about ``ParallelMode`` can be found
in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_
"""
return _ReduceByBatch3D.apply(tensor, input_parallel_mode, weight_parallel_mode, reduce_mean)
class _BroadcastWeight3D_FromDiagonal(torch.autograd.Function):
r"""broadcast weight from diagonal.
Args:
input_ (:class:`torch.tensor`): input matrix.
input_parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`): input parallel mode.
weight_parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`): weight parallel mode.
output_parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`): output parallel mode.
Note:
        The parallel_mode should be included in ``ParallelMode``. More details about ``ParallelMode`` can be found
in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_
"""
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(ctx, input_: Tensor, input_parallel_mode: ParallelMode, weight_parallel_mode: ParallelMode,
output_parallel_mode: ParallelMode) -> Tensor:
ranks_in_group = gpc.get_ranks_in_group(input_parallel_mode)
src_rank = ranks_in_group[gpc.get_local_rank(output_parallel_mode)]
output = broadcast(input_, src_rank, input_parallel_mode)
ctx.src_rank = src_rank
ctx.input_parallel_mode = input_parallel_mode
ctx.weight_parallel_mode = weight_parallel_mode
ctx.output_parallel_mode = output_parallel_mode
return output
@staticmethod
@custom_bwd
def backward(ctx, output_grad: Tensor) -> Tuple[Tensor, ...]:
input_grad = reduce(output_grad, ctx.src_rank, ctx.input_parallel_mode)
if gpc.get_local_rank(ctx.input_parallel_mode) == gpc.get_local_rank(ctx.output_parallel_mode):
input_grad = all_reduce(input_grad, ctx.weight_parallel_mode)
else:
input_grad = None
return input_grad, None, None, None
def broadcast_weight_3d_from_diagonal(tensor: Tensor, input_parallel_mode: ParallelMode,
weight_parallel_mode: ParallelMode, output_parallel_mode: ParallelMode) -> Tensor:
return _BroadcastWeight3D_FromDiagonal.apply(tensor, input_parallel_mode, weight_parallel_mode,
output_parallel_mode)
```
#### File: colossalai/utils/checkpointing.py
```python
from collections import OrderedDict
from itertools import chain
import torch
import torch.distributed as dist
from colossalai.communication.collective import scatter_object_list
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from torch.nn.modules.module import _EXTRA_STATE_KEY_SUFFIX
from .common import is_using_pp
__all__ = ["save_checkpoint", "load_checkpoint"]
def broadcast_state_dict(state_dict, parallel_mode):
state_dict = [state_dict.copy() if isinstance(state_dict, dict) else state_dict]
src_rank = gpc.get_ranks_in_group(parallel_mode)[0]
dist.broadcast_object_list(state_dict, src=src_rank, group=gpc.get_cpu_group(parallel_mode))
return state_dict[0]
def partition_tensor_parallel_state_dict(
state_dict: OrderedDict, parallel_mode: ParallelMode, dims: dict = dict(), partition_states: dict = dict()
):
src_rank = gpc.get_ranks_in_group(parallel_mode)[0]
depth = gpc.get_world_size(parallel_mode)
if gpc.get_local_rank(parallel_mode) == 0:
partitioned_state_list = [dict() for _ in range(depth)]
for key in list(state_dict.keys()):
param = state_dict.pop(key)
dim = dims.get(key, 0)
do_partition = partition_states.get(key, True)
if do_partition:
param = torch.chunk(param, depth, dim=dim)
for i, p in enumerate(partitioned_state_list):
p[key] = param[i] if do_partition else param
else:
partitioned_state_list = [None for _ in range(depth)]
partitioned_state = [None]
scatter_object_list(partitioned_state, partitioned_state_list, src=src_rank, group=gpc.get_cpu_group(parallel_mode))
return partitioned_state[0]
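# --- Illustrative sketch (hypothetical, single-process) ---
# On the source rank, every entry marked in partition_states is torch.chunk-ed along its
# configured dim and one slice is scattered to each rank of the group; the other entries
# are replicated as-is. The partitioning step alone, for a group of size 2:
def _sketch_partition_state_dict():
    depth = 2
    state_dict = OrderedDict(weight=torch.randn(8, 4))
    shards = torch.chunk(state_dict["weight"], depth, dim=0)
    partitioned_state_list = [OrderedDict(weight=shard) for shard in shards]  # one dict per rank
    assert partitioned_state_list[0]["weight"].shape == (4, 4)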
def gather_tensor_parallel_state_dict(
state_dict: OrderedDict,
parallel_mode: ParallelMode,
dims: dict = dict(),
partition_states: dict = dict(),
keep_vars: bool = False,
):
dst_rank = gpc.get_ranks_in_group(parallel_mode)[0]
depth = gpc.get_world_size(parallel_mode)
for key in list(state_dict.keys()):
param = state_dict.pop(key)
param = param if keep_vars else param.detach()
dim = dims.get(key, 0)
do_partition = partition_states.get(key, True)
if do_partition:
temp = param.transpose(0, dim).contiguous()
gather_list = None
if gpc.get_local_rank(parallel_mode) == 0:
shape = list(param.shape)
shape[0], shape[dim] = shape[dim], shape[0]
shape[0] *= depth
param = torch.empty(shape, dtype=param.dtype, device=param.device)
gather_list = list(torch.chunk(param, depth, dim=0))
dist.gather(temp, gather_list, dst=dst_rank, group=gpc.get_cpu_group(parallel_mode))
param = torch.transpose(param, 0, dim)
# update params in state_dict only on local rank 0
if gpc.get_local_rank(parallel_mode) == 0:
state_dict[key] = param
return state_dict
def _send_state_dict(state_dict, dst, parallel_mode):
state_tensor, state_size = dist.distributed_c10d._object_to_tensor(state_dict)
dist.send(state_size, dst, group=gpc.get_cpu_group(parallel_mode))
dist.send(state_tensor, dst, group=gpc.get_cpu_group(parallel_mode))
def _recv_state_dict(src, parallel_mode):
state_size = torch.tensor([0], dtype=torch.long)
dist.recv(state_size, src, group=gpc.get_cpu_group(parallel_mode))
state_tensor = torch.empty(state_size.item(), dtype=torch.uint8)
dist.recv(state_tensor, src, group=gpc.get_cpu_group(parallel_mode))
state_dict = dist.distributed_c10d._tensor_to_object(state_tensor, state_size)
return state_dict
def partition_pipeline_parallel_state_dict(model, state_dict):
pipeline_state = OrderedDict()
if gpc.get_local_rank(ParallelMode.TENSOR) == 0:
# receive all states from prev stage
if not gpc.is_first_rank(ParallelMode.PIPELINE):
state_dict = _recv_state_dict(gpc.get_prev_global_rank(ParallelMode.PIPELINE), ParallelMode.PIPELINE)
# move states to output
for name, _ in model.named_parameters(recurse=True):
if name in state_dict:
pipeline_state[name] = state_dict.pop(name)
for name, _ in model.named_buffers(recurse=True):
if name in state_dict:
pipeline_state[name] = state_dict.pop(name)
for name, _ in model.named_modules():
extra_state_key = name + "." + _EXTRA_STATE_KEY_SUFFIX
if extra_state_key in state_dict:
pipeline_state[extra_state_key] = state_dict.pop(extra_state_key)
# send rest states to next stage
if not gpc.is_last_rank(ParallelMode.PIPELINE):
_send_state_dict(state_dict, gpc.get_next_global_rank(ParallelMode.PIPELINE), ParallelMode.PIPELINE)
return pipeline_state
def gather_pipeline_parallel_state_dict(state_dict):
gathered_states = (
[None for _ in range(gpc.get_world_size(ParallelMode.PIPELINE))]
if gpc.get_local_rank(ParallelMode.PIPELINE) == 0
else None
)
dist.gather_object(
state_dict,
gathered_states,
dst=gpc.get_ranks_in_group(ParallelMode.PIPELINE)[0],
group=gpc.get_cpu_group(ParallelMode.PIPELINE),
)
state_dict = (
OrderedDict(chain.from_iterable(state.items() for state in gathered_states))
if gpc.get_local_rank(ParallelMode.PIPELINE) == 0
else OrderedDict()
)
return state_dict
def save_checkpoint(
file,
epoch: int,
model: torch.nn.Module,
optimizer: torch.optim.Optimizer = None,
lr_scheduler: torch.optim.lr_scheduler._LRScheduler = None,
**kwargs
):
"""Stores the checkpoint to disk. Saves all the training components' parameters or buffers, such as model, optimizer,
lr_scheduler etc. into a checkpoint dictionary.
Args:
file: a file-like object (has to implement write and flush) or a string or os.PathLike object containing a
file name.
epoch (int): Epoch number (indicates how many epochs have you trained this model).
model (:class:`torch.nn.Module`): Model to be saved.
optimizer (Union[:class:`torch.optim.Optimizer`, :class:`colossalai.nn.optimizer`]): Optimizer to be saved.
lr_scheduler (Union[:class:`torch.optim.lr_scheduler`,
:class:`colossalai.nn.lr_scheduler`], optional): lr_scheduler to be saved, defaults to None.
        kwargs: additional keyword arguments forwarded to ``torch.save`` (for example ``pickle_module`` or
            ``pickle_protocol``).
"""
# ckpt container
checkpoint = {"epoch": epoch}
model_state = model.state_dict()
if is_using_pp() and gpc.get_local_rank(ParallelMode.TENSOR) == 0:
model_state = gather_pipeline_parallel_state_dict(model_state)
if gpc.get_global_rank() == 0:
checkpoint["model"] = model_state
# if optimizer is not None:
# checkpoint['optimizer'] = optimizer.state_dict()
# if lr_scheduler is not None:
# checkpoint['lr_scheduler'] = lr_scheduler.state_dict()
torch.save(checkpoint, file, **kwargs)
def load_checkpoint(
file,
model: torch.nn.Module,
optimizer: torch.optim.Optimizer = None,
lr_scheduler: torch.optim.lr_scheduler._LRScheduler = None,
strict: bool = True,
):
"""Loads training states from a checkpoint file.
Args:
file: a file-like object (has to implement read(), readline(), tell(), and seek()), or a string or os.PathLike
object containing a file name.
model (:class:`torch.nn.Module`): Model to load saved weights and buffers.
        optimizer (Union[:class:`torch.optim.Optimizer`, :class:`colossalai.nn.optimizer`]): Optimizer whose state is to be restored.
        lr_scheduler (:class:`torch.optim.lr_scheduler._LRScheduler`, optional):
            lr_scheduler whose state is to be restored, defaults to None.
strict (bool, optional): Whether to strictly enforce that the keys in :attr:`state_dict`
of the checkpoint match the names of parameters and buffers in model, defaults to True.
Returns:
int: The saved epoch number.
Raises:
RuntimeError: Raise error if the model/optimizer cannot successfully be recuperated
"""
state_dict = (
torch.load(file, map_location=torch.device("cpu")) if gpc.get_local_rank(ParallelMode.MODEL) == 0 else None
)
# model states
model_state = state_dict.pop("model") if state_dict is not None else dict()
# pipeline
if is_using_pp():
model_state = partition_pipeline_parallel_state_dict(model, model_state)
try:
model.load_state_dict(model_state, strict=strict)
except RuntimeError as e:
error_msgs = str(e)
if error_msgs.startswith("Error(s) in loading state_dict for "):
error_msgs = error_msgs.split("\n\t")[1:]
dst_rank = gpc.get_ranks_in_group(ParallelMode.MODEL)[0]
all_error_msgs = [None for _ in range(gpc.get_world_size(ParallelMode.MODEL))]
dist.gather_object(error_msgs, all_error_msgs, dst=dst_rank, group=gpc.get_cpu_group(ParallelMode.MODEL))
if gpc.get_global_rank() == 0:
all_error_msgs = list(chain.from_iterable(all_error_msgs))
raise RuntimeError(
"Error(s) in loading state_dict for {}:\n\t{}".format(
model.__class__.__name__, "\n\t".join(all_error_msgs)
)
)
else:
raise e
# broadcast the rest states
state_dict = broadcast_state_dict(state_dict, ParallelMode.MODEL)
# # optimizer states
# if optimizer is not None and 'optimizer' in state_dict:
# optimizer.load_state_dict(state_dict['optimizer'])
# # lr scheduler states
# if lr_scheduler is not None and 'lr_scheduler' in state_dict:
# lr_scheduler.load_state_dict(state_dict['lr_scheduler'])
# last epoch
last_epoch = state_dict.pop("epoch", -1)
return last_epoch
```
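The two entry points above are meant to be used symmetrically from a training script. The sketch below is an editor's illustration rather than repo code: it assumes the helpers above are in scope (they are typically exposed through `colossalai.utils`) and that `colossalai.launch(...)` has already set up the pipeline/tensor/model process groups they rely on.
```python
import torch.nn as nn

def checkpoint_roundtrip(model: nn.Module, epoch: int, path: str = "ckpt.pt") -> int:
    """Save a (possibly pipeline/tensor-parallel) model, then restore it and return the saved epoch."""
    # Every rank participates: pipeline stages are gathered internally, and only
    # global rank 0 includes the gathered model weights in the dictionary it writes.
    save_checkpoint(path, epoch, model)
    # On load, rank 0 of the MODEL group reads the file; the states are then
    # re-partitioned across pipeline stages and broadcast to the remaining ranks.
    return load_checkpoint(path, model, strict=True)
```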
#### File: zero/shard_utils/commons.py
```python
import torch
import torch.nn.functional as F
from typing import Tuple
def get_shard(tensor: torch.Tensor, rank: int, world_size: int) -> Tuple[torch.Tensor, int]:
"""Return the local shard of a full tensor."""
# Shard using torch.chunk to match all-gather/reduce-scatter.
chunks = list(torch.flatten(tensor).chunk(world_size))
while len(chunks) < world_size:
chunks.append(chunks[0].new_empty(0))
# Determine number of padding elements.
num_to_pad = chunks[0].numel() - chunks[rank].numel()
assert num_to_pad >= 0, num_to_pad
shard = chunks[rank].clone()
if num_to_pad > 0:
shard = F.pad(shard, [0, num_to_pad])
return shard, num_to_pad
```
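To make the padding behaviour concrete, here is a small worked example (editor's addition, hypothetical values) that exercises the `get_shard` helper above on CPU.
```python
import torch

# 10 elements split across world_size=4 -> torch.chunk gives pieces of size 3, 3, 3, 1.
full = torch.arange(10.0)
shard, num_to_pad = get_shard(full, rank=3, world_size=4)
# The last rank's 1-element chunk is zero-padded up to the size of the first chunk.
assert shard.numel() == 3 and num_to_pad == 2
print(shard)  # tensor([9., 0., 0.])
```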
#### File: model_zoo/gpt/gpt.py
```python
import math
from typing import Callable
import torch
from colossalai import nn as col_nn
from colossalai.builder.pipeline import partition_uniform
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.logging import get_dist_logger
from colossalai.nn.layer.utils import CheckpointModule, divide
from colossalai.nn.layer.wrapper import PipelineSharedModuleWrapper
from colossalai.registry import LAYERS, LOSSES, MODELS
from colossalai.utils import get_current_device
from torch import dtype, nn
__all__ = [
'GPT', 'GPTLMLoss', 'gpt2_small', 'gpt2_medium', 'gpt2_large', 'gpt2_xl', 'gpt2_8B', 'gpt2_xl_pipeline',
'gpt2_8B_pipeline', 'gpt3', 'gpt3_pipeline'
]
@LAYERS.register_module
class GPTEmbedding(nn.Module):
def __init__(self,
embedding_dim: int,
vocab_size: int,
max_position_embeddings: int,
num_tokentypes: int = 0,
padding_idx: int = None,
dropout: float = 0.,
dtype: dtype = None) -> None:
super().__init__()
self.word_embeddings = col_nn.Embedding(vocab_size, embedding_dim, padding_idx=padding_idx, dtype=dtype)
self.position_embeddings = col_nn.Embedding(max_position_embeddings, embedding_dim, dtype=dtype)
if num_tokentypes > 0:
self.tokentype_embeddings = col_nn.Embedding(num_tokentypes, embedding_dim, dtype=dtype)
else:
self.tokentype_embeddings = None
self.dropout = col_nn.Dropout(dropout)
@property
def word_embedding_weight(self):
return self.word_embeddings.weight
def forward(self, input_ids, position_ids=None, tokentype_ids=None):
seq_length = input_ids.size(1)
if position_ids is None:
position_ids = torch.arange(seq_length, dtype=torch.long, device=get_current_device()).unsqueeze(0)
x = self.word_embeddings(input_ids) + self.position_embeddings(position_ids)
if self.tokentype_embeddings is not None and tokentype_ids is not None:
x = x + self.tokentype_embeddings(tokentype_ids)
x = self.dropout(x)
return x
@LAYERS.register_module
class GPTSelfAttention(nn.Module):
def __init__(self,
dim: int,
num_heads: int,
attention_dropout: float,
dropout: float,
bias: bool = True,
fuse_scale_mask_softmax: bool = False,
dtype: dtype = None) -> None:
super().__init__()
self.fuse_scale_mask_softmax = fuse_scale_mask_softmax
self.attention_head_size = divide(dim, num_heads)
self.query_key_value = col_nn.Linear(dim, 3 * dim, dtype=dtype, bias=bias)
if fuse_scale_mask_softmax:
from colossalai.kernel import FusedScaleMaskSoftmax
from colossalai.kernel.cuda_native.scaled_softmax import \
AttnMaskType
self.softmax = FusedScaleMaskSoftmax(input_in_fp16=True,
input_in_bf16=False,
attn_mask_type=AttnMaskType.causal,
scaled_masked_softmax_fusion=True,
mask_func=None,
softmax_in_fp32=True,
scale=math.sqrt(self.attention_head_size))
else:
self.softmax = nn.Softmax(dim=-1)
self.attention_dropout = col_nn.Dropout(attention_dropout)
self.dense = col_nn.Linear(dim, dim, dtype=dtype, bias=True)
self.dropout = col_nn.Dropout(dropout)
def forward(self, x, attention_mask=None):
qkv = self.query_key_value(x)
all_head_size = qkv.shape[-1] // 3
num_attention_heads = divide(all_head_size, self.attention_head_size)
new_qkv_shape = qkv.shape[:-1] + \
(num_attention_heads, 3 * self.attention_head_size)
qkv = qkv.view(new_qkv_shape)
qkv = qkv.permute((0, 2, 1, 3))
q, k, v = torch.chunk(qkv, 3, dim=-1)
x = torch.matmul(q, k.transpose(-1, -2))
if self.fuse_scale_mask_softmax:
x = self.softmax(x, attention_mask)
else:
x = x / math.sqrt(self.attention_head_size)
# causal mask
q_len, k_len = q.size(-2), k.size(-2)
causal_mask = torch.tril(torch.ones((q_len, k_len), dtype=torch.uint8,
device=get_current_device())).view(1, 1, q_len, k_len).bool()
x = torch.where(causal_mask, x, torch.tensor(-1e4, dtype=x.dtype, device=get_current_device()))
if attention_mask is not None:
x = x + attention_mask
x = self.softmax(x)
x = self.attention_dropout(x)
x = torch.matmul(x, v)
x = x.transpose(1, 2)
new_context_layer_shape = x.size()[:-2] + (all_head_size,)
x = x.reshape(new_context_layer_shape)
x = self.dense(x)
x = self.dropout(x)
return x
@LAYERS.register_module
class GPTMLP(nn.Module):
def __init__(self,
dim: int,
mlp_ratio: float,
activation: Callable,
dropout: float,
dtype: dtype = None,
bias: bool = True):
super().__init__()
intermediate_dim = int(dim * mlp_ratio)
self.dense_1 = col_nn.Linear(dim, intermediate_dim, dtype=dtype, bias=bias)
self.activation = activation
self.dense_2 = col_nn.Linear(intermediate_dim, dim, dtype=dtype, bias=bias)
self.dropout = col_nn.Dropout(dropout)
def forward(self, x):
x = self.dense_1(x)
x = self.activation(x)
x = self.dense_2(x)
x = self.dropout(x)
return x
@LAYERS.register_module
class GPTBlock(CheckpointModule):
def __init__(self,
dim: int,
num_heads: int,
mlp_ratio: float,
activation: Callable,
attention_dropout: float = 0.,
dropout: float = 0.,
layernorm_epsilon: float = 1e-5,
dtype: dtype = None,
bias: bool = True,
apply_post_layernorm: bool = False,
fuse_scale_mask_softmax: bool = False,
checkpoint: bool = False,
activation_offload: bool = False):
super().__init__(checkpoint, activation_offload)
self.apply_post_layernorm = apply_post_layernorm
self.norm1 = col_nn.LayerNorm(normalized_shape=dim, eps=layernorm_epsilon, dtype=dtype)
self.attn = GPTSelfAttention(dim=dim,
num_heads=num_heads,
attention_dropout=attention_dropout,
dropout=dropout,
bias=bias,
fuse_scale_mask_softmax=fuse_scale_mask_softmax,
dtype=dtype)
self.norm2 = col_nn.LayerNorm(normalized_shape=dim, eps=layernorm_epsilon, dtype=dtype)
self.mlp = GPTMLP(dim=dim, mlp_ratio=mlp_ratio, activation=activation, dropout=dropout, dtype=dtype, bias=bias)
def _forward(self, x, attention_mask=None):
if not self.apply_post_layernorm:
residual = x
x = self.norm1(x)
if self.apply_post_layernorm:
residual = x
x = residual + self.attn(x, attention_mask)
if not self.apply_post_layernorm:
residual = x
x = self.norm2(x)
if self.apply_post_layernorm:
residual = x
x = residual + self.mlp(x)
return x, attention_mask
@LAYERS.register_module
class GPTLMHead(nn.Module):
def __init__(self,
dim: int,
vocab_size: int,
word_embeeding_weight: nn.Parameter = None,
bias: bool = False,
dtype: dtype = None) -> None:
super().__init__()
self.dense = col_nn.Classifier(dim, vocab_size, word_embeeding_weight, bias=bias, dtype=dtype)
@property
def weight(self):
return self.dense.weight
def forward(self, x):
x = self.dense(x)
return x
@LOSSES.register_module
class GPTLMLoss(nn.Module):
def __init__(self):
super().__init__()
self.loss = col_nn.CrossEntropyLoss()
def forward(self, logits, labels):
shift_logits = logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
return self.loss(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
@MODELS.register_module
class GPT(nn.Module):
def __init__(self,
vocab_size: int = 50304,
max_position_embeddings: int = 1024,
dim: int = 768,
num_heads: int = 12,
depth: int = 12,
mlp_ratio: float = 4.0,
dropout: float = 0.1,
embedding_dropout: float = 0.1,
attention_dropout: float = 0.1,
layernorm_epsilon: float = 1e-5,
activation: Callable = nn.functional.gelu,
padding_idx: int = None,
dtype: dtype = None,
bias: bool = True,
apply_post_layernorm: bool = False,
fuse_scale_mask_softmax: bool = False,
checkpoint: bool = False,
activation_offload: bool = False) -> None:
super().__init__()
self.embed = GPTEmbedding(embedding_dim=dim,
vocab_size=vocab_size,
max_position_embeddings=max_position_embeddings,
padding_idx=padding_idx,
dropout=embedding_dropout,
dtype=dtype)
self.blocks = nn.ModuleList([
GPTBlock(
dim=dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
activation=activation,
attention_dropout=attention_dropout,
dropout=dropout,
layernorm_epsilon=layernorm_epsilon,
dtype=dtype,
bias=bias,
apply_post_layernorm=apply_post_layernorm,
fuse_scale_mask_softmax=fuse_scale_mask_softmax,
checkpoint=checkpoint,
activation_offload=activation_offload
) for _ in range(depth)
])
self.norm = col_nn.LayerNorm(normalized_shape=dim, eps=layernorm_epsilon, dtype=dtype)
self.head = GPTLMHead(dim=dim,
vocab_size=vocab_size,
word_embeeding_weight=self.embed.word_embedding_weight,
dtype=dtype)
def forward(self, input_ids, attention_mask=None):
x = self.embed(input_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# Adapted from huggingface
if attention_mask is not None:
batch_size = input_ids.shape[0]
attention_mask = attention_mask.view(batch_size, -1)
attention_mask = col_nn.partition_batch(attention_mask)
attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
attention_mask = attention_mask.to(dtype=x.dtype) # fp16 compatibility
attention_mask = (1.0 - attention_mask) * -10000.0
for block in self.blocks:
x, attention_mask = block(x, attention_mask)
x = self.head(self.norm(x))
return x
class PipelineGPT(nn.Module):
def __init__(self,
vocab_size: int = 50304,
max_position_embeddings: int = 1024,
dim: int = 768,
num_heads: int = 12,
depth: int = 12,
mlp_ratio: float = 4.0,
dropout: float = 0.1,
embedding_dropout: float = 0.1,
attention_dropout: float = 0.1,
layernorm_epsilon: float = 1e-5,
activation: Callable = nn.functional.gelu,
padding_idx: int = None,
dtype: dtype = None,
bias: bool = True,
apply_post_layernorm: bool = False,
fuse_scale_mask_softmax: bool = False,
checkpoint: bool = False,
first: bool = False,
last: bool = False):
super().__init__()
self.checkpoint = checkpoint
self.first = first
self.last = last
if first:
self.embed = GPTEmbedding(embedding_dim=dim,
vocab_size=vocab_size,
max_position_embeddings=max_position_embeddings,
padding_idx=padding_idx,
dropout=embedding_dropout,
dtype=dtype)
self.blocks = nn.ModuleList([
GPTBlock(
dim=dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
activation=activation,
attention_dropout=attention_dropout,
dropout=dropout,
layernorm_epsilon=layernorm_epsilon,
dtype=dtype,
bias=bias,
apply_post_layernorm=apply_post_layernorm,
fuse_scale_mask_softmax=fuse_scale_mask_softmax,
checkpoint=checkpoint,
) for _ in range(depth)
])
if self.last:
self.norm = col_nn.LayerNorm(normalized_shape=dim, eps=layernorm_epsilon, dtype=dtype)
self.head = GPTLMHead(dim=dim, vocab_size=vocab_size, dtype=dtype)
def forward(self, x=None, input_ids=None, attention_mask=None):
if self.first:
x = self.embed(input_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# Adapted from huggingface
if attention_mask is not None:
if self.first:
batch_size = input_ids.shape[0]
else:
batch_size = x.shape[0]
attention_mask = attention_mask.view(batch_size, -1)
attention_mask = col_nn.partition_batch(attention_mask)
attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
attention_mask = attention_mask.to(dtype=x.dtype) # fp16 compatibility
attention_mask = (1.0 - attention_mask) * -10000.0
for block in self.blocks:
x, attention_mask = block(x, attention_mask)
if self.last:
x = self.head(self.norm(x))
return x
def _create_gpt_model(**model_kwargs):
model = GPT(**model_kwargs)
return model
def _create_gpt_pipeline_model(depth=48, num_chunks=1, layer_partitions=None, **model_kwargs):
logger = get_dist_logger()
pipeline_size = gpc.get_world_size(ParallelMode.PIPELINE)
pipeline_rank = gpc.get_local_rank(ParallelMode.PIPELINE)
rank = gpc.get_global_rank()
wrapper = PipelineSharedModuleWrapper([0, pipeline_size - 1])
parts = partition_uniform(depth, pipeline_size,
num_chunks)[pipeline_rank] if layer_partitions is None else layer_partitions
models = []
for start, end in parts:
model_kwargs['first'] = start == 0
model_kwargs['last'] = end == depth
model_kwargs['depth'] = end - start
chunk = PipelineGPT(**model_kwargs).to(get_current_device())
if start == 0:
wrapper.register_parameter(chunk.embed.word_embedding_weight)
elif end == depth:
wrapper.register_parameter(chunk.head.weight)
models.append(chunk)
logger.info(f'==> Rank {rank} built layer {start}-{end} / total {depth}')
if len(models) == 1:
model = models[0]
else:
model = nn.ModuleList(models)
return model
@MODELS.register_module
def gpt2_small(**kwargs):
model_kwargs = dict(dim=768, depth=12, num_heads=12, **kwargs)
return _create_gpt_model(**model_kwargs)
@MODELS.register_module
def gpt2_medium(**kwargs):
model_kwargs = dict(dim=1024, depth=24, num_heads=8, **kwargs)
return _create_gpt_model(**model_kwargs)
@MODELS.register_module
def gpt2_large(**kwargs):
model_kwargs = dict(dim=1536, depth=36, num_heads=12, **kwargs)
return _create_gpt_model(**model_kwargs)
@MODELS.register_module
def gpt2_xl(**kwargs):
model_kwargs = dict(dim=1600, depth=48, num_heads=16, **kwargs)
return _create_gpt_model(**model_kwargs)
@MODELS.register_module
def gpt2_8B(**kwargs):
model_kwargs = dict(dim=3072, depth=72, num_heads=24, **kwargs)
return _create_gpt_model(**model_kwargs)
@MODELS.register_module
def gpt2_xl_pipeline(**kwargs):
model_kwargs = dict(dim=1600, depth=48, num_heads=20, **kwargs)
return _create_gpt_pipeline_model(**model_kwargs)
@MODELS.register_module
def gpt2_8B_pipeline(**kwargs):
model_kwargs = dict(dim=3072, depth=72, num_heads=24, **kwargs)
return _create_gpt_pipeline_model(**model_kwargs)
@MODELS.register_module
def gpt3(**kwargs):
model_kwargs = dict(dim=12288, depth=96, num_heads=96, **kwargs)
return _create_gpt_model(**model_kwargs)
@MODELS.register_module
def gpt3_pipeline(**kwargs):
model_kwargs = dict(dim=12288, depth=96, num_heads=96, **kwargs)
return _create_gpt_pipeline_model(**model_kwargs)
```
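As a rough usage sketch (editor's addition, not repo code): the registered factory functions above return a ready-to-use module once `colossalai.launch(...)` has initialised the parallel context consulted by the `col_nn` layers. The shapes below assume the default vocabulary size and maximum sequence length, with no tensor parallelism.
```python
import torch

# Build the smallest registered variant with activation checkpointing enabled.
model = gpt2_small(checkpoint=True, dtype=torch.half).cuda()
criterion = GPTLMLoss()

input_ids = torch.randint(0, 50304, (2, 1024), device="cuda")  # (batch, seq_len)
attention_mask = torch.ones_like(input_ids)

logits = model(input_ids, attention_mask)   # (2, 1024, 50304)
loss = criterion(logits, input_ids)         # shifted next-token prediction loss
loss.backward()
```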
#### File: tests/components_to_test/bert.py
```python
import torch
import transformers
from packaging import version
from torch.utils.data import SequentialSampler
from transformers import BertConfig, BertForSequenceClassification
from .registry import non_distributed_component_funcs
def get_bert_data_loader(
batch_size,
total_samples,
sequence_length,
device=torch.device('cpu:0'),
is_distrbuted=False,
):
train_data = torch.randint(
low=0,
high=1000,
size=(total_samples, sequence_length),
device=device,
dtype=torch.long,
)
train_label = torch.randint(low=0, high=2, size=(total_samples,), device=device, dtype=torch.long)
train_dataset = torch.utils.data.TensorDataset(train_data, train_label)
if is_distrbuted:
sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
sampler = SequentialSampler(train_dataset)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, sampler=sampler)
return train_loader
@non_distributed_component_funcs.register(name='bert')
def get_training_components():
hidden_dim = 8
num_head = 4
sequence_length = 12
num_layer = 2
def bert_model_builder(checkpoint):
config = BertConfig(gradient_checkpointing=checkpoint,
hidden_size=hidden_dim,
intermediate_size=hidden_dim * 4,
num_attention_heads=num_head,
max_position_embeddings=sequence_length,
num_hidden_layers=num_layer,
hidden_dropout_prob=0.,
attention_probs_dropout_prob=0.)
print('building BertForSequenceClassification model')
        # adapting huggingface BertForSequenceClassification to the single unit-test calling interface
class ModelAaptor(BertForSequenceClassification):
def forward(self, input_ids, labels):
"""
inputs: data, label
outputs: loss
"""
return super().forward(input_ids=input_ids, labels=labels)[0]
model = ModelAaptor(config)
if checkpoint and version.parse(transformers.__version__) >= version.parse("4.11.0"):
model.gradient_checkpointing_enable()
return model
trainloader = get_bert_data_loader(batch_size=2,
total_samples=10000,
sequence_length=sequence_length,
is_distrbuted=True)
testloader = get_bert_data_loader(batch_size=2,
total_samples=10000,
sequence_length=sequence_length,
is_distrbuted=True)
criterion = None
return bert_model_builder, trainloader, testloader, torch.optim.Adam, criterion
```
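The registry entry above is consumed the same way as in the ZeRO engine test later in this dump: fetch the components by name, build the model, and drive it with the synthetic loader. A minimal sketch (editor's addition; the distributed sampler assumes a torch.distributed process group is already initialised):
```python
from tests.components_to_test.registry import non_distributed_component_funcs

model_builder, trainloader, _, optimizer_class, _ = non_distributed_component_funcs.get_callable("bert")()
model = model_builder(checkpoint=False)
optimizer = optimizer_class(model.parameters(), lr=1e-3)

for input_ids, labels in trainloader:
    loss = model(input_ids, labels)   # the adaptor returns the loss directly
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
    break                             # a single step is enough for the sketch
```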
#### File: tests/test_moe/test_grad_handler.py
```python
from functools import partial
import pytest
import torch
import torch.nn as nn
import torch.multiprocessing as mp
import torch.distributed as dist
import colossalai
from colossalai.utils import free_port, get_current_device
from colossalai.nn.layer.moe import Top1Router, UniformNoiseGenerator, MoeLayer, Experts
from colossalai.context.moe_context import MOE_CONTEXT
from colossalai.utils.moe import sync_moe_model_param
from colossalai.engine.gradient_handler import MoeGradientHandler
from colossalai.testing import assert_equal_in_group
from colossalai.testing import rerun_on_exception
BATCH_SIZE = 4
DIM = 16
CONFIG = dict()
def run_test(rank, world_size, port):
colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
expert_module = nn.Linear
expert_factor = dict(in_features=DIM, out_features=DIM, device=get_current_device())
MOE_CONTEXT.setup(42) # MOE initialization
noisy_func = UniformNoiseGenerator()
router = Top1Router(noisy_func=noisy_func)
num_experts_list = [1, 2, 4]
layer_list = []
for num_experts in num_experts_list:
exp = Experts(expert_module, num_experts, **expert_factor)
moe_layer = MoeLayer(DIM, num_experts, router, exp)
layer_list.append(moe_layer)
model = nn.Sequential(*layer_list)
model = model.to(get_current_device())
sync_moe_model_param(model)
dist_dict = MOE_CONTEXT.parallel_info_dict
assert_equal_in_group(layer_list[0].experts.experts[0].weight.data, dist_dict[1].dp_group)
assert_equal_in_group(layer_list[1].experts.experts[0].weight.data, dist_dict[2].dp_group)
# MoE model synchronization passed
grad_handler = MoeGradientHandler(model, 0)
rank = dist.get_rank()
torch.cuda.manual_seed(78 + rank)
data = torch.randn(BATCH_SIZE, DIM, device=get_current_device())
grad = torch.randn_like(data)
MOE_CONTEXT.reset_loss()
outputs = model(data)
outputs.backward(grad)
grad_handler.handle_gradient()
assert_equal_in_group(layer_list[0].experts.experts[0].weight.grad, dist_dict[1].dp_group)
assert_equal_in_group(layer_list[0].experts.experts[0].bias.grad, dist_dict[1].dp_group)
assert_equal_in_group(layer_list[1].experts.experts[0].weight.grad, dist_dict[2].dp_group)
assert_equal_in_group(layer_list[1].experts.experts[0].bias.grad, dist_dict[2].dp_group)
# MoE grad handler test passed
@pytest.mark.dist
@rerun_on_exception(exception_type=mp.ProcessRaisedException, pattern=".*Address already in use.*")
def test_grad_handler():
world_size = 4
run_func = partial(run_test, world_size=world_size, port=free_port())
mp.spawn(run_func, nprocs=world_size)
if __name__ == '__main__':
test_grad_handler()
```
#### File: tests/test_moe/test_moe_zero_init.py
```python
from functools import partial
import colossalai
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.logging import get_dist_logger
from colossalai.testing import parameterize
from colossalai.utils import free_port
from colossalai.context import MOE_CONTEXT
from colossalai.nn.layer import MoeModule
from colossalai.zero.init_ctx import ZeroInitContext
from colossalai.zero.shard_utils import (BucketTensorShardStrategy, TensorShardStrategy)
from colossalai.testing import rerun_on_exception
from colossalai.utils import get_current_device
from tests.test_zero_data_parallel.common import CONFIG
class MoeModel(nn.Module):
def __init__(self):
super().__init__()
self.proj1 = nn.Linear(4, 16)
expert_cls = nn.Linear
expert_args_dict = dict(in_features=16, out_features=16)
self.moe = MoeModule(dim_model=16, num_experts=8, use_residual=True, expert_cls=expert_cls, **expert_args_dict)
self.proj2 = nn.Linear(16, 4)
def forward(self, x):
x = self.proj1(x)
x = self.moe(x)
x = self.proj2(x)
return x
@parameterize("init_device_type", ['cpu', 'cuda'])
@parameterize("shard_strategy_class", [TensorShardStrategy, BucketTensorShardStrategy])
def run_moe_zero_init(init_device_type, shard_strategy_class):
logger = get_dist_logger("test_moe_zero_init")
if init_device_type == 'cuda':
init_device = torch.device(f"cuda:{get_current_device()}")
elif init_device_type == 'cpu':
init_device = torch.device("cpu")
else:
raise NotImplementedError("Unknown device found.")
model_numel_tensor = torch.zeros(1, dtype=torch.int)
with ZeroInitContext(target_device=init_device,
shard_strategy=shard_strategy_class(),
shard_param=True,
model_numel_tensor=model_numel_tensor,
rm_torch_payload_on_the_fly=False):
model = MoeModel()
for name, param in model.named_parameters():
assert hasattr(param, 'colo_attr')
# the weights in the gate should be fp32
if 'gate' in name:
assert param.colo_attr.sharded_data_tensor.dtype == torch.float32
else:
assert param.colo_attr.sharded_data_tensor.dtype == torch.half
# the parameters in moe experts and its gate should not be sharded
if ('experts' in name) or ('gate' in name) or ('residual_combine' in name):
assert not param.colo_attr.sharded_data_tensor.is_sharded
else:
assert param.colo_attr.sharded_data_tensor.is_sharded
# the parameters in moe experts is not replicated
if 'experts' in name:
assert not param.is_replicated
else:
assert param.is_replicated
assert param.colo_attr.sharded_data_tensor.payload.device.type == init_device.type, \
f'{param.colo_attr.sharded_data_tensor.payload.device.type} vs. {init_device.type}'
def _run_dist(rank, world_size, port):
colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
MOE_CONTEXT.setup(seed=42)
run_moe_zero_init()
@pytest.mark.dist
@pytest.mark.parametrize("world_size", [2, 4])
@rerun_on_exception(exception_type=mp.ProcessRaisedException, pattern=".*Address already in use.*")
def test_moe_zero_init(world_size):
run_func = partial(_run_dist, world_size=world_size, port=free_port())
mp.spawn(run_func, nprocs=world_size)
if __name__ == '__main__':
test_moe_zero_init(world_size=2)
```
#### File: tests/test_zero_data_parallel/test_zero_engine.py
```python
from functools import partial
import colossalai
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from colossalai.core import global_context as gpc
from colossalai.testing import rerun_on_exception
from colossalai.utils import free_port
from colossalai.zero.init_ctx import ZeroInitContext
from colossalai.zero.sharded_model.utils import col_model_deepcopy
from colossalai.zero.sharded_optim._utils import has_inf_or_nan
from tests.components_to_test.registry import non_distributed_component_funcs
from torch.nn.parallel import DistributedDataParallel as DDP
from common import (MP_PARALLEL_CONFIG, ZERO_PARALLEL_CONFIG, check_params, check_sharded_model_params)
def run_dist(rank, world_size, port, parallel_config):
colossalai.launch(config=parallel_config,
rank=rank,
world_size=world_size,
host='localhost',
port=port,
backend='nccl')
test_models = ['repeated_computed_layers', 'resnet18', 'bert']
for model_name in test_models:
get_components_func = non_distributed_component_funcs.get_callable(model_name)
model_builder, train_dataloader, _, optimizer_class, criterion = get_components_func()
with ZeroInitContext(target_device=torch.cuda.current_device(),
shard_strategy=gpc.config.zero.model_config.shard_strategy,
shard_param=True):
colo_model = model_builder(checkpoint=True)
colo_optimizer = optimizer_class(colo_model.parameters(), lr=1e-3)
engine, train_dataloader, _, _ = colossalai.initialize(colo_model,
optimizer=colo_optimizer,
criterion=criterion,
train_dataloader=train_dataloader)
torch_model = model_builder(checkpoint=True).half()
col_model_deepcopy(engine.model, torch_model)
torch_model = torch_model.cuda().float()
engine.train()
torch_optimizer = optimizer_class(torch_model.parameters(), lr=1e-3)
if dist.get_world_size() > 1:
torch_model = DDP(torch_model)
i = 0
for data, label in train_dataloader:
if i > 4:
break
data, label = data.cuda(), label.cuda()
engine.zero_grad()
torch_optimizer.zero_grad()
if criterion:
output = engine(data)
loss = engine.criterion(output, label)
torch_output = torch_model(data)
torch_loss = engine.criterion(torch_output, label)
else:
loss = engine(data, label)
torch_loss = torch_model(data, label)
engine.backward(loss)
engine.step()
torch_loss.backward()
for param in torch_model.parameters():
if param.grad is not None:
assert not has_inf_or_nan(param.grad)
torch_optimizer.step()
i += 1
if parallel_config == MP_PARALLEL_CONFIG:
check_params(torch_model, colo_model, loose=True)
elif parallel_config == ZERO_PARALLEL_CONFIG:
check_sharded_model_params(torch_model, colo_model, loose=True)
# FIXME: enable this test in next PR
@pytest.mark.skip
@pytest.mark.dist
@pytest.mark.parametrize("world_size", [2, 4])
@rerun_on_exception(exception_type=mp.ProcessRaisedException, pattern=".*Address already in use.*")
def test_mp_engine(world_size):
run_func = partial(run_dist, world_size=world_size, port=free_port(), parallel_config=MP_PARALLEL_CONFIG)
mp.spawn(run_func, nprocs=world_size)
@pytest.mark.dist
@pytest.mark.parametrize("world_size", [1, 2])
@rerun_on_exception(exception_type=mp.ProcessRaisedException, pattern=".*Address already in use.*")
def test_zero_engine(world_size):
run_func = partial(run_dist, world_size=world_size, port=free_port(), parallel_config=ZERO_PARALLEL_CONFIG)
mp.spawn(run_func, nprocs=world_size)
if __name__ == '__main__':
test_zero_engine(world_size=4)
``` |
{
"source": "JiangZehua/control-pcgrl3D",
"score": 4
} |
#### File: gym_pcgrl/envs/helper_3D.py
```python
import matplotlib.pyplot as plt
import numpy as np
from pdb import set_trace as TT
"""
Public function to get a dictionary of all locations of all tiles
Parameters:
map (any[][][]): the current map
[[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0]]]
tile_values (any[]): an array of all the tile values that are possible
Returns:
Dict(string,(int,int)[]): positions for every certain tile_value
"""
def get_tile_locations(map, tile_values):
tiles = {}
for t in tile_values:
tiles[t] = []
for z in range(len(map)):
for y in range(len(map[z])):
for x in range(len(map[z][y])):
tiles[map[z][y][x]].append((x,y,z))
return tiles
"""
Get the vertical distance to certain types of tiles
Parameters:
map (any[][][]): the actual map
x (int): the x position of the start location
y (int): the y position of the start location
z (int): the z position of the start location
types (any[]): an array of types of tiles
Returns:
int: the distance to certain types underneath a certain location
"""
def _calc_dist_floor(map, x, y, z, types):
for dz in range(len(map)):
if z+dz >= len(map):
break
if map[z+dz][y][x] in types:
return dz-1
return len(map) - 1
"""
Public function to calculate the total distance of certain tiles to the floor tiles
Parameters:
    map (any[][][]): the current map
    fromTypes (any[]): an array of all the tile values whose distance to the floor is being measured
    floorTypes (any[]): an array of all the tile values that are considered floor
Returns:
    int: the summed distance of the matching tiles to the floor, where 0 means directly on top of the floor
"""
def get_floor_dist(map, fromTypes, floorTypes):
result = 0
for z in range(len(map)):
for y in range(len(map[z])):
for x in range(len(map[z][y])):
if map[z][y][x] in fromTypes:
result += _calc_dist_floor(map, x, y, z, floorTypes)
return result
"""
Get the number of tiles that have a certain value around a certain position
Parameters:
map (any[][][]): the current map
x (int): the x position of the start location
y (int): the y position of the start location
z (int): the z position of the start location
types (any[]): an array of types of tiles
relLocs ((int,int,int)[]): a tuple array of all the relative positions
Returns:
int: the number of similar tiles around a certain location
"""
def _calc_group_value(map, x, y, z, types, relLocs):
result = 0
for l in relLocs:
nx, ny, nz= x+l[0], y+l[1], z+l[2]
if nx < 0 or ny < 0 or nz < 0 or nx >= len(map[0]) or ny >= len(map) or nz>=len(map):
continue
if map[nz][ny][nx] in types:
result += 1
return result
"""
Get the number of tiles that is a group of certain size
Parameters:
map (any[][][]): the current map
types (any[]): an array of types of tiles
relLocs ((int,int,int)[]): a tuple array of all the relative positions
min (int): min number of tiles around
max (int): max number of tiles around
Returns:
int: the number of tiles that have surrounding between min and max
"""
def get_type_grouping(map, types, relLocs, min, max):
result = 0
for z in range(len(map)):
for y in range(len(map[z])):
for x in range(len(map[z][y])):
if map[z][y][x] in types:
value = _calc_group_value(map, x, y, z, types, relLocs)
if value >= min and value <= max:
result += 1
return result
"""
Get the number of changes of tiles in either vertical or horizontal direction
Parameters:
map (any[][][]): the current map
vertical (boolean): calculate the vertical changes instead of horizontal
Returns:
int: number of different tiles either in vertical or horizontal x-direction or horizontal y-direction
"""
def get_changes(map, vertical=False, y_dir=False):
start_z = 0
start_y = 0
start_x = 0
if vertical:
start_z = 1
elif y_dir:
start_y = 1
else:
start_x = 1
value = 0
for z in range(start_z, len(map)):
for y in range(start_y, len(map[z])):
for x in range(start_x, len(map[z][y])):
same = False
if vertical:
same = map[z][y][x] == map[z-1][y][x]
elif y_dir:
same = map[z][y][x] == map[z][y-1][x]
else:
same = map[z][y][x] == map[z][y][x-1]
if not same:
value += 1
return value
"""
Private function to get a list of all tile locations on the map that have any of
the tile_values
Parameters:
map_locations (Dict(string,(int,int,int)[])): the histogram of locations of the current map
tile_values (any[]): an array of all the tile values that the method is searching for
Returns:
(int,int,int)[]: a list of (x,y,z) position on the map that have a certain value
"""
def _get_certain_tiles(map_locations, tile_values):
tiles=[]
for v in tile_values:
tiles.extend(map_locations[v])
return tiles
'''
Private function that checks whether the current position is standable: a position is standable only when there
are two vertically adjacent passable blocks (the character is 2 blocks tall)
Parameters:
    x (int): The current x position
    y (int): The current y position
    z (int): The current z position
    map (any[][][]): the current tile map to check
    passable_values (any[]): an array of all the passable tile values
Return:
    boolean: True if the position is standable
'''
def _standable(map, x, y, z, passable_values):
nx, ny, nz = x, y, z+1
if nz < 0 or nz >= len(map):
return False
elif (map[nz][ny][nx] in passable_values
and map[z][y][x] in passable_values):
return True
else:
return False
'''
Private function that determines where the agent can move from the current position. An adjacent position is
reachable only if it leaves room for the character's head (the character is 2 blocks tall).
We assume that the agent's priority of actions is: walk forward > climb ladders > jump
Parameters:
    x (int): The current x position
    y (int): The current y position
    z (int): The current z position
    map (any[][][]): the current tile map to check
    passable_values (any[]): an array of all the passable tile values
Return:
    (int,int,int)[]: a list of the positions the agent can move to from (x, y, z)
'''
def _passable(map, x, y, z, passable_values):
passable_tiles = []
# Check 4 adjacent directions: forward, back, left, right. For each, it is passable if we can move to it while
# moving up/down-stairs or staying level.
for dir in [(1,0), (0,1), (-1,0), (0,-1)]:
nx, ny, nz= x+dir[0], y+dir[1], z
jx, jy, jz = x+dir[0]+dir[0], y+dir[1]+dir[1], z
# Check if out of bounds, if so, skip it
if (nx < 0 or ny < 0 or nx >= len(map[z][y]) or ny >= len(map[z])):
continue
# Check whether can stay at the same level.
if (
# nz+1 < len(map) and # Head-room at our next position is guaranteed if our current position is valid.
(nz == 0 or # Either we are on the bottom of the map...
nz > 0 and map[nz-1][ny][nx] not in passable_values) # ...or moving onto an impassable (solid) tile.
and map[nz][ny][nx] in passable_values # Foot-room at our next position.
and map[nz+1][ny][nx] in passable_values # Head-room at our next position.
):
passable_tiles.append((nx, ny, nz))
# Check whether we can go down a step.
elif (
# nz+1 < len(map) and # Head-room is guaranteed if our current position is valid.
            (nz-1 == 0 # Either we are moving onto the bottom of the map...
             or nz-1 > 0 and map[nz-2][ny][nx] not in passable_values) # ... or onto an impassable (solid) tile.
and map[nz-1][ny][nx] in passable_values # Foot-room at the lower stair.
and map[nz][ny][nx] in passable_values # Head-room at the lower stair.
and map[nz+1][ny][nx] in passable_values # Extra head-room at the lower (next) stair.
):
passable_tiles.append((nx, ny, nz-1))
# Check whether can go up a step.
elif (nz+2 < len(map) # Our head must remain inside the map.
and map[nz][ny][nx] not in passable_values # There must be a (higher) stair to climb onto.
and map[nz+1][ny][nx] in passable_values # Foot-room at the higher stair.
and map[nz+2][ny][nx] in passable_values # Head-room at the higher stair.
and map[nz+2][y][x] in passable_values # Extra head-room at the lower (current) stair.
):
passable_tiles.append((nx, ny, nz+1))
# TODO: Check for ladder: (ladder tiles are passable)
# if current tile is ladder, then check if extra head-room above(or still ladder above). If so, can move up.
# if tile below is ladder, can move down.
# Check whether we can jump over a tile.
        # Note: Currently we only check whether we can jump over a tile and land on (roughly) the same level, since we
        # want to make sure the path is returnable, i.e. max fall height < 2 (1 is to go down a stair) and max jump
        # distance is 1. The max height difference between the starting point and the foothold is 1.
elif (
nz - 2 >= 0 and nz + 2 < len(map) # Our head must remain inside the map and the empty space below must >= 2
and map[nz+2][ny][nx] in passable_values # five blocks ahead must be passable
and map[nz+1][ny][nx] in passable_values
and map[nz][ny][nx] in passable_values
and map[nz-1][ny][nx] in passable_values
and map[nz-2][ny][nx] in passable_values
and map[nz+2][y][x] in passable_values # The extra 1 head-room at the starting point must be passable
and jx >= 0 and jy >= 0 and jx < len(map[z][y]) and jy < len(map[z]) # The foothold must be in the map
):
if (# the height difference is 0
map[jz+1][jy][jx] in passable_values # head room at the foothold
and map[jz][jy][jx] in passable_values # foot room at the foothold
and map[jz-1][jy][jx] not in passable_values # the solid foothold
):
passable_tiles.append((jx, jy, jz))
elif (# the height difference is 1
map[jz+2][jy][jx] in passable_values # head room at the foothold
and map[jz+1][jy][jx] in passable_values # foot room at the foothold
and map[jz][jy][jx] not in passable_values # the solid foothold
):
passable_tiles.append((jx, jy, jz+1))
elif (# the height difference is -1
map[jz][jy][jx] in passable_values # head room at the foothold
and map[jz-1][jy][jx] in passable_values # foot room at the foothold
and map[jz-2][jy][jx] not in passable_values # the solid foothold
):
passable_tiles.append((jx, jy, jz-1))
else:
# check_jump func here
continue
return passable_tiles
# NEXT:
def check_jump(map, x, y, z, dir, passable_values):
"""
    Check whether the agent can jump without getting hurt.
    Note: We assume that in Minecraft the maximum jump distance is 3 blocks and that the agent is not hurt by
    drops of height <= 3, i.e. without a ladder the agent can only jump up 1 block or drop down at most 3 blocks.
Note: We assume x+dir[0] and y+dir[1] are valid when we call this function.
Returns:
coordinates: all the possible coordinates of the jump destinations
"""
jx, jy, jz = x+dir[0]+dir[0], y+dir[1]+dir[1], z
return
"""
Private function that runs flood fill algorithm on the current color map
Parameters:
x (int): the starting x position of the flood fill algorithm
y (int): the starting y position of the flood fill algorithm
z (int): the starting z position of the flood fill algorithm
color_map (int[][][]): the color map that is being colored
map (any[][][]): the current tile map to check
color_index (int): the color used to color in the color map
passable_values (any[]): the current values that can be colored over
Returns:
int: the number of tiles that has been colored
"""
def _flood_fill(x, y, z, color_map, map, color_index, passable_values):
num_tiles = 0
queue = [(x, y, z)]
while len(queue) > 0:
(cx, cy, cz) = queue.pop(0)
# If tile has been visited, skip it.
        if color_map[cz][cy][cx] != -1: # or (not _passable(map, cx, cy, cz, passable_values) and not _standable(map, cx, cy, cz, passable_values)):
continue
num_tiles += 1
color_map[cz][cy][cx] = color_index
# Look at all adjacent tiles.
for (dx,dy,dz) in [(-1, 0, 0), (1, 0, 0), (0, -1, 0), (0, 1, 0), (0, 0, -1), (0, 0, 1)]:
nx,ny,nz = cx+dx, cy+dy, cz+dz
# If adjacent tile is out of bounds, skip it.
if nx < 0 or ny < 0 or nz < 0 or nx >= len(map[0][0]) or ny >= len(map[0]) or nz >= len(map):
continue
# If adjacent tile is not passable, skip it.
if map[nz][ny][nx] not in passable_values:
continue
# Otherwise, add adjacent tile to the queue.
queue.append((nx, ny, nz))
return num_tiles
"""
Calculates the number of regions in the current map with passable_values
Parameters:
map (any[][][]): the current map being tested
map_locations(Dict(string,(int,int,int)[])): the histogram of locations of the current map
passable_values (any[]): an array of all the passable tile values
Returns:
int: number of regions in the map
"""
def calc_num_regions(map, map_locations, passable_values):
empty_tiles = _get_certain_tiles(map_locations, passable_values)
region_index=0
color_map = np.full((len(map), len(map[0]), len(map[0][0])), -1)
for (x,y,z) in empty_tiles:
num_tiles = _flood_fill(x, y, z, color_map, map, region_index + 1, passable_values)
if num_tiles > 0:
region_index += 1
else:
continue
return region_index
"""
Public function that runs dijkstra algorithm and return the map
Parameters:
x (int): the starting x position for dijkstra algorithm
y (int): the starting y position for dijkstra algorithm
z (int): the starting z position for dijkstra algorithm
map (any[][][]): the current map being tested
passable_values (any[]): an array of all the passable tile values
Returns:
int[][][]: returns the dijkstra map after running the dijkstra algorithm
"""
def run_dijkstra(x, y, z, map, passable_values):
dijkstra_map = np.full((len(map), len(map[0]), len(map[0][0])), -1)
visited_map = np.zeros((len(map), len(map[0]), len(map[0][0])))
queue = [(x, y, z, 0)]
while len(queue) > 0:
# Looking at a new tile
(cx,cy,cz,cd) = queue.pop(0)
# Skip tile if we've already visited it
if dijkstra_map[cz][cy][cx] >= 0 and dijkstra_map[cz][cy][cx] <= cd:
continue
# We never start path-finding from a position at which the player cannot stand. Foot-room is guaranteed, so we
# check for headroom.
# Zelda (and other games maybe) calls this function directly without calling calc_longest_path, so we need to
# add this check here.
        if cz+1 == len(map) or map[cz+1][cy][cx] not in passable_values:
visited_map[cz][cy][cx] = 1
continue
# Count the tile as visited and record its distance
visited_map[cz][cy][cx] = 1
dijkstra_map[cz][cy][cx] = cd
        # Call _passable, which returns the (x, y, z) coordinates of tiles the player can travel to from here
        # (walking on the level, taking stairs up/down, or jumping), rather than naively expanding to all 6 neighbors.
for (nx, ny, nz) in _passable(map, cx, cy, cz, passable_values):
# # Check that the new tiles are in the bounds of the level
# nx,ny,nz=cx+dx,cy+dy,cz+dz
# if nx < 0 or ny < 0 or nz <0 or nx >= len(map[0][0]) or ny >= len(map[0]) or nz >=len(map):
# # If out of bounds, do not add the new tile to the frontier
# continue
# Add the new tile to the frontier
queue.append((nx, ny, nz, cd + 1))
# if cz == 3:
# print(f"**********current place: {cx},{cy},{cz}**********")
# print("queue in run_dijkstra: ", queue)
# print("dijkstra_map in run_dijkstra: ", dijkstra_map)
return dijkstra_map, visited_map
"""
Calculate the longest path on the map
Parameters:
map (any[][][]): the current map being tested
map_locations (Dict(string,(int,int,int)[])): the histogram of locations of the current map
passable_values (any[]): an array of all passable tiles in the map
Returns:
int: the longest path value in tiles in the current map
"""
def calc_longest_path(map, map_locations, passable_values, get_path=False):
empty_tiles = _get_certain_tiles(map_locations, passable_values)
final_visited_map = np.zeros((len(map), len(map[0]), len(map[0][0])))
final_value = 0
# We'll iterate over all empty tiles. But checking against the visited_map means we only perform path-finding
# algorithms once per connected component.
for (x,y,z) in empty_tiles:
if final_visited_map[z][y][x] > 0:
continue
# We never start path-finding from a position at which the player cannot stand. Foot-room is guaranteed, so we
# check for headroom.
if z+1 == len(map) or map[z+1][y][x] not in passable_values:
final_visited_map[z][y][x] = 1
continue
# Calculate the distance from the current tile to all other (reachable) tiles.
dijkstra_map, visited_map = run_dijkstra(x, y, z, map, passable_values)
final_visited_map += visited_map
# Get furthest tile from current tile.
(mz,my,mx) = np.unravel_index(np.argmax(dijkstra_map, axis=None), dijkstra_map.shape)
# Search again from this furthest tile. This tile must belong to a longest shortest path within this connected
# component. Search again to find this path.
dijkstra_map, _ = run_dijkstra(mx, my, mz, map, passable_values)
max_value = np.max(dijkstra_map)
# Store this path/length if it is the longest of all connected components visited thus far.
if max_value > final_value:
final_value = max_value
if get_path:
path_map = dijkstra_map
path = []
if get_path and final_value > 0:
path = get_path_coords(path_map)
return final_value, path
"""
Recover a shortest path (as list of coords) from a dijkstra map,
using either some initial coords, or else from the furthest point
If you have trouble understanding this func, you can refer to the 2D version of this in helper.py
Parameters:
path_map: 3D dijkstra map
x, y, z (optional): ending point of the path
Returns:
list: the longest path's coordinates (in x, y, z form)
"""
ADJ_FILTER = np.array([[[0,1,0],
[1,0,1],
[0,1,0]],
[[0,1,0],
[1,0,1],
[0,1,0]],
[[0,1,0],
[1,0,1],
[0,1,0]]])
def get_path_coords(path_map, x=None, y=None, z=None, can_fly=False):
length, width, height = len(path_map[0][0]), len(path_map[0]), len(path_map)
pad_path_map = np.zeros(shape=(height + 2, width + 2, length + 2), dtype=np.int32)
pad_path_map.fill(0)
pad_path_map[1:height + 1, 1:width + 1, 1:length + 1] = path_map + 1
    if x is None:
# Work from the greatest cell value (end of the path) backward
max_cell = pad_path_map.max()
curr = np.array(np.where(pad_path_map == max_cell))
else:
curr = np.array([(z, y, x)], dtype=np.int32).T + 1
max_cell = pad_path_map[curr[0][0], curr[1][0], curr[2][0]]
zi, yi, xi = curr[:, 0]
# print("curr: ", curr)
# print("zi, yi, xi is curr[:, 0]: ", zi, yi, xi)
# print("max_cell: ", max_cell)
# print("iterating:")
path = np.zeros(shape=(max_cell, 3), dtype=np.int32)
i = 0
while max_cell > 1:
path[i, :] = [xi - 1, yi - 1, zi -1]
pad_path_map[zi, yi, xi] = -1
max_cell -= 1
x0, x1, y0, y1, z0, z1= xi - 1, xi + 2, yi - 1, yi + 2, zi-1, zi + 2
adj_mask = np.zeros(shape=(height + 2, width + 2, length + 2), dtype=np.int32)
adj_mask[z0: z1, y0: y1, x0: x1] = ADJ_FILTER
# print("curr: ", curr)
# print("zi, yi, xi is curr[:, 0]: ", zi, yi, xi)
# print("max_cell: ", max_cell)
curr = np.array(np.where(adj_mask * pad_path_map == max_cell))
# print("curr is changed to: ", curr)
# print("pad_path_map is : ", pad_path_map)
zi, yi, xi = curr[:, 0]
i += 1
if i > 0:
path[i, :] = [xi - 1, yi - 1, zi - 1]
    # if the agent can't fly, drop path cells that share the same (x, y) column, keeping only the bottom one
if not can_fly:
for i in range(0, len(path)):
if i == 0:
continue
else:
if path[i][0] == path[i-1][0] and path[i][1] == path[i-1][1]:
if path[i-1][2] > path[i][2]:
path[i-1, :] = [-1, -1, -1]
else:
path[i, :] = [-1, -1, -1]
path = np.delete(path, np.where(path < 0)[0], axis=0)
return path
def debug_path(path, map, passable_values):
"""
    Path debugging function: sanity-checks that every position on the path has head-room and is not floating.
"""
if len(path) == 0:
return True
for pos in path:
x, y, z = pos[0], pos[1], pos[2]
# checking if there is some issue with my head
if z + 2 > len(map):
print(f'My head is sticking out of range!!!!!!!!!!!!!!!! My foot is at the position {x}, {y}, {z}')
return False
if map[z+1][y][x] not in passable_values:
print(f'Something in position {x}, {y}, {z+1} blocks my head!!!!!!!!!!!!!!!!!!!!!!!!!!')
return False
# checking if I am floating
if z - 1 > 0 and map[z-1][y][x] in passable_values:
print(f"I am floating illegally!!!!!!!!! My position is {x}, {y}, {z}")
return False
return True
"""
Calculate the number of tiles that have certain values in the map
Returns:
int: get number of tiles in the map that have certain tile values
"""
def calc_certain_tile(map_locations, tile_values):
return len(_get_certain_tiles(map_locations, tile_values))
"""
Calculate the number of reachable tiles of a certain values from a certain starting value
The starting value has to be one on the map
Parameters:
map (any[][][]): the current map
start_value (any): the start tile value it has to be only one on the map
passable_values (any[]): the tile values that can be passed in the map
reachable_values (any[]): the tile values that the algorithm trying to reach
Returns:
int: number of tiles that has been reached of the reachable_values
"""
def calc_num_reachable_tile(map, map_locations, start_value, passable_values, reachable_values):
(sx,sy,sz) = _get_certain_tiles(map_locations, [start_value])[0]
dijkstra_map, _ = run_dijkstra(sx, sy, sz, map, passable_values)
tiles = _get_certain_tiles(map_locations, reachable_values)
total = 0
for (tx,ty,tz) in tiles:
if dijkstra_map[tz][ty][tx] >= 0:
total += 1
return total
"""
Generate random map based on the input Parameters
Parameters:
    random (numpy.random): random object to help generate the map
    length (int): the generated map length (x dimension)
    width (int): the generated map width (y dimension)
    height (int): the generated map height (z dimension)
    prob (dict(int,float)): the probability distribution of each tile value
Returns:
int[][][]: the random generated map
"""
def gen_random_map(random, length, width, height, prob):
map = random.choice(list(prob.keys()), size=(
height, width, length), p=list(prob.values())).astype(np.uint8)
return map
"""
A method to convert the map to use the tile names instead of tile numbers
Parameters:
map (numpy.int[][][]): a numpy 3D array of the current map
tiles (string[]): a list of all the tiles in order
Returns:
string[][][]: a 3D map of tile strings instead of numbers
"""
def get_string_map(map, tiles):
int_to_string = dict((i,s) for i, s in enumerate(tiles))
result = []
for z in range(map.shape[0]):
result.append([])
for y in range(map.shape[1]):
result[z].append([])
for x in range(map.shape[2]):
result[z][y].append(int_to_string[int(map[z][y][x])])
return result
"""
A method to convert the probability dictionary to use tile numbers instead of tile names
Parameters:
prob (dict(string,float)): a dictionary of the probabilities for each tile name
tiles (string[]): a list of all the tiles in order
Returns:
Dict(int,float): a dictionary of tile numbers to probability values (sum to 1)
"""
def get_int_prob(prob, tiles):
string_to_int = dict((s, i) for i, s in enumerate(tiles))
result = {}
total = 0.0
for t in tiles:
result[string_to_int[t]] = prob[t]
total += prob[t]
for i in result:
result[i] /= total
return result
"""
A method to help calculate the reward value based on the change around optimal region
Parameters:
new_value (float): the new value to be checked
old_value (float): the old value to be checked
low (float): low bound for the optimal region
high (float): high bound for the optimal region
Returns:
float: the reward value for the change between new_value and old_value
"""
def get_range_reward(new_value, old_value, low, high):
if new_value >= low and new_value <= high and old_value >= low and old_value <= high:
return 0
if old_value <= high and new_value <= high:
return min(new_value,low) - min(old_value,low)
if old_value >= low and new_value >= low:
return max(old_value,high) - max(new_value,high)
if new_value > high and old_value < low:
return high - new_value + old_value - low
if new_value < low and old_value > high:
return high - old_value + new_value - low
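# Editor's note (worked example, not part of the original module): with an optimal band of
# [low, high] = [20, 50], get_range_reward(30, 10, 20, 50) == min(30, 20) - min(10, 20) == 10,
# so moving up toward the band is rewarded; get_range_reward(60, 55, 20, 50) == max(55, 50) - max(60, 50) == -5,
# so drifting further above the band is penalised; and any move that starts and ends inside the band returns 0.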
"""
A function to plot the 3D structure of a path through the map
"""
def plot_3D_path(size_x, size_y, size_z, path_coords):
    # create the boolean map of the maze
path_boolean_map = np.full((size_z, size_y, size_x), False, dtype=bool)
for (x,y,z) in path_coords:
path_boolean_map[z][y][x] = True
# change the map axis for plotting
path_boolean_map = np.moveaxis(path_boolean_map, (0, 2), (2, 1))
# create the color map of the maze
path_color_map = np.empty(path_boolean_map.shape, dtype=object)
path_color_map[path_boolean_map] = "red"
# make a 3D plot
ax = plt.figure().add_subplot(projection='3d')
# scale the plot so that the blocks are cube but not cuboid
ax.set_box_aspect([path_boolean_map.shape[0]/path_boolean_map.shape[1],
1,
path_boolean_map.shape[2]/path_boolean_map.shape[1]])
# plot it out!
ax.voxels(path_boolean_map, facecolors=path_color_map, edgecolor='k')
plt.show()
```
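Putting the helpers together, a minimal end-to-end sketch (editor's addition; the tile names "AIR"/"DIRT" and the 6x6x6 size are hypothetical) generates a random 3D map, measures its longest traversable path, and sanity-checks the recovered path:
```python
import numpy as np

tiles = ["AIR", "DIRT"]
prob = {"AIR": 0.6, "DIRT": 0.4}

rng = np.random.RandomState(0)
int_map = gen_random_map(rng, length=6, width=6, height=6, prob=get_int_prob(prob, tiles))
str_map = get_string_map(int_map, tiles)

locations = get_tile_locations(str_map, tiles)
path_length, path = calc_longest_path(str_map, locations, ["AIR"], get_path=True)
print(f"longest path: {path_length} steps, "
      f"passes sanity checks: {debug_path(path, str_map, ['AIR'])}")
```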
#### File: envs/probs/face_prob.py
```python
import os
from functools import reduce
import numpy as np
from operator import mul
from PIL import Image, ImageDraw, ImageFont
from gym_pcgrl.envs.probs.problem import Problem
from gym_pcgrl.envs.helper import get_range_reward, get_tile_locations, calc_num_regions, calc_longest_path
from pdb import set_trace as TT
"""
Generate a 2D RGB tile map that reproduces a target face image as closely as possible
"""
class FaceProblem(Problem):
"""
The constructor is responsible of initializing all the game parameters
"""
def __init__(self):
super().__init__()
self._width = 32
self._height = 32
# font_size = 32
# try:
# font = ImageFont.truetype("arial.ttf", font_size)
# except OSError:
# try:
# font = ImageFont.truetype("LiberationMono-Regular.ttf", font_size)
# except OSError:
# font = ImageFont.truetype("SFNSMono.ttf", font_size)
# trg_image = Image.new(mode="RGB", size=(16, 16))
# draw = ImageDraw.Draw(trg_image)
# draw.text((1, 1), "A", font=font, fill=(255, 0, 0))
# trg_image.save("trg_img.png")
# self.face_np = np.array(trg_image)
with Image.open("gym_pcgrl/envs/probs/face/lena.jpeg") as im:
# im.show()
im = im.resize((self._width, self._height))
self.face_np = np.array(im)
# im.show()
im.save('face_trg.png')
# self.face_np = self.face_np.transpose(2, 0, 1)
# self._prob = {"empty": 0.5, "solid":0.5}
self._border_tile = "solid"
self._target_path = 20
self._random_probs = True
self._reward_weights = {
"face_1": 1,
}
# default conditional targets
self.static_trgs = {
"face_1": 0,
}
# boundaries for conditional inputs/targets
self.cond_bounds = {
"face_1": (0, 1),
}
self._reward_weights = {"face_1": 1}
"""
Get a list of all the different tile names
    Returns:
string[]: that contains all the tile names
"""
def get_tile_types(self):
return ['r','g','b']
def is_continuous(self):
return True
"""
Adjust the parameters for the current problem
Parameters:
width (int): change the width of the problem level
height (int): change the height of the problem level
probs (dict(string, float)): change the probability of each tile
            initialization, the names are "empty", "solid"
target_path (int): the current path length that the episode turn when it reaches
rewards (dict(string,float)): the weights of each reward change between the new_stats and old_stats
"""
def adjust_param(self, **kwargs):
self.render_path = kwargs.get('render', self.render_path) or kwargs.get('render_path', self.render_path)
super().adjust_param(**kwargs)
self._target_path = kwargs.get('target_path', self._target_path)
self._random_probs = kwargs.get('random_probs', self._random_probs)
rewards = kwargs.get('rewards')
if rewards is not None:
for t in rewards:
if t in self._reward_weights:
self._reward_weights[t] = rewards[t]
"""
Resets the problem to the initial state and save the start_stats from the starting map.
Also, it can be used to change values between different environment resets
Parameters:
start_stats (dict(string,any)): the first stats of the map
"""
def reset(self, start_stats):
super().reset(start_stats)
"""
Get the current stats of the map
Returns:
        dict(string,any): stats of the current map to be used in the reward, episode_over, debug_info calculations.
        The only stat used here is "face_1": the mean absolute per-channel difference between the map and the target image
"""
def get_stats(self, map, lenient_paths=False):
# map_locations = get_tile_locations(map, self.get_tile_types())
# self.path_length, self.path_coords = calc_longest_path(map, map_locations, ["empty"], get_path=self.render_path)
# return {
# "regions": calc_num_regions(map, map_locations, ["empty"]),
# "path-length": self.path_length,
# }
stats = {
"face_1": np.sum(np.abs(self.face_np.transpose(2, 0, 1)/255 - map)) / reduce(mul, map.shape),
}
return stats
"""
Get the current game reward between two stats
Parameters:
new_stats (dict(string,any)): the new stats after taking an action
old_stats (dict(string,any)): the old stats before taking an action
Returns:
float: the current reward due to the change between the old map stats and the new map stats
"""
def get_reward(self, new_stats, old_stats):
# reward changes in the face_1 stat (the pixel-wise distance to the target image)
rewards = {
"face_1": get_range_reward(new_stats["face_1"], old_stats["face_1"], 1, 1),
}
#calculate the total reward
return rewards["face_1"] * self._reward_weights["face_1"]
"""
Uses the stats to check if the problem ended (episode_over) which means reached
a satisfying quality based on the stats
Parameters:
new_stats (dict(string,any)): the new stats after taking an action
old_stats (dict(string,any)): the old stats before taking an action
Returns:
boolean: True if the level reached satisfying quality based on the stats and False otherwise
"""
def get_episode_over(self, new_stats, old_stats):
# return new_stats["regions"] == 1 and new_stats["path-length"] - self._start_stats["path-length"] >= self._target_path
return new_stats["face_1"] == 1
"""
Get any debug information need to be printed
Parameters:
new_stats (dict(string,any)): the new stats after taking an action
old_stats (dict(string,any)): the old stats before taking an action
Returns:
dict(any,any): is a debug information that can be used to debug what is
happening in the problem
"""
def get_debug_info(self, new_stats, old_stats):
return {
"face_1": new_stats["face_1"] - self._start_stats["face_1"]
}
"""
Get an image of how a specific map will look
Parameters:
map (string[][]): the current game map
Returns:
Image: a Pillow image of how the map will look, rendered directly as an RGB array
"""
def render(self, map):
# FIXME: this seems maaaaad inefficient no?
map = map.transpose(1, 2, 0)
# map = self.face_np.transpose(1, 2, 0)
return Image.fromarray((map*255).astype(np.uint8), 'RGB')
# def render(self, map):
# if self._graphics == None:
# if self.GVGAI_SPRITES:
# self._graphics = {
# "empty": Image.open(os.path.dirname(__file__) + "/sprites/oryx/floor3.png").convert('RGBA'),
# "solid": Image.open(os.path.dirname(__file__) + "/sprites/oryx/wall3.png").convert('RGBA'),
# "path" : Image.open(os.path.dirname(__file__) + "/sprites/newset/snowmanchest.png").convert('RGBA'),
# }
# else:
# self._graphics = {
# "empty": Image.open(os.path.dirname(__file__) + "/binary/empty.png").convert('RGBA'),
# "solid": Image.open(os.path.dirname(__file__) + "/binary/solid.png").convert('RGBA'),
# "path" : Image.open(os.path.dirname(__file__) + "/binary/path_g.png").convert('RGBA'),
# }
# return super().render(map, render_path=self.path_coords)
```
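A minimal standalone sketch of what the `face_1` stat above computes: dividing the summed absolute difference by the product of the map's dimensions is just the mean absolute per-channel error, which stays in [0, 1] when both the map and the normalized target are in [0, 1]. The shapes below are assumptions chosen to match `FaceProblem` (32x32, three channels).
```python
import numpy as np
from functools import reduce
from operator import mul

h, w = 32, 32                      # assumed level size, matching FaceProblem
target = np.random.rand(3, h, w)   # stands in for face_np.transpose(2, 0, 1) / 255
level = np.random.rand(3, h, w)    # stands in for the continuous RGB map

face_1 = np.sum(np.abs(target - level)) / reduce(mul, level.shape)
assert np.isclose(face_1, np.mean(np.abs(target - level)))  # same quantity
print(face_1)  # 0 would mean a pixel-perfect match with the target image
```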
#### File: envs/probs/loderunner_ctrl_prob.py
```python
from pdb import set_trace as TT
import numpy as np
from gym_pcgrl.envs.helper import (
calc_certain_tile,
calc_num_regions,
get_range_reward,
get_tile_locations,
run_dijkstra,
get_path_coords,
)
from gym_pcgrl.envs.probs.loderunner_prob import LoderunnerProblem
class LoderunnerCtrlProblem(LoderunnerProblem):
def __init__(self):
super(LoderunnerCtrlProblem, self).__init__()
# TODO: Do not assume it's a square
# Twice the optimal zig-zag minus one for the end-point at which the player turns around
self._max_path_length = (np.ceil(self._width / 2) * (self._height) + np.floor(self._height / 2)) * 2 - 1
# self._max_path_length = np.ceil(self._width / 2 + 1) * (self._height)
# like "_reward_weights" but for use with ParamRew
self._reward_weights = self._reward_weights
self.static_trgs = {
"player": 1,
"enemies": 2,
"gold": (1, 10),
"win": 1,
"path-length": self._max_path_length,
}
# conditional inputs/targets (just a default; we don't use it in the ParamRew wrapper)
self.cond_trgs = self.static_trgs
max_n_tile = self._height * self._width
# boundaries for conditional inputs/targets
self.cond_bounds = {
"player": (0, max_n_tile),
"enemies": (0, max_n_tile),
# "gold": (0, max_n_tile),
"gold": (0, 10),
"win": (0, 1),
"path-length": (0, self._max_path_length),
}
# We do these things in the ParamRew wrapper
def get_episode_over(self, new_stats, old_stats):
return False
def get_reward(self, new_stats, old_stats):
return None
```
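For reference, a small standalone check of the `_max_path_length` bound used in the constructor above; the width/height values are illustrative only (the real ones come from `LoderunnerProblem`).
```python
import numpy as np

def max_zigzag_path_length(width, height):
    # Twice the optimal zig-zag, minus one for the end-point where the player turns around
    # (the same expression as in LoderunnerCtrlProblem.__init__).
    return (np.ceil(width / 2) * height + np.floor(height / 2)) * 2 - 1

for w, h in [(7, 7), (11, 7), (32, 22)]:
    print(w, h, max_zigzag_path_length(w, h))
```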
#### File: control-pcgrl3D/gym_pcgrl/test3D.py
```python
import numpy as np
import gym
import gym_pcgrl
from pdb import set_trace as TT
# from utils import make_vec_envs
from gym_pcgrl.envs.helper_3D import calc_num_regions, debug_path, get_string_map,\
get_tile_locations, calc_longest_path, run_dijkstra
import matplotlib.pyplot as plt
################################################################################
# test the helper functions
tile_types = ["AIR", "DIRT"]
######## Test the path finding func and region counting func in the stair-climbing logic #########
# test_map_1:
# size: 7 * 7 * 5
# longest path length: 28 + 2 + 29 = 59
test_map_1 = [
[
[0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 0, 0, 0, 0]
],
[
[0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0],
[1, 1, 0, 0, 0, 0, 0]
],
[
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 1, 1, 1]
],
[
[0, 0, 0, 1, 0, 0, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 0, 0, 1, 1, 1, 0]
],
[
[0, 0, 0, 1, 0, 0, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 0, 1, 1, 1, 1, 0]
]
]
# test_map_2:
# size: 7 * 7 * 5
# longest path length: 28 + 2 + 27 = 57
test_map_2 = [
[
[0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 0, 0, 0, 0]
],
[
[0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0],
[1, 1, 0, 0, 0, 0, 0]
],
[
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 1, 1, 1]
],
[
[1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 1, 0, 0, 0]
],
[
[1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 1, 1, 0, 0, 0]
]
]
# test_map_3:
# size: 7 * 7 * 5
# longest path length: 28 + 2 + 27 = 57
# info: identical to test_map_2, except that some unnecessary tiles are removed (to test region number)
test_map_3 = [
[
[0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 0, 0, 0, 0]
],
[
[0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0],
[1, 1, 0, 0, 0, 0, 0]
],
[
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 1, 1, 1]
],
[
[1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0] # diff: [0, 0, 0, 1, 0, 0, 0] in test_map_2
],
[
[1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0] # diff: [0, 0, 1, 1, 0, 0, 0] in test_map_2
]
]
# test_map_4:
# size: 3 * 6 * 6
# longest path length: 2 + 1 + 1 + 1 = 5
# info: small map for testing climbing stairs
test_map_4 = [
[
[1, 0, 1],
[1, 0, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]
],
[
[1, 0, 1],
[1, 0, 1],
[1, 0, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]
],
[
[1, 0, 1],
[1, 0, 1],
[1, 0, 1],
[1, 0, 1],
[1, 1, 1],
[1, 1, 1]
],
[
[1, 1, 1],
[1, 1, 1],
[1, 0, 1],
[1, 0, 1],
[1, 0, 1],
[1, 1, 1]
],
[
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 0, 1],
[1, 0, 1],
[1, 1, 1]
],
[
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]
]
]
########### For testing the 3D plotting ###########
# test_map_5:
# size: 8 * 2 * 4
test_map_5 = [
[
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 0, 0, 0, 0],
],
[
[1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]
],
[
[1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]
],
[
[1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]
]
]
############ Test the path finding func in the jumping logic #############
# Note: for a Minecraft jump, extra head room must be guaranteed both at the starting position and at
# the position one block before the foothold
#
# |__
# O
# 大_ __
# | |
# | |
# test_map_6:
# size: 5 * 1 * 6
# This is the max jump distance in Minecraft (press double w + space to jump)
# path length: 2
# region number: 1
# jump: 1
# jump distance: 3
test_map_6 = [
[
[1, 0, 0, 0, 1]
],
[
[1, 0, 0, 0, 1]
],
[
[1, 0, 0, 0, 1]
],
[
[1, 0, 0, 0, 1]
],
[
[0, 0, 0, 0, 0]
],
[
[0, 0, 0, 0, 0]
],
[
[0, 0, 0, 0, 0]
]
]
# test_map_7:
# size: 5 * 1 * 6
# This is the max jump distance in Minecraft (press double w + space to jump)
# path length: 2
# region number: 1
# jump: 1
# jump distance: 3
# info: valid jump, the head room of the foothold position is trivial
test_map_7 = [
[
[1, 0, 0, 0, 1]
],
[
[1, 0, 0, 0, 1]
],
[
[1, 0, 0, 0, 1]
],
[
[1, 0, 0, 0, 1]
],
[
[0, 0, 0, 0, 0]
],
[
[0, 0, 0, 0, 0]
],
[
[0, 0, 0, 0, 1] # the head room of the foothold position is trivial
]
]
# test_map_8:
# size: 5 * 1 * 6
# This is the max jump distance in Minecraft (press double w + space to jump)
# path length: 1
# region number: 1
# jump: 0
# jump distance: 3
# info: head blocked in starting position in either direction
test_map_8 = [
[
[1, 0, 0, 0, 1]
],
[
[1, 0, 0, 0, 1]
],
[
[1, 0, 0, 0, 1]
],
[
[1, 0, 0, 0, 1]
],
[
[0, 0, 0, 0, 0]
],
[
[0, 0, 0, 0, 0]
],
[
[1, 0, 0, 0, 1] # head blocked in starting position in either direction
]
]
# test_map_9:
# size: 5 * 1 * 6
# This is the max jump distance in Minecraft (press double w + space to jump)
# path length: 1
# region number: 1
# jump: 0
# jump distance: 3
# info: head blocked in the position before foothold position
test_map_9 = [
[
[1, 0, 0, 0, 1]
],
[
[1, 0, 0, 0, 1]
],
[
[1, 0, 0, 0, 1]
],
[
[1, 0, 0, 0, 1]
],
[
[0, 0, 0, 0, 0]
],
[
[0, 0, 0, 0, 0]
],
[
[0, 0, 0, 1, 1] # head blocked in the position before foothold position
]
]
# test_map_10:
# size: 4 * 1 * 6
# jump distance: 2
# path length: 2
# region number: 1
# jump: 1
test_map_10 = [
[
[1, 0, 0, 1]
],
[
[1, 0, 0, 1]
],
[
[1, 0, 0, 1]
],
[
[1, 0, 0, 1]
],
[
[0, 0, 0, 0]
],
[
[0, 0, 0, 0]
],
[
[0, 0, 0, 0]
]
]
# test_map_11:
# size: 3 * 1 * 6
# jump distance: 1
# path length: 2
# region number: 1
# jump: 1
test_map_11 = [
[
[1, 0, 1]
],
[
[1, 0, 1]
],
[
[1, 0, 1]
],
[
[1, 0, 1]
],
[
[0, 0, 0]
],
[
[0, 0, 0]
],
[
[0, 0, 0]
]
]
# test_map_12:
# size: 3 * 1 * 6
# jump distance: 1
# path length: 2
# region number: 1
# jump: 1
# height difference: 1
# info: the height difference of starting point and foothold position is 1
test_map_12 = [
[
[1, 0, 1]
],
[
[1, 0, 1]
],
[
[1, 0, 1]
],
[
[1, 0, 0] # the height difference of starting point and foothold position is 1
],
[
[0, 0, 0]
],
[
[0, 0, 0]
],
[
[0, 0, 0]
]
]
# test_map_13:
# size: 3 * 1 * 6
# jump distance: 1
# path length: 2
# region number: 1
# jump: 1
# height difference: 2
# info: the height difference of starting point and foothold position is 1
test_map_13 = [
[
[1, 0, 1]
],
[
[1, 0, 1]
],
[
[1, 0, 0]
],
[
[1, 0, 0] # the height difference of starting point and foothold position is 2
],
[
[0, 0, 0]
],
[
[0, 0, 0]
],
[
[0, 0, 0]
]
]
# test_map_14:
# size: 3 * 1 * 6
# jump distance: 1
# path length: 1
# region number: 1
# jump: 0
# height difference: 0
# info: head blocked in starting position in either direction
test_map_14 = [
[
[1, 0, 1]
],
[
[1, 0, 1]
],
[
[1, 0, 1]
],
[
[1, 0, 1]
],
[
[0, 0, 0]
],
[
[0, 0, 0]
],
[
[1, 0, 1]
]
]
# test_map_15:
# size: 3 * 1 * 6
# jump distance: 1
# path length: 1
# region number: 1
# jump: 0
# height difference: 0
# info: head blocked in foothold position
test_map_15 = [
[
[1, 0, 1]
],
[
[1, 0, 1]
],
[
[1, 0, 1]
],
[
[1, 0, 1]
],
[
[0, 0, 0]
],
[
[0, 0, 0]
],
[
[0, 1, 1]
]
]
# test_map_16:
# size: 3 * 1 * 6
# jump distance: 1
# path length: 2
# region number: 1
# jump: 1
# height difference: 0
# info: valid jump
test_map_16 = [
[
[1, 0, 1]
],
[
[1, 0, 1]
],
[
[1, 0, 1]
],
[
[1, 0, 1]
],
[
[0, 0, 0]
],
[
[0, 0, 0]
],
[
[0, 0, 1]
]
]
# test_map_17:
# size: 3 * 1 * 6
# jump distance: 1
# path length: 2
# region number: 1
# jump: 1
# height difference: -1
# info: valid jump
test_map_17 = [
[
[1, 0, 1]
],
[
[1, 0, 1]
],
[
[1, 0, 1]
],
[
[1, 0, 0]
],
[
[0, 0, 0]
],
[
[0, 0, 1]
],
[
[0, 0, 1]
]
]
# TODO: test map for falling distance > 1 and <= 3
"""
get the state of the test maps
"""
def get_test_state(test_map, tile_types):
test_map = np.array(test_map)
test_string_map = get_string_map(test_map, tile_types)
map_locations = get_tile_locations(test_string_map, tile_types)
# get the state of the test map
path_length, path_coords = calc_longest_path(test_string_map, map_locations, ["AIR"], get_path=True)
num_regions = calc_num_regions(test_string_map, map_locations, ["AIR"])
debug_path_coords = debug_path(path_coords, test_string_map, ["AIR"])
print("longest path length:", path_length)
print("number of regions:", num_regions)
print(f"The path is: {debug_path_coords}")
return path_length, path_coords, num_regions
"""
plot the test maps using matplotlib 3D voxel / volumetric plotting
"""
def plot_3d_map(test_map):
test_map = np.array(test_map)
# change the map axis for plotting
test_map = np.moveaxis(test_map, (0, 2), (2, 1))
# create the boolean map of the maze
boolen_map = np.array(test_map) == 1
# create the color map of the maze
color_map = np.empty(test_map.shape, dtype=object)
color_map[boolen_map] = "green"
# plot it out!
ax = plt.figure().add_subplot(projection='3d')
ax.set_box_aspect([test_map.shape[0]/test_map.shape[1],
1,
test_map.shape[2]/test_map.shape[1]])
# ax.set_box_aspect([1,
# 1,
# 5/7])
print('test_map.shape:', test_map.shape)
ax.voxels(boolen_map, facecolors=color_map, edgecolor='k')
plt.show()
if __name__=="__main__":
################################################################################
# test the 3D environment
# env = gym.make('minecraft_3D_zelda-narrow3D-v0')
# while True:
# observation = env.reset()
# for step in range(500):
# action = env.action_space.sample()
# observation, reward, done, info = env.step(action)
# print(env._rep_stats)
# env.render()
################################################################################
# test the path finding algorithm
# # path_length_1, path_coords_1, num_regions_1 = get_test_state(test_map_1, tile_types)
# # path_length_2, path_coords_2, num_regions_2 = get_test_state(test_map_2, tile_types)
# # path_length_3, path_coords_3, num_regions_3 = get_test_state(test_map_3, tile_types)
# path_length_4, path_coords_4, num_regions_4 = get_test_state(test_map_4, tile_types)
# dijkstra_map_4, _ = run_dijkstra(1, 0, 0, get_string_map(np.array(test_map_4), tile_types), ["AIR"])
# print("dijkstra_map_4 is \n", dijkstra_map_4)
################################################################################
# test the 3D plotting using matplotlib 3D voxel / volumetric plotting
plot_3d_map(test_map_5)
```
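A hedged usage sketch for the test maps above: the expected longest-path lengths written in the comments can be turned into assertions. This assumes `gym_pcgrl` and its 3D helpers are importable, exactly as in the file itself.
```python
import numpy as np
from gym_pcgrl.envs.helper_3D import calc_longest_path, get_string_map, get_tile_locations

def assert_longest_path(test_map, expected_length, tile_types=("AIR", "DIRT")):
    string_map = get_string_map(np.array(test_map), list(tile_types))
    locations = get_tile_locations(string_map, list(tile_types))
    length, _ = calc_longest_path(string_map, locations, ["AIR"], get_path=True)
    assert length == expected_length, (length, expected_length)

# Expected values taken from the comments above, e.g.:
# assert_longest_path(test_map_1, 59)
# assert_longest_path(test_map_2, 57)
```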
#### File: control-pcgrl3D/rl/envs.py
```python
from collections import namedtuple
import os
from pdb import set_trace as TT
from gym import spaces
from gym_pcgrl import wrappers, conditional_wrappers
#from stable_baselines3.common.vec_env import SubprocVecEnv, DummyVecEnv
# from stable_baselines.common.vec_env import SubprocVecEnv, DummyVecEnv
# from utils import RenderMonitor, get_map_width
# def make_env(env_name, representation, rank=0, log_dir=None, **kwargs):
def make_env(cfg_dict):
"""
Return a function that will initialize the environment when called.
Args:
cfg_dict: dictionary of configuration parameters
"""
# Turn dictionary into an object with attributes instead of keys.
cfg = namedtuple("env_cfg", cfg_dict.keys())(*cfg_dict.values())
crop_size = cfg.crop_size
cfg_dict.pop('crop_size')
if cfg.representation == 'wide':
env = wrappers.ActionMapImagePCGRLWrapper(cfg.env_name, **cfg_dict)
elif cfg.representation == 'wide3D':
# raise NotImplementedError("3D wide representation not implemented")
env = wrappers.ActionMap3DImagePCGRLWrapper(cfg.env_name, **cfg_dict)
elif cfg.representation == 'cellular':
# env = wrappers.CAWrapper(env_name, **kwargs)
env = wrappers.CAactionWrapper(cfg.env_name, **cfg_dict)
elif cfg.representation in ['narrow', 'turtle']:
crop_size = cfg.crop_size
env = wrappers.CroppedImagePCGRLWrapper(cfg.env_name, crop_size, **cfg_dict)
elif cfg.representation in ['narrow3D', 'turtle3D']:
crop_size = cfg.crop_size
env = wrappers.Cropped3DImagePCGRLWrapper(cfg.env_name, crop_size, **cfg_dict)
else:
raise Exception('Unknown representation: {}'.format(cfg.representation))
env.configure(**cfg_dict)
if cfg.max_step is not None:
env = wrappers.MaxStep(env, cfg.max_step)
# if log_dir is not None and cfg.get('add_bootstrap', False):
# env = wrappers.EliteBootStrapping(env,
# os.path.join(log_dir, "bootstrap{}/".format(rank)))
env = conditional_wrappers.ConditionalWrapper(env, ctrl_metrics=cfg.conditionals, **cfg_dict)
if not cfg.evaluate:
if not cfg.alp_gmm:
env = conditional_wrappers.UniformNoiseyTargets(env, **cfg_dict)
else:
env = conditional_wrappers.ALPGMMTeacher(env, **cfg_dict)
# if not conditional, the ParamRew wrapper should just be fixed at default static targets
# if render or log_dir is not None and len(log_dir) > 0:
# # RenderMonitor must come last
# env = RenderMonitor(env, rank, log_dir, **kwargs)
return env
```
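The dict-to-attributes trick at the top of `make_env` is easy to miss, so here is a standalone illustration (the keys and values are hypothetical). Note that `cfg` is built before `crop_size` is popped from `cfg_dict`, so `cfg.crop_size` remains readable afterwards.
```python
from collections import namedtuple

cfg_dict = {"env_name": "binary-narrow-v0", "representation": "narrow", "crop_size": 28}
cfg = namedtuple("env_cfg", cfg_dict.keys())(*cfg_dict.values())
print(cfg.representation, cfg.crop_size)  # -> narrow 28

cfg_dict.pop("crop_size")
print(cfg.crop_size)                      # still 28; the namedtuple is a frozen snapshot of the dict
```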
#### File: JiangZehua/control-pcgrl3D/run_batch_evo.py
```python
import argparse
from bdb import GENERATOR_AND_COROUTINE_FLAGS
from collections import namedtuple
import copy
import json
import yaml
import os
import re
from pdb import set_trace as TT
from typing import List
from evo.cross_eval import compile_results
from evo.render_gifs import render_gifs
GENERATIVE_ONLY_CROSS_EVAL = True
with open("configs/evo/batch.yaml", "r") as f:
batch_config = yaml.safe_load(f)
batch_config = namedtuple('batch_config', batch_config.keys())(**batch_config)
def launch_batch(exp_name, collect_params=False):
if collect_params:
settings_list = []
assert not EVALUATE
if LOCAL:
print("Testing locally.")
else:
print("Launching batch of experiments on SLURM.")
with open("configs/evo/auto/default_settings.json", "r") as f:
default_config = json.load(f)
# print("Loaded default config:\n{}".format(default_config))
if LOCAL:
default_config["n_generations"] = 50000
i = 0
for exp_id in batch_config.exp_ids:
for prob in batch_config.problems:
prob_bcs = batch_config.global_bcs + batch_config.local_bcs[prob]
for rep in batch_config.representations:
for algo in batch_config.algos:
for model in batch_config.models:
if model == "CNN" and rep == "cellular":
print("Skipping experiments with CNN model and cellular representation, as this would necessitate "
"an explosion of model parameters.")
continue
for bc_pair in prob_bcs:
for fix_el in batch_config.fix_elites:
for fix_seed in batch_config.fix_seeds:
# No reason to re-evaluate other than random seeds so this would cause an error
if fix_seed and not fix_el:
print("Skipping experiment with fix_seed=True and fix_elites=False. There is no "
"point re-evaluating generators (which are deterministic) on the same seeds.")
continue
for n_steps in batch_config.n_steps_lst:
if rep != "cellular":
if n_steps != batch_config.n_steps_lst[0]:
continue
if "NCA" in model and n_steps <= 5:
print("Skipping experiments with NCA model and n_steps <= 5.")
continue
for n_init_states in batch_config.n_init_states_lst:
if n_init_states == 0 and not (fix_seed and fix_el):
print("Skipping experiments with n_init_states=0 and fix_seed=False. The "
"hand-made seed cannot be randomized.")
continue
# The hand-made seed is not valid for Decoders (or CPPNs, handled below)
if n_init_states == 0 and "Decoder" in model:
continue
# For the sake of cross-evaluating over model variable alone, do not look at
# experiments treating models with generative capabilities as indirect encodings
if args.cross_eval and GENERATIVE_ONLY_CROSS_EVAL:
if n_init_states == 0 and not (model == "CPPN" or model == "Sin2CPPN" or model == "SinCPPN"):
continue
if model in ["CPPN", "GenCPPN", "GenCPPN2", "CPPNCA", "DirectBinaryEncoding"]:
if algo != "ME":
print("Skipping experiments with model {model} and algo {algo}. (requires "
"MAP-Elites.)")
continue
else:
pass
# algo = "CMAME"
if 'CPPN' in model:
if 'Gen' not in model and model != "CPPNCA":
# We could have more initial states, randomized initial states, and re-evaluated elites with generator-CPPNs
if n_init_states != 0 or not fix_seed or not fix_el:
continue
if model != "CPPNCA" and n_steps != 1:
continue
# The decoder generates levels in a single pass (from a smaller latent)
if 'Decoder' in model and n_steps != 1:
continue
# Edit the sbatch file to load the correct config file
if EVALUATE:
script_name = "evo/eval.sh"
else:
script_name = "evo/train.sh"
with open(script_name, "r") as f:
content = f.read()
# Replace the ``python scriptname --cl_args`` line.
new_content = re.sub(
"python evo/evolve.py -la \d+",
"python evo/evolve.py -la {}".format(i),
content,
)
# Replace the job name.
new_content = re.sub(
"evo_runs/evopcg_\d+",
"evo_runs/evopcg_{}".format(i),
new_content
)
with open(script_name, "w") as f:
f.write(new_content)
# Write the config file with the desired settings
exp_config = copy.deepcopy(default_config)
exp_config.update({
"problem": prob,
"representation": rep,
"behavior_characteristics": bc_pair,
"algo": algo,
"model": model,
"fix_elites": fix_el,
"fix_level_seeds": fix_seed,
# "exp_name": exp_name,
"exp_name": str(exp_id),
"save_levels": False,
"n_steps": n_steps,
"n_init_states": n_init_states,
"n_generations": 50000,
"multi_thread": not args.single_thread,
"save_interval": 10 if args.local else 100,
}
)
if args.render:
exp_config.update(
{
"infer": True,
"render": True,
"visualize": True,
}
)
elif EVALUATE:
# No real point rendering a mapping that takes only one step (unless we're debugging latent seeds, in which case just use more steps)
render_levels = RENDER_LEVELS and n_steps > 1
# ... Also, because this isn't compatible with qdpy at the moment
render_levels = RENDER_LEVELS and algo != "ME"
exp_config.update(
{
"infer": True,
"evaluate": True,
"render_levels": render_levels,
"save_levels": True,
"visualize": True,
}
)
print(
"Saving experiment config:\n{}".format(
exp_config
)
)
with open(
"configs/evo/auto/settings_{}.json".format(i), "w"
) as f:
json.dump(
exp_config, f, ensure_ascii=False, indent=4
)
# Launch the experiment. It should load the saved settings
if collect_params:
settings_list.append(exp_config)
elif LOCAL:
os.system("python evo/evolve.py -la {}".format(i))
# Turned off for mid-training evals
# os.system("ray stop")
else:
os.system("sbatch {}".format(script_name))
i += 1
if collect_params:
return settings_list
if __name__ == "__main__":
opts = argparse.ArgumentParser(
description="Launch a batch of experiments/evaluations for evo-pcgrl"
)
opts.add_argument(
"-ex",
"--experiment_name",
help="A name to be shared by the batch of experiments.",
default="",
)
opts.add_argument(
"-ev",
"--evaluate",
help="Evaluate a batch of evo experiments.",
action="store_true",
)
opts.add_argument(
"-l",
"--local",
help="Test the batch script, i.e. run it on a local machine and evolve for minimal number of generations.",
action="store_true",
)
opts.add_argument(
"-r",
"--render",
help="Render and observe",
action="store_true",
)
opts.add_argument(
"-ce",
"--cross_eval",
help="Compile stats from previous evaluations into a table",
action="store_true",
)
opts.add_argument(
"-tex",
"--tex",
help="If compiling cross-eval results, produce latex table (otherwise html).",
action="store_true",
)
opts.add_argument(
"--gif",
help="Make gifs from previously-rendered level-generation episodes.",
action="store_true",
)
opts.add_argument(
"--render_levels",
help="Save images from level-generation (to be subsequently used to render gifs with --gif).",
action="store_true",
)
opts.add_argument(
"-st",
"--single_thread",
help="Run experiment sequentially, instead of using ray to parallelise evaluation.",
action="store_true",
)
args = opts.parse_args()
EXP_NAME = args.experiment_name
EVALUATE = args.evaluate
LOCAL = args.local
RENDER_LEVELS = args.render_levels
if args.cross_eval or args.gif:
settings_list = launch_batch(EXP_NAME, collect_params=True)
if args.cross_eval:
compile_results(settings_list, tex=args.tex)
if not args.tex:
print("Produced html at evo_runs/cross_eval_{}.html".format(args.experiment_name))
else:
os.chdir('eval_experiment')
os.system(f'pdflatex tables.tex')
elif args.gif:
render_gifs(settings_list)
else:
launch_batch(EXP_NAME)
```
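A standalone illustration of the two `re.sub` calls the launcher applies to `evo/train.sh` / `evo/eval.sh` above; the sample script contents here are made up for the example, since the real sbatch files are not shown.
```python
import re

i = 7  # hypothetical experiment index
content = (
    "#SBATCH --output=evo_runs/evopcg_0\n"
    "python evo/evolve.py -la 0\n"
)
content = re.sub(r"python evo/evolve.py -la \d+", "python evo/evolve.py -la {}".format(i), content)
content = re.sub(r"evo_runs/evopcg_\d+", "evo_runs/evopcg_{}".format(i), content)
print(content)  # both the job output path and the -la argument now point at experiment 7
```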
#### File: experiments/lin_proj/lin_proj.py
```python
import os
import csv
import time
from pathlib import Path
import fire
import matplotlib.pyplot as plt
import numpy as np
from alive_progress import alive_bar
from dask.distributed import Client, LocalCluster
from ribs.archives import CVTArchive, GridArchive
from ribs.emitters import (GaussianEmitter, ImprovementEmitter, IsoLineEmitter,
GradientEmitter, GradientImprovementEmitter)
from ribs.optimizers import Optimizer
from ribs.visualize import grid_archive_heatmap
def calc_sphere(sol):
dim = sol.shape[1]
# Shift the Sphere function so that the optimal value is at x_i = 2.048.
target_shift = 5.12 * 0.4
# Normalize the objective to the range [0, 100] where 100 is optimal.
best_obj = 0.0
worst_obj = (-5.12 - target_shift)**2 * dim
raw_obj = np.sum(np.square(sol - target_shift), axis=1)
objs = (raw_obj - worst_obj) / (best_obj - worst_obj) * 100
derivatives = -2 * (sol - target_shift)
return objs, derivatives
def calc_rastrigin(sol):
A = 10.0
dim = sol.shape[1]
# Shift the Rastrigin function so that the optimal value is at x_i = 2.048.
target_shift = 5.12 * 0.4
best_obj = np.zeros(len(sol))
displacement = -5.12 * np.ones(sol.shape) - target_shift
sum_terms = np.square(displacement) - A * np.cos(2 * np.pi * displacement)
worst_obj = 10 * dim + np.sum(sum_terms, axis=1)
displacement = sol - target_shift
sum_terms = np.square(displacement) - A * np.cos(2 * np.pi * displacement)
raw_obj = 10 * dim + np.sum(sum_terms, axis=1)
# Normalize the objective to the range [0, 100] where 100 is optimal.
# Approximate 0 by the bottom-left corner.
objs = (raw_obj - worst_obj) / (best_obj - worst_obj) * 100
derivatives = -(2 * displacement + 2 * np.pi * A * np.sin(2 * np.pi * displacement))
return objs, derivatives
# Batch calculate the lin projection for all solutions given.
def calc_measures(sol):
dim = sol.shape[1]
# Calculate BCs.
clipped = sol.copy()
clip_indices = np.where(np.logical_or(clipped > 5.12, clipped < -5.12))
clipped[clip_indices] = 5.12 / clipped[clip_indices]
measures = np.concatenate(
(
np.sum(clipped[:, :dim // 2], axis=1, keepdims=True),
np.sum(clipped[:, dim // 2:], axis=1, keepdims=True),
),
axis=1,
)
derivatives = np.ones(sol.shape)
derivatives[clip_indices] = -5.12 / np.square(sol[clip_indices])
mask_0 = np.concatenate((np.ones(dim//2), np.zeros(dim-dim//2)))
mask_1 = np.concatenate((np.zeros(dim//2), np.ones(dim-dim//2)))
d_measure0 = np.multiply(derivatives, mask_0)
d_measure1 = np.multiply(derivatives, mask_1)
jacobian = np.stack((d_measure0, d_measure1), axis=1)
return measures, jacobian
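# Reading guide for calc_measures: each measure is the sum over one half of the solution's components,
# with any component outside [-5.12, 5.12] remapped to 5.12 / x so that it shrinks toward 0 instead of
# growing without bound. The per-measure gradients use derivative 1 inside the clip region and
# -5.12 / x**2 outside it, masked to the first or second half of the components, and are stacked into a
# jacobian of shape (batch, 2, dim).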
def create_optimizer(algorithm, dim, seed):
"""Creates an optimizer based on the algorithm name.
Args:
algorithm (str): Name of the algorithm passed into sphere_main.
dim (int): Dimensionality of the sphere function.
seed (int): Main seed or the various components.
Returns:
Optimizer: A ribs Optimizer for running the algorithm.
"""
max_bound = dim / 2 * 5.12
bounds = [(-max_bound, max_bound), (-max_bound, max_bound)]
initial_sol = np.zeros(dim)
batch_size = 36
num_emitters = 1
# Create archive.
if algorithm in [
"map_elites", "map_elites_line", "cma_me_imp",
"og_map_elites", "omg_mega", "cma_mega", "cma_mega_adam",
]:
archive = GridArchive((100, 100), bounds, seed=seed)
else:
raise ValueError(f"Algorithm `{algorithm}` is not recognized")
# Create emitters. Each emitter needs a different seed, so that they do not
# all do the same thing.
emitter_seeds = [None] * num_emitters if seed is None else list(
range(seed, seed + num_emitters))
if algorithm in ["map_elites"]:
emitters = [
GaussianEmitter(archive,
initial_sol,
0.5,
batch_size=batch_size * num_emitters,
seed=s) for s in emitter_seeds
]
elif algorithm in ["map_elites_line"]:
emitters = [
IsoLineEmitter(archive,
initial_sol,
iso_sigma=0.5,
line_sigma=0.2,
batch_size=batch_size,
seed=s) for s in emitter_seeds
]
elif algorithm in ["og_map_elites"]:
emitters = [
GradientEmitter(archive,
initial_sol,
sigma0=0.5,
sigma_g=0.5,
measure_gradients=False,
normalize_gradients=False,
bounds=None,
batch_size=batch_size // 2,
seed=s) for s in emitter_seeds
]
elif algorithm in ["omg_mega"]:
emitters = [
GradientEmitter(archive,
initial_sol,
sigma0=0.0,
sigma_g=10.0,
measure_gradients=True,
normalize_gradients=True,
bounds=None,
batch_size=batch_size // 2,
seed=s) for s in emitter_seeds
]
elif algorithm in ["cma_mega"]:
emitters = [
GradientImprovementEmitter(archive,
initial_sol,
sigma_g=10.0,
stepsize=1.0,
gradient_optimizer="gradient_ascent",
normalize_gradients=True,
selection_rule="mu",
bounds=None,
batch_size=batch_size - 1,
seed=s) for s in emitter_seeds
]
elif algorithm in ["cma_mega_adam"]:
emitters = [
GradientImprovementEmitter(archive,
initial_sol,
sigma_g=10.0,
stepsize=0.002,
gradient_optimizer="adam",
normalize_gradients=True,
selection_rule="mu",
bounds=None,
batch_size=batch_size - 1,
seed=s) for s in emitter_seeds
]
elif algorithm in ["cma_me_imp"]:
emitters = [
ImprovementEmitter(archive,
initial_sol,
0.5,
batch_size=batch_size,
seed=s) for s in emitter_seeds
]
return Optimizer(archive, emitters)
def save_heatmap(archive, heatmap_path):
"""Saves a heatmap of the archive to the given path.
Args:
archive (GridArchive or CVTArchive): The archive to save.
heatmap_path: Image path for the heatmap.
"""
plt.figure(figsize=(8, 6))
grid_archive_heatmap(archive, vmin=0, vmax=100)
plt.tight_layout()
plt.savefig(heatmap_path)
plt.close(plt.gcf())
def run_experiment(algorithm,
trial_id,
dim=1000,
objective='sphere',
init_pop=100,
itrs=10000,
outdir="logs",
log_freq=1,
log_arch_freq=1000,
seed=None):
# Create a directory for this specific trial.
s_logdir = os.path.join(outdir, f"{algorithm}", f"trial_{trial_id}")
logdir = Path(s_logdir)
if not logdir.is_dir():
logdir.mkdir()
# Create a new summary file
summary_filename = os.path.join(s_logdir, f"summary.csv")
if os.path.exists(summary_filename):
os.remove(summary_filename)
with open(summary_filename, 'w') as summary_file:
writer = csv.writer(summary_file)
writer.writerow(['Iteration', 'QD-Score', 'Coverage', 'Maximum', 'Average'])
is_init_pop = algorithm in ['og_map_elites', 'omg_mega', 'map_elites', 'map_elites_line']
is_dqd = algorithm in ['og_map_elites', 'omg_mega', 'cma_mega', 'cma_mega_adam']
# Select the objective based on the input.
obj_func = None
if objective == 'sphere':
obj_func = calc_sphere
elif objective == 'Rastrigin':
obj_func = calc_rastrigin
optimizer = create_optimizer(algorithm, dim, seed)
archive = optimizer.archive
best = 0.0
non_logging_time = 0.0
with alive_bar(itrs) as progress:
if is_init_pop:
# Sample initial population
sols = np.array([np.random.normal(size=dim) for _ in range(init_pop)])
objs, _ = obj_func(sols)
best = max(best, max(objs))
measures, _ = calc_measures(sols)
# Add each solution to the archive.
for i in range(len(sols)):
archive.add(sols[i], objs[i], measures[i])
for itr in range(1, itrs + 1):
itr_start = time.time()
if is_dqd:
sols = optimizer.ask(grad_estimate=True)
objs, jacobian_obj = obj_func(sols)
best = max(best, max(objs))
measures, jacobian_measure = calc_measures(sols)
jacobian_obj = np.expand_dims(jacobian_obj, axis=1)
jacobian = np.concatenate((jacobian_obj, jacobian_measure), axis=1)
optimizer.tell(objs, measures, jacobian=jacobian)
sols = optimizer.ask()
objs, _ = obj_func(sols)
best = max(best, max(objs))
measures, _ = calc_measures(sols)
optimizer.tell(objs, measures)
non_logging_time += time.time() - itr_start
progress()
# Save the archive at the given frequency.
# Always save on the final iteration.
final_itr = itr == itrs
if (itr > 0 and itr % log_arch_freq == 0) or final_itr:
# Save a full archive for analysis.
df = archive.as_pandas(include_solutions = final_itr)
df.to_pickle(os.path.join(s_logdir, f"archive_{itr:05d}.pkl"))
# Save a heatmap image to observe how the trial is doing.
save_heatmap(archive, os.path.join(s_logdir, f"heatmap_{itr:05d}.png"))
# Update the summary statistics for the archive
if (itr > 0 and itr % log_freq == 0) or final_itr:
with open(summary_filename, 'a') as summary_file:
writer = csv.writer(summary_file)
sum_obj = 0
num_filled = 0
num_bins = archive.bins
for sol, obj, beh, idx, meta in zip(*archive.data()):
num_filled += 1
sum_obj += obj
qd_score = sum_obj / num_bins
average = sum_obj / num_filled
coverage = 100.0 * num_filled / num_bins
data = [itr, qd_score, coverage, best, average]
writer.writerow(data)
def lin_proj_main(algorithm,
trials=20,
dim=1000,
objective='sphere',
init_pop=100,
itrs=10000,
outdir="logs",
log_freq=1,
log_arch_freq=1000,
seed=None):
"""Experiment tool for the lin_proj domain from the CMA-ME paper.
Args:
algorithm (str): Name of the algorithm.
trials (int): Number of experimental trials to run.
dim (int): Dimensionality of solutions.
objective (str): Either sphere or Rastrigin as the objective. By default, use sphere.
init_pop (int): Initial population size for MAP-Elites (ignored for CMA variants).
itrs (int): Iterations to run.
outdir (str): Directory to save output.
log_freq (int): Number of iterations between computing QD metrics and updating logs.
log_arch_freq (int): Number of iterations between saving an archive and generating heatmaps.
seed (int): Seed for the algorithm. By default, there is no seed.
"""
if objective not in ['sphere', 'Rastrigin']:
raise ValueError(f"Objective `{objective}` is not recognized")
# Create a shared logging directory for the experiments for this algorithm.
s_logdir = os.path.join(outdir, f"{algorithm}")
logdir = Path(s_logdir)
outdir = Path(outdir)
if not outdir.is_dir():
outdir.mkdir()
if not logdir.is_dir():
logdir.mkdir()
cluster = LocalCluster(
processes=True, # Each worker is a process.
n_workers=trials, # Create one worker per trial (assumes >=trials cores)
threads_per_worker=1, # Each worker process is single-threaded.
)
client = Client(cluster)
exp_func = lambda cur_id: run_experiment(
algorithm, cur_id,
dim=dim,
objective=objective,
init_pop=init_pop,
itrs=itrs,
outdir=outdir,
log_freq=log_freq,
log_arch_freq=log_arch_freq,
seed=seed,
)
# Run an experiment as a separate process to run all exps in parallel.
trial_ids = list(range(trials))
futures = client.map(exp_func, trial_ids)
results = client.gather(futures)
if __name__ == '__main__':
fire.Fire(lin_proj_main)
```
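A standalone numeric sanity check of the sphere objective defined above (copied here so the snippet runs on its own): the normalized objective is 100 at the shifted optimum x_i = 5.12 * 0.4 and 0 at the bottom-left corner x_i = -5.12. The returned `derivatives` array is the ascent direction of the normalized objective up to the positive scale factor 100 / worst_obj.
```python
import numpy as np

def calc_sphere(sol):
    dim = sol.shape[1]
    target_shift = 5.12 * 0.4
    best_obj = 0.0
    worst_obj = (-5.12 - target_shift) ** 2 * dim
    raw_obj = np.sum(np.square(sol - target_shift), axis=1)
    objs = (raw_obj - worst_obj) / (best_obj - worst_obj) * 100
    return objs, -2 * (sol - target_shift)

dim = 10
print(calc_sphere(np.full((1, dim), 5.12 * 0.4))[0])  # ~[100.]
print(calc_sphere(np.full((1, dim), -5.12))[0])       # ~[0.]
```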
#### File: experiments/lsi_clip/lsi.py
```python
import os
import csv
import time
from pathlib import Path
import fire
import clip
import torch
import torchvision
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
from alive_progress import alive_bar
from PIL import Image
from ribs.archives import CVTArchive, GridArchive
from ribs.emitters import (GaussianEmitter, ImprovementEmitter, IsoLineEmitter,
GradientEmitter, GradientImprovementEmitter)
from ribs.optimizers import Optimizer
from ribs.visualize import grid_archive_heatmap
from stylegan_models import g_all, g_synthesis, g_mapping
def tensor_to_pil_img(img):
img = (img.clamp(-1, 1) + 1) / 2.0
img = img[0].permute(1, 2, 0).detach().cpu().numpy() * 255
img = Image.fromarray(img.astype('uint8'))
return img
def compute_clip_loss(device, c_net, img, text):
img = torch.nn.functional.upsample_bilinear(img, (224, 224))
tokenized_text = clip.tokenize([text]).to(device)
img_logits, _text_logits = c_net(img, tokenized_text)
return 1/img_logits * 100
def compute_clip_losses(device, c_net, img, prompts):
tokenized_text = clip.tokenize(prompts).to(device)
img_logits, _text_logits = c_net(img, tokenized_text)
return 1/img_logits * 100
def compute_prompts(device, latent_code, g_net, c_net, prompts, img_batch_size=37):
imgs = []
for i in range(0, len(latent_code), img_batch_size):
latents = torch.nn.Parameter(latent_code[i:i+img_batch_size], requires_grad=False)
dlatents = latents.repeat(1,18,1)
img = g_net(dlatents)
img = torch.nn.functional.upsample_bilinear(img, (224, 224))
imgs.append(img)
img = torch.cat(imgs)
loss = compute_clip_losses(device, c_net, img, prompts)
value = loss.cpu().detach().numpy()
return value
def compute_value_jacobian(device, latent_code, g_net, c_net, text, calc_jacobian=True):
latents = torch.nn.Parameter(latent_code, requires_grad=calc_jacobian)
dlatents = latents.repeat(1,18,1)
img = g_net(dlatents)
loss = compute_clip_loss(device, c_net, img, text)
value = loss.cpu().detach().numpy()
value = np.squeeze(value, axis=1)
jacobian = None
if calc_jacobian:
loss.backward()
jacobian = latents.grad.cpu().detach().numpy()
jacobian = np.squeeze(-jacobian, axis=0)
return value, jacobian
def compute_values_jacobians(device, latent_code, g_net, c_net, texts, calc_jacobian=True):
values = []
jacobians = []
for text in texts:
value, jacobian = compute_value_jacobian(device, latent_code, g_net, c_net,
text, calc_jacobian)
values.append(value)
jacobians.append(jacobian)
jacobian = None
if calc_jacobian:
jacobian = np.array(jacobians)
return np.array(values), jacobian
def transform_obj(objs):
# Remap the objective from minimizing [0, 10] to maximizing [0, 100]
return (10.0-objs)*10.0
def create_optimizer(algorithm, dim, seed):
"""Creates an optimizer based on the algorithm name.
Args:
algorithm (str): Name of the algorithm passed into sphere_main.
dim (int): Dimensionality of the sphere function.
seed (int): Main seed or the various components.
Returns:
Optimizer: A ribs Optimizer for running the algorithm.
"""
bounds = [(0.0, 6.0), (0.0, 6.0)]
initial_sol = np.zeros(dim)
batch_size = 36
num_emitters = 1
# Create archive.
if algorithm in [
"map_elites", "map_elites_line", "cma_me_imp",
"og_map_elites", "omg_mega", "cma_mega", "cma_mega_adam",
]:
archive = GridArchive((200, 200), bounds, seed=seed)
else:
raise ValueError(f"Algorithm `{algorithm}` is not recognized")
# Create emitters. Each emitter needs a different seed, so that they do not
# all do the same thing.
emitter_seeds = [None] * num_emitters if seed is None else list(
range(seed, seed + num_emitters))
if algorithm in ["map_elites"]:
emitters = [
GaussianEmitter(archive,
initial_sol,
0.2,
batch_size=batch_size,
seed=s) for s in emitter_seeds
]
elif algorithm in ["map_elites_line"]:
emitters = [
IsoLineEmitter(archive,
initial_sol,
iso_sigma=0.1,
line_sigma=0.2,
batch_size=batch_size,
seed=s) for s in emitter_seeds
]
elif algorithm in ["og_map_elites"]:
emitters = [
GradientEmitter(archive,
initial_sol,
sigma0=0.2,
sigma_g=0.2,
measure_gradients=False,
bounds=None,
batch_size=batch_size // 2,
seed=s) for s in emitter_seeds
]
elif algorithm in ["omg_mega"]:
emitters = [
GradientEmitter(archive,
initial_sol,
sigma0=0.0,
sigma_g=0.2,
measure_gradients=True,
bounds=None,
batch_size=batch_size // 2,
seed=s) for s in emitter_seeds
]
elif algorithm in ["cma_mega"]:
emitters = [
GradientImprovementEmitter(archive,
initial_sol,
sigma_g=0.002,
stepsize=1.0,
gradient_optimizer="gradient_ascent",
normalize_gradients=True,
selection_rule="mu",
bounds=None,
batch_size=batch_size - 1,
seed=s) for s in emitter_seeds
]
elif algorithm in ["cma_mega_adam"]:
emitters = [
GradientImprovementEmitter(archive,
initial_sol,
sigma_g=0.002,
stepsize=0.002,
gradient_optimizer="adam",
normalize_gradients=True,
selection_rule="mu",
bounds=None,
batch_size=batch_size - 1,
seed=s) for s in emitter_seeds
]
elif algorithm in ["cma_me_imp"]:
emitters = [
ImprovementEmitter(archive,
initial_sol,
0.02,
batch_size=batch_size,
seed=s) for s in emitter_seeds
]
return Optimizer(archive, emitters)
def save_heatmap(archive, heatmap_path):
"""Saves a heatmap of the archive to the given path.
Args:
archive (GridArchive or CVTArchive): The archive to save.
heatmap_path: Image path for the heatmap.
"""
plt.figure(figsize=(8, 6))
grid_archive_heatmap(archive, vmin=0, vmax=100)
plt.tight_layout()
plt.savefig(heatmap_path)
plt.close(plt.gcf())
def run_experiment(algorithm,
trial_id,
clip_model,
generator,
device,
dim=512,
init_pop=100,
itrs=10000,
outdir="logs",
log_freq=1,
log_arch_freq=1000,
seed=None):
# Create a directory for this specific trial.
s_logdir = os.path.join(outdir, f"{algorithm}", f"trial_{trial_id}")
logdir = Path(s_logdir)
if not logdir.is_dir():
logdir.mkdir()
# Create a new summary file
summary_filename = os.path.join(s_logdir, f"summary.csv")
if os.path.exists(summary_filename):
os.remove(summary_filename)
with open(summary_filename, 'w') as summary_file:
writer = csv.writer(summary_file)
writer.writerow(['Iteration', 'QD-Score', 'Coverage', 'Maximum', 'Average'])
is_init_pop = algorithm in ['og_map_elites', 'omg_mega', 'map_elites', 'map_elites_line']
is_dqd = algorithm in ['og_map_elites', 'omg_mega', 'cma_mega', 'cma_mega_adam']
optimizer = create_optimizer(algorithm, dim, seed)
archive = optimizer.archive
objective_prompt = '<NAME> with short hair.'
measure_prompts = ['An man with blue eyes.', 'A person with red hair.']
all_prompts = [objective_prompt] + measure_prompts
best = -1000
non_logging_time = 0.0
with alive_bar(itrs) as progress:
if is_init_pop:
# Sample initial population
sols = np.array([np.random.normal(size=dim) for _ in range(init_pop)])
sols = np.expand_dims(sols, axis=1)
latent_codes = torch.tensor(sols, dtype=torch.float32, device=device)
values = compute_prompts(device, latent_codes, generator, clip_model, all_prompts)
objs = values[:,0]
measures = values[:,1:3]
objs = transform_obj(np.array(objs, dtype=np.float32))
measures = np.array(measures, dtype=np.float32)
best_gen = max(objs)
best = max(best, best_gen)
# Add each solution to the archive.
for i in range(len(sols)):
archive.add(sols[i], objs[i], measures[i])
for itr in range(1, itrs + 1):
itr_start = time.time()
if is_dqd:
sols = optimizer.ask(grad_estimate=True)
nvec = np.linalg.norm(sols)
latent_codes = torch.tensor(sols, dtype=torch.float32, device=device)
objs, jacobian_obj = compute_value_jacobian(device, latent_codes, generator,
clip_model, objective_prompt,
calc_jacobian=True)
objs = transform_obj(objs)
best = max(best, max(objs))
measures, jacobian_measure = compute_values_jacobians(device, latent_codes,
generator, clip_model, measure_prompts,
calc_jacobian=True)
jacobian_obj = np.expand_dims(jacobian_obj, axis=0)
jacobian = np.concatenate((jacobian_obj, jacobian_measure), axis=0)
jacobian = np.expand_dims(jacobian, axis=0)
measures = np.transpose(measures)
objs = objs.astype(np.float32)
measures = measures.astype(np.float32)
jacobian = jacobian.astype(np.float32)
optimizer.tell(objs, measures, jacobian=jacobian)
sols = optimizer.ask()
sols = np.expand_dims(sols, axis=1)
latent_codes = torch.tensor(sols, dtype=torch.float32, device=device)
values = compute_prompts(device, latent_codes, generator, clip_model, all_prompts)
objs = values[:,0]
measures = values[:,1:3]
objs = transform_obj(np.array(objs, dtype=np.float32))
measures = np.array(measures, dtype=np.float32)
best_gen = max(objs)
best = max(best, best_gen)
optimizer.tell(objs, measures)
non_logging_time += time.time() - itr_start
progress()
print('best', best, best_gen)
# Save the archive at the given frequency.
# Always save on the final iteration.
final_itr = itr == itrs
if (itr > 0 and itr % log_arch_freq == 0) or final_itr:
# Save a full archive for analysis.
df = archive.as_pandas(include_solutions = final_itr)
df.to_pickle(os.path.join(s_logdir, f"archive_{itr:06d}.pkl"))
# Save a heatmap image to observe how the trial is doing.
save_heatmap(archive, os.path.join(s_logdir, f"heatmap_{itr:06d}.png"))
# Update the summary statistics for the archive
if (itr > 0 and itr % log_freq == 0) or final_itr:
with open(summary_filename, 'a') as summary_file:
writer = csv.writer(summary_file)
sum_obj = 0
num_filled = 0
num_bins = archive.bins
for sol, obj, beh, idx, meta in zip(*archive.data()):
num_filled += 1
sum_obj += obj
qd_score = sum_obj / num_bins
average = sum_obj / num_filled
coverage = 100.0 * num_filled / num_bins
data = [itr, qd_score, coverage, best, average]
writer.writerow(data)
def lsi_main(algorithm,
trials=5,
init_pop=100,
itrs=10000,
outdir='logs',
log_freq=1,
log_arch_freq=1000,
seed=None):
"""Experimental tool for the StyleGAN+CLIP LSI experiments.
Args:
algorithm (str): Name of the algorithm.
trials (int): Number of experimental trials to run.
init_pop (int): Initial population size for MAP-Elites (ignored for CMA variants).
itrs (int): Iterations to run.
outdir (str): Directory to save output.
log_freq (int): Number of iterations between computing QD metrics and updating logs.
log_arch_freq (int): Number of iterations between saving an archive and generating heatmaps.
seed (int): Seed for the algorithm. By default, there is no seed.
"""
# Create a shared logging directory for the experiments for this algorithm.
s_logdir = os.path.join(outdir, f"{algorithm}")
logdir = Path(s_logdir)
outdir = Path(outdir)
if not outdir.is_dir():
outdir.mkdir()
if not logdir.is_dir():
logdir.mkdir()
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
clip_model, clip_preprocess = clip.load("ViT-B/32", device=device)
clip_model.eval()
for p in clip_model.parameters():
p.requires_grad_(False)
g_synthesis.eval()
g_synthesis.to(device)
for p in g_synthesis.parameters():
p.requires_grad_(False)
# Latent space is size 512
dim = 512
for cur_id in range(trials):
run_experiment(algorithm, cur_id, clip_model, g_synthesis,
device, dim=dim, init_pop=init_pop, itrs=itrs,
outdir=outdir, log_freq=log_freq,
log_arch_freq=log_arch_freq, seed=seed)
if __name__ == '__main__':
fire.Fire(lsi_main)
```
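A shape-only sketch of the DQD branch in `run_experiment` above: for a single latent of dimension 512, the objective gradient and the gradients of the two measure prompts appear to be stacked into a jacobian of shape (n_solutions, 1 + n_measures, dim) before being handed to `optimizer.tell(..., jacobian=...)`. Dummy arrays stand in for the CLIP gradients.
```python
import numpy as np

dim, n_measures = 512, 2
jacobian_obj = np.random.randn(dim)                  # stand-in for the objective-prompt gradient
jacobian_measure = np.random.randn(n_measures, dim)  # one stand-in gradient per measure prompt

jacobian = np.concatenate((np.expand_dims(jacobian_obj, axis=0), jacobian_measure), axis=0)
jacobian = np.expand_dims(jacobian, axis=0)
print(jacobian.shape)  # (1, 3, 512)
```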
#### File: qdpy/examples/artificial_landscapes.py
```python
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from cycler import cycler
from matplotlib.ticker import FormatStrFormatter
from mpl_toolkits.mplot3d import Axes3D
from qdpy import algorithms, containers, benchmarks, plots
import numpy as np
import warnings
import os
import random
from scipy.constants import golden_ratio
def iteration_callback(algo, batch_elapsed, grid_ref):
global_reliability = compute_global_reliability(grid_ref, algo.container)
algo.container.global_reliability.append(global_reliability)
#print(f"global_reliability = {global_reliability}")
def tell_callback(algo, ind, grid_ref):
global_reliability = compute_global_reliability(grid_ref, algo.container)
algo.container.global_reliability.append(global_reliability)
#print(f"global_reliability = {global_reliability}")
def cleanup_data(a): # Assume minimisation
a2 = a.copy()
a2[np.isinf(a2)] = np.nan
return a2
def normalise(a, min_val, max_val): # Assume minimisation
a2 = a.copy()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
a2[a2>max_val] = np.nan
a2[a2<min_val] = min_val
return (a2-min_val)/(max_val-min_val)
def compute_global_reliability(grid_ref, grid_test): # Assume minimisation
if isinstance(grid_ref, containers.Grid):
base_ref = cleanup_data(grid_ref.quality_array[...,0])
else:
base_ref = cleanup_data(grid_ref.to_grid(shape=(64,64)).quality_array[...,0]) # XXX
#base_ref = cleanup_data(np.array([ind.fitness.values[0] for ind in grid_ref])) # XXX
if isinstance(grid_test, containers.Grid):
base_test = cleanup_data(grid_test.quality_array[...,0])
else:
base_test = cleanup_data(grid_test.to_grid(shape=(64,64)).quality_array[...,0]) # XXX
#base_test= cleanup_data(np.array([ind.fitness.values[0] for ind in grid_test])) # XXX
min_ref = np.nanmin(base_ref)
max_ref = np.nanmax(base_ref)
#min_ref = min( np.nanmin(base_ref), np.nanmin(base_test) )
#max_ref = max( np.nanmax(base_ref), np.nanmax(base_test) )
normalised_ref = normalise(base_ref, min_ref, max_ref)
normalised_test = normalise(base_test, min_ref, max_ref)
mask = ~np.isnan(normalised_ref)
data_ref = normalised_ref[mask]
data_test = normalised_test[mask]
#print(data_ref)
#print(data_test)
#sqerrors = np.nan_to_num(1. - np.square(data_ref - data_test, dtype=float))
#sqerrors[sqerrors < 0.0] = 0.
##print(sqerrors)
#global_reliability = np.sum(sqerrors) / len(data_ref)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
local_reliability = np.nan_to_num((1.-data_test) / (1.-data_ref))
local_reliability[local_reliability<0.0] = 0.
local_reliability[local_reliability>1.0] = 1.
global_reliability = np.sum(local_reliability) / len(data_ref)
return global_reliability
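# Reading guide for compute_global_reliability (minimisation is assumed throughout): both grids are
# normalised with the reference grid's min/max, then each reference-filled cell contributes the ratio
# (1 - test) / (1 - ref), clipped to [0, 1]; cells the test container never filled contribute 0 via
# nan_to_num. The final score is the mean over reference-filled cells, so 1.0 means the test container
# matched or beat the reference everywhere the reference grid is filled.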
def compute_ref(bench, budget=60000, dimension=2, nb_bins_per_feature=64, output_path="ref", algo_name=None, fitness_domain=((0., 120.),)):
if not os.path.exists(output_path):
os.makedirs(output_path)
# Create grid and algorithm
grid_ref = containers.Grid(shape=(nb_bins_per_feature,) * bench.nb_features, max_items_per_bin=1,
fitness_domain=bench.fitness_domain, features_domain=bench.features_domain)
algo_ref = algorithms.RandomSearchMutPolyBounded(grid_ref, budget=budget, batch_size=500,
dimension=dimension, optimisation_task=bench.default_task, ind_domain=bench.ind_domain,
name=algo_name)
# Create a logger to pretty-print everything and generate output data files
logger_ref = algorithms.TQDMAlgorithmLogger(algo_ref, log_base_path=output_path)
# Define evaluation function
eval_fn = bench.fn
# Run illumination process !
best = algo_ref.optimise(eval_fn)
# Print results info
#print(algo_ref.summary())
# Plot the results
plots.default_plots_grid(logger_ref, to_grid_parameters={'shape': (nb_bins_per_feature,) * bench.nb_features}, fitness_domain=fitness_domain)
return algo_ref, logger_ref
def compute_test(bench, algo_ref, dimension=3, output_path="test", algo_name=None, mut_pb=0.5, eta=20., fitness_domain=((0., 120.),)):
if not os.path.exists(output_path):
os.makedirs(output_path)
# Create container and algorithm
grid_test = containers.Grid(shape=(algo_ref.container.shape[0],) * bench.nb_features, max_items_per_bin=1,
fitness_domain=bench.fitness_domain, features_domain=bench.features_domain)
#grid_test = containers.NoveltyArchive(k=1, threshold_novelty=0.016, fitness_domain=bench.fitness_domain, features_domain=bench.features_domain, storage_type=list, depot_type=list)
grid_test.global_reliability = []
#algo_test = algorithms.RandomSearchMutPolyBounded(grid_test, budget=algo_ref.budget, batch_size=500,
# dimension=dimension, optimisation_task=bench.default_task, ind_domain=bench.ind_domain,
# name=algo_name)
algo_test = algorithms.MutPolyBounded(grid_test, budget=algo_ref.budget, batch_size=500,
dimension=dimension, optimisation_task=bench.default_task, ind_domain=bench.ind_domain,
#sel_pb = 1.0, init_pb = 0.0, mut_pb = 0.8, eta = 20., name=algo_name)
mut_pb = mut_pb, eta = eta, name=algo_name)
#grid_surrogate = containers.Grid(shape=(algo_ref.container.shape[0],) * bench.nb_features, max_items_per_bin=1,
# fitness_domain=bench.fitness_domain, features_domain=bench.features_domain)
#grid_test = containers.Grid(shape=(algo_ref.container.shape[0],) * bench.nb_features, max_items_per_bin=1,
# fitness_domain=bench.fitness_domain, features_domain=bench.features_domain)
#grid_test.global_reliability = []
#algo_surrogate = algorithms.RandomSearchMutPolyBounded(grid_surrogate, budget=5000, batch_size=500,
# dimension=dimension, optimisation_task=bench.default_task, ind_domain=bench.ind_domain)
#algo_illumination = algorithms.RandomSearchMutPolyBounded(grid_test, budget=algo_ref.budget, batch_size=500,
# dimension=dimension, optimisation_task=bench.default_task, ind_domain=bench.ind_domain)
#algo_test = algorithms.SAIL(illumination_algo=algo_illumination, acquisition_algo=algo_surrogate, name=algo_name)
algo_test.add_callback("tell", algorithms.partial(tell_callback, grid_ref=algo_ref.container))
#algo_test.add_callback("iteration", algorithms.partial(iteration_callback, grid_ref=algo_ref.container))
# Create a logger to pretty-print everything and generate output data files
logger_test = algorithms.TQDMAlgorithmLogger(algo_test, log_base_path=output_path)
# Define evaluation function
eval_fn = bench.fn
# Run illumination process !
best = algo_test.optimise(eval_fn)
# Print results info
#print(algo_test.summary())
# Plot the results
plots.default_plots_grid(logger_test, to_grid_parameters={'shape': algo_ref.container.shape}, fitness_domain=fitness_domain)
# Plot global_reliability per eval
#global_reliability = compute_global_reliability(grid_ref, grid_test)
#print(f"global_reliability = {global_reliability}")
plots.plot_evals(grid_test.global_reliability, os.path.join(logger_test.log_base_path, "global_reliability.pdf"), "global_reliability", ylim=(0., 1.))
print(f"dimension={dimension} global_reliability[-1]:", grid_test.global_reliability[-1])
return algo_test, logger_test
def compute_test2(bench, algo_ref, dimension=3, output_path="test", algo_name=None, mut_pb=0.5, mu=0., sigma=1.0, fitness_domain=((0., 120.),)):
if not os.path.exists(output_path):
os.makedirs(output_path)
# Create container and algorithm
grid_test = containers.Grid(shape=(algo_ref.container.shape[0],) * bench.nb_features, max_items_per_bin=1,
fitness_domain=bench.fitness_domain, features_domain=bench.features_domain)
grid_test.global_reliability = []
algo_test = algorithms.MutGaussian(grid_test, budget=algo_ref.budget, batch_size=500,
dimension=dimension, optimisation_task=bench.default_task, ind_domain=bench.ind_domain,
mut_pb = mut_pb, mu = mu, sigma=sigma, name=algo_name)
algo_test.add_callback("tell", algorithms.partial(tell_callback, grid_ref=algo_ref.container))
#algo_test.add_callback("iteration", algorithms.partial(iteration_callback, grid_ref=algo_ref.container))
# Create a logger to pretty-print everything and generate output data files
logger_test = algorithms.TQDMAlgorithmLogger(algo_test, log_base_path=output_path)
# Define evaluation function
eval_fn = bench.fn
# Run illumination process !
best = algo_test.optimise(eval_fn)
# Print results info
#print(algo_test.summary())
# Plot the results
plots.default_plots_grid(logger_test, to_grid_parameters={'shape': algo_ref.container.shape}, fitness_domain=fitness_domain)
# Plot global_reliability per eval
plots.plot_evals(grid_test.global_reliability, os.path.join(logger_test.log_base_path, "global_reliability.pdf"), "global_reliability", ylim=(0., 1.))
print(f"dimension={dimension} global_reliability[-1]:", grid_test.global_reliability[-1])
return algo_test, logger_test
from collections import OrderedDict
_linestyles = OrderedDict(
[('solid', (0, ())),
#('loosely dotted', (0, (1, 10))),
('dotted', (0, (1, 5))),
('densely dotted', (0, (1, 1))),
#('loosely dashed', (0, (5, 10))),
('dashed', (0, (5, 5))),
('densely dashed', (0, (5, 1))),
#('loosely dashdotted', (0, (3, 10, 1, 10))),
#('dashdotted', (0, (3, 5, 1, 5))),
('densely dashdotted', (0, (3, 1, 1, 1))),
#('loosely dashdotdotted', (0, (3, 10, 1, 10, 1, 10))),
('dashdotdotted', (0, (3, 5, 1, 5, 1, 5))),
('densely dashdotdotted', (0, (3, 1, 1, 1, 1, 1)))])
def plot_combined_global_reliability(algo_ref, algos, output_filename="global_reliability.pdf", figsize=(4.*golden_ratio,4.)):
assert(len(algos))
data_tests = [a.container.global_reliability for a in algos]
fig, ax = plt.subplots(figsize=figsize)
x = np.arange(len(data_tests[0]))
##linestyle_cycler = cycler('linestyle',['-','--',':','-.','-','--',':']) + cycler(color=plt.get_cmap("Set2",8).colors)
##linestyle_cycler = cycler('linestyle', list(_linestyles.values())[:8]) + cycler(color=plt.get_cmap("Dark2",8).colors)
#linestyle_cycler = cycler('linestyle', list(_linestyles.values())[:8]) + cycler(color=['r', 'r', 'r', 'r', 'g', 'g', 'g', 'g'])
#linestyle_cycler = cycler('linestyle', list(_linestyles.values())[:4] * 2) + cycler(color=['r', 'g', 'r', 'g', 'r', 'g', 'r', 'g'])
#linestyle_cycler = cycler('linestyle',['-','-','-','-',':',':',':',':']) + cycler(color=["#e41a1c", "#377eb8", "#4daf4a", "#984ea3", "#e41a1c", "#377eb8", "#4daf4a", "#984ea3"])
linestyle_cycler = cycler('linestyle',['-','-','-','-',':',':',':',':']) + cycler(color=["#e66101", "#fdb863", "#b2abd2", "#5e3c99", "#e66101", "#fdb863", "#b2abd2", "#5e3c99"])
ax.set_prop_cycle(linestyle_cycler)
plt.xticks(rotation=20)
for d, a in zip(data_tests, algos):
ax.plot(x, d, label=a.name, linewidth=3)
ax.set_ylim((0., 1.))
ax.xaxis.set_major_formatter(FormatStrFormatter('%.0E'))
plt.xlabel("Evaluations", fontsize=20)
plt.ylabel("Global reliability", fontsize=20)
for t in ax.xaxis.get_major_ticks(): t.label.set_fontsize(19)
for t in ax.yaxis.get_major_ticks(): t.label.set_fontsize(19)
#plt.tight_layout()
#plt.legend(title="Dimension", loc="lower right", fontsize=12, title_fontsize=14)
#box = ax.get_position()
#ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
plt.legend(loc="center left", fontsize=16, title_fontsize=16, bbox_to_anchor=(1.04, 0.5), borderaxespad=0)
fig.savefig(output_filename, bbox_inches="tight")
plt.close(fig)
def plot3D(bench, output_filename="plot3D.pdf", step=0.1):
def fn_arg0(ind):
return bench.fn(ind)[0][0]
fig = plt.figure(figsize=(4.*golden_ratio,4.))
ax = fig.add_subplot(111, projection='3d', azim=-19, elev=30, position=[0.25, 0.15, 0.7, 0.7])
X = np.arange(bench.ind_domain[0], bench.ind_domain[1], step)
Y = np.arange(bench.ind_domain[0], bench.ind_domain[1], step)
X, Y = np.meshgrid(X, Y)
Z = np.fromiter(map(fn_arg0, zip(X.flat,Y.flat)), dtype=float, count=X.shape[0]*X.shape[1]).reshape(X.shape)
ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=plt.get_cmap("inferno_r"), linewidth=0.2)
ax.set_xlabel("x0", fontsize=14)
ax.set_ylabel("x1", fontsize=14)
#ax.set_xlabel("Feature 1", fontsize=14)
#ax.set_ylabel("Feature 2", fontsize=14)
ax.set_zlabel("Fitness", fontsize=14)
# change fontsize
for t in ax.xaxis.get_major_ticks(): t.label.set_fontsize(14)
for t in ax.yaxis.get_major_ticks(): t.label.set_fontsize(14)
for t in ax.zaxis.get_major_ticks(): t.label.set_fontsize(14)
plt.tight_layout()
#fig.subplots_adjust(right=0.85, bottom=0.10, wspace=0.10)
fig.savefig(output_filename)
plt.close(fig)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=None, help="Numpy random seed")
parser.add_argument('-p', '--parallelismType', type=str, default='none', help = "Type of parallelism to use (none, concurrent, scoop)")
parser.add_argument('-o', '--outputDir', type=str, default="results", help = "Path of the output log files")
parser.add_argument('--bench', type=str, default="rastrigin", help = "Benchmark function to use")
args = parser.parse_args()
# Find random seed
if args.seed is not None:
seed = args.seed
else:
seed = np.random.randint(1000000)
# Update and print seed
np.random.seed(seed)
random.seed(seed)
print("Seed: %i" % seed)
# Find where to put logs
log_base_path = args.outputDir
# Create container and algorithm. Here we use MAP-Elites, by illuminating a Grid container by evolution.
sigma=1.0
step=0.1
if args.bench == "rastrigin": #
bench = benchmarks.RastriginBenchmark(nb_features=2)
fitness_domain = ((0., 120.),)
elif args.bench == "normalisedRastrigin":
bench = benchmarks.NormalisedRastriginBenchmark(nb_features=2)
fitness_domain = ((-np.inf, np.inf),)
elif args.bench == "sphere":
bench = benchmarks.SphereBenchmark(nb_features=2)
fitness_domain = ((-np.inf, np.inf),)
elif args.bench == "eightedSphere":
bench = benchmarks.WeightedSphereBenchmark(nb_features=2)
fitness_domain = ((-np.inf, np.inf),)
elif args.bench == "rotatedHyperEllipsoid":
bench = benchmarks.RotatedHyperEllipsoidBenchmark(nb_features=2)
fitness_domain = ((0., 4000.),)
elif args.bench == "rosenbrock":
bench = benchmarks.RosenbrockBenchmark(nb_features=2)
fitness_domain = ((-np.inf, np.inf),)
elif args.bench == "schwefel":
bench = benchmarks.SchwefelBenchmark(nb_features=2)
fitness_domain = ((0., np.inf),)
step=8.0
elif args.bench == "small_schwefel":
bench = benchmarks.SmallSchwefelBenchmark(nb_features=2)
fitness_domain = ((0., np.inf),)
step=4.0
elif args.bench == "griewangk":
bench = benchmarks.GriewangkBenchmark(nb_features=2)
fitness_domain = ((-np.inf, np.inf),)
elif args.bench == "sumOfPowers": #
bench = benchmarks.SumOfPowersBenchmark(nb_features=2)
fitness_domain = ((0., 2.),)
elif args.bench == "ackley":
bench = benchmarks.AckleyBenchmark(nb_features=2)
fitness_domain = ((0., 20.),)
elif args.bench == "styblinskiTang":
bench = benchmarks.StyblinskiTangBenchmark(nb_features=2)
fitness_domain = ((-120., 250.),)
elif args.bench == "levy":
bench = benchmarks.LevyBenchmark(nb_features=2)
fitness_domain = ((-np.inf, np.inf),)
elif args.bench == "perm0db":
bench = benchmarks.Perm0dbBenchmark(nb_features=2)
fitness_domain = ((-np.inf, np.inf),)
elif args.bench == "permdb":
bench = benchmarks.PermdbBenchmark(nb_features=2)
fitness_domain = ((-np.inf, np.inf),)
elif args.bench == "trid":
bench = benchmarks.TridBenchmark(nb_features=2)
fitness_domain = ((-np.inf, np.inf),)
elif args.bench == "zakharov": #
bench = benchmarks.ZakharovBenchmark(nb_features=2)
fitness_domain = ((0., 1000.),)
elif args.bench == "dixonPrice":
bench = benchmarks.DixonPriceBenchmark(nb_features=2)
fitness_domain = ((-np.inf, np.inf),)
elif args.bench == "powell":
bench = benchmarks.PowellBenchmark(nb_features=2)
fitness_domain = ((-np.inf, np.inf),)
elif args.bench == "michalewicz":
bench = benchmarks.MichalewiczBenchmark(nb_features=2)
fitness_domain = ((-np.inf, 0.),)
elif args.bench == "wavy": #
bench = benchmarks.WavyBenchmark(nb_features=2)
fitness_domain = ((0., 2.0),)
elif args.bench == "trigonometric02":
bench = benchmarks.Trigonometric02Benchmark(nb_features=2)
fitness_domain = ((1., np.inf),)
sigma=200.0
elif args.bench == "qing":
bench = benchmarks.QingBenchmark(nb_features=2)
fitness_domain = ((0., np.inf),)
sigma=200.0
elif args.bench == "small_qing":
bench = benchmarks.SmallQingBenchmark(nb_features=2)
fitness_domain = ((0., 500.),)
sigma=0.5
elif args.bench == "deb01":
bench = benchmarks.Deb01Benchmark(nb_features=2)
fitness_domain = ((-1., 1.),)
sigma=1.0
elif args.bench == "shubert04":
bench = benchmarks.Shubert04Benchmark(nb_features=2)
fitness_domain = ((-30., 30.),)
sigma=2.0
else:
raise f"Unknown benchmark '{args.bench}' !"
#fitness_domain = ((0., np.inf),)
# Plot 3D
plot3D(bench, output_filename=os.path.join(log_base_path, "plot3D.pdf"), step=step)
# Compute reference
algo_name_ref = args.bench + "-ref"
algo_ref, logger_ref = compute_ref(bench, budget=1000000, dimension=2, nb_bins_per_feature=64,
#algo_ref, logger_ref = compute_ref(bench, budget=100000, dimension=2, nb_bins_per_feature=64,
#algo_ref, logger_ref = compute_ref(bench, budget=1000, dimension=2, nb_bins_per_feature=64,
output_path=os.path.join(log_base_path, algo_name_ref), algo_name=algo_name_ref, fitness_domain=fitness_domain)
# Compute benchmark for several dimensions
#tested_dim = [3, 4, 6, 8, 10, 14]
tested_dim = [3, 6, 10, 14]
#tested_dim = [3]
algos = []
loggers = []
for dim in tested_dim:
#algo_name = f"Dimension {dim}"
algo_name = f"ME1 {dim} dim"
a,l = compute_test(bench, algo_ref, dimension=dim,
output_path=os.path.join(log_base_path, algo_name), algo_name=algo_name, fitness_domain=fitness_domain)
algos.append(a)
loggers.append(l)
#print(a.summary())
for dim in tested_dim:
algo_name = f"ME2 {dim} dim"
a,l = compute_test2(bench, algo_ref, dimension=dim, sigma=sigma,
output_path=os.path.join(log_base_path, algo_name), algo_name=algo_name, fitness_domain=fitness_domain)
algos.append(a)
loggers.append(l)
#print(a.summary())
# Make combined plots
plot_combined_global_reliability(algo_ref, algos, output_filename=os.path.join(log_base_path, "global_reliability.pdf"))
# MODELINE "{{{1
# vim:expandtab:softtabstop=4:shiftwidth=4:fileencoding=utf-8
# vim:foldmethod=marker
```
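The combined line-style/colour cycler used in `plot_combined_global_reliability` is a useful trick on its own. Below is a minimal, self-contained sketch of the same idea; the data, labels and output filename are made up for illustration.
```python
import numpy as np
import matplotlib.pyplot as plt
from cycler import cycler

# Pair each linestyle with a colour by adding two equal-length cyclers, then register the
# combination on the axes so successive plot() calls cycle through the pairs automatically.
fig, ax = plt.subplots(figsize=(6, 4))
styles = cycler('linestyle', ['-', '--', ':']) + cycler(color=["#e66101", "#b2abd2", "#5e3c99"])
ax.set_prop_cycle(styles)
x = np.linspace(0., 1., 100)
for k in range(3):
    ax.plot(x, x ** (k + 1), label=f"x^{k + 1}", linewidth=3)
ax.legend()
fig.savefig("prop_cycle_demo.pdf", bbox_inches="tight")
plt.close(fig)
```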
#### File: examples/bipedal_walker/bipedal_walker.py
```python
import qdpy
from qdpy.base import *
from qdpy.experiment import QDExperiment
# bipedal
from sim import Model, simulate, make_env
########## EXPERIMENT CLASS ########### {{{1
class BipedalWalkerExperiment(QDExperiment):
def reinit(self):
super().reinit()
self.env_name = self.config['game']['env_name']
self.init_model()
self.update_dimension()
def init_model(self):
self.model = Model(self.config['game'])
def update_dimension(self):
self.algo.dimension = self.model.param_count
def eval_fn(self, ind, render_mode = False):
env = make_env(self.env_name)
self.model.set_model_params(ind)
scores = simulate(self.model,
env,
render_mode=render_mode,
num_episode=self.config['indv_eps'])
ind.fitness.values = scores[self.fitness_type],
ind.features.values = [scores[x] for x in self.features_list]
return ind
########## BASE FUNCTIONS ########### {{{1
def parse_args():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--configFilename', type=str, default='conf/test.yaml', help = "Path of configuration file")
parser.add_argument('-o', '--resultsBaseDir', type=str, default='results/', help = "Path of results files")
parser.add_argument('-p', '--parallelismType', type=str, default='concurrent', help = "Type of parallelism to use")
parser.add_argument('--replayBestFrom', type=str, default='', help = "Path of results data file -- used to replay the best individual")
parser.add_argument('--seed', type=int, default=None, help="Numpy random seed")
return parser.parse_args()
def create_base_config(args):
base_config = {}
if len(args.resultsBaseDir) > 0:
base_config['resultsBaseDir'] = args.resultsBaseDir
return base_config
def create_experiment(args, base_config):
exp = BipedalWalkerExperiment(args.configFilename, args.parallelismType, seed=args.seed, base_config=base_config)
print("Using configuration file '%s'. Instance name: '%s'" % (args.configFilename, exp.instance_name))
return exp
def launch_experiment(exp):
exp.run()
def replay_best(args, exp):
import pickle
path = args.replayBestFrom
with open(path, "rb") as f:
data = pickle.load(f)
best = data['container'].best
exp.eval_fn(best, render_mode = True)
########## MAIN ########### {{{1
if __name__ == "__main__":
import traceback
args = parse_args()
base_config = create_base_config(args)
try:
exp = create_experiment(args, base_config)
if len(args.replayBestFrom) > 0:
replay_best(args, exp)
else:
launch_experiment(exp)
except Exception as e:
warnings.warn(f"Run failed: {str(e)}")
traceback.print_exc()
# MODELINE "{{{1
# vim:expandtab:softtabstop=4:shiftwidth=4:fileencoding=utf-8
# vim:foldmethod=marker
```
#### File: qdpy/examples/custom_eval_fn.py
```python
from qdpy import algorithms, containers, plots
from qdpy.base import ParallelismManager
import math
def eval_fn(ind):
"""An example evaluation function. It takes an individual as input, and returns the pair ``(fitness, features)``, where ``fitness`` and ``features`` are sequences of scores."""
normalization = sum((x for x in ind))
k = 10.
score = 1. - sum(( math.cos(k * ind[i]) * math.exp(-(ind[i]*ind[i])/2.) for i in range(len(ind)))) / float(len(ind))
fit0 = sum((x * math.sin(abs(x) * 2. * math.pi) for x in ind)) / normalization
fit1 = sum((x * math.cos(abs(x) * 2. * math.pi) for x in ind)) / normalization
features = (fit0, fit1)
return (score,), features
if __name__ == "__main__":
# Create container and algorithm. Here we use MAP-Elites, by illuminating a Grid container by evolution.
grid = containers.Grid(shape=(16,16), max_items_per_bin=1, fitness_domain=((-math.pi, math.pi),), features_domain=((0., 1.), (0., 1.)))
algo = algorithms.RandomSearchMutPolyBounded(grid, budget=3000, batch_size=500,
dimension=3, optimisation_task="minimisation")
# Create a logger to pretty-print everything and generate output data files
logger = algorithms.TQDMAlgorithmLogger(algo)
# Run illumination process !
with ParallelismManager("none") as pMgr:
best = algo.optimise(eval_fn, executor = pMgr.executor, batch_mode=False) # Disable batch_mode (steady-state mode) to ask/tell new individuals without waiting the completion of each batch
# Print results info
print("\n" + algo.summary())
# Plot the results
plots.default_plots_grid(logger)
print("\nAll results are available in the '%s' pickle file." % logger.final_filename)
print(f"""
To open it, you can use the following python code:
import pickle
# You may want to import your own packages if the pickle file contains custom objects
with open("{logger.final_filename}", "rb") as f:
data = pickle.load(f)
# ``data`` is now a dictionary containing all results, including the final container, all solutions, the algorithm parameters, etc.
grid = data['container']
print(grid.best)
print(grid.best.fitness)
print(grid.best.features)
""")
# MODELINE "{{{1
# vim:expandtab:softtabstop=4:shiftwidth=4:fileencoding=utf-8
# vim:foldmethod=marker
```
#### File: qdpy/qdpy/experiment.py
```python
__all__ = ["QDExperiment"]
#from collections.abc import Iterable
#from typing import Optional, Tuple, TypeVar, Union, Any, MutableSet, Mapping, MutableMapping, Sequence, MutableSequence, Callable, Tuple
#from typing_extensions import runtime, Protocol
#import inspect
from qdpy.algorithms import *
from qdpy.containers import *
from qdpy.plots import *
from qdpy.base import *
from qdpy import tools
import yaml
import random
import datetime
import pathlib
import traceback
from typing import Optional, Tuple, List, Iterable, Iterator, Any, TypeVar, Generic, Union, Sequence, MutableSet, MutableSequence, Type, Callable, Generator, Mapping, MutableMapping, overload
class QDExperiment(object):
def __init__(self, config_filename, parallelism_type = "concurrent", seed = None, base_config = None):
self._loadConfig(config_filename)
if base_config is not None:
self.config = {**self.config, **base_config}
self.parallelism_type = parallelism_type
self.config['parallelism_type'] = parallelism_type
self._init_seed(seed)
self.reinit()
def __getstate__(self):
odict = self.__dict__.copy()
del odict['algo']
del odict['container']
return odict
def _loadConfig(self, config_filename):
self.config_filename = config_filename
self.config_name = os.path.splitext(os.path.basename(config_filename))[0]
self.config = yaml.safe_load(open(config_filename))
def _get_features_list(self):
features_list = self.config['features_list']
fitness_type = self.config['fitness_type']
return features_list, fitness_type
def _define_domains(self):
self.features_list, self.fitness_type = self._get_features_list()
self.config['features_domain'] = []
for feature_name in self.features_list:
val = self.config['%s%s' % (feature_name, "Domain")]
self.config['features_domain'] += [tuple(val)]
self.config['fitness_domain'] = tuple(self.config['%s%s' % (self.fitness_type, "Domain")]),
def _init_seed(self, rnd_seed = None):
# Find random seed
if rnd_seed is not None:
seed = rnd_seed
elif "seed" in self.config:
seed = self.config["seed"]
else:
seed = np.random.randint(1000000)
# Update and print seed
np.random.seed(seed)
random.seed(seed)
print("Seed: %i" % seed)
def reinit(self):
# Name of the expe instance based on the current timestamp
self.instance_name = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
# Identify and create result data dir
if not self.config.get('dataDir'):
resultsBaseDir = self.config.get('resultsBaseDir') or "./results/"
dataDir = os.path.join(os.path.expanduser(resultsBaseDir), os.path.splitext(os.path.basename(self.config_filename))[0])
self.config['dataDir'] = dataDir
pathlib.Path(self.config['dataDir']).mkdir(parents=True, exist_ok=True)
# Find the domains of the fitness and features
self._define_domains()
default_config = {}
default_config["fitness_domain"] = self.config['fitness_domain']
default_config["features_domain"] = self.config['features_domain']
#print(default_config)
# Create containers and algorithms from configuration
factory = Factory()
assert "containers" in self.config, f"Please specify configuration entry 'containers' containing the description of all containers."
factory.build(self.config["containers"], default_config)
assert "algorithms" in self.config, f"Please specify configuration entry 'algorithms' containing the description of all algorithms."
factory.build(self.config["algorithms"])
assert "main_algorithm_name" in self.config, f"Please specify configuration entry 'main_algorithm' containing the name of the main algorithm."
self.algo = factory[self.config["main_algorithm_name"]]
self.container = self.algo.container
self.batch_mode = self.config.get('batch_mode', False)
self.log_base_path = self.config['dataDir']
# Create a logger to pretty-print everything and generate output data files
self.iteration_filenames = os.path.join(self.log_base_path, "iteration-%i_" + self.instance_name + ".p")
self.final_filename = os.path.join(self.log_base_path, "final_" + self.instance_name + ".p")
self.save_period = self.config.get('save_period', 0)
self.logger = TQDMAlgorithmLogger(self.algo,
iteration_filenames=self.iteration_filenames, final_filename=self.final_filename, save_period=self.save_period)
def run(self):
# Run illumination process !
with ParallelismManager(self.parallelism_type) as pMgr:
best = self.algo.optimise(self.eval_fn, executor = pMgr.executor, batch_mode=self.batch_mode) # Disable batch_mode (steady-state mode) to ask/tell new individuals without waiting the completion of each batch
print("\n------------------------\n")
print(self.algo.summary())
if isinstance(self.container, Grid):
grid = self.container
else:
# Transform the container into a grid
print("\n{:70s}".format("Transforming the container into a grid, for visualisation..."), end="", flush=True)
grid = Grid(self.container, shape=(10,10), max_items_per_bin=1, fitness_domain=self.container.fitness_domain, features_domain=self.container.features_domain, storage_type=list)
print("\tDone !")
print(grid.summary())
# Create plot of the performance grid
plot_path = os.path.join(self.log_base_path, f"performancesGrid-{self.instance_name}.pdf")
quality = grid.quality_array[(slice(None),) * (len(grid.quality_array.shape) - 1) + (0,)]
plotGridSubplots(quality, plot_path, plt.get_cmap("nipy_spectral"), grid.features_domain, grid.fitness_domain[0], nbTicks=None)
print("\nA plot of the performance grid was saved in '%s'." % os.path.abspath(plot_path))
# Create plot of the activity grid
plot_path = os.path.join(self.log_base_path, f"activityGrid-{self.instance_name}.pdf")
plotGridSubplots(grid.activity_per_bin, plot_path, plt.get_cmap("nipy_spectral"), grid.features_domain, [0, np.max(grid.activity_per_bin)], nbTicks=None)
print("\nA plot of the activity grid was saved in '%s'." % os.path.abspath(plot_path))
print("All results are available in the '%s' pickle file." % self.logger.final_filename)
def _removeTmpFiles(self, fileList):
keepTemporaryFiles = self.config.get('keepTemporaryFiles')
if not keepTemporaryFiles:
for f in fileList:
try:
if os.path.isfile(f):
os.remove(f)
else:
shutil.rmtree(f)
except:
pass
def eval_fn(self, ind):
#print(ind.name)
fitness = [np.random.uniform(x[0], x[1]) for x in self.config['fitness_domain']]
features = [np.random.uniform(x[0], x[1]) for x in self.config['features_domain']]
ind.fitness.values = fitness
ind.features = features
return ind
# MODELINE "{{{1
# vim:expandtab:softtabstop=4:shiftwidth=4:fileencoding=utf-8
# vim:foldmethod=marker
```
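For reference, here is a hypothetical configuration covering the keys that `QDExperiment` reads above, shown as a Python dict rather than the YAML file it is normally loaded from. The feature names and the empty `containers`/`algorithms` sections are placeholders only; their actual schema is defined by qdpy's `Factory`, which is not shown in this file.
```python
# Hypothetical config sketch (placeholders only) mirroring the keys accessed by QDExperiment.
example_config = {
    "fitness_type": "performance",
    "performanceDomain": [0.0, 1.0],            # becomes config['fitness_domain']
    "features_list": ["feature0", "feature1"],
    "feature0Domain": [0.0, 1.0],               # becomes config['features_domain'][0]
    "feature1Domain": [0.0, 1.0],               # becomes config['features_domain'][1]
    "containers": {},                           # container descriptions, built by Factory
    "algorithms": {},                           # algorithm descriptions, built by Factory
    "main_algorithm_name": "algo1",             # which built algorithm to run
    "resultsBaseDir": "./results/",
    "batch_mode": False,
    "save_period": 0,
}
```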
#### File: qdpy/qdpy/plots.py
```python
import os
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from functools import reduce
from operator import mul
from typing import Optional, Tuple, List, Iterable, Iterator, Any, TypeVar, Generic, Union, Sequence, MutableSet, MutableSequence, Type, Callable, Generator, Mapping, MutableMapping, overload
from qdpy.utils import is_iterable
from qdpy import containers
from qdpy import algorithms
########### Plots ########### {{{1
# TODO refactor name, etc
def plotGridSubplots(data, outputFilename, cmap, featuresBounds=((0., 1.), (0., 1.), (0., 1.), (0., 1.)), fitnessBounds=(0., 1.), drawCbar = True, xlabel = "", ylabel = "", cBarLabel = "", nbBins = None, nbTicks = None, binSizeInInches = 0.30):
"""TODO"""
# Verify data dimension is supported by this function
if len(data.shape) > 4:
raise ValueError("plotGridSubplots only supports up to 4 dimensions.")
elif len(data.shape) <= 2:
plotGrid(data, outputFilename, cmap, featuresBounds=featuresBounds, fitnessBounds=fitnessBounds, drawCbar=drawCbar, xlabel=xlabel, ylabel=ylabel, cBarLabel=cBarLabel, nbBins=nbBins, nbTicks=nbTicks)
return
# If the number of dimensions is odd, insert a dummy dimension so that it becomes even
if len(data.shape) % 2 == 1:
data = data.reshape((data.shape[0], 1) + data.shape[1:])
featuresBounds = (featuresBounds[0], (0., 0.)) + tuple(featuresBounds[1:])
if nbBins != None:
nbBins = (nbBins[0], 1) + nbBins[1:]
if not nbBins:
nbBins = data.shape
#data[0,:,:,:] = np.linspace(0., 1., nbBins[1] * nbBins[2] * nbBins[3]).reshape((nbBins[1], nbBins[2], nbBins[3]))
# Compute figure infos from nbBins
horizNbBins = nbBins[::2]
horizNbBinsProd = reduce(mul, horizNbBins, 1)
vertNbBins = nbBins[1::2]
vertNbBinsProd = reduce(mul, vertNbBins, 1)
totProp = horizNbBinsProd + vertNbBinsProd
upperlevelTot = nbBins[0] + nbBins[1]
# Determine figure size from nbBins infos
#figsize = [2.1 + 10. * horizNbBinsProd / upperlevelTot, 1. + 10. * vertNbBinsProd / upperlevelTot]
#if figsize[1] < 2:
# figsize[1] = 2.
figsize = [2.1 + horizNbBinsProd * binSizeInInches, 1. + vertNbBinsProd * binSizeInInches]
# Create figure
fig, axes = plt.subplots(nrows=nbBins[1], ncols=nbBins[0], figsize=figsize)
# Create subplots
for x in range(nbBins[0]):
for y in range(nbBins[1]):
ax = plt.subplot(nbBins[1], nbBins[0], (nbBins[1] - y - 1) * nbBins[0] + x + 1)
#ax = axes[x,y]
cax = drawGridInAx(data[x, y, 0:nbBins[2], 0:nbBins[3]], ax, cmap=cmap, featuresBounds=featuresBounds[-2:], fitnessBounds=fitnessBounds[-2:], aspect="equal", xlabel=xlabel, ylabel=ylabel, nbBins=(nbBins[2], nbBins[3]), nbTicks=nbTicks)
plt.tight_layout()
if drawCbar:
fig.subplots_adjust(right=0.85, wspace=0.40)
#cbarAx = fig.add_axes([0.90, 0.15, 0.01, 0.7])
if figsize[0] < 4.:
cbarAx = fig.add_axes([0.75, 0.15, 0.02, 0.7])
elif figsize[0] < 6.:
cbarAx = fig.add_axes([0.80, 0.15, 0.02, 0.7])
elif figsize[0] < 10.:
cbarAx = fig.add_axes([0.85, 0.15, 0.02, 0.7])
else:
cbarAx = fig.add_axes([0.90, 0.15, 0.02, 0.7])
cbar = fig.colorbar(cax, cax=cbarAx, format="%.2f")
cbar.ax.tick_params(labelsize=20)
cbar.ax.set_ylabel(cBarLabel, fontsize=22)
fig.savefig(outputFilename)
# TODO refactor name, etc
def drawGridInAx(data, ax, cmap, featuresBounds, fitnessBounds, aspect="equal", xlabel = "", ylabel = "", nbBins=None, nbTicks = 5):
# Determine bounds
vmin = fitnessBounds[0]
if np.isnan(vmin) or np.isinf(vmin):
vmin = np.nanmin(data)
vmax = fitnessBounds[1]
if np.isnan(vmax) or np.isinf(vmax):
vmax = np.nanmax(data)
# Draw grid
cax = ax.imshow(data.T, interpolation="none", cmap=cmap, vmin=vmin, vmax=vmax, aspect=aspect)
ax.invert_yaxis()
# Define the number of ticks on x,y axis
if is_iterable(nbTicks):
if len(nbTicks) != 2:
raise ValueError("nbTicks can be None, an Integer or a Sequence of size 2.")
nbTicksX, nbTicksY = nbTicks
elif nbTicks == None:
nbTicksX = round(pow(nbBins[0], 1./2.))
nbTicksX = nbTicksX if nbTicksX % 2 == 0 else nbTicksX + 1
nbTicksY = round(pow(nbBins[1], 1./2.))
nbTicksY = nbTicksY if nbTicksY % 2 == 0 else nbTicksY + 1
else:
if nbBins[0] > nbBins[1]:
nbTicksX = nbTicks
nbTicksY = int(nbTicksX * nbBins[1] / nbBins[0])
elif nbBins[1] > nbBins[0]:
nbTicksY = nbTicks
nbTicksX = int(nbTicksY * nbBins[0] / nbBins[1])
else:
nbTicksX = nbTicksY = nbTicks
# Verify that the number of ticks is valid
if nbTicksX > nbBins[0] or nbTicksX < 1:
nbTicksX = min(nbBins[0], nbTicks)
if nbTicksY > nbBins[1] or nbTicksY < 1:
nbTicksY = min(nbBins[1], nbTicks)
# Set ticks
ax.xaxis.set_tick_params(which='major', left=True, bottom=True, top=False, right=False)
ax.yaxis.set_tick_params(which='major', left=True, bottom=True, top=False, right=False)
if len(featuresBounds) > 1:
xticks = list(np.arange(0, data.shape[0] + 1, data.shape[0] / nbTicksX))
yticks = list(np.arange(0, data.shape[1] + 1, data.shape[1] / nbTicksY))
deltaFeature0 = featuresBounds[0][1] - featuresBounds[0][0]
ax.set_xticklabels([round(float(x / float(data.shape[0]) * deltaFeature0 + featuresBounds[0][0]), 2) for x in xticks], fontsize=22)
deltaFeature1 = featuresBounds[1][1] - featuresBounds[1][0]
ax.set_yticklabels([round(float(y / float(data.shape[1]) * deltaFeature1 + featuresBounds[1][0]), 2) for y in yticks], fontsize=22)
plt.xticks(xticks, rotation='vertical')
else:
yticks = list(np.arange(0, data.shape[1] + 1, data.shape[1] / nbTicksY))
deltaFeature0 = featuresBounds[0][1] - featuresBounds[0][0]
ax.set_yticklabels([round(float(y / float(data.shape[1]) * deltaFeature0 + featuresBounds[0][0]), 2) for y in yticks], fontsize=22)
plt.xticks([])
ax.set_xticklabels([])
if nbBins[1] == 1:
yticks = []
plt.yticks(yticks)
# Draw grid
ax.xaxis.set_tick_params(which='minor', direction="in", left=False, bottom=False, top=False, right=False)
ax.yaxis.set_tick_params(which='minor', direction="in", left=False, bottom=False, top=False, right=False)
ax.set_xticks(np.arange(-.5, data.shape[0], 1), minor=True)
ax.set_yticks(np.arange(-.5, data.shape[1], 1), minor=True)
#ax.grid(which='minor', color=(0.8,0.8,0.8,0.5), linestyle='-', linewidth=0.1)
ax.set_xlabel(xlabel, fontsize=25)
ax.set_ylabel(ylabel, fontsize=25)
ax.autoscale_view()
return cax
# TODO refactor name, etc
def plotGrid(data, outputFilename, cmap, featuresBounds=[(0., 1.), (0., 1.)], fitnessBounds=(0., 1.), drawCbar = True, xlabel = "", ylabel = "", cBarLabel = "", nbBins = None, nbTicks = None):
if len(data.shape) == 1:
data = data.reshape((data.shape[0], 1))
featuresBounds = tuple(featuresBounds) + ((0., 0.),)
if nbBins != None:
nbBins = nbBins + (1,)
elif len(data.shape) > 2:
raise ValueError("plotGrid only supports 1 ou 2-dimensional data.")
if not nbBins:
nbBins = data.shape
figsize = [2.1 + 10. * nbBins[0] / (nbBins[0] + nbBins[1]), 1. + 10. * nbBins[1] / (nbBins[0] + nbBins[1])]
aspect = "equal"
if figsize[1] < 2:
figsize[1] = 2.
aspect = "auto"
fig, ax = plt.subplots(figsize=figsize)
cax = drawGridInAx(data, ax, cmap=cmap, featuresBounds=featuresBounds, fitnessBounds=fitnessBounds, aspect=aspect, xlabel=xlabel, ylabel=ylabel, nbBins=nbBins, nbTicks=nbTicks)
if drawCbar:
divider = make_axes_locatable(ax)
#cax2 = divider.append_axes("right", size="5%", pad=0.15)
cax2 = divider.append_axes("right", size=0.5, pad=0.15)
cbar = fig.colorbar(cax, cax=cax2, format="%.2f")
cbar.ax.tick_params(labelsize=22)
cbar.ax.set_ylabel(cBarLabel, fontsize=24)
plt.tight_layout()
fig.savefig(outputFilename)
def plot_evals(logger, output_filename, key="max", ylim=None, ylabel=None, figsize=(4., 4.)):
"""Plot the evo of statistical parameter `key` according to the number of evaluations. """
# Retrieve evaluation data for key `key`
if isinstance(logger, algorithms.AlgorithmLogger):
data = logger.evals[key]
else:
data = logger
#print(data)
fig, ax = plt.subplots(figsize=figsize)
#ax = fig.add_subplot(111)
#fig.subplots_adjust(bottom=0.3)
x = np.arange(len(data))
# Normalise data
tmp_shape = 1
for i in x:
if is_iterable(data[i]):
tmp_shape = max(tmp_shape, len(data[i]))
data_shape = (len(data), tmp_shape)
normalised_data = np.zeros(data_shape)
for i in x:
if is_iterable(data[i]):
normalised_data[i] = np.array(data[i])
else:
normalised_data[i, 0] = data[i]
ax.plot(x, normalised_data, color='k')
if ylim is not None:
ax.set_ylim(ylim)
#x = np.arange(0, nb_iterations+1, 25)
#if len(x) > 4:
# x = x[::2]
#tsplot(ax, data, color='k')
#ax.set_ylim([0., 1.])
plt.xlabel("Evaluations", fontsize=14)
#plt.xticks(x, fontsize=18)
#ax.set_xticklabels([str(i * args.nbEvalsPerIt) for i in x])
ylabel_ = ylabel if ylabel is not None else key
plt.ylabel(ylabel_, fontsize=14)
#plt.yticks(fontsize=18)
#sns.despine()
#plt.tight_layout(rect=[0, 0, 1.0, 0.95])
plt.tight_layout()
fig.savefig(output_filename)
plt.close(fig)
def plot_iterations(logger, output_filename, key="max", ylim=None, ylabel=None, figsize=(4., 4.)):
"""Plot the evo of statistical parameter `key` according to the number of iterations."""
# Retrieve evaluation data for key `key`
if isinstance(logger, algorithms.AlgorithmLogger):
data = logger.iterations[key]
else:
data = logger
fig, ax = plt.subplots(figsize=figsize)
x = np.arange(len(data))
# Normalise data
tmp_shape = 1
for i in x:
if is_iterable(data[i]):
tmp_shape = max(tmp_shape, len(data[i]))
data_shape = (len(data), tmp_shape)
normalised_data = np.zeros(data_shape)
for i in x:
if is_iterable(data[i]):
normalised_data[i] = np.array(data[i])
else:
normalised_data[i, 0] = data[i]
ax.plot(x, normalised_data, color='k')
if ylim is not None:
ax.set_ylim(ylim)
plt.xlabel("Iterations", fontsize=14)
ylabel_ = ylabel if ylabel is not None else key
plt.ylabel(ylabel_, fontsize=14)
plt.tight_layout()
fig.savefig(output_filename)
plt.close(fig)
def default_plots_grid(logger, output_dir=None, to_grid_parameters={}, fitness_domain=None):
"""Make all default plots for algorithms using grid-based or grid-convertible containers."""
container = logger.algorithms[0].container # XXX
if output_dir is None:
output_dir = logger.log_base_path
plot_evals(logger, os.path.join(output_dir, "./evals_fitnessmax0.pdf"), "max0", ylabel="Fitness")
ylim_contsize = (0, len(container)) if np.isinf(container.capacity) else (0, container.capacity)
plot_evals(logger, os.path.join(output_dir, "./evals_contsize.pdf"), "cont_size", ylim=ylim_contsize, ylabel="Container size")
plot_iterations(logger, os.path.join(output_dir, "./iterations_nbupdated.pdf"), "nb_updated", ylabel="Number of updated bins")
if isinstance(container, containers.Grid):
grid = container
else:
if 'shape' not in to_grid_parameters:
to_grid_parameters['shape'] = (32,) * len(container.features_domain)
grid = container.to_grid(**to_grid_parameters)
plot_path = os.path.join(output_dir, "performancesGrid.pdf")
cmap_perf = "inferno" if logger.algorithms[0].optimisation_task == "maximisation" else "inferno_r"
fitness_domain = grid.fitness_domain if fitness_domain is None else fitness_domain
plotGridSubplots(grid.quality_array[... ,0], plot_path, plt.get_cmap(cmap_perf), grid.features_domain, fitness_domain[0], nbTicks=None)
plot_path = os.path.join(output_dir, "activityGrid.pdf")
max_activity = np.max(grid.activity_per_bin)
plotGridSubplots(grid.activity_per_bin, plot_path, plt.get_cmap("Reds", max_activity), grid.features_domain, [0, max_activity], nbTicks=None)
# MODELINE "{{{1
# vim:expandtab:softtabstop=4:shiftwidth=4:fileencoding=utf-8
# vim:foldmethod=marker
```
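Because `plot_evals` treats its first argument as raw data whenever it is not an `AlgorithmLogger`, it also works on a plain sequence of per-evaluation values (this is how the artificial-landscapes script plots `global_reliability` above). A small self-contained sketch; the data and filename are made up:
```python
import numpy as np
from qdpy import plots

# Plot a plain list of per-evaluation scores without going through a logger.
scores = list(np.linspace(0., 1., 200) ** 0.5)
plots.plot_evals(scores, "dummy_scores.pdf", key="score", ylim=(0., 1.), ylabel="Score")
```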
#### File: qdpy/qdpy/tools.py
```python
import numpy as np
import random
from typing import Sequence, Callable, Tuple
from itertools import repeat
import copy
from qdpy.phenotype import *
from qdpy.base import *
from qdpy import containers
########### SELECTION ########### {{{1
def sel_random(collection: Sequence[Any]) -> Sequence[Any]:
"""Select and return one individual at random among `collection`.
Parameters
----------
:param collection: Sequence[Any]
The individuals to select from.
"""
return random.choice(collection)
def non_trivial_sel_random(container: containers.Container) -> Any:
"""Select and return one individual at random among `container`.
Ignore individuals with a trivial fitness (i.e. equal to the minimum fitness value),
except if there are only individuals with trivial fitness.
Parameters
----------
:param container: Container
The individuals to select from.
"""
fst_ind: Any = container[0]
min_fitness: Any = copy.deepcopy(fst_ind.fitness)
assert container.fitness_domain is not None and len(min_fitness.values) == len(container.fitness_domain), f"You must specify `fitness_domain` in Container, and use individuals with fitness values of the same length as `fitness_domain`."
min_fitness.values = tuple([x[0] for x in container.fitness_domain])
candidates: MutableSequence[Any] = [ind for ind in container if ind.fitness.dominates(min_fitness)]
if len(candidates):
return random.choice(candidates)
else:
return random.choice(container)
def sel_grid_roulette(collection: Sequence[Any]) -> Sequence[Any]:
"""Select and return one individual at random (using a roulette selection) from a random bin of a grid.
Parameters
----------
:param collection: Grid
The grid containing individuals.
"""
assert(isinstance(collection, containers.Grid))
assert(len(collection))
# Select randomly an occupied bin of the grid
tmp_idx = random.randint(0, len(collection)-1)
tmp_ind: IndividualLike = collection[tmp_idx]
bin_coord = collection.index_grid(tmp_ind.features)
bin_pop = collection.solutions[bin_coord]
# Roulette selection of ind within this bin
sum_fit_val = [sum(i.fitness.values) for i in bin_pop]
sum_all_fit = sum(sum_fit_val)
probs = [f / sum_all_fit for f in sum_fit_val]
return random.choices(bin_pop, weights=probs)[0]
########### MUTATIONS ########### {{{1
def mut_gaussian(individual: MutableSequence[Any], mu: float, sigma: float, mut_pb: float) -> MutableSequence[Any]:
"""Return a gaussian mutation of mean `mu` and standard deviation `sigma`
on selected items of `individual`. `mut_pb` is the probability for each
item of `individual` to be mutated.
Mutations are applied directly on `individual`, which is then returned.
Parameters
----------
:param individual
The individual to mutate.
:param mu: float
The mean of the gaussian mutation.
:param sigma: float
The standard deviation of the gaussian mutation.
:param mut_pb: float
The probability for each item of `individual` to be mutated.
"""
for i in range(len(individual)):
if random.random() < mut_pb:
individual[i] += random.gauss(mu, sigma)
return individual
def mut_polynomial_bounded(individual: MutableSequence[Any], eta: float, low: float, up: float, mut_pb: float) -> MutableSequence[Any]:
"""Return a polynomial bounded mutation, as defined in the original NSGA-II paper by Deb et al.
Mutations are applied directly on `individual`, which is then returned.
Inspired from code from the DEAP library (https://github.com/DEAP/deap/blob/master/deap/tools/mutation.py).
Parameters
----------
:param individual
The individual to mutate.
:param eta: float
Crowding degree of the mutation.
A high ETA will produce mutants close to its parent,
a small ETA will produce offspring with more differences.
:param low: float
Lower bound of the search domain.
:param up: float
Upper bound of the search domain.
:param mut_pb: float
The probability for each item of `individual` to be mutated.
"""
for i in range(len(individual)):
if random.random() < mut_pb:
x = individual[i]
delta_1 = (x - low) / (up - low)
delta_2 = (up - x) / (up - low)
rand = random.random()
mut_pow = 1. / (eta + 1.)
if rand < 0.5:
xy = 1. - delta_1
val = 2. * rand + (1. - 2. * rand) * xy**(eta + 1.)
delta_q = val**mut_pow - 1.
else:
xy = 1. - delta_2
val = 2. * (1. - rand) + 2. * (rand - 0.5) * xy**(eta + 1.)
delta_q = 1. - val**mut_pow
x += delta_q * (up - low)
x = min(max(x, low), up)
individual[i] = x
return individual
########### COMBINED OPERATORS ########### {{{1
def sel_or_init(collection: Sequence[IndividualLike], base_ind: IndividualLike,
sel_fn: Callable, sel_pb: float,
init_fn: Callable, init_pb: float = 0., return_flag: bool = True):
"""Either select an individual from `collection` by using function `sel_pb`,
or initialise a new individual by using function `init_pb`.
If `collection` is empty, it will always initialise a new individual, not perform selection.
Parameters
----------
:param collection: Sequence[IndividualLike]
The individuals to select from.
:param base_ind: IndividualLike
The base individual to initialise.
:param sel_fn: Callable
The selection function.
:param sel_pb: float
The probability of performing selection.
:param init_fn: Callable
The initialisation function.
:param init_pb: float
The probability of performing initialisation.
:param return_flag: bool
If set to True, the function will return a Tuple[IndividualLike, bool] with a first item corresponding
to the selected or initialised individual, and the second item a flag set to True if the first item
was selected, and set to False if it was initialised.
If set to False, the function will return the selected or initialised IndividualLike.
"""
def ret(res, f):
return (res, f) if return_flag else res
if len(collection) == 0:
return ret(init_fn(base_ind), False)
operation = np.random.choice(range(2), p=[sel_pb, init_pb])
if operation == 0: # Selection
return ret(sel_fn(collection), True)
else: # Initialisation
return ret(init_fn(base_ind), False)
def mut_or_cx(individuals: Union[IndividualLike, Sequence[IndividualLike]],
mut_fn: Callable, cx_fn: Callable) -> Sequence[IndividualLike]:
"""Either perform a mutation (using function `mut_fn`) or a crossover (using function `cx_fn`)
depending on the nature and length of `individuals`.
If `individuals` is an IndividualLike or a Sequence of one IndividualLike, a mutation will be performed.
If `individuals` is a Sequence of two IndividualLike, a crossover will be performed.
Parameters
----------
:param individuals: Union[IndividualLike, Sequence[IndividualLike]]
The individual(s) to mutate or crossover.
:param mut_fn: Callable
The mutation function.
:param cx_fn: Callable
The crossover function.
Return
------
The resulting individual(s).
"""
if isinstance(individuals, IndividualLike):
return [mut_fn(individuals)]
elif isinstance(individuals, Sequence) and len(individuals) == 1:
return [mut_fn(individuals[0])]
elif isinstance(individuals, Sequence) and len(individuals) > 1:
return cx_fn(individuals)
else:
raise ValueError(f'`individuals` can be an Individual or a Sequence.')
# MODELINE "{{{1
# vim:expandtab:softtabstop=4:shiftwidth=4:fileencoding=utf-8
# vim:foldmethod=marker
```
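A short usage sketch for the mutation operators defined above, applied to plain list-based individuals (the gene values are arbitrary):
```python
import random
from qdpy import tools

random.seed(42)
# Gaussian mutation: each gene perturbed with probability mut_pb (in place, and returned).
ind_a = tools.mut_gaussian([0.5, -0.25, 0.1], mu=0., sigma=0.2, mut_pb=0.5)
# Polynomial bounded mutation: every gene perturbed here (mut_pb=1.0), kept inside [low, up].
ind_b = tools.mut_polynomial_bounded([0.5, -0.25, 0.1], eta=20., low=-1., up=1., mut_pb=1.0)
print(ind_a, ind_b)
```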
#### File: qdpy/qdpy/utils.py
```python
from collections.abc import Iterable
from typing import Optional, Tuple, TypeVar, Union, Any, MutableSet, Mapping, MutableMapping, Sequence, MutableSequence, Callable, Tuple
from typing_extensions import runtime, Protocol
import inspect
import numpy as np
########### UTILS ########### {{{1
def is_iterable(obj: Any) -> bool:
"""Return if ``obj`` is iterable or not."""
return isinstance(obj, Iterable)
#def in_bounds(val: Union[float, Sequence[float]], domain: Union[DomainLike, Sequence[DomainLike]]) -> bool:
def in_bounds(val: Any, domain: Any) -> bool:
"""Return if ``val`` (if a value) or all values in ``val`` (if an iterable) are in ``domain``."""
#assert len(domain) >= 2, f"``domain`` must be a 2-tuple of numbers or a sequence of 2-tuples of numbers."
if isinstance(val, Sequence) or isinstance(val, np.ndarray):
if isinstance(domain[0], Sequence) or isinstance(domain[0], np.ndarray):
if len(val) == len(domain):
return all((v >= d[0] and v <= d[1] for v, d in zip(val, domain)))
else:
raise ValueError(f"if ``val`` is a Sequence, ``domain`` must have the same length as ``val``.")
else:
return all((v >= domain[0] and v <= domain[1] for v in val))
else:
if isinstance(domain[0], Sequence) or isinstance(domain[0], np.ndarray):
raise ValueError(f"if ``val`` is not a Sequence, ``domain`` must be a 2-tuple of numbers.")
else:
return val >= domain[0] and val <= domain[1]
#def _hashify(item):
# """Verify if *item* is hashable, if not, try and return it as a tuple."""
# if isinstance(item, collections.abc.Hashable):
# return item
# else:
# return tuple(item)
def tuplify(item: Union[Any, Sequence[Any]]) -> Tuple[Any, ...]:
if isinstance(item, Sequence):
return tuple(item)
else:
return (item,)
def argsort(a, **kwargs):
return sorted(range(len(a)), key=a.__getitem__, **kwargs)
########### NUMBA ########### {{{1
def _dummyJit(*args, **kwargs):
"""
Dummy version of jit decorator, does nothing
"""
if len(args) == 1 and callable(args[0]):
return args[0]
else:
def wrap(func):
return func
return wrap
try:
import numba
from numba import jit
except ImportError:
jit = _dummyJit
# MODELINE "{{{1
# vim:expandtab:softtabstop=4:shiftwidth=4:fileencoding=utf-8
# vim:foldmethod=marker
``` |
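The two calling conventions accepted by `in_bounds`, in a quick sketch:
```python
from qdpy.utils import in_bounds

print(in_bounds(0.3, (0., 1.)))                       # scalar against one 2-tuple domain -> True
print(in_bounds([0.3, 0.9], (0., 1.)))                # every value against the same domain -> True
print(in_bounds([0.3, 1.5], ((0., 1.), (0., 1.))))    # per-dimension domains -> False (1.5 > 1.0)
```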
{
"source": "JiangZehua/control-pcgrl",
"score": 2
} |
#### File: JiangZehua/control-pcgrl/evolve.py
```python
import argparse
import gc
import json
import os
import pickle
import pprint
import sys
import time
from datetime import datetime
from timeit import default_timer as timer
from pathlib import Path
from pdb import set_trace as TT
from random import randint
import cv2
from typing import Tuple
import gym
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import psutil
import ray
import scipy
import torch as th
import torch.nn.functional as F
from gym import envs
from numba import njit
from qdpy.phenotype import Fitness, Features
from ribs.archives import GridArchive
from ribs.archives._add_status import AddStatus
from ribs.emitters import (
GradientImprovementEmitter,
ImprovementEmitter,
OptimizingEmitter,
)
from ribs.emitters.opt import CMAEvolutionStrategy
from ribs.optimizers import Optimizer
from ribs.visualize import grid_archive_heatmap
from torch import ByteTensor, Tensor, nn
from torch.nn import Conv2d, CrossEntropyLoss, Linear
from torch.utils.tensorboard import SummaryWriter
import deap
import deap.tools
import deap.algorithms
import qdpy
from qdpy import algorithms, containers, benchmarks, plots, tools
from deap.base import Toolbox
import graphviz
import warnings
import copy
# Use for .py file
from tqdm import tqdm
import gym_pcgrl
from evo_args import get_args
from gym_pcgrl.envs.helper import get_int_prob, get_string_map
# from example_play_call import random_player
# gvgai_path = '/home/sme/GVGAI_GYM/'
# sys.path.insert(0,gvgai_path)
# from play import play
# Use for notebook
# from tqdm.notebook import tqdm
# Use print to confirm access to local pcgrl gym
# print([env.id for env in envs.registry.all() if "gym_pcgrl" in env.entry_point])
"""
/// Required Environment ///
conda create -n ribs-pt python=3.7
pip install scipy==1.2.0 # must use this version with GVGAI_GYM
conda install -c conda-forge notebook
conda install pytorch torchvision torchaudio -c pytorch
conda install tensorboard
pip install 'ribs[all]' gym~=0.17.0 Box2D~=2.3.10 tqdm
git clone https://github.com/amidos2006/gym-pcgrl.git
cd gym-pcgrl # Must run in project root folder for access to pcgrl modules
/// Instructions ///
To start TensorBoard run the following command:
$ tensorboard --logdir=runs
Then go to:
http://localhost:6006
/// Resources ///
Sam's example code:
https://github.com/smearle/gol-cmame/blob/master/gol_cmame.py
PCGRL Repo:
https://github.com/amidos2006/gym-pcgrl
Neural CA Paper:
https://arxiv.org/pdf/2009.01398.pdf
RIBS examples:
https://docs.pyribs.org/en/stable/tutorials/lunar_lander.html
"""
TARGETS_PENALTY_WEIGHT = 10
def draw_net(config: object, genome: object, view: object = False, filename: object = None, node_names: object = None, show_disabled: object = True,
prune_unused: object = False,
node_colors: object = None, fmt: object = 'svg') -> object:
""" Receives a genome and draws a neural network with arbitrary topology. """
# Attributes for network nodes.
if graphviz is None:
warnings.warn("This display is not available due to a missing optional dependency (graphviz)")
return
if node_names is None:
node_names = {}
assert type(node_names) is dict
if node_colors is None:
node_colors = {}
assert type(node_colors) is dict
node_attrs = {
'shape': 'circle',
'fontsize': '9',
'height': '0.2',
'width': '0.2'}
dot = graphviz.Digraph(format=fmt, node_attr=node_attrs)
inputs = set()
for k in config.genome_config.input_keys:
inputs.add(k)
name = node_names.get(k, str(k))
input_attrs = {'style': 'filled', 'shape': 'box', 'fillcolor': node_colors.get(k, 'lightgray')}
dot.node(name, _attributes=input_attrs)
outputs = set()
for k in config.genome_config.output_keys:
outputs.add(k)
name = node_names.get(k, str(k))
node_attrs = {'style': 'filled', 'fillcolor': node_colors.get(k, 'lightblue')}
dot.node(name, _attributes=node_attrs)
if prune_unused:
connections = set()
for cg in genome.connections.values():
if cg.enabled or show_disabled:
connections.add(cg.key)
used_nodes = copy.copy(outputs)
pending = copy.copy(outputs)
while pending:
new_pending = set()
for a, b in connections:
if b in pending and a not in used_nodes:
new_pending.add(a)
used_nodes.add(a)
pending = new_pending
else:
used_nodes = set(genome.nodes.keys())
for n in used_nodes:
if n in inputs or n in outputs:
continue
attrs = {'style': 'filled', 'fillcolor': node_colors.get(n, 'white')}
dot.node(str(n), _attributes=attrs)
for cg in genome.connections.values():
if cg.enabled or show_disabled:
#if cg.input not in used_nodes or cg.output not in used_nodes:
# continue
input, output = cg.key
a = node_names.get(input, str(input))
b = node_names.get(output, str(output))
style = 'solid' if cg.enabled else 'dotted'
color = 'green' if cg.weight > 0 else 'red'
width = str(0.1 + abs(cg.weight / 5.0))
dot.edge(a, b, _attributes={'style': style, 'color': color, 'penwidth': width})
dot.render(filename, view=view)
return dot
def save_level_frames(level_frames, model_name):
renders_dir = os.path.join(SAVE_PATH, "renders")
if not os.path.isdir(renders_dir):
os.mkdir(renders_dir)
model_dir = os.path.join(renders_dir, "model_{}".format(model_name))
if not os.path.isdir(model_dir):
os.mkdir(model_dir)
for j, im in enumerate(level_frames):
im.save(
os.path.join(
model_dir, "frame_{:0>4d}.png".format(j)
)
)
def get_qd_score(archive, env, bc_names):
max_loss = env.get_max_loss(ctrl_metrics=bc_names)
max_loss = max_loss * TARGETS_PENALTY_WEIGHT
if ALGO == 'ME':
# qd_score = archive.qd_score() # we need to specify lower *and upper* bounds for this
# TODO: work out max diversity bonus to make this possible ?? Would this bias scores between n. latent seeds
# though?
qd_score = np.nansum(archive.quality_array + max_loss)
else:
df = archive.as_pandas(include_solutions=False)
qd_score = (df['objective'] + max_loss).sum()
return qd_score
def save_train_stats(objs, archive, env, bc_names, itr=None):
train_time_stats = {
"qd_score": get_qd_score(archive, env, bc_names),
"objective": get_stats(objs),
}
if itr is not None:
save_path = os.path.join(SAVE_PATH, "checkpoint_{}".format(itr))
else:
save_path = SAVE_PATH
json.dump(
train_time_stats,
open(os.path.join(save_path, "train_time_stats.json"), "w"),
indent=4,
)
def get_stats(stats):
"""Take 1D numpy array of data and return some fun facts in the form of a dictionary."""
return {
"mean": np.nanmean(stats),
"std": np.nanstd(stats),
"max": np.nanmax(stats),
"min": np.nanmin(stats),
}
def save_grid(csv_name="levels", d=4):
fontsize = 32
if "zelda" in PROBLEM:
d = 3
fontsize = int(fontsize * d / 4)
elif "smb" in PROBLEM:
d = 4
if CMAES:
# TODO: implement me
return
# save grid using csv file
# get path to CSV
levels_path = os.path.join(SAVE_PATH, csv_name + ".csv")
# get env name
env_name = "{}-{}-v0".format(PROBLEM, REPRESENTATION)
# create env
env = gym.make(env_name)
map_width = env._prob._width
df = pd.read_csv(levels_path, header=0, skipinitialspace=True)
# .rename(
# index=str,
# header=0,
# columns={
# 0: "level",
# 1: "batch_reward",
# 2: "variance",
# 3: "diversity",
# 4: "targets",
# },
# )
bc_names = []
for i in range(5, 7): # assume 2 BCs
bc_names.append(df.columns[i])
# look for the most valid levels
targets_thresh = 0.0
og_df = df
df = og_df[og_df['targets'] == targets_thresh]
last_len = len(df)
while len(df) < d**2 and targets_thresh > og_df['targets'].min():
last_len = len(df)
# Raise the threshold so it includes at least one more individual
targets_thresh = og_df[og_df['targets'] < targets_thresh]['targets'].max()
df = og_df[og_df['targets'] >= targets_thresh]
# d = 6 # dimension of rows and columns
figw, figh = 16.0, 16.0
fig = plt.figure()
fig, axs = plt.subplots(ncols=d, nrows=d, figsize=(figw, figh))
df_g = df.sort_values(by=bc_names, ascending=False)
df_g["row"] = np.floor(np.linspace(0, d, len(df_g), endpoint=False)).astype(int)
for row_num in range(d):
row = df_g[df_g["row"] == row_num]
row = row.sort_values(by=[bc_names[1]], ascending=True)
row["col"] = np.arange(0, len(row), dtype=int)
idx = np.floor(np.linspace(0, len(row) - 1, d)).astype(int)
row = row[row["col"].isin(idx)]
row = row.drop(["row", "col"], axis=1)
# grid_models = np.array(row.loc[:,'solution_0':])
grid_models = row["level"].tolist()
for col_num in range(len(row)):
axs[row_num, col_num].set_axis_off()
level = np.zeros((map_width, map_width), dtype=int)
for i, l_rows in enumerate(grid_models[col_num].split("], [")):
for j, l_col in enumerate(l_rows.split(",")):
level[i, j] = int(
l_col.replace("[", "").replace("]", "").replace(" ", "")
)
# Set map
env._rep._x = env._rep._y = 0
env._rep._map = level
img = env.render(mode="rgb_array")
# axs[row_num, col_num].imshow(img, aspect="auto")
axs[-col_num-1, -row_num-1].imshow(img, aspect="auto")
fig.subplots_adjust(hspace=0.01, wspace=0.01)
levels_png_path = os.path.join(SAVE_PATH, "{}_grid.png".format(csv_name))
fig.text(0.5, 0.01, bc_names[0], ha='center', va='center',fontsize=fontsize)
fig.text(0.01, 0.5, bc_names[1], ha='center', va='center', rotation='vertical', fontsize=fontsize)
plt.tight_layout(rect=[0.025, 0.025, 1, 1])
fig.savefig(levels_png_path, dpi=300)
plt.close()
def auto_garbage_collect(pct=80.0):
if psutil.virtual_memory().percent >= pct:
gc.collect()
def tran_action(action, **kwargs):
skip = False
# return action, skip
return action.swapaxes(1, 2), skip
# usually, if action does not turn out to change the map, then the episode is terminated
# the skip boolean tells us whether, for some representation-specific reason, the agent has chosen not to act, but
# without ending the episode
@njit
def id_action(action, int_map=None, n_tiles=None, x=None, y=None, n_dirs=None):
# the argmax along tile_type dimension is performed inside the representation's update function
skip = False
return action, skip
# @njit
def wide_action(action, int_map=None, n_tiles=None, x=None, y=None, n_dirs=None):
# only consider tiles where the generator suggests something different than the existing tile
act_mask = action.argmax(axis=0) != int_map
n_new_builds = np.sum(act_mask)
act_mask = act_mask.reshape((1, *act_mask.shape))
# action = action * act_mask
action = np.where(act_mask == False, action.min() - 10, action)
coords = np.unravel_index(action.argmax(), action.shape)
if n_new_builds > 0:
assert act_mask[0, coords[1], coords[2]] == 1
coords = coords[2], coords[1], coords[0]
# assert int_map[coords[0], coords[1]] != coords[2]
skip = False
return coords, skip
@njit
def narrow_action(action, int_map=None, n_tiles=None, x=None, y=None, n_dirs=None):
act = action[:, y, x].argmax()
if act == 0:
skip = True
else:
skip = False
return act, skip
@njit
def turtle_action(action, int_map=None, n_tiles=None, x=None, y=None, n_dirs=None):
act = action[:, y, x].argmax()
# moving is counted as a skip, so lack of change does not end episode
if act < n_dirs:
skip = True
else:
skip = False
return act, skip
@njit
def flat_to_box(action, int_map=None, n_tiles=None, x=None, y=None, n_dirs=None):
action = action.reshape((n_tiles, *int_map.shape))
skip = False
return action, skip
@njit
def flat_to_wide(action, int_map=None, n_tiles=None, x=None, y=None, n_dirs=None):
w = int_map.shape[0]
h = int_map.shape[1]
assert len(action) == int_map.shape[0] + int_map.shape[1] + n_tiles
action = (action[:w].argmax(), action[w : w + h].argmax(), action[w + h :].argmax())
skip = False
return action, skip
@njit
def flat_to_narrow(action, int_map=None, n_tiles=None, x=None, y=None, n_dirs=None):
act = action.argmax()
if act == 0:
skip = True
else:
skip = False
return act, skip
@njit
def flat_to_turtle(action, int_map=None, n_tiles=None, x=None, y=None, n_dirs=None):
act = action.argmax()
if act < n_dirs:
skip = True
else:
skip = False
return act, skip
preprocess_action_funcs = {
"NCA": {
"cellular": id_action,
"wide": wide_action,
"narrow": narrow_action,
"turtle": turtle_action,
},
"CPPN": {
"cellular": tran_action,
},
"CNN": {
# will try to build this logic into the model
"cellular": flat_to_box,
"wide": flat_to_wide,
"narrow": flat_to_narrow,
"turtle": flat_to_turtle,
},
}
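# Example lookup (sketch; values below are hypothetical): for an NCA model acting under the
# "narrow" representation, the raw (n_tiles, h, w) action map is reduced to a single tile
# choice at the agent's current (x, y) position, and `skip` reports whether the agent chose
# the no-op action.
# preprocess_action = preprocess_action_funcs["NCA"]["narrow"]
# act, skip = preprocess_action(action, int_map=int_map, n_tiles=8, x=3, y=2, n_dirs=4)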
def id_observation(obs, **kwargs):
return obs
def local_observation(obs, **kwargs):
x = kwargs.get("x")
y = kwargs.get("y")
local_obs = np.zeros((1, obs.shape[1], obs.shape[2]))
# Might be some inconsistencies in ordering of x, y?
local_obs[0, y, x] = 1
np.concatenate((obs, local_obs), axis=0)
return obs
preprocess_observation_funcs = {
"NCA": {
"cellular": id_observation,
"wide": id_observation,
"narrow": local_observation,
"turtle": local_observation,
},
"CNN": {
"cellular": id_observation,
"wide": id_observation,
"narrow": local_observation,
"turtle": local_observation,
},
}
@njit
def archive_init_states(init_states_archive, init_states, index):
init_states_archive[index] = init_states
# @njit
def get_init_states(init_states_archive, index):
return init_states_archive[index]
def mate_individuals(ind_0, ind_1):
return ind_0.mate(ind_1)
def mutate_individual(ind):
ind.mutate()
return (ind,)
class MEOptimizer():
def __init__(self, grid, ind_cls, batch_size, ind_cls_args, start_time=None, stats=None):
self.batch_size = batch_size
self.grid = grid
self.inds = []
self.stats=stats
for _ in range(batch_size):
self.inds.append(ind_cls(**ind_cls_args))
toolbox = Toolbox()
toolbox.register("clone", copy.deepcopy)
toolbox.register("mutate", mutate_individual)
toolbox.register("mate", mate_individuals)
toolbox.register("select", tools.sel_random)
self.cxpb = 0
self.mutpb = 1.0
self.toolbox = toolbox
self.start_time = timer() if start_time is None else start_time
self.logbook = deap.tools.Logbook()
self.logbook.header = ["iteration", "containerSize", "evals", "nbUpdated"] + (stats.fields if stats else []) + ["elapsed"]
self.i = 0
def tell(self, objective_values, behavior_values):
# Update individuals' stats with results of last batch of simulations
# [(ind.fitness.setValues(obj), ind.fitness.features.setValues(bc)) for
# (ind, obj, bc) in zip(self.inds, objective_values, behavior_values)]
for (ind, obj, bc) in zip(self.inds, objective_values, behavior_values):
ind.fitness.setValues([obj])
ind.features.setValues(bc)
# Replace the current population by the offspring
nb_updated = self.grid.update(self.inds, issue_warning=True, ignore_exceptions=False)
# Compile stats and update logs
record = self.stats.compile(self.grid) if self.stats else {}
self.logbook.record(iteration=self.i, containerSize=self.grid.size_str(), evals=len(self.inds), nbUpdated=nb_updated, elapsed=timer()-self.start_time, **record)
self.i += 1
print(self.logbook.stream)
def ask(self):
if len(self.grid) == 0:
# Return the initial batch
return self.inds
elif len(self.grid) < self.batch_size:
# If few elites, supplement the population with individuals from the last generation
np.random.shuffle(self.inds)
breedable = self.grid.items + self.inds[:-len(self.grid)]
else:
breedable = self.grid
# Select the next batch individuals
batch = [self.toolbox.select(breedable) for i in range(self.batch_size)]
## Vary the pool of individuals
self.inds = deap.algorithms.varAnd(batch, self.toolbox, self.cxpb, self.mutpb)
return self.inds
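# Typical ask/tell usage of MEOptimizer (sketch; `evaluate` and `SomeIndividual` are hypothetical):
# opt = MEOptimizer(grid, ind_cls=SomeIndividual, batch_size=32, ind_cls_args={})
# for _ in range(n_iterations):
#     inds = opt.ask()                                   # propose a batch of individuals
#     objs, bcs = zip(*(evaluate(ind) for ind in inds))  # evaluate them externally
#     opt.tell(objs, bcs)                                # report scores; the grid gets updated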
class InitStatesArchive(GridArchive):
"""Save (some of) the initial states upon which the elites were evaluated when added to the archive, so that we can
reproduce their behavior at evaluation time (and compare it to evaluation to other seeds)."""
def __init__(self, bin_sizes, bin_bounds, n_init_states, map_w, map_h, **kwargs):
super(InitStatesArchive, self).__init__(bin_sizes, bin_bounds, **kwargs)
self.init_states_archive = np.empty(
shape=(*bin_sizes, n_init_states, map_w, map_h)
)
def set_init_states(self, init_states):
self.init_states = init_states
def add(self, solution, objective_value, behavior_values, meta, index=None):
status, dtype_improvement = super().add(
solution, objective_value, behavior_values
)
# NOTE: for now we won't delete these when popping an elite for re-evaluation
if status != AddStatus.NOT_ADDED:
if index is None:
index = self.get_index(behavior_values)
archive_init_states(self.init_states_archive, self.init_states, index)
return status, dtype_improvement
class MEGrid(containers.Grid):
def __init__(self, bin_sizes, bin_bounds):
super(MEGrid, self).__init__(shape=bin_sizes, max_items_per_bin=1,
features_domain=bin_bounds,
fitness_domain=((-np.inf, np.inf),),
)
# pyribs compatibility
def get_index(self, bcs):
return self.index_grid(features=bcs)
def add(self, item):
# We'll clip the feature values at the extremes
# TODO: what's happening in this case using pyribs?
item.features.setValues([np.clip(item.features.values[i], *self.features_domain[i])
for i in range(len(item.features.values))])
return super(MEGrid, self).add(item)
class MEInitStatesArchive(MEGrid):
"""Save (some of) the initial states upon which the elites were evaluated when added to the archive, so that we can
reproduce their behavior at evaluation time (and compare it to evaluation to other seeds)."""
def __init__(self, bin_sizes, bin_bounds, n_init_states, map_w, map_h, **kwargs):
super(MEInitStatesArchive, self).__init__(bin_sizes, bin_bounds, **kwargs)
self.init_states_archive = np.empty(
shape=(*bin_sizes, n_init_states, map_w, map_h)
)
def set_init_states(self, init_states):
self.init_states = init_states
def add(self, item):
index = super(MEInitStatesArchive, self).add(item)
if index is not None:
idx = self.index_grid(item.features)
archive_init_states(self.init_states_archive, self.init_states, idx)
return index
class FlexArchive(InitStatesArchive):
""" Subclassing a pyribs archive class to do some funky stuff."""
def __init__(self, *args, **kwargs):
self.n_evals = {}
# self.obj_hist = {}
# self.bc_hist = {}
super().__init__(*args, **kwargs)
# # "index of indices", so we can remove them from _occupied_indices when removing
# self._index_ranks = {}
self._occupied_indices = set()
def _add_occupied_index(self, index):
# rank = len(self._occupied_indices)
# self._index_ranks[index] = rank # the index of the index in _occupied_indices
return super()._add_occupied_index(index)
def _remove_occupied_index(self, index):
self._occupied_indices.remove(index)
self._occupied_indices_cols = tuple(
[self._occupied_indices[i][j] for i in range(len(self._occupied_indices))]
for j in range(len(self._storage_dims))
)
def pop_elite(self, obj, bcs, old_bcs):
"""
Need to call update_elite after this!
"""
# Remove it, update it
old_idx = self.get_index(np.array(old_bcs))
self._remove_occupied_index(old_idx)
# rank = self._index_ranks.pop(old_idx)
# self._occupied_indices.pop(rank)
# [self._occupied_indices_cols[i].pop(rank) for i in range(len(self._storage_dims))]
n_evals = self.n_evals.pop(old_idx)
old_obj = self._objective_values[old_idx]
mean_obj = (old_obj * n_evals + obj) / (n_evals + 1)
mean_bcs = np.array(
[
(old_bcs[i] * n_evals + bcs[i]) / (n_evals + 1)
for i in range(len(old_bcs))
]
)
# obj_hist = self.obj_hist.pop(old_idx)
# obj_hist.append(obj)
# mean_obj = np.mean(obj_hist)
# bc_hist = self.bc_hist.pop(old_idx)
# bc_hist.append(bcs)
# bc_hist_np = np.asarray(bc_hist)
# mean_bcs = bc_hist_np.mean(axis=0)
self._objective_values[old_idx] = np.nan
self._behavior_values[old_idx] = np.nan
self._occupied[old_idx] = False
solution = self._solutions[old_idx].copy()
self._solutions[old_idx] = np.nan
self._metadata[old_idx] = np.nan
# while len(obj_hist) > 100:
# obj_hist = obj_hist[-100:]
# while len(bc_hist) > 100:
# bc_hist = bc_hist[-100:]
return solution, mean_obj, mean_bcs, n_evals
def update_elite(self, solution, mean_obj, mean_bcs, n_evals):
"""
solution: the elite's solution (weight vector), as returned by pop_elite
mean_obj: the running mean objective score, already folded together with the new evaluations
mean_bcs: the running mean behavior characteristics, likewise
n_evals: the number of evaluations over which the above means were computed
"""
# Add it back
self.add(solution, mean_obj, mean_bcs, None, n_evals=n_evals)
def add(self, solution, objective_value, behavior_values, meta, n_evals=0):
index = self.get_index(behavior_values)
status, dtype_improvement = super().add(
solution, objective_value, behavior_values, meta, index
)
if not status == AddStatus.NOT_ADDED:
if n_evals == 0:
self.n_evals[index] = 1
else:
self.n_evals[index] = min(n_evals + 1, 100)
return status, dtype_improvement
def unravel_index(
indices: th.LongTensor, shape: Tuple[int, ...]
) -> th.LongTensor:
r"""Converts flat indices into unraveled coordinates in a target shape.
This is a `th` implementation of `numpy.unravel_index`.
Args:
indices: A tensor of indices, (*, N).
shape: The targeted shape, (D,).
Returns:
unravel coordinates, (*, N, D).
"""
shape = th.tensor(shape)
indices = indices % shape.prod() # prevent out-of-bounds indices
coord = th.zeros(indices.size() + shape.size(), dtype=int)
for i, dim in enumerate(reversed(shape)):
coord[..., i] = indices % dim
indices = indices // dim
return coord.flip(-1)
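# Example (hypothetical values): for a target shape of (3, 4), the flat index 7 unravels to
# the coordinate (1, 3):
#   unravel_index(th.tensor([7]), (3, 4))  # -> tensor([[1, 3]])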
# TODO: Use the GPU!
# if CUDA:
# m.cuda()
# m.to('cuda:0')
class ResettableNN(nn.Module):
def reset(self):
pass
def gauss(x, mean=0, std=1):
return th.exp((-(x - mean) ** 2)/(2 * std ** 2))
class MixActiv(nn.Module):
def __init__(self):
super().__init__()
self.activations = (th.sin, th.tanh, gauss, th.relu)
self.n_activs = len(self.activations)
def forward(self, x):
n_chan = x.shape[1]
chans_per_activ = n_chan / self.n_activs
chan_i = 0
xs = []
for i, activ in enumerate(self.activations):
xs.append(activ(x[:, int(chan_i):int(chan_i + chans_per_activ), :, :]))
chan_i += chans_per_activ
x = th.cat(xs, axis=1)
return x
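# Example (hypothetical): with 32 input channels and the 4 activations above, channels 0-7
# pass through sin, 8-15 through tanh, 16-23 through gauss, and 24-31 through relu before
# being concatenated back together.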
class AuxNCA(ResettableNN):
def __init__(self, n_in_chans, n_actions, n_aux=3):
super().__init__()
self.n_hid_1 = n_hid_1 = 32
self.n_aux = n_aux
self.l1 = Conv2d(n_in_chans + self.n_aux, n_hid_1, 3, 1, 1, bias=True)
self.l2 = Conv2d(n_hid_1, n_hid_1, 1, 1, 0, bias=True)
self.l3 = Conv2d(n_hid_1, n_actions + self.n_aux, 1, 1, 0, bias=True)
self.layers = [self.l1, self.l2, self.l3]
self.apply(init_weights)
self.last_aux = None
if RENDER:
cv2.namedWindow("Auxiliary NCA")
def forward(self, x):
with th.no_grad():
if self.last_aux is None:
self.last_aux = th.zeros(size=(1, self.n_aux, *x.shape[-2:]))
x_in = th.cat([x, self.last_aux], axis=1)
x = self.l1(x_in)
x = th.nn.functional.relu(x)
x = self.l2(x)
x = th.nn.functional.relu(x)
x = self.l3(x)
x = th.sigmoid(x)
self.last_aux = x[:,-self.n_aux:,:,:]
x = x[:, :-self.n_aux,:,:]
if RENDER:
# im = self.last_aux[0].cpu().numpy().transpose(1,2,0)
aux = self.last_aux[0].cpu().numpy()
aux = aux / aux.max()
im = np.expand_dims(np.vstack(aux), axis=0)
im = im.transpose(1, 2, 0)
cv2.imshow("Auxiliary NCA", im)
cv2.waitKey(1)
# axis 0 is batch
# axis 1 is the tile-type (one-hot)
# axis 0,1 is the x value
# axis 0,2 is the y value
return x, False
def reset(self, init_aux=None):
self.last_aux = None
class DoneAuxNCA(AuxNCA):
def __init__(self, n_in_chans, n_actions, n_aux=3):
# Add an extra auxiliary ("done") channel after the others
n_aux += 1
super().__init__(n_in_chans, n_actions, n_aux=n_aux)
done_kernel_size = 3
self.l_done = Conv2d(1, 1, 7, stride=999)
def forward(self, x):
with th.no_grad():
x, done = super().forward(x)
# retrieve local activation from done channel
done_x = th.sigmoid(self.l_done(x[:,-1:,:,:])).flatten() - 0.5
done = (done_x > 0).item()
return x, done
def reset(self, init_aux=None):
self.last_aux = None
class GeneratorNN(ResettableNN):
#class NCA(ResettableNN):
""" A neural cellular automata-type NN to generate levels or wide-representation action distributions."""
def __init__(self, n_in_chans, n_actions, **kwargs):
super().__init__()
n_hid_1 = 32
self.l1 = Conv2d(n_in_chans, n_hid_1, 3, 1, 1, bias=True)
self.l2 = Conv2d(n_hid_1, n_hid_1, 1, 1, 0, bias=True)
self.l3 = Conv2d(n_hid_1, n_actions, 1, 1, 0, bias=True)
self.layers = [self.l1, self.l2, self.l3]
self.apply(init_weights)
def forward(self, x):
with th.no_grad():
x = self.l1(x)
x = th.nn.functional.relu(x)
x = self.l2(x)
x = th.nn.functional.relu(x)
x = self.l3(x)
x = th.sigmoid(x)
# axis 0 is batch
# axis 1 is the tile-type (one-hot)
# axis 0,1 is the x value
# axis 0,2 is the y value
return x, False
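# Note: a single forward pass is one cellular-automaton step; simulate() below applies the
# model repeatedly to the evolving (one-hot) level state until no further change is made or
# N_STEPS is reached.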
class MixNCA(ResettableNN):
"""An NCA whose hidden layers use the mixed activation function (sin/tanh/gauss/relu)."""
def __init__(self, n_in_chans, n_actions, **kwargs):
super(MixNCA, self).__init__()
# NOTE: assumed layer sizes, mirroring GeneratorNN; the original left the conv layers undefined.
n_hid_1 = 32
self.l1 = Conv2d(n_in_chans, n_hid_1, 3, 1, 1, bias=True)
self.l2 = Conv2d(n_hid_1, n_hid_1, 1, 1, 0, bias=True)
self.l3 = Conv2d(n_hid_1, n_actions, 1, 1, 0, bias=True)
self.layers = [self.l1, self.l2, self.l3]
self.apply(init_weights)
self.mix_activ = MixActiv()
def forward(self, x):
with th.no_grad():
x = self.l1(x)
x = self.mix_activ(x)
x = self.l2(x)
x = self.mix_activ(x)
x = self.l3(x)
x = th.sigmoid(x)
return x, False
class CoordNCA(ResettableNN):
""" A neural cellular automata-type NN to generate levels or wide-representation action distributions.
With coordinates as additional input, like a CPPN."""
def __init__(self, n_in_chans, n_actions):
super().__init__()
n_hid_1 = 28
# n_hid_2 = 16
self.l1 = Conv2d(n_in_chans + 2, n_hid_1, 3, 1, 1, bias=True)
self.l2 = Conv2d(n_hid_1, n_hid_1, 1, 1, 0, bias=True)
self.l3 = Conv2d(n_hid_1, n_actions, 1, 1, 0, bias=True)
self.layers = [self.l1, self.l2, self.l3]
self.apply(init_weights)
def forward(self, x):
with th.no_grad():
coords = get_coord_grid(x, normalize=True)
x = th.hstack((coords, x))
x = self.l1(x)
x = th.nn.functional.relu(x)
x = self.l2(x)
x = th.nn.functional.relu(x)
x = self.l3(x)
x = th.sigmoid(x)
# axis 0 is batch
# axis 1 is the tile-type (one-hot)
# axis 0,1 is the x value
# axis 0,2 is the y value
return x, False
from pytorch_neat.cppn import create_cppn, Leaf
import neat
from neat.genome import DefaultGenome
def get_coord_grid(x, normalize=False):
width = x.shape[-2]
height = x.shape[-1]
X = th.arange(width)
Y = th.arange(height)
if normalize:
X = X / width
Y = Y / height
else:
X = X / 1
Y = Y / 1
X, Y = th.meshgrid(X, Y)
x = th.stack((X, Y)).unsqueeze(0)
return x
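# Example (hypothetical): for an input batch x of shape (1, n_chan, 16, 16),
# get_coord_grid(x, normalize=True) returns a (1, 2, 16, 16) tensor whose two channels hold
# the normalized x- and y-coordinates of each cell.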
#class ReluCPPN(ResettableNN):
class FeedForwardCPPN(nn.Module):
def __init__(self, n_in_chans, n_actions):
super().__init__()
n_hid = 64
self.l1 = Conv2d(2, n_hid, kernel_size=1)
self.l2 = Conv2d(n_hid, n_hid, kernel_size=1)
self.l3 = Conv2d(n_hid, n_actions, kernel_size=1)
self.layers = [self.l1, self.l2, self.l3]
self.apply(init_weights)
def forward(self, x):
x = get_coord_grid(x, normalize=True)
with th.no_grad():
x = th.relu(self.l1(x))
x = th.relu(self.l2(x))
x = th.sigmoid(self.l3(x))
return x, True
class GenReluCPPN(ResettableNN):
def __init__(self, n_in_chans, n_actions):
super().__init__()
n_hid = 64
self.l1 = Conv2d(2+n_in_chans, n_hid, kernel_size=1)
self.l2 = Conv2d(n_hid, n_hid, kernel_size=1)
self.l3 = Conv2d(n_hid, n_actions, kernel_size=1)
self.layers = [self.l1, self.l2, self.l3]
self.apply(init_weights)
def forward(self, x):
coord_x = get_coord_grid(x, normalize=True)
x = th.cat((x, coord_x), axis=1)
with th.no_grad():
x = th.relu(self.l1(x))
x = th.relu(self.l2(x))
x = th.sigmoid(self.l3(x))
return x, True
class SinCPPN(ResettableNN):
def __init__(self, n_in_chans, n_actions):
super().__init__()
n_hid = 64
self.l1 = Conv2d(2, n_hid, kernel_size=1)
self.l2 = Conv2d(n_hid, n_hid, kernel_size=1)
self.l3 = Conv2d(n_hid, n_actions, kernel_size=1)
self.layers = [self.l1, self.l2, self.l3]
self.apply(init_weights)
def forward(self, x):
x = get_coord_grid(x, normalize=True) * 2
with th.no_grad():
x = th.sin(self.l1(x))
x = th.sin(self.l2(x))
x = th.sigmoid(self.l3(x))
return x, True
class GenSinCPPN(ResettableNN):
def __init__(self, n_in_chans, n_actions):
super().__init__()
n_hid = 64
self.l1 = Conv2d(2+n_in_chans, n_hid, kernel_size=1)
self.l2 = Conv2d(n_hid, n_hid, kernel_size=1)
self.l3 = Conv2d(n_hid, n_actions, kernel_size=1)
self.layers = [self.l1, self.l2, self.l3]
self.apply(init_weights)
def forward(self, x):
coord_x = get_coord_grid(x, normalize=True) * 2
x = th.cat((x, coord_x), axis=1)
with th.no_grad():
x = th.sin(self.l1(x))
x = th.sin(self.l2(x))
x = th.sigmoid(self.l3(x))
return x, True
class MixCPPN(ResettableNN):
def __init__(self, n_in_chans, n_actions):
super().__init__()
n_hid = 64
self.l1 = Conv2d(2, n_hid, kernel_size=1)
self.l2 = Conv2d(n_hid, n_hid, kernel_size=1)
self.l3 = Conv2d(n_hid, n_actions, kernel_size=1)
self.layers = [self.l1, self.l2, self.l3]
self.apply(init_weights)
self.mix_activ = MixActiv()
def forward(self, x):
x = get_coord_grid(x, normalize=True) * 2
with th.no_grad():
x = self.mix_activ(self.l1(x))
x = self.mix_activ(self.l2(x))
x = th.sigmoid(self.l3(x))
return x, True
class GenMixCPPN(ResettableNN):
def __init__(self, n_in_chans, n_actions):
super().__init__()
n_hid = 64
self.l1 = Conv2d(2+n_in_chans, n_hid, kernel_size=1)
self.l2 = Conv2d(n_hid, n_hid, kernel_size=1)
self.l3 = Conv2d(n_hid, n_actions, kernel_size=1)
self.layers = [self.l1, self.l2, self.l3]
self.apply(init_weights)
self.mix_activ = MixActiv()
def forward(self, x):
coord_x = get_coord_grid(x, normalize=True) * 2
x = th.cat((x, coord_x), axis=1)
with th.no_grad():
x = self.mix_activ(self.l1(x))
x = self.mix_activ(self.l2(x))
x = th.sigmoid(self.l3(x))
return x, True
class FixedGenCPPN(ResettableNN):
"""A fixed-topology CPPN that takes additional channels of noisey input to prompts its output.
Like a CoordNCA but without the repeated passes and with 1x1 rather than 3x3 kernels."""
# TODO: Maybe try this with 3x3 conv, just to cover our bases?
def __init__(self, n_in_chans, n_actions):
super().__init__()
n_hid = 64
self.l1 = Conv2d(2 + n_in_chans, n_hid, kernel_size=1)
self.l2 = Conv2d(n_hid, n_hid, kernel_size=1)
self.l3 = Conv2d(n_hid, n_actions, kernel_size=1)
self.layers = [self.l1, self.l2, self.l3]
self.apply(init_weights)
def forward(self, x):
coord_x = get_coord_grid(x, normalize=True) * 2
x = th.cat((x, coord_x), axis=1)
with th.no_grad():
x = th.sin(self.l1(x))
x = th.sin(self.l2(x))
x = th.sigmoid(self.l3(x))
return x, True
class CPPN(ResettableNN):
def __init__(self, n_in_chans, n_actions):
super().__init__()
neat_config_path = 'config_cppn'
self.neat_config = neat.config.Config(DefaultGenome, neat.reproduction.DefaultReproduction,
neat.species.DefaultSpeciesSet, neat.stagnation.DefaultStagnation,
neat_config_path)
self.n_actions = n_actions
self.neat_config.genome_config.num_outputs = n_actions
self.neat_config.genome_config.num_hidden = 2
self.genome = DefaultGenome(0)
self.genome.configure_new(self.neat_config.genome_config)
self.input_names = ['x_in', 'y_in']
self.output_names = ['tile_{}'.format(i) for i in range(n_actions)]
self.cppn = create_cppn(self.genome, self.neat_config, self.input_names, self.output_names)
def mate(self, ind_1, fit_0, fit_1):
self.genome.fitness = fit_0
ind_1.genome.fitness = fit_1
return self.genome.configure_crossover(self.genome, ind_1.genome, self.neat_config.genome_config)
def mutate(self):
# print(self.input_names, self.neat_config.genome_config.input_keys, self.genome.nodes)
self.genome.mutate(self.neat_config.genome_config)
self.cppn = create_cppn(self.genome, self.neat_config, self.input_names, self.output_names)
def draw_net(self):
draw_net(self.neat_config, self.genome, view=True, filename='cppn')
def forward(self, x):
X = th.arange(x.shape[-2])
Y = th.arange(x.shape[-1])
X, Y = th.meshgrid(X/X.max(), Y/Y.max())
tile_probs = [self.cppn[i](x_in=X, y_in=Y) for i in range(self.n_actions)]
multi_hot = th.stack(tile_probs, axis=0)
multi_hot = multi_hot.unsqueeze(0)
return multi_hot, True
class CPPNCA(ResettableNN):
def __init__(self, n_in_chans, n_actions):
super().__init__()
n_hid_1 = 32
with th.no_grad():
self.l1 = Conv2d(n_in_chans, n_hid_1, 3, 1, 1, bias=True)
self.l2 = Conv2d(n_hid_1, n_hid_1, 1, 1, 0, bias=True)
self.l3 = Conv2d(n_hid_1, n_actions, 1, 1, 0, bias=True)
self.layers = [self.l1, self.l2, self.l3]
self.apply(init_weights)
n_nca_params = sum(p.numel() for p in self.parameters())
self.cppn_body = GenCPPN(n_in_chans, n_actions)
self.normal = th.distributions.multivariate_normal.MultivariateNormal(th.zeros(1), th.eye(1))
def mate(self):
raise NotImplementedError
def mutate(self):
self.cppn_body.mutate()
with th.no_grad():
for layer in self.layers:
dw = self.normal.sample(layer.weight.shape)
layer.weight = th.nn.Parameter(layer.weight + dw.squeeze(-1))
db = self.normal.sample(layer.bias.shape)
layer.bias = th.nn.Parameter(layer.bias + db.squeeze(-1))
def forward(self, x):
with th.no_grad():
x = self.l1(x)
x = th.nn.functional.relu(x)
x = self.l2(x)
x = th.nn.functional.relu(x)
x = th.sigmoid(x)
x, _ = self.cppn_body(x)
return x, False
class GenCPPN(CPPN):
def __init__(self, n_in_chans, n_actions):
super().__init__(n_in_chans, n_actions)
neat_config_path = 'config_cppn'
self.neat_config = neat.config.Config(DefaultGenome, neat.reproduction.DefaultReproduction,
neat.species.DefaultSpeciesSet, neat.stagnation.DefaultStagnation,
neat_config_path)
self.n_actions = n_actions
self.neat_config.genome_config.num_outputs = n_actions
self.genome = DefaultGenome(0)
self.input_names = ['x_in', 'y_in'] + ['tile_{}_in'.format(i) for i in range(n_actions)]
n_inputs = len(self.input_names)
self.output_names = ['tile_{}_out'.format(i) for i in range(n_actions)]
self.neat_config.genome_config.input_keys = (-1*np.arange(n_inputs) - 1).tolist()
self.neat_config.genome_config.num_inputs = n_inputs
self.neat_config.genome_config.num_hidden = 2
self.genome.configure_new(self.neat_config.genome_config)
self.cppn = create_cppn(self.genome, self.neat_config, self.input_names, self.output_names)
def forward(self, x):
X = th.arange(x.shape[-2])
Y = th.arange(x.shape[-1])
X, Y = th.meshgrid(X/X.max(), Y/Y.max())
inputs = {'x_in': X, 'y_in': Y}
inputs.update({'tile_{}_in'.format(i): th.Tensor(x[0,i,:,:]) for i in range(self.n_actions)})
tile_probs = [self.cppn[i](**inputs) for i in range(self.n_actions)]
multi_hot = th.stack(tile_probs, axis=0)
multi_hot = multi_hot.unsqueeze(0)
return multi_hot, True
class Individual(qdpy.phenotype.Individual):
"An individual for mutating with operators. Assuming we're using vanilla MAP-Elites here."
def __init__(self, model_cls, n_in_chans, n_actions):
super(Individual, self).__init__()
self.model = model_cls(n_in_chans, n_actions)
self.fitness = Fitness([0])
self.fitness.delValues()
def mutate(self):
self.model.mutate()
def mate(self, ind_1):
assert len(self.fitness.values) == 1 == len(ind_1.fitness.values)
self.model.mate(ind_1.model, fit_0=self.fitness.values[0], fit_1=ind_1.fitness.values[0])
def __eq__(self, ind_1):
if not hasattr(ind_1, "model"): return False
return self.model == ind_1.model
# FIXME: this guy don't work
class GeneratorNNDenseSqueeze(ResettableNN):
""" A neural cellular automata-type NN to generate levels or wide-representation action distributions."""
def __init__(self, n_in_chans, n_actions, observation_shape, n_flat_actions):
super().__init__()
n_hid_1 = 16
# Hack af. Pad the input to make it have root 2? idk, bad
sq_i = 2
assert observation_shape[-1] == observation_shape[-2]
# while sq_i < observation_shape[-1]:
# sq_i = sq_i**2
# pad_0 = sq_i - observation_shape[-1]
self.l1 = Conv2d(n_in_chans, n_hid_1, 3, 1, 0, bias=True)
self.l2 = Conv2d(n_hid_1, n_hid_1, 3, 2, 0, bias=True)
self.flatten = th.nn.Flatten()
n_flat = self.flatten(
self.l2(self.l1(th.zeros(size=observation_shape)))
).shape[-1]
# n_flat = n_hid_1
self.d1 = Linear(n_flat, n_flat_actions)
# self.d2 = Linear(16, n_flat_actions)
self.layers = [self.l1, self.l2, self.d1]
self.apply(init_weights)
def forward(self, x):
with th.no_grad():
x = self.l1(x)
x = th.nn.functional.relu(x)
x = self.l2(x)
x = th.nn.functional.relu(x)
# for i in range(int(np.log2(x.shape[2])) + 1):
# x = self.l2(x)
# x = th.nn.functional.relu(x)
x = self.flatten(x)
x = self.d1(x)
x = th.sigmoid(x)
# x = self.d2(x)
# x = th.sigmoid(x)
return x, False
class GeneratorNNDense(ResettableNN):
""" A neural cellular automata-type NN to generate levels or wide-representation action distributions."""
def __init__(self, n_in_chans, n_actions, observation_shape, n_flat_actions):
super().__init__()
n_hid_1 = 16
n_hid_2 = 32
self.conv1 = Conv2d(n_in_chans, n_hid_1, kernel_size=3, stride=2)
self.conv2 = Conv2d(n_hid_1, n_hid_2, kernel_size=3, stride=2)
self.conv3 = Conv2d(n_hid_2, n_hid_2, kernel_size=3, stride=2)
self.flatten = th.nn.Flatten()
n_flat = self.flatten(
self.conv3(self.conv2(self.conv1(th.zeros(size=observation_shape))))
).shape[-1]
# self.fc1 = Linear(n_flat, n_flat_actions)
self.fc1 = Linear(n_flat, n_hid_2)
self.fc2 = Linear(n_hid_2, n_flat_actions)
self.layers = [self.conv1, self.conv2, self.conv3, self.fc1, self.fc2]
self.apply(init_weights)
def forward(self, x):
with th.no_grad():
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = self.flatten(x)
x = F.relu(self.fc1(x))
x = F.softmax(self.fc2(x), dim=1)
return x, False
class PlayerNN(ResettableNN):
def __init__(self, n_tile_types, n_actions=4):
super().__init__()
self.n_tile_types = n_tile_types
assert "zelda" in PROBLEM
self.l1 = Conv2d(n_tile_types, 16, 3, 1, 0, bias=True)
self.l2 = Conv2d(16, 16, 3, 2, 1, bias=True)
self.l3 = Conv2d(16, n_actions, 3, 1, 1, bias=True)
self.layers = [self.l1, self.l2, self.l3]
self.apply(init_play_weights)
self.flatten = th.nn.Flatten()
self.net_reward = 0
self.n_episodes = 0
def forward(self, x):
x = th.Tensor(get_one_hot_map(x, self.n_tile_types))
x = x.unsqueeze(0)
with th.no_grad():
x = th.relu(self.l1(x))
for i in range(int(np.log2(x.shape[2])) + 1):
# for i in range(1):
x = th.relu(self.l2(x))
x = th.relu(self.l3(x))
# x = x.argmax(1)
# x = x[0]
x = x.flatten()
x = th.softmax(x, axis=0)
# x = [x.argmax().item()]
act_ids = np.arange(x.shape[0])
probs = x.detach().numpy()
x = np.random.choice(act_ids, 1, p=probs)
return x
def assign_reward(self, rew):
self.net_reward += rew
self.n_episodes += 1
def reset(self):
self.net_reward = 0
self.n_episodes = 0
def get_reward(self):
mean_rew = self.net_reward / self.n_episodes
return mean_rew
def init_weights(m):
if type(m) == th.nn.Linear:
th.nn.init.xavier_uniform_(m.weight)
m.bias.data.fill_(0.01)
if type(m) == th.nn.Conv2d:
th.nn.init.orthogonal_(m.weight)
def init_play_weights(m):
if type(m) == th.nn.Linear:
th.nn.init.xavier_uniform_(m.weight, gain=0)
m.bias.data.fill_(0.00)
if type(m) == th.nn.Conv2d:
# th.nn.init.orthogonal_(m.weight, gain=np.sqrt(2))
th.nn.init.constant_(m.weight, 0)
def set_nograd(nn):
for param in nn.parameters():
param.requires_grad = False
def get_init_weights(nn):
"""
Use to get flat vector of weights from PyTorch model
"""
init_params = []
if isinstance(nn, CPPN):
for node in nn.cppn:
if isinstance(node, Leaf):
continue
init_params.append(node.weights)
init_params.append(node.bias)
else:
for lyr in nn.layers:
init_params.append(lyr.weight.view(-1).numpy())
init_params.append(lyr.bias.view(-1).numpy())
init_params = np.hstack(init_params)
print("number of initial NN parameters: {}".format(init_params.shape))
return init_params
def set_weights(nn, weights):
if ALGO == "ME":
# then our nn is contained in the individual
individual = weights # I'm sorry mama
return individual.model
with th.no_grad():
n_el = 0
if isinstance(nn, CPPN):
for node in nn.cppn:
l_weights = weights[n_el : n_el + len(node.weights)]
n_el += len(node.weights)
node.weights = l_weights
b_weight = weights[n_el: n_el + 1]
n_el += 1
node.bias = b_weight
else:
for layer in nn.layers:
l_weights = weights[n_el : n_el + layer.weight.numel()]
n_el += layer.weight.numel()
l_weights = l_weights.reshape(layer.weight.shape)
layer.weight = th.nn.Parameter(th.Tensor(l_weights))
layer.weight.requires_grad = False
b_weights = weights[n_el : n_el + layer.bias.numel()]
n_el += layer.bias.numel()
b_weights = b_weights.reshape(layer.bias.shape)
layer.bias = th.nn.Parameter(th.Tensor(b_weights))
layer.bias.requires_grad = False
return nn
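# A minimal (hypothetical) round trip, assuming a non-ME algorithm: the flat vector returned
# by get_init_weights() can be written back with set_weights() to restore the same parameters:
#   w = get_init_weights(gen_model)
#   gen_model = set_weights(gen_model, w)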
def get_one_hot_map(int_map, n_tile_types):
obs = (np.arange(n_tile_types) == int_map[..., None]).astype(int)
obs = obs.transpose(2, 0, 1)
return obs
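# Example (hypothetical): get_one_hot_map(np.array([[0, 1], [2, 0]]), 3) returns a (3, 2, 2)
# array in which channel t is 1 exactly where the integer map equals tile type t.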
"""
Behavior Characteristics Functions
"""
def get_entropy(int_map, env):
"""
Function to calculate entropy of levels represented by integers
int_map (numpy array of ints): representation of level
env (gym-pcgrl environment instance): used to get the action space dims
returns the entropy of the level normalized roughly to a range of 0.0 to 1.0
"""
# FIXME: make this robust to different action spaces
n_classes = len(env._prob._prob)
max_val = -(1 / n_classes) * np.log(1 / n_classes) * n_classes
total = len(int_map.flatten())
entropy = 0.0
for tile in range(n_classes):
p = (tile == int_map.flatten()).astype(int).sum() / total
if p != 0:
entropy -= p * np.log(p)
return entropy / max_val
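# Example (hypothetical): a level that uses every tile type equally often scores ~1.0, while
# a level filled with a single tile type scores 0.0.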
def get_counts(int_map, env):
"""
Function to calculate the tile counts for all possible tiles
int_map (numpy array of ints): representation of level
env (gym-pcgrl environment instance): used to get the action space dims
returns a python list with tile counts for each tile normalized to a range of 0.0 to 1.0
"""
max_val = env._prob._width * env._prob._height # for example 14*14=196
return [
np.sum(int_map.flatten() == tile) / max_val
for tile in range(len(env._prob._prob))
]
def get_emptiness(int_map, env):
"""
Function to calculate how empty the level is
int_map (numpy array of ints): representation of level
env (gym-pcgrl environment instance): used to get the action space dims
returns an emptiness value normalized to a range of 0.0 to 1.0
"""
max_val = env._prob._width * env._prob._height # for example 14*14=196
return np.sum(int_map.flatten() == 0) / max_val
def get_hor_sym(int_map, env):
"""
Function to get the horizontal symmetry of a level
int_map (numpy array of ints): representation of level
env (gym-pcgrl environment instance): used to get the action space dims
returns a symmetry float value normalized to a range of 0.0 to 1.0
"""
max_val = env._prob._width * env._prob._height / 2 # for example 14*14/2=98
m = 0
if int(int_map.shape[0]) % 2 == 0:
m = np.sum(
(
int_map[: int(int_map.shape[0] / 2)]
== np.flip(int_map[int(int_map.shape[0] / 2) :], 0)
).astype(int)
)
m = m / max_val
else:
m = np.sum(
(
int_map[: int(int_map.shape[0] / 2)]
== np.flip(int_map[int(int_map.shape[0] / 2) + 1 :], 0)
).astype(int)
)
m = m / max_val
return m
def get_ver_sym(int_map, env):
"""
Function to get the vertical symmetry of a level
int_map (numpy array of ints): representation of level
env (gym-pcgrl environment instance): used to get the action space dims
returns a symmetry float value normalized to a range of 0.0 to 1.0
"""
max_val = env._prob._width * env._prob._height / 2 # for example 14*14/2=98
m = 0
if int(int_map.shape[1]) % 2 == 0:
m = np.sum(
(
int_map[:, : int(int_map.shape[1] / 2)]
== np.flip(int_map[:, int(int_map.shape[1] / 2) :], 1)
).astype(int)
)
m = m / max_val
else:
m = np.sum(
(
int_map[:, : int(int_map.shape[1] / 2)]
== np.flip(int_map[:, int(int_map.shape[1] / 2) + 1 :], 1)
).astype(int)
)
m = m / max_val
return m
# SYMMETRY
def get_sym(int_map, env):
"""
Function to get the total symmetry of a level (the mean of its horizontal and vertical symmetry)
int_map (numpy array of ints): representation of level
env (gym-pcgrl environment instance): used to get the action space dims
returns a symmetry float value normalized to a range of 0.0 to 1.0
"""
result = (get_ver_sym(int_map, env) + get_hor_sym(int_map, env)) / 2.0
return result
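# Example (hypothetical): a level mirrored both left-to-right and top-to-bottom scores ~1.0,
# while a typical random level scores well below that.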
# CO-OCCURRENCE (note: the corresponding BC name string is spelled "co-occurance" elsewhere in this file)
def get_co(int_map, env):
max_val = env._prob._width * env._prob._height * 4
result = (
np.sum((np.roll(int_map, 1, axis=0) == int_map).astype(int))
+ np.sum((np.roll(int_map, -1, axis=0) == int_map).astype(int))
+ np.sum((np.roll(int_map, 1, axis=1) == int_map).astype(int))
+ np.sum((np.roll(int_map, -1, axis=1) == int_map).astype(int))
)
return result / max_val
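# Example (hypothetical): a level filled with a single tile type scores 1.0, since every cell
# matches all four of its (toroidally wrapped) neighbors.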
def get_regions(stats):
return stats["regions"]
def get_path_length(stats):
return stats["path-length"]
# TODO: call this once to return the relevant get_bc function, then call that function after each eval, so that we don't have to repeatedly compare strings
def get_bc(bc_name, int_map, stats, env):
if bc_name in stats.keys():
return stats[bc_name]
elif bc_name == "co-occurance":
return get_co(int_map, env)
elif bc_name == "symmetry":
return get_sym(int_map, env)
elif bc_name == "symmetry-vertical":
return get_ver_sym(int_map, env)
elif bc_name == "symmetry-horizontal":
return get_hor_sym(int_map, env)
elif bc_name == "emptiness":
return get_emptiness(int_map, env)
elif bc_name == "entropy":
return get_entropy(int_map, env)
elif bc_name == "NONE":
return 0
else:
raise Exception("The BC {} is not recognized.".format(bc_name))
class PlayerLeft(nn.Module):
def __init__(self):
super().__init__()
self.act_i = 0
def forward(self, obs):
return [0]
class RandomPlayer(nn.Module):
def __init__(self, action_space):
super().__init__()
self.action_space = action_space
self.act_i = 0
def forward(self, obs):
return [self.action_space.sample()]
class PlayerRight(nn.Module):
def __init__(self):
super().__init__()
self.act_i = 0
def forward(self, obs):
return [1]
def log_archive(archive, name, itr, start_time, level_json=None):
if ALGO == "ME":
# Do this inside optimizer ..?
return
# TensorBoard Logging.
df = archive.as_pandas(include_solutions=False)
elapsed_time = time.time() - start_time
writer.add_scalar("{} ArchiveSize".format(name), len(df), itr)
writer.add_scalar("{} score/mean".format(name), df["objective"].mean(), itr)
writer.add_scalar("{} score/max".format(name), df["objective"].max(), itr)
writer.add_scalar("{} score/min".format(name), df["objective"].min(), itr)
# Change: log mean, max, and min for all stats
if level_json:
stats = ["batch_reward", "targets"]
if N_INIT_STATES > 1:
stats += ["variance", "diversity"]
# level_json = {'level': final_levels.tolist(),'batch_reward':[batch_reward] * len(final_levels.tolist()), 'variance': [variance_penalty] * len(final_levels.tolist()), 'diversity':[diversity_bonus] * len(final_levels.tolist()),'targets':trg.tolist(), **bc_dict}
for stat in stats:
writer.add_scalar(
"Training {}/min".format(stat), np.min(level_json[stat]), itr
)
writer.add_scalar(
"Training {}/mean".format(stat), np.mean(level_json[stat]), itr
)
writer.add_scalar(
"Training {}/max".format(stat), np.max(level_json[stat]), itr
)
# Logging.
if itr % 1 == 0:
print(f"> {itr} itrs completed after {elapsed_time:.2f} s")
print(f" - {name} Archive Size: {len(df)}")
print(f" - {name} Max Score: {df['objective'].max()}")
print(f" - {name} Mean Score: {df['objective'].mean()}")
print(f" - {name} Min Score: {df['objective'].min()}")
N_PLAYER_STEPS = 100
def play_level(env, level, player):
env._rep._old_map = level
env._rep._random_start = False
p_obs = env.reset()
if not env.is_playable():
return 0, None
# TODO: check if env is playable!
env.set_active_agent(1)
if RENDER:
env.render()
net_p_rew = 0
action_hist = []
for p_i in range(N_PLAYER_STEPS):
action = player(p_obs["map"])
if isinstance(action, th.Tensor):
# TODO: this logic belongs with the model
player_coords = env._prob.player.coords
action = np.array(action)[player_coords[0], player_coords[1]]
elif isinstance(action, list) or isinstance(action, np.ndarray):
assert len(action) == 1
action = action[-1]
else:
raise Exception
action_hist.append(action)
p_obs, p_rew, p_done, p_info = env.step(action)
if RENDER:
env.render()
# net_p_rew += p_rew
net_p_rew = p_rew
if p_done:
break
# player.assign_reward(net_p_rew)
action_freqs = np.bincount(action_hist, minlength=len(env.player_actions))
action_entropy = scipy.stats.entropy(action_freqs)
local_action_entropy = np.mean(
[
scipy.stats.entropy(
np.bincount(action_hist[i : i + 10], minlength=len(env.player_actions))
)
for i in np.arange(0, len(action_hist) - 10, 6)
]
)
local_action_entropy = np.nan_to_num(local_action_entropy)
return net_p_rew, [action_entropy, local_action_entropy]
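# Note: the reward returned above is the reward from the final step of the playthrough (the
# commented-out accumulation is disabled), and the two returned BCs are the entropy of the
# whole action history and the mean entropy over sliding 10-step windows of it.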
@ray.remote
def multi_evo(
env,
model,
model_w,
n_tile_types,
init_states,
bc_names,
static_targets,
seed,
player_1,
player_2,
proc_id=None,
init_states_archive=None,
index=None,
):
if init_states is None:
init_states = get_init_states(init_states_archive, tuple(index))
if proc_id is not None:
print("simulating id: {}".format(proc_id))
model = set_weights(model, model_w)
result = simulate(
env=env,
model=model,
n_tile_types=n_tile_types,
init_states=init_states,
bc_names=bc_names,
static_targets=static_targets,
seed=seed,
player_1=player_1,
player_2=player_2,
)
return result
@ray.remote
def multi_play_evo(
env,
gen_model,
player_1_w,
n_tile_types,
init_states,
play_bc_names,
static_targets,
seed,
player_1,
player_2,
playable_levels,
proc_id=None,
):
if proc_id is not None:
print("simulating id: {}".format(proc_id))
player_1 = set_weights(player_1, player_1_w)
obj, bcs = player_simulate(
env=env,
n_tile_types=n_tile_types,
play_bc_names=play_bc_names,
seed=seed,
player_1=player_1,
playable_levels=playable_levels,
)
return obj, bcs
def gen_playable_levels(env, gen_model, init_states, n_tile_types):
""" To get only the playable levels of a given generator, so that we can run player evaluations on them more quickly."""
final_levels = []
for int_map in init_states:
obs = get_one_hot_map(int_map, n_tile_types)
if RENDER:
env.render()
done = False
n_step = 0
last_int_map = None
while not done:
int_tensor = th.unsqueeze(th.Tensor(obs), 0)
# The model returns an (action, done) pair; unpack it before converting the action to numpy
# (the original tried to unpack the numpy-converted action tensor itself).
action, done = gen_model(int_tensor)
action = action[0].numpy()
obs = action
int_map = action.argmax(axis=0)
env._rep._map = int_map
done = done or (int_map == last_int_map).all() or n_step >= N_STEPS
# if INFER and not EVALUATE:
# time.sleep(1 / 30)
if done:
gen_model.reset()
env._rep._old_map = int_map
env._rep._random_start = False
_ = env.reset()
if env.is_playable():
final_levels.append(int_map)
# Remember the previous map so the convergence check above can detect a fixed point.
last_int_map = int_map
n_step += 1
return final_levels
def player_simulate(
env, n_tile_types, play_bc_names, player_1, playable_levels, seed=None
):
n_evals = 10
net_reward = 0
bcs = []
for int_map in playable_levels * n_evals:
if INFER:
# env.render()
input("ready player 1")
p_1_rew, p_bcs = play_level(env, int_map, player_1)
bcs.append(p_bcs)
if INFER:
print("p_1 reward: ", p_1_rew)
net_reward += p_1_rew
reward = net_reward / len(playable_levels * n_evals)
bcs = [np.mean([bcs[j][i] for j in range(len(bcs))]) for i in range(len(bcs[0]))]
return reward, bcs
def plot_score_heatmap(scores, score_name, bc_names, cmap_str="magma", bcs_in_filename=True,
lower_bounds=None, upper_bounds=None,
x_bounds=None, y_bounds=None):
scores = scores.T
ax = plt.gca()
ax.set_xlim(lower_bounds[0], upper_bounds[0])
ax.set_ylim(lower_bounds[1], upper_bounds[1])
label_fontdict = {
'fontsize': 16,
}
ax.set_xlabel(bc_names[0], fontdict=label_fontdict)
ax.set_ylabel(bc_names[1], fontdict=label_fontdict)
vmin = np.nanmin(scores)
vmax = np.nanmax(scores)
t = ax.pcolormesh(
x_bounds,
y_bounds,
scores,
cmap=matplotlib.cm.get_cmap(cmap_str),
vmin=vmin,
vmax=vmax,
)
ax.figure.colorbar(t, ax=ax, pad=0.1)
if SHOW_VIS:
plt.show()
if bcs_in_filename:
f_name = score_name + "_" + "-".join(bc_names)
else:
f_name = score_name
if not RANDOM_INIT_LEVELS:
f_name = f_name + "_fixLvls"
f_name += ".png"
plt.title(score_name, fontdict={'fontsize': 24})
plt.tight_layout()
plt.savefig(os.path.join(SAVE_PATH, f_name))
plt.close()
def simulate(
env,
model,
n_tile_types,
init_states,
bc_names,
static_targets,
seed=None,
player_1=None,
player_2=None,
render_levels=False
):
"""
Function to run a single trajectory and return results.
Args:
env (gym.Env): A copy of the binary-wide-v0 environment.
model (np.ndarray): The array of weights for the policy.
seed (int): The seed for the environment.
player_sim (bool): Are we collecting obj and bcs for the player, rather than the generator?
Returns:
total_reward (float): The reward accrued by the lander throughout its
trajectory.
path_length (float): The path length of the final solution.
regions (float): The number of distinct regions of the final solution.
"""
global N_INIT_STATES
if seed is not None:
env.seed(seed)
if PLAY_LEVEL:
assert player_1 is not None
assert player_2 is not None
if CMAES:
bc_names = ["NONE", "NONE"]
# Allow us to manually set the level-map on reset (using the "_old_map" attribute)
# Actually we have found a more efficient workaround for now.
# env._rep._random_start = False
# if n_episode == 0 and False:
# env._rep._old_map = init_state
# obs = env.reset()
# int_map = obs['map']
n_init_states = init_states.shape[0]
width = init_states.shape[1]
height = init_states.shape[2]
bcs = np.empty(shape=(len(bc_names), n_init_states))
# if SAVE_LEVELS:
trg = np.empty(shape=(n_init_states))
final_levels = np.empty(shape=init_states.shape, dtype=np.uint8)
batch_reward = 0
batch_time_penalty = 0
batch_targets_penalty = 0
batch_play_bonus = 0
if render_levels:
level_frames = []
for (n_episode, init_state) in enumerate(init_states):
# NOTE: Sneaky hack. We don't need initial stats. Never even reset. Heh. Be careful!!
# Set the representation to begin in the upper left corner
env._rep._map = init_state.copy()
env._prob.path_coords = []
env._prob.path_length = None
# Only applies to narrow and turtle. Better than using reset, but ugly, and not optimal
# TODO: wrap the env instead
env._rep._x = env._rep._y = 0
# env._rep._x = np.random.randint(env._prob._width)
# env._rep._y = np.random.randint(env._prob._height)
int_map = init_state
obs = get_one_hot_map(int_map, n_tile_types)
if RENDER:
env.render()
if INFER:
# time.sleep(10/30)
# input()
pass
done = False
n_step = 0
while not done:
if render_levels:
level_frames.append(env.render(mode="rgb_array"))
# in_tensor = th.unsqueeze(
# th.unsqueeze(th.tensor(np.float32(obs['map'])), 0), 0)
in_tensor = th.unsqueeze(th.Tensor(obs), 0)
action, done = model(in_tensor)
action = action[0].numpy()
# There is probably a better way to do this, so we are not passing unnecessary kwargs, depending on representation
action, skip = preprocess_action(
action,
int_map=env._rep._map,
x=env._rep._x,
y=env._rep._y,
n_dirs=N_DIRS,
n_tiles=n_tile_types,
)
change, x, y = env._rep.update(action)
int_map = env._rep._map
obs = get_one_hot_map(env._rep.get_observation()["map"], n_tile_types)
preprocess_observation(obs, x=env._rep._x, y=env._rep._y)
# int_map = action.argmax(axis=0)
# obs = get_one_hot_map(int_map, n_tile_types)
# env._rep._map = int_map
done = done or not (change or skip) or n_step >= N_STEPS
# done = n_step >= N_STEPS
# if INFER and not EVALUATE:
# time.sleep(1 / 30)
if done:
model.reset()
if render_levels:
# get final level state
level_frames.append(env.render(mode="rgb_array"))
# we'll need this to compute Hamming diversity
final_levels[n_episode] = int_map
stats = env._prob.get_stats(
get_string_map(int_map, env._prob.get_tile_types()),
# lenient_paths = True,
)
# get BCs
# Resume here. Use new BC function.
for i in range(len(bc_names)):
bc_name = bc_names[i]
bcs[i, n_episode] = get_bc(bc_name, int_map, stats, env)
# TODO: reward calculation should depend on self.reward_names
# ad hoc reward: shorter episodes are better?
time_penalty = n_step
batch_time_penalty -= time_penalty
# we want to hit each of our static targets exactly, penalize for anything else.
# for ranges, we take our least distance to any element in the range
targets_penalty = 0
for k in static_targets:
if k in bc_names:
continue
if isinstance(static_targets[k], tuple):
# take the smallest distance from current value to any point in range
# NOTE: we're assuming this metric is integer-valued
targets_penalty += abs(
np.arange(static_targets[k][0], static_targets[k][1]) - stats[k]
).min()
else:
targets_penalty += abs(static_targets[k] - stats[k])
# targets_penalty = np.sum([abs(static_targets[k] - stats[k]) if not isinstance(static_targets[k], tuple) else abs(np.arange(*static_targets[k]) - stats[k]).min() for k in static_targets])
batch_targets_penalty -= targets_penalty
# if SAVE_LEVELS:
trg[n_episode] = -targets_penalty
if PLAY_LEVEL:
if INFER:
env.render()
input("ready player 1")
p_1_rew, p_bcs = play_level(env, int_map, player_1)
if INFER:
print("p_1 reward: ", p_1_rew)
input("ready player 2")
p_2_rew, p_bcs = play_level(env, int_map, player_2)
if INFER:
print("p_2 reward: ", p_2_rew)
max_regret = env._prob.max_reward - env._prob.min_reward
# add this in case we get worst possible regret (don't want to punish a playable map)
batch_play_bonus += max_regret + p_1_rew - p_2_rew
if RENDER:
if INFER:
stats = env._prob.get_stats(
get_string_map(int_map, env._prob.get_tile_types()),
# lenient_paths=True,
)
env.render()
if done and INFER: # and not (EVALUATE and THREADS):
if not EVALUATE:
# time.sleep(5 / 30)
print(
"stats: {}\n\ntime_penalty: {}\n targets_penalty: {}".format(
stats, time_penalty, targets_penalty
)
)
last_int_map = int_map
n_step += 1
final_bcs = [bcs[i].mean() for i in range(bcs.shape[0])]
batch_targets_penalty = TARGETS_PENALTY_WEIGHT * batch_targets_penalty / max(N_INIT_STATES, 1)
# batch_targets_penalty = batch_targets_penalty / N_INIT_STATES
batch_reward += batch_targets_penalty
if PLAY_LEVEL:
batch_reward += batch_play_bonus / max(N_INIT_STATES, 1)
time_penalty, targets_penalty, variance_penalty, diversity_bonus = (
None,
None,
None,
None,
)
else:
# batch_time_penalty = batch_time_penalty / N_INIT_STATES
N_INIT_STATES = n_init_states
if N_INIT_STATES > 1 and (batch_targets_penalty == 0 or not CASCADE_REWARD):
# Calculate stats that depend on having generated multiple levels. If using gated reward, only calculate these additional components of reward if level is
# perfectly valid.
# Variance penalty is the negative average (per-BC) standard deviation from the mean BC vector.
variance_penalty = (
-np.sum([bcs[i].std() for i in range(bcs.shape[0])]) / bcs.shape[0]
)
# Diversity bonus. We want minimal variance along BCS *and* diversity in terms of the map.
# Sum pairwise hamming distances between all generated maps.
diversity_bonus = np.sum(
[
np.sum(final_levels[j] != final_levels[k]) if j != k else 0
for k in range(N_INIT_STATES)
for j in range(N_INIT_STATES)
]
) / (N_INIT_STATES * N_INIT_STATES - 1)
# ad hoc scaling :/
diversity_bonus = 10 * diversity_bonus / (width * height)
batch_reward = batch_reward + max(0, variance_penalty + diversity_bonus)
else:
variance_penalty = None
diversity_bonus = None
if SAVE_LEVELS:
bc_dict = {}
for i in range(len(bc_names)):
bc_name = bc_names[i]
bc_dict[bc_name] = bcs[i, :].tolist()
level_json = {
"level": final_levels.tolist(),
"batch_reward": [batch_reward] * len(final_levels.tolist()),
"variance": [variance_penalty] * len(final_levels.tolist()),
"diversity": [diversity_bonus] * len(final_levels.tolist()),
"targets": trg.tolist(),
**bc_dict,
}
else:
level_json = {
"level": final_levels.tolist(),
"batch_reward": [batch_reward] * len(final_levels.tolist()),
"variance": [variance_penalty] * len(final_levels.tolist()),
"diversity": [diversity_bonus] * len(final_levels.tolist()),
"targets": trg.tolist(),
}
if render_levels:
return level_frames
if not INFER:
return level_json, batch_reward, final_bcs
else:
return (
level_json,
batch_reward,
final_bcs,
(
batch_time_penalty,
batch_targets_penalty,
variance_penalty,
diversity_bonus,
),
)
class EvoPCGRL:
def __init__(self):
self.init_env()
assert self.env.observation_space["map"].low[0, 0] == 0
# get number of tile types from environment's observation space
# here we assume that all (x, y) locations in the observation space have the same upper/lower bound
self.n_tile_types = self.env.observation_space["map"].high[0, 0] + 1
self.width = self.env._prob._width
self.height = self.env._prob._height
# FIXME why not?
# self.width = self.env._prob._width
# TODO: make reward a command line argument?
# TODO: multi-objective compatibility?
self.bc_names = BCS
# calculate the bounds of our behavioral characteristics
# NOTE: We assume a square map for some of these (not ideal).
# regions and path-length are applicable to all PCGRL problems
self.bc_bounds = self.env._prob.cond_bounds
self.bc_bounds.update(
{
"co-occurance": (0.0, 1.0),
"symmetry": (0.0, 1.0),
"symmetry-vertical": (0.0, 1.0),
"symmetry-horizontal": (0.0, 1.0),
"emptiness": (0.0, 1.0),
"entropy": (0.0, 1.0),
}
)
self.static_targets = self.env._prob.static_trgs
if REEVALUATE_ELITES or (RANDOM_INIT_LEVELS and args.n_init_states != 0):
init_level_archive_args = (N_INIT_STATES, self.height, self.width)
else:
init_level_archive_args = ()
if ALGO == "ME":
if RANDOM_INIT_LEVELS and args.n_init_states != 0:
gen_archive_cls = MEInitStatesArchive
else:
gen_archive_cls = MEGrid
elif REEVALUATE_ELITES:
# If we are constantly providing new random seeds to generators, we may want to regularly re-evaluate
# elites
gen_archive_cls = FlexArchive
elif RANDOM_INIT_LEVELS and not args.n_init_states == 0:
# If we have random seeds each generation but are not re-evaluating elites, then we want to hang onto these
# random seeds.
gen_archive_cls = InitStatesArchive
# gen_archive_cls = GridArchive
else:
gen_archive_cls = GridArchive
init_level_archive_args = ()
if PLAY_LEVEL:
self.play_bc_names = ["action_entropy", "local_action_entropy"]
self.play_bc_bounds = {
"action_entropy": (0, 4),
"local_action_entropy": (0, 4),
}
self.gen_archive = gen_archive_cls(
[100 for _ in self.bc_names],
# [1],
# [(-1, 1)],
[self.bc_bounds[bc_name] for bc_name in self.bc_names],
)
self.play_archive = FlexArchive(
# minimum of: 100 for each behavioral characteristic, or as many different values as the BC can take on, if it is less
# [min(100, int(np.ceil(self.bc_bounds[bc_name][1] - self.bc_bounds[bc_name][0]))) for bc_name in self.bc_names],
[100 for _ in self.play_bc_names],
# min/max for each BC
[self.play_bc_bounds[bc_name] for bc_name in self.play_bc_names],
)
else:
if CMAES:
# Restrict the archive to 1 cell so that we are effectively doing CMAES. BCs should be ignored.
self.gen_archive = gen_archive_cls(
[1, 1], [(0, 1), (0, 1)], *init_level_archive_args
)
else:
self.gen_archive = gen_archive_cls(
# minimum of 100 for each behavioral characteristic, or as many different values as the BC can take on, if it is less
# [min(100, int(np.ceil(self.bc_bounds[bc_name][1] - self.bc_bounds[bc_name][0]))) for bc_name in self.bc_names],
[100 for _ in self.bc_names],
# min/max for each BC
[self.bc_bounds[bc_name] for bc_name in self.bc_names],
*init_level_archive_args,
)
reps_to_out_chans = {
"cellular": self.n_tile_types,
"wide": self.n_tile_types,
"narrow": self.n_tile_types + 1,
"turtle": self.n_tile_types + N_DIRS,
}
reps_to_in_chans = {
"cellular": self.n_tile_types,
"wide": self.n_tile_types,
"narrow": self.n_tile_types + 1,
"turtle": self.n_tile_types + 1,
}
n_out_chans = reps_to_out_chans[REPRESENTATION]
n_in_chans = reps_to_in_chans[REPRESENTATION]
if MODEL == "CNN":
# Adding n_tile_types as a dimension here. Why would this not be in the env's observation space though? Should be one-hot by default?
observation_shape = (
1,
self.n_tile_types,
*self.env.observation_space["map"].shape,
)
if isinstance(self.env.action_space, gym.spaces.Box):
action_shape = self.env.action_space.shape
assert len(action_shape) == 3
n_flat_actions = action_shape[0] * action_shape[1] * action_shape[2]
elif isinstance(self.env.action_space, gym.spaces.MultiDiscrete):
nvec = self.env.action_space.nvec
assert len(nvec) == 3
n_flat_actions = nvec[0] + nvec[1] + nvec[2]
elif isinstance(self.env.action_space, gym.spaces.Discrete):
n_flat_actions = self.env.action_space.n
else:
raise NotImplementedError(
"I don't know how to handle this action space: {}".format(
type(self.env.action_space)
)
)
self.gen_model = GeneratorNNDense(
n_in_chans=self.n_tile_types,
n_actions=n_out_chans,
observation_shape=observation_shape,
n_flat_actions=n_flat_actions,
)
# TODO: remove this, just call model "NCA"
elif MODEL == "NCA":
self.gen_model = globals()["GeneratorNN"](
n_in_chans=self.n_tile_types, n_actions=n_out_chans
)
else:
self.gen_model = globals()[MODEL](
n_in_chans=self.n_tile_types, n_actions=n_out_chans
)
set_nograd(self.gen_model)
initial_w = get_init_weights(self.gen_model)
assert len(initial_w.shape) == 1
self.n_generator_weights = initial_w.shape[0]
self.n_player_weights = 0
# TODO: different initial weights per emitter as in pyribs lunar lander relanded example?
if MODEL == "NCA":
init_step_size = 1
elif MODEL == "CNN":
init_step_size = 1
else:
init_step_size = 1
if CMAES:
# The optimizing emitter will prioritize fitness over exploration of behavior space
emitter_type = OptimizingEmitter
else:
emitter_type = ImprovementEmitter
batch_size = 30
n_emitters = 5
if ALGO == "ME":
pass
elif args.mega:
gen_emitters = [
GradientImprovementEmitter(
self.gen_archive,
initial_w.flatten(),
# TODO: play with initial step size?
sigma_g=10.0,
stepsize=0.002, # Initial step size.
gradient_optimizer="adam",
selection_rule="mu",
batch_size=batch_size,
)
for _ in range(n_emitters) # Create 5 separate emitters.
]
else:
gen_emitters = [
# ImprovementEmitter(
emitter_type(
self.gen_archive,
initial_w.flatten(),
# TODO: play with initial step size?
init_step_size, # Initial step size.
batch_size=batch_size,
)
for _ in range(n_emitters) # Create 5 separate emitters.
]
if PLAY_LEVEL:
# Concatenate designer and player weights
self.play_model = PlayerNN(
self.n_tile_types, n_actions=len(self.env.player_actions)
)
set_nograd(self.play_model)
initial_play_w = get_init_weights(self.play_model)
assert len(initial_play_w.shape) == 1
self.n_player_weights = initial_play_w.shape[0]
play_emitters = [
OptimizingEmitter(
self.play_archive,
initial_play_w.flatten(),
# NOTE: Big step size, no good otherwise
1, # Initial step size.
batch_size=batch_size,
)
for _ in range(n_emitters) # Create 5 separate emitters.
]
self.play_optimizer = Optimizer(self.play_archive, play_emitters)
if ALGO == "ME":
ind_cls_args = {
'model_cls': globals()[MODEL],
'n_in_chans': self.n_tile_types,
'n_actions': self.n_tile_types,
}
self.gen_optimizer = MEOptimizer(self.gen_archive,
ind_cls=Individual,
batch_size=n_emitters*batch_size,
ind_cls_args=ind_cls_args,
)
else:
self.gen_optimizer = Optimizer(self.gen_archive, gen_emitters)
# These are the initial maps which will act as seeds to our NCA models
if args.n_init_states == 0:
# special square patch
self.init_states = np.zeros(shape=(1, self.height, self.width))
self.init_states[0, 5:-5, 5:-5] = 1
else:
# self.init_states = np.random.randint(
# 0, self.n_tile_types, (N_INIT_STATES, self.width, self.height)
# )
self.init_states = gen_random_levels(N_INIT_STATES, self.env)
self.start_time = time.time()
self.total_itrs = N_GENERATIONS
self.n_itr = 1
if PLAY_LEVEL:
self.player_1 = PlayerNN(self.n_tile_types)
self.player_2 = RandomPlayer(self.env.player_action_space)
else:
self.player_1 = None
self.player_2 = None
# This directory might already exist if a previous experiment failed before the first proper checkpoint/save
if not os.path.isdir(SAVE_PATH):
os.mkdir(SAVE_PATH)
# Save the command line arguments with which we launched
with open(os.path.join(SAVE_PATH, "settings.json"), "w", encoding="utf-8") as f:
json.dump(arg_dict, f, ensure_ascii=False, indent=4)
def evolve(self):
net_p_itr = 0
for itr in tqdm(range(self.n_itr, self.total_itrs + 1)):
# Request models from the optimizer.
if args.mega:
gen_sols = self.gen_optimizer.ask(grad_estimate=True)
else:
# if algo is ME, these are "individual" objects
gen_sols = self.gen_optimizer.ask()
# Evaluate the models and record the objectives and BCs.
objs, bcs = [], []
stats = ["batch_reward", "variance", "diversity", "targets"]
stat_json = {
"batch_reward": [],
"variance": [],
"diversity": [],
"targets": [],
}
if RANDOM_INIT_LEVELS and args.n_init_states != 0:
init_states = gen_random_levels(N_INIT_STATES, self.env)
else:
init_states = self.init_states
if THREADS:
n_sols = len(gen_sols)
if N_PROC is not None:
n_proc = N_PROC
else:
n_proc = n_sols
n_launches = np.ceil(n_sols / n_proc)
results = []
for n_launch in range(int(n_launches)):
futures = [
multi_evo.remote(
self.env,
self.gen_model,
model_w,
self.n_tile_types,
init_states,
self.bc_names,
self.static_targets,
seed,
player_1=self.player_1,
player_2=self.player_2,
)
for model_w in gen_sols
]
results += ray.get(futures)
del futures
auto_garbage_collect()
for result in results:
level_json, m_obj, m_bcs = result
if SAVE_LEVELS:
df = pd.DataFrame(level_json)
df = df[df["targets"] == 0]
if len(df) > 0:
df.to_csv(
os.path.join(SAVE_PATH, "levels.csv"),
mode="a",
header=False,
index=False,
)
objs.append(m_obj)
bcs.append([*m_bcs])
[stat_json[stat].extend(level_json[stat]) for stat in stats]
del results
auto_garbage_collect()
else:
for model_w in gen_sols:
gen_model = set_weights(self.gen_model, model_w)
level_json, m_obj, m_bcs = simulate(
env=self.env,
model=gen_model,
n_tile_types=self.n_tile_types,
init_states=init_states,
bc_names=self.bc_names,
static_targets=self.static_targets,
seed=seed,
player_1=self.player_1,
player_2=self.player_2,
)
if SAVE_LEVELS:
# Save levels to disc
df = pd.DataFrame(level_json)
df = df[df["targets"] == 0]
if len(df) > 0:
df.to_csv(
os.path.join(SAVE_PATH, "levels.csv"),
mode="a",
header=False,
index=False,
)
objs.append(m_obj)
bcs.append(m_bcs)
[stat_json[stat].extend(level_json[stat]) for stat in stats]
if RANDOM_INIT_LEVELS:
# Tell the archive what the initial states are, so that we can record them in case an individual is
# added.
self.gen_archive.set_init_states(init_states)
# Send the results back to the optimizer.
if args.mega:
# TODO: Here we need the jacobian
jacobian = None
self.gen_optimizer.tell(objs, bcs, jacobian=jacobian)
else:
self.gen_optimizer.tell(objs, bcs)
# for emitter in self.gen_optimizer.emitters:
#
# Re-evaluate elite generators. If doing CMAES, re-evaluate every iteration. Otherwise, try to let the archive grow.
if REEVALUATE_ELITES and (CMAES or self.n_itr % 1 == 0):
df = self.gen_archive.as_pandas()
# curr_archive_size = len(df)
high_performing = df.sample(frac=1)
elite_models = np.array(high_performing.loc[:, "solution_0":])
elite_bcs = np.array(high_performing.loc[:, "behavior_0":"behavior_1"])
if THREADS:
futures = [
multi_evo.remote(
self.env,
self.gen_model,
elite_models[i],
self.n_tile_types,
init_states,
self.bc_names,
self.static_targets,
seed,
player_1=self.player_1,
player_2=self.player_2,
)
for i in range(min(max(len(elite_models) // 2, 1), 150 // 2))
]
results = ray.get(futures)
for (el_i, result) in enumerate(results):
old_el_bcs = elite_bcs[el_i]
level_json, el_obj, el_bcs = result
if SAVE_LEVELS:
# Save levels to disk
df = pd.DataFrame(level_json)
df = df[df["targets"] == 0]
if len(df) > 0:
df.to_csv(
os.path.join(SAVE_PATH, "levels.csv"),
mode="a",
header=False,
index=False,
)
# mean_obj, mean_bcs, obj_hist, bc_hist = self.gen_archive.pop_elite(el_obj, el_bcs, old_el_bcs)
results[el_i] = self.gen_archive.pop_elite(
el_obj, el_bcs, old_el_bcs
)
[stat_json[stat].extend(level_json[stat]) for stat in stats]
for (el_i, result) in enumerate(results):
self.gen_archive.update_elite(*result)
del results
auto_garbage_collect()
else:
# 150 to match number of new-model evaluations
for elite_i in range(min(max(len(elite_models) // 2, 1), 150 // 2)):
# print(elite_i)
# pprint.pprint(self.gen_archive.obj_hist, width=1)
# pprint.pprint(self.gen_archive.bc_hist, width=1)
old_el_bcs = elite_bcs[elite_i]
gen_model_weights = elite_models[elite_i]
gen_model = set_weights(self.gen_model, gen_model_weights)
level_json, el_obj, el_bcs = simulate(
env=self.env,
model=gen_model,
n_tile_types=self.n_tile_types,
init_states=init_states,
bc_names=self.bc_names,
static_targets=self.static_targets,
seed=seed,
player_1=self.player_1,
player_2=self.player_2,
)
idx = self.gen_archive.get_index(old_el_bcs)
[stat_json[stat].extend(level_json[stat]) for stat in stats]
self.gen_archive.update_elite(
*self.gen_archive.pop_elite(el_obj, el_bcs, old_el_bcs)
)
# last_archive_size = len(self.gen_archive.as_pandas(include_solutions=False))
log_archive(self.gen_archive, "Generator", itr, self.start_time, stat_json)
# FIXME: implement these
# self.play_bc_names = ['action_entropy', 'action_entropy_local']
if PLAY_LEVEL:
# elite_model_w = self.gen_archive.get_random_elite()[0]
df = self.gen_archive.as_pandas()
high_performing = df.sort_values("objective", ascending=False)
models = np.array(high_performing.loc[:, "solution_0":])
np.random.shuffle(models)
playable_levels = []
for m_i in range(len(models)):
elite_model_w = models[m_i]
gen_model = set_weights(self.gen_model, elite_model_w)
playable_levels += gen_playable_levels(
self.env, self.gen_model, self.init_states, self.n_tile_types
)
if len(playable_levels) >= 50:
break
if len(playable_levels) >= 10:
play_start_time = time.time()
self.playable_levels = playable_levels
for p_itr in tqdm(range(1, 2)):
net_p_itr += 1
play_sols = self.play_optimizer.ask()
objs, bcs = [], []
if THREADS:
futures = [
multi_play_evo.remote(
self.env,
gen_model,
player_w,
self.n_tile_types,
init_states,
self.play_bc_names,
self.static_targets,
seed,
player_1=self.player_1,
player_2=self.player_2,
playable_levels=playable_levels,
)
for player_w in play_sols
]
results = ray.get(futures)
for result in results:
m_obj, m_bcs = result
objs.append(m_obj)
bcs.append([*m_bcs])
del results
auto_garbage_collect()
else:
play_i = 0
for play_w in play_sols:
play_i += 1
play_model = set_weights(self.play_model, play_w)
m_obj, m_bcs = player_simulate(
env=self.env,
n_tile_types=self.n_tile_types,
play_bc_names=self.play_bc_names,
seed=seed,
player_1=self.player_1,
playable_levels=playable_levels,
)
objs.append(m_obj)
bcs.append(m_bcs)
self.play_optimizer.tell(objs, bcs)
# TODO: parallelize me
df = self.play_archive.as_pandas()
high_performing = df.sort_values("objective", ascending=False)
elite_models = np.array(high_performing.loc[:, "solution_0":])
for elite_i in range(10):
play_model_weights = elite_models[elite_i]
init_nn = set_weights(self.play_model, play_model_weights)
obj, bcs = player_simulate(
self.env,
self.n_tile_types,
self.play_bc_names,
init_nn,
playable_levels=playable_levels,
)
self.play_archive.update_elite(obj, bcs)
# m_objs.append(obj)
# bc_a = get_bcs(init_nn)
# obj = np.mean(m_objs)
# objs.append(obj)
# bcs.append([bc_a])
log_archive(self.play_archive, "Player", p_itr, play_start_time)
if net_p_itr > 0 and net_p_itr % SAVE_INTERVAL == 0:
# Save checkpoint during player evolution loop
self.save()
df = self.play_archive.as_pandas()
high_performing = df.sort_values("objective", ascending=False)
elite_scores = np.array(high_performing.loc[:, "objective"])
if np.array(elite_scores).max() >= self.env._prob.max_reward:
break
# TODO: assuming an archive of one here! Make it more general, like above for generators
play_model = set_weights(
self.play_model, self.play_archive.get_random_elite()[0]
)
if itr % SAVE_INTERVAL == 0 or itr == 1:
# Save checkpoint during generator evolution loop
self.save()
# if itr % VIS_INTERVAL == 0 or itr == 1:
# ckp_dir = os.path.join(SAVE_PATH, "checkpoint_{}".format(itr))
# if not os.path.isdir(ckp_dir):
# os.mkdir(ckp_dir)
# if not CMAES:
# # Otherwise the heatmap would just be a single cell
# self.visualize(itr=itr)
# archive_objs = np.array(
# self.gen_archive.as_pandas(include_solutions=False).loc[
# :, "objective"
# ]
# )
# save_train_stats(archive_objs, itr=itr)
self.n_itr += 1
def save(self):
global ENV
ENV = self.env
self.env = None
evo_path = os.path.join(SAVE_PATH, "evolver.pkl")
os.system(
'mv "{}" "{}"'.format(evo_path, os.path.join(SAVE_PATH, "last_evolver.pkl"))
)
pickle.dump(
self, open(os.path.join(SAVE_PATH, "evolver.pkl"), "wb"), protocol=4
)
self.env = ENV
def init_env(self):
"""Initialize the PCGRL level-generation RL environment and extract any useful info from it."""
env_name = "{}-{}-v0".format(PROBLEM, REPRESENTATION)
self.env = gym.make(env_name)
self.env.adjust_param(render=RENDER)
if CMAES:
# Give a little wiggle room from targets, to allow for some diversity
if "binary" in PROBLEM:
path_trg = self.env._prob.static_trgs["path-length"]
self.env._prob.static_trgs.update(
{"path-length": (path_trg - 20, path_trg)}
)
elif "zelda" in PROBLEM:
path_trg = self.env._prob.static_trgs["path-length"]
self.env._prob.static_trgs.update(
{"path-length": (path_trg - 40, path_trg)}
)
elif "sokoban" in PROBLEM:
sol_trg = self.env._prob.static_trgs["sol-length"]
self.env._prob.static_trgs.update(
{"sol-length": (sol_trg - 10, sol_trg)}
)
elif "smb" in PROBLEM:
pass
else:
raise NotImplementedError
global N_DIRS
if hasattr(self.env._rep, "_dirs"):
N_DIRS = len(self.env._rep._dirs)
else:
N_DIRS = 0
global N_STEPS
# if N_STEPS is None:
# if REPRESENTATION != "cellular":
max_ca_steps = args.n_steps
max_changes = self.env._prob._height * self.env._prob._width
reps_to_steps = {
"cellular": max_ca_steps,
"wide": max_changes,
# "narrow": max_changes,
"narrow": max_changes,
# "turtle": max_changes * 2,
"turtle": 2 * max_changes,
# So that it can move around to each tile I guess
}
N_STEPS = reps_to_steps[REPRESENTATION]
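# Note (added comment, hedged): under these step budgets, a hypothetical H x W map gives
# the wide and narrow representations H * W steps (roughly one pass over the board),
# the turtle representation 2 * H * W steps (it also spends steps moving around),
# and the cellular representation simply uses the n_steps argument (args.n_steps).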
def visualize(self, itr=None):
archive = self.gen_archive
# # Visualize Result
# grid_archive_heatmap(archive, vmin=self.reward_bounds[self.reward_names[0]][0], vmax=self.reward_bounds[self.reward_names[0]][1])
# if PROBLEM == 'binary':
# vmin = -20
# vmax = 20
# elif PROBLEM == 'zelda':
# vmin = -20
# vmax = 20
# grid_archive_heatmap(archive, vmin=vmin, vmax=vmax)
if ALGO == "ME":
obj_min, obj_max = archive.fitness_extrema[0]
qdpy.plots.plotGridSubplots(archive.quality_array[..., 0], os.path.join(SAVE_PATH, 'fitness.pdf'),
plt.get_cmap("inferno_r"), archive.features_domain,
archive.fitness_domain[0], nbTicks=None)
else:
plt.figure(figsize=(8, 6))
df_obj = archive.as_pandas()["objective"]
obj_min = df_obj.min()
obj_max = df_obj.max()
vmin = np.floor(obj_min)
vmax = np.ceil(obj_max)
grid_archive_heatmap(archive, vmin=vmin, vmax=vmax)
label_fontdict = {
'fontsize': 16,
}
if not CMAES:
plt.xlabel(self.bc_names[0], fontdict=label_fontdict)
plt.ylabel(self.bc_names[1], fontdict=label_fontdict)
if itr is not None:
save_path = os.path.join(SAVE_PATH, "checkpoint_{}".format(itr))
else:
save_path = SAVE_PATH
plt.title('fitness', fontdict={'fontsize': 24})
plt.tight_layout()
plt.savefig(os.path.join(save_path, "fitness.png"))
# plt.gca().invert_yaxis() # Makes more sense if larger BC_1's are on top.
if SHOW_VIS:
plt.show()
plt.close()
# Print table of results
# df = archive.as_pandas()
# high_performing = df[df["objective"] > 200].sort_values("objective", ascending=False)
# print(df)
def infer(self, concat_gifs=True):
assert INFER
self.init_env()
archive = self.gen_archive
if args.algo == "ME":
nonempty_idxs = np.stack(np.where(
np.isnan(archive.quality_array) == False), axis=1)
# Assume 2nd BC is a measure of complexity
# Sort according to 2nd BC
idxs = nonempty_idxs.tolist()
idxs.sort(key=lambda x: x[1])
idxs_T = tuple(np.array(idxs).T)
objs = archive.quality_array[idxs_T]
# Get list of individuals in same order. First get list of features belonging to individuals in bin,
# then get individual by bin-coordinate
bcs = [archive.features[tuple(idx[:-1])][idx[-1]].values for idx in idxs]
models = [archive.solutions[tuple(idx[:-1])][idx[-1]] for idx in idxs]
# Get rid of bin coordinate for our purposes
# TODO: for more flexibility, instead adapt the below to get this bin coordinate
idxs = [idx[:-1] for idx in idxs]
else:
df = archive.as_pandas()
rows = df.sort_values("behavior_1", ascending=False)
models = np.array(rows.loc[:, "solution_0":])
bcs_0 = np.array(rows.loc[:, "behavior_0"])
bcs_1 = np.array(rows.loc[:, "behavior_1"])
objs = np.array(rows.loc[:, "objective"])
# FIXME: don't need these
idxs = np.array(rows.loc[:, "index_0":"index_1"])
global N_INIT_STATES
global N_EVAL_STATES
global RENDER
global RANDOM_INIT_LEVELS
if RENDER_LEVELS:
RENDER = False
# N_INIT_STATES = 1
if "smb" in PROBLEM:
d = 4
figw, figh = 32, 4
elif "zelda" in PROBLEM:
d = 3
figw, figh = self.env._prob._width, self.env._prob._height
else:
d = 6 # number of rows and columns
figw, figh = self.env._prob._width, self.env._prob._height
if CMAES:
n_rows = 2
n_cols = 5
n_figs = n_rows * d
fig, axs = plt.subplots(
ncols=d,
nrows=n_rows,
figsize=(figw * n_cols / d, figh * n_rows / d),
)
df_g = df.sort_values(by=["objective"], ascending=False)
grid_models = np.array(df_g.loc[:, "solution_0":])
level_frames = []
for (i, model) in enumerate(grid_models):
for j in range(n_figs):
n_row = j // d
n_col = j % d
axs[n_row, n_col].set_axis_off()
# TODO: select for diversity?
# parallelization would be kind of pointless here
init_nn = set_weights(self.gen_model, model)
# run simulation, but only on a single level-seed
# init_state = gen_random_levels(1, self.env)
# init_state = np.random.randint(
# 0, self.n_tile_types, size=(1, *self.init_states.shape[1:])
# )
# _, _, _, (
# time_penalty,
# targets_penalty,
# variance_penalty,
# diversity_bonus,
# ) = simulate(
raise NotImplementedError
TT() # don't have a way of rendering CMAES yet??
level_frames_i = simulate(
self.env,
init_nn,
self.n_tile_types,
self.init_states[0:1],
self.bc_names,
self.static_targets,
seed=None,
render_levels=True,
)
if not concat_gifs:
save_level_frames(level_frames_i, i)
else:
level_frames += level_frames_i
# Get image
# img = self.env.render(mode="rgb_array")
img = level_frames[-1]
axs[n_row, n_col].imshow(img, aspect=1)
if concat_gifs:
save_level_frames(level_frames, 'concat')
else:
fig, axs = plt.subplots(ncols=d, nrows=d, figsize=(figw, figh))
if ALGO == "ME":
pass
else:
df_g = df.sort_values(by=["behavior_0", "behavior_1"], ascending=False)
df_g["row"] = np.floor(
np.linspace(0, d, len(df_g), endpoint=False)
).astype(int)
level_frames = []
for row_num in range(d):
row = df_g[df_g["row"] == row_num]
row = row.sort_values(by=["behavior_1"], ascending=True)
row["col"] = np.arange(0, len(row), dtype=int)
idx = np.floor(np.linspace(0, len(row) - 1, d)).astype(int)
row = row[row["col"].isin(idx)]
row = row.drop(["row", "col"], axis=1)
grid_models = np.array(row.loc[:, "solution_0":])
for col_num in range(len(row)):
model = grid_models[col_num]
# axs[row_num, col_num].set_axis_off()
axs[-col_num-1, -row_num-1].set_axis_off()
# initialize weights
gen_model = set_weights(self.gen_model, model)
# run simulation, but only on the first level-seed
# _, _, _, (
# time_penalty,
# targets_penalty,
# variance_penalty,
# diversity_bonus,
# ) = simulate(
level_frames_i = simulate(
self.env,
gen_model,
self.n_tile_types,
self.init_states[0:1],
self.bc_names,
self.static_targets,
seed=None,
render_levels=True,
)
if not concat_gifs:
save_level_frames(level_frames_i, '{}_{}'.format(row_num, col_num))
level_frames += level_frames_i
# Get image
# img = self.env.render(mode="rgb_array")
img = level_frames[-1]
# axs[row_num, col_num].imshow(img, aspect="auto")
axs[-col_num-1, -row_num-1].imshow(img, aspect="auto")
if concat_gifs:
save_level_frames(level_frames, 'concat')
fig.subplots_adjust(hspace=0.01, wspace=0.01)
plt.tight_layout()
fig.savefig(
os.path.join(SAVE_PATH, "levelGrid_{}-bin.png".format(d)), dpi=300
)
plt.close()
if PLAY_LEVEL:
player_simulate(
self.env,
self.n_tile_types,
self.play_bc_names,
self.play_model,
playable_levels=self.playable_levels,
seed=None,
)
i = 0
if EVALUATE:
# First, visualize and aggregate the scores of the elites as they currently stand in the grid
if not VISUALIZE:
# visualize if we haven't already
self.visualize()
# aggregate scores of individuals currently in the grid
save_train_stats(objs, archive, self.env, self.bc_names)
# The level spaces which we will attempt to map to
problem_eval_bc_names = {
"binary": [
# ("regions", "path-length")
],
"zelda": [
# ("nearest-enemy", "path-length"),
# ("symmetry", "path-length"),
# ("emptiness", "path-length"),
],
"sokoban": [
# ("crate", "sol-length")
],
"smb": [
# ("emptiness", "jumps")
],
}
# for k in problem_eval_bc_names.keys():
# problem_eval_bc_names[k] += [
# # ("NONE"),
# ("emptiness", "symmetry")
# ]
for (k, v) in problem_eval_bc_names.items():
if k in PROBLEM:
eval_bc_names = v
break
# toss our elites into an archive with different BCs. For fun!
eval_bc_names = list(set([tuple(self.bc_names)] + eval_bc_names))
if not CMAES:
if ALGO == "ME":
eval_archives = [
MEGrid(
[N_BINS for _ in eval_bcs],
[self.bc_bounds[bc_name] for bc_name in eval_bcs],
)
for eval_bcs in eval_bc_names
]
else:
eval_archives = [
GridArchive(
# minimum of 100 for each behavioral characteristic, or as many different values as the BC can take on, if it is less
# [min(100, int(np.ceil(self.bc_bounds[bc_name][1] - self.bc_bounds[bc_name][0]))) for bc_name in self.bc_names],
[N_BINS for _ in eval_bcs],
# min/max for each BC
[self.bc_bounds[bc_name] for bc_name in eval_bcs],
)
for eval_bcs in eval_bc_names
]
[
eval_archive.initialize(solution_dim=len(models[0]))
for eval_archive in eval_archives
]
RENDER = False
# Iterate through our archive of trained elites, evaluating them and storing stats about them.
# Borrowing logic from grid_archive_heatmap from pyribs.
# Retrieve data from archive
if ALGO == 'ME':
lower_bounds = [archive.features_domain[i][0] for i in range(len(archive.features_domain))]
upper_bounds = [archive.features_domain[i][1] for i in range(len(archive.features_domain))]
x_dim, y_dim = archive.shape
else:
lower_bounds = archive.lower_bounds
upper_bounds = archive.upper_bounds
x_dim, y_dim = archive.dims
x_bounds = np.linspace(lower_bounds[0], upper_bounds[0], x_dim + 1)
y_bounds = np.linspace(lower_bounds[1], upper_bounds[1], y_dim + 1)
# Color for each cell in the heatmap
fitness_scores = np.full((y_dim, x_dim), np.nan)
playability_scores = np.full((y_dim, x_dim), np.nan)
diversity_scores = np.full((y_dim, x_dim), np.nan)
reliability_scores = np.full((y_dim, x_dim), np.nan)
eval_fitness_scores = []
eval_playability_scores = []
eval_diversity_scores = []
eval_reliability_scores = []
if not CMAES:
for j in range(len(eval_archives)):
eval_fitness_scores.append(np.full((y_dim, x_dim), np.nan))
eval_playability_scores.append(np.full((y_dim, x_dim), np.nan))
eval_reliability_scores.append(np.full((y_dim, x_dim), np.nan))
eval_diversity_scores.append(np.full((y_dim, x_dim), np.nan))
def record_scores(
id_0,
id_1,
batch_reward,
targets_penalty,
diversity_bonus,
variance_penalty,
fitness_scores,
playability_scores,
diversity_scores,
reliability_scores,
):
fitness_scores[id_0, id_1] = batch_reward
playability_scores[id_0, id_1] = targets_penalty
if diversity_bonus is not None:
diversity_scores[id_0, id_1] = diversity_bonus
if variance_penalty is not None:
reliability_scores[id_0, id_1] = variance_penalty
def save_levels(level_json, overwrite=False, headers=False):
df = pd.DataFrame.from_dict(level_json
)
# df = df[df['targets'] == 0]
if overwrite:
write_mode = "w"
else:
write_mode = "a"
if len(df) > 0:
csv_name = "eval_levels"
if not RANDOM_INIT_LEVELS:
csv_name += "_fixLvls"
csv_name += ".csv"
if headers:
header = df.columns
else:
header = None
df.to_csv(
os.path.join(SAVE_PATH, csv_name),
mode=write_mode,
header=header,
index=False,
)
init_states_archive = None
if RANDOM_INIT_LEVELS:
# Effectively doing inference on a (presumed) held-out set of levels
if CMAES:
N_EVAL_STATES = N_INIT_STATES = 100
else:
N_EVAL_STATES = N_INIT_STATES = 20 #= 100 # e.g. 10
init_states = gen_random_levels(N_INIT_STATES, self.env)
# init_states = np.random.randint(
# 0,
# self.n_tile_types,
# size=(N_EVAL_STATES, *self.init_states.shape[1:]),
# )
elif args.fix_level_seeds or args.n_init_states == 0:
# If level seeds were fixed throughout training, use those
init_states = self.init_states
N_EVAL_STATES = N_INIT_STATES = init_states.shape[0]
else:
init_states_archive = self.gen_archive.init_states_archive
init_states = None
# Otherwise, use the init level seeds that were entered into the archive with each elite
n_train_bcs = len(self.bc_names)
if THREADS:
futures = [
multi_evo.remote(
self.env,
self.gen_model,
model_w,
self.n_tile_types,
init_states,
[bc for bc_names in eval_bc_names for bc in bc_names],
self.static_targets,
seed,
player_1=self.player_1,
player_2=self.player_2,
proc_id=i,
init_states_archive=init_states_archive,
index=tuple(idxs[i]),
)
for (i, model_w) in enumerate(models)
]
results = ray.get(futures)
i = 0
for result in results:
level_json, batch_reward, final_bcs, (
time_penalty,
batch_targets_penalty,
variance_penalty,
diversity_bonus,
) = result
# id_0 = idxs_0[i]
# id_1 = idxs_1[i]
grid_bcs = final_bcs[:n_train_bcs]
# TODO: remove this (it's for backward compatibility) since we've implemented get_index for the qdpy grid
if ALGO == "ME":
id_0, id_1 = archive.index_grid(tuple(grid_bcs))
else:
id_0, id_1 = archive.get_index(np.array(grid_bcs))
if SAVE_LEVELS:
save_levels(level_json, overwrite=i == 0, headers=i==0)
# Record directly from evolved archive since we are guaranteed to have only one elite per cell
record_scores(
id_0,
id_1,
batch_reward,
batch_targets_penalty,
diversity_bonus,
variance_penalty,
fitness_scores,
playability_scores,
diversity_scores,
reliability_scores,
)
if not CMAES:
for j, eval_archive in enumerate(eval_archives):
# Record components of the fitness for each cell in each evaluation archive
# NOTE: assume 2 BCs per eval archive
eval_bcs = np.array(
# final_bcs[n_train_bcs + 2 * j : n_train_bcs + 2 * j + 2]
final_bcs[2 * j: 2 * (j + 1)]
)
if ALGO == "ME":
id_0, id_1 = archive.index_grid(tuple(eval_bcs))
# Dummy individual
individual = Individual(type(self.gen_model), self.n_tile_types, self.n_tile_types)
individual.fitness = Fitness([batch_reward])
individual.features = Features(final_bcs)
idx = eval_archive.add(individual)
ind_added = idx is not None
else:
id_0, id_1 = eval_archive.get_index(eval_bcs)
# Add dummy solution weights for now
status, _ = eval_archive.add(
np.zeros(eval_archive.solution_dim),
batch_reward,
eval_bcs,
)
ind_added = status != AddStatus.NOT_ADDED
if ind_added:
# For eval archive, only record new best individuals in each filled cell
record_scores(
id_0,
id_1,
batch_reward,
batch_targets_penalty,
diversity_bonus,
variance_penalty,
eval_fitness_scores[j],
eval_playability_scores[j],
eval_diversity_scores[j],
eval_reliability_scores[j],
)
i += 1
auto_garbage_collect()
else:
# NOTE: Not maintaining this single-threaded code at the moment; it can be refactored and brought up to date later
while i < len(models):
# iterate through all models and record stats, on either training seeds or new ones (to test evaluation)
model = models[i]
id_0, id_1 = idxs[i]
if init_states is None:
init_states_archive = archive.init_states_archive
else:
init_states_archive = None
if init_states is None:
init_states = get_init_states(
init_states_archive, tuple(idxs[i])
)
gen_model = set_weights(self.gen_model, model)
level_json, batch_reward, final_bcs, (
time_penalty,
targets_penalty,
variance_penalty,
diversity_bonus,
) = simulate(
env=self.env,
model=gen_model,
n_tile_types=self.n_tile_types,
init_states=init_states,
bc_names=self.bc_names,
static_targets=self.static_targets,
seed=None,
player_1=self.player_1,
player_2=self.player_2,
)
if SAVE_LEVELS:
save_levels(level_json)
record_scores(
id_0,
id_1,
batch_reward,
targets_penalty,
diversity_bonus,
variance_penalty,
fitness_scores,
playability_scores,
diversity_scores,
reliability_scores,
)
if ALGO == "ME":
n_filled_bins = eval_archive.filled_bins
assert len(models) == archive.filled_bins
n_total_bins = archive.size
else:
n_filled_bins = len(eval_archive._occupied_indices)
assert len(models) == len(archive._occupied_indices)
n_total_bins = archive.bins
qd_score = get_qd_score(archive, self.env, self.bc_names)
eval_qd_score = get_qd_score(eval_archive, self.env, self.bc_names)
stats = {
"generations completed": self.n_itr,
"% train archive full": len(models) / n_total_bins,
"archive size": n_filled_bins,
"QD score": qd_score,
"eval QD score": eval_qd_score,
"% eval archives full": {},
"eval archive sizes": {},
"eval QD scores": {},
}
if not CMAES:
plot_args = {
'lower_bounds': lower_bounds,
'upper_bounds': upper_bounds,
'x_bounds': x_bounds,
'y_bounds': y_bounds,
}
plot_score_heatmap(playability_scores, "playability", self.bc_names, **plot_args,
bcs_in_filename=False)
plot_score_heatmap(diversity_scores / 10, "diversity", self.bc_names, **plot_args, bcs_in_filename=False)
plot_score_heatmap(reliability_scores, "reliability", self.bc_names, **plot_args, bcs_in_filename=False)
plot_score_heatmap(fitness_scores, "fitness_eval", self.bc_names, **plot_args, bcs_in_filename=False)
for j, eval_archive in enumerate(eval_archives):
bc_names = eval_bc_names[j]
if bc_names != ("NONE") and bc_names != tuple(self.bc_names):
plot_score_heatmap(
eval_playability_scores[j], "playability", bc_names, **plot_args,
)
plot_score_heatmap(
eval_diversity_scores[j] / 10, "diversity", bc_names, **plot_args,
)
plot_score_heatmap(
eval_reliability_scores[j], "reliability", bc_names, **plot_args,
)
plot_score_heatmap(
eval_fitness_scores[j], "fitness_eval", bc_names, **plot_args,
)
if bc_names == tuple(self.bc_names):
# in case a bug appears here, where performance differs between training and inference,
# include this redundant data to try to pinpoint it. Note that this is only redundant in
# stats_fixLvls, because otherwise we are evaluating in the same BC space.
pct_archive_full = (
n_filled_bins / n_total_bins
)
if not RANDOM_INIT_LEVELS:
# then this will be the same as the
# if not len(eval_archive._occupied_indices) / eval_archive.bins == stats["% train archive full"]:
# continue
pass
else:
pass
stats["% elites maintained"] = (
pct_archive_full / stats["% train archive full"]
)
stats["% QD score maintained"] = stats["eval QD score"] / stats["QD score"]
stats["% fresh train archive full"] = pct_archive_full
stats["% fresh train archive full"] = pct_archive_full
n_occupied = n_filled_bins
# assert n_occupied == len(eval_archive._occupied_indices)
bcs_key = "-".join(bc_names)
stats["% eval archives full"].update(
{
bcs_key: n_occupied / n_total_bins,
})
stats["eval archive sizes"].update({
bcs_key: n_occupied,
})
stats["eval QD scores"].update({
bcs_key: get_qd_score(eval_archive, self.env, bc_names)
})
stats.update(
{
"playability": get_stats(playability_scores),
"diversity": get_stats(diversity_scores / 10),
"reliability": get_stats(reliability_scores),
}
)
f_name = "stats"
if not RANDOM_INIT_LEVELS:
f_name = f_name + "fixLvls"
f_name += ".json"
with open(os.path.join(SAVE_PATH, f_name), "w", encoding="utf-8") as f:
json.dump(stats, f, ensure_ascii=False, indent=4)
return
while i < len(models):
# model = self.archive.get_random_elite()[0]
# model = models[np.random.randint(len(models))]
model = models[i]
gen_model = set_weights(self.gen_model, model)
# RANDOM_INIT_LEVELS = not opts.fix_level_seeds
if RANDOM_INIT_LEVELS and args.n_init_states != 0:
init_states = gen_random_levels(N_INIT_STATES, self.env)
elif not args.fix_level_seeds and args.n_init_states != 0:
init_states_archive = archive.init_states_archive
init_states = get_init_states(init_states_archive, tuple(idxs[i]))
else:
init_states = self.init_states
_, _, _, (
time_penalty,
targets_penalty,
variance_penalty,
diversity_bonus,
) = simulate(
self.env,
gen_model,
self.n_tile_types,
init_states,
self.bc_names,
self.static_targets,
seed=None,
player_1=self.player_1,
player_2=self.player_2,
)
# input("Mean behavior characteristics:\n\t{}: {}\n\t{}: {}\nMean reward:\n\tTotal: {}\n\ttime: {}\n\ttargets: {}\n\tvariance: {}\n\tdiversity: {}\nPress any key for next generator...".format(
# self.bc_names[0], bcs_0[i], self.bc_names[1], bcs_1[i], objs[i], time_penalty, targets_penalty, variance_penalty, diversity_bonus))
i += 1
# if i == len(models):
# i=0
def gen_random_levels(n_init_states, env):
init_states = np.random.randint(
0, len(env._prob.get_tile_types()), (n_init_states, env._prob._height, env._prob._width)
)
return init_states
# init_states = np.zeros(shape=(n_init_states, env._prob._height, env._prob._width))
# init_state_maps = []
# for i in range(N_INIT_STATES):
# env._rep.reset(
# env._prob._width,
# env._prob._height,
# get_int_prob(env._prob._prob, env._prob.get_tile_types()),
# )
# # init_state_maps.append(np.expand_dims(get_one_hot_map(self.env._rep._map, self.n_tile_types), axis=0))
# init_state_maps.append(np.expand_dims(env._rep._map, axis=0))
# init_states[:] = np.vstack(init_state_maps)
# # init_states = np.zeros(
# # 0, self.n_tile_types, size=self.init_states.shape
# # )
# return init_states
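# Hedged usage sketch (added comment, not in the original file): gen_random_levels
# returns an integer array of shape (n_init_states, height, width), one random level
# per row. For instance, given a hypothetical `env` built by init_env():
#   init_states = gen_random_levels(10, env)
#   assert init_states.shape == (10, env._prob._height, env._prob._width)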
if __name__ == "__main__":
"""
Set Parameters
"""
N_BINS = 100
CA_ACTION = True
args, arg_dict = get_args()
global INFER
global EVO_DIR
global CUDA
global RENDER
global PROBLEM
global SHOW_VIS
global VISUALIZE
global N_STEPS
global N_GENERATIONS
global N_INIT_STATES
global N_INFER_STEPS
global BCS
global RENDER_LEVELS
global THREADS
global PLAY_LEVEL
global CMAES
global EVALUATE
global SAVE_LEVELS
global RANDOM_INIT_LEVELS
global CASCADE_REWARD
global REPRESENTATION
global MODEL
global REEVALUATE_ELITES
global preprocess_action
global N_PROC
global ALGO
global seed
CONCAT_GIFS = False
if arg_dict["exp_name"] == '5':
seed = 420
else:
try:
seed = int(arg_dict["exp_name"])
except Exception:
print("Assigning random seed")
seed = np.random.randint(10000)  # NOTE: np.random.randint requires an upper bound; 10000 is an arbitrary choice here
print("Random number seed is: {}".format(seed))
N_PROC = arg_dict["n_cpu"]
MODEL = arg_dict["model"]
ALGO = arg_dict["algo"]
if ALGO == "ME":
# TODO: implement wrapper around other models generically
assert MODEL in ["CPPN", "GenCPPN", "CPPNCA"]
else:
assert ALGO == "CMAME"
REPRESENTATION = arg_dict["representation"]
CASCADE_REWARD = arg_dict["cascade_reward"]
REEVALUATE_ELITES = not arg_dict["fix_elites"] and arg_dict["n_init_states"] != 0
RANDOM_INIT_LEVELS = (
not arg_dict["fix_level_seeds"]
and arg_dict["n_init_states"] != 0
or REEVALUATE_ELITES
)
if REEVALUATE_ELITES:
# Otherwise there is no point in re-evaluating them
assert RANDOM_INIT_LEVELS
CMAES = arg_dict["behavior_characteristics"] == ["NONE"]
EVALUATE = arg_dict["evaluate"]
PLAY_LEVEL = arg_dict["play_level"]
BCS = arg_dict["behavior_characteristics"]
N_GENERATIONS = arg_dict["n_generations"]
N_INIT_STATES = arg_dict["n_init_states"]
N_STEPS = arg_dict["n_steps"]
SHOW_VIS = arg_dict["show_vis"]
PROBLEM = arg_dict["problem"]
CUDA = False
VISUALIZE = arg_dict["visualize"]
INFER = arg_dict["infer"] or EVALUATE
N_INFER_STEPS = N_STEPS
# N_INFER_STEPS = 100
RENDER_LEVELS = arg_dict["render_levels"]
THREADS = arg_dict["multi_thread"] # or EVALUATE
SAVE_INTERVAL = arg_dict["save_interval"]
VIS_INTERVAL = 50
if "CPPN" in MODEL:
if MODEL != "CPPNCA" and "Gen" not in MODEL:
assert N_INIT_STATES == 0 and not RANDOM_INIT_LEVELS and not REEVALUATE_ELITES
if MODEL != "CPPNCA":
assert N_STEPS == 1
SAVE_LEVELS = arg_dict["save_levels"] or EVALUATE
# exp_name = 'EvoPCGRL_{}-{}_{}_{}-batch_{}-step_{}'.format(PROBLEM, REPRESENTATION, BCS, N_INIT_STATES, N_STEPS, arg_dict['exp_name'])
# exp_name = "EvoPCGRL_{}-{}_{}_{}_{}-batch".format(
# PROBLEM, REPRESENTATION, MODEL, BCS, N_INIT_STATES
# )
exp_name = "EvoPCGRL_"
if ALGO == "ME":
exp_name += "ME_"
exp_name += "{}-{}_{}_{}_{}-batch_{}-pass".format(
PROBLEM, REPRESENTATION, MODEL, BCS, N_INIT_STATES, N_STEPS
)
if CASCADE_REWARD:
exp_name += "_cascRew"
if not RANDOM_INIT_LEVELS:
exp_name += "_fixLvls"
if not REEVALUATE_ELITES:
exp_name += "_fixElites"
if args.mega:
exp_name += "_MEGA"
exp_name += "_" + arg_dict["exp_name"]
SAVE_PATH = os.path.join("evo_runs", exp_name)
if MODEL not in preprocess_action_funcs:
if "CPPN" in MODEL:
preprocess_action = preprocess_action_funcs['CPPN'][REPRESENTATION]
else:
preprocess_action = preprocess_action_funcs['NCA'][REPRESENTATION]
else:
preprocess_action = preprocess_action_funcs[MODEL][REPRESENTATION]
if MODEL not in preprocess_observation_funcs:
preprocess_observation = preprocess_observation_funcs['NCA'][REPRESENTATION]
else:
preprocess_observation = preprocess_observation_funcs[MODEL][REPRESENTATION]
def init_tensorboard():
assert not INFER
# Create TensorBoard Log Directory if does not exist
LOG_NAME = "./runs/" + datetime.now().strftime("%Y%m%d-%H%M%S") + "-" + exp_name
writer = SummaryWriter(LOG_NAME)
return writer
if THREADS:
ray.init()
try:
try:
evolver = pickle.load(open(os.path.join(SAVE_PATH, "evolver.pkl"), "rb"))
except:
evolver = pickle.load(
open(os.path.join(SAVE_PATH, "last_evolver.pkl"), "rb")
)
print("Loaded save file at {}".format(SAVE_PATH))
if VISUALIZE:
evolver.visualize()
if INFER:
global RENDER
RENDER = True
N_STEPS = N_INFER_STEPS
# if not RANDOM_INIT_LEVELS:
# evaluate on initial level seeds that each generator has seen before
RANDOM_INIT_LEVELS = False
evolver.infer(concat_gifs=CONCAT_GIFS)
save_grid(csv_name="eval_levels_fixLvls")
# evaluate on random initial level seeds
RANDOM_INIT_LEVELS = True
evolver.infer(concat_gifs=CONCAT_GIFS)
save_grid(csv_name="eval_levels")
# save_grid(csv_name="levels")
if not (INFER or VISUALIZE):
writer = init_tensorboard()
# then we train
RENDER = arg_dict["render"]
evolver.init_env()
evolver.total_itrs = arg_dict["n_generations"]
evolver.evolve()
except FileNotFoundError as e:
if not INFER:
RENDER = arg_dict["render"]
print(
"Failed loading from an existing save-file. Evolving from scratch. The error was: {}".format(
e
)
)
writer = init_tensorboard()
evolver = EvoPCGRL()
evolver.evolve()
else:
print(
"Loading from an existing save-file failed. Cannot run inference. The error was: {}".format(
e
)
)
```
#### File: envs/probs/loderunner_prob.py
```python
from PIL import Image
import os
import numpy as np
from gym_pcgrl.envs.probs.problem import Problem
from gym_pcgrl.envs.helper import get_range_reward, get_tile_locations, calc_certain_tile, get_floor_dist, get_type_grouping, get_changes
from gym_pcgrl.envs.probs.loderunner.engine import get_score
class LRProblem(Problem):
def __init__(self):
super().__init__()
self._width = 12
self._height = 8
self._prob = {"solid": 0.03, "brick": 0.23, "ladder": 0.10, "rope": 0.032, "empty": 0.56, "gold":0.02, "enemy":0.05, "player":0.01}
self._border_size = (0,0)
self._min_enemies = 1
self._max_enemies = 3
self._min_golds = 1
self._max_golds = 10
self._rewards = {
"player": 3,
"enemies": 1,
"golds": 1,
"win": 5,
"path-length": 2
}
def get_tile_types(self):
return ["solid", "brick", "ladder", "rope", "empty", "gold", "enemy", "player"]
def adjust_param(self, **kwargs):
super().adjust_param(**kwargs)
self._min_enemies = kwargs.get('min_enemies', self._min_enemies)
self._max_enemies = kwargs.get('max_enemies', self._max_enemies)
self._min_golds = kwargs.get('min_golds', self._min_golds)
self._max_golds = kwargs.get('max_golds', self._max_golds)
rewards = kwargs.get('rewards')
if rewards is not None:
for t in rewards:
if t in self._rewards:
self._rewards[t] = rewards[t]
def _run_game(self, map):
gameCharacters="Bb#-.GEM"
string_to_char = dict((s, gameCharacters[i]) for i, s in enumerate(self.get_tile_types()))
lvl= []
for i in range(len(map)):
line = []
for j in range(len(map[i])):
string = map[i][j]
line.append(string_to_char[string])
lvl.append(line)
score, dist = get_score(lvl)
return score, dist
def get_stats(self, map):
map_locations = get_tile_locations(map, self.get_tile_types())
map_stats = {
"player": calc_certain_tile(map_locations, ["player"]),
"enemies": calc_certain_tile(map_locations, ["enemy"]),
"golds": calc_certain_tile(map_locations, ["gold"]),
"win": 0,
"path-length": 0
}
if map_stats["player"] == 1 and map_stats["golds"] > 0:
map_stats["win"], map_stats["length"] = self._run_game(map)
return map_stats
def get_reward(self, new_stats, old_stats):
#reward having exactly one player, an acceptable number of enemies and golds, a winnable level, and a longer path
rewards = {
"player": get_range_reward(new_stats["player"], old_stats["player"], 1, 1),
"enemies": get_range_reward(new_stats["enemies"], old_stats["enemies"], self._min_enemies, self._max_enemies),
"golds": get_range_reward(new_stats["golds"], old_stats["golds"], self._min_golds, self._max_golds),
"win": get_range_reward(new_stats["win"], old_stats["win"], 0, 1),
"path-length": get_range_reward(new_stats["path-length"], old_stats["path-length"], np.inf, np.inf),
}
#calculate the total reward
return rewards["player"] * self._rewards["player"] +\
rewards["enemies"] * self._rewards["enemies"] +\
rewards["golds"] * self._rewards["golds"] +\
rewards["win"] * self._rewards["win"] +\
rewards["path-length"] * self._rewards["path-length"]
def get_episode_over(self, new_stats, old_stats):
return new_stats["win"] == 1 and new_stats["path-length"] >= 20
def get_debug_info(self, new_stats, old_stats):
return {
"player": new_stats["player"],
"enemies": new_stats["enemies"],
"golds": new_stats["golds"],
"win": new_stats["win"],
"path-length": new_stats["path-length"]
}
def render(self, map):
#new_map = self._get_runnable_lvl(map)
if self._graphics == None:
self._graphics = {
"solid": Image.open(os.path.dirname(__file__) + "/loderunner/solid.png").convert('RGBA'),
"brick": Image.open(os.path.dirname(__file__) + "/loderunner/brick.png").convert('RGBA'),
"ladder": Image.open(os.path.dirname(__file__) + "/loderunner/ladder.png").convert('RGBA'),
"rope": Image.open(os.path.dirname(__file__) + "/loderunner/rope.png").convert('RGBA'),
"enemy": Image.open(os.path.dirname(__file__) + "/loderunner/enemy.png").convert('RGBA'),
"gold": Image.open(os.path.dirname(__file__) + "/loderunner/gold.png").convert('RGBA'),
"empty": Image.open(os.path.dirname(__file__) + "/loderunner/empty.png").convert('RGBA'),
"player": Image.open(os.path.dirname(__file__) + "/loderunner/player.png").convert('RGBA')
}
#self._border_size = (0, 0)
img = super().render(map)
#self._border_size = (3, 0)
return img
```
#### File: envs/reps/narrow_rep.py
```python
from gym_pcgrl.envs.reps.representation import Representation
from PIL import Image
from gym import spaces
import numpy as np
from collections import OrderedDict
"""
The narrow representation, where the agent modifies the tile value at a single selected
position; the position is chosen either randomly or sequentially, similar to cellular automata
"""
class NarrowRepresentation(Representation):
"""
Initialize all the parameters used by that representation
"""
def __init__(self):
super().__init__()
# self._random_tile = True
self._random_tile = False
"""
Resets the current representation where it resets the parent and the current
modified location
Parameters:
width (int): the generated map width
height (int): the generated map height
prob (dict(int,float)): the probability distribution of each tile value
"""
def reset(self, width, height, prob):
super().reset(width, height, prob)
self._x = self._random.randint(width)
self._y = self._random.randint(height)
# self._x = 0
# self._y = 0
"""
Gets the action space used by the narrow representation
Parameters:
width: the current map width
height: the current map height
num_tiles: the total number of the tile values
Returns:
Discrete: the action space used by the narrow representation, which corresponds
to choosing a value for the currently selected tile (action 0 is a no-op)
"""
def get_action_space(self, width, height, num_tiles):
return spaces.Discrete(num_tiles + 1)
"""
Get the observation space used by the narrow representation
Parameters:
width: the current map width
height: the current map height
num_tiles: the total number of the tile values
Returns:
Dict: the observation space used by that representation. "pos" Integer
x,y position for the current location. "map" 2D array of tile numbers
"""
def get_observation_space(self, width, height, num_tiles):
return spaces.Dict({
"pos": spaces.Box(low=np.array([0, 0]), high=np.array([width-1, height-1]), dtype=np.uint8),
"map": spaces.Box(low=0, high=num_tiles-1, dtype=np.uint8, shape=(height, width))
})
"""
Get the current representation observation object at the current moment
Returns:
observation: the current observation at the current moment. "pos" Integer
x,y position for the current location. "map" 2D array of tile numbers
"""
def get_observation(self):
return OrderedDict({
"pos": np.array([self._x, self._y], dtype=np.uint8),
"map": self._map.copy()
})
"""
Adjust the current used parameters
Parameters:
random_start (boolean): if the system will restart with a new map (true) or the previous map (false)
random_tile (boolean): if the system will move between tiles random (true) or sequentially (false)
"""
def adjust_param(self, **kwargs):
super().adjust_param(**kwargs)
self._random_tile = kwargs.get('random_tile', self._random_tile)
"""
Update the narrow representation with the input action
Parameters:
action: an action that is used to advance the environment (same as action space)
Returns:
(int, int, int): 1 if the action changed the map else 0, followed by the updated x, y position
"""
def update(self, action):
change = 0
if action > 0:
change += [0,1][self._map[self._y][self._x] != action-1]
self._map[self._y][self._x] = action-1
if self._random_tile:
self._x = self._random.randint(self._map.shape[1])
self._y = self._random.randint(self._map.shape[0])
else:
self._x += 1
if self._x >= self._map.shape[1]:
self._x = 0
self._y += 1
if self._y >= self._map.shape[0]:
self._y = 0
return change, self._x, self._y
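# Hedged illustration (added comment, not part of the original file): with num_tiles == 4
# the action space is Discrete(5); action 0 leaves the current tile untouched (the cursor
# still advances), while action 3 writes tile value 2 at (self._x, self._y). In sequential
# mode the cursor then moves in scanline order (x first, wrapping to the next row).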
"""
Modify the level image with a red rectangle around the tile that is
going to be modified
Parameters:
lvl_image (img): the current level_image without modifications
tile_size (int): the size of tiles in pixels used in the lvl_image
border_size ((int,int)): an offset in tiles if the borders are not part of the level
Returns:
img: the modified level image
"""
def render(self, lvl_image, tile_size, border_size):
x_graphics = Image.new("RGBA", (tile_size,tile_size), (0,0,0,0))
for x in range(tile_size):
x_graphics.putpixel((0,x),(255,0,0,255))
x_graphics.putpixel((1,x),(255,0,0,255))
x_graphics.putpixel((tile_size-2,x),(255,0,0,255))
x_graphics.putpixel((tile_size-1,x),(255,0,0,255))
for y in range(tile_size):
x_graphics.putpixel((y,0),(255,0,0,255))
x_graphics.putpixel((y,1),(255,0,0,255))
x_graphics.putpixel((y,tile_size-2),(255,0,0,255))
x_graphics.putpixel((y,tile_size-1),(255,0,0,255))
lvl_image.paste(x_graphics, ((self._x+border_size[0])*tile_size, (self._y+border_size[1])*tile_size,
(self._x+border_size[0]+1)*tile_size,(self._y+border_size[1]+1)*tile_size), x_graphics)
return lvl_image
``` |
{
"source": "JiangZehua/gym-pcgrl",
"score": 3
} |
#### File: gym_pcgrl/envs/helper_3D.py
```python
import numpy as np
"""
Public function to get a dictionary of all location of all tiles
Parameters:
map (any[][][]): the current map
[[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0]]]
tile_values (any[]): an array of all the tile values that are possible
Returns:
Dict(string,(int,int)[]): positions for every certain tile_value
"""
def get_tile_locations(map, tile_values):
tiles = {}
for t in tile_values:
tiles[t] = []
for z in range(len(map)):
for y in range(len(map[z])):
for x in range(len(map[z][y])):
tiles[map[z][y][x]].append((x,y,z))
return tiles
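# Hedged example (added comment; the tile names are hypothetical): the map is indexed
# as map[z][y][x], and positions are recorded as (x, y, z) tuples.
#   tiny = [[["AIR", "DIRT"],
#            ["AIR", "AIR"]]]          # shape (z=1, y=2, x=2)
#   get_tile_locations(tiny, ["AIR", "DIRT"])["DIRT"] == [(1, 0, 0)]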
"""
Get the vertical distance to certain type of tiles
Parameters:
map (any[][][]): the actual map
x (int): the x position of the start location
y (int): the y position of the start location
z (int): the z position of the start location
types (any[]): an array of types of tiles
Returns:
int: the distance to certain types underneath a certain location
"""
def _calc_dist_floor(map, x, y, z, types):
for dz in range(len(map)):
if z+dz >= len(map):
break
if map[z+dz][y][x] in types:
return dz-1
return len(map) - 1
"""
Public function to calculate the distance of a certain tiles to the floor tiles
Parameters:
map (any[][][]): the current map
fromTypes (any[]): an array of all the tile values for which the distance to the floor is calculated
floorTypes (any[]): an array of all the tile values that are considered floor
Returns:
int: the summed distance of the matching tiles from the floor, where 0 means a tile sits directly on the floor
"""
def get_floor_dist(map, fromTypes, floorTypes):
result = 0
for z in range(len(map)):
for y in range(len(map[z])):
for x in range(len(map[z][y])):
if map[z][y][x] in fromTypes:
result += _calc_dist_floor(map, x, y, z, floorTypes)
return result
"""
Get the number of tiles that have a certain value around a certain position
Parameters:
map (any[][][]): the current map
x (int): the x position of the start location
y (int): the y position of the start location
z (int): the z position of the start location
types (any[]): an array of types of tiles
relLocs ((int,int,int)[]): a tuple array of all the relative positions
Returns:
int: the number of similar tiles around a certain location
"""
def _calc_group_value(map, x, y, z, types, relLocs):
result = 0
for l in relLocs:
nx, ny, nz= x+l[0], y+l[1], z+l[2]
if nx < 0 or ny < 0 or nz < 0 or nx >= len(map[0]) or ny >= len(map) or nz>=len(map):
continue
if map[nz][ny][nx] in types:
result += 1
return result
"""
Get the number of tiles that is a group of certain size
Parameters:
map (any[][][]): the current map
types (any[]): an array of types of tiles
relLocs ((int,int,int)[]): a tuple array of all the relative positions
min (int): min number of tiles around
max (int): max number of tiles around
Returns:
int: the number of tiles that have surrounding between min and max
"""
def get_type_grouping(map, types, relLocs, min, max):
result = 0
for z in range(len(map)):
for y in range(len(map[z])):
for x in range(len(map[z][y])):
if map[z][y][x] in types:
value = _calc_group_value(map, x, y, z, types, relLocs)
if value >= min and value <= max:
result += 1
return result
"""
Get the number of changes of tiles in either vertical or horizontal direction
Parameters:
map (any[][][]): the current map
vertical (boolean): calculate the changes along the vertical (z) direction instead of horizontal
y_dir (boolean): calculate the changes along the horizontal y direction instead of the x direction
Returns:
int: the number of tile changes along the chosen direction
"""
def get_changes(map, vertical=False, y_dir=False):
start_z = 0
start_y = 0
start_x = 0
if vertical:
start_z = 1
elif y_dir:
start_y = 1
else:
start_x = 1
value = 0
for z in range(start_z, len(map)):
for y in range(start_y, len(map[z])):
for x in range(start_x, len(map[z][y])):
same = False
if vertical:
same = map[z][y][x] == map[z-1][y][x]
elif y_dir:
same = map[z][y][x] == map[z][y-1][x]
else:
same = map[z][y][x] == map[z][y][x-1]
if not same:
value += 1
return value
"""
Private function to get a list of all tile locations on the map that have any of
the tile_values
Parameters:
map_locations (Dict(string,(int,int,int)[])): the histogram of locations of the current map
tile_values (any[]): an array of all the tile values that the method is searching for
Returns:
(int,int,int)[]: a list of (x,y,z) position on the map that have a certain value
"""
def _get_certain_tiles(map_locations, tile_values):
tiles=[]
for v in tile_values:
tiles.extend(map_locations[v])
return tiles
'''
Private function that checks whether the current position is standable: a position is standable only when there are
two vertically stacked passable tiles (the character is 2 blocks tall)
Parameters:
x (int): The current x position
y (int): The current y position
z (int): The current z position
map (any[][][]): the current tile map to check
passable_values (any[]): an array of all the passable tile values
Return:
boolean: True if the position is standable
'''
def _standable(map, x, y, z, passable_values):
nx, ny, nz = x, y, z+1
if nz < 0 or nz >= len(map):
return False
elif (map[nz][ny][nx] in passable_values
and map[z][y][x] in passable_values):
return True
else:
return False
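# Note (added comment): concretely, (x, y, z) is standable only if both map[z][y][x] and
# map[z+1][y][x] are passable, since the agent occupies two vertically stacked tiles.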
'''
Private function that checks whether an aisle is passable: the aisle is passable only when the agent can move to an
adjacent position (the adjacent position must not block the character's head)
Parameters:
x (int): The current x position
y (int): The current y position
z (int): The current z position
map (any[][][]): the current tile map to check
passable_values (any[]): an array of all the passable tile values
Return:
(int,int,int)[]: a list of adjacent (x, y, z) positions the agent can move to
'''
def _passable(map, x, y, z, passable_values):
passable_tiles = []
# Check 4 adjacent directions: forward, back, left, right. For each, it is passable if we can move to it while
# moving up/down-stairs or staying level.
for dir in [(1,0), (0,1), (-1,0), (0,-1)]:
nx, ny, nz= x+dir[0], y+dir[1], z
# Check if out of bounds, if so, skip it
if (nx < 0 or ny < 0 or nx >= len(map[z][y]) or ny >= len(map[z])):
continue
# Check whether can go down stairs
if (nz-1 >= 0 and nz+1 < len(map) and (map[nz-1][ny][nx] in passable_values
and map[nz][ny][nx] in passable_values
and map[nz+1][ny][nx] in passable_values)):
passable_tiles.append((nx, ny, nz-1))
# Check whether can stay at the same level
elif (nz+1 < len(map) and (map[nz][ny][nx] in passable_values
and map[nz+1][ny][nx] in passable_values)):
passable_tiles.append((nx, ny, nz))
# Check whether can go up stairs
elif nz+2 < len(map) and (map[nz+2][y][x] in passable_values
and map[nz+1][ny][nx] in passable_values
and map[nz+2][ny][nx] in passable_values):
passable_tiles.append((nx, ny, nz+1))
else:
continue
return passable_tiles
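# Hedged summary of the three cases above (added comment): from (x, y, z) the agent can
# move to a horizontal neighbour (nx, ny) and end up at
#   nz - 1 (walk down one step)  if the target column is passable at nz-1, nz and nz+1,
#   nz     (stay on the level)   if the target column is passable at nz and nz+1,
#   nz + 1 (climb one step)      if the tile above the agent's own head (z+2 in the current
#                                column) and the target column at nz+1 and nz+2 are passable.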
"""
Private function that runs flood fill algorithm on the current color map
Parameters:
x (int): the starting x position of the flood fill algorithm
y (int): the starting y position of the flood fill algorithm
z (int): the starting z position of the flood fill algorithm
color_map (int[][][]): the color map that is being colored
map (any[][][]): the current tile map to check
color_index (int): the color used to color in the color map
passable_values (any[]): the current values that can be colored over
Returns:
int: the number of tiles that has been colored
"""
def _flood_fill(x, y, z, color_map, map, color_index, passable_values):
num_tiles = 0
queue = [(x, y, z)]
while len(queue) > 0:
(cx, cy, cz) = queue.pop(0)
if color_map[cz][cy][cx] != -1: # or (not _passable(map, cx, cy, cz, passable_values) and not _standable(map, cx, cy, cz, passable_values)):
continue
num_tiles += 1
color_map[cz][cy][cx] = color_index
for (dx,dy,dz) in [(-1, 0, 0), (1, 0, 0), (0, -1, 0), (0, 1, 0), (0, 0, -1), (0, 0, 1)]:
nx,ny,nz = cx+dx, cy+dy, cz+dz
if nx < 0 or ny < 0 or nz < 0 or nx >= len(map[0][0]) or ny >= len(map[0]) or nz >= len(map):
continue
if map[nz][ny][nx] not in passable_values:  # only spread the fill through passable tiles (as in the 2D helper)
continue
queue.append((nx, ny, nz))
return num_tiles
"""
Calculates the number of regions in the current map with passable_values
Parameters:
map (any[][][]): the current map being tested
map_locations(Dict(string,(int,int,int)[])): the histogram of locations of the current map
passable_values (any[]): an array of all the passable tile values
Returns:
int: number of regions in the map
"""
def calc_num_regions(map, map_locations, passable_values):
empty_tiles = _get_certain_tiles(map_locations, passable_values)
region_index=0
color_map = np.full((len(map), len(map[0]), len(map[0][0])), -1)
for (x,y,z) in empty_tiles:
num_tiles = _flood_fill(x, y, z, color_map, map, region_index + 1, passable_values)
if num_tiles > 0:
region_index += 1
else:
continue
return region_index
"""
Public function that runs Dijkstra's algorithm and returns the resulting maps
Parameters:
x (int): the starting x position for Dijkstra's algorithm
y (int): the starting y position for Dijkstra's algorithm
z (int): the starting z position for Dijkstra's algorithm
map (any[][][]): the current map being tested
passable_values (any[]): an array of all the passable tile values
Returns:
int[][][], int[][][]: the Dijkstra distance map and the visited map after running the algorithm
"""
def run_dijkstra(x, y, z, map, passable_values):
dijkstra_map = np.full((len(map), len(map[0]), len(map[0][0])), -1)
visited_map = np.zeros((len(map), len(map[0]), len(map[0][0])))
queue = [(x, y, z, 0)]
while len(queue) > 0:
# Looking at a new tile
(cx,cy,cz,cd) = queue.pop(0)
# Skip tile if we've already visited it
if dijkstra_map[cz][cy][cx] >= 0 and dijkstra_map[cz][cy][cx] <= cd:
continue
# Count the tile as visited and record its distance
visited_map[cz][cy][cx] = 1
dijkstra_map[cz][cy][cx] = cd
# Call passable, which will return, (x, y, z) coordinates of tiles to which the player can travel from here
# for (dx,dy,dz) in [(-1, 0, 0), (1, 0, 0), (0, -1, 0), (0, 1, 0), (0, 0, -1), (0, 0, 1)]:
for (nx, ny, nz) in _passable(map, cx, cy, cz, passable_values):
# # Check that the new tiles are in the bounds of the level
# nx,ny,nz=cx+dx,cy+dy,cz+dz
# if nx < 0 or ny < 0 or nz <0 or nx >= len(map[0][0]) or ny >= len(map[0]) or nz >=len(map):
# # If out of bounds, do not add the new tile to the frontier
# continue
# Add the new tile to the frontier
queue.append((nx, ny, nz, cd + 1))
return dijkstra_map, visited_map
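# Hedged usage sketch (added comment; `m`, `passable` and the coordinates are hypothetical):
# given a 3D map `m` of tile names and the problem's passable tile list, the distance from a
# start tile to every reachable tile (-1 where unreachable) can be read off the returned map:
#   dist, visited = run_dijkstra(x0, y0, z0, m, passable)
#   steps_to_target = dist[tz][ty][tx]   # -1 if (tx, ty, tz) cannot be reached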
"""
Calculate the longest path on the map
Parameters:
map (any[][][]): the current map being tested
map_locations (Dict(string,(int,int,int)[])): the histogram of locations of the current map
passable_values (any[]): an array of all passable tiles in the map
Returns:
int: the longest path value in tiles in the current map
"""
def calc_longest_path(map, map_locations, passable_values):
empty_tiles = _get_certain_tiles(map_locations, passable_values)
final_visited_map = np.zeros((len(map), len(map[0]), len(map[0][0])))
final_value = 0
for (x,y,z) in empty_tiles:
if final_visited_map[z][y][x] > 0:
continue
dikjstra_map, visited_map = run_dijkstra(x, y, z, map, passable_values)
final_visited_map += visited_map
(mz,my,mx) = np.unravel_index(np.argmax(dikjstra_map, axis=None), dikjstra_map.shape)
dikjstra_map, _ = run_dijkstra(mx, my, mz, map, passable_values)
max_value = np.max(dikjstra_map)
if max_value > final_value:
final_value = max_value
return final_value
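# Note (added comment): the loop above uses the double-sweep heuristic for a region's
# diameter: search from an arbitrary unvisited tile, jump to the farthest tile found,
# search again from there, and report the largest distance seen as the longest path.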
"""
Recover a shortest path (as a list of coords) from a Dijkstra map,
using either some initial coords, or else from the furthest point
Parameters:
path_map (int[][]): the Dijkstra distance map to trace back through
init_coords (tuple(int, int)): optional starting coordinates; if None, start from the cell with the greatest distance
Returns:
int[][]: the path's coordinates, ordered from the end of the path back toward its start
"""
# TODO change to 3D
def get_path_coords(path_map, init_coords=None):
'''Recover a shortest path (as a list of coords) from a Dijkstra map, using either some initial coords, or else from the furthest point.'''
width, height = len(path_map), len(path_map[0])
pad_path_map = np.zeros(shape=(width + 2, height + 2), dtype=np.int32)
pad_path_map.fill(0)
pad_path_map[1:width + 1, 1:height + 1] = path_map + 1
if not init_coords:
# Work from the greatest cell value (end of the path) backward
max_cell = pad_path_map.max()
curr = np.array(np.where(pad_path_map == max_cell))
else:
curr = np.array([init_coords], dtype=np.int32).T + 1
max_cell = pad_path_map[curr[0][0], curr[1][0]]
xi, yi = curr[:, 0]
path = np.zeros(shape=(max_cell, 2), dtype=np.int32)
i = 0
while max_cell > 1:
path[i, :] = [xi - 1, yi - 1]
pad_path_map[xi, yi] = -1
max_cell -= 1
x0, x1, y0, y1 = xi - 1, xi + 2, yi - 1, yi + 2
adj_mask = np.zeros((width + 2, height + 2), dtype=np.int32)
#adj_mask[x0: x1, y0: y1] = ADJ_FILTER
curr = np.array(np.where(adj_mask * pad_path_map == max_cell))
xi, yi = curr[:, 0]
i += 1
if i > 0:
path[i, :] = [xi - 1, yi - 1]
return path
"""
Calculate the number of tiles that have certain values in the map
Returns:
int: the number of tiles in the map that have the given tile values
"""
def calc_certain_tile(map_locations, tile_values):
return len(_get_certain_tiles(map_locations, tile_values))
"""
Calculate the number of reachable tiles of certain values from a certain starting value
There must be exactly one tile with the starting value on the map
Parameters:
map (any[][][]): the current map
start_value (any): the start tile value; there must be exactly one such tile on the map
passable_values (any[]): the tile values that can be passed in the map
reachable_values (any[]): the tile values that the algorithm trying to reach
Returns:
int: number of tiles that has been reached of the reachable_values
"""
def calc_num_reachable_tile(map, map_locations, start_value, passable_values, reachable_values):
(sx,sy,sz) = _get_certain_tiles(map_locations, [start_value])[0]
dikjstra_map, _ = run_dijkstra(sx, sy, sz, map, passable_values)
tiles = _get_certain_tiles(map_locations, reachable_values)
total = 0
for (tx,ty,tz) in tiles:
if dikjstra_map[tz][ty][tx] >= 0:
total += 1
return total
"""
Generate random map based on the input Parameters
Parameters:
random (numpy.random): random object to help generate the map
length (int): the generated map length (the x dimension)
width (int): the generated map width (the y dimension)
height (int): the generated map height (the z dimension)
prob (dict(int,float)): the probability distribution of each tile value
Returns:
int[][][]: the random generated map
"""
def gen_random_map(random, length, width, height, prob):
map = random.choice(list(prob.keys()), size=(
height, width, length), p=list(prob.values())).astype(np.uint8)
return map
"""
A method to convert the map to use the tile names instead of tile numbers
Parameters:
map (numpy.int[][][]): a numpy 3D array of the current map
tiles (string[]): a list of all the tiles in order
Returns:
string[][][]: a 3D map of tile strings instead of numbers
"""
def get_string_map(map, tiles):
int_to_string = dict((i,s) for i, s in enumerate(tiles))
result = []
for z in range(map.shape[0]):
result.append([])
for y in range(map.shape[1]):
result[z].append([])
for x in range(map.shape[2]):
result[z][y].append(int_to_string[int(map[z][y][x])])
return result
"""
A method to convert the probability dictionary to use tile numbers instead of tile names
Parameters:
prob (dict(string,float)): a dictionary of the probabilities for each tile name
tiles (string[]): a list of all the tiles in order
Returns:
Dict(int,float): a dictionary of tile numbers to probability values (sum to 1)
"""
def get_int_prob(prob, tiles):
string_to_int = dict((s, i) for i, s in enumerate(tiles))
result = {}
total = 0.0
for t in tiles:
result[string_to_int[t]] = prob[t]
total += prob[t]
for i in result:
result[i] /= total
return result
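# Hedged example (added comment; tile names are hypothetical): with tiles ["AIR", "DIRT"]
# and prob {"AIR": 0.8, "DIRT": 0.4}, the values are renormalised to sum to 1, so
# get_int_prob({"AIR": 0.8, "DIRT": 0.4}, ["AIR", "DIRT"]) returns approximately {0: 0.667, 1: 0.333}.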
"""
A method to help calculate the reward value based on the change around optimal region
Parameters:
new_value (float): the new value to be checked
old_value (float): the old value to be checked
low (float): low bound for the optimal region
high (float): high bound for the optimal region
Returns:
float: the reward value for the change between new_value and old_value
"""
def get_range_reward(new_value, old_value, low, high):
if new_value >= low and new_value <= high and old_value >= low and old_value <= high:
return 0
if old_value <= high and new_value <= high:
return min(new_value,low) - min(old_value,low)
if old_value >= low and new_value >= low:
return max(old_value,high) - max(new_value,high)
if new_value > high and old_value < low:
return high - new_value + old_value - low
if new_value < low and old_value > high:
return high - old_value + new_value - low
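# Hedged worked examples (added comment, illustration only):
#   get_range_reward(5, 3, 4, 10) == 1    # moved from below the [4, 10] range up into it
#   get_range_reward(12, 11, 4, 10) == -1 # drifted further above the range
#   get_range_reward(6, 8, 4, 10) == 0    # both values already inside the range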
```
#### File: gym_pcgrl/envs/helper.py
```python
import numpy as np
"""
Public function to get a dictionary of all location of all tiles
Parameters:
map (any[][]): the current map
tile_values (any[]): an array of all the tile values that are possible
Returns:
Dict(string,(int,int)[]): positions for every certain tile_value
"""
def get_tile_locations(map, tile_values):
tiles = {}
for t in tile_values:
tiles[t] = []
for y in range(len(map)):
for x in range(len(map[y])):
tiles[map[y][x]].append((x,y))
return tiles
"""
Get the vertical distance to certain type of tiles
Parameters:
map (any[][]): the actual map
x (int): the x position of the start location
y (int): the y position of the start location
types (any[]): an array of types of tiles
Returns:
int: the distance to certain types underneath a certain location
"""
def _calc_dist_floor(map, x, y, types):
for dy in range(len(map)):
if y+dy >= len(map):
break
if map[y+dy][x] in types:
return dy-1
return len(map) - 1
"""
Public function to calculate the distance of a certain tiles to the floor tiles
Parameters:
map (any[][]): the current map
fromTypes (any[]): an array of all the tile values for which the distance to the floor is calculated
floorTypes (any[]): an array of all the tile values that are considered floor
Returns:
int: the summed distance of the matching tiles from the floor, where 0 means a tile sits directly on the floor
"""
def get_floor_dist(map, fromTypes, floorTypes):
result = 0
for y in range(len(map)):
for x in range(len(map[y])):
if map[y][x] in fromTypes:
result += _calc_dist_floor(map, x, y, floorTypes)
return result
"""
Get the number of tiles that have a certain value around a certain position
Parameters:
map (any[][]): the current map
x (int): the x position of the start location
y (int): the y position of the start location
types (any[]): an array of types of tiles
relLocs ((int,int)[]): a tuple array of all the relative positions
Returns:
int: the number of similar tiles around a certain location
"""
def _calc_group_value(map, x, y, types, relLocs):
result = 0
for l in relLocs:
nx, ny = x+l[0], y+l[1]
if nx < 0 or ny < 0 or nx >= len(map[0]) or ny >= len(map):
continue
if map[ny][nx] in types:
result += 1
return result
"""
Get the number of tiles that is a group of certain size
Parameters:
map (any[][]): the current map
types (any[]): an array of types of tiles
relLocs ((int,int)[]): a tuple array of all the relative positions
min (int): min number of tiles around
max (int): max number of tiles around
Returns:
int: the number of tiles that have surrounding between min and max
"""
def get_type_grouping(map, types, relLocs, min, max):
result = 0
for y in range(len(map)):
for x in range(len(map[y])):
if map[y][x] in types:
value = _calc_group_value(map, x, y, types, relLocs)
if value >= min and value <= max:
result += 1
return result
"""
Get the number of changes of tiles in either vertical or horizontal direction
Parameters:
map (any[][]): the current map
vertical (boolean): calculate the vertical changes instead of horizontal
Returns:
int: the number of tile changes along either the vertical or horizontal direction
"""
def get_changes(map, vertical=False):
start_y = 0
start_x = 0
if vertical:
start_y = 1
else:
start_x = 1
value = 0
for y in range(start_y, len(map)):
for x in range(start_x, len(map[y])):
same = False
if vertical:
same = map[y][x] == map[y-1][x]
else:
same = map[y][x] == map[y][x-1]
if not same:
value += 1
return value
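# Hedged example (added comment, not in the original file): on the 2 x 3 map
#   [["a", "a", "b"],
#    ["a", "b", "b"]]
# get_changes(map) == 2 (one a->b transition in each row), while
# get_changes(map, vertical=True) == 1 (only the middle column changes between rows).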
"""
Private function to get a list of all tile locations on the map that have any of
the tile_values
Parameters:
map_locations (Dict(string,(int,int)[])): the histogram of locations of the current map
tile_values (any[]): an array of all the tile values that the method is searching for
Returns:
(int,int)[]: a list of (x,y) position on the map that have a certain value
"""
def _get_certain_tiles(map_locations, tile_values):
tiles=[]
for v in tile_values:
tiles.extend(map_locations[v])
return tiles
"""
Private function that runs flood fill algorithm on the current color map
Parameters:
x (int): the starting x position of the flood fill algorithm
y (int): the starting y position of the flood fill algorithm
color_map (int[][]): the color map that is being colored
map (any[][]): the current tile map to check
color_index (int): the color used to color in the color map
passable_values (any[]): the current values that can be colored over
Returns:
int: the number of tiles that has been colored
"""
def _flood_fill(x, y, color_map, map, color_index, passable_values):
num_tiles = 0
queue = [(x, y)]
while len(queue) > 0:
(cx, cy) = queue.pop(0)
if color_map[cy][cx] != -1 or map[cy][cx] not in passable_values:
continue
num_tiles += 1
color_map[cy][cx] = color_index
for (dx,dy) in [(-1, 0), (1, 0), (0, -1), (0, 1)]:
nx,ny=cx+dx,cy+dy
if nx < 0 or ny < 0 or nx >= len(map[0]) or ny >= len(map):
continue
queue.append((nx, ny))
return num_tiles
"""
Calculates the number of regions in the current map with passable_values
Parameters:
map (any[][]): the current map being tested
map_locations(Dict(string,(int,int)[])): the histogram of locations of the current map
passable_values (any[]): an array of all the passable tile values
Returns:
int: number of regions in the map
"""
def calc_num_regions(map, map_locations, passable_values):
empty_tiles = _get_certain_tiles(map_locations, passable_values)
region_index=0
color_map = np.full((len(map), len(map[0])), -1)
for (x,y) in empty_tiles:
num_tiles = _flood_fill(x, y, color_map, map, region_index + 1, passable_values)
if num_tiles > 0:
region_index += 1
else:
continue
return region_index
"""
Public function that runs the Dijkstra algorithm and returns the resulting maps
Parameters:
    x (int): the starting x position for the Dijkstra algorithm
    y (int): the starting y position for the Dijkstra algorithm
    map (any[][]): the current map being tested
    passable_values (any[]): an array of all the passable tile values
Returns:
    int[][]: the Dijkstra distance map, together with a binary map of the visited tiles
"""
def run_dikjstra(x, y, map, passable_values):
dikjstra_map = np.full((len(map), len(map[0])),-1)
visited_map = np.zeros((len(map), len(map[0])))
queue = [(x, y, 0)]
while len(queue) > 0:
(cx,cy,cd) = queue.pop(0)
if map[cy][cx] not in passable_values or (dikjstra_map[cy][cx] >= 0 and dikjstra_map[cy][cx] <= cd):
continue
visited_map[cy][cx] = 1
dikjstra_map[cy][cx] = cd
for (dx,dy) in [(-1, 0), (1, 0), (0, -1), (0, 1)]:
nx,ny=cx+dx,cy+dy
if nx < 0 or ny < 0 or nx >= len(map[0]) or ny >= len(map):
continue
queue.append((nx, ny, cd + 1))
return dikjstra_map, visited_map
"""
Calculate the longest path on the map
Parameters:
map (any[][]): the current map being tested
map_locations (Dict(string,(int,int)[])): the histogram of locations of the current map
passable_values (any[]): an array of all passable tiles in the map
Returns:
int: the longest path in tiles in the current map
"""
def calc_longest_path(map, map_locations, passable_values):
empty_tiles = _get_certain_tiles(map_locations, passable_values)
final_visited_map = np.zeros((len(map), len(map[0])))
final_value = 0
for (x,y) in empty_tiles:
if final_visited_map[y][x] > 0:
continue
dikjstra_map, visited_map = run_dikjstra(x, y, map, passable_values)
final_visited_map += visited_map
(my,mx) = np.unravel_index(np.argmax(dikjstra_map, axis=None), dikjstra_map.shape)
dikjstra_map, _ = run_dikjstra(mx, my, map, passable_values)
max_value = np.max(dikjstra_map)
if max_value > final_value:
final_value = max_value
return final_value
"""
Calculate the number of tiles that have certain values in the map
Parameters:
    map_locations (Dict(string,(int,int)[])): the histogram of locations of the current map
    tile_values (any[]): an array of all the tile values to count
Returns:
    int: the number of tiles in the map that have any of the given tile values
"""
def calc_certain_tile(map_locations, tile_values):
return len(_get_certain_tiles(map_locations, tile_values))
"""
Calculate the number of reachable tiles of certain values from a certain starting value
The starting value has to appear exactly once on the map
Parameters:
    map (any[][]): the current map
    map_locations (Dict(string,(int,int)[])): the histogram of locations of the current map
    start_value (any): the start tile value; it has to appear exactly once on the map
    passable_values (any[]): the tile values that can be passed in the map
    reachable_values (any[]): the tile values that the algorithm is trying to reach
Returns:
    int: the number of tiles with reachable_values that can be reached from the start tile
"""
def calc_num_reachable_tile(map, map_locations, start_value, passable_values, reachable_values):
(sx,sy) = _get_certain_tiles(map_locations, [start_value])[0]
dikjstra_map, _ = run_dikjstra(sx, sy, map, passable_values)
tiles = _get_certain_tiles(map_locations, reachable_values)
total = 0
for (tx,ty) in tiles:
if dikjstra_map[ty][tx] >= 0:
total += 1
return total
"""
Generate a random map based on the input parameters
Parameters:
random (numpy.random): random object to help generate the map
width (int): the generated map width
height (int): the generated map height
prob (dict(int,float)): the probability distribution of each tile value
Returns:
int[][]: the random generated map
"""
def gen_random_map(random, width, height, prob):
map = random.choice(list(prob.keys()),size=(height,width),p=list(prob.values())).astype(np.uint8)
return map
"""
A method to convert the map to use the tile names instead of tile numbers
Parameters:
map (numpy.int[][]): a numpy 2D array of the current map
tiles (string[]): a list of all the tiles in order
Returns:
string[][]: a 2D map of tile strings instead of numbers
"""
def get_string_map(map, tiles):
int_to_string = dict((i, s) for i, s in enumerate(tiles))
result = []
for y in range(map.shape[0]):
result.append([])
for x in range(map.shape[1]):
result[y].append(int_to_string[int(map[y][x])])
return result
"""
A method to convert the probability dictionary to use tile numbers instead of tile names
Parameters:
prob (dict(string,float)): a dictionary of the probabilities for each tile name
tiles (string[]): a list of all the tiles in order
Returns:
Dict(int,float): a dictionary of tile numbers to probability values (sum to 1)
"""
def get_int_prob(prob, tiles):
string_to_int = dict((s, i) for i, s in enumerate(tiles))
result = {}
total = 0.0
for t in tiles:
result[string_to_int[t]] = prob[t]
total += prob[t]
for i in result:
result[i] /= total
return result
"""
A method to help calculate the reward value based on the change relative to the optimal region
Parameters:
new_value (float): the new value to be checked
old_value (float): the old value to be checked
low (float): low bound for the optimal region
high (float): high bound for the optimal region
Returns:
float: the reward value for the change between new_value and old_value
"""
def get_range_reward(new_value, old_value, low, high):
if new_value >= low and new_value <= high and old_value >= low and old_value <= high:
return 0
if old_value <= high and new_value <= high:
return min(new_value,low) - min(old_value,low)
if old_value >= low and new_value >= low:
return max(old_value,high) - max(new_value,high)
if new_value > high and old_value < low:
return high - new_value + old_value - low
if new_value < low and old_value > high:
return high - old_value + new_value - low
```
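The helpers above are plain functions over nested lists, so they are easy to exercise on a toy map. The following is a minimal usage sketch only: it assumes the functions live in `gym_pcgrl.envs.helper` (the module the MDungeon problem below imports them from) and that `get_tile_locations` from the same module builds the tile-location histogram; the expected values in the comments hold for this toy map only.
```python
# Hedged usage sketch of the helper functions above (import path assumed, see lead-in).
from gym_pcgrl.envs.helper import (
    get_tile_locations, calc_num_regions, calc_longest_path, get_range_reward
)

# Two "empty" columns separated by a "solid" wall -> two regions, longest path of length 2.
toy_map = [
    ["empty", "solid", "empty"],
    ["empty", "solid", "empty"],
    ["empty", "solid", "empty"],
]
locs = get_tile_locations(toy_map, ["empty", "solid"])  # tile value -> [(x, y), ...]

print(calc_num_regions(toy_map, locs, ["empty"]))   # 2
print(calc_longest_path(toy_map, locs, ["empty"]))  # 2

# get_range_reward rewards moving a metric toward the [low, high] band:
print(get_range_reward(new_value=1, old_value=3, low=1, high=1))  # +2 (moved onto the target)
print(get_range_reward(new_value=5, old_value=3, low=1, high=1))  # -2 (moved away from it)
```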
#### File: gym_pcgrl/envs/pcgrl_env_3D.py
```python
from pdb import set_trace as TT
import imp
from gym_pcgrl.envs.probs import PROBLEMS
from gym_pcgrl.envs.reps import REPRESENTATIONS
from gym_pcgrl.envs.helper_3D import get_int_prob, get_string_map
from gym_pcgrl.envs.pcgrl_env import PcgrlEnv
import numpy as np
import gym
from gym import spaces
import PIL
"""
The 3D PCGRL GYM Environment
"""
class PcgrlEnv3D(PcgrlEnv):
def __init__(self, prob="minecraft_3D_maze", rep="narrow3D"):
self._prob = PROBLEMS[prob]()
self._rep = REPRESENTATIONS[rep]()
self._rep_stats = None
self._iteration = 0
self._changes = 0
# NOTE: allow overfitting: can take as many steps as there are tiles in the maps, can change every tile on the map
self._max_changes = np.inf
# self._max_changes = max(
# int(0.2 * self._prob._length * self._prob._width * self._prob._height), 1)
self._max_iterations = self._prob._length * self._prob._width * self._prob._height
# self._max_iterations = self._max_changes * \
# self._prob._length * self._prob._width * self._prob._height
self._heatmap = np.zeros(
(self._prob._height, self._prob._width, self._prob._length))
self.seed()
self.viewer = None
self.action_space = self._rep.get_action_space(
self._prob._length, self._prob._width, self._prob._height, self.get_num_tiles())
self.observation_space = self._rep.get_observation_space(
self._prob._length, self._prob._width, self._prob._height, self.get_num_tiles())
self.observation_space.spaces['heatmap'] = spaces.Box(low=0, high=self._max_changes, dtype=np.uint8, shape=(
self._prob._height, self._prob._width, self._prob._length))
def reset(self):
self._changes = 0
self._iteration = 0
self._rep.reset(self._prob._length, self._prob._width, self._prob._height, get_int_prob(
self._prob._prob, self._prob.get_tile_types()))
self._rep_stats = self._prob.get_stats(
get_string_map(self._rep._map, self._prob.get_tile_types()))
self._prob.reset(self._rep_stats)
self._heatmap = np.zeros(
(self._prob._height, self._prob._width, self._prob._length))
observation = self._rep.get_observation()
observation["heatmap"] = self._heatmap.copy()
return observation
def adjust_param(self, **kwargs):
if 'change_percentage' in kwargs:
percentage = min(1, max(0, kwargs.get('change_percentage')))
self._max_changes = max(
int(percentage * self._prob._length * self._prob._width * self._prob._height), 1)
# self._max_iterations = self._max_changes * \
# self._prob._length * self._prob._width * self._prob._height
self._prob.adjust_param(**kwargs)
self._rep.adjust_param(**kwargs)
self.action_space = self._rep.get_action_space(
self._prob._length, self._prob._width, self._prob._height, self.get_num_tiles())
self.observation_space = self._rep.get_observation_space(
self._prob._length, self._prob._width, self._prob._height, self.get_num_tiles())
self.observation_space.spaces['heatmap'] = spaces.Box(low=0, high=self._max_changes, dtype=np.uint8, shape=(
self._prob._height, self._prob._width, self._prob._length))
def step(self, action):
self._iteration += 1
#save copy of the old stats to calculate the reward
old_stats = self._rep_stats
# update the current state to the new state based on the taken action
        change, x, y, z = self._rep.update(action)
if change > 0:
self._changes += change
self._heatmap[z][y][x] += 1.0
self._rep_stats = self._prob.get_stats(
get_string_map(self._rep._map, self._prob.get_tile_types()))
# calculate the values
observation = self._rep.get_observation()
observation["heatmap"] = self._heatmap.copy()
reward = self._prob.get_reward(self._rep_stats, old_stats)
# NOTE: not ending the episode if we reach targets in our metrics of interest for now
# done = self._prob.get_episode_over(self._rep_stats, old_stats) or \
# self._changes >= self._max_changes or \
# self._iteration >= self._max_iterations
done = self._iteration >= self._max_iterations
info = self._prob.get_debug_info(self._rep_stats, old_stats)
info["iterations"] = self._iteration
info["changes"] = self._changes
info["max_iterations"] = self._max_iterations
info["max_changes"] = self._max_changes
#return the values
return observation, reward, done, info
def render(self, mode='human'):
self._prob.render(get_string_map(
self._rep._map, self._prob.get_tile_types()))
self._rep.render(get_string_map(
self._rep._map, self._prob.get_tile_types()))
return
```
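Since `PcgrlEnv3D` follows the standard Gym reset/step contract, a random-agent rollout is enough to see the info dictionary assembled in `step`. This is a sketch only; it assumes gym_pcgrl is installed with the `minecraft_3D_maze` problem and `narrow3D` representation registered (the constructor defaults above).
```python
# Hedged interaction sketch for the 3D environment (assumes the defaults above are registered).
from gym_pcgrl.envs.pcgrl_env_3D import PcgrlEnv3D

env = PcgrlEnv3D(prob="minecraft_3D_maze", rep="narrow3D")
obs = env.reset()
done = False
while not done:
    action = env.action_space.sample()               # random edits, for illustration only
    obs, reward, done, info = env.step(action)
print(info["iterations"], info["changes"], info["max_iterations"])
```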
#### File: probs/mdungeon/mdungeon_prob.py
```python
import os
import numpy as np
from PIL import Image
from gym_pcgrl.envs.probs.problem import Problem
from gym_pcgrl.envs.helper import get_range_reward, get_tile_locations, calc_certain_tile, calc_num_regions
from gym_pcgrl.envs.probs.mdungeon.mdungeon.engine import State,BFSAgent,AStarAgent
"""
Generate a fully connected level for a simple dungeon crawler similar to MiniDungeons 1 (http://minidungeons.com/)
where the player has to kill at least 50% of the enemies before finishing the level
"""
class MDungeonProblem(Problem):
"""
The constructor is responsible of initializing all the game parameters
"""
def __init__(self):
super().__init__()
self._width = 7
self._height = 11
self._prob = {"empty":0.4, "solid": 0.4, "player":0.02, "exit":0.02, "potion":0.03, "treasure":0.03, "goblin":0.05, "ogre": 0.05}
self._border_tile = "solid"
self._solver_power = 5000
self._max_enemies = 6
self._max_potions = 2
self._max_treasures = 3
self._target_col_enemies = 0.5
self._target_solution = 20
self._rewards = {
"player": 3,
"exit": 3,
"potions": 1,
"treasures": 1,
"enemies": 2,
"regions": 5,
"col-enemies": 2,
"dist-win": 0.1,
"sol-length": 1
}
"""
Get a list of all the different tile names
Returns:
string[]: that contains all the tile names
"""
def get_tile_types(self):
return ["empty", "solid", "player", "exit", "potion", "treasure", "goblin", "ogre"]
"""
Adjust the parameters for the current problem
Parameters:
width (int): change the width of the problem level
height (int): change the height of the problem level
probs (dict(string, float)): change the probability of each tile
        initialization; the names are "empty", "solid", "player", "exit", "potion",
"treasure", "goblin", "ogre"
max_enemies (int): the max amount of enemies that should appear in a level
max_potions (int): the max amount of potions that should appear in a level
max_treasures (int): the max amount of treasure that should appear in a level
        target_col_enemies (float): the target fraction of enemies to kill for the level to be considered a success
target_solution (int): the minimum amount of movement needed to consider the level a success
rewards (dict(string,float)): the weights of each reward change between the new_stats and old_stats
"""
def adjust_param(self, **kwargs):
super().adjust_param(**kwargs)
self._solver_power = kwargs.get('solver_power', self._solver_power)
self._max_enemies = kwargs.get('max_enemies', self._max_enemies)
self._max_potions = kwargs.get('max_potions', self._max_potions)
self._max_treasures = kwargs.get('max_treasures', self._max_treasures)
self._target_col_enemies = kwargs.get('target_col_enemies', self._target_col_enemies)
self._target_solution = kwargs.get('target_solution', self._target_solution)
rewards = kwargs.get('rewards')
if rewards is not None:
for t in rewards:
if t in self._rewards:
self._rewards[t] = rewards[t]
"""
Private function that runs the game on the input level
Parameters:
map (string[][]): the input level to run the game on
Returns:
float: how close you are to winning (0 if you win)
int: the solution length if you win (0 otherwise)
dict(string,int): get the status of the best node - "health": the current player health,
"col_treasures": number of collected treasures, "col_potions": number of collected potions,
"col_enemies": number of killed enemies
"""
def _run_game(self, map):
gameCharacters=" #@H*$go"
string_to_char = dict((s, gameCharacters[i]) for i, s in enumerate(self.get_tile_types()))
lvlString = ""
for x in range(self._width+2):
lvlString += "#"
lvlString += "\n"
for i in range(len(map)):
for j in range(len(map[i])):
string = map[i][j]
if j == 0:
lvlString += "#"
lvlString += string_to_char[string]
if j == self._width-1:
lvlString += "#\n"
for x in range(self._width+2):
lvlString += "#"
lvlString += "\n"
state = State()
state.stringInitialize(lvlString.split("\n"))
aStarAgent = AStarAgent()
bfsAgent = BFSAgent()
sol,solState,iters = aStarAgent.getSolution(state, 1, self._solver_power)
if solState.checkWin():
return 0, len(sol), solState.getGameStatus()
sol,solState,iters = aStarAgent.getSolution(state, 0.5, self._solver_power)
if solState.checkWin():
return 0, len(sol), solState.getGameStatus()
sol,solState,iters = aStarAgent.getSolution(state, 0, self._solver_power)
if solState.checkWin():
return 0, len(sol), solState.getGameStatus()
sol,solState,iters = bfsAgent.getSolution(state, self._solver_power)
if solState.checkWin():
return 0, len(sol), solState.getGameStatus()
return solState.getHeuristic(), 0, solState.getGameStatus()
"""
Get the current stats of the map
Returns:
dict(string,any): stats of the current map to be used in the reward, episode_over, debug_info calculations.
        The used stats are "player": number of player tiles, "exit": number of exit tiles,
        "potions": number of potion tiles, "treasures": number of treasure tiles, "enemies": number of goblin and ogre tiles,
        "regions": number of connected empty tiles, "col-potions": number of collected potions by a planning agent,
"col-treasures": number of collected treasures by a planning agent, "col-enemies": number of killed enemies by a planning agent,
"dist-win": how close to the win state, "sol-length": length of the solution to win the level
"""
def get_stats(self, map):
map_locations = get_tile_locations(map, self.get_tile_types())
map_stats = {
"player": calc_certain_tile(map_locations, ["player"]),
"exit": calc_certain_tile(map_locations, ["exit"]),
"potions": calc_certain_tile(map_locations, ["potion"]),
"treasures": calc_certain_tile(map_locations, ["treasure"]),
"enemies": calc_certain_tile(map_locations, ["goblin","ogre"]),
"regions": calc_num_regions(map, map_locations, ["empty","player","exit","potion","treasure","goblin","ogre"]),
"col-potions": 0,
"col-treasures": 0,
"col-enemies": 0,
"dist-win": self._width * self._height,
"sol-length": 0
}
if map_stats["player"] == 1 and map_stats["exit"] == 1 and map_stats["regions"] == 1:
map_stats["dist-win"], map_stats["sol-length"], play_stats = self._run_game(map)
map_stats["col-potions"] = play_stats["col_potions"]
map_stats["col-treasures"] = play_stats["col_treasures"]
map_stats["col-enemies"] = play_stats["col_enemies"]
return map_stats
"""
Get the current game reward between two stats
Parameters:
new_stats (dict(string,any)): the new stats after taking an action
old_stats (dict(string,any)): the old stats before taking an action
Returns:
float: the current reward due to the change between the old map stats and the new map stats
"""
def get_reward(self, new_stats, old_stats):
        # each stat change is rewarded for moving toward its target range, weighted by self._rewards
rewards = {
"player": get_range_reward(new_stats["player"], old_stats["player"], 1, 1),
"exit": get_range_reward(new_stats["exit"], old_stats["exit"], 1, 1),
"potions": get_range_reward(new_stats["potions"], old_stats["potions"], -np.inf, self._max_potions),
"treasures": get_range_reward(new_stats["treasures"], old_stats["treasures"], -np.inf, self._max_treasures),
"enemies": get_range_reward(new_stats["enemies"], old_stats["enemies"], 1, self._max_enemies),
"regions": get_range_reward(new_stats["regions"], old_stats["regions"], 1, 1),
"col-enemies": get_range_reward(new_stats["col-enemies"], old_stats["col-enemies"], np.inf, np.inf),
"dist-win": get_range_reward(new_stats["dist-win"], old_stats["dist-win"], -np.inf, -np.inf),
"sol-length": get_range_reward(new_stats["sol-length"], old_stats["sol-length"], np.inf, np.inf)
}
#calculate the total reward
return rewards["player"] * self._rewards["player"] +\
rewards["exit"] * self._rewards["exit"] +\
rewards["enemies"] * self._rewards["enemies"] +\
rewards["treasures"] * self._rewards["treasures"] +\
rewards["potions"] * self._rewards["potions"] +\
rewards["regions"] * self._rewards["regions"] +\
rewards["col-enemies"] * self._rewards["col-enemies"] +\
rewards["dist-win"] * self._rewards["dist-win"] +\
rewards["sol-length"] * self._rewards["sol-length"]
"""
Uses the stats to check if the problem ended (episode_over) which means reached
a satisfying quality based on the stats
Parameters:
new_stats (dict(string,any)): the new stats after taking an action
old_stats (dict(string,any)): the old stats before taking an action
Returns:
boolean: True if the level reached satisfying quality based on the stats and False otherwise
"""
def get_episode_over(self, new_stats, old_stats):
return new_stats["sol-length"] >= self._target_solution and\
new_stats["enemies"] > 0 and\
new_stats["col-enemies"] / max(1,new_stats["enemies"]) > self._target_col_enemies
"""
    Get any debug information that needs to be printed
Parameters:
new_stats (dict(string,any)): the new stats after taking an action
old_stats (dict(string,any)): the old stats before taking an action
Returns:
        dict(any,any): debug information that can be used to inspect what is happening in the problem
"""
def get_debug_info(self, new_stats, old_stats):
return {
"player": new_stats["player"],
"exit": new_stats["exit"],
"potions": new_stats["potions"],
"treasures": new_stats["treasures"],
"enemies": new_stats["enemies"],
"regions": new_stats["regions"],
"col-potions": new_stats["col-potions"],
"col-treasures": new_stats["col-treasures"],
"col-enemies": new_stats["col-enemies"],
"dist-win": new_stats["dist-win"],
"sol-length": new_stats["sol-length"]
}
"""
    Get an image of how the map will look for a specific map
Parameters:
map (string[][]): the current game map
Returns:
        Image: a Pillow image of how the map will look using the mdungeon graphics
"""
def render(self, map):
        if self._graphics is None:
self._graphics = {
"empty": Image.open(os.path.dirname(__file__) + "/mdungeon/empty.png").convert('RGBA'),
"solid": Image.open(os.path.dirname(__file__) + "/mdungeon/solid.png").convert('RGBA'),
"player": Image.open(os.path.dirname(__file__) + "/mdungeon/player.png").convert('RGBA'),
"exit": Image.open(os.path.dirname(__file__) + "/mdungeon/exit.png").convert('RGBA'),
"potion": Image.open(os.path.dirname(__file__) + "/mdungeon/potion.png").convert('RGBA'),
"treasure": Image.open(os.path.dirname(__file__) + "/mdungeon/treasure.png").convert('RGBA'),
"goblin": Image.open(os.path.dirname(__file__) + "/mdungeon/goblin.png").convert('RGBA'),
"ogre": Image.open(os.path.dirname(__file__) + "/mdungeon/ogre.png").convert('RGBA'),
}
return super().render(map)
```
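The `_run_game` serialization above wraps the tile map in a `#` border and maps each tile type to one character of `gameCharacters`. The standalone sketch below reproduces that conversion on a tiny, made-up level so the string format handed to `State.stringInitialize` is easier to see.
```python
# Standalone sketch of the ASCII level serialization used by _run_game (toy level, no engine needed).
tile_types = ["empty", "solid", "player", "exit", "potion", "treasure", "goblin", "ogre"]
game_chars = " #@H*$go"
string_to_char = dict(zip(tile_types, game_chars))

level = [["player", "empty", "exit"],
         ["empty", "solid", "empty"]]
width = len(level[0])

lines = ["#" * (width + 2)]                                                    # top border
lines += ["#" + "".join(string_to_char[t] for t in row) + "#" for row in level]
lines += ["#" * (width + 2)]                                                   # bottom border
print("\n".join(lines))
# #####
# #@ H#
# # # #
# #####
```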
#### File: envs/probs/problem.py
```python
from gym.utils import seeding
from PIL import Image
"""
The base class for all the problems that can be handled by the interface
"""
class Problem:
"""
Constructor for the problem that initialize all the basic parameters
"""
def __init__(self):
self._width = 9
self._height = 9
tiles = self.get_tile_types()
self._prob = []
for _ in range(len(tiles)):
self._prob.append(1.0/len(tiles))
self._border_size = (1,1)
self._border_tile = tiles[0]
self._tile_size=16
self._graphics = None
"""
    Seed the random number generator so results can be reproduced. If the seed is None,
    a random seed is used instead.
Parameters:
seed (int): the starting seed, if it is None a random seed number is used.
Returns:
int: the used seed (same as input if not None)
"""
def seed(self, seed=None):
self._random, seed = seeding.np_random(seed)
return seed
"""
    Resets the problem to the initial state and saves the start_stats from the starting map.
Also, it can be used to change values between different environment resets
Parameters:
start_stats (dict(string,any)): the first stats of the map
"""
def reset(self, start_stats):
self._start_stats = start_stats
"""
Get a list of all the different tile names
Returns:
string[]: that contains all the tile names
"""
def get_tile_types(self):
raise NotImplementedError('get_tile_types is not implemented')
"""
Adjust the parameters for the current problem
Parameters:
width (int): change the width of the problem level
height (int): change the height of the problem level
probs (dict(string, float)): change the probability of each tile
        initialization; the names are the same as the tile types from get_tile_types
"""
def adjust_param(self, **kwargs):
self._width, self._height = kwargs.get('width', self._width), kwargs.get('height', self._height)
prob = kwargs.get('probs')
if prob is not None:
for t in prob:
if t in self._prob:
self._prob[t] = prob[t]
"""
Get the current stats of the map
Returns:
dict(string,any): stats of the current map to be used in the reward, episode_over, debug_info calculations
"""
def get_stats(self, map):
        raise NotImplementedError('get_stats is not implemented')
"""
Get the current game reward between two stats
Parameters:
new_stats (dict(string,any)): the new stats after taking an action
old_stats (dict(string,any)): the old stats before taking an action
Returns:
float: the current reward due to the change between the old map stats and the new map stats
"""
def get_reward(self, new_stats, old_stats):
raise NotImplementedError('get_reward is not implemented')
"""
Uses the stats to check if the problem ended (episode_over) which means reached
a satisfying quality based on the stats
Parameters:
new_stats (dict(string,any)): the new stats after taking an action
old_stats (dict(string,any)): the old stats before taking an action
Returns:
boolean: True if the level reached satisfying quality based on the stats and False otherwise
"""
def get_episode_over(self, new_stats, old_stats):
        raise NotImplementedError('get_episode_over is not implemented')
"""
    Get any debug information that needs to be printed
Parameters:
new_stats (dict(string,any)): the new stats after taking an action
old_stats (dict(string,any)): the old stats before taking an action
Returns:
        dict(any,any): debug information that can be used to inspect what is happening in the problem
"""
def get_debug_info(self, new_stats, old_stats):
raise NotImplementedError('get_debug_info is not implemented')
"""
    Get an image of how the map will look for a specific map
Parameters:
map (string[][]): the current game map
Returns:
        Image: a Pillow image of how the map will look, using the problem graphics or default grayscale colors
"""
def render(self, map):
tiles = self.get_tile_types()
if self._graphics == None:
self._graphics = {}
for i in range(len(tiles)):
color = (i*255/len(tiles),i*255/len(tiles),i*255/len(tiles),255)
self._graphics[tiles[i]] = Image.new("RGBA",(self._tile_size,self._tile_size),color)
full_width = len(map[0])+2*self._border_size[0]
full_height = len(map)+2*self._border_size[1]
lvl_image = Image.new("RGBA", (full_width*self._tile_size, full_height*self._tile_size), (0,0,0,255))
for y in range(full_height):
for x in range(self._border_size[0]):
lvl_image.paste(self._graphics[self._border_tile], (x*self._tile_size, y*self._tile_size, (x+1)*self._tile_size, (y+1)*self._tile_size))
lvl_image.paste(self._graphics[self._border_tile], ((full_width-x-1)*self._tile_size, y*self._tile_size, (full_width-x)*self._tile_size, (y+1)*self._tile_size))
for x in range(full_width):
for y in range(self._border_size[1]):
lvl_image.paste(self._graphics[self._border_tile], (x*self._tile_size, y*self._tile_size, (x+1)*self._tile_size, (y+1)*self._tile_size))
lvl_image.paste(self._graphics[self._border_tile], (x*self._tile_size, (full_height-y-1)*self._tile_size, (x+1)*self._tile_size, (full_height-y)*self._tile_size))
for y in range(len(map)):
for x in range(len(map[y])):
lvl_image.paste(self._graphics[map[y][x]], ((x+self._border_size[0])*self._tile_size, (y+self._border_size[1])*self._tile_size, (x+self._border_size[0]+1)*self._tile_size, (y+self._border_size[1]+1)*self._tile_size))
return lvl_image
```
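Concrete problems are created by subclassing `Problem` and filling in the abstract methods above. The sketch below is a hypothetical, minimal problem (not part of the repo; the class name and target are made up) that only counts solid tiles, reusing the 2D helpers the MDungeon problem imports.
```python
# Hypothetical minimal Problem subclass (illustrative only).
from gym_pcgrl.envs.probs.problem import Problem
from gym_pcgrl.envs.helper import get_tile_locations, calc_certain_tile, get_range_reward


class HalfSolidProblem(Problem):
    def get_tile_types(self):
        return ["empty", "solid"]

    def get_stats(self, map):
        locs = get_tile_locations(map, self.get_tile_types())
        return {"solid": calc_certain_tile(locs, ["solid"])}

    def get_reward(self, new_stats, old_stats):
        target = self._width * self._height // 2          # reward moving toward half-solid maps
        return get_range_reward(new_stats["solid"], old_stats["solid"], target, target)

    def get_episode_over(self, new_stats, old_stats):
        return new_stats["solid"] == self._width * self._height // 2

    def get_debug_info(self, new_stats, old_stats):
        return {"solid": new_stats["solid"]}
```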
#### File: envs/reps/ca_3D_rep.py
```python
from pdb import set_trace as TT
from pdb import set_trace as T
from gym_pcgrl.envs.reps.representation3D import Representation3D
from PIL import Image
from gym import spaces
import numpy as np
"""
The cellular (automaton-like) representation, where the agent may change all tiles on the map at each step.
"""
class CA3DRepresentation(Representation3D):
"""
Initialize all the parameters used by that representation
"""
def __init__(self):
super().__init__()
"""
Gets the action space used by the cellular representation
Parameters:
length: the current map length
width: the current map width
height: the current map height
num_tiles: the total number of the tile values
Returns:
Box: the action space is the same as the observation space, and consists of selected tile-types for
each tile-coordinate in the level.
"""
def get_action_space(self, length, width, height, num_tiles):
return self.get_observation_space(length, width, height, num_tiles)
"""
Get the observation space used by the cellular representation
Parameters:
length: the current map length
width: the current map width
height: the current map height
num_tiles: the total number of the tile values
Returns:
Box: the observation space used by that representation. A 3D array of tile numbers
"""
def get_observation_space(self, length, width, height, num_tiles):
return spaces.Dict({
"map": spaces.Box(low=0, high=num_tiles-1, dtype=np.uint8, shape=(height, width, length))
})
"""
Get the current representation observation object at the current moment
Returns:
observation: the current observation at the current moment. A 3D array of tile numbers
"""
def get_observation(self):
return {
"map": self._map.copy()
}
"""
Update the cellular representation with the input action
Parameters:
action: an action that is used to advance the environment (same as action space)
Returns:
        boolean: True if the action changed the map, False if nothing changed (returned together
        with placeholder x, y, z values, which are None for this representation)
"""
def update(self, action, continuous=False):
if not continuous:
next_map = action.argmax(axis=0)
else:
next_map = action
if self._map is None:
# This is the case when using an actual latent seed (so we do only one pass through the generator and have
# no need to set an initial map in the environment).
change = True
else:
if next_map.shape != self._map.shape:
                raise ValueError(
                    f"Action shape {next_map.shape} does not match map shape {self._map.shape}")
change = (next_map != self._map).any()
self._map = next_map
return change, None, None, None
```
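The only non-obvious step in `update` is the one-hot-to-tile conversion: when the action is not continuous, the per-tile scores are collapsed with an argmax over the tile axis. A small NumPy sketch of that shape transformation (the sizes are illustrative):
```python
# Sketch of the one-hot -> tile-id collapse performed by CA3DRepresentation.update.
import numpy as np

num_tiles, height, width, length = 4, 2, 3, 3
action = np.random.rand(num_tiles, height, width, length)  # per-tile scores for every voxel
next_map = action.argmax(axis=0)                            # -> (height, width, length) tile ids
print(next_map.shape)                                       # (2, 3, 3)
```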
#### File: envs/reps/wide_3D_rep.py
```python
from gym_pcgrl.envs.reps.representation3D import Representation3D
from PIL import Image
from gym import spaces
import numpy as np
from gym_pcgrl.envs.probs.minecraft.mc_render import reps_3D_render
"""
The wide representation where the agent can pick the tile position and tile value at each update.
"""
class Wide3DRepresentation(Representation3D):
"""
Initialize all the parameters used by that representation
"""
def __init__(self):
super().__init__()
"""
Gets the action space used by the wide representation
Parameters:
length: the current map length
width: the current map width
height: the current map height
num_tiles: the total number of the tile values
Returns:
MultiDiscrete: the action space used by that wide representation which
consists of the x position, y position, z position and the tile value
"""
def get_action_space(self, length, width, height, num_tiles):
return spaces.MultiDiscrete([length, width, height, num_tiles])
"""
Get the observation space used by the wide representation
Parameters:
length: the current map length
width: the current map width
height: the current map height
num_tiles: the total number of the tile values
Returns:
Box: the observation space used by that representation. A 3D array of tile numbers
"""
def get_observation_space(self, length, width, height, num_tiles):
return spaces.Dict({
"map": spaces.Box(low=0, high=num_tiles-1, dtype=np.uint8, shape=(height, width, length))
})
"""
Get the current representation observation object at the current moment
Returns:
observation: the current observation at the current moment. A 3D array of tile numbers
"""
def get_observation(self):
return {
"map": self._map.copy()
}
"""
Update the wide representation with the input action
Parameters:
action: an action that is used to advance the environment (same as action space)
Returns:
        boolean: True if the action changed the map, False if nothing changed (returned together
        with the x, y, z position that was edited)
"""
def update(self, action):
change = [0,1][self._map[action[2]][action[1]][action[0]] != action[3]]
self._map[action[2]][action[1]][action[0]] = action[3]
return change, action[0], action[1], action[2]
``` |
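For the wide 3D representation the action is a single tile edit, sampled from a `MultiDiscrete([length, width, height, num_tiles])` space and applied as `map[z][y][x] = tile_value` on a map of shape `(height, width, length)`. A short sketch of that action layout (the sizes are illustrative):
```python
# Sketch of the wide-3D action format: one (x, y, z, tile_value) edit per step.
from gym import spaces

length, width, height, num_tiles = 5, 4, 3, 2
action_space = spaces.MultiDiscrete([length, width, height, num_tiles])

x, y, z, tile = action_space.sample()
print(x, y, z, tile)   # a single voxel edit at position (x, y, z) with the chosen tile value
```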
{
"source": "jiangzeyinzi/EssentialMC2",
"score": 2
} |
#### File: bricks/stems/r2plus1d_stem.py
```python
import math
import torch.nn as nn
from essmc2.models.registry import STEMS
from .base_3d_stem import Base3DStem
@STEMS.register_class()
class R2Plus1DStem(Base3DStem):
def __init__(self, **kwargs):
super(R2Plus1DStem, self).__init__(**kwargs)
def _construct(self):
mid_dim = int(
math.floor(
(self.kernel_size[0] * self.kernel_size[1] * self.kernel_size[2] * self.dim_in * self.num_filters) / \
(self.kernel_size[1] * self.kernel_size[2] * self.dim_in + self.kernel_size[0] * self.num_filters)))
self.a1 = nn.Conv3d(
in_channels=self.dim_in,
out_channels=mid_dim,
kernel_size=(1, self.kernel_size[1], self.kernel_size[2]),
stride=(1, self.stride[1], self.stride[2]),
padding=(0, self.kernel_size[1] // 2, self.kernel_size[2] // 2),
bias=False
)
self.a1_bn = nn.BatchNorm3d(mid_dim, **self.bn_params)
self.a1_relu = nn.ReLU(inplace=True)
self.a2 = nn.Conv3d(
in_channels=mid_dim,
out_channels=self.num_filters,
kernel_size=(self.kernel_size[0], 1, 1),
stride=(self.stride[0], 1, 1),
padding=(self.kernel_size[0] // 2, 0, 0),
bias=False
)
self.a2_bn = nn.BatchNorm3d(self.num_filters, **self.bn_params)
self.a2_relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.a1(x)
x = self.a1_bn(x)
x = self.a1_relu(x)
x = self.a2(x)
x = self.a2_bn(x)
x = self.a2_relu(x)
return x
```
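The `mid_dim` expression in `_construct` is the usual R(2+1)D parameter-matching rule: the intermediate width is chosen so the spatial (1xkxk) plus temporal (kx1x1) pair has roughly as many weights as a full 3D convolution would. A small sketch checking that, using a typical stem configuration (the kernel and channel values here are assumptions, not read from a config):
```python
# Sketch of the (2+1)D parameter-matching rule used for mid_dim (illustrative sizes).
import math

dim_in, num_filters = 3, 64
kt, kh, kw = 3, 7, 7  # assumed kernel_size = (t, h, w) for a typical stem

full_3d_params = kt * kh * kw * dim_in * num_filters
mid_dim = int(math.floor(full_3d_params / (kh * kw * dim_in + kt * num_filters)))
factored_params = kh * kw * dim_in * mid_dim + kt * mid_dim * num_filters

print(mid_dim, full_3d_params, factored_params)  # 83 28224 28137 -> factored stays just below full 3D
```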
#### File: models/networks/train_module.py
```python
from abc import ABCMeta, abstractmethod
import torch.nn as nn
class TrainModule(nn.Module, metaclass=ABCMeta):
def __init__(self, *args, **kwargs):
super(TrainModule, self).__init__()
@abstractmethod
def forward(self, *inputs, **kwargs):
pass
@abstractmethod
def forward_train(self, *inputs, **kwargs):
pass
@abstractmethod
def forward_test(self, *inputs, **kwargs):
pass
```
#### File: essmc2/solvers/train_val_solver.py
```python
import torch
from .base_solver import BaseSolver
from .registry import SOLVERS
from ..utils.data import transfer_data_to_cuda
@SOLVERS.register_class()
class TrainValSolver(BaseSolver):
""" Standard train and eval steps solver
Args:
model (torch.nn.Module): Model to train or eval.
eval_interval (int): Interval between epochs, default is 1.
"""
def __init__(self, model, eval_interval=1, **kwargs):
super().__init__(model, **kwargs)
self.eval_interval = eval_interval
def run_train_epoch(self, train_data_loader):
self.train_mode()
self.before_all_iter()
self._epoch_max_iter = len(train_data_loader)
for data in train_data_loader:
self.before_iter()
data_gpu = transfer_data_to_cuda(data)
self._iter_outputs = self.model(**data_gpu)
self.after_iter()
self.after_all_iter()
@torch.no_grad()
def run_eval_epoch(self, val_data_loader):
self.eval_mode()
self._iter = 0
self._epoch_max_iter = len(val_data_loader)
self.before_all_iter()
for data in val_data_loader:
self.before_iter()
data_gpu = transfer_data_to_cuda(data)
self._iter_outputs = self.model(**data_gpu)
self.after_iter()
self.after_all_iter()
def run_epoch(self, data_loaders):
self.logger.info(f"Begin to train at Epoch [{self._epoch}/{self.max_epochs}]...")
self.run_train_epoch(data_loaders["train"])
if "val" in data_loaders and (
(self._epoch + 1) % self.eval_interval == 0 or self._epoch == self.max_epochs - 1):
self.logger.info(f"Begin to val at Epoch [{self._epoch}/{self.max_epochs}]...")
self.run_eval_epoch(data_loaders["val"])
def load_checkpoint(self, checkpoint: dict):
self._epoch = checkpoint["epoch"]
self._total_train_iter = checkpoint["total_train_iter"]
self.model.load_state_dict(checkpoint["state_dict"])
self.optimizer.load_state_dict(checkpoint["checkpoint"])
self.lr_scheduler.load_state_dict(checkpoint["lr_scheduler"])
self._epoch += 1 # Move to next epoch
def save_checkpoint(self) -> dict:
checkpoint = {
"epoch": self._epoch,
"total_train_iter": self._total_train_iter,
"state_dict": self.model.state_dict(),
"checkpoint": self.optimizer.state_dict(),
"lr_scheduler": self.lr_scheduler.state_dict()
}
return checkpoint
```
#### File: essmc2/transforms/registry.py
```python
from ..utils.registry import Registry, build_from_config
def build_pipeline(pipeline, registry):
if isinstance(pipeline, list):
if len(pipeline) == 0:
return build_from_config(dict(type="Identity"), registry)
elif len(pipeline) == 1:
return build_pipeline(pipeline[0], registry)
else:
return build_from_config(dict(type='Compose', transforms=pipeline), registry)
elif isinstance(pipeline, dict):
return build_from_config(pipeline, registry)
elif pipeline is None:
return build_from_config(dict(type='Identity'), registry)
else:
raise TypeError(f"Expect pipeline_cfg to be dict or list or None, got {type(pipeline)}")
TRANSFORMS = Registry("TRANSFORMS", build_func=build_pipeline)
```
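`build_pipeline` accepts `None`, a single config dict, or a list of config dicts, and normalizes them into a single callable transform. A hedged usage sketch, assuming `Identity` and `Compose` transforms are registered under `TRANSFORMS` elsewhere in essmc2 (as `build_pipeline` itself expects) and that `Registry.build` forwards to the custom `build_func` shown above:
```python
# Hedged sketch of the three accepted pipeline configurations.
from essmc2.transforms.registry import TRANSFORMS

t_none = TRANSFORMS.build(None)                          # -> an Identity transform
t_single = TRANSFORMS.build(dict(type="Identity"))       # -> that single transform
t_list = TRANSFORMS.build([dict(type="Identity"),
                           dict(type="Identity")])       # -> a Compose wrapping both
```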
#### File: utils/file_systems/local_fs.py
```python
from .base_fs import BaseFs
from .registry import FILE_SYSTEMS
import logging
@FILE_SYSTEMS.register_class()
class LocalFs(BaseFs):
def __init__(self):
super(LocalFs, self).__init__()
def get_object_to_local_file(self, path) -> str:
return path
def get_object_to_memory(self, path) -> bytes:
with open(path, "rb") as f:
return f.read()
def remove_local_file(self, local_path):
# Do not delete local file in local file system
return
def put_object_from_local_file(self, local_path, target_path):
        # Do nothing; in the local file system the file is already at its target location.
return
def get_prefix(self):
return "file://"
def support_write(self):
return True
def get_logging_handler(self, logging_path):
return logging.FileHandler(logging_path)
```
#### File: models/heads/mosi_heads.py
```python
import torch.nn as nn
from essmc2.models import HEADS
@HEADS.register_class()
class MoSIHead(nn.Module):
def __init__(self,
dim,
num_classes,
dropout_rate=0.5):
super(MoSIHead, self).__init__()
self.dim = dim
self.num_classes = num_classes
if dropout_rate > 0.0:
self.dropout = nn.Dropout(dropout_rate)
self.out_joint = nn.Linear(dim, num_classes, bias=True)
def forward(self, x, need_features=False):
if hasattr(self, "dropout"):
x = self.dropout(x)
out = self.out_joint(x)
if need_features:
return out, x
else:
return out
```
#### File: models/networks/mosi_net.py
```python
from collections import OrderedDict
import torch
import torch.nn as nn
from essmc2.models import TrainModule, MODELS, BACKBONES, NECKS, HEADS, LOSSES
from essmc2.utils.metric import accuracy
@MODELS.register_class()
class MoSINet(TrainModule):
def __init__(self,
backbone,
neck,
head,
loss=None,
label_mode='joint',
freeze_bn=False,
topk=(1,)):
super().__init__()
self.backbone = BACKBONES.build(backbone)
self.neck = NECKS.build(neck)
self.label_mode = label_mode
if self.label_mode == "joint":
self.head = HEADS.build(head)
else:
self.head_x = HEADS.build(head)
self.head_y = HEADS.build(head)
self.freeze_bn = freeze_bn
if isinstance(topk, int):
self.topk = (topk,)
else:
self.topk = topk
self.loss = LOSSES.build(loss or dict(type="CrossEntropy"))
def train(self, mode=True):
self.training = mode
super(MoSINet, self).train(mode=mode)
for module in self.modules():
if isinstance(module, (nn.BatchNorm2d, nn.BatchNorm3d, nn.LayerNorm)) and self.freeze_bn:
module.train(False)
return self
def forward(self, video, **kwargs):
return self.forward_train(video, **kwargs) if self.training else self.forward_test(video, **kwargs)
def forward_train(self, video, mosi_label, **kwargs):
ret = OrderedDict()
if len(video.shape) == 6:
b, n, c, t, h, w = video.shape
video = video.reshape(b * n, c, t, h, w)
neck_features = self.neck(self.backbone(video))
if self.label_mode == "joint":
probs = self.head(neck_features)
if mosi_label is None:
return probs
labels = mosi_label["move_joint"].reshape(probs.shape[0])
loss = self.loss(probs, labels)
ret["loss"] = loss
with torch.no_grad():
acc_topk = accuracy(probs, labels, self.topk)
for acc, k in zip(acc_topk, self.topk):
ret[f"acc@{k}"] = acc
else:
probs_x = self.head_x(neck_features)
probs_y = self.head_y(neck_features)
if mosi_label is None:
return probs_x, probs_y
labels_x = mosi_label["move_x"].reshape(probs_x.shape[0])
labels_y = mosi_label["move_y"].reshape(probs_y.shape[0])
loss_x = self.loss(probs_x, labels_x)
loss_y = self.loss(probs_y, labels_y)
loss = loss_x + loss_y
ret["loss"] = loss
ret["loss_x"] = loss_x
ret["loss_y"] = loss_y
with torch.no_grad():
acc_topk_x = accuracy(probs_x, labels_x, self.topk)
for acc, k in zip(acc_topk_x, self.topk):
ret[f"acc_x@{k}"] = acc
with torch.no_grad():
acc_topk_y = accuracy(probs_y, labels_y, self.topk)
for acc, k in zip(acc_topk_y, self.topk):
ret[f"acc_y@{k}"] = acc
ret["batch_size"] = video.size(0)
return ret
def forward_test(self, video, mosi_label=None, **kwargs):
if len(video.shape) == 6:
b, n, c, t, h, w = video.shape
video = video.reshape(b * n, c, t, h, w)
neck_features = self.neck(self.backbone(video))
if self.label_mode == "joint":
probs = nn.functional.softmax(self.head(neck_features), dim=1)
if mosi_label is None:
return probs
ret = OrderedDict()
labels = mosi_label["move_joint"].reshape(probs.shape[0])
acc_topk = accuracy(probs, labels, self.topk)
for acc, k in zip(acc_topk, self.topk):
ret[f"acc@{k}"] = acc
return ret
else:
probs_x = nn.functional.softmax(self.head_x(neck_features), dim=1)
probs_y = nn.functional.softmax(self.head_y(neck_features), dim=1)
if mosi_label is None:
return probs_x, probs_y
ret = OrderedDict()
labels_x = mosi_label["move_x"].reshape(probs_x.shape[0])
labels_y = mosi_label["move_y"].reshape(probs_y.shape[0])
acc_topk_x = accuracy(probs_x, labels_x, self.topk)
acc_topk_y = accuracy(probs_y, labels_y, self.topk)
for acc, k in zip(acc_topk_x, self.topk):
ret[f"acc_x@{k}"] = acc
for acc, k in zip(acc_topk_y, self.topk):
ret[f"acc_y@{k}"] = acc
ret["batch_size"] = video.size(0)
return ret
```
#### File: impls/datasets/cifar_noisy_dataset.py
```python
import _pickle as pkl
import os.path as osp
import random
import numpy as np
from PIL import Image
from essmc2.datasets import BaseDataset, DATASETS
ASYM_TRANSITION = {0: 0, 2: 0, 4: 7, 7: 7, 1: 1, 9: 1, 3: 5, 5: 3, 6: 6, 8: 8}
@DATASETS.register_class()
class CifarNoisyDataset(BaseDataset):
def __init__(self,
root_dir="",
cifar_type="cifar10",
noise_mode="sym",
noise_ratio=0.5,
**kwargs):
super(CifarNoisyDataset, self).__init__(**kwargs)
self.root_dir = root_dir
assert cifar_type in ('cifar10', 'cifar100')
self.cifar_type = cifar_type
assert noise_mode in ('asym', 'sym')
self.noise_mode = noise_mode
self.noise_ratio = noise_ratio
self.num_classes = 10 if cifar_type == "cifar10" else 100
self.images = []
self.clean_labels = []
self.noise_labels = []
self._load_cifar_data()
self._make_noise()
def _load_cifar_data(self):
if self.cifar_type == "cifar10":
if self.mode == "test":
pkl_path = osp.join(self.root_dir, 'test_batch')
with open(pkl_path, "rb") as f:
data = pkl.load(f, encoding="latin1")
images = data['data']
images = images.reshape((10000, 3, 32, 32))
images = images.transpose((0, 2, 3, 1))
self.images = images
self.clean_labels = data['labels']
else:
images_list = []
for i in range(1, 6):
pkl_path = osp.join(self.root_dir, f'data_batch_{i}')
with open(pkl_path, "rb") as f:
data = pkl.load(f, encoding='latin1')
images = data['data']
images_list.append(images)
self.clean_labels.extend(data['labels'])
images = np.concatenate(images_list)
images = images.reshape((50000, 3, 32, 32))
images = images.transpose((0, 2, 3, 1))
self.images = images
elif self.cifar_type == "cifar100":
if self.mode == "test":
pkl_path = osp.join(self.root_dir, 'test')
else:
pkl_path = osp.join(self.root_dir, 'train')
with open(pkl_path, "rb") as f:
data = pkl.load(f, encoding='latin1')
images = data['data']
images = images.reshape((10000 if self.mode == "test" else 50000, 3, 32, 32))
images = images.transpose((0, 2, 3, 1))
self.images = images
self.clean_labels = data['fine_labels']
else:
raise ValueError(f"Unexpected cifar_type, support cifar10, cifar100, got {self.cifar_type}")
def _make_noise(self):
self.noise_labels = self.clean_labels.copy()
if self.mode == "test":
return
data_len = len(self.clean_labels)
idx = list(range(data_len))
random.shuffle(idx)
noise_num = int(self.noise_ratio * data_len)
noise_idx = set(idx[:noise_num])
for i in range(data_len):
if i in noise_idx:
if self.noise_mode == "sym":
noise_label = random.randint(0, self.num_classes - 1)
else:
noise_label = ASYM_TRANSITION[self.clean_labels[i]]
self.noise_labels[i] = noise_label
def __len__(self) -> int:
return len(self.images)
def _get(self, index: int):
img = Image.fromarray(self.images[index])
if self.mode == "test":
gt_label = self.clean_labels[index]
else:
gt_label = self.noise_labels[index]
return dict(img=img,
index=np.array(index, dtype=np.int64),
gt_label=np.array(gt_label, dtype=np.int64),
meta=dict()
)
```
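The noise injection in `_make_noise` is independent of CIFAR itself: a random subset of indices of size `noise_ratio * N` gets a new label, drawn uniformly for symmetric noise or via `ASYM_TRANSITION` for asymmetric noise. A self-contained sketch of just that logic (the helper name `corrupt` is made up for illustration):
```python
# Standalone sketch of the symmetric / asymmetric label corruption used above.
import random

ASYM_TRANSITION = {0: 0, 2: 0, 4: 7, 7: 7, 1: 1, 9: 1, 3: 5, 5: 3, 6: 6, 8: 8}

def corrupt(labels, noise_ratio=0.5, mode="sym", num_classes=10, seed=0):
    rng = random.Random(seed)
    idx = list(range(len(labels)))
    rng.shuffle(idx)
    noisy = list(labels)
    for i in idx[: int(noise_ratio * len(labels))]:
        noisy[i] = rng.randint(0, num_classes - 1) if mode == "sym" else ASYM_TRANSITION[labels[i]]
    return noisy

print(corrupt(list(range(10)), noise_ratio=1.0, mode="asym"))
# [0, 1, 0, 5, 7, 3, 6, 7, 8, 1]  -- every class mapped through the transition table
```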
#### File: datasets/utils/transformations.py
```python
import torch
import math
import torchvision.transforms._functional_video as F
from torchvision.transforms import Lambda, Compose
import random
import numbers
class ColorJitter(object):
"""
Modified from https://github.com/TengdaHan/DPC/blob/master/utils/augmentation.py.
Randomly change the brightness, contrast and saturation of an image.
Args:
brightness (float or tuple of float (min, max)): How much to jitter brightness.
brightness_factor is chosen uniformly from [max(0, 1 - brightness), 1 + brightness]
or the given [min, max]. Should be non negative numbers.
contrast (float or tuple of float (min, max)): How much to jitter contrast.
contrast_factor is chosen uniformly from [max(0, 1 - contrast), 1 + contrast]
or the given [min, max]. Should be non negative numbers.
saturation (float or tuple of float (min, max)): How much to jitter saturation.
saturation_factor is chosen uniformly from [max(0, 1 - saturation), 1 + saturation]
or the given [min, max]. Should be non negative numbers.
hue (float or tuple of float (min, max)): How much to jitter hue.
hue_factor is chosen uniformly from [-hue, hue] or the given [min, max].
Should have 0<= hue <= 0.5 or -0.5 <= min <= max <= 0.5.
grayscale (float): possibility to transform the video to grayscale.
Should have a value range of [0, 1]
consistent (bool): indicates whether or not to keep all the color transformations consistent for all the frames.
shuffle (bool): indicates whether or not to shuffle the sequence of the augmentations.
gray_first (bool): indicates whether or not to put grayscale transform first.
"""
def __init__(
self, brightness=0, contrast=0, saturation=0, hue=0, grayscale=0, consistent=False, shuffle=True, gray_first=True
):
self.brightness = self._check_input(brightness, 'brightness')
self.contrast = self._check_input(contrast, 'contrast')
self.saturation = self._check_input(saturation, 'saturation')
self.hue = self._check_input(hue, 'hue', center=0, bound=(-0.5, 0.5),
clip_first_on_zero=False)
self.grayscale = grayscale
self.consistent = consistent
self.shuffle = shuffle
self.gray_first = gray_first
def _check_input(self, value, name, center=1, bound=(0, float('inf')), clip_first_on_zero=True):
if isinstance(value, numbers.Number):
if value < 0:
raise ValueError("If {} is a single number, it must be non negative.".format(name))
value = [center - float(value), center + float(value)]
if clip_first_on_zero:
value[0] = max(value[0], 0.0)
elif isinstance(value, (tuple, list)) and len(value) == 2:
if not bound[0] <= value[0] <= value[1] <= bound[1]:
raise ValueError("{} values should be between {}".format(name, bound))
else:
raise TypeError("{} should be a single number or a list/tuple with lenght 2.".format(name))
# if value is 0 or (1., 1.) for brightness/contrast/saturation
# or (0., 0.) for hue, do nothing
if value[0] == value[1] == center:
value = None
return value
def _get_transform(self, T, device):
"""Get a randomized transform to be applied on image.
Arguments are same as that of __init__.
Arg:
T (int): number of frames. Used when consistent = False.
Returns:
Transform which randomly adjusts brightness, contrast and
saturation in a random order.
"""
transforms = []
if self.brightness is not None:
if self.consistent:
brightness_factor = random.uniform(self.brightness[0], self.brightness[1])
else:
brightness_factor = torch.empty([1, T, 1, 1], device=device).uniform_(self.brightness[0], self.brightness[1])
transforms.append(Lambda(lambda frame: adjust_brightness(frame, brightness_factor)))
if self.contrast is not None:
if self.consistent:
contrast_factor = random.uniform(self.contrast[0], self.contrast[1])
else:
contrast_factor = torch.empty([1, T, 1, 1], device=device).uniform_(self.contrast[0], self.contrast[1])
transforms.append(Lambda(lambda frame: adjust_contrast(frame, contrast_factor)))
if self.saturation is not None:
if self.consistent:
saturation_factor = random.uniform(self.saturation[0], self.saturation[1])
else:
saturation_factor = torch.empty([1, T, 1, 1], device=device).uniform_(self.saturation[0], self.saturation[1])
transforms.append(Lambda(lambda frame: adjust_saturation(frame, saturation_factor)))
if self.hue is not None:
if self.consistent:
hue_factor = random.uniform(self.hue[0], self.hue[1])
else:
hue_factor = torch.empty([T, 1, 1], device=device).uniform_(self.hue[0], self.hue[1])
transforms.append(Lambda(lambda frame: adjust_hue(frame, hue_factor)))
if self.shuffle:
random.shuffle(transforms)
if random.uniform(0, 1) < self.grayscale:
gray_transform = Lambda(lambda frame: rgb_to_grayscale(frame))
if self.gray_first:
transforms.insert(0, gray_transform)
else:
transforms.append(gray_transform)
transform = Compose(transforms)
return transform
def __call__(self, clip):
"""
Args:
clip (torch.tensor): Size is (C, T, H, W)
Return:
clip (torch.tensor): Size is (C, T, H, W)
"""
raw_shape = clip.shape #(C, T, H, W)
device = clip.device
T = raw_shape[1]
transform = self._get_transform(T, device)
clip = transform(clip)
assert clip.shape == raw_shape
return clip #(C, T, H, W)
def __repr__(self):
format_string = self.__class__.__name__ + '('
format_string += 'brightness={0}'.format(self.brightness)
format_string += ', contrast={0}'.format(self.contrast)
format_string += ', saturation={0}'.format(self.saturation)
        format_string += ', hue={0}'.format(self.hue)
format_string += ', grayscale={0})'.format(self.grayscale)
return format_string
def _is_tensor_a_torch_image(input):
return input.ndim >= 2
def _blend(img1, img2, ratio):
# type: (Tensor, Tensor, float) -> Tensor
bound = 1 if img1.dtype in [torch.half, torch.float32, torch.float64] else 255
return (ratio * img1 + (1 - ratio) * img2).clamp(0, bound).to(img1.dtype)
def rgb_to_grayscale(img):
# type: (Tensor) -> Tensor
"""Convert the given RGB Image Tensor to Grayscale.
For RGB to Grayscale conversion, ITU-R 601-2 luma transform is performed which
is L = R * 0.2989 + G * 0.5870 + B * 0.1140
Args:
img (Tensor): Image to be converted to Grayscale in the form [C, H, W].
Returns:
Tensor: Grayscale image.
Args:
clip (torch.tensor): Size is (T, H, W, C)
Return:
clip (torch.tensor): Size is (T, H, W, C)
"""
orig_dtype = img.dtype
rgb_convert = torch.tensor([0.299, 0.587, 0.114])
assert img.shape[0] == 3, "First dimension need to be 3 Channels"
if img.is_cuda:
rgb_convert = rgb_convert.to(img.device)
img = img.float().permute(1,2,3,0).matmul(rgb_convert).to(orig_dtype)
return torch.stack([img, img, img], 0)
def _rgb2hsv(img):
r, g, b = img.unbind(0)
maxc, _ = torch.max(img, dim=0)
minc, _ = torch.min(img, dim=0)
eqc = maxc == minc
cr = maxc - minc
s = cr / torch.where(eqc, maxc.new_ones(()), maxc)
cr_divisor = torch.where(eqc, maxc.new_ones(()), cr)
rc = (maxc - r) / cr_divisor
gc = (maxc - g) / cr_divisor
bc = (maxc - b) / cr_divisor
hr = (maxc == r) * (bc - gc)
hg = ((maxc == g) & (maxc != r)) * (2.0 + rc - bc)
hb = ((maxc != g) & (maxc != r)) * (4.0 + gc - rc)
h = (hr + hg + hb)
h = torch.fmod((h / 6.0 + 1.0), 1.0)
return torch.stack((h, s, maxc))
def _hsv2rgb(img):
l = len(img.shape)
h, s, v = img.unbind(0)
i = torch.floor(h * 6.0)
f = (h * 6.0) - i
i = i.to(dtype=torch.int32)
p = torch.clamp((v * (1.0 - s)), 0.0, 1.0)
q = torch.clamp((v * (1.0 - s * f)), 0.0, 1.0)
t = torch.clamp((v * (1.0 - s * (1.0 - f))), 0.0, 1.0)
i = i % 6
if l == 3:
tmp = torch.arange(6)[:, None, None]
elif l == 4:
tmp = torch.arange(6)[:, None, None, None]
if img.is_cuda:
tmp = tmp.to(img.device)
mask = i == tmp #(H, W) == (6, H, W)
a1 = torch.stack((v, q, p, p, t, v))
a2 = torch.stack((t, v, v, q, p, p))
a3 = torch.stack((p, p, t, v, v, q))
a4 = torch.stack((a1, a2, a3)) #(3, 6, H, W)
if l == 3:
return torch.einsum("ijk, xijk -> xjk", mask.to(dtype=img.dtype), a4) #(C, H, W)
elif l == 4:
return torch.einsum("itjk, xitjk -> xtjk", mask.to(dtype=img.dtype), a4) #(C, T, H, W)
def adjust_brightness(img, brightness_factor):
# type: (Tensor, float) -> Tensor
if not _is_tensor_a_torch_image(img):
raise TypeError('tensor is not a torch image.')
return _blend(img, torch.zeros_like(img), brightness_factor)
def adjust_contrast(img, contrast_factor):
# type: (Tensor, float) -> Tensor
if not _is_tensor_a_torch_image(img):
raise TypeError('tensor is not a torch image.')
mean = torch.mean(rgb_to_grayscale(img).to(torch.float), dim=(-4, -2, -1), keepdim=True)
return _blend(img, mean, contrast_factor)
def adjust_saturation(img, saturation_factor):
# type: (Tensor, float) -> Tensor
if not _is_tensor_a_torch_image(img):
raise TypeError('tensor is not a torch image.')
return _blend(img, rgb_to_grayscale(img), saturation_factor)
def adjust_hue(img, hue_factor):
"""Adjust hue of an image.
The image hue is adjusted by converting the image to HSV and
cyclically shifting the intensities in the hue channel (H).
The image is then converted back to original image mode.
`hue_factor` is the amount of shift in H channel and must be in the
interval `[-0.5, 0.5]`.
See `Hue`_ for more details.
.. _Hue: https://en.wikipedia.org/wiki/Hue
Args:
img (Tensor): Image to be adjusted. Image type is either uint8 or float.
hue_factor (float): How much to shift the hue channel. Should be in
[-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in
HSV space in positive and negative direction respectively.
0 means no shift. Therefore, both -0.5 and 0.5 will give an image
with complementary colors while 0 gives the original image.
Returns:
Tensor: Hue adjusted image.
"""
if isinstance(hue_factor, float) and not(-0.5 <= hue_factor <= 0.5):
raise ValueError('hue_factor ({}) is not in [-0.5, 0.5].'.format(hue_factor))
elif isinstance(hue_factor, torch.Tensor) and not ((-0.5 <= hue_factor).sum() == hue_factor.shape[0] and (hue_factor <= 0.5).sum() == hue_factor.shape[0]):
raise ValueError('hue_factor ({}) is not in [-0.5, 0.5].'.format(hue_factor))
if not _is_tensor_a_torch_image(img):
raise TypeError('tensor is not a torch image.')
orig_dtype = img.dtype
if img.dtype == torch.uint8:
img = img.to(dtype=torch.float32) / 255.0
img = _rgb2hsv(img)
h, s, v = img.unbind(0)
h += hue_factor
h = h % 1.0
img = torch.stack((h, s, v))
img_hue_adj = _hsv2rgb(img)
if orig_dtype == torch.uint8:
img_hue_adj = (img_hue_adj * 255.0).to(dtype=orig_dtype)
return img_hue_adj
class AutoResizedCropVideo(object):
def __init__(
self,
size,
scale=(0.08, 1.0),
interpolation_mode="bilinear",
mode = "cc"
):
        # mode selects which spatial crop(s) are returned (e.g. "cc" is a center crop)
if isinstance(size, tuple):
assert len(size) == 2, "size should be tuple (height, width)"
self.size = size
else:
self.size = (size, size)
self.interpolation_mode = interpolation_mode
self.scale = scale
self.mode = mode
self.idx = 0
def set_spatial_index(self, idx):
self.idx = idx
def get_crop(self, clip):
crop_mode = self.mode[self.idx:self.idx+2]
scale = random.uniform(*self.scale)
# Get the crop size for the scale cropping
_, _, image_height, image_width = clip.shape
min_length = min(image_width, image_height)
crop_size = int(min_length * scale)
center_x = image_width // 2
center_y = image_height // 2
box_half = crop_size // 2
th = crop_size
tw = crop_size
if crop_mode == "cc":
x1 = center_x - box_half
y1 = center_y - box_half
x2 = center_x + box_half
y2 = center_y + box_half
elif crop_mode == "ll":
x1 = 0
y1 = center_y - box_half
x2 = crop_size
y2 = center_y + box_half
elif crop_mode == "rr":
x1 = image_width - crop_size
y1 = center_y - box_half
x2 = image_width
y2 = center_y + box_half
elif crop_mode == "tl":
x1 = 0
y1 = 0
x2 = crop_size
y2 = crop_size
elif crop_mode == "tr":
x1 = image_width - crop_size
y1 = 0
x2 = image_width
y2 = crop_size
elif crop_mode == "bl":
x1 = 0
y1 = image_height - crop_size
x2 = crop_size
y2 = image_height
elif crop_mode == "br":
x1 = image_width - crop_size
y1 = image_height - crop_size
x2 = image_width
y2 = image_height
crop = F.resized_crop(clip, y1, x1, th, tw, self.size, self.interpolation_mode)
return crop
def __call__(self, clip):
"""
Args:
clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
Returns:
torch.tensor: randomly cropped/resized video clip.
size is (C, T, H, W)
"""
if self.idx == -1:
# return self.get_random_crop(clip)
pass
else:
return self.get_crop(clip)
class KineticsResizedCrop(object):
def __init__(
self,
short_side_range,
crop_size,
num_spatial_crops=1,
):
self.idx = -1
self.short_side_range = short_side_range
self.crop_size = int(crop_size)
self.num_spatial_crops = num_spatial_crops
def _get_controlled_crop(self, clip):
_, _, clip_height, clip_width = clip.shape
length = self.short_side_range[0]
if clip_height < clip_width:
new_clip_height = int(length)
new_clip_width = int(clip_width / clip_height * new_clip_height)
new_clip = torch.nn.functional.interpolate(
clip, size=(new_clip_height, new_clip_width), mode="bilinear"
)
else:
new_clip_width = int(length)
new_clip_height = int(clip_height / clip_width * new_clip_width)
new_clip = torch.nn.functional.interpolate(
clip, size=(new_clip_height, new_clip_width), mode="bilinear"
)
x_max = int(new_clip_width - self.crop_size)
y_max = int(new_clip_height - self.crop_size)
if self.num_spatial_crops == 1:
x = x_max // 2
y = y_max // 2
elif self.num_spatial_crops == 3:
if self.idx == 0:
if new_clip_width == length:
x = x_max // 2
y = 0
elif new_clip_height == length:
x = 0
y = y_max // 2
elif self.idx == 1:
x = x_max // 2
y = y_max // 2
elif self.idx == 2:
if new_clip_width == length:
x = x_max // 2
y = y_max
elif new_clip_height == length:
x = x_max
y = y_max // 2
return new_clip[:, :, y:y+self.crop_size, x:x+self.crop_size]
def _get_random_crop(self, clip):
_, _, clip_height, clip_width = clip.shape
if clip_height < clip_width:
new_clip_height = int(random.uniform(*self.short_side_range))
new_clip_width = int(clip_width / clip_height * new_clip_height)
new_clip = torch.nn.functional.interpolate(
clip, size=(new_clip_height, new_clip_width), mode="bilinear"
)
else:
new_clip_width = int(random.uniform(*self.short_side_range))
new_clip_height = int(clip_height / clip_width * new_clip_width)
new_clip = torch.nn.functional.interpolate(
clip, size=(new_clip_height, new_clip_width), mode="bilinear"
)
x_max = int(new_clip_width - self.crop_size)
y_max = int(new_clip_height - self.crop_size)
x = int(random.uniform(0, x_max))
y = int(random.uniform(0, y_max))
return new_clip[:, :, y:y+self.crop_size, x:x+self.crop_size]
def set_spatial_index(self, idx):
self.idx = idx
def __call__(self, clip):
if self.idx == -1:
return self._get_random_crop(clip)
else:
return self._get_controlled_crop(clip)
```
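The controlled three-crop logic above places crops along the longer side after resizing the short side. A minimal sketch of those positions (not part of the repository; sizes are assumed for illustration):
```python
# A minimal sketch (not part of the repository) of the controlled three-crop
# positions in KineticsResizedCrop._get_controlled_crop: after resizing the short
# side, the three crops slide along the longer side and stay centred on the other.
def three_crop_offsets(height, width, crop_size):
    """Return (x, y) crop offsets for spatial indices 0, 1 and 2."""
    x_max, y_max = width - crop_size, height - crop_size
    offsets = []
    for idx in range(3):
        if width > height:            # landscape clip: slide horizontally
            x, y = (0, x_max // 2, x_max)[idx], y_max // 2
        else:                         # portrait clip: slide vertically
            x, y = x_max // 2, (0, y_max // 2, y_max)[idx]
        offsets.append((x, y))
    return offsets

# A 170x128 (W x H) frame resized to short side 128, cropped to 112x112:
print(three_crop_offsets(height=128, width=170, crop_size=112))
# [(0, 8), (29, 8), (58, 8)]
```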
#### File: module_zoo/branches/csn_branch.py
```python
import torch
import torch.nn as nn
from models.base.base_blocks import BaseBranch, Base3DStem, BaseHead
from models.base.base_blocks import BRANCH_REGISTRY
@BRANCH_REGISTRY.register()
class CSNBranch(BaseBranch):
"""
The ir-CSN branch.
See Du Tran et al.
Video Classification with Channel-Separated Convolutional Networks.
"""
def __init__(self, cfg, block_idx):
"""
Args:
cfg (Config): global config object.
block_idx (list): list of [stage_id, block_id], both starting from 0.
"""
super(CSNBranch, self).__init__(cfg, block_idx)
def _construct_bottleneck(self):
self.a = nn.Conv3d(
in_channels = self.dim_in,
out_channels = self.num_filters//self.expansion_ratio,
kernel_size = 1,
stride = 1,
padding = 0,
bias = False
)
self.a_bn = nn.BatchNorm3d(self.num_filters//self.expansion_ratio, eps=self.bn_eps, momentum=self.bn_mmt)
self.a_relu = nn.ReLU(inplace=True)
self.b = nn.Conv3d(
in_channels = self.num_filters//self.expansion_ratio,
out_channels = self.num_filters//self.expansion_ratio,
kernel_size = self.kernel_size,
stride = self.stride,
padding = [self.kernel_size[0]//2, self.kernel_size[1]//2, self.kernel_size[2]//2],
bias = False,
groups = self.num_filters//self.expansion_ratio,
)
self.b_bn = nn.BatchNorm3d(self.num_filters//self.expansion_ratio, eps=self.bn_eps, momentum=self.bn_mmt)
self.b_relu = nn.ReLU(inplace=True)
self.c = nn.Conv3d(
in_channels = self.num_filters//self.expansion_ratio,
out_channels = self.num_filters,
kernel_size = 1,
stride = 1,
padding = 0,
bias = False
)
self.c_bn = nn.BatchNorm3d(self.num_filters, eps=self.bn_eps, momentum=self.bn_mmt)
def forward(self, x):
if self.transformation == 'bottleneck':
x = self.a(x)
x = self.a_bn(x)
x = self.a_relu(x)
x = self.b(x)
x = self.b_bn(x)
x = self.b_relu(x)
x = self.c(x)
x = self.c_bn(x)
return x
```
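What makes the branch above channel-separated is the `groups` argument of its 3x3x3 convolution. A standalone sketch (channel count assumed, not the repository's code) comparing a full and a depthwise 3D convolution:
```python
# A standalone sketch (channel count assumed) of the "channel-separated" idea in
# the ir-CSN branch: groups equal to the channel count turns the 3x3x3 convolution
# into a depthwise convolution, leaving channel mixing to the 1x1x1 convolutions.
import torch
import torch.nn as nn

channels = 64
full = nn.Conv3d(channels, channels, kernel_size=3, padding=1, bias=False)
depthwise = nn.Conv3d(channels, channels, kernel_size=3, padding=1, bias=False,
                      groups=channels)

def count(m):
    return sum(p.numel() for p in m.parameters())

print(count(full), count(depthwise))      # 110592 vs 1728 weights

x = torch.rand(2, channels, 4, 16, 16)    # (N, C, T, H, W)
print(depthwise(x).shape)                 # torch.Size([2, 64, 4, 16, 16])
```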
#### File: module_zoo/branches/s3dg_branch.py
```python
import torch
import torch.nn as nn
from models.base.base_blocks import (
BRANCH_REGISTRY, InceptionBaseConv3D
)
class InceptionBlock3D(nn.Module):
"""
Element constructing the S3D/S3DG.
See models/base/backbone.py L99-186.
Modified from https://github.com/TengdaHan/CoCLR/blob/main/backbone/s3dg.py.
"""
def __init__(self, cfg, in_planes, out_planes):
super(InceptionBlock3D, self).__init__()
_gating = cfg.VIDEO.BACKBONE.BRANCH.GATING
assert len(out_planes) == 6
assert isinstance(out_planes, list)
[num_out_0_0a,
num_out_1_0a, num_out_1_0b,
num_out_2_0a, num_out_2_0b,
num_out_3_0b] = out_planes
self.branch0 = nn.Sequential(
InceptionBaseConv3D(cfg, in_planes, num_out_0_0a, kernel_size=1, stride=1),
)
self.branch1 = nn.Sequential(
InceptionBaseConv3D(cfg, in_planes, num_out_1_0a, kernel_size=1, stride=1),
BRANCH_REGISTRY.get(cfg.VIDEO.BACKBONE.BRANCH.NAME)(cfg, num_out_1_0a, num_out_1_0b, kernel_size=3, stride=1, padding=1),
)
self.branch2 = nn.Sequential(
InceptionBaseConv3D(cfg, in_planes, num_out_2_0a, kernel_size=1, stride=1),
BRANCH_REGISTRY.get(cfg.VIDEO.BACKBONE.BRANCH.NAME)(cfg, num_out_2_0a, num_out_2_0b, kernel_size=3, stride=1, padding=1),
)
self.branch3 = nn.Sequential(
nn.MaxPool3d(kernel_size=(3, 3, 3), stride=1, padding=1),
InceptionBaseConv3D(cfg, in_planes, num_out_3_0b, kernel_size=1, stride=1),
)
self.out_channels = sum([num_out_0_0a, num_out_1_0b, num_out_2_0b, num_out_3_0b])
self.gating = _gating
if _gating:
self.gating_b0 = SelfGating(num_out_0_0a)
self.gating_b1 = SelfGating(num_out_1_0b)
self.gating_b2 = SelfGating(num_out_2_0b)
self.gating_b3 = SelfGating(num_out_3_0b)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
x3 = self.branch3(x)
if self.gating:
x0 = self.gating_b0(x0)
x1 = self.gating_b1(x1)
x2 = self.gating_b2(x2)
x3 = self.gating_b3(x3)
out = torch.cat((x0, x1, x2, x3), 1)
return out
class SelfGating(nn.Module):
def __init__(self, input_dim):
super(SelfGating, self).__init__()
self.fc = nn.Linear(input_dim, input_dim)
def forward(self, input_tensor):
"""Feature gating as used in S3D-G"""
spatiotemporal_average = torch.mean(input_tensor, dim=[2, 3, 4])
weights = self.fc(spatiotemporal_average)
weights = torch.sigmoid(weights)
return weights[:, :, None, None, None] * input_tensor
@BRANCH_REGISTRY.register()
class STConv3d(nn.Module):
"""
Element constructing the S3D/S3DG.
See models/base/backbone.py L99-186.
Modified from https://github.com/TengdaHan/CoCLR/blob/main/backbone/s3dg.py.
"""
def __init__(self,cfg,in_planes,out_planes,kernel_size,stride,padding=0):
super(STConv3d, self).__init__()
if isinstance(stride, tuple):
t_stride = stride[0]
stride = stride[-1]
else: # int
t_stride = stride
self.bn_mmt = cfg.BN.MOMENTUM
self.bn_eps = cfg.BN.EPS
self._construct_branch(
cfg,
in_planes,
out_planes,
kernel_size,
stride,
t_stride,
padding
)
def _construct_branch(
self,
cfg,
in_planes,
out_planes,
kernel_size,
stride,
t_stride,
padding=0
):
self.conv1 = nn.Conv3d(in_planes, out_planes, kernel_size=(1,kernel_size,kernel_size),
stride=(1,stride,stride),padding=(0,padding,padding), bias=False)
self.conv2 = nn.Conv3d(out_planes,out_planes,kernel_size=(kernel_size,1,1),
stride=(t_stride,1,1),padding=(padding,0,0), bias=False)
self.bn1=nn.BatchNorm3d(out_planes, eps=self.bn_eps, momentum=self.bn_mmt)
self.bn2=nn.BatchNorm3d(out_planes, eps=self.bn_eps, momentum=self.bn_mmt)
self.relu = nn.ReLU(inplace=True)
# init
self.conv1.weight.data.normal_(mean=0, std=0.01) # original s3d is truncated normal within 2 std
self.conv2.weight.data.normal_(mean=0, std=0.01) # original s3d is truncated normal within 2 std
self.bn1.weight.data.fill_(1)
self.bn1.bias.data.zero_()
self.bn2.weight.data.fill_(1)
self.bn2.bias.data.zero_()
def forward(self,x):
x=self.conv1(x)
x=self.bn1(x)
x=self.relu(x)
x=self.conv2(x)
x=self.bn2(x)
x=self.relu(x)
return x
```
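STConv3d above factorizes a k x k x k convolution into a 1 x k x k spatial convolution followed by a k x 1 x 1 temporal convolution. A minimal shape check with assumed sizes (BatchNorm omitted for brevity):
```python
# A minimal shape check (sizes assumed) of the factorization used by STConv3d.
import torch
import torch.nn as nn

in_planes, out_planes, k = 3, 64, 3
spatial = nn.Conv3d(in_planes, out_planes, kernel_size=(1, k, k),
                    padding=(0, k // 2, k // 2), bias=False)
temporal = nn.Conv3d(out_planes, out_planes, kernel_size=(k, 1, 1),
                     padding=(k // 2, 0, 0), bias=False)

x = torch.rand(2, in_planes, 8, 56, 56)        # (N, C, T, H, W)
y = temporal(torch.relu(spatial(x)))           # spatial conv, then temporal conv
print(y.shape)                                 # torch.Size([2, 64, 8, 56, 56])
```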
#### File: module_zoo/heads/transformer_head.py
```python
import torch
import torch.nn as nn
from models.base.base_blocks import BaseHead
from models.base.base_blocks import HEAD_REGISTRY
from collections import OrderedDict
from models.utils.init_helper import lecun_normal_, trunc_normal_, _init_transformer_weights
@HEAD_REGISTRY.register()
class TransformerHead(BaseHead):
"""
Construct head for video vision transformers.
"""
def __init__(self, cfg):
"""
Args:
cfg (Config): global config object.
"""
super(TransformerHead, self).__init__(cfg)
self.apply(_init_transformer_weights)
def _construct_head(
self,
dim,
num_classes,
dropout_rate,
activation_func,
):
if self.cfg.VIDEO.HEAD.PRE_LOGITS:
self.pre_logits = nn.Sequential(OrderedDict([
('fc', nn.Linear(dim, dim)),
('act', nn.Tanh())
]))
self.linear = nn.Linear(dim, num_classes)
if dropout_rate > 0.0:
self.dropout = nn.Dropout(dropout_rate)
if activation_func == "softmax":
self.activation = nn.Softmax(dim=-1)
elif activation_func == "sigmoid":
self.activation = nn.Sigmoid()
elif activation_func == "identity":
self.activation = nn.Identity()
else:
raise NotImplementedError(
"{} is not supported as an activation"
"function.".format(activation_func)
)
def forward(self, x):
"""
Returns:
x (Tensor): classification predictions.
logits (Tensor): global average pooled features.
"""
if hasattr(self, "dropout"):
out = self.dropout(x)
else:
out = x
if hasattr(self, "pre_logits"):
out = self.pre_logits(out)
out = self.linear(out)
if not self.training:
out = self.activation(out)
return out, x
@HEAD_REGISTRY.register()
class TransformerHeadx2(BaseHead):
"""
The Transformer head for the EPIC-KITCHENS dataset.
"""
def __init__(self, cfg):
"""
Args:
cfg (Config): global config object.
"""
super(TransformerHeadx2, self).__init__(cfg)
self.apply(_init_transformer_weights)
def _construct_head(
self,
dim,
num_classes,
dropout_rate,
activation_func,
):
if self.cfg.VIDEO.HEAD.PRE_LOGITS:
self.pre_logits1 = nn.Sequential(OrderedDict([
('fc', nn.Linear(dim, dim)),
('act', nn.Tanh())
]))
self.pre_logits2 = nn.Sequential(OrderedDict([
('fc', nn.Linear(dim, dim)),
('act', nn.Tanh())
]))
self.linear1 = nn.Linear(dim, num_classes[0], bias=True)
self.linear2 = nn.Linear(dim, num_classes[1], bias=True)
if dropout_rate > 0.0:
self.dropout = nn.Dropout(dropout_rate)
if activation_func == "softmax":
self.activation = nn.Softmax(dim=-1)
elif activation_func == "sigmoid":
self.activation = nn.Sigmoid()
elif activation_func == "identity":
self.activation = nn.Identity()
else:
raise NotImplementedError(
"{} is not supported as an activation"
"function.".format(activation_func)
)
def forward(self, x):
"""
Returns:
x (dict): dictionary of classification predictions,
with keys "verb_class" and "noun_class" indicating
the predictions on the verb and noun.
logits (Tensor): global average pooled features.
"""
if hasattr(self, "dropout"):
out1 = self.dropout(x)
out2 = self.dropout(x)
else:
out1 = x
out2 = x
if hasattr(self, "pre_logits1"):
out1 = self.pre_logits1(out1)
out2 = self.pre_logits2(out2)
out1 = self.linear1(out1)
out2 = self.linear2(out2)
if not self.training:
out1 = self.activation(out1)
out2 = self.activation(out2)
return {"verb_class": out1, "noun_class": out2}, x
```
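Both heads above only apply their activation outside training and also hand back the pooled features. A standalone sketch of that behaviour (hypothetical TinyHead class with assumed dimensions; the registered heads are built from the config instead):
```python
# A standalone sketch (hypothetical TinyHead, assumed dimensions) of the head
# behaviour above: dropout and an optional pre-logits MLP feed a linear classifier,
# and the softmax only runs outside training.
import torch
import torch.nn as nn

class TinyHead(nn.Module):
    def __init__(self, dim=768, num_classes=400, dropout_rate=0.5):
        super().__init__()
        self.pre_logits = nn.Sequential(nn.Linear(dim, dim), nn.Tanh())
        self.dropout = nn.Dropout(dropout_rate)
        self.linear = nn.Linear(dim, num_classes)
        self.activation = nn.Softmax(dim=-1)

    def forward(self, x):
        out = self.linear(self.pre_logits(self.dropout(x)))
        if not self.training:              # raw logits during training
            out = self.activation(out)
        return out, x                      # also return the input features

head = TinyHead()
feats = torch.rand(4, 768)
head.train()
logits, _ = head(feats)                    # unnormalised scores
head.eval()
probs, _ = head(feats)
print(logits.shape, round(float(probs[0].sum()), 4))   # torch.Size([4, 400]) 1.0
```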
#### File: module_zoo/stems/r2plus1d_stem.py
```python
import math
import torch
import torch.nn as nn
from models.base.base_blocks import Base3DStem
from models.base.base_blocks import STEM_REGISTRY
@STEM_REGISTRY.register()
class R2Plus1DStem(Base3DStem):
"""
R(2+1)D Stem.
"""
def __init__(
self,
cfg
):
super(R2Plus1DStem, self).__init__(cfg)
def _construct_block(
self,
cfg,
dim_in,
num_filters,
kernel_sz,
stride,
bn_eps=1e-5,
bn_mmt=0.1
):
mid_dim = int(
math.floor((kernel_sz[0] * kernel_sz[1] * kernel_sz[2] * dim_in * num_filters) / \
(kernel_sz[1] * kernel_sz[2] * dim_in + kernel_sz[0] * num_filters)))
self.a1 = nn.Conv3d(
in_channels = dim_in,
out_channels = mid_dim,
kernel_size = [1, kernel_sz[1], kernel_sz[2]],
stride = [1, stride[1], stride[2]],
padding = [0, kernel_sz[1]//2, kernel_sz[2]//2],
bias = False
)
self.a1_bn = nn.BatchNorm3d(mid_dim, eps=bn_eps, momentum=bn_mmt)
self.a1_relu = nn.ReLU(inplace=True)
self.a2 = nn.Conv3d(
in_channels = mid_dim,
out_channels = num_filters,
kernel_size = [kernel_sz[0], 1, 1],
stride = [stride[0], 1, 1],
padding = [kernel_sz[0]//2, 0, 0],
bias = False
)
self.a2_bn = nn.BatchNorm3d(num_filters, eps=bn_eps, momentum=bn_mmt)
self.a2_relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.a1(x)
x = self.a1_bn(x)
x = self.a1_relu(x)
x = self.a2(x)
x = self.a2_bn(x)
x = self.a2_relu(x)
return x
```
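The mid_dim formula above sizes the intermediate channels so the factorized pair roughly matches the parameter count of a full 3D convolution. A worked example with assumed kernel and channel sizes:
```python
# A worked example (kernel and channel sizes assumed) of the mid_dim formula above:
# the intermediate width is picked so the factorized (1 x kh x kw then kt x 1 x 1)
# pair costs roughly as many parameters as the full kt x kh x kw convolution.
import math

dim_in, num_filters = 3, 64
kt, kh, kw = 3, 7, 7

mid_dim = math.floor((kt * kh * kw * dim_in * num_filters) /
                     (kh * kw * dim_in + kt * num_filters))
print(mid_dim)                                          # 83

full_params = kt * kh * kw * dim_in * num_filters       # 28224
factorized_params = kh * kw * dim_in * mid_dim + kt * mid_dim * num_filters
print(full_params, factorized_params)                   # 28224 28137
```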
#### File: models/utils/localization_losses.py
```python
import torch
import numpy as np
import torch.nn.functional as F
from utils.registry import Registry
LOCALIZATION_LOSSES = Registry("Localization_Losses")
@LOCALIZATION_LOSSES.register()
def Loss_Tem(cfg, preds, logits, labels={}, cur_epoch=0):
"""
Calculate start and end loss.
Args:
preds (dict): predicted start and end sequences.
logits (Tensor): Only for placeholders, no use.
labels (Tensor): start and end sequences label.
"""
pred_start = preds['start']
pred_end = preds['end']
gt_start = labels['supervised']['start_map']
gt_end = labels['supervised']['end_map']
label_weight = torch.ones(pred_start.shape[0], device=pred_start.device)
def bi_loss(pred_score, gt_label, label_weight):
label_weight = label_weight.unsqueeze(1).expand_as(pred_score).reshape(-1)
pred_score = pred_score.view(-1)
gt_label = gt_label.view(-1)
pmask = (gt_label > 0.5).float() * label_weight
num_entries = label_weight.sum()
num_positive = torch.sum(pmask)
ratio = num_entries / num_positive
coef_0 = 0.5 * ratio / (ratio - 1)
coef_1 = 0.5 * ratio
epsilon = 0.000001
loss_pos = coef_1 * torch.log(pred_score + epsilon) * pmask * label_weight
loss_neg = coef_0 * torch.log(1.0 - pred_score + epsilon) * (1.0 - pmask) * label_weight
loss = -1 * torch.mean(loss_pos + loss_neg)
return loss
loss_start = bi_loss(pred_start, gt_start, label_weight)
loss_end = bi_loss(pred_end, gt_end, label_weight)
loss = loss_start + loss_end
return {"tem": loss}, None
@LOCALIZATION_LOSSES.register()
def Loss_BmnActionCls(cfg, preds, logits, labels={}, cur_epoch=0):
"""
Calculate the action classification loss for proposals. Note that this does not work on the EPIC-KITCHENS dataset.
Args:
preds (dict): predicted action classification maps.
logits (Tensor): Only for placeholders, no use.
labels (Tensor): classification maps label.
"""
b, c, _, _ = labels['supervised']['label_map'].shape
gt_label = labels['supervised']['label_map'].flatten(2, 3)
gt_iou_map = (labels['supervised']['iou_map'] * labels['supervised']['mask']).flatten(1, 2)
verb_map = preds['verb_map'].flatten(2, 3)
noun_map = preds['noun_map'].flatten(2, 3)
select_action = gt_iou_map >= 0.75
select_action = select_action.view(-1)
gt_label = gt_label.permute(0, 2, 1).flatten(0, 1)[select_action, :]
verb_map = verb_map.permute(0, 2, 1).flatten(0, 1)[select_action, :]
noun_map = noun_map.permute(0, 2, 1).flatten(0, 1)[select_action, :]
verb_loss = F.cross_entropy(verb_map, gt_label[:, 0])
noun_loss = F.cross_entropy(noun_map, gt_label[:, 1])
return {"verb_loss": verb_loss, "noun_loss": noun_loss}, None
@LOCALIZATION_LOSSES.register()
def Loss_PemReg(cfg, preds, logits, labels={}, cur_epoch=0):
"""
Regression loss for the confidence maps.
Args:
preds (dict): predicted regression confidence maps.
logits (Tensor): Only for placeholders, no use.
labels (Tensor): iou maps for label.
"""
pred_score = preds['confidence_map'][:, 0]
gt_iou_map = labels['supervised']['iou_map']
mask = labels['supervised']['mask']
gt_iou_map = gt_iou_map * mask
u_hmask = (gt_iou_map > cfg.LOCALIZATION.POS_REG_THRES).float()
u_mmask = ((gt_iou_map <= cfg.LOCALIZATION.POS_REG_THRES) & (gt_iou_map > cfg.LOCALIZATION.NEG_REG_THRES)).float()
u_lmask = ((gt_iou_map <= cfg.LOCALIZATION.NEG_REG_THRES) & (gt_iou_map > 0.)).float()
u_lmask = u_lmask * mask
num_h = torch.sum(u_hmask)
num_m = torch.sum(u_mmask)
num_l = torch.sum(u_lmask)
if num_m == 0:
r_m = num_h / (num_m+1)
else:
r_m = num_h / num_m
u_smmask = torch.Tensor(np.random.rand(*gt_iou_map.shape)).cuda()
u_smmask = u_mmask * u_smmask
u_smmask = (u_smmask > (1. - r_m)).float()
r_l = num_h / num_l
u_slmask = torch.Tensor(np.random.rand(*gt_iou_map.shape)).cuda()
u_slmask = u_lmask * u_slmask
u_slmask = (u_slmask > (1. - r_l)).float()
weights = u_hmask + u_smmask + u_slmask
loss = F.mse_loss(pred_score * weights, gt_iou_map * weights)
loss = 0.5 * torch.sum(loss * torch.ones(*weights.shape).cuda()) / torch.sum(weights)
if torch.isnan(loss):
stop = 1
return {"pem_reg": loss}, None
@LOCALIZATION_LOSSES.register()
def Loss_PemCls(cfg, preds, logits, labels={}, cur_epoch=0):
"""
Binary classification loss for the confidence maps.
Args:
preds (dict): predicted classification confidence maps.
logits (Tensor): Only for placeholders, no use.
labels (Tensor): iou maps for label.
"""
pred_score = preds['confidence_map'][:, 1]
gt_iou_map = labels['supervised']['iou_map']
mask = labels['supervised']['mask']
gt_iou_map = gt_iou_map * mask
pmask = (gt_iou_map > cfg.LOCALIZATION.POS_CLS_THRES).float()
nmask = (gt_iou_map <= cfg.LOCALIZATION.POS_CLS_THRES).float()
nmask = nmask * mask
num_positive = torch.sum(pmask)
num_entries = num_positive + torch.sum(nmask)
if num_positive == 0:
ratio = 0.0
else:
ratio = num_entries / num_positive
coef_0 = 0.5 * ratio / (ratio - 1)
coef_1 = 0.5 * ratio
epsilon = 0.000001
loss_pos = coef_1 * torch.log(pred_score + epsilon) * pmask
loss_neg = coef_0 * torch.log(1.0 - pred_score + epsilon) * nmask
loss = -1 * torch.sum(loss_pos + loss_neg) / num_entries
if torch.isnan(loss):
stop = 1
return {"pem_cls": loss}, None
```
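The bi_loss used by Loss_Tem (and the similar weighting in Loss_PemCls) balances positives and negatives through ratio-derived coefficients. A small numeric check with hypothetical counts:
```python
# A small numeric check (counts are hypothetical) of the class-balancing weights in
# bi_loss / Loss_PemCls above: with ratio = num_entries / num_positive, the positive
# weight coef_1 = 0.5 * ratio and negative weight coef_0 = 0.5 * ratio / (ratio - 1)
# give both classes the same total contribution to the binary cross-entropy.
num_entries, num_positive = 100.0, 20.0
num_negative = num_entries - num_positive

ratio = num_entries / num_positive           # 5.0
coef_1 = 0.5 * ratio                         # 2.5
coef_0 = 0.5 * ratio / (ratio - 1)           # 0.625

print(coef_1 * num_positive)                 # 50.0 -- total weight on positives
print(coef_0 * num_negative)                 # 50.0 -- total weight on negatives
```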
#### File: pytorch-video-understanding/runs/test.py
```python
import numpy as np
import os
import pickle
import torch
import json
import utils.bucket as bu
import utils.checkpoint as cu
import utils.distributed as du
import utils.logging as logging
import utils.misc as misc
from datasets.base.builder import build_loader
from models.base.builder import build_model
from utils.meters import TestMeter, EpicKitchenMeter
logger = logging.get_logger(__name__)
@torch.no_grad()
def perform_test(test_loader, model, test_meter, cfg):
"""
Perform multi-view test on the specified test set, where {cfg.TEST.NUM_ENSEMBLE_VIEWS}
clips and {cfg.TEST.NUM_SPATIAL_CROPS} crops are sampled temporally and spatially, forming
in total cfg.TEST.NUM_ENSEMBLE_VIEWS x cfg.TEST.NUM_SPATIAL_CROPS views.
The softmax scores are aggregated by summation.
The predictions are compared with the ground-truth labels and the accuracy is logged.
Args:
test_loader (loader): video testing loader.
model (model): the pretrained video model to test.
test_meter (TestMeter): testing meters to log and ensemble the testing
results.
cfg (Config): The global config object.
"""
# Enable eval mode.
model.eval()
test_meter.iter_tic()
res_dic = {}
for cur_iter, (inputs, labels, video_idx, meta) in enumerate(test_loader):
if misc.get_num_gpus(cfg):
# Transfer the data to the current GPU device.
for k, v in inputs.items():
if not isinstance(v, (torch.Tensor, list)):
continue
if isinstance(inputs[k], list):
for i in range(len(inputs[k])):
inputs[k][i] = v[i].cuda(non_blocking=True)
else:
inputs[k] = v.cuda(non_blocking=True)
# Transfer the labels to the current GPU device.
if isinstance(labels["supervised"], dict):
for k, v in labels["supervised"].items():
labels["supervised"][k] = v.cuda()
else:
labels["supervised"] = labels["supervised"].cuda()
video_idx = video_idx.cuda()
if cfg.PRETRAIN.ENABLE:
for k, v in labels["self-supervised"].items():
labels["self-supervised"][k] = v.cuda(non_blocking=True)
for key, val in meta.items():
if isinstance(val, (list,)):
for i in range(len(val)):
val[i] = val[i].cuda(non_blocking=True)
else:
meta[key] = val.cuda(non_blocking=True)
if cfg.PRETRAIN.ENABLE:
# currently supports MoSI test.
preds, _ = model(inputs)
if misc.get_num_gpus(cfg) > 1:
pred, label, video_idx = du.all_gather(
[preds["move_joint"], labels["self-supervised"]['move_joint'].reshape(preds["move_joint"].shape[0]), video_idx]
)
if misc.get_num_gpus(cfg):
pred = pred.cpu()
label = label.cpu()
video_idx = video_idx.cpu()
test_meter.iter_toc()
# Update and log stats.
test_meter.update_stats(
pred.detach(), label.detach(), video_idx.detach()
)
test_meter.log_iter_stats(cur_iter)
else:
# Perform the forward pass.
preds, _ = model(inputs)
if cfg.DATA.MULTI_LABEL:
# Mainly for the EPIC-KITCHENS dataset.
if misc.get_num_gpus(cfg) > 1:
preds_verb, preds_noun, labels_verb, labels_noun, video_idx = du.all_gather(
[
preds["verb_class"],
preds["noun_class"],
labels["supervised"]["verb_class"],
labels["supervised"]["noun_class"],
video_idx
]
)
else:
preds_verb = preds["verb_class"]
preds_noun = preds["noun_class"]
labels_verb = labels["supervised"]["verb_class"]
labels_noun = labels["supervised"]["noun_class"]
if misc.get_num_gpus(cfg):
preds_verb = preds_verb.cpu()
preds_noun = preds_noun.cpu()
labels_verb = labels_verb.cpu()
labels_noun = labels_noun.cpu()
video_idx = video_idx.cpu()
test_meter.iter_toc()
# Update and log stats.
test_meter.update_stats(
preds_verb.detach(),
preds_noun.detach(),
labels_verb.detach(),
labels_noun.detach(),
video_idx.detach(),
[test_loader.dataset._get_sample_info(i)["name"] for i in video_idx.tolist()] if "name" in test_loader.dataset._get_sample_info(0).keys() else []
)
test_meter.log_iter_stats(cur_iter)
else:
# Gather all the predictions across all the devices to perform ensemble.
if misc.get_num_gpus(cfg) > 1:
preds, labels_supervised, video_idx = du.all_gather(
[preds, labels["supervised"], video_idx]
)
else:
labels_supervised = labels["supervised"]
if misc.get_num_gpus(cfg):
preds = preds.cpu()
labels_supervised = labels_supervised.cpu()
video_idx = video_idx.cpu()
test_meter.iter_toc()
# Update and log stats.
test_meter.update_stats(
preds.detach(), labels_supervised.detach(), video_idx.detach()
)
test_meter.log_iter_stats(cur_iter)
test_meter.iter_tic()
# save epic-kitchens statistics
if "epickitchen100" in cfg.TEST.DATASET:
if cfg.DATA.MULTI_LABEL or not hasattr(cfg.DATA, "TRAIN_VERSION"):
verb = test_meter.video_preds["verb_class"]
noun = test_meter.video_preds["noun_class"]
file_name_verb = os.path.join(cfg.OUTPUT_DIR, cfg.TEST.LOG_FILE.split('.')[0]+"_verb" + f"{'_ema' if test_meter.model_ema_enabled else ''}" + ".pyth")
file_name_noun = os.path.join(cfg.OUTPUT_DIR, cfg.TEST.LOG_FILE.split('.')[0]+"_noun" + f"{'_ema' if test_meter.model_ema_enabled else ''}" + ".pyth")
torch.save(verb, file_name_verb)
torch.save(noun, file_name_noun)
logger.info(
"Successfully saved verb and noun results to {} and {}.".format(file_name_verb, file_name_noun)
)
elif hasattr(cfg.DATA, "TRAIN_VERSION") and cfg.DATA.TRAIN_VERSION == "only_train_verb":
file_name = os.path.join(cfg.OUTPUT_DIR, cfg.TEST.LOG_FILE.split('.')[0]+"_verb" + f"{'_ema' if test_meter.model_ema_enabled else ''}" + ".pyth")
torch.save(test_meter.video_preds, file_name)
logger.info(
"Successfully saved verb results to {}.".format(file_name)
)
elif hasattr(cfg.DATA, "TRAIN_VERSION") and cfg.DATA.TRAIN_VERSION == "only_train_noun":
file_name = os.path.join(cfg.OUTPUT_DIR, cfg.TEST.LOG_FILE.split('.')[0]+"_noun" + f"{'_ema' if test_meter.model_ema_enabled else ''}" + ".pyth")
torch.save(test_meter.video_preds, file_name)
logger.info(
"Successfully saved noun results to {}.".format(file_name)
)
test_meter.finalize_metrics()
test_meter.reset()
def test(cfg):
"""
Perform multi-view testing on the pretrained video model.
Args:
cfg (Config): The global config object.
"""
# Set up environment.
du.init_distributed_training(cfg)
# Set random seed from configs.
np.random.seed(cfg.RANDOM_SEED)
torch.manual_seed(cfg.RANDOM_SEED)
# Setup logging format.
logging.setup_logging(cfg, cfg.TEST.LOG_FILE)
# Print config.
if cfg.LOG_CONFIG_INFO:
logger.info("Test with config:")
logger.info(cfg)
# Build the video model and print model statistics.
model, model_ema = build_model(cfg)
if du.is_master_proc() and cfg.LOG_MODEL_INFO:
misc.log_model_info(model, cfg, use_train_input=False)
if cfg.OSS.ENABLE:
model_bucket_name = cfg.OSS.CHECKPOINT_OUTPUT_PATH.split('/')[2]
model_bucket = bu.initialize_bucket(cfg.OSS.KEY, cfg.OSS.SECRET, cfg.OSS.ENDPOINT, model_bucket_name)
else:
model_bucket = None
cu.load_test_checkpoint(cfg, model, model_ema, model_bucket)
# Create video testing loaders.
test_loader = build_loader(cfg, "test")
logger.info("Testing model for {} iterations".format(len(test_loader)))
assert (
len(test_loader.dataset)
% (cfg.TEST.NUM_ENSEMBLE_VIEWS * cfg.TEST.NUM_SPATIAL_CROPS)
== 0
)
# Create meters for multi-view testing.
cfg.LOG_PERIOD = max(len(test_loader) // 10, 5)
if cfg.DATA.MULTI_LABEL or hasattr(cfg.DATA, "TRAIN_VERSION"):
test_meter = EpicKitchenMeter(
cfg,
len(test_loader.dataset)
// (cfg.TEST.NUM_ENSEMBLE_VIEWS * cfg.TEST.NUM_SPATIAL_CROPS),
cfg.TEST.NUM_ENSEMBLE_VIEWS * cfg.TEST.NUM_SPATIAL_CROPS,
cfg.VIDEO.HEAD.NUM_CLASSES,
len(test_loader),
cfg.DATA.ENSEMBLE_METHOD,
)
else:
test_meter = TestMeter(
cfg,
len(test_loader.dataset)
// (cfg.TEST.NUM_ENSEMBLE_VIEWS * cfg.TEST.NUM_SPATIAL_CROPS),
cfg.TEST.NUM_ENSEMBLE_VIEWS * cfg.TEST.NUM_SPATIAL_CROPS,
cfg.VIDEO.HEAD.NUM_CLASSES,
len(test_loader),
cfg.DATA.ENSEMBLE_METHOD,
)
# Perform multi-view test on the entire dataset.
test_meter.set_model_ema_enabled(False)
perform_test(test_loader, model, test_meter, cfg)
if model_ema is not None:
test_meter.set_model_ema_enabled(True)
perform_test(test_loader, model_ema.module, test_meter, cfg)
# upload results to bucket
if model_bucket is not None:
filename = os.path.join(cfg.OUTPUT_DIR, cfg.TEST.LOG_FILE)
bu.put_to_bucket(
model_bucket,
cfg.OSS.CHECKPOINT_OUTPUT_PATH + 'log/',
filename,
cfg.OSS.CHECKPOINT_OUTPUT_PATH.split('/')[2]
)
result_file_name = cfg.TEST.LOG_FILE
result_file_name = result_file_name.split('.')[0] + "_res" + ".json"
filename = os.path.join(cfg.OUTPUT_DIR, result_file_name)
bu.put_to_bucket(
model_bucket,
cfg.OSS.CHECKPOINT_OUTPUT_PATH + 'log/',
filename,
cfg.OSS.CHECKPOINT_OUTPUT_PATH.split('/')[2]
)
result_file_name = cfg.TEST.LOG_FILE
result_file_name = result_file_name.split('.')[0] + "_res_ema" + ".json"
filename = os.path.join(cfg.OUTPUT_DIR, result_file_name)
if os.path.exists(filename):
bu.put_to_bucket(
model_bucket,
cfg.OSS.CHECKPOINT_OUTPUT_PATH + 'log/',
filename,
cfg.OSS.CHECKPOINT_OUTPUT_PATH.split('/')[2]
)
if os.path.exists(os.path.join(cfg.OUTPUT_DIR, cfg.TEST.LOG_FILE.split('.')[0]+"_verb.pyth")):
filename = os.path.join(cfg.OUTPUT_DIR, cfg.TEST.LOG_FILE.split('.')[0]+"_verb.pyth")
bu.put_to_bucket(
model_bucket,
cfg.OSS.CHECKPOINT_OUTPUT_PATH + 'log/',
filename,
cfg.OSS.CHECKPOINT_OUTPUT_PATH.split('/')[2]
)
if os.path.exists(os.path.join(cfg.OUTPUT_DIR, cfg.TEST.LOG_FILE.split('.')[0]+"_verb_ema.pyth")):
filename = os.path.join(cfg.OUTPUT_DIR, cfg.TEST.LOG_FILE.split('.')[0]+"_verb_ema.pyth")
bu.put_to_bucket(
model_bucket,
cfg.OSS.CHECKPOINT_OUTPUT_PATH + 'log/',
filename,
cfg.OSS.CHECKPOINT_OUTPUT_PATH.split('/')[2]
)
if os.path.exists(os.path.join(cfg.OUTPUT_DIR, cfg.TEST.LOG_FILE.split('.')[0]+"_noun.pyth")):
filename = os.path.join(cfg.OUTPUT_DIR, cfg.TEST.LOG_FILE.split('.')[0]+"_noun.pyth")
bu.put_to_bucket(
model_bucket,
cfg.OSS.CHECKPOINT_OUTPUT_PATH + 'log/',
filename,
cfg.OSS.CHECKPOINT_OUTPUT_PATH.split('/')[2]
)
if os.path.exists(os.path.join(cfg.OUTPUT_DIR, cfg.TEST.LOG_FILE.split('.')[0]+"_noun_ema.pyth")):
filename = os.path.join(cfg.OUTPUT_DIR, cfg.TEST.LOG_FILE.split('.')[0]+"_noun_ema.pyth")
bu.put_to_bucket(
model_bucket,
cfg.OSS.CHECKPOINT_OUTPUT_PATH + 'log/',
filename,
cfg.OSS.CHECKPOINT_OUTPUT_PATH.split('/')[2]
)
# synchronize all processes on different GPUs to prevent collapsing
du.synchronize()
```
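perform_test relies on the convention that consecutive clip indices belong to the same video. A minimal sketch (view counts are hypothetical) of how the meters recover video and view indices from a flat clip index:
```python
# A minimal sketch (view counts are hypothetical) of the multi-view bookkeeping:
# each video contributes NUM_ENSEMBLE_VIEWS * NUM_SPATIAL_CROPS clips, and the
# meters recover the video and view indices from the flat clip index.
num_ensemble_views = 10
num_spatial_crops = 3
num_clips = num_ensemble_views * num_spatial_crops    # 30 views per video

for clip_id in (0, 29, 30, 61):
    vid_id = clip_id // num_clips     # which video the prediction belongs to
    view_id = clip_id % num_clips     # which of the 30 views it is
    print(clip_id, vid_id, view_id)   # 0->(0,0)  29->(0,29)  30->(1,0)  61->(2,1)
```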
#### File: pytorch-video-understanding/utils/meters.py
```python
import datetime
import numpy as np
import os
from collections import defaultdict, deque
import torch
from utils.timer import Timer
import utils.logging as logging
import utils.metrics as metrics
import utils.misc as misc
import utils.distributed as du
logger = logging.get_logger(__name__)
class TestMeter(object):
"""
Perform the multi-view ensemble for testing: each video with a unique index
will be sampled with multiple clips, and the predictions of the clips will
be aggregated to produce the final prediction for the video.
The accuracy is calculated with the given ground truth labels.
"""
def __init__(
self,
cfg,
num_videos,
num_clips,
num_cls,
overall_iters,
ensemble_method="sum",
):
"""
Construct tensors to store the predictions and labels. Expect to get
num_clips predictions from each video, and calculate the metrics on
num_videos videos.
Args:
num_videos (int): number of videos to test.
num_clips (int): number of clips sampled from each video for
aggregating the final prediction for the video.
num_cls (int): number of classes for each prediction.
overall_iters (int): overall iterations for testing.
multi_label (bool): if True, use map as the metric.
ensemble_method (str): method to perform the ensemble, options
include "sum", and "max".
"""
self.cfg = cfg
self.iter_timer = Timer()
self.num_clips = num_clips
self.overall_iters = overall_iters
self.ensemble_method = ensemble_method
# Initialize tensors.
self.video_preds = torch.zeros((num_videos, num_cls))
self.video_labels = (
torch.zeros((num_videos)).long()
)
self.clip_count = torch.zeros((num_videos)).long()
self.clip_indices = torch.linspace(0, num_videos-1, num_videos).long()
self.model_ema_enabled = False
# Reset metric.
self.reset()
def reset(self):
"""
Reset the metric.
"""
self.clip_count.zero_()
self.video_preds.zero_()
self.video_labels.zero_()
def update_stats(self, preds, labels, clip_ids):
"""
Collect the predictions from the current batch and perform on-the-fly
summation as ensemble.
Args:
preds (tensor): predictions from the current batch. Dimension is
N x C where N is the batch size and C is the channel size
(num_cls).
labels (tensor): the corresponding labels of the current batch.
Dimension is N.
clip_ids (tensor): clip indexes of the current batch, dimension is
N.
"""
for ind in range(preds.shape[0]):
vid_id = int(clip_ids[ind]) // self.num_clips
if self.video_labels[vid_id].sum() > 0:
assert torch.equal(
self.video_labels[vid_id].type(torch.FloatTensor),
labels[ind].type(torch.FloatTensor),
)
self.video_labels[vid_id] = labels[ind]
if self.ensemble_method == "sum":
self.video_preds[vid_id] += preds[ind]
elif self.ensemble_method == "max":
self.video_preds[vid_id] = torch.max(
self.video_preds[vid_id], preds[ind]
)
else:
raise NotImplementedError(
"Ensemble Method {} is not supported".format(
self.ensemble_method
)
)
self.clip_count[vid_id] += 1
def log_iter_stats(self, cur_iter):
"""
Log the stats.
Args:
cur_iter (int): the current iteration of testing.
"""
if (cur_iter + 1) % self.cfg.LOG_PERIOD != 0:
return
eta_sec = self.iter_timer.seconds() * (self.overall_iters - cur_iter)
eta = str(datetime.timedelta(seconds=int(eta_sec)))
stats = {
"split": "test_iter" if not self.model_ema_enabled else "ema_test_iter",
"cur_iter": "{}".format(cur_iter + 1),
"eta": eta,
"time_diff": self.iter_timer.seconds(),
}
logging.log_json_stats(stats)
def iter_tic(self):
self.iter_timer.reset()
def iter_toc(self):
self.iter_timer.pause()
def finalize_metrics(self, ks=(1, 5)):
"""
Calculate and log the final ensembled metrics.
ks (tuple): list of top-k values for topk_accuracies. For example,
ks = (1, 5) corresponds to top-1 and top-5 accuracy.
"""
if not all(self.clip_count == self.num_clips):
logger.warning(
"clip count {} ~= num clips {}".format(
", ".join(
[
# "{}: {}".format(i, k)
# for i, k in enumerate(self.clip_count.tolist())
"{}: {}".format(ind, self.clip_count[ind]) for idx, ind in enumerate(self.clip_indices[self.clip_count!=self.num_clips].tolist())
]
),
self.num_clips,
)
)
stats = {"split": "test_final" if not self.model_ema_enabled else "ema_test_final"}
num_topks_correct = metrics.topks_correct(
self.video_preds, self.video_labels, ks
)
topks = [
(x / self.video_preds.size(0)) * 100.0
for x in num_topks_correct
]
assert len({len(ks), len(topks)}) == 1
for k, topk in zip(ks, topks):
stats["top{}_acc".format(k)] = "{:.{prec}f}".format(
topk, prec=2
)
logging.log_json_stats(stats)
def set_model_ema_enabled(self, model_ema_enabled):
self.model_ema_enabled = model_ema_enabled
class EpicKitchenMeter(object):
"""
Perform the multi-view ensemble for testing: each video with a unique index
will be sampled with multiple clips, and the predictions of the clips will
be aggregated to produce the final prediction for the video.
The accuracy is calculated with the given ground truth labels.
The EpicKitchenMeter specifically caters to the needs of the EPIC-KITCHENS
dataset, where verbs and nouns are predicted first and the action prediction is
then derived from those two predictions.
"""
def __init__(
self,
cfg,
num_videos,
num_clips,
num_cls,
overall_iters,
ensemble_method="sum",
):
"""
Construct tensors to store the predictions and labels. Expect to get
num_clips predictions from each video, and calculate the metrics on
num_videos videos.
Args:
cfg (Config): the global config object.
num_videos (int): number of videos to test.
num_clips (int): number of clips sampled from each video for
aggregating the final prediction for the video.
num_cls (int): number of classes for each prediction.
overall_iters (int): overall iterations for testing.
multi_label (bool): if True, use map as the metric.
ensemble_method (str): method to perform the ensemble, options
include "sum", and "max".
"""
self.cfg = cfg
self.iter_timer = Timer()
self.num_clips = num_clips
self.num_videos = num_videos
self.overall_iters = overall_iters
self.ensemble_method = ensemble_method
assert self.ensemble_method in ["sum", "max"], f"Ensemble Method {ensemble_method} is not supported"
if cfg.DATA.MULTI_LABEL or not hasattr(cfg.DATA, "TRAIN_VERSION"):
# Initialize tensors.
self.video_preds = {
"verb_class": torch.zeros((num_videos, self.num_clips, num_cls[0])),
"noun_class": torch.zeros((num_videos, self.num_clips, num_cls[1])),
"action_class_ind_pred": torch.zeros((num_videos, self.num_clips, num_cls[0]*num_cls[1]))
}
self.video_labels = {
"verb_class": torch.zeros((num_videos)), # verb
"noun_class": torch.zeros((num_videos)), # noun
"action_class_ind_pred": torch.zeros((num_videos)),
}
self.update_stats = self.update_stats_multi_label
self.finalize_metrics = self.finalize_metrics_multi_label
elif hasattr(cfg.DATA, "TRAIN_VERSION") and cfg.DATA.TRAIN_VERSION in ["only_train_verb", "only_train_noun"]:
self.video_preds = torch.zeros((num_videos, self.num_clips, num_cls))
self.video_labels = torch.zeros((num_videos))
self.update_stats = self.update_stats_separate_label
self.finalize_metrics = self.finalize_metrics_separate_label
else: raise NotImplementedError
self.video_names = {i: "" for i in range(num_videos)}
self.clip_count = torch.zeros((num_videos)).long()
self.clip_indices = torch.linspace(0, num_videos-1, num_videos).long()
# Reset metric.
self.reset()
def reset(self):
"""
Reset the metric.
"""
self.clip_count.zero_()
if isinstance(self.video_preds, dict):
for k, v in self.video_preds.items():
v.zero_()
for k, v in self.video_labels.items():
v.zero_()
else:
self.video_preds.zero_()
self.video_labels.zero_()
def update_stats_separate_label(self, preds, labels, clip_ids):
"""
Collect the predictions from the current batch and perform on-the-fly
summation as ensemble, for separate verb and noun training.
Args:
preds (tensor): predictions from the current batch. Dimension is
N x C where N is the batch size and C is the channel size
(num_cls).
labels (tensor): the corresponding labels of the current batch.
Dimension is N.
clip_ids (tensor): clip indexes of the current batch, dimension is
N.
"""
for ind in range(preds.shape[0]):
vid_id = int(clip_ids[ind]) // self.num_clips
view_id = int(clip_ids[ind]) % self.num_clips
if self.video_labels[vid_id].sum() > 0:
assert torch.equal(
self.video_labels[vid_id].type(torch.FloatTensor),
labels[ind].type(torch.FloatTensor),
)
self.video_labels[vid_id] = labels[ind]
self.video_preds[vid_id][view_id] = preds[ind]
self.clip_count[vid_id] += 1
def update_stats_multi_label(self, preds_verb, preds_noun, labels_verb, labels_noun, clip_ids, names=[]):
"""
Collect the predictions from the current batch and perform on-the-fly
summation as ensemble, for joint verb and noun training.
Args:
preds_verb (tensor): verb predictions from the current batch. Dimension is
N x C where N is the batch size and C is the channel size
(num_cls[0]).
preds_noun (tensor): noun predictions from the current batch. Dimension is
N x C where N is the batch size and C is the channel size
(num_cls[1]).
labels_verb (tensor): the corresponding verb labels of the current batch.
Dimension is N.
labels_noun (tensor): the corresponding noun labels of the current batch.
Dimension is N.
clip_ids (tensor): clip indexes of the current batch, dimension is
N.
names (list): list of video names.
"""
for ind in range(preds_verb.shape[0]):
vid_id = int(clip_ids[ind]) // self.num_clips
view_id = int(clip_ids[ind]) % self.num_clips
if self.video_labels["verb_class"][vid_id].sum() > 0:
assert torch.equal(
self.video_labels["verb_class"][vid_id].type(torch.FloatTensor),
labels_verb[ind].type(torch.FloatTensor),
)
assert torch.equal(
self.video_labels["noun_class"][vid_id].type(torch.FloatTensor),
labels_noun[ind].type(torch.FloatTensor),
)
if len(names) > 0:
if self.video_names[vid_id] != "":
assert self.video_names[vid_id] == names[ind], \
f"For {vid_id}, its name {self.video_names[vid_id]} should be equal to {names[ind]}"
else:
self.video_names[vid_id] = names[ind]
self.video_labels["verb_class"][vid_id] = labels_verb[ind]
self.video_labels["noun_class"][vid_id] = labels_noun[ind]
self.video_labels["action_class_ind_pred"][vid_id] = labels_verb[ind] * preds_noun.shape[1] + labels_noun[ind]
self.video_preds["verb_class"][vid_id][view_id] = preds_verb[ind]
self.video_preds["noun_class"][vid_id][view_id] = preds_noun[ind]
self.video_preds["action_class_ind_pred"][vid_id][view_id] = (preds_verb[ind].unsqueeze(-1) * preds_noun[ind].unsqueeze(-2)).reshape(-1)
self.clip_count[vid_id] += 1
def log_iter_stats(self, cur_iter):
"""
Log the stats.
Args:
cur_iter (int): the current iteration of testing.
"""
if (cur_iter + 1) % self.cfg.LOG_PERIOD != 0:
return
eta_sec = self.iter_timer.seconds() * (self.overall_iters - cur_iter)
eta = str(datetime.timedelta(seconds=int(eta_sec)))
stats = {
"split": "test_iter" if not self.model_ema_enabled else "ema_test_iter",
"cur_iter": "{}".format(cur_iter + 1),
"eta": eta,
"time_diff": self.iter_timer.seconds(),
}
logging.log_json_stats(stats)
def iter_tic(self):
self.iter_timer.reset()
def iter_toc(self):
self.iter_timer.pause()
def finalize_metrics_multi_label(self, ks=(1, 5)):
"""
Calculate and log the final ensembled metrics for joint verb and
noun training.
ks (tuple): list of top-k values for topk_accuracies. For example,
ks = (1, 5) corresponds to top-1 and top-5 accuracy.
"""
if not all(self.clip_count == self.num_clips):
logger.warning(
"clip count {} ~= num clips {}".format(
", ".join(
[
# "{}: {}".format(i, k)
# for i, k in enumerate(self.clip_count.tolist())
"{}: {}".format(ind, self.clip_count[ind]) for idx, ind in enumerate(self.clip_indices[self.clip_count!=self.num_clips].tolist())
]
),
self.num_clips,
)
)
stats = {"split": "test_final" if not self.model_ema_enabled else "ema_test_final"}
video_preds = {}
if self.ensemble_method == "sum":
video_preds["verb_class"] = self.video_preds["verb_class"].sum(1)
video_preds["noun_class"] = self.video_preds["noun_class"].sum(1)
video_preds["action_class_ind_pred"] = self.video_preds["action_class_ind_pred"].sum(1)
elif self.ensemble_method == "max":
video_preds["verb_class"] = self.video_preds["verb_class"].max(1)[0]
video_preds["noun_class"] = self.video_preds["noun_class"].max(1)[0]
video_preds["action_class_ind_pred"] = self.video_preds["action_class_ind_pred"].max(1)[0]
num_topks_correct, b = metrics.joint_topks_correct(
video_preds, self.video_labels, ks
)
for name, v in num_topks_correct.items():
topks = [ (x / b) * 100.0 for x in v ]
assert len({len(ks), len(topks)}) == 1
for k, topk in zip(ks, topks):
stats["top_{}_acc_{}".format(name, k)] = "{:.{prec}f}".format(
topk, prec=2
)
logging.log_json_stats(stats)
def finalize_metrics_separate_label(self, ks=(1, 5)):
"""
Calculate and log the final ensembled metrics, for separate verb
and noun training.
ks (tuple): list of top-k values for topk_accuracies. For example,
ks = (1, 5) corresponds to top-1 and top-5 accuracy.
"""
if not all(self.clip_count == self.num_clips):
logger.warning(
"clip count {} ~= num clips {}".format(
", ".join(
[
"{}: {}".format(ind, self.clip_count[ind]) for idx, ind in enumerate(self.clip_indices[self.clip_count!=self.num_clips].tolist())
]
),
self.num_clips,
)
)
stats = {"split": "test_final" if not self.model_ema_enabled else "ema_test_final"}
if self.ensemble_method == "sum":
video_preds = self.video_preds.sum(1)
elif self.ensemble_method == "max":
video_preds = self.video_preds.max(1)[0]
num_topks_correct = metrics.topks_correct(
video_preds, self.video_labels, ks
)
topks = [
(x / self.video_preds.size(0)) * 100.0
for x in num_topks_correct
]
assert len({len(ks), len(topks)}) == 1
for k, topk in zip(ks, topks):
stats["top{}_acc".format(k)] = "{:.{prec}f}".format(
topk, prec=2
)
logging.log_json_stats(stats)
def set_model_ema_enabled(self, model_ema_enabled):
"""
Whether the meter logs for ema models or not.
Args:
model_ema_enabled (bool): indicator of whether ema model
is enabled.
"""
self.model_ema_enabled = model_ema_enabled
def get_video_preds(self):
"""
Returns the saved video predictions.
"""
video_preds = {}
if self.ensemble_method == "sum":
video_preds["verb_class"] = self.video_preds["verb_class"].sum(1)
video_preds["noun_class"] = self.video_preds["noun_class"].sum(1)
video_preds["action_class_ind_pred"] = self.video_preds["action_class_ind_pred"].sum(1)
elif self.ensemble_method == "max":
video_preds["verb_class"] = self.video_preds["verb_class"].max(1)[0]
video_preds["noun_class"] = self.video_preds["noun_class"].max(1)[0]
video_preds["action_class_ind_pred"] = self.video_preds["action_class_ind_pred"].max(1)[0]
return video_preds
class ScalarMeter(object):
"""
A scalar meter uses a deque to track a series of scalar values with a given
window size. It supports calculating the median and average values of the
window, and also supports calculating the global average.
"""
def __init__(self, window_size=10):
"""
Args:
window_size (int): size of the max length of the deque.
"""
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
def reset(self):
"""
Reset the deque.
"""
self.deque.clear()
self.total = 0.0
self.count = 0
def add_value(self, value):
"""
Add a new scalar value to the deque.
"""
self.deque.append(value)
self.count += 1
self.total += value
def get_win_median(self):
"""
Calculate the current median value of the deque.
"""
return np.median(self.deque)
def get_win_avg(self):
"""
Calculate the current average value of the deque.
"""
return np.mean(self.deque)
def get_global_avg(self):
"""
Calculate the global mean value.
"""
return self.total / self.count
class TrainMeter(object):
"""
Measure training stats.
"""
def __init__(self, epoch_iters, cfg):
"""
Args:
epoch_iters (int): the overall number of iterations of one epoch.
cfg (Config): the global config object.
"""
self._cfg = cfg
self.epoch_iters = epoch_iters
self.MAX_EPOCH = cfg.OPTIMIZER.MAX_EPOCH * epoch_iters
self.iter_timer = Timer()
self.loss = ScalarMeter(cfg.LOG_PERIOD)
self.loss_total = 0.0
self.lr = None
# Current minibatch errors (smoothed over a window).
self.mb_top1_err = ScalarMeter(cfg.LOG_PERIOD)
self.mb_top5_err = ScalarMeter(cfg.LOG_PERIOD)
# Number of misclassified examples.
self.num_top1_mis = 0
self.num_top5_mis = 0
self.num_samples = 0
self.opts = defaultdict(ScalarMeter)
def reset(self):
"""
Reset the Meter.
"""
self.loss.reset()
self.loss_total = 0.0
self.lr = None
self.mb_top1_err.reset()
self.mb_top5_err.reset()
self.num_top1_mis = 0
self.num_top5_mis = 0
self.num_samples = 0
self.opts = defaultdict(ScalarMeter)
def iter_tic(self):
"""
Start recording time.
"""
self.iter_timer.reset()
def iter_toc(self):
"""
Stop recording time.
"""
self.iter_timer.pause()
def update_stats(self, top1_err, top5_err, loss, lr, mb_size, **kwargs):
"""
Update the current stats.
Args:
top1_err (float): top1 error rate.
top5_err (float): top5 error rate.
loss (float): loss value.
lr (float): learning rate.
mb_size (int): mini batch size.
"""
self.loss.add_value(loss)
self.lr = lr
self.loss_total += loss * mb_size
self.num_samples += mb_size
for k,v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.opts[k].add_value(v)
if not self._cfg.PRETRAIN.ENABLE and not self._cfg.LOCALIZATION.ENABLE:
# Current minibatch stats
self.mb_top1_err.add_value(top1_err)
self.mb_top5_err.add_value(top5_err)
# Aggregate stats
self.num_top1_mis += top1_err * mb_size
self.num_top5_mis += top5_err * mb_size
def update_custom_stats(self, stats):
"""
Update stats using custom keys.
Args:
stats (dict): additional stats to be updated.
"""
for k,v in stats.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.opts[k].add_value(v)
def log_iter_stats(self, cur_epoch, cur_iter):
"""
Log the stats of the current iteration.
Args:
cur_epoch (int): the number of current epoch.
cur_iter (int): the number of current iteration.
"""
if (cur_iter + 1) % self._cfg.LOG_PERIOD != 0:
return
eta_sec = self.iter_timer.seconds() * (
self.MAX_EPOCH - (cur_epoch * self.epoch_iters + cur_iter + 1)
)
eta = str(datetime.timedelta(seconds=int(eta_sec)))
stats = {
"_type": "train_iter",
"epoch": "{}/{}".format(cur_epoch + 1, self._cfg.OPTIMIZER.MAX_EPOCH),
"iter": "{}/{}".format(cur_iter + 1, self.epoch_iters),
"time_diff": self.iter_timer.seconds(),
"eta": eta,
"loss": self.loss.get_win_median(),
"lr": self.lr,
# "gpu_mem": "{:.2f} GB".format(misc.gpu_mem_usage()),
}
for k,v in self.opts.items():
stats[k] = v.get_win_median()
if not self._cfg.PRETRAIN.ENABLE and not self._cfg.LOCALIZATION.ENABLE:
stats["top1_err"] = self.mb_top1_err.get_win_median()
stats["top5_err"] = self.mb_top5_err.get_win_median()
logging.log_json_stats(stats)
def log_epoch_stats(self, cur_epoch):
"""
Log the stats of the current epoch.
Args:
cur_epoch (int): the number of current epoch.
"""
eta_sec = self.iter_timer.seconds() * (
self.MAX_EPOCH - (cur_epoch + 1) * self.epoch_iters
)
eta = str(datetime.timedelta(seconds=int(eta_sec)))
stats = {
"_type": "train_epoch",
"epoch": "{}/{}".format(cur_epoch + 1, self._cfg.OPTIMIZER.MAX_EPOCH),
"time_diff": self.iter_timer.seconds(),
"eta": eta,
"lr": self.lr,
"gpu_mem": "{:.2f} GB".format(misc.gpu_mem_usage()),
"RAM": "{:.2f}/{:.2f} GB".format(*misc.cpu_mem_usage()),
}
for k,v in self.opts.items():
stats[k] = v.get_global_avg()
if not self._cfg.PRETRAIN.ENABLE:
top1_err = self.num_top1_mis / self.num_samples
top5_err = self.num_top5_mis / self.num_samples
avg_loss = self.loss_total / self.num_samples
stats["top1_err"] = top1_err
stats["top5_err"] = top5_err
stats["loss"] = avg_loss
logging.log_json_stats(stats)
class ValMeter(object):
"""
Measures validation stats.
"""
def __init__(self, max_iter, cfg):
"""
Args:
max_iter (int): the max number of iteration of the current epoch.
cfg (Config): the global config object.
"""
self._cfg = cfg
self.max_iter = max_iter
self.iter_timer = Timer()
# Current minibatch errors (smoothed over a window).
self.mb_top1_err = ScalarMeter(cfg.LOG_PERIOD)
self.mb_top5_err = ScalarMeter(cfg.LOG_PERIOD)
# Min errors (over the full val set).
self.min_top1_err = 100.0
self.min_top5_err = 100.0
# Number of misclassified examples.
self.num_top1_mis = 0
self.num_top5_mis = 0
self.num_samples = 0
self.all_preds = []
self.all_labels = []
self.model_ema_enabled = False
self.opts = defaultdict(ScalarMeter)
def reset(self):
"""
Reset the Meter.
"""
self.iter_timer.reset()
self.mb_top1_err.reset()
self.mb_top5_err.reset()
self.num_top1_mis = 0
self.num_top5_mis = 0
self.num_samples = 0
self.all_preds = []
self.all_labels = []
self.opts = defaultdict(ScalarMeter)
def iter_tic(self):
"""
Start recording time.
"""
self.iter_timer.reset()
def iter_toc(self):
"""
Stop recording time.
"""
self.iter_timer.pause()
def update_stats(self, top1_err, top5_err, mb_size, **kwargs):
"""
Update the current stats.
Args:
top1_err (float): top1 error rate.
top5_err (float): top5 error rate.
mb_size (int): mini batch size.
"""
for k,v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
self.opts[k].add_value(v)
self.mb_top1_err.add_value(top1_err)
self.mb_top5_err.add_value(top5_err)
self.num_top1_mis += top1_err * mb_size
self.num_top5_mis += top5_err * mb_size
self.num_samples += mb_size
def update_custom_stats(self, stats):
"""
Update stats using custom keys.
Args:
stats (dict): additional stats to be updated.
"""
for k,v in stats.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.opts[k].add_value(v)
def update_predictions(self, preds, labels):
"""
Update predictions and labels.
Args:
preds (tensor): model output predictions.
labels (tensor): labels.
"""
# TODO: merge update_prediction with update_stats.
self.all_preds.append(preds)
self.all_labels.append(labels)
def log_iter_stats(self, cur_epoch, cur_iter):
"""
Log the stats of the current iteration.
Args:
cur_epoch (int): the number of current epoch.
cur_iter (int): the number of current iteration.
"""
if (cur_iter + 1) % self._cfg.LOG_PERIOD != 0:
return
eta_sec = self.iter_timer.seconds() * (self.max_iter - cur_iter - 1)
eta = str(datetime.timedelta(seconds=int(eta_sec)))
stats = {
"_type": "val_iter" if not self.model_ema_enabled else "ema_val_iter",
"epoch": "{}/{}".format(cur_epoch + 1, self._cfg.OPTIMIZER.MAX_EPOCH),
"iter": "{}/{}".format(cur_iter + 1, self.max_iter),
"time_diff": self.iter_timer.seconds(),
"eta": eta,
"gpu_mem": "{:.2f} GB".format(misc.gpu_mem_usage()),
}
for k,v in self.opts.items():
stats[k] = v.get_win_median()
stats["top1_err"] = self.mb_top1_err.get_win_median()
stats["top5_err"] = self.mb_top5_err.get_win_median()
logging.log_json_stats(stats)
def log_epoch_stats(self, cur_epoch):
"""
Log the stats of the current epoch.
Args:
cur_epoch (int): the number of current epoch.
"""
stats = {
"_type": "val_epoch" if not self.model_ema_enabled else "ema_val_epoch",
"epoch": "{}/{}".format(cur_epoch + 1, self._cfg.OPTIMIZER.MAX_EPOCH),
"time_diff": self.iter_timer.seconds(),
"gpu_mem": "{:.2f} GB".format(misc.gpu_mem_usage()),
"RAM": "{:.2f}/{:.2f} GB".format(*misc.cpu_mem_usage()),
}
for k,v in self.opts.items():
if "top1_err" in k or "top5_err" in k:
stats[k] = v.get_win_median()
else:
stats[k] = v.get_global_avg()
top1_err = self.num_top1_mis / self.num_samples
top5_err = self.num_top5_mis / self.num_samples
self.min_top1_err = min(self.min_top1_err, top1_err)
self.min_top5_err = min(self.min_top5_err, top5_err)
stats["top1_err"] = top1_err
stats["top5_err"] = top5_err
stats["min_top1_err"] = self.min_top1_err
stats["min_top5_err"] = self.min_top5_err
logging.log_json_stats(stats)
def set_model_ema_enabled(self, model_ema_enabled):
self.model_ema_enabled = model_ema_enabled
```
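update_stats_multi_label above builds its joint action score as the flattened outer product of the verb and noun predictions, so that the index layout matches the action label verb * num_noun + noun. A minimal sketch with hypothetical class counts:
```python
# A minimal sketch (class counts are hypothetical) of the joint action score built
# in update_stats_multi_label: the flattened outer product of verb and noun scores
# places the pair (v, n) at index v * num_noun + n, matching the action label.
import torch

num_verb, num_noun = 4, 5
verb = torch.softmax(torch.rand(num_verb), dim=0)
noun = torch.softmax(torch.rand(num_noun), dim=0)

action = (verb.unsqueeze(-1) * noun.unsqueeze(-2)).reshape(-1)   # shape (20,)
v, n = 2, 3
assert torch.isclose(action[v * num_noun + n], verb[v] * noun[n])
print(action.shape)                                              # torch.Size([20])
```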
#### File: EssentialMC2/tools/train.py
```python
import argparse
import os
import os.path as osp
import time
from functools import partial
import torch.cuda
from torch.nn.parallel import DataParallel, DistributedDataParallel
from torch.utils.data import DataLoader, DistributedSampler
from essmc2 import Config, DATASETS, MODELS, SOLVERS, get_logger
from essmc2.utils.collate import gpu_batch_collate
from essmc2.utils.distribute import init_dist, get_dist_info
from essmc2.utils.ext_module import import_ext_module
from essmc2.utils.file_systems import FS, LocalFs
from essmc2.utils.logger import init_logger
from essmc2.utils.random import set_random_seed
from essmc2.utils.sampler import MultiFoldDistributedSampler
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--config", required=True, type=str)
parser.add_argument("--work_dir", default="./work_dir", type=str)
parser.add_argument("--seed", default=123, type=int)
parser.add_argument("--resume_from", type=str)
parser.add_argument("--data_root_dir", type=str)
parser.add_argument("--annotation_dir", type=str)
parser.add_argument("--backbone_pretrain", type=str)
parser.add_argument("--dist_launcher", type=str, help="""
Distributed launcher, e.g.
pytorch (uses torch.distributed.launch),
pai (Platform of Artificial Intelligence in Alibaba),
slurm,
...
""")
parser.add_argument("--dist_backend", default="nccl", type=str, help="""
Distribute backend, etc
nccl(default),
gloo,
accl(Powered by pai and ais),
...
""")
parser.add_argument("--local_rank", default=-1, type=int, help="""
Argument passed by torch.distributed.launch or other distributed launch systems.
""")
parser.add_argument("--ext_module", default="", type=str, help="""
Path of an extension module to import, so that the custom modules defined inside it are registered.
""")
parser.add_argument("--user_parameters", type=str, help="""
User script to modify cfg, given as a short string such as 'cfg.a=100;cfg.b=200;'.
""")
return parser.parse_args()
def get_model(cfg, logger):
model = MODELS.build(cfg.model)
if cfg.dist.distributed and cfg.dist.get("sync_bn") is True:
logger.info("Convert BatchNorm to Synchronized BatchNorm...")
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
model = model.cuda()
if cfg.dist.distributed:
model = DistributedDataParallel(model,
device_ids=[torch.cuda.current_device()])
else:
model = DataParallel(model)
return model
def get_data(cfg, logger):
use_gpu_preprocess = False
rank, world_size = get_dist_info()
if cfg.dist.distributed and cfg.dist["dist_launcher"] == "pytorch":
# when using a single machine with multiple cards,
# make sure the TensorToGPU operation runs on the correct card
pipeline = cfg.data['train']['dataset']['pipeline']
for p in pipeline:
if p["type"] == "TensorToGPU":
p["device_id"] = rank
use_gpu_preprocess = True
if "eval" in cfg.data:
eval_pipeline = cfg.data["eval"]["dataset"]["pipeline"]
for p in eval_pipeline:
if p["type"] == "TensorToGPU":
p["device_id"] = rank
train_dataset = DATASETS.build(cfg.data['train']['dataset'], fs_cfg=cfg.get("file_systems"))
logger.info(f"Built train dataset {train_dataset}")
if "eval" in cfg.data:
eval_dataset = DATASETS.build(cfg.data["eval"]["dataset"], fs_cfg=cfg.get("file_systems"))
logger.info(f"Built eval dataset {eval_dataset}")
else:
eval_dataset = None
# Load Dataloader
pin_memory = cfg.data.get("pin_memory") or False
if cfg.dist.distributed:
if (cfg.data["train"].get("num_folds") or 1) > 1:
num_folds = cfg.data["train"].get("num_folds")
train_sampler = MultiFoldDistributedSampler(train_dataset, num_folds, world_size, rank,
shuffle=True)
else:
train_sampler = DistributedSampler(train_dataset, world_size, rank, shuffle=True)
collate_fn = partial(gpu_batch_collate, device_id=rank) \
if cfg.dist["dist_launcher"] == "pytorch" and use_gpu_preprocess else None
train_dataloader = DataLoader(
train_dataset,
batch_size=cfg.data['train']['samples_per_gpu'],
shuffle=False,
sampler=train_sampler,
num_workers=cfg.data['train']['workers_per_gpu'],
pin_memory=pin_memory,
drop_last=True,
collate_fn=collate_fn
)
else:
train_dataloader = DataLoader(
train_dataset,
batch_size=cfg.data['train']['samples_per_gpu'],
shuffle=True,
sampler=None,
num_workers=cfg.data['train']['workers_per_gpu'],
pin_memory=pin_memory,
)
logger.info(f"Built train dataloader {len(train_dataloader)}")
if eval_dataset is not None:
eval_dataloader = DataLoader(
eval_dataset,
batch_size=cfg.data['eval']['samples_per_gpu'],
shuffle=False,
sampler=None,
num_workers=cfg.data['eval']['workers_per_gpu'],
pin_memory=pin_memory,
drop_last=False
)
logger.info(f"Built eval dataloader {len(eval_dataloader)}")
else:
eval_dataloader = None
data = dict(train=train_dataloader)
if eval_dataloader:
data["val"] = eval_dataloader
return data
def main():
args = parse_args()
# Load extension modules
if args.ext_module is not None:
import_ext_module(args.ext_module)
# Load config file
cfg = Config.load_file(args.config)
# Load user script to modify cfg
if args.user_parameters is not None:
try:
exec(args.user_parameters)
except Exception as e:
raise Exception(f"Invoke {args.user_parameters} failed for Reason: {e}")
# ------ Change config by args and specify task ----- #
# # Change distribute config
if cfg.get("dist") is None:
cfg.dist = dict(distributed=False)
if args.dist_launcher:
assert args.dist_launcher in ("pytorch", "pai", "slurm")
cfg.dist["dist_launcher"] = args.dist_launcher
cfg.dist["dist_backend"] = args.dist_backend
cfg.dist.distributed = True
# # Change work directory
work_dir = args.work_dir
config_name = osp.splitext(osp.basename(args.config))[0]
work_dir = osp.join(work_dir, config_name)
cfg.solver["work_dir"] = work_dir
# # Seed
if args.seed is not None:
cfg.seed = args.seed
# # Model
if args.backbone_pretrain is not None:
cfg.model["use_pretrain"] = True
cfg.model["load_from"] = args.backbone_pretrain
# # Datasets
if args.data_root_dir is not None:
data_root_dir = args.data_root_dir
annotation_dir = args.annotation_dir
cfg.data["train"]["dataset"]["data_root_dir"] = data_root_dir
cfg.data["train"]["dataset"]["annotation_dir"] = annotation_dir
if "eval" in cfg.data:
cfg.data["eval"]["dataset"]["data_root_dir"] = data_root_dir
cfg.data["eval"]["dataset"]["annotation_dir"] = annotation_dir
if "test" in cfg.data:
cfg.data["test"]["dataset"]["data_root_dir"] = data_root_dir
cfg.data["test"]["dataset"]["annotation_dir"] = annotation_dir
# # Resume
if args.resume_from is not None:
cfg.solver["resume_from"] = args.resume_from
# ------ Done Change config by args and specify task ----- #
# Configure file system client
FS.init_fs_client(cfg.get("file_systems"))
# Configure distribute environment
if cfg.dist.get("dist_launcher") is not None:
init_dist(backend=cfg.dist["dist_backend"], launcher=args.dist_launcher)
rank, world_size = get_dist_info()
# Prepare work directory
work_fs_client = FS.get_fs_client(work_dir)
if type(work_fs_client) is LocalFs:
if not osp.exists(work_dir) and rank == 0:
os.makedirs(work_dir, exist_ok=True)
else:
local_work_dir = osp.join("./", args.work_dir, config_name)
os.makedirs(local_work_dir, exist_ok=True)
work_fs_client.add_target_local_map(work_dir, local_work_dir)
# Configure logger
run_id = int(time.time())
log_file = os.path.join(work_dir, f"{run_id}.log")
logger = get_logger()
init_logger(logger, log_file, args.dist_launcher)
logger.info(f"Running task with work directory: {work_dir}")
logger.info(f"Running task with config: \n{cfg}")
# Set torch constant
random_seed = cfg.get("seed")
if random_seed is not None:
logger.info(f"Set random seed to {random_seed}")
set_random_seed(random_seed)
torch.backends.cudnn.deterministic = True
torch.multiprocessing.set_start_method('spawn')
# Load Model
logger.info("Building model...")
model = get_model(cfg, logger)
logger.info(f"Built model: \n{model}")
# Load Dataset
logger.info(f"Building dataset...")
data = get_data(cfg, logger)
logger.info(f"Built dataset: \n{data}")
# Load Solver
logger.info("Building solver...")
solver = SOLVERS.build(model, cfg.solver, logger=logger)
logger.info(f"Built solver: {solver}")
# Save config
if rank == 0:
config_path = osp.join(work_dir, "final_" + osp.basename(args.config))
local_config_path = work_fs_client.convert_to_local_path(config_path)
cfg.dump(local_config_path)
work_fs_client.put_object_from_local_file(local_config_path, config_path)
# Begin solve
solver.solve(data)
logger.info(f"Solved")
if __name__ == "__main__":
main()
``` |
{
"source": "jiangzhiwei2018/Pytorch_CapsNet",
"score": 2
} |
#### File: Pytorch_CapsNet/model_src/model_main.py
```python
import torch
import torch.nn as nn
import numpy as np
from torch.nn import functional as F
import torch.optim as optim
from torch.utils.data import TensorDataset, DataLoader, Dataset
from model_src import bease_capsuleNet
from load_data_src import load_dataset_main
import os
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
class MainModule(nn.Module):
def __init__(self, data_dir, dataset_name, bs=32, num_epochs=10, save_dir="./checkpoint", num_classes=10,
**kwargs):
super(MainModule, self).__init__()
self.num_epochs = num_epochs
assert dataset_name in ["MNIST", "CIFAR", "ImageNet"]
dataset_dict = {
"MNIST": {"dataset": load_dataset_main.MNISTDataset, "in_channel": 1},
"CIFAR": {"dataset": load_dataset_main.CIFARDataset, "in_channel": 3},
"ImageNet": {"dataset": load_dataset_main.ImageNetDataset, "in_channel": 3}
}
dataset = dataset_dict[dataset_name]
self.train_dl = DataLoader(dataset["dataset"](data_dir=data_dir, train=True, num_classes=num_classes, **kwargs),
shuffle=True,
batch_size=bs)
self.test_dl = DataLoader(dataset["dataset"](data_dir=data_dir, train=False, num_classes=num_classes, **kwargs),
shuffle=True,
batch_size=bs)
self.net = bease_capsuleNet.BaseCapsuleNet(num_classes=num_classes,
in_channel=dataset["in_channel"]).to(device=device)
self.optimizer = optim.Adam(filter(lambda p: p.requires_grad, self.net.parameters()), lr=0.001)
self.loss_func = bease_capsuleNet.Margin_loss()
if not os.path.exists(save_dir):
os.makedirs(save_dir)
self.save_pth = os.path.join(save_dir, f"{dataset_name}_{num_classes}_dataset_final_model.pt")
self.model_fit()
def model_fit(self):
"""
:return:
"""
def eval_func(net, data_ld):
all_loss = 0
cnt = 0
all_acc = 0
for idx, (batch_x, batch_y_one_hot, batch_y_label) in enumerate(data_ld):
batch_x, batch_y_one_hot, batch_y_label = \
batch_x.to(device), batch_y_one_hot.to(device), \
batch_y_label.cpu().detach().numpy()
b_h, v_h = net(batch_x)
v_h = v_h[..., -1]
# reg_pre_out = torch.abs(reg_pre_out)
# clf_loss = self.multi_label_loss(clf_pre_out, batch_y_encode)
loss = self.loss_func(v_h, batch_y_one_hot)
# reg_loss = self.mse_loss(reg_pre_out, batch_y)
all_loss += float(loss.data)
# all_reg_loss += float(reg_loss.data)
pred_labels = self.predict(batch_x).cpu().detach().numpy()
acc = cal_acc(pred_labels, batch_y_label)
all_acc += acc
cnt = idx + 1
return all_loss / cnt, all_acc / cnt
min_loss = float("inf")
all_cnt = 0
for epoch in range(self.num_epochs):
for train_idx, (train_batch_x, train_batch_y_one_hot, train_batch_y_label) in enumerate(self.train_dl):
self.net.train()
print(train_batch_x.size())
train_batch_x, train_batch_y_one_hot, train_batch_y_label = \
train_batch_x.to(device), train_batch_y_one_hot.to(device), \
train_batch_y_label.cpu().detach().numpy()
train_b_h, train_v_h = self.net(train_batch_x)
train_v_h = train_v_h[..., -1]
train_loss = self.loss_func(train_v_h, train_batch_y_one_hot)
self.optimizer.zero_grad()
train_loss.backward()
self.optimizer.step()
self.net.eval()
# print(f"epoch={epoch}, train_loss={train_loss.data}")
# all_cnt = all_cnt + 1
# if all_cnt % 10 != 0:
# continue
torch.save(self.net.to(device=torch.device("cpu")), self.save_pth)
self.net.to(device=device)
with torch.no_grad():
test_loss, test_acc = eval_func(self.net, self.test_dl)
print(f"test_loss:{test_loss}, test_acc={test_acc}")
def predict(self, inx):
"""
:return:
"""
inx = inx.to(device=device)
self.net = self.net.to(device=device)
b_h, v_h = self.net(inx)
v = v_h[..., -1]
v = bease_capsuleNet.cal_normal(v, dim=-1, keepdim=False)
soft_max = F.softmax(v, dim=-1)
arg_max = torch.argmax(soft_max, dim=-1, keepdim=False)
return arg_max
def cal_acc(pred, real_label):
"""
:param pred:
:param real_label:
:return:
"""
return np.equal(pred, real_label).mean()
``` |
{
"source": "jiangzhongkai/Python_Project",
"score": 4
} |
#### File: jiangzhongkai/Python_Project/matmult.py
```python
"""
desc:
 main function: matrix multiplication
 input: two matrices
 output: the product matrix
"""
def str_list(input_Str):
"""convert string to list"""
temp=input_Str.split(' ') #define temp list to save the result
for i in range(len(temp)):
temp[i]=float(temp[i])
return temp
def get_Row_Col(input_Str):
"""get the rows and columns of matrix"""
rows,columns=input_Str.split(' ')
return int(rows),int(columns)
if __name__=="__main__":
"""step1:get the rows and columns"""
import sys
commandLine=sys.stdin.readlines()
row_1,col_1=get_Row_Col(commandLine[0].strip('\n'))
"""step2:get the matrix value"""
temp_a_list=[] #save value
temp_b_list=[]
# temp_row_1=row_1
for i in range(row_1):
temp_a_list.append(str_list(commandLine[i+1].strip('\n')))
row_2,col_2=get_Row_Col(commandLine[row_1+1].strip('\n'))
for i in range(row_2):
temp_b_list.append(str_list(commandLine[i+row_1+2].strip('\n')))
if col_1!=row_2:
print("invalid input")
# exit()
else:
mat_c=[[0]*col_2 for i in range(len(temp_a_list))]
for i in range(row_1):
for j in range(col_2):
for k in range(len(temp_b_list)):
mat_c[i][j]+=temp_a_list[i][k]*temp_b_list[k][j]
"""step3:output the final result"""
print(mat_c[i][j])
``` |
{
"source": "jiangzhongkai/TimeSeries_Attention",
"score": 3
} |
#### File: jiangzhongkai/TimeSeries_Attention/mlt.py
```python
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
# TODO: mainly plotting code
# Box plot
def plot_boxplot(xlabel,ylabel,title,xticklabels):
    fig = plt.figure(figsize=(9,7)) # create the figure canvas
    ax = plt.subplot() # create the plotting area
    plt.title(title,fontsize=14)
    f=ax.boxplot([[0.0026,0.00087,0.00075], [0.00234,0.00075,0.00128], [0.00105,0.00075]],patch_artist=False,boxprops={'color':'green'}) # cap the whisker maximum at the 95th percentile and the minimum at the 5th percentile
    # fill color inside the boxes
# for box in f['boxes']:
# box.set(facecolor='red')
    # set the x-axis tick labels
ax.set_xticklabels(xticklabels,fontsize=12)
for cap in f['caps']:
cap.set(color='blue',linewidth=2)
for median in f['medians']:
median.set(color='red',linewidth=2)
for whisker in f['whiskers']:
whisker.set(color='#CA6F1E',linewidth=2)
ax.set_alpha(alpha=0.2)
plt.grid(True,linestyle='--',color='lightgrey',alpha=0.8)
plt.yticks(fontsize=12)
plt.ylabel(ylabel,fontsize=14)
# plt.xlabel(xlabel,fontsize=14)
plt.savefig(title+'_'+xlabel+'.pdf')
plt.show()
def attention_plot():
"""仿照nlp中的权中进行简单查看"""
pass
def plot_figure(data,save_name,drop_cols):
"""
:param data:
:return:
"""
data = pd.read_csv(data)
for col in drop_cols:
data.pop(col)
values = data.values[:1500]
size=len(data.columns)
group=[i for i in range(1,size,1)]
i = 1
plt.figure(figsize=(10,10))
for g in group:
plt.subplot(len(group), 1, i)
plt.plot(values[:,g],'r')
plt.xlabel("Date")
plt.title(data.columns[g], y=0.5, loc="right")
i += 1
plt.savefig(save_name)
plt.show()
if __name__=='__main__':
# plot_boxplot(xlabel='Time Steps',ylabel='RMSE',title='SML2010 Dataset',xticklabels=['learning rate','timesteps','hidden units'])
plot_boxplot(xlabel='Time Steps', ylabel='MSE', title='SML2010 Dataset',
xticklabels=['learning rate', 'timesteps', 'hidden units'])
# plot_figure("data/NEW-DATA-1.csv",save_name='state.pdf',drop_cols=["1:Date",
# "2:Time",
# "19:Exterior_Entalpic_1",
# "20:Exterior_Entalpic_2",
# "21:Exterior_Entalpic_turbo",
# "24:Day_Of_Week"])
``` |
{
"source": "jiangzhongshi/acorns-benchmark",
"score": 3
} |
#### File: tests/complex/graph_file_sizes.py
```python
import matplotlib.pyplot as plt
import numpy as np
import os
import json
import math
import re
fontsize = 30
num_params = [78, 465, 465, 1830, 5565]
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(text):
'''
alist.sort(key=natural_keys) sorts in human order
http://nedbatchelder.com/blog/200712/human_sorting.html
(See Toothy's implementation in the comments)
'''
return [atoi(c) for c in re.split(r'(\d+)', text)]
def convert_split_size_to_number_of_files(split_sizes, num_params):
num_files = []
for split in split_sizes:
print(f'split={split}')
num_file = math.ceil(float(num_params) / float(split))
print(f'num_file={num_file}')
num_files.append(num_file)
return num_files
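# Example: with num_params=78 and split sizes [10, 20], this returns [8, 4]
# (ceil(78 / 10) = 8 and ceil(78 / 20) = 4).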
def convert_files_to_lists(file_location):
o_sizes = {}
functions = []
split_set = set()
with open(file_location) as json_data:
data = json.load(json_data)
for key in sorted(data):
o_sizes[key] = []
functions.append(key)
for split in sorted(data[key], key=natural_keys):
if '.o' in data[key][split]:
o_size = float(data[key][split]['.o'] / 1e+6)
split_set.add(int(split))
print("Key: {}, Split: {}, Data: {} ".format(
key, split, data[key][split]))
# print("C Size is {} GB".format(c_size))
# print("O Size is {} GB".format(o_size))
o_sizes[key].append(o_size)
print(split_set)
split_list = list(sorted(split_set))
return o_sizes, functions, split_list
def generate_two_graph(avg_us, denom, function, suffix="", ymin=1.e+00, ymax=1.e+02):
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(1, 1, 1)
ax.plot(denom, avg_us, color='#1abc9c', linestyle='dashed', markersize=7)
ax.set_ylim([ymin, ymax])
plt.setp(ax.get_xticklabels(), fontsize=20)
plt.setp(ax.get_yticklabels(), fontsize=20)
ax.set_yscale('log')
ax.margins(0, 0)
plt.savefig('./tests/complex/graphs/sizes/{}-{}.pdf'.format(function, suffix), bbox_inches='tight',
pad_inches=0)
plt.clf()
o_sizes, functions, split_list, = convert_files_to_lists(
"./tests/complex/data/sizes/file_sizes.json")
print(f'o sizes: {o_sizes}')
print(f'functions: {functions}')
for i, function in enumerate(functions):
num_files = convert_split_size_to_number_of_files(
split_list, num_params[i])
print('{}: \n O Sizes: {}\n Num Files {}'.format(
function, o_sizes[function], num_files))
generate_two_graph(o_sizes[function], num_files,
function, suffix="O", ymin=1.e-01, ymax=1.e+02)
# generate_full_graph_without_dynamic(us_times[label], pytorch_times[label], wenzel_static_times[label], enoki_times[label], tapenade_times[label], num_params, label, 'Wenzel', i)
```
#### File: tests/complex/graph_runs.py
```python
import matplotlib.pyplot as plt
import numpy as np
import os
import json
import re
import math
fontsize = 30
num_params = [78, 465, 1830]
def convert_split_size_to_number_of_files(split_sizes, num_params):
num_files = []
for split in split_sizes:
num_file = math.ceil(float(num_params) / float(split))
num_files.append(num_file)
return num_files
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(text):
'''
alist.sort(key=natural_keys) sorts in human order
http://nedbatchelder.com/blog/200712/human_sorting.html
(See Toothy's implementation in the comments)
'''
return [atoi(c) for c in re.split(r'(\d+)', text)]
def convert_files_to_lists(file_location):
runtimes = {}
compile_times = {}
functions = []
split_set = set()
with open(file_location) as json_data:
data = json.load(json_data)
for key in sorted(data):
runtimes[key] = []
compile_times[key] = []
functions.append(key)
for split in sorted(data[key], key=natural_keys):
split_set.add(int(split))
print("Key: {}, Split: {}, Data: {}".format(
key, split, data[key][split]))
runtimes[key].append(data[key][split]['us'])
print(split_set)
split_list = list(sorted(split_set))
return runtimes, functions, split_list
def generate_two_graph(avg_us, denom, function, suffix=""):
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(1, 1, 1)
plt.plot(denom, avg_us, color='#1abc9c', linestyle='dashed', markersize=7)
plt.ylim(1.e-05, 1.e-01)
plt.xlim(denom[-1], denom[0])
    print(plt.xlim())
plt.setp(ax.get_xticklabels(), fontsize=20)
plt.setp(ax.get_yticklabels(), fontsize=20)
plt.yscale('log')
plt.margins(0, 0)
plt.savefig('./tests/complex/graphs/runs/{}-{}.pdf'.format(function, suffix), bbox_inches='tight',
pad_inches=0)
plt.clf()
runtimes, functions, split_list, = convert_files_to_lists(
"./tests/complex/data/runs/test_data.json")
for i, function in enumerate(functions):
num_files = convert_split_size_to_number_of_files(
split_list, num_params[i])
print(function, num_files)
generate_two_graph(runtimes[function],
num_files, function, suffix="Run Times")
```
#### File: tests/graph/parallel_graphs.py
```python
import matplotlib.pyplot as plt
import numpy as np
import os
import json
import re
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(text):
'''
alist.sort(key=natural_keys) sorts in human order
http://nedbatchelder.com/blog/200712/human_sorting.html
(See Toothy's implementation in the comments)
'''
return [atoi(c) for c in re.split(r'(\d+)', text)]
def convert_files_to_lists(file_location):
our_times = []
with open(file_location) as json_data:
data = json.load(json_data)
for i, key in enumerate(sorted(data)):
for num_cores in sorted(data[key], key=natural_keys):
our_times.append(data[key][num_cores]['us'])
return our_times
def get_speedup_list(time_list):
speedup_list = []
single_thread_time = time_list[0]
for time in time_list[1:]:
speedup_list.append(float(single_thread_time) / float(time))
return speedup_list
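# Example: times [10.0, 5.0, 2.0] give speedups [2.0, 5.0], i.e. the single-thread
# time divided by each multi-thread time.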
def generate_two_graph(avg_us, denom, suffix="", ylabel="Time (s)"):
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(1, 1, 1)
ax.plot(denom, avg_us, color='#130f40', markersize=7, linewidth=5)
plt.margins(0, 0)
plt.xlim(1, 47)
plt.setp(ax.get_xticklabels(), fontsize=20)
plt.setp(ax.get_yticklabels(), fontsize=20)
plt.savefig('./tests/results/hess/graphs/parallel/parallel-graph{}.pdf'.format(suffix), bbox_inches='tight',
pad_inches=0)
# plt.savefig('./tests/complex/graphs/graph_by_128_speedup.pdf')
plt.clf()
our_times = convert_files_to_lists(
"./tests/results/grad/json/parallel/parallel_results_good.json")
print(our_times)
generate_two_graph(our_times, range(1, 48))
speedup_list = get_speedup_list(our_times)
generate_two_graph(speedup_list, range(1, 47), suffix="-speedup",
ylabel="Speedup (Time Single Thread / Time X Threads)")
```
#### File: tests/old_tests/hessian_graphs.py
```python
import matplotlib.pyplot as plt
import numpy as np
import os
import json
import seaborn as sns
import re
sns.set(style="darkgrid")
num_params_list = [10, 2010, 4010, 6010, 8010, 10010, 20010, 30010, 40010]
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(text):
'''
alist.sort(key=natural_keys) sorts in human order
http://nedbatchelder.com/blog/200712/human_sorting.html
(See Toothy's implementation in the comments)
'''
return [ atoi(c) for c in re.split(r'(\d+)', text) ]
def convert_files_to_lists(file_location):
wenzel_times_grad_static = {}
wenzel_times_grad_dynamic = {}
wenzel_times_hess_static = {}
wenzel_times_hess_dynamic = {}
us_times_grad = {}
us_times_hess = {}
functions = []
wenzel_grad_static_max = []
wenzel_grad_dynamic_max = []
wenzel_hess_static_max = []
wenzel_hess_dynamic_max = []
us_max_grad = []
us_max_hess = []
with open(file_location) as json_data:
data = json.load(json_data)
for i, key in enumerate(sorted(data)):
wenzel_times_grad_static[key] = []
wenzel_times_grad_dynamic[key] = []
wenzel_times_hess_static[key] = []
wenzel_times_hess_dynamic[key] = []
us_times_grad[key] = []
us_times_hess[key] = []
functions.append(key)
for num_params in num_params_list:
num_params_str = str(num_params)
wenzel_times_grad_static[key].append(data[key][num_params_str]['wenzel_grad_static'])
wenzel_times_grad_dynamic[key].append(data[key][num_params_str]['wenzel_grad_dynamic'])
wenzel_times_hess_static[key].append(data[key][num_params_str]['wenzel_hess_static'])
wenzel_times_hess_dynamic[key].append(data[key][num_params_str]['wenzel_hess_dynamic'])
us_times_grad[key].append(data[key][num_params_str]['us_grad'])
us_times_hess[key].append(data[key][num_params_str]['us_hessian'])
wenzel_grad_static_max.append(wenzel_times_grad_static[key][-1])
wenzel_grad_dynamic_max.append(wenzel_times_grad_dynamic[key][-1])
wenzel_hess_static_max.append(wenzel_times_hess_static[key][-1])
wenzel_hess_dynamic_max.append(wenzel_times_hess_dynamic[key][-1])
us_max_grad.append(us_times_grad[key][-1])
us_max_hess.append(us_times_hess[key][-1])
return wenzel_times_grad_static, wenzel_times_grad_dynamic, wenzel_times_hess_static, \
wenzel_times_hess_dynamic, us_times_grad, us_times_hess, \
functions, num_params_list, wenzel_grad_static_max, wenzel_grad_dynamic_max, \
wenzel_hess_static_max, wenzel_hess_dynamic_max, us_max_grad, us_max_hess
def generate_two_graph(avg_us, avg_them, denom, function, label, num_vars):
plt.plot(denom, avg_us, color='#1abc9c', linestyle='dashed', markersize=7)
plt.plot(denom, avg_them, color='#f1c40f', linestyle='dashed', markersize=7)
# legend
plt.xlabel('Parameters', fontfamily='monospace')
plt.ylabel('Time (s)', fontfamily='monospace')
plt.legend( ('Us', label),
shadow=False, fontsize=10, frameon=False)
plt.margins(0,0)
plt.savefig('./tests/results/hess/graphs/graph_{}_{}.pdf'.format(label, num_vars), bbox_inches = 'tight',
pad_inches = 0)
# plt.savefig('./tests/complex/graphs/graph_by_128_speedup.pdf')
plt.clf()
def generate_three_graph(avg_us, avg_them_static, avg_them_dynamic, denom, num_vars):
# print("Wenzel Static: {}".format(avg_them_static))
# print("Wenzel Dynamic: {}".format(avg_them_dynamic))
fig = plt.figure(figsize=(20, 5))
ax = fig.add_subplot(1, 1, 1)
ax.plot(denom, avg_us, color='#1abc9c', linestyle='dashed', markersize=7)
ax.plot(denom, avg_them_static, color='#f1c40f', linestyle='dashed', markersize=7)
ax.plot(denom, avg_them_dynamic, color='#3498db', linestyle='dashed', markersize=7)
ax.set_yscale('log')
# legend
plt.xlabel('Parameters', fontfamily='monospace')
plt.ylabel('Time (s)', fontfamily='monospace')
plt.legend( ('Ours', 'Mitsuba (Static)', 'Mitsuba (Dynamic)'),
shadow=False, fontsize=10, frameon=False)
plt.margins(0,0)
plt.savefig('./tests/results/hess/graphs/full/graph_{}_static_and_dynamic.pdf'.format(num_vars), bbox_inches = 'tight',
pad_inches = 0)
plt.clf()
def generate_full_graph(avg_us_grad, avg_wenzel_grad_static, avg_wenzel_grad_dynamic, avg_us_hess, avg_wenzel_hess_static, avg_wenzel_hess_dynamic, denom, function, label, num_vars):
plt.plot(denom, avg_us_grad, color='#1abc9c', linestyle='dashed', markersize=7)
plt.plot(denom, avg_wenzel_grad_static, color='#f1c40f', linestyle='dashed', markersize=7)
plt.plot(denom, avg_wenzel_grad_dynamic, color='#3498db', linestyle='dashed', markersize=7)
plt.plot(denom, avg_us_hess, color='#34495e', linestyle='dashed', markersize=7)
plt.plot(denom, avg_wenzel_hess_static, color='#bdc3c7', linestyle='dashed', markersize=7)
plt.plot(denom, avg_wenzel_hess_dynamic, color='#e74c3c', linestyle='dashed', markersize=7)
# legend
plt.xlabel('Parameters', fontfamily='monospace')
plt.ylabel('Time (s)', fontfamily='monospace')
plt.legend( ('Us Grad', 'Mitsuba Grad (Static)', 'Mitsuba Grad (Dynamic)', 'Us Hess', 'Mitsuba Hess (Static)', 'Mitsuba Hess (Dynamic)'),
shadow=False, fontsize=10, frameon=False)
plt.margins(0,0)
plt.savefig('./tests/results/hess/graphs/graph_{}_full.pdf'.format(num_vars), bbox_inches = 'tight',
pad_inches = 0)
plt.clf()
def generate_max_graph(max_us_hess, max_wenzel_hess_static, max_wenzel_hess_dynamic, denom):
fig = plt.figure(figsize=(20, 5))
ax = fig.add_subplot(1, 1, 1)
ax.plot(denom, max_us_hess, color='#1abc9c', linestyle='dashed', markersize=7)
ax.plot(denom, max_wenzel_hess_static, color='#f1c40f', linestyle='dashed', markersize=7)
ax.plot(denom, max_wenzel_hess_dynamic, color='#3498db', linestyle='dashed', markersize=7)
ax.set_yscale('log')
# legend
plt.xlabel('Variables', fontfamily='monospace')
plt.ylabel('Time (s)', fontfamily='monospace')
plt.legend( ('Ours', 'Mitsuba (Static)', 'Mitsuba (Dynamic)'),
shadow=False, fontsize=10, frameon=False)
plt.margins(0,0)
plt.savefig('./tests/results/hess/graphs/max/graph_max.pdf', bbox_inches = 'tight',
pad_inches = 0)
plt.clf()
wenzel_times_grad_static, wenzel_times_grad_dynamic, wenzel_times_hess_static, \
wenzel_times_hess_dynamic, us_times_grad, us_times_hess, functions, num_params, \
wenzel_grad_static_max, wenzel_grad_dynamic_max, wenzel_hess_static_max, \
wenzel_hess_dynamic_max, us_max_grad, us_max_hess = convert_files_to_lists("./tests/results/hess/json/full_results_hessian-gcc49.json")
for i, label in enumerate(functions):
# print(wenzel_times_hess_static[label])
generate_three_graph(us_times_hess[label], wenzel_times_hess_static[label], wenzel_times_hess_dynamic[label], num_params, i)
print("Us: {}\n Wenzel Static: {}\n Wenzel Dynamic: {}".format(us_max_hess, wenzel_hess_static_max, wenzel_hess_dynamic_max))
generate_max_graph(us_max_hess, wenzel_hess_static_max, wenzel_hess_dynamic_max, range(1, 20))
```
#### File: tests/python_test_utils/tapenade_utils.py
```python
import os
import sys
import general_utils
from subprocess import PIPE, run
def generate_function_c_file(func_num, functions, input_filename):
f = open(input_filename, 'w')
signature = ""
function = functions[func_num]
signature += "double function_" + str(func_num) + "("
for j in range(len(function[1])):
var = function[1][j]
signature += "double " + var
if j == len(function[1]) - 1:
signature += ")\n"
else:
signature += ", "
body = "{"
body += "\ndouble p = " + function[0] + ";"
body += "\n\treturn p;"
body += "\n}"
output = signature + body
f.write(output)
f.close()
def generate_derivatives_c_file(func_num):
cmd = "./tests/utils/ext/tapenade/bin/tapenade ./tests/utils/tapenade_func.c -head function_{} -reverse -output \"./tests/utils/tapenade_ders\"".format(func_num)
os.system(cmd)
with open("./tapenade_ders_b.c") as file:
c_code = file.read()
c_code = c_code.replace("#include <adBuffer.h>", "")
output_file = open('./tests/utils/tapenade_ders.c', "w+")
output_file.write(c_code)
output_file.close()
def generate_hessian_c_file(func_num):
cmd = "./tests/utils/ext/tapenade/bin/tapenade ./tests/utils/tapenade_func.c -head function_{} -tangent -output \"./tests/utils/tapenade_grad\"".format(func_num)
os.system(cmd)
with open("./tapenade_grad_d.c") as file:
c_code = file.read()
c_code = c_code.replace("#include <adBuffer.h>", "")
output_file = open('./tests/utils/tapenade_grad.c', "w+")
output_file.write(c_code)
output_file.close()
cmd = "./tests/utils/ext/tapenade/bin/tapenade ./tests/utils/tapenade_grad.c -head function_{}_b -tangent -output \"./tests/utils/tapenade_hess\"".format(func_num)
os.system(cmd)
with open("./tapenade_hess_d.c") as file:
c_code = file.read()
c_code = c_code.replace("#include <adBuffer.h>", "")
output_file = open('./tests/utils/tapenade_hess.c', "w+")
output_file.write(c_code)
output_file.close()
def generate_runnable_tapenade_hess(vars, num_vars, func_num):
dd_vars = generate_dd_vars(vars, num_vars)
vals_string = generate_vals_hess(vars, num_vars)
ders_string = generate_vars_from_params_string(vars, num_vars, func_num)
tapenade_der = None
with open("./tests/utils/tapenade_hess.c") as file:
tapenade_der = file.read()
with open("./tests/utils/static_code/runnable_tapenade_hess.txt") as file:
tapenade = file.read()
print(tapenade_der)
print(tapenade)
c_code = tapenade.format(tapenade_der, num_vars, num_vars, dd_vars, vals_string, ders_string)
output_file = open('./tests/utils/runnable_tapenade_hess.c', "w+")
output_file.write(c_code)
output_file.close()
def generate_dd_vars(vars, num_vars):
dd_vars_string = ""
for var in vars:
dd_vars_string += "double {}dd = 0;\n\t".format(var)
return dd_vars_string
def generate_hess_function_call(vars, num_vars, func_num):
function_call_str = "function_{}_d_d(".format(func_num)
for i, var in enumerate(vars):
function_call_str += "{}, ders_flags[second_der][{}], ders_flags[first_der][{}], {}dd,".format(var, i, i, var)
function_call_str += "&function_0_d, &function_0d, &function_0_d)"
return function_call_str
def generate_vals_hess(vars, num_vars):
vals_string = ""
for i, var in enumerate(vars):
vals_string += "\t\tdouble {} = values[i * {} + {}];\n".format(var, num_vars, i)
return vals_string
def generate_vars_from_params_string(vars, num_vars, func_num):
ders_string = ""
function_call = generate_hess_function_call(vars, num_vars, func_num)
ders_string += "\t\tdouble output = {};\n".format(function_call)
return ders_string
def generate_derivative_string(vars, num_vars, func_num):
der_string = ""
for i, var in enumerate(vars):
der_string += "\t\tdouble {} = values[i * {} + {}];\n".format(var, num_vars, i)
der_string += "\t\tdouble {}b = 0;\n".format(var)
der_string += "\t\tdouble function_{}b = 1;\n".format(func_num)
der_string += "\t\tfunction_{}_b(".format(func_num)
for var in vars:
der_string += "{}, &{}b, ".format(var, var)
der_string += "function_{}b);\n".format(func_num)
for i, var in enumerate(vars):
der_string += "\t\tders[i * {} + {}] = {}b;\n".format(num_vars, i, var)
return der_string
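# For vars=["x", "y"], num_vars=2, func_num=0 the string built above expands to C like:
#   double x = values[i * 2 + 0];  double xb = 0;
#   double y = values[i * 2 + 1];  double yb = 0;
#   double function_0b = 1;
#   function_0_b(x, &xb, y, &yb, function_0b);
#   ders[i * 2 + 0] = xb;  ders[i * 2 + 1] = yb;
# (illustrative only; the real output puts each statement on its own line)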
def generate_runnable_tapenade(vars, num_vars, func_num):
ders_string = generate_derivative_string(vars, num_vars, func_num)
tapenade_der = None
with open("./tests/utils/tapenade_ders.c") as file:
tapenade_der = file.read()
with open("./tests/utils/static_code/runnable_tapenade.txt") as file:
tapenade = file.read()
print(tapenade_der)
print(tapenade)
c_code = tapenade.format(tapenade_der, ders_string)
output_file = open('./tests/utils/runnable_tapenade.c', "w+")
output_file.write(c_code)
output_file.close()
def run_tapenade(func, num_params, functions, params_filename, output_filename, runnable_filename):
if sys.platform.startswith('win'):
print("running....")
run_command = "\"utils/program.exe\" " + \
str(num_params) + " " + \
str(len(func[1])) + " " + params_filename + " " + output_filename
else:
run_command = runnable_filename + " " + \
str(num_params) + " " + \
str(len(func[1])) + " " + params_filename + " " + output_filename
print(run_command)
run(run_command, stdout=PIPE, stderr=PIPE,
universal_newlines=True, shell=True)
return general_utils.parse_output(output_filename)
def compile(runnable_filename):
if sys.platform.startswith('win'):
cmd = "cl " + runnable_filename + ".c /out:utils/program.exe"
else:
cmd = "gcc -O3 -ffast-math -o " + runnable_filename + " " + runnable_filename + ".c -lm"
print(cmd)
os.system(cmd)
# vars = ["x", "y"]
# num_vars = 2
# generate_runnable_tapenade_hess(vars, num_vars)
``` |
{
"source": "jiangzhuochi/pymonkey",
"score": 3
} |
#### File: pymonkey/lexer/lexer.py
```python
from __future__ import annotations
from dataclasses import dataclass
from pymonkey import token
TOKENMAPS = {
";": token.Token(token.SEMICOLON, ";"),
"(": token.Token(token.LPAREN, "("),
")": token.Token(token.RPAREN, ")"),
",": token.Token(token.COMMA, ","),
"{": token.Token(token.LBRACE, "{"),
"}": token.Token(token.RBRACE, "}"),
"+": token.Token(token.PLUS, "+"),
"-": token.Token(token.MINUS, "-"),
"*": token.Token(token.ASTERISK, "*"),
"/": token.Token(token.SLASH, "/"),
">": token.Token(token.GT, ">"),
"<": token.Token(token.LT, "<"),
"": token.Token(token.EOF, ""),
}
KEYWORDS = {
"fn": token.FUNCTION,
"let": token.LET,
"if": token.IF,
"else": token.ELSE,
"return": token.RETURN,
"true": token.TRUE,
"false": token.FALSE,
}
def new(_input: str) -> Lexer:
l = Lexer(_input)
l.read_char()
return l
def is_letter(ch: str) -> bool:
return "a" <= ch <= "z" or "A" <= ch <= "Z" or ch == "_"
def is_digit(ch: str) -> bool:
return "0" <= ch <= "9"
@dataclass
class Lexer:
_input: str = ""
_position: int = 0
_read_position: int = 0
_ch: str = ""
def read_char(self):
if self._read_position >= len(self._input):
self._ch = ""
else:
self._ch = self._input[self._read_position]
self._position = self._read_position
self._read_position += 1
def next_token(self) -> token.Token:
self.skip_whitespace()
ch = self._ch
if ch == "=":
if self.peek_char() == "=":
self.read_char()
tok = token.Token(token.EQ, "==")
else:
tok = token.Token(token.ASSIGN, "=")
elif ch == "!":
if self.peek_char() == "=":
self.read_char()
tok = token.Token(token.NOT_EQ, "!=")
else:
tok = token.Token(token.BANG, "!")
else:
tok = TOKENMAPS.get(ch)
if tok is None:
if is_letter(ch):
_literal = self.read_identifier()
tok = token.Token(KEYWORDS.get(_literal, token.IDENT), _literal)
return tok
elif is_digit(ch):
tok = token.Token(token.INT, self.read_number())
return tok
else:
tok = token.Token(token.ILLEGAL, ch)
self.read_char()
return tok
def skip_whitespace(self):
while any(
[self._ch == " ", self._ch == "\t", self._ch == "\n", self._ch == "\r"]
):
self.read_char()
def read_identifier(self) -> str:
pos = self._position
while is_letter(self._ch):
self.read_char()
return self._input[pos : self._position]
def read_number(self) -> str:
pos = self._position
while is_digit(self._ch):
self.read_char()
return self._input[pos : self._position]
def peek_char(self) -> str:
if self._read_position >= len(self._input):
return token.EOF
else:
return self._input[self._read_position]
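# Minimal usage sketch (illustrative; not part of the original module):
#
#   lx = new("let five = 5;")
#   tok = lx.next_token()
#   while tok != TOKENMAPS[""]:   # TOKENMAPS[""] is the EOF token
#       print(tok)
#       tok = lx.next_token()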
```
#### File: pymonkey/tests/test_lexer.py
```python
from pymonkey import lexer, token
def test_next_token():
_input = r"""let five = 5;
let ten = 10;
let add = fn(x, y) {
x + y;
};
let result = add(five, ten);
!-/*5;
5 < 10 > 5;
if (5 < 10) {
return true;
} else {
return false;
}
10 == 10;
10 != 9;
"""
tests = [
token.Token(token.LET, "let"),
token.Token(token.IDENT, "five"),
token.Token(token.ASSIGN, "="),
token.Token(token.INT, "5"),
token.Token(token.SEMICOLON, ";"),
token.Token(token.LET, "let"),
token.Token(token.IDENT, "ten"),
token.Token(token.ASSIGN, "="),
token.Token(token.INT, "10"),
token.Token(token.SEMICOLON, ";"),
token.Token(token.LET, "let"),
token.Token(token.IDENT, "add"),
token.Token(token.ASSIGN, "="),
token.Token(token.FUNCTION, "fn"),
token.Token(token.LPAREN, "("),
token.Token(token.IDENT, "x"),
token.Token(token.COMMA, ","),
token.Token(token.IDENT, "y"),
token.Token(token.RPAREN, ")"),
token.Token(token.LBRACE, "{"),
token.Token(token.IDENT, "x"),
token.Token(token.PLUS, "+"),
token.Token(token.IDENT, "y"),
token.Token(token.SEMICOLON, ";"),
token.Token(token.RBRACE, "}"),
token.Token(token.SEMICOLON, ";"),
token.Token(token.LET, "let"),
token.Token(token.IDENT, "result"),
token.Token(token.ASSIGN, "="),
token.Token(token.IDENT, "add"),
token.Token(token.LPAREN, "("),
token.Token(token.IDENT, "five"),
token.Token(token.COMMA, ","),
token.Token(token.IDENT, "ten"),
token.Token(token.RPAREN, ")"),
token.Token(token.SEMICOLON, ";"),
token.Token(token.BANG, "!"),
token.Token(token.MINUS, "-"),
token.Token(token.SLASH, "/"),
token.Token(token.ASTERISK, "*"),
token.Token(token.INT, "5"),
token.Token(token.SEMICOLON, ";"),
token.Token(token.INT, "5"),
token.Token(token.LT, "<"),
token.Token(token.INT, "10"),
token.Token(token.GT, ">"),
token.Token(token.INT, "5"),
token.Token(token.SEMICOLON, ";"),
token.Token(token.IF, "if"),
token.Token(token.LPAREN, "("),
token.Token(token.INT, "5"),
token.Token(token.LT, "<"),
token.Token(token.INT, "10"),
token.Token(token.RPAREN, ")"),
token.Token(token.LBRACE, "{"),
token.Token(token.RETURN, "return"),
token.Token(token.TRUE, "true"),
token.Token(token.SEMICOLON, ";"),
token.Token(token.RBRACE, "}"),
token.Token(token.ELSE, "else"),
token.Token(token.LBRACE, "{"),
token.Token(token.RETURN, "return"),
token.Token(token.FALSE, "false"),
token.Token(token.SEMICOLON, ";"),
token.Token(token.RBRACE, "}"),
token.Token(token.INT, "10"),
token.Token(token.EQ, "=="),
token.Token(token.INT, "10"),
token.Token(token.SEMICOLON, ";"),
token.Token(token.INT, "10"),
token.Token(token.NOT_EQ, "!="),
token.Token(token.INT, "9"),
token.Token(token.SEMICOLON, ";"),
token.Token(token.EOF, ""),
]
l = lexer.new(_input)
for _, tt in enumerate(tests):
tok = l.next_token()
# By default, an __eq__() method will be generated in the dataclass.
# This method compares the class as if it were a tuple of its fields, in order.
# Both instances in the comparison must be of the identical type.
assert tok == tt
``` |
{
"source": "jiangziguo/dl",
"score": 3
} |
#### File: code/fnn/core_code_lv3.py
```python
import tensorflow as tf
import numpy as np
class FNN(object):
"""
    Create a feedforward neural network.
    Parameters:
    -------------
    learning_rate : float
    drop_keep : float
        dropout keep probability
    Layers : int
        the number of hidden layers
    N_hidden : list
        the number of nodes in each hidden layer
D_input : int
input dimension
D_label : int
label dimension
Task_type : string
'regression' or 'classification'
L2_lambda : float
"""
def __init__(self, learning_rate, drop_keep, Layers, N_hidden,
D_input, D_label, Task_type='regression', L2_lambda=0.0):
        # all public attributes
self.learning_rate = learning_rate
self.drop_keep = drop_keep
self.Layers = Layers
self.N_hidden = N_hidden
self.D_input = D_input
self.D_label = D_label
self.Task_type = Task_type
self.L2_lambda = L2_lambda
self.l2_penalty = tf.constant(0.0)
self.hid_layers = []
self.W = []
self.b = []
self.total_l2 = []
self.train_step = None
self.output = None
self.loss = None
self.accuracy = None
self.total_loss = None
with tf.name_scope('Input'):
self.inputs = tf.placeholder(tf.float32, [None, D_input], name="inputs")
with tf.name_scope('Label'):
self.labels = tf.placeholder(tf.float32, [None, D_label], name='labels')
with tf.name_scope('keep_rate'):
self.drop_keep_rate = tf.placeholder(tf.float32, name='dropout_keep')
self.build('F')
@staticmethod
def weight_init(shape):
"""
Initialize weight of neural network and initialization could be changed here
:param shape: [in_dim, out_dim]
:return: a Varible which is initialized by random_uniform
"""
initial = tf.random_uniform(shape,
minval=-np.sqrt(5) * np.sqrt(1.0 / shape[0]),
maxval=np.sqrt(5) * np.sqrt(1.0 / shape[0]))
return tf.Variable(initial)
@staticmethod
def bias_init(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
@staticmethod
def variable_summaries(var, name):
with tf.name_scope(name + '_summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean_' + name, mean)
with tf.name_scope(name + '_stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('_stddev_' + name, stddev)
tf.summary.scalar('_max_' + name, tf.reduce_max(var))
tf.summary.scalar('_min_' + name, tf.reduce_min(var))
tf.summary.histogram(name=name, values=var)
def layer(self, in_tensor, in_dim, out_dim, layer_name, act=tf.nn.relu):
with tf.name_scope(layer_name):
with tf.name_scope(layer_name + '_weights'):
weights = self.weight_init([in_dim, out_dim])
self.W.append(weights)
self.variable_summaries(weights, layer_name + '_weights')
with tf.name_scope(layer_name + 'biases'):
biases = self.bias_init([out_dim])
self.b.append(biases)
self.variable_summaries(biases, layer_name + '_biases')
with tf.name_scope(layer_name + '_Wx_plus_b'):
pre_activate = tf.matmul(in_tensor, weights) + biases
tf.summary.histogram(layer_name + '_pre_activations', pre_activate)
activations = act(pre_activate, name='activation')
tf.summary.histogram(layer_name + '_activations', activations)
return activations, tf.nn.l2_loss(weights)
def drop_layer(self, in_tensor):
dropped = tf.nn.dropout(in_tensor, self.drop_keep_rate)
return dropped
def build(self, prefix):
"""
        Build the network graph.
        :param prefix: name prefix for the layer scopes
:return:
"""
incoming = self.inputs
if self.Layers != 0:
layer_nodes = [self.D_input] + self.N_hidden
else:
layer_nodes = [self.D_input]
for l in range(self.Layers):
incoming, l2_loss = self.layer(incoming, layer_nodes[l], layer_nodes[l + 1],
prefix + '_hid_' + str(l + 1), act=tf.nn.relu)
self.total_l2.append(l2_loss)
print('Add dense layer: relu with drop_keep:%s' % self.drop_keep)
print(' %sD --> %sD' % (layer_nodes[l], layer_nodes[l + 1]))
self.hid_layers.append(incoming)
incoming = self.drop_layer(incoming)
if self.Task_type == 'regression':
out_act = tf.identity
else:
out_act = tf.nn.softmax
self.output, l2_loss = self.layer(incoming, layer_nodes[-1], self.D_label,
layer_name='output', act=out_act)
print('Add output layer: linear')
print(' %sD --> %sD' % (layer_nodes[-1], self.D_label))
with tf.name_scope('total_l2'):
for l2 in self.total_l2:
self.l2_penalty += l2
tf.summary.scalar('l2_penalty', self.l2_penalty)
if self.Task_type == 'regression':
with tf.name_scope('SSE'):
self.loss = tf.reduce_mean((self.output - self.labels) ** 2)
tf.summary.scalar('loss', self.loss)
else:
entropy = tf.nn.softmax_cross_entropy_with_logits(logits=self.output,
labels=self.labels)
with tf.name_scope('cross_entropy'):
self.loss = tf.reduce_mean(entropy)
tf.summary.scalar('loss', self.loss)
with tf.name_scope('accuracy'):
correct_prediction = tf.equal(tf.argmax(self.output, 1), tf.argmax(self.labels, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('accuracy', self.accuracy)
with tf.name_scope('total_loss'):
self.total_loss = self.loss + self.l2_penalty * self.L2_lambda
tf.summary.scalar('total_loss', self.total_loss)
with tf.name_scope('train'):
self.train_step = tf.train.AdamOptimizer(self.learning_rate).minimize(self.total_loss)
@staticmethod
def shufflelists(lists):
ri = np.random.permutation(len(lists[1]))
out = []
        for l in lists:
out.append(l[ri])
return out
def Standardize(seq):
"""
    :param seq: input array; statistics are computed along axis 0
    :return: the zero-mean, unit-variance version of seq
"""
# subtract mean
centerized = seq - np.mean(seq, axis=0)
# divide standard deviation
normalized = centerized / np.std(centerized, axis=0)
return normalized
def Makewindows(indata, window_size=41):
outdata = []
mid = int(window_size / 2)
indata = np.vstack((np.zeros((mid, indata.shape[1])), indata, np.zeros((mid, indata.shape[1]))))
for index in range(indata.shape[0] - window_size + 1):
outdata.append(np.hstack(indata[index: index + window_size]))
return np.array(outdata)
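# Shape note for Makewindows (derived from the code above): with window_size w, each
# output row is the horizontal concatenation of w consecutive (zero-padded) frames,
# so an input of shape (T, D) becomes an array of shape (T, w * D).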
# prepare data for training "XOR"
inputs = [[0, 0], [0, 1], [1, 0], [1, 1]]
outputs = [0, 1, 1, 0]
X = np.array(inputs).reshape((4, 1, 2)).astype('int16')
Y = np.array(outputs).reshape((4, 1, 1)).astype('int16')
ff = FNN(learning_rate=1e-3,
drop_keep=1.0,
Layers=1,
N_hidden=[2],
D_input=2,
D_label=1,
Task_type='regression',
L2_lambda=1e-2)
session = tf.InteractiveSession()
session.run(tf.global_variables_initializer())
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter('log' + '/train', session.graph)
W0 = session.run(ff.W[0])
W1 = session.run(ff.W[1])
print('W_0:\n%s' % session.run(ff.W[0]))
print('W_1:\n%s' % session.run(ff.W[1]))
pY = session.run(ff.output, feed_dict={ff.inputs: X.reshape((4, 2)),
ff.drop_keep_rate: 1.0})
print(pY)
pY = session.run(ff.hid_layers[0], feed_dict={ff.inputs: X.reshape((4, 2)), ff.drop_keep_rate: 1.0})
print(pY)
k = 0.0
for i in range(10000):
k += 1
summary, _ = session.run([merged, ff.train_step],
feed_dict={ff.inputs: X.reshape((4, 2)),
ff.labels: Y.reshape((4, 1)),
ff.drop_keep_rate: 1.0})
train_writer.add_summary(summary, k)
W0 = session.run(ff.W[0])
W1 = session.run(ff.W[1])
print('W_0:\n%s' % session.run(ff.W[0]))
print('W_1:\n%s' % session.run(ff.W[1]))
pY = session.run(ff.output, feed_dict={ff.inputs: X.reshape((4, 2)),
ff.drop_keep_rate: 1.0})
print('pY:\n')
print(pY)
``` |
{
"source": "jiangzl2016/SUSA-Facial-Emotion-Recognition",
"score": 2
} |
#### File: SUSA-Facial-Emotion-Recognition/keras-ensemble/overfitmodel.py
```python
import matplotlib
import pandas as pd
import numpy as np
import keras
import h5py
import argparse
from keras.preprocessing.image import ImageDataGenerator
from keras import layers
from keras.layers import Convolution2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.models import Sequential
from keras.models import model_from_json
from keras.layers import Conv2D, Dense, MaxPool2D, Dropout,Flatten
from keras.optimizers import SGD
from keras.activations import relu, tanh, elu
from keras.backend import clear_session
from keras.models import load_model
parser = argparse.ArgumentParser()
parser.add_argument('--number', type=int, default=None)
args = parser.parse_args()
train_data_x = pd.read_pickle('normalized_fer2013.pkl')
train_data_y = pd.read_pickle('normalized_fer2013_labels.pkl').astype(int)
test_data_x = pd.read_pickle('normalized_test_fer2013.pkl')
test_data_y = pd.read_pickle('normalized_test_fer2013_labels.pkl').astype(int)
train_data_x = train_data_x.as_matrix().reshape((-1,48,48,1))
test_data_x = test_data_x.as_matrix().reshape((-1,48,48,1))
emotion = {0:'Angry', 1:'Disgust', 2:'Fear', 3:'Happy', 4:'Sad', 5:'Surprise', 6:'Neutral'}
positive_emotes = [3, 5]
neutral = [6]
negative_emotes = [0, 1, 2, 4]
def lump_labels(label):
if label in negative_emotes + neutral:
return 0
elif label in positive_emotes:
return 1
else:
return 2
test_data_y2 = test_data_y.apply(lump_labels)
train_data_y2 = train_data_y.apply(lump_labels)
train_data_y = train_data_y.as_matrix()
test_data_y = test_data_y.as_matrix()
train_data_y2 = train_data_y2.as_matrix()
test_data_y2 = test_data_y2.as_matrix()
train_data_y = keras.utils.to_categorical(train_data_y, num_classes=7)
test_data_y = keras.utils.to_categorical(test_data_y, num_classes=7)
train_data_y2 = keras.utils.to_categorical(train_data_y2, num_classes=2)
test_data_y2 = keras.utils.to_categorical(test_data_y2, num_classes=2)
clear_session()
model = keras.models.Sequential()
EPOCHS = 1000
PATIENCE = 20
PERIOD = 100
FILEPATH = "model_{}/".format(args.number) + "weights.epoch-{epoch:02d}-val_loss-{val_loss:.2f}-train_loss-{loss:.2f}.hdf5"
CSV_FILENAME = "model_{}/train.log".format(args.number)
if args.number % 3 == 0:
active = 'elu'
else:
active = 'relu'
if args.number % 2 == 0:
model.add(Dropout(0.5,input_shape=(48,48,1)))
model.add(Conv2D(8, (5,5), strides=(1,1),activation=active,padding='valid'))
model.add(Conv2D(32, (5,5), strides=(1,1),activation=active,padding='valid'))
model.add(Conv2D(64, (5,5), strides=(1,1),activation=active,padding='valid'))
model.add(Conv2D(64, (5,5), strides=(1,1),activation=active,padding='valid'))
model.add(Conv2D(32, (5,5), strides=(1,1),activation=active,padding='valid'))
model.add(Flatten())
model.add(Dense(128,activation='elu'))
model.add(Dense(2,activation='softmax'))
else:
model.add(Dropout(0.5,input_shape=(48,48,1)))
model.add(Conv2D(16, (20,20), strides=(1,1),activation=active,padding='valid'))
model.add(Conv2D(32, (5,5), strides=(1,1),activation=active,padding='valid'))
model.add(Conv2D(32, (5,5), strides=(1,1),activation=active,padding='valid'))
model.add(Conv2D(32, (5,5), strides=(1,1),activation=active,padding='valid'))
model.add(Conv2D(32, (5,5), strides=(1,1),activation=active,padding='valid'))
model.add(Flatten())
if args.number % 5 == 0:
model.add(Dense(128,activation='elu'))
model.add(Dense(32,activation='elu'))
model.add(Dense(2,activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
early_stopping = keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0,
patience=PATIENCE, verbose=0, mode='auto')
checkpoint = keras.callbacks.ModelCheckpoint(FILEPATH, monitor='val_loss',
verbose=0, save_best_only=False, save_weights_only=False, mode='auto', period=PERIOD)
csv_logger = keras.callbacks.CSVLogger(CSV_FILENAME, separator=',', append=False)
model.fit(train_data_x, train_data_y2, validation_data=(test_data_x,test_data_y2),
epochs=EPOCHS, batch_size=32, callbacks=[early_stopping, checkpoint, csv_logger])
model.save("model_{}/final_model.h5py".format(args.number))
```
#### File: SUSA-Facial-Emotion-Recognition/tensorflow_ensemble/ensemble_CNN_keras_API.py
```python
import numpy as np
import time
import os
from Utils import load_pd_data
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import InputLayer, Input
from tensorflow.python.keras.layers import Reshape, MaxPooling2D
from tensorflow.python.keras.layers import Conv2D, Dense, Flatten
from keras.models import load_model
from tensorflow.python.keras.optimizers import Adam
from tensorflow.python.keras.metrics import categorical_accuracy
from Utils import next_batch
'''
This class creates CNN model objects, covering build, compile, train, save and evaluate.
Users can also define other model objects and modify the ensemble function to train and
save other models.
'''
class CNN_model(object):
def __init__(self, image_size, num_channels, num_classes):
self.model = None
self.image_size = image_size
self.num_channels = num_channels
self.num_classes = num_classes
self.X = None
self.Y = None
def load_train_data(self, X, Y):
self.X = X
self.Y = Y
def build_model(self):
img_size_flat = self.image_size * self.image_size
img_shape = (self.image_size, self.image_size, self.num_channels)
model = Sequential()
model.add(InputLayer(input_shape=(img_size_flat,)))
model.add(Reshape(img_shape))
# model.add(Dropout(0.5, input_shape=(48, 48, 1)))
model.add(Conv2D(kernel_size=5, strides=1, filters=16, padding='same',
activation='elu', name='layer_conv1'))
model.add(MaxPooling2D(pool_size=2, strides=2))
model.add(Conv2D(kernel_size=5, strides=1, filters=32, padding='same',
activation='elu', name='layer_conv2'))
model.add(MaxPooling2D(pool_size=2, strides=2))
model.add(Conv2D(kernel_size=5, strides=1, filters=64, padding='same',
activation='elu', name='layer_conv3'))
model.add(MaxPooling2D(pool_size=2, strides=2))
model.add(Flatten())
model.add(Dense(128, activation='elu'))
model.add(Dense(32, activation='elu'))
# Last fully-connected / dense layer with softmax-activation
# for use in classification.
model.add(Dense(self.num_classes, activation='softmax'))
self.model = model
def compile_model(self, optimizer, loss_metric = 'categorical_crossentropy', metrics = ['accuracy']):
if self.model is not None:
#self.optimizer = optimizer
#self.loss_metric = loss_metric
#self.metrics = ['accuracy']
self.model.compile(optimizer=optimizer,
loss= loss_metric,
metrics=metrics)
def train(self, model_name = None, epochs = 10, batch_size = 128):
if self.model is not None:
self.model.fit(x=self.X, y=self.Y, epochs= epochs, batch_size= batch_size)
def save(self, filepath):
if self.model is not None:
self.model.save(filepath + ".h5py")
model_json = self.model.to_json()
with open(filepath + ".json", "w") as json_file:
json_file.write(model_json)
def evaluate_train(self):
if self.model is not None:
result = self.model.evaluate(self.X, self.Y)
for name, value in zip(self.model.metrics_names, result):
print(name, value)
def evaluate_test(self, X_val, Y_val):
if self.model is not None:
result = self.model.evaluate(X_val, Y_val)
for name, value in zip(self.model.metrics_names, result):
print(name, value)
def predict(self, X_test):
if self.model is None:
return None, None
y_pred = self.model.predict(x=X_test)
cls_pred = np.argmax(y_pred, axis=1)
return y_pred, cls_pred
'''
This function trains and saves n networks, currently all CNNs. It could be extended
to other model types as well.
'''
def CNN_ensemble(image_size, num_networks, num_classes, train_X, train_Y, epochs = 5, batch_size = 128):
num_channels= 1
list_of_model = []
for i in range(num_networks):
# for j in range(epochs):
#X_batch, Y_batch = next_batch(train_X, train_Y, batch_size)
model = CNN_model(image_size, num_channels, num_classes)
model.load_train_data(train_X, train_Y) #######################
model.build_model()
optimizer = Adam(lr=1e-3)
model.compile_model(optimizer)
model.train(epochs= epochs, batch_size=batch_size)
#model.evaluate_train()
model_name = "CNN_model_{0}".format(i)
model.save("./model/" + model_name) #####save the model here
if model is None:
print ("missing model_{0}".format(i))
'''
This function loads the n saved models and makes predictions by averaging the outputs of the n networks.
'''
def ensemble_predict(number_of_models, num_classes, test_set, filepath= '../model/', pattern= ".h5py"):
all_files = os.listdir(filepath)
if len(all_files) == 0:
print('No Models in the directory. Please first train and save models.')
else:
predictions = np.zeros((test_set.shape[0], num_classes))
for (i,filename) in enumerate(all_files):
count = 0
if pattern in filename:
model = load_model(filepath + filename)
preds = model.predict(test_set)
predictions += preds
#cls_preds = np.argmax(preds, axis=1)
        predictions = predictions / number_of_models
return predictions
if __name__ == '__main__':
print('Load in Data ...')
train_X, train_Y, _, _ = load_pd_data('../data/pixel_nocomplex_train.pd')
test_X, test_Y, _, _ = load_pd_data('../data/pixel_nocomplex_test.pd')
epochs = 10
batch_size = 128
num_networks = 3
num_classes = 8
image_size = 96
CNN_ensemble(image_size, num_networks, num_classes, train_X, train_Y, epochs, batch_size)
preds = ensemble_predict(num_networks, num_classes, test_X)
preds_cls = np.argmax(preds, axis=1)
print("The prediction for first 100 images are: ")
print(preds[0:99])
# measure accuracy
acc = np.sum(preds_cls == test_Y) / len(preds_cls)
print("The accuracy of ensemble model is: {0}".format(acc))
``` |
{
"source": "jiangzoi/incubator-tvm",
"score": 2
} |
#### File: tf_tvmdsoop/tests/test_tfop_module.py
```python
import tempfile
import os
import logging
import tensorflow as tf
import numpy as np
import tvm
from tvm import te
from tvm.contrib import tf_op
def test_use_tvmdso_op():
"""main test function"""
def export_cpu_add_lib():
"""create cpu add op lib"""
n = te.var("n")
ph_a = te.placeholder((n,), name='ph_a')
ph_b = te.placeholder((n,), name='ph_b')
ph_c = te.compute(ph_a.shape, lambda i: ph_a[i] + ph_b[i], name='ph_c')
sched = te.create_schedule(ph_c.op)
fadd_dylib = tvm.build(sched, [ph_a, ph_b, ph_c], "c", name="vector_add")
lib_path = tempfile.mktemp("tvm_add_dll.so")
fadd_dylib.export_library(lib_path)
return lib_path
def export_gpu_add_lib():
"""create gpu add op lib"""
n = te.var("n")
ph_a = te.placeholder((n,), name='ph_a')
ph_b = te.placeholder((n,), name='ph_b')
ph_c = te.compute(ph_a.shape, lambda i: ph_a[i] + ph_b[i], name='ph_c')
sched = te.create_schedule(ph_c.op)
b_axis, t_axis = sched[ph_c].split(ph_c.op.axis[0], factor=64)
sched[ph_c].bind(b_axis, te.thread_axis("blockIdx.x"))
sched[ph_c].bind(t_axis, te.thread_axis("threadIdx.x"))
fadd_dylib = tvm.build(sched, [ph_a, ph_b, ph_c], "cuda", name="vector_add")
lib_path = tempfile.mktemp("tvm_add_cuda_dll.so")
fadd_dylib.export_library(lib_path)
return lib_path
def test_add(session, lib_path, tf_device):
"""test add lib with TensorFlow wrapper"""
module = tf_op.OpModule(lib_path)
left = tf.placeholder("float32", shape=[4])
right = tf.placeholder("float32", shape=[4])
feed_dict = {left: [1.0, 2.0, 3.0, 4.0], right: [5.0, 6.0, 7.0, 8.0]}
expect = np.asarray([6.0, 8.0, 10.0, 12.0])
add1 = module.func("vector_add", output_shape=[4], output_dtype="float")
add2 = module.func("vector_add", output_shape=tf.shape(left), output_dtype="float")
add3 = module.func("vector_add", output_shape=[tf.shape(left)[0]], output_dtype="float")
with tf.device(tf_device):
output1 = session.run(add1(left, right), feed_dict)
np.testing.assert_equal(output1, expect)
output2 = session.run(add2(left, right), feed_dict)
np.testing.assert_equal(output2, expect)
output3 = session.run(add3(left, right), feed_dict)
np.testing.assert_equal(output3, expect)
def cpu_test(session):
"""test function for cpu"""
cpu_lib = None
try:
cpu_lib = export_cpu_add_lib()
test_add(session, cpu_lib, "/cpu:0")
finally:
if cpu_lib is not None:
os.remove(cpu_lib)
def gpu_test(session):
"""test function for gpu"""
gpu_lib = None
try:
gpu_lib = export_gpu_add_lib()
test_add(session, gpu_lib, "/gpu:0")
finally:
if gpu_lib is not None:
os.remove(gpu_lib)
with tf.Session() as session:
if tvm.runtime.enabled("cpu"):
logging.info("Test TensorFlow op on cpu kernel")
cpu_test(session)
if tvm.runtime.enabled("gpu"):
logging.info("Test TensorFlow op on gpu kernel")
gpu_test(session)
if __name__ == "__main__":
test_use_tvmdso_op()
```
#### File: tvm/arith/int_set.py
```python
import tvm._ffi
from tvm.runtime import Object
from . import _ffi_api
class IntSet(Object):
"""Represent a set of integer in one dimension."""
def is_nothing(self):
"""Whether the set represent nothing"""
return _ffi_api.IntSetIsNothing(self)
def is_everything(self):
"""Whether the set represent everything"""
return _ffi_api.IntSetIsEverything(self)
@staticmethod
def vector(vec):
"""Construct an integer set that covers the vector expr
Parameters
----------
vec : PrimExpr
The vector expression.
Returns
-------
rset : IntSet
The result set.
"""
return _ffi_api.intset_vector(vec)
@staticmethod
def single_point(point):
"""Construct a point set.
Parameters
----------
point : PrimExpr
The vector expression.
Returns
-------
rset : IntSet
The result set.
"""
return _ffi_api.intset_single_point(point)
@tvm._ffi.register_object("arith.IntervalSet")
class IntervalSet(IntSet):
"""Represent set of continuous interval [min_value, max_value]
Parameters
----------
min_value : PrimExpr
The minimum value in the interval.
max_value : PrimExpr
The maximum value in the interval.
"""
def __init__(self, min_value, max_value):
self.__init_handle_by_constructor__(
_ffi_api.IntervalSet, min_value, max_value)
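# Small illustration (not from the original file): IntervalSet(0, 10) represents the
# closed interval [0, 10], IntSet.single_point(5) the single value 5, and
# IntSet.vector(expr) the set of values a vector expression can take.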
```
#### File: tvm/arith/int_solver.py
```python
import tvm._ffi
from tvm.runtime import Object
from . import _ffi_api
@tvm._ffi.register_object("arith.IntGroupBounds")
class IntGroupBounds(Object):
"""Represent integer grouped bounds which are classified into
lower bounds (include), upper bounds (include) and equalities.
Parameters
----------
coef : tvm.ir.PrimExpr
The coefficient. Must be integer type.
coef * var >= lower
coef * var == equal
        coef * var <= upper
lower : List[tvm.ir.PrimExpr]
the lower bounds (include)
equal : List[tvm.ir.PrimExpr]
equalities
upper : List[tvm.ir.PrimExpr]
the upper bounds (include)
"""
def __init__(self, coef, lower, equal, upper):
self.__init_handle_by_constructor__(
_ffi_api.IntGroupBounds, coef, lower, equal, upper)
@staticmethod
def from_range(rng):
"""Construct a IntGroupedBounds by Range.
Parameters
----------
rng : tvm.ir.Range
Returns
-------
ret : Range
The constructed range.
"""
return _ffi_api.IntGroupBounds_from_range(rng)
def find_best_range(self):
"""Return the best range from the grouped bounds.
None if (-inf, +inf).
"""
return _ffi_api.IntGroupBounds_FindBestRange(self)
@tvm._ffi.register_object("arith.IntConstraints")
class IntConstraints(Object):
"""Represent a set of integer constraints including variables, their ranges and
the relations between them (either equations or inequalities)
Parameters
----------
variables : List[tvm.tir.Var]
The variables in the constraints. Must be integers
ranges : Map[tvm.tir.Var, tvm.ir.Range]
The ranges of the variables.
relations : List[tvm.ir.PrimExpr]
The relations between the variables (either equations or inequalities)
"""
def __init__(self, variables, ranges, relations):
self.__init_handle_by_constructor__(
_ffi_api.IntConstraints, variables, ranges, relations)
@tvm._ffi.register_object("arith.IntConstraintsTransform")
class IntConstraintsTransform(Object):
"""We can have different set of variables to represent the same integer constraints.
For example, the following two constrains are equivalent,
{a + b = 0 | a >= 0, b >= 0} and
{m - n = 0 | m >= 0, n <= 0}
This data structure represents the transformation
between two equivalent integer constraints.
In the above example,
src : {a + b = 0 | a >= 0, b >= 0}
dst : {m - n = 0 | m >= 0, n <= 0}
src_to_dst : {a -> m, b -> -n}
dst_to_src : {m -> a, n -> -b}
Parameters
----------
src : arith.IntConstraints
source integer constraints, e.g., {a + b = 0 | a >= 0, b >= 0}
dst : arith.IntConstraints
integer constraints equivalent to the source, e.g., {m - n = 0 | m >= 0, n <= 0}
src_to_dst : Map[tvm.tir.Var, tvm.ir.PrimExpr]
mapping from variables in the src to the variables in the dst,
e.g., {a -> m, b -> -n}
dst_to_src : Map[tvm.tir.Var, tvm.ir.PrimExpr]
mapping from variables in the dst to the variables in the src,
e.g., {m -> a, n -> -b}
"""
def __init__(self, src, dst, src_to_dst, dst_to_src):
self.__init_handle_by_constructor__(
_ffi_api.IntConstraintsTransform, src, dst, src_to_dst, dst_to_src)
def solve_linear_equations(equations, variables=None, ranges=None):
"""Solve linear equations.
Parameters
----------
equations: List[tvm.ir.PrimExpr] or IntConstraints
The equations of the variables
variables : Optional[List[tvm.tir.Var]]
The variables in the system.
ranges : Optional[Map[tvm.tir.Var, tvm.ir.Range]]
The ranges of the variables.
Returns
-------
int_constraints_transform : IntConstraintsTransform
New integer constraints, with less variables (if the problem is NOT of full rank),
or no variable (if the problem is of full rank),
or an empty integer constraints (if the problem is unsolvable).
It also provides the ranges of the variables in the new system,
as well as inequalities inferred from the problem.
You can get the mapping from the original variables to the solution via
int_constraints_transform.src_to_dst.
"""
if isinstance(equations, IntConstraints):
return _ffi_api.SolveLinearEquations(equations)
return _ffi_api.SolveLinearEquations(variables, ranges, equations)
def solve_linear_inequalities(equations, variables=None, ranges=None, deskew_range=False):
"""Solve linear inequalities.
Parameters
----------
equations : List[tvm.ir.PrimExpr] or IntConstraints
The inequalities of the variables
variables : Optional[List[tvm.tir.Var]]
The variables in the system.
ranges : Optional[Map[tvm.tir.Var, tvm.ir.Range]]
The ranges of the variables.
deskew_range: Optional[bool]
        Whether to deskew the result ranges so that they start from zero.
        Defaults to False.
Returns
-------
ret_ranges: IntConstraints or IntConstraintsTransform
The result ranges for each variables.
        Constraints that cannot be transformed to a Range are stored in IntConstraints.relations.
        If deskew_range is True, the result ranges are deskewed to start from zero.
        New variables are created accordingly, therefore an IntConstraintsTransform is returned.
"""
solver = _ffi_api.SolveInequalitiesDeskewRange \
if deskew_range else _ffi_api.SolveInequalitiesToRange
if isinstance(equations, IntConstraints):
assert variables is None
assert ranges is None
return solver(equations)
return solver(variables, ranges, equations)
```
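A minimal usage sketch for the solvers above; the variables, ranges, and equations are illustrative assumptions, not part of the module:
```python
import tvm
from tvm import te, arith

# Two integer variables with assumed ranges.
x, y = te.var("x"), te.var("y")
ranges = {x: tvm.ir.Range(0, 100), y: tvm.ir.Range(0, 100)}

# Solve the system {x + y == 10, x - y == 2}.
solution = arith.solve_linear_equations(
    [tvm.tir.EQ(x + y, 10), tvm.tir.EQ(x - y, 2)], [x, y], ranges)
# solution.src_to_dst maps the original variables to the solved expressions.

# Reduce a set of inequalities to per-variable ranges.
bounds = arith.solve_linear_inequalities([x >= 2, x <= 7], [x], {x: tvm.ir.Range(0, 100)})
```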
#### File: tvm/auto_scheduler/workload_registry.py
```python
import pickle
import json
import tvm._ffi
from .utils import serialize_args, deserialize_args, get_func_name
WORKLOAD_FUNC_REGISTRY = {}
def register_workload(func_name, f=None, override=False):
""" Register a function that generates a certain workload.
The input function should take hashable and jsonable arguments
(int, float, tuple of int, tvm.tensor.Tensor, ...) and return a list of tvm.tensor.Tensor.
Parameters
----------
func_name : Union[Function, str]
The generation function that returns the compute declaration Tensors or its function name.
f : Optional[Function]
The generation function to be registered.
override : boolean = False
        Whether to override an existing entry.
Examples
--------
@auto_scheduler.register_workload
def matmul(N, M, K):
A = te.placeholder((N, K), name='A')
B = te.placeholder((K, M), name='B')
k = te.reduce_axis((0, K), name='k')
        C = te.compute((N, M), lambda i, j: te.sum(A[i][k] * B[k][j], axis=[k]), name='C')
return [A, B, C]
"""
global WORKLOAD_FUNC_REGISTRY
if callable(func_name):
f = func_name
func_name = get_func_name(f)
if not isinstance(func_name, str):
raise ValueError("expect string function name")
def register(myf):
"""internal register function"""
if func_name in WORKLOAD_FUNC_REGISTRY and not override:
raise RuntimeError('%s has been registered already' % func_name)
WORKLOAD_FUNC_REGISTRY[func_name] = myf
return myf
if f:
return register(f)
return register
def make_workload_key(func, args):
""" Make a workload key by function and arguments.
Parameters
----------
func : Union[Function, str]
The function that returns the compute declaration Tensors.
        Can be a function or the function name.
args : Args
The args of the function.
Returns
-------
workload_key : Str
The workload key of the function.
"""
global WORKLOAD_FUNC_REGISTRY
if callable(func):
func_name = get_func_name(func)
elif isinstance(func, str):
func_name = func
else:
raise ValueError("Invalid function: " + str(func) +
" . `make_workload_key` expects a callable function or its function name")
    if func_name not in WORKLOAD_FUNC_REGISTRY:
raise ValueError("%s is not registered. " % func,
"Please register it with @auto_scheduler.register_workload")
args = serialize_args(args)
return json.dumps((func_name,) + args)
def decode_workload_key_to_func_args(workload_key):
""" Decode a workload key to the registerd function name and its corresponding args.
Parameters
----------
workload_key : str
The input workload key.
Returns
-------
name : str
The function name of this workload key.
args : List[Tensor]
The args of the generation function.
"""
global WORKLOAD_FUNC_REGISTRY
workload = json.loads(workload_key)
    if workload[0] not in WORKLOAD_FUNC_REGISTRY:
raise ValueError("%s is not registered. " % workload[0] +
"Please register it with @auto_scheduler.register_workload")
return workload[0], deserialize_args(workload[1:])
@tvm._ffi.register_func("auto_scheduler.workload_key_to_tensors")
def workload_key_to_tensors(workload_key):
""" Get the input/output tensors from the workload key.
This method is usually used to create a ComputeDAG by workload key.
Parameters
----------
workload_key : str
The input workload key.
Returns
-------
tensors : List[Tensor]
The registered compute declaration Tensors.
"""
global WORKLOAD_FUNC_REGISTRY
name, args = decode_workload_key_to_func_args(workload_key)
lookup = WORKLOAD_FUNC_REGISTRY[name]
assert callable(lookup)
return lookup(*args)
def save_workload_func_registry(filename):
""" Dump workload function registry to a pickle binary file.
Parameters
----------
filename : str
The filename to dump workload function registry to.
"""
global WORKLOAD_FUNC_REGISTRY
pickle.dump(WORKLOAD_FUNC_REGISTRY, open(filename, 'wb'))
def load_workload_func_registry(filename):
""" Load workload function registry from a pickle binary file.
Parameters
----------
filename : str
The filename to load workload function registry from.
"""
global WORKLOAD_FUNC_REGISTRY
WORKLOAD_FUNC_REGISTRY = pickle.load(open(filename, 'rb'))
```
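A minimal registration sketch; the workload name, shapes, and compute below are illustrative assumptions:
```python
from tvm import te, auto_scheduler
from tvm.auto_scheduler.workload_registry import (
    make_workload_key, decode_workload_key_to_func_args, workload_key_to_tensors)

@auto_scheduler.register_workload
def matmul_example(N, M, K):
    A = te.placeholder((N, K), name="A")
    B = te.placeholder((K, M), name="B")
    k = te.reduce_axis((0, K), name="k")
    C = te.compute((N, M), lambda i, j: te.sum(A[i, k] * B[k, j], axis=[k]), name="C")
    return [A, B, C]

key = make_workload_key(matmul_example, (128, 128, 128))
name, args = decode_workload_key_to_func_args(key)
tensors = workload_key_to_tensors(key)  # the registered A, B, C for these args
```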
#### File: tvm/contrib/nnpack.py
```python
import tvm
from tvm import te
import tvm._ffi
def is_available():
"""Check whether NNPACK is available, that is, `nnp_initialize()`
returns `nnp_status_success`.
"""
return _initialize() == 0
def fully_connected_inference(lhs, rhs, nthreads=1):
"""Create an extern op that compute fully connected of 1D tensor lhs and
2D tensor rhs with nnpack.
Parameters
----------
lhs : Tensor
lhs 1D array input[input_channels] of FP32 elements
rhs : Tensor
        rhs 2D matrix kernel[output_channels][input_channels] of FP32 elements
Returns
-------
C : Tensor
        1D array out[output_channels] of FP32 elements.
"""
m = rhs.shape[0]
return te.extern(
(m, ), [lhs, rhs],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.nnpack.fully_connected_inference",
ins[0], ins[1], outs[0], nthreads), name="C")
class ConvolutionAlgorithm:
AUTO = 0
FFT_8x8 = 1
FFT_16x16 = 2
WT_8x8 = 3
IMPLICIT_GEMM = 4
DIRECT = 5
WT_8x8_FP16 = 6
class ConvolutionTransformStrategy:
COMPUTE = 1
PRECOMPUTE = 2
def convolution_inference(
data, kernel, bias, padding, stride, nthreads=1,
algorithm=ConvolutionAlgorithm.AUTO):
"""Create an extern op to do inference convolution of 4D tensor data and
4D tensor kernel and 1D tensor bias with nnpack.
Parameters
----------
data : Tensor
data 4D tensor input[batch][input_channels][input_height][input_width] of
FP32 elements.
kernel : Tensor
kernel 4D tensor kernel[output_channels][input_channels][kernel_height]
[kernel_width] of FP32 elements.
bias : Tensor
        bias 1D array bias[output_channels] of FP32 elements.
padding : list
padding A 4-dim list of [pad_top, pad_bottom, pad_left, pad_right],
which indicates the padding around the feature map.
stride : list
stride A 2-dim list of [stride_height, stride_width], which indicates
the stride.
Returns
-------
output : Tensor
output 4D tensor output[batch][output_channels][output_height][output_width]
of FP32 elements.
"""
assert isinstance(padding, list) and len(padding) == 4
assert isinstance(stride, list) and len(stride) == 2
batch, _, input_height, input_width = data.shape
output_channels, _, kernel_height, kernel_width = kernel.shape
idxdiv = te.indexdiv
output_height = idxdiv(
input_height + padding[0] + padding[1] - kernel_height, stride[0]) + 1
    output_width = idxdiv(
        input_width + padding[2] + padding[3] - kernel_width, stride[1]) + 1
return te.extern(
(batch, output_channels, output_height, output_width),
[data, kernel, bias] if bias is not None else [data, kernel],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.nnpack.convolution_inference",
ins[0],
ins[1],
ins[2] if bias is not None else 0,
outs[0], padding[0], padding[1], padding[2], padding[3],
stride[0], stride[1], nthreads, algorithm), name="C")
def convolution_inference_without_weight_transform(
data, transformed_kernel, bias, padding, stride, nthreads=1,
algorithm=ConvolutionAlgorithm.AUTO):
"""Create an extern op to do inference convolution of 4D tensor data and
4D pre-transformed tensor kernel and 1D tensor bias with nnpack.
Parameters
----------
data : Tensor
data 4D tensor input[batch][input_channels][input_height][input_width] of
FP32 elements.
transformed_kernel : Tensor
transformed_kernel 4D tensor kernel[output_channels][input_channels][tile]
[tile] of FP32 elements.
bias : Tensor
        bias 1D array bias[output_channels] of FP32 elements.
padding : list
padding A 4-dim list of [pad_top, pad_bottom, pad_left, pad_right],
which indicates the padding around the feature map.
stride : list
stride A 2-dim list of [stride_height, stride_width], which indicates
the stride.
Returns
-------
output : Tensor
output 4D tensor output[batch][output_channels][output_height][output_width]
of FP32 elements.
"""
assert algorithm in (ConvolutionAlgorithm.WT_8x8,
ConvolutionAlgorithm.WT_8x8_FP16)
assert isinstance(padding, list) and len(padding) == 4
assert isinstance(stride, list) and len(stride) == 2
batch, _, input_height, input_width = data.shape
output_channels, _, _, _ = transformed_kernel.shape
kernel_height, kernel_width = (3, 3)
idxdiv = te.indexdiv
output_height = idxdiv(input_height + padding[0] + padding[1] - kernel_height, stride[0]) + 1
    output_width = idxdiv(input_width + padding[2] + padding[3] - kernel_width, stride[1]) + 1
return te.extern(
(batch, output_channels, output_height, output_width),
[data, transformed_kernel, bias] if bias is not None else [data, transformed_kernel],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.nnpack.convolution_inference_without_weight_transform",
ins[0],
ins[1],
ins[2] if bias is not None else 0,
outs[0], padding[0], padding[1], padding[2], padding[3],
stride[0], stride[1], nthreads, algorithm), name="C", dtype='float32')
def convolution_inference_weight_transform(
kernel, nthreads=1,
algorithm=ConvolutionAlgorithm.AUTO,
dtype='float32'):
"""Create an extern op to do inference convolution of 3D tensor data and
4D tensor kernel and 1D tensor bias with nnpack.
Parameters
----------
kernel : Tensor
kernel 4D tensor kernel[output_channels][input_channels][kernel_height]
[kernel_width] of FP32 elements.
Returns
-------
output : Tensor
output 4D tensor output[output_channels][input_channels][tile][tile]
of FP32 elements.
"""
assert algorithm in (ConvolutionAlgorithm.WT_8x8, ConvolutionAlgorithm.WT_8x8_FP16)
output_channels, input_channels, _, _ = kernel.shape
transform_tile_size = 8
if not isinstance(dtype, str):
dtype = dtype.dtype
return te.extern(
(output_channels, input_channels, transform_tile_size, transform_tile_size),
[kernel],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.nnpack.convolution_inference_weight_transform",
ins[0], outs[0], nthreads, algorithm), name="transform_kernel", dtype=dtype)
tvm._ffi._init_api("tvm.contrib.nnpack")
```
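A minimal construction sketch for the NNPACK dense op above; building and running it assumes TVM was compiled with NNPACK support:
```python
import tvm
from tvm import te
from tvm.contrib import nnpack

# Fully connected inference: 1D input of 64 channels, weights of shape (16, 64).
lhs = te.placeholder((64,), name="lhs", dtype="float32")
rhs = te.placeholder((16, 64), name="rhs", dtype="float32")
C = nnpack.fully_connected_inference(lhs, rhs)

s = te.create_schedule(C.op)
# Building and running requires TVM compiled with NNPACK support:
#     f = tvm.build(s, [lhs, rhs, C], "llvm")
```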
#### File: tvm/contrib/spirv.py
```python
import subprocess
import os
from . import util
from .._ffi.base import py_str
def optimize(spv_bin):
"""Optimize SPIRV using spirv-opt via CLI
Note that the spirv-opt is still experimental.
Parameters
----------
spv_bin : bytearray
The spirv file
    Returns
    -------
    opt_bin : bytearray
        The optimized SPIR-V binary
"""
tmp_dir = util.tempdir()
tmp_in = tmp_dir.relpath("input.spv")
tmp_out = tmp_dir.relpath("output.spv")
with open(tmp_in, "wb") as out_file:
out_file.write(bytes(spv_bin))
sdk = os.environ.get("VULKAN_SDK", None)
cmd = os.path.join(sdk, "bin/spirv-opt") if sdk else "spirv-opt"
args = [cmd, "-O", tmp_in, "-o", tmp_out]
proc = subprocess.Popen(
args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
(out, _) = proc.communicate()
if proc.returncode != 0:
msg = "Opitmizationerror using spirv-opt:\n"
msg += py_str(out)
raise RuntimeError(msg)
return bytearray(open(tmp_out, "rb").read())
```
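A minimal usage sketch, assuming `shader.spv` holds a compiled SPIR-V module and spirv-opt is reachable on PATH (or via VULKAN_SDK):
```python
from tvm.contrib import spirv

# shader.spv is an assumed input file produced elsewhere.
with open("shader.spv", "rb") as f:
    spv_bin = bytearray(f.read())

optimized = spirv.optimize(spv_bin)
with open("shader.opt.spv", "wb") as f:
    f.write(bytes(optimized))
```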
#### File: tvm/ir/expr.py
```python
import tvm._ffi
from .base import Node
from . import _ffi_api
class BaseExpr(Node):
"""Base class of all the expressions."""
class PrimExpr(BaseExpr):
"""Base class of all primitive expressions.
PrimExpr is used in the low-level code
optimizations and integer analysis.
"""
class RelayExpr(BaseExpr):
"""Base class of all non-primitive expressions."""
@property
def checked_type(self):
"""Get the checked type of tvm.relay.Expr.
Returns
-------
checked_type : tvm.relay.Type
The checked type.
"""
ret = self._checked_type_
if ret is None:
raise ValueError("The type checker has not populated"
" the checked_type for this node")
return ret
@tvm._ffi.register_object("GlobalVar")
class GlobalVar(RelayExpr):
"""A global variable in the IR.
GlobalVar is used to refer to the global functions
stored in the IRModule.
Parameters
----------
name_hint: str
The name of the variable.
"""
def __init__(self, name_hint):
self.__init_handle_by_constructor__(_ffi_api.GlobalVar, name_hint)
def __call__(self, *args):
"""Call the global variable.
Parameters
----------
args: List[RelayExpr]
The arguments to the call.
Returns
-------
call: BaseExpr
A call taking the variable as a function.
"""
# pylint: disable=import-outside-toplevel
if all(isinstance(x, RelayExpr) for x in args):
from tvm import relay
return relay.Call(self, args)
arg_types = [type(x) for x in args]
raise RuntimeError(
"Do not know how to handle GlobalVar.__call__ for types {}".format(arg_types))
@tvm._ffi.register_object
class Range(Node):
"""Represent a range in TVM.
You do not need to create a Range explicitly.
Python lists and tuples will be converted automatically to a Range in API functions.
Parameters
----------
begin : PrimExpr
        The begin value of the range when end is not None.
        Otherwise it is the length of the range, which then starts at zero.
end : Optional[PrimExpr]
The end value of the range.
Note
----
The constructor creates the range `[begin, end)`
if the end argument is not None. Otherwise, it creates `[0, begin)`.
"""
def __init__(self, begin, end=None):
if end is None:
self.__init_handle_by_constructor__(
_ffi_api.Range, 0, begin)
else:
self.__init_handle_by_constructor__(
_ffi_api.Range, begin, end)
@staticmethod
def from_min_extent(min_value, extent):
"""Construct a Range by min and extent.
This constructs a range in [min_value, min_value + extent)
Parameters
----------
min_value : PrimExpr
The minimum value of the range.
extent : PrimExpr
The extent of the range.
Returns
-------
rng : Range
The constructed range.
"""
return _ffi_api.Range_from_min_extent(min_value, extent)
```
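A short sketch of Range and GlobalVar usage; the names are illustrative:
```python
import tvm
from tvm import relay

# Range(10) covers [0, 10); Range(2, 10) and from_min_extent(2, 8) both cover [2, 10).
r0 = tvm.ir.Range(10)
r1 = tvm.ir.Range(2, 10)
r2 = tvm.ir.Range.from_min_extent(2, 8)

# A GlobalVar refers to a function stored in an IRModule; calling it builds a relay.Call.
gv = tvm.ir.GlobalVar("my_func")
x = relay.var("x", shape=(1, 10))
call = gv(x)
```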
#### File: relay/frontend/caffe2.py
```python
import tvm
from tvm.ir import IRModule
from .. import analysis
from .. import expr as _expr
from .. import function as _function
from .. import op as _op
from ... import nd as _nd
from .common import AttrCvt, Renamer
from .common import get_relay_op, new_var, infer_channels
__all__ = ['from_caffe2']
def dimension_picker(prefix, suffix=''):
def _impl(attr):
kernel = attr['kernel_shape']
if len(kernel) == 2:
            return prefix + '2d' + suffix
raise tvm.error.OpAttributeUnImplemented(
'Non-2D kernels are not supported for operator {}2d'.format(prefix))
return _impl
def revert_caffe2_pad(pads):
"""Caffe2 requires two times the normal padding."""
if len(pads) == 4:
pads = pads[:2]
elif len(pads) == 2:
pass
else:
raise tvm.error.OpAttributeInvalid(
'Number of pads must equal 2 or 4.')
return pads
def dimension_constraint():
def _dim_check(args):
if len(args['kernel_shape']) == 2:
return True
return False
return _dim_check, "Only 2d kernel supported."
def _clean_up_pool_args(args):
""" A helper function to clean up common arguments in conv and pooling ops.
"""
assert isinstance(args, dict)
if 'stride_h' in args and 'stride_w' in args:
assert 'stride' not in args and 'strides' not in args
args['strides'] = [args['stride_h'], args['stride_w']]
args.pop('stride_h')
args.pop('stride_w')
elif 'stride' in args:
args['strides'] = [args['stride'], args['stride']]
args.pop('stride')
# rename 'kernel', 'kernels', to 'kernel_shape'
if 'kernel_h' in args and 'kernel_w' in args:
assert 'kernel' not in args and 'kernels' not in args
args['kernel_shape'] = [args['kernel_h'], args['kernel_w']]
args.pop('kernel_h')
args.pop('kernel_w')
elif 'kernel' in args:
args['kernel_shape'] = [args['kernel'], args['kernel']]
args.pop('kernel')
elif 'kernels' in args:
args['kernel_shape'] = args['kernels']
args.pop('kernels')
if 'pad_t' in args and 'pad_l' in args and 'pad_b' in args and 'pad_r' in args:
assert 'pad' not in args and 'pads' not in args
args['pads'] = [
args['pad_t'], args['pad_l'], args['pad_b'], args['pad_r']
]
for pad in ['pad_t', 'pad_l', 'pad_b', 'pad_r']:
args.pop(pad)
elif 'pad' in args:
args['pads'] = [args['pad'], args['pad']]
args.pop('pad')
if 'dilation_h' in args and 'dilation_w' in args:
assert 'dilation' not in args and 'dilations' not in args
args['dilations'] = [args['dilation_h'], args['dilation_w']]
args.pop('dilation_h')
args.pop('dilation_w')
elif 'dilation' in args:
args['dilations'] = [args['dilation'], args['dilation']]
args.pop('dilation')
return args
class Caffe2OpConverter(object):
""" A helper class for holding Caffe2 op converters.
"""
@classmethod
def get_converter(cls):
""" Get converter.
:return: converter, which should be `_impl`.
"""
if hasattr(cls, '_impl'):
return getattr(cls, '_impl')
raise tvm.error.OpNotImplemented(
'Operator {} is not supported in frontend Caffe2.'.format(cls.__name__))
_caffe2_internal_args = [
# nnpack args
'algo',
'convolution_transform_strategy',
'float16_compute',
'shared_buffer',
# training args
'init_params',
'cudnn_exhaustive_search',
'exhaustive_search',
# training args
'adj',
'hwgq',
# args that we don't care
'legacy_pad',
]
class Elemwise(Caffe2OpConverter):
""" A helper class for elemwise op converters.
"""
name = ''
@classmethod
def _impl(cls, inputs, args, params):
assert len(inputs) == 2, "Math op take 2 inputs, {} given".format(
len(inputs))
op_name = cls.name
conv_ops = ["conv2d", "conv2d_transpose"]
if args.get('broadcast', 0) and any(x in str(inputs[0]) for x in conv_ops):
# TODO(zhreshold): remove hard coded infershape
axis = int(args.get('axis', 0))
inputs[1] = _op.expand_dims(inputs[1], axis=axis, num_newaxis=2)
return get_relay_op(op_name)(*inputs)
class Add(Elemwise):
""" Operator converter for Add.
"""
name = 'add'
class Mul(Elemwise):
""" Operator converter for Mul.
"""
name = 'multiply'
class Pool(Caffe2OpConverter):
""" A helper class for pool op converters.
"""
name = ''
@classmethod
def _impl(cls, inputs, args, params):
_clean_up_pool_args(args)
if 'global_pooling' in args and args['global_pooling'] == 1:
op_name = dimension_picker('global_' + cls.name)
return get_relay_op(op_name(args))(*inputs)
return AttrCvt(
op_name=dimension_picker(cls.name),
transforms={
'kernel_shape': 'pool_size',
'pads': ('padding', (0, 0), revert_caffe2_pad),
'strides': 'strides',
},
ignores=['dilations', 'order', 'legacy_pad', 'global_pooling'],
extras={'ceil_mode': False},
custom_check=dimension_constraint())(inputs, args, params)
class AveragePool(Pool):
name = 'avg_pool'
class MaxPool(Pool):
name = 'max_pool'
class Conv(Caffe2OpConverter):
""" Operator converter for Conv.
"""
@classmethod
def _impl(cls, inputs, args, params):
# get number of channels
channels = infer_channels(inputs[1])
args['channels'] = channels
_clean_up_pool_args(args)
out = AttrCvt(
op_name=dimension_picker('conv'),
transforms={
'group': ('groups', 1),
'kernel_shape': 'kernel_size',
'pads': ('padding', (0, 0), revert_caffe2_pad),
'strides': 'strides',
'dilations': ('dilation', (1, 1)),
'order': ('data_layout', ("NCHW"), lambda x: x if isinstance(x, str) else x.decode('UTF-8')),
},
excludes=[],
ignores=_caffe2_internal_args,
custom_check=dimension_constraint())(inputs[:2], args, params)
use_bias = len(inputs) == 3
if use_bias:
out = _op.nn.bias_add(out, inputs[2])
return out
class ConvTranspose(Caffe2OpConverter):
""" Operator converter for ConvTranspose.
"""
@classmethod
def _impl(cls, inputs, args, params):
# get number of channels
channels = infer_channels(inputs[1], True)
args['channels'] = channels
_clean_up_pool_args(args)
out = AttrCvt(
op_name=dimension_picker('conv', '_transpose'),
transforms={
'kernel_shape': 'kernel_size',
'pads': ('padding', (0, 0), revert_caffe2_pad),
'dilations': ('dilation', (1, 1)),
'order': ('data_layout', ("NCHW"), lambda x: x if isinstance(x, str) else x.decode('UTF-8')),
},
excludes=[],
ignores=_caffe2_internal_args,
custom_check=dimension_constraint())(inputs[:2], args, params)
use_bias = len(inputs) == 3
if use_bias:
out = _op.nn.bias_add(out, inputs[2])
return out
class Concat(Caffe2OpConverter):
""" Operator converter for Concat.
"""
@classmethod
def _impl(cls, inputs, args, params):
def _get_axis_from_order_str(order):
order = order if isinstance(order, str) else order.decode('UTF-8')
if order == 'NCHW':
return 1
if order == 'NHWC':
return 3
raise tvm.error.OpAttributeUnImplemented(
'Order {} is not supported in operator Concat.'.format(order))
return AttrCvt(
op_name='concatenate',
transforms={
'order': ('axis', (1), _get_axis_from_order_str),
},
excludes=['add_axis'])((inputs,), args, params)
class NormalizePlanarYUV(Caffe2OpConverter):
""" Operator converter for NormalizePlanarYUV.
caffe2 definition: https://github.com/pytorch/pytorch/blob/master/caffe2/operators/norm_planar_yuv_op.cc
"""
@classmethod
def _impl(cls, inputs, args, params):
assert len(inputs) == 3
mean = _op.expand_dims(inputs[1], axis=2, num_newaxis=2)
std = _op.expand_dims(inputs[2], axis=2, num_newaxis=2)
return _op.divide(_op.subtract(inputs[0], mean), std)
class ResizeNearest(Caffe2OpConverter):
""" Operator converter for Upsample (nearest mode).
"""
@classmethod
def _impl(cls, inputs, args, params):
width_scale = args['width_scale'] if 'width_scale' in args else 1
height_scale = args['height_scale'] if 'height_scale' in args else 1
assert width_scale == height_scale
return _op.nn.upsampling(
inputs[0], scale_h=int(width_scale), scale_w=int(width_scale), method="NEAREST_NEIGHBOR")
class Sum(Caffe2OpConverter):
""" Operator converter for Sum.
"""
@classmethod
def _impl(cls, inputs, args, params):
# Sum Operator
for in_index in range(len(inputs) - 1):
inputs[in_index + 1] = _op.add(inputs[in_index], inputs[in_index + 1])
return inputs[len(inputs) - 1]
class Softmax(Caffe2OpConverter):
""" Operator converter for Softmax.
"""
@classmethod
def _impl(cls, inputs, args, params):
# set default value when axis is not set in the model
if 'axis' not in args:
args['axis'] = 1
return AttrCvt('softmax', transforms={'axis': ('axis', args['axis'])})(inputs, args, params)
class FC(Caffe2OpConverter):
""" Operator converter for FC.
"""
@classmethod
def _impl(cls, inputs, args, params):
inputs[0] = _op.nn.batch_flatten(inputs[0])
units = infer_channels(inputs[1])
res = _op.nn.dense(inputs[0], inputs[1], units=units)
use_bias = len(inputs) == 3
if use_bias:
res = _op.nn.bias_add(res, inputs[2])
return res
class SpatialBN(Caffe2OpConverter):
""" Operator converter for SpatialBN.
"""
@classmethod
def _impl(cls, inputs, args, params):
return AttrCvt(
op_name='batch_norm',
disables=['momentum'],
ignores=[
'order', 'spatial', 'is_test', 'consumed_inputs', 'num_batches'
])(inputs, args, params)
# compatible operators that do NOT require any conversion.
_identity_list = []
# _convert_map defines maps of name to converter functor(callable)
# for 1 to 1 mapping, use Renamer if nothing but name is different
# use AttrCvt if attributes need to be converted
# for 1 to N mapping(composed), use custom callable functions
# for N to 1 mapping, currently not supported(?)
# Minimal set of ops for squeezenet and resnet50
def _get_convert_map():
return {
# caffe2 common operators
'Add': Add.get_converter(),
'Sum': Sum.get_converter(),
'Mul': Mul.get_converter(),
'Softmax': Softmax.get_converter(),
# nn
'AveragePool': AveragePool.get_converter(),
'MaxPool': MaxPool.get_converter(),
'Conv': Conv.get_converter(),
'ConvTranspose': ConvTranspose.get_converter(),
'Concat': Concat.get_converter(),
'FC': FC.get_converter(),
'SpatialBN': SpatialBN.get_converter(),
'ResizeNearest': ResizeNearest.get_converter(),
'Relu': AttrCvt('relu', {}, ignores=['order']),
'Sigmoid': Renamer('sigmoid'),
'Dropout': AttrCvt('dropout', {'ratio': 'rate'}, ignores=['is_test']),
# c2 image preprocessing ops
'NormalizePlanarYUV': NormalizePlanarYUV.get_converter(),
}
class Caffe2NetDef(object):
"""A helper class for handling Relay expression copying from pb2.GraphProto.
Definition: https://github.com/pytorch/pytorch/blob/master/caffe2/proto/caffe2.proto
"""
def __init__(self, shape, dtype):
self._nodes = {}
self._params = {}
self._visited_nodes = set()
self._ops = {}
self._shape = shape
self._dtype = dtype
self._mod = IRModule({})
def from_caffe2(self, init_net, predict_net):
"""Construct Relay expression from caffe2 graph.
Parameters
----------
init_net : protobuf object
predict_net : protobuf object
Returns
-------
mod : tvm.IRModule
The module that optimizations will be performed on.
params : dict
A dict of name: tvm.nd.array pairs, used as pretrained weights
"""
# pylint: disable=import-outside-toplevel
from caffe2.python import workspace
workspace.RunNetOnce(init_net)
# Input
input_name = predict_net.op[0].input[0]
# Params
self._params = {}
used_blobs = set()
for c2_op in predict_net.op:
for i in c2_op.input:
used_blobs.add(i)
for blob in workspace.Blobs():
if blob in used_blobs and blob != input_name:
self._params[blob] = _nd.array(workspace.FetchBlob(blob))
# Variables
self._nodes = {}
for blob in predict_net.external_input:
if blob in self._params:
self._nodes[blob] = new_var(blob, shape=self._params[blob].shape, dtype=self._params[blob].dtype)
else:
shape = self._shape[blob] if blob in self._shape else ()
if isinstance(self._dtype, dict) and blob in self._dtype:
dtype = str(self._dtype[blob])
elif isinstance(self._dtype, str):
dtype = self._dtype
else:
dtype = "float32"
self._nodes[blob] = new_var(blob, shape=shape, dtype=dtype)
# Ops
for c2_op in predict_net.op:
for blob in c2_op.output:
self._ops[blob] = c2_op
for c2_op in predict_net.op:
self._process_op(c2_op)
# Outputs
out = []
for blob in predict_net.external_output:
out.append(self._nodes[blob])
if len(out) > 1:
outputs = _expr.Tuple(out)
else:
outputs = out[0]
func = _function.Function(analysis.free_vars(outputs), outputs)
self._mod["main"] = func
return self._mod, self._params
def _get_node(self, blob):
"""Get the Symbol of blob and detect cyclic dependency in the graph."""
if blob in self._nodes:
return self._nodes[blob]
assert blob not in self._visited_nodes, 'Cyclic dependency in the graph (in {})'.format(
blob)
self._visited_nodes.add(blob)
self._process_op(self._ops[blob])
return self._nodes[blob]
def _process_op(self, c2_op):
op_type = c2_op.type
args = self._parse_arg(c2_op.arg)
inputs = [self._get_node(i) for i in c2_op.input]
tvm_op = self._convert_operator(op_type, inputs, args)
if not isinstance(tvm_op, _expr.TupleWrapper):
self._nodes[c2_op.output[0]] = tvm_op
else:
for k, i in zip(list(c2_op.output), range(len(tvm_op))):
self._nodes[k] = tvm_op[i]
def _parse_arg(self, arg):
"""Convert a list of Argument to a dict, with names as keys."""
args = {}
for a in arg:
for f in ['f', 'i', 's']:
if a.HasField(f):
args[a.name] = getattr(a, f)
for f in ['floats', 'ints', 'strings']:
if list(getattr(a, f)):
assert a.name not in args, "Only one type of attr is allowed"
args[a.name] = tuple(getattr(a, f))
for f in ['n']:
if a.HasField(f):
raise NotImplementedError(
"Field {} is not supported in relay.".format(f))
for f in ['nets']:
if list(getattr(a, f)):
raise NotImplementedError(
"Field {} is not supported in relay.".format(f))
if a.name not in args:
raise ValueError("Cannot parse attribute: \n{}\n.".format(a))
return args
def _convert_operator(self,
op_type,
inputs,
args,
identity_list=None,
convert_map=None):
"""Convert from Caffe2 operator to Relay operator.
        The converter must specify conversions explicitly for incompatible names, and
apply handlers to operator attributes.
Parameters
----------
op_type : str
Operator name, such as Convolution, FullyConnected
inputs : list of tvm.relay.function.Function
            List of input expressions.
args : dict
Dict of operator attributes
identity_list : list
List of operators that don't require conversion
convert_map : dict
Dict of name : callable, where name is the op's name that
require conversion to relay, callable are functions which
take args and return (new_op_type, new_args)
Returns
-------
func : tvm.relay.function.Function
Converted relay function
"""
identity_list = identity_list if identity_list else _identity_list
convert_map = convert_map if convert_map else _get_convert_map()
if op_type in identity_list:
func = get_relay_op(op_type)(*inputs, **args)
elif op_type in convert_map:
# Add a sanitizing step to convert all byte strings in args to strings
func = convert_map[op_type](inputs, args, self._params)
else:
raise tvm.error.OpNotImplemented(
'Operator {} is not supported in frontend Caffe2.'.format(op_type))
return func
def from_caffe2(init_net, predict_net, shape=None, dtype="float32"):
"""Load caffe2 graph which contains init_net and predict_net into Relay Function.
Parameters
----------
init_net : protobuf object
Caffe2 NetDef containing the weights
predict_net : protobuf object
Caffe2 NetDef containing the graph
shape : dict of str to tuple
The input shape to the graph
dtype : str or dict of str to str
The input types to the graph
Returns
-------
mod : tvm.IRModule
The module that optimizations will be performed on.
params : dict of str to tvm.nd.NDArray
Dict of converted parameters stored in tvm.nd.NDArray format
"""
caffe2 = Caffe2NetDef(shape, dtype)
return caffe2.from_caffe2(init_net, predict_net)
```
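A minimal end-to-end sketch; it assumes the `caffe2` python package with its `ModelDownloader` helper is installed, that "resnet50" is an available model, and that its input blob is named "data":
```python
from caffe2.python.models.download import ModelDownloader
from tvm import relay

mf = ModelDownloader()
init_net, predict_net, _ = mf.get_c2_model("resnet50")

shape_dict = {"data": (1, 3, 224, 224)}
dtype_dict = {"data": "float32"}
mod, params = relay.frontend.from_caffe2(init_net, predict_net, shape_dict, dtype_dict)
```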
#### File: op/contrib/register.py
```python
_PATTERN_TABLES = {}
def register_pattern_table(compiler, table=None):
"""Register a pattern table for an external compiler.
Pattern tables are used to create composite functions.
See the MergeComposite pass.
Parameters
----------
compiler : str
The name of compiler
table : function, optional
A function that returns the pattern table
Returns
-------
fregister : function
        The register function, returned when `table` is not supplied.
"""
def _register(t):
"""internal register function"""
_PATTERN_TABLES[compiler] = t()
return t
return _register(table) if table is not None else _register
def get_pattern_table(compiler):
"""Get the pattern table associated with a compiler (if it's registered)."""
return _PATTERN_TABLES[compiler] if compiler in _PATTERN_TABLES else None
```
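A minimal sketch of registering a pattern table for a hypothetical external compiler named "my_codegen"; the compiler name and pattern are illustrative assumptions:
```python
from tvm.relay.dataflow_pattern import is_op, wildcard
from tvm.relay.op.contrib.register import get_pattern_table, register_pattern_table

@register_pattern_table("my_codegen")
def my_codegen_patterns():
    # Match nn.conv2d followed by an elementwise add (e.g. a bias add).
    conv = is_op("nn.conv2d")(wildcard(), wildcard())
    conv_add = is_op("add")(conv, wildcard())
    return [("my_codegen.conv2d_add", conv_add)]

table = get_pattern_table("my_codegen")
```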
#### File: relay/op/_reduce.py
```python
from __future__ import absolute_import
from tvm.runtime import convert
from tvm.te.hybrid import script
from topi.util import get_const_int, get_const_tuple
from . import op as _reg
_reg.register_reduce_schedule("argmax")
_reg.register_reduce_schedule("argmin")
_reg.register_reduce_schedule("sum")
_reg.register_reduce_schedule("all")
_reg.register_reduce_schedule("any")
_reg.register_reduce_schedule("max")
_reg.register_reduce_schedule("min")
_reg.register_reduce_schedule("prod")
_reg.register_reduce_schedule("mean")
_reg.register_reduce_schedule("variance")
def _create_axis_record(attrs, inputs):
axes = attrs.axis if attrs.axis is None else list(get_const_tuple(attrs.axis))
exclude = get_const_int(attrs.exclude) > 0
keepdims = get_const_int(attrs.keepdims) > 0
data_shape = inputs[0]
shape_size = data_shape.shape[0].value
axis_record = [-1] * shape_size
if axes is None:
axes = list(range(shape_size))
for i, axis in enumerate(axes):
if axis < 0:
axes[i] = shape_size + axis
if exclude:
ex_axes = []
for i in range(shape_size):
if i not in axes:
ex_axes.append(i)
axes = ex_axes
for i in range(shape_size):
if i not in axes:
axis_record[i] = i
if not keepdims:
tmp = []
for i in axis_record:
if i >= 0:
tmp.append(i)
axis_record = tmp
return axis_record
@script
def _reduce_shape_func(data_shape, axis_record):
out = output_tensor((len(axis_record),), "int64")
for i in const_range(len(axis_record)):
if axis_record[i] >= 0:
out[i] = data_shape[axis_record[i]]
else:
out[i] = int64(1)
return out
def reduce_shape_func(attrs, inputs, _):
"""
Shape function for reduce op.
"""
axis_record = _create_axis_record(attrs, inputs)
return [_reduce_shape_func(inputs[0], convert(axis_record))]
_reg.register_shape_func("argmax", False, reduce_shape_func)
_reg.register_shape_func("argmin", False, reduce_shape_func)
_reg.register_shape_func("all", False, reduce_shape_func)
_reg.register_shape_func("sum", False, reduce_shape_func)
_reg.register_shape_func("max", False, reduce_shape_func)
_reg.register_shape_func("min", False, reduce_shape_func)
_reg.register_shape_func("prod", False, reduce_shape_func)
_reg.register_shape_func("mean", False, reduce_shape_func)
_reg.register_shape_func("variance", False, reduce_shape_func)
```
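The shape functions above implement the usual NumPy reduction shape rule; a small NumPy illustration of that rule (not TVM code):
```python
import numpy as np

x = np.zeros((2, 3, 4), dtype="float32")
assert np.sum(x, axis=1, keepdims=True).shape == (2, 1, 4)
assert np.sum(x, axis=1, keepdims=False).shape == (2, 4)
# exclude=True reduces over every axis *except* the listed ones, so axis=(1,)
# with exclude corresponds to np.sum(x, axis=(0, 2)).
assert np.sum(x, axis=(0, 2)).shape == (3,)
```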
#### File: op/vision/_vision.py
```python
from __future__ import absolute_import
import topi
from tvm.te.hybrid import script
from .. import op as reg
from .. import strategy
from ..op import OpPattern
# multibox_prior
reg.register_strategy("vision.multibox_prior", strategy.multibox_prior_strategy)
reg.register_pattern("vision.multibox_prior", OpPattern.OPAQUE)
# multibox_transform_loc
reg.register_strategy("vision.multibox_transform_loc", strategy.multibox_transform_loc_strategy)
reg.register_pattern("vision.multibox_transform_loc", OpPattern.OPAQUE)
# Get counts of valid boxes
reg.register_strategy("vision.get_valid_counts", strategy.get_valid_counts_strategy)
reg.register_pattern("vision.get_valid_counts", OpPattern.OPAQUE)
# non-maximum suppression
reg.register_strategy("vision.non_max_suppression", strategy.nms_strategy)
reg.register_pattern("vision.non_max_suppression", OpPattern.OPAQUE)
@script
def _get_valid_counts_shape_func(data_shape):
valid_counts_shape = output_tensor((1,), "int64")
out_tensor_shape = output_tensor((data_shape.shape[0],), "int64")
out_indices_shape = output_tensor((2,), "int64")
valid_counts_shape[0] = data_shape[0]
for i in const_range(data_shape.shape[0]):
out_tensor_shape[i] = data_shape[i]
out_indices_shape[0] = data_shape[0]
out_indices_shape[1] = data_shape[1]
return valid_counts_shape, out_tensor_shape, out_indices_shape
@reg.register_shape_func("vision.get_valid_counts", False)
def get_valid_counts_shape_func(attrs, inputs, _):
return _get_valid_counts_shape_func(inputs[0])
@script
def _nms_shape_func(data_shape):
out_shape = output_tensor((2,), "int64")
count_shape = output_tensor((2,), "int64")
out_shape[0] = data_shape[0]
out_shape[1] = data_shape[1]
count_shape[0] = data_shape[0]
count_shape[1] = int64(1)
return out_shape, count_shape
@reg.register_shape_func("vision.non_max_suppression", False)
def nms_shape_func(attrs, inputs, _):
if attrs.return_indices:
return _nms_shape_func(inputs[0])
return [topi.math.identity(inputs[0])]
```
#### File: coreml/model_zoo/__init__.py
```python
import os
from PIL import Image
import numpy as np
from tvm.contrib.download import download_testdata
def get_mobilenet():
url = 'https://docs-assets.developer.apple.com/coreml/models/MobileNet.mlmodel'
dst = 'mobilenet.mlmodel'
real_dst = download_testdata(url, dst, module='coreml')
return os.path.abspath(real_dst)
def get_resnet50():
url = 'https://docs-assets.developer.apple.com/coreml/models/Resnet50.mlmodel'
dst = 'resnet50.mlmodel'
real_dst = download_testdata(url, dst, module='coreml')
return os.path.abspath(real_dst)
def get_cat_image():
url = 'https://gist.githubusercontent.com/zhreshold/bcda4716699ac97ea44f791c24310193/raw/fa7ef0e9c9a5daea686d6473a62aacd1a5885849/cat.png'
dst = 'cat.png'
real_dst = download_testdata(url, dst, module='data')
img = Image.open(real_dst).resize((224, 224))
# CoreML's standard model image format is BGR
img_bgr = np.array(img)[:, :, ::-1]
img = np.transpose(img_bgr, (2, 0, 1))[np.newaxis, :]
return np.asarray(img)
```
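A usage sketch for the helpers above; it assumes network access, the `coremltools` package, and that the CoreML MobileNet input is named "image":
```python
import coremltools
from tvm import relay

model_path = get_mobilenet()   # downloads MobileNet.mlmodel
mlmodel = coremltools.models.MLModel(model_path)
img = get_cat_image()          # NCHW, BGR, shape (1, 3, 224, 224)
mod, params = relay.frontend.from_coreml(mlmodel, {"image": img.shape})
```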
#### File: relay/dyn/test_dynamic_op_level10.py
```python
import numpy as np
import tvm
from tvm import relay
from tvm.relay.testing import ctx_list, run_infer_type
import random
def test_dyn_broadcast_to():
dtype = 'uint8'
rank = 3
shape_type = 'int64'
dyn_shape = relay.Var("shape", relay.ty.TensorType((rank,), shape_type))
x_shape = (1,)
x = relay.Var("x", relay.ty.TensorType(x_shape, dtype))
z = relay.broadcast_to(x, dyn_shape)
zz = run_infer_type(z)
assert zz.checked_type == relay.ty.TensorType((relay.Any(),) * rank, dtype)
func = relay.Function([x, dyn_shape], z)
x = np.random.uniform(size=x_shape).astype(dtype)
dyn_shape = (1,)*rank
ref_res = np.broadcast_to(x, dyn_shape)
for target, ctx in ctx_list():
if (target != 'cuda'): #skip cuda because we don't have dynamic support for GPU
for kind in ["vm", "debug"]:
mod = tvm.ir.IRModule.from_expr(func)
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x,np.array(dyn_shape).astype(shape_type))
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
if __name__ == "__main__":
    test_dyn_broadcast_to()
```
#### File: python/relay/test_sparse_dense_convert.py
```python
import itertools
import numpy as np
import scipy.sparse as sp
import tvm
from tvm.ir import IRModule
from tvm import relay
def random_bsr_matrix(M, N, BS_R, BS_C, density, dtype="float32"):
Y = np.zeros((M, N), dtype=dtype)
assert M % BS_R == 0
assert N % BS_C == 0
nnz = int(density * M * N)
num_blocks = int(nnz / (BS_R * BS_C)) + 1
candidate_blocks = np.asarray(list(itertools.product(range(0, M, BS_R), range(0, N, BS_C))))
assert candidate_blocks.shape[0] == M // BS_R * N // BS_C
chosen_blocks = candidate_blocks[np.random.choice(candidate_blocks.shape[0], size=num_blocks, replace=False)]
for i in range(len(chosen_blocks)):
r, c = chosen_blocks[i]
Y[r:r+BS_R,c:c+BS_C] = np.random.randn(BS_R, BS_C)
s = sp.bsr_matrix(Y, blocksize=(BS_R, BS_C))
assert s.data.shape == (num_blocks, BS_R, BS_C)
assert s.data.size >= nnz
assert s.indices.shape == (num_blocks, )
assert s.indptr.shape == (M // BS_R + 1, )
return s
def run_func(func, params, x):
with tvm.transform.PassContext(opt_level=3):
graph, lib, new_params = relay.build(func, "llvm", params=params)
from tvm.contrib import graph_runtime
ctx = tvm.cpu(0)
dtype = 'float32'
m = graph_runtime.create(graph, lib, ctx)
# set inputs
m.set_input('data', tvm.nd.array(x.astype(dtype)))
m.set_input(**new_params)
# execute
m.run()
# get outputs
tvm_output = m.get_output(0)
return tvm_output.asnumpy()
def test_bsr_sparse_dense():
data = relay.var("data", shape=(1, 128), dtype="float32")
x = relay.nn.relu(data)
w = relay.var("weight", shape=(768, 128), dtype="float32")
y = relay.nn.dense(x, w)
z = relay.nn.relu(y)
func = relay.Function(relay.analysis.free_vars(z), z)
params = {
"weight": tvm.nd.array(random_bsr_matrix(768, 128, 32, 1, 0.1).todense())
}
x_np = np.random.randn(1, 128).astype("float32")
# dense output
dense_output = run_func(func, params, x_np)
# sparse
sparse_func, params = relay.data_dep_optimization.bsr_dense.convert(func, params, (32, 1), 0.2)
sparse_output = run_func(sparse_func, params, x_np)
np.testing.assert_allclose(sparse_output, dense_output, atol=1e-5, rtol=1e-5)
if __name__ == "__main__":
test_bsr_sparse_dense()
```
#### File: python/unittest/test_autotvm_executor.py
```python
import time
from tvm.autotvm.measure import LocalExecutor, executor
def slow(n):
r = 0
for i in range(0, n+1):
r += i
return r
def fast(n):
return n*(n+1)//2
def test_local_measure_async():
ex = LocalExecutor()
f1 = ex.submit(slow, 9999999)
f2 = ex.submit(fast, 9999999)
t1 = 0
t2 = 0
while True:
if t1 == 0 and f1.done():
t1 = time.time()
if t2 == 0 and f2.done():
t2 = time.time()
if t1 != 0 and t2 != 0:
break
assert t2 < t1, "Expected fast async job to finish first!"
assert f1.get() == f2.get()
def timeout_job(n):
time.sleep(n * 1.5)
def test_timeout():
timeout = 0.5
ex = LocalExecutor(timeout=timeout)
f1 = ex.submit(timeout_job, timeout)
while not f1.done():
pass
res = f1.get()
assert isinstance(res, executor.TimeoutError)
if __name__ == "__main__":
test_local_measure_async()
test_timeout()
```
#### File: python/unittest/test_te_hybrid_script.py
```python
import tvm, inspect, sys, traceback, numpy, pytest, types, os
from tvm import te
from tvm.contrib import util
from tvm.te.hybrid import script
from tvm.te.hybrid.runtime import HYBRID_GLOBALS
@pytest.mark.skip
def run_and_check(func, args, var_dict={}, target='llvm', sch=None, outs=None):
def tvm_val_2_py_val(val):
val = tvm.tir.stmt_functor.substitute(val, var_dict)
val = tvm.arith.Analyzer().simplify(val)
assert isinstance(val, (tvm.tir.IntImm,))
return val.value
ctx = tvm.context(target, 0)
op = None
if sch is None:
outs = func(*tuple(tvm.runtime.convert(i) if isinstance(i, list) else i for i in args))
op = outs[0].op if isinstance(outs, list) else outs.op
sch = te.create_schedule(op)
else:
assert outs is not None
assert isinstance(outs, list)
op = outs[0].op
emu_args = []
nd_args = []
for i in args:
if isinstance(i, te.tensor.Tensor):
shape = [tvm_val_2_py_val(j) for j in i.shape]
emu_args.append(numpy.random.randn(*shape).astype(i.dtype))
nd_args.append(tvm.nd.array(emu_args[-1], ctx))
elif isinstance(i, tvm.tir.Var):
emu_args.append(tvm_val_2_py_val(i))
nd_args.append(emu_args[-1])
else:
assert isinstance(i, list)
emu_args.append(numpy.array(i))
compile_args = [i for i in args if isinstance(i, (te.tensor.Tensor, tvm.tir.Var))] + \
(outs if isinstance(outs, list) else [outs])
module = tvm.build(sch,
compile_args,
target=target)
assert module
out_tensors = []
for i in range(op.num_outputs):
output = op.output(i)
shape = [tvm_val_2_py_val(j) for j in output.shape]
nd_args.append(tvm.nd.array(numpy.zeros(shape).astype(output.dtype), ctx))
out_tensors.append(nd_args[-1])
ref_data = func(*emu_args)
if isinstance(ref_data, numpy.ndarray):
ref_data = [ref_data]
module(*nd_args)
for nd, np in zip(out_tensors, ref_data):
tvm.testing.assert_allclose(nd.asnumpy(), np, rtol=1e-5, atol=1e-5)
module_args = [i for i in args if isinstance(i, (te.tensor.Tensor, tvm.tir.Var))]
module_outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
h_module = te.hybrid.build(sch, module_args, module_outs)
return h_module, module_args, module_outs
@script
def outer_product(n, m, a, b):
"""This is a simple outer product.
Actually this function is not required to be documented.
I write this docstring to test skipping docstring functionality.
"""
c = output_tensor((n, m), a.dtype)
for i in range(n):
for j in range(m):
assert i < n and j < m, "index out of range!"
c[i, j] = a[i] * b[j]
return c
#Test global function
#Test bridge between frontend and backend
def test_outer_product():
n = te.size_var('n')
m = te.size_var('m')
a = te.placeholder((n, ), name='a')
b = te.placeholder((m, ), name='b')
try:
c = outer_product(n, m, a, b)
ir = c.op.body
except IOError as err:
assert sys.version_info[0] == 2 and str(err) == 'could not get source code'
return
#Check for i in (0, n)
assert isinstance(ir, tvm.tir.For)
assert ir.loop_var.name == 'i'
assert ir.min.value == 0
assert ir.extent.name == 'n'
ibody = ir.body
assert isinstance(ibody, tvm.tir.For)
#Check for j in (0, m)
assert ibody.loop_var.name == 'j'
assert ibody.min.value == 0
assert ibody.extent.name == 'm'
#Check loop body
jblock = ibody.body
assert isinstance(jblock, tvm.tir.SeqStmt)
jbody = jblock[0]
assert isinstance(jbody, tvm.tir.AssertStmt)
assert isinstance(jbody.message, tvm.tir.StringImm)
assert jbody.message.value == "index out of range!"
jbody = jblock[1]
assert isinstance(jbody, tvm.tir.ProducerStore)
assert jbody.producer.op.name == 'c'
assert len(jbody.indices) == 2
assert jbody.indices[0].name == 'i'
assert jbody.indices[1].name == 'j'
assert isinstance(jbody.value, tvm.tir.Mul)
mul = jbody.value
assert isinstance(mul.a, tvm.tir.ProducerLoad)
assert mul.a.producer.name == 'a'
assert mul.b.producer.name == 'b'
func, ins, outs = run_and_check(outer_product, [n, m, a, b], {n: 99, m: 101})
temp = util.tempdir()
path = temp.relpath('%s.py' % func.name)
func.save(path)
func_ = te.hybrid.HybridModule()
func_.load(path)
run_and_check(func_, ins, {n: 99, m: 101}, outs=outs)
for key, _ in HYBRID_GLOBALS.items():
assert key not in globals().keys()
assert key not in outer_product.__globals__.keys()
#Test local function
#Test allocation of local variable
def test_fanout():
@script
def fanout(n, a):
three = 3.0
b = output_tensor((a.shape[0] - 3, ), a.dtype)
for i in range(a.shape[0] - 3):
sigma = 0.0
for j in range(3):
sigma += a[i + j]
sigma = sigma / three
b[i] = sigma
return b
n = te.size_var('n')
a = te.placeholder((n, ), 'float32', name='a')
try:
b = fanout(n, a)
ir = b.op.body
except IOError as err:
assert sys.version_info[0] == 2 and str(err) == 'could not get source code'
return
#Check for i in (0, n-3)
assert isinstance(ir, tvm.tir.For)
assert ir.loop_var.name == 'i'
assert ir.min.value == 0
assert tvm.ir.structural_equal(ir.extent, n - 3)
#Check loopbody
ibody = ir.body
assert isinstance(ibody, tvm.tir.AttrStmt)
abody = ibody.body
assert isinstance(abody, tvm.tir.ProducerRealize)
assert abody.bounds[0].min.value == 0
assert abody.bounds[0].extent.value == 1
assert abody.producer.op.name == 'sigma'
#Check i loop body
rbody = abody.body
assert isinstance(rbody[0], tvm.tir.ProducerStore)
assert rbody[0].producer.op.name == 'sigma'
assert len(rbody[0].indices) == 1
assert rbody[0].indices[0].value == 0
#Check fanout loop
jloop = rbody[1]
assert jloop.loop_var.name == 'j'
assert jloop.min.value == 0
assert jloop.extent.value == 3
jbody = jloop.body
assert isinstance(jbody, tvm.tir.ProducerStore)
assert len(jbody.indices) == 1
assert jbody.indices[0].value == 0
assert jbody.producer.op.name == 'sigma'
assert isinstance(jbody.value, tvm.tir.Add)
value = jbody.value
assert isinstance(value.a, tvm.tir.ProducerLoad)
assert value.a.producer.name == 'sigma'
assert len(value.a.indices) == 1
assert value.a.indices[0].value == 0
assert value.b.producer.name == 'a'
assert len(value.b.indices) == 1
assert tvm.ir.structural_equal(value.b.indices[0], ir.loop_var + jloop.loop_var)
    divide = rbody[2]
assert isinstance(divide, tvm.tir.ProducerStore)
assert len(divide.indices) == 1
assert divide.indices[0].value == 0
value = divide.value
assert isinstance(value, tvm.tir.Mul)
assert value.a.producer.name == 'sigma'
assert len(value.a.indices) == 1
assert value.a.indices[0].value == 0
assert abs(value.b.value - (1 / 3.0)) < 1e-5
write = rbody[3]
assert isinstance(write, tvm.tir.ProducerStore)
assert write.producer.op.name == 'b'
assert write.value.producer.name == 'sigma'
assert len(write.value.indices) == 1
assert write.value.indices[0].value == 0
func, ins, outs = run_and_check(fanout, [n, a], {n: 10})
run_and_check(func, ins, {n: 10}, outs=outs)
def test_looptype():
@script
def looptype(a, b, c):
d = output_tensor((16, ), 'int32')
e = output_tensor((16, ), 'int32')
f = output_tensor((16, ), 'int32')
for i in parallel(16):
d[i] = a[i]
for j in vectorize(16):
e[j] = b[j]
for k in unroll(16):
f[k] = c[k]
return d, e, f
a = te.placeholder((16, ), name='a', dtype='int32')
b = te.placeholder((16, ), name='b', dtype='int32')
c = te.placeholder((16, ), name='c', dtype='int32')
try:
d, e, f = looptype(a, b, c)
ir = d.op.body
except:
return
iloop = ir[0]
jloop = ir[1]
kloop = ir[2]
assert iloop.for_type == tvm.tir.For.Parallel
assert jloop.for_type == tvm.tir.For.Vectorized
assert kloop.for_type == tvm.tir.For.Unrolled
func, ins, outs = run_and_check(looptype, [a, b, c])
run_and_check(func, ins, outs=outs)
def test_if():
@script
def if_then_else(a):
b = output_tensor((10, ), 'int32')
c = output_tensor((10, ), 'int32')
for i in range(10):
if i % 2 == 0:
c[i] = a[i]
else:
c[i] = b[i]
for i in unroll(10):
b[i] = -1 if i % 2 == 0 else 1
return b, c
a = te.placeholder((10, ), dtype='int32', name='a')
func, ins, outs = run_and_check(if_then_else, [a])
run_and_check(func, ins, outs=outs)
@script
def if_triple_condition(a):
b = output_tensor((10, ), 'int32')
for i in range(10):
if 0 <= i < 5:
b[i] = a[i]
else:
b[i] = a[i] + 1
return b
func, ins, outs = run_and_check(if_triple_condition, [a])
run_and_check(func, ins, outs=outs)
@script
def if_and(a):
b = output_tensor((10, ), 'int32')
for i in range(10):
if i >= 0 and i < 5:
b[i] = a[i]
else:
b[i] = a[i] + 1
return b
func, ins, outs = run_and_check(if_and, [a])
run_and_check(func, ins, outs=outs)
def test_bind():
if not tvm.gpu(0).exist:
print('[Warning] No GPU found! Skip bind test!')
return
@script
def vec_add(a, b):
c = output_tensor((1000, ), 'float32')
for tx in bind('threadIdx.x', 1000):
c[tx] = a[tx] + b[tx]
return c
a = te.placeholder((1000, ), dtype='float32', name='a')
b = te.placeholder((1000, ), dtype='float32', name='b')
func, ins, outs = run_and_check(vec_add, [a, b], target='cuda')
run_and_check(func, ins, outs=outs, target='cuda')
@script
def raw(a, b):
c = output_tensor((1000, ), 'float32')
for i in range(1000):
c[i] = a[i] + b[i]
return c
c = raw(a, b)
sch = te.create_schedule(c.op)
x = te.thread_axis('threadIdx.x')
sch[c].bind(c.op.axis[0], x)
func, ins, outs = run_and_check(raw, [a, b], sch=sch, outs=[c], target='cuda')
run_and_check(func, ins, outs=outs, target='cuda')
@te.hybrid.script
def foo(a):
c = output_tensor((a.shape[0],), a.dtype)
total = allocate((1,), a.dtype, 'local')
len_i = a.shape[0]
len_j = a.shape[1]
for i in bind('threadIdx.x', len_i):
total[0] = 0.
for k in const_range(len_j):
total[0] += a[i, k]
c[i] = total[0]
return c
a = te.placeholder((8, 4), 'float32')
c = foo(a)
s = te.create_schedule(c.op)
ir = tvm.lower(s, [a, c])
func, ins, outs = run_and_check(foo, [a], target='cuda')
run_and_check(func, ins, outs=outs, target='cuda')
@te.hybrid.script
def max_threads(a):
b = output_tensor(a.shape, a.dtype)
n = a.shape[0]
m = max_num_threads(True)
for i in bind('threadIdx.x', m):
for j in bind('blockIdx.x', ceil_div(n, m)):
if i * m + j < n:
b[i * m + j] = a[i * m + j] + a[i * m + j]
return b
a = te.placeholder((10000, ), 'float32')
with tvm.target.create('cuda'):
func, ins, outs = run_and_check(max_threads, [a], target='cuda')
run_and_check(func, ins, outs=outs, target='cuda')
def test_math_intrin():
@script
def intrin_real(a):
b = output_tensor((8, ), 'float32')
b[0] = sqrt(a[0])
b[1] = log(a[1])
b[2] = exp(a[2])
b[3] = sigmoid(a[3])
b[4] = power(a[4], a[5])
b[5] = tanh(a[5])
b[6] = min(a[4], a[5])
b[7] = max(a[5], a[6])
return b
a8 = te.placeholder((8, ), dtype='float32', name='a')
b8 = intrin_real(a8)
sch = te.create_schedule(b8.op)
func = tvm.build(sch, [a8, b8])
assert func
a = numpy.arange(2, 10).astype('float32')
tvm_a = tvm.nd.array(a)
tvm_b = tvm.nd.array(numpy.zeros((8, ), dtype='float32'))
b = intrin_real(a)
func(tvm_a, tvm_b)
tvm.testing.assert_allclose(b, tvm_b.asnumpy(), rtol=1e-5)
@script
def intrin_int(a):
b = output_tensor((1, ), 'int32')
b[0] = popcount(a[0])
return b
a1 = te.placeholder((1, ), dtype='int32')
b1 = intrin_int(a1)
sch = te.create_schedule(b1.op)
func = tvm.build(sch, [a1, b1])
assert func
a = numpy.array([114514]).astype('int32')
tvm_a = tvm.nd.array(a)
tvm_b = tvm.nd.array(numpy.array([0]).astype('int32'))
b = intrin_int(a)
func(tvm_a, tvm_b)
assert tvm_b.asnumpy()[0] == b[0]
# test non caconical loops
def test_non_zero():
@te.hybrid.script
def blur(a):
b = output_tensor((30, 30), 'float32')
for i in range(2, 32):
for j in range(2, 32):
s = 0.0
for di in range(3):
for dj in range(3):
s += a[i-di, j-dj]
b[i-2, j-2] = s / 9.0
return b
a = te.placeholder((32, 32), 'float32', 'a')
func, ins, outs = run_and_check(blur, [a])
run_and_check(func, ins, outs=outs)
@te.hybrid.script
def triangle(a, b):
c = output_tensor((10, 10), dtype='float32')
for i in range(10):
for j in range(i, 10):
c[i, j] = a[i] * b[j]
return c
a = te.placeholder((10, ), dtype='float32', name='a')
b = te.placeholder((10, ), dtype='float32', name='b')
func, ins, outs = run_and_check(triangle, [a, b])
run_and_check(func, ins, outs=outs)
def test_allocate():
@te.hybrid.script
def blur2d(a):
b = output_tensor((30, 30), 'float32')
for i in range(30):
ha = allocate((3, 30), 'float32')
for j in range(3):
for k in range(30):
ha[j, k] = a[i+j, k] + a[i+j, k+1] + a[i+j, k+2]
for j in range(30):
b[i, j] = (ha[0, j] + ha[1, j] + ha[2, j]) / 9.0
return b
a = te.placeholder((32, 32), 'float32', 'a')
b = blur2d(a)
sch = te.create_schedule(b.op)
func, ins, outs = run_and_check(blur2d, [a])
run_and_check(func, ins, outs=outs)
if tvm.gpu().exist:
@te.hybrid.script
def share_vec_add(a, b):
c = output_tensor((256, ), 'float32')
shared = allocate((256, ), 'float32', 'shared')
for i in bind("threadIdx.x", 256):
shared[i] = a[i]
local = allocate((256, ), 'float32', 'local')
for i in bind("threadIdx.x", 256):
local[i] = b[i]
for i in bind("threadIdx.x", 256):
c[i] = shared[i] + local[i]
return c
a = te.placeholder((256, ), dtype='float32', name='a')
b = te.placeholder((256, ), dtype='float32', name='b')
c = share_vec_add(a, b)
func, ins, outs = run_and_check(share_vec_add, [a, b], target='cuda')
run_and_check(func, ins, outs=outs, target='cuda')
else:
print('[Warning] No GPU found! Skip shared mem test!')
def test_upstream():
@te.hybrid.script
def upstream(a):
b = output_tensor((20, ), 'float32')
for i in range(20):
b[i] = a[i] * i
return b
a = te.placeholder((20, ), 'float32')
b = te.placeholder((20, ), 'float32')
c = te.compute((20, ), lambda x: a[x] + b[x])
d = upstream(c)
sch = te.create_schedule([c.op, d.op])
ir = tvm.lower(sch, [a, b, d])
func = tvm.build(sch, [a, b, d])
assert(func)
a = numpy.random.randn(20).astype('float32')
b = numpy.random.randn(20).astype('float32')
ref = numpy.zeros((20, ), 'float32')
for i in range(20):
ref[i] = (a[i] + b[i]) * i
tvm_a = tvm.nd.array(a)
tvm_b = tvm.nd.array(b)
tvm_d = tvm.nd.array(numpy.zeros((20, )).astype('float32'))
func(tvm_a, tvm_b, tvm_d)
tvm.testing.assert_allclose(tvm_d.asnumpy(), ref, 1e-5, 1e-5)
def test_downstream():
@te.hybrid.script
def downstream(a):
b = output_tensor((20, ), 'float32')
for i in range(20):
b[i] = a[i] * i
return b
a = te.placeholder((20, ), 'float32')
b = downstream(a)
c = te.compute((20, ), lambda x: b[x] + 1.0)
sch = te.create_schedule(c.op)
module = tvm.build(sch, [a, c])
assert module
a = numpy.random.randn(20).astype('float32')
ref = numpy.zeros((20, )).astype('float32')
for i in range(20):
ref[i] = (a[i] * i) + 1.0
tvm_a = tvm.nd.array(a)
tvm_c = tvm.nd.array(numpy.zeros((20, )).astype('float32'))
module(tvm_a, tvm_c)
tvm.testing.assert_allclose(tvm_c.asnumpy(), ref, 1e-5, 1e-5)
def test_const_param():
@te.hybrid.script
def add_something(a, b):
c = output_tensor((11, ), 'int32')
for i in range(11):
c[i] = a[i] + b
return c
a = te.placeholder((11, ), dtype='int32', name='a')
b = tvm.tir.const(11, 'int32')
c = add_something(a, b)
sch = te.create_schedule(c.op)
module = tvm.build(sch, [a, c], 'llvm')
assert(module)
np_a = numpy.arange(11).astype('int32')
np_b = 11
np_c = numpy.zeros((11, )).astype('int32')
nd_a = tvm.nd.array(np_a)
nd_c = tvm.nd.array(numpy.zeros((11, )).astype('int32'))
module(nd_a, nd_c)
ref = add_something(np_a, 11)
tvm.testing.assert_allclose(nd_c.asnumpy(), ref, 1e-5, 1e-5)
def test_value_index():
@te.hybrid.script
def kernel_a(a):
b = output_tensor((16, ), 'int32')
c = output_tensor((4, 4), 'int32')
for i in range(16):
b[i] = a[i] + 2
c[i // 4, i % 4] = a[i] + 1
return b, c
@te.hybrid.script
def kernel_b(b, a):
c = output_tensor((4, 4), 'int32')
for i in range(4):
for j in range(4):
c[i, j] = a[i * 4 + j] * b[i, j]
return c
a = te.placeholder((16, ), 'int32')
b, c = kernel_a(a)
d = kernel_b(c, b)
sch = te.create_schedule(d.op)
module = tvm.build(sch, [a, d])
assert module
np_a = numpy.arange(16).astype('int32')
np_b, np_c = kernel_a(np_a)
ref = kernel_b(np_c, np_b)
res = tvm.nd.array(numpy.zeros((4, 4)).astype('int32'))
module(tvm.nd.array(np_a), res)
tvm.testing.assert_allclose(res.asnumpy(), ref)
def test_func_call():
@te.hybrid.script
def foo(a, b):
for i in range(len(a)):
a[i] = i + 1.0
for i in range(len(a)):
b[i] = i + 1.0
c = outer_product(10, 10, a, b)
d = output_tensor(c.shape, c.dtype)
for i in range(10):
for j in range(10):
d[i, j] = c[i, j] + i * j
return d
a = te.placeholder((10, ), name='a')
b = te.placeholder((10, ), name='b')
func, ins, outs = run_and_check(foo, [a, b])
run_and_check(func, ins, outs=outs)
def test_bool():
@te.hybrid.script
def foo(a):
b = output_tensor(a.shape, a.dtype)
b[0] = 1.2
for i in range(1, a.shape[0] - 1):
if a[i] * a[i - 1] < a[i] or a[i] * a[i - 1] < a[i - 1] or i * a[i] == a[i]:
b[i] = a[i]
else:
b[i] = 0.0
return b
a = te.placeholder((10, ), name='a')
func, ins, outs = run_and_check(foo, [a])
run_and_check(func, ins, outs=outs)
def test_const_range():
@te.hybrid.script
def foo(a, b):
c = output_tensor(a.shape, a.dtype)
d = output_tensor(a.shape, 'int32')
for i in const_range(2):
for j in const_range(5):
c[i, j] = float32(int32(a[i, j]) + b[i, j])
for i in const_range(len(b)):
for j in const_range(len(b[0])):
d[i, j] = int32(a[i, j] + b[i, j])
return c, d
a = te.placeholder((2, 5), name='a', dtype='float32')
b = [[1, 2, 3, 4, 5], [5, 4, 3, 2, 1]]
func, ins, outs = run_and_check(foo, [a, b])
run_and_check(func, ins, outs=outs)
@te.hybrid.script
def goo(a, b):
c = output_tensor(a.shape, a.dtype)
len_b = len(b)
for i in const_range(len_b * 2):
if i < len_b:
c[i] = a[i] + b[i]
else:
c[i - len_b] = a[i - len_b] + b[i - len_b]
return c
a = te.placeholder((5, ), name='a', dtype='int32')
b = [1, 2, 3, 4, 5]
c = goo(a, tvm.runtime.convert(b))
sch = te.create_schedule(c.op)
func, ins, outs = run_and_check(goo, [a, b])
run_and_check(func, ins, outs=outs)
@te.hybrid.script
def hoo(a, b):
c = output_tensor(a.shape, a.dtype)
len_b = len(b)
for i in range(a.shape[0]):
for j in const_range(len(b)):
d = a[i] * b[j]
d += a[i] + b[j]
c[i] = d
return c
a = te.placeholder((5, ), name='a', dtype='int32')
b = [1, 2, 3, 4, 5]
func, ins, outs = run_and_check(hoo, [a, b])
run_and_check(func, ins, outs=outs)
def test_schedule():
@script
def outer_product(a, b):
c = output_tensor((64, 64), a.dtype)
for i in range(64):
for j in range(64):
c[i, j] = a[i] * b[j]
return c
a = te.placeholder((64,), name='a', dtype='float32')
b = te.placeholder((64,), name='b', dtype='float32')
c = outer_product(a, b)
# Test perfect loop split
# Test loop reorder
# Test loop annotation
sch = te.create_schedule(c.op)
i, j = c.op.axis
io, ii = sch[c].split(i, 4)
sch[c].parallel(ii)
jo, ji = sch[c].split(j, 4)
joo, joi = sch[c].split(jo, 4)
sch[c].vectorize(ji)
sch[c].reorder(ii, io, joo, joi, ji)
ir = tvm.lower(sch, [a, b, c])["main"].body
assert isinstance(ir, tvm.tir.AttrStmt)
ir = ir.body
assert isinstance(ir, tvm.tir.For)
assert ir.loop_var.name == 'i.inner'
ir = ir.body
assert isinstance(ir, tvm.tir.For)
assert ir.loop_var.name == 'i.outer'
ir = ir.body
assert isinstance(ir, tvm.tir.For)
assert ir.loop_var.name == 'j.outer.outer'
ir = ir.body
assert isinstance(ir, tvm.tir.For)
assert ir.loop_var.name == 'j.outer.inner'
ir = ir.body
func, ins, outs = run_and_check(outer_product, [a, b], sch=sch, outs=[c])
run_and_check(func, ins, outs=outs)
# Test fuse
sch = te.create_schedule(c.op)
sch[c].fuse(c.op.axis[0], c.op.axis[1])
ir = tvm.lower(sch, [a, b, c])["main"].body
assert isinstance(ir, tvm.tir.AttrStmt)
ir = ir.body
assert isinstance(ir, tvm.tir.For)
assert ir.loop_var.name == 'i.j.fused'
func, ins, outs = run_and_check(outer_product, [a, b], sch=sch, outs=[c])
run_and_check(func, ins, outs=outs)
# Test imperfect loop split
sch = te.create_schedule(c.op)
sch[c].split(c.op.axis[0], 3)
ir = tvm.lower(sch, [a, b, c], simple_mode=True)
func, ins, outs = run_and_check(outer_product, [a, b], sch=sch, outs=[c])
run_and_check(func, ins, outs=outs)
# Test loop binds
def test_capture():
n = 8
constant_tuple = (10, n)
constant_list = [[1, 2], [3, n]]
const_value = 1
@te.hybrid.script
def add_something(a):
c = output_tensor((constant_tuple[1],), 'int32')
for i in range(constant_tuple[1]):
c[i] = a[i] + constant_list[1][const_value]
return c
a = te.placeholder((n, ), dtype='int32', name='a')
func, ins, outs = run_and_check(add_something, [a])
run_and_check(func, ins, outs=outs)
def test_array_inputs():
@script
def sum_array(inputs):
out = output_tensor((10,), inputs[0].dtype)
n = len(inputs)
for i in range(10):
for j in const_range(n):
out[i] += inputs[j][i]
return out
n = 5
inputs = []
for i in range(n):
inputs.append(te.placeholder((10,), name='t%s' % i, dtype='float32'))
out = sum_array(tvm.runtime.convert(inputs))
assert len(out.op.inputs) == n
sch = te.create_schedule(out.op)
mod = tvm.build(sch, inputs + [out], target='llvm')
assert mod
input_nd = []
out_ref = numpy.zeros((10,))
for _ in range(n):
arr = numpy.random.uniform(size=(10,)).astype('float32')
input_nd.append(tvm.nd.array(arr))
out_ref += arr
out_nd = tvm.nd.array(numpy.zeros((10,), 'float32'))
mod(*input_nd, out_nd)
tvm.testing.assert_allclose(out_nd.asnumpy(), out_ref)
if __name__ == "__main__":
test_outer_product()
test_fanout()
test_looptype()
test_if()
test_bind()
test_math_intrin()
test_non_zero()
test_allocate()
test_upstream()
test_downstream()
test_const_param()
test_value_index()
test_func_call()
test_bool()
test_const_range()
test_schedule()
test_capture()
test_array_inputs()
# TODO:
# test_inplace()
```
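For readers new to the hybrid-script frontend exercised by the tests above, here is a minimal stand-alone sketch; the kernel name `scale_by_index` and the sizes are illustrative only and not part of the test suite:

```python
# Minimal, stand-alone hybrid-script sketch mirroring the patterns tested above.
# The kernel name and sizes are illustrative only.
import numpy
import tvm
import tvm.testing
from tvm import te

@te.hybrid.script
def scale_by_index(a):
    b = output_tensor((16, ), 'float32')
    for i in range(16):
        b[i] = a[i] * i
    return b

a = te.placeholder((16, ), 'float32', name='a')
b = scale_by_index(a)
sch = te.create_schedule(b.op)
module = tvm.build(sch, [a, b])

nd_a = tvm.nd.array(numpy.arange(16, dtype='float32'))
nd_b = tvm.nd.array(numpy.zeros((16, ), 'float32'))
module(nd_a, nd_b)
tvm.testing.assert_allclose(nd_b.asnumpy(), numpy.arange(16, dtype='float32') ** 2)
```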
#### File: python/unittest/test_te_verify_compute.py
```python
import tvm
from tvm import te
def test_verify_compute():
n = te.size_var("n")
m = te.size_var("m")
A = te.placeholder((n, m), name='A')
k = te.reduce_axis((0, m), "k")
k_ = te.reduce_axis((0, m-1), "k_")
f1 = lambda i: te.sum(A[i, k], axis=k)
f2 = lambda i: A[i,0] + 1
f3 = lambda i: te.sum(A[i, k], axis=k) + 1
f4 = lambda i: A[i,0] * (te.sum(A[i, k], axis=k) + 1)
f5 = lambda i: (te.sum(A[i, k], axis=k), A[i,0] + 1)
f6 = lambda i: (te.sum(A[i, k], axis=k), te.sum(A[i, k_], axis=k_))
#
# Valid compute
try:
B = te.compute((n,), f1, name="B")
except tvm._ffi.base.TVMError as ex:
assert False
#
# Valid compute
try:
B = te.compute((n,), f2, name="B")
except tvm._ffi.base.TVMError as ex:
assert False
#
# Invalid compute with non top level reduction
try:
B = te.compute((n,), f3, name="B")
assert False
except tvm._ffi.base.TVMError as ex:
pass
#
# Invalid compute with non top level reduction
try:
B = te.compute((n,), f4, name="B")
assert False
except tvm._ffi.base.TVMError as ex:
pass
#
# Invalid compute with reduction and non-reduction batch ops
try:
B0, B1 = te.compute((n,), f5, name="B")
assert False
except tvm._ffi.base.TVMError as ex:
pass
#
# Invalid compute with unequal batch reduction ops
try:
B0, B1 = te.compute((n,), f6, name="B")
assert False
except tvm._ffi.base.TVMError as ex:
pass
if __name__ == "__main__":
test_verify_compute()
```
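The try/except pattern above can be expressed more compactly with `pytest.raises`; this is only a hedged sketch, assuming pytest is available, and it mirrors just two representative checks:

```python
# Hedged sketch: the same valid/invalid checks expressed with pytest.raises.
# Assumes pytest is installed; only two representative cases are shown.
import pytest
import tvm
from tvm import te

def test_verify_compute_with_pytest():
    n = te.size_var("n")
    m = te.size_var("m")
    A = te.placeholder((n, m), name='A')
    k = te.reduce_axis((0, m), "k")
    # Valid: the reduction is the top-level expression.
    te.compute((n,), lambda i: te.sum(A[i, k], axis=k), name="B")
    # Invalid: the reduction is nested inside another expression.
    with pytest.raises(tvm._ffi.base.TVMError):
        te.compute((n,), lambda i: te.sum(A[i, k], axis=k) + 1, name="B")
```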
#### File: python/topi/argwhere.py
```python
from tvm.te import hybrid
@hybrid.script
def hybrid_argwhere_1d(output_shape, condition):
"""Find the indices of elements of a 1-D tensor that are non-zero.
Parameters
----------
condition : tvm.te.Tensor
1-D tensor with boolean values.
Returns
-------
out : tvm.te.Tensor
Indices of non-zero elements.
"""
a = output_tensor(output_shape, "int32")
a1 = condition.shape[0]
valid_index = 0
for i1 in range(a1):
if condition[i1] != 0:
a[valid_index, 0] = i1
valid_index += 1
return a
@hybrid.script
def hybrid_argwhere_2d(output_shape, condition):
"""Find the indices of elements of a 2-D tensor that are non-zero.
Parameters
----------
condition : tvm.te.Tensor
2-D tensor with boolean values.
Returns
-------
out : tvm.te.Tensor
Indices of non-zero elements.
"""
a = output_tensor(output_shape, "int32")
a1 = condition.shape[0]
a2 = condition.shape[1]
valid_index = 0
for i1 in range(a1):
for i2 in range(a2):
if condition[i1, i2] != 0:
a[valid_index, 0] = i1
a[valid_index, 1] = i2
valid_index += 1
return a
@hybrid.script
def hybrid_argwhere_3d(output_shape, condition):
"""Find the indices of elements of a 3-D tensor that are non-zero.
Parameters
----------
condition : tvm.te.Tensor
3-D tensor with boolean values.
Returns
-------
out : tvm.te.Tensor
Indices of non-zero elements.
"""
a = output_tensor(output_shape, "int32")
a1 = condition.shape[0]
a2 = condition.shape[1]
a3 = condition.shape[2]
valid_index = 0
for i1 in range(a1):
for i2 in range(a2):
for i3 in range(a3):
if condition[i1, i2, i3] != 0:
a[valid_index, 0] = i1
a[valid_index, 1] = i2
a[valid_index, 2] = i3
valid_index += 1
return a
@hybrid.script
def hybrid_argwhere_4d(output_shape, condition):
"""Find the indices of elements of a 4-D tensor that are non-zero.
Parameters
----------
condition : tvm.te.Tensor
4-D tensor with boolean values.
Returns
-------
out : tvm.te.Tensor
Indices of non-zero elements.
"""
a = output_tensor(output_shape, "int32")
a1 = condition.shape[0]
a2 = condition.shape[1]
a3 = condition.shape[2]
a4 = condition.shape[3]
valid_index = 0
for i1 in range(a1):
for i2 in range(a2):
for i3 in range(a3):
for i4 in range(a4):
if condition[i1, i2, i3, i4] != 0:
a[valid_index, 0] = i1
a[valid_index, 1] = i2
a[valid_index, 2] = i3
a[valid_index, 3] = i4
valid_index += 1
return a
@hybrid.script
def hybrid_argwhere_5d(output_shape, condition):
"""Find the indices of elements of a 5-D tensor that are non-zero.
Parameters
----------
condition : tvm.te.Tensor
5-D tensor with boolean values.
Returns
-------
out : tvm.te.Tensor
Indices of non-zero elements.
"""
a = output_tensor(output_shape, "int32")
a1 = condition.shape[0]
a2 = condition.shape[1]
a3 = condition.shape[2]
a4 = condition.shape[3]
a5 = condition.shape[4]
valid_index = 0
for i1 in range(a1):
for i2 in range(a2):
for i3 in range(a3):
for i4 in range(a4):
for i5 in range(a5):
if condition[i1, i2, i3, i4, i5] != 0:
a[valid_index, 0] = i1
a[valid_index, 1] = i2
a[valid_index, 2] = i3
a[valid_index, 3] = i4
a[valid_index, 4] = i5
valid_index += 1
return a
def argwhere(output_shape, condition):
"""Find the indices of elements of a tensor that are non-zero.
Parameters
----------
    output_shape : tvm.te.Tensor
        Tensor whose shape gives the shape of the output buffer.
    condition : tvm.te.Tensor
        Tensor with boolean values.
Returns
-------
out : tvm.te.Tensor
Indices of non-zero elements.
"""
if len(condition.shape) == 1:
return hybrid_argwhere_1d(output_shape.shape, condition)
if len(condition.shape) == 2:
return hybrid_argwhere_2d(output_shape.shape, condition)
if len(condition.shape) == 3:
return hybrid_argwhere_3d(output_shape.shape, condition)
if len(condition.shape) == 4:
return hybrid_argwhere_4d(output_shape.shape, condition)
if len(condition.shape) == 5:
return hybrid_argwhere_5d(output_shape.shape, condition)
raise ValueError("Does not support rank higher than 5 in argwhere")
```
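For reference, NumPy's `argwhere` yields the same row-major ordering of non-zero indices that the hybrid kernels above produce; a short illustration for the 2-D case:

```python
# Illustration only: NumPy's argwhere matches the row-major index order
# produced by the 2-D hybrid kernel above.
import numpy as np

cond = np.array([[0, 3],
                 [0, 0],
                 [7, 1]], dtype="int32")
print(np.argwhere(cond))
# [[0 1]
#  [2 0]
#  [2 1]]
```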
#### File: topi/arm_cpu/conv2d_transpose.py
```python
from __future__ import absolute_import as _abs
import tvm
from tvm import te
from tvm import autotvm
from ..nn import dilate, pad, get_pad_tuple
from ..util import get_const_tuple, traverse_inline
from .conv2d_spatial_pack import schedule_conv2d_spatial_pack_nchw
@autotvm.register_topi_compute("conv2d_transpose_nchw.arm_cpu")
def conv2d_transpose_nchw(cfg, Input, Filter, strides, padding, out_dtype,
output_padding):
"""Transposed 2D convolution nchw forward operator.
Parameters
----------
Input : tvm.te.Tensor
4-D with shape [batch, in_channel, in_height, in_width]
Filter : tvm.te.Tensor
4-D with shape [in_channel, num_filter, filter_height, filter_width]
strides : tuple of two ints
The spatial stride along height and width
padding : int or str
Padding size, or ['VALID', 'SAME']
out_dtype: str
The output data type. This is used for mixed precision.
output_padding : tuple of int
Used to get the right output shape in gradients
Returns
-------
Output : tvm.te.Tensor
4-D with shape [batch, out_channel, out_height, out_width]
"""
return _decl_spatial_pack(cfg, Input, Filter, strides, padding, "NCHW", out_dtype, 2,
output_padding)
def _decl_spatial_pack(cfg, data, kernel, strides, padding, layout, out_dtype, num_tile,
output_padding):
assert layout == "NCHW", "Only support NCHW"
out_dtype = out_dtype or data.dtype
N, CI, IH, IW = get_const_tuple(data.shape)
_, CO, KH, KW = get_const_tuple(kernel.shape)
HSTR, WSTR = strides if isinstance(strides, (tuple, list)) else (strides, strides)
opad_h, opad_w = output_padding
assert opad_h < HSTR and opad_w < WSTR
pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (KH, KW))
bpad_top, bpad_bottom = KH - 1 - pad_top, KH - 1 - pad_bottom + opad_h
bpad_left, bpad_right = KW - 1 - pad_left, KW - 1 - pad_right + opad_w
OH = (IH - 1) * HSTR - pad_top - pad_bottom + KH + opad_h
OW = (IW - 1) * WSTR - pad_left - pad_right + KW + opad_w
dilated_input = dilate(data, [1, 1, HSTR, WSTR])
data_pad = pad(dilated_input, [0, 0, bpad_top, bpad_left], [0, 0, bpad_bottom, bpad_right])
# ==================== define configuration space ====================
n, co, oh, ow = cfg.axis(N), cfg.axis(CO), cfg.axis(OH), cfg.axis(OW)
ci, kh, kw = cfg.reduce_axis(CI), cfg.reduce_axis(KH), cfg.reduce_axis(KW)
if num_tile == 2: # for arm cpu
co, vc = cfg.define_split('tile_co', co, num_outputs=2)
oh, vh = cfg.define_split('tile_oh', oh, num_outputs=2)
ow, vw = cfg.define_split('tile_ow', ow, num_outputs=2)
elif num_tile == 3: # for mali gpu
co, _, vc = cfg.define_split('tile_co', co, num_outputs=3)
oh, _, vh = cfg.define_split('tile_oh', oh, num_outputs=3)
ow, _, vw = cfg.define_split('tile_ow', ow, num_outputs=3)
else:
raise RuntimeError("Invalid num_tile")
cfg.define_reorder("reorder_0",
[n, co, oh, ow, ci, kh, kw, vh, vw, vc],
policy='candidate', candidate=[
[n, co, oh, ow, ci, kh, kw, vh, vw, vc],
[n, co, oh, ow, ci, kh, kw, vc, vh, vw]])
cfg.define_annotate("ann_reduce", [kh, kw], policy='try_unroll')
cfg.define_annotate("ann_spatial", [vh, vw, vc], policy='try_unroll_vec')
# ====================================================================
VC = cfg["tile_co"].size[-1]
VH = cfg["tile_oh"].size[-1]
VW = cfg["tile_ow"].size[-1]
dvshape = (N, OH // VH, OW // VW, CI, VH + KH-1, VW + KW-1)
kvshape = (CO // VC, CI, KH, KW, VC)
ovshape = (N, CO // VC, OH // VH, OW // VW, VH, VW, VC)
oshape = (N, CO, OH, OW)
data_vec = te.compute(dvshape, lambda n, h, w, ci, vh, vw:
data_pad[n][ci][h*VH + vh][w*VW + vw],
name='data_vec')
kernel_vec = te.compute(kvshape, lambda co, ci, kh, kw, vc:
kernel[ci][co*VC+vc][kh][kw],
name='kernel_vec_conv2d_transpose')
ci = te.reduce_axis((0, CI), name='ci')
kh = te.reduce_axis((0, KH), name='kh')
kw = te.reduce_axis((0, KW), name='kw')
conv = te.compute(ovshape, lambda n, co, h, w, vh, vw, vc: \
te.sum(data_vec[n, h, w, ci, vh + kh, vw + kw].astype(out_dtype) *
kernel_vec[co, ci, KH - 1 - kh, KW - 1 - kw, vc].astype(out_dtype),
axis=[ci, kh, kw]), name='conv')
idxdiv = tvm.tir.indexdiv
idxmod = tvm.tir.indexmod
output = te.compute(oshape, lambda n, co, h, w:
conv[n,
idxdiv(co, VC), idxdiv(h, VH), idxdiv(w, VW),
idxmod(h, VH), idxmod(w, VW), idxmod(co, VC)],
name='output_unpack', tag='spatial_conv2d_transpose_output')
return output
# register customized schedule for arm cpu.
@autotvm.register_topi_schedule("conv2d_transpose_nchw.arm_cpu")
def schedule_conv2d_transpose_nchw(cfg, outs):
"""Schedule conv2d transpose for arm cpu"""
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if 'spatial_conv2d_transpose_output' in op.tag:
output = op.output(0)
conv = op.input_tensors[0]
data_vec = conv.op.input_tensors[0]
data_pad = data_vec.op.input_tensors[0]
dilated_input = data_pad.op.input_tensors[0]
s[data_pad].compute_inline()
s[dilated_input].compute_inline()
kernel_vec = conv.op.input_tensors[1]
if kernel_vec.op.name == 'kernel_vec':
kernel = kernel_vec.op.input_tensors[0]
else:
kernel = kernel_vec
if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
s[kernel].compute_inline()
schedule_conv2d_spatial_pack_nchw(cfg, s, data_vec, kernel_vec,
conv, output, outs[0])
traverse_inline(s, outs[0].op, _callback)
return s
```
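As a quick sanity check of the output-shape arithmetic in `_decl_spatial_pack` above, here is a worked example with purely illustrative sizes:

```python
# Illustrative numbers only: 8x8 input, 3x3 kernel, stride 2,
# symmetric padding 1 and output_padding 1, plugged into the
# output-shape formula used in _decl_spatial_pack above.
IH = IW = 8
KH = KW = 3
HSTR = WSTR = 2
pad_top = pad_bottom = pad_left = pad_right = 1
opad_h = opad_w = 1
OH = (IH - 1) * HSTR - pad_top - pad_bottom + KH + opad_h  # 14 - 2 + 3 + 1 = 16
OW = (IW - 1) * WSTR - pad_left - pad_right + KW + opad_w  # 16
assert (OH, OW) == (16, 16)
```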
#### File: topi/cuda/conv1d.py
```python
import tvm
from tvm import te
from tvm import autotvm
from .. import nn
from ..util import traverse_inline, get_const_tuple
@autotvm.register_topi_compute("conv1d_ncw.cuda")
def conv1d_ncw(cfg,
data,
kernel,
strides,
padding,
dilation,
out_dtype='float32'):
return nn.conv1d_ncw(data, kernel, strides, padding, dilation, out_dtype)
@autotvm.register_topi_schedule("conv1d_ncw.cuda")
def schedule_conv1d_ncw(cfg, outs):
"""TOPI schedule callback of conv1d ncw for cuda gpu
Parameters
----------
cfg : ConfigEntity
the config for this template.
outs : Array of Tensor
The computation graph description of conv1d
in the format of an array of tensors.
Returns
-------
s : Schedule
The computation schedule for conv1d.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if op.tag == 'conv1d_ncw':
pad_data = op.input_tensors[0]
kernel = op.input_tensors[1]
conv = op.output(0)
##### space definition begin #####
n, f, x = s[conv].op.axis
rc = s[conv].op.reduce_axis[0]
cfg.define_split("tile_n", cfg.axis(n), num_outputs=4)
cfg.define_split("tile_f", cfg.axis(f), num_outputs=4)
cfg.define_split("tile_x", cfg.axis(x), num_outputs=4)
cfg.define_split("tile_rc", cfg.axis(rc), num_outputs=3)
cfg.define_knob("auto_unroll_max_step", [64, 512, 1500])
target = tvm.target.Target.current()
if target.id.name in ['nvptx', 'rocm']:
cfg.define_knob("unroll_explicit", [1])
else:
cfg.define_knob("unroll_explicit", [0, 1])
##### space definition end #####
if isinstance(kernel.op,
tvm.te.ComputeOp) and 'dilate' in kernel.op.tag:
s[kernel].compute_inline()
if conv.op in s.outputs:
output = conv
OL = s.cache_write(conv, 'local')
else:
output = s.outputs[0].output(0)
s[conv].set_scope('local')
OL = conv
# create cache stage
s[pad_data].set_scope('shared')
AA = pad_data
WW = s.cache_read(kernel, 'shared', [OL])
# tile and bind spatial axes
n, f, x = s[output].op.axis
kernel_scope, n = s[output].split(n, nparts=1)
bn, vn, tn, ni = cfg["tile_n"].apply(s, output, n)
bf, vf, tf, fi = cfg["tile_f"].apply(s, output, f)
bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x)
s[output].reorder(bn, bf, bx, vn, vf, vx, tn, tf, tx, ni, fi, xi)
s[output].bind(bn, te.thread_axis("blockIdx.z"))
s[output].bind(bf, te.thread_axis("blockIdx.y"))
s[output].bind(bx, te.thread_axis("blockIdx.x"))
s[output].bind(vn, te.thread_axis("vthread"))
s[output].bind(vf, te.thread_axis("vthread"))
s[output].bind(vx, te.thread_axis("vthread"))
s[output].bind(tx, te.thread_axis("threadIdx.x"))
s[OL].compute_at(s[output], tx)
# number of threads
n_tz = cfg["tile_n"].size[2] * cfg["tile_f"].size[2]
n_tx = cfg["tile_x"].size[2]
# tile reduction axes
n, f, x = s[OL].op.axis
rc, rx = s[OL].op.reduce_axis
rco, rcm, rci = cfg['tile_rc'].apply(s, OL, rc)
s[OL].reorder(rco, rcm, rx, rci, n, f, x)
s[AA].compute_at(s[OL], rx)
s[WW].compute_at(s[OL], rx)
# cooperative fetching
for load in [AA, WW]:
n, f, x = s[load].op.axis
fused = s[load].fuse(f, x)
tz, fused = s[load].split(fused, nparts=n_tz)
tx, fused = s[load].split(fused, nparts=n_tx)
s[load].bind(tz, te.thread_axis("threadIdx.y"))
s[load].bind(tx, te.thread_axis("threadIdx.x"))
s[output].pragma(kernel_scope, 'auto_unroll_max_step',
cfg['auto_unroll_max_step'].val)
s[output].pragma(kernel_scope, 'unroll_explicit',
cfg['unroll_explicit'].val)
N, CO, OW = get_const_tuple(output.shape)
_, CI, KW = get_const_tuple(kernel.shape)
cfg.add_flop(2 * N * OW * CO * KW * CI)
traverse_inline(s, outs[0].op, _callback)
return s
@autotvm.register_topi_compute("conv1d_nwc.cuda")
def conv1d_nwc(cfg,
data,
kernel,
strides,
padding,
dilation,
out_dtype='float32'):
return nn.conv1d_nwc(data, kernel, strides, padding, dilation, out_dtype)
@autotvm.register_topi_schedule("conv1d_nwc.cuda")
def schedule_conv1d_nwc(cfg, outs):
"""TOPI schedule callback of conv1d nwc for cuda gpu
Parameters
----------
cfg : ConfigEntity
the config for this template.
outs : Array of Tensor
The computation graph description of conv1d
in the format of an array of tensors.
Returns
-------
s : Schedule
The computation schedule for conv1d.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if op.tag == 'conv1d_nwc':
pad_data = op.input_tensors[0]
kernel = op.input_tensors[1]
conv = op.output(0)
##### space definition begin #####
n, x, f = s[conv].op.axis
rc = s[conv].op.reduce_axis[0]
cfg.define_split("tile_n", cfg.axis(n), num_outputs=4)
cfg.define_split("tile_x", cfg.axis(x), num_outputs=4)
cfg.define_split("tile_f", cfg.axis(f), num_outputs=4)
cfg.define_split("tile_rc", cfg.axis(rc), num_outputs=3)
cfg.define_knob("auto_unroll_max_step", [64, 512, 1500])
target = tvm.target.Target.current()
if target.id.name in ['nvptx', 'rocm']:
cfg.define_knob("unroll_explicit", [1])
else:
cfg.define_knob("unroll_explicit", [0, 1])
##### space definition end #####
if isinstance(kernel.op,
tvm.te.ComputeOp) and 'dilate' in kernel.op.tag:
s[kernel].compute_inline()
if conv.op in s.outputs:
output = conv
OL = s.cache_write(conv, 'local')
else:
output = s.outputs[0].output(0)
s[conv].set_scope('local')
OL = conv
# create cache stage
s[pad_data].set_scope('shared')
AA = pad_data
WW = s.cache_read(kernel, 'shared', [OL])
# tile and bind spatial axes
n, f, x = s[output].op.axis
kernel_scope, n = s[output].split(n, nparts=1)
bn, vn, tn, ni = cfg["tile_n"].apply(s, output, n)
bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x)
bf, vf, tf, fi = cfg["tile_f"].apply(s, output, f)
s[output].reorder(bn, bx, bf, vn, vx, vf, tn, tx, tf, ni, xi, fi)
s[output].bind(bn, te.thread_axis("blockIdx.z"))
s[output].bind(bx, te.thread_axis("blockIdx.y"))
s[output].bind(bf, te.thread_axis("blockIdx.x"))
s[output].bind(vn, te.thread_axis("vthread"))
s[output].bind(vx, te.thread_axis("vthread"))
s[output].bind(vf, te.thread_axis("vthread"))
s[output].bind(tf, te.thread_axis("threadIdx.x"))
s[OL].compute_at(s[output], tf)
# number of threads
n_tz = cfg["tile_n"].size[2] * cfg["tile_x"].size[2]
n_tx = cfg["tile_f"].size[2]
# tile reduction axes
n, x, f = s[OL].op.axis
rc, rx = s[OL].op.reduce_axis
rco, rcm, rci = cfg['tile_rc'].apply(s, OL, rc)
s[OL].reorder(rco, rcm, rx, rci, n, x, f)
s[AA].compute_at(s[OL], rx)
s[WW].compute_at(s[OL], rx)
# cooperative fetching
for load in [AA, WW]:
n, x, f = s[load].op.axis
fused = s[load].fuse(x, f)
tz, fused = s[load].split(fused, nparts=n_tz)
tx, fused = s[load].split(fused, nparts=n_tx)
s[load].bind(tz, te.thread_axis("threadIdx.y"))
s[load].bind(tx, te.thread_axis("threadIdx.x"))
s[output].pragma(kernel_scope, 'auto_unroll_max_step',
cfg['auto_unroll_max_step'].val)
s[output].pragma(kernel_scope, 'unroll_explicit',
cfg['unroll_explicit'].val)
N, OW, CO = get_const_tuple(output.shape)
KW, CI, _ = get_const_tuple(kernel.shape)
cfg.add_flop(2 * N * OW * CO * KW * CI)
traverse_inline(s, outs[0].op, _callback)
return s
```
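The `cfg.add_flop` calls above record `2 * N * OW * CO * KW * CI` operations, one multiply and one add per filter tap; a small illustration with made-up sizes:

```python
# Made-up sizes, only to illustrate the FLOP bookkeeping recorded by cfg.add_flop.
N, CO, OW, KW, CI = 1, 64, 256, 3, 32
flops = 2 * N * OW * CO * KW * CI  # one multiply + one add per filter tap
print(flops)  # 3145728
```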
#### File: topi/mali/conv2d.py
```python
import tvm
from tvm import te
from tvm import relay
from tvm import autotvm
from tvm.autotvm.task.space import get_factors
from ..util import traverse_inline, get_const_int, get_const_tuple
from .. import nn
from ..nn.winograd_util import winograd_transform_matrices
# reuse some compute declarations from ARM CPU
from ..arm_cpu.conv2d_spatial_pack import conv2d_spatial_pack_nchw
@autotvm.register_topi_compute("conv2d_nchw_spatial_pack.mali")
def conv2d_nchw_spatial_pack(cfg, data, kernel, strides, padding, dilation, out_dtype):
"""TOPI compute callback for conv2d
Parameters
----------
cfg: ConfigEntity
The config for this template
data : tvm.te.Tensor
4-D with shape [batch, in_channel, in_height, in_width]
kernel : tvm.te.Tensor
4-D with shape [num_filter, in_channel, filter_height, filter_width] or
pre-packed 5-D with shape [num_filter_chunk, in_channel, filter_height,
filter_width, num_filter_block]
strides : list of two ints
[stride_height, stride_width]
padding : list of two ints
[pad_height, pad_width]
dilation : list of two ints
[dilation_height, dilation_width]
out_dtype: str
The output type. This is used for mixed precision.
Returns
-------
output : tvm.te.Tensor
4-D with shape [batch, out_channel, out_height, out_width]
"""
return conv2d_spatial_pack_nchw(cfg, data, kernel, strides, padding,
dilation, out_dtype, num_tile=3)
@autotvm.register_topi_schedule("conv2d_nchw_spatial_pack.mali")
def schedule_conv2d_nchw_spatial_pack(cfg, outs):
"""TOPI schedule callback for conv2d
Parameters
----------
cfg: ConfigEntity
The configuration of this template
outs: Array of Tensor
The computation graph description of convolution2d
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for conv2d
"""
s = te.create_schedule([x.op for x in outs])
def _callback(op):
# schedule conv2d
if 'spatial_conv2d_output' in op.tag:
output = op.output(0)
conv = op.input_tensors[0]
data_vec = conv.op.input_tensors[0]
data_pad = data_vec.op.input_tensors[0]
s[data_pad].compute_inline()
kernel_vec = conv.op.input_tensors[1]
if kernel_vec.op.name == 'kernel_vec':
kernel = kernel_vec.op.input_tensors[0]
else:
kernel = kernel_vec
if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
s[kernel].compute_inline()
_schedule_spatial_pack(cfg, s, output, conv, data_vec, kernel_vec)
traverse_inline(s, outs[0].op, _callback)
return s
def _schedule_spatial_pack(cfg, s, output, conv, data_vec, kernel_vec):
"""schedule the spatial packing for conv2d"""
data = s[data_vec].op.input_tensors[0]
max_unroll = 16
vec_size = [1, 2, 4, 8, 16]
# get tunable parameters (they are defined in compute)
BC, TC, VC = cfg["tile_co"].size
BH, TH, VH = cfg["tile_oh"].size
BW, TW, VW = cfg["tile_ow"].size
# schedule padding
if isinstance(data.op, tvm.te.ComputeOp) and "pad" in data.op.tag:
data_pad = data
s[data_pad].compute_inline()
# schedule data packing
if isinstance(data_vec.op, tvm.te.ComputeOp) and data_vec.op.name == 'data_vec_undilated':
_, h, w, ci, _, _, vh, vw = s[data_vec].op.axis
else:
_, h, w, ci, vh, vw = s[data_vec].op.axis
tile_and_bind3d(s, data_vec, h, w, ci, 1)
if vh.dom.extent.value < max_unroll:
s[data_vec].unroll(vh)
if vw.dom.extent.value < max_unroll:
s[data_vec].unroll(vw)
if isinstance(kernel_vec.op, tvm.te.ComputeOp) and kernel_vec.name == 'kernel_vec':
if not autotvm.GLOBAL_SCOPE.in_tuning:
max_threads = tvm.target.Target.current(allow_none=False).max_num_threads
co, ci, kh, kw, vc = s[kernel_vec].op.axis
fused = s[kernel_vec].fuse(co, ci, kh, kw, vc)
fused, vec = s[kernel_vec].split(fused, VC)
bb, tt = s[kernel_vec].split(fused, max_threads)
s[kernel_vec].bind(bb, te.thread_axis("blockIdx.x"))
s[kernel_vec].bind(tt, te.thread_axis("threadIdx.x"))
if VC in vec_size:
s[kernel_vec].vectorize(vec)
# schedule convolution
n, c, h, w, vh, vw, vc = s[conv].op.axis
kc, kh, kw = s[conv].op.reduce_axis
cfg["reorder_0"].apply(s, conv, [n, c, h, w, kc, kh, kw, vh, vw, vc])
tile_and_bind3d(s, conv, c, h, w, TC, TH, TW)
cfg["ann_reduce"].apply(s, conv, [kh, kw],
axis_lens=[get_const_int(kernel_vec.shape[2]),
get_const_int(kernel_vec.shape[3])],
max_unroll=max_unroll)
cfg["ann_spatial"].apply(s, conv, [vh, vw, vc],
axis_lens=[VH, VW, VC],
max_unroll=max_unroll,
vec_size=vec_size,
cfg=cfg)
# schedule output
if output.op not in s.outputs: # has bias
s[output].compute_inline()
output = s.outputs[0]
_, co, oh, ow = s[output].op.axis
tile_and_bind3d(s, output, co, oh, ow, TC, TH, TW)
return s
##### WINOGRAD TEMPLATE #####
def _pick_tile_size(data, kernel):
N, CI, H, W = get_const_tuple(data.shape)
if H % 4 == 0:
return 4
else:
return 2
@autotvm.register_topi_compute("conv2d_nchw_winograd.mali")
def conv2d_nchw_winograd(cfg, data, kernel, strides, padding, dilation, out_dtype):
tile_size = _pick_tile_size(data, kernel)
return _decl_winograd(cfg, data, kernel, strides, padding, dilation, out_dtype,
tile_size)
@autotvm.register_topi_schedule("conv2d_nchw_winograd.mali")
def schedule_conv2d_nchw_winograd(cfg, outs):
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if 'winograd_conv2d_output' in op.tag:
_schedule_winograd(cfg, s, op)
traverse_inline(s, outs[0].op, _callback)
return s
def _decl_winograd(cfg, data, kernel, strides, padding, dilation, out_dtype, tile_size):
N, CI, IH, IW = get_const_tuple(data.shape)
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
dilation_h, dilation_w = dilation
if len(kernel.shape) == 4:
if dilation_h != 1 or dilation_w != 1:
kernel = nn.dilate(kernel, (1, 1, dilation_h, dilation_w))
pre_computed = False
CO, _, KH, KW = get_const_tuple(kernel.shape)
else:
assert (dilation_h, dilation_w) == (1, 1), "Does not support dilation"
pre_computed = True
H_CAT, W_CAT, CO, CI, VC = get_const_tuple(kernel.shape)
CO *= VC
KH, KW = H_CAT - tile_size + 1, W_CAT - tile_size + 1
HSTR, WSTR = strides if isinstance(strides, (tuple, list)) else (strides, strides)
pt, pl, pb, pr = nn.get_pad_tuple(padding, (KH, KW))
assert KH == 3 and KW == 3 and HSTR == 1 and WSTR == 1
data_pad = nn.pad(data, (0, 0, pt, pl), (0, 0, pb, pr), name="data_pad")
r = KW
m = tile_size
alpha = m + r - 1
A, B, G = winograd_transform_matrices(m, r, out_dtype)
H = (IH + pt + pb - 3) // HSTR + 1
W = (IW + pl + pr - 3) // WSTR + 1
nH, nW = (H + m-1) // m, (W + m-1) // m
P = N * nH * nW
##### space definition begin #####
tile_bna_candidates = [1, 2, 4, 8, 16]
factors = get_factors(CO)
cfg.define_knob('tile_bna', [x for x in tile_bna_candidates if x in factors])
cfg.define_knob('tile_bnb', [1, 2, 4, 8, 16])
cfg.define_split('tile_t1', CI, num_outputs=2, max_factor=128)
cfg.define_split('tile_t2', CO, num_outputs=2, max_factor=128)
cfg.define_split('c_unroll', CI, num_outputs=2, max_factor=8)
cfg.define_knob('yt', [1, 2, 4, 8, 16, 32])
##### space definition end #####
if cfg.is_fallback:
cfg['tile_bnb'].val = 4
cfg['tile_bna'].val = 4
while CO % cfg['tile_bna'].val != 0:
cfg['tile_bna'].val //= 2
cfg['yt'].val = 8
cfg.fallback_split('tile_t1', [-1, 128])
cfg.fallback_split('tile_t2', [-1, 128])
cfg.fallback_split('c_unroll', [-1, 8])
bna = cfg['tile_bna'].val
bnb = cfg['tile_bnb'].val
P_round = (P + bnb - 1) // bnb * bnb
assert CO % bna == 0 and P_round % bnb == 0
# pack input tile
input_tile = te.compute(
(CI, P_round // bnb, alpha, alpha, bnb), lambda ci, b, eps, nu, bb: \
tvm.tir.if_then_else(
b * bnb + bb < P,
data_pad[(b*bnb+bb) // (nH*nW)][ci][(b*bnb+bb) // nW % nH * m + eps]
[(b*bnb+bb) % nW * m + nu], tvm.tir.const(0, data_pad.dtype)), name='d')
if autotvm.GLOBAL_SCOPE.in_tuning:
VC = cfg['tile_k'].size[-1]
kvshape = (KH + tile_size - 1, KW + tile_size - 1, tvm.tir.indexdiv(CO, VC), CI, VC)
U = tvm.te.placeholder(kvshape, kernel.dtype, name="U")
else:
# transform kernel
if pre_computed:
U = kernel
else:
r_kh = te.reduce_axis((0, KH), 'r_kh')
r_kw = te.reduce_axis((0, KW), 'r_kw')
U = te.compute((alpha, alpha, CO // bna, CI, bna), lambda eps, nu, co, ci, vco:
te.sum(kernel[co * bna + vco][ci][r_kh][r_kw] *
G[eps][r_kh] * G[nu][r_kw],
axis=[r_kh, r_kw]), name='U')
# transform image
r_a = te.reduce_axis((0, alpha), 'r_a')
r_b = te.reduce_axis((0, alpha), 'r_b')
V = te.compute((alpha, alpha, P_round // bnb, CI, bnb), lambda eps, nu, p, ci, vp:
te.sum(input_tile[ci][p][r_a][r_b][vp] * B[r_a][eps] * B[r_b][nu],
axis=[r_a, r_b]), name='V')
idxdiv = tvm.tir.indexdiv
idxmod = tvm.tir.indexmod
# batch gemm
ci = te.reduce_axis((0, CI), name='c')
M = te.compute((alpha, alpha, CO, P_round), lambda eps, nu, co, p:
te.sum(U[eps][nu][idxdiv(co, bna)][ci][idxmod(co, bna)] *
V[eps][nu][idxdiv(p, bnb)][ci][idxmod(p, bnb)], axis=ci), name='M')
r_a = te.reduce_axis((0, alpha), 'r_a')
r_b = te.reduce_axis((0, alpha), 'r_b')
Y = te.compute((CO, P, m, m), lambda co, p, vh, vw:
te.sum(M[r_a][r_b][co][p] * A[r_a][vh] * A[r_b][vw],
axis=[r_a, r_b]), name='Y')
# unpack output
output = te.compute((N, CO, H, W), lambda n, co, h, w:
Y[co, n * nH * nW + idxdiv(h, m) * nW + idxdiv(w, m),
idxmod(h, m), idxmod(w, m)]
# The following hack term is used to make the padding in batch gemm ("M")
# effective, otherwise the padding will be eliminated by bound inference.
# Use `tvm.tir.Mul` instead of `*` to avoid issues in const folding.
+ tvm.tir.Mul(tvm.tir.const(0, out_dtype),
M[alpha-1][alpha-1][CO-1][P_round-1]),
name='output', tag='winograd_conv2d_output')
# we have to manually assign effective GFLOP for winograd
cfg.add_flop(2 * N * CO * H * W * KH * KW * CI)
return output
def _schedule_winograd(cfg, s, op):
"""schedule winograd fast convolution F(2x2, 3x3) for conv2d"""
# get ops and tensors
output = op.output(0)
Y = op.input_tensors[0]
M, A = s[Y].op.input_tensors
U, V = s[M].op.input_tensors
d, B = s[V].op.input_tensors
data_pad = s[d].op.input_tensors[0]
# padding
s[data_pad].compute_inline()
# transform kernel
if isinstance(U.op, tvm.te.ComputeOp):
kernel, G = s[U].op.input_tensors
s[G].compute_inline()
eps, nu, co, ci, vco, = s[U].op.axis
if not autotvm.GLOBAL_SCOPE.in_tuning:
r_kh, r_kw = s[U].op.reduce_axis
s[U].reorder(co, ci, eps, nu, r_kh, r_kw, vco)
_ = [s[U].unroll(x) for x in [eps, nu, r_kh, r_kw]]
s[U].vectorize(vco)
tile_and_bind(s, U, co, ci, 1, 256)
# dilation
if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
s[kernel].compute_inline()
# transform image
s[B].compute_inline()
VL = s.cache_write(V, 'local')
eps, nu, p, ci, vp = s[V].op.axis
s[V].reorder(p, ci, eps, nu, vp)
for axis in [eps, nu]:
s[V].unroll(axis)
s[V].vectorize(vp)
fused = s[V].fuse(p, ci)
bb, tt = cfg['tile_t1'].apply(s, V, fused)
s[V].bind(bb, te.thread_axis('blockIdx.x'))
s[V].bind(tt, te.thread_axis('threadIdx.x'))
eps, nu, p, ci, vp = s[VL].op.axis
r_a, r_b = s[VL].op.reduce_axis
for axis in [eps, nu, r_a, r_b]:
s[VL].unroll(axis)
s[VL].vectorize(vp)
s[d].compute_at(s[V], tt)
s[VL].compute_at(s[V], tt)
# batch gemm
bna = cfg['tile_bna'].val
bnb = cfg['tile_bnb'].val
eps, nu, k, b = s[M].op.axis
alpha = eps.dom.extent
c = s[M].op.reduce_axis[0]
yo, xo, yi, xi = s[M].tile(k, b, bna, bnb)
c, c_unroll = cfg['c_unroll'].apply(s, M, c)
s[M].reorder(yo, xo, c, c_unroll, yi, xi)
s[M].unroll(c_unroll)
s[M].unroll(yi)
s[M].vectorize(xi)
z = s[M].fuse(eps, nu)
tile_and_bind3d(s, M, z, yo, xo, 1, cfg['yt'].val, 1)
# inverse transform
s[A].compute_inline()
k, b, vh, vw = s[Y].op.axis
r_a, r_b = s[Y].op.reduce_axis
for axis in [vh, vw, r_a, r_b]:
s[Y].unroll(axis)
# schedule output and fusion
if output.op not in s.outputs:
s[output].compute_inline()
output = s.outputs[0]
n, co, h, w = s[output].op.axis
m = alpha - 3 + 1
h, w, hi, wi = s[output].tile(h, w, m, m)
s[output].unroll(hi)
s[output].unroll(wi)
fused = s[output].fuse(n, co, h, w)
bb, tt = cfg['tile_t2'].apply(s, output, fused)
s[output].bind(bb, te.thread_axis('blockIdx.x'))
s[output].bind(tt, te.thread_axis('threadIdx.x'))
s[Y].compute_at(s[output], tt)
##### REGISTER ALTER OP LAYOUT #####
@nn.conv2d_alter_layout.register(["mali"])
def _alter_conv2d_layout(attrs, inputs, tinfos, out_type):
target = tvm.target.Target.current(allow_none=False)
dispatch_ctx = autotvm.task.DispatchContext.current
_, outs = relay.backend.compile_engine.select_implementation(
relay.op.get("nn.conv2d"), attrs, tinfos, out_type, target)
workload = autotvm.task.get_workload(outs)
if workload is None:
# The best implementation is not an AutoTVM template,
# we then assume it's not necessary to alter this op.
return None
cfg = dispatch_ctx.query(target, workload)
if cfg.is_fallback: # if is fallback, clear query cache and return None
autotvm.task.clear_fallback_cache(target, workload)
return None
topi_tmpl = workload[0]
new_attrs = {k: attrs[k] for k in attrs.keys()}
strides = attrs.get_int_tuple("strides")
padding = attrs.get_int_tuple("padding")
dilation = attrs.get_int_tuple("dilation")
data_layout = attrs["data_layout"]
kernel_layout = attrs["kernel_layout"]
data, kernel = tinfos
out_dtype = out_type.dtype
idxd = tvm.tir.indexdiv
if topi_tmpl == "conv2d_nchw_spatial_pack.mali":
assert data_layout == "NCHW" and kernel_layout == "OIHW"
N, CI, H, W = get_const_tuple(data.shape)
CO, _, KH, KW = get_const_tuple(kernel.shape)
VC = cfg['tile_co'].size[-1]
new_attrs['kernel_layout'] = 'OIHW%do' % VC
new_data = data
new_kernel = te.placeholder((idxd(CO, VC), CI, KH, KW, VC), dtype=kernel.dtype)
new_workload = autotvm.task.args_to_workload(
[new_data, new_kernel, strides, padding, dilation, out_dtype],
"conv2d_nchw_spatial_pack.mali")
dispatch_ctx.update(target, new_workload, cfg)
return relay.nn.conv2d(*inputs, **new_attrs)
elif topi_tmpl == "conv2d_nchw_winograd.mali":
assert data_layout == "NCHW" and kernel_layout == "OIHW"
N, CI, H, W = get_const_tuple(data.shape)
CO, _, KH, KW = get_const_tuple(kernel.shape)
tile_size = _pick_tile_size(data, kernel)
VC = cfg['tile_bna'].val
weight_expr = inputs[1]
weight_expr = relay.nn.contrib_conv2d_winograd_weight_transform(
weight_expr, tile_size=tile_size)
weight_expr = relay.reshape(weight_expr,
newshape=(KH + tile_size - 1,
KW + tile_size - 1,
idxd(CO, VC), VC, CI))
weight_expr = relay.transpose(weight_expr, axes=[0, 1, 2, 4, 3])
new_attrs['tile_size'] = tile_size
new_data = data
new_kernel = te.placeholder((KH + tile_size - 1,
KW + tile_size -1,
idxd(CO, VC), CI, VC),
kernel.dtype)
new_workload = autotvm.task.args_to_workload(
[new_data, new_kernel, strides, padding, dilation, out_dtype],
'conv2d_nchw_winograd.mali')
dispatch_ctx.update(target, new_workload, cfg)
return relay.nn.contrib_conv2d_winograd_without_weight_transform(
inputs[0], weight_expr, **new_attrs)
else:
return None
##### SCHEDULE UTILITIES #####
def tile_and_bind(s, tensor, y, x, y_factor, x_factor=None):
""" tile and bind to GPU threads """
x_factor = x_factor or y_factor
yo, xo, yi, xi = s[tensor].tile(y, x, y_factor, x_factor)
s[tensor].bind(xo, te.thread_axis("blockIdx.x"))
s[tensor].bind(xi, te.thread_axis("threadIdx.x"))
s[tensor].bind(yo, te.thread_axis("blockIdx.y"))
s[tensor].bind(yi, te.thread_axis("threadIdx.y"))
return yo, xo, yi, xi
def tile_and_bind3d(s, tensor, z, y, x, z_factor=2, y_factor=None, x_factor=None):
""" tile and bind 3d """
y_factor = y_factor or z_factor
x_factor = x_factor or y_factor
zo, zi = s[tensor].split(z, z_factor)
yo, yi = s[tensor].split(y, y_factor)
xo, xi = s[tensor].split(x, x_factor)
s[tensor].bind(zo, te.thread_axis("blockIdx.z"))
s[tensor].bind(zi, te.thread_axis("threadIdx.z"))
s[tensor].bind(yo, te.thread_axis("blockIdx.y"))
s[tensor].bind(yi, te.thread_axis("threadIdx.y"))
s[tensor].bind(xo, te.thread_axis("blockIdx.x"))
s[tensor].bind(xi, te.thread_axis("threadIdx.x"))
s[tensor].reorder(zo, yo, xo, zi, yi, xi)
return zo, yo, xo, zi, yi, xi
```
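For the Winograd declaration above, the transform size is `alpha = m + r - 1` and the number of input tiles is `P = N * nH * nW`; a worked example with illustrative sizes:

```python
# Illustrative Winograd bookkeeping for F(4x4, 3x3) on a 1x56x56 spatial input,
# matching the alpha / nH / nW / P definitions in _decl_winograd above.
m, r = 4, 3                                    # output tile size, kernel size
alpha = m + r - 1                              # 6: transformed tile size
N, H, W = 1, 56, 56
nH, nW = (H + m - 1) // m, (W + m - 1) // m    # 14, 14
P = N * nH * nW                                # 196 input tiles
assert (alpha, nH, nW, P) == (6, 14, 14, 196)
```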
#### File: topi/testing/conv3d_transpose_ncdhw_python.py
```python
import numpy as np
import topi
from topi.nn.util import get_pad_tuple3d
def conv3d_transpose_ncdhw_python(a_np, w_np, stride, padding):
"""Transposed 3d convolution operator in NCDHW layout.
Parameters
----------
a_np : numpy.ndarray
5-D with shape [batch, in_channel, in_depth, in_height, in_width]
w_np : numpy.ndarray
5-D with shape [in_channel, num_filter, filter_depth, filter_height, filter_width]
stride : int or a list/tuple of three ints
Stride size, or [stride_depth, stride_height, stride_width]
padding : int or str
Padding size
Returns
-------
b_np : np.ndarray
5-D with shape [batch, out_channel, out_depth, out_height, out_width]
"""
batch, in_c, in_d, in_h, in_w = a_np.shape
_, out_c, filter_d, filter_h, filter_w = w_np.shape
if isinstance(stride, int):
stride_d = stride_h = stride_w = stride
else:
stride_d, stride_h, stride_w = stride
# dilate stage
dilated_a_np = topi.testing.dilate_python(a_np, [1, 1, stride_d, stride_h, stride_w])
# padding stage
fpad_front, fpad_top, fpad_left, fpad_back, fpad_bottom, fpad_right = get_pad_tuple3d(
padding, (filter_d, filter_h, filter_w))
bpad_front = filter_d - 1 - fpad_front
bpad_back = filter_d - 1 - fpad_back
bpad_top = filter_h - 1 - fpad_top
bpad_bottom = filter_h - 1 - fpad_bottom
bpad_left = filter_w - 1 - fpad_left
bpad_right = filter_w - 1 - fpad_right
padded_a_np = np.zeros((batch,
in_c,
dilated_a_np.shape[2]+bpad_front+bpad_back,
dilated_a_np.shape[3]+bpad_top+bpad_bottom,
dilated_a_np.shape[4]+bpad_left+bpad_right))
padded_a_np[:, :, bpad_front:dilated_a_np.shape[2]+bpad_front,
bpad_top:dilated_a_np.shape[3]+bpad_top,
bpad_left:dilated_a_np.shape[4]+bpad_left] = dilated_a_np
# convolution stage
out_d = (in_d - 1) * stride_d - fpad_front - fpad_back + filter_d
out_h = (in_h - 1) * stride_h - fpad_top - fpad_bottom + filter_h
out_w = (in_w - 1) * stride_w - fpad_left - fpad_right + filter_w
w_np = np.flip(w_np, axis=[2, 3, 4]).transpose((1, 0, 2, 3, 4))
b_np = topi.testing.conv3d_ncdhw_python(padded_a_np, w_np, stride=(1, 1, 1), padding=(0, 0, 0))
return b_np
```
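The back padding above follows `bpad = filter - 1 - fpad`, so a stride-1 convolution over the back-padded, dilated input yields the transposed-convolution output size; a small arithmetic check with illustrative numbers:

```python
# Illustrative numbers only: one depth dimension with in_d=5, filter_d=3,
# stride_d=2 and symmetric forward padding 1, checking that the stride-1
# convolution over the back-padded, dilated input matches the closed form.
in_d, filter_d, stride_d = 5, 3, 2
fpad_front = fpad_back = 1
bpad_front = filter_d - 1 - fpad_front          # 1
bpad_back = filter_d - 1 - fpad_back            # 1
dilated_d = (in_d - 1) * stride_d + 1           # 9
padded_d = dilated_d + bpad_front + bpad_back   # 11
out_d_conv = padded_d - filter_d + 1            # 9
out_d_formula = (in_d - 1) * stride_d - fpad_front - fpad_back + filter_d  # 9
assert out_d_conv == out_d_formula == 9
```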
#### File: topi/testing/l2_normalize_python.py
```python
import numpy as np
def l2_normalize_python(a_np, eps, axis=None):
"""L2 normalize operator in NCHW layout.
Parameters
----------
a_np : numpy.ndarray
4-D with shape [batch, in_channel, in_height, in_width]
eps : float
epsilon constant value
axis : list of int
Axis or axes over which the normalization is applied
Returns
-------
l2_normalize_out : np.ndarray
4-D with shape [batch, out_channel, out_height, out_width]
"""
dot_value = np.power(a_np, 2.0)
sqr_sum = np.sum(dot_value, axis, keepdims=True)
sqrt_sum = np.sqrt(np.maximum(np.broadcast_to(sqr_sum, a_np.shape), eps))
l2_normalize_out = np.divide(a_np, sqrt_sum)
return l2_normalize_out
```
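A minimal usage sketch of the reference implementation above; the shapes and `axis` value are illustrative, and it assumes `l2_normalize_python` from this file is in scope:

```python
# Minimal usage sketch; shapes and axis are illustrative. Assumes the
# l2_normalize_python function defined above is in scope.
import numpy as np

a = np.random.uniform(size=(1, 3, 4, 4)).astype("float32")
out = l2_normalize_python(a, eps=1e-6, axis=[1])
# Each (h, w) position now has unit L2 norm across the channel axis.
assert np.allclose(np.sum(out * out, axis=1), 1.0, atol=1e-5)
```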
#### File: recipe/conv/depthwise_conv2d_test.py
```python
import os
import tvm
from tvm import te
import numpy as np
from scipy import signal
from tvm.contrib import nvcc
import topi
from topi.util import get_const_tuple
from topi.cuda.depthwise_conv2d import schedule_depthwise_conv2d_nchw, schedule_depthwise_conv2d_nhwc
TASK = "depthwise_conv2d"
USE_MANUAL_CODE = False
@tvm.register_func
def tvm_callback_cuda_compile(code):
ptx = nvcc.compile_cuda(code, target="ptx")
return ptx
def write_code(code, fname):
with open(fname, "w") as f:
f.write(code)
@tvm.register_func
def tvm_callback_cuda_postproc(code):
if not os.path.exists("perf"):
os.mkdir("perf")
write_code(code, "perf/%s_generated.cu" % TASK)
if USE_MANUAL_CODE:
code = open("perf/%s_manual.cu" % TASK).read()
return code
def test_depthwise_conv2d_nchw():
"""You may test different settings."""
batch = 1
in_channel = 256
in_height = 96
in_width = 96
filter_channel = in_channel
channel_multiplier = 1
filter_height = 3
filter_width = 3
stride_h = 1
stride_w = 1
padding = 'SAME' # or 'VALID'
# Placeholder
Input = te.placeholder((batch, in_channel, in_height, in_width), name='Input')
Filter = te.placeholder((filter_channel, channel_multiplier, filter_height, filter_width), name='Filter')
Stride = [stride_h, stride_w]
Scale = te.placeholder((in_channel * channel_multiplier,), name='Scale')
Shift = te.placeholder((in_channel * channel_multiplier,), name='Shift')
# Declare
DepthwiseConv2d = topi.nn.depthwise_conv2d_nchw(Input, Filter, Stride, padding)
ScaleShift = topi.nn.scale_shift_nchw(DepthwiseConv2d, Scale, Shift)
Relu = topi.nn.relu(ScaleShift)
# Schedule
s1 = schedule_depthwise_conv2d_nchw(DepthwiseConv2d)
s2 = schedule_depthwise_conv2d_nchw(ScaleShift)
s3 = schedule_depthwise_conv2d_nchw(Relu)
input_np = np.random.uniform(size=get_const_tuple(Input.shape)).astype(Input.dtype)
filter_np = np.random.uniform(size=get_const_tuple(Filter.shape)).astype(Filter.dtype)
scale_np = np.random.uniform(size=(in_channel * channel_multiplier)).astype(Scale.dtype)
shift_np = np.random.uniform(size=(in_channel * channel_multiplier)).astype(Shift.dtype)
def check_device(device):
if not tvm.runtime.enabled(device):
print("Skip because %s is not enabled" % device)
return
ctx = tvm.context(device, 0)
# Build the kernel
f1 = tvm.build(s1, [Input, Filter, DepthwiseConv2d], device)
f2 = tvm.build(s2, [Input, Filter, Scale, Shift, ScaleShift], device)
f3 = tvm.build(s3, [Input, Filter, Scale, Shift, Relu], device)
# Prepare data
input_tvm = tvm.nd.array(input_np, ctx)
filter_tvm = tvm.nd.array(filter_np, ctx)
scale_tvm = tvm.nd.array(scale_np, ctx)
shift_tvm = tvm.nd.array(shift_np, ctx)
depthwise_conv2d_tvm = tvm.nd.array(np.zeros(shape=get_const_tuple(DepthwiseConv2d.shape),dtype=DepthwiseConv2d.dtype), ctx)
scale_shift_tvm = tvm.nd.array(np.zeros(shape=get_const_tuple(ScaleShift.shape), dtype=ScaleShift.dtype), ctx)
relu_tvm = tvm.nd.array(np.zeros(shape=get_const_tuple(Relu.shape), dtype=Relu.dtype), ctx)
# Measure time cost of kernel 1 (depthwise_conv2d)
timer_1 = f1.time_evaluator(f1.entry_name, ctx, number=1000)
tcost_1 = timer_1(input_tvm, filter_tvm, depthwise_conv2d_tvm).mean
# Measure time cost of kernel 2 (depthwise_conv2d + scale_shift)
timer_2 = f2.time_evaluator(f2.entry_name, ctx, number=1000)
tcost_2 = timer_2(input_tvm, filter_tvm, scale_tvm, shift_tvm, scale_shift_tvm).mean
# Measure time cost of kernel 3 (depthwise_conv2d + scale_shift + relu)
timer_3 = f3.time_evaluator(f3.entry_name, ctx, number=1000)
tcost_3 = timer_3(input_tvm, filter_tvm, scale_tvm, shift_tvm, relu_tvm).mean
print("Input shape = " + str(get_const_tuple(Input.shape)))
print("Filter shape = " + str(get_const_tuple(Filter.shape)))
print("Stride = (%d, %d)" % (stride_h, stride_w))
print("padding = %s\n" % padding)
print("Output shape = " + str(get_const_tuple(DepthwiseConv2d.shape)))
print("average time cost of 1000 runs (depthwise_conv2d) = %g us" % (tcost_1*1e6))
print("average time cost of 1000 runs (depthwise_conv2d + scale_shift) = %g us" % (tcost_2*1e6))
print("average time cost of 1000 runs (depthwise_conv2d + scale_shift + relu) = %g us" % (tcost_3*1e6))
# correctness
depthwise_conv2d_scipy = topi.testing.depthwise_conv2d_python_nchw(input_np, filter_np, stride=[stride_h, stride_w], padding=padding)
scale_shift_scipy = np.zeros(shape=get_const_tuple(ScaleShift.shape))
for c in range(in_channel * channel_multiplier):
scale_shift_scipy[:,c,:,:] = depthwise_conv2d_scipy[:,c,:,:] * scale_np[c] + shift_np[c]
relu_scipy = np.maximum(scale_shift_scipy, 0)
tvm.testing.assert_allclose(depthwise_conv2d_tvm.asnumpy(), depthwise_conv2d_scipy, rtol=1e-5)
tvm.testing.assert_allclose(scale_shift_tvm.asnumpy(), scale_shift_scipy, rtol=1e-5)
tvm.testing.assert_allclose(relu_tvm.asnumpy(), relu_scipy, rtol=1e-5)
print("success")
for device in ['cuda', 'opencl', 'rocm']:
with tvm.transform.PassContext(config={"tir.UnrollLoop": {
"auto_max_step": 128,
"explicit_unroll": device != "rocm"
}}):
check_device(device)
def test_depthwise_conv2d_nhwc():
"""You may test different settings."""
batch = 1
in_channel = 256
in_height = 96
in_width = 96
filter_channel = in_channel
channel_multiplier = 1
filter_height = 3
filter_width = 3
stride_h = 1
stride_w = 1
padding = 'SAME' # or 'VALID'
# Placeholder
Input = te.placeholder((batch, in_height, in_width, in_channel), name='Input')
Filter = te.placeholder((filter_height, filter_width,filter_channel, channel_multiplier), name='Filter')
Stride = [stride_h, stride_w]
Scale = te.placeholder((in_channel * channel_multiplier,), name='Scale')
Shift = te.placeholder((in_channel * channel_multiplier,), name='Shift')
# Declare
DepthwiseConv2d = topi.nn.depthwise_conv2d_nhwc(Input, Filter, Stride, padding)
ScaleShift = topi.nn.scale_shift_nhwc(DepthwiseConv2d, Scale, Shift)
Relu = topi.nn.relu(ScaleShift)
# Schedule
s1 = schedule_depthwise_conv2d_nhwc(DepthwiseConv2d)
s2 = schedule_depthwise_conv2d_nhwc(ScaleShift)
s3 = schedule_depthwise_conv2d_nhwc(Relu)
input_np = np.random.uniform(size=get_const_tuple(Input.shape)).astype(Input.dtype)
filter_np = np.random.uniform(size=get_const_tuple(Filter.shape)).astype(Filter.dtype)
scale_np = np.random.uniform(size=(in_channel * channel_multiplier)).astype(Scale.dtype)
shift_np = np.random.uniform(size=(in_channel * channel_multiplier)).astype(Shift.dtype)
def check_device(device):
if not tvm.runtime.enabled(device):
print("Skip because %s is not enabled" % device)
return
ctx = tvm.context(device, 0)
# Build the kernel
f1 = tvm.build(s1, [Input, Filter, DepthwiseConv2d], device)
f2 = tvm.build(s2, [Input, Filter, Scale, Shift, ScaleShift], device)
f3 = tvm.build(s3, [Input, Filter, Scale, Shift, Relu], device)
# Prepare data
input_tvm = tvm.nd.array(input_np, ctx)
filter_tvm = tvm.nd.array(filter_np, ctx)
scale_tvm = tvm.nd.array(scale_np, ctx)
shift_tvm = tvm.nd.array(shift_np, ctx)
depthwise_conv2d_tvm = tvm.nd.array(np.zeros(shape=get_const_tuple(DepthwiseConv2d.shape),dtype=DepthwiseConv2d.dtype), ctx)
scale_shift_tvm = tvm.nd.array(np.zeros(shape=get_const_tuple(ScaleShift.shape), dtype=ScaleShift.dtype), ctx)
relu_tvm = tvm.nd.array(np.zeros(shape=get_const_tuple(Relu.shape), dtype=Relu.dtype), ctx)
# Measure time cost of kernel 1 (depthwise_conv2d)
timer_1 = f1.time_evaluator(f1.entry_name, ctx, number=1000)
tcost_1 = timer_1(input_tvm, filter_tvm, depthwise_conv2d_tvm).mean
# Measure time cost of kernel 2 (depthwise_conv2d + scale_shift)
timer_2 = f2.time_evaluator(f2.entry_name, ctx, number=1000)
tcost_2 = timer_2(input_tvm, filter_tvm, scale_tvm, shift_tvm, scale_shift_tvm).mean
# Measure time cost of kernel 3 (depthwise_conv2d + scale_shift + relu)
timer_3 = f3.time_evaluator(f3.entry_name, ctx, number=1000)
tcost_3 = timer_3(input_tvm, filter_tvm, scale_tvm, shift_tvm, relu_tvm).mean
print("Input shape = " + str(get_const_tuple(Input.shape)))
print("Filter shape = " + str(get_const_tuple(Filter.shape)))
print("Stride = (%d, %d)" % (stride_h, stride_w))
print("padding = %s\n" % padding)
print("Output shape = " + str(get_const_tuple(DepthwiseConv2d.shape)))
print("average time cost of 1000 runs (depthwise_conv2d) = %g us" % (tcost_1*1e6))
print("average time cost of 1000 runs (depthwise_conv2d + scale_shift) = %g us" % (tcost_2*1e6))
print("average time cost of 1000 runs (depthwise_conv2d + scale_shift + relu) = %g us" % (tcost_3*1e6))
# correctness
depthwise_conv2d_scipy = topi.testing.depthwise_conv2d_python_nhwc(input_np, filter_np, stride=[stride_h, stride_w], padding=padding)
scale_shift_scipy = np.zeros(shape=get_const_tuple(ScaleShift.shape))
for c in range(in_channel * channel_multiplier):
scale_shift_scipy[:,:,:,c] = depthwise_conv2d_scipy[:,:,:,c] * scale_np[c] + shift_np[c]
relu_scipy = np.maximum(scale_shift_scipy, 0)
tvm.testing.assert_allclose(depthwise_conv2d_tvm.asnumpy(), depthwise_conv2d_scipy, rtol=1e-5)
tvm.testing.assert_allclose(scale_shift_tvm.asnumpy(), scale_shift_scipy, rtol=1e-5)
tvm.testing.assert_allclose(relu_tvm.asnumpy(), relu_scipy, rtol=1e-5)
print("success")
for device in ['cuda', 'opencl', 'rocm']:
with tvm.transform.PassContext(config={"tir.UnrollLoop": {
"auto_max_step": 128,
"explicit_unroll": device != "cuda"
}}):
check_device(device)
if __name__ == "__main__":
test_depthwise_conv2d_nchw()
test_depthwise_conv2d_nhwc()
```
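The per-channel loops in the correctness checks above can equivalently be written with NumPy broadcasting; a small sketch for the NCHW case, with illustrative shapes:

```python
# Sketch: the NCHW scale-shift reference above, written with broadcasting
# instead of an explicit per-channel loop. Shapes are illustrative.
import numpy as np

conv = np.random.uniform(size=(1, 4, 8, 8)).astype("float32")
scale = np.random.uniform(size=(4,)).astype("float32")
shift = np.random.uniform(size=(4,)).astype("float32")

broadcast = conv * scale[None, :, None, None] + shift[None, :, None, None]

looped = np.zeros_like(conv)
for c in range(4):
    looped[:, c, :, :] = conv[:, c, :, :] * scale[c] + shift[c]

np.testing.assert_allclose(broadcast, looped, rtol=1e-6)
```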
#### File: tests/python/test_topi_bitserial_dense.py
```python
import os
import numpy as np
import tvm
from tvm import te
import topi
import topi.testing
from topi.util import get_const_tuple
from tvm.contrib.pickle_memoize import memoize
_bitserial_dense_implement = {
"generic": (topi.nn.bitserial_dense, topi.generic.schedule_bitserial_dense),
"cpu": (topi.x86.bitserial_dense, topi.x86.schedule_bitserial_dense),
"arm_cpu": (topi.arm_cpu.bitserial_dense, topi.arm_cpu.schedule_bitserial_dense),
}
def generate_quantized_np(shape, bits, out_dtype):
min_val = 0
max_val = 1 << bits
return np.random.randint(min_val, max_val, size=shape).astype(out_dtype)
def verify_bitserial_dense(batch, in_dim, out_dim, activation_bits, weight_bits, unipolar):
out_dtype = 'int16'
def get_ref_data(a_shape, b_shape, input_dtype):
a_np = generate_quantized_np(get_const_tuple(a_shape), activation_bits, input_dtype)
b_np = generate_quantized_np(get_const_tuple(b_shape), weight_bits, input_dtype)
if unipolar:
b_ = np.copy(b_np).astype(out_dtype)
for x in np.nditer(b_, op_flags=['readwrite']):
x[...] = 1 if x == 1 else -1
c_np = np.dot(a_np, b_.T)
else:
c_np = np.dot(a_np, b_np.T)
return a_np, b_np, c_np
for target in ["llvm", "llvm -device=arm_cpu"]:
if "arm_cpu" in target and 'arm' not in os.uname()[4]:
print ("Skipped running code, not an arm device")
continue
input_dtype = 'uint8' if "arm_cpu" in target else "uint32"
A = te.placeholder((batch, in_dim), dtype=input_dtype, name='A')
B = te.placeholder((out_dim, in_dim), dtype=input_dtype, name='B')
fcompute, fschedule = topi.testing.dispatch(target, _bitserial_dense_implement)
C = fcompute(A, B, activation_bits, weight_bits,
input_dtype, out_dtype, unipolar)
s = fschedule([C])
a_shape = get_const_tuple(A.shape)
b_shape = get_const_tuple(B.shape)
a_np, b_np, c_np = get_ref_data(a_shape, b_shape, input_dtype)
ctx = tvm.cpu(0)
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(b_np, ctx)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), ctx)
func = tvm.build(s, [A, B, C], target)
func(a, b, c)
tvm.testing.assert_allclose(c.asnumpy(), c_np, rtol=1e-5)
def test_bitserial_dense():
verify_bitserial_dense(1, 1024, 1000, 1, 1, True)
verify_bitserial_dense(1, 1024, 1000, 2, 1, True)
verify_bitserial_dense(1, 1024, 1000, 1, 1, False)
verify_bitserial_dense(1, 1024, 1000, 2, 1, False)
if __name__ == "__main__":
test_bitserial_dense()
```
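In the unipolar branch of `get_ref_data` above, weight bits {0, 1} are remapped to {-1, +1} before the reference dot product; a tiny illustration with hand-picked values:

```python
# Tiny illustration of the unipolar weight remapping in get_ref_data above.
import numpy as np

a = np.array([[3, 1, 2]], dtype="uint32")            # activations
b = np.array([[1, 0, 1]], dtype="uint32")            # 1-bit weights
b_signed = np.where(b == 1, 1, -1).astype("int16")   # {0, 1} -> {-1, +1}
print(np.dot(a.astype("int16"), b_signed.T))         # [[4]] = 3*1 + 1*(-1) + 2*1
```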
#### File: tutorials/language/tensorize.py
```python
from __future__ import absolute_import, print_function
import tvm
from tvm import te
import numpy as np
######################################################################
# Define Matrix Multiplication
# ----------------------------
# Take matrix multiplication as our example.
# Matmul first multiplies the corresponding elements of the two matrices,
# then accumulates across a certain axis.
# The following lines describe the computation :code:`A * B^T` in TVM.
#
N, M, L = 1024, 512, 64
A = te.placeholder((N, L), name='A')
B = te.placeholder((M, L), name='B')
k = te.reduce_axis((0, L), name='k')
C = te.compute((N, M), lambda i, j:
te.sum(A[i, k] * B[j, k], axis=k), name='C')
s = te.create_schedule(C.op)
print(tvm.lower(s, [A, B, C], simple_mode=True))
######################################################################
# Schedule the Matmul
# -------------------
# Now, suppose we have an accelerator that supports
# matrix-vector multiplication (GEMV) as a hardware primitive,
# which can take a reduce axis of arbitrary size,
# but whose other axis must be no larger than 16.
# Thus we break down the matmul loops so that the innermost loops form a (16x64) GEMV.
#
factor = 16
x, y = C.op.axis
z, = C.op.reduce_axis
yo, yi = s[C].split(y, factor=factor)
s[C].reorder(x, yo, yi, z)
print(tvm.lower(s, [A, B, C], simple_mode=True))
######################################################################
# As shown in the IR printed above,
# the inner loop :code:`j.inner` together with :code:`k` forms a GEMV computation
# - within the innermost two loops, the index :code:`i` is fixed and
# the access to the matrix :code:`A` varies only with :code:`k`,
# which makes the access pattern of :code:`A` a "vector".
# In order to leverage our hypothetical hardware's GEMV instruction,
# we can tensorize over :code:`j.inner`.
#
# Define GEMV Tensorization Intrinsic
# -----------------------------------
# Before scheduling the tensorization, we first need to define the intrinsic function for GEMV.
# It consists of two parts: the first is a compute definition of GEMV,
# which TVM uses to match the computation pattern in the original matmul schedule;
# the second specifies how to execute GEMV on the device,
# which is done in :code:`intrin_func` below.
#
def intrin_gemv(m, l):
a = te.placeholder((l,), name='a')
b = te.placeholder((m, l), name='b')
k = te.reduce_axis((0, l), name='k')
c = te.compute((m,), lambda i: te.sum(a[k] * b[i, k], axis=k), name='c')
Ab = tvm.tir.decl_buffer(a.shape, a.dtype,
name="A",
offset_factor=1,
strides=[1])
Bb = tvm.tir.decl_buffer(b.shape, b.dtype,
name="B",
offset_factor=1,
strides=[te.var("s1"), 1])
Cb = tvm.tir.decl_buffer(c.shape, c.dtype,
name="C",
offset_factor=1,
strides=[1])
def intrin_func(ins, outs):
ib = tvm.tir.ir_builder.create()
aa, bb = ins
cc = outs[0]
ib.emit(tvm.tir.call_extern("int32", "gemv_update",
cc.access_ptr("w"),
aa.access_ptr("r"),
bb.access_ptr("r"),
m, l, bb.strides[0]))
return ib.get()
return te.decl_tensor_intrin(c.op, intrin_func, binds={a: Ab, b: Bb, c: Cb})
######################################################################
# Here :code:`te.decl_tensor_intrin` declares how to execute the computation :code:`c.op`.
# Our implementation simply takes the inputs and outputs,
# converts them to pointers, and emits an external function call.
# Note that tensorization requires the user to specify :code:`offset_factor`;
# with this information, TVM knows whether the data is aligned
# between the start address of the original data structure
# and the offset being passed to tensorize,
# so that it has a chance to optimize with vectorized loading.
# We set the factor to 1 for simplicity.
#
# Buffers are also declared for the inputs and outputs. Although this is not required,
# we benefit from the extra information buffers provide. For example, we pass
# :code:`bb.strides[0]` as an argument to the external function :code:`gemv_update`.
# For now :code:`bb.strides[0] == l`,
# but later we will see how it can differ with more complicated schedules.
#
# Note that we use :code:`te.var("s1")` as the first stride dimension for :code:`B`.
# If the strides can be inferred
# - in this case, TVM knows tensor B is compact, so the strides are :code:`[L, 1]` -
# such a placeholder lets TVM automatically bind the inferred value for us.
#
gemv = intrin_gemv(factor, L)
s[C].tensorize(yi, gemv)
print(tvm.lower(s, [A, B, C], simple_mode=True))
######################################################################
# By tensorizing over :code:`yi`, the innermost two loops are
# now replaced by the intrinsic function we defined before.
# In order to build and run the module, let's define the external function :code:`gemv_update`.
# It is a naive implementation of GEMV, included just for demonstration.
#
def gemv_impl():
cc_code = """
extern "C" int gemv_update(float *cc, float *aa, float *bb, int m, int l, int stride) {
for (int i = 0; i < m; ++i) {
for (int j = 0; j < l; ++j) {
cc[i] += aa[j] * bb[i * stride + j];
}
}
return 0;
}
"""
from tvm.contrib import util, clang
temp = util.tempdir()
ll_path = temp.relpath("temp.ll")
# Create LLVM IR from the C source code
ll_code = clang.create_llvm(cc_code, output=ll_path)
return ll_code
######################################################################
# Now we leverage the pragma attribute :code:`import_llvm` to import LLVM asm inline.
# The import needs to happen before the tensorized GEMV is executed.
#
s[C].pragma(x, "import_llvm", gemv_impl())
print(tvm.lower(s, [A, B, C], simple_mode=True))
######################################################################
# Finally we compare the tensorized version against what :code:`numpy.dot` produces,
# to ensure our implementation is correct.
#
func = tvm.build(s, [A, B, C], target="llvm", name="gemv")
from topi.util import get_const_tuple
dtype = A.dtype
ctx = tvm.context("cpu", 0)
a = np.random.uniform(size=get_const_tuple(A.shape)).astype(dtype)
b = np.random.uniform(size=get_const_tuple(B.shape)).astype(dtype)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=dtype), ctx)
func(tvm.nd.array(a, ctx), tvm.nd.array(b, ctx), c)
tvm.testing.assert_allclose(c.asnumpy(), np.dot(a, b.T), rtol=1e-3)
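######################################################################
# (Added sketch, not part of the original tutorial.) If we also want a rough idea of
# the kernel's runtime, the built module exposes a time evaluator; the measurement
# below is only illustrative and depends entirely on the machine it runs on.
#
evaluator = func.time_evaluator(func.entry_name, ctx, number=10)
a_nd = tvm.nd.array(a, ctx)
b_nd = tvm.nd.array(b, ctx)
print("tensorized gemv: %f ms" % (evaluator(a_nd, b_nd, c).mean * 1e3))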
######################################################################
# Reduce-update for Tensorize
# ---------------------------
# So far you have learned the basic idea of tensorize;
# now let's move one step forward to a more complicated case.
#
# Assume our accelerator can only multiply a vector by a square matrix,
# in which the vector size needs to be no larger than 16.
# Given this hardware constraint, we now need to split the reduce axis as follows:
#
zo, zi = s[C].split(z, factor=factor)
s[C].reorder(x, yo, zo, yi, zi)
######################################################################
# However, since the tensorize intrinsic now only covers a part of the reduce axis,
# instead of using one "body" function, TVM requires a :code:`reduce_reset` function,
# which will be invoked before the reduce for-loop, and a :code:`reduce_update` function,
# which defines the "update" computing strategy.
#
def gemv_impl():
cc_code = """
extern "C" int gemv_update(float *cc, float *aa, float *bb, int m, int l, int stride) {
for (int i = 0; i < m; ++i) {
for (int j = 0; j < l; ++j) {
cc[i] += aa[j] * bb[i * stride + j];
}
}
return 0;
}
extern "C" int gemv_reset(float *cc, int m) {
for (int i = 0; i < m; ++i) {
cc[i] = 0.0;
}
return 0;
}
"""
from tvm.contrib import util, clang
temp = util.tempdir()
ll_path = temp.relpath("temp.ll")
# Create LLVM IR from the C source code
ll_code = clang.create_llvm(cc_code, output=ll_path)
return ll_code
def intrin_gemv(m, l):
a = te.placeholder((l,), name='a')
b = te.placeholder((m, l), name='b')
k = te.reduce_axis((0, l), name='k')
c = te.compute((m,), lambda i:
te.sum(a[k] * b[i, k], axis=k), name='c')
Ab = tvm.tir.decl_buffer(a.shape, a.dtype,
name="A",
offset_factor=1,
strides=[1])
Bb = tvm.tir.decl_buffer(b.shape, b.dtype,
name="B",
offset_factor=1,
strides=[te.var("s1"), 1])
Cb = tvm.tir.decl_buffer(c.shape, c.dtype,
name="C",
offset_factor=1,
strides=[1])
def intrin_func(ins, outs):
aa, bb = ins
cc = outs[0]
def _body():
ib = tvm.tir.ir_builder.create()
ib.emit(tvm.tir.call_extern("int32", "gemv_update",
cc.access_ptr("w"),
aa.access_ptr("r"),
bb.access_ptr("r"),
m, l, bb.strides[0]))
return ib.get()
def _reduce_reset():
ib = tvm.tir.ir_builder.create()
ib.emit(tvm.tir.call_extern("int32", "gemv_reset", cc.access_ptr("w"), m))
return ib.get()
def _reduce_update():
return _body()
return _body(), _reduce_reset(), _reduce_update()
return te.decl_tensor_intrin(c.op, intrin_func, binds={a: Ab, b: Bb, c: Cb})
######################################################################
# Note that :code:`intrin_func` now returns a triplet:
# :code:`(body, reduce_reset, reduce_update)`.
# If tensorization includes all the reduce axes, the function :code:`body()` will be invoked;
# otherwise :code:`reduce_reset()` and :code:`reduce_update()` together will be used.
# In our example :code:`body()` and :code:`reduce_update()`
# share the same implementation,
# while in other cases, hardware may have different instructions for these two functions.
# Moreover, we can now see that :code:`bb.strides[0]` differs from :code:`l`
# due to the tiling.
#
# Tensorize for the squared GEMV, build and check the results.
#
gemv = intrin_gemv(factor, factor)
s[C].tensorize(yi, gemv)
s[C].pragma(yo, "import_llvm", gemv_impl())
func = tvm.build(s, [A, B, C], target="llvm", name="gemv")
a = np.random.uniform(size=get_const_tuple(A.shape)).astype(dtype)
b = np.random.uniform(size=get_const_tuple(B.shape)).astype(dtype)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=dtype), ctx)
func(tvm.nd.array(a, ctx), tvm.nd.array(b, ctx), c)
tvm.testing.assert_allclose(c.asnumpy(), np.dot(a, b.T), rtol=1e-3)
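######################################################################
# (Added check, not part of the original tutorial.) Inspecting the lowered IR is a
# simple way to confirm that, with only part of the reduce axis tensorized, both the
# reset intrinsic and the update intrinsic are emitted around the outer reduce loop.
#
print(tvm.lower(s, [A, B, C], simple_mode=True))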
######################################################################
# Summary
# -------
# This tutorial demonstrates the usage of the tensorize intrinsic in TVM.
# Tensorize provides a way for users to get a fully optimized schedule via micro-kernels.
# For example, INT8 quantization on Intel CPUs uses tensorization
# to invoke AVX instructions directly.
# It also enables TVM to compile to ASICs -
# check out :ref:`vta-index` for details.
# We also demonstrate how to import inline assembly,
# which helps users inject asm easily into the schedule.
#
```
#### File: vta/top/vta_conv2d.py
```python
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
import topi
from .util import is_packed_layout
from ..environment import get_env
@autotvm.register_topi_compute("conv2d_packed.vta")
def conv2d_packed(cfg, data, kernel, strides, padding, dilation, layout, out_dtype):
""" Packed conv2d function."""
if not is_packed_layout(layout):
raise topi.InvalidShapeError()
assert dilation == (1, 1)
if padding[0]:
pad_data = topi.nn.pad(data, [0, 0, padding[0], padding[1], 0, 0], name="pad_data")
else:
pad_data = data
assert len(data.shape) == 6
assert len(kernel.shape) == 6
oheight = topi.util.get_const_int((pad_data.shape[2] - kernel.shape[2]) // strides[0] + 1)
owidth = topi.util.get_const_int((pad_data.shape[3] - kernel.shape[3]) // strides[1] + 1)
oshape = (data.shape[0], kernel.shape[0], oheight, owidth, data.shape[4], kernel.shape[4])
ishape = topi.util.get_const_tuple(data.shape)
kshape = topi.util.get_const_tuple(kernel.shape)
d_i = te.reduce_axis((0, kshape[2]), name='d_i')
d_j = te.reduce_axis((0, kshape[3]), name='d_j')
k_o = te.reduce_axis((0, ishape[1]), name='k_o')
k_i = te.reduce_axis((0, ishape[-1]), name='k_i')
hstride, wstride = strides
res = te.compute(
oshape,
lambda b_o, c_o, i, j, b_i, c_i: te.sum(
pad_data[b_o, k_o, i*hstride+d_i, j*wstride+d_j, b_i, k_i].astype(out_dtype) *
kernel[c_o, k_o, d_i, d_j, c_i, k_i].astype(out_dtype),
axis=[k_o, d_i, d_j, k_i]),
name="res", tag="conv2d_dense")
cfg.add_flop(2 * np.prod(topi.util.get_const_tuple(oshape)) *
kshape[2] * kshape[3] * ishape[1] * ishape[-1])
return res
@autotvm.register_topi_schedule("conv2d_packed.vta")
def schedule_conv2d_packed(cfg, outs):
"""Schedule packed conv2d"""
assert len(outs) == 1
output = outs[0]
const_ops = []
ewise_inputs = []
ewise_ops = []
conv2d_res = []
assert "int" in output.op.input_tensors[0].dtype
def _traverse(op):
if topi.tag.is_broadcast(op.tag):
if not op.same_as(output.op):
if not op.axis:
const_ops.append(op)
else:
ewise_ops.append(op)
for tensor in op.input_tensors:
if isinstance(tensor.op, tvm.te.PlaceholderOp):
ewise_inputs.append((op, tensor))
else:
_traverse(tensor.op)
else:
assert op.tag == "conv2d_dense"
conv2d_res.append(op)
_traverse(output.op)
assert len(conv2d_res) == 1
conv2d_stage = conv2d_res[0].output(0)
s = te.create_schedule(output.op)
##### space definition begin #####
b, c_o, x_i, x_j, _, _ = s[conv2d_stage].op.axis
c_i, _, _, _ = s[conv2d_stage].op.reduce_axis
cfg.define_split('tile_b', b, num_outputs=2)
cfg.define_split('tile_h', x_i, num_outputs=2)
cfg.define_split('tile_w', x_j, num_outputs=2)
cfg.define_split('tile_ci', c_i, num_outputs=2)
cfg.define_split('tile_co', c_o, num_outputs=2)
cfg.define_knob('oc_nthread', [1, 2])
cfg.define_knob('h_nthread', [1, 2])
###### space definition end ######
data, kernel = conv2d_stage.op.input_tensors
if isinstance(data.op, tvm.te.ComputeOp) and "pad" in data.op.tag:
temp = data.op.input_tensors[0]
pad_data = data
data = temp
else:
pad_data = None
env = get_env()
# setup pad
if pad_data is not None:
cdata = pad_data
s[pad_data].set_scope(env.inp_scope)
else:
cdata = s.cache_read(data, env.inp_scope, [conv2d_stage])
ckernel = s.cache_read(kernel, env.wgt_scope, [conv2d_stage])
s[conv2d_stage].set_scope(env.acc_scope)
# cache read input
cache_read_ewise = []
for consumer, tensor in ewise_inputs:
cache_read_ewise.append(
s.cache_read(tensor, env.acc_scope, [consumer]))
# set ewise scope
for op in ewise_ops:
s[op].set_scope(env.acc_scope)
s[op].pragma(s[op].op.axis[0], env.alu)
for op in const_ops:
s[op].compute_inline()
# tile
x_bo, x_co, x_i, x_j, x_bi, x_ci = s[output].op.axis
x_co0, x_co1 = cfg['tile_co'].apply(s, output, x_co)
x_i0, x_i1 = cfg['tile_h'].apply(s, output, x_i)
x_j0, x_j1 = cfg['tile_w'].apply(s, output, x_j)
s[output].reorder(x_bo, x_i0, x_co0, x_j0, x_co1, x_i1, x_j1, x_bi, x_ci)
store_pt = x_j0
# set all compute scopes
s[conv2d_stage].compute_at(s[output], store_pt)
for op in ewise_ops:
s[op].compute_at(s[output], store_pt)
for tensor in cache_read_ewise:
s[tensor].compute_at(s[output], store_pt)
s[tensor].pragma(s[tensor].op.axis[0], env.dma_copy)
# virtual threading along output channel axes
if cfg['oc_nthread'].val > 1:
_, v_t = s[output].split(x_co0, factor=cfg['oc_nthread'].val)
s[output].reorder(v_t, x_bo)
s[output].bind(v_t, te.thread_axis("cthread"))
# virtual threading along spatial rows
if cfg['h_nthread'].val > 1:
_, v_t = s[output].split(x_i0, factor=cfg['h_nthread'].val)
s[output].reorder(v_t, x_bo)
s[output].bind(v_t, te.thread_axis("cthread"))
x_bo, x_co, x_i, x_j, x_bi, x_ci = s[conv2d_stage].op.axis
k_o, d_i, d_j, k_i = s[conv2d_stage].op.reduce_axis
s[conv2d_stage].reorder(x_bo, k_o, x_j, d_j, d_i, x_co, x_i, x_bi, x_ci, k_i)
k_o, _ = cfg['tile_ci'].apply(s, conv2d_stage, k_o)
s[cdata].compute_at(s[conv2d_stage], k_o)
s[ckernel].compute_at(s[conv2d_stage], k_o)
# Use VTA instructions
s[cdata].pragma(s[cdata].op.axis[0], env.dma_copy)
s[ckernel].pragma(s[ckernel].op.axis[0], env.dma_copy)
s[conv2d_stage].tensorize(x_bi, env.gemm)
s[output].pragma(x_co1, env.dma_copy)
return s
```
#### File: vta/top/vta_dense.py
```python
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
import topi
from ..environment import get_env
def is_packed_layout(layout):
"""Check if layout is packed layout"""
if layout == "NCHW":
return False
if "n" in layout and "c" in layout:
return True
return False
@autotvm.register_topi_compute("dense_packed.vta")
def dense_packed(cfg, data, weight, bias=None, out_dtype=None):
"""Dense function declaration."""
# Make sure that the dense operator is packed
if len(data.shape) != 4 or len(weight.shape) != 4:
raise topi.InvalidShapeError()
# Derive shapes
ishape = topi.util.get_const_tuple(data.shape)
wshape = topi.util.get_const_tuple(weight.shape)
oshape = (data.shape[0], weight.shape[0], data.shape[2], weight.shape[2])
# Reduction axes (input channel)
assert ishape[1] == wshape[1]
assert ishape[3] == wshape[3]
k_o = te.reduce_axis((0, ishape[1]), name='k_o')
k_i = te.reduce_axis((0, ishape[3]), name='k_i')
res = te.compute(
oshape,
lambda b_o, c_o, b_i, c_i: te.sum(
data[b_o, k_o, b_i, k_i].astype(out_dtype) *
weight[c_o, k_o, c_i, k_i].astype(out_dtype),
axis=[k_o, k_i]),
name="res", tag="dense_pack")
cfg.add_flop(2 * np.prod(topi.util.get_const_tuple(oshape)) *
ishape[1] * ishape[3])
return res
@autotvm.register_topi_schedule("dense_packed.vta")
def schedule_dense_packed(cfg, outs):
"""Packed dense schedule."""
assert len(outs) == 1
output = outs[0]
const_ops = []
ewise_inputs = []
ewise_ops = []
dense_res = []
assert "int" in output.op.input_tensors[0].dtype
def _traverse(op):
if topi.tag.is_broadcast(op.tag):
if not op.same_as(output.op):
if not op.axis:
const_ops.append(op)
else:
ewise_ops.append(op)
for tensor in op.input_tensors:
if isinstance(tensor.op, tvm.te.PlaceholderOp):
ewise_inputs.append((op, tensor))
else:
_traverse(tensor.op)
else:
assert op.tag == "dense_pack"
dense_res.append(op)
_traverse(output.op)
assert len(dense_res) == 1
dense_stage = dense_res[0].output(0)
s = te.create_schedule(output.op)
##### space definition begin #####
b, c_o, _, _ = s[dense_stage].op.axis
c_i, _ = s[dense_stage].op.reduce_axis
cfg.define_split('tile_b', b, num_outputs=2)
cfg.define_split('tile_ci', c_i, num_outputs=2)
cfg.define_split('tile_co', c_o, num_outputs=2)
cfg.define_knob('oc_nthread', [1, 2])
###### space definition end ######
data, weight = dense_stage.op.input_tensors
env = get_env()
cdata = s.cache_read(data, env.inp_scope, [dense_stage])
cweight = s.cache_read(weight, env.wgt_scope, [dense_stage])
s[dense_stage].set_scope(env.acc_scope)
# cache read input
cache_read_ewise = []
for consumer, tensor in ewise_inputs:
cache_read_ewise.append(
s.cache_read(tensor, env.acc_scope, [consumer]))
# set ewise scope
for op in ewise_ops:
s[op].set_scope(env.acc_scope)
s[op].pragma(s[op].op.axis[0], env.alu)
for op in const_ops:
s[op].compute_inline()
# apply tiling for SRAM reuse
x_b, x_c, _, _ = s[output].op.axis
x_bo, x_bi = cfg['tile_b'].apply(s, output, x_b)
x_co, x_ci = cfg['tile_co'].apply(s, output, x_c)
s[output].reorder(x_bo, x_co, x_bi, x_ci)
store_pt = x_co
# set all compute scopes
s[dense_stage].compute_at(s[output], store_pt)
for op in ewise_ops:
s[op].compute_at(s[output], store_pt)
for tensor in cache_read_ewise:
s[tensor].compute_at(s[output], store_pt)
s[tensor].pragma(s[tensor].op.axis[0], env.dma_copy)
# virtual threading along output channel axes
if cfg['oc_nthread'].val > 1:
_, v_t = s[output].split(x_co, factor=cfg['oc_nthread'].val)
s[output].reorder(v_t, x_bo)
s[output].bind(v_t, te.thread_axis("cthread"))
x_bo, x_co, x_bi, _ = s[dense_stage].op.axis
k_o, _ = s[dense_stage].op.reduce_axis
s[dense_stage].reorder(x_bo, k_o, x_co)
k_o, _ = cfg['tile_ci'].apply(s, dense_stage, k_o)
s[cdata].compute_at(s[dense_stage], k_o)
s[cweight].compute_at(s[dense_stage], k_o)
# Use VTA instructions
s[cdata].pragma(s[cdata].op.axis[0], env.dma_copy)
s[cweight].pragma(s[cweight].op.axis[0], env.dma_copy)
s[dense_stage].tensorize(x_bi, env.gemm)
s[output].pragma(x_ci, env.dma_copy)
return s
```
#### File: python/vta/transform.py
```python
import tvm
from tvm import te
from topi import util
from .environment import get_env
def _match_pragma(stmt, key):
"""Internal helper to match stmt to pragma stmt.
Parameters
----------
stmt : Stmt
The AttrStmt
key : str
The pragma key
"""
return ((stmt.attr_key == "pragma_" + key) or
(stmt.attr_key == "pragma_scope" and stmt.value.value == key))
def FoldUopLoop():
"""Detect and fold uop loop.
VTA support uop programming model
that recognizes loop structure.
This pass detect the loop structure
and extract that into uop loop AST.
Returns
-------
fpass : tvm.transform.Pass
The pass
"""
def _fold_outermost_loop(body):
stmt = body
if not isinstance(stmt, tvm.tir.For):
return None, body, None
loop_var = stmt.loop_var
gemm_offsets = [None, None, None]
fail = [False]
builtin_uop_push = tvm.ir.Op.get("tir.vta.uop_push")
def _post_order(op):
assert isinstance(op, tvm.tir.Call)
base_args = 2
if op.op.same_as(builtin_uop_push):
args = []
args += op.args[:base_args]
for i in range(3):
m = tvm.arith.detect_linear_equation(
op.args[i + base_args], [loop_var])
if not m:
fail[0] = True
return op
if gemm_offsets[i] is not None:
if not tvm.ir.structural_equal(m[0], gemm_offsets[i]):
fail[0] = True
return op
args.append(m[1])
else:
gemm_offsets[i] = m[0]
args.append(m[1])
args += op.args[base_args+3:]
return tvm.tir.call_intrin("int32", builtin_uop_push, *args)
if op.op.name not in ("tir.vta.command_handle", "tir.tvm_thread_context"):
raise RuntimeError("unexpected op %s" % op)
return op
ret = tvm.tir.stmt_functor.ir_transform(
stmt.body, None, _post_order, ["tir.Call"])
if not fail[0] and all(x is not None for x in gemm_offsets):
def _visit(op):
if op.same_as(loop_var):
fail[0] = True
tvm.tir.stmt_functor.post_order_visit(ret, _visit)
if not fail[0]:
begin = tvm.tir.call_extern(
"int32", "VTAUopLoopBegin", stmt.extent, *gemm_offsets)
end = tvm.tir.call_extern("int32", "VTAUopLoopEnd")
return [begin, ret, end]
raise ValueError("Failed to fold the GEMM instructions..")
def _do_fold(stmt):
env = get_env()
if (stmt.attr_key == "coproc_uop_scope" and
isinstance(stmt.value, tvm.tir.StringImm) and
stmt.value.value == env.dev.vta_push_uop.value):
body = stmt.body
begins = []
ends = []
try:
begin, body, end = _fold_outermost_loop(body)
if begin is not None:
begins.append(begin)
if end is not None:
ends.append(end)
begin, body, end = _fold_outermost_loop(body)
if begin is not None:
begins.append(begin)
if end is not None:
ends.append(end)
except ValueError:
pass
if body == stmt.body:
return stmt
ends = list(reversed(ends))
body = tvm.tir.stmt_seq(*(begins + [body] + ends))
return tvm.tir.AttrStmt(
stmt.node, stmt.attr_key, stmt.value, body)
return None
def _ftransform(f, mod, ctx):
return f.with_body(tvm.tir.stmt_functor.ir_transform(
f.body, _do_fold, None, ["tir.AttrStmt"]))
return tvm.tir.transform.prim_func_pass(
_ftransform, opt_level=0, name="tir.vta.FoldUopLoop")
def CPUAccessRewrite():
"""Detect CPU access to VTA buffer and get address correctly.
VTA's buffer is an opaque handle that do not
correspond to address in CPU.
This pass detect CPU access and rewrite to use pointer
returned VTABufferCPUPtr for CPU access.
Returns
-------
fpass : tvm.transform.Pass
The pass
"""
def _ftransform(f, mod, ctx):
rw_info = {}
env = get_env()
def _post_order(op):
if isinstance(op, tvm.tir.Allocate):
buffer_var = op.buffer_var
if not buffer_var in rw_info:
return None
new_var = rw_info[buffer_var]
let_stmt = tvm.tir.LetStmt(
new_var, tvm.tir.call_extern(
"handle", "VTABufferCPUPtr",
env.dev.command_handle,
buffer_var), op.body)
alloc = tvm.tir.Allocate(
buffer_var, op.dtype, op.extents,
op.condition, let_stmt)
del rw_info[buffer_var]
return alloc
if isinstance(op, tvm.tir.Load):
buffer_var = op.buffer_var
if not buffer_var in rw_info:
rw_info[buffer_var] = te.var(
buffer_var.name + "_ptr", "handle")
new_var = rw_info[buffer_var]
return tvm.tir.Load(op.dtype, new_var, op.index)
if isinstance(op, tvm.tir.Store):
buffer_var = op.buffer_var
if not buffer_var in rw_info:
rw_info[buffer_var] = te.var(
buffer_var.name + "_ptr", "handle")
new_var = rw_info[buffer_var]
return tvm.tir.Store(new_var, op.value, op.index)
raise RuntimeError("not reached")
stmt_in = f.body
stmt = tvm.tir.stmt_functor.ir_transform(
stmt_in, None, _post_order, ["tir.Allocate", "tir.Load", "tir.Store"])
for buffer_var, new_var in rw_info.items():
stmt = tvm.tir.LetStmt(
new_var, tvm.tir.call_extern(
"handle", "VTABufferCPUPtr",
env.dev.command_handle,
buffer_var), stmt)
return f.with_body(stmt)
return tvm.tir.transform.prim_func_pass(
_ftransform, opt_level=0, name="tir.vta.CPUAccessRewrite")
def LiftAllocToScopeBegin():
"""Lift allocate to beginning of the current scope.
Returns
-------
fpass : tvm.transform.Pass
The pass
"""
def _ftransform(f, mod, ctx):
lift_stmt = [[]]
def _merge_block(slist, body):
for op in slist:
if op.body == body:
body = op
elif isinstance(op, tvm.tir.Allocate):
body = tvm.tir.Allocate(
op.buffer_var, op.dtype,
op.extents, op.condition, body)
elif isinstance(op, tvm.tir.AttrStmt):
body = tvm.tir.AttrStmt(
op.node, op.attr_key, op.value, body)
elif isinstance(op, tvm.tir.For):
body = tvm.tir.For(
op.loop_var, op.min, op.extent, op.for_type,
op.device_api, body)
else:
raise RuntimeError("unexpected op")
del slist[:]
return body
def _pre_order(op):
if isinstance(op, tvm.tir.For):
lift_stmt.append([])
elif isinstance(op, tvm.tir.AttrStmt):
if op.attr_key == "virtual_thread":
lift_stmt.append([])
def _post_order(op):
if isinstance(op, tvm.tir.Allocate):
lift_stmt[-1].append(op)
return op.body
if isinstance(op, tvm.tir.AttrStmt):
if op.attr_key == "storage_scope":
lift_stmt[-1].append(op)
return op.body
if op.attr_key == "virtual_thread":
return _merge_block(lift_stmt.pop() + [op], op.body)
return op
if isinstance(op, tvm.tir.For):
return _merge_block(lift_stmt.pop() + [op], op.body)
raise RuntimeError("not reached")
stmt_in = f.body
stmt = tvm.tir.stmt_functor.ir_transform(
stmt_in, _pre_order, _post_order, ["tir.Allocate", "tir.AttrStmt", "tir.For"])
assert len(lift_stmt) == 1
return f.with_body(_merge_block(lift_stmt[0], stmt))
return tvm.tir.transform.prim_func_pass(
_ftransform, opt_level=0, name="tir.vta.LiftAllocToScopeBegin")
def InjectSkipCopy():
"""Pass to inject skip copy stmt, used for debug purpose.
Returns
-------
fpass : tvm.transform.Pass
The pass
"""
def _do_fold(stmt):
if _match_pragma(stmt, "skip_dma_copy"):
return tvm.tir.Evaluate(0)
return None
def _ftransform(f, mod, ctx):
return f.with_body(tvm.tir.stmt_functor.ir_transform(
f.body, _do_fold, None, ["tir.AttrStmt"]))
return tvm.tir.transform.prim_func_pass(
_ftransform, opt_level=0, name="tir.vta.InjectSkipCopy")
def InjectCoProcSync():
"""Pass inject coproc sync
Returns
-------
fpass : tvm.transform.Pass
The pass
"""
def _ftransform(f, *_):
success = [False]
def _do_fold(stmt):
if _match_pragma(stmt, "coproc_sync"):
success[0] = True
sync = tvm.tir.Call(
"int32", "vta.coproc_sync", [])
return tvm.tir.SeqStmt([stmt.body, tvm.tir.Evaluate(sync)])
if _match_pragma(stmt, "trim_loop"):
op = stmt.body
assert isinstance(op, tvm.tir.For)
return tvm.tir.For(
op.loop_var, op.min, 2, op.for_type,
op.device_api, op.body)
return None
return f.with_body(tvm.tir.stmt_functor.ir_transform(
f.body, None, _do_fold, ["tir.AttrStmt"]))
return tvm.transform.Sequential(
[tvm.tir.transform.prim_func_pass(_ftransform, 0, "tir.vta.InjectCoProcSync"),
tvm.tir.transform.CoProcSync()],
opt_level=0, name="tir.vta.InjectCoProcSync")
def InjectDMAIntrin():
"""Pass to inject DMA copy intrinsics.
Returns
-------
fpass : tvm.transform.Pass
The pass
"""
idxd = tvm.tir.indexdiv
idxm = tvm.tir.indexmod
def _check_compact(buf):
ndim = len(buf.shape)
size = tvm.tir.const(1, buf.shape[0].dtype)
for i in reversed(range(ndim)):
if not util.equal_const_int(size - buf.strides[i], 0):
raise RuntimeError(
"Cannot prove compact: shape=%s, strides=%s" % (buf.shape, buf.strides))
size = size * buf.shape[i]
def _fold_buffer_dim(buf, scope, elem_block):
ndim = len(buf.shape)
x_size = 1
base = 0
for i in range(1, ndim + 1):
if not util.equal_const_int(buf.strides[ndim - i] - x_size, 0):
raise RuntimeError("scope %s needs to have block=%d" % (scope, elem_block))
x_size = x_size * buf.shape[ndim - i]
if util.equal_const_int(x_size - elem_block, 0):
base = i + 1
break
if base == 0:
raise RuntimeError("scope %s need to have block=%d, shape=%s" % (
scope, elem_block, buf.shape))
shape = [elem_block]
strides = [1]
if base < ndim + 1 and not util.equal_const_int(buf.strides[ndim - base], elem_block):
shape.append(1)
strides.append(elem_block)
analyzer = tvm.arith.Analyzer()
while base < ndim + 1:
x_size = 1
x_stride = buf.strides[ndim - base]
next_base = base
if not util.equal_const_int(idxm(x_stride, elem_block), 0):
raise RuntimeError(
"scope %s need to have block=%d, shape=%s, strides=%s" % (
scope, elem_block, buf.shape, buf.strides))
for i in range(base, ndim + 1):
k = ndim - i
if not util.equal_const_int(x_size * x_stride - buf.strides[k], 0):
break
x_size = x_size * buf.shape[k]
next_base = i + 1
shape.append(analyzer.simplify(x_size))
strides.append(x_stride)
assert next_base != base
base = next_base
strides = list(reversed(strides))
shape = list(reversed(shape))
return shape, strides
def _get_2d_pattern(buf, elem_width, elem_bytes, dtype, scope, allow_fold):
elem_block = elem_bytes * 8 // elem_width
if buf.dtype != dtype:
raise RuntimeError("Expect buffer type to be %s instead of %s" %
(dtype, buf.dtype))
shape, strides = buf.shape, buf.strides
if not util.equal_const_int(idxm(buf.elem_offset, elem_block), 0):
raise RuntimeError("scope %s need to have block=%d" % (scope, elem_block))
if allow_fold:
shape, strides = _fold_buffer_dim(buf, scope, elem_block)
else:
shape = list(x for x in shape)
strides = list(x for x in strides)
def raise_error():
"""Internal function to raise error """
raise RuntimeError(
("Scope[%s]: cannot detect 2d pattern with elem_block=%d:" +
" shape=%s, strides=%s") % (scope, elem_block, buf.shape, buf.strides))
ndim = len(shape)
# Check if the inner-tensor is already flat
flat = util.equal_const_int(shape[-1], elem_block)
if flat:
if not util.equal_const_int(strides[-1], 1):
raise_error()
if ndim == 1:
x_size = 1
x_stride = 1
y_size = 1
return x_size, y_size, x_stride, idxd(buf.elem_offset, elem_block)
if not util.equal_const_int(strides[-2] - elem_block, 0):
raise_error()
if ndim == 2:
x_size = shape[-2]
x_stride = shape[-2]
y_size = 1
return x_size, y_size, x_stride, idxd(buf.elem_offset, elem_block)
if not util.equal_const_int(idxm(strides[-3], elem_block), 0):
raise_error()
if ndim == 3:
x_size = shape[-2]
x_stride = idxd(strides[-3], elem_block)
y_size = shape[-3]
return x_size, y_size, x_stride, idxd(buf.elem_offset, elem_block)
else:
if not util.equal_const_int(strides[-1], 1):
raise_error()
if not util.equal_const_int(strides[-2] - shape[-1], 0):
raise_error()
if not util.equal_const_int(shape[-1] * shape[-2], elem_block):
raise_error()
if ndim == 2:
x_size = 1
x_stride = 1
y_size = 1
return x_size, y_size, x_stride, idxd(buf.elem_offset, elem_block)
if not util.equal_const_int(strides[-3], elem_block):
raise_error()
if ndim == 3:
x_size = shape[-3]
x_stride = shape[-3]
y_size = 1
return x_size, y_size, x_stride, idxd(buf.elem_offset, elem_block)
if not util.equal_const_int(idxm(strides[-4], elem_block), 0):
raise_error()
if ndim == 4:
x_size = shape[-3]
x_stride = idxd(strides[-4], elem_block)
y_size = shape[-4]
return x_size, y_size, x_stride, idxd(buf.elem_offset, elem_block)
raise_error()
def _inject_copy(src, dst, pad_before, pad_after, pad_value):
# FIXME: pad_value is ignored...
env = get_env()
_ = pad_value
if dst.scope == "global":
# Store
if pad_before or pad_after:
raise RuntimeError("Do not support copy into DRAM with pad")
if src.scope == env.acc_scope:
elem_width = env.OUT_WIDTH
elem_bytes = env.OUT_ELEM_BYTES
mem_type = env.dev.MEM_ID_OUT
data_type = "int%d" % env.OUT_WIDTH
task_qid = env.dev.QID_STORE_OUT
else:
raise RuntimeError("Do not support copy %s->dram" % (src.scope))
_check_compact(src)
x_size, y_size, x_stride, offset = _get_2d_pattern(
dst, elem_width, elem_bytes, data_type, src.scope, allow_fold=True)
irb = tvm.tir.ir_builder.create()
irb.scope_attr(env.dev.vta_axis, "coproc_scope",
env.dev.get_task_qid(task_qid))
irb.emit(tvm.tir.call_extern(
"int32", "VTAStoreBuffer2D",
env.dev.command_handle,
src.access_ptr("r", "int32"),
mem_type, dst.data, offset, x_size, y_size, x_stride))
return irb.get()
elif src.scope == "global":
if dst.scope == env.acc_scope:
elem_width = env.ACC_WIDTH
elem_bytes = env.ACC_ELEM_BYTES
mem_type = env.dev.MEM_ID_ACC
data_type = "int%d" % env.ACC_WIDTH
task_qid = env.dev.QID_LOAD_OUT
elif dst.scope == env.inp_scope:
elem_width = env.INP_WIDTH
elem_bytes = env.INP_ELEM_BYTES
mem_type = env.dev.MEM_ID_INP
data_type = "int%d" % env.INP_WIDTH
task_qid = env.dev.QID_LOAD_INP
elif dst.scope == env.wgt_scope:
elem_width = env.WGT_WIDTH
elem_bytes = env.WGT_ELEM_BYTES
mem_type = env.dev.MEM_ID_WGT
data_type = "int%d" % env.WGT_WIDTH
task_qid = env.dev.QID_LOAD_WGT
else:
raise RuntimeError("Do not support copy dram->%s" % (dst.scope))
# collect pad statistics
if pad_before:
assert pad_after
ndim = len(pad_before)
if ndim <= 2 or ndim > 5:
raise ValueError("Limitation of 2D pad load forbid ndim=%d" % ndim)
if ndim == 5:
# This case occurs when batch size N > 1
y_pad_before = pad_before[1]
x_pad_before = pad_before[2]
y_pad_after = pad_after[1]
x_pad_after = pad_after[2]
for dim in range(3, ndim):
if not util.equal_const_int(pad_before[dim], 0):
raise ValueError("Do not support pad on the innermost block")
if not util.equal_const_int(pad_after[dim], 0):
raise ValueError("Do not support pad on the innermost block")
else:
y_pad_before = pad_before[0]
x_pad_before = pad_before[1]
y_pad_after = pad_after[0]
x_pad_after = pad_after[1]
for dim in range(2, ndim):
if not util.equal_const_int(pad_before[dim], 0):
raise ValueError("Do not support pad on the innermost block")
if not util.equal_const_int(pad_after[dim], 0):
raise ValueError("Do not support pad on the innermost block")
allow_fold = False
else:
x_pad_before = 0
y_pad_before = 0
x_pad_after = 0
y_pad_after = 0
allow_fold = True
_check_compact(dst)
x_size, y_size, x_stride, offset = _get_2d_pattern(
src, elem_width, elem_bytes, data_type,
dst.scope, allow_fold=allow_fold)
irb = tvm.tir.ir_builder.create()
irb.scope_attr(env.dev.vta_axis, "coproc_scope",
env.dev.get_task_qid(task_qid))
irb.emit(tvm.tir.call_extern(
"int32", "VTALoadBuffer2D",
env.dev.command_handle,
src.data, offset, x_size, y_size, x_stride,
x_pad_before, y_pad_before,
x_pad_after, y_pad_after,
dst.access_ptr("r", "int32"), mem_type))
return irb.get()
else:
raise RuntimeError("Do not support copy %s->%s" % (src.scope, dst.scope))
return tvm.tir.transform.InjectCopyIntrin("dma_copy", _inject_copy)
def _get_gemm_intrin_buffer():
env = get_env()
wgt_lanes = env.WGT_ELEM_BITS // env.WGT_WIDTH
assert wgt_lanes == env.BLOCK_OUT * env.BLOCK_IN
wgt_shape = (env.BLOCK_OUT, env.BLOCK_IN)
assert wgt_shape[0] * wgt_shape[1] == wgt_lanes
inp_lanes = env.INP_ELEM_BITS // env.INP_WIDTH
assert inp_lanes == env.BATCH * env.BLOCK_IN
inp_shape = (env.BATCH, env.BLOCK_IN)
assert inp_shape[0] * inp_shape[1] == inp_lanes
out_lanes = env.ACC_ELEM_BITS // env.ACC_WIDTH
assert out_lanes == env.BATCH * env.BLOCK_OUT
out_shape = (env.BATCH, env.BLOCK_OUT)
assert out_shape[0] * out_shape[1] == out_lanes
wgt = te.placeholder((wgt_shape[0], wgt_shape[1]),
dtype="int%d" % env.WGT_WIDTH,
name=env.wgt_scope)
inp = te.placeholder((inp_shape[0], inp_shape[1]),
dtype="int%d" % env.INP_WIDTH,
name=env.inp_scope)
k = te.reduce_axis((0, wgt_shape[1]), name="k")
out_dtype = "int%d" % env.ACC_WIDTH
out = te.compute((out_shape[0], out_shape[1]),
lambda i, j: te.sum(inp[i, k].astype(out_dtype) *
wgt[j, k].astype(out_dtype),
axis=[k]),
name="out")
wgt_layout = tvm.tir.decl_buffer(
wgt.shape, wgt.dtype, env.wgt_scope,
scope=env.wgt_scope, offset_factor=wgt_lanes, data_alignment=wgt_lanes)
inp_layout = tvm.tir.decl_buffer(
inp.shape, inp.dtype, env.inp_scope,
scope=env.inp_scope, offset_factor=inp_lanes, data_alignment=inp_lanes)
out_layout = tvm.tir.decl_buffer(
out.shape, out.dtype, env.acc_scope,
scope=env.acc_scope, offset_factor=out_lanes, data_alignment=out_lanes)
return wgt_layout, inp_layout, out_layout
def InjectConv2DTransposeSkip():
"""Pass to skip 0-weights in conv2d transpose with stride > 1.
Returns
-------
fpass : tvm.transform.Pass
The pass
"""
def _ftransform(func, mod, ctx):
env = get_env()
dwgt, dinp, dout = _get_gemm_intrin_buffer()
calls = []
selects = []
def _find_basics(op):
if isinstance(op, tvm.tir.BufferLoad):
calls.append(op)
elif isinstance(op, tvm.tir.Select):
selects.append(op)
def _do_fold(op):
if _match_pragma(op, "conv2d_transpose_gemm"):
is_init = ".init" in str(op)
tvm.tir.stmt_functor.post_order_visit(op, _find_basics)
if is_init:
# create inner most block
irb = tvm.tir.ir_builder.create()
dev = env.dev
irb.scope_attr(dev.vta_axis, "coproc_scope", dev.get_task_qid(dev.QID_COMPUTE))
irb.scope_attr(dev.vta_axis, "coproc_uop_scope", dev.vta_push_uop)
irb.emit(tvm.tir.call_intrin("int32", "tir.vta.uop_push",
0, 1,
dout.access_ptr("rw", "int32"),
0, 0,
0, 0, 0))
inner = irb.get()
# TODO(@tmoreau89): This is only a temporary fix, please take a look.
body = op.body.body
while isinstance(body, tvm.tir.IfThenElse):
body = body.then_case
args = body.indices
res_buffer = body.buffer
tpl = (args[0], 1, args[1], 1, args[2], 1, args[3], 1, 0, 1, 0, env.BLOCK_OUT)
inner = tvm.tir.AttrStmt(
[dout, res_buffer], 'buffer_bind_scope',
tvm.tir.call_intrin('handle', 'tir.tvm_tuple', *tpl), inner)
return inner
else:
conv_call, data_call, kernel_call = calls[-3:]
pad_data_tensor = data_call.buffer
kernel_tensor = kernel_call.buffer
res_tensor = conv_call.buffer
if selects:
condition = selects[0].condition
else:
condition = tvm.tir.const(1, 'int')
# create inner most block
irb = tvm.tir.ir_builder.create()
with irb.if_scope(condition):
dev = env.dev
irb.scope_attr(
dev.vta_axis, "coproc_scope", dev.get_task_qid(dev.QID_COMPUTE))
irb.scope_attr(dev.vta_axis, "coproc_uop_scope", dev.vta_push_uop)
irb.emit(tvm.tir.call_intrin("int32", "tir.vta.uop_push",
0, 0,
dout.access_ptr("rw", "int32"),
dinp.access_ptr("r", "int32"),
dwgt.access_ptr("r", "int32"),
0, 0, 0))
inner = irb.get()
args = conv_call.indices
tpl = (args[0], 1, args[1], 1, args[2], 1, args[3],
1, 0, 1, 0, env.BLOCK_OUT)
inner = tvm.tir.AttrStmt(
[dout, res_tensor], 'buffer_bind_scope',
tvm.tir.call_intrin('handle', 'tir.tvm_tuple', *tpl), inner)
args = kernel_call.indices
tpl = (args[0], 1, args[1], 1, args[2], 1, args[3],
1, 0, env.BLOCK_OUT, 0, env.BLOCK_IN)
inner = tvm.tir.AttrStmt(
[dwgt, kernel_tensor], 'buffer_bind_scope',
tvm.tir.call_intrin('handle', 'tir.tvm_tuple', *tpl), inner)
args = data_call.indices
tpl = (args[0], 1, args[1], 1, args[2], 1, args[3],
1, 0, 1, 0, env.BLOCK_IN)
inner = tvm.tir.AttrStmt(
[dinp, pad_data_tensor], 'buffer_bind_scope',
tvm.tir.call_intrin('handle', 'tir.tvm_tuple', *tpl), inner)
return inner
return None
return func.with_body(tvm.tir.stmt_functor.ir_transform(
func.body, _do_fold, None, ["tir.AttrStmt"]))
return tvm.tir.transform.prim_func_pass(
_ftransform, opt_level=0, name="tir.vta.InjectConv2DTrasnposeSkip")
def AnnotateALUCoProcScope():
"""Pass to insert ALU instruction.
Returns
-------
fpass : tvm.transform.Pass
The pass
"""
def _ftransform(func, mod, ctx):
env = get_env()
def _do_fold(stmt):
if _match_pragma(stmt, "alu"):
irb = tvm.tir.ir_builder.create()
irb.scope_attr(env.dev.vta_axis, "coproc_scope",
env.dev.get_task_qid(env.dev.QID_COMPUTE))
irb.scope_attr(env.dev.vta_axis, "coproc_uop_scope",
tvm.tir.StringImm("VTAPushALUOp"))
irb.emit(stmt)
return irb.get()
if _match_pragma(stmt, "skip_alu"):
return tvm.tir.Evaluate(0)
return stmt
return func.with_body(tvm.tir.stmt_functor.ir_transform(
func.body, None, _do_fold, ["tir.AttrStmt"]))
return tvm.tir.transform.prim_func_pass(
_ftransform, opt_level=0, name="tir.vta.AnnotateALUCoProcScope")
def InjectALUIntrin():
"""Pass to inject ALU micro-ops.
Returns
-------
fpass : tvm.transform.Pass
The pass
"""
def _ftransform(func, mod, ctx):
env = get_env()
idxm = tvm.tir.indexmod
analyzer = tvm.arith.Analyzer()
def _do_fold(stmt):
def _equal(x, y):
return tvm.ir.structural_equal(analyzer.simplify(x - y), 0)
def _flatten_loop(src_coeff, dst_coeff, extents):
src_coeff = list(src_coeff)
dst_coeff = list(dst_coeff)
extents = list(extents)
rev_src_coeff = [src_coeff.pop()]
rev_dst_coeff = [dst_coeff.pop()]
rev_extents = []
assert src_coeff
vsrc = src_coeff.pop()
vdst = dst_coeff.pop()
vext = extents.pop()
while src_coeff:
next_src = src_coeff.pop()
next_dst = dst_coeff.pop()
next_ext = extents.pop()
if _equal(next_src, vsrc * vext) and _equal(next_dst, vdst * vext):
vext = analyzer.simplify(vext * next_ext)
else:
rev_src_coeff.append(vsrc)
rev_dst_coeff.append(vdst)
rev_extents.append(vext)
vsrc = next_src
vdst = next_dst
vext = next_ext
rev_src_coeff.append(vsrc)
rev_dst_coeff.append(vdst)
rev_extents.append(vext)
rev_src_coeff.reverse()
rev_dst_coeff.reverse()
rev_extents.reverse()
return rev_src_coeff, rev_dst_coeff, rev_extents
if _match_pragma(stmt, "alu"):
# Get to the innermost loop body
loop_body = stmt.body
nest_size = 0
while isinstance(loop_body, tvm.tir.For):
loop_body = loop_body.body
nest_size += 1
# Get the src/dst arguments
dst_var = loop_body.buffer_var
dst_idx = loop_body.index
# Derive loop variables and extents
tmp_body = stmt.body
indices = []
extents = []
for _ in range(nest_size):
indices.append(tmp_body.loop_var)
extents.append(tmp_body.extent)
tmp_body = tmp_body.body
# Derive opcode
if isinstance(loop_body.value, tvm.tir.Add):
alu_opcode = env.dev.ALU_OPCODE_ADD
lhs = loop_body.value.a
rhs = loop_body.value.b
elif isinstance(loop_body.value, tvm.tir.Sub):
alu_opcode = env.dev.ALU_OPCODE_SUB
lhs = loop_body.value.a
rhs = loop_body.value.b
elif isinstance(loop_body.value, tvm.tir.Mul):
alu_opcode = env.dev.ALU_OPCODE_MUL
lhs = loop_body.value.a
rhs = loop_body.value.b
elif isinstance(loop_body.value, tvm.tir.Min):
alu_opcode = env.dev.ALU_OPCODE_MIN
lhs = loop_body.value.a
rhs = loop_body.value.b
elif isinstance(loop_body.value, tvm.tir.Max):
alu_opcode = env.dev.ALU_OPCODE_MAX
lhs = loop_body.value.a
rhs = loop_body.value.b
elif isinstance(loop_body.value, tvm.tir.Call):
if loop_body.value.op.name == 'tir.shift_left':
alu_opcode = env.dev.ALU_OPCODE_SHR
lhs = loop_body.value.args[0]
rhs = analyzer.simplify(-loop_body.value.args[1])
elif loop_body.value.op.name == 'tir.shift_right':
alu_opcode = env.dev.ALU_OPCODE_SHR
lhs = loop_body.value.args[0]
rhs = loop_body.value.args[1]
else:
raise RuntimeError(
"Function call not recognized %s" % (loop_body.value.name))
elif isinstance(loop_body.value, tvm.tir.Load):
alu_opcode = env.dev.ALU_OPCODE_SHR
lhs = loop_body.value
rhs = tvm.tir.const(0, "int32")
else:
raise RuntimeError(
"Expression not recognized %s, %s, %s" % (
type(loop_body.value), str(loop_body.value), str(stmt)))
# Derive array index coefficients
dst_coeff = tvm.arith.detect_linear_equation(dst_idx, indices)
# Check if lhs/rhs is immediate
use_imm = False
imm_val = None
if isinstance(rhs, tvm.tir.IntImm):
assert lhs.buffer_var.same_as(dst_var)
src_coeff = tvm.arith.detect_linear_equation(lhs.index, indices)
use_imm = True
imm_val = rhs
if isinstance(lhs, tvm.tir.IntImm):
assert rhs.buffer_var.same_as(dst_var)
src_coeff = tvm.arith.detect_linear_equation(rhs.index, indices)
use_imm = True
imm_val = lhs
if imm_val is None:
imm_val = 0
assert lhs.buffer_var.same_as(dst_var) and rhs.buffer_var.same_as(dst_var)
src_lhs_coeff = tvm.arith.detect_linear_equation(lhs.index, indices)
src_rhs_coeff = tvm.arith.detect_linear_equation(rhs.index, indices)
# Determine which side has the same coefficients
lhs_equal = True
rhs_equal = True
for i, coef in enumerate(dst_coeff):
if not tvm.ir.structural_equal(coef, src_lhs_coeff[i]):
lhs_equal = False
if not tvm.ir.structural_equal(coef, src_rhs_coeff[i]):
rhs_equal = False
# Make sure at least one of the source is identical to the
# destination (in-place computation)
assert lhs_equal or rhs_equal
# Assign the source coefficients
if lhs_equal:
src_coeff = src_rhs_coeff
else:
src_coeff = src_lhs_coeff
# Ensure that we have the proper tensor dimensions in the
# innermost loop (pattern match)
src_coeff = list(src_coeff)
dst_coeff = list(dst_coeff)
extents = list(extents)
assert len(src_coeff) > 1
assert len(dst_coeff) > 1
assert len(extents) != 0
assert tvm.ir.structural_equal(
analyzer.simplify(
idxm(src_coeff[-1], env.BATCH * env.BLOCK_OUT)), 0)
assert tvm.ir.structural_equal(
analyzer.simplify(
idxm(dst_coeff[-1], env.BATCH * env.BLOCK_OUT)), 0)
assert tvm.ir.structural_equal(src_coeff[-2], 1)
assert tvm.ir.structural_equal(dst_coeff[-2], 1)
if env.BATCH > 1:
assert len(src_coeff) > 2
assert len(dst_coeff) > 2
assert len(extents) > 1
assert tvm.ir.structural_equal(src_coeff[-3], env.BLOCK_OUT)
assert tvm.ir.structural_equal(dst_coeff[-3], env.BLOCK_OUT)
# Apply tensorization of the loop coefficients
src_offset = src_coeff[-1]
dst_offset = dst_coeff[-1]
if env.BATCH == 1:
src_coeff = src_coeff[:-2]
dst_coeff = dst_coeff[:-2]
extents = extents[:-1]
else:
src_coeff = src_coeff[:-3]
dst_coeff = dst_coeff[:-3]
extents = extents[:-2]
src_coeff.append(src_offset)
dst_coeff.append(dst_offset)
src_coeff = [
analyzer.simplify(c // (env.BATCH * env.BLOCK_OUT)) for c in src_coeff]
dst_coeff = [
analyzer.simplify(c // (env.BATCH * env.BLOCK_OUT)) for c in dst_coeff]
# Flatten the outer loops
if extents:
src_coeff, dst_coeff, extents = _flatten_loop(src_coeff, dst_coeff, extents)
# Insert ALU micro-ops
irb = tvm.tir.ir_builder.create()
for idx, extent in enumerate(extents):
irb.emit(tvm.tir.call_extern(
"int32", "VTAUopLoopBegin",
extent, dst_coeff[idx], src_coeff[idx], 0))
use_imm = int(use_imm)
irb.emit(tvm.tir.call_intrin(
"int32", "tir.vta.uop_push",
1, 0,
dst_coeff[len(dst_coeff)-1],
src_coeff[len(src_coeff)-1],
0,
alu_opcode, use_imm, imm_val))
for extent in extents:
irb.emit(tvm.tir.call_extern(
"int32", "VTAUopLoopEnd"))
return irb.get()
return stmt
return func.with_body(tvm.tir.stmt_functor.ir_transform(
func.body, None, _do_fold, ["tir.AttrStmt"]))
return tvm.tir.transform.prim_func_pass(
_ftransform, opt_level=0, name="tir.vta.InjectALUIntrin")
``` |
{
"source": "JiangZongKang/AG-ResU-Net",
"score": 2
} |
#### File: JiangZongKang/AG-ResU-Net/train.py
```python
import numpy as np
import random
import json
from glob import glob
from keras.models import model_from_json,load_model
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint,Callback,LearningRateScheduler
import keras.backend as K
from model import Unet_model
from losses import *
#from keras.utils.visualize_util import plot
class SGDLearningRateTracker(Callback):
def on_epoch_begin(self, epoch, logs={}):
optimizer = self.model.optimizer
lr = K.get_value(optimizer.lr)
decay = K.get_value(optimizer.decay)
lr=lr/10
decay=decay*10
K.set_value(optimizer.lr, lr)
K.set_value(optimizer.decay, decay)
print('LR changed to:',lr)
print('Decay changed to:',decay)
class Training(object):
def __init__(self, batch_size,nb_epoch,load_model_resume_training=None):
self.batch_size = batch_size
self.nb_epoch = nb_epoch
#loading model from path to resume previous training without recompiling the whole model
if load_model_resume_training is not None:
self.model =load_model(load_model_resume_training,custom_objects={'gen_dice_loss': gen_dice_loss,'dice_whole_metric':dice_whole_metric,'dice_core_metric':dice_core_metric,'dice_en_metric':dice_en_metric})
print("pre-trained model loaded!")
else:
unet =Unet_model(img_shape=(128,128,4))
self.model=unet.model
print("U-net CNN compiled!")
def fit_unet(self,X33_train,Y_train,X_patches_valid=None,Y_labels_valid=None):
train_generator=self.img_msk_gen(X33_train,Y_train,9999)
checkpointer = ModelCheckpoint(filepath='brain_segmentation/ResUnet.{epoch:02d}_{val_loss:.3f}.hdf5', verbose=1)
self.model.fit_generator(train_generator,steps_per_epoch=len(X33_train)//self.batch_size,epochs=self.nb_epoch, validation_data=(X_patches_valid,Y_labels_valid),verbose=1, callbacks = [checkpointer,SGDLearningRateTracker()])
#self.model.fit(X33_train,Y_train, epochs=self.nb_epoch,batch_size=self.batch_size,validation_data=(X_patches_valid,Y_labels_valid),verbose=1, callbacks = [checkpointer,SGDLearningRateTracker()])
def img_msk_gen(self,X33_train,Y_train,seed):
'''
a custom generator that performs data augmentation on both patches and their corresponding targets (masks)
'''
datagen = ImageDataGenerator(horizontal_flip=True,data_format="channels_last")
datagen_msk = ImageDataGenerator(horizontal_flip=True,data_format="channels_last")
image_generator = datagen.flow(X33_train,batch_size=4,seed=seed)
y_generator = datagen_msk.flow(Y_train,batch_size=4,seed=seed)
while True:
yield(image_generator.next(), y_generator.next())
def save_model(self, model_name):
'''
INPUT string 'model_name': path where to save model and weights, without extension
Saves current model as json and weights as h5df file
'''
model_tosave = '{}.json'.format(model_name)
weights = '{}.hdf5'.format(model_name)
json_string = self.model.to_json()
self.model.save_weights(weights)
with open(model_tosave, 'w') as f:
json.dump(json_string, f)
print ('Model saved.')
def load_model(self, model_name):
'''
Load a model
INPUT (1) string 'model_name': filepath to model and weights, not including extension
OUTPUT: Model with loaded weights. can fit on model using loaded_model=True in fit_model method
'''
print ('Loading model {}'.format(model_name))
model_toload = '{}.json'.format(model_name)
weights = '{}.hdf5'.format(model_name)
with open(model_toload) as f:
m = next(f)
model_comp = model_from_json(json.loads(m))
model_comp.load_weights(weights)
print ('Model loaded.')
self.model = model_comp
return model_comp
if __name__ == "__main__":
#set arguments
#reload already trained model to resume training
model_to_load="Models/ResUnet.04_0.646.hdf5"
#save=None
#compile the model
brain_seg = Training(batch_size=4,nb_epoch=3,load_model_resume_training=model_to_load)
print("number of trainabale parameters:",brain_seg.model.count_params())
#print(brain_seg.model.summary())
#plot(brain_seg.model, to_file='model_architecture.png', show_shapes=True)
#load data from disk
Y_labels=np.load("y_training.npy").astype(np.uint8)
X_patches=np.load("x_training.npy").astype(np.float32)
Y_labels_valid=np.load("y_valid.npy").astype(np.uint8)
X_patches_valid=np.load("x_valid.npy").astype(np.float32)
print("loading patches done\n")
# fit model
brain_seg.fit_unet(X_patches,Y_labels,X_patches_valid,Y_labels_valid)#*
#if save is not None:
# brain_seg.save_model('models/' + save)
``` |
{
"source": "Jiang-zzz/Python",
"score": 5
} |
#### File: data_structures/linked_list/print_reverse.py
```python
class Node:
def __init__(self, data=None):
self.data = data
self.next = None
def __repr__(self):
"""Returns a visual representation of the node and all its following nodes."""
string_rep = ""
temp = self
while temp:
string_rep += f"<{temp.data}> ---> "
temp = temp.next
string_rep += "<END>"
return string_rep
def make_linked_list(elements_list):
"""Creates a Linked List from the elements of the given sequence
(list/tuple) and returns the head of the Linked List."""
# if elements_list is empty
if not elements_list:
raise Exception("The Elements List is empty")
# Set first element as Head
head = Node(elements_list[0])
current = head
# Loop through elements from position 1
for data in elements_list[1:]:
current.next = Node(data)
current = current.next
return head
def print_reverse(head_node):
"""Prints the elements of the given Linked List in reverse order"""
# If reached end of the List
if head_node is None:
return None
else:
# Recurse
print_reverse(head_node.next)
print(head_node.data)
list_data = [14,52,14,12,43]
linked_list = make_linked_list(list_data)
print("Linked List:")
print(linked_list)
print("Elements in Reverse:")
print_reverse(linked_list)
```
#### File: Python/other/least_recently_used.py
```python
from abc import abstractmethod
import sys
from collections import deque
class LRUCache:
""" Page Replacement Algorithm, Least Recently Used (LRU) Caching."""
dq_store = object() # Cache store of keys
key_reference_map = object() # References of the keys in cache
_MAX_CAPACITY: int = 10 # Maximum capacity of cache
@abstractmethod
def __init__(self, n: int):
""" Creates an empty store and map for the keys.
The LRUCache is set to size n.
"""
self.dq_store = deque()
self.key_reference_map = set()
if not n:
LRUCache._MAX_CAPACITY = sys.maxsize
elif n < 0:
raise ValueError('n should be an integer greater than 0.')
else:
LRUCache._MAX_CAPACITY = n
def refer(self, x):
"""
Looks for a page in the cache store and adds reference to the set.
Remove the least recently used key if the store is full.
Update store to reflect recent access.
"""
if x not in self.key_reference_map:
if len(self.dq_store) == LRUCache._MAX_CAPACITY:
last_element = self.dq_store.pop()
self.key_reference_map.remove(last_element)
else:
index_remove = 0
for idx, key in enumerate(self.dq_store):
if key == x:
index_remove = idx
break
del self.dq_store[index_remove]  # drop the key from its old position before re-inserting it at the front
self.dq_store.appendleft(x)
self.key_reference_map.add(x)
def display(self):
"""
Prints all the elements in the store.
"""
for k in self.dq_store:
print(k)
if __name__ == "__main__":
lru_cache = LRUCache(4)
lru_cache.refer(1)
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer(1)
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
``` |
{
"source": "jianhanlim/ipr-imagecaptioning",
"score": 3
} |
#### File: multiplication_bi/utils/misc.py
```python
import numpy as np
import cv2
import heapq
import os
from datetime import datetime as dt
from concurrent.futures import ThreadPoolExecutor
class ImageLoader(object):
def __init__(self, mean_file, shape, config):
self.config = config
self.bgr = True
self.scale_shape = np.array([shape, shape], np.int32)
self.crop_shape = np.array([shape, shape], np.int32)
self.mean = np.load(mean_file).mean(1).mean(1)
self.threadExecutor = ThreadPoolExecutor(max_workers=128)
def preprocess(self, image):
if self.config.cnn in ['vgg16','resnet50','resnet101']:
image = cv2.resize(image, (self.scale_shape[0], self.scale_shape[1]))
offset = (self.scale_shape - self.crop_shape) / 2
offset = offset.astype(np.int32)
image = image[offset[0]:offset[0] + self.crop_shape[0],
offset[1]:offset[1] + self.crop_shape[1]]
image = image - self.mean
return image
elif self.config.cnn == 'inceptionv4':
image = cv2.resize(image, (self.scale_shape[0], self.scale_shape[1]))
image = np.array(image, np.float32)
image /= 255.
image -= 0.5
image *= 2.
return image
else:
return image
def load_image(self, image_file):
""" Load and preprocess an image. """
image = cv2.imread(image_file)
if self.bgr:
# convert bgr to rgb
temp = image.swapaxes(0, 2)
temp = temp[::-1]
image = temp.swapaxes(0, 2)
return self.preprocess(image)
def load_images(self, image_files):
""" Load and preprocess a list of images. """
#before_time = dt.now()
images = []
for image_file in image_files:
images.append(self.load_image(image_file))
# execs = []
# for image_file in image_files:
# execs.append(self.threadExecutor.submit(self.load_image, image_file))
# for exe in execs:
# images.append(exe.result())
images = np.array(images, np.float32)
#after_time = dt.now()
#print("Load Images Time: {}".format((after_time-before_time).total_seconds()))
return images
class CaptionData(object):
def __init__(self, sentence, memory, output, score):
self.sentence = sentence
self.memory = memory
self.output = output
self.score = score
def __cmp__(self, other):
assert isinstance(other, CaptionData)
if self.score == other.score:
return 0
elif self.score < other.score:
return -1
else:
return 1
def __lt__(self, other):
assert isinstance(other, CaptionData)
return self.score < other.score
def __eq__(self, other):
assert isinstance(other, CaptionData)
return self.score == other.score
class TopN(object):
def __init__(self, n):
self._n = n
self._data = []
def size(self):
assert self._data is not None
return len(self._data)
def push(self, x):
assert self._data is not None
if len(self._data) < self._n:
heapq.heappush(self._data, x)
else:
heapq.heappushpop(self._data, x)
def extract(self, sort=False):
assert self._data is not None
data = self._data
self._data = None
if sort:
data.sort(reverse=True)
return data
def reset(self):
self._data = []
def generate_binary_key(dims, sentence, seed):
seed_key = np.ones(dims)
if seed != -1:
np.random.seed(seed)
seed_key = np.random.randint(0, 2, dims)
seed_key[seed_key<1] = -1
max_word = dims/8
new_sentence = sentence
if len(sentence) < max_word:
left = max_word - len(sentence)
while left > 0:
index = left if left <= len(sentence) else len(sentence)
left -= len(sentence)
new_sentence = new_sentence + sentence[:int(index)]
new_sentence = new_sentence[:int(max_word)]
binary_key = ''.join(format(ord(x), '08b') for x in new_sentence)
binary_key = ' '.join(list(binary_key))
binary_key = np.fromstring(binary_key, dtype=int, sep=' ')
binary_key[binary_key<1] = -1
binary_key = binary_key * seed_key
return binary_key
def bits2str(key, seed, dims):
seed_key = np.ones(dims)
if seed != -1:
np.random.seed(seed)
seed_key = np.random.randint(0, 2, dims)
seed_key[seed_key<1] = -1
key = key / seed_key
key[key<1] = 0
key = key.astype(int)
key = ''.join([item for item in key.astype(str)])
return ''.join(chr(int(''.join(x), 2)) for x in zip(*[iter(key)]*8))
``` |
{
"source": "jianhao2016/online_cvxMF",
"score": 2
} |
#### File: jianhao2016/online_cvxMF/cvx_online_NMF.py
```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pickle
import time
import ipdb
import os
import argparse
import pycuda.autoinit
import pycuda.gpuarray as gpuarray
import skcuda.linalg as linalg
# import cvxpy as cvx
from functools import reduce
from sklearn.decomposition import PCA
from sklearn.linear_model import LassoLars
from sklearn.preprocessing import normalize
from sklearn.manifold import TSNE
from sklearn.cluster import KMeans
from sklearn.metrics import accuracy_score, adjusted_mutual_info_score
from cluster_assignment_method import get_clustering_assignment_1, get_clustering_assignment_2
from common_functions import get_g_hat_value, evaluation_clustering
from common_functions import my_nmf_clustering, nmf_clustering
from common_functions import geo_projection_to_cvx_cmb
# from cvxpy_update_functions import update_D_hat_cvxpy, update_W_hat_cvxpy
from pycuda_update_W_comparison import update_W_hat_skcuda, opt_cal_W_hat_numpy
from pycuda_update_W_comparison import opt_cal_W_hat_solve, update_W_hat_numpy
from online_NMF import online_dict_learning
from convex_NMF import CNMF
from visualization_NMF import plot_diff_method
def my_normalize(X):
'''
scale X to be in a unit ball
X: n x m, n samples in row
'''
n_dim, m_dim = X.shape
max_norm = max([np.linalg.norm(X[k, :]) for k in range(n_dim)])
X_new = X / max_norm
return X_new
def cvx_online_dict_learning(X, y_true, n_hat, k_cluster, T, lmda, eps,
flag=True, version = 'Rr'):
'''
X: R^(n * m)
y_true: str^n
W_0: R^(n_hat * k)
x_i : R^m
alpha: R^k
cvx_online problem
min||x_i - X.T * W * alpha|| + lambda * ||alpha||
in the online setting, there is no X in (n * m),
instead, we need to store a candidate set and solve the subproblem:
min ||x_i - X_hat * W_hat * alpha|| + lambda * ||alpha||
X_hat : R^(m * n_hat)
W_hat : R^(n_hat * k)
version: Rr, restricted, heuristic approach
Ru, uniform, random assignment
'''
n_dim, m_dim = X.shape
A_t = np.zeros((k_cluster, k_cluster))
B_t = np.zeros((m_dim, k_cluster))
x_sum = 0
alpha_sum = 0
# step 1: sample n_hat * k_cluster points as initial X_hat.
X_0 = np.zeros((m_dim, n_hat))
for idx in range(n_hat):
sample_idx = np.random.randint(0, n_dim)
x_sample = X[sample_idx, :]
X_0[:, idx] = x_sample
# step 1: initialization, get X_hat (including clusters info)
# and W_hat from X_0, using same init as in CNMF.
# here representative_size_count is the n_1_hat, n_2_hat, ..., n_k_hat.
t1 = time.time()
X_hat, W_hat, representative_size_count = initialize_X_W_hat(X_0, k_cluster)
X_0, W_0 = X_hat.copy(), W_hat.copy()
t2 = time.time()
# print('init cost {:.4f}'.format(t2 - t1))
# step 2: after initialization of X_hat, update alpha, W_hat and X_hat alternatively.
t_start = time.time()
print(lmda, _NF, eps)
for t in range(T):
# t_start_online = time.time()
if t % 50 == 0 and flag:
D_t = np.matmul(X_hat, W_hat)
tmp_assignment = get_clustering_assignment_1(X, D_t, k_cluster)
tmp_acc, tmp_AMI = evaluation_clustering(tmp_assignment, y_true)
print('1)iteration {}, distance acc = {:.4f}, AMI = {:.4f}'.format(t, tmp_acc, tmp_AMI))
tmp_assignment = get_clustering_assignment_2(X, D_t, k_cluster, lmda)
tmp_acc, tmp_AMI = evaluation_clustering(tmp_assignment, y_true)
print('2)iteration {}, kmeans of weights acc = {:.4f}, AMI = {:.4f}'.format(t, tmp_acc, tmp_AMI))
t_end = time.time()
print('time elapse = {:.4f}s'.format(t_end - t_start))
t_start = t_end
print('-' * 7)
sample_idx = np.random.randint(0, n_dim)
x_sample = X[sample_idx, :]
# update alpha
t1 = time.time()
lars_lasso = LassoLars(alpha = lmda, max_iter = 500)
D_t = np.matmul(X_hat, W_hat)
lars_lasso.fit(D_t, x_sample)
alpha_t = lars_lasso.coef_
t2 = time.time()
# print('lasso cost {:.4f}s'.format(t2 - t1))
# using different clustering assignment
t1 = time.time()
if version == 'Rr':
cluster_of_x_i = np.argmax(alpha_t)
# elif version == 'Ru':
else:
cluster_of_x_i = int(np.random.uniform(0, k_cluster))
t2 = time.time()
# print('argmax alpha cost {:.4f}s'.format(t2 - t1))
t1 = time.time()
A_t += np.matmul(alpha_t.reshape(k_cluster, 1), alpha_t.reshape(1, k_cluster))
B_t += np.matmul(x_sample.reshape(m_dim, 1), alpha_t.reshape(1, k_cluster))
x_sum += (np.linalg.norm(x_sample) ** 2)
alpha_sum += lmda * np.linalg.norm(alpha_t, 1)
t2 = time.time()
# print('update At, Bt cost {:.4f}s'.format(t2 - t1))
# update X_hat
t1 = time.time()
W_hat, X_hat = update_W_X_hat(W_hat, X_hat, representative_size_count, x_sample, cluster_of_x_i,
A_t, B_t, x_sum, alpha_sum, t, eps)
t2 = time.time()
# print('update X_hat, W_hat cost {:.4f}s'.format(t2 - t1))
print('Dictionary update done! Time elapsed {:.04f}s'.format(time.time() - t_start))
return W_hat, X_hat, representative_size_count, X_0, W_0
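# Example call (a sketch mirroring the defaults used in __main__ below):
#   W_hat, X_hat, sizes, X_0, W_0 = cvx_online_dict_learning(X, Y, n_hat=150, k_cluster=10,
#                                                            T=1200, lmda=0.1, eps=1e-5,
#                                                            flag=False, version='Rr')
#   D = np.matmul(X_hat, W_hat)   # the learned dictionary / centroid matrix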
def initialize_X_W_hat(X_0, k_cluster):
'''
takes the initial collection X_0 and the number of clusters as input,
runs k-Means on it, and returns the (cluster-sorted) X_hat, W_hat,
and the number of points in each cluster, i.e. n_hat_i
'''
# this function reuses the initialization step of CNMF to produce X_hat and W_hat
# cluster X_0, then build X_hat, W_0 and the cluster assignment of X_0
# KMeans expects samples in rows, but X_0 stores samples as columns, hence the transpose below.
kmeans = KMeans(n_clusters = k_cluster, max_iter = 1000)
kmeans.fit(X_0.T)
X_hat_assignments = kmeans.labels_
# now we need to classify the X_hat to X_1, X_2, X_3
# by using a dictionary candidate_clusters
candidate_clusters = {x:np.array([]) for x in set(X_hat_assignments)}
for idx, label in enumerate(X_hat_assignments):
if candidate_clusters[label].size == 0:
candidate_clusters[label] = X_0[:, idx]
else:
candidate_clusters[label] = np.vstack((candidate_clusters[label], X_0[:, idx]))
X_hat = np.array([])
check_list = []
sorted_assignment = []
for label in candidate_clusters:
candidate_clusters[label] = candidate_clusters[label].T
shape_of_cluster = candidate_clusters[label].shape
print('label {} has shape of: {}'.format(label, shape_of_cluster))
check_list.append(shape_of_cluster[1])
if X_hat.size == 0:
X_hat = candidate_clusters[label]
sorted_assignment = [label] * shape_of_cluster[1]
else:
X_hat = np.hstack((X_hat, candidate_clusters[label]))
sorted_assignment += [label] * shape_of_cluster[1]
sorted_assignment = np.array(sorted_assignment)
# based on the CNMF paper, we start the initialization with fresh k-Means
# H: R^{n * k} matrix, indicate the cluster assignments
# centroids can be calculated as F = X*W*D^{-1}, Where D: R^{k * k} is the count diagonal matrix
# then we can say W = H*D^{-1}
m_dim, n_dim = X_hat.shape
cluster_count = [len(np.where(X_hat_assignments == i)[0]) for i in range(k_cluster)]
assert cluster_count == check_list
D = np.zeros((k_cluster, k_cluster))  # float dtype, so the 1e-3 added below is not truncated
for idx in range(k_cluster):
D[idx][idx] = cluster_count[idx] + 1e-3
H = np.zeros((n_dim, k_cluster), int)
for idx in range(k_cluster):
non_zero_idx = np.where(sorted_assignment == idx)[0]
H[non_zero_idx, idx] = 1
W_hat = np.matmul((H + np.ones(H.shape, int) * 0.2), np.linalg.inv(D))
return X_hat, W_hat, cluster_count
def update_W_X_hat(W_hat, X_hat, repre_size_count, x_sample, cluster_of_x_i,
A_t, B_t, x_sum, alpha_sum, t, eps):
# add W_hat block diagonal constraint,
# using projection.
# linalg.init()
# W_hat_gpu = gpuarray.to_gpu(W_hat.astype(np.float64))
# tmp_x = np.ascontiguousarray(X_hat)
# X_hat_gpu = gpuarray.to_gpu(tmp_x.astype(np.float64))
# A_t_gpu = gpuarray.to_gpu(A_t.astype(np.float64))
# B_t_gpu = gpuarray.to_gpu(B_t.astype(np.float64))
cluster_seperation_idx = np.cumsum(repre_size_count)
end_idx = cluster_seperation_idx[cluster_of_x_i]
start_idx = end_idx - repre_size_count[cluster_of_x_i]
A_t_inv = np.linalg.pinv(A_t)
# W_opt_old_X = opt_cal_W_hat_numpy(W_hat, X_hat, A_t, B_t, x_sum, alpha_sum, eps, t)
W_opt_old_X = opt_cal_W_hat_solve(W_hat, X_hat, A_t_inv, B_t, x_sum, alpha_sum, eps, t)
g_hat_old_X = get_g_hat_value(t, W_opt_old_X, X_hat, A_t, B_t, x_sum, alpha_sum)
# W_opt_old_X = update_W_hat_skcuda(W_hat_gpu, X_hat_gpu, A_t_gpu, B_t_gpu,
# x_sum, alpha_sum, eps, t)
# g_hat_old_X = get_g_hat_value(t, W_opt_old_X.get(), X_hat, A_t, B_t, x_sum, alpha_sum)
list_of_W_opt_new_X = [W_opt_old_X]
list_of_g_hat_new_X = [g_hat_old_X]
list_of_new_X = [X_hat]
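# Candidate-replacement search: for every representative column belonging to the
# cluster of x_i, try swapping it with x_sample, re-solve for W, and keep the
# (X_hat, W_hat) pair with the smallest surrogate value g_hat. The unmodified
# X_hat is included as the first candidate, so the update can be a no-op.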
# print('starting loop in update_W_X, total {}'.format(end_idx - start_idx))
for idx in range(start_idx, end_idx):
# print('iter # {}'.format(idx))
t1 = time.time()
X_hat_new = X_hat.copy()
X_hat_new[:, idx] = x_sample
list_of_new_X.append(X_hat_new)
# tmp_x = np.ascontiguousarray(X_hat_new)
# X_hat_new_gpu = gpuarray.to_gpu(tmp_x.astype(np.float64))
t2 = time.time()
# print('\t update X_hat cost {:.4f}s'.format(t2 - t1))
t1 = time.time()
# W_opt_new_X = opt_cal_W_hat_numpy(W_hat, X_hat_new, A_t, B_t, x_sum, alpha_sum, eps, t)
# W_opt_new_X = update_W_hat_numpy(W_hat, X_hat_new, A_t, B_t, x_sum, alpha_sum, eps, t)
W_opt_new_X = opt_cal_W_hat_solve(W_hat, X_hat_new, A_t_inv, B_t, x_sum, alpha_sum, eps, t)
g_hat_new_X = get_g_hat_value(t, W_opt_new_X, X_hat_new, A_t, B_t, x_sum, alpha_sum)
# W_opt_new_X = update_W_hat_skcuda(W_hat_gpu, X_hat_new_gpu, A_t_gpu, B_t_gpu,
# x_sum, alpha_sum, eps, t)
# g_hat_new_X = get_g_hat_value(t, W_opt_new_X.get(), X_hat_new, A_t, B_t, x_sum, alpha_sum)
t2 = time.time()
# print('\t update W_hat_new cost {:.4f}'.format(t2 - t1))
t1 = time.time()
list_of_W_opt_new_X.append(W_opt_new_X)
list_of_g_hat_new_X.append(g_hat_new_X)
t2 = time.time()
# print('appending W_opt list cost {:.4f}s'.format(t2 - t1))
min_g_idx = np.argmin(list_of_g_hat_new_X)
X_hat_new = list_of_new_X[min_g_idx]
W_hat_new = list_of_W_opt_new_X[min_g_idx]
# if list_of_g_hat_new_X[min_g_idx] <= g_hat_old_X:
# X_hat_new = X_hat.copy()
# X_hat_new[:, start_idx + min_g_idx] = x_sample
# # W_hat_new = list_of_W_opt_new_X[min_g_idx].get()
# W_hat_new = list_of_W_opt_new_X[min_g_idx].copy()
# else:
# X_hat_new = X_hat.copy()
# # W_hat_new = W_opt_old_X.get()
# W_hat_new = W_opt_old_X.copy()
return W_hat_new, X_hat_new
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--numIter', type=int, default=1200)
parser.add_argument('--lmda', type=float, default=1e-1)
parser.add_argument('--eps', type=float, default=1e-5)
parser.add_argument('--normal_factor', '--NF', type=float, default=200)
parser.add_argument('--file_name', type=str, default='tmp_pic')
parser.add_argument('--dtype', type=str, default='scRNA',
choices=['scRNA', 'synthetic',
'synthetic_1', 'synthetic_2'],
help='synthetic1: well sep, 2: close cluster')
parser.add_argument('--k_cluster', type=int, default=10)
parser.add_argument('--csize', type=int, default=500,
help='size of each cluster, integer, default 500')
parser.add_argument('--candidate_size', type=int, default=15)
parser.add_argument('--pca', type=int, default = 100)
parser.add_argument('--numAver', type=int, default=1)
args = parser.parse_args()
# set number of iteration, lambda in lasso, epsilon in dictionary update and normalization factor
print(args)
numIter = args.numIter
lmda = args.lmda
eps = args.eps
_NF = args.normal_factor
file_name = args.file_name
k_cluster = args.k_cluster
cluster_size = args.csize
candidate_set_size = args.candidate_size
P_component = args.pca
aver_num = args.numAver
data_type = args.dtype
# np.random.seed(42)
data_root = '.'
data_root_shared = 'sample_data/'
# df_file = os.path.join(data_root, 'pandas_dataframe')
# feat_file = os.path.join(data_root, 'df_feature_column')
if data_type == 'synthetic':
k_cluster = 10
df_name = 'df_synthetic_well_sep'
fc_name = 'feature_column_synthetic_well_sep'
df_file = os.path.join(data_root_shared, '10xGenomics_scRNA/pandasDF', df_name)
feat_file = os.path.join(data_root_shared, '10xGenomics_scRNA/pandasDF', fc_name)
elif data_type == 'synthetic_1':
k_cluster = 10
df_name = 'df_synthetic_disjoint_{}'.format(cluster_size)
fc_name = 'feature_column_synthetic_disjoint_{}'.format(cluster_size)
df_file = os.path.join(data_root_shared, 'synthetic_data', df_name)
feat_file = os.path.join(data_root_shared, 'synthetic_data', fc_name)
elif data_type == 'synthetic_2':
k_cluster = 10
df_name = 'df_synthetic_overlap_{}'.format(cluster_size)
fc_name = 'feature_column_synthetic_overlap_{}'.format(cluster_size)
df_file = os.path.join(data_root_shared, 'synthetic_data', df_name)
feat_file = os.path.join(data_root_shared, 'synthetic_data', fc_name)
elif data_type == 'scRNA':
k_cluster = 10
df_name = 'pandas_dataframe_10_clusters_-1'
fc_name = 'df_feature_column_10_clusters_-1'
df_file = os.path.join(data_root_shared, '10xGenomics_scRNA/pandasDF', df_name)
feat_file = os.path.join(data_root_shared, '10xGenomics_scRNA/pandasDF', fc_name)
# np.random.seed(42)
df = pd.read_pickle(df_file)
with open(feat_file, 'rb') as f:
feat_cols = pickle.load(f)
X_raw = df[feat_cols].values
X_raw = X_raw - np.min(X_raw) + 0.1
Y = df['label'].values
# # ----------------------------------------------------
# X_for_nmf = normalize(X_raw) * _NF
# D_nmf, label_nmf = nmf_clustering(X_for_nmf, k_cluster, numIter = 1000)
# acc_nmf, AMI_nmf = evaluation_clustering(label_nmf, Y)
# print(' ------ final accuracy = {:.4f}, AMI = {:.4f}'.format(acc_nmf, AMI_nmf))
# ----------------------------------------------------
# use PCA to reduce X_raw to [num_of_cells * number of PCA components]
if P_component != -1:
pca = PCA(n_components = P_component)
# X_pca_all = pca.fit_transform(np.vstack((X_raw, D_nmf)))
# X_pca = X_pca_all[:-k_cluster, :]
X_pca = pca.fit_transform(X_raw)
else:
X_pca = X_raw
pca_cols = ['Principal component {}'.format(i) for i in range(X_pca.shape[1])]
# ----------------------------------------------------
# 1) First, traditional NMF
# D_nmf_pca = X_pca_all[-k_cluster:, :]
if np.min(X_pca) < 0:
X_for_nmf = X_pca - np.min(X_pca)
else:
X_for_nmf = X_pca
# X_for_nmf = normalize(X_for_nmf) * _NF
X_for_nmf = my_normalize(X_for_nmf) * _NF
# D_nmf, _, label_nmf = nmf_clustering(X_for_nmf, k_cluster, numIter = 1000)
# D_nmf_pca = pca.transform(D_nmf)
# ipdb.set_trace()
t1 = time.time()
D_nmf, _, label_nmf = nmf_clustering(X_for_nmf, k_cluster, numIter = numIter)
t2 = time.time()
t_nmf = t2 - t1
acc_nmf, AMI_nmf = evaluation_clustering(label_nmf, Y)
print(' ------ nmf final accuracy = {:.4f}, AMI = {:.4f}'.format(acc_nmf, AMI_nmf))
D_nmf_pca = my_normalize(D_nmf) * _NF
df_centroids_nmf = pd.DataFrame(D_nmf_pca, columns = pca_cols)
df_centroids_nmf['label'] = ['NMF cell type {}'.format(x) for x in range(1, k_cluster + 1)]
# test
df_centroids_nmf.to_pickle('results_logging/nmf_centroid_df')
# X = normalize(X_pca) * _NF
X = my_normalize(X_pca) * _NF
# X = X_pca
n_dim, m_dim = X.shape
# ----------------------------------------------------
# 2) online cvxMF, our algorithm.
n_hat = k_cluster * candidate_set_size
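# the candidate set keeps candidate_set_size representative columns per cluster,
# so X_hat has n_hat = k_cluster * candidate_set_size columns in total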
t_ocmf = 0
acc = 0
acc_array = []
for round_num in range(aver_num):
t1 = time.time()
W_hat_tmp, X_hat_tmp, repre_size_count_tmp, X_0_tmp, W_0_tmp = cvx_online_dict_learning(X, Y, n_hat, k_cluster,
numIter, lmda, eps,
flag = False, version = 'Rr')
t2 = time.time()
t_ocmf += (t2 - t1)
D_final_tmp = np.matmul(X_hat_tmp, W_hat_tmp)
# clustered_label = get_clustering_assignment_1(X, D_final)
clustered_label_ocmf = get_clustering_assignment_2(X, D_final_tmp, k_cluster, lmda)
acc_tmp, AMI_tmp = evaluation_clustering(clustered_label_ocmf, Y)
acc_array.append(acc_tmp)
if acc_tmp >= acc:
W_hat = W_hat_tmp
X_hat = X_hat_tmp
X_0 = X_0_tmp
W_0 = W_0_tmp
D_final = D_final_tmp
acc = acc_tmp
AMI = AMI_tmp
repre_size_count = repre_size_count_tmp
if acc >= 0.9:
break
acc_aver = np.mean(acc_array)
t_ocmf = t_ocmf / (round_num + 1)
print(' ------ ocmf final accuracy = {:.4f}, AMI = {:.4f}'.format(acc, AMI))
df_centroids = pd.DataFrame(D_final.T, columns = pca_cols)
df_centroids['label'] = ['ocmf: type {}'.format(x) for x in range(1, k_cluster + 1)]
df_centroids.to_pickle('results_logging/ocmf_centroid_df')
df_x_hat = pd.DataFrame(X_hat.T, columns = pca_cols)
X_hat_set = ['group {}'.format(i) for i in range(k_cluster)]
X_hat_label = []
for idx in range(k_cluster):
X_hat_label += [X_hat_set[idx]] * repre_size_count[idx]
# ipdb.set_trace()
df_x_hat['label'] = X_hat_label
df_x_hat.to_pickle('results_logging/x_hat_df')
# ----------------------------------------------------
# 3) compare with online NMF in their paper
# D_0 = np.random.randn(m_dim, k_cluster)
# D_0 = np.absolute(D_0)
# ipdb.set_trace()
D_0 = (X_0 @ W_0).reshape(m_dim, k_cluster)
# D_0 = normalize(D_0, axis = 0) * _NF
acc_omf = 0
AMI_omf = 0
acc_omf_array = []
t_omf = 0
for round_num in range(aver_num):
t1 = time.time()
D_omf_final_tmp = online_dict_learning(X, lmda = lmda, D_0 = D_0, T = numIter, k_cluster = k_cluster, eps = eps, _NF = _NF)
t2 = time.time()
t_omf += (t2 - t1)
clustered_label_omf = get_clustering_assignment_2(X, D_omf_final_tmp,
k_cluster, lmda)
acc_omf_tmp, AMI_omf_tmp = evaluation_clustering(clustered_label_omf, Y)
acc_omf_array.append(acc_omf_tmp)
if acc_omf_tmp >= acc_omf:
D_omf_final = D_omf_final_tmp
acc_omf, AMI_omf = acc_omf_tmp, AMI_omf_tmp
if acc_omf >= 0.9:
break
acc_aver_omf = np.mean(acc_omf_array)
t_omf = t_omf/(round_num + 1)
print(' ------ onlineMF final accuracy = {:.4f}, AMI = {:.4f}'.format(acc_omf,
AMI_omf))
df_centroids_omf = pd.DataFrame(D_omf_final.T, columns = pca_cols)
df_centroids_omf['label'] = ['online MF cell type {}'.format(x) for x in range(1, k_cluster + 1)]
df_centroids_omf.to_pickle('results_logging/omf_centroid_df')
# ----------------------------------------------------
# 4) CNMF in Jordan's paper.
t1 = time.time()
W_cnmf, _, clustered_label_cnmf = CNMF(X.T, k_cluster, max_iter = numIter * 1)
D_cnmf = (X.T @ W_cnmf).T
t2 = time.time()
t_cmf = t2 - t1
clustered_label_cnmf = get_clustering_assignment_2(X, D_cnmf,
k_cluster, lmda)
acc_cnmf, AMI_cnmf = evaluation_clustering(clustered_label_cnmf, Y)
print(' ------ cnmf final accuracy = {:.4f}, AMI = {:.4f}'.format(acc_cnmf,
AMI_cnmf))
df_centroids_cnmf = pd.DataFrame(D_cnmf, columns = pca_cols)
df_centroids_cnmf['label'] = ['CNMF: type{}'.format(x) for x in
range(1, k_cluster + 1)]
df_centroids_cnmf.to_pickle('results_logging/cmf_centroid_df')
# ----------------------------------------------------
df_final = pd.DataFrame(X, columns = pca_cols)
df_final.to_pickle('results_logging/X_df')
df_final['label'] = Y
df_final = df_final.append(df_x_hat)
df_final = df_final.append(df_centroids_cnmf)
df_final = df_final.append(df_centroids_nmf)
df_final = df_final.append(df_centroids)
df_final = df_final.append(df_centroids_omf)
print('shape of df_final: ', df_final.shape)
accuracy_dict = {
'nmf':[acc_nmf, AMI_nmf],
'onmf': [acc_omf, AMI_omf],
'ocnmf': [acc, AMI],
'cnmf': [acc_cnmf, AMI_cnmf]
}
size_of_cluster = n_dim//k_cluster
tmp_type = Y[0]
tmp_count = 0
cluster_size_count = []
for cur_type in Y:
if cur_type == tmp_type:
tmp_count += 1
else:
cluster_size_count.append(tmp_count)
tmp_count = 1
tmp_type = cur_type
cluster_size_count.append(tmp_count)
fig = plot_diff_method(df_final, pca_cols, n_dim, k_cluster,
accuracy_dict, repre_size_count,
size_of_cluster = None,
cluster_size_count = cluster_size_count)
tmp = 'test_fig_dummy_pa.png'
save_File_name = tmp
p2f = os.path.join(save_File_name)
fig.savefig(p2f, dpi = 150,
bbox_inches = 'tight')
print('===' * 7)
print('nmf takes {:.4f}s'.format(t_nmf))
print('ocmf takes {:.4f}s'.format(t_ocmf))
print('omf takes {:.4f}s'.format(t_omf))
print('cmf takes {:.4f}s'.format(t_cmf))
```
#### File: jianhao2016/online_cvxMF/cvxpy_update_functions.py
```python
import numpy as np
import cvxpy as cvx
def update_D_hat_cvxpy(t, W_hat, X_hat, A_t, B_t, x_sum, alpha_sum, eps):
'''
use cvxpy to update D
'''
n_hat, k_cluster = W_hat.shape
D_hat = (X_hat @ W_hat)
D = cvx.Variable(shape = D_hat.shape)
# constraint = [W >= 0, W <= 1]
constraint = [D >= 0, D <= 1]
# T1 = cvx.trace(B_t * W.T * X_hat.T)
# tmp = X_hat * W * A_t * W.T * X_hat.T
# T2 = cvx.trace(X_hat * W * A_t * W.T * X_hat.T)
# T2 = cvx.trace(W.T * W)
XW = D
T1 = cvx.sum(cvx.multiply(B_t, XW))
m_dim = XW.shape[0]
print('m_dim = ', m_dim)
quad_sum = 0
for idx in range(m_dim):
quad_sum += cvx.quad_form(XW[idx, :].T, A_t)
T2 = quad_sum
# tmp = XW * (A_t.T)
# T2 = cvx.sum(cvx.multiply(XW, tmp))
print('is T1 cvx? ', T1.is_convex())
print('is T2 cvx? ', T2.is_convex())
# print('tmp shape:', tmp.shape)
print('T2 shape:', T2.shape)
obj = cvx.Minimize(1/t * (1/2 * x_sum - T1 + 1/2 * T2 + alpha_sum))
# obj = cvx.Minimize((1/t) * (1/2 * x_sum - cvx.trace(B_t * W.T * X_hat.T)
# + 1/2 * cvx.trace(X_hat * W * A_t * W.T * X_hat.T) + alpha_sum))
prob = cvx.Problem(obj, constraint)
# prob.solve(solver = cvx.CVXOPT)
prob.solve(solver = cvx.OSQP)
# if prob.status != cvx.OPTIMAL:
# raise Exception('CVX solver did not converge!')
print('residual norm = {:.06f}'.format(prob.value))
D_hat_new = D.value
return D_hat_new
def update_W_hat_cvxpy(t, W_hat, X_hat, A_t, B_t, x_sum, alpha_sum, eps):
'''
use cvxpy to update W_hat
'''
n_hat, k_cluster = W_hat.shape
W_hat_new = W_hat.copy()
W = cvx.Variable(shape = W_hat.shape)
# constraint = [W >= 0, W <= 1]
constraint = [W >= 0]
# T1 = cvx.trace(B_t * W.T * X_hat.T)
# tmp = X_hat * W * A_t * W.T * X_hat.T
# T2 = cvx.trace(X_hat * W * A_t * W.T * X_hat.T)
# T2 = cvx.trace(W.T * W)
XW = X_hat * W
T1 = cvx.sum(cvx.multiply(B_t, XW))
m_dim = XW.shape[0]
print('m_dim = ', m_dim)
quad_sum = 0
for idx in range(m_dim):
quad_sum += cvx.quad_form(XW[idx, :].T, A_t)
T2 = quad_sum
# tmp = XW * (A_t.T)
# T2 = cvx.sum(cvx.multiply(XW, tmp))
print('is T1 cvx? ', T1.is_convex())
print('is T2 cvx? ', T2.is_convex())
# print('tmp shape:', tmp.shape)
print('T2 shape:', T2.shape)
obj = cvx.Minimize(1/t * (1/2 * x_sum - T1 + 1/2 * T2 + alpha_sum))
# obj = cvx.Minimize((1/t) * (1/2 * x_sum - cvx.trace(B_t * W.T * X_hat.T)
# + 1/2 * cvx.trace(X_hat * W * A_t * W.T * X_hat.T) + alpha_sum))
prob = cvx.Problem(obj, constraint)
# prob.solve(solver = cvx.CVXOPT)
prob.solve(solver = cvx.OSQP)
# if prob.status != cvx.OPTIMAL:
# raise Exception('CVX solver did not converge!')
print('residual norm = {:.06f}'.format(prob.value))
W_hat_new = W.value
g_val = get_g_hat_value(1, W_hat_new, X_hat, A_t, B_t, x_sum, alpha_sum)
print('g_val = {:.06f}'.format(g_val))
return W_hat_new
```
#### File: jianhao2016/online_cvxMF/pycuda_multiplication.py
```python
import pycuda.autoinit
import pycuda.gpuarray as gpuarray
import numpy as np
import skcuda.linalg as linalg
import skcuda.misc as misc
import time
from functools import reduce
def fast_matmul(x, y, x_type, y_type):
'''
use pycuda to compute c = a * b
'''
linalg.init()
a_gpu = gpuarray.to_gpu(x.astype(x_type))
a_t_gpu = gpuarray.to_gpu(x.T.copy().astype(x_type))
b_gpu = gpuarray.to_gpu(y.astype(y_type))
# row_sum = gpuarray.zeros(shape = x[0].shape, dtype = x_type)
row_sum = 0
# a = np.asarray(x, x_type)
# b = np.asarray(y, y_type)
# a_gpu = gpuarray.to_gpu(a)
# b_gpu = gpuarray.to_gpu(b)
t1_inside = time.time()
c_gpu = linalg.dot(a_gpu, b_gpu)
for a_i in a_gpu:
# row_sum = misc.add(row_sum, a_i)
row_sum += a_i
gg = linalg.dot(a_gpu, b_gpu)
gg = linalg.dot(a_i, a_i)
gg = reduce(linalg.dot, (a_gpu, b_gpu, b_gpu, b_gpu))
# tmp1, tmp2 = linalg.dot(a_gpu, b_gpu), linalg.dot(b_gpu, b_gpu)
z_gpu = a_gpu.copy()
tmp = a_t_gpu
# print('x.T\n', x.T)
# print('tmp\n', tmp)
# print('x = a_gpu: ', np.allclose(x, a_gpu.get()))
# print('x.T = tmp: ', np.allclose(x.T, tmp.get()))
a_prod = linalg.dot(a_gpu, tmp)
t2_inside = time.time()
print('inside cost {:.4f}s'.format(t2_inside - t1_inside))
a = np.random.randint(-5, 5, (3, 4)).astype(np.float32)
a_gpu = gpuarray.to_gpu(a)
norm_gpu = linalg.norm(a_gpu)
print('is norm right?', np.linalg.norm(a) == norm_gpu)
a_gpu = abs(a_gpu)
column_sum = misc.sum(a_gpu, axis = 0)
column_sum = column_sum.reshape((1, -1))
all_one_gpu = gpuarray.to_gpu(np.ones((3, 1), np.float32))
div_mat_gpu = linalg.dot(all_one_gpu, column_sum)
norm_1 = a_gpu / (div_mat_gpu + 1e-3)
print(a_gpu)
print(column_sum)
print(column_sum.shape)
print(norm_1)
# abs_a = a_gpu.__abs__()
# print(a)
# print(abs_a)
# c = abs_a + a_gpu
# print(repr(c))
# print(type(c))
# c = 1/2 * c
# print(a_gpu, c)
return c_gpu.get(), a_prod.get(), row_sum.get()
def np_matmul(x, y):
row_sum = np.zeros_like(x[0])
t1 = time.time()
z = x @ y
for x_i in x:
row_sum += x_i
gg = x @ y
gg = reduce(np.dot, (x, y, y, y))
z_gpu = x.copy()
x_prod = x @ x.T
t2 = time.time()
print('a @ b cost {:.4f}s'.format(t2 - t1))
return z, x_prod, row_sum
def fast_add(x, y, x_type, y_type):
'''
use pycuda to compute c = a + b
'''
linalg.init()
a_gpu = gpuarray.to_gpu(x.astype(x_type))
b_gpu = gpuarray.to_gpu(y.astype(y_type))
t1_inside = time.time()
# c_gpu = misc.add(a_gpu, b_gpu)
c_gpu = a_gpu + b_gpu
t2_inside = time.time()
print('inside cost {:.4f}s'.format(t2_inside - t1_inside))
return c_gpu.get()
def np_add(x, y):
t1 = time.time()
z = x + y
t2 = time.time()
print('a + b cost {:.4f}s'.format(t2 - t1))
if __name__ == '__main__':
# x = np.load('/data/jianhao/tmp_test/a_mat_full.npy')
# y = np.load('/data/jianhao/tmp_test/b_mat_full.npy')
x = np.random.randn(50, 30).astype(np.float32)
y = np.random.randn(30, 30).astype(np.float32)
print(x.shape, y.shape)
t1 = time.time()
zz_cuda, x_prod_cuda, row_sum_cuda = fast_matmul(x, y, np.float64, np.float64)
t2 = time.time()
print('pycuda cost {:.4f}s'.format(t2 - t1))
zz, x_prod, row_sum = np_matmul(x, y)
print('-' * 7)
print('x * y:', np.allclose(zz, zz_cuda))
print('x * x.T:', np.allclose(x_prod, x_prod_cuda))
print('row sum:', np.allclose(row_sum, row_sum_cuda))
# t1 = time.time()
# zz = fast_add(y, y, np.float32, np.float32)
# t2 = time.time()
# print('pycuda cost {:.4f}s'.format(t2 - t1))
# np_add(y, y)
``` |
{
"source": "JianhengHou/Medical-Sieve",
"score": 2
} |
#### File: Medical-Sieve/Medical_Sieve_Pipeline/ensemble_model.py
```python
import pandas as pd
from Medical_Sieve_Model_Pipeline.config import config
from Medical_Sieve_Model_Pipeline import models
from sklearn.model_selection import train_test_split
from Medical_Sieve_Model_Pipeline.dataprocessing import processor as proc
from argparse import ArgumentParser
from Medical_Sieve_Model_Pipeline import estimator
import logging
_logger = logging.getLogger("Medical_Sieve_Model_Pipeline")
def run_train():
_logger.info(f"Start training process for the model: ensemble_aspect_model")
base_models = [config.MODEL1_NAME, config.MODEL2_NAME, config.MODEL3_NAME]
# read probability prediction results of three models
_logger.info(f"Loading training data...")
features_df = proc.read_predictions(base_models,
config.MODEL_TRAINING_STAGE_PREDICTION_MAPPING)
# load original training data
training_df = proc.load_data(config.TRAINING_DATA_PATH)
# divide train and test
X_train, X_test, y_train, y_test = train_test_split(features_df,
training_df[config.ASPECT_TARGET],
shuffle=False,
test_size=0.05)
# model fitting
_logger.info(f"Model fitting...")
model = models.stacking_model_aspect_clf
model.set_params(validation_data=(X_test, y_test))
model.fit(X_train, y_train)
prob_prediction = model.predict_proba(X_test)
_logger.info(f"Finished training process for the model: ensemble_model")
# model evaluation
_logger.info(f"\n====== Model Evaluation=====")
thresholds = [[0.2, 0.25, 0.3, 0.4, 0.45, 0.5, 0.55] for each in range(5)]
thresholds_set = estimator.combinations(thresholds)
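# grid-search over per-output decision thresholds: one candidate list per output
# (five above); estimator.combinations is assumed to enumerate their Cartesian product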
estimator.model_evaluation(prob_prediction, y_test, thresholds_set)
def run_predict():
_logger.info(f"Start predict process for the model: ensemble_model")
base_models = [config.MODEL1_NAME, config.MODEL2_NAME, config.MODEL3_NAME]
# read probability prediction results of three models
_logger.info(f"Loading test data...")
features_df = proc.read_predictions(base_models,
config.MODEL_PREDICTION_MAPPING)
# load original training data
df = proc.load_data(config.TEST_DATA_PATH)
# load pretrained model
model = models.ensemble_aspect_model()
model.load_weights(config.MODEL4_PATH)
# make prediction
_logger.info(f"Predicting test data...")
prob_prediction = model.predict(features_df)
prediction = proc.transform_prediction(prob_prediction)
# transform probability predictions into label predictions
aspects_df = pd.DataFrame(prediction, columns=config.ASPECT_TARGET)
result_df = pd.concat([df, aspects_df], axis=1)
# save result df
_logger.info(f"Save the aspect prediction result")
result_df.to_csv(config.ASPECT_RESULT_PATH, index=False)
_logger.info(f"Finished predict process for the model: ensemble_aspect_model")
def main():
parser = ArgumentParser()
parser.add_argument("--do_train", action='store_true')
parser.add_argument("--do_predict", action='store_true')
args = parser.parse_args()
if args.do_train:
run_train()
if args.do_predict:
run_predict()
if __name__ == '__main__':
main()
```
#### File: Medical_Sieve_Model_Pipeline/dataprocessing/dataManager.py
```python
from pyspark import SparkContext, SparkConf
import nltk
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.tag import pos_tag
import re, unicodedata
import csv
import json
import os
class dataManager:
def __init__(self, contraction_mapping, domain_term_mapping, fields):
self.contraction_mapping = contraction_mapping
self.domain_maping = domain_term_mapping
self.fields = fields
def run_separator(self, input_dictionary_path, line_level_dataset_output_path, text_only_file_path):
with open(line_level_dataset_output_path, 'w') as line_level_dataset_output, open(text_only_file_path, 'w') as text_only_file_output:
csvWriter = csv.writer(line_level_dataset_output)
csvWriter.writerow(self.fields)
"""use spark to process each file"""
for filename in os.listdir(input_dictionary_path):
if filename[-2:] != 'jl': continue
conf = SparkConf() \
.setAppName("preprocessor") \
.set("spark.driver.memory", "32g")\
.set("spark.executor.memory", "16g")\
.set("spark.driver.host", "localhost")
sc = SparkContext(conf=conf)
inputData = sc.textFile(os.path.abspath(input_dictionary_path + '/' + filename))
"""break each post into discussion and replies into lines"""
line_level_data = inputData.map(lambda x: json.loads(x))\
.flatMap(lambda x: self.separate_discussion_and_reply((x['content_id'],(x['post'], x['reply'], x['group'], x['category']))))\
.collect()
"""write line in the list into a csv file"""
for each in line_level_data:
csvWriter.writerow([each[field] for field in self.fields])
text_only_file_output.write(each['text_processed'] + '\n')
sc.stop()
def preprocess_terms(self, line):
line = line.lower()
"""remove links"""
line = re.sub(r"http\S+", "url ", line)
line = re.sub(r"www.\S+", "url ", line)
line = re.sub(r"([a-z]+)\.+([a-z]+)", r'\1 . \2', line)
"""replace some domain contraction words with their complete words"""
for key, value in self.domain_maping.items():
line = re.sub(key, value, line)
"""replace some general contraction words with their complete words"""
word_list = []
for word in line.split():
new_word = word
if self.contraction_mapping.__contains__(new_word):
new_word = self.contraction_mapping[new_word]
word_list.append(new_word)
final_line = ' '.join(word_list).replace("'", "")
return final_line
def preprocess_punctuactions_and_links(self, line):
"""deal with punctuations"""
line = re.sub(r'\?+', "? ", line)
line = re.sub(r'\!+', "! ", line)
line = re.sub(r'\.+\s', " . ", line)
line = re.sub(r',[\w+\s]', " , ", line)
line = line.replace('"',' " ') \
.replace(':',' : ')\
.replace('\n', ' ')\
.replace('/', ' ')\
.replace('_', ' ')\
.replace("\u00a0", " ")\
.replace("\u00a0", "")\
.replace("\u2019", "'")\
.replace("\u2018", "'")
"""fill period if there isn't one"""
if "." not in line[-3:] and "?" not in line[-3:] and "!" not in line[-3:]:
line = line + ' .'
return line
def normalize(self, word_pos):
"""
input: tuple(word, pos)
output: string
"""
if len(word_pos) != 2: return ""
"""Remove non-ASCII characters from list of tokenized words"""
new_word = unicodedata.normalize('NFKD', word_pos[0]).encode('ascii', 'ignore').decode('utf-8', 'ignore')
new_word = re.sub(r'[^\w\s\.\!\?\:\,\-\(\)}]', '', new_word)
# lemmatize proper nouns (NNP) and plural nouns (NNS)
if word_pos[1] == 'NNP' or word_pos[1] == 'NNS':
lemmatizer = WordNetLemmatizer()
new_word = lemmatizer.lemmatize(new_word, pos='n')
return new_word
def separate_discussion_and_reply(self, entry):
result = []
content_id = entry[0]
category = entry[1][3]
group = entry[1][2]
# Discussion part
sent_count = 0
discussion_dict = entry[1][0]
cleaned_raw_discussion = self.preprocess_punctuactions_and_links(discussion_dict['text'])
for sent in sent_tokenize(cleaned_raw_discussion):
sent_count += 1
discussion_sent_entry = {}
discussion_sent_entry["content_id"] = group + '_' + content_id + "-0-" + str(sent_count)
discussion_sent_entry["post_type"] = "discussion"
discussion_sent_entry["group"] = group
discussion_sent_entry["category"] = category
discussion_sent_entry["poster"] = discussion_dict["poster"]
discussion_sent_entry["timestamp"] = discussion_dict["timestamp"]
discussion_sent_entry["text"] = sent
discussion_sent_entry["text_processed"] = " ".join([self.normalize(word) for word in pos_tag(word_tokenize(self.preprocess_terms(sent)))])
result.append(discussion_sent_entry)
# Reply part according to the discussion post above
reply_count = 0
for reply in entry[1][1].values():
reply_count += 1
sent_count = 0
cleaned_raw_reply = self.preprocess_punctuactions_and_links(reply['text'])
for sent in sent_tokenize(cleaned_raw_reply):
sent_count += 1
reply_sent_entry = {}
reply_sent_entry["content_id"] = group + '_' + content_id + "-" + str(reply_count) + '-' + str(sent_count)
reply_sent_entry["post_type"] = "reply"
reply_sent_entry["group"] = group
reply_sent_entry["category"] = category
reply_sent_entry["poster"] = reply["poster"]
reply_sent_entry["timestamp"] = reply["timestamp"]
reply_sent_entry["text"] = sent
reply_sent_entry["text_processed"] = " ".join([self.normalize(word) for word in pos_tag(word_tokenize(self.preprocess_terms(sent)))])
result.append(reply_sent_entry)
return result
``` |
{
"source": "jianhong/16S_pipeline",
"score": 2
} |
#### File: dumpsoftwareversions/templates/dumpsoftwareversions.py
```python
import yaml
import platform
from textwrap import dedent
def _make_versions_html(versions):
html = [
dedent(
"""\\
<style>
#nf-core-versions tbody:nth-child(even) {
background-color: #f2f2f2;
}
</style>
<table class="table" style="width:100%" id="nf-core-versions">
<thead>
<tr>
<th> Process Name </th>
<th> Software </th>
<th> Version </th>
</tr>
</thead>
"""
)
]
for process, tmp_versions in sorted(versions.items()):
html.append("<tbody>")
for i, (tool, version) in enumerate(sorted(tmp_versions.items())):
html.append(
dedent(
f"""\\
<tr>
<td><samp>{process if (i == 0) else ''}</samp></td>
<td><samp>{tool}</samp></td>
<td><samp>{version}</samp></td>
</tr>
"""
)
)
html.append("</tbody>")
html.append("</table>")
return "\\n".join(html)
versions_this_module = {}
versions_this_module["${task.process}"] = {
"python": platform.python_version(),
"yaml": yaml.__version__,
}
with open("$versions") as f:
versions_by_process = yaml.load(f, Loader=yaml.BaseLoader)
versions_by_process.update(versions_this_module)
# aggregate versions by the module name (derived from fully-qualified process name)
versions_by_module = {}
for process, process_versions in versions_by_process.items():
module = process.split(":")[-1]
try:
assert versions_by_module[module] == process_versions, (
"We assume that software versions are the same between all modules. "
"If you see this error-message it means you discovered an edge-case "
"and should open an issue in nf-core/tools. "
)
except KeyError:
versions_by_module[module] = process_versions
versions_by_module["Workflow"] = {
"Nextflow": "$workflow.nextflow.version",
"$workflow.manifest.name": "$workflow.manifest.version",
}
versions_mqc = {
"id": "software_versions",
"section_name": "${workflow.manifest.name} Software Versions",
"section_href": "https://github.com/${workflow.manifest.name}",
"plot_type": "html",
"description": "are collected at run time from the software output.",
"data": _make_versions_html(versions_by_module),
}
with open("software_versions.yml", "w") as f:
yaml.dump(versions_by_module, f, default_flow_style=False)
with open("software_versions_mqc.yml", "w") as f:
yaml.dump(versions_mqc, f, default_flow_style=False)
with open("versions.yml", "w") as f:
yaml.dump(versions_this_module, f, default_flow_style=False)
``` |
{
"source": "jianhong/chipseq",
"score": 2
} |
#### File: process/ucsc_track/create_trackhub.py
```python
import os
import sys
import glob
import errno
import argparse
import trackhub
############################################
############################################
## PARSE ARGUMENTS
############################################
############################################
def parse_args(args=None):
Description = 'Create UCSC trackhub file from a list of files and associated colours - ".bed", ".narrowPeak", ".broadPeak", ".bw", ".bigwig" files currently supported.'
Epilog = """Example usage: python create_trackhub.py <OUTPUT_FOLDER> <LIST_FILE> <GENOME>"""
argParser = argparse.ArgumentParser(description=Description, epilog=Epilog)
## REQUIRED PARAMETERS
argParser.add_argument('LIST_FILE', help="Tab-delimited file containing two columns i.e. samplename\tsignalfile. Header isn't required.")
argParser.add_argument('GENOME', help="Full path to genome fasta file or shorthand for genome available in UCSC e.g. hg19.")
argParser.add_argument('CHROM_SIZE', help="Full path to chrom size")
argParser.add_argument('EMAIL', help="email address")
argParser.add_argument('DESIGN_FILE', help="design file")
return argParser.parse_args(args)
############################################
############################################
## HELPER FUNCTIONS
############################################
############################################
def makedir(path):
if not len(path) == 0:
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
############################################
############################################
## MAIN FUNCTION
############################################
############################################
tracktypes = ['bigWig', 'bam', 'bigBed', 'vcfTabix', 'bigNarrowPeak',
'bigBarChart', 'bigChain', 'bigGenePred', 'bigBroadPeak',
'bigMaf', 'bigPsl', 'halSnake']
TrackType = {'bw':'bigWig', 'bb':'bigBed', 'bed':'bigBed',
'narrowpeak':'bigBed 6+4', 'broadpeak':'bigBed 6+3'}
Visibility = {'bw':'full', 'bb':'dense', 'bed':'dense',
'narrowpeak':'dense', 'broadpeak':'dense'}
bigNarrowPeak=open("narrowPeak.as", "w")
bigNarrowPeak.write('''table bigNarrowPeak
"BED6+4 Peaks of signal enrichment based on pooled, normalized (interpreted) data."
(
string chrom; "Reference sequence chromosome or scaffold"
uint chromStart; "Start position in chromosome"
uint chromEnd; "End position in chromosome"
string name; "Name given to a region (preferably unique). Use . if no name is assigned"
uint score; "Indicates how dark the peak will be displayed in the browser (0-1000) "
char[1] strand; "+ or - or . for unknown"
float signalValue; "Measurement of average enrichment for the region"
float pValue; "Statistical significance of signal value (-log10). Set to -1 if not used."
float qValue; "Statistical significance with multiple-test correction applied (FDR -log10). Set to -1 if not used."
int peak; "Point-source called for this peak; 0-based offset from chromStart. Set to -1 if no point-source called."
)
''')
bigNarrowPeak.close()
bigBroadPeak=open("broadPeak.as", "w")
bigBroadPeak.write('''table bigBroadPeak
"BED6+4 Peaks of signal enrichment based on pooled, normalized (interpreted) data."
(
string chrom; "Reference sequence chromosome or scaffold"
uint chromStart; "Start position in chromosome"
uint chromEnd; "End position in chromosome"
string name; "Name given to a region (preferably unique). Use . if no name is assigned"
uint score; "Indicates how dark the peak will be displayed in the browser (0-1000) "
char[1] strand; "+ or - or . for unknown"
float signalValue; "Measurement of average enrichment for the region"
float pValue; "Statistical significance of signal value (-log10). Set to -1 if not used."
float qValue; "Statistical significance with multiple-test correction applied (FDR -log10). Set to -1 if not used."
)
''')
bigBroadPeak.close()
for tt in tracktypes:
TrackType[tt.lower()] = tt
if tt in ['bam', 'bigBed', 'vcfTabix', 'bigNarrowPeak',
'bigBarChart', 'bigChain', 'bigGenePred', 'bigBroadPeak',
'bigMaf', 'bigPsl', 'halSnake']:
Visibility[tt.lower()] = 'dense'
else:
Visibility[tt.lower()] = 'full'
def create_trackhub(OutFolder,ListFile,Genome,ChrSize, EMAIL,DesignFile):
ERROR_STR = 'ERROR: Please check design file'
HEADER = ['group', 'replicate', 'fastq_1', 'fastq_2']
makedir(OutFolder)
dIn = open(DesignFile, 'r')
header = dIn.readline().strip().split(',')
if header[:4] != HEADER:
print("{} header: {} != {}".format(ERROR_STR,','.join(header),','.join(HEADER)))
sys.exit(1)
paramColn = {}
for i in range(len(header)):
if header[i][:6]=="track_": # header start with track_
paramColn[header[i][6:]]=i
sampleDesignDict = {}
designDict = {}
if paramColn:
while True:
line = dIn.readline()
if line:
lspl = [x.strip() for x in line.strip().split(',')]
lspl[0] = [lspl[0]+Postfix[1], lspl[0]+'_R'+lspl[1]]
lspl[0] = [trackhub.helpers.sanitize(lspl[0][0].replace(".", "_"), strict=False),trackhub.helpers.sanitize(lspl[0][1].replace(".", "_"), strict=False)]
sampleDesignDict[lspl[0][0]] = {}
sampleDesignDict[lspl[0][1]] = {}
for k in paramColn.keys():
sampleDesignDict[lspl[0][0]][k]=lspl[paramColn[k]]
sampleDesignDict[lspl[0][1]][k]=lspl[paramColn[k]]
if k in designDict:
designDict[k][lspl[paramColn[k]]] = lspl[paramColn[k]]
else:
designDict[k] = {lspl[paramColn[k]]:lspl[paramColn[k]]}
else:
break
dIn.close()
fileList = []
fin = open(ListFile,'r')
while True:
line = fin.readline()
if line:
ifile = [x.strip() for x in line.strip().split('\t')]
colour = ""
if sampleDesignDict:
kfile = trackhub.helpers.sanitize(ifile[0].replace(".", "_"), strict=False)
if kfile in sampleDesignDict:
if "color" in sampleDesignDict[kfile]:
h = sampleDesignDict[kfile]["color"].lstrip('#')
colour = ','.join(str(x) for x in tuple(int(h[i:i+2], 16) for i in (0, 2, 4)))
if len(colour.strip()) == 0:
colour = '0,0,178'
fileList.append((ifile[1],colour,ifile[0]))
else:
break
fin.close()
fileList = sorted(fileList, key=lambda x: x[2])
# Initialize the components of a track hub, already connected together
hub, genomes_file, genome, trackdb = trackhub.default_hub(
hub_name="RNISRS_hub",
short_label='Regeneromics Shared Resource hub',
long_label='Regeneration Next Initiative Regeneromics Shared Resource hub',
genome=Genome,
email=EMAIL)
# create compositeTracks
if sampleDesignDict:
composite = trackhub.CompositeTrack(
name = 'composite',
short_label='singlal'
)
# Add those subgroups to the composite track
subgroups = []
for k in designDict.keys():
if k!='color':
subg = trackhub.SubGroupDefinition(
name=k,
label=k,
mapping=designDict[k]
)
subgroups.append(subg)
composite.add_subgroups(subgroups)
# Add the composite track to the trackDb
trackdb.add_tracks(composite)
signal_view = trackhub.ViewTrack(
name='signalviewtrack',
view='signal',
short_label='Signal')
composite.add_view(signal_view)
regions_view = trackhub.ViewTrack(
name='regionsviewtrack',
view='regions',
short_label='Regions')
composite.add_view(regions_view)
for ifile,color,id in fileList:
extension = os.path.splitext(ifile)[1].replace(".", "").lower()
filename = trackhub.helpers.sanitize(os.path.splitext(os.path.basename(ifile))[0].replace(".", "_"), strict=False)
if extension in ['bed','broadpeak','narrowpeak']:
# convert bed to bigbed
# sort bed file
cmd = "sort -k1,1 -k2,2n " + ifile +" >" + ifile + "_sorted.bed"
os.system(cmd)
# bedToBigBed
os.system("awk '$1 != \"track\" {$5=($5>1000)?1000:$5; print ;}' "+ifile+"_sorted.bed > "+ifile+"_srt.bed")
if extension == "bed":
cmd = "bedToBigBed "+ifile+"_srt.bed"+" "+ChrSize+" "+ifile+".bb"
extension = "bb"
if extension == "broadpeak":
cmd = "bedToBigBed -type=bed6+3 -as=broadPeak.as "+ifile+"_srt.bed"+" "+ChrSize+" "+ifile+".bb"
if extension == "narrowpeak":
cmd = "bedToBigBed -type=bed6+4 -as=narrowPeak.as "+ifile+"_srt.bed"+" "+ChrSize+" "+ifile+".bb"
os.system(cmd)
# change ifile to new bigbed file
ifile = ifile+".bb"
if extension in TrackType.keys():
if sampleDesignDict:
track = trackhub.Track(
name=filename,
source=filename,
short_label=id,
long_label=filename,
color=color,
visibility=Visibility[extension],
tracktype=TrackType[extension],
subgroups=sampleDesignDict[filename],
autoScale='on')
signal_view.add_tracks(track)
else:
track = trackhub.Track(
name=filename,
source=filename,
short_label=id,
long_label=filename,
color=color,
visibility=Visibility[extension],
tracktype=TrackType[extension],
autoScale='on')
trackdb.add_tracks(track)
linkname=os.path.join(OutFolder, Genome, filename+"."+TrackType[extension].split()[0])
makedir(os.path.join(OutFolder, Genome))
os.symlink("../../"+ifile, linkname)
else:
pass
hub.render(staging=OutFolder)
############################################
############################################
## RUN FUNCTION
############################################
############################################
def main(args=None):
args = parse_args(args)
create_trackhub(
OutFolder="trackhub",
ListFile=args.LIST_FILE,
Genome=args.GENOME,
ChrSize=args.CHROM_SIZE,
EMAIL=args.EMAIL,
DesignFile=args.DESIGN_FILE)
if __name__ == "__main__":
sys.exit(main())
``` |
{
"source": "jianhong/hicar_tools",
"score": 3
} |
#### File: hicar_tools/bin/generate_site_positions_yq.py
```python
import sys
import argparse
from Bio import SeqIO
import re
def find_site(fasta,seq,outfile):
f = open(outfile, 'w')
for seq_record in SeqIO.parse(fasta, "fasta"):
sys.stderr.write("processing "+seq_record.id+"\n")
site = [m.start() + 1 for m in re.finditer(seq.lower(), str(seq_record.seq).lower())]
f.write("%s " % seq_record.id.replace("chr",""))
for i in site:
f.write("%s " % i)
f.write("%d\n" % len(str(seq_record.seq)))
f.close()
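# Output: one line per chromosome, "<chrom> <cut_site_1> <cut_site_2> ... <chrom_length>",
# with the 'chr' prefix stripped from the chromosome name; this whitespace-separated
# layout appears to follow the Juicer-style restriction-site file convention (assumption).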
def main():
parser = argparse.ArgumentParser(description='Given a fasta file and a restriction enzyme recognition sequence, generate the genome-wide cut-site positions')
parser.add_argument("-f", "--fasta", dest="fasta",required=True,help="input fasta file")
parser.add_argument("-s", "--seq", dest="seq",required=True,help="RE cut sequence")
parser.add_argument("-o", "--out", dest="outfile",required=True,help="Output file")
args = parser.parse_args()
find_site(args.fasta,args.seq,args.outfile)
if __name__ == "__main__":
sys.exit(main())
```
#### File: hicar_tools/bin/MAPS.py
```python
from __future__ import print_function
import numpy as np
import pandas as pd
import argparse
import sys
import itertools
import re
def get_segment_range(bin_start, bin_end):
return range(int(bin_start), int(bin_end)+1)
def get_1D_peak(segment_range, MACS2_peak_ranges_list):
for segment in segment_range:
if segment in MACS2_peak_ranges_list:
return True
return False
def validate_input_data(input_data):
params = {}
if 'OUT_DIR' in input_data:
params['OUT_DIR'] = input_data['OUT_DIR'][1]
if 'BINNING_RANGE' in input_data:
params['BINNING_RANGE'] = input_data['BINNING_RANGE'].astype('int')[1]
if 'BIN_SIZE' in input_data:
params['BIN_SIZE'] = input_data['BIN_SIZE'].astype('int')[1]
if 'DATASET_NAME' in input_data:
params['DATASET_NAME'] = input_data['DATASET_NAME'][1]
if 'MACS2_PATH' in input_data:
params['MACS2_PATH'] = input_data['MACS2_PATH'][1]
if 'GF_PATH' in input_data:
params['GF_PATH'] = input_data['GF_PATH'][1]
if 'LONG_PATH' in input_data:
params['LONG_PATH'] = input_data['LONG_PATH'][1]
if 'LONG_FORMAT' in input_data:
params['LONG_FORMAT'] = input_data['LONG_FORMAT'][1]
if 'SHORT_PATH' in input_data:
params['SHORT_PATH'] = input_data['SHORT_PATH'][1]
if 'SHORT_FORMAT' in input_data:
params['SHORT_FORMAT'] = input_data['SHORT_FORMAT'][1]
if 'N_CHROMS' in input_data:
params['N_CHROMS'] = input_data['N_CHROMS'].astype('int')[1]
if 'SEX_CHROMS' in input_data:
params['SEX_CHROMS'] = input_data['SEX_CHROMS'][1]
else:
params['SEX_CHROMS'] = ''
return params
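# A sketch of the expected run-parameters file (parsed above with sep='=', one
# KEY=VALUE per line, '#' comments allowed); the values below are only illustrative:
#   OUT_DIR=output/
#   BIN_SIZE=5000
#   BINNING_RANGE=1000000
#   DATASET_NAME=sample1
#   MACS2_PATH=sample1_peaks.narrowPeak
#   GF_PATH=genomic_features.txt
#   LONG_PATH=long/
#   LONG_FORMAT=[DATASET_NAME].[CHROMOSOME].long.intra.bedpe
#   SHORT_PATH=short/
#   SHORT_FORMAT=[DATASET_NAME].[CHROMOSOME].shrt.vip.bed
#   N_CHROMS=22
#   SEX_CHROMS=X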
def load_MACS2(MACS2_PATH):
try:
MACS2_full = pd.read_csv(MACS2_PATH, sep='\t', skip_blank_lines=True,comment='#',header=None, usecols=[0,1,2])
MACS2_full.columns = ['chr','start','end']
MACS2_full = MACS2_full.astype({'chr':str})
except pd.errors.EmptyDataError:
MACS2_full = pd.DataFrame(columns=['chr','start','end'])
return(MACS2_full)
def load_metadata(GF_PATH, BIN_SIZE):
try:
metadata_full = pd.read_csv(GF_PATH, sep='\t',header=None, low_memory=False)
metadata_full.columns = ['chr','start','end','effective_length','gc','mappability']
metadata_full = metadata_full.astype({'chr':str})
metadata_full['bin1_mid'] = metadata_full['start']//BIN_SIZE
metadata_full['bin2_mid'] = metadata_full['bin1_mid']
metadata_full['bin'] = metadata_full['bin1_mid']
except pd.errors.EmptyDataError:
metadata_full = pd.DataFrame(columns=['chr','start','end','effective_length','gc','mappability','bin1_mid','bin2_mid','bin'])
return(metadata_full)
def parse_fname(chrom, type, params):
if type == 'short':
fname = params['SHORT_PATH']+params['SHORT_FORMAT']
else:
fname = params['LONG_PATH']+params['LONG_FORMAT']
for p in params:
pn = '[' + p + ']'
if pn in fname:
fname = fname.replace(pn, params[p])
if '[CHROMOSOME]' in fname:
fname = fname.replace('[CHROMOSOME]', chrom)
else:
if type == 'short':
print('File format needs to contain [CHROMOSOME] tag:', params['SHORT_FORMAT'])
else:
print('File format needs to contain [CHROMOSOME] tag:', params['LONG_FORMAT'])
exit()
return fname
def get_chrom_from_MACS2(MACS2_full):
chr = []
if MACS2_full.shape[0]:
chr = MACS2_full['chr'].unique().tolist()
## remove XY, MT, ...
p = re.compile('^(chr)?((\d{1,2})|(IX|IV|V?I{0,3}))$')
chr = [s for s in chr if p.match(s)]
return chr
def init(p):
## checking that all files are available
print('loading parameters file')
input_data = pd.read_csv(p.run_file,sep='=',skip_blank_lines=True, comment='#',index_col=0,header=None)
input_data = input_data.transpose()
params = validate_input_data(input_data)
print('loading MACS2 peaks')
MACS2_full = load_MACS2(params['MACS2_PATH'])
## set up chromosomes: extracted from MACS2_full["chr"] and filtered to canonical chromosomes (see get_chrom_from_MACS2)
#chroms = ['chr' + str(i) for i in range(1,params['N_CHROMS']+1,1)]
chroms = get_chrom_from_MACS2(MACS2_full)
print(chroms)
if params['SEX_CHROMS'] == 'X':
chroms.append('chrX')
elif params['SEX_CHROMS'] == 'Y':
chroms.append('chrY')
elif params['SEX_CHROMS'] == 'XY':
chroms.append('chrX')
chroms.append('chrY')
print(chroms)
params['BIN_RANGE'] = float(params['BINNING_RANGE'])/float(params['BIN_SIZE'])
print('loading metadata file')
metadata_full = load_metadata(params['GF_PATH'], params['BIN_SIZE'])
qc_str = '' ## content of qc.maps file
for CHR in chroms:
print('doing chromosome ',CHR,'\n')
#handling MACS2 peaks
print('-- handling MACS2 peaks')
MACS2 = MACS2_full[MACS2_full['chr'] == CHR].copy()
if not MACS2.shape[0]:
CHR = CHR.replace("chr", "")
MACS2 = MACS2_full[MACS2_full['chr'] == CHR].copy()
if not MACS2.shape[0]:
continue
MACS2['start_bin'] = np.floor(MACS2['start']/params['BIN_SIZE']).fillna(0)
MACS2['end_bin'] = np.ceil(MACS2['end']/params['BIN_SIZE']).fillna(0)
# perform this hack because apply returns the wrong data type in some rare cases
specialCase = False
if MACS2.iloc[0]['end_bin'] - MACS2.iloc[0]['start_bin'] == MACS2.shape[1] - 1:
MACS2.iloc[0,MACS2.columns.get_loc('start_bin')] = MACS2.iloc[0]['start_bin'] - 1
specialCase = True
MACS2_peak_ranges = MACS2.apply(lambda row: range(int(row['start_bin']),int(row['end_bin'])), axis=1).values.tolist()
MACS2_peak_ranges_list = set(itertools.chain.from_iterable(MACS2_peak_ranges))
if specialCase:
MACS2_peak_ranges_list.remove(MACS2.iloc[0]['start_bin'])
print('-- handling short.bed\n')
ps_short = pd.read_csv(parse_fname(CHR, 'short', params),header=None,sep='\t')
if ps_short.shape[0]:
new_cols = ['chr','start','end','name']
ps_short.rename(columns=dict(zip(ps_short.columns[0:], new_cols)),inplace=True)
ps_short = ps_short.astype({'chr':str})
ps_short['bin'] = ps_short[['start','end']].mean(axis=1)//params['BIN_SIZE']
ps_short['short_count'] = 1
count_data_short = ps_short[['chr','bin','short_count']].groupby(['chr','bin']).count()
count_data_short.reset_index(inplace=True)
print('-- handling long.bedpe\n')
##### getting overlap
## load long.bed file
ps_long = pd.read_csv(parse_fname(CHR, 'long', params),header=None,sep='\t')
long_cols = ['chr1','start1','end1','chr2','start2','end2', 'count']
ps_long.rename(columns=dict(zip(ps_long.columns[0:], long_cols)),inplace=True)
ps_long = ps_long.astype({'chr1':str, 'chr2':str})
## filter only reads at the same chromosome and proper orientation
ps_long = ps_long[(ps_long['chr1'] == CHR) & (ps_long['chr2'] == CHR) ]
if ps_long.shape[0]:
ps_long['read1_bin_mid'] = ((ps_long['start1'] + ps_long['end1']) / 2.0)//params['BIN_SIZE']
ps_long['read2_bin_mid'] = ((ps_long['start2'] + ps_long['end2']) / 2.0)//params['BIN_SIZE']
ps_long['bin1_mid'] = ps_long.loc[:,['read1_bin_mid','read2_bin_mid']].min(axis=1)
ps_long['bin2_mid'] = ps_long.loc[:,['read1_bin_mid','read2_bin_mid']].max(axis=1)
#ps_long['count'] = 1
#count_data = ps_long[['bin1_mid', 'bin2_mid','count']].groupby(['bin1_mid','bin2_mid']).count()
count_data = ps_long[['bin1_mid', 'bin2_mid', 'count']]
count_data.reset_index(inplace=True)
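# Split bin pairs by 1D-peak overlap of their two anchor bins:
#   AND set - both anchor bins overlap a MACS2 peak,
#   XOR set - exactly one anchor bin overlaps a peak,
#   NOT set - neither anchor overlaps a peak (only counted for the QC file).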
count_data_and = count_data[(count_data['bin1_mid'].isin(MACS2_peak_ranges_list)) & (count_data['bin2_mid'].isin(MACS2_peak_ranges_list))].copy()
count_data_and = count_data_and[(np.abs(count_data_and['bin1_mid'] - count_data_and['bin2_mid']) <= params['BIN_RANGE'])
& (np.abs(count_data_and['bin1_mid'] - count_data_and['bin2_mid']) >= 1)]
count_data_and['1D_peak_bin1'] = 1
count_data_and['1D_peak_bin2'] = 1
count_data_xor = count_data[(count_data.bin1_mid.isin(MACS2_peak_ranges_list)) ^ (count_data.bin2_mid.isin(MACS2_peak_ranges_list))]
count_data_xor = count_data_xor[(np.abs(count_data_xor['bin1_mid'] - count_data_xor['bin2_mid']) <= params['BIN_RANGE'])
& (np.abs(count_data_xor['bin1_mid'] - count_data_xor['bin2_mid']) >= 1)]
count_data_xor_bin1 = count_data_xor[(count_data_xor.bin1_mid.isin(MACS2_peak_ranges_list))].copy()
count_data_xor_bin1['1D_peak_bin1'] = 1
count_data_xor_bin1['1D_peak_bin2'] = 0
count_data_xor_bin2 = count_data_xor[(count_data_xor.bin2_mid.isin(MACS2_peak_ranges_list))].copy()
count_data_xor_bin2['1D_peak_bin1'] = 0
count_data_xor_bin2['1D_peak_bin2'] = 1
count_data_xor = pd.concat([count_data_xor_bin1, count_data_xor_bin2],ignore_index=True)
print('-- calculating values for maps.qc file\n')
AND_sum = count_data_and['count'].sum()
XOR_sum = count_data_xor['count'].sum()
NOT_sum = count_data['count'].sum() - AND_sum - XOR_sum
qc_str = qc_str +\
'AND_set\t' + str(AND_sum) + '\tnumber of pairs in AND set at chromosome ' + CHR + '\n' +\
'XOR_set\t' + str(XOR_sum) + '\tnumber of pairs in XOR set at chromosome ' + CHR + '\n' +\
'NOT_set\t' + str(NOT_sum) + '\tnumber of pairs in NOT set at chromosome ' + CHR + '\n'
print('-- handling metadata\n')
metadata = metadata_full[metadata_full['chr'] == CHR].copy()
metadata = pd.merge(metadata, count_data_short, on = ['bin','chr'], how='outer')
metadata['short_count'] = metadata['short_count'].fillna(0)
print('-- attaching genomic feature attributes to AND set')
reg_and = pd.merge(count_data_and, metadata[['bin1_mid','effective_length','gc','mappability','short_count']],
on='bin1_mid')
reg_and.rename(columns={'effective_length':'effective_length1','gc':'gc1','mappability':'mappability1',
'short_count':'short_count1'},inplace=True)
reg_and = pd.merge(reg_and, metadata[['bin2_mid','effective_length','gc','mappability','short_count']],
on='bin2_mid')
reg_and.rename(columns={'effective_length':'effective_length2','gc':'gc2','mappability':'mappability2',
'short_count':'short_count2'},inplace=True)
reg_and = reg_and[(reg_and['effective_length1'] > 0) & (reg_and['effective_length2'] > 0)]
reg_and['dist'] = pd.to_numeric(np.abs(reg_and['bin1_mid'] - reg_and['bin2_mid']))
reg_and['logl'] = np.log((reg_and['effective_length1'] + 1.0) * (reg_and['effective_length2'] + 1.0) / (params['BIN_SIZE'] * params['BIN_SIZE']))
reg_and['loggc'] = np.log(reg_and['gc1'] * reg_and['gc2'])
reg_and['logm'] = np.log(reg_and['mappability1'] * reg_and['mappability2'])
reg_and['logdist'] = np.log((1.0 + reg_and['dist']) / params['BIN_RANGE'])
max_short_and = (reg_and['short_count1'].max() + 1.0) * (reg_and['short_count2'].max() + 1.0)
reg_and['logShortCount'] = np.log(
(reg_and['short_count1'] + 1.0) * (reg_and['short_count2'] + 1.0) / max_short_and
)
reg_and['bin1_mid'] = reg_and['bin1_mid'] * params['BIN_SIZE']
reg_and['bin2_mid'] = reg_and['bin2_mid'] * params['BIN_SIZE']
print('-- attaching genomic feature attributes to XOR set')
reg_xor = pd.merge(count_data_xor, metadata[['bin1_mid','effective_length','gc','mappability','short_count']],
on='bin1_mid')
reg_xor.rename(columns={'effective_length':'effective_length1','gc':'gc1','mappability':'mappability1',
'short_count':'short_count1'},inplace=True)
reg_xor = pd.merge(reg_xor, metadata[['bin2_mid','effective_length','gc','mappability','short_count']],
on='bin2_mid')
reg_xor.rename(columns={'effective_length':'effective_length2','gc':'gc2','mappability':'mappability2',
'short_count':'short_count2'},inplace=True)
reg_xor = reg_xor[(reg_xor['effective_length1'] > 0) & (reg_xor['effective_length2'] > 0)]
reg_xor['dist'] = pd.to_numeric(np.abs(reg_xor['bin1_mid'] - reg_xor['bin2_mid']))
reg_xor['logl'] = np.log((reg_xor['effective_length1'] + 1.0) * (reg_xor['effective_length2'] + 1.0) / (params['BIN_SIZE'] * params['BIN_SIZE']))
reg_xor['loggc'] = np.log(reg_xor['gc1'] * reg_xor['gc2'])
reg_xor['logm'] = np.log(reg_xor['mappability1'] * reg_xor['mappability2'])
reg_xor['logdist'] = np.log((1.0 + reg_xor['dist']) / params['BIN_RANGE'])
max_short_xor = (reg_xor['short_count1'].max() + 1.0) * (reg_xor['short_count2'].max() + 1.0)
reg_xor['logShortCount'] = np.log(
(reg_xor['short_count1'] + 1.0) * (reg_xor['short_count2'] + 1.0) / max_short_xor
)
reg_xor['bin1_mid'] = reg_xor['bin1_mid'] * params['BIN_SIZE']
reg_xor['bin2_mid'] = reg_xor['bin2_mid'] * params['BIN_SIZE']
print('-- saving output\n')
fout_name = params['OUT_DIR'] + 'reg_raw.' + str(CHR) + '.' + params['DATASET_NAME'] + '.'+ str(int(params['BIN_SIZE']/1000)) + 'k.and'
reg_and.to_csv(fout_name, sep='\t')
fout_name = params['OUT_DIR'] + 'reg_raw.' + str(CHR) + '.' + params['DATASET_NAME'] + '.'+ str(int(params['BIN_SIZE']/1000)) + 'k.xor'
reg_xor.to_csv(fout_name, sep='\t')
else:
print('no bin pairs in long or short bedpe files for chromosome ',CHR,'. Doing next chromosome')
print('-- saving .qc.maps file\n')
qc_fname = params['OUT_DIR'] + params['DATASET_NAME'] + '.maps.qc'
qc_file = open(qc_fname,'w')
qc_file.write(qc_str)
qc_file.close()
def main():
parser = argparse.ArgumentParser()
parser.prog = 'PROG'
parser.description = "MAPS"
parser.epilog = "This is where the command-line utility's epilog goes."
parser.add_argument('run_file', help = 'file containing run parameters')
p = parser.parse_args(sys.argv[1:])
init(p)
if __name__ == "__main__":
main()
```
#### File: hicar_tools/bin/restriction_cut_multipleenzyme.py
```python
import sys
import argparse
from Bio import SeqIO
from Bio.SeqUtils import GC
import re
def find_site(fasta,seq,outfile,pos, binsize):
f = open(outfile, 'w')
mnase = False
if (len(seq.split(",")) > 1):
seqs = seq.split(",")
sizes = [len(seq) for seq in seqs]
poses = pos.split(",")
poses = [int(x) for x in poses]
else:
mnase = True if seq == "mnase" else False
seqs = [seq]
sizes = [len(seq)]
poses = [int(pos)] if not mnase else [0]
site = []
site_pos = []
site_size = []
for seq_record in SeqIO.parse(fasta, "fasta"):
#sys.stderr.write("processing "+ str(mnase)+"\n")
#sys.stderr.write("processing "+seq_record.id+"\n")
#sys.stderr.write("processing "+str(len(seq_record.seq))+" "+str(binsize)+"\n")
if not mnase:
sys.stderr.write("processing "+seq_record.id+"\n")
#print(seq_record.seq[3000181: (3000185 + 30)])
site = []
site_pos = []
site_size = []
# sentinel sites [0 - pos] and [len(seq) + 1 + pos - size] are added so the for loop below can treat the chromosome ends like cut sites; the first sentinel uses the pos of the first site found in the record and the second uses the pos and size of the last site found
for i in range(len(seqs)):
seq = seqs[i]
size = sizes[i]
pos = poses[i]
#print("searching for:" + seq.lower())
###this is what needs to change:
current_site = [m.start() + 1 for m in re.finditer(seq.lower(), str(seq_record.seq).lower())]
site = site + current_site
site_pos = site_pos + ([pos] * len(current_site))
site_size = site_size + ([size] * len(current_site))
## for each site, record its size and pos, then zip and sort everything by site position; site and pos are needed in the loop below
if (len(site) == 0):
continue
site, site_pos, site_size = (list(x) for x in zip(*sorted(zip(site, site_pos, site_size))))
#print(site[:10])
#break
first_pos = site_pos[0]
first_size = site_size[0]
last_pos = site_pos[-1]
last_size = site_size[-1]
site = [0 - first_pos] + site + [len(str(seq_record.seq)) + 1 + last_pos - last_size]
site_pos = [first_pos] + site_pos + [last_pos]
site_size = [first_size] + site_size + [last_size]
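# two sentinel "sites" are prepended/appended (reusing the pos/size of the first and
# last real site) so the loop below also emits fragments for the region before the
# first cut site and after the last one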
for i in range(1,len(site)-1):
count = (i -1) * 2 + 1
frag_start = site[i-1] + site_pos[i-1]
frag_end = site[i] + site_size[i] - site_pos[i] - 1
frag_len = frag_end - frag_start
frag_gc = GC(str(seq_record.seq)[max(frag_end-200,0):frag_end]) / 100
f.write("{num}\t{strand}\t{chr}\t{pos}\t{fraglen}\t{GC}\n".format(num=count,strand="-",chr=seq_record.id,pos=frag_end,fraglen=frag_len,GC=frag_gc))
frag_start = site[i] + site_pos[i]
frag_end = site[i+1] + site_size[i+1] - site_pos[i+1] - 1
frag_len = frag_end - frag_start
frag_gc = GC(str(seq_record.seq)[frag_start:frag_start+200]) / 100
f.write("{num}\t{strand}\t{chr}\t{pos}\t{fraglen}\t{GC}\n".format(num=count+1,strand="+",chr=seq_record.id,pos=frag_start,fraglen=frag_len,GC=frag_gc))
elif mnase:
for i, frag_start in enumerate(range(1, len(str(seq_record.seq)) + 1, binsize)):
count = i * 2 + 1
frag_end = min(frag_start + binsize - 1, len(str(seq_record.seq)))
frag_len = binsize - 1
frag_gc = GC(str(seq_record.seq)[frag_start:frag_end]) / 100
f.write("{num}\t{strand}\t{chr}\t{pos}\t{fraglen}\t{GC}\n".format(num=count,strand="-",chr=seq_record.id,pos=frag_end,fraglen=frag_len,GC=frag_gc))
f.write("{num}\t{strand}\t{chr}\t{pos}\t{fraglen}\t{GC}\n".format(num=count+1,strand="+",chr=seq_record.id,pos=frag_start,fraglen=frag_len,GC=frag_gc))
f.close()
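# A minimal usage sketch (enzyme sequences, cut positions and file names below are
# illustrative assumptions, not taken from this repository). `seq` and `pos` accept
# several comma-separated restriction sites, or the literal string "mnase" to switch
# to fixed-size binning of `binsize` bp:
#
#   find_site("genome.fa", "GATC,CATG", "genome_features.txt", "0,1", 5000)
#   find_site("genome.fa", "mnase", "genome_features.txt", "0", 5000)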
def main():
    parser = argparse.ArgumentParser(description='Given a fasta file and a restriction enzyme recognition site, generate genomic features')
parser.add_argument("-f", "--fasta", dest="fasta",required=True,help="input fasta file")
parser.add_argument("-s", "--seq", dest="seq",required=True,help="RE cut sequence")
parser.add_argument("-o", "--out", dest="outfile",required=True,help="Output file")
parser.add_argument("-p", "--pos", dest="pos",required=True,help="RE cut position")
    parser.add_argument("-b", "--binsize", dest = "binsize", required = False, help = "bin size for MNase-based data", default = "5Kb")
args = parser.parse_args()
bin_size = args.binsize.replace('Kb','000')
bin_size = bin_size.replace('Mb','000000')
try:
bin_size = int(bin_size)
except ValueError:
        sys.exit("Unknown bin size %s, please double check." % args.binsize)
find_site(args.fasta,args.seq,args.outfile,args.pos, bin_size)
if __name__ == "__main__":
sys.exit(main())
``` |
{
"source": "JianHongHong/Car-Rental-Management-System",
"score": 3
} |
#### File: crcapp/controllers/customer.py
```python
from django.db import models
from crcapp.models import Employee,Customer,Store
from django.contrib.auth.hashers import make_password, check_password
from django.core.exceptions import ValidationError
from django.contrib.sessions.models import Session
from django.utils import timezone
# Functions related to Customer
class CustomerController:
    # Developer: <NAME>, with fixes and debugging by Aidan
# Create a new customer
def create(request):
# Grab the customer with the largest customerID from the database
custObj = Customer.objects.raw("SELECT customerID FROM `crcapp_customer` ORDER BY customerID DESC LIMIT 1")[0]
custID = custObj.customerID#grab the customerID
custID = custID[1:]
custID = int(custID)+1#increment by one
custID = str(custID).zfill(7)
try:
            # Get all the form data and put it into related variables. Also prepare the other variables
customerID_ = "C" + custID
firstName_ = request.POST.get("firstName")
lastName_ = request.POST.get("lastName")
streetAddress_ = request.POST.get("streetAddress")
cityAddress_ = request.POST.get("cityAddress")
postCodeAddress_ = request.POST.get("postCodeAddress")
stateAddress_ = request.POST.get("stateAddress")
DOB_ = request.POST.get("DOB")
driverLicenseNumber_ = request.POST.get("driverLicenseNumber")
gender_ = request.POST.get("gender")
occupation_ = request.POST.get("occupation")
phoneNumber_ = request.POST.get("phoneNumber")
email_ = request.POST.get("email")
userName_ = request.POST.get("userName")
password_ = make_password(request.POST.get('password', ''))
dateJoined_ = timezone.now()
lastLogin_ = timezone.now()
# Create a new customer
newCustomer = Customer(
customerID = customerID_,
firstName = firstName_,
lastName = lastName_,
streetAddress = streetAddress_,
cityAddress = cityAddress_,
postCodeAddress = postCodeAddress_,
stateAddress = stateAddress_,
DOB = DOB_,
driverLicenceNumber = driverLicenseNumber_,
gender = gender_,
occupation = occupation_,
phoneNumber = phoneNumber_,
email = email_,
userName = userName_,
password = password_,
dateJoined = dateJoined_,
lastLogin = lastLogin_,
disable = 0)
            # Validate the new customer data. Note that full_clean() returns None when the
            # data is valid and raises ValidationError otherwise (caught by the except below),
            # so the if-branch is defensive and is normally never taken.
            vali = newCustomer.full_clean()
            if vali:#if errors occurred return those
                return vali
            else:#else the data must be valid and can therefore be saved to the database
                newCustomer.save()
                return True
# If there is an error validating the data then return the error
except ValidationError as e:
return e
    # Developers: Tom and Sam, with fixes and debugging by Aidan
def modify(request):
try:
            # Get all the form data and put it into related variables. Also prepare the other variables
customerID_ = request.POST.get("customerID")#supplied
firstName_ = request.POST.get("firstName")#required
lastName_ = request.POST.get("lastName")#required
streetAddress_ = request.POST.get("streetAddress")#required
cityAddress_ = request.POST.get("cityAddress")
postCodeAddress_ = request.POST.get("postCodeAddress")
stateAddress_ = request.POST.get("stateAddress")
DOB_ = request.POST.get("DOB")#required
driverLicenceNumber_ = request.POST.get("driverLicenseNumber")
gender_ = request.POST.get("gender")#required
occupation_ = request.POST.get("occupation")#required
phoneNumber_ = request.POST.get("phoneNumber")#required
email_ = request.POST.get("email")
userName = request.POST.get("userName")
password = request.POST.get("password")
# Get the exisitng customer from the database
existingCustomer = Customer.objects.get(customerID__exact=customerID_)
# Update the details of the customer
existingCustomer.firstName = firstName_
existingCustomer.lastName = lastName_
existingCustomer.streetAddress = streetAddress_
# For each non-required integer field check if it contains a value and if so update accordingly
if (postCodeAddress_ != ""):
existingCustomer.postCodeAddress = postCodeAddress_
else:
existingCustomer.postCodeAddress = None
existingCustomer.DOB = DOB_
if (driverLicenceNumber_ != ""):
existingCustomer.driverLicenceNumber = driverLicenceNumber_
else:
existingCustomer.driverLicenceNumber = None
existingCustomer.gender = gender_
existingCustomer.occupation = occupation_
existingCustomer.phoneNumber = phoneNumber_
# Check that the new customer details are correct and valid
vali = existingCustomer.full_clean()
if vali:#if errors occurred return those
return vali
else:#else the data must be valid and can therefore be saved to the database
existingCustomer.save()
return True
# If there is an error validating the data then return the error
except ValidationError as e:
return e.message_dict
# Developer : Sam
# Deletes Customer based on given ID
def delete(ID):
x = Customer.objects.get(customerID = ID)
x.delete()
# Developer: Sam
    # Has a purpose that only the Developer who wrote it remembers
    def search(arg, request=None):# request is needed by the filtered branch below; the original referenced it without ever receiving it
if(arg == "all"):
for each in Customer.objects.all():
return(
each.customerID,
each.firstName,
each.lastName,
each.streetAddress,
each.cityAddress,
each.postCodeAddress,
each.stateAddress,
each.DOB,
each.driverLicenceNumber,
each.gender,
each.occupation,
each.phoneNumber,
each.email,
each.userName,
each.dateJoined,
each.lastLogin)
if(arg!= "all"):
customerID_min = request.POST.get("customerID_min")
customerID_max = request.POST.get("customerID_max")
firstName = request.POST.get("firstName")
lastName = request.POST.get("lastName")
streetAdress = request.POST.get("streetAdress")
cityAddress = request.POST.get("cityAddress")
postCodeAddress = request.POST.get("postCodeAddress")
stateAddress = request.POST.get("stateAddress")
DOB_min = request.POST.get("DOB_min")
DOB_max = request.POST.get("DOB_max")
driverLicenceNumber = request.POST.get("driverLicenceNumber")
gender = request.POST.get("gender")
occupation = request.POST.get("occupation")
phoneNumber = request.POST.get("phoneNumber")
email = request.POST.get("email")
userName = request.POST.get("userName")
dateJoined_min = request.POST.get("dateJoined_min")
dateJoined_max = request.POST.get("dateJoined_max")
lastLogin_min = request.POST.get("lastLogin_min")
lastLogin_max = request.POST.get("lastLogin_max")
            condition = " "# NOTE: the values below are concatenated directly into raw SQL (kept from the original); this is vulnerable to SQL injection
if (customerID_min != ""):
condition = condition + "customerID >= \'" + customerID_min + "\' AND "
if (customerID_max != ""):
condition = condition + "customerID <= \'" + customerID_max + "\' AND "
if (firstName != ""):
condition = condition + "firstName LIKE \'%" + firstName + "%\' AND "
if (lastName != ""):
condition = condition + "lastName LIKE \'%" + lastName + "%\' AND "
if (streetAdress != ""):
condition = condition + "streetAdress LIKE \'%" + streetAdress + "%\' AND "
if (cityAddress != ""):
condition = condition + "cityAddress LIKE \'%" + cityAddress + "%\' AND "
if (postCodeAddress != ""):
condition = condition + "postCodeAddress = \'" + postCodeAddress + "\' AND "
if (stateAddress != ""):
condition = condition + "stateAddress = \'" + stateAddress + "\' AND "
if (DOB_min != ""):
condition = condition + "DOB >= \'" + DOB_min + "\' AND "
if (DOB_max != ""):
condition = condition + "DOB <= \'" + DOB_max + "\' AND "
if (driverLicenceNumber != ""):
condition = condition + "driverLicenceNumber = \'" + driverLicenceNumber + "\' AND "
if (gender != ""):
condition = condition + "gender = \'" + gender + "\' AND "
if (occupation != ""):
condition = condition + "occupation LIKE \'%" + occupation + "\' AND "
if (phoneNumber != ""):
condition = condition + "phoneNumber = \'" + phoneNumber + "\' AND "
if (email != ""):
condition = condition + "email = \'" + email + "\' AND "
if (userName != ""):
condition = condition + "userName = \'" + userName + "\' AND "
if (dateJoined_min != ""):
condition = condition + "dateJoined >= \'" + dateJoined_min + "\' AND "
if (dateJoined_max != ""):
condition = condition + "dateJoined <= \'" + dateJoined_max + "\' AND "
if (lastLogin_min != ""):
condition = condition + "lastLogin >= \'" + lastLogin_min + "\' AND "
if (lastLogin_max != ""):
condition = condition + "lastLogin <= \'" + lastLogin_max + "\' AND "
query = 'SELECT * FROM carrentaldb.crcapp_customer WHERE' + condition[:-5] +';'
for each in Customer.objects.raw(query):
return(
each.customerID,
each.firstName,
each.lastName,
each.streetAddress,
each.cityAddress,
each.postCodeAddress,
each.stateAddress,
each.DOB,
each.driverLicenceNumber,
each.gender,
each.occupation,
each.phoneNumber,
each.email,
each.userName,
each.dateJoined,
each.lastLogin)
# Developer : Aidan
# To change the password of a customer
def changePW(request):
customerID_ = request.POST.get("customerID")
password_ = make_password(request.POST.get('password', ''))
existingCustomer = Customer.objects.get(customerID=customerID_)
try:
existingCustomer.password = password_
vali = existingCustomer.full_clean()
if vali:
return vali
else:
existingCustomer.save()
return True
except ValidationError as e:
return e.message_dict
``` |
{
"source": "jianhong/nf-core-hicar",
"score": 2
} |
#### File: nf-core-hicar/bin/pairsqc.py
```python
import pypairix
import math
import os
SEPARATOR = '|'
CIS_TRANS_OUT_FILE_SUFFIX = 'cis_to_trans.out'
PLOT_TABLE_OUT_FILE_SUFFIX = 'plot_table.out'
class ColIndices(object):
"""Column indices for position1, position2, strand1 and strand2, 0-based"""
def __init__(self, pos1, pos2, strand1, strand2):
self.pos1 = pos1
self.pos2 = pos2
self.strand1 = strand1
self.strand2 = strand2
## column indices per file type
cols_pairs = ColIndices(2, 4, 5, 6)
cols_merged_nodups = ColIndices(2, 6, 0, 4)
cols_old_merged_nodups = ColIndices(3, 7, 1, 5)
## orientation representation per file type
orientation_list_pairs = ['+-','-+','++','--']
orientation_list_merged_nodups = ['016','160','00','1616']
## common across input formats
orientation_names = ['Inner','Outer','Right','Left']
class GenomeSize(object):
def __init__(self, chromsize_file):
"""return a dictionary of chromosome : size pairs from a chromsize file."""
self.chrsize=dict()
self.total_len=0
with open(chromsize_file,'r') as f:
for line in f:
chrom, size = line.strip().split('\t')
self.chrsize[chrom] = int(size)
self.total_len += int(size)
self.nChr = len(self.chrsize)
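# A minimal usage sketch (the file name is an assumption, not part of this module):
# the chromsize file is expected to have two tab-separated columns, chromosome name and
# length in bp, e.g. a line like "chr1<TAB>248956422".
#
#   gs = GenomeSize("hg38.chrom.sizes")
#   gs.nChr, gs.total_len   # number of chromosomes and their summed length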
class CisTransStat(object):
"""Summary statistics including cis-trans ratio"""
def __init__(self):
self.cis = 0
self.trans = 0
self.cis_short = 0
self.total = 0
def calculate_total(self):
self.total = self.cis + self.cis_short + self.trans
def calculate_cis_to_trans(self):
self.cis_to_trans = float(self.cis) / float(self.cis+self.trans) * 100
def calculate_percent_long_range_intra(self):
self.p_long_range_intra = float(self.cis) / float(self.total) * 100
def print_stat(self, fout):
fout.write("Total reads\t{:,}\n".format(self.total))
fout.write("Short cis reads (<20kb)\t{:,}\n".format(self.cis_short))
fout.write("Cis reads (>=20kb)\t{:,}\n".format(self.cis))
fout.write("Trans reads\t{:,}\n".format(self.trans))
fout.write("Cis/Trans ratio\t{:.3f}\n".format(self.cis_to_trans))
fout.write("% Long-range intrachromosomal reads\t{:.3f}\n".format(self.p_long_range_intra))
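# Worked example (illustrative numbers, not from any dataset): with cis=50, cis_short=10
# and trans=50, calculate_total() gives 110, calculate_cis_to_trans() gives
# 50/(50+50)*100 = 50.0, and calculate_percent_long_range_intra() gives 50/110*100 ~ 45.5.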
class SeparationStat(object):
"""Statistics to be calculated for each separation distance bin"""
def __init__(self, orientation_list, gs, pseudocount=1E-100):
"""gs: GenomeSize object"""
self.orientation_list = orientation_list
self.gs = gs
self.chr_list = list(gs.chrsize.keys())
self.chr_list.sort()
self.pseudocount = pseudocount
# per-orientation
self.count_per_ori = { a: 0 for a in orientation_list }
self.log10count_per_ori = { a: 0 for a in orientation_list }
self.pcount_per_ori = { a: 0 for a in orientation_list }
# per-chromosome
self.count_per_chr = { a: 0 for a in gs.chrsize.keys() }
self.allpossible_count_per_chr = { a: 0 for a in gs.chrsize.keys() }
self.prob_per_chr = { a: 0 for a in gs.chrsize.keys() }
self.log10prob_per_chr = { a: 0 for a in gs.chrsize.keys() }
# total
self.sumcount = 0
self.log10sumcount = 0
self.prob = 0
self.log10prob = 0
self.allpossible_sumcount = 0
def increment(self, orientation, chrom):
"""increment both count_per_ori and count_per_chr together, so that we don't count the read on a weird chromosome for orientation and vice versa"""
if orientation in self.orientation_list: # skip if not included in orientation list
if chrom in self.chr_list: # skip if not included in chr list
self.count_per_ori[orientation] += 1
self.count_per_chr[chrom] += 1
def calculate_sumcount(self):
self.sumcount = sum(self.count_per_ori.values())
assert self.sumcount == sum(self.count_per_chr.values())
def calculate_log10count_per_ori(self):
for orientation in self.orientation_list:
c = self.count_per_ori[orientation] + self.pseudocount
self.log10count_per_ori[orientation] = math.log10( c )
def calculate_log10sumcount(self):
sc = self.sumcount + self.pseudocount * 4
self.log10sumcount = math.log10( sc )
def calculate_pcount_per_ori(self):
sc = self.sumcount + self.pseudocount * 4
for orientation in self.orientation_list:
c = self.count_per_ori[orientation] + self.pseudocount
self.pcount_per_ori[orientation] = c / sc
def calculate_contact_probability_per_chr(self, s, bin_size):
"""Calculate contact probability for a given separation distance and bin size
s is the representative log10 separation distance.
"""
for chrom in self.chr_list:
self.allpossible_count_per_chr[chrom] = self.gs.chrsize[chrom] - 10**s - 1
if self.allpossible_count_per_chr[chrom] <= 0: # the chromosome is smaller than s
self.allpossible_count_per_chr[chrom] = 0
self.prob_per_chr[chrom] = 0
else:
self.prob_per_chr[chrom] = self.count_per_chr[chrom] / self.allpossible_count_per_chr[chrom] / bin_size
self.log10prob_per_chr[chrom] = math.log10(self.prob_per_chr[chrom] + self.pseudocount)
def calculate_contact_probability(self, s, bin_size):
"""Calculate contact probability for a given separation distance and bin size
s is the representative log10 separation distance.
"""
self.allpossible_sumcount = sum(self.allpossible_count_per_chr.values())
if self.allpossible_sumcount == 0:
self.prob = 0
else:
self.prob = self.sumcount / self.allpossible_sumcount / bin_size
self.log10prob = math.log10(self.prob + self.pseudocount)
def print_content(self, fout, bin_mid, bin_range_string):
print_str = "{:.3f}\t".format(bin_mid)
print_str += "{}\t".format(bin_range_string)
print_str += '\t'.join('{}'.format(self.count_per_ori[ori]) for ori in self.orientation_list )
print_str += "\t{}\t".format(self.sumcount)
print_str += '\t'.join('{:.3f}'.format(self.log10count_per_ori[ori]) for ori in self.orientation_list )
print_str += "\t{:.3f}\t".format(self.log10sumcount)
print_str += '\t'.join('{:.3f}'.format(self.pcount_per_ori[ori]) for ori in self.orientation_list )
print_str += "\t{:.3E}".format(self.allpossible_sumcount)
print_str += "\t{:.3E}".format(self.prob)
print_str += "\t{:.3f}\t".format(self.log10prob)
print_str += '\t'.join('{:.3E}'.format(self.count_per_chr[chr]) for chr in self.chr_list )
print_str += '\t'
print_str += '\t'.join('{:.3E}'.format(self.allpossible_count_per_chr[chr]) for chr in self.chr_list )
print_str += '\t'
print_str += '\t'.join('{:.3E}'.format(self.prob_per_chr[chr]) for chr in self.chr_list )
print_str += '\t'
print_str += '\t'.join('{:.3f}'.format(self.log10prob_per_chr[chr]) for chr in self.chr_list )
print_str += '\n'
fout.write(print_str)
def print_header(self, fout):
header_str = "distance\t" \
+ 'distance_range(bp)\t' \
+ '\t'.join('count.{}'.format(k) for k in orientation_names) \
+ '\tsum\t' \
+ '\t'.join('log10count.{}'.format(k) for k in orientation_names) \
+ '\tlog10sum\t' \
+ '\t'.join('proportion.{}'.format(k) for k in orientation_names) \
+ '\tallpossible_sumcount' \
+ '\tprob' \
+ '\tlog10prob\t' \
+ '\t'.join('count_per_chr.{}'.format(k) for k in self.chr_list) \
+ '\t' \
+ '\t'.join('allpossible_count_per_chr.{}'.format(k) for k in self.chr_list) \
+ '\t' \
+ '\t'.join('prob_per_chr.{}'.format(k) for k in self.chr_list) \
+ '\t' \
+ '\t'.join('log10prob_per_chr.{}'.format(k) for k in self.chr_list) \
+ '\n'
fout.write(header_str)
class DistanceBin(object):
"""class related to conversion between distance, log distance, distance bin number, bin size, etc"""
def __init__(self, min_logdistance, max_logdistance, log_binsize):
self.min_logdistance = min_logdistance
self.max_logdistance = max_logdistance
self.log_binsize = log_binsize
self.max_bin_number = int( ( max_logdistance - log_binsize / 2 ) / log_binsize )
self.range = range(0, self.max_bin_number+1)
def get_bin_size(self, bin_mid):
return(10**( bin_mid + self.log_binsize/2 ) - 10**( bin_mid - self.log_binsize/2 ))
def get_bin_mid(self, bin_number):
"""return midpoint of a bin at log scale"""
return(bin_number * self.log_binsize + self.log_binsize/2)
def get_bin_number(self, distance):
log_distance = math.log10(distance)
bin_number = int(log_distance / self.log_binsize)
return(bin_number)
def get_bin_range_string(self, bin_mid):
minval = int(round(10**(bin_mid - self.log_binsize/2)))
maxval = int(round(10**(bin_mid + self.log_binsize/2)))
return("{:,}~{:,}".format(minval, maxval))
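# A minimal usage sketch (illustrative values only, not part of the original module):
#
#   bins = DistanceBin(min_logdistance=1, max_logdistance=8.4, log_binsize=0.1)
#   n = bins.get_bin_number(10000)     # a ~10 kb separation falls into bin 40
#   mid = bins.get_bin_mid(n)          # log10 midpoint of that bin, 4.05
#   bins.get_bin_range_string(mid)     # roughly "10,000~12,589" bp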
def get_distance_and_orientation (line, cols):
"""return distance and orientation, given a list representing a line from the pairs input file and a ColIndices object """
distance = int(line[cols.pos2]) - int(line[cols.pos1])
# distance will always be > 0 for upper triangle, but in case it is not true.
if distance > 0:
orientation = str(line[cols.strand1]) + str(line[cols.strand2])
else:
orientation = str(line[cols.strand2]) + str(line[cols.strand1])
distance = abs(distance)
return(distance, orientation)
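# Illustrative example (pairs-format columns, made-up values): for a line with pos1=1000,
# pos2=5000, strand1='+', strand2='-', this returns (4000, '+-'); if the pair were stored
# with pos2 < pos1, the strands are concatenated in the reverse order instead.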
def cis_trans_ratio (pairs_file, outfilename, DIST_THRES=20000, cols= cols_pairs):
"""measure cis/trans ratio for a given pairs file"""
cts = CisTransStat()
tb=pypairix.open(pairs_file)
chrplist = tb.get_blocknames()
for chrp in chrplist:
it = tb.querys2D(chrp)
chr1, chr2 = chrp.split(SEPARATOR)
if chr1 == chr2:
for x in it:
distance = get_distance_and_orientation(x, cols)[0]
if distance >= DIST_THRES:
cts.cis += 1
else:
cts.cis_short += 1
else:
cts.trans += sum(1 for x in it)
cts.calculate_total()
cts.calculate_cis_to_trans()
cts.calculate_percent_long_range_intra()
# print stats
with open(outfilename,'w') as f:
cts.print_stat(f)
def distance_histogram (pairs_file, chromsize_file, outfilename, cols=cols_pairs, orientation_list = orientation_list_pairs, max_logdistance=8.4, min_logdistance=1, log_binsize=0.1):
"""create a log10-scale binned histogram table for read separation distance histogram
    The histogram is stratified by read orientation (4 different orientations)
The table includes raw counts, log10 counts (pseudocounts added), contact probability, log10 contact probability, and proportions for orientation (pseudocounts added)
Bin is represented by the mid value at the log10 scale.
log_binsize: distance bin size in log10 scale.
"""
gs = GenomeSize(chromsize_file)
bins = DistanceBin(min_logdistance, max_logdistance, log_binsize)
ss = []
for _ in bins.range:
ss.append(SeparationStat(orientation_list,gs))
tb=pypairix.open( pairs_file )
chrplist = tb.get_blocknames()
# calculate histogram
for chrp in chrplist:
chr1, chr2 = chrp.split( SEPARATOR )
if chr1 == chr2:
it = tb.querys2D( chrp )
for x in it:
distance, orientation = get_distance_and_orientation (x, cols)
if orientation not in orientation_list: # for some exceptional cases like '4' in merged_nodup
continue
# remove zero distance, count.
if distance > 0:
bin_number = bins.get_bin_number(distance)
if bin_number <= bins.max_bin_number:
ss[bin_number].increment(orientation, chr1)
# calculate total
for bin_number in bins.range:
ss[bin_number].calculate_sumcount()
# calculate histogram in log10 counts and proportion
for bin_number in bins.range:
ss[bin_number].calculate_log10count_per_ori()
ss[bin_number].calculate_log10sumcount()
ss[bin_number].calculate_pcount_per_ori()
# calculate contact probability
for bin_number in bins.range:
bin_mid = bins.get_bin_mid(bin_number)
bin_size = bins.get_bin_size(bin_mid)
ss[bin_number].calculate_contact_probability_per_chr(bin_mid, bin_size)
ss[bin_number].calculate_contact_probability(bin_mid, bin_size)
# print histogram
with open(outfilename,'w') as f:
ss[0].print_header(f)
for bin_number in bins.range:
bin_mid = bins.get_bin_mid(bin_number)
if bin_mid <= bins.max_logdistance and bin_mid >= bins.min_logdistance:
ss[bin_number].print_content(f, bin_mid, bins.get_bin_range_string(bin_mid))
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description = 'QC for Pairs')
parser.add_argument('-p','--pairs', help = "input pairs file")
parser.add_argument('-c','--chrsize', help = "input chromsize file")
parser.add_argument('-t','--input_type', help = "input file type (P:pairs, M:merged_nodups, OM:old_merged_nodups)")
    parser.add_argument('-O','--outdir_prefix', help = "prefix of output directory (output directory name will be <outdir_prefix>_report)")
parser.add_argument('-s','--sample_name', help = "sample name to be used as the file prefix and in the report (do not include space)")
    parser.add_argument('-M','--max_logdistance', help = "Maximum log10 distance. This number should not be larger than the log10 length of the largest chromosome. Choose 8.2 for mouse. Default 8.4 (human).")
args = parser.parse_args()
if args.outdir_prefix:
outdir = args.outdir_prefix + '_report'
else:
outdir = 'report'
if not os.path.exists(outdir):
os.mkdir(outdir)
# input type selection
if args.input_type == 'P':
cols = cols_pairs
orientation_list = orientation_list_pairs
elif args.input_type == 'M':
cols = cols_merged_nodups
orientation_list = orientation_list_merged_nodups
elif args.input_type == 'OM':
cols = cols_old_merged_nodups
orientation_list = orientation_list_merged_nodups
else:
        print("Unknown input type")
        raise SystemExit(1)
# sample name
if args.sample_name:
sample_name = args.sample_name
else:
sample_name = 'sample'
CIS_TRANS_OUT_FILE_PATH = outdir + '/' + sample_name + '.' + CIS_TRANS_OUT_FILE_SUFFIX
PLOT_TABLE_OUT_FILE_PATH = outdir + '/' + sample_name + '.' + PLOT_TABLE_OUT_FILE_SUFFIX
# max_logdistance
if args.max_logdistance:
max_logdist = float(args.max_logdistance)
else:
max_logdist = 8.4
# get the stats
cis_trans_ratio (args.pairs, outfilename=CIS_TRANS_OUT_FILE_PATH, cols = cols)
distance_histogram (args.pairs, args.chrsize, outfilename=PLOT_TABLE_OUT_FILE_PATH, cols = cols, orientation_list = orientation_list, max_logdistance = max_logdist)
``` |
{
"source": "jianhuashao/NTU_marking",
"score": 3
} |
#### File: jianhuashao/NTU_marking/ntu_student_marking.py
```python
from docx import *
from docx.shared import Inches
import pprint
import codecs
import sys
import smtplib
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.header import Header
from email import encoders
#import CONFIG_test as CONFIG
import CONFIG as CONFIG
from openpyxl import Workbook
doc_level = 0
current = 'pseudo_code'
students = {}
student = {}
def get_grade(s):
if s >= 70:
return "First"
if s >= 67 and s < 70:
return "High 2:1"
if s >= 63 and s < 67:
return "Mid 2:1"
if s >= 60 and s < 64:
return "Low 2:1"
if s >= 57 and s < 60:
return "High 2:2"
if s >= 53 and s < 57:
return "Mid 2:2"
if s >= 50 and s < 54:
return "Low 2:2"
if s >= 47 and s < 50:
return "High 3rd"
if s >= 43 and s < 47:
return "Mid 3rd"
if s >= 40 and s < 44:
return "Low 3rd"
if s >= 35 and s < 40:
return "Marginal Fail"
if s < 35:
return "Fail"
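# Usage sketch (not part of the original script): the thresholds above are checked
# top-down, so overlapping upper bounds (e.g. 63-64) are resolved by the earlier branch
# winning.
# get_grade(72) -> "First"; get_grade(68) -> "High 2:1"; get_grade(34) -> "Fail"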
def parse_student (t, post):
global doc_level, student, current
if t == '' and not post == '':
doc_level = 0
## save previous student into students
if not student.has_key('student_id'):
#print "student_id empty"
#sys.exit(1)
print
else:
students[student['student_id']] = student
#pprint.pprint(student)
#print '\n\n\n'
## parse name and student id
name_id = post
name_id = name_id.split('(')
studnet_name = name_id[0].strip()
student_id = name_id[1].replace(')', '').strip()
student = {'parts':{}, 'student_name': studnet_name, 'student_id': student_id}
if 'part a:' in t.lower():
score = t.lower().split(':')[1]
doc_level = 1
student['parts'][doc_level] = {'score':score, 'pseudo_code':[], 'js_code':[]}
elif 'part b:' in t.lower():
score = t.lower().split(':')[1]
doc_level = 2
student['parts'][doc_level] = {'score':score, 'pseudo_code':[], 'js_code':[]}
elif 'part c:' in t.lower():
score = t.lower().split(':')[1]
doc_level = 3
student['parts'][doc_level] = {'score':score, 'pseudo_code':[], 'js_code':[]}
elif 'overall:' in t.lower():
score = t.lower().split(':')
doc_level = 4
student['parts'][doc_level] = {'score':0, 'pseudo_code':[], 'js_code':[]}
elif 'pseudo code:' in t.lower():
current = 'pseudo_code'
elif 'js code:' in t.lower():
current = 'js_code'
elif 'all:' in t.lower() or 'you have therefore' in t.lower():
return
else:
#print t
if doc_level == 0:
return
if t == '':
return
student['parts'][doc_level][current].append(t)
def calculate_score_grade():
highest = 0
lowest = 0
average = 0
grades = {}
for student_id in students:
parts = students[student_id]['parts']
score_total = 0
for part_id in parts:
part_score = parts[part_id]['score']
part_score = int(part_score)
score_total = score_total + part_score
grade = get_grade(score_total)
students[student_id]['student_score'] = score_total
students[student_id]['student_grade'] = grade
        ## statistics
if score_total > highest:
highest = score_total
if score_total < lowest:
lowest = score_total
average = average + score_total
if not grades.has_key(grade):
grades[grade] = {}
grades[grade][student_id] = score_total
average = average * 1.0 / len(students)
    print "==== student performance statistics ===="
print "Students counts:", len(students)
print "Highest:", highest, 'Lowest:', lowest, 'Average:', average
for grade in grades:
print "Grade:", grade, 'has student counts:', len(grades[grade])
def parse_docx(doc_name):
pre = ''
document = Document(doc_name)
for post in document.paragraphs:
t = post.text.strip() ## curent line text
parse_student(pre, t)
pre = t
students[student['student_id']] = student
pprint.pprint(students)
print "Student counts:", len(students)
    calculate_score_grade()
##########################################################################
def print_student_grade_only(file_name):
f = codecs.open(file_name, 'w', encoding='utf-8')
f.write(u'%s\t%s\t%s\t%s\t\n'%('student_id', 'student_name', 'student_score', 'student_grade'))
for student_id in students:
student = students[student_id]
student_name = student['student_name']
student_score = student['student_score']
student_grade = student['student_grade']
f.write(u'%s\t%s\t%s\t%s\t\n'%(student_id, student_name, student_score, student_grade))
f.close()
print "== print_student_grade_only: %s =="%(file_name)
def print_student_comments_in_one_file(file_name, title, show_score):
doc = Document()
for student_id in students:
student = students[student_id]
student_name = student['student_name']
student_score = student['student_score']
student_grade = student['student_grade']
doc.add_heading(title, 0)
doc.add_heading(u'Student ID: %s, Student Name: %s'%(student_id, student_name), 1)
doc.add_heading(u'Final Grade: %s'%(student_grade), 1)
parts = student['parts']
for part_id in parts:
part_label = u'Part-%s'%(part_id)
part_score = parts[part_id]['score']
if (part_id == 4):
part_label = 'Overall'
part_score = student_score
if show_score == True:
doc.add_heading(u'%s: %s :'%(part_label, part_score), 2)
else:
doc.add_heading(u'%s: '%(part_label), 2)
doc.add_heading('Pseudo code', 4)
for c in parts[part_id]['pseudo_code']:
doc.add_paragraph(c, style='ListBullet2')
doc.add_heading('JS code', 4)
for c in parts[part_id]['js_code']:
doc.add_paragraph(c, style='ListBullet2')
doc.add_page_break()
doc.save(file_name)
print "== print_student_comments_in_one_file: %s =="%(file_name)
def print_student_comments_in_seperate_file(file_name_prefix, title, show_score):
for student_id in students:
student = students[student_id]
student_name = student['student_name']
student_score = student['student_score']
student_grade = student['student_grade']
doc = Document()
doc.add_heading(title, 0)
doc.add_heading(u'Student ID: %s, Student Name: %s'%(student_id, student_name), 1)
doc.add_heading(u'Final Grade: %s'%(student_grade), 1)
parts = student['parts']
for part_id in parts:
part_label = u'Part-%s'%(part_id)
part_score = parts[part_id]['score']
if (part_id == 4):
part_label = 'Overall'
part_score = student_score
if show_score == True:
doc.add_heading(u'%s: %s :'%(part_label, part_score), 2)
else:
doc.add_heading(u'%s: '%(part_label), 2)
doc.add_heading('Pseudo code', 4)
for c in parts[part_id]['pseudo_code']:
doc.add_paragraph(c, style='ListBullet2')
doc.add_heading('JS code', 4)
for c in parts[part_id]['js_code']:
doc.add_paragraph(c, style='ListBullet2')
doc.add_page_break()
file_name = u'%s%s.docx'%(file_name_prefix, student_id)
doc.save(file_name)
print "== print_student_comments_in_seperate_file: %s =="%(file_name)
def print_student_comments_to_email(file_name_prefix, email_title_prefix, is_test, mail_type):
for student_id in students:
student = students[student_id]
student_name = student['student_name']
student_score = student['student_score']
student_grade = student['student_grade']
parts = student['parts']
parts_html = ''
for part_id in parts:
part_label = u'Part-%s'%(part_id)
if (part_id == 4):
part_label = 'Overall'
part_score = student_score
part_html_pseudo_code = ''
for c in parts[part_id]['pseudo_code']:
part_html_pseudo_code = u'%s<li>%s</li>'%(part_html_pseudo_code, c)
part_html_pseudo_code = u'<ul>%s</ul>'%(part_html_pseudo_code)
part_html_js_code = ''
for c in parts[part_id]['js_code']:
part_html_js_code = u'%s<li>%s</li>'%(part_html_js_code, c)
part_html_js_code = u'<ul>%s</ul>'%(part_html_js_code)
part_html = part_template%(part_label, part_html_pseudo_code, part_html_js_code)
parts_html = u'%s<br>%s'%(parts_html, part_html)
file_name = u'%s%s.docx'%(file_name_prefix, student_id)
email_body = email_template%(student_name, student_id, student_grade, parts_html)
email_title = u'Web-Based-Programing In-Class test results: %s'%(student_id)
#print email_body
if is_test == True:
address_to = CONFIG.address_to_test
send_email(mail_type, email_title, email_body, address_to)
print "== print_student_comments_to_email: %s =="%(file_name)
print "only in test"
return
else:
            address_to = '<EMAIL>' # recipient address redacted in the source; originally built from student_id
send_email(mail_type, email_title, email_body, address_to)
print "== print_student_comments_to_email: %s =="%(file_name)
part_template = u'''
<b>%s</b>
<ul>Pseudo Code:
<ul>%s</ul>
</ul>
<ul>JS Code:
<ul>%s</ul>
</ul>
'''
email_template = u'''
To <b>%s</b>: <br>
<br>
Your student_id is <b>%s</b>, and your final grade is <b>%s</b> <br>
<br>
Bellow are your comments details: <br>
%s
'''
def send_email(mail_type, email_title, email_body, address_to):
#mail_type = 'gmail' # or ntu
is_attachment = False
if mail_type == 'gmail':
smtp_server = 'smtp.gmail.com'
smtp_port = 587
address_from = CONFIG.gmail_address_from
username = CONFIG.gmail_username
        password = '<PASSWORD>' # redacted in the source
if mail_type == 'ntu':
smtp_server = 'smtphost.ntu.ac.uk'
smtp_port = 25
address_from = CONFIG.ntu_address_from
#username = CONFIG.ntu_username
#password = CONFIG.ntu_password
address_cc = CONFIG.address_cc
msg = MIMEMultipart('alternative')
msg.set_charset('utf-8')
msg['FROM'] = address_from
msg['TO'] = address_to
msg['CC'] = address_cc
msg['Subject'] = email_title
part2 = MIMEText(email_body, 'html', 'utf-8')
msg.attach(part2)
if is_attachment == True:
### this is for attachment, disabled at moment
#part = MIMEBase('application', "octet-stream")
#part.set_payload(open('comments/H-N0491912.docx',"rb").read() )
#encoders.encode_base64(part)
#part.add_header('Content-Disposition', 'attachment; filename="H-N0491912.docx"')
#msg.attach(part)
#print msg
print 'no attachment now'
server = smtplib.SMTP(smtp_server, smtp_port)
server.ehlo()
server.starttls()
if mail_type == 'ntu':
print "NTU do not need to login"
else:
server.login(username, password)
server.sendmail(address_from, address_to, msg.as_string())
server.quit()
def print_student_comments_to_one_excel(filename, group_name):
wb = Workbook()
ws = wb.create_sheet(1)
ws.title = group_name
j = 0
ws.cell(row=j, column=0).value = "student_id"
ws.cell(row=j, column=1).value = "student_name"
ws.cell(row=j, column=2).value = "student_grade"
ws.cell(row=j, column=3).value = "Part-A"
ws.cell(row=j, column=4).value = "Part-B"
ws.cell(row=j, column=5).value = "Part-C"
ws.cell(row=j, column=6).value = "Overall"
ws.cell(row=j, column=7).value = "Extra"
j = 1
for student_id in students:
#pprint.pprint(students[student_id])
#return
student = students[student_id]
student_name = student['student_name']
student_score = student['student_score']
student_grade = student['student_grade']
parts = student['parts']
parts_html = {}
for part_id in parts:
part_label = u'Part-%s'%(part_id)
if (part_id == 4):
part_label = 'Overall'
part_score = student_score
part_html_pseudo_code = 'COMMENTS FOR PSEUDO CODE: '
i = 0
for c in parts[part_id]['pseudo_code']:
i = i + 1
part_html_pseudo_code = u'%s %s) %s'%(part_html_pseudo_code, i, c)
#part_html_pseudo_code = u'<ul>%s</ul>'%(part_html_pseudo_code)
part_html_js_code = 'COMMENTS FOR JS CODE: '
i = 0
for c in parts[part_id]['js_code']:
i = i + 1
part_html_js_code = u'%s %s) %s'%(part_html_js_code, i, c)
#part_html_js_code = u'<ul>%s</ul>'%(part_html_js_code)
part_html = u'%s. %s'%(part_html_pseudo_code, part_html_js_code)
parts_html[part_id] = part_html
#pprint.pprint(parts_html)
#print
extra = u'You are awarded a %s'%(student_grade)
ws.cell(row=j, column=0).value = student_id
ws.cell(row=j, column=1).value = student_name
ws.cell(row=j, column=2).value = student_grade
ws.cell(row=j, column=3).value = parts_html[1]
ws.cell(row=j, column=4).value = parts_html[2]
ws.cell(row=j, column=5).value = parts_html[3]
ws.cell(row=j, column=6).value = parts_html[4]
ws.cell(row=j, column=7).value = extra
j = j + 1
wb.save(filename)
def main_in_group(doc_name, group_name):
doc_level = 0
current = 'pseudo_code'
student = {}
parse_docx(doc_name)
print_student_grade_only('./output_files/'+group_name+'-grade.txt')
print_student_comments_in_one_file('./output_files/'+group_name+'-comments_in_one_file_with_score.docx', 'Web-Based-Programing In-Class test result', True)
print_student_comments_in_one_file('./output_files/'+group_name+'-comments_in_one_file.docx', 'Web-Based-Programing In-Class test result', False)
print_student_comments_in_seperate_file('./comments/'+group_name+'-', 'Web-Based-Programing In-Class test result', False)
print_student_comments_to_one_excel('./output_files/'+group_name+'.xlsx', group_name)
### ready to send email
#send_email_in_test = True ############### make sure you changed here ##################
#mail_type = 'ntu' # or ntu ############### make sure you changed here ##################
#print_student_comments_to_email('./comments/'+group_name+'-', 'Web-Based-Programing In-Class test result', send_email_in_test, mail_type)
if __name__ == "__main__":
main_in_group('./input_files/Marking-Group-H.docx', 'H') ############### make sure you changed here ##################
main_in_group('./input_files/Marking-Group-F.docx', 'F') ############### make sure you changed here ##################
``` |
{
"source": "jianhuasong/medical-image-segmentation2",
"score": 2
} |
#### File: nnUNetV1/loss_functions/lovasz_loss.py
```python
import torch
import torch.nn as nn
#from torch.autograd import Function
def lovasz_grad(gt_sorted):
"""
Computes gradient of the Lovasz extension w.r.t sorted errors
See Alg. 1 in paper
"""
p = len(gt_sorted)
gts = gt_sorted.sum()
intersection = gts - gt_sorted.float().cumsum(0)
union = gts + (1 - gt_sorted).float().cumsum(0)
jaccard = 1. - intersection / union
if p > 1: # cover 1-pixel case
jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
return jaccard
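# Worked example (values chosen for illustration): for gt_sorted = tensor([1., 1., 0.]),
# p=3 and gts=2, the intersection term gts - cumsum(gt) is [1, 0, 0] and the union term is
# [2, 2, 3], giving Jaccard losses 1 - intersection/union = [0.5, 1, 1]; the differencing
# step then returns the gradient [0.5, 0.5, 0.0].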
class LovaszSoftmax(nn.Module):
def __init__(self, reduction='mean'):
super(LovaszSoftmax, self).__init__()
self.reduction = reduction
def prob_flatten(self, input, target):
assert input.dim() in [4, 5]
num_class = input.size(1)
if input.dim() == 4:
input = input.permute(0, 2, 3, 1).contiguous()
input_flatten = input.view(-1, num_class)
elif input.dim() == 5:
input = input.permute(0, 2, 3, 4, 1).contiguous()
input_flatten = input.view(-1, num_class)
target_flatten = target.view(-1)
return input_flatten, target_flatten
def lovasz_softmax_flat(self, inputs, targets):
num_classes = inputs.size(1)
losses = []
for c in range(num_classes):
target_c = (targets == c).float()
if num_classes == 1:
input_c = inputs[:, 0]
else:
input_c = inputs[:, c]
loss_c = (torch.autograd.Variable(target_c) - input_c).abs()
loss_c_sorted, loss_index = torch.sort(loss_c, 0, descending=True)
target_c_sorted = target_c[loss_index]
losses.append(torch.dot(loss_c_sorted, torch.autograd.Variable(lovasz_grad(target_c_sorted))))
losses = torch.stack(losses)
if self.reduction == 'none':
loss = losses
elif self.reduction == 'sum':
loss = losses.sum()
else:
loss = losses.mean()
return loss
def forward(self, inputs, targets):
# print(inputs.shape, targets.shape) # (batch size, class_num, x,y,z), (batch size, 1, x,y,z)
inputs, targets = self.prob_flatten(inputs, targets)
# print(inputs.shape, targets.shape)
losses = self.lovasz_softmax_flat(inputs, targets)
return losses
# class net(nn.Module):
# def __init__(self, in_channels, num_classes):
# super(net, self).__init__()
# self.conv = nn.Conv3d(in_channels, num_classes, (1, 3, 3), padding=(0, 1, 1))
# def forward(self, input):
# out = self.conv(input)
# return out
# from torch.optim import Adam
# BS = 2
# num_classes = 8
# dim, hei, wid = 8, 64, 64
# data = torch.rand(BS, num_classes, dim, hei, wid)
# model = net(num_classes, num_classes)
# target = torch.zeros(BS, dim, hei, wid).random_(num_classes)
# Loss = LovaszSoftmax()
# optim = Adam(model.parameters(), lr=0.01,betas=(0.99,0.999))
# for step in range(2):
# out = model(data)
# loss = Loss(out, target)
# optim.zero_grad()
# loss.backward()
# optim.step()
# print(loss)
``` |
{
"source": "jianhuupenn/TESLA",
"score": 2
} |
#### File: TESLA_package/TESLA/imputation.py
```python
import os,csv,re, time
import cv2
import pandas as pd
import numpy as np
from . util import *
from . contour_util import *
from . calculate_dis import *
def imputation(img, raw, cnt, genes, shape="None", res=50, s=1, k=2, num_nbs=10):
binary=np.zeros((img.shape[0:2]), dtype=np.uint8)
cv2.drawContours(binary, [cnt], -1, (1), thickness=-1)
#Enlarged filter
cnt_enlarged = scale_contour(cnt, 1.05)
binary_enlarged = np.zeros(img.shape[0:2])
cv2.drawContours(binary_enlarged, [cnt_enlarged], -1, (1), thickness=-1)
x_max, y_max=img.shape[0], img.shape[1]
x_list=list(range(int(res), x_max, int(res)))
y_list=list(range(int(res), y_max, int(res)))
x=np.repeat(x_list,len(y_list)).tolist()
y=y_list*len(x_list)
sudo=pd.DataFrame({"x":x, "y": y})
sudo=sudo[sudo.index.isin([i for i in sudo.index if (binary_enlarged[sudo.x[i], sudo.y[i]]!=0)])]
b=res
sudo["color"]=extract_color(x_pixel=sudo.x.tolist(), y_pixel=sudo.y.tolist(), image=img, beta=b, RGB=True)
z_scale=np.max([np.std(sudo.x), np.std(sudo.y)])*s
sudo["z"]=(sudo["color"]-np.mean(sudo["color"]))/np.std(sudo["color"])*z_scale
sudo=sudo.reset_index(drop=True)
#------------------------------------Known points---------------------------------#
known_adata=raw[:, raw.var.index.isin(genes)]
known_adata.obs["x"]=known_adata.obs["pixel_x"]
known_adata.obs["y"]=known_adata.obs["pixel_y"]
known_adata.obs["color"]=extract_color(x_pixel=known_adata.obs["pixel_x"].astype(int).tolist(), y_pixel=known_adata.obs["pixel_y"].astype(int).tolist(), image=img, beta=b, RGB=False)
known_adata.obs["z"]=(known_adata.obs["color"]-np.mean(known_adata.obs["color"]))/np.std(known_adata.obs["color"])*z_scale
#-----------------------Distance matrix between sudo and known points-------------#
start_time = time.time()
dis=np.zeros((sudo.shape[0],known_adata.shape[0]))
x_sudo, y_sudo, z_sudo=sudo["x"].values, sudo["y"].values, sudo["z"].values
x_known, y_known, z_known=known_adata.obs["x"].values, known_adata.obs["y"].values, known_adata.obs["z"].values
print("Total number of sudo points: ", sudo.shape[0])
for i in range(sudo.shape[0]):
if i%1000==0:print("Calculating spot", i)
cord1=np.array([x_sudo[i], y_sudo[i], z_sudo[i]])
for j in range(known_adata.shape[0]):
cord2=np.array([x_known[j], y_known[j], z_known[j]])
dis[i][j]=distance(cord1, cord2)
print("--- %s seconds ---" % (time.time() - start_time))
dis=pd.DataFrame(dis, index=sudo.index, columns=known_adata.obs.index)
#-------------------------Fill gene expression using nbs---------------------------#
sudo_adata=AnnData(np.zeros((sudo.shape[0], len(genes))))
sudo_adata.obs=sudo
sudo_adata.var=known_adata.var
#Impute using all spots, weighted
for i in range(sudo_adata.shape[0]):
if i%1000==0:print("Imputing spot", i)
index=sudo_adata.obs.index[i]
dis_tmp=dis.loc[index, :].sort_values()
nbs=dis_tmp[0:num_nbs]
dis_tmp=(nbs.to_numpy()+0.1)/np.min(nbs.to_numpy()+0.1) #avoid 0 distance
if isinstance(k, int):
weights=((1/(dis_tmp**k))/((1/(dis_tmp**k)).sum()))
else:
weights=np.exp(-dis_tmp)/np.sum(np.exp(-dis_tmp))
row_index=[known_adata.obs.index.get_loc(i) for i in nbs.index]
sudo_adata.X[i, :]=np.dot(weights, known_adata.X[row_index,:])
return sudo_adata
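# A minimal usage sketch (argument values are assumptions based on the signature above,
# not a verified TESLA example): `img` is the histology image as a NumPy array, `raw` is
# an AnnData whose obs carries pixel_x / pixel_y spot coordinates, `cnt` is an OpenCV
# tissue contour, and `genes` lists the genes to impute onto the super-resolution grid.
#
#   sudo_adata = imputation(img, counts_adata, tissue_cnt, genes=["CD3D", "MS4A1"],
#                           res=50, s=1, k=2, num_nbs=10)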
```
#### File: TESLA_package/TESLA/TLS_detection.py
```python
import os, sys, csv,re, time, random
import cv2
import numpy as np
import pandas as pd
import scanpy as sc
from scipy.sparse import issparse
from . util import *
from . contour_util import *
def TLS_detection( pred_refined_list, cluster_density_list, num_required, cnt_color, pooling="min"):
pred_TLS=np.zeros([len(pred_refined_list[0]), len(pred_refined_list)])
for i in range(len(pred_refined_list)):
tmp=np.zeros(pred_refined_list[i].shape)
for k, v in cluster_density_list[i].items():
tmp[pred_refined_list[i]==k]=v/np.max(list(cluster_density_list[i].values()))
pred_TLS[:,i]=tmp
target = np.partition(pred_TLS, -num_required, axis=1)[:,-num_required:] #Select top num_required
if pooling=="mean":
target=np.mean(target, axis=1)
elif pooling=="min":
target=np.min(target, axis=1)
else:
print("Error! Pooling logic not understood.")
target=(target-np.min(target))/(np.max(target)-np.min(target))
target[target<0.5]=0
return target
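# A minimal usage sketch (inputs are assumptions, not from the original pipeline):
# pred_refined_list holds one refined per-pixel cluster assignment per marker/meta gene,
# cluster_density_list holds the matching {cluster: density} dicts, and num_required is
# how many markers must be jointly enriched. Note that cnt_color is accepted here but is
# only used by plot_TLS_score below.
#
#   tls_score = TLS_detection(pred_refined_list, cluster_density_list,
#                             num_required=2, cnt_color=None, pooling="min")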
def plot_TLS_score(img, resize_factor, binary,target, cnt_color):
resize_width=int(img.shape[1]*resize_factor)
resize_height=int(img.shape[0]*resize_factor)
binary_resized=cv2.resize(binary, (resize_width, resize_height))
img_resized =cv2.resize(img, (resize_width, resize_height))
target_img=target.reshape(resize_height, resize_width)
    target_img_rgb=(cnt_color((target*255).astype("int"))[:, 0:3]*255).reshape(resize_height, resize_width,3).astype( np.uint8 )
target_img_rgb=cv2.cvtColor(target_img_rgb, cv2.COLOR_RGB2BGR)
ret_img=img_resized.copy()
#Whiten
white_ratio=0.5
ret_img[binary_resized!=0]=ret_img[binary_resized!=0]*(1-white_ratio)+np.array([255, 255, 255])*(white_ratio)
ret_img[target_img!=0]=target_img_rgb[target_img!=0]
ret_img[binary_resized==0]=255
return ret_img
```
#### File: TESLA_package/TESLA/util.py
```python
import scanpy as sc
import pandas as pd
import numpy as np
import scipy
import os
from anndata import AnnData,read_csv,read_text,read_mtx
from scipy.sparse import issparse
def prefilter_cells(adata,min_counts=None,max_counts=None,min_genes=200,max_genes=None):
if min_genes is None and min_counts is None and max_genes is None and max_counts is None:
raise ValueError('Provide one of min_counts, min_genes, max_counts or max_genes.')
id_tmp=np.asarray([True]*adata.shape[0],dtype=bool)
id_tmp=np.logical_and(id_tmp,sc.pp.filter_cells(adata.X,min_genes=min_genes)[0]) if min_genes is not None else id_tmp
id_tmp=np.logical_and(id_tmp,sc.pp.filter_cells(adata.X,max_genes=max_genes)[0]) if max_genes is not None else id_tmp
id_tmp=np.logical_and(id_tmp,sc.pp.filter_cells(adata.X,min_counts=min_counts)[0]) if min_counts is not None else id_tmp
id_tmp=np.logical_and(id_tmp,sc.pp.filter_cells(adata.X,max_counts=max_counts)[0]) if max_counts is not None else id_tmp
adata._inplace_subset_obs(id_tmp)
adata.raw=sc.pp.log1p(adata,copy=True) #check the rowname
print("the var_names of adata.raw: adata.raw.var_names.is_unique=:",adata.raw.var_names.is_unique)
def prefilter_genes(adata,min_counts=None,max_counts=None,min_cells=10,max_cells=None):
if min_cells is None and min_counts is None and max_cells is None and max_counts is None:
raise ValueError('Provide one of min_counts, min_genes, max_counts or max_genes.')
id_tmp=np.asarray([True]*adata.shape[1],dtype=bool)
id_tmp=np.logical_and(id_tmp,sc.pp.filter_genes(adata.X,min_cells=min_cells)[0]) if min_cells is not None else id_tmp
id_tmp=np.logical_and(id_tmp,sc.pp.filter_genes(adata.X,max_cells=max_cells)[0]) if max_cells is not None else id_tmp
id_tmp=np.logical_and(id_tmp,sc.pp.filter_genes(adata.X,min_counts=min_counts)[0]) if min_counts is not None else id_tmp
id_tmp=np.logical_and(id_tmp,sc.pp.filter_genes(adata.X,max_counts=max_counts)[0]) if max_counts is not None else id_tmp
adata._inplace_subset_var(id_tmp)
def prefilter_specialgenes(adata,Gene1Pattern="ERCC",Gene2Pattern="MT-"):
id_tmp1=np.asarray([not str(name).startswith(Gene1Pattern) for name in adata.var_names],dtype=bool)
id_tmp2=np.asarray([not str(name).startswith(Gene2Pattern) for name in adata.var_names],dtype=bool)
id_tmp=np.logical_and(id_tmp1,id_tmp2)
adata._inplace_subset_var(id_tmp)
def relative_func(expres):
#expres: an array counts expression for a gene
maxd = np.max(expres) - np.min(expres)
min_exp=np.min(expres)
rexpr = (expres - min_exp)/maxd
return rexpr
def plot_relative_exp(input_adata, gene, x_name, y_name,color,use_raw=False, spot_size=200000):
adata=input_adata.copy()
if use_raw:
X=adata.raw.X
else:
X=adata.X
if issparse(X):
X=pd.DataFrame(X.A)
else:
X=pd.DataFrame(X)
X.index=adata.obs.index
X.columns=adata.var.index
rexpr=relative_func(X.loc[:,gene])
adata.obs["rexpr"]=rexpr
fig=sc.pl.scatter(adata,x=x_name,y=y_name,color="rexpr",title=gene+"_rexpr",color_map=color,show=False,size=spot_size/adata.shape[0])
return fig
def plot_log_exp(input_adata, gene, x_name, y_name,color,use_raw=False):
adata=input_adata.copy()
    if use_raw: # use the stored raw matrix when requested (the original had this branch inverted relative to plot_relative_exp)
        X=adata.raw.X
    else:
        X=adata.X
if issparse(X):
X=pd.DataFrame(X.A)
else:
X=pd.DataFrame(X)
X.index=adata.obs.index
X.columns=adata.var.index
adata.obs["log"]=np.log((X.loc[:,gene]+1).tolist())
fig=sc.pl.scatter(adata,x=x_name,y=y_name,color="log",title=gene+"_log",color_map=color,show=False,size=200000/adata.shape[0])
return fig
def refine_clusters(pred, resize_height, resize_width, threshold, radius):
pixel_num=pd.Series(pred).value_counts()
clusters=pixel_num.index.tolist()
reorder_map={}
for i in range(pixel_num.shape[0]):
reorder_map[clusters[i]]=i
pred_reordered=pd.Series(pred).replace(reorder_map).to_numpy()
pixel_num=pd.Series(pred_reordered).value_counts()
# Number of clusters
nLabels = len(np.unique(pred_reordered))
# Number of main clusters
mainLabels=(pd.Series(pred_reordered).value_counts()>=threshold).sum()
#------------- Refine clusters ---------------------
main_clusters=pixel_num.index[pixel_num>=threshold].tolist()
minor_clusters=pixel_num.index[pixel_num<threshold].tolist()
pred_reordered_img = pred_reordered.reshape( (resize_height, resize_width))
max_x, max_y=resize_width, resize_height
replace_map={}
for i in minor_clusters:
nbs=[]
xy=np.where(pred_reordered_img==i)
for j in range(len(xy[0])):
x, y=xy[0][j], xy[1][j]
nbs=nbs+pred_reordered_img[max(0,x-radius):min(max_x,x+radius+1),max(0,y-radius):min(max_y,y+radius+1)].flatten().tolist()
nbs_num=pd.Series(nbs).value_counts()
if sum(nbs_num.index.isin(main_clusters))>0:
replace_map[i]=nbs_num.index[ nbs_num.index.isin(main_clusters) ][ 0 ]
pred_refined=pd.Series(pred_reordered).replace(replace_map).to_numpy()
return pred_refined
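# A minimal usage sketch (shapes and thresholds are illustrative assumptions): `pred` is
# the flattened per-pixel cluster label array of an image resized to
# (resize_height, resize_width); clusters smaller than `threshold` pixels are merged into
# the dominant neighbouring main cluster found within `radius` pixels.
#
#   pred_refined = refine_clusters(pred, resize_height=500, resize_width=400,
#                                  threshold=2000, radius=3)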
``` |
{
"source": "JianiDing/dla_cnn",
"score": 4
} |
#### File: dla_cnn/lucid_work/spritemap_1d.py
```python
from PIL import Image
def create_rows(layer, num_rows, num_cols, sprite_width, sprite_height, row_spacing, col_spacing):
"""
This function takes in individual channel visualizations from a layer and puts them together
in a single image spritemap row. Spacing is added between each visualization so they dont appear
stacked on top of each other.
:param layer: The layer we are creating our spritemap for
:param num_rows: The number of rows in the spritemap
:param num_cols: The number of columns in the spritemap
:param sprite_width: The width (in pixels) of each individual visualization
:param sprite_height: The height (in pixels) of each individual visualization
:param row_spacing: The amount of spacing (in pixels) between each visualization in a row
:param col_spacing: The amount of spacing (in pixels) between each visualization in a column
:return: List of PIL images, one for each row in the spritemap
"""
row_width = (num_cols * sprite_width) + (row_spacing * num_cols)
row_height = sprite_height + col_spacing
rows = []
fn = 0
for i in range(num_rows):
row = Image.new('RGBA', (row_width,row_height))
        offset = row_spacing // 2  # integer division: PIL's paste() needs integer pixel offsets
for i in range(num_cols):
image_file = 'data/neuron_vis/resized/' + layer + '/' + layer + '_' + str(fn) + '.png'
fn += 1
image = Image.open(image_file)
            row.paste(im=image, box=(offset, (col_spacing // 2)))
offset += (sprite_width + row_spacing)
rows.append(row)
return rows
def create_map(layer, num_rows, num_cols, sprite_width, sprite_height, row_spacing, col_spacing):
"""
This function creates a full sized map and adds rows created from the create_rows() function
:param layer: Layer we are creating spritemap for
:param num_rows: Number of rows in spritemap
:param num_cols: Number of cols in spritemap
:param sprite_width: The width (pixels) of each individual visualization
:param sprite_height: The height (pixels) of each individual visualization
:param row_spacing: Spacing (pixels) between visualizations in a row
:param col_spacing: Spacing (pixels) between visualizations in a column
:return: Full image spritemap
"""
row_width = (num_cols*sprite_width) + (row_spacing*num_cols)
row_height = sprite_height + col_spacing
# create blank map
map = Image.new('RGBA', (row_width, (num_rows*row_height)))
# Get images for each row
rows = create_rows(layer, num_rows, num_cols,sprite_width, sprite_height, row_spacing, col_spacing)
offset = 0
for i in range(num_rows):
# past row images into map
img = rows[i]
map.paste(im=img, box=(0, offset))
offset += row_height
return map
def main():
map1 = create_map('conv1', 10, 10, 210, 157, 80, 100)
map2 = create_map('conv1_relu', 10, 10, 210, 157, 80, 100)
map3 = create_map('pool1', 10, 10, 210, 157, 80, 100)
map4 = create_map('conv2', 8, 12, 210, 157, 80, 10)
map5 = create_map('conv2_relu', 8, 12, 210, 157, 80, 10)
map6 = create_map('pool2', 8, 12, 210, 157, 80, 10)
map7 = create_map('conv3', 8, 12, 210, 157, 80, 10)
map8 = create_map('conv3_relu', 8, 12, 210, 157, 80, 10)
map9 = create_map('pool3', 8, 12, 210, 157, 80, 10)
#
#
map1.save('data/neuron_vis/sprites/conv1.png')
map2.save('data/neuron_vis/sprites/conv1_relu_resized.png')
map3.save('data/neuron_vis/sprites/pool1_resized.png')
map4.save('data/neuron_vis/sprites/conv2_resized.png')
map5.save('data/neuron_vis/sprites/conv2_relu_resized.png')
map6.save('data/neuron_vis/sprites/pool2_resized.png')
map7.save('data/neuron_vis/sprites/conv3_resized.png')
map8.save('data/neuron_vis/sprites/conv3_relu_resized.png')
map9.save('data/neuron_vis/sprites/pool3_resized.png')
if __name__ == "__main__":
main()
``` |
{
"source": "jianingy/sitebase",
"score": 2
} |
#### File: sitebase/backend/mongo.py
```python
class MongoBackend(object):
def connect(self, *args, **kwargs):
from txmongo import MongoConnectionPool
self.db = MongoConnectionPool(*args, **kwargs)
def upsert(self, input):
manifest_name = input["manifest"]
print manifest_name
dbBackend = MongoBackend()
```
#### File: sitebase/service/check_syntax.py
```python
from ujson import decode as json_decode, encode as json_encode
from twisted.web.resource import Resource
from twisted.web.server import NOT_DONE_YET
from twisted.internet import defer
from twisted.python.failure import Failure
from twisted.internet import threads
from sitebase import backend, slex
from sitebase.backend.postgres import dbBackend
from ysl.twisted.log import debug, info
import time
class CheckSyntaxService(Resource):
isLeaf = True
serviceName = "check_syntax"
def __init__(self, c, *args, **kwargs):
Resource.__init__(self, *args, **kwargs)
self.config = c
self.debug = c.get("server:main", "debug") == "1"
def prepare(self, request):
request.content.seek(0, 0)
content = request.content.read()
if content:
return defer.succeed(json_decode(content))
else:
return defer.succeed(None)
def finish(self, value, request):
request.setHeader('Content-Type', 'application/json; charset=UTF-8')
if isinstance(value, Failure):
err = value.value
if self.debug:
                print "-" * 30, "TRACEBACK", "-" * 30
                value.printTraceback()
                print "^" * 30, "TRACEBACK", "^" * 30
request.setResponseCode(500)
if isinstance(err, backend.ValidationError):
request.setResponseCode(400)
elif isinstance(err, backend.NodeNotFound):
request.setResponseCode(404)
elif isinstance(err, backend.NodeInUseError):
request.setResponseCode(400)
elif isinstance(err, backend.EmptyInputData):
request.setResponseCode(400)
elif isinstance(err, backend.BatchOperationError):
request.setResponseCode(400)
elif isinstance(err, backend.SearchGrammarError):
err = dict(error="syntax", message=str(err),
traceback=value.getTraceback())
elif (isinstance(err, Exception) and
not isinstance(err, backend.GenericError)):
err = dict(error="UnknownError", message=err.message)
request.write(json_encode(dict(err)) + "\n")
else:
request.setResponseCode(200)
request.write(json_encode(value) + "\n")
        info("response time: %.3fms" % ((time.time() - self.startTime) * 1000))
request.finish()
def cancel(self, err, call):
debug("Request cancelling.")
call.cancel()
def render(self, *args, **kwargs):
self.startTime = time.time()
return Resource.render(self, *args, **kwargs)
def check_syntax(self, q):
suffix = slex.parse(q)
dbBackend._build_where_clause(suffix)
return dict(success=True)
def render_GET(self, request):
q = request.args.get("q", [None])[0]
d = self.prepare(request)
request.notifyFinish().addErrback(self.cancel, d)
d.addCallback(lambda x: threads.deferToThread(self.check_syntax, q))
d.addBoth(self.finish, request)
return NOT_DONE_YET
```
#### File: sitebase/service/setting.py
```python
from ujson import decode as json_decode, encode as json_encode
from twisted.web.resource import Resource
from twisted.web.server import NOT_DONE_YET
from twisted.internet import defer
from twisted.internet.threads import deferToThread
from twisted.python.failure import Failure
from sitebase.utils import tag_memoize
from sitebase.backend.postgres import dbBackend
from ysl.twisted.log import debug, info
from yaml import load as yaml_load
import time
import re
import codecs
class SettingService(Resource):
isLeaf = True
serviceName = "setting"
routes = [
("^/field/?$", "render_field"),
("^/manifest/?$", "render_manifest"),
("^/cache/?$", "render_cache"),
]
def __init__(self, c, *args, **kwargs):
Resource.__init__(self, *args, **kwargs)
self.config = c
self.routes = map(lambda x: (re.compile(x[0]), x[1]), self.routes)
def prepare(self, request):
request.content.seek(0, 0)
content = request.content.read()
if content:
return defer.succeed(json_decode(content))
else:
return defer.succeed(None)
def finish(self, value, request):
request.setHeader('Content-Type', 'application/json; charset=UTF-8')
if isinstance(value, Failure):
err = value.value
request.setResponseCode(500)
error = dict(error="generic", message=str(err))
request.write(json_encode(error) + "\n")
else:
request.setResponseCode(200)
request.write(json_encode(value) + "\n")
info("respone time: %.3fms" % ((time.time() - self.startTime) * 1000))
request.finish()
def cancel(self, err, call):
debug("Request cancelling.")
call.cancel()
def select(self, input, node_id):
return dbBackend.select_cache(node_id)
def render(self, *args, **kwargs):
self.startTime = time.time()
return Resource.render(self, *args, **kwargs)
def render_GET(self, request):
path = request.path[8:]
d = self.prepare(request)
request.notifyFinish().addErrback(self.cancel, d)
matched = False
for route in self.routes:
if route[0].match(path):
d.addCallback(lambda x, y:
deferToThread(getattr(self, route[1]), x, y),
request)
matched = True
break
if not matched:
d.addCallback(self.render_not_found, request)
d.addBoth(self.finish, request)
return NOT_DONE_YET
def render_not_found(self, input, request):
return dict()
@tag_memoize('field')
def render_field(self, input, request):
yaml = self.config.get("extra", "field")
with codecs.open(yaml, "r", encoding="utf-8") as f:
tree = yaml_load(f.read())
return tree
return dict()
@tag_memoize('manifest')
def render_manifest(self, input, request):
yaml = self.config.get("extra", "manifest")
with codecs.open(yaml, "r", encoding="utf-8") as f:
tree = yaml_load(f.read())
return tree
return dict()
@tag_memoize('cache')
def render_cache(self, input, request):
yaml = self.config.get("extra", "cache")
with codecs.open(yaml, "r", encoding="utf-8") as f:
tree = yaml_load(f.read())
return tree
return dict()
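# --- Added illustration (not part of the original sitebase module): the regex
# routing table above in isolation. Each (pattern, handler-name) pair in
# SettingService.routes is compiled once, and render_GET dispatches to the
# first handler whose pattern matches the request path; unmatched paths fall
# through to render_not_found.
if __name__ == "__main__":
    _routes = [(re.compile(p), name) for p, name in SettingService.routes]
    def _dispatch(path):
        for pattern, name in _routes:
            if pattern.match(path):
                return name
        return "render_not_found"
    print(_dispatch("/field"))    # -> render_field
    print(_dispatch("/unknown"))  # -> render_not_found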
``` |
{
"source": "Jianiwang996/mobileinsight-core",
"score": 2
} |
#### File: analyzer/kpi/kpi_analyzer.py
```python
is_android = False
try:
from jnius import autoclass # For Android
try:
from service import mi2app_utils
PythonService = autoclass('org.kivy.android.PythonService')
pyService = PythonService.mService
Context = autoclass('android.content.Context')
ConnectivityManager = pyService.getSystemService(Context.CONNECTIVITY_SERVICE)
except Exception as e:
import main_utils
is_android = True
except Exception as e:
import sqlite3 # Laptop version
is_android = False
__all__ = ["KpiAnalyzer"]
from ..analyzer import *
# from .track_cell_info_analyzer import TrackCellInfoAnalyzer
import os, errno
import urllib.request, urllib.error, urllib.parse, json, time, datetime
import threading
from collections import deque
class KpiAnalyzer(Analyzer):
"""
An abstraction for KPI analyzer. It offers three functions
(1) Helpers to simplify the development of KPI tracking
(2) Automation of the KPI local storage management
(3) Automation of the KPI uploading to KPI map (cloud).
"""
    # Global variables: For asynchronous KPI upload
upload_thread = None
pending_upload_task = deque([]) # (kpi_name, kpi_val) pair list
def __init__(self):
Analyzer.__init__(self)
self.include_analyzer('TrackCellInfoAnalyzer', [])
        # initialize local database
self.supported_kpis = {} # Supported KPIs: kpi_name -> callback
        self.__db = None # Local database: kpi_name -> database
self.__conn = None # Local database cursor: kpi_name -> database
self.__op = ""
self.__phone_model = ""
self.__db_enabled = False
self.__periodicity = {}
self.__logcell = {}
self.__last_updated = {}
# Initialize uploading thread
if is_android and not KpiAnalyzer.upload_thread:
e = threading.Event()
KpiAnalyzer.upload_thread = threading.Thread(target=self.__upload_kpi_thread, args=(e,))
KpiAnalyzer.upload_thread.start()
def __del__(self):
if is_android:
mi2app_utils.detach_thread()
def enable_local_storage(self, enable_storage):
"""
Set if the local KPI should be stored
:param enable_storage: Whether to locally store the kpi. False by default
:type enable_storage: boolean
"""
self.__db_enabled = enable_storage
def register_kpi(self, kpi_type, kpi_name, callback, attributes = None):
"""
Declare a KPI to be supported
:param kpi_type: The type of the KPI (accessibility, retainability, mobility, availability, ...)
:type kpi_type: string
:param kpi_name: The name of the KPI
:type kpi_name: string
:param callback: The callbacks to update the KPI
        :type callback: Python method
        :returns: True if the registration succeeds, False otherwise (e.g., KPI already exists)
:param attributes:
:type attributes: None or a list of attributes
"""
full_name = 'KPI.'+kpi_type+'.'+kpi_name
if full_name in self.supported_kpis:
# KPI already exists
return False
self.supported_kpis[full_name] = callback
if not (self.__db and self.__conn):
if not self.__create_db():
self.log_info("Create database failed")
return False
if not self.__create_table(full_name, attributes):
return False
return True
def __create_table(self, kpi_name, attributes):
'''
Create SQL tables for the kpi
:param kpi_name: name of the kpi
:type kpi_name: string
:param attributes:
:type attributes: None or a list of attributes
'''
kpi_name = kpi_name.replace('.', '_')
if attributes:
sql_cmd = 'CREATE TABLE IF NOT EXISTS ' + \
kpi_name + "(id integer primary key autoincrement, "
for attribute in attributes:
sql_cmd += (str(attribute) + ' text, ')
sql_cmd += "timestamp timestamp, op text, phone_model text," \
"gps text, cell_id text, tai_id text, dl_freq text, ul_freq text, dl_bw text, ul_bw text," \
"allowed_access text, band_id text)"
else:
sql_cmd = 'CREATE TABLE IF NOT EXISTS ' + \
kpi_name + "(id integer primary key autoincrement, value text, timestamp timestamp, op text, phone_model text," \
"gps text, cell_id text, tai_id text, dl_freq text, ul_freq text, dl_bw text, ul_bw text," \
"allowed_access text, band_id text)"
# print sql_cmd
        # for rrc_sr, it may have several types; shall we build a table for each type?
if is_android:
self.__db.execSQL(sql_cmd)
else:
self.__db.execute(sql_cmd)
self.__conn.commit()
def __create_db(self):
"""
Create a local database for the KPI.
The database is stored at /sdcard/mobileinsight/kpi/
:returns: True if the database is successfully created (or already exists), False otherwise
"""
db_name = "Kpi"
try:
if is_android:
Environment = autoclass("android.os.Environment")
state = Environment.getExternalStorageState()
if not Environment.MEDIA_MOUNTED == state:
self.__db = None
return
sdcard_path = Environment.getExternalStorageDirectory().toString()
DB_PATH = os.path.join(sdcard_path, "mobileinsight/dbs")
activity = autoclass('org.kivy.android.PythonActivity')
if activity.mActivity:
self.__db = activity.mActivity.openOrCreateDatabase(
os.path.join(DB_PATH, db_name + '.db'), 0, None)
else:
service = autoclass('org.kivy.android.PythonService')
self.__db = service.mService.openOrCreateDatabase(
os.path.join(DB_PATH, db_name + '.db'), 0, None)
else:
try:
os.makedirs('./dbs/')
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
self.__conn = sqlite3.connect('./dbs/' + db_name + '.db')
self.__db = self.__conn.cursor()
return True
except BaseException: # TODO: raise warnings
return False
def list_kpis(self):
"""
Return a list of available KPIs
:returns: a list of string, each of which is a KPI name
"""
return list(self.supported_kpis.keys())
def __db_query(self, sql_cmd):
"""
Return query result of a sql_cmd
"""
try:
if is_android:
sql_res = self.__db.rawQuery(sql_cmd, None)
else:
sql_res = self.__db.execute(sql_cmd).fetchall()
# print sql_res
# if sql_res.getCount()==0: #the id does not exist
if (is_android and sql_res.getCount() == 0) or (
not is_android and len(sql_res) == 0):
return None
if is_android:
sql_res.moveToFirst()
# convert string to dictionary
res = sql_res.getString(0)
else:
res = sql_res[0][0]
return res
except BaseException: # TODO: raise warnings
return None
def local_query_kpi(self, kpi_name, cell_id = None, timestamp = None):
"""
Query the phone's locally observed KPI
:param kpi_name: The KPI to be queried
:type kpi_name: string
:param cell_id: cell global id
:type cell_id: string
:param timestamp: The timestamp of the KPI. If None, this function returns the latest KPI
:type timestamp: datetime
:returns: The KPI value, or None if the KPI is not available
"""
if not self.__db_enabled:
self.log_warning("Database is not enabled.")
return None
# cell_id = self.get_analyzer('TrackCellInfoAnalyzer').get_cur_cell_id()
# cell_id = cell_id if cell_id else None
kpi_name = kpi_name.replace('.', '_')
# print kpi_name
if kpi_name.endswith('SR'):
if cell_id:
if 'HO' in kpi_name:
kpi_suc = kpi_name[:-2]+'FAILURE'
else:
kpi_suc = kpi_name[:-2]+'SUC'
if timestamp:
sql_cmd = "select count(*) from " + kpi_suc + " where timestamp<\"" + \
str(timestamp) + "\" and cell_id=\"" + str(cell_id) +"\""
else:
sql_cmd = "select count(*) from " + kpi_suc + " where cell_id=\"" + str(cell_id) +"\""
# print sql_cmd
suc_num = self.__db_query(sql_cmd)
if 'HO' in kpi_name:
kpi_req = kpi_name[:-2]+'TOTAL'
else:
kpi_req = kpi_name[:-2]+'REQ'
if timestamp:
sql_cmd = "select count(*) from " + kpi_req + " where timestamp<\"" + \
str(timestamp) + "\" and cell_id=\"" + str(cell_id) +"\""
else:
sql_cmd = "select count(*) from " + kpi_req + " where cell_id=\"" + str(cell_id) +"\""
# print sql_cmd
req_num = self.__db_query(sql_cmd)
else:
if 'HO' in kpi_name:
kpi_suc = kpi_name[:-2]+'FAILURE'
else:
kpi_suc = kpi_name[:-2]+'SUC'
if timestamp:
sql_cmd = "select count(*) from " + kpi_suc + " where timestamp<\"" + \
str(timestamp) + "\""
else:
sql_cmd = "select count(*) from " + kpi_suc
# print sql_cmd
suc_num = self.__db_query(sql_cmd)
if 'HO' in kpi_name:
kpi_req = kpi_name[:-2]+'TOTAL'
else:
kpi_req = kpi_name[:-2]+'REQ'
if timestamp:
sql_cmd = "select count(*) from " + kpi_req + " where timestamp<\"" + \
str(timestamp) + "\""
else:
sql_cmd = "select count(*) from " + kpi_req
# print sql_cmd
req_num = self.__db_query(sql_cmd)
# print suc_num, req_num
if req_num and suc_num and int(req_num) > 0:
if 'HO' in kpi_name:
                return '{:.2f}'.format(float(int(req_num) - int(suc_num))/int(req_num)*100)+'%'
else:
return '{:.2f}'.format(float(suc_num)/int(req_num)*100)+'%'
return None
elif kpi_name.endswith('SUC') or kpi_name.endswith('REQ') or \
kpi_name.endswith('TOTAL') or kpi_name.endswith('FAILURE'):
if cell_id:
if timestamp:
sql_cmd = "select count(*) from " + kpi_name + " where timestamp<\"" + \
str(timestamp) + "\" and cell_id=\"" + str(cell_id) +"\""
else:
sql_cmd = "select count(*) from " + kpi_name + " where cell_id=\"" + str(cell_id) +"\""
else:
if timestamp:
sql_cmd = "select count(*) from " + kpi_name + " where timestamp<\"" + \
str(timestamp) + "\""
else:
sql_cmd = "select count(*) from " + kpi_name
# print sql_cmd
value = self.__db_query(sql_cmd)
if value:
return str(value)
return None
elif kpi_name.endswith('TPUT'):
if cell_id:
if timestamp:
sql_cmd = "select value from " + kpi_name + " where timestamp<\"" + \
str(timestamp) + "\" and cell_id=\"" + str(cell_id) +"\" order by id desc limit 1"
else:
sql_cmd = "select value from " + kpi_name + " where cell_id=\"" + \
str(cell_id) +"\" order by id desc limit 1"
else:
if timestamp:
sql_cmd = "select value from " + kpi_name + " where timestamp<\"" + \
str(timestamp) + "\" order by id desc limit 1"
else:
sql_cmd = "select value from " + kpi_name + " order by id desc limit 1"
# print sql_cmd
value = self.__db_query(sql_cmd)
if value:
return str(value)
return None
def remote_query_kpi(self, kpi_name, phone_model, operator, gps, timestamp):
"""
Query the remote cloud for the KPI
:param kpi_name: The KPI to be queried
:type kpi_name: string
        :param phone_model: The phone model
:type phone_model: string
:param operator: The network operator
:type operator: string
:param gps: The GPS coordinate
:type gps: string
:param timestamp: The timestamp of the KPI.
:type timestamp: datetime
:returns: The KPI value, or None if the KPI is not available
"""
#TODO: Implement the query of remote database
return None
def set_periodicity(self, kpi_showname, periodicity):
"""
Set periodicity of the analyzer
:param kpi_showname: The KPI to be queried, this is the showname
:type kpi_showname: string
        :param periodicity: periodicity (s, m, h, d represent scales of seconds, minutes, hours, days)
:type periodicity: string
"""
try:
kpi_name = kpi_showname.replace('.', '_')
if periodicity.isdigit():
self.__periodicity[kpi_name] = int(periodicity)
elif periodicity.endswith('s'):
self.__periodicity[kpi_name] = int(periodicity[:-1])
elif periodicity.endswith('m'):
self.__periodicity[kpi_name] = int(periodicity[:-1])*60
elif periodicity.endswith('h'):
self.__periodicity[kpi_name] = int(periodicity[:-1])*60*60
elif periodicity.endswith('d'):
self.__periodicity[kpi_name] = int(periodicity[:-1])*60*60*24
self.__last_updated[kpi_name] = None
self.log_info("Priority set for "+kpi_showname+': '+periodicity)
return True
except:
self.log_info("Priority set failed for "+kpi_showname+': '+periodicity)
return False
def set_cell(self, kpi_showname, cell):
"""
Set periodicity of the analyzer
:param kpi_showname: The KPI to be queried, this is the showname
:type kpi_showname: string
:param cell: cell (s,m,h,d repsents scale of seconds, minutes, hours, days)
:type cell: string
"""
try:
kpi_name = kpi_showname.replace('.', '_')
self.__logcell[kpi_name] = cell
self.log_info("Logging cell set for "+kpi_showname+': '+str(cell))
return True
except:
self.log_info("Logging cell failed for "+kpi_showname+': '+periodicity)
return False
def store_kpi(self, kpi_name, kpi_value, timestamp, cur_location=None):
"""
Store the KPIs to the local database
:param kpi_name: The KPI to be queried
:type kpi_name: string
:param kpi_value: The value of KPI or a dict {attribute <type: str>: value <type: str>}
:type kpi_value: string
:param timestamp
:type timestamp: datetime
"""
if not self.__db_enabled:
self.log_warning("Database is not enabled.")
return True
# try:
phone_info = self.__get_phone_model()
operator_info = self.__get_operator_info()
# cur_location = self.__get_current_gps()
cell_id = self.get_analyzer('TrackCellInfoAnalyzer').get_cur_cell_id()
cell_id = cell_id if cell_id else "None"
tac = self.get_analyzer('TrackCellInfoAnalyzer').get_cur_cell_tac()
tac = tac if tac else "None"
downlink_frequency = self.get_analyzer('TrackCellInfoAnalyzer').get_cur_downlink_frequency()
downlink_frequency = downlink_frequency if downlink_frequency else "None"
uplink_frequency = self.get_analyzer('TrackCellInfoAnalyzer').get_cur_uplink_frequency()
uplink_frequency = uplink_frequency if uplink_frequency else "None"
downlink_bandwidth = self.get_analyzer('TrackCellInfoAnalyzer').get_cur_downlink_bandwidth()
downlink_bandwidth = downlink_bandwidth if downlink_bandwidth else "None"
uplink_bandwidth = self.get_analyzer('TrackCellInfoAnalyzer').get_cur_uplink_bandwidth()
uplink_bandwidth = uplink_bandwidth if uplink_bandwidth else "None"
allowed_access = self.get_analyzer('TrackCellInfoAnalyzer').get_cur_allowed_access()
allowed_access = allowed_access if allowed_access else "None"
band_indicator = self.get_analyzer('TrackCellInfoAnalyzer').get_cur_band_indicator()
band_indicator = band_indicator if band_indicator else "None"
#FIXME: How to handle the missing GPS location?
if not cur_location:
cur_location = ("None", "None")
if isinstance(kpi_value, str) or isinstance(kpi_value, int) or isinstance(kpi_value, float):
sql_cmd = "insert into " + kpi_name + "(value, timestamp," \
"op, phone_model, gps, cell_id, tai_id, dl_freq, ul_freq, dl_bw, ul_bw," \
"allowed_access, band_id) values(\"" + \
str(kpi_value) + "\"," + "\"" + str(timestamp) \
+ "\"," + "\"" + operator_info \
+ "\"," + "\"" + phone_info \
+ "\"," + "\"" + str(cur_location[0])+"|"+str(cur_location[1]) \
+ "\"," + "\"" + str(cell_id) \
+ "\"," + "\"" + str(tac) \
+ "\"," + "\"" + str(downlink_frequency) \
+ "\"," + "\"" + str(uplink_frequency) \
+ "\"," + "\"" + str(downlink_bandwidth) \
+ "\"," + "\"" + str(uplink_bandwidth) \
+ "\"," + "\"" + str(allowed_access) \
+ "\"," + "\"" + str(band_indicator) \
+ "\")"
else:
idx_str = ""
value_str = ""
for attribute in kpi_value:
idx_str += (attribute + ', ')
value_str += ("\"" + str(kpi_value[attribute]) + "\"," )
sql_cmd = "insert into " + kpi_name + "(" + idx_str + \
" timestamp, op, phone_model, gps, cell_id, tai_id, dl_freq, ul_freq, dl_bw, ul_bw," \
"allowed_access, band_id) values(" + value_str + "\""+ str(timestamp) \
+ "\"," + "\"" + operator_info \
+ "\"," + "\"" + phone_info \
+ "\"," + "\"" + str(cur_location[0])+"|"+str(cur_location[1]) \
+ "\"," + "\"" + str(cell_id) \
+ "\"," + "\"" + str(tac) \
+ "\"," + "\"" + str(downlink_frequency) \
+ "\"," + "\"" + str(uplink_frequency) \
+ "\"," + "\"" + str(downlink_bandwidth) \
+ "\"," + "\"" + str(uplink_bandwidth) \
+ "\"," + "\"" + str(allowed_access) \
+ "\"," + "\"" + str(band_indicator) \
+ "\")"
# print(sql_cmd)
if is_android:
self.__db.execSQL(sql_cmd)
else:
self.__db.execute(sql_cmd)
self.__conn.commit()
self.__log_kpi(kpi_name, timestamp, cell_id, kpi_value)
return True
# except BaseException: # TODO: raise warnings
# return False
def __log_kpi(self, kpi_name, timestamp, cell_id, kpi_value):
"""
:param kpi_name: The KPI to be queried
:type kpi_name: string
:param timestamp
:type timestamp: datetime
:param cell_id: updated kpi cell id
:type cell_id: string
"""
if kpi_name in self.__last_updated:
# if logging cell is specified, check whether cell id are the same
if not self.__logcell[kpi_name] or self.__logcell[kpi_name] and self.__logcell[kpi_name] == str(cell_id):
kpi_showname = kpi_name.replace('_', '.')
# if periodicity mode enabled, check whether time gap is longer enough
if not self.__last_updated[kpi_name] or (timestamp - self.__last_updated[kpi_name]).total_seconds() > self.__periodicity[kpi_name]:
self.__last_updated[kpi_name] = timestamp
if kpi_name.endswith('_LOSS') or kpi_name.endswith('_BLER'):
self.log_info(str(timestamp) + ': '+ str(kpi_showname) + '=' + str(kpi_value) + '%')
elif kpi_name.endswith('_TPUT'):
self.log_info(str(timestamp) + ': '+ str(kpi_showname) + '=' + str(kpi_value) + 'bps')
elif kpi_name.endswith('_LATENCY') or kpi_name.endswith('_HOL'):
self.log_info(str(timestamp) + ': '+ str(kpi_showname) + '=' + str(kpi_value) + 'ms')
elif kpi_name.endswith('_PREDICTION'):
self.log_info(str(timestamp) + ': '+ str(kpi_showname) + '=Triggered')
else:
self.log_info(str(timestamp) + ': '+ str(kpi_showname) + '=' + str(self.local_query_kpi(kpi_name)))
# check the stats updated with instance value
if kpi_name.endswith('SUC') or kpi_name.endswith('FAILURE'):
kpi_name=kpi_name.replace('SUC', 'SR')
kpi_name=kpi_name.replace('FAILURE', 'SR')
if kpi_name in self.__last_updated:
if not self.__logcell[kpi_name] or self.__logcell[kpi_name] and self.__logcell[kpi_name] == str(cell_id):
kpi_showname = kpi_name.replace('_', '.')
if not self.__last_updated[kpi_name] or (timestamp - self.__last_updated[kpi_name]).total_seconds() > self.__periodicity[kpi_name]:
self.__last_updated[kpi_name] = timestamp
kpi_showname = kpi_name.replace('_', '.')
self.log_info(str(timestamp) + ': '+ str(kpi_showname) + '=' + str(self.local_query_kpi(kpi_name)))
def __upload_kpi_thread(self,e):
"""
Internal thread to upload the KPI
"""
while True:
if KpiAnalyzer.pending_upload_task:
while True:
activeNetworkInfo = ConnectivityManager.getActiveNetworkInfo()
if activeNetworkInfo and activeNetworkInfo.isConnected():
break
e.wait(1)
while KpiAnalyzer.pending_upload_task:
item = KpiAnalyzer.pending_upload_task.popleft()
# self.__upload_kpi_async(item[0],item[1])
while not self.__upload_kpi_async(item[0],item[1],item[2]):
e.wait(5)
e.wait(5)
def __upload_kpi_async(self,kpi_name, kpi_value, cur_location):
"""
Upload the KPI value to the cloud
:param kpi_name: The KPI to be queried
:type kpi_name: string
:param kpi_value: The value of KPI
:type kpi_value: string
"""
self.log_debug("uploading kpi: "+kpi_name)
if is_android:
phone_info = self.__get_phone_model()
operator_info = self.__get_operator_info()
# cur_location = self.__get_current_gps()
cell_id = self.get_analyzer('TrackCellInfoAnalyzer').get_cur_cell_id()
cell_id = cell_id if cell_id else "None"
tac = self.get_analyzer('TrackCellInfoAnalyzer').get_cur_cell_tac()
tac = tac if tac else "None"
downlink_frequency = self.get_analyzer('TrackCellInfoAnalyzer').get_cur_downlink_frequency()
downlink_frequency = downlink_frequency if downlink_frequency else ""
uplink_frequency = self.get_analyzer('TrackCellInfoAnalyzer').get_cur_uplink_frequency()
uplink_frequency = uplink_frequency if uplink_frequency else ""
downlink_bandwidth = self.get_analyzer('TrackCellInfoAnalyzer').get_cur_downlink_bandwidth()
downlink_bandwidth = downlink_bandwidth if downlink_bandwidth else ""
uplink_bandwidth = self.get_analyzer('TrackCellInfoAnalyzer').get_cur_uplink_bandwidth()
uplink_bandwidth = uplink_bandwidth if uplink_bandwidth else ""
allowed_access = self.get_analyzer('TrackCellInfoAnalyzer').get_cur_allowed_access()
allowed_access = allowed_access if allowed_access else ""
band_indicator = self.get_analyzer('TrackCellInfoAnalyzer').get_cur_band_indicator()
band_indicator = band_indicator if band_indicator else ""
#FIXME: How to handle the missing GPS location?
if not cur_location:
cur_location = "None"
for item in kpi_value:
if not kpi_value[item]:
kpi_value[item] = "None"
httpClient = None
try:
postdata = {'Phone_model': phone_info,
'operator': operator_info,
'GPS': str(cur_location[0])+"|"+str(cur_location[1]),
'Time': time.time(),
'Cell_ID': str(cell_id),
'TAI_ID' : str(tac),
'DL_Freq': str(downlink_frequency),
'UL_Freq': str(uplink_frequency),
'DL_Bandwidth': str(downlink_bandwidth),
'UL_Bandwidth': str(uplink_bandwidth),
'Allowed_access': str(allowed_access),
'Band indicator': str(band_indicator),
'KPI_type' : kpi_name,
'KPI_val': kpi_value,
}
# url = 'http://34.213.149.155/postdata/'
url = 'http://knowledge-map.xyz/postdata/'
# self.log_debug(str(postdata))
jdata = json.dumps(postdata)
                req = urllib.request.Request(url, jdata.encode('utf-8'))  # urlopen expects bytes data in Python 3
response = urllib.request.urlopen(req)
self.log_debug("New KPI uploaded:" + kpi_name)
if httpClient:
httpClient.close()
return True
except Exception as e:
# import traceback
# self.log_error(str(traceback.format_exc()))
self.log_warning("Fail to upload the KPI: "+ kpi_name)
if httpClient:
httpClient.close()
return False
else:
self.log_info("New KPI (uploading skipped): "+kpi_name)
return True
def upload_kpi(self,kpi_name, kpi_value):
"""
Upload the KPI value to the cloud
:param kpi_name: The KPI to be queried
:type kpi_name: string
:param kpi_value: The value of KPI
:type kpi_value: string
"""
# self.log_info("New KPI: " + kpi_name)
cur_location = self.__get_current_gps()
KpiAnalyzer.pending_upload_task.append((kpi_name,kpi_value,cur_location))
def __get_phone_model(self):
if is_android:
#TODO: Optimization, avoid repetitive calls
res = mi2app_utils.get_phone_manufacturer()+"-"+mi2app_utils.get_phone_model()
# self.log_debug("Phone model: "+res)
return res
else:
return self.__phone_model
def __get_operator_info(self):
if is_android:
#TODO: Optimization, avoid repetitive calls
return mi2app_utils.get_operator_info()
else:
self.__op = self.get_analyzer('TrackCellInfoAnalyzer').get_cur_op()
return self.__op
def __get_current_gps(self):
if is_android:
location = mi2app_utils.get_current_location()
# self.log_debug("Current location: "+str(location))
return location
else:
return ""
def set_phone_model(self, phone_model):
"""
Set phone model
:param phone_model: string
:return:
"""
self.__phone_model = phone_model
def set_operator(self, operator):
"""
Set operator
:param operator: string
:return:
"""
self.__op = operator
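# --- Added illustration (not part of the original file): how a success-rate KPI
# such as KPI_Accessibility_RRC_SR is derived by local_query_kpi() from the
# per-event _SUC and _REQ tables that store_kpi() fills. Plain sqlite3 is used
# here and the table/KPI names are hypothetical examples.
if __name__ == "__main__":
    import sqlite3 as _sqlite3
    _conn = _sqlite3.connect(':memory:')
    _db = _conn.cursor()
    for _t in ('KPI_Accessibility_RRC_SUC', 'KPI_Accessibility_RRC_REQ'):
        _db.execute('CREATE TABLE ' + _t + ' (id integer primary key autoincrement, value text, timestamp timestamp)')
    # pretend 8 successes out of 10 requests were logged
    for _ in range(10):
        _db.execute("insert into KPI_Accessibility_RRC_REQ(value, timestamp) values('1', '2020-01-01')")
    for _ in range(8):
        _db.execute("insert into KPI_Accessibility_RRC_SUC(value, timestamp) values('1', '2020-01-01')")
    _conn.commit()
    _suc = _db.execute('select count(*) from KPI_Accessibility_RRC_SUC').fetchone()[0]
    _req = _db.execute('select count(*) from KPI_Accessibility_RRC_REQ').fetchone()[0]
    print('{:.2f}%'.format(float(_suc) / _req * 100))  # -> 80.00%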
``` |
{
"source": "jianjianGJ/RepulsionGNN",
"score": 2
} |
#### File: jianjianGJ/RepulsionGNN/main.py
```python
import os
import sys
import math
import time
import argparse
import torch
import numpy as np
from tqdm import tqdm
from parameters import set_args
from data_utils import load_data
from utils import compute_micro_f1, get_GNN, custom_loss_function, get_version, get_auxiliary, seed
#%% Logging format settings, global variable initialization
version = None
epsilon = 1 - math.log(2)
device = None
n_node_feats, n_classes = 0, 0
#%%
def train(model, adj_t, x, y, label_p, train_mask, optimizer, cm=None):
model.train()
optimizer.zero_grad()
out = model(x, adj_t, label_p, cm)
loss = custom_loss_function(out[train_mask], y[train_mask])
loss.backward()
optimizer.step()
return loss.detach()
@torch.no_grad()
def inference(model, adj_t, x, label_p, cm):
model.eval()
out = model(x, adj_t, label_p, cm)
return out
#%%
def run(args, adj_t, x, y, label_p, cm, train_mask, val_mask, n_running):
seed(n_running)
GNN = get_GNN(args.modelname)
model = GNN(
in_channels=n_node_feats,
out_channels=n_classes,
rsl = args.rsl,
**args.architecture
).to(device)
optimizer = torch.optim.Adam([
dict(params=model.reg_modules.parameters(), weight_decay=args.reg_weight_decay),
dict(params=model.nonreg_modules.parameters(), weight_decay=args.nonreg_weight_decay)
], lr=args.lr)
total_time = 0
best_val_acc = 0
best_out = None
for epoch in tqdm(range(1, args.epochs + 1), desc=f'running {n_running}', ncols=80):
tic = time.time()
train(model, adj_t, x, y, label_p, train_mask, optimizer, cm)
toc = time.time()
total_time += toc - tic
out = inference(model, adj_t, x, label_p, cm)
val_acc = compute_micro_f1(out, y, val_mask)
if val_acc > best_val_acc:
best_val_acc = val_acc
best_out = out.detach().cpu()
per_epoch_time = total_time / args.epochs
return best_out, per_epoch_time
def main():
global device, n_node_feats, n_classes, epsilon
argparser = argparse.ArgumentParser("Test",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
argparser.add_argument("--gpu", type=int, default=0, help="GPU device ID.")
argparser.add_argument("--n-runs", type=int, help="running times", default=1)
argparser.add_argument("--epochs", type=int, help="number of epochs", default=500)
argparser.add_argument("--prepare", action='store_true', default=False)
argparser.add_argument("--basemodel", type=str, default='GCN')
argparser.add_argument("--dataset", type=str, default='arxiv')
argparser.add_argument("--modelname", type=str, default='GCN')
args = argparser.parse_args()
if args.prepare:
args.modelname = args.basemodel
set_args(args)
args.version = get_version(args)
#%%
if not os.path.exists(f'./datainfo-{args.basemodel}//'):
os.makedirs(f'./datainfo-{args.basemodel}/')
if not os.path.exists('./result/'):
os.makedirs('./result/')
#%%
device = f'cuda:{args.gpu}' if torch.cuda.is_available() else 'cpu'
device = torch.device(device)
# load data
if args.modelname=='SAGE':
data, n_node_feats, n_classes = load_data(args.dataset,add_selfloop=True, norm=False, basemodel=args.basemodel)
else:
data, n_node_feats, n_classes = load_data(args.dataset, basemodel=args.basemodel)
adj_t, x, y, label_p, cm, train_masks, val_masks, test_masks = \
data.adj_t, data.x, data.y, data.label_p, data.cm, data.train_mask, data.val_mask, data.test_mask
adj_t, x, y, train_masks, val_masks, test_masks = adj_t.to(device), x.to(device), y.to(device), \
train_masks.to(device), val_masks.to(device), test_masks.to(device)
if args.rsl>0:
label_p, cm = (label_p[0].to(device),label_p[1].to(device)), cm.to(device)
best_outs = []
test_accs = []
per_epoch_times = []
for n_running in range(1, args.n_runs + 1):
if train_masks.dim()>1:
train_mask = train_masks[:,(n_running-1)%train_masks.shape[1]]
val_mask = val_masks[:,(n_running-1)%val_masks.shape[1]]
if train_masks.dim()==1:
train_mask = train_masks
val_mask = val_masks
if test_masks.dim()==1:
test_mask = test_masks
else:
test_mask = test_masks[:,(n_running-1)%test_masks.shape[1]]
best_out_logits, per_epoch_time = run(args, adj_t, x, y, label_p, cm, train_mask, val_mask, n_running)
best_out = best_out_logits.argmax(dim=-1)
if args.prepare:
info_path = f'./datainfo-{args.basemodel}/{args.dataset}'
if not os.path.exists(info_path):
os.mkdir(info_path)
label_p, cm = get_auxiliary(best_out_logits, n_classes)
torch.save(label_p,f'{info_path}/label.tensor')
torch.save(cm,f'{info_path}/cm.tensor')
args.version += ' prepare '
best_outs.append(best_out)
per_epoch_times.append(per_epoch_time)
acc=int(best_out[test_mask.cpu()].eq(y.cpu()[test_mask.cpu()]).sum()) / y.cpu()[test_mask.cpu()].size(0)
print(acc)
test_accs.append(acc)
with open(f'./result/{args.modelname}.txt','a') as f:
f.write(f"{args.version}: mean={np.mean(test_accs)*100:.2f} std={np.std(test_accs)*100:.2f} t={np.mean(per_epoch_times):.4f} \n")
if __name__ == "__main__":
print(' '.join(sys.argv))
main()
``` |
{
"source": "JianJiangKCL/class-incremental-learning-main",
"score": 3
} |
#### File: JianJiangKCL/class-incremental-learning-main/dataset.py
```python
import os
from typing import Any, Callable, Dict, IO, List, Optional, Tuple, Union
from torchvision.datasets.vision import VisionDataset
from torchvision.datasets.utils import download_url, download_and_extract_archive, extract_archive, \
verify_str_arg
from torchvision.datasets.mnist import read_image_file, read_label_file
import warnings
import torch
import random
def get_indices_balance(targets, class_name, max_imgs_per_class, is_random=False):
indices = []
cnts = {}
for cls in class_name:
cnts[cls] = 0
iterator = [i for i in range(len(targets))]
# if is_random:
# random.shuffle(iterator)
for i in iterator:
label = targets[i]
if label in class_name:
label = label.item()
if cnts[label] < max_imgs_per_class:
indices.append(i)
cnts[label] += 1
# used for selecting
# if this random is commented and is_random is false, then the output is fixed
# random.shuffle(indices)
# indices = indices[0:max_imgs]
return indices
class MyMNIST(VisionDataset):
"""`MNIST <http://yann.lecun.com/exdb/mnist/>`_ Dataset.
Args:
root (string): Root directory of dataset where ``MNIST/processed/training.pt``
and ``MNIST/processed/test.pt`` exist.
train (bool, optional): If True, creates dataset from ``training.pt``,
otherwise from ``test.pt``.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
"""
resources = [
("http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz", "f68b3c2dcbeaaa9fbdd348bbdeb94873"),
("http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz", "d53e105ee54ea40749a09fcbcd1e9432"),
("http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz", "9fb629c4189551a2d022fa330f9573f3"),
("http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz", "ec29112dd5afa0611ce80d1b7f02629c")
]
training_file = 'training.pt'
test_file = 'test.pt'
classes = ['0 - zero', '1 - one', '2 - two', '3 - three', '4 - four',
'5 - five', '6 - six', '7 - seven', '8 - eight', '9 - nine']
@property
def train_labels(self):
warnings.warn("train_labels has been renamed targets")
return self.targets
@property
def test_labels(self):
warnings.warn("test_labels has been renamed targets")
return self.targets
@property
def train_data(self):
warnings.warn("train_data has been renamed data")
return self.data
@property
def test_data(self):
warnings.warn("test_data has been renamed data")
return self.data
def __init__(
self,
root: str,
train: bool = True,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super(MyMNIST, self).__init__(root, transform=transform,
target_transform=target_transform)
self.train = train # training set or test set
if download:
self.download()
if not self._check_exists():
raise RuntimeError('Dataset not found.' +
' You can use download=True to download it')
if self.train:
data_file = self.training_file
else:
data_file = self.test_file
self.data, self.targets = torch.load(os.path.join(self.processed_folder, data_file))
self.data = self.data.unsqueeze(3)
        # ImageNet has around 1300 images per class
        # the smallest MNIST class has 5428 images, so cap each class at 5420
indices = get_indices_balance(self.targets, [i for i in range(10)], max_imgs_per_class=5420)
self.data = self.data[indices]
self.targets = self.targets[indices]
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target = self.data[index], int(self.targets[index])
# doing this so that it is consistent with all other datasets
# to return a PIL Image
# img = Image.fromarray(img.numpy(), mode='L')
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self) -> int:
return len(self.data)
@property
def raw_folder(self) -> str:
return os.path.join(self.root, self.__class__.__name__, 'raw')
@property
def processed_folder(self) -> str:
return os.path.join(self.root, self.__class__.__name__, 'processed')
@property
def class_to_idx(self) -> Dict[str, int]:
return {_class: i for i, _class in enumerate(self.classes)}
def _check_exists(self) -> bool:
return (os.path.exists(os.path.join(self.processed_folder,
self.training_file)) and
os.path.exists(os.path.join(self.processed_folder,
self.test_file)))
def download(self) -> None:
"""Download the MNIST data if it doesn't exist in processed_folder already."""
if self._check_exists():
return
os.makedirs(self.raw_folder, exist_ok=True)
os.makedirs(self.processed_folder, exist_ok=True)
# download files
for url, md5 in self.resources:
filename = url.rpartition('/')[2]
download_and_extract_archive(url, download_root=self.raw_folder, filename=filename, md5=md5)
# process and save as torch files
print('Processing...')
training_set = (
read_image_file(os.path.join(self.raw_folder, 'train-images-idx3-ubyte')),
read_label_file(os.path.join(self.raw_folder, 'train-labels-idx1-ubyte'))
)
test_set = (
read_image_file(os.path.join(self.raw_folder, 't10k-images-idx3-ubyte')),
read_label_file(os.path.join(self.raw_folder, 't10k-labels-idx1-ubyte'))
)
with open(os.path.join(self.processed_folder, self.training_file), 'wb') as f:
torch.save(training_set, f)
with open(os.path.join(self.processed_folder, self.test_file), 'wb') as f:
torch.save(test_set, f)
print('Done!')
def extra_repr(self) -> str:
return "Split: {}".format("Train" if self.train is True else "Test")
``` |
{
"source": "jianjieluo/Hallucination",
"score": 2
} |
#### File: Hallucination/utils/im_consistency.py
```python
import sys
import json
import pickle as pkl
import pdb
import numpy as np
from nltk import word_tokenize
from pattern.en import singularize
import nltk
import argparse
from .misc import *
def get_label_dicts(robust=False):
if robust:
label_dict = 'output/image_classifier/classifier_output_robust.p'
else:
label_dict = 'output/image_classifier/classifier_output.p'
predicted_label_dict = pkl.load(open(label_dict, 'rb'))
gt_label_dict = pkl.load(open('data/gt_labels.p', 'rb'))
return predicted_label_dict, gt_label_dict
def get_im_consistency(hallucination_by_imid,
predicted_label_dict,
gt_label_dict):
total = 0.
scores = 0.
for i, imid in enumerate(hallucination_by_imid.keys()):
item = hallucination_by_imid[imid]
caption = item['caption']
caption_words = word_tokenize(caption.lower())
mscoco_words = [i[1] for i in item['mscoco_hallucinated_words']]
predicted_labels = predicted_label_dict[imid]['predicted_classes']
raw_output = predicted_label_dict[imid]['raw_output']
raw_output_sorted = np.argsort(raw_output)[::-1]
for mscoco_word in mscoco_words:
value = raw_output[gt_label_dict['cat_to_idx'][mscoco_word]]
scores += value
total += 1
return scores/total
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--annotation_path", type=str, default='coco/annotations')
parser.add_argument("--tag", type=str, default='td-fc_beam1_test')
parser.add_argument('--robust', dest='robust', action='store_true')
parser.set_defaults(robust=False)
args = parser.parse_args()
#read hallucination file
hallucinated_json = './output/hallucination/hallucinated_words_%s.json' %args.tag
hallucination_by_imid = hallucination_file_to_dict(hallucinated_json)
predicted_label_dict, gt_label_dict = get_label_dicts(args.robust)
consistency = get_im_consistency(hallucination_by_imid,
predicted_label_dict,
gt_label_dict)
print("Im consistency is: %0.04f" %consistency)
``` |
{
"source": "jianjieluo/OpenAI-CLIP-Feature",
"score": 2
} |
#### File: OpenAI-CLIP-Feature/visual_extractor/customized.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
from PIL import Image
import torch.nn as nn
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from timm.models.vision_transformer import resize_pos_embed
from clip.clip import _convert_image_to_rgb
from .standard import CLIPRN101, CLIPViTB32
__all__ = ["CLIPRN101_448", "CLIPViTB32_448"]
transform = Compose([
Resize((448, 448), interpolation=Image.BICUBIC),
CenterCrop((448, 448)),
_convert_image_to_rgb,
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))
])
class CLIPRN101_448(CLIPRN101):
def __init__(self, args, src_list, dst_list):
super(CLIPRN101_448, self).__init__(args, src_list, dst_list)
# larger resolution
self.transform = transform
# resize CNN visual.attnpool.positional_embedding for larger resolution
num_patches = 196
pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, self.model.visual.attnpool.positional_embedding.size(-1), device=self.device),)
resized_pos_embed_weight = resize_pos_embed(self.model.visual.attnpool.positional_embedding.unsqueeze(0), pos_embed)
pos_embed = nn.Parameter(resized_pos_embed_weight.squeeze(0),)
self.model.visual.attnpool.positional_embedding = pos_embed
# downsample feature map
self.pool2d = nn.AvgPool2d(kernel_size=(2, 2), stride=2)
class CLIPViTB32_448(CLIPViTB32):
def __init__(self, args, src_list, dst_list):
super(CLIPViTB32_448, self).__init__(args, src_list, dst_list)
# larger resolution
self.transform = transform
# resize ViT visual.positional_embedding for larger resolution
num_patches = 196
pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, self.model.visual.positional_embedding.size(-1), device=self.device),)
resized_pos_embed_weight = resize_pos_embed(self.model.visual.positional_embedding.unsqueeze(0), pos_embed)
pos_embed = nn.Parameter(resized_pos_embed_weight.squeeze(0),)
self.model.visual.positional_embedding = pos_embed
# downsample feature map
self.pool2d = nn.AvgPool2d(kernel_size=(2, 2), stride=2)
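# --- Added illustration (not part of the original file): what the
# resize_pos_embed call above does, using the same timm import. A ViT-B/32
# pretrained at 224x224 carries 7*7 + 1 = 50 position tokens; a 448x448 input
# with 32x32 patches needs 14*14 + 1 = 197, so the grid part of the embedding
# is interpolated to the new size while the class token is kept as-is.
if __name__ == "__main__":
    old_pos_embed = torch.zeros(1, 50, 768)   # [1, 7*7 + 1, dim]
    new_pos_embed = torch.zeros(1, 197, 768)  # [1, 14*14 + 1, dim]
    print(resize_pos_embed(old_pos_embed, new_pos_embed).shape)
    # -> torch.Size([1, 197, 768])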
``` |
{
"source": "Jian-jobs/Jian-leetcode_python3",
"score": 4
} |
#### File: Jian-leetcode_python3/Solutions/001_Two Sum.py
```python
class Solution(object):
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
dict = {}
for index in range(len(nums)):
if target - nums[index] not in dict:
dict[nums[index]] = index
else:
return [dict[target - nums[index]], index]
print(Solution().twoSum([2, 7, 11, 15], 9))
'''
enumerate() method: faster and less memory usage
enumerate() 函数用于将一个可遍历的数据对象(如列表、元组或字符串)组合为一个索引序列,
同时列出数据和数据下标,一般用在 for 循环当中。
>>>seasons = ['Spring', 'Summer', 'Fall', 'Winter']
>>> list(enumerate(seasons))
[(0, 'Spring'), (1, 'Summer'), (2, 'Fall'), (3, 'Winter')]
class Solution(object):
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
dict = {}
for index, value in enumerate(nums):
if target - value not in dict:
dict[value] = index
else:
return [dict[target - value], index]
print(Solution().twoSum([2, 7, 11, 15], 9))
'''
'''
Approach:
[2, 7, 11, 15], target: 17
Iterate over each index:
    if target - nums[index] not in dict:
        dict[nums[index]] = index
    else:
        return [dict[target - nums[index]], index]
----------
index = 0:
    target - nums[index] = 15
    dict[2] = 0
index = 1:
    target - nums[index] = 10
    dict[7] = 1
index = 2:
    target - nums[index] = 6
    dict[11] = 2
index = 3:
    target - nums[index] = 2 !!
    return [dict[target - nums[index]], index]
    i.e. [dict[2]'s value, which is 0, and index 3] -> [0, 3]
---------- print dictionary:
for k, v in dict.items():
print(k, '=>', v)
'''
```
#### File: Jian-leetcode_python3/Solutions/024_Swap Nodes in Pairs.py
```python
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
# iteration
class Solution:
def swapPairs(self, head):
'''
:param head: ListNode
:return: ListNode
'''
dummy = ListNode(0)
dummy.next = head
res = dummy
if not head or not head.next: return head
while res.next and res.next.next:
first = res.next
second = res.next.next
res.next = second
first.next = second.next
second.next = first
res = res.next.next
return dummy.next
l1 = ListNode(1)
l1.next = ListNode(2)
l1.next.next = ListNode(3)
l = Solution().swapPairs(l1)
print(l.val)
# recursion
class Solution:
def swapPairs(self, head):
'''
:param head: ListNode
:return: ListNode
'''
if not head or not head.next: return head
new_start = head.next.next
head, head.next = head.next, head
head.next.next = self.swapPairs(new_start)
return head
```
#### File: Jian-leetcode_python3/Solutions/162_ Find Peak Element.py
```python
from typing import List
class Solution:
def findPeakElement(self, nums: List[int]) -> int:
lo, hi = 0, len(nums)-1
# handle condition 3
while lo < hi:
mid = lo + (hi - lo) // 2 #or (lo + hi) // 2
if nums[mid] > nums[mid + 1] and nums[mid] > nums[mid - 1]:
return mid
if nums[mid] < nums[mid + 1]:
lo = mid +1
else:
hi = mid - 1
# handle condition 1 and 2
return lo if nums[lo] >= nums[hi] else hi
# refernce:
# https://leetcode.com/problems/find-peak-element/discuss/50259/My-clean-and-readable-python-solution
```
#### File: Jian-leetcode_python3/Solutions/165_ Compare Version Numbers.py
```python
def compareVersion(version1, version2):
"""
:type version1: str
:type version2: str
:rtype: int
"""
    # a trick worth learning
version1 = [int(val) for val in version1.split(".")]
version2 = [int(val) for val in version2.split(".")]
if len(version1) > len(version2):
min_version = version2
max_version = version1
else:
min_version = version1
max_version = version2
# Compare up to min character
for i in range(len(min_version)):
if version1[i] > version2[i]:
return 1
elif version1[i] < version2[i]:
return -1
if len(version1) == len(version2):
return 0
for j in range(i + 1, len(max_version)):
if max_version[j] > 0:
return 1 if max_version == version1 else - 1
return 0
class Solution:
def compareVersion(self, version1: str, version2: str) -> int:
s1 = version1.split('.')
s2 = version2.split('.')
        # Aligning them
if len(s1) >= len(s2):
s2.extend('0' * (len(s1) - len(s2)))
else:
s1.extend('0' * (len(s2) - len(s1)))
c = [int(s1[i]) - int(s2[i]) for i in range(len(s1))]
for item in c:
if item < 0:
return -1
elif item > 0:
return 1
return 0
# refernece:
# https://leetcode.com/problems/compare-version-numbers/discuss/311157/Python-Easy-to-Understand-O(n)
# https://leetcode.com/problems/compare-version-numbers/discuss/51008/Concise-Python-code
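# --- Added quick check (illustration): the class-based Solution above treats
# missing components as zeros, so "1.0.1" > "1" while "1.01" == "1.001".
print(Solution().compareVersion("1.0.1", "1"))     # -> 1
print(Solution().compareVersion("1.01", "1.001"))  # -> 0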
``` |
{
"source": "jianjunwu/data_structure_and_algorithm",
"score": 4
} |
#### File: python/05_array/myarray.py
```python
class MyArray(object):
"""A simple wrapper around List.
You cannot have -1 in the array.
"""
def __init__(self, size: int):
self._data = []
self._size = size
def __getitem__(self, position: int) -> object:
return self._data[position]
def __setitem__(self, position: int, value: object):
self._data[position] = value
def __len__(self) -> int:
return len(self._data)
def __iter__(self):
for item in self._data:
yield item
def find(self, index: int) -> object:
try:
return self._data[index]
except IndexError:
return None
def insert(self, index: int, value: int) -> bool:
if len(self) >= self._size:
return False
else:
return self._data.insert(index, value)
def delete(self, index: int) -> bool:
try:
self._data.pop(index)
return True
except IndexError:
return False
def print_all(self):
for item in self:
print(item)
def test_myarray():
arr = MyArray(3)
arr.insert(0, 2)
arr.insert(0, 3)
arr.insert(1, 5)
assert arr.insert(0, 6) is False
assert len(arr) == 3
assert arr.find(1) == 5
assert arr.delete(3) is False
arr.print_all()
if __name__ == "__main__":
test_myarray()
``` |
{
"source": "jianjunz/oms-client-native",
"score": 2
} |
#### File: oms-client-native/scripts/prepare_dev.py
```python
import os
import shutil
import sys
import subprocess
HOME_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
PATCH_PATH = os.path.join(HOME_PATH, 'talk', 'owt', 'patches')
TESTING_PATH = os.path.join(HOME_PATH, 'testing')
THIRD_PARTY_PATH = os.path.join(HOME_PATH, 'third_party')
LIBSRTP_PATH = os.path.join(THIRD_PARTY_PATH, 'libsrtp')
WEBRTC_OVERRIDES_PATH = os.path.join(THIRD_PARTY_PATH, 'webrtc_overrides')
BUILD_PATH = os.path.join(HOME_PATH, 'build')
BASE_PATH = os.path.join(HOME_PATH, 'base')
platform = os.name
useShell = False
if(platform == "nt"):
useShell = True
def _patch():
if (subprocess.call(['git', 'am', os.path.join(PATCH_PATH, '0001-Use-OpenSSL-for-usrsctp.patch')], shell=useShell, cwd=THIRD_PARTY_PATH)) != 0:
subprocess.call(['git', 'am', '--skip'], shell=useShell, cwd=THIRD_PARTY_PATH)
if (subprocess.call(['git', 'am', os.path.join(PATCH_PATH, '0002-Use-OpenSSL-for-libsrtp.patch')], shell=useShell, cwd=LIBSRTP_PATH)) != 0:
subprocess.call(['git', 'am', '--skip'], shell=useShell, cwd=LIBSRTP_PATH)
if (subprocess.call(['git', 'am', os.path.join(PATCH_PATH, '0003-Start-iOS-simulator-before-running-tests.patch')], shell=useShell, cwd=TESTING_PATH)) != 0:
subprocess.call(['git', 'am', '--skip'], shell=useShell, cwd=TESTING_PATH)
if (subprocess.call(['git', 'am', os.path.join(PATCH_PATH, '0004-Remove-webrtc_overrides.patch')], shell=useShell, cwd=THIRD_PARTY_PATH)) != 0:
subprocess.call(['git', 'am', '--skip'], shell=useShell, cwd=THIRD_PARTY_PATH)
if (subprocess.call(['git', 'am', os.path.join(PATCH_PATH, '0005-Fixed-compile-issue-and-disable-thin-archive.patch')], shell=useShell, cwd=BUILD_PATH)) != 0:
subprocess.call(['git', 'am', '--skip'], shell=useShell, cwd=BUILD_PATH)
if (subprocess.call(['git', 'am', os.path.join(PATCH_PATH, '0009-Fix-compile-issue-for-linux-g-build.patch')], shell=useShell, cwd=BUILD_PATH)) != 0:
subprocess.call(['git', 'am', '--skip'], shell=useShell, cwd=BUILD_PATH)
if (subprocess.call(['git', 'am', os.path.join(PATCH_PATH, '0006-Adjusted-jni_generator.py-to-fit-OWT-code-structure.patch')], shell=useShell, cwd=BASE_PATH)) != 0:
subprocess.call(['git', 'am', '--skip'], shell=useShell, cwd=BASE_PATH)
#if (subprocess.call(['git', 'am', os.path.join(PATCH_PATH, '0008-ios-Various-build-fixes-for-Xcode-10.patch')], shell=useShell, cwd=BUILD_PATH)) != 0:
# subprocess.call(['git', 'am', '--skip'], shell=useShell, cwd=BUILD_PATH)
def main(argv):
_patch()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
``` |
{
"source": "Jiankai-Sun/Modular-Decision",
"score": 2
} |
#### File: common/vec_env/subproc_vec_env.py
```python
import numpy as np
from multiprocessing import Process, Pipe
from . import VecEnv, CloudpickleWrapper
import multiprocessing
def worker(remote, parent_remote, env_fn_wrapper, host, port, args):
parent_remote.close()
env = env_fn_wrapper.x(host, port, args)
env.wait_for_reset = False
# try:
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if done:
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send(ob)
elif cmd == 'render':
remote.send(env.render(mode='rgb_array'))
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
else:
raise NotImplementedError
# except KeyboardInterrupt:
# print('SubprocVecEnv worker: got KeyboardInterrupt')
# finally:
# env.close()
def worker_pool(remote, parent_remote, env_fn_wrapper, host, port, args):
parent_remote.close()
env = env_fn_wrapper.x(host, port, args)
env.wait_for_reset = False
# try:
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
# if done:
# ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send(ob)
elif cmd == 'render':
remote.send(env.render(mode='rgb_array'))
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
elif cmd == 'reset_ob':
env.wait_for_reset = True
elif cmd == 'reset_control':
ob = env.reset()
env.reset_ob = ob
env.wait_for_reset = False
elif cmd == 'wait_for_reset':
remote.send((env.wait_for_reset))
elif cmd == 'get_reset_ob':
remote.send((env.reset_ob))
else:
raise NotImplementedError
class SubprocVecEnv(VecEnv):
"""
    VecEnv that runs multiple environments in parallel in subprocesses and communicates with them via pipes.
Recommended to use when num_envs > 1 and step() can be a bottleneck.
"""
def __init__(self, env_fns, spaces=None, hosts=None, ports=None, argss=None):
"""
Arguments:
env_fns: iterable of callables - functions that create environments to run in subprocesses. Need to be cloud-pickleable
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn), host, port, args))
for (work_remote, remote, env_fn, host, port, args) in zip(self.work_remotes, self.remotes, env_fns, hosts, ports, argss)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
self.viewer = None
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
self._assert_not_closed()
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
self._assert_not_closed()
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
self._assert_not_closed()
for remote in self.remotes:
remote.send(('reset', None))
return np.stack([remote.recv() for remote in self.remotes])
def close_extras(self):
self.closed = True
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
def get_images(self):
self._assert_not_closed()
for pipe in self.remotes:
pipe.send(('render', None))
imgs = [pipe.recv() for pipe in self.remotes]
return imgs
def _assert_not_closed(self):
assert not self.closed, "Trying to operate on a SubprocVecEnv after calling close()"
class SubprocVecEnvPool(VecEnv):
def __init__(self, env_fns, spaces=None, hosts=None, ports=None, argss=None):
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker_pool, args=(work_remote, remote, CloudpickleWrapper(env_fn), host, port, args))
for (work_remote, remote, env_fn, host, port, args) in zip(self.work_remotes, self.remotes, env_fns, hosts, ports, argss)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
self.viewer = None
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
self.ts = np.zeros(len(env_fns), dtype='int')
self.actions = None
self.env_now = 0
self.reset_obs = [None, None, None]
def step_async(self, actions):
self.actions = actions
self._assert_not_closed()
e = self.env_now
action = actions[e]
remote = self.remotes[e]
remote.send(('step', action))
self.waiting = True
# async update step
def step_wait(self):
# @TODO: env pool, revise the runner structure or change the step counts
e = self.env_now
remote = self.remotes[e]
result = remote.recv()
ob, rew, done, info = result
self.ts[e] += 1
if done:
remote = self.remotes[e]
remote.send(('reset_ob', done))
remote.send(('reset_control', done))
self.ts[e] = 0
self.actions = None
return np.array([ob]), np.array([rew]), np.array([done]), [info]
def reset(self):
self._assert_not_closed()
for remote in self.remotes:
remote.send(('reset', None))
return np.array([remote.recv() for remote in self.remotes])
def is_reset(self, e):
remote = self.remotes[e]
remote.send(('wait_for_reset', None))
wait_for_reset = remote.recv()
return wait_for_reset
def get_reset_ob(self, e):
remote = self.remotes[e]
remote.send(('get_reset_ob', None))
ob = remote.recv()
return ob
def close(self):
return
def _assert_not_closed(self):
assert not self.closed, "Trying to operate on a SubprocVecEnv after calling close()"
```
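The `worker`/`SubprocVecEnv` code above is built around a simple command loop: the parent process sends `(cmd, data)` tuples over a `Pipe`, and each child answers until it receives `'close'`. A minimal, generic sketch of that pattern (a toy counter stands in for the Carla environment; this is not part of the baselines code):
```python
from multiprocessing import Process, Pipe

def toy_worker(remote, parent_remote):
    # child: close the parent's end, then serve commands until 'close'
    parent_remote.close()
    state = 0
    while True:
        cmd, data = remote.recv()
        if cmd == 'step':
            state += data
            remote.send(state)
        elif cmd == 'reset':
            state = 0
            remote.send(state)
        elif cmd == 'close':
            remote.close()
            break

if __name__ == '__main__':
    remote, work_remote = Pipe()
    p = Process(target=toy_worker, args=(work_remote, remote), daemon=True)
    p.start()
    work_remote.close()           # parent only talks through its own end
    remote.send(('step', 3))
    print(remote.recv())          # -> 3
    remote.send(('reset', None))
    print(remote.recv())          # -> 0
    remote.send(('close', None))
    p.join()
```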
#### File: baselines/gail/parallel_carla_ray.py
```python
import tensorflow as tf
import ray
import argparse
import os.path as osp
import logging
from mpi4py import MPI
from tqdm import tqdm
import numpy as np
import gym
import sys
sys.path.append("../../")
import glob, os
import pickle
baseline_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.insert(0, baseline_dir)
# # version 0.9.4
# # add carla config here, put carla directory as the parent directory of IDM
# carla_simulator_path = '/home/SENSETIME/maqiurui/reinforce/carla/carla_0.9.4/PythonAPI/carla-0.9.4-py3.5-linux-x86_64.egg'
# try:
# sys.path.append(carla_simulator_path)
# sys.path.append(baseline_dir+'/../CARLA_0.9.4/PythonAPI/carla-0.9.4-py3.5-linux-x86_64.egg')
# sys.path.append(baseline_dir+'/../CARLA/PythonAPI/carla-0.9.4-py3.5-linux-x86_64.egg')
# except IndexError:
# pass
# version 0.9.5
# add carla config here, put carla directory as the parent directory of IDM
carla_simulator_path = '/home/SENSETIME/maqiurui/reinforce/carla/carla_0.9.5/PythonAPI/carla-0.9.5-py3.5-linux-x86_64.egg'
try:
sys.path.append(carla_simulator_path)
sys.path.append(baseline_dir+'/../CARLA_0.9.5/PythonAPI/carla-0.9.5-py3.5-linux-x86_64.egg')
sys.path.append(baseline_dir+'/../CARLA/PythonAPI/carla-0.9.5-py3.5-linux-x86_64.egg')
except IndexError:
pass
from baselines.gail.gail_control import Carla, CarlaDM, carla, World, RoadOption
from baselines.gail import mlp_policy
from baselines.common import set_global_seeds, tf_util as U
from baselines.common.misc_util import boolean_flag
from baselines import bench
from baselines import logger
from baselines.gail.dataset.mujoco_dset import Carla_Dset, Mujoco_Dset
from baselines.gail.adversary import TransitionClassifier
from baselines.logger import TensorBoardOutputFormat
from baselines.common.vec_env.vec_frame_stack import VecFrameStack
from baselines.common.vec_env.dummy_vec_env import DummyCarlaEnv
from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv
from baselines.common.vec_env.vec_monitor import VecMonitor
from copy import copy
from baselines.common import explained_variance, zipsame, dataset, fmt_row
from contextlib import contextmanager
from baselines.common import colorize
import time
from baselines.common.cg import cg
from collections import deque
from tensorflow.contrib.tensorboard.plugins import projector
def argsparser():
parser = argparse.ArgumentParser("Tensorflow Implementation of GAIL")
parser.add_argument('--sync', help='whether to sync server to client',action='store_true')
parser.add_argument('--env_id', help='environment ID', default='Carla-Motion')
parser.add_argument('--seed', help='RNG seed', type=int, default=0)
parser.add_argument('--expert_path', type=str, default='../log/easy.pkl')
parser.add_argument('--checkpoint_dir', help='the directory to save model', default='checkpoint')
parser.add_argument('--log_dir', help='the directory to save log file', default='log')
parser.add_argument('--load_model_path', help='if provided, load the model', type=str, default=None)
# Task
parser.add_argument('--task', type=str, choices=['train', 'evaluate', 'sample', 'bc_evaluate', 'generate_data'], default='train')
# for evaluation
#boolean_flag(parser, 'stochastic_policy', default=False, help='use stochastic/deterministic policy to evaluate')
#boolean_flag(parser, 'save_sample', default=False, help='save the trajectories or not')
parser.add_argument('--stochastic_policy',action='store_true', help='use stochastic/deterministic policy to evaluate')
parser.add_argument('--save_sample',action='store_true',help='save the trajectories or not')
# Mujoco Dataset Configuration
parser.add_argument('--traj_limitation', type=int, default=10, help='useless in carla')
# Optimization Configuration
parser.add_argument('--g_step', help='number of steps to train policy in each epoch', type=int, default=3)
parser.add_argument('--d_step', help='number of steps to train discriminator in each epoch', type=int, default=1)
# Network Configuration (Using MLP Policy)
parser.add_argument('--policy_hidden_size', type=int, default=100)
parser.add_argument('--policy_hidden_layer', type=int, default=2)
parser.add_argument('--adversary_hidden_size', type=int, default=100)
parser.add_argument('--adversary_hidden_layer', type=int, default=1)
parser.add_argument('--d_actv', type=str, default="tanh", help='Activation for discriminator, default is tanh')
parser.add_argument('--sn', action='store_false', help='Spectral normalization on Discriminator')
# Algorithms Configuration
parser.add_argument('--algo', type=str, choices=['trpo', 'ppo'], default='trpo')
parser.add_argument('--max_kl', type=float, default=0.01)
parser.add_argument('--policy_entcoeff', help='entropy coefficient of the policy', type=float, default=0)
parser.add_argument('--adversary_entcoeff', help='entropy coefficient of the discriminator', type=float, default=1e-3)
parser.add_argument('--d_lrate', type=float, default=0.00001)
# Training Configuration
parser.add_argument('--save_per_iter', help='save model every xx iterations', type=int, default=20)
parser.add_argument('--num_timesteps', help='number of timesteps per episode', type=int, default=0)
# Behavior Cloning
#boolean_flag(parser, 'pretrained', default=False, help='Use BC to pretrain')
parser.add_argument('--pretrained', action='store_true',help='Use BC to pretrain')
parser.add_argument('--BC_max_iter', help='Max iteration for training BC', type=int, default=20000)
# Carla settings
# parser.add_argument('--host', metavar='H', default='127.0.0.1', help='IP of the host server (default: 127.0.0.1)')
parser.add_argument('--host',default='127.0.0.1',type=str, help='IP of the host server (default: 127.0.0.1)')
parser.add_argument('--port',default='2000',type=str,help='TCP port to listen to (default: 2000)')
parser.add_argument('--A_skip',default=1,type=int,help="number of frames to skip per action step")
parser.add_argument('--D_skip',default=1,type=int,help="number of frames to skip per decision step")
parser.add_argument('--res', metavar='WIDTHxHEIGHT', default='1280x720', help='window resolution (default: 1280x720)')
parser.add_argument('--render', action='store_true', help='enable rendering')
parser.add_argument('--birdview',action='store_true',help='use bird view')
parser.add_argument('--draw',action='store_true',help='draw waypoints in the hud')
parser.add_argument('--num_trajectories', metavar='T', default=100, type=int, help='num of trajectories')
parser.add_argument('--episode_length', metavar='EL', default=200, type=int, help='max length of a trajectory in rollout data')
parser.add_argument('--num_length', metavar='L', default=200, type=int, help='max length of a trajectory in expert data')
parser.add_argument('--batch_size', default=2048, type=int, help='timesteps per batch')
parser.add_argument('--search', action='store_true', help='search for nearest expert path for training')
parser.add_argument('--stack', default=1, type=int, help='stack frames')
parser.add_argument('--search_mode', default='traj', type=str, help='search mode, default is nearest trajectory')
parser.add_argument('--scene', default='all', type=str, choices=['all', 'straight', 'curve'], help='training scene')
parser.add_argument('--mode', default='wp_obj', type=str, choices=['all', 'wp', 'wp_obj'], help='visible ranges for observation')
parser.add_argument('--speed_mode', default='mix', type=str,
help='speed mode')
parser.add_argument('--train_mode', default='all', type=str, choices=['all', 'steer'], help='choose which action to train')
parser.add_argument('--feature', default='lane_car', type=str, choices=['wp_car', 'lane_car'], help='features')
parser.add_argument('--d_model', default='origin', type=str, choices=['origin', "separate"], help='model for discriminator')
parser.add_argument('--p_update', action='store_false', help='policy update')
parser.add_argument('--rew_type', action='store_true', help='true reward to update')
parser.add_argument('--detect', action='store_true', help='whether to detect right or wrong')
parser.add_argument('--dim', default='3d', type=str, choices=['2d', '3d'], help='observations dimension')
parser.add_argument('--region', default=0.2, type=float, help='region for early reset')
parser.add_argument('--resampling', default=0, type=int, choices=[0, 4, 8, 12], help='resampling for increasing observation diversity')
parser.add_argument('--gamma', default=0.995, type=float, help='gamma discounting factor')
parser.add_argument('--dis_max', default=1.3, type=float, help='longest distance for lane sampling')
parser.add_argument('--r_norm', action='store_true', help='reward normalization')
parser.add_argument('--init_std', default=1., type=float, help='initial std')
parser.add_argument('--carla_path', default='/data/carla_091_compiled/', type=str, help='relative path of the folder of carlaUE4.sh')
parser.add_argument('--spawn_mode', default='random', type=str, choices=['fixed', 'random'], help='spawn mode')
parser.add_argument('--pretrain_std',action='store_true',help='pretrain std')
parser.add_argument('--still_std',action='store_true',help='hold std still during train')
parser.add_argument('--start_v',default=6.4,type=float,help='start velocity')
parser.add_argument('--max_iters', default=1000,type=int,help='max iters')
parser.add_argument('--sigma_pos', default=0.4,type=float,help='sigma for the track-position reward, modeled as a Gaussian distribution')
parser.add_argument('--sigma_vel_upper', default=3.6,type=float,help='upper sigma for the velocity reward')
parser.add_argument('--sigma_vel_lower', default=3.6,type=float,help='lower sigma for the velocity reward')
parser.add_argument('--sigma_ang', default=0.4,type=float,help='sigma for the angle reward')
parser.add_argument('--curriculumn_threshold', default=5,type=float,help='threshold for curriculum learning')
parser.add_argument('--other_cars',default=6,type=int,help='the number of other cars')
parser.add_argument('--model_output', default='DM', type=str, choices=['DM', 'CL'], help='model output, DM for the decision output, CL for the control output')
parser.add_argument('--excute_mode', default='short', type=str, choices=['short', 'long'], help='execute mode for the decision model')
parser.add_argument('--lanes', default=3, type=int, choices=[1, 3, 5, 7], help='total lane numbers')
parser.add_argument('--overtake_curriculum', default=0, type=int, help='overtake curriculum button')
parser.add_argument('--flag', default='default', type=str, help='custom tag appended to the task name')
parser.add_argument('--scenario', action='store_true', help='Whether to use the scenario runner')
parser.add_argument('--scenario_name', default='OtherLeadingVehicle', type=str, help='Scenarios')
parser.add_argument('--p_pos', default=0., type=float, choices=[0., 1.0, 0.1, 0.01, 0.001], help='Proportion parameter for the cte error in the PID controller')
parser.add_argument('--p_vel', default=1.0, type=float, choices=[1.0, 0.8, 0.6, 0.4, 0.2, 0.1, 0.05], help='Proportion parameter for the longitudinal control in the PID controller')
parser.add_argument('--actor_nums', default=1, type=int, choices=[1, 2, 4, 8, 16, 32, 64], help='Actor numbers')
parser.add_argument('--replay', action='store_true',
help='Replay the supervised data')
parser.add_argument('--checkkeys', default='ST_TL_KL40_TR_KMidlane30_KMidlane25', type=str, help='Initialization on keypoints')
parser.add_argument('--g_scratch_eps', default=0, type=int, help='The episode to start the curriculum training on keypoints')
parser.add_argument('--rule_based', action='store_true', help='Rule based decision module')
parser.add_argument('--update_D', action='store_false', help='whether to update the discriminator (passing this flag disables updates)')
args = parser.parse_args()
args.width, args.height = [int(x) for x in args.res.split('x')]
return args
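# Illustrative invocation only; the flag values and paths below are placeholders
# for this sketch, not a recommended configuration:
#   python parallel_carla_ray.py --task train --algo trpo \
#       --host 127.0.0.1 --port 2000 --expert_path ../log/easy.pkl \
#       --batch_size 2048 --g_step 3 --d_step 1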
def get_task_name(args):
import time
time_pre = time.strftime("%Y%m%d%H%M%S", time.localtime())
if args.rew_type:
task_name = time_pre+'_'+args.algo
else:
task_name = time_pre+'_'+args.algo+ "_GAIL."
if args.model_output == 'DM':
task_name = task_name+args.model_output+args.excute_mode+'L' +str(args.lanes)+'_'
elif args.model_output == 'CL':
task_name = task_name + args.model_output
if args.flag != 'default':
task_name += '%s_' % args.flag
task_name += args.checkkeys
task_name += '_'
task_name += 'CTE%sVel%s' % (str(args.p_pos), str(args.p_vel))
task_name += '%snenv' % len(args.host.split('_'))
task_name += '_%sSKIP' % str(args.D_skip)
task_name += '_%s' % args.scenario_name
task_name += '_CurSp%s' % args.g_scratch_eps
if args.r_norm:
task_name += 'Norm'
if args.pretrained:
task_name += "PreT%s." % args.BC_max_iter
if args.still_std:
task_name += "_still_std_"
if args.search:
task_name += "_Search%s_" % args.search_mode
if args.sn:
task_name += "SN_"
if not args.p_update:
task_name += 'NoP'
#task_name += 'RSig_%s_%s_%s_%s' % (args.sigma_pos, args.sigma_vel_upper, args.sigma_vel_lower, args.sigma_ang)
task_name += 'STD%s_'% args.init_std
#PLayer = args.policy_hidden_layer * 2 + 1
#task_name += 'D%s' % args.d_model
task_name += 'Reg%s_' % args.region
task_name += 'DLR%s_' % args.d_lrate
# task_name += 'MaxD%s_' % args.log_dis_max
# if args.search:
# task_name += 'Lim%s_' % args.traj_limitation
# task_name += 'GSize%s_' % args.policy_hidden_size
# task_name += 'PLay%s_' % PLayer
# task_name += 'DSize%s_' % args.adversary_hidden_size
#if args.d_model == "separate":
# adversary_hidden_layer = args.adversary_hidden_layer - 1
# DLayer = adversary_hidden_layer + 3
#else:
# adversary_hidden_layer = args.adversary_hidden_layer
# DLayer = adversary_hidden_layer + 2
#if args.resampling != 0:
# task_name += 'ReS_%s' % args.resampling
task_name += 'Batch%s_' % args.batch_size
task_name += '%s_' %args.scene
task_name += 'G%s'%args.gamma
task_name = task_name + ".G_" + str(args.g_step) + ".D_" + str(args.d_step) + \
".G_entcoeff_" + str(args.policy_entcoeff) + ".D_entcoeff_" + str(args.adversary_entcoeff) + \
".maxkl_" + str(args.max_kl)
task_name += ".seed_" + str(args.seed)
return task_name
def recover_args(args):
sigmas = {}
sigmas.update({'sigma_pos': args.sigma_pos})
sigmas.update({'sigma_vel_upper': args.sigma_vel_upper})
sigmas.update({'sigma_vel_lower': args.sigma_vel_lower})
sigmas.update({'sigma_ang': args.sigma_ang})
args.sigmas = sigmas
return args
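# learn() below aggregates per-worker episode statistics via MPI allgather and
# then calls flatten_lists(), which this file neither defines nor imports. A
# minimal sketch of the helper is added here; it is assumed to behave like the
# flatten_lists utility used elsewhere in baselines (e.g. in trpo_mpi).
def flatten_lists(listoflists):
    """Flatten a list of lists into a single flat list."""
    return [el for list_ in listoflists for el in list_]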
@ray.remote(num_gpus=1)
class Actor(object):
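    # Each Ray actor owns its own CARLA client(s), a local copy of the policy
    # and of the discriminator, plus a rollout Runner. The driver pulls rollout
    # batches via get_batch() and pushes fresh weights via update_params(); no
    # gradient computation happens inside the actor. (args.hosts / args.ports
    # are assumed to be filled in by the driver from the underscore-separated
    # --host / --port flags.)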
def __init__(self, args, actor_id):
# ------------------------------------------------------------------------
# initializing env and arguments
U.make_session(num_cpu=1).__enter__()
set_global_seeds(args.seed+1000*actor_id)
args = recover_args(args)
self.actor_id = actor_id
self.host = host = args.hosts[actor_id]
port = args.ports[actor_id]
assert len(host)==len(port), 'number of hosts and ports should match'
logger.p_pos = args.p_pos
logger.p_vel = args.p_vel
logger.keypoints = {"ST": [0], "TL": [10], "KL40": [30], "TR": [40], "KMidlane30": [66], "KMidlane25": [100]}
logger.keyframes = [0, 10, 30, 40, 66, 100]
logger.task = args.task
logger.scenario = args.scenario
logger.scenario_name = args.scenario_name
logger.actor_id = actor_id
checkkeys = args.checkkeys.split("_")
logger.checkkeys = checkkeys
def make_env(host, port, args):
client = carla.Client(host, port)
client.set_timeout(600.0)
carla_world = client.get_world()
assert not(args.A_skip != 1 and args.D_skip != 1)
world = World(carla_world, args.sync, args.sigmas, camera=args.render, A_skip=args.A_skip, mode=args.mode, feature=args.feature,
dim=args.dim, dis_max=args.dis_max, spawn_mode=args.spawn_mode, render=args.render, width=args.width, height=args.height,
other_cars=args.other_cars, curriculumn_threshold=args.curriculumn_threshold, max_lanes=args.lanes,
scenario_name=args.scenario_name, client=client, host=host, port=port, scenario=args.scenario, checkkeys=checkkeys)
args.log_dis_max = int(args.dis_max**16)
if args.task == "evaluate":
test = True
else:
test = False
if args.model_output == 'DM':
env = CarlaDM(world, args.episode_length, args.stack, args.train_mode, test, args.region, start_v=args.start_v, excute_mode=args.excute_mode,
D_skip=args.D_skip, overtake_curriculum=args.overtake_curriculum, scenario_name=args.scenario_name, g_scratch_eps=args.g_scratch_eps,
rule_based=args.rule_based)
elif args.model_output == 'CL':
env = Carla(world, args.episode_length, args.stack, args.train_mode, test, args.region, start_v=args.start_v, scenario_name=args.scenario_name)
gym.logger.setLevel(logging.WARN)
return env
# @Junning: menv wrapper
envs = [make_env(host[i], port[i], args) for i in range(len(host))]
env = DummyCarlaEnv(envs)
# wrapper for stacking frames
env = VecFrameStack(env, args.stack)
env = VecMonitor(env)
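        # VecMonitor wraps the vectorized env to record per-episode returns and
        # lengths for logging.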
class ENV(object):
def __init__(self, env):
self.observation_space = env.observation_space
self.action_space = env.action_space
args.env = ENV(env)
def policy_fn(name, ob_space, ac_space, reuse=False):
return mlp_policy.MlpPolicy(name=name, ob_space=ob_space, ac_space=ac_space,
reuse=reuse, hid_size=args.policy_hidden_size, num_hid_layers=args.policy_hidden_layer)
args.policy_func = policy_fn
task_name = get_task_name(args)
args.checkpoint_dir = osp.join(args.checkpoint_dir, task_name)
args.log_dir = osp.join(args.log_dir, task_name)
logger.init_std = args.init_std
datashape = env.observation_space.shape[0]
dataset = None
if not args.rew_type or args.pretrained: # Using TRPO
if args.env_id == "Carla-Motion":
dataset = Carla_Dset(expert_path=args.expert_path, traj_limitation=args.traj_limitation, num_trajectories=args.num_trajectories, num_length=args.num_length, data_shape=datashape, search=args.search, mode=args.mode, feature=args.feature, train_mode=args.train_mode, dim=args.dim, resampling=args.resampling, episode_length=args.episode_length)
else:
dataset = Mujoco_Dset(expert_path=args.expert_path, traj_limitation=args.traj_limitation)
logger.num_length = args.num_length
if dataset is not None:
logger.expert_acs = copy(dataset.acs_all[:args.num_length])
dis_name = "dis_%d" % actor_id
self.reward_giver = reward_giver = TransitionClassifier(env, args.adversary_hidden_size, entcoeff=args.adversary_entcoeff, adversary_hidden_layer=args.adversary_hidden_layer, sn=args.sn, d_actv=args.d_actv, lr_rate=args.d_lrate, model=args.d_model, train_mode=args.train_mode, dim=args.dim, scope=dis_name)
# ----------------------------------------------------------------------------
# initializing policy
ob_space = env.observation_space
ac_space = env.action_space
policy_func = policy_fn
pretrained_weight = None
pi_name = "pi_%d" % actor_id
self.pi_name = pi_name
self.pi = pi = policy_func(pi_name, ob_space, ac_space, reuse=(pretrained_weight != None))
# ----------------------------------------------------------------------------
# initializing model
from baselines.gail.trpo_runner import Model, Runner
if (args.load_model_path is not None and args.task == 'evaluate') or args.task == 'generate_data':
stochastic = args.stochastic_policy
else:
stochastic = True
print("Stochastic Policy: ", stochastic)
model = Model(pi, reward_giver, env.num_envs, stochastic=stochastic)
# ----------------------------------------------------------------------------
# initializing runner
expert_dataset = dataset
args.expert_dataset = expert_dataset
if expert_dataset is not None:
self.seg_gen = seg_gen = Runner(env, model, args.batch_size, gamma=args.gamma, lam=0.97, length=expert_dataset.num_length, rew_type=args.rew_type, model_output=args.model_output)
else:
self.seg_gen = seg_gen = Runner(env, model, args.batch_size, gamma=args.gamma, lam=0.97, length=logger.num_length, rew_type=args.rew_type, model_output=args.model_output)
# ----------------------------------------------------------------------------
# initializing parameters-updated operator
self.params_pi = self.pi.get_variables()
self.params_dis = self.reward_giver.get_trainable_variables()
self.params_pi_placeholders = [tf.placeholder(shape=param.shape, dtype=param.dtype) for param in self.params_pi]
self.params_dis_placeholders = [tf.placeholder(shape=param.shape, dtype=param.dtype) for param in self.params_dis]
self.assign_params_pi = [tf.assign(param, param_new) for param, param_new in zip(pi.get_variables(), self.params_pi_placeholders)]
self.assign_params_dis = [tf.assign(param, param_new) for param, param_new in zip(reward_giver.get_trainable_variables(), self.params_dis_placeholders)]
args.wp_len = logger.wp_len
args.obj_len = logger.obj_len
args.road_len = logger.road_len
args.ctrl_len = logger.ctrl_len
args.egoa_idx = logger.egoa_idx
args.egov_idx = logger.egov_idx
args.zombiea_idx = logger.zombiea_idx
args.zombiev_idx = logger.zombiev_idx
args.zombiebx_idx = logger.zombiebx_idx
self.args = args
U.initialize()
def get_params(self):
return [self.pi.get_variables(), self.reward_giver.get_trainable_variables()]
def update_params(self, params):
# note that params should be numpy array
params_pi, params_dis = params[0], params[1]
try:
sess = tf.get_default_session()
sess.run(self.assign_params_pi, feed_dict={pholder: param_pi for pholder, param_pi in zip(self.params_pi_placeholders, params_pi)})
sess.run(self.assign_params_dis, feed_dict={pholder: param_dis for pholder, param_dis in zip(self.params_dis_placeholders, params_dis)})
return "Succeed"
except Exception:
return "Failed"
def get_args(self):
return self.args
def get_batch(self):
@contextmanager
def timed(msg):
rank = MPI.COMM_WORLD.Get_rank()
if rank == 0:
print(colorize(msg, color='magenta'))
tstart = time.time()
yield
print(colorize("done in %.3f seconds" % (time.time() - tstart), color='magenta'))
else:
yield
with timed("sampling"):
batch = self.seg_gen.run()
log_print = "================= %s =================" %(str(self.host))
logger.log(log_print)
return batch
from gym import spaces
from baselines.common.mpi_adam import MpiAdam
@ray.remote(num_gpus=1)
class Learner(object):
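    # The Learner builds the TRPO surrogate losses, the GAIL discriminator and
    # the value-function optimiser once in __init__, then repeatedly consumes
    # actor rollouts in learn(): a natural-gradient policy step (conjugate
    # gradient + line search), Adam updates for the value function, and MpiAdam
    # updates for the discriminator.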
def __init__(self, args):
self.g_steps_now = 0
self.args = args
set_global_seeds(args.seed)
logger.keypoints = {"ST": [0], "TL": [10], "KL40": [30], "TR": [40], "KMidlane30": [66], "KMidlane25": [100]}
logger.keyframes = [0, 10, 30, 40, 66, 100]
logger.ckpt_dir_name = args.checkpoint_dir
logger.configure(args.checkpoint_dir, format_strs=['stdout', 'json', 'log', 'tensorboard'])
logger.log('Spawn mode: '+str(args.spawn_mode))
logger.log('Trajs: '+str(args.num_trajectories))
logger.log('Rollout data episode Length: '+str(args.episode_length))
logger.log('Expert data episode Length: '+str(args.num_length))
logger.log('Stack: '+str(args.stack))
logger.log('Mode: '+str(args.mode))
logger.log('Train mode: '+str(args.train_mode))
logger.log('Feature: '+str(args.feature))
logger.log('dim: '+str(args.dim))
logger.log('Pretrain_std: '+str(args.pretrain_std))
logger.log('Expected start velocity: '+str(args.start_v))
logger.log('Decision skip: '+str(args.D_skip))
logger.log('Action skip: '+str(args.A_skip))
logger.log('Overtake curriculum: '+ str(args.overtake_curriculum))
logger.wp_len = args.wp_len
logger.obj_len = args.obj_len
logger.road_len = args.road_len
logger.ctrl_len = args.ctrl_len
logger.egoa_idx = args.egoa_idx
logger.egov_idx = args.egov_idx
logger.zombiea_idx = args.zombiea_idx
logger.zombiev_idx = args.zombiev_idx
logger.zombiebx_idx = args.zombiebx_idx
logger.init_std = args.init_std
adversary_hidden_layer = args.adversary_hidden_layer
DLayer = adversary_hidden_layer + 2
logger.log('DLay: '+str(DLayer))
logger.log('DAtv: '+str(args.d_actv))
self.expert_dataset = args.expert_dataset
self.nworkers = MPI.COMM_WORLD.Get_size()
self.rank = rank = MPI.COMM_WORLD.Get_rank()
np.set_printoptions(precision=3)
# Setup losses and stuff
# ----------------------------------------
env = args.env
policy_func = args.policy_func
self.reward_giver = reward_giver = TransitionClassifier(env, args.adversary_hidden_size, entcoeff=args.adversary_entcoeff, adversary_hidden_layer=args.adversary_hidden_layer, sn=args.sn, d_actv=args.d_actv, lr_rate=args.d_lrate, model=args.d_model, train_mode=args.train_mode, dim=args.dim, scope="Learner")
ob_space = env.observation_space
ac_space = env.action_space
pretrained_weight = None
BC_max_iter = 10000
if self.args.pretrained and (BC_max_iter > 0):
# Pretrain with behavior cloning
from baselines.gail import behavior_clone
pretrained_weight = behavior_clone.learn(env, policy_func, self.expert_dataset,
max_iters=BC_max_iter, pretrain_std=self.args.pretrain_std)
self.pi = pi = policy_func("pi_learner", ob_space, ac_space, reuse=(pretrained_weight != None))
self.oldpi = oldpi = policy_func("oldpi_learner", ob_space, ac_space)
atarg = tf.placeholder(dtype=tf.float32, shape=[None]) # Target advantage function (if applicable)
ret = tf.placeholder(dtype=tf.float32, shape=[None]) # Empirical return
ob = U.get_placeholder_cached(name="ob")
ac = pi.pdtype.sample_placeholder([None])
kloldnew = oldpi.pd.kl(pi.pd)
ent = pi.pd.entropy()
meankl = tf.reduce_mean(kloldnew)
meanent = tf.reduce_mean(ent)
entbonus = args.policy_entcoeff * meanent
vferr = tf.reduce_mean(tf.square(pi.vpred - ret))
ratio = tf.exp(pi.pd.logp(ac) - oldpi.pd.logp(ac)) # advantage * pnew / pold
surrgain = tf.reduce_mean(ratio * atarg)
optimgain = surrgain + entbonus
if isinstance(env.action_space, spaces.MultiDiscrete):
losses = [optimgain, meankl, entbonus, surrgain, meanent]
self.loss_names = loss_names = ["optimgain", "meankl", "entloss", "surrgain", "entropy"]
elif isinstance(env.action_space, spaces.Box):
pi_mean = tf.reduce_mean(pi.pd.mean)
pi_std = tf.reduce_mean(pi.pd.std)
steer = tf.reduce_mean(pi.pd.mean[:, 0])
steer_std = tf.reduce_mean(pi.pd.std[:, 0])
if args.train_mode == "all":
throttle_brake = tf.reduce_mean(pi.pd.mean[:, 1])
throttle_brake_std = tf.reduce_mean(pi.pd.std[:, 1])
losses = [optimgain, meankl, entbonus, surrgain, meanent, pi_mean, pi_std, steer, throttle_brake, steer_std, throttle_brake_std]
self.loss_names = loss_names = ["optimgain", "meankl", "entloss", "surrgain", "entropy", "pi_mean", "pi_std", "steer", "throttle_brake", "steer_std", "throttle_brake_std"]
elif args.train_mode == "steer":
losses = [optimgain, meankl, entbonus, surrgain, meanent, pi_mean, pi_std, steer]
self.loss_names = loss_names = ["optimgain", "meankl", "entloss", "surrgain", "entropy", "pi_mean", "pi_std", "steer"]
dist = meankl
all_var_list = pi.get_trainable_variables()
if args.still_std:
var_list = [v for v in all_var_list if v.name.startswith("pi_learner/pol")]# or v.name.startswith("pi/logstd")]
else:
var_list = [v for v in all_var_list if v.name.startswith("pi_learner/pol") or v.name.startswith("pi_learner/logstd")]
vf_var_list = [v for v in all_var_list if v.name.startswith("pi_learner/vff")]
self.d_adam = d_adam = MpiAdam(reward_giver.get_trainable_variables())
self.vfadam = vfadam = MpiAdam(vf_var_list)
self.get_flat = get_flat = U.GetFlat(var_list)
self.set_from_flat = set_from_flat = U.SetFromFlat(var_list)
klgrads = tf.gradients(dist, var_list)
flat_tangent = tf.placeholder(dtype=tf.float32, shape=[None], name="flat_tan")
shapes = [var.get_shape().as_list() for var in var_list]
start = 0
tangents = []
for shape in shapes:
sz = U.intprod(shape)
tangents.append(tf.reshape(flat_tangent[start:start+sz], shape))
start += sz
gvp = tf.add_n([tf.reduce_sum(g*tangent) for (g, tangent) in zipsame(klgrads, tangents)]) # pylint: disable=E1111
fvp = U.flatgrad(gvp, var_list)
self.assign_old_eq_new = assign_old_eq_new = U.function([], [], updates=[tf.assign(oldv, newv)
for (oldv, newv) in zipsame(oldpi.get_variables(), pi.get_variables())])
self.compute_losses = compute_losses = U.function([ob, ac, atarg], losses)
self.compute_lossandgrad = compute_lossandgrad = U.function([ob, ac, atarg], losses + [U.flatgrad(optimgain, var_list)])
self.compute_fvp = compute_fvp = U.function([flat_tangent, ob, ac, atarg], fvp)
self.compute_vflossandgrad = U.function([ob, ret], U.flatgrad(vferr, vf_var_list))
U.initialize()
# ----------------------------------------------------------------------------
# load model
if args.load_model_path is not None:
self.load_model()
# ----------------------------------------------------------------------------
th_init = get_flat()
MPI.COMM_WORLD.Bcast(th_init, root=0)
set_from_flat(th_init)
d_adam.sync()
vfadam.sync()
if rank == 0:
print("Init param sum", th_init.sum(), flush=True)
self.saver = tf.train.Saver(max_to_keep=5000)
self.saver_best = tf.train.Saver()
model_init = os.path.join(self.args.checkpoint_dir+'/models/', 'model_init')
self.saver.save(tf.get_default_session(), model_init)
self.eptruerew_best = 0
self.episodes_so_far = 0
self.timesteps_so_far = 0
self.iters_so_far = 0
self.tstart = time.time()
self.lenbuffer = deque(maxlen=100) # rolling buffer for episode lengths
self.rewbuffer = deque(maxlen=100) # rolling buffer for episode rewards
self.true_rewbuffer = deque(maxlen=100)
self.ep_vs_global = deque(maxlen=40)
self.ep_rets_global = deque(maxlen=40)
self.ep_true_rets_global = deque(maxlen=40)
def learn(self, batches, max_kl=0.01, cg_iters=10, cg_damping=0.1, vf_iters=5, vf_stepsize=1e-3, d_stepsize=3e-4):
@contextmanager
def timed(msg):
if self.rank == 0:
print(colorize(msg, color='magenta'))
tstart = time.time()
yield
print(colorize("done in %.3f seconds" % (time.time() - tstart), color='magenta'))
else:
yield
def allmean(x):
assert isinstance(x, np.ndarray)
out = np.empty_like(x)
MPI.COMM_WORLD.Allreduce(x, out, op=MPI.SUM)
out /= self.nworkers
return out
def fisher_vector_product(p):
return allmean(self.compute_fvp(p, *fvpargs)) + cg_damping * p
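        # Standard TRPO update from here on: solve F s = g with conjugate
        # gradient, where the Fisher matrix F is only accessed through
        # fisher_vector_product; rescale s so the quadratic KL estimate equals
        # max_kl; then backtrack the step size until the surrogate objective
        # improves and KL stays within ~1.5 * max_kl.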
def outstr(v_x, v_y, a_x, a_y, steer, throttle):
return "Vx"+str(round(v_x, 1))+"_"+"Vy"+str(round(v_y, 1))+"_"+"Ax"+str(round(a_x, 1))+"_"+"Ay"+str(round(a_y, 1))+"_"+"St"+str(round(steer, 1))+"_"+"Th"+str(round(throttle, 1))
#for batch in batches:
seg = batches
logger.log("Optimizing Policy...")
for _ in range(1):
# reward normalization
# if r_norm:
# rew_norm.update(seg["rew"])
# seg["rew"] = (seg["rew"] - rew_norm.mean) / rew_norm.var
ob, current_pos, yaw, ac, atarg, tdlamret, done = seg["ob"], seg["current_pos"], seg["yaw"], seg["ac"], seg[
"adv"], seg["tdlamret"], seg["new"]
ob_start = ob[0]
vpredbefore = seg["vpred"] # predicted value function before update
atarg = (atarg - atarg.mean()) / atarg.std() # standardized advantage function estimate
if hasattr(self.pi, "ob_rms"): self.pi.ob_rms.update(ob) # update running mean/std for policy
args = seg["ob"], seg["ac"], atarg
fvpargs = [arr[::5] for arr in args]
self.assign_old_eq_new() # set old parameter values to new parameter values
if self.args.p_update:
with timed("computegrad"):
*lossbefore, g = self.compute_lossandgrad(*args)
lossbefore = allmean(np.array(lossbefore))
g = allmean(g)
g_policy = g
if np.allclose(g, 0):
logger.log("Got zero gradient. not updating")
else:
with timed("cg"):
stepdir = cg(fisher_vector_product, g, cg_iters=cg_iters, verbose=self.rank == 0)
assert np.isfinite(stepdir).all()
shs = .5*stepdir.dot(fisher_vector_product(stepdir))
lm = np.sqrt(shs / max_kl)
# logger.log("lagrange multiplier:", lm, "gnorm:", np.linalg.norm(g))
fullstep = stepdir / lm
expectedimprove = g.dot(fullstep)
surrbefore = lossbefore[0]
stepsize = 1.0
thbefore = self.get_flat()
for _ in range(10):
thnew = thbefore + fullstep * stepsize
self.set_from_flat(thnew)
meanlosses = surr, kl, *_ = allmean(np.array(self.compute_losses(*args)))
improve = surr - surrbefore
logger.log("Expected: %.3f Actual: %.3f" % (expectedimprove, improve))
if not np.isfinite(meanlosses).all():
logger.log("Got non-finite value of losses -- bad!")
elif kl > max_kl * 1.5:
logger.log("violated KL constraint. shrinking step.")
elif improve < 0:
logger.log("surrogate didn't improve. shrinking step.")
else:
logger.log("Stepsize OK!")
break
stepsize *= .5
else:
logger.log("couldn't compute a good step")
self.set_from_flat(thbefore)
if self.nworkers > 1 and self.iters_so_far % 20 == 0:
paramsums = MPI.COMM_WORLD.allgather((thnew.sum(), self.vfadam.getflat().sum())) # list of tuples
assert all(np.allclose(ps, paramsums[0]) for ps in paramsums[1:])
with timed("vf"):
for _ in range(vf_iters):
for (mbob, mbret) in dataset.iterbatches((seg["ob"], seg["tdlamret"]),
include_final_partial_batch=False, batch_size=128):
if hasattr(self.pi, "ob_rms"):
self.pi.ob_rms.update(mbob) # update running mean/std for policy
g = allmean(self.compute_vflossandgrad(mbob, mbret))
self.vfadam.update(g, vf_stepsize)
g_vf = g
g_losses = meanlosses
for (lossname, lossval) in zip(self.loss_names, meanlosses):
logger.record_tabular(lossname, lossval)
logger.record_tabular("ev_tdlam_before", explained_variance(vpredbefore, tdlamret))
ep_v, ep_acc, ep_left_offset, ep_right_offset = seg["ep_v"], seg["ep_acc"], seg["ep_left_offset"], seg["ep_right_offset"]
self.g_steps_now = self.g_steps_now + 1
if self.expert_dataset is not None and self.g_steps_now == self.args.g_step:
logger.log("Optimizing Discriminator...")
logger.log(fmt_row(13, self.reward_giver.loss_name))
ob_expert, ac_expert = self.expert_dataset.get_next_batch(len(ob))
batch_size = len(ob) // self.args.d_step
d_losses = [] # list of tuples, each of which gives the loss for a minibatch
if self.args.model_output == 'DM':
ac = seg["ctrl"]
for ob_batch, ac_batch in dataset.iterbatches((ob, ac),
include_final_partial_batch=False,
batch_size=batch_size):
if not self.args.p_update:
with timed("just update discriminator"):
ob_expert, ac_expert, search_prop = self.expert_dataset.obs, self.expert_dataset.acs, 0
elif self.args.search:
with timed("searching batch"):
if self.args.search_mode == 'step':
ob_expert, ac_expert, search_prop = self.expert_dataset.search_batch_step(ob_batch, ac_batch)
elif self.args.search_mode == 'traj':
ob_expert, ac_expert, search_prop = self.expert_dataset.search_batch_traj(ob_start, batch_size, scene=self.args.scene)
else:
ob_expert, ac_expert = self.expert_dataset.get_next_batch(len(ob_batch), scene=self.args.scene)
# update running mean/std for reward_giver
if self.args.update_D:
if hasattr(self.reward_giver, "obs_rms"): self.reward_giver.obs_rms.update(np.concatenate((ob_batch, ob_expert), 0))
*newlosses, g = self.reward_giver.lossandgrad(ob_batch, ac_batch, ob_expert, ac_expert)
g_d = g
self.d_adam.update(allmean(g), d_stepsize)
d_losses.append(newlosses)
if self.args.update_D:
logger.log(fmt_row(13, np.mean(d_losses, axis=0)))
d_losses_name = self.reward_giver.loss_name
d_losses_data = np.mean(d_losses, axis=0)
kvs = [{name: data} for name, data in zip(d_losses_name, d_losses_data)]
for kv in kvs:
for k, v in kv.items():
logger.record_tabular(k, v)
self.g_steps_now = 0
if self.g_steps_now == 0 or self.args.g_step==1:
lrlocal = (seg["ep_true_lens"], seg["ep_lens"], seg["ep_rets"], seg["ep_true_rets"]) # local values
listoflrpairs = MPI.COMM_WORLD.allgather(lrlocal) # list of tuples
true_lens, lens, rews, true_rets = map(flatten_lists, zip(*listoflrpairs))
self.true_rewbuffer.extend(true_rets)
self.lenbuffer.extend(lens)
self.rewbuffer.extend(rews)
# Save model
eptruerew_now = np.mean(true_rets)
ckpt_dir = self.args.checkpoint_dir
if self.rank == 0 and self.iters_so_far % self.args.save_per_iter == 0 and ckpt_dir is not None:
modelname = 'model%d.ckpt'%self.iters_so_far
fname = os.path.join(ckpt_dir+'/models/', modelname)
os.makedirs(os.path.dirname(fname), exist_ok=True)
self.saver.save(tf.get_default_session(), fname)
if self.rank == 0 and ckpt_dir is not None and eptruerew_now > self.eptruerew_best:
modelname = 'modelbest.ckpt'
fname = os.path.join(ckpt_dir+'/models', modelname)
os.makedirs(os.path.dirname(fname), exist_ok=True)
self.saver_best.save(tf.get_default_session(), fname)
self.eptruerew_best = eptruerew_now
eptruerew_last = eptruerew_now
logger.record_tabular("EpLenMean", np.mean(self.lenbuffer))
logger.record_tabular("EpLenMax", np.max(self.lenbuffer))
if self.expert_dataset is not None:
logger.record_tabular("EpRewMean", np.mean(self.rewbuffer))
logger.record_tabular("EpRewMax", np.max(self.rewbuffer))
logger.record_tabular("EpTrueRewMean", np.mean(self.true_rewbuffer))
logger.record_tabular("EpTrueRewMax", np.max(self.true_rewbuffer))
logger.record_tabular("EpThisIter", len(true_lens))
if self.args.p_update:
logger.record_tabular("EpVelocity", ep_v)
logger.record_tabular("EpAcc", ep_acc)
logger.record_tabular("EpLeftOffset", ep_left_offset)
logger.record_tabular("EpRightOffset", ep_right_offset)
corr_rew = np.corrcoef([seg["rew"], seg["truerew"]])[0][1]
ep_rets = [ret for ret in seg["ep_rets"]]
min_len = min(len(seg["v_ep"]), len(seg["ep_true_rets"]), len(ep_rets))
for i in range(min_len):
self.ep_vs_global.append(seg["v_ep"][i])
self.ep_rets_global.append(ep_rets[i])
self.ep_true_rets_global.append(seg["ep_true_rets"][i])
corr_eprew = np.corrcoef([self.ep_vs_global, self.ep_rets_global])[0][1]
corr_eptruerew = np.corrcoef([self.ep_vs_global, self.ep_true_rets_global])[0][1]
logger.record_tabular("CorrRew", corr_rew)
logger.record_tabular("CorrEpRew", corr_eprew)
logger.record_tabular("CorrEpTrueRew", corr_eptruerew)
self.episodes_so_far += len(true_lens)
self.timesteps_so_far += sum(true_lens)
self.iters_so_far += 1
logger.record_tabular("EpisodesSoFar", self.episodes_so_far)
logger.record_tabular("TimestepsSoFar", self.timesteps_so_far)
logger.record_tabular("TimeElapsed", time.time() - self.tstart)
logger.log("")
# --------------------------------------------------------
# evaluate module
# --------------------------------------------------------
if not self.args.rew_type and self.args.scenario:
keypoints = logger.keypoints
v_x_expert_dict = dict();
v_y_expert_dict = dict();
a_x_expert_dict = dict();
a_y_expert_dict = dict()
v_x_generator_dict = dict();
v_y_generator_dict = dict();
a_x_generator_dict = dict();
a_y_generator_dict = dict()
v_x_relative_dict = dict();
v_y_relative_dict = dict();
a_x_relative_dict = dict();
a_y_relative_dict = dict()
v_x_expert_zombie_0_dict = dict();
v_y_expert_zombie_0_dict = dict();
a_x_expert_zombie_0_dict = dict();
a_y_expert_zombie_0_dict = dict()
v_x_generator_zombie_0_dict = dict();
v_y_generator_zombie_0_dict = dict();
a_x_generator_zombie_0_dict = dict();
a_y_generator_zombie_0_dict = dict()
v_x_relative_zombie_0_dict = dict();
v_y_relative_zombie_0_dict = dict();
a_x_relative_zombie_0_dict = dict();
a_y_relative_zombie_0_dict = dict()
v_x_expert_zombie_1_dict = dict();
v_y_expert_zombie_1_dict = dict();
a_x_expert_zombie_1_dict = dict();
a_y_expert_zombie_1_dict = dict()
v_x_generator_zombie_1_dict = dict();
v_y_generator_zombie_1_dict = dict();
a_x_generator_zombie_1_dict = dict();
a_y_generator_zombie_1_dict = dict()
v_x_relative_zombie_1_dict = dict();
v_y_relative_zombie_1_dict = dict();
a_x_relative_zombie_1_dict = dict();
a_y_relative_zombie_1_dict = dict()
box_p0_x_expert_zombie_0_dict = dict();
box_p0_y_expert_zombie_0_dict = dict();
box_p1_x_expert_zombie_0_dict = dict();
box_p1_y_expert_zombie_0_dict = dict();
box_p2_x_expert_zombie_0_dict = dict();
box_p2_y_expert_zombie_0_dict = dict();
box_p3_x_expert_zombie_0_dict = dict();
box_p3_y_expert_zombie_0_dict = dict()
box_p0_x_generator_zombie_0_dict = dict();
box_p0_y_generator_zombie_0_dict = dict();
box_p1_x_generator_zombie_0_dict = dict();
box_p1_y_generator_zombie_0_dict = dict();
box_p2_x_generator_zombie_0_dict = dict();
box_p2_y_generator_zombie_0_dict = dict();
box_p3_x_generator_zombie_0_dict = dict();
box_p3_y_generator_zombie_0_dict = dict()
box_p0_x_relative_zombie_0_dict = dict();
box_p0_y_relative_zombie_0_dict = dict();
box_p1_x_relative_zombie_0_dict = dict();
box_p1_y_relative_zombie_0_dict = dict();
box_p2_x_relative_zombie_0_dict = dict();
box_p2_y_relative_zombie_0_dict = dict();
box_p3_x_relative_zombie_0_dict = dict();
box_p3_y_relative_zombie_0_dict = dict()
box_p0_x_expert_zombie_1_dict = dict();
box_p0_y_expert_zombie_1_dict = dict();
box_p1_x_expert_zombie_1_dict = dict();
box_p1_y_expert_zombie_1_dict = dict();
box_p2_x_expert_zombie_1_dict = dict();
box_p2_y_expert_zombie_1_dict = dict();
box_p3_x_expert_zombie_1_dict = dict();
box_p3_y_expert_zombie_1_dict = dict()
box_p0_x_generator_zombie_1_dict = dict();
box_p0_y_generator_zombie_1_dict = dict();
box_p1_x_generator_zombie_1_dict = dict();
box_p1_y_generator_zombie_1_dict = dict();
box_p2_x_generator_zombie_1_dict = dict();
box_p2_y_generator_zombie_1_dict = dict();
box_p3_x_generator_zombie_1_dict = dict();
box_p3_y_generator_zombie_1_dict = dict()
box_p0_x_relative_zombie_1_dict = dict();
box_p0_y_relative_zombie_1_dict = dict();
box_p1_x_relative_zombie_1_dict = dict();
box_p1_y_relative_zombie_1_dict = dict();
box_p2_x_relative_zombie_1_dict = dict();
box_p2_y_relative_zombie_1_dict = dict();
box_p3_x_relative_zombie_1_dict = dict();
box_p3_y_relative_zombie_1_dict = dict()
abs_x_expert_dict = dict();
abs_y_expert_dict = dict();
abs_z_expert_dict = dict();
yaw_expert_dict = dict()
abs_x_generator_dict = dict();
abs_y_generator_dict = dict();
abs_z_generator_dict = dict();
yaw_generator_dict = dict()
abs_x_relative_dict = dict();
abs_y_relative_dict = dict();
abs_z_relative_dict = dict();
yaw_relative_dict = dict()
statistic_len = 40
for key in keypoints.keys():
stage = key
for keypoint in keypoints[stage]:
# saliency test
ob_expert_evaluate, ac_expert_evaluate, abspos_yaw_expert_evaluate = self.expert_dataset.obs_all[
keypoint].reshape(1, -1), \
self.expert_dataset.acs_all[
keypoint].reshape(1, -1), \
self.expert_dataset.obs_abspos_yaw_all[
keypoint].reshape(1, -1)
ob_generator_evaluate, ac_generator_evaluate = ob_batch[0].reshape(1, -1), ac_batch[0].reshape(1,
-1)
saliencies = self.reward_giver.get_saliency(ob_expert_evaluate, ac_expert_evaluate,
ob_generator_evaluate, ac_generator_evaluate)
# ob_batch.shape (256, 306)
for k, v in saliencies.items():
# logger.record_tabular(k, v)
logger.record_tabular(stage + "_" + str(keypoint) + "_" + k, v)
# counterfactual test
v_x, v_y = logger.egov_idx[0], logger.egov_idx[0]+1
a_x, a_y = logger.egoa_idx[0], logger.egoa_idx[0]+1
zombie_0_v_x, zombie_0_v_y, zombie_1_v_x, zombie_1_v_y = logger.zombiev_idx[0], logger.zombiev_idx[
0] + 1, logger.zombiev_idx[0] + 3, logger.zombiev_idx[0] + 4 # 3 x 6
zombie_0_a_x, zombie_0_a_y, zombie_1_a_x, zombie_1_a_y = logger.zombiea_idx[0], logger.zombiea_idx[
0] + 1, logger.zombiea_idx[0] + 3, logger.zombiea_idx[0] + 4 # 3 x 6
zombie_0_box_p0_x, zombie_0_box_p0_y, zombie_0_box_p0_z, zombie_0_box_p1_x, zombie_0_box_p1_y, zombie_0_box_p1_z, zombie_0_box_p2_x, zombie_0_box_p2_y, zombie_0_box_p2_z, zombie_0_box_p3_x, zombie_0_box_p3_y, zombie_0_box_p3_z = \
logger.zombiebx_idx[0], logger.zombiebx_idx[0] + 1, logger.zombiebx_idx[0] + 2, \
logger.zombiebx_idx[
0] + 3, logger.zombiebx_idx[0] + 4, logger.zombiebx_idx[0] + 5, logger.zombiebx_idx[0] + 6, \
logger.zombiebx_idx[0] + 7, logger.zombiebx_idx[0] + 8, logger.zombiebx_idx[0] + 9, \
logger.zombiebx_idx[0] + 10, logger.zombiebx_idx[0] + 11 # 12 x 6
zombie_1_box_p0_x, zombie_1_box_p0_y, zombie_1_box_p0_z, zombie_1_box_p1_x, zombie_1_box_p1_y, zombie_1_box_p1_z, zombie_1_box_p2_x, zombie_1_box_p2_y, zombie_1_box_p2_z, zombie_1_box_p3_x, zombie_1_box_p3_y, zombie_1_box_p3_z = \
logger.zombiebx_idx[0] + 12, logger.zombiebx_idx[0] + 13, logger.zombiebx_idx[0] + 14, \
logger.zombiebx_idx[0] + 15, logger.zombiebx_idx[0] + 16, logger.zombiebx_idx[0] + 17, \
logger.zombiebx_idx[0] + 18, logger.zombiebx_idx[0] + 19, logger.zombiebx_idx[0] + 20, \
logger.zombiebx_idx[0] + 21, logger.zombiebx_idx[0] + 22, logger.zombiebx_idx[0] + 23 # 12 x 6
# print('v_x: ', v_x, 'v_y: ', v_y, 'a_x: ', a_x, 'a_y: ', a_y, 'logger.zombiev_idx[0]: ', logger.zombiev_idx[0], logger.zombiev_idx[0]+4, 'logger.zombiea_idx[0]: ', logger.zombiea_idx[0], logger.zombiea_idx[0]+4,
# 'logger.zombiebx_idx[0]: ', logger.zombiebx_idx[0], logger.zombiebx_idx[0]+11, 'logger.zombiebx_idx[0]: ', logger.zombiebx_idx[0]+12, logger.zombiebx_idx[0]+23,)
# # abs_x_idx, abs_y_idx, abs_z_idx, yaw_idx = logger.zombiev_idx[1], logger.zombiev_idx[1]+1, logger.zombiev_idx[1]+2, logger.zombiev_idx[1]+3
all_dict = [v_x_expert_dict, v_y_expert_dict, a_x_expert_dict, a_y_expert_dict, v_x_generator_dict,
v_y_generator_dict, a_x_generator_dict, a_y_generator_dict, v_x_relative_dict,
v_y_relative_dict, a_x_relative_dict, a_y_relative_dict,
v_x_expert_zombie_0_dict, v_y_expert_zombie_0_dict, a_x_expert_zombie_0_dict,
a_y_expert_zombie_0_dict,
v_x_generator_zombie_0_dict, v_y_generator_zombie_0_dict, a_x_generator_zombie_0_dict,
a_y_generator_zombie_0_dict,
v_x_relative_zombie_0_dict, v_y_relative_zombie_0_dict, a_x_relative_zombie_0_dict,
a_y_relative_zombie_0_dict,
v_x_expert_zombie_1_dict, v_y_expert_zombie_1_dict, a_x_expert_zombie_1_dict,
a_y_expert_zombie_1_dict,
v_x_generator_zombie_1_dict, v_y_generator_zombie_1_dict, a_x_generator_zombie_1_dict,
a_y_generator_zombie_1_dict,
v_x_relative_zombie_1_dict, v_y_relative_zombie_1_dict, a_x_relative_zombie_1_dict,
a_y_relative_zombie_1_dict,
box_p0_x_expert_zombie_0_dict, box_p0_y_expert_zombie_0_dict,
box_p1_x_expert_zombie_0_dict, box_p1_y_expert_zombie_0_dict,
box_p2_x_expert_zombie_0_dict, box_p2_y_expert_zombie_0_dict,
box_p3_x_expert_zombie_0_dict, box_p3_y_expert_zombie_0_dict,
box_p0_x_generator_zombie_0_dict, box_p0_y_generator_zombie_0_dict,
box_p1_x_generator_zombie_0_dict, box_p1_y_generator_zombie_0_dict,
box_p2_x_generator_zombie_0_dict, box_p2_y_generator_zombie_0_dict,
box_p3_x_generator_zombie_0_dict, box_p3_y_generator_zombie_0_dict,
box_p0_x_relative_zombie_0_dict, box_p0_y_relative_zombie_0_dict,
box_p1_x_relative_zombie_0_dict, box_p1_y_relative_zombie_0_dict,
box_p2_x_relative_zombie_0_dict, box_p2_y_relative_zombie_0_dict,
box_p3_x_relative_zombie_0_dict, box_p3_y_relative_zombie_0_dict,
box_p0_x_expert_zombie_1_dict, box_p0_y_expert_zombie_1_dict,
box_p1_x_expert_zombie_1_dict, box_p1_y_expert_zombie_1_dict,
box_p2_x_expert_zombie_1_dict,
box_p2_y_expert_zombie_1_dict, box_p3_x_expert_zombie_1_dict,
box_p3_y_expert_zombie_1_dict,
box_p0_x_generator_zombie_1_dict, box_p0_y_generator_zombie_1_dict,
box_p1_x_generator_zombie_1_dict, box_p1_y_generator_zombie_1_dict,
box_p2_x_generator_zombie_1_dict, box_p2_y_generator_zombie_1_dict,
box_p3_x_generator_zombie_1_dict, box_p3_y_generator_zombie_1_dict,
box_p0_x_relative_zombie_1_dict, box_p0_y_relative_zombie_1_dict,
box_p1_x_relative_zombie_1_dict, box_p1_y_relative_zombie_1_dict,
box_p2_x_relative_zombie_1_dict, box_p2_y_relative_zombie_1_dict,
box_p3_x_relative_zombie_1_dict, box_p3_y_relative_zombie_1_dict,
abs_x_expert_dict, abs_y_expert_dict, abs_z_expert_dict, yaw_expert_dict,
abs_x_generator_dict, abs_y_generator_dict, abs_z_generator_dict, yaw_generator_dict,
abs_x_relative_dict, abs_y_relative_dict, abs_z_relative_dict, yaw_relative_dict,
]
for each_dict in all_dict:
if keypoint not in each_dict:
each_dict[keypoint] = list()
v_x_expert, v_y_expert, a_x_expert, a_y_expert = ob_expert_evaluate[0, v_x], ob_expert_evaluate[
0, v_y], \
ob_expert_evaluate[0, a_x], ob_expert_evaluate[
0, a_y]
# print('abspos_yaw_expert_evaluate: ', abspos_yaw_expert_evaluate)
v_x_expert_zombie_0, v_y_expert_zombie_0, a_x_expert_zombie_0, a_y_expert_zombie_0 = \
ob_expert_evaluate[0, zombie_0_v_x], ob_expert_evaluate[0, zombie_0_v_y], ob_expert_evaluate[0,
zombie_0_a_x], \
ob_expert_evaluate[0, zombie_0_a_y]
# print('zombie_1_v_x: ', zombie_1_v_x, 'zombie_1_v_y: ', zombie_1_v_y, 'zombie_1_a_x: ', zombie_1_a_x, 'zombie_1_a_y: ', zombie_1_a_y)
v_x_expert_zombie_1, v_y_expert_zombie_1, a_x_expert_zombie_1, a_y_expert_zombie_1 = \
ob_expert_evaluate[0, zombie_1_v_x], ob_expert_evaluate[0, zombie_1_v_y], ob_expert_evaluate[0,
zombie_1_a_x], \
ob_expert_evaluate[0, zombie_1_a_y]
box_p0_x_expert_zombie_0, box_p0_y_expert_zombie_0, box_p1_x_expert_zombie_0, box_p1_y_expert_zombie_0, box_p2_x_expert_zombie_0, box_p2_y_expert_zombie_0, box_p3_x_expert_zombie_0, box_p3_y_expert_zombie_0 = \
ob_expert_evaluate[0, zombie_0_box_p0_x], ob_expert_evaluate[
0, zombie_0_box_p0_y], ob_expert_evaluate[0,
zombie_0_box_p1_x], ob_expert_evaluate[
0, zombie_0_box_p1_y], ob_expert_evaluate[0,
zombie_0_box_p2_x], ob_expert_evaluate[
0, zombie_0_box_p2_y], ob_expert_evaluate[0,
zombie_0_box_p3_x], ob_expert_evaluate[
0, zombie_0_box_p3_y]
box_p0_x_expert_zombie_1, box_p0_y_expert_zombie_1, box_p1_x_expert_zombie_1, box_p1_y_expert_zombie_1, box_p2_x_expert_zombie_1, box_p2_y_expert_zombie_1, box_p3_x_expert_zombie_1, box_p3_y_expert_zombie_1 = \
ob_expert_evaluate[0, zombie_1_box_p0_x], ob_expert_evaluate[
0, zombie_1_box_p0_y], ob_expert_evaluate[0,
zombie_1_box_p1_x], ob_expert_evaluate[
0, zombie_1_box_p1_y], ob_expert_evaluate[0,
zombie_1_box_p2_x], ob_expert_evaluate[
0, zombie_1_box_p2_y], ob_expert_evaluate[0,
zombie_1_box_p3_x], ob_expert_evaluate[
0, zombie_1_box_p3_y]
abs_x_expert, abs_y_expert, abs_z_expert, yaw_expert = abspos_yaw_expert_evaluate[0, 0], \
abspos_yaw_expert_evaluate[0, 1], \
abspos_yaw_expert_evaluate[0, 2], \
abspos_yaw_expert_evaluate[0, 3]
for i in range(len(ob)):
abs_x_generator, abs_y_generator, abs_z_generator, yaw_generator = current_pos[i][0], \
current_pos[i][1], \
current_pos[i][2], \
yaw[i]
if abs(abs_x_generator - abs_x_expert) < 0.5 and abs(abs_y_generator - abs_y_expert) < 0.5:
# print('abs_x_generator - abs_x_expert: ', abs_x_generator - abs_x_expert,
# 'abs_y_generator - abs_y_expert: ', abs_y_generator - abs_y_expert)
#
# print('abs_x_generator, abs_y_generator, abs_z_generator, yaw_generator: ',
# abs_x_generator, abs_y_generator, abs_z_generator, yaw_generator, yaw_generator.item)
# print('abs_x_expert, abs_y_expert, abs_z_expert, yaw_expert: ',
# abs_x_expert, abs_y_expert, abs_z_expert, yaw_expert)
v_x_generator, v_y_generator, a_x_generator, a_y_generator = ob[i][v_x], ob[i][v_y], ob[i][
a_x], ob[i][a_y]
v_x_generator_zombie_0, v_y_generator_zombie_0, a_x_generator_zombie_0, a_y_generator_zombie_0 = \
ob[i][zombie_0_v_x], ob[i][zombie_0_v_y], ob[i][zombie_0_a_x], ob[i][zombie_0_a_y]
v_x_generator_zombie_1, v_y_generator_zombie_1, a_x_generator_zombie_1, a_y_generator_zombie_1 = \
ob[i][zombie_1_v_x], ob[i][zombie_1_v_y], ob[i][zombie_1_a_x], ob[i][zombie_1_a_y]
box_p0_x_generator_zombie_0, box_p0_y_generator_zombie_0, box_p1_x_generator_zombie_0, box_p1_y_generator_zombie_0, box_p2_x_generator_zombie_0, box_p2_y_generator_zombie_0, box_p3_x_generator_zombie_0, box_p3_y_generator_zombie_0 = \
ob[i][zombie_0_box_p0_x], ob[i][zombie_0_box_p0_y], ob[i][zombie_0_box_p1_x], ob[i][
zombie_0_box_p1_y], ob[i][zombie_0_box_p2_x], ob[i][zombie_0_box_p2_y], ob[i][
zombie_0_box_p3_x], ob[i][zombie_0_box_p3_y]
box_p0_x_generator_zombie_1, box_p0_y_generator_zombie_1, box_p1_x_generator_zombie_1, box_p1_y_generator_zombie_1, box_p2_x_generator_zombie_1, box_p2_y_generator_zombie_1, box_p3_x_generator_zombie_1, box_p3_y_generator_zombie_1 = \
ob[i][zombie_1_box_p0_x], ob[i][zombie_1_box_p0_y], ob[i][zombie_1_box_p1_x], ob[i][
zombie_1_box_p1_y], ob[i][zombie_1_box_p2_x], ob[i][zombie_1_box_p2_y], ob[i][
zombie_1_box_p3_x], ob[i][zombie_1_box_p3_y]
# print('v_x_generator, v_y_generator, a_x_generator, a_y_generator: ', v_x_generator, v_y_generator, a_x_generator, a_y_generator)
# print('v_x_generator_zombie_0, v_y_generator_zombie_0, a_x_generator_zombie_0, a_y_generator_zombie_0: ', v_x_generator_zombie_0, v_y_generator_zombie_0, a_x_generator_zombie_0, a_y_generator_zombie_0)
# print('v_x_generator_zombie_1, v_y_generator_zombie_1, a_x_generator_zombie_1, a_y_generator_zombie_1: ', v_x_generator_zombie_1, v_y_generator_zombie_1, a_x_generator_zombie_1, a_y_generator_zombie_1)
# print('box_p0_x_generator_zombie_0, box_p0_y_generator_zombie_0, box_p1_x_generator_zombie_0, box_p1_y_generator_zombie_0, box_p2_x_generator_zombie_0, box_p2_y_generator_zombie_0, box_p3_x_generator_zombie_0, box_p3_y_generator_zombie_0: ',
# box_p0_x_generator_zombie_0, box_p0_y_generator_zombie_0, box_p1_x_generator_zombie_0, box_p1_y_generator_zombie_0, box_p2_x_generator_zombie_0, box_p2_y_generator_zombie_0, box_p3_x_generator_zombie_0, box_p3_y_generator_zombie_0)
# print('box_p0_x_generator_zombie_1, box_p0_y_generator_zombie_1, box_p1_x_generator_zombie_1, box_p1_y_generator_zombie_1, box_p2_x_generator_zombie_1, box_p2_y_generator_zombie_1, box_p3_x_generator_zombie_1, box_p3_y_generator_zombie_1: ',
# box_p0_x_generator_zombie_1, box_p0_y_generator_zombie_1, box_p1_x_generator_zombie_1, box_p1_y_generator_zombie_1, box_p2_x_generator_zombie_1, box_p2_y_generator_zombie_1, box_p3_x_generator_zombie_1, box_p3_y_generator_zombie_1)
v_x_expert_dict[keypoint].append(v_x_expert)
v_y_expert_dict[keypoint].append(v_y_expert);
a_x_expert_dict[keypoint].append(a_x_expert);
a_y_expert_dict[keypoint].append(a_y_expert)
v_x_generator_dict[keypoint].append(v_x_generator);
v_y_generator_dict[keypoint].append(v_y_generator);
a_x_generator_dict[keypoint].append(a_x_generator);
a_y_generator_dict[keypoint].append(a_y_generator)
v_x_relative_dict[keypoint].append(v_x_expert - v_x_generator);
v_y_relative_dict[keypoint].append(v_y_expert - v_y_generator);
a_x_relative_dict[keypoint].append(a_x_expert - a_x_generator);
a_y_relative_dict[keypoint].append(a_y_expert - a_y_generator)
v_x_expert_zombie_0_dict[keypoint].append(v_x_expert_zombie_0);
v_y_expert_zombie_0_dict[keypoint].append(v_y_expert_zombie_0);
a_x_expert_zombie_0_dict[keypoint].append(a_x_expert_zombie_0);
a_y_expert_zombie_0_dict[keypoint].append(a_y_expert_zombie_0)
v_x_generator_zombie_0_dict[keypoint].append(v_x_generator_zombie_0);
v_y_generator_zombie_0_dict[keypoint].append(v_y_generator_zombie_0);
a_x_generator_zombie_0_dict[keypoint].append(a_x_generator_zombie_0);
a_y_generator_zombie_0_dict[keypoint].append(a_y_generator_zombie_0)
v_x_relative_zombie_0_dict[keypoint].append(v_x_expert_zombie_0 - v_x_generator_zombie_0);
v_y_relative_zombie_0_dict[keypoint].append(v_y_expert_zombie_0 - v_y_generator_zombie_0);
a_x_relative_zombie_0_dict[keypoint].append(a_x_expert_zombie_0 - a_x_generator_zombie_0);
a_y_relative_zombie_0_dict[keypoint].append(a_y_expert_zombie_0 - a_y_generator_zombie_0)
v_x_expert_zombie_1_dict[keypoint].append(v_x_expert_zombie_1);
v_y_expert_zombie_1_dict[keypoint].append(v_y_expert_zombie_1);
a_x_expert_zombie_1_dict[keypoint].append(a_x_expert_zombie_1);
a_y_expert_zombie_1_dict[keypoint].append(a_y_expert_zombie_1)
v_x_generator_zombie_1_dict[keypoint].append(v_x_generator_zombie_1);
v_y_generator_zombie_1_dict[keypoint].append(v_y_generator_zombie_1);
a_x_generator_zombie_1_dict[keypoint].append(a_x_generator_zombie_1);
a_y_generator_zombie_1_dict[keypoint].append(a_y_generator_zombie_1)
v_x_relative_zombie_1_dict[keypoint].append(v_x_expert_zombie_1 - v_x_generator_zombie_1);
v_y_relative_zombie_1_dict[keypoint].append(v_y_expert_zombie_1 - v_y_generator_zombie_1);
a_x_relative_zombie_1_dict[keypoint].append(a_x_expert_zombie_1 - a_x_generator_zombie_1);
a_y_relative_zombie_1_dict[keypoint].append(a_y_expert_zombie_1 - a_y_generator_zombie_1)
box_p0_x_expert_zombie_0_dict[keypoint].append(box_p0_x_expert_zombie_0);
box_p0_y_expert_zombie_0_dict[keypoint].append(box_p0_y_expert_zombie_0);
box_p1_x_expert_zombie_0_dict[keypoint].append(box_p1_x_expert_zombie_0);
box_p1_y_expert_zombie_0_dict[keypoint].append(box_p1_y_expert_zombie_0);
box_p2_x_expert_zombie_0_dict[keypoint].append(box_p2_x_expert_zombie_0);
box_p2_y_expert_zombie_0_dict[keypoint].append(box_p2_y_expert_zombie_0);
box_p3_x_expert_zombie_0_dict[keypoint].append(box_p3_x_expert_zombie_0);
box_p3_y_expert_zombie_0_dict[keypoint].append(box_p3_y_expert_zombie_0)
box_p0_x_generator_zombie_0_dict[keypoint].append(box_p0_x_generator_zombie_0);
box_p0_y_generator_zombie_0_dict[keypoint].append(box_p0_y_generator_zombie_0);
box_p1_x_generator_zombie_0_dict[keypoint].append(box_p1_x_generator_zombie_0);
box_p1_y_generator_zombie_0_dict[keypoint].append(box_p1_y_generator_zombie_0);
box_p2_x_generator_zombie_0_dict[keypoint].append(box_p2_x_generator_zombie_0);
box_p2_y_generator_zombie_0_dict[keypoint].append(box_p2_y_generator_zombie_0);
box_p3_x_generator_zombie_0_dict[keypoint].append(box_p3_x_generator_zombie_0);
box_p3_y_generator_zombie_0_dict[keypoint].append(box_p3_y_generator_zombie_0)
box_p0_x_relative_zombie_0_dict[keypoint].append(
box_p0_x_expert_zombie_0 - box_p0_x_generator_zombie_0);
box_p0_y_relative_zombie_0_dict[keypoint].append(
box_p0_y_expert_zombie_0 - box_p0_y_generator_zombie_0);
box_p1_x_relative_zombie_0_dict[keypoint].append(
box_p1_x_expert_zombie_0 - box_p1_x_generator_zombie_0);
box_p1_y_relative_zombie_0_dict[keypoint].append(
box_p1_y_expert_zombie_0 - box_p1_y_generator_zombie_0);
box_p2_x_relative_zombie_0_dict[keypoint].append(
box_p2_x_expert_zombie_0 - box_p2_x_generator_zombie_0);
box_p2_y_relative_zombie_0_dict[keypoint].append(
box_p2_y_expert_zombie_0 - box_p2_y_generator_zombie_0);
box_p3_x_relative_zombie_0_dict[keypoint].append(
box_p3_x_expert_zombie_0 - box_p3_x_generator_zombie_0);
box_p3_y_relative_zombie_0_dict[keypoint].append(
box_p3_y_expert_zombie_0 - box_p3_y_generator_zombie_0)
box_p0_x_expert_zombie_1_dict[keypoint].append(box_p0_x_expert_zombie_1);
box_p0_y_expert_zombie_1_dict[keypoint].append(box_p0_y_expert_zombie_1);
box_p1_x_expert_zombie_1_dict[keypoint].append(box_p1_x_expert_zombie_1);
box_p1_y_expert_zombie_1_dict[keypoint].append(box_p1_y_expert_zombie_1);
box_p2_x_expert_zombie_1_dict[keypoint].append(box_p2_x_expert_zombie_1);
box_p2_y_expert_zombie_1_dict[keypoint].append(box_p2_y_expert_zombie_1);
box_p3_x_expert_zombie_1_dict[keypoint].append(box_p3_x_expert_zombie_1);
box_p3_y_expert_zombie_1_dict[keypoint].append(box_p3_y_expert_zombie_1)
box_p0_x_generator_zombie_1_dict[keypoint].append(box_p0_x_generator_zombie_1);
box_p0_y_generator_zombie_1_dict[keypoint].append(box_p0_y_generator_zombie_1);
box_p1_x_generator_zombie_1_dict[keypoint].append(box_p1_x_generator_zombie_1);
box_p1_y_generator_zombie_1_dict[keypoint].append(box_p1_y_generator_zombie_1);
box_p2_x_generator_zombie_1_dict[keypoint].append(box_p2_x_generator_zombie_1);
box_p2_y_generator_zombie_1_dict[keypoint].append(box_p2_y_generator_zombie_1);
box_p3_x_generator_zombie_1_dict[keypoint].append(box_p3_x_generator_zombie_1);
box_p3_y_generator_zombie_1_dict[keypoint].append(box_p3_y_generator_zombie_1)
box_p0_x_relative_zombie_1_dict[keypoint].append(
box_p0_x_expert_zombie_1 - box_p0_x_generator_zombie_1);
box_p0_y_relative_zombie_1_dict[keypoint].append(
box_p0_y_expert_zombie_1 - box_p0_y_generator_zombie_1);
box_p1_x_relative_zombie_1_dict[keypoint].append(
box_p1_x_expert_zombie_1 - box_p1_x_generator_zombie_1);
box_p1_y_relative_zombie_1_dict[keypoint].append(
box_p1_y_expert_zombie_1 - box_p1_y_generator_zombie_1);
box_p2_x_relative_zombie_1_dict[keypoint].append(
box_p2_x_expert_zombie_1 - box_p2_x_generator_zombie_1);
box_p2_y_relative_zombie_1_dict[keypoint].append(
box_p2_y_expert_zombie_1 - box_p2_y_generator_zombie_1);
box_p3_x_relative_zombie_1_dict[keypoint].append(
box_p3_x_expert_zombie_1 - box_p3_x_generator_zombie_1);
box_p3_y_relative_zombie_1_dict[keypoint].append(
box_p3_y_expert_zombie_1 - box_p3_y_generator_zombie_1)
abs_x_expert_dict[keypoint].append(abs_x_expert)
abs_y_expert_dict[keypoint].append(abs_y_expert);
abs_z_expert_dict[keypoint].append(abs_z_expert);
yaw_expert_dict[keypoint].append(yaw_expert)
abs_x_generator_dict[keypoint].append(abs_x_generator)
abs_y_generator_dict[keypoint].append(abs_y_generator);
abs_z_generator_dict[keypoint].append(abs_z_generator);
yaw_generator_dict[keypoint].append(yaw_generator)
abs_x_relative_dict[keypoint].append(abs_x_expert - abs_x_generator);
abs_y_relative_dict[keypoint].append(abs_y_expert - abs_y_generator);
abs_z_relative_dict[keypoint].append(abs_z_expert - abs_z_generator);
yaw_relative_dict[keypoint].append(yaw_expert - yaw_generator)
# Keep only the most recent `statistic_len` relative errors per keypoint.
relative_dicts = [
    v_x_relative_dict, v_y_relative_dict, a_x_relative_dict, a_y_relative_dict,
    v_x_relative_zombie_0_dict, v_y_relative_zombie_0_dict, a_x_relative_zombie_0_dict, a_y_relative_zombie_0_dict,
    v_x_relative_zombie_1_dict, v_y_relative_zombie_1_dict, a_x_relative_zombie_1_dict, a_y_relative_zombie_1_dict,
    box_p0_x_relative_zombie_0_dict, box_p1_x_relative_zombie_0_dict, box_p2_x_relative_zombie_0_dict, box_p3_x_relative_zombie_0_dict,
    box_p0_y_relative_zombie_0_dict, box_p1_y_relative_zombie_0_dict, box_p2_y_relative_zombie_0_dict, box_p3_y_relative_zombie_0_dict,
    box_p0_x_relative_zombie_1_dict, box_p1_x_relative_zombie_1_dict, box_p2_x_relative_zombie_1_dict, box_p3_x_relative_zombie_1_dict,
    box_p0_y_relative_zombie_1_dict, box_p1_y_relative_zombie_1_dict, box_p2_y_relative_zombie_1_dict, box_p3_y_relative_zombie_1_dict]
for trimmed_dict in relative_dicts:
    if len(trimmed_dict[keypoint]) > statistic_len:
        trimmed_dict[keypoint].pop(0)
else:
pass
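# The pops above cap every per-keypoint history at the most recent
# `statistic_len` samples (a simple sliding window), so the means and
# variances logged below are running statistics over that window only.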
# try:
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "v_x_ego_mean",
np.mean(v_x_relative_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "v_x_ego_variance",
np.var(v_x_relative_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "v_y_ego_mean",
np.mean(v_y_relative_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "v_y_ego_variance",
np.var(v_y_relative_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "a_x_ego_mean",
np.mean(a_x_relative_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "a_x_ego_variance",
np.var(a_x_relative_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "a_y_ego_mean",
np.mean(a_y_relative_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "a_y_ego_variance",
np.var(a_y_relative_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "v_x_zombie_0_mean",
np.mean(v_x_relative_zombie_0_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "v_x_zombie_0_variance",
np.var(v_x_relative_zombie_0_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "v_y_zombie_0_mean",
np.mean(v_y_relative_zombie_0_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "v_y_zombie_0_variance",
np.var(v_y_relative_zombie_0_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "a_x_zombie_0_mean",
np.mean(a_x_relative_zombie_0_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "a_x_zombie_0_variance",
np.var(a_x_relative_zombie_0_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "a_y_zombie_0_mean",
np.mean(a_y_relative_zombie_0_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "a_y_zombie_0_variance",
np.var(a_y_relative_zombie_0_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "v_x_zombie_1_mean",
np.mean(v_x_relative_zombie_1_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "v_x_zombie_1_variance",
np.var(v_x_relative_zombie_1_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "v_y_zombie_1_mean",
np.mean(v_y_relative_zombie_1_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "v_y_zombie_1_variance",
np.var(v_y_relative_zombie_1_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "a_x_zombie_1_mean",
np.mean(a_x_relative_zombie_1_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "a_x_zombie_1_variance",
np.var(a_x_relative_zombie_1_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "a_y_zombie_1_mean",
np.mean(a_y_relative_zombie_1_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "a_y_zombie_1_variance",
np.var(a_y_relative_zombie_1_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "box_p0_x_zombie_0_mean",
np.mean(box_p0_x_relative_zombie_0_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "box_p0_x_zombie_0_variance",
np.var(box_p0_x_relative_zombie_0_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "box_p1_x_zombie_0_mean",
np.mean(box_p1_x_relative_zombie_0_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "box_p1_x_zombie_0_variance",
np.var(box_p1_x_relative_zombie_0_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "box_p2_x_zombie_0_mean",
np.mean(box_p2_x_relative_zombie_0_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "box_p2_x_zombie_0_variance",
np.var(box_p2_x_relative_zombie_0_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "box_p3_x_zombie_0_mean",
np.mean(box_p3_x_relative_zombie_0_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "box_p3_x_zombie_0_variance",
np.var(box_p3_x_relative_zombie_0_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "box_p0_y_zombie_0_mean",
np.mean(box_p0_y_relative_zombie_0_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "box_p0_y_zombie_0_variance",
np.var(box_p0_y_relative_zombie_0_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "box_p1_y_zombie_0_mean",
np.mean(box_p1_y_relative_zombie_0_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "box_p1_y_zombie_0_variance",
np.var(box_p1_y_relative_zombie_0_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "box_p2_y_zombie_0_mean",
np.mean(box_p2_y_relative_zombie_0_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "box_p2_y_zombie_0_variance",
np.var(box_p2_y_relative_zombie_0_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "box_p3_y_zombie_0_mean",
np.mean(box_p3_y_relative_zombie_0_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "box_p3_y_zombie_0_variance",
np.var(box_p3_y_relative_zombie_0_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "box_p0_x_zombie_1_mean",
np.mean(box_p0_x_relative_zombie_1_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "box_p0_x_zombie_1_variance",
np.var(box_p0_x_relative_zombie_1_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "box_p1_x_zombie_1_mean",
np.mean(box_p1_x_relative_zombie_1_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "box_p1_x_zombie_1_variance",
np.var(box_p1_x_relative_zombie_1_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "box_p2_x_zombie_1_mean",
np.mean(box_p2_x_relative_zombie_1_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "box_p2_x_zombie_1_variance",
np.var(box_p2_x_relative_zombie_1_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "box_p3_x_zombie_1_mean",
np.mean(box_p3_x_relative_zombie_1_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "box_p3_x_zombie_1_variance",
np.var(box_p3_x_relative_zombie_1_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "box_p0_y_zombie_1_mean",
np.mean(box_p0_y_relative_zombie_1_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "box_p0_y_zombie_1_variance",
np.var(box_p0_y_relative_zombie_1_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "box_p1_y_zombie_1_mean",
np.mean(box_p1_y_relative_zombie_1_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "box_p1_y_zombie_1_variance",
np.var(box_p1_y_relative_zombie_1_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "box_p2_y_zombie_1_mean",
np.mean(box_p2_y_relative_zombie_1_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "box_p2_y_zombie_1_variance",
np.var(box_p2_y_relative_zombie_1_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "box_p3_y_zombie_1_mean",
np.mean(box_p3_y_relative_zombie_1_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "box_p3_y_zombie_1_variance",
np.var(box_p3_y_relative_zombie_1_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "abs_x_mean",
np.mean(abs_x_relative_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "abs_x_variance",
np.var(abs_x_relative_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "abs_y_mean",
np.mean(abs_y_relative_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "abs_y_variance",
np.var(abs_y_relative_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "abs_z_mean",
np.mean(abs_z_relative_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "abs_z_variance",
np.var(abs_z_relative_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "yaw_mean",
np.mean(yaw_relative_dict[keypoint]))
logger.record_tabular(stage + "_" + str(keypoint) + "_Feature_" + "yaw_variance",
np.var(yaw_relative_dict[keypoint]))
# except:
# print('Logger passed!')
steer_, throttle_ = 0, 1
fakerew_standard = self.reward_giver.get_reward(ob_expert_evaluate, ac_expert_evaluate)[0][0]
v_x_expert, v_y_expert, a_x_expert, a_y_expert = ob_expert_evaluate[0][v_x], ob_expert_evaluate[0][
v_y], ob_expert_evaluate[0][a_x], ob_expert_evaluate[0][a_y]
steer_expert, throttle_expert = ac_expert_evaluate[0][steer_], ac_expert_evaluate[0][throttle_]
prefix = outstr(v_x_expert, v_y_expert, a_x_expert, a_y_expert, steer_expert, throttle_expert)
# prefix = stage +"_AT_"+prefix
prefix = stage + "_" + str(keypoint) + "_T" + prefix
logger.record_tabular(prefix, fakerew_standard)
ob_indexes = {"Vx": v_x, "Vy": v_y, "Ax": a_x, "Ay": a_y}
ac_indexes = {"St": steer_, "Th": throttle_}
indexes = [ob_indexes, ac_indexes]
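# Counterfactual reward probe: for each selected observation/action feature,
# the discriminator is re-evaluated with that single entry halved, and also
# negated (for all four observation features and for steer), and the drop
# relative to the unmodified expert pair (fakerew_standard - fakerew_ctfl)
# is logged under a per-keypoint prefix.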
for i in range(len(indexes)):
if i == 0:
index = indexes[i]
ac_ctfl = ac_expert_evaluate
for k, idx in index.items():
ob_ctfl = copy(ob_expert_evaluate)
less_value = ob_ctfl[0][idx] / 2.
ob_ctfl[0][idx] = less_value
prefix_ctfl = stage + "_" + str(keypoint) + "_FakeRew_" + k + str(round(less_value, 1))
fakerew_ctfl = self.reward_giver.get_reward(ob_ctfl, ac_ctfl)[0][0]
# logger.record_tabular(prefix_ctfl, fakerew_ctfl)
#relative_rew = abs(fakerew_ctfl - fakerew_standard)
relative_rew = fakerew_standard - fakerew_ctfl
logger.record_tabular(prefix_ctfl, relative_rew)
ob_ctfl = copy(ob_expert_evaluate)
negative_value = ob_ctfl[0][idx] * -1.
ob_ctfl[0][idx] = negative_value
prefix_ctfl = stage + "_" + str(keypoint) + "_FakeRew_" + k + str(round(negative_value, 1))
fakerew_ctfl = self.reward_giver.get_reward(ob_ctfl, ac_ctfl)[0][0]
# logger.record_tabular(prefix_ctfl, fakerew_ctfl)
#relative_rew = abs(fakerew_ctfl - fakerew_standard)
relative_rew = fakerew_standard - fakerew_ctfl
logger.record_tabular(prefix_ctfl, relative_rew)
elif i == 1:
index = indexes[i]
ob_ctfl = ob_expert_evaluate
for k, idx in index.items():
ac_ctfl = copy(ac_expert_evaluate)
less_value = ac_ctfl[0][idx] / 2.
ac_ctfl[0][idx] = less_value
prefix_ctfl = stage + "_" + str(keypoint) + "_FakeRew_" + k + str(round(less_value, 1))
fakerew_ctfl = self.reward_giver.get_reward(ob_ctfl, ac_ctfl)[0][0]
# logger.record_tabular(prefix_ctfl, fakerew_ctfl)
#relative_rew = abs(fakerew_ctfl - fakerew_standard)
relative_rew = fakerew_standard - fakerew_ctfl
logger.record_tabular(prefix_ctfl, relative_rew)
if k == "St":
ac_ctfl = copy(ac_expert_evaluate)
negative_value = ac_ctfl[0][idx] * -1.
ac_ctfl[0][idx] = negative_value
prefix_ctfl = stage + "_" + str(keypoint) + "_FakeRew_" + k + str(round(negative_value, 1))
fakerew_ctfl = self.reward_giver.get_reward(ob_ctfl, ac_ctfl)[0][0]
# logger.record_tabular(prefix_ctfl, fakerew_ctfl)
#relative_rew = abs(fakerew_ctfl - fakerew_standard)
relative_rew = fakerew_standard - fakerew_ctfl
logger.record_tabular(prefix_ctfl, relative_rew)
if self.rank == 0:
logger.dump_tabular()
pi_params = [tf.get_default_session().run(param) for param in self.pi.get_variables()]
dis_params = [tf.get_default_session().run(param) for param in self.reward_giver.get_trainable_variables()]
return [pi_params, dis_params]
def get_params(self):
pi_params = [tf.get_default_session().run(param) for param in self.pi.get_variables()]
dis_params = [tf.get_default_session().run(param) for param in self.reward_giver.get_trainable_variables()]
return [pi_params, dis_params]
def load_model(self):
logger.log("Let's load the pretrained model")
logger.log(self.args.load_model_path)
saver = tf.train.Saver(max_to_keep=5000)
sess = tf.get_default_session()
params = sess.run(self.pi.get_trainable_variables())
saver.restore(tf.get_default_session(), self.args.load_model_path)
params = sess.run(self.pi.get_trainable_variables())
def load_dmodel(self):
logger.log("Let's load the pretrained discriminator model")
logger.log(self.args.load_model_path)
saver = tf.train.Saver(var_list=self.reward_giver.get_trainable_variables(), max_to_keep=5000)
sess = tf.get_default_session()
saver.restore(tf.get_default_session(), self.args.load_model_path)
def flatten_lists(listoflists):
return [el for list_ in listoflists for el in list_]
@ray.remote
def actor_worker(*actors):
batches = ray.get([actor.get_batch.remote() for actor in actors])
return batches
def stack(array):
if len(array.shape) == 2:
array = array.reshape(np.prod(array.shape))
elif len(array.shape) == 3:
array = array.reshape(np.prod(array.shape[:-1]), -1)
elif len(array.shape) == 1:
pass
return array
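# e.g. a per-actor batch of scalars with shape (num_actors, horizon) becomes a
# flat (num_actors * horizon,) vector, and (num_actors, horizon, obs_dim)
# becomes (num_actors * horizon, obs_dim); 1-D arrays pass through unchanged.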
def batches_filter(batches):
batches_original = batches
keys = batches[0].keys()
keys = sorted(keys)
ep_infos = ["ep_rets", "ep_lens", "ep_true_rets", 'ep_v', 'ep_acc', 'ep_left_offset', 'ep_right_offset', "ep_true_lens"]
ep_flat = ["v_ep", "scene", "ep_rets"]
ep_keys = ep_infos + ep_flat
keys = [key for key in keys if key not in ep_keys]
batches_data = [np.array([batch[key] for batch in batches]) for key in keys]
batches_data = [stack(data) for data in batches_data]
batches = {keys[i]:batches_data[i] for i in range(len(keys))}
def safemean(xs):
return np.nan if len(xs) == 0 else np.mean(xs)
def safemax(xs):
return np.nan if len(xs) == 0 else np.max(xs)
ep_scala_keys = ['ep_acc', 'ep_v', 'ep_left_offset', 'ep_right_offset']
for key in ep_scala_keys:
batches[key] = safemean([batch[key] for batch in batches_original])
for key in [_key for _key in ep_keys if _key not in ep_scala_keys]:
output = []
for batch in batches_original:
output += batch[key]
batches[key] = output
return batches
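# Net effect: per-step keys are stacked across actors and flattened via stack(),
# the scalar episode statistics in ep_scala_keys are averaged with safemean(),
# and the remaining episode-level lists are concatenated across actors.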
def merge(expert_dataset):
obs = []
acs = []
obs_abspos_yaw = []
for e_data in expert_dataset:
for traj in e_data.obs:
obs.append(traj)
for traj in e_data.acs:
acs.append(traj)
for traj in e_data.obs_abspos_yaw:
obs_abspos_yaw.append(traj)
obs = np.array(obs)
acs = np.array(acs)
obs_abspos_yaw = np.array(obs_abspos_yaw)
expert_dataset_origin = copy(expert_dataset)
expert_dataset = expert_dataset[0]
expert_dataset.obs, expert_dataset.acs, expert_dataset.obs_abspos_yaw = obs, acs, obs_abspos_yaw
print("==========================================================================")
print("Expert Data for each scenario: ", expert_dataset_origin[0].obs.shape)
print("Merge Expert Data: ", expert_dataset.obs.shape)
print("==========================================================================")
return expert_dataset
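# e.g. merging four per-scenario datasets whose obs arrays are each (N, obs_dim)
# yields a single dataset whose obs/acs/obs_abspos_yaw arrays have 4 * N rows;
# the first dataset object is reused as the container for the merged arrays.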
def main(args):
ray.init(num_gpus=args.actor_nums+1)
host = [str(x) for x in args.host.split('_') if x!='']
port = [int(x) for x in args.port.split('_') if x!='']
split = int(len(host)/args.actor_nums)
nums = int(len(host))
hosts = [host[i:i+split] for i in range(0, nums, split)]
ports = [port[i:i+split] for i in range(0, nums, split)]
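# e.g. with args.host = 'h1_h2_h3_h4', args.port = '2000_2002_2004_2006' and
# args.actor_nums = 2 this yields hosts = [['h1', 'h2'], ['h3', 'h4']] and
# ports = [[2000, 2002], [2004, 2006]]: one group of server endpoints per actor.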
print(hosts, ports)
args.hosts = hosts
args.ports = ports
actors = [Actor.remote(args, i) for i in range(args.actor_nums)]
if args.scenario_name == "Merge_Env" or args.speed_mode == "mix":
args = [ray.get(actors[i].get_args.remote()) for i in range(len(actors))]
expert_dataset = [args[i].expert_dataset for i in range(len(actors))]
expert_dataset = merge(expert_dataset)
args[0].expert_dataset = expert_dataset
args = args[0]
else:
args = ray.get(actors[0].get_args.remote())
learner = Learner.remote(args)
# init actor parameters
params = ray.get(learner.get_params.remote())
status = [actor.update_params.remote(params) for actor in actors]
g_steps = 0
iteration = 0
logger.log("********** Iteration %i ************" % iteration)
if args.task == 'generate_data':
save_file = open("./tmp_TRPO_curriculum.pkl", "wb")
while True:
batches = ray.get([actor.get_batch.remote() for actor in actors])
batches = batches_filter(batches)
if args.task == 'generate_data':
ob_batch = batches['ob'].tolist()
control_batch = batches['ctrl'].tolist()
current_pos_batch = batches['current_pos'].tolist()
yaw_batch = batches['yaw'].tolist()
print(len(ob_batch), len(ob_batch[0]), len(current_pos_batch[0]), yaw_batch[0])
assert len(ob_batch) == len(control_batch) == len(current_pos_batch) == len(yaw_batch), 'len(ob_batch) != len(control_batch)'
for i in range(len(ob_batch)):
pickle.dump(ob_batch[i] + current_pos_batch[i] + yaw_batch[i] + control_batch[i], save_file)
if args.task == 'train':
params = ray.get(learner.learn.remote(batches))
status = [actor.update_params.remote(params) for actor in actors]
g_steps += 1
if g_steps % args.g_step == 0:
iteration += 1
logger.log("********** Iteration %i ************" % iteration)
if iteration > args.max_iters:
break
if args.task == 'generate_data':
save_file.close()
return
if __name__ == '__main__':
args = argsparser()
main(args)
```
#### File: baselines/gail/scenarios_before_mix.py
```python
import carla
import time
import numpy as np
import shapely.geometry
import shapely.affinity
from utils import _dis3d, _pos3d
from agents.navigation.basic_agent import *
from agents.navigation.roaming_agent import *
from agents.navigation.controller import VehiclePIDController
import random
from collections import deque
from enum import Enum
from agents.tools.misc import distance_vehicle, draw_waypoints
import pickle
from baselines import logger
# import pdb
# try:
# from agents.navigation.local_planner import _retrieve_options as retrieve_options
# except:
# print('0.9.4')
# pass
def detect_lane_obstacle(world, actor, extension_factor=3, margin=1.02):
"""
This function identifies if an obstacle is present in front of the reference actor
"""
# world = CarlaDataProvider.get_world()
world_actors = world.get_actors().filter('vehicle.*')
actor_bbox = actor.bounding_box
actor_transform = actor.get_transform()
actor_location = actor_transform.location
actor_vector = actor_transform.rotation.get_forward_vector()
actor_vector = np.array([actor_vector.x, actor_vector.y])
actor_vector = actor_vector / np.linalg.norm(actor_vector)
actor_vector = actor_vector * (extension_factor - 1) * actor_bbox.extent.x
actor_location = actor_location + carla.Location(actor_vector[0], actor_vector[1])
actor_yaw = actor_transform.rotation.yaw
is_hazard = False
for adversary in world_actors:
if adversary.id != actor.id and \
actor_transform.location.distance(adversary.get_location()) < 50:
adversary_bbox = adversary.bounding_box
adversary_transform = adversary.get_transform()
adversary_loc = adversary_transform.location
adversary_yaw = adversary_transform.rotation.yaw
overlap_adversary = RotatedRectangle(
adversary_loc.x, adversary_loc.y,
2 * margin * adversary_bbox.extent.x, 2 * margin * adversary_bbox.extent.y, adversary_yaw)
overlap_actor = RotatedRectangle(
actor_location.x, actor_location.y,
2 * margin * actor_bbox.extent.x * extension_factor, 2 * margin * actor_bbox.extent.y, actor_yaw)
overlap_area = overlap_adversary.intersection(overlap_actor).area
if overlap_area > 0:
is_hazard = True
break
return is_hazard
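# A minimal usage sketch (assumes a CARLA server reachable on localhost:2000;
# the names below are illustrative, not part of this module):
#   client = carla.Client('localhost', 2000)
#   world = client.get_world()
#   ego = world.get_actors().filter('vehicle.*')[0]
#   if detect_lane_obstacle(world, ego, extension_factor=3, margin=1.02):
#       ego.apply_control(carla.VehicleControl(throttle=0.0, brake=1.0))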
class RotatedRectangle(object):
"""
This class contains methods to build a rotated rectangle and find its intersection with another one.
"""
def __init__(self, c_x, c_y, width, height, angle):
self.c_x = c_x
self.c_y = c_y
self.w = width # pylint: disable=invalid-name
self.h = height # pylint: disable=invalid-name
self.angle = angle
def get_contour(self):
"""
create contour
"""
w = self.w
h = self.h
c = shapely.geometry.box(-w / 2.0, -h / 2.0, w / 2.0, h / 2.0)
rc = shapely.affinity.rotate(c, self.angle)
return shapely.affinity.translate(rc, self.c_x, self.c_y)
def intersection(self, other):
"""
Obtain the intersection between the two contours.
"""
return self.get_contour().intersection(other.get_contour())
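# For intuition: two axis-aligned 1 x 1 rectangles whose centres are 0.5 apart
# along x overlap with area 0.5, e.g.
#   RotatedRectangle(0.0, 0.0, 1.0, 1.0, 0.0).intersection(
#       RotatedRectangle(0.5, 0.0, 1.0, 1.0, 0.0)).area  # ~= 0.5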
class RoadOption(Enum):
"""
RoadOption represents the possible topological configurations when moving from one lane segment to another.
"""
VOID = -1
LEFT = 1
RIGHT = 2
STRAIGHT = 3
LANEFOLLOW = 4
def retrieve_options(list_waypoints, current_waypoint):
"""
Compute the type of connection between the current active waypoint and the multiple waypoints present in
list_waypoints. The result is encoded as a list of RoadOption enums.
:param list_waypoints: list with the possible target waypoints in case of multiple options
:param current_waypoint: current active waypoint
:return: list of RoadOption enums representing the type of connection from the active waypoint to each
candidate in list_waypoints
"""
options = []
for next_waypoint in list_waypoints:
# this is needed because sometimes we are linking to
# the beginning of an intersection, therefore the
# variation in angle is small
next_next_waypoint = next_waypoint.next(3.0)[0]
link = _compute_connection(current_waypoint, next_next_waypoint)
options.append(link)
return options
def _compute_connection(current_waypoint, next_waypoint):
"""
Compute the type of topological connection between an active waypoint (current_waypoint) and a target waypoint
(next_waypoint).
:param current_waypoint: active waypoint
:param next_waypoint: target waypoint
:return: the type of topological connection encoded as a RoadOption enum:
RoadOption.STRAIGHT
RoadOption.LEFT
RoadOption.RIGHT
"""
n = next_waypoint.transform.rotation.yaw
n = n % 360.0
c = current_waypoint.transform.rotation.yaw
c = c % 360.0
diff_angle = (n - c) % 180.0
if diff_angle < 1.0:
return RoadOption.STRAIGHT
elif diff_angle > 90.0:
return RoadOption.LEFT
else:
return RoadOption.RIGHT
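# e.g. with current yaw 0.0: a next yaw of 0.5 gives diff 0.5 -> STRAIGHT,
# a next yaw of 45.0 gives diff 45.0 -> RIGHT, and a next yaw of 315.0 gives
# diff 135.0 (315 % 180) -> LEFT, since angles are compared modulo 180 degrees.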
# helper functions, mostly are planners
class WaypointFollower(object):
"""
This is an atomic behavior to follow waypoints indefinitely
while maintaining a given speed or, if given a waypoint plan,
to follow that plan
"""
def __init__(self, actor, target_speed, plan=None,
avoid_collision=False, name="FollowWaypoints", map=None):
"""
Set up actor and local planner
"""
self._actor_list = []
self._actor_list.append(actor)
# print('\n\ninit_actor: ', actor)
self._target_speed = target_speed
self._local_planner_list = []
self._plan = plan
self._args_lateral_dict = {'K_P': 1.0, 'K_D': 0.01, 'K_I': 0.0, 'dt': 0.05}
self._avoid_collision = avoid_collision
self._map = map
def setup(self, timeout=5):
"""
Delayed one-time initialization
"""
for actor in self._actor_list:
# print('\n\nactor: ', actor)
self._apply_local_planner(actor, self._map)
return True
def _apply_local_planner(self, actor, map):
local_planner = WpFollowplanner(
actor=actor,
map=map,
opt_dict={
'target_speed': self._target_speed,
'lateral_control_dict': self._args_lateral_dict})
if self._plan is not None:
local_planner.set_global_plan(self._plan)
self._local_planner_list.append(local_planner)
def update(self):
"""
Run local planner, obtain and apply control to actor
"""
for actor, local_planner in zip(self._actor_list, self._local_planner_list):
if actor is not None and actor.is_alive and local_planner is not None:
control = local_planner.run_step(debug=False)
# if self._avoid_collision and detect_lane_obstacle(actor):
# control.throttle = 0.0
# control.brake = 1.0
actor.apply_control(control)
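# Typical usage inside a scenario (a sketch; `actor`, `world` and the 20.0 km/h
# target speed are illustrative):
#   follower = WaypointFollower(actor, 20.0, map=world.get_map())
#   follower.setup()
#   # then call follower.update() once per simulation tick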
class WpFollowplanner(object):
MIN_DISTANCE_PERCENTAGE = 0.9
def __init__(self, actor, map, opt_dict):
self._vehicle = actor
self._map = map
self._dt = None
self._target_speed = None
self._sampling_radius = None
self._min_distance = None
self._current_waypoint = None
self._target_road_option = None
self._next_waypoints = None
self._target_waypoint = None
self._vehicle_controller = None
self._global_plan = None
# queue with tuples of (waypoint, RoadOption)
self._waypoints_queue = deque(maxlen=600)
self._buffer_size = 5
self._waypoint_buffer = deque(maxlen=self._buffer_size)
# initializing controller
self.init_controller(opt_dict)
def init_controller(self, opt_dict):
"""
Controller initialization.
:param opt_dict: dictionary of arguments.
:return:
"""
# default params
self._dt = 1.0 / 20.0
self._target_speed = 20.0 # Km/h
self._sampling_radius = self._target_speed * 0.5 / 3.6 # 0.5 seconds horizon
self._min_distance = self._sampling_radius * self.MIN_DISTANCE_PERCENTAGE
args_lateral_dict = {
'K_P': 1.95,
'K_D': 0.01,
'K_I': 1.4,
'dt': self._dt}
args_longitudinal_dict = {
'K_P': 1.0,
'K_D': 0,
'K_I': 1,
'dt': self._dt}
# parameters overload
if 'dt' in opt_dict:
self._dt = opt_dict['dt']
if 'target_speed' in opt_dict:
self._target_speed = opt_dict['target_speed']
if 'sampling_radius' in opt_dict:
self._sampling_radius = self._target_speed * \
opt_dict['sampling_radius'] / 3.6
if 'lateral_control_dict' in opt_dict:
args_lateral_dict = opt_dict['lateral_control_dict']
if 'longitudinal_control_dict' in opt_dict:
args_longitudinal_dict = opt_dict['longitudinal_control_dict']
self._current_waypoint = self._map.get_waypoint(
self._vehicle.get_location())
self._vehicle_controller = VehiclePIDController(self._vehicle,
args_lateral=args_lateral_dict,
args_longitudinal=args_longitudinal_dict)
self._global_plan = False
# compute initial waypoints
self._waypoints_queue.append((self._current_waypoint.next(self._sampling_radius)[0], RoadOption.LANEFOLLOW))
self._target_road_option = RoadOption.LANEFOLLOW
# fill waypoint trajectory queue
self._compute_next_waypoints(k=200)
def _compute_next_waypoints(self, k=1):
"""
Add new waypoints to the trajectory queue.
:param k: how many waypoints to compute
:return:
"""
# check we do not overflow the queue
available_entries = self._waypoints_queue.maxlen - len(self._waypoints_queue)
k = min(available_entries, k)
for _ in range(k):
last_waypoint = self._waypoints_queue[-1][0]
next_waypoints = list(last_waypoint.next(self._sampling_radius))
if len(next_waypoints) == 1:
# only one option available ==> lanefollowing
next_waypoint = next_waypoints[0]
road_option = RoadOption.LANEFOLLOW
else:
# random choice between the possible options
road_options_list = retrieve_options(
next_waypoints, last_waypoint)
road_option = random.choice(road_options_list)
next_waypoint = next_waypoints[road_options_list.index(
road_option)]
self._waypoints_queue.append((next_waypoint, road_option))
def run_step(self, debug=True):
"""
Execute one step of local planning which involves running the longitudinal and lateral PID controllers to
follow the waypoints trajectory.
:param debug: boolean flag to activate waypoints debugging
:return:
"""
# not enough waypoints in the horizon? => add more!
if len(self._waypoints_queue) < int(self._waypoints_queue.maxlen * 0.5):
if not self._global_plan:
self._compute_next_waypoints(k=100)
if len(self._waypoints_queue) == 0:
control = carla.VehicleControl()
control.steer = 0.0
control.throttle = 0.0
control.brake = 0.0
control.hand_brake = False
control.manual_gear_shift = False
return control
# Buffering the waypoints
if not self._waypoint_buffer:
for i in range(self._buffer_size):
if self._waypoints_queue:
self._waypoint_buffer.append(
self._waypoints_queue.popleft())
else:
break
# current vehicle waypoint
self._current_waypoint = self._map.get_waypoint(self._vehicle.get_location())
# target waypoint
# self._target_waypoint, self._target_road_option = self._waypoint_buffer[0]
# move using PID controllers
self._target_waypoint = self._current_waypoint.next(5.0)[0]
control = self._vehicle_controller.run_step(self._target_speed, self._target_waypoint)
# purge the queue of obsolete waypoints
vehicle_transform = self._vehicle.get_transform()
max_index = -1
for i, (waypoint, _) in enumerate(self._waypoint_buffer):
if distance_vehicle(
waypoint, vehicle_transform) < self._min_distance:
max_index = i
if max_index >= 0:
for i in range(max_index + 1):
self._waypoint_buffer.popleft()
if debug:
draw_waypoints(self._vehicle.get_world(), [self._target_waypoint], self._vehicle.get_location().z + 1.0)
return control
class OtherLeadingVehicle(object):
def __init__(self, name, map, world):
self.name = name
self._map = map
self.world = world
self.speed = 0
self.blueprint_library = self.world.get_blueprint_library()
self._scenario_init()
def _scenario_init(self):
# init hero car
# --------------------------------------------------------
# setup cars on a given waypoint
hero_car_pos = [388.9, -140, 0]
wp_location = carla.Location(x=hero_car_pos[0], y=hero_car_pos[1], z=hero_car_pos[2])
wp = self._map.get_waypoint(wp_location)
hero_vehicle_transform = wp.transform
hero_model = 'vehicle.lincoln.mkz2017'
blueprint = random.choice(self.blueprint_library.filter(hero_model))
blueprint.set_attribute('role_name', 'hero')
self.hero_car = self.world.try_spawn_actor(blueprint, hero_vehicle_transform)
# init zombie cars
first_vehicle_location = 25
second_vehicle_location = first_vehicle_location + 8
first_vehicle_waypoint = wp.next(first_vehicle_location)[0]
second_vehicle_waypoint = wp.next(second_vehicle_location)[0].get_left_lane()
first_vehicle_transform = carla.Transform(first_vehicle_waypoint.transform.location,
first_vehicle_waypoint.transform.rotation)
second_vehicle_transform = carla.Transform(second_vehicle_waypoint.transform.location,
second_vehicle_waypoint.transform.rotation)
models = ['vehicle.nissan.patrol', 'vehicle.audi.tt']
blueprints = [random.choice(self.world.get_blueprint_library().filter(model)) for model in models]
for blueprint in blueprints:
blueprint.set_attribute('role_name', 'scenario')
self.first_vehicle = self.world.try_spawn_actor(blueprints[0], first_vehicle_transform)
self.second_vehicle = self.world.try_spawn_actor(blueprints[1], second_vehicle_transform)
self.zombie_cars = [self.first_vehicle, self.second_vehicle]
# --------------------------------------------------------
# --------------------------------------------------------
# setup local planners for zombie cars
self._first_vehicle_speed = 36 / 3.2
self._second_vehicle_speed = 45
first_vehicle_planner = WaypointFollower(self.zombie_cars[0], self._first_vehicle_speed,map=self._map,
avoid_collision=True)
second_vehicle_planner = WaypointFollower(self.zombie_cars[1], self._second_vehicle_speed,map=self._map,
avoid_collision=True)
self.vehicle_planners = [first_vehicle_planner, second_vehicle_planner]
for planner in self.vehicle_planners:
planner.setup()
def _update(self):
# update action for two local planners
if _dis3d(_pos3d(self.hero_car), _pos3d(self.first_vehicle)) > 26.:
pass
else:
for planner in self.vehicle_planners:
planner.update()
def restart(self):
self._remove_all_actors()
self._scenario_init()
def _remove_all_actors(self):
actors = [self.hero_car] + self.zombie_cars
for actor in actors:
if actor.is_alive:
actor.destroy()
def _remove_zombie_cars(self):
actors = self.zombie_cars
for actor in actors:
if actor.is_alive:
actor.destroy()
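# Driver-loop sketch for this scenario (names are illustrative; `world` comes
# from a connected CARLA client):
#   scenario = OtherLeadingVehicle('OtherLeadingVehicle', world.get_map(), world)
#   # each tick: scenario._update()  -- zombie cars only move once the hero is within 26 m
#   # on episode end: scenario.restart()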
class OverTake(object):
def __init__(self, name, map, world, checkkeys):
self.name = name
self._map = map
self.world = world
self.speed = 0
self.keypointsinfos = logger.keypoints
self.checkpoints = {k:v for k,v in self.keypointsinfos.items() if k in checkkeys}
self.checkkeys = checkkeys
self.checkframes = 1
self.keychoice = 0
self.framechoice = 0
self.blueprint_library = self.world.get_blueprint_library()
self._scenario_init()
def _scenario_init(self):
# init hero car
# --------------------------------------------------------
# setup cars on a given waypoint
hero_car_pos = [388.9, -140, 0]
wp_location = carla.Location(x=hero_car_pos[0], y=hero_car_pos[1], z=hero_car_pos[2])
wp = self._map.get_waypoint(wp_location)
hero_vehicle_transform = wp.transform
hero_model = 'vehicle.lincoln.mkz2017'
blueprint = random.choice(self.blueprint_library.filter(hero_model))
blueprint.set_attribute('role_name', 'hero')
self.hero_car = self.world.try_spawn_actor(blueprint, hero_vehicle_transform)
# init zombie cars
first_vehicle_location = 25
second_vehicle_location = first_vehicle_location + 8
first_vehicle_waypoint = wp.next(first_vehicle_location)[0]
second_vehicle_waypoint = wp.next(second_vehicle_location)[0].get_left_lane()
first_vehicle_transform = carla.Transform(first_vehicle_waypoint.transform.location,
first_vehicle_waypoint.transform.rotation)
second_vehicle_transform = carla.Transform(second_vehicle_waypoint.transform.location,
second_vehicle_waypoint.transform.rotation)
models = ['vehicle.nissan.patrol', 'vehicle.audi.tt']
blueprints = [random.choice(self.world.get_blueprint_library().filter(model)) for model in models]
for blueprint in blueprints:
blueprint.set_attribute('role_name', 'scenario')
self.first_vehicle = self.world.try_spawn_actor(blueprints[0], first_vehicle_transform)
self.second_vehicle = self.world.try_spawn_actor(blueprints[1], second_vehicle_transform)
self.zombie_cars = [self.first_vehicle, self.second_vehicle]
checkinfo = self.checkpoints[self.checkkeys[self.keychoice%len(self.checkkeys)]][self.framechoice%self.checkframes]
self.cur_checkpoint = self.checkkeys[self.keychoice%len(self.checkkeys)]
self.framechoice += 1
if self.framechoice%self.checkframes == 0:
self.keychoice += 1
# --------------------------------------------------------
# --------------------------------------------------------
# setup local planners for zombie cars
self._first_vehicle_speed = 36 / 3.2
self._second_vehicle_speed = 45
first_vehicle_planner = WaypointFollower(self.zombie_cars[0], self._first_vehicle_speed,map=self._map,
avoid_collision=True)
second_vehicle_planner = WaypointFollower(self.zombie_cars[1], self._second_vehicle_speed,map=self._map,
avoid_collision=True)
self.vehicle_planners = [first_vehicle_planner, second_vehicle_planner]
for planner in self.vehicle_planners:
planner.setup()
#def _scenario_init(self):
# # init hero car
# # --------------------------------------------------------
# # setup cars on a given waypoint
# hero_car_pos = [388.9, -140, 0]
# wp_location = carla.Location(x=hero_car_pos[0], y=hero_car_pos[1], z=hero_car_pos[2])
# wp = self._map.get_waypoint(wp_location)
# hero_vehicle_transform = wp.transform
# hero_model = 'vehicle.lincoln.mkz2017'
# blueprint = random.choice(self.blueprint_library.filter(hero_model))
# blueprint.set_attribute('role_name', 'hero')
# self.hero_car = self.world.try_spawn_actor(blueprint, hero_vehicle_transform)
# # init zombie cars
# first_vehicle_location = 25
# second_vehicle_location = first_vehicle_location + 8
# first_vehicle_waypoint = wp.next(first_vehicle_location)[0]
# second_vehicle_waypoint = wp.next(second_vehicle_location)[0].get_left_lane()
# first_vehicle_transform = carla.Transform(first_vehicle_waypoint.transform.location,
# first_vehicle_waypoint.transform.rotation)
# second_vehicle_transform = carla.Transform(second_vehicle_waypoint.transform.location,
# second_vehicle_waypoint.transform.rotation)
# models = ['vehicle.nissan.patrol', 'vehicle.audi.tt']
# blueprints = [random.choice(self.world.get_blueprint_library().filter(model)) for model in models]
# for blueprint in blueprints:
# blueprint.set_attribute('role_name', 'scenario')
# self.first_vehicle = self.world.try_spawn_actor(blueprints[0], first_vehicle_transform)
# self.second_vehicle = self.world.try_spawn_actor(blueprints[1], second_vehicle_transform)
#
# checkinfo = self.checkpoints[self.checkkeys[self.keychoice%len(self.checkkeys)]][self.framechoice%self.checkframes]
# checkinfo = checkinfo[1]
# self.cur_checkpoint = self.checkkeys[self.keychoice%len(self.checkkeys)]
# hero_car_info, first_vehicle_info, second_vehicle_info = checkinfo["Agent"], checkinfo["Zombie Cars I"], checkinfo["Zombie Cars II"]
# self._scenario_init_teleport(hero_car_info, first_vehicle_info, second_vehicle_info)
# self.framechoice += 1
# if self.framechoice%self.checkframes == 0:
# self.keychoice += 1
def _scenario_init_teleport(self, hero_car_info, first_vehicle_info, second_vehicle_info):
hero_car_pos, hero_car_v, hero_car_rotation, hero_car_angular_v = hero_car_info[0], hero_car_info[1], hero_car_info[3], hero_car_info[4]
first_vehicle_pos, first_vehicle_v, first_vehicle_rotation, first_vehicle_angular_v = first_vehicle_info[0], first_vehicle_info[1], first_vehicle_info[3], first_vehicle_info[4]
second_vehicle_pos, second_vehicle_v, second_vehicle_rotation, second_vehicle_angular_v = second_vehicle_info[0], second_vehicle_info[1], second_vehicle_info[3], second_vehicle_info[4]
hero_car_impulse, first_vehicle_impulse, second_vehicle_impulse = hero_car_info[2], first_vehicle_info[2], second_vehicle_info[2]
# setup hero car
wp_location = carla.Location(x=hero_car_pos[0], y=hero_car_pos[1], z=hero_car_pos[2])
wp_rotation = carla.Rotation()
wp_rotation.yaw, wp_rotation.pitch, wp_rotation.roll = hero_car_rotation[0], hero_car_rotation[1], hero_car_rotation[2]
hero_vehicle_transform = carla.Transform()
hero_vehicle_transform.location, hero_vehicle_transform.rotation = wp_location, wp_rotation
hero_vehicle_velocity = carla.Vector3D()
hero_vehicle_velocity.x, hero_vehicle_velocity.y, hero_vehicle_velocity.z = hero_car_v[0], hero_car_v[1], hero_car_v[2]
hero_vehicle_angular_velocity = carla.Vector3D()
hero_vehicle_angular_velocity.x, hero_vehicle_angular_velocity.y, hero_vehicle_angular_velocity.z = hero_car_angular_v[0], hero_car_angular_v[1], hero_car_angular_v[2]
#self.hero_car.set_simulate_physics(False)
self.hero_car.set_transform(hero_vehicle_transform)
self.hero_car.set_velocity(hero_vehicle_velocity)
self.hero_car.set_angular_velocity(hero_vehicle_angular_velocity)
hero_vehicle_impulse = carla.Vector3D()
#hero_vehicle_impulse.x, hero_vehicle_impulse.y, hero_vehicle_impulse.z = hero_car_impulse[0], hero_car_impulse[1], hero_car_impulse[2]
#self.hero_car.add_impulse(hero_vehicle_impulse)
# setup zombie cars
wp_location = carla.Location(x=first_vehicle_pos[0], y=first_vehicle_pos[1], z=first_vehicle_pos[2])
wp_rotation = carla.Rotation()
wp_rotation.yaw, wp_rotation.pitch, wp_rotation.roll = first_vehicle_rotation[0], first_vehicle_rotation[1], first_vehicle_rotation[2]
first_vehicle_transform = carla.Transform()
first_vehicle_transform.location, first_vehicle_transform.rotation = wp_location, wp_rotation
first_vehicle_velocity = carla.Vector3D()
first_vehicle_velocity.x, first_vehicle_velocity.y, first_vehicle_velocity.z = first_vehicle_v[0], first_vehicle_v[1], first_vehicle_v[2]
first_vehicle_angular_velocity = carla.Vector3D()
first_vehicle_angular_velocity.x, first_vehicle_angular_velocity.y, first_vehicle_angular_velocity.z = first_vehicle_angular_v[0], first_vehicle_angular_v[1], first_vehicle_angular_v[2]
#self.first_vehicle.set_simulate_physics(False)
self.first_vehicle.set_transform(first_vehicle_transform)
self.first_vehicle.set_velocity(first_vehicle_velocity)
self.first_vehicle.set_angular_velocity(first_vehicle_angular_velocity)
wp_location = carla.Location(x=second_vehicle_pos[0], y=second_vehicle_pos[1], z=second_vehicle_pos[2])
wp_rotation = carla.Rotation()
wp_rotation.yaw, wp_rotation.pitch, wp_rotation.roll = second_vehicle_rotation[0], second_vehicle_rotation[1], second_vehicle_rotation[2]
second_vehicle_transform = carla.Transform()
second_vehicle_transform.location, second_vehicle_transform.rotation = wp_location, wp_rotation
second_vehicle_velocity = carla.Vector3D()
second_vehicle_velocity.x, second_vehicle_velocity.y, second_vehicle_velocity.z = second_vehicle_v[0], second_vehicle_v[1], second_vehicle_v[2]
second_vehicle_angular_velocity = carla.Vector3D()
second_vehicle_angular_velocity.x, second_vehicle_angular_velocity.y, second_vehicle_angular_velocity.z = second_vehicle_angular_v[0], second_vehicle_angular_v[1], second_vehicle_angular_v[2]
self.second_vehicle.set_angular_velocity(second_vehicle_angular_velocity)
#self.second_vehicle.set_simulate_physics(False)
self.second_vehicle.set_transform(second_vehicle_transform)
self.second_vehicle.set_velocity(second_vehicle_velocity)
#self.first_vehicle.set_simulate_physics(True)
#self.second_vehicle.set_simulate_physics(True)
self.zombie_cars = [self.first_vehicle, self.second_vehicle]
# --------------------------------------------------------
# --------------------------------------------------------
# setup local planners for zombie cars
self._first_vehicle_speed = 36 / 3.2
self._second_vehicle_speed = 45
first_vehicle_planner = WaypointFollower(self.zombie_cars[0], self._first_vehicle_speed,map=self._map,
avoid_collision=True)
second_vehicle_planner = WaypointFollower(self.zombie_cars[1], self._second_vehicle_speed,map=self._map,
avoid_collision=True)
self.vehicle_planners = [first_vehicle_planner, second_vehicle_planner]
for planner in self.vehicle_planners:
planner.setup()
def _update(self):
# update action for two local planners
if _dis3d(_pos3d(self.hero_car), _pos3d(self.first_vehicle)) > 26.:
pass
else:
for planner in self.vehicle_planners:
planner.update()
def restart(self):
self._remove_all_actors()
self._scenario_init()
def _remove_all_actors(self):
actors = [self.hero_car] + self.zombie_cars
for actor in actors:
if actor.is_alive:
actor.destroy()
def _remove_zombie_cars(self):
actors = self.zombie_cars
for actor in actors:
if actor.is_alive:
actor.destroy()
class CarFollowing(object):
def __init__(self, name, map, world):
self.name = name
self._map = map
self.world = world
self.blueprint_library = self.world.get_blueprint_library()
self._scenario_init()
def _scenario_init(self):
# init hero car
hero_car_pos = [388.9, -140, 0]
wp_location = carla.Location(x=hero_car_pos[0], y=hero_car_pos[1], z=hero_car_pos[2])
wp = self._map.get_waypoint(wp_location)
hero_vehicle_transform = wp.transform
hero_model = 'vehicle.lincoln.mkz2017'
blueprint = random.choice(self.blueprint_library.filter(hero_model))
blueprint.set_attribute('role_name', 'heroxxx')
self.hero_car = self.world.try_spawn_actor(blueprint, hero_vehicle_transform)
# self.hero_car.set_autopilot(enabled=True)
# init zombie cars
first_vehicle_location = 25
first_vehicle_waypoint = wp.next(first_vehicle_location)[0]
second_vehicle_waypoint = wp.next(first_vehicle_location)[0].get_left_lane()
third_vehicle_waypoint = wp.next(first_vehicle_location)[0].get_right_lane()
first_vehicle_transform = carla.Transform(first_vehicle_waypoint.transform.location,
first_vehicle_waypoint.transform.rotation)
second_vehicle_transform = carla.Transform(second_vehicle_waypoint.transform.location,
second_vehicle_waypoint.transform.rotation)
third_vehicle_transform = carla.Transform(third_vehicle_waypoint.transform.location,
third_vehicle_waypoint.transform.rotation)
models = ['vehicle.nissan.patrol', 'vehicle.audi.tt', 'vehicle.lincoln.mkz2017']
blueprints = [random.choice(self.world.get_blueprint_library().filter(model)) for model in models]
for blueprint in blueprints:
blueprint.set_attribute('role_name', 'scenario')
self.first_vehicle = self.world.try_spawn_actor(blueprints[0], first_vehicle_transform)
self.second_vehicle = self.world.try_spawn_actor(blueprints[1], second_vehicle_transform)
self.third_vehicle = self.world.try_spawn_actor(blueprints[2], third_vehicle_transform)
self.zombie_cars = [self.first_vehicle, self.second_vehicle, self.third_vehicle]
# setup local planners for zombie cars
self._first_vehicle_speed = 25
self._second_vehicle_speed = 23
self._third_vehicle_speed = 23
first_vehicle_planner = WaypointFollower(self.zombie_cars[0], self._first_vehicle_speed,map=self._map,
avoid_collision=True)
second_vehicle_planner = WaypointFollower(self.zombie_cars[1], self._second_vehicle_speed,map=self._map,
avoid_collision=True)
third_vehicle_planner = WaypointFollower(self.zombie_cars[2], self._third_vehicle_speed,map=self._map,
avoid_collision=True)
self.vehicle_planners = [first_vehicle_planner, second_vehicle_planner, third_vehicle_planner]
for planner in self.vehicle_planners:
planner.setup()
def _update(self):
# update action for two local planners
for planner in self.vehicle_planners:
planner.update()
def restart(self):
self._remove_all_actors()
self._scenario_init()
def _remove_all_actors(self):
actors = [self.hero_car] + self.zombie_cars
for actor in actors:
if actor.is_alive:
actor.destroy()
def _remove_zombie_cars(self):
actors = self.zombie_cars
for actor in actors:
if actor.is_alive:
actor.destroy()
# helper functions, mostly are planners
class WaypointFollower_FullMap(object):
"""
This is an atomic behavior to follow waypoints indefinitely
while maintaining a given speed or, if given a waypoint plan,
to follow that plan
"""
def __init__(self, actor, target_speed, map, world, pattern_1=None, pattern_2=None, plan=None,
avoid_collision=False, actor_location=None, name="FollowWaypoints"):
"""
Set up actor and local planner
"""
self._actor_list = []
self._actor_list.append(actor)
# print('\n\ninit_actor: ', actor)
self._target_speed = target_speed
self._local_planner_list = []
self._plan = plan
self._args_lateral_dict = {'K_P': 1.0, 'K_D': 0.0, 'K_I': 0.0, 'dt': 0.03}
self._args_longitudinal_dict = {'K_P': 1.0, 'K_D': 0.0, 'K_I': 0.0, 'dt': 0.03}
self._avoid_collision = avoid_collision
self.pattern_1 = pattern_1
self.pattern_2 = pattern_2
self.map = map
self.world = world
self.actor_location = actor_location
def setup(self, timeout=5):
"""
Delayed one-time initialization
"""
for actor in self._actor_list:
self._apply_local_planner(actor)
return True
def _apply_local_planner(self, actor):
local_planner = WpFollowplanner_FullMap(
map=self.map,
actor=actor,
actor_location=self.actor_location,
opt_dict={
'target_speed': self._target_speed,
'lateral_control_dict': self._args_lateral_dict},
pattern_1=self.pattern_1,
pattern_2=self.pattern_2
)
if self._plan is not None:
local_planner.set_global_plan(self._plan)
self._local_planner_list.append(local_planner)
def update(self):
"""
Run local planner, obtain and apply control to actor
"""
# print('Update ...')
for actor, local_planner in zip(self._actor_list, self._local_planner_list):
# print(actor is not None, actor.is_alive, local_planner is not None)
if actor is not None and actor.is_alive and local_planner is not None:
control = local_planner.run_step(debug=False)
if self._avoid_collision and detect_lane_obstacle(world=self.world, actor=actor):
control.throttle = 0.0
control.brake = 1.0
actor.apply_control(control)
class WpFollowplanner_FullMap(object):
MIN_DISTANCE_PERCENTAGE = 0.9
def __init__(self, actor, opt_dict, map, pattern_1=None, pattern_2=None, actor_location=None):
self.pattern_1 = pattern_1
self.pattern_2 = pattern_2
self._vehicle = actor
self._map = map # self._vehicle.get_world().get_map()
self._dt = None
self._target_speed = None
self._sampling_radius = None
self._min_distance = None
self._current_waypoint = None
self._target_road_option = None
self._next_waypoints = None
self._target_waypoint = None
self._vehicle_controller = None
self._global_plan = None
self._road_options_list_prev = None
self._index = None
self.actor_location = actor_location
# queue with tuples of (waypoint, RoadOption)
self._waypoints_queue = deque(maxlen=600)
self._buffer_size = 5
self._waypoint_buffer = deque(maxlen=self._buffer_size)
# initializing controller
self.init_controller(opt_dict)
def init_controller(self, opt_dict):
"""
Controller initialization.
:param opt_dict: dictionary of arguments.
:return:
"""
# default params
self._dt = 1.0 / 20.0
self._target_speed = 20.0 # Km/h
self._sampling_radius = self._target_speed * 0.5 / 3.6 # 0.5 seconds horizon
self._min_distance = self._sampling_radius * self.MIN_DISTANCE_PERCENTAGE
args_lateral_dict = {
'K_P': 1.95,
'K_D': 0.01,
'K_I': 1.4,
'dt': self._dt}
args_longitudinal_dict = {
'K_P': 1.0,
'K_D': 0,
'K_I': 1,
'dt': self._dt}
# parameters overload
if 'dt' in opt_dict:
self._dt = opt_dict['dt']
if 'target_speed' in opt_dict:
self._target_speed = opt_dict['target_speed']
if 'sampling_radius' in opt_dict:
self._sampling_radius = self._target_speed * \
opt_dict['sampling_radius'] / 3.6
if 'lateral_control_dict' in opt_dict:
args_lateral_dict = opt_dict['lateral_control_dict']
if 'longitudinal_control_dict' in opt_dict:
args_longitudinal_dict = opt_dict['longitudinal_control_dict']
# self._current_waypoint = self._map.get_waypoint(self._vehicle.get_location())
self._current_waypoint = self._map.get_waypoint(self.actor_location)
# print('self._vehicle.get_location(): ', self._current_waypoint.transform.location, self._vehicle.get_transform().location, self._vehicle, self._current_waypoint.next(1.5))
self._vehicle_controller = VehiclePIDController(self._vehicle,
args_lateral=args_lateral_dict,
args_longitudinal=args_longitudinal_dict)
self._global_plan = False
self._waypoints_queue.append((self._current_waypoint.next(1.5)[0], RoadOption.LANEFOLLOW))
self._target_road_option = RoadOption.LANEFOLLOW
# fill waypoint trajectory queue
self._compute_next_waypoints(k=200)
def _compute_next_waypoints(self, k=1):
"""
Add new waypoints to the trajectory queue.
:param k: how many waypoints to compute
:return:
"""
# check we do not overflow the queue
available_entries = self._waypoints_queue.maxlen - len(self._waypoints_queue)
k = min(available_entries, k)
for _ in range(k):
last_waypoint = self._waypoints_queue[-1][0]
next_waypoints = last_waypoint.next(1.5)
# print('next_waypoints: ', last_waypoint, next_waypoints)
if len(next_waypoints) == 1:
# only one option available ==> lanefollowing
next_waypoint = next_waypoints[0]
road_option = RoadOption.LANEFOLLOW
else:
# random choice between the possible options
road_options_list = retrieve_options(
next_waypoints, self._current_waypoint)
if self.pattern_1:
index = self.pattern_1.pop(0)
road_option = road_options_list[index]
next_waypoint = next_waypoints[index]
self.pattern_1.append(index)
elif self.pattern_2:
index = self.pattern_2.pop(0)
if isinstance(index, int):
index = road_options_list.index(RoadOption(index))
road_option = RoadOption(index)
next_waypoint = next_waypoints[road_options_list.index(
road_option)]
elif isinstance(index, list):
next_waypoint = self._map.get_waypoint(
carla.Location(x=index[0], y=index[1], z=index[2]))
road_option = RoadOption.LANEFOLLOW
else:
raise NotImplementedError('index must be type `int` or `list`')
self.pattern_2.append(index)
print(road_options_list)
else: # self.pattern_1 is None and self.pattern_2 is None
print('self.pattern_1 is None and self.pattern_2 is None')
# print(next_waypoint.transform.location)
self._waypoints_queue.append((next_waypoint, road_option))
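# pattern_1 entries are positional indices into the road-option list returned at
# each junction and are cycled FIFO; pattern_2 entries are either RoadOption
# integer values or explicit [x, y, z] locations that get resolved to map
# waypoints, so a fixed route through the full map can be scripted per vehicle.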
def run_step(self, debug=True):
"""
Execute one step of local planning which involves running the longitudinal and lateral PID controllers to
follow the waypoints trajectory.
:param debug: boolean flag to activate waypoints debugging
:return:
"""
# not enough waypoints in the horizon? => add more!
if len(self._waypoints_queue) < int(self._waypoints_queue.maxlen * 0.5):
if not self._global_plan:
self._compute_next_waypoints(k=100)
if len(self._waypoints_queue) == 0:
control = carla.VehicleControl()
control.steer = 0.0
control.throttle = 0.0
control.brake = 0.0
control.hand_brake = False
control.manual_gear_shift = False
return control
# Buffering the waypoints
if not self._waypoint_buffer:
for i in range(self._buffer_size):
if self._waypoints_queue:
self._waypoint_buffer.append(
self._waypoints_queue.popleft())
else:
break
self._target_waypoint, self._target_road_option = self._waypoint_buffer[0]
control = self._vehicle_controller.run_step(self._target_speed, self._target_waypoint)
# purge the queue of obsolete waypoints
vehicle_transform = self._vehicle.get_transform()
max_index = -1
for i, (waypoint, _) in enumerate(self._waypoint_buffer):
if distance_vehicle(
waypoint, vehicle_transform) < self._min_distance:
max_index = i
if max_index >= 0:
for i in range(max_index + 1):
self._waypoint_buffer.popleft()
if debug:
draw_waypoints(self._vehicle.get_world(), [self._target_waypoint], self._vehicle.get_location().z + 1.0)
return control
class OtherLeadingVehicle_FullMap(object):
def __init__(self, name, map, world, only_reset_hero=True):
self.name = name
self._map = map
self.world = world
self.only_reset_hero = only_reset_hero
self.speed = 0
self.blueprint_library = self.world.get_blueprint_library()
self._scenario_init()
def _scenario_init(self):
# init hero car
# --------------------------------------------------------
# setup cars on a given waypoint
self.hero_car_pos_candidate = [[93.75690460205078, -132.76296997070312, 9.84310531616211],
[143.19349670410156, -204.4090118408203, 1.8431016206741333],
[-100.46805572509766, 16.266956329345703, 1.8431016206741333],
[-74.38717651367188, 99.71611022949219, 1.8052573204040527],
[-2.410623788833618, 207.50567626953125, 1.8431040048599243],
[244.31658935546875, 53.67372131347656, 1.8431016206741333],
# [245.8651123046875, -9.9967041015625, 1.8431016206741333],
[-6.594831466674805, -208.17323303222656, 1.8431016206741333],
[4.926102638244629, 91.77217864990234, 1.8432115316390991],
[4.926102638244629, 40.57860565185547, 1.8431016206741333],
#[5.430785179138184, 122.2763442993164, 1.8431016206741333],
[-77.88716888427734, 40.30692672729492, 1.8052647113800049],
[-149.06358337402344, 107.70558166503906, 1.8431016206741333]
]
self.hero_car_pos = [-77.88716888427734, 40.30692672729492, 1.8052647113800049]
wp_location = carla.Location(x=self.hero_car_pos[0], y=self.hero_car_pos[1], z=self.hero_car_pos[2])
wp = self._map.get_waypoint(wp_location)
hero_vehicle_transform = wp.transform
hero_model = 'vehicle.lincoln.mkz2017'
blueprint = random.choice(self.blueprint_library.filter(hero_model))
blueprint.set_attribute('role_name', 'hero')
self.hero_car = self.world.try_spawn_actor(blueprint, hero_vehicle_transform)
models = ['vehicle.nissan.patrol', 'vehicle.audi.tt',
'vehicle.lincoln.mkz2017', 'vehicle.volkswagen.t2',
'vehicle.tesla.model3', 'vehicle.nissan.micra',
'vehicle.audi.a2',
]
# models = ['vehicle.nissan.patrol', 'vehicle.audi.tt',
# 'vehicle.lincoln.mkz2017', 'vehicle.volkswagen.t2',
# 'vehicle.toyota.prius', 'vehicle.tesla.model3',
# 'vehicle.seat.leon', 'vehicle.nissan.micra',
# 'vehicle.mini.cooperst', 'vehicle.jeep.wrangler_rubicon',
# 'vehicle.dodge_charger.police', 'vehicle.citroen.c3',
# 'vehicle.chevrolet.impala', 'vehicle.mercedes-benz.coupe',
# 'vehicle.bmw.isetta', 'vehicle.bmw.grandtourer',
# 'vehicle.audi.a2', 'vehicle.ford.mustang',
# ]
blueprints = [random.choice(self.world.get_blueprint_library().filter(model)) for model in models]
for blueprint in blueprints:
blueprint.set_attribute('role_name', 'scenario')
self.blueprints = blueprints
self.models = models
# Not available: 135, 160, 6, 10, 11
first_car_pos = [93.75690460205078, -132.76296997070312, 9.84310531616211] # 88
first_wp_location = carla.Location(x=first_car_pos[0], y=first_car_pos[1], z=first_car_pos[2])
first_vehicle_waypoint = self._map.get_waypoint(first_wp_location)
first_vehicle_transform = carla.Transform(first_vehicle_waypoint.transform.location,
first_vehicle_waypoint.transform.rotation)
self.first_vehicle = self.world.try_spawn_actor(blueprints[0 % len(models)], first_vehicle_transform)
self._first_vehicle_speed = 25
# print('\n\nself.first_vehicle: ', first_wp_location, first_vehicle_waypoint, first_vehicle_transform,
# self.first_vehicle, self.first_vehicle.get_location())
# for actor in self.world.get_actors():
# print(actor)
# if 'vehicle' in actor.type_id:
# print('vehicle', actor.get_location())
next_second_car_pos = [143.19349670410156, -204.4090118408203, 1.8431016206741333] # 77
next_second_wp_location = carla.Location(x=next_second_car_pos[0], y=next_second_car_pos[1],
z=next_second_car_pos[2])
next_second_vehicle_waypoint = self._map.get_waypoint(next_second_wp_location)
next_second_vehicle_transform = carla.Transform(next_second_vehicle_waypoint.transform.location,
next_second_vehicle_waypoint.transform.rotation)
self.next_second_vehicle = self.world.try_spawn_actor(blueprints[1 % len(models)],
next_second_vehicle_transform)
self._next_second_vehicle_speed = 25
second_vehicle_waypoint = next_second_vehicle_waypoint.next(16)[0].get_left_lane()
second_vehicle_transform = carla.Transform(second_vehicle_waypoint.transform.location,
second_vehicle_waypoint.transform.rotation)
self.second_vehicle = self.world.try_spawn_actor(blueprints[2 % len(models)], second_vehicle_transform)
self._second_vehicle_speed = 26
third_car_pos = [-100.46805572509766, 16.266956329345703, 1.8431016206741333] # 189
third_wp_location = carla.Location(x=third_car_pos[0], y=third_car_pos[1], z=third_car_pos[2])
third_vehicle_waypoint = self._map.get_waypoint(third_wp_location)
third_vehicle_transform = carla.Transform(third_vehicle_waypoint.transform.location,
third_vehicle_waypoint.transform.rotation)
self.third_vehicle = self.world.try_spawn_actor(blueprints[3 % len(models)], third_vehicle_transform)
# setup local planners for zombie cars
self._third_vehicle_speed = 25
fourth_car_pos = [-74.38717651367188, 99.71611022949219, 1.8052573204040527] # 27
fourth_wp_location = carla.Location(x=fourth_car_pos[0], y=fourth_car_pos[1], z=fourth_car_pos[2])
fourth_vehicle_waypoint = self._map.get_waypoint(fourth_wp_location)
fourth_vehicle_transform = carla.Transform(fourth_vehicle_waypoint.transform.location,
fourth_vehicle_waypoint.transform.rotation)
self.fourth_vehicle = self.world.try_spawn_actor(blueprints[4 % len(models)], fourth_vehicle_transform)
# setup local planners for zombie cars
self._fourth_vehicle_speed = 20
next_fifth_car_pos = [-2.410623788833618, 207.50567626953125, 1.8431040048599243] # 4
next_fifth_wp_location = carla.Location(x=next_fifth_car_pos[0], y=next_fifth_car_pos[1],
z=next_fifth_car_pos[2])
next_fifth_vehicle_waypoint = self._map.get_waypoint(next_fifth_wp_location)
next_fifth_vehicle_transform = carla.Transform(next_fifth_vehicle_waypoint.transform.location,
next_fifth_vehicle_waypoint.transform.rotation)
self.next_fifth_vehicle = self.world.try_spawn_actor(blueprints[6 % len(models)], next_fifth_vehicle_transform)
# setup local planners for zombie cars
self._next_fifth_vehicle_speed = 25
fifth_vehicle_waypoint = next_fifth_vehicle_waypoint.next(16)[0].get_left_lane()
fifth_vehicle_transform = carla.Transform(fifth_vehicle_waypoint.transform.location,
fifth_vehicle_waypoint.transform.rotation)
self.fifth_vehicle = self.world.try_spawn_actor(blueprints[7 % len(models)], fifth_vehicle_transform)
# setup local planners for zombie cars
self._fifth_vehicle_speed = 26
next_sixth_car_pos = [244.31658935546875, 53.67372131347656, 1.8431016206741333] # 162
next_sixth_wp_location = carla.Location(x=next_sixth_car_pos[0], y=next_sixth_car_pos[1],
z=next_sixth_car_pos[2])
next_sixth_vehicle_waypoint = self._map.get_waypoint(next_sixth_wp_location)
next_sixth_vehicle_transform = carla.Transform(next_sixth_vehicle_waypoint.transform.location,
next_sixth_vehicle_waypoint.transform.rotation)
self.next_sixth_vehicle = self.world.try_spawn_actor(blueprints[8 % len(models)], next_sixth_vehicle_transform)
# setup local planners for zombie cars
self._next_sixth_vehicle_speed = 25
sixth_vehicle_waypoint = next_sixth_vehicle_waypoint.next(16)[0].get_left_lane()
sixth_vehicle_transform = carla.Transform(sixth_vehicle_waypoint.transform.location,
sixth_vehicle_waypoint.transform.rotation)
self.sixth_vehicle = self.world.try_spawn_actor(blueprints[9 % len(models)], sixth_vehicle_transform)
# setup local planners for zombie cars
self._sixth_vehicle_speed = 26
next_seventh_car_pos = [245.8651123046875, -9.9967041015625, 1.8431016206741333] # 134
next_seventh_wp_location = carla.Location(x=next_seventh_car_pos[0], y=next_seventh_car_pos[1],
z=next_seventh_car_pos[2])
next_seventh_vehicle_waypoint = self._map.get_waypoint(next_seventh_wp_location)
next_seventh_vehicle_transform = carla.Transform(next_seventh_vehicle_waypoint.transform.location,
next_seventh_vehicle_waypoint.transform.rotation)
self.next_seventh_vehicle = self.world.try_spawn_actor(blueprints[10 % len(models)],
next_seventh_vehicle_transform)
# setup local planners for zombie cars
self._next_seventh_vehicle_speed = 25
seventh_vehicle_waypoint = next_seventh_vehicle_waypoint.next(16)[0].get_left_lane()
seventh_vehicle_transform = carla.Transform(seventh_vehicle_waypoint.transform.location,
seventh_vehicle_waypoint.transform.rotation)
self.seventh_vehicle = self.world.try_spawn_actor(blueprints[11 % len(models)], seventh_vehicle_transform)
# setup local planners for zombie cars
self._seventh_vehicle_speed = 26
next_eighth_car_pos = [-6.594831466674805, -208.17323303222656, 1.8431016206741333] # 68
next_eighth_wp_location = carla.Location(x=next_eighth_car_pos[0], y=next_eighth_car_pos[1],
z=next_eighth_car_pos[2])
next_eighth_vehicle_waypoint = self._map.get_waypoint(next_eighth_wp_location)
next_eighth_vehicle_transform = carla.Transform(next_eighth_vehicle_waypoint.transform.location,
next_eighth_vehicle_waypoint.transform.rotation)
self.next_eighth_vehicle = self.world.try_spawn_actor(blueprints[12 % len(models)],
next_eighth_vehicle_transform)
# setup local planners for zombie cars
self._next_eighth_vehicle_speed = 25
eighth_vehicle_waypoint = next_eighth_vehicle_waypoint.next(16)[0].get_left_lane()
eighth_vehicle_transform = carla.Transform(eighth_vehicle_waypoint.transform.location,
eighth_vehicle_waypoint.transform.rotation)
self.eighth_vehicle = self.world.try_spawn_actor(blueprints[13 % len(models)], eighth_vehicle_transform)
# setup local planners for zombie cars
self._eighth_vehicle_speed = 26
no_12_car_pos = [4.926102638244629, 91.77217864990234, 1.8432115316390991] # 53
no_12_wp_location = carla.Location(x=no_12_car_pos[0], y=no_12_car_pos[1], z=no_12_car_pos[2])
no_12_vehicle_waypoint = self._map.get_waypoint(no_12_wp_location)
no_12_vehicle_transform = carla.Transform(no_12_vehicle_waypoint.transform.location,
no_12_vehicle_waypoint.transform.rotation)
self.no_12_vehicle = self.world.try_spawn_actor(blueprints[17 % len(models)], no_12_vehicle_transform)
# setup local planners for zombie cars
self.no_12_vehicle_speed = 25
no_13_car_pos = [4.926102638244629, 40.57860565185547, 1.8431016206741333] # 145
no_13_wp_location = carla.Location(x=no_13_car_pos[0], y=no_13_car_pos[1], z=no_13_car_pos[2])
no_13_vehicle_waypoint = self._map.get_waypoint(no_13_wp_location)
no_13_vehicle_transform = carla.Transform(no_13_vehicle_waypoint.transform.location,
no_13_vehicle_waypoint.transform.rotation)
self.no_13_vehicle = self.world.try_spawn_actor(blueprints[18 % len(models)], no_13_vehicle_transform)
# setup local planners for zombie cars
self.no_13_vehicle_speed = 25
no_14_car_pos = [5.430785179138184, 122.2763442993164, 1.8431016206741333] # 98
no_14_wp_location = carla.Location(x=no_14_car_pos[0], y=no_14_car_pos[1], z=no_14_car_pos[2])
no_14_vehicle_waypoint = self._map.get_waypoint(no_14_wp_location)
no_14_vehicle_transform = carla.Transform(no_14_vehicle_waypoint.transform.location,
no_14_vehicle_waypoint.transform.rotation)
self.no_14_vehicle = self.world.try_spawn_actor(blueprints[19 % len(models)], no_14_vehicle_transform)
# setup local planners for zombie cars
self.no_14_vehicle_speed = 25
self.zombie_cars = [self.first_vehicle, self.second_vehicle, self.next_second_vehicle, self.third_vehicle,
self.fourth_vehicle,
self.fifth_vehicle, self.next_fifth_vehicle, self.sixth_vehicle,
self.next_sixth_vehicle, self.seventh_vehicle, self.next_seventh_vehicle,
self.eighth_vehicle, self.next_eighth_vehicle,
self.no_12_vehicle,
self.no_13_vehicle, self.no_14_vehicle
]
first_vehicle_planner = WaypointFollower_FullMap(actor=self.first_vehicle, map=self._map,
target_speed=self._first_vehicle_speed,
actor_location=first_wp_location,
avoid_collision=True, pattern_1=[1, 0, 2],
world=self.world) # [1,3,1,1,2,1]
second_vehicle_planner = WaypointFollower_FullMap(actor=self.second_vehicle,
target_speed=self._second_vehicle_speed,
actor_location=second_vehicle_waypoint.transform.location,
map=self._map,
avoid_collision=True, pattern_1=[1, 0, 0, 0, 1],
world=self.world)
next_second_vehicle_planner = WaypointFollower_FullMap(actor=self.next_second_vehicle,
target_speed=self._next_second_vehicle_speed,
actor_location=next_second_wp_location,
map=self._map, avoid_collision=True, pattern_1=[0, 1, 0],
world=self.world)
third_vehicle_planner = WaypointFollower_FullMap(actor=self.third_vehicle,
target_speed=self._third_vehicle_speed,
actor_location=third_wp_location,
map=self._map,
avoid_collision=True, pattern_1=[0, 0, 0],
world=self.world)
fourth_vehicle_planner = WaypointFollower_FullMap(actor=self.fourth_vehicle,
target_speed=self._fourth_vehicle_speed,
actor_location=fourth_wp_location,
map=self._map,
avoid_collision=True,
pattern_1=[0, 0, 0, 0, 1],
world=self.world)
fifth_vehicle_planner = WaypointFollower_FullMap(actor=self.fifth_vehicle,
target_speed=self._fifth_vehicle_speed,
actor_location=fifth_vehicle_waypoint.transform.location,
map=self._map, avoid_collision=True,
pattern_1=[1, 1, 0, 0, 0],
world=self.world)
next_fifth_vehicle_planner = WaypointFollower_FullMap(actor=self.next_fifth_vehicle,
target_speed=self._next_fifth_vehicle_speed,
actor_location=next_fifth_wp_location,
map=self._map,
avoid_collision=True,
pattern_1=[0, 1, 0],
world=self.world)
sixth_vehicle_planner = WaypointFollower_FullMap(actor=self.sixth_vehicle,
target_speed=self._sixth_vehicle_speed,
actor_location=sixth_vehicle_waypoint.transform.location,
map=self._map,
avoid_collision=True,
pattern_1=[1, 0, 0, 0, 1],
world=self.world)
next_sixth_vehicle_planner = WaypointFollower_FullMap(actor=self.next_sixth_vehicle,
target_speed=self._next_sixth_vehicle_speed,
actor_location=next_sixth_wp_location,
map=self._map,
avoid_collision=True,
pattern_1=[0, 1, 0],
world=self.world)
seventh_vehicle_planner = WaypointFollower_FullMap(actor=self.seventh_vehicle,
target_speed=self._seventh_vehicle_speed,
actor_location=seventh_vehicle_waypoint.transform.location,
map=self._map,
avoid_collision=True, pattern_1=[1, 0, 0, 0, 1],
world=self.world)
next_seventh_vehicle_planner = WaypointFollower_FullMap(actor=self.next_seventh_vehicle,
target_speed=self._next_seventh_vehicle_speed,
actor_location=next_seventh_wp_location,
map=self._map,
avoid_collision=True, pattern_1=[0, 1, 0],
world=self.world)
eighth_vehicle_planner = WaypointFollower_FullMap(actor=self.eighth_vehicle,
target_speed=self._eighth_vehicle_speed,
actor_location=eighth_vehicle_waypoint.transform.location,
map=self._map,
avoid_collision=True, pattern_1=[0, 0, 0, 1, 1],
world=self.world)
next_eighth_vehicle_planner = WaypointFollower_FullMap(actor=self.next_eighth_vehicle,
target_speed=self._next_eighth_vehicle_speed,
actor_location=next_eighth_wp_location,
map=self._map,
avoid_collision=True, pattern_1=[0, 1, 0],
world=self.world)
no_12_vehicle_planner = WaypointFollower_FullMap(actor=self.no_12_vehicle,
target_speed=self.no_12_vehicle_speed,
actor_location=no_12_wp_location,
map=self._map, avoid_collision=True,
pattern_1=[1, 1, 0, 3, 1, 1, 1, 1, 1, 0, 1, 0],
world=self.world)
no_13_vehicle_planner = WaypointFollower_FullMap(actor=self.no_13_vehicle,
target_speed=self.no_13_vehicle_speed,
actor_location=no_13_wp_location,
map=self._map, avoid_collision=True,
pattern_1=[1, 1, 0, 3, 1, 1, 1, 1, 1, 0, 1, 0],
world=self.world)
no_14_vehicle_planner = WaypointFollower_FullMap(actor=self.no_14_vehicle,
target_speed=self.no_14_vehicle_speed,
actor_location=no_14_wp_location,
map=self._map, avoid_collision=True,
pattern_1=[1, 1, 0, 3, 1, 1, 1, 1, 1, 0, 1, 0],
world=self.world)
self.vehicle_planners = [first_vehicle_planner, second_vehicle_planner, next_second_vehicle_planner,
third_vehicle_planner, fourth_vehicle_planner,
fifth_vehicle_planner, next_fifth_vehicle_planner, sixth_vehicle_planner,
next_sixth_vehicle_planner, seventh_vehicle_planner,
next_seventh_vehicle_planner, eighth_vehicle_planner, next_eighth_vehicle_planner,
no_12_vehicle_planner, no_13_vehicle_planner, no_14_vehicle_planner,
]
for planner in self.vehicle_planners:
planner.setup()
def generate_car(self):
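"""Re-spawn background traffic at the predefined positions.

For every entry in all_car_pos (the sentinels [-1, -1, -1] and [-2, -2, -2]
mean 'left lane of the previous entry'), spawn a random blueprint there if no
other vehicle is within 30 m, and attach a WaypointFollower_FullMap planner
with the matching pattern from all_pattern.
"""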
additional_zombie_car = list()
additional_zombie_car_speed = list()
additional_pattern = list()
additional_actor_location = list()
all_car_pos = [[93.75690460205078, -132.76296997070312, 9.84310531616211],
[143.19349670410156, -204.4090118408203, 1.8431016206741333],
[-2, -2, -2],
[-100.46805572509766, 16.266956329345703, 1.8431016206741333],
[-74.38717651367188, 99.71611022949219, 1.8052573204040527],
[-2.410623788833618, 207.50567626953125, 1.8431040048599243],
[-2, -2, -2],
[244.31658935546875, 53.67372131347656, 1.8431016206741333],
[-2, -2, -2],
[245.8651123046875, -9.9967041015625, 1.8431016206741333],
[-2, -2, -2],
[-6.594831466674805, -208.17323303222656, 1.8431016206741333],
[-2, -2, -2],
[4.926102638244629, 91.77217864990234, 1.8432115316390991],
[4.926102638244629, 40.57860565185547, 1.8431016206741333],
[5.430785179138184, 122.2763442993164, 1.8431016206741333]
]
all_pattern = [[1, 0, 2],
[0, 1, 0],
[1, 0, 0, 0, 1],
[0, 0, 0],
[0, 0, 0, 0, 1],
[0, 1, 0],
[1, 1, 0, 0, 0],
[0, 1, 0],
[1, 0, 0, 0, 1],
[0, 1, 0],
[1, 0, 0, 0, 1],
[0, 1, 0],
[0, 0, 0, 1, 1],
[1, 1, 0, 3, 1, 1, 1, 1, 1, 0, 1, 0],
[1, 1, 0, 3, 1, 1, 1, 1, 1, 0, 1, 0],
[1, 1, 0, 3, 1, 1, 1, 1, 1, 0, 1, 0],
]
for i, car_pos in enumerate(all_car_pos):
if car_pos == [-1, -1, -1] or car_pos == [-2, -2, -2]:
# car_pos == [-2, -2, -2]: get_left_lane(), speed=26
# car_pos == [-1, -1, -1]: get_left_lane()
car_pos = all_car_pos[i - 1]
orig_actor_location = carla.Location(x=car_pos[0], y=car_pos[1], z=car_pos[2])
vehicle_waypoint = self._map.get_waypoint(orig_actor_location).next(16)[0].get_left_lane()
actor_location = vehicle_waypoint.transform.location
else:
actor_location = carla.Location(x=car_pos[0], y=car_pos[1], z=car_pos[2])
vehicle_waypoint = self._map.get_waypoint(actor_location)
world_actors = self.world.get_actors().filter('vehicle.*')
flag_spawn = True
for adversary in world_actors:
if actor_location.distance(adversary.get_location()) < 30:
flag_spawn = False
if flag_spawn:
vehicle_transform = carla.Transform(vehicle_waypoint.transform.location,
vehicle_waypoint.transform.rotation)
try:
# vehicle = self.world.try_spawn_actor(self.blueprints[np.random.randint(0, len(self.blueprints))],
# vehicle_transform)
vehicle = self.world.spawn_actor(self.blueprints[np.random.randint(0, len(self.blueprints))],
vehicle_transform)
if car_pos == [-2, -2, -2]:
_vehicle_speed = 26
else:
_vehicle_speed = 25
additional_zombie_car.append(vehicle)
additional_zombie_car_speed.append(_vehicle_speed)
additional_pattern.append(all_pattern[i])
additional_actor_location.append(actor_location)
self.zombie_cars.append(vehicle)
except Exception:  # spawn_actor() raises if spawning fails, e.g. the spawn point is blocked
print('generate_car() Failed!', actor_location)
for i, (one_zombie_car, one_zombie_car_speed, one_pattern, one_actor_location) in enumerate(
zip(additional_zombie_car, additional_zombie_car_speed, additional_pattern,
additional_actor_location)):
vehicle_planner = WaypointFollower_FullMap(actor=one_zombie_car, map=self._map,
actor_location=one_actor_location,
target_speed=one_zombie_car_speed,
avoid_collision=True, pattern_1=one_pattern,
world=self.world)
self.vehicle_planners.append(vehicle_planner)
vehicle_planner.setup()
def _update(self):
# update action for two local planners
# if _dis3d(_pos3d(self.hero_car), _pos3d(self.first_vehicle)) > 26.:
# pass
# else:
# for planner in self.vehicle_planners:
# planner.update()
self.generate_car()
for planner in self.vehicle_planners:
planner.update()
def restart(self):
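"""Reset the scenario between episodes.

With only_reset_hero=True only the hero car is re-spawned, at the first
candidate position with no other vehicle within 10 m; otherwise every actor
is destroyed and the whole scenario is rebuilt via _scenario_init().
"""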
if self.only_reset_hero:
random.shuffle(self.hero_car_pos_candidate)
world_actors = self.world.get_actors().filter('vehicle.*')
for hero_car_pos in self.hero_car_pos_candidate:
wp_location = carla.Location(x=hero_car_pos[0], y=hero_car_pos[1], z=hero_car_pos[2])
flag_spawn = True
for adversary in world_actors:
if wp_location.distance(adversary.get_location()) < 10:
flag_spawn = False
if flag_spawn:
wp = self._map.get_waypoint(wp_location)
hero_vehicle_transform = wp.transform
hero_model = 'vehicle.lincoln.mkz2017'
blueprint = random.choice(self.blueprint_library.filter(hero_model))
blueprint.set_attribute('role_name', 'hero')
self.hero_car = self.world.try_spawn_actor(blueprint, hero_vehicle_transform)
break
else:
self._remove_all_actors()
self.zombie_cars = list()
self.vehicle_planners = list()
self._scenario_init()
def _remove_all_actors(self):
actors = [self.hero_car] + self.zombie_cars
# actors = self.zombie_cars
for actor in actors:
if actor.is_alive:
actor.destroy()
def _remove_zombie_cars(self):
actors = self.zombie_cars
for actor in actors:
if actor.is_alive:
actor.destroy()
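# Illustrative usage sketch (not part of the original module; assumes a CARLA
# server is running and, for world.tick(), that synchronous mode is enabled).
# Every scenario class below follows the same pattern: construct it with a
# name, a carla.Map and a carla.World, call _update() once per simulation
# step, and restart() between episodes.
#
#   client = carla.Client('localhost', 2000)
#   world = client.get_world()
#   scenario = Cross_Join('cross_join', world.get_map(), world)
#   for _ in range(1000):
#       world.tick()
#       scenario._update()
#   scenario.restart()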
class Cross_Join(object):
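"""Junction scenario: the hero starts near an intersection while a platoon of
three zombie cars (target speeds 21, 20, 20) is driven across it by
WaypointFollower_FullMap planners with collision avoidance disabled."""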
def __init__(self, name, map, world, only_reset_hero=False):
self.name = name
self._map = map
self.world = world
self.speed = 0
self.only_reset_hero = only_reset_hero
self.blueprint_library = self.world.get_blueprint_library()
self._scenario_init()
def _scenario_init(self):
# init hero car
# --------------------------------------------------------
# setup cars on a given waypoint
self.hero_car_pos = [-42.350990295410156, -2.835118293762207, 1.8431016206741333]
# self.hero_car_pos = [-74.38717651367188, 57.531620025634766, 1.805267095565796] # 13
wp_location = carla.Location(x=self.hero_car_pos[0], y=self.hero_car_pos[1], z=self.hero_car_pos[2]+10)
wp = self._map.get_waypoint(wp_location)
hero_vehicle_transform = wp.transform
hero_model = 'vehicle.lincoln.mkz2017'
blueprint = random.choice(self.blueprint_library.filter(hero_model))
blueprint.set_attribute('role_name', 'hero')
self.hero_car = self.world.try_spawn_actor(blueprint, hero_vehicle_transform)
models = ['vehicle.nissan.patrol', 'vehicle.audi.tt',
'vehicle.lincoln.mkz2017', 'vehicle.volkswagen.t2',
'vehicle.tesla.model3', 'vehicle.nissan.micra',
'vehicle.audi.a2',
]
blueprints = [random.choice(self.world.get_blueprint_library().filter(model)) for model in models]
for blueprint in blueprints:
blueprint.set_attribute('role_name', 'scenario')
self.blueprints = blueprints
self.models = models
# Not available: 135, 160
fourth_car_pos = [-74.38717651367188, 57.531620025634766, 1.805267095565796] # 15
fourth_wp_location = carla.Location(x=fourth_car_pos[0], y=fourth_car_pos[1], z=fourth_car_pos[2]+10)
fourth_vehicle_waypoint = self._map.get_waypoint(fourth_wp_location)
fourth_vehicle_transform = carla.Transform(fourth_vehicle_waypoint.transform.location,
fourth_vehicle_waypoint.transform.rotation)
self.fourth_vehicle = self.world.try_spawn_actor(blueprints[4 % len(models)], fourth_vehicle_transform)
# setup local planners for zombie cars
# speed was hand-tuned; alternatives tried include random.choice([21, 25, 31])
# and random.choice([21, 27, 31])
speed = 21
self.speed = speed
print('Velocity: ', self.speed)
self._fourth_vehicle_speed = speed
fifth_car_pos = [-74.38717651367188, 77.64903259277344, 1.8052573204040527] # 25
fifth_wp_location = carla.Location(x=fifth_car_pos[0], y=fifth_car_pos[1], z=fifth_car_pos[2]+10)
fifth_vehicle_waypoint = self._map.get_waypoint(fifth_wp_location)
fifth_vehicle_transform = carla.Transform(fifth_vehicle_waypoint.transform.location,
fifth_vehicle_waypoint.transform.rotation)
self.fifth_vehicle = self.world.try_spawn_actor(blueprints[4 % len(models)], fifth_vehicle_transform)
# setup local planners for zombie cars
self._fifth_vehicle_speed = speed-1 #random.choice([21, 27, 31])
sixth_car_pos = [-74.38717651367188, 97.71611022949219, 1.8052573204040527] # 27
sixth_wp_location = carla.Location(x=sixth_car_pos[0], y=sixth_car_pos[1], z=sixth_car_pos[2]+10)
sixth_vehicle_waypoint = self._map.get_waypoint(sixth_wp_location)
sixth_vehicle_transform = carla.Transform(sixth_vehicle_waypoint.transform.location,
sixth_vehicle_waypoint.transform.rotation)
self.sixth_vehicle = self.world.try_spawn_actor(blueprints[4 % len(models)], sixth_vehicle_transform)
# setup local planners for zombie cars
self._sixth_vehicle_speed = speed-1 #random.choice([21, 27, 31])
self.zombie_cars = [self.fourth_vehicle, self.fifth_vehicle, self.sixth_vehicle]
fourth_vehicle_planner = WaypointFollower_FullMap(actor=self.fourth_vehicle,
actor_location=fourth_wp_location,
target_speed=self._fourth_vehicle_speed,
map=self._map,
avoid_collision=False,
pattern_1=[1, 1, 1, 1, 1, 1, 0, 0, 1, ],
world=self.world)
fifth_vehicle_planner = WaypointFollower_FullMap(actor=self.fifth_vehicle,
actor_location=fifth_wp_location,
target_speed=self._fifth_vehicle_speed,
map=self._map,
avoid_collision=False,
pattern_1=[1, 1, 1, 1, 1, 1, 0, 0, 1, ],
world=self.world)
sixth_vehicle_planner = WaypointFollower_FullMap(actor=self.sixth_vehicle,
actor_location=sixth_wp_location,
target_speed=self._sixth_vehicle_speed,
map=self._map,
avoid_collision=False,
pattern_1=[1, 1, 1, 1, 1, 1, 0, 0, 1, ],
world=self.world)
self.vehicle_planners = [fourth_vehicle_planner, fifth_vehicle_planner, sixth_vehicle_planner]
for planner in self.vehicle_planners:
planner.setup()
def generate_car(self):
additional_zombie_car = list()
additional_zombie_car_speed = list()
additional_pattern = list()
additional_actor_location = list()
# all_car_pos = [[-74.38717651367188, 57.531620025634766, 1.805267095565796],
# [-74.38717651367188, 75.64903259277344, 1.8052573204040527],
# [-74.38717651367188, 99.71611022949219, 1.8052573204040527]]
all_car_pos = []
all_pattern = [[1, 1, 1, 1, 1, 1, 0, 0, 1, ], [1, 1, 1, 1, 1, 1, 0, 0, 1, ], [1, 1, 1, 1, 1, 1, 0, 0, 1, ], ]
for i, car_pos in enumerate(all_car_pos):
if car_pos == [-1, -1, -1] or car_pos == [-2, -2, -2]:
# car_pos == [-2, -2, -2]: get_left_lane(), speed=26
# car_pos == [-1, -1, -1]: get_left_lane()
car_pos = all_car_pos[i - 1]
orig_actor_location = carla.Location(x=car_pos[0], y=car_pos[1], z=car_pos[2])
vehicle_waypoint = self._map.get_waypoint(orig_actor_location).next(16)[0].get_left_lane()
actor_location = vehicle_waypoint.transform.location
else:
actor_location = carla.Location(x=car_pos[0], y=car_pos[1], z=car_pos[2])
vehicle_waypoint = self._map.get_waypoint(actor_location)
world_actors = self.world.get_actors().filter('vehicle.*')
flag_spawn = True
for adversary in world_actors:
if actor_location.distance(adversary.get_location()) < 5:
flag_spawn = False
if flag_spawn:
vehicle_transform = carla.Transform(vehicle_waypoint.transform.location,
vehicle_waypoint.transform.rotation)
vehicle = self.world.try_spawn_actor(self.blueprints[np.random.randint(0, len(self.blueprints))],
vehicle_transform)
if car_pos == [-2, -2, -2]:
_vehicle_speed = 26
else:
_vehicle_speed = 25
self.speed = _vehicle_speed
additional_zombie_car.append(vehicle)
additional_zombie_car_speed.append(_vehicle_speed)
additional_pattern.append(all_pattern[i])
additional_actor_location.append(actor_location)
self.zombie_cars.append(vehicle)
for i, (one_zombie_car, one_zombie_car_speed, one_pattern, one_actor_location) in enumerate(
zip(additional_zombie_car, additional_zombie_car_speed, additional_pattern, additional_actor_location)):
vehicle_planner = WaypointFollower_FullMap(actor=one_zombie_car, map=self._map,
actor_location=one_actor_location,
target_speed=one_zombie_car_speed,
avoid_collision=True, pattern_1=one_pattern,
world=self.world)
self.vehicle_planners.append(vehicle_planner)
vehicle_planner.setup()
def _update(self):
self.generate_car()
for planner in self.vehicle_planners:
planner.update()
def restart(self):
if self.only_reset_hero:
wp_location = carla.Location(x=self.hero_car_pos[0], y=self.hero_car_pos[1], z=self.hero_car_pos[2])
wp = self._map.get_waypoint(wp_location)
hero_vehicle_transform = wp.transform
hero_model = 'vehicle.lincoln.mkz2017'
blueprint = random.choice(self.blueprint_library.filter(hero_model))
blueprint.set_attribute('role_name', 'hero')
self.hero_car = self.world.try_spawn_actor(blueprint, hero_vehicle_transform)
else:
self._remove_all_actors()
# self.zombie_cars = list()
# self.vehicle_planners = list()
self._scenario_init()
def _remove_all_actors(self):
actors = [self.hero_car] + self.zombie_cars
# actors = self.zombie_cars
for actor in actors:
if actor.is_alive:
actor.destroy()
def _remove_zombie_cars(self):
actors = self.zombie_cars
for actor in actors:
if actor.is_alive:
actor.destroy()
class Ring_Join(object):
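"""Ring-join scenario (per the class name): the hero joins a circular route
already occupied by four zombie cars (target speeds 25, 24, 24, 24) driven
without collision avoidance."""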
def __init__(self, name, map, world, only_reset_hero=False):
self.name = name
self._map = map
self.world = world
self.only_reset_hero = only_reset_hero
self.speed = 0
self.blueprint_library = self.world.get_blueprint_library()
self._scenario_init()
def _scenario_init(self):
# init hero car
# --------------------------------------------------------
# setup cars on a given waypoint
self.hero_car_pos = [52.61453628540039, -7.843905448913574, 1.8431028127670288] # 55
wp_location = carla.Location(x=self.hero_car_pos[0], y=self.hero_car_pos[1], z=self.hero_car_pos[2])
wp = self._map.get_waypoint(wp_location)
hero_vehicle_transform = wp.transform
hero_model = 'vehicle.lincoln.mkz2017'
blueprint = random.choice(self.blueprint_library.filter(hero_model))
blueprint.set_attribute('role_name', 'hero')
self.hero_car = self.world.try_spawn_actor(blueprint, hero_vehicle_transform)
models = ['vehicle.nissan.patrol', 'vehicle.audi.tt',
'vehicle.lincoln.mkz2017', 'vehicle.volkswagen.t2',
'vehicle.tesla.model3', 'vehicle.nissan.micra',
'vehicle.audi.a2',
]
blueprints = [random.choice(self.world.get_blueprint_library().filter(model)) for model in models]
for blueprint in blueprints:
blueprint.set_attribute('role_name', 'scenario')
self.blueprints = blueprints
self.models = models
# Not available: 135, 160
fourth_car_pos = [4.926102638244629, 40.57860565185547, 1.8431016206741333] # 145
fourth_wp_location = carla.Location(x=fourth_car_pos[0], y=fourth_car_pos[1], z=fourth_car_pos[2])
fourth_vehicle_waypoint = self._map.get_waypoint(fourth_wp_location)
fourth_vehicle_transform = carla.Transform(fourth_vehicle_waypoint.transform.location,
fourth_vehicle_waypoint.transform.rotation)
self.fourth_vehicle = self.world.try_spawn_actor(blueprints[4 % len(models)], fourth_vehicle_transform)
# setup local planners for zombie cars
# velocity = random.choice([21, 25, 31]) # default is 21, 27, 31
velocity = 25
print('velocity: ', velocity)
self._fourth_vehicle_speed = velocity
self.speed = velocity
fifth_car_pos = [4.926102638244629, 59.08685302734375, 1.8430894613265991] # 47
fifth_wp_location = carla.Location(x=fifth_car_pos[0], y=fifth_car_pos[1], z=fifth_car_pos[2])
fifth_vehicle_waypoint = self._map.get_waypoint(fifth_wp_location)
fifth_vehicle_transform = carla.Transform(fifth_vehicle_waypoint.transform.location,
fifth_vehicle_waypoint.transform.rotation)
self.fifth_vehicle = self.world.try_spawn_actor(blueprints[4 % len(models)], fifth_vehicle_transform)
# setup local planners for zombie cars
self._fifth_vehicle_speed = velocity-1
sixth_car_pos = [4.926102638244629, 72.03030395507812, 1.843079686164856] # 49
sixth_wp_location = carla.Location(x=sixth_car_pos[0], y=sixth_car_pos[1], z=sixth_car_pos[2])
sixth_vehicle_waypoint = self._map.get_waypoint(sixth_wp_location)
sixth_vehicle_transform = carla.Transform(sixth_vehicle_waypoint.transform.location,
sixth_vehicle_waypoint.transform.rotation)
self.sixth_vehicle = self.world.try_spawn_actor(blueprints[4 % len(models)], sixth_vehicle_transform)
# setup local planners for zombie cars
self._sixth_vehicle_speed = velocity-1
seventh_car_pos = [4.926102638244629, 91.77217864990234, 1.8432115316390991] # 53
seventh_wp_location = carla.Location(x=seventh_car_pos[0], y=seventh_car_pos[1], z=seventh_car_pos[2])
seventh_vehicle_waypoint = self._map.get_waypoint(seventh_wp_location)
seventh_vehicle_transform = carla.Transform(seventh_vehicle_waypoint.transform.location,
seventh_vehicle_waypoint.transform.rotation)
self.seventh_vehicle = self.world.try_spawn_actor(blueprints[4 % len(models)], seventh_vehicle_transform)
# setup local planners for zombie cars
self._seventh_vehicle_speed = velocity-1
self.zombie_cars = [self.fourth_vehicle, self.fifth_vehicle, self.sixth_vehicle, self.seventh_vehicle]
fourth_vehicle_planner = WaypointFollower_FullMap(actor=self.fourth_vehicle,
target_speed=self._fourth_vehicle_speed,
actor_location=fourth_wp_location,
map=self._map,
avoid_collision=False,
pattern_1=[1, 1, 0, 3, 1, 1, 1, 1, 1, 0, 1, 0],
world=self.world)
fifth_vehicle_planner = WaypointFollower_FullMap(actor=self.fifth_vehicle,
target_speed=self._fifth_vehicle_speed,
actor_location=fifth_wp_location,
map=self._map,
avoid_collision=False,
pattern_1=[1, 1, 0, 3, 1, 1, 1, 1, 1, 0, 1, 0],
world=self.world)
sixth_vehicle_planner = WaypointFollower_FullMap(actor=self.sixth_vehicle,
target_speed=self._sixth_vehicle_speed,
actor_location=sixth_wp_location,
map=self._map,
avoid_collision=False,
pattern_1=[1, 1, 0, 3, 1, 1, 1, 1, 1, 0, 1, 0],
world=self.world)
seventh_vehicle_planner = WaypointFollower_FullMap(actor=self.seventh_vehicle,
target_speed=self._seventh_vehicle_speed,
actor_location=seventh_wp_location,
map=self._map,
avoid_collision=False,
pattern_1=[1, 1, 0, 3, 1, 1, 1, 1, 1, 0, 1, 0],
world=self.world)
self.vehicle_planners = [fourth_vehicle_planner, fifth_vehicle_planner, sixth_vehicle_planner, seventh_vehicle_planner]
for planner in self.vehicle_planners:
planner.setup()
def generate_car(self):
additional_zombie_car = list()
additional_zombie_car_speed = list()
additional_pattern = list()
additional_actor_location = list()
all_car_pos = [[4.926102638244629, 40.57860565185547, 1.8431016206741333]]
all_pattern = [[1, 1, 0, 3, 1, 1, 1, 1, 1, 0, 1, 0]]
for i, car_pos in enumerate(all_car_pos):
if car_pos == [-1, -1, -1] or car_pos == [-2, -2, -2]:
# car_pos == [-2, -2, -2]: get_left_lane(), speed=26
# car_pos == [-1, -1, -1]: get_left_lane()
car_pos = all_car_pos[i - 1]
orig_actor_location = carla.Location(x=car_pos[0], y=car_pos[1], z=car_pos[2])
vehicle_waypoint = self._map.get_waypoint(orig_actor_location).next(16)[0].get_left_lane()
actor_location = vehicle_waypoint.transform.location
else:
actor_location = carla.Location(x=car_pos[0], y=car_pos[1], z=car_pos[2])
vehicle_waypoint = self._map.get_waypoint(actor_location)
world_actors = self.world.get_actors().filter('vehicle.*')
flag_spawn = True
for adversary in world_actors:
#if actor_location.distance(adversary.get_location()) < 15:
if actor_location.distance(adversary.get_location()) < 5:
flag_spawn = False
if flag_spawn:
vehicle_transform = carla.Transform(vehicle_waypoint.transform.location,
vehicle_waypoint.transform.rotation)
vehicle = self.world.try_spawn_actor(self.blueprints[np.random.randint(0, len(self.blueprints))],
vehicle_transform)
if car_pos == [-2, -2, -2]:
_vehicle_speed = 26
else:
_vehicle_speed = 30
#_vehicle_speed = 20
#_vehicle_speed = random.choice([23, 21, 22])
self.speed = _vehicle_speed
additional_zombie_car.append(vehicle)
additional_zombie_car_speed.append(_vehicle_speed)
additional_pattern.append(all_pattern[i])
additional_actor_location.append(actor_location)
self.zombie_cars.append(vehicle)
for i, (one_zombie_car, one_zombie_car_speed, one_pattern, one_actor_location) in enumerate(
zip(additional_zombie_car, additional_zombie_car_speed, additional_pattern,
additional_actor_location)):
vehicle_planner = WaypointFollower_FullMap(actor=one_zombie_car, map=self._map,
actor_location=one_actor_location,
target_speed=one_zombie_car_speed,
avoid_collision=False, pattern_1=one_pattern,
world=self.world)
self.vehicle_planners.append(vehicle_planner)
vehicle_planner.setup()
def _update(self):
# update action for two local planners
# if _dis3d(_pos3d(self.hero_car), _pos3d(self.first_vehicle)) > 26.:
# pass
# else:
# for planner in self.vehicle_planners:
# planner.update()
# self.generate_car()
for planner in self.vehicle_planners:
planner.update()
def restart(self):
if self.only_reset_hero:
wp_location = carla.Location(x=self.hero_car_pos[0], y=self.hero_car_pos[1], z=self.hero_car_pos[2])
wp = self._map.get_waypoint(wp_location)
hero_vehicle_transform = wp.transform
hero_model = 'vehicle.lincoln.mkz2017'
blueprint = random.choice(self.blueprint_library.filter(hero_model))
blueprint.set_attribute('role_name', 'hero')
self.hero_car = self.world.try_spawn_actor(blueprint, hero_vehicle_transform)
else:
self._remove_all_actors()
self.zombie_cars = list()
self.vehicle_planners = list()
self._scenario_init()
def _remove_all_actors(self):
actors = [self.hero_car] + self.zombie_cars
# actors = self.zombie_cars
for actor in actors:
if actor.is_alive:
actor.destroy()
def _remove_zombie_cars(self):
actors = self.zombie_cars
for actor in actors:
if actor.is_alive:
actor.destroy()
class Straight_Follow_Single(object):
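"""Car-following on a straight road: a single zombie car is spawned 25 m
ahead of the hero's waypoint and driven at a fixed target speed of 15."""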
def __init__(self, name, map, world, only_reset_hero=False):
self.name = name
self._map = map
self.world = world
self.only_reset_hero = only_reset_hero
self.speed = 0
self.blueprint_library = self.world.get_blueprint_library()
self._scenario_init()
self.speed = 15
def _scenario_init(self):
# init hero car
# --------------------------------------------------------
# setup cars on a given waypoint
# 16.876914978027344, -134.40997314453125, 1.8707298040390015 # 177
self.hero_car_pos = [93.75690460205078, -132.76296997070312, 9.84310531616211] # 88
wp_location = carla.Location(x=self.hero_car_pos[0], y=self.hero_car_pos[1], z=self.hero_car_pos[2])
wp = self._map.get_waypoint(wp_location)
hero_vehicle_transform = wp.transform
hero_model = 'vehicle.lincoln.mkz2017'
blueprint = random.choice(self.blueprint_library.filter(hero_model))
blueprint.set_attribute('role_name', 'hero')
self.hero_car = self.world.try_spawn_actor(blueprint, hero_vehicle_transform)
models = ['vehicle.nissan.patrol', 'vehicle.audi.tt',
'vehicle.lincoln.mkz2017', 'vehicle.volkswagen.t2',
'vehicle.tesla.model3', 'vehicle.nissan.micra',
'vehicle.audi.a2',
]
blueprints = [random.choice(self.world.get_blueprint_library().filter(model)) for model in models]
for blueprint in blueprints:
blueprint.set_attribute('role_name', 'scenario')
self.blueprints = blueprints
self.models = models
# Not available: 135, 160
fourth_car_pos = [93.75690460205078, -132.76296997070312, 9.84310531616211] # 88 + 10
fourth_wp_location = carla.Location(x=fourth_car_pos[0], y=fourth_car_pos[1], z=fourth_car_pos[2])
fourth_vehicle_waypoint = self._map.get_waypoint(fourth_wp_location)
fourth_vehicle_waypoint = fourth_vehicle_waypoint.next(25)[0]
fourth_vehicle_transform = carla.Transform(fourth_vehicle_waypoint.transform.location,
fourth_vehicle_waypoint.transform.rotation)
self.fourth_vehicle = self.world.try_spawn_actor(blueprints[4 % len(models)], fourth_vehicle_transform)
# setup local planners for zombie cars
#self._fourth_vehicle_speed = np.random.choice([10, 15, 20, 25])
self._fourth_vehicle_speed = np.random.choice([15])
# print('\n\nself._fourth_vehicle_speed: ', self._fourth_vehicle_speed)
self.zombie_cars = [self.fourth_vehicle]
fourth_vehicle_planner = WaypointFollower_FullMap(actor=self.fourth_vehicle,
target_speed=self._fourth_vehicle_speed,
actor_location=fourth_vehicle_waypoint.transform.location,
map=self._map,
avoid_collision=True,
pattern_1=[1, 0, 2],
world=self.world)
self.vehicle_planners = [fourth_vehicle_planner]
for planner in self.vehicle_planners:
planner.setup()
def generate_car(self):
additional_zombie_car = list()
additional_zombie_car_speed = list()
additional_pattern = list()
additional_actor_location = list()
all_car_pos = [[93.75690460205078, -132.76296997070312, 9.84310531616211]] # 88 + 10
all_pattern = [[1, 0, 2]]
for i, car_pos in enumerate(all_car_pos):
if car_pos == [-1, -1, -1] or car_pos == [-2, -2, -2]:
# car_pos == [-2, -2, -2]: get_left_lane(), speed=26
# car_pos == [-1, -1, -1]: get_left_lane()
car_pos = all_car_pos[i - 1]
orig_actor_location = carla.Location(x=car_pos[0], y=car_pos[1], z=car_pos[2])
vehicle_waypoint = self._map.get_waypoint(orig_actor_location).next(16)[0].get_left_lane()
actor_location = vehicle_waypoint.transform.location
else:
orig_actor_location = carla.Location(x=car_pos[0], y=car_pos[1], z=car_pos[2])
vehicle_waypoint = self._map.get_waypoint(orig_actor_location)
vehicle_waypoint = vehicle_waypoint.next(25)[0]
actor_location = vehicle_waypoint.transform.location
world_actors = self.world.get_actors().filter('vehicle.*')
flag_spawn = True
for adversary in world_actors:
if actor_location.distance(adversary.get_location()) < 15:
flag_spawn = False
if flag_spawn:
vehicle_transform = carla.Transform(vehicle_waypoint.transform.location,
vehicle_waypoint.transform.rotation)
vehicle = self.world.try_spawn_actor(self.blueprints[np.random.randint(0, len(self.blueprints))],
vehicle_transform)
if car_pos == [-2, -2, -2]:
_vehicle_speed = 26
else:
_vehicle_speed = 20
self.speed = _vehicle_speed
additional_zombie_car.append(vehicle)
additional_zombie_car_speed.append(_vehicle_speed)
additional_actor_location.append(actor_location)
additional_pattern.append(all_pattern[i])
self.zombie_cars.append(vehicle)
for i, (one_zombie_car, one_zombie_car_speed, one_pattern, one_actor_location) in enumerate(
zip(additional_zombie_car, additional_zombie_car_speed, additional_pattern,
additional_actor_location)):
vehicle_planner = WaypointFollower_FullMap(actor=one_zombie_car, map=self._map,
actor_location=one_actor_location,
target_speed=one_zombie_car_speed,
avoid_collision=True, pattern_1=one_pattern,
world=self.world)
self.vehicle_planners.append(vehicle_planner)
vehicle_planner.setup()
def _update(self):
# self.generate_car()
for planner in self.vehicle_planners:
planner.update()
def restart(self):
if self.only_reset_hero:
wp_location = carla.Location(x=self.hero_car_pos[0], y=self.hero_car_pos[1], z=self.hero_car_pos[2])
wp = self._map.get_waypoint(wp_location)
wp = wp.next(8)[0]
hero_vehicle_transform = wp.transform
hero_model = 'vehicle.lincoln.mkz2017'
blueprint = random.choice(self.blueprint_library.filter(hero_model))
blueprint.set_attribute('role_name', 'hero')
self.hero_car = self.world.try_spawn_actor(blueprint, hero_vehicle_transform)
else:
self._remove_all_actors()
self.zombie_cars = list()
self.vehicle_planners = list()
self._scenario_init()
def _remove_all_actors(self):
actors = [self.hero_car] + self.zombie_cars
# actors = self.zombie_cars
for actor in actors:
if actor.is_alive:
actor.destroy()
def _remove_zombie_cars(self):
actors = self.zombie_cars
for actor in actors:
if actor.is_alive:
actor.destroy()
class Straight_Follow_Double(object):
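"""Car-following behind two zombie cars on a straight road, one in the
hero's lane and one in the adjacent lane, both at a target speed of 18."""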
def __init__(self, name, map, world, only_reset_hero=False):
self.name = name
self._map = map
self.world = world
self.speed = 0
self.only_reset_hero = only_reset_hero
self.blueprint_library = self.world.get_blueprint_library()
self._scenario_init()
def _scenario_init(self):
# init hero car
# --------------------------------------------------------
# setup cars on a given waypoint
# self.hero_car_pos = [-2.410623788833618, 207.50567626953125, 1.8431040048599243] # 88
self.hero_car_pos = [-15, 207.50567626953125, 1.8431040048599243] # 88
wp_location = carla.Location(x=self.hero_car_pos[0], y=self.hero_car_pos[1], z=self.hero_car_pos[2])
wp = self._map.get_waypoint(wp_location)
hero_vehicle_transform = wp.transform
hero_model = 'vehicle.lincoln.mkz2017'
blueprint = random.choice(self.blueprint_library.filter(hero_model))
blueprint.set_attribute('role_name', 'hero')
self.hero_car = self.world.try_spawn_actor(blueprint, hero_vehicle_transform)
models = ['vehicle.nissan.patrol', 'vehicle.audi.tt',
'vehicle.lincoln.mkz2017', 'vehicle.volkswagen.t2',
'vehicle.tesla.model3', 'vehicle.nissan.micra',
'vehicle.audi.a2',
]
blueprints = [random.choice(self.world.get_blueprint_library().filter(model)) for model in models]
for blueprint in blueprints:
blueprint.set_attribute('role_name', 'scenario')
self.blueprints = blueprints
self.models = models
# Not available: 135, 160
fourth_car_pos = [10.190117835998535, 207.50567626953125, 1.8431016206741333] # 4
fourth_wp_location = carla.Location(x=fourth_car_pos[0], y=fourth_car_pos[1], z=fourth_car_pos[2])
fourth_vehicle_waypoint = self._map.get_waypoint(fourth_wp_location)
fourth_vehicle_transform = carla.Transform(fourth_vehicle_waypoint.transform.location,
fourth_vehicle_waypoint.transform.rotation)
self.fourth_vehicle = self.world.try_spawn_actor(blueprints[4 % len(models)], fourth_vehicle_transform)
# setup local planners for zombie cars
# self._fourth_vehicle_speed = 20
self._fourth_vehicle_speed = 18
self.speed = self._fourth_vehicle_speed
next_fourth_car_pos = [10.181385040283203, 204.00567626953125, 1.8431016206741333] # 3
next_fourth_wp_location = carla.Location(x=next_fourth_car_pos[0], y=next_fourth_car_pos[1], z=next_fourth_car_pos[2])
next_fourth_vehicle_waypoint = self._map.get_waypoint(next_fourth_wp_location)
next_fourth_vehicle_transform = carla.Transform(next_fourth_vehicle_waypoint.transform.location,
next_fourth_vehicle_waypoint.transform.rotation)
self.next_fourth_vehicle = self.world.try_spawn_actor(blueprints[4 % len(models)], next_fourth_vehicle_transform)
# setup local planners for zombie cars
# self._next_fourth_vehicle_speed = 20
self._next_fourth_vehicle_speed = 18
self.zombie_cars = [self.fourth_vehicle, self.next_fourth_vehicle]
fourth_vehicle_planner = WaypointFollower_FullMap(actor=self.fourth_vehicle,
target_speed=self._fourth_vehicle_speed,
actor_location=fourth_wp_location,
map=self._map,
avoid_collision=True,
pattern_1=[0, 1, 0,],
world=self.world)
next_fourth_vehicle_planner = WaypointFollower_FullMap(actor=self.next_fourth_vehicle,
target_speed=self._next_fourth_vehicle_speed,
actor_location=next_fourth_wp_location,
map=self._map,
avoid_collision=True,
pattern_1=[1, 1, 0, 0, 0, ],
world=self.world)
self.vehicle_planners = [fourth_vehicle_planner, next_fourth_vehicle_planner]
for planner in self.vehicle_planners:
planner.setup()
def generate_car(self):
additional_zombie_car = list()
additional_zombie_car_speed = list()
additional_pattern = list()
additional_actor_location = list()
all_car_pos = [[10.190117835998535, 207.50567626953125, 1.8431016206741333], [10.181385040283203, 204.00567626953125, 1.8431016206741333]]
all_pattern = [[0, 1, 0,], [1, 1, 0, 0, 0, ]]
for i, car_pos in enumerate(all_car_pos):
if car_pos == [-1, -1, -1] or car_pos == [-2, -2, -2]:
# car_pos == [-2, -2, -2]: get_left_lane(), speed=26
# car_pos == [-1, -1, -1]: get_left_lane()
car_pos = all_car_pos[i - 1]
orig_actor_location = carla.Location(x=car_pos[0], y=car_pos[1], z=car_pos[2])
vehicle_waypoint = self._map.get_waypoint(orig_actor_location).next(16)[0].get_left_lane()
actor_location = vehicle_waypoint.transform.location
else:
actor_location = carla.Location(x=car_pos[0], y=car_pos[1], z=car_pos[2])
vehicle_waypoint = self._map.get_waypoint(actor_location)
vehicle_waypoint = vehicle_waypoint.next(8)[0]
world_actors = self.world.get_actors().filter('vehicle.*')
flag_spawn = True
for adversary in world_actors:
if actor_location.distance(adversary.get_location()) < 15:
flag_spawn = False
if flag_spawn:
vehicle_transform = carla.Transform(vehicle_waypoint.transform.location,
vehicle_waypoint.transform.rotation)
vehicle = self.world.try_spawn_actor(self.blueprints[np.random.randint(0, len(self.blueprints))],
vehicle_transform)
if car_pos == [-2, -2, -2]:
_vehicle_speed = 26
else:
_vehicle_speed = 20
self.speed = _vehicle_speed
additional_zombie_car.append(vehicle)
additional_zombie_car_speed.append(_vehicle_speed)
additional_pattern.append(all_pattern[i])
additional_actor_location.append(actor_location)
self.zombie_cars.append(vehicle)
for i, (one_zombie_car, one_zombie_car_speed, one_pattern, one_actor_location) in enumerate(
zip(additional_zombie_car, additional_zombie_car_speed, additional_pattern,
additional_actor_location)):
vehicle_planner = WaypointFollower_FullMap(actor=one_zombie_car, map=self._map,
actor_location=one_actor_location,
target_speed=one_zombie_car_speed,
avoid_collision=True, pattern_1=one_pattern,
world=self.world)
self.vehicle_planners.append(vehicle_planner)
vehicle_planner.setup()
def _update(self):
# update action for two local planners
# if _dis3d(_pos3d(self.hero_car), _pos3d(self.first_vehicle)) > 26.:
# pass
# else:
# for planner in self.vehicle_planners:
# planner.update()
# self.generate_car()
for planner in self.vehicle_planners:
planner.update()
def restart(self):
if self.only_reset_hero:
wp_location = carla.Location(x=self.hero_car_pos[0], y=self.hero_car_pos[1], z=self.hero_car_pos[2])
wp = self._map.get_waypoint(wp_location)
wp = wp.next(8)[0]
hero_vehicle_transform = wp.transform
hero_model = 'vehicle.lincoln.mkz2017'
blueprint = random.choice(self.blueprint_library.filter(hero_model))
blueprint.set_attribute('role_name', 'hero')
self.hero_car = self.world.try_spawn_actor(blueprint, hero_vehicle_transform)
else:
self._remove_all_actors()
self.zombie_cars = list()
self.vehicle_planners = list()
self._scenario_init()
def _remove_all_actors(self):
actors = [self.hero_car] + self.zombie_cars
# actors = self.zombie_cars
for actor in actors:
if actor.is_alive:
actor.destroy()
def _remove_zombie_cars(self):
actors = self.zombie_cars
for actor in actors:
if actor.is_alive:
actor.destroy()
class Cross_Follow(object):
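"""Following through a crossing: the hero starts between two zombie cars on
the same road (target speed 20) whose waypoint patterns take them through the
junction; collision avoidance is disabled."""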
def __init__(self, name, map, world, only_reset_hero=False):
self.name = name
self._map = map
self.world = world
self.only_reset_hero = only_reset_hero
self.speed = 0
self.blueprint_library = self.world.get_blueprint_library()
self._scenario_init()
def _scenario_init(self):
# init hero car
# --------------------------------------------------------
# setup cars on a given waypoint
# self.hero_car_pos = [-74.38717651367188, 75.64903259277344, 1.8052573204040527] # 15
self.hero_car_pos = [-74.38717651367188, 48, 1.8052573204040527] # 15
wp_location = carla.Location(x=self.hero_car_pos[0], y=self.hero_car_pos[1], z=self.hero_car_pos[2])
wp = self._map.get_waypoint(wp_location)
hero_vehicle_transform = wp.transform
hero_model = 'vehicle.lincoln.mkz2017'
blueprint = random.choice(self.blueprint_library.filter(hero_model))
blueprint.set_attribute('role_name', 'hero')
self.hero_car = self.world.try_spawn_actor(blueprint, hero_vehicle_transform)
models = ['vehicle.nissan.patrol', 'vehicle.audi.tt',
'vehicle.lincoln.mkz2017', 'vehicle.volkswagen.t2',
'vehicle.tesla.model3', 'vehicle.nissan.micra',
'vehicle.audi.a2',
]
blueprints = [random.choice(self.world.get_blueprint_library().filter(model)) for model in models]
for blueprint in blueprints:
blueprint.set_attribute('role_name', 'scenario')
self.blueprints = blueprints
self.models = models
# Not available: 135, 160
fourth_car_pos = [-74.38717651367188, 40.29738235473633, 1.8052647113800049] # 13
fourth_wp_location = carla.Location(x=fourth_car_pos[0], y=fourth_car_pos[1], z=fourth_car_pos[2])
fourth_vehicle_waypoint = self._map.get_waypoint(fourth_wp_location)
fourth_vehicle_transform = carla.Transform(fourth_vehicle_waypoint.transform.location,
fourth_vehicle_waypoint.transform.rotation)
self.fourth_vehicle = self.world.try_spawn_actor(blueprints[4 % len(models)], fourth_vehicle_transform)
# setup local planners for zombie cars
self._fourth_vehicle_speed = 20
# Not available: 135, 160
next_fourth_car_pos = [-74.38717651367188, 57.531620025634766, 1.8052573204040527] # 15
next_fourth_wp_location = carla.Location(x=next_fourth_car_pos[0], y=next_fourth_car_pos[1], z=next_fourth_car_pos[2])
next_fourth_vehicle_waypoint = self._map.get_waypoint(next_fourth_wp_location)
next_fourth_vehicle_transform = carla.Transform(next_fourth_vehicle_waypoint.transform.location,
next_fourth_vehicle_waypoint.transform.rotation)
self.next_fourth_vehicle = self.world.try_spawn_actor(blueprints[4 % len(models)], next_fourth_vehicle_transform)
# setup local planners for zombie cars
self._next_fourth_vehicle_speed = 20
self.zombie_cars = [self.fourth_vehicle, self.next_fourth_vehicle]
fourth_vehicle_planner = WaypointFollower_FullMap(actor=self.fourth_vehicle,
target_speed=self._fourth_vehicle_speed,
actor_location=fourth_wp_location,
map=self._map,
avoid_collision=False,
pattern_1=[1, 1, 1, 1, 1, 1, 0, 0, 1, ],
world=self.world)
next_fourth_vehicle_planner = WaypointFollower_FullMap(actor=self.next_fourth_vehicle,
target_speed=self._next_fourth_vehicle_speed,
actor_location=next_fourth_wp_location,
map=self._map,
avoid_collision=False,
pattern_1=[1, 1, 1, 1, 1, 1, 0, 0, 1, ],
world=self.world)
self.vehicle_planners = [fourth_vehicle_planner, next_fourth_vehicle_planner]
for planner in self.vehicle_planners:
planner.setup()
def generate_car(self):
additional_zombie_car = list()
additional_zombie_car_speed = list()
additional_pattern = list()
additional_actor_location = list()
all_car_pos = [[-74.38717651367188, 57.531620025634766, 1.805267095565796]]
all_pattern = [[1, 1, 1, 1, 1, 1, 0, 0, 1, ]]
for i, car_pos in enumerate(all_car_pos):
if car_pos == [-1, -1, -1] or car_pos == [-2, -2, -2]:
# car_pos == [-2, -2, -2]: get_left_lane(), speed=26
# car_pos == [-1, -1, -1]: get_left_lane()
car_pos = all_car_pos[i - 1]
orig_actor_location = carla.Location(x=car_pos[0], y=car_pos[1], z=car_pos[2])
vehicle_waypoint = self._map.get_waypoint(orig_actor_location).next(16)[0].get_left_lane()
actor_location = vehicle_waypoint.transform.location
else:
actor_location = carla.Location(x=car_pos[0], y=car_pos[1], z=car_pos[2])
vehicle_waypoint = self._map.get_waypoint(actor_location)
world_actors = self.world.get_actors().filter('vehicle.*')
flag_spawn = True
for adversary in world_actors:
if actor_location.distance(adversary.get_location()) < 15:
flag_spawn = False
if flag_spawn:
vehicle_transform = carla.Transform(vehicle_waypoint.transform.location,
vehicle_waypoint.transform.rotation)
vehicle = self.world.try_spawn_actor(self.blueprints[np.random.randint(0, len(self.blueprints))],
vehicle_transform)
if car_pos == [-2, -2, -2]:
_vehicle_speed = 26
else:
_vehicle_speed = 20
additional_zombie_car.append(vehicle)
additional_zombie_car_speed.append(_vehicle_speed)
additional_pattern.append(all_pattern[i])
additional_actor_location.append(actor_location)
self.zombie_cars.append(vehicle)
for i, (one_zombie_car, one_zombie_car_speed, one_pattern, one_actor_location) in enumerate(
zip(additional_zombie_car, additional_zombie_car_speed, additional_pattern,
additional_actor_location)):
vehicle_planner = WaypointFollower_FullMap(actor=one_zombie_car, map=self._map,
actor_location=one_actor_location,
target_speed=one_zombie_car_speed,
avoid_collision=False, pattern_1=one_pattern,
world=self.world)
self.vehicle_planners.append(vehicle_planner)
vehicle_planner.setup()
def _update(self):
# update action for two local planners
# if _dis3d(_pos3d(self.hero_car), _pos3d(self.first_vehicle)) > 26.:
# pass
# else:
# for planner in self.vehicle_planners:
# planner.update()
# self.generate_car()
for planner in self.vehicle_planners:
planner.update()
def restart(self):
if self.only_reset_hero:
wp_location = carla.Location(x=self.hero_car_pos[0], y=self.hero_car_pos[1], z=self.hero_car_pos[2])
wp = self._map.get_waypoint(wp_location)
wp = wp.next(8)[0]
hero_vehicle_transform = wp.transform
hero_model = 'vehicle.lincoln.mkz2017'
blueprint = random.choice(self.blueprint_library.filter(hero_model))
blueprint.set_attribute('role_name', 'hero')
self.hero_car = self.world.try_spawn_actor(blueprint, hero_vehicle_transform)
else:
self._remove_all_actors()
self.zombie_cars = list()
self.vehicle_planners = list()
self._scenario_init()
def _remove_all_actors(self):
actors = [self.hero_car] + self.zombie_cars
# actors = self.zombie_cars
for actor in actors:
if actor.is_alive:
actor.destroy()
def _remove_zombie_cars(self):
actors = self.zombie_cars
for actor in actors:
if actor.is_alive:
actor.destroy()
class Cross_Turn_Left(object):
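"""Left turn at a junction: the hero approaches the intersection while
generate_car() keeps spawning cross traffic at two fixed positions and drives
it at the sampled target speed (26 by default), without collision
avoidance."""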
def __init__(self, name, map, world, only_reset_hero=False):
self.name = name
self._map = map
self.world = world
self.only_reset_hero = only_reset_hero
self.speed = 0
self.blueprint_library = self.world.get_blueprint_library()
self._scenario_init()
def _scenario_init(self):
# init hero car
# --------------------------------------------------------
# setup cars on a given waypoint
#self.speed = np.random.choice([21, 26, 42])
#self.speed = np.random.choice([42])
self.speed = np.random.choice([26])
print('Velocity: ', self.speed)
self.hero_car_pos = [-42.350990295410156, -2.835118293762207, 1.8431016206741333]
wp_location = carla.Location(x=self.hero_car_pos[0], y=self.hero_car_pos[1], z=self.hero_car_pos[2])
wp = self._map.get_waypoint(wp_location)
hero_vehicle_transform = wp.transform
hero_model = 'vehicle.lincoln.mkz2017'
blueprint = random.choice(self.blueprint_library.filter(hero_model))
blueprint.set_attribute('role_name', 'hero')
self.hero_car = self.world.try_spawn_actor(blueprint, hero_vehicle_transform)
models = ['vehicle.nissan.patrol', 'vehicle.audi.tt',
'vehicle.lincoln.mkz2017', 'vehicle.volkswagen.t2',
'vehicle.tesla.model3', 'vehicle.nissan.micra',
'vehicle.audi.a2',
]
blueprints = [random.choice(self.world.get_blueprint_library().filter(model)) for model in models]
for blueprint in blueprints:
blueprint.set_attribute('role_name', 'scenario')
self.blueprints = blueprints
self.models = models
# Not available: 135, 160
fourth_car_pos = [-74.38717651367188, 57.531620025634766, 1.805267095565796] # 15
fourth_wp_location = carla.Location(x=fourth_car_pos[0], y=fourth_car_pos[1], z=fourth_car_pos[2])
fourth_vehicle_waypoint = self._map.get_waypoint(fourth_wp_location)
fourth_vehicle_transform = carla.Transform(fourth_vehicle_waypoint.transform.location,
fourth_vehicle_waypoint.transform.rotation)
self.fourth_vehicle = self.world.try_spawn_actor(blueprints[4 % len(models)], fourth_vehicle_transform)
# setup local planners for zombie cars
self._fourth_vehicle_speed = self.speed
no_2_car_pos = [-95.79371643066406, 0.17835818231105804, 1.8431016206741333] # 191 below
no_2_wp_location = carla.Location(x=no_2_car_pos[0], y=no_2_car_pos[1], z=no_2_car_pos[2])
no_2_vehicle_waypoint = self._map.get_waypoint(no_2_wp_location)
no_2_vehicle_transform = carla.Transform(no_2_vehicle_waypoint.transform.location,
no_2_vehicle_waypoint.transform.rotation)
self.no_2_vehicle = self.world.try_spawn_actor(blueprints[4 % len(models)], no_2_vehicle_transform)
# setup local planners for zombie cars
self._no_2_vehicle_speed = self.speed
# no_3_car_pos = [-84.8062973022461, -25, 1.7985864877700806] # 27
# no_3_wp_location = carla.Location(x=no_3_car_pos[0], y=no_3_car_pos[1], z=no_3_car_pos[2])
# no_3_vehicle_waypoint = self._map.get_waypoint(no_3_wp_location)
# no_3_vehicle_transform = carla.Transform(no_3_vehicle_waypoint.transform.location,
# no_3_vehicle_waypoint.transform.rotation)
# self.no_3_vehicle = self.world.try_spawn_actor(blueprints[4 % len(models)], no_3_vehicle_transform)
# # setup local planners for zombie cars
# self._no_3_vehicle_speed = 20
self.zombie_cars = [self.fourth_vehicle, self.no_2_vehicle] # , self.no_3_vehicle]
fourth_vehicle_planner = WaypointFollower_FullMap(actor=self.fourth_vehicle,
target_speed=self._fourth_vehicle_speed,
actor_location=fourth_wp_location,
map=self._map,
avoid_collision=False,
pattern_1=[1, 1, 1, 1, 1, 1, 0, 0, 1, ],
world=self.world)
no_2_vehicle_planner = WaypointFollower_FullMap(actor=self.no_2_vehicle,
target_speed=self._no_2_vehicle_speed,
actor_location=no_2_wp_location,
map=self._map,
avoid_collision=False,
pattern_1=[0, 0, 3, ],
world=self.world)
# no_3_vehicle_planner = WaypointFollower_FullMap(actor=self.no_3_vehicle,
# target_speed=self._no_3_vehicle_speed,
# actor_location=no_3_wp_location,
# map=self._map,
# avoid_collision=False,
# pattern_1=[0, 0, 1, 1, 0, 0, 0, 0, 0,],
# world=self.world)
self.vehicle_planners = [fourth_vehicle_planner, no_2_vehicle_planner] # , no_3_vehicle_planner]
for planner in self.vehicle_planners:
planner.setup()
def generate_car(self):
additional_zombie_car = list()
additional_zombie_car_speed = list()
additional_pattern = list()
additional_actor_location = list()
all_car_pos = [[-74.38717651367188, 57.531620025634766, 1.805267095565796], [-95.79371643066406, 0.17835818231105804, 1.8431016206741333]]
all_pattern = [[1, 1, 1, 1, 1, 1, 0, 0, 1, ], [0, 0, 3]]
# 199: [-85.21101379394531, -126.87477111816406, 1.7985864877700806], [0, 0, 1, 1, 0, 0, 0, 0, 0,]
for i, car_pos in enumerate(all_car_pos):
if car_pos == [-1, -1, -1] or car_pos == [-2, -2, -2]:
# car_pos == [-2, -2, -2]: get_left_lane(), speed=26
# car_pos == [-1, -1, -1]: get_left_lane()
car_pos = all_car_pos[i - 1]
orig_actor_location = carla.Location(x=car_pos[0], y=car_pos[1], z=car_pos[2])
vehicle_waypoint = self._map.get_waypoint(orig_actor_location).next(16)[0].get_left_lane()
actor_location = vehicle_waypoint.transform.location
else:
actor_location = carla.Location(x=car_pos[0], y=car_pos[1], z=car_pos[2])
vehicle_waypoint = self._map.get_waypoint(actor_location)
world_actors = self.world.get_actors().filter('vehicle.*')
flag_spawn = True
for adversary in world_actors:
if actor_location.distance(adversary.get_location()) < 8:
flag_spawn = False
if flag_spawn:
vehicle_transform = carla.Transform(vehicle_waypoint.transform.location,
vehicle_waypoint.transform.rotation)
vehicle = self.world.try_spawn_actor(self.blueprints[np.random.randint(0, len(self.blueprints))],
vehicle_transform)
if car_pos == [-2, -2, -2]:
_vehicle_speed = 26
else:
_vehicle_speed = self.speed
additional_zombie_car.append(vehicle)
additional_zombie_car_speed.append(_vehicle_speed)
additional_pattern.append(all_pattern[i])
additional_actor_location.append(actor_location)
self.zombie_cars.append(vehicle)
for i, (one_zombie_car, one_zombie_car_speed, one_pattern, one_actor_location) in enumerate(
zip(additional_zombie_car, additional_zombie_car_speed, additional_pattern,
additional_actor_location)):
vehicle_planner = WaypointFollower_FullMap(actor=one_zombie_car, map=self._map,
actor_location=one_actor_location,
target_speed=one_zombie_car_speed,
avoid_collision=False, pattern_1=one_pattern,
world=self.world)
self.vehicle_planners.append(vehicle_planner)
vehicle_planner.setup()
def _update(self):
# update action for two local planners
# if _dis3d(_pos3d(self.hero_car), _pos3d(self.first_vehicle)) > 26.:
# pass
# else:
# for planner in self.vehicle_planners:
# planner.update()
self.generate_car()
for planner in self.vehicle_planners:
planner.update()
def restart(self):
if self.only_reset_hero:
wp_location = carla.Location(x=self.hero_car_pos[0], y=self.hero_car_pos[1], z=self.hero_car_pos[2])
wp = self._map.get_waypoint(wp_location)
hero_vehicle_transform = wp.transform
hero_model = 'vehicle.lincoln.mkz2017'
blueprint = random.choice(self.blueprint_library.filter(hero_model))
blueprint.set_attribute('role_name', 'hero')
self.hero_car = self.world.try_spawn_actor(blueprint, hero_vehicle_transform)
else:
self._remove_all_actors()
self.zombie_cars = list()
self.vehicle_planners = list()
self._scenario_init()
def _remove_all_actors(self):
actors = [self.hero_car] + self.zombie_cars
# actors = self.zombie_cars
for actor in actors:
if actor.is_alive:
actor.destroy()
def _remove_zombie_cars(self):
actors = self.zombie_cars
for actor in actors:
if actor.is_alive:
actor.destroy()
class Cross_Turn_Right(object):
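    # Intersection (right-turn) scenario: spawns the hero vehicle at a fixed waypoint
    # plus three "zombie" cars, each driven by a WaypointFollower_FullMap planner with
    # its own route pattern; generate_car() keeps respawning zombie cars at the listed
    # positions whenever no other vehicle is within 15 m of the spawn point.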
def __init__(self, name, map, world, only_reset_hero=False):
self.name = name
self._map = map
self.world = world
self.speed = 0
self.only_reset_hero = only_reset_hero
self.blueprint_library = self.world.get_blueprint_library()
self._scenario_init()
def _scenario_init(self):
# init hero car
# --------------------------------------------------------
# setup cars on a given waypoint
self.hero_car_pos = [-42.350990295410156, -2.835118293762207, 1.8431016206741333]
wp_location = carla.Location(x=self.hero_car_pos[0], y=self.hero_car_pos[1], z=self.hero_car_pos[2])
wp = self._map.get_waypoint(wp_location)
hero_vehicle_transform = wp.transform
hero_model = 'vehicle.lincoln.mkz2017'
blueprint = random.choice(self.blueprint_library.filter(hero_model))
blueprint.set_attribute('role_name', 'hero')
self.hero_car = self.world.try_spawn_actor(blueprint, hero_vehicle_transform)
models = ['vehicle.nissan.patrol', 'vehicle.audi.tt',
'vehicle.lincoln.mkz2017', 'vehicle.volkswagen.t2',
'vehicle.tesla.model3', 'vehicle.nissan.micra',
'vehicle.audi.a2',
]
blueprints = [random.choice(self.world.get_blueprint_library().filter(model)) for model in models]
for blueprint in blueprints:
blueprint.set_attribute('role_name', 'scenario')
self.blueprints = blueprints
self.models = models
# Not available: 135, 160
fourth_car_pos = [-74.38717651367188, 57.531620025634766, 1.805267095565796] # 13
fourth_wp_location = carla.Location(x=fourth_car_pos[0], y=fourth_car_pos[1], z=fourth_car_pos[2])
fourth_vehicle_waypoint = self._map.get_waypoint(fourth_wp_location)
fourth_vehicle_transform = carla.Transform(fourth_vehicle_waypoint.transform.location,
fourth_vehicle_waypoint.transform.rotation)
self.fourth_vehicle = self.world.try_spawn_actor(blueprints[4 % len(models)], fourth_vehicle_transform)
# setup local planners for zombie cars
self._fourth_vehicle_speed = 20
no_2_car_pos = [-95.79371643066406, 0.17835818231105804, 1.8431016206741333] # 191 below
no_2_wp_location = carla.Location(x=no_2_car_pos[0], y=no_2_car_pos[1], z=no_2_car_pos[2])
no_2_vehicle_waypoint = self._map.get_waypoint(no_2_wp_location)
no_2_vehicle_transform = carla.Transform(no_2_vehicle_waypoint.transform.location,
no_2_vehicle_waypoint.transform.rotation)
self.no_2_vehicle = self.world.try_spawn_actor(blueprints[4 % len(models)], no_2_vehicle_transform)
# setup local planners for zombie cars
self._no_2_vehicle_speed = 20
no_3_car_pos = [-84.8062973022461, -25, 1.7985864877700806] # 27
no_3_wp_location = carla.Location(x=no_3_car_pos[0], y=no_3_car_pos[1], z=no_3_car_pos[2])
no_3_vehicle_waypoint = self._map.get_waypoint(no_3_wp_location)
no_3_vehicle_transform = carla.Transform(no_3_vehicle_waypoint.transform.location,
no_3_vehicle_waypoint.transform.rotation)
self.no_3_vehicle = self.world.try_spawn_actor(blueprints[4 % len(models)], no_3_vehicle_transform)
# setup local planners for zombie cars
self._no_3_vehicle_speed = 20
self.zombie_cars = [self.fourth_vehicle, self.no_2_vehicle, self.no_3_vehicle]
fourth_vehicle_planner = WaypointFollower_FullMap(actor=self.fourth_vehicle,
target_speed=self._fourth_vehicle_speed,
actor_location=fourth_wp_location,
map=self._map,
avoid_collision=True,
pattern_1=[1, 1, 1, 1, 1, 1, 0, 0, 1, ],
world=self.world)
no_2_vehicle_planner = WaypointFollower_FullMap(actor=self.no_2_vehicle,
target_speed=self._no_2_vehicle_speed,
actor_location=no_2_wp_location,
map=self._map,
avoid_collision=True,
pattern_1=[0, 0, 3, ],
world=self.world)
no_3_vehicle_planner = WaypointFollower_FullMap(actor=self.no_3_vehicle,
target_speed=self._no_3_vehicle_speed,
actor_location=no_3_wp_location,
map=self._map,
avoid_collision=True,
pattern_1=[0, 0, 1, 1, 0, 0, 0, 0, 0,],
world=self.world)
self.vehicle_planners = [fourth_vehicle_planner, no_2_vehicle_planner, no_3_vehicle_planner]
for planner in self.vehicle_planners:
planner.setup()
def generate_car(self):
additional_zombie_car = list()
additional_zombie_car_speed = list()
additional_pattern = list()
additional_actor_location = list()
all_car_pos = [[-74.38717651367188, 57.531620025634766, 1.805267095565796], [-95.79371643066406, 0.17835818231105804, 1.8431016206741333], [-85.21101379394531, -126.87477111816406, 1.7985864877700806]]
all_pattern = [[1, 1, 1, 1, 1, 1, 0, 0, 1, ], [0, 0, 3], [0, 0, 1, 1, 0, 0, 0, 0, 0,]]
for i, car_pos in enumerate(all_car_pos):
if car_pos == [-1, -1, -1] or car_pos == [-2, -2, -2]:
# car_pos == [-2, -2, -2]: get_left_lane(), speed=26
# car_pos == [-1, -1, -1]: get_left_lane()
car_pos = all_car_pos[i - 1]
orig_actor_location = carla.Location(x=car_pos[0], y=car_pos[1], z=car_pos[2])
vehicle_waypoint = self._map.get_waypoint(orig_actor_location).next(16)[0].get_left_lane()
actor_location = vehicle_waypoint.transform.location
else:
actor_location = carla.Location(x=car_pos[0], y=car_pos[1], z=car_pos[2])
vehicle_waypoint = self._map.get_waypoint(actor_location)
world_actors = self.world.get_actors().filter('vehicle.*')
flag_spawn = True
for adversary in world_actors:
if actor_location.distance(adversary.get_location()) < 15:
flag_spawn = False
if flag_spawn:
vehicle_transform = carla.Transform(vehicle_waypoint.transform.location,
vehicle_waypoint.transform.rotation)
vehicle = self.world.try_spawn_actor(self.blueprints[np.random.randint(0, len(self.blueprints))],
vehicle_transform)
if car_pos == [-2, -2, -2]:
_vehicle_speed = 26
else:
_vehicle_speed = 25
additional_zombie_car.append(vehicle)
additional_zombie_car_speed.append(_vehicle_speed)
additional_pattern.append(all_pattern[i])
additional_actor_location.append(actor_location)
self.zombie_cars.append(vehicle)
for i, (one_zombie_car, one_zombie_car_speed, one_pattern, one_actor_location) in enumerate(
zip(additional_zombie_car, additional_zombie_car_speed, additional_pattern,
additional_actor_location)):
vehicle_planner = WaypointFollower_FullMap(actor=one_zombie_car, map=self._map,
actor_location=one_actor_location,
target_speed=one_zombie_car_speed,
avoid_collision=True, pattern_1=one_pattern,
world=self.world)
self.vehicle_planners.append(vehicle_planner)
vehicle_planner.setup()
def _update(self):
# update action for two local planners
# if _dis3d(_pos3d(self.hero_car), _pos3d(self.first_vehicle)) > 26.:
# pass
# else:
# for planner in self.vehicle_planners:
# planner.update()
self.generate_car()
for planner in self.vehicle_planners:
planner.update()
def restart(self):
if self.only_reset_hero:
wp_location = carla.Location(x=self.hero_car_pos[0], y=self.hero_car_pos[1], z=self.hero_car_pos[2])
wp = self._map.get_waypoint(wp_location)
hero_vehicle_transform = wp.transform
hero_model = 'vehicle.lincoln.mkz2017'
blueprint = random.choice(self.blueprint_library.filter(hero_model))
blueprint.set_attribute('role_name', 'hero')
self.hero_car = self.world.try_spawn_actor(blueprint, hero_vehicle_transform)
else:
self._remove_all_actors()
self.zombie_cars = list()
self.vehicle_planners = list()
self._scenario_init()
def _remove_all_actors(self):
actors = [self.hero_car] + self.zombie_cars
# actors = self.zombie_cars
for actor in actors:
if actor.is_alive:
actor.destroy()
def _remove_zombie_cars(self):
actors = self.zombie_cars
for actor in actors:
if actor.is_alive:
actor.destroy()
```
#### File: baselines/gail/trpo_runner.py
```python
import time
import os
from contextlib import contextmanager
from mpi4py import MPI
from collections import deque
import tensorflow as tf
import numpy as np
import baselines.common.tf_util as U
from baselines.common import explained_variance, zipsame, dataset, fmt_row
from baselines import logger
from baselines.common import colorize
from baselines.common.mpi_adam import MpiAdam
from baselines.common.cg import cg
from baselines.gail.statistics import stats
from tensorflow.contrib.tensorboard.plugins import projector
from numpy import linalg as LA
from copy import copy
from gym import spaces
def stack(array):
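    # Flattens the env axis of a rollout buffer:
    #   (nsteps, nenv)    -> (nsteps * nenv,)
    #   (nsteps, nenv, d) -> (nsteps * nenv, d)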
if len(array.shape) == 2:
array = array.reshape(np.prod(array.shape))
else:
array = array.reshape(np.prod(array.shape[:-1]), -1)
return array
def detect_scene(scenes, ep_lens):
_eps = len(ep_lens)
curves = 0
_range = []
start_idx = 0
for _len in ep_lens:
_range.append((start_idx, start_idx+_len))
start_idx = start_idx + _len
for start_idx, end_idx in _range:
scenes = list(scenes)
        scene_ep = scenes[start_idx: end_idx]
        curves_ep = 0
        for scene in scene_ep:
            if scene == 'curve':
                curves_ep += 1
for scene in scenes:
if scene == 'curve':
curves += 1
if curves <= 4:
return 'straight'
elif curves == _eps:
return 'curve'
else:
return 'all'
def flat_lst(lsts):
output = []
for lst in lsts:
output+= lst
return output
def sf01(arr):
"""
swap and then flatten axes 0 and 1
"""
s = arr.shape
return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])
def safemean(xs):
return np.nan if len(xs) == 0 else np.mean(xs)
def safemax(xs):
return np.nan if len(xs) == 0 else np.max(xs)
class Model(object):
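    # Thin wrapper around the policy `pi` and the GAIL discriminator `reward_giver`.
    # step() queries the policy per environment and scores (ob, ac) with the
    # discriminator reward; step_decision() returns a placeholder reward of 0 that
    # is filled in later by fake_rew() once the low-level controls are available.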
def __init__(self, pi, reward_giver, nenv, stochastic=True):
self.pi = pi
self.nenv = nenv
self.reward_giver = reward_giver
self.stochastic = stochastic
def step(self, obs):
actions = []
values = []
rewards = []
for i in range(self.nenv):
ac, vpred = self.pi.act(self.stochastic, obs[i])
reward = self.reward_giver.get_reward(obs[i], ac)
actions.append(ac)
values.append(vpred)
rewards.append(reward[0][0])
return actions, values, rewards
def step_decision(self, obs):
# here we assign a fake reward 0 to the output
        # and fill in the real reward later via fake_rew(), once the controls are known
actions = []
values = []
rewards = []
for i in range(self.nenv):
ac, vpred = self.pi.act(self.stochastic, obs[i])
reward = 0
actions.append(ac)
values.append(vpred)
rewards.append(reward)
return actions, values, rewards
def fake_rew(self, obs, ctrls):
rewards = []
for i in range(self.nenv):
ctrls[i] = np.asarray(ctrls[i])
reward = self.reward_giver.get_reward(obs[i], ctrls[i])
# if terminal_list[i]:
# reward = [[-2]]
rewards.append(reward[0][0])
return rewards
# @ Junning: menv runner
class Runner(object):
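    # Collects `nsteps` transitions from the vectorized environment, querying the
    # policy (and, for GAIL, the discriminator reward) at every step, then computes
    # GAE advantages/returns and flattens everything into the batch dict returned
    # by run().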
def __init__(self, env, model, nsteps, gamma, lam, length=800, rew_type=False, model_output='DM'):
self.env = env
self.model = model
nenv = env.num_envs
self.nenv = nenv
self.obs = np.zeros((nenv, ) + env.observation_space.shape, dtype=env.observation_space.dtype)
self.obs[:] = env.reset()
self.gamma = gamma
self.lam = lam
self.nsteps = nsteps
self.news = [[False] for _ in range(nenv)] # horizon & early reset
self.terminals = [[False] for _ in range(nenv)] # early reset
self._ep = int(nsteps/length)+1
self.epinfo_buffer = deque([], maxlen=100)
self.rew_type = rew_type
self.model_output = model_output
def run(self):
mb_obs, mb_rewards, mb_true_rewards, mb_actions, mb_values, mb_news, mb_infos, mb_terminals = [], [], [], [], [], [], [], []
mb_controls = []
mb_current_pos = []
mb_yaw = []
# step for start
epinfos = []
start_action = [0, 1]
if self.model_output == 'DM':
_, v_init, _ = self.model.step_decision(self.obs)
else:
_, v_init, _ = self.model.step(self.obs)
v_ep = [[] for i in range(self.nenv)]
eprew_tmp = np.array([0. for i in range(self.nenv)])
eprews_tmp = [[] for i in range(self.nenv)]
for _step in range(self.nsteps):
nb_actions = []
            # note that actions may be decisions or controls;
            # if actions are decisions, the corresponding controls are
            # obtained from the environment info dicts below
if self.model_output == 'DM':
actions, values, rewards = self.model.step_decision(self.obs)
else:
actions, values, rewards = self.model.step(self.obs)
mb_obs.append(self.obs.copy())
mb_actions.append(np.asarray(actions, dtype=np.float32))
mb_values.append(np.asarray(values, dtype=np.float32))
mb_news.append(self.news)
mb_terminals.append(self.terminals)
# nb_actions = nb_actions.swapaxes(1, 0)
self.obs[:], true_rewards, self.news, infos = self.env.step(actions)
# ------------------------------------------------------------------
self.env.venv.venv.envs[0].world.v_value = values
controls = []
current_pos_list = []
yaw_list = []
# terminal_list = []
for info in infos:
maybeepinfo = info.get('episode')
if maybeepinfo: epinfos.append(maybeepinfo)
control = info.get('control')
controls.append(control)
current_pos_list.append(info.get('current_pos'))
yaw_list.append(info.get('yaw'))
# terminal_list.append(info.get('terminal'))
mb_controls.append(np.asarray(controls, dtype=np.float32))
mb_current_pos.append(np.asarray(current_pos_list, dtype=np.float32))
mb_yaw.append(np.asarray(yaw_list, dtype=np.float32))
# ------------------------------------------------------------------
if self.model_output == 'DM':
rewards = self.model.fake_rew(self.obs, controls)
for i in range(len(self.env.venv.venv.envs)):
if self.env.venv.venv.envs[i].terminal:
rewards[i] = -2
true_rewards[i] = -2
self.env.venv.venv.envs[0].world.fake_reward = rewards
# print(rewards, "fake REWARD")
eprew_tmp += rewards
self.terminals = np.array([info['terminal'] for info in infos])
# add episode start id
for __i in range(self.nenv):
if self.news[__i]:
v_ep[__i].append(values[__i])
eprews_tmp[__i].append(eprew_tmp[__i])
eprew_tmp[__i] = 0.
mb_true_rewards.append(true_rewards)
mb_rewards.append(rewards)
mb_infos.append(infos)
# batch of steps to batch of rollouts
mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
mb_actions = np.asarray(mb_actions)
mb_values = np.asarray(mb_values, dtype=np.float32)
mb_news = np.asarray(mb_news, dtype=np.bool)
mb_terminals = np.asarray(mb_terminals, dtype=np.bool)
mb_true_rewards = np.asarray(mb_true_rewards)
mb_infos = np.asarray(mb_infos)
mb_controls = np.asarray(mb_controls)
mb_current_pos = np.asarray(mb_current_pos)
mb_yaw = np.asarray(mb_yaw)
# last_values = []
if self.model_output == 'DM':
_, _values, _ = self.model.step_decision(self.obs)
else:
_, _values, _ = self.model.step(self.obs)
last_values = _values
last_values = np.asarray(last_values)
# last_values = np.asarray(last_values, dtype=np.float32).swapaxes(1, 0)
# discount/bootstrap off value fn
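        # Generalized Advantage Estimation over the collected batch:
        #   delta_t  = r_t + gamma * V_{t+1} * (1 - terminal_{t+1}) - V_t
        #   A_t      = delta_t + gamma * lam * (1 - new_{t+1}) * A_{t+1}
        #   return_t = A_t + V_t
        # `new` marks any episode boundary (horizon or early reset) and cuts the
        # advantage recursion; `terminal` marks early termination only and cuts
        # the value bootstrap in delta_t.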
mb_returns = np.zeros_like(mb_rewards)
mb_advs = np.zeros_like(mb_rewards)
lastgaelam = 0
for t in reversed(range(self.nsteps)):
if t == self.nsteps - 1:
nextnonnew = 1.0 - self.news # collision or maxlen horizon
nextnonterminal = 1.0 - self.terminals # collision
nextvalues = last_values
else:
nextnonnew = 1.0 - mb_news[t + 1]
nextnonterminal = 1.0 - mb_terminals[t + 1]
nextvalues = mb_values[t + 1]
if self.rew_type:
delta = mb_true_rewards[t] + self.gamma * nextvalues * nextnonterminal - mb_values[t]
else:
delta = mb_rewards[t] + self.gamma * nextvalues * nextnonterminal - mb_values[t]
mb_advs[t] = lastgaelam = delta + self.gamma * self.lam * nextnonnew * lastgaelam
mb_returns = mb_advs + mb_values
# TODO: test sf01 function here, compare with stack
# flatten axes 0 and 1, (nstep, nenv, d_shape) -> (nstep*nenv, d_shape)
output = [*map(stack, (
mb_obs, mb_current_pos, mb_yaw, mb_rewards, mb_returns, mb_news, mb_true_rewards, mb_actions, mb_values,
mb_advs, mb_infos, mb_controls))]
obs, current_pos, yaw, rews, returns, news, true_rews, acs, vpreds, advs = output[:-2]
ctrls = output[-1]
# episode statistics
self.epinfo_buffer.extend(epinfos)
mb_scenes = []
for _i in range(self.nsteps):
mb_scene_envs = []
for _j in range(self.nenv):
mb_scene_envs.append(mb_infos[_i][_j]['scene'])
mb_scenes.append(mb_scene_envs)
# flatten axes 0 and 1, (nstep, nenv, d_shape) -> (nstep*nenv, d_shape)
v_ep, scenes, ep_rets = [*map(flat_lst, (v_ep, mb_scenes, eprews_tmp))] # store v_ep after reset
# log from epinfo: remember this is from 100 rolling buffer
ep_true_rets = [epinfo['r'] for epinfo in self.epinfo_buffer]
ep_lens = [epinfo['l'] for epinfo in self.epinfo_buffer]
ep_v = safemean([epinfo['v'] for epinfo in self.epinfo_buffer])
ep_acc = safemean([epinfo['acc'] for epinfo in self.epinfo_buffer])
ep_left_offset = safemean([epinfo['left'] for epinfo in self.epinfo_buffer])
ep_right_offset = safemean([epinfo['right'] for epinfo in self.epinfo_buffer])
# record non-rolling, non-buffered info
ep_true_lens = [ep['l'] for ep in epinfos]
scene = detect_scene(scenes, ep_lens)
return {"ob": obs, "rew": rews, "vpred": vpreds, "new": news, "truerew": true_rews, "v_ep": v_ep,
"ac": acs, "ep_rets": ep_rets, "ep_lens": ep_lens, "ep_true_rets": ep_true_rets, "scene": scene,
"adv": advs, "tdlamret": returns, 'ep_v': ep_v, 'ep_acc': ep_acc, 'ep_left_offset': ep_left_offset,
'ep_right_offset': ep_right_offset, "ep_true_lens": ep_true_lens, "ctrl": ctrls,
'current_pos': current_pos, 'yaw': yaw}
def add_vtarg_and_adv(seg, gamma, lam, rew=False):
new = np.append(seg["new"], 0) # last element is only used for last vtarg, but we already zeroed it if last new = 1
vpred = np.append(seg["vpred"], seg["nextvpred"])
T = len(seg["rew"])
seg["adv"] = gaelam = np.empty(T, 'float32')
if not rew:
rew = seg["rew"]
else:
rew = seg["truerew"]
lastgaelam = 0
for t in reversed(range(T)):
nonterminal = 1-new[t+1]
delta = rew[t] + gamma * vpred[t+1] * nonterminal - vpred[t]
gaelam[t] = lastgaelam = delta + gamma * lam * nonterminal * lastgaelam
seg["tdlamret"] = seg["adv"] + seg["vpred"]
def learn(env, policy_func, reward_giver, expert_dataset, rank,
pretrained, pretrained_weight, *,
g_step, d_step, entcoeff, save_per_iter,
ckpt_dir, log_dir, timesteps_per_batch, task_name,
gamma, lam,
max_kl, cg_iters, cg_damping=1e-2,
vf_stepsize=3e-4, d_stepsize=3e-4, vf_iters=3,
max_timesteps=0, max_episodes=0, max_iters=0,
callback=None, load_model_path=None, search=False, search_mode='traj', scene='all',
p_update=True, rew_type=False, train_mode='all', r_norm=False,still_std=False, model_output='DM'):
nworkers = MPI.COMM_WORLD.Get_size()
rank = MPI.COMM_WORLD.Get_rank()
np.set_printoptions(precision=3)
# Setup losses and stuff
# ----------------------------------------
ob_space = env.observation_space
ac_space = env.action_space
pi = policy_func("pi", ob_space, ac_space, reuse=(pretrained_weight != None))
oldpi = policy_func("oldpi", ob_space, ac_space)
atarg = tf.placeholder(dtype=tf.float32, shape=[None]) # Target advantage function (if applicable)
ret = tf.placeholder(dtype=tf.float32, shape=[None]) # Empirical return
ob = U.get_placeholder_cached(name="ob")
ac = pi.pdtype.sample_placeholder([None])
kloldnew = oldpi.pd.kl(pi.pd)
ent = pi.pd.entropy()
meankl = tf.reduce_mean(kloldnew)
meanent = tf.reduce_mean(ent)
entbonus = entcoeff * meanent
vferr = tf.reduce_mean(tf.square(pi.vpred - ret))
ratio = tf.exp(pi.pd.logp(ac) - oldpi.pd.logp(ac)) # advantage * pnew / pold
surrgain = tf.reduce_mean(ratio * atarg)
optimgain = surrgain + entbonus
if isinstance(env.action_space, spaces.MultiDiscrete):
losses = [optimgain, meankl, entbonus, surrgain, meanent]
loss_names = ["optimgain", "meankl", "entloss", "surrgain", "entropy"]
elif isinstance(env.action_space, spaces.Box):
pi_mean = tf.reduce_mean(pi.pd.mean)
pi_std = tf.reduce_mean(pi.pd.std)
steer = tf.reduce_mean(pi.pd.mean[:, 0])
steer_std = tf.reduce_mean(pi.pd.std[:, 0])
if train_mode == "all":
throttle_brake = tf.reduce_mean(pi.pd.mean[:, 1])
throttle_brake_std = tf.reduce_mean(pi.pd.std[:, 1])
losses = [optimgain, meankl, entbonus, surrgain, meanent, pi_mean, pi_std, steer, throttle_brake, steer_std, throttle_brake_std]
loss_names = ["optimgain", "meankl", "entloss", "surrgain", "entropy", "pi_mean", "pi_std", "steer", "throttle_brake", "steer_std", "throttle_brake_std"]
elif train_mode == "steer":
losses = [optimgain, meankl, entbonus, surrgain, meanent, pi_mean, pi_std, steer]
loss_names = ["optimgain", "meankl", "entloss", "surrgain", "entropy", "pi_mean", "pi_std", "steer"]
dist = meankl
all_var_list = pi.get_trainable_variables()
if still_std:
var_list = [v for v in all_var_list if v.name.startswith("pi/pol")]# or v.name.startswith("pi/logstd")]
else:
var_list = [v for v in all_var_list if v.name.startswith("pi/pol") or v.name.startswith("pi/logstd")]
vf_var_list = [v for v in all_var_list if v.name.startswith("pi/vff")]
d_adam = MpiAdam(reward_giver.get_trainable_variables())
vfadam = MpiAdam(vf_var_list)
get_flat = U.GetFlat(var_list)
set_from_flat = U.SetFromFlat(var_list)
klgrads = tf.gradients(dist, var_list)
flat_tangent = tf.placeholder(dtype=tf.float32, shape=[None], name="flat_tan")
shapes = [var.get_shape().as_list() for var in var_list]
start = 0
tangents = []
for shape in shapes:
sz = U.intprod(shape)
tangents.append(tf.reshape(flat_tangent[start:start+sz], shape))
start += sz
gvp = tf.add_n([tf.reduce_sum(g*tangent) for (g, tangent) in zipsame(klgrads, tangents)]) # pylint: disable=E1111
fvp = U.flatgrad(gvp, var_list)
assign_old_eq_new = U.function([], [], updates=[tf.assign(oldv, newv)
for (oldv, newv) in zipsame(oldpi.get_variables(), pi.get_variables())])
compute_losses = U.function([ob, ac, atarg], losses)
compute_lossandgrad = U.function([ob, ac, atarg], losses + [U.flatgrad(optimgain, var_list)])
compute_fvp = U.function([flat_tangent, ob, ac, atarg], fvp)
compute_vflossandgrad = U.function([ob, ret], U.flatgrad(vferr, vf_var_list))
# metadata file
if expert_dataset is not None:
metadata_file_loc = ckpt_dir+'/metadata.tsv'
metadata_file = 'metadata.tsv'
os.makedirs(os.path.dirname(metadata_file_loc), exist_ok=True)
g_labels = [0 for i in range(g_step*timesteps_per_batch)]
d_labels = expert_dataset.d_labels
data_labels = g_labels+d_labels
with open(metadata_file_loc, 'a') as f:
for index, label in enumerate(data_labels):
f.write("%s\n"%(label))
# embedding settings
OB_VIZ = 'Embedding_ob'
ob_embedding = tf.get_variable(name=OB_VIZ, shape=[g_step*timesteps_per_batch+expert_dataset.obs.shape[0], ob_space.shape[0]])
ob_ph = tf.placeholder(dtype=tf.float32, shape=[g_step*timesteps_per_batch+expert_dataset.obs.shape[0], ob_space.shape[0]], name="ob_ph")
assign_ob = tf.assign(ob_embedding, ob_ph)
saver_em = tf.train.Saver({OB_VIZ: ob_embedding})
config_ob = projector.ProjectorConfig()
embedding_ob = config_ob.embeddings.add()
embedding_ob.tensor_name = OB_VIZ
embedding_ob.metadata_path = metadata_file
projector.visualize_embeddings(tf.summary.FileWriter(ckpt_dir), config_ob)
ep_vs_global = deque(maxlen=40)
ep_rets_global = deque(maxlen=40)
ep_true_rets_global = deque(maxlen=40)
@contextmanager
def timed(msg):
if rank == 0:
print(colorize(msg, color='magenta'))
tstart = time.time()
yield
print(colorize("done in %.3f seconds" % (time.time() - tstart), color='magenta'))
else:
yield
def allmean(x):
assert isinstance(x, np.ndarray)
out = np.empty_like(x)
MPI.COMM_WORLD.Allreduce(x, out, op=MPI.SUM)
out /= nworkers
return out
U.initialize()
th_init = get_flat()
MPI.COMM_WORLD.Bcast(th_init, root=0)
set_from_flat(th_init)
d_adam.sync()
vfadam.sync()
if rank == 0:
print("Init param sum", th_init.sum(), flush=True)
# Prepare for rollouts
# ----------------------------------------
model = Model(pi, reward_giver, env.num_envs, stochastic=True)
if expert_dataset is not None:
seg_gen = Runner(env, model, timesteps_per_batch, gamma, lam, length=expert_dataset.num_length, rew_type=rew_type)
else:
seg_gen = Runner(env, model, timesteps_per_batch, gamma, lam, logger.num_length, rew_type=rew_type)
episodes_so_far = 0
timesteps_so_far = 0
iters_so_far = 0
tstart = time.time()
lenbuffer = deque(maxlen=100) # rolling buffer for episode lengths
rewbuffer = deque(maxlen=100) # rolling buffer for episode rewards
true_rewbuffer = deque(maxlen=100)
assert sum([max_iters > 0, max_timesteps > 0, max_episodes > 0]) == 1
g_loss_stats = stats(loss_names)
d_loss_stats = stats(reward_giver.loss_name)
ep_stats = stats(["True_rewards", "Rewards", "Episode_length"])
    # if a pretrained (behavior-cloning) weight is provided, load it into the policy
if pretrained_weight is not None:
logger.log("Let's load the pretrained BC model.")
logger.log("Some amazing things will happen.")
U.load_variables(pretrained_weight, variables=pi.get_variables(), sess=tf.get_default_session())
if load_model_path is not None:
logger.log("Let's load the pretrained model")
logger.log("For god sake, Who knows what will happen.")
saver = tf.train.Saver(max_to_keep=5000)
saver_best = tf.train.Saver()
sess = tf.get_default_session()
params = sess.run(pi.get_trainable_variables())
saver.restore(tf.get_default_session(), load_model_path)
params = sess.run(pi.get_trainable_variables())
else:
saver = tf.train.Saver(max_to_keep=5000)
saver_best = tf.train.Saver()
eptruerew_best = 0
# if r_norm:
# rew_norm = RunningMeanStd()
model_init = os.path.join(ckpt_dir, 'model_init')
saver.save(tf.get_default_session(), model_init)
while True:
if callback: callback(locals(), globals())
if max_timesteps and timesteps_so_far >= max_timesteps:
break
elif max_episodes and episodes_so_far >= max_episodes:
break
elif max_iters and iters_so_far >= max_iters:
break
logger.log("********** Iteration %i ************" % iters_so_far)
def fisher_vector_product(p):
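            # Damped Fisher-vector product: compute_fvp returns the Hessian of the
            # mean KL applied to p (via grad((grad KL) . p)); the cg_damping term
            # keeps the conjugate-gradient solve for the natural-gradient step stable.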
return allmean(compute_fvp(p, *fvpargs)) + cg_damping * p
# ------------------ Update G ------------------
if p_update:
logger.log("Optimizing Policy...")
ob_g = []
for _ in range(g_step):
repeat = True
while repeat:
with timed("sampling"):
seg = seg_gen.run()
if seg["scene"] == scene or scene == "all":
repeat = False
logger.log("Scene :%s"%seg["scene"])
# reward normalization
# if r_norm:
# rew_norm.update(seg["rew"])
# seg["rew"] = (seg["rew"] - rew_norm.mean) / rew_norm.var
# ob, ac, atarg, ret, td1ret = map(np.concatenate, (obs, acs, atargs, rets, td1rets))
ob, ac, atarg, tdlamret = seg["ob"], seg["ac"], seg["adv"], seg["tdlamret"]
ob_g.append(ob)
ob_start = ob[0]
vpredbefore = seg["vpred"] # predicted value function before udpate
atarg = (atarg - atarg.mean()) / atarg.std() # standardized advantage function estimate
if hasattr(pi, "ob_rms"): pi.ob_rms.update(ob) # update running mean/std for policy
args = seg["ob"], seg["ac"], atarg
fvpargs = [arr[::5] for arr in args]
assign_old_eq_new() # set old parameter values to new parameter values
with timed("computegrad"):
*lossbefore, g = compute_lossandgrad(*args)
lossbefore = allmean(np.array(lossbefore))
g = allmean(g)
g_policy = g
if np.allclose(g, 0):
logger.log("Got zero gradient. not updating")
else:
with timed("cg"):
stepdir = cg(fisher_vector_product, g, cg_iters=cg_iters, verbose=rank == 0)
assert np.isfinite(stepdir).all()
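                    # Scale the CG direction so the quadratic KL estimate 0.5 * s^T H s
                    # equals max_kl, then backtrack (halving the step, at most 10 times)
                    # until the surrogate improves and the measured KL stays below
                    # 1.5 * max_kl; otherwise fall back to the old parameters.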
shs = .5*stepdir.dot(fisher_vector_product(stepdir))
lm = np.sqrt(shs / max_kl)
# logger.log("lagrange multiplier:", lm, "gnorm:", np.linalg.norm(g))
fullstep = stepdir / lm
expectedimprove = g.dot(fullstep)
surrbefore = lossbefore[0]
stepsize = 1.0
thbefore = get_flat()
for _ in range(10):
thnew = thbefore + fullstep * stepsize
set_from_flat(thnew)
meanlosses = surr, kl, *_ = allmean(np.array(compute_losses(*args)))
improve = surr - surrbefore
logger.log("Expected: %.3f Actual: %.3f" % (expectedimprove, improve))
if not np.isfinite(meanlosses).all():
logger.log("Got non-finite value of losses -- bad!")
elif kl > max_kl * 1.5:
logger.log("violated KL constraint. shrinking step.")
elif improve < 0:
logger.log("surrogate didn't improve. shrinking step.")
else:
logger.log("Stepsize OK!")
break
stepsize *= .5
else:
logger.log("couldn't compute a good step")
set_from_flat(thbefore)
if nworkers > 1 and iters_so_far % 20 == 0:
paramsums = MPI.COMM_WORLD.allgather((thnew.sum(), vfadam.getflat().sum())) # list of tuples
assert all(np.allclose(ps, paramsums[0]) for ps in paramsums[1:])
with timed("vf"):
for _ in range(vf_iters):
for (mbob, mbret) in dataset.iterbatches((seg["ob"], seg["tdlamret"]),
include_final_partial_batch=False, batch_size=128):
if hasattr(pi, "ob_rms"):
pi.ob_rms.update(mbob) # update running mean/std for policy
g = allmean(compute_vflossandgrad(mbob, mbret))
vfadam.update(g, vf_stepsize)
g_vf = g
g_losses = meanlosses
for (lossname, lossval) in zip(loss_names, meanlosses):
logger.record_tabular(lossname, lossval)
logger.record_tabular("ev_tdlam_before", explained_variance(vpredbefore, tdlamret))
ep_v, ep_acc, ep_left_offset, ep_right_offset = seg["ep_v"], seg["ep_acc"], seg["ep_left_offset"], seg["ep_right_offset"]
else:
seg = seg_gen.run()
ob, ac, atarg, tdlamret = seg["ob"], seg["ac"], seg["adv"], seg["tdlamret"]
ob_g = [ob]
# ------------------ Update D ------------------
if expert_dataset is not None:
logger.log("Optimizing Discriminator...")
logger.log(fmt_row(13, reward_giver.loss_name))
ob_expert, ac_expert = expert_dataset.get_next_batch(len(ob))
batch_size = len(ob) // d_step
d_losses = [] # list of tuples, each of which gives the loss for a minibatch
if model_output == 'DM':
ac = seg["ctrl"]
for ob_batch, ac_batch in dataset.iterbatches((ob, ac),
include_final_partial_batch=False,
batch_size=batch_size):
if not p_update:
with timed("just update discriminator"):
ob_expert, ac_expert, search_prop = expert_dataset.obs, expert_dataset.acs, 0
elif search:
with timed("searching batch"):
if search_mode == 'step':
ob_expert, ac_expert, search_prop = expert_dataset.search_batch_step(ob_batch, ac_batch)
elif search_mode == 'traj':
ob_expert, ac_expert, search_prop = expert_dataset.search_batch_traj(ob_start, batch_size, scene=scene)
else:
ob_expert, ac_expert = expert_dataset.get_next_batch(len(ob_batch), scene=scene)
# update running mean/std for reward_giver
if hasattr(reward_giver, "obs_rms"): reward_giver.obs_rms.update(np.concatenate((ob_batch, ob_expert), 0))
*newlosses, g = reward_giver.lossandgrad(ob_batch, ac_batch, ob_expert, ac_expert)
g_d = g
d_adam.update(allmean(g), d_stepsize)
d_losses.append(newlosses)
# ------------------ Visualize Embedding ---------------
if p_update:
ob_g = np.array(ob_g)
ob_g = np.reshape(ob_g, [-1, np.prod(ob_g.shape[2:])])
ob_viz = np.concatenate([ob_g, expert_dataset.obs], axis=0)
sess = tf.get_default_session()
sess.run(assign_ob, feed_dict={ob_ph: ob_viz})
ob_name = 'model_ob_'+str(iters_so_far)+'.ckpt'
saver_em.save(tf.get_default_session(), os.path.join(ckpt_dir, ob_name))
logger.log(fmt_row(13, np.mean(d_losses, axis=0)))
d_losses_name = reward_giver.loss_name
d_losses_data = np.mean(d_losses, axis=0)
kvs = [{name: data} for name, data in zip(d_losses_name, d_losses_data)]
for kv in kvs:
for k, v in kv.items():
logger.record_tabular(k, v)
lrlocal = (seg["ep_true_lens"], seg["ep_lens"], seg["ep_rets"], seg["ep_true_rets"]) # local values
listoflrpairs = MPI.COMM_WORLD.allgather(lrlocal) # list of tuples
true_lens, lens, rews, true_rets = map(flatten_lists, zip(*listoflrpairs))
true_rewbuffer.extend(true_rets)
lenbuffer.extend(lens)
rewbuffer.extend(rews)
# Save model
eptruerew_now = np.mean(true_rets)
if rank == 0 and iters_so_far % save_per_iter == 0 and ckpt_dir is not None:
modelname = 'model%d.ckpt'%iters_so_far
fname = os.path.join(ckpt_dir, modelname)
os.makedirs(os.path.dirname(fname), exist_ok=True)
saver.save(tf.get_default_session(), fname)
if rank == 0 and ckpt_dir is not None and eptruerew_now > eptruerew_best:
modelname = 'modelbest.ckpt'
fname = os.path.join(ckpt_dir, modelname)
os.makedirs(os.path.dirname(fname), exist_ok=True)
saver_best.save(tf.get_default_session(), fname)
eptruerew_best = eptruerew_now
eptruerew_last = eptruerew_now
logger.record_tabular("EpLenMean", np.mean(lenbuffer))
logger.record_tabular("EpLenMax", np.max(lenbuffer))
if expert_dataset is not None:
logger.record_tabular("EpRewMean", np.mean(rewbuffer))
logger.record_tabular("EpTrueRewMean", np.mean(true_rewbuffer))
logger.record_tabular("EpTrueRewMax", np.max(true_rewbuffer))
logger.record_tabular("EpThisIter", len(true_lens))
logger.record_tabular("EpVelocity", ep_v)
logger.record_tabular("EpAcc", ep_acc)
logger.record_tabular("EpLeftOffset", ep_left_offset)
logger.record_tabular("EpRightOffset", ep_right_offset)
corr_rew = np.corrcoef([seg["rew"], seg["truerew"]])[0][1]
ep_rets = [ret for ret in seg["ep_rets"]]
min_len = min(len(seg["v_ep"]), len(seg["ep_true_rets"]), len(ep_rets))
for i in range(min_len):
ep_vs_global.append(seg["v_ep"][i])
ep_rets_global.append(ep_rets[i])
ep_true_rets_global.append(seg["ep_true_rets"][i])
corr_eprew = np.corrcoef([ep_vs_global, ep_rets_global])[0][1]
corr_eptruerew = np.corrcoef([ep_vs_global, ep_true_rets_global])[0][1]
logger.record_tabular("CorrRew", corr_rew)
logger.record_tabular("CorrEpRew", corr_eprew)
logger.record_tabular("CorrEpTrueRew", corr_eptruerew)
episodes_so_far += len(true_lens)
timesteps_so_far += sum(true_lens)
iters_so_far += 1
logger.record_tabular("EpisodesSoFar", episodes_so_far)
logger.record_tabular("TimestepsSoFar", timesteps_so_far)
logger.record_tabular("TimeElapsed", time.time() - tstart)
if rank == 0:
logger.dump_tabular()
def flatten_lists(listoflists):
return [el for list_ in listoflists for el in list_]
```
#### File: srunner/scenarios/maneuver_opposite_direction.py
```python
from six.moves.queue import Queue # pylint: disable=relative-import
import py_trees
from srunner.scenariomanager.atomic_scenario_behavior import *
from srunner.scenariomanager.atomic_scenario_criteria import *
from srunner.scenarios.basic_scenario import *
from srunner.tools.scenario_helper import get_waypoint_in_distance
MANEUVER_OPPOSITE_DIRECTION = [
"ManeuverOppositeDirection"
]
class ManeuverOppositeDirection(BasicScenario):
"""
"Vehicle Maneuvering In Opposite Direction" (Traffic Scenario 06)
"""
category = "ManeuverOppositeDirection"
def __init__(self, world, ego_vehicle, config, randomize=False, debug_mode=False, criteria_enable=True,
obstacle_type='barrier', timeout=120):
"""
Setup all relevant parameters and create scenario
obstacle_type -> flag to select type of leading obstacle. Values: vehicle, barrier
"""
self._world = world
self._map = CarlaDataProvider.get_map()
self._first_vehicle_location = 50
self._second_vehicle_location = self._first_vehicle_location + 60
self._ego_vehicle_drive_distance = self._second_vehicle_location * 2
self._start_distance = self._first_vehicle_location * 0.9
self._opposite_speed = 20 # km/h
self._source_gap = 40 # m
self._reference_waypoint = self._map.get_waypoint(config.trigger_point.location)
self._source_transform = None
self._sink_location = None
self._blackboard_queue_name = 'ManeuverOppositeDirection/actor_flow_queue'
self._queue = Blackboard().set(self._blackboard_queue_name, Queue())
self._obstacle_type = obstacle_type
self._first_actor_transform = None
self._second_actor_transform = None
self._third_actor_transform = None
# Timeout of scenario in seconds
self.timeout = timeout
super(ManeuverOppositeDirection, self).__init__(
"ManeuverOppositeDirection",
ego_vehicle,
config,
world,
debug_mode,
criteria_enable=criteria_enable)
def _initialize_actors(self, config):
"""
Custom initialization
"""
first_actor_waypoint, _ = get_waypoint_in_distance(self._reference_waypoint, self._first_vehicle_location)
second_actor_waypoint, _ = get_waypoint_in_distance(self._reference_waypoint, self._second_vehicle_location)
second_actor_waypoint = second_actor_waypoint.get_left_lane()
first_actor_transform = carla.Transform(
first_actor_waypoint.transform.location,
first_actor_waypoint.transform.rotation)
if self._obstacle_type == 'vehicle':
first_actor_model = 'vehicle.nissan.micra'
else:
first_actor_transform.rotation.yaw += 90
first_actor_model = 'static.prop.streetbarrier'
second_prop_waypoint = first_actor_waypoint.next(2.0)[0]
position_yaw = second_prop_waypoint.transform.rotation.yaw + 90
offset_location = carla.Location(
0.50 * second_prop_waypoint.lane_width * math.cos(math.radians(position_yaw)),
0.50 * second_prop_waypoint.lane_width * math.sin(math.radians(position_yaw)))
second_prop_transform = carla.Transform(
second_prop_waypoint.transform.location + offset_location, first_actor_transform.rotation)
second_prop_actor = CarlaActorPool.request_new_actor(first_actor_model, second_prop_transform)
second_prop_actor.set_simulate_physics(True)
first_actor = CarlaActorPool.request_new_actor(first_actor_model, first_actor_transform)
first_actor.set_simulate_physics(True)
second_actor = CarlaActorPool.request_new_actor('vehicle.audi.tt', second_actor_waypoint.transform)
self.other_actors.append(first_actor)
self.other_actors.append(second_actor)
if self._obstacle_type != 'vehicle':
self.other_actors.append(second_prop_actor)
self._source_transform = second_actor_waypoint.transform
sink_waypoint = second_actor_waypoint.next(1)[0]
while not sink_waypoint.is_intersection:
sink_waypoint = sink_waypoint.next(1)[0]
self._sink_location = sink_waypoint.transform.location
self._first_actor_transform = first_actor_transform
self._second_actor_transform = second_actor_waypoint.transform
self._third_actor_transform = second_prop_transform
def _create_behavior(self):
"""
The behavior tree returned by this method is as follows:
The ego vehicle is trying to pass a leading vehicle in the same lane
by moving onto the oncoming lane while another vehicle is moving in the
opposite direction in the oncoming lane.
"""
# Leaf nodes
actor_source = ActorSource(
self._world, ['vehicle.audi.tt', 'vehicle.tesla.model3', 'vehicle.nissan.micra'],
self._source_transform, self._source_gap, self._blackboard_queue_name)
actor_sink = ActorSink(self._world, self._sink_location, 10)
ego_drive_distance = DriveDistance(self.ego_vehicle, self._ego_vehicle_drive_distance)
waypoint_follower = WaypointFollower(
self.other_actors[1], self._opposite_speed,
blackboard_queue_name=self._blackboard_queue_name, avoid_collision=True)
# Non-leaf nodes
parallel_root = py_trees.composites.Parallel(policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ONE)
# Building tree
parallel_root.add_child(ego_drive_distance)
parallel_root.add_child(actor_source)
parallel_root.add_child(actor_sink)
parallel_root.add_child(waypoint_follower)
scenario_sequence = py_trees.composites.Sequence()
scenario_sequence.add_child(ActorTransformSetter(self.other_actors[0], self._first_actor_transform))
scenario_sequence.add_child(ActorTransformSetter(self.other_actors[1], self._second_actor_transform))
scenario_sequence.add_child(ActorTransformSetter(self.other_actors[2], self._third_actor_transform))
scenario_sequence.add_child(parallel_root)
scenario_sequence.add_child(ActorDestroy(self.other_actors[0]))
scenario_sequence.add_child(ActorDestroy(self.other_actors[1]))
scenario_sequence.add_child(ActorDestroy(self.other_actors[2]))
return scenario_sequence
def _create_test_criteria(self):
"""
A list of all test criteria will be created that is later used
        in the parallel behavior tree.
"""
criteria = []
collision_criterion = CollisionTest(self.ego_vehicle)
criteria.append(collision_criterion)
return criteria
def __del__(self):
"""
Remove all actors upon deletion
"""
self.remove_all_actors()
``` |
{
"source": "jiankaiwang/ioutracker",
"score": 2
} |
#### File: ioutracker/inference/MOTDet17Main.py
```python
import os
import time
import cv2
import numpy as np
import logging
import subprocess
import tqdm
try:
from ioutracker import detections_transform, IOUTracker, loadLabel
except ModuleNotFoundError:
# The relative path is under the home directory.
import sys
relativePaths = [os.path.join(".", "ioutracker", "dataloaders"),
os.path.join(".", "dataloaders"),
os.path.join(".", "ioutracker", "src"),
os.path.join(".", "src")]
for rPath in relativePaths:
sys.path.append(rPath)
from Helpers import detections_transform
from IOUTracker import IOUTracker
from MOTDataLoader import loadLabel
# In[]
def colors(num=300):
np.random.seed(10)
cp = []
for _ in range(num):
r = np.random.randint(low=0, high=255)
g = np.random.randint(low=0, high=255)
b = np.random.randint(low=0, high=255)
cp.append((r, g, b))
return cp
# In[]
def outputAsFramesToVideo(detection_conf, iou_threshold, min_t, track_min_conf,
labelFilePath, frameFilePath, trackingOutput, fps,
outputFileName, plotting=True):
"""outputAsFramesToVideo generates the outputs by frames and generates the video
by these frames.
Args:
detection_conf: the hyperparameter defined in IOUTracker
iou_threshold: the hyperparameter defined in IOUTracker
min_t: the hyperparameter defined in IOUTracker
track_min_conf: the hyperparameter defined in IOUTracker
labelFilePath: the path pointing to `gt.txt`
frameFilePath: the path pointing to `img1` folder
trackingOutput: the target folder for the output images
fps: the frame rate of the video,
           it is suggested to set this equal to min_t or a multiple of it
outputFileName: the name of the output video file
plotting: plots the frame and outputs the video file
Returns:
None
"""
labels, df = loadLabel(src=labelFilePath, format_style="onlybbox_dict")
# generates the color list for plotting the box on the frames
COLOR_LIST = colors()
iouTracks = IOUTracker(detection_conf=detection_conf,
iou_threshold=iou_threshold,
min_t=min_t,
track_min_conf=track_min_conf)
if plotting:
# remove all images existing
subprocess.call("rm -f {output}/*.jpg {output}/*.mp4".format(\
output=trackingOutput), shell=True)
start = time.time()
for label in tqdm.trange(1, len(labels), 1):
logging.debug("\n")
logging.debug("Frame: {}".format(label))
# iou tracker
iouTracks.read_detections_per_frame(detections=labels[label])
active_tracks = iouTracks.get_active_tracks()
finished_tracks = iouTracks.get_finished_tracks()
logging.debug("Active tracks: {}".format(len(active_tracks)))
logging.debug("Finished tracks: {}".format(len(finished_tracks)))
if plotting:
# image
img_name = "{:>6s}.jpg".format(str(label)).replace(' ', '0')
img_path = os.path.join(frameFilePath, img_name)
assert os.path.exists(img_path)
img = cv2.imread(filename=img_path)
for act_track in active_tracks:
tid = act_track.tid % 300
# act_track_ped: [bX1, bY1, bW, bH, Visible]
act_track_ped = act_track.previous_detections()
# [bX1, bY1, bW, bH, Visible] -> [bX1, bY1, bX2, bY2]
act_track_ped_coord = detections_transform(act_track_ped)
x1, y1, x2, y2 = np.array(act_track_ped_coord, dtype=int)
cv2.rectangle(img, (x1, y1), (x2, y2), COLOR_LIST[tid], 2)
text_x = x1
text_y = int(y1*1.01)
cv2.putText(img, "TID:{}".format(str(act_track.tid)), (text_x, text_y),
cv2.FONT_HERSHEY_SIMPLEX, 0.8, COLOR_LIST[tid], 1, cv2.LINE_AA)
# add additional info about the video
parainfo = ["Detection Conf: {:>4s}".format(str(detection_conf)), \
"IOU Threshold: {:>4s}".format(str(iou_threshold)), \
"MIN Time/Frame: {:>4s}".format(str(min_t)), \
"Track Min Conf: {:>4s}".format(str(track_min_conf)), \
"FRAMERATE(fps): {:>4s}".format(str(fps))]
for midx in range(len(parainfo)):
cv2.putText(img, parainfo[midx], (5, 14*(midx+1)), cv2.FONT_HERSHEY_SIMPLEX,
0.4, (0, 0, 0), 1, cv2.LINE_AA)
tracking_output_file = "{:>6s}.jpg".format(str(label)).replace(" ", "0")
cv2.imwrite(os.path.join(trackingOutput, tracking_output_file), img)
if plotting:
# *.jpg to .mp4
target_video_path = os.path.join(trackingOutput, "tracking_{}.mp4".format(outputFileName))
subprocess.call("cat $(find {} | grep 'jpg' | sort) | ffmpeg -f image2pipe -r {} -i - -vcodec libx264 {}".format(
trackingOutput, fps, target_video_path), shell=True)
peroid = time.time() - start
print("Total time cost: {}".format(peroid))
# In[]
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
pass
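  # Example invocation (all paths and hyperparameter values below are placeholders,
  # not part of the original script):
  # outputAsFramesToVideo(detection_conf=0.5, iou_threshold=0.5, min_t=3,
  #                       track_min_conf=0.5,
  #                       labelFilePath="/data/MOT17/train/MOT17-02/gt/gt.txt",
  #                       frameFilePath="/data/MOT17/train/MOT17-02/img1",
  #                       trackingOutput="/data/tracking_output", fps=30,
  #                       outputFileName="MOT17-02", plotting=True)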
``` |
{
"source": "jiankaiwang/seed",
"score": 3
} |
#### File: seed/python/py2mysql.py
```python
import mysql.connector
class py2mysql:
# ----------
# private
# ----------
__host = ""
__port = ""
__user = ""
__pass = ""
__dbname = ""
__connectionValid = False
__msg = ""
#
# desc : return status
# retn : { "state" : [success|failure|warning], "info" : "message", "data" : []}
#
def __retStatus(self, state, info, data):
return {"state" : state, "info" : info, "data" : data}
#
# desc : check mysql server connection
#
def __checkConnect(self):
try:
conn = mysql.connector.connect(\
host = self.__host, \
port = self.__port, \
user = self.__user, \
                password = self.__pass, \
database = self.__dbname\
)
self.__connectionValid = True
conn.close()
except mysql.connector.Error as err:
self.__connectionValid = False
self.__msg = "{}".format(err)
# ----------
# public
# ----------
#
# desc : constructor
#
def __init__(self, host, port, user, pwd, dbname):
self.__host = host
self.__port = port
self.__user = user
        self.__pass = pwd
self.__dbname = dbname
self.__connectionValid = False
self.__msg = ""
# check connect
self.__checkConnect()
#
    # desc : get connection status
#
def checkConnectionValid(self):
if self.__connectionValid == False:
return self.__retStatus("failure", self.__msg, "")
else :
return self.__retStatus("success", "Connection is valid.", "")
#
# desc : execute sql command
# inpt :
# |- sqlCmd : "SELECT first_name, hire_date FROM employees WHERE hire_date BETWEEN %s AND %s"
# |- parameterInSeq (tuple) : (datetime.date(1999, 1, 1), datetime.date(1999, 12, 31))
# |- isQueryFlag : {True|False}
# |- asdict (return as dictionary) : {True|False}
#
def execsql(self, sqlCmd, parameterInSeq, isQueryFlag, asdict=True):
if self.__connectionValid == False:
return self.__retStatus("failure", self.__msg, "")
if not (isinstance(sqlCmd, str) \
and isinstance(parameterInSeq, tuple) \
and isinstance(isQueryFlag, bool)\
and isinstance(asdict, bool))\
:
return self.__retStatus("failure", "Parameters passed are wrong.", "")
# connection is valid
try:
conn = mysql.connector.connect(\
host = self.__host, \
port = self.__port, \
user = self.__user, \
                password = self.__pass, \
database = self.__dbname\
)
cursor = conn.cursor()
cursor.execute(sqlCmd, parameterInSeq)
if isQueryFlag:
curInfo = [desc[0] for desc in cursor.description]
rawData = cursor.fetchall()
retData = []
if asdict:
tmp = {}
for item in range(0, len(rawData), 1):
tmp = {}
for col in range(0, len(curInfo), 1):
tmp.setdefault(curInfo[col], rawData[item][col])
retData.append(tmp)
else:
retData.append(curInfo)
tmp = []
for item in range(0, len(rawData), 1):
tmp = []
for col in range(0, len(curInfo), 1):
tmp.append(rawData[item][col])
retData.append(tmp)
return self.__retStatus("success", "Complete query.", retData)
else:
conn.commit();
return self.__retStatus("success", "Complete non-query sql command.", "")
cursor.close()
conn.close()
except mysql.connector.Error as err:
return self.__retStatus("failure", "{}".format(err), "")
```
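A minimal usage sketch for the `py2mysql` wrapper above (assuming the file is importable as `py2mysql`); the host, credentials, database, and query are placeholder values, not anything taken from the original repository:
```python
from py2mysql import py2mysql

# Placeholder connection settings -- replace with real ones.
db = py2mysql("127.0.0.1", 3306, "demo_user", "demo_password", "employees_db")

status = db.checkConnectionValid()
print(status["state"], status["info"])

if status["state"] == "success":
    # Parameterized query: values are passed as a tuple and, with asdict=True,
    # each row comes back as a {column_name: value} dictionary.
    result = db.execsql(
        "SELECT first_name, hire_date FROM employees WHERE hire_date BETWEEN %s AND %s",
        ("1999-01-01", "1999-12-31"),
        True,
        asdict=True)
    print(result["state"], result["data"])
```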
#### File: seed/python/TF1_FrozenModel.py
```python
import os
import tensorflow as tf
from collections import OrderedDict
# In[]
class OperateFrozenModel:
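  # Utility wrappers around common TF1 frozen-graph workflows: freeze the current
  # session into a .pb, reload a .pb into a tf.Graph, and convert it to TFLite,
  # SavedModel, or a TensorBoard event file. The __main__ block at the bottom of
  # this file walks through all of these end to end.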
@staticmethod
def save_sess_into_frozen_model(sess, outputs, pb_path):
"""Access the sess and save it as the frozen model.
Args:
sess: an active session
outputs: a list contains the name of tensor, e.g. ["result"]
pb_path: the path for output frozen model
Returns:
state: True or False
Message: None (for True) or Message (for False)
"""
try:
output_graph_def = tf.graph_util.convert_variables_to_constants(
sess, sess.graph.as_graph_def(), outputs)
with tf.gfile.FastGFile(pb_path, "wb") as fout:
fout.write(output_graph_def.SerializeToString())
return True, None
except Exception as e:
return False, str(e)
@staticmethod
def load_frozen_model(pb_path, graph_name=""):
"""Load the frozen model. (Serial)
Returns:
state: True or False
      graph: a tf.Graph object (for True) or an error message string (for False)
"""
try:
tf.reset_default_graph()
graph = tf.Graph()
with graph.as_default():
graph_def = tf.GraphDef()
with tf.gfile.GFile(pb_path, "rb") as fin:
graph_def.ParseFromString(fin.read())
tf.import_graph_def(graph_def, name=graph_name)
return True, graph
except Exception as e:
return False, str(e)
@staticmethod
def transform_into_tflite(pb_path, inputs, outputs, tflite_path):
"""Transform a frozen model into a tflite one.
Args:
pb_path: the path to a frozen model
inputs: a list contains the name of tensor, e.g. ["input"]
outputs: a list contains the name of tensor, e.g. ["result"]
tflite_path: the path for tflite model
Returns:
state: True or False
Message: None (for True) or Message (for False)
"""
try:
converter = tf.lite.TFLiteConverter.from_frozen_graph(pb_path, inputs, outputs)
tflite_model = converter.convert()
open(tflite_path, "wb").write(tflite_model)
return True, None
except Exception as e:
return False, str(e)
@staticmethod
def transform_into_savedmodel(pb_path, inputs, outputs, model_dir):
"""Export the frozen model as the savedModel format.
Args:
pb_path: the path to a frozen model
inputs: a list contains available name of tensor node, e.g. ['image']
outputs: a list contains the name of tensor, e.g. ["result"]
model_dir: the directory to the saved model
Returns:
state: True or False
Message: Output Directory (for True) or Message (for False)
"""
try:
state, graph = OperateFrozenModel.load_frozen_model(pb_path)
if not state: raise Exception(graph)
tensor_inputs, tensor_outputs = {}, {}
for node in inputs:
tensor_inputs[node] = graph.get_tensor_by_name("{}:0".format(node))
for node in outputs:
tensor_outputs[node] = graph.get_tensor_by_name("{}:0".format(node))
print(tensor_inputs, tensor_outputs)
tf.reset_default_graph()
with tf.Session(graph=graph) as sess:
tf.saved_model.simple_save(sess, model_dir, tensor_inputs, tensor_outputs)
return True, model_dir
except Exception as e:
return False, str(e)
@staticmethod
def write_graph_for_tfboard(pb_path, logdir_path):
"""Write out the frozen graph into the logdir for visualization on TFBoard.
Args:
pb_path: the path to a frozen model
logdir_path: the folder path further monitored by Tensorboard
Returns:
state: True or False
Message: Output directory (for True) or Message (for False)
"""
try:
state, graph = OperateFrozenModel.load_frozen_model(pb_path)
if not state: raise Exception(graph)
if not tf.gfile.Exists(logdir_path):
os.makedirs(logdir_path)
writer = tf.summary.FileWriter(logdir=logdir_path)
writer.add_graph(graph)
return True, logdir_path
except Exception as e:
return False, str(e)
# In[]
class GraphOperations:
@staticmethod
def list_operations(graph, count=5):
"""List operations in the frozen model.
Args:
graph: a tf.Graph() object, you can first load the graph via
OperateFrozenModel.load_frozen_model(pb_path)
Standard Outputs:
List operations or error messages.
"""
try:
operations = graph.get_operations()
print("Total Operations: {}".format(len(operations)))
if count < 1:
for ops in operations:
print(ops.name)
else:
for ops in operations[:count]:
print(ops.name)
print("...")
for ops in operations[-count:]:
print(ops.name)
except Exception as e:
print("Error in listing operations: {}.".format(str(e)))
@staticmethod
def find_operations(graph, name=None):
"""Test whether the operation is available in graph or not.
Args:
graph: a tf.Graph() object, you can first load the graph via
OperateFrozenModel.load_frozen_model(pb_path)
Returns:
state: True or False
Message: True / False (for state `True`) or Message (for False)
"""
try:
operations = graph.get_operations()
operation_names = [ ops.name for ops in operations ]
return True, name in operation_names
except Exception as e:
return False, str(e)
# In[]
class GraphTensors:
@staticmethod
def list_tensors(graph):
"""List tensors in the graph.
Args:
graph: a tf.Graph() object, you can first load the graph via
OperateFrozenModel.load_frozen_model(pb_path)
Standard Outputs:
List tensors or error messages.
"""
try:
for n in graph.as_graph_def().node:
if n.op in ["Placeholder", "PlaceholderWithDefault", "Const", "Identity"]:
if n.op == "Identity":
if len(n.attr['_class'].list.s) < 1:
# check there is source or not
tensor_name = graph.get_tensor_by_name(n.name + ":0")
else: continue
else:
tensor_name = graph.get_tensor_by_name(n.name + ":0")
print("{:<50} {}".format(n.name + ":0", tensor_name.shape))
except Exception as e:
print("Can't list tensors because error {}.".format(str(e)))
@staticmethod
def get_nodes_with_type(graph):
"""List the type of each node in the graph.
Args:
graph: a tf.Graph() object, you can first load the graph via
OperateFrozenModel.load_frozen_model(pb_path)
Returns:
state: True or False
object: a OrderedDict object whose key is node type and value is a list
containing all nodes belonging to the same node type
or Message in string (for False)
"""
try:
node_type = OrderedDict()
for n in graph.as_graph_def().node:
if n.op not in list(node_type.keys()):
node_type[n.op] = [n]
else:
node_type[n.op].append(n)
return True, node_type
except Exception as e:
return False, str(e)
# In[]
if __name__ == "__main__":
pb_path = "/Users/jiankaiwang/Desktop/output_graph.pb"
if not os.path.exists(pb_path):
# a simple MLP network was implemented to generated a frozen model
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/Users/jiankaiwang/devops/tmp/MNIST_data/", one_hot=True)
tf.reset_default_graph()
def mlp(inputs):
def perception(inputs, input_shape, bias_shape):
weight_std = (2.0 / input_shape[0]) ** 0.5
w_init = tf.random_normal_initializer(stddev=weight_std)
b_init = tf.constant_initializer(value=0.)
W = tf.get_variable("W", shape=input_shape, initializer=w_init)
b = tf.get_variable("b", shape=bias_shape, initializer=b_init)
output = tf.add(tf.matmul(inputs, W), b)
return tf.nn.relu(output)
with tf.variable_scope("mlp"):
with tf.variable_scope("hidden_1"):
hidden_1 = perception(img, [784, 256], [256])
with tf.variable_scope("hidden_2"):
hidden_2 = perception(hidden_1, [256, 128], [128])
with tf.variable_scope("hidden_3"):
output = perception(hidden_2, [128, 10], [10])
output = tf.identity(output, name="result")
return output
def validation(label, logits):
compare = tf.equal(tf.argmax(label, axis=1), tf.argmax(logits, axis=1))
accuracy = tf.reduce_mean(tf.cast(compare, tf.float32))
return accuracy
with tf.Graph().as_default() as graph:
img = tf.placeholder(tf.float32, shape=[None, 784], name="input")
img -= 0.5 # shift data to -0.5 ~ 0.5
label = tf.placeholder(tf.float32, shape=[None, 10])
global_step = tf.Variable(0, name="global_step", trainable=False)
output = mlp(img)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(
labels=label, logits=output)
loss = tf.reduce_mean(cross_entropy)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1e-2)
train_opt = optimizer.minimize(loss, global_step=global_step)
validator = validation(label, output)
batch_size = 32
batches = mnist.train.num_examples // batch_size
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for epoch in range(0, 51):
for bs in range(0, batches):
train_x, train_y = mnist.train.next_batch(batch_size)
sess.run([train_opt], feed_dict={img: train_x, label: train_y})
if epoch != 0 and epoch % 10 == 0:
val_x, val_y = mnist.test.images, mnist.test.labels
acc, cost = sess.run([validator, loss], feed_dict={img: val_x, label: val_y})
print("Epoch {}: Accuracy {}, Cost {}".format(epoch, acc, cost))
state, error = OperateFrozenModel.save_sess_into_frozen_model(
sess, ["result"], pb_path)
assert os.path.exists(pb_path), "PB was not found."
print("\n----------")
state, graph = OperateFrozenModel.load_frozen_model(pb_path)
assert state, "Can't load the frozen model. {}".format(graph)
print("Load the frozen model completely.")
print("----------\n")
print("\n----------")
print("Transfrom a frozen model into tflite one.")
tflite_model_path = "/Users/jiankaiwang/Desktop/output.tflite"
state, error = OperateFrozenModel.transform_into_tflite(
pb_path, ["input"], ["result"], tflite_model_path)
assert state, "Error in transforming tflite model: {}.".format(error)
print("----------\n")
print("\n----------")
print("Transfrom a frozen model into savedmodel format.")
model_dir = "/Users/jiankaiwang/Desktop/savedModel"
state, error = OperateFrozenModel.transform_into_savedmodel(
pb_path, ["input"], ["result"], model_dir)
assert state, "Error in transforming savedmodel format: {}.".format(error)
print("----------\n")
print("\n----------")
print("List operations in graph.")
GraphOperations.list_operations(graph)
print("----------\n")
print("\n----------")
print("Check if the nodel is in graph or not.")
for ops_name in ["a", "b"]:
state, existing = GraphOperations.find_operations(graph, ops_name)
assert state, "Can't find operations because of error {}.".format(existing)
print("Operation {} exists? {}.".format(ops_name, existing))
print("----------\n")
print("\n----------")
print("List tensors in graph.")
GraphTensors.list_tensors(graph)
print("----------\n")
print("\n----------")
print("Aggregate tensors with their types.")
state, dicts = GraphTensors.get_nodes_with_type(graph)
assert state, "Can't form dictionary: {}".format(dicts)
type_1 = list(dicts.keys())[0]
operators = dicts[type_1]
print(operators)
print("----------\n")
print("\n----------")
print("Write graph for visualization via TFBoard.")
logdir_path = "/Users/jiankaiwang/Desktop/logs"
state, error = OperateFrozenModel.write_graph_for_tfboard(pb_path, logdir_path)
assert state, "Error in writing out frozen graph: {}.".format(error)
print("----------\n")
``` |
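The script above converts the frozen graph to `output.tflite` but never loads it back. A minimal verification sketch using the TensorFlow Lite interpreter (assuming a TensorFlow release that ships `tf.lite.Interpreter`, and reusing the `tflite_model_path` and the 1x784 input shape from the script):
```python
import numpy as np
import tensorflow as tf

# Load the converted model back and run one dummy MNIST-shaped sample through it.
interpreter = tf.lite.Interpreter(model_path="/Users/jiankaiwang/Desktop/output.tflite")
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

dummy = np.random.rand(1, 784).astype(np.float32) - 0.5   # same shift as the training input
interpreter.set_tensor(input_details[0]['index'], dummy)
interpreter.invoke()
logits = interpreter.get_tensor(output_details[0]['index'])
print("tflite output shape:", logits.shape)               # expected (1, 10)
```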
{
"source": "Jiankun-chen/Supervised-SNN-with-GD",
"score": 2
} |
#### File: Supervised-SNN-with-GD/SUPSNN/evaluate.py
```python
import tensorflow as tf
def loss_calc(logits, labels):
labels = tf.cast(labels, dtype=tf.float32)
labels = tf.squeeze(labels)
label_0 = labels[0, :]
label_1 = labels[1, :]
label_2 = labels[2, :]
logit_0 = logits[0, :]
logit_1 = logits[1, :]
logit_2 = logits[2, :]
labels = tf.cast(tf.squeeze(labels), dtype=tf.float32)
label_00 = tf.reduce_sum(labels[0, :], 0)
label_11 = tf.reduce_sum(labels[1, :], 0)
label_22 = tf.reduce_sum(labels[2, :], 0)
temp1 = tf.cond(tf.greater(label_00, label_11), lambda: 0.0, lambda: 1.0)
gt_cl = tf.cond(tf.greater(temp1, label_22), lambda: temp1, lambda: 2.0)
def f_a(label, logit):
huber = tf.losses.huber_loss(label, logit, weights=1.0, delta=1.0)
return huber
def f_b():
huber = tf.cond(tf.equal(gt_cl, 1), lambda: f_c(label_1, logit_1), lambda: f_d(label_2, logit_2))
return huber
def f_c(label, logit):
huber = tf.losses.huber_loss(label, logit, weights=1.0, delta=1.0)
return huber
def f_d(label, logit):
huber = tf.losses.huber_loss(label, logit, weights=1.0, delta=1.0)
return huber
huber = tf.cond(tf.equal(gt_cl, 0), lambda: f_a(label_0, logit_0), lambda: f_b())
LOSS_HUBER = huber
def f_e(label, logit):
mse = tf.losses.mean_squared_error(label, logit)
return mse
def f_f():
mse = tf.cond(tf.equal(gt_cl, 1), lambda: f_g(label_1, logit_1), lambda: f_h(label_2, logit_2))
return mse
def f_g(label, logit):
mse = tf.losses.mean_squared_error(label, logit)
return mse
def f_h(label, logit):
mse = tf.losses.mean_squared_error(label, logit)
return mse
mse = tf.cond(tf.equal(gt_cl, 0), lambda: f_e(label_0, logit_0), lambda: f_f())
LOSS_MSE = mse
tf.summary.scalar('LOSS_HUBER', LOSS_HUBER)
tf.summary.scalar('LOSS_MSE', LOSS_MSE)
return LOSS_HUBER, LOSS_MSE
def eq_calc(logits, labels):
labels = tf.cast(tf.squeeze(labels), dtype=tf.float32)
label_0 = tf.reduce_sum(labels[0, :], 0)
label_1 = tf.reduce_sum(labels[1, :], 0)
label_2 = tf.reduce_sum(labels[2, :], 0)
temp1 = tf.cond(tf.greater(label_0, label_1), lambda: 0.0, lambda: 1.0)
gt_cl = tf.cond(tf.greater(temp1, label_2), lambda: temp1, lambda: 2.0)
logits = tf.cast(logits, dtype=tf.float32)
logits_0 = tf.reduce_sum(logits[0, :], 0)
logits_1 = tf.reduce_sum(logits[1, :], 0)
logits_2 = tf.reduce_sum(logits[2, :], 0)
temp2 = tf.cond(tf.greater(logits_0, logits_1), lambda: 0.0, lambda: 1.0)
pre_cl = tf.cond(tf.greater(temp2, logits_2), lambda: temp2, lambda: 2.0)
eq = tf.equal(pre_cl, gt_cl)
eq_int = tf.to_int32(eq)
return eq_int
def evaluation(logits, labels):
correct_prediction = tf.equal(tf.argmax(logits, 3), labels)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('accuracy', accuracy)
out_label1 = tf.argmax(logits, 3)
out_label = tf.transpose(out_label1, perm=[1, 2, 0])
return accuracy, out_label
```
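The nested `tf.cond` calls in `loss_calc` and `eq_calc` are hard to follow. The NumPy sketch below replays the same branching order on a hypothetical 3xN label block (it only mirrors the logic as written above and is not taken from the repository):
```python
import numpy as np

def select_class(block):
    """Replay the tf.cond cascade: compare the row-0 and row-1 sums first,
    then compare the resulting index against the row-2 sum."""
    s0, s1, s2 = block.sum(axis=1)
    temp1 = 0.0 if s0 > s1 else 1.0
    return temp1 if temp1 > s2 else 2.0

# Hypothetical spike-count style labels where row 1 is the active class.
labels = np.array([[0., 0., 0., 0.],
                   [1., 1., 0., 1.],
                   [0., 0., 0., 0.]])
print(select_class(labels))   # -> 1.0
```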
#### File: Supervised-SNN-with-GD/SUPSNN/recep_field.py
```python
import tensorflow as tf
import numpy as np
def rf_tf(inp):
sca1 = 0.625
sca2 = 0.125
sca3 = -0.125
sca4 = -0.5
# Receptive field kernel
w = [[ sca4, sca3, sca2, sca3, sca4],
[ sca3, sca2, sca1, sca2, sca3],
[sca2, sca1, 1, sca1, sca2],
[ sca3, sca2, sca1, sca2, sca3],
[ sca4, sca3, sca2, sca3, sca4]]
filter = tf.convert_to_tensor(w, dtype=tf.float32)
filter = tf.expand_dims(filter, -1)
filter = tf.expand_dims(filter, -1)
pot = tf.nn.conv2d(inp, filter, strides=[1, 1, 1, 1], padding='SAME')
return pot
def rf_np(inp):
sca1 = 0.625
sca2 = 0.125
sca3 = -0.125
sca4 = -.5
# Receptive field kernel
w = [[ sca4, sca3, sca2, sca3, sca4],
[ sca3, sca2, sca1, sca2, sca3],
[ sca2, sca1, 1, sca1, sca2],
[ sca3, sca2, sca1, sca2, sca3],
[ sca4, sca3, sca2, sca3, sca4]]
pot = np.zeros([inp.shape[0], inp.shape[1]])
ran = [-2, -1, 0, 1, 2]
ox = 2
oy = 2
# Convolution
for i in range(inp.shape[0]):
for j in range(inp.shape[1]):
summ = 0
for m in ran:
for n in ran:
if (i + m) >= 0 and (i + m) <= inp.shape[0] - 1 and (j + n) >= 0 and (j + n) <= inp.shape[1] - 1:
summ = summ + w[ox + m][oy + n] * inp[i + m][j + n] / 255
pot[i][j] = summ
return pot
``` |
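Both functions compute the same 5x5 receptive-field response, with one asymmetry worth noting: `rf_np` divides the input by 255 internally, while `rf_tf` does not and expects an NHWC float32 tensor of shape [batch, H, W, 1]. A small usage sketch for the NumPy path, assuming `rf_np` from the file above is importable and using a hypothetical 28x28 image:
```python
import numpy as np

# Hypothetical 28x28 grayscale image with values in 0..255.
img = np.random.randint(0, 256, size=(28, 28)).astype(np.float32)
pot = rf_np(img)      # rf_np scales by 1/255 internally
print(pot.shape)      # -> (28, 28): same spatial size, zero-padded at the borders
```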
{
"source": "JianLi0-0/Learning-soft-priorities-with-constrained-Bayesian-optimization",
"score": 2
} |
#### File: Learning-soft-priorities-with-constrained-Bayesian-optimization/controllers/manipulatorController.py
```python
import numpy as np
from scipy import *
from RBFN import RBFN
from cvxopt import solvers, matrix, spmatrix
from controllers import positionTask
from controllers import jointAngleTask
import logging
import time
import os
class manipulatorController(object):
""" Add damping force to the skeleton """
def __init__(self, skel, hyperParam, jointAngle, runtime, setGravity=False, rbfnCenters=None, numCenters=5):
self.skel = skel
self.g = self.skel.world.gravity()
self.setGravity = setGravity
self.runtime = runtime
self.rbfn = RBFN.RBFN(1, numCenters, 3, self.runtime, centers=rbfnCenters) # indim, numCenters, outdim, time
self.rbfn.setHyperParams(hyperParam)
self.jointAngle = jointAngle
self.positionTask = positionTask.positionTask(skel, array([0.5, 0.16, 0.0]).reshape((3, 1)), bodyNodeIndex=6) # array([0.5, 0.16, 0.0])
self.jointAngelTask = jointAngleTask.jointAngleTask(skel, array([0.0, 1.54, -2.296, 0.0, -0.8, 0.0]).reshape((6, 1)), Kp=0.5, Kd=5) # array([0.0, 1.54, -2.296, 0.0, -0.0, 0.0])
self.elbowTask = positionTask.positionTask(skel, array([0.3614, -0.0291, 0.0]).reshape((3, 1)), Kp=10, Kd=15, bodyNodeIndex=4) # array([0.3614, -0.0191, 0.0])
# # array([0.3614, -0.3191, 0.0]) for CMA-ES
def jointPositionControl(self, jointAngle):
dq = np.reshape(self.skel.dq, (self.skel.ndofs, 1))
q = np.reshape(self.skel.q, (self.skel.ndofs, 1))
error = jointAngle - q
error_dt = -dq
tau = 0.01*error + 0.001*error_dt
tau = tau.flatten()
print(self.skel.q)
return tau
def qpSolver(self, P, q, G=None, h=None, A=None, b=None, initvals=None):
if G is None and h is None:
args = [matrix(P), matrix(q)]
else:
args = [matrix(P), matrix(q), matrix(G), matrix(h)]
try:
sol = solvers.qp(*args)
except ValueError:
print("QP is infeasible")
return -1
if 'optimal' not in sol['status']:
print("QP fails, the status are: %s", sol)
return -1
# print(sol['x'])
jointAccl = np.array(sol['x']).reshape((q.shape[0], 1))
return jointAccl
def jointAccl2tau(self, jointAccl):
tau = self.skel.M.dot(jointAccl)
return tau
def jointAccl2tauGravityCompensation(self, jointAccl):
tau = self.skel.M.dot(jointAccl) + self.skel.coriolis_and_gravity_forces().reshape((self.skel.ndofs, 1))\
- self.skel.constraint_forces().reshape((self.skel.ndofs, 1))
return tau
def compute(self):
"""!@brief
Would be called automatically by "step()" function of the world object
"""
time = np.array([self.skel.world.t])
priorities = self.rbfn.calOutput(time).T
# priorities = ones((3, 1))*0.5
positionMatricies = self.positionTask.cal_QP_Matricies()
jointAngleMatricies = self.jointAngelTask.cal_QP_Matricies()
elbowMatricies = self.elbowTask.cal_QP_Matricies()
Q = np.identity(self.skel.ndofs)
positionArgs = [positionMatricies[0] + Q, positionMatricies[1].T]
jointAngleArgs = [jointAngleMatricies[0] + Q, jointAngleMatricies[1].T]
elbowArgs = [elbowMatricies[0] + Q, elbowMatricies[1].T]
acc_lower = 0.3  # joint accelerations are bounded below by -0.3, encoded as -I * ddq <= acc_lower
positionArgs = [positionMatricies[0] + Q, positionMatricies[1].T, -1*np.identity(6), acc_lower*np.ones((6,1))]
jointAngleArgs = [jointAngleMatricies[0] + Q, jointAngleMatricies[1].T, -1*np.identity(6), acc_lower*np.ones((6,1))]
elbowArgs = [elbowMatricies[0] + Q, elbowMatricies[1].T, -1*np.identity(6), acc_lower*np.ones((6,1))]
task_jointAccl = zeros((self.skel.ndofs, 0))
task_jointAccl = np.column_stack((task_jointAccl, self.qpSolver(*positionArgs)))
task_jointAccl = np.column_stack((task_jointAccl, self.qpSolver(*jointAngleArgs)))
task_jointAccl = np.column_stack((task_jointAccl, self.qpSolver(*elbowArgs)))
total_jointAccl = dot(task_jointAccl, priorities)
'''single task'''
#total_jointAccl = dot(task_jointAccl, priorities) *0.333
if self.setGravity is True:
tau = self.jointAccl2tauGravityCompensation(total_jointAccl)
else:
tau = self.jointAccl2tau(total_jointAccl)
tau = tau.flatten()
# print(self.skel.bodynodes[4].world_transform())
# print(self.skel.q)
#print(self.skel.world.t)
return tau
```
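`qpSolver` delegates to `cvxopt.solvers.qp`, which solves min 0.5*x'Px + q'x subject to Gx <= h. A standalone sketch with a hypothetical 2-variable problem shows the matrix layout the controller builds (P from the task matrices plus the identity regularizer Q, and G = -I, h = acc_lower for the acceleration bound):
```python
import numpy as np
from cvxopt import matrix, solvers

solvers.options['show_progress'] = False

# Hypothetical QP: minimize 0.5*x'Px + q'x  subject to  -x <= 0.3  (i.e. x >= -0.3).
P = matrix(np.array([[2.0, 0.0],
                     [0.0, 2.0]]))
q = matrix(np.array([-2.0, -5.0]).reshape(2, 1))
G = matrix(-np.eye(2))
h = matrix(0.3 * np.ones((2, 1)))

sol = solvers.qp(P, q, G, h)
x = np.array(sol['x']).flatten()
print(sol['status'], x)   # -> optimal [1.  2.5]
```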
#### File: JianLi0-0/Learning-soft-priorities-with-constrained-Bayesian-optimization/learningPriorities.py
```python
from singleEpisode import evaluationInter
import GPy
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import safeopt
from RBFN import RBFN
from safeopt import linearly_spaced_combinations
runtime = 26
minFinalError = 100
optimalSolution = 0
optimalEvaluation = 0
inputDimension = 15
# evaluation function
def f(x):
evaluation = evaluationInter.evaluationInter(runtime=runtime)
evaluation.updateHyperParams(x)
result = evaluation.evaluation()
y0 = np.ones((1,1))*result[0]
# seek maximum
return y0
def f_c(x):
evaluation = evaluationInter.evaluationInter(runtime=runtime)
evaluation.updateHyperParams(x)
result = evaluation.evaluation()
return np.hstack([np.ones((1,1))*result[0], np.ones((1,1))*result[1]])
def cmaesf(x):
evaluation = evaluationInter.evaluationInter(runtime=runtime)
evaluation.updateHyperParams(x)
result = evaluation.evaluation()
y0 = -np.ones((1,1))*result[0]
# seek minimum
return y0[0]
'''def bof(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15):
x = np.array([x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15])
evaluation = evaluationInter.evaluationInter(runtime=runtime)
evaluation.updateHyperParams(x)
result = evaluation.evaluation()
y0 = np.ones((1,1))*result[0]
# seek maximum
return y0[0][0]'''
def bo_penalty_f(x):
evaluation = evaluationInter.evaluationInter(runtime=runtime)
evaluation.updateHyperParams(x)
result = evaluation.evaluation()
if result[1] < 0:
result[0] = result[0] + result[1]/500.0 # penalty item
return result
def initiateSafeSet(inputDimension):
y = np.ones((1,1))
while True:
x = (0.5-np.random.random((1, inputDimension)))*10
evaluation = evaluationInter.evaluationInter(runtime=runtime)
evaluation.updateHyperParams(x)
result = evaluation.evaluation()
y = result[0]
if y < -0.7 and y > -0.8 and result[2] is False:
print('initial x :')
print(x)
print('initial y :')
print(y)
return x
return None
def cmaes_initiateSafeSet(inputDimension):
while True:
x = (0.5-np.random.random((1, inputDimension)))*10
y = cmaesf(x)
evaluation = evaluationInter.evaluationInter(runtime=runtime)
evaluation.updateHyperParams(x)
result = evaluation.evaluation()
y = result[0]
if y < -0.7 and y > -0.8 and result[2] is False:
print('initial x :')
print(x)
print('initial y :')
print(y)
return x
return x
if __name__ == '__main__':
'''[ 3.98756312 -4.54692078 -4.20240821 3.37581306 -2.04208146 4.58879403
0.60741451 -2.48077861 -0.49666369 0.76734985 0.04636265 1.39900566
-2.89473381 -0.97787073 -2.90685335]
'''
# plot
plt.close()
plt.ion()
# Measurement noise
noise_var = 0.1 ** 2
noise_var2 = 1e-2
# Bounds on the inputs variable
bounds = []
for i in range(inputDimension):
bounds.append((-10, 10))
# Define Kernel
# kernel = GPy.kern.RBF(input_dim=len(bounds), variance=2., lengthscale=1.0, ARD=True)
kernel = GPy.kern.Matern52(input_dim=inputDimension, ARD=True)
kernel2 = kernel.copy()
# Initial safe point
x0 = np.zeros((1, inputDimension))
x0 = initiateSafeSet(inputDimension)
'''x0 = np.array([-4.95957468, -3.61009445, 1.67003869, -4.88427162, -4.46011329, -1.38214976,
2.15057211, -3.69813929, 4.4843571, 4.82027238, -0.72567684, -1.53831128,
2.31816139, 2.08594099, 4.1214262]).reshape(1,15)'''
# The statistical model of our objective function and safety constraint
y0 = f(x0)
xs = [0, 0]
ys = [0, y0[0][0]]
list_y = [y0[0][0]]
print("initial x0 : ", x0)
print("initial y0 : ", y0)
# fitness value and constraints
# gp = GPy.models.GPRegression(x0, y0, kernel, noise_var=noise_var)
y0 = f_c(x0)
gp = GPy.models.GPRegression(x0, y0[:, 0, None], kernel, noise_var=noise_var)
gp2 = GPy.models.GPRegression(x0, y0[:, 1, None], kernel2, noise_var=noise_var2)
# The optimization routine
fmin = -10
cmin = 0.1 #safety distance to obstacle
# opt = safeopt.SafeOptSwarm(gp, fmin=fmin, bounds=bounds, threshold=0.2)
while max(list_y) < -0.43:
opt = safeopt.SafeOptSwarm([gp, gp2], [-np.inf, cmin], bounds=bounds, threshold=0.2)
#parameter_set = linearly_spaced_combinations(bounds=bounds, num_samples = 1000)
#opt = safeopt.SafeOpt([gp, gp2], parameter_set, [-np.inf, 0.], lipschitz=None, threshold=0.1)
y0 = f(x0)
xs = [0, 0]
ys = [0, y0[0][0]]
list_y = [y0[0][0]]
plt.figure()
plt.title('Fitness Value')
fitnessData = [y0[0][0]]
counter = [0]
for i in range(250):
# Obtain next query point
x_next = opt.optimize() ######### ucb=False #########
print("next x : ", x_next)
# Get a measurement from the real system
y_meas = f_c(x_next)
print("evaluation : ", y_meas)
# Add this to the GP model
opt.add_new_data_point(x_next, y_meas)
list_y.append(y_meas[0][0])
print('order:')
print(i)
# plot
if i % 5 == 0:
xs[0] = xs[1]
ys[0] = ys[1]
xs[1] = i+5
counter.append(i)
ys[1] = max(list_y)
fitnessData.append(ys[1])
plt.plot(xs, ys, color='y')
plt.pause(0.1)
txtData = np.column_stack((counter, fitnessData))
np.savetxt("fitnessValue.txt", txtData)
'''Optimal Y: [-0.48156585]
Optimal X: [-5.45050531 0.34979447 4.12126481 -0.43104242 -5.04246921 1.50982994
2.09085143 2.27027575 0.82268795 3.28250759 -2.9581665 -0.07979856
1.18092941 -3.61545808 -0.77355514]'''
'''Optimal Y: [-0.40701588]
Optimal X: [ 7.69542589 0.85522206 -1.20877597 -0.187846 -2.39027029 6.35187958
4.5719057 -2.84564475 2.00812702 -2.17746402 -2.06790978 0.50882218
2.5806161 -3.51874267 3.24561909]'''
[optimalX, optimalY] = opt.get_maximum()
print('Optimal Y: ', optimalY)
print('Optimal X: ', optimalX)
rbfn = RBFN.RBFN(1, 5, 3, runtime) # indim, numCenters, outdim, time
rbfn.setHyperParams(optimalX.reshape(5, 3))
timeInterval = safeopt.linearly_spaced_combinations([(0, runtime)], runtime*10)
priorities = np.zeros((3, 0))
for time in timeInterval:
priorities = np.column_stack((priorities, rbfn.calOutput(time).T))
# print(priorities)
plt.figure()
plt.title('Task Priorities')
plt.plot(timeInterval,priorities[0], color='r')
plt.plot(timeInterval,priorities[1], color='g')
plt.plot(timeInterval,priorities[2], color='b')
# plot robot states, joint angle, vel, accel
evaluation = evaluationInter.evaluationInter(runtime=runtime)
evaluation.updateHyperParams(optimalX)
evaluation.evaluation()
JointAngleCurve = evaluation.getJointAngleCurve()
JointVelocityCurve = evaluation.getJointVelocityCurve()
JointAccelerationCurve = evaluation.getJointAccelerationCurve()
plt.figure()
plt.title('Joint Angle')
for angle in JointAngleCurve:
plt.plot(timeInterval, angle)
plt.figure()
plt.title('Joint Velocity')
for vel in JointVelocityCurve:
plt.plot(timeInterval, vel)
plt.figure()
plt.title('Joint Acceleration')
for accel in JointAccelerationCurve:
plt.plot(timeInterval, accel)
plt.ioff()
print('Optimal Y: ', optimalY)
print('Optimal X: ', optimalX)
plt.show()
'''x_next = np.array([ -6.86710281, 0.32646077, -5.7733384, -1.81021915, -9.66391179,
-6.14784208, -0.10733062, -5.57970494, -8.68322761, -10.34465487,
1.70166791, 1.29842112, -8.83978695, -2.43083296, -10.04379552])'''
displayResult = evaluationInter.evaluationInter(runtime=runtime)
displayResult.updateHyperParams(optimalX)
print(displayResult.evaluation())
displayResult.guiStatic()
'''displayResult = evaluationInter.evaluationInter(runtime=runtime)
displayResult.updateHyperParams(optimalX)
displayResult.gui()'''
```
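The 15-dimensional variable optimized above is just the flattened 5x3 weight matrix of the RBFN that produces the three task priorities over time. A short sketch of that mapping, reusing only the RBFN calls that appear in the script and the controller (the RBFN internals themselves are an assumption):
```python
import numpy as np
from RBFN import RBFN

runtime = 26
x = (0.5 - np.random.random((1, 15))) * 10      # same sampling range as initiateSafeSet

rbfn = RBFN.RBFN(1, 5, 3, runtime)              # indim, numCenters, outdim, time
rbfn.setHyperParams(x.reshape(5, 3))            # 15 parameters -> 5 centers x 3 tasks
for t in np.linspace(0, runtime, 5):
    priorities = rbfn.calOutput(np.array([t])).T    # (3, 1) soft priorities at time t
    print(t, priorities.flatten())
```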
#### File: Learning-soft-priorities-with-constrained-Bayesian-optimization/utils/listToArray.py
```python
import numpy
def listToArray(input_list):
n = len(input_list)
temp_array = numpy.zeros(n)
for ii in range(0, n):
temp_array[ii] = input_list[ii]
return temp_array.reshape((n, 1))
```
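`listToArray` is equivalent to a single NumPy call; a sketch of the shorter form (same (n, 1) float column and dtype):
```python
import numpy

def list_to_column(input_list):
    # Same result as listToArray, without the explicit copy loop.
    return numpy.asarray(input_list, dtype=float).reshape((len(input_list), 1))

print(list_to_column([1, 2, 3]))   # -> [[1.] [2.] [3.]]
```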
#### File: Learning-soft-priorities-with-constrained-Bayesian-optimization/worlds/worldR650.py
```python
import pydart2 as pydart
import numpy as np
from controllers import manipulatorController
class r650(pydart.World):
def __init__(self, ):
# pydart.World.__init__(self, 0.001)
pydart.World.__init__(self, 0.001, "./data/skel/two_cubes.skel")
self.set_gravity([0.0, -9.81, 0.0])
# self.set_gravity([0.0, -9.81, 0.0])
print('pydart create_world OK')
self.robot = self.add_skeleton("./data/KR5/KR5 sixx R650.urdf")
print('pydart add_skeleton OK')
# Initialize the controller
# self.controller = controller.Controller(self.robot)
# self.robot.set_controller(self.controller)
print('create controller OK')
'''def step(self, ):
super(r650, self).step()
print(self.collision_result.num_contacts())'''
``` |
{
"source": "jianliang90/MedCommon",
"score": 2
} |
#### File: detection/datasets/position_detection_common_ds.py
```python
import os
import shutil
import sys
MEDCOMMON_ROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.path.pardir, os.path.pardir)
sys.path.append(MEDCOMMON_ROOT)
sys.path.append(os.path.join(MEDCOMMON_ROOT, 'external_lib'))
from utils.data_io_utils import DataIO
from utils.mask_bounding_utils import MaskBoundingUtils
from utils.detection_utils import DETECTION_UTILS
from utils.datasets_utils import DatasetsUtils
import SimpleITK as sitk
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import numpy as np
from tqdm import tqdm
import json
def extract_boundary_info(mask_root, out_file):
os.makedirs(os.path.dirname(out_file), exist_ok=True)
info_dict = {}
for filename in tqdm(os.listdir(mask_root)):
mask_file = os.path.join(mask_root, filename)
boundary_info = MaskBoundingUtils.extract_mask_file_bounding(mask_file)
image = sitk.ReadImage(mask_file)
image_shape = image.GetSize()
info = list(boundary_info) + list(image_shape)
info_dict[filename] = [int(i) for i in info]
with open(out_file, 'w') as f:
f.write(json.dumps(info_dict))
print('====> extract_boundary_info finished!')
def generate_resampled_pairs_unsame_resolution(data_root, out_root, dst_size):
image_root = os.path.join(data_root, 'images')
mask_root = os.path.join(data_root, 'masks')
out_image_root = os.path.join(out_root, 'images')
out_mask_root = os.path.join(out_root, 'masks')
image_postfix='.nii.gz'
mask_postfix='.nii.gz'
DatasetsUtils.resample_image_mask_unsame_resolution_multiprocess(
image_root, mask_root,
out_image_root, out_mask_root, dst_size,
image_postfix, mask_postfix
)
class PositionDetectionDS(Dataset):
def __init__(self, root, image_shape=[128,128,128], boundary_info_file=None) -> None:
super().__init__()
self.root = root
self.image_root = os.path.join(self.root, 'images')
self.mask_root = os.path.join(self.root, 'masks')
self.image_files = []
self.targets = []
boundary_infos = None
if boundary_info_file:
with open(boundary_info_file) as f:
boundary_infos = json.loads(f.read())
for filename in tqdm(os.listdir(self.image_root)):
image_file = os.path.join(self.image_root, filename)
mask_file = os.path.join(self.mask_root, filename)
if not os.path.exists(image_file):
continue
if not os.path.exists(mask_file):
continue
if boundary_info_file:
z_min, y_min, x_min, z_max, y_max, x_max = boundary_infos[filename][:6]
in_shape = boundary_infos[filename][6:]
else:
z_min, y_min, x_min, z_max, y_max, x_max = MaskBoundingUtils.extract_mask_file_bounding(mask_file)
in_image = sitk.ReadImage(image_file)
in_shape = in_image.GetSize()
self.image_files.append(image_file)
x_min, y_min, z_min = DETECTION_UTILS.point_coordinate_resampled(in_shape, image_shape, [x_min, y_min, z_min])
x_max, y_max, z_max = DETECTION_UTILS.point_coordinate_resampled(in_shape, image_shape, [x_max, y_max, z_max])
# Normalize the box corners to [0, 1] by the network input shape
x_min /= image_shape[0]
x_max /= image_shape[0]
y_min /= image_shape[1]
y_max /= image_shape[1]
z_min /= image_shape[2]
z_max /= image_shape[2]
self.targets.append(np.array([[z_min, y_min, x_min, z_max, y_max, x_max]]))
# if self.image_files.__len__() > 2:
# break
def __len__(self):
return len(self.image_files)
def __getitem__(self, index):
image_file = self.image_files[index]
image = sitk.ReadImage(image_file)
arr = sitk.GetArrayFromImage(image)
image_tensor = torch.from_numpy(arr).float()
image_tensor = image_tensor.unsqueeze(0)
target = self.targets[index]
return image_tensor, target, image_file
def test_PositionDetectionDS():
root = '/data/medical/brain/cerebral_parenchyma/exp/cta'
boundary_info_file='/data/medical/brain/cerebral_parenchyma/exp/cta/config/mask_boundary_info.json'
ds = PositionDetectionDS(root, boundary_info_file=boundary_info_file)
dataloader = DataLoader(ds, batch_size=1)
for index, (images, targets, _) in enumerate(dataloader):
print(images.shape)
print(targets)
if __name__ == '__main__':
# extract_boundary_info(mask_root='/data/medical/brain/cerebral_parenchyma/exp/cta/masks', out_file='/data/medical/brain/cerebral_parenchyma/exp/cta/config/mask_boundary_info.json')
# extract_boundary_info(mask_root='/data/medical/cardiac/seg/coronary/coronary_ori/masks', out_file='/data/medical/cardiac/seg/coronary/coronary_ori/config/mask_boundary_info.json')
extract_boundary_info(mask_root='/data/medical/brain/cerebral_parenchyma/exp/cta_256/masks', out_file='/data/medical/brain/cerebral_parenchyma/exp/cta_256/config/mask_boundary_info.json')
extract_boundary_info(mask_root='/data/medical/cardiac/seg/coronary/coronary_ori_256/masks', out_file='/data/medical/cardiac/seg/coronary/coronary_ori_256/config/mask_boundary_info.json')
# test_PositionDetectionDS()
# Resample all cases to the same size so training can process them in parallel batches
# generate_resampled_pairs_unsame_resolution('/data/medical/brain/cerebral_parenchyma/exp/cta', '/data/medical/brain/cerebral_parenchyma/exp/cta_256', [256,256,256])
# generate_resampled_pairs_unsame_resolution('/data/medical/cardiac/seg/coronary/coronary_ori', '/data/medical/cardiac/seg/coronary/coronary_ori_256', [256,256,256])
```
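`PositionDetectionDS` maps each mask bounding box from the original volume grid to the fixed network input size and then divides by that size, so the stored target is the box expressed as fractions of the volume extent. The NumPy sketch below illustrates the round trip with hypothetical shapes, using plain proportional scaling in place of `DETECTION_UTILS.point_coordinate_resampled`:
```python
import numpy as np

in_shape = np.array([512, 512, 320])       # hypothetical original volume size
net_shape = np.array([128, 128, 128])      # fixed network input size

corner = np.array([100, 200, 50])          # one box corner in original voxel indices
resampled = corner * net_shape / in_shape  # proportional mapping onto the 128^3 grid
normalized = resampled / net_shape         # == corner / in_shape, the stored target

restored = np.rint(normalized * in_shape).astype(int)   # back to original voxel indices
print(normalized, restored)                # restored ~= corner
```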
#### File: detection/runner/regression_detection_trainner.py
```python
import os
import sys
import torch
import torch.nn as nn
MEDCOMMON_ROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.path.pardir, os.path.pardir)
sys.path.append(MEDCOMMON_ROOT)
sys.path.append(os.path.join(MEDCOMMON_ROOT, 'external_lib'))
from tqdm import tqdm
import time
import numpy as np
from utils.misc_utils import AverageMeter
from detection.models.detection_auto_resample import RegressionDetecter
from detection.datasets.position_detection_common_ds import PositionDetectionDS
from utils.detection_utils import PYTORCH_TENSOR_DETECTION_UTILS
from utils.lr_adjust_utils import LR_ADJUST_UTILS
from detection.models.detection_auto_resample import RegressionDetecter
from detection.datasets.position_detection_common_ds import PositionDetectionDS
from utils.detection_utils import PYTORCH_TENSOR_DETECTION_UTILS
from utils.detection_utils import DETECTION_UTILS
from torch.utils.data import Dataset, DataLoader
from utils.data_io_utils import DataIO
import SimpleITK as sitk
import argparse
def parse_args():
parser = argparse.ArgumentParser(description='common detection algorithm')
parser.add_argument('--dataroot', type=str, default='/data/medical/brain/cerebral_parenchyma/exp/cta', help='path to the dataset root (expects images/ and masks/ subfolders)')
parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
parser.add_argument('--weights', type=str, default=None, help='pretrained weights file')
parser.add_argument('--pretrained', type=str, default=None, help='pretrained weights for backbone')
parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam')
parser.add_argument('--boundary_info_file', type=str, default='/data/medical/brain/cerebral_parenchyma/exp/cta/config/mask_boundary_info.json', help='file to record mask boundary information')
parser.add_argument('--batch_size', type=int, default=1)
parser.add_argument('--n_objects', type=int, default=1, help='n objects to be detected')
parser.add_argument('--input_shape', nargs='+', type=int, default=[128,128,128])
parser.add_argument('--aug', default='inference')
parser.add_argument('--arch', default='resnet18')
parser.add_argument('--n_epochs', type=int, default=100)
return parser.parse_args()
class RegressionDetectionTrainer:
def __init__(self) -> None:
pass
@staticmethod
def train_one_epoch(dataloader, model, criterion, optimizer, epoch, display, phase='train', opt=None):
if phase == 'train':
model.train()
else:
model.eval()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
end = time.time()
logger = []
ious = np.array([], dtype=float)
for num_iter, (images, targets, names) in tqdm(enumerate(dataloader)):
data_time.update(time.time() - end)
output = model(images.cuda())
if criterion is not None:
loss = criterion(output, targets.cuda())
else:
iou,_ = PYTORCH_TENSOR_DETECTION_UTILS.calc_brick_iou(output, targets.view(targets.shape[0],-1).cuda())
l1_loss = torch.nn.L1Loss()(output, targets.view(targets.shape[0],-1).cuda())
loss = l1_loss*10 + (1-iou.mean())
ious = np.append(ious, iou.detach().cpu())
if phase == 'train':
optimizer.zero_grad()
loss.backward()
optimizer.step()
batch_time.update(time.time()-end)
end = time.time()
losses.update(loss.detach().cpu().numpy(), len(images))
if (opt is not None and opt.rank == 0) or (opt is None):
if (num_iter+1)%display == 0:
print_info = '[{}]\tEpoch: [{}][{}/{}]\tTime {batch_time.val:3f} ({batch_time.avg:.3f})\tData {data_time.avg:.3f}\t''Loss {loss.avg:.4f}\t'.format(
phase, epoch, num_iter, len(dataloader), batch_time=batch_time, data_time=data_time, loss=losses)
print(print_info)
# print(losses.sum, '\t', losses.count)
# print(loss.detach().cpu().numpy())
logger.append(print_info)
return ious
@staticmethod
def load_model(opts):
n_objects = opts.n_objects
net_args = {
'input_size': [128, 128, 128],
'arch':opts.arch,
'pretrained': opts.pretrained
}
model = RegressionDetecter(n_objects, net_args)
if opts.weights:
model.load_state_dict(torch.load(opts.weights, map_location='cpu'))
return model
@staticmethod
def inference_one_case(model, series_path, is_dcm=True, dst_size=[128, 128, 128]):
model.eval()
if is_dcm:
image_data = DataIO.load_dicom_series(series_path)
else:
image_data = DataIO.load_nii_image(series_path)
image = image_data['sitk_image']
image_arr = sitk.GetArrayFromImage(image)
image_tensor = torch.from_numpy(image_arr).unsqueeze(0).unsqueeze(0).float()
with torch.no_grad():
output = model(image_tensor.cuda())
z_min, y_min, x_min, z_max, y_max, x_max = output.detach().cpu().numpy()[0]
image_shape = image.GetSize()
out_coord_min = DETECTION_UTILS.restore_normalized_coordinate([x_min, y_min, z_min], image_shape)
out_coord_max = DETECTION_UTILS.restore_normalized_coordinate([x_max, y_max, z_max], image_shape)
print('predicted box (voxel coordinates): min {}, max {}'.format(out_coord_min, out_coord_max))
return out_coord_min, out_coord_max
def train():
opts = parse_args()
root = opts.dataroot
boundary_info_file = opts.boundary_info_file
ds = PositionDetectionDS(root, boundary_info_file=boundary_info_file)
dataloader = DataLoader(ds, batch_size=opts.batch_size)
n_objects = opts.n_objects
net_args = {
'input_size': [128, 128, 128],
'arch':opts.arch,
'pretrained': opts.pretrained
}
model = RegressionDetecter(n_objects, net_args)
if opts.weights:
model.load_state_dict(torch.load(opts.weights, map_location='cpu'))
criterion = None
lr = opts.lr
optimizer = torch.optim.Adam(model.parameters(), lr=opts.lr)
n_epochs = opts.n_epochs
best_iou = 0
for epoch in range(n_epochs):
LR_ADJUST_UTILS.adjust_learning_rate(optimizer, epoch, opts.lr, True, n_epochs)
ious = RegressionDetectionTrainer.train_one_epoch(dataloader, model.cuda(), criterion, optimizer, epoch, 1, phase='train', opt=None)
if ious.mean() > best_iou:
best_iou = ious.mean()
print('current best train iou is:\t{}'.format(best_iou))
model_dir = os.path.join(opts.checkpoints_dir, opts.name)
os.makedirs(model_dir, exist_ok=True)
saved_model_path = os.path.join(model_dir, 'common_det_epoch_{}_train_{:.3f}'.format(epoch, best_iou))
torch.save(model.state_dict(), saved_model_path)
print('====> save model:\t{}'.format(saved_model_path))
print('hello world!')
def inference(infile, weights=None):
opts = parse_args()
if weights:
opts.weights = weights
model = RegressionDetectionTrainer.load_model(opts)
RegressionDetectionTrainer.inference_one_case(model.cuda(), infile, False)
def test_RegressionDetectionTrainer():
root = '/data/medical/brain/cerebral_parenchyma/exp/cta'
boundary_info_file='/data/medical/brain/cerebral_parenchyma/exp/cta/config/mask_boundary_info.json'
ds = PositionDetectionDS(root, boundary_info_file=boundary_info_file)
dataloader = DataLoader(ds, batch_size=1)
model = RegressionDetecter(1)
criterion = None
lr = 1e-3
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
n_epochs = 100
best_iou = 0
for epoch in range(n_epochs):
LR_ADJUST_UTILS.adjust_learning_rate(optimizer, epoch, lr, True, n_epochs)
ious = RegressionDetectionTrainer.train_one_epoch(dataloader, model.cuda(), criterion, optimizer, epoch, 1, phase='train', opt=None)
if ious.mean() > best_iou:
best_iou = ious.mean()
print('current best train iou is:\t{}'.format(best_iou))
# checkpoint saving is handled in train(); this test routine only reports the best IoU
if __name__ == '__main__':
# test_RegressionDetectionTrainer()
train()
# inference('/data/medical/brain/cerebral_parenchyma/exp/cta/images/1.3.12.2.1107.5.1.4.60320.30000018121200035049700008803.nii.gz', '/home/zhangwd/code/work/MedCommon/detection/runner/checkpoints/experiment_name/common_det_epoch_24_train_0.830')
# inference('/data/medical/brain/cerebral_parenchyma/exp/cta/images/1.3.12.2.1107.5.1.4.60320.30000018121200035049700008803.nii.gz', '/home/zhangwd/code/work/MedCommon/detection/runner/checkpoints/experiment_name/common_det_epoch_99_train_0.950')
```
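The training loss above combines an L1 term with `1 - IoU`, where `calc_brick_iou` presumably returns the axis-aligned 3-D IoU of the predicted and target boxes in normalized coordinates. A standalone torch sketch of that quantity (an assumption about its semantics, with hypothetical boxes ordered z_min, y_min, x_min, z_max, y_max, x_max):
```python
import torch

def brick_iou(pred, target):
    """Axis-aligned 3D IoU for boxes given as (zmin, ymin, xmin, zmax, ymax, xmax)."""
    lo = torch.max(pred[:, :3], target[:, :3])
    hi = torch.min(pred[:, 3:], target[:, 3:])
    inter = torch.clamp(hi - lo, min=0).prod(dim=1)
    vol_p = torch.clamp(pred[:, 3:] - pred[:, :3], min=0).prod(dim=1)
    vol_t = torch.clamp(target[:, 3:] - target[:, :3], min=0).prod(dim=1)
    return inter / (vol_p + vol_t - inter + 1e-8)

pred = torch.tensor([[0.10, 0.10, 0.10, 0.60, 0.60, 0.60]])
target = torch.tensor([[0.20, 0.20, 0.20, 0.70, 0.70, 0.70]])
print(brick_iou(pred, target))   # -> roughly 0.34
```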
#### File: lung_airway_coarse_seg/lung_airway_coarse_seg_20201116/train.py
```python
import os
import sys
import numpy as np
MEDCOMMON_ROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.path.pardir, os.path.pardir, os.path.pardir, os.path.pardir)
print(MEDCOMMON_ROOT)
CURRENT_EXP_ROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.path.pardir, os.path.pardir)
sys.path.append(MEDCOMMON_ROOT)
sys.path.append(os.path.join(MEDCOMMON_ROOT, 'segmentation/external_lib/MedicalZooPytorch'))
sys.path.append(CURRENT_EXP_ROOT)
from segmentation.external_lib.MedicalZooPytorch.lib.medzoo.Unet3D import UNet3D
from segmentation.external_lib.MedicalZooPytorch.lib.losses3D.dice import DiceLoss
from segmentation.models.unet3d_auto_resample import ResampledUnet3D
from datasets.lung_seg_datasets import AirwayCoarseSeg_DS
from utils.data_io_utils import DataIO
import torch
import torch.nn
from torch.autograd import Variable
import torch.optim as optim
from torch.utils.data import DataLoader
from utils.misc_utils import AverageMeter
import time
from tqdm import tqdm
import argparse
import SimpleITK as sitk
import fire
class Options():
def __init__(self):
self.lr = 1e-3
self.epochs = 100
self.lr_fix = 50
self.display = 2
self.model_dir = './output/seg/model'
def train_one_epoch(dataloader, model, criterion, optimizer, epoch, display, phase='train'):
# the original model.train() call is disabled; the model is kept in eval mode for both
# phases as written, while gradients are still computed and applied in the 'train' phase
model.eval()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
logger = []
end = time.time()
final_pred = None
final_gt = None
for num_iter, (images, masks, image_files, mask_files) in tqdm(enumerate(dataloader)):
data_time.update(time.time() - end)
output = model(images.cuda())
final_pred = output
final_gt = masks
loss = criterion(output, masks.cuda())[0]
if phase == 'train':
optimizer.zero_grad()
loss.backward()
optimizer.step()
batch_time.update(time.time()-end)
end = time.time()
losses.update(loss.detach().cpu().numpy(), len(images))
if (num_iter+1)%display == 0:
print_info = '[{}]\tEpoch: [{}][{}/{}]\tTime {batch_time.val:3f} ({batch_time.avg:.3f})\tData {data_time.avg:.3f}\t''Loss {loss.avg:.4f}\t'.format(
phase, epoch, num_iter, len(dataloader), batch_time=batch_time, data_time=data_time, loss=losses)
print(print_info)
logger.append(print_info)
pred_mask = torch.sigmoid(output).argmax(1)
pred_mask_uint8 = np.array(pred_mask.detach().cpu().numpy(), np.uint8)
gt_mask_uint8 = np.array(masks.numpy(), np.uint8)
pred_mask_sitk = sitk.GetImageFromArray(pred_mask_uint8[0])
gt_mask_sitk = sitk.GetImageFromArray(gt_mask_uint8[0])
sitk.WriteImage(pred_mask_sitk, 'pred_mask_{}.nii.gz'.format(epoch))
sitk.WriteImage(gt_mask_sitk, 'gt_mask_{}.nii.gz'.format(epoch))
return losses.avg, logger
def inference(img_path, model_pth, out_dir, is_dcm=True):
'''
debug cmd: inference('/data/medical/lung/changzheng/airway/airway_20201030/images/1.2.840.113704.1.111.10192.1571886399.11', './airway_coarse_seg_train_0.020_val_0.052', './output/seg/inference')
invoke cmd: python train.py inference '/data/medical/lung/changzheng/airway/airway_20201030/images/1.2.840.113704.1.111.10192.1571886399.11' './airway_coarse_seg_train_0.020_val_0.052' './output/seg/inference' True
invoke cmd: python train.py inference '/data/medical/lung/LUNA/RAW_NII/1.3.6.1.4.1.14519.5.2.1.6279.6001.106164978370116976238911317774.nii.gz' './airway_coarse_seg_train_0.020_val_0.052' './output/seg/inference' False
'''
print(img_path)
if is_dcm:
data = DataIO.load_dicom_series(img_path)
else:
data = DataIO.load_nii_image(img_path)
image_arr = data['image']
sitk_img = data['sitk_image']
image_tensor = torch.from_numpy(image_arr).unsqueeze(0).unsqueeze(0).float()
num_classes = 4
model = ResampledUnet3D(1, num_classes)
assert model_pth is not None
model.load_state_dict(torch.load(model_pth))
model = torch.nn.DataParallel(model).cuda()
model.eval()
with torch.no_grad():
output = model(image_tensor.cuda())
# The sigmoid below is computed on the CPU: volumes as large as 350x768x768 have caused GPU out-of-memory errors when it runs on the GPU.
# if image_arr.shape[2] > 512:
# pred_mask = torch.sigmoid(output.detach().cpu()).argmax(1)
# else:
# pred_mask = torch.sigmoid(output).argmax(1)
pred_mask = torch.sigmoid(output.detach().cpu()).argmax(1)
pred_mask_uint8 = np.array(pred_mask.detach().cpu().numpy(), np.uint8)
pred_mask_uint8 = pred_mask_uint8[0]
in_sitk_mask = sitk.GetImageFromArray(pred_mask_uint8)
in_sitk_mask.CopyInformation(sitk_img)
os.makedirs(out_dir, exist_ok=True)
sitk.WriteImage(sitk_img, os.path.join(out_dir, 'image_raw.nii.gz'))
sitk.WriteImage(in_sitk_mask, os.path.join(out_dir, 'mask_pred.nii.gz'))
print('hello world!')
def main():
opts = Options()
image_root = '/fileser/zhangfan/DataSet/airway_segment_data/train_lung_airway_data/image_refine/ori_128_128_128'
mask_root = '/fileser/zhangfan/DataSet/airway_segment_data/train_lung_airway_data/mask_refine/ori_128_128_128'
train_config_file = '/fileser/zhangfan/DataSet/airway_segment_data/csv/train_filename.csv'
val_config_file = '/fileser/zhangfan/DataSet/airway_segment_data/csv/val_filename.csv'
crop_size = [128, 128, 128]
# crop_size = [64, 64, 64]
num_classes = 4
train_ds = AirwayCoarseSeg_DS(image_root, mask_root, train_config_file, crop_size)
train_dataloader = DataLoader(train_ds, batch_size=8, pin_memory=True, num_workers=2, drop_last=True)
val_ds = AirwayCoarseSeg_DS(image_root, mask_root, val_config_file, crop_size)
val_dataloader = DataLoader(val_ds, batch_size=4, pin_memory=False, num_workers=2, drop_last=True)
model = ResampledUnet3D(1, num_classes)
criterion = DiceLoss(num_classes).cuda()
optimizer = torch.optim.Adam([{'params': model.parameters()}], lr=opts.lr, betas=(0.9, 0.999))
best_loss = 1
for epoch in range(opts.epochs):
loss_train, _ = train_one_epoch(train_dataloader, torch.nn.DataParallel(model).cuda(), criterion, optimizer, epoch, opts.display)
loss, _ = train_one_epoch(val_dataloader, torch.nn.DataParallel(model).cuda(), criterion, optimizer, epoch, opts.display, 'val')
if loss < best_loss:
best_loss = loss
print('current best val loss is:\t{}'.format(best_loss))
os.makedirs(opts.model_dir, exist_ok=True)
saved_model_path = os.path.join(opts.model_dir, 'airway_coarse_seg_train_{:.3f}_val_{:.3f}'.format(loss_train, loss))
torch.save(model.cpu().state_dict(), saved_model_path)
print('====> save model:\t{}'.format(saved_model_path))
if __name__ == '__main__':
# fire.Fire()
# main()
# inference('../data/seg/nii_file/1.3.12.2.1107.5.1.4.60320.30000015012900333934300003426/img.nii.gz', '../data/seg/model/cardiac_seg_train_0.013_val_0.020', '../data/seg/inference/test')
# inference('../../data/changzheng/airway/airway_20201030/paires_croped_by_coarse_lung_seg/images/1.2.840.113704.1.111.10192.1571886399.11.nii.gz', '../data/seg/model/cardiac_seg_train_0.105_val_0.095', '../data/seg/inference/test')
# inference('/data/medical/lung/changzheng/airway/airway_20201030/images/1.2.840.113704.1.111.10192.1571886399.11', './airway_coarse_seg_train_0.020_val_0.052', './output/seg/inference')
inference('/data/medical/lung/changzheng/airway/airway_20201030/images/1.3.46.670589.33.1.63725405821017542900002.4919856832254375598', './airway_coarse_seg_train_0.020_val_0.052', './output/seg/inference')
```
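A side note on the inference path above: the sigmoid is applied elementwise and is strictly increasing, so taking `argmax` over the raw logits produces exactly the same mask; the CPU offload could therefore skip the activation entirely. A quick check:
```python
import torch

logits = torch.randn(1, 4, 8, 8, 8)            # hypothetical (N, C, D, H, W) network output
mask_a = torch.sigmoid(logits).argmax(1)
mask_b = logits.argmax(1)
print(torch.equal(mask_a, mask_b))             # True: a monotonic activation preserves argmax
```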
#### File: kidney/datasets/data_preprocessing.py
```python
import os
import sys
from tqdm import tqdm
import shutil
ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir, os.path.pardir, os.path.pardir, os.path.pardir))
print(ROOT)
sys.path.append(ROOT)
from utils.datasets_utils import DatasetsUtils
from segmentation.datasets.common_seg_datasets import generate_resampled_pairs_unsame_resolution
data_root = '/data/medical/kidney/kits19'
# Reorganize the raw data into the images/ + masks/ layout expected by the segmentation pipeline
def copy_data(
src_data_root = '/data/medical/kidney/kits19/data',
dst_data_root = '/data/medical/kidney/kits19/ori'
):
os.makedirs(dst_data_root, exist_ok=True)
dst_img_root = os.path.join(dst_data_root, 'images')
dst_mask_root = os.path.join(dst_data_root, 'masks')
os.makedirs(dst_img_root, exist_ok=True)
os.makedirs(dst_mask_root, exist_ok=True)
for pid in tqdm(os.listdir(src_data_root)):
src_sub_root = os.path.join(src_data_root, pid)
src_img_file = os.path.join(src_sub_root, 'imaging.nii.gz')
src_mask_file = os.path.join(src_sub_root, 'segmentation.nii.gz')
if not os.path.isfile(src_img_file):
continue
if not os.path.isfile(src_mask_file):
continue
dst_img_file = os.path.join(dst_img_root, '{}.nii.gz'.format(pid))
dst_mask_file = os.path.join(dst_mask_root, '{}.nii.gz'.format(pid))
shutil.copyfile(src_img_file, dst_img_file)
shutil.copyfile(src_mask_file, dst_mask_file)
if __name__ == '__main__':
# copy_data()
generate_resampled_pairs_unsame_resolution(
'/data/medical/kidney/kits19/ori',
'/data/medical/kidney/kits19/ori_256',
[256,256,256]
)
```
#### File: lung/inference/inference.py
```python
import os
import sys
import numpy as np
ROOT = os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir, os.path.pardir, os.path.pardir, os.path.pardir)
sys.path.append(ROOT)
from segmentation.runner.train_seg import inference, train
from segmentation.options.train_options import TrainOptions
def load_inference_opts():
opts = TrainOptions().parse()
opts.num_classes = 2
opts.base_n_filter = 6
opts.dynamic_size = [256, 256, 256]
opts.weights = './checkpoints/chamber/common_seg_epoch_138_train_0.020'
return opts
```
#### File: gan/models/gan_networks.py
```python
import os
import sys
import torch
import torch.nn as nn
from torch.nn import init
import functools
from torch.optim import lr_scheduler
###############################################################################
# Helper Functions
###############################################################################
class Identity(nn.Module):
def forward(self, x):
return x
def get_norm_layer(norm_type='instance', in_discrimimator=False):
"""Return a normalization layer
Parameters:
norm_type (str) -- the name of the normalization layer: batch | instance | none
For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
"""
if norm_type == 'batch':
if in_discrimimator:
norm_layer = functools.partial(nn.BatchNorm3d, affine=True, track_running_stats=False)
else:
norm_layer = functools.partial(nn.BatchNorm3d, affine=True, track_running_stats=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm3d, affine=False, track_running_stats=False)
elif norm_type == 'none':
def norm_layer(x): return Identity()
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer
def get_scheduler(optimizer, opt):
"""Return a learning rate scheduler
Parameters:
optimizer -- the optimizer of the network
opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.
opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine
For 'linear', we keep the same learning rate for the first <opt.n_epochs> epochs
and linearly decay the rate to zero over the next <opt.n_epochs_decay> epochs.
For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
See https://pytorch.org/docs/stable/optim.html for more details.
"""
if opt.lr_policy == 'linear':
def lambda_rule(epoch):
lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.n_epochs) / float(opt.n_epochs_decay + 1)
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif opt.lr_policy == 'step':
scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
elif opt.lr_policy == 'plateau':
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
elif opt.lr_policy == 'cosine':
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.n_epochs, eta_min=0)
else:
return NotImplementedError('learning rate policy [%s] is not implemented', opt.lr_policy)
return scheduler
def init_weights(net, init_type='normal', init_gain=0.02):
"""Initialize network weights.
Parameters:
net (network) -- network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might
work better for some applications. Feel free to try yourself.
"""
def init_func(m): # define the initialization function
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if init_type == 'normal':
init.normal_(m.weight.data, 0.0, init_gain)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=init_gain)
elif init_type == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=init_gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm3d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
init.normal_(m.weight.data, 1.0, init_gain)
init.constant_(m.bias.data, 0.0)
print('initialize network with %s' % init_type)
net.apply(init_func) # apply the initialization function <init_func>
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[], opt=None):
"""Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights
Parameters:
net (network) -- the network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Return an initialized network.
"""
if opt is not None and 'distributed' in opt and opt.distributed is True:
# to do, add synchronize to initialize weights
import tempfile
import torch.distributed as dist
print('====> init distributed net!')
if not opt.continue_train:
checkpoint_path = os.path.join(tempfile.gettempdir(), "initial_weights.pt")
if opt.rank == 0:
init_weights(net, init_type, init_gain=init_gain)
torch.save(net.state_dict(), checkpoint_path)
dist.barrier()
net.load_state_dict(torch.load(checkpoint_path))
if opt.gpu is not None:
torch.cuda.set_device(opt.gpu)
net.cuda(opt.gpu)
net = torch.nn.parallel.DistributedDataParallel(net, device_ids=[opt.gpu], output_device=opt.gpu)
return net
if len(gpu_ids) > 0:
assert(torch.cuda.is_available())
net.to(gpu_ids[0])
net = torch.nn.DataParallel(net, gpu_ids) # multi-GPUs
init_weights(net, init_type, init_gain=init_gain)
return net
def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[], opt=None):
"""Create a generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
netG (str) -- the architecture's name: resnet_9blocks | resnet_6blocks | unet_256 | unet_128
norm (str) -- the name of normalization layers used in the network: batch | instance | none
use_dropout (bool) -- if use dropout layers.
init_type (str) -- the name of our initialization method.
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Returns a generator
Our current implementation provides two types of generators:
U-Net: [unet_128] (for 128x128 input images) and [unet_256] (for 256x256 input images)
The original U-Net paper: https://arxiv.org/abs/1505.04597
Resnet-based generator: [resnet_6blocks] (with 6 Resnet blocks) and [resnet_9blocks] (with 9 Resnet blocks)
Resnet-based generator consists of several Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code from <NAME>'s neural style transfer project (https://github.com/jcjohnson/fast-neural-style).
The generator has been initialized by <init_net>. It uses RELU for non-linearity.
"""
net = None
norm_layer = get_norm_layer(norm_type=norm)
if netG == 'resnet_9blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9)
elif netG == 'resnet_6blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6)
elif netG == 'unet_128':
net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
elif netG == 'unet_256':
net = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
else:
raise NotImplementedError('Generator model name [%s] is not recognized' % netG)
return init_net(net, init_type, init_gain, gpu_ids, opt)
# return ResnetGenerator(1,1, 32, n_blocks=6)
def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, gpu_ids=[], opt=None):
"""Create a discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the first conv layer
netD (str) -- the architecture's name: basic | n_layers | pixel
n_layers_D (int) -- the number of conv layers in the discriminator; effective when netD=='n_layers'
norm (str) -- the type of normalization layers used in the network.
init_type (str) -- the name of the initialization method.
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Returns a discriminator
Our current implementation provides three types of discriminators:
[basic]: 'PatchGAN' classifier described in the original pix2pix paper.
It can classify whether 70×70 overlapping patches are real or fake.
Such a patch-level discriminator architecture has fewer parameters
than a full-image discriminator and can work on arbitrarily-sized images
in a fully convolutional fashion.
[n_layers]: With this mode, you can specify the number of conv layers in the discriminator
with the parameter <n_layers_D> (default=3 as used in [basic] (PatchGAN).)
[pixel]: 1x1 PixelGAN discriminator can classify whether a pixel is real or not.
It encourages greater color diversity but has no effect on spatial statistics.
The discriminator has been initialized by <init_net>. It uses Leaky ReLU for non-linearity.
"""
net = None
# if opt is not None and 'distributed' in opt and opt.distributed is True:
# norm_layer = torch.nn.InstanceNorm3d
# else:
norm_layer = get_norm_layer(norm_type=norm, in_discrimimator=True)
if netD == 'basic': # default PatchGAN classifier
net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer)
elif netD == 'n_layers': # more options
net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer)
elif netD == 'pixel': # classify if each pixel is real or fake
net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer)
else:
raise NotImplementedError('Discriminator model name [%s] is not recognized' % netD)
return init_net(net, init_type, init_gain, gpu_ids, opt)
# return PixelDiscriminator(2,8)
##############################################################################
# Classes
##############################################################################
class GANLoss(nn.Module):
"""Define different GAN objectives.
The GANLoss class abstracts away the need to create the target label tensor
that has the same size as the input.
"""
def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
""" Initialize the GANLoss class.
Parameters:
gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.
target_real_label (bool) - - label for a real image
target_fake_label (bool) - - label of a fake image
Note: Do not use sigmoid as the last layer of Discriminator.
LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.
"""
super(GANLoss, self).__init__()
self.register_buffer('real_label', torch.tensor(target_real_label))
self.register_buffer('fake_label', torch.tensor(target_fake_label))
self.gan_mode = gan_mode
if gan_mode == 'lsgan':
self.loss = nn.MSELoss()
elif gan_mode == 'vanilla':
self.loss = nn.BCEWithLogitsLoss()
elif gan_mode in ['wgangp']:
self.loss = None
else:
raise NotImplementedError('gan mode %s not implemented' % gan_mode)
def get_target_tensor(self, prediction, target_is_real):
"""Create label tensors with the same size as the input.
Parameters:
prediction (tensor) - - typically the prediction from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
A label tensor filled with ground truth label, and with the size of the input
"""
if target_is_real:
target_tensor = self.real_label
else:
target_tensor = self.fake_label
return target_tensor.expand_as(prediction)
def __call__(self, prediction, target_is_real):
"""Calculate loss given Discriminator's output and grount truth labels.
Parameters:
prediction (tensor) - - tpyically the prediction output from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
the calculated loss.
"""
if self.gan_mode in ['lsgan', 'vanilla']:
target_tensor = self.get_target_tensor(prediction, target_is_real)
loss = self.loss(prediction, target_tensor)
elif self.gan_mode == 'wgangp':
if target_is_real:
loss = -prediction.mean()
else:
loss = prediction.mean()
return loss
class ResnetBlock(nn.Module):
"""Define a Resnet block"""
def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Initialize the Resnet block
A resnet block is a conv block with skip connections
We construct a conv block with build_conv_block function,
and implement skip connections in <forward> function.
Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
"""
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Construct a convolutional block.
Parameters:
dim (int) -- the number of channels in the conv layer.
padding_type (str) -- the name of padding layer: reflect | replicate | zero
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
use_bias (bool) -- if the conv layer uses bias or not
Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
"""
conv_block = []
p = 1  # padding_type is effectively ignored here; zero padding of 1 is used for the 3x3 conv
conv_block += [nn.Conv3d(dim, dim//2, kernel_size=1, padding=0, bias=use_bias), norm_layer(dim//2), nn.ReLU(True)]
conv_block += [nn.Conv3d(dim//2, dim//2, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim//2), nn.ReLU(True)]
conv_block += [nn.Conv3d(dim//2, dim, kernel_size=1, padding=0, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
return nn.Sequential(*conv_block)
def forward(self, x):
"""Forward function (with skip connections)"""
out = x + self.conv_block(x) # add skip connections
return out
class ResnetGenerator(nn.Module):
"""Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code and idea from <NAME>'s neural style transfer project(https://github.com/jcjohnson/fast-neural-style)
"""
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm3d, use_dropout=False, n_blocks=6, padding_type='zero', use_deconv=True):
"""Construct a Resnet-based generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert(n_blocks >= 0)
super(ResnetGenerator, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm3d
else:
use_bias = norm_layer == nn.InstanceNorm3d
model = [nn.Conv3d(input_nc, ngf, kernel_size=7, padding=3, bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 3
for i in range(n_downsampling): # add downsampling layers
mult = 2 ** i
model += [nn.Conv3d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
mult = 2 ** n_downsampling
for i in range(n_blocks): # add ResNet blocks
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
for i in range(n_downsampling): # add upsampling layers
mult = 2 ** (n_downsampling - i)
if use_deconv:
model += [nn.ConvTranspose3d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
else:
model += [nn.Upsample(scale_factor=2, mode='trilinear'),
nn.Conv3d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=1, padding=1, bias=use_bias),
norm_layer(int(ngf * mult / 2)),nn.ReLU(True)]
model += [nn.Conv3d(ngf, output_nc, kernel_size=7, padding=3)]
# model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input):
"""Standard forward"""
return self.model(input)
class PixelDiscriminator(nn.Module):
"""Defines a 1x1 PatchGAN discriminator (pixelGAN)"""
def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm3d):
"""Construct a 1x1 PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
"""
super(PixelDiscriminator, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm3d has affine parameters
use_bias = norm_layer.func == nn.InstanceNorm3d
else:
use_bias = norm_layer == nn.InstanceNorm3d
self.net = [
nn.Conv3d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
nn.LeakyReLU(0.2, True),
nn.Conv3d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
norm_layer(ndf * 2),
nn.LeakyReLU(0.2, True),
nn.Conv3d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]
self.net = nn.Sequential(*self.net)
def forward(self, input):
"""Standard forward."""
return self.net(input)
class NLayerDiscriminator(nn.Module):
"""Defines a PatchGAN discriminator"""
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):
"""Construct a PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
n_layers (int) -- the number of conv layers in the discriminator
norm_layer -- normalization layer
"""
super(NLayerDiscriminator, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
kw = 4
padw = 1
sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
nf_mult = 1
nf_mult_prev = 1
for n in range(1, n_layers): # gradually increase the number of filters
nf_mult_prev = nf_mult
nf_mult = min(2 ** n, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
nf_mult_prev = nf_mult
nf_mult = min(2 ** n_layers, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map
self.model = nn.Sequential(*sequence)
def forward(self, input):
"""Standard forward."""
return self.model(input)
```
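A minimal shape-check sketch for the 3-D generator and discriminator classes defined above; the import path in the comment is hypothetical (adjust it to wherever this networks module lives in the repo), and the small `ngf`/`ndf`/`n_blocks` values are chosen only to keep the example light.
```python
import torch

# hypothetical import path -- adjust to the actual module location in this repo
# from gan.models.networks_3d import ResnetGenerator, PixelDiscriminator

x = torch.rand(1, 1, 32, 32, 32)                     # [N, C, D, H, W]; spatial dims divisible by 2**3
netG = ResnetGenerator(input_nc=1, output_nc=1, ngf=8, n_blocks=2)
netD = PixelDiscriminator(input_nc=1, ndf=8)

fake = netG(x)        # 3 stride-2 downsamplings + 3 upsamplings -> same spatial size as the input
score = netD(fake)    # 1x1x1 PatchGAN: one prediction per voxel
print(fake.shape, score.shape)   # both torch.Size([1, 1, 32, 32, 32])
```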
#### File: gan/runner/train_gan_3d.py
```python
import os
import sys
import time
import torch
from torch.utils.data import DataLoader
sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir))
from options.train_options import TrainOptions
from options.test_options import TestOptions
from util.visualizer import Visualizer
import models
from models.pix2pix_3d_model import Pix2Pix3DModel
sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir))
from common.common_base_datasets import CommonSegmentationDS
from utils.distributed_utils import DistributedUtils
from utils.image_show_utils import ImageShowUtils
from utils.metrics_utils import MetricsUtils
from datasets.common_ds import GAN_COMMON_DS, get_common_transform
import numpy as np
import SimpleITK as sitk
from tqdm import tqdm
import pandas as pd
class GANTrainer:
def __init__(self) -> None:
pass
@staticmethod
def train_one_epoch(model, dataloader, visualizer, total_iters, epoch, opt):
        # NOTE: this line (keeping the model in eval() mode here) is more important than you can imagine
model.eval()
epoch_start_time = time.time() # timer for entire epoch
iter_data_time = time.time() # timer for data loading per iteration
epoch_iter = 0 # the number of training iterations in current epoch, reset to 0 every epoch
visualizer.reset() # reset the visualizer: make sure it saves the results to HTML at least once every epoch
dataset_size = len(dataloader)
for index, (subjects) in enumerate(dataloader):
iter_start_time = time.time() # timer for computation per iteration
# if total_iters % opt.print_freq == 0:
t_data = iter_start_time - iter_data_time
total_iters += opt.batch_size * DistributedUtils.get_world_size()
epoch_iter += opt.batch_size * DistributedUtils.get_world_size()
real_a = subjects['src']['data'].float()
real_b = subjects['dst']['data'].float()
input = {}
input['A'] = real_a
input['B'] = real_b
input['A_paths'] = 'A'
input['B_paths'] = 'B'
if opt.mask_pattern:
mask = subjects['mask']['data']
input['mask'] = mask
input['mask_label'] = opt.mask_label
model.set_input(input)
model.optimize_parameters()
if DistributedUtils.get_rank() == 0:
if total_iters % opt.display_freq == 0: # display images on visdom and save images to a HTML file
save_result = total_iters % opt.update_html_freq == 0
model.compute_visuals()
import matplotlib.pyplot as plt
vis_result = model.get_current_visuals()
for key in vis_result.keys():
if 'real_A' in key:
ww = opt.src_ww_wl[0]
wl = opt.src_ww_wl[1]
w_min = wl-ww/2
w_max = wl+ww/2
vis_result[key] = torch.clamp(vis_result[key], min=w_min, max=w_max)/ww
else:
ww = opt.dst_ww_wl[0]
wl = opt.dst_ww_wl[1]
w_min = wl-ww/2
w_max = wl+ww/2
vis_result[key] = torch.clamp(vis_result[key], min=w_min, max=w_max)/ww
if opt.dst_vis_lut:
vis_result[key] = plt.get_cmap(opt.dst_vis_lut)(vis_result[key].detach().cpu().numpy()).squeeze()[...,:3]*255
visualizer.display_current_results(vis_result, epoch_iter, False)
if total_iters % opt.print_freq == 0: # print training losses and save logging information to the disk
losses = model.get_current_losses()
t_comp = (time.time() - iter_start_time) / (opt.batch_size)
visualizer.print_current_losses(epoch, epoch_iter, losses, t_comp, t_data)
if opt.display_id > 0:
visualizer.plot_current_losses(epoch, float(epoch_iter) / dataset_size, losses)
if total_iters % opt.save_latest_freq == 0: # cache our latest model every <save_latest_freq> iterations
print('saving the latest model (epoch %d, total_iters %d)' % (epoch, total_iters))
save_suffix = 'iter_%d' % total_iters if opt.save_by_iter else 'latest'
model.save_networks(save_suffix)
iter_data_time = time.time()
if DistributedUtils.get_rank() == 0:
if epoch % opt.save_epoch_freq == 0: # cache our model every <save_epoch_freq> epochs
print('saving the model at the end of epoch %d, iters %d' % (epoch, total_iters))
model.save_networks('latest')
model.save_networks(epoch)
print('End of epoch %d / %d \t Time Taken: %d sec' % (epoch, opt.n_epochs + opt.n_epochs_decay, time.time() - epoch_start_time))
model.update_learning_rate()
return total_iters
@staticmethod
def load_model(opts, weights_G):
model = models.create_model(opts)
        # model.setup() would load the network weights; it is not needed here
# model.setup(opts)
netG = model.netG.module
netG.load_state_dict(torch.load(weights_G, map_location='cpu'))
return netG
# return model
@staticmethod
def inference_onecase(model, series_path, crop_size = [128, 128, 128], out_root=None, opts=None):
model.eval()
subject = CommonSegmentationDS.get_inference_input(series_path, crop_size)
input = subject['src']['data'].float().unsqueeze(0)
real_a = input
        # load_model() returns the generator network itself, so call it directly (see inference_batch below)
        fake_b = model(real_a.cuda())
fake_b = fake_b.detach().squeeze().cpu().numpy()
fake_b = np.transpose(fake_b, [2,1,0])
info_img = sitk.ReadImage(series_path)
spacing = info_img.GetSpacing()
direction = info_img.GetDirection()
origin = info_img.GetOrigin()
os.makedirs(out_root, exist_ok=True)
        # convert the input tensor to a numpy array with the same axis order as fake_b before building the sitk image
        real_a = real_a.squeeze().cpu().numpy()
        real_a = np.transpose(real_a, [2, 1, 0])
        real_img_a = sitk.GetImageFromArray(real_a)
real_img_a.SetSpacing(spacing)
real_img_a.SetDirection(direction)
real_img_a.SetOrigin(origin)
fake_img_b = sitk.GetImageFromArray(fake_b)
fake_img_b.CopyInformation(real_img_a)
if out_root:
sitk.WriteImage(real_img_a, os.path.join(out_root, 'real_a.nii.gz'))
sitk.WriteImage(fake_img_b, os.path.join(out_root, 'fake_b.nii.gz'))
return real_img_a, fake_img_b
@staticmethod
def inference_batch(model, data_root, out_dir, opts):
'''
        The following options must be set:
            crop_size
            src_pattern
            dst_pattern
'''
if opts.inference_mode == 'train':
model.train()
else:
model.eval()
transform = get_common_transform(opts.crop_size,'GAN_INFERENCE')
ds = GAN_COMMON_DS(data_root, opts.src_pattern, opts.dst_pattern, opts.crop_size, transform)
dataloader = DataLoader(ds, batch_size=1, num_workers=2, shuffle=True, pin_memory=True)
dataset_size = len(dataloader)
for index, (subjects) in tqdm(enumerate(dataloader)):
real_a = subjects['src']['data'].float()
real_b = subjects['dst']['data'].float()
# input = {}
# input['A'] = real_a
# input['B'] = real_b
# input['A_paths'] = 'A'
# input['B_paths'] = 'B'
# model.set_input(input)
# fake_b = model.netG(real_a.cuda())
fake_b = model(real_a.cuda())
fake_b = fake_b.detach().squeeze().cpu().numpy()
real_a = real_a.squeeze().cpu().numpy()
real_b = real_b.squeeze().cpu().numpy()
real_a = np.transpose(real_a, [2,1,0])
real_b = np.transpose(real_b, [2,1,0])
fake_b = np.transpose(fake_b, [2,1,0])
pid = subjects['src']['path'][0].split('/')[-2]
info_img = sitk.ReadImage(subjects['src']['path'][0])
spacing = info_img.GetSpacing()
direction = info_img.GetDirection()
origin = info_img.GetOrigin()
out_sub_dir = os.path.join(out_dir, pid)
os.makedirs(out_sub_dir, exist_ok=True)
real_img_a = sitk.GetImageFromArray(real_a)
real_img_a.SetSpacing(spacing)
real_img_a.SetDirection(direction)
real_img_a.SetOrigin(origin)
real_img_b = sitk.GetImageFromArray(real_b)
real_img_b.CopyInformation(real_img_a)
fake_img_b = sitk.GetImageFromArray(fake_b)
fake_img_b.CopyInformation(real_img_a)
sitk.WriteImage(real_img_a, os.path.join(out_sub_dir, 'real_a.nii.gz'))
sitk.WriteImage(real_img_b, os.path.join(out_sub_dir, 'real_b.nii.gz'))
sitk.WriteImage(fake_img_b, os.path.join(out_sub_dir, 'fake_b.nii.gz'))
@staticmethod
def export_slicemap_onecase(data_root, out_root,
src_ww=150, src_wl=75, dst_ww=150, dst_wl=75,
src_lut=None, dst_lut='jet'
):
real_a_file = os.path.join(data_root, 'real_a.nii.gz')
real_b_file = os.path.join(data_root, 'real_b.nii.gz')
fake_b_file = os.path.join(data_root, 'fake_b.nii.gz')
real_a_img = sitk.ReadImage(real_a_file)
real_b_img = sitk.ReadImage(real_b_file)
fake_b_img = sitk.ReadImage(fake_b_file)
real_a_arr = sitk.GetArrayFromImage(real_a_img)
real_b_arr = sitk.GetArrayFromImage(real_b_img)
fake_b_arr = sitk.GetArrayFromImage(fake_b_img)
ImageShowUtils.save_volume_to_jpg(real_a_arr, os.path.join(out_root, 'real_a'), src_ww, src_wl, axis=0, file_prefix='x', reverse=False, lut_name=src_lut)
ImageShowUtils.save_volume_to_jpg(real_b_arr, os.path.join(out_root, 'real_b'), dst_ww, dst_wl, axis=0, file_prefix='x', reverse=False, lut_name=dst_lut)
ImageShowUtils.save_volume_to_jpg(fake_b_arr, os.path.join(out_root, 'fake_b'), dst_ww, dst_wl, axis=0, file_prefix='x', reverse=False, lut_name=dst_lut)
@staticmethod
def export_slicemap_singletask(data_root, out_root, suids,
src_ww=150, src_wl=75, dst_ww=150, dst_wl=75,
src_lut=None, dst_lut='jet'
):
for suid in tqdm(suids):
try:
sub_data_root = os.path.join(data_root, suid)
sub_out_root = os.path.join(out_root, suid)
GANTrainer.export_slicemap_onecase(sub_data_root, sub_out_root,
src_ww, src_wl, dst_ww, dst_wl, src_lut, dst_lut)
except Exception as e:
print('====> Error case:\t{}'.format(suid))
print(e)
@staticmethod
def export_slicemap_multiprocessing(data_root, out_root,
src_ww=150, src_wl=75, dst_ww=150, dst_wl=75,
src_lut=None, dst_lut='jet',
process_num=6
):
series_uids = []
series_uids = os.listdir(data_root)
# print(series_uids)
num_per_process = (len(series_uids) + process_num - 1)//process_num
# this for single thread to debug
# GANTrainer.export_slicemap_singletask(data_root, out_root, series_uids, src_ww, src_wl, dst_ww, dst_wl, src_lut, dst_lut)
# this for run
import multiprocessing
from multiprocessing import Process
multiprocessing.freeze_support()
pool = multiprocessing.Pool()
results = []
print(len(series_uids))
for i in range(process_num):
sub_series_uids = series_uids[num_per_process*i:min(num_per_process*(i+1), len(series_uids))]
print(len(sub_series_uids))
result = pool.apply_async(GANTrainer.export_slicemap_singletask,
args=(data_root, out_root, sub_series_uids, src_ww, src_wl, dst_ww, dst_wl, src_lut, dst_lut))
results.append(result)
pool.close()
pool.join()
@staticmethod
def calc_mae(
data_root='/data/medical/cardiac/cta2mbf/data_66_20210517/6.inference_384x384x160_eval',
out_dir = '/data/medical/cardiac/cta2mbf/data_66_20210517/7.analysis_result',
out_file = 'mae_384x384x160_eval.csv'
):
row_elems = []
for suid in tqdm(os.listdir(data_root)):
sub_data_root = os.path.join(data_root, suid)
real_b_file = os.path.join(sub_data_root, 'real_b.nii.gz')
fake_b_file = os.path.join(sub_data_root, 'fake_b.nii.gz')
_, mae = MetricsUtils.calc_mae_with_file(real_b_file, fake_b_file)
row_elems.append(np.array([suid, mae]))
df = pd.DataFrame(np.array(row_elems), columns=['suid', 'mae'])
os.makedirs(out_dir, exist_ok=True)
out_file = os.path.join(out_dir, out_file)
df.to_csv(out_file)
@staticmethod
def calc_mae_with_mask(
data_root='/data/medical/cardiac/cta2mbf/data_66_20210517/6.inference_384x384x160_eval',
out_dir = '/data/medical/cardiac/cta2mbf/data_66_20210517/7.analysis_result',
out_file = 'mae_384x384x160_eval.csv',
mask_root = None,
mask_pattern = 'cropped_mbf_mask.nii.gz',
mask_label = 1,
crop_size = [384, 384, 160]
):
row_elems = []
for suid in tqdm(os.listdir(data_root)):
sub_data_root = os.path.join(data_root, suid)
real_b_file = os.path.join(sub_data_root, 'real_b.nii.gz')
fake_b_file = os.path.join(sub_data_root, 'fake_b.nii.gz')
# _, mae = MetricsUtils.calc_mae_with_file(real_b_file, fake_b_file)
if mask_root:
mask_file = os.path.join(mask_root, suid, mask_pattern)
try:
subject = CommonSegmentationDS.get_inference_input(mask_file, crop_size)
mask_arr = subject['src']['data'][0]
mask_arr = np.transpose(mask_arr, [2,1,0])
mask_img = sitk.GetImageFromArray(mask_arr)
real_img = sitk.ReadImage(real_b_file)
mask_img.CopyInformation(real_img)
tmp_mask_file = os.path.join(sub_data_root, 'tmp_mask.nii.gz')
sitk.WriteImage(mask_img, tmp_mask_file)
mask_mae, mae = MetricsUtils.calc_mae_with_file(real_b_file, fake_b_file, mask_file=tmp_mask_file, mask_label=mask_label)
except:
mask_mae = -1
mae = -1
row_elems.append(np.array([suid, mae, mask_mae]))
else:
_, mae = MetricsUtils.calc_mae_with_file(real_b_file, fake_b_file)
row_elems.append(np.array([suid, mae]))
if mask_root:
df = pd.DataFrame(np.array(row_elems), columns=['suid', 'mae', 'mask_mae'])
else:
df = pd.DataFrame(np.array(row_elems), columns=['suid', 'mae'])
os.makedirs(out_dir, exist_ok=True)
out_file = os.path.join(out_dir, out_file)
df.to_csv(out_file)
def train():
opt = TrainOptions().parse()
DistributedUtils.init_distributed_mode(opt)
if 'rank' not in opt:
opt.rank = DistributedUtils.get_rank()
print(opt)
data_root = opt.dataroot
crop_size = opt.crop_size
transform = get_common_transform(crop_size, opt.aug)
ds = GAN_COMMON_DS(data_root, opt.src_pattern, opt.dst_pattern, crop_size, transform, opt.mask_pattern)
dataloader = DataLoader(ds, batch_size=1, num_workers=2, shuffle=True, pin_memory=True)
dataset_size = len(dataloader) # get the number of images in the dataset.
model = models.create_model(opt)
model.setup(opt)
visualizer = Visualizer(opt)
total_iters = 0 # the total number of training iterations
print(model.netG)
for epoch in range(opt.epoch_count, opt.n_epochs + opt.n_epochs_decay + 1): # outer loop for different epochs; we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>
total_iters = GANTrainer.train_one_epoch(model, dataloader, visualizer, total_iters, epoch, opt)
def test_load_model():
'''
# --model pix2pix_3d \
# --input_nc 1 \
# --output_nc 1 \
# --ngf 32 \
# --netG resnet_6blocks \
# --ndf 8 \
# --no_dropout \
# --netD pixel \
# --norm batch \
'''
opts = TestOptions().parse()
opts.model = 'pix2pix_3d'
opts.input_nc = 1
opts.output_nc = 1
opts.ngf = 32
opts.netG = 'resnet_6blocks'
opts.ndf = 8
opts.no_dropout = True
opts.netD = 'pixel'
opts.norm = 'batch'
weights_G = '/home/zhangwd/code/work/MedCommon/gan/unit_test/checkpoints/experiment_name/430_net_G.pth'
model = GANTrainer.load_model(opts, weights_G)
print(model)
def inference_onecase(series_path, out_root, weights):
opt = TrainOptions().parse()
model = GANTrainer.load_model(opt, weights)
GANTrainer.inference_onecase(model.cuda(), series_path, opt.crop_size, out_root, opt)
def inference(data_root, out_root, weights):
opt = TrainOptions().parse()
model = GANTrainer.load_model(opt, weights)
GANTrainer.inference_batch(model.cuda(), data_root, out_root, opt)
if __name__ == '__main__':
# train()
# test_load_model()
# inference(
# '/data/medical/cardiac/cta2mbf/data_140_20210602/5.mbf_myocardium',
# '/data/medical/cardiac/cta2mbf/data_140_20210602/6.inference_352x352x160_eval',
# '/data/medical/cardiac/cta2mbf/data_114_20210318/checkpoints/cta2mbf/90_net_G.pth'
# )
# inference(
# '/data/medical/cardiac/cta2mbf/data_140_20210602/5.mbf_myocardium',
# '/data/medical/cardiac/cta2mbf/data_140_20210602/6.inference_352x352x160_train',
# '/home/zhangwd/code/work/MedCommon/gan/unit_test/checkpoints/bk/train_latest/1140_net_G.pth'
# )
# inference(
# '/ssd/zhangwd/cta2mbf/data_yourname/5.mbf_myocardium',
# '/ssd/zhangwd/cta2mbf/data_yourname/6.inference_384x384x160_train',
# '/ssd/zhangwd/cta2mbf/data_yourname/checkpoints/cta2mbf_sr/390_net_G.pth'
# )
GANTrainer.calc_mae_with_mask(
data_root='/ssd/zhangwd/cta2mbf/data_yourname/6.inference_384x384x160_train',
out_dir = '/ssd/zhangwd/cta2mbf/data_yourname/7.analysis_result',
out_file = 'mae_384x384x160_eval.csv',
mask_root = '/ssd/zhangwd/cta2mbf/data_yourname/5.mbf_myocardium',
mask_pattern = 'cropped_mbf_mask.nii.gz',
mask_label = 1
)
```

#### File: zf/data_processor/data_loader.py
```python
import torch
import numpy as np
from torch.utils.data import Dataset
from utils.csv_tools import read_csv
from data_processor.data_io import DataIO
from utils.image_utils import clip_and_normalize
from utils.mask_utils import convert_one_hot, convert_ribCenterline_one_hot
class DataSetLoader(Dataset):
"""data loader."""
def __init__(self, csv_path, image_dir, mask_dir, num_classes=1, phase="train", normalization=True,
file_exclude_csv=None, window_level=[-1200, 1200]):
"""
Args:
csv_path(str): data .csv file.
image_dir(str): image dir.
mask_dir(str): mask dir.
num_classes(int): number of labels.
phase(str): train or val.
normalization(bool): whether image normalization.
file_exclude_csv(str): csv to file which will be excluded, default(None).
window_level(list): the level of window in CT HU value.
"""
self.phase = phase
self.csv_path = csv_path
self.image_dir = image_dir
self.mask_dir = mask_dir
self.num_classes = num_classes
self.normalization = normalization
self.window_level = window_level
file_names = read_csv(csv_path)[1:]
self.file_names = [item[0] for item in file_names]
if file_exclude_csv:
exclude_filenames = read_csv(file_exclude_csv)[1:]
self.exclude_filenames = [item[0] for item in exclude_filenames]
            # remove bad cases (build a new list; calling remove() while iterating would skip entries)
            self.file_names = [f for f in self.file_names if f not in self.exclude_filenames]
def __len__(self):
return len(self.file_names)
def __getitem__(self, idx):
"""
Return:
image(torch tensor): channel first, dims=[c, z, y, x]
mask(torch tensor): channel first, dims=[c, z, y, x]
"""
data_loader = DataIO()
# load image.
uid = self.file_names[idx]
uid = uid[0] if type(uid) == list else uid
image_path = self.image_dir + uid
image_dict = data_loader.load_nii_image(image_path)
image_zyx = image_dict["image"]
image_zyx = clip_and_normalize(image_zyx, min_window=self.window_level[0], max_window=self.window_level[1]) \
if self.normalization else image_zyx
image_czyx = image_zyx[np.newaxis, ]
# load mask.
if self.mask_dir:
mask_path = self.mask_dir + uid
mask_dict = data_loader.load_nii_image(mask_path)
mask_zyx = mask_dict["image"]
            # convert rib and centerline mask to one hot.
mask_czyx = convert_ribCenterline_one_hot(mask_zyx, 1, self.num_classes)
# mask_czyx = convert_one_hot(mask_zyx, 1, self.num_classes)
if self.phase != "test":
return torch.from_numpy(image_czyx).float(), torch.from_numpy(mask_czyx).float()
else:
if self.mask_dir:
return {"uid": uid, "image": image_czyx[np.newaxis, ], "image_shape_ori": image_dict["image"].shape,
"mask": mask_czyx[np.newaxis, ], "is_exist_mask": True,
"origin": image_dict["origin"], "spacing": image_dict["spacing"],
"direction": image_dict["direction"]}
else:
return {"uid": uid, "image": image_czyx[np.newaxis, ], "image_shape_ori": image_dict["image"].shape,
"mask": None, "is_exist_mask": False,
"origin": image_dict["origin"], "spacing": image_dict["spacing"],
"direction": image_dict["direction"]}
```
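A minimal usage sketch of `DataSetLoader`; the csv/image/mask paths below are placeholders (the loader simply concatenates `image_dir`/`mask_dir` with the file name listed in the csv, header row skipped), and batch size 1 is used because volumes of different shapes cannot be collated without resampling.
```python
from torch.utils.data import DataLoader

# placeholder paths -- the csv lists one file name per row, header skipped by read_csv(...)[1:]
train_ds = DataSetLoader(csv_path="./config/train.csv",
                         image_dir="./data/images/",
                         mask_dir="./data/masks/",
                         num_classes=1, phase="train",
                         window_level=[-1200, 1200])
train_loader = DataLoader(train_ds, batch_size=1, shuffle=True, num_workers=2)

for image, mask in train_loader:
    # image / mask: [batch, channel, z, y, x] float tensors
    print(image.shape, mask.shape)
    break
```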
#### File: zf/data_processor/data_resample.py
```python
import numpy as np
from scipy.ndimage.interpolation import zoom
class DataResampler(object):
def __init__(self):
super(DataResampler, self).__init__()
def resampleImageToFixedSize(self, image, out_size, order=1):
"""resample image to fixed size"""
"""
Args:
image(numpy array): image array.
out_size(list): out image size.
order(int, optional): The order of the spline interpolation.
Return:
image_zoom(numpy array): image array after resample.
zoom_factor(numpy array, size:[3]): zoom factor.
"""
scale = np.array(out_size) / image.shape
zoom_factor = image.shape / np.array(out_size)
image_zoom = zoom(image, scale, order=order)
return image_zoom, zoom_factor
def resampleMaskToFixedSize(self, mask, num_label, out_size, order=1):
"""resample mask to fixed size"""
scale = np.array(out_size) / mask.shape
zoom_factor = mask.shape / np.array(out_size)
        mask_zoom = np.zeros(out_size, dtype=mask.dtype)  # allocate at the target size, not the input size
for i in range(1, num_label+1):
t_mask = mask.copy()
t_mask[t_mask != i] = 0
t_mask[t_mask == i] = 1
t_mask = zoom(t_mask, scale, order=order)
t_mask = (t_mask > 0.5).astype(np.uint8)
mask_zoom[t_mask != 0] = i
return mask_zoom, zoom_factor
def resampleImageToFixedSpacing(self, image, ori_spacing, out_spacing, order=1):
"""resample image to fixed spacing"""
"""
Args:
image(numpy array): image array.
ori_spacing(list): out image spacing.
out_spacing(list): out image spacing.
order(int, optional): The order of the spline interpolation.
Return:
image_zoom(numpy array): image array after resample.
zoom_factor(numpy array, size:[3]): zoom factor.
"""
scale = np.array(ori_spacing) / np.array(out_spacing)
zoom_factor = np.array(out_spacing) / np.array(ori_spacing)
image_zoom = zoom(image, scale, order=order)
return image_zoom, zoom_factor
def resampleMaskToFixedSpacing(self, mask, ori_spacing, out_spacing, num_label, order=1):
"""resample mask to fixed spacing"""
scale = np.array(ori_spacing) / np.array(out_spacing)
zoom_factor = np.array(out_spacing) / np.array(ori_spacing)
        mask_zoom = None
        for i in range(1, num_label+1):
            t_mask = mask.copy()
            t_mask[t_mask != i] = 0
            t_mask[t_mask == i] = 1
            t_mask = zoom(t_mask, scale, order=order)
            t_mask = (t_mask > 0.5).astype(np.uint8)
            if mask_zoom is None:
                # allocate the output at the resampled size (the original shape no longer matches after zoom)
                mask_zoom = np.zeros_like(t_mask)
            mask_zoom[t_mask != 0] = i
        return mask_zoom, zoom_factor
def resampleImageToFixedScale(self, image, scale, order=1):
"""resample image to fixed scale"""
image_zoom = zoom(image, scale, order=order)
return image_zoom
def resampleMaskToFixedScale(self, mask, scale, num_label, order=1):
"""resmaple mask to fixed scale"""
mask_zoom = np.zeros_like(mask)
for i in range(1, num_label+1):
t_mask = mask.copy()
t_mask[t_mask != i] = 0
t_mask[t_mask == i] = 1
t_mask = zoom(t_mask, scale, order=order)
t_mask = (t_mask > 0.5).astype(np.uint8)
mask_zoom[t_mask != 0] = i
return mask_zoom
```
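A quick sketch of the resampler on synthetic arrays; images use linear interpolation (`order=1`), while masks are zoomed per label and re-binarized.
```python
import numpy as np

resampler = DataResampler()
image = np.random.randint(-1000, 1000, size=(64, 128, 128)).astype(np.float32)
mask = (np.random.rand(64, 128, 128) > 0.95).astype(np.uint8)   # toy binary mask

image_zoom, factor = resampler.resampleImageToFixedSize(image, out_size=[32, 64, 64], order=1)
mask_zoom, _ = resampler.resampleMaskToFixedSize(mask, num_label=1, out_size=[32, 64, 64], order=1)
print(image_zoom.shape, mask_zoom.shape, factor)   # (32, 64, 64) (32, 64, 64) [2. 2. 2.]
```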
#### File: network/blocks/attention.py
```python
import torch
import torch.nn as nn
class MultiHeadAttention3d(torch.nn.Module):
def __init__(self, in_channels, total_key_filters, total_value_filters,
out_channels, num_heads, dropout_prob=0.5, layer_type='SAME'):
super(MultiHeadAttention3d, self).__init__()
if total_key_filters % num_heads != 0:
raise ValueError("Key depth (%d) must be divisible by the number of "
"attention heads (%d)." % (total_key_filters, num_heads))
if total_value_filters % num_heads != 0:
raise ValueError("Value depth (%d) must be divisible by the number of "
"attention heads (%d)." % (total_value_filters, num_heads))
if layer_type not in ['SAME', 'DOWN', 'UP']:
raise ValueError("Layer type (%s) must be one of SAME, "
"DOWN, UP." % (layer_type))
self.in_channels = in_channels
self.out_channels = out_channels
self.total_key_filters = total_key_filters
self.total_value_filters = total_value_filters
self.num_heads = num_heads
self.layer_type = layer_type
self.compute_qkv_3d = ComputeQkv3D(in_channels, total_key_filters, total_value_filters, layer_type)
self.dropout = nn.Dropout(dropout_prob)
self.outconv = nn.Conv3d(self.total_value_filters, self.out_channels, kernel_size=1, stride=1, padding=0,
bias=True)
def forward(self, inputs):
"""
inputs: Tensor with shape [batch, channels, D, H, W]
return: Tensor with shape [batch, channels, D, H, W]
"""
q, k, v = self.compute_qkv_3d(inputs)
shape = q.shape
k = self.split_heads_3d(k, self.num_heads)
v = self.split_heads_3d(v, self.num_heads)
q = self.split_heads_3d(q, self.num_heads)
# normalize
key_filters_per_head = self.total_key_filters // self.num_heads
q *= key_filters_per_head ** -0.5
output = self.global_attention_3d(q, k, v, shape)
return self.outconv(output)
@staticmethod
def split_heads_3d(x, num_heads):
"""
Args:
x: Tensor with shape [B D H W channels]
num_heads: an integer
Return:
            Tensor with shape [B, D, H, W, num_heads, channels / num_heads]
"""
channels = x.shape[-1]
return x.view(x.shape[0], x.shape[1], x.shape[2], x.shape[3], num_heads, int(channels / num_heads))
def global_attention_3d(self, q, k, v, shape):
k = torch.flatten(k, 0, 4) # [B, D, H, W, N, C]
v = torch.flatten(v, 0, 4)
q = torch.flatten(q, 0, 4)
attention_weight = torch.matmul(q, k.transpose(0, 1))
attention_weight = torch.softmax(attention_weight, dim=1)
attention_weight = self.dropout(attention_weight)
output = torch.matmul(attention_weight, v)
output = output.view(shape[0], shape[1], shape[2], shape[3], v.shape[-1] * self.num_heads)
output = output.permute(0, 4, 1, 2, 3) # [B C D H W]
return output
class ComputeQkv3D(nn.Module):
"""Computes query, key and value.
Args:
inputs: a Tensor with shape [batch, channels, d, h, w] # Differnet with tensorflow
in_channels: Conv input channels
total_key_filters: an integer
total_value_filters: and integer
layer_type: String, type of this layer -- SAME, DOWN, UP
Returns:
q: [batch, _d, _h, _w, total_key_filters] tensor # Same with tensorflow
k: [batch, h, w, total_key_filters] tensor
v: [batch, h, w, total_value_filters] tensor
"""
def __init__(self, in_channels, total_key_filters, total_value_filters, layer_type):
super(ComputeQkv3D, self).__init__()
self.in_channels = in_channels
self.total_key_filters = total_key_filters
self.total_value_filters = total_value_filters
self.layer_type = layer_type
if self.layer_type == 'SAME':
self.qconv = nn.Conv3d(in_channels, total_key_filters, kernel_size=1, stride=1,
padding=0, bias=True)
elif self.layer_type == 'DOWN':
self.qconv = nn.Conv3d(in_channels, total_key_filters, kernel_size=3, stride=2,
padding=1, bias=True)
elif self.layer_type == 'UP':
self.qconv = nn.ConvTranspose3d(in_channels, total_key_filters, kernel_size=3, stride=2,
padding=1, output_padding=1, bias=True)
self.kconv = nn.Conv3d(in_channels, total_key_filters, kernel_size=1, stride=1, padding=0, bias=True)
self.vconv = nn.Conv3d(in_channels, total_value_filters, kernel_size=1, stride=1, padding=0, bias=True)
def forward(self, x):
q = self.qconv(x)
k = self.kconv(x)
v = self.vconv(x)
# [B D H W C]
return q.permute(0, 2, 3, 4, 1), k.permute(0, 2, 3, 4, 1), v.permute(0, 2, 3, 4, 1)
def test():
net = MultiHeadAttention3d(2, 16, 16, 4, 4, layer_type='DOWN') # 'SAME', 'DOWN', 'UP'
x = torch.rand(2, 2, 16, 8, 4)
if torch.cuda.is_available():
x = x.cuda()
net = net.cuda()
y = net(x)
print(y.shape)
#test()
```
#### File: network/blocks/splat3D.py
```python
import torch
from torch import nn
import torch.nn.functional as F
__all__ = ['SplAtConv3d']
class SplAtConv3d(nn.Module):
"""Split-Attention Conv3d
"""
def __init__(self, in_channels, channels, kernel_size, stride=1, padding=0,
dilation=1, groups=1, bias=True,
radix=2, norm_layer="GN",
dropblock_prob=0.0):
super(SplAtConv3d, self).__init__()
inter_channels = max(in_channels*radix//2, 8)
self.radix = radix
self.cardinality = groups
self.channels = channels
self.conv = nn.Conv3d(in_channels, channels*radix, kernel_size, stride, padding, dilation,
groups=groups*radix, bias=bias)
self.bn0 = nn.BatchNorm3d(num_features=channels*radix) if norm_layer=="BN" else \
nn.GroupNorm(num_groups=channels // 2, num_channels=channels*radix)
self.relu = nn.ReLU(inplace=True)
self.fc1 = nn.Conv3d(channels, inter_channels, 1, groups=self.cardinality)
self.bn1 = nn.BatchNorm3d(num_features=inter_channels) if norm_layer=="BN" else \
nn.GroupNorm(num_groups=inter_channels // 4, num_channels=inter_channels)
self.fc2 = nn.Conv3d(inter_channels, channels*radix, 1, groups=self.cardinality)
self.dropblock = nn.Dropout(p=dropblock_prob) if dropblock_prob > 0.0 else nn.Sequential()
self.rsoftmax = rSoftMax(radix, groups)
def forward(self, x):
x = self.conv(x)
x = self.bn0(x)
x = self.dropblock(x)
x = self.relu(x)
batch, rchannel = x.shape[:2]
if self.radix > 1:
splited = torch.split(x, rchannel//self.radix, dim=1)
gap = sum(splited)
else:
gap = x
gap = F.adaptive_avg_pool3d(gap, 1)
gap = self.fc1(gap)
gap = self.bn1(gap)
gap = self.relu(gap)
atten = self.fc2(gap)
atten = self.rsoftmax(atten).view(batch, -1, 1, 1, 1)
if self.radix > 1:
attens = torch.split(atten, rchannel // self.radix, dim=1)
out = sum([att * split for (att, split) in zip(attens, splited)])
else:
out = atten * x
return out.contiguous()
class rSoftMax(nn.Module):
def __init__(self, radix, cardinality):
super().__init__()
self.radix = radix
self.cardinality = cardinality
def forward(self, x):
batch = x.size(0)
if self.radix > 1:
x = x.view(batch, self.cardinality, self.radix, -1).transpose(1, 2)
x = F.softmax(x, dim=1)
x = x.reshape(batch, -1)
else:
x = torch.sigmoid(x)
return x
```
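A quick shape check for `SplAtConv3d`; with `padding=1` and the default stride the block keeps the spatial size and outputs `channels` feature maps.
```python
import torch

x = torch.rand(2, 16, 8, 16, 16)   # [N, C, D, H, W]
block = SplAtConv3d(in_channels=16, channels=16, kernel_size=3, padding=1, radix=2, norm_layer="GN")
y = block(x)
print(y.shape)   # torch.Size([2, 16, 8, 16, 16])
```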
#### File: zf/runner/metric.py
```python
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
class DiceMetric(nn.Module):
"""Dice Metric Function"""
def __init__(self, dims=[2, 3, 4]):
super(DiceMetric, self).__init__()
self.dims = dims
def forward(self, predict, gt, activation="sigmoid", is_average=True):
"""
Args:
predict(torch tensor):
gt(torch tensor):
activation(str): sigmoid or softmax
is_average(bool):
Return:
dice(torch tensor):
"""
predict = predict.float()
gt = gt.float()
if activation == "sigmoid":
pred = F.sigmoid(predict)
pred[pred < 0.5] = 0
pred[pred >= 0.5] = 1
elif activation == "softmax":
# pred = torch.argmax(predict, dim=1)
pred = F.softmax(predict, dim=1)
intersection = torch.sum(pred * gt, dim=self.dims)
union = torch.sum(pred, dim=self.dims) + torch.sum(gt, dim=self.dims)
dice = (2. * intersection + 1e-5) / (union + 1e-5)
dice = dice.mean(0) if is_average else dice.sum(0)
return dice
def compute_precision_recall_F1(predict, gt, num_class):
"""compute precision, recall and F1"""
tp, tp_fp, tp_fn = [0.] * num_class, [0.] * num_class, [0.] * num_class
precision, recall, f1 = [0.] * num_class, [0.] * num_class, [0.] * num_class
for label in range(num_class):
t_labels = gt == label
p_labels = predict == label
        # p_labels * 2 - 1 maps True -> 1 and False -> -1, so equality with the boolean t_labels counts true positives only
        tp[label] += np.sum(t_labels == (p_labels * 2 - 1))
tp_fp[label] += np.sum(p_labels)
tp_fn[label] += np.sum(t_labels)
precision[label] = tp[label] / (tp_fp[label] + 1e-8)
recall[label] = tp[label] / (tp_fn[label] + 1e-8)
f1[label] = 2 * precision[label] * recall[label] / (precision[label] + recall[label] + 1e-8)
return precision, recall, f1
```
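A minimal sketch of the metrics above on random tensors: `DiceMetric` expects raw logits plus a same-shaped ground truth, while `compute_precision_recall_F1` works on hard (integer) label maps.
```python
import torch
import numpy as np

dice_metric = DiceMetric(dims=[2, 3, 4])
predict = torch.randn(2, 1, 16, 32, 32)              # raw logits, [N, C, D, H, W]
gt = (torch.rand(2, 1, 16, 32, 32) > 0.5).float()
print(dice_metric(predict, gt, activation="sigmoid"))   # one dice value per channel

pred_labels = np.random.randint(0, 2, size=(16, 32, 32))
gt_labels = np.random.randint(0, 2, size=(16, 32, 32))
precision, recall, f1 = compute_precision_recall_F1(pred_labels, gt_labels, num_class=2)
print(precision, recall, f1)
```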
#### File: zf/utils/logger.py
```python
import sys
import logging
def get_logger(exp_dir):
"""
creates logger instance. writing out info to file and to terminal.
:param exp_dir: experiment directory, where log.txt file is stored.
:return: logger instance.
"""
logger = logging.getLogger('log')
logger.setLevel(logging.DEBUG)
log_file = exp_dir + '/log.txt'
hdlr = logging.FileHandler(log_file)
print('Logging to {}'.format(log_file))
logger.addHandler(hdlr)
logger.addHandler(ColorHandler())
logger.propagate = False
return logger
class _AnsiColorizer(object):
"""
A colorizer is an object that loosely wraps around a stream, allowing
callers to write text to the stream in a particular color.
Colorizer classes must implement C{supported()} and C{write(text, color)}.
"""
_colors = dict(black=30, red=31, green=32, yellow=33,
blue=34, magenta=35, cyan=36, white=37, default=39)
def __init__(self, stream):
self.stream = stream
@classmethod
def supported(cls, stream=sys.stdout):
"""
A class method that returns True if the current platform supports
coloring terminal output using this method. Returns False otherwise.
"""
if not stream.isatty():
return False # auto color only on TTYs
try:
import curses
except ImportError:
return False
else:
try:
try:
return curses.tigetnum("colors") > 2
except curses.error:
curses.setupterm()
return curses.tigetnum("colors") > 2
except:
raise
# guess false in case of error
return False
def write(self, text, color):
"""
Write the given text to the stream in the given color.
@param text: Text to be written to the stream.
@param color: A string label for a color. e.g. 'red', 'white'.
"""
color = self._colors[color]
self.stream.write('\x1b[%sm%s\x1b[0m' % (color, text))
class ColorHandler(logging.StreamHandler):
def __init__(self, stream=sys.stdout):
super(ColorHandler, self).__init__(_AnsiColorizer(stream))
def emit(self, record):
msg_colors = {
logging.DEBUG: "green",
logging.INFO: "default",
logging.WARNING: "red",
logging.ERROR: "red"
}
color = msg_colors.get(record.levelno, "blue")
self.stream.write(record.msg + "\n", color)
```
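A minimal usage sketch: `get_logger` writes to `<exp_dir>/log.txt` and, on terminals that support it, colorizes console output by level.
```python
import os

os.makedirs('./exp', exist_ok=True)
logger = get_logger('./exp')                 # logs to ./exp/log.txt and to the terminal
logger.info('training started')
logger.warning('shown in red on supported terminals')
```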
#### File: zf/utils/mask_utils.py
```python
import numpy as np
from skimage import measure
from skimage.morphology import label
from scipy.ndimage.morphology import generate_binary_structure, binary_closing, \
binary_erosion, binary_dilation
def convert_one_hot(mask, s_idx, num_classes):
"""Convert mask label into one hot coding."""
masks = []
for i_label in range(s_idx, num_classes + s_idx):
mask_i = mask == i_label
masks.append(mask_i)
mask_czyx = np.stack(masks, axis=0)
mask_czyx = mask_czyx.astype(np.float32)
return mask_czyx
def convert_ribCenterline_one_hot(mask, s_idx, num_classes):
"""Convert rib and centerline mask into one hot coding."""
masks = []
for i_label in range(s_idx, num_classes + s_idx):
mask_i = mask.copy()
if i_label == 1:
mask_i[mask_i != 0] = 1
else:
mask_i[mask_i != i_label] = 0
mask_i[mask_i == i_label] = 1
masks.append(mask_i)
mask_czyx = np.stack(masks, axis=0)
mask_czyx = mask_czyx.astype(np.float32)
return mask_czyx
def extract_bbox(mask):
"""extract object bbox"""
t_mask = mask > 0
zz, yy, xx = np.where(t_mask)
bbox = np.array([[np.min(zz), np.max(zz)], [np.min(yy), np.max(yy)],
[np.min(xx), np.max(xx)]])
return bbox
def dilation_mask(mask, itrs=2):
struct = generate_binary_structure(3, 2)
dilated_mask = binary_dilation(mask, structure=struct, iterations=itrs)
return dilated_mask
def erosion_mask(mask, itrs=2):
struct = generate_binary_structure(3, 2)
erosion_mask = binary_erosion(mask, structure=struct, iterations=itrs)
return erosion_mask
def remove_small_connected_object(mask, area_least=10):
"""remove small connected object"""
"""
Args:
mask(numpy array): mask array.
area_least(int): remain the connected objects that area exceed this threshold.
Return:
res_mask(numpy array): re-define mask array.
"""
mask[mask != 0] = 1
labeled_mask, num = label(mask, neighbors=4, background=0, return_num=True)
region_props = measure.regionprops(labeled_mask)
res_mask = np.zeros_like(mask)
for i in range(1, num + 1):
t_area = region_props[i - 1].area
if t_area > area_least:
res_mask[labeled_mask == i] = 1
return res_mask
def extract_largest_connected_object(mask, area_least=10):
"""extract largest connected object"""
"""
Args:
mask(numpy array): mask array.
area_least(int): remain the connected objects that area exceed this threshold.
Return:
res_mask(numpy array): re-define mask array.
centroid(list, size=3): the centroid of the largest connected object.
"""
mask[mask != 0] = 1
labeled_mask, num = label(mask, neighbors=4, background=0, return_num=True)
region_props = measure.regionprops(labeled_mask)
max_area = 0
max_index = 0
centroid = [0, 0, 0]
for i in range(1, num+1):
t_area = region_props[i-1].area
if t_area > max_area:
max_area = t_area
max_index = i
centroid = region_props[i-1].centroid
if max_area >= area_least:
res_mask = labeled_mask == max_index
else:
res_mask = np.zeros_like(labeled_mask)
res_mask = res_mask.astype(np.uint8)
return res_mask, centroid
def keep_KthLargest_connected_object(mask, kth):
"""keep kth largest connected object"""
mask[mask != 0] = 1
labeled_mask, num = label(mask, neighbors=4, background=0, return_num=True)
region_props = measure.regionprops(labeled_mask)
areas = {}
for i in range(1, num + 1):
t_area = region_props[i - 1].area
areas[str(i)] = t_area
candidates = sorted(areas.items(), key=lambda item: item[1], reverse=True)
res_mask = np.zeros_like(mask)
    for i in range(kth):
        # keys in `areas` are stored as strings, so cast back to int before comparing with the labeled mask
        res_mask[labeled_mask == int(candidates[i][0])] = 1
return res_mask
def smooth_mask(mask, area_least=10, is_binary_close=False):
"""smooth mask by remove small connected object and binary closing"""
"""
Args:
mask(numpy array): mask array.
area_least(int): remain the connected objects that area exceed this threshold.
is_binary_close(bool): whether run binary closing.
Return:
mask(numpy array): re-define mask array.
"""
mask = mask.astype(np.uint8)
mask = remove_small_connected_object(mask, area_least)
if is_binary_close:
struct = generate_binary_structure(3, 2)
mask = binary_closing(mask, structure=struct, iterations=3)
mask = mask.astype(np.uint8)
return mask
def extract_left_right_bbox(mask):
"""extract the left and right lung box"""
# connected region analysis.
mask[mask != 0] = 1
labeled_mask, num = label(mask, neighbors=8, background=0, return_num=True)
region_props = measure.regionprops(labeled_mask)
# extract object bbox.
objects_bbox_min = []
objects_bbox_max = []
for i in range(num):
props = region_props[i]
bbox = props.bbox
objects_bbox_min.append(bbox[2])
objects_bbox_max.append(bbox[5])
objects_bbox_min.sort()
objects_bbox_max.sort()
# find the right boundary of left lung and the left boundary of right lung.
left_diff = 0
right_diff = 0
left_idx = num // 2 + 1
right_idx = num // 2 - 1
for i in range(int(num * 0.2), int(num * 0.8)+1):
diff_min = objects_bbox_min[i] - objects_bbox_min[i - 1]
diff_max = objects_bbox_max[i] - objects_bbox_max[i - 1]
if diff_min >= left_diff:
left_diff = diff_min
left_idx = i
if diff_max >= right_diff:
right_diff = diff_max
right_idx = i
res = [objects_bbox_min[left_idx], objects_bbox_max[right_idx-1]]
return res
def find_rib_bound(objects_centroid, interval_value=10):
"""find the FPs of rib mask along the x axis."""
"""
Args:
objects_centroid(dict): eg. {1: 100, ...} key:rib label, value:rib centroid along the x axis.
        interval_value(int): the expected spacing between two adjacent ribs along the x axis (used as a lower bound).
Return:
out_bound_idx(list): the idx of objects which centroids are out of boundary.
"""
num = len(objects_centroid)
sorted_centroid = sorted(objects_centroid.items(), key=lambda item: item[1], reverse=False)
axis_diff = [sorted_centroid[i][1] - sorted_centroid[i - 1][1] for i in range(1, num)]
sorted_axis_diff = sorted(np.array(axis_diff))
axis_diff_median = sorted_axis_diff[int(3 / 4 * num)]
axis_diff_median = max(axis_diff_median, interval_value)
low_bound_idx = num
low_diff_value = 0
for i in range((num - 1) // 3):
if axis_diff[i] > axis_diff_median * 3 and axis_diff[i] > low_diff_value:
low_bound_idx = i
low_diff_value = axis_diff[i]
high_bound_idx = 0
high_diff_value = 0
for j in range((num - 1) // 3):
if axis_diff[num - 2 - j] > axis_diff_median * 3 and axis_diff[num - 2 - j] > high_diff_value:
high_bound_idx = num - 1 - j
high_diff_value = axis_diff[num - 2 - j]
out_bound_idx = []
if low_bound_idx != num:
out_low_bound_idx = [sorted_centroid[i][0] for i in range(low_bound_idx)]
out_bound_idx.extend(out_low_bound_idx)
if high_bound_idx != 0:
out_high_bound_idx = [sorted_centroid[i][0] for i in range(high_bound_idx, num)]
out_bound_idx.extend(out_high_bound_idx)
return out_bound_idx, axis_diff_median
```
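A small sketch of the purely numpy/scipy helpers above on a toy two-label mask (the connected-component helpers additionally rely on the skimage `label` signature used in this file).
```python
import numpy as np

mask = np.zeros((8, 32, 32), dtype=np.uint8)   # toy 3-D mask with two labels
mask[2:5, 4:10, 4:10] = 1
mask[5:7, 20:28, 20:28] = 2

one_hot = convert_one_hot(mask, s_idx=1, num_classes=2)   # [2, z, y, x], one channel per label
bbox = extract_bbox(mask)                                  # [[z_min, z_max], [y_min, y_max], [x_min, x_max]]
dilated = dilation_mask(mask, itrs=1)                      # binary dilation, 3x3x3 full-connectivity structure
print(one_hot.shape, bbox.tolist(), dilated.sum())
```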
#### File: MedCommon/utils/data_aug_utils.py
```python
import os
import sys
root = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
print('solution root:\t', root)
sys.path.append(root)
sys.path.append(os.path.join(root, 'external_lib/torchio'))
import torchio as tio
class DATA_AUGMENTATION_UTILS:
def __init__(self):
pass
@staticmethod
def get_common_transform(image_shape, type='GAN'):
default_transform = None
if type == 'GAN':
default_transform = tio.Compose([
tio.RandomFlip(axes=[0,1,2]),
tio.RandomAnisotropy(downsampling=(1,2.5),scalars_only=False,p=0.25), # make images look anisotropic 25% of times
tio.OneOf({ # either
tio.RandomCropOrPad((image_shape[0], image_shape[1], image_shape[2])): 0.8,
tio.CropOrPad((image_shape[0], image_shape[1], image_shape[2])):0.2, # or random elastic deformation
}),
tio.RandomBlur(p=0.25), # blur 25% of times
tio.RandomNoise(p=0.25)
])
elif type == 'DEBUG':
default_transform = tio.Compose([
tio.RandomAnisotropy(p=0.999), # make images look anisotropic 25% of times
tio.CropOrPad((image_shape[0], image_shape[1], image_shape[2]))
])
elif type == 'GAN_INFERENCE':
default_transform = tio.Compose([
tio.CropOrPad((image_shape[0], image_shape[1], image_shape[2]))
])
else:
default_transform = tio.Compose([
tio.RandomFlip(axes=[0,1,2]),
tio.RandomAnisotropy(p=0.25), # make images look anisotropic 25% of times
tio.CropOrPad((image_shape[0], image_shape[1], image_shape[2])), # tight crop around brain
tio.RandomBlur(p=0.99999), # blur 25% of times
tio.RandomNoise(p=0.25), # Gaussian noise 25% of times
tio.OneOf({ # either
tio.RandomAffine(): 0.8, # random affine
tio.RandomElasticDeformation(): 0.2, # or random elastic deformation
}, p=0.8), # applied to 80% of images
tio.RandomBiasField(p=0.3), # magnetic field inhomogeneity 30% of times
tio.OneOf({ # either
tio.RandomMotion(): 1, # random motion artifact
tio.RandomSpike(): 2, # or spikes
tio.RandomGhosting(): 2, # or ghosts
}, p=0.5),
])
return default_transform
```
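A minimal sketch of applying the inference-time transform to a random volume, assuming the bundled torchio behaves like the public torchio API (`Subject`/`ScalarImage`); the `GAN_INFERENCE` pipeline only center-crops or pads to the requested shape.
```python
import torch
import torchio as tio

transform = DATA_AUGMENTATION_UTILS.get_common_transform([64, 64, 64], type='GAN_INFERENCE')
subject = tio.Subject(src=tio.ScalarImage(tensor=torch.rand(1, 80, 80, 80)))
augmented = transform(subject)
print(augmented['src'].data.shape)   # torch.Size([1, 64, 64, 64]) after CropOrPad
```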
#### File: MedCommon/utils/datasets_utils.py
```python
import os
import sys
import numpy as np
from tqdm import tqdm
from glob import glob
import SimpleITK as sitk
import time
COMMON_ROOT = os.path.join(os.path.dirname(__file__), os.path.pardir)
sys.path.append(COMMON_ROOT)
from utils.data_io_utils import DataIO
class DatasetsUtils:
def __init__(self):
pass
@staticmethod
def get_random_crop_boundary_3d(crop_size, cropped_boundary):
'''
        note: crop_size should be <= the size of the image to be cropped
        crop_size: [dim_z, dim_y, dim_x], the target size
        cropped_boundary: [min_z, min_y, min_x, max_z, max_y, max_x], the region the crop is restricted to; if the whole image may be cropped, pass the full image extent
'''
padding = 1
[img_d, img_h, img_w] = [cropped_boundary[3]+padding, cropped_boundary[4]+padding, cropped_boundary[5]+padding]
[input_d, input_h, input_w] = crop_size
z_min_upper = img_d - input_d
y_min_upper = img_h - input_h
x_min_upper = img_w - input_w
Z_min = np.random.randint(cropped_boundary[0], z_min_upper) if cropped_boundary[0] < z_min_upper else 0
Y_min = np.random.randint(cropped_boundary[1], y_min_upper) if cropped_boundary[1] < y_min_upper else 0
X_min = np.random.randint(cropped_boundary[2], x_min_upper) if cropped_boundary[2] < x_min_upper else 0
Z_max = Z_min + input_d
Y_max = Y_min + input_h
X_max = X_min + input_w
return Z_min, Y_min, X_min, Z_max, Y_max, X_max
@staticmethod
def get_center_crop_boundary_3d(crop_size, cropped_boundary):
'''
        note: crop_size should be <= the size of the image to be cropped
        crop_size: [dim_z, dim_y, dim_x], the target size
        cropped_boundary: [min_z, min_y, min_x, max_z, max_y, max_x], the region the crop is restricted to; if the whole image may be cropped, pass the full image extent
'''
padding = 1
[img_d, img_h, img_w] = [cropped_boundary[3]+padding, cropped_boundary[4]+padding, cropped_boundary[5]+padding]
center_d = (cropped_boundary[3] + cropped_boundary[0] + padding) // 2
center_h = (cropped_boundary[4] + cropped_boundary[1] + padding) // 2
center_w = (cropped_boundary[5] + cropped_boundary[2] + padding) // 2
[input_d, input_h, input_w] = crop_size
Z_min = center_d-input_d//2
Y_min = center_h-input_h//2
X_min = center_w-input_w//2
Z_max = Z_min + input_d
Y_max = Y_min + input_h
X_max = X_min + input_w
return Z_min, Y_min, X_min, Z_max, Y_max, X_max
@staticmethod
def expand_to_multiples_of_n(in_arr, n, full_val=0):
'''
n is multiples of 2
'''
[d,h,w] = in_arr.shape
new_d = ((d+n-1)//n) * n
new_h = ((h+n-1)//n) * n
new_w = ((w+n-1)//n) * n
new_arr = np.full([new_d, new_h, new_w], full_val, dtype=in_arr.dtype)
beg_d = new_d//2 - d//2
beg_h = new_h//2 - h//2
beg_w = new_w//2 - w//2
new_arr[beg_d:beg_d+d, beg_h:beg_h+h, beg_w:beg_w+w] = in_arr
return new_arr
@staticmethod
def collapse_multiples_of_n(in_arr, ori_arr, n):
[d,h,w] = ori_arr.shape
new_d = ((d+n-1)//n) * n
new_h = ((h+n-1)//n) * n
new_w = ((w+n-1)//n) * n
assert (new_d, new_h, new_w) == in_arr.shape
beg_d = new_d//2 - d//2
beg_h = new_h//2 - h//2
beg_w = new_w//2 - w//2
new_arr = np.zeros([d, h, w], dtype=in_arr.dtype)
new_arr[:, :, :] = in_arr[beg_d:beg_d+d, beg_h:beg_h+h, beg_w:beg_w+w]
return new_arr
@staticmethod
def extend_image_mask_boundary_for_seg(image_arr, mask_arr, dst_size, boundary_value=0):
'''
        1. make sure the image extent (image_arr.shape) is <= the target extent (dst_size)
'''
assert image_arr.shape == mask_arr.shape
if image_arr.shape == dst_size:
return image_arr, mask_arr
z_min_upper = dst_size[0] - image_arr.shape[0]
y_min_upper = dst_size[1] - image_arr.shape[1]
x_min_upper = dst_size[2] - image_arr.shape[2]
z_min = np.random.randint(0, z_min_upper) if 0 < z_min_upper else 0
y_min = np.random.randint(0, y_min_upper) if 0 < y_min_upper else 0
x_min = np.random.randint(0, x_min_upper) if 0 < x_min_upper else 0
z_max = z_min + image_arr.shape[0]
y_max = y_min + image_arr.shape[1]
x_max = x_min + image_arr.shape[2]
image_arr_new = np.full(dst_size, boundary_value, dtype=image_arr.dtype)
mask_arr_new = np.full(dst_size, 0, dtype=mask_arr.dtype)
image_arr_new[z_min:z_max, y_min:y_max, x_min:x_max] = image_arr[:,:,:]
mask_arr_new[z_min:z_max, y_min:y_max, x_min:x_max] = mask_arr[:,:,:]
return image_arr_new, mask_arr_new
@staticmethod
def crop_image_mask_with_padding(image_arr, mask_arr, dst_size, boundary_value=0):
'''
        1. pad the image up to a multiple of dst_size, then take a random crop
'''
assert image_arr.shape == mask_arr.shape
if image_arr.shape == dst_size:
return image_arr, mask_arr
new_size = []
for i in range(3):
new_size.append(((image_arr.shape[i]+dst_size[i]-1)//dst_size[i])*dst_size[i])
new_image_arr = np.full(new_size, boundary_value, dtype=image_arr.dtype)
new_mask_arr = np.full(new_size, 0, dtype=mask_arr.dtype)
new_d, new_h, new_w = new_size
d,h,w = image_arr.shape
beg_d = new_d//2 - d//2
beg_h = new_h//2 - h//2
beg_w = new_w//2 - w//2
new_image_arr[beg_d:beg_d+d, beg_h:beg_h+h, beg_w:beg_w+w] = image_arr
new_mask_arr[beg_d:beg_d+d, beg_h:beg_h+h, beg_w:beg_w+w] = mask_arr
z_lower_min = np.random.randint(0, beg_d) if 0 < beg_d else 0
y_lower_min = np.random.randint(0, beg_h) if 0 < beg_h else 0
x_lower_min = np.random.randint(0, beg_w) if 0 < beg_w else 0
z_upper_min = np.random.randint(beg_d+d-dst_size[0], new_d-dst_size[0]) if beg_d+d-dst_size[0] < new_d-dst_size[0] else beg_d+d-dst_size[0]
y_upper_min = np.random.randint(beg_h+h-dst_size[1], new_h-dst_size[1]) if beg_h+h-dst_size[1] < new_h-dst_size[1] else beg_h+h-dst_size[1]
x_upper_min = np.random.randint(beg_w+w-dst_size[2], new_w-dst_size[2]) if beg_w+w-dst_size[2] < new_w-dst_size[2] else beg_w+w-dst_size[2]
        # clamp the upper-corner candidates (they can go negative when the image is smaller than dst_size)
        z_upper_min = max(z_upper_min, 0)
        y_upper_min = max(y_upper_min, 0)
        x_upper_min = max(x_upper_min, 0)
z_min = np.random.choice([z_lower_min, z_upper_min])
y_min = np.random.choice([y_lower_min, y_upper_min])
x_min = np.random.choice([x_lower_min, x_upper_min])
z_min = z_min if d > dst_size[0] else 0
y_min = y_min if h > dst_size[1] else 0
x_min = x_min if w > dst_size[2] else 0
z_max = z_min + dst_size[0]
y_max = y_min + dst_size[1]
x_max = x_min + dst_size[2]
# cropped_image = new_image_arr[z_min:z_max, y_min:y_max, x_min:x_max]
# cropped_mask = new_mask_arr[z_min:z_max, y_min:y_max, x_min:x_max]
# print(dst_size)
# assert list(cropped_image.shape) == dst_size
# print('{}\t{}\t{}\t{}\t{}\t{}'.format(z_min, z_max, y_min, y_max, x_min, x_max))
return new_image_arr[z_min:z_max, y_min:y_max, x_min:x_max], new_mask_arr[z_min:z_max, y_min:y_max, x_min:x_max]
@staticmethod
def cut_image_into_blocks_by_sliding_window(image_arr, crop_size, overlap=[0,0,0]):
'''
        cut a 3-D image into blocks of size crop_size with a sliding window
        todo: an overlapping version is not provided yet
'''
# src_data = sitk.GetArrayFromImage(sitk_image)
src_data = image_arr
# padding to 32xn/Nxn
padding = 32
[pd, ph, pw] = crop_size
[d,h,w] = src_data.shape
new_d = ((d+pd-1)//pd)*pd
new_h = ((h+ph-1)//ph)*ph
new_w = ((w+pw-1)//pw)*pw
if not np.all([d,h,w]==np.array([new_d, new_h, new_w])):
new_arr = np.zeros([new_d, new_h, new_w])
new_arr[:d,:h,:w] = src_data
else:
new_arr = src_data
cropped_srcs = []
d_cnt = (d+pd-1)//pd
h_cnt = (h+ph-1)//ph
w_cnt = (w+pw-1)//pw
for iz in range(d_cnt):
for iy in range(h_cnt):
for ix in range(w_cnt):
cropped_src = new_arr[iz*pd:(iz+1)*pd, iy*ph:(iy+1)*ph, ix*pw:(ix+1)*pw]
# cropped_src = torch.from_numpy(cropped_src).float()
# cropped_src = torch.unsqueeze(cropped_src, axis=0)
# cropped_src = torch.unsqueeze(cropped_src, axis=0)
cropped_srcs.append(cropped_src)
return cropped_srcs, d_cnt, h_cnt, w_cnt
@staticmethod
def compose_blocks_cutted_by_sliding_window_into_image(arr, blocks_dim, crop_size, ori_size, overlay=[0, 0, 0]):
'''
        reassemble blocks produced by the sliding-window cutter above back into the full image
        todo: an overlapping version is not provided yet
'''
assert len(arr) == blocks_dim[0] * blocks_dim[1] * blocks_dim[2]
dim = np.array(blocks_dim)*np.array(crop_size)
dst_arr = np.zeros(dim)
[d_cnt, h_cnt, w_cnt] = blocks_dim
[pd, ph, pw] = crop_size
for iz in range(d_cnt):
for iy in range(h_cnt):
for ix in range(w_cnt):
dst_arr[iz*pd:(iz+1)*pd, iy*ph:(iy+1)*ph, ix*pw:(ix+1)*pw] = arr[iz*h_cnt*w_cnt+iy*w_cnt+ix]
return dst_arr[:ori_size[0], :ori_size[1], :ori_size[2]]
@staticmethod
def resample_image_unsame_resolution(image, dst_size, interpolation_mode=sitk.sitkNearestNeighbor):
'''
        note: this function does not actually unify the resolution (it only resamples to the target size)
'''
img = image
# print(img.GetSize(), img.GetSpacing())
res_factor = list()
for s_size, d_size in zip(img.GetSize(), dst_size):
res_factor.append(s_size / d_size)
# print('res_factor:{}'.format(res_factor))
dst_spacing = list()
for spacing, factor in zip(img.GetSpacing(), res_factor):
dst_spacing.append(spacing * factor)
# print('dst_spacing:{}'.format(dst_spacing))
resampler = sitk.ResampleImageFilter()
resampler.SetInterpolator(interpolation_mode)
resampler.SetOutputDirection(img.GetDirection())
resampler.SetOutputOrigin(img.GetOrigin())
resampler.SetOutputSpacing(dst_spacing)
resampler.SetSize(dst_size)
img_res = resampler.Execute(img)
return img_res
@staticmethod
def restore_ori_image_from_resampled_image(resampled_image, ori_ref_image, interpolation_mode=sitk.sitkNearestNeighbor):
        '''
        resample `resampled_image` back onto the grid (size/spacing/origin/direction) of `ori_ref_image`
        '''
        resampler = sitk.ResampleImageFilter()
        resampler.SetInterpolator(interpolation_mode)
resampler.SetOutputDirection(ori_ref_image.GetDirection())
resampler.SetOutputOrigin(ori_ref_image.GetOrigin())
resampler.SetOutputSpacing(ori_ref_image.GetSpacing())
resampler.SetSize(ori_ref_image.GetSize())
img_res = resampler.Execute(resampled_image)
return img_res
@staticmethod
def resample_image_mask_unsame_resolution_onecase(image_file, mask_file, dst_image_file, dst_mask_file, dst_size, is_dcm=False):
if is_dcm:
image_data = DataIO.load_dicom_series(image_file)
else:
image_data = DataIO.load_nii_image(image_file)
mask_data = DataIO.load_nii_image(mask_file)
resampled_image = DatasetsUtils.resample_image_unsame_resolution(image_data['sitk_image'], dst_size)
resampled_mask = DatasetsUtils.resample_image_unsame_resolution(mask_data['sitk_image'], dst_size)
        # note: for unknown reasons the image and mask metadata can differ slightly after annotation, possibly a precision issue; to be verified
resampled_mask.CopyInformation(resampled_image)
os.makedirs(os.path.dirname(dst_image_file), exist_ok=True)
os.makedirs(os.path.dirname(dst_mask_file), exist_ok=True)
sitk.WriteImage(resampled_image, dst_image_file)
sitk.WriteImage(resampled_mask, dst_mask_file)
@staticmethod
def resample_image_mask_unsame_resolution_singletask(series_uids, image_root, mask_root,
dst_image_root, dst_mask_root, dst_size,
image_postfix='', mask_postfix='',
is_dcm=False):
for series_uid in tqdm(series_uids):
if is_dcm:
image_file = os.path.join(image_root, '{}'.format(series_uid))
else:
image_file = os.path.join(image_root, '{}{}'.format(series_uid, image_postfix))
mask_file = os.path.join(mask_root, '{}{}'.format(series_uid, mask_postfix))
dst_image_file = os.path.join(dst_image_root, '{}.nii.gz'.format(series_uid))
dst_mask_file = os.path.join(dst_mask_root, '{}.nii.gz'.format(series_uid))
DatasetsUtils.resample_image_mask_unsame_resolution_onecase(image_file, mask_file, dst_image_file, dst_mask_file, dst_size, is_dcm)
@staticmethod
def resample_image_mask_unsame_resolution_multiprocess(image_root, mask_root,
dst_image_root, dst_mask_root, dst_size,
image_postfix='', mask_postfix='',
process_num=12, is_dcm=False):
series_uids = []
if is_dcm:
series_uids = os.listdir(image_root)
else:
series_uids = glob(os.path.join(image_root, '*{}'.format(image_postfix)))
series_uids = [os.path.basename(i).replace(image_postfix, '') for i in series_uids]
# print(series_uids)
num_per_process = (len(series_uids) + process_num - 1)//process_num
# this for single thread to debug
# DatasetsUtils.resample_image_mask_unsame_resolution_singletask(series_uids, image_root, mask_root,
# dst_image_root, dst_mask_root, dst_size,
# image_postfix, mask_postfix,
# is_dcm)
# this for run
import multiprocessing
from multiprocessing import Process
multiprocessing.freeze_support()
pool = multiprocessing.Pool()
results = []
print(len(series_uids))
for i in range(process_num):
sub_series_uids = series_uids[num_per_process*i:min(num_per_process*(i+1), len(series_uids))]
print(len(sub_series_uids))
result = pool.apply_async(DatasetsUtils.resample_image_mask_unsame_resolution_singletask,
args=(sub_series_uids, image_root, mask_root,
dst_image_root, dst_mask_root, dst_size,
image_postfix, mask_postfix,
is_dcm))
results.append(result)
pool.close()
pool.join()
@staticmethod
def split_ds(image_root, out_config_dir, train_ratio=0.7, val_ratio=0.2):
'''
debug cmd: split_ds('/data/medical/lung/changzheng/airway/airway_20201030/images', '/data/medical/lung/changzheng/airway/airway_20201030/config')
out_config_dir listed as follows:
tree -L 1
.
├── test.txt
├── train.txt
└── val.txt
'less train.txt' as follows:
1.2.840.113704.1.111.5624.1392092458.10
1.2.840.113704.1.111.6896.1389252289.9
1.3.46.670589.33.1.63725405821017542900002.4919856832254375598
1.2.840.113704.1.111.2452.1387439529.10
1.2.840.113704.1.111.6756.1592183917.11
1.2.840.113704.1.111.8660.1421889850.10
1.2.840.113704.1.111.11692.1420599548.14
1.3.46.670589.33.1.63722560084727458900002.4851763629495772847
1.2.840.113704.1.111.13172.1389599763.7
1.3.46.670589.33.1.63700781943575774800001.5142437508376053996
1.2.840.113704.1.111.10192.1571886399.11
1.2.840.113704.1.111.9536.1577060319.15
1.2.840.113704.1.111.1384.1392885868.9
train.txt (END)
'''
series_uids = os.listdir(image_root)
series_uids = [i.replace('.nii.gz', '') for i in series_uids]
np.random.shuffle(series_uids)
train_pos = int(len(series_uids)*train_ratio)
val_pos = int(len(series_uids)*(train_ratio+val_ratio))
train_series_uids = series_uids[:train_pos]
val_series_uids = series_uids[train_pos:val_pos]
test_series_uids = series_uids[val_pos:]
os.makedirs(out_config_dir, exist_ok=True)
with open(os.path.join(out_config_dir, 'train.txt'), 'w') as f:
f.write('\n'.join(train_series_uids))
with open(os.path.join(out_config_dir, 'val.txt'), 'w') as f:
f.write('\n'.join(val_series_uids))
with open(os.path.join(out_config_dir, 'test.txt'), 'w') as f:
f.write('\n'.join(test_series_uids))
@staticmethod
def pairs_split_3d_to_2d_slice_onecase(src_file, dst_file, out_dir, non_zero_pixels=10):
src_image = sitk.ReadImage(src_file)
src_data = sitk.GetArrayFromImage(src_image)
dst_image = sitk.ReadImage(dst_file)
dst_data = sitk.GetArrayFromImage(dst_image)
os.makedirs(out_dir, exist_ok=True)
for z in range(dst_data.shape[0]):
tmp_arr = np.zeros(dst_data[z].shape)
tmp_arr[dst_data[z] != 0] = 1
            if np.sum(tmp_arr) < non_zero_pixels:
out_src_file = os.path.join(out_dir, 'src_{}_neg.npy'.format(z))
out_dst_file = os.path.join(out_dir, 'dst_{}_neg.npy'.format(z))
else:
out_src_file = os.path.join(out_dir, 'src_{}_pos.npy'.format(z))
out_dst_file = os.path.join(out_dir, 'dst_{}_pos.npy'.format(z))
np.save(out_src_file, src_data[z])
np.save(out_dst_file, dst_data[z])
@staticmethod
def pairs_split_3d_to_2d_slice_singletask(series_uids, in_dir, out_dir, src_pattern, dst_pattern):
for series_uid in tqdm(series_uids):
src_file = os.path.join(in_dir, series_uid, src_pattern)
dst_file = os.path.join(in_dir, series_uid, dst_pattern)
out_series_dir = os.path.join(out_dir, series_uid)
try:
DatasetsUtils.pairs_split_3d_to_2d_slice_onecase(src_file, dst_file, out_series_dir)
except Exception as e:
print(e)
print('====> Error when process {}!'.format(series_uid))
@staticmethod
def pairs_split_3d_to_2d_slice_multiprocess(indir, outdir, src_pattern, dst_pattern, process_num=5):
series_uids = os.listdir(indir)
# print(series_uids)
num_per_process = (len(series_uids) + process_num - 1)//process_num
# this for single thread to debug
# DatasetsUtils.pairs_split_3d_to_2d_slice_singletask(series_uids, indir, outdir,
# src_pattern, dst_pattern)
# this for run
import multiprocessing
from multiprocessing import Process
multiprocessing.freeze_support()
pool = multiprocessing.Pool()
results = []
print(len(series_uids))
for i in range(process_num):
sub_series_uids = series_uids[num_per_process*i:min(num_per_process*(i+1), len(series_uids))]
print(len(sub_series_uids))
result = pool.apply_async(DatasetsUtils.pairs_split_3d_to_2d_slice_singletask,
args=(sub_series_uids, indir, outdir,
src_pattern, dst_pattern))
results.append(result)
pool.close()
pool.join()
@staticmethod
def sitk_resample_to_image(image, reference_image, interpolator, default_value=0., transform=None,
output_pixel_type=None):
if transform is None:
transform = sitk.Transform()
transform.SetIdentity()
if output_pixel_type is None:
output_pixel_type = image.GetPixelID()
resample_filter = sitk.ResampleImageFilter()
resample_filter.SetInterpolator(interpolator)
resample_filter.SetTransform(transform)
resample_filter.SetOutputPixelType(output_pixel_type)
resample_filter.SetDefaultPixelValue(default_value)
resample_filter.SetReferenceImage(reference_image)
return resample_filter.Execute(image)
@staticmethod
def calculate_origin_offset(new_spacing, old_spacing):
return np.subtract(new_spacing, old_spacing)/2
@staticmethod
def sitk_new_blank_image(size, spacing, direction, origin, default_value=0.):
        image = sitk.GetImageFromArray(np.ones(size, dtype=float).T * default_value)
image.SetSpacing(spacing)
image.SetDirection(direction)
image.SetOrigin(origin)
return image
@staticmethod
def sitk_resample_to_spacing(image, new_spacing=(1.0, 1.0, 1.0), interpolator=sitk.sitkLinear, default_value=0.):
zoom_factor = np.divide(image.GetSpacing(), new_spacing)
new_size = np.asarray(np.ceil(np.round(np.multiply(zoom_factor, image.GetSize()), decimals=5)), dtype=np.int16)
offset = DatasetsUtils.calculate_origin_offset(new_spacing, image.GetSpacing())
reference_image = DatasetsUtils.sitk_new_blank_image(size=new_size, spacing=new_spacing, direction=image.GetDirection(),
origin=image.GetOrigin() + offset, default_value=default_value)
return DatasetsUtils.sitk_resample_to_image(image, reference_image, interpolator=interpolator, default_value=default_value)
@staticmethod
def resample_unified_spacing_x(image, interpolator=sitk.sitkLinear, default_value=0.):
        '''
        Resample the image to an isotropic spacing, using the x-direction spacing as the reference.
        '''
        spacing = image.GetSpacing()
        x_spac = spacing[0]
new_image = DatasetsUtils.sitk_resample_to_spacing(image, [x_spac, x_spac, x_spac], interpolator, default_value)
return new_image
@staticmethod
def resample_unified_spacing_x_default_min(image, interpolator=sitk.sitkLinear):
        '''
        Resample the image to an isotropic spacing based on the x-direction spacing;
        blank (padded) regions default to the minimum value of the image.
        '''
min_value = sitk.GetArrayFromImage(image).min()
new_image = DatasetsUtils.resample_unified_spacing_x(image, interpolator, float(min_value))
return new_image
def test_resample_image_mask_unsame_resolution_multiprocess():
    '''
    Section divider: below is the processing for the cardiac chambers.
    '''
image_root = '/data/medical/cardiac/chamber/seg/chamber_seg/images'
mask_root = '/data/medical/cardiac/chamber/seg/chamber_seg/masks'
# dst_image_root = '/data/medical/cardiac/chamber/seg/chamber_seg_resampled_unified/images'
# dst_mask_root = '/data/medical/cardiac/chamber/seg/chamber_seg_resampled_unified/masks'
# dst_size = [128, 128, 128]
dst_image_root = '/data/medical/cardiac/chamber/seg/chamber_seg_resampled_unified_256/images'
dst_mask_root = '/data/medical/cardiac/chamber/seg/chamber_seg_resampled_unified_256/masks'
dst_size = [256, 256, 256]
image_postfix = '.nii.gz'
mask_postfix = '.nii.gz'
process_num=12
is_dcm = False
DatasetsUtils.resample_image_mask_unsame_resolution_multiprocess(
image_root, mask_root,
dst_image_root, dst_mask_root, dst_size,
image_postfix, mask_postfix, process_num, is_dcm)
    '''
    Section divider: below is the processing for the pericardium.
    '''
# image_root = '/data/medical/cardiac/seg/heart_hub/images'
# mask_root = '/data/medical/cardiac/seg/heart_hub/renamed_masks'
# dst_image_root = '/data/medical/cardiac/seg/heart_hub/resampled_unified_128/images'
# dst_mask_root = '/data/medical/cardiac/seg/heart_hub/resampled_unified_128/masks'
# dst_size = [128, 128, 128]
# image_postfix = ''
# mask_postfix = '.mha'
# process_num=12
# is_dcm = True
# DatasetsUtils.resample_image_mask_unsame_resolution_multiprocess(
# image_root, mask_root,
# dst_image_root, dst_mask_root, dst_size,
# image_postfix, mask_postfix, process_num, is_dcm)
def test_restore_ori_image_from_resampled_image():
    '''
    Section divider: segment the heart with the low-resolution (isotropic) model,
    then restore the predicted heart mask to the same size as the original image.
    '''
mask_file = '/data/medical/cardiac/chamber/seg/chamber_seg_resampled_unified/masks/1.3.12.2.1107.5.1.4.60320.30000015020300202700000017926.nii.gz'
ref_image_file = '/data/medical/cardiac/chamber/seg/chamber_seg/images/1.3.12.2.1107.5.1.4.60320.30000015020300202700000017926.nii.gz'
ref_mask_file = '/data/medical/cardiac/chamber/seg/chamber_seg/masks/1.3.12.2.1107.5.1.4.60320.30000015020300202700000017926.nii.gz'
resample_mask = sitk.ReadImage(mask_file)
ori_ref_image = sitk.ReadImage(ref_image_file)
ori_ref_mask = sitk.ReadImage(ref_mask_file)
# resample_mask = DatasetsUtils.resample_image_unsame_resolution(ori_ref_mask, [128, 128, 128])
restored_mask = DatasetsUtils.restore_ori_image_from_resampled_image(resample_mask, ori_ref_image)
tmp_out_dir = './tmp_out'
os.makedirs(tmp_out_dir, exist_ok=True)
out_restored_mask_file = os.path.join(tmp_out_dir, 'restored_mask.nii.gz')
out_ref_image_file = os.path.join(tmp_out_dir, 'ref_image.nii.gz')
out_ref_mask_file = os.path.join(tmp_out_dir, 'ref_mask.nii.gz')
sitk.WriteImage(restored_mask, out_restored_mask_file)
sitk.WriteImage(ori_ref_image, out_ref_image_file)
sitk.WriteImage(ori_ref_mask, out_ref_mask_file)
def test_cut_image_into_blocks_by_sliding_window():
beg = time.time()
infile = '/data/medical/lung/changzheng/airway/airway_20201030/pred_masks/1.2.840.113704.1.111.10192.1571886399.11/coarse_lung/cropped_image.nii.gz'
image = sitk.ReadImage(infile)
image_arr = sitk.GetArrayFromImage(image)
crop_size = [128, 128, 128]
cropped_arrs, d_cnt, h_cnt, w_cnt = DatasetsUtils.cut_image_into_blocks_by_sliding_window(image_arr, crop_size)
ori_size = list(image.GetSize())[::-1]
composed_arr = DatasetsUtils.compose_blocks_cutted_by_sliding_window_into_image(cropped_arrs, [d_cnt, h_cnt, w_cnt], crop_size, ori_size)
composed_image = sitk.GetImageFromArray(composed_arr)
composed_image.CopyInformation(image)
out_dir = './tmp_out'
os.makedirs(out_dir, exist_ok=True)
outfile = os.path.join(out_dir, 'test_cut_image_into_blocks_by_sliding_window.nii.gz')
sitk.WriteImage(composed_image, outfile)
end = time.time()
print('====> test_cut_image_into_blocks_by_sliding_window time elapsed:\t{:.3f}s'.format(end-beg))
def test_extend_image_mask_boundary_for_seg():
beg = time.time()
image_file = '/data/medical/lung/changzheng/airway/airway_20201030/paires_croped_by_coarse_lung_seg/images/1.2.840.113704.1.111.10192.1571886399.11.nii.gz'
mask_file = '/data/medical/lung/changzheng/airway/airway_20201030/paires_croped_by_coarse_lung_seg/masks/1.2.840.113704.1.111.10192.1571886399.11.nii.gz'
out_dir = './tmp_out/test_extend_image_mask_boundary_for_seg'
os.makedirs(out_dir, exist_ok=True)
sitk_image = sitk.ReadImage(image_file)
image_arr = sitk.GetArrayFromImage(sitk_image)
sitk_mask = sitk.ReadImage(mask_file)
mask_arr = sitk.GetArrayFromImage(sitk_mask)
for i in tqdm(range(10)):
out_image_file = os.path.join(out_dir, 'image_{}.nii.gz'.format(i))
out_mask_file = os.path.join(out_dir, 'mask_{}.nii.gz'.format(i))
dst_size = [128, 128, 128]
padding = [np.random.randint(0,5), np.random.randint(0,5), np.random.randint(0,5)]
crop_size = []
for i in range(3):
crop_size.append(dst_size[i] - padding[i])
        # randomly crop a sub-volume from the full image
cropped_boundary = [0,0,0, image_arr.shape[0]-1, image_arr.shape[1]-1, image_arr.shape[2]-1]
boundary = DatasetsUtils.get_random_crop_boundary_3d(crop_size, cropped_boundary)
Z_min, Y_min, X_min, Z_max, Y_max, X_max = boundary
cropped_image = image_arr[Z_min:Z_max, Y_min:Y_max, X_min:X_max]
cropped_mask = mask_arr[Z_min:Z_max, Y_min:Y_max, X_min:X_max]
cropped_image, cropped_mask = DatasetsUtils.extend_image_mask_boundary_for_seg(cropped_image, cropped_mask, dst_size)
out_sitk_image = sitk.GetImageFromArray(cropped_image)
out_sitk_mask = sitk.GetImageFromArray(cropped_mask)
sitk.WriteImage(out_sitk_image, out_image_file)
sitk.WriteImage(out_sitk_mask, out_mask_file)
end = time.time()
print('====> test_extend_image_mask_boundary_for_seg time elapsed:\t{:.3f}s'.format(end-beg))
def test_crop_image_mask_with_padding():
beg = time.time()
image_file = '/data/medical/lung/changzheng/airway/airway_20201030/paires_croped_by_coarse_lung_seg/images/1.2.840.113704.1.111.10192.1571886399.11.nii.gz'
mask_file = '/data/medical/lung/changzheng/airway/airway_20201030/paires_croped_by_coarse_lung_seg/masks/1.2.840.113704.1.111.10192.1571886399.11.nii.gz'
out_dir = './tmp_out/test_crop_image_mask_with_padding'
os.makedirs(out_dir, exist_ok=True)
sitk_image = sitk.ReadImage(image_file)
image_arr = sitk.GetArrayFromImage(sitk_image)
sitk_mask = sitk.ReadImage(mask_file)
mask_arr = sitk.GetArrayFromImage(sitk_mask)
    # 1. verify that cropping runs without errors
dst_size = [128, 128, 128]
# for i in tqdm(range(200)):
# DatasetsUtils.crop_image_mask_with_padding(image_arr, mask_arr, dst_size, boundary_value=0)
    # 2. check randomly sampled crop sizes
# for i in tqdm(range(2000)):
# dst_size = [np.random.randint(100, 300), np.random.randint(100, 300), np.random.randint(100, 300)]
# cropped_image, cropped_mask = DatasetsUtils.crop_image_mask_with_padding(image_arr, mask_arr, dst_size, boundary_value=0)
# assert list(cropped_image.shape) == dst_size
    # 3. save the augmented data for visual inspection
for i in tqdm(range(10)):
out_image_file = os.path.join(out_dir, 'image_{}.nii.gz'.format(i))
out_mask_file = os.path.join(out_dir, 'mask_{}.nii.gz'.format(i))
dst_size = [np.random.randint(100, 300), np.random.randint(100, 300), np.random.randint(100, 300)]
cropped_image, cropped_mask = DatasetsUtils.crop_image_mask_with_padding(image_arr, mask_arr, dst_size, boundary_value=0)
out_sitk_image = sitk.GetImageFromArray(cropped_image)
out_sitk_mask = sitk.GetImageFromArray(cropped_mask)
sitk.WriteImage(out_sitk_image, out_image_file)
sitk.WriteImage(out_sitk_mask, out_mask_file)
end = time.time()
print('====> test_crop_image_mask_with_padding time elapsed:\t{:.3f}s'.format(end-beg))
def test_pairs_split_3d_to_2d_slice_multiprocess():
    # Huadong COPD data
indir = '/data/medical/lung/copd/copd_412/images/out_pairs'
outdir = '/data/medical/hospital/huadong/copd/copd_gan/data_412/images/slice'
src_pattern = 'image_raw.nii.gz'
dst_pattern = 'substraction.nii.gz'
DatasetsUtils.pairs_split_3d_to_2d_slice_multiprocess(indir, outdir, src_pattern, dst_pattern)
def test_sitk_resample_to_spacing():
infile = '/data/medical/hospital/huadong/copd/copd_gan/data_412/images/inference_result_final_postprocessed_subtract/1.3.12.2.1107.5.1.4.73793.30000017062300142044800033167/inhale_lung.nii.gz'
out_dir = '/data/medical/tmp'
os.makedirs(out_dir, exist_ok=True)
out_file = os.path.join(out_dir, 'tmp.nii.gz')
image = sitk.ReadImage(infile)
    # resample to 1 mm isotropic spacing
new_image = DatasetsUtils.sitk_resample_to_spacing(image)
sitk.WriteImage(new_image, out_file)
    # resample to the x-direction spacing
new_image = DatasetsUtils.resample_unified_spacing_x(image)
sitk.WriteImage(new_image, out_file)
    # resample to the x-direction spacing; blank regions default to the image minimum
new_image = DatasetsUtils.resample_unified_spacing_x_default_min(image)
sitk.WriteImage(new_image, out_file)
if __name__ == '__main__':
test_resample_image_mask_unsame_resolution_multiprocess()
# test_restore_ori_image_from_resampled_image()
# test_cut_image_into_blocks_by_sliding_window()
# test_extend_image_mask_boundary_for_seg()
# test_crop_image_mask_with_padding()
# test_pairs_split_3d_to_2d_slice_multiprocess()
# test_sitk_resample_to_spacing()
```
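The file-based tests above exercise the sliding-window helpers against volumes on disk. As a quicker, self-contained illustration of the same cut/compose round trip, the sketch below (not part of the repository; it assumes the module is importable as `utils.datasets_utils`, the import path used elsewhere in this repo) runs on a synthetic NumPy array:
```python
import numpy as np
from utils.datasets_utils import DatasetsUtils

# Synthetic stand-in for a CT volume, laid out as (z, y, x)
image_arr = np.random.randint(-1000, 400, size=(200, 180, 160)).astype(np.int16)
crop_size = [128, 128, 128]

# Cut into fixed-size blocks (a model would normally run on each block here),
# then stitch the blocks back to the original volume size.
blocks, d_cnt, h_cnt, w_cnt = DatasetsUtils.cut_image_into_blocks_by_sliding_window(image_arr, crop_size)
restored = DatasetsUtils.compose_blocks_cutted_by_sliding_window_into_image(
    blocks, [d_cnt, h_cnt, w_cnt], crop_size, list(image_arr.shape))
print(image_arr.shape, restored.shape)  # the composed array should come back at the original size
```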
#### File: MedCommon/utils/detection_utils.py
```python
import os
import sys
import numpy as np
import torch
class DETECTION_UTILS:
def __init__(self) -> None:
pass
@staticmethod
def calc_brick_volume(bricks):
'''
bricks: [N, x0, y0, z0, x1, y1, z1]
'''
delta_bricks = bricks[:, 3:] - bricks[:,:3]
# assert np.all(delta_bricks > 0)
        delta_bricks = delta_bricks.clip(min=0)  # clip() returns a new array; keep the result so negative extents become zero
volumes = delta_bricks[:,0] * delta_bricks[:,1] * delta_bricks[:,2]
return volumes
@staticmethod
def calc_brick_iou(bricks1, bricks2):
'''
boxes: [N, x0,y0,z0,x1,y1,z1]
'''
v1 = DETECTION_UTILS.calc_brick_volume(bricks1)
v2 = DETECTION_UTILS.calc_brick_volume(bricks2)
pt_min = np.maximum(bricks1[:, :3], bricks2[:, :3])
pt_max = np.minimum(bricks1[:, 3:], bricks2[:, 3:])
whd = (pt_max - pt_min).clip(min=0)
inter = whd[:, 0] * whd[:, 1] * whd[:, 2]
union = v1 + v2 - inter
iou = inter / union
return iou, union
@staticmethod
def point_coordinate_resampled(in_shape, out_shape, in_coord):
        '''
        Map a point from a volume of shape in_shape to the corresponding location
        in a volume resampled to out_shape.
        '''
out_coord = [out_shape[i]/in_shape[i]*in_coord[i] for i in range(3)]
return out_coord
@staticmethod
def point_coordinate_resampled_normalized(in_shape, out_shape, in_coord):
        '''
        Normalize a point coordinate to [0, 1] with respect to in_shape
        (out_shape is kept for signature symmetry but is not used).
        '''
out_coord = [1/in_shape[i]*in_coord[i] for i in range(3)]
return out_coord
@staticmethod
def restore_normalized_coordinate(in_normalized_coord, in_shape):
out_coord = [in_normalized_coord[i] * in_shape[i] for i in range(3)]
return out_coord
@staticmethod
def generate_test_bricks(n):
bricks = np.zeros([n,6])
for i in range(bricks.shape[0]):
bricks[i, :3] = np.random.randint(0,4,[3])
delta_edge = np.random.randint(4,7)
bricks[i, 3:] = bricks[i, :3] + delta_edge
return bricks
class PYTORCH_TENSOR_DETECTION_UTILS:
def __init__(self) -> None:
pass
@staticmethod
def calc_brick_volume(bricks):
'''
bricks: [N, x0, y0, z0, x1, y1, z1]
'''
delta_bricks = bricks[:, 3:] - bricks[:,:3]
# assert torch.all(delta_bricks > 0)
        delta_bricks = delta_bricks.clip(min=0)  # clip() is not in-place; keep the result
volumes = delta_bricks[:,0] * delta_bricks[:,1] * delta_bricks[:,2]
return volumes
@staticmethod
def calc_brick_iou(bricks1, bricks2):
'''
boxes: [N, x0,y0,z0,x1,y1,z1]
'''
v1 = PYTORCH_TENSOR_DETECTION_UTILS.calc_brick_volume(bricks1)
v2 = PYTORCH_TENSOR_DETECTION_UTILS.calc_brick_volume(bricks2)
pt_min = torch.max(bricks1[:, :3], bricks2[:, :3])
pt_max = torch.min(bricks1[:, 3:], bricks2[:, 3:])
whd = (pt_max - pt_min).clip(min=0)
inter = whd[:, 0] * whd[:, 1] * whd[:, 2]
union = v1 + v2 - inter
iou = inter / union
return iou, union
def test_brick_volume():
bricks = DETECTION_UTILS.generate_test_bricks(6)
print('====> begin to test DETECTION_UTILS.calc_brick_volume')
volumes = DETECTION_UTILS.calc_brick_volume(bricks)
for i in range(bricks.shape[0]):
print('bricks:\t', bricks[i], '\tvolume:\t{}'.format(volumes[i]))
print('====> end to test DETECTION_UTILS.calc_brick_volume')
print('====> begin to test PYTORCH_TENSOR_DETECTION_UTILS.calc_brick_volume')
bricks = torch.from_numpy(bricks)
volumes = PYTORCH_TENSOR_DETECTION_UTILS.calc_brick_volume(bricks)
for i in range(bricks.shape[0]):
print('bricks:\t', bricks[i], '\tvolume:\t{}'.format(volumes[i]))
print('====> end to test PYTORCH_TENSOR_DETECTION_UTILS.calc_brick_volume')
def test_calc_brick_iou():
bricks1 = DETECTION_UTILS.generate_test_bricks(6)
bricks2 = DETECTION_UTILS.generate_test_bricks(6)
print('====>begin to test DETECTION_UTILS.calc_brick_iou')
ious, unions = DETECTION_UTILS.calc_brick_iou(bricks1, bricks2)
for i in range(bricks1.shape[0]):
print('bricks:\t', bricks1[i], '\tiou:\t{}'.format(ious[i]))
print('====>end to test DETECTION_UTILS.calc_brick_iou')
print('====>begin to test PYTORCH_TENSOR_DETECTION_UTILS.calc_brick_iou')
bricks1 = torch.from_numpy(bricks1)
bricks2 = torch.from_numpy(bricks2)
ious, unions = PYTORCH_TENSOR_DETECTION_UTILS.calc_brick_iou(bricks1, bricks2)
for i in range(bricks1.shape[0]):
print('bricks:\t', bricks1[i], '\tiou:\t{}'.format(ious[i]))
print('====>end to test PYTORCH_TENSOR_DETECTION_UTILS.calc_brick_iou')
def test_point_coordinate_resampled():
inshape = [512, 512, 243]
outshape = [128, 128, 128]
in_pt = [18, 23, 24]
out_pt = DETECTION_UTILS.point_coordinate_resampled(inshape, outshape, in_pt)
    print('test_point_coordinate_resampled:', out_pt)
if __name__ == '__main__':
# test_brick_volume()
# test_calc_brick_iou()
test_point_coordinate_resampled()
```
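The random-brick tests above are hard to verify by eye; as a hand-checkable worked example, the sketch below (illustrative only, assuming the module is importable as `utils.detection_utils`) computes the IoU of two boxes whose overlap is known in advance:
```python
import numpy as np
from utils.detection_utils import DETECTION_UTILS

# Boxes in (x0, y0, z0, x1, y1, z1) form, one box per row.
a = np.array([[0., 0., 0., 2., 2., 2.]])  # volume 8
b = np.array([[1., 1., 1., 3., 3., 3.]])  # volume 8, overlapping a in a 1x1x1 corner
iou, union = DETECTION_UTILS.calc_brick_iou(a, b)
print(iou[0], union[0])  # intersection 1, union 15, so IoU = 1/15 ≈ 0.067
```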
#### File: MedCommon/utils/image_show_utils.py
```python
import os
import sys
import numpy as np
import cv2
ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
sys.path.append(ROOT)
from utils.datasets_utils import DatasetsUtils
import SimpleITK as sitk
class ImageShowUtils:
def __init__(self):
pass
@staticmethod
def save_img(in_arr, out_file, ww=150, wl=50, lut=None):
min_v = wl-ww//2
max_v = wl+ww//2
out_arr = np.clip(in_arr, min_v, max_v)
out_arr = (out_arr-min_v)/ww
if lut:
out_arr = lut(out_arr)*255
cv2.imwrite(out_file, cv2.cvtColor(np.array(out_arr, dtype=np.uint8), cv2.COLOR_RGB2BGR))
else:
out_arr = out_arr*255
cv2.imwrite(out_file, out_arr)
@staticmethod
def save_volume_to_jpg(in_arr, out_root, ww, wl, axis=0, file_prefix=None, reverse=False, lut_name=None):
'''
in_arr = sitk.GetArrayFromImage(sitk_image)
h-f: axis=0
a-p: axis=1
l-r: axis=2
'''
os.makedirs(out_root, exist_ok=True)
n = in_arr.shape[axis]
lut = None
if lut_name:
import matplotlib.pyplot as plt
lut = plt.get_cmap(lut_name)
for i in range(n):
if file_prefix:
file_name = '{}_{}.jpg'.format(file_prefix, i)
else:
file_name = '{}.jpg'.format(i)
sub_file_name = os.path.join(out_root, file_name)
if axis == 0:
tmp_arr = in_arr[i,:,:]
elif axis == 1:
tmp_arr = in_arr[:,i,:]
else:
tmp_arr = in_arr[:,:,i]
if reverse:
tmp_arr = tmp_arr[::-1,:]
ImageShowUtils.save_img(tmp_arr, sub_file_name, ww, wl, lut)
@staticmethod
def save_volume_to_mpr_jpg(in_image, out_dir, ww=150, wl=50, prefix='xxx'):
resampled_img = DatasetsUtils.resample_unified_spacing_x_default_min(in_image)
arr = sitk.GetArrayFromImage(resampled_img)
[z,y,x] = arr.shape
z_plane = arr[z//2, :, :]
y_plane = arr[:, y//2, :]
x_plane = arr[:,:,x//2]
os.makedirs(out_dir, exist_ok=True)
z_file = os.path.join(out_dir, '{}_z.jpg'.format(prefix))
y_file = os.path.join(out_dir, '{}_y.jpg'.format(prefix))
x_file = os.path.join(out_dir, '{}_x.jpg'.format(prefix))
cv2.imwrite(z_file, z_plane)
cv2.imwrite(y_file, y_plane)
cv2.imwrite(x_file, x_plane)
def test_save_volume_to_jpg():
print('todo')
def test_save_volume_to_mpr_jpg():
infile = '/data/medical/brain/gan/cta2dwi_history_pos/5.train_batch/1014186/fixed_cta.nii.gz'
image = sitk.ReadImage(infile)
ImageShowUtils.save_volume_to_mpr_jpg(image, '/data/medical/tmp/mpr')
if __name__ == '__main__':
# test_save_volume_to_jpg()
test_save_volume_to_mpr_jpg()
```
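A small usage sketch for the viewer utilities above (illustrative only; the input path is a placeholder and the import path `utils.image_show_utils` is an assumption):
```python
import SimpleITK as sitk
from utils.image_show_utils import ImageShowUtils

# Dump every axial slice of a CT volume to JPG with a soft-tissue window (WW=350, WL=40).
image = sitk.ReadImage('/path/to/volume.nii.gz')  # placeholder path
arr = sitk.GetArrayFromImage(image)
ImageShowUtils.save_volume_to_jpg(arr, './jpg_out', ww=350, wl=40, axis=0, file_prefix='ct')
```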
#### File: MedCommon/utils/lr_adjust_utils.py
```python
import os
import sys
import math
COMMON_ROOT = os.path.join(os.path.dirname(__file__), os.path.pardir)
sys.path.append(COMMON_ROOT)
class LR_ADJUST_UTILS:
def __init__(self) -> None:
pass
@staticmethod
def adjust_learning_rate(optimizer, epoch, lr, cos, epochs, schedule=None):
"""Decay the learning rate based on schedule"""
        if cos:  # cosine lr schedule
            lr *= 0.5 * (1. + math.cos(math.pi * epoch / epochs))
        else:  # stepwise lr schedule: decay by 10x at each passed milestone
            for milestone in (schedule or []):
                lr *= 0.1 if epoch >= milestone else 1.
for param_group in optimizer.param_groups:
param_group['lr'] = lr
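# --- Illustrative sketch (not part of the original file) ---------------------
# A minimal demo of the cosine schedule above, assuming PyTorch is installed:
# it prints the learning rate that adjust_learning_rate would set at each epoch.
if __name__ == '__main__':
    import torch
    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    epochs = 10
    for epoch in range(epochs):
        LR_ADJUST_UTILS.adjust_learning_rate(optimizer, epoch, lr=0.1, cos=True, epochs=epochs)
        print(epoch, optimizer.param_groups[0]['lr'])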
``` |
{
"source": "jianli-gu/advanced-python-topics",
"score": 3
} |
#### File: jianli-gu/advanced-python-topics/coroutine_functions.py
```python
import asyncio
async def task():
print("Hello")
await asyncio.sleep(2)
print("World")
asyncio.run(task())
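# --- Illustrative sketch (not part of the original file) ---------------------
# Several coroutines can be awaited concurrently with asyncio.gather; the sleeps
# overlap, so three copies of task() still finish in roughly 2 seconds total.
async def main(n=3):
    await asyncio.gather(*(task() for _ in range(n)))
# asyncio.run(main())  # uncomment to try the concurrent variant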
``` |
{
"source": "jianli-gu/aws-batch-cdk-python",
"score": 2
} |
#### File: aws-batch-cdk-python/batch/stack.py
```python
from aws_cdk import core
from aws_cdk import aws_batch as batch
from aws_cdk import aws_ec2 as ec2
from aws_cdk import aws_iam as iam
from aws_cdk import aws_secretsmanager as secretsmanager
STACK_PREFIX = "solar"
# Network
VPC_ID = STACK_PREFIX + "-vpc-id"
SECURITY_GROUP_ID = STACK_PREFIX + "-sg-id"
SECURITY_GROUP_NAME = STACK_PREFIX + "-sg"
# Roles
BATCH_SERVICE_ROLE_ID = STACK_PREFIX + "-batch-service-role-id"
BATCH_SERVICE_ROLE_NAME = STACK_PREFIX + "-batch-service-role"
SPOT_FLEET_ROLE_ID = STACK_PREFIX + "-spot-fleet-role-id"
SPOT_FLEET_ROLE_NAME = STACK_PREFIX + "-spot-fleet-role"
BATCH_INSTANCE_ROLE_ID = STACK_PREFIX + "-batch-instance-role-id"
BATCH_INSTANCE_ROLE_NAME = STACK_PREFIX + "-batch-instance-role"
INSTANCE_PROFILE_ID = STACK_PREFIX + "-instance-profile-id"
# Compute Environment
COMPUTE_TYPE = "SPOT"
COMPUTE_ENVIRONMENT_ID = STACK_PREFIX + "-" + COMPUTE_TYPE.lower() + "-compute-environment-id"
COMPUTE_ENVIRONMENT_NAME = STACK_PREFIX + "-" + COMPUTE_TYPE.lower() + "-compute-environment"
# Job Queue
JOB_QUEUE_ID = STACK_PREFIX + "-job-queue-id"
JOB_QUEUE_NAME = STACK_PREFIX + "-job-queue"
# Job Definition
JOB_DEFINITION_ID = STACK_PREFIX + "-job-definition-id"
JOB_DEFINITION_NAME = STACK_PREFIX + "-job-definition"
COMPUTE_MIN_VCPUS = 0
COMPUTE_MAX_VCPUS = 4
COMPUTE_DESIRED_VCPUS = 0
COMPUTE_INSTANCE_TYPES = ["optimal"]
BID_PERCENTAGE = 90
CONTAINER_IMAGE = "AWS-ACCOUNT-ECR/processor"
CONTAINER_VCPUS = 1
CONTAINER_MEMORY = 1024
# Secret
SECRET_NAME = "dev/solar/app"
class AWSBatchStack(core.Stack):
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
# VPC & Security Group
vpc = ec2.Vpc(scope=self, id=VPC_ID, max_azs=3)
sg = ec2.SecurityGroup(self, SECURITY_GROUP_ID,
vpc=vpc,
security_group_name=SECURITY_GROUP_NAME
)
# IAM Roles and Permissions
batch_service_role = iam.Role(self, BATCH_SERVICE_ROLE_ID,
role_name=BATCH_SERVICE_ROLE_NAME,
assumed_by=iam.ServicePrincipal("batch.amazonaws.com"),
managed_policies=[
iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AWSBatchServiceRole")
]
)
spot_fleet_role = iam.Role(self, SPOT_FLEET_ROLE_ID,
role_name=SPOT_FLEET_ROLE_NAME,
assumed_by=iam.ServicePrincipal("spotfleet.amazonaws.com"),
managed_policies=[
iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AmazonEC2SpotFleetTaggingRole")
]
)
batch_instance_role = iam.Role(self, BATCH_INSTANCE_ROLE_ID,
role_name=BATCH_INSTANCE_ROLE_NAME,
assumed_by=iam.CompositePrincipal(
iam.ServicePrincipal("ec2.amazonaws.com"),
iam.ServicePrincipal("ecs.amazonaws.com")
),
managed_policies=[
iam.ManagedPolicy.from_aws_managed_policy_name("AmazonS3FullAccess"),
iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AmazonEC2ContainerServiceforEC2Role")
]
)
instance_profile = iam.CfnInstanceProfile(self, INSTANCE_PROFILE_ID,
instance_profile_name=batch_instance_role.role_name,
roles=[batch_instance_role.role_name]
)
# Compute Environment
compute_environment = batch.CfnComputeEnvironment(self, COMPUTE_ENVIRONMENT_ID,
compute_environment_name=COMPUTE_ENVIRONMENT_NAME,
type="MANAGED",
service_role=batch_service_role.role_arn,
compute_resources={
"type": COMPUTE_TYPE,
"maxvCpus": COMPUTE_MAX_VCPUS,
"minvCpus": COMPUTE_MIN_VCPUS,
"desiredvCpus": COMPUTE_DESIRED_VCPUS,
"bidPercentage": BID_PERCENTAGE,
"spotIamFleetRole": spot_fleet_role.role_arn,
"instanceTypes": COMPUTE_INSTANCE_TYPES,
"instanceRole": batch_instance_role.role_name,
"subnets": [subnet.subnet_id for subnet in vpc.public_subnets],
"securityGroupIds": [sg.security_group_id]
}
)
compute_environment.add_depends_on(instance_profile)
# Job Queue
job_queue = batch.CfnJobQueue(self, JOB_QUEUE_ID,
job_queue_name=JOB_QUEUE_NAME,
priority=1,
compute_environment_order=[
{
"order": 1,
"computeEnvironment": compute_environment.compute_environment_name
}
]
)
job_queue.add_depends_on(compute_environment)
# Job Definition
job_definition = batch.CfnJobDefinition(self, JOB_DEFINITION_ID,
job_definition_name=JOB_DEFINITION_NAME,
type="container",
retry_strategy={
"Attemps": 1
},
timeout={
"AttemptDurationSeconds": 60
},
container_properties={
"image": CONTAINER_IMAGE,
"vcpus": CONTAINER_VCPUS,
"memory": CONTAINER_MEMORY,
"environment": [
{
"name": STACK_PREFIX + "_POSTGRES_DB",
"value": "{{resolve:secretsmanager:" + SECRET_NAME + ":SecretString:POSTGRES_DB}}"
},
{
"name": STACK_PREFIX + "_POSTGRES_USER",
"value": "{{resolve:secretsmanager:" + SECRET_NAME + ":SecretString:POSTGRES_USER}}"
}
]
}
)
```
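A minimal `app.py` sketch that would synthesize the stack above (hypothetical; it assumes CDK v1, matching the imports, and the stack id is an arbitrary example) might look like:
```python
#!/usr/bin/env python3
from aws_cdk import core

from batch.stack import AWSBatchStack

app = core.App()
AWSBatchStack(app, "aws-batch-stack")  # stack id chosen here for illustration
app.synth()
```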
#### File: aws-batch-cdk-python/scripts/submit_jobs.py
```python
import boto3
def submit_jobs():
"""Submit jobs via boto3"""
client = boto3.client("batch")
for i, name in enumerate(["file1.csv", "file2.csv", "file3.csv"]):
client.submit_job(
jobName=f"my-job-{i+1}",
jobQueue="solar-job-queue",
jobDefinition="solar-job-definition",
containerOverrides={
"command": ["python", "process_data.py", "--s3-key", name]
}
)
if __name__ == "__main__":
submit_jobs()
``` |
{
"source": "jianligu/django-celery-boilerplate",
"score": 2
} |
#### File: django-celery-boilerplate/taskapp/views.py
```python
import random
import time
from django.http import JsonResponse
from django.shortcuts import render
from .tasks import long_computing
def index(request):
"""Home page"""
return render(request, template_name='index.html')
def run_short_computing(request):
"""Short computing task"""
# Run computing
t0 = time.time()
time.sleep(random.random())
result = random.randint(1, 100)
elapsed_time = time.time() - t0
# Return response
data = {
'result': result,
'elasped_time': round(elapsed_time, 3)
}
return JsonResponse(data)
def run_long_computing(request):
"""Long computing task"""
# Run computing
result, elapsed_time = long_computing()
# Return response
data = {
'result': result,
'elasped_time': round(elapsed_time, 3)
}
return JsonResponse(data)
def run_asynchronous_computing(request):
"""Asynchronous long computing task"""
# Run computing asynchronously
t0 = time.time()
async_result = long_computing.delay()
elapsed_time = round(time.time() - t0, 3)
# Return response
data = {
'result': async_result.id,
'elasped_time': elapsed_time
}
return JsonResponse(data)
``` |
{
"source": "jianlingzhong/xonsh",
"score": 2
} |
#### File: xonsh/tests/test_history.py
```python
import io
import os
import sys
import shlex
from xonsh.lazyjson import LazyJSON
from xonsh.history import History, _hist_create_parser, _hist_parse_args
from xonsh import history
import pytest
@pytest.yield_fixture
def hist():
h = History(filename='xonsh-HISTORY-TEST.json', here='yup', sessionid='SESSIONID', gc=False)
yield h
os.remove(h.filename)
def test_hist_init(hist):
"""Test initialization of the shell history."""
with LazyJSON(hist.filename) as lj:
obs = lj['here']
assert 'yup' == obs
def test_hist_append(hist, xonsh_builtins):
"""Verify appending to the history works."""
xonsh_builtins.__xonsh_env__['HISTCONTROL'] = set()
hf = hist.append({'joco': 'still alive'})
assert hf is None
assert 'still alive' == hist.buffer[0]['joco']
def test_hist_flush(hist, xonsh_builtins):
"""Verify explicit flushing of the history works."""
hf = hist.flush()
assert hf is None
xonsh_builtins.__xonsh_env__['HISTCONTROL'] = set()
hist.append({'joco': 'still alive'})
hf = hist.flush()
assert hf is not None
while hf.is_alive():
pass
with LazyJSON(hist.filename) as lj:
obs = lj['cmds'][0]['joco']
assert 'still alive' == obs
def test_cmd_field(hist, xonsh_builtins):
# in-memory
xonsh_builtins.__xonsh_env__['HISTCONTROL'] = set()
hf = hist.append({'rtn': 1})
assert hf is None
assert 1 == hist.rtns[0]
assert 1 == hist.rtns[-1]
assert None == hist.outs[-1]
# slice
assert [1] == hist.rtns[:]
# on disk
hf = hist.flush()
assert hf is not None
assert 1 == hist.rtns[0]
assert 1 == hist.rtns[-1]
assert None == hist.outs[-1]
CMDS = ['ls', 'cat hello kitty', 'abc', 'def', 'touch me', 'grep from me']
@pytest.mark.parametrize('inp, commands, offset', [
('', CMDS, (0, 1)),
('-r', list(reversed(CMDS)), (len(CMDS)- 1, -1)),
('0', CMDS[0:1], (0, 1)),
('1', CMDS[1:2], (1, 1)),
('-2', CMDS[-2:-1], (len(CMDS) -2 , 1)),
('1:3', CMDS[1:3], (1, 1)),
    ('1::2', CMDS[1::2], (1, 2)),
('-4:-2', CMDS[-4:-2], (len(CMDS) - 4, 1))
])
def test_show_cmd_numerate(inp, commands, offset, hist, xonsh_builtins, capsys):
"""Verify that CLI history commands work."""
base_idx, step = offset
xonsh_builtins.__xonsh_history__ = hist
xonsh_builtins.__xonsh_env__['HISTCONTROL'] = set()
for ts,cmd in enumerate(CMDS): # populate the shell history
hist.append({'inp': cmd, 'rtn': 0, 'ts':(ts+1, ts+1.5)})
exp = ('{}: {}'.format(base_idx + idx * step, cmd)
for idx, cmd in enumerate(list(commands)))
exp = '\n'.join(exp)
history.history_main(['show', '-n'] + shlex.split(inp))
out, err = capsys.readouterr()
assert out.rstrip() == exp
def test_histcontrol(hist, xonsh_builtins):
"""Test HISTCONTROL=ignoredups,ignoreerr"""
xonsh_builtins.__xonsh_env__['HISTCONTROL'] = 'ignoredups,ignoreerr'
assert len(hist.buffer) == 0
# An error, buffer remains empty
hist.append({'inp': 'ls foo', 'rtn': 2})
assert len(hist.buffer) == 0
# Success
hist.append({'inp': 'ls foobazz', 'rtn': 0})
assert len(hist.buffer) == 1
assert 'ls foobazz' == hist.buffer[-1]['inp']
assert 0 == hist.buffer[-1]['rtn']
# Error
hist.append({'inp': 'ls foo', 'rtn': 2})
assert len(hist.buffer) == 1
assert 'ls foobazz' == hist.buffer[-1]['inp']
assert 0 == hist.buffer[-1]['rtn']
# File now exists, success
hist.append({'inp': 'ls foo', 'rtn': 0})
assert len(hist.buffer) == 2
assert 'ls foo' == hist.buffer[-1]['inp']
assert 0 == hist.buffer[-1]['rtn']
# Success
hist.append({'inp': 'ls', 'rtn': 0})
assert len(hist.buffer) == 3
assert 'ls' == hist.buffer[-1]['inp']
assert 0 == hist.buffer[-1]['rtn']
# Dup
hist.append({'inp': 'ls', 'rtn': 0})
assert len(hist.buffer) == 3
# Success
hist.append({'inp': '/bin/ls', 'rtn': 0})
assert len(hist.buffer) == 4
assert '/bin/ls' == hist.buffer[-1]['inp']
assert 0 == hist.buffer[-1]['rtn']
# Error
hist.append({'inp': 'ls bazz', 'rtn': 1})
assert len(hist.buffer) == 4
assert '/bin/ls' == hist.buffer[-1]['inp']
assert 0 == hist.buffer[-1]['rtn']
# Error
hist.append({'inp': 'ls bazz', 'rtn': -1})
assert len(hist.buffer) == 4
assert '/bin/ls' == hist.buffer[-1]['inp']
assert 0 == hist.buffer[-1]['rtn']
@pytest.mark.parametrize('args', [ '-h', '--help', 'show -h', 'show --help'])
def test_parse_args_help(args, capsys):
with pytest.raises(SystemExit):
args = _hist_parse_args(shlex.split(args))
assert 'show this help message and exit' in capsys.readouterr()[0]
@pytest.mark.parametrize('args, exp', [
('', ('show', 'session', [], False, False)),
('1:5', ('show', 'session', ['1:5'], False, False)),
('show', ('show', 'session', [], False, False)),
('show 15', ('show', 'session', ['15'], False, False)),
('show bash 3:5 15:66', ('show', 'bash', ['3:5', '15:66'], False, False)),
('show -r', ('show', 'session', [], False, True)),
('show -rn bash', ('show', 'bash', [], True, True)),
('show -n -r -30:20', ('show', 'session', ['-30:20'], True, True)),
('show -n zsh 1:2:3', ('show', 'zsh', ['1:2:3'], True, False))
])
def test_parser_show(args, exp):
# use dict instead of argparse.Namespace for pretty pytest diff
exp_ns = {'action': exp[0],
'session': exp[1],
'slices': exp[2],
'numerate': exp[3],
'reverse': exp[4],
'start_time': None,
'end_time': None,
'datetime_format': None,
'timestamp': False}
ns = _hist_parse_args(shlex.split(args))
assert ns.__dict__ == exp_ns
@pytest.mark.parametrize('index, exp', [
(-1, 'grep from me'),
('hello', 'cat hello kitty'),
((-1, -1), 'me'),
(('hello', 0), 'cat'),
((-1, slice(0,2)), 'grep from'),
(('kitty', slice(1,3)), 'hello kitty')
])
def test_history_getitem(index, exp, hist, xonsh_builtins):
xonsh_builtins.__xonsh_env__['HISTCONTROL'] = set()
for ts,cmd in enumerate(CMDS): # populate the shell history
hist.append({'inp': cmd, 'rtn': 0, 'ts':(ts+1, ts+1.5)})
assert hist[index] == exp
```
#### File: xonsh/prompt/base.py
```python
import builtins
import itertools
import os
import re
import socket
import string
import sys
import xonsh.lazyasd as xl
import xonsh.tools as xt
import xonsh.platform as xp
from xonsh.prompt.cwd import (
_collapsed_pwd, _replace_home_cwd, _dynamically_collapsed_pwd
)
from xonsh.prompt.job import _current_job
from xonsh.prompt.env import (env_name, vte_new_tab_cwd)
from xonsh.prompt.vc_branch import (
current_branch, branch_color, branch_bg_color
)
from xonsh.prompt.gitstatus import gitstatus_prompt
@xl.lazyobject
def FORMATTER_DICT():
return dict(
user=os.environ.get('USERNAME' if xp.ON_WINDOWS else 'USER', '<user>'),
prompt_end='#' if xt.is_superuser() else '$',
hostname=socket.gethostname().split('.', 1)[0],
cwd=_dynamically_collapsed_pwd,
cwd_dir=lambda: os.path.dirname(_replace_home_cwd()),
cwd_base=lambda: os.path.basename(_replace_home_cwd()),
short_cwd=_collapsed_pwd,
curr_branch=current_branch,
branch_color=branch_color,
branch_bg_color=branch_bg_color,
current_job=_current_job,
env_name=env_name,
vte_new_tab_cwd=vte_new_tab_cwd,
gitstatus=gitstatus_prompt,
)
@xl.lazyobject
def _FORMATTER():
return string.Formatter()
def default_prompt():
"""Creates a new instance of the default prompt."""
if xp.ON_CYGWIN:
dp = ('{env_name:{} }{BOLD_GREEN}{user}@{hostname}'
'{BOLD_BLUE} {cwd} {prompt_end}{NO_COLOR} ')
elif xp.ON_WINDOWS:
dp = ('{env_name:{} }'
'{BOLD_INTENSE_GREEN}{user}@{hostname}{BOLD_INTENSE_CYAN} '
'{cwd}{branch_color}{curr_branch: {}}{NO_COLOR} '
'{BOLD_INTENSE_CYAN}{prompt_end}{NO_COLOR} ')
else:
dp = ('{env_name:{} }'
'{BOLD_GREEN}{user}@{hostname}{BOLD_BLUE} '
'{cwd}{branch_color}{curr_branch: {}}{NO_COLOR} '
'{BOLD_BLUE}{prompt_end}{NO_COLOR} ')
return dp
@xt.lazyobject
def DEFAULT_PROMPT():
return default_prompt()
def _get_fmtter(formatter_dict=None):
if formatter_dict is None:
fmtter = builtins.__xonsh_env__.get('FORMATTER_DICT', FORMATTER_DICT)
else:
fmtter = formatter_dict
return fmtter
def _failover_template_format(template):
if callable(template):
try:
# Exceptions raises from function of producing $PROMPT
# in user's xonshrc should not crash xonsh
return template()
except Exception:
xt.print_exception()
return '$ '
return template
def partial_format_prompt(template=DEFAULT_PROMPT, formatter_dict=None):
"""Formats a xonsh prompt template string."""
try:
return _partial_format_prompt_main(template=template,
formatter_dict=formatter_dict)
except Exception:
return _failover_template_format(template)
def _partial_format_prompt_main(template=DEFAULT_PROMPT, formatter_dict=None):
template = template() if callable(template) else template
fmtter = _get_fmtter(formatter_dict)
bopen = '{'
bclose = '}'
colon = ':'
expl = '!'
toks = []
for literal, field, spec, conv in _FORMATTER.parse(template):
toks.append(literal)
if field is None:
continue
elif field.startswith('$'):
val = builtins.__xonsh_env__[field[1:]]
val = _format_value(val, spec, conv)
toks.append(val)
elif field in fmtter:
v = fmtter[field]
try:
val = v() if callable(v) else v
except Exception:
print('prompt: error: on field {!r}'
''.format(field), file=sys.stderr)
xt.print_exception()
toks.append('(ERROR:{})'.format(field))
continue
val = _format_value(val, spec, conv)
toks.append(val)
else:
toks.append(bopen)
toks.append(field)
if conv is not None and len(conv) > 0:
toks.append(expl)
toks.append(conv)
if spec is not None and len(spec) > 0:
toks.append(colon)
toks.append(spec)
toks.append(bclose)
return ''.join(toks)
@xt.lazyobject
def RE_HIDDEN():
return re.compile('\001.*?\002')
def multiline_prompt(curr=''):
"""Returns the filler text for the prompt in multiline scenarios."""
line = curr.rsplit('\n', 1)[1] if '\n' in curr else curr
line = RE_HIDDEN.sub('', line) # gets rid of colors
# most prompts end in whitespace, head is the part before that.
head = line.rstrip()
headlen = len(head)
# tail is the trailing whitespace
tail = line if headlen == 0 else line.rsplit(head[-1], 1)[1]
    # now to construct the actual string
dots = builtins.__xonsh_env__.get('MULTILINE_PROMPT')
dots = dots() if callable(dots) else dots
if dots is None or len(dots) == 0:
return ''
tokstr = xt.format_color(dots, hide=True)
baselen = 0
basetoks = []
for x in tokstr.split('\001'):
pre, sep, post = x.partition('\002')
if len(sep) == 0:
basetoks.append(('', pre))
baselen += len(pre)
else:
basetoks.append(('\001' + pre + '\002', post))
baselen += len(post)
if baselen == 0:
return xt.format_color('{NO_COLOR}' + tail, hide=True)
toks = basetoks * (headlen // baselen)
n = headlen % baselen
count = 0
for tok in basetoks:
slen = len(tok[1])
newcount = slen + count
if slen == 0:
continue
elif newcount <= n:
toks.append(tok)
else:
toks.append((tok[0], tok[1][:n - count]))
count = newcount
if n <= count:
break
toks.append((xt.format_color('{NO_COLOR}', hide=True), tail))
rtn = ''.join(itertools.chain.from_iterable(toks))
return rtn
def is_template_string(template, formatter_dict=None):
"""Returns whether or not the string is a valid template."""
template = template() if callable(template) else template
try:
included_names = set(i[1] for i in _FORMATTER.parse(template))
except ValueError:
return False
included_names.discard(None)
if formatter_dict is None:
fmtter = builtins.__xonsh_env__.get('FORMATTER_DICT', FORMATTER_DICT)
else:
fmtter = formatter_dict
known_names = set(fmtter.keys())
return included_names <= known_names
def _format_value(val, spec, conv):
"""Formats a value from a template string {val!conv:spec}. The spec is
applied as a format string itself, but if the value is None, the result
will be empty. The purpose of this is to allow optional parts in a
prompt string. For example, if the prompt contains '{current_job:{} | }',
and 'current_job' returns 'sleep', the result is 'sleep | ', and if
'current_job' returns None, the result is ''.
"""
if val is None:
return ''
val = _FORMATTER.convert_field(val, conv)
if spec:
val = _FORMATTER.format(spec, val)
if not isinstance(val, str):
val = str(val)
return val
```
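The `_format_value` docstring above describes how a `{field:spec}` placeholder collapses to an empty string when the field returns None. A small demo (a sketch, assuming this vintage of xonsh is importable) makes that behaviour concrete:
```python
from xonsh.prompt.base import partial_format_prompt

fields = {'user': 'meg', 'current_job': lambda: 'sleep'}
print(partial_format_prompt('{user} {current_job:{} | }$', formatter_dict=fields))
# -> meg sleep | $
fields['current_job'] = lambda: None
print(partial_format_prompt('{user} {current_job:{} | }$', formatter_dict=fields))
# -> meg $
```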
#### File: xonsh/prompt/gitstatus.py
```python
import builtins
import collections
import os
import subprocess
import xonsh.lazyasd as xl
GitStatus = collections.namedtuple('GitStatus',
['branch', 'num_ahead', 'num_behind',
'untracked', 'changed', 'conflicts',
'staged', 'stashed', 'operations'])
def _check_output(*args, **kwargs):
kwargs.update(dict(env=builtins.__xonsh_env__.detype(),
stderr=subprocess.DEVNULL,
timeout=builtins.__xonsh_env__['VC_BRANCH_TIMEOUT'],
universal_newlines=True
))
return subprocess.check_output(*args, **kwargs)
@xl.lazyobject
def _DEFS():
DEFS = {
'HASH': ':',
'BRANCH': '{CYAN}',
'OPERATION': '{CYAN}',
'STAGED': '{RED}●',
'CONFLICTS': '{RED}×',
'CHANGED': '{BLUE}+',
'UNTRACKED': '…',
'STASHED': '⚑',
'CLEAN': '{BOLD_GREEN}✓',
'AHEAD': '↑·',
'BEHIND': '↓·',
}
return DEFS
def _get_def(key):
def_ = builtins.__xonsh_env__.get('XONSH_GITSTATUS_' + key)
return def_ if def_ is not None else _DEFS[key]
def _get_tag_or_hash():
tag = _check_output(['git', 'describe', '--exact-match']).strip()
if tag:
return tag
hash_ = _check_output(['git', 'rev-parse', '--short', 'HEAD']).strip()
return _get_def('HASH') + hash_
def _get_stash(gitdir):
try:
with open(os.path.join(gitdir, 'logs/refs/stash')) as f:
return sum(1 for _ in f)
except IOError:
return 0
def _gitoperation(gitdir):
files = (
('rebase-merge', 'REBASE'),
('rebase-apply', 'AM/REBASE'),
('MERGE_HEAD', 'MERGING'),
('CHERRY_PICK_HEAD', 'CHERRY-PICKING'),
('REVERT_HEAD', 'REVERTING'),
('BISECT_LOG', 'BISECTING'),
)
return [f[1] for f in files
if os.path.exists(os.path.join(gitdir, f[0]))]
def gitstatus():
"""Return namedtuple with fields:
branch name, number of ahead commit, number of behind commit,
untracked number, changed number, conflicts number,
staged number, stashed number, operation."""
status = _check_output(['git', 'status', '--porcelain', '--branch'])
branch = ''
num_ahead, num_behind = 0, 0
untracked, changed, conflicts, staged = 0, 0, 0, 0
for line in status.splitlines():
if line.startswith('##'):
line = line[2:].strip()
if 'Initial commit on' in line:
branch = line.split()[-1]
elif 'no branch' in line:
branch = _get_tag_or_hash()
elif '...' not in line:
branch = line
else:
branch, rest = line.split('...')
if ' ' in rest:
divergence = rest.split(' ', 1)[-1]
divergence = divergence.strip('[]')
for div in divergence.split(', '):
if 'ahead' in div:
num_ahead = int(div[len('ahead '):].strip())
elif 'behind' in div:
num_behind = int(div[len('behind '):].strip())
elif line.startswith('??'):
untracked += 1
else:
if len(line) > 1 and line[1] == 'M':
changed += 1
if len(line) > 0 and line[0] == 'U':
conflicts += 1
elif len(line) > 0 and line[0] != ' ':
staged += 1
gitdir = _check_output(['git', 'rev-parse', '--git-dir']).strip()
stashed = _get_stash(gitdir)
operations = _gitoperation(gitdir)
return GitStatus(branch, num_ahead, num_behind,
untracked, changed, conflicts, staged, stashed,
operations)
def gitstatus_prompt():
"""Return str `BRANCH|OPERATOR|numbers`"""
try:
s = gitstatus()
except subprocess.SubprocessError:
return None
ret = _get_def('BRANCH') + s.branch
if s.num_ahead > 0:
ret += _get_def('AHEAD') + str(s.num_ahead)
if s.num_behind > 0:
ret += _get_def('BEHIND') + str(s.num_behind)
if s.operations:
ret += _get_def('OPERATION') + '|' + '|'.join(s.operations)
ret += '|'
if s.staged > 0:
ret += _get_def('STAGED') + str(s.staged) + '{NO_COLOR}'
if s.conflicts > 0:
ret += _get_def('CONFLICTS') + str(s.conflicts) + '{NO_COLOR}'
if s.changed > 0:
ret += _get_def('CHANGED') + str(s.changed) + '{NO_COLOR}'
if s.untracked > 0:
ret += _get_def('UNTRACKED') + str(s.untracked) + '{NO_COLOR}'
if s.stashed > 0:
ret += _get_def('STASHED') + str(s.stashed) + '{NO_COLOR}'
if s.staged + s.conflicts + s.changed + s.untracked + s.stashed == 0:
ret += _get_def('CLEAN') + '{NO_COLOR}'
ret += '{NO_COLOR}'
return ret
```
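Both the `gitstatus` field name (registered in `FORMATTER_DICT` in prompt/base.py) and the `XONSH_GITSTATUS_*` overrides read by `_get_def` can be driven from the user's xonshrc. A sketch in xonsh syntax (illustrative only; the glyph values are arbitrary):
```
# ~/.xonshrc
$XONSH_GITSTATUS_BRANCH = '{YELLOW}'
$XONSH_GITSTATUS_STAGED = '{GREEN}+'
$XONSH_GITSTATUS_UNTRACKED = '?'
$PROMPT = '{cwd} {gitstatus}{NO_COLOR} {prompt_end} '
```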
#### File: xonsh/xonsh/readline_shell.py
```python
import os
import sys
import cmd
import time
import select
import builtins
import importlib
import threading
import collections
from xonsh.lazyjson import LazyJSON
from xonsh.lazyasd import LazyObject
from xonsh.base_shell import BaseShell
from xonsh.ansi_colors import ansi_partial_color_format, ansi_color_style_names, ansi_color_style
from xonsh.prompt.base import partial_format_prompt, multiline_prompt
from xonsh.tools import print_exception
from xonsh.platform import ON_WINDOWS, ON_CYGWIN, ON_DARWIN
from xonsh.lazyimps import pygments, pyghooks
terminal256 = LazyObject(
lambda: importlib.import_module('pygments.formatters.terminal256'),
globals(), 'terminal')
readline = None
RL_COMPLETION_SUPPRESS_APPEND = RL_LIB = RL_STATE = None
RL_CAN_RESIZE = False
RL_DONE = None
RL_VARIABLE_VALUE = None
_RL_STATE_DONE = 0x1000000
_RL_STATE_ISEARCH = 0x0000080
_RL_PREV_CASE_SENSITIVE_COMPLETIONS = 'to-be-set'
def setup_readline():
"""Sets up the readline module and completion suppression, if available."""
global RL_COMPLETION_SUPPRESS_APPEND, RL_LIB, RL_CAN_RESIZE, RL_STATE, readline
if RL_COMPLETION_SUPPRESS_APPEND is not None:
return
for _rlmod_name in ('gnureadline', 'readline'):
try:
readline = importlib.import_module(_rlmod_name)
sys.modules['readline'] = readline
except ImportError:
pass
else:
break
if readline is None:
print("""Skipping setup. Because no `readline` implementation available.
Please install a backend (`readline`, `prompt-toolkit`, etc) to use
`xonsh` interactively.
See https://github.com/xonsh/xonsh/issues/1170""")
return
import ctypes
import ctypes.util
uses_libedit = readline.__doc__ and 'libedit' in readline.__doc__
readline.set_completer_delims(' \t\n')
# Cygwin seems to hang indefinitely when querying the readline lib
if (not ON_CYGWIN) and (not readline.__file__.endswith('.py')):
RL_LIB = lib = ctypes.cdll.LoadLibrary(readline.__file__)
try:
RL_COMPLETION_SUPPRESS_APPEND = ctypes.c_int.in_dll(
lib, 'rl_completion_suppress_append')
except ValueError:
# not all versions of readline have this symbol, ie Macs sometimes
RL_COMPLETION_SUPPRESS_APPEND = None
try:
RL_STATE = ctypes.c_int.in_dll(lib, 'rl_readline_state')
except Exception:
pass
RL_CAN_RESIZE = hasattr(lib, 'rl_reset_screen_size')
env = builtins.__xonsh_env__
# reads in history
readline.set_history_length(-1)
ReadlineHistoryAdder()
# sets up IPython-like history matching with up and down
readline.parse_and_bind('"\e[B": history-search-forward')
readline.parse_and_bind('"\e[A": history-search-backward')
# Setup Shift-Tab to indent
readline.parse_and_bind('"\e[Z": "{0}"'.format(env.get('INDENT')))
# handle tab completion differences found in libedit readline compatibility
# as discussed at http://stackoverflow.com/a/7116997
if uses_libedit and ON_DARWIN:
readline.parse_and_bind("bind ^I rl_complete")
print('\n'.join(['', "*" * 78,
"libedit detected - readline will not be well behaved, including but not limited to:",
" * crashes on tab completion",
" * incorrect history navigation",
" * corrupting long-lines",
" * failure to wrap or indent lines properly",
"",
"It is highly recommended that you install gnureadline, which is installable with:",
" pip install gnureadline",
"*" * 78]), file=sys.stderr)
else:
readline.parse_and_bind("tab: complete")
# try to load custom user settings
inputrc_name = os.environ.get('INPUTRC')
if inputrc_name is None:
if uses_libedit:
inputrc_name = '.editrc'
else:
inputrc_name = '.inputrc'
inputrc_name = os.path.join(os.path.expanduser('~'), inputrc_name)
if (not ON_WINDOWS) and (not os.path.isfile(inputrc_name)):
inputrc_name = '/etc/inputrc'
if os.path.isfile(inputrc_name):
try:
readline.read_init_file(inputrc_name)
except Exception:
# this seems to fail with libedit
print_exception('xonsh: could not load readline default init file.')
def teardown_readline():
"""Tears down up the readline module, if available."""
try:
import readline
except (ImportError, TypeError):
return
def _rebind_case_sensitive_completions():
# handle case sensitive, see Github issue #1342 for details
global _RL_PREV_CASE_SENSITIVE_COMPLETIONS
env = builtins.__xonsh_env__
case_sensitive = env.get('CASE_SENSITIVE_COMPLETIONS')
if case_sensitive is _RL_PREV_CASE_SENSITIVE_COMPLETIONS:
return
if case_sensitive:
readline.parse_and_bind("set completion-ignore-case off")
else:
readline.parse_and_bind("set completion-ignore-case on")
_RL_PREV_CASE_SENSITIVE_COMPLETIONS = case_sensitive
def fix_readline_state_after_ctrl_c():
"""
Fix to allow Ctrl-C to exit reverse-i-search.
Based on code from:
http://bugs.python.org/file39467/raw_input__workaround_demo.py
"""
if ON_WINDOWS:
# hack to make pyreadline mimic the desired behavior
try:
_q = readline.rl.mode.process_keyevent_queue
if len(_q) > 1:
_q.pop()
except Exception:
pass
if RL_STATE is None:
return
if RL_STATE.value & _RL_STATE_ISEARCH:
RL_STATE.value &= ~_RL_STATE_ISEARCH
if not RL_STATE.value & _RL_STATE_DONE:
RL_STATE.value |= _RL_STATE_DONE
def rl_completion_suppress_append(val=1):
"""Sets the rl_completion_suppress_append varaiable, if possible.
A value of 1 (default) means to suppress, a value of 0 means to enable.
"""
if RL_COMPLETION_SUPPRESS_APPEND is None:
return
RL_COMPLETION_SUPPRESS_APPEND.value = val
def rl_variable_dumper(readable=True):
"""Dumps the currently set readline variables. If readable is True, then this
output may be used in an inputrc file.
"""
RL_LIB.rl_variable_dumper(int(readable))
def rl_variable_value(variable):
"""Returns the currently set value for a readline configuration variable."""
global RL_VARIABLE_VALUE
if RL_VARIABLE_VALUE is None:
import ctypes
RL_VARIABLE_VALUE = RL_LIB.rl_variable_value
RL_VARIABLE_VALUE.restype = ctypes.c_char_p
env = builtins.__xonsh_env__
enc, errors = env.get('XONSH_ENCODING'), env.get('XONSH_ENCODING_ERRORS')
if isinstance(variable, str):
variable = variable.encode(encoding=enc, errors=errors)
rtn = RL_VARIABLE_VALUE(variable)
return rtn.decode(encoding=enc, errors=errors)
def _insert_text_func(s, readline):
"""Creates a function to insert text via readline."""
def inserter():
readline.insert_text(s)
readline.redisplay()
return inserter
DEDENT_TOKENS = LazyObject(lambda: frozenset(['raise', 'return', 'pass',
'break', 'continue']),
globals(), 'DEDENT_TOKENS')
class ReadlineShell(BaseShell, cmd.Cmd):
"""The readline based xonsh shell."""
def __init__(self, completekey='tab', stdin=None, stdout=None, **kwargs):
super().__init__(completekey=completekey,
stdin=stdin,
stdout=stdout,
**kwargs)
setup_readline()
self._current_indent = ''
self._current_prompt = ''
self._force_hide = None
self.cmdqueue = collections.deque()
def __del__(self):
teardown_readline()
def singleline(self, store_in_history=True, **kwargs):
"""Reads a single line of input. The store_in_history kwarg
flags whether the input should be stored in readline's in-memory
history.
"""
if not store_in_history: # store current position to remove it later
try:
import readline
except ImportError:
store_in_history = True
pos = readline.get_current_history_length() - 1
rtn = input(self.prompt)
if not store_in_history and pos >= 0:
readline.remove_history_item(pos)
return rtn
def parseline(self, line):
"""Overridden to no-op."""
return '', line, line
def completedefault(self, text, line, begidx, endidx):
"""Implements tab-completion for text."""
rl_completion_suppress_append() # this needs to be called each time
_rebind_case_sensitive_completions()
line = builtins.aliases.expand_alias(line)
mline = line.partition(' ')[2]
offs = len(mline) - len(text)
if self.completer is None:
x = []
else:
x = [(i[offs:] if " " in i[:-1] else i)
for i in self.completer.complete(text, line,
begidx, endidx,
ctx=self.ctx)[0]]
return x
# tab complete on first index too
completenames = completedefault
def _load_remaining_input_into_queue(self):
buf = b''
while True:
r, w, x = select.select([self.stdin], [], [], 1e-6)
if len(r) == 0:
break
buf += os.read(self.stdin.fileno(), 1024)
if len(buf) > 0:
buf = buf.decode().replace('\r\n', '\n').replace('\r', '\n')
self.cmdqueue.extend(buf.splitlines(keepends=True))
def postcmd(self, stop, line):
"""Called just before execution of line. For readline, this handles the
automatic indentation of code blocks.
"""
try:
import readline
except ImportError:
return stop
if self.need_more_lines:
if len(line.strip()) == 0:
readline.set_pre_input_hook(None)
self._current_indent = ''
elif line.rstrip()[-1] == ':':
ind = line[:len(line) - len(line.lstrip())]
ind += builtins.__xonsh_env__.get('INDENT')
readline.set_pre_input_hook(_insert_text_func(ind, readline))
self._current_indent = ind
elif line.split(maxsplit=1)[0] in DEDENT_TOKENS:
env = builtins.__xonsh_env__
ind = self._current_indent[:-len(env.get('INDENT'))]
readline.set_pre_input_hook(_insert_text_func(ind, readline))
self._current_indent = ind
else:
ind = line[:len(line) - len(line.lstrip())]
if ind != self._current_indent:
insert_func = _insert_text_func(ind, readline)
readline.set_pre_input_hook(insert_func)
self._current_indent = ind
else:
readline.set_pre_input_hook(None)
return stop
def _cmdloop(self, intro=None):
"""Repeatedly issue a prompt, accept input, parse an initial prefix
off the received input, and dispatch to action methods, passing them
the remainder of the line as argument.
This was forked from Lib/cmd.py from the Python standard library v3.4.3,
(C) Python Software Foundation, 2015.
"""
self.preloop()
if self.use_rawinput and self.completekey:
try:
import readline
self.old_completer = readline.get_completer()
readline.set_completer(self.complete)
readline.parse_and_bind(self.completekey + ": complete")
have_readline = True
except ImportError:
have_readline = False
try:
if intro is not None:
self.intro = intro
if self.intro:
self.stdout.write(str(self.intro) + "\n")
stop = None
while not stop:
line = None
exec_now = False
if len(self.cmdqueue) > 0:
line = self.cmdqueue.popleft()
exec_now = line.endswith('\n')
if self.use_rawinput and not exec_now:
inserter = None if line is None \
else _insert_text_func(line, readline)
if inserter is not None:
readline.set_pre_input_hook(inserter)
try:
line = self.singleline()
except EOFError:
if builtins.__xonsh_env__.get("IGNOREEOF"):
self.stdout.write('Use "exit" to leave the shell.'
'\n')
line = ''
else:
line = 'EOF'
if inserter is not None:
readline.set_pre_input_hook(None)
else:
self.print_color(self.prompt, file=self.stdout)
if line is not None:
os.write(self.stdin.fileno(), line.encode())
if not exec_now:
line = self.stdin.readline()
if len(line) == 0:
line = 'EOF'
else:
line = line.rstrip('\r\n')
if have_readline and line != 'EOF':
readline.add_history(line)
if not ON_WINDOWS:
# select() is not fully functional on windows
self._load_remaining_input_into_queue()
line = self.precmd(line)
stop = self.onecmd(line)
stop = self.postcmd(stop, line)
self.postloop()
finally:
if self.use_rawinput and self.completekey:
try:
import readline
readline.set_completer(self.old_completer)
except ImportError:
pass
def cmdloop(self, intro=None):
while not builtins.__xonsh_exit__:
try:
self._cmdloop(intro=intro)
except KeyboardInterrupt:
print() # Gives a newline
fix_readline_state_after_ctrl_c()
self.reset_buffer()
intro = None
@property
def prompt(self):
"""Obtains the current prompt string."""
global RL_LIB, RL_CAN_RESIZE
if RL_CAN_RESIZE:
# This is needed to support some system where line-wrapping doesn't
# work. This is a bug in upstream Python, or possibly readline.
RL_LIB.rl_reset_screen_size()
if self.need_more_lines:
if self.mlprompt is None:
try:
self.mlprompt = multiline_prompt(curr=self._current_prompt)
except Exception: # pylint: disable=broad-except
print_exception()
self.mlprompt = '<multiline prompt error> '
return self.mlprompt
env = builtins.__xonsh_env__ # pylint: disable=no-member
p = env.get('PROMPT')
try:
p = partial_format_prompt(p)
except Exception: # pylint: disable=broad-except
print_exception()
hide = True if self._force_hide is None else self._force_hide
p = ansi_partial_color_format(p, style=env.get('XONSH_COLOR_STYLE'),
hide=hide)
self._current_prompt = p
self.settitle()
return p
def format_color(self, string, hide=False, **kwargs):
"""Readline implementation of color formatting. This usesg ANSI color
codes.
"""
hide = hide if self._force_hide is None else self._force_hide
return ansi_partial_color_format(string, hide=hide,
style=builtins.__xonsh_env__.get('XONSH_COLOR_STYLE'))
def print_color(self, string, hide=False, **kwargs):
if isinstance(string, str):
s = self.format_color(string, hide=hide)
else:
# assume this is a list of (Token, str) tuples and format it
env = builtins.__xonsh_env__
self.styler.style_name = env.get('XONSH_COLOR_STYLE')
style_proxy = pyghooks.xonsh_style_proxy(self.styler)
formatter = terminal256.Terminal256Formatter(style=style_proxy)
s = pygments.format(string, formatter).rstrip()
print(s, **kwargs)
def color_style_names(self):
"""Returns an iterable of all available style names."""
return ansi_color_style_names()
def color_style(self):
"""Returns the current color map."""
        style = builtins.__xonsh_env__.get('XONSH_COLOR_STYLE')
return ansi_color_style(style=style)
class ReadlineHistoryAdder(threading.Thread):
def __init__(self, wait_for_gc=True, *args, **kwargs):
"""Thread responsible for adding inputs from history to the current readline
instance. May wait for the history garbage collector to finish.
"""
super(ReadlineHistoryAdder, self).__init__(*args, **kwargs)
self.daemon = True
self.wait_for_gc = wait_for_gc
self.start()
def run(self):
try:
import readline
except ImportError:
return
hist = builtins.__xonsh_history__
while self.wait_for_gc and hist.gc.is_alive():
time.sleep(0.011) # gc sleeps for 0.01 secs, sleep a beat longer
files = hist.gc.files()
i = 1
for _, _, f in files:
try:
lj = LazyJSON(f, reopen=False)
for command in lj['cmds']:
inp = command['inp'].splitlines()
for line in inp:
if line == 'EOF':
continue
readline.add_history(line)
if RL_LIB is not None:
RL_LIB.history_set_pos(i)
i += 1
lj.close()
except (IOError, OSError, ValueError):
continue
``` |
{
"source": "Jianlin-lv/bcc",
"score": 3
} |
#### File: tests/python/test_map_batch_ops.py
```python
from __future__ import print_function
from unittest import main, skipUnless, TestCase
from bcc import BPF
import os
import distutils.version
import ctypes as ct
def kernel_version_ge(major, minor):
# True if running kernel is >= X.Y
version = distutils.version.LooseVersion(os.uname()[2]).version
if version[0] > major:
return True
if version[0] < major:
return False
if minor and version[1] < minor:
return False
return True
@skipUnless(kernel_version_ge(5, 6), "requires kernel >= 5.6")
class TestMapBatch(TestCase):
MAPSIZE = 1024
def fill_hashmap(self):
b = BPF(text=b"""BPF_HASH(map, int, int, %d);""" % self.MAPSIZE)
hmap = b[b"map"]
for i in range(0, self.MAPSIZE):
hmap[ct.c_int(i)] = ct.c_int(i)
return hmap
def check_hashmap_values(self, it):
i = 0
for k, v in sorted(it):
self.assertEqual(k, i)
self.assertEqual(v, i)
i += 1
return i
def test_lookup_and_delete_batch(self):
# fill the hashmap
hmap = self.fill_hashmap()
# check values and count them
count = self.check_hashmap_values(hmap.items_lookup_and_delete_batch())
self.assertEqual(count, self.MAPSIZE)
# and check the delete has worked, i.e map is now empty
count = sum(1 for _ in hmap.items_lookup_batch())
self.assertEqual(count, 0)
def test_lookup_batch(self):
# fill the hashmap
hmap = self.fill_hashmap()
# check values and count them
count = self.check_hashmap_values(hmap.items_lookup_batch())
self.assertEqual(count, self.MAPSIZE)
    def test_delete_batch_all_keys(self):
# Delete all key/value in the map
# fill the hashmap
hmap = self.fill_hashmap()
hmap.items_delete_batch()
# check the delete has worked, i.e map is now empty
count = sum(1 for _ in hmap.items())
self.assertEqual(count, 0)
def test_delete_batch_subset(self):
# Delete only a subset of key/value in the map
# fill the hashmap
hmap = self.fill_hashmap()
        # Collect the first subset_size (32) keys from this map.
subset_size = 32
keys = (hmap.Key * subset_size)()
i = 0
for k, _ in hmap.items_lookup_batch():
if i < subset_size:
keys[i] = k
i += 1
else:
break
hmap.items_delete_batch(keys)
        # check the delete has worked, i.e. the map now holds MAPSIZE - subset_size entries
count = sum(1 for _ in hmap.items())
self.assertEqual(count, self.MAPSIZE - subset_size)
def test_update_batch(self):
hmap = self.fill_hashmap()
# preparing keys and new values arrays
keys = (hmap.Key * self.MAPSIZE)()
new_values = (hmap.Leaf * self.MAPSIZE)()
for i in range(self.MAPSIZE):
keys[i] = ct.c_int(i)
new_values[i] = ct.c_int(-1)
hmap.items_update_batch(keys, new_values)
# check the update has worked, i.e sum of values is -NUM_KEYS
count = sum(v.value for v in hmap.values())
self.assertEqual(count, -1*self.MAPSIZE)
if __name__ == "__main__":
main()
``` |
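For contrast with the batch helpers exercised above, the same kind of hash map can be filled and read one element at a time. The sketch below uses only the bcc calls already shown in the test (it still needs root and a kernel with eBPF support); the map name and size are arbitrary:

```python
# Per-element access to a BPF_HASH, for comparison with the batch operations
# tested above (requires root and bcc installed; names and sizes are arbitrary).
from bcc import BPF
import ctypes as ct

b = BPF(text=b"BPF_HASH(counts, int, int, 64);")
counts = b[b"counts"]
for i in range(64):
    counts[ct.c_int(i)] = ct.c_int(i * 2)   # one update syscall per key

total = sum(v.value for v in counts.values())  # one lookup per key
print("sum of values:", total)
```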
{
"source": "jianlins/keep",
"score": 2
} |
#### File: keep_backend/backend/views.py
```python
import json
from backend.forms import RegistrationFormUserProfile
from backend.forms import ResendActivationForm
from django.core import serializers
from django.core.urlresolvers import reverse
from django.contrib.sites.models import RequestSite
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from organizations.models import OrganizationUser
from registration.models import RegistrationProfile
from repos.models import Repository, RepoSerializer
from studies.models import Study, StudySerializer
from twofactor.models import UserAPIToken
def home( request ):
if request.user.is_authenticated():
return HttpResponseRedirect(
reverse( 'user_dashboard',
kwargs={ 'username': request.user.username } ) )
return render_to_response( 'index.html' )
def register( request ):
if request.method == 'POST':
form = RegistrationFormUserProfile( request.POST )
if form.is_valid():
( new_user, user_token ) = form.save()
# Send activation email
for profile in RegistrationProfile.objects.filter(user=new_user):
profile.send_activation_email( RequestSite( request ) )
return render_to_response('registration/reg_complete.html',
{'user_token': user_token.google_url(),
'email': new_user.email } )
else:
form = RegistrationFormUserProfile()
return render_to_response( 'registration/registration_form.html',
{'form': form },
context_instance=RequestContext(request) )
def registration_complete( request ):
return render_to_response( 'registration/activate.html' )
def resend_activation( request ):
status = None
if request.method == 'POST':
form = ResendActivationForm( request.POST )
if form.is_valid():
email = form.cleaned_data[ 'email' ]
users = User.objects.filter( email=email, is_active=0 )
site = RequestSite( request )
if users.count() == 0:
form._errors[ 'email' ] = '''Account for email address is not
recognized'''
else:
user = users[0]
for profile in RegistrationProfile.objects.filter(user=user):
if not profile.activation_key_expired():
profile.send_activation_email( site )
status = 'Email Activation Sent!'
else:
form = ResendActivationForm()
return render_to_response( 'registration/resend_activation.html',
{ 'form': form,
'status': status },
context_instance=RequestContext( request ) )
@login_required
def user_dashboard( request, username ):
'''
Dashboard seen when a user signs in or views another user's profile.
    The dashboard contains links to a user's private/public data repos.
Private repos are only shown if the user has permission to view them.
'''
    # Are we looking at our own profile or someone else's?
is_other_user = request.user.username != username
user = get_object_or_404( User, username=username )
    # Find all the organizations this user belongs to
organizations = OrganizationUser.objects.filter( user=user )
# Grab a list of forms uploaded by the user
if is_other_user:
user_repos = Repository.objects.list_by_user( user=user,
organizations=organizations,
public=True )
user_studies = []
else:
user_repos = Repository.objects.list_by_user( user=user,
organizations=organizations )
user_studies = Study.objects.filter( user=user )
serializer = RepoSerializer()
repo_json = json.dumps( serializer.serialize( user_repos ) )
serializer = StudySerializer()
study_json = json.dumps( serializer.serialize( user_studies ) )
return render_to_response( 'dashboard.html',
{ 'user_studies': study_json,
'user_repos': repo_json,
'is_other_user': is_other_user,
'account': user,
'organizations': organizations },
context_instance=RequestContext(request) )
@login_required
def generate_api_key( request ):
UserAPIToken.objects.create( user=request.user,
name=request.GET.get( 'name', '' ) )
return HttpResponseRedirect( '/settings' )
@login_required
def delete_api_key( request, key ):
token = UserAPIToken.objects.get(id=key)
token.delete()
return HttpResponseRedirect( '/settings' )
@login_required
def settings( request ):
api_tokens = UserAPIToken.objects.filter(user=request.user)
return render_to_response( 'settings.html',
{'api_tokens': api_tokens},
context_instance=RequestContext(request))
```
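The views above are only half the wiring; a URLconf has to route requests to them. The project's actual urls.py is not shown here, so the following is a hypothetical sketch using the pre-Django-2.0 `url()` API that matches the imports above:

```python
# Hypothetical urls.py wiring for the views above (the real project's URLconf is
# not included in this file). Uses the old-style url() API matching the imports above.
from django.conf.urls import url
from backend import views

urlpatterns = [
    url(r'^$', views.home, name='home'),
    url(r'^register/$', views.register, name='register'),
    url(r'^accounts/resend/$', views.resend_activation, name='resend_activation'),
    url(r'^settings/$', views.settings, name='settings'),
    url(r'^(?P<username>[\w.@+-]+)/$', views.user_dashboard, name='user_dashboard'),
]
```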
#### File: keep_backend/openrosa/serializer.py
```python
from django.conf import settings
from lxml import etree
from tastypie.serializers import Serializer
from pyxform.builder import create_survey_element_from_dict
from json_xls_convert import jsonXlsConvert
class XFormSerializer( Serializer ):
'''
Uses the pyxform provided classes to convert from JSON -> XForm xml
and back again.
'''
formats = [ 'xform', 'json', 'xls' ]
content_types = {
'json': 'application/json',
'xform': 'text/xml',
'xls': 'application/vnd.ms-excel'
}
def to_formList( self, repos ):
root = etree.Element( 'xforms' )
root.set( 'xmlns', 'http://openrosa.org/xforms/xformsList' )
for xform in repos:
element = etree.Element( 'xform' )
formId = etree.Element( 'formID' )
formId.text = xform[ 'name' ]
element.append( formId )
name = etree.Element( 'name' )
name.text = xform[ 'name' ]
element.append( name )
formType = etree.Element( 'type' )
formType.text = xform[ 'type' ]
element.append( formType )
downloadUrl = etree.Element( 'downloadUrl' )
if settings.DEBUG:
base_url = 'http://%s/api/v1/repos/' % ('localhost:8000')
else:
base_url = 'http://%s/api/v1/repos/' % (settings.HOSTNAME)
downloadUrl.text = '%s%s/?format=xform' % ( base_url, xform[ 'id' ] )
element.append( downloadUrl )
manifestUrl = etree.Element( 'manifestUrl' )
manifestUrl.text = '%s%s/manifest/?format=xml' % ( base_url, xform[ 'id' ] )
element.append( manifestUrl )
element.append( etree.Element( 'descriptionText' ) )
root.append( element )
return etree.tostring( root )
def to_manifest( self, data ):
root = etree.Element( 'manifest' )
root.set( 'xmlns', 'http://openrosa.org/xforms/xformsManifest' )
for media in data[ 'manifest' ]:
mediaFile = etree.Element( 'mediaFile' )
fileName = etree.Element( 'filename' )
fileName.text = media[0]
downloadUrl = etree.Element( 'downloadUrl' )
downloadUrl.text = media[1]
mediaFile.append( fileName )
mediaFile.append( downloadUrl )
root.append( mediaFile )
return etree.tostring( root )
def to_xform( self, data, options=None ):
options = options or {}
data = self.to_simple( data, options )
if 'manifest' in data:
# Return the xform manifest
return self.to_manifest( data )
elif 'objects' in data:
# Return the formList representation of our objects if they are
# present.
return self.to_formList( data.get( 'objects', [] ) )
elif 'id' in data:
# Accessing a single repo object! Convert completely into the
# xform format.
xform = {}
xform[ 'name' ] = data.get( 'name' )
# TODO: Fix pyxform to handle this correctly. # data.get( 'type' )
xform[ 'type' ] = 'survey'
xform[ 'default_language' ] = data.get( 'default_language', 'default' )
xform[ 'children' ] = data.get( 'children' )
return create_survey_element_from_dict( xform )._to_pretty_xml()
else:
raise Exception( data )
return None
def to_xls( self, data, options=None ):
options = options or {}
data = self.to_simple(data, options)
converter = jsonXlsConvert(data.get('name'))
return converter.writeToXls(data.get("children"))
```
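The serializer above leans on a simple lxml pattern: build elements, set text, append, serialize. A self-contained sketch of that pattern with made-up form data (no Django settings required) looks like this:

```python
# Stand-alone sketch of the lxml pattern used by to_formList/to_manifest above,
# with made-up form data; it only illustrates building and serializing the XML.
from lxml import etree

root = etree.Element('xforms')
root.set('xmlns', 'http://openrosa.org/xforms/xformsList')

xform = etree.Element('xform')
form_id = etree.Element('formID')
form_id.text = 'demo_form'
xform.append(form_id)

download = etree.Element('downloadUrl')
download.text = 'http://localhost:8000/api/v1/repos/1/?format=xform'
xform.append(download)

root.append(xform)
print(etree.tostring(root, pretty_print=True).decode())
```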
#### File: keep_backend/tests/__init__.py
```python
import json
import os
import shlex
import subprocess
import urllib
import urllib2
from django.test import LiveServerTestCase
from selenium import webdriver
from selenium.webdriver.firefox.webdriver import WebDriver
class HttpTestCase( LiveServerTestCase ):
@classmethod
def setUpClass( cls ):
cls.selenium = WebDriver()
#cls.selenium = webdriver.PhantomJS()
#cls.selenium.set_window_size( 1024, 768 )
with open( os.devnull, 'w' ) as devnull:
testdb = 'mongorestore -d test --drop ../_data/mongo-test/dhlab'
subprocess.call( shlex.split( testdb ),
stdout=devnull,
stderr=devnull )
super( HttpTestCase, cls ).setUpClass()
@classmethod
def tearDownClass( cls ):
cls.selenium.quit()
super( HttpTestCase, cls ).tearDownClass()
def open( self, url ):
self.selenium.get( '%s%s' % ( self.live_server_url, url ) )
class ApiTestCase( LiveServerTestCase ):
@classmethod
def setUpClass( cls ):
with open( os.devnull, 'w' ) as devnull:
testdb = 'mongorestore -d test --drop ../_data/mongo-test/dhlab'
subprocess.call( shlex.split( testdb ),
stdout=devnull,
stderr=devnull )
super( ApiTestCase, cls ).setUpClass()
def openRaw( self, url, params ):
final_url = '%s%s' % ( self.live_server_url, url )
return urllib2.urlopen( final_url, params ).read()
def open( self, url, params, method='GET', format='JSON' ):
final_url = '%s%s' % ( self.live_server_url, '/api/v1' )
final_url += url
encoded_params = ''
if params is not None:
encoded_params = urllib.urlencode( params, True )
if method == 'GET':
final_url += '?' + encoded_params
response = urllib2.urlopen( final_url )
else:
response = urllib2.urlopen( final_url, encoded_params )
if format == 'JSON':
return json.load( response )
else:
return response.read()
```
#### File: keep_backend/tests/test_openrosa_api.py
```python
from tests import ApiTestCase
class OpenRosaAPITests( ApiTestCase ):
def test_formlist( self ):
'''
'''
response = self.openRaw( '/bs/admin/formList', None )
assert response is not None
def test_repo_xform_xml( self ):
'''
Test if we can list the repo details for a specific repo for a
test user.
'''
# Get the list of repos for the test user
response = self.open( '/repos/', {'format': 'json', 'user': 'admin'} )
repo = response[ 'objects' ][0][ 'id' ]
# Grab the repo details
response = self.open( '/repos/%s' % ( repo ),
{'format': 'xform', 'user': 'admin'},
format='xform' )
assert response is not None
``` |
{
"source": "jianlins/PyFastNER",
"score": 2
} |
#### File: PyFastNER/PyFastNER/ReplicationFunctionsLambda.py
```python
import string
def processReplicationCommon(evalFunc, processRulesFunc, text, rule_map, match_begin, match_end, current_position, matches,
this_char, previous_char):
current_repeats = 0
text_length = len(text)
while evalFunc(this_char) and current_repeats < ReplicationFunctions.max_repeat and current_position < text_length:
current_repeats += 1
current_position += 1
if current_position == text_length:
break
this_char = text[current_position]
processRulesFunc(text, rule_map, match_begin, match_end, current_position, matches, previous_char, False, '+')
pass
class ReplicationFunctions:
def __init__(self, processRulesFunc, max_repeat=50):
self.replication_funcs = dict()
self.processRules = processRulesFunc
ReplicationFunctions.max_repeat = max_repeat
self.initReplicationFunctions(self.replication_funcs)
pass
def initReplicationFunctions(self, replication_funcs):
replication_funcs['s'] = self.processReplication_s
replication_funcs['n'] = self.processReplication_n
replication_funcs['d'] = self.processReplication_d
replication_funcs['C'] = self.processReplication_C
replication_funcs['c'] = self.processReplication_c
replication_funcs['p'] = self.processReplication_p
replication_funcs['a'] = self.processReplication_a
replication_funcs['u'] = self.processReplication_u
replication_funcs['w'] = self.processReplication_w
pass
def processReplication_s(self, text, rule_map, match_begin, match_end, current_position, matches,
this_char, previous_char):
evalFunc = lambda char: (char == ' ' or char == '\t' or ord(char) == 160)
processReplicationCommon(evalFunc, self.processRules, text, rule_map, match_begin, match_end, current_position, matches,
this_char, previous_char)
pass
def processReplication_n(self, text, rule_map, match_begin, match_end, current_position, matches, this_char, previous_char):
evalFunc = lambda char: (char == '\n' or char == '\r')
processReplicationCommon(evalFunc, self.processRules, text, rule_map, match_begin, match_end, current_position, matches,
this_char, previous_char)
pass
def processReplication_d(self, text, rule_map, match_begin, match_end, current_position, matches,
this_char, previous_char):
evalFunc = lambda char: char.isdigit()
processReplicationCommon(evalFunc, self.processRules, text, rule_map, match_begin, match_end, current_position, matches,
this_char, previous_char)
pass
def processReplication_C(self, text, rule_map, match_begin, match_end, current_position, matches,
this_char, previous_char):
evalFunc = lambda char: char.isupper()
processReplicationCommon(evalFunc, self.processRules, text, rule_map, match_begin, match_end, current_position, matches,
this_char, previous_char)
pass
def processReplication_c(self, text, rule_map, match_begin, match_end, current_position, matches,
this_char, previous_char):
evalFunc = lambda char: char.islower()
processReplicationCommon(evalFunc, self.processRules, text, rule_map, match_begin, match_end, current_position, matches,
this_char, previous_char)
pass
def processReplication_p(self, text, rule_map, match_begin, match_end, current_position, matches,
this_char, previous_char):
evalFunc = lambda char: char in string.punctuation
processReplicationCommon(evalFunc, self.processRules, text, rule_map, match_begin, match_end, current_position, matches,
this_char, previous_char)
pass
def processReplication_a(self, text, rule_map, match_begin, match_end, current_position, matches,
this_char, previous_char):
evalFunc = lambda char: not char.isspace()
processReplicationCommon(evalFunc, self.processRules, text, rule_map, match_begin, match_end, current_position, matches,
this_char, previous_char)
pass
def processReplication_u(self, text, rule_map, match_begin, match_end, current_position, matches,
this_char, previous_char):
evalFunc = lambda char: (char > '~' and ord(char) != 160)
processReplicationCommon(evalFunc, self.processRules, text, rule_map, match_begin, match_end, current_position, matches,
this_char, previous_char)
pass
def processReplication_w(self, text, rule_map, match_begin, match_end, current_position, matches,
this_char, previous_char):
evalFunc = lambda char: (char > '~' or char.isspace())
processReplicationCommon(evalFunc, self.processRules, text, rule_map, match_begin, match_end, current_position, matches,
this_char, previous_char)
pass
``` |
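The evaluator functions above all share one loop shape: consume characters while a character-class predicate holds, capped by max_repeat. A minimal standalone sketch of that idea:

```python
# Minimal sketch of the "+" replication idea implemented above: advance through
# the text while a character-class predicate holds, capped by max_repeat.
def consume_repeats(text, start, eval_func, max_repeat=50):
    pos = start
    while pos < len(text) and (pos - start) < max_repeat and eval_func(text[pos]):
        pos += 1
    return pos  # index of the first character that no longer matches

print(consume_repeats('12345abc', 0, str.isdigit))        # -> 5 (like "\d+")
print(consume_repeats('   next', 0, lambda c: c == ' '))  # -> 3 (like "\s+")
```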
{
"source": "jianlins/PyRuSH",
"score": 2
} |
#### File: PyRuSH/PyRuSH/PyRuSHSentencizer.py
```python
from spacy.pipeline import Sentencizer
from .RuSH import RuSH
from .StaticSentencizerFun import cpredict, cset_annotations
class PyRuSHSentencizer(Sentencizer):
def __init__(self, rules_path: str = '', max_repeat: int = 50, auto_fix_gaps: bool = True) -> Sentencizer:
"""
        @param rules_path: Path to the rule file, or the rules themselves as a string.
        @param max_repeat: Maximum number of repeats that the "+" wildcard is allowed to match.
        @param auto_fix_gaps: If gaps are caused by malformed rules, try to fix them automatically.
                              Note that this does not control where a sentence ends.
        TODO: need to see how the downstream spacy components make use of doc.c
"""
self.rules_path = rules_path
self.rush = RuSH(rules=rules_path, max_repeat=max_repeat, auto_fix_gaps=auto_fix_gaps)
@classmethod
def from_nlp(cls, nlp, **cfg):
return cls(**cfg)
def __call__(self, doc):
tags = self.predict([doc])
cset_annotations([doc], tags)
return doc
def predict(self, docs):
"""Apply the pipeline's model to a batch of docs, without
modifying them.
"""
guesses = cpredict(docs, self.rush.segToSentenceSpans)
return guesses
def set_annotations(self, docs, batch_tag_ids, tensors=None):
"""
        This function overrides spacy's Sentencizer.set_annotations.
        @param batch_tag_ids: a list of each doc's tags (a list of boolean values)
        @param tensors: a placeholder for future extensions
"""
cset_annotations(docs, batch_tag_ids, tensors)
```
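A minimal usage sketch for the component above, assuming spaCy 2.x (which matches the `Sentencizer` subclassing and `from_nlp` hook) and a local rules file; the path below is a placeholder:

```python
# Usage sketch, assuming spaCy 2.x and a locally available rules file
# (the path is a placeholder, not shipped with this module).
import spacy
from PyRuSH.PyRuSHSentencizer import PyRuSHSentencizer

nlp = spacy.blank('en')
nlp.add_pipe(PyRuSHSentencizer('conf/rush_rules.tsv'))

doc = nlp('S/p C6-7 ACDF. No urgent events overnight. Pain control ON.')
for sent in doc.sents:
    print(sent.text)
```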
#### File: PyRuSH/tests/Test_Rush.py
```python
import unittest
import os
from PyRuSH import RuSH
class TestRuSH(unittest.TestCase):
def setUp(self):
self.pwd = os.path.dirname(os.path.abspath(__file__))
self.rush = RuSH(str(os.path.join(self.pwd, 'rush_rules.tsv')), enable_logger=True)
def test1(self):
input_str = 'Can Mr. K check it. Look\n good.\n'
sentences = self.rush.segToSentenceSpans(input_str)
assert (sentences[0].begin == 0 and sentences[0].end == 19)
assert (sentences[1].begin == 20 and sentences[1].end == 31)
def test2(self):
input_str = 'S/p C6-7 ACDF. No urgent events overnight. Pain control ON. '
sentences = self.rush.segToSentenceSpans(input_str)
assert (sentences[0].begin == 0 and sentences[0].end == 14)
assert (sentences[1].begin == 15 and sentences[1].end == 42)
assert (sentences[2].begin == 43 and sentences[2].end == 59)
def test3(self):
input_str = ''' • Coagulopathy (HCC)
• Hepatic encephalopathy (HCC)
• Hepatorenal syndrome (HCC)
'''
sentences = self.rush.segToSentenceSpans(input_str)
assert (sentences[0].begin == 1 and sentences[0].end == 22)
assert (sentences[1].begin == 31 and sentences[1].end == 62)
assert (sentences[2].begin == 71 and sentences[2].end == 100)
def test4(self):
input_str = 'Delirium - '
sentences = self.rush.segToSentenceSpans(input_str)
self.printDetails(sentences, input_str)
assert (sentences[0].begin == 0 and sentences[0].end == 8)
pass
def test5(self):
input_str = "The patient complained about the TIA \n\n No memory issues. \"I \n\nOrdered the MRI scan.- "
sentences = self.rush.segToSentenceSpans(input_str)
self.printDetails(sentences, input_str)
assert (sentences[0].begin == 0 and sentences[0].end == 36)
assert (sentences[1].begin == 39 and sentences[1].end == 57)
assert (sentences[2].begin == 58 and sentences[2].end == 84)
pass
def printDetails(self, sentences, input_str):
for i in range(0, len(sentences)):
sentence = sentences[i]
print('assert (sentences[' + str(i) + '].begin == ' + str(sentence.begin) +
' and sentences[' + str(i) + '].end == ' + str(sentence.end) + ')')
for i in range(0, len(sentences)):
sentence = sentences[i]
print(input_str[sentence.begin:sentence.end])
# self.printDetails(sentences, input_str)
pass
def test6(self):
input_str = '''The Veterans Aging Cohort Study (VACS) is a large, longitudinal, observational study of a cohort of HIV infected and matched uninfected Veterans receiving care within the VA [2]. This cohort was designed to examine important health outcomes, including cardiovascular diseases like heart failure, among HIV infected and uninfected Veterans.'''
sentences = self.rush.segToSentenceSpans(input_str)
self.printDetails(sentences, input_str)
def test7(self):
input_str = '''The Veterans Aging Cohort Study (VACS) is a large, longitudinal, observational study of a cohort of HIV infected and matched uninfected Veterans receiving care within the VA [2]. This cohort was designed to examine important health outcomes, including cardiovascular diseases like heart failure, among HIV infected and uninfected Veterans.'''
rules = []
rules.append(r'\b(\a 0 stbegin')
rules.append(r'\a\e 2 stend')
rules.append(r'. +(This 0 stbegin')
rules.append(r'](. 2 stend')
rush = RuSH(rules, enable_logger=True)
sentences = rush.segToSentenceSpans(input_str)
self.printDetails(sentences, input_str)
def test_doc2(self):
input_str = '''
9. Advair b.i.d.
10. Xopenex q.i.d. and p.r.n.
I will see her in a month to six weeks. She is to follow up with Dr. X before that.
'''
self.rush = RuSH(str(os.path.join(self.pwd, 'rush_rules.tsv')), min_sent_chars=2, enable_logger=True)
sentences = self.rush.segToSentenceSpans(input_str)
for sent in sentences:
print('>' + input_str[sent.begin:sent.end] + '<\n')
assert (len(sentences) == 4)
sent = sentences[1]
assert (input_str[sent.begin:sent.end] == '10. Xopenex q.i.d. and p.r.n.')
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jianlins/quicksect",
"score": 2
} |
#### File: quicksect/tests/Test_quicksect.py
```python
import pyximport
pyximport.install()
import os, sys
sys.path.append("../src")
sys.path.append("src")
import unittest
# from quicksectx import IntervalNode as IntervalNodeX, Interval as IntervalX, IntervalTree as IntervalTreeX
from quicksectx import IntervalNode, Interval, IntervalTree
# tree = IntervalTree()
# tree.add(0, 3, 100)
# tree.add(5, 8, 110)
# tree.add(6, 10, 120)
# tree.add(8, 9, 130)
# tree.add(15, 23, 140)
# tree.add(19, 20, 150)
# tree.add(17, 19, 160)
# tree.add(26, 26, 160)
# tree.add(25, 30, 160)
# tree.add(16, 21, 160)
# print(tree.pretty_print())
# print('\n\n---\n\n\n')
# tree = IntervalTree()
# tree.add(0, 3, 100)
# tree.add(5, 8, 110)
# tree.add(6, 10, 120)
# tree.add(8, 9, 130)
# tree.add(15, 23, 140)
# tree.add(16, 21, 160)
# tree.add(17, 19, 160)
# tree.add(19, 20, 150)
# tree.add(25, 30, 160)
# tree.add(26, 26, 160)
# tree.add(27, 28, 160)
# tree.add(27, 28, 160)
# tree.add(27, 28, 160)
# print(tree.pretty_print())
class MyTestCase(unittest.TestCase):
def test_1(self):
tree = IntervalTree()
tree.add(1, 3, 100)
tree.add(3, 7, 110)
tree.add(2, 5, 120)
tree.add(4, 6, 130)
tree.add(4, 8, 140)
tree.add(4, 8, 150)
tree.add(5, 7, 160)
print(tree.pretty_print())
print(tree.find(Interval(2, 5)))
tree.remove(Interval(2, 5))
print(tree.find(Interval(2, 5)))
print(tree.pretty_print())
self.assertEqual(True, True)
if __name__ == '__main__':
unittest.main()
print(Interval(1, 2).__reduce__())
``` |
{
"source": "jianlins/SmartAnno",
"score": 3
} |
#### File: SmartAnno/models/BaseClassifier.py
```python
import abc
import joblib
import os
from SmartAnno.utils.NoteBookLogger import logMsg
NotTrained = 0
InTraining = 1
ReadyTrained = 2
class BaseClassifier:
# indicate the status of classifier
status = NotTrained
instance = None
    # add optional parameters with default values here (will be overwritten by __init__'s **kwargs)
    # These parameters will be shown in the GUI to ask for users' configuration
def __init__(self, task_name='default_task', pipeline=None, params=None, model_file=None, **kwargs):
self.task_name = task_name
for name, value in kwargs.items():
setattr(self, name, value)
if model_file is None:
model_file = 'models/saved/' + type(self).__name__ + '_' + task_name
self.model_file = model_file
self.model = None
if os.path.isfile(self.model_file):
self.model = self.loadModel()
BaseClassifier.status = ReadyTrained
else:
self.model = self.init_model()
BaseClassifier.status = NotTrained
# automatically set customized parameters to self object
BaseClassifier.instance = self
pass
@abc.abstractmethod
def init_model(self):
"""separate the definition, because at most of the time, you would want to automatically load previously trained
model instead. """
return None
@abc.abstractmethod
def classify(self, txt):
return 'Irrelevant'
@abc.abstractmethod
def train(self, x, y):
logMsg('error, abstract method called')
# [] to return Documents, dict() to return grouping information
pass
def saveModel(self):
"""will be automatically saved when user click complete"""
joblib.dump(self.model, self.model_file)
pass
def loadModel(self):
"""will be automatically load when initiate the classifier if self.model_file exists."""
model = joblib.load(self.model_file)
BaseClassifier.status = ReadyTrained
return model
```
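A hypothetical concrete subclass can make the contract above clearer. The sketch below plugs a scikit-learn pipeline into `init_model`/`train`/`classify`; the class name and pipeline choice are illustrative, not part of SmartAnno:

```python
# A hypothetical concrete classifier built on the BaseClassifier contract above,
# using a scikit-learn pipeline; class and pipeline choices are made up for illustration.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from SmartAnno.models.BaseClassifier import BaseClassifier

class TfidfLogisticClassifier(BaseClassifier):
    def init_model(self):
        # built fresh only when no saved model file exists yet
        return Pipeline([('tfidf', TfidfVectorizer()),
                         ('clf', LogisticRegression(max_iter=1000))])

    def train(self, x, y):
        self.model.fit(x, y)

    def classify(self, txt):
        return self.model.predict([txt])[0]
```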
#### File: models/cnn/PreProcessing.py
```python
import re
import string
from os import path
import gensim
import nltk
import numpy as np
import spacy
from PyRuSH.RuSH import RuSH
from gensim.corpora import Dictionary
from gensim.models import KeyedVectors
import spacy.matcher as matcher
class PreProcessing:
def __init__(self,
annotation_type='SOCIAL_SUPPORT',
default_value='no mention',
filter_file='conf/keywords_filter.txt',
stopwords_file='conf/stop_words.txt',
word2vec_file='models/glove.word2vec.txt.bin',
rush_rules='conf/rush_rules.tsv',
max_token_per_sentence=150):
# each time we only train/predict a models for one annotation type
# set an arbitrary max length of sentences, so that we can pad sentences without knowing the max length of sentences in testing set.
self.max_token_per_sentence = max_token_per_sentence
self.annotation_type = annotation_type
self.default_value = default_value
self.real_max_length = 0
self.rush = RuSH(rush_rules)
        self.html_tokens_p = re.compile(r'^&[a-z]{2,4};$')
self.punctuations = set(string.punctuation)
# keep '?'
self.punctuations.remove('?')
self.spacy_nlp = spacy.load('en', disable=['parser', 'tagger', 'ner'])
self.matcher = None
self.corpus = None
keywords_filter = []
print('load filter keywords')
# load filter keywords
if path.isfile(filter_file):
f = open(filter_file, 'r')
keywords_filter = [line for line in f.readlines() if not line.startswith('#')]
f.close()
if len(keywords_filter) > 0:
self.matcher = matcher.PhraseMatcher(self.spacy_nlp.tokenizer.vocab, max_length=6)
for keyword in keywords_filter:
                self.matcher.add(keyword.strip(), None, self.spacy_nlp.make_doc(keyword.strip()))  # PhraseMatcher patterns must be Doc objects
print('load stopwords')
# load stop words
if path.isfile(stopwords_file):
f = open(stopwords_file, 'r')
self.my_stopwords = set(f.readlines())
f.close()
else:
self.my_stopwords = set(nltk.corpus.stopwords.words('english'))
f = open(stopwords_file, 'w')
f.writelines('\n'.join(self.my_stopwords))
f.close()
print('load label dictionary')
self.label_dict = None
self.label_dict_file = 'models/' + self.annotation_type + '_labels.dict'
# load dictionary
if path.isfile(self.label_dict_file):
self.label_dict = Dictionary.load(self.label_dict_file)
print('load glove model')
# self.glove_model = glove2word2vec.smart_open(word2vec_file)
if path.isfile(word2vec_file):
if word2vec_file.endswith('.bin'):
self.glove_model = KeyedVectors.load_word2vec_format(word2vec_file, binary=True)
else:
self.glove_model = KeyedVectors.load_word2vec_format(word2vec_file, binary=False)
print('convert txt model to binary model...')
self.glove_model.save_word2vec_format(word2vec_file + '.bin', binary=True)
pass
""" Given a plain text document, return a list of tokenized sentences that contain filter keywords"""
def processDocument(self, doc_text, tokenized_sentences=[], labels=[], annotations=None, doc_id=None):
print(doc_id)
sentences = self.rush.segToSentenceSpans(doc_text)
sentences_txt = ([doc_text[sentence.begin:sentence.end] for sentence in sentences])
anno_id = 0
for i in range(0, len(sentences_txt)):
sentence = sentences_txt[i]
label = self.default_value
# if annotations are available, read as labels
if annotations is not None:
if len(annotations) > 0:
if anno_id < len(annotations) \
and annotations[anno_id]['start'] >= sentences[i].begin \
and annotations[anno_id]['end'] <= sentences[i].end:
label = list(annotations[anno_id]['attributes'].values())[0]
anno_id += 1
elif anno_id < len(annotations) \
and annotations[anno_id]['end'] <= sentences[i].begin:
print(doc_id + str(annotations[anno_id]) + 'was skipped')
i -= 1
anno_id += 1
words = [token for token in self.spacy_nlp.make_doc(sentence)
if len(''.join(ch for ch in token.text if ch not in self.punctuations)) > 0
and not self.html_tokens_p.search(token.text)
and not token.text.replace('.', '', 1).isdigit()
and not token.text.replace('-', '', 1).isdigit()
and token.text not in self.my_stopwords]
if self.real_max_length < len(words):
self.real_max_length = len(words)
if self.get_matches(words):
if len(words) < self.max_token_per_sentence:
tokenized_sentences.append(self.pad_sentence([word.text for word in words]))
labels.append(label)
else:
begin = 0
words = [word.text for word in words]
while begin <= len(words) - self.max_token_per_sentence:
                        tokenized_sentences.append(words[begin:begin + self.max_token_per_sentence])
# overlap the sliced sub-sentences
begin += int(self.max_token_per_sentence / 2)
if begin < len(words):
tokenized_sentences.append(self.pad_sentence(words[len(words) - self.max_token_per_sentence:]))
return tokenized_sentences
def get_matches(self, sentence_tokens):
if self.matcher is None:
return True
        # Rebuild a Doc from the already-filtered tokens so PhraseMatcher can run on it,
        # and return a boolean so the keyword filter in processDocument actually filters.
        doc = self.spacy_nlp.make_doc(' '.join(token.text for token in sentence_tokens))
        return len(self.matcher(doc)) > 0
# def processLabelledCorpus(self, corpus_dir):
# corpus_reader = EhostCorpusReader(corpus_dir)
# corpus = corpus_reader.parse()
# self.corpus = corpus
# tokenized_sentences = []
# labels = []
# for doc_id, doc in corpus.items():
# if self.annotation_type in doc['categorized']:
# annotations = [doc['annotations'][anno_id] for anno_id in doc['categorized'][self.annotation_type]]
# else:
# annotations = []
# self.processDocument(doc['text'], tokenized_sentences, labels, annotations, doc_id)
#
# x, y = self.vectorize(tokenized_sentences, labels)
# return x, y
def pad_sentence(self, sentence, padding_word="<PAD/>"):
"""
Revised from alexander-rakhlin's code
        Pads a sentence to self.max_token_per_sentence tokens.
        Returns the padded sentence.
"""
num_padding = self.max_token_per_sentence - len(sentence)
new_sentence = sentence + [padding_word] * num_padding
return new_sentence
def vectorize(self, sentences, labels=[]):
"""
Revised from alexander-rakhlin's code, use glove models instead.
        Maps sentences and labels to vectors based on the glove model.
"""
print(labels)
if self.label_dict is None:
self.label_dict = gensim.corpora.Dictionary([set(labels)])
self.label_dict.compactify()
self.label_dict.save(self.label_dict_file)
self.label_dict.save_as_text(self.label_dict_file + '.txt')
print(set(labels))
x = np.array([[self.glove_model.word_vec(word) if word in self.glove_model.vocab
else np.random.uniform(-0.25, 0.25, self.glove_model.vector_size) for word in sentence] for
sentence in sentences])
y = None
if len(labels) > 0:
y = np.zeros((len(labels), len(self.label_dict.keys())))
for i in range(0, len(labels)):
label = labels[i]
y[i][self.label_dict.token2id[label]] = 1
shuffle_indices = np.random.permutation(np.arange(len(y)))
x = x[shuffle_indices]
y = y[shuffle_indices]
return x, y
```
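The padding and sliding-window handling inside `processDocument` is easier to see in isolation. The sketch below mirrors that logic with a tiny max length so the behaviour is visible:

```python
# Stand-alone sketch of the padding / sliding-window handling used in
# processDocument above, with a small max length so the behaviour is visible.
def pad_or_slide(tokens, max_len, pad='<PAD/>'):
    if len(tokens) < max_len:
        return [tokens + [pad] * (max_len - len(tokens))]
    windows, begin = [], 0
    while begin <= len(tokens) - max_len:
        windows.append(tokens[begin:begin + max_len])
        begin += max_len // 2          # overlap consecutive windows
    if begin < len(tokens):
        windows.append(tokens[len(tokens) - max_len:])
    return windows

print(pad_or_slide(['a', 'b', 'c'], 5))
print(pad_or_slide(list('abcdefghi'), 4))
```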
#### File: SmartAnno/models/GloveModel.py
```python
import fileinput
from os import path
from gensim.models import KeyedVectors
from SmartAnno.gui.Workflow import logMsg
NotInitiated = 0
Initiating = 1
Initiated = 2
class GloveModel:
glove_model = None
status = NotInitiated
def __init__(self, word2vec_file='models/saved/glove/glove.42B.300d.bin', vocab=1900000, vect=300):
glove_model = None
if GloveModel.glove_model is None and GloveModel.status == NotInitiated:
if path.isfile(word2vec_file):
GloveModel.status = Initiating
logMsg('Load glove model in the backend...')
print('Load glove model in the backend...')
if word2vec_file.endswith('.bin'):
glove_model = KeyedVectors.load_word2vec_format(word2vec_file, binary=True)
GloveModel.status = Initiated
else:
glove_model = KeyedVectors.load_word2vec_format(word2vec_file, binary=False)
logMsg('convert txt model to binary model...')
glove_model.save_word2vec_format(word2vec_file[:-3] + '.bin', binary=True)
GloveModel.status = Initiated
elif path.isfile(word2vec_file[:-3] + 'txt'):
GloveModel.status = Initiating
logMsg('Load glove model in the backend...')
print('Load glove model in the backend...')
txt_model = word2vec_file[:-3] + 'txt'
self.addDimensions(txt_model, line_to_prepend=str(vocab) + ' ' + str(vect))
glove_model = KeyedVectors.load_word2vec_format(txt_model, binary=False)
logMsg('convert txt model to binary model...')
glove_model.save_word2vec_format(word2vec_file, binary=True)
GloveModel.status = Initiated
else:
logMsg(("Either ", path.abspath(word2vec_file), ' or ', path.abspath(word2vec_file[:-3] + 'txt'),
' exists.'))
print(("Either ", path.abspath(word2vec_file), ' or ', path.abspath(word2vec_file[:-3] + 'txt'),
' exists.'))
GloveModel.glove_model = glove_model
pass
def checkModelExistance(self, word2vec_file='models/glove/glove.42B.300d.bin'):
if path.isfile(word2vec_file) or path.isfile(word2vec_file[:-3] + 'txt'):
return True
else:
return False
def addDimensions(self, filename, line_to_prepend):
with open(filename, 'r') as f:
line = f.readline()
if line.startswith(line_to_prepend):
return
f = fileinput.input(filename, inplace=1)
for xline in f:
if f.isfirstline():
print(line_to_prepend + '\n' + xline.rstrip('\r\n'))
else:
print(xline.rstrip('\r\n'))
pass
```
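What `GloveModel` does on first run is essentially a one-off format conversion. A rough equivalent using gensim 3.x helpers is sketched below; the paths are placeholders for whatever GloVe file is actually downloaded:

```python
# A hypothetical one-off conversion matching what GloveModel does above:
# add the "<vocab> <dim>" word2vec header and cache a binary copy.
# Paths are placeholders; assumes gensim 3.x KeyedVectors.
from gensim.scripts.glove2word2vec import glove2word2vec
from gensim.models import KeyedVectors

glove_txt = 'models/saved/glove/glove.42B.300d.txt'   # raw GloVe download
w2v_txt = glove_txt + '.w2v'
glove2word2vec(glove_txt, w2v_txt)                    # writes the header line

model = KeyedVectors.load_word2vec_format(w2v_txt, binary=False)
model.save_word2vec_format(glove_txt[:-3] + 'bin', binary=True)  # faster to reload
```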
#### File: models/sampling/BaseSampler.py
```python
import abc
class BaseSampler:
def __init__(self, **kwargs):
pass
@abc.abstractmethod
def sampling(self, sample_size=0):
# [] to return Documents, dict() to return grouping information
return [], dict()
```
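A concrete sampler only has to honour the `sampling` signature above. A hypothetical in-memory random sampler, for illustration only:

```python
# A hypothetical concrete sampler honoring the BaseSampler contract above:
# it returns a random subset of an in-memory document list plus empty grouping info.
import random
from SmartAnno.models.sampling.BaseSampler import BaseSampler

class RandomListSampler(BaseSampler):
    def __init__(self, documents, **kwargs):
        super().__init__(**kwargs)
        self.documents = documents

    def sampling(self, sample_size=0):
        size = min(sample_size, len(self.documents))
        return random.sample(self.documents, size), dict()
```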
#### File: models/sampling/UniversalSentenceEncoderStratefiedSampler.py
```python
from collections import OrderedDict
from pathlib import Path
import faiss
import numpy as np
from SmartAnno.db.ORMs import Document, Annotation
from SmartAnno.models.sampling.KeywordStratefiedSampler import KeywordStratefiedSampler
from SmartAnno.utils.ConfigReader import ConfigReader
from SmartAnno.utils.NoteBookLogger import logError
class UniversalSentenceEncoderStratefiedSampler(KeywordStratefiedSampler):
# document/sentence embedding dump files
VECTOR_DUMP = 'vec_dump.pickle'
# faiss index dump file
FAISS_INX = 'faiss_idx.index'
# number of results to return for each query (faiss has limits, set smaller to gain speed)
MAX_QUERY_RES = 100
# number of documents to encode each time (universal sentence encoder has limits)
ENCODING_PACE = 1000
def __init__(self, **kwargs):
self.sample_size = 0
self.previous_sampled_ids = kwargs['previous_sampled_ids']
self.dao = kwargs['dao']
self.task_id = kwargs['task_id']
self.dataset_id = 'origin_doc' if 'dataset_id' not in kwargs else kwargs['dataset_id']
self.ignore_case = True
self.distance_threhold = 0.3
self.grouped_ids = dict()
self.all_contain_ids = set()
self.available_not_contain = 0
self.new_available_not_contain = 0
self.new_ids = dict()
self.current_stats = dict()
if 'index_dir' in kwargs:
self.index_dir = kwargs['index_dir']
else:
self.index_dir = 'data/faiss'
if not Path(self.index_dir).is_dir():
Path(self.index_dir).mkdir()
self.dataset_idx = OrderedDict()
self.dataset_txt = []
self.dataset_doc_ids = []
self.faiss_index = None
self.dataset_embeddings = None
pass
def getSummary(self, filters: dict = dict(), reindex=False, distance_threhold=None) -> dict:
"""get doc_names (ids) grouped by distance to previous annotated categories
:param filters: a dictionary of keywords grouped by type names
        :return: a dictionary of doc_names grouped by type names, a dictionary of not-yet-sampled doc_names
        grouped by type names, and count statistics ('all_counts'/'new_counts') for each type
:rtype: dict
"""
if distance_threhold is None:
distance_threhold = self.distance_threhold
self.grouped_ids = {type_name: set() for type_name in filters.keys()}
self.new_ids = {type_name: set() for type_name in filters.keys()}
annos, self.dataset_idx, self.dataset_txt, self.dataset_doc_ids = self.read_db(reindex)
if reindex or not Path(self.index_dir, self.VECTOR_DUMP).is_file():
self.faiss_index, self.dataset_embeddings = self.reindex_embeddings()
else:
print('Loading indexed sentence embeddings and faiss indexes...')
self.faiss_index, self.dataset_embeddings = self.load_embedding_index()
return self.gen_stats(self.dataset_idx, self.dataset_doc_ids, self.dataset_embeddings, self.faiss_index, annos,
distance_threhold)
def read_db(self, reindex: bool = False) -> (dict, dict, list, list):
"""
get documents (based on dataset_id) and previous annotated records (based on task_id) from database
:param reindex:
:return:annotated document ids grouped by annotation type name, dictionary to map document id to the index of documents array,
documents array, document ids array (map index to document id)
"""
annos = dict()
with self.dao.create_session() as session:
res = session.query(Annotation).filter(Annotation.TASK_ID == self.task_id, Annotation.REVIEWED_TYPE != None)
for anno in res:
if anno.REVIEWED_TYPE not in annos:
annos[anno.REVIEWED_TYPE] = set()
annos[anno.REVIEWED_TYPE].add(anno.DOC_ID)
# read dataset from database
if reindex or len(self.dataset_idx) == 0:
self.dataset_idx.clear()
self.dataset_txt.clear()
with self.dao.create_session() as session:
res = session.query(Document).filter(Document.DATASET_ID == self.dataset_id)
for doc in res:
self.dataset_idx[doc.DOC_ID] = len(self.dataset_txt)
self.dataset_txt.append(doc.TEXT)
self.dataset_doc_ids.append(doc.DOC_ID)
return annos, self.dataset_idx, self.dataset_txt, self.dataset_doc_ids
def reindex_embeddings(self) -> (faiss.Index, np.ndarray):
"""
index/reoindex document/sentence embeddings
:return: faiss index and document/sentence embeddings
"""
self.dataset_embeddings = self.generate_embeddings()
dimension = self.dataset_embeddings.shape[1]
# save vectors
self.dataset_embeddings.dump(str(Path(self.index_dir, self.VECTOR_DUMP)))
print('Sentence embedding generated.')
self.faiss_index = faiss.IndexFlatL2(dimension)
self.faiss_index.add(self.dataset_embeddings)
# save faiss index
faiss.write_index(self.faiss_index, str(Path(self.index_dir, self.FAISS_INX)))
print('Sentence embedding indexed.')
return self.faiss_index, self.dataset_embeddings
def generate_embeddings(self) -> np.ndarray:
"""
        Generate embeddings for each document/sentence --- can be replaced with another embedding method
:return: 2d numpy array of vectors (each row represent a document embedding)
"""
print('Start reindexing sentence embeddings...')
import tensorflow as tf
import tensorflow_hub as hub
module_url = "https://tfhub.dev/google/universal-sentence-encoder-large/3"
embed = hub.Module(module_url)
print('Sentence encoder model loaded.')
# need to split the dataset to fit into memory for sentence encoder
self.dataset_embeddings = []
i = 0
pace = self.ENCODING_PACE
print('Start encoding documents...')
with tf.Session() as session:
session.run([tf.global_variables_initializer(), tf.tables_initializer()])
while i <= (len(self.dataset_txt) - pace) / pace:
self.dataset_embeddings.append(session.run(embed(self.dataset_txt[i * pace:(i + 1) * pace])))
print(str(i * pace) + ' documents have been encoded.')
i += 1
if i * pace < len(self.dataset_txt):
self.dataset_embeddings.append(session.run(embed(self.dataset_txt[i * pace:])))
if len(self.dataset_embeddings) == 0:
logError('dataset_embeddings is none, no documents were read from the database.')
            return np.array([[]])
self.dataset_embeddings = np.concatenate(self.dataset_embeddings)
return self.dataset_embeddings
def load_embedding_index(self) -> (faiss.Index, np.ndarray):
"""
Load previous indexed embedding from dump files
:return:
"""
self.dataset_embeddings = np.load(str(Path(self.index_dir, self.VECTOR_DUMP)), allow_pickle=True)
self.faiss_index = faiss.read_index(str(Path(self.index_dir, self.FAISS_INX)))
return self.faiss_index, self.dataset_embeddings
def gen_stats(self, dataset_idx: OrderedDict, dataset_doc_ids: [], dataset_embeddings: np.ndarray,
faiss_index: faiss.Index,
annos: dict, distance_threshold: float) -> (dict, dict, dict):
"""
Generate count stats from the dataset, grouped by different annotation types
:param dataset_idx: The dictionary to map document id to the index in dataset_doc_ids
:param dataset_doc_ids: Array of document ids
:param dataset_embeddings: Numpy array of document embeddings
:param faiss_index: Faiss index
:param annos: The dictionary to map each annotation type to a set of document ids.
        :param distance_threshold: A threshold used to exclude dissimilar query results
        :return: A dictionary grouping document ids by annotation type, a dictionary grouping not-yet-sampled
        document ids by annotation type, and count statistics for each annotation type.
"""
distances = {type_name: dict() for type_name in self.grouped_ids.keys()}
print(type(dataset_embeddings))
max_query_res = int(len(dataset_embeddings) * 0.8)
if max_query_res > self.MAX_QUERY_RES:
max_query_res = self.MAX_QUERY_RES
print('Querying similar document embeddings...')
for type_name, doc_ids in annos.items():
subset_embeddings = np.array([dataset_embeddings[dataset_idx[doc_id]] for doc_id in doc_ids])
for i in range(0, len(subset_embeddings)):
res_distances, res_doc_idx_ids = faiss_index.search(subset_embeddings[i:i + 1], max_query_res)
for j in range(0, len(res_distances[0])):
res_d = res_distances[0][j]
if res_d > distance_threshold:
break
doc_id = dataset_doc_ids[res_doc_idx_ids[0][j]]
self.grouped_ids[type_name].add(doc_id)
# update the distances of a candidate doc to the closest doc in the reviewed documents
if doc_id not in distances[type_name] or res_d < distances[type_name][doc_id]:
distances[type_name][doc_id] = res_d
# solve overlapping candidates
print('Solve overlapping candidates...')
for doc_id in dataset_doc_ids:
shortest_distance = 10000
to_remove_from_types = []
previous_type = ''
for type_name in distances.keys():
if doc_id in distances[type_name] and distances[type_name][doc_id] < shortest_distance:
shortest_distance = distances[type_name][doc_id]
if previous_type != '':
to_remove_from_types.append(type_name)
previous_type = type_name
for type_name in to_remove_from_types:
self.grouped_ids[type_name].remove(doc_id)
available_outscope_ids = set(dataset_doc_ids)
        # identify the documents that haven't been reviewed
        print("identify the documents that haven't been reviewed")
for type_name, doc_ids in self.grouped_ids.items():
available_outscope_ids = available_outscope_ids - doc_ids
self.new_ids[type_name] = doc_ids - self.previous_sampled_ids
self.current_stats = {'all_counts': {type_name: len(value) for type_name, value in self.grouped_ids.items()},
'new_counts': {type_name: len(value) for type_name, value in self.new_ids.items()}}
self.available_not_contain = len(available_outscope_ids)
self.current_stats['all_counts']['not_contain'] = self.available_not_contain
self.new_available_not_contain = len(available_outscope_ids - self.previous_sampled_ids)
self.current_stats['new_counts']['not_contain'] = self.new_available_not_contain
return self.grouped_ids, self.new_ids, self.current_stats
```
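The faiss round-trip in `reindex_embeddings`/`gen_stats` reduces to build-add-search. The sketch below reproduces it with random vectors standing in for the Universal Sentence Encoder output:

```python
# Minimal faiss round-trip mirroring reindex_embeddings/gen_stats above, with
# random vectors standing in for the Universal Sentence Encoder output.
import numpy as np
import faiss

dim = 512                                    # USE-large embedding size
vectors = np.random.random((1000, dim)).astype('float32')

index = faiss.IndexFlatL2(dim)
index.add(vectors)                           # index the whole "dataset"

query = vectors[:3]                          # pretend these are reviewed docs
distances, ids = index.search(query, 5)      # 5 nearest neighbours each
print(ids[0], distances[0])                  # row 0 should be its own best hit
```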
#### File: test/models/BERT_Sentimental.py
```python
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
# The GPU id to use, usually either "0" or "1"
# check the ids using command nvidia-smi in terminal
os.environ["CUDA_VISIBLE_DEVICES"]="3"
# -
from sklearn.model_selection import train_test_split
import pandas as pd
import tensorflow as tf
import tensorflow_hub as hub
from datetime import datetime
print(tf.__version__)
import bert
from bert import run_classifier
from bert import optimization
from bert import tokenization
# !pwd
# +
# Set the output directory for saving model file
# Optionally, set a GCP bucket location
OUTPUT_DIR = 'resources/models/bert_sentimental'#@param {type:"string"}
#@markdown Whether or not to clear/delete the directory and create a new one
DO_DELETE = False #@param {type:"boolean"}
#@markdown Set USE_BUCKET and BUCKET if you want to (optionally) store model output on GCP bucket.
if DO_DELETE:
try:
tf.gfile.DeleteRecursively(OUTPUT_DIR)
except:
# Doesn't matter if the directory didn't exist
pass
tf.gfile.MakeDirs(OUTPUT_DIR)
print('***** Model output directory: {} *****'.format(OUTPUT_DIR))
# +
from tensorflow import keras
import os
import re
# Load all files from a directory in a DataFrame.
def load_directory_data(directory):
data = {}
data["sentence"] = []
data["sentiment"] = []
for file_path in os.listdir(directory):
with tf.gfile.GFile(os.path.join(directory, file_path), "r") as f:
data["sentence"].append(f.read())
data["sentiment"].append(re.match("\d+_(\d+)\.txt", file_path).group(1))
return pd.DataFrame.from_dict(data)
# Merge positive and negative examples, add a polarity column and shuffle.
def load_dataset(directory):
pos_df = load_directory_data(os.path.join(directory, "pos"))
neg_df = load_directory_data(os.path.join(directory, "neg"))
pos_df["polarity"] = 1
neg_df["polarity"] = 0
return pd.concat([pos_df, neg_df]).sample(frac=1).reset_index(drop=True)
# Download and process the dataset files.
def download_and_load_datasets(force_download=False):
dataset = tf.keras.utils.get_file(
fname="aclImdb.tar.gz",
origin="http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz",
extract=True)
train_df = load_dataset(os.path.join(os.path.dirname(dataset),
"aclImdb", "train"))
test_df = load_dataset(os.path.join(os.path.dirname(dataset),
"aclImdb", "test"))
return train_df, test_df
# -
train, test = download_and_load_datasets()
train = train.sample(5000)
test = test.sample(5000)
train.columns
DATA_COLUMN = 'sentence'
LABEL_COLUMN = 'polarity'
# label_list is the list of labels, i.e. True, False or 0, 1 or 'dog', 'cat'
label_list = [0, 1]
# ## Data processing
# +
# Use the InputExample class from BERT's run_classifier code to create examples from the data
train_InputExamples = train.apply(lambda x: bert.run_classifier.InputExample(guid=None, # Globally unique ID for bookkeeping, unused in this example
text_a = x[DATA_COLUMN],
text_b = None,
label = x[LABEL_COLUMN]), axis = 1)
test_InputExamples = test.apply(lambda x: bert.run_classifier.InputExample(guid=None,
text_a = x[DATA_COLUMN],
text_b = None,
label = x[LABEL_COLUMN]), axis = 1)
# +
# This is a path to an uncased (all lowercase) version of BERT
BERT_MODEL_HUB = "https://tfhub.dev/google/bert_uncased_L-12_H-768_A-12/1"
def create_tokenizer_from_hub_module():
"""Get the vocab file and casing info from the Hub module."""
with tf.Graph().as_default():
bert_module = hub.Module(BERT_MODEL_HUB)
tokenization_info = bert_module(signature="tokenization_info", as_dict=True)
with tf.Session() as sess:
vocab_file, do_lower_case = sess.run([tokenization_info["vocab_file"],
tokenization_info["do_lower_case"]])
return bert.tokenization.FullTokenizer(
vocab_file=vocab_file, do_lower_case=do_lower_case)
tokenizer = create_tokenizer_from_hub_module()
# -
tokenizer.tokenize("This here's an example of using the BERT tokenizer")
# We'll set sequences to be at most 128 tokens long.
MAX_SEQ_LENGTH = 128
# Convert our train and test features to InputFeatures that BERT understands.
train_features = bert.run_classifier.convert_examples_to_features(train_InputExamples, label_list, MAX_SEQ_LENGTH, tokenizer)
test_features = bert.run_classifier.convert_examples_to_features(test_InputExamples, label_list, MAX_SEQ_LENGTH, tokenizer)
# Compute train and warmup steps from batch size
# These hyperparameters are copied from this colab notebook (https://colab.sandbox.google.com/github/tensorflow/tpu/blob/master/tools/colab/bert_finetuning_with_cloud_tpus.ipynb)
BATCH_SIZE = 32
LEARNING_RATE = 2e-5
NUM_TRAIN_EPOCHS = 3.0
# Warmup is a period of time where the learning rate
# is small and gradually increases--usually helps training.
WARMUP_PROPORTION = 0.1
# Model configs
SAVE_CHECKPOINTS_STEPS = 500
SAVE_SUMMARY_STEPS = 100
# Compute # train and warmup steps from batch size
num_train_steps = int(len(train_features) / BATCH_SIZE * NUM_TRAIN_EPOCHS)
num_warmup_steps = int(num_train_steps * WARMUP_PROPORTION)
# ## Create model
#
def create_model(is_predicting, input_ids, input_mask, segment_ids, labels,
num_labels):
"""Creates a classification model."""
bert_module = hub.Module(
BERT_MODEL_HUB,
trainable=True)
bert_inputs = dict(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids)
bert_outputs = bert_module(
inputs=bert_inputs,
signature="tokens",
as_dict=True)
# Use "pooled_output" for classification tasks on an entire sentence.
# Use "sequence_outputs" for token-level output.
output_layer = bert_outputs["pooled_output"]
hidden_size = output_layer.shape[-1].value
    # Create our own layer to tune for the sentiment data.
output_weights = tf.get_variable(
"output_weights", [num_labels, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"output_bias", [num_labels], initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
# Dropout helps prevent overfitting
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
# Convert labels into one-hot encoding
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
predicted_labels = tf.squeeze(tf.argmax(log_probs, axis=-1, output_type=tf.int32))
    # If we're predicting, we want predicted labels and the probabilities.
if is_predicting:
return (predicted_labels, log_probs)
# If we're train/eval, compute loss between predicted and actual label
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
return (loss, predicted_labels, log_probs)
# model_fn_builder actually creates our model function
# using the passed parameters for num_labels, learning_rate, etc.
def model_fn_builder(num_labels, learning_rate, num_train_steps,
num_warmup_steps):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)
# TRAIN and EVAL
if not is_predicting:
(loss, predicted_labels, log_probs) = create_model(
is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)
train_op = bert.optimization.create_optimizer(
loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu=False)
# Calculate evaluation metrics.
def metric_fn(label_ids, predicted_labels):
accuracy = tf.metrics.accuracy(label_ids, predicted_labels)
f1_score = tf.contrib.metrics.f1_score(
label_ids,
predicted_labels)
auc = tf.metrics.auc(
label_ids,
predicted_labels)
recall = tf.metrics.recall(
label_ids,
predicted_labels)
precision = tf.metrics.precision(
label_ids,
predicted_labels)
true_pos = tf.metrics.true_positives(
label_ids,
predicted_labels)
true_neg = tf.metrics.true_negatives(
label_ids,
predicted_labels)
false_pos = tf.metrics.false_positives(
label_ids,
predicted_labels)
false_neg = tf.metrics.false_negatives(
label_ids,
predicted_labels)
return {
"eval_accuracy": accuracy,
"f1_score": f1_score,
"auc": auc,
"precision": precision,
"recall": recall,
"true_positives": true_pos,
"true_negatives": true_neg,
"false_positives": false_pos,
"false_negatives": false_neg
}
eval_metrics = metric_fn(label_ids, predicted_labels)
if mode == tf.estimator.ModeKeys.TRAIN:
return tf.estimator.EstimatorSpec(mode=mode,
loss=loss,
train_op=train_op)
else:
return tf.estimator.EstimatorSpec(mode=mode,
loss=loss,
eval_metric_ops=eval_metrics)
else:
(predicted_labels, log_probs) = create_model(
is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)
predictions = {
'probabilities': log_probs,
'labels': predicted_labels
}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
# Return the actual model function in the closure
return model_fn
# Specify output directory and number of checkpoint steps to save
run_config = tf.estimator.RunConfig(
model_dir=OUTPUT_DIR,
save_summary_steps=SAVE_SUMMARY_STEPS,
save_checkpoints_steps=SAVE_CHECKPOINTS_STEPS)
# +
model_fn = model_fn_builder(
num_labels=len(label_list),
learning_rate=LEARNING_RATE,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps)
estimator = tf.estimator.Estimator(
model_fn=model_fn,
config=run_config,
params={"batch_size": BATCH_SIZE})
# -
# Create an input function for training. drop_remainder = True for using TPUs.
train_input_fn = bert.run_classifier.input_fn_builder(
features=train_features,
seq_length=MAX_SEQ_LENGTH,
is_training=True,
drop_remainder=False)
print(f'Beginning Training!')
current_time = datetime.now()
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
print("Training took time ", datetime.now() - current_time)
test_input_fn = run_classifier.input_fn_builder(
features=test_features,
seq_length=MAX_SEQ_LENGTH,
is_training=False,
drop_remainder=False)
def getPrediction(in_sentences):
labels = ["Negative", "Positive"]
input_examples = [run_classifier.InputExample(guid="", text_a = x, text_b = None, label = 0) for x in in_sentences] # here, "" is just a dummy label
input_features = run_classifier.convert_examples_to_features(input_examples, label_list, MAX_SEQ_LENGTH, tokenizer)
predict_input_fn = run_classifier.input_fn_builder(features=input_features, seq_length=MAX_SEQ_LENGTH, is_training=False, drop_remainder=False)
predictions = estimator.predict(predict_input_fn)
return [(sentence, prediction['probabilities'], labels[prediction['labels']]) for sentence, prediction in zip(in_sentences, predictions)]
# return predictions
# %%
pred_sentences = [
"That movie was absolutely awful",
"The acting was a bit lacking",
"The film was creative and surprising",
"Absolutely fantastic!"
]
predictions = getPrediction(pred_sentences)
predictions
# %%capture
p=getPrediction(["he doesn't feel good.","he does feel good."])
import numpy as np
import os
import spacy
import pathlib
import zipfile
from collections import OrderedDict
from spacy.lang.en import English
import spacy
from spacy.matcher import Matcher
import random
zip_file_name='45566_3visits'
filter_file='../resources/dictionaries/filter_manual.tsv'
NUM_NOTES=1000
random.seed(777)
# +
# # !python -m spacy download en_core_web_md
# -
nlp = spacy.load('en_core_web_sm')
def kw_matcher(filter_file:str=filter_file):
matcher = Matcher(nlp.vocab)
patterns=[[{'LOWER':w}] for w in pathlib.Path(filter_file).read_text().split('\n') if len(w.strip())>0]
for p in patterns:
matcher.add('patten',None,p)
return matcher
def sample_from_zip_file(filepath, sample_num=NUM_NOTES,keywords_filter_file:str=None):
sentences=[]
vectors=[]
vector_dict={}
zfile = zipfile.ZipFile(filepath)
file_list=zfile.infolist()
sampled_ids=random.sample(range(0,len(file_list)),sample_num)
filtering=False
matcher=None
if keywords_filter_file is not None:
filtering=True
matcher=kw_matcher(keywords_filter_file)
for f_id in sampled_ids:
finfo=file_list[f_id]
ifile = zfile.open(finfo)
doc_text = ifile.read().decode("utf-8")
if nlp(doc_text).sents is None:
print(doc_text)
continue
# print(f_id)
for sent in nlp(doc_text).sents:
sent_txt=str(sent).strip()
if len(sent_txt)<20:
continue
if filtering:
matches = matcher(nlp(sent_txt))
if len(matches)==0:
continue
sentences.append(sent_txt)
ifile.close()
zfile.close()
return sentences
# %%capture
print(getPrediction(["This is good"]))
# %%
sentences=sample_from_zip_file('../data/notes/{}.zip'.format(zip_file_name),NUM_NOTES,filter_file)
p=getPrediction(sentences)
len(p)
p[0]
# ## display
sorted_pred = sorted(p, key=lambda kv: kv[1][1]-kv[1][0])
```
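The final sort orders sentences by the margin `log P(positive) - log P(negative)`, i.e. from most confidently negative to most confidently positive. A tiny illustration with made-up predictions:

```python
# The sort above ranks sentences by log P(positive) - log P(negative); a larger
# margin means a more confidently positive sentence. Made-up predictions only.
import math

fake_preds = [
    ('awful movie', [math.log(0.9), math.log(0.1)], 'Negative'),
    ('pretty good', [math.log(0.3), math.log(0.7)], 'Positive'),
    ('masterpiece', [math.log(0.05), math.log(0.95)], 'Positive'),
]
ranked = sorted(fake_preds, key=lambda kv: kv[1][1] - kv[1][0])
for sentence, logprobs, label in ranked:
    print(f'{sentence:15s} margin={logprobs[1] - logprobs[0]:+.2f} {label}')
```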
#### File: SmartAnno/utils/KeywordsEmbeddingExtenderSetup.py
```python
from IPython.core.display import display
from ipywidgets import widgets, Label
from SmartAnno.utils.ConfigReader import ConfigReader
from SmartAnno.gui.MyWidgets import ToggleButtonsMultiSelectionInBox
from SmartAnno.gui.PreviousNextWidgets import PreviousNext
from SmartAnno.utils.TreeSet import TreeSet
class KeywordsEmbeddingExtenderSetup(PreviousNext):
def __init__(self,
description='<h4>Extend keywords through <b>Word Embedding</b></h4><p>Please select which keywords you want to '
'check the synonyms from the word embedding (currently only single word works for the word embedding model):',
name=None):
self.title = widgets.HTML(value=description)
self.to_we_ext_words = dict()
self.to_we_ext_filters = dict()
self.glove_path_input = None
super().__init__(name)
def backStart(self):
rows = self.showWords(self.workflow.filters)
self.box = widgets.VBox(rows, layout=widgets.Layout(display='flex', flex_grown='column'))
if hasattr(self, 'to_we_ext_filters') and isinstance(self.to_we_ext_filters, dict):
for type_name, toggle in self.to_we_ext_filters.items():
if type_name in self.to_we_ext_words:
toggle.value = list(self.to_we_ext_words[type_name])
display(self.box)
pass
def start(self):
if ConfigReader.getValue("glove/model_path") is None or len(ConfigReader.getValue("glove/model_path")) == 0:
self.workflow.steps[self.pos_id + 2].start()
return
if not hasattr(self.workflow, 'we_extended'):
self.workflow.we_extended = dict()
rows = self.showWords(self.workflow.filters)
self.box = widgets.VBox(rows, layout=widgets.Layout(display='flex', flex_grown='column'))
display(self.box)
pass
def showWords(self, filters):
rows = [self.title]
for type_name in filters.keys():
rows.append(Label(value=type_name + ':'))
# only show single word
selections = ToggleButtonsMultiSelectionInBox(
options=[word for word in filters[type_name].to_list() if ' ' not in word])
self.to_we_ext_filters[type_name] = selections
rows.append(selections)
rows += (self.addSeparator())
rows += [self.addPreviousNext(self.show_previous, self.show_next)]
return rows
def complete(self):
no_word_selected = True
for type_name, toggle in self.to_we_ext_filters.items():
self.to_we_ext_words[type_name] = TreeSet(toggle.value)
if no_word_selected and len(self.to_we_ext_words[type_name]) > 0:
no_word_selected = False
if not no_word_selected:
self.workflow.to_we_ext_words = self.to_we_ext_words
from SmartAnno.models.GloveModel import GloveModel
from time import sleep
if GloveModel.glove_model is None:
print('Please wait for glove model to get ready.', end='', flush=True)
while GloveModel.glove_model is None:
print('.', end='', flush=True)
sleep(1)
self.setNextStep(self.workflow.steps[self.pos_id + 1])
else:
self.setNextStep(self.workflow.steps[self.pos_id + 2])
self.workflow.steps[self.pos_id + 2].setPreviousStep(self)
super().complete()
pass
```
#### File: SmartAnno/utils/KeywordsUMLSExtender.py
```python
from IPython.core.display import display
from ipywidgets import widgets
from SmartAnno.utils.ConfigReader import ConfigReader
from SmartAnno.db.ORMs import Filter
from SmartAnno.gui.BranchingWidgets import LoopRepeatSteps, RepeatStep
from SmartAnno.gui.MyWidgets import ToggleButtonsMultiSelectionInBox
from SmartAnno.gui.Workflow import Step, logMsg
from SmartAnno.umls.UMLSFinder import UMLSFinder
def filterExtended(extending, type_name, filters, extended):
original_list = filters[type_name]
filtered_list = set()
for word in extending:
if word in original_list or word in extended:
continue
filtered_list.add(word)
    extended.update(filtered_list)  # remember which synonyms will be offered so they are not shown twice
return filtered_list
class KeywordsUMLSExtender(LoopRepeatSteps):
umls = None
description = "<h4>Synonyms of keyword '<b>%s</b>'</h4><p>Choose the ones that you want to include in keyword filters:</p>"
def __init__(self, name=str(Step.global_id + 1), sources=['SNOMEDCT_US'], filter_by_length=0, max_query=50,
filter_by_contains=True):
super().__init__([], name=name)
self.sources = sources
self.filter_by_length = filter_by_length
self.filter_by_contains = filter_by_contains
self.max_query = max_query
self.loadDefaultConfig()
pass
def loadDefaultConfig(self):
if self.sources is None or len(self.sources) == 0:
self.sources = ['SNOMEDCT_US']
ConfigReader.setValue("umls/sources", self.sources)
if self.filter_by_length is None:
self.filter_by_length = 0
ConfigReader.setValue("umls/filter_by_length", 0)
if self.filter_by_contains is None:
self.filter_by_contains = True
ConfigReader.setValue("umls/filter_by_contains", True)
if self.max_query is None:
self.max_query = 50
ConfigReader.setValue("umls/max_query", 50)
ConfigReader.saveConfig()
pass
def start(self):
self.init_real_time()
self.loop_workflow.start()
pass
def complete(self):
# Differentiate the keywords from where added----Not used for now
# self.data = TreeSet()
# for step in self.loop_workflow.steps:
# for word in step.data:
# self.data.add(word)
# self.workflow.umls_extended = self.data
# update the keywords in db
logMsg('update UMLS extended keywords into database')
with self.workflow.dao.create_session() as session:
records = session.query(Filter).filter(Filter.task_id == self.workflow.task_id) \
.filter(Filter.type == 'orig')
for record in records:
type_name = record.type_name
keywords = '\n'.join(self.workflow.filters[type_name]).strip()
record.keyword = keywords
super().complete()
pass
def init_real_time(self):
KeywordsUMLSExtender.umls = UMLSFinder(self.workflow.api_key, self.sources, self.filter_by_length,
self.max_query,
self.filter_by_contains)
word_dict = dict()
self.loop_workflow.steps.clear()
self.loop_workflow.filters = self.workflow.filters
self.loop_workflow.extended = set()
for type_name, words in self.workflow.to_ext_words.items():
for word in words:
word_dict[word] = type_name
self.loop_workflow.to_ext_words = list(word_dict.items())
self.initiateRepeatStep()
pass
def initiateRepeatStep(self):
if len(self.loop_workflow.to_ext_words) > 0:
word, type_name = self.loop_workflow.to_ext_words.pop(0)
extending = []
try:
extending = KeywordsUMLSExtender.umls.search(word)
# self.loop_workflow.extended saved all the extended words that will be displayed, no matter will be
# selected or not, so that the same extended word won't be shown twice asking for selection
extending = filterExtended(extending, type_name, self.workflow.filters, self.loop_workflow.extended)
except KeyError:
logMsg(("not synonym found for word '%s'" % word.lower()))
if len(extending) > 0:
self.appendRepeatStep(
RepeatMultipleSelection(description=KeywordsUMLSExtender.description % word,
options=list(extending), master=self, type_name=type_name))
else:
self.initiateRepeatStep()
else:
self.complete()
class RepeatMultipleSelection(RepeatStep):
def __init__(self, description='', options=[], tooltips=[],
branch_names=['Previous', 'Next', 'Complete'], branch_steps=[None, None, None],
name=None, master=None, type_name='Irrelevant'):
self.master = master
self.type_name = type_name
self.display_description = widgets.HTML(value=description)
self.selections = ToggleButtonsMultiSelectionInBox(
options=options, num_per_row=4
)
super().__init__(branch_names, branch_steps, name)
pass
def start(self):
self.box = self.updateBox()
display(self.box)
if len(self.workflow.to_ext_words) > 0:
self.initNextStepWhileReviewing()
pass
def initNextStepWhileReviewing(self):
if len(self.workflow.to_ext_words) > 0:
word, type_name = self.workflow.to_ext_words.pop(0)
extended = KeywordsUMLSExtender.umls.search(word)
extended = filterExtended(extended, type_name, self.master.workflow.filters, self.workflow.extended)
if len(extended) > 0:
next_step = RepeatMultipleSelection(description=KeywordsUMLSExtender.description % word,
options=list(extended), master=self.master, type_name=type_name)
next_step.setCompleteStep(self.branch_buttons[2].linked_step)
self.workflow.append(next_step)
else:
self.initNextStepWhileReviewing()
pass
def updateBox(self):
rows = [self.display_description] + self.addSeparator(top='5px') + \
[self.selections] + self.addSeparator(top='10px') + self.addConditionsWidget()
        vbox = widgets.VBox(rows, layout=widgets.Layout(width='100%', margin='10px'))
return vbox
def navigate(self, b):
# print(b)
self.data = self.selections.value
if self.master is not None and self.data is not None:
logMsg(self.data)
self.master.workflow.filters[self.type_name].addAll(self.data)
super().navigate(b)
pass
def complete(self):
self.data = self.selections.value
if self.master is not None and self.data is not None:
self.master.workflow.filters[self.type_name].addAll(self.data)
self.master.complete()
pass
```
#### File: SmartAnno/utils/KeywordsUMLSExtenderSetup.py
```python
from IPython.core.display import display
from ipywidgets import widgets, Label, Layout
from SmartAnno.utils.ConfigReader import ConfigReader
from SmartAnno.gui.MyWidgets import ToggleButtonsMultiSelectionInBox
from SmartAnno.gui.PreviousNextWidgets import PreviousNext
from SmartAnno.utils.TreeSet import TreeSet
class KeywordsUMLSExtenderSetup(PreviousNext):
def __init__(self,
description='<h4>Extend keywords through <b>UMLS</b></h4><p>Please select which keywords you want to '
'check the synonyms from UMLS:', name=None):
self.api_key = ConfigReader.getValue('api_key')
self.title = widgets.HTML(value=description)
self.to_ext_words = dict()
self.to_umls_ext_filters = dict()
self.api_input = None
super().__init__(name)
def start(self):
if self.workflow.api_key is None or len(self.workflow.api_key) < 5:
self.workflow.steps[self.pos_id + 2].start()
return
if not hasattr(self.workflow, 'umls_extended'):
self.workflow.umls_extended = dict()
rows = self.showWords(self.workflow.filters)
self.box = widgets.VBox(rows, layout=widgets.Layout(display='flex', flex_grown='column'))
display(self.box)
pass
def showWords(self, filters):
rows = [self.title]
self.requestUMLSAPIKey(rows)
for type_name in filters.keys():
rows.append(Label(value=type_name + ':'))
selections = ToggleButtonsMultiSelectionInBox(options=filters[type_name].to_list(),
value=list(self.to_ext_words[type_name]) if hasattr(self,
'to_ext_words') and isinstance(
self.to_ext_words,
dict) and type_name in self.to_ext_words else [])
self.to_umls_ext_filters[type_name] = selections
rows.append(selections)
rows += (self.addSeparator())
rows += [self.addPreviousNext(self.show_previous, self.show_next)]
return rows
def complete(self):
no_word_selected = True
for type_name, toggle in self.to_umls_ext_filters.items():
self.to_ext_words[type_name] = TreeSet(toggle.value)
if no_word_selected and len(self.to_ext_words[type_name]) > 0:
no_word_selected = False
if not no_word_selected:
self.workflow.to_ext_words = self.to_ext_words
if self.api_key is None:
self.api_key = self.api_input.value
self.workflow.api_key = self.api_key
ConfigReader.setValue("api_key", self.api_key)
ConfigReader.saveConfig()
else:
self.setNextStep(self.workflow.steps[self.pos_id + 2])
self.workflow.steps[self.pos_id + 2].setPreviousStep(self)
super().complete()
pass
def requestUMLSAPIKey(self, rows):
        if self.api_key is not None and self.api_key.strip() != '':
self.workflow.api_key = self.api_key
else:
rows.append(widgets.HTML(value='<h5>Set up your API key</h5><p>In order to use UMLS service, you will need '
'to use a API key. Please type your API key below: (Here is '
'<a href="https://documentation.uts.nlm.nih.gov/rest/authentication.html" '
' target="_blank">how to get a UMLS API key</a>)</p>'))
self.api_input = widgets.Text(
value='',
placeholder='copy and paste your api key here',
description='',
disabled=False,
layout=Layout(width='600')
)
rows.append(self.api_input)
rows += self.addSeparator()
pass
``` |
{
"source": "JianLiu91/eventrelation",
"score": 3
} |
#### File: JianLiu91/eventrelation/dataset.py
```python
import sys
import torch
import pickle
import random
from tqdm import tqdm
def pad_sequence_to_length(sequence,
desired_length,
default_value = lambda: 0,
padding_on_right = True):
"""
    Takes a list of objects and pads it to the desired length, returning the padded list. The
original list is not modified.
Parameters
----------
sequence : List
A list of objects to be padded.
desired_length : int
Maximum length of each sequence. Longer sequences are truncated to this length, and
shorter ones are padded to it.
default_value: Callable, default=lambda: 0
Callable that outputs a default value (of any type) to use as padding values. This is
a lambda to avoid using the same object when the default value is more complex, like a
list.
padding_on_right : bool, default=True
When we add padding tokens (or truncate the sequence), should we do it on the right or
the left?
Returns
-------
padded_sequence : List
"""
# Truncates the sequence to the desired length.
if padding_on_right:
padded_sequence = sequence[:desired_length]
else:
padded_sequence = sequence[-desired_length:]
# Continues to pad with default_value() until we reach the desired length.
for _ in range(desired_length - len(padded_sequence)):
if padding_on_right:
padded_sequence.append(default_value())
else:
padded_sequence.insert(0, default_value())
return padded_sequence
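# A few worked examples of the behaviour documented above (illustrative only, not part of
# the original module):
#   pad_sequence_to_length([1, 2, 3], 5)                          -> [1, 2, 3, 0, 0]
#   pad_sequence_to_length([1, 2, 3, 4, 5, 6], 4)                 -> [1, 2, 3, 4]
#   pad_sequence_to_length([1, 2, 3], 5, padding_on_right=False)  -> [0, 0, 1, 2, 3]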
def get_mask_from_sequence_lengths(sequence_lengths: torch.Tensor, max_length: int) -> torch.Tensor:
"""
Given a variable of shape ``(batch_size,)`` that represents the sequence lengths of each batch
element, this function returns a ``(batch_size, max_length)`` mask variable. For example, if
our input was ``[2, 2, 3]``, with a ``max_length`` of 4, we'd return
``[[1, 1, 0, 0], [1, 1, 0, 0], [1, 1, 1, 0]]``.
We require ``max_length`` here instead of just computing it from the input ``sequence_lengths``
because it lets us avoid finding the max, then copying that value from the GPU to the CPU so
that we can use it to construct a new tensor.
"""
# (batch_size, max_length)
ones = sequence_lengths.new_ones(sequence_lengths.size(0), max_length)
range_tensor = ones.cumsum(dim=1)
return (sequence_lengths.unsqueeze(1) >= range_tensor).long()
class Dataset(object):
def __init__(self, batch_size, dataset):
super(Dataset, self).__init__()
self.batch_size = batch_size
self.y_label = {
'NULL': 0,
'null': 0,
'FALLING_ACTION': 1,
'PRECONDITION': 1
}
self.construct_index(dataset)
def construct_index(self, dataset):
self.dataset = dataset
self.index_length = len(dataset)
self.shuffle_list = list(range(0, self.index_length))
def shuffle(self):
random.shuffle(self.shuffle_list)
def get_tqdm(self, device, shuffle=True):
return tqdm(self.reader(device, shuffle), mininterval=2, total=self.index_length // self.batch_size, leave=False, file=sys.stdout, ncols=80)
def reader(self, device, shuffle):
cur_idx = 0
while cur_idx < self.index_length:
end_index = min(cur_idx + self.batch_size, self.index_length)
batch = [self.dataset[self.shuffle_list[index]] for index in range(cur_idx, end_index)]
cur_idx = end_index
yield self.batchify(batch, device)
if shuffle:
self.shuffle()
def batchify(self, batch, device):
lens1 = [len(tup[4].split('_')) for tup in batch]
lens2 = [len(tup[8].split('_')) for tup in batch]
word_padded_len = 3
data_x1, data_x2, data_y = list(), list(), list()
for data in batch:
data_x1.append(list(map(int, data[4].split('_'))))
data_x2.append(list(map(int, data[8].split('_'))))
data_y.append(self.y_label[data[-1]])
data_x1 = list(map(lambda x: pad_sequence_to_length(x, word_padded_len), data_x1))
data_x2 = list(map(lambda x: pad_sequence_to_length(x, word_padded_len), data_x2))
mask_x1 = get_mask_from_sequence_lengths(torch.LongTensor(lens1), word_padded_len)
mask_x2 = get_mask_from_sequence_lengths(torch.LongTensor(lens2), word_padded_len)
return [torch.LongTensor(data_x1).to(device), mask_x1,
torch.LongTensor(data_x2).to(device), mask_x2,
torch.LongTensor(data_y).to(device)]
if __name__ == '__main__':
dataset = Dataset(10)
for batch in dataset.reader('cpu', True):
data_x1, mask_x1, data_x2, mask_x2, data_y = batch
print(data_x1.size(0))
```
#### File: JianLiu91/eventrelation/generate_training_data.py
```python
import sys
import os
import os.path
from lxml import etree
import collections
import pickle
def get_feature(s, all_token):
sens = []
psns = []
words = []
for c in s.split('_'):
token = all_token[int(c) - 1]
sens.append(token[1])
psns.append(token[2])
words.append(str(token[3]))
return '_'.join(sens), '_'.join(psns), '_'.join(words)
def all_tokens(filename):
ecbplus = etree.parse(filename, etree.XMLParser(remove_blank_text=True))
root_ecbplus = ecbplus.getroot()
root_ecbplus.getchildren()
all_token = []
for elem in root_ecbplus.findall('token'):
temp = (elem.get('t_id'), elem.get('sentence'),
elem.get('number'), elem.text)
all_token.append(temp)
return all_token
def main(argv=None):
with open('data.pickle', 'rb') as f:
# The protocol version used is detected automatically, so we do not
# have to specify it.
data = pickle.load(f)
documents = data['data']
word_map = data['word_map']
word_list = data['word_list']
word_vector = data['word_vector']
# data format:
# all_token
# ecb_star_events
# ecb_coref_relations
# ecb_star_time
# ecbstar_events_plotLink
# ecbstar_timelink
# evaluation_data
# evaluationcrof_data
f = open('training_data.txt', 'w')
for key in documents:
[all_token, ecb_star_events, ecb_coref_relations,
ecb_star_time, ecbstar_events_plotLink, ecbstar_timelink,
evaluation_data, evaluationcrof_data] = documents[key]
# for elem in evaluation_data:
# s, t, value = elem
# s_text = transfter_to_token(s, all_token)
# t_text = transfter_to_token(t, all_token)
# temp = [key, s_text, t_text, value]
# print(temp)
for event1 in ecb_star_events:
for event2 in ecb_star_events:
if event1 == event2: # event ID
continue
offset1 = ecb_star_events[event1]
offset2 = ecb_star_events[event2]
# every two pairs
rel = 'NULL'
for elem in evaluation_data:
e1, e2, value = elem
if e1 == offset1 and e2 == offset2:
rel = value
s_sen, s_pos, s_word = get_feature(offset1, all_token)
t_sen, t_pos, t_word = get_feature(offset2, all_token)
print('\t'.join([key, offset1, s_sen, s_pos, s_word,
offset2, t_sen, t_pos, t_word, rel]), file=f)
if __name__ == '__main__':
main()
```
#### File: JianLiu91/eventrelation/read_document_data.py
```python
import sys
import os
import os.path
from lxml import etree
import collections
import pickle
def create_folder(filepath):
directory = os.path.dirname(filepath)
if not os.path.exists(directory):
os.makedirs(directory)
def check_entry_dict(event_tokens, d):
if event_tokens in d:
return " ".join(d[event_tokens])
else:
return event_tokens
def get_sentence(num, all_token):
temp = []
for token in all_token:
if token[1] == num:
temp.append(token[-1])
return temp
def transfter_to_token(s, all_token):
tmp = []
for c in s.split('_'):
token = all_token[int(c) - 1]
tmp.append(token[-1])
return ' '.join(tmp)
def get_sentence_num(s, all_token):
c = s.split('_')[0]
return all_token[int(c)-1][1]
def generate_feature(s, t, value, all_token):
s_text = transfter_to_token(s, all_token)
t_text = transfter_to_token(t, all_token)
s_sentence = get_sentence(get_sentence_num(s, all_token), all_token)
t_sentence = get_sentence(get_sentence_num(t, all_token), all_token)
return s_text, t_text
def all_tokens(filename):
ecbplus = etree.parse(filename, etree.XMLParser(remove_blank_text=True))
root_ecbplus = ecbplus.getroot()
root_ecbplus.getchildren()
all_token = []
for elem in root_ecbplus.findall('token'):
temp = (elem.get('t_id'), elem.get('sentence'),
elem.get('number'), elem.text)
all_token.append(temp)
return all_token
def extract_event_CAT(etreeRoot):
"""
:param etreeRoot: ECB+/ESC XML root
    :return: dictionary with annotated events in ECB+
"""
event_dict = collections.defaultdict(list)
for elem in etreeRoot.findall('Markables/'):
if elem.tag.startswith("ACTION") or elem.tag.startswith("NEG_ACTION"):
for token_id in elem.findall('token_anchor'): # the event should have at least one token
event_mention_id = elem.get('m_id', 'nothing')
token_mention_id = token_id.get('t_id', 'nothing')
event_dict[event_mention_id].append(token_mention_id)
return event_dict
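# Illustrative shape of the returned mapping (the ids below are made up): each event
# mention m_id maps to the list of token t_ids that anchor it, e.g.
#   {'23': ['41', '42'], '27': ['55']}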
def extract_time_CAT(etreeRoot):
time_dict = collections.defaultdict(list)
for elem in etreeRoot.findall('Markables/'):
if elem.tag.startswith("TIME_DATE"):
for token_id in elem.findall('token_anchor'): # the event should have at least one token
event_mention_id = elem.get('m_id', 'nothing')
token_mention_id = token_id.get('t_id', 'nothing')
time_dict[event_mention_id].append(token_mention_id)
return time_dict
def extract_corefRelations(etreeRoot, d):
"""
:param etreeRoot: ECB+ XML root
    :return: dictionary with annotated events in ECB+ (event_dict)
:return:
"""
relations_dict_appo = collections.defaultdict(list)
relations_dict = {}
for elem in etreeRoot.findall('Relations/'):
target_element = elem.find('target').get('m_id', 'null') # the target is a non-event
for source in elem.findall('source'):
source_elem = source.get('m_id', 'null')
if source_elem in d:
val = "_".join(d[source_elem])
relations_dict_appo[target_element].append(val) # coreferential sets of events
for k, v in relations_dict_appo.items():
for i in v:
relations_dict[i] = v
return relations_dict
def extract_plotLink(etreeRoot, d):
"""
:param etreeRoot: ESC XML root
    :param d: dictionary with annotated events in ESC (event_dict)
:return:
"""
plot_dict = collections.defaultdict(list)
for elem in etreeRoot.findall('Relations/'):
if elem.tag == "PLOT_LINK":
source_pl = elem.find('source').get('m_id', 'null')
target_pl = elem.find('target').get('m_id', 'null')
relvalu = elem.get('relType', 'null')
if source_pl in d:
val1 = "_".join(d[source_pl])
if target_pl in d:
val2 = "_".join(d[target_pl])
plot_dict[(val1, val2)] = relvalu
return plot_dict
def extract_timeLink(etreeRoot, d):
tlink_dict = collections.defaultdict(list)
for elem in etreeRoot.findall('Relations/'):
if elem.tag == "TLINK":
try:
source_pl = elem.find('source').get('m_id', 'null')
target_pl = elem.find('target').get('m_id', 'null')
except:
continue
relvalu = elem.get('relType', 'null')
if source_pl in d:
val1 = "_".join(d[source_pl])
if target_pl in d:
val2 = "_".join(d[target_pl])
tlink_dict[(val1, val2)] = relvalu
return tlink_dict
def read_evaluation_file(fn):
res = []
if not os.path.exists(fn):
return res
for line in open(fn):
        fields = line.strip().split('\t')
        res.append(fields)
return res
def read_file(ecbplus_original, ecbstart_new, evaluate_file, evaluate_coref_file):
"""
:param ecbplus_original: ECB+ CAT data
:param ecbstart_new: ESC CAT data
:param outfile1: event mention extended
:param outfile2: event extended coref chain
:return:
"""
ecbplus = etree.parse(ecbplus_original, etree.XMLParser(remove_blank_text=True))
root_ecbplus = ecbplus.getroot()
root_ecbplus.getchildren()
ecb_event_mentions = extract_event_CAT(root_ecbplus)
ecb_coref_relations = extract_corefRelations(root_ecbplus, ecb_event_mentions)
"""
ecbstar data
"""
ecbstar = etree.parse(ecbstart_new, etree.XMLParser(remove_blank_text=True))
ecbstar_root = ecbstar.getroot()
ecbstar_root.getchildren()
ecb_star_events = extract_event_CAT(ecbstar_root)
ecbstar_events_plotLink = extract_plotLink(ecbstar_root, ecb_star_events)
ecb_star_time = extract_time_CAT(ecbstar_root)
ecb_star_time.update(ecb_star_events)
ecbstar_timelink = extract_timeLink(ecbstar_root, ecb_star_time)
evaluation_data = read_evaluation_file(evaluate_file)
evaluationcrof_data = read_evaluation_file(evaluate_coref_file)
# TLINK ??
# print(ecb_star_events) # all the events
# print(ecb_star_time) # all the time expressions
# print(ecbstar_events_plotLink) # direct event plot link
# print(ecbstar_timelink)
return ecb_star_events, ecb_coref_relations, ecb_star_time, ecbstar_events_plotLink, ecbstar_timelink, evaluation_data, evaluationcrof_data
def make_corpus(ecbtopic, ecbstartopic, evaluationtopic, evaluationcoreftopic, datadict):
"""
:param ecbtopic: ECB+ topic folder in CAT format
:param ecbstartopic: ESC topic folder in CAT format
:param outdir: output folder for evaluation data format
:return:
"""
if os.path.isdir(ecbtopic) and os.path.isdir(ecbstartopic) and os.path.isdir(evaluationtopic):
if ecbtopic[-1] != '/':
ecbtopic += '/'
if ecbstartopic[-1] != '/':
ecbstartopic += '/'
if evaluationtopic[-1] != '/':
evaluationtopic += '/'
if evaluationcoreftopic[-1] != '/':
evaluationcoreftopic += '/'
ecb_subfolder = os.path.dirname(ecbtopic).split("/")[-1]
for f in os.listdir(ecbtopic):
if f.endswith('plus.xml'):
ecb_file = f
star_file = ecbstartopic + f + ".xml"
evaluate_file = evaluationtopic + f
evaluate_coref_file = evaluationcoreftopic + f
ecb_star_events, ecb_coref_relations, ecb_star_time, ecbstar_events_plotLink, ecbstar_timelink, evaluation_data, evaluationcrof_data = read_file(ecbtopic + ecb_file, star_file, evaluate_file, evaluate_coref_file)
for key in ecb_star_events:
ecb_star_events[key] = '_'.join(ecb_star_events[key])
for key in ecb_star_time:
ecb_star_time[key] = '_'.join(ecb_star_time[key])
all_token = all_tokens(star_file)
datadict[star_file] = [all_token, ecb_star_events, ecb_coref_relations, ecb_star_time, ecbstar_events_plotLink, ecbstar_timelink, evaluation_data, evaluationcrof_data]
# for elem in ecbstar_events_plotLink:
# (s, t), value = elem, ecbstar_events_plotLink[elem]
# s_text, t_text = generate_feature(s, t, value, all_token)
# print(s_text, t_text, value)
def main(argv=None):
# topic = '5'
# ECBplusTopic = 'ECB+_LREC2014/ECB+/' + topic
# ECBstarTopic = 'annotated_data/v1.0/' + topic
# EvaluationTopic = 'evaluation_format/full_corpus/v1.0/event_mentions_extended/' + topic
ECBplusTopic = 'ECB+_LREC2014/ECB+/'
ECBstarTopic = 'annotated_data/v1.0/'
EvaluationTopic = 'evaluation_format/full_corpus/v1.0/event_mentions_extended/'
EvaluationCrofTopic = 'evaluation_format/full_corpus/v1.0/coref_chain/'
data_dict = {}
for topic in os.listdir('annotated_data/v1.0/'):
if os.path.isdir('annotated_data/v1.0/' + topic):
dir1, dir2, dir3, dir4 = ECBplusTopic + topic, ECBstarTopic + topic, EvaluationTopic + topic, EvaluationCrofTopic + topic
make_corpus(dir1, dir2, dir3, dir4, data_dict)
for key in data_dict:
print(key)
with open('document_raw.pickle', 'wb') as f:
# Pickle the 'data' dictionary using the highest protocol available.
pickle.dump(data_dict, f, pickle.HIGHEST_PROTOCOL)
# data format:
# all_token
# ecb_star_events
# ecb_coref_relations
# ecb_star_time
# ecbstar_events_plotLink
# ecbstar_timelink
# evaluation_data
# evaluationcrof_data
# data = data_dict['annotated_data/v1.0/14/14_2ecbplus.xml.xml']
# print(data[2])
# print(data[4])
# print(data[-2])
# print(data[-1])
if __name__ == '__main__':
main()
``` |
{
"source": "jianlong-yuan/SimpleBaseline",
"score": 3
} |
#### File: data/dataloader/cityscapes.py
```python
import os
import torch
import numpy as np
import logging
from PIL import Image
from .seg_data_base import SegmentationDataset
from segmentron.config import cfg
from ..transform import *
import cv2
from segmentron.data.randaug import Rand_Augment
class CitySegmentation(SegmentationDataset):
BASE_DIR = 'cityscapes'
NUM_CLASS = 19
def __init__(self, root='', split='train', mode=None, transform=None, **kwargs):
super(CitySegmentation, self).__init__(root, split, mode, transform, **kwargs)
root = 'data'
if split == 'train_fine':
_split_f = ["data/train_fine.txt"]
elif split == 'train_extra':
_split_f = ["data/train_extra.txt"]
elif split == 'val_fine':
_split_f = ["data/val_fine.txt"]
elif split == 'trainval_fine':
_split_f = ["data/train_fine.txt",
"data/val_fine.txt"]
elif split == 'train_18label':
_split_f = ["data/train_fine_18label.txt"]
elif split == 'train_18unlabel':
_split_f = ["data/train_fine_18unlabel.txt"]
elif split == 'train_18unlabel_deeplabv3_1t':
_split_f = [
"data/train_fine_18unlabel_pseudo_TTA_1t_deeplabv3.txt",
"data/train_fine_18label.txt"]
elif split == 'train_fine_12label':
_split_f = ["data/train_fine_12label.txt"]
elif split == 'train_fine_12unlabel':
_split_f = ["data/train_fine_12unlabel.txt"]
elif split == 'train_fine_12_1t':
_split_f = ["data/train_fine_12label.txt",
"data/train_fine_12unlabel_noisy_1t.txt"]
elif split == 'train_fine_14label':
_split_f = ["data/train_fine_14label.txt"]
elif split == 'train_fine_14unlabel':
_split_f = ["data/train_fine_14unlabel.txt"]
elif split == 'train_fine_14_1t':
_split_f = ["data/train_fine_14label.txt",
"data/train_fine_14unlabel_noisy_1t.txt"]
elif split == 'train_fine_deeplabv3_1t':
_split_f = ["data/train_extra_noisy_deeplabv3_1t.txt",
"data/train_fine.txt"]
else:
raise RuntimeError('Unknown dataset split.')
self.mode = mode
self.images = []
self.masks = []
self.nirs = []
self.split = split
lines= []
for path in _split_f:
lines += open(path, "r").readlines()
for line in lines:
if split != 'test':
image_path, label_path = line.strip('\n').split(' ')
_image = os.path.join(root, image_path)
assert os.path.isfile(_image), _image
self.images.append(_image)
_mask = os.path.join(root, label_path)
assert os.path.isfile(_mask), _mask
self.masks.append(_mask)
elif split == 'test':
image_path = line.strip('\n')
_image = os.path.join(root, image_path)
assert os.path.isfile(_image), _image
self.images.append(_image)
logging.info('{} data num {}'.format(self.split,len(self.images)))
def __getitem__(self, index):
img = cv2.imread(self.images[index], -1)
if self.mode == 'test':
mask = np.zeros([img.shape[0], img.shape[1]])
else:
mask = np.array(Image.open(self.masks[index]))
img, mask = self.transform(img, mask)
mask = self._set_ignore_lable(mask)
if self.mode == 'test':
return {'image': img, 'label': 'None', 'name': os.path.basename(self.images[index])}
mask = self._mask_transform(mask)
return {'image': img, 'label': mask, 'name': os.path.basename(self.images[index]), 'is_noisy': float('noisy' in self.masks[index])}
def _mask_transform(self, mask):
target = mask
return torch.LongTensor(np.array(target).astype('int32'))
def _set_ignore_lable(self, mask):
mask[mask == 255] = -1
return mask
def __len__(self):
return len(self.images)
@property
def pred_offset(self):
return 0
@property
def classes(self):
"""Category names."""
return ('road', 'sidewalk', 'building', 'wall', 'fence', 'pole', 'traffic_light',
'traffic_sign', 'vegetation', 'terrain', 'sky', 'person', 'rider', 'car',
'truck', 'bus', 'train', 'motorcycle', 'bicycle')
if __name__ == '__main__':
dataset = CitySegmentation()
```
#### File: segmentron/solver/loss.py
```python
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from .lovasz_losses import lovasz_softmax
from ..data.dataloader import datasets
from ..config import cfg
__all__ = ['get_segmentation_loss']
class MixSoftmaxCrossEntropyLoss(nn.CrossEntropyLoss):
def __init__(self, aux=True, aux_weight=0.2, ignore_index=-1, **kwargs):
super(MixSoftmaxCrossEntropyLoss, self).__init__(ignore_index=ignore_index)
self.aux = aux
self.aux_weight = aux_weight
def _aux_forward(self, *inputs, **kwargs):
*preds, target = tuple(inputs)
loss = super(MixSoftmaxCrossEntropyLoss, self).forward(preds[0], target)
for i in range(1, len(preds)):
aux_loss = super(MixSoftmaxCrossEntropyLoss, self).forward(preds[i], target)
loss += self.aux_weight * aux_loss
return loss
def _multiple_forward(self, *inputs):
*preds, target = tuple(inputs)
loss = super(MixSoftmaxCrossEntropyLoss, self).forward(preds[0], target)
for i in range(1, len(preds)):
loss += super(MixSoftmaxCrossEntropyLoss, self).forward(preds[i], target)
return loss
def forward(self, *inputs, **kwargs):
preds, target = tuple(inputs)
inputs = tuple(list(preds) + [target])
if self.aux:
return dict(loss=self._aux_forward(*inputs))
elif len(preds) > 1:
return dict(loss=self._multiple_forward(*inputs))
else:
return dict(loss=super(MixSoftmaxCrossEntropyLoss, self).forward(*inputs))
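# A minimal, hypothetical usage sketch for the criterion above (not part of the original
# repo; the class count and tensor shapes are assumptions). The criterion takes a tuple of
# logits (main head first, auxiliary heads after) plus the target map, and returns a dict
# holding the combined loss.
def _mix_softmax_ce_usage_example():
    criterion = MixSoftmaxCrossEntropyLoss(aux=True, aux_weight=0.4, ignore_index=-1)
    main_out = torch.randn(2, 19, 64, 64)       # (N, C, H, W) logits of the main head
    aux_out = torch.randn(2, 19, 64, 64)        # logits of an auxiliary head
    target = torch.randint(0, 19, (2, 64, 64))  # (N, H, W) ground-truth class indices
    return criterion((main_out, aux_out), target)['loss']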
class ICNetLoss(nn.CrossEntropyLoss):
"""Cross Entropy Loss for ICNet"""
def __init__(self, aux_weight=0.4, ignore_index=-1, **kwargs):
super(ICNetLoss, self).__init__(ignore_index=ignore_index)
self.aux_weight = aux_weight
def forward(self, *inputs):
preds, target = tuple(inputs)
inputs = tuple(list(preds) + [target])
pred, pred_sub4, pred_sub8, pred_sub16, target = tuple(inputs)
# [batch, W, H] -> [batch, 1, W, H]
target = target.unsqueeze(1).float()
target_sub4 = F.interpolate(target, pred_sub4.size()[2:], mode='bilinear', align_corners=True).squeeze(1).long()
target_sub8 = F.interpolate(target, pred_sub8.size()[2:], mode='bilinear', align_corners=True).squeeze(1).long()
target_sub16 = F.interpolate(target, pred_sub16.size()[2:], mode='bilinear', align_corners=True).squeeze(
1).long()
loss1 = super(ICNetLoss, self).forward(pred_sub4, target_sub4)
loss2 = super(ICNetLoss, self).forward(pred_sub8, target_sub8)
loss3 = super(ICNetLoss, self).forward(pred_sub16, target_sub16)
return dict(loss=loss1 + loss2 * self.aux_weight + loss3 * self.aux_weight)
class OhemCrossEntropy2d(nn.Module):
def __init__(self, ignore_index=-1, thresh=0.7, min_kept=100000, use_weight=True, **kwargs):
super(OhemCrossEntropy2d, self).__init__()
self.ignore_index = ignore_index
self.thresh = float(thresh)
self.min_kept = int(min_kept)
if use_weight:
weight = torch.FloatTensor([0.8373, 0.918, 0.866, 1.0345, 1.0166, 0.9969, 0.9754,
1.0489, 0.8786, 1.0023, 0.9539, 0.9843, 1.1116, 0.9037, 1.0865, 1.0955,
1.0865, 1.1529, 1.0507])
self.criterion = torch.nn.CrossEntropyLoss(weight=weight, ignore_index=ignore_index)
else:
self.criterion = torch.nn.CrossEntropyLoss(ignore_index=ignore_index)
def forward(self, pred, target):
n, c, h, w = pred.size()
target = target.view(-1)
valid_mask = target.ne(self.ignore_index)
target = target * valid_mask.long()
num_valid = valid_mask.sum()
prob = F.softmax(pred, dim=1)
prob = prob.transpose(0, 1).reshape(c, -1)
if self.min_kept > num_valid:
print("Lables: {}".format(num_valid))
elif num_valid > 0:
# prob = prob.masked_fill_(1 - valid_mask, 1)
prob = prob.masked_fill_(~valid_mask, 1)
mask_prob = prob[target, torch.arange(len(target), dtype=torch.long)]
threshold = self.thresh
if self.min_kept > 0:
index = mask_prob.argsort()
threshold_index = index[min(len(index), self.min_kept) - 1]
if mask_prob[threshold_index] > self.thresh:
threshold = mask_prob[threshold_index]
kept_mask = mask_prob.le(threshold)
valid_mask = valid_mask * kept_mask
target = target * kept_mask.long()
# target = target.masked_fill_(1 - valid_mask, self.ignore_index)
target = target.masked_fill_(~valid_mask, self.ignore_index)
target = target.view(n, h, w)
return self.criterion(pred, target)
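# A minimal, hypothetical usage sketch for the OHEM criterion above (shapes and class
# count are assumptions): pixels whose predicted probability for their ground-truth class
# exceeds the threshold are set to ignore_index, keeping at least `min_kept` of the
# hardest pixels before the weighted cross entropy is applied.
def _ohem_ce_usage_example():
    criterion = OhemCrossEntropy2d(ignore_index=-1, thresh=0.7, min_kept=10000)
    logits = torch.randn(2, 19, 128, 128)          # (N, C, H, W) raw scores, 19 classes
    labels = torch.randint(0, 19, (2, 128, 128))   # (N, H, W) ground-truth class indices
    return criterion(logits, labels)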
class EncNetLoss(nn.CrossEntropyLoss):
"""2D Cross Entropy Loss with SE Loss"""
def __init__(self, aux=False, aux_weight=0.4, weight=None, ignore_index=-1, **kwargs):
super(EncNetLoss, self).__init__(weight, None, ignore_index)
self.se_loss = cfg.MODEL.ENCNET.SE_LOSS
self.se_weight = cfg.MODEL.ENCNET.SE_WEIGHT
self.nclass = datasets[cfg.DATASET.NAME].NUM_CLASS
self.aux = aux
self.aux_weight = aux_weight
self.bceloss = nn.BCELoss(weight)
def forward(self, *inputs):
preds, target = tuple(inputs)
inputs = tuple(list(preds) + [target])
if not self.se_loss and not self.aux:
return super(EncNetLoss, self).forward(*inputs)
elif not self.se_loss:
pred1, pred2, target = tuple(inputs)
loss1 = super(EncNetLoss, self).forward(pred1, target)
loss2 = super(EncNetLoss, self).forward(pred2, target)
return dict(loss=loss1 + self.aux_weight * loss2)
elif not self.aux:
pred, se_pred, target = tuple(inputs)
se_target = self._get_batch_label_vector(target, nclass=self.nclass).type_as(pred)
loss1 = super(EncNetLoss, self).forward(pred, target)
loss2 = self.bceloss(torch.sigmoid(se_pred), se_target)
return dict(loss=loss1 + self.se_weight * loss2)
else:
pred1, se_pred, pred2, target = tuple(inputs)
se_target = self._get_batch_label_vector(target, nclass=self.nclass).type_as(pred1)
loss1 = super(EncNetLoss, self).forward(pred1, target)
loss2 = super(EncNetLoss, self).forward(pred2, target)
loss3 = self.bceloss(torch.sigmoid(se_pred), se_target)
return dict(loss=loss1 + self.aux_weight * loss2 + self.se_weight * loss3)
@staticmethod
def _get_batch_label_vector(target, nclass):
# target is a 3D Variable BxHxW, output is 2D BxnClass
batch = target.size(0)
tvect = Variable(torch.zeros(batch, nclass))
for i in range(batch):
hist = torch.histc(target[i].cpu().data.float(),
bins=nclass, min=0,
max=nclass - 1)
vect = hist > 0
tvect[i] = vect
return tvect
class MixSoftmaxCrossEntropyOHEMLoss(OhemCrossEntropy2d):
def __init__(self, aux=False, aux_weight=0.4, weight=None, ignore_index=-1, **kwargs):
super(MixSoftmaxCrossEntropyOHEMLoss, self).__init__(ignore_index=ignore_index)
self.aux = aux
self.aux_weight = aux_weight
self.bceloss = nn.BCELoss(weight)
def _aux_forward(self, *inputs, **kwargs):
*preds, target = tuple(inputs)
loss = super(MixSoftmaxCrossEntropyOHEMLoss, self).forward(preds[0], target)
for i in range(1, len(preds)):
aux_loss = super(MixSoftmaxCrossEntropyOHEMLoss, self).forward(preds[i], target)
loss += self.aux_weight * aux_loss
return loss
def forward(self, *inputs):
preds, target = tuple(inputs)
inputs = tuple(list(preds) + [target])
if self.aux:
return dict(loss=self._aux_forward(*inputs))
else:
return dict(loss=super(MixSoftmaxCrossEntropyOHEMLoss, self).forward(*inputs))
class LovaszSoftmax(nn.Module):
def __init__(self, aux=True, aux_weight=0.2, ignore_index=-1, **kwargs):
super(LovaszSoftmax, self).__init__()
self.aux = aux
self.aux_weight = aux_weight
self.ignore_index = ignore_index
def _aux_forward(self, *inputs, **kwargs):
*preds, target = tuple(inputs)
loss = lovasz_softmax(F.softmax(preds[0], dim=1), target, ignore=self.ignore_index)
for i in range(1, len(preds)):
aux_loss = lovasz_softmax(F.softmax(preds[i], dim=1), target, ignore=self.ignore_index)
loss += self.aux_weight * aux_loss
return loss
def _multiple_forward(self, *inputs):
*preds, target = tuple(inputs)
        loss = lovasz_softmax(F.softmax(preds[0], dim=1), target, ignore=self.ignore_index)
        for i in range(1, len(preds)):
            loss += lovasz_softmax(F.softmax(preds[i], dim=1), target, ignore=self.ignore_index)
return loss
def forward(self, *inputs, **kwargs):
preds, target = tuple(inputs)
inputs = tuple(list(preds) + [target])
if self.aux:
return dict(loss=self._aux_forward(*inputs))
elif len(preds) > 1:
return dict(loss=self._multiple_forward(*inputs))
else:
            return dict(loss=lovasz_softmax(F.softmax(preds[0], dim=1), target, ignore=self.ignore_index))
class FocalLoss(nn.Module):
def __init__(self, alpha=0.5, gamma=2, weight=None, aux=True, aux_weight=0.2, ignore_index=-1,
size_average=True):
super().__init__()
self.alpha = alpha
self.gamma = gamma
self.weight = weight
self.ignore_index = ignore_index
self.aux = aux
self.aux_weight = aux_weight
self.size_average = size_average
self.ce_fn = nn.CrossEntropyLoss(weight=self.weight, ignore_index=self.ignore_index)
def _aux_forward(self, *inputs, **kwargs):
*preds, target = tuple(inputs)
loss = self._base_forward(preds[0], target)
for i in range(1, len(preds)):
aux_loss = self._base_forward(preds[i], target)
loss += self.aux_weight * aux_loss
return loss
def _base_forward(self, output, target):
if output.dim() > 2:
output = output.contiguous().view(output.size(0), output.size(1), -1)
output = output.transpose(1, 2)
output = output.contiguous().view(-1, output.size(2)).squeeze()
if target.dim() == 4:
target = target.contiguous().view(target.size(0), target.size(1), -1)
target = target.transpose(1, 2)
target = target.contiguous().view(-1, target.size(2)).squeeze()
elif target.dim() == 3:
target = target.view(-1)
else:
target = target.view(-1, 1)
logpt = self.ce_fn(output, target)
pt = torch.exp(-logpt)
loss = ((1 - pt) ** self.gamma) * self.alpha * logpt
if self.size_average:
return loss.mean()
else:
return loss.sum()
def forward(self, *inputs, **kwargs):
preds, target = tuple(inputs)
inputs = tuple(list(preds) + [target])
return dict(loss=self._aux_forward(*inputs))
class BinaryDiceLoss(nn.Module):
"""Dice loss of binary class
Args:
smooth: A float number to smooth loss, and avoid NaN error, default: 1
p: Denominator value: \sum{x^p} + \sum{y^p}, default: 2
predict: A tensor of shape [N, *]
target: A tensor of shape same with predict
reduction: Reduction method to apply, return mean over batch if 'mean',
return sum if 'sum', return a tensor of shape [N,] if 'none'
Returns:
Loss tensor according to arg reduction
Raise:
Exception if unexpected reduction
"""
def __init__(self, smooth=1, p=2, reduction='mean'):
super(BinaryDiceLoss, self).__init__()
self.smooth = smooth
self.p = p
self.reduction = reduction
def forward(self, predict, target, valid_mask):
assert predict.shape[0] == target.shape[0], "predict & target batch size don't match"
predict = predict.contiguous().view(predict.shape[0], -1)
target = target.contiguous().view(target.shape[0], -1)
valid_mask = valid_mask.contiguous().view(valid_mask.shape[0], -1)
num = torch.sum(torch.mul(predict, target) * valid_mask, dim=1) * 2 + self.smooth
den = torch.sum((predict.pow(self.p) + target.pow(self.p)) * valid_mask, dim=1) + self.smooth
loss = 1 - num / den
if self.reduction == 'mean':
return loss.mean()
elif self.reduction == 'sum':
return loss.sum()
elif self.reduction == 'none':
return loss
else:
raise Exception('Unexpected reduction {}'.format(self.reduction))
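# For reference, the quantity computed above is the soft Dice loss per sample:
#   loss = 1 - (2 * sum(pred * target) + smooth) / (sum(pred**p) + sum(target**p) + smooth)
# where the sums run only over positions with valid_mask == 1 and `p` is the exponent
# passed to the constructor (default 2).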
class DiceLoss(nn.Module):
"""Dice loss, need one hot encode input"""
def __init__(self, weight=None, aux=True, aux_weight=0.4, ignore_index=-1, **kwargs):
super(DiceLoss, self).__init__()
self.kwargs = kwargs
self.weight = weight
self.ignore_index = ignore_index
self.aux = aux
self.aux_weight = aux_weight
def _base_forward(self, predict, target, valid_mask):
dice = BinaryDiceLoss(**self.kwargs)
total_loss = 0
predict = F.softmax(predict, dim=1)
for i in range(target.shape[-1]):
if i != self.ignore_index:
dice_loss = dice(predict[:, i], target[..., i], valid_mask)
if self.weight is not None:
assert self.weight.shape[0] == target.shape[1], \
'Expect weight shape [{}], get[{}]'.format(target.shape[1], self.weight.shape[0])
                    dice_loss *= self.weight[i]
total_loss += dice_loss
return total_loss / target.shape[-1]
def _aux_forward(self, *inputs, **kwargs):
*preds, target = tuple(inputs)
valid_mask = (target != self.ignore_index).long()
target_one_hot = F.one_hot(torch.clamp_min(target, 0))
loss = self._base_forward(preds[0], target_one_hot, valid_mask)
for i in range(1, len(preds)):
aux_loss = self._base_forward(preds[i], target_one_hot, valid_mask)
loss += self.aux_weight * aux_loss
return loss
def forward(self, *inputs):
preds, target = tuple(inputs)
inputs = tuple(list(preds) + [target])
return dict(loss=self._aux_forward(*inputs))
class L2SP(nn.Module):
def __init__(self, model):
super(L2SP, self).__init__()
assert cfg.SOLVER.WEIGHT_DECAY == 0
self.pretrained_weights=torch.load(cfg.SOLVER.REGULAR_MINE.REGULAR_PRETRAINED_PATH, map_location=lambda storage, loc: storage)
self.weight_decay = cfg.SOLVER.REGULAR_MINE.WIGHT_DECAT
keys_all = list([k for k, v in model.named_parameters() if v.requires_grad])
keys = list([k for k, v in model.named_parameters() if v.requires_grad])
tmp_keys = list(model.state_dict().keys())
for k, v in model.state_dict().items():
# if 'encoder' not in k:
# keys.remove(k)
if 'weight' in k:
k1 = k.replace('weight', 'running_mean')
if k1 in tmp_keys and k in keys:
keys.remove(k)
elif 'bias' in k:
if k in keys:
keys.remove(k)
self.l2sp = keys
self.l2 = list(set(keys_all) - set(keys))
self.regular_dict = {}
pretrained_keys = self.pretrained_weights.keys()
for i, key in enumerate(self.l2sp):
if 'conv2d_list.3' in key or 'conv2d_list.2' in key:
logging.info(key)
continue
if key.replace('module.', '') in pretrained_keys:
self.regular_dict[key] = self.pretrained_weights[key.replace('module.', '')]
elif key.replace('encoder.', '').replace('module.', '') in pretrained_keys:
self.regular_dict[key] = self.pretrained_weights[key.replace('encoder.', '').replace('module.', '')]
else:
ValueError('error key {}'.format(key))
for i, key in enumerate(self.l2):
if 'conv2d_list.3' in key or 'conv2d_list.2' in key:
logging.info(key)
continue
self.regular_dict[key] = torch.tensor(0.0)
logging.info('warning using l2sp l2: {} l2sp: {} reg all {} keys all {}'.format(len(self.l2), len(self.l2sp), len(self.regular_dict.keys()), len(keys_all)))
def forward(self, model):
keys = self.regular_dict.keys()
tmp_keys = {}
for k, v in model.named_parameters():
if v.requires_grad:
tmp_keys[k] = v
outputs = [self.func((k, tmp_keys[k])) for k in keys]
return sum(outputs) * self.weight_decay
def func(self, inputs):
k, w = inputs
w0 = self.regular_dict[k]
out= torch.pow(w - w0.to(w.device), 2).sum()
return out
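# Descriptive note on the regulariser above: parameters listed in self.l2sp (weights that
# also exist in the pre-trained checkpoint) are penalised towards their pre-trained values
# w0, while the remaining trainable parameters in self.l2 use w0 = 0, i.e. a plain L2
# penalty. The returned value is self.weight_decay * sum_k ||w_k - w0_k||^2, an L2-SP
# style starting-point regularisation.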
class DynamicCEAndSCELoss(torch.nn.Module):
def __init__(self, aux=True, aux_weight=0.2, ignore_index=-1):
super(DynamicCEAndSCELoss, self).__init__()
self.alpha = cfg.SOLVER.CEAndSCELoss.ALPHA
self.beta = cfg.SOLVER.CEAndSCELoss.BATE
self.using_weight = cfg.SOLVER.CEAndSCELoss.USING_WEIGHT
self.cross_entropy = nn.CrossEntropyLoss(reduction='none')
self.ignore_index = ignore_index
self.nclass = datasets[cfg.DATASET.NAME].NUM_CLASS
self.cross_entropy_clean = nn.CrossEntropyLoss(reduction='mean', ignore_index=ignore_index)
def _forward(self, pred, labels):
if self.using_weight:
weights = torch.max(torch.softmax(pred, dim=1), dim=1, keepdim=True).values
weights[weights>0.8] = 1.0
weights = torch.clamp(weights, min=1e-10, max=1.0).detach()
else:
weights = 0.5
# CCE
not_ignore_mask = labels.ne(self.ignore_index).float()
# print(pred.shape, labels.shape)
ce = self.cross_entropy(pred, labels)
ce = torch.mean(ce * not_ignore_mask * (1-weights))
# RCE
pred = F.softmax(pred, dim=1)
label_one_hot = F.one_hot(torch.clamp(labels, min=0, max=self.nclass - 1), self.nclass).float()
label_one_hot = torch.clamp(label_one_hot, min=1e-4, max=1.0)
label_one_hot = label_one_hot.permute([0, 3, 1, 2]).contiguous()
rce = -1 * pred * torch.log(label_one_hot)
rce = torch.sum(rce, dim=1)
rce = torch.mean(rce * not_ignore_mask * weights)
# logging.info((self.alpha * ce, self.beta * rce))
# Loss
loss = self.alpha * ce + self.beta * rce
return loss
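    # Summary of the term computed above: with per-pixel weights w equal to the maximum
    # softmax probability (set to 1 where it exceeds 0.8, or a constant 0.5 when
    # USING_WEIGHT is disabled), the loss is
    #   self.alpha * mean((1 - w) * CE(pred, label)) + self.beta * mean(w * RCE(pred, label))
    # where RCE is the reverse cross entropy -sum_c softmax(pred)_c * log(one_hot(label)_c)
    # with the one-hot target clamped to [1e-4, 1]; ignored pixels are masked out of both terms.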
def _tuple_inputs(self, preds, target):
return sum([self._forward(preds[i], target) for i in range(len(preds))])
def _tuple_inputs_clean(self, preds, target):
return sum([self.cross_entropy_clean(preds[i], target) for i in range(len(preds))])
def forward(self, *inputs, **kwargs):
p, t, noisy_inds = inputs
p = p[0]
ps_noisy = []
ps_clean = []
t_noisy = []
t_clean = []
for nid, is_noisy in enumerate(noisy_inds):
if is_noisy:
ps_noisy.append(p[nid, :, :, :])
t_noisy.append(t[nid, :, :])
else:
ps_clean.append(p[nid, :, :, :])
t_clean.append(t[nid, :, :])
loss_noisy, loss_clean = 0.0, 0.0
if len(ps_noisy):
preds_noisy = torch.stack(ps_noisy, dim=0)
target_noisy = torch.stack(t_noisy, dim=0)
loss_noisy = self._tuple_inputs(preds=tuple([preds_noisy]), target=target_noisy)
if len(ps_clean):
preds_clean = torch.stack(ps_clean, dim=0)
target_clean = torch.stack(t_clean, dim=0)
loss_clean = self._tuple_inputs_clean(preds=tuple([preds_clean]), target=target_clean)
loss = loss_noisy + loss_clean
# logging.info('loss noisy {:.4f} loss clean {:.4f}'.format(loss_noisy, loss_clean))
# preds, target = tuple(inputs)
return dict(loss=loss)
def get_segmentation_loss(model, use_ohem=False, **kwargs):
if use_ohem:
return MixSoftmaxCrossEntropyOHEMLoss(**kwargs)
elif cfg.SOLVER.LOSS_NAME == 'lovasz':
logging.info('Use lovasz loss!')
return LovaszSoftmax(**kwargs)
elif cfg.SOLVER.LOSS_NAME == 'focal':
logging.info('Use focal loss!')
return FocalLoss(**kwargs)
elif cfg.SOLVER.LOSS_NAME == 'dice':
logging.info('Use dice loss!')
return DiceLoss(**kwargs)
elif cfg.SOLVER.LOSS_NAME == 'DynamicCEAndSCELoss':
logging.info('Use DynamicCEAndSCELoss')
return DynamicCEAndSCELoss(**kwargs)
model = model.lower()
if model == 'icnet':
return ICNetLoss(**kwargs)
elif model == 'encnet':
return EncNetLoss(**kwargs)
else:
return MixSoftmaxCrossEntropyLoss(**kwargs)
``` |
{
"source": "jianlongzhou/openshift-tools",
"score": 2
} |
#### File: scripts/monitoring/cron-send-daemonset-status.py
```python
import argparse
import time
import sys
import logging
sys.path.insert(0, '/container_setup')
from zabbix_data_sync import *
logging.basicConfig(
format='%(asctime)s - %(relativeCreated)6d - %(levelname)-8s - %(message)s',
)
logger = logging.getLogger()
logger.setLevel(logging.WARN)
# Our jenkins server does not include these rpms.
# In the future we might move this to a container where these
# libs might exist
#pylint: disable=import-error
from openshift_tools.monitoring.ocutil import OCUtil
from openshift_tools.monitoring.metric_sender import MetricSender
ocutil = OCUtil()
def runOCcmd_yaml(cmd, base_cmd='oc'):
""" log commands through ocutil """
logger.info(base_cmd + " " + cmd)
ocy_time = time.time()
ocy_result = ocutil.run_user_cmd_yaml(cmd, base_cmd=base_cmd, )
logger.info("oc command took %s seconds", str(time.time() - ocy_time))
return ocy_result
def parse_args():
""" parse the args from the cli """
parser = argparse.ArgumentParser(description='ds status check ')
parser.add_argument('-v', '--verbose', action='count', default=0,
help='verbosity level, specify multiple')
parser.add_argument('--namespace', default="", help='service namespace')
    parser.add_argument('--ds', default="", help='daemonset name')
parser.add_argument('-ma', '--master_included', action='store_true', default=None, help='if master included')
parser.add_argument('-key', default="key", help='zabbix key')
args = parser.parse_args()
if args.verbose > 0:
logger.setLevel(logging.INFO)
if args.verbose > 1:
logger.setLevel(logging.DEBUG)
return args
def check_ds_status(args=None, ):
""" check if the ds have the right number """
ocutil.namespace = args.namespace
logger.info('Namespace: %s', args.namespace)
cluster_desired_dspod_count = cluster_desired_infra_size + cluster_desired_compute_size
if args.master_included:
logger.info('master included to this ds')
cluster_desired_dspod_count = cluster_desired_dspod_count + cluster_desired_master_size
pods = runOCcmd_yaml("get pod ")
running_pod_count = 0
    logger.info('trying to find ds pod : %s', args.ds)
for pod in pods['items']:
if (not pod['metadata']['name'].find(args.ds) == -1) and (pod['status']['phase'] == 'Running'):
running_pod_count = running_pod_count + 1
logger.info('found match ds pod : %s', pod['metadata']['name'])
logger.info('Healthy dspod count is : %s', running_pod_count)
logger.info('the design number is: %s', cluster_desired_dspod_count )
if running_pod_count == cluster_desired_dspod_count:
        # running ds pod count matches the required count
return 1
else:
return 0
def main():
""" ds pod check """
args = parse_args()
logger.debug("args: ")
logger.debug(args)
result = check_ds_status(args=args, )
#send the value to zabbix
mts = MetricSender(verbose=args.verbose)
mts.add_metric({args.key: result})
mts.send_metrics()
if __name__ == "__main__":
main()
```
#### File: scripts/monitoring/cron-send-elb-health.py
```python
from ConfigParser import SafeConfigParser
import argparse
import re
import urllib2
import boto3
from openshift_tools.monitoring.metric_sender import MetricSender
# number instances behind elb
elb_no_instances = []
# number of unhealthy instances
elb_instances_unhealthy = []
# Comparison for instance state
instance_healthy = "InService"
# Monitoring should only report for ELBs created by service of type LoadBalancer in namespaces which are defined in this regex.
watched_ns_regex = '(^kube-.*|^openshift-.*)'
def parse_args():
''' parse the args from the cli '''
parser = argparse.ArgumentParser(description='ELB status checker')
parser.add_argument('--clusterid', default="", help='clusterid', required=True)
parser.add_argument('-v', '--verbose', action='store_true', default=None, help='Verbose?')
parser.add_argument('--debug', action='store_true', default=None, help='Debug?')
return parser.parse_args()
def get_aws_creds(creds_file):
''' Get AWS authentication details from .aws/credentials file '''
parser = SafeConfigParser()
parser.read(creds_file)
aws_access_key = parser.get('ops_monitoring', 'aws_access_key_id')
aws_secret_key = parser.get('ops_monitoring', 'aws_secret_access_key')
return [aws_access_key, aws_secret_key]
def get_instance_id():
''' Get this instance AWS ID '''
instance_id = urllib2.urlopen('http://instance-data/latest/meta-data/instance-id').read()
return instance_id
def get_instance_region():
''' Get instances region '''
instance_zone = urllib2.urlopen('http://169.254.169.254/latest/meta-data/placement/availability-zone').read()
instance_region = instance_zone[:-1]
return instance_region
def get_elb_name(lb):
''' Get ELB name '''
return lb['LoadBalancerName']
def filter_by_cluster(elb_tags, cluster_id):
''' Find all ELBs for a specific cluster '''
cluster_elbs = []
for elb_tag_description in elb_tags['TagDescriptions']:
for elb_tag in elb_tag_description['Tags']:
if elb_tag['Key'] == 'kubernetes.io/cluster/' + cluster_id:
cluster_elbs.append(elb_tag_description)
return cluster_elbs
def filter_monitored_service_elbs(elb_tag_descriptions):
''' Filter elbs created by service of type LoadBalancer not in watched namespaces '''
elbs = []
for elb_tag_description in elb_tag_descriptions:
ignore_elb = False
for elb_tag in elb_tag_description['Tags']:
# ELBs created by service of type LoadBalancer have a tag where the value is <namespace/service-name>
# If an elb is created by service of type LoadBalancer but not in a watched namespace, ignore
if elb_tag['Key'] == 'kubernetes.io/service-name' and not re.match(watched_ns_regex, elb_tag['Value']):
ignore_elb = True
break
if not ignore_elb:
elbs.append(elb_tag_description)
return elbs
def elb_instance_count(elb_instances, elb_name):
''' Get count of instances behind ELB '''
if len(elb_instances) == 0:
elb_no_instances.append(elb_name)
def elb_instance_health(instance_state, instance_name, elb_name):
''' Check health of each instance '''
if instance_state != instance_healthy:
unhealthy_detected = {
"elb": elb_name,
"instance": instance_name,
"state": instance_state,
}
elb_instances_unhealthy.append(unhealthy_detected)
def elb_health_check(client, elbs_discovered):
''' Check health of each node found behind each ELB '''
# Iterate through raw health checks of each instance behind each ELB
for i, item in enumerate(elbs_discovered):
elb_health_checks_raw = client.describe_instance_health(
LoadBalancerName=item
)
# Get https response
elb_response_http = elb_health_checks_raw['ResponseMetadata']['HTTPStatusCode']
# Get instance health/state
elb_instance_response_states = elb_health_checks_raw['InstanceStates']
# Check count of instances behind each ELB. Alert on 0 count.
elb_instance_count(elb_instance_response_states, elbs_discovered[i])
elb_name = elbs_discovered[i]
# Iterate through each instances health/state behind the ELB
for elb_instance_response_state in elb_instance_response_states:
elb_instance_name = elb_instance_response_state['InstanceId']
elb_instance_state = elb_instance_response_state['State']
# Check http response
if elb_response_http != 200:
print "A potential error occurred. HTTP Response: %s" % elb_response_http
elb_instance_health(elb_instance_state, elb_instance_name, elb_name)
def main():
''' Gather and examine details about this node within ELBs '''
args = parse_args()
aws_access, aws_secret = get_aws_creds('/root/.aws/credentials')
instance_region = get_instance_region()
# Create boto client to access ELB resources
client = boto3.client(
'elb',
aws_access_key_id=aws_access,
aws_secret_access_key=aws_secret,
region_name=instance_region
)
# Call all available loadbalancers in the AWS account and store blob result in elb_descriptions
elb_descriptions = client.describe_load_balancers()
elb_names = map(get_elb_name, elb_descriptions['LoadBalancerDescriptions'])
# Get a list of available ELBs for a cluster
elb_tags = client.describe_tags(LoadBalancerNames=elb_names)
cluster_elbs = filter_by_cluster(elb_tags, args.clusterid)
# Filter any ELBs created by service of type LoadBalancer that is not in our watched namespaces
monitored_elbs = filter_monitored_service_elbs(cluster_elbs)
monitored_elb_names = map(get_elb_name, monitored_elbs)
# Perform health check of each instance available behind each ELB
elb_health_check(client, monitored_elb_names)
### Metric Checks
if len(elb_no_instances) != 0:
for _, elb in enumerate(elb_no_instances):
elb_instances_unhealthy.append(elb)
print "ELB: %s has no instances behind it. Please investigate." % elb
### Unhealthy count check
elb_instances_unhealthy_metric = len(elb_instances_unhealthy)
if elb_instances_unhealthy_metric != 0:
for _, unhealthy in enumerate(elb_instances_unhealthy):
print unhealthy
# ''' Now that we know if this instance is missing, feed zabbix '''
mts = MetricSender(verbose=args.verbose, debug=args.debug)
mts.add_metric({'openshift.aws.elb.health' : elb_instances_unhealthy_metric})
mts.send_metrics()
if __name__ == '__main__':
main()
``` |
{
"source": "jianming93/incremental_learner",
"score": 2
} |
#### File: incremental_learner/apps/add_class.py
```python
import os
import shutil
import io
import base64
import pickle
from zipfile import ZipFile
from dash_bootstrap_components._components.Col import Col
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output, State
import dash_table
import tqdm
import numpy as np
from server import app, shell_family, config
from src.utils import ImageGenerator
from sql_app.crud import get_all_shells_by_shell_family_id, bulk_create_images, bulk_create_shell_images_for_one_shell_family, create_shell_for_shell_family
from sql_app.database import SessionLocal, engine
help_modal = dbc.Modal(
[
dbc.ModalHeader("How to add class", className="bg-dark text-light"),
dbc.ModalBody("Upload a zip file containing all the images of the class desired to be part of the model's dataset. "
"Verify the correct number of images below before clicking the button at the bottom."),
dbc.ModalFooter(
[
dbc.Button("Close", id="add-class-help-modal-close-button", className="ml-auto", color='primary')
]
)
],
id="add-class-help-modal",
size="lg"
)
layout = dbc.Container(
[
dbc.Row(
[
dbc.Col(
[
html.H1("Add New Class", className="d-flex justify-content-start"),
]
),
],
className="pt-2 pb-2"
),
dbc.Row(
[
dbc.Col(
[
html.H5("Upload zip file of new classes images",
className="d-inline justify-content-start"),
html.A(
[
html.Img(id="add-class-help-img", src='assets/question-mark-inside-a-circle-svgrepo-com.svg')
],
id="add-class-help-button",
className="d-inline"
)
]
)
],
className="pt-2 pb-2"
),
dbc.Row(
[
dcc.Upload(
id="add-class-upload-image-area",
children=
[
html.Div(
[
html.A("Drag and Drop or Select Zip File")
]
)
],
)
],
className="pt-2 pb-2"
),
dcc.Loading(
dbc.Alert("Successfully Uploaded",
id='add-class-success-alert',
color="success",
dismissable=True,
duration=5000,
is_open=False),
fullscreen=True,
type='default',
parent_className ="loader"
),
dbc.Alert("Upload has failed! Please check inputs!",
id='add-class-fail-alert',
color="danger",
dismissable=True,
duration=5000,
is_open=False),
dbc.Row(
[
dbc.Col(
[
html.H5('Uploaded zip file summary')
]
),
],
className="pt-2 pb-2"
),
dbc.Row(
[
dbc.Col(
[
dcc.Loading(
dash_table.DataTable(
id='add-class-table',
columns=[{"name": "Class Name", "id": "class-name-column"},
{"name": "Number of images", "id": "number-of-images-column"},
{"name": "Existing Class", "id": "existing-class-column"},],
style_header={
'backgroundColor': "#343a40",
'color': "#f8f9fa",
}
),
id='add-class-table-loader',
type='default',
parent_className ="loader"
)
]
),
],
className="pt-2 pb-2"
),
dbc.Row(
[
dbc.Col(
[
dbc.Button("Add New Classes", id="add-class-button", color="primary", className="mr-1", n_clicks=0),
],
className="p-0"
)
],
className="pt-2 pb-2"
),
html.Br(),
help_modal,
],
id="add-class-main-container",
className="pt-3 pb-3",
)
@app.callback(
Output('add-class-table', 'data'),
[
Input('add-class-upload-image-area', 'contents')
],
[
State('add-class-upload-image-area', 'filename'),
State('add-class-upload-image-area', 'last_modified')
]
)
def generate_uploaded_zip_file_summary(content, name, date):
if content is not None and name.endswith('.zip'):
# the content needs to be split. It contains the type and the real content
content_type, content_string = content.split(',')
# Decode the base64 string
content_decoded = base64.b64decode(content_string)
# Use BytesIO to handle the decoded content
zip_str = io.BytesIO(content_decoded)
# Now you can use ZipFile to take the BytesIO output
zip_obj = ZipFile(zip_str, 'r')
classes_folder_list = [x for x in zip_obj.namelist() if x.endswith('/')][1:]
# Populate table
current_classes_in_model = os.listdir(config['dash_environment']['save_image_directory'])
file_counter = []
        for folder in classes_folder_list:
            class_name = folder.split('/')[1]
            file_counter.append({"class-name-column": class_name,
                                 "number-of-images-column": len([x for x in zip_obj.namelist() if x.startswith(folder) and not x.endswith('/')]),
                                 "existing-class-column": class_name.lower() in current_classes_in_model})
zip_obj.close()
return file_counter
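# Note (editor's addition): dcc.Upload supplies `contents` as a data-URI style string of the form
# "data:<mime type>;base64,<payload>", which is why both callbacks in this file split the string on
# the comma and base64-decode the payload half before wrapping it in BytesIO/ZipFile.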
@app.callback(
[
Output('add-class-success-alert', 'is_open'),
Output('add-class-fail-alert', 'is_open'),
],
[
Input('add-class-button', 'n_clicks')
],
[
State('add-class-upload-image-area', 'contents'),
State('add-class-upload-image-area', 'filename'),
State('add-class-upload-image-area', 'last_modified')
]
)
def add_class_to_model(n_clicks, content, name, date):
if n_clicks > 0:
try:
# the content needs to be split. It contains the type and the real content
content_type, content_string = content.split(',')
# Decode the base64 string
content_decoded = base64.b64decode(content_string)
# Use BytesIO to handle the decoded content
zip_str = io.BytesIO(content_decoded)
# Now you can use ZipFile to take the BytesIO output
zip_obj = ZipFile(zip_str, 'r')
# Extract files
classes_folder_list = [x for x in zip_obj.namelist() if x.endswith('/')][1:]
current_classes_in_model = os.listdir(config['dash_environment']['save_image_directory'])
# Need class_name_array, class_index_array and image_path_array
# class_name_array stores the unique classes present
# class_index_array stores the class index for each image. Index goes by the class folder list
# image_path_array stores the image path after copying to the backend which is determined
# by config['dash_environment']['save_image_directory']
unique_class_name_array = []
class_index_array = []
class_name_array = []
image_path_array = []
for i in range(len(classes_folder_list)):
folder = classes_folder_list[i]
class_name = folder.split('/')[1]
class_name = class_name.lower()
if class_name not in current_classes_in_model:
os.mkdir(os.path.join(config['dash_environment']['save_image_directory'], class_name))
if class_name not in unique_class_name_array:
unique_class_name_array.append(class_name)
save_path = os.path.join(config['dash_environment']['save_image_directory'], class_name)
all_class_image_paths = [x for x in zip_obj.namelist() if x.startswith(folder)][1:]
for class_image_path in all_class_image_paths:
class_filename = os.path.basename(class_image_path)
with open(os.path.join(save_path, class_filename), 'wb') as save_image_file:
save_image_file.write(zip_obj.read(class_image_path))
image_path_array.append(os.path.join(save_path, class_filename))
class_index_array.append(i)
class_name_array.append(class_name)
# all_dataset_image_features = np.array([])
# all_image_paths = []
# all_classes = []
# Use image_path_array and class_index_array for image generator
# Create image generator
# image_generator = ImageGenerator(image_path_array, class_index_array, config['model']['batch_size'], (config['model']['target_size'], config['model']['target_size']))
# for (batch_images, batch_filepaths, batch_classes) in tqdm.tqdm(image_generator, total=int(np.ceil(len(image_generator) / config['model']['batch_size']))):
# features = shell_family.preprocessor.predict(batch_images)
# if all_dataset_image_features.shape[0] == 0:
# all_dataset_image_features = np.array(features)
# else:
# all_dataset_image_features = np.concatenate(
# [
# all_dataset_image_features,
# features
# ],
# axis=0,
# )
# all_image_paths += list(batch_filepaths)
# all_classes += [unique_class_name_array[i] for i in batch_classes]
# shell_family.fit(image_generator, unique_class_name_array, config['model']['model_path'])
# Save to database
app.logger.info('Adding new classes/existing classes images and metadata to database..')
db = SessionLocal()
# Add images to database
create_images_result = bulk_create_images(db, class_name_array, image_path_array)
# Check for current shells existing in shell family
all_shells_for_shell_family_id_result = get_all_shells_by_shell_family_id(db, shell_family.shell_family_id)
# Remove overlaps if present
if len(all_shells_for_shell_family_id_result) == 0:
class_to_add = unique_class_name_array
else:
database_shell_ids = [all_shells_for_shell_family_id_result[i].shell_id for i in range(len(all_shells_for_shell_family_id_result))]
# Find difference to add to database
class_to_add = list(set(unique_class_name_array).difference(database_shell_ids))
app.logger.info('Found following new shells to add to database: {}'.format(class_to_add))
# Add shell ids to database
for new_class in class_to_add:
create_shell_for_shell_family(db, config['model']['shell_family_id'], new_class, None, None, None, None)
            # Assign images to shell family and shell id
assign_to_shell_family_and_shells_result = bulk_create_shell_images_for_one_shell_family(db, shell_family.shell_family_id, class_name_array, image_path_array, [None for i in range(len(image_path_array))])
db.close()
app.logger.info('Successfully added to database!')
# Close zip file when done
zip_obj.close()
app.logger.info('Successfully added/updated new images with the following classes: {}'.format(unique_class_name_array))
return True, False
except Exception as e:
app.logger.info(e)
app.logger.info('Error in adding new classes!')
return False, True
else:
return False, False
@app.callback(
Output("add-class-help-modal", "is_open"),
[Input("add-class-help-button", "n_clicks"), Input("add-class-help-modal-close-button", "n_clicks")],
[State("add-class-help-modal", "is_open")],
)
def toggle_modal(n1, n2, is_open):
if n1 or n2:
return not is_open
return is_open
```
#### File: incremental_learner/src/shell.py
```python
import pickle
import timeit
from collections import OrderedDict
import os
import sys
sys.path.append(os.path.dirname(__file__))
import numpy as np
from tqdm import tqdm
from sklearn.svm import OneClassSVM
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.vgg16 import preprocess_input as vgg16_preprocess_input
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.applications.resnet50 import preprocess_input as resnet50_preprocess_input
from tensorflow.keras.applications import MobileNet
from tensorflow.keras.applications.mobilenet import preprocess_input as mobilenet_preprocess_input
from tensorflow.keras.layers import GlobalAvgPool2D
from tensorflow.keras import Model, Sequential
from tensorflow.keras.preprocessing import image
from utils import normalize
from utils import sorted_neighbors_of_i
# from utils import evaluate
from utils import fit_to_list
ACCEPTED_PREPROCESSORS = ("vgg16", "resnet50", "mobilenet")
PREPROCESSORS_PREPROCESS_FUNCTIONS = {'vgg16': vgg16_preprocess_input,
'resnet50': resnet50_preprocess_input,
'mobilenet': mobilenet_preprocess_input}
# assumes data has been pre-normalized
class ShellModel:
"""Creates a shell for one class mean.
"""
def __init__(self):
self.parameters = None
self.raw_features = None
self.shell_mean = None
self.num_instances = None
self.noise_mean = None
self.noise_std = None
self.mean_norm = 0
def fit(self, global_mean):
"""Generate the shell parameters based on the global mean the shell family currently sees
"""
self.__generate_one_class_mean(global_mean)
def __generate_one_class_mean(self, global_mean):
"""Generate the one class mean which is the 'center' of the shell along with its 'diameter'
"""
normalized_features, _ = normalize(self.raw_features, global_mean)
normalized_mean = np.mean(normalized_features, axis=0, keepdims=True)
# normalized_mean = np.mean(self.raw_features, axis=0, keepdims=True)
# noise = self.raw_features - normalized_mean
noise = normalized_features - normalized_mean
noise = np.linalg.norm(noise, axis=1)
self.shell_mean = normalized_mean
self.num_instances = normalized_features.shape[0]
self.noise_mean = np.median(noise)
self.noise_std = np.median(np.absolute(noise - np.mean(noise)))
def score(self, feat, global_mean, with_norm=True):
"""Perform a distance score based on how far a feature is from the shell
"""
        # smaller scores are better, multiply by -1 to reverse that
score = self.__generate_one_class_mean_score(feat, global_mean, with_norm=with_norm)
return -score
def __generate_one_class_mean_score(self, feat, global_mean, with_norm=True):
"""Perform a distance score based on how far a feature is from the shell.
"""
if with_norm:
feat_, _ = normalize(feat, global_mean)
else:
feat_ = feat.copy()
feat_ = feat_ - self.shell_mean
feat_ = np.linalg.norm(feat_, axis=1)
shell_score = (feat_ - self.noise_mean) / self.noise_std
return shell_score
def update(self, feat, global_mean):
"""Perform an update to shell parameter. To be used for 1 data point of feature to
update the model.
"""
self.raw_features = np.concatenate([self.raw_features,
feat])
normalized_features, _ = normalize(self.raw_features, global_mean)
        self.shell_mean = np.mean(normalized_features,
                                  axis=0,
                                  keepdims=True)
        self.num_instances = normalized_features.shape[0]
        noise = normalized_features - self.shell_mean
        noise = np.linalg.norm(noise, axis=1)
self.noise_mean = np.median(noise)
self.noise_std = np.median(np.absolute(noise - np.mean(noise)))
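# --- Editor's illustrative sketch (added for clarity, not part of the original module) ---
def _shell_model_usage_sketch():
    """Hypothetical helper showing the intended ShellModel workflow on random features.
    It assumes `normalize` (imported from utils) mean-shifts and then L2-normalises rows,
    in the same way `normIt` defined later in this file does.
    """
    rng = np.random.RandomState(0)
    features = rng.rand(32, 8)                  # stand-in for CNN features of a single class
    global_mean = np.mean(features, axis=0, keepdims=True)
    shell = ShellModel()
    shell.raw_features = features               # fit() reads raw_features directly
    shell.fit(global_mean)                      # computes the shell centre and median/MAD noise stats
    query = rng.rand(1, 8)
    return shell.score(query, global_mean)      # less negative score == closer to this shell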
class ShellFamily():
def __init__(self):
self.classifiers = OrderedDict()
self.feature_extractor_model = None
self.preprocessor = None
self.shell_file = None
self.global_mean = None
self.weighted_mean = None
self.instances = 0
self.mapping = []
def create_preprocessor(self, feature_extractor_model):
if feature_extractor_model in ACCEPTED_PREPROCESSORS:
model = Sequential()
            if feature_extractor_model == 'vgg16':
                vgg = VGG16(weights='imagenet', include_top=False)
                model.add(vgg)
                model.add(GlobalAvgPool2D())  # pool to a flat feature vector, matching the other backbones
elif feature_extractor_model == 'resnet50':
resnet = ResNet50(weights='imagenet', include_top=False)
model.add(resnet)
model.add(GlobalAvgPool2D())
elif feature_extractor_model == 'mobilenet':
mobilenet = MobileNet(weights='imagenet', include_top=False)
model.add(mobilenet)
model.add(GlobalAvgPool2D())
self.preprocessor = model
self.feature_extractor_model = feature_extractor_model
self.preprocessor_preprocess_function = PREPROCESSORS_PREPROCESS_FUNCTIONS[self.feature_extractor_model]
else:
raise ValueError("Preprocessor model not found! Please enter the following models: {}".format(ACCEPTED_PREPROCESSORS))
def load(self, shell_file):
with open(shell_file, "rb") as saved_data:
shell_family_configuration = pickle.load(saved_data)
for class_name in shell_family_configuration['classifiers']:
self.classifiers[class_name] = shell_family_configuration['classifiers'][class_name]
self.feature_extractor_model = shell_family_configuration['feature_extractor_model']
self.mapping = shell_family_configuration['mapping']
self.global_mean = shell_family_configuration['global_mean']
self.weighted_mean = shell_family_configuration['weighted_mean']
self.instances = shell_family_configuration['instances']
self.shell_file = shell_file
self.create_preprocessor(self.feature_extractor_model)
def fit(self, data_generator, raw_mapping, output_datafile):
"""To be used when creating an entire new family of shells
"""
# Generate empty shell if needed
for class_index in range(len(raw_mapping)):
if raw_mapping[class_index] not in self.classifiers:
self.classifiers[raw_mapping[class_index]] = ShellModel()
self.mapping.append(raw_mapping[class_index])
# Extract features and prepare for shell creation
for data in data_generator:
images = data[0]
classes = data[2]
unique_classes = np.unique(classes)
for class_index in unique_classes:
# Generate class features
indexes = np.where(classes == class_index)
target_images = images[indexes]
class_features = self.preprocessor.predict(target_images)
# Update shell family params
if self.global_mean is None:
self.global_mean = np.mean(class_features,
axis=0,
keepdims=True)
else:
### TODO: Use weighted mean to calculate the new mean ###
self.global_mean = np.mean(
np.concatenate(
[
np.repeat(
self.global_mean,
self.instances,
axis=0
),
class_features
]
),
axis=0,
keepdims=True
)
self.instances += class_features.shape[0]
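                # Editor's note: the np.repeat construction above materialises the previous mean
                # self.instances times; an equivalent, cheaper running-mean update is
                #   new_mean = (old_mean * n + class_features.sum(axis=0, keepdims=True)) / (n + k)
                # where n is the previous self.instances and k = class_features.shape[0].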
class_name = raw_mapping[class_index]
# Append raw features to classifiers
if self.classifiers[class_name].raw_features is None:
self.classifiers[class_name].raw_features = class_features
else:
self.classifiers[class_name].raw_features =\
np.concatenate([self.classifiers[class_name].raw_features,
class_features])
# Create shells from features
self.update_shells(self.global_mean)
self.save(output_datafile)
self.shell_file = output_datafile
def update_shells(self, global_mean):
for shell_name in self.classifiers:
self.classifiers[shell_name].fit(global_mean)
def score(self, feat, threshold, with_update=True, return_full_results=True):
results = OrderedDict()
best_class_name = None
best_class_index = None
best_result = -9999999
for class_name, shell in self.classifiers.items():
results[class_name] = shell.score(feat, self.global_mean)
if results[class_name] > best_result:
best_class_name = class_name
best_result = results[class_name]
best_class_index = self.mapping.index(class_name)
if with_update:
self.global_mean = (self.global_mean * self.instances + feat) / (self.instances + 1)
self.instances += 1
self.classifiers[best_class_name].update(feat, self.global_mean)
if return_full_results:
return best_class_index, best_class_name, best_result, results
else:
return best_class_index, best_class_name, best_result
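    # Editor's note: ShellModel.score() returns a negated distance, so in the loop above a larger
    # value means the feature lies closer to that class's shell; when with_update is True the
    # running global-mean update assumes `feat` is a single row vector.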
# def scoreV2(self, feat, threshold, with_norm=True, with_update=True, add_new_class=True):
# results = OrderedDict()
# best_class_name = None
# best_class_index = None
# best_result = -9999999
# for class_name, shell in self.classifiers.items():
# results[class_name] = shell.score(feat, self.global_mean)
# if results[class_name] > best_result:
# best_class_name = class_name
# best_result = results[class_name]
# best_class_index = self.mapping.index(class_name)
# if best_result >= threshold:
# if with_update:
# self.global_mean = (self.global_mean * self.instances + feat) / (self.instances + 1)
# self.instances += 1
# self.classifiers[best_class_name].update(feat, self.global_mean)
# else:
# if add_new_class:
# self.create_new_class(feat)
# return best_class_index, best_class_name, best_result
def save(self, output_filename):
save_data = {'classifiers': self.classifiers,
'feature_extractor_model': self.feature_extractor_model,
'mapping': self.mapping,
'global_mean': self.global_mean,
'weighted_mean': self.weighted_mean,
'instances': self.instances}
with open(output_filename, "wb") as data_file:
pickle.dump(save_data, data_file)
# def create_new_class(self, feat, new_class_name):
# """To be used when a family of shell is already present
# """
# shell = ShellModel()
# shell.fit(feat)
# self.mapping.append(new_class_name)
# self.classifiers[new_class_name] = shell
# with open(self.mapping_file, "w") as data_file:
# for class_name in self.mapping:
# data_file.write("%s\n" % class_name)
# with open(self.shell_file, "wb") as data_file:
# pickle.dump(self.classifiers, data_file)
def delete_class(self, class_to_delete):
"""To be used when a shell needs to be deleted
"""
all_features_total_value = self.global_mean * self.instances
class_to_delete_raw_features_sum = np.sum(self.classifiers[class_to_delete].raw_features, axis=0)
class_to_delete_raw_features_sum = np.expand_dims(class_to_delete_raw_features_sum, 0)
self.global_mean = (all_features_total_value - class_to_delete_raw_features_sum) / (self.instances - self.classifiers[class_to_delete].num_instances)
self.instances -= self.classifiers[class_to_delete].num_instances
del self.classifiers[class_to_delete]
# Re update all shell configurations
self.update_shells(self.global_mean)
# Save new configuration
self.save(self.shell_file)
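        # Editor's note on the identity used above: with N = old self.instances and
        # S_c = sum of the deleted class's raw features, the new mean is
        #   (global_mean * N - S_c) / (N - n_c),
        # i.e. exactly the mean of the features that remain.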
def normIt(data, m=None):
    nData = data.copy()
    # nData = data / np.linalg.norm(data, axis=1, keepdims=True)
    if m is None:
        m = np.mean(nData, axis=0, keepdims=True)
    nData = nData - m
    nData = nData / np.linalg.norm(nData, axis=1, keepdims=True)
    return nData, m
# def ocMean(feat):
# m_ = np.mean(feat, axis=0, keepdims=True)
# d = feat - m_
# d = np.linalg.norm(d, axis=1)
# model ={'clusMean': m_,
# 'numInstance': feat.shape[0],
# 'noiseMean': np.median(d),
# 'noiseStd':np.median(np.absolute(d-np.mean(d))),
# 'mean_norm': 0}
# return model
# def ocMeanScore(feat, model, withNorm=True):
# if withNorm:
# feat_, _ = normalize(feat, model['mean_norm'])
# else:
# feat_ = feat.copy()
# feat_ = feat_ - model['clusMean']
# feat_ = np.linalg.norm(feat_, axis=1)
# ss = (feat_ - model['noiseMean'])/model['noiseStd']
# return ss
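# NOTE (editor): evalOneClassMean below refers to OneClassMean and evaluate, which are not defined
# or imported in this module (the `evaluate` import is commented out above); it appears to be
# legacy evaluation code kept for reference rather than something called by the app.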
def evalOneClassMean(testFeat, testGt, trainFeat, trainGt, verbose=True, withNorm=True):
if type(trainFeat) is list:
featList = trainFeat.copy()
numClass = len(featList)
else:
featList = []
numClass = np.max(trainGt)+1
for i in range(numClass):
featList.append(trainFeat[trainGt==i])
trainTime = 0
testTime = 0
scores = np.zeros([testFeat.shape[0], numClass])
for i in range(numClass):
sOCM = OneClassMean()
# training
start = timeit.default_timer()
sOCM.fit(featList[i])
stop = timeit.default_timer()
trainTime = trainTime + stop-start
# testing
start = timeit.default_timer()
scores[:,i] = sOCM.score(testFeat, withNorm=withNorm)
stop = timeit.default_timer()
testTime = testTime + stop-start
trainTime = trainTime/numClass
testTime = testTime/numClass
if verbose:
print('Train Time: ', trainTime)
print('Test Time: ', testTime)
labelEst = np.argmax(scores, axis=1)
meanEST, mapEST, rocEST = evaluate(labelEst, scores, testGt, verbose)
return meanEST, mapEST, rocEST
# class StackedOneClassMean(OneClassMean):
# """Create stacked shell of one class mean.
# """
# def __init__(self):
# self.classifers = []
# def fit(self, feat, target, multiMeans):
# self.classifers = self.__generate_stacked_one_class_mean(feat, target, multiMeans)
# def __generate_stacked_one_class_mean(self, feat, target, m_all):
# _, neighs = sorted_neighbors_of_i(m_all, target)
# classifers = []
# current_shell = []
# for i in neighs:
# current_shell.append(i)
# if len(current_shell)> 1:
# m1 = np.mean(m_all[current_shell,:], axis =0, keepdims=True)
# tf = feat-m1
# tf = tf/np.linalg.norm(tf, axis =1, keepdims=True)
# model = super(StackedOneClassMean, self).__generate_one_class_mean(tf)
# model['mean_norm'] = m1
# classifers.append(model)
# tf = feat/np.linalg.norm(feat, axis =1, keepdims=True)
# model = super(StackedOneClassMean, self).__generate_one_class_mean(tf)
# model['mean_norm'] = np.zeros([1, feat.shape[1]])
# classifers.append(model)
# return classifers
# def score(self, testFeat, with_norm=True):
# scores = self.__generate_stacked_one_class_mean_score(testFeat, with_norm)
# labels = np.argmin(scores, axis=1)
# return labels, -scores
# def __generate_stacked_one_class_mean_score(self, feat, with_norm=True):
# score = np.zeros([feat.shape[0], len(self.classifers)])
# for i in range(len(self.classifers)):
# score[:,i] = super(StackedOneClassMean, self).__generate_one_class_mean_score(feat, self.classifers[i])
# return score
# def stackedMean(train_feat, target, m_all):
# _, neighs = sorted_neighbors_of_i(m_all, target)
# classifers = []
# current_shell = []
# for i in neighs:
# current_shell.append(i)
# if len(current_shell)> 1:
# m1 = np.mean(m_all[current_shell,:], axis =0, keepdims=True)
# tf = train_feat-m1
# tf = tf/np.linalg.norm(tf, axis =1, keepdims=True)
# model = ocMean(tf)
# model['mean_norm'] = m1
# classifers.append(model)
# tf = train_feat/np.linalg.norm(train_feat, axis =1, keepdims=True)
# model = ocMean(tf)
# model['mean_norm'] = np.zeros([1,train_feat.shape[1]])
# classifers.append(model)
# return classifers
# def stackedMeanScore(classifers, test_feat):
# score = np.zeros([test_feat.shape[0], len(classifers)])
# for i in range(len(classifers)):
# score[:,i] = ocMeanScore(test_feat, classifers[i])
# return score
# def evalStackedOneClassMean(testFeat, testGt, trainFeat, trainGt, verbose=True):
# sOCM = StackedOneClassMean()
# sOCM.train(trainFeat, trainGt)
# labelEst, scores = sOCM.score(testFeat)
# meanEST, mapEST, rocEST = evaluate(labelEst, scores, testGt, verbose)
# return meanEST, mapEST, rocEST
# class StackedMultiClassMean(StackedOneClassMean):
# """Create multi class stacked shell class mean.
# """
# def __init__(self):
# self.classifers = []
# def fit(self, feat, gt=-1):
# if type(feat) is list:
# featList = feat.copy()
# numClass = len(featList)
# else:
# featList = fit_to_list(feat, gt)
# numClass = len(featList)
# allMeans = np.zeros([numClass, featList[0].shape[1]])
# for i in range(numClass):
# allMeans[i,:] = np.mean(feat[i], axis =0)
# self.classifers = self.__generate_stacked_multi_class_mean(featList, allMeans)
# def __generate_stacked_multi_class_mean(self, featList, allMeans):
# numClass = len(allMeans)
# allClassifiers = []
# for i in range(numClass):
# target = i
# classifers = super(StackedMultiClassMean, self).__generate_stacked_one_class_mean(featList[target], target, allMeans)
# allClassifiers.append(classifers)
# return allClassifiers
# # def trainSingleClass(self, feat, target, multiMeans):
# # classifier = stackedMean(feat, target, multiMeans)
# # return classifier
# def score(self, testFeat):
# scores = self.__generate_stacked_multi_class_mean_score(testFeat, self.classifers)
# labels = np.argmin(scores, axis=1)
# return labels, -scores
# def __generate_stacked_multi_class_mean_score(self, testFeat, allClassifiers):
# numClass = len(allClassifiers)
# scores = np.zeros([testFeat.shape[0], numClass])
# for i in range(numClass):
# stacked_one_class_shell = super(StackedMultiClassMean, self).__generate_stacked_one_class_mean_score(allClassifiers[i], testFeat)
# stacked_one_class_shell = np.mean(stacked_one_class_shell, axis =1)
# scores[:,i] = stacked_one_class_shell
# return scores
# def multiStackedOneClassMean(trainFeat, allMeans):
# numClass = len(allMeans)
# allClassifiers = []
# for i in range(numClass):
# target = i
# classifers = stackedMean(trainFeat[target], target, allMeans)
# allClassifiers.append(classifers)
# return allClassifiers
# def scoreMultiStackedOneClassMean(testFeat, allClassifiers):
# numClass = len(allClassifiers)
# scores = np.zeros([testFeat.shape[0], numClass])
# for i in range(numClass):
# s = stackedMeanScore(allClassifiers[i], testFeat)
# s = np.mean(s, axis =1)
# scores[:,i] = s
# return scores
``` |
{
"source": "jianming93/is613-face-recognition-demo",
"score": 3
} |
#### File: jianming93/is613-face-recognition-demo/demo.py
```python
import os
import base64
import logging
import face_recognition
import numpy as np
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
from flask import Flask, Response, redirect
import cv2
import sqlalchemy as db
class VideoCamera(object):
def __init__(self, resize_ratio=0.25, distance_threshold=0.5):
# Distance threshold set 0.5 after testing (still slightly risky if lookalike person exist)
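        # Index 99 appears to be a deliberately invalid device so the camera stays closed
        # until start_video() reopens it on index 0 (stop_video() switches back to 99).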
self.video = cv2.VideoCapture(99)
self.process_this_frame = True
self.resize_ratio = resize_ratio
self.distance_threshold = distance_threshold
self.match_found = False
self.match_found_image = None
self.match_found_names = []
def __del__(self):
self.video.release()
def __convert_and_resize(self, frame):
# Resize frame of video to 1/4 size for faster face recognition processing
small_frame = cv2.resize(frame, (0, 0), fx=self.resize_ratio, fy=self.resize_ratio)
# Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
rgb_small_frame = small_frame[:, :, ::-1]
return rgb_small_frame
def __face_matching(self, rgb_small_frame):
# Find all the faces and face encodings in the current frame of video
face_locations = face_recognition.face_locations(rgb_small_frame)
face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
        face_detected_names = []
face_detected_distances = []
for face_encoding in face_encodings:
# See if the face is a match for the known face(s)
matches = face_recognition.compare_faces(known_face_encodings=known_face_encodings,
face_encoding_to_check=face_encoding,
tolerance=self.distance_threshold)
name = "Unknown"
# Or instead, use the known face with the smallest distance to the new face
face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
best_match_index = np.argmin(face_distances)
if matches[best_match_index]:
name = known_face_names[best_match_index]
            if name != "Unknown":
                self.match_found = True
                self.match_found_name = name
            face_detected_names.append(name)
            face_detected_distances.append(face_distances[best_match_index])
        return face_locations, face_detected_names, face_detected_distances
def __draw_bounding_boxes(self, frame, face_locations, face_names, face_distances):
# Display the results
for (top, right, bottom, left), name, distance in zip(face_locations, face_names, face_distances):
# Scale back up face locations since the frame we detected in was scaled to 1/4 size
top *= int(1 // self.resize_ratio)
right *= int(1 // self.resize_ratio)
bottom *= int(1 // self.resize_ratio)
left *= int(1 // self.resize_ratio)
# Draw a box around the face
cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
# Draw a label with a name below the face
cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(frame, name + " " + str(round(distance, 2)), (left + 6, bottom - 6), font, 0.75, (255, 255, 255), 1)
return frame
def process_frame(self):
# Grab a single frame of video
ret, frame = self.video.read()
# Preprocessing (reduce size for faster processing and changing from BGR to RGB channels)
rgb_small_frame = self.__convert_and_resize(frame)
# Only process every other frame of video to save time
if self.process_this_frame:
# Generate face matches
face_locations, face_names, face_distances = self.__face_matching(rgb_small_frame)
# Display the results
frame = self.__draw_bounding_boxes(frame, face_locations, face_names, face_distances)
if len(face_names) == 1:
                if face_names[0] != "Unknown":
                    self.match_found = True
                    self.match_found_name = face_names[0]
ret, jpeg = cv2.imencode('.jpg', frame)
        # Constantly alternate frames to reduce processing load (only every other frame is analysed)
self.process_this_frame = not self.process_this_frame
# If match found, store match
if self.match_found and self.match_found_image is None:
self.match_found_image = jpeg.tobytes()
return jpeg.tobytes()
def start_video(self):
self.video = cv2.VideoCapture(0)
server.logger.info("Video Started")
def stop_video(self):
self.video = cv2.VideoCapture(99)
server.logger.info("Video Stopped")
def reset_status(self):
self.match_found = False
self.match_found_image = None
self.match_found_name = None
def gen(camera):
while True:
frame = camera.process_frame()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
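# Editor's note: gen() produces an MJPEG stream; each yielded chunk is a single JPEG frame in a
# multipart section with boundary "frame", matching the
# 'multipart/x-mixed-replace; boundary=frame' mimetype served by the /video_feed route below.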
### Global Variables ###
DATA_FOLDER = "data"
DB_NAME = 'sqlite:///demo.db'
TABLE_NAME = 'users'
INTERVAL_STARTUP = 5
global camera
### Define app server ###
server = Flask(__name__)
app = dash.Dash(__name__, server=server, external_stylesheets=[dbc.themes.DARKLY])
### Set logging level ###
server.logger.setLevel(logging.INFO)
### Create DB if needed ###
server.logger.info("Connecting to database")
engine = db.create_engine(DB_NAME)
connection = engine.connect()
metadata = db.MetaData()
server.logger.info("Creating users table if needed")
face_table = db.Table(
TABLE_NAME, metadata,
db.Column('id', db.Integer, primary_key = True),
db.Column('name', db.String),
db.Column('image_filepath', db.String),
db.Column('face_encodings', db.String)
)
metadata.create_all(engine)
### Extract filenames for all users to determine if there is a need to update db ###
server.logger.info("Updating database based on images present in {} folder specified".format(DATA_FOLDER))
users_table = db.Table(TABLE_NAME, metadata, autoload=True, autoload_with=engine)
image_filepath_query = db.select([users_table.c.image_filepath])
image_filepath_result = connection.execute(image_filepath_query).fetchall()
### Check if file exists, if not update db ###
for filename in os.listdir(DATA_FOLDER):
image_filepath = os.path.join(DATA_FOLDER, filename)
if image_filepath not in image_filepath_result:
face_image = face_recognition.load_image_file(image_filepath)
face_encoding = face_recognition.face_encodings(face_image)[0]
face_encoding_string = list(map(lambda x : str(x), face_encoding))
insert_query = users_table.insert().values(
name=filename.split('.')[0],
image_filepath=image_filepath,
face_encodings=",".join(face_encoding_string)
)
connection.execute(insert_query)
### Query to load data for app ###
server.logger.info("Extracting all users information from database for application")
users_query = db.select([users_table])
users_result = connection.execute(users_query).fetchall()
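# Editor's note: face_recognition encodings are 128-dimensional float vectors; they were stored
# above as comma-joined strings, so the lines below parse them back into numpy arrays.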
known_face_encodings = [np.array(list(map(lambda y: float(y), x[-1].split(',')))) for x in users_result]
known_face_names = [x[1] for x in users_result]
known_image_filepaths = [x[2] for x in users_result]
### Define camera ###
camera = VideoCamera()
server.logger.info("Application Ready!")
### Define layouts ###
initial_layout = html.Div(id="initial-layout", children=[
html.H3(id="intro-header", children="Welcome to the facial recognition demo!"),
html.Br(),
html.P(id="intro-statement", children="The purpose of this demo is to illustrate how facial recognition works as well as its weaknesses."),
html.P(id="intro-begin-statement", children="To begin the demo, please click on the button below."),
html.Br(),
html.Br(),
html.Br(),
html.Div(id="intro-button-container", className="container", children=
dbc.Button(id="intro-button", color="primary", className="mr-1", children="Begin Demo", n_clicks=0)
)
])
video_stream_layout = html.Div(id="video-stream-layout", children=[
html.Img(id="vid-stream", src="/")
])
detected_layout = html.Div(id="detected-layout", children=[
html.H2(id="detected-header"),
html.Div(className="container", children=[
dbc.Row([
html.Div(id="detected-face-container", children=[
html.Img(id="detected-face"),
html.H5(id="detected-face-text", children="Detected Face")
]),
html.Div(id="database-face-container", children=[
html.Img(id="database-face"),
html.H5(id="database-face-text", children="Database Face")
])
])
]),
html.Br(),
html.Br(),
html.Div(id="detected-button-container", className="container", children=
dbc.Button(id="return-start", color="primary", className="mr-1", children="Return to start", n_clicks=0)
)
])
### Specify initial layout ###
app.layout = html.Div(id="main-body", children=[
dbc.NavbarSimple(
children=[
dbc.DropdownMenu(
children=[
dbc.NavItem(dbc.NavLink("Basic", id="basic-link", href="/", n_clicks=0)),
dbc.NavItem(dbc.NavLink("Anti Spoofing", id="anti-spoofing-link", href="http://localhost:8040", n_clicks=0))
],
nav=True,
in_navbar=True,
label="Select demo",
)
],
brand="Group 5 Demo",
color="dark",
dark=True,
id="dropdown-menu"
),
html.Br(),
html.Br(),
dcc.Interval(id="checker", interval=1000, n_intervals=0, disabled=True),
html.Div(id='page-content', className='container d-flex align-items-center justify-content-center', children=[
dbc.Card(
[
dbc.CardHeader("Face Recognition Demo", className="card-title"),
dbc.CardBody(
[
html.Div(id="card-content", className="container", children=[
initial_layout,
video_stream_layout,
detected_layout
])
]
),
]
)
])
])
### Callbacks and routes ###
@server.route('/video_feed')
def video_feed():
return Response(gen(camera),
mimetype='multipart/x-mixed-replace; boundary=frame')
@app.callback(
[Output(component_id='initial-layout', component_property='style'),
Output(component_id='video-stream-layout', component_property='style'),
Output(component_id='detected-layout', component_property='style'),
Output(component_id='checker', component_property='disabled'),
Output(component_id='vid-stream', component_property='src'),
Output(component_id='detected-header', component_property='children'),
Output(component_id='detected-face', component_property='src'),
Output(component_id='database-face', component_property='src')],
[Input(component_id='intro-button', component_property='n_clicks'),
Input(component_id='checker', component_property='n_intervals'),
Input(component_id='return-start', component_property='n_clicks')],
[State(component_id='checker', component_property='disabled')]
)
def layout_update(intro_click, interval, return_click, checker_state):
if intro_click is None and interval is None and return_click is None:
raise PreventUpdate
ctx = dash.callback_context
trigger_component = ctx.triggered[0]['prop_id'].split('.')[0]
if trigger_component == "intro-button":
camera.start_video()
return ({'display': 'none'}, {'display': 'block'}, {'display': 'none'}, False, "/video_feed", "", "", "")
elif trigger_component == "checker":
if interval < INTERVAL_STARTUP:
camera.reset_status()
if not checker_state:
if camera.match_found and interval > INTERVAL_STARTUP:
detected_header = "User detected: {}".format(camera.match_found_name)
detected_image = "data:image/jpeg;base64,{}".format(base64.b64encode(camera.match_found_image).decode())
database_image_index = known_face_names.index(camera.match_found_name)
database_image = "data:image/jpeg;base64,{}".format(base64.b64encode(open(known_image_filepaths[database_image_index], 'rb').read()).decode())
camera.reset_status()
return ({'display': 'none'}, {'display': 'none'}, {'display': 'block'}, True, "/", detected_header, detected_image, database_image)
else:
return ({'display': 'none'}, {'display': 'block'}, {'display': 'none'}, False, "/video_feed", "", "", "")
else:
raise PreventUpdate
elif trigger_component == "return-start":
return ({'display': 'block'}, {'display': 'none'}, {'display': 'none'}, True, "/", "", "", "")
else:
raise PreventUpdate
@app.callback(
Output(component_id='checker', component_property='n_intervals'),
[Input(component_id='intro-button', component_property='n_clicks'),
Input(component_id='return-start', component_property='n_clicks')]
)
def reset_intervals(intro_n_clicks, return_n_clicks):
if intro_n_clicks is None and return_n_clicks is None:
raise PreventUpdate
return 0
@app.callback(
Output(component_id='anti-spoofing-link', component_property='href'),
[Input(component_id='anti-spoofing-link', component_property='n_clicks')],
[State(component_id='anti-spoofing-link', component_property='href')]
)
def destroy_camera(n_clicks, href):
if n_clicks is None or n_clicks == 0:
raise PreventUpdate
camera.stop_video()
return href
if __name__ == '__main__':
app.run_server(host='0.0.0.0', port=8050, debug=True, dev_tools_ui=False, dev_tools_props_check=False)
``` |