{
"source": "jiatianxu/pyLLE",
"score": 2
} |
#### File: jiatianxu/pyLLE/setup.py
```python
from setuptools import setup
from setuptools.command.install import install
from setuptools.command.build_py import build_py
import subprocess as sub
import sys
import os
class MyInstall(install):
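    # Custom install command: after the regular setuptools install step, run
    # InstallPkg.jl with a platform-appropriate Julia executable so the Julia-side
    # dependencies of pyLLE are set up as well.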
def run(self):
install.run(self)
for ii in range(20):
print('-'*10)
        # 'linux2' was the Python 2 platform string and never matches on Python 3, which
        # left `julia` undefined there; default to a `julia` on PATH (macOS/Linux) and
        # only special-case Windows.
        julia = 'julia'
        if sys.platform == 'win32':
            julia = os.path.expanduser('~') + '\\AppData\\Local\\Julia-0.6.4\\bin\\julia.exe'
sub.call([julia, 'InstallPkg.jl'])
setup(name='pyLLE',
version='2.1.1',
description='LLE Solver',
url='https://github.com/gregmoille/pyLLE',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
long_description='',
packages=['pyLLE'],
install_requires=[
'scipy',
'plotly',
'numpy',
'matplotlib',
'h5py',
'prettytable',
],
package_data={'': ['*.jl']},
include_package_data=True,
zip_safe=False,
cmdclass={'install': MyInstall},
classifiers=(
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
),)
``` |
{
"source": "JiatongBao/DRLSorting",
"score": 3
} |
#### File: JiatongBao/DRLSorting/gripper_example.py
```python
import robotiq_gripper
import time
import sys
ip = "192.168.8.113" # actual ip of the UR robot
def log_info(gripper):
print(f"Pos: {str(gripper.get_current_position()): >3} "
f"Open: {gripper.is_open(): <2} "
f"Closed: {gripper.is_closed(): <2} ")
if len(sys.argv) != 2:
    print('Usage: python gripper_example.py <0|1>  (0: fully close, 1: move to position 120)')
sys.exit()
print("Creating gripper...")
gripper = robotiq_gripper.RobotiqGripper()
print("Connecting to gripper...")
gripper.connect(ip, 63352)
print("Activating gripper...")
gripper.activate(auto_calibrate=False)
print("Testing gripper...")
if sys.argv[1] == '0':
gripper.move_and_wait_for_pos(255, 255, 255)
log_info(gripper)
if sys.argv[1] == '1':
gripper.move_and_wait_for_pos(120, 255, 255)
log_info(gripper)
print(gripper.get_current_position())
```
#### File: DRLSorting/real/camera.py
```python
import sys
# sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
import socket
import numpy as np
import cv2
import os
import time
import struct
class Camera(object):
def __init__(self):
# Data options (change me)
self.im_height = 720
self.im_width = 1280
self.tcp_host_ip = '127.0.0.1'
self.tcp_port = 50000
        self.buffer_size = 4098  # receive chunk size in bytes (roughly 4 KiB)
# Connect to server
self.tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.tcp_socket.connect((self.tcp_host_ip, self.tcp_port))
self.intrinsics = None
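        # Fetch one frame right away so the camera intrinsics are populated on construction.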
self.get_data()
def get_data(self):
# Ping the server with anything
self.tcp_socket.send(b'asdf')
# Fetch TCP data:
# color camera intrinsics, 9 floats, number of bytes: 9 x 4
# depth scale for converting depth from uint16 to float, 1 float, number of bytes: 4
# depth image, self.im_width x self.im_height uint16, number of bytes: self.im_width x self.im_height x 2
# color image, self.im_width x self.im_height x 3 uint8, number of bytes: self.im_width x self.im_height x 3
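        # Total payload: 10 floats (9 intrinsics + 1 depth scale) at 4 bytes each, plus
        # 2 bytes per depth pixel and 3 bytes per color pixel -- hence the loop bound below.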
data = b''
while len(data) < (10*4 + self.im_height*self.im_width*5):
data += self.tcp_socket.recv(self.buffer_size)
# Reorganize TCP data into color and depth frame
        # np.fromstring is deprecated for binary input; np.frombuffer parses the same bytes
        self.intrinsics = np.frombuffer(data[0:(9*4)], np.float32).reshape(3, 3)
        depth_scale = np.frombuffer(data[(9*4):(10*4)], np.float32)[0]
        depth_img = np.frombuffer(data[(10*4):((10*4)+self.im_width*self.im_height*2)], np.uint16).reshape(self.im_height, self.im_width)
        color_img = np.frombuffer(data[((10*4)+self.im_width*self.im_height*2):], np.uint8).reshape(self.im_height, self.im_width, 3)
depth_img = depth_img.astype(float) * depth_scale
return color_img, depth_img
# DEPRECATED CAMERA CLASS FOR REALSENSE WITH ROS
# ----------------------------------------------
# import rospy
# from realsense_camera.msg import StreamData
# class Camera(object):
# def __init__(self):
# # Data options (change me)
# self.im_height = 720
# self.im_width = 1280
# # RGB-D data variables
# self.color_data = np.zeros((self.im_height,self.im_width,3))
# self.depth_data = np.zeros((self.im_height,self.im_width))
# self.intrinsics = np.zeros((3,3))
# # Start ROS subscriber to fetch RealSense RGB-D data
# rospy.init_node('listener', anonymous=True)
# rospy.Subscriber("/realsense_camera/stream", StreamData, self.realsense_stream_callback)
# # Recording variables
# self.frame_idx = 0
# self.is_recording = False
# self.recording_directory = ''
# # ROS subscriber callback function
# def realsense_stream_callback(self, data):
# tmp_color_data = np.asarray(bytearray(data.color))
# tmp_color_data.shape = (self.im_height,self.im_width,3)
# tmp_depth_data = np.asarray(data.depth)
# tmp_depth_data.shape = (self.im_height,self.im_width)
# tmp_depth_data = tmp_depth_data.astype(float)/10000
# tmp_intrinsics = np.asarray(data.intrinsics)
# tmp_intrinsics.shape = (3,3)
# self.color_data = tmp_color_data
# self.depth_data = tmp_depth_data
# self.intrinsics = tmp_intrinsics
# if self.is_recording:
# tmp_color_image = cv2.cvtColor(tmp_color_data, cv2.COLOR_RGB2BGR)
# cv2.imwrite(os.path.join(self.recording_directory, '%06d.color.png' % (self.frame_idx)), tmp_color_image)
# tmp_depth_image = np.round(tmp_depth_data * 10000).astype(np.uint16) # Save depth in 1e-4 meters
# cv2.imwrite(os.path.join(self.recording_directory, '%06d.depth.png' % (self.frame_idx)), tmp_depth_image)
# self.frame_idx += 1
# else:
# self.frame_idx = 0
# time.sleep(0.1)
# # Start/stop recording RGB-D video stream
# def start_recording(self, directory):
# self.recording_directory = directory
# self.is_recording = True
# def stop_recording(self):
# self.is_recording = False
``` |
{
"source": "jiaulislam/AutoBotBMCRemedy",
"score": 3
} |
#### File: AutoBotBMCRemedy/prettify/prettify_ldma.py
```python
from datetime import datetime
from rich import box
from rich.align import Align
from rich.console import Console, RenderGroup
from rich.layout import Layout
from rich.panel import Panel
from rich.prompt import Prompt
from rich.table import Table
from rich.text import Text
console = Console()
class Header:
"""Display header with clock."""
def __init__(self, header_text: str) -> None:
self.header_text = header_text
def __rich__(self) -> Panel:
grid = Table.grid(expand=True)
grid.add_column(justify="center", ratio=1)
grid.add_column(justify="right")
grid.add_row(f"\t\t\t{self.header_text}",
datetime.now().ctime().replace(":", "[blink]:[/]"),
)
return Panel(grid, style="white", border_style="red", title="AUTOBOT")
def make_sponsor_message() -> Panel:
"""Some example content."""
sponsor_message = Table.grid(padding=1)
sponsor_message.add_column(style="green", justify="right")
sponsor_message.add_column(no_wrap=True)
sponsor_message.add_row(
"✔ GitHub 🤓 ",
"[u blue link=https://github.com/jiaulislam]https://github.com/jiaulislam",
)
intro_message = Text.from_markup(
"""Consider supporting my work via Github 🤘 🤘 🤘. - <NAME>"""
)
message = Table.grid(padding=1)
message.add_column()
message.add_column(no_wrap=True)
message.add_row(intro_message, sponsor_message)
message_panel = Panel(
Align.center(
RenderGroup(intro_message, "\n", Align.center(sponsor_message)),
vertical="middle",
),
box=box.ROUNDED,
padding=(1, 2),
title="[b red]Thanks for using my application!",
border_style="bright_blue",
)
return message_panel
def get_choice() -> int:
choice = Prompt.ask("Enter your choice", choices=['1', '2', '0'])
return int(choice)
class MainMenuLayout:
""" Return the main menu layout """
def __init__(self) -> None:
self.layout = Layout()
self.table = Table(title="LDMA PARSER ACTIONS", expand=True)
def __rich__(self) -> Layout:
self.layout.split(
Layout(name="head", size=3),
Layout(name="body"),
)
self.layout["body"].split_column(
Layout(name='should_be_unused', visible=False),
Layout(name='table2'),
Layout(name='action_table'),
)
self.table.add_column("Action Button", justify="center", style="cyan", no_wrap=True)
self.table.add_column("Actions", style="magenta", justify="center")
self.table.add_row("1", "TO SEARCH WITH LINK CODE")
self.table.add_row("2", "TO SEARCH WITH SITE ID")
self.table.add_row("0", "BACK TO MAIN MENU")
self.layout["head"].update(Header("LDMA-PARSER"))
self.layout["action_table"].update(self.table)
self.layout["table2"].update(make_sponsor_message())
return self.layout
```
#### File: AutoBotBMCRemedy/utilites/locators.py
```python
from selenium.webdriver.common.by import By
class LoginPageLocators:
""" Login Page related XPATH class Variable are declared here """
LOGO_IMG = (By.XPATH, "//img[@src='images/login_logo.gif']")
USERNAME_TEXTBOX = (By.XPATH, "//*[@id='username-id']")
PASSWORD_TEXTBOX = (By.XPATH, "//*[@id='pwd-id']")
LOGIN_BUTTON = (By.XPATH, "//input[@name='login']")
class HomePageLocators:
""" All the home page locators should be kept here """
IT_HOME_TEXT = (By.XPATH, "//label[@id='label80137'][contains(text(), 'IT Home')]")
APPLICATION_BUTTON = (By.XPATH, "//*[@id='reg_img_304316340']")
CHANGE_MANAGEMENT_LIST = (By.XPATH, "//*[text()='Change Management']")
NEW_CHANGE_LIST = (By.XPATH, "//*[text()='New Change']")
HOME_ICON_BTN = (By.ID, "reg_img_304248660")
ALL_CHANGE_TABLE = (By.XPATH, "//table[@id='T301444200']//td[1]//nobr[1]//span")
LOGOUT_BUTTON = (By.XPATH, "//div[@class='f9'][contains(text(),'Logout')]")
IT_HOME_BUTTON = (By.XPATH, "//a[@class='btn'][contains(text(),'IT Home')]")
class ChangeManagerLocators:
""" All the change manager section locators """
MANAGER_GROUP_BTN = (By.XPATH, "//div[@id='WIN_3_1000000015']//a[@class='btn btn3d menu']")
IMPLEMENTATION_MENU = (
By.XPATH, "//div[@class='MenuOuter']//div[@class='MenuTableContainer']//table[@class='MenuTable']"
"//tbody[@class='MenuTableBody']//tr[@class='MenuTableRow']//td[@class='MenuEntryName']"
"[contains(text(),'Implementation')]")
# ------------------ CHANGE_MANAGER_DEPARTMENT ------------------------------ #
TNR_GROUP_MENU = (By.XPATH, "//td[contains(text(),'Transport Network Rollout')]")
ANR_GROUP_MENU = (By.XPATH, "//td[contains(text(),'Access Network Rollout')]")
# ------------------ CHANGE_MANAGER_TECHNOLOGY DIVISION ----------------------- #
TX_OPTIMIZATION_SELECT_BTN = (By.XPATH, "//td[contains(text(),'Access Network Rollout_2(3G)')]")
RADIO_ROLLOUT_SELECT_BTN = (By.XPATH, "//td[contains(text(),'Access Network Rollout_2(3G)')]")
CHANGE_MANAGER_MENU_BTN = (By.XPATH, "//*[@id='WIN_3_1000000403']/a")
# ----------------- NAMES OF THE CHANGE MANAGERS --------------------------- #
CHANGE_MANAGER_SHAHED = (By.XPATH, "//div[@class='MenuOuter']//*[text()='<NAME>']")
CHANGE_MANAGER_RIPAN = (By.XPATH, "//div[@class='MenuOuter']//*[text()='<NAME>']")
CHANGE_MANAGER_FUAD = (By.XPATH, "//div[@class='MenuOuter']//*[text()='<NAME>']")
CHANGE_MANAGER_MUSFIQ = (By.XPATH, "//div[@class='MenuOuter']//*[text()= '<NAME>']")
CHANGE_MANAGER_SHAHRIAR = (By.XPATH, "//div[@class='MenuOuter']//*[text()='<NAME>']")
CHANGE_MANAGER_SUMON = (By.XPATH, "//div[@class='MenuOuter']//*[text()='<NAME>']")
CHANGE_MANAGER_RAKIB = (By.XPATH, "//div[@class='MenuOuter']//*[text()='<NAME>']")
CHANGE_MANAGER_KHAIRUL = (By.XPATH, "//div[@class='MenuOuter']//*[text()='<NAME>']")
CHANGE_MANAGER_SUDIPTA = (By.XPATH, "//div[@class='MenuOuter']//*[text()='<NAME>']")
class LocationServiceLocators:
""" All the change location service locators """
LOCATION_MENU_BTN = (By.XPATH, "//img[@id='reg_img_303935000']")
CLEAR_BUTTON = (By.XPATH, "//*[@id='WIN_0_303915400']/div/div")
SEARCH_ICON_IMG = (By.XPATH, "//img[@id='reg_img_304249820']")
COMPANY_TEXTBOX = (By.XPATH, "//textarea[@id='arid_WIN_0_1000000001']")
REGION_TEXTBOX = (By.XPATH, "//textarea[@id='arid_WIN_0_200000012']")
SITE_GROUP_TEXTBOX = (By.XPATH, "//textarea[@id='arid_WIN_0_200000007']")
SITE_TEXTBOX = (By.XPATH, "//textarea[@id='arid_WIN_0_260000001']")
SEARCH_LOCATION_BTN = (By.XPATH, "//div[@class='f1'][contains(text(),'Search')]")
SELECT_LOCATION_BTN = (By.XPATH, "//div[contains(text(),'Select')]")
OK_LOCATION_BTN = (By.XPATH, "//div[contains(text(),'OK')]")
class TaskSectionLocators:
""" All the task creation or closing related locators """
TASK_PAGE = (By.XPATH, "//a[contains(text(),'Tasks')]")
REQUEST_TYPE_BTN = (By.XPATH, "//input[@type='text' and @id='arid_WIN_3_10003042']")
TASK_GROUP_TEMPLATE_BTN = (By.XPATH, "//td[contains(text(), 'Task Group Template')]")
RELATE_BTN = (By.XPATH, "//div[contains(text(),'Relate')]")
TASK_RELATE_BTN = (By.XPATH, "//*[@id='WIN_0_10006772']")
TASK_GROUP_ROW_SPAN = (By.XPATH, "//span[contains(text(), 'Task Group')]")
# ==> INITIATION TASK PAGE LOCATORS <== #
INITIATION_TASK_SPAN = (By.XPATH, "//span[contains(text(), 'Initiation Phase Task (SOC)')]")
# ==> DOWNTIME DURATION PAGE LOCATORS <== #
SERVICE_DOWNTIME_DURATION_TASK_SPAN = (By.XPATH, "//span[contains(text(), 'Service Downtime Duration Task')]")
# ==> SYSTEM DOWNTIME TASK LOCATORS <== #
SYSTEM_DOWNTIME_TASK = (By.XPATH, "//span[contains(text(), 'System Downtime Task')]")
# ==> DOWNTIME WINDOW PAGE LOCATORS <== #
SERVICE_DOWNTIME_WINDOW_TASK_SPAN = (By.XPATH, "//span[contains(text(), 'Service Downtime Window Task')]")
# ==> REVIEW & CLOSE TASK PAGE LOCATORS <== #
REVIEW_CLOSURE_TASK_SPAN = (By.XPATH, "//span[contains(text(), 'Review and Closure Task (SOC)')]")
class WorkInfoAttachment:
""" Work info & attachment area """
INFO_NOTES_TEXTBOX = (By.XPATH, "//div[@id='WIN_3_304247080']//*[@id='arid_WIN_3_304247080']")
ATTACH_FILE_ICON_BUTTON = (By.XPATH, "//img[@id='reg_img_304247100']")
UPLOAD_ATTACHMENT_FRAME = (
By.XPATH, "//iframe[@src='http://itsm-web.robi.com.bd:8080/arsys/resources/html/AttachmentPopup.html']")
CHOOSE_ATTACHMENT_FRAME = (By.XPATH, "//input[@id='PopupAttInput']")
OK_ATTACHMENT_FRAME_BUTTON = (By.XPATH, "//div[@id='PopupAttFooter']/a[contains(text(), 'OK')]")
ADD_NOTE_ATTACHMENT_BUTTON = (By.XPATH, "//a[@id='WIN_3_304247110']//div[@class='f1'][contains(text(),'Add')]")
MORE_DETAILS_BUTTON = (By.XPATH, "//div[@id='WIN_3_304247070']//a[@class='pagebtn ']")
WORK_INFO_TYPE_BUTTON = (By.XPATH, "//input[@id='arid_WIN_3_304247210']")
VIEW_ATTACHMENT_BUTTON = (By.ID, "reg_img_304252650")
class SummaryAndNotesBox:
""" Summary and Notes Textbox """
SUMMARY_TEXTBOX = (By.XPATH, "//*[@id='arid_WIN_3_1000000000']")
NOTES_TEXTBOX = (By.XPATH, "//*[@id='arid_WIN_3_1000000151']")
class DateSectionSelector:
""" Date section locators """
DATE_PAGE = (By.XPATH, "//a[contains(text(),'Date')]")
START_DATE_INPUT = (By.XPATH, "//input[@id='arid_WIN_3_1000000350']")
END_DATE_INPUT = (By.XPATH, "//input[@id='arid_WIN_3_1000000362']")
class CommonTaskDateLocators:
""" Date Page locators in the Task stage """
DATE_SECTOR_IN_TASK = (By.XPATH, "//a[contains(text(), 'Dates')]")
START_TIME_IN_TASK = (By.XPATH, "//input[@id= 'arid_WIN_0_1000000350']")
END_TIME_IN_TASK = (By.XPATH, "//input[@id= 'arid_WIN_0_1000000362']")
SAVE_TASK_BTN = (By.XPATH, "//div[@class='f7'][contains(text(), 'Save')]")
class CommonChangeCreateLocators:
""" Some Common locators for Creating a new Change """
# ==> GET CHANGE NUMBER <== #
CHANGE_NUMBER_VALUE = (By.XPATH, "//div[@id='WIN_3_1000000182']//textarea[@id='arid_WIN_3_1000000182']")
# ==> CHANGE CLASS TYPE SECTION LOCATORS <== #
CHANGE_CLASS_TYPE_BUTTON = (By.XPATH, "//input[@id='arid_WIN_3_1000000568']")
LOADING_ICON = (By.XPATH, "//span[@class='loadingText']")
class SaveChangeLocators:
""" Save & Send the Change to Request for Authorization locators """
SAVE_CHANGE_BTN = (By.XPATH, "//a[@id='WIN_3_1001']")
GOTO_NEXT_STAGE_BTN = (By.XPATH, "//div[@class='f7'][contains(text(), 'Next Stage')]")
class FrameBoxLocators:
""" Frame object locators """
FRAME_OF_CONFIRMATION = (
By.XPATH, "//iframe[@src='http://itsm-web.robi.com.bd:8080/arsys/resources/html/MessagePopup.html']")
FRAME_OK_BUTTON = (By.XPATH, "//div[@id='PopupMsgFooter']//a[contains(text(),'OK')]")
class CloseChangeLocators:
""" This Class is going to store all the required new locators for Closing a Change Request """
CURRENT_CR_STATUS = (By.XPATH, "//textarea[@id='arid_WIN_3_303502600']")
ACTUAL_OPEN_DATE = (By.XPATH, "//input[@id = 'arid_WIN_3_1000000348']")
CLOSE_MENU_SELECT = (By.XPATH, "//input[@id= 'arid_WIN_4_7']")
SELECT_CLOSE = (By.XPATH, "//tr[@class='MenuTableRow']//td[contains(text(), 'Closed')]")
TASK_INIT_STATUS = (By.XPATH, "//input[@id='arid_WIN_4_7']")
NEXT_STAGE_BUTTON = (By.XPATH, "//div[@class='f7'][contains(text(), 'Next Stage')]")
CHANGE_STATUS_INPUT = (By.XPATH, "//input[@id='arid_WIN_4_7']")
TASK_PLAN_START_DATE = (By.XPATH, "//input[@id='arid_WIN_4_1000000350']")
TASK_PLAN_END_DATE = (By.XPATH, "//input[@id= 'arid_WIN_4_1000000362']")
TASK_ACTUAL_START_DATE = (By.XPATH, "//input[@id = 'arid_WIN_4_1000000348']")
TASK_ACTUAL_END_DATE = (By.XPATH, "//input[@id = 'arid_WIN_4_1000000364']")
    # Below is a special case where a dynamically built XPATH is needed at run time
    @staticmethod
    def get_changeable_xpath(dynamic_xpath: str) -> tuple:
        """ Build a locator tuple from a dynamically constructed XPATH at run time """
        return By.XPATH, dynamic_xpath
class CancelRequestLocators:
""" A class variable holder for Canceling the Change Request """
CHANGE_MANAGEMENT_CONSOLE = (By.XPATH, "//*[text()='Change Management Console']")
MENU_FOR_STATUS = (By.XPATH, "//div[@id='WIN_3_303502600']//a[@class='btn btn3d menu']")
SELECT_CANCEL = (By.XPATH, "//td[@class='MenuEntryNameHover']")
CANCEL_OPTION_SELECT = (By.XPATH, "//td[contains(text(),'Cancel')]")
SAVE = (By.XPATH, "//a[@id='WIN_3_1003']//div[@class='f1'][contains(text(),'Save')]")
STATUS_AREA = (By.XPATH, "//textarea[@id='arid_WIN_3_303502600']")
class RelationshipQueryLocators:
""" A class for all the variable to relate the relationship query """
RELATIONSHIP_TAB_BTN = (
By.XPATH, "//a[@class='btn f1'][contains(text(), 'Relationships')]")
RECORD_TYPE_TEXTAREA = (By.XPATH, "//textarea[@id='arid_WIN_3_301541300']")
CONFIGURATION_ITEM_LIST = (By.XPATH, "//tr[@class='MenuTableRow']//td[contains(text(), 'Configuration Item')]")
SEARCH_BTN = (By.XPATH, "//img[@id='reg_img_301905800']")
# ==> RELATIONSHIP PAGE LOCATORS <== #
RELATIONSHIP_ADVANCE_SEARCH_LINK = (By.XPATH, "//div[contains(text(),'Use Advanced Search')]")
RELATIONSHIP_QUERY_TEXTBOX = (By.XPATH, "//textarea[@id='arid_WIN_0_304328790']")
RELATIONSHIP_ADVANCE_SEARCH_BTN = (
By.XPATH, "//a[@id='WIN_0_301867800']//div[@class='f1'][contains(text(),'Search')]")
RELATIONSHIP_ROBI_AXIATA = (By.XPATH, "//*[contains(text(), 'Robi-Axiata')]")
RELATE_THE_RELATIONSHIP_BTN = (By.XPATH, "//a[@id='WIN_0_301867500']//div[@class='f1']")
``` |
{
"source": "jiauy/before_work",
"score": 3
} |
#### File: before_work/project_train/deal_pdf.py
```python
import io
import os
import re
import time
import gevent
from gevent import monkey
monkey.patch_all()
import requests
# The site's sample functions wrapped into a class, with the save path changed; found a bug: zipfile has no open() attribute (use ZipFile instead)
class Pdf2TableAPI:
def invoke_api(self, api, filename, company_name, file_name_without_pdf):
        # run the conversion asynchronously
output_format = 'zip' # or json or bz2
url = 'http://qianliyan2.memect.cn:6111/api/{}'.format(api)
query = {
            # async=true: the task runs asynchronously, so the result has to be fetched by polling
            # 'async': 'false',
            'async': 'true',
'output-format': output_format
}
headers = {}
with open(filename, 'rb') as fp:
data = fp.read()
            # if uploading JSON, compressing it first is recommended, e.g.:
# import gzip
# data = gzip.compress(data)
# headers={'Content-Encoding':'gzip'}
r = requests.post(url, data=data, params=query, headers=headers)
result = None
if r.status_code == 200:
result = r.json()
elif r.status_code == 400:
            # the error body is JSON
result = r.json()
else:
            # other errors, e.g. 500 system error
print(r.text)
        # wait for the result
if result and result.get('task'):
# result={task:{id:''}}
            print('Start fetching the result of task --{}--'.format(result.get('task')))
self.get_result(api, result.get('task').get('id'), company_name, file_name_without_pdf,
output_format=output_format)
            print('Task --{}-- finished'.format(result.get('task')))
else:
            # submission failed, nothing to poll
pass
def get_result(self, api, task_id, company_name, file_name_without_pdf, output_format='json'):
# api=pdf2json ,pdf2table,pdf2doc
url = 'http://qianliyan2.memect.cn:6111/api/{}'.format(api)
query = {
'task_id': task_id
}
while True:
r = requests.post(url, params=query)
print(r.status_code)
print(r.text)
if r.status_code == 200:
if output_format in ('zip', 'bz2'):
                    # decompress in memory here for demonstration; the zip file could also be saved to disk
with io.BytesIO(r.content) as fp:
if output_format == 'bz2':
import tarfile
with tarfile.open(fileobj=fp) as tar:
tar.extractall('company/{}/{}'.format(company_name, file_name_without_pdf))
else:
import zipfile
                            with zipfile.ZipFile(fp) as zf:  # zipfile.open(fp) raises an error: the module has no such attribute
zf.extractall('company/{}/{}'.format(company_name, file_name_without_pdf))
                        print('zip file downloaded and extracted successfully')
else:
with open('company/{}/{}.json'.format(company_name, file_name_without_pdf), 'wb') as fp:
fp.write(r.content)
break
elif r.status_code == 400:
                # error payload: {error: {code: '', message: ''}}
result = r.json()
code = result.get('error').get('code')
if code == 'error':
                    # the task has finished but failed; stop polling
break
elif code in ('running', 'waiting'):
# running or waiting
                    # wait one second and poll again
time.sleep(1)
else:
                    # any other error code (none known yet); stop as well
break
else:
                # other errors, e.g. 500 system error; no need to keep polling
break
class DealPdf2Table(Pdf2TableAPI):
def __init__(self):
self.base_dir = os.path.join(os.getcwd(), 'company')
self.company_names = os.listdir(self.base_dir)
# table,wbk,html,html-scale,layout,extract-classes
self.query = {
'extract-image': 'true',
            'format': 4,  # 1 or 4; the service requires 4
            'textlines': 'false',  # textlines or span
            'table': 'ybk',  # wbk, ybk or all
            # 'wbk': 2,  # wbk detection mode: 2 is fast, 1 is slow
            # 'layout': 'default',
            # 'html': 'false',
            # 'html-scale': 1.5,
            # 'extract-classes': 'chart',  # 'chart' detects charts, 'diagram' detects flow diagrams, etc.; comma-separated
            'output-files': 'raw.json',  # also return raw.json; multiple files are comma-separated
            'output-format': 'zip',  # 'json' returns only table.json, 'zip' bundles multiple files, 'bz2' returns a tar.bz2
}
self.headers = {}
self.url = 'http://qianliyan2.memect.cn:6111/api/pdf2table'
def deal_pdf_2table_with_1threading(self, company_index):
# company_files
company_path = os.path.join(self.base_dir, self.company_names[company_index])
        # a second run also finds previously extracted folders here; those need to be skipped
files_list = os.listdir(company_path)
count=0
for file_name in files_list:
count+=1
try:
if re.findall('回复', file_name)[0] == '回复' and file_name[-3:]=='pdf':
file_path = os.path.join(self.base_dir, self.company_names[company_index], file_name)
                    # only process when no extracted directory exists for this file; otherwise it was handled before
if not os.path.exists(file_path[:-4]):
self.invoke_api(api='pdf2table', filename=file_path, company_name=self.company_names[company_index],
file_name_without_pdf=file_name[:-4])
                        print('{} has {} files, progress {}/{} -- async submission succeeded'.format(
                            self.company_names[company_index], len(files_list), count, len(files_list)))
            except:
                print('{} has {} files, progress {}/{} -- no "回复" (reply) in the filename or it is an '
                      'already-processed directory'.format(self.company_names[company_index], len(files_list),
                                                           count, len(files_list)))
def deal_pdf_2table_with_multithreading(self):
missions=[]
for company_index in range(len(self.company_names)):
            # pass the bound method and its argument to gevent.spawn; calling it here would run the job eagerly
            missions.append(gevent.spawn(self.deal_pdf_2table_with_1threading, company_index))
gevent.joinall(missions)
if __name__ == '__main__':
t1=time.time()
DealPdf2Table().deal_pdf_2table_with_multithreading()
t2=time.time()
    print('Total time for this run: {} seconds'.format(t2 - t1))
```
#### File: before_work/project_train/download_pdf.py
```python
import os
from contextlib import closing
import time
import json
import gevent
from gevent import monkey
monkey.patch_all()
import requests
def reconnet(func):
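    # Retry decorator: keep calling the wrapped download until it succeeds, sleeping
    # 10 seconds between attempts and periodically printing how long it has been waiting.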
def inner(*args, **kwargs):
n = 0
t1 = time.time()
while True:
try:
func(*args, **kwargs)
                # crucial: break out of the retry loop once the call succeeds
break
except:
pass
time.sleep(10)
t2 = time.time()
if ((t2 - t1) // 60) % 2 == 0:
                print('{} -- task has been waiting for {} seconds'.format(kwargs, (t2 - t1)))
return inner
class DownloadCompanysPdf:
def __init__(self, page_num):
self.source_info_url = 'http://www.neeq.com.cn/projectNewsController/infoResult.do?callback=jQuery211_1599964857562'
self.post_args = {'page': page_num, 'isNewThree': 1, 'sortfield': 'updateDate', 'sorttype': 'desc',
'needFields[]': ['id', 'stockName', 'companyName']}
self.page_num = page_num
try_times = 0
while True:
try:
                # greenlets may compete for bandwidth, so allow a generous 10-minute connect/read timeout
self.response = requests.post(self.source_info_url, data=self.post_args, timeout=(60*10,60 * 10))
break
except:
try_times += 1
if try_times > 3:
                    print('Page {} could not be fetched after three attempts'.format(page_num))
break
self.json_data = json.loads(self.response.text[25:-2])
self.pdf_source_info_url = 'http://www.neeq.com.cn/projectNewsController/infoDetailResult.do?id={}&callback=jQuery211_1600008606945'
@reconnet
def __download_one_company_pdf(self, index):
        # index: position of the company in this page's company list
base_dir = os.getcwd()
company_id = self.json_data['listInfo']['content'][index]['id']
company_name = self.json_data['listInfo']['content'][index]['companyName']
pdf_source_post_args = {'id': company_id, 'callback': 'jQuery211_1600008606945'}
        # prone to timeouts and 502 Bad Gateway errors, hence the long timeout
company_pdfdata_response = requests.post(self.pdf_source_info_url.format(company_id), data=pdf_source_post_args,
timeout=(60 * 10, 60 * 10))
company_pdfdata_response_json_data = json.loads(company_pdfdata_response.text[25:-2])
        # wxhf ("问询回复"): inquiry-reply documents
wxhf_num = len(company_pdfdata_response_json_data['wxhfhInfo'])
wxhf_json_data = company_pdfdata_response_json_data['wxhfhInfo']
save_dir = os.path.join(base_dir, 'company', company_name)
for pdf_index in range(wxhf_num):
file_title = wxhf_json_data[pdf_index]['disclosureTitle']
file_path = wxhf_json_data[pdf_index]['destFilePath']
file_url_path = 'http://www.neeq.com.cn' + file_path
save_path = os.path.join(save_dir, file_title) + '.pdf'
if not os.path.exists(save_path):
with closing(requests.get(file_url_path, stream=True, timeout=(60 * 10, 60 * 10))) as response:
with open(save_path, "wb") as f:
# chunk_size 512 bytes
for chunk in response.iter_content(chunk_size=512):
if chunk:
f.write(chunk)
                print('Page {}, company {}: pdf file {}/{} <{}> downloaded'.format(
                    self.page_num, index, pdf_index + 1, wxhf_num, file_title))
            else:
                print('Page {}, company {}: pdf file {}/{} <{}> already exists'.format(
                    self.page_num, index, pdf_index + 1, wxhf_num, file_title))
def download_onepage_pdf_with_multithreding(self):
company_num = len(self.json_data['listInfo']['content'])
missions = []
for index in range(company_num):
missions.append(
gevent.spawn(self.__download_one_company_pdf, index=index))
gevent.joinall(missions)
def download_allpages_pdf_with_mutithreading(total_page_num):
missions = []
for index in range(total_page_num):
        missions.append(
            # spawn with the bound method (not its call result) so the download actually runs inside the greenlet
            gevent.spawn(DownloadCompanysPdf(index).download_onepage_pdf_with_multithreding))
gevent.joinall(missions)
if __name__ == '__main__':
t1=time.time()
download_allpages_pdf_with_mutithreading(4)
t2=time.time()
    print('Total time for this full download: {} seconds'.format(t2 - t1))
# nesting gevent tasks gains little here: the outer tasks only switch after the inner gevent.joinall() finishes
# possible improvement: download PDFs per company rather than per page
```
#### File: before_work/PythonAndOop/N17_instance_methods_1.py
```python
class A:
    def method(*args):  # note: unlike a typical method there is no explicit self; the point is to show that the instance itself arrives as args[0]
return args
if __name__ == '__main__':
a = A()
result = a.method([1,2,3,4])
print(result)
```
#### File: before_work/PythonAndOop/N19_decorators_1.py
```python
def my_decorator(my_function):
def inner_decorator():
print("This happened before!")
my_function()
print("This happens after ")
print("This happened at the end!")
return inner_decorator
@my_decorator
def my_decorated():
print("This happened!")
if __name__ == '__main__':
my_decorated()
```
#### File: before_work/PythonAndOop/N20_decorators_2.py
```python
import datetime
def my_decorator(inner):
def inner_decorator():
print(datetime.datetime.utcnow())
inner()
print(datetime.datetime.utcnow())
return inner_decorator
@my_decorator
def decorated():
print("This happened!")
if __name__ == "__main__":
decorated()
```
#### File: before_work/PythonAndOop/N26_class_decorators.py
```python
def honirific(cls):
class HonirificCls(cls):
def full_name(self):
return "Dr. " + super(HonirificCls, self).full_name()
return HonirificCls
@honirific
class Name:
def __init__(self, first_name, last_name):
self.first_name = first_name
self.last_name = last_name
def full_name(self):
return " ".join([self.first_name, self.last_name])
if __name__ == '__main__':
result = Name("Vimal", "A.R").full_name()
print("Full name: {0}".format(result))
```
#### File: before_work/PythonAndOop/N29_staticmethod_1.py
```python
class MyClass:
count = 0
def __init__(self, val):
self.val = self.filterint(val)
MyClass.count += 1
@staticmethod
def filterint(value):
if not isinstance(value, int):
print("Entered value is not an INT, value set to 0")
return 0
else:
return value
if __name__ == '__main__':
a = MyClass(5)
b = MyClass(10)
c = MyClass(15)
print(a.val)
print(b.val)
print(c.val)
print(a.filterint(100))
```
#### File: before_work/PythonAndOop/N38_method_overloading_2.py
```python
import abc
class GetSetParent(metaclass=abc.ABCMeta):
def __init__(self, value):
self.val = 0
def set_val(self, value):
self.val = value
def get_val(self):
return self.val
@abc.abstractmethod
def showdoc(self):
return
class GetSetList(GetSetParent):
def __init__(self, value=0):
self.vallist = [value]
def get_val(self):
return self.vallist[-1]
def get_vals(self):
return self.vallist
def set_val(self, value):
self.vallist.append(value)
def showdoc(self):
print("GetSetList object, len {0}, store history of values set".format(
len(self.vallist)))
```
#### File: before_work/PythonAndOop/N6_class_attributes_1.py
```python
class YourClass:
classy = 10
def set_val(self):
self.insty = 100
if __name__ == '__main__':
dd = YourClass()
dd.classy
dd.set_val()
dd.insty
``` |
{
"source": "jiavila/hpc-client-1",
"score": 2
} |
#### File: code/cluster/__init__.py
```python
from util import frame
from .base import Base
from .lsf import Lsf
from .sge import Sge
from .slurm import Slurm
def run_cast(start, config, log):
"""
Look up a cluster implementation, and run a single cast sweep.
"""
if config.cast.cluster == 'base':
Base(config, log).handle_all(start)
elif config.cast.cluster == 'lsf':
Lsf(config, log).handle_all(start)
elif config.cast.cluster == 'sge':
Sge(config, log).handle_all(start)
elif config.cast.cluster == 'slurm':
Slurm(config, log).handle_all(start)
else:
frame.fatal('No such cluster type: ' + config.cast.cluster)
```
#### File: code/cluster/sge.py
```python
import re
from .base import Base
from util import defn
class Sge(Base):
def set_config_defaults(self):
c = self.config.cast
if c.command is None:
c.command = ['qsub', '{{script_path}}']
if c.command_script_stdin is None:
c.command_script_stdin = False
if c.script is None:
c.script = SCRIPT_TEMPLATE
if c.script_executable is None:
c.script_executable = True
def determine_job_settings(self, job):
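        # Pull per-job SGE resource hints from the job config (defaulting to 4G of RAM
        # and a 4-8 slot range) and sanitise them before they are templated into the script.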
s_debug, s_write = self.determine_singularity_settings(job)
ram = job.config.get('sge-ram', '4G')
cpu = job.config.get('sge-cpu', '4-8')
# Force alphanum, with dashes for cpu range
ram = re.sub(r'\W+', '', str(ram))
cpu = re.sub(r'[^a-zA-Z0-9\-]+', '', str(cpu))
return defn.JobSettings(
fw_id = str(job.id),
singularity_debug = s_debug,
singularity_writable = s_write,
ram = ram,
cpu = cpu,
)
SCRIPT_TEMPLATE = """#!/usr/bin/env bash
#$ -j y
#$ -o {{script_log_path}}
#$ -S /bin/bash
#$ -l h_vmem={{job.ram}}
#$ -pe threaded {{job.cpu}}
set -euo pipefail
source "{{cast_path}}/settings/credentials.sh"
cd "{{engine_run_path}}"
set -x
./engine run --single-job {{job.fw_id}}
"""
``` |
{
"source": "jia-wan/GeneralizedLoss-Counting-Pytorch",
"score": 2
} |
#### File: GeneralizedLoss-Counting-Pytorch/datasets/crowd.py
```python
from PIL import Image
import PIL
import torch.utils.data as data
import os
import scipy.io as io
from glob import glob
import json
import torch
import torchvision.transforms.functional as F
from torchvision import transforms
import random
import numpy as np
import math
def random_crop(im_h, im_w, crop_h, crop_w):
res_h = im_h - crop_h
res_w = im_w - crop_w
i = random.randint(0, res_h)
j = random.randint(0, res_w)
return i, j, crop_h, crop_w
def cal_innner_area(c_left, c_up, c_right, c_down, bbox):
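    # Intersection area between the crop window (c_left, c_up, c_right, c_down) and each
    # bounding box row in `bbox`, given as (x1, y1, x2, y2).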
inner_left = np.maximum(c_left, bbox[:, 0])
inner_up = np.maximum(c_up, bbox[:, 1])
inner_right = np.minimum(c_right, bbox[:, 2])
inner_down = np.minimum(c_down, bbox[:, 3])
inner_area = np.maximum(inner_right-inner_left, 0.0) * np.maximum(inner_down-inner_up, 0.0)
return inner_area
def get_im_list(root_path, json_file):
with open(json_file) as f:
im_list = json.load(f)
im_list = [os.path.join(root_path, x.split('/')[-1]) for x in im_list]
return im_list
def train_val(im_list, ratio=0.9):
N = int(float(len(im_list))*ratio)
idx = torch.randperm(len(im_list))
train_list = [im_list[i] for i in idx[0:N]]
    val_list = [im_list[i] for i in idx[N:]]  # idx[N:] so the element at position N is not dropped from both splits
return train_list, val_list
class Crowd(data.Dataset):
def __init__(self, root_path, crop_size,
downsample_ratio, is_gray=False,
method='train', resize=False, im_list=None, noise=0):
self.noise = noise
self.root_path = root_path
self.resize = resize
if im_list is None:
self.im_list = sorted(glob(os.path.join(self.root_path, '*.jpg')))
else:
self.im_list = im_list
if method not in ['train', 'val']:
raise Exception("not implement")
self.method = method
self.c_size = crop_size
self.d_ratio = downsample_ratio
assert self.c_size % self.d_ratio == 0
self.dc_size = self.c_size // self.d_ratio
if is_gray:
self.trans = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
else:
self.trans = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
def __len__(self):
return 1*len(self.im_list)
def __getitem__(self, item):
img_path = self.im_list[item % len(self.im_list)]
gd_path = img_path.replace('jpg', 'npy')
img = Image.open(img_path).convert('RGB')
keypoints = np.load(gd_path)
if self.method == 'train':
return self.train_transform_with_crop(img, keypoints)
elif self.method == 'val':
img = self.trans(img)
name = os.path.basename(img_path).split('.')[0]
if len(keypoints) == 0:
keypoints = torch.zeros(size=(1,1))
return img, keypoints, name
def train_transform_with_crop(self, img, keypoints):
"""random crop image patch and find people in it"""
wd, ht = img.size
st_size = min(wd, ht)
if st_size < self.c_size:
c_size = 512
else:
c_size = self.c_size
assert st_size >= self.c_size
i, j, h, w = random_crop(ht, wd, c_size, c_size)
img = F.crop(img, i, j, h, w)
if len(keypoints) < 1:
if random.random() > 0.5:
img = F.hflip(img)
return self.trans(img), torch.from_numpy(keypoints.copy()).float(), \
torch.from_numpy(keypoints.copy()).float(), st_size
nearest_dis = np.clip(keypoints[:, 2], 4.0, 128.0)
points_left_up = keypoints[:, :2] - nearest_dis[:, None] / 2.0
points_right_down = keypoints[:, :2] + nearest_dis[:, None] / 2.0
bbox = np.concatenate((points_left_up, points_right_down), axis=1)
inner_area = cal_innner_area(j, i, j+w, i+h, bbox)
origin_area = nearest_dis * nearest_dis
ratio = np.clip(1.0 * inner_area / origin_area, 0.0, 1.0)
mask = (ratio >= 0.3)
target = ratio[mask]
keypoints = keypoints[mask]
        keypoints = keypoints[:, :2] - [j, i]  # shift to crop coordinates
if len(keypoints) > 0:
if random.random() > 0.5:
img = F.hflip(img)
keypoints[:, 0] = w - keypoints[:, 0]
else:
if random.random() > 0.5:
img = F.hflip(img)
return self.trans(img), torch.from_numpy(keypoints.copy()).float(), \
torch.from_numpy(target.copy()).float(), st_size
```
#### File: GeneralizedLoss-Counting-Pytorch/models/vgg.py
```python
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import torch
import numpy as np
from torchvision import models
from torch.nn import functional as F
__all__ = ['vgg19']
model_urls = {
'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
}
class VGG(nn.Module):
def __init__(self, features, down=8, o_cn=1, final='abs'):
super(VGG, self).__init__()
self.down = down
self.final = final
self.reg_layer = nn.Sequential(
nn.Conv2d(512, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 128, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(128, o_cn, 1)
)
self._initialize_weights()
self.features = features
def forward(self, x):
x = self.features(x)
if self.down < 16:
x = F.interpolate(x, scale_factor=2)
x = self.reg_layer(x)
if self.final == 'abs':
x = torch.abs(x)
elif self.final == 'relu':
x = torch.relu(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.ConvTranspose2d):
nn.init.normal_(m.weight, std=0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def make_layers(cfg, in_channels=3, batch_norm=False, dilation=False):
if dilation:
d_rate = 2
else:
d_rate = 1
layers = []
# in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
# conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=d_rate,dilation = d_rate)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
cfg = {
'C': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512],
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512],
'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512],
'F': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512]
}
def vgg19(down=8, bn=False, o_cn=1, final='abs'):
"""VGG 19-layer model (configuration "E")
model pre-trained on ImageNet
"""
    model = VGG(make_layers(cfg['E'], batch_norm=bn), down=down, o_cn=o_cn, final=final)  # pass the bn flag through instead of hard-coding batch_norm=False
model.load_state_dict(model_zoo.load_url(model_urls['vgg19']), strict=False)
return model
```
#### File: jia-wan/GeneralizedLoss-Counting-Pytorch/test.py
```python
import torch
import os
import numpy as np
from datasets.crowd import Crowd
from models.vgg import vgg19
import argparse
args = None
def train_collate(batch):
transposed_batch = list(zip(*batch))
images = torch.stack(transposed_batch[0], 0)
points = transposed_batch[1] # the number of points is not fixed, keep it as a list of tensor
targets = transposed_batch[2]
st_sizes = torch.FloatTensor(transposed_batch[3])
return images, points, targets, st_sizes
def parse_args():
parser = argparse.ArgumentParser(description='Test ')
parser.add_argument('--data-dir', default='../../data/UCF_Bayes',
help='training data directory')
parser.add_argument('--save-dir', default='./model.pth',
help='model path')
parser.add_argument('--device', default='0', help='assign device')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.device.strip() # set vis gpu
datasets = Crowd(os.path.join(args.data_dir, 'test'), 512, 8, is_gray=False, method='val')
dataloader = torch.utils.data.DataLoader(datasets, 1, shuffle=False,
num_workers=1, pin_memory=False)
model = vgg19()
device = torch.device('cuda')
model.to(device)
model.load_state_dict(torch.load(os.path.join(args.save_dir), device))
epoch_minus = []
for inputs, count, name in dataloader:
inputs = inputs.to(device)
assert inputs.size(0) == 1, 'the batch size should equal to 1'
with torch.set_grad_enabled(False):
outputs = model(inputs)
temp_minu = len(count[0]) - torch.sum(outputs).item()
print(name, temp_minu, len(count[0]), torch.sum(outputs).item())
epoch_minus.append(temp_minu)
epoch_minus = np.array(epoch_minus)
mse = np.sqrt(np.mean(np.square(epoch_minus)))
mae = np.mean(np.abs(epoch_minus))
log_str = 'Final Test: mae {}, mse {}'.format(mae, mse)
print(log_str)
``` |
{
"source": "JIAWea/rsocket-py-examples",
"score": 2
} |
#### File: JIAWea/rsocket-py-examples/client_test_channel.py
```python
import asyncio
import json
import time
from abc import abstractmethod
import rxbp
from reactivestreams import Subscriber
from rsocket import Payload, BaseRequestHandler
from rsocket import RSocket
from rxbp.acknowledgement.stopack import StopAck
from rxbp.observer import Observer
from rxbp.schedulers.threadpoolscheduler import ThreadPoolScheduler
from rxbp.schedulers.threadpoolschedulerdispose import RXThreadPoolScheduler
from rxbp.typing import ElementType
class Handler(BaseRequestHandler):
def request_fire_and_forget(self, payload: Payload):
str_data = payload.data.decode('utf-8')
data = json.loads(str_data)
print("[FNF] data: ", data)
class ChannelSubscriber:
def __init__(self):
self.on_next_count = 0
def on_next(self, elem: ElementType):
self.on_next_count += 1
def on_error(self, exc):
print('Exception from server: ', str(exc))
def on_completed(self):
print("Completed! next_count: {}".format(self.on_next_count))
def on_subscribe(self, subscription):
# noinspection PyAttributeOutsideInit
self.subscription = subscription
self.subscription.request(20)
async def session(reader, writer):
socket = RSocket(reader, writer, handler_factory=Handler, server=False)
def handler(o: Observer, _):
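        # Producer side of the request_channel: push one payload every 0.5 s and stop early
        # if the subscriber cancels, which shows up here as a StopAck acknowledgement.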
for i in range(1, 20):
time.sleep(0.5)
data, metadata = b'1', b''
ack = o.on_next([Payload(data, metadata)])
if isinstance(ack, StopAck):
break
o.on_completed()
socket.request_channel(
rxbp.create(handler).pipe(
rxbp.op.subscribe_on(pool),
)
).subscribe(ChannelSubscriber())
await asyncio.sleep(1000)
await socket.close()
return
if __name__ == '__main__':
pool = RXThreadPoolScheduler()
loop = asyncio.get_event_loop()
try:
connection = loop.run_until_complete(asyncio.open_connection(
'localhost', 9898))
loop.run_until_complete(session(*connection))
finally:
loop.close()
```
#### File: JIAWea/rsocket-py-examples/server_request_stream.py
```python
import asyncio
import time
import rxbp
from rsocket import RSocket, BaseRequestHandler
from rsocket.payload import Payload
from rxbp.acknowledgement.stopack import StopAck
from rxbp.observer import Observer
from rxbp.schedulers.threadpoolscheduler import ThreadPoolScheduler
def handler(o: Observer, _):
for i in range(1, 10):
time.sleep(0.5)
ack = o.on_next([Payload(b'server data-' + str(i).encode("utf-8"), b'metadata')])
if isinstance(ack, StopAck):
print("stopped!")
break
o.on_completed()
class Handler(BaseRequestHandler):
def request_stream(self, payload: Payload):
data = payload.data.decode('utf-8')
metadata = payload.metadata.decode('utf-8')
print("[RS] Received from client: {}, {}".format(data, metadata))
pool = ThreadPoolScheduler("publisher")
if metadata == "unlimit":
publisher = rxbp.interval(1).pipe(
rxbp.op.map(lambda v: Payload(b'map client2 index-' + str(v).encode('utf-8'), b'metadata')),
rxbp.op.do_action(
on_next=lambda v: print("sending " + v.data.decode('utf-8')),
on_error=lambda v: print("sending error: ", str(v)),
on_completed=lambda: print("completed!")
),
rxbp.op.subscribe_on(pool),
)
else:
publisher = rxbp.create(handler).pipe(
rxbp.op.do_action(
on_next=lambda v: print("sending " + v.data.decode('utf-8')),
on_error=lambda v: print("sending error: ", str(v)),
on_completed=lambda: print("completed!"),
),
rxbp.op.subscribe_on(pool),
)
return publisher
def session(reader, writer):
RSocket(reader, writer, handler_factory=Handler)
if __name__ == '__main__':
loop = asyncio.get_event_loop()
service = loop.run_until_complete(asyncio.start_server(
session, 'localhost', 9898))
try:
loop.run_forever()
finally:
service.close()
loop.close()
``` |
{
"source": "JIAWea/rxbackpressure",
"score": 3
} |
#### File: rxbackpressure/examples/examplestrategy.py
```python
import time
from threading import Thread
import rxbp
from rxbp.acknowledgement.acksubject import AckSubject
from rxbp.acknowledgement.continueack import continue_ack
from rxbp.overflowstrategy import DropOld, ClearBuffer, Drop
from rxbp.schedulers.asyncioscheduler import AsyncIOScheduler
from rxbp.schedulers.threadpoolscheduler import ThreadPoolScheduler
from rxbp.schedulers.timeoutscheduler import TimeoutScheduler
from rxbp.testing.tobserver import TObserver
def demo1():
def counter(sink):
while True:
time.sleep(5)
print(f"[**client**] received: ", sink.received)
publisher = rxbp.interval(0.5).pipe(
# rxbp.op.strategy(DropOld(buffer_size=15))
rxbp.op.strategy(ClearBuffer(buffer_size=15))
)
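    # As the names suggest, ClearBuffer flushes the whole buffered backlog once buffer_size is
    # exceeded, while DropOld (commented out above) discards only the oldest buffered items.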
sink = TObserver(immediate_continue=5)
publisher.subscribe(observer=sink, subscribe_scheduler=TimeoutScheduler())
t1 = Thread(target=counter, args=(sink,))
t1.start()
t1.join()
def demo2():
def counter(sink):
while True:
time.sleep(5)
print(f"[**client**] received: ", sink.received)
def work(o, skd):
for i in range(1_000):
o.on_next([i])
o.on_completed()
source = rxbp.create(work)
source = source.pipe(
rxbp.op.strategy(DropOld(buffer_size=100)),
# rxbp.op.strategy(ClearBuffer(buffer_size=15)),
# rxbp.op.strategy(Drop(buffer_size=15)),
)
sink = TObserver(immediate_continue=5)
source.subscribe(observer=sink, subscribe_scheduler=ThreadPoolScheduler("publisher"))
t1 = Thread(target=counter, args=(sink,))
t1.start()
t1.join()
if __name__ == '__main__':
# demo1()
demo2()
```
#### File: rxbp/flowables/intervalflowable.py
```python
from rx.core import typing
from rxbp.init.initsubscription import init_subscription
from rxbp.internal.timer import _interval
from rxbp.mixins.flowablemixin import FlowableMixin
from rxbp.observables.subscriptionobservable import SubscriptionObservable
from rxbp.subscriber import Subscriber
from rxbp.subscription import Subscription
class IntervalFlowable(FlowableMixin):
def __init__(
self,
period: typing.RelativeTime,
):
self.period = period
def unsafe_subscribe(self, subscriber: Subscriber) -> Subscription:
return init_subscription(
observable=SubscriptionObservable(
source=_interval(self.period),
scheduler=subscriber.scheduler,
subscribe_scheduler=subscriber.subscribe_scheduler,
)
)
``` |
{
"source": "jiawei415/cleanrlxu",
"score": 2
} |
#### File: work/policy/factorisednoisylayer.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
class FactorisedNoisyLayer(nn.Module):
def __init__(self, input_features, output_features, sigma=0.5):
super().__init__()
self.input_features = input_features
self.output_features = output_features
self.sigma = sigma
self.bound = input_features ** (-0.5)
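        # 1/sqrt(input_features): used for the uniform init of the mu parameters and, scaled by
        # sigma, for the constant init of the sigma parameters (see parameter_initialization).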
self.mu_bias = nn.Parameter(torch.FloatTensor(output_features))
self.sigma_bias = nn.Parameter(torch.FloatTensor(output_features))
self.mu_weight = nn.Parameter(
torch.FloatTensor(output_features, input_features)
)
self.sigma_weight = nn.Parameter(
torch.FloatTensor(output_features, input_features)
)
self.register_buffer("epsilon_input", torch.FloatTensor(input_features))
self.register_buffer("epsilon_output", torch.FloatTensor(output_features))
self.parameter_initialization()
self.sample_noise()
def parameter_initialization(self):
self.mu_bias.data.uniform_(-self.bound, self.bound)
self.sigma_bias.data.fill_(self.sigma * self.bound)
self.mu_weight.data.uniform_(-self.bound, self.bound)
self.sigma_weight.data.fill_(self.sigma * self.bound)
def forward(self, x: torch.Tensor, sample_noise: bool = True) -> torch.Tensor:
if not self.training:
return F.linear(x, weight=self.mu_weight, bias=self.mu_bias)
if sample_noise:
self.sample_noise()
# print(f"mu_weight: {self.mu_weight.device} output: {self.epsilon_output.device}")
weight = (
self.sigma_weight
* torch.ger(
self.epsilon_output.to(self.mu_weight.device),
self.epsilon_input.to(self.mu_weight.device),
)
+ self.mu_weight
)
bias = (
self.sigma_bias * self.epsilon_output.to(self.mu_bias.device) + self.mu_bias
)
return F.linear(x, weight=weight, bias=bias)
def sample_noise(self):
self.epsilon_input = self.get_noise_tensor(self.input_features)
self.epsilon_output = self.get_noise_tensor(self.output_features)
def get_noise_tensor(self, features: int) -> torch.Tensor:
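        # NoisyNet-style transform f(x) = sign(x) * sqrt(|x|); note the base noise here is drawn
        # from a uniform distribution rather than a Gaussian as in the original paper.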
noise = torch.FloatTensor(features).uniform_(-self.bound, self.bound)
return torch.sign(noise) * torch.sqrt(torch.abs(noise))
``` |
{
"source": "jiawei415/revisiting_rainbow",
"score": 2
} |
#### File: revisiting_rainbow/Agents/dqn_agent_new.py
```python
import time
import copy
import functools
from dopamine.jax import networks
from dopamine.jax.agents.dqn import dqn_agent
from dopamine.replay_memory import prioritized_replay_buffer
import gin
import jax
import jax.numpy as jnp
import numpy as onp
import tensorflow as tf
import jax.scipy.special as scp
from flax import linen as nn
def mse_loss(targets, predictions):
return jnp.mean(jnp.power((targets - (predictions)),2))
@functools.partial(jax.jit, static_argnums=(0, 9,10,11,12,13, 14))
def train(network_def, target_params, optimizer, states, actions, next_states, rewards,
terminals, loss_weights, cumulative_gamma, target_opt, mse_inf,tau,alpha,clip_value_min, rng):
"""Run the training step."""
online_params = optimizer.target
def loss_fn(params, rng_input, target, loss_multipliers):
def q_online(state):
return network_def.apply(params, state, rng=rng_input)
q_values = jax.vmap(q_online)(states).q_values
q_values = jnp.squeeze(q_values)
replay_chosen_q = jax.vmap(lambda x, y: x[y])(q_values, actions)
if mse_inf:
loss = jax.vmap(mse_loss)(target, replay_chosen_q)
else:
loss = jax.vmap(dqn_agent.huber_loss)(target, replay_chosen_q)
mean_loss = jnp.mean(loss_multipliers * loss)
return mean_loss, loss
rng, rng2, rng3, rng4 = jax.random.split(rng, 4)
def q_target(state):
return network_def.apply(target_params, state, rng=rng2)
def q_target_online(state):
return network_def.apply(online_params, state, rng=rng4)
if target_opt == 0:
target = dqn_agent.target_q(q_target, next_states, rewards, terminals, cumulative_gamma)
elif target_opt == 1:
#Double DQN
target = target_DDQN(q_target_online, q_target, next_states, rewards, terminals, cumulative_gamma)
elif target_opt == 2:
#Munchausen
target = target_m_dqn(q_target_online, q_target, states,next_states,actions,rewards,terminals,
cumulative_gamma,tau,alpha,clip_value_min)
else:
        raise ValueError('Unknown target_opt: {}'.format(target_opt))
grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
(mean_loss, loss), grad = grad_fn(online_params, rng3, target, loss_weights)
optimizer = optimizer.apply_gradient(grad)
return optimizer, loss, mean_loss
def target_DDQN(model, target_network, next_states, rewards, terminals, cumulative_gamma):
"""Compute the target Q-value. Double DQN"""
next_q_values = jax.vmap(model, in_axes=(0))(next_states).q_values
next_q_values = jnp.squeeze(next_q_values)
replay_next_qt_max = jnp.argmax(next_q_values, axis=1)
next_q_state_values = jax.vmap(target_network, in_axes=(0))(next_states).q_values
q_values = jnp.squeeze(next_q_state_values)
replay_chosen_q = jax.vmap(lambda t, u: t[u])(q_values, replay_next_qt_max)
return jax.lax.stop_gradient(rewards + cumulative_gamma * replay_chosen_q *
(1. - terminals))
def stable_scaled_log_softmax(x, tau, axis=-1):
max_x = jnp.amax(x, axis=axis, keepdims=True)
y = x - max_x
tau_lse = max_x + tau * jnp.log(jnp.sum(jnp.exp(y / tau), axis=axis, keepdims=True))
return x - tau_lse
def stable_softmax(x, tau, axis=-1):
max_x = jnp.amax(x, axis=axis, keepdims=True)
y = x - max_x
return nn.softmax(y/tau, axis=axis)
def target_m_dqn(model, target_network, states, next_states, actions,rewards, terminals,
cumulative_gamma,tau,alpha,clip_value_min):
"""Compute the target Q-value. Munchausen DQN"""
#----------------------------------------
q_state_values = jax.vmap(target_network, in_axes=(0))(states).q_values
q_state_values = jnp.squeeze(q_state_values)
next_q_values = jax.vmap(target_network, in_axes=(0))(next_states).q_values
next_q_values = jnp.squeeze(next_q_values)
#----------------------------------------
tau_log_pi_next = stable_scaled_log_softmax(next_q_values, tau, axis=1)
pi_target = stable_softmax(next_q_values,tau, axis=1)
replay_log_policy = stable_scaled_log_softmax(q_state_values, tau, axis=1)
#----------------------------------------
replay_next_qt_softmax = jnp.sum((next_q_values-tau_log_pi_next)*pi_target,axis=1)
replay_action_one_hot = nn.one_hot(actions, q_state_values.shape[-1])
tau_log_pi_a = jnp.sum(replay_log_policy * replay_action_one_hot, axis=1)
#a_max=1
tau_log_pi_a = jnp.clip(tau_log_pi_a, a_min=clip_value_min,a_max=1)
munchausen_term = alpha * tau_log_pi_a
modified_bellman = (rewards + munchausen_term +cumulative_gamma * replay_next_qt_softmax *
(1. - jnp.float32(terminals)))
return jax.lax.stop_gradient(modified_bellman)
@functools.partial(jax.jit, static_argnums=(0, 4, 5, 6, 7, 8, 10, 11))
def select_action(network_def, params, state, rng, num_actions, eval_mode,
epsilon_eval, epsilon_train, epsilon_decay_period,
training_steps, min_replay_history, epsilon_fn):
epsilon = jnp.where(eval_mode,
epsilon_eval,
epsilon_fn(epsilon_decay_period,
training_steps,
min_replay_history,
epsilon_train))
rng, rng1, rng2, rng3 = jax.random.split(rng, num=4)
selected_action = jnp.argmax(network_def.apply(params, state, rng=rng3).q_values)
p = jax.random.uniform(rng1)
return rng, jnp.where(p <= epsilon,
jax.random.randint(rng2, (), 0, num_actions),
selected_action)
@gin.configurable
class JaxDQNAgentNew(dqn_agent.JaxDQNAgent):
"""A compact implementation of a simplified Rainbow agent."""
def __init__(self,
num_actions,
tau,
alpha=1,
clip_value_min=-10,
net_conf = None,
env = "CartPole",
normalize_obs = True,
hidden_layer=2,
neurons=512,
replay_scheme='prioritized',
noisy = False,
dueling = False,
initzer = 'xavier_uniform',
target_opt=0,
mse_inf=False,
network=networks.NatureDQNNetwork,
optimizer='adam',
epsilon_fn=dqn_agent.linearly_decaying_epsilon,
seed=None):
"""Initializes the agent and constructs the necessary components.
Args:
num_actions: int, number of actions the agent can take at any state.
observation_shape: tuple of ints or an int. If single int, the observation
is assumed to be a 2D square.
observation_dtype: DType, specifies the type of the observations. Note
that if your inputs are continuous, you should set this to jnp.float32.
stack_size: int, number of frames to use in state stack.
network: flax.nn Module that is initialized by shape in _create_network
below. See dopamine.jax.networks.RainbowNetwork as an example.
num_atoms: int, the number of buckets of the value function distribution.
vmax: float, the value distribution support is [-vmax, vmax].
gamma: float, discount factor with the usual RL meaning.
update_horizon: int, horizon at which updates are performed, the 'n' in
n-step update.
min_replay_history: int, number of transitions that should be experienced
before the agent begins training its value function.
update_period: int, period between DQN updates.
target_update_period: int, update period for the target network.
epsilon_fn: function expecting 4 parameters:
(decay_period, step, warmup_steps, epsilon). This function should return
the epsilon value used for exploration during training.
epsilon_train: float, the value to which the agent's epsilon is eventually
decayed during training.
epsilon_eval: float, epsilon used when evaluating the agent.
epsilon_decay_period: int, length of the epsilon decay schedule.
replay_scheme: str, 'prioritized' or 'uniform', the sampling scheme of the
replay memory.
optimizer: str, name of optimizer to use.
summary_writer: SummaryWriter object for outputting training statistics.
Summary writing disabled if set to None.
summary_writing_frequency: int, frequency with which summaries will be
written. Lower values will result in slower training.
allow_partial_reload: bool, whether we allow reloading a partial agent
(for instance, only the network parameters).
"""
        # Fall back to a time-based seed when none is supplied.
seed = int(time.time() * 1e6) if seed is None else seed
self._net_conf = net_conf
self._env = env
self._normalize_obs = normalize_obs
self._hidden_layer = hidden_layer
self._neurons=neurons
self._noisy = noisy
self._dueling = dueling
self._initzer = initzer
self._target_opt = target_opt
self._mse_inf = mse_inf
self._tau = tau
self._alpha = alpha
self._clip_value_min = clip_value_min
self._rng = jax.random.PRNGKey(seed)
super(JaxDQNAgentNew, self).__init__(
num_actions= num_actions,
network= functools.partial(network,
num_actions=num_actions,
net_conf=self._net_conf,
env=self._env,
normalize_obs=self._normalize_obs,
hidden_layer=self._hidden_layer,
neurons=self._neurons,
noisy=self._noisy,
dueling=self._dueling,
initzer=self._initzer),
optimizer=optimizer,
epsilon_fn=dqn_agent.identity_epsilon if self._noisy == True else epsilon_fn)
self._replay_scheme = replay_scheme
def _build_networks_and_optimizer(self):
self._rng, rng = jax.random.split(self._rng)
online_network_params = self.network_def.init(
rng, x=self.state, rng=self._rng)
optimizer_def = dqn_agent.create_optimizer(self._optimizer_name)
self.optimizer = optimizer_def.create(online_network_params)
self.target_network_params = copy.deepcopy(online_network_params)
def _build_replay_buffer(self):
"""Creates the prioritized replay buffer used by the agent."""
return prioritized_replay_buffer.OutOfGraphPrioritizedReplayBuffer(
observation_shape=self.observation_shape,
stack_size=self.stack_size,
update_horizon=self.update_horizon,
gamma=self.gamma,
observation_dtype=self.observation_dtype)
def _train_step(self):
"""Runs a single training step.
Runs training if both:
(1) A minimum number of frames have been added to the replay buffer.
(2) `training_steps` is a multiple of `update_period`.
Also, syncs weights from online_network to target_network if training steps
is a multiple of target update period.
"""
# Run a train op at the rate of self.update_period if enough training steps
# have been run. This matches the Nature DQN behaviour.
if self._replay.add_count > self.min_replay_history:
if self.training_steps % self.update_period == 0:
self._sample_from_replay_buffer()
if self._replay_scheme == 'prioritized':
# The original prioritized experience replay uses a linear exponent
# schedule 0.4 -> 1.0. Comparing the schedule to a fixed exponent of
# 0.5 on 5 games (Asterix, Pong, Q*Bert, Seaquest, Space Invaders)
# suggested a fixed exponent actually performs better, except on Pong.
probs = self.replay_elements['sampling_probabilities']
# Weight the loss by the inverse priorities.
loss_weights = 1.0 / jnp.sqrt(probs + 1e-10)
loss_weights /= jnp.max(loss_weights)
else:
loss_weights = jnp.ones(self.replay_elements['state'].shape[0])
self.optimizer, loss, mean_loss = train(self.network_def,
self.target_network_params,
self.optimizer,
self.replay_elements['state'],
self.replay_elements['action'],
self.replay_elements['next_state'],
self.replay_elements['reward'],
self.replay_elements['terminal'],
loss_weights,
self.cumulative_gamma,
self._target_opt,
self._mse_inf,
self._tau,
self._alpha,
self._clip_value_min,
self._rng)
if self._replay_scheme == 'prioritized':
# Rainbow and prioritized replay are parametrized by an exponent
# alpha, but in both cases it is set to 0.5 - for simplicity's sake we
# leave it as is here, using the more direct sqrt(). Taking the square
# root "makes sense", as we are dealing with a squared loss. Add a
# small nonzero value to the loss to avoid 0 priority items. While
# technically this may be okay, setting all items to 0 priority will
# cause troubles, and also result in 1.0 / 0.0 = NaN correction terms.
self._replay.set_priority(self.replay_elements['indices'],
jnp.sqrt(loss + 1e-10))
if (self.summary_writer is not None and
self.training_steps > 0 and
self.training_steps % self.summary_writing_frequency == 0):
summary = tf.compat.v1.Summary(value=[
tf.compat.v1.Summary.Value(tag='HuberLoss', simple_value=mean_loss)])
self.summary_writer.add_summary(summary, self.training_steps)
if self.training_steps % self.target_update_period == 0:
self._sync_weights()
self.training_steps += 1
def _store_transition(self,
last_observation,
action,
reward,
is_terminal,
priority=None):
"""Stores a transition when in training mode.
Stores the following tuple in the replay buffer (last_observation, action,
reward, is_terminal, priority).
Args:
last_observation: Last observation, type determined via observation_type
parameter in the replay_memory constructor.
action: An integer, the action taken.
reward: A float, the reward.
is_terminal: Boolean indicating if the current state is a terminal state.
priority: Float. Priority of sampling the transition. If None, the default
priority will be used. If replay scheme is uniform, the default priority
is 1. If the replay scheme is prioritized, the default priority is the
maximum ever seen [Schaul et al., 2015].
"""
if priority is None:
if self._replay_scheme == 'uniform':
priority = 1.
else:
priority = self._replay.sum_tree.max_recorded_priority
if not self.eval_mode:
self._replay.add(last_observation, action, reward, is_terminal, priority)
def begin_episode(self, observation):
"""Returns the agent's first action for this episode.
Args:
observation: numpy array, the environment's initial observation.
Returns:
int, the selected action.
"""
self._reset_state()
self._record_observation(observation)
if not self.eval_mode:
self._train_step()
self._rng, self.action = select_action(self.network_def,
self.online_params,
self.state,
self._rng,
self.num_actions,
self.eval_mode,
self.epsilon_eval,
self.epsilon_train,
self.epsilon_decay_period,
self.training_steps,
self.min_replay_history,
self.epsilon_fn)
self.action = onp.asarray(self.action)
return self.action
def step(self, reward, observation):
"""Records the most recent transition and returns the agent's next action.
We store the observation of the last time step since we want to store it
with the reward.
Args:
reward: float, the reward received from the agent's most recent action.
observation: numpy array, the most recent observation.
Returns:
int, the selected action.
"""
self._last_observation = self._observation
self._record_observation(observation)
if not self.eval_mode:
self._store_transition(self._last_observation, self.action, reward, False)
self._train_step()
self._rng, self.action = select_action(self.network_def,
self.online_params,
self.state,
self._rng,
self.num_actions,
self.eval_mode,
self.epsilon_eval,
self.epsilon_train,
self.epsilon_decay_period,
self.training_steps,
self.min_replay_history,
self.epsilon_fn)
self.action = onp.asarray(self.action)
return self.action
```
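A minimal, self-contained sketch (toy probabilities, only `jax` required) of how the prioritized-replay importance weights in `_train_step` above are formed: the inverse square root of the sampling probabilities, normalized so the largest weight is one.
```python
import jax.numpy as jnp
# Hypothetical sampling probabilities returned by the prioritized buffer.
probs = jnp.array([0.5, 0.1, 0.05, 0.2])
# Inverse square-root weighting, normalized to a max of 1, as in _train_step.
loss_weights = 1.0 / jnp.sqrt(probs + 1e-10)
loss_weights /= jnp.max(loss_weights)
print(loss_weights)  # the rarest transition (prob 0.05) gets weight 1.0
```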
#### File: revisiting_rainbow/Agents/implicit_quantile_agent_new.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import functools
import time
from dopamine.jax import networks
from dopamine.jax.agents.dqn import dqn_agent
from dopamine.replay_memory import prioritized_replay_buffer
from flax import linen as nn
import gin
import jax
import jax.numpy as jnp
import numpy as onp
import tensorflow as tf
import jax.scipy.special as scp
import jax.lax
@functools.partial(
jax.vmap,
in_axes=(None, None, None, 0, 0, 0, None, None, None, None, None),
out_axes=(None, 0))
def target_quantile_values_fun(network_def, online_params, target_params,
next_states, rewards, terminals,
num_tau_prime_samples, num_quantile_samples,
cumulative_gamma, double_dqn, rng):
rewards = jnp.tile(rewards, [num_tau_prime_samples])
is_terminal_multiplier = 1. - terminals.astype(jnp.float32)
# Incorporate terminal state to discount factor.
gamma_with_terminal = cumulative_gamma * is_terminal_multiplier
gamma_with_terminal = jnp.tile(gamma_with_terminal, [num_tau_prime_samples])
rng, rng1, rng2 = jax.random.split(rng, num=3)
# Compute Q-values which are used for action selection for the next states
# in the replay buffer. Compute the argmax over the Q-values.
if double_dqn:
outputs_action = network_def.apply(online_params,
next_states,
num_quantiles=num_quantile_samples,
rng=rng1)
else:
outputs_action = network_def.apply(target_params,
next_states,
num_quantiles=num_quantile_samples,
rng=rng1)
target_quantile_values_action = outputs_action.quantile_values
target_q_values = jnp.squeeze(
jnp.mean(target_quantile_values_action, axis=0))
# Shape: batch_size.
next_qt_argmax = jnp.argmax(target_q_values)
  # Get the indices of the maximum Q-value across the action dimension.
# Shape of next_qt_argmax: (num_tau_prime_samples x batch_size).
next_state_target_outputs = network_def.apply(
target_params,
next_states,
num_quantiles=num_tau_prime_samples,
rng=rng2)
next_qt_argmax = jnp.tile(next_qt_argmax, [num_tau_prime_samples])
target_quantile_vals = (
jax.vmap(lambda x, y: x[y])(next_state_target_outputs.quantile_values,
next_qt_argmax))
target_quantile_vals = rewards + gamma_with_terminal * target_quantile_vals
# We return with an extra dimension, which is expected by train.
return rng, jax.lax.stop_gradient(target_quantile_vals[:, None])
def stable_scaled_log_softmax(x, tau, axis=-1):
max_x = jnp.amax(x, axis=axis, keepdims=True)
y = x - max_x
tau_lse = max_x + tau * jnp.log(jnp.sum(jnp.exp(y / tau), axis=axis, keepdims=True))
return x - tau_lse
def stable_softmax(x, tau, axis=-1):
max_x = jnp.amax(x, axis=axis, keepdims=True)
y = x - max_x
return jax.nn.softmax(y/tau, axis=axis)
@functools.partial(
jax.vmap,
in_axes=(None, None, None, 0, 0, 0, 0, 0, None, None, None, None, None,None, None, None,None),
out_axes=(None, 0))
def munchau_target_quantile_values_fun(network_def, online_network, target_params,
states,actions,next_states, rewards, terminals,
num_tau_prime_samples, num_quantile_samples,
cumulative_gamma, double_dqn, rng,tau,alpha,clip_value_min,num_actions):
  # Build the Munchausen target for return values at the given quantiles.
del double_dqn
is_terminal_multiplier = 1. - terminals.astype(jnp.float32)
# Incorporate terminal state to discount factor.
gamma_with_terminal = cumulative_gamma * is_terminal_multiplier
gamma_with_terminal = jnp.tile(gamma_with_terminal, [num_tau_prime_samples])
rng, rng1, rng2 = jax.random.split(rng, num=3)
#------------------------------------------------------------------------
replay_net_target_outputs = network_def.apply(target_params, next_states,num_quantiles=num_tau_prime_samples, rng=rng)
replay_net_target_quantile_values = replay_net_target_outputs.quantile_values
target_next_action = network_def.apply(target_params, next_states,num_quantiles=num_quantile_samples, rng=rng1)
target_next_quantile_values_action = target_next_action.quantile_values
_replay_next_target_q_values = jnp.squeeze(jnp.mean(target_next_quantile_values_action, axis=0))
outputs_action = network_def.apply(target_params, states,num_quantiles=num_quantile_samples, rng=rng2)
q_state_values = outputs_action.quantile_values
_replay_target_q_values = jnp.squeeze(jnp.mean(q_state_values, axis=0))
#------------------------------------------------------------------------
replay_action_one_hot = jax.nn.one_hot(actions,num_actions)
replay_next_log_policy = stable_scaled_log_softmax(_replay_next_target_q_values, tau, axis=0)
replay_next_policy = stable_softmax(_replay_next_target_q_values,tau, axis=0)
replay_log_policy = stable_scaled_log_softmax(_replay_target_q_values, tau, axis=0)
#------------------------------------------------------------------------
tau_log_pi_a = jnp.sum(replay_log_policy * replay_action_one_hot, axis=0)
tau_log_pi_a = jnp.clip(tau_log_pi_a, a_min=clip_value_min,a_max=0)
munchausen_term = alpha * tau_log_pi_a
rewards = rewards + munchausen_term
rewards = jnp.tile(rewards, [num_tau_prime_samples])
weighted_logits = (replay_next_policy * (replay_net_target_quantile_values-replay_next_log_policy))
target_quantile_values = jnp.sum(weighted_logits, axis=1)
target_quantile_values = rewards + gamma_with_terminal * target_quantile_values
return rng, jax.lax.stop_gradient(target_quantile_values[:, None])
@functools.partial(jax.jit, static_argnums=(0, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19))
def train(network_def, target_params, optimizer, states, actions, next_states, rewards,
terminals, loss_weights, target_opt, num_tau_samples, num_tau_prime_samples,
num_quantile_samples, cumulative_gamma, double_dqn, kappa, tau,alpha,clip_value_min, num_actions,rng):
"""Run a training step."""
online_params = optimizer.target
def loss_fn(params, rng_input, target_quantile_vals, loss_multipliers):
def online(state):
return network_def.apply(params, state, num_quantiles=num_tau_samples, rng=rng_input)
model_output = jax.vmap(online)(states)
quantile_values = model_output.quantile_values
quantiles = model_output.quantiles
chosen_action_quantile_values = jax.vmap(lambda x, y: x[:, y][:, None])(
quantile_values, actions)
    # Shape of bellman_errors and huber_loss:
# batch_size x num_tau_prime_samples x num_tau_samples x 1.
bellman_errors = (target_quantile_vals[:, :, None, :] -
chosen_action_quantile_values[:, None, :, :])
# The huber loss (see Section 2.3 of the paper) is defined via two cases:
# case_one: |bellman_errors| <= kappa
# case_two: |bellman_errors| > kappa
huber_loss_case_one = (
(jnp.abs(bellman_errors) <= kappa).astype(jnp.float32) *
0.5 * bellman_errors ** 2)
huber_loss_case_two = (
(jnp.abs(bellman_errors) > kappa).astype(jnp.float32) *
kappa * (jnp.abs(bellman_errors) - 0.5 * kappa))
huber_loss = huber_loss_case_one + huber_loss_case_two
# Tile by num_tau_prime_samples along a new dimension. Shape is now
# batch_size x num_tau_prime_samples x num_tau_samples x 1.
# These quantiles will be used for computation of the quantile huber loss
# below (see section 2.3 of the paper).
quantiles = jnp.tile(quantiles[:, None, :, :],
[1, num_tau_prime_samples, 1, 1]).astype(jnp.float32)
# Shape: batch_size x num_tau_prime_samples x num_tau_samples x 1.
quantile_huber_loss = (jnp.abs(quantiles - jax.lax.stop_gradient(
(bellman_errors < 0).astype(jnp.float32))) * huber_loss) / kappa
# Sum over current quantile value (num_tau_samples) dimension,
# average over target quantile value (num_tau_prime_samples) dimension.
# Shape: batch_size x num_tau_prime_samples x 1.
loss = jnp.sum(quantile_huber_loss, axis=2)
loss = jnp.squeeze(jnp.mean(loss, axis=1), axis=-1)
mean_loss = jnp.mean(loss_multipliers * loss)
return mean_loss, loss
grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
if target_opt == 0:
rng, target_quantile_vals = target_quantile_values_fun(
network_def,
online_params,
target_params,
next_states,
rewards,
terminals,
num_tau_prime_samples,
num_quantile_samples,
cumulative_gamma,
double_dqn,
rng)
elif target_opt == 1:
rng, target_quantile_vals = munchau_target_quantile_values_fun(
network_def,
online_params,
target_params,
states,
actions,
next_states,
rewards,
terminals,
num_tau_prime_samples,
num_quantile_samples,
cumulative_gamma,
double_dqn,
rng,
tau,
alpha,
clip_value_min,
num_actions
)
  else:
    raise ValueError(f'Unknown target_opt: {target_opt}')
rng, rng_input = jax.random.split(rng)
(mean_loss, loss), grad = grad_fn(online_params, rng_input, target_quantile_vals, loss_weights)
optimizer = optimizer.apply_gradient(grad)
return rng, optimizer, loss, mean_loss
@functools.partial(jax.jit, static_argnums=(0, 4, 5, 6, 7, 8, 9, 11, 12, 13))
def select_action(network_def, params, state, rng, num_quantile_samples, num_actions,
eval_mode, epsilon_eval, epsilon_train, epsilon_decay_period,
training_steps, min_replay_history, epsilon_fn, tau, model):
epsilon = jnp.where(eval_mode,
epsilon_eval,
epsilon_fn(epsilon_decay_period,
training_steps,
min_replay_history,
epsilon_train))
rng, rng1, rng2 = jax.random.split(rng, num=3)
selected_action = jnp.argmax(jnp.mean(
network_def.apply(params, state,
num_quantiles=num_quantile_samples,
rng=rng2).quantile_values, axis=0),
axis=0)
p = jax.random.uniform(rng1)
return rng, jnp.where(p <= epsilon,
jax.random.randint(rng2, (), 0, num_actions),
selected_action)
@gin.configurable
class JaxImplicitQuantileAgentNew(dqn_agent.JaxDQNAgent):
"""An extension of Rainbow to perform implicit quantile regression."""
def __init__(self,
num_actions,
tau,
alpha=1,
clip_value_min=-10,
target_opt=0,
net_conf = None,
env = "CartPole",
hidden_layer=2,
neurons=512,
noisy = False,
dueling = False,
initzer = 'variance_scaling',
observation_shape=dqn_agent.NATURE_DQN_OBSERVATION_SHAPE,
observation_dtype=dqn_agent.NATURE_DQN_DTYPE,
stack_size=dqn_agent.NATURE_DQN_STACK_SIZE,
network=networks.ImplicitQuantileNetwork,
kappa=1.0,
num_tau_samples=32,
num_tau_prime_samples=32,
num_quantile_samples=32,
quantile_embedding_dim=64,
double_dqn=False,
gamma=0.99,
update_horizon=1,
min_replay_history=20000,
update_period=4,
target_update_period=8000,
epsilon_fn=dqn_agent.linearly_decaying_epsilon,
epsilon_train=0.01,
epsilon_eval=0.001,
epsilon_decay_period=250000,
replay_scheme='prioritized',
optimizer='adam',
summary_writer=None,
summary_writing_frequency=500,
seed=None):
"""Initializes the agent and constructs the necessary components.
Most of this constructor's parameters are IQN-specific hyperparameters whose
values are taken from Dabney et al. (2018).
Args:
num_actions: int, number of actions the agent can take at any state.
observation_shape: tuple of ints or an int. If single int, the observation
is assumed to be a 2D square.
observation_dtype: DType, specifies the type of the observations. Note
that if your inputs are continuous, you should set this to jnp.float32.
stack_size: int, number of frames to use in state stack.
network: flax.nn Module that is initialized by shape in _create_network
below. See dopamine.jax.networks.JaxImplicitQuantileNetwork as an
example.
kappa: float, Huber loss cutoff.
num_tau_samples: int, number of online quantile samples for loss
estimation.
num_tau_prime_samples: int, number of target quantile samples for loss
estimation.
num_quantile_samples: int, number of quantile samples for computing
Q-values.
quantile_embedding_dim: int, embedding dimension for the quantile input.
double_dqn: boolean, whether to perform double DQN style learning
as described in Van Hasselt et al.: https://arxiv.org/abs/1509.06461.
gamma: float, discount factor with the usual RL meaning.
update_horizon: int, horizon at which updates are performed, the 'n' in
n-step update.
min_replay_history: int, number of transitions that should be experienced
before the agent begins training its value function.
update_period: int, period between DQN updates.
target_update_period: int, update period for the target network.
epsilon_fn: function expecting 4 parameters:
(decay_period, step, warmup_steps, epsilon). This function should return
the epsilon value used for exploration during training.
epsilon_train: float, the value to which the agent's epsilon is eventually
decayed during training.
epsilon_eval: float, epsilon used when evaluating the agent.
epsilon_decay_period: int, length of the epsilon decay schedule.
replay_scheme: str, 'prioritized' or 'uniform', the sampling scheme of the
replay memory.
optimizer: str, name of optimizer to use.
summary_writer: SummaryWriter object for outputting training statistics.
Summary writing disabled if set to None.
summary_writing_frequency: int, frequency with which summaries will be
written. Lower values will result in slower training.
"""
seed = int(time.time() * 1e6) if seed is None else seed
self._net_conf = net_conf
self._env = env
self._hidden_layer = hidden_layer
self._neurons=neurons
self._noisy = noisy
self._dueling = dueling
self._initzer = initzer
self._tau = tau
self._alpha = alpha
self._clip_value_min = clip_value_min
self._target_opt = target_opt
self._rng = jax.random.PRNGKey(seed)
self.kappa = kappa
self._replay_scheme = replay_scheme
# num_tau_samples = N below equation (3) in the paper.
self.num_tau_samples = num_tau_samples
# num_tau_prime_samples = N' below equation (3) in the paper.
self.num_tau_prime_samples = num_tau_prime_samples
# num_quantile_samples = k below equation (3) in the paper.
self.num_quantile_samples = num_quantile_samples
# quantile_embedding_dim = n above equation (4) in the paper.
self.quantile_embedding_dim = quantile_embedding_dim
# option to perform double dqn.
self.double_dqn = double_dqn
super(JaxImplicitQuantileAgentNew, self).__init__(
num_actions=num_actions,
observation_shape=observation_shape,
observation_dtype=observation_dtype,
stack_size=stack_size,
network=functools.partial(network,
num_actions=num_actions,
net_conf=self._net_conf,
env=self._env,
hidden_layer=self._hidden_layer,
neurons=self._neurons,
noisy=self._noisy,
dueling=self._dueling,
initzer=self._initzer,
quantile_embedding_dim=quantile_embedding_dim),
gamma=gamma,
update_horizon=update_horizon,
min_replay_history=min_replay_history,
update_period=update_period,
target_update_period=target_update_period,
epsilon_fn=epsilon_fn,
epsilon_train=epsilon_train,
epsilon_eval=epsilon_eval,
epsilon_decay_period=epsilon_decay_period,
optimizer=optimizer,
summary_writer=summary_writer,
summary_writing_frequency=summary_writing_frequency)
self._num_actions=num_actions
self._replay = self._build_replay_buffer()
def _build_networks_and_optimizer(self):
self._rng, rng = jax.random.split(self._rng)
online_network_params = self.network_def.init(
rng, x=self.state, num_quantiles=self.num_tau_samples, rng=self._rng)
optimizer_def = dqn_agent.create_optimizer(self._optimizer_name)
self.optimizer = optimizer_def.create(online_network_params)
self.target_network_params = copy.deepcopy(online_network_params)
def begin_episode(self, observation):
"""Returns the agent's first action for this episode.
Args:
observation: numpy array, the environment's initial observation.
Returns:
int, the selected action.
"""
self._reset_state()
self._record_observation(observation)
if not self.eval_mode:
self._train_step()
self._rng, self.action = select_action(self.network_def,
self.online_params,
self.state,
self._rng,
self.num_quantile_samples,
self.num_actions,
self.eval_mode,
self.epsilon_eval,
self.epsilon_train,
self.epsilon_decay_period,
self.training_steps,
self.min_replay_history,
self.epsilon_fn,
self._tau,
self.optimizer)
self.action = onp.asarray(self.action)
return self.action
def step(self, reward, observation):
"""Records the most recent transition and returns the agent's next action.
We store the observation of the last time step since we want to store it
with the reward.
Args:
reward: float, the reward received from the agent's most recent action.
observation: numpy array, the most recent observation.
Returns:
int, the selected action.
"""
self._last_observation = self._observation
self._record_observation(observation)
if not self.eval_mode:
self._store_transition(self._last_observation, self.action, reward, False)
self._train_step()
self._rng, self.action = select_action(self.network_def,
self.online_params,
self.state,
self._rng,
self.num_quantile_samples,
self.num_actions,
self.eval_mode,
self.epsilon_eval,
self.epsilon_train,
self.epsilon_decay_period,
self.training_steps,
self.min_replay_history,
self.epsilon_fn,
self._tau,
self.optimizer)
self.action = onp.asarray(self.action)
return self.action
def _build_replay_buffer(self):
"""Creates the replay buffer used by the agent."""
if self._replay_scheme not in ['uniform', 'prioritized']:
raise ValueError('Invalid replay scheme: {}'.format(self._replay_scheme))
# Both replay schemes use the same data structure, but the 'uniform' scheme
# sets all priorities to the same value (which yields uniform sampling).
return prioritized_replay_buffer.OutOfGraphPrioritizedReplayBuffer(
observation_shape=self.observation_shape,
stack_size=self.stack_size,
update_horizon=self.update_horizon,
gamma=self.gamma,
observation_dtype=self.observation_dtype)
def _train_step(self):
"""Runs a single training step.
Runs training if both:
(1) A minimum number of frames have been added to the replay buffer.
(2) `training_steps` is a multiple of `update_period`.
Also, syncs weights from online_network to target_network if training steps
is a multiple of target update period.
"""
if self._replay.add_count > self.min_replay_history:
if self.training_steps % self.update_period == 0:
self._sample_from_replay_buffer()
if self._replay_scheme == 'prioritized':
# The original prioritized experience replay uses a linear exponent
# schedule 0.4 -> 1.0. Comparing the schedule to a fixed exponent of
# 0.5 on 5 games (Asterix, Pong, Q*Bert, Seaquest, Space Invaders)
# suggested a fixed exponent actually performs better, except on Pong.
probs = self.replay_elements['sampling_probabilities']
loss_weights = 1.0 / jnp.sqrt(probs + 1e-10)
loss_weights /= jnp.max(loss_weights)
else:
loss_weights = jnp.ones(self.replay_elements['state'].shape[0])
self._rng, self.optimizer, loss, mean_loss= train(
self.network_def,
self.target_network_params,
self.optimizer,
self.replay_elements['state'],
self.replay_elements['action'],
self.replay_elements['next_state'],
self.replay_elements['reward'],
self.replay_elements['terminal'],
loss_weights,
self._target_opt,
self.num_tau_samples,
self.num_tau_prime_samples,
self.num_quantile_samples,
self.cumulative_gamma,
self.double_dqn,
self.kappa,
self._tau,
self._alpha,
self._clip_value_min,
self._num_actions,
self._rng)
if self._replay_scheme == 'prioritized':
# Rainbow and prioritized replay are parametrized by an exponent
# alpha, but in both cases it is set to 0.5 - for simplicity's sake we
# leave it as is here, using the more direct sqrt(). Taking the square
# root "makes sense", as we are dealing with a squared loss. Add a
# small nonzero value to the loss to avoid 0 priority items. While
# technically this may be okay, setting all items to 0 priority will
# cause troubles, and also result in 1.0 / 0.0 = NaN correction terms.
self._replay.set_priority(self.replay_elements['indices'],
jnp.sqrt(loss + 1e-10))
if (self.summary_writer is not None and
self.training_steps > 0 and
self.training_steps % self.summary_writing_frequency == 0):
summary = tf.compat.v1.Summary(value=[
tf.compat.v1.Summary.Value(tag='ImplicitLoss',
simple_value=mean_loss)])
self.summary_writer.add_summary(summary, self.training_steps)
if self.training_steps % self.target_update_period == 0:
self._sync_weights()
self.training_steps += 1
def _store_transition(self,
last_observation,
action,
reward,
is_terminal,
priority=None):
"""Stores a transition when in training mode.
Stores the following tuple in the replay buffer (last_observation, action,
reward, is_terminal, priority).
Args:
last_observation: Last observation, type determined via observation_type
parameter in the replay_memory constructor.
action: An integer, the action taken.
reward: A float, the reward.
is_terminal: Boolean indicating if the current state is a terminal state.
priority: Float. Priority of sampling the transition. If None, the default
priority will be used. If replay scheme is uniform, the default priority
is 1. If the replay scheme is prioritized, the default priority is the
maximum ever seen [Schaul et al., 2015].
"""
if priority is None:
if self._replay_scheme == 'uniform':
priority = 1.
else:
priority = self._replay.sum_tree.max_recorded_priority
if not self.eval_mode:
self._replay.add(last_observation, action, reward, is_terminal, priority)
```
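The `stable_scaled_log_softmax` helper above computes the tau-scaled log-softmax, tau * log_softmax(x / tau), in a numerically stable way. A quick stand-alone check with toy numbers (only `jax` assumed; the values of `x` and `tau` are illustrative):
```python
import jax
import jax.numpy as jnp
def stable_scaled_log_softmax(x, tau, axis=-1):
    # Same computation as in the agent above: x - tau * logsumexp(x / tau),
    # written so the exponentials never overflow.
    max_x = jnp.amax(x, axis=axis, keepdims=True)
    y = x - max_x
    tau_lse = max_x + tau * jnp.log(jnp.sum(jnp.exp(y / tau), axis=axis, keepdims=True))
    return x - tau_lse
x = jnp.array([1.0, 3.0, -2.0])
tau = 0.03  # the Munchausen temperature is typically small
print(jnp.allclose(stable_scaled_log_softmax(x, tau),
                   tau * jax.nn.log_softmax(x / tau), atol=1e-4))  # True
```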
#### File: revisiting_rainbow/Agents/quantile_agent_new.py
```python
import copy
import time
import functools
from dopamine.jax import networks
from dopamine.jax.agents.dqn import dqn_agent
from dopamine.replay_memory import prioritized_replay_buffer #check
import gin
import jax
import jax.numpy as jnp
import numpy as onp
import tensorflow as tf
@functools.partial(jax.vmap, in_axes=(None, None, 0, 0, 0, None))
def target_distributionDouble(model, target_network, next_states, rewards, terminals,
                              cumulative_gamma):
  """Builds the Quantile target distribution as per Dabney et al. (2017).
  Args:
    model: Jax Module used for the online network (selects the argmax action).
    target_network: Jax Module used for the target network.
next_states: numpy array of batched next states.
rewards: numpy array of batched rewards.
terminals: numpy array of batched terminals.
cumulative_gamma: float, cumulative gamma to use (static_argnum).
Returns:
The target distribution from the replay.
"""
is_terminal_multiplier = 1. - terminals.astype(jnp.float32)
# Incorporate terminal state to discount factor.
gamma_with_terminal = cumulative_gamma * is_terminal_multiplier
next_state_target_outputs = model(next_states)
q_values = jnp.squeeze(next_state_target_outputs.q_values)
next_qt_argmax = jnp.argmax(q_values)
next_dist = target_network(next_states)
logits = jnp.squeeze(next_dist.logits)
next_logits = logits[next_qt_argmax]
return jax.lax.stop_gradient(rewards + gamma_with_terminal * next_logits)
@functools.partial(jax.vmap, in_axes=(None, 0, 0, 0, None))
def target_distribution(target_network, next_states, rewards, terminals,
cumulative_gamma):
"""Builds the Quantile target distribution as per Dabney et al. (2017).
Args:
target_network: Jax Module used for the target network.
next_states: numpy array of batched next states.
rewards: numpy array of batched rewards.
terminals: numpy array of batched terminals.
cumulative_gamma: float, cumulative gamma to use (static_argnum).
Returns:
The target distribution from the replay.
"""
is_terminal_multiplier = 1. - terminals.astype(jnp.float32)
# Incorporate terminal state to discount factor.
gamma_with_terminal = cumulative_gamma * is_terminal_multiplier
next_state_target_outputs = target_network(next_states)
q_values = jnp.squeeze(next_state_target_outputs.q_values)
next_qt_argmax = jnp.argmax(q_values)
logits = jnp.squeeze(next_state_target_outputs.logits)
next_logits = logits[next_qt_argmax]
return jax.lax.stop_gradient(rewards + gamma_with_terminal * next_logits)
@functools.partial(jax.jit, static_argnums=(0, 9, 10, 11, 12))
def train(network_def, target_params, optimizer, states, actions, next_states, rewards,
terminals, loss_weights, kappa, num_atoms, cumulative_gamma, double_dqn, rng):
"""Run a training step."""
online_params = optimizer.target
def loss_fn(params,rng_input, target, loss_multipliers):
def q_online(state):
return network_def.apply(params, state, rng=rng_input)
logits = jax.vmap(q_online)(states).logits
logits = jnp.squeeze(logits)
# Fetch the logits for its selected action. We use vmap to perform this
# indexing across the batch.
chosen_action_logits = jax.vmap(lambda x, y: x[y])(logits, actions)
bellman_errors = (target[:, None, :] -
chosen_action_logits[:, :, None]) # Input `u' of Eq. 9.
# Eq. 9 of paper.
huber_loss = (
(jnp.abs(bellman_errors) <= kappa).astype(jnp.float32) *
0.5 * bellman_errors ** 2 +
(jnp.abs(bellman_errors) > kappa).astype(jnp.float32) *
kappa * (jnp.abs(bellman_errors) - 0.5 * kappa))
tau_hat = ((jnp.arange(num_atoms, dtype=jnp.float32) + 0.5) /
num_atoms) # Quantile midpoints. See Lemma 2 of paper.
# Eq. 10 of paper.
tau_bellman_diff = jnp.abs(
tau_hat[None, :, None] - (bellman_errors < 0).astype(jnp.float32))
quantile_huber_loss = tau_bellman_diff * huber_loss
# Sum over tau dimension, average over target value dimension.
loss = jnp.sum(jnp.mean(quantile_huber_loss, 2), 1)
mean_loss = jnp.mean(loss_multipliers * loss)
return mean_loss, loss
rng, rng2, rng3, rng4 = jax.random.split(rng, 4)
def q_target(state):
return network_def.apply(target_params, state, rng=rng2)
def q_target_online(state):
return network_def.apply(online_params, state, rng=rng4)
if double_dqn:
target = target_distributionDouble(q_target_online, q_target, next_states, rewards, terminals, cumulative_gamma)
else:
target = target_distribution(q_target, next_states, rewards, terminals, cumulative_gamma)
grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
(mean_loss, loss), grad = grad_fn(online_params, rng3, target, loss_weights)
optimizer = optimizer.apply_gradient(grad)
return optimizer, loss, mean_loss
@functools.partial(jax.jit, static_argnums=(0, 4, 5, 6, 7, 8, 10, 11))
def select_action(network_def, params, state, rng, num_actions, eval_mode,
epsilon_eval, epsilon_train, epsilon_decay_period,
training_steps, min_replay_history, epsilon_fn):
epsilon = jnp.where(eval_mode,
epsilon_eval,
epsilon_fn(epsilon_decay_period,
training_steps,
min_replay_history,
epsilon_train))
rng, rng1, rng2, rng3 = jax.random.split(rng, num=4)
selected_action = jnp.argmax(network_def.apply(params, state, rng=rng3).q_values)
p = jax.random.uniform(rng1)
return rng, jnp.where(p <= epsilon,
jax.random.randint(rng2, (), 0, num_actions),
selected_action)
@gin.configurable
class JaxQuantileAgentNew(dqn_agent.JaxDQNAgent):
"""An implementation of Quantile regression DQN agent."""
def __init__(self,
num_actions,
kappa=1.0,
num_atoms=200,
noisy = False,
dueling = False,
initzer = 'variance_scaling',
net_conf = None,
env = "CartPole",
normalize_obs = True,
hidden_layer=2,
neurons=512,
double_dqn=False,
replay_scheme='prioritized',
optimizer='adam',
network=networks.QuantileNetwork,
epsilon_fn=dqn_agent.linearly_decaying_epsilon,
seed=None):
"""Initializes the agent and constructs the Graph.
Args:
num_actions: Int, number of actions the agent can take at any state.
observation_shape: tuple of ints or an int. If single int, the observation
is assumed to be a 2D square.
observation_dtype: DType, specifies the type of the observations. Note
that if your inputs are continuous, you should set this to jnp.float32.
stack_size: int, number of frames to use in state stack.
      network: flax.nn Module that is initialized by shape in _create_network
        below. See dopamine.jax.networks.QuantileNetwork as an example.
kappa: Float, Huber loss cutoff.
num_atoms: Int, the number of buckets for the value function distribution.
gamma: Float, exponential decay factor as commonly used in the RL
literature.
update_horizon: Int, horizon at which updates are performed, the 'n' in
n-step update.
min_replay_history: Int, number of stored transitions for training to
start.
update_period: Int, period between DQN updates.
      target_update_period: Int, update period for the target network.
epsilon_fn: Function expecting 4 parameters: (decay_period, step,
warmup_steps, epsilon), and which returns the epsilon value used for
exploration during training.
epsilon_train: Float, final epsilon for training.
epsilon_eval: Float, epsilon during evaluation.
epsilon_decay_period: Int, number of steps for epsilon to decay.
replay_scheme: String, replay memory scheme to be used. Choices are:
uniform - Standard (DQN) replay buffer (Mnih et al., 2015)
prioritized - Prioritized replay buffer (Schaul et al., 2015)
optimizer: str, name of optimizer to use.
summary_writer: SummaryWriter object for outputting training statistics.
Summary writing disabled if set to None.
summary_writing_frequency: int, frequency with which summaries will be
written. Lower values will result in slower training.
allow_partial_reload: bool, whether we allow reloading a partial agent
(for instance, only the network parameters).
"""
seed = int(time.time() * 1e6) if seed is None else seed
self._num_atoms = num_atoms
self._kappa = kappa
self._replay_scheme = replay_scheme
self._double_dqn = double_dqn
self._net_conf = net_conf
self._env = env
self._normalize_obs = normalize_obs
self._hidden_layer= hidden_layer
self._neurons=neurons
self._noisy = noisy
self._dueling = dueling
self._initzer = initzer
self._rng = jax.random.PRNGKey(seed)
super(JaxQuantileAgentNew, self).__init__(
num_actions=num_actions,
optimizer=optimizer,
epsilon_fn = dqn_agent.identity_epsilon if self._noisy == True else epsilon_fn,
network=functools.partial(network, num_atoms=self._num_atoms , net_conf=self._net_conf,
env=self._env,
normalize_obs=self._normalize_obs,
hidden_layer=self._hidden_layer,
neurons=self._neurons,
noisy=self._noisy,
dueling=self._dueling,
initzer=self._initzer))
def _build_networks_and_optimizer(self):
self._rng, rng = jax.random.split(self._rng)
online_network_params = self.network_def.init(rng, x=self.state, rng=self._rng)
optimizer_def = dqn_agent.create_optimizer(self._optimizer_name)
self.optimizer = optimizer_def.create(online_network_params)
self.target_network_params = copy.deepcopy(online_network_params)
def _build_replay_buffer(self):
"""Creates the replay buffer used by the agent."""
if self._replay_scheme not in ['uniform', 'prioritized']:
raise ValueError('Invalid replay scheme: {}'.format(self._replay_scheme))
# Both replay schemes use the same data structure, but the 'uniform' scheme
# sets all priorities to the same value (which yields uniform sampling).
return prioritized_replay_buffer.OutOfGraphPrioritizedReplayBuffer(
observation_shape=self.observation_shape,
stack_size=self.stack_size,
update_horizon=self.update_horizon,
gamma=self.gamma,
observation_dtype=self.observation_dtype)
def begin_episode(self, observation):
self._reset_state()
self._record_observation(observation)
if not self.eval_mode:
self._train_step()
self._rng, self.action = select_action(self.network_def,
self.online_params,
self.state,
self._rng,
self.num_actions,
self.eval_mode,
self.epsilon_eval,
self.epsilon_train,
self.epsilon_decay_period,
self.training_steps,
self.min_replay_history,
self.epsilon_fn)
self.action = onp.asarray(self.action)
return self.action
def step(self, reward, observation):
self._last_observation = self._observation
self._record_observation(observation)
if not self.eval_mode:
self._store_transition(self._last_observation, self.action, reward, False)
self._train_step()
self._rng, self.action = select_action(self.network_def,
self.online_params,
self.state,
self._rng,
self.num_actions,
self.eval_mode,
self.epsilon_eval,
self.epsilon_train,
self.epsilon_decay_period,
self.training_steps,
self.min_replay_history,
self.epsilon_fn)
self.action = onp.asarray(self.action)
return self.action
def _train_step(self):
"""Runs a single training step.
Runs training if both:
(1) A minimum number of frames have been added to the replay buffer.
(2) `training_steps` is a multiple of `update_period`.
Also, syncs weights from online_network to target_network if training steps
is a multiple of target update period.
"""
if self._replay.add_count > self.min_replay_history:
if self.training_steps % self.update_period == 0:
self._sample_from_replay_buffer()
if self._replay_scheme == 'prioritized':
# The original prioritized experience replay uses a linear exponent
# schedule 0.4 -> 1.0. Comparing the schedule to a fixed exponent of
# 0.5 on 5 games (Asterix, Pong, Q*Bert, Seaquest, Space Invaders)
# suggested a fixed exponent actually performs better, except on Pong.
probs = self.replay_elements['sampling_probabilities']
loss_weights = 1.0 / jnp.sqrt(probs + 1e-10)
loss_weights /= jnp.max(loss_weights)
else:
loss_weights = jnp.ones(self.replay_elements['state'].shape[0])
self.optimizer, loss, mean_loss = train(
self.network_def,
self.target_network_params,
self.optimizer,
self.replay_elements['state'],
self.replay_elements['action'],
self.replay_elements['next_state'],
self.replay_elements['reward'],
self.replay_elements['terminal'],
loss_weights,
self._kappa,
self._num_atoms,
self.cumulative_gamma,
self._double_dqn,
self._rng)
if self._replay_scheme == 'prioritized':
# Rainbow and prioritized replay are parametrized by an exponent
# alpha, but in both cases it is set to 0.5 - for simplicity's sake we
# leave it as is here, using the more direct sqrt(). Taking the square
# root "makes sense", as we are dealing with a squared loss. Add a
# small nonzero value to the loss to avoid 0 priority items. While
# technically this may be okay, setting all items to 0 priority will
# cause troubles, and also result in 1.0 / 0.0 = NaN correction terms.
self._replay.set_priority(self.replay_elements['indices'],
jnp.sqrt(loss + 1e-10))
if self.summary_writer is not None:
summary = tf.compat.v1.Summary(value=[
tf.compat.v1.Summary.Value(tag='QuantileLoss',
simple_value=mean_loss)])
self.summary_writer.add_summary(summary, self.training_steps)
if self.training_steps % self.target_update_period == 0:
self._sync_weights()
self.training_steps += 1
def _store_transition(self,
last_observation,
action,
reward,
is_terminal,
priority=None):
"""Stores a transition when in training mode.
Stores the following tuple in the replay buffer (last_observation, action,
reward, is_terminal, priority).
Args:
last_observation: Last observation, type determined via observation_type
parameter in the replay_memory constructor.
action: An integer, the action taken.
reward: A float, the reward.
is_terminal: Boolean indicating if the current state is a terminal state.
priority: Float. Priority of sampling the transition. If None, the default
priority will be used. If replay scheme is uniform, the default priority
is 1. If the replay scheme is prioritized, the default priority is the
maximum ever seen [Schaul et al., 2015].
"""
if priority is None:
if self._replay_scheme == 'uniform':
priority = 1.
else:
priority = self._replay.sum_tree.max_recorded_priority
if not self.eval_mode:
self._replay.add(last_observation, action, reward, is_terminal, priority)
```
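A simplified, per-quantile illustration of the quantile-Huber weighting used in the loss above (NumPy only, toy numbers); in the agent the Bellman errors actually form a target-quantile by predicted-quantile matrix, but the element-wise weighting is the same:
```python
import numpy as np
num_atoms = 4
kappa = 1.0
# Quantile midpoints, as in the loss above (Lemma 2 of the QR-DQN paper).
tau_hat = (np.arange(num_atoms) + 0.5) / num_atoms  # [0.125, 0.375, 0.625, 0.875]
# Toy Bellman errors u, one per predicted quantile.
u = np.array([-0.4, -0.1, 0.3, 2.0])
# Huber loss with cutoff kappa, then the asymmetric weight |tau_hat - 1{u < 0}|.
huber = np.where(np.abs(u) <= kappa, 0.5 * u**2, kappa * (np.abs(u) - 0.5 * kappa))
quantile_huber = np.abs(tau_hat - (u < 0).astype(float)) * huber
print(quantile_huber)
```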
#### File: revisiting_rainbow/revisiting_rainbow/minatar_env.py
```python
from dopamine.discrete_domains import atari_lib
from flax import nn
import gin
import jax
import jax.numpy as jnp
import minatar
gin.constant('minatar_env.ASTERIX_SHAPE', (10, 10, 4))
gin.constant('minatar_env.BREAKOUT_SHAPE', (10, 10, 4))
gin.constant('minatar_env.FREEWAY_SHAPE', (10, 10, 7))
gin.constant('minatar_env.SEAQUEST_SHAPE', (10, 10, 10))
gin.constant('minatar_env.SPACE_INVADERS_SHAPE', (10, 10, 6))
gin.constant('minatar_env.DTYPE', jnp.float64)
class MinAtarEnv(object):
def __init__(self, game_name):
self.env = minatar.Environment(env_name=game_name)
self.env.n = self.env.num_actions()
self.game_over = False
@property
def observation_space(self):
return self.env.state_shape()
@property
def action_space(self):
return self.env # Only used for the `n` parameter.
@property
def reward_range(self):
pass # Unused
@property
def metadata(self):
pass # Unused
def reset(self):
self.game_over = False
self.env.reset()
return self.env.state()
def step(self, action):
r, terminal = self.env.act(action)
self.game_over = terminal
return self.env.state(), r, terminal, None
@gin.configurable
def create_minatar_env(game_name):
return MinAtarEnv(game_name)
``` |
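A hedged usage sketch for the wrapper above; it assumes the `minatar` package is installed and that this file is importable as `minatar_env` (the game name `'breakout'` is one of MinAtar's built-in environments):
```python
# Assumes `pip install minatar` and that this file is on the path as minatar_env.
import numpy as np
from minatar_env import create_minatar_env
env = create_minatar_env('breakout')
obs = env.reset()
print(obs.shape, env.env.n)  # (10, 10, 4) observation and the number of actions
obs, reward, done, _ = env.step(np.random.randint(env.env.n))
print(reward, done)
```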
{
"source": "jiaweih/CrossWalk",
"score": 2
} |
#### File: src/crosswalk/scorelator.py
```python
import os
from pathlib import Path
from typing import Tuple, Union
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from .model import CWModel
class Scorelator:
def __init__(self, model: CWModel,
draw_bounds: Tuple[float, float] = (0.05, 0.95),
type: str = 'harmful',
name: str = 'unknown'):
self.model = model
self.draw_bounds = draw_bounds
self.type = type
self.name = name
self.cov_names = model.get_cov_names()
self.intercept_index = self.cov_names.index("intercept") + \
np.arange(model.cwdata.num_dorms - 1)*model.num_vars_per_dorm
beta = np.delete(model.beta, model.var_idx[model.gold_dorm])
beta_sd = np.delete(model.beta_sd, model.var_idx[model.gold_dorm])
gamma_fisher = model.lt.get_gamma_fisher(model.gamma)[0, 0]
self.beta = beta[self.intercept_index]
self.beta_sd = beta_sd[self.intercept_index]
self.gamma = model.gamma[0]
self.gamma_sd = 1.0/np.sqrt(gamma_fisher)
gamma_ub = self.gamma + 2.0*self.gamma_sd
self.draw_lb = self.beta + norm.ppf(self.draw_bounds[0], scale=np.sqrt(self.gamma + self.beta_sd**2))
self.draw_ub = self.beta + norm.ppf(self.draw_bounds[1], scale=np.sqrt(self.gamma + self.beta_sd**2))
self.wider_draw_lb = self.beta + norm.ppf(self.draw_bounds[0], scale=np.sqrt(gamma_ub + self.beta_sd**2))
self.wider_draw_ub = self.beta + norm.ppf(self.draw_bounds[1], scale=np.sqrt(gamma_ub + self.beta_sd**2))
def get_score(self, use_gamma_ub: bool = False) -> float:
if use_gamma_ub:
score = self.wider_draw_lb if self.type == 'harmful' else -self.wider_draw_ub
else:
score = self.draw_lb if self.type == 'harmful' else -self.draw_ub
return score
def plot_model(self,
ax=None,
title: str = None,
xlabel: str = 'definitions or methods',
ylabel: str = 'ln relative risk',
xlim: tuple = None,
ylim: tuple = None,
xscale: str = None,
yscale: str = None,
folder: Union[str, Path] = None):
if ax is None:
fig = plt.figure(figsize=(4*(self.model.cwdata.num_dorms - 1), 5))
ax = fig.add_subplot()
data = self.model.cwdata
alt_dorms = np.array([dorm for i in range(data.num_obs) for dorm in data.alt_dorms[i]])
obs_ref = np.array([
np.sum([self.model.beta[self.model.var_idx[dorm]]
for dorm in data.ref_dorms[i]])
for i in range(data.num_obs)
])
obs = data.obs + obs_ref
dorms = np.delete(data.unique_dorms, list(data.unique_dorms).index(self.model.gold_dorm))
beta_sort_id = np.argsort(self.beta)
if self.type == 'protective':
beta_sort_id = beta_sort_id[::-1]
dorms = dorms[beta_sort_id]
for i, dorm in enumerate(dorms):
index = alt_dorms == dorm
beta_index = beta_sort_id[i]
lb = i - 0.49
ub = i + 0.49
x = np.random.uniform(low=lb, high=ub, size=np.sum(index))
ax.scatter(x, obs[index], s=2.0/data.obs_se[index], color='gray', alpha=0.5)
ax.fill_between([lb, ub],
[self.draw_lb[beta_index], self.draw_lb[beta_index]],
[self.draw_ub[beta_index], self.draw_ub[beta_index]], color='#69b3a2', alpha=0.2)
ax.fill_between([lb, ub],
[self.wider_draw_lb[beta_index], self.wider_draw_lb[beta_index]],
[self.wider_draw_ub[beta_index], self.wider_draw_ub[beta_index]],
color='#69b3a2', alpha=0.2)
ax.axhline(0.0, color='red', linestyle='--', linewidth=1.0)
title = self.name if title is None else title
score = np.round(self.get_score(), 3)
low_score = np.round(self.get_score(use_gamma_ub=True), 3)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_title(f"{title}: ref = {self.model.gold_dorm}\n"
f"categories: {dorms}\n"
f"low scores: {list(low_score[beta_sort_id])}\n"
f"scores: {list(score[beta_sort_id])}", loc='left')
ax.set_xticks(np.arange(data.num_dorms - 1))
ax.set_xticklabels(dorms)
if xlim is not None:
ax.set_xlim(*xlim)
if ylim is not None:
ax.set_ylim(*ylim)
if xscale is not None:
ax.set_xscale(xscale)
if yscale is not None:
ax.set_yscale(yscale)
if folder is not None:
folder = Path(folder)
if not folder.exists():
os.mkdir(folder)
plt.savefig(folder/f"{self.name}.pdf", bbox_inches='tight')
return ax
``` |
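The score above is a conservative draw bound on the fixed effect: beta shifted by a normal quantile whose scale combines the heterogeneity gamma with the sampling variance of beta. A toy, self-contained illustration with made-up numbers (SciPy only):
```python
import numpy as np
from scipy.stats import norm
beta, beta_sd = 0.30, 0.05      # hypothetical fixed effect and its standard error
gamma, gamma_sd = 0.01, 0.004   # hypothetical heterogeneity and its standard error
draw_bounds = (0.05, 0.95)
draw_lb = beta + norm.ppf(draw_bounds[0], scale=np.sqrt(gamma + beta_sd**2))
gamma_ub = gamma + 2.0 * gamma_sd
wider_draw_lb = beta + norm.ppf(draw_bounds[0], scale=np.sqrt(gamma_ub + beta_sd**2))
# For a 'harmful' dorm the score is draw_lb; the "low score" first inflates
# gamma by two standard deviations.
print(round(draw_lb, 3), round(wider_draw_lb, 3))
```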
{
"source": "jiaweih/MRTool",
"score": 2
} |
#### File: MRTool/tests/test_data.py
```python
import numpy as np
import pandas as pd
import xarray as xr
import pytest
from mrtool import MRData
@pytest.fixture()
def df():
num_obs = 5
df = pd.DataFrame({
'obs': np.random.randn(num_obs),
'obs_se': np.random.rand(num_obs) + 0.01,
'cov0': np.random.randn(num_obs),
'cov1': np.random.randn(num_obs),
'cov2': np.random.randn(num_obs),
})
return df
@pytest.fixture()
def xarray():
example_dataset = xr.Dataset({
"y":
xr.DataArray(
np.random.random([2, 2]),
dims=["age_group_id", "location_id"],
name="random_met_need",
coords={"age_group_id": [2, 3],
"location_id": [6, 102]}),
"y_se":
xr.DataArray(
np.ones([2, 2]),
dims=["age_group_id", "location_id"],
name="random_met_need",
coords={"age_group_id": [2, 3],
"location_id": [6, 102]}),
"sdi":
xr.DataArray(
np.ones([2, 2])*5,
dims=["age_group_id", "location_id"],
name="random_education",
coords={"age_group_id": [2, 3],
"location_id": [6, 102]}),
"sdi_se":
xr.DataArray(
np.ones([2, 2])*0,
dims=["age_group_id", "location_id"],
name="random_education",
coords={"age_group_id": [2, 3],
"location_id": [6, 102]}),
})
return example_dataset
@pytest.mark.parametrize('obs', ['obs', None])
@pytest.mark.parametrize('obs_se', ['obs_se', None])
def test_obs(df, obs, obs_se):
d = MRData()
d.load_df(df,
col_obs=obs,
col_obs_se=obs_se,
col_covs=['cov0', 'cov1', 'cov2'])
assert d.obs.size == df.shape[0]
assert d.obs_se.size == df.shape[0]
if obs is None:
assert all(np.isnan(d.obs))
@pytest.mark.parametrize('covs', [None,
['cov0', 'cov1', 'cov2']])
def test_covs(df, covs):
d = MRData()
d.load_df(df,
col_obs='obs',
col_obs_se='obs_se',
col_covs=covs)
num_covs = 0 if covs is None else len(covs)
num_covs += 1
assert d.num_covs == num_covs
@pytest.mark.parametrize('study_id', [None, np.array([0, 0, 1, 1, 2])])
def test_study_id(df, study_id):
if study_id is not None:
df['study_id'] = study_id
col_study_id = 'study_id'
else:
col_study_id = None
d = MRData()
d.load_df(df,
col_obs='obs',
col_obs_se='obs_se',
col_covs=['cov0', 'cov1', 'cov2'],
col_study_id=col_study_id)
if col_study_id is None:
assert np.all(d.study_id == 'Unknown')
assert d.num_studies == 1
assert d.studies[0] == 'Unknown'
else:
assert np.allclose(d.study_id, np.array([0, 0, 1, 1, 2]))
assert d.num_studies == 3
assert np.allclose(d.studies, np.array([0, 1, 2]))
assert np.allclose(d.study_sizes, np.array([2, 2, 1]))
def test_is_empty(df):
d = MRData()
assert d.is_empty()
d.load_df(df,
col_obs='obs',
col_obs_se='obs_se',
col_covs=['cov0', 'cov1', 'cov2'])
assert not d.is_empty()
d.reset()
assert d.is_empty()
def test_assert_not_empty():
d = MRData()
with pytest.raises(ValueError):
d._assert_not_empty()
def test_has_covs(df):
d = MRData()
d.load_df(df,
col_obs='obs',
col_obs_se='obs_se',
col_covs=['cov0', 'cov1', 'cov2'])
assert d.has_covs(['cov0'])
assert d.has_covs(['cov0', 'cov1'])
assert not d.has_covs(['cov3'])
def test_assert_has_covs(df):
d = MRData()
d.load_df(df,
col_obs='obs',
col_obs_se='obs_se',
col_covs=['cov0', 'cov1', 'cov2'])
with pytest.raises(ValueError):
d._assert_has_covs('cov3')
def test_get_covs(df):
d = MRData()
d.load_df(df,
col_obs='obs',
col_obs_se='obs_se',
col_covs=['cov0', 'cov1', 'cov2'])
for cov_name in ['cov0', 'cov1', 'cov2']:
assert np.allclose(d.get_covs(cov_name), df[cov_name].to_numpy()[:, None])
cov_mat = d.get_covs(['cov0', 'cov1', 'cov2'])
assert np.allclose(cov_mat, df[['cov0', 'cov1', 'cov2']].to_numpy())
@pytest.mark.parametrize('covs', [None, 'cov0', ['cov0', 'cov1']])
def test_normalize_covs(df, covs):
d = MRData()
d.load_df(df,
col_obs='obs',
col_obs_se='obs_se',
col_covs=['cov0', 'cov1', 'cov2'])
d.normalize_covs(covs)
assert d.is_cov_normalized(covs)
@pytest.mark.parametrize('covs', [['cov0', 'cov1']])
def test_remove_nan_in_covs(df, covs):
df.loc[:0, covs] = np.nan
d = MRData()
with pytest.warns(Warning):
d.load_df(df,
col_obs='obs',
col_obs_se='obs_se',
col_covs=covs)
assert d.num_obs == df.shape[0] - 1
def test_load_xr(xarray):
d = MRData()
d.load_xr(xarray,
var_obs='y',
var_obs_se='y_se',
var_covs=['sdi'],
coord_study_id='location_id')
assert np.allclose(np.sort(d.obs), np.sort(xarray['y'].data.flatten()))
assert np.allclose(np.sort(d.obs_se), np.sort(xarray['y_se'].data.flatten()))
assert np.allclose(np.sort(d.covs['sdi']), np.sort(xarray['sdi'].data.flatten()))
assert np.allclose(np.sort(d.studies), np.sort(xarray.coords['location_id']))
``` |
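A minimal usage sketch mirroring the fixtures above; it assumes `mrtool` is installed and reuses the same toy column names as the tests:
```python
import numpy as np
import pandas as pd
from mrtool import MRData
num_obs = 5
df = pd.DataFrame({
    'obs': np.random.randn(num_obs),
    'obs_se': np.random.rand(num_obs) + 0.01,
    'cov0': np.random.randn(num_obs),
    'study_id': [0, 0, 1, 1, 2],
})
d = MRData()
d.load_df(df, col_obs='obs', col_obs_se='obs_se',
          col_covs=['cov0'], col_study_id='study_id')
print(d.num_obs, d.num_covs, d.num_studies)  # 5, 2 (cov0 plus intercept), 3
```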
{
"source": "jiaweih/Temperature",
"score": 3
} |
#### File: src/temperature/mtslice.py
```python
import numpy as np
import matplotlib.pyplot as plt
import xspline
import limetr
import utils
import copy
# utils
# -----------------------------------------------------------------------------
def extract_mtslice(tdata, mt):
"""extract the temperature data at given mean_temp"""
valid_id = tdata.mean_temp == mt
mean_temp = tdata.mean_temp[valid_id]
daily_temp = tdata.daily_temp[valid_id]
obs_mean = tdata.obs_mean[valid_id]
obs_std = tdata.obs_std[valid_id]
study_id = tdata.study_id[valid_id]
if tdata.data_id is not None:
data_id = tdata.data_id[valid_id]
else:
data_id = None
trimming_weights = tdata.trimming_weights[valid_id]
return utils.TempData(mean_temp,
daily_temp,
obs_mean,
obs_std,
study_id,
data_id,
trimming_weights=trimming_weights)
def aggregate_mtslice(tdata):
"""aggregate data by the weighted mean"""
# extract the unique mean and daily pair
unique_pair = np.unique(np.vstack((tdata.mean_temp,
tdata.daily_temp)).T, axis=0)
mean_temp = unique_pair[:, 0]
daily_temp = unique_pair[:, 1]
obs_mean = []
obs_std = []
for p in unique_pair:
valid_id = (tdata.mean_temp == p[0]) &\
(tdata.daily_temp == p[1]) &\
(tdata.trimming_weights > 0.5)
obs_mean_atp = tdata.obs_mean[valid_id]
obs_std_atp = tdata.obs_std[valid_id]
ivar = 1.0/obs_std_atp**2
obs_mean_atp = obs_mean_atp.dot(ivar)/np.sum(ivar)
obs_std_atp = np.sqrt(1.0/np.sum(ivar))
obs_mean.append(obs_mean_atp)
obs_std.append(obs_std_atp)
obs_mean = np.array(obs_mean)
obs_std = np.array(obs_std)
study_id = np.arange(obs_mean.size)
data_id = None
return utils.TempData(mean_temp,
daily_temp,
obs_mean,
obs_std,
study_id,
data_id)
def stack_mtslice(tdata_list):
"""stack tdata in the list into one instance"""
mean_temp_list = [tdata.mean_temp for tdata in tdata_list]
daily_temp_list = [tdata.daily_temp for tdata in tdata_list]
obs_mean_list = [tdata.obs_mean for tdata in tdata_list]
obs_std_list = [tdata.obs_std for tdata in tdata_list]
study_id_list = [tdata.study_id for tdata in tdata_list]
data_id_list = [tdata.data_id for tdata in tdata_list]
trimming_weights_list = [tdata.trimming_weights
for tdata in tdata_list]
mean_temp = np.hstack(mean_temp_list)
daily_temp = np.hstack(daily_temp_list)
obs_mean = np.hstack(obs_mean_list)
obs_std = np.hstack(obs_std_list)
study_id = np.hstack(study_id_list)
data_id = np.hstack(data_id_list)
trimming_weights = np.hstack(trimming_weights_list)
if np.any(data_id == None):
data_id = None
return utils.TempData(mean_temp,
daily_temp,
obs_mean,
obs_std,
study_id,
data_id,
trimming_weights=trimming_weights)
# fit
# -----------------------------------------------------------------------------
def adjust_mean(tdata, ref=None):
"""Adjust the mean of the data by the given ref
"""
if ref is None:
ref = tdata.unique_mean_temp
tdata_list = []
for i, mt in enumerate(tdata.unique_mean_temp):
tdata_mt = extract_mtslice(tdata, mt)
tdata_mt = adjust_mean_mtslice(tdata_mt, ref=ref[i])
tdata_list.append(tdata_mt)
return stack_mtslice(tdata_list)
def adjust_mean_mtslice(tdata_mt, ref=None):
"""Adjust the mean of the mtslice by the given ref
"""
if ref is None:
ref = tdata_mt.mean_temp[0]
# import pdb; pdb.set_trace()
study_slices = utils.sizes_to_slices(tdata_mt.study_sizes)
for i in range(tdata_mt.num_studies):
obs_mean = tdata_mt.obs_mean[study_slices[i]]
obs_std = tdata_mt.obs_std[study_slices[i]]
cov = tdata_mt.daily_temp[study_slices[i]]
# fit the curve
if tdata_mt.study_sizes[i] >= 5:
spline = xspline.XSpline(
np.array([cov.min(), ref, cov.max()]),
2, l_linear=True)
else:
spline = xspline.XSpline(
np.array([cov.min(), cov.max()]), 1)
beta = utils.fit_spline(obs_mean, obs_std, cov, spline)
ref_lnrr = spline.design_mat(ref).dot(beta)
# adjust the mean
tdata_mt.obs_mean[study_slices[i]] -= ref_lnrr
return tdata_mt
def adjust_agg_std(tdata, ref=None):
"""Adjust std of the aggregate the tdata
"""
if ref is None:
ref = tdata.unique_mean_temp
tdata_list = []
for i, mt in enumerate(tdata.unique_mean_temp):
tdata_mt = extract_mtslice(tdata, mt)
tdata_mt = adjust_agg_std_mtslice(tdata_mt, ref=ref[i])
tdata_list.append(tdata_mt)
return stack_mtslice(tdata_list)
def adjust_agg_std_mtslice(tdata_mt, ref=None):
"""Adjust std of the aggregate the tdata slices
"""
if ref is None:
ref = tdata_mt.mean_temp[0]
# fit the curve
spline = xspline.XSpline(
np.array([tdata_mt.daily_temp.min(), ref,
tdata_mt.daily_temp.max()]), 2, l_linear=True)
beta = utils.fit_spline(tdata_mt.obs_mean,
tdata_mt.obs_std,
tdata_mt.daily_temp,
spline)
residual = (tdata_mt.obs_mean -
spline.design_mat(tdata_mt.daily_temp).dot(beta))
residual /= tdata_mt.obs_std
# print(np.maximum(1.0, np.std(residual)))
tdata_mt.obs_std *= np.maximum(3.0, np.std(residual))
return tdata_mt
def fit_trend(tdata, surface_result, inlier_pct=1.0):
# calculate the residual data
tdata_residual = copy.deepcopy(tdata)
tdata_residual.obs_mean -= surface_result.surface_func(
tdata.mean_temp,
tdata.daily_temp)
beta = []
beta_var = []
gamma = []
random_effects = []
for i, mean_temp in enumerate(tdata.unique_mean_temp):
tdata_at_mean_temp = extract_mtslice(tdata_residual, mean_temp)
# tmrl = surface_result.tmrl[i]
tmrl = np.quantile(tdata_at_mean_temp.daily_temp, 0.75)
surface_result.tmrl[i] = tmrl
# print(mean_temp, tmrl)
(beta_at_mt,
beta_var_at_mt,
gamma_at_mt,
random_effects_at_mt) = fit_trend_mtslice(tdata_at_mean_temp,
tmrl,
inlier_pct=inlier_pct)
beta.append(beta_at_mt)
beta_var.append(beta_var_at_mt)
gamma.append(gamma_at_mt)
random_effects.append(random_effects_at_mt)
beta = np.vstack(beta)
beta_var = np.vstack(beta_var)
gamma = np.vstack(gamma)
trend_result = utils.TrendResult(beta, beta_var, gamma, random_effects,
tdata.unique_mean_temp)
# import pdb; pdb.set_trace()
return trend_result, tdata_residual
def fit_trend_mtslice(tdata_at_mean_temp, tmrl, inlier_pct=0.9, debug=False):
"""
Return beta (intercept and slope) and gamma (intercept and slope)
with given data
"""
if debug:
print("number of locations at mean temp",
tdata_at_mean_temp.num_studies)
outer_verbose = True
inner_print_level = 5
else:
outer_verbose = False
inner_print_level = 0
# construct the linear mixed effect model
cov = tdata_at_mean_temp.daily_temp
knots = np.array([cov.min(), tmrl, cov.max()])
degree = 1
spline = xspline.XSpline(knots, degree)
l1 = knots[1] - knots[0]
l2 = knots[2] - knots[1]
mat_transform = np.array([[1.0, 0.0, 0.0],
[1.0, l1, 0.0],
[1.0, l1, l2 ]])
M = spline.design_mat(cov).dot(mat_transform)
M[:, 1] -= M[:, 0]*l1
M = M[:, 1:]
scale = np.linalg.norm(M, axis=0)
scaled_M = M/scale
# construct the LimeTr object
F = lambda beta: scaled_M.dot(beta)
JF = lambda beta: scaled_M
Z = scaled_M.copy()
n = tdata_at_mean_temp.study_sizes
k_beta = 2
k_gamma = 2
Y = tdata_at_mean_temp.obs_mean
S = tdata_at_mean_temp.obs_std
uprior = np.array([
[-np.inf]*k_beta + [1e-7]*k_gamma,
[np.inf]*k_beta + [1.5]*k_gamma
])
lt = limetr.LimeTr(n, k_beta, k_gamma, Y, F, JF, Z, S=S,
uprior=uprior,
inlier_percentage=inlier_pct)
# fit model
MS = M/S.reshape(S.size, 1)
YS = Y/S
beta0 = np.linalg.solve(MS.T.dot(MS), MS.T.dot(YS))
gamma0 = np.array([0.1, 0.1])
(beta,
gamma,
trimming_weights) = lt.fitModel(x0=np.hstack((beta0, gamma0)),
outer_step_size=200.0,
outer_verbose=outer_verbose,
inner_print_level=inner_print_level)
# estimate the random effects
random_effects = lt.estimateRE()
# estimate the uncertainty of beta
V = limetr.utils.VarMat(lt.S**2, lt.Z, gamma, lt.n)
beta_var = np.linalg.inv(M.T.dot(V.invDot(M)))
    # scale beta and gamma back
beta /= scale
beta_var /= scale**2
gamma /= scale**2
random_effects /= scale
return beta, beta_var, gamma, random_effects
# viz
# -----------------------------------------------------------------------------
def plot_mtslice(tdata, mt, ylim=None, ax=None, use_colors=True):
if ax is None:
ax = plt
if ylim is not None:
ax.ylim(ylim[0], ylim[1])
else:
if ylim is not None:
ax.set_ylim(ylim[0], ylim[1])
tdata_mt = extract_mtslice(tdata, mt)
if use_colors:
study_slices = utils.sizes_to_slices(tdata_mt.study_sizes)
for i in range(tdata_mt.num_studies):
ax.scatter(tdata_mt.daily_temp[study_slices[i]],
tdata_mt.obs_mean[study_slices[i]],
s=1.0/tdata_mt.obs_std[study_slices[i]])
else:
ax.scatter(tdata_mt.daily_temp, tdata_mt.obs_mean,
s=1.0/tdata_mt.obs_std)
``` |
{
"source": "Jia-Wei-Liao/Recommender_System_using_Matrix_Factorization",
"score": 3
} |
#### File: Jia-Wei-Liao/Recommender_System_using_Matrix_Factorization/Preprocess.py
```python
import os
import csv
import random
def ReadDat(path):
'''
Read the content from *.dat and return
DATA (list[list]): [[UserID, MovieID, Rating], ...]
'''
DATA = []
for line in open(path, 'r'):
content = line.split('::')
UserID, MovieID, Rating, _ = content
DATA.append([UserID, MovieID, Rating])
return DATA
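# Illustrative sketch (not part of the original script): demonstrates the '::'-separated
# format that ReadDat expects; the sample rating line below is made up.
def _example_read_dat(tmp_path='_example_ratings.dat'):
    with open(tmp_path, 'w') as f:
        f.write('1::122::5.0::838985046\n')
    parsed = ReadDat(tmp_path)   # -> [['1', '122', '5.0']]
    os.remove(tmp_path)
    return parsed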
def WriteCsv(path, DATA, ID):
'''
Write csv file for a partial dataset DATA (given by indices ID)
'''
with open(path, 'w', newline='') as f:
# create writer
writer = csv.writer(f)
# write the column name
writer.writerow(['UserID', 'MovieID', 'Rating'])
# write the content
for ind in ID:
writer.writerow(DATA[ind])
return
if __name__ == '__main__':
'''
    Goal: split the dataset (ratings.dat) into a 9:1 ratio.
    The format in ratings.dat is:
        UserID::MovieID::Rating::Timestamp
    We split the rows 9:1 and write them to Train.csv and Test.csv (the 10% split is used for validation)
'''
folder_path = os.path.join('dataset', 'ml-10m')
rating_path = os.path.join(folder_path, 'ratings.dat')
# read ratings.dat
DATA = ReadDat(rating_path)
N = len(DATA)
# random sampling
TrainID = random.sample(range(N), int(0.9*N)) # 90 percent
ValidID = list(set(range(N)) - set(TrainID))
TrainID = sorted(TrainID)
ValidID = sorted(ValidID)
# write csv
WriteCsv(os.path.join(folder_path, 'Train.csv'), DATA, TrainID)
WriteCsv(os.path.join(folder_path, 'Test.csv'), DATA, ValidID)
``` |
{
"source": "Jia-Wei-Liao/Set14_Dataset_Super-Resolution",
"score": 2
} |
#### File: Jia-Wei-Liao/Set14_Dataset_Super-Resolution/inference.py
```python
import os
import tqdm
import imageio
import argparse
import options.options as option
from solvers import create_solver
from data import create_dataset, create_dataloader
from utils import util
def main(args):
opt = option.parse(args.opt)
opt = option.dict_to_nonedict(opt)
solver = create_solver(opt)
bm_names = []
test_loaders = []
for _, dataset_opt in sorted(opt['datasets'].items()):
test_set = create_dataset(dataset_opt)
test_loader = create_dataloader(test_set, dataset_opt)
test_loaders.append(test_loader)
bm_names.append(test_set.name())
for bm, test_loader in zip(bm_names, test_loaders):
save_path_list = opt['solver']['pretrained_path'].split(os.sep)[:-2]
save_path = '/'.join(save_path_list)
save_img_path = os.path.join(save_path, 'result')
os.makedirs(save_img_path, exist_ok=True)
for batch in tqdm.tqdm(test_loader):
solver.feed_data(batch, need_HR=False)
solver.test()
visuals = solver.get_current_visual(need_HR=False)
imageio.imwrite(os.path.join(
save_img_path,
os.path.basename(batch['LR_path'][0])[:-4]+'_pred.png'
), visuals['SR'])
print("finish!")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-opt', type=str, required=True,
help='path to options json file.')
args = parser.parse_args()
main(args)
``` |
{
"source": "Jia-Wei-Liao/SVHN_Dataset_Detection",
"score": 3
} |
#### File: SVHN_Dataset_Detection/YOLOv4/mat2yolo.py
```python
import os
import argparse
import scipy.io as sio
from PIL import Image
def get_yolo_bbox(bboxes, image_width, image_height):
yolo_bbox = []
for bbox in bboxes:
bbox = [e.squeeze().tolist() for e in bbox]
h, l, t, w, label = bbox
if label == 10:
label = 0
xc = (l+w/2) / image_width
yc = (t+h/2) / image_height
width = w / image_width
height = h / image_height
bbox = f'{label} {xc} {yc} {width} {height}'
yolo_bbox.append(bbox)
# print(bbox)
yolo_bboxes = '\n'.join(yolo_bbox)
return yolo_bboxes
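# Illustrative sketch (not part of the original script): converts one made-up SVHN-style
# box (height, left, top, width, label) for a 100x50 image into the normalized
# "label xc yc w h" string produced by get_yolo_bbox above.
def _example_single_box():
    import numpy as np
    toy_bbox = [np.array(v) for v in (20.0, 10.0, 5.0, 30.0, 10.0)]  # h, l, t, w, label (10 -> 0)
    return get_yolo_bbox([toy_bbox], 100, 50)   # -> '0 0.25 0.3 0.3 0.4'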
def save_txt(save_content, save_path):
with open(save_path, 'w') as f:
f.write(save_content)
return None
parser = argparse.ArgumentParser()
parser.add_argument(
'--data_dir',
type=str,
default='train',
help='data_location'
)
parser.add_argument(
'--digit_struct',
type=str,
default='new_digitStruct.mat',
help='digit structure'
)
args = parser.parse_args()
if __name__ == '__main__':
digit_struct = sio.loadmat(args.digit_struct)['digitStruct'][0]
for i, b in enumerate(digit_struct):
name, bboxes = b[0][0], b[1][0]
image = Image.open(os.path.join(args.data_dir, name))
image_width, image_height = image.size
yolo_bbox = get_yolo_bbox(bboxes, image_width, image_height)
save_path = os.path.join(args.data_dir, name.replace('png', 'txt'))
save_txt(yolo_bbox, save_path)
print(f'[{i+1}/{len(digit_struct)}]')
``` |
{
"source": "jiaweili-hammer/rpi-robotics-project",
"score": 3
} |
#### File: test/src/my_demotest.py
```python
import sys
import copy
import rospy
import moveit_commander
import moveit_msgs.msg
import geometry_msgs.msg
import numpy as np
from math import pi
from std_msgs.msg import String
from moveit_commander.conversions import pose_to_list
from sensor_msgs.msg import Image
#import opencv libraries and tools
import cv2
from cv_bridge import CvBridge, CvBridgeError
import boxes_finder as bf
rospy.loginfo("test log message to respy")
#print("creating CvBridge")
bridge = CvBridge()
#print("bridge created")
all_boxes = np.zeros((14, 2), dtype = "float32")
def image_callback(img_msg):
print("image callback")
cv_image = bridge.imgmsg_to_cv2(img_msg, "passthrough")
boxes_remaining, red_boxes, green_boxes, blue_boxes = bf.find_boxes_in_rgb_image(cv_image)
h = cv_image.shape[0]
w = cv_image.shape[1]
count_red_boxes = len(red_boxes)
print('red box count: ', count_red_boxes)
for n in range(count_red_boxes):
print('red box: ', n, ' x: ', red_boxes[n][0], ' y: ', red_boxes[n][1])
for n in range(len(green_boxes)):
print('green box: ', n, ' x: ', green_boxes[n][0], ' y: ', green_boxes[n][1])
for n in range(len(blue_boxes)):
print('blue box: ', n, ' x: ', blue_boxes[n][0], ' y: ', blue_boxes[n][1])
print("h x w", h, w)
print("image callback complete")
total_box_count = len(red_boxes) + len(green_boxes) + len(blue_boxes)
print('total boxes: ', total_box_count)
#while less than 9 boxes, add blue boxes
# if more than 9 but less than 14, add green boxes
# if on the 14th block, add red boxes
for n in range(total_box_count):
if(n < len(blue_boxes)):
all_boxes[n][0] = blue_boxes[n][0]
all_boxes[n][1] = blue_boxes[n][1]
elif(n >= len(blue_boxes) and n < (len(blue_boxes) + len(green_boxes))):
all_boxes[n][0] = green_boxes[n-len(blue_boxes)][0]
all_boxes[n][1] = green_boxes[n-len(blue_boxes)][1]
else:
all_boxes[n][0] = red_boxes[n - len(blue_boxes) - len(green_boxes)][0]
all_boxes[n][1] = red_boxes[n - len(blue_boxes) - len(green_boxes)][1]
print('box: ', n+1, ' x: ', all_boxes[n][0], ' y: ', all_boxes[n][1])
#print("subscribing to image callback")
sub_image = rospy.Subscriber("/rrbot/camera1/image_raw", Image, image_callback)
#print("subscribing to image callback complete")
check = False
#returns location of associated block
#blue blocks are the lowest numbers
#green blocks are the middle numbers
#red blocks are the highest numbers
def BlockLocate(blockcount):
x_Pos = 0
y_Pos = 0
#print('888888888888888888888888')
print('block locate called')
#print('888888888888888888888888')
print("all_boxes length", len(all_boxes))
for n in range(len(all_boxes)):
print('box: ', n+1, ' x: ', all_boxes[n][0], ' y: ', all_boxes[n][1])
    #set x, y for the requested block (blockcount is 1-based)
    x_Pos = all_boxes[blockcount-1][0]
    y_Pos = all_boxes[blockcount-1][1]
#set bool if last box
if(blockcount == len(all_boxes)):
check = True
else:
check = False
return check, x_Pos, y_Pos
``` |
{
"source": "jiawei-mo/dsvo",
"score": 2
} |
#### File: dsvo/test/zed_sptam.py
```python
import math
import argparse
import rospy
import rosbag
import sensor_msgs.msg
import yaml
import numpy as np
from numpy.linalg import inv
import cv2
import tf
# parse camera calibration yaml file
def load_intrinsics( calib_data ):
width, height = calib_data['resolution']
#cam_info.distortion_model = 'plumb_bob'
D = np.array(calib_data['distortion_coefficients'])
#cam_info.R = [1, 0, 0, 0, 1, 0, 0, 0, 1]
fu, fv, cu, cv = calib_data['intrinsics']
K = np.array([[fu, 0, cu],
[0, fv, cv],
[0, 0, 1]])
return height, width, K, D
# parse camera calibration yaml file
def load_extrinsics( calib_data ):
# read homogeneous rotation and translation matrix
transformation_base_camera = np.array(calib_data['T_BS']['data'])
transformation_base_camera = transformation_base_camera.reshape( (4,4) )
# compute projection matrix
# projection = np.zeros((3,4))
# projection[:,:-1] = K
# cam_info.P = projection.reshape(-1,).tolist()
return transformation_base_camera
# create camera info message
def create_camera_info_msg(width, height, K, D, R, P):
cam_info = sensor_msgs.msg.CameraInfo()
cam_info.width = width
cam_info.height = height
cam_info.distortion_model = 'plumb_bob'
cam_info.D = D.tolist()
cam_info.R = R.reshape(-1,).tolist()
cam_info.K = K.reshape(-1,).tolist()
cam_info.P = P.reshape(-1,).tolist()
return cam_info
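# Illustrative sketch (not part of the original script): builds a CameraInfo message from
# made-up 640x480 pinhole parameters, just to show the shapes expected above
# (K, R: 3x3, D: length-5, P: 3x4).
def _example_camera_info():
    K = np.array([[500.0, 0.0, 320.0],
                  [0.0, 500.0, 240.0],
                  [0.0, 0.0, 1.0]])
    D = np.zeros(5)
    R = np.eye(3)
    P = np.hstack([K, np.zeros((3, 1))])
    return create_camera_info_msg(640, 480, K, D, R, P)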
if __name__ == "__main__":
####################################################################
# Parse program options
####################################################################
parser = argparse.ArgumentParser()
parser.add_argument('rosbag', help='euroc rosbag file')
parser.add_argument('left_calibration', help='euroc left camera calibration')
parser.add_argument('right_calibration', help='euroc right camera calibration')
parser.add_argument('--scaling', help='if set, crop/scale rectified image to region of interest (no black areas)', action="store_true")
args = parser.parse_args()
####################################################################
# Process data
####################################################################
# load yaml file
    left_calib_stream = open(args.left_calibration, 'r')
    left_calib_data = yaml.safe_load(left_calib_stream)
    right_calib_stream = open(args.right_calibration, 'r')
    right_calib_data = yaml.safe_load(right_calib_stream)
# parse information from calibration euroc files
height_left, width_left, K_left, D_left = load_intrinsics( left_calib_data )
height_right, width_right, K_right, D_right = load_intrinsics( right_calib_data )
transformation_base_left = load_extrinsics( left_calib_data )
transformation_base_right = load_extrinsics( right_calib_data )
# compute transformation between left and right camera
transformation_right_base = inv(transformation_base_right)
transformation_right_left = transformation_right_base.dot(transformation_base_left)
R = transformation_right_left[0:3,0:3]
T = transformation_right_left[0:3,3]
print('R')
print(R)
print('T')
print(T)
####################################################################
# Compute Rectification parameters
####################################################################
R_left = np.empty([3,3])
R_right = np.empty([3,3])
P_left = np.empty([3,4])
P_right = np.empty([3,4])
# perform cropping to ROI only if specified
alpha = 0 if args.scaling else -1
cv2.stereoRectify(K_left, D_left, K_right, D_right, (width_left, height_left), R, T, R_left, R_right, P_left, P_right, None, cv2.CALIB_ZERO_DISPARITY, alpha)
####################################################################
# Print results
####################################################################
print('Intrinsic matrices (original)')
print(K_left)
print(K_right)
print('Projection matrices')
print(P_left)
print(P_right)
print('Rectification matrices')
print(R_left)
print(R_right)
print("Distortion coefficients")
print(D_left)
print(D_right)
left_camera_info_msg = create_camera_info_msg(width_left, height_left, K_left, D_left, R_left, P_left)
right_camera_info_msg = create_camera_info_msg(width_right, height_right, K_right, D_right, R_right, P_right)
# num_msgs = 1000
with rosbag.Bag('output.bag', 'w') as outbag:
for topic, msg, t in rosbag.Bag(args.rosbag).read_messages():
# if num_msgs < 1:
# break
# num_msgs -= 1
# create left camera_info
if (topic == '/left/image_raw_color'):
#
outbag.write('/cam0/image_raw', msg, t)
left_camera_info_msg.header = msg.header
outbag.write("/cam0/camera_info", left_camera_info_msg, t)
# create right camera_info
elif (topic == '/right/image_raw_color'):
outbag.write('/cam1/image_raw', msg, t)
right_camera_info_msg.header = msg.header
outbag.write("/cam1/camera_info", right_camera_info_msg, t)
else:
outbag.write(topic, msg, t)
        # Close the opened bag file
outbag.close()
####################################################################
# compute rigit transformation between euroc left camera (cam0) and base_link
####################################################################
rotation_base_left = transformation_base_left[0:3,0:3]
pitch, yaw, roll = tf.transformations.euler_from_matrix(transformation_base_left, axes='sxyz')
translation_base_left = transformation_base_left[0:3,3]
print('Yaw Pitch Roll ')
print(yaw, pitch, roll)
print('translation')
print(translation_base_left)
``` |
{
"source": "jiaweiM/omics-plot",
"score": 3
} |
#### File: omics-plot/omicsplot/venn_circle.py
```python
import plotly.graph_objects as go
def venn2(labels, names=('A', 'B'), **options):
"""
    Plot a 2-set Venn diagram
:param labels:
:param names:
:param options:
:return:
"""
pass
``` |
{
"source": "jiawei-ren/BalancedMSE",
"score": 3
} |
#### File: BalancedMSE/imdb-wiki-dir/datasets.py
```python
import os
import logging
import numpy as np
from PIL import Image
from scipy.ndimage import convolve1d
from torch.utils import data
import torchvision.transforms as transforms
from utils import get_lds_kernel_window
print = logging.info
class IMDBWIKI(data.Dataset):
def __init__(self, df, data_dir, img_size, split='train', reweight='none',
lds=False, lds_kernel='gaussian', lds_ks=5, lds_sigma=2):
self.df = df
self.data_dir = data_dir
self.img_size = img_size
self.split = split
self.weights = self._prepare_weights(reweight=reweight, lds=lds, lds_kernel=lds_kernel, lds_ks=lds_ks, lds_sigma=lds_sigma)
def __len__(self):
return len(self.df)
def __getitem__(self, index):
index = index % len(self.df)
row = self.df.iloc[index]
img = Image.open(os.path.join(self.data_dir, row['path'])).convert('RGB')
transform = self.get_transform()
img = transform(img)
label = np.asarray([row['age']]).astype('float32')
weight = np.asarray([self.weights[index]]).astype('float32') if self.weights is not None else np.asarray([np.float32(1.)])
return img, label, weight
def get_transform(self):
if self.split == 'train':
transform = transforms.Compose([
transforms.Resize((self.img_size, self.img_size)),
transforms.RandomCrop(self.img_size, padding=16),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([.5, .5, .5], [.5, .5, .5]),
])
else:
transform = transforms.Compose([
transforms.Resize((self.img_size, self.img_size)),
transforms.ToTensor(),
transforms.Normalize([.5, .5, .5], [.5, .5, .5]),
])
return transform
def _prepare_weights(self, reweight, max_target=121, lds=False, lds_kernel='gaussian', lds_ks=5, lds_sigma=2):
assert reweight in {'none', 'inverse', 'sqrt_inv'}
assert reweight != 'none' if lds else True, \
"Set reweight to \'sqrt_inv\' (default) or \'inverse\' when using LDS"
value_dict = {x: 0 for x in range(max_target)}
labels = self.df['age'].values
for label in labels:
value_dict[min(max_target - 1, int(label))] += 1
if reweight == 'sqrt_inv':
value_dict = {k: np.sqrt(v) for k, v in value_dict.items()}
elif reweight == 'inverse':
value_dict = {k: np.clip(v, 5, 1000) for k, v in value_dict.items()} # clip weights for inverse re-weight
num_per_label = [value_dict[min(max_target - 1, int(label))] for label in labels]
if not len(num_per_label) or reweight == 'none':
return None
print(f"Using re-weighting: [{reweight.upper()}]")
if lds:
lds_kernel_window = get_lds_kernel_window(lds_kernel, lds_ks, lds_sigma)
print(f'Using LDS: [{lds_kernel.upper()}] ({lds_ks}/{lds_sigma})')
smoothed_value = convolve1d(
np.asarray([v for _, v in value_dict.items()]), weights=lds_kernel_window, mode='constant')
num_per_label = [smoothed_value[min(max_target - 1, int(label))] for label in labels]
weights = [np.float32(1 / x) for x in num_per_label]
scaling = len(weights) / np.sum(weights)
weights = [scaling * x for x in weights]
return weights
def get_bucket_info(self, max_target=121, lds=False, lds_kernel='gaussian', lds_ks=5, lds_sigma=2):
value_dict = {x: 0 for x in range(max_target)}
labels = self.df['age'].values
for label in labels:
if int(label) < max_target:
value_dict[int(label)] += 1
bucket_centers = np.asarray([k for k, _ in value_dict.items()])
bucket_weights = np.asarray([v for _, v in value_dict.items()])
if lds:
lds_kernel_window = get_lds_kernel_window(lds_kernel, lds_ks, lds_sigma)
print(f'Using LDS: [{lds_kernel.upper()}] ({lds_ks}/{lds_sigma})')
bucket_weights = convolve1d(bucket_weights, weights=lds_kernel_window, mode='constant')
bucket_centers = np.asarray([bucket_centers[k] for k, v in enumerate(bucket_weights) if v > 0])
bucket_weights = np.asarray([bucket_weights[k] for k, v in enumerate(bucket_weights) if v > 0])
bucket_weights = bucket_weights / bucket_weights.sum()
return bucket_centers, bucket_weights
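# Illustrative sketch (not part of the original file): mirrors the LDS step used in
# _prepare_weights above on a made-up label histogram, smoothing the counts with the
# same kernel window and turning them into normalized inverse-frequency weights.
def _example_lds_weights():
    hist = np.array([50., 5., 1., 0., 2., 40.])           # toy counts per age bin
    window = get_lds_kernel_window('gaussian', 5, 2)      # same call as above
    smoothed = convolve1d(hist, weights=window, mode='constant')
    weights = 1.0 / np.clip(smoothed, 1e-6, None)
    return weights * (len(weights) / weights.sum())       # same rescaling as above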
```
#### File: BalancedMSE/synthetic_benchmark/main_2d.py
```python
import torch.nn as nn
from torch.utils.data import DataLoader
from loss import *
import copy
from utils import *
# =========== CONSTANTS ==============
# Training
NUM_EPOCHS = 10000
PRINT_FREQ = NUM_EPOCHS // 10
BATCH_SIZE = 256
NUM_TRAIN_ITERS = 4
NUM_VAL_ITERS = 1
NUM_TRAIN_SAMPLES = BATCH_SIZE * NUM_TRAIN_ITERS
NUM_VAL_SAMPLES = BATCH_SIZE * NUM_VAL_ITERS
NUM_TEST_SAMPLES = BATCH_SIZE * NUM_VAL_ITERS
# Dimensions
X_DIM = 2
Y_DIM = 2
# Data Range
Y_UB = torch.ones(Y_DIM) * 5
Y_LB = torch.ones(Y_DIM) * -5
# Linear Relation and Noise Scale
NOISE_SIGMA = 1.
NOISE_COVARIANCE = torch.eye(Y_DIM) * (NOISE_SIGMA ** 2)
ORACLE_MATRIX = torch.randn([X_DIM, Y_DIM]) * 0.01
# Normal Distribution Parameters
Y_COVARIANCE = torch.eye(Y_DIM)
Y_COVARIANCE = Y_COVARIANCE * 0.5 + torch.ones_like(Y_COVARIANCE) * 0.5
Y_MEAN = (Y_LB + Y_UB) / 2
# Specify which training distribution to use
TRAIN_DIST = 'normal'
# predefine distributions
DIST_DICT = {
'uniform': torch.distributions.Uniform(Y_LB, Y_UB),
'normal': torch.distributions.MultivariateNormal(loc=Y_MEAN, covariance_matrix=Y_COVARIANCE)
}
CRITERIA_TO_USE = [
'MSE',
'Reweight',
'GAI',
'BMC',
'GAI Learnable Noise',
'BMC Learnable Noise'
]
# ======= END OF CONSTANTS ==========
def f(x):
# This function will never be called, so we leave the inverse here
y = ORACLE_MATRIX.inverse() @ x.unsqueeze(-1)
return y.squeeze()
def f_inv(y):
x = ORACLE_MATRIX @ y.unsqueeze(-1)
return x.squeeze()
# Define a linear regressor
class LinearModel(nn.Module):
def __init__(self, input_dim, output_dim):
super(LinearModel, self).__init__()
self.mlp = nn.Sequential(
nn.Linear(input_dim, output_dim),
)
def forward(self, x):
x = self.mlp(x)
return x
def prepare_data():
# Training label samples
y_train = DIST_DICT[TRAIN_DIST].sample((NUM_TRAIN_SAMPLES,))
assert len(y_train) == NUM_TRAIN_SAMPLES
# Assume a gaussian noise has been added to observed y
noise_distribution = torch.distributions.MultivariateNormal(torch.zeros(Y_DIM), covariance_matrix=NOISE_COVARIANCE)
noise = noise_distribution.sample((NUM_TRAIN_SAMPLES,))
# then the oracle y should be
y_train_oracle = y_train - noise
x_train = f_inv(y_train_oracle)
# Evaluate on balanced (uniform) y distribution
y_eval = DIST_DICT['uniform'].sample((NUM_VAL_SAMPLES,))
x_eval = f_inv(y_eval)
# Test set
y_test = DIST_DICT['uniform'].sample((NUM_TEST_SAMPLES,))
x_test = f_inv(y_test)
train_loader = DataLoader(DummyDataset(x_train, y_train), BATCH_SIZE, shuffle=True)
eval_loader = DataLoader(DummyDataset(x_eval, y_eval), BATCH_SIZE)
test_loader = DataLoader(DummyDataset(x_test, y_test), BATCH_SIZE)
return train_loader, eval_loader, test_loader
def prepare_model():
model = LinearModel(input_dim=X_DIM, output_dim=Y_DIM)
optimizer = torch.optim.Adam(model.parameters(), lr=0.2)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=NUM_EPOCHS)
return model, optimizer, scheduler
def train(train_loader, eval_loader, test_loader, model, optimizer, scheduler, criterion):
best_eval_loss = 1e8
model_best = None
for epoch in range(NUM_EPOCHS):
train_loss = AverageMeter('train loss')
model.train()
for data, target in train_loader:
optimizer.zero_grad()
pred = model(data)
loss = criterion(pred, target)
train_loss.update(loss.item())
loss.backward()
optimizer.step()
scheduler.step()
if (epoch + 1) % PRINT_FREQ == 0:
print('epoch: ', epoch + 1)
model.eval()
eval_loss = AverageMeter('eval loss')
for data, target in eval_loader:
pred = model(data)
loss = F.mse_loss(pred, target)
eval_loss.update(loss.item())
print(train_loss)
print(eval_loss)
print('-' * 10)
if best_eval_loss > eval_loss.avg:
model_best = copy.deepcopy(model)
best_eval_loss = eval_loss.avg
print('best eval loss {:.6f}'.format(best_eval_loss))
model_best.eval()
test_loss = AverageMeter('test loss')
for data, target in test_loader:
pred = model(data)
loss = F.mse_loss(pred, target)
test_loss.update(loss.item())
print(test_loss)
print('=' * 20)
return model_best, test_loss.avg
def train_model(train_loader, eval_loader, test_loader):
gmm = get_gmm(dist=DIST_DICT[TRAIN_DIST], n_components=1)
criteria = {
'MSE': nn.MSELoss(),
'Reweight': ReweightL2(DIST_DICT[TRAIN_DIST]),
'GAI': GAILossMD(init_noise_sigma=NOISE_SIGMA, gmm=gmm),
'BMC': BMCLossMD(init_noise_sigma=NOISE_SIGMA),
# For learnable noise, we assume we don't know the ground truth noise scale
# Therefore we multiply an offset 1.5 to the ground truth noise scale
'GAI Learnable Noise': GAILossMD(init_noise_sigma=1.5 * NOISE_SIGMA, gmm=gmm),
'BMC Learnable Noise': BMCLossMD(init_noise_sigma=1.5 * NOISE_SIGMA),
}
criteria = {k: criteria[k] for k in CRITERIA_TO_USE} # Only use selected criteria
perf_stats = {}
models_trained = {}
for criterion_name, criterion in criteria.items():
print("Training with distribution {} and criterion {}".format(TRAIN_DIST, criterion_name))
model, optimizer, scheduler = prepare_model()
if 'Learnable Noise' in criterion_name:
optimizer.add_param_group({'params': criterion.parameters(), 'lr': 0.01})
model_best, perf_stats[criterion_name] = \
train(train_loader, eval_loader, test_loader, model, optimizer, scheduler, criterion)
models_trained[criterion_name] = model_best
print('Final results')
for method in perf_stats:
print('{0: <20}: {1:.6f}'.format(method, perf_stats[method]))
return models_trained
def main():
train_loader, eval_loader, test_loader = prepare_data()
models_trained = train_model(train_loader, eval_loader, test_loader)
visualize_md(models_trained, train_loader, test_loader, Y_LB, Y_UB)
if __name__ == '__main__':
main()
``` |
{
"source": "jiawei-ren/ModelNet-C",
"score": 2
} |
#### File: ModelNet-C/build_modelnetc/corrupt.py
```python
import os
import glob
import h5py
import numpy as np
from corrupt_utils import corrupt_scale, corrupt_jitter, corrupt_rotate, corrupt_dropout_global, corrupt_dropout_local, \
corrupt_add_global, corrupt_add_local
NUM_POINTS = 1024
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, '../data')
np.random.seed(0)
corruptions = {
'clean': None,
'scale': corrupt_scale,
'jitter': corrupt_jitter,
'rotate': corrupt_rotate,
'dropout_global': corrupt_dropout_global,
'dropout_local': corrupt_dropout_local,
'add_global': corrupt_add_global,
'add_local': corrupt_add_local,
}
def download():
if not os.path.exists(DATA_DIR):
os.mkdir(DATA_DIR)
if not os.path.exists(os.path.join(DATA_DIR, 'modelnet40_ply_hdf5_2048')):
www = 'https://shapenet.cs.stanford.edu/media/modelnet40_ply_hdf5_2048.zip'
zipfile = os.path.basename(www)
os.system('wget %s --no-check-certificate; unzip %s' % (www, zipfile))
os.system('mv %s %s' % (zipfile[:-4], DATA_DIR))
os.system('rm %s' % (zipfile))
def load_data(partition):
download()
all_data = []
all_label = []
for h5_name in glob.glob(os.path.join(DATA_DIR, 'modelnet40_ply_hdf5_2048', 'ply_data_%s*.h5' % partition)):
f = h5py.File(h5_name, 'r')
data = f['data'][:].astype('float32')
label = f['label'][:].astype('int64')
f.close()
all_data.append(data)
all_label.append(label)
all_data = np.concatenate(all_data, axis=0)
all_label = np.concatenate(all_label, axis=0)
all_data = all_data[:, :NUM_POINTS, :]
return all_data, all_label
def save_data(all_data, all_label, corruption_type, level):
if not os.path.exists(os.path.join(DATA_DIR, 'modelnet_c')):
os.makedirs(os.path.join(DATA_DIR, 'modelnet_c'))
if corruption_type == 'clean':
h5_name = os.path.join(DATA_DIR, 'modelnet_c', '{}.h5'.format(corruption_type))
else:
h5_name = os.path.join(DATA_DIR, 'modelnet_c', '{}_{}.h5'.format(corruption_type, level))
f = h5py.File(h5_name, 'w')
f.create_dataset('data', data=all_data)
f.create_dataset('label', data=all_label)
f.close()
print("{} finished".format(h5_name))
def corrupt_data(all_data, type, level):
if type == 'clean':
return all_data
corrupted_data = []
for pcd in all_data:
corrupted_pcd = corruptions[type](pcd, level)
corrupted_data.append(corrupted_pcd)
corrupted_data = np.stack(corrupted_data, axis=0)
return corrupted_data
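# Illustrative sketch (not part of the original script): applies one corruption to a toy
# batch of random point clouds with the same (N, NUM_POINTS, 3) layout as load_data(),
# using the `corruptions` registry defined above (severity levels 0-4, as in main()).
def _example_corrupt_batch(level=2):
    toy_batch = np.random.rand(4, NUM_POINTS, 3).astype('float32')
    return corrupt_data(toy_batch, 'jitter', level)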
def main():
all_data, all_label = load_data('test')
for corruption_type in corruptions:
for level in range(5):
corrupted_data = corrupt_data(all_data, corruption_type, level)
save_data(corrupted_data, all_label, corruption_type, level)
if corruption_type == 'clean':
break
if __name__ == '__main__':
main()
``` |
{
"source": "JiaweiShiCV/Amend-Representation-Module",
"score": 2
} |
#### File: Amend-Representation-Module/src/train_raf-db.py
```python
import warnings
warnings.filterwarnings("ignore")
from apex import amp
import numpy as np
import torch.utils.data as data
from torchvision import transforms
import os, torch
import argparse
import Networks
from dataset import RafDataSet
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--raf_path', type=str, default='./datasets/raf-basic/', help='Raf-DB dataset path.')
parser.add_argument('-c', '--checkpoint', type=str, default=None, help='Pytorch checkpoint file path')
parser.add_argument('--batch_size', type=int, default=256, help='Batch size.')
parser.add_argument('--val_batch_size', type=int, default=64, help='Batch size for validation.')
parser.add_argument('--optimizer', type=str, default="adam", help='Optimizer, adam or sgd.')
parser.add_argument('--lr', type=float, default=0.01, help='Initial learning rate for sgd.')
parser.add_argument('--momentum', default=0.9, type=float, help='Momentum for sgd')
parser.add_argument('--workers', default=4, type=int, help='Number of data loading workers (default: 4)')
parser.add_argument('--epochs', type=int, default=70, help='Total training epochs.')
parser.add_argument('--wandb', action='store_true')
return parser.parse_args()
def run_training():
args = parse_args()
if args.wandb:
import wandb
wandb.init(project='raf-db')
model = Networks.ResNet18_ARM___RAF()
# print(model)
print("batch_size:", args.batch_size)
if args.checkpoint:
print("Loading pretrained weights...", args.checkpoint)
checkpoint = torch.load(args.checkpoint)
model.load_state_dict(checkpoint["model_state_dict"], strict=False)
data_transforms = transforms.Compose([
transforms.ToPILImage(),
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
transforms.RandomErasing(scale=(0.02, 0.1))])
train_dataset = RafDataSet(args.raf_path, phase='train', transform=data_transforms, basic_aug=True)
print('Train set size:', train_dataset.__len__())
train_loader = torch.utils.data.DataLoader(train_dataset,
batch_size=args.batch_size,
num_workers=args.workers,
shuffle=True,
pin_memory=True)
data_transforms_val = transforms.Compose([
transforms.ToPILImage(),
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
val_dataset = RafDataSet(args.raf_path, phase='test', transform=data_transforms_val)
val_num = val_dataset.__len__()
print('Validation set size:', val_num)
val_loader = torch.utils.data.DataLoader(val_dataset,
batch_size=args.val_batch_size,
num_workers=args.workers,
shuffle=False,
pin_memory=True)
params = model.parameters()
if args.optimizer == 'adam':
optimizer = torch.optim.Adam(params, weight_decay=1e-4)
elif args.optimizer == 'sgd':
optimizer = torch.optim.SGD(params, args.lr, momentum=args.momentum, weight_decay=1e-4)
if args.wandb:
config = wandb.config
config.learning_rate = args.lr
else:
raise ValueError("Optimizer not supported.")
print(optimizer)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)
model = model.cuda()
model, optimizer = amp.initialize(model, optimizer, opt_level="O1", verbosity=0)
CE_criterion = torch.nn.CrossEntropyLoss()
best_acc = 0
for i in range(1, args.epochs + 1):
train_loss = 0.0
correct_sum = 0
iter_cnt = 0
model.train()
for batch_i, (imgs, targets, indexes) in enumerate(train_loader):
iter_cnt += 1
optimizer.zero_grad()
imgs = imgs.cuda()
outputs, alpha = model(imgs)
targets = targets.cuda()
CE_loss = CE_criterion(outputs, targets)
loss = CE_loss
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
optimizer.step()
train_loss += loss
_, predicts = torch.max(outputs, 1)
correct_num = torch.eq(predicts, targets).sum()
correct_sum += correct_num
train_acc = correct_sum.float() / float(train_dataset.__len__())
train_loss = train_loss/iter_cnt
print('[Epoch %d] Training accuracy: %.4f. Loss: %.3f LR: %.6f' %
(i, train_acc, train_loss, optimizer.param_groups[0]["lr"]))
scheduler.step()
with torch.no_grad():
val_loss = 0.0
iter_cnt = 0
bingo_cnt = 0
model.eval()
for batch_i, (imgs, targets, _) in enumerate(val_loader):
outputs, _ = model(imgs.cuda())
targets = targets.cuda()
CE_loss = CE_criterion(outputs, targets)
loss = CE_loss
val_loss += loss
iter_cnt += 1
_, predicts = torch.max(outputs, 1)
correct_or_not = torch.eq(predicts, targets)
bingo_cnt += correct_or_not.sum().cpu()
val_loss = val_loss/iter_cnt
val_acc = bingo_cnt.float()/float(val_num)
val_acc = np.around(val_acc.numpy(), 4)
print("[Epoch %d] Validation accuracy:%.4f. Loss:%.3f" % (i, val_acc, val_loss))
if args.wandb:
wandb.log(
{
"train_loss": train_loss,
"train_acc": train_acc,
"val_loss": val_loss,
"val_acc": val_acc,
}
)
if val_acc > 0.92 and val_acc > best_acc:
torch.save({'iter': i,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(), },
os.path.join('models/RAF-DB', "epoch" + str(i) + "_acc" + str(val_acc) + ".pth"))
print('Model saved.')
if val_acc > best_acc:
best_acc = val_acc
print("best_acc:" + str(best_acc))
if __name__ == "__main__":
run_training()
``` |
{
"source": "jiaweiz414/BOSCH-project",
"score": 3
} |
#### File: jiaweiz414/BOSCH-project/readdata.py
```python
import sys
from pyspark import SparkConf, SparkContext
import numpy as np
import scipy.sparse as sps
from pyspark.mllib.linalg import Vectors
from pyspark.mllib.linalg import SparseVector
from pyspark.mllib.regression import LabeledPoint
from numpy import linalg as LA
import matplotlib.pyplot as plt
def parseData(line):
data = line.split(",")
    index = data[:2]  # the first two fields are index columns (not the row number)
temp = data[2:]
features = []
for i in temp:
if i == "":
features.append(0)
else:
features.append(float(i.replace("T","").replace("-", "")))
return [index, np.array(features)]
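# Illustrative sketch (not part of the original script): shows how parseData handles one
# made-up CSV row -- the first two fields become the index, empty fields become 0, and
# 'T'/'-' characters are stripped before the float conversion.
def _example_parse_row():
    sample = "4,L0_S0_F0,0.03,,T12.5"
    index, features = parseData(sample)
    # index -> ['4', 'L0_S0_F0'], features -> array with values [0.03, 0.0, 12.5]
    return index, features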
"""
multiply matrix A by vector x
"""
if __name__ == "__main__":
# set up environment
conf = SparkConf()
conf.setAppName("columns_reduction")
sc = SparkContext(conf=conf)
inputData = sc.textFile("/home/jiz414/bosch/non_nan_test1.csv")
header = inputData.first()
inputData = inputData.filter(lambda x : x != header)
    data = inputData.map(parseData)
    print data.take(2)
    totalPoints = data.count()
    print totalPoints
    print data.take(5)
``` |
{
"source": "JiaweiZhuang/advection_solver",
"score": 2
} |
#### File: advection_solver/advection_solver/advection_2d.py
```python
import numpy as np
from numba import jit
from . advection_1d import upwind_tendency, vanleer_tendency
# Apply 1D operator in x, y dimensions separately, and then add up tendencies
# TODO: make those helper functions less verbose
@jit(nopython=True)
def _tend_x_inner(c, u, dx, dt):
ny, nx = c.shape
tend = np.empty((ny, nx))
for i in range(ny):
tend[i, :] = upwind_tendency(c[i, :], u[i, :], dx, dt)
return tend
@jit(nopython=True)
def _tend_y_inner(c, v, dy, dt):
ny, nx = c.shape
tend = np.empty((ny, nx))
for j in range(nx):
tend[:, j] = upwind_tendency(c[:, j], v[:, j], dy, dt)
return tend
@jit(nopython=True)
def _tend_x_outer(c, u, dx, dt):
ny, nx = c.shape
tend = np.empty((ny, nx))
for i in range(ny):
tend[i, :] = vanleer_tendency(c[i, :], u[i, :], dx, dt)
return tend
@jit(nopython=True)
def _tend_y_outer(c, v, dy, dt):
ny, nx = c.shape
tend = np.empty((ny, nx))
for j in range(nx):
tend[:, j] = vanleer_tendency(c[:, j], v[:, j], dy, dt)
return tend
@jit(nopython=True)
def tendency_2d_vanleer(c, u, v, dx, dy, dt):
'''
2D advection tendency with periodic boundary
Use second-order (VanLeer) scheme for outer operator and upwind for inner operator
Args:
c: 2d numpy array, density field
u: 2d numpy array, wind field in x direction
v: 2d numpy array, wind field in y direction
dx: float, grid spacing (assume uniform)
dy: float, grid spacing (assume uniform, but can be different from dx)
dt: float, time step
Returns:
2d numpy array with same shape as `c`
'''
ny, nx = c.shape
# operator splitting in x and y directions
tendency = (_tend_x_outer(0.5*_tend_y_inner(c, v, dy, dt) + c, u, dx, dt) +
_tend_y_outer(0.5*_tend_x_inner(c, u, dx, dt) + c, v, dy, dt)
)
return tendency
@jit(nopython=True)
def tendency_2d_upwind(c, u, v, dx, dy, dt):
'''
2D advection tendency with periodic boundary
Use upwind scheme for both outer operator and inner operator
Args:
c: 2d numpy array, density field
u: 2d numpy array, wind field in x direction
v: 2d numpy array, wind field in y direction
dx: float, grid spacing (assume uniform)
dy: float, grid spacing (assume uniform, but can be different from dx)
dt: float, time step
Returns:
2d numpy array with same shape as `c`
'''
ny, nx = c.shape
# operator splitting in x and y directions
tendency = (_tend_x_inner(0.5*_tend_y_inner(c, v, dy, dt) + c, u, dx, dt) +
_tend_y_inner(0.5*_tend_x_inner(c, u, dx, dt) + c, v, dy, dt)
)
return tendency
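# Illustrative sketch (not part of the original module): one forward-Euler update with the
# upwind tendency on a small doubly-periodic grid. The grid size, wind and time step are
# made-up values chosen so that u*dt/dx < 1 (CFL), and it assumes the returned tendency
# already contains the dt factor, as the 0.5*inner half-step above suggests.
def _example_upwind_step():
    ny, nx = 32, 32
    dx = dy = 1.0 / nx
    dt = 0.5 * dx                  # CFL-safe for unit wind speed
    c = np.zeros((ny, nx))
    c[12:20, 12:20] = 1.0          # square blob of tracer
    u = np.ones((ny, nx))
    v = np.zeros((ny, nx))
    return c + tendency_2d_upwind(c, u, v, dx, dy, dt)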
``` |
{
"source": "jiawen9611/classification_Pytorch_Proj",
"score": 2
} |
#### File: classification_Pytorch_Proj/models/__init__.py
```python
from .lenet import *
from .alexnet import *
from .resnet_152 import *
# from .vgg import *
# from .resnet import *
# from .preresnet import *
# from .senet import *
# from .resnext import *
# from .densenet import *
# from .shake_shake import *
# from .sknet import *
# from .genet import *
# from .cbam_resnext import *
def get_model(config):
# todo
return globals()[config.architecture](config.if_pretrain, config.num_classes, config.input_size_w,
config.input_size_h)
# return config.architecture
```
#### File: classification_Pytorch_Proj/network/get_net_imformation.py
```python
import numpy as np
import torch.nn as nn
# count model's total parameters
def count_parameters(model):
model.parameters()
# return sum(p.numel() for p in model.parameters() if p.requires_grad)
return sum(p.numel() for p in list(model.parameters()))
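# Illustrative sketch (not part of the original file): nn.Linear(10, 5) holds
# 10*5 weights + 5 biases, so count_parameters should report 55.
def _example_count_parameters():
    return count_parameters(nn.Linear(10, 5))  # -> 55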
# estimate GPU-used pace
def gpu_used_estimate(model, input_tensor, type_size=4):
for batch_index, (inputs, targets) in enumerate(input_tensor):
if batch_index == 0:
input = inputs
break
para = sum([np.prod(list(p.size())) for p in model.parameters()])
# print('Model {} : params: {:4f}M'.format(model._get_name(), para * type_size / 1000 / 1000))
input_ = input.clone()
# input_ = input.type(torch.FloatTensor)
input_.requires_grad_(requires_grad=False)
# input_ = input_.cpu()
input_ = input_.to('cuda')
mods = list(model.modules())
out_sizes = []
for i in range(2, len(mods)):
m = mods[i]
if isinstance(m, nn.ReLU):
if m.inplace:
continue
out = m(input_)
out_sizes.append(np.array(out.size()))
input_ = out
total_nums = 0
for i in range(len(out_sizes)):
s = out_sizes[i]
nums = np.prod(np.array(s))
total_nums += nums
print('Model {} : intermedite variables: {:3f} M (without backward)'
.format(model._get_name(), total_nums * type_size / 1000 / 1000))
print('Model {} : intermedite variables: {:3f} M (with backward)'
.format(model._get_name(), total_nums * type_size * 2 / 1000 / 1000))
return total_nums * type_size * 2 / 1000 / 1000 + para * type_size / 1000 / 1000
``` |
{
"source": "jiawen9611/classification_Tensorflow_Proj",
"score": 2
} |
#### File: classification_Tensorflow_Proj/models/resnet_v1_50.py
```python
import tensorflow as tf
from tensorflow.contrib.slim import nets
# import preprocessing
from datasets.data_preprocess import *
slim = tf.contrib.slim
class Model(object):
def __init__(self, num_classes, is_training,
fixed_resize_side=224,
default_image_size=224,
dataset_config=None, if_reuse=None):
"""Constructor.
Args:
is_training: A boolean indicating whether the training version of
computation graph should be constructed.
num_classes: Number of classes.
"""
self._num_classes = num_classes
self._is_training = is_training
self._fixed_resize_side = fixed_resize_side
self._default_image_size = default_image_size
self._dataset_config = dataset_config
self.if_reuse = if_reuse
@property
def num_classes(self):
return self._num_classes
def preprocess(self, inputs):
"""preprocessing.
Outputs of this function can be passed to loss or postprocess functions.
Args:
preprocessed_inputs: A float32 tensor with shape [batch_size,
height, width, num_channels] representing a batch of images.
Returns:
prediction_dict: A dictionary holding prediction tensors to be
passed to the Loss or Postprocess functions.
"""
preprocessed_inputs = preprocess_images(
inputs, self._default_image_size, self._default_image_size,
is_training=self._is_training,
border_expand=False, normalize=True,
preserving_aspect_ratio_resize=False,
dataset_config=self._dataset_config,
)
preprocessed_inputs = tf.cast(preprocessed_inputs, tf.float32)
return preprocessed_inputs
def predict(self, preprocessed_inputs):
"""Predict prediction tensors from inputs tensor.
Outputs of this function can be passed to loss or postprocess functions.
Args:
preprocessed_inputs: A float32 tensor with shape [batch_size,
height, width, num_channels] representing a batch of images.
Returns:
prediction_dict: A dictionary holding prediction tensors to be
passed to the Loss or Postprocess functions.
"""
        # Does every call to predict() re-enter arg_scope? slim.train did not before.
        # The validation failure is not caused by this ordering either.
with slim.arg_scope(nets.resnet_v1.resnet_arg_scope()):
net, endpoints = nets.resnet_v1.resnet_v1_50(
preprocessed_inputs, num_classes=None,
is_training=self._is_training)
# net = tf.squeeze(net, axis=[1, 2])
# logits = slim.fully_connected(net, num_outputs=self.num_classes,
# activation_fn=None, scope='Predict')
with tf.variable_scope('Logits'):
net = tf.squeeze(net, axis=[1, 2])
net = slim.dropout(net, keep_prob=0.5, scope='scope')
logits = slim.fully_connected(net, num_outputs=self.num_classes,
activation_fn=None, scope='fc')
prediction_dict = {'logits': logits}
return prediction_dict
def postprocess(self, prediction_dict):
"""Convert predicted output tensors to final forms.
Args:
prediction_dict: A dictionary holding prediction tensors.
**params: Additional keyword arguments for specific implementations
of specified models.
Returns:
A dictionary containing the postprocessed results.
"""
logits = prediction_dict['logits']
logits = tf.nn.softmax(logits)
classes = tf.argmax(logits, axis=1)
postprocessed_dict = {'logits': logits,
'classes': classes}
return postprocessed_dict
def loss(self, prediction_dict, groundtruth_lists):
"""Compute scalar loss tensors with respect to provided groundtruth.
Args:
prediction_dict: A dictionary holding prediction tensors.
groundtruth_lists_dict: A dict of tensors holding groundtruth
information, with one entry for each image in the batch.
Returns:
A dictionary mapping strings (loss names) to scalar tensors
representing loss values.
"""
logits = prediction_dict['logits']
slim.losses.sparse_softmax_cross_entropy(
logits=logits,
labels=groundtruth_lists,
scope='Loss')
loss = slim.losses.get_total_loss()
cross_entropy_mean = tf.reduce_mean(loss, name='cross_entropy')
loss_dict = {'loss': cross_entropy_mean}
return loss_dict
def accuracy(self, postprocessed_dict, groundtruth_lists):
"""Calculate accuracy.
Args:
postprocessed_dict: A dictionary containing the postprocessed
results
groundtruth_lists: A dict of tensors holding groundtruth
information, with one entry for each image in the batch.
Returns:
accuracy: The scalar accuracy.
"""
classes = postprocessed_dict['classes']
accuracy = tf.reduce_mean(
tf.cast(tf.equal(classes, groundtruth_lists), dtype=tf.float32))
return accuracy
```
#### File: jiawen9611/classification_Tensorflow_Proj/test.py
```python
import os
# import numpy as np
import tensorflow as tf
# from tensorflow.python import pywrap_tensorflow
from datasets.create_classification_data import *
import yaml
import time
from easydict import EasyDict
from datasets.data_preprocess import *
# NOTE: only for easy_resnet50
if_pb_model = False
flags = tf.app.flags
flags.DEFINE_string('model_ckpt_path', 'exp_output/easy_resnet50/ckpt/model.ckpt', 'Path to model checkpoint.')
flags.DEFINE_string('test_img_path', 'datasets/easy/test_with_label/', 'dataset.')
flags.DEFINE_string('config_path', 'exp_configs/easy_resnet50/config.yaml', 'config_path.')
FLAGS = flags.FLAGS
def test_model():
ckpt_path = FLAGS.model_ckpt_path
config_path = FLAGS.config_path
with open(config_path) as f:
config = yaml.load(f, Loader=yaml.FullLoader)
config = EasyDict(config)
saver = tf.train.import_meta_graph(ckpt_path + '.meta')
with tf.Session() as sess:
saver.restore(sess, ckpt_path)
inputs = tf.get_default_graph().get_tensor_by_name('inputs:0')
classes = tf.get_default_graph().get_tensor_by_name('classes:0')
is_training = tf.get_default_graph().get_tensor_by_name('is_training:0')
start_time = time.time()
images_path = os.path.join(FLAGS.test_img_path, '*.jpg')
for image_file in glob.glob(images_path):
image = cv2.imread(image_file)
image = cv2.resize(image, (config.input_resize_w, config.input_resize_h))
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = np.expand_dims(image, axis=0)
predicted_label = sess.run(classes, feed_dict={inputs: image, is_training: False})
print(predicted_label, ' vs ', image_file)
time_count = time.time() - start_time
examples_per_sec = config.val_num / time_count
print("speed:", examples_per_sec)
if __name__ == '__main__':
test_model()
``` |
{
"source": "jiawen9611/vis-mmdetection",
"score": 2
} |
#### File: mmdet/datasets/coco_seg.py
```python
from pycocotools.coco import COCO
import numpy as np
import skimage.io as io
import matplotlib.pyplot as plt
import pylab
import cv2
import math
# import Polygon as plg
# from tqdm import tqdm
from pycocotools.coco import COCO
from .custom import CustomDataset
from .custompolarmask import CustomDatasetpolarmask
from .registry import DATASETS
import os.path as osp
import warnings
import mmcv
import numpy as np
from imagecorruptions import corrupt
from mmcv.parallel import DataContainer as DC
from torch.utils.data import Dataset
import torch
from .extra_aug import ExtraAugmentation
from .registry import DATASETS
from .transforms import (BboxTransform, ImageTransform, MaskTransform,
Numpy2Tensor, SegMapTransform, SegmapTransform)
from .utils import random_scale, to_tensor
from IPython import embed
import time
INF = 1e8
def get_angle(v1, v2=[0,0,100,0]):
dx1 = v1[2] - v1[0]
dy1 = v1[3] - v1[1]
dx2 = v2[2] - v2[0]
dy2 = v2[3] - v2[1]
angle1 = math.atan2(dy1, dx1)
angle1 = int(angle1 * 180/math.pi)
angle2 = math.atan2(dy2, dx2)
angle2 = int(angle2 * 180/math.pi)
included_angle = angle2 - angle1
if included_angle < 0:
included_angle += 360
return included_angle
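# Illustrative sketch (not part of the original file): get_angle returns the difference
# (direction of v2 minus direction of v1) in integer degrees, wrapped to [0, 360). With the
# default v2 along +x, a segment of slope 2 (about 63 degrees) maps to 360 - 63 = 297.
def _example_get_angle():
    return get_angle([0, 0, 1, 2])  # -> 297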
@DATASETS.register_module
class Coco_Seg_Dataset(CustomDatasetpolarmask):
CLASSES = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
'train', 'truck', 'boat', 'traffic_light', 'fire_hydrant',
'stop_sign', 'parking_meter', 'bench', 'bird', 'cat', 'dog',
'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
'skis', 'snowboard', 'sports_ball', 'kite', 'baseball_bat',
'baseball_glove', 'skateboard', 'surfboard', 'tennis_racket',
'bottle', 'wine_glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
'hot_dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
'potted_plant', 'bed', 'dining_table', 'toilet', 'tv', 'laptop',
'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave',
'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock',
'vase', 'scissors', 'teddy_bear', 'hair_drier', 'toothbrush')
def load_annotations(self, ann_file):
self.coco = COCO(ann_file)
self.cat_ids = self.coco.getCatIds()
self.cat2label = {
cat_id: i + 1
for i, cat_id in enumerate(self.cat_ids)
}
self.img_ids = self.coco.getImgIds()
img_infos = []
for i in self.img_ids:
info = self.coco.loadImgs([i])[0]
info['filename'] = info['file_name']
img_infos.append(info)
return img_infos
def get_ann_info(self, idx):
img_id = self.img_infos[idx]['id']
ann_ids = self.coco.getAnnIds(imgIds=[img_id])
ann_info = self.coco.loadAnns(ann_ids)
return self._parse_ann_info(ann_info, self.with_mask)
def _filter_imgs(self, min_size=32):
"""Filter images too small or without ground truths."""
valid_inds = []
ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
for i, img_info in enumerate(self.img_infos):
if self.img_ids[i] not in ids_with_ann:
continue
if min(img_info['width'], img_info['height']) >= min_size:
valid_inds.append(i)
return valid_inds
def _parse_ann_info(self, ann_info, with_mask=True):
"""Parse bbox and mask annotation.
Args:
ann_info (list[dict]): Annotation info of an image.
with_mask (bool): Whether to parse mask annotations.
Returns:
dict: A dict containing the following keys: bboxes, bboxes_ignore,
labels, masks, mask_polys, poly_lens.
"""
gt_bboxes = []
gt_labels = []
gt_bboxes_ignore = []
# Two formats are provided.
# 1. mask: a binary map of the same size of the image.
# 2. polys: each mask consists of one or several polys, each poly is a
# list of float.
self.debug = False
if with_mask:
gt_masks = []
gt_mask_polys = []
gt_poly_lens = []
if self.debug:
count = 0
total = 0
for i, ann in enumerate(ann_info):
if ann.get('ignore', False):
continue
x1, y1, w, h = ann['bbox']
#filter bbox < 10
if self.debug:
total+=1
if ann['area'] <= 15 or (w < 10 and h < 10) or self.coco.annToMask(ann).sum() < 15:
# print('filter, area:{},w:{},h:{}'.format(ann['area'],w,h))
if self.debug:
count+=1
continue
bbox = [x1, y1, x1 + w - 1, y1 + h - 1]
if ann['iscrowd']:
gt_bboxes_ignore.append(bbox)
else:
gt_bboxes.append(bbox)
gt_labels.append(self.cat2label[ann['category_id']])
if with_mask:
gt_masks.append(self.coco.annToMask(ann))
mask_polys = [
p for p in ann['segmentation'] if len(p) >= 6
] # valid polygons have >= 3 points (6 coordinates)
poly_lens = [len(p) for p in mask_polys]
gt_mask_polys.append(mask_polys)
gt_poly_lens.extend(poly_lens)
if self.debug:
print('filter:',count/total)
if gt_bboxes:
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
gt_labels = np.array(gt_labels, dtype=np.int64)
else:
gt_bboxes = np.zeros((0, 4), dtype=np.float32)
gt_labels = np.array([], dtype=np.int64)
if gt_bboxes_ignore:
gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
else:
gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
ann = dict(
bboxes=gt_bboxes, labels=gt_labels, bboxes_ignore=gt_bboxes_ignore)
if with_mask:
ann['masks'] = gt_masks
# poly format is not used in the current implementation
ann['mask_polys'] = gt_mask_polys
ann['poly_lens'] = gt_poly_lens
return ann
def prepare_train_img(self, idx):
img_info = self.img_infos[idx]
img = mmcv.imread(osp.join(self.img_prefix, img_info['filename']))
# corruption
if self.corruption is not None:
img = corrupt(
img,
severity=self.corruption_severity,
corruption_name=self.corruption)
# load proposals if necessary
if self.proposals is not None:
proposals = self.proposals[idx][:self.num_max_proposals]
# TODO: Handle empty proposals properly. Currently images with
# no proposals are just ignored, but they can be used for
# training in concept.
if len(proposals) == 0:
return None
if not (proposals.shape[1] == 4 or proposals.shape[1] == 5):
raise AssertionError(
'proposals should have shapes (n, 4) or (n, 5), '
'but found {}'.format(proposals.shape))
if proposals.shape[1] == 5:
scores = proposals[:, 4, None]
proposals = proposals[:, :4]
else:
scores = None
ann = self.get_ann_info(idx)
gt_bboxes = ann['bboxes']
gt_labels = ann['labels']
if self.with_crowd:
gt_bboxes_ignore = ann['bboxes_ignore']
# skip the image if there is no valid gt bbox
if len(gt_bboxes) == 0 and self.skip_img_without_anno:
warnings.warn('Skip the image "%s" that has no valid gt bbox' %
osp.join(self.img_prefix, img_info['filename']))
return None
# apply transforms
flip = True if np.random.rand() < self.flip_ratio else False
# randomly sample a scale
img_scale = random_scale(self.img_scales, self.multiscale_mode)
img, img_shape, pad_shape, scale_factor = self.img_transform(img, img_scale, flip, keep_ratio=self.resize_keep_ratio)
img = img.copy()
if self.with_seg:
gt_seg = mmcv.imread(
osp.join(self.seg_prefix,
img_info['filename'].replace('jpg', 'png')),
flag='unchanged')
gt_seg = self.seg_transform(gt_seg.squeeze(), img_scale, flip)
gt_seg = mmcv.imrescale(
gt_seg, self.seg_scale_factor, interpolation='nearest')
gt_seg = gt_seg[None, ...]
if self.proposals is not None:
proposals = self.bbox_transform(proposals, img_shape, scale_factor,
flip)
proposals = np.hstack([proposals, scores
]) if scores is not None else proposals
gt_bboxes = self.bbox_transform(gt_bboxes, img_shape, scale_factor,
flip)
if self.with_crowd:
gt_bboxes_ignore = self.bbox_transform(gt_bboxes_ignore, img_shape,
scale_factor, flip)
if self.with_mask:
gt_masks = self.mask_transform(ann['masks'], pad_shape,
scale_factor, flip)
ori_shape = (img_info['height'], img_info['width'], 3)
img_meta = dict(
ori_shape=ori_shape,
img_shape=img_shape,
pad_shape=pad_shape,
scale_factor=scale_factor,
flip=flip)
data = dict(
img=DC(to_tensor(img), stack=True),
img_meta=DC(img_meta, cpu_only=True),
gt_bboxes=DC(to_tensor(gt_bboxes)))
if self.with_label:
data['gt_labels'] = DC(to_tensor(gt_labels))
if self.with_crowd:
data['gt_bboxes_ignore'] = DC(to_tensor(gt_bboxes_ignore))
if self.with_mask:
data['gt_masks'] = DC(gt_masks, cpu_only=True)
#--------------------offline ray label generation-----------------------------
self.center_sample = True
self.use_mask_center = True
self.radius = 1.5
self.strides = [8, 16, 32, 64, 128]
self.regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512),(512, INF))
featmap_sizes = self.get_featmap_size(pad_shape)
self.featmap_sizes = featmap_sizes
num_levels = len(self.strides)
all_level_points = self.get_points(featmap_sizes)
self.num_points_per_level = [i.size()[0] for i in all_level_points]
expanded_regress_ranges = [
all_level_points[i].new_tensor(self.regress_ranges[i])[None].expand_as(
all_level_points[i]) for i in range(num_levels)
]
concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0)
concat_points = torch.cat(all_level_points, 0)
gt_masks = gt_masks[:len(gt_bboxes)]
gt_bboxes = torch.Tensor(gt_bboxes)
gt_labels = torch.Tensor(gt_labels)
_labels, _bbox_targets, _mask_targets = self.polar_target_single(
gt_bboxes,gt_masks,gt_labels,concat_points, concat_regress_ranges)
data['_gt_labels'] = DC(_labels)
data['_gt_bboxes'] = DC(_bbox_targets)
data['_gt_masks'] = DC(_mask_targets)
#--------------------offline ray label generation-----------------------------
return data
def get_featmap_size(self, shape):
h,w = shape[:2]
featmap_sizes = []
for i in self.strides:
featmap_sizes.append([int(h / i), int(w / i)])
return featmap_sizes
def get_points(self, featmap_sizes):
mlvl_points = []
for i in range(len(featmap_sizes)):
mlvl_points.append(
self.get_points_single(featmap_sizes[i], self.strides[i]))
return mlvl_points
def get_points_single(self, featmap_size, stride):
h, w = featmap_size
x_range = torch.arange(
0, w * stride, stride)
y_range = torch.arange(
0, h * stride, stride)
y, x = torch.meshgrid(y_range, x_range)
points = torch.stack(
(x.reshape(-1), y.reshape(-1)), dim=-1) + stride // 2
return points.float()
def polar_target_single(self, gt_bboxes, gt_masks, gt_labels, points, regress_ranges):
num_points = points.size(0)
num_gts = gt_labels.size(0)
if num_gts == 0:
return gt_labels.new_zeros(num_points), \
gt_bboxes.new_zeros((num_points, 4))
areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0] + 1) * (
gt_bboxes[:, 3] - gt_bboxes[:, 1] + 1)
# TODO: figure out why these two are different
# areas = areas[None].expand(num_points, num_gts)
areas = areas[None].repeat(num_points, 1)
regress_ranges = regress_ranges[:, None, :].expand(
num_points, num_gts, 2)
gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4)
        # xs, ys are the x and y coordinates of the points
xs, ys = points[:, 0], points[:, 1]
xs = xs[:, None].expand(num_points, num_gts)
ys = ys[:, None].expand(num_points, num_gts)
left = xs - gt_bboxes[..., 0]
right = gt_bboxes[..., 2] - xs
top = ys - gt_bboxes[..., 1]
bottom = gt_bboxes[..., 3] - ys
        bbox_targets = torch.stack((left, top, right, bottom), -1)  # distances from every feature-map point to the four sides of each gt box, [num_pix, num_gt, 4]
        # build the mask targets the same way; labels are also assigned from the mask centroid instead of the bbox center
mask_centers = []
mask_contours = []
        # step 1: compute the centroid of each mask, shape [num_gt, 2]
for mask in gt_masks:
cnt, contour = self.get_single_centerpoint(mask)
contour = contour[0]
contour = torch.Tensor(contour).float()
y, x = cnt
mask_centers.append([x,y])
mask_contours.append(contour)
mask_centers = torch.Tensor(mask_centers).float()
        # assign the mask_centers to different FPN levels according to regress_range and the centroid position
mask_centers = mask_centers[None].expand(num_points, num_gts, 2)
#-------------------------------------------------------------------------------------------------------------------------------------------------------------
# condition1: inside a gt bbox
        # apply center sampling
if self.center_sample:
strides = [8, 16, 32, 64, 128]
if self.use_mask_center:
inside_gt_bbox_mask = self.get_mask_sample_region(gt_bboxes,
mask_centers,
strides,
self.num_points_per_level,
xs,
ys,
radius=self.radius)
else:
inside_gt_bbox_mask = self.get_sample_region(gt_bboxes,
strides,
self.num_points_per_level,
xs,
ys,
radius=self.radius)
else:
inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0
# condition2: limit the regression range for each location
max_regress_distance = bbox_targets.max(-1)[0]
inside_regress_range = (
max_regress_distance >= regress_ranges[..., 0]) & (
max_regress_distance <= regress_ranges[..., 1])
areas[inside_gt_bbox_mask == 0] = INF
areas[inside_regress_range == 0] = INF
min_area, min_area_inds = areas.min(dim=1)
labels = gt_labels[min_area_inds]
        labels[min_area == INF] = 0  # shape [num_points], values between 0 and 80 (0 = background)
bbox_targets = bbox_targets[range(num_points), min_area_inds]
pos_inds = labels.nonzero().reshape(-1)
mask_targets = torch.zeros(num_points, 36).float()
pos_mask_ids = min_area_inds[pos_inds]
for p,id in zip(pos_inds, pos_mask_ids):
x, y = points[p]
pos_mask_contour = mask_contours[id]
dists, coords = self.get_36_coordinates(x, y, pos_mask_contour)
mask_targets[p] = dists
return labels, bbox_targets, mask_targets
def get_sample_region(self, gt, strides, num_points_per, gt_xs, gt_ys, radius=1):
center_x = (gt[..., 0] + gt[..., 2]) / 2
center_y = (gt[..., 1] + gt[..., 3]) / 2
center_gt = gt.new_zeros(gt.shape)
# no gt
if center_x[..., 0].sum() == 0:
return gt_xs.new_zeros(gt_xs.shape, dtype=torch.uint8)
beg = 0
for level, n_p in enumerate(num_points_per):
end = beg + n_p
stride = strides[level] * radius
xmin = center_x[beg:end] - stride
ymin = center_y[beg:end] - stride
xmax = center_x[beg:end] + stride
ymax = center_y[beg:end] + stride
# limit sample region in gt
center_gt[beg:end, :, 0] = torch.where(xmin > gt[beg:end, :, 0], xmin, gt[beg:end, :, 0])
center_gt[beg:end, :, 1] = torch.where(ymin > gt[beg:end, :, 1], ymin, gt[beg:end, :, 1])
center_gt[beg:end, :, 2] = torch.where(xmax > gt[beg:end, :, 2], gt[beg:end, :, 2], xmax)
center_gt[beg:end, :, 3] = torch.where(ymax > gt[beg:end, :, 3], gt[beg:end, :, 3], ymax)
beg = end
left = gt_xs - center_gt[..., 0]
right = center_gt[..., 2] - gt_xs
top = gt_ys - center_gt[..., 1]
bottom = center_gt[..., 3] - gt_ys
center_bbox = torch.stack((left, top, right, bottom), -1)
        inside_gt_bbox_mask = center_bbox.min(-1)[0] > 0  # all four distances > 0 means the point is inside the box
return inside_gt_bbox_mask
def get_mask_sample_region(self, gt_bb, mask_center, strides, num_points_per, gt_xs, gt_ys, radius=1):
center_y = mask_center[..., 0]
center_x = mask_center[..., 1]
center_gt = gt_bb.new_zeros(gt_bb.shape)
#no gt
if center_x[..., 0].sum() == 0:
return gt_xs.new_zeros(gt_xs.shape, dtype=torch.uint8)
beg = 0
for level,n_p in enumerate(num_points_per):
end = beg + n_p
stride = strides[level] * radius
xmin = center_x[beg:end] - stride
ymin = center_y[beg:end] - stride
xmax = center_x[beg:end] + stride
ymax = center_y[beg:end] + stride
# limit sample region in gt
center_gt[beg:end, :, 0] = torch.where(xmin > gt_bb[beg:end, :, 0], xmin, gt_bb[beg:end, :, 0])
center_gt[beg:end, :, 1] = torch.where(ymin > gt_bb[beg:end, :, 1], ymin, gt_bb[beg:end, :, 1])
center_gt[beg:end, :, 2] = torch.where(xmax > gt_bb[beg:end, :, 2], gt_bb[beg:end, :, 2], xmax)
center_gt[beg:end, :, 3] = torch.where(ymax > gt_bb[beg:end, :, 3], gt_bb[beg:end, :, 3], ymax)
beg = end
left = gt_xs - center_gt[..., 0]
right = center_gt[..., 2] - gt_xs
top = gt_ys - center_gt[..., 1]
bottom = center_gt[..., 3] - gt_ys
center_bbox = torch.stack((left, top, right, bottom), -1)
inside_gt_bbox_mask = center_bbox.min(-1)[0] > 0 # a point is inside the bbox iff all four distances are > 0
return inside_gt_bbox_mask
def get_centerpoint(self, lis):
area = 0.0
x, y = 0.0, 0.0
a = len(lis)
for i in range(a):
lat = lis[i][0]
lng = lis[i][1]
if i == 0:
lat1 = lis[-1][0]
lng1 = lis[-1][1]
else:
lat1 = lis[i - 1][0]
lng1 = lis[i - 1][1]
fg = (lat * lng1 - lng * lat1) / 2.0
area += fg
x += fg * (lat + lat1) / 3.0
y += fg * (lng + lng1) / 3.0
x = x / area
y = y / area
return [int(x), int(y)]
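# Worked example (added note, not part of the original file): for a 10x10 square contour
# [[0, 0], [0, 10], [10, 10], [10, 0]], the shoelace terms above accumulate to area == 100
# and x == y == 500, so get_centerpoint returns [5, 5] -- the geometric centroid of the polygon.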
def get_single_centerpoint(self, mask):
contour, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
contour.sort(key=lambda x: cv2.contourArea(x), reverse=True) #only save the biggest one
'''debug IndexError: list index out of range'''
# TODO: revisit this workaround
# try:
# count = contour[0][:, 0, :]
# try:
# center = self.get_centerpoint(count)
# except:
# x, y = count.mean(axis=0)
# center = [int(x), int(y)]
# except:
# count = []
# center = []
count = contour[0][:, 0, :]
try:
center = self.get_centerpoint(count)
except:
x,y = count.mean(axis=0)
center=[int(x), int(y)]
# max_points = 360
# if len(contour[0]) > max_points:
# compress_rate = len(contour[0]) // max_points
# contour[0] = contour[0][::compress_rate, ...]
return center, contour
def get_36_coordinates(self, c_x, c_y, pos_mask_contour):
ct = pos_mask_contour[:, 0, :]
x = ct[:, 0] - c_x
y = ct[:, 1] - c_y
# angle = np.arctan2(x, y)*180/np.pi
angle = torch.atan2(x, y) * 180 / np.pi
angle[angle < 0] += 360
angle = angle.int()
# dist = np.sqrt(x ** 2 + y ** 2)
dist = torch.sqrt(x ** 2 + y ** 2)
angle, idx = torch.sort(angle)
dist = dist[idx]
# sample 36 angles, one every 10 degrees
new_coordinate = {}
for i in range(0, 360, 10):
if i in angle:
d = dist[angle==i].max()
new_coordinate[i] = d
elif i + 1 in angle:
d = dist[angle == i+1].max()
new_coordinate[i] = d
elif i - 1 in angle:
d = dist[angle == i-1].max()
new_coordinate[i] = d
elif i + 2 in angle:
d = dist[angle == i+2].max()
new_coordinate[i] = d
elif i - 2 in angle:
d = dist[angle == i-2].max()
new_coordinate[i] = d
elif i + 3 in angle:
d = dist[angle == i+3].max()
new_coordinate[i] = d
elif i - 3 in angle:
d = dist[angle == i-3].max()
new_coordinate[i] = d
distances = torch.zeros(36)
for a in range(0, 360, 10):
if not a in new_coordinate.keys():
new_coordinate[a] = torch.tensor(1e-6)
distances[a//10] = 1e-6
else:
distances[a//10] = new_coordinate[a]
# for idx in range(36):
# dist = new_coordinate[idx * 10]
# distances[idx] = dist
return distances, new_coordinate
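# Illustrative sketch (added note, not part of the original file): the 36 distances can be
# decoded back into polygon vertices by walking the sampled angles. Since atan2(x, y) above
# measures the angle from the +y axis, the decoding uses sin for x and cos for y:
# angles = torch.arange(0, 360, 10).float() * np.pi / 180.0
# xs = c_x + distances * torch.sin(angles)
# ys = c_y + distances * torch.cos(angles)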
def __getitem__(self, idx):
if self.test_mode:
return self.prepare_test_img(idx)
while True:
data = self.prepare_train_img(idx)
if data is None:
idx = self._rand_another(idx)
continue
return data
``` |
{
"source": "jiawenanan/Database",
"score": 3
} |
#### File: jiawenanan/Database/data_preprocessing.py
```python
import pandas as pd
import numpy as np
import zipfile
prison = pd.read_csv('~/Desktop/Prison_Admissions__Beginning_2008.csv')
house = pd.read_csv('~/Desktop/County_zhvi_uc_sfrcondo_tier_0.33_0.67_sm_sa_mon.csv')
vpf = pd.read_csv('~/Desktop/Index__Violent__Property__and_Firearm_Rates_By_County__Beginning_1990.csv')
food = pd.read_csv('~/Desktop/Retail_Food_Stores.csv')
##house = house.rename(columns={"RegionID": "county_id", "SizeRank": "size_rank","RegionName": "county_name","RegionType": "region_type","": "county_name"})
import re
## function for extract the first word
def first_word(text: str) -> str:
return re.search("([\w']+)", text).group(1)
#---------------------------<houseprice>-------------------------------
##extract data for {houseprice} table
##transfer the time series as the value of column ['month_year']
old = ' County' ## we don't want "County" to appear in our ['county_name'] column
house = house[house['State'] == 'NY']
house['RegionName'] = house['RegionName'].apply(lambda x : x.replace(old, ''))
house.sort_values(by = 'SizeRank',axis = 0, ascending= True)
house.reset_index(drop=True, inplace=True)
time_index = house.columns[9:,]
column_countyid = []
column_time = []
column_price = []
for x in range(0,62):
for y in range(0, 298):
column_countyid.append(house.iloc[x,0])
column_time.append(time_index[y])
column_price.append(house.iloc[x,9 + y])
temp = pd.DataFrame()
temp['county_id'] = column_countyid
temp['month_year'] = column_time
temp['price'] = column_price
temp['price'] = temp['price'].astype(int)
## temp is the data for table {houseprice} (18476 * 3)
houseprice = temp
houseprice.to_csv('~/Desktop/housepricetable.csv', index = False)
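## Added note (not in the original script): the wide-to-long reshape above is roughly equivalent to
## a single pandas.melt call, assuming the column names used here:
## houseprice = house.melt(id_vars=['RegionID'], value_vars=list(time_index),
## var_name='month_year', value_name='price').rename(columns={'RegionID': 'county_id'})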
#---------------------------<county>-------------------------------
##extract data for {county} table
temp1 = pd.DataFrame()
temp1['county_id'] = house['RegionID']
temp1['county_name'] = house['RegionName']
temp1['metro'] = house['Metro']
temp1['statecodefips'] = house['StateCodeFIPS']
temp1['size_rank'] = house.index + 1
temp1['municipalcodefips'] = house['MunicipalCodeFIPS']
## we change all NaN values to 'Missing' according to our plan; county is the fk and cannot be null
temp1['metro'] = temp1['metro'].fillna('Missing')
temp1 = temp1.append([{'county_id':0,'county_name':'Missing','metro':'Missing','statecodefips':0,'size_rank': 0,'municipalcodefips':0}], ignore_index=True)
county = temp1
county.to_csv('~/Desktop/countytable.csv', index = False)
## the preprocessed dataset includes 62 rows; however, the final dataset will have 63 rows,
## the 63rd row being ['Missing']
## for future expansion, we store the U.S.-wide statecodefips in a mongodb database
## this application focuses on New York State
## for further mapping use, make a county --> county_id dictionary
county_id = county.county_id.tolist()
county_name = county.county_name.tolist()
county_id_name = {}
for name in county_name:
for i_d in county_id:
county_id_name[name] = i_d
county_id.remove(i_d)
break
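## Added note (not in the original script): assuming county names are unique, the loop above is
## equivalent to the one-liner below
## county_id_name = dict(zip(county_name, county_id))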
#---------------------------<vpf>-------------------------------
## extract data for {vpf} table
## map county to county_id; before doing that, we noticed that table {county} uses 'Saint Lawrence', while the original vpf table uses 'St Lawrence' or 'St. Lawrence'
## so we need to change every 'St Lawrence' / 'St. Lawrence' in the vpf table to 'Saint Lawrence'
vpf['County'].loc[(vpf['County'] == 'St Lawrence')] = 'Saint Lawrence'
vpf['County'].loc[(vpf['County'] == 'St. Lawrence')] = 'Saint Lawrence'
## Map to county_id(the primary key in {county} table)
vpf['County'] = vpf['County'].map(county_id_name)
vpf = vpf.rename(columns={"County": "county_id", "Year":"year_id", "Population": "population", "Index Count" : "index_count", "Index Rate":"index_rate", "Violent Count" :"violent_count", "Violent Rate" :"violent_rate","Property Count":"property_count","Property Rate":"property_rate","Firearm Count":"firearm_count","Firearm Rate":"firearm_rate"})
vpf['population'] = vpf['population'].astype(int)
vpf['firearm_count'] = vpf['firearm_count'].astype(pd.Int32Dtype())
vpf.to_csv('~/Desktop/vpftable.csv', index = False)
#---------------------------<prison>-------------------------------
## extract data for {prison} table
## ['Admission Month'] and ['Month Code'] carry the same meaning without any explanation to users
## As there will be no data loss, we plan to drop ['Admission Month']
## 1) ['County of Commitment'] in the prison dataset is all capitalized; we transform it to be consistent with the table {county}
## we can be sure that no data is lost by doing the above-mentioned transformation
columns = prison.columns.tolist()
string_columns = [1,3,4,5,6,8]
for x in string_columns:
prison[columns[x]] = prison[columns[x]].str.title()
## we change all NaN values in the ['County of Commitment'] column to 'Missing' according to our plan; county is the fk and cannot be null
prison['County of Commitment'] = prison['County of Commitment'].fillna('Missing')
prison = prison.drop(columns = ['Admission Month'])
## Assign case_id to each case as the pk
#prison['case_id'] = prison.index + 1
## change all 'St Lawrence' in the prison table to 'Saint Lawrence'
prison['County of Commitment'].loc[(prison['County of Commitment'] == 'St Lawrence')] = 'Saint Lawrence'
prison['County of Commitment'].loc[(prison['County of Commitment'] == 'Brooklyn')] = 'New York'
prison['County of Commitment'].loc[(prison['County of Commitment'] == 'Manhattan')] = 'New York'
prison['County of Commitment'].loc[(prison['County of Commitment'] == 'Staten Island')] = 'New York'
prison['Last Known Residence County'] = prison['Last Known Residence County'].fillna('Missing')
prison['Last Known Residence County'].loc[(prison['Last Known Residence County'] == 'Richmond (Staten Island)')] = 'New York'
prison['Last Known Residence County'].loc[(prison['Last Known Residence County'] == 'New York (Manhattan)')] = 'New York'
prison['Last Known Residence County'].loc[(prison['Last Known Residence County'] == 'Kings (Brooklyn)')] = 'New York'
prison['Last Known Residence County'].loc[(prison['Last Known Residence County'] == 'Unknown')] = 'Missing'
prison['Last Known Residence County'].loc[(prison['Last Known Residence County'] == 'St Lawrence')] = 'Saint Lawrence'
prison['Last Known Residence County'].loc[(prison['Last Known Residence County'] == 'Out Of State')] = 'Missing'
## the data in the ['Last Known Residence County'] column differs somewhat from the 62 county names in New York State
## for example: Kings (Brooklyn), New York (Manhattan), Rensselaer, Seneca, Westchester
## one county maps to multiple 'Last Known Residence County' values, so this column behaves more like a city
## we decided to extract the words in brackets, store all unique values in a list, and compare it with the city column in table {food}
#prison['Last Known Residence County'].apply(lambda x : y = re.findall('\((.*?)\)', x), unique_last_known_resi.append(y))
before_extract = prison['Last Known Residence County'].unique()
## create a new dataframe and drop duplicates to check the relationship between ['county'] & ['Last Known Residence County']
## the result: Staten Island, Manhattan, and Brooklyn (three cities in New York) are the only three values that differ from the county names
## since both columns describe counties, we eventually set the above-mentioned values to 'New York'
## likewise, map to county_id so that only the fk is kept
prison['County of Commitment'] = prison['County of Commitment'].map(county_id_name)
prison['Last Known Residence County'] = prison['Last Known Residence County'].map(county_id_name)
prison = prison.rename(columns={"Admission Year": "admission_year", "Month Code":"admission_month", "Admission Type": "admission_type", "County of Commitment" : "county_id_commitment", "Last Known Residence County":"county_id_last_known_residence", "Gender" :"gender", "Age at Admission" :" age_at_admission","Most Serious Crime":"most_serious_crime"})
prison.insert(0,'case_id',prison.index + 1)
prison['gender'].loc[(prison['gender'].isnull())] = 'Missing'
prison['gender'].loc[(prison['gender'] == 'Not Coded')] = 'Missing'
#len(prison['County of Commitment'].unique())
#prison['county_id_last_known_residence']
prison.to_csv('~/Desktop/prisontable.csv', index = False)
#---------------------------<food>-------------------------------
food['City'] = food['City'].str.title()
food['County'].loc[(food['County'] == 'St. Lawrence')] = 'Saint Lawrence'
## ['Location'] = ['Street Number'] + ['Street Name'] + ['latitude'] + ['longitude'] +['city'] +['Zip Code']
## in order to eliminate data redundancy, we decided to extract latitude and longitude; all other data can be found in other columns
## result of data manipulation: 1558 unique zip_code, 1452 unique city, 1605 unique zipcode + county_id, 1797 unique zipcode + city, 1499 unique city + county_id
## after data manipulation, we noticed that even ['zipcode'] + ['city'] cannot determine the ['county'] for our food dataset
## the explanation we found on Google: some cities cross into five different counties, and as many as 20% of ZIP Codes cross county lines
Location = []
for x in range(0, len(food)):
if food.iloc[x]['Street Number'] != None and pd.notna(food.iloc[x]['Street Number']): # also skip NaN street numbers, not just None
y = str(food.iloc[x]['Street Number']).strip()
z = food.iloc[x]['Street Name'].strip()
Location.append(y + ' ' + z)
else:
z = food.iloc[x]['Street Name'].strip()
Location.append(z)
temp2 = pd.DataFrame()
temp2['address'] = Location
temp2['zip_code'] = food['Zip Code']
temp2['city'] = food['City']
temp2['county_id'] = food['County'].map(county_id_name)
temp2 = temp2.drop_duplicates(['address'])
## Extract ['address'] for {address} table and {food} without data loss
#---------------------------<address>-------------------------------
temp2.to_csv('~/Desktop/addresstable.csv', index = False)
## addresses are not unique, duplicates exist. For example: a Starbucks inside a Walmart shares the same address as the Walmart
## drop above-mentioned columns without any data loss
food = food.drop(columns = ['County','Street Number', 'Street Name','Address Line 2','Address Line 3','City','State','Zip Code'])
pair= []
def subString(location_column):
for x in range(0, len(location_column)):
if isinstance(location_column[x], str):
y = re.findall(r'[(](.*?)[)]',location_column[x])
if len(y) != 0:
pair.append(y[0])
else:
pair.append(None)
else:
pair.append(None)
## extract the latitude and longitude from food['Location']
subString(food['Location'])
food['latitude_longitude'] = pair
## drop ['Location'] and there is no data loss
food = food.drop(columns = ['Location'])
## add our processed location data to food
food['address'] = Location
food = food.rename(columns={"License Number": "license_number", "Operation Type":"operation_type", "Establishment Type": "establishment_type", "Entity Name" : "entity_name", "DBA Name":"dba_name", "Square Footage" :"square_footage"})
food.to_csv('~/Desktop/foodtable.csv', index = False)
## after the data preprocessing, you should have six .csv files on your desktop
# In[ ]:
``` |
{
"source": "jiawenquan/argparseDemo",
"score": 3
} |
#### File: argparseDemo/argparseDemo/command_line.py
```python
import argparse
import argparseDemo.add_number as add_number
import argparseDemo.sub_number as sub_number
import argparseDemo.mul_number as mul_number
import argparseDemo.div_number as div_number
import argparseDemo.sex_choose as sex_choose
def main():
# parser = argparse.ArgumentParser(
# description='这是一个argparse的使用测试小Demo',
# formatter_class=argparse.ArgumentDefaultsHelpFormatter)
#
# parser.add_argument(
# '--verbose',
# help='Print logs (-1: no logs, 0: progress indicator, 1+: increased verbosity)',
# default=1, type=int)
#
parser = argparse.ArgumentParser(
description="A small demo for testing argparse usage",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"--verbose",
help="Print logs (-1: no logs, 0: progress indicator, 1+: increased verbosity)",
default=1, type=int
)
sub_parsers = parser.add_subparsers(dest='command')
# init subparsers
add_number.init_parser(sub_parsers)
sub_number.init_parser(sub_parsers)
mul_number.init_parser(sub_parsers)
div_number.init_parser(sub_parsers)
sex_choose.init_parser(sub_parsers)
args = parser.parse_args()
try:
if args.command == 'add':
add_number.main(args)
elif args.command == 'sub':
sub_number.main(args)
elif args.command == 'mul':
mul_number.main(args)
elif args.command == 'div':
div_number.main(args,parser)
elif args.command == 'sex':
sex_choose.main(args)
else:
parser.print_help()
except Exception as e:
print(e)
parser.print_help()
# try:
# if args.command == 'convert':
# pass
# # convert.main(args)
# elif args.command == 'info':
# # info.main(args)
# pass
# elif args.command == 'merge':
# pass
# # merger.main(args)
# elif args.command == 'export':
# pass
# # export.main(args)
# else:
# parser.print_help()
# except Exception as e:
# print(e)
# parser.print_help()
if __name__ == '__main__':
main()
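# Illustrative sketch (added, not part of this file): each sub-module imported above is assumed
# to expose init_parser()/main(); the real argparseDemo.add_number is not shown here, so the
# argument names below are guesses:
#
# def init_parser(sub_parsers):
#     parser = sub_parsers.add_parser('add', help='add two numbers')
#     parser.add_argument('x', type=float)
#     parser.add_argument('y', type=float)
#
# def main(args):
#     print(args.x + args.y)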
``` |
{
"source": "jiawenquan/Django_rest_framework_demo",
"score": 3
} |
#### File: day03_1/utils/auth.py
```python
from rest_framework.authentication import BaseAuthentication
from rest_framework import exceptions
token_list = [
'<KEY>',
'<KEY>',
]
class TestAuthentication(BaseAuthentication):
def authenticate(self, request):
val = request.query_params.get('token')
if val not in token_list:
raise exceptions.AuthenticationFailed("User authentication failed")
user = request._request.user
print(user, val)
return (user, val)
def authenticate_header(self, request):
"""
Return a string to be used as the value of the `WWW-Authenticate`
header in a `401 Unauthenticated` response, or `None` if the
authentication scheme should return `403 Permission Denied` responses.
"""
# value of the WWW-Authenticate response header returned when authentication fails
pass
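# Illustrative usage sketch (added, not part of the original project): the class can be attached
# to a DRF view via the standard `authentication_classes` hook; the view below is hypothetical.
# from rest_framework.views import APIView
# from rest_framework.response import Response
#
# class PingView(APIView):
#     authentication_classes = [TestAuthentication]
#
#     def get(self, request):
#         return Response({'token': request.auth})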
``` |
{
"source": "jiawenxiao/ECG_12",
"score": 2
} |
#### File: jiawenxiao/ECG_12/run_12ECG_classifier.py
```python
import numpy as np
import joblib
from get_12ECG_features import get_12ECG_features
from tensorflow.keras.models import load_model
import keras
import tensorflow
# from keras.models import load_model
from keras.models import model_from_json
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
def run_12ECG_classifier(data,header_data,classes,model):
num_classes = len(classes)
current_label = np.zeros(num_classes, dtype=int)
current_score = np.zeros(num_classes)
# Use your classifier here to obtain a label and score for each class.
feats_reshape,feats_external = get_12ECG_features(data,header_data)
tmp_score = model.predict([feats_reshape,feats_external]) # output shape (1, 9)
tmp_label = np.where(tmp_score>0.13,1,0)
for i in range(num_classes):
if np.sum(tmp_label)==0:
max_index=np.argmax(tmp_score)
tmp_label[0,max_index]=1
if np.sum(tmp_label)>3:
sort_index=np.argsort(tmp_score)
min_index=sort_index[0,:6] # indices of the 6 lowest-scoring classes (argsort sorts along the last axis)
tmp_label[0,min_index]=0
for i in range(num_classes):
current_label[i] = np.array(tmp_label[0][i])
current_score[i] = np.array(tmp_score[0][i])
return current_label, current_score
def load_12ECG_model():
# load the model from disk
# filename='physionet_cnn_0403.h5'
# loaded_model = load_model(filename)
with open("model_save_0403.json", "r") as f:
json_string = f.read() # read the locally saved model architecture (JSON)
model = model_from_json(json_string) # rebuild the model from the JSON definition
model.load_weights("model_weight_0403.h5")
return model
``` |
{
"source": "jiawenxiao/physionet2020_0423",
"score": 3
} |
#### File: jiawenxiao/physionet2020_0423/run_12ECG_classifier.py
```python
import numpy as np
import joblib
import torch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def get_12ECG_features(data,header_data):
set_length=20000
resample_interval=2
data_num=np.zeros((1,12,set_length))
data_external=np.zeros((1,3))
length=data.shape[1]
if length>=set_length:
data_num[:,:,:]=data[:,:set_length]
else:
data_num[:,:,:length]=data
resample_index=np.arange(0,set_length,resample_interval).tolist()
data_num=data_num[:,:, resample_index]
for lines in header_data:
if lines.startswith('#Age'):
age=lines.split(': ')[1].strip()
if age=='NaN':
age='60'
if lines.startswith('#Sex'):
sex=lines.split(': ')[1].strip()
length=data.shape[1]
data_external[:,0]=float(age)/100
data_external[:,1]=np.array(sex=='Male').astype(int)
data_external[:,2]=length/30000
data_num=data_num/15000
return data_num,data_external
def load_12ECG_model():
model = torch.load('resnet_0420.pkl',map_location=device)
return model
def run_12ECG_classifier(data,header_data,classes,model):
num_classes = len(classes)
current_label = np.zeros(num_classes, dtype=int)
current_score = np.zeros(num_classes)
# Use your classifier here to obtain a label and score for each class.
feats_reshape,feats_external = get_12ECG_features(data,header_data)
feats_reshape = torch.tensor(feats_reshape,dtype=torch.float,device=device)
feats_external = torch.tensor(feats_external,dtype=torch.float,device=device)
pred = model.forward(feats_reshape,feats_external)
pred =torch.sigmoid(pred)
tmp_score = pred.squeeze().cpu().detach().numpy()
tmp_label = np.where(tmp_score>0.25,1,0)
for i in range(num_classes):
if np.sum(tmp_label)==0:
max_index=np.argmax(tmp_score)
tmp_label[max_index]=1
if np.sum(tmp_label)>3:
sort_index=np.argsort(tmp_score)
min_index=sort_index[:6]
tmp_label[min_index]=0
for i in range(num_classes):
current_label[i] = np.array(tmp_label[i])
current_score[i] = np.array(tmp_score[i])
return current_label, current_score
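# Illustrative usage sketch (added, not part of the original file). `load_challenge_data` is
# assumed to come from the challenge driver script and the record name is hypothetical:
# model = load_12ECG_model()
# data, header_data = load_challenge_data('A0001.mat')
# labels, scores = run_12ECG_classifier(data, header_data, classes, model)
# where `classes` is the sorted list of scored SNOMED codes used during training.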
``` |
{
"source": "jiawenxiao/physionet2020_0717",
"score": 2
} |
#### File: jiawenxiao/physionet2020_0717/train_12ECG_classifier.py
```python
import numpy as np, os, sys, joblib
from scipy.io import loadmat
from get_12ECG_features import get_12ECG_features
import pandas as pd
import os,time
from tqdm import tqdm
from sklearn.model_selection import train_test_split
import random
import torch
from torch import nn, optim
from torch.utils.data import DataLoader,Dataset
from config import config
import utils
# from resnet import ECGNet
import warnings
warnings.filterwarnings('ignore')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
torch.manual_seed(666)
torch.cuda.manual_seed(666)
class BasicBlock1d(nn.Module):
def __init__(self, inplanes, planes, stride, size,downsample):
super(BasicBlock1d, self).__init__()
self.conv1 = nn.Conv1d(inplanes, planes, kernel_size=size, stride=stride, bias=False)
self.bn1 = nn.BatchNorm1d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv1d( planes, planes, kernel_size=1, stride=1, bias=False)
self.bn2 = nn.BatchNorm1d(planes)
self.downsample = nn.Sequential(
nn.Conv1d(inplanes, planes ,kernel_size=size, stride=stride, bias=False),
nn.BatchNorm1d(planes))
self.dropout = nn.Dropout(.2)
self.sigmoid = nn.Sigmoid()
self.globalAvgPool =nn.AdaptiveAvgPool1d(1)
self.fc1 = nn.Linear(in_features=planes, out_features=round(planes / 16))
self.fc2 = nn.Linear(in_features=round(planes / 16), out_features=planes)
def forward(self, x):
x=x.squeeze(2)
residual = self.downsample(x)
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.dropout(out)
out = self.bn2(out)
out = self.conv2(out)
#Squeeze-and-Excitation (SE)
original_out = out
out = self.globalAvgPool(out)
out = out.view(out.size(0), -1)
out = self.fc1(out)
out = self.relu(out)
out = self.fc2(out)
out = self.sigmoid(out)
out = out.view(out.size(0), out.size(1),1)
out = out * original_out
#resnet
out += residual
out = self.relu(out)
return out
class BasicBlock2d(nn.Module):
def __init__(self, inplanes, planes, stride, size,downsample):
super(BasicBlock2d, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=(1,size), stride=stride, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=(1,1), stride=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = nn.Sequential(
nn.Conv2d(inplanes, planes ,kernel_size=(1,size), stride=stride, bias=False),
nn.BatchNorm2d(planes))
self.dropout = nn.Dropout(.2)
self.sigmoid = nn.Sigmoid()
self.globalAvgPool = nn.AdaptiveAvgPool2d((1,1))
self.fc1 = nn.Linear(in_features=planes, out_features=round(planes / 16))
self.fc2 = nn.Linear(in_features=round(planes / 16), out_features=planes)
def forward(self, x):
residual = self.downsample(x)
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.dropout(out)
out = self.bn2(out)
out = self.conv2(out)
#Squeeze-and-Excitation (SE)
original_out=out
out = self.globalAvgPool(out)
out = out.view(out.size(0), -1)
out = self.fc1(out)
out = self.relu(out)
out = self.fc2(out)
out = self.sigmoid(out)
out = out.view(out.size(0), out.size(1),1,1)
out = out * original_out
#resnet
out += residual
out = self.relu(out)
return out
class ECGNet(nn.Module):
def __init__(self, BasicBlock1d, BasicBlock2d , num_classes):
super(ECGNet, self).__init__()
self.sizes=[5,7,9]
self.external = 2
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(12,32, kernel_size=(1,50), stride=(1,2),padding=(0,0),bias=False)
self.bn1 = nn.BatchNorm2d(32)
self.AvgPool = nn.AdaptiveAvgPool1d(1)
self.layers=nn.Sequential()
self.layers.add_module('layer_1',self._make_layer( BasicBlock2d,inplanes=32,planes=32,blocks=1,stride=(1,2),size=15))
self.layers.add_module('layer_2',self._make_layer( BasicBlock2d,inplanes=32,planes=32,blocks=1,stride=(1,2),size=15))
self.layers.add_module('layer_3',self._make_layer( BasicBlock2d,inplanes=32,planes=32,blocks=1,stride=(1,2),size=15))
self.layers1_list=nn.ModuleList()
self.layers2_list=nn.ModuleList()
for size in self.sizes:
self.layers1=nn.Sequential()
self.layers1.add_module('layer{}_1_1'.format(size),self._make_layer( BasicBlock2d,inplanes=32, planes=32,blocks=32,
stride=(1,1),size=size))
self.layers2=nn.Sequential()
self.layers2.add_module('layer{}_2_1'.format(size),self._make_layer(BasicBlock1d,inplanes=32, planes=256,blocks=1,
stride=2,size=size))
self.layers2.add_module('layer{}_2_2'.format(size),self._make_layer(BasicBlock1d,inplanes=256, planes=256,blocks=1,
stride=2,size=size))
self.layers2.add_module('layer{}_2_3'.format(size),self._make_layer(BasicBlock1d,inplanes=256, planes=256,blocks=1,
stride=2,size=size))
self.layers2.add_module('layer{}_2_4'.format(size),self._make_layer(BasicBlock1d,inplanes=256, planes=256,blocks=1,
stride=2,size=size))
self.layers1_list.append(self.layers1)
self.layers2_list.append(self.layers2)
self.fc = nn.Linear(256*len(self.sizes)+self.external, num_classes)
def _make_layer(self, block,inplanes, planes, blocks, stride ,size,downsample = None):
layers = []
for i in range(blocks):
layers.append(block(inplanes, planes, stride, size,downsample))
return nn.Sequential(*layers)
def forward(self, x0, fr):
x0=x0.unsqueeze(2)
x0 = self.conv1(x0)
x0 = self.bn1(x0)
x0 = self.relu(x0)
x0 = self.layers(x0)
xs=[]
for i in range(len(self.sizes)):
x=self.layers1_list[i](x0)
x=torch.flatten(x,start_dim=2,end_dim=3)
x=self.layers2_list[i](x) # feed the flattened output of the 2d branch into the 1d branch
x= self.AvgPool(x)
xs.append(x)
out = torch.cat(xs,dim=2)
out = out.view(out.size(0), -1)
out = torch.cat([out,fr], dim=1)
out = self.fc(out)
return out
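# Illustrative smoke test (added, not part of the original file); shapes mirror the preprocessing
# in train_12ECG_classifier below (12 leads x 5000 samples plus 2 external features):
# model = ECGNet(BasicBlock1d, BasicBlock2d, num_classes=27)
# logits = model(torch.randn(2, 12, 5000), torch.randn(2, 2)) # -> shape (2, 27)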
# Load challenge data.
def load_challenge_data(filename):
x = loadmat(filename)
data = np.asarray(x['val'], dtype=np.float64)
new_file = filename.replace('.mat','.hea')
input_header_file = os.path.join(new_file)
with open(input_header_file,'r') as f:
header_data=f.readlines()
return data, header_data
# Find unique classes.
def get_classes(input_directory, filenames):
classes = set()
for filename in filenames:
input_file=os.path.join(input_directory,filename)
with open( input_file, 'r') as f:
for l in f:
if l.startswith('#Dx'):
tmp = l.split(': ')[1].split(',')
for c in tmp:
classes.add(c.strip())
return sorted(classes)
def train(x_train,x_val,x_train_external,x_val_external,y_train,y_val, num_class):
# model
model = ECGNet( BasicBlock1d, BasicBlock2d ,num_classes= num_class)
model = model.to(device)
# optimizer and loss
optimizer = optim.Adam(model.parameters(), lr=config.lr)
# optimizer = optim.RMSprop(model.parameters(), lr=config.lr)
wc = y_train.sum(axis=0)
wc = 1. / (np.log(wc)+1)
w = torch.tensor(wc, dtype=torch.float).to(device)
criterion1 = utils.WeightedMultilabel(w)
criterion2 = nn.BCEWithLogitsLoss()
lr = config.lr
start_epoch = 1
stage = 1
best_auc = -1
# =========> start training <=========
print("*" * 10, "step into stage %02d lr %.5f" % (stage, lr))
for epoch in range(start_epoch, config.max_epoch + 1):
since = time.time()
train_loss,train_auc = train_epoch(model, optimizer, criterion1,x_train,x_train_external,y_train,num_class)
val_loss,val_auc = val_epoch(model, criterion2, x_val,x_val_external,y_val,num_class)
print('#epoch:%02d stage:%d train_loss:%.4f train_auc:%.4f val_loss:%.4f val_auc:%.4f time:%s'
% (epoch, stage, train_loss, train_auc,val_loss,val_auc, utils.print_time_cost(since)))
if epoch in config.stage_epoch:
stage += 1
lr /= config.lr_decay
print("*" * 10, "step into stage %02d lr %.5f" % (stage, lr))
utils.adjust_learning_rate(optimizer, lr)
return model
def train_epoch(model, optimizer, criterion,x_train,x_train_external,y_train,num_class):
model.train()
auc_meter,loss_meter, it_count = 0, 0,0
batch_size=config.batch_size
for i in range(0,len(x_train)-batch_size,batch_size):
inputs1 = torch.tensor(x_train[i:i+batch_size],dtype=torch.float,device=device)
inputs2 = torch.tensor(x_train_external[i:i+batch_size],dtype=torch.float,device=device)
target = torch.tensor(y_train[i:i+batch_size],dtype=torch.float,device=device)
output = model.forward(inputs1,inputs2)
# zero the parameter gradients
optimizer.zero_grad()
# forward
loss = criterion(output, target)
loss.backward()
optimizer.step()
loss_meter += loss.item()
it_count += 1
auc_meter = auc_meter+ utils.calc_auc(target, torch.sigmoid(output))
return loss_meter / it_count, auc_meter/it_count
def val_epoch(model, criterion, x_val,x_val_external,y_val,num_class):
model.eval()
auc_meter,loss_meter, it_count = 0, 0,0
batch_size=config.batch_size
with torch.no_grad():
for i in range(0,len(x_val)-batch_size,batch_size):
inputs1 = torch.tensor(x_val[i:i+batch_size],dtype=torch.float,device=device)
inputs2 = torch.tensor(x_val_external[i:i+batch_size],dtype=torch.float,device=device)
target = torch.tensor(y_val[i:i+batch_size],dtype=torch.float,device=device)
output = model(inputs1,inputs2)
loss = criterion(output, target)
loss_meter += loss.item()
it_count += 1
auc_meter =auc_meter + utils.calc_auc(target, torch.sigmoid(output))
return loss_meter / it_count, auc_meter/ it_count
def train_12ECG_classifier(input_directory, output_directory):
input_files=[]
header_files=[]
train_directory=input_directory
for f in os.listdir(train_directory):
if os.path.isfile(os.path.join(train_directory, f)) and not f.lower().startswith('.') and f.lower().endswith('mat'):
g = f.replace('.mat','.hea')
input_files.append(f)
header_files.append(g)
# the 27 scored classes
classes_weight=['270492004','164889003','164890007','426627000','713427006','713426002','445118002','39732003',
'164909002','251146004','698252002','10370003','284470004','427172004','164947007','111975006',
'164917005','47665007','59118001','427393009','426177001','426783006','427084000','63593006',
'164934002','59931005','17338001']
classes_name=sorted(classes_weight)
num_files=len(input_files)
num_class=len(classes_name)
# initilize the array
data_num = np.zeros((num_files,12,10*500))
data_external=np.zeros((num_files,2))
classes_num=np.zeros((num_files,num_class))
for cnt,f in enumerate(input_files):
classes=set()
tmp_input_file = os.path.join(train_directory,f)
data,header_data = load_challenge_data(tmp_input_file)
for lines in header_data:
if lines.startswith('#Dx'):
tmp = lines.split(': ')[1].split(',')
for c in tmp:
classes.add(c.strip())
if lines.startswith('#Age'):
age=lines.split(': ')[1].strip()
if age=='NaN':
age='60'
if lines.startswith('#Sex'):
sex=lines.split(': ')[1].strip()
for j in classes:
if j in classes_name:
class_index=classes_name.index(j)
classes_num[cnt,class_index]=1
data_external[cnt,0]=float(age)/100
data_external[cnt,1]=np.array(sex=='Male').astype(int)
if data.shape[1]>=5000:
data_num[cnt,:,:] = data[:,:5000]/30000
else:
length=data.shape[1]
data_num[cnt,:,:length] = data/30000
#split the training set and testing set
x_train,x_val,x_train_external,x_val_external,y_train,y_val = train_test_split(data_num,data_external,
classes_num,test_size=0.2, random_state=2020)
#build the pre_train model
model= train(x_train,x_val,x_train_external,x_val_external,y_train,y_val, num_class)
#save the model
output_directory=os.path.join(output_directory, 'resnet_0628.pkl')
torch.save(model, output_directory)
``` |
{
"source": "jiawenxiao/physionet2020_0823",
"score": 2
} |
#### File: jiawenxiao/physionet2020_0823/utils.py
```python
import torch
import numpy as np
import time, os
from sklearn.metrics import f1_score, roc_auc_score
from torch import nn
def mkdirs(path):
if not os.path.exists(path):
os.makedirs(path)
# compute the ROC AUC, averaged (micro) across classes
def calc_auc(y_true, y_pre, threshold=0.5):
labels = y_true.cpu().detach().numpy().astype(int)
outputs = y_pre.cpu().detach().numpy()
return roc_auc_score(labels, outputs, 'micro')
# format the elapsed time
def print_time_cost(since):
time_elapsed = time.time() - since
return '{:.0f}m{:.0f}s\n'.format(time_elapsed // 60, time_elapsed % 60)
# adjust the learning rate
def adjust_learning_rate(optimizer, lr):
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
# multi-label BCE loss with per-class weights
class WeightedMultilabel(nn.Module):
def __init__(self, weights: torch.Tensor):
super(WeightedMultilabel, self).__init__()
self.cerition = nn.BCEWithLogitsLoss(reduction='none')
self.weights = weights
def forward(self, outputs, targets):
loss = self.cerition(outputs, targets)
return (loss * self.weights).mean()
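# Illustrative usage sketch (added, not part of the original file):
# criterion = WeightedMultilabel(torch.ones(27))
# loss = criterion(torch.randn(8, 27), torch.randint(0, 2, (8, 27)).float())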
``` |
{
"source": "jiawu/Roller",
"score": 2
} |
#### File: Figures/Granger/ganger_figure.py
```python
__author__ = '<NAME>'
__email__ = '<EMAIL>'
import sys
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
import matplotlib as mpl
mpl.rcParams['pdf.fonttype']=42
mpl.rcParams['font.sans-serif'] = 'Arial'
# mpl.rcParams['xtick.major.pad'] = 12
# mpl.rcParams['ytick.major.pad'] = 12
def biochem_sim_poly(x):
base_x = np.arange(36)
base_y = np.array([1, 1, 1, 1, 1, 1, 2, 4, 6, 8, 10, 12, 14, 15, 16, 17, 18, 18, 17.5, 17, 16, 15, 13, 11, 8, 7,
6.5, 6.25, 6, 6, 6, 6, 6, 6, 6, 6])
poly = np.poly1d(np.polyfit(base_x, base_y, 10))
y = poly(x)
start_min = np.min(y[:len(y)//2])
start_min_index = np.where(y==start_min)[0][0]
stop_min = np.min((y[len(y)//2:]))
stop_min_index = np.where(y==stop_min)[0][0]
y[:start_min_index] = start_min
y[stop_min_index:] = stop_min
return y
def shift_values(x, y, noise, shift):
points_to_prepend = np.sum(x < shift)
shifted_noise = np.append(noise[:points_to_prepend], noise)
shifted_y = np.append(np.array([y[0]]*points_to_prepend), y)+shifted_noise
return shifted_y[:len(y)]
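# Added note (not in the original script): shift_values delays the response curve by `shift` time
# units -- it holds the initial value of y (plus noise) for all x < shift before the curve starts,
# which is what creates the lag between Gene 1 and Gene 2 in the panels drawn below.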
if __name__ == '__main__':
cutoffs = range(0, 11, 5)[::-1]
np.random.seed(10)
n_points = 100
noise_factor = 1
gauss1 = np.random.normal(0, 1, n_points)*noise_factor
gauss2 = np.random.normal(0, 1, n_points)*noise_factor
a = np.linspace(0, 36, n_points)
b = biochem_sim_poly(a)
plot_b = stats.zscore(b+gauss1, ddof=1)
line_width = 2
f, axarr = plt.subplots(2, len(cutoffs), figsize=(15, 10))
tick_size = 18
for col, cutoff in enumerate(cutoffs):
b2 = stats.zscore(shift_values(a, b, gauss2, cutoff), ddof=1)
# Fit linear model
slope, intercept, r_value, p_value, std_err = stats.linregress(b, b2)
axarr[0, col].plot(a, plot_b, lw=line_width, label='Gene 1', c='b')
axarr[0, col].plot(a, b2, lw=line_width, label='Gene 2', c='r')
axarr[0, col].set_xlim([np.min(a), np.max(a)])
axarr[0, col].set_title('Lag Order: %i' % (10-cutoff), fontsize=tick_size, weight='bold')
axarr[0, col].tick_params(axis='both', labelsize=tick_size)
if col != 0:
axarr[0, col].get_xaxis().set_visible(False)
axarr[0, col].get_yaxis().set_visible(False)
else:
axarr[0, col].legend(loc='best')
axarr[0, col].locator_params(nbins=4)
axarr[1, col].plot(plot_b, b2, '.', ms=15, c='k')
axarr[1, col].annotate(r'$R^2$' + '=%0.4f' % r_value ** 2, xy=(0.02, 0.98), xycoords='axes fraction',
va='top', fontsize=20)
axarr[1, col].tick_params(axis='both', labelsize=tick_size)
if col != 0:
axarr[1, col].get_xaxis().set_visible(False)
axarr[1, col].get_yaxis().set_visible(False)
else:
axarr[1, col].locator_params(nbins=4)
plt.tight_layout(h_pad=2, w_pad=2)
plt.savefig('granger_figure.pdf', format='pdf')
```
#### File: Roller/pipelines/run_tdSwing_scan.py
```python
import pdb
import sys
import Pipelines as pl
import pandas as pd
from datetime import datetime
import numpy as np
import time
import tempfile
import os
# saving the models for the iteration tests:
# for the iteration tests we save a dataframe (in the form of the final dataframe from Analyzer...) instead of a full model, because storing full models is too computationally expensive and, as of this writing, we are running out of room on QUEST.
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
def main(data_folder, output_path, target_dataset, my_iterating_param, param_test_style, param_tests, n_trials):
current_time = datetime.now().strftime('%Y-%m-%d_%H:%M:%S:%f')
if 'Dionesus' in data_folder:
n_trials = 2
default_params = {'data_folder':data_folder, 'file_path':target_dataset, 'td_window':15,'min_lag':1,'max_lag':3,'n_trees':500,'permutation_n':5, 'lag_method':'mean_mean', 'calc_mse':False, 'bootstrap_n':5,'n_trials':n_trials, 'run_time':current_time, 'sort_by': 'rank','iterating_param':my_iterating_param, 'filter_noisy':False, 'alpha': None}
overall_df = pd.DataFrame()
#**kwargs allows me to change the iterating parameter very easily
trial_times = []
for current_param_value in param_tests:
for trial in range(0,n_trials):
trial_start = time.time()
run_params = default_params.copy()
if param_test_style == "pair":
run_params[my_iterating_param[0]] = current_param_value[0]
run_params[my_iterating_param[1]] = current_param_value[1]
elif param_test_style == "triplet":
run_params[my_iterating_param[0]] = current_param_value[0]
run_params[my_iterating_param[1]] = current_param_value[1]
run_params[my_iterating_param[2]] = current_param_value[2]
else:
run_params[my_iterating_param]=current_param_value
# Check max_lag restriction
if 'size10' in run_params['data_folder']:
max_window = 21
if 'high_sampling' in run_params['data_folder']:
if 'even' in run_params['file_path']:
max_window = 7
else:
interval = run_params['file_path'].split('/')[-1].split('_')[1]
max_window = int(1000/int(interval)+1)
elif 'gardner_out' in run_params['data_folder']:
interval = run_params['file_path'].split('/')[-1].split('_')[1]
max_window = int(round(14/int(interval)))
else:
max_window = 21
lag_gap = max_window-run_params['td_window']
# max window = 1, td window = 1
# lag gap = 1-1 =0
# if lag gap (0) <= max_lag = 1
if lag_gap <= run_params['max_lag']:
run_params['max_lag'] = lag_gap
if run_params['max_lag'] >= max_window:
run_params['max_lag'] = max_window - 1
if run_params['min_lag'] > run_params['max_lag']:
run_params['min_lag'] = run_params['max_lag']
if 'community' in data_folder:
roc,pr, tdr, _ = pl.get_td_community(**run_params)
else:
roc,pr, tdr, _ = pl.get_td_stats(**run_params)
run_params['auroc']=roc
run_params['aupr']=pr
trial_end = time.time()
run_params['trial_time'] = trial_end-trial_start
for key in run_params.keys():
if run_params[key] is None:
run_params[key] = "None"
run_result=pd.Series(run_params)
overall_df = overall_df.append(run_result, ignore_index=True)
print(run_result)
current_time = datetime.now().strftime('%Y-%m-%d_%H:%M:%S:%f')
full_path = output_path+current_time
directory = os.path.dirname(full_path)
_, filename = os.path.split(full_path)
with tempfile.NamedTemporaryFile(prefix=filename, suffix='.tsv', dir=directory, delete=False) as temp:
overall_df.to_csv(temp.name, index=False, sep='\t')
print(temp.name)
temp.close()
if __name__ == "__main__":
"""
Runs a number of trials with tdRoller with specified params, saves a dataframe with the run params and result
Example call: python run_tdSwing_scan.py data_folder output_path target_dataset iterating_param param_test_style
:param data_folder: /projects/p20519/roller_output/optimizing_window_size/RandomForest/janes
:param output_path: /projects/p20519/roller_output/stability_analysis/RandomForest/janes_ntrees_
the output will be a tsv named janes_ntrees_<currenttime>.tsv
:param target_dataset: /projects/p20519/Swing/data/invitro/janes_timeseries.tsv or/projects/p20519/Swing/data/dream4/ecoli_timeseries.tsv
:param my_iterating_param: = n_trees
:param param_test_style: str that defines either logarithmic 10,100,1000 or specify a min/max or string
"""
data_folder = str(sys.argv[1])
output_path = str(sys.argv[2])
target_dataset = str(sys.argv[3])
my_iterating_param = str(sys.argv[4])
param_test_combo = str(sys.argv[5])
param_test_style = param_test_combo.split("_")[0]
if param_test_style == "log":
param_tests = [10,100,500,1000]
elif param_test_style == "minmax":
param_min = int(param_test_combo.split("_")[1])
param_max = int(param_test_combo.split("_")[2])
param_tests = [i for i in range(param_min, param_max+1)]
elif param_test_style == "num":
param_tests = [int(x) for x in param_test_combo.split("_")[1:]]
elif param_test_style == "string":
param_tests = [str(x) for x in param_test_combo.split("_")[1:]]
elif param_test_style == "boolean":
param_tests = [False, True]
elif param_test_style == "pair":
pli =param_test_combo.split("_")
param_tests = list(zip( map(int, pli[1::2]), map(int, pli[2::2])))
my_iterating_param = my_iterating_param.split("^")
elif param_test_style == "triplet":
pli =param_test_combo.split("_")
param_tests = list(zip( map(int, pli[1::3]), map(int, pli[2::3]), map(int, pli[3::3]) ) )
my_iterating_param = my_iterating_param.split("^")
n_trials = 50
#always save the full parameter list and date in the dataframe for each test. for posterity!
current_time = datetime.now().strftime('%Y-%m-%d_%H:%M:%S:%f')
overall_df = pd.DataFrame()
#**kwargs allows me to change the iterating parameter very easily
#always save the full parameter list and date in the dataframe for each test. for posterity!
main(data_folder, output_path, target_dataset, my_iterating_param, param_test_style, param_tests, n_trials)
```
#### File: scripts/figure4/parse_biocyc.py
```python
from datetime import datetime
import sys
import pandas as pd
import pdb
import numpy as np
import pickle
from collections import defaultdict
from goatools.go_enrichment import GOEnrichmentStudy
from goatools.obo_parser import GODag
import json
import math
from Swing import Swing
from Swing.util import utility_module as ut
sys.path.append('/home/jjw036/Roller/pipelines')
import Pipelines as pl
from Swing.util.Evaluator import Evaluator
import os.path
import Swing.util.lag_identification as lag_id
import Swing.util.utility_module as Rutil
import networkx as nx
from nxpd import draw
def parse_go():
go = pd.read_csv('../../data/invitro/gene_ontology.tsv', sep='\t')
genes_go = go.iloc[:,[2,4]]
genes_go.columns = ['name','GO_ID']
genes = genes_go['name'].str.lower().tolist()
go_id = genes_go['GO_ID'].tolist()
go_tuple = list(zip(genes,go_id))
eco_go = defaultdict(list)
for genes,go_id in go_tuple:
eco_go[genes].append(go_id)
return(eco_go)
def get_clist(clusterid, cluster_table):
clist = cluster_table[cluster_table['__glayCluster'] == clusterid]['name'].tolist()
return(clist)
def get_tf_list():
"""returns full list of tfs"""
tf_fp = "/home/jjw036/Roller/data/invitro/omranian_parsed_tf_list.tsv"
with open(tf_fp, 'r') as tf_file:
tf_list = tf_file.read().splitlines()
return(tf_list)
def generate_json(merged_df,method):
"""
Generates a json file with the lag information, module information, and parent information.
"""
# the key is the parent
# first organize default dict such that each parent has all edges
parent_map = defaultdict(list)
for parent,child in merged_lag['Edge'].tolist():
parent_map[parent].append(child)
# then expand the dict into a list of dicts
json_list = []
for key in parent_map.keys():
json_dict = {}
clusterid = merged_lag[merged_lag['parent']==key]['parent_cluster'].tolist()[0]
json_dict['name'] = 'Module%d.%s' % (clusterid, key)
json_dict['imports'] = []
for value in parent_map[key]:
child_id = merged_lag[(merged_lag['child']==value) & (merged_lag['parent']==key)]['child_cluster'].tolist()[0]
edge_info = {}
edge_info['t_name'] = 'Module%d.%s' % (child_id, value)
lag = merged_lag[merged_lag['Edge'] == (key,value)][method].tolist()[0]
if math.isnan(lag):
lag = 0
edge_info['lag'] = lag
json_dict['imports'].append(edge_info)
json_list.append(json_dict)
### fill in empty dicts for child nodes that do not become parents
all_child = merged_lag['child'].tolist()
child_only = list(set(all_child) - set(parent_map.keys()))
for child in child_only:
json_dict = {}
clusterid = merged_lag[merged_lag['child']==child]['child_cluster'].tolist()[0]
json_dict['name'] = 'Module%d.%s' % (clusterid, child)
json_dict['imports'] = []
json_list.append(json_dict)
json_list = sorted(json_list, key=lambda k: len(k['imports']), reverse=False)
with open('lagged_network_2.json', 'w') as fp:
json.dump(json_list, fp, sort_keys=True)
# for every parent, append the edge
# dict with name
return(True)
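# Illustrative sketch (added, not part of the original file) of one element written to
# lagged_network_2.json by the loop above; gene names and lag values are made up:
# {"name": "Module3.lexA",
#  "imports": [{"t_name": "Module3.recA", "lag": 10.0},
#              {"t_name": "Module7.uvrA", "lag": 0}]}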
def run_subswing(df, td_window=6, min_lag = 0, max_lag = 0, window_type = 'RandomForest', clusterid = None, output_fn = None):
"""
Pass in subnet_dict
"""
pdb.set_trace()
true_edges = df['Edge'].tolist()
#true_edges = df['index'].tolist()
sub_dict = get_subnetwork_info(df)
sub_eval = Evaluator(subnet_dict = sub_dict)
file_path = "/home/jjw036/Roller/data/invitro/iomranian_parsed_timeseries.tsv"
gene_start_column = 1
gene_end = None
time_label = "Time"
separator = "\t"
window_types = ['Dionesus','RandomForest','Lasso']
final_edge_list = []
for window_type in window_types:
tdr = Swing(file_path, gene_start_column, gene_end, time_label, separator, min_lag =min_lag, max_lag = max_lag, window_type = window_type, sub_dict=sub_dict)
# remember the data is already zscored
#tdr.zscore_all_data()
tdr.set_window(td_window)
tdr.create_custom_windows(sub_dict['tfs'])
tdr.optimize_params()
tdr.crag = False
tdr.calc_mse = False
tdr.fit_windows(n_trees=100, show_progress=False, n_jobs=-1)
tdr.rank_edges(permutation_n=10, n_bootstraps=10)
tdr.compile_roller_edges(self_edges=False)
tdr.make_static_edge_dict(true_edges, self_edges=False, lag_method='mean_mean')
sub_df = tdr.make_sort_df(tdr.edge_dict, sort_by = 'rank')
sub_df['Rank'] = np.arange(len(sub_df))
pr = sub_eval.calc_pr(sub_df.sort('Rank'))
roc = sub_eval.calc_roc(sub_df.sort('Rank'))
print(window_type,td_window,roc[2].values[-1],pr[2].values[-1])
final_edge_list.append(sub_df)
averaged_rank_data = Rutil.average_rank(final_edge_list,'Rank')
col_names = averaged_rank_data.columns.tolist()
for i in range(len(window_types)):
col_names[i] = window_types[i]+'-rank'
averaged_rank_data.columns = col_names
averaged_rank_data.sort('mean-rank', inplace=True)
pr = sub_eval.calc_pr(averaged_rank_data.sort('mean-rank'))
roc = sub_eval.calc_roc(averaged_rank_data.sort('mean-rank'))
print('community',td_window,roc[2].values[-1],pr[2].values[-1])
#moduleID, source, target, window_size, min_lag, max_lag, true or false, lag or not, lag time, total number of edges in module
sub_df = averaged_rank_data
sub_df['tp'] = sub_df['regulator-target'].isin(sub_eval.gs_flat)
sub_df=sub_df.merge(df,how = 'outer', right_on='Edge',left_on='regulator-target')
sub_df['Source'] = [x[0] for x in sub_df['regulator-target'].tolist()]
sub_df['Target'] = [x[1] for x in sub_df['regulator-target'].tolist()]
sub_df = sub_df[(sub_df['parent_cluster'] == sub_df['child_cluster']) | sub_df['parent_cluster'].isnull()]
sub_df['moduleID'] = clusterid
sub_df['window_size'] = td_window
sub_df['min_lag'] = min_lag
sub_df['max_lag'] = max_lag
sub_df['total_edges'] = len(df)
sub_df['pr'] = pr[2].values[-1]
sub_df['roc'] = roc[2].values[-1]
sub_df = sub_df.sort('mean-rank')
#sub_df = sub_df.iloc[:len(df)]
if os.path.isfile(output_fn):
with open(output_fn,'a') as output:
sub_df.to_csv(output, header=False, index=False, sep='\t')
else:
with open(output_fn,'a') as output:
sub_df.to_csv(output, header=True, index=False, sep='\t')
return(pr[2].values[-1], roc[2].values[-1])
def get_subnetwork_info(df):
sub_genes = df['parent'].unique().tolist() + df['child'].unique().tolist()
sub_genes = set(sub_genes)
tf_list = get_tf_list()
sub_tfs = list(sub_genes.intersection(set(tf_list)))
targets = sub_genes
regulators = sub_tfs
evaluator = Evaluator()
sub_all_edges = tuple(map(tuple,evaluator.possible_edges(np.array(regulators), np.array(list(targets)))))
sub_all_edges = [ x for x in sub_all_edges if x[0] != x[1] ]
sub_true_edges = df['Edge'].tolist()
sub_stats = { 'edges': sub_all_edges,
'true_edges': sub_true_edges,
'tfs': sub_tfs,
'genes': list(sub_genes)}
return(sub_stats)
def extract_subnetwork(cluster_id, merged_lag, parsed_info, agg_results):
# get the ranks of all the edges, and only true edges
# get the name of all the transcription factors
sub_genes = merged_lag['parent'].unique().tolist() + merged_lag['child'].unique().tolist()
sub_genes = set(sub_genes)
tf_list = get_tf_list()
sub_tfs = sub_genes.intersection(set(tf_list))
targets = sub_genes
regulators = tf_list
evaluator = Evaluator()
sub_all_edges = tuple(map(tuple,evaluator.possible_edges(np.array(regulators), np.array(list(targets)))))
sub_all_edges = [ x for x in sub_all_edges if x[0] != x[1] ]
sub_true_edges = merged_lag['Edge'].tolist()
sub_stats = { 'edges': sub_all_edges,
'true_edges': sub_true_edges,
'tfs': sub_tfs,
'genes': list(sub_genes)}
result_group = agg_results.groupby(['data_folder','min_lag','max_lag', 'td_window'])
# first parse the agg_df such that the only files you need are in a dataframe
# group them in terms of inference method, windowing, and lag
# groupby
gs_file = agg_results['file_path'].iloc[0].replace('_timeseries.tsv', '_goldstandard.tsv')
om_eval = Evaluator(gs_file)
# pass in the sub_group df and the parsed info.
# return the aupr and auroc
baseline_group = ('/projects/p20519/roller_output/ranks/RandomForest/omranian_', '0', '0', '6')
swing_group =('/projects/p20519/roller_output/ranks/RandomForest/omranian_', '1', '1', '5')
baseline_group = result_group.get_group(baseline_group).convert_objects(convert_numeric=True)
sub_pr1, sub_roc1 = get_group_stats(baseline_group, parsed_info, sub_stats, sub_all_edges)
swing_group = result_group.get_group(swing_group).convert_objects(convert_numeric=True)
sub_pr2, sub_roc2 = get_group_stats(swing_group, parsed_info, sub_stats, sub_all_edges)
sub_stats['baseline_pr'] = sub_pr1
sub_stats['baseline_roc'] = sub_roc1
sub_stats['swing_pr'] = sub_pr2
sub_stats['swing_roc'] = sub_roc2
return(sub_stats)
def get_group_stats(sub_group,parsed_info, sub_stats, sub_all_edges):
#print(summary)
sub_ranks = [parsed_info.get(k) for k in sub_group['result_path'].tolist() if k in parsed_info.keys()]
if 'rank_importance' in sub_ranks[0].columns:
sub_average = ut.average_rank(sub_ranks, 'rank_importance')
else:
# This is a community dataframe
for df in sub_ranks:
df['mean-rank-dr'] = df[['Dionesus-rank','RandomForest-rank']].mean()
sub_average = ut.average_rank(sub_ranks, 'mean-rank-dr')
sub_average['regulator-target'] = sub_average['regulator-target'].apply(eval)
#only get regulator-copy pairs in the gold standard
sub_eval = Evaluator(subnet_dict = sub_stats)
sub_df = sub_average[sub_average['regulator-target'].isin(sub_all_edges)]
pr = sub_eval.calc_pr(sub_df.sort('mean-rank'))
roc = sub_eval.calc_roc(sub_df.sort('mean-rank'))
return(pr[2].values[-1], roc[2].values[-1])
"""
1. Parse gene ontology to get names of modules
2. Identify clusters that are lagged
3. Determine if clusters have higher AUROC with tdRoller than the baseline community or baseline method
4. Check if there's an enrichment, or if cluster is statistically significant
"""
def main(window_type='RandomForest', CLUSTER=26):
"""
df = pd.read_csv('../data/invitro/ecocyc_database_export_ver1_02.txt',sep='\t')
# Parse the gene lists for each pathway
pathway_gene_list = []
pathway_gene_string = []
for idx, row in df.iterrows():
gene_string = row['Genes of pathway']
parsed = gene_string.replace(' ','').replace('"','').lower().split('//')
parsed_str = gene_string.replace(' ','').replace('"','').lower().replace('//',' ')
pathway_gene_list.append(parsed)
pathway_gene_string.append(parsed_str)
print(parsed)
df['parsed_genes_list'] = pathway_gene_list
df['parsed_genes_str'] = pathway_gene_string
"""
if os.path.isfile('../pickles/lag_df2_parse_biocyc_6.pkl'):
lag_df = pd.read_pickle('../pickles/lag_df2_parse_biocyc_6.pkl')
edge_df = pd.read_pickle('../pickles/edge_df2_parse_biocyc_6.pkl')
else:
experiments = lag_id.get_experiment_list('../../data/invitro/omranian_parsed_timeseries.tsv',5,26)
signed_edge_list = pd.read_csv('../../data/invitro/omranian_signed_parsed_goldstandard.tsv',sep='\t',header=None)
signed_edge_list.columns=['regulator', 'target', 'signs']
signed_edge_list['regulator-target'] = tuple(zip(signed_edge_list['regulator'],signed_edge_list['target']))
genes = list(experiments[0].columns.values)
lag_df,edge_df = lag_id.calc_edge_lag2(experiments,genes,signed_edge_list)
## Get the lags to associate with the network
#(lag_df, edge_df) = flm.get_true_lags('../../data/invitro/omranian_parsed_timeseries.tsv',5,26)
#lag_df['lag_median'] = [np.median(x) for x in lag_df['Lag'].tolist()]
#edge_df['lag_median'] = [np.median(x) for x in edge_df['Lag'].tolist()]
lag_df.to_pickle('../pickles/lag_df2_parse_biocyc_6.pkl')
edge_df.to_pickle('../pickles/edge_df2_parse_biocyc_6.pkl')
#lag_df['lag_counts'] = [len(x) if type(x) is list else 0 for x in lag_df['Lag'].tolist()]
#edge_df['lag_counts'] = [len(x) if type(x) is list else 0 for x in edge_df['Lag'].tolist()]
clusters = pd.read_csv('../../data/invitro/regulon_cluster_assignments'+str(CLUSTER)+'.csv',sep=',')
pdb.set_trace()
new_lag= lag_df.reset_index()
#new_lag[['parent','child']] = new_lag['index'].apply(pd.Series)
merged_lag = pd.merge(new_lag, clusters[['name','__glayCluster']], how='left', left_on=['parent'], right_on=['name'])
merged_lag = merged_lag.rename(columns = {'__glayCluster':'parent_cluster'})
merged_lag = pd.merge(merged_lag, clusters[['name','__glayCluster']], how='left', left_on=['child'], right_on=['name'])
merged_lag = merged_lag.rename(columns = {'__glayCluster':'child_cluster'})
#generate_json(merged_lag, method = 'lag_median')
#average_lag_over_network = merged_lag['lag_mean'].mean()
#std_lag_over_network = merged_lag['lag_mean'].std()
#zero_lag_edges = merged_lag[merged_lag['lag_mean']<1].count()
within_clusters = merged_lag[merged_lag['parent_cluster'] == merged_lag['child_cluster']]
between_clusters = merged_lag[merged_lag['parent_cluster'] != merged_lag['child_cluster']]
target_clusters = within_clusters
grouped_by_cluster = target_clusters.groupby('parent_cluster')
clusters = target_clusters['parent_cluster'].unique().tolist()
cluster_summary = pd.DataFrame()
pd_databases = ['community_agg_rank.pkl','RandomForest_agg_rank.pkl','Lasso_agg_rank.pkl']
dict_databases = ['community_rank_data.pkl','RandomForest_rank_data.pkl','Lasso_rank_data.pkl']
db = zip(pd_databases, dict_databases)
agg_results = pd.DataFrame()
#parsed_info = {}
target_fp = 'omranian'
"""
for pd_d, d_d in db:
df = pd.read_pickle(pd_d)
df = df[df['file_path'].str.contains(target_fp)]
agg_results = agg_results.append(df)
with open(d_d, 'rb') as infile:
info = pickle.load(infile)
parsed_info.update( {k: info[k] for k in df['result_path'].tolist() if k in info.keys()})
"""
clusters.sort()
current_time = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
output_file = "networks/community_omranian_clusters_" + current_time + ".csv"
output_file2 = "networks/SWING_community_omranian_clusters_" + current_time + ".csv"
output_file3 = "networks/SWING2_community_omranian_clusters_" + current_time + ".csv"
for clusterid in clusters:
print(clusterid, len(clusters))
current_group = grouped_by_cluster.get_group(clusterid)
total_edges = len(current_group)
nan_edges = len(current_group[current_group['Lag'].isnull()])
lagged_edges = len(current_group[current_group['Lag'] >= 10])
lagged_edges_2 = len(current_group[(current_group['Lag'] >= 20)])
sub_dict = get_subnetwork_info(current_group)
if (len(current_group) < 10) or (len(sub_dict['tfs']) < 3):
continue
pr1,roc1 = run_subswing(current_group, td_window = 5, min_lag = 0, max_lag = 0, window_type = window_type, clusterid = clusterid, output_fn = output_file)
pr2,roc2 = run_subswing(current_group, td_window = 4, min_lag = 1, max_lag = 1, window_type = window_type, clusterid = clusterid, output_fn = output_file2)
pr3,roc3 = run_subswing(current_group, td_window = 4, min_lag = 0, max_lag = 1, window_type = window_type, clusterid = clusterid, output_fn = output_file3)
pr4,roc4 = run_subswing(current_group, td_window = 3, min_lag = 1, max_lag = 2, window_type = window_type, clusterid = clusterid)
#pr5,roc5 = run_subswing(current_group, td_window = 3, min_lag = 1, max_lag = 1, window_type = window_type, clusterid = clusterid)
#pr6,roc6 = run_subswing(current_group, td_window = 3, min_lag = 2, max_lag = 2, window_type = window_type, clusterid = clusterid)
print('Diff pr:', pr1-pr2)
print('diff roc:', roc1-roc2)
#print(pr1,roc1,pr2,roc2,pr3,roc3,pr4,roc4,pr5,roc5,pr6,roc6)
print('total_edges: %d, nan_edges: %d, lagged_edges: %d, stringently_lagged_edges: %d' % (total_edges, nan_edges, lagged_edges, lagged_edges_2))
print('percentage nan_edges: %.2f, lagged_edges: %.2f, stringently_lagged_edges: %.2f' % (nan_edges/total_edges, lagged_edges/total_edges, lagged_edges_2/total_edges))
"""
subnet_info = extract_subnetwork(clusterid, merged_lag, parsed_info, agg_results)
print('AUPR_Swing: ', subnet_info['swing_pr'])
print('AUROC_Swing: ', subnet_info['swing_roc'])
print('AUPR diff: ', subnet_info['baseline_pr'] - subnet_info['swing_pr'])
print('AUROC diff: ', subnet_info['baseline_roc'] - subnet_info['swing_roc'])
"""
cluster_result = { 'cluster_id': clusterid,
'total_edges':total_edges,
'nan_edges':nan_edges,
'lagged_edges':lagged_edges,
'lagged_edges2':lagged_edges_2,
'percent_lagged':lagged_edges/total_edges,
'percent_lagged2':lagged_edges_2/total_edges,
'baseline_auroc':roc1,
'baseline_aupr':pr1,
'swing_aupr':pr2,
'swing_auroc':roc2,
'swing_aupr2':pr3,
'swing_auroc2':roc3,
'swing_aupr3':pr4,
'swing_auroc3':roc4,
#'swing_aupr4':pr5,
#'swing_auroc4':roc5,
#'swing_aupr5':pr6,
#'swing_auroc5':roc6
}
cluster_summary = cluster_summary.append(cluster_result, ignore_index = True)
current_time = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
cluster_summary.to_csv('cluster_summaries/cluster_summary_within_community_c'+str(CLUSTER)+'_' + current_time + '.csv', header=True, index=False, sep='\t')
if __name__ == '__main__':
if len(sys.argv) >= 2:
window_type = str(sys.argv[1])
CLUSTER = int(sys.argv[2])
else:
window_type = 'RandomForest'
n_trials = 2
for x in range(n_trials):
main(window_type, CLUSTER=CLUSTER)
"""
pdb.set_trace()
result_df = grouped_by_cluster.median()
result_df['count'] = grouped_by_cluster.count()['name_x']
valid_clusters = result_df[result_df['count']>9]['child_cluster'].tolist()
omranian_promotion = pd.read_pickle('/projects/p20519/roller_output/pickles/omranian_promotion.pkl')
promotion_lag = pd.merge(merged_lag, omranian_promotion, how='left', left_on=['index'], right_on=['regulator-target'])
promotion_lag['rank_diff_D'] = promotion_lag['rank_importance_Dionesus-td_6']-promotion_lag['rank_importance_Dionesus-td_4']
eco_go = parse_go()
clusters = pd.read_csv('../data/invitro/regulon_cluster_assignments.csv',sep=',')
obodag = GODag("go-basic.obo")
goeaobj = GOEnrichmentStudy(
        eco_go.keys(), # background gene set (E. coli gene identifiers)
eco_go, # geneid/GO associations
obodag, # Ontologies
propagate_counts = False,
alpha = 0.05, # default significance cut-off
        methods = ['fdr_by']) # default multiple-test correction method
# For each cluster, get a list of genes.
# For each cluster, test the list of the genes for gene ontology enrichment
valid_clusters = clusters['__glayCluster'].unique().tolist()
for clusterid in valid_clusters:
genes_0 = get_clist(clusterid, clusters)
goea_results_all = goeaobj.run_study(genes_0)
goea_results_sig = [r for r in goea_results_all if r.p_fdr_by < 0.05]
print(len(goea_results_sig))
for result in goea_results_sig:
print(clusterid,result.name, result.ratio_in_study)
# show an enrichment modules with lag
# divide edges into modules
# create file in json format
# for each node, add list of pathways
# for each pathway, get genes involved in pathway
# for each pathway, get a list of interactions within that pathway
# for each pathway, get a list of interactions related to that pathway
# get list of lags, then for each lag
"""
```
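For orientation, the within/between-cluster split and the lagged-edge percentages computed in the loop above come down to a few pandas operations. The sketch below reproduces that bookkeeping on a made-up edge table (the column names mirror `merged_lag`; the values and cluster labels are invented for illustration):
```python
import pandas as pd

# Toy edge table with the columns used above; values are invented.
edges = pd.DataFrame({
    'parent_cluster': [1, 1, 1, 2, 2],
    'child_cluster':  [1, 1, 2, 2, 2],
    'Lag':            [0, 20, 10, None, 0],
})

within = edges[edges['parent_cluster'] == edges['child_cluster']]
between = edges[edges['parent_cluster'] != edges['child_cluster']]
print('within/between edge counts:', len(within), len(between))

for clusterid, group in within.groupby('parent_cluster'):
    total_edges = len(group)
    nan_edges = group['Lag'].isnull().sum()
    lagged_edges = (group['Lag'] >= 10).sum()       # lag threshold used above
    lagged_edges_2 = (group['Lag'] >= 20).sum()     # stringent threshold
    print(clusterid, total_edges,
          nan_edges / total_edges,
          lagged_edges / total_edges,
          lagged_edges_2 / total_edges)
```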
#### File: scripts/figure4/parse_cluster_summaries_sc.py
```python
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
import pandas as pd
import pdb
from parse_cluster_info_sc import main as pinf
import numpy as np
def parse_summary(fp, lag_thresh=4, fill_na=False, median_thresh=2):
df = pd.read_csv(fp, sep='\t')
aupr_cols = [col for col in df.columns if 'swing_aupr' in col]
auroc_cols = [col for col in df.columns if 'swing_auroc' in col]
norm_aupr = df[aupr_cols].sub(df['baseline_aupr'],axis=0)
norm_auroc = df[auroc_cols].sub(df['baseline_auroc'],axis=0)
norm_aupr.columns = ["norm_" + col for col in aupr_cols]
norm_auroc.columns = ["norm_" + col for col in auroc_cols]
final_df = df.join(norm_aupr).join(norm_auroc)
final_df.sort('cluster_id', inplace=True)
return(final_df)
def print_full(x):
pd.set_option('display.max_rows', len(x))
print(x)
pd.reset_option('display.max_rows')
def plot_lag(df, idx):
# separate into two groups, between and within groups
within_clusters = df[df['parent_cluster'] == df['child_cluster']]
between_clusters = df[df['parent_cluster'] != df['child_cluster']]
plt.figure(1)
#within_clusters_values = within_clusters['lag_mean'].dropna().values
#between_clusters_values = between_clusters['lag_mean'].dropna().values
within_clusters_values = within_clusters['Lag'].dropna().values
between_clusters_values = between_clusters['Lag'].dropna().values
within_weights= np.ones_like(within_clusters_values)/len(within_clusters_values)
between_weights= np.ones_like(between_clusters_values)/len(between_clusters_values)
    ax_clust = fig.add_subplot(10, 1, idx + 1)  # subplot index is 1-based; idx starts at 0
bins = [0,0.5,1,1.5,2,2.5,3]
ax_clust.hist(within_clusters['Lag'].dropna().values, alpha = 0.5, label = 'Within', weights=within_weights, color = 'red')
ax_clust.hist(between_clusters['Lag'].dropna().values, alpha = 0.5, label = 'Between', weights=between_weights, color = 'blue')
plt.legend(shadow=True, fancybox=True)
CLUSTER=4
df_list = []
prev_df = None
iseq = True
directory ='/projects/p20519/roller_output/cluster_summaries/'
for filepath in os.listdir(directory):
if 'sc_cluster_summary_within_c'+str(CLUSTER)+'_' in filepath and 'swp' not in filepath:
try:
final_df = parse_summary(directory+filepath)
# each summary is the result of running the tdrollers on a certain cluster
# check if final df cluster ids is equal to prev df
df_list.append(final_df)
except ValueError:
continue
## I want to get a positive correlation of the norm auroc columns
exp =df_list[0]
parsed_df_list = []
for df in df_list:
if exp['cluster_id'].equals(df['cluster_id']):
parsed_df_list.append(df)
big_df = pd.concat(parsed_df_list)
mean_df=big_df.groupby(level=0).mean()
t_list = [0]
m_list = [10]
#t_list = [6,7]
#t_list = [0,1,2,3,4,5,6,7,8,9,10]
#m_list = [0]
#t_list = [6]
#m_list = [2]
param_list = []
fig = plt.figure(1,figsize=(20,20))
counter = 0
for m in m_list:
for t in t_list:
info,lag_df = pinf(lag_thresh=t, fill_na=False, median_thresh=m, CLUSTER=CLUSTER, img_append=str(t))
inf_col = info.columns.tolist()
temp_df = mean_df.merge(info, on='cluster_id')
plot_lag(lag_df, counter)
norm_cols = [col for col in mean_df.columns if 'norm' in col]
test_stat = 'percent_lagged_y'
norm_cor = temp_df.corr(method='spearman')[test_stat].loc[norm_cols]
print('norm_correlation :', norm_cor)
print('t,m ',t,m)
param_list.append((m,t))
temp_df.sort(test_stat, inplace=True)
#thresh = temp_df[test_stat].describe()[5]
thresh = 0.1
not_lagged = temp_df[temp_df[test_stat] < thresh]
lagged = temp_df[temp_df[test_stat] >= thresh]
diff = lagged[norm_cols].mean() - not_lagged[norm_cols].mean()
print('diff improvement: ', diff)
print('lagged not lagged',len(lagged), len(not_lagged))
print('threshold',thresh)
counter +=1
print(param_list)
info, lag_df = pinf(lag_thresh=t, fill_na=False, median_thresh=m, CLUSTER=CLUSTER)
inf_col = info.columns.tolist()
mean_df = mean_df.merge(info, on='cluster_id')
mean_df.to_csv('mean_cluster_summary_within_sc_c'+str(CLUSTER)+'.csv', sep = '\t', index = False)
mean_df.corr()
mean_df.corr().to_csv('mean_corr_cluster_summary_within_sc_c'+str(CLUSTER)+'.csv', sep = '\t', index = False)
plt.figure(1)
plt.savefig('SC_CLUSTER_'+str(CLUSTER)+'_hist_between_within.png')
pdb.set_trace()
```
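The baseline normalization in `parse_summary` subtracts each cluster's no-lag score from every SWING variant column via `DataFrame.sub(..., axis=0)`. A minimal frame with invented numbers shows the row-wise broadcasting:
```python
import pandas as pd

# Invented cluster summary: one baseline column plus two SWING variants.
df = pd.DataFrame({
    'cluster_id':    [0, 1],
    'baseline_aupr': [0.20, 0.40],
    'swing_aupr':    [0.25, 0.38],
    'swing_aupr2':   [0.30, 0.45],
})

aupr_cols = [col for col in df.columns if 'swing_aupr' in col]
norm_aupr = df[aupr_cols].sub(df['baseline_aupr'], axis=0)   # subtract baseline row-wise
norm_aupr.columns = ['norm_' + col for col in aupr_cols]

print(df.join(norm_aupr))
# norm columns come out to ~0.05 / -0.02 and ~0.10 / 0.05
```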
#### File: scripts/figureS4/box_plot_large_network_comparison.py
```python
import matplotlib
matplotlib.use('Agg')
from Swing.util.BoxPlot import BoxPlot
from matplotlib.backends.backend_pdf import PdfPages
import pdb
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import sys
import os
pd.set_option('display.width', 2000)
"""
Script that loads data from a dataframe and generates boxplots
"""
def read_tdr_results(folder_list):
agg_df = pd.DataFrame()
for input_folder in folder_list:
for file_path in os.listdir(input_folder):
if ".tsv" in file_path:
# print(file_path)
df = pd.read_csv(input_folder + file_path, sep=',|\t', engine='python')
# Correct weird inputs:
# Omranian data includes a weird "alpha" at the beginning which shifts and messes up data.
if 'alpha' in df.columns:
new_cols = df.columns[1:]
# Delete last column
del df[df.columns[-1]]
df.columns = new_cols
agg_df = agg_df.append(df)
return (agg_df)
def parse_tdr_results(agg_df, test_statistic, datasets):
label_list = []
auroc_list = []
## Analyze:
# nonuniform
# uniform
# for all networks 1 2 3 4 5
# parsing for windows = 7, windows = 4
for dataset in datasets:
current_df = agg_df[agg_df['file_path'].str.contains(dataset)]
RF = current_df[(current_df['td_window'] == 21)]
SWING_RF = current_df[(current_df['td_window'] == 15)]
comparisons = [RF, SWING_RF]
for category in comparisons:
auroc_list.append(category[test_statistic][0:n_trials].tolist())
label_list.append("Dionesus")
label_list.append("SWING Dionesus")
# label_list.append("Dionesus")
# label_list.append("SWING Dionesus")
return ((label_list, auroc_list))
output_path = "./"
input_folder_list = ["/Users/jfinkle/Downloads/Lasso/"]
test_statistic = ['aupr', 'auroc']
save_tag = "Dionesus_Yeast100_11-20"
n_trials = 100
datasets = ["Yeast100-" + str(index) + "_" for index in range(1, 21)]
# datasets = ['insilico_size10_1','insilico_size10_2','insilico_size10_3','insilico_size10_4','insilico_size10_5']
agg_df = read_tdr_results(input_folder_list)
with PdfPages(output_path + save_tag + '.pdf') as pdf:
for test in test_statistic:
label_list, auroc_list = parse_tdr_results(agg_df, test, datasets)
bp_data = auroc_list
bp = BoxPlot()
bp.plot_box(bp_data, label_list)
title = save_tag
bp.add_formatting(title, y_label=test.upper())
pdf.savefig(bp.f)
# auroc_1 = df['auroc'].values
# auroc_2 = df['auroc'].values
# bp_data = [auroc_1,auroc_2]
# bp = BoxPlot()
# bp.plot_box(bp_data, ['n_trees = 10', 'n_trees = 20'])
# bp.save_plot(output_path, save_tag)
# grouped.get_group((2,2)).mean()['aupr']
```
#### File: scripts/figureS5/get_missing_networks.py
```python
import matplotlib
matplotlib.use('Agg')
from Swing.util.BoxPlot import BoxPlot
from matplotlib.backends.backend_pdf import PdfPages
import pdb
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import sys
import os
import time
def get_df(df, fp, min_lag, max_lag, td_window):
new_df = df[(df['file_path'] == fp) & (df['min_lag'] == min_lag) & (df['max_lag'] == max_lag) & (df['td_window'] == td_window)]
return(new_df)
def read_tdr_results(folder_list, folder_str):
agg_df = pd.DataFrame()
for input_folder in folder_list:
for file_path in os.listdir(input_folder):
if folder_str in file_path:
df = pd.read_csv(input_folder+file_path,sep='\t', engine='python')
# check if the columns are misaligned.
if type(df['permutation_n'].iloc[0]) is str:
new_col = df.columns.tolist()
new_col.pop(0)
new_df = df.iloc[:,0:len(df.iloc[0])-1]
new_df.columns = new_col
df=new_df
agg_df = agg_df.append(df)
return(agg_df)
#input_folder_list = ["/projects/p20519/roller_output/high_sampling/RandomForest/"]
input_folder_list = ["/projects/p20519/roller_output/gnw/Dionesus/"]
test_statistic = ['aupr', 'auroc']
save_tag = "window_scan"
n_trials = 100
start = time.time()
agg_df = read_tdr_results(input_folder_list, folder_str = "2017-09")
#agg_df.to_pickle("RF_window_scan.pkl")
end = time.time()
stat = 'aupr'
network_list = agg_df['file_path'].unique().tolist()
window_sizes = range(1,21)
outer_list = []
for td_window in window_sizes:
inner_list = []
for network in network_list:
baseline = get_df(agg_df, network, 0, 0, 21)
if len(baseline) == 0:
continue
baseline_mean=baseline[stat].mean()
if 21-td_window > 2:
max_lag = 3
else:
max_lag = 21-td_window
if (td_window == 21):
min_lag = 0
max_lag = 0
else:
min_lag = 1
comparisons = get_df(agg_df, network, min_lag, max_lag, td_window)
if len(comparisons) == 0:
continue
winstat = ((comparisons[stat]-baseline_mean)/baseline_mean)*100
inner_list.append(winstat.iloc[0])
outer_list.append(inner_list)
pdb.set_trace()
```
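The `winstat` values collected above are simply percent changes of each SWING run relative to the mean of the full-window, no-lag baseline. The same arithmetic on made-up numbers:
```python
import numpy as np

# Hypothetical AUPR values: several baseline trials (td_window=21, min_lag=max_lag=0)
# and one SWING run at a smaller window.
baseline_aupr = np.array([0.30, 0.32, 0.28, 0.31])
swing_aupr = 0.36

baseline_mean = baseline_aupr.mean()
winstat = (swing_aupr - baseline_mean) / baseline_mean * 100
print(round(winstat, 1))   # 19.0 -> SWING improves AUPR by ~19% over the baseline mean
```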
#### File: scripts/old/analyze_promotion_plot.py
```python
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sys
sys.path.append('/home/jjw036/Roller/pipelines')
import Pipelines as pl
import numpy as np
import pandas as pd
from datetime import datetime
import time
from Swing.util.Evaluator import Evaluator
import pickle
import pdb
def parse_method(method_string, max_window):
"""
Parameters:
method_string is a str that contains abbreviated information about parameter settings
ie Dionesus-td10 indicates that dionesus will be run with a windowsize of 10
"""
min_lag = 1
max_lag = 3
td_window = 15
inf_method = method_string.split('-')[0]
misc = method_string.split('-')[1]
if "td" in misc:
td_window = int(misc.split('_')[1])
if td_window == max_window:
min_lag = 0
max_lag = 0
elif td_window + max_lag > max_window:
max_lag = max_window - td_window
elif "ml" in misc:
case = str(misc.split('_')[1])
min_lag = int(case[0])
max_lag = int(case[1])
if td_window > max_window:
td_window = 3
return(inf_method, td_window, min_lag, max_lag)
def parse_job_index(job_index):
## Job Indices:
## 1-20: ecoli 1 to 20, 10 node networks, Dionesus
## 21-40: yeast 1 to 20, 10 node networks, Dionesus
## 41-60: ecoli 1 to 20, 100 node networks, Dionesus
# 61-80: yeast 1 to 20, 100 node networks, Dionesus
## 81 omranian Dionesus
## 101-120: ecoli 1 to 20, 10 node networks, RF
## 121-140: yeast 1 to 20, 10 node networks, RF
## 141-160: ecoli 1 to 20, 100 node networks, RF
## 161-180: yeast 1 to 20, 100 node networks, RF
## 181 omranian RF
## 201-220: ecoli 1 to 20, 10 node networks, LASSO
## 221-240: yeast 1 to 20, 10 node networks, LASSO
## 241-260: ecoli 1 to 20, 100 node networks, LASSO
## 261-280: yeast 1 to 20, 100 node networks, LASSO
## 281 omranian lasso
## 301-320: ecoli 1 to 20, 10 node networks, community
## 321-340: yeast 1 to 20, 10 node networks, community
## 341-360: ecoli 1 to 20, 100 node networks, community
## 361-380: yeast 1 to 20, 100 node networks, community
## 381 omranian community
if job_index < 100:
inf_method = 'Dionesus'
elif job_index < 200:
inf_method = 'RandomForest'
elif job_index < 300:
inf_method = 'Lasso'
elif job_index < 400:
inf_method = 'community'
organism_index = job_index%100
if organism_index < 21:
organism = 'Ecoli'
elif organism_index < 41:
organism = 'Yeast'
elif organism_index < 61:
organism = 'Ecoli100'
elif organism_index < 81:
organism = 'Yeast100'
elif organism_index == 81:
organism = 'omranian'
elif organism_index >= 82:
organism = 'dream5'
network_index = organism_index%20
if network_index == 0:
network_index = 20
    if organism == 'omranian':
        file_path = "/home/jjw036/Roller/data/invitro/omranian_parsed_timeseries.tsv"
        data_folder = "/projects/p20519/roller_output/ranks/%s/%s_" % (inf_method,organism)
    elif organism == 'dream5':
        file_path = "/home/jjw036/Roller/data/dream5/insilico_timeseries.tsv"
        data_folder = "/projects/p20519/roller_output/ranks/%s/%s_" % (inf_method,organism)
else:
file_path = "/home/jjw036/Roller/data/gnw_insilico/network_data/%s/%s-%d_timeseries.tsv" % (organism,organism,network_index)
data_folder = "/projects/p20519/roller_output/ranks/%s/%s_%d_" % (inf_method,organism,network_index)
return(file_path, data_folder, inf_method)
def main(job,n_trials=1):
"""
Prints a series of text files with the parameters: denoted by ! and the resulting ranked list for each trial, denoted by a time-stamp
Parameters:
job - an int that corresponds to a method/network combination
** each job runs a series of methods for one network, each method produces its own rank file
"""
for i in range(n_trials):
current_time = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
        default_params = {'data_folder': None, 'file_path': None,
                          'td_window': 0, 'min_lag': 0, 'max_lag': 0,
                          'n_trees': 100, 'permutation_n': 5, 'lag_method': 'mean_mean',
                          'calc_mse': False, 'bootstrap_n': 50, 'n_trials': n_trials,
                          'run_time': current_time, 'sort_by': 'rank',
                          'iterating_param': 'promotion', 'filter_noisy': False,
                          'alpha': None, 'trial_time': 0, 'auroc': 0.0, 'aupr': 0.0}
run_params = default_params.copy()
file_path,data_folder, inf_method = parse_job_index(job)
run_params['file_path'] = file_path
run_params['data_folder'] = data_folder
methods_of_interest = ['-ml_00']
# 11 methods of interest. so it goes from 82 to 93
max_window = 21
if 'omranian' in file_path:
methods_of_interest = ['-td_6','-td_5','-td_4', '-ml_01', '-ml_11', '-ml_02','-ml_12', '-ml_22', '-ml_23', '-ml_13']
max_window = 6
elif 'dream5' in file_path:
methods_of_interest = methods_of_interest[(job-82)%100]
method_strings = [inf_method + x for x in methods_of_interest]
for method_string in method_strings:
#update current_time
current_time = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
run_params['run_time'] = current_time
trial_start = time.time()
inf_method, td_window, min_lag, max_lag = parse_method(method_string, max_window)
run_params['td_window'] = td_window
run_params['min_lag'] = min_lag
run_params['max_lag'] = max_lag
print(run_params)
if 'community' in data_folder:
roc,pr, tdr, rank_table = pl.get_td_community(**run_params)
else:
roc, pr, tdr = pl.get_td_stats_custom(**run_params)
run_params['auroc']=roc
run_params['aupr']=pr
trial_end = time.time()
run_params['trial_time'] = trial_end-trial_start
if 'community' in data_folder:
result_table = rank_table
else:
result_table = tdr.make_sort_df(tdr.edge_dict, sort_by = 'rank')
result_table['rank_importance'] = np.arange(len(result_table))
output_file = data_folder+current_time+".csv"
with open(output_file,'a') as output:
for key, value in run_params.items():
output.write('!%s,%s\n' % (key, value))
result_table.to_csv(output, header=True, index=False, sep='\t')
if __name__ == '__main__':
job_index = int(sys.argv[1])
if len(sys.argv) >= 3:
n_trials = int(sys.argv[2])
else:
n_trials = 1
main(job_index, n_trials)
```
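The `-td_X` / `-ml_XY` suffixes parsed by `parse_method` encode the window width and lag bounds for each run. The standalone sketch below mirrors that convention for a 21-point series; the helper name and the example strings are illustrative only and not part of the pipeline:
```python
# Minimal sketch of the "<method>-td_X" / "<method>-ml_XY" convention used by
# parse_method above; assumes max_window = 21 as in the GNW runs.

def demo_parse(method_string, max_window=21):
    inf_method, misc = method_string.split('-')
    td_window, min_lag, max_lag = 15, 1, 3           # defaults mirrored from parse_method
    if misc.startswith('td'):
        td_window = int(misc.split('_')[1])
        if td_window == max_window:                  # full-width window -> no lag
            min_lag, max_lag = 0, 0
        elif td_window + max_lag > max_window:       # clip the lag so windows stay in bounds
            max_lag = max_window - td_window
    elif misc.startswith('ml'):
        case = misc.split('_')[1]
        min_lag, max_lag = int(case[0]), int(case[1])
    return inf_method, td_window, min_lag, max_lag

for s in ['Dionesus-td_21', 'RandomForest-td_15', 'Lasso-ml_12']:
    print(s, demo_parse(s))
# Dionesus-td_21     -> ('Dionesus', 21, 0, 0)
# RandomForest-td_15 -> ('RandomForest', 15, 1, 3)
# Lasso-ml_12        -> ('Lasso', 15, 1, 2)
```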
#### File: scripts/timeseries_sampling/get_missing_networks.py
```python
import pandas as pd
import pdb
import os
import time
def read_tdr_results(folder_list, folder_str):
agg_df = pd.DataFrame()
for input_folder in folder_list:
for file_path in os.listdir(input_folder):
if folder_str in file_path:
try:
df = pd.read_csv(input_folder+file_path,sep='\t', engine='python')
except pd.io.common.EmptyDataError:
continue
agg_df = agg_df.append(df)
return(agg_df)
output_path = "/home/jjw036/"
input_folder_list = ["/projects/p20519/roller_output/high_sampling/Lasso/"]
#input_folder_list = ["/projects/p20519/roller_output/gnw/RandomForest/", "/projects/p20519/roller_output/gnw/Lasso/", "/projects/p20519/roller_output/gnw/Dionesus/"]
test_statistic = ['aupr', 'auroc']
save_tag = "sampling_comparison"
n_trials = 100
#datasets = ["_"]
#datasets = ['insilico_size10_1','insilico_size10_2','insilico_size10_3','insilico_size10_4','insilico_size10_5']
start = time.time()
agg_df = read_tdr_results(input_folder_list, folder_str = "2017-09")
result_counts = agg_df['file_path'].dropna().value_counts()
completed_files = result_counts[result_counts > 99].index.tolist()
job_file = pd.read_csv('/home/jjw036/Roller/pipelines/job_params_high_sampling.txt', sep = ' ', header=None)
job_file.columns = ['data_path','data_path', 'input_file', 'iterating_param', 'iterating_style']
mask = job_file['input_file'].str.contains('|'.join(completed_files))
new_jobs = job_file[~mask]
n_jobs = len(new_jobs)
new_jobs.to_csv('/home/jjw036/Roller/pipelines/job_params_high_sampling_missing.txt', sep = ' ', header=None, index=False)
print("Added {} missing jobs".format(n_jobs))
```
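The missing-job filter above reduces to a `value_counts` threshold plus a regex mask over the job table. A toy version with hypothetical file names and paths:
```python
import pandas as pd

# Hypothetical run log: one row per finished trial, keyed by the network's file path.
results = pd.DataFrame({'file_path': ['net1.tsv'] * 100 + ['net2.tsv'] * 40})
result_counts = results['file_path'].value_counts()
completed_files = result_counts[result_counts > 99].index.tolist()   # ['net1.tsv']

# Hypothetical job table: re-queue only jobs whose input file is not finished yet.
jobs = pd.DataFrame({'input_file': ['runs/net1.tsv', 'runs/net2.tsv', 'runs/net3.tsv']})
mask = jobs['input_file'].str.contains('|'.join(completed_files))
print(jobs[~mask])   # net2 and net3 are written back out as missing jobs
# Caveat: if completed_files were empty, '|'.join(...) == '' matches every row,
# so a guard for that case is worth adding in practice.
```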
#### File: Roller/Swing/DionesusWindow.py
```python
__author__ = '<NAME>'
__email__ = '<EMAIL>'
from scipy import stats
from sklearn.cross_decomposition import PLSRegression
import numpy as np
from sklearn.metrics import mean_squared_error
import pandas as pd
import sys
import pdb
from sklearn.decomposition import PCA
from .Window import Window
from .util import utility_module as utility
from .util.pls_nipals import vipp
class DionesusWindow(Window):
"""
A window that runs Dionesus as the network inference algorithm. The PLSR function is from sci-kit learn for
implementation consistency between window types
For more information about Dionesus see:
Ciaccio, <NAME>., et al. "The DIONESUS algorithm provides scalable and accurate reconstruction of dynamic
phosphoproteomic networks to reveal new drug targets." Integrative Biology (2015).
"""
def __init__(self, dataframe, window_info, roller_data, td_window, explanatory_dict, response_dict):
super(DionesusWindow, self).__init__(dataframe, window_info, roller_data, td_window, explanatory_dict,
response_dict)
self.num_pcs = None
self.beta_coefficients = None
self.vip = None
self.cv_table = None
self.bootstrap_matrix = None
self.freq_matrix = None
self.edge_stability_auc = None
def make_edge_table(self, calc_mse=False):
"""
:return:
Called by:
Swing.rank_edges()
"""
# Build indexing method for all possible edges. Length = number of parents * number of children
parent_index = range(self.beta_coefficients.shape[1])
child_index = range(self.beta_coefficients.shape[0])
a, b = np.meshgrid(parent_index, child_index)
# Flatten arrays to be used in link list creation
df = pd.DataFrame()
df['Parent'] = self.beta_coefficients.columns.values[a.flatten()]
df['Child'] = self.beta_coefficients.index.values[b.flatten()]
df['Importance'] = self.vip.values.flatten()
df['Beta'] = self.beta_coefficients.values.flatten()
df['P_window'] = self.explanatory_window[a.flatten()]
# Calculate the window of the child node, which is equivalent to the current window index
child_values = np.array([self.nth_window] * self.beta_coefficients.shape[0])
df['C_window'] = child_values[b.flatten()]
if self.permutation_p_values is not None:
df["p_value"] = self.permutation_p_values.flatten()
# Remove any self edges
df = df[~((df['Parent'] == df['Child']) & (df['P_window'] == df['C_window']))]
if calc_mse:
df['MSE_diff'] = self.edge_mse_diff.flatten()
return df
def sort_edges(self, method="importance"):
if self.results_table is None:
raise ValueError("The edge table must be created before getting edges")
if method == "p_value":
self.results_table.sort(columns=['p_value', 'importance'], ascending=[True, False], inplace=True)
elif method == "importance":
self.results_table.sort(columns=['importance', 'p_value'], ascending=[False, True], inplace=True)
return self.results_table['regulator-target'].values
def generate_results_table(self):
# generate edges for initial model
initial_edges = self.create_linked_list(self.beta_coefficients, 'B')
# permutation edges
permutation_mean_edges = self.create_linked_list(self.permutation_means, 'p-means')
permutation_sd_edges = self.create_linked_list(self.permutation_sd, 'p-sd')
stability_edges = self.create_linked_list(self.edge_stability_auc, 'stability')
aggregated_edges = initial_edges.merge(permutation_mean_edges, on='regulator-target').merge(
permutation_sd_edges, on='regulator-target').merge(stability_edges, on='regulator-target')
# sorry, it is a little messy to do the p-value calculations for permutation tests here...
# valid_indices = aggregated_edges['p-sd'] != 0
# valid_indices = aggregated_edges['B'] != 0
valid_window = aggregated_edges
initial_B = valid_window['B']
sd = valid_window['p-sd']
mean = valid_window['p-means']
valid_window['final-z-scores-perm'] = (initial_B - mean) / sd
valid_window['cdf-perm'] = (-1 * abs(valid_window['final-z-scores-perm'])).apply(stats.norm.cdf)
# calculate t-tailed pvalue
valid_window['p-value-perm'] = (2 * valid_window['cdf-perm'])
self.results_table = valid_window
return (self.results_table)
def rank_results(self, rank_by, ascending=False):
rank_column_name = rank_by + "-rank"
# rank edges with an actual beta value first until further notice ##
valid_indices = self.results_table['B'] != 0
valid_window = self.results_table[valid_indices]
valid_window[rank_column_name] = valid_window[rank_by].rank(method="dense", ascending=ascending)
edge_n = len(valid_window.index)
invalid_indices = self.results_table['B'] == 0
invalid_window = self.results_table[invalid_indices]
invalid_window[rank_column_name] = invalid_window[rank_by].rank(method="dense", ascending=ascending)
invalid_window[rank_column_name] += edge_n
self.results_table = valid_window.append(invalid_window)
self.results_table = self.results_table.sort(columns=rank_column_name, axis=0)
return (self.results_table)
def run_permutation_test(self, n_permutations=1000, crag=False):
# initialize permutation results array
self.permutation_means = np.empty((self.n_genes, self.n_genes))
self.permutation_sd = np.empty((self.n_genes, self.n_genes))
zeros = np.zeros(self.beta_coefficients.shape)
# initialize running calculation
result = {'n': zeros.copy(), 'mean': zeros.copy(), 'ss': zeros.copy()}
# inner loop: permute the window N number of times
for nth_perm in range(0, n_permutations):
# if (nth_perm % 200 == 0):
# print 'Perm Run: ' +str(nth_perm)
# permute data
permuted_data = self.permute_data(self.explanatory_data)
# fit the data and get coefficients
result_tuple = self.get_coeffs(x_data=permuted_data)
permuted_coeffs = result_tuple[0]
permuted_vip = result_tuple[1]
dummy_list = [permuted_coeffs]
result = self.update_variance_2D(result, dummy_list)
self.permutation_means = result['mean'].copy()
self.permutation_sd = np.sqrt(result['variance'].copy())
self.permutation_p_values = self.calc_p_value()
def calc_p_value(self, value=None, mean=None, sd=None):
if value is None:
value = self.beta_coefficients.copy()
if mean is None:
mean = self.permutation_means.copy()
if sd is None:
sd = self.permutation_sd.copy()
z_scores = (value - mean) / sd
cdf = stats.norm.cdf((-1 * abs(z_scores)))
p_values = 2 * cdf
return p_values
def initialize_params(self):
"""
Optimize the number of PCs to use.
:return:
"""
        # Estimate the number of PCs from the PCA explained-variance curve averaged over
        # all response models, choosing the elbow (point of maximum curvature) of that curve.
temp = self.remove_stationary_ts
self.remove_stationary_ts = False
result_tuple = self.get_coeffs(crag=False, calc_mse=False)
self.remove_stationary_ts = temp
mse_diff = result_tuple[2]
model_list = result_tuple[3]
model_inputs = result_tuple[4]
explained_variances = None
size_test = []
for response, explanatory, index in model_inputs:
size_test.append(explanatory.shape)
min_dim=sorted(size_test,key=lambda x: x[1], reverse=False)[0][1]
for response, explanatory, index in model_inputs:
pca = PCA()
pca.fit(explanatory)
if explained_variances is None:
explained_variances = pca.explained_variance_ratio_
else:
try:
explained_variances = np.vstack((explained_variances, pca.explained_variance_ratio_))
except ValueError:
try:
explained_variances = np.vstack((explained_variances[:,:min_dim], pca.explained_variance_ratio_[:min_dim]))
except IndexError:
truncated_index = min_dim
explained_variances = np.vstack((explained_variances[:truncated_index], pca.explained_variance_ratio_[:truncated_index]))
explained_variances_mean = np.mean(explained_variances, axis = 0)
test_pcs = [x for x in range(1, len(explained_variances_mean)+1)]
elbow_x, elbow_y = utility.elbow_criteria(test_pcs, explained_variances_mean)
self.num_pcs = elbow_x
def fit_window(self, pcs=3, crag=False, calc_mse=False):
"""
Set the attributes of the window using expected pipeline procedure and calculate beta values
:return:
"""
if self.num_pcs is not None:
pcs = self.num_pcs
result_tuple = self.get_coeffs(pcs, crag = crag, calc_mse = calc_mse)
self.beta_coefficients = result_tuple[0]
self.vip = result_tuple[1]
self.edge_mse_diff = result_tuple[2]
self.model_list = result_tuple[3]
def _fitstack_coeffs(self, n_pcs, coeff_matrix, vip_matrix, model_list, x_matrix, target_y, col_index, crag=False):
"""
:param n_pcs:
:param coeff_matrix:
:param vip_matrix:
:param model_list:
:param x_matrix:
:param target_y:
:param col_index:
:param crag:
:return:
"""
pls = PLSRegression(n_pcs, False)
# Fit the model
pls.fit(x_matrix, target_y)
model_params = {'col_index': col_index,
'response': target_y,
'predictor': x_matrix,
'model': pls}
model_list.append(model_params)
# artificially add a 0 to where the col_index is to prevent self-edges
coeffs = pls.coef_
coeffs = np.reshape(coeffs, (len(coeffs),))
vips = vipp(x_matrix, target_y, pls.x_scores_, pls.x_weights_)
vips = np.reshape(vips, (len(vips),))
if coeff_matrix.shape[1] - len(coeffs) == 1:
coeffs = np.insert(coeffs, col_index, 0)
vips = np.insert(vips, col_index, 0)
coeff_matrix = np.vstack((coeff_matrix, coeffs))
vip_matrix = np.vstack((vip_matrix, vips))
# scoping issues
if crag:
training_scores, test_scores = self.crag_window(model_params)
self.training_scores.append(training_scores)
self.test_scores.append(test_scores)
return coeff_matrix, vip_matrix, model_list
def get_coeffs(self, num_pcs=2, x_data=None, y_data=None, crag=False, calc_mse=False):
"""
:param x_data:
:param n_trees:
:return: array-like
An array in which the rows are children and the columns are the parents
"""
# initialize items
if y_data is None:
y_data = self.response_data
if x_data is None:
x_data = self.explanatory_data
coeff_matrix, model_list, model_inputs = self._initialize_coeffs(data = x_data, y_data = y_data, x_labels = self.explanatory_labels, y_labels = self.response_labels, x_window = self.explanatory_window, nth_window = self.nth_window)
vip_matrix = coeff_matrix.copy()
mse_matrix = None
# Calculate a model for each target column
for target_y, x_matrix, insert_index in model_inputs:
coeff_matrix, vip_matrix, model_list = self._fitstack_coeffs(num_pcs, coeff_matrix, vip_matrix, model_list,
x_matrix, target_y, insert_index, crag=crag)
if calc_mse:
base_mse = mean_squared_error(model_list[insert_index]['model'].predict(x_matrix), target_y)
f_coeff_matrix, f_model_list, f_model_inputs = self._initialize_coeffs(data=x_matrix, y_data=y_data, x_labels=self.explanatory_labels, y_labels = self.response_labels, x_window = self.explanatory_window, nth_window = self.nth_window)
f_vip_matrix = f_coeff_matrix.copy()
mse_list = []
for idx in range(x_matrix.shape[1]):
adj_x_matrix = np.delete(x_matrix, idx, axis=1)
f_coeff_matrix, f_vip_matrix, f_model_list = self._fitstack_coeffs(num_pcs, f_coeff_matrix,
f_vip_matrix, f_model_list,
adj_x_matrix, target_y,
idx, crag)
mse_diff = base_mse - mean_squared_error(f_model_list[idx]['model'].predict(adj_x_matrix), target_y)
mse_list.append(mse_diff)
if mse_matrix is None:
mse_matrix = np.array(mse_list)
else:
mse_matrix = np.vstack((mse_matrix, np.array(mse_list)))
coeff_dataframe = pd.DataFrame(coeff_matrix, index=self.response_labels, columns=self.explanatory_labels)
coeff_dataframe.index.name = 'Child'
coeff_dataframe.columns.name = 'Parent'
importance_dataframe = pd.DataFrame(vip_matrix, index=self.response_labels, columns=self.explanatory_labels)
importance_dataframe.index.name = 'Child'
importance_dataframe.columns.name = 'Parent'
return coeff_dataframe, importance_dataframe, mse_matrix, model_list, model_inputs
```
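Each Dionesus window scores edges with PLS regression coefficients plus VIP (variable importance in projection) scores from `Swing.util.pls_nipals.vipp`. The sketch below is an independent, textbook VIP computation on random data, shown only to illustrate what the per-window importance represents; it is not the package's `vipp` implementation:
```python
import numpy as np
from sklearn.cross_decomposition import PLSRegression

def vip_scores(pls):
    """Standard VIP: weight each predictor by the y-variance captured by the
    components it loads on, scaled so the average VIP is around 1."""
    t = pls.x_scores_                      # (n_samples, n_components)
    w = pls.x_weights_                     # (n_features, n_components)
    q = pls.y_loadings_                    # (n_targets, n_components)
    p, a = w.shape
    ss = np.diag(t.T @ t) * (q.ravel() ** 2)        # variance explained per component
    w_norm = w / np.linalg.norm(w, axis=0)          # normalized weight vectors
    return np.sqrt(p * (w_norm ** 2 @ ss) / ss.sum())

rng = np.random.RandomState(0)
X = rng.randn(40, 6)
y = 2 * X[:, 0] - X[:, 3] + 0.1 * rng.randn(40)     # only columns 0 and 3 matter

pls = PLSRegression(n_components=2)
pls.fit(X, y.reshape(-1, 1))
print(vip_scores(pls))    # columns 0 and 3 should get the largest VIP scores
```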
#### File: Roller/Swing/Swing.py
```python
import random
import sys
import pandas as pd
import numpy as np
import warnings
from scipy import stats
from .Window import Window
from .RFRWindow import RandomForestRegressionWindow
from .DionesusWindow import DionesusWindow
from .LassoWindow import LassoWindow
from .util import utility_module as utility
from .util.Evaluator import Evaluator
import pdb
class Swing(object):
"""
    Loads a time-series dataset, slices it into (optionally lagged) windows of a given width and
    step size, fits an inference model to each window, and compiles the per-window edges into one ranked list.
"""
def __init__(self, file_path, gene_start=None, gene_end=None, time_label="Time", separator="\t",
window_type="RandomForest", step_size=1, min_lag=0, max_lag=0, window_width=3, sub_dict = None):
"""
Initialize the roller object. Read the file and put it into a pandas dataframe
:param file_path: string
File to read
:param gene_start: int
:param gene_end: int
:param time_label: str
:param separator: str
:param window_type: str
:param step_size: int
:param min_lag: int
:param max_lag: int or None
:param window_width: int
:return:
"""
# Read the raw data into a pandas dataframe object
self.raw_data = pd.read_csv(file_path, sep=separator)
self.raw_data = self.raw_data.dropna(axis=0, how='all')
if sub_dict is not None:
valid_genes = sub_dict['genes']
new_cols = [time_label] + list(valid_genes)
self.raw_data = self.raw_data[new_cols]
self.file_path = file_path
self.window_type = window_type
# Set SWING defaults
self.current_step = 0
self.window_width = window_width
self.step_size = step_size
self.time_label = time_label
self.crag = False
self.calc_mse = False
self.alpha = None
self.tf_list = None
# Get overall width of the time-course
self.time_vec = self.raw_data[self.time_label].unique()
self.overall_width = len(self.time_vec)
# Set lag defaults
self.min_lag = min_lag
self.max_lag = max_lag
self.check_lags()
if gene_end is not None:
self.gene_end = gene_end
else:
self.gene_end = len(self.raw_data.columns)
if gene_start is not None:
self.gene_start = gene_start
else:
self.gene_start = 0
self.gene_list = self.raw_data.columns.values[self.gene_start:self.gene_end]
self.window_list = []
# assign norm data for window creation.
# by default, norm_data is raw_data and is later modified by other functions.
self.norm_data = self.raw_data
self.full_edge_list = None
self.edge_dict = None
self.lag_set = None
def get_n_windows(self):
"""
Calculate the number of windows
Called by:
create_windows
get_window_stats
:return: int
"""
total_windows = int((self.overall_width - self.window_width + 1.0) / self.step_size)
return(int(total_windows))
def filter_noisy(self):
for window in self.window_list:
window.remove_stationary_ts = True
def get_window_raw(self, start_index, random_time=False):
"""
Select a window from the full data set. This is fancy data-frame slicing
Called by:
create_windows
get_window_stats
get_window
:param start_index: int
The start of the window
:param random_time: bool, optional
:return: data-frame
"""
if random_time:
# select three random timepoints
time_window = self.time_vec[start_index]
choices = self.time_vec
choices = np.delete(choices, start_index)
for x in range(0, self.window_width - 1):
chosen_time = random.choice(choices)
time_window = np.append(time_window, chosen_time)
chosen_index = np.where(choices == chosen_time)
choices = np.delete(choices, chosen_index)
else:
end_index = start_index + self.window_width
time_window = self.time_vec[start_index:end_index]
data = self.norm_data[self.norm_data[self.time_label].isin(time_window)]
return data
def set_window(self, width):
"""
Set the window width
Called by:
pipeline
:param width: int
:return:
"""
self.window_width = width
def set_step(self, step):
"""
Set the window step size
Called by:
:param step:
:return:
"""
self.step_size = step
# need to do something about this method. keep for now, but currently need a "preprocess" method.
def remove_blank_rows(self):
"""
        Zero out any column whose sum evaluates to NaN
        Called by:
        :return:
        """
        # compute each column's sum; where that sum is NaN, set the whole column to 0
coln = len(self.raw_data.columns)
sums = [self.raw_data.iloc[:, x].sum() for x in range(0, coln)]
ind = np.where(np.isnan(sums))[0]
self.raw_data.iloc[:, ind] = 0
def get_n_genes(self):
"""
Calculate the number of genes in the data set
Called by:
:return:
"""
return len(self.raw_data.columns) - 1
def set_min_lag(self, min_lag):
"""
Set the minimum lag for the roller
:param min_lag:
:return:
"""
self.min_lag = min_lag
def set_max_lag(self, max_lag):
"""
        Set the maximum lag for the roller
        :param max_lag:
:return:
"""
self.max_lag = max_lag
def create_windows(self, random_time=False):
"""
Create window objects for the roller to use
Called by:
pipeline
:return:
"""
# Initialize empty lists
window_list = []
# Check to make sure lags are valid if parameters have been changed
self.check_lags()
# If min_lag is 0 and max_lag is 0 then you don't need a tdWindow
if self.min_lag == 0 and self.max_lag == 0:
td_window = False
else:
td_window = True
# Generate possible windows using specified SWING parameters
for index in range(0, self.get_n_windows()):
# Confirm that the window will not be out of bounds
if (index + self.window_width) > self.overall_width:
raise Exception('Window created that is out of bounds based on parameters')
explanatory_indices = utility.get_explanatory_indices(index, min_lag=self.min_lag, max_lag=self.max_lag)
raw_window = self.get_window_raw(index, random_time)
if explanatory_indices is not None:
explanatory_dict, response_dict = self.get_window_data(index, explanatory_indices)
window_info = {"time_label": self.time_label, "gene_start": self.gene_start, "gene_end": self.gene_end,
"nth_window": index}
window_object = self.get_window_object(raw_window, window_info, td_window, explanatory_dict,
response_dict)
window_list.append(window_object)
self.window_list = window_list
def create_custom_windows(self, tf_list,random_time=False):
"""
Create window objects for the roller to use, with set explanatory variables (such as TFs)
Called by:
pipeline
:return:
"""
#tf_list = ['CBF1','SWI5','ASH1', 'GAL4', 'GAL80']
#tf_list = ['G1','G2','G3','G4','G5','G6','G7','G8','G9','G10']
# Initialize empty lists
window_list = []
self.tf_list=tf_list
# Check to make sure lags are valid if parameters have been changed
self.check_lags()
# If min_lag is 0 and max_lag is 0 then you don't need a tdWindow
if self.min_lag == 0 and self.max_lag == 0:
td_window = False
else:
td_window = True
# Generate possible windows using specified SWING parameters
for index in range(0, self.get_n_windows()):
# Confirm that the window will not be out of bounds
if (index + self.window_width) > self.overall_width:
raise Exception('Window created that is out of bounds based on parameters')
explanatory_indices = utility.get_explanatory_indices(index, min_lag=self.min_lag, max_lag=self.max_lag)
raw_window = self.get_window_raw(index, random_time)
if explanatory_indices is not None:
explanatory_dict, response_dict = self.get_window_data(index, explanatory_indices)
#remove information from explanatory window
to_remove = list(set(explanatory_dict['explanatory_labels'])-set(tf_list))
for removed_tf in to_remove:
#remove from explanatory_labels
removed_index = np.where(explanatory_dict['explanatory_labels'] == removed_tf)[0][0]
explanatory_dict['explanatory_labels'] = np.delete(explanatory_dict['explanatory_labels'], removed_index)
#explanatory_window
explanatory_dict['explanatory_window'] = np.delete(explanatory_dict['explanatory_window'], removed_index)
#explanatory_data
explanatory_dict['explanatory_data'] = np.delete(explanatory_dict['explanatory_data'],removed_index,axis=1)
# not explanatory_times
window_info = {"time_label": self.time_label, "gene_start": self.gene_start, "gene_end": self.gene_end,
"nth_window": index}
window_object = self.get_window_object(raw_window, window_info, td_window, explanatory_dict,
response_dict)
window_list.append(window_object)
self.window_list = window_list
def check_lags(self):
"""
Make sure the user specified lags meet necessary criteria
:return:
"""
        if self.max_lag is not None and self.min_lag > self.max_lag:
            raise ValueError('The minimum lag {} cannot be greater than the maximum lag {}'.format(self.min_lag, self.max_lag))
        if self.min_lag < 0:
            raise ValueError('The minimum lag {} cannot be negative'.format(self.min_lag))
        if self.min_lag > self.get_n_windows():
            raise ValueError('The minimum lag {} cannot be greater than the number of windows {}'.format(self.min_lag, self.get_n_windows()))
        if self.max_lag is not None and self.max_lag >= self.get_n_windows():
            raise ValueError('The maximum lag {} cannot be greater than or equal to the number of windows {}'.format(self.max_lag, self.get_n_windows()))
def strip_dataframe(self, dataframe):
"""
Split dataframe object components into relevant numpy arrays
:param dataframe:
:return:
"""
df = dataframe.copy()
df_times = df[self.time_label].values
df.drop(self.time_label, axis=1, inplace=True)
data = df.values
labels = df.columns.values
return df_times, data, labels
def get_window_data(self, index, explanatory_indices):
"""
Get the appropriate data for the window
:param index:
:param explanatory_indices:
:return:
"""
# Get the data for the current window
response_df = self.get_window_raw(index)
response_times, response_data, response_labels = self.strip_dataframe(response_df)
response_window = np.array([index]*len(response_labels))
response_dict = {'response_times': response_times, 'response_data': response_data,
'response_labels': response_labels, 'response_window': response_window}
explanatory_times, explanatory_data, explanatory_labels, explanatory_window = None, None, None, None
# Get the data for each lagged window
for ii, idx in enumerate(explanatory_indices):
current_df = self.get_window_raw(idx)
current_times, current_data, current_labels = self.strip_dataframe(current_df)
current_window = np.array([idx]*len(current_labels))
if ii == 0:
# Initialize values
explanatory_times = current_times.copy()
explanatory_data = current_data.copy()
explanatory_labels = current_labels.copy()
explanatory_window = current_window.copy()
else:
# concatenate relevant windows horizontally.
explanatory_data = np.hstack((explanatory_data, current_data))
explanatory_times = np.append(explanatory_times, current_times)
explanatory_labels = np.append(explanatory_labels, current_labels)
explanatory_window = np.append(explanatory_window, current_window)
explanatory_dict = {'explanatory_times': explanatory_times, 'explanatory_data': explanatory_data,
'explanatory_labels': explanatory_labels, 'explanatory_window': explanatory_window}
return explanatory_dict, response_dict
def get_window_object(self, dataframe, window_info_dict, td_window, explanatory_dict, response_dict):
"""
Return a window object from a data-frame
Called by:
create_windows
:param dataframe: data-frame
:param window_info_dict: dict
Dictionary containing information needed for window initialization
:return:
"""
window_obj = None
if self.window_type == "Lasso":
window_obj = LassoWindow(dataframe, window_info_dict, self.norm_data, td_window, explanatory_dict,
response_dict)
elif self.window_type == "RandomForest":
window_obj = RandomForestRegressionWindow(dataframe, window_info_dict, self.norm_data, td_window,
explanatory_dict, response_dict)
elif self.window_type == "Dionesus":
window_obj = DionesusWindow(dataframe, window_info_dict, self.norm_data, td_window, explanatory_dict,
response_dict)
return window_obj
def initialize_windows(self):
"""
deprecated - Initialize window parameters and do a preliminary fit
Called by:
Currently only called by unittest Swing/unittests/test_roller.py
todo: delete
:return:
"""
for window in self.window_list:
window.initialize_params()
window.fit_window(crag=self.crag)
def rank_windows(self, n_permutes=10, n_bootstraps=10, n_alphas=20, noise=0.2):
"""
Run tests to score and rank windows
Called by:
:param n_permutes: int, optional
Number of permutes to run. Default is 1,000
:param n_bootstraps: int, optional
Number of bootstraps to run. Default is 1,000
:param n_alphas: int, optional
Number of alpha values to test if using Lasso. Default is 20
:param noise: float ([0,1]), optional
The amount of noise to add to bootstrapped windows. Default is 0.2
:return:
"""
for window in self.window_list:
window.run_permutation_test(n_permutes, crag=False)
window.run_bootstrap(n_bootstraps, n_alphas, noise)
window.make_edge_table()
def optimize_params(self):
"""
Optimize window fit parameters
Called by:
pipeline
:return:
"""
if self.window_type is "Lasso":
for window in self.window_list:
window.initialize_params(alpha=self.alpha)
else:
for window in self.window_list:
window.initialize_params()
return self.window_list
def fit_windows(self, pcs=None, alpha=None, n_trees=None, n_jobs=None, show_progress=True):
#todo: need a better way to pass parameters to fit functions
"""
Fit each window in the list
Called by:
pipeline
:param alpha:
:param n_trees:
:return:
"""
for window in self.window_list:
if self.window_type == "Lasso":
if alpha is not None:
window.alpha = alpha
if self.window_type == "RandomForest":
if n_trees is not None:
window.n_trees = n_trees
if n_jobs is not None:
window.n_jobs = n_jobs
if self.window_type == "Dionesus":
if pcs is not None:
window.num_pcs = pcs
if show_progress:
if window.td_window:
print("Fitting window index %i against the following window indices: ")
else:
print("Fitting window {} of {}".format(window.nth_window, self.get_n_windows()))
window.fit_window(crag=self.crag, calc_mse=self.calc_mse)
return self.window_list
def rank_edges(self, n_bootstraps=1000, permutation_n=1000):
"""
Run tests to rank edges in windows
Called by:
pipeline
:param n_bootstraps:
:param permutation_n:
:return:
"""
if self.window_type == "Dionesus":
for window in self.window_list:
#window.run_permutation_test(n_permutations=permutation_n, crag=False)
window.make_edge_table()
if self.window_type == "Lasso":
for window in self.window_list:
window.run_permutation_test(n_permutations=permutation_n, crag=False)
print("Running bootstrap...")
window.run_bootstrap(n_bootstraps=n_bootstraps)
window.make_edge_table()
if self.window_type == "RandomForest":
for window in self.window_list:
#print("Running permutation on window {}...".format(window.nth_window))
#window.run_permutation_test(n_permutations=permutation_n, crag=False)
window.make_edge_table(calc_mse=self.calc_mse)
return self.window_list
def average_rank(self, rank_by, ascending):
"""
Average window edge ranks
Called by:
pipeline
:param rank_by: string
The parameter to rank edges by
:param ascending: Bool
:return:
"""
if self.window_type == "Lasso":
ranked_result_list = []
for window in self.window_list:
ranked_result = window.rank_results(rank_by, ascending)
ranked_result_list.append(ranked_result)
if self.window_type == "RandomForest":
ranked_result_list = []
for window in self.window_list:
ranked_result = window.sort_edges(rank_by)
ranked_result_list.append(ranked_result)
aggr_ranks = utility.average_rank(ranked_result_list, rank_by + "-rank")
# sort tables by mean rank in ascending order
mean_sorted_edge_list = aggr_ranks.sort(columns="mean-rank", axis=0)
self.averaged_ranks = mean_sorted_edge_list
return self.averaged_ranks
def zscore_all_data(self):
#todo: this should not replace raw_data, a new feature should be made
#todo: scipy.stats.zscore can be used with the correct parameters for 1 line
"""
Zscore the data in a data-frame
Called by:
pipeline
:return: z-scored dataframe
"""
# zscores all the data
raw_dataset = self.raw_data.values.copy()
zscored_dataset = pd.DataFrame(stats.zscore(raw_dataset, axis=0, ddof=1), index=self.raw_data.index, columns=self.raw_data.columns)
zscored_dataset[self.time_label] = self.raw_data[self.time_label]
self.norm_data = zscored_dataset
return(zscored_dataset)
def get_window_stats(self):
"""
Generate a dictionary of relevant information from a window
N : the number of data points in this window,
time_labels: the names of the time points in a roller model
step_size: the step-size of the current model
window_size: the size of the window of the current model
total_windows: the number of windows total
window_index: the index of the window. counts start at 0. ie if the window index is 0 it is the 1st window.
If the window index is 12, it is the 12th window in the series.
Called by:
:return: dict
"""
"""for each window, get a dict:
N : the number of datapoints in this window,
time_labels: the names of the timepoints in a roller model
step_size: the step-size of the current model
window_size: the size of the window of the current model
total_windows: the number of windows total
window_index: the index of the window. counts start at 0. ie if the window index is 0 it is the 1st window. if the window index is 12, it is the 12th window in the series."""
current_window = self.get_window_raw()
"""calculate the window index. todo: move into own function later"""
min_time = np.amin(current_window[self.time_label])
        window_index = int(np.where(self.time_vec == min_time)[0][0] / self.step_size)
# to calculate the nth window, time vector
# index of the time-vector, step size of 2? window 4, step size 2
#
# total windows = total width (10) - window_width (2) +1 / step size
# 10 time points 0 1 2 3 4 5 6 7 8 9
# width is 2: 0 and 1
# step size is 2
# 01, 12, 23, 34, 45, 56, 67, 78, 89
# todo: so the issue is that total windows (get n windows) is the true number of windows, and window index is the nth -1 window... it would be great to consolidate these concepts but no big deal if they can't be.
window_stats = {'N': len(current_window.index),
'time_labels': current_window[self.time_label].unique(),
'step_size': self.step_size,
'window_size': self.window_width,
'total_windows': self.get_n_windows(),
'window_index': window_index}
return window_stats
def compile_roller_edges(self, self_edges=False):
"""
Edges across all windows will be compiled into a single edge list
:return:
"""
print("Compiling all model edges...", end='')
df = None
for ww, window in enumerate(self.window_list):
# Get the edges and associated values in table form
current_df = window.make_edge_table(calc_mse=self.calc_mse)
# Only retain edges if the MSE_diff is negative
if self.calc_mse:
current_df = current_df[current_df['MSE_diff'] < 0]
current_df['adj_imp'] = np.abs(current_df['Importance'])
#current_df['adj_imp'] = np.abs(current_df['Importance'])*(1-current_df['p_value'])
if self.window_type is "Dionesus":
current_df['adj_imp'] = np.abs(current_df['Importance'])
elif self.window_type is "Lasso":
current_df['adj_imp'] = np.abs(current_df['Stability'])
current_df.sort(['adj_imp'], ascending=False, inplace=True)
#current_df.sort(['Importance'], ascending=False, inplace=True)
current_df['Rank'] = np.arange(0, len(current_df))
if df is None:
df = current_df.copy()
else:
df = df.append(current_df.copy(), ignore_index=True)
if not self_edges:
df = df[df.Parent != df.Child]
df['Edge'] = list(zip(df.Parent, df.Child))
df['Lag'] = df.C_window - df.P_window
self.full_edge_list = df.copy()
print("[DONE]")
return
def compile_roller_edges2(self, self_edges=False):
"""
Edges across all windows will be compiled into a single edge list
:return:
"""
print("Compiling all model edges...")
df = None
for ww, window in enumerate(self.window_list):
# Get the edges and associated values in table form
current_df = window.make_edge_table(calc_mse=self.calc_mse)
# Only retain edges if the MSE_diff is negative
if self.calc_mse:
current_df = current_df[current_df['MSE_diff'] < 0]
current_df['adj_imp'] = np.abs(current_df['Importance'])*(1-current_df['p_value'])
#change
if ww == 8:
current_df['adj_imp'] = np.abs(current_df['Importance'])*(1-current_df['p_value'])*2
if self.window_type is "Dionesus":
current_df['adj_imp'] = np.abs(current_df['Importance'])
elif self.window_type is "Lasso":
current_df['adj_imp'] = np.abs(current_df['Stability'])
current_df.sort(['adj_imp'], ascending=False, inplace=True)
#current_df.sort(['Importance'], ascending=False, inplace=True)
current_df['Rank'] = np.arange(0, len(current_df))
if df is None:
df = current_df.copy()
else:
df = df.append(current_df.copy(), ignore_index=True)
if not self_edges:
df = df[df.Parent != df.Child]
df['Edge'] = list(zip(df.Parent, df.Child))
df['Lag'] = df.C_window - df.P_window
self.full_edge_list = df.copy()
print("[DONE]")
return
def make_static_edge_dict(self, true_edges, self_edges=False, lag_method='max_median'):
"""
Make a dictionary of edges
:return:
"""
print("Lumping edges...", end='')
df = self.full_edge_list.copy()
# Only keep edges with importance > 0. Values below 0 are not helpful for model building
df = df[df['Importance'] > 0]
# Ignore self edges if desired
if not self_edges:
df = df[df.Parent != df.Child]
edge_set = set(df.Edge)
# Calculate the full set of potential edges with TF list if it is provided.
if self.tf_list is not None:
full_edge_set = set(utility.make_possible_edge_list(np.array(self.tf_list), self.gene_list, self_edges=self_edges))
else:
full_edge_set = set(utility.make_possible_edge_list(self.gene_list, self.gene_list, self_edges=self_edges))
# Identify edges that could exist, but do not appear in the inferred list
edge_diff = full_edge_set.difference(edge_set)
self.edge_dict = {}
lag_importance_score, lag_lump_method = lag_method.split('_')
score_method = eval('np.'+lag_importance_score)
lump_method = eval('np.'+lag_lump_method)
for idx,edge in enumerate(full_edge_set):
if idx%1000 ==0:
print(str(idx)+" out of "+ str(len(full_edge_set)), end='')
if edge in edge_diff:
self.edge_dict[edge] = {"dataframe": None, "mean_importance": 0, 'real_edge': (edge in true_edges),
"max_importance": 0, 'max_edge': None, 'lag_importance': 0,
'lag_method': lag_method, 'rank_importance': np.nan, 'adj_importance': 0}
continue
current_df = df[df['Edge'] == edge]
max_idx = current_df['Importance'].idxmax()
lag_set = list(set(current_df.Lag))
lag_imp = score_method([lump_method(current_df.Importance[current_df.Lag == lag]) for lag in lag_set])
lag_adj_imp = score_method([lump_method(current_df.adj_imp[current_df.Lag == lag]) for lag in lag_set])
lag_rank = score_method([lump_method(current_df.Rank[current_df.Lag == lag]) for lag in lag_set])
self.edge_dict[edge] = {"dataframe":current_df, "mean_importance":np.mean(current_df.Importance),
'real_edge':(edge in true_edges), "max_importance":current_df.Importance[max_idx],
'max_edge':(current_df.P_window[max_idx], current_df.C_window[max_idx]),
'lag_importance': lag_imp, 'lag_method':lag_method,
'rank_importance': lag_rank, 'adj_importance':lag_adj_imp}
print("...[DONE]")
if edge_diff:
message = 'The last %i edges had no meaningful importance score' \
' and were placed at the bottom of the list' %len(edge_diff)
warnings.warn(message)
return
def make_sort_df(self, df, sort_by='mean'):
"""
Calculate the mean for each edge
:param df: dataframe
:return: dataframe
"""
sort_field = sort_by+"_importance"
print("Calculating {} edge importance...".format(sort_by), end='')
temp_dict = {edge: df[edge][sort_field] for edge in df.keys()}
sort_df = pd.DataFrame.from_dict(temp_dict, orient='index')
sort_df.columns = [sort_field]
if sort_by.lower() == 'rank':
sort_df.sort(sort_field, ascending=True, inplace=True)
else:
sort_df.sort(sort_field, ascending=False, inplace=True)
#sort_df['mean_importance'] = stats.zscore(sort_df['mean_importance'], ddof=1)
sort_df.index.name = 'regulator-target'
sort_df = sort_df.reset_index()
print("[DONE]")
return sort_df
def calc_edge_importance_cutoff(self, df):
"""
Calculate the importance threshold to filter edges on
:param df:
:return: dict
"""
x, y = utility.elbow_criteria(range(0, len(df.Importance)), df.Importance.values.astype(np.float64))
elbow_dict = {'num_edges':x, 'importance_threshold':y}
return elbow_dict
def get_samples(self):
df=pd.read_csv(self.file_path,sep='\t')
node_list = df.columns.tolist()
node_list.pop(0)
return node_list
def score(self, sorted_edge_list, gold_standard_file=None):
"""
        Score a sorted edge list against the gold standard network and return AUROC and AUPR summaries
Called by:
pipeline
:param sorted_edge_list:
:param gold_standard_file:
:return:
"""
print("Scoring model...", end='')
if gold_standard_file is None:
current_gold_standard = self.file_path.replace("timeseries.tsv","goldstandard.tsv")
else:
current_gold_standard = gold_standard_file
evaluator = Evaluator(current_gold_standard, '\t', node_list=self.get_samples())
tpr, fpr, auroc = evaluator.calc_roc(sorted_edge_list)
auroc_dict = {'tpr':np.array(tpr), 'fpr':np.array(fpr), 'auroc': np.array(auroc)}
precision, recall, aupr = evaluator.calc_pr(sorted_edge_list)
aupr_random = [len(evaluator.gs_flat)/float(len(evaluator.full_list))]*len(recall)
aupr_dict = {"precision": np.array(precision), "recall": np.array(recall), "aupr": np.array(aupr),
"aupr_random": np.array(aupr_random)}
print("[DONE]")
return auroc_dict, aupr_dict
```
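A quick sanity check on the windowing arithmetic used by `get_n_windows` (and referenced in the comments of `get_window_stats`): the number of windows is `(overall_width - window_width + 1) / step_size`, truncated to an integer. The helper below is just a mirror of that formula for worked examples:
```python
def n_windows(overall_width, window_width, step_size=1):
    # Mirrors Swing.get_n_windows()
    return int((overall_width - window_width + 1.0) / step_size)

# 10 time points, width 2, step 1 -> windows 01, 12, ..., 89
print(n_windows(10, 2))    # 9
# A 21-point series with a full-width window gives a single window,
# which is why the td_window == 21 baselines above run with min_lag = max_lag = 0.
print(n_windows(21, 21))   # 1
print(n_windows(21, 15))   # 7 windows available for the lagged SWING runs
```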
#### File: Swing/unittests/test_dionesus_window.py
```python
__author__ = '<NAME>'
__email__ = '<EMAIL>'
import unittest
import Swing
import pandas as pd
import numpy as np
import numpy.testing as npt
import pdb
class TestDionesusWindow(unittest.TestCase):
def setUp(self):
file_path = "../../data/dream4/insilico_size10_1_timeseries.tsv"
gene_start_column = 1
time_label = "Time"
separator = "\t"
gene_end = None
self.roller = Swing.Swing(file_path, gene_start_column, gene_end, time_label, separator,
window_type="Dionesus")
self.roller.create_windows()
self.test_dionesus = self.roller.window_list[0]
self.permutes = 10
def test_make_edge_table(self):
pass
def test_sort_edges(self):
pass
def test_generate_results_table(self):
pass
def test_rank_results(self):
pass
def test_run_permutation_test(self):
pass
def test_calc_p_value(self):
pass
def test_initialize_params(self):
pass
def test_fit_window(self):
pass
def test_fitstack_coeffs(self):
pass
def test_get_coeffs(self):
pass
def test_get_coeffs(self):
# All coefficients and vip scores should be nonzero except along the diagonal
expected_non_zero = len(self.test_dionesus.genes)**2-len(self.test_dionesus.genes)
calc_coeffs, calc_vip = self.test_dionesus.get_coeffs()
calc_non_zero = np.count_nonzero(calc_coeffs)
calc_vip_non_zero = np.count_nonzero(calc_vip)
self.assertTrue(expected_non_zero == calc_non_zero)
self.assertTrue(expected_non_zero == calc_vip_non_zero)
def test_run_permutation_test(self):
# The model must first be initialized
self.test_dionesus.initialize_params()
self.test_dionesus.fit_window()
self.test_dionesus.run_permutation_test(self.permutes)
n_genes = len(self.test_dionesus.genes)
self.assertTrue(self.test_dionesus.permutation_means.shape == (n_genes, n_genes))
self.assertTrue(self.test_dionesus.permutation_sd.shape == (n_genes, n_genes))
def test_make_edge_table(self):
self.test_dionesus.initialize_params()
self.test_dionesus.fit_window()
self.test_dionesus.run_permutation_test(self.permutes)
#self.test_dionesus.generate_results_table()
self.test_dionesus.make_edge_table()
old_order = self.test_dionesus.results_table['regulator-target'].values
self.test_dionesus.sort_edges()
new_order = self.test_dionesus.results_table['regulator-target'].values
self.assertFalse(np.array_equal(old_order, new_order))
if __name__ == '__main__':
unittest.main()
```
#### File: Swing/unittests/test_prediction.py
```python
import unittest
import numpy as np
import Swing
import random
from random import randint
import numpy.testing as npt
import pdb
import sklearn.metrics as skmet
import Swing.util.utility_module as Rutil
class TestWindow(unittest.TestCase):
def setUp(self):
file_path = "../../data/dream4/insilico_size10_1_timeseries.tsv"
gene_start_column = 1
time_label = "Time"
separator = "\t"
gene_end = None
self.roller = Swing.Swing(file_path, gene_start_column, gene_end, time_label, separator, window_type = "Lasso")
self.roller.set_window(width=20)
self.roller.create_windows()
self.test_window = self.roller.window_list[0]
def test_model_is_saved(self):
model_list = self.test_window.model
n_genes = self.test_window.n_genes
self.assertTrue(len(model_list),n_genes)
def test_prediction(self):
model_list = self.test_window.model
model = model_list[0]['model']
response_train = model_list[0]['response']
predictor_train = model_list[0]['predictor']
#get training scores
training_scores = Rutil.get_cragging_scores(model, predictor_train, response_train)
#get test set from the roller model
test_data = Rutil.get_test_set(self.test_window.raw_data, self.roller.raw_data)
response_col = 0
        response_test = test_data.iloc[:, response_col].values
        predictor_test = test_data.drop(test_data.columns[response_col], axis=1).values
#get prediction scores
test_scores = Rutil.get_cragging_scores(model, predictor_test, response_test)
def test_fit(self):
self.roller.optimize_params()
self.roller.fit_windows()
self.test_window.fit_window()
if __name__ == '__main__':
unittest.main()
```
#### File: Swing/util/BoxPlot.py
```python
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import os, pdb
import scipy.stats
from Swing.util.BasePlot import BasePlot
class BoxPlot(BasePlot):
def __init__(self):
BasePlot.__init__(self)
self.meanpointprops = dict(marker='D', markersize=6)
self.flierprops = dict(marker='o', markersize=6, markerfacecolor='black', markeredgecolor='black', linewidth=8.0)
self.boxprops = dict(color='black', linewidth=3.0)
self.whiskerprops = dict(color='black', linewidth=2.0)
self.capprops = self.whiskerprops
self.medianprops = dict(color='blue', linewidth=2.5)
def plot_box(self, y_values, labels = None):
"""
Plots the summary data
:param y_values: list of np vectors
:param label: int or str
example: the window size
:return fig obj:
"""
bp=self.axes.boxplot(y_values, labels=labels, widths = 0.3,medianprops = self.medianprops, whiskerprops=self.whiskerprops,flierprops=self.flierprops, meanprops=self.meanpointprops,showmeans=True, boxprops=self.boxprops, capprops=self.capprops)
return(bp)
def add_formatting(self, title, y_label):
self.axes.annotate(title, xy=(0.5, 1.01), xycoords='axes fraction', horizontalalignment='center', fontsize = 25)
#self.axes.set_aspect(25)
self.axes.set_ylabel(y_label, fontsize=30)
#self.axes.set_ylim([-0.4,0.6])
#self.axes.yaxis.set_ticks(np.arange(-0.4, 0.6, 0.1))
ylabels = self.axes.get_yticklabels()
xlabels = self.axes.get_xticklabels()
for label in (self.axes.get_xticklabels()):
label.set_fontsize(18)
label.set_rotation('vertical')
for label in (self.axes.get_yticklabels()):
label.set_fontsize(20)
for l in self.axes.get_xticklines() + self.axes.get_yticklines():
l.set_markersize(0)
def add_significance(self, mann_whitney_results, style = 'separate', reset=0.06):
counter = 0.01
for result in mann_whitney_results:
if counter > 0.05:
counter = 0.01
index_x = result[0]
index_y = result[1]
significance = result[2]
y_limits = self.axes.get_ylim()
if style == 'cascade':
if significance < 0.05:
self.axes.hlines(y=counter, xmin=index_x+1, xmax=index_y+1, color = "black")
if significance < 0.01:
self.axes.annotate('**', xy=((index_x+index_y+2)/2, counter-0.075), xycoords='data', horizontalalignment='center', fontsize = 20, weight='heavy', color = "black")
else:
self.axes.annotate('*', xy=((index_x+index_y+2)/2, counter-0.075), xycoords='data', horizontalalignment='center', fontsize = 20, weight='heavy', color = "black")
counter = counter + 0.01
elif style == 'separate':
if significance < 0.05:
self.axes.hlines(y=y_limits[1]-0.05, xmin=index_x+1, xmax=index_y+1, color = "black")
if significance < 0.01:
self.axes.annotate('**', xy=((index_x+index_y+2)/2, y_limits[1]-0.075), xycoords='data', horizontalalignment='center', fontsize = 20, weight='heavy', color = "black")
else:
self.axes.annotate('*', xy=((index_x+index_y+2)/2, y_limits[1]-0.075), xycoords='data', horizontalalignment='center', fontsize = 20, weight='heavy', color = "black")
return()
def sigtest(self, data_list, score):
results = []
for test in score:
index_x = test[0]
index_y = test[1]
test_result = scipy.stats.mannwhitneyu(data_list[index_x], data_list[index_y])
p_value = test_result[1]*2
results.append( (index_x, index_y, p_value) )
return(results)
def add_sections(self, box_plots_per_section, annotation_per_section, offset=0.05):
x_lim = self.axes.get_xlim()
total_boxplots = x_lim[1] - 0.5
line_coords = [x for x in range(0,int(total_boxplots),box_plots_per_section)]
#pop out the first one
#line_coords = line_coords[1:]
        annotation_location = list(np.linspace(0, 1, int(total_boxplots // box_plots_per_section), endpoint=False))
line_annotation = zip(line_coords, annotation_per_section, annotation_location)
for line, text, loc in line_annotation:
self.axes.axvline(x=line+0.5, color = "gray")
self.axes.annotate(text, xy=(loc+offset, .95), xycoords='axes fraction', horizontalalignment='center', fontsize = 20, weight='heavy', color = "gray")
return(True)
```
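A minimal usage sketch of the `BoxPlot` class above; it assumes the `BasePlot` base class (not shown here) sets up a Matplotlib figure and exposes it as `self.axes`, and all data values below are random placeholders.
```python
import numpy as np
from Swing.util.BoxPlot import BoxPlot

# Random placeholder data: three groups of scores, e.g. one per window size.
data = [np.random.normal(loc=m, scale=0.1, size=50) for m in (0.0, 0.1, 0.2)]
bp = BoxPlot()
bp.plot_box(data, labels=['WS 5', 'WS 10', 'WS 15'])
bp.add_formatting(title='Window size comparison', y_label='AUROC')
# Pairwise Mann-Whitney tests between the groups, then star annotations.
results = bp.sigtest(data, score=[(0, 1), (0, 2), (1, 2)])
bp.add_significance(results, style='separate')
```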
#### File: Swing/util/LinePlot.py
```python
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import os, pdb
from Swing.util.BasePlot import BasePlot
class LinePlot(BasePlot):
def set_x_values(self, x_values):
"""
Sets the x values (ie time points)
:param x_values: list of str/ints
"""
        self.x_values = list(map(int, x_values))
def plot_window_series(self, y_values, color_index, label, x_values = None):
"""
Plots points that are interpolated with a line.
:param y_values: list
:param label: int or str
example: the window size
"""
#if not x_values:
#x_values = self.x_values
if color_index > 19:
color_index = color_index%20
self.axes.plot(x_values,y_values, linestyle='-', color = self.tableau20[color_index], label = str(label), linewidth=5)
def plot_vertical_line(self, x_value,color_index, label):
"""
Plots a vertical line.
:param x_value: int
:param label: int or str
example: window size
:param color_index: corresponding color index on tableau20
"""
if color_index > 19:
color_index = color_index%20
self.axes.axvline(x=x_value, linestyle='--',color=self.tableau20[color_index],label="WS "+str(label), linewidth=3)
def plot_horizontal_line(self, y_value, color_index, label):
"""
Plots a horizontal line.
:param y_value: int
:param label: int or str
example: the window size
"""
my_x = [min(self.x_values), max(self.x_values)]
my_y = [y_value,y_value]
if color_index > 19:
color_index = color_index%20
self.axes.plot(my_x, my_y, linestyle='--',color=self.tableau20[color_index],label="WS "+str(label), linewidth=3)
def add_formatting(self, min_tick=0,max_tick=1200, interval=200):
#legend
box = self.axes.get_position()
self.axes.set_position([box.x0, box.y0 + box.height * 0.2, box.width, box.height * 0.8])
self.axes.legend(fontsize=8,bbox_to_anchor=(0.5, -0.2), loc='upper center',ncol=7,fancybox=True, shadow=True)
#labels
self.axes.set_ylabel('AUROC')
self.axes.set_xlabel('Time (min)')
xlabels = self.axes.get_xticklabels()
ylabels = self.axes.get_yticklabels()
for label in xlabels:
label.set_rotation(90)
label.set_fontsize(16)
for label in (self.axes.get_yticklabels()):
label.set_fontsize(16)
for l in self.axes.get_xticklines() + self.axes.get_yticklines():
l.set_markersize(0)
line_ticks = np.arange(min_tick,max_tick,interval)
self.axes.xaxis.set_ticks(line_ticks)
def add_formatting_auroc(self, min_tick=0,max_tick=1200, interval=200):
#legend
box = self.axes.get_position()
self.axes.set_position([box.x0, box.y0 + box.height * 0.2, box.width, box.height * 0.8])
self.axes.legend(fontsize=8,bbox_to_anchor=(0.5, -0.2), loc='upper center',ncol=7,fancybox=True, shadow=True)
#labels
self.axes.set_ylabel('True Positive Rate', fontsize = 16)
self.axes.set_xlabel('False Positive Rate', fontsize = 16)
xlabels = self.axes.get_xticklabels()
ylabels = self.axes.get_yticklabels()
for label in xlabels:
label.set_rotation(90)
label.set_fontsize(16)
for label in (self.axes.get_yticklabels()):
label.set_fontsize(16)
for l in self.axes.get_xticklines() + self.axes.get_yticklines():
l.set_markersize(0)
line_ticks = np.arange(min_tick,max_tick,interval)
```
#### File: Swing/util/MultiBoxPlot.py
```python
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import os, pdb
from Swing.util.BoxPlot import BoxPlot
class MultiBoxPlot(BoxPlot):
def __init__(self):
BoxPlot.__init__(self)
self.axes_list = []
self.axes_list.append(plt.subplot(2,1,1))
self.axes_list.append(plt.subplot(2,1,2))
def plot_box(self, y_values, labels = None):
"""
Plots the summary data
:param y_values: list of np vectors
:param label: int or str
example: the window size
:return fig obj:
"""
bp=self.axes.boxplot(y_values, labels=labels, widths = 0.3,medianprops = self.medianprops, whiskerprops=self.whiskerprops,flierprops=self.flierprops, meanprops=self.meanpointprops,showmeans=True, boxprops=self.boxprops, capprops=self.capprops)
return(bp)
def add_formatting(self, title, y_label):
self.axes.set_title(title, fontsize=25)
self.axes.set_aspect(25)
self.axes.set_ylabel(y_label, fontsize=30)
ylabels = self.axes.get_yticklabels()
xlabels = self.axes.get_xticklabels()
for label in (self.axes.get_xticklabels()):
label.set_fontsize(18)
label.set_rotation('vertical')
for label in (self.axes.get_yticklabels()):
label.set_fontsize(20)
for l in self.axes.get_xticklines() + self.axes.get_yticklines():
l.set_markersize(0)
```
#### File: Swing/util/Reporter.py
```python
class Reporter:
"""Generates a pdf report"""
def __init__(self):
self.set_heatmaps(
```
#### File: Swing/util/Scanmap.py
```python
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt


class Scanmap:
"""A heatmap and line plot combined into one figure"""
def __init__(self, dim = None):
if dim:
self.set_dimensions(dim)
else:
default_dim = { 'gp_left': 0.2,
'gp_bottom': 0.1,
'gp_width': 0.7,
'gp_height': 0.2,
'padding': 0.01,
'numTFs': 20,
'dm_left': 0.2,
'dm_bottom': 0.32,
'dm_width':0.7,
'box_height':0.03,
'dm_height':0.6 }
self.set_dimensions(default_dim)
#initialize colormap
        self.tableau20 = [(152, 223, 138), (31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
                          (44, 160, 44), (255, 152, 150), (148, 103, 189), (197, 176, 213), (140, 86, 75),
                          (196, 156, 148), (227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
                          (188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229), (214, 39, 40)]
        for i in range(len(self.tableau20)):
            r, g, b = self.tableau20[i]
            self.tableau20[i] = (r / 255., g / 255., b / 255.)
#initialize axes
f = plt.figure(figsize=(10,10))
d = self.dimensions
        axarr2 = f.add_axes([d['gp_left'], d['gp_bottom'], d['gp_width'], d['gp_height']])
        axarr1 = f.add_axes([d['dm_left'], d['dm_bottom'], d['dm_width'], d['dm_height']])
def set_dimensions(self, dim_dict):
self.dimensions = dim_dict
return(dim_dict)
``` |
{
"source": "jiaw-z/DenseMatchingBenchmark",
"score": 3
} |
#### File: evaluation/flow/eval.py
```python
import warnings
from collections import abc as container_abcs
import torch
from dmb.data.datasets.evaluation.flow.pixel_error import calc_error
def remove_padding(batch, size):
"""
    Usually, the SceneFlow image size is [540, 960], and it is often padded to [544, 960] for evaluation;
    for KITTI, the image is padded to [384, 1248].
    Here, we mainly remove the padding from the estimated tensor, such as the flow map
Args:
batch (torch.Tensor): in [BatchSize, Channel, Height, Width] layout
size (list, tuple): the last two dimensions are desired [Height, Width]
"""
error_msg = "batch must contain tensors, dicts or lists; found {}"
if isinstance(batch, torch.Tensor):
# Crop batch to desired size
# For flow, we often pad image around and keep it in the center
assert batch.shape[-2] >= size[-2] and batch.shape[-1] >= size[-1]
pad_top = (batch.shape[-2] - size[-2])//2
pad_left = (batch.shape[-1] - size[-1])//2
# pad_right = batch.shape[-1] - size[-1]
        batch = batch[:, :, pad_top:pad_top + size[-2], pad_left:pad_left + size[-1]]
return batch
elif isinstance(batch, container_abcs.Mapping):
return {key: remove_padding(batch[key], size) for key in batch}
elif isinstance(batch, container_abcs.Sequence):
return [remove_padding(samples, size) for samples in batch]
raise TypeError((error_msg.format(type(batch))))
def do_evaluation(est_flow, gt_flow, sparse=False):
"""
Do pixel error evaluation. (See KITTI evaluation protocols for details.)
Args:
est_flow, (Tensor): estimated flow map, in [BatchSize, 2, Height, Width] or
[2, Height, Width] layout
gt_flow, (Tensor): ground truth flow map, in [BatchSize, 2, Height, Width] or
[2, Height, Width]layout
sparse, (bool): whether the given flow is sparse, default False
Returns:
error_dict (dict): the error of 1px, 2px, 3px, 5px, in percent,
range [0,100] and average error epe
"""
error_dict = {}
if est_flow is None:
warnings.warn('Estimated flow map is None')
return error_dict
if gt_flow is None:
warnings.warn('Reference ground truth flow map is None')
return error_dict
if torch.is_tensor(est_flow):
est_flow = est_flow.clone().cpu()
if torch.is_tensor(gt_flow):
gt_flow = gt_flow.clone().cpu()
error_dict = calc_error(est_flow, gt_flow, sparse=sparse)
return error_dict
```
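A minimal sketch of how the two helpers above fit together, on random tensors; the exact keys of the returned dictionary are whatever `calc_error` produces (the 1px/2px/3px/5px rates and epe described in the docstring).
```python
import torch

# Padded network output at [544, 960] and ground truth at the original [540, 960].
est = torch.rand(1, 2, 544, 960) * 10
gt = torch.rand(1, 2, 540, 960) * 10
est = remove_padding(est, gt.shape)      # crop back to [1, 2, 540, 960]
errors = do_evaluation(est, gt, sparse=False)
print(errors)
```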
#### File: datasets/flow/builder.py
```python
from dmb.data.transforms import Compose
from dmb.data.transforms import flow_trans as T
from dmb.data.datasets.flow import FlyingChairsDataset
def build_transforms(cfg, type, is_train):
input_shape = cfg.data[type].input_shape
mean = cfg.data[type].mean
std = cfg.data[type].std
if is_train:
transform = Compose(
[
# T.RandomTranslate(10),
# T.RandomRotate(angle=5, diff_angle=10),
T.ToTensor(),
T.RandomCrop(input_shape),
# T.RandomHorizontalFlip(),
# T.RandomVerticalFlip(),
T.Normalize(mean, std),
# T.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
]
)
else:
transform = Compose(
[
T.ToTensor(),
T.CenterCat(input_shape),
T.Normalize(mean, std),
]
)
return transform
def build_flow_dataset(cfg, type):
if type not in cfg.data:
return None
data_root = cfg.data[type].data_root
data_type = cfg.data[type].type
annFile = cfg.data[type].annfile
is_train = True if type == 'train' else False
transforms = build_transforms(cfg, type, is_train=is_train)
if 'FlyingChairs' in data_type:
dataset = FlyingChairsDataset(annFile, data_root, transforms)
else:
raise ValueError("invalid data type: {}".format(data_type))
return dataset
```
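A sketch of the config fields this builder reads; the concrete paths and statistics below are placeholders, not values from the repository, and in practice they would live inside an mmcv-style config object rather than a plain dict.
```python
data = dict(
    train=dict(
        type='FlyingChairs',                                 # matched against 'FlyingChairs' above
        data_root='datasets/FlyingChairs_release/data',      # placeholder path
        annfile='datasets/FlyingChairs_release/train.json',  # placeholder annotation file
        input_shape=[320, 448],                              # crop size used by RandomCrop / CenterCat
        mean=[0.45, 0.43, 0.41],                             # placeholder normalization statistics
        std=[1.0, 1.0, 1.0],
    ),
)
```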
#### File: datasets/utils/load_flow.py
```python
import re
import numpy as np
import png
def load_pfm(file_path):
"""
load image in PFM type.
Args:
file_path string: file path(absolute)
Returns:
data (numpy.array): data of image in (Height, Width[, 3]) layout
scale (float): scale of image
"""
    with open(file_path, 'rb') as fp:
color = None
width = None
height = None
scale = None
endian = None
# load file header and grab channels, if is 'PF' 3 channels else 1 channel(gray scale)
header = fp.readline().rstrip().decode('utf-8')
if header == 'PF':
color = True
elif header == 'Pf':
color = False
else:
raise Exception('Not a PFM file.')
dim_match = re.match(r'^(\d+)\s(\d+)\s$', fp.readline().decode('utf-8'))
if dim_match:
width, height = map(int, dim_match.groups())
else:
raise Exception('Malformed PFM header.')
        scale = float(fp.readline().decode('utf-8').rstrip())
if scale < 0: # little-endian
endian = '<'
scale = -scale
else:
endian = '>' # big-endian
data = np.fromfile(fp, endian + 'f')
shape = (height, width, 3) if color else (height, width)
data = np.reshape(data, shape)
data = np.flipud(data)
return data, scale
def load_png(file_path):
"""
Read from KITTI .png file
Args:
file_path string: file path(absolute)
Returns:
data (numpy.array): data of image in (Height, Width, 3) layout
"""
flow_object = png.Reader(filename=file_path)
flow_direct = flow_object.asDirect()
flow_data = list(flow_direct[2])
(w, h) = flow_direct[3]['size']
flow = np.zeros((h, w, 3), dtype=np.float64)
for i in range(len(flow_data)):
flow[i, :, 0] = flow_data[i][0::3]
flow[i, :, 1] = flow_data[i][1::3]
flow[i, :, 2] = flow_data[i][2::3]
invalid_idx = (flow[:, :, 2] == 0)
flow[:, :, 0:2] = (flow[:, :, 0:2] - 2 ** 15) / 64.0
flow[invalid_idx, 0] = 0
flow[invalid_idx, 1] = 0
return flow.astype(np.float32)
def load_flo(file_path):
"""
Read .flo file in MiddleBury format
Code adapted from:
http://stackoverflow.com/questions/28013200/reading-middlebury-flow-files-with-python-bytes-array-numpy
WARNING: this will work on little-endian architectures (eg Intel x86) only!
Args:
file_path string: file path(absolute)
Returns:
flow (numpy.array): data of image in (Height, Width, 2) layout
"""
with open(file_path, 'rb') as f:
magic = np.fromfile(f, np.float32, count=1)
assert(magic == 202021.25)
w = int(np.fromfile(f, np.int32, count=1))
h = int(np.fromfile(f, np.int32, count=1))
# print('Reading %d x %d flo file\n' % (w, h))
flow = np.fromfile(f, np.float32, count=2 * w * h)
# Reshape data into 3D array (columns, rows, bands)
# The reshape here is for visualization, the original code is (w,h,2)
flow = np.resize(flow, (h, w, 2))
return flow
def write_flo(file_path, uv, v=None):
""" Write optical flow to file.
If v is None, uv is assumed to contain both u and v channels,
stacked in depth.
Original code by <NAME>, adapted from <NAME>.
"""
nBands = 2
if v is None:
assert (uv.ndim == 3)
assert (uv.shape[2] == 2)
u = uv[:, :, 0]
v = uv[:, :, 1]
else:
u = uv
assert (u.shape == v.shape)
height, width = u.shape
f = open(file_path, 'wb')
# write the header
np.array([202021.25]).astype(np.float32).tofile(f)
np.array(width).astype(np.int32).tofile(f)
np.array(height).astype(np.int32).tofile(f)
# arrange into matrix form
tmp = np.zeros((height, width * nBands))
tmp[:, np.arange(width) * 2] = u
tmp[:, np.arange(width) * 2 + 1] = v
tmp.astype(np.float32).tofile(f)
f.close()
# load utils
def load_flying_chairs_flow(img_path):
"""load flying chairs flow image
Args:
img_path:
Returns:
"""
assert img_path.endswith('.flo'), "flying chairs flow image must end with .flo " \
"but got {}".format(img_path)
flow_img = load_flo(img_path)
return flow_img
# load utils
def write_flying_chairs_flow(img_path, uv, v=None):
"""write flying chairs flow image
Args:
img_path:
Returns:
"""
assert img_path.endswith('.flo'), "flying chairs flow image must end with .flo " \
"but got {}".format(img_path)
write_flo(img_path, uv, v)
# load utils
def load_flying_things_flow(img_path):
"""load flying things flow image
Args:
img_path:
Returns:
"""
assert img_path.endswith('.pfm'), "flying things flow image must end with .pfm " \
"but got {}".format(img_path)
flow_img, __ = load_pfm(img_path)
return flow_img
# load utils
def load_kitti_flow(img_path):
"""load KITTI 2012/2015 flow image
Args:
img_path:
Returns:
"""
assert img_path.endswith('.png'), "KITTI 2012/2015 flow image must end with .png " \
"but got {}".format(img_path)
flow_img = load_png(img_path)
return flow_img
```
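A round-trip sketch for the MiddleBury `.flo` helpers above, writing a random flow field to a scratch file and reading it back; only standard-library and NumPy names are added.
```python
import os
import tempfile
import numpy as np

flow = np.random.rand(4, 6, 2).astype(np.float32)        # [Height, Width, 2]
path = os.path.join(tempfile.gettempdir(), 'example.flo')
write_flying_chairs_flow(path, flow)
loaded = load_flying_chairs_flow(path)
assert loaded.shape == (4, 6, 2)
np.testing.assert_allclose(loaded, flow)
```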
#### File: dmb/modeling/__init__.py
```python
from .flow.models import _META_ARCHITECTURES as _FLOW_META_ARCHITECTURES
from .stereo.models import _META_ARCHITECTURES as _STEREO_META_ARCHITECTURES
_META_ARCHITECTURES = dict()
_META_ARCHITECTURES.update(_FLOW_META_ARCHITECTURES)
_META_ARCHITECTURES.update(_STEREO_META_ARCHITECTURES)
def build_model(cfg):
meta_arch = _META_ARCHITECTURES[cfg.model.meta_architecture]
return meta_arch(cfg)
```
#### File: stereo/backbones/StereoNet.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from dmb.modeling.stereo.layers.basic_layers import conv_bn, conv_bn_relu, BasicBlock
class DownsampleHead(nn.Module):
"""
Args:
in_planes (int): the channels of input
out_planes (int): the channels of output
batchNorm, (bool): whether use batch normalization layer, default True
Inputs:
x, (tensor): feature in (BatchSize, in_planes, Height, Width) layout
Outputs:
down_x, (tensor): downsampled feature in (BatchSize, out_planes, Height, Width) layout
"""
def __init__(self, in_planes, out_planes, batch_norm=True):
super(DownsampleHead, self).__init__()
self.in_planes = in_planes
self.out_planes = out_planes
self.batch_norm = batch_norm
self.downsample = nn.Conv2d(in_planes, out_planes, kernel_size=5,
stride=2, padding=2, bias=True)
def forward(self, x):
down_x = self.downsample(x)
return down_x
class StereoNetBackbone(nn.Module):
"""
Backbone proposed in StereoNet.
Args:
in_planes (int): the channels of input
batch_norm (bool): whether use batch normalization layer, default True
        downsample_num (int): the number of downsample modules;
            the input RGB image will be downsampled to 1/2^num resolution, default 3, i.e., 1/8 resolution
residual_num (int): the number of residual blocks, used for robust feature extraction
Inputs:
l_img (Tensor): left image, in [BatchSize, 3, Height, Width] layout
r_img (Tensor): right image, in [BatchSize, 3, Height, Width] layout
Outputs:
l_fms (Tensor): left image feature maps, in [BatchSize, 32, Height//8, Width//8] layout
r_fms (Tensor): right image feature maps, in [BatchSize, 32, Height//8, Width//8] layout
"""
def __init__(self, in_planes=3, batch_norm=True, downsample_num=3, residual_num=6):
super(StereoNetBackbone, self).__init__()
self.in_planes = in_planes
self.batch_norm = batch_norm
self.downsample_num = downsample_num
self.residual_num = residual_num
# Continuously downsample the input RGB image to 1/2^num resolution
in_planes = self.in_planes
out_planes = 32
self.downsample = nn.ModuleList()
for _ in range(self.downsample_num):
self.downsample.append(DownsampleHead(in_planes, out_planes))
in_planes = out_planes
out_planes = 32
# Build residual feature extraction module
self.residual_blocks = nn.ModuleList()
for _ in range(self.residual_num):
self.residual_blocks.append(BasicBlock(
self.batch_norm, 32, 32, stride=1, downsample=None, padding=1, dilation=1
))
self.lastconv = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1, bias=True)
def _forward(self, x):
for i in range(self.downsample_num):
x = self.downsample[i](x)
for i in range(self.residual_num):
x = self.residual_blocks[i](x)
output_feature = self.lastconv(x)
return output_feature
def forward(self, *input):
if len(input) != 2:
raise ValueError('expected input length 2 (got {} length input)'.format(len(input)))
l_img, r_img = input
l_fms = self._forward(l_img)
r_fms = self._forward(r_img)
return l_fms, r_fms
```
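A shape sanity check for the backbone above (a minimal sketch; it assumes the repository's `BasicBlock` keeps the spatial size at stride 1): with `downsample_num=3` the features come out at 1/8 of the input resolution with 32 channels.
```python
import torch

backbone = StereoNetBackbone(in_planes=3, batch_norm=True, downsample_num=3, residual_num=6)
left = torch.rand(2, 3, 256, 512)
right = torch.rand(2, 3, 256, 512)
l_fms, r_fms = backbone(left, right)
print(l_fms.shape)   # torch.Size([2, 32, 32, 64]) -> 1/8 resolution
```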
#### File: cost_processors/aggregators/builder.py
```python
from .GCNet import GCAggregator
from .PSMNet import PSMAggregator
from .AcfNet import AcfAggregator
from .StereoNet import StereoNetAggregator
from .DeepPruner import DeepPrunerAggregator
from .AnyNet import AnyNetAggregator
AGGREGATORS = {
"GCNet": GCAggregator,
"PSMNet": PSMAggregator,
"AcfNet": AcfAggregator,
'StereoNet': StereoNetAggregator,
'DeepPruner': DeepPrunerAggregator,
'AnyNet': AnyNetAggregator,
}
def build_cost_aggregator(cfg):
agg_type = cfg.model.cost_processor.cost_aggregator.type
assert agg_type in AGGREGATORS, "cost_aggregator type not found, excepted: {}," \
"but got {}".format(AGGREGATORS.keys(), agg_type)
default_args = cfg.model.cost_processor.cost_aggregator.copy()
default_args.pop('type')
default_args.update(batch_norm=cfg.model.batch_norm)
aggregator = AGGREGATORS[agg_type](**default_args)
return aggregator
```
#### File: cost_processors/utils/cost_norm.py
```python
import torch
import torch.nn as nn
eps = 1e-5
class _CostVolumeNorm(nn.Module):
"""
Normalize Cost Volume
Args:
dim (int): which dim to apply normalization operation, default dim is for the cost dim.
affine (bool): whether the parameters are learnable, default is True
weight (float): weight for cost re-range
bias (float): bias for cost
Shape:
- Input: :math:`(N, *)`
- Output: :math:`(N, *)` (same shape as input)
"""
def __init__(self, dim=1, affine=True, weight=1, bias=0):
super(_CostVolumeNorm, self).__init__()
self.dim = dim
self.affine = affine
if self.affine:
self.weight = nn.Parameter(data=torch.Tensor(1), requires_grad=True)
self.bias = nn.Parameter(data=torch.Tensor(1), requires_grad=True)
else:
self.weight = nn.Parameter(data=torch.Tensor(1), requires_grad=False)
self.bias = nn.Parameter(data=torch.Tensor(1), requires_grad=False)
# init weight and bias
self.weight.data.fill_(weight)
self.bias.data.fill_(bias)
def forward(self, input):
raise NotImplementedError
class RangeNorm(_CostVolumeNorm):
def __init__(self, dim=1, affine=True, weight=1, bias=0):
super(RangeNorm, self).__init__(dim=dim, affine=affine, weight=weight, bias=bias)
def forward(self, input):
# compute mean value
mean = input.min(dim=self.dim, keepdim=True)[0]
# compute margin
var = input.max(dim=self.dim, keepdim=True)[0] - input.min(dim=self.dim, keepdim=True)[0]
# normalize
normalized_input = (input - mean) / (var + eps)
# apply weight and bias
output = normalized_input * self.weight + self.bias
return output
class VarNorm(_CostVolumeNorm):
def __init__(self, dim=1, affine=True, weight=1, bias=0):
super(VarNorm, self).__init__(dim=dim, affine=affine, weight=weight, bias=bias)
def forward(self, input):
# compute mean value
mean = input.mean(dim=self.dim, keepdim=True)
# compute var value
var = input.var(dim=self.dim, keepdim=True)
# normalize
normalized_input = (input - mean).abs() / (var + eps)
# apply weight and bias
output = normalized_input * self.weight + self.bias
return output
class StdNorm(_CostVolumeNorm):
def __init__(self, dim=1, affine=True, weight=1, bias=0):
super(StdNorm, self).__init__(dim=dim, affine=affine, weight=weight, bias=bias)
def forward(self, input):
# compute mean value
mean = input.mean(dim=self.dim, keepdim=True)
# compute var value
var = input.std(dim=self.dim, keepdim=True)
# normalize
normalized_input = (input - mean).abs() / (var + eps)
# apply weight and bias
output = normalized_input * self.weight + self.bias
return output
class SigmoidNorm(_CostVolumeNorm):
def __init__(self, dim=1, affine=True, weight=1, bias=0):
super(SigmoidNorm, self).__init__(dim=dim, affine=affine, weight=weight, bias=bias)
def forward(self, input):
# normalize
normalized_input = torch.sigmoid(input)
# apply weight and bias
output = normalized_input * self.weight + self.bias
return output
```
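A minimal sketch of the range normalization above on a random cost volume: with `weight=1, bias=0` every pixel's costs are rescaled into roughly [0, 1] along the disparity dimension.
```python
import torch

cost = torch.randn(2, 48, 64, 128)                       # [BatchSize, MaxDisparity, Height, Width]
norm = RangeNorm(dim=1, affine=False, weight=1, bias=0)
out = norm(cost)
print(out.shape, float(out.min()), float(out.max()))     # same shape, values in ~[0, 1]
```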
#### File: stereo/disp_predictors/faster_soft_argmin.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
class FasterSoftArgmin(nn.Module):
"""
A faster implementation of soft argmin.
details can refer to dmb.modeling.stereo.disp_predictors.soft_argmin
Args:
max_disp, (int): under the scale of feature used,
often equals to (end disp - start disp + 1), the maximum searching range of disparity
start_disp (int): the start searching disparity index, usually be 0
dilation (optional, int): the step between near disparity index
        alpha (float or int): a scale factor multiplied with cost_volume,
details can refer to: https://bouthilx.wordpress.com/2013/04/21/a-soft-argmax/
normalize (bool): whether apply softmax on cost_volume, default True
Inputs:
cost_volume (Tensor): the matching cost after regularization,
in [BatchSize, disp_sample_number, Height, Width] layout
disp_sample (optional, Tensor): the estimated disparity samples,
in [BatchSize, disp_sample_number, Height, Width] layout. NOT USED!
Returns:
disp_map (Tensor): a disparity map regressed from cost volume,
in [BatchSize, 1, Height, Width] layout
"""
def __init__(self, max_disp, start_disp=0, dilation=1, alpha=1.0, normalize=True):
super(FasterSoftArgmin, self).__init__()
self.max_disp = max_disp
self.start_disp = start_disp
self.dilation = dilation
self.end_disp = start_disp + max_disp - 1
self.disp_sample_number = (max_disp + dilation - 1) // dilation
self.alpha = alpha
self.normalize = normalize
# compute disparity index: (1 ,1, disp_sample_number, 1, 1)
disp_sample = torch.linspace(
self.start_disp, self.end_disp, self.disp_sample_number
)
disp_sample = disp_sample.repeat(1, 1, 1, 1, 1).permute(0, 1, 4, 2, 3).contiguous()
self.disp_regression = nn.Conv3d(1, 1, (self.disp_sample_number, 1, 1), 1, 0, bias=False)
self.disp_regression.weight.data = disp_sample
self.disp_regression.weight.requires_grad = False
def forward(self, cost_volume, disp_sample=None):
# note, cost volume direct represent similarity
# 'c' or '-c' do not affect the performance because feature-based cost volume provided flexibility.
if cost_volume.dim() != 4:
raise ValueError('expected 4D input (got {}D input)'
.format(cost_volume.dim()))
# scale cost volume with alpha
cost_volume = cost_volume * self.alpha
if self.normalize:
prob_volume = F.softmax(cost_volume, dim=1)
else:
prob_volume = cost_volume
# [B, disp_sample_number, W, H] -> [B, 1, disp_sample_number, W, H]
prob_volume = prob_volume.unsqueeze(1)
disp_map = self.disp_regression(prob_volume)
# [B, 1, 1, W, H] -> [B, 1, W, H]
disp_map = disp_map.squeeze(1)
return disp_map
def __repr__(self):
repr_str = '{}\n'.format(self.__class__.__name__)
repr_str += ' ' * 4 + 'Max Disparity: {}\n'.format(self.max_disp)
repr_str += ' ' * 4 + 'Start disparity: {}\n'.format(self.start_disp)
repr_str += ' ' * 4 + 'Dilation rate: {}\n'.format(self.dilation)
repr_str += ' ' * 4 + 'Alpha: {}\n'.format(self.alpha)
repr_str += ' ' * 4 + 'Normalize: {}\n'.format(self.normalize)
return repr_str
@property
def name(self):
return 'FasterSoftArgmin'
```
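A minimal sketch of the predictor above: the regression it implements is d_hat = sum over d of d * softmax(alpha * c)_d, realized here as a fixed (disp_sample_number, 1, 1) Conv3d over the softmaxed cost volume.
```python
import torch

max_disp = 48                                        # e.g. 192 // 4 at quarter resolution
predictor = FasterSoftArgmin(max_disp=max_disp, start_disp=0, dilation=1, alpha=1.0, normalize=True)
cost = torch.randn(1, max_disp, 64, 128)             # [BatchSize, disp_sample_number, Height, Width]
disp = predictor(cost)
print(disp.shape)                                    # torch.Size([1, 1, 64, 128])
print(0.0 <= float(disp.min()) and float(disp.max()) <= max_disp - 1)
```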
#### File: disp_refinement/utils/edge_aware.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from dmb.modeling.stereo.layers.basic_layers import conv_bn, conv_bn_relu, BasicBlock
class EdgeAwareRefinement(nn.Module):
"""
The edge aware refinement module proposed in StereoNet.
Args:
in_planes (int): the channels of input
batch_norm (bool): whether use batch normalization layer, default True
Inputs:
disp (Tensor): estimated disparity map, in [BatchSize, 1, Height//s, Width//s] layout
leftImage (Tensor): left image, in [BatchSize, Channels, Height, Width] layout
Outputs:
refine_disp (Tensor): refined disparity map, in [BatchSize, 1, Height, Width] layout
"""
def __init__(self, in_planes, batch_norm=True):
super(EdgeAwareRefinement, self).__init__()
self.in_planes = in_planes
self.batch_norm = batch_norm
self.conv_mix = conv_bn_relu(self.batch_norm, self.in_planes, 32,
kernel_size=3, stride=1, padding=1, dilation=1, bias=True)
# Dilated residual module
self.residual_dilation_blocks = nn.ModuleList()
self.dilation_list = [1, 2, 4, 8, 1, 1]
for dilation in self.dilation_list:
self.residual_dilation_blocks.append(
BasicBlock(self.batch_norm, 32, 32, stride=1, downsample=None,
padding=1, dilation=dilation)
)
self.conv_res = nn.Conv2d(32, 1, kernel_size=3, stride=1, padding=1, bias=True)
def forward(self, disp, leftImage):
h, w = leftImage.shape[-2:]
# the scale of downsample
scale = w / disp.shape[-1]
# upsample disparity map to image size, in [BatchSize, 1, Height, Width]
up_disp = F.interpolate(disp, size=(h, w), mode='bilinear', align_corners=False)
up_disp = up_disp * scale
# residual refinement
# mix the info inside the disparity map and left image
mix_feat = self.conv_mix(torch.cat((up_disp, leftImage), dim=1))
for block in self.residual_dilation_blocks:
mix_feat = block(mix_feat)
# get residual disparity map, in [BatchSize, 1, Height, Width]
res_disp = self.conv_res(mix_feat)
# refine the upsampled disparity map, in [BatchSize, 1, Height, Width]
refine_disp = res_disp + up_disp
# promise all disparity value larger than 0, in [BatchSize, 1, Height, Width]
refine_disp = F.relu(refine_disp, inplace=True)
return refine_disp
```
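A minimal sketch of the refinement module above; `in_planes=4` because the module concatenates the up-sampled disparity (1 channel) with the RGB left image (3 channels), and the repository's conv helpers are assumed importable.
```python
import torch

refiner = EdgeAwareRefinement(in_planes=4, batch_norm=True)
disp = torch.rand(1, 1, 64, 128) * 48       # disparity estimated at 1/4 resolution
left = torch.rand(1, 3, 256, 512)           # full-resolution left image
refined = refiner(disp, left)
print(refined.shape)                        # torch.Size([1, 1, 256, 512])
```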
#### File: disp_refinement/utils/min_warp_error.py
```python
from __future__ import print_function
import torch
import torch.nn as nn
import torch.utils.data
import torch.nn.functional as F
from dmb.modeling.stereo.layers.inverse_warp import inverse_warp
from dmb.modeling.stereo.layers.basic_layers import conv_bn_relu, BasicBlock, conv_bn, deconv_bn_relu
class WarpErrorRefinement(nn.Module):
"""
Minimise the warp error to refine initial disparity map.
Args:
in_planes, (int): the channels of left feature
batch_norm, (bool): whether use batch normalization layer
Inputs:
disp, (Tensor): the left disparity map, in (BatchSize, 1, Height//s, Width//s) layout
left, (Tensor): the left image feature, in (BatchSize, Channels, Height, Width) layout
right, (Tensor): the right image feature, in (BatchSize, Channels, Height, Width) layout
Outputs:
refine_disp (Tensor): refined disparity map, in [BatchSize, 1, Height, Width] layout
"""
def __init__(self, in_planes, C=16, batch_norm=True):
super(WarpErrorRefinement, self).__init__()
self.in_planes = in_planes
self.batch_norm = batch_norm
self.C = C
self.conv_mix = conv_bn_relu(batch_norm, in_planes*4 + 1, 2*C, kernel_size=3, stride=1, padding=1, dilation=1, bias=False)
# Dilated residual module
self.residual_dilation_blocks = nn.ModuleList()
self.dilation_list = [1, 2, 4, 8, 1, 1]
for dilation in self.dilation_list:
self.residual_dilation_blocks.append(
conv_bn_relu(batch_norm, 2*C, 2*C, kernel_size=3, stride=1,
padding=dilation, dilation=dilation, bias=False)
)
self.conv_res = nn.Conv2d(2*C, 1, kernel_size=3, stride=1, padding=1, bias=True)
def forward(self, disp, left, right):
B, C, H, W = left.shape
# the scale of downsample
scale = W / disp.shape[-1]
# upsample disparity map to image size, in [BatchSize, 1, Height, Width]
up_disp = F.interpolate(disp, size=(H, W), mode='bilinear', align_corners=True)
up_disp = up_disp * scale
# calculate warp error
warp_right = inverse_warp(right, -up_disp)
error = torch.abs(left - warp_right)
# residual refinement
# mix the info inside the disparity map, left image, right image and warp error
        mix_feat = self.conv_mix(torch.cat((left, right, warp_right, error, up_disp), dim=1))
for block in self.residual_dilation_blocks:
mix_feat = block(mix_feat)
# get residual disparity map, in [BatchSize, 1, Height, Width]
res_disp = self.conv_res(mix_feat)
# refine the upsampled disparity map, in [BatchSize, 1, Height, Width]
refine_disp = res_disp + up_disp
# promise all disparity value larger than 0, in [BatchSize, 1, Height, Width]
refine_disp = F.relu(refine_disp, inplace=True)
return refine_disp
```
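A minimal sketch of the warp-error refinement above at matching feature resolution (so `scale == 1`); it assumes the repository's `inverse_warp` and conv helpers are importable. The mixing conv expects `in_planes*4 + 1` channels: left, right, warped right and the error map each contribute `in_planes` channels, plus one disparity channel.
```python
import torch

refiner = WarpErrorRefinement(in_planes=16, C=16, batch_norm=True)
left = torch.rand(1, 16, 64, 128)           # left feature map
right = torch.rand(1, 16, 64, 128)          # right feature map
disp = torch.rand(1, 1, 64, 128) * 48       # initial disparity at the same resolution
refined = refiner(disp, left, right)
print(refined.shape)                        # torch.Size([1, 1, 64, 128])
```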
#### File: stereo/layers/bilateral_filter.py
```python
import torch
import torch.nn as nn
import math
eps = 1e-12
class bilateralFilter(nn.Module):
"""
Args:
kernel_size(int, tuple): bilateral filter kernel size
        sigma_image(int, float): the standard deviation of the image-intensity Gaussian
        sigma_gaussian(int, float): the standard deviation of the spatial Gaussian
leftImage(tensor): in [BatchSize, 1, Height, Width] layout, gray image
estDisp(tensor): in [BatchSize, 1, Height, Width] layout, the estimated disparity map
Outputs:
fineDisp(tensor): in [BatchSize, 1, Height, Width] layout, the refined disparity map
"""
def __init__(self, kernel_size, sigma_image, sigma_gaussian):
super(bilateralFilter, self).__init__()
self.kernel_size = kernel_size
self.sigma_image = sigma_image
self.sigma_gaussian = sigma_gaussian
self.image_conv = []
self.image_kernel = self.create_image_kernel(self.kernel_size)
for i in range(len(self.image_kernel)):
self.image_conv.append(
nn.Conv2d(1, 1, kernel_size=kernel_size, stride=1, padding=kernel_size // 2, bias=False))
self.image_conv[i].weight.data = self.image_kernel[i]
self.image_conv[i].weight.requires_grad = False
self.disp_conv = []
self.disp_kernel = self.create_disparity_kernel(self.kernel_size)
for i in range(len(self.disp_kernel)):
self.disp_conv.append(
nn.Conv2d(1, 1, kernel_size=kernel_size, stride=1, padding=kernel_size // 2, bias=False))
self.disp_conv[i].weight.data = self.disp_kernel[i]
self.disp_conv[i].weight.requires_grad = False
def forward(self, leftImage, estDisp):
assert leftImage.shape == estDisp.shape
assert estDisp.shape[1] == 1
for i in range(len(self.disp_conv)):
self.disp_conv[i] = self.disp_conv[i].to(leftImage.device)
for i in range(len(self.image_conv)):
self.image_conv[i] = self.image_conv[i].to(leftImage.device)
index_image_conv = 0
index_disp_conv = 0
fineDisp = None
weight = None
for i in range(-(self.kernel_size // 2), (self.kernel_size // 2 + 1)):
for j in range(-(self.kernel_size // 2), (self.kernel_size // 2 + 1)):
if i == 0 and j == 0:
image_diff_weight = torch.ones_like(estDisp)
else:
image_diff_weight = (
(-self.image_conv[index_image_conv](leftImage).pow(2.0) / (2 * self.sigma_image ** 2)).exp())
index_image_conv += 1
dist = math.exp(-float(i ** 2 + j ** 2) / float(2 * self.sigma_gaussian ** 2))
dist_diff_weight = torch.full_like(estDisp, dist)
                disp = self.disp_conv[index_disp_conv](estDisp)
                if index_disp_conv == 0:
                    weight = dist_diff_weight * image_diff_weight
                    fineDisp = disp * dist_diff_weight * image_diff_weight
                else:
                    weight += dist_diff_weight * image_diff_weight
                    fineDisp += disp * dist_diff_weight * image_diff_weight
                # advance to the kernel shifted to the next (i, j) neighbour;
                # without this increment only the first shifted disparity would ever be used
                index_disp_conv += 1
fineDisp = (fineDisp + eps) / (weight + eps)
return fineDisp
def create_disparity_kernel(self, kernel_size):
total_direction = kernel_size * kernel_size
kernel = []
for i in range(total_direction):
kernel.append(torch.zeros(1, 1, total_direction))
kernel[i][:, :, i] = 1
kernel[i] = kernel[i].reshape(1, 1, kernel_size, kernel_size)
return kernel
def create_image_kernel(self, kernel_size):
total_direction = kernel_size * kernel_size
kernel = []
for i in range(total_direction):
kernel.append(torch.zeros(1, 1, total_direction))
kernel[i][:, :, i] = -1
kernel[i][:, :, total_direction // 2] = 1
kernel[i] = kernel[i].reshape(1, 1, kernel_size, kernel_size)
return kernel[:total_direction // 2] + kernel[total_direction // 2 + 1:]
```
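A minimal sketch of the joint bilateral filter above, smoothing a random disparity map under a random gray-scale guidance image; both inputs must share the same [BatchSize, 1, Height, Width] shape.
```python
import torch

bf = bilateralFilter(kernel_size=5, sigma_image=0.1, sigma_gaussian=2.0)
gray = torch.rand(1, 1, 64, 128)            # gray-scale guidance image
disp = torch.rand(1, 1, 64, 128) * 48       # noisy disparity map
refined = bf(gray, disp)
print(refined.shape)                        # torch.Size([1, 1, 64, 128])
```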
#### File: stereo/losses/conf_nll_loss.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
class ConfidenceNllLoss(object):
"""
Args:
weights (list of float or None): weight for each scale of estCost.
start_disp (int): the start searching disparity index, usually be 0
max_disp (int): the max of Disparity. default: 192
sparse (bool): whether the ground-truth disparity is sparse,
for example, KITTI is sparse, but SceneFlow is not. default is False
Inputs:
estConf (Tensor or list of Tensor): the estimated confidence map,
in [BatchSize, 1, Height, Width] layout.
gtDisp (Tensor): the ground truth disparity map,
in [BatchSize, 1, Height, Width] layout.
Outputs:
weighted_loss_all_level (dict of Tensors): the weighted loss of all levels
"""
def __init__(self, max_disp, start_disp=0, weights=None, sparse=False):
self.max_disp = max_disp
self.start_disp = start_disp
self.weights = weights
self.sparse = sparse
if sparse:
# sparse disparity ==> max_pooling
self.scale_func = F.adaptive_max_pool2d
else:
# dense disparity ==> avg_pooling
self.scale_func = F.adaptive_avg_pool2d
def loss_per_level(self, estConf, gtDisp):
N, C, H, W = estConf.shape
scaled_gtDisp = gtDisp
scale = 1.0
if gtDisp.shape[-2] != H or gtDisp.shape[-1] != W:
# compute scale per level and scale gtDisp
scale = gtDisp.shape[-1] / (W * 1.0)
scaled_gtDisp = gtDisp / scale
scaled_gtDisp = self.scale_func(scaled_gtDisp, (H, W))
# mask for valid disparity
# gt zero and lt max disparity
mask = (scaled_gtDisp > self.start_disp) & (scaled_gtDisp < (self.max_disp / scale))
mask = mask.detach_().type_as(gtDisp)
# NLL loss
valid_pixel_number = mask.float().sum()
if valid_pixel_number < 1.0:
valid_pixel_number = 1.0
loss = (-1.0 * F.logsigmoid(estConf) * mask).sum() / valid_pixel_number
return loss
def __call__(self, estConf, gtDisp):
if not isinstance(estConf, (list, tuple)):
estConf = [estConf]
if self.weights is None:
self.weights = [1.0] * len(estConf)
# compute loss for per level
loss_all_level = [
self.loss_per_level(est_conf_per_lvl, gtDisp)
for est_conf_per_lvl in estConf
]
# re-weight loss per level
weighted_loss_all_level = dict()
for i, loss_per_level in enumerate(loss_all_level):
name = "conf_loss_lvl{}".format(i)
weighted_loss_all_level[name] = self.weights[i] * loss_per_level
return weighted_loss_all_level
def __repr__(self):
repr_str = '{}\n'.format(self.__class__.__name__)
repr_str += ' ' * 4 + 'Max Disparity: {}\n'.format(self.max_disp)
repr_str += ' ' * 4 + 'Loss weight: {}\n'.format(self.weights)
repr_str += ' ' * 4 + 'Disparity is sparse: {}\n'.format(self.sparse)
return repr_str
@property
def name(self):
return 'ConfidenceNLLLoss'
```
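A minimal sketch of the confidence loss above on random inputs; the confidence map is at 1/4 resolution while the ground-truth disparity is full resolution, so the loss internally rescales and pools the ground truth before masking.
```python
import torch

loss_fn = ConfidenceNllLoss(max_disp=192, start_disp=0, weights=[1.0], sparse=False)
est_conf = torch.randn(2, 1, 64, 128)            # raw (pre-sigmoid) confidence logits
gt_disp = torch.rand(2, 1, 256, 512) * 192       # full-resolution ground-truth disparity
loss_dict = loss_fn(est_conf, gt_disp)
print(loss_dict)                                 # {'conf_loss_lvl0': tensor(...)}
```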
#### File: stereo/models/DeepPruner.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from dmb.modeling.stereo.backbones import build_backbone
from dmb.modeling.stereo.disp_samplers import build_disp_sampler
from dmb.modeling.stereo.cost_processors import build_cost_processor
from dmb.modeling.stereo.disp_refinement import build_disp_refinement
from dmb.modeling.stereo.losses import make_gsm_loss_evaluator
from dmb.modeling.stereo.losses.utils.quantile_loss import quantile_loss
class DeepPruner(nn.Module):
"""
DeepPruner: Learning Efficient Stereo Matching via Differentiable PatchMatch
Default maximum down-sample scale is 4. 8 is also optional.
"""
def __init__(self, cfg):
super(DeepPruner, self).__init__()
self.cfg = cfg.copy()
self.max_disp = cfg.model.max_disp
self.backbone = build_backbone(cfg)
self.disp_sampler = build_disp_sampler(cfg)
self.cost_processor = build_cost_processor(cfg)
self.disp_refinement = build_disp_refinement(cfg)
# make general stereo matching loss evaluator
self.loss_evaluator = make_gsm_loss_evaluator(cfg)
def forward(self, batch):
# parse batch
# [B, 3, H, W]
ref_img, tgt_img = batch['leftImage'], batch['rightImage']
target = batch['leftDisp'] if 'leftDisp' in batch else None
# extract image feature
ref_group_fms, tgt_group_fms = self.backbone(ref_img, tgt_img)
# for scale=4, [B, 32, H//4, W//4], [[B, 32, H//2, W//2]]
# for scale=8, [B, 32, H//8, W//8], [[B, 32, H//4, W//4], [B, 32, H//2, W//2]]
ref_fms, low_ref_group_fms = ref_group_fms
tgt_fms, low_tgt_group_fms = tgt_group_fms
# compute cost volume
# "pre"(Pre-PatchMatch) using patch match as sampler
# [B, patch_match_disparity_sample_number, H//4, W//4]
disparity_sample = self.disp_sampler(stage='pre', left=ref_fms, right=tgt_fms)
output = self.cost_processor(stage='pre', left=ref_fms, right=tgt_fms,
disparity_sample=disparity_sample)
# [B, 1, H//4, W//4], [B, patch_match_disparity_sample_number, H//4, W//4]
min_disparity, max_disparity, min_disparity_feature, max_disparity_feature = output
# "post"(Post-ConfidenceRangePredictor) using uniform sampler
# [B, uniform_disparity_sample_number, H//4, W//4]
disparity_sample = self.disp_sampler(stage='post', left=ref_fms, right=tgt_fms,
min_disparity=min_disparity, max_disparity=max_disparity)
output = self.cost_processor(stage='post', left=ref_fms, right=tgt_fms,
disparity_sample=disparity_sample,
min_disparity_feature=min_disparity_feature,
max_disparity_feature=max_disparity_feature)
# [B, 1, H//2, W//2], [B, uniform_disparity_sample_number, H//2, W//2]
disparity, disparity_feature = output
disps = [disparity]
# for the first refinement stage,
# the guide feature maps also including disparity feature
low_ref_group_fms[0] = torch.cat((low_ref_group_fms[0], disparity_feature), dim=1)
disps = self.disp_refinement(disps, low_ref_group_fms)
# up-sample all disparity map to full resolution
H, W = ref_img.shape[-2:]
disparity = F.interpolate(disparity * W / disparity.shape[-1], size=(H, W),
mode='bilinear',
align_corners=False)
min_disparity = F.interpolate(min_disparity * W / min_disparity.shape[-1], size=(H, W),
mode='bilinear',
align_corners=False)
        max_disparity = F.interpolate(max_disparity * W / max_disparity.shape[-1], size=(H, W),
mode='bilinear',
align_corners=False)
disps.extend([min_disparity, max_disparity])
disps = [F.interpolate(d * W / d.shape[-1], size=(H, W), mode='bilinear', align_corners=False) for d in disps]
costs = [None]
if self.training:
loss_dict = dict(
quantile_loss=quantile_loss(min_disparity, max_disparity, target,
**self.cfg.model.losses.quantile_loss.copy()),
)
loss_args = dict(
variance = None,
)
gsm_loss_dict = self.loss_evaluator(disps, costs, target, **loss_args)
loss_dict.update(gsm_loss_dict)
return {}, loss_dict
else:
# visualize the residual between min, max and the true disparity map
disps.extend([abs(disparity - min_disparity), abs(max_disparity - disparity)])
results = dict(
disps=disps,
costs=costs,
)
return results, {}
```
#### File: ops/libGANet/function.py
```python
import torch
from torch.autograd import Function
try:
from .build.lib import GANet
except ImportError:
import GANet
class SgaFunction(Function):
def __init__(self):
self.wsize = 5
def forward(self, input, g0, g1, g2, g3):
self.input = input
self.g0 = g0
self.g1 = g1
self.g2 = g2
self.g3 = g3
assert (
input.is_contiguous() == True and g0.is_contiguous() == True and g1.is_contiguous() == True and g2.is_contiguous() == True and g3.is_contiguous() == True)
with torch.cuda.device_of(input):
num, channels, depth, height, width = input.size()
output = input.new().resize_(num, channels, depth, height, width).zero_()
temp_out = input.new().resize_(num, channels, depth, height, width).zero_()
mask = input.new().resize_(num, channels, depth, height, width).zero_()
GANet.sga_cuda_forward(input, g0, g1, g2, g3, temp_out, output, mask)
output = output.contiguous()
self.save_for_backward(temp_out, mask)
return output
def backward(self, gradOutput):
temp_out, mask = self.saved_variables
assert (gradOutput.is_contiguous() == True)
with torch.cuda.device_of(gradOutput):
num, channels, depth, height, width = self.input.size()
_, _, fsize, _, _ = self.g0.size()
gradInput = gradOutput.new().resize_(num, channels, depth, height, width).zero_()
grad0 = gradOutput.new().resize_(num, channels, fsize, height, width).zero_()
grad1 = gradOutput.new().resize_(num, channels, fsize, height, width).zero_()
grad2 = gradOutput.new().resize_(num, channels, fsize, height, width).zero_()
grad3 = gradOutput.new().resize_(num, channels, fsize, height, width).zero_()
temp_grad = gradOutput.new().resize_(num, channels, depth, height, width).zero_()
max_idx = gradOutput.new().resize_(num, channels, height, width).zero_()
GANet.sga_cuda_backward(self.input, self.g0, self.g1, self.g2, self.g3, temp_out, mask, max_idx, gradOutput,
temp_grad, gradInput, grad0, grad1, grad2, grad3)
gradInput = gradInput.contiguous()
grad0 = grad0.contiguous()
grad1 = grad1.contiguous()
grad2 = grad2.contiguous()
grad3 = grad3.contiguous()
return gradInput, grad0, grad1, grad2, grad3
class Lga3d3Function(Function):
def __init__(self, radius=1):
self.radius = radius
def forward(self, input, filters):
self.input = input
self.filters = filters
assert (input.is_contiguous() == True and filters.is_contiguous() == True)
with torch.cuda.device_of(input):
num, channels, depth, height, width = input.size()
temp_out1 = input.new().resize_(num, channels, depth, height, width).zero_()
temp_out2 = input.new().resize_(num, channels, depth, height, width).zero_()
output = input.new().resize_(num, channels, depth, height, width).zero_()
GANet.lga3d_cuda_forward(input, filters, temp_out1, self.radius)
GANet.lga3d_cuda_forward(temp_out1, filters, temp_out2, self.radius)
GANet.lga3d_cuda_forward(temp_out2, filters, output, self.radius)
output = output.contiguous()
self.save_for_backward(temp_out1, temp_out2)
return output
def backward(self, gradOutput):
temp_out1, temp_out2 = self.saved_variables
assert (gradOutput.is_contiguous() == True)
with torch.cuda.device_of(gradOutput):
num, channels, depth, height, width = self.input.size()
_, _, fsize, _, _ = self.filters.size()
gradFilters = gradOutput.new().resize_(num, channels, fsize, height, width).zero_()
GANet.lga3d_cuda_backward(temp_out2, self.filters, gradOutput, temp_out2, gradFilters, self.radius)
GANet.lga3d_cuda_backward(temp_out1, self.filters, temp_out2, temp_out1, gradFilters, self.radius)
GANet.lga3d_cuda_backward(self.input, self.filters, temp_out1, temp_out2, gradFilters, self.radius)
temp_out2 = temp_out2.contiguous()
gradFilters = gradFilters.contiguous()
return temp_out2, gradFilters
class Lga3d2Function(Function):
def __init__(self, radius=1):
self.radius = radius
def forward(self, input, filters):
self.input = input
self.filters = filters
assert (input.is_contiguous() == True and filters.is_contiguous() == True)
with torch.cuda.device_of(input):
num, channels, depth, height, width = input.size()
temp_out = input.new().resize_(num, channels, depth, height, width).zero_()
output = input.new().resize_(num, channels, depth, height, width).zero_()
GANet.lga3d_cuda_forward(input, filters, temp_out, self.radius)
GANet.lga3d_cuda_forward(temp_out, filters, output, self.radius)
output = output.contiguous()
self.save_for_backward(temp_out)
return output
def backward(self, gradOutput):
temp_out, = self.saved_variables
assert (gradOutput.is_contiguous() == True)
with torch.cuda.device_of(gradOutput):
num, channels, depth, height, width = self.input.size()
_, _, fsize, _, _ = self.filters.size()
gradFilters = gradOutput.new().resize_(num, channels, fsize, height, width).zero_()
GANet.lga3d_cuda_backward(temp_out, self.filters, gradOutput, temp_out, gradFilters, self.radius)
GANet.lga3d_cuda_backward(self.input, self.filters, temp_out, gradOutput, gradFilters, self.radius)
temp_out[...] = gradOutput[...]
temp_out = temp_out.contiguous()
gradFilters = gradFilters.contiguous()
return temp_out, gradFilters
class Lga3dFunction(Function):
def __init__(self, radius=2):
self.radius = radius
def forward(self, input, filters):
self.input = input
self.filters = filters
assert (input.is_contiguous() == True and filters.is_contiguous() == True)
with torch.cuda.device_of(input):
num, channels, depth, height, width = input.size()
output = input.new().resize_(num, channels, depth, height, width).zero_()
GANet.lga3d_cuda_forward(input, filters, output, self.radius)
output = output.contiguous()
return output
def backward(self, gradOutput):
assert (gradOutput.is_contiguous() == True)
with torch.cuda.device_of(gradOutput):
num, channels, depth, height, width = self.input.size()
_, _, fsize, _, _ = self.filters.size()
gradInput = gradOutput.new().resize_(num, channels, depth, height, width).zero_()
gradFilters = gradOutput.new().resize_(num, channels, fsize, height, width).zero_()
GANet.lga3d_cuda_backward(self.input, self.filters, gradOutput, gradInput, gradFilters, self.radius)
gradInput = gradInput.contiguous()
gradFilters = gradFilters.contiguous()
return gradInput, gradFilters
class Lga3Function(Function):
def __init__(self, radius=1):
self.radius = radius
def forward(self, input, filters):
self.input = input
self.filters = filters
assert (input.is_contiguous() == True and filters.is_contiguous() == True)
with torch.cuda.device_of(input):
num, channels, height, width = input.size()
temp_out1 = input.new().resize_(num, channels, height, width).zero_()
temp_out2 = input.new().resize_(num, channels, height, width).zero_()
output = input.new().resize_(num, channels, height, width).zero_()
GANet.lga_cuda_forward(input, filters, temp_out1, self.radius)
GANet.lga_cuda_forward(temp_out1, filters, temp_out2, self.radius)
GANet.lga_cuda_forward(temp_out2, filters, output, self.radius)
output = output.contiguous()
self.save_for_backward(temp_out1, temp_out2)
return output
def backward(self, gradOutput):
temp_out1, temp_out2 = self.saved_variables
assert (gradOutput.is_contiguous() == True)
with torch.cuda.device_of(gradOutput):
num, channels, height, width = self.input.size()
_, fsize, _, _ = self.filters.size()
gradFilters = gradOutput.new().resize_(num, fsize, height, width).zero_()
GANet.lga_cuda_backward(temp_out2, self.filters, gradOutput, temp_out2, gradFilters, self.radius)
GANet.lga_cuda_backward(temp_out1, self.filters, temp_out2, temp_out1, gradFilters, self.radius)
GANet.lga_cuda_backward(self.input, self.filters, temp_out1, temp_out2, gradFilters, self.radius)
temp_out2 = temp_out2.contiguous()
gradFilters = gradFilters.contiguous()
return temp_out2, gradFilters
class Lga2Function(Function):
def __init__(self, radius=1):
self.radius = radius
def forward(self, input, filters):
self.input = input
self.filters = filters
assert (input.is_contiguous() == True and filters.is_contiguous() == True)
with torch.cuda.device_of(input):
num, channels, height, width = input.size()
temp_out = input.new().resize_(num, channels, height, width).zero_()
output = input.new().resize_(num, channels, height, width).zero_()
GANet.lga_cuda_forward(input, filters, temp_out, self.radius)
GANet.lga_cuda_forward(temp_out, filters, output, self.radius)
output = output.contiguous()
self.save_for_backward(temp_out)
return output
def backward(self, gradOutput):
temp_out, = self.saved_variables
assert (gradOutput.is_contiguous() == True)
with torch.cuda.device_of(gradOutput):
num, channels, height, width = self.input.size()
_, fsize, _, _ = self.filters.size()
gradFilters = gradOutput.new().resize_(num, fsize, height, width).zero_()
GANet.lga_cuda_backward(temp_out, self.filters, gradOutput, temp_out, gradFilters, self.radius)
GANet.lga_cuda_backward(self.input, self.filters, temp_out, gradOutput, gradFilters, self.radius)
temp_out[...] = gradOutput[...]
temp_out = temp_out.contiguous()
gradFilters = gradFilters.contiguous()
return temp_out, gradFilters
class LgaFunction(Function):
def __init__(self, radius=2):
self.radius = radius
def forward(self, input, filters):
self.input = input
self.filters = filters
assert (input.is_contiguous() == True and filters.is_contiguous() == True)
with torch.cuda.device_of(input):
num, channels, height, width = input.size()
output = input.new().resize_(num, channels, height, width).zero_()
GANet.lga_cuda_forward(input, filters, output, self.radius)
output = output.contiguous()
return output
def backward(self, gradOutput):
assert (gradOutput.is_contiguous() == True)
with torch.cuda.device_of(gradOutput):
num, channels, height, width = self.input.size()
_, fsize, _, _ = self.filters.size()
gradInput = gradOutput.new().resize_(num, channels, height, width).zero_()
gradFilters = gradOutput.new().resize_(num, fsize, height, width).zero_()
GANet.lga_cuda_backward(self.input, self.filters, gradOutput, gradInput, gradFilters, self.radius)
gradInput = gradInput.contiguous()
gradFilters = gradFilters.contiguous()
return gradInput, gradFilters
```
#### File: visualization/flow/show_result.py
```python
import matplotlib.pyplot as plt
from collections import abc as container_abcs
import numpy as np
import torch
from dmb.visualization.flow.vis import flow_to_color, tensor_to_color, flow_max_rad, chw_to_hwc, group_color
# Attention: in this framework, we always set the first result, e.g., flow map, as the best.
class ShowFlow(object):
"""
Show the result related to flow
Args:
result (dict): the result to show
Flow (list, tuple, Tensor): in [1, 2, H, W]
GroundTruth (torch.Tensor): in [1, 2, H, W]
leftImage (numpy.array): in [H, W, 3]
rightImage (numpy.array): in [H, W, 3]
Returns:
dict, mode in HWC is for save convenient, mode in CHW is for tensor-board convenient
GrayFlow (numpy.array): the original flow map output of network, will be saved to disk
in (H, W, 2) layout, value range [-inf, inf]
ColorFlow (numpy.array): the converted flow color map, will be saved to disk
in (H, W, 3) layout, value range [0,1]
GroupColor (numpy.array): in (H, W, 3) layout, value range [0, 1], will be saved to disk
Flow (list, tuple, numpy.array): the converted flow color map, will be showed on TensorBoard
in (3, H, W) layout, value range [0,1]
GroundTruth (numpy.array): in (3, H, W) layout, value range [0, 1], will be showed on TensorBoard
"""
def __call__(self, result):
self.result = result
self.getItem()
process_result = {}
if self.estFlow is not None:
firstFlow = self.getFirstItem(self.estFlow)
if firstFlow is not None:
# [H, W, 3], [H, W, 3]
grayFlow, colorFlow = self.get_gray_and_color_flow(firstFlow, self.max_rad)
process_result.update(GrayFlow=grayFlow)
process_result.update(ColorFlow=colorFlow)
# [H, W, 3]
group = self.vis_group_color(self.estFlow[0], self.gtFlow, self.leftImage, self.rightImage)
# [3, H, W]
estFlowColor = self.vis_per_flow(self.estFlow, self.max_rad)
process_result.update(Flow=estFlowColor)
process_result.update(GroupColor=group)
if self.gtFlow is not None:
# [3, H, W]
gtFlowColor = self.vis_per_flow(self.gtFlow, self.max_rad)
process_result.update(GroundTruth=gtFlowColor)
return process_result
def getItem(self):
if "GroundTruth" in self.result.keys() and self.result['GroundTruth'] is not None:
# [1, 2, H, W] -> [2, H, W]
self.gtFlow = self.result['GroundTruth'][0, :, :, :]
# [2, H, W] -> [H, W, 2] -> scalar
self.max_rad = flow_max_rad(chw_to_hwc(self.gtFlow))
else:
self.max_rad = None
self.gtFlow = None
if 'Flow' in self.result.keys():
if isinstance(self.result['Flow'], (list, tuple)):
self.estFlow = self.result['Flow']
else:
self.estFlow = [self.result['Flow']]
else:
self.estFlow = None
if 'leftImage' in self.result.keys():
self.leftImage = self.result['leftImage']
else:
self.leftImage = None
if 'rightImage' in self.result.keys():
self.rightImage = self.result['rightImage']
else:
self.rightImage = None
def getFirstItem(self, item):
if isinstance(item, container_abcs.Sequence):
return item[0]
if isinstance(item, container_abcs.Mapping):
for key in item.keys():
return item[key]
if isinstance(item, (np.ndarray, torch.Tensor)):
return item
return None
# For TensorBoard log flow map, [3, H, W]
def vis_per_flow(self, Flow, max_rad):
# change every flow map to color map
error_msg = "Flow must contain tensors, dicts or lists; found {}"
if isinstance(Flow, torch.Tensor):
return tensor_to_color(Flow.clone(), max_rad)
elif isinstance(Flow, container_abcs.Mapping):
return {key: self.vis_per_flow(Flow[key], max_rad) for key in Flow}
elif isinstance(Flow, container_abcs.Sequence):
return [self.vis_per_flow(samples, max_rad) for samples in Flow]
raise TypeError((error_msg.format(type(Flow))))
# For saving flow map, [C, H, W]
def get_gray_and_color_flow(self, Flow, max_rad=None):
assert isinstance(Flow, (np.ndarray, torch.Tensor))
if torch.is_tensor(Flow):
Flow = Flow.clone().detach().cpu()
if len(Flow.shape) == 4:
Flow = Flow[0, :, :, :]
# [2, H, W] -> [H, W, 2]
Flow = chw_to_hwc(Flow)
# [H, W, 2]
grayFlow = Flow.copy()
# [H, W, 3]
colorFlow = flow_to_color(Flow.copy(), max_rad=max_rad)
return grayFlow, colorFlow
def vis_group_color(self, estFlow, gtFlow=None, leftImage=None, rightImage=None, save_path=None):
"""
Args:
estFlow, (tensor or numpy.array): in (1, 2, Height, Width) or (2, Height, Width) layout
gtFlow, (None or tensor or numpy.array): in (1, 2, Height, Width) or (2, Height, Width) layout
leftImage, (None or numpy.array), in (Height, Width, 3) layout
rightImage, (None or numpy.array), in (Height, Width, 3) layout
save_path, (None or String)
Output:
details refer to dmb.visualization.group_color, (Height, Width, 3)
"""
assert isinstance(estFlow, (np.ndarray, torch.Tensor))
if torch.is_tensor(estFlow):
estFlow = estFlow.clone().detach().cpu().numpy()
if estFlow.ndim == 4:
estFlow = estFlow[0, :, :, :]
if gtFlow is not None:
assert isinstance(gtFlow, (np.ndarray, torch.Tensor))
if torch.is_tensor(gtFlow):
gtFlow = gtFlow.clone().detach().cpu().numpy()
if gtFlow.ndim == 4:
gtFlow = gtFlow[0, :, :, :]
# [2, H, W] -> [H, W, 2]
estFlow = chw_to_hwc(estFlow)
gtFlow = chw_to_hwc(gtFlow)
return group_color(estFlow, gtFlow, leftImage, rightImage, save_path)
class ShowResultTool(object):
def __init__(self):
self.show_flow_tool = ShowFlow()
def __call__(self, result):
process_result = {}
process_result.update(self.show_flow_tool(result))
return process_result
```
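A minimal usage sketch (not part of the repository) may help clarify the dictionary layouts described in the `ShowFlow` docstring; the shapes mirror how `DistFlowVisHook` in `vis_hooks.py` below feeds this class, and it assumes the class above and the `dmb.visualization.flow.vis` helpers are importable.
```python
# Hedged sketch: build a result dict with the layouts from the ShowFlow docstring
# ([1, 2, H, W] flow tensors, [H, W, 3] images) and inspect what comes back.
import numpy as np
import torch
show = ShowFlow()  # assumes the class defined above is in scope
result = {
    'Flow': torch.rand(1, 2, 64, 96),         # estimated flow
    'GroundTruth': torch.rand(1, 2, 64, 96),  # ground-truth flow
    'leftImage': np.random.rand(64, 96, 3),
    'rightImage': np.random.rand(64, 96, 3),
}
vis = show(result)
# expected keys per the docstring: ColorFlow, Flow, GrayFlow, GroundTruth, GroupColor
print(sorted(vis.keys()))
```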
#### File: visualization/flow/vis_hooks.py
```python
import os
import os.path as osp
from collections import abc as container_abcs
import numpy as np
from imageio import imread
import matplotlib.pyplot as plt
import torch
import torch.distributed as dist
from torch.utils.data import Dataset
import mmcv
from mmcv import mkdir_or_exist
from mmcv.runner import Hook, obj_from_dict
from mmcv.runner import LogBuffer
from mmcv.parallel import scatter, collate
from dmb.visualization.flow.show_result import ShowResultTool
from dmb.data.datasets.evaluation.flow.eval import remove_padding
def to_cpu(tensor):
error_msg = "Tensor must contain tensors, dicts or lists; found {}"
if isinstance(tensor, torch.Tensor):
return tensor.detach().cpu()
elif isinstance(tensor, container_abcs.Mapping):
return {key: to_cpu(tensor[key]) for key in tensor}
elif isinstance(tensor, container_abcs.Sequence):
return [to_cpu(samples) for samples in tensor]
raise TypeError((error_msg.format(type(tensor))))
def prepare_visualize(result, epoch, work_dir, image_name):
result_tool = ShowResultTool()
result = result_tool(result)
mkdir_or_exist(os.path.join(work_dir, image_name))
save_path = os.path.join(work_dir, image_name, '{}.png'.format(epoch))
plt.imsave(save_path, result['GroupColor'], cmap=plt.cm.hot)
log_result = {}
for pred_item in result.keys():
log_name = image_name + '/' + pred_item
if pred_item == 'Flow':
log_result['image/' + log_name] = result[pred_item]
if pred_item == 'GroundTruth':
log_result['image/' + log_name] = result[pred_item]
return log_result
class DistVisHook(Hook):
def __init__(self, dataset, cfg, interval=1):
self.cfg = cfg.copy()
if isinstance(dataset, Dataset):
self.dataset = dataset
else:
raise TypeError("dataset must be a Dataset object, not {}".format(type(dataset)))
self.interval = interval
def after_train_epoch(self, runner):
if not self.every_n_epochs(runner, self.interval):
return
runner.logger.info(
"Start Visualizing on {} dataset({} images).".format(self.dataset.name, len(self.dataset))
)
# get program bar
if runner.rank == 0:
prog_bar = mmcv.ProgressBar(len(self.dataset))
else:
prog_bar = None
runner.model.eval()
results = [None for _ in range(len(self.dataset))]
for idx in range(runner.rank, len(self.dataset), runner.world_size):
data = self.dataset[idx]
data_gpu = scatter(
collate([data], samples_per_gpu=1),
[torch.cuda.current_device()]
)[0]
# compute output
with torch.no_grad():
ori_result, _ = runner.model(data_gpu)
# remove the padding when data augmentation
flows = ori_result['flows']
ori_size = data_gpu['original_size']
flows = remove_padding(flows, ori_size)
target = data_gpu['flow'] if 'flow' in data_gpu else None
if target is not None:
target = remove_padding(target, ori_size)
result = {
'Flow': flows,
'GroundTruth': target,
}
# convert result to suitable visualization image
item = self.dataset.data_list[idx]
result['leftImage'] = imread(
osp.join(self.cfg.data.vis.data_root, item['left_image_path'])
).astype(np.float32)
result['rightImage'] = imread(
osp.join(self.cfg.data.vis.data_root, item['right_image_path'])
).astype(np.float32)
image_name = item['left_image_path'].split('/')[-1]
result = prepare_visualize(result, runner.epoch + 1, self.cfg.work_dir, image_name)
results[idx] = result
batch_size = runner.world_size
if runner.rank == 0:
for _ in range(batch_size):
prog_bar.update()
if runner.rank == 0:
print('\n')
dist.barrier()
for i in range(1, min(runner.world_size, len(self.dataset))):
tmp_file = osp.join(runner.work_dir, 'temp_{}.pkl'.format(i))
tmp_results = mmcv.load(tmp_file)
for idx in range(i, len(results), runner.world_size):
results[idx] = tmp_results[idx]
os.remove(tmp_file)
self.visualize(runner, results)
else:
tmp_file = osp.join(runner.work_dir, 'temp_{}.pkl'.format(runner.rank))
mmcv.dump(results, tmp_file)
dist.barrier()
dist.barrier()
torch.cuda.empty_cache()
def visualize(self, runner, results):
raise NotImplementedError
class DistFlowVisHook(DistVisHook):
# only log image
def visualize(self, runner, results):
for result in results:
if result is None:
continue
for key in result.keys():
runner.log_buffer.output[key] = result[key]
# runner.epoch start at 0
log_str = "Epoch [{}] Visualization Finished!".format(runner.epoch + 1)
runner.logger.info(log_str)
runner.log_buffer.ready = True
```
#### File: stereo/models/test_model.py
```python
import os
import sys
import torch
import torch.nn as nn
from thop import profile
from collections.abc import Iterable  # collections.Iterable was removed in Python 3.10
import time
import unittest
from dmb.modeling import build_model
from mmcv import Config
def clever_format(nums, format="%.2f"):
if not isinstance(nums, Iterable):
nums = [nums]
clever_nums = []
for num in nums:
if num > 1e12:
clever_nums.append(format % (num / 1e12) + "T")
elif num > 1e9:
clever_nums.append(format % (num / 1e9) + "G")
elif num > 1e6:
clever_nums.append(format % (num / 1e6) + "M")
elif num > 1e3:
clever_nums.append(format % (num / 1e3) + "K")
else:
clever_nums.append(format % num + "B")
clever_nums = clever_nums[0] if len(clever_nums) == 1 else (*clever_nums, )
return clever_nums
def calcFlops(model, input):
flops, params = profile(model, inputs=(input, ))
flops, params = clever_format([flops, params], "%.3f")
print('flops: {} \nparameters: {}'.format(flops, params))
return flops, params
class testModel(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.device = torch.device('cuda:2')
config_path = '/home/zhixiang/youmin/projects/depth/public/' \
'DenseMatchingBenchmark/configs/PSMNet/kitti_2015.py'
cls.cfg = Config.fromfile(config_path)
cls.model = build_model(cls.cfg)
cls.model.to(cls.device)
cls.setUpTimeTestingClass()
cls.avg_time = {}
@classmethod
def setUpTimeTestingClass(cls):
cls.iters = 50
h, w = 384, 1248
leftImage = torch.rand(1, 3, h, w).to(cls.device)
rightImage = torch.rand(1, 3, h, w).to(cls.device)
leftDisp = torch.rand(1, 1, h, w).to(cls.device)
batch = {'leftImage': leftImage,
'rightImage': rightImage,
'leftDisp': leftDisp, }
cls.model_input = {
'batch': batch
}
print('Input preparation successful!')
def timeTemplate(self, module, module_name, *args, **kwargs):
with torch.cuda.device(self.device):
torch.cuda.empty_cache()
if isinstance(module, nn.Module):
module.eval()
start_time = time.time()
for i in range(self.iters):
with torch.no_grad():
if len(args) > 0:
module(*args)
if len(kwargs) > 0:
module(**kwargs)
torch.cuda.synchronize(self.device)
end_time = time.time()
avg_time = (end_time - start_time) / self.iters
print('{} reference forward once takes {:.4f}ms, i.e. {:.2f}fps'.format(module_name, avg_time*1000, (1 / avg_time)))
if isinstance(module, nn.Module):
module.train()
self.avg_time[module_name] = avg_time
# @unittest.skip("demonstrating skipping")
def test_2_OutputModel(self):
print('\n', '*'*40, 'Model Configuration Result', '*'*40)
print(self.model)
calcFlops(self.model, self.model_input['batch'])
# @unittest.skip("demonstrating skipping")
def test_3_ModelTime(self):
print('\n', '*'*40, 'Runtime Test Result', '*'*40)
self.timeTemplate(self.model, 'Model', **self.model_input)
# @unittest.skip("demonstrating skipping")
def test_0_TrainingPhase(self):
h, w = self.cfg.data.train.input_shape
leftImage = torch.rand(1, 3, h, w).to(self.device)
rightImage = torch.rand(1, 3, h, w).to(self.device)
leftDisp = torch.rand(1, 1, h, w).to(self.device)
batch = {'leftImage': leftImage,
'rightImage': rightImage,
'leftDisp': leftDisp,
}
self.model.train()
_, loss_dict = self.model(batch)
print('\n', '*'*40, 'Train Result', '*'*40)
for k, v in loss_dict.items():
print(k, v)
print(self.model.loss_evaluator.loss_evaluators)
if hasattr(self.cfg.model, 'cmn'):
print(self.model.cmn.loss_evaluator.loss_evaluators)
# @unittest.skip("demonstrating skipping")
def test_1_TestingPhase(self):
h, w = self.cfg.data.test.input_shape
leftImage = torch.rand(1, 3, h, w).to(self.device)
rightImage = torch.rand(1, 3, h, w).to(self.device)
leftDisp = torch.rand(1, 1, h, w).to(self.device)
batch = {'leftImage': leftImage,
'rightImage': rightImage,
'leftDisp': leftDisp,
}
with torch.cuda.device(self.device):
torch.cuda.empty_cache()
self.model.eval()
with torch.no_grad():
result, _ = self.model(batch)
print('\n', '*'*40, 'Test Result', '*'*40)
print('Result for disparity:')
print('Length of disparity map list: ', len(result['disps']))
for i in range(len(result['disps'])):
d = result['disps'][i]
if d is not None and torch.is_tensor(d):
print('Disparity {} with shape: '.format(i), d.shape)
if 'costs' in result:
print('Result for Cost: ')
print('Length of cost list: ', len(result['costs']))
if result['costs'][0] is not None:
print(result['costs'][0].shape)
print('Device of cost: ', result['costs'][0].device)
if 'confs' in result:
print('Result for Confidence map')
print('Length of confidence list: ', len(result['confs']))
for i in range(len(result['confs'])):
conf = result['confs'][i]
if conf is not None and torch.is_tensor(conf):
print('Confidence {} with shape: '.format(i), conf.shape)
if __name__ == '__main__':
unittest.main()
```
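As a quick illustration (not part of the test file), `clever_format` scales raw FLOP/parameter counts into human-readable strings; the calls below assume the function defined above is in scope.
```python
# Single values come back as one string, sequences as a tuple of strings.
print(clever_format(1234567))                  # -> 1.23M
print(clever_format([2.5e9, 1.2e3]))           # -> ('2.50G', '1.20K')
print(clever_format([2.5e9, 1.2e3], "%.3f"))   # -> ('2.500G', '1.200K')
```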
#### File: tools/datasets/gen_sceneflow_anns.py
```python
import os
import numpy as np
import argparse
import os.path as osp
import json
from tqdm import tqdm
from mmcv import mkdir_or_exist
def getFlying3dMetas(root, Type, data_type='clean'):
Metas = []
imgDir = 'flyingthings3d/frames_' + data_type + 'pass'
dispDir = 'flyingthings3d/disparity'
Parts = ['A', 'B', 'C']
for Part in Parts:
partDir = osp.join(root, dispDir, Type, Part)
idxDirs = os.listdir(partDir)
for idxDir in idxDirs:
dispNames = os.listdir(osp.join(partDir, idxDir, 'left'))
imgNames = ["{}.png".format(name.split('.')[0]) for name in dispNames]
for imgName, dispName in zip(imgNames, dispNames):
meta = dict(
left_image_path=osp.join(
imgDir, Type, Part, idxDir, 'left', imgName
),
right_image_path=osp.join(
imgDir, Type, Part, idxDir, 'right', imgName
),
left_disp_map_path=osp.join(
dispDir, Type, Part, idxDir, 'left', dispName
),
right_disp_map_path=osp.join(
dispDir, Type, Part, idxDir, 'right', dispName
),
)
Metas.append(meta)
return Metas
def getMonkaaMetas(root, data_type='clean'):
Metas = []
imgDir = 'Monkaa/frames_' + data_type + 'pass'
dispDir = 'Monkaa/disparity'
sceneDirs = os.listdir(osp.join(root, dispDir))
for sceneDir in sceneDirs:
dispNames = os.listdir(osp.join(root, dispDir, sceneDir, 'left'))
imgNames = ["{}.png".format(name.split('.')[0]) for name in dispNames]
for imgName, dispName in zip(imgNames, dispNames):
meta = dict(
left_image_path=osp.join(
imgDir, sceneDir, 'left', imgName
),
right_image_path=osp.join(
imgDir, sceneDir, 'right', imgName
),
left_disp_map_path=osp.join(
dispDir, sceneDir, 'left', dispName
),
right_disp_map_path=osp.join(
dispDir, sceneDir, 'right', dispName
),
)
Metas.append(meta)
return Metas
def getDrivingMetas(root, data_type='clean'):
Metas = []
imgDir = 'driving/frames_' + data_type + 'pass'
dispDir = 'driving/disparity'
focalLengthDirs = os.listdir(osp.join(root, dispDir))
for focalLengthDir in focalLengthDirs:
wardDirs = os.listdir(osp.join(root, dispDir, focalLengthDir))
for wardDir in wardDirs:
speedDirs = os.listdir(osp.join(root, dispDir, focalLengthDir, wardDir))
for speedDir in speedDirs:
dispNames = os.listdir(osp.join(root, dispDir, focalLengthDir, wardDir, speedDir, 'left'))
imgNames = ["{}.png".format(name.split('.')[0]) for name in dispNames]
for imgName, dispName in zip(imgNames, dispNames):
meta = dict(
left_image_path=osp.join(
imgDir, focalLengthDir, wardDir, speedDir, 'left', imgName
),
right_image_path=osp.join(
imgDir, focalLengthDir, wardDir, speedDir, 'right', imgName
),
left_disp_map_path=osp.join(
dispDir, focalLengthDir, wardDir, speedDir, 'left', dispName
),
right_disp_map_path=osp.join(
dispDir, focalLengthDir, wardDir, speedDir, 'right', dispName
),
)
Metas.append(meta)
return Metas
def build_annoFile(root, save_annotation_root, data_type='clean'):
"""
Build annotation files for Scene Flow Dataset.
Args:
root:
"""
# check existence
assert osp.exists(root), 'Path: {} not exists!'.format(root)
mkdir_or_exist(save_annotation_root)
trainMetas = getFlying3dMetas(root, 'TRAIN', data_type)
testMetas = getFlying3dMetas(root, 'TEST', data_type)
trainMetas.extend(getMonkaaMetas(root, data_type))
trainMetas.extend(getDrivingMetas(root, data_type))
for meta in tqdm(trainMetas):
for k, v in meta.items():
assert osp.exists(osp.join(root, v)), 'trainMetas:{} not exists'.format(v)
for meta in tqdm(testMetas):
for k, v in meta.items():
assert osp.exists(osp.join(root, v)), 'testMetas: {} not exists'.format(v)
info_str = 'SceneFlow Dataset contains:\n' \
' {:5d} training samples \n' \
' {:5d} validation samples'.format(len(trainMetas), len(testMetas))
print(info_str)
def make_json(name, metas):
filepath = osp.join(save_annotation_root, data_type + 'pass_' + name + '.json')
print('Save to {}'.format(filepath))
with open(file=filepath, mode='w') as fp:
json.dump(metas, fp=fp)
make_json(name='train', metas=trainMetas)
make_json(name='test', metas=testMetas)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="SceneFlow Data PreProcess.")
parser.add_argument(
"--data-root",
default=None,
help="root of data",
type=str,
)
parser.add_argument(
"--save-annotation-root",
default='./',
help="save root of generated annotation file",
type=str,
)
parser.add_argument(
"--data-type",
default='clean',
help="the type of data, (clean or final)pass",
type=str,
)
args = parser.parse_args()
build_annoFile(args.data_root, args.save_annotation_root, args.data_type)
```
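For reference, the same entry point can also be driven directly from Python; this is only a sketch with placeholder paths, assuming `build_annoFile` above is in scope.
```python
# Hedged sketch: generate cleanpass annotations for a local SceneFlow copy.
build_annoFile(
    root='/path/to/SceneFlow',             # placeholder; must contain flyingthings3d/, Monkaa/, driving/
    save_annotation_root='./annotations/',
    data_type='clean',                     # or 'final'
)
```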
#### File: DenseMatchingBenchmark/tools/demo.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import argparse
import os
import matplotlib.pyplot as plt
import mmcv
from dmb.apis.inference import init_model, inference_stereo, is_image_file
from dmb.visualization.stereo.vis import group_color
def visualize_disp(result_pkl):
ori_data = result_pkl['OriginalData']
net_result = result_pkl['Result']
if 'disps' in net_result:
disps = net_result['disps']
best_disp = disps[0][0, 0, :, :].cpu().numpy()
else:
return
plt.imshow(group_color(best_disp, ori_data['leftDisp'], ori_data['leftImage'], ori_data['rightImage']), cmap='hot')
plt.show()
if __name__ == '__main__':
print("Start Inference Stereo ... ")
parser = argparse.ArgumentParser("DenseMatchingBenchmark Inference")
parser.add_argument(
"--config-path",
type=str,
help="config file path, e.g., ../configs/AcfNet/scene_flow_adaptive.py",
required=True,
)
parser.add_argument(
"--checkpoint-path",
type=str,
help="path to checkpoint, checkpoint download link often given in ../configs/Model/ResultOfModel.md, "
"e.g., for AcfNet, you can find download link in ../configs/AcfNet/ResultOfAcfNet.md",
required=True,
)
parser.add_argument(
"--data-root",
type=str,
help="data root contains directories including: "
"$(data-root)/images/left/: (dir for left image)"
"$(data-root)/images/right/: (dir for right image)"
"$(data-root)/disparity/left/: (dir for disparity map of left image), optional"
"$(data-root)/disparity/right/: (dir for disparity map of right image), optional",
default='./demo_data/',
)
parser.add_argument(
"--device",
type=str,
help="device for running, e.g., cpu, cuda:0",
default="cuda:0"
)
parser.add_argument(
"--log-dir",
type=str,
help="directory path for logging",
default='./output/'
)
parser.add_argument(
"--pad-to-shape",
nargs="+",
type=int,
help="image shape after padding for inference, e.g., [544, 960],"
"after inference, result will crop to original image size",
default=None,
)
parser.add_argument(
"--crop-shape",
nargs="+",
type=int,
help="image shape after cropping for inference, e.g., [512, 960]",
default=None,
)
parser.add_argument(
"--scale-factor",
type=float,
help="the scale of image upsample/downsample you want to inference, e.g., 2.0 upsample 2x, 0.5 downsample to 0.5x",
default=1.0,
)
parser.add_argument(
"--disp-div-factor",
type=float,
help="if disparity map given, after reading the disparity map, often have to divide a scale to get the real disparity value, e.g. 256 in KITTI",
default=1.0,
)
args = parser.parse_args()
    config_path = args.config_path
    assert os.path.isfile(config_path), 'config file not found: {}'.format(config_path)
    checkpoint_path = args.checkpoint_path
    assert os.path.isfile(checkpoint_path), 'checkpoint file not found: {}'.format(checkpoint_path)
    print("Start Preparing Data ... ")
    data_root = args.data_root
    assert os.path.exists(data_root), 'data root not found: {}'.format(data_root)
imageNames = os.listdir(os.path.join(data_root, 'images/left/'))
imageNames = [name for name in imageNames if is_image_file(name)]
imageNames.sort()
    assert len(imageNames) > 0, "No images found in {}".format(os.path.join(data_root, 'images/left/'))
batchesDict = []
disparity_suffix = None
if os.path.isdir(os.path.join(data_root, 'disparity/left')):
dispNames = os.listdir(os.path.join(data_root, 'disparity/left'))
disparity_suffix = {name.split('.')[-1] for name in dispNames}
for imageName in imageNames:
left_image_path = os.path.join(data_root, 'images/left/', imageName)
right_image_path = os.path.join(data_root, 'images/right/', imageName)
left_disp_map_path = None
right_disp_map_path = None
if disparity_suffix is not None:
for suf in disparity_suffix:
path = os.path.join(data_root, 'disparity/left', imageName.split('.')[0]+'.'+suf)
if os.path.isfile(path):
left_disp_map_path = path
right_disp_map_path = path.replace('disparity/left', 'disparity/right')
break
batchesDict.append({
'left_image_path': left_image_path,
'right_image_path': right_image_path,
'left_disp_map_path': left_disp_map_path,
'right_disp_map_path': right_disp_map_path,
})
print("Total {} images found".format(len(batchesDict)))
device = args.device
log_dir = args.log_dir
os.makedirs(log_dir, exist_ok=True)
print("Result will save to ", log_dir)
pad_to_shape = args.pad_to_shape
if pad_to_shape is not None:
print("Image will pad to shape: ", pad_to_shape)
crop_shape = args.crop_shape
if crop_shape is not None:
print("Image will crop to shape: ", crop_shape)
scale_factor = args.scale_factor
if scale_factor > 1.0:
print("Image will upsample: {:.2f} ".format(scale_factor))
elif scale_factor < 1.0:
print("Image will downsample: {:.2f} ".format(1.0/scale_factor))
disp_div_factor = args.disp_div_factor
print("If disparity map given, it will be divided by {:.2f} to get the real disparity value".format(disp_div_factor))
print("Initial Model ... ")
model = init_model(config_path, checkpoint_path, device)
print("Model initialed!")
print("Start Inference ... ")
inference_stereo(
model,
batchesDict,
log_dir,
pad_to_shape,
crop_shape,
scale_factor,
disp_div_factor,
device,
)
print("Inference Done!")
print("Start Visualization ... ")
for batch in batchesDict:
pkl_path = os.path.join(log_dir, batch['left_image_path'].split('/')[-1].split('.')[0], 'result.pkl')
print("Visualize ", pkl_path)
result_pkl = mmcv.load(pkl_path)
visualize_disp(result_pkl)
print("Done!")
``` |
{
"source": "jiaw-z/FCStereo",
"score": 2
} |
#### File: stereo/backbones/backbones.py
```python
from .GCNet import GCNetBackbone
from .PSMNet import PSMNetBackbone
from .StereoNet import StereoNetBackbone
from .DeepPruner import DeepPrunerBestBackbone, DeepPrunerFastBackbone
from .AnyNet import AnyNetBackbone
from .FC_PSMNet import FCPSMNetBackbone
BACKBONES = {
'GCNet': GCNetBackbone,
'PSMNet': PSMNetBackbone,
'StereoNet': StereoNetBackbone,
'BestDeepPruner': DeepPrunerBestBackbone,
'FastDeepPruner': DeepPrunerFastBackbone,
'AnyNet': AnyNetBackbone,
'FCPSMNet': FCPSMNetBackbone,
}
def build_backbone(cfg):
backbone_type = cfg.model.backbone.type
assert backbone_type in BACKBONES, \
"model backbone type not found, excepted: {}," \
"but got {}".format(BACKBONES.keys, backbone_type)
default_args = cfg.model.backbone.copy()
default_args.pop('type')
default_args.update(batch_norm=cfg.model.batch_norm)
backbone = BACKBONES[backbone_type](**default_args)
return backbone
```
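`build_backbone` only needs `cfg.model.batch_norm` plus a `cfg.model.backbone` dict whose `type` matches a key in `BACKBONES`. The sketch below is illustrative rather than taken from the repository configs; the `in_planes` keyword is an assumption about the PSMNet backbone signature.
```python
# Hedged sketch of the expected config structure.
from mmcv import Config
cfg = Config(dict(
    model=dict(
        batch_norm=True,
        backbone=dict(type='PSMNet', in_planes=3),  # in_planes is assumed, not verified
    )
))
backbone = build_backbone(cfg)  # assumes build_backbone defined above is in scope
```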
#### File: stereo/layers/instance_whitening.py
```python
import torch
import torch.nn as nn
class InstanceWhitening(nn.Module):
def __init__(self, dim):
super(InstanceWhitening, self).__init__()
self.instance_standardization = nn.InstanceNorm2d(dim, affine=False)
def forward(self, x):
x = self.instance_standardization(x)
w = x.clone()
return x, w
def instance_whitening_loss(f_map, eye, mask_matrix, num_remove_cov):
f_cor, B = get_covariance_matrix(f_map, eye=eye)
f_cor_masked = f_cor * mask_matrix
off_diag_sum = torch.sum(torch.abs(f_cor_masked), dim=(1,2), keepdim=True) # B X 1 X 1
loss = torch.clamp(torch.div(off_diag_sum, num_remove_cov), min=0) # B X 1 X 1
loss = torch.sum(loss)
return loss
def get_covariance_matrix(f_map, eye=None):
eps = 1e-5
B, C, H, W = f_map.shape # i-th feature size (B X C X H X W)
HW = H * W
if eye is None:
eye = torch.eye(C).cuda()
f_map = f_map.contiguous().view(B, C, -1) # B X C X H X W > B X C X (H X W)
f_cor = torch.bmm(f_map, f_map.transpose(1, 2)).div(HW-1) + (eps * eye) # C X C / HW
return f_cor, B
def make_cov_index_matrix(dim): # make symmetric matrix for embedding index
matrix = torch.LongTensor()
s_index = 0
for i in range(dim):
matrix = torch.cat([matrix, torch.arange(s_index, s_index + dim).unsqueeze(0)], dim=0)
s_index += (dim - (2 + i))
return matrix.triu(diagonal=1).transpose(0, 1) + matrix.triu(diagonal=1)
```
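A small CPU-only sketch (not from the repository) of how `instance_whitening_loss` can be driven: the mask keeps every off-diagonal covariance entry, and passing `eye` explicitly avoids the `.cuda()` call inside `get_covariance_matrix`. It assumes the functions above are in scope.
```python
import torch
B, C, H, W = 2, 8, 16, 16
f_map = torch.randn(B, C, H, W)        # stand-in feature map
eye = torch.eye(C)
mask_matrix = torch.ones(C, C) - eye   # penalize all off-diagonal covariances
num_remove_cov = C * (C - 1)           # number of masked-in entries
loss = instance_whitening_loss(f_map, eye, mask_matrix, num_remove_cov)
print(loss.item())
```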
#### File: dmb/utils/tensorboard_logger.py
```python
import os.path as osp
import numpy as np
import torch
from mmcv.runner import LoggerHook, master_only
class TensorboardLoggerHook(LoggerHook):
"""
Hook for starting a tensor-board logger.
Args:
log_dir (str or Path): dir to save logger file.
interval (int): logging interval, default is 10
ignore_last:
reset_flag:
register_logWithIter_keyword:
"""
def __init__(
self,
log_dir=None,
interval=10,
ignore_last=True,
reset_flag=True,
register_logWithIter_keyword=None,
):
super(TensorboardLoggerHook, self).__init__(interval, ignore_last,
reset_flag)
self.log_dir = log_dir
self.register_logWithIter_keyword = register_logWithIter_keyword
@master_only
def before_run(self, runner):
try:
from tensorboardX import SummaryWriter
except ImportError:
raise ImportError('Please install tensorflow and tensorboardX '
'to use TensorboardLoggerHook.')
else:
if self.log_dir is None:
self.log_dir = osp.join(runner.work_dir, 'tf_logs')
self.writer = SummaryWriter(self.log_dir)
@master_only
def single_log(self, tag, record, global_step):
# self-defined, in format: prefix/suffix_tag
prefix = tag.split('/')[0]
suffix_tag = '/'.join(tag.split('/')[1:])
if prefix == 'image':
self.writer.add_image(suffix_tag, record, global_step)
return
if prefix == 'figure':
self.writer.add_figure(suffix_tag, record, global_step)
return
if prefix == 'histogram':
self.writer.add_histogram(suffix_tag, record, global_step)
return
if prefix == 'scalar':
self.writer.add_scalar(suffix_tag, record, global_step)
return
if isinstance(record, str):
self.writer.add_text(tag, record, global_step)
return
if torch.is_tensor(record):
self.writer.add_scalar(tag, record, global_step)
return
if record.size > 1:
self.writer.add_image(tag, record, global_step)
else:
self.writer.add_scalar(tag, record, global_step)
@master_only
def log(self, runner):
for var in runner.log_buffer.output:
if var in ['time', 'data_time']:
continue
tag = var
record = runner.log_buffer.output[var]
global_step = runner.epoch
# for example, loss will be log as iteration
if isinstance(self.register_logWithIter_keyword, (tuple, list)):
for keyword in self.register_logWithIter_keyword:
if var.find(keyword) > -1:
global_step = runner.iter
global_step = global_step + 1
if isinstance(record, (list, tuple)):
for idx, rec in enumerate(record):
tag = var + '/' + '{}'.format(idx)
self.single_log(tag, rec, global_step)
else:
self.single_log(tag, record, global_step)
@master_only
def after_run(self, runner):
self.writer.close()
``` |
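The hook routes each record by the prefix before the first `/` in its tag, so callers push values into `runner.log_buffer.output` with tags such as `scalar/...`, `image/...`, `figure/...` or `histogram/...` (as `prepare_visualize` in `vis_hooks.py` above does). The snippet below is only an illustration of that split logic, using a plain dict in place of the log buffer.
```python
log_output = {
    'scalar/lr': 0.001,           # -> writer.add_scalar('lr', ...)
    'image/left.png/Flow': None,  # -> writer.add_image('left.png/Flow', ...)
    'histogram/disparity': None,  # -> writer.add_histogram('disparity', ...)
    'loss': 0.42,                 # no recognized prefix: falls back to the type checks
}
for tag in log_output:
    prefix = tag.split('/')[0]
    suffix_tag = '/'.join(tag.split('/')[1:])
    print(prefix, '->', suffix_tag or tag)
```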
{
"source": "JiaxiangBU/Keras-BiGAN",
"score": 2
} |
#### File: JiaxiangBU/Keras-BiGAN/bigan.py
```python
from PIL import Image
from math import floor
import numpy as np
import time
from functools import partial
from random import random
import os
im_size = 128
latent_size = 64
BATCH_SIZE = 32
directory = "Faces256"
suff = 'jpg'
cmode = 'RGB'
channels = 3
size_adjusted = False
k_images = 3
cha = 16
def noise(n):
return np.random.normal(0.0, 1.0, size = [n, latent_size])
class dataGenerator(object):
def __init__(self, loc, flip = True, suffix = 'png'):
self.flip = flip
self.suffix = suffix
self.files = []
self.n = 1e10
print("Importing Images...")
try:
os.mkdir("data/" + loc + "-npy-" + str(im_size))
except:
self.load_from_npy(loc)
return
for dirpath, dirnames, filenames in os.walk("data/" + loc):
for filename in [f for f in filenames if f.endswith("."+str(self.suffix))]:
print('\r' + str(len(self.files)), end = '\r')
fname = os.path.join(dirpath, filename)
temp = Image.open(fname).convert(cmode)
if not size_adjusted:
temp = temp.resize((im_size, im_size), Image.BILINEAR)
temp = np.array(temp, dtype='uint8')
self.files.append(temp)
if self.flip:
self.files.append(np.flip(temp, 1))
self.files = np.array(self.files)
np.save("data/" + loc + "-npy-" + str(im_size) + "/data.npy", self.files)
self.n = self.files.shape[0]
print("Found " + str(self.n) + " images in " + loc + ".")
def load_from_npy(self, loc):
print("Loading from .npy files.")
self.files = np.load("data/" + str(loc) + "-npy-" + str(im_size) + "/data.npy")
self.n = self.files.shape[0]
def get_batch(self, num):
idx = np.random.randint(0, self.n - 200, num)
out = []
for i in range(num):
out.append(self.files[idx[i]])
return np.array(out).astype('float32') / 255.0
def get_test_batch(self, num):
idx = np.random.randint(self.n - 200, self.n, num)
out = []
for i in range(num):
out.append(self.files[idx[i]])
return np.array(out).astype('float32') / 255.0
# Print iterations progress
def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 50, fill = '█'):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print('\r %s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\r')
# Print New Line on Complete
if iteration == total:
print()
print()
from keras.layers import Conv2D, Dense, AveragePooling2D, Activation, Cropping2D, Dropout, BatchNormalization
from keras.layers import Reshape, UpSampling2D, Flatten, Input, add, Lambda, concatenate, LeakyReLU, multiply
from keras.layers import GlobalAveragePooling2D, average
from keras.models import model_from_json, Model
from keras.initializers import VarianceScaling
from keras.optimizers import Adam
import keras.backend as K
def gradient_penalty_loss(y_true, y_pred, averaged_samples, weight):
gradients = K.gradients(y_pred, averaged_samples)[0]
gradients_sqr = K.square(gradients)
gradient_penalty = K.sum(gradients_sqr,
axis=np.arange(1, len(gradients_sqr.shape)))
# (weight / 2) * ||grad||^2
# Penalize the gradient norm
return K.mean(gradient_penalty) * (weight / 2)
def hinge_d(y_true, y_pred):
return K.mean(K.relu(1.0 - (y_true * y_pred)))
def w_loss(y_true, y_pred):
return K.mean(y_true * y_pred)
def g_block(inp, fil, u = True):
if u:
out = UpSampling2D(interpolation = 'bilinear')(inp)
else:
out = Activation('linear')(inp)
skip = Conv2D(fil, 1, padding = 'same', kernel_initializer = 'he_normal')(out)
out = Conv2D(filters = fil, kernel_size = 3, padding = 'same', kernel_initializer = 'he_normal')(out)
out = LeakyReLU(0.2)(out)
out = Conv2D(filters = fil, kernel_size = 3, padding = 'same', kernel_initializer = 'he_normal')(out)
out = LeakyReLU(0.2)(out)
out = Conv2D(fil, 1, padding = 'same', kernel_initializer = 'he_normal')(out)
out = add([out, skip])
out = LeakyReLU(0.2)(out)
return out
def d_block(inp, fil, p = True):
skip = Conv2D(fil, 1, padding = 'same', kernel_initializer = 'he_normal')(inp)
out = Conv2D(filters = fil, kernel_size = 3, padding = 'same', kernel_initializer = 'he_normal')(inp)
out = LeakyReLU(0.2)(out)
out = Conv2D(filters = fil, kernel_size = 3, padding = 'same', kernel_initializer = 'he_normal')(out)
out = LeakyReLU(0.2)(out)
out = Conv2D(fil, 1, padding = 'same', kernel_initializer = 'he_normal')(out)
out = add([out, skip])
out = LeakyReLU(0.2)(out)
if p:
out = AveragePooling2D()(out)
return out
class GAN(object):
def __init__(self, steps = 1, lr = 0.0001, decay = 0.00001):
#Models
self.D = None
self.E = None
self.G = None
self.GE = None
self.EE = None
self.DM = None
self.AM = None
#Config
self.LR = lr
self.steps = steps
self.beta = 0.999
#Init Models
self.discriminator()
self.generator()
self.encoder()
self.EE = model_from_json(self.E.to_json())
self.EE.set_weights(self.E.get_weights())
self.GE = model_from_json(self.G.to_json())
self.GE.set_weights(self.G.get_weights())
def discriminator(self):
if self.D:
return self.D
inp = Input(shape = [im_size, im_size, 3])
inpl = Input(shape = [latent_size])
#Latent input
l = Dense(512, kernel_initializer = 'he_normal')(inpl)
l = LeakyReLU(0.2)(l)
l = Dense(512, kernel_initializer = 'he_normal')(l)
l = LeakyReLU(0.2)(l)
l = Dense(512, kernel_initializer = 'he_normal')(l)
l = LeakyReLU(0.2)(l)
x = d_block(inp, 1 * cha) #64
x = d_block(x, 2 * cha) #32
x = d_block(x, 3 * cha) #16
x = d_block(x, 4 * cha) #8
x = d_block(x, 8 * cha) #4
x = d_block(x, 16 * cha, p = False) #4
x = Flatten()(x)
x = concatenate([x, l])
x = Dense(16 * cha, kernel_initializer = 'he_normal')(x)
x = LeakyReLU(0.2)(x)
x = Dense(1, kernel_initializer = 'he_normal')(x)
self.D = Model(inputs = [inp, inpl], outputs = x)
return self.D
def generator(self):
if self.G:
return self.G
#Inputs
inp = Input(shape = [latent_size])
#Latent
#Actual Model
x = Dense(4*4*16*cha, kernel_initializer = 'he_normal')(inp)
x = Reshape([4, 4, 16*cha])(x)
x = g_block(x, 16 * cha, u = False) #4
x = g_block(x, 8 * cha) #8
x = g_block(x, 4 * cha) #16
x = g_block(x, 3 * cha) #32
x = g_block(x, 2 * cha) #64
x = g_block(x, 1 * cha) #128
x = Conv2D(filters = 3, kernel_size = 1, activation = 'sigmoid', padding = 'same', kernel_initializer = 'he_normal')(x)
self.G = Model(inputs = inp, outputs = x)
return self.G
def encoder(self):
if self.E:
return self.E
inp = Input(shape = [im_size, im_size, 3])
x = d_block(inp, 1 * cha) #64
x = d_block(x, 2 * cha) #32
x = d_block(x, 3 * cha) #16
x = d_block(x, 4 * cha) #8
x = d_block(x, 8 * cha) #4
x = d_block(x, 16 * cha, p = False) #4
x = Flatten()(x)
x = Dense(16 * cha, kernel_initializer = 'he_normal')(x)
x = LeakyReLU(0.2)(x)
x = Dense(latent_size, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(x)
self.E = Model(inputs = inp, outputs = x)
return self.E
def AdModel(self):
#D does not update
self.D.trainable = False
for layer in self.D.layers:
layer.trainable = False
#G does update
self.G.trainable = True
for layer in self.G.layers:
layer.trainable = True
#E does update
self.E.trainable = True
for layer in self.E.layers:
layer.trainable = True
# Fake Latent / Real Image
ri = Input(shape = [im_size, im_size, 3])
er = self.E(ri)
dr = self.D([ri, er])
# Real Latent / Fake Image
gi = Input(shape = [latent_size])
gf = self.G(gi)
df = self.D([gf, gi])
self.AM = Model(inputs = [ri, gi], outputs = [dr, df])
self.AM.compile(optimizer = Adam(self.LR, beta_1 = 0, beta_2 = 0.099), loss = [w_loss, w_loss])
return self.AM
def DisModel(self):
#D does update
self.D.trainable = True
for layer in self.D.layers:
layer.trainable = True
#G does not update
self.G.trainable = False
for layer in self.G.layers:
layer.trainable = False
#E does update
self.E.trainable = False
for layer in self.E.layers:
layer.trainable = False
# Fake Latent / Real Image
ri = Input(shape = [im_size, im_size, 3])
er = self.E(ri)
dr = self.D([ri, er])
# Real Latent / Fake Image
gi = Input(shape = [latent_size])
gf = self.G(gi)
df = self.D([gf, gi])
self.DM = Model(inputs = [ri, gi], outputs = [dr, df, df])
# Create partial of gradient penalty loss
# For r1, averaged_samples = ri
# For r2, averaged_samples = gf
# Weight of 10 typically works
partial_gp_loss = partial(gradient_penalty_loss, averaged_samples = [gf, gi], weight = 5)
#Compile With Corresponding Loss Functions
self.DM.compile(optimizer = Adam(self.LR, beta_1 = 0, beta_2 = 0.909), loss=[hinge_d, hinge_d, partial_gp_loss])
return self.DM
def EMA(self):
        start = time.perf_counter()  # time.clock() was removed in Python 3.8
for i in range(len(self.G.layers)):
up_weight = self.G.layers[i].get_weights()
old_weight = self.GE.layers[i].get_weights()
new_weight = []
for j in range(len(up_weight)):
new_weight.append(old_weight[j] * self.beta + (1-self.beta) * up_weight[j])
self.GE.layers[i].set_weights(new_weight)
for i in range(len(self.E.layers)):
up_weight = self.E.layers[i].get_weights()
old_weight = self.EE.layers[i].get_weights()
new_weight = []
for j in range(len(up_weight)):
new_weight.append(old_weight[j] * self.beta + (1-self.beta) * up_weight[j])
self.EE.layers[i].set_weights(new_weight)
#print("Moved Average. " + str(time.clock() - start) + "s")
def MAinit(self):
self.EE.set_weights(self.E.get_weights())
self.GE.set_weights(self.G.get_weights())
class BiGAN(object):
def __init__(self, steps = 1, lr = 0.0001, decay = 0.00001, silent = True):
self.GAN = GAN(steps = steps, lr = lr, decay = decay)
self.DisModel = self.GAN.DisModel()
self.AdModel = self.GAN.AdModel()
        self.lastblip = time.perf_counter()
self.noise_level = 0
self.im = dataGenerator(directory, suffix = suff, flip = True)
self.silent = silent
#Train Generator to be in the middle, not all the way at real. Apparently works better??
self.ones = np.ones((BATCH_SIZE, 1), dtype=np.float32)
self.zeros = np.zeros((BATCH_SIZE, 1), dtype=np.float32)
self.nones = -self.ones
def train(self):
#Train Alternating
a = self.train_dis()
b = self.train_gen()
if self.GAN.steps % 10 == 0:
self.GAN.EMA()
if self.GAN.steps == 20000:
self.GAN.MAinit()
#Print info
if self.GAN.steps % 100 == 0 and not self.silent:
print("\n\nRound " + str(self.GAN.steps) + ":")
print("D: " + str(a))
print("G: " + str(b))
            s = round((time.perf_counter() - self.lastblip), 4)
steps_per_second = 100 / s
steps_per_minute = steps_per_second * 60
steps_per_hour = steps_per_minute * 60
print("Steps/Second: " + str(round(steps_per_second, 2)))
print("Steps/Hour: " + str(round(steps_per_hour)))
min1k = floor(1000/steps_per_minute)
sec1k = floor(1000/steps_per_second) % 60
print("1k Steps: " + str(min1k) + ":" + str(sec1k))
            self.lastblip = time.perf_counter()
steps_left = 200000 - self.GAN.steps + 1e-7
hours_left = steps_left // steps_per_hour
minutes_left = (steps_left // steps_per_minute) % 60
print("Til Completion: " + str(int(hours_left)) + "h" + str(int(minutes_left)) + "m")
print()
#Save Model
if self.GAN.steps % 500 == 0:
self.save(floor(self.GAN.steps / 10000))
if self.GAN.steps % 1000 == 0 or (self.GAN.steps % 100 == 0 and self.GAN.steps < 1000):
self.evaluate(floor(self.GAN.steps / 1000))
printProgressBar(self.GAN.steps % 100, 99, decimals = 0)
self.GAN.steps = self.GAN.steps + 1
def train_dis(self):
#Get Data
train_data = [self.im.get_batch(BATCH_SIZE), noise(BATCH_SIZE)]
#Train
d_loss = self.DisModel.train_on_batch(train_data, [self.ones, self.nones, self.ones])
return d_loss
def train_gen(self):
#Train
train_data = [self.im.get_batch(BATCH_SIZE), noise(BATCH_SIZE)]
g_loss = self.AdModel.train_on_batch(train_data, [self.ones, self.nones])
return g_loss
def evaluate(self, num = 0):
n1 = noise(32)
generated_images = self.GAN.G.predict(n1, batch_size = BATCH_SIZE)
real_images = self.im.get_test_batch(16)
latent_codes = self.GAN.E.predict(real_images, batch_size = BATCH_SIZE)
reconstructed_images = self.GAN.G.predict(latent_codes, batch_size = BATCH_SIZE)
print("E Mean: " + str(np.mean(latent_codes)))
print("E Std: " + str(np.std(latent_codes)))
print("E Std Featurewise: " + str(np.mean(np.std(latent_codes, axis = 0))))
print()
r = []
for i in range(0, 32, 8):
r.append(np.concatenate(generated_images[i:i+8], axis = 1))
hline = np.zeros([16, 8 * im_size, 3])
r.append(hline)
for i in range(0, 16, 8):
r.append(np.concatenate(real_images[i:i+8], axis = 1))
r.append(np.concatenate(reconstructed_images[i:i+8], axis = 1))
c1 = np.concatenate(r, axis = 0)
x = Image.fromarray(np.uint8(c1*255))
x.save("Results/i"+str(num)+".png")
# Moving Average
n1 = noise(32)
generated_images = self.GAN.GE.predict(n1, batch_size = BATCH_SIZE)
latent_codes = self.GAN.EE.predict(real_images, batch_size = BATCH_SIZE)
reconstructed_images = self.GAN.GE.predict(latent_codes, batch_size = BATCH_SIZE)
r = []
for i in range(0, 32, 8):
r.append(np.concatenate(generated_images[i:i+8], axis = 1))
hline = np.zeros([16, 8 * im_size, 3])
r.append(hline)
for i in range(0, 16, 8):
r.append(np.concatenate(real_images[i:i+8], axis = 1))
r.append(np.concatenate(reconstructed_images[i:i+8], axis = 1))
c1 = np.concatenate(r, axis = 0)
x = Image.fromarray(np.uint8(c1*255))
x.save("Results/i"+str(num)+"-ema.png")
    def prepareSamples(self, cnum = 0, num = 1000): #8x8 images, bottom row is constant
        # NOTE: relies on self.im.get_class() and nClass(), which are not defined in this file,
        # and is not called from the training loop in __main__ below.
        try:
os.mkdir("Results/Samples-c" + str(cnum))
except:
x = 0
im = self.im.get_class(cnum)
e = self.GAN.E.predict(im, batch_size = BATCH_SIZE * k_images)
mean = np.mean(e, axis = 0)
std = np.std(e, axis = 0)
n = noise(num)
nc = nClass(num, mean, std)
im = self.GAN.G.predict([n, nc], batch_size = BATCH_SIZE)
for i in range(im.shape[0]):
x = Image.fromarray(np.uint8(im[i]*255), mode = 'RGB')
x.save("Results/Samples-c" + str(cnum) + "/im ("+str(i+1)+").png")
def saveModel(self, model, name, num):
json = model.to_json()
with open("Models/"+name+".json", "w") as json_file:
json_file.write(json)
model.save_weights("Models/"+name+"_"+str(num)+".h5")
def loadModel(self, name, num):
file = open("Models/"+name+".json", 'r')
json = file.read()
file.close()
mod = model_from_json(json)
mod.load_weights("Models/"+name+"_"+str(num)+".h5")
return mod
def save(self, num): #Save JSON and Weights into /Models/
self.saveModel(self.GAN.G, "gen", num)
self.saveModel(self.GAN.D, "dis", num)
self.saveModel(self.GAN.E, "enc", num)
self.saveModel(self.GAN.GE, "genMA", num)
self.saveModel(self.GAN.EE, "encMA", num)
def load(self, num): #Load JSON and Weights from /Models/
steps1 = self.GAN.steps
#Load Models
self.GAN.G = self.loadModel("gen", num)
self.GAN.D = self.loadModel("dis", num)
self.GAN.E = self.loadModel("enc", num)
self.GAN.GE = self.loadModel("genMA", num)
self.GAN.EE = self.loadModel("encMA", num)
self.GAN.steps = steps1
self.DisModel = self.GAN.DisModel()
self.AdModel = self.GAN.AdModel()
if __name__ == "__main__":
model = BiGAN(lr = 0.0001, silent = False)
model.evaluate(0)
while model.GAN.steps <= 600000:
model.train()
``` |
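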
{
"source": "JiaxiangBU/SMARTINVEST-MACHINE-LEARNING-AND-PRODUCTIVITY-",
"score": 2
} |
#### File: JiaxiangBU/SMARTINVEST-MACHINE-LEARNING-AND-PRODUCTIVITY-/final_code.py
```python
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.metrics import confusion_matrix
import itertools
import matplotlib.pyplot as plt
# Importing the Keras libraries
from keras.layers import Dropout
from keras.layers.core import Activation
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.models import load_model
from keras.models import model_from_json
#Set the working directory to the folder data
# =============================================================================
# Part 1: Building model
# =============================================================================
# Importing the dataset
def read_data():
y = pd.read_csv('Classes1000.csv' , sep = ';')
X = pd.read_csv('portf1000.csv' , sep = ';')
X = X.iloc[:, 1:].values
y = y.iloc[:, 2].values
y = y-1
return X, y
# Tranforming y to the same length of X
def data_same_length(X, y, nb_of_portfolios, size_of_portfolio):
y_portfolios = []
for i in range(nb_of_portfolios):
for j in range(size_of_portfolio):
y_portfolios.append(y[i,])
return y_portfolios
# Split data into train and test
def split_train_test(grou, train_size,
portfolio_size,
nb_of_features,
nb_of_portfolios):
X_train=[]
X_test=[]
y_train=[]
y_test=[]
for k in range(nb_of_portfolios):
if k < int(train_size * nb_of_portfolios):
X_train.append(grou[k, 0:portfolio_size, 0:nb_of_features])
y_train.append(grou[k, 0:1, nb_of_features:nb_of_features+1])
else:
X_test.append(grou[k, 0:portfolio_size, 0:nb_of_features])
y_test.append(grou[k, 0:1, nb_of_features:nb_of_features+1])
X_test = np.array(X_test)
X_train = np.array(X_train)
y_train = np.array(y_train)
y_test = np.array(y_test)
return X_test, X_train, y_train, y_test
# Encoding categorical data
def transform_categorical_variables(data):
    labelencoder_X_1 = LabelEncoder()
    data = labelencoder_X_1.fit_transform(data)
    data = data.reshape(len(data), 1)
    # `categorical_features` was removed from OneHotEncoder in newer scikit-learn; with a
    # single reshaped column the default encoder yields the same one-hot matrix.
    onehotencoder = OneHotEncoder()
    data = onehotencoder.fit_transform(data).toarray()
return data
# Convolutional NN architecture
def convolutional_NN_model(X_train, X_test, y_train,
y_test,
batch_size,
nb_epoch):
# Step1 Convolutional layer
classifier = Sequential()
classifier.add(Conv2D(32, (1, 3), input_shape = (1, 20, 38),
activation = 'tanh'))
# Step 2 - Pooling
classifier.add(MaxPooling2D(pool_size = (1, 3)))
# Step 2(b) - Add 2nd Convolution Layer making it Deep followed by a Pooling Layer
classifier.add(Conv2D(32, (1, 3), activation = 'tanh'))
classifier.add(MaxPooling2D(pool_size = (1, 2)))
# Step 3 - Flattening
classifier.add(Flatten())
# Step 4 - Fully Connected Neural Network
# Hidden Layer - Activation Function RELU
classifier.add(Dense(units = 256, activation = 'tanh'))
# Output Layer - Activation Function Softmax(to clasify classes)
classifier.add(Dense(units = 2, activation = 'softmax'))
# Compile the CNN
# Binary Crossentropy - to classify between good and bad portfolios
classifier.compile(optimizer = 'rmsprop', loss = 'binary_crossentropy',
metrics = ['accuracy'])
    classifier.fit(X_train, y_train, batch_size, nb_epoch,
                   validation_data=(X_test, y_test))
return classifier
# transforming 2d array into a list
def transform_matrix_tolist(y_test):
y_test_array = []
for f in range(len(y_test)):
if y_test[f][0] == 1:
y_test_array.append(0)
elif y_test[f][1] == 1:
y_test_array.append(1)
y_test_array = np.array(y_test_array)
return y_test_array
if __name__ == "__main__":
# Importing the dataset
X, y = read_data()
# Tranforming y to the same length of X
y_portfolios = data_same_length(X, y, nb_of_portfolios = 1000,
size_of_portfolio = 20)
# missing data
X = pd.DataFrame(X)
X = X.fillna(0)
# Concatenate X and y
y = pd.DataFrame(y)
y = np.array(y)
y_portfolios = np.array(y_portfolios)
y_portfolios = pd.DataFrame(y_portfolios)
grou = pd.concat((X,y_portfolios),axis=1)
grou = np.array(grou)
# Reshape grou into 3 dimensions tensor
grou = grou.reshape(y.shape[0], 20, 39)
# Split data into train and test
X_test, X_train, y_train, y_test = split_train_test(grou,
train_size = 0.8,
portfolio_size = 20,
nb_of_features = 38,
nb_of_portfolios = 1000)
# Reshape y
y_test = y_test.reshape(200,1)
y_train = y_train.reshape(800,1)
# Encoding categorical data
y_train = transform_categorical_variables(data = y_train)
y_test = transform_categorical_variables(y_test)
# Reshape into 4d tensors
X_train = np.reshape(X_train, (800, 1, 20, 38))
X_test = np.reshape(X_test, (200, 1, 20, 38))
#fit model
classifier = convolutional_NN_model(X_train, X_test, y_train, y_test, 1, 100)
# =============================================================================
# Part 2: Model evaluation
# =============================================================================
# Prediction
y_pred = classifier.predict_classes(X_test, batch_size = 1)
# Transforming 2d array into a list
y_test_array = transform_matrix_tolist(y_test)
# Confusion matrix
    print(confusion_matrix(y_test_array, y_pred))
    # Classification report
    from sklearn.metrics import classification_report
    target_names = ['good', 'bad']
    print(classification_report(y_test_array, y_pred, target_names=target_names))
# =============================================================================
# Part 3: Model summary
# =============================================================================
# Model summary (architectures)
classifier.summary()
# model Configurations
classifier.get_config()
# Number of parameters in the model
classifier.count_params()
# Model weights
classifier.get_weights()
# =============================================================================
# Part 4: Save trained model parameters
# =============================================================================
## serialize weights to HDF5
#classifier.save_weights("pre_trained/final_model.h5")
#print("Saved model to disk")
#model_json = classifier.to_json()
#with open("pre_trained/final_model", "w") as json_file:
# json_file.write(model_json)
# =============================================================================
# Part5: Load Pre-trained model
# =============================================================================
# load json and create model
json_file = open('pre_trained/final_model', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("pre_trained/final_model.h5")
print("Loaded model from disk")
loaded_model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
    loaded_model.fit(X_train, y_train, batch_size=1, epochs=1,
                     validation_data=(X_test, y_test))
# evaluate loaded model on test data
score = loaded_model.evaluate(X_test, y_test, verbose=0)
print(score)
# =============================================================================
# Part 6:Confusion matrix
# =============================================================================
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Compute confusion matrix
cnf_matrix = confusion_matrix(y_test_array, y_pred)
np.set_printoptions(precision=2)
# Plot non-normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes= ("good", "bad"),
title='Confusion matrix, without normalization')
# Plot normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=("good" "bad"), normalize=True,
title='Normalized confusion matrix')
plt.show()
``` |
{
"source": "jiaxiang-cheng/DeepSurv",
"score": 3
} |
#### File: jiaxiang-cheng/DeepSurv/func.py
```python
import os
import sys
import time
import numpy as np
import visualize
import utils
localtime = time.localtime()
TIMESTRING = time.strftime("%m%d%Y%M", localtime)
# matplotlib.use('Agg')
sys.path.append("/DeepSurv/deepsurv")
def evaluate_model(model, dataset, bootstrap=False):
"""
Calculate the Concordance Index with Confidence Interval
"""
# only applicable when ground-truth hazard ratio available
def mse(model):
def deepsurv_mse(x, hr, **kwargs):
hr_pred = np.squeeze(model.predict_risk(x))
return ((hr_pred - hr) ** 2).mean()
return deepsurv_mse
# calculate C-index
metrics = {'c_index': model.get_concordance_index(**dataset)}
# calculate C-index with bootstrap to get confidence interval
if bootstrap:
metrics['c_index_bootstrap'] = utils.bootstrap_metric(model.get_concordance_index, dataset)
# calculate MSE if ground-truth hazard ratio available
if 'hr' in dataset:
metrics['mse'] = mse(model)(**dataset)
if bootstrap:
metrics['mse_bootstrap'] = utils.bootstrap_metric(mse(model), dataset)
return metrics
def dataframe_to_deepsurv_ds(df, event_col='status', time_col='time'):
# Extract the event and time columns as numpy arrays
e = df[event_col].values.astype(np.int32)
t = df[time_col].values.astype(np.float32)
# Extract the patient's covariates as a numpy array
x_df = df.drop([event_col, time_col], axis=1)
x = x_df.values.astype(np.float32)
# Return the DeepSurv dataframe
return {'x': x, 'e': e, 't': t}
def save_risk_surface_visualizations(model, dataset, norm_vals, output_dir, plot_error, experiment, trt_idx):
if experiment == 'linear':
clim = (-3, 3)
elif experiment == 'gaussian' or experiment == 'treatment':
clim = (-1, 1)
else:
clim = (0, 1)
risk_fxn = lambda x: np.squeeze(model.predict_risk(x))
color_output_file = os.path.join(output_dir, "deep_viz_color_" + TIMESTRING + ".pdf")
visualize.plot_experiment_scatters(risk_fxn, dataset, norm_vals=norm_vals,
output_file=color_output_file, figsize=(4, 3), clim=clim,
plot_error=plot_error, trt_idx=trt_idx)
bw_output_file = os.path.join(output_dir, "deep_viz_bw_" + TIMESTRING + ".pdf")
visualize.plot_experiment_scatters(risk_fxn, dataset, norm_vals=norm_vals,
output_file=bw_output_file, figsize=(4, 3), clim=clim, cmap='gray',
plot_error=plot_error, trt_idx=trt_idx)
def save_treatment_rec_visualizations(model, dataset, output_dir, trt_i=1, trt_j=0, trt_idx=0):
trt_values = np.unique(dataset['x'][:, trt_idx])
print("Recommending treatments:", trt_values)
rec_trt = model.recommend_treatment(dataset['x'], trt_i, trt_j, trt_idx)
rec_trt = np.squeeze((rec_trt < 0).astype(np.int32))
rec_dict = utils.calculate_recs_and_antirecs(rec_trt, true_trt=trt_idx, dataset=dataset)
output_file = os.path.join(output_dir, '_'.join(['deepsurv', TIMESTRING, 'rec_surv.pdf']))
print(output_file)
visualize.plot_survival_curves(experiment_name='DeepSurv', output_file=output_file, **rec_dict)
def save_model(model, output_file):
model.save_weights(output_file)
``` |
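A small sketch (not part of the repository) of `dataframe_to_deepsurv_ds` on a toy frame, showing the `{'x', 'e', 't'}` layout the other helpers expect; it assumes the function above is in scope.
```python
import pandas as pd
df = pd.DataFrame({
    'age':    [63.0, 44.0, 58.0],
    'bmi':    [27.1, 22.4, 30.8],
    'status': [1, 0, 1],         # event indicator
    'time':   [5.0, 12.5, 3.2],  # observed time
})
ds = dataframe_to_deepsurv_ds(df, event_col='status', time_col='time')
print(ds['x'].shape, ds['e'].dtype, ds['t'].dtype)  # (3, 2) int32 float32
```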
{
"source": "jiaxiang-cheng/LSTM-for-RUL-Prediction",
"score": 3
} |
#### File: jiaxiang-cheng/LSTM-for-RUL-Prediction/loading_data.py
```python
import pandas as pd
def add_rul_1(df):
"""
:param df: raw data frame
:return: data frame labeled with targets
"""
# Get the total number of cycles for each unit
grouped_by_unit = df.groupby(by="unit_nr")
max_cycle = grouped_by_unit["time_cycles"].max()
# Merge the max cycle back into the original frame
result_frame = df.merge(max_cycle.to_frame(name='max_cycle'), left_on='unit_nr', right_index=True)
# Calculate remaining useful life for each row (piece-wise Linear)
remaining_useful_life = result_frame["max_cycle"] - result_frame["time_cycles"]
result_frame["RUL"] = remaining_useful_life
# drop max_cycle as it's no longer needed
result_frame = result_frame.drop("max_cycle", axis=1)
return result_frame
def load_FD001(cut):
"""
:param cut: upper limit for target RULs
:return: grouped data per sample
"""
# load data FD001.py
# define filepath to read data
dir_path = './CMAPSSData/'
# define column names for easy indexing
index_names = ['unit_nr', 'time_cycles']
setting_names = ['setting_1', 'setting_2', 'setting_3']
sensor_names = ['s_{}'.format(i) for i in range(1, 22)]
col_names = index_names + setting_names + sensor_names
# read data
    train = pd.read_csv((dir_path + 'train_FD001.txt'), sep=r'\s+', header=None, names=col_names)
    test = pd.read_csv((dir_path + 'test_FD001.txt'), sep=r'\s+', header=None, names=col_names)
    y_test = pd.read_csv((dir_path + 'RUL_FD001.txt'), sep=r'\s+', header=None, names=['RUL'])
# drop non-informative features, derived from EDA
drop_sensors = ['s_1', 's_5', 's_10', 's_16', 's_18', 's_19']
drop_labels = setting_names + drop_sensors
train.drop(labels=drop_labels, axis=1, inplace=True)
title = train.iloc[:, 0:2]
data = train.iloc[:, 2:]
data_norm = (data - data.min()) / (data.max() - data.min()) # min-max normalization
# data_norm = (data-data.mean())/data.std() # standard normalization (optional)
train_norm = pd.concat([title, data_norm], axis=1)
train_norm = add_rul_1(train_norm)
# as in piece-wise linear function, there is an upper limit for target RUL,
# however, experimental results shows this goes even better without it:
# train_norm['RUL'].clip(upper=cut, inplace=True)
group = train_norm.groupby(by="unit_nr")
test.drop(labels=drop_labels, axis=1, inplace=True)
title = test.iloc[:, 0:2]
data = test.iloc[:, 2:]
data_norm = (data - data.min()) / (data.max() - data.min())
test_norm = pd.concat([title, data_norm], axis=1)
group_test = test_norm.groupby(by="unit_nr")
return group, group_test, y_test
``` |
{
"source": "jiaxiang-cheng/My-LeetCode",
"score": 4
} |
#### File: jiaxiang-cheng/My-LeetCode/mergeTwoLists.py
```python
from typing import Optional
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
def mergeTwoLists(self, list1: Optional[ListNode], list2: Optional[ListNode]) -> Optional[ListNode]:
result_list = ListNode(999) # Temp Node
result_list_head = result_list
while list1 and list2:
if list1.val <= list2.val:
result_list.next = list1
list1 = list1.next
else:
result_list.next = list2
list2 = list2.next
result_list = result_list.next
if list1 is not None:
result_list.next = list1
elif list2 is not None:
result_list.next = list2
return result_list_head.next
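# Usage sketch (illustrative only): build 1->2->4 and 1->3->4 by hand and merge them.
if __name__ == "__main__":
    def _build(vals):
        head = cur = ListNode(vals[0])
        for v in vals[1:]:
            cur.next = ListNode(v)
            cur = cur.next
        return head
    node = Solution().mergeTwoLists(_build([1, 2, 4]), _build([1, 3, 4]))
    merged = []
    while node:
        merged.append(node.val)
        node = node.next
    print(merged)  # [1, 1, 2, 3, 4, 4]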
```
#### File: jiaxiang-cheng/My-LeetCode/reverseList.py
```python
from typing import Optional
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
def reverseList(self, head: Optional[ListNode]) -> Optional[ListNode]:
rhead = None
while (head):
temp = head.next
head.next = rhead
rhead = head
head = temp
return rhead
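# Usage sketch (illustrative only): reverse the list 1->2->3.
if __name__ == "__main__":
    head = ListNode(1, ListNode(2, ListNode(3)))
    node = Solution().reverseList(head)
    vals = []
    while node:
        vals.append(node.val)
        node = node.next
    print(vals)  # [3, 2, 1]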
```
#### File: jiaxiang-cheng/My-LeetCode/reverseWords.py
```python
class Solution:
def reverseWords(self, s: str) -> str:
word = ""
rs = ""
for i, j in enumerate(s):
if j != " ":
word += j
else:
end = int(len(word) / 2)
s = list(word)
for k in range(0, end):
if s[k] == s[-(1 + k)]:
continue
else:
temp = s[k]
s[k] = s[-(1 + k)]
s[-(1 + k)] = temp
rword = ''.join(s)
if rs != "":
rs += " "
rs += rword
else:
rs = rword
word = ""
        # the loop above only flushes a word when it hits a space, so reverse the final word here
        end = int(len(word) / 2)
s = list(word)
for k in range(0, end):
if s[k] == s[-(1 + k)]:
continue
else:
temp = s[k]
s[k] = s[-(1 + k)]
s[-(1 + k)] = temp
rword = ''.join(s)
if rs != "":
rs += " "
rs += rword
else:
rs = rword
return rs
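# Usage sketch (illustrative only): each word is reversed in place, word order kept.
if __name__ == "__main__":
    print(Solution().reverseWords("Let's take LeetCode contest"))
    # "s'teL ekat edoCteeL tsetnoc"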
```
#### File: jiaxiang-cheng/My-LeetCode/rotate.py
```python
from typing import List
class Solution:
def rotate(self, nums: List[int], k: int) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
n = len(nums)
k %= n
tail = nums[- k:]
for i in range(-1, -n + k - 1, -1):
nums[i] = nums[i - k]
for i in range(k):
nums[i] = tail[i]
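# Usage sketch (illustrative only): rotate right by k=3 steps, in place.
if __name__ == "__main__":
    nums = [1, 2, 3, 4, 5, 6, 7]
    Solution().rotate(nums, 3)
    print(nums)  # [5, 6, 7, 1, 2, 3, 4]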
```
#### File: jiaxiang-cheng/My-LeetCode/updateMatrix.py
```python
from typing import List
class Solution:
def updateMatrix(self, mat: List[List[int]]) -> List[List[int]]:
distance = []
len_y = len(mat)
len_x = len(mat[0])
for i in range(len_y):
distance.append([0] * len_x)
# temp.append([0] * len_x)
for y in range(len_y):
for x in range(len_x):
if mat[y][x] == 0:
distance[y][x] = 0
else:
delta = 0
min_temp = 0
temp = []
for i in range(len_y):
# distance.append([0] * len_x)
temp.append([0] * len_x)
count = 0
count2 = 0
                    # grow the search ring until a zero cell appears, then scan two more
                    # rings so a closer zero on a larger ring is not missed
                    while count2 <= 2:
delta += 1
head_x = max(x - delta, 0)
tail_x = min(x + delta, len_x - 1)
head_y = max(y - delta, 0)
tail_y = min(y + delta, len_y - 1)
for m in range(head_x, tail_x + 1):
if mat[head_y][m] == 0:
temp[head_y][m] = abs(head_y - y) + abs(m - x)
if mat[tail_y][m] == 0:
temp[tail_y][m] = abs(tail_y - y) + abs(m - x)
for n in range(head_y, tail_y + 1):
if mat[n][head_x] == 0:
temp[n][head_x] = abs(head_x - x) + abs(n - y)
if mat[n][tail_x] == 0:
temp[n][tail_x] = abs(tail_x - x) + abs(n - y)
flag = 0
for i in range(len_y):
for j in range(len_x):
if temp[i][j] != 0 and flag == 0:
min_temp = temp[i][j]
flag = 1
if flag == 1 and temp[i][j] < min_temp and temp[i][j] != 0:
min_temp = temp[i][j]
if min_temp != 0:
count = 1
if count == 1:
count2 += 1
distance[y][x] = min_temp
return distance
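# Usage sketch (illustrative only): nearest-zero distances for a small 0/1 matrix.
if __name__ == "__main__":
    print(Solution().updateMatrix([[0, 0, 0], [0, 1, 0], [1, 1, 1]]))
    # [[0, 0, 0], [0, 1, 0], [1, 2, 1]]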
``` |
{
"source": "jiaxiangc/mmdetection-mine",
"score": 3
} |
#### File: tools/dataset_converters/move_images.py
```python
import os
import shutil
def move_images(listdir, root, prefix, destination='val'):
to_path = os.path.join(root, destination)
if not os.path.exists(to_path):
os.mkdir(to_path)
for filename in listdir:
path = prefix + '/' + filename
shutil.move(path, to_path)
if __name__ == '__main__':
root = '../../data/sirst'
prefix = '../../data/sirst/images'
val_txt_path = '../../data/sirst/idx_427/val.txt'
test_txt_path = '../../data/sirst/idx_427/test.txt'
with open(val_txt_path, 'r') as f:
val_listdir = [x.strip() + '.png' for x in f.readlines()]
with open(test_txt_path, 'r') as f:
test_listdir = [x.strip() + '.png' for x in f.readlines()]
for listdir, destination in zip([val_listdir, test_listdir], ['val', 'test']):
move_images(listdir=listdir, root=root, prefix=prefix, destination=destination)
``` |
{
"source": "jiaxiangshang/nerf",
"score": 2
} |
#### File: nerf/load_data/load_blmvs.py
```python
from baselib_python.Geometry.Camera.np_rotation import get_eye, ext_to_rot_t, get_opengl_camAxis
from baselib_python.IO.BlendMVS import load_cam, parse_pair_txt
from tf_viewSyn.nerf.load_data.load_llff import *
def load_blmvs_data(scene='cube', basedir='/data/deepvoxels', interv=10, mask=False):
dic_base = '{}/{}/'.format(basedir, scene)
path_pair = os.path.join(dic_base, 'cams', 'pair.txt')
path_cam_pattern = os.path.join(dic_base, 'cams', '%.8d_cam.txt')
if mask:
pass
else:
path_img_pattern = os.path.join(dic_base, 'blended_images', '%.8d.jpg')
num_render = parse_pair_txt(path_pair)
list_c2w = []
list_cams = []
list_imgs = []
list_val_c2w = []
list_val_cams = []
list_val_imgs = []
for i in num_render:
cam = load_cam(path_cam_pattern%i)
img = imageio.imread(path_img_pattern%i) / 255.
#
H, W, _ = img.shape
focal = cam[1][0][0]
hwf = [H, W, focal]
hwf_np = np.array(hwf)
hwf_np = np.expand_dims(hwf_np, -1)
if 0:
# w2c convert to c2w
rot, trans = ext_to_rot_t(cam[0])
c2w_opengl = get_opengl_camAxis(rot, trans)
rot_c2w, trans_c2w = ext_to_rot_t(c2w_opengl)
position = get_eye(rot_c2w, trans_c2w)
position = np.expand_dims(position, -1)
c2w = np.concatenate([np.transpose(rot_c2w), position, hwf_np], axis=1)
else:
# rot, trans = ext_to_rot_t(cam[0])
# position = get_eye(rot, trans)
# position = np.expand_dims(position, -1)
# c2w = np.concatenate([np.transpose(rot), position, hwf_np], axis=1)
c2w = np.linalg.inv(cam[0])[:3, :4]
c2w = np.concatenate([c2w[:, 0:1], -c2w[:, 1:2], -c2w[:, 2:]], 1)
c2w = np.concatenate([c2w, hwf_np], axis=1)
#c2w = np.concatenate([rot, position, hwf_np], axis=1)
#cam[2][:3, :] = c2w[:3, :4]
#
if i % interv == 0:
list_val_imgs.append(img)
list_val_cams.append(cam)
list_val_c2w.append(c2w)
else:
list_imgs.append(img)
list_cams.append(cam)
list_c2w.append(c2w)
#
all_imgs = [list_imgs, list_val_imgs, list_val_imgs]
all_cams = list_cams + list_val_cams + list_val_cams
all_c2ws = list_c2w + list_val_c2w + list_val_c2w
counts = [0] + [len(x) for x in all_imgs]
counts = np.cumsum(counts)
i_split = [np.arange(counts[i], counts[i + 1]) for i in range(3)]
all_imgs = list_imgs + list_val_imgs + list_val_imgs
stk_imgs = np.stack(all_imgs, 0).astype(np.float32)
stk_cams = np.stack(all_cams, 0).astype(np.float32)
stk_c2ws = np.stack(all_c2ws, 0).astype(np.float32)
print('load_blmvs_data: img shape, cam shape', stk_imgs.shape, stk_cams.shape)
stk_depth_min = stk_cams[:, 1, 3, 0].min()
stk_depth_max = stk_cams[:, 1, 3, 3].max()
if 0:
# generate render pose
c2w = poses_avg(stk_c2ws)
print('load_blmvs_data: recentered cam pos', c2w.shape)
print(c2w[:3, :4])
## Get spiral
# Get average pose
up = normalize(stk_c2ws[:, :3, 1].sum(0))
# Find a reasonable "focus depth" for this dataset
stk_depth_min = stk_cams[:, 1, 3, 0].min()
stk_depth_max = stk_cams[:, 1, 3, 3].max()
close_depth, inf_depth = stk_depth_min * .9, stk_depth_max * 5.
dt = .75
mean_dz = 1. / ((1. - dt) / close_depth + dt / inf_depth)
focal_render = mean_dz
# Get radii for spiral path
shrink_factor = .8
zdelta = close_depth * .2
tt = stk_c2ws[:, :3, 3] # ptstocam(poses[:3,3,:].T, c2w).T
rads = np.percentile(np.abs(tt), 90, 0)
c2w_path = c2w
N_views = 240
N_rots = 2
# Generate poses for spiral path
render_poses = render_path_spiral(c2w_path, up, rads, focal_render, zdelta, zrate=.5, rots=N_rots, N=N_views)
render_poses = np.array(render_poses).astype(np.float32)
else:
bds = np.stack([stk_cams[:, 1, 3, 0], stk_cams[:, 1, 3, 3]], axis=0)
stk_c2ws = recenter_poses(stk_c2ws)
stk_c2ws, render_poses, bds = spherify_poses(stk_c2ws, bds)
return stk_imgs, stk_c2ws, render_poses, hwf, i_split, stk_depth_min, stk_depth_max
```
#### File: jiaxiangshang/nerf/run_nerf_helpers.py
```python
import os
import sys
import tensorflow as tf
import numpy as np
import imageio
import json
# Misc utils
def img2mse(x, y): return tf.reduce_mean(tf.square(x - y))
def mse2psnr(x): return -10.*tf.log(x)/tf.log(10.)
def to8b(x): return (255*np.clip(x, 0, 1)).astype(np.uint8)
# Positional encoding
class Embedder:
def __init__(self, **kwargs):
self.kwargs = kwargs
self.create_embedding_fn()
def create_embedding_fn(self):
embed_fns = []
d = self.kwargs['input_dims']
out_dim = 0
if self.kwargs['include_input']:
embed_fns.append(lambda x: x)
out_dim += d
max_freq = self.kwargs['max_freq_log2']
N_freqs = self.kwargs['num_freqs']
if self.kwargs['log_sampling']:
freq_bands = 2.**tf.linspace(0., max_freq, N_freqs)
else:
freq_bands = tf.linspace(2.**0., 2.**max_freq, N_freqs)
for freq in freq_bands:
for p_fn in self.kwargs['periodic_fns']:
embed_fns.append(lambda x, p_fn=p_fn,
freq=freq: p_fn(x * freq))
out_dim += d
self.embed_fns = embed_fns
self.out_dim = out_dim
def embed(self, inputs):
pos_encode = tf.concat([fn(inputs) for fn in self.embed_fns], -1)
return pos_encode
def get_embedder(multires, i=0, input_dims=3):
if i == -1:
return tf.identity, 3
embed_kwargs = {
'include_input': True,
'input_dims': input_dims,
'max_freq_log2': multires-1,
'num_freqs': multires,
'log_sampling': True,
'periodic_fns': [tf.math.sin, tf.math.cos],
}
embedder_obj = Embedder(**embed_kwargs)
def embed(x, eo=embedder_obj): return eo.embed(x)
return embed, embedder_obj.out_dim
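# Usage sketch (illustrative only, assumes eager execution): with multires=4 a 3-D
# point is lifted to 3 + 3*2*4 = 27 dimensions.
def _embedder_demo():
    embed_fn, out_dim = get_embedder(4)
    enc = embed_fn(tf.constant([[0.1, 0.2, 0.3]]))
    print(out_dim, enc.shape)  # 27 (1, 27)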
# Model architecture
def init_nerf_model(D=8, W=256, input_ch=3, input_ch_views=3, output_ch=4, skips=[4], use_viewdirs=False):
relu = tf.keras.layers.ReLU()
def dense(W, act=relu): return tf.keras.layers.Dense(W, activation=act)
print('MODEL coarse', input_ch, input_ch_views, type(input_ch), type(input_ch_views), use_viewdirs)
input_ch = int(input_ch)
input_ch_views = int(input_ch_views)
inputs = tf.keras.Input(shape=(input_ch + input_ch_views))
inputs_pts, inputs_views = tf.split(inputs, [input_ch, input_ch_views], -1)
inputs_pts.set_shape([None, input_ch])
inputs_views.set_shape([None, input_ch_views])
print(inputs.shape, inputs_pts.shape, inputs_views.shape)
outputs = inputs_pts
for i in range(D):
outputs = dense(W)(outputs)
if i in skips:
outputs = tf.concat([inputs_pts, outputs], -1)
if use_viewdirs:
alpha_out = dense(1, act=None)(outputs)
bottleneck = dense(256, act=None)(outputs)
inputs_viewdirs = tf.concat(
[bottleneck, inputs_views], -1) # concat viewdirs
outputs = inputs_viewdirs
# The supplement to the paper states there are 4 hidden layers here, but this is an error since
# the experiments were actually run with 1 hidden layer, so we will leave it as 1.
for i in range(1):
outputs = dense(W//2)(outputs)
outputs = dense(3, act=None)(outputs)
outputs = tf.concat([outputs, alpha_out], -1)
else:
outputs = dense(output_ch, act=None)(outputs)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
return model
def init_nerf_model_attention(D=8, W=256, input_ch=3, input_ch_views=3, output_ch=4, skips=[4], use_viewdirs=False, patch_size=4):
relu = tf.keras.layers.ReLU()
def dense(W, act=relu): return tf.keras.layers.Dense(W, activation=act)
print('MODEL', input_ch, input_ch_views, type(
input_ch), type(input_ch_views), use_viewdirs)
input_ch = int(input_ch)
input_ch_views = int(input_ch_views)
inputs = tf.keras.Input(shape=(patch_size*patch_size, input_ch + input_ch_views))
inputs_pts, inputs_views = tf.split(inputs, [input_ch, input_ch_views], -1)
inputs_pts.set_shape([None, patch_size*patch_size, input_ch])
inputs_views.set_shape([None, patch_size*patch_size, input_ch_views])
print(inputs.shape, inputs_pts.shape, inputs_views.shape)
outputs = inputs_pts
for i in range(D):
outputs = dense(W)(outputs)
if i in skips:
outputs = tf.concat([inputs_pts, outputs], -1)
if use_viewdirs:
alpha_out = dense(1, act=None)(outputs)
bottleneck = dense(256, act=None)(outputs)
inputs_viewdirs = tf.concat([bottleneck, inputs_views], -1) # concat viewdirs
outputs = inputs_viewdirs
# attention
outputs_att = tf.keras.layers.Attention()(
[outputs, outputs])
outputs = tf.concat([outputs, outputs_att], -1)
# The supplement to the paper states there are 4 hidden layers here, but this is an error since
# the experiments were actually run with 1 hidden layer, so we will leave it as 1.
for i in range(1):
outputs = dense(W//2)(outputs)
outputs = dense(3, act=None)(outputs)
outputs = tf.concat([outputs, alpha_out], -1)
else:
outputs = dense(output_ch, act=None)(outputs)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
return model
# Ray helpers
def get_rays(H, W, focal, c2w):
"""Get ray origins, directions from a pinhole camera."""
i, j = tf.meshgrid(tf.range(W, dtype=tf.float32),
tf.range(H, dtype=tf.float32), indexing='xy')
dirs = tf.stack([(i-W*.5)/focal, -(j-H*.5)/focal, -tf.ones_like(i)], -1)
rays_d = tf.reduce_sum(dirs[..., np.newaxis, :] * c2w[:3, :3], -1)
rays_o = tf.broadcast_to(c2w[:3, -1], tf.shape(rays_d))
return rays_o, rays_d
def get_rays_np(H, W, focal, c2w):
"""Get ray origins, directions from a pinhole camera."""
i, j = np.meshgrid(np.arange(W, dtype=np.float32),
np.arange(H, dtype=np.float32), indexing='xy')
dirs = np.stack([(i-W*.5)/focal, -(j-H*.5)/focal, -np.ones_like(i)], -1)
rays_d = np.sum(dirs[..., np.newaxis, :] * c2w[:3, :3], -1)
rays_o = np.broadcast_to(c2w[:3, -1], np.shape(rays_d))
return rays_o, rays_d
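# Usage sketch (illustrative only): for an identity camera-to-world pose the central
# ray points straight down the -z axis.
def _rays_np_demo():
    H, W, focal = 4, 4, 2.0
    c2w = np.eye(4, dtype=np.float32)[:3, :4]
    rays_o, rays_d = get_rays_np(H, W, focal, c2w)
    print(rays_o.shape, rays_d.shape)  # (4, 4, 3) (4, 4, 3)
    print(rays_d[H // 2, W // 2])      # [ 0. -0. -1.]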
def get_patchRays(H, W, focal, c2w, pr_patch_size):
"""Get ray origins, directions from a pinhole camera."""
i, j = tf.meshgrid(tf.range(W, dtype=tf.float32),
tf.range(H, dtype=tf.float32), indexing='xy')
dirs = tf.stack([(i-W*.5)/focal, -(j-H*.5)/focal, -tf.ones_like(i)], -1)
rays_d = tf.reduce_sum(dirs[..., np.newaxis, :] * c2w[:3, :3], -1)
rays_o = tf.broadcast_to(c2w[:3, -1], tf.shape(rays_d))
rays_rgb = tf.stack([rays_o, rays_d], axis=-2)
list_pr = []
for i in range(int(H / pr_patch_size)):
for j in range(int(W / pr_patch_size)):
i_jump = i * pr_patch_size
j_jump = j * pr_patch_size
pr = rays_rgb[i_jump:i_jump + pr_patch_size, j_jump:j_jump + pr_patch_size, :, :]
list_pr.append(pr)
patchRays_rgb = tf.stack(list_pr, axis=0) # [N, H * W / pr_patch_size^2, pr_patch_size^2, ro+rd+rgb, 3]
rays_o = patchRays_rgb[:, :, :, 0, :]
rays_d = patchRays_rgb[:, :, :, 1, :]
return rays_o, rays_d
def get_patchRays_np(H, W, focal, c2w, pr_patch_size):
"""Get ray origins, directions from a pinhole camera."""
i, j = np.meshgrid(np.arange(W, dtype=np.float32),
np.arange(H, dtype=np.float32), indexing='xy')
dirs = np.stack([(i-W*.5)/focal, -(j-H*.5)/focal, -np.ones_like(i)], -1)
rays_d = np.sum(dirs[..., np.newaxis, :] * c2w[:3, :3], -1)
rays_o = np.broadcast_to(c2w[:3, -1], np.shape(rays_d))
rays_rgb = np.stack([rays_o, rays_d], axis=-2)
list_pr = []
for i in range(int(H / pr_patch_size)):
for j in range(int(W / pr_patch_size)):
i_jump = i * pr_patch_size
j_jump = j * pr_patch_size
            pr = rays_rgb[i_jump:i_jump + pr_patch_size, j_jump:j_jump + pr_patch_size, :, :]
            list_pr.append(pr)
    patchRays_rgb = np.stack(list_pr, axis=0)  # [H * W / pr_patch_size^2, pr_patch_size, pr_patch_size, ro+rd, 3]
    rays_o = patchRays_rgb[..., 0, :]
    rays_d = patchRays_rgb[..., 1, :]
return rays_o, rays_d
def ndc_rays(H, W, focal, near, rays_o, rays_d):
"""Normalized device coordinate rays.
Space such that the canvas is a cube with sides [-1, 1] in each axis.
Args:
H: int. Height in pixels.
W: int. Width in pixels.
focal: float. Focal length of pinhole camera.
near: float or array of shape[batch_size]. Near depth bound for the scene.
rays_o: array of shape [batch_size, 3]. Camera origin.
rays_d: array of shape [batch_size, 3]. Ray direction.
Returns:
rays_o: array of shape [batch_size, 3]. Camera origin in NDC.
rays_d: array of shape [batch_size, 3]. Ray direction in NDC.
"""
# Shift ray origins to near plane
t = -(near + rays_o[..., 2]) / rays_d[..., 2]
rays_o = rays_o + t[..., None] * rays_d
# Projection
o0 = -1./(W/(2.*focal)) * rays_o[..., 0] / rays_o[..., 2]
o1 = -1./(H/(2.*focal)) * rays_o[..., 1] / rays_o[..., 2]
o2 = 1. + 2. * near / rays_o[..., 2]
d0 = -1./(W/(2.*focal)) * \
(rays_d[..., 0]/rays_d[..., 2] - rays_o[..., 0]/rays_o[..., 2])
d1 = -1./(H/(2.*focal)) * \
(rays_d[..., 1]/rays_d[..., 2] - rays_o[..., 1]/rays_o[..., 2])
d2 = -2. * near / rays_o[..., 2]
rays_o = tf.stack([o0, o1, o2], -1)
rays_d = tf.stack([d0, d1, d2], -1)
return rays_o, rays_d
# Hierarchical sampling helper
def sample_pdf(bins, weights, N_samples, det=False):
# Get pdf
weights += 1e-5 # prevent nans
pdf = weights / tf.reduce_sum(weights, -1, keepdims=True)
cdf = tf.cumsum(pdf, -1)
cdf = tf.concat([tf.zeros_like(cdf[..., :1]), cdf], -1)
# Take uniform samples
if det:
u = tf.linspace(0., 1., N_samples)
u = tf.broadcast_to(u, list(cdf.shape[:-1]) + [N_samples])
else:
u = tf.random.uniform(list(cdf.shape[:-1]) + [N_samples])
# Invert CDF
inds = tf.searchsorted(cdf, u, side='right')
below = tf.maximum(0, inds-1)
above = tf.minimum(cdf.shape[-1]-1, inds)
inds_g = tf.stack([below, above], -1)
cdf_g = tf.gather(cdf, inds_g, axis=-1, batch_dims=len(inds_g.shape)-2)
bins_g = tf.gather(bins, inds_g, axis=-1, batch_dims=len(inds_g.shape)-2)
denom = (cdf_g[..., 1]-cdf_g[..., 0])
denom = tf.where(denom < 1e-5, tf.ones_like(denom), denom)
t = (u-cdf_g[..., 0])/denom
samples = bins_g[..., 0] + t * (bins_g[..., 1]-bins_g[..., 0])
return samples
``` |
{
"source": "jiaxianhua/safaribookonline-video-downloader",
"score": 3
} |
#### File: jiaxianhua/safaribookonline-video-downloader/safari_books_online_parser.py
```python
import requests
import re
def retrieve_page_contents(url):
r = requests.get(url)
if r.status_code < 300:
return (r.content.decode())
print("Current url: {0} returned invalid status code.".format(url))
raise ValueError('Invalid server response.')
def parse_contents_into_list(text):
    regex_string = r"/library/cover/[\d]{2,}"
book_id_matches = re.findall(regex_string, text)
book_ids = [match.split('/')[-1] for match in book_id_matches]
print("Current page found {0} book ids.".format(len(book_ids)))
return book_ids
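# Usage sketch (illustrative only): run the id parser on a made-up HTML snippet.
def _parser_demo():
    sample = '<img src="/library/cover/9781491957660"><img src="/library/cover/9781492032649">'
    print(parse_contents_into_list(sample))  # ['9781491957660', '9781492032649']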
def write_id_list_to_txt_file(id_list, filename):
with open(filename + ".txt", 'a') as txt_file_handler:
txt_file_handler.write("\n".join([str(book_id) for book_id in id_list]))
txt_file_handler.close()
if __name__ == '__main__':
url_dict = {'math_and_science': 'https://www.safaribooksonline.com/topics/math-science',
'web_development': 'https://www.safaribooksonline.com/topics/web-development',
'computer_networking': 'https://www.safaribooksonline.com/topics/computer-networking',
'software_development': 'https://www.safaribooksonline.com/topics/software-development',
'databases': 'https://www.safaribooksonline.com/topics/databases',
'IT_operations': 'https://www.safaribooksonline.com/topics/information-technology-operations',
'engineering': 'https://www.safaribooksonline.com/topics/engineering',
'analytics': 'https://www.safaribooksonline.com/topics/analytics',
'game_development': 'https://www.safaribooksonline.com/topics/game-development'}
for topic, url in url_dict.items():
# don't expect to see a topic with more than 100 pages of books in it
book_list_for_topic = []
for page_number in range(1, 100):
try:
page_content = retrieve_page_contents(url + "?page={0}".format(page_number))
except ValueError:
break
            else:  # only pages that actually returned content are parsed
book_list = parse_contents_into_list(page_content)
book_list_for_topic.extend(book_list)
print("{0} book ids found for topic: {1}".format(len(book_list_for_topic), topic))
write_id_list_to_txt_file(book_list_for_topic, topic)
```
#### File: jiaxianhua/safaribookonline-video-downloader/safari_video_downloader.py
```python
from bs4 import BeautifulSoup
import requests
import os
import subprocess
import unicodedata
import string
import config
class SafariDownloader:
def __init__(self, url, output_folder, username, password, domain, downloader_path):
self.output_folder = output_folder
self.username = username
self.password = password
self.domain = domain
self.downloader_path = downloader_path
req = requests.get(url)
soup = BeautifulSoup(req.text, 'html.parser')
self.title = soup.find('h1').text
self.topics = soup.find_all('li', class_='toc-level-1') # top-level topic titles
# Update youtube-dl first
subprocess.run([self.downloader_path, "-U"])
def validify(self, filename):
valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits)
valid_chars = frozenset(valid_chars)
# The unicodedata.normalize call replaces accented characters with the unaccented equivalent,
# which is better than simply stripping them out. After that all disallowed characters are removed.
cleaned_filename = unicodedata.normalize('NFKD', filename).encode('ascii', 'ignore').decode('ascii')
return ''.join(c for c in cleaned_filename if c in valid_chars)
def download(self):
for index, topic in enumerate(self.topics):
topic_name = topic.a.text
# Creating folder to put the videos in
save_folder = '{}/{}/{:03d} - {}'.format(self.output_folder, self.title, index + 1, topic_name)
os.makedirs(save_folder, exist_ok=True)
# You can choose to skip these topic_name, comment these three lines if you do not want to skip any
if topic_name in ('Keynotes', 'Strata Business Summit', 'Sponsored'):
print("Skipping {}...".format(topic_name))
continue
for index, video in enumerate(topic.ol.find_all('a')):
video_name = '{:03d} - {}'.format(index + 1, video.text)
video_name = self.validify(video_name)
video_url = self.domain + video.get('href')
video_out = '{}/{}.mp4'.format(save_folder, video_name)
video_out = video_out.replace(':', '-')
# Check if file already exists
if os.path.isfile(video_out):
print("File {} already exists! Skipping...".format(video_out))
continue
print("Downloading {} ...".format(video_name))
subprocess.run([self.downloader_path, "-u", self.username, "-p", self.password, "--verbose", "--output", video_out, video_url])
if __name__ == '__main__':
app_config = config.Config
for url in app_config.URLS:
downloader = SafariDownloader(url=url, output_folder=app_config.OUTPUT_FOLDER,
username=app_config.USERNAME, password=<PASSWORD>,
domain=app_config.DOMAIN, downloader_path=app_config.DOWNLOADER)
downloader.download()
``` |
{
"source": "jiaxianhua/skillshare",
"score": 2
} |
#### File: youtube_dl/extractor/skillshare.py
```python
from __future__ import unicode_literals
import json
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
ExtractorError,
int_or_none,
try_get,
unified_timestamp
)
class SkillshareBaseIE(InfoExtractor):
_NETRC_MACHINE = "skillshare"
_TN_RE = r"uploads/video/thumbnails/[0-9a-f]+/(?P<width>[0-9]+)-(?P<height>[0-9]+)"
_LOGIN_URL = "https://api.skillshare.com/login"
_VIDEO_URL = "https://api.skillshare.com/sessions/%s/download"
def _real_initialize(self):
self._login()
def _login(self):
username, password = self._get_login_info()
if username is None or password is None:
self.raise_login_required("An email and password is needed to download any video (even non-premium ones)")
data = {
"email": username,
"password": password
}
headers = {
"Content-Type": "application/json"
}
user_json = self._download_json(self._LOGIN_URL,
None,
note="Logging in",
errnote="Error logging in, make sure the email and password is correct",
data=json.dumps(data).encode(),
headers=headers)
user_type = user_json.get("membership_label", "Premium Member")
if user_type == "Basic Member":
self._user_type = 0
        elif user_type == "Premium Member" or "trial" in user_type.lower():
self._user_type = 2
else:
raise ExtractorError("User type %s unknown" % user_json["membership_label"])
# I can find no way of linking to a specific video so only entire course downloads are available.
class SkillshareCourseIE(SkillshareBaseIE):
IE_NAME = 'skillshare:course'
IE_DESC = 'skillshare.com classes'
_VALID_URL = r'https?://(?:www\.)?skillshare\.com/classes/[^/]+/(?P<id>[0-9]+)'
_CLASS_URL = "https://api.skillshare.com/classes/%s"
_TEST = {
"url": "https://www.skillshare.com/classes/Blender-3D-Fire-Smoke-Simulation-Guide/1850126092",
"only_matching": True
}
def _real_extract(self, url):
# Technically the SKU, not ID but the SKU is a more universal identifier.
class_id = self._match_id(url)
class_json = self._download_json(self._CLASS_URL % class_id,
None,
note="Downloading class JSON",
errnote="Error downloading class JSON")
if class_json.get("enrollment_type", 0) > self._user_type:
raise ExtractorError("This course requires a premium account and thus can't be downloaded")
lessons_json = []
# Pretty sure all classes only have one unit but flattening just in case.
for unit_json in class_json["_embedded"]["units"]["_embedded"]["units"]:
lessons_json += (unit_json["_embedded"]["sessions"]["_embedded"]["sessions"])
videos = []
for lesson_json in lessons_json:
            lesson_thumbnail_urls = [
                lesson_json.get("video_thumbnail_url"),
                lesson_json.get("image_thumbnail")
            ]
lesson_thumbnail_urls = filter(None, lesson_thumbnail_urls)
lesson_thumbnails_json = []
for lesson_thumbnail_url in lesson_thumbnail_urls:
lesson_thumbnails_json.append({
"url": lesson_thumbnail_url,
"width": int_or_none(self._search_regex(self._TN_RE, lesson_thumbnail_url, "width", fatal=False)),
"height": int_or_none(self._search_regex(self._TN_RE, lesson_thumbnail_url, "height", fatal=False)),
})
if not lesson_thumbnails_json:
lesson_thumbnails_json = None
lesson_categories = [class_json.get("category")]
if lesson_categories == [None]:
lesson_categories = None
videos.append({
"id": compat_str(lesson_json["id"]),
"title": lesson_json.get("title"),
"url": self._VIDEO_URL % compat_str(lesson_json["id"]),
"ext": "mp4",
"thumbnails": lesson_thumbnails_json,
"uploader": try_get(class_json, lambda x: x["_embedded"]["teacher"]["full_name"]),
"creator": try_get(class_json, lambda x: x["_embedded"]["teacher"]["full_name"]),
"timestamp": unified_timestamp(lesson_json.get("create_time")),
"uploader_id": compat_str(try_get(class_json, lambda x: x["_embedded"]["teacher"]["username"])),
"categories": lesson_categories,
"chapter": try_get(lesson_json, lambda x: x["_links"]["unit"]["title"]),
"chapter_id": compat_str(lesson_json.get("unit_id"))
})
return {
"id": class_id,
"title": class_json["title"],
"uploader": try_get(class_json, lambda x: x["_embedded"]["teacher"]["full_name"]),
"uploader_id": compat_str(try_get(class_json, lambda x: x["_embedded"]["teacher"]["username"])),
"_type": "playlist",
"entries": videos
}
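# Usage sketch (illustrative only): the extractor is normally driven by youtube-dl
# itself; a rough Python-API equivalent is shown below. It assumes this extractor is
# registered in the installed youtube-dl build; credentials are placeholders.
def _download_class_demo():
    import youtube_dl
    opts = {"username": "you@example.com", "password": "not-a-real-password",
            "outtmpl": "%(title)s.%(ext)s"}
    with youtube_dl.YoutubeDL(opts) as ydl:
        ydl.download(["https://www.skillshare.com/classes/Blender-3D-Fire-Smoke-Simulation-Guide/1850126092"])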
``` |
{
"source": "JiaXiao243/Parakeet",
"score": 2
} |
#### File: examples/clarinet/utils.py
```python
from __future__ import division
import os
import soundfile as sf
from tensorboardX import SummaryWriter
from collections import OrderedDict
from paddle import fluid
import paddle.fluid.dygraph as dg
def make_output_tree(output_dir):
checkpoint_dir = os.path.join(output_dir, "checkpoints")
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
state_dir = os.path.join(output_dir, "states")
if not os.path.exists(state_dir):
os.makedirs(state_dir)
def eval_model(model, valid_loader, output_dir, iteration, sample_rate):
model.eval()
for i, batch in enumerate(valid_loader):
# print("sentence {}".format(i))
path = os.path.join(output_dir,
"sentence_{}_step_{}.wav".format(i, iteration))
audio_clips, mel_specs, audio_starts = batch
wav_var = model.synthesis(mel_specs)
wav_np = wav_var.numpy()[0]
sf.write(path, wav_np, samplerate=sample_rate)
print("generated {}".format(path))
def load_wavenet(model, path):
wavenet_dict, _ = dg.load_dygraph(path)
encoder_dict = OrderedDict()
teacher_dict = OrderedDict()
for k, v in wavenet_dict.items():
if k.startswith("encoder."):
encoder_dict[k.split('.', 1)[1]] = v
else:
# k starts with "decoder."
teacher_dict[k.split('.', 1)[1]] = v
model.encoder.set_dict(encoder_dict)
model.teacher.set_dict(teacher_dict)
print("loaded the encoder part and teacher part from wavenet model.")
```
#### File: examples/deepvoice3/data.py
```python
from __future__ import division
import os
import csv
from pathlib import Path
import numpy as np
from paddle import fluid
import pandas as pd
import librosa
from scipy import signal
import paddle.fluid.dygraph as dg
from parakeet.g2p.en import text_to_sequence, sequence_to_text
from parakeet.data import DatasetMixin, TransformDataset, FilterDataset, CacheDataset
from parakeet.data import DataCargo, PartialyRandomizedSimilarTimeLengthSampler, SequentialSampler, BucketSampler
class LJSpeechMetaData(DatasetMixin):
def __init__(self, root):
self.root = Path(root)
self._wav_dir = self.root.joinpath("wavs")
csv_path = self.root.joinpath("metadata.csv")
self._table = pd.read_csv(
csv_path,
sep="|",
encoding="utf-8",
header=None,
quoting=csv.QUOTE_NONE,
names=["fname", "raw_text", "normalized_text"])
def get_example(self, i):
fname, raw_text, normalized_text = self._table.iloc[i]
fname = str(self._wav_dir.joinpath(fname + ".wav"))
return fname, raw_text, normalized_text
def __len__(self):
return len(self._table)
class Transform(object):
def __init__(self,
replace_pronunciation_prob=0.,
sample_rate=22050,
preemphasis=.97,
n_fft=1024,
win_length=1024,
hop_length=256,
fmin=125,
fmax=7600,
n_mels=80,
min_level_db=-100,
ref_level_db=20,
max_norm=0.999,
clip_norm=True):
self.replace_pronunciation_prob = replace_pronunciation_prob
self.sample_rate = sample_rate
self.preemphasis = preemphasis
self.n_fft = n_fft
self.win_length = win_length
self.hop_length = hop_length
self.fmin = fmin
self.fmax = fmax
self.n_mels = n_mels
self.min_level_db = min_level_db
self.ref_level_db = ref_level_db
self.max_norm = max_norm
self.clip_norm = clip_norm
def __call__(self, in_data):
fname, _, normalized_text = in_data
# text processing
mix_grapheme_phonemes = text_to_sequence(
normalized_text, self.replace_pronunciation_prob)
text_length = len(mix_grapheme_phonemes)
# CAUTION: positions start from 1
speaker_id = None
# wave processing
wav, _ = librosa.load(fname, sr=self.sample_rate)
# preemphasis
y = signal.lfilter([1., -self.preemphasis], [1.], wav)
# STFT
D = librosa.stft(
y=y,
n_fft=self.n_fft,
win_length=self.win_length,
hop_length=self.hop_length)
S = np.abs(D)
# to db and normalize to 0-1
amplitude_min = np.exp(self.min_level_db / 20 * np.log(10)) # 1e-5
S_norm = 20 * np.log10(np.maximum(amplitude_min,
S)) - self.ref_level_db
S_norm = (S_norm - self.min_level_db) / (-self.min_level_db)
S_norm = self.max_norm * S_norm
if self.clip_norm:
S_norm = np.clip(S_norm, 0, self.max_norm)
# mel scale and to db and normalize to 0-1,
# CAUTION: pass linear scale S, not dbscaled S
S_mel = librosa.feature.melspectrogram(
S=S, n_mels=self.n_mels, fmin=self.fmin, fmax=self.fmax, power=1.)
S_mel = 20 * np.log10(np.maximum(amplitude_min,
S_mel)) - self.ref_level_db
S_mel_norm = (S_mel - self.min_level_db) / (-self.min_level_db)
S_mel_norm = self.max_norm * S_mel_norm
if self.clip_norm:
S_mel_norm = np.clip(S_mel_norm, 0, self.max_norm)
# num_frames
n_frames = S_mel_norm.shape[-1] # CAUTION: original number of frames
return (mix_grapheme_phonemes, text_length, speaker_id, S_norm.T,
S_mel_norm.T, n_frames)
class DataCollector(object):
def __init__(self, downsample_factor=4, r=1):
self.downsample_factor = int(downsample_factor)
self.frames_per_step = int(r)
self._factor = int(downsample_factor * r)
# CAUTION: small diff here
self._pad_begin = int(downsample_factor * r)
def __call__(self, examples):
batch_size = len(examples)
# lengths
text_lengths = np.array([example[1]
for example in examples]).astype(np.int64)
frames = np.array([example[5]
for example in examples]).astype(np.int64)
max_text_length = int(np.max(text_lengths))
max_frames = int(np.max(frames))
if max_frames % self._factor != 0:
max_frames += (self._factor - max_frames % self._factor)
max_frames += self._pad_begin
max_decoder_length = max_frames // self._factor
# pad time sequence
text_sequences = []
lin_specs = []
mel_specs = []
done_flags = []
for example in examples:
(mix_grapheme_phonemes, text_length, speaker_id, S_norm,
S_mel_norm, num_frames) = example
text_sequences.append(
np.pad(mix_grapheme_phonemes, (0, max_text_length - text_length
),
mode="constant"))
lin_specs.append(
np.pad(S_norm, ((self._pad_begin, max_frames - self._pad_begin
- num_frames), (0, 0)),
mode="constant"))
mel_specs.append(
np.pad(S_mel_norm, ((self._pad_begin, max_frames -
self._pad_begin - num_frames), (0, 0)),
mode="constant"))
done_flags.append(
np.pad(np.zeros((int(np.ceil(num_frames // self._factor)), )),
(0, max_decoder_length - int(
np.ceil(num_frames // self._factor))),
mode="constant",
constant_values=1))
text_sequences = np.array(text_sequences).astype(np.int64)
lin_specs = np.array(lin_specs).astype(np.float32)
mel_specs = np.array(mel_specs).astype(np.float32)
# downsample here
done_flags = np.array(done_flags).astype(np.float32)
# text positions
text_mask = (np.arange(1, 1 + max_text_length) <= np.expand_dims(
text_lengths, -1)).astype(np.int64)
text_positions = np.arange(
1, 1 + max_text_length, dtype=np.int64) * text_mask
# decoder_positions
decoder_positions = np.tile(
np.expand_dims(
np.arange(
1, 1 + max_decoder_length, dtype=np.int64), 0),
(batch_size, 1))
return (text_sequences, text_lengths, text_positions, mel_specs,
lin_specs, frames, decoder_positions, done_flags)
def make_data_loader(data_root, config):
# construct meta data
meta = LJSpeechMetaData(data_root)
# filter it!
min_text_length = config["meta_data"]["min_text_length"]
meta = FilterDataset(meta, lambda x: len(x[2]) >= min_text_length)
# transform meta data into meta data
c = config["transform"]
transform = Transform(
replace_pronunciation_prob=c["replace_pronunciation_prob"],
sample_rate=c["sample_rate"],
preemphasis=c["preemphasis"],
n_fft=c["n_fft"],
win_length=c["win_length"],
hop_length=c["hop_length"],
fmin=c["fmin"],
fmax=c["fmax"],
n_mels=c["n_mels"],
min_level_db=c["min_level_db"],
ref_level_db=c["ref_level_db"],
max_norm=c["max_norm"],
clip_norm=c["clip_norm"])
ljspeech = CacheDataset(TransformDataset(meta, transform))
# use meta data's text length as a sort key for the sampler
batch_size = config["train"]["batch_size"]
text_lengths = [len(example[2]) for example in meta]
sampler = PartialyRandomizedSimilarTimeLengthSampler(text_lengths,
batch_size)
env = dg.parallel.ParallelEnv()
num_trainers = env.nranks
local_rank = env.local_rank
sampler = BucketSampler(
text_lengths, batch_size, num_trainers=num_trainers, rank=local_rank)
# some model hyperparameters affect how we process data
model_config = config["model"]
collector = DataCollector(
downsample_factor=model_config["downsample_factor"],
r=model_config["outputs_per_step"])
ljspeech_loader = DataCargo(
ljspeech, batch_fn=collector, batch_size=batch_size, sampler=sampler)
loader = fluid.io.DataLoader.from_generator(capacity=10, return_list=True)
loader.set_batch_generator(
ljspeech_loader, places=fluid.framework._current_expected_place())
return loader
```
#### File: examples/fastspeech/train.py
```python
import numpy as np
import argparse
import os
import time
import math
from pathlib import Path
from pprint import pprint
from ruamel import yaml
from tqdm import tqdm
from matplotlib import cm
from collections import OrderedDict
from tensorboardX import SummaryWriter
import paddle.fluid.dygraph as dg
import paddle.fluid.layers as layers
import paddle.fluid as fluid
from parakeet.models.fastspeech.fastspeech import FastSpeech
from parakeet.models.fastspeech.utils import get_alignment
from data import LJSpeechLoader
from parakeet.utils import io
def add_config_options_to_parser(parser):
parser.add_argument("--config", type=str, help="path of the config file")
parser.add_argument("--use_gpu", type=int, default=0, help="device to use")
parser.add_argument("--data", type=str, help="path of LJspeech dataset")
parser.add_argument(
"--alignments_path", type=str, help="path of alignments")
g = parser.add_mutually_exclusive_group()
g.add_argument("--checkpoint", type=str, help="checkpoint to resume from")
g.add_argument(
"--iteration",
type=int,
help="the iteration of the checkpoint to load from output directory")
parser.add_argument(
"--output",
type=str,
default="experiment",
help="path to save experiment results")
def main(args):
local_rank = dg.parallel.Env().local_rank
nranks = dg.parallel.Env().nranks
parallel = nranks > 1
with open(args.config) as f:
cfg = yaml.load(f, Loader=yaml.Loader)
global_step = 0
place = fluid.CUDAPlace(local_rank) if args.use_gpu else fluid.CPUPlace()
fluid.enable_dygraph(place)
if not os.path.exists(args.output):
os.mkdir(args.output)
writer = SummaryWriter(os.path.join(args.output,
'log')) if local_rank == 0 else None
model = FastSpeech(cfg['network'], num_mels=cfg['audio']['num_mels'])
model.train()
optimizer = fluid.optimizer.AdamOptimizer(
learning_rate=dg.NoamDecay(1 / (cfg['train']['warm_up_step'] *
(cfg['train']['learning_rate']**2)),
cfg['train']['warm_up_step']),
parameter_list=model.parameters(),
grad_clip=fluid.clip.GradientClipByGlobalNorm(cfg['train'][
'grad_clip_thresh']))
reader = LJSpeechLoader(
cfg['audio'],
place,
args.data,
args.alignments_path,
cfg['train']['batch_size'],
nranks,
local_rank,
shuffle=True).reader()
# Load parameters.
global_step = io.load_parameters(
model=model,
optimizer=optimizer,
checkpoint_dir=os.path.join(args.output, 'checkpoints'),
iteration=args.iteration,
checkpoint_path=args.checkpoint)
print("Rank {}: checkpoint loaded.".format(local_rank))
if parallel:
strategy = dg.parallel.prepare_context()
model = fluid.dygraph.parallel.DataParallel(model, strategy)
for epoch in range(cfg['train']['max_epochs']):
pbar = tqdm(reader)
for i, data in enumerate(pbar):
pbar.set_description('Processing at epoch %d' % epoch)
(character, mel, pos_text, pos_mel, alignment) = data
global_step += 1
#Forward
result = model(
character, pos_text, mel_pos=pos_mel, length_target=alignment)
mel_output, mel_output_postnet, duration_predictor_output, _, _ = result
mel_loss = layers.mse_loss(mel_output, mel)
mel_postnet_loss = layers.mse_loss(mel_output_postnet, mel)
duration_loss = layers.mean(
layers.abs(
layers.elementwise_sub(duration_predictor_output,
alignment)))
total_loss = mel_loss + mel_postnet_loss + duration_loss
if local_rank == 0:
writer.add_scalar('mel_loss', mel_loss.numpy(), global_step)
writer.add_scalar('post_mel_loss',
mel_postnet_loss.numpy(), global_step)
writer.add_scalar('duration_loss',
duration_loss.numpy(), global_step)
writer.add_scalar('learning_rate',
optimizer._learning_rate.step().numpy(),
global_step)
if parallel:
total_loss = model.scale_loss(total_loss)
total_loss.backward()
model.apply_collective_grads()
else:
total_loss.backward()
optimizer.minimize(total_loss)
model.clear_gradients()
# save checkpoint
if local_rank == 0 and global_step % cfg['train'][
'checkpoint_interval'] == 0:
io.save_parameters(
os.path.join(args.output, 'checkpoints'), global_step,
model, optimizer)
if local_rank == 0:
writer.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Train Fastspeech model")
add_config_options_to_parser(parser)
args = parser.parse_args()
# Print the whole config setting.
pprint(vars(args))
main(args)
```
#### File: examples/waveflow/data.py
```python
import os
import random
import librosa
import numpy as np
from paddle import fluid
from parakeet.datasets import ljspeech
from parakeet.data import SpecBatcher, WavBatcher
from parakeet.data import DataCargo, DatasetMixin
from parakeet.data import DistributedSampler, BatchSampler
from scipy.io.wavfile import read
class Dataset(ljspeech.LJSpeech):
def __init__(self, config):
super(Dataset, self).__init__(config.root)
self.config = config
def _get_example(self, metadatum):
fname, _, _ = metadatum
wav_path = os.path.join(self.root, "wavs", fname + ".wav")
loaded_sr, audio = read(wav_path)
assert loaded_sr == self.config.sample_rate
return audio
class Subset(DatasetMixin):
def __init__(self, dataset, indices, valid):
self.dataset = dataset
self.indices = indices
self.valid = valid
self.config = dataset.config
def get_mel(self, audio):
spectrogram = librosa.core.stft(
audio,
n_fft=self.config.fft_size,
hop_length=self.config.fft_window_shift,
win_length=self.config.fft_window_size)
spectrogram_magnitude = np.abs(spectrogram)
# mel_filter_bank shape: [n_mels, 1 + n_fft/2]
mel_filter_bank = librosa.filters.mel(sr=self.config.sample_rate,
n_fft=self.config.fft_size,
n_mels=self.config.mel_bands,
fmin=self.config.mel_fmin,
fmax=self.config.mel_fmax)
# mel shape: [n_mels, num_frames]
mel = np.dot(mel_filter_bank, spectrogram_magnitude)
# Normalize mel.
clip_val = 1e-5
ref_constant = 1
mel = np.log(np.clip(mel, a_min=clip_val, a_max=None) * ref_constant)
return mel
def __getitem__(self, idx):
audio = self.dataset[self.indices[idx]]
segment_length = self.config.segment_length
if self.valid:
# whole audio for valid set
pass
else:
# Randomly crop segment_length from audios in the training set.
# audio shape: [len]
if audio.shape[0] >= segment_length:
max_audio_start = audio.shape[0] - segment_length
audio_start = random.randint(0, max_audio_start)
audio = audio[audio_start:(audio_start + segment_length)]
else:
audio = np.pad(audio, (0, segment_length - audio.shape[0]),
mode='constant',
constant_values=0)
# Normalize audio to the [-1, 1] range.
audio = audio.astype(np.float32) / 32768.0
mel = self.get_mel(audio)
return audio, mel
def _batch_examples(self, batch):
audios = [sample[0] for sample in batch]
mels = [sample[1] for sample in batch]
audios = WavBatcher(pad_value=0.0)(audios)
mels = SpecBatcher(pad_value=0.0)(mels)
return audios, mels
def __len__(self):
return len(self.indices)
class LJSpeech:
def __init__(self, config, nranks, rank):
place = fluid.CUDAPlace(rank) if config.use_gpu else fluid.CPUPlace()
# Whole LJSpeech dataset.
ds = Dataset(config)
# Split into train and valid dataset.
indices = list(range(len(ds)))
train_indices = indices[config.valid_size:]
valid_indices = indices[:config.valid_size]
random.shuffle(train_indices)
# Train dataset.
trainset = Subset(ds, train_indices, valid=False)
sampler = DistributedSampler(len(trainset), nranks, rank)
total_bs = config.batch_size
assert total_bs % nranks == 0
train_sampler = BatchSampler(
sampler, total_bs // nranks, drop_last=True)
trainloader = DataCargo(trainset, batch_sampler=train_sampler)
trainreader = fluid.io.PyReader(capacity=50, return_list=True)
trainreader.decorate_batch_generator(trainloader, place)
self.trainloader = (data for _ in iter(int, 1)
for data in trainreader())
# Valid dataset.
validset = Subset(ds, valid_indices, valid=True)
# Currently only support batch_size = 1 for valid loader.
validloader = DataCargo(validset, batch_size=1, shuffle=False)
validreader = fluid.io.PyReader(capacity=20, return_list=True)
validreader.decorate_batch_generator(validloader, place)
self.validloader = validreader
```
#### File: models/deepvoice3/loss.py
```python
from __future__ import division
import numpy as np
from numba import jit
from paddle import fluid
import paddle.fluid.layers as F
import paddle.fluid.dygraph as dg
def masked_mean(inputs, mask):
"""
Args:
inputs (Variable): shape(B, T, C), dtype float32, the input.
mask (Variable): shape(B, T), dtype float32, a mask.
Returns:
loss (Variable): shape(1, ), dtype float32, masked mean.
"""
channels = inputs.shape[-1]
masked_inputs = F.elementwise_mul(inputs, mask, axis=0)
loss = F.reduce_sum(masked_inputs) / (channels * F.reduce_sum(mask))
return loss
@jit(nopython=True)
def guided_attention(N, max_N, T, max_T, g):
"""Generate an diagonal attention guide.
Args:
N (int): valid length of encoder.
max_N (int): max length of encoder.
T (int): valid length of decoder.
max_T (int): max length of decoder.
g (float): sigma to adjust the degree of diagonal guide.
Returns:
np.ndarray: shape(max_N, max_T), dtype float32, the diagonal guide.
"""
W = np.zeros((max_N, max_T), dtype=np.float32)
for n in range(N):
for t in range(T):
W[n, t] = 1 - np.exp(-(n / N - t / T)**2 / (2 * g * g))
return W
def guided_attentions(encoder_lengths, decoder_lengths, max_decoder_len,
g=0.2):
"""Generate a diagonal attention guide for a batch.
Args:
encoder_lengths (np.ndarray): shape(B, ), dtype: int64, encoder valid lengths.
decoder_lengths (np.ndarray): shape(B, ), dtype: int64, decoder valid lengths.
max_decoder_len (int): max length of decoder.
g (float, optional): sigma to adjust the degree of diagonal guide.. Defaults to 0.2.
Returns:
np.ndarray: shape(B, max_T, max_N), dtype float32, the diagonal guide. (max_N: max encoder length, max_T: max decoder length.)
"""
B = len(encoder_lengths)
max_input_len = encoder_lengths.max()
W = np.zeros((B, max_decoder_len, max_input_len), dtype=np.float32)
for b in range(B):
W[b] = guided_attention(encoder_lengths[b], max_input_len,
decoder_lengths[b], max_decoder_len, g).T
return W
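# Usage sketch (illustrative only): guides for a batch of two utterances; the entry at
# the diagonal start is 0 (no penalty) and grows away from the diagonal.
def _guided_attention_demo():
    guide = guided_attentions(np.array([4, 6]), np.array([8, 10]), max_decoder_len=10)
    print(guide.shape)     # (2, 10, 6)
    print(guide[0, 0, 0])  # 0.0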
class TTSLoss(object):
def __init__(self,
masked_weight=0.0,
priority_bin=None,
priority_weight=0.0,
binary_divergence_weight=0.0,
guided_attention_sigma=0.2,
downsample_factor=4,
r=1):
"""Compute loss for Deep Voice 3 model.
Args:
masked_weight (float, optional): the weight of masked loss. Defaults to 0.0.
priority_bin ([type], optional): frequency bands for linear spectrogram loss to be prioritized. Defaults to None.
priority_weight (float, optional): weight for the prioritized frequency bands. Defaults to 0.0.
binary_divergence_weight (float, optional): weight for binary cross entropy (used for spectrogram loss). Defaults to 0.0.
guided_attention_sigma (float, optional): `sigma` for attention guide. Defaults to 0.2.
downsample_factor (int, optional): the downsample factor for mel spectrogram. Defaults to 4.
r (int, optional): frames per decoder step. Defaults to 1.
"""
self.masked_weight = masked_weight
self.priority_bin = priority_bin # only used for lin-spec loss
self.priority_weight = priority_weight # only used for lin-spec loss
self.binary_divergence_weight = binary_divergence_weight
self.guided_attention_sigma = guided_attention_sigma
self.time_shift = r
self.r = r
self.downsample_factor = downsample_factor
def l1_loss(self, prediction, target, mask, priority_bin=None):
"""L1 loss for spectrogram.
Args:
prediction (Variable): shape(B, T, C), dtype float32, predicted spectrogram.
target (Variable): shape(B, T, C), dtype float32, target spectrogram.
mask (Variable): shape(B, T), mask.
priority_bin (int, optional): frequency bands for linear spectrogram loss to be prioritized. Defaults to None.
Returns:
Variable: shape(1,), dtype float32, l1 loss(with mask and possibly priority bin applied.)
"""
abs_diff = F.abs(prediction - target)
# basic mask-weighted l1 loss
w = self.masked_weight
if w > 0 and mask is not None:
base_l1_loss = w * masked_mean(abs_diff, mask) \
+ (1 - w) * F.reduce_mean(abs_diff)
else:
base_l1_loss = F.reduce_mean(abs_diff)
if self.priority_weight > 0 and priority_bin is not None:
# mask-weighted priority channels' l1-loss
priority_abs_diff = abs_diff[:, :, :priority_bin]
if w > 0 and mask is not None:
priority_loss = w * masked_mean(priority_abs_diff, mask) \
+ (1 - w) * F.reduce_mean(priority_abs_diff)
else:
priority_loss = F.reduce_mean(priority_abs_diff)
# priority weighted sum
p = self.priority_weight
loss = p * priority_loss + (1 - p) * base_l1_loss
else:
loss = base_l1_loss
return loss
def binary_divergence(self, prediction, target, mask):
"""Binary cross entropy loss for spectrogram. All the values in the spectrogram are treated as logits in a logistic regression.
Args:
prediction (Variable): shape(B, T, C), dtype float32, predicted spectrogram.
target (Variable): shape(B, T, C), dtype float32, target spectrogram.
mask (Variable): shape(B, T), mask.
Returns:
Variable: shape(1,), dtype float32, binary cross entropy loss.
"""
flattened_prediction = F.reshape(prediction, [-1, 1])
flattened_target = F.reshape(target, [-1, 1])
flattened_loss = F.log_loss(
flattened_prediction, flattened_target, epsilon=1e-8)
bin_div = fluid.layers.reshape(flattened_loss, prediction.shape)
w = self.masked_weight
if w > 0 and mask is not None:
loss = w * masked_mean(bin_div, mask) \
+ (1 - w) * F.reduce_mean(bin_div)
else:
loss = F.reduce_mean(bin_div)
return loss
@staticmethod
def done_loss(done_hat, done):
"""Compute done loss
Args:
done_hat (Variable): shape(B, T), dtype float32, predicted done probability(the probability that the final frame has been generated.)
done (Variable): shape(B, T), dtype float32, ground truth done probability(the probability that the final frame has been generated.)
Returns:
Variable: shape(1, ), dtype float32, done loss.
"""
flat_done_hat = F.reshape(done_hat, [-1, 1])
flat_done = F.reshape(done, [-1, 1])
loss = F.log_loss(flat_done_hat, flat_done, epsilon=1e-8)
loss = F.reduce_mean(loss)
return loss
def attention_loss(self, predicted_attention, input_lengths,
target_lengths):
"""
Given valid encoder_lengths and decoder_lengths, compute a diagonal guide, and compute loss from the predicted attention and the guide.
Args:
predicted_attention (Variable): shape(*, B, T_dec, T_enc), dtype float32, the alignment tensor, where B means batch size, T_dec means number of time steps of the decoder, T_enc means the number of time steps of the encoder, * means other possible dimensions.
input_lengths (numpy.ndarray): shape(B,), dtype:int64, valid lengths (time steps) of encoder outputs.
target_lengths (numpy.ndarray): shape(batch_size,), dtype:int64, valid lengths (time steps) of decoder outputs.
Returns:
loss (Variable): shape(1, ), dtype float32, attention loss.
"""
n_attention, batch_size, max_target_len, max_input_len = (
predicted_attention.shape)
soft_mask = guided_attentions(input_lengths, target_lengths,
max_target_len,
self.guided_attention_sigma)
soft_mask_ = dg.to_variable(soft_mask)
loss = fluid.layers.reduce_mean(predicted_attention * soft_mask_)
return loss
def __call__(self, outputs, inputs):
"""Total loss
Args:
outpus is a tuple of (mel_hyp, lin_hyp, attn_hyp, done_hyp).
mel_hyp (Variable): shape(B, T, C_mel), dtype float32, predicted mel spectrogram.
lin_hyp (Variable): shape(B, T, C_lin), dtype float32, predicted linear spectrogram.
done_hyp (Variable): shape(B, T), dtype float32, predicted done probability.
attn_hyp (Variable): shape(N, B, T_dec, T_enc), dtype float32, predicted attention.
inputs is a tuple of (mel_ref, lin_ref, done_ref, input_lengths, n_frames)
mel_ref (Variable): shape(B, T, C_mel), dtype float32, ground truth mel spectrogram.
lin_ref (Variable): shape(B, T, C_lin), dtype float32, ground truth linear spectrogram.
done_ref (Variable): shape(B, T), dtype float32, ground truth done flag.
input_lengths (Variable): shape(B, ), dtype: int, encoder valid lengths.
n_frames (Variable): shape(B, ), dtype: int, decoder valid lengths.
Returns:
Dict(str, Variable): details of loss.
"""
total_loss = 0.
mel_hyp, lin_hyp, attn_hyp, done_hyp = outputs
mel_ref, lin_ref, done_ref, input_lengths, n_frames = inputs
# n_frames # mel_lengths # decoder_lengths
max_frames = lin_hyp.shape[1]
max_mel_steps = max_frames // self.downsample_factor
# max_decoder_steps = max_mel_steps // self.r
# decoder_mask = F.sequence_mask(n_frames // self.downsample_factor //
# self.r,
# max_decoder_steps,
# dtype="float32")
mel_mask = F.sequence_mask(
n_frames // self.downsample_factor, max_mel_steps, dtype="float32")
lin_mask = F.sequence_mask(n_frames, max_frames, dtype="float32")
lin_hyp = lin_hyp[:, :-self.time_shift, :]
lin_ref = lin_ref[:, self.time_shift:, :]
lin_mask = lin_mask[:, self.time_shift:]
lin_l1_loss = self.l1_loss(
lin_hyp, lin_ref, lin_mask, priority_bin=self.priority_bin)
lin_bce_loss = self.binary_divergence(lin_hyp, lin_ref, lin_mask)
lin_loss = self.binary_divergence_weight * lin_bce_loss \
+ (1 - self.binary_divergence_weight) * lin_l1_loss
total_loss += lin_loss
mel_hyp = mel_hyp[:, :-self.time_shift, :]
mel_ref = mel_ref[:, self.time_shift:, :]
mel_mask = mel_mask[:, self.time_shift:]
mel_l1_loss = self.l1_loss(mel_hyp, mel_ref, mel_mask)
mel_bce_loss = self.binary_divergence(mel_hyp, mel_ref, mel_mask)
# print("=====>", mel_l1_loss.numpy()[0], mel_bce_loss.numpy()[0])
mel_loss = self.binary_divergence_weight * mel_bce_loss \
+ (1 - self.binary_divergence_weight) * mel_l1_loss
total_loss += mel_loss
attn_loss = self.attention_loss(attn_hyp,
input_lengths.numpy(),
n_frames.numpy() //
(self.downsample_factor * self.r))
total_loss += attn_loss
done_loss = self.done_loss(done_hyp, done_ref)
total_loss += done_loss
losses = {
"loss": total_loss,
"mel/mel_loss": mel_loss,
"mel/l1_loss": mel_l1_loss,
"mel/bce_loss": mel_bce_loss,
"lin/lin_loss": lin_loss,
"lin/l1_loss": lin_l1_loss,
"lin/bce_loss": lin_bce_loss,
"done": done_loss,
"attn": attn_loss,
}
return losses
```
#### File: models/fastspeech/utils.py
```python
import numpy as np
def get_alignment(attn_probs, mel_lens, n_head):
max_F = 0
assert attn_probs[0].shape[0] % n_head == 0
batch_size = int(attn_probs[0].shape[0] // n_head)
for i in range(len(attn_probs)):
multi_attn = attn_probs[i].numpy()
for j in range(n_head):
attn = multi_attn[j * batch_size:(j + 1) * batch_size]
F = score_F(attn)
if max_F < F:
max_F = F
max_attn = attn
alignment = compute_duration(max_attn, mel_lens)
return alignment, max_attn
def score_F(attn):
max = np.max(attn, axis=-1)
mean = np.mean(max)
return mean
def compute_duration(attn, mel_lens):
alignment = np.zeros([attn.shape[2]])
#for i in range(attn.shape[0]):
for j in range(mel_lens):
max_index = np.argmax(attn[0, j])
alignment[max_index] += 1
return alignment
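# Usage sketch (illustrative only): a hand-made alignment over 4 mel frames and
# 3 text tokens collapses to per-token durations [2, 1, 1].
def _compute_duration_demo():
    attn = np.zeros((1, 4, 3), dtype=np.float32)
    attn[0, 0, 0] = attn[0, 1, 0] = 1.0
    attn[0, 2, 1] = 1.0
    attn[0, 3, 2] = 1.0
    print(compute_duration(attn, mel_lens=4))  # [2. 1. 1.]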
```
#### File: models/transformer_tts/encoderprenet.py
```python
import math
from parakeet.g2p.text.symbols import symbols
import paddle.fluid.dygraph as dg
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from parakeet.modules.customized import Conv1D
import numpy as np
class EncoderPrenet(dg.Layer):
def __init__(self, embedding_size, num_hidden, use_cudnn=True):
""" Encoder prenet layer of TransformerTTS.
Args:
embedding_size (int): the size of embedding.
num_hidden (int): the size of hidden layer in network.
use_cudnn (bool, optional): use cudnn or not. Defaults to True.
"""
super(EncoderPrenet, self).__init__()
self.embedding_size = embedding_size
self.num_hidden = num_hidden
self.use_cudnn = use_cudnn
self.embedding = dg.Embedding(
size=[len(symbols), embedding_size],
padding_idx=0,
param_attr=fluid.initializer.Normal(
loc=0.0, scale=1.0))
self.conv_list = []
k = math.sqrt(1.0 / embedding_size)
self.conv_list.append(
Conv1D(
num_channels=embedding_size,
num_filters=num_hidden,
filter_size=5,
padding=int(np.floor(5 / 2)),
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.XavierInitializer()),
bias_attr=fluid.ParamAttr(
initializer=fluid.initializer.Uniform(
low=-k, high=k)),
use_cudnn=use_cudnn))
k = math.sqrt(1.0 / num_hidden)
for _ in range(2):
self.conv_list.append(
Conv1D(
num_channels=num_hidden,
num_filters=num_hidden,
filter_size=5,
padding=int(np.floor(5 / 2)),
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.XavierInitializer()),
bias_attr=fluid.ParamAttr(
initializer=fluid.initializer.Uniform(
low=-k, high=k)),
use_cudnn=use_cudnn))
for i, layer in enumerate(self.conv_list):
self.add_sublayer("conv_list_{}".format(i), layer)
self.batch_norm_list = [
dg.BatchNorm(
num_hidden, data_layout='NCHW') for _ in range(3)
]
for i, layer in enumerate(self.batch_norm_list):
self.add_sublayer("batch_norm_list_{}".format(i), layer)
k = math.sqrt(1.0 / num_hidden)
self.projection = dg.Linear(
num_hidden,
num_hidden,
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.XavierInitializer()),
bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Uniform(
low=-k, high=k)))
def forward(self, x):
"""
Prepare encoder input.
Args:
x (Variable): shape(B, T_text), dtype float32, the input character, where T_text means the timesteps of input text.
Returns:
(Variable): shape(B, T_text, C), the encoder prenet output.
"""
x = self.embedding(x)
x = layers.transpose(x, [0, 2, 1])
for batch_norm, conv in zip(self.batch_norm_list, self.conv_list):
x = layers.dropout(
layers.relu(batch_norm(conv(x))),
0.2,
dropout_implementation='upscale_in_train')
x = layers.transpose(x, [0, 2, 1]) #(N,T,C)
x = self.projection(x)
return x
```
#### File: models/transformer_tts/utils.py
```python
import numpy as np
import librosa
import os, copy
from scipy import signal
import paddle.fluid.layers as layers
def get_positional_table(d_pos_vec, n_position=1024):
position_enc = np.array(
[[pos / np.power(10000, 2 * i / d_pos_vec) for i in range(d_pos_vec)]
if pos != 0 else np.zeros(d_pos_vec) for pos in range(n_position)])
position_enc[1:, 0::2] = np.sin(position_enc[1:, 0::2]) # dim 2i
position_enc[1:, 1::2] = np.cos(position_enc[1:, 1::2]) # dim 2i+1
return position_enc
def get_sinusoid_encoding_table(n_position, d_hid, padding_idx=None):
''' Sinusoid position encoding table '''
def cal_angle(position, hid_idx):
return position / np.power(10000, 2 * (hid_idx // 2) / d_hid)
def get_posi_angle_vec(position):
return [cal_angle(position, hid_j) for hid_j in range(d_hid)]
sinusoid_table = np.array(
[get_posi_angle_vec(pos_i) for pos_i in range(n_position)])
sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i
sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1
if padding_idx is not None:
# zero vector for padding dimension
sinusoid_table[padding_idx] = 0.
return sinusoid_table
def get_non_pad_mask(seq, num_head, dtype):
mask = layers.cast(seq != 0, dtype=dtype)
mask = layers.unsqueeze(mask, axes=[-1])
mask = layers.expand(mask, [num_head, 1, 1])
return mask
def get_attn_key_pad_mask(seq_k, num_head, dtype):
''' For masking out the padding part of key sequence. '''
# Expand to fit the shape of key query attention matrix.
padding_mask = layers.cast(seq_k == 0, dtype=dtype) * -1e30
padding_mask = layers.unsqueeze(padding_mask, axes=[1])
padding_mask = layers.expand(padding_mask, [num_head, 1, 1])
return padding_mask
def get_dec_attn_key_pad_mask(seq_k, num_head, dtype):
''' For masking out the padding part of key sequence. '''
# Expand to fit the shape of key query attention matrix.
padding_mask = layers.cast(seq_k == 0, dtype=dtype)
padding_mask = layers.unsqueeze(padding_mask, axes=[1])
len_k = seq_k.shape[1]
triu = layers.triu(
layers.ones(
shape=[len_k, len_k], dtype=dtype), diagonal=1)
padding_mask = padding_mask + triu
padding_mask = layers.cast(
padding_mask != 0, dtype=dtype) * -1e30 #* (-2**32 + 1)
padding_mask = layers.expand(padding_mask, [num_head, 1, 1])
return padding_mask
def guided_attention(N, T, g=0.2):
    '''Guided attention. Refer to page 3 of the paper.'''
W = np.zeros((N, T), dtype=np.float32)
for n_pos in range(W.shape[0]):
for t_pos in range(W.shape[1]):
W[n_pos, t_pos] = 1 - np.exp(-(t_pos / float(T) - n_pos / float(N))
**2 / (2 * g * g))
return W
def cross_entropy(input, label, position_weight=1.0, epsilon=1e-30):
output = -1 * label * layers.log(input + epsilon) - (
1 - label) * layers.log(1 - input + epsilon)
output = output * (label * (position_weight - 1) + 1)
return layers.reduce_sum(output, dim=[0, 1])
```
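A quick standalone check of the positional-encoding helper above; this is an added sketch that only assumes numpy and re-creates the same table construction so it can run on its own:
```python
import numpy as np

def sinusoid_table(n_position, d_hid, padding_idx=None):
    # Same construction as get_sinusoid_encoding_table() in utils.py.
    angles = np.array([[pos / np.power(10000, 2 * (j // 2) / d_hid)
                        for j in range(d_hid)] for pos in range(n_position)])
    angles[:, 0::2] = np.sin(angles[:, 0::2])  # dim 2i
    angles[:, 1::2] = np.cos(angles[:, 1::2])  # dim 2i+1
    if padding_idx is not None:
        angles[padding_idx] = 0.
    return angles

table = sinusoid_table(n_position=6, d_hid=8, padding_idx=0)
print(table.shape)   # (6, 8)
print(table[0])      # all zeros, because padding_idx=0
print(table[1, :2])  # [sin(1), cos(1)] for the lowest frequency pair
```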
#### File: parakeet/utils/layer_tools.py
```python
import numpy as np
import paddle.fluid.dygraph as dg
def summary(layer):
num_params = num_elements = 0
print("layer summary:")
for name, param in layer.state_dict().items():
print("{}|{}|{}".format(name, param.shape, np.prod(param.shape)))
num_elements += np.prod(param.shape)
num_params += 1
print("layer has {} parameters, {} elements.".format(num_params,
num_elements))
def freeze(layer):
for param in layer.parameters():
param.trainable = False
def unfreeze(layer):
for param in layer.parameters():
param.trainable = True
``` |
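A minimal usage sketch for these helpers; it is an addition that assumes a PaddlePaddle 1.x dygraph environment (the same dg.Linear API used elsewhere in this repository) and that it runs in the same module as layer_tools.py:
```python
import paddle.fluid as fluid
import paddle.fluid.dygraph as dg

with fluid.dygraph.guard():
    layer = dg.Linear(4, 3)  # any dg.Layer works here
    summary(layer)           # prints each parameter name, shape and element count
    freeze(layer)
    print(all(not p.trainable for p in layer.parameters()))  # True
    unfreeze(layer)
    print(all(p.trainable for p in layer.parameters()))      # True
```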
{
"source": "jiaxiaochu/Crawler",
"score": 3
} |
#### File: Crawler/about_Pyppeteer/info.py
```python
import asyncio
import pyppeteer
async def main():
browser = await pyppeteer.launch(
headless=False,
defaultViewport={'width': 1366, 'height': 668},
        args=['--window-size=1366,768']
)
page = await browser.newPage()
url = 'http://show.ybccode.com/travel/'
await page.goto(url)
await asyncio.sleep(1)
await page.click('.signIn')
await asyncio.sleep(1)
await page.type('#uName', 'admin')
await asyncio.sleep(1)
await page.type('#uPass', '<PASSWORD>')
await asyncio.sleep(1)
# await page.hover('.handler')
# await page.mouse.down()
# await page.mouse.move(828, 0)
# await page.mouse.up()
    # action =  # unfinished slider-drag setup; see the commented mouse.down/move/up calls above
await asyncio.sleep(1)
await page.click('#loginBtn')
await asyncio.sleep(5)
    # Take a screenshot of the page
# await page.screenshot(path='./成功打开页面截图2.jpg', fullPage=True)
description = await page.querySelectorAll('.description')
# print(description)
    # Approach 1: get the text content of the page
# for scene in description:
# text_object = await scene.getProperty('textContent')
# # print(text_object)
# res = await text_object.jsonValue()
# print(res)
    # Approach 2: get the text content of the page
# for scene in description:
# res = await(await scene.getProperty('textContent')).jsonValue()
# print(res)
    # Get the name of each scenic spot
for scene in description:
h5 = await scene.querySelector('h5')
# print(h5)
name = await (await h5.getProperty('textContent')).jsonValue()
print(name)
await page.close()
await browser.close()
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
# asyncio.run(main())
loop.close()
```
#### File: jiaxiaochu/Crawler/Universities_Ranking.py
```python
import csv
import os
import requests
import pandas
from bs4 import BeautifulSoup
all_Universities = []
def getHTMLText(url):
try:
r = requests.get(url, timeout=30)
r.raise_for_status()
r.encoding = 'utf-8'
return r.text
except:
return ""
def fillUnivList(soup):
data = soup.find_all('tr')
for tr in data:
ltd = tr.find_all('td')
# print(ltd)
if len(ltd) == 0:
continue
# singleUniv = []
info_list = []
for td in ltd:
# print(td.string)
data = td.string
info_list.append(data)
        print(info_list[0], info_list[1], info_list[2], info_list[3])
        all_Universities.append(info_list)  # collect the row so storing_data()/writercsv() can write it
# singleUniv.append(td.string)
# allUniv.append(singleUniv)
# print(singleUniv[0], singleUniv[1], singleUniv[2], singleUniv[3])
def storing_data(title, num):  # the num argument controls how many scraped rows are written to the file
    """Write the scraped rows to a CSV file."""
with open('./Universities_Ranking3.csv', 'a', newline='')as file:
csv_write = csv.writer(file, dialect='excel')
csv_write.writerow(title)
for i in range(num):
u = all_Universities[i]
csv_write.writerow(u)
def writercsv(save_road, num, title):
if os.path.isfile(save_road):
with open(save_road, 'a', newline='')as f:
csv_write = csv.writer(f, dialect='excel')
for i in range(num):
u = all_Universities[i]
csv_write.writerow(u)
else:
with open(save_road, 'w', newline='')as f:
csv_write = csv.writer(f, dialect='excel')
csv_write.writerow(title)
for i in range(num):
u = all_Universities[i]
csv_write.writerow(u)
def main():
url = 'http://www.zuihaodaxue.cn/zuihaodaxuepaiming2019.html'
html = getHTMLText(url)
soup = BeautifulSoup(html, "html.parser")
fillUnivList(soup)
title = ["排名", "学校名称", "省市", "总分"]
# save_road = "./Universities_Ranking3.csv"
# writercsv(save_road, 10, title)
storing_data(title, 100)
if __name__ == '__main__':
main()
``` |
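pandas is imported in this script but never used; as an added sketch (not part of the original), the same rows could be written with a DataFrame once all_Universities has been filled by fillUnivList():
```python
import pandas as pd

def storing_data_with_pandas(title, num, path='./Universities_Ranking3.csv'):
    # Build a DataFrame from the first `num` scraped rows and write it in one call.
    df = pd.DataFrame(all_Universities[:num], columns=title)
    df.to_csv(path, index=False, encoding='utf-8-sig')
```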
{
"source": "jiaxiaochu/spider",
"score": 3
} |
#### File: spider/00-Knowledge-review/about_lambda.py
```python
import time
#
#
# def time1():
# print(time.time())
#
# time1()
now = lambda: time.time()
print(now())
# def do_time():
# print(time.time())
# do_time()
```
#### File: spider/about_gevent/second.py
```python
from gevent import monkey  # the monkey module patches blocking calls so the program can run asynchronously
monkey.patch_all()
import requests, time, gevent
start_time = time.time()
url_list = [
# 'https://www.google.com/',
'https://www.baidu.com/',
'https://www.sina.com.cn/',
'https://www.json.cn/',
'https://www.qq.com/',
'https://www.163.com/',
]
def crawler(url):
response = requests.get(url)
print("当前请求的网址是:{},请求状态:{}".format(url, response.status_code))
task_list = []
for url in url_list:
    # Create a task that runs crawler() via gevent.spawn().
    task = gevent.spawn(crawler, url)
    task_list.append(task)  # add the task to the task list
# Run every task in the list with gevent.joinall(), i.e. start crawling the sites.
gevent.joinall(task_list)
end_time = time.time()
print("请求所用时间:%s" % (end_time - start_time))
```
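For comparison, here is an added sketch of the same concurrent-fetch pattern using only the standard library's concurrent.futures instead of gevent:
```python
import time
from concurrent.futures import ThreadPoolExecutor, as_completed

import requests

url_list = [
    'https://www.baidu.com/',
    'https://www.sina.com.cn/',
    'https://www.qq.com/',
]

def crawler(url):
    response = requests.get(url, timeout=10)
    return url, response.status_code

start_time = time.time()
with ThreadPoolExecutor(max_workers=5) as pool:
    futures = [pool.submit(crawler, url) for url in url_list]
    for future in as_completed(futures):
        url, status = future.result()
        print('fetched {} with status {}'.format(url, status))
print('elapsed: %.2fs' % (time.time() - start_time))
```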
#### File: spider/complex/f.py
```python
import requests
from lxml import etree
# Return an empty string if extraction failed, otherwise take the first matched value
def info(list_name):
if list_name == []:
return ''
else:
return list_name[0]
# Extract the data with XPath
def get_data(url, headers):
r = requests.get(url, headers=headers)
html = etree.HTML(r.text)
# print(html)
books = html.xpath('//tr[@class="item"]')
for book in books:
title = book.xpath('./td[2]/div[1]/a/@title')
print(info(title))
# link = book.xpath('./td[2]/div[1]/a/@href')
# num = book.xpath('./td[2]/div[2]/span[2]/text()')
# introduce = book.xpath('./td[2]/p[2]/span/text()')
# print(info(title), info(num), info(introduce), info(link))
if __name__ == "__main__":
for i in range(10):
url = 'https://book.douban.com/top250?start=' + str(i * 25)
headers = {'User-Agent': 'Mozilla/5.0'}
get_data(url, headers)
``` |
{
"source": "jiaxiaolei/my_snap_demo",
"score": 2
} |
#### File: bcloud-3.9.1/bcloud/CloudPage.py
```python
import os
from gi.repository import GObject
from gi.repository import Gtk
from gi.repository import Pango
from bcloud import Config
_ = Config._
from bcloud import decoder
from bcloud.BTBrowserDialog import BTBrowserDialog
from bcloud.FolderBrowserDialog import FolderBrowserDialog
from bcloud.VCodeDialog import VCodeDialog
from bcloud import gutil
from bcloud.log import logger
from bcloud import pcs
from bcloud import util
(TASKID_COL, NAME_COL, PATH_COL, SOURCEURL_COL, SIZE_COL, FINISHED_COL,
STATUS_COL, PERCENT_COL, HUMANSIZE_COL, TOOLTIP_COL) = list(range(10))
Status = (0, 1, )
StatusNames = (_('FINISHED'), _('DOWNLOADING'), )
class CloudPage(Gtk.Box):
icon_name = 'cloud-symbolic'
disname = _('Cloud')
name = 'CloudPage'
tooltip = _('Cloud download')
first_run = True
def __init__(self, app):
super().__init__(orientation=Gtk.Orientation.VERTICAL)
self.app = app
if Config.GTK_GE_312:
self.headerbar = Gtk.HeaderBar()
self.headerbar.props.show_close_button = True
self.headerbar.props.has_subtitle = False
self.headerbar.set_title(self.disname)
# link button
link_button = Gtk.Button()
link_img = Gtk.Image.new_from_icon_name('document-new-symbolic',
Gtk.IconSize.SMALL_TOOLBAR)
link_button.set_image(link_img)
link_button.set_tooltip_text(_('Create new cloud task'))
link_button.connect('clicked', self.on_link_button_clicked)
self.headerbar.pack_start(link_button)
# open button
open_button = Gtk.Button()
open_img = Gtk.Image.new_from_icon_name('document-open-symbolic',
Gtk.IconSize.SMALL_TOOLBAR)
open_button.set_image(open_img)
open_button.set_tooltip_text(_('Open target directory'))
open_button.connect('clicked', self.on_open_button_clicked)
self.headerbar.pack_start(open_button)
# remove box
right_box = Gtk.Box()
right_box_context = right_box.get_style_context()
right_box_context.add_class(Gtk.STYLE_CLASS_RAISED)
right_box_context.add_class(Gtk.STYLE_CLASS_LINKED)
self.headerbar.pack_end(right_box)
remove_button = Gtk.Button()
delete_img = Gtk.Image.new_from_icon_name('list-remove-symbolic',
Gtk.IconSize.SMALL_TOOLBAR)
remove_button.set_image(delete_img)
remove_button.set_tooltip_text(_('Remove selected tasks'))
remove_button.connect('clicked', self.on_remove_button_clicked)
right_box.pack_start(remove_button, False, False, 0)
clear_button = Gtk.Button()
clear_img = Gtk.Image.new_from_icon_name('list-remove-all-symbolic',
Gtk.IconSize.SMALL_TOOLBAR)
clear_button.set_image(clear_img)
clear_button.set_tooltip_text(_('Remove completed cloud tasks'))
clear_button.connect('clicked', self.on_clear_button_clicked)
right_box.pack_start(clear_button, False, False, 0)
reload_button = Gtk.Button()
reload_img = Gtk.Image.new_from_icon_name('view-refresh-symbolic',
Gtk.IconSize.SMALL_TOOLBAR)
reload_button.set_image(reload_img)
reload_button.set_tooltip_text(_('Reload (F5)'))
reload_button.connect('clicked', self.on_reload_button_clicked)
self.headerbar.pack_end(reload_button)
# show loading process
self.loading_spin = Gtk.Spinner()
self.headerbar.pack_end(self.loading_spin)
else:
control_box = Gtk.Box()
self.pack_start(control_box, False, False, 0)
link_button = Gtk.Button.new_with_label(_('New Link Task'))
link_button.connect('clicked', self.on_link_button_clicked)
control_box.pack_start(link_button, False, False, 0)
reload_button = Gtk.Button.new_with_label(_('Reload (F5)'))
reload_button.props.margin_left = 40
reload_button.connect('clicked', self.on_reload_button_clicked)
control_box.pack_start(reload_button, False, False, 0)
open_button = Gtk.Button.new_with_label(_('Open Directory'))
open_button.connect('clicked', self.on_open_button_clicked)
control_box.pack_start(open_button, False, False, 0)
clear_button = Gtk.Button.new_with_label(_('Clear'))
clear_button.set_tooltip_text(_('Remove completed cloud tasks'))
clear_button.connect('clicked', self.on_clear_button_clicked)
control_box.pack_end(clear_button, False, False, 0)
remove_button = Gtk.Button.new_with_label(_('Remove'))
remove_button.set_tooltip_text(_('Remove'))
remove_button.connect('clicked', self.on_remove_button_clicked)
control_box.pack_end(remove_button, False, False, 0)
# show loading process
self.loading_spin = Gtk.Spinner()
self.loading_spin.props.margin_right = 5
control_box.pack_end(self.loading_spin, False, False, 0)
scrolled_win = Gtk.ScrolledWindow()
self.pack_start(scrolled_win, True, True, 0)
# task_id, name, path, source_url, size, finished_size,
# status, percent, human_size, tooltip
self.liststore = Gtk.ListStore(str, str, str, str, GObject.TYPE_INT64,
GObject.TYPE_INT64, int, int, str, str)
self.treeview = Gtk.TreeView(model=self.liststore)
self.treeview.set_headers_clickable(True)
self.treeview.set_reorderable(True)
self.treeview.set_search_column(NAME_COL)
self.treeview.set_tooltip_column(TOOLTIP_COL)
self.selection = self.treeview.get_selection()
scrolled_win.add(self.treeview)
name_cell = Gtk.CellRendererText(ellipsize=Pango.EllipsizeMode.END,
ellipsize_set=True)
name_col = Gtk.TreeViewColumn(_('Name'), name_cell, text=NAME_COL)
name_col.set_expand(True)
self.treeview.append_column(name_col)
name_col.set_sort_column_id(NAME_COL)
self.liststore.set_sort_func(NAME_COL, gutil.tree_model_natsort)
size_cell = Gtk.CellRendererText()
size_col = Gtk.TreeViewColumn(_('Size'), size_cell, text=HUMANSIZE_COL)
self.treeview.append_column(size_col)
size_col.props.min_width = 145
size_col.set_sort_column_id(SIZE_COL)
percent_cell = Gtk.CellRendererProgress()
percent_col = Gtk.TreeViewColumn(_('Progress'), percent_cell,
value=PERCENT_COL)
self.treeview.append_column(percent_col)
percent_col.props.min_width = 145
percent_col.set_sort_column_id(PERCENT_COL)
def on_page_show(self):
if Config.GTK_GE_312:
self.app.window.set_titlebar(self.headerbar)
self.headerbar.show_all()
def check_first(self):
if self.first_run:
self.first_run = False
self.load()
def load(self):
        '''Fetch the current list of offline (cloud) download tasks.'''
def on_list_task(info, error=None):
self.loading_spin.stop()
self.loading_spin.hide()
if not info:
self.app.toast(_('Network error, info is empty'))
if error or not info:
logger.error('CloudPage.load: %s, %s' % (info, error))
return
tasks = info['task_info']
for task in tasks:
self.liststore.append([
task['task_id'],
task['task_name'],
task['save_path'],
task['source_url'],
0,
0,
int(task['status']),
0,
'0',
gutil.escape(task['save_path'])
])
self.scan_tasks()
nonlocal start
start = start + len(tasks)
if info['total'] > start:
gutil.async_call(pcs.cloud_list_task, self.app.cookie,
self.app.tokens, start, callback=on_list_task)
self.loading_spin.start()
self.loading_spin.show_all()
start = 0
gutil.async_call(pcs.cloud_list_task, self.app.cookie, self.app.tokens,
start, callback=on_list_task)
def reload(self, *args, **kwds):
self.liststore.clear()
self.load()
def get_row_by_task_id(self, task_id):
        '''Return the TreeModelRow of this task, or None if it does not exist.'''
for row in self.liststore:
if row and row[TASKID_COL] == task_id:
return row
return None
def scan_tasks(self):
        '''Poll the status of the offline download tasks periodically, e.g. every 10 seconds.'''
def update_task_status(info, error=None):
if error or not info:
logger.error('CloudPage.scan_tasks: %s, %s' % (info, error))
return
tasks = info['task_info']
for row in self.liststore:
if not row or row[TASKID_COL] not in tasks:
continue
task = tasks[row[TASKID_COL]]
row[SIZE_COL] = int(task['file_size'])
row[FINISHED_COL] = int(task['finished_size'])
row[STATUS_COL] = int(task['status'])
if row[SIZE_COL]:
row[PERCENT_COL] = int(
row[FINISHED_COL] / row[SIZE_COL] * 100)
size = util.get_human_size(row[SIZE_COL])[0]
finished_size = util.get_human_size(row[FINISHED_COL])[0]
if row[SIZE_COL] == row[FINISHED_COL]:
row[HUMANSIZE_COL] = size
else:
row[HUMANSIZE_COL] = '{0}/{1}'.format(finished_size, size)
task_ids = [row[TASKID_COL] for row in self.liststore]
if task_ids:
gutil.async_call(pcs.cloud_query_task, self.app.cookie,
self.app.tokens, task_ids,
callback=update_task_status)
# Open API
def add_cloud_bt_task(self, source_url, save_path=None):
        '''Fetch the torrent from the server and create an offline download task.
        source_url - absolute path of the torrent on the server, or a magnet link.
        save_path - directory to save into; if None, a folder-chooser dialog pops up.
'''
def check_vcode(info, error=None):
if error or not info:
logger.error('CloudPage.check_vcode: %s, %s' % (info, error))
return
if info.get('error_code', -1) != 0:
logger.error('CloudPage.check_vcode: %s, %s' % (info, error))
if 'task_id' in info or info['error_code'] == 0:
self.reload()
elif info['error_code'] == -19:
vcode_dialog = VCodeDialog(self, self.app, info)
response = vcode_dialog.run()
vcode_input = vcode_dialog.get_vcode()
vcode_dialog.destroy()
if response != Gtk.ResponseType.OK:
return
gutil.async_call(pcs.cloud_add_bt_task, self.app.cookie,
self.app.tokens, source_url, save_path,
selected_idx, file_sha1, info['vcode'],
vcode_input, callback=check_vcode)
else:
self.app.toast(_('Error: {0}').format(info['error_msg']))
self.check_first()
if not save_path:
folder_browser = FolderBrowserDialog(self, self.app, _('Save to..'))
response = folder_browser.run()
save_path = folder_browser.get_path()
folder_browser.destroy()
if response != Gtk.ResponseType.OK:
return
if not save_path:
return
bt_browser = BTBrowserDialog(self, self.app, _('Choose..'),
source_url, save_path)
response = bt_browser.run()
selected_idx, file_sha1 = bt_browser.get_selected()
bt_browser.destroy()
if response != Gtk.ResponseType.OK or not selected_idx:
return
gutil.async_call(pcs.cloud_add_bt_task, self.app.cookie,
self.app.tokens, source_url, save_path, selected_idx,
file_sha1, callback=check_vcode)
self.app.blink_page(self.app.cloud_page)
# Open API
def add_link_task(self):
        '''Create ordinary link-download tasks.'''
def do_add_link_task(source_url):
def on_link_task_added(info, error=None):
if error or not info:
logger.error('CloudPage.do_add_link_task: %s, %s' %
(info, error))
self.app.toast(_('Failed to parse download link'))
return
if info.get('error_code', -1) != 0:
logger.error('CloudPage.do_add_link_task: %s, %s' %
(info, error))
if 'task_id' in info or info['error_code'] == 0:
self.reload()
elif info['error_code'] == -19:
vcode = info['vcode']
vcode_dialog = VCodeDialog(self, self.app, info)
response = vcode_dialog.run()
vcode_input = vcode_dialog.get_vcode()
vcode_dialog.destroy()
if response != Gtk.ResponseType.OK:
return
gutil.async_call(pcs.cloud_add_link_task, self.app.cookie,
self.app.tokens, source_url, save_path,
vcode, vcode_input,
callback=on_link_task_added)
else:
self.app.toast(_('Error: {0}').format(info['error_msg']))
gutil.async_call(pcs.cloud_add_link_task, self.app.cookie,
self.app.tokens, source_url, save_path,
callback=on_link_task_added)
self.check_first()
dialog = Gtk.Dialog(_('Add new link tasks'), self.app.window,
Gtk.DialogFlags.MODAL,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_OK, Gtk.ResponseType.OK))
dialog.set_border_width(10)
dialog.set_default_size(480, 300)
dialog.set_default_response(Gtk.ResponseType.OK)
box = dialog.get_content_area()
scrolled_win = Gtk.ScrolledWindow()
box.pack_start(scrolled_win, True, True, 0)
links_buf = Gtk.TextBuffer()
links_tv = Gtk.TextView.new_with_buffer(links_buf)
links_tv.set_tooltip_text(_('Paste links here, line by line'))
scrolled_win.add(links_tv)
infobar = Gtk.InfoBar()
infobar.set_message_type(Gtk.MessageType.INFO)
box.pack_start(infobar, False, False, 5)
info_content = infobar.get_content_area()
info_label = Gtk.Label.new(
_('Support http/https/ftp/thunder/qqdl/flashget/eMule/Magnet format'))
info_content.pack_start(info_label, False, False, 0)
box.show_all()
response = dialog.run()
contents = gutil.text_buffer_get_all_text(links_buf)
dialog.destroy()
if response != Gtk.ResponseType.OK or not contents:
return
link_tasks = []
bt_tasks = []
for source_url in contents.split('\n'):
source_url = source_url.strip()
if not source_url:
continue
if source_url.startswith('magnet'):
bt_tasks.append(source_url)
else:
priv_url = decoder.decode(source_url)
if priv_url:
link_tasks.append(priv_url)
else:
link_tasks.append(source_url)
folder_browser = FolderBrowserDialog(self, self.app, _('Save to..'))
response = folder_browser.run()
save_path = folder_browser.get_path()
folder_browser.destroy()
if response != Gtk.ResponseType.OK or not save_path:
return
for source_url in link_tasks:
do_add_link_task(source_url)
for source_url in bt_tasks:
self.add_cloud_bt_task(source_url, save_path)
def on_bt_button_clicked(self, button):
self.add_local_bt_task()
def on_link_button_clicked(self, button):
self.add_link_task()
def on_reload_button_clicked(self, button):
self.reload()
def on_open_button_clicked(self, button):
model, tree_paths = self.selection.get_selected_rows()
# tree_paths might be None or a list
if not tree_paths or len(tree_paths) != 1:
return
tree_path = tree_paths[0]
path = model[tree_path][PATH_COL]
dir_name = os.path.split(path)[0]
self.app.home_page.load(dir_name)
self.app.switch_page(self.app.home_page)
def on_remove_button_clicked(self, button):
def on_task_removed(resp, error=None):
self.reload()
model, tree_paths = self.selection.get_selected_rows()
if not tree_paths or len(tree_paths) != 1:
return
tree_path = tree_paths[0]
task_id = model[tree_path][TASKID_COL]
self.loading_spin.start()
self.loading_spin.show_all()
if model[tree_path][STATUS_COL] == Status[0]:
gutil.async_call(pcs.cloud_delete_task, self.app.cookie,
self.app.tokens, task_id, callback=on_task_removed)
else:
gutil.async_call(pcs.cloud_cancel_task, self.app.cookie,
self.app.tokens, task_id, callback=self.reload)
def on_clear_button_clicked(self, button):
def on_clear_task(info, error=None):
self.reload()
gutil.async_call(pcs.cloud_clear_task, self.app.cookie,
self.app.tokens, callback=on_clear_task)
```
#### File: bcloud-3.9.1/bcloud/decoder.py
```python
import base64
import traceback
from bcloud.log import logger
def decode_flashget(link):
try:
l = base64.decodestring(link[11:len(link)-7].encode()).decode()
except ValueError:
logger.warn(traceback.format_exc())
l = base64.decodestring(link[11:len(link)-7].encode()).decode('gbk')
return l[10:len(l)-10]
def decode_thunder(link):
# AAhttp://127.0.0.1
    if link[10:].startswith('QUFodHRwOi8vMTI3LjAuMC4'):
return ''
try:
l = base64.decodestring(link[10:].encode()).decode('gbk')
except ValueError:
logger.warn(traceback.format_exc())
l = base64.decodestring(link[10:].encode()).decode()
return l[2:-2]
def decode_qqdl(link):
try:
return base64.decodestring(link[7:].encode()).decode()
except ValueError:
logger.warn(traceback.format_exc())
return base64.decodestring(link[7:].encode()).decode('gbk')
_router = {
'flashge': decode_flashget,
'thunder': decode_thunder,
'qqdl://': decode_qqdl,
}
def decode(link):
if not isinstance(link, str) or len(link) < 10:
logger.error('unknown link: %s' % link)
return ''
link_prefix = link[:7].lower()
if link_prefix in _router:
try:
return _router[link_prefix](link)
except ValueError:
logger.error(traceback.format_exc())
return ''
else:
logger.warn('unknown protocol: %s' % link)
return ''
```
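To illustrate the scheme handled by decode_thunder(), here is an added round-trip sketch; make_thunder_link() is a hypothetical helper built by hand, and the snippet assumes it runs in the same module as decode() above (whose base64.decodestring call exists on the pre-3.9 Python versions bcloud targets):
```python
import base64

def make_thunder_link(url):
    # Thunder wraps the plain URL as 'AA' + url + 'ZZ' and base64-encodes it.
    payload = ('AA' + url + 'ZZ').encode('gbk')
    return 'thunder://' + base64.b64encode(payload).decode()

link = make_thunder_link('http://example.com/file.zip')
print(link)
print(decode(link))  # -> http://example.com/file.zip
```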
#### File: bcloud-3.9.1/bcloud/FolderBrowserDialog.py
```python
from gi.repository import GLib
from gi.repository import Gtk
from bcloud import Config
_ = Config._
from bcloud import gutil
from bcloud import pcs
from bcloud.NewFolderDialog import NewFolderDialog
NAME_COL, PATH_COL, EMPTY_COL, LOADED_COL = list(range(4))
NUM = 100
class FolderBrowserDialog(Gtk.Dialog):
is_loading = False
def __init__(self, parent, app, title=_('Save to..')):
self.parent = parent
self.app = app
super().__init__(title, app.window, Gtk.DialogFlags.MODAL,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_OK, Gtk.ResponseType.OK))
self.set_default_size(440, 480)
self.set_border_width(10)
self.set_default_response(Gtk.ResponseType.OK)
box = self.get_content_area()
control_box = Gtk.Box()
box.pack_start(control_box, False, False, 0)
mkdir_button = Gtk.Button.new_with_label(_('Create Folder'))
control_box.pack_end(mkdir_button, False, False, 0)
mkdir_button.connect('clicked', self.on_mkdir_clicked)
reload_button = Gtk.Button.new_with_label(_('Reload'))
control_box.pack_end(reload_button, False, False, 5)
reload_button.connect('clicked', self.on_reload_clicked)
scrolled_win = Gtk.ScrolledWindow()
box.pack_start(scrolled_win, True, True, 5)
# disname, path, empty, loaded
self.treestore = Gtk.TreeStore(str, str, bool, bool)
self.treeview = Gtk.TreeView(model=self.treestore)
self.selection = self.treeview.get_selection()
scrolled_win.add(self.treeview)
icon_cell = Gtk.CellRendererPixbuf(icon_name='folder')
name_cell = Gtk.CellRendererText()
name_col = Gtk.TreeViewColumn(_('Folder'))
name_col.pack_start(icon_cell, False)
name_col.pack_start(name_cell, True)
if Config.GTK_LE_36:
name_col.add_attribute(name_cell, 'text', NAME_COL)
else:
name_col.set_attributes(name_cell, text=NAME_COL)
self.treeview.append_column(name_col)
self.treeview.connect('row-expanded', self.on_row_expanded)
box.show_all()
self.reset()
def reset(self):
self.treestore.clear()
root_iter = self.treestore.append(None, ['/', '/', False, False,])
GLib.timeout_add(500, self.list_dir, root_iter)
def list_dir(self, parent_iter):
if self.treestore[parent_iter][LOADED_COL]:
return
tree_path = self.treestore.get_path(parent_iter)
path = self.treestore[tree_path][PATH_COL]
first_child_iter = self.treestore.iter_nth_child(parent_iter, 0)
if (first_child_iter and
not self.treestore[first_child_iter][NAME_COL]):
self.treestore.remove(first_child_iter)
has_next = True
page_num = 1
while has_next:
infos = pcs.list_dir(self.app.cookie, self.app.tokens, path,
page=page_num, num=NUM)
page_num = page_num + 1
if not infos or infos.get('errno', -1) != 0:
has_next = False
return
if len(infos['list']) < NUM:
has_next = False
for pcs_file in infos['list']:
if not pcs_file['isdir']:
continue
if pcs_file['dir_empty']:
empty = True
else:
empty = False
item = self.treestore.append(parent_iter, [
pcs_file['server_filename'],
pcs_file['path'],
empty,
False,
])
                # Insert a temporary placeholder child node.
if not empty:
self.treestore.append(item,
['', pcs_file['path'], True, False])
self.treestore[parent_iter][LOADED_COL] = True
def get_path(self):
        '''Return the selected path, or the root directory if nothing is selected.'''
model, tree_iter = self.selection.get_selected()
if not tree_iter:
return '/'
else:
return model[tree_iter][PATH_COL]
def on_reload_clicked(self, button):
self.reset()
def on_mkdir_clicked(self, button):
path = self.get_path()
dialog = NewFolderDialog(self, self.app, path)
dialog.run()
dialog.destroy()
self.reset()
def on_row_expanded(self, treeview, tree_iter, tree_path):
if self.is_loading:
return
self.is_loading = True
self.list_dir(tree_iter)
self.is_loading = False
self.treeview.expand_row(tree_path, False)
```
#### File: bcloud-3.9.1/bcloud/hasher.py
```python
import hashlib
import os
import zlib
CHUNK = 2 ** 20
def crc(path):
_crc = 0
fh = open(path, 'rb')
while True:
chunk = fh.read(CHUNK)
if not chunk:
break
_crc = zlib.crc32(chunk, _crc)
fh.close()
return '%X' % (_crc & 0xFFFFFFFF)
def md5(path, start=0, stop=-1):
_md5 = hashlib.md5()
fh = open(path, 'rb')
if start > 0:
fh.seek(start)
if stop == -1:
stop = os.path.getsize(path)
pos = start
while pos < stop:
size = min(CHUNK, stop - pos)
chunk = fh.read(size)
if not chunk:
break
pos += len(chunk)
_md5.update(chunk)
fh.close()
return _md5.hexdigest()
def sha1(path):
_sha1 = hashlib.sha1()
fh = open(path, 'rb')
while True:
chunk = fh.read(CHUNK)
if not chunk:
break
_sha1.update(chunk)
fh.close()
return _sha1.hexdigest()
def sha224(path):
_sha224 = hashlib.sha224()
fh = open(path, 'rb')
while True:
chunk = fh.read(CHUNK)
if not chunk:
break
_sha224.update(chunk)
fh.close()
return _sha224.hexdigest()
def sha256(path):
_sha256 = hashlib.sha256()
fh = open(path, 'rb')
while True:
chunk = fh.read(CHUNK)
if not chunk:
break
_sha256.update(chunk)
fh.close()
return _sha256.hexdigest()
def sha384(path):
_sha384 = hashlib.sha384()
fh = open(path, 'rb')
while True:
chunk = fh.read(CHUNK)
if not chunk:
break
_sha384.update(chunk)
fh.close()
return _sha384.hexdigest()
def sha512(path):
_sha512 = hashlib.sha512()
fh = open(path, 'rb')
while True:
chunk = fh.read(CHUNK)
if not chunk:
break
_sha512.update(chunk)
fh.close()
return _sha512.hexdigest()
```
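A small usage sketch for the helpers above; it is an addition that assumes it runs in the same module as hasher.py and uses only the standard library:
```python
import hashlib
import tempfile

# Write some test data to a temporary file.
with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b'hello bcloud' * 1024)
    path = tmp.name

# Chunked helpers defined above vs. hashing the whole file in one shot.
print(md5(path))
print(hashlib.md5(open(path, 'rb').read()).hexdigest())  # matches md5(path)
print(crc(path), sha1(path))

# md5 of the first 4 KiB only, e.g. for hashing an upload slice.
print(md5(path, start=0, stop=4096))
```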
#### File: bcloud-3.9.1/bcloud/Shutdown.py
```python
import os
import dbus
SESSION_BUS, SYSTEM_BUS = 0, 1
class Shutdown(object):
'''Shutdown the system after the current snapshot has finished.
This should work for KDE, Gnome, Unity, Cinnamon, XFCE, Mate and E17.
Note: this class is copied from `backintime` project.
'''
DBUS_SHUTDOWN = {
# Put unity dbus interface ahead of gnome
'unity': {
'bus': SESSION_BUS,
'service': 'com.canonical.Unity',
'objectPath': '/com/canonical/Unity/Session',
'method': 'Shutdown',
'interface': 'com.canonical.Unity.Session',
'arguments': (),
},
'gnome': {
'bus': SESSION_BUS,
'service': 'org.gnome.SessionManager',
'objectPath': '/org/gnome/SessionManager',
'method': 'Shutdown',
#methods Shutdown
# Reboot
# Logout
'interface': 'org.gnome.SessionManager',
'arguments': (),
#arg (only with Logout)
# 0 normal
# 1 no confirm
# 2 force
},
'kde': {
'bus': SESSION_BUS,
'service': 'org.kde.ksmserver',
'objectPath': '/KSMServer',
'method': 'logout',
'interface': 'org.kde.KSMServerInterface',
'arguments': (-1, 2, -1),
#1st arg -1 confirm
# 0 no confirm
#2nd arg -1 full dialog with default logout
# 0 logout
# 1 restart
# 2 shutdown
#3rd arg -1 wait 30sec
# 2 immediately
},
'xfce': {
'bus': SESSION_BUS,
'service': 'org.xfce.SessionManager',
'objectPath': '/org/xfce/SessionManager',
'method': 'Shutdown',
#methods Shutdown
# Restart
# Suspend (no args)
# Hibernate (no args)
# Logout (two args)
'interface': 'org.xfce.Session.Manager',
'arguments': (True, ),
#arg True allow saving
# False don't allow saving
#1nd arg (only with Logout)
# True show dialog
# False don't show dialog
#2nd arg (only with Logout)
# True allow saving
# False don't allow saving
},
'mate': {
'bus': SESSION_BUS,
'service': 'org.mate.SessionManager',
'objectPath': '/org/mate/SessionManager',
'method': 'Shutdown',
#methods Shutdown
# Logout
'interface': 'org.mate.SessionManager',
'arguments': ()
#arg (only with Logout)
# 0 normal
# 1 no confirm
# 2 force
},
'e17': {
'bus': SESSION_BUS,
'service': 'org.enlightenment.Remote.service',
'objectPath': '/org/enlightenment/Remote/RemoteObject',
'method': 'Halt',
#methods Halt -> Shutdown
# Reboot
# Logout
# Suspend
# Hibernate
'interface': 'org.enlightenment.Remote.Core',
'arguments': (),
},
'z_freed': {
'bus': SYSTEM_BUS,
'service': 'org.freedesktop.ConsoleKit',
'objectPath': '/org/freedesktop/ConsoleKit/Manager',
'method': 'Stop',
'interface': 'org.freedesktop.ConsoleKit.Manager',
'arguments': (),
},
}
def __init__(self):
self._proxy, self._args = self._prepair()
# Indicate if a valid dbus service is available to shutdown system.
self.can_shutdown = (self._proxy is not None)
def _prepair(self):
'''Try to connect to the given dbus services. If successful it will
return a callable dbus proxy and those arguments.
'''
try:
sessionbus = dbus.SessionBus()
systembus = dbus.SystemBus()
except:
return (None, None)
for dbus_props in self.DBUS_SHUTDOWN.values():
try:
if dbus_props['bus'] == SESSION_BUS:
bus = sessionbus
else:
bus = systembus
interface = bus.get_object(dbus_props['service'],
dbus_props['objectPath'])
proxy = interface.get_dbus_method(dbus_props['method'],
dbus_props['interface'])
return (proxy, dbus_props['arguments'])
except dbus.exceptions.DBusException:
continue
return (None, None)
def shutdown(self):
'''Call the dbus proxy to start the shutdown.'''
if self._proxy:
os.sync()
self._proxy(*self._args)
```
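The calling pattern used for this class elsewhere in bcloud looks roughly like the following added sketch (illustrative, not copied from the repo):
```python
shutdown = Shutdown()
if shutdown.can_shutdown:
    # Warning: this really powers the machine off via the detected D-Bus service.
    shutdown.shutdown()
else:
    print('no usable D-Bus shutdown service found')
```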
#### File: bcloud-3.9.1/bcloud/SigninDialog.py
```python
import json
import os
import time
from gi.repository import GLib
from gi.repository import Gtk
from bcloud import auth
from bcloud import Config
_ = Config._
from bcloud import gutil
from bcloud.log import logger
from bcloud.RequestCookie import RequestCookie
from bcloud import util
from bcloud import Widgets
DELTA = 1 * 24 * 60 * 60  # 1 day
class SigninVcodeDialog(Gtk.Dialog):
'''登陆时的验证码对话框'''
def __init__(self, parent, username, cookie, tokens, codeString, vcodetype):
super().__init__(_('Verification..'), parent, Gtk.DialogFlags.MODAL)
self.set_default_size(280, 130)
self.set_border_width(10)
self.username = username
self.cookie = cookie
self.tokens = tokens
self.codeString = codeString
self.vcodetype = vcodetype
box = self.get_content_area()
box.set_spacing(5)
self.vcode_img = Gtk.Image()
box.pack_start(self.vcode_img, True, True, 0)
button_box = Gtk.Box(spacing=5)
box.pack_start(button_box, True, True, 0)
self.vcode_entry = Gtk.Entry()
self.vcode_entry.connect('activate', self.check_entry)
button_box.pack_start(self.vcode_entry, True, True, 0)
if Config.GTK_GE_312:
vcode_refresh = Widgets.IconButton('view-refresh-symbolic')
else:
vcode_refresh = Gtk.Button.new_from_stock(Gtk.STOCK_REFRESH)
vcode_refresh.props.valign = Gtk.Align.CENTER
vcode_refresh.connect('clicked', self.on_vcode_refresh_clicked)
button_box.pack_start(vcode_refresh, False, False, 0)
# show loading process
self.loading_spin = Gtk.Spinner()
self.loading_spin.props.valign = Gtk.Align.CENTER
button_box.pack_start(self.loading_spin, False, False, 0)
vcode_confirm = Gtk.Button.new_from_stock(Gtk.STOCK_OK)
vcode_confirm.connect('clicked', self.on_vcode_confirm_clicked)
vcode_confirm.props.valign = Gtk.Align.END
box.pack_start(vcode_confirm, False, False, 10)
box.show_all()
self.loading_spin.hide()
gutil.async_call(auth.get_signin_vcode, cookie, codeString,
callback=self.update_img)
def get_vcode(self):
return self.vcode_entry.get_text()
def update_img(self, req_data, error=None):
if error or not req_data:
self.refresh_vcode()
logger.error('SigninDialog.update_img: %s, %s' % (req_data, error))
return
vcode_path = os.path.join(Config.get_tmp_path(self.username),
'bcloud-signin-vcode.jpg')
with open(vcode_path, 'wb') as fh:
fh.write(req_data)
self.vcode_img.set_from_file(vcode_path)
self.loading_spin.stop()
self.loading_spin.hide()
self.vcode_entry.set_sensitive(True)
def refresh_vcode(self):
def _refresh_vcode(info, error=None):
if not info or error:
logger.error('SigninVcode.refresh_vcode: %s, %s.' %
(info, error))
return
logger.debug('refresh vcode: %s' % info)
self.codeString = info['data']['verifyStr']
gutil.async_call(auth.get_signin_vcode, self.cookie,
self.codeString, callback=self.update_img)
self.loading_spin.start()
self.loading_spin.show_all()
self.vcode_entry.set_sensitive(False)
gutil.async_call(auth.refresh_signin_vcode, self.cookie, self.tokens,
self.vcodetype, callback=_refresh_vcode)
def check_entry(self, *args):
        '''A Chinese captcha is 2 characters long, an English one is 4.'''
if len(self.vcode_entry.get_text()) == 4 or len(self.vcode_entry.get_text()) == 2:
self.response(Gtk.ResponseType.OK)
def on_vcode_refresh_clicked(self, button):
self.refresh_vcode()
def on_vcode_confirm_clicked(self, button):
self.check_entry()
class SigninDialog(Gtk.Dialog):
profile = None
password_changed = False
def __init__(self, app, auto_signin=True):
super().__init__(_('Sign in now'), app.window, Gtk.DialogFlags.MODAL)
self.app = app
self.auto_signin = auto_signin
self.set_default_size(460, 260)
self.set_border_width(15)
self.conf = Config.load_conf()
self.profile = None
box = self.get_content_area()
box.set_spacing(8)
username_ls = Gtk.ListStore(str)
for username in self.conf['profiles']:
username_ls.append([username,])
self.username_combo = Gtk.ComboBox.new_with_entry()
self.username_combo.set_model(username_ls)
self.username_combo.set_entry_text_column(0)
self.username_combo.set_tooltip_text(_('Username/Email/Phone...'))
box.pack_start(self.username_combo, False, False, 0)
self.username_combo.connect('changed', self.on_username_changed)
self.password_entry = Gtk.Entry()
self.password_entry.set_placeholder_text(_('Password ..'))
self.password_entry.props.visibility = False
self.password_entry.connect('changed', self.on_password_entry_changed)
self.password_entry.connect('activate', self.on_password_entry_activate)
box.pack_start(self.password_entry, False, False, 0)
self.remember_check = Gtk.CheckButton.new_with_label(
_('Remember Password'))
self.remember_check.props.margin_top = 20
if Config.GTK_GE_312:
self.remember_check.props.margin_start = 20
else:
self.remember_check.props.margin_left = 20
box.pack_start(self.remember_check, False, False, 0)
self.remember_check.connect('toggled', self.on_remember_check_toggled)
self.signin_check = Gtk.CheckButton.new_with_label(
_('Signin Automatically'))
self.signin_check.set_sensitive(False)
if Config.GTK_GE_312:
self.signin_check.props.margin_start = 20
else:
self.signin_check.props.margin_left = 20
box.pack_start(self.signin_check, False, False, 0)
self.signin_check.connect('toggled', self.on_signin_check_toggled)
self.signin_button = Gtk.Button.new_with_label(_('Sign in'))
self.signin_button.props.margin_top = 10
self.signin_button.connect('clicked', self.on_signin_button_clicked)
box.pack_start(self.signin_button, False, False, 0)
self.infobar = Gtk.InfoBar()
self.infobar.set_message_type(Gtk.MessageType.ERROR)
box.pack_end(self.infobar, False, False, 0)
info_content = self.infobar.get_content_area()
self.info_label = Gtk.Label.new(
_('Failed to sign in, please try again.'))
info_content.pack_start(self.info_label, False, False, 0)
box.show_all()
self.infobar.hide()
if not gutil.keyring_available:
self.signin_check.set_active(False)
self.signin_check.set_sensitive(False)
self.remember_check.set_active(False)
self.remember_check.set_sensitive(False)
GLib.timeout_add(500, self.load_defualt_profile)
def load_defualt_profile(self):
if self.conf['default']:
self.use_profile(self.conf['default'])
self.password_changed = False
# auto_signin here
if self.signin_check.get_active() and self.auto_signin:
self.signin_button.set_sensitive(False)
self.signin()
return False
def on_username_changed(self, combo):
tree_iter = combo.get_active_iter()
username = ''
if tree_iter != None:
model = combo.get_model()
username = model[tree_iter][0]
self.use_profile(username)
else:
entry = combo.get_child()
username = entry.get_text()
self.profile = None
def use_profile(self, username):
model = self.username_combo.get_model()
for row in model:
if row[0] == username:
self.username_combo.set_active_iter(row.iter)
break
self.profile = gutil.load_profile(username)
self.password_entry.set_text(self.profile['password'])
if gutil.keyring_available:
self.remember_check.set_active(self.profile['remember-password'])
if self.profile['remember-password']:
self.signin_check.set_active(self.profile['auto-signin'])
else:
self.signin_check.set_active(False)
else:
self.remember_check.set_sensitive(False)
self.password_changed = False
def signin_failed(self, error=None):
if error:
self.info_label.set_text(error)
self.infobar.show_all()
self.signin_button.set_sensitive(True)
self.signin_button.set_label(_('Sign in'))
def on_password_entry_changed(self, entry):
self.password_changed = True
def on_remember_check_toggled(self, button):
if button.get_active():
self.signin_check.set_sensitive(True)
else:
self.signin_check.set_sensitive(False)
self.signin_check.set_active(False)
if self.profile:
self.profile['remember-password'] = self.remember_check.get_active()
gutil.dump_profile(self.profile)
def on_signin_check_toggled(self, button):
if self.profile:
self.profile['auto-signin'] = self.signin_check.get_active()
gutil.dump_profile(self.profile)
def on_signin_button_clicked(self, button):
if (len(self.password_entry.get_text()) <= 1 or
not self.username_combo.get_child().get_text()):
return
self.infobar.hide()
button.set_label(_('In process...'))
button.set_sensitive(False)
self.signin()
def on_password_entry_activate(self, entry):
if (len(self.password_entry.get_text()) <= 1 or
not self.username_combo.get_child().get_text()):
return
self.infobar.hide()
self.signin_button.set_label(_('In process...'))
self.signin_button.set_sensitive(False)
self.signin()
def signin(self):
def on_get_bdstoken(bdstoken, error=None):
if error or not bdstoken:
logger.error('SigninDialog.on_get_bdstoken: %s, %s' %
(bdstoken, error))
self.signin_failed(_('Failed to get bdstoken!'))
else:
nonlocal tokens
tokens['bdstoken'] = bdstoken
self.update_profile(username, password, cookie, tokens,
dump=True)
def on_post_login(info, error=None):
if error or not info:
logger.error('SigninDialog.on_post_login: %s, %s' %
(info, error))
self.signin_failed(
_('Login failed, please try again'))
else:
errno, query = info
if errno == 0:
cookie.load_list(query)
self.signin_button.set_label(_('Get bdstoken...'))
gutil.async_call(auth.get_bdstoken, cookie,
callback=on_get_bdstoken)
                # 257: a verification code is required
elif errno == 257:
nonlocal verifycode
nonlocal codeString
vcodetype = query['vcodetype']
codeString = query['codeString']
dialog = SigninVcodeDialog(self, username, cookie,
tokens['token'], codeString,
vcodetype)
response = dialog.run()
verifycode = dialog.get_vcode()
codeString = dialog.codeString
dialog.destroy()
self.signin_button.set_label(_('Get bdstoken...'))
gutil.async_call(auth.post_login, cookie,
tokens, username,
password_enc, rsakey, verifycode,
codeString, callback=on_post_login)
                # wrong password
elif errno == 4:
logger.error('SigninDialog.on_post_login: %s, %s' %
(info, error))
self.signin_failed(_('Password error, please try again'))
                # wrong verification code
elif errno == 6:
self.signin_failed(
_('Verfication code error, please try again'))
                # SMS/Email verification required
elif errno == 400031:
logger.error('SigninDialog.on_post_login: %s, %s' %
(info, error))
self.signin_failed(
_('Does not support SMS/Email verification!'))
                # login failed; follow the popup window or sign in again
elif errno == 120021:
logger.error('SigninDialog.on_post_login: %s, %s' %
(info, error))
self.signin_failed(
_('NET:登录失败,请在弹出的窗口操作,或重新登录'))
elif errno == 120019:
logger.error('SigninDialog.on_post_login: %s, %s' %
(info, error))
self.signin_failed(
_('NET:近期登录次数过多, 请先通过 passport.baidu.com 解除锁定'))
elif errno == 500010:
logger.error('SigninDialog.on_post_login: %s, %s' %
(info, error))
self.signin_failed(
_('NET:登录过于频繁,请24小时后再试'))
elif errno == 400031:
logger.error('SigninDialog.on_post_login: %s, %s' %
(info, error))
self.signin_failed(
_('NET:账号异常,请在当前网络环境下在百度网页端正常登录一次'))
else:
logger.error('SigninDialog.on_post_login: %s, %s' %
(info, error))
self.signin_failed(_('Unknown error, please try again'))
def on_get_public_key(info, error=None):
if not info or error:
logger.error('SigninDialog.on_get_public_key: %s, %s' %
(info, error))
self.signin_failed(
_('Failed to request public key, please try again'))
else:
pubkey = info['pubkey']
nonlocal rsakey
rsakey = info['key']
nonlocal password_enc
password_enc = util.RSA_encrypt(pubkey, password)
gutil.async_call(auth.post_login, cookie, tokens,
username, password_enc, rsakey, verifycode,
codeString, callback=on_post_login)
def on_check_login(info, error=None):
if not info or error:
logger.error('SigninDialog.on_check_login: %s, %s' %
(info, error))
self.signin_failed(_('Failed to check login, please try again'))
else:
ubi_cookie, status = info
cookie.load_list(ubi_cookie)
nonlocal codeString
nonlocal verifycode
codeString = status['data']['codeString']
vcodetype = status['data']['vcodetype']
if codeString:
dialog = SigninVcodeDialog(self, username, cookie,
tokens, codeString, vcodetype)
response = dialog.run()
verifycode = dialog.get_vcode()
codeString = dialog.codeString
dialog.destroy()
                    '''A Chinese captcha is 2 characters long, an English one is 4.'''
if not verifycode or (len(verifycode) != 4 and len(verifycode) != 2):
self.signin_failed(_('Please input verification code!'))
return
else:
gutil.async_call(auth.get_public_key, cookie,
tokens, callback=on_get_public_key)
else:
gutil.async_call(auth.get_public_key, cookie,
tokens, callback=on_get_public_key)
def on_get_UBI(ubi_cookie, error=None):
if error or not ubi_cookie:
logger.error('SigninDialog.on_getUBI: %s, %s' %
(ubi_cookie, error))
self.signin_failed(_('Failed to get UBI, please try again.'))
else:
cookie.load_list(ubi_cookie)
self.signin_button.set_label(_('Check login'))
gutil.async_call(auth.check_login, cookie, tokens,
username, callback=on_check_login)
def on_get_token(info, error=None):
if error or not info:
logger.error('SigninDialog.on_get_token: %s, %s' %
(info, error))
self.signin_failed(_('Failed to get token, please try again.'))
else:
nonlocal tokens
hosupport, token = info
cookie.load_list(hosupport)
cookie.load('cflag=65535%3A1; PANWEB=1;')
tokens['token'] = token
self.signin_button.set_label(_('Get UBI...'))
gutil.async_call(auth.get_UBI, cookie, tokens,
callback=on_get_UBI)
def on_get_BAIDUID(uid_cookie, error=None):
if error or not uid_cookie:
logger.error('SigninDialog.on_get_BAIDUID: %s, %s' %
(uid_cookie, error))
self.signin_failed(
_('Failed to get BAIDUID cookie, please try again.'))
else:
cookie.load_list(uid_cookie)
self.signin_button.set_label(_('Get TOKEN...'))
gutil.async_call(auth.get_token, cookie, callback=on_get_token)
username = self.username_combo.get_child().get_text()
password = <PASSWORD>()
        # Use the locally cached token if it has not expired (see DELTA above)
if not self.password_changed and self.signin_check.get_active():
cookie, tokens = self.load_auth(username)
if cookie and tokens:
self.update_profile(username, password, cookie, tokens)
return
cookie = RequestCookie()
tokens = {}
verifycode = ''
codeString = ''
password_enc = ''
rsakey = ''
self.signin_button.set_label(_('Get BAIDUID...'))
gutil.async_call(auth.get_BAIDUID, callback=on_get_BAIDUID)
def load_auth(self, username):
auth_file = os.path.join(Config.get_tmp_path(username), 'auth.json')
        # If the auth info has been cached and has not expired, read it directly.
if os.path.exists(auth_file):
if time.time() - os.stat(auth_file).st_mtime < DELTA:
with open(auth_file) as fh:
c, tokens = json.load(fh)
cookie = RequestCookie(c)
return cookie, tokens
return None, None
def dump_auth(self, username, cookie, tokens):
auth_file = os.path.join(Config.get_tmp_path(username), 'auth.json')
with open(auth_file, 'w') as fh:
json.dump([str(cookie), tokens], fh)
def update_profile(self, username, password, cookie, tokens, dump=False):
if not self.profile:
self.profile = gutil.load_profile(username)
self.profile['username'] = username
self.profile['remember-password'] = self.remember_check.get_active()
self.profile['auto-signin'] = self.signin_check.get_active()
if self.profile['remember-password']:
self.profile['password'] = password
else:
self.profile['password'] = ''
gutil.dump_profile(self.profile)
if username not in self.conf['profiles']:
self.conf['profiles'].append(username)
if self.profile['auto-signin']:
self.conf['default'] = username
Config.dump_conf(self.conf)
self.app.cookie = cookie
self.app.tokens = tokens
# dump auth info
if dump:
self.dump_auth(username, cookie, tokens)
self.app.profile = self.profile
self.app.window.set_default_size(*self.profile['window-size'])
self.hide()
```
#### File: bcloud-3.9.1/bcloud/UploadPage.py
```python
import math
import os
import sqlite3
from gi.repository import Gio
from gi.repository import GLib
from gi.repository import GObject
from gi.repository import Gtk
from gi.repository import Pango
from bcloud import Config
_ = Config._
from bcloud.const import UploadState as State
from bcloud.const import ValidatePathState
from bcloud.const import ValidatePathStateText
from bcloud.FolderBrowserDialog import FolderBrowserDialog
from bcloud.Uploader import Uploader
from bcloud import gutil
from bcloud.log import logger
from bcloud import pcs
from bcloud import util
(FID_COL, NAME_COL, SOURCEPATH_COL, PATH_COL, SIZE_COL,
CURRSIZE_COL, STATE_COL, STATENAME_COL, HUMANSIZE_COL,
PERCENT_COL, TOOLTIP_COL, THRESHOLD_COL) = list(range(12))
TASK_FILE = 'upload.sqlite'
StateNames = [
_('UPLOADING'),
_('WAITING'),
_('PAUSED'),
_('FINISHED'),
_('CANCELED'),
_('ERROR'),
]
RUNNING_STATES = (State.FINISHED, State.UPLOADING, State.WAITING)
class UploadPage(Gtk.Box):
icon_name = 'folder-upload-symbolic'
disname = _('Upload')
name = 'UploadPage'
tooltip = _('Uploading files')
first_run = True
workers = {} # {`fid`: (worker, row)}
commit_count = 0
def __init__(self, app):
super().__init__(orientation=Gtk.Orientation.VERTICAL)
self.app = app
if Config.GTK_GE_312:
self.headerbar = Gtk.HeaderBar()
self.headerbar.props.show_close_button = True
self.headerbar.props.has_subtitle = False
self.headerbar.set_title(self.disname)
control_box = Gtk.Box()
control_box_context = control_box.get_style_context()
control_box_context.add_class(Gtk.STYLE_CLASS_RAISED)
control_box_context.add_class(Gtk.STYLE_CLASS_LINKED)
self.headerbar.pack_start(control_box)
start_button = Gtk.Button()
start_img = Gtk.Image.new_from_icon_name(
'media-playback-start-symbolic',
Gtk.IconSize.SMALL_TOOLBAR)
start_button.set_image(start_img)
start_button.set_tooltip_text(_('Start'))
start_button.connect('clicked', self.on_start_button_clicked)
control_box.pack_start(start_button, False, False, 0)
pause_button = Gtk.Button()
pause_img = Gtk.Image.new_from_icon_name(
'media-playback-pause-symbolic',
Gtk.IconSize.SMALL_TOOLBAR)
pause_button.set_image(pause_img)
pause_button.set_tooltip_text(_('Pause'))
pause_button.connect('clicked', self.on_pause_button_clicked)
control_box.pack_start(pause_button, False, False, 0)
open_folder_button = Gtk.Button()
open_folder_img = Gtk.Image.new_from_icon_name(
'document-open-symbolic', Gtk.IconSize.SMALL_TOOLBAR)
open_folder_button.set_image(open_folder_img)
open_folder_button.set_tooltip_text(_('Open target directory'))
open_folder_button.connect('clicked',
self.on_open_folder_button_clicked)
self.headerbar.pack_start(open_folder_button)
upload_box = Gtk.Box()
upload_box_context = upload_box.get_style_context()
upload_box_context.add_class(Gtk.STYLE_CLASS_RAISED)
upload_box_context.add_class(Gtk.STYLE_CLASS_LINKED)
self.headerbar.pack_start(upload_box)
upload_file_button = Gtk.Button()
upload_file_img = Gtk.Image.new_from_icon_name(
'folder-upload-symbolic', Gtk.IconSize.SMALL_TOOLBAR)
upload_file_button.set_image(upload_file_img)
upload_file_button.set_tooltip_text(_('Upload files'))
upload_file_button.connect('clicked',
self.on_upload_file_button_clicked)
upload_box.pack_start(upload_file_button, False, False, 0)
upload_folder_button = Gtk.Button()
upload_folder_img = Gtk.Image.new_from_icon_name(
'folder-upload-symbolic', Gtk.IconSize.SMALL_TOOLBAR)
upload_folder_button.set_image(upload_folder_img)
upload_folder_button.set_tooltip_text(_('Upload folders'))
upload_folder_button.connect('clicked',
self.on_upload_folder_button_clicked)
upload_box.pack_start(upload_folder_button, False, False, 0)
right_box = Gtk.Box()
right_box_context = right_box.get_style_context()
right_box_context.add_class(Gtk.STYLE_CLASS_RAISED)
right_box_context.add_class(Gtk.STYLE_CLASS_LINKED)
self.headerbar.pack_end(right_box)
remove_button = Gtk.Button()
remove_img = Gtk.Image.new_from_icon_name('list-remove-symbolic',
Gtk.IconSize.SMALL_TOOLBAR)
remove_button.set_image(remove_img)
remove_button.set_tooltip_text(_('Remove selected tasks'))
remove_button.connect('clicked', self.on_remove_button_clicked)
right_box.pack_start(remove_button, False, False, 0)
remove_finished_button = Gtk.Button()
remove_finished_img = Gtk.Image.new_from_icon_name(
'list-remove-all-symbolic', Gtk.IconSize.SMALL_TOOLBAR)
remove_finished_button.set_image(remove_finished_img)
remove_finished_button.set_tooltip_text(_('Remove completed tasks'))
remove_finished_button.connect('clicked',
self.on_remove_finished_button_clicked)
right_box.pack_start(remove_finished_button, False, False, 0)
else:
control_box = Gtk.Box()
self.pack_start(control_box, False, False, 0)
start_button = Gtk.Button.new_with_label(_('Start'))
start_button.connect('clicked', self.on_start_button_clicked)
control_box.pack_start(start_button, False, False, 0)
pause_button = Gtk.Button.new_with_label(_('Pause'))
pause_button.connect('clicked', self.on_pause_button_clicked)
control_box.pack_start(pause_button, False, False, 0)
upload_file_button = Gtk.Button.new_with_label(_('Upload Files'))
upload_file_button.set_tooltip_text(_('Upload files'))
upload_file_button.connect('clicked',
self.on_upload_file_button_clicked)
control_box.pack_start(upload_file_button, False, False, 0)
upload_folder_button = Gtk.Button.new_with_label(
_('Upload Folders'))
upload_folder_button.set_tooltip_text(_('Upload folders'))
upload_folder_button.connect('clicked',
self.on_upload_folder_button_clicked)
control_box.pack_start(upload_folder_button, False, False, 0)
open_folder_button = Gtk.Button.new_with_label(_('Open Directory'))
open_folder_button.connect('clicked',
self.on_open_folder_button_clicked)
open_folder_button.props.margin_left = 40
control_box.pack_start(open_folder_button, False, False, 0)
remove_finished_button = Gtk.Button.new_with_label(
_('Remove completed tasks'))
remove_finished_button.connect('clicked',
self.on_remove_finished_button_clicked)
control_box.pack_end(remove_finished_button, False, False, 0)
remove_button = Gtk.Button.new_with_label(_('Remove'))
remove_button.connect('clicked', self.on_remove_button_clicked)
control_box.pack_end(remove_button, False, False, 0)
scrolled_win = Gtk.ScrolledWindow()
self.pack_start(scrolled_win, True, True, 0)
# fid, source_name, source_path, path, size,
# currsize, state, statename, humansize, percent, tooltip
# slice size
self.liststore = Gtk.ListStore(GObject.TYPE_INT, str, str, str,
GObject.TYPE_INT64, GObject.TYPE_INT64,
int, str, str, GObject.TYPE_INT, str,
GObject.TYPE_INT64)
self.treeview = Gtk.TreeView(model=self.liststore)
self.treeview.set_headers_clickable(True)
self.treeview.set_reorderable(True)
self.treeview.set_search_column(NAME_COL)
self.treeview.set_tooltip_column(TOOLTIP_COL)
self.selection = self.treeview.get_selection()
self.selection.set_mode(Gtk.SelectionMode.MULTIPLE)
scrolled_win.add(self.treeview)
name_cell = Gtk.CellRendererText(ellipsize=Pango.EllipsizeMode.END,
ellipsize_set=True)
name_col = Gtk.TreeViewColumn(_('Name'), name_cell, text=NAME_COL)
name_col.set_expand(True)
self.treeview.append_column(name_col)
name_col.set_sort_column_id(NAME_COL)
self.liststore.set_sort_func(NAME_COL, gutil.tree_model_natsort)
percent_cell = Gtk.CellRendererProgress()
percent_col = Gtk.TreeViewColumn(_('Progress'), percent_cell,
value=PERCENT_COL)
self.treeview.append_column(percent_col)
percent_col.props.min_width = 145
percent_col.set_sort_column_id(PERCENT_COL)
size_cell = Gtk.CellRendererText()
size_col = Gtk.TreeViewColumn(_('Size'), size_cell, text=HUMANSIZE_COL)
self.treeview.append_column(size_col)
size_col.props.min_width = 100
size_col.set_sort_column_id(SIZE_COL)
state_cell = Gtk.CellRendererText()
state_col = Gtk.TreeViewColumn(_('State'), state_cell,
text=STATENAME_COL)
self.treeview.append_column(state_col)
state_col.props.min_width = 100
state_col.set_sort_column_id(PERCENT_COL)
def check_first(self):
if self.first_run:
self.first_run = False
self.load()
def on_page_show(self):
if Config.GTK_GE_312:
self.app.window.set_titlebar(self.headerbar)
self.headerbar.show_all()
def load(self):
self.show_all()
self.init_db()
self.load_tasks_from_db()
def init_db(self):
cache_path = os.path.join(Config.CACHE_DIR,
self.app.profile['username'])
if not os.path.exists(cache_path):
os.makedirs(cache_path, exist_ok=True)
db = os.path.join(cache_path, TASK_FILE)
self.conn = sqlite3.connect(db)
self.cursor = self.conn.cursor()
sql = '''CREATE TABLE IF NOT EXISTS upload (
fid INTEGER PRIMARY KEY,
name CHAR NOT NULL,
source_path CHAR NOT NULL,
path CHAR NOT NULL,
size INTEGER NOT NULL,
curr_size INTEGER NOT NULL,
state INTEGER NOT NULL,
state_name CHAR NOT NULL,
human_size CHAR NOT NULL,
percent INTEGER NOT NULL,
tooltip CHAR,
threshold INTEGER NOT NULL
)
'''
self.cursor.execute(sql)
sql = '''CREATE TABLE IF NOT EXISTS slice (
fid INTEGER NOT NULL,
slice_end INTEGER NOT NULL,
md5 CHAR NOT NULL
)
'''
self.cursor.execute(sql)
def reload(self):
pass
def load_tasks_from_db(self):
sql = 'SELECT * FROM upload'
req = self.cursor.execute(sql)
for task in req:
self.liststore.append(task)
def check_commit(self, force=False):
        '''Commit automatically once the database has been modified more than 50 times.'''
self.commit_count = self.commit_count + 1
if force or self.commit_count >= 50:
self.commit_count = 0
self.conn.commit()
def add_task_db(self, task, force=True):
        '''Insert a new task record into the database and return its fid.'''
sql = '''INSERT INTO upload (
name, source_path, path, size, curr_size, state, state_name,
human_size, percent, tooltip, threshold)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)'''
req = self.cursor.execute(sql, task)
self.check_commit(force=force)
return req.lastrowid
def add_slice_db(self, fid, slice_end, md5):
        '''Record a finished slice of an upload task in the database.'''
sql = 'INSERT INTO slice VALUES(?, ?, ?)'
self.cursor.execute(sql, (fid, slice_end, md5))
self.check_commit()
def get_task_db(self, source_path):
        '''Look up the record for source_path in the database.
        Return the record if it exists;
        otherwise return None.
'''
sql = 'SELECT * FROM upload WHERE source_path=?'
req = self.cursor.execute(sql, [source_path, ])
if req:
return req.fetchone()
else:
            return None
def get_slice_db(self, fid):
        '''Fetch all slices of fid from the database.
        Returns a list of md5 values in the stored order.
'''
sql = 'SELECT md5 FROM slice WHERE fid=?'
req = self.cursor.execute(sql, [fid, ])
if req:
return [r[0] for r in req]
else:
return None
def update_task_db(self, row, force=False):
        '''Update the task record in the database.'''
sql = '''UPDATE upload SET
curr_size=?, state=?, state_name=?, human_size=?, percent=?
WHERE fid=?
'''
self.cursor.execute(sql, [
row[CURRSIZE_COL], row[STATE_COL], row[STATENAME_COL],
row[HUMANSIZE_COL], row[PERCENT_COL], row[FID_COL]
])
self.check_commit(force=force)
def remove_task_db(self, fid, force=False):
        '''Delete the task from the database.'''
self.remove_slice_db(fid)
sql = 'DELETE FROM upload WHERE fid=?'
self.cursor.execute(sql, [fid, ])
self.check_commit(force=force)
def remove_slice_db(self, fid):
        '''Remove the slices of an upload task from the database.'''
sql = 'DELETE FROM slice WHERE fid=?'
self.cursor.execute(sql, [fid, ])
self.check_commit()
def on_destroy(self, *args):
if not self.first_run:
self.conn.commit()
for row in self.liststore:
self.pause_task(row, scan=False)
self.conn.commit()
self.conn.close()
# Open API
def add_file_task(self, dir_name=None):
        '''Add upload tasks; a file chooser dialog will pop up.'''
file_dialog = Gtk.FileChooserDialog(_('Choose Files..'),
self.app.window, Gtk.FileChooserAction.OPEN,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_OK, Gtk.ResponseType.OK))
file_dialog.set_modal(True)
file_dialog.set_select_multiple(True)
file_dialog.set_default_response(Gtk.ResponseType.OK)
response = file_dialog.run()
if response != Gtk.ResponseType.OK:
file_dialog.destroy()
return
source_paths = file_dialog.get_filenames()
file_dialog.destroy()
if source_paths:
self.upload_files(source_paths, dir_name)
def add_folder_task(self, dir_name=None):
        '''Add upload tasks; a folder chooser dialog will pop up.'''
folder_dialog = Gtk.FileChooserDialog(_('Choose Folders..'),
self.app.window, Gtk.FileChooserAction.SELECT_FOLDER,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_OK, Gtk.ResponseType.OK))
folder_dialog.set_modal(True)
folder_dialog.set_select_multiple(True)
folder_dialog.set_default_response(Gtk.ResponseType.OK)
folder_dialog.set_current_folder(Config.HOME_DIR)
response = folder_dialog.run()
if response != Gtk.ResponseType.OK:
folder_dialog.destroy()
return
source_paths = folder_dialog.get_filenames()
folder_dialog.destroy()
if source_paths:
self.upload_files(source_paths, dir_name)
def add_bg_task(self, source_path, dest_path):
GLib.idle_add(self.bg_upload_file, source_path, dest_path)
def bg_upload_file(self, source_path, dest_path):
self.check_first()
self.upload_file(source_path, dest_path)
self.scan_tasks()
# Open API
def upload_files(self, source_paths, dir_name=None):
        '''Create upload tasks in batch; subdirectories are scanned and uploaded in turn.
        source_paths - absolute paths of the local files
        dir_name - parent directory on the server; if None, a dialog pops up
                   so the user can choose a directory.
        '''
def scan_folders(folder_path):
file_list = os.listdir(folder_path)
source_paths = [os.path.join(folder_path, f) for f in file_list]
self.upload_files(source_paths,
os.path.join(dir_name, os.path.split(folder_path)[1]))
self.check_first()
if not dir_name:
folder_dialog = FolderBrowserDialog(self, self.app)
response = folder_dialog.run()
if response != Gtk.ResponseType.OK:
folder_dialog.destroy()
return
dir_name = folder_dialog.get_path()
folder_dialog.destroy()
invalid_paths = []
for source_path in source_paths:
if util.validate_pathname(source_path) != ValidatePathState.OK:
invalid_paths.append(source_path)
continue
if (os.path.split(source_path)[1].startswith('.') and
not self.app.profile['upload-hidden-files']):
continue
if os.path.isfile(source_path):
self.upload_file(source_path, dir_name)
elif os.path.isdir(source_path):
scan_folders(source_path)
self.app.blink_page(self)
self.scan_tasks()
if not invalid_paths:
return
dialog = Gtk.Dialog(_('Invalid Filepath'), self.app.window,
Gtk.DialogFlags.MODAL,
(Gtk.STOCK_CLOSE, Gtk.ResponseType.OK))
dialog.set_default_size(640, 480)
dialog.set_border_width(10)
box = dialog.get_content_area()
scrolled_window = Gtk.ScrolledWindow()
box.pack_start(scrolled_window, True, True, 0)
text_buffer = Gtk.TextBuffer()
textview = Gtk.TextView.new_with_buffer(text_buffer)
scrolled_window.add(textview)
for invalid_path in invalid_paths:
text_buffer.insert_at_cursor(invalid_path)
text_buffer.insert_at_cursor('\n')
infobar = Gtk.InfoBar()
infobar.set_message_type(Gtk.MessageType.ERROR)
box.pack_end(infobar, False, False, 0)
info_label= Gtk.Label()
infobar.get_content_area().pack_start(info_label, False, False, 0)
info_label.set_label(''.join([
'* ', ValidatePathStateText[1], '\n',
'* ', ValidatePathStateText[2], '\n',
'* ', ValidatePathStateText[3], '\n',
]))
box.show_all()
dialog.run()
dialog.destroy()
def upload_file(self, source_path, dir_name):
        '''Upload a single file.'''
row = self.get_task_db(source_path)
source_dir, filename = os.path.split(source_path)
path = os.path.join(dir_name, filename)
size = os.path.getsize(source_path)
total_size = util.get_human_size(size)[0]
tooltip = gutil.escape(_('From {0}\nTo {1}').format(source_path, path))
if size < 2 ** 27: # 128M
threshold = 2 ** 17 # 128K
elif size < 2 ** 29: # 512M
threshold = 2 ** 19 # 512K
elif size < 10 * (2 ** 30): # 10G
threshold = math.ceil(size / 1000)
else:
self.app.toast(_('{0} is too large to upload (>10G).').format(path))
return
task = [
filename,
source_path,
path,
size,
0,
State.WAITING,
StateNames[State.WAITING],
'0 / {0}'.format(total_size),
0,
tooltip,
threshold,
]
row_id = self.add_task_db(task, force=False)
task.insert(0, row_id)
self.liststore.append(task)
def start_task(self, row, scan=True):
        '''Start an upload task.
        Set the task state to Uploading if the maximum number of concurrent
        tasks is not exceeded; otherwise set it to Waiting.
        '''
if row[STATE_COL] in RUNNING_STATES :
self.scan_tasks()
return
row[STATE_COL] = State.WAITING
row[STATENAME_COL] = StateNames[State.WAITING]
self.update_task_db(row)
if scan:
self.scan_tasks()
# Open API
def pause_tasks(self):
        '''Pause all upload tasks.'''
if self.first_run:
return
for row in self.liststore:
self.pause_task(row, scan=False)
def pause_task(self, row, scan=True):
        '''Pause an upload task.'''
if row[STATE_COL] == State.UPLOADING:
self.remove_worker(row[FID_COL], stop=False)
if row[STATE_COL] in (State.UPLOADING, State.WAITING):
row[STATE_COL] = State.PAUSED
row[STATENAME_COL] = StateNames[State.PAUSED]
self.update_task_db(row)
if scan:
self.scan_tasks()
def remove_task(self, row, scan=True):
        '''Remove an upload task.'''
if row[STATE_COL] == State.UPLOADING:
self.remove_worker(row[FID_COL], stop=True)
self.remove_task_db(row[FID_COL])
tree_iter = row.iter
if tree_iter:
self.liststore.remove(tree_iter)
if scan:
self.scan_tasks()
def scan_tasks(self):
if len(self.workers.keys()) >= self.app.profile['concurr-upload']:
return
for row in self.liststore:
if len(self.workers.keys()) >= self.app.profile['concurr-upload']:
break
if row[STATE_COL] == State.WAITING:
self.start_worker(row)
return True
def start_worker(self, row):
def on_worker_slice_sent(worker, fid, slice_end, md5):
GLib.idle_add(do_worker_slice_sent, fid, slice_end, md5)
def do_worker_slice_sent(fid, slice_end, md5):
if fid not in self.workers:
return
row = self.get_row_by_fid(fid)
if not row:
return
row[CURRSIZE_COL] = slice_end
total_size = util.get_human_size(row[SIZE_COL])[0]
curr_size = util.get_human_size(slice_end, False)[0]
row[PERCENT_COL] = int(slice_end / row[SIZE_COL] * 100)
row[HUMANSIZE_COL] = '{0} / {1}'.format(curr_size, total_size)
self.update_task_db(row)
self.add_slice_db(fid, slice_end, md5)
def on_worker_merge_files(worker, fid):
GLib.idle_add(do_worker_merge_files, fid)
def do_worker_merge_files(fid):
def on_create_superfile(pcs_file, error=None):
if error or not pcs_file:
self.app.toast(_('Failed to upload, please try again'))
logger.error('UploadPage.do_worker_merge_files: %s, %s' %
(pcs_file, error))
do_worker_error(fid)
return
else:
self.remove_slice_db(fid)
do_worker_uploaded(fid)
block_list = self.get_slice_db(fid)
if fid not in self.workers:
return
row = self.get_row_by_fid(fid)
if not row:
return
if not block_list:
# TODO:
pass
else:
gutil.async_call(pcs.create_superfile, self.app.cookie,
row[PATH_COL], block_list,
callback=on_create_superfile)
def on_worker_uploaded(worker, fid):
GLib.idle_add(do_worker_uploaded, fid)
def do_worker_uploaded(fid):
if fid not in self.workers:
return
row = self.get_row_by_fid(fid)
if not row:
return
row[PERCENT_COL] = 100
total_size = util.get_human_size(row[SIZE_COL])[0]
row[HUMANSIZE_COL] = '{0} / {1}'.format(total_size, total_size)
row[STATE_COL] = State.FINISHED
row[STATENAME_COL] = StateNames[State.FINISHED]
self.update_task_db(row, force=True)
self.workers.pop(fid, None)
self.app.toast(_('{0} uploaded').format(row[NAME_COL]))
self.app.home_page.reload()
self.scan_tasks()
def on_worker_disk_error(worker, fid):
GLib.idle_add(do_worker_error, fid)
def on_worker_network_error(worker, fid):
GLib.idle_add(do_worker_error, fid)
def do_worker_error(fid):
row = self.get_row_by_fid(fid)
if not row:
return
row[STATE_COL] = State.ERROR
row[STATENAME_COL] = StateNames[State.ERROR]
self.update_task_db(row)
self.remove_worker(fid, stop=False)
self.scan_tasks()
if row[FID_COL] in self.workers:
return
row[STATE_COL] = State.UPLOADING
row[STATENAME_COL] = StateNames[State.UPLOADING]
worker = Uploader(self, row, self.app.cookie, self.app.tokens)
self.workers[row[FID_COL]] = (worker, row)
# For slice upload
worker.connect('slice-sent', on_worker_slice_sent)
worker.connect('merge-files', on_worker_merge_files)
# For upload_small_files/rapid_upload
worker.connect('uploaded', on_worker_uploaded)
worker.connect('disk-error', on_worker_disk_error)
worker.connect('network-error', on_worker_network_error)
worker.start()
def remove_worker(self, fid, stop=True):
if fid not in self.workers:
return
worker = self.workers[fid][0]
if stop:
worker.stop()
else:
worker.pause()
self.workers.pop(fid, None)
def get_row_by_source_path(self, source_path):
for row in self.liststore:
if row[SOURCEPATH_COL] == source_path:
return row
return None
def get_row_by_fid(self, fid):
for row in self.liststore:
if row[FID_COL] == fid:
return row
return None
def operate_selected_rows(self, operator):
        '''Apply an operation to the selected rows.
        operator - the handler function
        '''
model, tree_paths = self.selection.get_selected_rows()
if not tree_paths:
return
fids = []
for tree_path in tree_paths:
fids.append(model[tree_path][FID_COL])
for fid in fids:
row = self.get_row_by_fid(fid)
if row:
operator(row)
def on_start_button_clicked(self, button):
self.operate_selected_rows(self.start_task)
def on_pause_button_clicked(self, button):
self.operate_selected_rows(self.pause_task)
def on_remove_button_clicked(self, button):
self.operate_selected_rows(self.remove_task)
def on_remove_finished_button_clicked(self, button):
tree_iters = []
for row in self.liststore:
if row[STATE_COL] == State.FINISHED:
tree_iters.append(self.liststore.get_iter(row.path))
for tree_iter in tree_iters:
if tree_iter:
self.remove_task_db(self.liststore[tree_iter][FID_COL], False)
self.liststore.remove(tree_iter)
self.check_commit()
def on_open_folder_button_clicked(self, button):
model, tree_paths = self.selection.get_selected_rows()
if not tree_paths or len(tree_paths) != 1:
return
tree_path = tree_paths[0]
path = model[tree_path][PATH_COL]
dir_name = os.path.split(path)[0]
self.app.home_page.load(dir_name)
self.app.switch_page(self.app.home_page)
def on_upload_file_button_clicked(self, button):
self.add_file_task()
def on_upload_folder_button_clicked(self, button):
self.add_folder_task()
``` |
{
"source": "jiaxiaosong1002/INTERPRET_challenge_Closed_loop",
"score": 3
} |
#### File: INTERPRET_challenge_Closed_loop/predictor/traj.py
```python
class State:
def __init__(self, pt=None):
if pt:
self.track_id = pt.track_id
self.frame_id = pt.frame_id
self.timestamp_ms = pt.timestamp_ms
self.agent_type = pt.agent_type
self.x = pt.x
self.y = pt.y
self.vx = pt.vx
self.vy = pt.vy
self.psi_rad = pt.psi_rad
self.length = pt.length
self.width = pt.width
self.jerk = pt.jerk
self.current_lanelet_id = pt.current_lanelet_id
self.s_of_current_lanelet = pt.s_of_current_lanelet
self.d_of_current_lanelet = pt.d_of_current_lanelet
else:
self.track_id = 0
self.frame_id = 0
self.timestamp_ms = 0
self.agent_type = 'car'
self.x = 0.0
self.y = 0.0
self.vx = 0.0
self.vy = 0.0
self.psi_rad = 0.0
self.length = 0.0
self.width = 0.0
self.jerk = 0.0
self.current_lanelet_id = 0
self.s_of_current_lanelet = 0.0
self.d_of_current_lanelet = 0
class Trajectory:
def __init__(self):
self._trajectory = []
@property
def states(self):
return self._trajectory
def append_state(self, state: State):
self._trajectory.append(state)
def state(self):
return self._trajectory
```
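A minimal usage sketch for the classes above (not part of the original repository; the import path is assumed from the file layout):
```python
from predictor.traj import State, Trajectory  # assumed import path

traj = Trajectory()
for frame in range(3):
    s = State()              # default-constructed state
    s.frame_id = frame
    s.x, s.y = float(frame), 0.0
    traj.append_state(s)

print(len(traj.states))      # -> 3
```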
#### File: jiaxiaosong1002/INTERPRET_challenge_Closed_loop/simulator_client.py
```python
import logging
import time
import grpc
import predictor.predictor
import predictor.traj
import simulator_pb2
import simulator_pb2_grpc
class SimulatorClient:
def __init__(self, logger: logging.Logger, server_address, user_predictor: predictor.predictor.Predictor):
self._logger = logger
self._server_address = server_address
self._client = None
self._stopped = False
self._predictor = user_predictor
self._simulator_paused = False
def start(self, loop_interval):
with grpc.insecure_channel(self._server_address) as channel:
self._client = simulator_pb2_grpc.SimulatorServerStub(channel)
next_loop = time.perf_counter()
while True:
try:
self.fetch_env()
except Exception as e:
self._logger.warning(f'failed to connect to remote server')
self._logger.warning(e.__str__())
self._logger.warning(f'will try again 5 seconds later')
time.sleep(5)
if self._simulator_paused:
self.report_state()
else:
try:
self.report_state()
except Exception as e:
self._logger.warning(f'failed to connect to remote server')
self._logger.warning(e.__str__())
self._logger.warning(f'will try again 5 seconds later')
time.sleep(5)
curr = time.perf_counter()
interval = max(0, next_loop + loop_interval - curr)
next_loop = curr + interval
time.sleep(interval)
def shutdown(self):
self._stopped = True
@staticmethod
def _proto_traj_to_traj(proto_traj):
trajectory = predictor.traj.Trajectory()
for pt in proto_traj.state:
trajectory.append_state(predictor.traj.State(pt))
return trajectory
def fetch_env(self):
response = self._client.FetchEnv(simulator_pb2.FetchEnvRequest())
if response.resp_code == 0:
map_name = response.map_name
my_traj = self._proto_traj_to_traj(response.my_traj)
other_trajs = []
for other_traj in response.other_trajs:
other_trajs.append(self._proto_traj_to_traj(other_traj))
self._predictor.on_env(map_name, my_traj, other_trajs)
elif response.resp_code == 233: # the simulator paused
self._simulator_paused = True
print(f'resp_code={response.resp_code}, the simulator paused')
else:
self._logger.warning(f'fetch_env failed, resp_code={response.resp_code}')
def report_state(self):
req = simulator_pb2.PushMyTrajectoryRequest()
if self._simulator_paused:
try:
resp = self._client.PushMyTrajectory(req)
# send an empty request to inform the simulator that the client has quit
except Exception as e:
print('Close Predictor')
exit(0)
my_state = self._predictor.fetch_my_state()
for trajs in my_state.trajectories:
traj = req.pred_trajs.add()
for state in trajs.states:
pt = traj.state.add()
pt.track_id = state.track_id
pt.frame_id = state.frame_id
pt.timestamp_ms = state.timestamp_ms
pt.agent_type = state.agent_type
pt.x = state.x
pt.y = state.y
pt.vx = state.vx
pt.vy = state.vy
pt.psi_rad = state.psi_rad
pt.length = state.length
pt.width = state.width
pt.jerk = state.jerk
pt.current_lanelet_id = state.current_lanelet_id
pt.s_of_current_lanelet = state.s_of_current_lanelet
pt.d_of_current_lanelet = state.d_of_current_lanelet
for probability in my_state.probabilities:
req.probability.append(probability)
resp = self._client.PushMyTrajectory(req)
if resp.resp_code != 0:
self._logger.warning(f'report_state failed, resp_code={resp.resp_code}')
@property
def stopped(self):
self._predictor.shutdown()
return self._stopped
@property
def loop_interval(self):
return self._loop_interval
``` |
{
"source": "Jiaxigu/quapy",
"score": 3
} |
#### File: quapy/quapy/quapy.py
```python
import matplotlib.pyplot as plt
from shapely.geometry import *
class Building:
"""
qua-kit building class.
each Building instance represents a qua-kit building object, which is wrapped as a feature in the GeoJSON file.
applicable to GeoJSON files imported from both OpenStreetMap and 3D creation softwares.
"""
def __init__(self, feature):
"""
:type feature: dict
"""
# default params
self.geometry = None
self.properties = {}
self.tags = {}
if 'properties' in feature:
if isinstance(feature['properties'], dict):
self.properties = feature['properties']
if 'geometry' in feature:
if 'coordinates' in feature['geometry']:
self.geometry = shape(feature['geometry'])
###############
### Plotter ###
###############
def plot(self, c='k'):
"""
plot the polygon with matplotlib.
:type c: str
"""
plt.plot(self.geometry.convex_hull.exterior.xy[0], self.geometry.convex_hull.exterior.xy[1], c)
plt.axis('equal')
###########
### Tag ###
###########
def tag(self, category_tagger={}):
"""
updates and returns the categorical tags of the building.
:type category_tagger: dict
:rtype: dict
"""
self.tags.update(category_tagger)
return self.tags
class Scenario:
"""
qua-kit scenario class.
a Scenario instance represents a qua-kit scenario contained in a GeoJSON file.
applicable to GeoJSON files imported from both OpenStreetMap and 3D creation softwares.
"""
def __init__(self, file):
"""
initialize a Scenario instance with a GeoJSON file already parsed by json library.
:type file: dict
"""
# default params
self.lonlat = None
self.name = None
self.map_zoom_level = None
self.tags = []
self.buildings = []
# osm-based geometry is based on explicit lat, lon coordinates,
        # while geometry rendered from 3D creation software (e.g. Blender) is based on relative [x, y, z] coordinates
        # and baseline longitude, latitude coordinates stored as scenario properties.
if 'name' in file:
self.name = file['name']
if 'lon' in file and 'lat' in file:
self.lonlat = [float(file['lon']), float(file['lat'])]
if 'properties' in file:
if 'mapZoomLevel' in file['properties']:
self.map_zoom_level = int(file['properties']['mapZoomLevel'])
if 'geometry' in file:
if 'features' in file['geometry']:
self.buildings = [Building(feature) for feature in file['geometry']['features']]
############
### Area ###
############
def buildings_in_area(self, polygon):
"""
returns a list of buildings within the area given.
:type polygon: shapely.geometry.polygon
"""
return [b for b in self.buildings if polygon.contains(b.geometry.convex_hull)]
###############
### Plotter ###
###############
def plot(self, area=False):
"""
plot the buildings in the scenario with matplotlib.
"""
for b in self.buildings:
b.plot()
##############
### Tagger ###
##############
def tag(self, prop, tagger_dict):
"""
tag all buildings by given property with a pre-defined tagger dictionary.
        :type tagger_dict: dict
:type prop: str
"""
for tag_label, tagger in tagger_dict.items():
self._category_tagger(prop, tag_label, tagger)
if tag_label not in self.tags:
self.tags.append(tag_label)
def _category_tagger(self, prop, tag_label, tagger):
"""
categorical tagger.
"""
for b in self.buildings:
if prop in b.properties:
if b.properties[prop] in tagger.keys():
tag = tagger[b.properties[prop]]
b.tag({tag_label:tag})
``` |
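A small illustrative sketch of how these classes might be used (assumed import path and toy GeoJSON data, not from the original repository):
```python
from quapy.quapy import Scenario  # assumed import path

geojson = {
    "name": "demo",
    "lon": "8.54", "lat": "47.37",
    "geometry": {"features": [{
        "properties": {"building": "residential"},
        "geometry": {"type": "Polygon",
                     "coordinates": [[[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]]]},
    }]},
}

scenario = Scenario(geojson)
scenario.tag("building", {"usage": {"residential": "housing"}})
print(scenario.buildings[0].tags)  # -> {'usage': 'housing'}
```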
{
"source": "Jiaxigu/sleepsort",
"score": 3
} |
#### File: Jiaxigu/sleepsort/test.py
```python
import unittest
from sleepsort import *
class SleepsortTestCases(unittest.TestCase):
def test_negative_number(self):
sample_negative_array = [-2, 3, 1, 6]
with self.assertRaises(ValueError):
sleepsort(sample_negative_array)
def test_sorted(self):
sample_short_array = [4, 2, 3, 1]
self.assertEqual(sleepsort(sample_short_array), sorted(sample_short_array))
def suite_loader():
test_cases = (SleepsortTestCases,)
suite = unittest.TestSuite()
for test_case in test_cases:
tests = unittest.defaultTestLoader.loadTestsFromTestCase(test_case)
suite.addTests(tests)
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite_loader')
``` |
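The sleepsort module under test is not included in this entry; below is a minimal sketch of an implementation that would satisfy these tests (illustrative only — ordering by sleep timing is inherently best-effort):
```python
import threading
import time

def sleepsort(values):
    # raise on negative inputs, as the test suite expects
    if any(v < 0 for v in values):
        raise ValueError("sleepsort only supports non-negative numbers")
    result = []
    threads = [threading.Thread(target=lambda v=v: (time.sleep(v * 0.01), result.append(v)))
               for v in values]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    return result
```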
{
"source": "jiaxin1996/silkpy",
"score": 3
} |
#### File: symbolic/curve/transform.py
```python
from .curve import ParametricCurve as _ParametricCurve
from sympy import Symbol as _Symbol
def curve_normalization(
other:_ParametricCurve,
new_var=_Symbol('s', real=True)):
from sympy import S, solveset, Eq
from sympy import integrate
from silkpy.sympy_utility import norm
drdt = norm(other.expr().diff(other.sym(0)))
new_var_in_old = integrate(drdt, other.sym(0)).simplify()
solset = solveset(Eq(new_var, new_var_in_old), other.sym(0), domain=S.Reals).simplify()
try:
if len(solset) != 1:
            raise RuntimeError(
                f"We have not yet succeeded in inverting s(t) into t(s). "
                f"It found these solutions: {solset}. "
                "Users need to choose from them or deduce manually, and then set it by "
                "obj.param_norm(s_symbol, t_expressed_by_s)")
except:
        raise RuntimeError("We have not yet succeeded in inverting s(t) into t(s). Try the curve_param_transform function instead and set the transform relation manually.")
else:
old_var_in_new = next(iter(solset))
return _ParametricCurve(
other.expr().subs(other.sym(0), old_var_in_new),
(new_var,
new_var_in_old.subs(other.sym(0), other.sym_limit(0)[0]),
new_var_in_old.subs(other.sym(0), other.sym_limit(0)[1])))
def curve_param_transform(old_curve, newt, t_expr=None, newt_expr=None):
    # t_expr: the old parameter expressed in terms of the new one (substituted into the curve expression).
    # newt_expr: the new parameter expressed in terms of the old one (used to map the parameter limits).
    return _ParametricCurve(
        old_curve._r.applyfunc(lambda x: x.subs(old_curve._t, t_expr)),
        (newt,
         newt_expr.subs(old_curve._t, old_curve._t_limit[0]),
         newt_expr.subs(old_curve._t, old_curve._t_limit[1])), old_curve._sys)
``` |
{
"source": "jiaxin1996/sinupy",
"score": 3
} |
#### File: sinupy/algebra/quadratic.py
```python
def is_sqrt(expr):
from sympy.core.power import Pow
from sympy.core.mul import Mul
from sympy.core.numbers import Half
is_sqrt_ = False # defaults to be False
expr = expr.factor()
# recurse the function on each arg if the top function is a multiplication
# e.g. 4 * sqrt(b) * sqrt(c) should also be regarded as a sqrt expression.
if expr.func == Mul:
args_is_sqrt = [is_sqrt(arg) for arg in expr.args]
return True if all(args_is_sqrt) else False
# Main Function
elif expr.func == Pow:
if isinstance(expr.args[1], Half): # The second arg of Pow is a Half number if the term is a sqrt
is_sqrt_ = True
elif not expr.is_negative:
is_sqrt_ = True
return is_sqrt_
def is_square(expr):
from sympy.core.power import Pow
from sympy.core.mul import Mul
is_square_ = False # defaults to be False
expr = expr.factor()
# recurse the function on each arg if the top function is a multiplication
# e.g. 4 * b^4 * b^6 * c^2 should also be regarded as a square expression.
if expr.func == Mul:
args_is_square = [is_square(arg) for arg in expr.args]
return True if all(args_is_square) else False
# Main Function
elif expr.func == Pow:
base, exponent = expr.args
if exponent % 2 == 0: # The second arg of Pow is an even number if the term is a square
is_square_ = True
elif not expr.is_negative:
is_square_ = True
return is_square_
def is_abs(expr):
from sympy.core.mul import Mul
from sympy import Abs
is_abs_ = False # defaults to be False
expr = expr.factor()
# recurse the function on each arg if the top function is a multiplication
# e.g. 4 * Abs(b) * Abs(c) should also be regarded as a abs expression.
if expr.func == Mul:
args_is_abs = [is_abs(arg) for arg in expr.args]
return True if all(args_is_abs) else False
# Main Function
elif expr.func == Abs:
is_abs_ = True
elif not expr.is_negative:
is_abs_ = True
return is_abs_
def signed_sqrt(expr):
"""Signed sqrt operator
Args:
expr (sympy.expr): sympy expression
Returns:
sympy.expr: A simplified expression
"""
from functools import reduce
from sympy.core.power import Pow
from sympy.core.mul import Mul
from sympy import sqrt
expr = expr.factor()
# recurse the function on each arg if the top function is a multiplication
# e.g. signed_sqrt( 4 * b^2 ) == 2 * b
if expr.func == Mul:
args_signed_sqrt = [signed_sqrt(arg) for arg in expr.args]
return reduce(Mul, args_signed_sqrt)
elif expr.func == Pow:
base, exponent = expr.args
if exponent.is_even:
return base**(exponent/2)
return sqrt(expr)
def are_quadratic_sols(expr1, expr2):
# Suppose the function is a form of -b +/- sqrt(Delta) / 2a, where Delta = b^2 - 4ac
expr_sum = expr1 + expr2 # -b/a
expr_diff = expr1 - expr2 # +/- sqrt(Delta)/a
expr_prod = expr1 * expr2 # c/a
# b^2/a^2 - 4 c/a == (b^2 - 4ac) / a^2
lhs = (expr_sum ** 2 - 4 * expr_prod).simplify()
rhs = (expr_diff ** 2).simplify()
lhs_minus_rhs = (lhs - rhs).simplify()
return True if lhs_minus_rhs==0 else False
def simplify_quadratic_sols(expr1, expr2):
if not are_quadratic_sols(expr1, expr2):
raise ValueError("The input expr1, expr2 may not be a pair of solutions of a quadratic equation. Make sure they look like -b +/- sqrt(Delta) / 2a.")
expr_sum =(expr1 + expr2).simplify() # -b/a
expr_diff =(expr1 - expr2).simplify() # +/- sqrt(Delta)/a
signed_sqrt_Delta_on_a = signed_sqrt(expr_diff**2) # Still +/- sqrt(Delta)/a, but simplified
simplified_expr1 = (expr_sum + signed_sqrt_Delta_on_a)/2
simplified_expr2 = (expr_sum - signed_sqrt_Delta_on_a)/2
simplified_expr1 = simplified_expr1.simplify()
simplified_expr2 = simplified_expr2.simplify()
return simplified_expr1, simplified_expr2
```
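An illustrative check of the helpers above with SymPy (the import path sinupy.algebra.quadratic is assumed):
```python
import sympy as sp
from sinupy.algebra.quadratic import are_quadratic_sols, simplify_quadratic_sols

a, b, c, x = sp.symbols('a b c x')
sol1, sol2 = sp.solve(a*x**2 + b*x + c, x)   # the two roots of a*x**2 + b*x + c
print(are_quadratic_sols(sol1, sol2))        # -> True
print(simplify_quadratic_sols(sol1, sol2))   # simplified (-b +/- sqrt(Delta)) / 2a forms
```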
#### File: sinupy/waves/waves.py
```python
from sympy import Symbol as _Symbol
from sympy import symbols as _symbols
from sympy import Array as _Array
class Wave:
def __init__(self, varidx='', k=None, w=None):
self.varidx = varidx
if k is None:
k_x, k_y, k_z= _symbols('k_x_{varidx}, k_y_{varidx}, k_z_{varidx}'.format(varidx=self.varidx), complex=True)
self.k = _Array([k_x, k_y, k_z])
else:
self.k = k
if w is None:
self.w = _Symbol('omega_{varidx}'.format(varidx=self.varidx), complex=True)
else:
self.w = w
def k_amp(self):
return _Symbol('k_{amp}_{varidx}'.format(amp='amp', varidx=self.varidx), negative=False)
def w_amp(self):
return _Symbol('w_{amp}_{varidx}'.format(amp='amp', varidx=self.varidx), negative=False)
# Waves involved in electrodynamics
class ElectroMagneticWave(Wave):
def __init__(self, E=None, B=None, *arg, **kwarg):
super().__init__(*arg, **kwarg)
if E is None:
E_x, E_y, E_z = _symbols('E_x_{varidx}, E_y_{varidx}, E_z_{varidx}'.format(varidx=self.varidx), complex=True)
self.E = _Array([E_x, E_y, E_z])
else:
self.E = E
if B is None:
B_x, B_y, B_z = _symbols('B_x_{varidx}, B_y_{varidx}, B_z_{varidx}'.format(varidx=self.varidx), complex=True)
self.B = _Array([B_x, B_y, B_z])
else:
self.B = B
def E_amp(self):
return _Symbol('E_{amp}_{varidx}'.format(amp='amp', varidx=self.varidx), negative=False)
def B_amp(self):
return _Symbol('B_{amp}_{varidx}'.format(amp='amp', varidx=self.varidx), negative=False)
def relative_refraction_N(self):
return _Symbol('N_{varidx}'.format(varidx=self.varidx))
class ElectrostaticWave(ElectroMagneticWave):
def __init__(self, *arg, **kwarg):
super().__init__(*arg, **kwarg)
def makeWave(k=None, w=None, u=None, E=None, B=None):
pass
``` |
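A brief illustrative sketch of the wave classes (assumed import path; it only prints the generated symbols):
```python
from sinupy.waves.waves import ElectroMagneticWave  # assumed import path

wave = ElectroMagneticWave(varidx='1')
print(wave.k)        # [k_x_1, k_y_1, k_z_1]
print(wave.w)        # omega_1
print(wave.E_amp())  # E_amp_1
```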
{
"source": "JiaXingBinggan/FAB_expr",
"score": 2
} |
#### File: ctr/main/pretrain_main.py
```python
import pandas as pd
import numpy as np
import datetime
import os
import random
from sklearn.metrics import roc_auc_score
import ctr.models.p_model as Model
import ctr.models.ctr_data as Data
import torch
import torch.nn as nn
import torch.utils.data
import logging
import sys
import threading
from ctr.config import config
from itertools import islice
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
def get_model(model_name, feature_nums, field_nums, latent_dims):
if model_name == 'LR':
return Model.LR(feature_nums)
elif model_name == 'FM':
return Model.FM(feature_nums, latent_dims)
elif model_name == 'FFM':
return Model.FFM(feature_nums, field_nums, latent_dims)
elif model_name == 'W&D':
return Model.WideAndDeep(feature_nums, field_nums, latent_dims)
elif model_name == 'DeepFM':
return Model.DeepFM(feature_nums, field_nums, latent_dims)
elif model_name == 'FNN':
return Model.FNN(feature_nums, field_nums, latent_dims)
elif model_name == 'IPNN':
return Model.InnerPNN(feature_nums, field_nums, latent_dims)
elif model_name == 'OPNN':
return Model.OuterPNN(feature_nums, field_nums, latent_dims)
elif model_name == 'DCN':
return Model.DCN(feature_nums, field_nums, latent_dims)
elif model_name == 'AFM':
return Model.AFM(feature_nums, field_nums, latent_dims)
def map_fm(line):
return line.strip().split(',')
def get_dataset(args):
data_path = args.data_path + args.dataset_name + args.campaign_id
# click + winning price + hour + timestamp + encode
train_data= pd.read_csv(data_path + 'train.bid.all.txt', header=None).values.astype(int)
field_nums = train_data.shape[1] - 4
val_data = pd.read_csv(data_path + 'val.bid.all.txt', header=None).values.astype(int)
test_data = pd.read_csv(data_path + 'test.bid.' + args.sample_type + '.txt', header=None).values.astype(int)
with open(data_path + 'feat.bid.all.txt') as feat_f:
feature_nums = int(list(islice(feat_f, 0, 1))[0].replace('\n', ''))
return train_data, val_data, test_data, field_nums, feature_nums
def train(model, optimizer, data_loader, loss, device):
    model.train()  # switch to training mode
total_loss = 0
log_intervals = 0
for i, (features, labels) in enumerate(data_loader):
features, labels = features.long().to(device), torch.unsqueeze(labels, 1).to(device)
y = model(features)
train_loss = loss(y, labels.float())
model.zero_grad()
train_loss.backward()
optimizer.step()
        total_loss += train_loss.item()  # take the scalar value from the tensor; returning train_loss directly can easily cause GPU out of memory
log_intervals += 1
return total_loss / log_intervals
def test(model, data_loader, loss, device):
model.eval()
targets, predicts = list(), list()
intervals = 0
total_test_loss = 0
with torch.no_grad():
for features, labels in data_loader:
features, labels = features.long().to(device), torch.unsqueeze(labels, 1).to(device)
y = model(features)
test_loss = loss(y, labels.float())
            targets.extend(labels.tolist())  # extend() appends every value from another sequence to the end of the list
predicts.extend(y.tolist())
intervals += 1
total_test_loss += test_loss.item()
return roc_auc_score(targets, predicts), total_test_loss / intervals
def submission(model, data_loader, device):
model.eval()
targets, predicts = list(), list()
with torch.no_grad():
for features, labels in data_loader:
features, labels = features.long().to(device), torch.unsqueeze(labels, 1).to(device)
y = model(features)
            targets.extend(labels.tolist())  # extend() appends every value from another sequence to the end of the list
predicts.extend(y.tolist())
return predicts, roc_auc_score(targets, predicts)
def main(model, model_name, train_data_loader, val_data_loader, test_data_loader, optimizer, loss, device, args):
valid_aucs = []
valid_losses = []
early_stop_index = 0
is_early_stop = False
start_time = datetime.datetime.now()
for epoch in range(args.epoch):
        torch.cuda.empty_cache()  # free unused intermediate CUDA caches
train_start_time = datetime.datetime.now()
train_average_loss = train(model, optimizer, train_data_loader, loss, device)
torch.save(model.state_dict(), args.save_param_dir + args.campaign_id + model_name + str(
np.mod(epoch, args.early_stop_iter)) + '.pth')
auc, valid_loss = test(model, val_data_loader, loss, device)
valid_aucs.append(auc)
valid_losses.append(valid_loss)
train_end_time = datetime.datetime.now()
logger.info(
'Model {}, epoch {}, train loss {}, val auc {}, val loss {} [{}s]'.format(model_name,
epoch,
train_average_loss,
auc, valid_loss,
(
train_end_time - train_start_time).seconds))
if eva_stopping(valid_aucs, valid_losses, args.early_stop_type, args):
early_stop_index = np.mod(epoch - args.early_stop_iter + 1, args.early_stop_iter)
is_early_stop = True
break
end_time = datetime.datetime.now()
if is_early_stop:
test_model = get_model(model_name, feature_nums, field_nums, args.latent_dims).to(device)
load_path = args.save_param_dir + args.campaign_id + model_name + str(
early_stop_index) + '.pth'
        test_model.load_state_dict(torch.load(load_path, map_location=device))  # load the best parameters
else:
test_model = model
test_predicts, test_auc = submission(test_model, test_data_loader, device)
torch.save(test_model.state_dict(),
               args.save_param_dir + args.campaign_id + model_name + args.sample_type + 'best.pth')  # save the best parameters
logger.info('Model {}, test auc {} [{}s]'.format(model_name, test_auc,
(end_time - start_time).seconds))
for i in range(args.early_stop_iter):
os.remove(args.save_param_dir + args.campaign_id + model_name + str(i) + '.pth')
return test_predicts
def eva_stopping(valid_aucs, valid_losses, type, args): # early stopping
if type == 'auc':
if len(valid_aucs) >= args.early_stop_iter:
auc_campare_arrs = [valid_aucs[-i] < valid_aucs[-i - 1] for i in range(1, args.early_stop_iter)]
auc_div_mean = sum([abs(valid_aucs[-i] - valid_aucs[-i - 1]) for i in range(1, args.early_stop_iter)]) / args.early_stop_iter
if (False not in auc_campare_arrs) or (auc_div_mean <= args.auc_epsilon):
return True
else:
if len(valid_losses) >= args.early_stop_iter:
loss_campare_arrs = [valid_losses[-i] > valid_losses[-i - 1] for i in range(1, args.early_stop_iter)]
loss_div_mean = sum([abs(valid_losses[-i] - valid_losses[-i - 1]) for i in range(1, args.early_stop_iter)]) / args.early_stop_iter
if (False not in loss_campare_arrs) or (loss_div_mean <= args.loss_epsilon):
return True
return False
class ctrThread(threading.Thread):
def __init__(self, func, params):
threading.Thread.__init__(self)
self.func = func
self.params = params
self.res = self.func(*self.params)
def get_res(self):
try:
return self.res
except Exception:
return None
# used to pretrain traditional CTR (click-through rate) prediction models
if __name__ == '__main__':
campaign_id = '1458/' # 1458, 3427
args = config.init_parser(campaign_id)
train_data, val_data, test_data, field_nums, feature_nums = get_dataset(args)
log_dirs = [args.save_log_dir, args.save_log_dir + args.campaign_id]
for log_dir in log_dirs:
if not os.path.exists(log_dir):
os.mkdir(log_dir)
param_dirs = [args.save_param_dir, args.save_param_dir + args.campaign_id]
for param_dir in param_dirs:
if not os.path.exists(param_dir):
os.mkdir(param_dir)
    # set the random seed
setup_seed(args.seed)
logging.basicConfig(level=logging.DEBUG,
filename=args.save_log_dir + str(args.campaign_id).strip('/') + '_output.log',
datefmt='%Y/%m/%d %H:%M:%S',
format='%(asctime)s - %(name)s - %(levelname)s - %(lineno)d - %(module)s - %(message)s')
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setLevel(logging.INFO)
logger.addHandler(stream_handler)
# click + winning price + hour + timestamp + encode
test_dataset = Data.libsvm_dataset(test_data[:, 4:], test_data[:, 0])
test_data_loader = torch.utils.data.DataLoader(test_dataset, batch_size=1024, num_workers=8)
val_dataset = Data.libsvm_dataset(val_data[:, 4:], val_data[:, 0])
val_data_loader = torch.utils.data.DataLoader(val_dataset, batch_size=1024, num_workers=8)
loss = nn.BCELoss()
    device = torch.device(args.device)  # specify the device to run on
choose_models = [args.ctr_model_name]
logger.info(campaign_id)
logger.info('Models ' + ','.join(choose_models) + ' have been trained')
test_predict_arr_dicts = {}
for model_name in choose_models:
test_predict_arr_dicts.setdefault(model_name, [])
# click + winning price + hour + timestamp + encode
train_dataset = Data.libsvm_dataset(train_data[:, 4:], train_data[:, 0])
train_data_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, num_workers=8)
threads = []
for model_name in choose_models:
model = get_model(model_name, feature_nums, field_nums, args.latent_dims).to(device)
optimizer = torch.optim.Adam(params=model.parameters(), lr=args.learning_rate,
weight_decay=args.weight_decay)
if model_name == 'FNN':
FM_pretain_args = torch.load(args.save_param_dir + args.campaign_id + 'FM' + args.sample_type + 'best.pth')
model.load_embedding(FM_pretain_args)
current_model_test_predicts = main(model, model_name, train_data_loader, val_data_loader, test_data_loader,
optimizer, loss, device, args)
test_predict_arr_dicts[model_name].append(current_model_test_predicts)
for key in test_predict_arr_dicts.keys():
        submission_path = args.data_path + args.dataset_name + args.campaign_id + key + '/ctr/'  # folder where the CTR prediction results are stored
sub_dirs = [args.data_path + args.dataset_name + args.campaign_id + key + '/',
args.data_path + args.dataset_name + args.campaign_id + key + '/ctr/']
for sub_dir in sub_dirs:
if not os.path.exists(sub_dir):
os.mkdir(sub_dir)
        # test-set submission
final_sub = np.mean(test_predict_arr_dicts[key], axis=0)
test_pred_df = pd.DataFrame(data=final_sub)
test_pred_df.to_csv(submission_path + 'test_submission_' + args.sample_type + '.csv', header=None)
final_auc = roc_auc_score(test_data[:, 0: 1].tolist(), final_sub.reshape(-1, 1).tolist())
day_aucs = [[final_auc]]
day_aucs_df = pd.DataFrame(data=day_aucs)
day_aucs_df.to_csv(submission_path + 'day_aucs_' + args.sample_type + '.csv', header=None)
if args.dataset_name == 'ipinyou/':
logger.info('Model {}, dataset {}, campain {}, test auc {}\n'.format(key, args.dataset_name,
args.campaign_id, final_auc))
else:
logger.info('Model {}, dataset {}, test auc {}\n'.format(key, args.dataset_name, final_auc))
ctr_model = get_model(args.ctr_model_name, feature_nums, field_nums, args.latent_dims).to(device)
pretrain_params = torch.load(args.save_param_dir + args.campaign_id + args.ctr_model_name + args.sample_type + 'best.pth')
ctr_model.load_state_dict(pretrain_params)
val_ctrs = ctr_model(torch.LongTensor(val_data[:, 4:]).to(args.device)).detach().cpu().numpy() # 11
test_ctrs = ctr_model(torch.LongTensor(test_data[:, 4:]).to(args.device)).detach().cpu().numpy() # 12
# click + winning price + hour + timestamp + encode
train_data = {'clk': val_data[:, 0].tolist(), 'ctr': val_ctrs.flatten().tolist(), 'mprice': val_data[:, 1].tolist(),
'hour': val_data[:, 2].tolist(), 'time_frac': val_data[:, 3].tolist()}
test_data = {'clk': test_data[:, 0].tolist(), 'ctr': test_ctrs.flatten().tolist(),
'mprice': test_data[:, 1].tolist(),
'hour': test_data[:, 2].tolist(), 'time_frac': test_data[:, 3].tolist()}
data_path = args.data_path + args.dataset_name + args.campaign_id
train_df = pd.DataFrame(data=train_data)
train_df.to_csv(data_path + 'train.bid.' + args.sample_type + '.data', index=None)
test_df = pd.DataFrame(data=test_data)
test_df.to_csv(data_path + 'test.bid.' + args.sample_type + '.data', index=None)
``` |
{
"source": "JiaXingBinggan/LSTM_BP",
"score": 3
} |
#### File: LSTM_BP/models/Model.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
import numpy as np
def weight_init(layers):
for layer in layers:
if isinstance(layer, nn.BatchNorm1d):
layer.weight.data.fill_(1)
layer.bias.data.zero_()
elif isinstance(layer, nn.Linear):
n = layer.in_features
y = 1.0 / np.sqrt(n)
layer.weight.data.uniform_(-y, y)
layer.bias.data.fill_(0)
class RNN(nn.Module):
def __init__(self,
feature_nums,
hidden_dims,
bi_lstm,
out_dims=1):
super(RNN, self).__init__()
        self.feature_nums = feature_nums  # input feature dimension
        self.hidden_dims = hidden_dims    # hidden layer dimension
        self.bi_lstm = bi_lstm            # number of stacked LSTM layers
        self.lstm = nn.LSTM(self.feature_nums, self.hidden_dims, self.bi_lstm)
self.out = nn.Linear(self.hidden_dims, out_dims)
def forward(self,x):
x1, _ = self.lstm(x)
a, b, c = x1.shape
out = self.out(x1.view(-1, c))
out1 = out.view(a, b, -1)
return out1
``` |
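A minimal usage sketch for the RNN module above (the import path is assumed; input follows the default (seq_len, batch, feature) layout of nn.LSTM):
```python
import torch
from models.Model import RNN  # assumed import path

model = RNN(feature_nums=8, hidden_dims=16, bi_lstm=2)  # 2 stacked LSTM layers
x = torch.randn(5, 4, 8)      # (seq_len, batch, feature_nums)
y = model(x)
print(y.shape)                # -> torch.Size([5, 4, 1])
```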
{
"source": "jiaxinjiang2919/Refinance-Calculator",
"score": 4
} |
#### File: jiaxinjiang2919/Refinance-Calculator/6_refin_widgets.py
```python
from tkinter import *
import numpy as np
class LoanCalculator:
def __init__(self):
window = Tk()
window.title("Loan Calculator")
Label(window, text="Loan Amount").grid(row=1, column=1, sticky=W)
Label(window, text="Interest rate").grid(row=2, column=1, sticky=W)
Label(window, text="Term (years)").grid(row=3, column=1, sticky=W)
Label(window, text=None).grid(row=4,column=1) # space between inputs and outputs
Label(window, text="Payment:").grid(row=5, column=1, sticky=W)
Label(window, text="Total Payments:").grid(row=6, column=1, sticky=W)
# variables to store loan inputs
self.pv = StringVar()
self.interest_rate = StringVar()
self.term = StringVar()
# varianbles for loan outputs
self.pmt = StringVar()
self.total = StringVar()
# text boxes to hold inputs and outputs
Entry(window, textvariable = self.pv,
justify=RIGHT).grid(row=1,column=2, padx=(0,5))
Entry(window, textvariable = self.interest_rate,
justify=RIGHT).grid(row=2,column=2, padx=(0,5))
Entry(window, textvariable = self.term,
justify=RIGHT).grid(row=3,column=2, padx=(0,5))
Label(window, textvariable = self.pmt,
font="Helvetica 12 bold",
justify=RIGHT).grid(row=5,column=2,sticky= E )
Label(window, textvariable = self.total,
font="Helvetica 12 bold",
justify=RIGHT).grid(row=6,column=2, sticky= E)
Button(window, text="Calculate Payment", command=self.calcPayment).grid(row=7,column=2, padx= (60,5), pady=5)
# Refinance variables
self.old_pmt = StringVar()
self.time_left = StringVar()
self.refi_cost = StringVar()
# Refinance widgets
Label(window, text="Current Payment").grid(row=8,column=1)
Label(window, text="Time Left").grid(row=9,column=1)
Label(window, text="Cost of Refi").grid(row=10,column=1)
Entry(window, textvariable=self.old_pmt, justify=RIGHT).grid(row=8,column=2, padx=(0,5))
Entry(window, textvariable=self.time_left, justify=RIGHT).grid(row=9,column=2, padx=(0,5))
Entry(window, textvariable=self.refi_cost, justify=RIGHT).grid(row=10,column=2, padx=(0,5))
# Refi output variables
self.monthly_savings = StringVar()
self.payback = StringVar()
self.overall_savings = StringVar()
Label(window, text="Payback Months:").grid(row=11,column=1)
Label(window, text="Monthly Savings:").grid(row=12,column=1)
Label(window, text="Overall Savings:").grid(row=13,column=1)
Button(window, text="Evaluate Refi", command=self.evalRefi).grid(row=14,column=2, padx= (100,5), pady=5)
window.mainloop()
def calcPayment(self):
pv = float(self.pv.get())
rate = float(self.interest_rate.get())
term = int(self.term.get())
        pmt = np.pmt(rate / 1200, term * 12, -pv, 0)  # note: np.pmt was removed in newer NumPy releases; numpy_financial.pmt provides the same function
total = pmt * term * 12
self.pmt.set("$" + format(pmt, "5,.2f"))
self.total.set("$" + format(total, "8,.2f"))
    def evalRefi(self):
        pass
LoanCalculator()
``` |
{
"source": "jiaxinjiang2919/stocktracker",
"score": 4
} |
#### File: jiaxinjiang2919/stocktracker/1.3-stocktracker.py
```python
import pandas as pd
import pandas_datareader as pdr
from time import sleep
# get prices from default list
symbols = ['AMZN', 'GOOG', 'NFLX', 'FB', 'GLD', 'SPY']
# create list to store menu options for app
options = " Track Default List, Show Default List, \
Add to Default, Edit Default List, Add New List, Quit".split(",")
# placeholder functions for most menu options`
def show_default():
pass
def add_to_default():
pass
def edit_default():
pass
def add_list():
pass
def get_prices(symbols):
symbols.sort()
return pdr.get_quote_yahoo(symbols)['price']
# replace True with variable to be used later and display menu
def main():
run_program = True
while run_program:
print("Choose option:")
for i in range(1, len(options)+1):
print("{:} - {}".format(i,options[i-1]))
print(get_prices(symbols))
print("CNTL + C to quit")
sleep(5)
if __name__ == "__main__":
main()
``` |
{
"source": "jiaxinonly/docker-mirror",
"score": 2
} |
#### File: jiaxinonly/docker-mirror/docker-mirror.py
```python
from __future__ import print_function
from __future__ import unicode_literals
from concurrent.futures import TimeoutError
from subprocess import Popen
from os import path, system, mknod
import json
import time
import timeout_decorator
import sys
from getopt import getopt, GetoptError
# mirror list
mirrors = {
    "docker": "",  # use the official default
    "docker-cn": "https://registry.docker-cn.com",  # Docker official China mirror
    "azure": "https://dockerhub.azk8s.cn",
    "tencentyun": "https://mirror.ccs.tencentyun.com",  # Tencent Cloud
    "daocloud": "https://f1361db2.m.daocloud.io",  # DaoCloud
    "netease": "https://hub-mirror.c.163.com",  # NetEase
    "ustc": "https://docker.mirrors.ustc.edu.cn",  # USTC
    "aliyun": "https://tzqukqlm.mirror.aliyuncs.com",  # Aliyun; replace with your own Aliyun mirror accelerator address
    "qiniu": "https://reg-mirror.qiniu.com"  # Qiniu Cloud
}
class DockerClient:
def __init__(self, image, timeout):
        self.image = image  # image used for the speed test
        self.timeout = timeout
        self.config_file = "/etc/docker/daemon.json"  # docker config file path
        self.result_list = []  # store the test results
    # configure docker
def set_docker_config(self, mirror_url):
config_dict = {}
if not path.exists(self.config_file):
            # create the config file if it does not exist
mknod(self.config_file, 0o644)
pass
else:
            # otherwise read the existing parameters
with open(self.config_file, "r") as file:
config_dict = json.load(file)
config_dict["registry-mirrors"] = mirror_url
with open(self.config_file, "w") as file:
json.dump(config_dict, file)
@staticmethod
def docker_reload_config():
        # hot-reload the docker configuration
        # os.system uses sh by default, which does not support kill -SIGHUP; use kill -1 instead, switch to bash via sudo, or use /bin/bash -c "kill -SIGHUP"
        system("sudo kill -SIGHUP $(pidof dockerd)")
    # pull the image, cancelling on timeout
def pull_image(self, mirror):
@timeout_decorator.timeout(self.timeout, timeout_exception=TimeoutError)
def pull_start():
pull = ""
try:
print("pulling {} from {}".format(self.image, mirror))
begin_time = time.time()
pull = Popen("docker pull {}".format(self.image), shell=True)
exit_code = pull.wait()
if exit_code == 0:
end_time = time.time()
cost_time = end_time - begin_time
print("mirror {} cost time \033[32m{}\033[0m seconds".format(mirror, cost_time))
return cost_time
else:
                    # the exit code is 1
# net/http: TLS handshake timeout
# image not found
return 1000000000
except TimeoutError:
pull.kill()
self.clean_image()
print("\033[31mTime out {} seconds, skip!\033[0m".format(self.timeout))
return 666666666
cost_time = pull_start()
print("--------------------------------------------")
return cost_time
def speed_test(self, mirror):
self.clean_image()
return self.pull_image(mirror)
    # sort the test results
def mirror_sort(self):
self.result_list.sort(key=lambda cost_time: cost_time[2])
def clean_image(self):
        # force-remove the image
system("docker rmi {} -f > /dev/null 2>&1".format(self.image))
if __name__ == '__main__':
image = "busybox:1.34.1" # 默认拉取的镜像
timeout = 60 # 默认超过60秒取消
version = "0.1.1" # 版本号
# 获取参数
try:
options_list = getopt(sys.argv[1:], "i:t:vh", ["image=", "timeout=", "version", "help"])[0]
for option, option_value in options_list:
if option in ("-i", "--image"):
                image = option_value  # set the image to pull
            elif option in ("-t", "--timeout"):
                timeout = float(option_value)  # set the timeout and convert it to float
                if timeout < 10:  # the timeout must be greater than 10 seconds
print("\033[31mError, timeout value must be greater than 10.\033[0m")
exit()
elif option in ("-v", "--version"):
print("docker-mirror version \033[32m{}\033[0m".format(version)) # 当前版本号
exit()
elif option in ("-h", "--help"):
print("Usage: docker-mirror [OPTIONS]")
print("Options:")
print(" -h, --help".ljust(25), "Print usage")
print(
" -i, --image string".ljust(25),
"Docker image for testing speed, use the default busybox:1.34.1 (e.g., busybox:1.34.1)")
print(" -t, --timeout float".ljust(25),
"Docker pull timeout threshold, must be greater than 10, use the default 60, (e.g., 88.88)")
print(" -v, --version".ljust(25), "Print version information and quit")
exit()
        # create the client
        docker_client = DockerClient(image, timeout)
        # iterate over the mirror list and test each one in turn
        for mirror, mirror_url in mirrors.items():
            docker_client.set_docker_config([mirror_url])  # set the docker registry mirror
            docker_client.docker_reload_config()  # reload the configuration
            cost_time = docker_client.speed_test(mirror)  # measure the time to pull the image from this mirror
            docker_client.result_list.append((mirror, mirror_url, cost_time))  # save the test result
        docker_client.mirror_sort()  # sort the test results
        # print the test results
for mirror in docker_client.result_list:
if mirror[2] == 666666666:
print("mirror {}: \033[31mtime out\033[0m".format(mirror[0]))
elif mirror[2] == 1000000000:
print("mirror {}: \033[31mpull error\033[0m".format(mirror[0]))
else:
print("mirror {}: \033[32m{:.3f}\033[0m seconds".format(mirror[0], mirror[2]))
        if docker_client.result_list[0][2] == 666666666:  # all mirrors timed out
            print("\033[31moh, your internet is terrible, all mirrors timed out!\033[0m")
print("Restore the default configuration.")
docker_client.set_docker_config(mirrors["docker"])
docker_client.docker_reload_config()
else:
print(
"\033[32mnow, set top three mirrors {}, {}, {} for you.\033[0m".format(docker_client.result_list[0][0],
docker_client.result_list[1][0],
docker_client.result_list[2][0]))
excellent_mirror_url = [docker_client.result_list[0][1], docker_client.result_list[1][1],
docker_client.result_list[2][1]]
docker_client.set_docker_config(excellent_mirror_url)
docker_client.docker_reload_config()
        # clean up the test image
        docker_client.clean_image()
    # a parsing error caused by invalid arguments
    except GetoptError:
        print("Your command is invalid.")
        print('You can use the "docker-mirror -h" command to get help.')
exit()
    # the timeout value is not a float
    except ValueError:
        print("\033[31mError, timeout value must be a number.\033[0m")
exit()
    # the user cancelled with ctrl+c
    except KeyboardInterrupt:
        print("\033[31m\nUser cancelled manually, restoring the default configuration.\033[0m")
docker_client.set_docker_config(mirrors["docker"])
docker_client.docker_reload_config()
exit()
``` |
{
"source": "jiaxin-scu/attendance_system",
"score": 2
} |
#### File: jiaxin-scu/attendance_system/insert_photos.py
```python
import os
import cv2
import sys
import main
import ui.input_photos_ui as insert
from PySide2 import QtCore, QtGui, QtWidgets
from PySide2.QtWidgets import *
from PySide2.QtCore import *
from PySide2.QtGui import QPalette, QBrush, QPixmap, QIcon
import face_recognize
import requests
import matplotlib.pyplot as plt
class WinInsert(QMainWindow, insert.Ui_insert):
def __init__(self, face_check):
super().__init__()
self.setupUi(self)
self.setWindowTitle("基于人脸识别的考勤系统:录入照片")
self.setWindowIcon(QIcon('ui/img/charu_icon.png'))
        self.cap = cv2.VideoCapture(0)  # open the camera
        self.timer_camera = QtCore.QTimer()  # set up a timer
        self.timer_camera.timeout.connect(self.show_camera)  # when the timer fires, show the frame
self.face_check = face_check
self.image = None
self.lururenlian.setEnabled(False)
self.paizhao.setEnabled(False)
self.luru.setEnabled(False)
        self.out.clicked.connect(self.to_main_ui)  # return to the main UI
self.chazhao.clicked.connect(self.search_stu)
self.lururenlian.clicked.connect(self.insert_face)
self.paizhao.clicked.connect(self.shoot)
self.luru.clicked.connect(self.insert_pic)
        # check the camera
flag = self.cap.open(0)
if not flag:
msg = QtWidgets.QMessageBox.warning(self, u"Warning", u"请检测相机与电脑是否连接正确", buttons=QtWidgets.QMessageBox.Ok, defaultButton=QtWidgets.QMessageBox.Ok)
def insert_pic(self):
if not self.face_check.is_success(self.image):
QtWidgets.QMessageBox.warning(self, u"Warning", u"录入的照片不够清晰!", buttons=QtWidgets.QMessageBox.Ok, defaultButton=QtWidgets.QMessageBox.Ok)
else:
sname = self.stu_id_2.text()
name = sname + ".jpg"
path = os.path.join('data', name)
plt.imsave(path, self.image)
self.face_check.known_face_names.append(sname)
self.face_check.update_face_embeddings()
QtWidgets.QMessageBox.information(self, u"Warning", u"录入成功!", buttons=QtWidgets.QMessageBox.Ok, defaultButton=QtWidgets.QMessageBox.Ok)
self.lururenlian.setEnabled(True)
self.chazhao.setEnabled(True)
self.paizhao.setEnabled(False)
self.luru.setEnabled(False)
def shoot(self):
self.timer_camera.stop()
ret, self.image = self.cap.read()
self.image = cv2.flip(self.image, 1)
self.image = cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB)
self.paizhao.setEnabled(False)
def insert_face(self):
self.timer_camera.start(30)
self.luru.setEnabled(True)
self.paizhao.setEnabled(True)
self.lururenlian.setEnabled(False)
self.chazhao.setEnabled(False)
def search_stu(self):
stu_id = self.stu_id.text()
try:
response = requests.get("https://vignetting.work/student/" + stu_id)
result = response.json()
if (result['code'] == 200):
stu_info = result['data']
self.chaxunjieguo.setText("查找成功!")
self.stu_id_2.setText(str(stu_info['id']))
self.stu_name.setText(stu_info['name'])
self.stu_major.setText(stu_info['major'])
self.stu_age.setText(str(stu_info['age']))
self.lururenlian.setEnabled(True)
pic_path = "data/" + stu_id + ".jpg"
if (os.path.exists(pic_path)):
self.camera.setPixmap(QPixmap(pic_path))
else:
self.camera.setPixmap("ui/img/meiyoulurerenlian.png")
else:
self.lururenlian.setEnabled(False)
self.chaxunjieguo.setText("查找不到该学生!")
self.stu_id_2.setText(" ")
self.stu_name.setText(" ")
self.stu_major.setText(" ")
self.stu_age.setText(" ")
self.camera.setPixmap(QPixmap(u":/img/backgrand.png"))
except requests.exceptions.ConnectionError:
self.chaxunjieguo.setText("网络连接出现问题!")
def show_camera(self):
"""Convert the images captured by the camera to Qt readable format
Output the image in the Camera: QLabel location
"""
ret, self.image = self.cap.read()
show = cv2.flip(self.image, 1)
show = cv2.cvtColor(show, cv2.COLOR_BGR2RGB)
showImage = QtGui.QImage(show.data, show.shape[1], show.shape[0], QtGui.QImage.Format_RGB888)
self.camera.setPixmap(QtGui.QPixmap.fromImage(showImage))
def to_main_ui(self):
self.timer_camera.stop()
global init_windows
init_windows = main.initshow(self.face_check)
self.cap.release()
init_windows.show()
self.close()
```
#### File: jiaxin-scu/attendance_system/mtcnn.py
```python
import cv2
import numpy as np
from keras.layers import Activation, Conv2D, Dense, Flatten, Input, MaxPool2D, Permute, Reshape
from keras.layers.advanced_activations import PReLU
from keras.models import Model, Sequential
import utils
def create_Pnet(weight_path):
"""
搭建Pnet网络,粗略获取人脸框, 输出bbox位置和是否有人脸
"""
inputs = Input(shape=[None, None, 3])
x = Conv2D(10, (3, 3), strides=1, padding='valid', name='conv1')(inputs)
x = PReLU(shared_axes=[1, 2], name='PReLU1')(x)
x = MaxPool2D(pool_size=2)(x)
x = Conv2D(16, (3, 3), strides=1, padding='valid', name='conv2')(x)
x = PReLU(shared_axes=[1, 2], name='PReLU2')(x)
x = Conv2D(32, (3, 3), strides=1, padding='valid', name='conv3')(x)
x = PReLU(shared_axes=[1, 2], name='PReLU3')(x)
classifier = Conv2D(2, (1, 1), activation='softmax', name='conv4-1')(x)
bbox_regress = Conv2D(4, (1, 1), name='conv4-2')(x)
model = Model([inputs], [classifier, bbox_regress])
model.load_weights(weight_path, by_name=True)
return model
def create_Rnet(weight_path):
"""
    Build the R-Net, which refines the candidate boxes.
"""
inputs = Input(shape=[24, 24, 3])
x = Conv2D(28, (3, 3), strides=1, padding='valid', name='conv1')(inputs)
x = PReLU(shared_axes=[1, 2], name='prelu1')(x)
x = MaxPool2D(pool_size=3, strides=2, padding='same')(x)
x = Conv2D(48, (3, 3), strides=1, padding='valid', name='conv2')(x)
x = PReLU(shared_axes=[1, 2], name='prelu2')(x)
x = MaxPool2D(pool_size=3, strides=2)(x)
x = Conv2D(64, (2, 2), strides=1, padding='valid', name='conv3')(x)
x = PReLU(shared_axes=[1, 2], name='prelu3')(x)
x = Permute((3, 2, 1))(x)
x = Flatten()(x)
x = Dense(128, name='conv4')(x)
x = PReLU(name='prelu4')(x)
classifier = Dense(2, activation='softmax', name='conv5-1')(x)
bbox_regress = Dense(4, name='conv5-2')(x)
model = Model([inputs], [classifier, bbox_regress])
model.load_weights(weight_path, by_name=True)
return model
def create_Onet(weight_path):
"""
    Build the O-Net, which refines the boxes again and outputs five facial landmarks.
"""
inputs = Input(shape=[48, 48, 3])
x = Conv2D(32, (3, 3), strides=1, padding='valid', name='conv1')(inputs)
x = PReLU(shared_axes=[1, 2], name='prelu1')(x)
x = MaxPool2D(pool_size=3, strides=2, padding='same')(x)
x = Conv2D(64, (3, 3), strides=1, padding='valid', name='conv2')(x)
x = PReLU(shared_axes=[1, 2], name='prelu2')(x)
x = MaxPool2D(pool_size=3, strides=2)(x)
x = Conv2D(64, (3, 3), strides=1, padding='valid', name='conv3')(x)
x = PReLU(shared_axes=[1, 2], name='prelu3')(x)
x = MaxPool2D(pool_size=2)(x)
x = Conv2D(128, (2, 2), strides=1, padding='valid', name='conv4')(x)
x = PReLU(shared_axes=[1, 2], name='prelu4')(x)
x = Permute((3, 2, 1))(x)
x = Flatten()(x)
x = Dense(256, name='conv5')(x)
x = PReLU(name='prelu5')(x)
classifier = Dense(2, activation='softmax', name='conv6-1')(x)
bbox_regress = Dense(4, name='conv6-2')(x)
landmark_regress = Dense(10, name='conv6-3')(x)
model = Model([inputs], [classifier, bbox_regress, landmark_regress])
model.load_weights(weight_path, by_name=True)
return model
class MTCNN():
"""What MTCNN does is get the face image frame.
First of all, the photos will be scaled to different sizes according to different zoom ratios,forming the
feature pyramid of the picture.
PNET mainly obtains the candidate window and the regression vector of the boundary box in the face region.
The candidate window is calibrated by using the bounding box regression, and then the highly overlapping
candidate boxes are merged by non-maximum suppression (NMS).
RNET will be trained in RNET network by PNET candidate box, and then use regression value of bounding box
to fine tune candidate form, and use NMS to remove overlapping form.
The function of ONET is similar to that of RNET, except that while removing overlapping candidate Windows,
it displays the key points of five faces (eyes, mouth corners, nose tip) at the same time.
"""
def __init__(self):
"""Initialize the MTCNN network and build P-Net, R-Net and O-Net"""
self.Pnet = create_Pnet('model/pnet.h5')
self.Rnet = create_Rnet('model/rnet.h5')
self.Onet = create_Onet('model/onet.h5')
def detectFace(self, img, threshold):
"""Detect the face and get the face detection box"""
        copy_img = (img.copy() - 127.5) / 127.5  # normalize
        origin_h, origin_w, _ = copy_img.shape  # original image size
        scales = utils.calculateScales(img)  # compute the scaling ratios for the input image
        #-------------------------------------------------#
        # P-Net stage: coarse face box proposals
        # first make coarse predictions and store them in out
        # then decode the predictions into (coarse) face boxes stored in rectangles
#-------------------------------------------------#
out = []
rectangles = []
for scale in scales:
            hs = int(origin_h * scale)  # scaled height
            ws = int(origin_w * scale)  # scaled width
scale_img = cv2.resize(copy_img, (ws, hs))
inputs = np.expand_dims(scale_img, 0)
ouput = self.Pnet.predict(inputs)
            ouput = [ouput[0][0], ouput[1][0]]  # a single image, so drop the batch dimension
out.append(ouput)
for i in range(len(scales)):
cls_prob = out[i][0][:, :, 1]
out_h, out_w = cls_prob.shape
out_side = max(out_h, out_w)
roi = out[i][1]
            rectangle = utils.detect_face_12net(cls_prob, roi, out_side, 1 / scales[i], origin_w, origin_h, threshold[0])   # decode
rectangles.extend(rectangle)
        rectangles = np.array(utils.NMS(rectangles, 0.7))   # non-maximum suppression
if len(rectangles) == 0:
return []
        #--------------------------------------#
        #   R-Net stage: refine the face boxes more precisely
        #   and finally convert the boxes to squares
        #--------------------------------------#
predict_24_batch = []
for rectangle in rectangles:
            crop_img = copy_img[int(rectangle[1]):int(rectangle[3]), int(rectangle[0]):int(rectangle[2])]   # crop from the original image using the coarse box coordinates
scale_img = cv2.resize(crop_img, (24, 24))
predict_24_batch.append(scale_img)
cls_prob, roi_prob = self.Rnet.predict(np.array(predict_24_batch))
        rectangles = utils.filter_face_24net(cls_prob, roi_prob, rectangles, origin_w, origin_h, threshold[1])   # decode
if len(rectangles) == 0:
return rectangles
        #-----------------------------#
        #   O-Net stage: compute the final face boxes
        #   and output five facial landmark locations (eyes, mouth corners, nose tip)
        #-----------------------------#
predict_batch = []
for rectangle in rectangles:
            crop_img = copy_img[int(rectangle[1]):int(rectangle[3]), int(rectangle[0]):int(rectangle[2])]   # crop from the original image using the refined box coordinates
scale_img = cv2.resize(crop_img, (48, 48))
predict_batch.append(scale_img)
cls_prob, roi_prob, pts_prob = self.Onet.predict(np.array(predict_batch))
        rectangles = utils.filter_face_48net(cls_prob, roi_prob, pts_prob, rectangles, origin_w, origin_h, threshold[2])   # decode
return rectangles
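
# Minimal usage sketch (not part of the original file): it assumes the three weight files under
# model/ exist and that `utils` provides the decode/NMS helpers used above; the image path and
# threshold values below are illustrative assumptions only.
# detector = MTCNN()
# img = cv2.imread("test.jpg")
# rectangles = detector.detectFace(img, threshold=[0.5, 0.6, 0.7])
# for rect in rectangles:
#     cv2.rectangle(img, (int(rect[0]), int(rect[1])), (int(rect[2]), int(rect[3])), (0, 255, 0), 2)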
```
#### File: jiaxin-scu/attendance_system/process.py
```python
import face_recognize
import os
import cv2
import requests
def update():
face_check = face_recognize.face_rec()
face_check.update_face_embeddings()
def test_acc():
recognized_num = 0
not_recognized_num = 0
face_check = face_recognize.face_rec()
face_list = os.listdir("data")
for i in face_list:
img = cv2.imread("data/" + i)
img = cv2.flip(img, 1)
name = face_check.recognize(img)
if name + ".jpg" == i:
recognized_num += 1
print(i + " successfully resognized.")
else:
not_recognized_num += 1
print(i + " can not resognized.")
# if name == "Unknown":
# not_recognized_num += 1
# print(i + " can not resognized.")
# else:
# recognized_num += 1
# print(i + " successfully resognized.")
acc = recognized_num / (recognized_num + not_recognized_num)
print("recognized_num: " + str(recognized_num))
print("not_recognized_num: " + str(not_recognized_num))
print("acc: " + str(acc))
def test_api_post():
response = requests.post("https://vignetting.work/record" + "?studentId=1&classRoomId=1")
result = response.json()
print(result)
def test_api_get():
response = requests.get("https://vignetting.work/student/" + "1")
result = response.json()
print(result)
update()
``` |
{
"source": "JiaxinYangJX/MTH994",
"score": 3
} |
#### File: Deeplearning_tutorial/CNN/CNNPytorch.py
```python
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas as pd
import time
import random
from sklearn import preprocessing
import torch
from torch.utils.data.dataset import Dataset
from torchvision import transforms
from torch.autograd import Variable
import torch.nn.functional as F
import torch.nn as nn
import torch.optim as optim
tic = time.perf_counter()
#======================================Classes==================================
class CNNLayerNet(nn.ModuleList):
def __init__(self, C_in, H0, H1, H2, K1, P1, D_out):
"""
        In the constructor we instantiate the convolution, pooling and linear modules
        and assign them as member variables.
        H0: number of features after the flatten layer (shape[1] of the flattened matrix)
"""
super(CNNLayerNet, self).__init__()
self.conv1 = nn.Conv2d(C_in, H1, K1, bias=True)
nn.init.xavier_uniform_(self.conv1.weight)
self.pool1 = nn.MaxPool2d(P1)
self.fc = nn.Linear(H0, H2, bias = True)
nn.init.xavier_uniform_(self.fc.weight)
self.linear1 = nn.Linear(H2, D_out, bias = True)
nn.init.xavier_uniform_(self.linear1.weight)
self.relu = nn.ReLU()
self.tanh = nn.Tanh()
self.lrelu = nn.LeakyReLU()
self.prelu = nn.PReLU()
def forward(self, X):
"""
In the forward function we accept a Tensor of input data and we must return
a Tensor of output data. We can use Modules defined in the constructor as
well as arbitrary operators on Tensors.
"""
X = self.conv1(X)
X = self.relu(X)
X = self.pool1(X)
# X = (N,C, H, W)
X = X.view(X.size(0),-1)
# X = (N, C*H*W)
X = self.fc(X)
X = self.relu(X)
X = self.linear1(X)
y_hat = F.log_softmax(X, dim=1)
return y_hat
#=================================Training & Testing============================
def train(args, model, device, train_loader, optimizer, epoch):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
output = model(data)
loss = F.nll_loss(output, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if epoch % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset), loss.item()))
def test(args, model, device, epoch, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
# pcc = PCC(output, target)[0]
# rmse = RMSE(output, target)
test_loss /= len(test_loader.dataset)
if epoch % args.log_interval == 0:
print('\n Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(test_loss, correct, len(test_loader.dataset),100. * correct / len(test_loader.dataset)))
# print("[test_loss: {:.4f}] [PCC: {:.4f}] [RMSE: {:.4f}] [Epoch: {:d}] [2DCNN] ".format(test_loss, pcc, rmse, epoch))
def read_dataset(feature_file, label_file):
''' Read data set in *.csv to data frame in Pandas'''
df_X = pd.read_csv(feature_file)
df_y = pd.read_csv(label_file)
X = df_X.values # convert values in dataframe to numpy array (features)
y = df_y.values # convert values in dataframe to numpy array (label)
return X, y
def normalize_features(X_train, X_test):
from sklearn.preprocessing import StandardScaler #import libaray
scaler = StandardScaler() # call an object function
scaler.fit(X_train) # calculate mean, std in X_train
X_train_norm1 = scaler.transform(X_train) # apply normalization on X_train
X_test_norm1 = scaler.transform(X_test) # we use the same normalization on X_test
X_train_norm = np.reshape(X_train_norm1,(-1,1,28,28)) # reshape X to be a 4-D array
X_test_norm = np.reshape(X_test_norm1,(-1,1,28,28))
return X_train_norm, X_test_norm
def main():
# Training settings
parser = argparse.ArgumentParser(description='MNIST')
    parser.add_argument('--batch-size', type=int, default=16, metavar='N',
                        help='input batch size for training (default: 16)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')  # train itself 9221, test 3767
    parser.add_argument('--epochs', type=int, default=60, metavar='N',
                        help='number of epochs to train (default: 60)')
    parser.add_argument('--lr', type=float, default=0.0001, metavar='LR',
                        help='learning rate (default: 0.0001)')
    parser.add_argument('--momentum', type=float, default=0.3, metavar='M',
                        help='SGD momentum (default: 0.3)')
    parser.add_argument('--weight_decay', type=float, default=0, metavar='M',
                        help='weight decay (default: 0)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=2, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--save-model', action='store_true', default=True,
help='For Saving the current Model')
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda:0" if use_cuda else "cpu")
kwargs = {'num_workers': 2, 'pin_memory': True} if use_cuda else {}
#=================================Load Data=================================
X_train, y_train = read_dataset('MNIST_X_train.csv', 'MNIST_y_train.csv')
X_test, y_test = read_dataset('MNIST_X_test.csv', 'MNIST_y_test.csv')
X_train, X_test = normalize_features(X_train, X_test)
    print('Train:', X_train.shape)
print('Test:', X_test.shape)
print(y_train.shape)
print(y_test.shape)
#==================================Pack Data================================
train_data = torch.from_numpy(X_train).float()
test_data = torch.from_numpy(X_test).float()
trainset = torch.utils.data.TensorDataset(train_data, torch.from_numpy(y_train.ravel()))
testset = torch.utils.data.TensorDataset(test_data, torch.from_numpy(y_test.ravel()))
# Define data loader
train_loader = torch.utils.data.DataLoader(dataset=trainset, batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(dataset=testset, batch_size=args.test_batch_size, shuffle=False, **kwargs)
#=================================Design Net================================
C_in = 1
H1 = 4
H2 = 256
K1 = (3,3)
P1 = 2
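    # Flattened size after the conv/pool stack: a 28x28 input shrinks to 26x26 after the
    # 3x3 'valid' convolution and to 13x13 after 2x2 max pooling, giving H1*13*13 features.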
H0 = H1*13*13
D_out = 10
model = CNNLayerNet(C_in, H0, H1, H2, K1, P1, D_out).to(device)
# optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
optimizer = optim.Adam(model.parameters(), lr=args.lr, eps=1e-08, weight_decay=args.weight_decay, amsgrad=False)
lr_adjust = optim.lr_scheduler.StepLR(optimizer, step_size = 20, gamma = 0.5, last_epoch = -1)
for epoch in range(1, args.epochs + 1):
lr_adjust.step()
train(args, model, device, train_loader, optimizer, epoch)
test(args, model, device, epoch, test_loader)
if (args.save_model):
torch.save(model.state_dict(),"mnist_cnn.pt")
if __name__ == '__main__':
main()
toc = time.perf_counter()
print(("Elapsed time: %.1f [min]" % ((toc-tic)/60)))
print("==============================Finish=====================================")
```
#### File: Deeplearning_tutorial/LSTM/LSTM.py
```python
import math
import time
import random
import numpy as np
import pandas as pd
np.random.seed(7)
tic = time.perf_counter()
from nn.LSTMLayers import lstm_cell_forward, lstm_forward
from nn.LSTMLayers import lstm_cell_backward, lstm_backward
from nn.activations import tanh, sigmoid, softmax
# from nn.gradient_clip import clip
'''
In this file, we will use dictionary to combine all the seperate layers together
'''
def read_dataset(feature_file, label_file):
''' Read data set in *.csv to data frame in Pandas'''
df_X = pd.read_csv(feature_file)
df_y = pd.read_csv(label_file)
X = df_X.values # convert values in dataframe to numpy array (features)
y = df_y.values # convert values in dataframe to numpy array (label)
return X, y
def normalize_features(X_train, X_test):
from sklearn.preprocessing import StandardScaler #import libaray
scaler = StandardScaler() # call an object function
scaler.fit(X_train) # calculate mean, std in X_train
X_train_norm1 = scaler.transform(X_train) # apply normalization on X_train
X_test_norm1 = scaler.transform(X_test) # we use the same normalization on X_test
X_train_norm = np.reshape(X_train_norm1,(-1,28,28)) # reshape X to be a 3-D array
X_test_norm = np.reshape(X_test_norm1,(-1,28,28))
return X_train_norm, X_test_norm
def one_hot_encoder(y_train, y_test):
''' convert label to a vector under one-hot-code fashion '''
from sklearn import preprocessing
lb = preprocessing.LabelBinarizer()
lb.fit(y_train)
y_train_ohe = lb.transform(y_train)
y_test_ohe = lb.transform(y_test)
return y_train_ohe, y_test_ohe
X_train, y_train = read_dataset('MNIST_X_train.csv', 'MNIST_y_train.csv')
X_test, y_test = read_dataset('MNIST_X_test.csv', 'MNIST_y_test.csv')
X_train_norm, X_test_norm = normalize_features(X_train, X_test)
y_train_ohe, y_test_ohe = one_hot_encoder(y_train, y_test)
print(X_train_norm.shape)
print(X_test_norm.shape)
print(y_train_ohe.shape)
print(y_test_ohe.shape)
class LSTM():
def __init__(self, X, y, H = 128, lr = 0.01):
self.X = X
self.y = y
self.lr = lr
        self.H = H   # number of hidden neurons
self.N = self.X.shape[0]
self.D = self.X.shape[1]
self.T = self.X.shape[2]
self.M = self.y.shape[1] # M = 10 for MNIST dataset
# self.batch_size = batch_size
self.Wf = np.random.randn(self.H + self.D, self.H)
self.Wi = np.random.randn(self.H + self.D, self.H)
self.Wo = np.random.randn(self.H + self.D, self.H)
self.Wc = np.random.randn(self.H + self.D, self.H)
self.Wy = np.random.randn(self.H, self.M)
self.bf = np.zeros((1, self.H))
self.bi = np.zeros((1, self.H))
self.bo = np.zeros((1, self.H))
self.bc = np.zeros((1, self.H))
self.by = np.zeros((1, self.M))
self.h0 = np.zeros((self.N, self.H))
# self.h0 = np.random.randn(self.N, self.H)
self.parameters = {"Wf": self.Wf, "Wi": self.Wi, "Wo": self.Wo, "Wc": self.Wc, "Wy": self.Wy, "bf": self.bf, "bi": self.bi, "bo": self.bo, "bc": self.bc, "by": self.by}
# self.gradients = {"dWx": dWx, "dWh": dWaa, "dWy": dWy, "dbh": dbh, "dby": dby}
def forward(self):
self.h, self.y_hat, self.c, self.caches = lstm_forward(self.X, self.h0, self.parameters)
def backward(self):
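        # only the output at the last time step (index 27 == T - 1 for the 28 MNIST time steps) feeds the loss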
self.loss, dy = cross_entropy_loss(self.y_hat[:,:,27], self.y)
dWy = np.dot(self.h[:,:,27].T, dy)
dby = np.sum(dy, axis = 0, keepdims = True)
dh = np.zeros((self.N, self.H, self.T))
for t in range(self.T):
dh[:,:,t] = np.dot(dy, self.Wy.T)
gradients = lstm_backward(dh, self.caches)
# self.X = self.X - self.lr * gradients['dX']
# self.h0 = self.h0 - self.lr * gradients['dh0']
self.Wf = self.Wf - self.lr * gradients['dWf']
self.Wi = self.Wi - self.lr * gradients['dWi']
self.Wo = self.Wo - self.lr * gradients['dWo']
self.Wc = self.Wc - self.lr * gradients['dWc']
self.bf = self.bf - self.lr * gradients['dbf']
self.bi = self.bi - self.lr * gradients['dbi']
self.bc = self.bc - self.lr * gradients['dbc']
self.bo = self.bo - self.lr * gradients['dbo']
self.Wy = self.Wy - self.lr * dWy
self.by = self.by - self.lr * dby
self.parameters = {"Wf": self.Wf, "Wi": self.Wi, "Wo": self.Wo, "Wc": self.Wc, "Wy": self.Wy, "bf": self.bf, "bi": self.bi, "bo": self.bo, "bc": self.bc, "by": self.by}
def predict(self, X_test):
h, y_hat_test_all, c_hat_test_all, caches = lstm_forward(X_test, self.h0, self.parameters)
y_hat_test = y_hat_test_all[:,:,27]
labels = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
num_test_samples = X_test.shape[0]
ypred = np.zeros(num_test_samples, dtype=int)
for i in range(num_test_samples):
ypred[i] = labels[np.argmax(y_hat_test[i,:])]
return ypred
def cross_entropy_loss(y_hat, y):
"""
Cross entropy loss
y_hat: predict y after softmax, shape:(M,d), M is the #of samples
y: shape(M,d)
"""
loss = np.mean(np.sum(- y * np.log(y_hat), axis=-1))
# loss = np.sum(- y * np.log(y_hat))
dy = y_hat - y
return loss, dy
def accuracy(ypred, yexact):
p = np.array(ypred == yexact, dtype = int)
return np.sum(p)/float(len(yexact))
myLSTM = LSTM(X_test_norm, y_test_ohe, H = 128, lr= 0.01)
epoch_num = 100
for i in range(epoch_num):
myLSTM.forward()
myLSTM.backward()
if ((i + 1) % 20 == 0):
print('epoch = %d, current loss = %.5f' % (i+1, myLSTM.loss))
y_pred = myLSTM.predict(X_test_norm)
print(y_pred)
print(y_test.ravel())
print('Accuracy of our model ', accuracy(y_pred, y_test.ravel()))
toc = time.perf_counter()
print('Total time: ' + str((toc-tic)) + 's')
print('===============================Finish===================================')
```
#### File: RNN/nn/RNNLayers.py
```python
import math
import numpy as np
import pandas as pd
from nn.activations import tanh, softmax, sigmoid
def rnn_cell_forward(Xt,h_prev,parameters):
'''
RNN Cell:
Input:
- Xt: (N,D) N=2000 D=28
- h_prev: (N,H) #of neurons in the hidden state. "prev" is actually for timestep "t-1"
- parameters:
: Wx: Weight matrix multiplying the input Xt, (D, H)
: Wh: Weight matrix multiplying the hidden state (H,H)
: Wy: Weight matrix relating to the hidden-state. Shape is (H,M) # M = 10
: bh: Bias, (1, H)
: by: Bias, (1, M)
Returns:
- h_next: next hidden state (N, H)
- yt_pred: prediction at timestep t, (N, M)
- cache : tuple of values needed for the back-propagation part, has shape (h_next, h_prev, Xt, parameters)
'''
Wx = parameters["Wx"]
Wh = parameters["Wh"]
Wy = parameters["Wy"]
bh = parameters["bh"]
by = parameters["by"]
# compute next activation state using the formula tanh(xxxx)
h_next = tanh(np.dot(Xt,Wx) + np.dot(h_prev,Wh) + bh)
yt_pred = softmax(np.dot(h_next, Wy) + by)
cache = (h_next, h_prev, Xt, parameters)
return h_next, yt_pred, cache
def rnn_forward(X,h0, parameters):
'''
Forward Layer of RNN
Input:
- X: Input data for every time-step. (N,D,T) # D=28, T=28
- h0: Initial hidden state (N,H)
- parameters:
: Wx: Weight matrix multiplying the input, (D, H)
: Wh: Weight matrix multiplying the hidden state (H,H)
: Wy: Weight matrix relating to the hidden-state. Shape is (H,M) # M = 10
: bh: Bias, (1, H)
: by: Bias, (1, M)
Returns:
- h : Hidden states for all of the time steps. (N, H, T)
- y_pred: Predictions that saves all of the yt_pred, The shape will be (N,M,T)
- caches: tuple of values that needed for the back-propagation part, caches.append(cache)
'''
caches = []
N, D, T = X.shape
H, M = parameters['Wy'].shape
# Initialize 'h' and 'y'
h = np.zeros((N,H,T))
y_pred = np.zeros((N,M,T))
# Initialize h_next
h_next = h0
for t in range(T):
h_next, yt_pred, cache = rnn_cell_forward(X[:,:,t], h_next, parameters)
h[:,:,t] = h_next
y_pred[:,:,t] = yt_pred
caches.append(cache)
caches.append(X)
#[[cache1,cache2, ..., cache28],X]
return h, y_pred, caches
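
# Shape sketch (illustrative values, not from the original file): with X of shape (N, D, T),
# for example N=2000 MNIST images read as D=28 features over T=28 time steps, rnn_forward
# returns h of shape (N, H, T), y_pred of shape (N, M, T) and one cache per time step for rnn_backward.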
def rnn_cell_backward(dh_next, cache):
'''
Backward Layer of RNN cell:
Input:
- dh_next : Upstream gradients of hidden state (Gradient of loss wrt hidden state)
- cache: Output of the rnn_cell_forward
Returns:
- dXt:
- dh_prev
- dWx
- dWh
- dbh
'''
    # cache was packed in rnn_cell_forward as (h_next, h_prev, Xt, parameters)
    (h_next, h_prev, Xt, parameters) = cache
    Wx = parameters["Wx"]
    Wh = parameters["Wh"]
    Wy = parameters["Wy"]
    bh = parameters["bh"]
    by = parameters["by"]
    # standard backprop through h_next = tanh(Xt.Wx + h_prev.Wh + bh)
    dtanh = (1 - h_next ** 2) * dh_next
    dWx = np.dot(Xt.T, dtanh)
    dWh = np.dot(h_prev.T, dtanh)
    dbh = np.sum(dtanh, axis=0, keepdims=True)
    dh_prev = np.dot(dtanh, Wh.T)
    dXt = np.dot(dtanh, Wx.T)
gradients = {'dXt': dXt, 'dh_prev': dh_prev, 'dWx': dWx,'dWh':dWh, 'dbh':dbh} #keys and values in the Dictionary
return gradients
def rnn_backward(dh, caches):
'''
Backward Layers
Input:
- dh : Upstream gradients of all hidden states. (N,H,T)
- caches: output of the rnn_forward
Returns:
- dh: (N,H,T)
- dh0: (N,H,T)
- dWx
- dWh
- dbh
- dWy
- dhy
'''
(caches, X) = caches
(h1, h0, X1,parameters) = caches[0]
N, H, T = dh.shape
    D = X.shape[1]
    # initialize the gradient accumulators with the right shapes
    dX = np.zeros((N, D, T))
    dWx = np.zeros((D, H))
    dWh = np.zeros((H, H))
    dbh = np.zeros((1, H))
    dh0 = np.zeros((N, H))
    dh_prevt = np.zeros((N, H))
    for t in reversed(range(T)):
        # gradient flowing into step t = upstream dh at t plus the hidden-state gradient from step t+1
        gradients = rnn_cell_backward(dh[:, :, t] + dh_prevt, caches[t])
dXt, dh_prevt, dWxt, dWht, dbht = gradients['dXt'], gradients['dh_prev'], gradients['dWx'], gradients['dWh'], gradients['dbh']
dX[:,:,t] = dXt
dWx = dWx+ dWxt # dWx += dWxt
dWh += dWht
dbh += dbht
dh0 = dh_prevt
gradients = {'dX': dX, 'dh0': dh0, 'dWx': dWx, 'dWh': dWh, 'dbh': dbh}
return gradients
# def cross_entropy_loss(inPut):
``` |
{
"source": "JiaxinYangJX/VR_analysis",
"score": 2
} |
#### File: VR_analysis/py2src/analysis_module.py
```python
import pandas as pd
import numpy as np
from helper import *
from scipy.spatial.distance import pdist
from scipy.cluster.hierarchy import fclusterdata
from collections import Counter
def read_structure(xyz_path, frag_id_path):
'''
@description: read structure
@param:
xyz_path: path to xyz coord
frag_id_path: path to fragment 1D
@return:
frag: numpy array, fragment 1D position
xyz: numpy array, xyz
'''
xyz = pd.read_csv(xyz_path, header=None, sep='\t')
frag_id = pd.read_csv(frag_id_path, header=None, sep='\t')
xyz = xyz.iloc[:,1:].values
start = frag_id.iloc[:,0].values
start = np.expand_dims(start, axis=1)
end = start + 5000
frag = np.concatenate([start, end], axis=1)
return frag, xyz
def read_sites(sites_path, chr_id):
'''
@description: read binding sites dataset
@param:
sites_path: path to the binding sites data
chr_id: chr id
@return:
sites: numpy array, binding sites
'''
sites = pd.read_csv(sites_path, header=None, sep='\t')
sites = sites[sites.iloc[:,0]==chr_id].iloc[:,1:3].values
return sites
def read_links(links_path, chr_id):
'''
@description: read chromatin interaction dataset
@param:
links_path: path to the links: chr, frag1_start, end, frag2_start, end
chr_id: chr id
@return:
links: numpy array, fragment links
'''
links = pd.read_csv(links_path, header=None, sep='\t')
links = links[links.iloc[:,0]==chr_id].iloc[:,1:5].values
return links
def spatial_hub_hiera(frag, xyz, sites, dist_size=4, cluster_size_thres=0.95):
'''
@description: generate 3D spatial hubs of specific sites
@param:
frag: frag_id
xyz: xyz
sites: a specific binding sites, protein, DNase, gv
        dist_size: distance threshold multiplier used in hierarchical clustering
cluster_size_thres: only clusters with top 0.95 sizes
@return:
group_list: list, contains the frag_id in each 3D hub
'''
# 1. map the sites into 3D structure
sites_coord, sites_id = sites_map(frag, xyz, sites)
    # 2. hierarchical cluster
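    # the clustering threshold below is dist_size times the mean 3D distance between
    # consecutive fragments along the chain, so a hub groups sites within a few fragment steps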
dist_thres = np.mean(np.linalg.norm(np.diff(xyz,axis=0),axis=1)) * dist_size
my_hiera = fclusterdata(sites_coord, t=dist_thres,criterion='distance')
# 3. only keep the cluster with enough fragments, default: top 95%
cluster_counter = Counter(my_hiera)
size_thres = np.quantile(np.array(list(cluster_counter.items()))[:,1],q=cluster_size_thres)
group_list = []
for label, count in cluster_counter.most_common():
if count > size_thres:
group_list.append(sites_coord[my_hiera==label,])
return group_list
def interaction_hub(frag, xyz, links, q_quantile = 0.99):
'''
@description: generate hubs with high degree of interaction
@param:
frag: frag_id
xyz: xyz
links: frag-frag links
q_quantile: top 0.99 dense degree
@return:
group_list: numpy array, contains the start and end id of hubs
'''
# 1. links to 2 regions
region_1 = links[:,0:2]
region_2 = links[:,2:4]
region = np.concatenate([region_1, region_2], axis=0)
# 2. map to 1d, get degree
frag_degree = degree_map(frag, region)
# 3. cumulative increase
cum_degree = np.cumsum(frag_degree)
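    # with the cumulative sum, the total degree of any window of `size` fragments is an O(1) difference of two entries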
# 4. find the dense 1D region
size = 5
degree_list = []
for p in range(frag.shape[0]-size):
degree_list.append(cum_degree[p+size]-cum_degree[p])
degree_list = np.array(degree_list)
# find the high degree regions
thres = np.quantile(degree_list, q = q_quantile)
high_region_start = np.where(degree_list > thres)[0] # high range: (p,p+size]
idx = 0
start_idx = high_region_start[0] + 1 # [p+1,p+size]
# merge the region
group_list = []
while idx < len(high_region_start)-1:
if (high_region_start[idx] + size) >= high_region_start[idx+1]:
# overlap
idx += 1
else: # save
group_list.append([start_idx, high_region_start[idx]+size])
start_idx = high_region_start[idx+1] + 1
idx += 1
group_list.append([start_idx, high_region_start[idx]+size]) # add last
return np.array(group_list)
def loop_3d(frag, xyz, scale = 100000, resolution = 5000, q_quantile=0.002):
'''
@description: get the chromatin loops
@param:
frag: frag_id
xyz: xyz
scale: loop scale
resolution: resolution of the structure
q_quantile: top 0.002 closest
@return:
loop_list: numpy array, contains the start and end id of loops
'''
# 1. find the 1) distance between two fragment
size = scale // resolution
dist_list = []
for p in range(frag.shape[0]-size+1):
dist_tmp = np.linalg.norm(xyz[p] - xyz[p+size-1])
dist_list.append(dist_tmp)
dist_list = np.array(dist_list)
# 2. find the loop
thres = np.quantile(dist_list, q = q_quantile)
close_loop_start = np.where(dist_list < thres)[0] # range: [p,p+size]
# 3. merge the loop
idx = 0
start_idx = close_loop_start[0]
loop_list = []
while idx < len(close_loop_start)-1:
if (close_loop_start[idx] + size) >= close_loop_start[idx+1]:
# overlap
idx += 1
else: # save
loop_list.append([start_idx, start_idx + size])
start_idx = close_loop_start[idx+1]
idx += 1
loop_list.append([start_idx, start_idx+size]) # add last
return np.array(loop_list)
def main():
xyz_path = '../data/structure/chr1_1502144569709.xyz.txt'
frag_id_path = '../data/structure/chr1_coordinate_mapping.txt'
sites_path = '../data/binding/ENCSR000EMT_rep2_1_se_bwa_biorep_filtered_peaks.bed'
links_path = '../data/links/GM_link.txt'
chr_id = 'chr1'
frag, xyz = read_structure(xyz_path, frag_id_path)
sites = read_sites(sites_path, chr_id)
links = read_links(links_path, chr_id)
hub_3d = spatial_hub_hiera(frag, xyz, sites)
inter_hub = interaction_hub(frag, xyz, links)
loop = loop_3d(frag, xyz)
return None
if __name__ == "__main__":
main()
``` |
{
"source": "jiaxinying/analytics-zoo",
"score": 2
} |
#### File: automl/xgboost/test_xgbregressor.py
```python
from test.zoo.pipeline.utils.test_utils import ZooTestCase
import numpy as np
import pandas as pd
import os
from numpy.testing import assert_array_almost_equal
from zoo.orca.automl.xgboost.XGBoost import XGBoost
from zoo.chronos.feature.identity_transformer import IdentityTransformer
import pytest
class TestXgbregressor(ZooTestCase):
def setup_method(self, method):
# super().setup_method(method)
self.model = XGBoost(config={'n_estimators': 5, 'max_depth': 2, 'tree_method': 'hist'})
feature_cols = ["f", "f2"]
target_col = "t"
train_df = pd.DataFrame({"f": np.random.randn(20),
"f2": np.random.randn(20),
"t": np.random.randint(20)})
val_df = pd.DataFrame({"f": np.random.randn(5),
"f2": np.random.randn(5),
"t": np.random.randint(5)})
ft = IdentityTransformer(feature_cols=feature_cols, target_col=target_col)
self.x, self.y = ft.transform(train_df)
self.val_x, self.val_y = ft.transform(val_df)
def teardown_method(self, method):
pass
def test_fit_predict_evaluate(self):
self.model.fit_eval((self.x, self.y), [(self.val_x, self.val_y)])
# test predict
result = self.model.predict(self.val_x)
# test evaluate
evaluate_result = self.model.evaluate(self.val_x, self.val_y)
def test_save_restore(self):
self.model.fit_eval((self.x, self.y), [(self.val_x, self.val_y)])
result_save = self.model.predict(self.val_x)
model_file = "tmp.pkl"
self.model.save(model_file)
assert os.path.isfile(model_file)
new_model = XGBoost()
new_model.restore(model_file)
assert new_model.model
result_restore = new_model.predict(self.val_x)
assert_array_almost_equal(result_save, result_restore, decimal=2), \
"Prediction values are not the same after restore: " \
"predict before is {}, and predict after is {}".format(result_save, result_restore)
os.remove(model_file)
def test_metric(self):
# metric not in XGB_METRIC_NAME but in Evaluator.metrics_func.keys()
self.model.fit_eval(
data=(self.x, self.y),
validation_data=[(self.val_x, self.val_y)],
metric="mse")
# metric in XGB_METRIC_NAME
self.model.fit_eval(
data=(self.x, self.y),
validation_data=[(self.val_x, self.val_y)],
metric="mlogloss")
with pytest.raises(ValueError):
self.model.fit_eval(
data=(self.x, self.y),
validation_data=[(self.val_x, self.val_y)],
metric="wrong_metric")
if __name__ == "__main__":
pytest.main([__file__])
```
#### File: data/utils/resample.py
```python
import pandas as pd
def resample_timeseries_dataframe(df,
dt_col,
interval,
start_time,
end_time,
merge_mode="mean"):
'''
resample and return a dataframe with a new time interval.
:param df: input dataframe.
:param dt_col: name of datetime column.
:param interval: pandas offset aliases, indicating time interval of the output dataframe
:param start_time: start time of the output dataframe
:param end_time: end time of the output dataframe
:param merge_mode: if current interval is smaller than output interval,
we need to merge the values in a mode. "max", "min", "mean"
or "sum" are supported for now.
'''
assert dt_col in df.columns, f"dt_col {dt_col} can not be found in df."
assert pd.isna(df[dt_col]).sum() == 0, "There is N/A in datetime col"
assert pd.Timestamp(start_time) <= pd.Timestamp(
end_time), "end time must be later than start time."
assert merge_mode in ["max", "min", "mean", "sum"],\
f"merge_mode should be one of [\"max\", \"min\", \"mean\", \"sum\"]," \
f" but found {merge_mode}."
start_time_stamp = pd.Timestamp(start_time)
end_time_stamp = pd.Timestamp(end_time)
zero_time_stamp = pd.Timestamp(0, unit='ms')
res_df = df.copy()
res_df[dt_col] = df.apply(
lambda row: resample_helper(
row[dt_col],
interval,
start_time_stamp,
end_time_stamp,
zero_time_stamp),
axis=1)
res_df = res_df[~res_df[dt_col].isin([None])]
if merge_mode == "max":
res_df = res_df.groupby([dt_col]).max()
if merge_mode == "min":
res_df = res_df.groupby([dt_col]).min()
if merge_mode == "mean":
res_df = res_df.groupby([dt_col]).mean()
if merge_mode == "sum":
res_df = res_df.groupby([dt_col]).sum()
new_start = start_time_stamp + \
(interval - divmod(start_time_stamp - zero_time_stamp, pd.Timedelta(interval))[1])
new_end = end_time_stamp - \
divmod(end_time_stamp - zero_time_stamp, pd.Timedelta(interval))[1]
new_end = new_start if new_start > new_end else new_end
new_index = pd.date_range(start=new_start, end=new_end, freq=interval)
res_df = res_df.reindex(new_index)
res_df.index.name = dt_col
res_df = res_df.reset_index()
return res_df
def resample_helper(curr_time,
interval,
start_time_stamp,
end_time_stamp,
zero_time_stamp):
offset = divmod((curr_time - zero_time_stamp), pd.Timedelta(interval))[1]
if(offset / interval) >= 0.5:
resampled_time = curr_time + (interval - offset)
else:
resampled_time = curr_time - offset
if (resampled_time < start_time_stamp or resampled_time > end_time_stamp):
return None
return resampled_time
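
# Minimal usage sketch (column names and times below are made up for illustration):
# df = pd.DataFrame({"datetime": pd.date_range("2021-01-01", periods=10, freq="1min"),
#                    "value": range(10)})
# out = resample_timeseries_dataframe(df, dt_col="datetime", interval="2min",
#                                     start_time="2021-01-01 00:00:00",
#                                     end_time="2021-01-01 00:09:00",
#                                     merge_mode="mean")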
```
#### File: examples/tcmf/run_electricity.py
```python
import argparse
from zoo.orca import init_orca_context, stop_orca_context
import numpy as np
from zoo.chronos.model.forecast.tcmf_forecaster import TCMFForecaster
import tempfile
import logging
import sys
import os
def get_dummy_data():
return np.random.randn(300, 480)
os.environ["KMP_AFFINITY"] = "disabled"
logging.basicConfig(
stream=sys.stdout,
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
parser.add_argument('--cluster_mode', type=str, default="local",
help='The mode for the Spark cluster.')
parser.add_argument("--num_workers", type=int, default=2,
help="The number of workers to be used in the cluster."
"You can change it depending on your own cluster setting.")
parser.add_argument("--cores", type=int, default=4,
help="The number of cpu cores you want to use on each node. "
"You can change it depending on your own cluster setting.")
parser.add_argument("--memory", type=str, default="10g",
help="The memory you want to use on each node. "
"You can change it depending on your own cluster setting.")
parser.add_argument("--data_dir", type=str,
help="the directory of electricity data file, you can download by running "
"https://github.com/rajatsen91/deepglo/blob/master/datasets/"
"download-data.sh. Note that we only need electricity.npy.")
parser.add_argument("--use_dummy_data", action='store_true', default=False,
help="Whether to use dummy data")
parser.add_argument("--smoke", action='store_true', default=False,
help="Whether to run smoke test")
parser.add_argument("--predict_local", action='store_true', default=False,
help="You can set this if want to run distributed training on yarn and "
"run distributed inference on local.")
parser.add_argument("--num_predict_cores", type=int, default=4,
help="The number of cores you want to use for prediction on local."
"You should only parse this arg if you set predict_local to true.")
parser.add_argument("--num_predict_workers", type=int, default=4,
help="The number of workers you want to use for prediction on local. "
"You should only parse this arg if you set predict_local to true.")
if __name__ == "__main__":
args = parser.parse_args()
num_nodes = 1 if args.cluster_mode == "local" else args.num_workers
init_orca_context(cluster_mode=args.cluster_mode, cores=args.cores, num_nodes=num_nodes,
memory=args.memory, init_ray_on_spark=True)
if not args.use_dummy_data:
assert args.data_dir is not None, "--data_dir must be provided if not using dummy data"
    logger.info('Initializing TCMFForecaster.')
model = TCMFForecaster(
vbsize=128,
hbsize=256,
num_channels_X=[32, 32, 32, 32, 32, 1],
num_channels_Y=[32, 32, 32, 32, 32, 1],
kernel_size=7,
dropout=0.2,
rank=64,
kernel_size_Y=7,
learning_rate=0.0005,
normalize=False,
use_time=True,
svd=True,
)
ymat = np.load(args.data_dir) if not args.use_dummy_data else get_dummy_data()
horizon = 24
train_data = ymat[:, : -2 * horizon]
target_data = ymat[:, -2 * horizon: -horizon]
incr_target_data = ymat[:, -horizon:]
logger.info('Start fitting.')
model.fit({'y': train_data},
val_len=24,
start_date="2012-1-1",
freq="H",
covariates=None,
dti=None,
period=24,
y_iters=1 if args.smoke else 10,
init_FX_epoch=1 if args.smoke else 100,
max_FX_epoch=1 if args.smoke else 300,
max_TCN_epoch=1 if args.smoke else 300,
alt_iters=2 if args.smoke else 10,
num_workers=args.num_workers)
logger.info('Fitting ends.')
# you can save and load model as you want
with tempfile.TemporaryDirectory() as tempdirname:
model.save(tempdirname)
loaded_model = TCMFForecaster.load(tempdirname, is_xshards_distributed=False)
if args.predict_local:
logger.info('Stopping context for yarn cluster and init context on local.')
stop_orca_context()
import ray
ray.init(num_cpus=args.num_predict_cores)
logger.info('Start prediction.')
yhat = model.predict(horizon=horizon,
num_workers=args.num_predict_workers
if args.predict_local else args.num_workers)
logger.info("Prediction ends")
yhat = yhat["prediction"]
target_value = dict({"y": target_data})
# evaluate with prediction results
from zoo.automl.common.metrics import Evaluator
evaluate_mse = Evaluator.evaluate("mse", target_data, yhat)
# You can also evaluate directly without prediction results.
mse, smape = model.evaluate(target_value=target_value, metric=['mse', 'smape'],
num_workers=args.num_predict_workers if args.predict_local
else args.num_workers)
print(f"Evaluation results:\nmse: {mse}, \nsmape: {smape}")
logger.info("Evaluation ends")
# incremental fitting
logger.info("Start fit incremental")
model.fit_incremental({'y': target_data})
logger.info("Start evaluation after fit incremental")
incr_target_value = dict({"y": incr_target_data})
mse, smape = model.evaluate(target_value=incr_target_value, metric=['mse', 'smape'],
num_workers=args.num_predict_workers
if args.predict_local else args.num_workers)
print(f"Evaluation results after incremental fitting:\nmse: {mse}, \nsmape: {smape}")
logger.info("Evaluation ends")
stop_orca_context()
```
#### File: zoo/tfpark/__init__.py
```python
def check_tf_version():
import logging
try:
import tensorflow as tf
except Exception as e:
return False, RuntimeError("Importing TensorFlow failed, "
"please install tensorflow 1.15.0.", e)
v_str = tf.__version__
major, minor, patch = v_str.split(".")
if v_str != "1.15.0":
if int(major) == 1:
logging.warning("\n######################### WARNING ##########################\n"
"\nAnalytics Zoo TFPark has only been tested on TensorFlow 1.15.0,"
" but your current TensorFlow installation is {}.".format(v_str) +
"\nYou may encounter some version incompatibility issues. "
"\n##############################################################")
else:
message = "Currently Analytics Zoo TFPark only supports TensorFlow 1.15.0, " + \
"but your current TensorFlow installation is {}".format(v_str)
return False, RuntimeError(message)
return True, None
passed, error = check_tf_version()
if passed:
from .model import KerasModel
from .estimator import TFEstimator
from .tf_optimizer import TFOptimizer
from .tf_dataset import TFDataset
from .zoo_optimizer import ZooOptimizer
from .tf_predictor import TFPredictor
from .tfnet import TFNet
else:
CLASSES_WITH_MAGIC_METHODS = (str(), object, float(), dict())
# Combines all magic methods I can think of.
MAGIC_METHODS_TO_CHANGE = set()
for i in CLASSES_WITH_MAGIC_METHODS:
MAGIC_METHODS_TO_CHANGE |= set(dir(i))
MAGIC_METHODS_TO_CHANGE.add('__call__')
# __init__ and __new__ must not raise an UnusableObjectError
# otherwise it would raise error even on creation of objects.
MAGIC_METHODS_TO_CHANGE -= {'__class__', '__init__', '__new__'}
def error_func(*args, **kwargs):
"""(nearly) all magic methods will be set to this function."""
raise error
class UnusableClass(object):
pass
for i in MAGIC_METHODS_TO_CHANGE:
setattr(UnusableClass, i, error_func)
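    # Any later use of these placeholder objects (calling, printing, attribute access, etc.)
    # re-raises the original TensorFlow import/version error instead of failing at import time.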
KerasModel = UnusableClass()
TFEstimator = UnusableClass()
TFOptimizer = UnusableClass()
TFDataset = UnusableClass()
ZooOptimizer = UnusableClass()
TFPredictor = UnusableClass()
TFNet = UnusableClass()
``` |
{
"source": "JiaxiongQ/SlimConv",
"score": 2
} |
#### File: JiaxiongQ/SlimConv/test.py
```python
import time
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import os
import argparse
# from models import *
# NOTE: a hyphen is not a valid character in a Python module name, so "from .SC-ResNet import *"
# cannot execute as written; the model file is assumed here to be importable as SC_ResNet
# (adjust the name to match the actual file in the repository).
from SC_ResNet import *
from utils import progress_bar
parser = argparse.ArgumentParser(description='PyTorch ImageNet Test')
parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
args = parser.parse_args()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
best_acc = 0 # best test accuracy
start_epoch = 0 # start from epoch 0 or last checkpoint epoch
# Data
print('==> Preparing data..')
valdir = os.path.join('../data/', 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=256, shuffle=False,
num_workers=4, pin_memory=True)
# exit(0)
# Model
print('==> Building model..')
net = resnet50()
if device == 'cuda':
net = torch.nn.DataParallel(net).cuda()
cudnn.benchmark = True
if args.resume:
# Load checkpoint.
print('==> Resuming from checkpoint..')
checkpoint = torch.load('./')
best = checkpoint['best_prec1']
print(best)
net.load_state_dict(checkpoint['state_dict'])
criterion = nn.CrossEntropyLoss()
def test():
global best_acc
net.eval()
test_loss = 0
correct = 0
total = 0
latency=0
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(val_loader):
inputs, targets = inputs.to(device), targets.to(device)
time_s = time.time()
outputs = net(inputs)
torch.cuda.synchronize()
latency += time.time() - time_s
loss = criterion(outputs, targets)
test_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
progress_bar(batch_idx, len(val_loader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (test_loss/(batch_idx+1), 100.*correct/total, correct, total))
avg_time = latency
print(avg_time)
def test2():
global best_acc
net.eval()
test_loss = 0
correct = 0
total = 0
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(val_loader):
inputs, targets = inputs.to(device), targets.to(device)
outputs = net(inputs)
_, predicted = outputs.topk(5,1,True,True)
total += targets.size(0)
predicted = predicted.t()
ct = predicted.eq(targets.view(1,-1).expand_as(predicted))
correct += ct[:5].view(-1).float().sum(0).item()
progress_bar(batch_idx, len(val_loader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (test_loss/(batch_idx+1), 100.*correct/total, correct, total))
test()
#test2()
``` |
{
"source": "JiaXiu01/AlgoGraphs_Dev",
"score": 4
} |
#### File: AlgoGraphs_Dev/backend/bipartite.py
```python
import networkx as nx
import matplotlib.pyplot as plt
import random
def bipartite(numNodes):
odds=[]
evens=[]
colours=[]
for i in range(1,numNodes+1,2):
odds.append(i)
colours.append('red')
for i in range(2,numNodes+1,2):
evens.append(i)
colours.append('blue')
B = nx.Graph()
B.add_nodes_from(odds, bipartite=0)
B.add_nodes_from(evens, bipartite=1)
for i in range(1,numNodes):
B.add_edge(i,i+1)
    # randomly add a few extra edges between the two parts
if numNodes>=3:
for j in range(1,numNodes):
x=random.uniform(0, 1)
if x>0.6:
y=random.randint(1, len(evens)-1)
z=random.randint(1, len(odds)-1)
if z!=y:
B.add_edge(odds[z],evens[y])
lhs = nx.bipartite.sets(B)[0]
positions = nx.bipartite_layout(B, lhs,scale=40)
nx.draw_networkx_labels(B, pos=positions)
nx.draw(B, pos=positions,node_color=colours)
plt.savefig((str(numNodes)+"bipartite.png"), dpi=300)
plt.show()
bipartite(10)
```
#### File: AlgoGraphs_Dev/backend/hypercube.py
```python
import networkx as nx
import matplotlib.pyplot as plt
def hypercube(n):
if n==0:
x= nx.Graph()
x.add_node(0)
else:
x=nx.generators.lattice.hypercube_graph(n)
positions = nx.spring_layout(x, scale=0.8)
nx.draw(x, pos=positions,node_color='grey', width=1, edge_color="skyblue", style="solid")
nx.draw_networkx_labels(x, pos=positions, font_size=10)
#plt.figure(figsize=(10.0,10.0))
#fits everything in
plt.margins(0.15)
plt.savefig((str(n)+"hypercube.png"), dpi=800)#,figsize=(10.0,10.0))
plt.show()
##hypercube(0)
##hypercube(1)
##hypercube(2)
hypercube(3)
hypercube(4)
hypercube(5)
``` |
{
"source": "jiaxu0017/Manifold_Segmentation",
"score": 2
} |
#### File: network/backbone/attention.py
```python
import torch
from torch.nn import Module,Conv2d,Parameter,Softmax
torch_ver = torch.__version__[:3]
__all__ = ['PAM_Module', 'CAM_Module']
class CAM_Module(Module):
""" Channel attention module"""
def __init__(self, in_dim):
super(CAM_Module, self).__init__()
self.chanel_in = in_dim
self.gamma = Parameter(torch.zeros(1))
self.softmax = Softmax(dim=-1)
def forward(self,x):
'''
        Calculate attention between channels
Args:
x: input feature maps (B * C * H * W)
Returns:
out: attention value + input feature (B * C * H * W)
attention: B * C * C
'''
m_batchsize, C, height, wight = x.size()
proj_query = x.view(m_batchsize, C, -1)
proj_key = x.view(m_batchsize,C, -1).permute(0,2,1)
proj_value = x.view(m_batchsize,C, -1)
energy = torch.bmm(proj_query,proj_key)
energy_new = torch.max(energy, -1, keepdim=True)[0].expand_as(energy) - energy
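        # the per-row maximum is subtracted before the softmax (a stability trick used in
        # DANet-style channel attention modules)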
attention = self.softmax(energy_new)
out = torch.bmm(attention,proj_value)
out = out.view(m_batchsize,C, height, wight)
mean = torch.mean(out)
out = out/mean
out = self.gamma*out + x
return out
class PAM_Module(Module):
""" Position attention module"""
#Ref from SAGAN
def __init__(self, in_dim):
super(PAM_Module, self).__init__()
self.chanel_in = in_dim
self.query_conv = Conv2d(in_channels=in_dim, out_channels=in_dim//8, kernel_size=1)
self.key_conv = Conv2d(in_channels=in_dim, out_channels=in_dim//8, kernel_size=1)
self.value_conv = Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)
self.gamma = Parameter(torch.zeros(1))
self.softmax = Softmax(dim=-1)
def forward(self, x):
"""
inputs :
x : input feature maps( B X C X H X W)
returns :
out : attention value + input feature
attention: B X (HxW) X (HxW)
"""
m_batchsize, C, height, width = x.size()
proj_query = self.query_conv(x).view(m_batchsize, -1, width*height).permute(0, 2, 1)
proj_key = self.key_conv(x).view(m_batchsize, -1, width*height)
energy = torch.bmm(proj_query, proj_key)
attention = self.softmax(energy)
proj_value = self.value_conv(x).view(m_batchsize, -1, width*height)
out = torch.bmm(proj_value, attention.permute(0, 2, 1))
out = out.view(m_batchsize, C, height, width)
out = self.gamma*out + x
return out
if __name__ == '__main__':
    input = torch.ones([1, 16, 9, 5])
    for i in range(9):
        for j in range(5):
            input[:, :, i, j] = i * 5 + j
    # print(input.size())
    print(input[0, 1, ])

    # The kronecker / Inverse_kronecker helpers are not defined (or imported) in this file,
    # so their tests are kept disabled here.
    # output_H, output_W, output = kronecker(input)
    # print('H & W:', output_H.shape, output_W.shape)
    # out = Inverse_kronecker(output, input.shape[0], input.shape[1], input.shape[2], input.shape[3])
    # print(out.shape)

    # test PAM_Module and CAM_Module
    model = PAM_Module(16)
    out = model(input)
    print(out.shape)
    model = CAM_Module(16)
    out = model(input)
    print(out.shape)
``` |
{
"source": "jiaxu0017/Segmentation_attention_mainfold-Pytorch",
"score": 3
} |
#### File: Segmentation_attention_mainfold-Pytorch/utils/loss.py
```python
import torch.nn as nn
import torch.nn.functional as F
import torch
import numpy as np
class FocalLoss(nn.Module):
def __init__(self, alpha=1, gamma=0, size_average=True, ignore_index=255):
super(FocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.ignore_index = ignore_index
self.size_average = size_average
def forward(self, inputs, targets):
ce_loss = F.cross_entropy(
inputs, targets, reduction='none', ignore_index=self.ignore_index)
pt = torch.exp(-ce_loss)
focal_loss = self.alpha * (1-pt)**self.gamma * ce_loss
if self.size_average:
return focal_loss.mean()
else:
return focal_loss.sum()
class ManifondLoss(nn.Module):
def __init__(self, alpha=1, size_average=True, ignore_index=255):
super(ManifondLoss,self).__init__()
self.alpha = alpha
self.ignore_index = ignore_index
self.size_average = size_average
# self.coss_manifode
def forward(self, input, targets):
ce_loss = self.coss_manifode(input, targets,k = 3)
return ce_loss * self.alpha
def coss_manifode(self, inputs, targets, k=3):
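        # For every pixel, the predicted label is compared with each label in its k x k neighbourhood;
        # a disagreement is penalised more strongly when the two ground-truth labels agree
        # (the exp(-(t_i - t_j)^2) weight), which encourages label smoothness inside ground-truth regions.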
# print(inputs.shape, targets.shape)
k_l = int(k/2)
h = inputs.size(2)
w = inputs.size(3)
inputs= inputs.detach().max(dim=1)[1]
# print(inputs.shape, targets.shape)
input = inputs[:, k_l:h-k+k_l , k_l:w-k+k_l].float()
target = targets[:, k_l:h-k+k_l , k_l:w-k+k_l].float()
# temp = torch.Tensor(input.size())
temp = torch.zeros(input.size()).cuda()
for i in range(k):
for j in range(k):
output = inputs[:, k_l+i:h-k+i+k_l, k_l+j:w-k+j+k_l].float()
target_out = targets[:, k_l+i:h-k+i+k_l, k_l+j:w-k+j+k_l].float()
# print(input.size(),output.size())
temp += torch.pow((input-output),2) * torch.exp(-torch.pow((target-target_out),2))
return torch.mean(temp)
if __name__ == '__main__':
    input = torch.ones([8, 19, 5, 5])
    targets = torch.zeros([8, 5, 5]).long()
    for i in range(5):
        for j in range(5):
            input[:, :, i, j] = i * 5 + j
    # coss_manifode is a method of ManifondLoss and allocates on the GPU,
    # so this smoke test only runs when CUDA is available.
    if torch.cuda.is_available():
        criterion = ManifondLoss()
        loss = criterion(input.cuda(), targets.cuda())
        print(loss)
input = torch.ones([2,2,5,5])
for i in range(5):
for j in range(5):
input[:,:,i,j] = i * 5 + j
out = torch.pow(input,2)
# print(out)
``` |
{
"source": "jiaxu825/VyPy",
"score": 2
} |
#### File: tests/data/hashed_dict.py
```python
from VyPy.data import HashedDict
import pickle
from copy import deepcopy
from time import time, sleep
import numpy as np
# ----------------------------------------------------------------------
# Main
# ----------------------------------------------------------------------
def main():
# --------------------------------------------------------
# Initialize
# --------------------------------------------------------
cache = HashedDict()
# --------------------------------------------------------
# Load up data
# --------------------------------------------------------
cache['a'] = 1 # normal dictionary keys are strings
cache[[1,2,3]] = 2 # HashedDict accepts lists for example
cache[[1,2,5]] = 5
funny_key = object()
cache[[6,2,5]] = HashedDict() # sub-dictionary
cache[[6,2,5]][funny_key] = 77
# --------------------------------------------------------
# Printing
# --------------------------------------------------------
print '>>> print cache'
print cache
print '>>> print cache[[1,2,3]]'
print cache[[1,2,3]]
print ''
print '>>> print cache[(1,2,3)]'
print cache[(1,2,3)]
print ''
print 'should be True:' , cache.has_key([1,2,3])
assert cache.has_key([1,2,3])
print 'should be True:' , [1,2,3] in cache
assert [1,2,3] in cache
del cache[[1,2,3]]
print 'should be False:' , cache.has_key([1,2,3])
assert not cache.has_key([1,2,3])
print ''
# --------------------------------------------------------
# Pickling test
# --------------------------------------------------------
print '>>> pickle.dumps()'
d = pickle.dumps(cache)
print '>>> pickle.loads()'
p = pickle.loads(d)
print ''
print '>>> print p'
print p
print 'should be True:' , [1,2,5] in p
assert [1,2,5] in p
# beware after pickling some objects...
print 'should be False:' , funny_key in p[[6,2,5]]
assert not funny_key in p[[6,2,5]]
print ''
# --------------------------------------------------------
# Access Speed test
# --------------------------------------------------------
print 'Access speed test...'
# accessing bunch
t0 = time()
for i in range(int(1e5)):
v = cache[[6,2,5]][funny_key]
t1 = time()-t0
# a test dictionary
z = dict()
z['t'] = dict()
z['t']['i'] = 0
# accessing a normal dictionary
t0 = time()
for i in range(int(1e5)):
v = z['t']['i']
t2 = time()-t0
# results
print 'HashedDict: %.6f s' % (t1)
print 'dict: %.6f s' % (t2)
assert (t1-t2)/t2 < 60.0
print ''
# --------------------------------------------------------
# Assignment Speed test
# --------------------------------------------------------
print 'Assignment speed test...'
# accessing bunch
t0 = time()
for i in range(int(1e5)):
v = cache[[6,2,5]][funny_key] = 10
t1 = time()-t0
# accessing a normal dictionary
t0 = time()
for i in range(int(1e5)):
z['t']['i'] = 10
t2 = time()-t0
# results
print 'HashedDict: %.6f s' % (t1)
print 'dict: %.6f s' % (t2)
assert (t1-t2)/t2 < 60.0
print ''
# ----------------------------------------------------------------------
# Call Main
# ----------------------------------------------------------------------
if __name__ == '__main__':
main()
```
#### File: tests/_experimental/nonstat_test_02.py
```python
import time, os, sys, copy
import numpy as np
import pylab as plt
import VyPy
def main():
fit_1D()
return
def fit_1D():
# ---------------------------------------------------------
# Sampling
# ---------------------------------------------------------
XS,YS,DYS = training_data()
XB = [[min(XS),max(XS)]]
# ---------------------------------------------------------
# Machine Learning
# ---------------------------------------------------------
# Training
Train = VyPy.sbo.Training(XB,XS,YS,None)
# Scaling
Scaling = VyPy.sbo.Scaling.Training(Train)
Train = Scaling.set_scaling(Train)
# Length Scaling
#Length = length_scaling
Length = lambda(Z): length_scaling(Scaling.X_unset(Z))
# Model
#Kernel = VyPy.sbo.Kernels.Gaussian(Train)
Kernel = VyPy.sbo.Kernels.Gaussian_NS(Train,Length)
#Kernel.Hypers.sig_f = -0.1
#Kernel.Hypers.len_s = -0.4
#Kernel.Hypers.sig_ny = -4.0
#Kernel.Hypers.sig_ndy = -4.0
Model = VyPy.sbo.Modeling(Kernel)
# Learning
Model.learn()
# ---------------------------------------------------------
# Post Processing
# ---------------------------------------------------------
# plot sites
XP = np.array([ np.linspace(XB[0][0],XB[0][1],200) ]).T
# functions, in not scaled space
The_Data = Model.evaluate( Scaling.X_set(XP) )
The_Data = Scaling.unset_scaling(The_Data)
YP = The_Data.YI
DYP = The_Data.DYI
# plot
plt.figure(1)
plt.plot(XP,YP,'b-')
plt.plot(XS,YS,'r+')
# plot
plt.figure(2)
plt.plot(XP,DYP,'b-')
plt.plot(XS,DYS,'r+')
plt.figure(3)
plt.plot(XP,length_scaling(XP),'b-')
plt.show()
plt.show()
return
import scipy.interpolate
interpolate = scipy.interpolate
l_guesses = np.array([0.95, 0.10, 0.20, 0.50, 1.0])
x_guesses = np.array([0.00, 0.08, 0.11, 0.20, 1.0 ]) * 10.
interpolator = interpolate.pchip(x_guesses, l_guesses)
def length_scaling(xs):
xs = VyPy.sbo.tools.atleast_2d(xs)
#ys = np.zeros([xs.shape[0],1])
#for i,x in enumerate(xs):
#ys[i] = interpolator(x)
ys = np.array([ interpolator(xs[:,0]) ]).T
#ys = np.ones_like(xs)
return ys
def training_data():
X = np.array([
[ 0. ],
[ 0.3],
[ 0.7],
[ 0.9],
[ 1.2],
[ 1.5],
[ 2. ],
[ 2.5],
[ 3. ],
[ 4. ],
[ 6. ],
[ 8. ],
[ 10. ],
])
Y = np.array([
[-0.03222723],
[-0.03222746],
[-0.007998 ],
[ 0.003999 ],
[-0.03599099],
[-0.03293293],
[-0.01717217],
[-0.00752753],
[ 0.00094094],
[ 0.00940941],
[ 0.01411411],
[ 0.01693694],
[ 0.01928929],
])
DY = np.array([
[-0.00564939],
[ 0.01507649],
[ 0.12407742],
[-0.11633803],
[ 0.04211901],
[ 0.01023362],
[ 0.0315054 ],
[ 0.01544723],
[ 0.01524186],
[ 0.00428248],
[ 0.00141053],
[ 0.00135261],
[ 0.00094123],
])
return X,Y,DY
if __name__ == '__main__':
main()
```
#### File: tests/_experimental/regression_tests_01.py
```python
import time, os, gc, sys, copy
import cPickle as pickle
import numpy as np
from numpy import pi
import pylab as plt
import VyPy
from VyPy.regression import gpr
from VyPy.tools import atleast_2d
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import matplotlib.pyplot as plt
from warnings import simplefilter
simplefilter('error',Warning)
def main():
T0 = time.time()
#fit_2D()
fit_ND()
#opt_ND()
#fit_VF()
#fit_VFND()
T1 = time.time()
print 'Total Time: %.4f' % (T1-T0)
def fit_2D():
# ---------------------------------------------------------
# Setup
# ---------------------------------------------------------
The_Func = Rosenbrock_Function
XB = np.array([ [-2. , 2. ] ,
[-2. , 2. ] ])
# truth
nx = 20 # grid number of points per dimension
XT = np.linspace(XB[0,0], XB[0,1], nx)
YT = np.linspace(XB[1,0], XB[1,1], nx)
XT,YT = np.meshgrid(XT,YT)
ZT = XT*0.
for ix in range(nx):
for iy in range(nx):
ZT[ix,iy] = The_Func([ XT[ix,iy], YT[ix,iy] ])[0][0,0]
# training
nt = 20 # number of samples
    X = VyPy.tools.LHC_uniform(XB,nt)
Y,DY = The_Func(X)
# zip data to evaluate
XI = np.zeros([nx*nx,2])
it = 0
for ix in range(nx):
for iy in range(nx):
XI[it,:] = [XT[ix,iy],YT[ix,iy]]
it = it+1
# ---------------------------------------------------------
# Machine Learning
# ---------------------------------------------------------
# Training
Train = gpr.Training(XB,X,Y,DY)
#Train = VyPy.sbo.Training(XB,X,Y,DY=None)
# Scaling
Scaling = gpr.Scaling.Training(Train)
Train = Scaling.set_scaling(Train)
# Learning
Kernel = gpr.Kernel.Gaussian(Train)
Model = gpr.Modeling(Kernel)
Model.learn()
# Evaluate
XI_scl = Scaling.set_scaling(XI,'X')
The_Data = Model.evaluate(XI_scl)
The_Data = Scaling.unset_scaling(The_Data)
# ---------------------------------------------------------
# Post Processing
# ---------------------------------------------------------
# unzip
YI = The_Data.YI
XP = XT; YP = YT; ZP = YT*0.
it = 0
for ix in range(nx):
for iy in range(nx):
ZP[ix,iy] = YI[it]
it = it+1
# plot
fig = plt.figure()
ax = fig.gca(projection='3d')
pnts = ax.plot(X[:,0],X[:,1],Y[:,0],'r+',mew=3,ms=15)
surf = ax.plot_surface(XP,YP,ZP, cmap=cm.autumn, rstride=1,cstride=1, linewidth=0, antialiased=False)
truh = ax.plot_surface(XT,YT,ZT, cmap=cm.jet, rstride=1,cstride=1, linewidth=0, antialiased=False)
plt.draw()
plt.show()
#: def fit_2D()
def fit_ND():
# ---------------------------------------------------------
# Setup
# ---------------------------------------------------------
The_Func = Rosenbrock_Function
ND = 3 # dimensions
XB = np.array( [[-2.,2.]]*ND )
# training
nt = 60
X = VyPy.sampling.lhc_uniform(XB,nt)
Y,DY = The_Func(X)
# ---------------------------------------------------------
# Machine Learning
# ---------------------------------------------------------
# Training
Train = gpr.training.Training(XB,X,Y,DY=None)
# Scaling
Scaling = gpr.scaling.Linear(Train)
Train = Scaling.set_scaling(Train)
# Learning
Kernel = gpr.kernel.Gaussian(Train)
Infer = gpr.inference.Gaussian(Kernel)
Learn = gpr.learning.Likelihood(Infer)
Model = gpr.modeling.Regression(Learn)
Model.learn()
# ---------------------------------------------------------
# Post Processing
# ---------------------------------------------------------
# functions to plot, in scaled space
f1 = Model.predict_YI
f2 = lambda(Z): The_Func(Z)[0]
f2 = Scaling.wrap_function(f2)
# center point, scaled space
x0 = Scaling.X.set_scaling( [1.0] * ND )
# plot bounds, scaled space
xb = Train.XB
# plot
fig = plt.figure(1)
ax = VyPy.plotting.spider_axis(fig,x0,xb)
VyPy.plotting.spider_trace(ax,f1,x0,xb,20,'b-',lw=2,label='Fit')
VyPy.plotting.spider_trace(ax,f2,x0,xb,20,'r-',lw=2,label='Truth')
ax.legend()
ax.set_zlabel('Y (scaled)')
plt.draw();
plt.show(block=True)
#: def fit_ND()
def opt_ND():
    # pyOpt is required by this function but is not imported at module level
    import pyOpt
# ---------------------------------------------------------
# Setup
# ---------------------------------------------------------
The_Func = Rosenbrock_Function
ND = 4 # dimensions
XB = np.array( [[-2.,2.]]*ND )
# training
nt = 50
X = VyPy.sbo.tools.LHC_uniform(XB,nt)
Y,DY = The_Func(X)
# ---------------------------------------------------------
# Machine Learning
# ---------------------------------------------------------
print "Learning ... "
# Training
Train = VyPy.sbo.Training(XB,X,Y,DY)
# Scaling
Scaling = VyPy.sbo.Scaling.Training(Train)
Train = Scaling.set_scaling(Train)
# Learning
Kernel = VyPy.sbo.Kernels.Gaussian(Train)
Model = VyPy.sbo.Modeling(Kernel)
Model.learn()
print ""
# ---------------------------------------------------------
# Optimization
# ---------------------------------------------------------
print "Optimization ..."
# functions
Opt_Func = Model.pyopt_function
Opt_Grad = Model.pyopt_gradient
# problem setup
Opt_Prob = pyOpt.Optimization('YI Minimization',Opt_Func)
# variables and bounds
for ix in range(ND):
Opt_Prob.addVar('X%i'%ix,'c',lower=Train.XB[ix,0],upper=Train.XB[ix,1],value=0.)
# objective name
Opt_Prob.addObj('Estimated Objective')
# global optimization
print 'Global Optimization (ALPSO)'
The_Optimizer = pyOpt.ALPSO(pll_type='MP')
The_Optimizer.setOption('fileout',0)
The_Optimizer.setOption('maxOuterIter',2)
The_Optimizer.setOption('stopCriteria',0) # by maxits
The_Optimizer.setOption('SwarmSize',ND*10)
The_Optimizer(Opt_Prob) # runs the optimization
# local optimization
print 'Local Optimization (SLSQP)'
The_Optimizer = pyOpt.SLSQP()
The_Optimizer.setOption('IPRINT',-1)
The_Optimizer.setOption('ACC',1e-10)
[YI_min,X_min,Info] = The_Optimizer(Opt_Prob.solution(0),sens_type=Opt_Grad) # starts from last solution
# report
print "YImin = %.4f" % Scaling.unset_scaling(YI_min,'YI')
print "XImin = %s" % Scaling.unset_scaling(X_min,'X')
# ---------------------------------------------------------
# Post Processing
# ---------------------------------------------------------
# functions to plot, in scaled space
f1 = lambda(Z): Model.evaluate(Z).YI
f2 = lambda(Z): Scaling.Y_set( The_Func( Scaling.X_unset(Z) )[0] )
# center point, scaled space
x0 = X_min
# plot bounds, scaled space
xb = Train.XB
# plot
fig = plt.figure()
ax = fig.gca(projection='3d')
VyPy.sbo.tools.plot_spider(ax,[f1,f2],x0,xb)
# labels
ax.legend(['Fit','Truth'])
ax.set_zlabel('Y (scaled)')
# show
plt.draw(); plt.show()
#: def opt_ND()
def fit_VF():
# ---------------------------------------------------------
# Setup
# ---------------------------------------------------------
# high fidelity first
def Func_FI1(X):
Y = X*X + 0.2*np.sin(2*pi*X)
DY = 2.0*X + 0.2* 2*pi * np.cos(2*pi*X)
return Y,DY
def Func_FI2(X):
Y = X*X
DY = 2.0*X
return Y,DY
# bounds
XB = np.array([ [-2,2] ])
# truth
XT = np.array([ np.linspace(XB[0,0],XB[0,1],100) ]).T
YT1 = Func_FI1(XT)[0]
YT2 = Func_FI2(XT)[0]
# training
nt_1 = 4
nt_2 = 20
X1 = np.array([ np.linspace(XB[0,0],XB[0,1],nt_1) ]).T
X2 = np.array([ np.linspace(XB[0,0],XB[0,1],nt_2) ]).T
Y1,DY1 = Func_FI1(X1)
Y2,DY2 = Func_FI2(X2)
# ---------------------------------------------------------
# Machine Learning
# ---------------------------------------------------------
# Training
Trains = VyPy.sbo.VF_Training()
Trains['FI0'] = VyPy.sbo.Training(XB,X1,Y1)
Trains['FI1'] = VyPy.sbo.Training(XB,X2,Y2)
# Scaling
Scaling = VyPy.sbo.Scaling.Training(Trains[1])
Trains[0] = Scaling.set_scaling(Trains[0])
Trains[1] = Scaling.set_scaling(Trains[1])
# Learning
Kernel = VyPy.sbo.Kernels.Gaussian_VF(Trains)
Model = VyPy.sbo.Modeling(Kernel)
Model.learn()
# Evaluate
XI_scl = Scaling.set_scaling(XT,'X')
The_Data = Model.evaluate(XI_scl)
The_Data = Scaling.unset_scaling(The_Data)
YI_VF = The_Data.YI
# ---------------------------------------------------------
# Verification
# ---------------------------------------------------------
# High Fidelity Only
Kernel = VyPy.sbo.Kernels.Gaussian(Trains[0])
Model = VyPy.sbo.Modeling(Kernel)
Model.precalc()
# Evaluate
The_Data = Model.evaluate(XI_scl)
The_Data = Scaling.unset_scaling(The_Data)
YI_SF = The_Data.YI
# ---------------------------------------------------------
# Post Processing
# ---------------------------------------------------------
plt.figure(1)
# labels
plt.plot([np.nan],[0], '-' , color='b', lw=2)
plt.plot([np.nan],[0], '-' , color='r', lw=2)
plt.plot([np.nan],[0], '--', color='y', lw=1)
plt.plot([np.nan],[0], '-' , color='k', lw=2)
plt.legend(['Fidelity 1','Fidelity 2','SF Surrogate','VF Surrogate'])
plt.ylabel('Y')
plt.xlabel('X')
# truth
plt.plot(XT,YT1, '-', color='b', lw=2)
plt.plot(XT,YT2, '-', color='r', lw=2)
plt.plot(X1,Y1 , '+', color='b', mew=3, ms=10)
plt.plot(X2,Y2 , '+', color='r', mew=3, ms=10)
# predicted
plt.plot(XT,YI_SF, '--', color='y', lw=1)
plt.plot(XT,YI_VF, '-', color='k', lw=2)
plt.show()
#: def fit_VF()
def fit_VFND():
# ---------------------------------------------------------
# Setup
# ---------------------------------------------------------
# high fidelity first
def Func_FI1(X):
return Rosenbrock_Function(X)[0]
def Func_FI2(X):
return Rosenbrock_Function(X)[0] + Hyperplane_Function(X)[0]*50.0
#return Rosenbrock_Function(X)[0] * Hyperplane_Function(X)[0]/10.0 # works bad
# bounds
ND = 3
XB = np.array( [[-2,2]]*ND )
# training
nt_1 = 20
nt_2 = ND*60
X1 = VyPy.sbo.tools.LHC_uniform(XB,nt_1)
X2 = VyPy.sbo.tools.LHC_uniform(XB,nt_2)
Y1 = Func_FI1(X1)
Y2 = Func_FI2(X2)
# ---------------------------------------------------------
# Machine Learning
# ---------------------------------------------------------
# Training
Trains = VyPy.sbo.VF_Training()
Trains['FI0'] = VyPy.sbo.Training(XB,X1,Y1)
Trains['FI1'] = VyPy.sbo.Training(XB,X2,Y2)
# Scaling
Scaling = VyPy.sbo.Scaling.Training(Trains[1])
Trains[0] = Scaling.set_scaling(Trains[0])
Trains[1] = Scaling.set_scaling(Trains[1])
# Learning
Kernel = VyPy.sbo.Kernels.Gaussian_VF(Trains)
Model = VyPy.sbo.Modeling(Kernel)
#Model.learn()
# Function Handle
Model.precalc()
Func_VF = lambda(Z): Model.evaluate(Z).YI
# ---------------------------------------------------------
# Verification
# ---------------------------------------------------------
# High Fidelity Only
Train0 = copy.deepcopy(Trains[0])
Kernel0 = VyPy.sbo.Kernels.Gaussian(Train0)
Model0 = VyPy.sbo.Modeling(Kernel0)
#Model0.learn()
# Function Handle
Model0.precalc()
Func_SF = lambda(Z): Model0.evaluate(Z).YI
# ---------------------------------------------------------
# Post Processing
# ---------------------------------------------------------
# truth functions in scaled space
Func_FI1_Scl = lambda(Z): Scaling.Y_set( Func_FI1( Scaling.X_unset(Z) ) )
Func_FI2_Scl = lambda(Z): Scaling.Y_set( Func_FI2( Scaling.X_unset(Z) ) )
# errors
print "Errors ..."
NS = 100*ND
Xs_err = VyPy.sbo.tools.LHC_uniform(Trains.XB,NS,None,3)
YT = Func_FI1_Scl(Xs_err)
YI_SF = Func_SF(Xs_err)
YI_VF = Func_VF(Xs_err)
Err_SF = np.sqrt(np.mean( (YT-YI_SF)**2 ))
Err_VF = np.sqrt(np.mean( (YT-YI_VF)**2 ))
print "Err_SF = %.2f %%" % (Err_SF*100.)
print "Err_VF = %.2f %%" % (Err_VF*100.)
# center point, scaled space
x0 = Scaling.X_set( [1.0] * ND )
# plot bounds, scaled space
xb = Trains.XB
# plot
fig = plt.figure(1)
ax = VyPy.sbo.tools.plot_spider_axis(fig,x0,xb)
VyPy.sbo.tools.plot_spider_trace(ax,Func_FI1_Scl,x0,xb,'b-' ,lw=2,label='Fidelity 1')
VyPy.sbo.tools.plot_spider_trace(ax,Func_FI2_Scl,x0,xb,'r-' ,lw=2,label='Fidelity 2')
VyPy.sbo.tools.plot_spider_trace(ax,Func_SF ,x0,xb,'y--',lw=1,label='SF Surrogate')
    VyPy.sbo.tools.plot_spider_trace(ax,Func_VF     ,x0,xb,'k-' ,lw=2,label='VF Surrogate')
ax.legend()
ax.set_zlabel('Y (scaled)')
plt.draw(); plt.show()
#: def fit_VFND()
# -------------------------------------------------------------
# Test Functions
# -------------------------------------------------------------
def Rosenbrock_Function(X):
X = atleast_2d(X)
D = X.shape[1]
Y = 0.
DY = X*0.
for I in range(D):
if I < D-1:
Y = Y + 100.*( X[:,I+1]-X[:,I]**2. )**2. + ( 1-X[:,I] )**2.
DY[:,I] = DY[:,I] - 400.*( X[:,I+1]-X[:,I]**2. ) * X[:,I] - 2.*( 1.-X[:,I] )
if I>0:
DY[:,I] = DY[:,I] + 200.*( X[:,I]-X[:,I-1]**2. )
Y = atleast_2d(Y,'col')
return Y,DY
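# e.g. (illustrative check): Rosenbrock_Function(np.array([[1.,1.,1.]])) returns
# Y = [[0.]] and DY = [[0.,0.,0.]], the known minimum of the Rosenbrock function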
def Rastrigin_Function(X):
X = atleast_2d(X)
scl = 1./2.5
sgn = 1.0
X = X * scl
D = X.shape[1]
Y = sgn*( 10.*D + np.sum( X**2. - 10.*np.cos(2.*pi*X) , 1 ) );
DY = sgn*( 2.*X + 20.*pi*np.sin(2.*pi*X) ) * scl;
Y = atleast_2d(Y,'col')
return Y,DY
def Parabolic_Function(X):
X = atleast_2d(X)
D = X.shape[1]
C = np.ones([1,D])
#C = np.array([ np.arange(D)+1. ])
Y = np.dot( X**2. , C.T ) - 10.
DY = 2.*X*C
Y = atleast_2d(Y,'col')
return Y,DY
def Hyperplane_Function(X):
X = atleast_2d(X) + 0.5
N,D = X.shape
C = np.array([ np.arange(D)+1. ])
I = np.ones([N,D])
Y = np.dot( X , C.T )
DY = C * I
Y = atleast_2d(Y,'col')
return Y,DY
if __name__=='__main__':
profile = False
if not profile:
main()
else:
profile_file = 'log_Profile.out'
import cProfile
cProfile.run('import package_tests_01; package_tests_01.main()', profile_file)
import pstats
p = pstats.Stats(profile_file)
p.sort_stats('time').print_stats(20)
## GRAVETART
## ---------------------------------------------------------
## Post Processing
## ---------------------------------------------------------
## Evaluate
#XI_scl = Scaling.set_scaling(XT,'X')
#YI_VF = Func_VF(XI_scl)
#YI_VF = Scaling.unset_scaling(YI_VF,'Y')
#YI_SF = Func_SF(XI_scl)
#YI_SF = Scaling.unset_scaling(YI_SF,'Y')
#plt.figure(1)
## labels
#plt.plot([np.nan],[0], '-' , color='b', lw=2)
#plt.plot([np.nan],[0], '-' , color='r', lw=2)
#plt.plot([np.nan],[0], '--', color='y', lw=1)
#plt.plot([np.nan],[0], '-' , color='k', lw=2)
#plt.legend(['Fidelity 1','Fidelity 2','SF Surrogate','VF Surrogate'])
#plt.ylabel('Y')
#plt.xlabel('X')
## truth
#plt.plot(XT,YT1, '-', color='b', lw=2)
#plt.plot(XT,YT2, '-', color='r', lw=2)
#plt.plot(X1,Y1 , '+', color='b', mew=3, ms=10)
#plt.plot(X2,Y2 , '+', color='r', mew=3, ms=10)
## predicted
#plt.plot(XT,YI_SF, '--', color='y', lw=1)
#plt.plot(XT,YI_VF, '-', color='k', lw=2)
#plt.show()
```
#### File: tests/parallel/multitask.py
```python
import numpy as np
import os, sys, shutil, time
import VyPy
from VyPy import parallel as para
tic = time.time()
# ----------------------------------------------------------------------
# Main
# ----------------------------------------------------------------------
def main():
function = test_func
print function
resource = para.Resource.ComputeCores(max_cores=4)
print resource
gate = resource.gate(default=2)
print gate
function = para.Operation(function,gate)
print function
function = para.MultiTask(function,copies=4)
print function
print ''
print 'call multitask, 1 job'
result = function(10.)
print result
print ''
print 'call multitask, 3 jobs'
result = function([1.,2.,3.])
print result
print ''
# ----------------------------------------------------------------------
# Test Function
# ----------------------------------------------------------------------
def test_func(x):
print 'function start'
print 't = %.4f' % (time.time()-tic)
print 'x =',x
sys.stdout.flush()
y = x*2.
print 'function wait ...'
sys.stdout.flush()
time.sleep(1.0)
print 'function done'
print 't = %.4f' % (time.time()-tic)
print 'y =',y
return y
# ----------------------------------------------------------------------
# Call Main
# ----------------------------------------------------------------------
if __name__ == '__main__':
main()
```
#### File: VyPy/data/Dict.py
```python
class Dict(dict):
def update(self,other):
""" Dict.update(other)
updates the dictionary in place, recursing into additional
dictionaries inside of other
Assumptions:
skips keys that start with '_'
"""
if not isinstance(other,dict):
raise TypeError , 'input is not a dictionary type'
for k,v in other.iteritems():
# recurse only if self's value is a Dict()
if k.startswith('_'):
continue
try:
self[k].update(v)
except:
self[k] = v
return
# new keys by wild card integer
def next_key(self,key_wild):
""" Dict.next_key(key_wild):
finds the next index to use on a indexed key and applies it to key_wild
key_wild is a string containing '%i' to indicate where to increment
the key
"""
if '%i' not in key_wild:
return key_wild
ksplit = key_wild.split('%i')
keys = []
        for k in self.keys():
try:
i = int( k.lstrip(ksplit[0]).rstrip(ksplit[1]) )
keys.append(i)
except:
pass
if keys:
key_index = max(keys)+1
else:
key_index = 0
key = key_wild % (key_index)
return key
# allow override of iterators
__iter = dict.__iter__
def keys(self):
"""Dict.keys() -> list of keys in the dictionary"""
return list(self.__iter())
def values(self):
"""Dict.values() -> list of values in the dictionary"""
return [self[key] for key in self.__iter()]
def items(self):
"""Dict.items() -> list of (key, value) pairs in the dictionary"""
return [(key, self[key]) for key in self.__iter()]
def iterkeys(self):
"""Dict.iterkeys() -> an iterator over the keys in the dictionary"""
return self.__iter()
def itervalues(self):
"""Dict.itervalues -> an iterator over the values in the dictionary"""
for k in self.__iter():
yield self[k]
def iteritems(self):
"""od.iteritems -> an iterator over the (key, value) items in the dictionary"""
for k in self.__iter():
yield (k, self[k])
# prettier printing
def __repr__(self):
""" Invertible* string-form of a Dict.
"""
keys = self.keys()
args = ', '.join(['%s=%r' % (key, self[key]) for key in keys if not key.startswith('_')])
return '%s(%s)' % (self.__class__.__name__, args)
def __str__(self,indent=''):
""" String-form of a Dict.
"""
new_indent = ' '
args = ''
# trunk data name
if indent: args += '\n'
# print values
for key,value in self.iteritems():
# skip 'hidden' items
if isinstance(key,str) and key.startswith('_'):
continue
# recurse into other dict types
if isinstance(value,Dict):
if not value:
val = '\n'
else:
try:
val = value.__str__(indent+new_indent)
except RuntimeError: # recursion limit
val = ''
# everything else
else:
val = str(value) + '\n'
# this key-value, indented
args+= indent + str(key) + ' : ' + val
return args
# ----------------------------------------------------------------------
# Module Tests
# ----------------------------------------------------------------------
if __name__ == '__main__':
o = Dict()
o['x'] = 'hello'
o['y'] = 1
o['z'] = [3,4,5]
o['t'] = Dict()
o['t']['h'] = 20
o['t']['i'] = (1,2,3)
print o
import pickle
d = pickle.dumps(o)
p = pickle.loads(d)
print ''
print p
o['t']['h'] = 'changed'
p.update(o)
p['t'].update(o)
print ''
print p
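    # hedged demo of next_key: the '%i' wildcard convention is documented above;
    # the keys used here are made up for illustration
    w = Dict()
    w['item_0'] = 'a'
    w['item_1'] = 'b'
    print w.next_key('item_%i')   # expected: item_2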
# ----------------------------------------------------------------------
# Gravetart
# ----------------------------------------------------------------------
## implement descriptor protocol for items
#def __getitem__(self,k):
#try:
#return super(Dict,self).__getitem__(k).__get__(self,type(self))
#except AttributeError:
#return super(Dict,self).__getitem__(k)
#except KeyError:
#raise KeyError(k)
#def __setitem__(self,k,v):
#try:
#super(Dict,self).__getitem__(k).__set__(self,v)
#except AttributeError:
#super(Dict,self).__setitem__(k,v)
#except KeyError:
#raise KeyError(k)
#def __delitem__(self,k):
#try:
#super(Dict,self).__getitem__(k).__del__(self)
#except AttributeError:
#super(Dict,self).__delitem__(k)
#except KeyError:
#raise KeyError(k)
#class TestDescriptor(object):
#def __init__(self,x):
#self.x = x
#def __get__(self,obj,kls=None):
#print '__get__'
#print type(obj), type(self)
#print self.x
#return self.x
#def __set__(self,obj,val):
#print '__set__'
#print type(obj), type(self)
#print val
#self.x = val
#class TestObject(Dict):
#def __init__(self,c):
#self.c = c
#o = TestObject(555)
#o['x'] = TestDescriptor([1,2,3])
#o['y'] = 1
#o.desc = TestDescriptor([5,7,8])
#print ''
#print o['x']
#print o['y']
##print o.desc
##print o.c
#print o
#print ''
#o['x'] = [3,4,5]
#import pickle
#d = pickle.dumps(o)
#p = pickle.loads(d)
#print ''
#print p['x']
#print p['y']
#print o.c
#print o.desc
```
#### File: VyPy/data/HashedDict.py
```python
from IndexableDict import IndexableDict
from make_hashable import make_hashable
# ----------------------------------------------------------------------
# Hashable Dict
# ----------------------------------------------------------------------
class HashedDict(IndexableDict):
""" An indexable dictionary that permits typically unhashable keys,
such as lists and other dictionaries
"""
def __getitem__(self,k):
_k = make_hashable(k)
return super(HashedDict,self).__getitem__(_k)
#raise KeyError , ('Key does not exist: %s' % k)
def __setitem__(self,k,v):
_k = make_hashable(k)
super(HashedDict,self).__setitem__(_k,v)
def __delitem__(self,k):
_k = make_hashable(k)
super(HashedDict,self).__delitem__(_k)
def __contains__(self,k):
_k = make_hashable(k)
return super(HashedDict,self).__contains__(_k)
def has_key(self,k):
_k = make_hashable(k)
return super(HashedDict,self).has_key(_k)
# ----------------------------------------------------------------------
# Module Tests
# ----------------------------------------------------------------------
if __name__ == '__main__':
# tests
cache = HashedDict()
cache['a'] = 1
cache[[1,2,3]] = 2
print 'should be True:' , cache.has_key([1,2,3])
print 'should be True:' , [1,2,3] in cache
del cache[[1,2,3]]
print 'should be False:' , cache.has_key([1,2,3])
cache[[1,2,5]] = 5
import pickle
d = pickle.dumps(cache)
p = pickle.loads(d)
print ''
print p[1]
print 'should be True:' , [1,2,5] in cache
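    # hedged extra check: the class docstring says dictionaries may also be used
    # as keys (relies on make_hashable supporting dicts)
    cache[{'a': 1}] = 'dict-key'
    print 'should be True:' , {'a': 1} in cache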
```
#### File: VyPy/data/input_output.py
```python
import os, sys, shutil, copy
import cPickle as pickle
from filelock import filelock
# TODO: don't overwrite other core_names
# -------------------------------------------------------------------
# Load a Dictionary of Data
# -------------------------------------------------------------------
def load_data( file_name,
file_format = 'infer' ,
core_name = 'python_data' ):
""" data = load_data( file_name,
file_format = 'infer' ,
core_name = 'python_data' )
loads dictionary of data from python pickle or matlab struct
Inputs:
file_name - data file name
file_format - 'infer', 'pickle', or 'matlab'
core_name - data is stored under a dictionary with this name
default looks for variable 'python_data' in file_name
file_format = pickle, will return any python object
file_format = matlab, will return strings or float lists and
requires scipy.io.loadmat
file_format = infer (default), will infer format from extention
('.mat','.pkl')
"""
try:
import scipy.io
scipy_loaded = True
except ImportError:
scipy_loaded = False
if not os.path.exists(file_name):
raise Exception , 'File does not exist: %s' % file_name
# process file format
if file_format == 'infer':
if os.path.splitext(file_name)[1] == '.mat':
file_format = 'matlab'
elif os.path.splitext(file_name)[1] == '.pkl':
file_format = 'pickle'
assert file_format in ['matlab','pickle'] , 'unsupported file format'
# get filelock
with filelock(file_name):
# LOAD MATLAB
if file_format == 'matlab' and scipy_loaded:
input_data = scipy.io.loadmat( file_name = file_name ,
squeeze_me = False ,
chars_as_strings = True ,
struct_as_record = True )
# pull core variable
            assert input_data.has_key(core_name) , 'core data field "%s" not found' % core_name
input_data = input_data[core_name]
# convert recarray to dictionary
input_data = rec2dict(input_data)
# LOAD PICKLE
elif file_format == 'pickle':
input_data = load_pickle(file_name)
# pull core variable
            assert input_data.has_key(core_name) , 'core data field "%s" not found' % core_name
input_data = input_data[core_name]
#: if file_format
#: with filelock
return input_data
#: def load()
# -------------------------------------------------------------------
# Save a Dictionary of Data
# -------------------------------------------------------------------
def save_data( data_dict, file_name, append=False ,
file_format = 'infer' ,
core_name='python_data' ):
""" save_data( data_dict, file_name, append=False ,
file_format = 'infer' ,
core_name='python_data' ):
Inputs:
file_name - data file name
data_dict - a dictionary or bunch to write
append - True/False to append existing data
file_format - 'infer', 'pickle', or 'matlab'
core_name - data is stored under a dictionary with this name
file_format = pickle, will save any pickleable python object
file_format = matlab, will save strings or float lists and
requires scipy.io.loadmat
file_format = infer (default), will infer format from extention
('.mat','.pkl')
matlab format saves data file from matlab 5 and later
will save nested dictionaries into nested matlab structures
cannot save classes and modules
uses scipy.io.loadmat
"""
try:
import scipy.io
scipy_loaded = True
except ImportError:
scipy_loaded = False
# process file format
if file_format == 'infer':
if os.path.splitext(file_name)[1] == '.mat':
file_format = 'matlab'
elif os.path.splitext(file_name)[1] == '.pkl':
file_format = 'pickle'
assert file_format in ['matlab','pickle'] , 'unsupported file format'
# get filelock
with filelock(file_name):
# if appending needed
# TODO: don't overwrite other core_names
if append == True and os.path.exists(file_name):
# check file exists
if not os.path.exists(file_name):
raise Exception , 'Cannot append, file does not exist: %s' % file_name
# load old data
data_dict_old = load_data( file_name = file_name ,
file_format = file_format ,
core_name = core_name )
# check for keys not in new data
for key,value in data_dict_old.iteritems():
if not data_dict.has_key(key):
data_dict[key] = value
#: for each dict item
#: if append
# save to core name
data_dict = {core_name : data_dict}
# SAVE MATLAB
if file_format == 'matlab':
# bunch it
data_dict = mat_bunch(data_dict)
# save it
scipy.io.savemat( file_name = file_name ,
mdict = data_dict,
format = '5', # matlab 5 .mat format
oned_as = 'column' )
elif file_format == 'pickle':
# save it
save_pickle(file_name,data_dict)
#: if file_format
#: with filelock
return
#: def save()
# -------------------------------------------------------------------
# Load Pickle
# -------------------------------------------------------------------
def load_pickle(file_name):
""" data = load_pickle(file_name)
loads a pickle with core_data dictionaries
assumes first entry is a list of all following data names
returns dictionary of data
"""
pkl_file = open(file_name,'rb')
#names = safe_unpickle.loadf(pkl_file)
names = pickle.load(pkl_file)
data_dict = dict.fromkeys(names,[])
for key in names:
#data_dict[key] = safe_unpickle.loadf(pkl_file)
data_dict[key] = pickle.load(pkl_file)
pkl_file.close()
return data_dict
#: def load_pickle()
# -------------------------------------------------------------------
# Save Pickle
# -------------------------------------------------------------------
def save_pickle(file_name,data_dict):
""" save_pickle(file_name,data_dict)
saves a core data dictionary
first pickle entry is a list of all following data names
"""
pkl_file = open(file_name,'wb')
names = data_dict.keys()
pickle.dump(names,pkl_file)
for key in names:
pickle.dump(data_dict[key],pkl_file)
pkl_file.close()
#: def save_pickle()
# -------------------------------------------------------------------
# Safe UnPickle
# -------------------------------------------------------------------
#class safe_unpickle(pickle.Unpickler):
#''' adds some safety to unpickling
#checks that only supported classes are loaded
#original source from http://nadiana.com/python-pickle-insecure#comment-144
#'''
## modules : classes considered safe
#PICKLE_SAFE = {
#'copy_reg' : ['_reconstructor'] ,
#'__builtin__' : ['object'] ,
#'numpy' : ['dtype','ndarray'] ,
#'numpy.core.multiarray' : ['scalar','_reconstruct'] ,
#'collections' : ['OrderedDict'] ,
#'SU2.io.state' : ['State'] , # SU2 Specific
#'SU2.io.config' : ['Config'] ,
#'SU2.eval.design' : ['Design'] ,
#'SU2.opt.project' : ['Project'] ,
#'SU2.util.ordered_bunch' : ['OrderedBunch'] ,
#'SU2.util.bunch' : ['Bunch'] ,
#'tasks_general' : ['General_Task'] ,
#'tasks_project' : ['Project','Job'] ,
#'tasks_su2' : ['Decomp','Deform','Direct','Cont_Adjoint',
#'Multiple_Cont_Adjoint','Finite_Diff','Adapt'] ,
#}
## make sets
#for key in PICKLE_SAFE.keys():
#PICKLE_SAFE[key] = set(PICKLE_SAFE[key])
## check for save module/class
#def find_class(self, module, name):
#if not module in self.PICKLE_SAFE:
#raise pickle.UnpicklingError(
#'Attempting to unpickle unsafe module %s' % module
#)
#__import__(module)
#mod = sys.modules[module]
#if not name in self.PICKLE_SAFE[module]:
#raise pickle.UnpicklingError(
#'Attempting to unpickle unsafe class %s' % name
#)
#klass = getattr(mod, name)
#return klass
## extend the load() and loads() methods
#@classmethod
#def loadf(self, pickle_file): # loads a file like pickle.load()
#return self(pickle_file).load()
#@classmethod
#def loads(self, pickle_string): #loads a string like pickle.loads()
#return self(StringIO.StringIO(pickle_string)).load()
# -------------------------------------------------------------------
# Convert Record Array to Dictionary
# -------------------------------------------------------------------
def rec2dict(array_in):
""" converts numpy record array to dictionary of lists
needed for loading matlab data
assumes array comes from scipy.io.loadmat, with
squeeze_me = False and struct_as_record = True
"""
import numpy
assert isinstance(array_in,numpy.ndarray) , 'input must be a numpy record array'
# make sure it's not an object array
if array_in.dtype == numpy.dtype('object'):
array_in = array_in.tolist()
# get record keys/names
keys = array_in.dtype.names
# start output dictionary
dataout = dict.fromkeys(keys,[])
for key in keys:
# squeeze_me option puts all items in a two-dim array
value = array_in[key].tolist()[0][0]
# convert string
if isinstance(value[0],unicode):
value = str(value[0])
# convert array
elif isinstance(value,numpy.ndarray):
# check for another struct level
if value.dtype.names == None:
value = value.tolist()
# telescoping
else:
value = rec2dict(value)
# store value
dataout[key] = value
return dataout
#: def rec2dict()
# -------------------------------------------------------------------
# Flatten a List
# -------------------------------------------------------------------
def flatten_list(input_list):
''' flatten an irregular list of lists of any depth
'''
output_list = []
for value in input_list:
if isinstance(value,(list,tuple)):
output_list.extend( flatten_list(value) ) # telescope
else:
output_list.append(value)
return output_list
#: def flatten_list()
# -------------------------------------------------------------------
# Append Lists in a Nested Dictionary
# -------------------------------------------------------------------
def append_nestdict(base_dict,add_dict):
""" append_nestdict(base_dict,add_dict)
appends base_dict with add_dict, allowing for
updating nested dictionaries
will update base_dict in place
"""
# break pointer
add_dict = copy.deepcopy(add_dict)
# append add_dict keys
for key in add_dict.keys():
# ensure base_dict key exists and is a list
if not base_dict.has_key(key):
if isinstance( add_dict[key] , dict ):
base_dict[key] = {}
else:
base_dict[key] = []
elif not ( isinstance( base_dict[key] , list )
or isinstance( base_dict[key] , dict ) ):
assert not isinstance( add_dict[key] , dict ) , 'base[key] is not a dictionary while add[key] is'
base_dict[key] = [base_dict[key]]
# append list or telescope
if isinstance( base_dict[key] , dict ):
append_nestdict(base_dict[key],add_dict[key]) # telescope
else:
base_dict[key].append(add_dict[key])
#: for add_dict[key]
# base_dict will be updated through its pointer
return
#: def append_nestdict()
# -------------------------------------------------------------------
# Matlab Bunch Class
# -------------------------------------------------------------------
class mat_bunch:
""" replicates dictionary functionality with class dot structure
for output of dictionaries to matlab
"""
def __init__(self, d):
for k, v in d.items():
if isinstance(v, dict):
if len(v): v = mat_bunch(v)
else: v = []
self.__dict__[k] = v
def __dict__(self):
return self.__dict__
# items
def keys(self):
return self.__dict__.keys()
def values(self):
return self.__dict__.values()
def items(self):
return self.__dict__.items()
# dictionary get/set/etc
def __getitem__(self,k):
return self.__dict__[k]
def __setitem__(self,k,v):
self.__dict__[k] = v
def __delitem__(self,k):
del self.__dict__[k]
def __str__(self):
print_format = '%s: %s'
state = []
for k,v in self.__dict__.items():
if isinstance(v,mat_bunch):
v = '%i-item mat_bunch' % len(v.items())
state.append(print_format % (k,v) )
return '\n'.join(state)
#: class mat_bunch
```
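A minimal round-trip sketch for the load/save helpers above. The import path, file name, and payload are illustrative assumptions; only the save_data and load_data signatures defined in this file are relied on, with the pickle format inferred from the .pkl extension.
```python
from VyPy.data.input_output import save_data, load_data   # import path assumed

payload = {'x': [1.0, 2.0, 3.0], 'tag': 'demo'}
save_data(payload, 'demo.pkl')    # stored under the default core_name 'python_data'
loaded = load_data('demo.pkl')    # returns the same dictionary
assert loaded['tag'] == 'demo'
```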
#### File: data/scaling/ScalingBunch.py
```python
import copy
from VyPy.data import OrderedBunch
from ScalingFunction import ScalingFunction
# ----------------------------------------------------------------------
# Linear Scaling Function
# ----------------------------------------------------------------------
class ScalingBunch(OrderedBunch,ScalingFunction):
def set_scaling(self,Other):
Other = copy.deepcopy(Other)
for key in Other.keys():
if self.has_key(key):
Other[key] = Other[key] / self[key]
return Other
def unset_scaling(self,Other):
Other = copy.deepcopy(Other)
for key in Other.keys():
if self.has_key(key):
Other[key] = Other[key] * self[key]
return Other
# ----------------------------------------------------------------------
# Module Tests
# ----------------------------------------------------------------------
if __name__ == '__main__':
import numpy as np
from Linear import Linear
S = ScalingBunch()
S.X = Linear(10.0,0.0)
S.Y = Linear(2.0,1.0)
data = OrderedBunch()
data.X = 10.0
data.Y = np.array([1,2,3.])
print data
data = data / S
print data
data = data * S
print data
```
#### File: optimize/drivers/Driver.py
```python
from VyPy.data import ibunch, obunch
# ----------------------------------------------------------------------
# Driver
# ----------------------------------------------------------------------
class Driver(object):
def __init__(self):
self.verbose = True
self.other_options = obunch()
def run(self,problem):
raise NotImplementedError
def pack_outputs(self,vars_min):
# unpack
objectives = self.problem.objectives
equalities = self.problem.constraints.equalities
inequalities = self.problem.constraints.inequalities
# start the data structure
outputs = ibunch()
outputs.variables = None
outputs.objectives = ibunch()
outputs.equalities = ibunch()
outputs.inequalities = ibunch()
outputs.success = False
outputs.messages = ibunch()
        # variables
outputs.variables = vars_min
# objectives
for tag in objectives.tags():
outputs.objectives[tag] = objectives[tag].evaluator.function(vars_min)[tag]
# equalities
for tag in equalities.tags():
outputs.equalities[tag] = equalities[tag].evaluator.function(vars_min)[tag]
# inequalities
for tag in inequalities.tags():
outputs.inequalities[tag] = inequalities[tag].evaluator.function(vars_min)[tag]
return outputs
```
#### File: gpr/inference/Inference.py
```python
import numpy as np
import scipy as sp
from VyPy import tools
from VyPy.exceptions import EvaluationFailure
from VyPy.tools import atleast_2d
class Inference(object):
def __init__(self,Kernel):
self.Kernel = Kernel
self.Train = Kernel.Train
return
def __call__(self,XI):
return self.predict(XI)
def precalc(self):
return
def predict(self,XI):
''' Evaluate GPR model at XI
'''
raise NotImplementedError
# unpack
Kernel = self.Kernel
XI = atleast_2d(XI)
# process
## CODE
# results
YI_solve = 0 # predicted output
CovI_solve = 0 # predicted covariance
# pack up outputs
try:
data = Kernel.pack_outputs(XI,YI_solve,CovI_solve)
except NotImplementedError:
data = [YI_solve,CovI_solve]
return data
#: def predict()
```
#### File: gpr/library/Gaussian.py
```python
from VyPy.regression import gpr
def Gaussian(XB,X,Y,DY=None,learn=True,**hypers):
""" class factory for a Gaussian Model
"""
Train = gpr.training.Training(XB,X,Y,DY)
Scaling = gpr.scaling.Linear(Train)
Train = Scaling.set_scaling(Train)
Kernel = gpr.kernel.Gaussian(Train,**hypers)
Infer = gpr.inference.Gaussian(Kernel)
Learn = gpr.learning.Likelihood(Infer)
Model = gpr.modeling.Regression(Learn,Scaling)
if learn:
Model.learn()
return Model
``` |
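A usage sketch for the class factory above, on made-up one-dimensional data. The import path is an assumption based on the file location, and the prediction call is left as a comment because the exact method on the returned Regression model is not shown in this file.
```python
import numpy as np
from VyPy.regression.gpr.library import Gaussian   # import path assumed

# toy training data (illustrative)
XB = np.array([[-2., 2.]])                    # design bounds
X  = np.array([np.linspace(-2., 2., 10)]).T   # 10 training points
Y  = X**2                                     # noiseless observations

model = Gaussian(XB, X, Y)   # builds the training/scaling/kernel/inference/learning chain and fits it
# prediction then goes through the returned Regression model,
# e.g. model.predict_YI(...) as used in this repository's test scripts
```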
{
"source": "JiaxuanYou/graph-pooling",
"score": 2
} |
#### File: JiaxuanYou/graph-pooling/dataloader.py
```python
import torch
import networkx as nx
import numpy as np
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
import pickle as pkl
import scipy.sparse as sp
import torch.utils.data
import itertools
from collections import Counter
from random import shuffle
import json
#
from networkx.readwrite import json_graph
from argparse import ArgumentParser
import pdb
import time
import random
def parse_index_file(filename):
index = []
for line in open(filename):
index.append(int(line.strip()))
return index
def sample_mask(idx, l):
"""Create mask."""
mask = np.zeros(l)
mask[idx] = 1
return np.array(mask, dtype=np.bool)
# def caveman_special(c=2,k=20,p_path=0.1,p_edge=0.3):
# p = p_path
# # path_count = max(int(np.ceil(p * k)),1)
# path_count = max(int(np.ceil(p * k)),1)
# G = nx.caveman_graph(c, k)
# # remove 50% edges
# p = 1-p_edge
# for (u, v) in list(G.edges()):
# if np.random.rand() < p and ((u < k and v < k) or (u >= k and v >= k)):
# G.remove_edge(u, v)
# # add path_count links
# for i in range(path_count):
# u = np.random.randint(0, k)
# v = np.random.randint(k, k * 2)
# G.add_edge(u, v)
# G = max(nx.connected_component_subgraphs(G), key=len)
# return G
def Graph_load_batch(min_num_nodes = 20, max_num_nodes = 1000, name = 'ENZYMES',node_attributes = True,graph_labels=True):
'''
load many graphs, e.g. enzymes
:return: a list of graphs
'''
print('Loading graph dataset: '+str(name))
G = nx.Graph()
# load data
path = 'data/'+name+'/'
data_adj = np.loadtxt(path+name+'_A.txt', delimiter=',').astype(int)
if node_attributes:
data_node_att = np.loadtxt(path+name+'_node_attributes.txt', delimiter=',')
data_node_label = np.loadtxt(path+name+'_node_labels.txt', delimiter=',').astype(int)
data_graph_indicator = np.loadtxt(path+name+'_graph_indicator.txt', delimiter=',').astype(int)
if graph_labels:
data_graph_labels = np.loadtxt(path+name+'_graph_labels.txt', delimiter=',').astype(int)
data_tuple = list(map(tuple, data_adj))
# print(len(data_tuple))
# print(data_tuple[0])
# add edges
G.add_edges_from(data_tuple)
# add node attributes
for i in range(data_node_label.shape[0]):
if node_attributes:
G.add_node(i+1, feature = data_node_att[i])
G.add_node(i+1, label = data_node_label[i])
G.remove_nodes_from(list(nx.isolates(G)))
# print(G.number_of_nodes())
# print(G.number_of_edges())
# split into graphs
graph_num = data_graph_indicator.max()
node_list = np.arange(data_graph_indicator.shape[0])+1
graphs = []
max_nodes = 0
for i in range(graph_num):
# find the nodes for each graph
nodes = node_list[data_graph_indicator==i+1]
G_sub = G.subgraph(nodes)
if graph_labels:
G_sub.graph['label'] = data_graph_labels[i]
# print('nodes', G_sub.number_of_nodes())
# print('edges', G_sub.number_of_edges())
# print('label', G_sub.graph)
if G_sub.number_of_nodes()>=min_num_nodes and G_sub.number_of_nodes()<=max_num_nodes:
graphs.append(G_sub)
if G_sub.number_of_nodes() > max_nodes:
max_nodes = G_sub.number_of_nodes()
# print(G_sub.number_of_nodes(), 'i', i)
# print('Graph dataset name: {}, total graph num: {}'.format(name, len(graphs)))
# logging.warning('Graphs loaded, total num: {}'.format(len(graphs)))
print('Loaded')
return graphs, data_node_att, data_node_label
# graphs = Graph_load_batch(name='PROTEINS_full')
# pdb.set_trace()
# def caveman_special(c=2,k=20,p_path=0.01):
# G = nx.caveman_graph(c, k)
# comps = [comp for comp in nx.connected_components(G)]
#
# for edge in list(G.edges()):
# if np.random.rand()<0.5:
# G.remove_edge(edge[0],edge[1])
#
# labels = {}
# for id,comp in enumerate(comps):
#
# for node in comp:
# labels[node] = id
#
# # pdb.set_trace()
# for u in G.nodes():
# for v in G.nodes():
# if labels[u] != labels[v] and np.random.rand()<p_path:
# G.add_edge(u,v)
#
# G = max(nx.connected_component_subgraphs(G), key=len)
# print(G.number_of_nodes(), G.number_of_edges())
# return G,labels
def caveman_special(l=2,k=20,p=0.1):
G = nx.caveman_graph(l, k)
comps = [comp for comp in nx.connected_components(G)]
nodes = G.nodes()
for (u, v) in G.edges():
if random.random() < p: # rewire the edge
x = random.choice(nodes)
if G.has_edge(u, x):
continue
G.remove_edge(u, v)
G.add_edge(u, x)
labels = {}
for id,comp in enumerate(comps):
for node in comp:
labels[node] = id
G = max(nx.connected_component_subgraphs(G), key=len)
return G,labels
# caveman_special(c = 20, k = 20)
def load_graphs(dataset_str):
if dataset_str == 'grid':
graphs = []
features = []
for _ in range(1):
graph = nx.grid_2d_graph(20, 20)
# graph = nx.grid_2d_graph(100, 100)
graph = nx.convert_node_labels_to_integers(graph)
# node_order = list(range(graph.number_of_nodes()))
# shuffle(node_order)
# order_mapping = dict(zip(graph.nodes(), node_order))
# graph = nx.relabel_nodes(graph, order_mapping, copy=True)
# feature = np.ones((graph.number_of_nodes(),1))
feature = np.identity(graph.number_of_nodes())
# label = nx.adjacency_matrix(graph).toarray()
graphs.append(graph)
features.append(feature)
labels = None
elif dataset_str == 'caveman_single':
graph = nx.connected_caveman_graph(20, 20)
feature = np.ones((graph.number_of_nodes(), 1))
# feature = np.identity(graph.number_of_nodes())
# graphs = [graph for _ in range(10)]
# features = [feature for _ in range(10)]
graphs = [graph]
features = [feature]
labels = None
#
# graphs = []
# features = []
# labels = None
# for k in range(10):
# graphs.append(caveman_special(c=20, k=20, p_edge=0.2, p_path=500))
# features.append(np.ones((400, 1)))
elif dataset_str == 'caveman':
graphs = []
features = []
labels = []
# labels = None
for i in range(50):
community_size = 20
graph = nx.connected_caveman_graph(20, community_size)
# graph,labels_dict = caveman_special(20,20,0)
# node_dict = {}
# for id, node in enumerate(graph.nodes()):
# node_dict[node] = id
p=0.001
count = 0
for (u, v) in graph.edges():
if random.random() < p: # rewire the edge
x = random.choice(graph.nodes())
if graph.has_edge(u, x):
continue
graph.remove_edge(u, v)
graph.add_edge(u, x)
count += 1
print('rewire:', count)
n = graph.number_of_nodes()
feature = np.ones((n, 1))
label = np.zeros((n,n))
for u in graph.nodes():
for v in graph.nodes():
# if labels_dict[u] == labels_dict[v] and u!=v:
if u//community_size == v//community_size and u!=v:
label[u,v] = 1
# label[node_dict[u],node_dict[v]] = 1
# feature = np.identity(graph.number_of_nodes())
graphs.append(graph)
features.append(feature)
labels.append(label)
elif dataset_str == 'protein':
graphs_all, features_all, labels_all = Graph_load_batch(name='PROTEINS_full')
features_all = (features_all-np.mean(features_all,axis=-1,keepdims=True))/np.std(features_all,axis=-1,keepdims=True)
graphs = []
features = []
labels = []
for graph in graphs_all:
n = graph.number_of_nodes()
label = np.zeros((n, n))
for i,u in enumerate(graph.nodes()):
for j,v in enumerate(graph.nodes()):
if labels_all[u-1] == labels_all[v-1] and u!=v:
label[i,j] = 1
if label.sum() > n*n/2:
continue
graphs.append(graph)
labels.append(label)
idx = [node-1 for node in graph.nodes()]
feature = features_all[idx,:]
# label_dict = labels_all[graph.nodes()]
features.append(feature)
# pdb.set_trace()
print('final num', len(graphs))
elif dataset_str == 'email':
with open('data/email.txt', 'rb') as f:
graph = nx.read_edgelist(f)
label_all = np.loadtxt('data/email_labels.txt')
graph_label_all = label_all.copy()
graph_label_all[:,1] = graph_label_all[:,1]//6
for edge in graph.edges():
if graph_label_all[int(edge[0])][1] != graph_label_all[int(edge[1])][1]:
graph.remove_edge(edge[0], edge[1])
comps = [comp for comp in nx.connected_components(graph) if len(comp)>10]
graphs = [graph.subgraph(comp) for comp in comps]
labels = []
features = []
for g in graphs:
n = g.number_of_nodes()
feature = np.ones((n, 1))
features.append(feature)
label = np.zeros((n, n))
for i, u in enumerate(g.nodes()):
for j, v in enumerate(g.nodes()):
if label_all[int(u)][1] == label_all[int(v)][1]:
label[i, j] = 1
label = label - np.identity(n)
labels.append(label)
elif dataset_str == 'ppi':
dataset_dir = 'data/ppi'
print("Loading data...")
G = json_graph.node_link_graph(json.load(open(dataset_dir + "/ppi-G.json")))
labels = json.load(open(dataset_dir + "/ppi-class_map.json"))
labels = {int(i): l for i, l in labels.items()}
train_ids = [n for n in G.nodes()]
train_labels = np.array([labels[i] for i in train_ids])
if train_labels.ndim == 1:
train_labels = np.expand_dims(train_labels, 1)
print("Using only features..")
feats = np.load(dataset_dir + "/ppi-feats.npy")
## Logistic gets thrown off by big counts, so log transform num comments and score
feats[:, 0] = np.log(feats[:, 0] + 1.0)
feats[:, 1] = np.log(feats[:, 1] - min(np.min(feats[:, 1]), -1))
feat_id_map = json.load(open(dataset_dir + "/ppi-id_map.json"))
feat_id_map = {int(id): val for id, val in feat_id_map.items()}
train_feats = feats[[feat_id_map[id] for id in train_ids]]
# pdb.set_trace()
node_dict = {}
for id,node in enumerate(G.nodes()):
node_dict[node] = id
comps = [comp for comp in nx.connected_components(G) if len(comp)>10]
graphs = [G.subgraph(comp) for comp in comps]
id_all = []
for comp in comps:
id_temp = []
for node in comp:
id = node_dict[node]
id_temp.append(id)
id_all.append(np.array(id_temp))
features = [train_feats[id_temp,:]+0.1 for id_temp in id_all]
# graphs = [G.subgraph(comp) for comp in ]
# pdb.set_trace()
# real
else:
names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
objects = []
for i in range(len(names)):
with open("data/ind.{}.{}".format(dataset_str, names[i]), 'rb') as f:
objects.append(pkl.load(f, encoding='latin1'))
x, y, tx, ty, allx, ally, graph = tuple(objects)
test_idx_reorder = parse_index_file("data/ind.{}.test.index".format(dataset_str))
test_idx_range = np.sort(test_idx_reorder)
if dataset_str == 'citeseer':
# Fix citeseer dataset (there are some isolated nodes in the graph)
# Find isolated nodes, add them as zero-vecs into the right position
test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder) + 1)
tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
tx_extended[test_idx_range - min(test_idx_range), :] = tx
tx = tx_extended
ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
ty_extended[test_idx_range - min(test_idx_range), :] = ty
ty = ty_extended
features = sp.vstack((allx, tx)).tolil()
features[test_idx_reorder, :] = features[test_idx_range, :]
graph = nx.from_dict_of_lists(graph)
# keep the max connected component
nodes_id = sorted(max(nx.connected_components(graph), key=len))
graph = max(nx.connected_component_subgraphs(graph), key=len)
# adj = nx.adjacency_matrix(G)
feature = features[nodes_id, :].toarray()
# feature = np.concatenate((np.identity(graph.number_of_nodes()), feature), axis=-1)
graphs = [graph]
features = [feature]
labels = None
return graphs, features, labels
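# e.g. (illustrative): load_graphs('grid') returns a single 20x20 grid graph,
# one identity feature matrix of shape (400, 400), and labels=None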
# load cora, citeseer and pubmed dataset
def load_data(dataset_str):
"""
Loads input data from gcn/data directory
ind.dataset_str.x => the feature vectors of the training instances as scipy.sparse.csr.csr_matrix object;
ind.dataset_str.tx => the feature vectors of the test instances as scipy.sparse.csr.csr_matrix object;
ind.dataset_str.allx => the feature vectors of both labeled and unlabeled training instances
(a superset of ind.dataset_str.x) as scipy.sparse.csr.csr_matrix object;
ind.dataset_str.y => the one-hot labels of the labeled training instances as numpy.ndarray object;
ind.dataset_str.ty => the one-hot labels of the test instances as numpy.ndarray object;
ind.dataset_str.ally => the labels for instances in ind.dataset_str.allx as numpy.ndarray object;
ind.dataset_str.graph => a dict in the format {index: [index_of_neighbor_nodes]} as collections.defaultdict
object;
ind.dataset_str.test.index => the indices of test instances in graph, for the inductive setting as list object.
All objects above must be saved using python pickle module.
:param dataset_str: Dataset name
:return: All data input files loaded (as well the training/test data).
"""
# synthetic
# todo: design node label
labels, idx_train, idx_val, idx_test = None,None,None,None
if dataset_str == 'grid':
G = nx.grid_2d_graph(20, 20)
# G = nx.grid_2d_graph(100, 100)
# features = np.ones((G.number_of_nodes(),1))
features = np.identity(G.number_of_nodes())
labels = np.zeros((G.number_of_nodes(),2))
labels[0:G.number_of_nodes()//2,0] = 1
labels[G.number_of_nodes()//2:,1] = 1
idx = np.random.permutation(G.number_of_nodes())
idx_train = idx[0:G.number_of_nodes()//2]
idx_val = idx[G.number_of_nodes()//2:]
elif dataset_str == 'caveman':
G = nx.connected_caveman_graph(20,20)
features = np.identity(G.number_of_nodes())
# features = np.ones((G.number_of_nodes(),1))
elif dataset_str == 'barabasi':
G = nx.barabasi_albert_graph(1000, 2)
features = np.identity(G.number_of_nodes())
# features = np.ones((G.number_of_nodes(), 1))
# real
else:
names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
objects = []
for i in range(len(names)):
with open("data/ind.{}.{}".format(dataset_str, names[i]), 'rb') as f:
objects.append(pkl.load(f, encoding='latin1'))
x, y, tx, ty, allx, ally, graph = tuple(objects)
test_idx_reorder = parse_index_file("data/ind.{}.test.index".format(dataset_str))
test_idx_range = np.sort(test_idx_reorder)
if dataset_str == 'citeseer':
# Fix citeseer dataset (there are some isolated nodes in the graph)
# Find isolated nodes, add them as zero-vecs into the right position
test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)
tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
tx_extended[test_idx_range-min(test_idx_range), :] = tx
tx = tx_extended
ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
ty_extended[test_idx_range-min(test_idx_range), :] = ty
ty = ty_extended
features = sp.vstack((allx, tx)).tolil()
features[test_idx_reorder, :] = features[test_idx_range, :]
G = nx.from_dict_of_lists(graph)
# print(all(G.nodes()[i] <= G.nodes()[i + 1] for i in range(len(G.nodes()) - 1))) # check if sorted
# keep the max connected component
nodes_id = sorted(max(nx.connected_components(G), key=len))
G = max(nx.connected_component_subgraphs(G), key=len)
# adj = nx.adjacency_matrix(G)
features = features[nodes_id,:]
labels = np.vstack((ally, ty))
labels[test_idx_reorder, :] = labels[test_idx_range, :]
labels = labels[nodes_id,:]
# idx_test = test_idx_range.tolist()
# idx_train = range(len(y))
# idx_val = range(len(y), len(y) + 500)
idx_train = range(500)
idx_val = range(500, 1000)
idx_test = range(G.number_of_nodes()-1000,G.number_of_nodes())
return G, features, labels, idx_train, idx_val, idx_test
#
# train_mask = sample_mask(idx_train, labels.shape[0])
# val_mask = sample_mask(idx_val, labels.shape[0])
# test_mask = sample_mask(idx_test, labels.shape[0])
#
# y_train = np.zeros(labels.shape)
# y_val = np.zeros(labels.shape)
# y_test = np.zeros(labels.shape)
# y_train[train_mask, :] = labels[train_mask, :]
# y_val[val_mask, :] = labels[val_mask, :]
# y_test[test_mask, :] = labels[test_mask, :]
#
# return G, adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask
def get_random_subset(G, p=0.5, return_id = True):
'''
get a random subset of nodes
:param G: input graph
:param p: prob of including a node
:return: a list of nodes, will not be empty
'''
nodes = G.nodes()
while True:
rand_values = np.random.rand(len(nodes))
if np.any(np.less(rand_values,p)):
break
if return_id:
nodes_return = [id for id,node in enumerate(nodes) if rand_values[id]<p]
else:
nodes_return = [node for id,node in enumerate(nodes) if rand_values[id]<p]
return nodes_return
def get_random_subsets(G, c=1):
'''
    get roughly c*log^2(n) random subsets of nodes
:param G: input graph
:param c: repeat same Sij for c*log(n) times
:return: list of list of nodes, length fixed
'''
random_subsets = []
for i in range(int(np.log2(G.number_of_nodes()))):
p = 1/np.exp2(i+1)
for j in range(int(np.log2(G.number_of_nodes())*c)):
subset = get_random_subset(G,p)
random_subsets.append(subset)
return random_subsets
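# e.g. (illustrative): for a 400-node graph and c=1 this builds
# int(log2(400)) * int(log2(400)*1) = 8 * 8 = 64 subsets, drawn with inclusion
# probabilities 1/2, 1/4, ..., 1/2^8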
def get_shortest_dist(shortest_dist, random_subsets):
'''
    get the dist from a node to each random subset (min over nodes in the subset)
    :param shortest_dist: dict of shortest path lengths keyed by target node
    :param random_subsets: list of node subsets
    :return: node_dist (1,m) array of min dists, node_id (1,m) array of argmin nodes
    TODO: may consider different output format
'''
node_dist = np.zeros((1,len(random_subsets)))
node_id = np.zeros((1,len(random_subsets)))
for i, random_subset in enumerate(random_subsets):
dist_min = 1e6 # todo: other aggregation possible: min, mean, sum, etc.
node_min = 0
for node in random_subset:
dist = shortest_dist[node]
if dist<dist_min:
dist_min = dist
node_min = node
node_dist[0, i] = dist_min
node_id[0, i] = node_min
return node_dist, node_id
def get_shortest_dists(shortest_dists, random_subsets, nodes):
'''
get dist for all nodes
:param shortest_dists:
:param random_subsets:
:param nodes: from G.nodes(), used to make sure node order is correct
:return: subset_dists n*m, subset_ids n*m
'''
subset_dists = np.zeros((len(shortest_dists),len(random_subsets)))
subset_ids = np.zeros((len(shortest_dists),len(random_subsets))).astype(int)
for i,node_id in enumerate(nodes):
shortest_dist = shortest_dists[node_id]
node_dist, node_id_new = get_shortest_dist(shortest_dist,random_subsets)
subset_dists[i] = node_dist
subset_ids[i] = node_id_new
return subset_dists, subset_ids
def get_feature(subset_ids, node_feature):
'''
match node ids for each subset with the corresponding features
:param subset_ids: n*m
:param node_feature: n*d
:return: subset_features n*m*d
'''
# subset_features = np.zeros((subset_ids.shape[0],subset_ids.shape[1],
# node_feature.shape[1]))
# for i in range(subset_features.shape[0]):
# subset_features[i,:,:] = node_feature[subset_ids[i,:]]
subset_features = node_feature[subset_ids.flatten(),:]
subset_features = subset_features.reshape((subset_ids.shape[0],subset_ids.shape[1],
node_feature.shape[1]))
return subset_features
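# e.g. (illustrative): subset_ids of shape (n, m) with node_feature of shape (n, d)
# gives subset_features of shape (n, m, d); row i holds the features of the nearest
# anchor node found in each of the m random subsets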
class graph_dataset_node_classification(torch.utils.data.Dataset):
def __init__(self, name = 'cora', permute = False):
self.G, self.node_feature, self.label, self.idx_train, self.idx_val, self.idx_test = \
load_data(name)
self.n = self.G.number_of_nodes()
self.subset_types = int(np.log2(self.G.number_of_nodes()))
self.adj = nx.adjacency_matrix(self.G).toarray() + np.identity(self.n)
try:
self.node_feature = self.node_feature.toarray()
except:
pass
self.node_feature = self.node_feature[:, np.newaxis, :]
# G = max(nx.connected_component_subgraphs(G), key=len)
self.G = nx.convert_node_labels_to_integers(self.G)
self.node_label = np.zeros(self.label.shape[0])
for i in range(self.label.shape[0]):
self.node_label[i] = np.where(self.label[i] == 1)[0][0]
self.num_class = self.label.shape[-1]
self.shortest_dists = nx.shortest_path_length(self.G)
self.permute = permute
if not permute:
self.recompute_feature()
def recompute_feature(self):
# compute dist
t1 = time.time()
self.random_subsets = get_random_subsets(self.G, c=0.5)
t2 = time.time()
self.subset_dists, self.subset_ids = get_shortest_dists(self.shortest_dists, self.random_subsets, self.G.nodes())
t3 = time.time()
self.subset_features = get_feature(self.subset_ids, self.node_feature[:,0,:]) # remove extra dim
t4 = time.time()
self.subset_dists = self.subset_dists[:, :, np.newaxis]
t5 = time.time()
print('node num:', self.G.number_of_nodes(), 'subset num:', len(self.random_subsets),
'time:', t5 - t1, t2-t1,t3-t2,t4-t3,t5-t4)
return self.subset_dists, self.subset_features
def __len__(self):
return self.subset_dists.shape[0]
def __getitem__(self, idx):
return self.node_feature[self.idx][idx], self.node_label[self.idx][idx], self.subset_dists[idx], self.subset_features[idx]
def get_fullbatch_train(self):
if self.permute:
self.recompute_feature()
return self.node_feature[self.idx_train], self.adj, self.node_label[self.idx_train], self.subset_dists[self.idx_train], self.subset_features[self.idx_train], self.subset_ids[self.idx_train]
def get_fullbatch_val(self):
if self.permute:
self.recompute_feature()
return self.node_feature[self.idx_val], self.adj, self.node_label[self.idx_val], self.subset_dists[self.idx_val], self.subset_features[self.idx_val], self.subset_ids[self.idx_val]
def get_fullbatch_test(self):
if self.permute:
self.recompute_feature()
return self.node_feature[self.idx_test], self.adj, self.node_label[self.idx_test], self.subset_dists[self.idx_test], self.subset_features[self.idx_test], self.subset_ids[self.idx_test]
def get_fullbatch(self):
if self.permute:
self.recompute_feature()
return self.node_feature, self.adj, self.node_label, self.subset_dists, self.subset_features, self.subset_ids
class graph_dataset_link_prediction(torch.utils.data.Dataset):
def __init__(self, name = 'cora', test_ratio = 0.2, permute = False, approximate=False):
self.G, self.node_feature, _, _, _, _ = load_data(name)
self.n = self.G.number_of_nodes()
self.subset_types = int(np.log2(self.G.number_of_nodes()))
self.approximate = approximate
# default value
self.subset_dists, self.subset_features = np.zeros((0)), np.zeros((0))
try:
self.node_feature = self.node_feature.toarray()
except:
pass
self.node_feature = self.node_feature[:, np.newaxis, :]
self.G = nx.convert_node_labels_to_integers(self.G)
self.split_dataset(test_ratio)
assert self.G.nodes()==self.G_train.nodes()
if approximate:
self.node_dict = {}
for i in range(self.n):
self.node_dict[self.G.nodes()[i]] = i
else:
self.shortest_dists = nx.shortest_path_length(self.G_train)
self.adj = nx.adjacency_matrix(self.G).toarray() + np.identity(self.n)
self.adj_train = nx.adjacency_matrix(self.G_train).toarray() + np.identity(self.n)
self.adj_test = self.adj - self.adj_train
# mask
num_positive_train = np.sum((self.adj_train>0).astype(int))
self.mask_train = self.adj_train + np.random.rand(self.n, self.n)
self.mask_train = (self.adj_train + (self.mask_train < num_positive_train/(self.n*self.n)).astype(int)).astype(bool).astype(int)
num_positive_test = np.sum((self.adj_test>0).astype(int))
self.mask_test = self.adj + np.random.rand(self.n, self.n)
self.mask_test = (self.adj_test + (self.mask_test < num_positive_test / (self.n * self.n)).astype(int)).astype(bool).astype(int)
self.permute = permute
if not self.permute:
self.recompute_feature()
def split_dataset(self, test_ratio=0.2):
self.G_train = self.G.copy()
for edge in self.G_train.edges():
self.G_train.remove_edge(edge[0],edge[1])
if np.random.rand() > test_ratio or not nx.is_connected(self.G_train):
self.G_train.add_edge(edge[0],edge[1])
print('Train:', 'Connected', nx.is_connected(self.G_train),
'Node', self.G_train.number_of_nodes(), 'Edge', self.G_train.number_of_edges())
print('All:', 'Connected', nx.is_connected(self.G),
'Node', self.G.number_of_nodes(), 'Edge', self.G.number_of_edges())
def mask_adj_list(self):
self.adj_list = self.G_train.adjacency_list()
self.adj_count = np.zeros((self.n, self.n))
# self.adj_count = np.zeros((len(self.random_subsets),self.n, self.n))
# aggreagated adj_count
for i,node_list in enumerate(self.adj_list):
adj_list_temp = []
for random_subset in self.random_subsets:
node_list_temp = list(set(node_list) & set(random_subset))
if len(node_list_temp)>0:
# adj_list_temp.append(node_list_temp)
adj_list_temp += node_list_temp
for node in adj_list_temp:
self.adj_count[i, self.node_dict[node]] += 1
# batch version
# for i,node_list in enumerate(self.adj_list):
# for b,random_subset in enumerate(self.random_subsets):
# node_list_temp = list(set(node_list) & set(random_subset))
# if len(node_list_temp)>0:
# for node in node_list_temp:
# self.adj_count[b, i, self.node_dict[node]] += 1
# pdb.set_trace()
def recompute_feature(self):
# compute dist
t1 = time.time()
self.random_subsets = get_random_subsets(self.G_train, c=1)
if self.approximate:
self.mask_adj_list()
else:
self.subset_dists, self.subset_ids = get_shortest_dists(self.shortest_dists, self.random_subsets, self.G_train.nodes())
self.subset_features = get_feature(self.subset_ids, self.node_feature[:,0,:]) # remove extra dim
self.subset_dists = self.subset_dists[:, :, np.newaxis]
t2 = time.time()
print('node num:', self.G_train.number_of_nodes(), 'subset num:', len(self.random_subsets),
'time:', t2 - t1)
# return self.subset_dists, self.subset_features
def __len__(self):
return self.G_train.number_of_nodes()
def __getitem__(self, idx): # todo: edit for link pred
return self.node_feature[self.idx][idx], self.subset_dists[idx], self.subset_features[idx]
def get_fullbatch_train(self):
if self.permute:
self.recompute_feature()
return (self.node_feature, self.adj_train, self.subset_dists, self.subset_features, self.mask_train)
def get_fullbatch_test(self):
if self.permute:
self.recompute_feature()
return (self.node_feature, self.adj_train, self.subset_dists, self.subset_features, self.mask_test, self.adj_test)
def preprocess(A):
# Get size of the adjacency matrix
size = len(A)
# Get the degrees for each node
degrees = []
for node_adjaceny in A:
num = 0
for node in node_adjaceny:
if node == 1.0:
num = num + 1
# Add an extra for the "self loop"
num = num + 1
degrees.append(num)
# Create diagonal matrix D from the degrees of the nodes
D = np.diag(degrees)
# Cholesky decomposition of D
D = np.linalg.cholesky(D)
# Inverse of the Cholesky decomposition of D
D = np.linalg.inv(D)
# Create an identity matrix of size x size
I = np.eye(size)
# Turn adjacency matrix into a numpy matrix
A = np.matrix(A)
    # Create A hat (adjacency with self loops; not used by the return below)
    A_hat = A + I
    # Return the symmetrically normalized adjacency D^-1/2 A D^-1/2
return D @ A @ D
# return A_hat, D
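# e.g. (illustrative): for a 2-node graph with one edge, each degree becomes 1+1=2
# (self loop counted), D^-1/2 = diag(1/sqrt(2)), and the returned matrix is
# [[0, 0.5], [0.5, 0]]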
class graphs_dataset_loader():
def __init__(self, name = 'grid', remove_link_ratio = 0.1, graph_test_ratio = 0.2,
permute = True, approximate=-1, normalize_adj = False):
# args
self.name = name
self.remove_link_ratio = remove_link_ratio
self.graph_test_ratio = graph_test_ratio
self.permute = permute
self.approximate = approximate
self.normalize_adj = normalize_adj
# 1 load data
# list of networkx graphs; list of n*m arrays; list of n*n arrays/None(when link prediction)
self.graphs, self.graphs_feature, self.graphs_label = load_graphs(self.name)
# 2 (Link predition only) randomly remove edges for graphs, get different labels
if self.remove_link_ratio>0:
self.graphs, self.graphs_label_train, self.graphs_label_test = self.remove_link_graphs()
else:
self.graphs_label_train, self.graphs_label_test = self.graphs_label, self.graphs_label
# 3 get adj
self.graphs_adj = [nx.adjacency_matrix(graph).toarray() for graph in self.graphs]
if self.normalize_adj:
self.graphs_adj = [preprocess(adj) for adj in self.graphs_adj]
# 4 precompute dists for all node pairs for all graphs
self.graphs_dist = self.precompute_dist()
# 5 set up mask for train and test
self.graphs_mask_train, self.graphs_mask_test = self.set_masks()
# 6 set up data index
if len(self.graphs)>1:
self.ids = np.random.permutation(len(self.graphs))
self.ids_test = self.ids[:int(len(self.graphs) * self.graph_test_ratio)]
self.ids_train = self.ids[int(len(self.graphs) * self.graph_test_ratio):]
else: # transductive
self.ids_test = np.array([0])
self.ids_train = np.array([0])
self.counter_train = 0
self.counter_test = 0
self.done_train = False
self.done_test = False
print(name, len(self.graphs))
def set_masks(self):
# for link prediction, two masks are different
# for community detection, two masks are the same
# Note: diag of adj should be 0!!!
if self.remove_link_ratio > 0:
graphs_mask_train = []
graphs_mask_test = []
for i in range(len(self.graphs)):
adj = self.graphs_label_test[i]
adj_train = self.graphs_label_train[i]
adj_test = adj - adj_train
n = adj_train.shape[0]
num_positive_train = np.sum((adj_train > 0).astype(int))
mask_train = adj_train + np.identity(n) + np.random.rand(n, n)
mask_train = (adj_train + (mask_train < num_positive_train / (n * n)).astype(int)).astype(bool).astype(int)
num_positive_test = np.sum((adj_test > 0).astype(int))
mask_test = adj + np.identity(n) + np.random.rand(n, n)
mask_test = (adj_test + (mask_test < num_positive_test / (n * n)).astype(int)).astype(bool).astype(int)
graphs_mask_train.append(mask_train)
graphs_mask_test.append(mask_test)
else:
graphs_mask_train = []
for i in range(len(self.graphs)):
adj = self.graphs_label_train[i]
n = adj.shape[0]
num_positive_train = np.sum((adj > 0).astype(int))
mask_train = adj + np.identity(n) + np.random.rand(n, n)
mask_train = (adj + (mask_train < num_positive_train / (n * n)).astype(int)).astype(bool).astype(int)
graphs_mask_train.append(mask_train)
graphs_mask_test = graphs_mask_train
return graphs_mask_train, graphs_mask_test
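    # Added note (not in the original): each mask keeps all positive entries plus, in
    # expectation, roughly the same number of randomly sampled negatives. Adding the adjacency
    # (and the identity) before thresholding pushes existing edges and the diagonal above the
    # cutoff num_positive / (n * n), so the sampled negatives come only from true non-edges.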
def get_batch_train(self):
# reset epoch token
if self.done_train:
self.done_train = False
id = self.ids_train[self.counter_train]
self.counter_train += 1
# check epoch ends
if self.counter_train == len(self.ids_train):
self.counter_train = 0
self.done_train = True
np.random.shuffle(self.ids_train)
# re-sample random subsets
self.random_subsets = get_random_subsets(self.graphs[id], c=1)
self.dist_max, self.dist_argmax = self.get_shortest_dists(self.graphs_dist[id], self.random_subsets)
return (self.graphs_adj[id], self.graphs_feature[id], self.graphs_dist[id], self.graphs_label_train[id], self.graphs_mask_train[id])
def get_batch_test(self):
# reset epoch token
if self.done_test:
self.done_test = False
id = self.ids_test[self.counter_test]
self.counter_test += 1
# check epoch ends
if self.counter_test == len(self.ids_test):
self.counter_test = 0
self.done_test = True
np.random.shuffle(self.ids_test)
# re-sample random subsets
self.random_subsets = get_random_subsets(self.graphs[id], c=1)
self.dist_max, self.dist_argmax = self.get_shortest_dists(self.graphs_dist[id], self.random_subsets)
return (self.graphs_adj[id], self.graphs_feature[id], self.graphs_dist[id], self.graphs_label_test[id],
self.graphs_mask_test[id])
def precompute_dist(self):
        '''
        Here dist is 1 / (shortest path length + 1), so higher means closer;
        0 means disconnected (or beyond the approximation cutoff).
        :return:
        '''
graphs_dist = []
for graph in self.graphs:
if self.approximate>0:
# dists_array = np.maximum(nx.adjacency_matrix(graph).toarray()*0.5 + np.identity(graph.number_of_nodes()), 0.1)
# dists_array = nx.adjacency_matrix(graph).toarray()*0.5 + np.identity(graph.number_of_nodes())
dists_array = np.zeros((graph.number_of_nodes(), graph.number_of_nodes()))
# todo: consider disconnected graph
dists_dict = nx.all_pairs_shortest_path_length(graph,cutoff=self.approximate)
for i, node_i in enumerate(graph.nodes()):
shortest_dist = dists_dict[node_i]
for j, node_j in enumerate(graph.nodes()):
dist = shortest_dist.get(node_j, -1)
if dist!=-1:
dists_array[i, j] = 1 / (dist + 1)
else:
dists_array = np.zeros((graph.number_of_nodes(), graph.number_of_nodes()))
# todo: consider disconnected graph
dists_dict = nx.all_pairs_shortest_path_length(graph)
for i, node_i in enumerate(graph.nodes()):
shortest_dist = dists_dict[node_i]
for j, node_j in enumerate(graph.nodes()):
dist = shortest_dist.get(node_j, -1)
if dist != -1:
dists_array[i, j] = 1 / (dist + 1)
graphs_dist.append(dists_array)
return graphs_dist
def get_shortest_dists(self, graph_dist, random_subsets):
dist_max = np.zeros((graph_dist.shape[0], len(random_subsets)))
dist_argmax = np.zeros((graph_dist.shape[0], len(random_subsets)))
for id,random_subset in enumerate(random_subsets):
graph_dist_temp = graph_dist[:, random_subset]
dist_max[:,id] = np.amax(graph_dist_temp, axis=-1)
dist_argmax[:,id] = np.argmax(graph_dist_temp, axis=-1)
return dist_max, dist_argmax
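    # Added note (not in the original): get_shortest_dists builds anchor-based distance
    # features. For each node and each random subset it keeps the largest 1/(dist+1) value
    # (i.e. the closest anchor) and that anchor's position within the subset, e.g.
    #   graph_dist[v, random_subset] = [0.5, 0.25, 1.0]  ->  dist_max[v, id] = 1.0, dist_argmax[v, id] = 2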
def get_ordered_neighbours(self):
pass
def remove_link_graph(self, graph):
graph_removed = graph.copy()
for edge in graph_removed.edges():
if np.random.rand() < self.remove_link_ratio:
graph_removed.remove_edge(edge[0], edge[1])
if self.name != 'ppi':
if not nx.is_connected(graph_removed):
graph_removed.add_edge(edge[0], edge[1])
print('Before:', 'Connected', nx.is_connected(graph),
'Node', graph.number_of_nodes(), 'Edge', graph.number_of_edges())
print('After:', 'Connected', nx.is_connected(graph_removed),
'Node', graph_removed.number_of_nodes(), 'Edge', graph_removed.number_of_edges())
return graph_removed
def remove_link_graphs(self):
graphs_removed = []
graphs_label_train = []
graphs_label_test = []
for graph in self.graphs:
graph_removed = self.remove_link_graph(graph)
graphs_removed.append(graph_removed)
graphs_label_train.append(nx.adjacency_matrix(graph_removed).toarray())
graphs_label_test.append(nx.adjacency_matrix(graph).toarray())
return graphs_removed, graphs_label_train, graphs_label_test
def read_graphs():
pass
# for explainer project
class graphs_dataset_loader_simple():
def __init__(self, name='grid', remove_link_ratio=0.1, graph_test_ratio=0.2,
permute=True, approximate=-1, normalize_adj=False):
# args
self.name = name
self.remove_link_ratio = remove_link_ratio
self.graph_test_ratio = graph_test_ratio
self.permute = permute
self.approximate = approximate
self.normalize_adj = normalize_adj
# 1 load data
# list of networkx graphs; list of n*m arrays; list of n*n arrays/None(when link prediction)
self.graphs, self.graphs_feature, self.graphs_label = load_graphs(self.name)
# 3 get adj
self.graphs_adj = [nx.adjacency_matrix(graph).toarray() for graph in self.graphs]
if self.normalize_adj:
self.graphs_adj = [preprocess(adj) for adj in self.graphs_adj]
# 6 set up data index
self.counter_train = 0
self.done_train = False
print(name, len(self.graphs))
def get_batch_train(self):
# reset epoch token
if self.done_train:
self.done_train = False
id = self.counter_train
self.counter_train += 1
# check epoch ends
if self.counter_train == len(self.graphs):
self.counter_train = 0
self.done_train = True
return (self.graphs_adj[id], self.graphs_feature[id])
# dataset = graphs_dataset_loader_simple()
# dataset.get_batch_train()
#
# t1 = time.time()
# dataset = graphs_dataset_loader(approximate=-1, name='ppi')
#
# for i in range(10):
# t2 = time.time()
# batch_train = dataset.get_batch_train()
# t3 = time.time()
# print(t3-t2)
# t2 = time.time()
# print(t2-t1)
# batch_test = dataset.get_batch_test()
# pdb.set_trace()
# dataset = graph_dataset_link_prediction(name='grid')
# 0113 archive
# class graph_dataset_link_prediction(torch.utils.data.Dataset):
# def __init__(self, name = 'cora', test_ratio = 0.2, permute = False, approximate=False):
# self.G, self.node_feature, _, _, _, _ = load_data(name)
# self.n = self.G.number_of_nodes()
# self.subset_types = int(np.log2(self.G.number_of_nodes()))
#
# try:
# self.node_feature = self.node_feature.toarray()
# except:
# pass
# self.node_feature = self.node_feature[:, np.newaxis, :]
#
# # G = max(nx.connected_component_subgraphs(G), key=len)
#
# self.G = nx.convert_node_labels_to_integers(self.G)
#
# self.node_dict = {}
# for i in range(self.n):
# self.node_dict[self.G.nodes()[i]] = i
#
#
#
# self.split_dataset(test_ratio)
# assert self.G.nodes()==self.G_train.nodes()
#
# self.shortest_dists = nx.shortest_path_length(self.G_train)
#
# # self.G_raw = self.G.copy()
# # self.G_train_raw = self.G_train.copy()
#
# # self.G = nx.convert_node_labels_to_integers(self.G)
# # self.G_train = nx.convert_node_labels_to_integers(self.G_train)
#
# self.adj = nx.adjacency_matrix(self.G).toarray() + np.identity(self.n)
# self.adj_train = nx.adjacency_matrix(self.G_train).toarray() + np.identity(self.n)
# self.adj_test = self.adj - self.adj_train
#
# # mask
# num_positive_train = np.sum((self.adj_train>0).astype(int))
# self.mask_train = self.adj_train + np.random.rand(self.n, self.n)
# self.mask_train = (self.adj_train + (self.mask_train < num_positive_train/(self.n*self.n)).astype(int)).astype(bool).astype(int)
# num_positive_test = np.sum((self.adj_test>0).astype(int))
# self.mask_test = self.adj + np.random.rand(self.n, self.n)
# self.mask_test = (self.adj_test + (self.mask_test < num_positive_test / (self.n * self.n)).astype(int)).astype(bool).astype(int)
#
# self.permute = permute
# if not self.permute:
# self.recompute_feature()
#
# def split_dataset(self, test_ratio=0.2):
# self.G_train = self.G.copy()
# for edge in self.G_train.edges():
# self.G_train.remove_edge(edge[0],edge[1])
# if np.random.rand() > test_ratio or not nx.is_connected(self.G_train):
# self.G_train.add_edge(edge[0],edge[1])
# print('Train:', 'Connected', nx.is_connected(self.G_train),
# 'Node', self.G_train.number_of_nodes(), 'Edge', self.G_train.number_of_edges())
# print('All:', 'Connected', nx.is_connected(self.G),
# 'Node', self.G.number_of_nodes(), 'Edge', self.G.number_of_edges())
#
# # def recompute_feature(self, G):
# # # compute dist
# # t1 = time.time()
# # # random_subsets = get_random_subsets(G, c=0.5)
# # random_subsets = get_random_subsets(G, c=1)
# # shortest_dists = nx.shortest_path_length(G)
# # subset_dists, subset_ids = get_shortest_dists(shortest_dists, random_subsets, G.nodes())
# # subset_features = get_feature(subset_ids, self.node_feature[:,0,:]) # remove extra dim
# #
# # subset_dists = subset_dists[:, :, np.newaxis]
# #
# # t2 = time.time()
# # print('node num:', self.G.number_of_nodes(), 'subset num:', len(random_subsets),
# # 'time:', t2 - t1)
# # return subset_dists, subset_features
#
# def mask_adj_list(self):
# self.adj_list = self.G_train.adjacency_list()
# self.adj_count = np.zeros((self.n, self.n))
# # self.adj_count = np.zeros((len(self.random_subsets),self.n, self.n))
#
# # aggreagated adj_count
# for i,node_list in enumerate(self.adj_list):
# adj_list_temp = []
# for random_subset in self.random_subsets:
# node_list_temp = list(set(node_list) & set(random_subset))
# if len(node_list_temp)>0:
# # adj_list_temp.append(node_list_temp)
# adj_list_temp += node_list_temp
# for node in adj_list_temp:
# self.adj_count[i, self.node_dict[node]] += 1
#
#
# # for i,node_list in enumerate(self.adj_list):
# # for b,random_subset in enumerate(self.random_subsets):
# # node_list_temp = list(set(node_list) & set(random_subset))
# # if len(node_list_temp)>0:
# # for node in node_list_temp:
# # self.adj_count[b, i, self.node_dict[node]] += 1
#
# # pdb.set_trace()
#
#
#
# def recompute_feature(self):
# # compute dist
# t1 = time.time()
# self.random_subsets = get_random_subsets(self.G_train, c=1)
# t2 = time.time()
# self.subset_dists, self.subset_ids = get_shortest_dists(self.shortest_dists, self.random_subsets, self.G_train.nodes())
# t3 = time.time()
# self.subset_features = get_feature(self.subset_ids, self.node_feature[:,0,:]) # remove extra dim
# t4 = time.time()
# self.subset_dists = self.subset_dists[:, :, np.newaxis]
#
# t5 = time.time()
# print('node num:', self.G_train.number_of_nodes(), 'subset num:', len(self.random_subsets),
# 'time:', t5 - t1, t2-t1,t3-t2,t4-t3,t5-t4)
#
# self.mask_adj_list()
# return self.subset_dists, self.subset_features
#
# def __len__(self):
# return self.G_train.number_of_nodes()
#
# def __getitem__(self, idx): # todo: edit for link pred
# return self.node_feature[self.idx][idx], self.subset_dists[idx], self.subset_features[idx]
#
# def get_fullbatch_train(self):
# if self.permute:
# self.recompute_feature()
# return (self.node_feature, self.adj_train, self.subset_dists, self.subset_features, self.mask_train)
#
# def get_fullbatch_test(self):
# if self.permute:
# self.recompute_feature()
# return (self.node_feature, self.adj_train, self.subset_dists, self.subset_features, self.mask_test, self.adj_test)
``` |
{
"source": "jiaxue1993/pytorch-material-classification",
"score": 2
} |
#### File: pytorch-material-classification/dataloader/gtos_mobile.py
```python
import os
import os.path
import torch
from utils.data_aug import Lighting
from torchvision import datasets, transforms
class Dataloder():
def __init__(self, config):
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
transform_train = transforms.Compose([
transforms.Resize(256),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ColorJitter(0.4,0.4,0.4),
transforms.ToTensor(),
Lighting(0.1),
normalize,
])
transform_test = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])
trainset = datasets.ImageFolder(os.path.join(config.dataset_path, 'train'), transform_train)
testset = datasets.ImageFolder(os.path.join(config.dataset_path, 'test'), transform_test)
kwargs = {'num_workers': 8, 'pin_memory': True} if config.cuda else {}
trainloader = torch.utils.data.DataLoader(trainset, batch_size=
config.batch_size, shuffle=True, **kwargs)
testloader = torch.utils.data.DataLoader(testset, batch_size=
config.batch_size, shuffle=False, **kwargs)
self.trainloader = trainloader
self.testloader = testloader
self.classes = trainset.classes
def getloader(self):
return self.classes, self.trainloader, self.testloader
if __name__ == "__main__":
data_dir = 'dataset/gtos-mobile'
trainset = datasets.ImageFolder(os.path.join(data_dir, 'train'))
testset = datasets.ImageFolder(os.path.join(data_dir, 'test'))
print(trainset.classes)
print(len(testset))
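    # Added usage sketch (not part of the original file): Dataloder expects a config object
    # exposing dataset_path, batch_size and cuda; a SimpleNamespace stands in here.
    from types import SimpleNamespace
    config = SimpleNamespace(dataset_path=data_dir, batch_size=32, cuda=False)
    classes, trainloader, testloader = Dataloder(config).getloader()
    print(len(classes), len(trainloader), len(testloader))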
```
#### File: experiments/gtos.dain.resnet/network.py
```python
import torch
import torch.nn as nn
from torch.autograd import Variable
class DAIN(nn.Module):
def __init__(self, nclass, model1, model2):
super(DAIN, self).__init__()
self.model1 = model1
self.model2 = model2
self.fc = nn.Linear(512*2, nclass)
def forward(self, img, diff_img):
# pre-trained ResNet feature
img_f = self.model1.conv1(img)
img_f = self.model1.bn1(img_f)
img_f = self.model1.relu(img_f)
img_f = self.model1.maxpool(img_f)
img_f = self.model1.layer1(img_f)
img_f = self.model1.layer2(img_f)
img_f = self.model1.layer3(img_f)
img_f = self.model1.layer4(img_f)
img_f = self.model1.avgpool(img_f)
# differential angular feature
diff_img_f = self.model2.conv1(diff_img)
diff_img_f = self.model2.bn1(diff_img_f)
diff_img_f = self.model2.relu(diff_img_f)
diff_img_f = self.model2.maxpool(diff_img_f)
diff_img_f = self.model2.layer1(diff_img_f)
diff_img_f = self.model2.layer2(diff_img_f)
diff_img_f = self.model2.layer3(diff_img_f)
diff_img_f = self.model2.layer4(diff_img_f)
diff_img_f = self.model2.avgpool(diff_img_f)
# DAIN head
img_f = torch.flatten(img_f, 1)
diff_img_f = torch.flatten(diff_img_f, 1)
diff_img_f = diff_img_f + img_f
out = torch.cat((img_f, diff_img_f), dim=1)
out = self.fc(out)
return out
def test():
    # DAIN needs two backbone networks; ResNet-18 provides the 512-d features expected by the fc layer.
    from torchvision.models import resnet18
    net = DAIN(nclass=23, model1=resnet18(), model2=resnet18()).cuda()
    print(net)
    img = Variable(torch.randn(1, 3, 224, 224)).cuda()
    diff_img = Variable(torch.randn(1, 3, 224, 224)).cuda()
    y = net(img, diff_img)
    print(y)
    num_params = 0
    for param in net.parameters():
        num_params += param.nelement()
    print('Total params:', num_params)
if __name__ == "__main__":
test()
```
#### File: pytorch-material-classification/utils/utils.py
```python
import os
import shutil
import torch
# refer to https://github.com/xternalz/WideResNet-pytorch
def save_checkpoint(state, config, is_best, filename='checkpoint.pth.tar'):
"""Saves checkpoint to disk"""
if not os.path.exists(config.snapshot_dir):
os.makedirs(config.snapshot_dir)
filename = config.snapshot_dir + filename
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, config.snapshot_dir + 'model_best.pth.tar')
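# Usage sketch (added for illustration, not part of the original file). `config` can be any
# object with a `snapshot_dir` attribute; note the directory string is concatenated directly
# above, so it should end with a path separator:
#
#   state = {'epoch': epoch, 'state_dict': model.state_dict(), 'best_prec': best_prec}
#   save_checkpoint(state, config, is_best=(prec > best_prec))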
``` |
{
"source": "jiaxunsongucb/library",
"score": 3
} |
#### File: library/code/spider.py
```python
import requests
from lxml import etree
import re
def connect_url(url, parser=etree.HTML, i=3, timeout=3):
req = None
for _ in range(i):
try:
req = requests.get(url=url, timeout=timeout)
if req.status_code == 200:
root = parser(req.content)
return True, root
else:
return False, "bad connection"
except:
pass
    # all retries failed (connection error or parse failure)
    return False, "bad network"
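# Usage sketch (added for illustration, not part of the original file):
#
#   ok, result = connect_url("http://example.com/page")
#   if ok:
#       root = result       # parsed lxml tree
#   else:
#       print(result)       # "bad connection" or "bad network"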
def getinfo_douban(isbn, data, timeout=10):
"""
    Douban Books API.
    Does not provide subject or call number information.
"""
print ("从豆瓣图书数据库中获得数据...")
data["isbn"] = isbn
url = "http://api.douban.com/book/subject/isbn/" + isbn
status, msg = connect_url(url, parser=etree.XML, timeout=timeout)
if status:
root = msg
else:
if msg == "bad connection":
print ("豆瓣图书数据库中无该书目!")
return False, data
if msg == "bad network":
print ("网络连接故障!")
return False, data
attributes = root.findall('{http://www.douban.com/xmlns/}attribute')
for attribute in attributes:
if attribute.attrib["name"] == "pages":
data["page_number"] = attribute.text
if attribute.attrib["name"] == "author":
data["author"] = attribute.text
if attribute.attrib["name"] == "price":
data["price"] = attribute.text
if attribute.attrib["name"] == "publisher":
data["publisher"] = attribute.text
if attribute.attrib["name"] == "pubdate":
data["publish_date"] = attribute.text
try:
title = root.find('./{http://www.w3.org/2005/Atom}title').text
data["title"] = "《{}》".format(title)
data["summary"] = root.find('./{http://www.w3.org/2005/Atom}summary').text
if data["summary"]:
data["summary"] = re.sub(r"[\\n\s\n\t\r]+", "", data["summary"])
except:
pass
return True, data
def getinfo_guotu1(isbn, data, timeout=10):
"""
    National Library of China database 1 (preferred source).
"""
print ("从国家图书馆数据库1中获得数据...")
data["isbn"] = isbn
    # try to obtain the session key
url_main = "http://opac.nlc.cn/F/"
status, msg = connect_url(url_main, timeout=timeout)
if status:
root_key = msg
try:
key = re.findall(r"opac.nlc.cn:80\/F\/(.+)\-", root_key.xpath('//head/meta[@http-equiv="REFRESH"]/@content')[0])[0]
except:
print ("网络连接故障!")
return False, data
else:
print ("网络连接故障!")
return False, data
    # search url
url = "http://opac.nlc.cn/F/" + key + "?func=find-b&find_code=ISB&request=" + isbn
    # try to connect to the search url
status, msg = connect_url(url, timeout=timeout)
if status:
root_main = msg
else:
print ("网络连接故障!")
return False, data
    # if a list of books is returned, take the url of the first book in the list
if str(root_main.xpath('/comment()')[0]) == "<!-- filename: short-2-head -->":
print ("查找到多本图书,正在选取列表中的第一本!")
try:
url = root_main.xpath('//*[@id="brief"]/table/tr[1]/td[1]/div[2]/table/tr/td[3]/div/a[1]/@href')[0]
except:
print ("国家图书馆数据库1中无该书目!")
return False, data
status, msg = connect_url(url, timeout=timeout)
if status:
root = msg
else:
print ("网络连接故障!")
return False, data
else:
root = root_main
    # parse book information
try:
for e in root.xpath('/html/head/comment()'):
if str(e).find("publish section")>0:
comment = str(e)
break
info_comment = re.findall(r'(ISBN[\w\W\d\W\s\S]+)DOC', comment)[0].split("\n")
data["publisher"] = info_comment[3].split(":")[1].strip()
data["price"] = info_comment[0].split()[-1]
data["call_no"] = info_comment[4].split(":")[1].strip()
info_table = root.xpath('//*[@id="td"]/tr')
for tr in info_table:
name = tr.xpath('.//td[1]/text()')[0].strip()
if name == "题名与责任":
name_temp = tr.xpath('.//td[2]/a/text()')[0].replace("\xa0", "")
try:
data["author"] = name_temp.split("/")[1]
except:
data["author"] = None
data["title"] = "《{}》".format(re.findall("(.+?)\[", name_temp)[0])
elif name == "著者":
author_alt = tr.xpath('.//td[2]/a/text()')[0].split("\xa0")[0]
elif name == "主题":
data["subject"] = tr.xpath('.//td[2]/a/text()')[0].replace("\xa0", "")
elif name == "内容提要":
data["summary"] = tr.xpath('.//td[2]/text()')[0].strip()
if data["summary"]:
data["summary"] = re.sub(r"[\\n\s\n\t\r]+", "", data["summary"])
elif name == "载体形态项":
data["page_number"] = tr.xpath('.//td[2]/text()')[0].strip().replace("\xa0", "")
elif name == "出版项":
data["publish_date"] = re.findall(',(.+)', tr.xpath('.//td/a/text()')[0].replace("\xa0", ""))[0].strip(")")
if not data["author"]:
data["author"] = author_alt
except:
print ("国家图书馆数据库1中无该书目!")
return False, data
return True, data
def getinfo_guotu2(isbn, data, timeout=10):
"""
    National Library of China database 2: incomplete catalogue, but accurate records.
    No price information.
"""
print ("从国家图书馆数据库2中获得数据...")
data["isbn"] = isbn
    # try to obtain the session key
url_main = "http://ucs.nlc.cn/F/"
status, msg = connect_url(url_main, timeout=timeout)
if status:
root_key = msg
try:
key = re.findall(r"ucs.nlc.cn:80\/F\/(.+)\-", root_key.xpath('//head/meta[@http-equiv="REFRESH"]/@content')[0])[0]
except:
print ("网络连接故障!")
return False, data
else:
print ("网络连接故障!")
return False, data
    # search url
url = "http://ucs.nlc.cn/F/" + key + "?func=find-b&find_code=ISB&request=" + isbn + "&local_base=UCS01"
    # try to connect to the search url
status, msg = connect_url(url, timeout=timeout)
if status:
root_main = msg
else:
print ("网络连接故障!")
return False, data
    # if a list of books is returned, take the url of the first book in the list
if str(root_main.xpath('/comment()')[0]) == "<!-- filename: short-2-head -->":
print ("查找到多本图书,正在选取列表中的第一本!")
try:
url = root_main.xpath('//*[@id="brief"]/table[1]/tr[1]/td[1]/table[1]/tr/td[3]/div/a/@href')[0]
except:
print ("国家图书馆数据库2中无该书目!")
return False, data
status, msg = connect_url(url, timeout=timeout)
if status:
root = msg
else:
print ("网络连接故障!")
return False, data
else:
root = root_main
    # parse book information
try:
for e in root.xpath('/html/head/comment()'):
if str(e).find("publish section")>0:
comment = str(e)
break
info_comment = re.findall(r'(ISBN[\w\W\d\W\s\S]+)DOC', comment)[0].split("\n")
data["publisher"] = info_comment[3].split(":")[1].strip()
data["call_no"] = info_comment[4].split(":")[1].strip()
info_table = root.xpath('//*[@id="details2"]/table/tr')
for tr in info_table:
name = tr.xpath('.//td[1]/text()')[0].strip()
if name == "题名与责任":
name_temp = tr.xpath('.//td[2]/a/text()')[0].replace("\xa0", "")
try:
data["author"] = name_temp.split("/")[1]
except:
data["author"] = None
data["title"] = "《{}》".format(re.findall("(.+?)\[", name_temp)[0])
elif name == "著者":
author_alt = tr.xpath('.//td[2]/a/text()')[0].split("\xa0")[0]
elif name == "主题":
data["subject"] = tr.xpath('.//td[2]/a/text()')[0].replace("\xa0", "")
elif name == "内容提要":
data["summary"] = tr.xpath('.//td[2]/text()')[0].strip()
if data["summary"]:
data["summary"] = re.sub(r"[\\n\s\n\t\r]+", "", data["summary"])
elif name == "载体形态项":
data["page_number"] = tr.xpath('.//td[2]/text()')[0].strip().replace("\xa0", "")
elif name == "出版项":
data["publish_date"] = re.findall(',(.+)', tr.xpath('.//td/a/text()')[0].replace("\xa0", ""))[0]
elif name == "电子馆藏:":
break
if not data["author"]:
data["author"] = author_alt
except:
print ("国家图书馆数据库2中无该书目!")
return False, data
return True, data
``` |
{
"source": "jiaxw32/FridaLib",
"score": 2
} |
#### File: Android/sample/android-app-dexdump.py
```python
import frida
import sys
import time
import os
import codecs
def on_message(message, data):
if message['type'] == 'send':
print("[*] {0}".format(message['payload']))
else:
print(message)
def loadJSScript(filePath):
source = ''
script_dir = os.path.dirname(os.path.realpath(__file__))
JSHookFile = os.path.join(script_dir, filePath)
with codecs.open(JSHookFile, 'r', 'utf-8') as f:
source = source + f.read()
return source
#/art/runtime/dex/art_dex_file_loader.cc on android 9
#std::unique_ptr<DexFile> ArtDexFileLoader::OpenCommon(const uint8_t* base,
# size_t size,
# const uint8_t* data_base,
# size_t data_size,
# const std::string& location,
# uint32_t location_checksum,
# const OatDexFile* oat_dex_file,
# bool verify,
# bool verify_checksum,
# std::string* error_msg,
# std::unique_ptr<DexFileContainer> container,
# VerifyResult* verify_result)
def do_hook():
hook_script = '''
XLOG("Init Android Native Frida Hook!");
var funcName = "_ZN3art13DexFileLoader10OpenCommonEPKhjS2_jRKNSt3__112basic_stringIcNS3_11char_traitsIcEENS3_9allocatorIcEEEEjPKNS_10OatDexFileEbbPS9_NS3_10unique_ptrINS_16DexFileContainerENS3_14default_deleteISH_EEEEPNS0_12VerifyResultE";
xia0NativeHook("libart.so", funcName, function (){
if(Memory.readU32(args[1]) == DEX_MAGIC) {
dexrec.push(args[1]);
}
},function (){
XLOG(" I am in onLeave");
});
Java.perform(function(){
var hookClass = Java.use("android.os.ServiceManager");
XLOG("Inited Android Java Frida Hook! Waiting for triggle");
});
'''
source = loadJSScript('../androidFridaLib.js')
return hook_script + source
app_package = "com.lvse.juren"
device = frida.get_remote_device()
pid = device.spawn(app_package)
session = device.attach(pid)
script = session.create_script(do_hook())
script.on("message", on_message)
script.load()
sys.stdin.read()
``` |
{
"source": "jiaxw32/iScripts",
"score": 3
} |
#### File: iScripts/Python/darkimage.py
```python
import json
import os
import shutil
import xlsxwriter
from xlsxwriter import workbook
from xlsxwriter import worksheet
def create_workbook(filename):
if os.path.exists(filename):
os.remove(filename)
workbook = xlsxwriter.Workbook(filename)
return workbook
def create_worksheet(workbook):
worksheet = workbook.add_worksheet('darkimage')
cell_format = workbook.add_format()
    cell_format.set_bg_color('#BFBFBF') # set cell background color
headers = [
('pod', 23),
('filename', 60),
('filepath', 200),
('filesize', 12),
('scale', 10)
]
idx = 0
for header in headers:
title, width = header
worksheet.write(0, idx, title, cell_format)
        worksheet.set_column(idx, idx, width) # set column width
idx += 1
return worksheet
def search_dark_imagefile(wbdir: str, callback = None):
cnt = 0
for root, dirs, files in os.walk(wbdir):
for file in files:
filename, file_extension = os.path.splitext(file)
if file_extension == '.json' and filename == 'Contents' and not dirs:
contents_file = os.path.join(root, file)
paths = os.path.normpath(root.replace(wbdir, '')).split(os.sep)
pod = paths[1]
with open(contents_file) as json_file:
data = json.load(json_file)
if 'images' in data:
for image in data['images']:
if ('appearances' not in image) or ('filename' not in image):
continue
appearances = image['appearances']
imagename = image['filename']
                            scale = image.get('scale', 'unknown')
for obj in appearances:
if 'value' not in obj:
continue
mode = obj['value']
if mode == 'dark':
imagefile = os.path.join(root, imagename)
cnt += 1
callback(cnt, pod, imagefile, scale)
if __name__ == "__main__":
wbdir = "/Users/admin/iproject"
destdir = "/Users/admin/darkimage"
if not os.path.exists(destdir):
os.makedirs(destdir)
xlsxfile = os.path.join(destdir, 'darkimage.xlsx')
workbook = create_workbook(xlsxfile)
worksheet = create_worksheet(workbook)
def search_darkimage_handler(row: int, pod: str, imagefile: str, scale: str):
imagename = os.path.basename(imagefile)
imagesize = os.path.getsize(imagefile)
subpath = imagefile[len(wbdir):]
# insert row data
worksheet.write(row, 0, pod)
worksheet.write(row, 1, imagename)
worksheet.write(row, 2, subpath)
worksheet.write_number(row, 3, imagesize)
worksheet.write(row, 4, scale)
# copy image to dest dir
dstdir = os.path.join(destdir, pod)
if not os.path.exists(dstdir):
os.makedirs(dstdir)
shutil.copy2(imagefile, dstdir)
search_dark_imagefile(wbdir, callback=search_darkimage_handler)
workbook.close()
``` |
{
"source": "jiaxx/temporal_learning_paper",
"score": 2
} |
#### File: temporal_learning_paper/code/main.py
```python
import numpy as np
import scipy
from scipy.stats import norm
import numpy.random as npr
import random
import utils as ut
import learningutil as lt
def d_prime(CF):
d = []
for i in range(len(CF[1])):
        H = CF[i, i]/sum(CF[:,i]) # H = target diagonal / target column
tempCF = scipy.delete(CF, i, 1) # delete the target column
F = sum(tempCF[i,:])/sum(tempCF)
d.append(norm.ppf(H)-norm.ppf(F))
return d
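# Added worked example (not in the original file), assuming columns index the true class as
# the comment above suggests. For CF = np.array([[8, 2], [3, 7]]):
#   class 0: hit rate H = 8 / (8 + 3) ~= 0.727, false-alarm rate F = 2 / (2 + 7) ~= 0.222
#   d'[0]  = norm.ppf(0.727) - norm.ppf(0.222) ~= 0.60 - (-0.76) ~= 1.37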
def sample_with_replacement(list):
    l = len(list) # the sample needs to be as long as list
    r = range(l)
    _random = random.random
    return [list[int(_random()*l)] for i in r] # sample indices uniformly, with replacement
def compute_CM(neuron, meta, obj, s, train, test):
metric_kwargs = {'model_type': 'MCC2'} # multi-class classifier
eval_config = {
'train_q': {'obj': [obj[0], obj[1]]}, # train on all sizes
'test_q': {'obj': [obj[0], obj[1]], 's': [s]}, #'size_range': [1.3],
'npc_train': train, #smaller than total number of samples in each split_by object
'npc_test': test,
'npc_validate': 0,
'num_splits': 100,
'split_by': 'obj',
'metric_screen': 'classifier', # use correlation matrix as classifier
'labelfunc': 'obj',
'metric_kwargs': metric_kwargs,
}
result = ut.compute_metric_base(neuron, meta, eval_config)
# sum of the CMs is equal to npc_test*number of objs
CMs = []
for i in range(eval_config['num_splits']):
temp = np.array(result['result_summary']['cms'])[:,:,i]
CMs.append(lt.normalize_CM(temp))
d = ut.dprime(np.mean(CMs,axis=0))[0]
return CMs, d
def compute_CM_samesize(neuron, meta, obj, s, train, test):
metric_kwargs = {'model_type': 'MCC2'} # multi-class classifier
eval_config = {
'train_q': {'obj': [obj[0], obj[1]], 's': [s]}, # train on particular size
'test_q': {'obj': [obj[0], obj[1]], 's': [s]}, #test on particular size
'npc_train': train, #smaller than total number of samples in each split_by object
'npc_test': test,
'npc_validate': 0,
'num_splits': 100,
'split_by': 'obj',
'metric_screen': 'classifier', # use correlation matrix as classifier
'labelfunc': 'obj',
'metric_kwargs': metric_kwargs,
}
result = ut.compute_metric_base(neuron, meta, eval_config)
# sum of the CMs is equal to npc_test*number of objs
CMs = []
for i in range(eval_config['num_splits']):
temp = np.array(result['result_summary']['cms'])[:,:,i]
CMs.append(lt.normalize_CM(temp))
d = ut.dprime(np.mean(CMs,axis=0))[0]
return CMs, d
def compute_CM_fixed_classifier(neuron, meta, obj, s, train, test):
metric_kwargs = {'model_type': 'MCC2'} # multi-class classifier
eval_config = {
'train_q': {'obj': [obj[0], obj[1]], 'test_phase':['Pre']}, # train on all sizes
'test_q': {'obj': [obj[0], obj[1]], 's': [s], 'test_phase':['Post']}, #'size_range': [1.3],
'npc_train': train, #smaller than total number of samples in each split_by object
'npc_test': test,
'npc_validate': 0,
'num_splits': 100,
'split_by': 'obj',
'metric_screen': 'classifier', # use correlation matrix as classifier
'labelfunc': 'obj',
'metric_kwargs': metric_kwargs,
}
result = ut.compute_metric_base(neuron, meta, eval_config)
# sum of the CMs is equal to npc_test*number of objs
CMs = []
for i in range(eval_config['num_splits']):
temp = np.array(result['result_summary']['cms'])[:,:,i]
CMs.append(lt.normalize_CM(temp))
d = ut.dprime(np.mean(CMs,axis=0))[0]
return CMs, d
``` |
{
"source": "jiaxy/EHAdviser",
"score": 3
} |
#### File: demo/ehadvisor/__init__.py
```python
from typing import List, Optional
import pandas as pd
from . import core, graph, nlp
from .data_structure import Method, ChainEntry, ProjectFeature
__all__ = ['EHAdvisor']
class EHAdvisor:
def __init__(self,
project_folder: str,
abstract_path: Optional[str],
predictor_path: str,
get_features_jar_path: str,
features_csv_path: str,
comments_csv_path: str,
link_txt_path: str,
abstract_doc2vec_path: str,
method_docs_doc2vec_path: str,
abstract_vec_size=128,
method_doc_vec_size=128):
self.__proj_folder = project_folder
self.__abs_path = abstract_path
self.__jar_path = get_features_jar_path
self.__feat_csv = features_csv_path
self.__cmt_csv = comments_csv_path
self.__link_txt = link_txt_path
        self.__abs_vec_size = abstract_vec_size
self.__mdoc_vec_size = method_doc_vec_size
print('__init__')
# load autogluon model
self.__predictor = core.load_predictor(predictor_path)
print('autogluon model loaded')
# construct graph and method feat
self.__graph = None
self.__method_feats = None
self.update_methods()
print('method feat ok')
# load doc2vec models
from gensim.models.doc2vec import Doc2Vec
self.__abs_model = Doc2Vec.load(abstract_doc2vec_path)
self.__mdoc_model = Doc2Vec.load(method_docs_doc2vec_path)
print('doc2vec model loaded')
# construct project feature
self.__proj_feat = ProjectFeature()
self.update_abstract()
self.update_dependencies()
print('project feat ok')
def update_methods(self):
"""update call graph and method feat dict"""
core.run_jar(self.__proj_folder, self.__jar_path) # run jar
# construct call graph
with open(self.__link_txt, encoding='utf-8') as link_txt:
self.__graph = core.call_graph_from_link(
link_txt.read().splitlines())
# construct method feature dict
self.__method_feats = core.method_features_from_df(
pd.read_csv(self.__feat_csv))
core.complete_method_features(
method_features=self.__method_feats,
graph=self.__graph,
comment_dict=core.read_comments(self.__cmt_csv)
)
def update_abstract(self, new_abs_path: Optional[str] = None):
if new_abs_path is not None:
self.__abs_path = new_abs_path
self.__proj_feat.abstract_vec = core.make_abstract_vec(
project_folder=self.__proj_folder,
readme_path=self.__abs_path,
model=self.__abs_model
)
def update_dependencies(self):
self.__proj_feat.dependencies_vec = core.make_dependencies_vec(
self.__proj_folder)
def query(self, exception_source: Method) -> List[List[ChainEntry]]:
# TODO check the method is ex-src or not
if exception_source not in self.__method_feats:
return []
chains: List[List[Method]] = graph.chains_from_source(
self.__graph, exception_source)
results: List[List[ChainEntry]] = []
for chain in chains:
prediction = core.predict_chain(
chain=chain,
source=exception_source,
method_features=self.__method_feats,
project_feature=self.__proj_feat,
d2v_model=self.__mdoc_model,
predictor=self.__predictor
)
results.append(prediction)
return results
def query_all(self) -> List[List[ChainEntry]]:
chains: List[List[Method]] = []
sources: List[Method] = []
for source in self.__method_feats:
for chain in graph.chains_from_source(self.__graph, source):
chains.append(chain)
sources.append(source)
results = core.predict_chains(
chains=chains,
sources=sources,
method_feats=self.__method_feats,
proj_feat=self.__proj_feat,
d2v_model=self.__mdoc_model,
predictor=self.__predictor
)
return results
``` |
{
"source": "jiayanduo456/xeasy-ml",
"score": 3
} |
#### File: src/cross_validation/data_split.py
```python
import traceback
import pandas as pd
from ..systemlog import sysmanagerlog, syserrorlog
from ..ml_utils import runstatus
from ..ml_utils import pre_utils
class DataSplit(object):
"""
This class is used to split a data set into train and test data set.
Parameters
--------
conf: configparser.ConfigParser, default = None
Configuration file for data set division.
data: pandas.DataFrame, default = None
Data set need split.
Attributes
--------
_is_executed: bool, default = False
The flag of the data set dividing.
Examples
--------
>>> from xes_ml_arch.src.cross_validation import data_split
>>> from xes_ml_arch.src.model import model_factory
>>> import configparser
>>> import pandas as pd
>>> import numpy as np
>>> import random
>>> conf = configparser.ConfigParser()
>>> conf.read("myconfig.conf")
>>> columns = ["col%s" % x for x in range(10)]
>>> x = columns[:9]
>>> y = columns[9]
>>> data = pd.DataFrame(
>>> [[int(random.random() * 100) for _x in range(10)] for _y in range(1000)],
>>> columns=columns)
>>> data["col9"] = np.random.choice([0, 1], size=1000)
>>> ins = data_split.DataSplit(conf=conf, data=data)
>>> ins.execute()
>>> train_data = ins.train_data
>>> test_data = ins.test_data
>>> ins.store_train_data()
>>> ins.store_test_data()
"""
DATA_SPLIT = "data_split"
RATIO = "test_ratio"
TRAIN_FILE = "train_file"
TEST_FILE = "test_file"
LOAD_DATA_FROM_LOCAL_FILE = "load_data_from_local_file"
TRUE = "true"
def __init__(self, conf=None, log_path = None, data=None):
self._conf = conf
self.xeasy_log_path = log_path
self._data = data
self._train_data = None
self._test_data = None
self._is_executed = False
self.managerlogger = sysmanagerlog.SysManagerLog(__file__, self.xeasy_log_path)
self.errorlogger = syserrorlog.SysErrorLog(__file__, self.xeasy_log_path)
def reset(self, conf=None, data=None):
"""
Reset config and data.
Parameters
--------
conf: configparser.ConfigParser, default = None
Configuration file for data set division.
data: pandas.DataFrame, default = None
Data set need split.
"""
if conf is not None:
self._conf = conf
if data is not None:
self._data = data
self._is_executed = False
def execute(self):
"""
Start dividing the data set.
Returns
--------
Bool: runstatus.RunStatus
runstatus.RunStatus.SUCC: True
runstatus.RunStatus.FAILED: Failed
"""
if self._is_executed:
return runstatus.RunStatus.SUCC
if not pre_utils.PredictUtils.valid_pandas_data(self._data):
return runstatus.RunStatus.FAILED
try:
if self.LOAD_DATA_FROM_LOCAL_FILE in self._conf.options(self.DATA_SPLIT):
if self._conf.get(self.DATA_SPLIT,
self.LOAD_DATA_FROM_LOCAL_FILE).lower() == self.TRUE:
return self._load_data()
self._shuf_data()
# Dividing
self._split_data()
self._is_executed = True
self.store_train_data()
self.store_test_data()
return runstatus.RunStatus.SUCC
except Exception as err:
self.errorlogger.logger.error("cross validation error\n" + traceback.format_exc())
self.managerlogger.logger.error("cross validation error: %s" % err)
return runstatus.RunStatus.FAILED
@property
def train_data(self):
"""
Get train data as property.
Examples
--------
usage: ds = data_split.DataSplit()
train_data =ds.train_data
Returns
--------
self._train_data : pandas.DatFrame
"""
if pre_utils.PredictUtils.valid_pandas_data(self._train_data):
return self._train_data
else:
raise TypeError("split train data error")
@property
def test_data(self):
"""
Get test data as property.
Examples
--------
usage: ds = data_split.DataSplit()
test_data =ds.test_data
Returns
--------
self._test_data : pandas.DateFrame
"""
if pre_utils.PredictUtils.valid_pandas_data(self._test_data):
return self._test_data
else:
raise TypeError("split test data error")
def store_train_data(self):
"""Store the divided train data set to file."""
return self._store_data(self.TRAIN_FILE, self._train_data)
def store_test_data(self):
"""Store the divided test data set to file."""
return self._store_data(self.TEST_FILE, self._test_data)
def _split_data(self):
"""Divide orignal data into test data and train data."""
# get sample ratio
try:
ratio = float(self._conf.get(self.DATA_SPLIT, self.RATIO))
except TypeError:
self.managerlogger.logger.info(
"%s not found %s, use default value 0.1" % (self.DATA_SPLIT, self.RATIO))
ratio = 0.1
if ratio > 1 or ratio < 0:
raise ValueError("sample ratio=%s, but range (0, 1) is allowed" % (ratio))
cure = int(self._data.shape[0] * ratio)
self._test_data = self._data.iloc[:cure]
self._train_data = self._data.iloc[cure:]
def _shuf_data(self):
"""Shuffle data."""
# self._data = shuffle(self._data)
self._data = self._data.sample(frac=1)
def _store_data(self, result_conf, data):
"""
Store data to file.
Parameters
--------
result_conf: str
File path in configuration file.
data: pandas.DataFrame
Data need to store.
Returns
--------
:return: bool, runstatus.RunStatus
runstatus.RunStatus.SUCC: True
runstatus.RunStatus.FAILED: Failed
"""
if not pre_utils.PredictUtils.valid_pandas_data(data):
self.managerlogger.logger.error("train data is null")
return runstatus.RunStatus.FAILED
try:
            result_path = self._conf.get(self.DATA_SPLIT, result_conf)
        except Exception as err:
            self.managerlogger.logger.error("result path does not exist, please check the config: %s" % err)
            return runstatus.RunStatus.FAILED
        data.to_csv(result_path, index=False, encoding='utf8')
return runstatus.RunStatus.SUCC
def _load_data(self):
"""
Load data from local file system.
Returns
--------
:return: bool, runstatus.RunStatus
runstatus.RunStatus.SUCC: True
runstatus.RunStatus.FAILED: Failed
"""
try:
# get file path
test_data_file = self._conf.get(self.DATA_SPLIT, self.TEST_FILE)
train_data_file = self._conf.get(self.DATA_SPLIT, self.TRAIN_FILE)
# read_file
self._train_data = pd.read_csv(train_data_file)
self._test_data = pd.read_csv(test_data_file)
self._is_executed = True
self.managerlogger.logger.info("load data success")
return runstatus.RunStatus.SUCC
except Exception as err:
self.managerlogger.logger.error("load error: %s" % err)
self.errorlogger.logger.error("data split load data error! \n" + traceback.format_exc())
return runstatus.RunStatus.FAILED
```
#### File: src/feature_enginnering/data_sampler.py
```python
import pandas as pd
import traceback
from ..systemlog import sysmanagerlog, syserrorlog
from ..systemlog import syserrorlog
from ..ml_utils import runstatus
from ..ml_utils import pre_utils
class DataSampler(object):
"""
Data sampling of train data or test data.
Parameters
---------
conf: configparser.ConfigParser
Configuration information
data: pandas.DataFrame
sample_rate: float
target: col of data, label
base_size:int, default = -1
Examples
--------
>>> from xes_ml_arch.src.feature_enginnering import data_sampler
>>> import configparser
>>> import pandas as pd
>>> conf = configparser.ConfigParser()
>>> conf.read("myconfig.conf")
>>> data = pd.read_csv("my_data.csv")
# this
>>> ins = data_sampler.DataSampler(data=data, conf=conf, target='col9')
>>> ins.init()
>>> ins.excute()
# or this
>>> ins = data_sampler.DataSampler(conf=conf, target='col9')
>>> ins.init()
>>> ins.set_data(data=data)
>>> ins.excute()
"""
DATA = "data"
VALUE = "value"
SIZE = "size"
RATE = "rate"
DATA_SAMPLE = "data_sample"
SAMPLE_RATE = "sample_rate"
SAMPLE_FLAG = "sample_flag"
def __init__(self, conf=None, log_path = None, data=None, sample_rate=None, target="", base_size=-1):
self._conf = conf
self.xeasy_log_path = log_path
self._data = data
self._sample_rate = sample_rate
self._target = target
self._base_size = base_size
self._result_sample_data = None
self._type_data = []
self._sample_flag = True
self.managerlogger = sysmanagerlog.SysManagerLog(__file__,self.xeasy_log_path)
self.errorlogger = syserrorlog.SysErrorLog(__file__,self.xeasy_log_path)
def init(self):
"""
Init data sampler object
Returns
---------
:return: bool, runstatus.RunStatus
runstatus.RunStatus.SUCC: True
runstatus.RunStatus.FAILED: Failed
"""
try:
self._sample_flag = eval(self._conf.get(self.DATA_SAMPLE, self.SAMPLE_FLAG))
if not self._sample_flag:
return runstatus.RunStatus.SUCC
self._sample_rate = eval(self._conf.get(self.DATA_SAMPLE, self.SAMPLE_RATE))
return runstatus.RunStatus.SUCC
except Exception as err:
self.managerlogger.logger.error("data sampler init error: %s" % err)
self.errorlogger.logger.error("data sampler init error:\n %s" % traceback.format_exc())
return runstatus.RunStatus.FAILED
def set_target_field(self, target_str):
"""
Set target.
Parameters
----------
target_str: str
name of col, label
Returns
----------
:return: bool, runstatus
runstatus.RunStatus.SUCC: True
runstatus.RunStatus.FAILED: Failed
"""
if not isinstance(target_str, str):
return runstatus.RunStatus.FAILED
self._target = target_str
return runstatus.RunStatus.SUCC
def excute(self):
'''
Data sampling start.
Returns
----------
:return: bool, runstatus
runstatus.RunStatus.SUCC: True
runstatus.RunStatus.FAILED: Failed
'''
self.managerlogger.logger.info("start sample")
# init
if not self._sample_flag:
self.managerlogger.logger.info("no need to sample")
self._result_sample_data = self._data
return runstatus.RunStatus.SUCC
if self._init() == runstatus.RunStatus.FAILED:
return runstatus.RunStatus.FAILED
# sample
if self._start_sample() == runstatus.RunStatus.FAILED:
return runstatus.RunStatus.FAILED
self.managerlogger.logger.info("finish sample")
return runstatus.RunStatus.SUCC
def get_data(self):
"""
Get data.
Returns
----------
:return: result of data sample
"""
return self._result_sample_data
def set_data(self, data):
"""
Set data.
Parameters
----------
data: pandas.DataFrame
data need sample
Returns
----------
:return: bool, runstatus
runstatus.RunStatus.SUCC: True
runstatus.RunStatus.FAILED: Failed
"""
if not pre_utils.PredictUtils.valid_pandas_data(data):
return runstatus.RunStatus.FAILED
self._data = data
return runstatus.RunStatus.SUCC
def _init(self):
"""
Check input and init info of all class
Returns
----------
:return: bool, runstatus.RunStatus
runstatus.RunStatus.SUCC: True
runstatus.RunStatus.FAILED: Failed
"""
if not isinstance(self._data, pd.DataFrame):
self.managerlogger.logger.error("input data is not dataframe")
return runstatus.RunStatus.FAILED
if self._target == "":
self.managerlogger.logger.error("target not find")
return runstatus.RunStatus.FAILED
if self._sample_rate is None:
self.managerlogger.logger.error("sample rate is required")
return runstatus.RunStatus.FAILED
return self._init_type_data()
def _init_type_data(self):
"""
Init info of all class:
{data:samples, value:target value, size:numbers of sample, rate:rate of max(size)}
Returns
----------
:return: bool, runstatus.RunStatus
runstatus.RunStatus.SUCC: True
runstatus.RunStatus.FAILED: Failed
"""
# Get the number of samples for each category, and the label value.
di_type_size = self._get_type_size()
if len(self._sample_rate) != len(di_type_size):
self.managerlogger.logger.error(
"%s \n and %s \n not match" % (str(self._sample_rate), str(di_type_size)))
return runstatus.RunStatus.FAILED
try:
for key in self._sample_rate:
tmp_data = self._data[self._data[self._target] == key]
tmp_size = di_type_size[key]
tmp_rate = self._sample_rate[key]
tmp_res_di = {self.DATA: tmp_data, self.SIZE: tmp_size, self.RATE: tmp_rate,
self.VALUE: key}
self._type_data.append(tmp_res_di)
self.managerlogger.logger.info("_init_type_data succeed")
return runstatus.RunStatus.SUCC
except Exception as e:
self.managerlogger.logger.error("_init_type_data error: %s" % e)
self.errorlogger.logger.error(traceback.format_exc())
return runstatus.RunStatus.FAILED
def _get_type_size(self):
"""
Get szie for every class
Returns
---------
:return: dict
Return the name of each category and the corresponding quantity of each category.
"""
count_series = self._data[self._target].value_counts()
if self._base_size == -1:
self._base_size = max(count_series.tolist())
return dict(zip(count_series.index.tolist(), count_series.tolist()))
def _start_sample(self):
"""
Sample function.
Returns
---------
:return: bool, runstatus.RunStatus
runstatus.RunStatus.SUCC: True
runstatus.RunStatus.FAILED: Failed
"""
try:
self._result_sample_data = pd.DataFrame(columns=self._data.columns)
for di_value in self._type_data:
sample_size = di_value[self.SIZE]
sample_rate = di_value[self.RATE]
sample_data = di_value[self.DATA]
# Calculate the number of samples after sampling according to the relative
# sampling ratio, compare with the actual number of samples, and select
# over-sampling or under-sampling.
target_size = int(self._base_size * sample_rate)
if target_size < sample_size:
# under-sampling
sample_data = sample_data.sample(n=target_size, replace=False, random_state=37)
elif target_size > sample_size:
# over-sampling
add_sample_data = sample_data.sample(n=target_size - sample_size, replace=True,
random_state=37)
sample_data = sample_data.append(add_sample_data)
self._result_sample_data = self._result_sample_data.append(sample_data)
# set type for all col
for col in self._result_sample_data.columns:
try:
self._result_sample_data[col] = self._result_sample_data[col].astype("float64")
except:
self.errorlogger.logger.warning("%s can not astype float, which type is %s" % (
col, str(self._result_sample_data[col].dtype)))
return runstatus.RunStatus.SUCC
except Exception as e:
self.managerlogger.logger.error("start sample error: %s" % e)
self.errorlogger.logger.error("start sample error:\n %s" % traceback.format_exc())
return runstatus.RunStatus.FAILED
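    # Added sketch (not in the original): how the relative ratios work. With base_size = 1000
    # (the largest class by default) and sample_rate = {0: 1.0, 1: 0.5}, class 0 targets
    # 1000 rows and class 1 targets 500. A class with more rows than its target is
    # under-sampled without replacement; a class with fewer rows is padded by sampling the
    # missing rows with replacement (random_state=37 in both cases).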
```
#### File: src/feature_enginnering/feature_discretizer.py
```python
import pandas as pd
import numpy as np
import traceback
from ..systemlog import sysmanagerlog
from ..systemlog import syserrorlog
class FeatureDiscretizer(object):
"""
Discretize feature.
Parameters
----------
data : pandas.DataFrame, default = None
origin dataset
conf: configparser.ConfigParser, default = None
Configuration file
Examples
--------
>>> from xes_ml_arch.src.feature_enginnering import feature_discretizer
>>> import configparser
>>> import pandas as pd
>>> conf = configparser.ConfigParser()
>>> conf.read("myconfig.conf")
>>> data = pd.read_csv("my_data.csv")
# this
>>> ins = feature_discretizer.FeatureDiscretizer(data=data, conf=conf)
>>> ins.excute()
"""
DIS_FUNC = "dis_func"
ARGS = "args"
EQ_FREQUE = "eq_freque"
EQ_FREQUE_VALUE = "eq_freque_value"
EQ_VALUE = "eq_value"
FEATURE_DISCRETIZE = "feature_discretize"
PARAMS = "params"
def __init__(self, data=None, conf=None, log_path = None):
self._data = data
self._conf = conf
self._feature_conf = {}
self.managerlogger = sysmanagerlog.SysManagerLog(__file__, log_path)
self.errorlogger = syserrorlog.SysErrorLog(__file__, log_path)
def _init(self):
"""
Initialization.
Returns
--------
:return: bool
True: succ
False: failed
"""
try:
if self.FEATURE_DISCRETIZE not in self._conf.sections():
return False
# load config
self._feature_conf = eval(self._conf.get(self.FEATURE_DISCRETIZE, self.PARAMS))
self.managerlogger.logger.info("feature discretizer init succeed")
return True
except Exception as e:
self.managerlogger.logger.error("Feature Discretizer error: %s" % e)
self.errorlogger.logger.error("Feature Discretizer error:\n %s" % traceback.format_exc())
return False
def _discretize(self):
"""
Discretize features based on configuration
Returns
--------
:return: bool
True: succ
False: failed
"""
try:
data_columns = self._data.columns.tolist()
for key in self._feature_conf:
if key not in data_columns:
continue
# Use different discretization functions according to configuration.
func = self._discretize_freque
if self._feature_conf[key][self.DIS_FUNC] == self.EQ_FREQUE:
func = self._discretize_freque
elif self._feature_conf[key][self.DIS_FUNC] == self.EQ_VALUE:
func = self._discretize_value
elif self._feature_conf[key][self.DIS_FUNC] == self.EQ_FREQUE_VALUE:
func = self._discretize_freque_value
self._data[key] = pd.Series(func(self._data[key].tolist(), self._feature_conf[key][self.ARGS]))
self.managerlogger.logger.info("feature discretize succeed")
return True
except Exception as e:
self.managerlogger.logger.error("feature discretize error: %s" % e)
self.errorlogger.logger.error("feature discretize error:\n %s" % traceback.format_exc())
return False
def _discretize_freque(self, value_list, args):
"""
Equal frequency division, evenly divided into multiple segments.
Parameters
--------
value_list:
Columns that need to be discretized in the data set
args: int
Number of discrete classes.
Returns
--------
tmp_res : list
Discretization result
Examples
--------
        >>> value_list = [7, 3, 4, 2, 9, 8]
        >>> args = 2
        >>> self._discretize_freque(value_list, args)
[1, 0, 0, 0, 1, 1]
"""
length = len(value_list)
tmp_res = [[index, value_list[index], 0] for index in range(length)]
tmp_res.sort(key=lambda x: x[1])
class_nums = int(float(args))
res = [int((x * class_nums) / length) for x in range(length)]
for index in range(len(tmp_res)):
tmp_res[index][2] = res[index]
tmp_res.sort(key=lambda x: x[0])
tmp_res = [x[2] for x in tmp_res]
return tmp_res
def _discretize_freque_value(self, value_list, args):
"""
Equal frequency division, different from the above equal frequency division, the same
value can be divided into the same category.
Parameters
--------
value_list:
Columns that need to be discretized in the data set
args: int
Number of discrete classes.
Returns
--------
:return: list
"""
length = len(value_list)
tmp_res = [[index, value_list[index], 0] for index in range(length)]
tmp_res.sort(key=lambda x: x[1])
class_nums = int(float(args))
# Divide equally into n segments and take the value of the node in each segment.
step = int(length / class_nums)
bins = [tmp_res[x][1] for x in range(step, length, step)]
return np.digitize(value_list, bins=bins).tolist()
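    # Added example (not in the original docstring), using the same inputs as the other methods:
    #   value_list = [7, 3, 4, 2, 9, 8], args = 2  ->  bins = [7]  ->  [1, 0, 0, 0, 1, 1]
    # Unlike _discretize_freque, the cut points here are actual data values handed to
    # np.digitize, so equal values always end up in the same bucket.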
def _discretize_value(self, value_list, args):
"""
Discretize according to the given threshold.
Parameters
--------
value_list: list
args: list
Returns
--------
:return: list
Examples
--------
>>> a = [7, 3, 4, 2, 9, 8]
>>> args = [4, 7, 10]
        >>> self._discretize_value(a, args)
[2, 0, 1, 0, 2, 2]
"""
return np.digitize(value_list, bins=args).tolist()
def reset(self, data=None, conf=None):
"""
Reset data and conf.
Parameters
--------
data: pandas,DataFrame
conf: ConfigParser.ConfigParser
"""
if not isinstance(data, pd.DataFrame):
self.managerlogger.logger.error("data is not dataframe")
return False
self._data = data
self._conf = conf
def excute(self):
"""
Excute Member method,self._init and self._discretize.
Returns
--------
:return: bool
True: succ
False: failed
"""
if not self._init():
return False
return self._discretize()
@property
def get_data(self):
"""
useage: feature_discretizer = FeatureDiscretizer()
data = feature_discretizer.get_data
Returns
--------
self._data
"""
return self._data
```
#### File: src/feature_enginnering/pre_feature_utils.py
```python
import traceback
import configparser
from ..ml_utils import pre_utils
from ..systemlog import sysmanagerlog, syserrorlog
from ..ml_utils import runstatus
# assumed to be a sibling module of this package; needed so the eval() dispatch below
# (FEATURE_PROC = "feature_processor.FeatureProcessor.") can resolve the name
from . import feature_processor
class PreFeatureUtils(object):
"""
Data Feature preprocessing
Parameters
--------
data : pandas.DataFrame
conf : configparser.ConfigParser
Examples
--------
>>> from xes_ml_arch.src.feature_enginnering import pre_feature_utils
>>> import configparser
>>> import pandas as pd
>>> conf = configparser.ConfigParser()
>>> conf.read("pre_feature.conf")
>>> data = pd.read_csv("my_data.csv")
# this
>>> ins = pre_feature_utils.PreFeatureUtils(data=data, conf=conf)
>>> ins.init()
>>> ins.excute()
# or this
>>> ins = pre_feature_utils.PreFeatureUtils(conf=conf)
>>> ins.init()
>>> ins.set_data(data)
>>> ins.excute()
pre_feature.conf
----------------
[pre_feature_utils]
pre_flag = True # if use Feature preprocessing or not
single_feature_apply = {"col0":"time2stamp", "col1":"stamp2time"}
multi_feature_apply = {'col0-col1':'minus_data', 'col2-col3':'abs_minus_data'}
# which_feature_apply = {col_name: function name in utils}
"""
FEATURE_PROC = "feature_processor.FeatureProcessor."
PRE_FEATURE = 'pre_feature_utils'
PRE_FLAG = 'pre_flag'
SINGLE_APP = 'single_feature_apply'
MULTI_APP = 'multi_feature_apply'
DASH = ','
def __init__(self, data=None, conf=None, log_path = None):
self._conf = conf
self.xeasy_log_path = log_path
self._data = data
self._single_app = {}
self._multi_app = {}
self._feature_function = ''
self._pre_flag = False
self.managerlogger = sysmanagerlog.SysManagerLog(__file__,self.xeasy_log_path)
self.errorlogger = syserrorlog.SysErrorLog(__file__,self.xeasy_log_path)
def init(self):
"""
Init feature pre utils object.
Returns
--------
:return: bool, runstatus.RunStatus
runstatus.RunStatus.SUCC: True
runstatus.RunStatus.FAILED: Failed
"""
try:
if not isinstance(self._conf, configparser.ConfigParser):
self.managerlogger.logger.error("init pre feature_utils error: config is illegal")
return runstatus.RunStatus.FAILED
self._pre_flag = eval(self._conf.get(self.PRE_FEATURE, self.PRE_FLAG))
if not self._pre_flag:
return runstatus.RunStatus.SUCC
if self.SINGLE_APP in self._conf.options(self.PRE_FEATURE):
self._single_app = eval(self._conf.get(self.PRE_FEATURE, self.SINGLE_APP))
if self.MULTI_APP in self._conf.options(self.PRE_FEATURE):
self._multi_app = eval(self._conf.get(self.PRE_FEATURE, self.MULTI_APP))
return runstatus.RunStatus.SUCC
except Exception as err:
self.managerlogger.logger.error("feature pre process init error: %s" % err)
self.errorlogger.logger.error("feature pre process init error:\n %s" % traceback.format_exc())
return runstatus.RunStatus.FAILED
def set_data(self, data):
"""
Set data.
Parameters
--------
data: pandas.DataFrame
data to feature pre,
Returns
--------
:return: bool
runstatus.RunStatus.SUCC: True
runstatus.RunStatus.FAILED: False
"""
if not pre_utils.PredictUtils.valid_pandas_data(data):
self.managerlogger.logger.error("feature pre process set data error")
return runstatus.RunStatus.FAILED
self._data = data
return runstatus.RunStatus.SUCC
def excute(self):
'''
Excute.
Returns
--------
self._data: processed data
'''
if not self._init():
return None
if not self._pre_flag:
self.managerlogger.logger.info("no need pre feature processer")
return self._data
return self.pre_feature_process()
def _init(self):
"""
Here, we convert all column names into lowercase letters, confirm that the data
is a pandas DataFrame and the length is not zero.
Returns
------
:return : bool, runstatus.RunStatus
runstatus.RunStatus.SUCC: True
runstatus.RunStatus.FAILED: Failed
"""
if not pre_utils.PredictUtils.valid_pandas_data(self._data):
self.errorlogger.logger.error("pre feature data is not DataFram: %s" % type(self._data))
return runstatus.RunStatus.FAILED
try:
# Convert all column names to lowercase letters.
self._data = self._data.copy()
self._data.rename(columns=lambda x: x.lower(), inplace=True)
return runstatus.RunStatus.SUCC
except:
self.errorlogger.logger.error(traceback.format_exc())
return runstatus.RunStatus.FAILED
def _single_feature_process(self):
"""
One column of data feature processor.
Returns
------
:return : bool, runstatus.RunStatus
runstatus.RunStatus.SUCC: True
runstatus.RunStatus.FAILED: Failed
"""
try:
for key in self._single_app:
key_new = key.replace(' ', '')
_data_in = self._data[key_new.lower()]
_fun = self._single_app[key_new]
_res = eval(self.FEATURE_PROC + _fun)(_data_in)
if _res is not None:
self._data[key_new.lower()] = _res
else:
                    self.managerlogger.logger.error("%s error: %s" % (self.FEATURE_PROC + _fun, key))
return runstatus.RunStatus.SUCC
except Exception as err:
self.managerlogger.logger.error("_signle_feature_process error: %s" % err)
self.errorlogger.logger.error("_signle_feature_process error:\n %s" % traceback.format_exc())
return runstatus.RunStatus.FAILED
def _multi_feature_process(self):
"""
Multi-column data feature processor.
Returns
------
:return : bool, runstatus.RunStatus
runstatus.RunStatus.SUCC: True
runstatus.RunStatus.FAILED: Failed
"""
try:
for key in self._multi_app:
key_new = key.replace(' ', '')
_col_list = key_new.lower().split(self.DASH)
_fun = self._multi_app[key_new]
if len(_col_list) == 2:
_data_in1 = self._data[_col_list[0]]
_data_in2 = self._data[_col_list[1]]
_res = eval(self.FEATURE_PROC + _fun)(_data_in1, _data_in2)
self._data[key_new.lower() + '_' + _fun] = _res
return runstatus.RunStatus.SUCC
except Exception as e:
self.managerlogger.logger.error("_multi_feature_process error: %s" % e)
self.errorlogger.logger.error("_multi_feature_process error:\n %s" % traceback.format_exc())
return runstatus.RunStatus.FAILED
def pre_feature_process(self):
"""
pre_feature_process
Returns
--------
self._data: processed data
"""
if self._single_app:
self._single_feature_process()
if self._multi_app:
self._multi_feature_process()
return self._data
```
#### File: src/ml/prediction_ml.py
```python
from . import base_ml
import traceback
from ..ml_utils import runstatus
class PredictionML(base_ml.BaseML):
"""
This basic class encapsulates the functions of the prediction part, and you can call the method
of the class to make predictions on the test set.
Parameters
--------
conf : configparser.ConfigParser, default = None
Configuration file for prediction of the test data set.
Examples
--------
>>> from xes_ml_arch.src.ml import prediction_ml
>>> import configparser
>>> import pandas as pd
>>> conf = configparser.ConfigParser()
>>> conf.read("myconfig.conf")
>>> pml = prediction_ml.PredictionML(conf=conf)
>>> data = pd.read_csv("my_data.csv")
>>> pml.set_data(data)
>>> pml.start()
"""
def __init__(self, conf=None,xeasy_log_path = None):
self._test_data = None
super(PredictionML, self).__init__(config=conf, xeasy_log_path = xeasy_log_path)
def start(self):
"""
Start predict data handle.
"""
self.managerlogger.logger.info("start ml predict...")
if runstatus.RunStatus.SUCC == self._predict_handle():
self.managerlogger.logger.info("finished ml predict!")
else:
self.managerlogger.logger.error("ml predict failed!")
def _init_model(self):
"""
Load the trained model.
Returns
-------
:return: bool
True : Succ
False : failed
"""
if not super(PredictionML, self)._init_model():
return False
# load model
if runstatus.RunStatus.FAILED == self._model.load_model():
self.managerlogger.logger.error("load model error")
return False
self.managerlogger.logger.info("successfly load model to predict: %s" % self._model.MODEL_ID)
return True
def _predict_handle(self):
'''
Model predict handle.
Returns
-------
:return: bool
True : Succ
False : failed
'''
try:
self._feature_processor.test_data = self._data
if runstatus.RunStatus.FAILED == self._feature_processor.execute():
self.managerlogger.logger.error("predict feature processor error")
return False
self.managerlogger.logger.info("successfly predict model: %s" % self._model.MODEL_ID)
# get predict result
if runstatus.RunStatus.FAILED == self._get_result():
self.managerlogger.logger.error("predict get result error")
return False
self.managerlogger.logger.info("successfly get result of predict : %s" % self._model.MODEL_ID)
# store result to file
if runstatus.RunStatus.FAILED == self._store_predict_result():
self.managerlogger.logger.error("store predict result error")
return False
self.managerlogger.logger.info("successfly store result of predict : %s" % self._model.MODEL_ID)
return True
except:
self.managerlogger.logger.debug(traceback.format_exc())
self.managerlogger.logger.error("predict handle error")
return False
```
#### File: src/ml_utils/configmanager.py
```python
import configparser
from ..systemlog import sysmanagerlog
from ..systemlog import syserrorlog
class ConfigManager():
"""Reading of log system synchronization configuration file"""
def __init__(self, configfile, log_path = None):
"""
Initialization parameter.
Parameters
--------
configfile: System Configuration file path of user.
"""
self.configfile = configfile
self.managerlogger = sysmanagerlog.SysManagerLog(__file__,log_path)
self.errorlogger = syserrorlog.SysErrorLog(__file__,log_path)
self.configP = self.init_config(self.configfile, self.managerlogger)
def init_config(self, configfile, logger):
"""
Loading configuration information.
Parameters
----------
configfile:System Configuration file path
Returns
-------
Configuration information file.
"""
        self.managerlogger.logger.info('Start init the config...')
        conf = configparser.ConfigParser()
        conf.read(configfile)
        self.managerlogger.logger.info('End init the config...')
return conf
def get_key(self, group, key):
"""Gets the option value of the named part"""
return self.configP.get(group, key)
def get_keys(self, group):
"""Gets the content of the configuration file section(group);
contens: tuple of list.
Parameters
----------
group: section name of configuration file.
Returns
-------
tuple of list
"""
return self.configP.items(group)
def get_sections(self):
return self.configP.sections()
def get_float(self, section, option):
return self.configP.getfloat(section, option)
def has_option(self, section, option):
return self.configP.has_option(section, option)
def __iter__(self):
for section in self.configP.sections():
yield section
```
#### File: src/ml_utils/jsonmanager.py
```python
import json
def get_message(message):
"""Generating Python objects into JSON objects. Element of json is string.
Parameters
----------
message: input date,dict.
Returns
-------
date of json.
"""
# json_str = json.dumps(message, default=lambda o: o.__dict__, sort_keys=True, indent=4)
# json_str = json_str.replace('\n', '').replace('\t', '')
json_str = json.dumps(message, default=lambda o: o.__dict__, sort_keys=True)
return json_str
def get_message_without_whitespace(message):
"""Generating Python objects into JSON objects with no space. Element of json is string.
Parameters
----------
message: input date,dict.
Returns
-------
date of json.
"""
json_str = json.dumps(message, default=lambda o: o.__dict__, sort_keys=True)
json_str = json_str.replace('\n', '').replace('\t', '').replace(' ', '')
return json_str
```
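A quick illustration of the two helpers above (the sample dict is made up; the import path mirrors the one used by the test files later in this repo):
```python
from xes_ml_arch.src.ml_utils import jsonmanager

msg = {"model": "lr", "metrics": {"auc": 0.91}}
print(jsonmanager.get_message(msg))
# {"metrics": {"auc": 0.91}, "model": "lr"}
print(jsonmanager.get_message_without_whitespace(msg))
# {"metrics":{"auc":0.91},"model":"lr"}
```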
#### File: src/model/model_factory.py
```python
from . import base_model
from . import lr
from . import my_xgb
from . import rf
from . import linear
from . import sklearn_xgb
from . import sklearn_xgb_reg
from . import desion_tree
from . import my_lightgbm
from . import lgb_category
import configparser
from ..ml_utils import runstatus
class ModelFactory(object):
"""Model factory.Create a model based on the parameters passed in.
Parameters
--------
config: configparser.ConfigParser()
Configuration file of model.
model_name: str
Name of model.
Examples
--------
>>> from xes_ml_arch.src.model import model_factory
>>> import configparser
>>> import pandas as pd
>>> import numpy as np
>>> import random
>>> config = configparser.ConfigParser()
>>> config.read("model_online.conf")
>>> x = pd.read_csv('data.txt', sep=',')
>>> y = np.array([int(random.random() * 100) for _x in range(99)])
>>> ins = model_factory.ModelFactory()
>>> my_model = ins.create_model(config=config, model_name='lr')
>>> my_model.train(x, y)
...
...
"""
@staticmethod
def create_model(config, model_name=None,log_path = None):
"""Create a model.
Parameters
--------
config: configparser.ConfigParser
Config object.
model_name: str
Name of model.
Returns
--------
:return: model object or None
Notes
--------
This function may raise exception include TypeError, RuntimeError
"""
model = None
# get model name
if not isinstance(config, configparser.ConfigParser):
raise TypeError("config object is not instance of ConfigParser")
if model_name is None:
try:
model_name = config.get(base_model.BaseModel.BASE_CONF,
base_model.BaseModel.MODEL_NAME)
except configparser.Error:
raise RuntimeError("config has no section named %s, or has no option named %s" % (
base_model.BaseModel.BASE_CONF, base_model.BaseModel.MODEL_NAME))
# create a model
if model_name == base_model.BaseModel.MODEL_XGB:
model = my_xgb.MyXgb(config=config,log_path = log_path)
elif model_name == base_model.BaseModel.MODEL_LR:
model = lr.LR(config=config,log_path = log_path)
elif model_name == base_model.BaseModel.MODEL_RF:
model = rf.RF(config=config,log_path = log_path)
elif model_name == base_model.BaseModel.MODEL_LINE_REG:
model = linear.Liner(config=config,log_path = log_path)
elif model_name == base_model.BaseModel.MODEL_SKLEARN_XGB:
model = sklearn_xgb.SklearnXGB(config=config,log_path = log_path)
elif model_name == base_model.BaseModel.MODEL_SKLEARN_XGB_REG:
model = sklearn_xgb_reg.SklearnXGBReg(config=config,log_path = log_path)
elif model_name == base_model.BaseModel.MODEL_DST:
model = desion_tree.MyDesionTree(config=config,log_path = log_path)
elif model_name == base_model.BaseModel.MODEL_LIGHTGBM:
model = my_lightgbm.MyLightGBM(config=config,log_path = log_path)
elif model_name == base_model.BaseModel.MODEL_CATE_LIGHTGBM:
model = lgb_category.Lgbcf(config=config, log_path = log_path)
else:
pass
if model is None:
raise RuntimeError("can not create a model named: %s" % (model_name))
# Initialize model
if runstatus.RunStatus.FAILED == model.init():
raise RuntimeError("model init faild")
return model
```
#### File: src/model/sklearn_xgb.py
```python
from .base_model import BaseModel
import time
import traceback
from ..ml_utils import runstatus
try:
from xgboost import XGBClassifier
except:
pass
# raise ImportError("no model named xgboost")
class SklearnXGB(BaseModel):
"""A xgboost classifier.
Encapsulation form base model
Parameters
-----------
config: the instance of ConfigParser.ConfigParser().
Examples
----------
>>> from xes_ml_arch.src.model import sklearn_xgb
>>> import configparser
>>> import pandas as pd
>>> import numpy as np
>>> import random
>>> config = configparser.ConfigParser()
>>> config.read("sklearn_xgb.conf")
>>> x = pd.read_csv('data.txt', sep=',')
>>> y = np.array([int(random.random() * 100) for _x in range(99)]) #label = num of samples
>>> ins = sklearn_xgb.SklearnXGB(config=config)
>>> ins.init()
>>> ins.train(x, y)
...
...
"""
MODEL_ID = BaseModel.MODEL_SKLEARN_XGB
def __init__(self, config=None, log_path = None):
super(SklearnXGB, self).__init__(config=config, log_path = log_path)
def _init_model(self):
"""Init model"""
try:
self._model = XGBClassifier(**self._model_params, use_label_encoder=False)
except:
self._model = False
def fit(self,x,y):
"""Model train func"""
return self.train(x,y)
def train(self, x, y):
"""Model train function.
Parameters
----------
x: input sample data.
y: label.
Returns
---------
        Bool: True (train succeeded) or False (train failed).
"""
try:
t_start = time.time()
self.managerlogger.logger.info("start xgboost.sklearn..")
self._model.fit(x, y)
self.managerlogger.logger.info("finished xgboost.sklearn!")
t_end = time.time()
self.managerlogger.logger.info("xgboost.sklearn train time: %s" % (t_end - t_start))
return runstatus.RunStatus.SUCC
except Exception as err:
self.managerlogger.logger.error("xgboost.sklearn train error: %s " % err)
self.errorlogger.logger.error("xgboost.sklearn train error:\n %s " % traceback.format_exc())
return runstatus.RunStatus.FAILED
def predict(self, x, thresh=0.5):
"""Forecast input data.
Parameters
----------
x:input sample data.
thresh: label classification threshold.
Returns
-------
Prediction label(list).
"""
try:
return self._model.predict(x)
except Exception as err:
self.managerlogger.logger.error("xgboost.sklearn predict error: %s " % err)
self.errorlogger.logger.error("xgboost.sklearn predict error:\n %s " % traceback.format_exc())
return None
def predict_proba(self, x):
"""Calculate data predict probability value.
Parameters
----------
x: input sample data.
Returns
---------
List of probability value [[ ]]
"""
try:
return self._model.predict_proba(x)
except Exception as err:
self.managerlogger.logger.error("xgboost.sklearn predict_proba error: %s " % err)
self.errorlogger.logger.error("xgboost.sklearn predict_proba error:\n %s " % traceback.format_exc())
return None
def get_feature_importance(self, feature):
"""Get feature weights of user data.
Parameters
----------
        feature: target feature names (list of strings).
Returns
---------
        The feature importance scores (zip of (score, feature) pairs).
"""
try:
res = zip(self._model.feature_importances_, feature)
return res
except Exception as err:
self.managerlogger.logger.error("xgboost.sklearn get_feature_importance error: %s " % err)
self.errorlogger.logger.error("xgboost.sklearn get_feature_importance error:\n %s " % traceback.format_exc())
return None
```
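Note that `get_feature_importance` returns a lazy `zip` object, so it is usually materialized before use. A small sketch continuing the docstring example above (`ins` and `x` as defined there):
```python
feature_names = list(x.columns)
pairs = ins.get_feature_importance(feature_names)
if pairs is not None:
    # each pair is (importance score, feature name)
    for score, name in sorted(pairs, reverse=True):
        print(name, score)
```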
#### File: src/schema/accessdictmodel.py
```python
import json
import datetime
from ..ml_utils import configmanager
class AccessDictModel():
"""Get config profile information"""
def __init__(self, log_path = None):
self.acc_Logger_Dict = {}
self.log_option_list = list()
self.log_option_property_dict = dict()
#Configuration file instance
standard_log_conf = configmanager.ConfigManager('./config/log_schemas.conf', log_path)
#get options
for k, v in standard_log_conf.get_keys('default_schema'):
self.log_option_list.append(k)
self.log_option_property_dict[k] = v
self.reset()
def format_body(self):
"""Convert dictionary type data to JSON type.
Returns
-------
        JSON string.
"""
json_str = json.dumps(self.acc_Logger_Dict, default=lambda o: o.__dict__, sort_keys=True, indent=4)
json_str = json_str.replace('\n', '').replace('\t', '')
return json_str
def get_log_dic(self):
return self.acc_Logger_Dict
def set_log_dic_key(self, key, value):
self.acc_Logger_Dict[key] = value
self.set_log_dic_key_time(key)
def set_log_dic_key_time(self, key):
key_time = str(key) + '_time'
time_value = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
self.acc_Logger_Dict[key_time] = time_value
def set_data_log_dic_key(self, req_or_resp, key, value):
for data_dict in self.acc_Logger_Dict['data']:
if data_dict.get(req_or_resp) is None:
continue
else:
inner_dict = data_dict.get(req_or_resp)
inner_dict[key] = value
break
def reset(self):
"""The configuration information is converted into a dictionary whose value represents the data type of
the configuration information;For example:'string'== V, self.acc_ Logger_ Dict[k] = ''.
Returns
-------
the dict of configuration information data type.
"""
for k in self.log_option_property_dict:
v = self.log_option_property_dict[k]
if 'int' == v:
self.acc_Logger_Dict[k] = 0
elif 'string' == v:
self.acc_Logger_Dict[k] = ''
elif 'dict' == v:
if self.acc_Logger_Dict.get(k) is None:
self.acc_Logger_Dict[k] = dict()
else:
self.acc_Logger_Dict[k].clear()
elif 'list' == v:
if 'data' == k:
if self.acc_Logger_Dict.get(k) is None:
self.acc_Logger_Dict[k] = list()
request_dict = dict()
request_dict["req"] = dict()
self.acc_Logger_Dict[k].append(request_dict)
response_dict = dict()
response_dict["resp"] = dict()
self.acc_Logger_Dict[k].append(response_dict)
else:
for data_dict in self.acc_Logger_Dict[k]:
inner_dict = data_dict.get('req')
if inner_dict is not None:
inner_dict.clear()
inner_dict = data_dict.get('resp')
if inner_dict is not None:
inner_dict.clear()
else:
if self.acc_Logger_Dict.get(k) is None:
self.acc_Logger_Dict[k] = list()
else:
del self.acc_Logger_Dict[k][:]
else:
self.acc_Logger_Dict[k] = None
```
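For illustration only, a hypothetical `default_schema` section and the dictionary that `reset()` would build from it (the option names are invented; only the type-to-default mapping comes from the code above):
```python
# [default_schema]   (hypothetical options in ./config/log_schemas.conf)
# request_id = string
# cost       = int
# extra      = dict
# data       = list
#
# After reset(), acc_Logger_Dict would be:
# {"request_id": "", "cost": 0, "extra": {},
#  "data": [{"req": {}}, {"resp": {}}]}
```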
#### File: tests/cross_validation_test/test_cross_vaidation.py
```python
import unittest
import sys
import configparser
import random
import pandas
import pandas as pd
import numpy as np
sys.path.append("../../../..")
from xes_ml_arch.src.cross_validation import cross_validation, data_split
from xes_ml_arch.src.model import base_model, model_factory
class TestCrossVaidation(unittest.TestCase):
def setUp(self):
self.conf = configparser.ConfigParser()
self.conf_file = "cross.conf"
self.conf.read(self.conf_file)
self.columns = ["col%s" % (x) for x in range(10)]
self.x = self.columns[:9]
self.y = self.columns[9]
self._data = pd.DataFrame(
[[int(random.random() * 100) for x in range(10)] for y in range(10000)],
columns=self.columns)
self._data["col9"] = np.random.choice([0, 1], size=10000)
self.create_config()
self.ins = cross_validation.Cross_Validation(conf=self._cv_config, log_path = '../log/log.conf',data=self._data,
x_columns=self.x, y_column=self.y)
self.ins_no = cross_validation.Cross_Validation(conf=self._cv_config, log_path = '../log/log.conf',data=pandas.DataFrame(),
x_columns=self.x, y_column=self.y)
self.split_ins = data_split.DataSplit(conf=self._data_spilt_config, log_path = '../log/log.conf', data=self._data)
def test_data_split(self):
self.assertTrue(self.split_ins.execute())
train_data = self.split_ins.train_data
test_data = self.split_ins.test_data
# assert (train_data.shape[0] == 9000)
# assert (test_data.shape[0] == 1000)
self.assertEqual(train_data.shape[0], 9000)
self.assertEqual(test_data.shape[0], 1000)
self.assertTrue(self.split_ins.store_test_data())
self.assertTrue(self.split_ins.store_train_data())
self.assertTrue(self.split_ins._load_data())
self.assertFalse(self.split_ins._store_data(self._data_spilt_config, self._data))
self.split_ins.reset(None, None)
self.split_ins.reset(self._data_spilt_config, self._data)
def test_data_split_no(self):
self.split_ins_no = data_split.DataSplit(conf=self._data_spilt_config_no, log_path = '../log/log.conf',data=self._data)
self.split_ins_no.reset()
self.assertTrue(self.split_ins_no.execute())
try:
train_data = self.split_ins_no.train_data
test_data = self.split_ins_no.test_data
self.assertTrue(self.split_ins.store_test_data())
self.assertTrue(self.split_ins.store_train_data())
except:
pass
def test_cv_model(self):
model = model_factory.ModelFactory.create_model(self._model_config,log_path = '../log/log.conf',
model_name= base_model.BaseModel.MODEL_SKLEARN_XGB)
self.assertTrue(self.ins.execute())
self.assertTrue(self.ins.cv_model(model))
self.ins.reset(self._cv_config, self._data, 'col1', 'col2')
def test_cv_model_1(self):
model = None
self.assertFalse(self.ins_no.execute())
self.assertFalse(self.ins.cv_model(model))
def create_config(self):
self._model_config = configparser.ConfigParser()
self._data_spilt_config = configparser.ConfigParser()
self._cv_config = configparser.ConfigParser()
self._data_spilt_config_no = configparser.ConfigParser()
self._data_spilt_config.read("./data_split.conf")
self._model_config.read("../../../config/demo/model_online.conf")
self._cv_config.read("./cross_validation.conf")
self._data_spilt_config_no.read("./data_split_try.conf")
if __name__ == '__main__':
unittest.main()
```
#### File: tests/feature_enginnering_test/test_data_processor.py
```python
import unittest
import sys
import pandas as pd
import configparser
import numpy as np
sys.path.append("../../../..")
from xes_ml_arch.src.feature_enginnering import data_processor
class TestDataProcessor(unittest.TestCase):
def setUp(self):
self._data_path = "../data/test.txt"
self._config = "./conf/feature_enginnering.conf"
self._data = pd.read_csv(self._data_path)
self._conf = configparser.ConfigParser()
self._conf_no = configparser.ConfigParser()
self._conf_false = configparser.ConfigParser()
self._conf.read(self._config)
self._conf_no.read("./conf/feature_enginnering_no.conf")
self._conf_false.read("./conf/feature_enginnering_false.conf")
self.target = np.random.randint(0, 4, 100)
self.feature_processor = data_processor.DataProcessor(conf=self._conf, log_path = '../log/log.conf')
self.feature_processor_none = data_processor.DataProcessor(conf=self._conf, log_path = '../log/log.conf')
self.feature_processor_no = data_processor.DataProcessor(conf=self._conf_no, log_path = '../log/log.conf')
def test_start(self):
self.assertTrue(self.feature_processor.init())
self.feature_processor.train_data = self._data
self.feature_processor.test_data = self._data
self.assertTrue(self.feature_processor.execute())
def test_start_false(self):
self.assertTrue(self.feature_processor.init())
self.assertFalse(self.feature_processor.execute())
self.assertFalse(self.feature_processor_no.init())
self.assertFalse(self.feature_processor.execute())
self.assertTrue(self.feature_processor_none.init())
self.assertFalse(self.feature_processor.execute())
# def test_set_data(self):
# self.assertTrue(self.feature_processor.init())
# self.assertTrue(self.feature_processor.set_data(self._data))
# data_no = None
# self.assertFalse(self.feature_processor.set_data(data_no))
if __name__ == '__main__':
unittest.main()
```
#### File: tests/feature_enginnering_test/test_pre_fea_utils.py
```python
import unittest
import pandas as pd
import configparser
import numpy as np
import sys
import json
sys.path.append("../../../..")
from xes_ml_arch.src.feature_enginnering import pre_feature_utils
class TestPreFeatureUtils(unittest.TestCase):
def setUp(self):
self.conf_1 = configparser.ConfigParser()
self.conf = configparser.ConfigParser()
self.conf_d = configparser.ConfigParser()
self.ff_t = pre_feature_utils.PreFeatureUtils(conf=self.conf_1,log_path = '../log/log.conf')
self.conf.read('./conf/feature_enginnering.conf')
self.conf_d.read('./conf/feature_enginnering_no.conf')
self.ff = pre_feature_utils.PreFeatureUtils(conf=self.conf,log_path = '../log/log.conf')
self.ff_no = pre_feature_utils.PreFeatureUtils('./conf/feature_enginnering.conf',log_path = '../log/log.conf')
self.data = pd.read_csv('data/featureFilter.csv')
self.ff_d = pre_feature_utils.PreFeatureUtils(conf=self.conf, data=self.data,log_path = '../log/log.conf')
self.ff_f = pre_feature_utils.PreFeatureUtils(conf=self.conf_d,log_path = '../log/log.conf')
def test_init(self):
# app = self.conf.get('pre_feature_utils', "single_feature_apply")
self.assertTrue(self.ff.init())
self.assertFalse(self.ff_no.init())
self.assertFalse(self.ff_t.init())
def test_setdata(self):
a = [1, 2, 3, 4]
self.assertFalse(self.ff.set_data(a))
def test_start(self):
self.assertTrue(self.ff.init())
self.assertTrue(self.ff.set_data(self.data))
res = self.ff.excute()
print("res:", res)
def test_start_a(self):
self.assertTrue(self.ff_d.init())
res2 = self.ff_d.excute()
print("res2:", res2)
def test_start_b(self):
self.assertTrue(self.ff_f.init())
res_no = self.ff_f.excute()
print("res_no:", res_no)
def test_start_c(self):
self.assertTrue(self.ff_f.set_data(self.data))
res1 = self.ff_f.excute()
print("res1:", res1)
if __name__ == '__main__':
unittest.main()
```
#### File: tests/ml_utils_test/test_feature_processor.py
```python
import unittest
import sys
import random
import pandas as pd
sys.path.append('../../../..')
from xes_ml_arch.src.ml_utils import feature_processor
class TestUtils(unittest.TestCase):
def setUp(self):
self.covertint = 3
self.covertflo = 3.3
self.covertstr = '3.3'
self.covertbool = (6,7)
self.time1 = pd.Series(pd.date_range(start='2019-1-09',periods=10,freq='H'))
self.time2 = pd.Series(pd.date_range(start='2017-2-21',periods=10,freq='H'))
self.stamp = pd.Series(feature_processor.FeatureProcessor().time2stamp(self.time2))
self.year = ['2020+','2031+']
self.age = ['28-30','28+3']
self.disli = [random.randint(1,15) for _ in range(100)]
self.day = 100000
def test_cover2int(self):
resa = feature_processor.FeatureProcessor().convert_to_int(self.covertint)
resb = feature_processor.FeatureProcessor().convert_to_int(self.covertflo)
resc = feature_processor.FeatureProcessor().convert_to_int(self.covertstr)
self.assertTrue(isinstance(resa, int))
self.assertTrue(isinstance(resb, int))
self.assertEqual(resc,0)
def test_cover2str(self):
#resa = feature_processor.FeatureProcessor().convert_to_str(self.covertbool)
resb = feature_processor.FeatureProcessor().convert_to_str(self.covertflo)
resc = feature_processor.FeatureProcessor().convert_to_str(self.covertstr)
self.assertTrue(isinstance(resc, str))
self.assertTrue(isinstance(resb, str))
#self.assertEqual(resa, '')
def test_time2stamp(self):
resa = feature_processor.FeatureProcessor().time2stamp(self.time1)
self.assertIsNotNone(resa)
def test_stamp2time(self):
resa = feature_processor.FeatureProcessor().stamp2time(self.stamp)
self.assertIsNotNone(resa)
def test_timediff(self):
_diff = feature_processor.FeatureProcessor().time_diff(self.time1,self.time2)
self.assertIsNotNone(_diff)
def test_minusdata(self):
_res = feature_processor.FeatureProcessor().minus_data(self.time1, self.time2)
self.assertIsNotNone(_res)
def test_absminusdata(self):
_res = feature_processor.FeatureProcessor().abs_minus_data(self.time1, self.time2)
self.assertIsNotNone(_res)
def test_stayinyear(self):
res = feature_processor.FeatureProcessor().stay_in_year(self.year)
self.assertIsNotNone(res)
def test_age2int(self):
res = feature_processor.FeatureProcessor().age2int(self.age)
self.assertNotEqual(res,[-1 for _ in range(len(self.age))])
def test_discretizefreque(self):
res = feature_processor.FeatureProcessor().discretize_freque(self.disli)
self.assertIsNotNone(res)
def test_getdayrange(self):
ans = res = feature_processor.FeatureProcessor().get_day_range(self.day)
self.assertTrue(res > 0)
if __name__ == '__main__':
unittest.main()
```
#### File: tests/model_test/test_model_factory.py
```python
import unittest
import sys
import random
import configparser
import pandas as pd
import numpy as np
import traceback
sys.path.append('../../../..')
from xes_ml_arch.src.model import model_factory
class TestModelFactory(unittest.TestCase):
def setUp(self):
self.model_list = ['lr', 'rf', 'line_reg', 'xgb', 'sklearn_xgb', 'dst', 'lightgbm']
self.conf_file_lr = "../config/xiaozhuan_prediction_lr.conf"
self.conf_file_rf = "../config/xiaozhuan_prediction_rf.conf"
self.conf_file_xgb = "../config/xiaozhuan_prediction_xgb.conf"
self.conf_file_sklearn_xgb = "../config/xiaozhuan_prediction_sklearn_xgb.conf"
self.conf_file_line_reg = "../config/xiaozhuan_prediction_linereg.conf"
self.conf_file_lightgbm = "../config/xiaozhuan_prediction_lightgbm.conf"
self.x = pd.read_csv('../data/test_aaa_test.txt', sep=',')
self.n_x = None
self.n_y = None
self.e_x = pd.read_csv('../data/test_aaa_test.txt', sep=',')
# self.y = pd.DataFrame([int(random.random() * 100) for x in range(999)])
self._y = np.random.randint(0,3,size=(99,))
self._y.reshape(1, 99)
self.y = np.array([random.random() for x in range(99)])
#print(self._y,self.y)
self.y.reshape(1, 99)
def test_model(self):
conf_file = configparser.ConfigParser()
conf_file.read('../config/model.conf')
for name in self.model_list:
# file = 'self.conf_file_' + name
# conf_file.read(eval(file))
# print type(conf_file)
# print conf_file.sections()
model = model_factory.ModelFactory.create_model(conf_file, name, log_path = '../log/log.conf')
if name == 'xgb':
self.assertTrue(model.train(self.x, self.y))
print("xgb init successed")
else:
print("%s 开始测试" % (name))
self.assertTrue(model.train(self.x, self._y))
self.assertIsNotNone(model.predict(self.x))
if name == 'line_reg':
pass
else:
self.assertIsNotNone(model.predict_proba(self.x))
self.assertIsNotNone(model.get_feature_importance(self.x))
self.assertTrue(model.store_model('../config/zscal_pickle_file_lr.properties'))
self.assertTrue(model.load_model('../config/zscal_pickle_file_lr.properties'))
if name == "lr":
self.assertTrue(model.store_feature_importance(self.x))
print("lr init successed")
if name == "dst":
model.show_dot('./dst', self.x)
print("决策树流程结构图创建")
def test_model_exception(self):
conf_file = configparser.ConfigParser()
conf_file.read("../config/model_try.conf")
nofile = None
feature = None
model1 = model_factory.ModelFactory.create_model(conf_file, log_path = '../log/log.conf')
for name in self.model_list:
print('name: --------',name)
model = None
# file = 'self.conf_file_' + name
# conf_file.read(eval(file))
try:
if name not in ['line_reg','xgb']:
model = model_factory.ModelFactory.create_model(conf_file, name, log_path= '../log/log.conf')
self.assertFalse(model.train(self.e_x, self.y))
self.assertFalse(model.load_model('../config/zscal_pickle_file'+'_%s.pickle'%(name)))
self.assertFalse(model.store_feature_importance(self.n_x))
self.assertIsNone(model.predict(self.x))
self.assertIsNone(model.get_feature_importance(feature))
self.assertTrue(model.store_model(nofile))
else:
#line_reg No matter what the data format is, it can be trained successfully
#xgb obj:binary train false while y is Continuous categories(int)
model = model_factory.ModelFactory.create_model(conf_file, name, log_path= '../log/log.conf')
self.assertFalse(model.train(self.e_x, self._y))
self.assertFalse(model.load_model('../config/zscal_pickle_file' + '_%s.pickle' % (name)))
self.assertFalse(model.store_feature_importance(self.n_x))
self.assertIsNone(model.predict(self.x))
self.assertIsNone(model.get_feature_importance(feature))
self.assertTrue(model.store_model(nofile))
if name == 'line_reg':
pass
else:
self.assertIsNone(model.predict_proba(self.x))
except:
traceback.print_exc()
# model.get_feature_importance(self.n_x)
try:
#test create_model function test: the parameter passed in when creating a model can only be config
model = model_factory.ModelFactory.create_model('../config/zscal_pickle_file', name)
except:
traceback.print_exc()
# model = model_factory.ModelFactory.create_model(nofile, name)
try:
model = model_factory.ModelFactory.create_model(conf_file, 'sklearn_xgb', log_path= '../log/log.conf')
except:
traceback.print_exc()
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jiayangchen/SJTU_courses",
"score": 3
} |
#### File: SJTU_courses/SE227_Computer System Engineering/mapreduce.py
```python
import sys
import os
import pickle
import string
from multiprocessing import Pool
# A class for the MapReduce framwork
class MapReduce(object):
def __init__(self, m, r, path):
self.maptask = m
self.reducetask = r
self.path = path
self.Split(path)
# Splits input in mapworker temporary files, each with a keyvalue, respecting word boundaries
# The keyvalue is the byte offset in the input file. User of class can overwrite the default.
def Split(self, path):
size = os.stat(self.path).st_size;
chunk = size / self.maptask
chunk += 1
f = open(self.path, "r")
buffer = f.read()
f.close()
f = open("#split-%s-%s" % (self.path, 0), "w+")
f.write(str(0) + "\n")
i = 0
m = 1
for c in buffer:
f.write(c)
i += 1
if (c in string.whitespace) and (i > chunk * m):
f.close()
m += 1
f = open("#split-%s-%s" % (self.path, m-1), "w+")
f.write(str(i) + "\n")
f.close()
# Maps value into into a list of (key, value) pairs
# To be defined by user of class
def Map(self, keyvalue, value):
pass
# Determines the default reduce task that will receive (key, value)
# User of class can overwrite the default.
def Partition(self, item):
return hash(item[0]) % self.reducetask
# Reduces all pairs for one key [(key, value), ...])
# To be defined by user of class
def Reduce(self, key, keyvalues):
pass
# Optionally merge all reduce partitions into a single output file
# A better implementation would do a merge sort of the reduce partitions,
# since each partition has been sorted by key.
def Merge(self):
out = []
for r in xrange(0, self.reducetask):
f = open("#reduce-%s-%d" % (self.path, r), "r")
out = out + pickle.load(f)
f.close()
os.unlink("#reduce-%s-%d" % (self.path, r))
out = sorted(out, key=lambda pair: pair[0])
return out
# Load a mapper's split and apply Map to it
def doMap(self, i):
f = open("#split-%s-%s" % (self.path, i), "r")
keyvalue = f.readline()
value = f.read()
f.close()
os.unlink("#split-%s-%s" % (self.path, i))
keyvaluelist = self.Map(keyvalue, value)
for r in range(0, self.reducetask):
# print "map", i, "#map-%s-%s-%d" % (self.path, i, r)
f = open("#map-%s-%s-%d" % (self.path, i, r), "w+")
itemlist = [item for item in keyvaluelist if self.Partition(item) == r]
pickle.dump(itemlist, f)
f.close()
return [(i, r) for r in range(0, self.reducetask)]
# Get reduce regions from maptasks, sort by key, and apply Reduce for each key
def doReduce(self, i):
keys = {}
out = []
for m in range(0, self.maptask):
# print "reduce", i, "#map-%s-%s-%d" % (self.path, m, i)
f = open("#map-%s-%s-%d" % (self.path, m, i), "r")
itemlist = pickle.load(f)
for item in itemlist:
if keys.has_key(item[0]):
keys[item[0]].append(item)
else:
keys[item[0]] = [item]
f.close()
os.unlink("#map-%s-%s-%d" % (self.path, m, i))
for k in sorted(keys.keys()):
out.append(self.Reduce(k, keys[k]))
f = open("#reduce-%s-%d" % (self.path, i), "w+")
pickle.dump(out, f)
f.close()
return i
# The master.
def run(self):
pool = Pool(processes=max(self.maptask, self.reducetask),)
regions = pool.map(self.doMap, range(0, self.maptask))
partitions = pool.map(self.doReduce, range(0, self.reducetask))
# An instance of the MapReduce framework. It performs word count on title-cased words.
class WordCount(MapReduce):
def __init__(self, maptask, reducetask, path):
MapReduce.__init__(self, maptask, reducetask, path)
# Produce a (key, value) pair for each title word in value
def Map(self, keyvalue, value):
results = []
i = 0
n = len(value)
while i < n:
# skip non-ascii letters in C/C++ style a la MapReduce paper:
while i < n and value[i] not in string.ascii_letters:
i += 1
start = i
while i < n and value[i] in string.ascii_letters:
i += 1
w = value[start:i]
if start < i and w.istitle():
results.append ((w.lower(), 1))
return results
# Reduce [(key,value), ...])
def Reduce(self, key, keyvalues):
return (key, sum(pair[1] for pair in keyvalues))
class ReverseIndex(MapReduce):
def __init__(self, maptask, reducetask, path):
MapReduce.__init__(self, maptask, reducetask, path)
# Produce a (key, value) pair for each word in value
# TODO: your code here
def Map(self, keyvalue, value):
results = []
return results
# Reduce [(key,value), ...])
# TODO: your code here
def Reduce(self, key, keyvalues):
valuelist = []
return (key, valuelist)
# Python doesn't pickle method instance by default, so here you go:
def _pickle_method(method):
func_name = method.im_func.__name__
obj = method.im_self
cls = method.im_class
return _unpickle_method, (func_name, obj, cls)
def _unpickle_method(func_name, obj, cls):
for cls in cls.mro():
try:
func = cls.__dict__[func_name]
except KeyError:
pass
else:
break
return func.__get__(obj, cls)
import copy_reg
import types
copy_reg.pickle(types.MethodType, _pickle_method, _unpickle_method)
# Run WordCount instance
if __name__ == '__main__':
if (len(sys.argv) != 2):
print "Program requires path to file for reading!"
sys.exit(1)
# Modify the following code to produce a correct output of the last section
# TODO: your code here
# Create a WordCount MapReduce program
wc = WordCount(4, 2, sys.argv[1])
# wc = ReverseIndex(4, 2, sys.argv[1])
# Run it
wc.run()
# Merge out of Reduce tasks:
out = wc.Merge()
if isinstance(wc, WordCount):
# Sort by word count:
out = sorted(out, key=lambda pair: pair[1], reverse=True)
# Print top 20:
print "WordCount:"
for pair in out[0:20]:
print pair[0], pair[1]
else:
out = sorted(out, key=lambda pair: pair[0], reverse=True)
# Print top 20:
print "ReverseIndex:"
for pair in out[0:20]:
print pair[0], pair[1]
``` |
{
"source": "jiayangshi/pcf",
"score": 3
} |
#### File: pcf/pcfv/dataset.py
```python
import os
import glob
import numpy as np
from torch.utils.data import Dataset
from PIL import Image
class CustomImageDataset(Dataset):
def __init__(self, img_dir, width, height, vmax=255, transform=None, target_transform=None):
'''
:param img_dir: The directory where all images are located
        :param width: The desired width of the output image
        :param height: The desired height of the output image
:param vmax: The max possible value of pixel of given image, used to normalize the images
:param transform: The transformation function for images
:param target_transform: The transformation function for images targets(labels)
'''
self.paths = glob.glob(os.path.join(img_dir, "*.JPEG"))
self.width = width
self.height = height
self.vmax = vmax
self.transform = transform
self.target_transform = target_transform
def __len__(self):
return len(self.paths)
def __getitem__(self, idx):
image = Image.open(self.paths[idx])
image = image.resize((self.width, self.height), Image.ANTIALIAS)
if (image.mode != 'RGB'):
image = image.convert("RGB")
image = np.asarray(image)/self.vmax
label = image.copy()
image = image.transpose(2, 0, 1)
label = label.transpose(2, 0, 1)
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
class CustomGreyImageDataset(Dataset):
def __init__(self, img_dir, width, height, vmax=255, transform=None, target_transform=None):
'''
:param img_dir: The directory where all images are located
        :param width: The desired width of the output image
        :param height: The desired height of the output image
:param vmax: The max possible value of pixel of given image, used to normalize the images
:param transform: The transformation function for images
:param target_transform: The transformation function for images targets(labels)
'''
self.paths = glob.glob(os.path.join(img_dir, "*.JPEG"))
self.width = width
self.height = height
self.vmax = vmax
self.transform = transform
self.target_transform = target_transform
def __len__(self):
return len(self.paths)
def __getitem__(self, idx):
image = Image.open(self.paths[idx])
image = image.resize((self.width, self.height), Image.ANTIALIAS)
if (image.mode == 'RGB'):
image = image.convert("L")
image = np.asarray(image)/self.vmax
label = image.copy()
image = np.expand_dims(image, (0))
label = np.expand_dims(label, (0))
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
```
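A minimal usage sketch of the dataset classes (the directory path and image size are placeholders, and the import path assumes the package is importable as `pcfv`):
```python
from torch.utils.data import DataLoader
from pcfv.dataset import CustomImageDataset  # assumed import path

# the directory is expected to contain *.JPEG files (uppercase extension, per the glob above)
dataset = CustomImageDataset(img_dir="path/to/images", width=256, height=256)
loader = DataLoader(dataset, batch_size=8, shuffle=True)
for images, labels in loader:
    # images and labels are identical normalized tensors of shape (8, 3, 256, 256)
    print(images.shape, labels.shape)
    break
```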
#### File: pcfv/layers/ConvBlock.py
```python
import torch.nn as nn
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels):
super(ConvBlock, self).__init__()
self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, padding=1)
self.conv2 = nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, padding=1)
self.relu1 = nn.ReLU(inplace=True)
self.relu2 = nn.ReLU(inplace=True)
self.batch_norm1 = nn.BatchNorm2d(num_features=out_channels)
self.batch_norm2 = nn.BatchNorm2d(num_features=out_channels)
def forward(self, x):
tmp = self.relu1(self.batch_norm1(self.conv1(x)))
y = self.relu2(self.batch_norm2(self.conv2(tmp)))
return y
```
#### File: pcfv/networks/UNet.py
```python
import torch
import torch.nn as nn
from pcfv.layers.ConvBlock import ConvBlock
from pcfv.layers.ScalingBlock import ScalingBlock
class UNet(nn.Module):
'''
Implementation of UNet (Ronneberger et al. U-Net: Convolutional Networks for Biomedical Image Segmentation)
'''
def __init__(self, in_channels, out_channels, inter_channel=64):
'''
:param in_channels:
:param out_channels:
'''
super(UNet, self).__init__()
self.scale_in = ScalingBlock(in_channels)
self.scale_out = ScalingBlock(out_channels)
self.conv_block1 = ConvBlock(in_channels=in_channels, out_channels=inter_channel)
self.conv_block2 = ConvBlock(in_channels=inter_channel, out_channels=inter_channel*2)
self.conv_block3 = ConvBlock(in_channels=inter_channel*2, out_channels=inter_channel*4)
self.conv_block4 = ConvBlock(in_channels=inter_channel*4, out_channels=inter_channel*8)
self.conv_block5 = ConvBlock(in_channels=inter_channel*8, out_channels=inter_channel*16)
self.conv_block6 = ConvBlock(in_channels=inter_channel*16, out_channels=inter_channel*8)
self.conv_block7 = ConvBlock(in_channels=inter_channel*8, out_channels=inter_channel*4)
self.conv_block8 = ConvBlock(in_channels=inter_channel*4, out_channels=inter_channel*2)
self.conv_block9 = ConvBlock(in_channels=inter_channel*2, out_channels=inter_channel)
self.max_pooling1 = nn.MaxPool2d(kernel_size=2, stride=2)
self.max_pooling2 = nn.MaxPool2d(kernel_size=2, stride=2)
self.max_pooling3 = nn.MaxPool2d(kernel_size=2, stride=2)
self.max_pooling4 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv_transpose1 = nn.ConvTranspose2d(in_channels=inter_channel*16, out_channels=inter_channel*8, kernel_size=2, stride=2)
self.conv_transpose2 = nn.ConvTranspose2d(in_channels=inter_channel*8, out_channels=inter_channel*4, kernel_size=2, stride=2)
self.conv_transpose3 = nn.ConvTranspose2d(in_channels=inter_channel*4, out_channels=inter_channel*2, kernel_size=2, stride=2)
self.conv_transpose4 = nn.ConvTranspose2d(in_channels=inter_channel*2, out_channels=inter_channel, kernel_size=2, stride=2)
self.final_conv = nn.Conv2d(in_channels=inter_channel, out_channels=out_channels, kernel_size=(1, 1))
def forward(self, x):
x = self.scale_in(x)
tmp1 = self.conv_block1(x)
tmp2 = self.conv_block2(self.max_pooling1(tmp1))
tmp3 = self.conv_block3(self.max_pooling1(tmp2))
tmp4 = self.conv_block4(self.max_pooling1(tmp3))
tmp5 = self.conv_block5(self.max_pooling1(tmp4))
tmp6 = self.conv_transpose1(tmp5)
tmp7 = self.conv_block6(torch.cat((tmp6, tmp4), dim=1))
tmp8 = self.conv_transpose2(tmp7)
tmp9 = self.conv_block7(torch.cat((tmp8, tmp3), dim=1))
tmp10 = self.conv_transpose3(tmp9)
tmp11 = self.conv_block8(torch.cat((tmp10, tmp2), dim=1))
tmp12 = self.conv_transpose4(tmp11)
tmp13 = self.conv_block9(torch.cat((tmp12, tmp1), dim=1))
y = self.final_conv(tmp13)
y = self.scale_out(y)
return y
def normalized_input(self, x):
x = self.scale_in(x)
return x
```
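A minimal forward-pass sketch (assuming the package layout above and that `ScalingBlock` accepts the given channel count; the spatial size must be divisible by 16 because of the four 2x2 poolings):
```python
import torch
from pcfv.networks.UNet import UNet  # assumed import path

model = UNet(in_channels=1, out_channels=1, inter_channel=64)
x = torch.randn(2, 1, 128, 128)
y = model(x)
print(y.shape)  # expected: torch.Size([2, 1, 128, 128])
```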
#### File: pcf/pcfv/train.py
```python
import os
import torch
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
from .layers.ScalingBlock import scaling_module_set_scale, scaling_module_set_bias
def train_loop(dataloader, model, optimizer, loss, device='cuda'):
'''
The training function
:param dataloader: The dataloader to provide the data
:param model: The to be trained model
:param optimizer: The optimizer
:param loss: The used loss function
:param device: The device on which the training is done
:return: The average training loss
'''
training_loss = 0
size = len(dataloader.dataset)
batches = len(dataloader)
bar = tqdm(dataloader)
for batch, (X, y) in enumerate(bar):
# Compute prediction and loss
X = X.to(device, dtype=torch.float)
pred = model(X)
label = y.to(device, dtype=torch.float)
cur_loss = loss(pred, label)
# Backpropagation
optimizer.zero_grad()
cur_loss.backward()
optimizer.step()
# display current batch and loss
cur_loss, current = cur_loss.item(), batch * len(X)
bar.set_description(f"training loss: {cur_loss:>7f} [{current:>5d}/{size:>5d}]")
# calculates the average training loss
training_loss += cur_loss/batches
return training_loss
def valid_loop(dataloader, model, loss, device='cuda'):
'''
The validation function
:param dataloader: The dataloader to provide the data
:param model: The to be trained model
:param loss: The used loss function
:param device: The device on which the validation is done
:return: The average validation loss
'''
validation_loss = 0
size = len(dataloader.dataset)
batches = len(dataloader)
bar = tqdm(dataloader)
with torch.no_grad():
for batch, (X, y) in enumerate(bar):
# Compute prediction and loss
X = X.to(device, dtype=torch.float)
pred = model(X)
label = y.to(device, dtype=torch.float)
cur_loss = loss(pred, label)
# display current batch and loss
cur_loss, current = cur_loss.item(), batch * len(X)
bar.set_description(f"validation loss: {cur_loss:>7f} [{current:>5d}/{size:>5d}]")
# calculates the average training loss
validation_loss += cur_loss/batches
return validation_loss
def test_loop(dataloader, model, loss, metric, output_directory=None, device='cuda'):
'''
:param dataloader: The dataloader to provide the data
:param model: The to be trained model
:param loss: The used loss function
:param metric: The used metric function
:param output_directory: The directory to save the test results
:param device: The device on which the validation is done
'''
batches = len(dataloader)
test_loss, test_metric = 0, 0
i = 0
with torch.no_grad():
for X, y in dataloader:
X = X.to(device, dtype=torch.float)
pred = model(X)
label = y.to(device, dtype=torch.float)
cur_loss = loss(pred, label)
cur_metric = metric(pred, label)
            if output_directory:
for j in range(pred.shape[0]):
fig = plt.figure(frameon=True)
ax1 = plt.subplot(1, 3, 1)
ax1.imshow(np.squeeze(label[j].cpu().numpy()), vmin=0, vmax=1)
plt.xticks([])
plt.yticks([])
ax1.set_title("Original")
ax2 = plt.subplot(1, 3, 2)
ax2.imshow(np.squeeze(X[j].cpu().numpy()), vmin=0, vmax=1)
plt.xticks([])
plt.yticks([])
ax2.set_title("Noised")
ax2.set_xlabel("PSNR:{:,.2f} dB".format(metric(label[j], X[j]).cpu().numpy()))
ax3 = plt.subplot(1, 3, 3)
ax3.imshow(np.squeeze(pred[j].cpu().numpy()), vmin=0, vmax=1)
plt.xticks([])
plt.yticks([])
ax3.set_title("Denoised")
ax3.set_xlabel("PSNR:{:,.2f} dB".format(metric(label[j], X[j]).cpu().numpy()))
fig.savefig(os.path.join(output_directory, str(i) + ".png"))
print("The {}th test image is processed".format(i + 1))
i += 1
test_loss += cur_loss / batches
test_metric += cur_metric / batches
print(f"Avg loss on whole image: {test_loss:>8f} \n")
print(f"Avg metric on whole image: {test_metric:>8f} \n")
def set_normalization(model, dataloader):
"""Normalize input and target data.
This function goes through all the training data to compute
the mean and std of the training data.
It modifies the network so that all future invocations of the
network first normalize input data and target data to have
mean zero and a standard deviation of one.
These modified parameters are not updated after this step and
are stored in the network, so that they are not lost when the
network is saved to and loaded from disk.
Normalizing in this way makes training more stable.
:param dataloader: The dataloader associated to the training data.
:returns:
:rtype:
"""
print("Calculating the normalization factors")
mean_in = square_in = mean_out = square_out = 0
for (data_in, data_out) in dataloader:
mean_in += data_in.mean(axis=(2,3))
mean_out += data_out.mean(axis=(2,3))
square_in += data_in.pow(2).mean(axis=(2,3))
square_out += data_out.pow(2).mean(axis=(2,3))
mean_in /= len(dataloader)
mean_out /= len(dataloader)
square_in /= len(dataloader)
square_out /= len(dataloader)
std_in = np.sqrt(square_in - mean_in ** 2)
std_out = np.sqrt(square_out - mean_out ** 2)
# The input data should be roughly normally distributed after
# passing through scale_in. Note that the input is first
# scaled and then recentered.
scaling_module_set_scale(model.scale_in, 1 / std_in)
scaling_module_set_bias(model.scale_in, -mean_in / std_in)
# The scale_out layer should rather 'denormalize' the network
# output.
scaling_module_set_scale(model.scale_out, std_out)
scaling_module_set_bias(model.scale_out, mean_out)
# def early_stopping(valid_losses, patience=4):
# if len(valid_losses) > patience:
# # if current loss larger than max value in patience
# # or last patience number losses non decreasing
# if valid_losses[-1] > max(valid_losses[-patience-1:-1]) or \
# all(x<=y for x, y in zip(valid_losses[-patience-1:-1], valid_losses[-patience:])):
# return True
# return False
``` |
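A rough sketch of how these pieces could be wired together; the dataset paths, network, and hyperparameters are placeholders, and `set_normalization` is assumed to broadcast the accumulated per-channel statistics into `scale_in`/`scale_out`:
```python
import torch
from torch.utils.data import DataLoader
from pcfv.dataset import CustomGreyImageDataset      # assumed import paths
from pcfv.networks.UNet import UNet
from pcfv.train import train_loop, valid_loop, set_normalization

device = "cuda" if torch.cuda.is_available() else "cpu"
train_ds = CustomGreyImageDataset("data/train", 128, 128)  # hypothetical paths
valid_ds = CustomGreyImageDataset("data/valid", 128, 128)
train_dl = DataLoader(train_ds, batch_size=4, shuffle=True, drop_last=True)
valid_dl = DataLoader(valid_ds, batch_size=4)

model = UNet(in_channels=1, out_channels=1)
# statistics are accumulated per batch, so a batch size of 1 keeps their shapes simple
set_normalization(model, DataLoader(train_ds, batch_size=1))
model = model.to(device)

loss = torch.nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
for epoch in range(10):
    train_loss = train_loop(train_dl, model, optimizer, loss, device=device)
    valid_loss = valid_loop(valid_dl, model, loss, device=device)
    print(epoch, train_loss, valid_loss)
```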
{
"source": "JiayangWu/python-qt-miniUI",
"score": 2
} |
#### File: JiayangWu/python-qt-miniUI/1.py
```python
import sys
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QPainter, QPen
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(400, 300)
self.pushButton = QtWidgets.QPushButton(Dialog)
self.pushButton.setGeometry(QtCore.QRect(20, 40, 93, 28))
self.pushButton.setObjectName("pushButton")
self.pushButton_2 = QtWidgets.QPushButton(Dialog)
self.pushButton_2.setGeometry(QtCore.QRect(20, 120, 93, 28))
self.pushButton_2.setObjectName("pushButton_2")
self.pushButton_3 = QtWidgets.QPushButton(Dialog)
self.pushButton_3.setGeometry(QtCore.QRect(20, 210, 93, 28))
self.pushButton_3.setObjectName("pushButton_3")
self.lineEdit = QtWidgets.QLineEdit(Dialog)
self.lineEdit.setGeometry(QtCore.QRect(120, 210, 271, 61))
self.lineEdit.setObjectName("lineEdit")
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
self.pushButton.setText(_translate("Dialog", "1"))
self.pushButton_2.setText(_translate("Dialog", "2"))
self.pushButton_3.setText(_translate("Dialog", "3"))
class PaintingPad(object):
def Pad(self,Dialog):
super(PaintingPad, self).__init__()
        # resize sets the width and height, move sets the position
self.resize(400, 300)
self.move(100, 100)
self.setWindowTitle("简单的画板4.0")
        # set setMouseTracking to False, otherwise mouse events are tracked even when no button is pressed
self.setMouseTracking(False)
        '''
        To keep the trajectory drawn while the mouse button is held down on the widget,
        a list is needed to store all the points that the cursor has passed through.
        '''
self.pos_xy = []
def paintEvent(self, event):
painter = QPainter()
painter.begin(self)
pen = QPen(Qt.black, 2, Qt.SolidLine)
painter.setPen(pen)
        '''
        First check whether pos_xy already contains at least two points,
        then assign the first point of pos_xy to point_start.
        Iterate over the whole pos_xy list with the temporary variable pos_tmp:
            point_end = pos_tmp
            if point_end is a break point:
                set point_start to the break point
                continue
            if point_start is a break point:
                set point_start to point_end
                continue
            draw the line from point_start to point_end
            point_start = point_end
        By repeatedly drawing lines between adjacent points, the mouse trajectory stays on the widget.
        '''
if len(self.pos_xy) > 1:
point_start = self.pos_xy[0]
for pos_tmp in self.pos_xy:
point_end = pos_tmp
if point_end == (-1, -1):
point_start = (-1, -1)
continue
if point_start == (-1, -1):
point_start = point_end
continue
painter.drawLine(point_start[0], point_start[1], point_end[0], point_end[1])
point_start = point_end
painter.end()
def mouseMoveEvent(self, event):
        '''
        Mouse-move-while-pressed event: append the current point to the pos_xy list.
        Calling update() here is equivalent to calling paintEvent().
        On every update(), the traces left by the previous paintEvent() call are cleared first.
        '''
        # the temporary variable pos_tmp holds the current point
        pos_tmp = (event.pos().x(), event.pos().y())
        # append pos_tmp to self.pos_xy
        self.pos_xy.append(pos_tmp)
self.update()
def mouseReleaseEvent(self, event):
        '''
        Override the event fired when the mouse button is released.
        After every release, append a break point (-1, -1) to the pos_xy list.
        Then, when painting, simply check whether a point is a break point;
        if it is, skip it so it is not connected to the previous stroke.
        '''
pos_test = (-1, -1)
self.pos_xy.append(pos_test)
self.update()
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_Dialog()
ui.setupUi(MainWindow)
pad = PaintingPad()
MainWindow.show()
sys.exit(app.exec_())
``` |
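For reference, a condensed, self-contained sketch of the same idea as an actual QWidget subclass (so that paintEvent and the mouse handlers are really invoked); this is an illustration, not the original author's code:
```python
import sys
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QPainter, QPen
from PyQt5.QtWidgets import QApplication, QWidget

class MiniPad(QWidget):
    def __init__(self):
        super().__init__()
        self.resize(400, 300)
        self.pos_xy = []  # stroke points; (-1, -1) marks a break between strokes

    def paintEvent(self, event):
        painter = QPainter(self)
        painter.setPen(QPen(Qt.black, 2, Qt.SolidLine))
        start = None
        for pos in self.pos_xy:
            if pos == (-1, -1) or start in (None, (-1, -1)):
                start = pos
                continue
            painter.drawLine(start[0], start[1], pos[0], pos[1])
            start = pos

    def mouseMoveEvent(self, event):  # only fires while a button is pressed (tracking is off)
        self.pos_xy.append((event.pos().x(), event.pos().y()))
        self.update()

    def mouseReleaseEvent(self, event):
        self.pos_xy.append((-1, -1))
        self.update()

if __name__ == "__main__":
    app = QApplication(sys.argv)
    pad = MiniPad()
    pad.show()
    sys.exit(app.exec_())
```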
{
"source": "JiaYanhao/OpenMMLab-Edu",
"score": 2
} |
#### File: OpenMMLab-Edu/demo/gen_demo.py
```python
from base import *
from MMEdu import MMGeneration
def only_infer_demo():
img = '184_AB.jpg'
model = MMGeneration(backbone="Pix2Pix", dataset_path="../dataset/gen_model/edges2shoes")
model.inference(infer_data=img, save_path = "../results/gen_result.jpg")
def normal_train_demo():
model = MMGeneration(backbone='Pix2Pix')
model.load_dataset(path='../dataset/gen/edges2shoes')
model.save_fold = "../checkpoints/gen_model"
model.train(epochs=50, validate=True, inverse=False)
model.inference(pretrain_model = '../checkpoints/gen_model/ckpt/gen_model/latest.pth',
infer_data= '184_AB.jpg',
save_path = "../results/gen_result.jpg")
def continue_train_demo():
model = MMGeneration(backbone='Pix2Pix')
model.load_dataset(path='../dataset/edges2shoes')
model.save_fold = "../checkpoints/gen_model"
model.train(epochs=15, checkpoint='../checkpoints/gen_model/ckpt/gen_model/latest.pth', validate=True, inverse=True)
if __name__ == "__main__":
only_infer_demo()
# normal_train_demo()
# continue_train_demo()
``` |
{
"source": "jiayaozhang/CS360-3D-point-cloud-",
"score": 3
} |
#### File: CS360-3D-point-cloud-/Homework3/GMM.py
```python
import numpy as np
from numpy import *
import pylab
import random,math
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
from scipy.stats import multivariate_normal
from tqdm import tqdm
plt.style.use('seaborn')
class GMM(object):
def __init__(self, n_clusters, dim = 2, max_iter=50, tolerance=0.001):
# how to choose tolerance value?
self.n_clusters = n_clusters
self.max_iter = max_iter
self.dim = dim
        # Begin of student code
        # update W
        # update pi
        self.weights = np.ones(n_clusters)/n_clusters
        # update Mu
        self.means = np.random.random((n_clusters, self.dim))
        # update Var
"""
GMM.py:45: RuntimeWarning: invalid value encountered in double_scalars
(1/pow(np.linalg.det(self.covs[j]), 0.5)) * \
numpy\linalg\linalg.py:2116: RuntimeWarning: invalid value encountered in det
r = _umath_linalg.det(a, signature=signature)
"""
# self.covs = np.random.random((n_clusters, self.dim, self.dim))
self.covs = np.array(n_clusters * [np.identity(self.dim)])
self.tolerance = tolerance
# print("weights", self.weights)
# print("means", self.means)
# print("covs", self.covs)
# print("covs det", np.linalg.det(self.covs[0]))
# print("covs det sqrt", pow(np.linalg.det(self.covs[0]), 0.5))
        # End of student code
def _gauss(self, j, datum):
# j: the id of gaussian model
# datum: we need to calculate the prob of datum in this model
# print("j", j)
# print("cov", self.covs[j])
# print("det", np.linalg.det(self.covs[j]))
# print("inv", np.linalg.inv(self.covs[j]))
return 1/pow(2*np.pi, self.dim/2) * \
(1/pow(np.linalg.det(self.covs[j]), 0.5)) * \
np.exp(-1/2*np.dot(np.dot((datum-self.means[j]).T,
np.linalg.inv(self.covs[j])),
(datum-self.means[j])))
def fit(self, data):
        # Task 3
        # Begin of student code
N = data.shape[0]
last_log_likelihood = float("-inf")
for cur_iter in range(self.max_iter):
# print("iter", cur_iter)
# E-step: calculate posterior probability
# posterior probability
post_probs = np.zeros((N, self.n_clusters))
for i, datum in enumerate(data):
for j in range(self.n_clusters):
# pdf_official = multivariate_normal.pdf(datum,
# mean=self.means[j], cov=self.covs[j])
# print("pdf official:", pdf_official)
# print("pdf self:", self._gauss(j, datum))
# assert(np.allclose(pdf_official, self._gauss(j, datum)))
post_probs[i][j] = self.weights[j]*self._gauss(j, datum)
post_probs[i] /= post_probs[i].sum()
# M-step: update weights, means and covs
for j in range(self.n_clusters):
N_j = post_probs[:,j].sum()
# view post_probs[:,j] as vector and data as matrix
# calculate their dot product
# method 1
self.means[j] = np.zeros(self.dim)
for i, datum in enumerate(data):
self.means[j] += post_probs[i][j] * datum
self.means[j] /= N_j
# method 2
#self.means[j] = post_probs[:,j].dot(data) / N_j
self.covs[j] = np.zeros((self.dim, self.dim))
for i in range(N):
diff = np.array([data[i] - self.means[j]])
# print(diff.dot(diff.T))
# print(np.matmul(diff.T, diff))
self.covs[j] += post_probs[i][j] * \
np.matmul(diff.T, diff)
# (data[i] - self.means[j]).dot((data[i] - self.means[j]).T)
self.covs[j] /= N_j
self.weights[j] = N_j/N
log_likelihood = 0
for i in range(N):
tmp = 0
for j in range(self.n_clusters):
tmp += self.weights[j] * self._gauss(j, data[i])
log_likelihood += np.log(tmp)
# print(cur_iter, "'s log likelihood:", log_likelihood)
# if log_likelihood - last_log_likelihood < self.tolerance:
# break
last_log_likelihood = log_likelihood
        # End of student code
def predict(self, data):
        # Begin of student code
N = data.shape[0]
post_probs = np.zeros((N, self.n_clusters))
for i, datum in enumerate(data):
for j in range(self.n_clusters):
post_probs[i][j] = self.weights[j]*self._gauss(j, datum)
post_probs[i] /= post_probs[i].sum()
return np.argmax(post_probs, axis=1)
        # End of student code
# Generate simulated data
def generate_X(true_Mu, true_Var):
    # data of the first cluster
num1, mu1, var1 = 400, true_Mu[0], true_Var[0]
X1 = np.random.multivariate_normal(mu1, np.diag(var1), num1)
    # data of the second cluster
num2, mu2, var2 = 600, true_Mu[1], true_Var[1]
X2 = np.random.multivariate_normal(mu2, np.diag(var2), num2)
    # data of the third cluster
num3, mu3, var3 = 1000, true_Mu[2], true_Var[2]
X3 = np.random.multivariate_normal(mu3, np.diag(var3), num3)
    # stack them together
X = np.vstack((X1, X2, X3))
    # display the data
plt.figure(figsize=(10, 8))
plt.axis([-10, 15, -5, 15])
plt.scatter(X1[:, 0], X1[:, 1], s=5)
plt.scatter(X2[:, 0], X2[:, 1], s=5)
plt.scatter(X3[:, 0], X3[:, 1], s=5)
plt.show()
return X
if __name__ == '__main__':
    # generate the data
true_Mu = [[0.5, 0.5], [5.5, 2.5], [1, 7]]
true_Var = [[1, 3], [2, 2], [6, 2]]
X = generate_X(true_Mu, true_Var)
gmm = GMM(n_clusters=3)
gmm.fit(X)
cat = gmm.predict(X)
print(cat)
    # initialization
```
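As a quick sanity check of the Gaussian density used in the E-step, the hand-written `_gauss` can be compared against SciPy (a sketch that assumes the `GMM` class above is available in the same session):
```python
import numpy as np
from scipy.stats import multivariate_normal

gmm = GMM(n_clusters=3)          # dim defaults to 2, covariances start as identity
datum = np.array([0.3, -1.2])
ref = multivariate_normal.pdf(datum, mean=gmm.means[0], cov=gmm.covs[0])
assert np.isclose(gmm._gauss(0, datum), ref)
```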
#### File: CS360-3D-point-cloud-/Homework3/SpectralClustering.py
```python
import numpy as np
from tqdm import tqdm
import time
from sklearn.neighbors import KDTree
import numpy.linalg as LA
from sklearn.cluster import KMeans
class SpectralClustering(object):
    # k is the number of clusters; tolerance is the 'center error' threshold; max_iter is the maximum number of iterations
def __init__(self, n_clusters=2, nnk = 3, nnradius = 1,
normalized = True, use_radius_nn = False,
use_gauss_dist = False, gauss_sigma = 5e-1):
self.k_ = n_clusters
        # Begin of student code
# the k for KNN
self.nnk_ = nnk
self.nnradius_ = nnradius
self.labels_ = np.empty(0)
self.normalized_ = normalized
self.use_radius_nn_ = use_radius_nn
self.use_gauss_dist_ = use_gauss_dist
self.gauss_sigma_ = gauss_sigma
        # End solution block
def gauss_(self, x):
return np.exp(-x*x/(2*self.gauss_sigma_*self.gauss_sigma_))
def fit(self, data):
        # Begin solution block
# data: m * dim array
m = data.shape[0]
# print("m", m)
tree = KDTree(data)
W = np.zeros((m, m))
for di, datum in enumerate(data):
# neighbors' index
if self.use_radius_nn_:
nis, ndists = tree.query_radius([datum], self.nnradius_,
return_distance=True)
else:
# the order of return value is different from query_radius!
ndists, nis = tree.query([datum], self.nnk_+1,
return_distance=True)
nis = nis[0]
ndists = ndists[0]
# print("indices", nis)
# print("ndists", ndists)
# print(nis.shape)
# if len(nis.shape) == 0: continue
# print(di, nis, ndists)
# print("neighbors",nis.shape)
for ni, ndist in zip(nis, ndists):
# the point itself will be one of its knn, need to skip it
if ni == di: continue
if self.use_gauss_dist_:
W[di][ni] = W[ni][di] = self.gauss_(ndist)
else:
W[di][ni] = W[ni][di] = 1/ndist
D = np.diag(W.sum(axis=1))
# unnormalized Laplacian
L = D - W
# for debugging
self.W = W
self.D = D
if self.normalized_:
L = a = np.matmul(LA.inv(D), L)
L = b = np.identity(m) - np.matmul(LA.inv(D), W)
assert(np.allclose(a,b))
# for debugging
self.L = L
eigvals, eigvecs = LA.eig(L)
"""
From numpy.linalg.eig's doc:
The eigenvalues are not necessarily ordered!!
so we need to sort eigen values!!
"""
sorted_idx = np.argsort(eigvals)
# smallest self.k_ eigenvectors
V = eigvecs[:, sorted_idx[:self.k_]]
# for debugging
self.eigvals = eigvals
self.eigvecs = eigvecs
self.V = V
# run kmeans
self.labels_ = KMeans(n_clusters=self.k_).fit_predict(V)
        # End solution block
def predict(self, p_datas):
pass
if __name__ == '__main__':
x = np.array([[1, 2], [1.5, 1.8], [5, 8], [8, 8], [1, 0.6], [9, 11]])
sc = SpectralClustering(n_clusters=2)
sc.fit(x)
cat = sc.labels_
print(cat)
```
#### File: CS360-3D-point-cloud-/Homework4/clustering.py
```python
import numpy as np
import os
import struct
from sklearn import cluster, datasets, mixture
from itertools import cycle, islice
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Function: read a point cloud from a KITTI .bin point cloud file
# Input:
#     path: file path
# Output:
#     point cloud array
def read_velodyne_bin(path):
'''
:param path:
:return: homography matrix of the point cloud, N*3
'''
pc_list = []
with open(path, 'rb') as f:
content = f.read()
pc_iter = struct.iter_unpack('ffff', content)
for idx, point in enumerate(pc_iter):
pc_list.append([point[0], point[1], point[2]])
return np.asarray(pc_list, dtype=np.float32)
# Function: filter ground points out of the point cloud
# Input:
#     data: one complete frame of point cloud
# Output:
#     segmengted_cloud: the point cloud with ground points removed
def ground_segmentation(data):
    # Assignment 1
    # Begin solution block
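    # Editor's sketch (assumption): the original leaves this block blank as a
    # homework exercise. Below is a minimal RANSAC ground-plane removal using
    # plain numpy; the iteration count and distance threshold are arbitrary.
    max_iters, dist_thresh = 100, 0.25
    best_inliers = np.zeros(data.shape[0], dtype=bool)
    for _ in range(max_iters):
        idx = np.random.choice(data.shape[0], 3, replace=False)
        p1, p2, p3 = data[idx]
        normal = np.cross(p2 - p1, p3 - p1)
        norm = np.linalg.norm(normal)
        if norm < 1e-8:
            continue  # degenerate (collinear) sample, draw again
        normal = normal / norm
        dist = np.abs((data - p1).dot(normal))
        inliers = dist < dist_thresh
        if inliers.sum() > best_inliers.sum():
            best_inliers = inliers
    # keep everything that is not on the fitted ground plane
    segmengted_cloud = data[~best_inliers]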
    # End solution block
print('origin data points num:', data.shape[0])
print('segmented data points num:', segmengted_cloud.shape[0])
return segmengted_cloud
# Function: extract clusters from the point cloud
# Input:
#     data: point cloud (after ground removal)
# Output:
#     clusters_index: 1-D array storing the cluster id of each point (easy to follow given the previous chapter)
def clustering(data):
    # Assignment 2
    # Begin solution block
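    # Editor's sketch (assumption, not the original solution): cluster the
    # remaining points with DBSCAN from sklearn, imported above as `cluster`;
    # eps and min_samples are guesses that depend on the sensor and scene.
    clusters_index = cluster.DBSCAN(eps=0.5, min_samples=10).fit_predict(data)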
    # End solution block
return clusters_index
# Function: visualize the clustered point cloud, one color per cluster
# Input:
#     data: point cloud data (after ground removal)
#     cluster_index: 1-D array storing the cluster id of each point (same as above)
def plot_clusters(data, cluster_index):
ax = plt.figure().add_subplot(111, projection = '3d')
colors = np.array(list(islice(cycle(['#377eb8', '#ff7f00', '#4daf4a',
'#f781bf', '#a65628', '#984ea3',
'#999999', '#e41a1c', '#dede00']),
int(max(cluster_index) + 1))))
colors = np.append(colors, ["#000000"])
ax.scatter(data[:, 0], data[:, 1], data[:, 2], s=2, color=colors[cluster_index])
plt.show()
def main():
    root_dir = 'data/' # dataset path
cat = os.listdir(root_dir)
cat = cat[1:]
iteration_num = len(cat)
for i in range(iteration_num):
filename = os.path.join(root_dir, cat[i])
print('clustering pointcloud file:', filename)
origin_points = read_velodyne_bin(filename)
segmented_points = ground_segmentation(data=origin_points)
cluster_index = clustering(segmented_points)
plot_clusters(segmented_points, cluster_index)
if __name__ == '__main__':
main()
``` |
{
"source": "jiayaozhang/CS-370-Mesh-Processing",
"score": 3
} |
#### File: python/scripts/generate_docstrings.py
```python
import os, sys, glob
from joblib import Parallel, delayed
from multiprocessing import cpu_count
from mako.template import Template
from parser import parse
# http://stackoverflow.com/questions/3207219/how-to-list-all-files-of-a-directory-in-python
def get_filepaths(directory):
"""
This function will generate the file names in a directory
tree by walking the tree either top-down or bottom-up. For each
directory in the tree rooted at directory top (including top itself),
it yields a 3-tuple (dirpath, dirnames, filenames).
"""
file_paths = [] # List which will store all of the full filepaths.
root_file_paths = []
# Walk the tree.
for root, directories, files in os.walk(directory):
for filename in files:
# Join the two strings in order to form the full filepath.
filepath = os.path.join(root, filename)
file_paths.append(filepath) # Add it to the list.
if root.endswith(directory): # Add only the files in the root directory
root_file_paths.append(filepath)
return file_paths, root_file_paths # file_paths contains all file paths, core_file_paths only the ones in <directory>
def get_name_from_path(path, basepath, prefix, postfix):
f_clean = os.path.relpath(path, basepath)
f_clean = f_clean.replace(postfix, "")
f_clean = f_clean.replace(prefix, "")
f_clean = f_clean.replace("/", "_")
f_clean = f_clean.replace("\\", "_")
f_clean = f_clean.replace(" ", "_")
f_clean = f_clean.replace(".", "_")
return f_clean
if __name__ == '__main__':
if len(sys.argv) != 3:
print('Syntax: %s generate_docstrings.py <path to libigl C++ header_files> <path to python binding C++ files>' % sys.argv[0])
exit(-1)
# List all files in the given folder and subfolders
cpp_base_path = sys.argv[1]
py_base_path = sys.argv[2]
cpp_file_paths, cpp_root_file_paths = get_filepaths(cpp_base_path)
py_file_paths, py_root_file_paths = get_filepaths(py_base_path)
# Add all the .h filepaths to a dict
mapping = {}
for f in cpp_file_paths:
if f.endswith(".h"):
name = get_name_from_path(f, cpp_base_path, "", ".h")
mapping[name] = f
# Add all python binding files to a list
implemented_names = []
core_implemented_names = []
for f in py_file_paths:
if f.endswith(".cpp"):
name = get_name_from_path(f, py_base_path, "py_", ".cpp")
implemented_names.append(name)
if f in py_root_file_paths:
core_implemented_names.append(name)
implemented_names.sort()
core_implemented_names.sort()
# Create a list of cpp header files for which a python binding file exists
files_to_parse = []
for n in implemented_names:
if n not in mapping:
print("No cpp header file for python function %s found." % n)
continue
files_to_parse.append(mapping[n])
# print(mapping[n])
# Parse c++ header files
job_count = cpu_count()
dicts = Parallel(n_jobs=job_count)(delayed(parse)(path) for path in files_to_parse)
hpplines = []
cpplines = []
for idx, n in enumerate(implemented_names):
d = dicts[idx]
contained_elements = sum(map(lambda x: len(x), d.values()))
# Check for files that don't contain functions/enums/classes
if contained_elements == 0:
print("Function %s contains no parseable content in cpp header. Something might be wrong." % n)
continue
else:
names = []
namespaces = "_".join(d["namespaces"]) # Assumption that all entities lie in deepest namespace
for f in d["functions"]:
h_string = "extern const char *__doc_" + namespaces + "_" + f.name + ";\n"
docu_string = "See " + f.name + " for the documentation."
if f.documentation:
docu_string = f.documentation
cpp_string = "const char *__doc_" + namespaces + "_" + f.name + " = R\"igl_Qu8mg5v7(" + docu_string + ")igl_Qu8mg5v7\";\n"
if f.name not in names: # Prevent multiple additions of declarations, TODO: Possible fix is to merge comments and add them to all functions
hpplines.append(h_string)
cpplines.append(cpp_string)
names.append(f.name)
# Change directory to become independent of execution directory
path = os.path.dirname(__file__)
if path != "":
os.chdir(path)
# Update the two files py_doc.h and py_doc.cpp
with open('../py_doc.h', 'w') as fh:
fh.writelines(hpplines)
with open('../py_doc.cpp', 'w') as fc:
fc.writelines(cpplines)
# Write python_shared_cpp file
tpl = Template(filename='python_shared.mako')
rendered = tpl.render(functions=implemented_names)
with open("../python_shared.cpp", 'w') as fs:
fs.write(rendered)
# Write py_igl_cpp file with all core library files
tpl = Template(filename='py_igl.mako')
rendered = tpl.render(functions=core_implemented_names)
with open("../py_igl.cpp", 'w') as fs:
fs.write(rendered)
```
#### File: python/tutorial/605_Tetgen.py
```python
import sys, os
# Add the igl library to the modules search path
sys.path.insert(0, os.getcwd() + "/../")
import pyigl as igl
from shared import TUTORIAL_SHARED_PATH, check_dependencies
dependencies = ["tetgen", "glfw"]
check_dependencies(dependencies)
# Input polygon
V = igl.eigen.MatrixXd()
F = igl.eigen.MatrixXi()
B = igl.eigen.MatrixXd()
# Tetrahedralized interior
TV = igl.eigen.MatrixXd()
TT = igl.eigen.MatrixXi()
TF = igl.eigen.MatrixXi()
viewer = igl.glfw.Viewer()
def key_down(viewer, key, modifier):
if key >= ord('1') and key <= ord('9'):
t = float((key - ord('1')) + 1) / 9.0
v = igl.eigen.MatrixXd()
v = B.col(2) - B.col(2).minCoeff()
v /= v.col(0).maxCoeff()
s = []
for i in range(v.size()):
if v[i, 0] < t:
s.append(i)
V_temp = igl.eigen.MatrixXd(len(s) * 4, 3)
F_temp = igl.eigen.MatrixXd(len(s) * 4, 3).castint()
for i in range(len(s)):
V_temp.setRow(i * 4 + 0, TV.row(TT[s[i], 0]))
V_temp.setRow(i * 4 + 1, TV.row(TT[s[i], 1]))
V_temp.setRow(i * 4 + 2, TV.row(TT[s[i], 2]))
V_temp.setRow(i * 4 + 3, TV.row(TT[s[i], 3]))
F_temp.setRow(i * 4 + 0, igl.eigen.MatrixXd([[(i*4)+0, (i*4)+1, (i*4)+3]]).castint())
F_temp.setRow(i * 4 + 1, igl.eigen.MatrixXd([[(i*4)+0, (i*4)+2, (i*4)+1]]).castint())
F_temp.setRow(i * 4 + 2, igl.eigen.MatrixXd([[(i*4)+3, (i*4)+2, (i*4)+0]]).castint())
F_temp.setRow(i * 4 + 3, igl.eigen.MatrixXd([[(i*4)+1, (i*4)+2, (i*4)+3]]).castint())
viewer.data().clear()
viewer.data().set_mesh(V_temp, F_temp)
viewer.data().set_face_based(True)
else:
return False
return True
# Load a surface mesh
igl.readOFF(TUTORIAL_SHARED_PATH + "fertility.off", V, F)
# Tetrahedralize the interior
igl.tetgen.tetrahedralize(V, F, "pq1.414Y", TV, TT, TF)
# Compute barycenters
igl.barycenter(TV, TT, B)
# Plot the generated mesh
key_down(viewer, ord('5'), 0)
viewer.callback_key_down = key_down
viewer.launch()
``` |
{
"source": "jiayawei119/CDSelector",
"score": 2
} |
#### File: jiayawei119/CDSelector/UCAS_Selector.py
```python
import sys
import time
import requests
from configparser import RawConfigParser
from bs4 import BeautifulSoup
class UCAS_Selector:
def __init__(self):
self.__readCoursesId('./courseid')
cf = RawConfigParser()
cf.read('config')
self.username = cf.get('info', 'username')
self.password = cf.get('info', 'password')
self.runtime = cf.getint('info', 'runtime')
self.debug = cf.getboolean('action', 'debug')
self.enroll = cf.getboolean('action', 'enroll')
self.evaluate = cf.getboolean('action', 'evaluate')
self.select_bat = cf.getboolean('action', 'select_bat')
self.loginPage = 'http://sep.ucas.ac.cn'
self.loginUrl = self.loginPage + '/slogin'
self.courseSystem = self.loginPage + '/portal/site/226/821'
self.courseBase = 'http://jwxk.ucas.ac.cn'
self.courseIdentify = self.courseBase + '/login?Identity='
self.courseSelected = self.courseBase + '/courseManage/selectedCourse'
self.courseSelectionBase = self.courseBase + '/courseManage/main'
self.courseCategory = self.courseBase + '/courseManage/selectCourse?s='
self.courseSave = self.courseBase + '/courseManage/saveCourse?s='
self.studentCourseEvaluateUrl = 'http://jwjz.ucas.ac.cn/Student/DeskTopModules/'
self.selectCourseUrl = 'http://jwjz.ucas.ac.cn/Student/DesktopModules/Course/SelectCourse.aspx'
self.enrollCount = {}
self.headers = {
'Host': 'sep.ucas.ac.cn',
'Connection': 'keep-alive',
'Pragma': 'no-cache',
'Cache-Control': 'no-cache',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.80 Safari/537.36',
'Accept-Encoding': 'gzip, deflate, sdch',
'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6',
}
self.s = requests.Session()
loginPage = self.s.get(self.loginPage, headers=self.headers)
self.cookies = loginPage.cookies
def login(self):
postdata = {
'userName': self.username,
            'pwd': self.password,
'sb': 'sb'
}
self.s.post(self.loginUrl, data=postdata, headers=self.headers)
if 'sepuser' in self.s.cookies.get_dict():
return True
return False
def getMessage(self, restext):
css_soup = BeautifulSoup(restext, 'html.parser')
text = css_soup.select('#main-content > div > div.m-cbox.m-lgray > div.mc-body > div')[0].text
return "".join(line.strip() for line in text.split('\n'))
def __readCoursesId(self, filename):
coursesFile = open(filename, 'r')
self.coursesId = {}
for line in coursesFile.readlines():
line = line.strip().replace(' ', '').split(':')
courseId = line[0]
isDegree = False
if len(line) == 2 and line[1] == 'on':
isDegree = True
self.coursesId[courseId] = isDegree
def enrollCourses(self):
response = self.s.get(self.courseSystem, headers=self.headers)
soup = BeautifulSoup(response.text, 'html.parser')
try:
identity = str(soup).split('Identity=')[1].split('"'[0])[0]
coursePage = self.courseIdentify + identity
response = self.s.get(coursePage)
response = self.s.get(self.courseSelected)
idx, lastMsg = 0, ""
while True:
msg = ""
if self.select_bat:
# print self.coursesId
result, msg = self.__enrollCourses(self.coursesId)
                    if result:
                        for c in self.coursesId:
                            self.enrollCount[c] = 0
else:
for eachCourse in self.coursesId:
if eachCourse in response.text:
print("Course " + eachCourse + " has been selected.")
continue
if (eachCourse in self.enrollCount and
self.enrollCount[eachCourse] == 0):
continue
self.enrollCount[eachCourse] = 1
result = self.__enrollCourse(eachCourse, self.coursesId[eachCourse])
if result:
self.enrollCount[eachCourse] = 0
for enroll in self.enrollCount:
if self.enrollCount[enroll] == 0:
self.coursesId.pop(enroll)
self.enrollCount.clear()
if not self.coursesId: return
idx += 1
time.sleep(self.runtime)
showText = "\r> " + "%s <%d> %s" % (
msg if msg!=lastMsg else "", idx,
time.asctime( time.localtime(time.time()) )
)
lastMsg = msg
sys.stdout.write(showText)
sys.stdout.flush()
except KeyboardInterrupt:
print("Bye")
except Exception as exception:
print("System error")
print(exception)
exit()
def __enrollCourse(self, courseId, isDegree):
response = self.s.get(self.courseSelectionBase)
        if self.debug:
            print(response.text.encode('utf-8'))
soup = BeautifulSoup(response.text, 'html.parser')
categories = dict([(label.contents[0][:2], label['for'][3:])
for label in soup.find_all('label')[2:]])
categoryId = categories[courseId[:2]]
identity = soup.form['action'].split('=')[1]
postdata = {
'deptIds': categoryId,
'sb': 0
}
categoryUrl = self.courseCategory + identity
response = self.s.post(categoryUrl, data=postdata)
        if self.debug:
            print(response.text.encode('utf-8'))
soup = BeautifulSoup(response.text, 'html.parser')
courseTable = soup.body.form.table.find_all('tr')[1:]
courseDict = dict([(c.span.contents[0], c.span['id'].split('_')[1])
for c in courseTable])
if courseId in courseDict:
postdata = {
'deptIds': categoryId,
'sids': courseDict[courseId]
}
            print(categoryId)
if isDegree:
postdata['did_' + courseDict[courseId]] = courseDict[courseId]
courseSaveUrl = self.courseSave + identity
response = self.s.post(courseSaveUrl, data=postdata)
if 'class="error' not in response.text:
print('[Success] ' + courseId)
return True
else: return False
else:
print("No such course")
return True
def __enrollCourses(self, courseIds): # For English
response = self.s.get(self.courseSelectionBase)
        if self.debug: print(response.text.encode('utf-8'))
soup = BeautifulSoup(response.text, 'html.parser')
categories = dict([(label.contents[0][:2], label['for'][3:])
for label in soup.find_all('label')[2:]])
identity = soup.form['action'].split('=')[1]
categoryIds = []
for courseId in courseIds:
categoryIds.append(categories[courseId[:2]])
postdata = {
'deptIds': categoryIds,
'sb': 0
}
categoryUrl = self.courseCategory + identity
response = self.s.post(categoryUrl, data=postdata)
        if self.debug: print(response.text.encode('utf-8'))
soup = BeautifulSoup(response.text, 'html.parser')
courseTable = soup.body.form.table.find_all('tr')[1:]
courseDict = dict([(c.span.contents[0], c.span['id'].split('_')[1])
for c in courseTable])
postdata = {
'deptIds': categoryIds,
'sids': [courseDict[courseId] for courseId in courseIds]
}
courseSaveUrl = self.courseSave + identity
response = self.s.post(courseSaveUrl, data=postdata)
with open('result.html','wb+') as f:
f.write(response.text.encode('utf-8'))
if 'class="error' not in response.text:
print('[Success] ' + courseId)
return True, "Success!"
else: return False, self.getMessage(response.text).strip()
``` |
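The class above expects two local files whose layout is only implied by the parsing code: a `config` file with `[info]`/`[action]` sections read in `__init__`, and a `courseid` file read by `__readCoursesId` with one course id per line and an optional `:on` suffix marking a degree course. A minimal sketch of both, with placeholder values (keys taken from the reads above):
```python
config_example = """
[info]
username = your_sep_username
password = your_password
runtime = 5

[action]
debug = false
enroll = true
evaluate = false
select_bat = false
"""

courseid_example = """COURSE_ID_1:on
COURSE_ID_2
"""
```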
{
"source": "jiayeguo/geometry_analysis",
"score": 3
} |
#### File: geometry_analysis/tests/test_geometry_analysis.py
```python
import geometry_analysis
import pytest
import sys
import numpy as np
@pytest.fixture()
def water_molecule():
name = "water"
symbols = ["H", "O", "H"]
coordinates = np.array([[2, 0, 0], [0, 0, 0], [-2, 0, 0]])
water = geometry_analysis.Molecule(name, symbols, coordinates)
return water
def test_create_failure():
name = 25
symbols = ["H", "O", "H"]
coordinates = np.zeros([3, 3])
with pytest.raises(TypeError):
water = geometry_analysis.Molecule(name, symbols, coordinates)
def test_molecule_set_coordinates(water_molecule):
"""Test that bond list is rebuilt when we reset coordinates"""
num_bonds = len(water_molecule.bonds)
assert num_bonds == 2 # initially there should be two bonds
new_coordinates = np.array([[5, 0, 0], [0, 0, 0], [-2, 0, 0]])
water_molecule.coordinates = new_coordinates
new_bonds = len(water_molecule.bonds)
assert new_bonds == 1 # one bond should be broken
assert np.array_equal(new_coordinates, water_molecule.coordinates)
def test_geometry_analysis_imported():
"""Sample test, will always pass so long as import statement worked"""
assert "geometry_analysis" in sys.modules
def test_calculate_distance():
"""Test the calculate_distance function"""
r1 = np.array([0, 0, -1])
r2 = np.array([0, 1, 0])
expected_distance = np.sqrt(2.)
    calculated_distance = geometry_analysis.calculate_distance(r1, r2)
    assert expected_distance == calculated_distance
def test_angle_calculation_angle_90():
"""Test the calculate_angle function: this is a case where the answer is exact"""
rA = np.array([1, 0, 0])
rB = np.array([0, 0, 0])
rC = np.array([0, 1, 0])
expected_angle = 90
calculated_angle = geometry_analysis.calculate_angle(rA, rB, rC, degrees=True)
assert expected_angle == calculated_angle
def test_angle_calculation_angle_60():
"""Test the calculate_angle function: this is a case where the answer is not exact"""
rA = np.array([0, 0, -1])
rB = np.array([0, 1, 0])
rC = np.array([1, 0, 0])
expected_angle = 60.
calculated_angle = geometry_analysis.calculate_angle(rA, rB, rC, degrees=True)
assert np.isclose(expected_angle, calculated_angle)
#assert AlmostEqual(expected_angle, calculated_angle) (??)
@pytest.mark.parametrize("p1, p2, p3, expected_angle", [(np.array([1, 0, 0]), np.array([0, 0, 0]), np.array([0, 1, 0]), 90), (np.array([0, 0, -1]), np.array([0, 1, 0]), np.array([1, 0, 0]), 60)])
def test_calculated_angle(p1, p2, p3, expected_angle):
calculated_angle = geometry_analysis.calculate_angle(p1, p2, p3, degrees=True)
assert np.isclose(expected_angle, calculated_angle)
``` |
{
"source": "jiayeguo/sams_dunbrack",
"score": 2
} |
#### File: sams_dunbrack/unbiased/unbiased_simulation.py
```python
import os
from pdbfixer import PDBFixer
import simtk.openmm as mm
from simtk.openmm import unit, version, Context
from simtk.openmm.app import Topology, PDBFile, Modeller, ForceField, PDBxFile, PME, Simulation, StateDataReporter
from mdtraj.reporters import NetCDFReporter
# set up basic parameters
experiment = 'state0' # setting of the experiment (e.g. different combinations of the CVs)
pdbid = '1M17' # PDB ID of the system
chain = 'A'
min_steps = 100000
steps = 250000000 ## 500 ns
work_dir = f'/home/guoj1/data_projects/cv_selection/unbiased_simulation/{experiment}'
temperature = 310.15 * unit.kelvin
pressure = 1.0 * unit.atmospheres
# if protein is not minimized
if not os.path.isfile(os.path.join(work_dir,f'{pdbid}_chain{chain}_minimized.pdb')):
print("Need to minimize the protein structure.")
## clean up the input pdb file using pdbfixer and load using Modeller
if not os.path.isfile(os.path.join(work_dir,f'{pdbid}_chain{chain}.pdb')):
fixer = PDBFixer(url=f'http://www.pdb.org/pdb/files/{pdbid}.pdb')
        '''
        In this case the PDB does not go through PDBFixer cleanly after chain
        selection, so fix the full structure first and then select the chain.
        '''
## find missing residues
fixer.findMissingResidues()
# modify missingResidues so the extra residues on the end are ignored
fixer.missingResidues = {}
# remove ligand but keep crystal waters
fixer.removeHeterogens(True)
print("Done removing heterogens.")
# find missing atoms/terminals
fixer.findMissingAtoms()
if fixer.missingAtoms or fixer.missingTerminals:
fixer.addMissingAtoms()
print("Done adding atoms/terminals.")
else:
print("No atom/terminal needs to be added.")
# add hydrogens
fixer.addMissingHydrogens(7.0)
print("Done adding hydrogens.")
# output fixed pdb
PDBFile.writeFile(fixer.topology, fixer.positions, open(f'{pdbid}_fixed.pdb', 'w'), keepIds=True)
print("Done outputing the fixed pdb file.")
## select the chain from the original pdb file
from Bio.PDB import Select, PDBIO
from Bio.PDB.PDBParser import PDBParser
class ChainSelect(Select):
def __init__(self, chain):
self.chain = chain
def accept_chain(self, chain):
if chain.get_id() == self.chain:
return 1
else:
return 0
p = PDBParser(PERMISSIVE=1)
structure = p.get_structure(f'{pdbid}', f'{pdbid}_fixed.pdb')
pdb_chain_file = f'chain_{chain}.pdb'
io_w_no_h = PDBIO()
io_w_no_h.set_structure(structure)
io_w_no_h.save(f'{pdbid}_chain{chain}.pdb', ChainSelect(chain))
print("The fixed.pdb file with selected chain is ready.")
# load pdb to Modeller
pdb = PDBFile(f'{pdbid}_chain{chain}.pdb')
molecule = Modeller(pdb.topology,pdb.positions)
print("Done loading pdb to Modeller.")
# load force field
forcefield = ForceField('amber14-all.xml', 'amber14/tip3pfb.xml')
print("Done loading force field.")
print("OpenMM version:", version.version)
# prepare system
molecule.addSolvent(forcefield, padding=12*unit.angstrom, model='tip3p', positiveIon='Na+', negativeIon='Cl-', ionicStrength=0*unit.molar)
print("Done adding solvent.")
PDBxFile.writeFile(molecule.topology,molecule.positions,open(f'{pdbid}_chain{chain}.pdbx', 'w'), keepIds=True)
PDBFile.writeFile(molecule.topology,molecule.positions,open(f'{pdbid}_chain{chain}_solvated.pdb', 'w'), keepIds=True)
print("Done outputing pdbx and solvated pdb.")
system = forcefield.createSystem(molecule.topology, nonbondedMethod=PME, rigidWater=True, nonbondedCutoff=1*unit.nanometer)
# specify the rest of the context for minimization
integrator = mm.VerletIntegrator(0.5*unit.femtoseconds)
print("Done specifying integrator.")
platform = mm.Platform.getPlatformByName('CUDA')
print("Done specifying platform.")
platform.setPropertyDefaultValue('Precision', 'mixed')
print("Done setting the precision to mixed.")
minimize = Simulation(molecule.topology, system, integrator, platform)
print("Done specifying simulation.")
minimize.context.setPositions(molecule.positions)
print("Done recording a context for positions.")
minimize.context.setVelocitiesToTemperature(310.15*unit.kelvin)
print("Done assigning velocities.")
# start minimization
tolerance = 0.1*unit.kilojoules_per_mole/unit.angstroms
print("Done setting tolerance.")
minimize.minimizeEnergy(tolerance=tolerance,maxIterations=1000)
print("Done setting energy minimization.")
minimize.reporters.append(StateDataReporter('relax-hydrogens.log', 1000, step=True, temperature=True, potentialEnergy=True, totalEnergy=True, speed=True))
minimize.step(min_steps)
print("Done 100000 steps of minimization.")
print("Potential energy after minimization:")
#print(minimize.context.getState(getEnergy=True).getPotentialEnergy())
positions = minimize.context.getState(getPositions=True).getPositions()
print("Done updating positions.")
#velocities = minimize.context.getState(getVelocities=True).getVelocities()
#print("Done updating velocities.")
minimize.saveCheckpoint('state.chk')
print("Done saving checkpoints.")
# update the current context with changes in system
# minimize.context.reinitialize(preserveState=True)
# output the minimized protein as a shortcut
PDBFile.writeFile(molecule.topology,positions,open(f'{pdbid}_chain{chain}_minimized.pdb', 'w'), keepIds=True)
print("Done outputing minimized pdb.")
# clean the context
del minimize.context
# directly load the minimized protein
pdb = PDBFile(f'{pdbid}_chain{chain}_minimized.pdb')
molecule = Modeller(pdb.topology,pdb.positions)
# load force field
forcefield = ForceField('amber14-all.xml', 'amber14/tip3pfb.xml')
print("Done loading force field.")
print("OpenMM version:", version.version)
system = forcefield.createSystem(molecule.topology, nonbondedMethod=PME, rigidWater=True, nonbondedCutoff=1*unit.nanometer)
# Set up the context for unbiased simulation
integrator = mm.LangevinIntegrator(temperature, 1.0/unit.picosecond, 0.002*unit.picoseconds) ## 2 fs time steps
platform = mm.Platform.getPlatformByName('CUDA')
print("Done specifying integrator and platform for simulation.")
simulation = Simulation(molecule.topology, system, integrator, platform)
simulation.context.setPositions(molecule.positions)
print("Done recording a context for positions.")
simulation.context.setVelocitiesToTemperature(310.15*unit.kelvin)
print("Done assigning velocities.")
storage_path = os.path.join(work_dir,'traj.nc')
simulation.reporters.append(NetCDFReporter(storage_path, reportInterval=250000, coordinates=True))
print("Done specifying simulation.")
simulation.step(steps)
print(f"Done with {steps} steps of simulation.")
``` |
{
"source": "jiayi42/Detect_COVID19_Fake_News_in_Twitter",
"score": 3
} |
#### File: fact_check_websites_clawer_and_LDA/web crawler/get_article.py
```python
import requests
import json
from bs4 import BeautifulSoup
# w = open("test2.txt", "w")
def dateMap(month) -> str:
if month == "nov":
return str(11)
elif month == "oct":
return str(10)
elif month == "sep":
return str(9)
elif month == "aug":
return str(8)
elif month == "jul":
return str(7)
elif month == "jun":
return str(6)
elif month == "may":
return str(5)
elif month == "apr":
return str(4)
elif month == "mar":
return str(3)
elif month == "feb":
return str(2)
elif month == "jan":
return str(1)
elif month == "dec":
return str(12)
return "unknown"
result = {}
for page in range(1, 136):
url = "https://www.politifact.com/search/factcheck/?page=" + str(page) + "&q=covid"
req = requests.get(url)
soup = BeautifulSoup(req.content, 'html.parser')
# print(soup.prettify())
# print(soup)
mydivs = soup.findAll("div", {"class": "c-textgroup__title"})
for div in mydivs:
component_in_url = div.contents[1]["href"].split("/")
component_in_url[3] = dateMap(component_in_url[3])
key = "_".join(component_in_url[2:5]) + "_" + component_in_url[-2]
date = "_".join(component_in_url[2:5])
urlArticle = "https://www.politifact.com" + div.contents[1]["href"]
reqArticle = requests.get(urlArticle)
soupArticle = BeautifulSoup(reqArticle.content, "html.parser")
contextArticle = soupArticle.find("article", {"class": "m-textblock"})
all_p = contextArticle.contents[1].find_all("p")
context = ""
for p in all_p:
context += str(p.text)
# file.write(p.text)
result[key] = {}
result[key]["date"] = date
result[key]["context"] = context
print("Complete {}/135 pages".format(page))
with open('context2.json', 'w') as outfile:
json.dump(result, outfile)
# mydivs = soup.find("div", {"class": "c-textgroup__title"})
# print(mydivs.contents[1]["href"].split("/"))
# urlArticle = "https://www.politifact.com" + mydivs.contents[1]["href"]
# print(urlArticle)
# reqArticle = requests.get(urlArticle)
# soupArticle = BeautifulSoup(reqArticle.content, "html.parser")
# a = soupArticle.find("article", {"class": "m-textblock"})
# allP = a.contents[1].find_all("p")
# for p in allP:
# print(p.text)
# print(type(soupArticle))
# w.write(soupArticle.prettify())
# myimgs = soup.findAll("img", {"class": "c-image__original"})
# for idx, div in enumerate(mydivs):
# print(str(idx) + ": " + div.contents[1].contents[0].string.strip())
# for idx, img in enumerate(myimgs):
# print(str(idx) + ": " + str("true" in img["src"]))
# w.close()
``` |
{
"source": "jia-yi-chen/multimodal-deep-learning",
"score": 2
} |
#### File: multimodal-deep-learning/Low-rank-Multimodal-Fusion/utils.py
```python
from torch.utils.data import Dataset
import sys
if sys.version_info.major == 2:
import cPickle as pickle
else:
import pickle
import pdb
AUDIO = b'covarep'
VISUAL = b'facet'
TEXT = b'glove'
LABEL = b'label'
TRAIN = b'train'
VALID = b'valid'
TEST = b'test'
def total(params):
'''
count the total number of hyperparameter settings
'''
settings = 1
for k, v in params.items():
settings *= len(v)
return settings
def load_pom(data_path):
# parse the input args
class POM(Dataset):
'''
PyTorch Dataset for POM, don't need to change this
'''
def __init__(self, audio, visual, text, labels):
self.audio = audio
self.visual = visual
self.text = text
self.labels = labels
def __getitem__(self, idx):
return [self.audio[idx, :], self.visual[idx, :], self.text[idx, :, :], self.labels[idx]]
def __len__(self):
return self.audio.shape[0]
if sys.version_info.major == 2:
pom_data = pickle.load(open(data_path + "pom.pkl", 'rb'))
else:
pom_data = pickle.load(open(data_path + "pom.pkl", 'rb'), encoding='bytes')
pom_train, pom_valid, pom_test = pom_data[TRAIN], pom_data[VALID], pom_data[TEST]
train_audio, train_visual, train_text, train_labels \
= pom_train[AUDIO], pom_train[VISUAL], pom_train[TEXT], pom_train[LABEL]
valid_audio, valid_visual, valid_text, valid_labels \
= pom_valid[AUDIO], pom_valid[VISUAL], pom_valid[TEXT], pom_valid[LABEL]
test_audio, test_visual, test_text, test_labels \
= pom_test[AUDIO], pom_test[VISUAL], pom_test[TEXT], pom_test[LABEL]
# code that instantiates the Dataset objects
train_set = POM(train_audio, train_visual, train_text, train_labels)
valid_set = POM(valid_audio, valid_visual, valid_text, valid_labels)
test_set = POM(test_audio, test_visual, test_text, test_labels)
audio_dim = train_set[0][0].shape[0]
print("Audio feature dimension is: {}".format(audio_dim))
visual_dim = train_set[0][1].shape[0]
print("Visual feature dimension is: {}".format(visual_dim))
text_dim = train_set[0][2].shape[1]
print("Text feature dimension is: {}".format(text_dim))
input_dims = (audio_dim, visual_dim, text_dim)
# remove possible NaN values
train_set.visual[train_set.visual != train_set.visual] = 0
valid_set.visual[valid_set.visual != valid_set.visual] = 0
test_set.visual[test_set.visual != test_set.visual] = 0
train_set.audio[train_set.audio != train_set.audio] = 0
valid_set.audio[valid_set.audio != valid_set.audio] = 0
test_set.audio[test_set.audio != test_set.audio] = 0
return train_set, valid_set, test_set, input_dims
def load_iemocap(data_path, emotion):
# parse the input args
class IEMOCAP(Dataset):
'''
PyTorch Dataset for IEMOCAP, don't need to change this
'''
def __init__(self, audio, visual, text, labels):
self.audio = audio
self.visual = visual
self.text = text
self.labels = labels
def __getitem__(self, idx):
return [self.audio[idx, :], self.visual[idx, :], self.text[idx, :, :], self.labels[idx]]
def __len__(self):
return self.audio.shape[0]
if sys.version_info.major == 2:
iemocap_data = pickle.load(open(data_path + "iemocap.pkl", 'rb'))
else:
iemocap_data = pickle.load(open(data_path + "iemocap.pkl", 'rb'), encoding='bytes')
iemocap_train, iemocap_valid, iemocap_test = iemocap_data[emotion][TRAIN], iemocap_data[emotion][VALID], iemocap_data[emotion][TEST]
train_audio, train_visual, train_text, train_labels \
= iemocap_train[AUDIO], iemocap_train[VISUAL], iemocap_train[TEXT], iemocap_train[LABEL]
valid_audio, valid_visual, valid_text, valid_labels \
= iemocap_valid[AUDIO], iemocap_valid[VISUAL], iemocap_valid[TEXT], iemocap_valid[LABEL]
test_audio, test_visual, test_text, test_labels \
= iemocap_test[AUDIO], iemocap_test[VISUAL], iemocap_test[TEXT], iemocap_test[LABEL]
# code that instantiates the Dataset objects
train_set = IEMOCAP(train_audio, train_visual, train_text, train_labels)
valid_set = IEMOCAP(valid_audio, valid_visual, valid_text, valid_labels)
test_set = IEMOCAP(test_audio, test_visual, test_text, test_labels)
audio_dim = train_set[0][0].shape[0]
print("Audio feature dimension is: {}".format(audio_dim))
visual_dim = train_set[0][1].shape[0]
print("Visual feature dimension is: {}".format(visual_dim))
text_dim = train_set[0][2].shape[1]
print("Text feature dimension is: {}".format(text_dim))
input_dims = (audio_dim, visual_dim, text_dim)
# remove possible NaN values
train_set.visual[train_set.visual != train_set.visual] = 0
valid_set.visual[valid_set.visual != valid_set.visual] = 0
test_set.visual[test_set.visual != test_set.visual] = 0
train_set.audio[train_set.audio != train_set.audio] = 0
valid_set.audio[valid_set.audio != valid_set.audio] = 0
test_set.audio[test_set.audio != test_set.audio] = 0
return train_set, valid_set, test_set, input_dims
def load_mosi(data_path):
# parse the input args
class MOSI(Dataset):
'''
PyTorch Dataset for MOSI, don't need to change this
'''
def __init__(self, audio, visual, text, labels):
self.audio = audio
self.visual = visual
self.text = text
self.labels = labels
def __getitem__(self, idx):
return [self.audio[idx, :], self.visual[idx, :], self.text[idx, :, :], self.labels[idx]]
def __len__(self):
return self.audio.shape[0]
if sys.version_info.major == 2:
mosi_data = pickle.load(open(data_path + "mosi.pkl", 'rb'))
else:
mosi_data = pickle.load(open(data_path + "mosi.pkl", 'rb'), encoding='bytes')
mosi_train, mosi_valid, mosi_test = mosi_data[TRAIN], mosi_data[VALID], mosi_data[TEST]
train_audio, train_visual, train_text, train_labels \
= mosi_train[AUDIO], mosi_train[VISUAL], mosi_train[TEXT], mosi_train[LABEL]
valid_audio, valid_visual, valid_text, valid_labels \
= mosi_valid[AUDIO], mosi_valid[VISUAL], mosi_valid[TEXT], mosi_valid[LABEL]
test_audio, test_visual, test_text, test_labels \
= mosi_test[AUDIO], mosi_test[VISUAL], mosi_test[TEXT], mosi_test[LABEL]
print(train_audio.shape)
print(train_visual.shape)
print(train_text.shape)
print(train_labels.shape)
# code that instantiates the Dataset objects
train_set = MOSI(train_audio, train_visual, train_text, train_labels)
valid_set = MOSI(valid_audio, valid_visual, valid_text, valid_labels)
test_set = MOSI(test_audio, test_visual, test_text, test_labels)
audio_dim = train_set[0][0].shape[0]
print("Audio feature dimension is: {}".format(audio_dim))
visual_dim = train_set[0][1].shape[0]
print("Visual feature dimension is: {}".format(visual_dim))
text_dim = train_set[0][2].shape[1]
print("Text feature dimension is: {}".format(text_dim))
input_dims = (audio_dim, visual_dim, text_dim)
# remove possible NaN values
train_set.visual[train_set.visual != train_set.visual] = 0
valid_set.visual[valid_set.visual != valid_set.visual] = 0
test_set.visual[test_set.visual != test_set.visual] = 0
train_set.audio[train_set.audio != train_set.audio] = 0
valid_set.audio[valid_set.audio != valid_set.audio] = 0
test_set.audio[test_set.audio != test_set.audio] = 0
return train_set, valid_set, test_set, input_dims
```
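A minimal usage sketch for the loaders above (the data path, batch size, and working directory are assumptions; the corresponding `.pkl` file must exist under the path):
```python
from torch.utils.data import DataLoader
from utils import load_mosi

train_set, valid_set, test_set, input_dims = load_mosi('./data/')
train_loader = DataLoader(train_set, batch_size=32, shuffle=True)
for audio, visual, text, label in train_loader:
    # each item follows the order defined in __getitem__: audio, visual, text, label
    print(audio.shape, visual.shape, text.shape, label.shape)
    break
```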
#### File: MISA/src/models.py
```python
import math
import numpy as np
import random
import torch
import torch.nn as nn
from torch.autograd import Function
from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence, pad_packed_sequence
from transformers import BertModel, BertConfig
from utils import to_gpu
from utils import ReverseLayerF
def masked_mean(tensor, mask, dim):
"""Finding the mean along dim"""
masked = torch.mul(tensor, mask)
return masked.sum(dim=dim) / mask.sum(dim=dim)
def masked_max(tensor, mask, dim):
"""Finding the max along dim"""
masked = torch.mul(tensor, mask)
neg_inf = torch.zeros_like(tensor)
neg_inf[~mask] = -math.inf
return (masked + neg_inf).max(dim=dim)
# let's define a simple model that can deal with multimodal variable length sequence
class MISA(nn.Module):
def __init__(self, config):
super(MISA, self).__init__()
self.config = config
self.text_size = config.embedding_size
self.visual_size = config.visual_size
self.acoustic_size = config.acoustic_size
self.input_sizes = input_sizes = [self.text_size, self.visual_size, self.acoustic_size]
self.hidden_sizes = hidden_sizes = [int(self.text_size), int(self.visual_size), int(self.acoustic_size)]
self.output_size = output_size = config.num_classes
self.dropout_rate = dropout_rate = config.dropout
self.activation = self.config.activation()
self.tanh = nn.Tanh()
rnn = nn.LSTM if self.config.rnncell == "lstm" else nn.GRU
# defining modules - two layer bidirectional LSTM with layer norm in between
if self.config.use_bert:
# Initializing a BERT bert-base-uncased style configuration
bertconfig = BertConfig.from_pretrained('bert-base-uncased', output_hidden_states=True)
self.bertmodel = BertModel.from_pretrained('bert-base-uncased', config=bertconfig)
else:
self.embed = nn.Embedding(len(config.word2id), input_sizes[0])
self.trnn1 = rnn(input_sizes[0], hidden_sizes[0], bidirectional=True)
self.trnn2 = rnn(2*hidden_sizes[0], hidden_sizes[0], bidirectional=True)
self.vrnn1 = rnn(input_sizes[1], hidden_sizes[1], bidirectional=True)
self.vrnn2 = rnn(2*hidden_sizes[1], hidden_sizes[1], bidirectional=True)
self.arnn1 = rnn(input_sizes[2], hidden_sizes[2], bidirectional=True)
self.arnn2 = rnn(2*hidden_sizes[2], hidden_sizes[2], bidirectional=True)
##########################################
# mapping modalities to same sized space
##########################################
if self.config.use_bert:
self.project_t = nn.Sequential()
self.project_t.add_module('project_t', nn.Linear(in_features=768, out_features=config.hidden_size))
self.project_t.add_module('project_t_activation', self.activation)
self.project_t.add_module('project_t_layer_norm', nn.LayerNorm(config.hidden_size))
else:
self.project_t = nn.Sequential()
self.project_t.add_module('project_t', nn.Linear(in_features=hidden_sizes[0]*4, out_features=config.hidden_size))
self.project_t.add_module('project_t_activation', self.activation)
self.project_t.add_module('project_t_layer_norm', nn.LayerNorm(config.hidden_size))
self.project_v = nn.Sequential()
self.project_v.add_module('project_v', nn.Linear(in_features=hidden_sizes[1]*4, out_features=config.hidden_size))
self.project_v.add_module('project_v_activation', self.activation)
self.project_v.add_module('project_v_layer_norm', nn.LayerNorm(config.hidden_size))
self.project_a = nn.Sequential()
self.project_a.add_module('project_a', nn.Linear(in_features=hidden_sizes[2]*4, out_features=config.hidden_size))
self.project_a.add_module('project_a_activation', self.activation)
self.project_a.add_module('project_a_layer_norm', nn.LayerNorm(config.hidden_size))
##########################################
# private encoders
##########################################
self.private_t = nn.Sequential()
self.private_t.add_module('private_t_1', nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size))
self.private_t.add_module('private_t_activation_1', nn.Sigmoid())
self.private_v = nn.Sequential()
self.private_v.add_module('private_v_1', nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size))
self.private_v.add_module('private_v_activation_1', nn.Sigmoid())
self.private_a = nn.Sequential()
self.private_a.add_module('private_a_3', nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size))
self.private_a.add_module('private_a_activation_3', nn.Sigmoid())
##########################################
# shared encoder
##########################################
self.shared = nn.Sequential()
self.shared.add_module('shared_1', nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size))
self.shared.add_module('shared_activation_1', nn.Sigmoid())
##########################################
# reconstruct
##########################################
self.recon_t = nn.Sequential()
self.recon_t.add_module('recon_t_1', nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size))
self.recon_v = nn.Sequential()
self.recon_v.add_module('recon_v_1', nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size))
self.recon_a = nn.Sequential()
self.recon_a.add_module('recon_a_1', nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size))
##########################################
# shared space adversarial discriminator
##########################################
if not self.config.use_cmd_sim:
self.discriminator = nn.Sequential()
self.discriminator.add_module('discriminator_layer_1', nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size))
self.discriminator.add_module('discriminator_layer_1_activation', self.activation)
self.discriminator.add_module('discriminator_layer_1_dropout', nn.Dropout(dropout_rate))
self.discriminator.add_module('discriminator_layer_2', nn.Linear(in_features=config.hidden_size, out_features=len(hidden_sizes)))
##########################################
# shared-private collaborative discriminator
##########################################
self.sp_discriminator = nn.Sequential()
self.sp_discriminator.add_module('sp_discriminator_layer_1', nn.Linear(in_features=config.hidden_size, out_features=4))
self.fusion = nn.Sequential()
self.fusion.add_module('fusion_layer_1', nn.Linear(in_features=self.config.hidden_size*6, out_features=self.config.hidden_size*3))
self.fusion.add_module('fusion_layer_1_dropout', nn.Dropout(dropout_rate))
self.fusion.add_module('fusion_layer_1_activation', self.activation)
self.fusion.add_module('fusion_layer_3', nn.Linear(in_features=self.config.hidden_size*3, out_features= output_size))
self.tlayer_norm = nn.LayerNorm((hidden_sizes[0]*2,))
self.vlayer_norm = nn.LayerNorm((hidden_sizes[1]*2,))
self.alayer_norm = nn.LayerNorm((hidden_sizes[2]*2,))
encoder_layer = nn.TransformerEncoderLayer(d_model=self.config.hidden_size, nhead=2)
self.transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=1)
def extract_features(self, sequence, lengths, rnn1, rnn2, layer_norm):
packed_sequence = pack_padded_sequence(sequence, lengths)
if self.config.rnncell == "lstm":
packed_h1, (final_h1, _) = rnn1(packed_sequence)
else:
packed_h1, final_h1 = rnn1(packed_sequence)
padded_h1, _ = pad_packed_sequence(packed_h1)
normed_h1 = layer_norm(padded_h1)
packed_normed_h1 = pack_padded_sequence(normed_h1, lengths)
if self.config.rnncell == "lstm":
_, (final_h2, _) = rnn2(packed_normed_h1)
else:
_, final_h2 = rnn2(packed_normed_h1)
return final_h1, final_h2
def alignment(self, sentences, visual, acoustic, lengths, bert_sent, bert_sent_type, bert_sent_mask):
batch_size = lengths.size(0)
if self.config.use_bert:
bert_output = self.bertmodel(input_ids=bert_sent,
attention_mask=bert_sent_mask,
token_type_ids=bert_sent_type)
bert_output = bert_output[0]
# masked mean
masked_output = torch.mul(bert_sent_mask.unsqueeze(2), bert_output)
mask_len = torch.sum(bert_sent_mask, dim=1, keepdim=True)
bert_output = torch.sum(masked_output, dim=1, keepdim=False) / mask_len
utterance_text = bert_output
else:
# extract features from text modality
sentences = self.embed(sentences)
final_h1t, final_h2t = self.extract_features(sentences, lengths, self.trnn1, self.trnn2, self.tlayer_norm)
utterance_text = torch.cat((final_h1t, final_h2t), dim=2).permute(1, 0, 2).contiguous().view(batch_size, -1)
# extract features from visual modality
final_h1v, final_h2v = self.extract_features(visual, lengths, self.vrnn1, self.vrnn2, self.vlayer_norm)
utterance_video = torch.cat((final_h1v, final_h2v), dim=2).permute(1, 0, 2).contiguous().view(batch_size, -1)
# extract features from acoustic modality
final_h1a, final_h2a = self.extract_features(acoustic, lengths, self.arnn1, self.arnn2, self.alayer_norm)
utterance_audio = torch.cat((final_h1a, final_h2a), dim=2).permute(1, 0, 2).contiguous().view(batch_size, -1)
# Shared-private encoders
self.shared_private(utterance_text, utterance_video, utterance_audio)
if not self.config.use_cmd_sim:
# discriminator
reversed_shared_code_t = ReverseLayerF.apply(self.utt_shared_t, self.config.reverse_grad_weight)
reversed_shared_code_v = ReverseLayerF.apply(self.utt_shared_v, self.config.reverse_grad_weight)
reversed_shared_code_a = ReverseLayerF.apply(self.utt_shared_a, self.config.reverse_grad_weight)
self.domain_label_t = self.discriminator(reversed_shared_code_t)
self.domain_label_v = self.discriminator(reversed_shared_code_v)
self.domain_label_a = self.discriminator(reversed_shared_code_a)
else:
self.domain_label_t = None
self.domain_label_v = None
self.domain_label_a = None
self.shared_or_private_p_t = self.sp_discriminator(self.utt_private_t)
self.shared_or_private_p_v = self.sp_discriminator(self.utt_private_v)
self.shared_or_private_p_a = self.sp_discriminator(self.utt_private_a)
self.shared_or_private_s = self.sp_discriminator( (self.utt_shared_t + self.utt_shared_v + self.utt_shared_a)/3.0 )
# For reconstruction
self.reconstruct()
# 1-LAYER TRANSFORMER FUSION
h = torch.stack((self.utt_private_t, self.utt_private_v, self.utt_private_a, self.utt_shared_t, self.utt_shared_v, self.utt_shared_a), dim=0)
h = self.transformer_encoder(h)
h = torch.cat((h[0], h[1], h[2], h[3], h[4], h[5]), dim=1)
o = self.fusion(h)
return o
def reconstruct(self,):
self.utt_t = (self.utt_private_t + self.utt_shared_t)
self.utt_v = (self.utt_private_v + self.utt_shared_v)
self.utt_a = (self.utt_private_a + self.utt_shared_a)
self.utt_t_recon = self.recon_t(self.utt_t)
self.utt_v_recon = self.recon_v(self.utt_v)
self.utt_a_recon = self.recon_a(self.utt_a)
def shared_private(self, utterance_t, utterance_v, utterance_a):
# Projecting to same sized space
self.utt_t_orig = utterance_t = self.project_t(utterance_t)
self.utt_v_orig = utterance_v = self.project_v(utterance_v)
self.utt_a_orig = utterance_a = self.project_a(utterance_a)
# Private-shared components
self.utt_private_t = self.private_t(utterance_t)
self.utt_private_v = self.private_v(utterance_v)
self.utt_private_a = self.private_a(utterance_a)
self.utt_shared_t = self.shared(utterance_t)
self.utt_shared_v = self.shared(utterance_v)
self.utt_shared_a = self.shared(utterance_a)
def forward(self, sentences, video, acoustic, lengths, bert_sent, bert_sent_type, bert_sent_mask):
batch_size = lengths.size(0)
o = self.alignment(sentences, video, acoustic, lengths, bert_sent, bert_sent_type, bert_sent_mask)
return o
``` |
{
"source": "jiayid/auto3dgm",
"score": 3
} |
#### File: auto3dgm/jobrun/job.py
```python
class Job(object):
"""Data structure encapsulating data and parameters for JobRun task.
Example dict version of Job data
{
    'data':
        {
            'analysis_1': {'mesh': mesh1},
            'analysis_2': {'mesh': mesh2},
            'analysis_3': {'mesh': mesh3}
        },
    'params':
        {
            'point_number': 200,
            'subsample_method': 'GPR'
        },
    'func': function_reference
}
"""
def __init__(self, job_dict={}, data={}, params={}, func=None):
self.data = {}
self.params = {}
self.func = None
if job_dict:
self.import_job_dict(job_dict)
elif data or params or func:
self.import_args(data, params, func)
def import_job_dict(self, job_dict):
if (job_dict and isinstance(job_dict, dict)):
if 'data' in job_dict and self.__validate_data(job_dict['data']):
self.data = job_dict['data']
if 'params' in job_dict and self.__validate_params(job_dict['params']):
self.params = job_dict['params']
if 'func' in job_dict and self.__validate_func(job_dict['func']):
self.func = job_dict['func']
    def import_args(self, data={}, params={}, func=None):
        if data and self.__validate_data(data):
            self.data = data
        if params and self.__validate_params(params):
            self.params = params
        if func and self.__validate_func(func):
            self.func = func
def as_dict(self):
"""Returns job data structure as dict"""
        if (self.__validate_data(self.data)
            and self.__validate_params(self.params)
            and self.__validate_func(self.func)):
return {
'data': self.data,
'params': self.params,
'func': self.func
}
def validate(self):
"""Check all components and return true if all validate"""
if (self.data and self.__validate_data(self.data)
and self.params and self.__validate_params(self.params)
and self.func and self.__validate_func(self.func)):
return True
def __validate_data(self, data):
"""data must be dict, every element must be dict with >=1 element"""
if (not data
or not isinstance(data, dict)
or not len(data)
or not self.__validate_data_items(data.values())):
self.__validation_error(error_type='data', var=data)
return True
def __validate_data_items(self, items):
for x in items:
if not isinstance(x, dict) or not len(x):
self.__validation_error(error_type='data_item', var=x)
return True
def __validate_params(self, params):
"""Params must be dict with at least one value"""
if not params or not isinstance(params, dict) or not len(params):
self.__validation_error(error_type='params', var=params)
return True
def __validate_func(self, func):
"""Func must be callable"""
if not func or not callable(func):
self.__validation_error(error_type='func', var=func)
return True
def __validation_error(self, error_type, var):
allowed_types = ['data', 'data_item', 'params', 'func']
if error_type not in allowed_types:
raise ValueError('Unexpected error type ' + str(error_type))
else:
raise ValueError('Unexpected value' + str(var) + 'for type ' + str(error_type))
``` |
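A small usage sketch for the `Job` class above (the import path follows the file heading; the analysis function and mesh placeholder are hypothetical):
```python
from auto3dgm.jobrun.job import Job

def count_vertices(mesh, point_number, subsample_method):
    # hypothetical per-mesh analysis used only for illustration
    return min(len(mesh), point_number)

job = Job(job_dict={
    'data': {'analysis_1': {'mesh': [(0, 0, 0), (1, 0, 0), (0, 1, 0)]}},
    'params': {'point_number': 200, 'subsample_method': 'GPR'},
    'func': count_vertices,
})
print(job.validate())           # True once data, params, and func all validate
print(job.as_dict()['params'])  # {'point_number': 200, 'subsample_method': 'GPR'}
```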
{
"source": "JiayiFeng/Paddle",
"score": 3
} |
#### File: fluid/layers/learning_rate_scheduler.py
```python
import control_flow
import nn
import ops
import tensor
from ..initializer import init_on_cpu
__all__ = [
'exponential_decay', 'natural_exp_decay', 'inverse_time_decay',
'polynomial_decay', 'piecewise_decay', 'noam_decay'
]
def _decay_step_counter(begin=0):
# the first global step is zero in learning rate decay
global_step = nn.autoincreased_step_counter(
counter_name='@LR_DECAY_COUNTER@', begin=begin, step=1)
global_step = tensor.cast(global_step, 'float32')
return global_step
def noam_decay(d_model, warmup_steps):
"""
    Noam decay method. The numpy implementation of noam decay is as follows.
>>> import numpy as np
>>> lr_value = np.power(d_model, -0.5) * np.min([
>>> np.power(current_steps, -0.5),
>>> np.power(warmup_steps, -1.5) * current_steps])
Please reference `attention is all you need
<https://arxiv.org/pdf/1706.03762.pdf>`_.
Args:
d_model(Variable): The dimensionality of input and output of model.
        warmup_steps(Variable): The number of warmup steps (a hyperparameter).
Returns:
The decayed learning rate.
"""
global_step = _decay_step_counter(1)
with init_on_cpu():
a = global_step**-0.5
b = (warmup_steps**-1.5) * global_step
lr_value = (d_model**-0.5) * ops.elementwise_min(a, b)
return lr_value
def exponential_decay(learning_rate, decay_steps, decay_rate, staircase=False):
"""Applies exponential decay to the learning rate.
```python
decayed_learning_rate = learning_rate *
decay_rate ^ (global_step / decay_steps)
```
Args:
learning_rate: A scalar float32 value or a Variable. This
will be the initial learning rate during training
decay_steps: A Python `int32` number.
decay_rate: A Python `float` number.
staircase: Boolean. If set true, decay the learning rate every decay_steps.
Returns:
The decayed learning rate
"""
global_step = _decay_step_counter()
with init_on_cpu():
# update learning_rate
div_res = global_step / decay_steps
if staircase:
div_res = ops.floor(div_res)
decayed_lr = learning_rate * (decay_rate**div_res)
return decayed_lr
def natural_exp_decay(learning_rate, decay_steps, decay_rate, staircase=False):
"""Applies natural exponential decay to the initial learning rate.
>>> if not staircase:
>>> decayed_learning_rate = learning_rate * exp(- decay_rate * (global_step / decay_steps))
>>> else:
    >>> decayed_learning_rate = learning_rate * exp(- decay_rate * floor(global_step / decay_steps))
Args:
learning_rate: A scalar float32 value or a Variable. This
will be the initial learning rate during training
decay_steps: A Python `int32` number.
decay_rate: A Python `float` number.
staircase: Boolean. If set true, decay the learning rate every decay_steps.
Returns:
The decayed learning rate
"""
global_step = _decay_step_counter()
with init_on_cpu():
div_res = global_step / decay_steps
if staircase:
div_res = ops.floor(div_res)
decayed_lr = learning_rate * ops.exp(-1 * decay_rate * div_res)
return decayed_lr
def inverse_time_decay(learning_rate, decay_steps, decay_rate, staircase=False):
"""Applies inverse time decay to the initial learning rate.
    >>> if staircase:
    >>> decayed_learning_rate = learning_rate / (1 + decay_rate * floor(global_step / decay_steps))
    >>> else:
    >>> decayed_learning_rate = learning_rate / (1 + decay_rate * global_step / decay_steps)
Args:
learning_rate: A scalar float32 value or a Variable. This
will be the initial learning rate during training.
decay_steps: A Python `int32` number.
decay_rate: A Python `float` number.
staircase: Boolean. If set true, decay the learning rate every decay_steps.
Returns:
The decayed learning rate
"""
global_step = _decay_step_counter()
with init_on_cpu():
div_res = global_step / decay_steps
if staircase:
div_res = ops.floor(div_res)
decayed_lr = learning_rate / (1 + decay_rate * div_res)
return decayed_lr
def polynomial_decay(learning_rate,
decay_steps,
end_learning_rate=0.0001,
power=1.0,
cycle=False):
"""Applies polynomial decay to the initial learning rate.
>>> if cycle:
>>> decay_steps = decay_steps * ceil(global_step / decay_steps)
>>> else:
>>> global_step = min(global_step, decay_steps)
>>> decayed_learning_rate = (learning_rate - end_learning_rate) *
>>> (1 - global_step / decay_steps) ^ power +
>>> end_learning_rate
Args:
learning_rate: A scalar float32 value or a Variable. This
will be the initial learning rate during training
decay_steps: A Python `int32` number.
end_learning_rate: A Python `float` number.
power: A Python `float` number
cycle: Boolean. If set true, decay the learning rate every decay_steps.
Returns:
The decayed learning rate
"""
global_step = _decay_step_counter()
with init_on_cpu():
if cycle:
div_res = ops.ceil(global_step / decay_steps)
zero_var = tensor.fill_constant(
shape=[1], dtype='float32', value=0.0)
one_var = tensor.fill_constant(
shape=[1], dtype='float32', value=1.0)
with control_flow.Switch() as switch:
with switch.case(global_step == zero_var):
tensor.assign(input=one_var, output=div_res)
decay_steps = decay_steps * div_res
else:
decay_steps_var = tensor.fill_constant(
shape=[1], dtype='float32', value=float(decay_steps))
global_step = ops.elementwise_min(x=global_step, y=decay_steps_var)
decayed_lr = (learning_rate - end_learning_rate) * \
((1 - global_step / decay_steps) ** power) + end_learning_rate
return decayed_lr
def piecewise_decay(boundaries, values):
"""Applies piecewise decay to the initial learning rate.
>>> boundaries = [10000, 20000]
>>> values = [1.0, 0.5, 0.1]
>>>
>>> if step < 10000:
>>> learning_rate = 1.0
>>> elif 10000 <= step < 20000:
>>> learning_rate = 0.5
>>> else:
>>> learning_rate = 0.1
"""
if len(values) - len(boundaries) != 1:
raise ValueError("len(values) - len(boundaries) should be 1")
global_step = _decay_step_counter()
with init_on_cpu():
lr = tensor.create_global_var(
shape=[1],
value=0.0,
dtype='float32',
persistable=True,
name="learning_rate")
with control_flow.Switch() as switch:
for i in range(len(boundaries)):
boundary_val = tensor.fill_constant(
shape=[1], dtype='float32', value=float(boundaries[i]))
value_var = tensor.fill_constant(
shape=[1], dtype='float32', value=float(values[i]))
with switch.case(global_step < boundary_val):
tensor.assign(value_var, lr)
last_value_var = tensor.fill_constant(
shape=[1],
dtype='float32',
value=float(values[len(values) - 1]))
with switch.default():
tensor.assign(last_value_var, lr)
return lr
``` |
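For intuition, a standalone NumPy check of the decay formulas documented above (independent of Paddle; the numbers are arbitrary):
```python
import numpy as np

lr, decay_steps, decay_rate, step = 0.1, 1000.0, 0.9, 2500.0
print(lr * decay_rate ** (step / decay_steps))           # exponential_decay
print(lr * decay_rate ** np.floor(step / decay_steps))   # exponential_decay, staircase=True
print(lr / (1 + decay_rate * step / decay_steps))        # inverse_time_decay
```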
{
"source": "JiayiGuan97/heinz-95729-project",
"score": 3
} |
#### File: ml/src/knn_old.py
```python
import argparse
import pandas as pd
from scipy.sparse import csr_matrix
from sklearn.neighbors import NearestNeighbors
parser = argparse.ArgumentParser(description='ML Part for the project')
parser.add_argument('--data', type=str, default='../data/data/',
help='location of the data')
parser.add_argument('--result', type=str, default='../data/res/',
help='location of the result data')
args = parser.parse_args()
args.tied = True
def prepare_matrix(df):
res_df = df.pivot_table(index='res_id', columns='user_id', values='rating').fillna(0)
res_df_matrix = csr_matrix(res_df.values)
return res_df, res_df_matrix
def knn_fit(res_df_matrix):
model_knn = NearestNeighbors(metric='cosine', algorithm='brute')
model_knn.fit(res_df_matrix)
return model_knn
def get_recommendations(key, res_df, model_knn):
res = []
for index, name in enumerate(res_df.index):
if (str(name) == key) | (name == key):
test = index
distance, indices = model_knn.kneighbors(res_df.iloc[test, :].values.reshape(1, -1), n_neighbors=7)
for i in range(0, len(distance.flatten())):
if i == 0:
# print('Recommendations for {0}:\n'.format(res_df.index[test]))
pass
else:
res.append(res_df.index[indices.flatten()[i]])
# print('{0}: {1}'.format(i, res_df.index[indices.flatten()[i]]))
return res
return res
# keep only recommendations produced by both algorithms (set intersection)
def merge_res(content_res_lst, user_res_lst):
intersection_res = set(content_res_lst).intersection(user_res_lst)
res_lst = list(intersection_res)
return res_lst
def trans_x(s):
lst = s.split(" ")
return [int(i) for i in lst]
def get_search(chicago_data, key_list):
chicago_data['star'] = chicago_data['features'].apply(lambda x: len(set(trans_x(x)).intersection(key_list)))
chicago_data = chicago_data.sort_values('star', ascending=False)
return chicago_data[:6]['id'].tolist()
if __name__ == "__main__":
features = pd.read_csv(args.data + "features.txt", delimiter='\t', names=['id', 'name'])
feature_dict = features.set_index('name').T.to_dict('list')
chicago_data = pd.read_csv(args.data + "chicago.txt", delimiter='\t', names=['id', 'name', 'features'])
content_based_res = pd.read_csv(args.result + "content_based_chicago.csv")
name_df = content_based_res[(content_based_res['region'] == 'chicago')][['id','name']]
demand = input("Enter the restaurant feature you want recommendations for \n")
demand_list = [feature_dict[i][0] for i in demand.split(", ")]
# feature_used will be the input for list
feature_used = demand.split(", ")
search_lst = get_search(chicago_data, demand_list)
res_id = search_lst[0]
print("===========Here is the Search result List==============")
print(name_df.loc[name_df['id'] == res_id]['name'].values[0])
train = pd.read_csv(args.result + "session_data_concat.csv")
res_df, res_df_matrix = prepare_matrix(train)
model_knn = knn_fit(res_df_matrix)
user_res = get_recommendations(res_id, res_df, model_knn)
user_res_lst = []
for i in user_res:
user_res_lst.append(name_df.loc[name_df['id'] == i]['name'].values[0])
content_df = content_based_res[(content_based_res['id'] == int(res_id)) & (content_based_res['region'] == 'chicago')]
name_res1 = name_df.loc[name_df['id'] == content_df['res1'].values[0]]['name'].values[0]
name_res2 = name_df.loc[name_df['id'] == content_df['res2'].values[0]]['name'].values[0]
name_res3 = name_df.loc[name_df['id'] == content_df['res3'].values[0]]['name'].values[0]
name_res4 = name_df.loc[name_df['id'] == content_df['res4'].values[0]]['name'].values[0]
name_res5 = name_df.loc[name_df['id'] == content_df['res5'].values[0]]['name'].values[0]
name_res6 = name_df.loc[name_df['id'] == content_df['res6'].values[0]]['name'].values[0]
content_res_lst = [name_res1,name_res2,name_res3,name_res4,name_res5,name_res6]
merge_res_lst = merge_res(content_res_lst, user_res_lst)
print("===========Here is the User-Based recommendation List==============")
print(user_res_lst)
print("===========Here is the Content-Based recommendation List==============")
print(content_res_lst)
print("===========Here is the Intersection recommendation List of above two methods ==============")
print(merge_res_lst)
``` |
{
"source": "JiayiGuo821/product-image-retrieval",
"score": 3
} |
#### File: JiayiGuo821/product-image-retrieval/datasets.py
```python
import torch.utils.data as data
import pandas as pd
import numpy as np
import os
from pytorch_pretrained_bert import BertTokenizer, BertModel
from sklearn.feature_extraction.text import TfidfVectorizer
from rank_bm25 import BM25Okapi
from torchvision import transforms
from PIL import Image
class Shopee_product_matching(data.Dataset):
def __init__(self, mode: str, image_type, text_type, transform=None, debug=False, input_size=224):
self.root = './data'
self.expr_root = './expr'
self.images = []
self.labels = []
self.mode = mode
self.text_type = text_type
self.image_type = image_type
if image_type == 'image':
mean_pix = [0.485, 0.456, 0.406]
std_pix = [0.229, 0.224, 0.225]
if transform == None:
self.transform = transforms.Compose([
transforms.Resize((input_size,input_size)),
transforms.ToTensor(),
transforms.Normalize(mean=mean_pix, std=std_pix),
])
else:
self.transform = transform
csv_file = os.path.join(self.root, 'splitted', f'{mode}.csv') #test set
data = pd.read_csv(csv_file)
if debug:
data = data.iloc[:100]
data['image'] = data['image'].apply(lambda image:os.path.join(self.root, 'train_images', image))
self.images = list(data['image'])
else:
try:
self.images = np.load(os.path.join(self.expr_root, f'{image_type}.npy'))
except:
print('NO Features!')
try:
self.labels = np.load(os.path.join(self.expr_root, f'labels_{mode}.npy'))
except:
vc = list(set(pd.read_csv(os.path.join(self.root, 'train.csv'))['label_group']))
vc.sort()
group2label = dict(zip(vc,range(len(vc))))
import operator
data = pd.read_csv(os.path.join(self.root, 'splitted', f'{mode}.csv'))
self.labels = operator.itemgetter(*list(data['label_group']))(group2label)
np.save(os.path.join(self.expr_root, f'labels_{mode}.npy'), self.labels)
if self.text_type == 'bert':
self.features_title = np.load(os.path.join(self.expr_root, f'features_bert_{mode}.npy'))
elif self.text_type == 'tfidf':
csv_file = os.path.join(self.root, 'splitted', f'{mode}.csv') #test set
data = pd.read_csv(csv_file)
if debug:
data = data.iloc[:100]
corpus = list(data['title'])
vector = TfidfVectorizer()
tfidf = vector.fit_transform(corpus)
self.features_title = tfidf.toarray()
elif self.text_type == 'bm25':
csv_file = os.path.join(self.root, 'splitted', f'{mode}.csv') #test set
data = pd.read_csv(csv_file)
if debug:
data = data.iloc[:100]
corpus = list(data['title'])
tokenizer = BertTokenizer.from_pretrained('bert-large-uncased')
tokenized_corpus = [tokenizer.tokenize(doc) for doc in corpus]
self.bm25 = BM25Okapi(tokenized_corpus)
self.features_title = tokenized_corpus
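# Note (added for clarity): when text_type == 'bm25', __getitem__ below calls
# self.bm25.get_scores(tokenized_query), which returns one BM25 score per title in the
# corpus, so each sample's text feature has length len(corpus).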
def __getitem__(self, index):
if self.image_type == 'image':
image_path = self.images[index]
img = Image.open(image_path).convert('RGB')
img = self.transform(img)
else:
img = self.images[index]
target = int(self.labels[index])
if self.text_type == 'bm25':
tokenized_query = self.features_title[index]
feature_title = self.bm25.get_scores(tokenized_query)
else:
feature_title = self.features_title[index]
return img, target, feature_title
def __len__(self):
return len(self.labels)
if __name__ == '__main__':
dataset = Shopee_product_matching(mode='test', text_type='bm25', debug=True)
#trainset = FashionMNIST(train=True)
#for img, target in dataset:
# print(img, target)
# break
#trainset = MiniImageNetL(r=72, r1=0, vis=True, low=True, debug=False, train=False)
#low_path = './results/detail/low_detail_results(r=72).csv'
#low = pd.read_csv(low_path).loc[:,'acc']
#low = np.array(low)
#print (np.sum(low))
#low = ~low.astype(np.bool)
#
#std_path = './results/detail/std_detail_results.csv'
#std = pd.read_csv(std_path).loc[:,'acc']
#std = np.array(std)
#print (np.sum(std))
#std = std.astype(np.bool)
#
#for i in np.argwhere((std*low)==True).squeeze():
# trainset[i]
#for i in range(1):
#index = np.random.permutation(np.array(range(10000)))[i]
#trainset[index]
```
#### File: JiayiGuo821/product-image-retrieval/test_moco.py
```python
import argparse
import builtins
import os
import random
import shutil
import time
import warnings
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from PIL import Image
import numpy as np
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import torch.utils.data as data
import moco.builder
global feats_batch
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('-s', '--set', metavar='DIR', default='test',
choices=['train', 'valid', 'test'],
help='valid set or test set')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet50',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet50)')
parser.add_argument('-j', '--workers', default=0, type=int, metavar='N',
help='number of data loading workers (default: 0)')
parser.add_argument('--pretrained', default='', type=str,
help='path to moco pretrained checkpoint')
parser.add_argument('--gpu', default=0, type=int,
help='GPU id to use.')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N',
help='mini-batch size (default: 256), this is the total '
'batch size of all GPUs on the current node when '
'using Data Parallel or Distributed Data Parallel')
# moco specific configs:
parser.add_argument('--moco-dim', default=128, type=int,
help='feature dimension (default: 128)')
# options for moco v2
parser.add_argument('--mlp', action='store_true',
help='use mlp head')
parser.add_argument('--name', default='', type=str,
help='experiment name')
class product_image_retrieval(data.Dataset):
def __init__(self, root, mode='train', debug=False, transform=None):
self.root = root
self.images = []
self.labels = []
self.transform = transform
if mode == 'train':
csv_file = os.path.join(self.root, 'splitted', 'train.csv')
elif mode == 'valid':
csv_file = os.path.join(self.root, 'splitted', 'valid.csv')
elif mode == 'test':
csv_file = os.path.join(self.root, 'splitted', 'test.csv')
else:
print('mode must in train, valid or test')
raise NotImplementedError
data = pd.read_csv(csv_file)
if debug:
data = data.iloc[:20]
corpus = list(data['title'])
vector = TfidfVectorizer()
tfidf = vector.fit_transform(corpus)
self.weightlist = tfidf.toarray()
data['image'] = data['image'].apply(lambda image: os.path.join(self.root, 'train_images', image))
vc = list(set(data['label_group']))
vc.sort()
group2label = dict(zip(vc, range(len(vc))))
import operator
self.images = list(data['image'])
self.labels = operator.itemgetter(*list(data['label_group']))(group2label)
data['label_group'] = self.labels
self.data = data
def __getitem__(self, index):
image_path = self.images[index]
img = Image.open(image_path).convert('RGB')
target = int(self.labels[index])
title = self.weightlist[index]
if self.transform is not None:
img = self.transform(img)
sample = {'img': img, 'title': title, 'target': target, 'index': index}  # build a dict from the image, title feature, label and index
return sample
def __len__(self):
return len(self.labels)
def main():
args = parser.parse_args()
print("=> creating model '{}'".format(args.arch))
model = moco.builder.MoCo_single(
models.__dict__[args.arch],
args.moco_dim, args.mlp)
print(model)
for name, param in model.named_parameters():
param.requires_grad = False
if args.pretrained:
if os.path.isfile(args.pretrained):
print("=> loading checkpoint '{}'".format(args.pretrained))
checkpoint = torch.load(args.pretrained, map_location="cpu")
# rename moco pre-trained keys
state_dict = checkpoint['state_dict']
for k in list(state_dict.keys()):
# retain only encoder_q up to before the embedding layer
if k.startswith('module.encoder_q'):
# remove prefix
state_dict[k[len("module."):]] = state_dict[k]
# delete renamed or unused k
del state_dict[k]
model.load_state_dict(state_dict, strict=True)
print("=> loaded pre-trained model '{}'".format(args.pretrained))
else:
print("=> no checkpoint found at '{}'".format(args.pretrained))
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
datadir = args.data
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
pir_dataset = product_image_retrieval(
datadir,
args.set,
transform=transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
normalize,
]))
loader = torch.utils.data.DataLoader(
pir_dataset, batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True, drop_last=False)
length_dir = len(pd.read_csv(os.path.join(datadir, 'splitted', args.set) + '.csv'))
feats = np.zeros((length_dir, 2048))#args.moco_dim))
model.eval()
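# The forward hook below captures the output of encoder_q.avgpool (a
# (batch, 2048, 1, 1) tensor for resnet50) and writes each sample's pooled feature
# into the preallocated `feats` array at its dataset index, keeping the saved
# features aligned with the csv row order even though the loader shuffles.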
def hook(module, input, output):
feats_batch = output.cpu().numpy()
for idx in range(len(index)):
feats[index[idx], :] = feats_batch[idx, :].reshape(2048,)
model.encoder_q.avgpool.register_forward_hook(hook)
for i, samples in tqdm(enumerate(loader)):
if args.gpu is not None:
img = samples['img'].cuda(args.gpu, non_blocking=True)
title = samples['title'].cuda(args.gpu, non_blocking=True)
target = samples['target'].cuda(args.gpu, non_blocking=True)
index = samples['index'].cuda(args.gpu, non_blocking=True)
model(im_q=img, title=title, target=target)
np.save('expr/features_moco' + args.name + '_' + args.set + '.npy', feats)
if __name__ == '__main__':
main()
```
#### File: JiayiGuo821/product-image-retrieval/test_predict.py
```python
from torchvision import transforms, utils
import torch.optim as optim
import torch
import pandas as pd
import numpy as np
import argparse
import pickle
import sys
import os
import models
import datasets
import nets
def parse_args():
net_names = ['resnet50']
model_names = ['Matcher', 'PCA_Matcher']
dataset_names = ['Shopee_product_matching']
text_type_names = ['tfidf', 'bm25', 'bert']
parser = argparse.ArgumentParser()
parser.add_argument('--name', default='None')
parser.add_argument('--net', choices=net_names)
parser.add_argument('--model', default='Matcher', choices=model_names, help='default:Matcher')
parser.add_argument('--batch_size','-b',default=16, type=int,metavar='N',help='default: 16')
parser.add_argument('--dataset', default=None, choices=dataset_names)
parser.add_argument('--pretrained', default=False, action='store_true')
parser.add_argument('--debug', default=False, action='store_true')
parser.add_argument('--mode', default='None')
parser.add_argument('--text_type', choices=text_type_names)
parser.add_argument('--image_type', default='image')
args=parser.parse_args()
return args
def test(args):
args = vars(args)
name = args['name']
args['path'] = f'./results/{name}'
path = args['path']
load_path = f'./results/{name}/{name}'
if not os.path.exists(path):
os.makedirs(path)
print(f"=> creating model {name}")
print('Config -----')
for arg in args:
print(f'{arg}: {args[arg]}')
print('------------')
with open(os.path.join(path, 'args.txt'), 'w') as f:
for arg in args:
print(f'{arg}: {args[arg]}', file=f)
args['net'] = vars(nets)[args['net']](pretrained=args['pretrained'])
args['device'] = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
torch.backends.cudnn.benchmark = True
args['testset'] = vars(datasets)[args['dataset']](mode=args['mode'],
image_type=args['image_type'],
text_type=args['text_type'],
debug=args['debug'])
args['testloader'] = torch.utils.data.DataLoader(args['testset'],
batch_size=args['batch_size'],
shuffle=False,
pin_memory=True,
num_workers=4)
model = vars(models)[args['model']](args)
log = pd.DataFrame(index=[],
columns=['ap', 'roc', 'f1', 'threshold', 'ap_title', 'roc_title', 'f1_title', 'threshold_title','f1_merge'],
)
test_log = model.test(path=path,
debug=args['debug'],
image_type=args['image_type'],
text_type=args['text_type'])
print(f"ap: {test_log['ap']:.4f}")
print(f"roc: {test_log['roc']:.4f}")
print(f"f1: {test_log['f1']:.4f}")
print(f"threshold: {test_log['threshold']:.4f}")
print(f"ap_title: {test_log['ap_title']:.4f}")
print(f"roc_title: {test_log['roc_title']:.4f}")
print(f"f1_title: {test_log['f1_title']:.4f}")
print(f"threshold_title: {test_log['threshold_title']:.4f}")
print(f"f1_merge: {test_log['f1_merge']:.4f}")
tmp = pd.Series([test_log['ap'],
test_log['roc'],
test_log['f1'],
test_log['threshold'],
test_log['ap_title'],
test_log['roc_title'],
test_log['f1_title'],
test_log['threshold_title'],
test_log['f1_merge']],
index=['ap',
'roc',
'f1',
'threshold',
'ap_title',
'roc_title',
'f1_title',
'threshold_title',
'f1_merge'])
log = log.append(tmp, ignore_index=True)
log.to_csv(f'{load_path}_test_log.csv', index=False)
#torch.cuda.empty_cache()
#return log
if __name__ == '__main__':
torch.cuda.empty_cache()
args = parse_args()
#args.name = 'moco_test'
#args.mode = 'test'
args.net = 'resnet50'
args.model = 'Matcher'
args.dataset = 'Shopee_product_matching'
args.batch_size = 128
#args.debug = False
args.pretrained = True
if args.image_type == 'image':
print("invalid choice")
raise Exception
else:
test(args)
``` |
{
"source": "jiayiliu/explore_badges",
"score": 3
} |
#### File: explore_badges/ebadge/ebadge.py
```python
class EBadge:
def funcA(self):
"""return 1
Returns:
int -- value 1
"""
return 1
def funcB(self): # pragma: no cover
"""return a
Returns:
str -- value a
"""
return "a"
```
#### File: explore_badges/tests/test_ebadge.py
```python
import unittest
from ebadge import EBadge
class TestEBadge(unittest.TestCase):
def testA(self):
a = EBadge()
self.assertEqual(a.funcA(), 1)
def test_exec(self):
import subprocess
ans = subprocess.check_output(["python", "-m","ebadge"])
ans = ans.decode('utf8').strip()
self.assertEqual(ans, 'a')
``` |
{
"source": "jiayiliu/gradio",
"score": 3
} |
#### File: gradio/demo/matrix_transpose.py
```python
import gradio as gr
def transpose(matrix):
return matrix.T
io = gr.Interface(
transpose,
gr.inputs.Dataframe(type="numpy", datatype="number", row_count=5, col_count=3),
"numpy"
)
io.test_launch()
io.launch()
```
#### File: gradio/demo/reverse_audio.py
```python
import gradio as gr
import numpy as np
def reverse_audio(audio):
sr, data = audio
return (sr, np.flipud(data))
io = gr.Interface(reverse_audio, "microphone", "audio")
io.test_launch()
io.launch()
```
#### File: gradio/demo/webcam.py
```python
import gradio as gr
import numpy as np
def snap(image):
return np.flipud(image)
io = gr.Interface(snap, gr.inputs.Image(shape=(100,100), image_mode="L", source="webcam"), "image")
io.test_launch()
io.launch()
```
#### File: jiayiliu/gradio/generate_docs.py
```python
import json
from gradio.inputs import InputComponent
from gradio.outputs import OutputComponent
from gradio.interface import Interface
import inspect
from os import listdir
from os.path import join
import re
in_demos, out_demos = {}, {}
demo_regex = "# Demo: \((.*)\) -> \((.*)\)"
for demo in listdir("demo"):
if demo.endswith(".py"):
screenshots = listdir(join("demo/screenshots", demo[:-3]))[0]
demoset = [demo, [screenshots]]
with open(join("demo", demo)) as demo_file:
first_line = demo_file.readline()
match = re.match(demo_regex, first_line)
inputs = match.group(1).split(", ")
outputs = match.group(2).split(", ")
for i in inputs:
if i not in in_demos:
in_demos[i] = []
if demoset not in in_demos[i]:
in_demos[i].append(demoset)
for o in outputs:
if o not in out_demos:
out_demos[o] = []
if demoset not in out_demos[o]:
out_demos[o].append(demoset)
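# Hypothetical example of the expected first-line convention parsed above: a demo
# file starting with "# Demo: (Dataframe) -> (Dataframe)" would be registered under
# both in_demos["Dataframe"] and out_demos["Dataframe"].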
def get_params(func):
params_str = inspect.getdoc(func)
params_doc = []
documented_params = {"self"}
for param_line in params_str.split("\n")[1:]:
if param_line.strip() == "Returns":
break
space_index = param_line.index(" ")
colon_index = param_line.index(":")
name = param_line[:space_index]
documented_params.add(name)
params_doc.append((name, param_line[space_index+2:colon_index-1], param_line[colon_index+2:]))
params = inspect.getfullargspec(func)
param_set = []
for i in range(len(params.args)):
neg_index = -1 - i
if params.args[neg_index] not in documented_params:
continue
if i < len(params.defaults):
default = params.defaults[neg_index]
if type(default) == str:
default = '"' + default + '"'
else:
default = str(default)
param_set.insert(0, (params.args[neg_index], default))
else:
param_set.insert(0, (params.args[neg_index],))
return param_set, params_doc
def document(cls_set, demos):
docset = []
for cls in cls_set:
inp = {}
inp["name"] = cls.__name__
doc = inspect.getdoc(cls)
if doc.startswith("DEPRECATED"):
continue
inp["doc"] = "\n".join(doc.split("\n")[:-1])
inp["type"] = doc.split("\n")[-1].split("type: ")[-1]
inp["params"], inp["params_doc"] = get_params(cls.__init__)
inp["shortcuts"] = list(cls.get_shortcut_implementations().items())
cls_name = cls.__name__
if cls_name in demos:
inp["demos"] = demos.get(cls_name, [])
docset.append(inp)
return docset
inputs = document(InputComponent.__subclasses__(), in_demos)
outputs = document(OutputComponent.__subclasses__(), out_demos)
interface_params = get_params(Interface.__init__)
interface = {
"doc": inspect.getdoc(Interface),
"params": interface_params[0],
"params_doc": interface_params[1],
}
launch_params = get_params(Interface.launch)
launch = {
"params": launch_params[0],
"params_doc": launch_params[1],
}
with open("docs.json", "w") as docs:
json.dump({
"inputs": inputs,
"outputs": outputs,
"interface": interface,
"launch": launch,
}, docs)
```
#### File: gradio/gradio/inputs.py
```python
import datetime
import json
import os
import time
import warnings
from gradio.component import Component
import base64
import numpy as np
import PIL
import scipy.io.wavfile
from gradio import processing_utils, test_data
import pandas as pd
import math
import tempfile
class InputComponent(Component):
"""
Input Component. All input components subclass this.
"""
pass
class Textbox(InputComponent):
"""
Component creates a textbox for user to enter input. Provides a string (or a number if `type` is "number") as an argument to the wrapped function.
Input type: str
"""
def __init__(self, lines=1, placeholder=None, default=None, numeric=False, type="str", label=None):
"""
Parameters:
lines (int): number of line rows to provide in textarea.
placeholder (str): placeholder hint to provide behind textarea.
default (str): default text to provide in textarea.
numeric (bool): DEPRECATED. Whether the input should be parsed as a number instead of a string.
type (str): Type of value to be returned by component. "str" returns a string, "number" returns a float value.
label (str): component name in interface.
"""
self.lines = lines
self.placeholder = placeholder
self.default = default
if numeric:
warnings.warn("The 'numeric' parameter has been deprecated. Set parameter 'type' to 'number' instead.", DeprecationWarning)
self.type = "number"
else:
self.type = type
if default is None:
self.test_input = {
"str": "the quick brown fox jumped over the lazy dog",
"number": 786.92,
}[type]
else:
self.test_input = default
super().__init__(label)
def get_template_context(self):
return {
"lines": self.lines,
"placeholder": self.placeholder,
"default": self.default,
**super().get_template_context()
}
@classmethod
def get_shortcut_implementations(cls):
return {
"text": {},
"textbox": {"lines": 7},
"number": {"type": "number"}
}
def preprocess(self, x):
if self.type == "str":
return x
elif self.type == "number":
return float(x)
else:
raise ValueError("Unknown type: " + str(self.type) + ". Please choose from: 'str', 'number'.")
class Slider(InputComponent):
"""
Component creates a slider that ranges from `minimum` to `maximum`. Provides a number as an argument to the wrapped function.
Input type: float
"""
def __init__(self, minimum=0, maximum=100, step=None, default=None, label=None):
'''
Parameters:
minimum (float): minimum value for slider.
maximum (float): maximum value for slider.
step (float): increment between slider values.
default (float): default value.
label (str): component name in interface.
'''
self.minimum = minimum
self.maximum = maximum
if step is None:
difference = maximum - minimum
power = math.floor(math.log10(difference) - 1)
step = 10 ** power
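# e.g. with the defaults minimum=0, maximum=100: difference=100,
# power = floor(log10(100) - 1) = 1, so step defaults to 10.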
self.step = step
self.default = minimum if default is None else default
self.test_input = self.default
super().__init__(label)
def get_template_context(self):
return {
"minimum": self.minimum,
"maximum": self.maximum,
"step": self.step,
"default": self.default,
**super().get_template_context()
}
@classmethod
def get_shortcut_implementations(cls):
return {
"slider": {},
}
class Checkbox(InputComponent):
"""
Component creates a checkbox that can be set to `True` or `False`. Provides a boolean as an argument to the wrapped function.
Input type: bool
"""
def __init__(self, label=None):
"""
Parameters:
label (str): component name in interface.
"""
self.test_input = True
super().__init__(label)
@classmethod
def get_shortcut_implementations(cls):
return {
"checkbox": {},
}
class CheckboxGroup(InputComponent):
"""
Component creates a set of checkboxes of which a subset can be selected. Provides a list of strings representing the selected choices as an argument to the wrapped function.
Input type: Union[List[str], List[int]]
"""
def __init__(self, choices, type="value", label=None):
'''
Parameters:
choices (List[str]): list of options to select from.
type (str): Type of value to be returned by component. "value" returns the list of strings of the choices selected, "index" returns the list of indices of the choices selected.
label (str): component name in interface.
'''
self.choices = choices
self.type = type
self.test_input = self.choices
super().__init__(label)
def get_template_context(self):
return {
"choices": self.choices,
**super().get_template_context()
}
def preprocess(self, x):
if self.type == "value":
return x
elif self.type == "index":
return [self.choices.index(choice) for choice in x]
else:
raise ValueError("Unknown type: " + str(self.type) + ". Please choose from: 'value', 'index'.")
class Radio(InputComponent):
"""
Component creates a set of radio buttons of which only one can be selected. Provides string representing selected choice as an argument to the wrapped function.
Input type: Union[str, int]
"""
def __init__(self, choices, type="value", label=None):
'''
Parameters:
choices (List[str]): list of options to select from.
type (str): Type of value to be returned by component. "value" returns the string of the choice selected, "index" returns the index of the choice selected.
label (str): component name in interface.
'''
self.choices = choices
self.type = type
self.test_input = self.choices[0]
super().__init__(label)
def get_template_context(self):
return {
"choices": self.choices,
**super().get_template_context()
}
def preprocess(self, x):
if self.type == "value":
return x
elif self.type == "index":
return self.choices.index(x)
else:
raise ValueError("Unknown type: " + str(self.type) + ". Please choose from: 'value', 'index'.")
class Dropdown(InputComponent):
"""
Component creates a dropdown of which only one can be selected. Provides string representing selected choice as an argument to the wrapped function.
Input type: Union[str, int]
"""
def __init__(self, choices, type="value", label=None):
'''
Parameters:
choices (List[str]): list of options to select from.
type (str): Type of value to be returned by component. "value" returns the string of the choice selected, "index" returns the index of the choice selected.
label (str): component name in interface.
'''
self.choices = choices
self.type = type
self.test_input = self.choices[0]
super().__init__(label)
def get_template_context(self):
return {
"choices": self.choices,
**super().get_template_context()
}
def preprocess(self, x):
if self.type == "value":
return x
elif self.type == "index":
return self.choices.index(x)
else:
raise ValueError("Unknown type: " + str(self.type) + ". Please choose from: 'value', 'index'.")
class Image(InputComponent):
"""
Component creates an image upload box with editing capabilities.
Input type: Union[numpy.array, PIL.Image, str]
"""
def __init__(self, shape=None, image_mode='RGB', invert_colors=False, source="upload", tool="editor", type="numpy", label=None):
'''
Parameters:
shape (Tuple[int, int]): shape to crop and resize image to; if None, matches input image size.
image_mode (str): "RGB" if color, or "L" if black and white.
invert_colors (bool): whether to invert the image as a preprocessing step.
source (str): Source of image. "upload" creates a box where user can drop an image file, "webcam" allows user to take snapshot from their webcam, "canvas" defaults to a white image that can be edited and drawn upon with tools.
tool (str): Tools used for editing. "editor" allows a full screen editor, "select" provides a cropping and zoom tool.
type (str): Type of value to be returned by component. "numpy" returns a numpy array with shape (width, height, 3) and values from 0 to 255, "pil" returns a PIL image object, "file" returns a temporary file object whose path can be retrieved by file_obj.name.
label (str): component name in interface.
'''
self.shape = shape
self.image_mode = image_mode
self.source = source
self.tool = tool
self.type = type
self.invert_colors = invert_colors
self.test_input = test_data.BASE64_IMAGE
super().__init__(label)
@classmethod
def get_shortcut_implementations(cls):
return {
"image": {},
"webcam": {"source": "webcam"},
"sketchpad": {"image_mode": "L", "source": "canvas", "shape": (28, 28), "invert_colors": True},
}
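# e.g. the string shortcut "sketchpad" expands to
# Image(image_mode="L", source="canvas", shape=(28, 28), invert_colors=True).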
def get_template_context(self):
return {
"image_mode": self.image_mode,
"source": self.source,
"tool": self.tool,
**super().get_template_context()
}
def preprocess(self, x):
im = processing_utils.decode_base64_to_image(x)
fmt = im.format
with warnings.catch_warnings():
warnings.simplefilter("ignore")
im = im.convert(self.image_mode)
if self.shape is not None:
im = processing_utils.resize_and_crop(
im, (self.shape[0], self.shape[1]))
if self.invert_colors:
im = PIL.ImageOps.invert(im)
if self.type == "pil":
return im
elif self.type == "numpy":
return np.array(im)
elif self.type == "file":
file_obj = tempfile.NamedTemporaryFile(suffix="."+fmt)
im.save(file_obj.name)
return file_obj
else:
raise ValueError("Unknown type: " + str(self.type) + ". Please choose from: 'numpy', 'pil', 'file'.")
def rebuild(self, dir, data):
"""
Default rebuild method to decode a base64 image
"""
im = processing_utils.decode_base64_to_image(data)
timestamp = datetime.datetime.now()
filename = f'input_{timestamp.strftime("%Y-%m-%d-%H-%M-%S")}.png'
im.save(f'{dir}/{filename}', 'PNG')
return filename
class Audio(InputComponent):
"""
Component accepts audio input files.
Input type: Union[Tuple[int, numpy.array], str, numpy.array]
"""
def __init__(self, source="upload", type="numpy", label=None):
"""
Parameters:
source (str): Source of audio. "upload" creates a box where user can drop an audio file, "microphone" creates a microphone input.
type (str): Type of value to be returned by component. "numpy" returns a tuple of (sample_rate, data), where sample_rate is an int and data is a numpy.array of shape (samples, 2), "file" returns a temporary file object whose path can be retrieved by file_obj.name, "mfcc" returns the mfcc coefficients of the input audio.
label (str): component name in interface.
"""
self.source = source
self.type = type
self.test_input = test_data.BASE64_AUDIO
super().__init__(label)
def get_template_context(self):
return {
"source": self.source,
**super().get_template_context()
}
@classmethod
def get_shortcut_implementations(cls):
return {
"audio": {},
"microphone": {"source": "microphone"}
}
def preprocess(self, x):
"""
Decode the base64 audio input and return it in the format given by `type` ("numpy" by default)
"""
file_obj = processing_utils.decode_base64_to_file(x)
if self.type == "file":
return file_obj
elif self.type == "numpy":
return scipy.io.wavfile.read(file_obj.name)
elif self.type == "mfcc":
return processing_utils.generate_mfcc_features_from_audio_file(file_obj.name)
class File(InputComponent):
"""
Component accepts generic file uploads.
Input type: Union[str, bytes]
"""
def __init__(self, type="file", label=None):
'''
Parameters:
type (str): Type of value to be returned by component. "file" returns a temporary file object whose path can be retrieved by file_obj.name, "bytes" returns a bytes object.
label (str): component name in interface.
'''
self.type = type
self.test_input = None
super().__init__(label)
@classmethod
def get_shortcut_implementations(cls):
return {
"file": {},
}
def preprocess(self, x):
if self.type == "file":
return processing_utils.decode_base64_to_file(x)
elif self.type == "bytes":
return processing_utils.decode_base64_to_binary(x)
else:
raise ValueError("Unknown type: " + str(self.type) + ". Please choose from: 'file', 'bytes'.")
class Dataframe(InputComponent):
"""
Component accepts 2D input through a spreadsheet interface.
Input type: Union[pandas.DataFrame, numpy.array, List[Union[str, float]], List[List[Union[str, float]]]]
"""
def __init__(self, headers=None, row_count=3, col_count=3, datatype="str", type="pandas", label=None):
"""
Parameters:
headers (List[str]): Header names to dataframe.
row_count (int): Limit number of rows for input.
col_count (int): Limit number of columns for input. If equal to 1, return data will be one-dimensional. Ignored if `headers` is provided.
datatype (Union[str, List[str]]): Datatype of values in sheet. Can be provided per column as a list of strings, or for the entire sheet as a single string. Valid datatypes are "str", "number", "bool", and "date".
type (str): Type of value to be returned by component. "pandas" for pandas dataframe, "numpy" for numpy array, or "array" for a Python array.
label (str): component name in interface.
"""
self.headers = headers
self.datatype = datatype
self.row_count = row_count
self.col_count = len(headers) if headers else col_count
self.type = type
sample_values = {"str": "abc", "number": 786, "bool": True, "date": "02/08/1993"}
column_dtypes = [datatype]*self.col_count if isinstance(datatype, str) else datatype
self.test_input = [[sample_values[c] for c in column_dtypes] for _ in range(row_count)]
super().__init__(label)
def get_template_context(self):
return {
"headers": self.headers,
"datatype": self.datatype,
"row_count": self.row_count,
"col_count": self.col_count,
**super().get_template_context()
}
@classmethod
def get_shortcut_implementations(cls):
return {
"dataframe": {"type": "pandas"},
"numpy": {"type": "numpy"},
"matrix": {"type": "array"},
"list": {"type": "array", "col_count": 1},
}
def preprocess(self, x):
if self.type == "pandas":
if self.headers:
return pd.DataFrame(x, columns=self.headers)
else:
return pd.DataFrame(x)
if self.col_count == 1:
x = [row[0] for row in x]
if self.type == "numpy":
return np.array(x)
elif self.type == "array":
return x
else:
raise ValueError("Unknown type: " + str(self.type) + ". Please choose from: 'pandas', 'numpy', 'array'.")
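# Illustration of preprocess above (added note, not original source): with col_count=1
# and type="array", an input of [[1], [2], [3]] is flattened to [1, 2, 3]; with
# type="pandas" and headers=["a", "b", "c"], a 3-column input becomes a DataFrame
# with those column names.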
#######################
# DEPRECATED COMPONENTS
#######################
class Sketchpad(InputComponent):
"""
DEPRECATED. Component creates a sketchpad for black and white illustration. Provides numpy array of shape `(width, height)` as an argument to the wrapped function.
Input type: numpy.array
"""
def __init__(self, shape=(28, 28), invert_colors=True,
flatten=False, label=None):
'''
Parameters:
shape (Tuple[int, int]): shape to crop and resize image to.
invert_colors (bool): whether to represent black as 1 and white as 0 in the numpy array.
flatten (bool): whether to reshape the numpy array to a single dimension.
label (str): component name in interface.
'''
warnings.warn("Sketchpad has been deprecated. Please use 'Image' component to generate a sketchpad. The string shorcut 'sketchpad' has been moved to the Image component.", DeprecationWarning)
self.image_width = shape[0]
self.image_height = shape[1]
self.invert_colors = invert_colors
self.flatten = flatten
super().__init__(label)
def preprocess(self, x):
"""
Default preprocessing for the Sketchpad: convert the sketch to grayscale, optionally invert it, and resize it to the target shape (28x28 by default)
"""
im_transparent = processing_utils.decode_base64_to_image(x)
# Create a white background for the alpha channel
im = PIL.Image.new("RGBA", im_transparent.size, "WHITE")
im.paste(im_transparent, (0, 0), im_transparent)
im = im.convert('L')
if self.invert_colors:
im = PIL.ImageOps.invert(im)
im = im.resize((self.image_width, self.image_height))
if self.flatten:
array = np.array(im).flatten().reshape(
1, self.image_width * self.image_height)
else:
array = np.array(im).flatten().reshape(
1, self.image_width, self.image_height)
return array
def process_example(self, example):
return processing_utils.encode_file_to_base64(example)
def rebuild(self, dir, data):
"""
Default rebuild method to decode a base64 image
"""
im = processing_utils.decode_base64_to_image(data)
timestamp = datetime.datetime.now()
filename = f'input_{timestamp.strftime("%Y-%m-%d-%H-%M-%S")}.png'
im.save(f'{dir}/{filename}', 'PNG')
return filename
class Webcam(InputComponent):
"""
DEPRECATED. Component creates a webcam for captured image input. Provides numpy array of shape `(width, height, 3)` as an argument to the wrapped function.
Input type: numpy.array
"""
def __init__(self, shape=(224, 224), label=None):
'''
Parameters:
shape (Tuple[int, int]): shape to crop and resize image to.
label (str): component name in interface.
'''
warnings.warn("Webcam has been deprecated. Please use 'Image' component to generate a webcam. The string shorcut 'webcam' has been moved to the Image component.", DeprecationWarning)
self.image_width = shape[0]
self.image_height = shape[1]
self.num_channels = 3
super().__init__(label)
def preprocess(self, x):
"""
Default preprocessing for the Webcam capture: convert the picture to RGB and resize/crop it to the target shape (224x224 by default)
"""
im = processing_utils.decode_base64_to_image(x)
im = im.convert('RGB')
im = processing_utils.resize_and_crop(
im, (self.image_width, self.image_height))
return np.array(im)
def rebuild(self, dir, data):
"""
Default rebuild method to decode a base64 image
"""
im = processing_utils.decode_base64_to_image(data)
timestamp = datetime.datetime.now()
filename = f'input_{timestamp.strftime("%Y-%m-%d-%H-%M-%S")}.png'
im.save('{}/{}'.format(dir, filename), 'PNG')
return filename
class Microphone(InputComponent):
"""
DEPRECATED. Component creates a microphone element for audio inputs.
Input type: numpy.array
"""
def __init__(self, preprocessing=None, label=None):
'''
Parameters:
preprocessing (Union[str, Callable]): preprocessing to apply to input
label (str): component name in interface.
'''
warnings.warn("Microphone has been deprecated. Please use 'Audio' component to generate a microphone. The string shorcut 'microphone' has been moved to the Audio component.", DeprecationWarning)
super().__init__(label)
if preprocessing is None or preprocessing == "mfcc":
self.preprocessing = preprocessing
else:
raise ValueError(
"unexpected value for preprocessing", preprocessing)
def preprocess(self, x):
"""
By default, no pre-processing is applied to a microphone input file
"""
file_obj = processing_utils.decode_base64_to_file(x)
if self.preprocessing == "mfcc":
return processing_utils.generate_mfcc_features_from_audio_file(file_obj.name)
_, signal = scipy.io.wavfile.read(file_obj.name)
return signal
def rebuild(self, dir, data):
inp = data.split(';')[1].split(',')[1]
wav_obj = base64.b64decode(inp)
timestamp = datetime.datetime.now()
filename = f'input_{timestamp.strftime("%Y-%m-%d-%H-%M-%S")}.wav'
with open("{}/{}".format(dir, filename), "wb+") as f:
f.write(wav_obj)
return filename
``` |
{
"source": "jiayiliu/stock_playground",
"score": 3
} |
#### File: stock_playground/src/Portfolio.py
```python
import logging
logger = logging.getLogger(__name__)
class _Holding(dict):
"""
Holding of a stock
:param name: stock name
:param share: number of share, negative number for short.
:param price: trade price
:param date: trade date
"""
def __init__(self, name, price, share, date):
super().__init__()
self['name'] = name
self['share'] = share
self['price'] = price
self['date'] = date
class Portfolio:
"""
Create a portfolio with a given balance
"""
def __init__(self, balance=0):
self.__positions = []
self.__balance = balance
def __repr__(self):
return_str = super().__repr__()
return_str += "\nCurrent Cash Balance: %f\n" % self.balance
return_str += "\n".join([p.__str__() for p in self.positions])
return return_str
@property
def balance(self):
"""
:return: current cash balance
"""
return self.__balance
@balance.setter
def balance(self, new_balance):
self.__balance = new_balance
@property
def positions(self):
"""
:return: current holdings
"""
return self.__positions
@positions.setter
def positions(self, positions):
self.__positions = positions
def total_asset(self, stock_price):
"""
Return the total value of the portfolio
:param stock_price: {"stock":price} dictionary
:return: valuation of the current portfolio
"""
stock_price = {i.upper():stock_price[i] for i in stock_price}
total = 0
for i in self.positions:
total += stock_price[i['name']] * i['share']
return total + self.balance
def update(self, stock, price, share, date, FIFO=True):
"""
Update the portfolio
:param stock: stock name
:param price: price of the stock
:param share: number of shares to execute - negative number for sell
:param date: trade date (stored with the holding; not used in the accounting logic)
:param FIFO: True for FIFO accounting, False for LIFO (FILO)
:return: self
"""
if share > 0:
self.balance -= price*share
bought = _Holding(stock, price, share, date)
if FIFO:
self.positions.append(bought)
else:
self.positions.insert(0, bought)
else: # sell
share = -share
seq = range(len(self.positions)) if FIFO else range(len(self.positions) - 1, -1, -1)
for i in seq:
if self.positions[i]['name'] == stock:
if self.positions[i]['share'] > share:
self.positions[i]['share'] -= share
self.balance += share*price
share = 0
break
else:
self.balance += self.positions[i]['share'] * price
share -= self.positions[i]['share']
self.positions[i] = None
if share > 0:
self.positions.append(_Holding(stock, price, -share, date))
self.balance += share*price
# remove stock no longer holds
self.positions = [i for i in self.positions if i is not None]
return self
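# Worked example (illustrative only; "AAPL" is a hypothetical ticker):
#   p = Portfolio(100)
#   p.update("AAPL", price=10, share=5, date="2020-01-01")   # buy: balance 100 - 50 = 50
#   p.update("AAPL", price=12, share=-3, date="2020-01-02")  # FIFO sell: balance 50 + 36 = 86
#   # remaining position: 2 shares of AAPL bought at 10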
```
#### File: stock_playground/tests/TestStrategy.py
```python
import unittest
from Strategy import BuyDipHold
from Portfolio import Portfolio
import pandas as pd
class TestStrategy(unittest.TestCase):
def test_buydiphold(self):
stat = BuyDipHold(down_percent=0.1)
port = Portfolio(120)
stock_prices = {"A":pd.DataFrame({"close":[10, 12],"open":[8, 8]}),
"B":pd.DataFrame({"close": [10, 12],"open": [11, 11]})}
is_buy = stat.is_buy(stock_prices, ["A","B"], port)
self.assertEqual(is_buy["A"][0], 10)
self.assertEqual("B" not in is_buy, True)
self.assertEqual(stat.is_sell(stock_prices, ["A"], port), {})
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jiayi-ma/GTP-PNet",
"score": 2
} |
#### File: jiayi-ma/GTP-PNet/model_T.py
```python
from utils import (
read_data,
input_setup_MS,
input_setup_PAN,
imsave,
merge,
sobel_gradient,
lrelu,
l2_norm,
linear_map,
lpls_gradient,
lpls_gradient_4,
sobel_gradient_4
)
import time
import os
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
class T_model(object):
def __init__(self,
sess,
image_size_MS=20,
image_size_PAN=80,
batch_size=48,
c_dim=1,
checkpoint_dir=None,
sample_dir=None):
self.sess = sess
self.is_grayscale = (c_dim == 1)
self.image_size_MS = image_size_MS
self.image_size_PAN = image_size_PAN
self.image_size_Label = image_size_PAN
self.batch_size = batch_size
self.c_dim = c_dim
self.checkpoint_dir = checkpoint_dir
self.sample_dir = sample_dir
self.build_model()
def build_model(self):
########## MS Input ######################
with tf.name_scope('MS1_input'):
self.images_MS1 = tf.placeholder(tf.float32, [None, self.image_size_MS, self.image_size_MS, self.c_dim], name='images_MS1')
with tf.name_scope('MS2_input'):
self.images_MS2 = tf.placeholder(tf.float32, [None, self.image_size_MS, self.image_size_MS, self.c_dim], name='images_MS2')
with tf.name_scope('MS3_input'):
self.images_MS3 = tf.placeholder(tf.float32, [None, self.image_size_MS, self.image_size_MS, self.c_dim], name='images_MS3')
with tf.name_scope('MS4_input'):
self.images_MS4 = tf.placeholder(tf.float32, [None, self.image_size_MS, self.image_size_MS, self.c_dim], name='images_MS4')
########## MS Label Input ######################
with tf.name_scope('MS1_Label'):
self.Label_MS1 = tf.placeholder(tf.float32, [None, self.image_size_Label, self.image_size_Label, self.c_dim], name='Label_MS1')
with tf.name_scope('MS2_Label'):
self.Label_MS2 = tf.placeholder(tf.float32, [None, self.image_size_Label, self.image_size_Label, self.c_dim], name='Label_MS2')
with tf.name_scope('MS3_Label'):
self.Label_MS3 = tf.placeholder(tf.float32, [None, self.image_size_Label, self.image_size_Label, self.c_dim], name='Label_MS3')
with tf.name_scope('MS4_Label'):
self.Label_MS4 = tf.placeholder(tf.float32, [None, self.image_size_Label, self.image_size_Label, self.c_dim], name='Label_MS4')
########## PAN Input ######################
with tf.name_scope('PAN_input'):
self.images_PAN = tf.placeholder(tf.float32, [None, self.image_size_PAN, self.image_size_PAN, self.c_dim], name='images_PAN')
with tf.name_scope('input'):
self.input_image_MS1 = self.images_MS1
self.input_image_MS2 = self.images_MS2
self.input_image_MS3 = self.images_MS3
self.input_image_MS4 = self.images_MS4
self.input_image_MS =tf.concat([self.images_MS1,self.images_MS2,self.images_MS3,self.images_MS4],axis=-1)
self.input_image_PAN = self.images_PAN
self.input_Label_MS1 = self.Label_MS1
self.input_Label_MS2 = self.Label_MS2
self.input_Label_MS3 = self.Label_MS3
self.input_Label_MS4 = self.Label_MS4
self.input_Label_MS =tf.concat([self.input_Label_MS1,self.input_Label_MS2,self.input_Label_MS3,self.input_Label_MS4],axis=-1)
with tf.name_scope('Gradient'):
self.Label_MS_gradient_x,self.Label_MS_gradient_y=sobel_gradient_4(self.input_Label_MS)
self.HRPAN_gradient_x,self.HRPAN_gradient_y=sobel_gradient(self.images_PAN)
with tf.name_scope('fusion'):
self.MS2PAN_gradient_x=self.transfer_model(self.Label_MS_gradient_x,reuse=False)
self.MS2PAN_gradient_y=self.transfer_model(self.Label_MS_gradient_y,reuse=True,update_collection='NO_OPS')
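# Added note: the transfer network maps the stacked 4-band MS label gradients to
# single-channel gradients; the loss below penalizes their squared difference from the
# Sobel gradients of the PAN image, scaled by a factor of 100.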
with tf.name_scope('t_loss'):
self.t_loss_MS2PAN_grad_x = tf.reduce_mean(tf.square(self.MS2PAN_gradient_x - self.HRPAN_gradient_x))
self.t_loss_MS2PAN_grad_y = tf.reduce_mean(tf.square(self.MS2PAN_gradient_y - self.HRPAN_gradient_y))
self.t_loss_total=100*(self.t_loss_MS2PAN_grad_x+self.t_loss_MS2PAN_grad_y)
tf.summary.scalar('t_loss_MS2PAN_grad_x',self.t_loss_MS2PAN_grad_x)
tf.summary.scalar('t_loss_MS2PAN_grad_y',self.t_loss_MS2PAN_grad_y)
tf.summary.scalar('t_loss_total',self.t_loss_total)
self.saver = tf.train.Saver(max_to_keep=100)
with tf.name_scope('image'):
tf.summary.image('input_Label_MS',tf.expand_dims(self.input_Label_MS[1,:,:,:],0))
tf.summary.image('input_image_PAN',tf.expand_dims(self.input_image_PAN[1,:,:,:],0))
tf.summary.image('MS2PAN_gradient_x',tf.expand_dims(self.MS2PAN_gradient_x[1,:,:,:],0))
tf.summary.image('MS2PAN_gradient_y',tf.expand_dims(self.MS2PAN_gradient_y[1,:,:,:],0))
tf.summary.image('HRPAN_gradient_x',tf.expand_dims(self.HRPAN_gradient_x[1,:,:,:],0))
tf.summary.image('HRPAN_gradient_y',tf.expand_dims(self.HRPAN_gradient_y[1,:,:,:],0))
def train(self, config):
if config.is_train:
input_setup_MS(self.sess, config,"data/Train_data/Train_MS1")
input_setup_MS(self.sess, config,"data/Train_data/Train_MS2")
input_setup_MS(self.sess, config,"data/Train_data/Train_MS3")
input_setup_MS(self.sess, config,"data/Train_data/Train_MS4")
input_setup_PAN(self.sess,config,"data/Train_data/Train_PAN")
input_setup_PAN(self.sess, config,"data/Train_data/Label_MS1")
input_setup_PAN(self.sess, config,"data/Train_data/Label_MS2")
input_setup_PAN(self.sess, config,"data/Train_data/Label_MS3")
input_setup_PAN(self.sess, config,"data/Train_data/Label_MS4")
if config.is_train:
data_dir_MS1 = os.path.join('./{}'.format(config.checkpoint_dir), "data/Train_data/Train_MS1","train.h5")
data_dir_MS2 = os.path.join('./{}'.format(config.checkpoint_dir), "data/Train_data/Train_MS2","train.h5")
data_dir_MS3 = os.path.join('./{}'.format(config.checkpoint_dir), "data/Train_data/Train_MS3","train.h5")
data_dir_MS4 = os.path.join('./{}'.format(config.checkpoint_dir), "data/Train_data/Train_MS4","train.h5")
data_dir_PAN = os.path.join('./{}'.format(config.checkpoint_dir), "data/Train_data/Train_PAN","train.h5")
data_dir_Label_MS1 = os.path.join('./{}'.format(config.checkpoint_dir), "data/Train_data/Label_MS1","train.h5")
data_dir_Label_MS2 = os.path.join('./{}'.format(config.checkpoint_dir), "data/Train_data/Label_MS2","train.h5")
data_dir_Label_MS3 = os.path.join('./{}'.format(config.checkpoint_dir), "data/Train_data/Label_MS3","train.h5")
data_dir_Label_MS4 = os.path.join('./{}'.format(config.checkpoint_dir), "data/Train_data/Label_MS4","train.h5")
train_data_MS1= read_data(data_dir_MS1)
train_data_MS2= read_data(data_dir_MS2)
train_data_MS3= read_data(data_dir_MS3)
train_data_MS4= read_data(data_dir_MS4)
train_data_PAN= read_data(data_dir_PAN)
train_data_Label_MS1= read_data(data_dir_Label_MS1)
train_data_Label_MS2= read_data(data_dir_Label_MS2)
train_data_Label_MS3= read_data(data_dir_Label_MS3)
train_data_Label_MS4= read_data(data_dir_Label_MS4)
t_vars = tf.trainable_variables()
self.trans_vars = [var for var in t_vars if 'transfer_model' in var.name]
print(self.trans_vars)
with tf.name_scope('train_step'):
self.train_trans_op = tf.train.AdamOptimizer(config.learning_rate).minimize(self.t_loss_total,var_list=self.trans_vars)
self.summary_op = tf.summary.merge_all()
self.train_writer = tf.summary.FileWriter(config.summary_dir + '/train',self.sess.graph,flush_secs=60)
tf.initialize_all_variables().run()
counter = 0
start_time = time.time()
if config.is_train:
print("Training...")
for ep in xrange(config.epoch):
# Run by batch images
batch_idxs = len(train_data_PAN) // config.batch_size
for idx in xrange(0, batch_idxs):
batch_images_MS1 = train_data_MS1[idx*config.batch_size : (idx+1)*config.batch_size]
batch_images_MS2 = train_data_MS2[idx*config.batch_size : (idx+1)*config.batch_size]
batch_images_MS3 = train_data_MS3[idx*config.batch_size : (idx+1)*config.batch_size]
batch_images_MS4 = train_data_MS4[idx*config.batch_size : (idx+1)*config.batch_size]
batch_images_PAN = train_data_PAN[idx*config.batch_size : (idx+1)*config.batch_size]
batch_Label_MS1 = train_data_Label_MS1[idx*config.batch_size : (idx+1)*config.batch_size]
batch_Label_MS2 = train_data_Label_MS2[idx*config.batch_size : (idx+1)*config.batch_size]
batch_Label_MS3 = train_data_Label_MS3[idx*config.batch_size : (idx+1)*config.batch_size]
batch_Label_MS4 = train_data_Label_MS4[idx*config.batch_size : (idx+1)*config.batch_size]
counter += 1
_, err_trans,summary_str= self.sess.run([self.train_trans_op, self.t_loss_total,self.summary_op], feed_dict={self.images_MS1: batch_images_MS1,self.images_MS2: batch_images_MS2,self.images_MS3: batch_images_MS3,self.images_MS4: batch_images_MS4,self.images_PAN: batch_images_PAN,self.Label_MS1: batch_Label_MS1,self.Label_MS2: batch_Label_MS2,self.Label_MS3: batch_Label_MS3,self.Label_MS4: batch_Label_MS4})
self.train_writer.add_summary(summary_str,counter)
if counter % 10 == 0:
print("Epoch: [%2d], step: [%2d], time: [%4.4f],loss_trans:[%.8f]" \
% ((ep+1), counter, time.time()-start_time, err_trans))
self.save(config.checkpoint_dir, ep)
def transfer_model(self,img_MS_grad,reuse,update_collection=None):
with tf.variable_scope('transfer_model',reuse=reuse):
#########################################################
#################### grad Layer 1 #######################
#########################################################
with tf.variable_scope('layer1_grad'):
weights=tf.get_variable("w1_grad",[3,3,4,16],initializer=tf.truncated_normal_initializer(stddev=1e-3))
bias=tf.get_variable("b1_grad",[16],initializer=tf.constant_initializer(0.0))
conv1_grad = tf.nn.conv2d(img_MS_grad, weights, strides=[1,1,1,1], padding='SAME') + bias
conv1_grad = lrelu(conv1_grad)
#########################################################
#################### grad Layer 2 ###########################
#########################################################
with tf.variable_scope('layer2_grad'):
weights=tf.get_variable("w2_grad",[3,3,16,16],initializer=tf.truncated_normal_initializer(stddev=1e-3))
bias=tf.get_variable("b2_grad",[16],initializer=tf.constant_initializer(0.0))
conv2_grad = tf.nn.conv2d(conv1_grad, weights, strides=[1,1,1,1], padding='SAME') + bias
conv2_grad = lrelu(conv2_grad)
#########################################################
#################### grad Layer 3 ###########################
#########################################################
with tf.variable_scope('layer3_grad'):
weights=tf.get_variable("w3_grad",[3,3,16,16],initializer=tf.truncated_normal_initializer(stddev=1e-3))
bias=tf.get_variable("b3_grad",[16],initializer=tf.constant_initializer(0.0))
conv3_grad = tf.nn.conv2d(conv2_grad, weights, strides=[1,1,1,1], padding='SAME') + bias
conv3_grad = lrelu(conv3_grad)
#########################################################
#################### grad Layer 4 ###########################
#########################################################
grad_cat_4=tf.concat([conv1_grad,conv3_grad],axis=-1)
with tf.variable_scope('layer4_grad'):
weights=tf.get_variable("w4_grad",[3,3,32,16],initializer=tf.truncated_normal_initializer(stddev=1e-3))
bias=tf.get_variable("b4_grad",[16],initializer=tf.constant_initializer(0.0))
conv4_grad = tf.nn.conv2d(grad_cat_4, weights, strides=[1,1,1,1], padding='SAME') + bias
conv4_grad = lrelu(conv4_grad)
#########################################################
#################### grad Layer 5 #######################
#########################################################
grad_cat_5=tf.concat([img_MS_grad,conv4_grad],axis=-1)
with tf.variable_scope('layer5_grad'):
weights=tf.get_variable("w5_grad",[3,3,20,8],initializer=tf.truncated_normal_initializer(stddev=1e-3))
bias=tf.get_variable("b5_grad",[8],initializer=tf.constant_initializer(0.0))
conv5_grad = tf.nn.conv2d(grad_cat_5, weights, strides=[1,1,1,1], padding='SAME') + bias
conv5_grad = lrelu(conv5_grad)
#########################################################
#################### grad Layer 6 #######################
#########################################################
with tf.variable_scope('layer6_grad'):
weights=tf.get_variable("w6_grad",[3,3,8,1],initializer=tf.truncated_normal_initializer(stddev=1e-3))
bias=tf.get_variable("b6_grad",[1],initializer=tf.constant_initializer(0.0))
conv6_grad = tf.nn.conv2d(conv5_grad, weights, strides=[1,1,1,1], padding='SAME') + bias
conv6_grad = tf.nn.tanh(conv6_grad)*2
return conv6_grad
def save(self, checkpoint_dir, step):
model_name = "T_model.model"
model_dir = "%s" % ("T_model")
checkpoint_dir = os.path.join(checkpoint_dir, model_dir)
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
self.saver.save(self.sess,
os.path.join(checkpoint_dir, model_name),
global_step=step)
def load(self, checkpoint_dir):
print(" [*] Reading checkpoints...")
model_dir = "%s" % ("T_model")
checkpoint_dir = os.path.join(checkpoint_dir, model_dir)
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
print(ckpt_name)
self.saver.restore(self.sess, os.path.join(checkpoint_dir,ckpt_name))
return True
else:
return False
``` |
{
"source": "jiayi-ma/STDFusionNet",
"score": 2
} |
#### File: jiayi-ma/STDFusionNet/train.py
```python
from model import STMFusion
from utils import input_setup
import numpy as np
import tensorflow as tf
import pprint
import os
flags = tf.app.flags
flags.DEFINE_integer("epoch", 30, "Number of epoch [10]")
flags.DEFINE_integer("batch_size", 32, "The size of batch images [128]")
flags.DEFINE_integer("image_size", 128, "The size of image to use [33]")
flags.DEFINE_integer("label_size", 128, "The size of label to produce [21]")
flags.DEFINE_float("learning_rate", 1e-3, "The learning rate of gradient descent algorithm [1e-4]")
flags.DEFINE_integer("c_dim", 1, "Dimension of image color. [1]")
flags.DEFINE_integer("scale", 3, "The size of scale factor for preprocessing input image [3]")
flags.DEFINE_integer("stride", 24, "The size of stride to apply input image [14]")
flags.DEFINE_string("checkpoint_dir", "checkpoint", "Name of checkpoint directory [checkpoint]")
flags.DEFINE_string("sample_dir", "sample", "Name of sample directory [sample]")
flags.DEFINE_string("summary_dir", "log", "Name of log directory [log]")
flags.DEFINE_boolean("is_train", True, "True for training, False for testing [True]")
FLAGS = flags.FLAGS
pp = pprint.PrettyPrinter()
def main(_):
# pp.pprint(flags.FLAGS.__flags)
if not os.path.exists(FLAGS.checkpoint_dir):
os.makedirs(FLAGS.checkpoint_dir)
if not os.path.exists(FLAGS.sample_dir):
os.makedirs(FLAGS.sample_dir)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
srcnn = STMFusion(sess,
image_size=FLAGS.image_size,
label_size=FLAGS.label_size,
batch_size=FLAGS.batch_size,
c_dim=FLAGS.c_dim,
checkpoint_dir=FLAGS.checkpoint_dir,
sample_dir=FLAGS.sample_dir)
srcnn.train(FLAGS)
if __name__ == '__main__':
tf.app.run()
``` |
{
"source": "JiayingClaireWu/ReAgent",
"score": 2
} |
#### File: gym/envs/oracle_pvm.py
```python
import logging
from collections import OrderedDict
from typing import Callable, Dict, List
# pyre-fixme[21]: Could not find module `gym`.
import gym
import numpy as np
import reagent.types as rlt
import torch
from reagent.core.dataclasses import dataclass
from reagent.gym.envs import RecSim
from reagent.gym.preprocessors.default_preprocessors import RecsimObsPreprocessor
from scipy import stats
logger = logging.getLogger(__name__)
# score function takes user and doc features, and outputs a score
SCORE_FUNCTION_T = Callable[[np.ndarray, np.ndarray], float]
def make_default_score_fn(fn_i: int) -> SCORE_FUNCTION_T:
"""
Make ith score_fn (constructor of ith score)
"""
def fn(user: np.ndarray, doc: np.ndarray) -> float:
return doc[fn_i]
# user = user ** (fn_i + 1)
# doc = doc ** (fn_i + 1)
# return np.inner(user, doc)
# return user[fn_i] * doc[fn_i]
return fn
VM_WEIGHT_LOW = -1.0
VM_WEIGHT_HIGH = 1.0
MATCH_REWARD_BOOST = 3.0
def get_default_score_fns(num_weights):
return [make_default_score_fn(i) for i in range(num_weights)]
def get_ground_truth_weights(num_weights):
return np.array([1] * num_weights)
@dataclass
class OraclePVM(RecSim):
"""
Wrapper over RecSim for simulating (Personalized) VM Tuning.
The state is the same as for RecSim (user feature + candidate features).
There are num_weights VM weights to tune, and so action space is a vector
of length num_weights.
OraclePVM hides num_weights number of
(1) score_fns (akin to VM models), that take in
user + candidate_i feature and produces a score for candidate_i.
(2) ground_truth_weights, that are used to produce "ground truth", a.k.a.
"Oracle", rankings.
Reward is the Kendall-Tau between ground truth and the ranking created from the
weights given by action. If the rankings match exactly, the reward is boosted to 3.
NOTE: This environment only tests if the Agent can learn the hidden ground
truth weights, which may be far from optimal (in terms of RecSim's rewards,
which we're ignoring). This is easier for unit tests, but in the real world
we will be trying to learn the optimal weights, and the reward signal would
reflect that.
    TODO: make environment easier to learn from by not using RecSim.
"""
user_feat_dim: int = 1
candidate_feat_dim: int = 3
num_weights: int = 3
def __post_init_post_parse__(self):
assert (
self.slate_size == self.num_candidates
), f"Must be equal (slate_size) {self.slate_size} != (num_candidates) {self.num_candidates}"
super().__post_init_post_parse__()
self.score_fns: List[SCORE_FUNCTION_T] = get_default_score_fns(self.num_weights)
self.ground_truth_weights: List[float] = get_ground_truth_weights(
self.num_weights
)
assert len(self.score_fns) == len(
self.ground_truth_weights
), f"{len(self.score_fns)} != {len(self.ground_truth_weights)}"
assert (
len(self.ground_truth_weights) == self.num_weights
), f"{self.ground_truth_weights.shape} != {self.num_weights}"
def reset(self):
self.prev_obs = super().reset()
self.prev_obs.update(
{
"user": np.random.rand(self.user_feat_dim),
"doc": OrderedDict(
[
(str(i), np.random.rand(self.candidate_feat_dim))
for i in range(self.num_candidates)
]
),
}
)
return self.prev_obs
def step(self, action):
user_feat = self.prev_obs["user"]
doc_feats = self.prev_obs["doc"]
scores = self._get_scores(user_feat, doc_feats)
ground_truth_ranking = self._get_ranking(scores, self.ground_truth_weights)
policy_ranking = self._get_ranking(scores, action)
t = True
# comment out to avoid non-stationary
# self.prev_obs, _, t, i = super().step(policy_ranking)
num_matches = (ground_truth_ranking == policy_ranking).sum()
if num_matches == self.slate_size:
reward = MATCH_REWARD_BOOST
else:
reward, _p_value = stats.kendalltau(ground_truth_ranking, policy_ranking)
return self.prev_obs, reward, t, None
def is_match(self, reward):
# for evaluation, return true iff the reward represents a match
return reward > (MATCH_REWARD_BOOST - 1e-6)
@property
def action_space(self):
return gym.spaces.Box(
low=VM_WEIGHT_LOW, high=VM_WEIGHT_HIGH, shape=(self.num_weights,)
)
@action_space.setter
def action_space(self, val):
pass
def _get_scores(
self, user_feat: np.ndarray, doc_feats: Dict[str, np.ndarray]
) -> np.ndarray:
# num_docs x num_scores where i,j coordinate is jth score for ith doc
scores = np.array(
[
# pyre-fixme[16]: `OraclePVM` has no attribute `score_fns`.
[score_fn(user_feat, doc_feat) for score_fn in self.score_fns]
for _k, doc_feat in doc_feats.items()
]
)
return scores
def _get_ranking(self, scores: np.ndarray, weights: np.ndarray):
assert weights.shape == (scores.shape[1],), f"{weights.shape}, {scores.shape}"
weighted_scores = scores * weights
values = weighted_scores.sum(axis=1)
indices = np.argsort(-values)
return indices[: self.slate_size]
def obs_preprocessor(self, obs: np.ndarray) -> rlt.FeatureData:
preprocessor = RecsimObsPreprocessor.create_from_env(self)
preprocessed_obs = preprocessor(obs)
return rlt._embed_states(preprocessed_obs)
def serving_obs_preprocessor(self, obs: np.ndarray):
preprocessor = RecsimObsPreprocessor.create_from_env(self)
x = preprocessor(obs)
# user was batch_size x state_size, stack
user = x.float_features.unsqueeze(1).repeat_interleave(
self.num_candidates, dim=1
)
candidates = x.candidate_docs.float_features
combined = torch.cat([user, candidates], dim=2).squeeze(0)
return (combined, torch.ones_like(combined, dtype=torch.uint8))
```
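The reward computed in OraclePVM.step() above boils down to a Kendall-Tau comparison between the oracle ranking and the policy ranking, with an exact match boosted to 3.0. A self-contained sketch of just that computation (standalone; only the constant name is carried over from the code above):
```python
import numpy as np
from scipy import stats

MATCH_REWARD_BOOST = 3.0  # same constant as in the environment above

def pvm_reward(ground_truth_ranking: np.ndarray, policy_ranking: np.ndarray) -> float:
    # An exact match over the whole slate gets the boosted reward.
    if (ground_truth_ranking == policy_ranking).sum() == len(ground_truth_ranking):
        return MATCH_REWARD_BOOST
    # Otherwise the reward is the Kendall-Tau rank correlation of the two orderings.
    tau, _p_value = stats.kendalltau(ground_truth_ranking, policy_ranking)
    return tau

print(pvm_reward(np.array([2, 0, 1]), np.array([2, 0, 1])))  # 3.0 (exact match)
print(pvm_reward(np.array([2, 0, 1]), np.array([2, 1, 0])))  # ~0.33 (one pair swapped)
```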
#### File: envs/pomdp/pocman.py
```python
import logging
from typing import NamedTuple
import numpy as np
# pyre-fixme[21]: Could not find module `gym`.
from gym import Env
# pyre-fixme[21]: Could not find module `gym.spaces`.
from gym.spaces import Box, Discrete
logger = logging.getLogger(__name__)
MINI = dict( # noqa
_maze=np.array(
[[1, 1, 1, 1], [1, 0, 0, 1], [1, 0, 0, 1], [1, 1, 1, 1]], dtype=np.int8
),
_num_ghosts=1,
_ghost_range=3,
_ghost_home=(3, 3),
_poc_home=(0, 0),
_smell_range=1,
_hear_range=2,
_power_duration=15,
_max_step=20,
)
MICRO = dict( # noqa
_maze=np.array(
[
[1, 3, 3, 2, 3, 3],
[3, 3, 0, 3, 0, 3],
[3, 3, 3, 3, 3, 3],
[1, 1, 0, 3, 0, 3],
[1, 2, 3, 3, 3, 1],
],
dtype=np.int8,
),
_num_ghosts=2,
_ghost_range=3,
_ghost_home=(4, 4),
_poc_home=(0, 0),
_smell_range=1,
_hear_range=2,
_power_duration=15,
_max_step=200,
)
STATE_DIM = 10
class Action:
UP = 0
RIGHT = 1
DOWN = 2
LEFT = 3
ACTIONS = [Action.UP, Action.RIGHT, Action.DOWN, Action.LEFT]
ACTION_DICT = {
Action.UP: "UP",
Action.RIGHT: "RIGHT",
Action.DOWN: "DOWN",
Action.LEFT: "LEFT",
}
class Element:
WALL = 0
CLEAR_WALK_WAY = 1
POWER = 2
FOOD_PELLET = 3
def manhattan_distance(c1, c2):
    # NOTE: despite its name, this returns the Euclidean distance between the two positions.
    return np.sqrt((c1.x - c2.x) ** 2 + (c1.y - c2.y) ** 2)
def opposite_direction(d):
if d == Action.UP:
return Action.DOWN
if d == Action.RIGHT:
return Action.LEFT
if d == Action.DOWN:
return Action.UP
if d == Action.LEFT:
return Action.RIGHT
class Position(NamedTuple):
"""
The index at the left up corner is (0, 0);
The index at the right bottom corner is (height-1, width-1)
"""
x: int
y: int
def __eq__(self, pos1):
return isinstance(pos1, Position) and pos1.x == self.x and pos1.y == self.y
class InternalState(object):
def __init__(self):
self.agent_pos = None
self.ghosts = []
self.food_pos = []
self.power_duration = 0
# pyre-fixme[13]: Attribute `home` is never initialized.
# pyre-fixme[13]: Attribute `max_x` is never initialized.
# pyre-fixme[13]: Attribute `max_y` is never initialized.
class Ghost(object):
max_x: int
max_y: int
home: Position
def __init__(self, env, pos, direction, ghost_range):
self.env = env
self.pos = pos
self.direction = direction
self.ghost_range = ghost_range
self.move_type = "init"
def move(self, agent_pos, agent_in_power):
if manhattan_distance(agent_pos, self.pos) < self.ghost_range:
if agent_in_power > 0:
self.move_type = "defensive"
self._move_defensive(agent_pos)
else:
self.move_type = "aggressive"
self._move_aggressive(agent_pos)
else:
self.move_type = "random"
self._move_random()
def _move_random(self):
movable_directions = set()
for action in ACTIONS:
next_pos = self.env.next_pos(self.pos, action)
if self.pos != next_pos:
movable_directions.add(action)
# no doubling back unless no other choice
if (
opposite_direction(self.direction) in movable_directions
and len(movable_directions) > 1
):
movable_directions.remove(opposite_direction(self.direction))
d = np.random.choice(list(movable_directions))
next_pos = self.env.next_pos(self.pos, d)
self.update(next_pos, d)
def _move_aggressive(self, agent_pos):
best_dist = self.max_x + self.max_y
best_pos = self.pos
best_action = -1
for a in ACTIONS:
next_pos = self.env.next_pos(self.pos, a)
# not movable in this action
if next_pos == self.pos:
continue
dist = manhattan_distance(next_pos, agent_pos)
if dist <= best_dist:
best_pos = next_pos
best_dist = dist
best_action = a
self.update(best_pos, best_action)
def _move_defensive(self, agent_pos):
best_dist = 0
best_pos = self.pos
best_action = -1
for a in ACTIONS:
next_pos = self.env.next_pos(self.pos, a)
# not movable in this action
if next_pos == self.pos:
continue
dist = manhattan_distance(next_pos, agent_pos)
if dist >= best_dist:
best_pos = next_pos
best_dist = dist
best_action = a
self.update(best_pos, best_action)
def update(self, pos, direction):
self.pos = pos
self.direction = direction
def reset(self):
self.pos = self.home
self.direction = PocManEnv.random_action()
self.move_type = "init"
def select_maze(maze):
maze = maze.lower()
if maze == "micro":
return MICRO
if maze == "mini":
return MINI
else:
raise ValueError("Maze size can only be micro or mini. ")
# pyre-fixme[11]: Annotation `Env` is not defined as a type.
class PocManEnv(Env):
def __init__(self):
self.board = select_maze("micro")
self._get_init_state()
self.action_space = Discrete(4)
self.observation_space = Box(low=0, high=1, shape=(STATE_DIM,))
self._reward_range = 100
self.step_cnt = 0
self.max_steps = self.board["_max_step"]
def seed(self, seed=None):
np.random.seed(seed)
def _passable(self, pos):
return self.maze[pos.x, pos.y] != Element.WALL
def _inside(self, pos):
if 0 <= pos.x < self.maze.shape[0] and 0 <= pos.y < self.maze.shape[1]:
return True
return False
def step(self, action):
assert self.action_space.contains(action)
assert self.done is False
self.step_cnt += 1
reward = -1
next_pos = self.next_pos(self.internal_state.agent_pos, action)
self.internal_state.agent_pos = next_pos
if self.internal_state.power_duration > 0:
self.internal_state.power_duration -= 1
agent_in_power = self.internal_state.power_duration > 0
hit_ghost = set()
for g, ghost in enumerate(self.internal_state.ghosts):
# check if the ghost hits the agent before and after it moves
if ghost.pos == self.internal_state.agent_pos:
hit_ghost.add(g)
else:
ghost.move(self.internal_state.agent_pos, agent_in_power)
if ghost.pos == self.internal_state.agent_pos:
hit_ghost.add(g)
hit_ghost = list(hit_ghost)
for g in hit_ghost:
if self.internal_state.power_duration > 0:
reward += 25
self.internal_state.ghosts[g].reset()
else:
reward += -100
self.done = True
break
if self.step_cnt > self.board["_max_step"]:
self.done = True
if self._agent_at_food():
reward += 10
self.maze[
self.internal_state.agent_pos.x, self.internal_state.agent_pos.y
] = Element.CLEAR_WALK_WAY
if self._food_left() == 0:
self.done = True
if self._agent_at_power():
self.internal_state.power_duration = self.board["_power_duration"]
self.maze[
self.internal_state.agent_pos.x, self.internal_state.agent_pos.y
] = Element.CLEAR_WALK_WAY
reward += 10
ob = self._make_ob()
return ob, reward, self.done, {"state": self.internal_state}
def _agent_at_food(self):
agent_pos = self.internal_state.agent_pos
if self.maze[agent_pos.x, agent_pos.y] == Element.FOOD_PELLET:
return True
return False
def _agent_at_power(self):
agent_pos = self.internal_state.agent_pos
if self.maze[agent_pos.x, agent_pos.y] == Element.POWER:
return True
return False
def _make_ob(self):
"""
Return 10 state features of observation:
4 features indicating whether the agent can see a ghost
in that direction (UP, RIGHT, DOWN, LEFT)
4 features indicating whether he can feel a wall in each of the
cardinal directions, which is set to 1 if he is adjacent to a wall
1 feature indicating whether he can hear a ghost, which is set to 1
if he is within Manhattan distance 2 of a ghost
1 feature indicating whether he can smell food (adjacent or
diagonally adjacent to any food)
"""
ob = np.zeros(STATE_DIM)
for i, action in enumerate(ACTIONS):
ob[i] = self._see_ghost(action)
next_pos = self.next_pos(self.internal_state.agent_pos, action)
# If an agent couldn't move from the current position, then there is a wall
if next_pos == self.internal_state.agent_pos:
ob[i + len(ACTIONS)] = 1
if self._hear_ghost():
ob[2 * len(ACTIONS)] = 1
if self._smell_food():
ob[2 * len(ACTIONS) + 1] = 1
return ob
def _see_ghost(self, action):
distances = []
agent_pos = self.internal_state.agent_pos
for ghost in self.internal_state.ghosts:
if agent_pos.x != ghost.pos.x and agent_pos.y != ghost.pos.y:
continue
if agent_pos == ghost.pos:
distances.append(0)
break
if (
(
action == Action.UP
and ghost.pos.x < agent_pos.x
and ghost.pos.y == agent_pos.y
)
or (
action == Action.DOWN
and ghost.pos.x > agent_pos.x
and ghost.pos.y == agent_pos.y
)
or (
action == Action.LEFT
and ghost.pos.y < agent_pos.y
and ghost.pos.x == agent_pos.x
)
or (
action == Action.RIGHT
and ghost.pos.y > agent_pos.y
and ghost.pos.x == agent_pos.x
)
) and not self._wall_between(agent_pos, ghost.pos):
distances.append(manhattan_distance(agent_pos, ghost.pos))
if not distances:
return -1
return 1
# the environment can also be adapted to return a real-valued distance
# return min(distances)
def _smell_food(self):
smell_range = self.board["_smell_range"]
agent_pos = self.internal_state.agent_pos
for x in range(-smell_range, smell_range + 1):
for y in range(-smell_range, smell_range + 1):
smell_x = agent_pos.x + x
smell_y = agent_pos.y + y
if (
0 <= smell_x < self.maze.shape[0]
and 0 <= smell_y < self.maze.shape[1]
and self.maze[smell_x, smell_y] == Element.FOOD_PELLET
):
return True
return False
def _hear_ghost(self):
for ghost in self.internal_state.ghosts:
if (
manhattan_distance(ghost.pos, self.internal_state.agent_pos)
<= self.board["_hear_range"]
):
return True
return False
def _wall_between(self, pos1, pos2):
if pos1 == pos2:
return False
assert pos1.x == pos2.x or pos1.y == pos2.y
if pos1.y == pos2.y:
for i in range(min(pos1.x, pos2.x) + 1, max(pos1.x, pos2.x)):
if self.maze[i, pos1.y] == Element.WALL:
return True
elif pos1.x == pos2.x:
for i in range(min(pos1.y, pos2.y), max(pos1.y, pos2.y)):
if self.maze[pos1.x, i] == Element.WALL:
return True
return False
def _food_left(self):
return np.sum(self.maze == Element.FOOD_PELLET)
@staticmethod
def random_action():
return np.random.randint(0, 4)
@staticmethod
def print_action(action):
return ACTION_DICT[action]
def reset(self):
self.done = False
self.step_cnt = 0
self._get_init_state()
ob = self._make_ob()
return ob
def _get_init_state(self):
self.maze = self.board["_maze"].copy()
self.internal_state = InternalState()
self.internal_state.agent_pos = Position(*self.board["_poc_home"])
Ghost.max_x = self.maze.shape[0]
Ghost.max_y = self.maze.shape[1]
ghost_home = Position(*self.board["_ghost_home"])
Ghost.home = ghost_home
for _ in range(self.board["_num_ghosts"]):
pos = Position(ghost_home.x, ghost_home.y)
self.internal_state.ghosts.append(
Ghost(
self,
pos,
direction=self.random_action(),
ghost_range=self.board["_ghost_range"],
)
)
return self.internal_state
def next_pos(self, pos, action):
x_offset, y_offset = 0, 0
if action == Action.UP:
x_offset = -1
y_offset = 0
elif action == Action.DOWN:
x_offset = 1
y_offset = 0
elif action == Action.RIGHT:
x_offset = 0
y_offset = 1
elif action == Action.LEFT:
x_offset = 0
y_offset = -1
next_pos = Position(pos.x + x_offset, pos.y + y_offset)
if self._inside(next_pos) and self._passable(next_pos):
return next_pos
else:
return pos
def print_internal_state(self):
print("Step", self.step_cnt)
print_maze = self.maze.astype(str)
print_maze[
self.internal_state.agent_pos.x, self.internal_state.agent_pos.y
] = "A"
ghost_str = ""
for g, ghost in enumerate(self.internal_state.ghosts):
print_maze[ghost.pos.x, ghost.pos.y] = "G"
ghost_str += "Ghost {} at {}, direction={}, type={}\n".format(
g, ghost.pos, ACTION_DICT[ghost.direction], ghost.move_type
)
np.set_printoptions(formatter={"str_kind": lambda x: x})
print("Maze: \n{}".format(print_maze))
print(
"Agent at {}, power duration {}".format(
self.internal_state.agent_pos, self.internal_state.power_duration
)
)
print(ghost_str[:-1])
def print_ob(self, ob):
ob_str = ""
for i, action in enumerate(ACTIONS):
if ob[i] >= 0:
ob_str += " SEE GHOST {},".format(ACTION_DICT[action])
for i, action in enumerate(ACTIONS):
if ob[i + len(ACTIONS)] == 1:
ob_str += " FEEL WALL {},".format(ACTION_DICT[action])
if ob[-2]:
ob_str += " HEAR GHOST,"
if ob[-1]:
ob_str += " SMELL FOOD,"
return ob_str
```
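A minimal random-policy rollout against the environment above, as a sketch. It assumes the file is saved locally as pocman.py; only numpy and gym are required.
```python
from pocman import PocManEnv  # hypothetical local module name for the file above

env = PocManEnv()
env.seed(0)
ob = env.reset()
total_reward = 0.0
while True:
    action = PocManEnv.random_action()        # uniform over UP / RIGHT / DOWN / LEFT
    ob, reward, done, info = env.step(action)
    total_reward += reward
    if done:
        break
print("episode return:", total_reward)
print("final observation:", env.print_ob(ob))  # human-readable 10-feature summary
```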
#### File: reagent/preprocessing/sparse_to_dense.py
```python
from typing import Dict, List, Tuple
# @manual=third-party//pandas:pandas-py
import pandas as pd
import torch
from reagent.preprocessing import normalization
class SparseToDenseProcessor:
def __init__(
self, sorted_features: List[int], set_missing_value_to_zero: bool = False
):
self.sorted_features = sorted_features
self.set_missing_value_to_zero = set_missing_value_to_zero
def __call__(self, sparse_data):
return self.process(sparse_data)
class StringKeySparseToDenseProcessor(SparseToDenseProcessor):
"""
We just have this in case the input data is keyed by string
"""
def __init__(
self, sorted_features: List[int], set_missing_value_to_zero: bool = False
):
super().__init__(sorted_features, set_missing_value_to_zero)
self._sparse_to_dense = PythonSparseToDenseProcessor(
sorted_features, set_missing_value_to_zero
)
def process(self, sparse_data) -> Tuple[torch.Tensor, torch.Tensor]:
# Convert all keys to integers
sparse_data_int = []
for sd in sparse_data:
sd_int = {}
for k, v in sd.items():
sd_int[int(k)] = v
sparse_data_int.append(sd_int)
return self._sparse_to_dense(sparse_data_int)
class PythonSparseToDenseProcessor(SparseToDenseProcessor):
def __init__(
self, sorted_features: List[int], set_missing_value_to_zero: bool = False
):
super().__init__(sorted_features, set_missing_value_to_zero)
self.feature_to_index: Dict[int, int] = {
f: i for i, f in enumerate(sorted_features)
}
def process(
self, sparse_data: List[Dict[int, float]]
) -> Tuple[torch.Tensor, torch.Tensor]:
missing_value = normalization.MISSING_VALUE
if self.set_missing_value_to_zero:
missing_value = 0.0
# pyre-fixme[16]: Module `pd` has no attribute `DataFrame`.
state_features_df = pd.DataFrame(sparse_data).fillna(missing_value)
# Add columns identified by normalization, but not present in batch
for col in self.sorted_features:
if col not in state_features_df.columns:
state_features_df[col] = missing_value
values = torch.from_numpy(
state_features_df[self.sorted_features].to_numpy()
).float()
if self.set_missing_value_to_zero:
# When we set missing values to 0, we don't know what is and isn't missing
presence = torch.ones_like(values, dtype=torch.bool)
else:
presence = values != missing_value
return values, presence
```
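PythonSparseToDenseProcessor.process above turns a list of {feature_id: value} dicts into a dense value tensor plus a presence mask. A self-contained sketch of the same transformation (the MISSING_VALUE sentinel here is a hypothetical stand-in for reagent's normalization.MISSING_VALUE):
```python
from typing import Dict, List, Tuple
import pandas as pd
import torch

MISSING_VALUE = -1337.0  # hypothetical sentinel standing in for normalization.MISSING_VALUE

def sparse_to_dense(
    sparse_data: List[Dict[int, float]], sorted_features: List[int]
) -> Tuple[torch.Tensor, torch.Tensor]:
    df = pd.DataFrame(sparse_data).fillna(MISSING_VALUE)
    for col in sorted_features:            # add feature columns absent from this batch
        if col not in df.columns:
            df[col] = MISSING_VALUE
    values = torch.from_numpy(df[sorted_features].to_numpy()).float()
    presence = values != MISSING_VALUE     # True where the feature was actually observed
    return values, presence

values, presence = sparse_to_dense([{1: 0.5}, {2: 2.0, 3: 1.0}], sorted_features=[1, 2, 3])
print(values)    # shape (2, 3); missing entries hold the sentinel value
print(presence)  # boolean mask of observed entries
```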
#### File: reagent/samplers/frechet.py
```python
from typing import Optional
import reagent.types as rlt
import torch
from reagent.core.configuration import resolve_defaults
from reagent.gym.types import Sampler
from torch.distributions import Gumbel
class FrechetSort(Sampler):
@resolve_defaults
def __init__(
self,
shape: float = 1.0,
topk: Optional[int] = None,
equiv_len: Optional[int] = None,
log_scores: bool = False,
):
"""FréchetSort is a softer version of descending sort which samples all possible
orderings of items favoring orderings which resemble descending sort. This can
be used to convert descending sort by rank score into a differentiable,
stochastic policy amenable to policy gradient algorithms.
:param shape: parameter of Frechet Distribution. Lower values correspond to
aggressive deviations from descending sort.
        :param topk: If specified, only the first topk actions are returned.
:param equiv_len: Orders are considered equivalent if the top equiv_len match. Used
in probability computations
        :param log_scores: Scores passed in are already log-transformed. In this case, we would
simply add Gumbel noise.
Example:
Consider the sampler:
sampler = FrechetSort(shape=3, topk=5, equiv_len=3)
Given a set of scores, this sampler will produce indices of items roughly
resembling a argsort by scores in descending order. The higher the shape,
the more it would resemble a descending argsort. `topk=5` means only the top
5 ranks will be output. The `equiv_len` determines what orders are considered
equivalent for probability computation. In this example, the sampler will
produce probability for the top 3 items appearing in a given order for the
`log_prob` call.
"""
self.shape = shape
self.topk = topk
self.upto = equiv_len
if topk is not None:
if equiv_len is None:
self.upto = topk
# pyre-fixme[58]: `>` is not supported for operand types `Optional[int]`
# and `Optional[int]`.
if self.upto > self.topk:
raise ValueError(f"Equiv length {equiv_len} cannot exceed topk={topk}.")
self.gumbel_noise = Gumbel(0, 1.0 / shape)
self.log_scores = log_scores
@staticmethod
def select_indices(scores: torch.Tensor, actions: torch.Tensor) -> torch.Tensor:
"""Helper for scores[actions] that are also works for batched tensors"""
if len(actions.shape) > 1:
num_rows = scores.size(0)
row_indices = torch.arange(num_rows).unsqueeze(0).T # pyre-ignore[ 16 ]
return scores[row_indices, actions].T
else:
return scores[actions]
def sample_action(self, scores: torch.Tensor) -> rlt.ActorOutput:
"""Sample a ranking according to Frechet sort. Note that possible_actions_mask
is ignored as the list of rankings scales exponentially with slate size and
number of items and it can be difficult to enumerate them."""
assert scores.dim() == 2, "sample_action only accepts batches"
log_scores = scores if self.log_scores else torch.log(scores)
perturbed = log_scores + self.gumbel_noise.sample((scores.shape[1],))
action = torch.argsort(perturbed.detach(), descending=True)
if self.topk is not None:
action = action[: self.topk]
log_prob = self.log_prob(scores, action)
return rlt.ActorOutput(action, log_prob)
def log_prob(self, scores: torch.Tensor, action) -> torch.Tensor:
"""What is the probability of a given set of scores producing the given
list of permutations only considering the top `equiv_len` ranks?"""
log_scores = scores if self.log_scores else torch.log(scores)
s = self.select_indices(log_scores, action)
n = len(log_scores)
p = self.upto if self.upto is not None else n
return -sum(
torch.log(torch.exp((s[k:] - s[k]) * self.shape).sum(dim=0))
for k in range(p) # pyre-ignore
)
``` |
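The heart of FrechetSort.sample_action above is the Gumbel perturbation trick: add Gumbel noise to the log-scores and argsort in descending order, which yields a stochastic ranking that concentrates on plain descending sort as the shape parameter grows. A standalone sketch of that step:
```python
import torch
from torch.distributions import Gumbel

shape = 3.0                                # larger shape -> closer to deterministic argsort
scores = torch.tensor([[0.1, 0.7, 0.2]])   # one slate of three item scores
gumbel_noise = Gumbel(0.0, 1.0 / shape)

log_scores = torch.log(scores)
perturbed = log_scores + gumbel_noise.sample(scores.shape)
ranking = torch.argsort(perturbed, dim=1, descending=True)
print(ranking)  # a sampled permutation; item 1 comes first most of the time
```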
{
"source": "jiayingwang/simple_graph",
"score": 3
} |
#### File: simple_graph/simple_graph/edges.py
```python
from collections import defaultdict
class Edge:
def __init__(self, **kwargs):
for key, value in kwargs.items():
if key == 'weight':
self.__dict__[key] = float(value)
else:
self.__dict__[key] = value
def __repr__(self):
return str(self.to_dict())
def to_dict(self):
return self.__dict__
class Edges:
def __init__(self, undirected=True, verbose=False):
self.undirected = undirected
self.verbose = verbose
self.clear()
def clear(self):
self._neighbors = defaultdict(dict)
self._reverse_neighbors = defaultdict(dict)
@property
def items(self):
return [(u, v) for u in self._neighbors for v in self._neighbors[u]]
def neighbors(self, u):
neighbors = []
if u in self._neighbors:
neighbors += list(self._neighbors[u].keys())
if self.undirected and u in self._reverse_neighbors:
neighbors += list(self._reverse_neighbors[u].keys())
return neighbors
def reverse_neighbors(self, u):
reverse_neighbors = []
if u in self._reverse_neighbors:
reverse_neighbors += list(self._reverse_neighbors[u].keys())
if self.undirected and u in self._neighbors:
reverse_neighbors += list(self._neighbors[u].keys())
return reverse_neighbors
def __getitem__(self, items):
u, v = items[0], items[1]
if self.undirected and u > v:
u, v = v, u
if u not in self._neighbors:
return None
return self._neighbors[u].get(v, None)
def remove(self, u, v):
if self.undirected and u > v:
u, v = v, u
if u not in self._neighbors:
return
try:
self._neighbors[u].pop(v)
self._reverse_neighbors[v].pop(u)
        except KeyError:
            pass
def remove_vertex(self, x):
'''
remove a vertex needs to remove the related edges
'''
for n in self._neighbors[x]:
# remove link in reverse_neighors
if n in self._reverse_neighbors and x in self._reverse_neighbors[n]:
self._reverse_neighbors[n].pop(x)
self._neighbors.pop(x)
for n in self._reverse_neighbors[x]:
# remove link in neighbors
if n in self._neighbors and x in self._neighbors[n]:
self._neighbors[n].pop(x, None)
self._reverse_neighbors.pop(x)
def add(self, u, v, **kwargs):
if self.undirected and u > v:
u, v = v, u
edge = self[u, v]
if edge:
if self.verbose:
print(f'Edge ({u},{v}) already exists.')
return
edge = Edge(**kwargs)
self._neighbors[u][v] = edge
if u == v and self.undirected:
return
self._reverse_neighbors[v][u] = edge
```
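A quick usage sketch of the Edges container above, assuming the module is importable as simple_graph.edges:
```python
from simple_graph.edges import Edges  # assumes the package above is installed

edges = Edges(undirected=True)
edges.add('a', 'b', weight=2.0)
edges.add('b', 'c')

print(edges.items)           # [('a', 'b'), ('b', 'c')]
print(edges.neighbors('b'))  # forward plus reverse neighbors of 'b': ['c', 'a']
print(edges['a', 'b'])       # {'weight': 2.0}

edges.remove_vertex('b')     # also drops ('a', 'b') and ('b', 'c')
print(edges.items)           # []
```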
#### File: simple_graph/tests/test_graph.py
```python
import unittest
from simple_graph import Graph
class TestGraph(unittest.TestCase):
def test_vertices_edges(self):
G = Graph()
self.assertEqual(G.vertices, [])
G = Graph({0: [1, 2], 1: [2]})
self.assertEqual(G.vertices, [0, 1, 2])
self.assertEqual(G.edges, [(0, 1), (0, 2), (1, 2)])
def test_init(self):
G = Graph({'V': [1]})
self.assertEqual(G.vertices, [1])
def test_edge_weight(self):
G = Graph({0: [1, 2], 1: [2]})
self.assertEqual(G.total_edge_weight(1), 2)
self.assertEqual(G.total_edge_weight(), 6)
G = Graph({1: {1: {'weight': 6}, 2: {'weight': 2}, 0: {'weight': 2}}, 2: {1: {'weight': 2}, 2: {'weight': 6}, 0: {'weight': 2}}, 0: {1: {'weight': 2}, 2: {'weight': 2}, 0: {'weight': 6}}})
self.assertEqual(G.total_edge_weight(), 30)
self.assertEqual(G.total_edge_weight(1), 10)
G = Graph(undirected=False)
G.add_edge(1, 2)
self.assertEqual(G.total_edge_weight(1), 0)
self.assertEqual(G.total_edge_weight(), 1)
self.assertEqual(G.total_edge_weight(2), 1)
self.assertEqual(G.total_edge_weight(1, 'out'), 1)
self.assertEqual(G.total_edge_weight(1, 'all'), 1)
def test_add_weight(self):
G = Graph({0: [1, 2], 1: [2]})
self.assertEqual(G.edge_weight(1, 2), 1)
G.add_edge_weight(1, 2, 1)
self.assertEqual(G.edge_weight(1, 2), 2)
self.assertEqual(G.vertex_weight(1), 1)
G.add_vertex_weight(1, 1)
self.assertEqual(G.vertex_weight(1), 2)
def test_to_dict(self):
G = Graph({1: {1: {'weight': 6}, 2: {'weight': 2}, 0: {'weight': 2}}, 2: {1: {'weight': 2}, 2: {'weight': 6}, 0: {'weight': 2}}, 0: {1: {'weight': 2}, 2: {'weight': 2}, 0: {'weight': 6}}})
self.assertEqual(G.to_dict(),
{'V': [(1, {}), (2, {}), (0, {})],
'E': [(1, 1, {'weight': 6}),
(1, 2, {'weight': 2}),
(0, 1, {'weight': 2}),
(0, 2, {'weight': 2}),
(0, 0, {'weight': 6}),
(2, 2, {'weight': 6})]})
def test_edges(self):
G = Graph({1: {1: {'weight': 6}, 2: {'weight': 2}, 0: {'weight': 2}}, 2: {1: {'weight': 2}, 2: {'weight': 6}, 0: {'weight': 2}}, 0: {1: {'weight': 2}, 2: {'weight': 2}, 0: {'weight': 6}}})
self.assertEqual(G.edges, [(1, 1), (1, 2), (0, 1), (0, 2), (0, 0), (2, 2)])
def test_vertices(self):
G = Graph({1: {1: {'weight': 6}, 2: {'weight': 2}, 0: {'weight': 2}}, 2: {1: {'weight': 2}, 2: {'weight': 6}, 0: {'weight': 2}}, 0: {1: {'weight': 2}, 2: {'weight': 2}, 0: {'weight': 6}}})
self.assertEqual(set(G.vertices), {1, 2, 0})
G = Graph(undirected=False)
G.add_edge(1, 2)
self.assertEqual(set(G.vertices), {1, 2})
def test_add_vertex(self):
G = Graph({'E':[[0, 1], [1, 2], [0, 2]]})
G.add_vertex(3)
self.assertEqual(G.find_isolated_vertices(), [3])
def test_remove_vertex(self):
G = Graph(undirected=False)
G.add_edge(1, 2)
G.remove_vertex(1)
self.assertEqual(set(G.vertices), {2})
G.remove_edge(1, 2)
G = Graph({'V': ['1', '2', '0', '4', '3', '7', '6', '5', '11', '10', '8', '15', '14', '9', '12', '13'], 'E': [('1', '2'), ('1', '4'), ('1', '7'), ('2', '0'), ('2', '4'), ('2', '6'), ('0', '3'), ('0', '5'), ('7', '5'), ('7', '6'), ('5', '11'), ('4', '10'), ('8', '15'), ('8', '14'), ('8', '9'), ('14', '9'), ('9', '12'), ('10', '14'), ('10', '13'), ('11', '10'), ('6', '11'), ('3', '7')]})
G.remove_vertex('1')
self.assertNotIn('1', G.vertices)
self.assertNotIn(('1', '2'), G.edges)
self.assertNotIn(('1', '4'), G.edges)
self.assertNotIn(('1', '7'), G.edges)
G.remove_vertex('4')
self.assertNotIn('4', G.vertices)
self.assertNotIn(('2', '4'), G.edges)
self.assertNotIn(('4', '10'), G.edges)
G = Graph({'E':{ "a" : ["d"],
"b" : ["c"],
"c" : ["b", "c", "d", "e"],
"d" : ["a", "c"],
"e" : ["c"],
"f" : []
}})
G.remove_vertex('a')
G.remove_vertex('c')
self.assertEqual(set(G.vertices), {'d', 'b', 'e', 'f'})
self.assertEqual(G.edges, [])
def test_remove_edge(self):
G = Graph({'E': [(1, 2)]})
G.remove_edge(1, 2)
G.remove_edge(2, 1)
def test_neighbors(self):
G = Graph({0: [1, 2], 1: [2]})
self.assertEqual(set(G.neighbors(1)), {0, 2})
def test_add_edge(self):
G = Graph()
self.assertEqual(G.has_edge(1, 2), False)
G = Graph({0: [1, 2], 1: [2]})
self.assertEqual(G.has_edge(2, 3), False)
G.add_edge(2, 3)
self.assertEqual(G.has_edge(2, 3), True)
self.assertEqual(G.total_edge_weight(), 8)
G.add_edge(2, 3)
self.assertEqual(G.total_edge_weight(), 8)
G = Graph()
G.add_edge('a', 'z')
G.add_edge('x', 'y')
self.assertEqual(G.has_edge('a', 'z'), True)
self.assertEqual(G.has_edge('x', 'y'), True)
def test_isolate(self):
G = Graph({
"a" : ["c"],
"b" : ["c", "e"],
"c" : ["a", "b", "d", "e"],
"d" : ["c"],
"e" : ["c", "b"],
"f" : []
})
self.assertEqual(G.find_isolated_vertices(), ['f'])
G = Graph({1: [2, 3], 2: [3]}, undirected = False)
self.assertEqual(G.find_isolated_vertices(), [])
def test_find_path(self):
G = Graph({
"a" : ["d"],
"b" : ["c"],
"c" : ["b", "c", "d", "e"],
"d" : ["a", "c"],
"e" : ["c"],
"f" : []
})
self.assertEqual(G.find_path('a', 'b'), ['a', 'd', 'c', 'b'])
self.assertEqual(G.find_path('a', 'f'), None)
self.assertEqual(G.find_path('c', 'c'), ['c'])
def test_find_all_paths(self):
G = Graph({
"a" : ["d", "f"],
"b" : ["c"],
"c" : ["b", "c", "d", "e"],
"d" : ["a", "c"],
"e" : ["c"],
"f" : ["d"]
})
self.assertEqual(G.find_all_paths('a', 'b'), [['a', 'd', 'c', 'b'], ['a', 'f', 'd', 'c', 'b']])
self.assertEqual(G.find_all_paths('a', 'f'), [['a', 'd', 'f'], ['a', 'f']])
self.assertEqual(G.find_all_paths('c', 'c'), [['c']])
def test_degree(self):
G = Graph(
{'V': ['a', 'd', 'b', 'c', 'e', 'f'], 'E': [('a', 'd'), ('b', 'c'), ('c', 'c'), ('c', 'e'), ('d', 'c')]})
self.assertEqual(G.degree('a'), 1)
self.assertEqual(G.degree('c'), 5)
self.assertEqual(G.degree('d'), 2)
self.assertEqual(G.degree('f'), 0)
def test_max_degree(self):
G = Graph(
{'V': ['a', 'd', 'b', 'c', 'e', 'f'], 'E': [('a', 'd'), ('b', 'c'), ('c', 'c'), ('c', 'e'), ('d', 'c')]})
self.assertEqual(G.max_degree(), 5)
def test_min_degree(self):
G = Graph(
{'V': ['a', 'd', 'b', 'c', 'e', 'f'], 'E': [('a', 'd'), ('b', 'c'), ('c', 'c'), ('c', 'e'), ('d', 'c')]})
self.assertEqual(G.min_degree(), 0)
def test_degrees(self):
G = Graph(
{'V': ['a', 'd', 'b', 'c', 'e', 'f'], 'E': [('a', 'd'), ('b', 'c'), ('c', 'c'), ('c', 'e'), ('d', 'c')]})
self.assertEqual(G.degrees(), [5, 2, 1, 1, 1, 0])
def test_density(self):
G = Graph({
"a" : ["d","f"],
"b" : ["c","b"],
"c" : ["b", "c", "d", "e"],
"d" : ["a", "c"],
"e" : ["c"],
"f" : ["a"]
})
self.assertEqual(float(f"{G.density():.4f}"), 0.3889)
G = Graph(
{'V': ['a', 'd', 'b', 'c', 'e', 'f'], 'E': [('a', 'd'), ('b', 'c'), ('c', 'c'), ('c', 'e'), ('d', 'c')]})
self.assertEqual(float(f"{G.density():.4f}"), 0.2778)
complete_graph = {
"a" : ["b","c"],
"b" : ["a","c"],
"c" : ["a","b"]
}
G = Graph(complete_graph)
self.assertEqual(float(f"{G.density():.4f}"), 1.0)
isolated_graph = {
"a" : [],
"b" : [],
"c" : []
}
G = Graph(isolated_graph)
self.assertEqual(float(f"{G.density():.4f}"), 0.0)
def test_is_connected(self):
G = Graph({
"a" : ["d"],
"b" : ["c"],
"c" : ["b", "c", "d", "e"],
"d" : ["a", "c"],
"e" : ["c"],
"f" : []
})
self.assertEqual(G.is_connected(), False)
G = Graph({ "a" : ["d","f"],
"b" : ["c"],
"c" : ["b", "c", "d", "e"],
"d" : ["a", "c"],
"e" : ["c"],
"f" : ["a"]
})
self.assertEqual(G.is_connected(), True)
G = Graph({ "a" : ["d","f"],
"b" : ["c","b"],
"c" : ["b", "c", "d", "e"],
"d" : ["a", "c"],
"e" : ["c"],
"f" : ["a"]
})
self.assertEqual(G.is_connected(), True)
def test_diameter(self):
G = Graph({
"a" : ["c"],
"b" : ["c","e","f"],
"c" : ["a","b","d","e"],
"d" : ["c"],
"e" : ["b","c","f"],
"f" : ["b","e"]
})
self.assertEqual(G.diameter(), 3)
def test_edge_betweenness(self):
G = Graph({'s': {'u':{'weight': 10}, 'x':{'weight': 5}},
'u': {'v':{'weight': 1}, 'x':{'weight': 2}},
'v': {'y':{'weight': 4}},
'x':{'u':{'weight': 3},'v':{'weight': 9},'y':{'weight': 2}},
'y':{'s':{'weight': 7},'v':{'weight': 6}}}, undirected=False)
self.assertDictEqual(G.edge_betweenness(), {('s', 'u'): 0.0,
('s', 'x'): 0.4,
('u', 'v'): 0.15000000000000002,
('u', 'x'): 0.15000000000000002,
('v', 'y'): 0.2,
('x', 'u'): 0.30000000000000004,
('x', 'v'): 0.0,
('x', 'y'): 0.25,
('y', 's'): 0.4,
('y', 'v'): 0.05})
def test_connected_components(self):
G = Graph({'E':[(1, 2), (2, 3), (4, 5)] })
self.assertEqual(G.connected_components, [[1, 2, 3], [4, 5]])
def test_max_cliques(self):
G = Graph({'E': [(1, 2), (1, 3), (1, 4), (1, 5), (2, 3), (2, 4), (3, 4), (4, 5)]})
self.assertEqual(G.max_cliques, [[1, 4, 2, 3], [1, 4, 5]])
if __name__ == '__main__':
unittest.main()
``` |
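The tests above double as usage documentation for the Graph constructor formats. A compact sketch mirroring a few of the assertions (assuming simple_graph is installed):
```python
from simple_graph import Graph  # same import as in the tests above

# Adjacency-dict form, as used throughout the tests.
G = Graph({0: [1, 2], 1: [2]})
print(G.vertices)      # [0, 1, 2]
print(G.edges)         # [(0, 1), (0, 2), (1, 2)]
print(G.neighbors(1))  # neighbors of vertex 1: 0 and 2

# Explicit V/E form, also accepted by the constructor.
H = Graph({'V': ['a', 'b'], 'E': [('a', 'b')]})
print(H.has_edge('a', 'b'))  # expected True
```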
{
"source": "jiayingwang/smart-match",
"score": 3
} |
#### File: smart-match/tests/test_overlap_coefficient.py
```python
import unittest
import smart_match
class TestOverlapCoefficient(unittest.TestCase):
def setUp(self):
smart_match.use('Overlap Coefficient')
def test_similarity(self):
self.assertEqual(smart_match.similarity('hello', 'hero'), 0.75)
smart_match.set_params(level='term')
self.assertAlmostEqual(smart_match.similarity('test string1', 'test string2'), 0.5)
self.assertEqual(smart_match.similarity("aaa bbb ccc ddd", "aaa bbb ccc eee"),0.7500)
self.assertEqual(smart_match.similarity("aaa bbb ccc ddd aaa bbb ccc ddd", "aaa bbb ccc eee"),0.7500)
self.assertEqual(smart_match.similarity("a b c d", "a b c e"),0.7500)
self.assertEqual(smart_match.similarity( "a b c d", "a b e f"),0.5000)
self.assertEqual(smart_match.similarity("a b c", "a b c e f g"),1.0000)
self.assertEqual(smart_match.similarity("a b b c c", "a b c e f g"),1.0000)
self.assertEqual(smart_match.similarity("Healed", "Sealed"),0.0000)
self.assertEqual(smart_match.similarity("Healed", "Healthy"),0.0000)
self.assertEqual(smart_match.similarity("Healed", "Heard"),0.0000)
self.assertEqual(smart_match.similarity("Healed", "Herded"),0.0000)
self.assertEqual(smart_match.similarity("Healed", "Help"),0.0000)
self.assertEqual(smart_match.similarity("Healed", "Sold"),0.0000)
self.assertEqual(smart_match.similarity("Healed", "Help"),0.0000)
self.assertEqual(float('%.4f' %smart_match.similarity("<NAME>", "<NAME>")),0.3333)
self.assertEqual(smart_match.similarity("<NAME>", "<NAME>"),0.5000)
self.assertEqual(smart_match.similarity("<NAME>", "<NAME>"),0.5000)
self.assertEqual(smart_match.similarity("<NAME>", "<NAME>"),0.0000)
self.assertEqual(smart_match.similarity("<NAME>", "<NAME>"),0.0000)
self.assertEqual(smart_match.similarity("<NAME>", "<NAME>"),0.0000)
self.assertEqual(smart_match.similarity("Web Database Applications",
"Web Database Applications with PHP & MySQL"),1.0000)
self.assertEqual(smart_match.similarity("Web Database Applications",
"Creating Database Web Applications with PHP and ASP"),1.0000)
self.assertEqual(smart_match.similarity("Web Database Applications",
"Building Database Applications on the Web Using PHP3"),1.0000)
self.assertEqual(smart_match.similarity("Web Database Applications",
"Building Web Database Applications with Visual Studio 6"),1.0000)
self.assertEqual(float('%.4f' %smart_match.similarity("Web Database Applications",
"Web Application Development With PHP")),0.3333)
self.assertEqual(smart_match.similarity("Web Database Applications",
"WebRAD: Building Database Applications on the Web with Visual FoxPro and Web Connection"),
1.0000)
self.assertEqual(smart_match.similarity("Web Database Applications",
"Structural Assessment: The Role of Large and Full-Scale Testing"),0.0000)
self.assertEqual(smart_match.similarity("Web Database Applications",
"How to Find a Scholarship Online"),0.0000)
self.assertEqual(smart_match.similarity("Web Aplications",
"Web Database Applications with PHP & MySQL"),0.5000)
self.assertEqual(smart_match.similarity("Web Aplications",
"Creating Database Web Applications with PHP and ASP"),0.5000)
self.assertEqual(smart_match.similarity("Web Aplications",
"Building Database Applications on the Web Using PHP3"),0.5000)
self.assertEqual(smart_match.similarity("Web Aplications",
"Building Web Database Applications with Visual Studio 6"),0.5000)
self.assertEqual(smart_match.similarity("Web Aplications",
"Web Application Development With PHP"),0.5000)
self.assertEqual(smart_match.similarity("Web Aplications",
"WebRAD: Building Database Applications on the Web with Visual FoxPro and Web Connection"),
0.5000)
self.assertEqual(smart_match.similarity("Web Aplications",
"Structural Assessment: The Role of Large and Full-Scale Testing"),0.0000)
self.assertEqual(smart_match.similarity("Web Aplications",
"How to Find a Scholarship Online"),0.0000)
def test_dissimilarity(self):
self.assertEqual(smart_match.dissimilarity('hello', 'hero'), 0.25)
self.assertEqual(smart_match.dissimilarity('hello', 'ehllo'), 0)
smart_match.set_params(level='term')
self.assertAlmostEqual(smart_match.dissimilarity('test string1', 'test string2'), 0.5)
self.assertEqual(smart_match.dissimilarity("<NAME>", "<NAME>"), 0.5000)
self.assertEqual(smart_match.dissimilarity("<NAME>", "<NAME>"), 0.5000)
self.assertEqual(smart_match.dissimilarity("<NAME>", "<NAME>"), 1.0000)
self.assertEqual(smart_match.dissimilarity("<NAME>", "<NAME>"), 1.0000)
self.assertEqual(smart_match.dissimilarity("<NAME>", "<NAME>"), 1.0000)
self.assertEqual(smart_match.dissimilarity("a b b c c", "a b c e f g"), 0.0000)
self.assertEqual(smart_match.dissimilarity("Healed", "Sealed"), 1.0000)
self.assertEqual(smart_match.dissimilarity("Healed", "Healthy"), 1.0000)
self.assertEqual(smart_match.dissimilarity("Healed", "Heard"), 1.0000)
self.assertEqual(smart_match.dissimilarity("Web Database Applications",
"Web Database Applications with PHP & MySQL"), 0.0000)
self.assertEqual(smart_match.dissimilarity("Web Database Applications",
"Creating Database Web Applications with PHP and ASP"), 0.0000)
self.assertEqual(smart_match.dissimilarity("Web Database Applications",
"Building Database Applications on the Web Using PHP3"), 0.0000)
self.assertEqual(smart_match.dissimilarity("Web Database Applications",
"Building Web Database Applications with Visual Studio 6"), 0.0000)
if __name__ == '__main__':
unittest.main()
``` |
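The expected values above are consistent with the set-based overlap coefficient |A ∩ B| / min(|A|, |B|), taken over characters by default and over whitespace-separated terms after set_params(level='term'). A standalone sketch (a simplified reimplementation for illustration, not smart_match's actual code):
```python
def overlap_coefficient(a, b) -> float:
    """|A ∩ B| / min(|A|, |B|) over the given token collections (treated as sets)."""
    A, B = set(a), set(b)
    if not A or not B:
        return 0.0
    return len(A & B) / min(len(A), len(B))

# Character level, as in the default mode above.
print(overlap_coefficient("hello", "hero"))                                  # 0.75
# Term level, as after smart_match.set_params(level='term').
print(overlap_coefficient("test string1".split(), "test string2".split()))  # 0.5
print(overlap_coefficient("a b c".split(), "a b c e f g".split()))          # 1.0
```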