metadata (dict) | text (string, lengths 60 to 3.49M)
---|---
{
"source": "JerryLinLinLin/VirusTotalSmartScanner",
"score": 2
}
|
#### File: JerryLinLinLin/VirusTotalSmartScanner/VTGUI.py
```python
import wx
import wx.xrc
import wx.grid
import wx.adv
###########################################################################
## Class VT_AVScanner
###########################################################################
class VT_AVScanner(wx.Frame):
def __init__(self, parent):
wx.Frame.__init__(self, parent, id=wx.ID_ANY, title=u"VirusTotal Smart Scanner", pos=wx.DefaultPosition,
size=wx.Size(800, 370), style=wx.CAPTION | wx.CLOSE_BOX | wx.MINIMIZE_BOX | wx.TAB_TRAVERSAL)
self.SetSizeHints(wx.DefaultSize, wx.DefaultSize)
self.SetFont(
wx.Font(wx.NORMAL_FONT.GetPointSize(), wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL,
False, wx.EmptyString))
self.SetBackgroundColour(wx.Colour(255, 255, 255))
bSizer7 = wx.BoxSizer(wx.VERTICAL)
bSizer7.SetMinSize(wx.Size(800, 330))
self.path_dir = wx.DirPickerCtrl(self, wx.ID_ANY, wx.EmptyString, u"Select a folder", wx.DefaultPosition,
wx.Size(770, -1), wx.DIRP_DEFAULT_STYLE)
self.path_dir.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_INFOTEXT))
bSizer7.Add(self.path_dir, 0, wx.ALL, 5)
bSizer71 = wx.BoxSizer(wx.VERTICAL)
bSizer71.SetMinSize(wx.Size(800, -1))
wSizer11 = wx.WrapSizer(wx.HORIZONTAL, wx.WRAPSIZER_DEFAULT_FLAGS)
wSizer11.SetMinSize(wx.Size(800, 30))
self.scan_but = wx.Button(self, wx.ID_ANY, u"Scan", wx.DefaultPosition, wx.DefaultSize, 0)
wSizer11.Add(self.scan_but, 0, wx.ALL, 5)
self.stop_but = wx.Button(self, wx.ID_ANY, u"Stop", wx.DefaultPosition, wx.DefaultSize, 0)
self.stop_but.Enable(False)
wSizer11.Add(self.stop_but, 0, wx.ALL, 5)
self.scan_gauge = wx.Gauge(self, wx.ID_ANY, 100, wx.DefaultPosition, wx.Size(585, -1), wx.GA_HORIZONTAL)
self.scan_gauge.SetValue(0)
wSizer11.Add(self.scan_gauge, 1, wx.ALL | wx.EXPAND, 5)
bSizer71.Add(wSizer11, 1, wx.EXPAND, 5)
bSizer7.Add(bSizer71, 1, wx.EXPAND, 5)
self.data_grid = wx.grid.Grid(self, wx.ID_ANY, wx.DefaultPosition, wx.Size(800, 200), 0)
# Grid
self.data_grid.CreateGrid(0, 4)
self.data_grid.EnableEditing(False)
self.data_grid.EnableGridLines(False)
self.data_grid.EnableDragGridSize(True)
self.data_grid.SetMargins(0, 0)
# Columns
self.data_grid.SetColSize(0, 150)
self.data_grid.SetColSize(1, 150)
self.data_grid.SetColSize(2, 200)
self.data_grid.SetColSize(3, 270)
self.data_grid.EnableDragColMove(False)
self.data_grid.EnableDragColSize(False)
self.data_grid.SetColLabelSize(30)
self.data_grid.SetColLabelValue(0, u"Threat")
self.data_grid.SetColLabelValue(1, u"File")
self.data_grid.SetColLabelValue(2, u"Path")
self.data_grid.SetColLabelValue(3, u"sha256")
self.data_grid.SetColLabelAlignment(wx.ALIGN_CENTRE, wx.ALIGN_CENTRE)
# Rows
self.data_grid.EnableDragRowSize(False)
self.data_grid.SetRowLabelSize(0)
self.data_grid.SetRowLabelAlignment(wx.ALIGN_CENTRE, wx.ALIGN_CENTRE)
# Label Appearance
self.data_grid.SetLabelBackgroundColour(wx.Colour(255, 255, 255))
# Cell Defaults
self.data_grid.SetDefaultCellAlignment(wx.ALIGN_LEFT, wx.ALIGN_TOP)
self.m_menu1 = wx.Menu()
self.m_menuItem1 = wx.MenuItem(self.m_menu1, wx.ID_ANY, u"MyMenuItem", wx.EmptyString, wx.ITEM_NORMAL)
self.m_menu1.Append(self.m_menuItem1)
self.data_grid.Bind(wx.EVT_RIGHT_DOWN, self.data_gridOnContextMenu)
bSizer7.Add(self.data_grid, 0, wx.ALL, 5)
wSizer4 = wx.WrapSizer(wx.HORIZONTAL, wx.WRAPSIZER_DEFAULT_FLAGS)
wSizer4.SetMinSize(wx.Size(-1, 30))
self.remove_but = wx.Button(self, wx.ID_ANY, u"Remove File(s)", wx.DefaultPosition, wx.DefaultSize, 0)
wSizer4.Add(self.remove_but, 0, wx.ALL, 5)
self.open_log_folder_but = wx.Button(self, wx.ID_ANY, u"Open Log Folder", wx.DefaultPosition, wx.DefaultSize, 0)
wSizer4.Add(self.open_log_folder_but, 0, wx.ALL, 5)
self.open_set_but = wx.Button(self, wx.ID_ANY, u"Settings", wx.DefaultPosition, wx.DefaultSize, 0)
wSizer4.Add(self.open_set_but, 0, wx.ALL, 5)
self.about_but = wx.Button(self, wx.ID_ANY, u"About", wx.DefaultPosition, wx.DefaultSize, 0)
wSizer4.Add(self.about_but, 0, wx.ALL, 5)
bSizer7.Add(wSizer4, 1, wx.EXPAND, 5)
self.SetSizer(bSizer7)
self.Layout()
self.status_bar = self.CreateStatusBar(1, wx.STB_SIZEGRIP, wx.ID_ANY)
self.Centre(wx.BOTH)
# Connect Events
self.Bind(wx.EVT_CLOSE, self.close_main)
self.scan_but.Bind(wx.EVT_BUTTON, self.scan_but_click)
self.stop_but.Bind(wx.EVT_BUTTON, self.stop_but_click)
self.remove_but.Bind(wx.EVT_BUTTON, self.remove_but_click)
self.open_log_folder_but.Bind(wx.EVT_BUTTON, self.open_log_folder_but_click)
self.open_set_but.Bind(wx.EVT_BUTTON, self.open_set_but_click)
self.about_but.Bind(wx.EVT_BUTTON, self.about_click)
def __del__(self):
pass
# Virtual event handlers, override them in your derived class
def close_main(self, event):
event.Skip()
def scan_but_click(self, event):
event.Skip()
def stop_but_click(self, event):
event.Skip()
def remove_but_click(self, event):
event.Skip()
def open_log_folder_but_click(self, event):
event.Skip()
def open_set_but_click(self, event):
event.Skip()
def about_click(self, event):
event.Skip()
def data_gridOnContextMenu(self, event):
self.data_grid.PopupMenu(self.m_menu1, event.GetPosition())
###########################################################################
## Class Settings_window
###########################################################################
class Settings_window(wx.Frame):
def __init__(self, parent):
wx.Frame.__init__(self, parent, id=wx.ID_ANY, title=u"Settings", pos=wx.DefaultPosition, size=wx.Size(500, 300),
style=wx.DEFAULT_FRAME_STYLE | wx.TAB_TRAVERSAL)
self.SetSizeHints(wx.DefaultSize, wx.DefaultSize)
self.SetBackgroundColour(wx.Colour(255, 255, 255))
bSizer5 = wx.BoxSizer(wx.VERTICAL)
wSizer2 = wx.WrapSizer(wx.HORIZONTAL, wx.WRAPSIZER_DEFAULT_FLAGS)
self.engine_threshold_text = wx.StaticText(self, wx.ID_ANY, u"Engines Threshold", wx.DefaultPosition,
wx.DefaultSize, 0)
self.engine_threshold_text.Wrap(-1)
wSizer2.Add(self.engine_threshold_text, 0, wx.ALL, 5)
self.engine_threshold_slider = wx.Slider(self, wx.ID_ANY, 80, 0, 100, wx.DefaultPosition, wx.Size(500, -1),
wx.SL_HORIZONTAL)
wSizer2.Add(self.engine_threshold_slider, 0, wx.ALL, 5)
wSizer3 = wx.WrapSizer(wx.HORIZONTAL, wx.WRAPSIZER_DEFAULT_FLAGS)
wSizer3.SetMinSize(wx.Size(500, -1))
self.sens_low_text = wx.StaticText(self, wx.ID_ANY, u" Low", wx.DefaultPosition, wx.Size(250, -1), 0)
self.sens_low_text.Wrap(-1)
wSizer3.Add(self.sens_low_text, 0, wx.ALL, 5)
self.sens_high_text = wx.StaticText(self, wx.ID_ANY, u" High",
wx.Point(-1, -1), wx.Size(200, -1), 0)
self.sens_high_text.Wrap(-1)
wSizer3.Add(self.sens_high_text, 0, wx.ALL, 5)
wSizer2.Add(wSizer3, 1, wx.EXPAND, 5)
bSizer5.Add(wSizer2, 0, 0, 5)
wSizer31 = wx.WrapSizer(wx.HORIZONTAL, wx.WRAPSIZER_DEFAULT_FLAGS)
self.scan_pe_check = wx.CheckBox(self, wx.ID_ANY, u"Scan only PE files", wx.DefaultPosition, wx.Size(150, -1),
0)
self.scan_pe_check.SetValue(True)
wSizer31.Add(self.scan_pe_check, 0, wx.ALL, 5)
self.grayware_check = wx.CheckBox(self, wx.ID_ANY, u"Grayware Detection", wx.DefaultPosition, wx.Size(150, -1),
0)
self.grayware_check.SetValue(True)
wSizer31.Add(self.grayware_check, 0, wx.ALL, 5)
self.upload_check = wx.CheckBox(self, wx.ID_ANY, u"Auto Upload Files", wx.DefaultPosition, wx.Size(150, -1), 0)
wSizer31.Add(self.upload_check, 0, wx.ALL, 5)
self.log_check = wx.CheckBox(self, wx.ID_ANY, u"Save Log", wx.DefaultPosition, wx.Size(150, -1), 0)
self.log_check.SetValue(True)
wSizer31.Add(self.log_check, 0, wx.ALL, 5)
self.crawler_check = wx.CheckBox(self, wx.ID_ANY, u"Use Crawler ", wx.DefaultPosition,
wx.DefaultSize, 0)
wSizer31.Add(self.crawler_check, 0, wx.ALL, 5)
self.menu_check = wx.CheckBox(self, wx.ID_ANY, u"Folder Context Menu", wx.DefaultPosition, wx.Size(150, -1), 0)
wSizer31.Add(self.menu_check, 0, wx.ALL, 5)
self.white_check = wx.CheckBox(self, wx.ID_ANY, u"WhiteList Cache ", wx.DefaultPosition, wx.Size(150, -1), 0)
self.white_check.SetValue(True)
wSizer31.Add(self.white_check, 0, wx.ALL, 5)
self.black_check = wx.CheckBox(self, wx.ID_ANY, u"BlackList Cache", wx.DefaultPosition, wx.Size(150, -1), 0)
wSizer31.Add(self.black_check, 0, wx.ALL, 5)
self.menu_file_check = wx.CheckBox(self, wx.ID_ANY, u"File Context Menu", wx.DefaultPosition, wx.Size(150, -1),
0)
wSizer31.Add(self.menu_file_check, 0, wx.ALL, 5)
bSizer5.Add(wSizer31, 1, wx.EXPAND, 5)
wSizer19 = wx.WrapSizer(wx.HORIZONTAL, wx.WRAPSIZER_DEFAULT_FLAGS)
self.vtapi_text = wx.StaticText(self, wx.ID_ANY, u"VTAPI:", wx.DefaultPosition, wx.Size(150, -1), 0)
self.vtapi_text.Wrap(-1)
wSizer19.Add(self.vtapi_text, 0, wx.ALL, 5)
self.vtapi_input = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size(300, -1), 0)
wSizer19.Add(self.vtapi_input, 0, wx.ALL, 5)
bSizer5.Add(wSizer19, 1, wx.EXPAND, 5)
self.ok_but = wx.Button(self, wx.ID_ANY, u"Save", wx.DefaultPosition, wx.DefaultSize, 0)
bSizer5.Add(self.ok_but, 0, wx.ALL, 5)
self.SetSizer(bSizer5)
self.Layout()
self.Centre(wx.BOTH)
# Connect Events
self.Bind(wx.EVT_CLOSE, self.close_set)
self.engine_threshold_slider.Bind(wx.EVT_SCROLL, self.show_threshold_slider)
self.engine_threshold_slider.Bind(wx.EVT_SCROLL_CHANGED, self.update_value)
self.crawler_check.Bind(wx.EVT_CHECKBOX, self.crawler_message)
self.menu_check.Bind(wx.EVT_CHECKBOX, self.add_menu)
self.menu_file_check.Bind(wx.EVT_CHECKBOX, self.add_file_menu)
self.ok_but.Bind(wx.EVT_BUTTON, self.save_settings_but_click)
def __del__(self):
pass
# Virtual event handlers, override them in your derived class
def close_set(self, event):
event.Skip()
def show_threshold_slider(self, event):
event.Skip()
def update_value(self, event):
event.Skip()
def crawler_message(self, event):
event.Skip()
def add_menu(self, event):
event.Skip()
def add_file_menu(self, event):
event.Skip()
def save_settings_but_click(self, event):
event.Skip()
###########################################################################
## Class about_frame
###########################################################################
class about_frame(wx.Frame):
def __init__(self, parent):
wx.Frame.__init__(self, parent, id=wx.ID_ANY, title=u"About", pos=wx.DefaultPosition, size=wx.Size(300, 180),
style=wx.DEFAULT_FRAME_STYLE | wx.TAB_TRAVERSAL)
self.SetSizeHints(wx.DefaultSize, wx.DefaultSize)
self.SetBackgroundColour(wx.Colour(255, 255, 255))
bSizer4 = wx.BoxSizer(wx.VERTICAL)
self.text_static = wx.StaticText(self, wx.ID_ANY, u"\nBy 191196846", wx.DefaultPosition, wx.DefaultSize, 0)
self.text_static.Wrap(-1)
bSizer4.Add(self.text_static, 0, wx.ALL | wx.EXPAND | wx.ALIGN_CENTER_HORIZONTAL, 5)
self.link_kafan = wx.adv.HyperlinkCtrl(self, wx.ID_ANY, u"See the post on Kafan",
u"https://bbs.kafan.cn/thread-2133049-1-1.html", wx.DefaultPosition,
wx.DefaultSize, wx.adv.HL_DEFAULT_STYLE)
bSizer4.Add(self.link_kafan, 0, wx.ALL, 5)
self.text_static_2 = wx.StaticText(self, wx.ID_ANY, u"To use this tool, you must agree", wx.DefaultPosition,
wx.DefaultSize, 0)
self.text_static_2.Wrap(-1)
bSizer4.Add(self.text_static_2, 0, wx.ALL, 5)
self.link_vt = wx.adv.HyperlinkCtrl(self, wx.ID_ANY, u"VirusTotal Terms of Service",
u"https://support.virustotal.com/hc/en-us/articles/115002145529-Terms-of-Service",
wx.DefaultPosition, wx.DefaultSize, wx.adv.HL_DEFAULT_STYLE)
bSizer4.Add(self.link_vt, 0, wx.ALL, 5)
self.SetSizer(bSizer4)
self.Layout()
self.Centre(wx.BOTH)
# Connect Events
self.Bind(wx.EVT_CLOSE, self.close_set)
def __del__(self):
pass
# Virtual event handlers, override them in your derived class
def close_set(self, event):
event.Skip()
```
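The frame above is wxFormBuilder-style boilerplate: the event handlers are stubs meant to be overridden. A minimal usage sketch (the `MainWindow` subclass and its handler body are illustrative, not part of the repository):

```python
# Minimal sketch: show the generated frame. Assumes wxPython is installed and the
# file above is importable as VTGUI; the handler body is illustrative only.
import wx
from VTGUI import VT_AVScanner

class MainWindow(VT_AVScanner):
    def scan_but_click(self, event):
        # Real scan logic would go here in the derived class.
        self.status_bar.SetStatusText("Scanning...")

if __name__ == "__main__":
    app = wx.App(False)
    MainWindow(None).Show()
    app.MainLoop()
```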
|
{
"source": "jerryliu55/pyre-check",
"score": 2
}
|
#### File: python_ast/flake8_tests/ast_linter.py
```python
import ast
from pathlib import Path
from typing import Iterator, List, NamedTuple, Type
from tools.pyre.python_ast.pyre import PyreAst
class Error(NamedTuple):
line: int
column: int
message: str
type: Type
class AstChecker:
name = "flake8-pyre-test-linter"
version = "0.0.1"
def __init__(
self, tree: ast.Module, lines: List[str], repository: str, filename: str
) -> None:
self.tree = PyreAst(repository).typed_ast(tree, filename)
self.lines = lines
self.filename = Path(filename).resolve()
def run(self) -> Iterator[Error]:
visitor = AstVisitor()
visitor.visit(self.tree)
for error in visitor.errors:
yield error
class AstVisitor(ast.NodeVisitor):
def __init__(self) -> None:
self.errors = [] # type: List[Error]
def _create_error(self, node, message) -> None:
self.errors.append(Error(node.lineno, node.col_offset, message, AstChecker))
def visit_Assign(self, node: ast.Assign) -> None:
# pyre-fixme: ast.AST doesn't have attribute 'type'
# TODO(T41594507): Adapt this test now that literal types exist.
if node.targets[0].type == "typing_extensions.Literal[4]":
# TODO(T37004997): Type should be fully qualified
self._create_error(node, "Assigning to expression of type `int`.")
self.generic_visit(node)
```
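flake8 discovers checkers like `AstChecker` through the `flake8.extension` entry-point group. A hypothetical registration sketch (the package layout and the "X1" error-code prefix are assumptions, and the real checker also needs the `repository` argument wired through flake8's option handling):

```python
# Hypothetical setup.py sketch for registering the checker with flake8.
from setuptools import setup

setup(
    name="flake8-pyre-test-linter",
    py_modules=["ast_linter"],
    entry_points={
        "flake8.extension": [
            # "X1" is an assumed error-code prefix.
            "X1 = ast_linter:AstChecker",
        ],
    },
)
```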
|
{
"source": "jerrylizilong/HTMLReport",
"score": 3
}
|
#### File: HTMLReport/images/SaveImages.py
```python
import base64
import os
import random
import threading
import time
report_path = ""
imageList = {}
def AddImage(base64_data: str, alt: str = "", name: str = "image"):
"""Add a screenshot to the report.
:param base64_data: base64-encoded image data
:param alt: alt text for the image
:param name: filename prefix for the image
:return: None
"""
if base64_data and report_path:
current_id = str(threading.current_thread().ident)
if current_id not in imageList:
imageList[current_id] = []
random_name = '{}_{}_{}_{}.jpg'.format(name, current_id, time.strftime('%Y_%m_%d_%H_%M_%S'),
random.randint(1, 999))
image_path = os.path.join(report_path, "images")
if not os.path.exists(image_path):
os.makedirs(image_path)
image_file = os.path.join(image_path, random_name)
with open(image_file, "wb") as f:
f.write(base64.b64decode(base64_data))
imageList[current_id].append((os.path.join('images', random_name).replace("\\", "/"), alt))
```
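A usage sketch (the import path and file names are assumptions based on the module layout above): set the module-level `report_path`, then pass base64-encoded image data.

```python
# Usage sketch: attach a screenshot to the report for the current thread.
import base64
from HTMLReport.images import SaveImages  # assumed import path

SaveImages.report_path = "report"  # directory the HTML report is written to
with open("screenshot.png", "rb") as f:
    data = base64.b64encode(f.read()).decode("ascii")
SaveImages.AddImage(data, alt="login page", name="login")
```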
#### File: HTMLReport/log/Logger.py
```python
import logging.handlers
import os
from .HandlerFactory import *
class InfoOrLessCritical(logging.Filter):
def filter(self, record):
return record.levelno < LOG_LEVEL_WARNING
def singleton(cls, *args, **kw):
instances = {}
def _singleton():
if cls not in instances:
instances[cls] = cls(*args, **kw)
return instances[cls]
return _singleton
@singleton
class GeneralLogger(object):
def __init__(self, level=LOG_LEVEL_NOTSET, log_by_thread=False, log_path='', max_bytes=0, backup_count=0,
stream=StringIO()):
# Configure the root logger
logging.getLogger().setLevel(LOG_LEVEL_NOTSET)
logging.getLogger().addHandler(HandlerFactory.get_std_out_handler())
logging.getLogger().addHandler(HandlerFactory.get_std_err_handler())
logging.getLogger().addHandler(HandlerFactory.get_stream_handler())
# Default logging settings
self._loggers = {}
self._log_level = level
self._main_thread_id = str(self.get_current_thread_id())
self._log_destination = LOG_TARGET_CONSOLE
self._log_by_thread = log_by_thread
self._log_path = log_path
self._log_file_max_bytes = max_bytes
self._log_file_backup_count = backup_count
self.stream = stream
@staticmethod
def get_current_thread_id():
return threading.current_thread().ident
@staticmethod
def get_current_thread_name():
return threading.current_thread().name
def get_log_file_name(self):
log_path = os.path.abspath(self._log_path)
base_name = os.path.basename(log_path)
base_dir = os.path.dirname(log_path)
if os.path.isdir(log_path):
# Only a directory path was given; derive a file name for the log
return os.path.join(log_path, base_name)
elif base_name and '.' not in base_name:
# Create the directory
os.makedirs(log_path)
return os.path.join(log_path, base_name)
else:
return os.path.join(base_dir, base_name)
def get_logger(self, is_stream: bool = False) -> logging.Logger:
name = self._main_thread_id
if self._log_by_thread:
current_id = str(self.get_current_thread_id())
if current_id != self._main_thread_id:
# Make a child thread's logger a child of the main logger,
# so that logs from child threads are handled by the main logger.
# Otherwise the main log would not contain the child logs.
name = self._main_thread_id + '.' + current_id
if name not in self._loggers:
self.set_logger(name, is_stream)
return self._loggers[name]
def set_logger(self, name, is_stream=False):
if name not in self._loggers:
if name == self._main_thread_id:
new_logger = logging.getLogger()
else:
new_logger = logging.getLogger(name)
new_logger.setLevel(self._log_level)
if self._log_path:
if is_stream:
new_logger.addHandler(HandlerFactory.get_stream_handler())
else:
# If threading is enabled, the log path will change
log_path = self.get_log_file_name()
new_logger.addHandler(HandlerFactory.get_rotating_file_handler(
log_path, self._log_file_max_bytes, self._log_file_backup_count))
self._loggers[name] = new_logger
def set_log_path(self, file_path, max_bytes=0, backup_count=0):
if isinstance(file_path, str):
self._log_path = file_path
if isinstance(max_bytes, int):
self._log_file_max_bytes = max_bytes
if isinstance(backup_count, int):
self._log_file_backup_count = backup_count
def set_log_level(self, new_level):
self._log_level = new_level
for instanceLogger in self._loggers.values():
instanceLogger.setLevel(self._log_level)
def set_log_by_thread_log(self, log_by_thread):
self._log_by_thread = log_by_thread
# When per-thread logging is enabled, enable the thread loggers and disable the main logger
for instanceLogger in self._loggers.values():
instanceLogger.disabled = not self._log_by_thread
try:
self._loggers[self._main_thread_id].disabled = self._log_by_thread
except KeyError:
pass
```
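Because of the `@singleton` wrapper, every call to `GeneralLogger()` returns the same configured instance. A short usage sketch, assuming the package's `HandlerFactory` module is importable:

```python
# Usage sketch: the decorator caches the first instance, so repeated calls are cheap.
logger_factory = GeneralLogger()            # first call builds the instance
assert logger_factory is GeneralLogger()    # later calls return the same object
log = logger_factory.get_logger()           # logger for the main thread
log.info("report generation started")
```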
|
{
"source": "jerrylsu/TC-Bot",
"score": 3
}
|
#### File: deep_dialog/usersims/usersim_rule_general.py
```python
import logging
import argparse
import json
import random
import copy
from .usersim import UserSimulator
from deep_dialog import dialog_config
_logger = logging.getLogger(__name__)
class CustomRuleSimulator(UserSimulator):
""" A custom rule-based user simulator for testing dialog policy """
def __init__(self, movie_dict=None, act_set=None, slot_set=None, start_set=None, params=None):
""" Constructor shared by all user simulators """
self.movie_dict = movie_dict
self.act_set = act_set
self.slot_set = slot_set
self.start_set = start_set
self.max_turn = params['max_turn']
self.slot_err_probability = params['slot_err_probability']
self.slot_err_mode = params['slot_err_mode']
self.intent_err_probability = params['intent_err_probability']
self.simulator_run_mode = params['simulator_run_mode']
self.simulator_act_level = params['simulator_act_level']
self.learning_phase = params['learning_phase']
def initialize_episode(self):
""" Initialize a new episode (dialog)
state['history_slots']: keeps all the informed_slots
state['rest_slots']: keep all the slots (which is still in the stack yet)
"""
self.state = {}
self.state['history_slots'] = {}
self.state['inform_slots'] = {}
self.state['request_slots'] = {}
self.state['rest_slots'] = []
# The agent takes the first turn, so the user's first turn is turn 1;
# the counter starts at -1 so that -1 + 2 = 1 after the first increment.
self.state['turn'] = -1
self.episode_over = False
self.dialog_status = dialog_config.NO_OUTCOME_YET
# get goal
self.goal = self._sample_goal()
# add new dict key for the request_slots dict,
# for example:(dialogue task is book ticket)
# self.goal['request_slots']['ticket'] = 'UNK'
self.constraint_check = dialog_config.CONSTRAINT_CHECK_FAILURE
# Obtain the agent's observations, e.g. face information captured by the camera.
# (Such information can be turned directly into a user action, so it is fetched at user initialization.)
user_sample_action = self.get_initialize_info()
return user_sample_action
def _sample_goal(self):
"""sample a user goal"""
sample_goal = random.choice(self.start_set[self.learning_phase])
return sample_goal
def get_initialize_info(self):
"""get the user is initialize input inform, for example face inform
:return:{"request_slots": {}, "inform_slots": {}}
"""
# 包含一个对人脸输入信息的解析过程
user_initialize_sample_action = {}
return user_initialize_sample_action
def corrupt(self, user_action):
"""Randomly corrupt an action with error probs
(slot_err_probability and slot_err_mode) on Slot and
Intent (intent_err_probability).
:param user_action:
:return:
"""
pass
def debug_fake_goal(self):
"""Debug function: build a fake goal mannually (Can be moved in future)
:return:
"""
pass
def next(self, system_action):
"""Generate next User Action based on Last System Action.
:param system_action:
:return:
"""
self.state['turn'] += 2
self.episode_over = False
self.dialog_status = dialog_config.NO_OUTCOME_YET
sys_act = system_action.get('diaact')
if self.max_turn > 0 and self.state.get('turn') > self.max_turn:
self.dialog_status = dialog_config.FAILED_DIALOG
self.episode_over = True
self.state['diaact'] = 'closing'
else:
self.state['history_slots'].update(self.state.get('inform_slots'))
self.state['inform_slots'].clear()
if sys_act == "request":
self.response_request(system_action)
elif sys_act in ["welcomestaff", "welcomevisitor", "staffmeet"]:
self.response_end()
elif sys_act == "closing":
self.episode_over = True
self.state['diaact'] = "manualservice"
response_action = {}
response_action['diaact'] = self.state['diaact']
response_action['inform_slots'] = self.state['inform_slots']
response_action['request_slots'] = self.state['request_slots']
response_action['turn'] = self.state['turn']
response_action['nl'] = ""
# add NL to dia_act
# self.add_nl_to_action(response_action)
return response_action, self.episode_over, self.dialog_status
def response_request(self, system_action):
""" Response for Request (System Action)
Respond to the agent's request: speaker -> agent, diaact -> request"""
if len(system_action['request_slots'].keys()) > 0:
slot = list(system_action['request_slots'].keys())[0]  # only one slot (dict views are not indexable in Python 3)
if slot in self.goal['inform_slots'].keys(): # request slot in user's constraints #and slot not in self.state['request_slots'].keys():
self.state['inform_slots'][slot] = self.goal['inform_slots'][slot]
self.state['diaact'] = "inform"
if slot in self.state['rest_slots']: self.state['rest_slots'].remove(slot)
if slot in self.state['request_slots'].keys(): del self.state['request_slots'][slot]
self.state['request_slots'].clear()  # Drop all previously accumulated questions; answer the agent's question for the current turn instead of asking.
elif slot in self.goal['request_slots'].keys() and slot not in self.state['rest_slots'] and slot in self.state['history_slots'].keys(): # the requested slot has been answered
self.state['inform_slots'][slot] = self.state['history_slots'][slot]
self.state['request_slots'].clear()
self.state['diaact'] = "inform"
elif slot in self.goal['request_slots'].keys() and slot in self.state['rest_slots']: # request slot in user's goal's request slots, and not answered yet
self.state['diaact'] = "request" # "confirm_question"
self.state['request_slots'][slot] = "UNK"
########################################################################
# Inform the rest of informable slots
########################################################################
for info_slot in self.state['rest_slots']:
if info_slot in self.goal['inform_slots'].keys():
self.state['inform_slots'][info_slot] = self.goal['inform_slots'][info_slot]
for info_slot in self.state['inform_slots'].keys():
if info_slot in self.state['rest_slots']:
self.state['rest_slots'].remove(info_slot)
else:
if len(self.state['request_slots']) == 0 and len(self.state['rest_slots']) == 0:
self.state['diaact'] = "thanks"
else:
self.state['diaact'] = "inform"
self.state['inform_slots'][slot] = dialog_config.I_DO_NOT_CARE
else: # this case should not appear
if len(self.state['rest_slots']) > 0:
random_slot = random.choice(self.state['rest_slots'])
if random_slot in self.goal['inform_slots'].keys():
self.state['inform_slots'][random_slot] = self.goal['inform_slots'][random_slot]
self.state['rest_slots'].remove(random_slot)
self.state['diaact'] = "inform"
elif random_slot in self.goal['request_slots'].keys():
self.state['request_slots'][random_slot] = self.goal['request_slots'][random_slot]
self.state['diaact'] = "request"
def response_end(self):
self.episode_over = True
self.dialog_status = dialog_config.SUCCESS_DIALOG
self.state["diaact"] = "thanks"
request_slot_set = list(self.state['request_slots'].keys())  # dict views cannot be deep-copied in Python 3
rest_slot_set = copy.deepcopy(self.state['rest_slots'])
if len(request_slot_set) > 0 or len(rest_slot_set) > 0:
self.dialog_status = dialog_config.FAILED_DIALOG
for info_slot in self.state['history_slots'].keys():
if self.state['history_slots'][info_slot] == dialog_config.NO_VALUE_MATCH:
self.dialog_status = dialog_config.FAILED_DIALOG
if info_slot in self.goal['inform_slots'].keys():
if self.state['history_slots'][info_slot] != self.goal['inform_slots'][info_slot]:
self.dialog_status = dialog_config.FAILED_DIALOG
# if self.constraint_check == dialog_config.CONSTRAINT_CHECK_FAILURE:
# self.dialog_status = dialog_config.FAILED_DIALOG
def respone_welcomevisitor(self, system_action):
pass
def respone_staffmeet(self, system_action):
pass
```
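For reference, a sketch of the `params` dictionary and goal set the constructor expects (values are illustrative only; the real project builds these from its config and goal files, and the class must be used inside the `deep_dialog` package):

```python
# Illustrative construction of the rule-based simulator.
params = {
    'max_turn': 20,
    'slot_err_probability': 0.0,
    'slot_err_mode': 0,
    'intent_err_probability': 0.0,
    'simulator_run_mode': 0,
    'simulator_act_level': 0,
    'learning_phase': 'train',
}
start_set = {'train': [{'inform_slots': {'name': 'alice'}, 'request_slots': {}}]}
user_sim = CustomRuleSimulator(start_set=start_set, params=params)
user_action = user_sim.initialize_episode()
```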
|
{
"source": "jerrylui803/components-contrib",
"score": 2
}
|
#### File: conformance/azure/allow-github-ips-in-azuresql.py
```python
import argparse
import ipaddress
import json
import os
import subprocess
import sys
import urllib.request
def parseArgs():
abscwd = os.path.abspath(os.getcwd())
arg_parser = argparse.ArgumentParser(description='Generates the IP ranges based on CIDRs for GitHub Actions from meta API.')
arg_parser.add_argument('--outpath', type=str, default=abscwd, help='Optional. Full path to write the JSON output to.')
arg_parser.add_argument('--sqlserver', type=str, help='Name of the Azure SQL server to update firewall rules of. Required for deployment.')
arg_parser.add_argument('--resource-group', type=str, help='Resource group containing the target Azure SQL server. Required for deployment.')
arg_parser.add_argument('--no-deployment', action='store_true', help='Specify this flag to generate the ARM template without deploying it.')
args = arg_parser.parse_args()
if not args.no_deployment:
is_missing_args = False
if not args.sqlserver:
print('ERROR: the following argument is required: --sqlserver')
is_missing_args = True
if not args.resource_group:
print('ERROR: the following argument is required: --resource-group')
is_missing_args = True
if is_missing_args:
arg_parser.print_help()
sys.exit(-1)
print('Arguments parsed: {}'.format(args))
return args
def getResponse(url):
operUrl = urllib.request.urlopen(url)
jsonData = None
if operUrl.getcode() == 200:
data = operUrl.read()
jsonData = json.loads(data)
else:
print('ERROR: failed to receive data', operUrl.getcode())
return jsonData
def writeAllowedIPRangesJSON(outpath):
url = 'https://api.github.com/meta'
jsonData = getResponse(url)
ipRanges = []
prevStart = ''
prevEnd = ''
# Iterate the public IP CIDRs used to run GitHub Actions, and convert them
# into IP ranges for test SQL server firewall access.
for cidr in jsonData['actions']:
net = ipaddress.ip_network(cidr)
# SQL server firewall only supports up to 128 firewall rules.
# As a first cut, exclude all IPv6 addresses.
if net.version == 4:
start = net[0]
end = net[-1]
# print(f'{cidr} --> [{start}, {end}]')
if prevStart == '':
prevStart = start
if prevEnd == '':
prevEnd = end
elif prevEnd + 65536 > start:
# If the current IP range is within the granularity of a /16
# subnet mask to the previous range, coalesce them into one.
# This is necessary to get the number of rules down to ~100.
prevEnd = end
else:
ipRange = [str(prevStart), str(prevEnd)]
ipRanges.append(ipRange)
prevStart = start
prevEnd = end
if prevStart != '' and prevEnd != '':
ipRange = [str(prevStart), str(prevEnd)]
ipRanges.append(ipRange)
with open(outpath, 'w') as outfile:
json.dump(ipRanges, outfile)
def main():
args = parseArgs()
# Get the GitHub IP Ranges to use as firewall allow-rules from the GitHub meta API
ipRangesFileName = os.path.join(args.outpath, 'github-ipranges.json')
writeAllowedIPRangesJSON(ipRangesFileName)
print(f'INFO: GitHub Actions public IP range rules written {ipRangesFileName}')
# Generate the ARM template from bicep to update Azure SQL server firewall rules
subprocess.call(['az', 'bicep', 'install'])
firewallTemplateName = os.path.join(args.outpath, 'update-sql-firewall-rules.json')
subprocess.call(['az', 'bicep', 'build', '--file', 'conf-test-azure-sqlserver-firewall.bicep', '--outfile', firewallTemplateName])
print(f'INFO: ARM template to update SQL Server firewall rules written to {firewallTemplateName}')
# Update the Azure SQL server firewall rules
if args.no_deployment:
print(f'INFO: --no-deployment specified, skipping deployment of {firewallTemplateName}')
else:
subprocess.call(['az', 'deployment', 'group', 'create', '--name', 'UpdateSQLFirewallRules', '--template-file', firewallTemplateName, '--resource-group', args.resource_group, '--parameters', f'sqlServerName={args.sqlserver}', '--parameters', f'ipRanges=@{ipRangesFileName}'])
sys.exit(0)
if __name__ == '__main__':
main()
```
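The range extraction above relies on `ipaddress.ip_network` exposing the first and last address of a CIDR block; a small illustration with a sample CIDR:

```python
# Illustration of the CIDR-to-range step used in writeAllowedIPRangesJSON.
import ipaddress

net = ipaddress.ip_network('192.30.252.0/22')   # sample CIDR, not a live rule
print(net.version, net[0], net[-1])             # 4 192.30.252.0 192.30.255.255
```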
|
{
"source": "jerryluosuper/Lensi",
"score": 2
}
|
#### File: Lensi/main/Lensi_uninstall.py
```python
import os
def Scoop_uninstall_app(app_name):
os.system("scoop uninstall " + app_name)
def choco_uninstall_app(app_name):
os.system("choco uninstall " + app_name)
def winget_uninstall_app(app_name):
os.system("winget uninstall " + app_name)
def Lensi_uninstall(app_name,app_source):
if app_source == "Choco":
choco_uninstall_app(app_name)
elif app_source == "Scoop":
Scoop_uninstall_app(app_name)
elif app_source == "Winget":
winget_uninstall_app(app_name)
```
#### File: Lensi/main/Lensi_update.py
```python
import os
def choco_update_all():
os.system("choco upgrade all --yes")
def choco_update_app(app_name):
os.system("choco upgrade " + app_name)
def choco_update():
os.system("choco upgrade chocolatey -y")
def Scoop_update_all():
os.system("scoop update *")
def Scoop_update_app(app_name):
os.system("scoop update "+ app_name)
def Scoop_update():
os.system("scoop update")
def winget_update_all():
os.system("winget upgrade --silent --all")
def winget_update_app_id(app_id):
os.system("winget upgrade --silent --id --accept-source-agreement"+ app_id)
def winget_update_source():
os.system("winget source update")
def Lensi_update_app(app_name,app_source):
if app_source == "Choco":
choco_update_app(app_name)
elif app_source == "Scoop":
Scoop_update_app(app_name)
elif app_source == "Winget":
winget_update_app_id(app_name)
def Lensi_update_all(app_source):
if app_source == "Choco":
choco_update_all()
choco_update()
elif app_source == "Scoop":
Scoop_update_all()
elif app_source == "Winget":
winget_update_all()
winget_update_source()
def Lensi_update(app_source):
if app_source == "Choco":
choco_update()
elif app_source == "Scoop":
Scoop_update()
elif app_source == "Winget":
winget_update_source()
```
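Usage sketch (the package name is illustrative): the `Lensi_*` wrappers simply dispatch to the chosen backend.

```python
# Illustrative calls; "git" stands in for any package known to the backend.
Lensi_update_app("git", "Scoop")   # update one package via Scoop
Lensi_update_all("Winget")         # update everything via Winget
Lensi_update("Choco")              # update the Chocolatey client itself
```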
#### File: SCW/choco/choco_update _app.py
```python
import os
def choco_update_app(app_name):
os.system("choco upgrade " + app_name)
choco_update_app("")
```
#### File: SCW/choco/choco_update.py
```python
import os
def choco_update():
os.system("choco upgrade chocolatey -y")
choco_update()
```
#### File: SCW/Scoop/Scoop_buckets_load.py
```python
import csv
buckets_list_install = []
def Scoop_buckets_load(args):
with open("buckets_list_install.csv", "r", encoding='UTF-8') as file:
data = csv.reader(file)
for row in data:
buckets_list_install.append(row)
print(buckets_list_install)
```
#### File: SCW/Scoop/Scoop_search_lensi.py
```python
import csv
import os
from fuzzywuzzy import process
buckets_list_install = []
def Scoop_buckets_load():
with open("buckets_list_install.csv", "r", encoding='UTF-8') as file:
data = csv.reader(file)
for row in data:
buckets_list_install.append(row)
def Scoop_install_apps(app_name):
os.system("scoop install " + app_name)
Scoop_buckets_load()
name = input("Enter the application name: ")
limit_name = 3
search_list = process.extract(name,buckets_list_install, limit=limit_name)
print(search_list)
app_name_1 = search_list[0][0][0][search_list[0][0][0].find('\\'):].strip("\\")
app_name_2 = search_list[1][0][0][search_list[1][0][0].find('\\'):].strip("\\")
app_name_3 = search_list[2][0][0][search_list[2][0][0].find('\\'):].strip("\\")
app_name_search = search_list[0][0][0]
app_name_search = search_list[1][0][0]
app_name_search = search_list[2][0][0]
print("1:",app_name_1)
print("2:",app_name_2)
print("3:",app_name_3)
result = input("What to install:")
if result == "1":
Scoop_install_apps(app_name_1)
elif result == "2":
Scoop_install_apps(app_name_2)
elif result == "3":
Scoop_install_apps(app_name_3)
else:
print("What are you doing?")
```
#### File: web/360/web_360_info.py
```python
import os
from urllib import request
from urllib.request import urlopen, urlretrieve
from bs4 import BeautifulSoup
def web_baoku_info(baoku_id):
headers = {'User-Agent':' Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/18.17763'}
# print(baoku_id)
baoku_info_url = 'https://baoku.360.cn/soft/show/appid/' + baoku_id
print(baoku_info_url)
baoku_info_html_req = request.Request(url=baoku_info_url,headers=headers)
baoku_info_html = urlopen(baoku_info_html_req)
baoku_info_soup = BeautifulSoup(baoku_info_html.read(),"html.parser")
baoku_info_data = baoku_info_soup.select('body > div.app-container > div:nth-child(2) > div.dashboard-container.pic-container > div > img')
for item in baoku_info_data:
baoku_info_image_url = item.get('src')
baoku_info_image_url = "https:" +baoku_info_image_url
# print(baoku_info_image_url)
urlretrieve(url=baoku_info_image_url,filename="baoku_info.png")
baoku_icon_data = baoku_info_soup.select('body > div.app-container > div:nth-child(2) > div:nth-child(2) > h1 > img')
for item in baoku_icon_data:
baoku_icon_image_url = item.get('src')
baoku_icon_image_url = "https:" +baoku_icon_image_url
print(baoku_icon_image_url)
urlretrieve(url=baoku_icon_image_url,filename="baoku_icon.png")
baoku_info_url = 'https://baoku.360.cn/soft/show/appid/' + baoku_id + 'd'
# print(baoku_info_url)
baoku_info_html_req = request.Request(url=baoku_info_url,headers=headers)
baoku_info_html = urlopen(baoku_info_html_req)
baoku_info_soup = BeautifulSoup(baoku_info_html.read(),"html.parser")
baoku_detail = baoku_info_soup.select('body > div.wrap.clearfix > div.main-list.fr > div.app-info > div.app-introduce > div.introduce-txt1 > p')
for item in baoku_detail:
baoku_detail_text = item.get_text()  # call get_text() to extract the element's text
print(baoku_detail_text)
return baoku_info_image_url
# print(baoku_info_main,"\n",baoku_info_home)
print(web_baoku_info("102215446"))
```
|
{
"source": "jerrylzy/CS231N-Final-Project",
"score": 2
}
|
#### File: src/tasks/vqa_visualization.py
```python
import os
import collections
import random
import torch
import torch.nn as nn
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
from param import args
from pretrain.qa_answer_table import load_lxmert_qa
from tasks.vqa_model import VQAModel
from tasks.vqa_atten_model import VQAModelAttn
from tasks.vqa_data import VQADataset, VQATorchDataset, VQAEvaluator
import plotly.express as px
import plotly.graph_objects as go
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import cv2
DataTuple = collections.namedtuple("DataTuple", 'dataset loader evaluator')
def get_data_tuple(splits: str, bs: int, shuffle=False, drop_last=False) -> DataTuple:
dset = VQADataset(splits)
tset = VQATorchDataset(dset)
evaluator = VQAEvaluator(dset)
data_loader = DataLoader(
tset, batch_size=bs,
shuffle=shuffle, num_workers=args.num_workers,
drop_last=drop_last, pin_memory=True
)
return DataTuple(dataset=dset, loader=data_loader, evaluator=evaluator)
class VQA:
def __init__(self):
# Datasets
self.train_tuple = get_data_tuple(
args.train, bs=args.batch_size, shuffle=True, drop_last=True
)
if args.valid != "":
self.valid_tuple = get_data_tuple(
args.valid, bs=1024,
shuffle=False, drop_last=False
)
else:
self.valid_tuple = None
# Model
self.model = VQAModel(self.train_tuple.dataset.num_answers)
# self.model = VQAModelAttn(self.train_tuple.dataset.num_answers)
# Load pre-trained weights
if args.load_lxmert is not None:
self.model.lxrt_encoder.load(args.load_lxmert)
if args.load_lxmert_qa is not None:
load_lxmert_qa(args.load_lxmert_qa, self.model,
label2ans=self.train_tuple.dataset.label2ans)
# GPU options
self.model = self.model.cuda()
if args.multiGPU:
self.model.lxrt_encoder.multi_gpu()
# Loss and Optimizer
self.bce_loss = nn.BCEWithLogitsLoss()
if 'bert' in args.optim:
batch_per_epoch = len(self.train_tuple.loader)
t_total = int(batch_per_epoch * args.epochs)
print("BertAdam Total Iters: %d" % t_total)
from lxrt.optimization import BertAdam
self.optim = BertAdam(list(self.model.parameters()),
lr=args.lr,
warmup=0.1,
t_total=t_total)
else:
self.optim = args.optimizer(self.model.parameters(), args.lr)
# Output Directory
self.output = args.output
os.makedirs(self.output, exist_ok=True)
def train(self, train_tuple, eval_tuple):
dset, loader, evaluator = train_tuple
iter_wrapper = (lambda x: tqdm(x, total=len(loader))) if args.tqdm else (lambda x: x)
best_valid = 0.
for epoch in range(args.epochs):
quesid2ans = {}
for i, (ques_id, feats, boxes, sent, target, _, _) in iter_wrapper(enumerate(loader)):
self.model.train()
self.optim.zero_grad()
feats, boxes, target = feats.cuda(), boxes.cuda(), target.cuda()
logit = self.model(feats, boxes, sent)
assert logit.dim() == target.dim() == 2
loss = self.bce_loss(logit, target)
loss = loss * logit.size(1)
loss.backward()
nn.utils.clip_grad_norm_(self.model.parameters(), 5.)
self.optim.step()
score, label = logit.max(1)
for qid, l in zip(ques_id, label.cpu().numpy()):
ans = dset.label2ans[l]
quesid2ans[qid.item()] = ans
log_str = "\nEpoch %d: Train %0.2f\n" % (epoch, evaluator.evaluate(quesid2ans) * 100.)
if self.valid_tuple is not None: # Do Validation
valid_score = self.evaluate(eval_tuple)
if valid_score > best_valid:
best_valid = valid_score
self.save("BEST")
log_str += "Epoch %d: Valid %0.2f\n" % (epoch, valid_score * 100.) + \
"Epoch %d: Best %0.2f\n" % (epoch, best_valid * 100.)
print(log_str, end='')
with open(self.output + "/log.log", 'a') as f:
f.write(log_str)
f.flush()
self.save("LAST")
def predict(self, eval_tuple: DataTuple, dump=None):
"""
Predict the answers to questions in a data split.
:param eval_tuple: The data tuple to be evaluated.
:param dump: The path of saved file to dump results.
:return: A dict of question_id to answer.
"""
self.model.eval()
dset, loader, evaluator = eval_tuple
quesid2ans = {}
for i, datum_tuple in enumerate(loader):
ques_id, feats, boxes, sent = datum_tuple[:4] # Avoid seeing ground truth
with torch.no_grad():
feats, boxes = feats.cuda(), boxes.cuda()
logit = self.model(feats, boxes, sent)
score, label = logit.max(1)
for qid, l in zip(ques_id, label.cpu().numpy()):
ans = dset.label2ans[l]
quesid2ans[qid.item()] = ans
if dump is not None:
evaluator.dump_result(quesid2ans, dump)
return quesid2ans
def plot_analysis(self, eval_tuple: DataTuple, dump=None, plot_bb=False, plot_attention=False, plot_confidence=False):
# plot confidence bar graph for one example
self.model.eval()
dset, loader, evaluator = eval_tuple
# sample = random.randint(0, len(loader) - 1)
sample = 450
img_no = 0
output_folder = 'output/'
for i, datum_tuple in enumerate(loader):
ques_id, feats, boxes, sent, _, img_id, original_boxes, ans_type = datum_tuple
if ans_type[0] != 'number':
continue
with torch.no_grad():
print('image id: ', img_id[0])
print('question id: ', ques_id[0].item())
pic = img_id[0]
question = sent[0].replace("?", "").split()
## draw bounding box
if plot_bb == True:
image = cv2.imread(f'data/mscoco/val2014/{pic}.jpg')
target_ob = [25, 19, 21]
color = [(0,0,255), (0,165,255), (0,255,255)]
for o in range(len(target_ob)):
box = original_boxes[0][target_ob[o]].cpu().numpy()
image = cv2.rectangle(image, (int(box[0]), int(box[1])),
(int(box[2]), int(box[3])), color[o], 2)
cv2.imwrite(f'{output_folder}bbImage{img_no}_{question}.png', image)
feats, boxes = feats.cuda(), boxes.cuda()
logit = self.model(feats, boxes, sent)
print(logit)
logit = nn.Softmax(dim=1)(logit)
# plot attention map
if plot_attention == True:
for j in range(5):
attn_wgts = torch.load(f'{output_folder}attn_wgts_{img_no}_{j}.pt')
attn_wgts = attn_wgts[0][1:1+len(question)].flip([0]).cpu().numpy()
fig = go.Figure(data=go.Heatmap(
z=attn_wgts,
y=question[::-1]
))
fig.update_layout(
title=f'Attention map of layer {j} for image {img_no}',
yaxis_title='Sentence',
xaxis_title='Objects'
)
fig.write_image(f'{output_folder}atten_vis_{img_no}_{j}_{ques_id[0].item()}.png')
fig.show()
scores, labels = torch.topk(logit, 5, dim=1)
answers = []
scores = scores[0]
labels = labels[0]
scores = scores.cpu().numpy() * 100
for label in labels.cpu().numpy():
answers.append(dset.label2ans[label])
# plot confidence level
if plot_confidence == True:
fig = go.Figure(data=[go.Bar(
x=scores, y=answers,
text=scores,
textposition='auto',
orientation='h',
marker=dict(color='lightsalmon')
)])
fig.update_traces(texttemplate='%{x:.2f}')
fig.update_layout(
title=f'Predicted confidence of top-5 answers <br> {sent[0]} for image {img_no}',
yaxis_title='Answers',
xaxis_title='Confidence'
)
fig.write_image(f'{output_folder}SampleQuestionConfidence_{img_no}_{ques_id[0].item()}.png')
img_no += 1
if img_no == 10:
break
def evaluate(self, eval_tuple: DataTuple, dump=None):
"""Evaluate all data in data_tuple."""
quesid2ans = self.predict(eval_tuple, dump)
return eval_tuple.evaluator.evaluate(quesid2ans)
@staticmethod
def oracle_score(data_tuple):
dset, loader, evaluator = data_tuple
quesid2ans = {}
for i, (ques_id, feats, boxes, sent, target) in enumerate(loader):
_, label = target.max(1)
for qid, l in zip(ques_id, label.cpu().numpy()):
ans = dset.label2ans[l]
quesid2ans[qid.item()] = ans
return evaluator.evaluate(quesid2ans)
def save(self, name):
torch.save(self.model.state_dict(),
os.path.join(self.output, "%s.pth" % name))
def load(self, path):
print("Load model from %s" % path)
state_dict = torch.load("%s.pth" % path)
self.model.load_state_dict(state_dict)
if __name__ == "__main__":
# Build Class
vqa = VQA()
# Load VQA model weights
# Note: It is different from loading LXMERT pre-trained weights.
if args.load is not None:
vqa.load(args.load)
# Test or Train
if args.test is not None:
args.fast = args.tiny = False # Always loading all data in test
if 'test' in args.test:
vqa.predict(
get_data_tuple(args.test, bs=950,
shuffle=False, drop_last=False),
dump=os.path.join(args.output, 'test_predict.json')
)
elif 'val' in args.test:
# Since part of the validation data is used in pre-training/fine-tuning,
# only validate on the minival set.
# create bar graph for top answers
vqa.plot_analysis(
get_data_tuple('minival', bs=1,
shuffle=False, drop_last=False),
dump=os.path.join(args.output, 'minival_predict.json'),
plot_bb=True,
plot_attention=True,
plot_confidence=True
)
else:
assert False, "No such test option for %s" % args.test
else:
print('Splits in Train data:', vqa.train_tuple.dataset.splits)
if vqa.valid_tuple is not None:
print('Splits in Valid data:', vqa.valid_tuple.dataset.splits)
print("Valid Oracle: %0.2f" % (vqa.oracle_score(vqa.valid_tuple) * 100))
else:
print("DO NOT USE VALIDATION")
vqa.train(vqa.train_tuple, vqa.valid_tuple)
```
|
{
"source": "jerrylzy/RNN_Jazzy_Haydn",
"score": 2
}
|
#### File: jerrylzy/RNN_Jazzy_Haydn/data_utils.py
```python
from music_utils import *
from preprocess import *
from keras.utils import to_categorical
chords, abstract_grammars = get_musical_data('sonata_48_3_(c)iscenko.mid')
corpus, tones, tones_indices, indices_tones = get_corpus_data(abstract_grammars)
N_tones = len(set(corpus))
n_a = 64
x_initializer = np.zeros((1, 1, N_tones))
a_initializer = np.zeros((1, n_a))
c_initializer = np.zeros((1, n_a))
def load_music_utils():
chords, abstract_grammars = get_musical_data('sonata_48_3_(c)iscenko.mid')
corpus, tones, tones_indices, indices_tones = get_corpus_data(abstract_grammars)
N_tones = len(set(corpus))
X, Y, N_tones = data_processing(corpus, tones_indices, 60, 30)
return (X, Y, N_tones, indices_tones)
def predict_and_sample(inference_model, x_initializer = x_initializer, a_initializer = a_initializer,
c_initializer = c_initializer):
"""
Predicts the next value of values using the inference model.
Arguments:
inference_model -- Keras model instance for inference time
x_initializer -- numpy array of shape (1, 1, 78), one-hot vector initializing the values generation
a_initializer -- numpy array of shape (1, n_a), initializing the hidden state of the LSTM_cell
c_initializer -- numpy array of shape (1, n_a), initializing the cell state of the LSTM_cell
Ty -- length of the sequence you'd like to generate.
Returns:
results -- numpy-array of shape (Ty, 78), matrix of one-hot vectors representing the values generated
indices -- numpy-array of shape (Ty, 1), matrix of indices representing the values generated
"""
### START CODE HERE ###
pred = inference_model.predict([x_initializer, a_initializer, c_initializer])
indices = np.argmax(pred, axis = -1)
results = to_categorical(indices, num_classes=N_tones)
### END CODE HERE ###
return results, indices
```
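The argmax + one-hot step at the heart of `predict_and_sample` can be illustrated on a toy prediction array, independent of the actual inference model:

```python
# Toy illustration of the argmax + one-hot conversion above.
import numpy as np
from keras.utils import to_categorical

pred = np.array([[[0.1, 0.7, 0.2]], [[0.6, 0.3, 0.1]]])  # (Ty=2, 1, 3 tones)
indices = np.argmax(pred, axis=-1)                       # shape (2, 1)
results = to_categorical(indices, num_classes=3)         # shape (2, 1, 3)
print(indices.ravel())                                   # [1 0]
```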
|
{
"source": "jerrylzy/SQuAD-QANet",
"score": 3
}
|
#### File: jerrylzy/SQuAD-QANet/models.py
```python
import layers
import qanet_layers
import torch
import torch.nn as nn
import torch.nn.functional as F
class BiDAF(nn.Module):
"""Baseline BiDAF model for SQuAD.
Based on the paper:
"Bidirectional Attention Flow for Machine Comprehension"
by <NAME>, <NAME>, <NAME>, <NAME>
(https://arxiv.org/abs/1611.01603).
Follows a high-level structure commonly found in SQuAD models:
- Embedding layer: Embed char/word indices to get char/word vectors.
- Encoder layer: Encode the embedded sequence.
- Attention layer: Apply an attention mechanism to the encoded sequence.
- Model encoder layer: Encode the sequence again.
- Output layer: Simple layer (e.g., fc + softmax) to get final outputs.
Args:
char_vectors (torch.Tensor): Pre-trained char vectors.
word_vectors (torch.Tensor): Pre-trained word vectors.
hidden_size (int): Number of features in the hidden state at each layer.
drop_prob (float): Dropout probability.
use_char_cnn (bool): Whether to use Char-CNN
"""
def __init__(self, char_vectors, word_vectors, hidden_size, drop_prob=0., use_char_cnn=False, num_heads=5):
super(BiDAF, self).__init__()
self.emb = layers.Embedding(char_vectors=char_vectors,
word_vectors=word_vectors,
hidden_size=hidden_size,
drop_prob=drop_prob,
use_char_cnn=use_char_cnn)
self.enc = layers.RNNEncoder(input_size=hidden_size,
hidden_size=hidden_size,
num_layers=1,
drop_prob=drop_prob)
self.att = layers.BiDAFAttention(hidden_size=2 * hidden_size,
drop_prob=drop_prob)
self.self_att = layers.SelfAttention(hidden_size=2 * hidden_size,
num_heads=num_heads,
dropout=drop_prob)
self.mod = layers.RNNEncoder(input_size=10 * hidden_size,
hidden_size=2 * hidden_size,
num_layers=2,
drop_prob=drop_prob)
self.out = layers.BiDAFOutput(hidden_size=hidden_size,
drop_prob=0)
self.apply(self._init_weights)
def _init_weights(self, module):
# if isinstance(module, (nn.Linear, nn.Embedding)):
# module.weight.data.normal_(mean=0.0, std=0.02)
# if isinstance(module, nn.Linear) and module.bias is not None:
# module.bias.data.zero_()
# elif isinstance(module, nn.LayerNorm):
# module.bias.data.zero_()
# module.weight.data.fill_(1.0)
if isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def forward(self, cw_idxs, cc_idxs, qw_idxs, qc_idxs):
c_mask = torch.zeros_like(cw_idxs) != cw_idxs
q_mask = torch.zeros_like(qw_idxs) != qw_idxs
c_len, q_len = c_mask.sum(-1), q_mask.sum(-1)
# (batch_size, c_len, hidden_size)
c_emb = self.emb(cw_idxs, cc_idxs)
# (batch_size, q_len, hidden_size)
q_emb = self.emb(qw_idxs, qc_idxs)
# (batch_size, c_len, 2 * hidden_size)
c_enc = self.enc(c_emb, c_len)
# (batch_size, q_len, 2 * hidden_size)
q_enc = self.enc(q_emb, q_len)
att = self.att(c_enc, q_enc,
c_mask, q_mask) # (batch_size, c_len, 8 * hidden_size)
# (batch_size, c_len, 2 * hidden_size)
self_att = self.self_att(c_enc, c_mask)
concat_att = torch.cat((att, self_att), dim=2)
# (batch_size, c_len, 2 * hidden_size)
mod = self.mod(concat_att, c_len)
# 2 tensors, each (batch_size, c_len)
out = self.out(concat_att, mod, c_mask)
return out
class QANet(nn.Module):
# Character embedding size limit
CHAR_LIMIT = 16
"""QANet model for SQuAD.
Follows a high-level structure commonly found in SQuAD models:
- Embedding layer: Embed char/word indices to get char/word vectors.
- Encoder layer: Encode the embedded sequence.
- Attention layer: Apply an attention mechanism to the encoded sequence.
- Model encoder layer: Encode the sequence again.
- Output layer: Simple layer (e.g., fc + softmax) to get final outputs.
Args:
char_vectors (torch.Tensor): Pre-trained char vectors.
word_vectors (torch.Tensor): Pre-trained word vectors.
hidden_size (int): Number of features in the hidden state at each layer.
drop_prob (float): Dropout probability.
use_char_cnn (bool): Whether to use Char-CNN
"""
def __init__(self, char_vectors, word_vectors, hidden_size=128, drop_prob=0., project=False, use_char_cnn=True, use_seq=True):
super().__init__()
self.drop_prob = drop_prob
# Dimension of the embedding layer output.
self.emb = qanet_layers.Embedding(char_vectors=char_vectors,
word_vectors=word_vectors,
hidden_size=hidden_size,
drop_prob=drop_prob,
use_char_cnn=use_char_cnn,
use_seq=use_seq)
num_conv_layers = 4
self.enc = qanet_layers.EncoderBlock(
hidden_size=hidden_size,
num_heads=8,
dropout=drop_prob,
kernel_size=7,
num_conv_layers=num_conv_layers,
base_layer_num=1,
total_num_layers=num_conv_layers + 2)
self.att = layers.BiDAFAttention(hidden_size=hidden_size,
drop_prob=drop_prob)
self.mod_proj = layers.Conv1dLinear(4 * hidden_size, hidden_size, bias=False) if project else None
self.mod = qanet_layers.StackedEmbeddingEncoderBlock(
hidden_size=hidden_size if project else 4 * hidden_size,
num_blocks=7,
num_heads=8,
dropout=drop_prob,
kernel_size=5,
num_conv_layers=2
)
self.out = qanet_layers.QANetOutput(hidden_size=hidden_size if project else 4 * hidden_size, drop_prob=drop_prob)
self.apply(self._init_weights)
def _init_weights(self, module):
# if isinstance(module, (nn.Linear, nn.Embedding)):
# module.weight.data.normal_(mean=0.0, std=0.02)
# if isinstance(module, nn.Linear) and module.bias is not None:
# module.bias.data.zero_()
# elif isinstance(module, nn.LayerNorm):
# module.bias.data.zero_()
# module.weight.data.fill_(1.0)
if isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def forward(self, cw_idxs, cc_idxs, qw_idxs, qc_idxs):
c_mask = torch.zeros_like(cw_idxs) != cw_idxs
q_mask = torch.zeros_like(qw_idxs) != qw_idxs
# c_len, q_len = c_mask.sum(-1), q_mask.sum(-1)
# (batch_size, c_len, emb_size)
c_emb = self.emb(cw_idxs, cc_idxs)
# (batch_size, q_len, emb_size)
q_emb = self.emb(qw_idxs, qc_idxs)
c_enc = F.dropout(self.enc(c_emb, c_mask), self.drop_prob, self.training) # (batch_size, c_len, hidden_size)
q_enc = F.dropout(self.enc(q_emb, q_mask), self.drop_prob, self.training) # (batch_size, q_len, hidden_size)
att = self.att(c_enc, q_enc, c_mask, q_mask) # (batch_size, c_len, 4 * hidden_size)
att = self.mod_proj(att) if self.mod_proj != None else att
# The stacked encoder blocks share weights across their three repetitions
att_emb_1 = F.dropout(self.mod(att, c_mask), self.drop_prob, self.training)
att_emb_2 = F.dropout(self.mod(att_emb_1, c_mask), self.drop_prob, self.training)
att_emb_3 = F.dropout(self.mod(att_emb_2, c_mask), self.drop_prob, self.training)
# 2 tensors, each (batch_size, c_len)
out = self.out(att_emb_1, att_emb_2, att_emb_3, c_mask)
return out
```
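Both models re-initialize LayerNorm parameters via `self.apply(self._init_weights)`; `nn.Module.apply` walks every submodule, so the hook fires for LayerNorm layers anywhere in the tree. A self-contained illustration of that pattern:

```python
# Stand-alone illustration of the apply(...) weight-init pattern used above.
import torch.nn as nn

def init_layernorm(module):
    if isinstance(module, nn.LayerNorm):
        module.bias.data.zero_()
        module.weight.data.fill_(1.0)

net = nn.Sequential(nn.Linear(8, 8), nn.LayerNorm(8))
net.apply(init_layernorm)  # visits nn.Linear and nn.LayerNorm; only the latter matches
```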
|
{
"source": "JerryMa90/whitenoise-system",
"score": 2
}
|
#### File: whitenoise/client/__init__.py
```python
import os
import json
import uuid
import logging
import pkg_resources
from requests import Session
from .restclient.models.project_run_details import ProjectRunDetails
from .restclient.rest_client import RestClient
from .restclient.models.dataset_read_request import DatasetReadRequest
from .restclient.models.dataset_document import DatasetDocument
from .restclient.models.release_dataset_document import ReleaseDatasetDocument
from .restclient.models.dataset_read_release_request import DatasetReadReleaseRequest
module_logger = logging.getLogger(__name__)
KNOWN_DATASET_TYPE_KEYS = ["csv_details", "dataverse_details"]
class _MockCredentials(object):
def signed_session(self, session=None):
return session if session is not None else Session()
def _get_client():
port = os.environ.get("WHITENOISE_SERVICE_PORT", "5000")
url = os.environ.get("WHITENOISE_SERVICE_URL", "localhost:{}".format(port))
base_url = "{}/api/".format(url)
base_url = base_url if base_url.startswith("http") else "http://" + base_url
client = RestClient(_MockCredentials(), base_url)
return client
def _guid_header():
"""
Generates a guid header for client, to be tracked by client/server.
:return: A UUID
:rtype: str
"""
return str(uuid.uuid4())
class ExecutionClient(object):
def submit(self, params, uri):
client = _get_client()
details = ProjectRunDetails(params=json.dumps(params),
project_uri=uri)
return client.executerun(details)
class DatasetClient(object):
"""
A client for registering, reading and releasing differentially private
datasets using the opendp-whitenoise service
"""
def __init__(self):
# Tag requests with this custom header for now
self._guid = _guid_header()
self.custom_headers = {'client_guid': self._guid}
def _register_release_request_helper(self, dataset):
"""
Helper for register/release,
both of which use DatasetDocuments as request formats
"""
for key in KNOWN_DATASET_TYPE_KEYS:
if not key in dataset:
dataset[key]=None
request = DatasetDocument(dataset_name=dataset['dataset_name'], \
dataset_type=dataset['dataset_type'], \
budget=dataset['budget'], \
release_cost=dataset['release_cost'], \
csv_details=dataset['csv_details'], \
dataverse_details=dataset['dataverse_details'], \
authorized_users=dataset['authorized_users'])
return request
def release(self, dataset):
"""
Generates a DatasetDocument and sends it to the service.
Requests the release of a Differentially Private DatasetDocument, with budget
(to authorized users)
Tags the request with Client guid.
"""
client = _get_client()
release_request = self._register_release_request_helper(dataset)
return client.datasetrelease(release_request, custom_headers=self.custom_headers)
def register(self, dataset):
"""
Generates a DatasetDocument and sends it to the service.
Requests the registration of this private DatasetDocument
Tags the request with Client guid.
"""
client = _get_client()
register_request = self._register_release_request_helper(dataset)
return client.datasetregister(register_request, custom_headers=self.custom_headers)
def read(self, dataset_name, budget):
"""
Generates a DatasetReadRequest and sends it to the service.
Reads from a private DatasetDocument
Tags the request with Client guid.
"""
client = _get_client()
read_request = DatasetReadRequest(dataset_name=dataset_name, budget=budget)
return client.datasetread(read_request, custom_headers=self.custom_headers)
def read_released(self, dataset_name):
"""
Generates a DatasetReadReleaseRequest and sends it to the service.
Tags the request with Client guid.
"""
client = _get_client()
read_released_request = DatasetReadReleaseRequest(dataset_name=dataset_name)
return client.datasetreadreleased(read_released_request, custom_headers=self.custom_headers)
def get_dataset_client():
client_overrides = [entrypoint for entrypoint in pkg_resources.iter_entry_points("opendp_whitenoise_dataset_client")]
if len(client_overrides) == 1:
try:
entrypoint = client_overrides[0]
extension_class = entrypoint.load()
return extension_class()
except Exception as e: # pragma: no cover
msg = "Failure while loading {} with exception {}.".format(
entrypoint, e)
module_logger.warning(msg)
else:
if len(client_overrides) > 1:
module_logger.warning("Multiple client overrides found {}".format(client_overrides))
return DatasetClient()
def get_execution_client():
return ExecutionClient()
__all__ = ["get_dataset_client", "get_execution_client"]
```
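A usage sketch against a locally running service (the dataset document fields below are illustrative; the exact schema comes from the service's DatasetDocument model):

```python
# Illustrative register/read flow; assumes the whitenoise service is reachable at
# WHITENOISE_SERVICE_URL (default localhost:5000).
client = get_dataset_client()
dataset = {
    "dataset_name": "example_csv",
    "dataset_type": "csv_details",
    "budget": 3.0,
    "release_cost": 1.0,
    "csv_details": {"local_path": "example.csv"},
    "authorized_users": ["owner@example.com"],
}
client.register(dataset)
reader = client.read("example_csv", budget=1.0)
```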
#### File: sdk/query/test_query.py
```python
import os
import subprocess
import pandas as pd
from pandasql import sqldf
import math
from opendp.whitenoise.metadata import CollectionMetadata
from opendp.whitenoise.sql import PrivateReader, PandasReader
from opendp.whitenoise.sql.parse import QueryParser
from opendp.whitenoise.reader.rowset import TypedRowset
git_root_dir = subprocess.check_output("git rev-parse --show-toplevel".split(" ")).decode("utf-8").strip()
meta_path = os.path.join(git_root_dir, os.path.join("service", "datasets", "PUMS.yaml"))
csv_path = os.path.join(git_root_dir, os.path.join("service", "datasets", "PUMS.csv"))
schema = CollectionMetadata.from_file(meta_path)
df = pd.read_csv(csv_path)
# Unit tests
#
class TestQuery:
def test_count_exact(self):
reader = PandasReader(schema, df)
rs = reader.execute("SELECT COUNT(*) AS c FROM PUMS.PUMS")
assert(rs[1][0] == 1000)
def test_empty_result(self):
reader = PandasReader(schema, df)
rs = reader.execute("SELECT age as a FROM PUMS.PUMS WHERE age > 100")
assert(len(rs) == 1)
def test_empty_result_typed(self):
reader = PandasReader(schema, df)
rs = reader.execute("SELECT age as a FROM PUMS.PUMS WHERE age > 100")
trs = TypedRowset(rs, ['int'])
assert(len(trs) == 0)
def test_group_by_exact_order(self):
reader = PandasReader(schema, df)
rs = reader.execute("SELECT COUNT(*) AS c, married AS m FROM PUMS.PUMS GROUP BY married ORDER BY c")
assert(rs[1][0] == 451)
assert(rs[2][0] == 549)
def test_group_by_exact_order_desc(self):
reader = PandasReader(schema, df)
rs = reader.execute("SELECT COUNT(*) AS c, married AS m FROM PUMS.PUMS GROUP BY married ORDER BY c DESC")
assert(rs[1][0] == 549)
assert(rs[2][0] == 451)
def test_group_by_exact_order_expr_desc(self):
reader = PandasReader(schema, df)
rs = reader.execute("SELECT COUNT(*) * 5 AS c, married AS m FROM PUMS.PUMS GROUP BY married ORDER BY c DESC")
assert(rs[1][0] == 549 * 5)
assert(rs[2][0] == 451 * 5)
def test_group_by_noisy_order(self):
reader = PandasReader(schema, df)
private_reader = PrivateReader(schema, reader, 4.0)
rs = private_reader.execute("SELECT COUNT(*) AS c, married AS m FROM PUMS.PUMS GROUP BY married ORDER BY c")
assert(rs[1][0] < rs[2][0])
# def test_group_by_noisy_order_desc(self):
# reader = PandasReader(schema, df)
# private_reader = PrivateReader(schema, reader, 4.0)
# rs = private_reader.execute("SELECT COUNT(*) AS c, married AS m FROM PUMS.PUMS GROUP BY married ORDER BY c DESC")
# assert(rs[1][0] > rs[2][0])
def test_group_by_noisy_typed_order(self):
reader = PandasReader(schema, df)
private_reader = PrivateReader(schema, reader, 4.0)
rs = private_reader.execute_typed("SELECT COUNT(*) AS c, married AS m FROM PUMS.PUMS GROUP BY married ORDER BY c")
assert(rs['c'][0] < rs['c'][1])
def test_group_by_noisy_typed_order_desc(self):
reader = PandasReader(schema, df)
private_reader = PrivateReader(schema, reader, 4.0)
rs = private_reader.execute_typed("SELECT COUNT(*) AS c, married AS m FROM PUMS.PUMS GROUP BY married ORDER BY c DESC")
assert(rs['c'][0] > rs['c'][1])
def test_no_tau(self):
# should never drop rows
reader = PandasReader(schema, df)
private_reader = PrivateReader(schema, reader, 4.0)
for i in range(10):
rs = private_reader.execute_typed("SELECT COUNT(*) AS c FROM PUMS.PUMS WHERE age > 90 AND educ = '8'")
assert(len(rs['c']) == 1)
def test_no_tau_noisy(self):
# should never drop rows
reader = PandasReader(schema, df)
private_reader = PrivateReader(schema, reader, 0.01)
for i in range(10):
rs = private_reader.execute_typed("SELECT COUNT(*) AS c FROM PUMS.PUMS WHERE age > 90 AND educ = '8'")
assert(len(rs['c']) == 1)
def test_yes_tau(self):
# should usually drop some rows
reader = PandasReader(schema, df)
private_reader = PrivateReader(schema, reader, 1.0, 1/10000)
lengths = []
for i in range(10):
rs = private_reader.execute_typed("SELECT COUNT(*) AS c FROM PUMS.PUMS WHERE age > 80 GROUP BY educ")
lengths.append(len(rs['c']))
l = lengths[0]
assert(any([l != ll for ll in lengths]))
def test_count_no_rows_exact_typed(self):
reader = PandasReader(schema, df)
query = QueryParser(schema).queries("SELECT COUNT(*) as c FROM PUMS.PUMS WHERE age > 100")[0]
trs = reader.execute_ast_typed(query)
assert(trs['c'][0] == 0)
def test_sum_no_rows_exact_typed(self):
reader = PandasReader(schema, df)
query = QueryParser(schema).queries("SELECT SUM(age) as c FROM PUMS.PUMS WHERE age > 100")[0]
trs = reader.execute_ast_typed(query)
        assert(trs['c'][0] is None)
def test_empty_result_count_typed_notau_prepost(self):
reader = PandasReader(schema, df)
query = QueryParser(schema).queries("SELECT COUNT(*) as c FROM PUMS.PUMS WHERE age > 100")[0]
private_reader = PrivateReader(schema, reader, 1.0)
private_reader._execute_ast(query, True)
for i in range(3):
trs = private_reader._execute_ast(query, True)
assert(len(trs) == 1)
def test_sum_noisy(self):
reader = PandasReader(schema, df)
query = QueryParser(schema).queries("SELECT SUM(age) as age_total FROM PUMS.PUMS")[0]
trs = reader.execute_ast_typed(query)
assert(trs['age_total'][0] > 1000)
def test_sum_noisy_postprocess(self):
reader = PandasReader(schema, df)
private_reader = PrivateReader(schema, reader, 1.0)
trs = private_reader.execute_typed("SELECT POWER(SUM(age), 2) as age_total FROM PUMS.PUMS")
assert(trs['age_total'][0] > 1000 ** 2)
def test_execute_with_dpsu(self):
reader = PandasReader(schema, df)
private_reader = PrivateReader(schema, reader, 1.0)
query = QueryParser(schema).queries("SELECT COUNT(*) AS c FROM PUMS.PUMS GROUP BY married")[0]
assert(private_reader._get_reader(query) is not private_reader.reader)
def test_execute_without_dpsu(self):
reader = PandasReader(schema, df)
private_reader = PrivateReader(schema, reader, 1.0)
query = QueryParser(schema).queries("SELECT COUNT(*) AS c FROM PUMS.PUMS GROUP BY married")[0]
private_reader.options.use_dpsu = False
assert(private_reader._get_reader(query) is private_reader.reader)
def test_check_thresholds_gauss(self):
# check tau for various privacy parameters
epsilons = [0.1, 2.0]
max_contribs = [1, 3]
deltas = [10E-5, 10E-15]
query = "SELECT COUNT(*) FROM PUMS.PUMS GROUP BY married"
reader = PandasReader(schema, df)
qp = QueryParser(schema)
q = qp.query(query)
for eps in epsilons:
for d in max_contribs:
for delta in deltas:
                    # using slightly different formulations of the same formula from different papers
# make sure private_reader round-trips
gaus_scale = math.sqrt(d) * math.sqrt(2 * math.log(1.25/delta))/eps
gaus_rho = 1 + gaus_scale * math.sqrt(2 * math.log(d / math.sqrt(2 * math.pi * delta)))
private_reader = PrivateReader(schema, reader, eps, delta)
q.max_ids = d # hijack the AST
r = private_reader.execute_ast(q)
assert(math.isclose(private_reader.tau, gaus_rho, rel_tol=0.03, abs_tol=2))
```
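For reference, the threshold arithmetic in `test_check_thresholds_gauss` above corresponds to the following transcription into standard notation (this restates the test's own computation; it is not a claim about which paper's formulation the library uses internally):
```latex
\sigma = \frac{\sqrt{d}\,\sqrt{2\ln(1.25/\delta)}}{\varepsilon},
\qquad
\tau \approx 1 + \sigma\,\sqrt{2\ln\!\left(\frac{d}{\sqrt{2\pi\,\delta}}\right)}
```
where $d$ is the per-user contribution bound (`max_ids`) and $(\varepsilon, \delta)$ are the privacy parameters; the assertion only requires `private_reader.tau` to match this value within the stated relative and absolute tolerances.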
#### File: sdk/query/test_validate.py
```python
import pytest
from opendp.whitenoise.ast import Validate
from opendp.whitenoise.sql.parse import QueryParser
from opendp.whitenoise.metadata import CollectionMetadata
from os import listdir
from os.path import isfile, join, dirname
dir_name = dirname(__file__)
testpath = join(dir_name, "queries") + "/"
other_dirs = [f for f in listdir(testpath) if not isfile(join(testpath, f)) and f not in ["parse", "validate", "validate_pums", "compare"]]
validate_files = [join(testpath + "validate/", f) for f in listdir(testpath + "validate") if isfile(join(testpath + "validate", f))]
good_files = [f for f in validate_files if not "_fail" in f]
bad_files = [f for f in validate_files if "_fail" in f]
for d in other_dirs:
other_files = [join(testpath + d + "/", f) for f in listdir(testpath + d) if isfile(join(testpath + d, f))]
good_files.extend(other_files)
metadata = CollectionMetadata.from_file(join(dir_name, "Devices.yaml"))
#
# Unit tests
#
class TestValidate:
def test_all_good_queries(self):
for goodpath in good_files:
print(goodpath)
gqt = GoodQueryTester(goodpath)
gqt.runValidate()
def test_all_bad_queries(self):
for badpath in bad_files:
bqt = BadQueryTester(badpath)
bqt.runValidate()
class GoodQueryTester:
def __init__(self, path):
lines = open(path).readlines()
self.queryBatch = "\n".join(lines)
queryLines = " ".join([line for line in lines if line.strip() != "" and not line.strip().startswith("--")])
self.queries = [q.strip() for q in queryLines.split(";") if q.strip() != ""]
def runValidate(self):
for qs in self.queries:
print(qs)
q = QueryParser(metadata).query(qs)
Validate().validateQuery(q, metadata)
class BadQueryTester:
def __init__(self, path):
lines = open(path).readlines()
self.queryBatch = "\n".join(lines)
queryLines = " ".join([line for line in lines if line.strip() != "" and not line.strip().startswith("--")])
self.queries = [q.strip() for q in queryLines.split(";") if q.strip() != ""]
def runValidate(self):
for qs in self.queries:
print(qs)
with pytest.raises(ValueError):
q = QueryParser(metadata).query(qs)
self.validateSingle(q)
def validateSingle(self, q):
with pytest.raises(ValueError):
Validate().validateQuery(q, metadata)
```
#### File: sdk/synthesizers/test_mwem.py
```python
import subprocess
import os
import pytest
import warnings
import string
import numpy as np
import pandas as pd
from opendp.whitenoise.metadata import CollectionMetadata
from opendp.whitenoise.synthesizers.mwem import MWEMSynthesizer
git_root_dir = subprocess.check_output("git rev-parse --show-toplevel".split(" ")).decode("utf-8").strip()
meta_path = os.path.join(git_root_dir, os.path.join("service", "datasets", "PUMS.yaml"))
csv_path = os.path.join(git_root_dir, os.path.join("service", "datasets", "PUMS.csv"))
schema = CollectionMetadata.from_file(meta_path)
df = pd.read_csv(csv_path, index_col=0)
df = df.drop(["income"], axis=1)
nf = df.to_numpy().astype(int)
synth = MWEMSynthesizer(split_factor=3)
faux_synth = MWEMSynthesizer(split_factor=1)
test_data = np.array([[1,1,1],[2,2,2],[3,3,3]])
test_histogram = [[[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]],
[[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]],
[[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]]]
test_histogram_dims = (3,3,3)
class TestMWEM:
def test_fit(self):
synth.fit(nf)
assert synth.histograms
def test_sample(self):
sample_size = nf.shape[0]
synthetic = synth.sample(sample_size)
assert synthetic.shape == nf.shape
def test_initialize_A(self):
h = synth._initialize_A(test_histogram,(3,3,3))
assert int(np.sum(h)) == int(np.sum(test_histogram))
def test_histogram_from_data_attributes(self):
three_dims = synth._histogram_from_data_attributes(test_data,np.array([[0,1,2]]))
one_dims = synth._histogram_from_data_attributes(test_data,np.array([np.array([0]),np.array([1]),np.array([2])]))
assert three_dims[0][1] == [3,3,3]
assert one_dims[0][1] == [3]
def test_compose_arbitrary_slices(self):
ss = synth._compose_arbitrary_slices(10, (3,3,3))
assert np.array(ss).shape == (10,3)
def test_evaluate(self):
ss = synth._evaluate([slice(0, 2, None), slice(0, 2, None), slice(0, 3, None)],np.array(test_histogram))
assert ss == 6.0
def test_binary_replace_in_place_slice(self):
b = synth._binary_replace_in_place_slice(np.array(test_histogram), [slice(0, 2, None), slice(0, 2, None), slice(0, 3, None)])
assert (b == np.array([[[1., 1., 0.],
[1., 1., 0.],
[0., 0., 1.]],
[[1., 1., 0.],
[1., 1., 0.],
[0., 0., 1.]],
[[1., 1., 0.],
[1., 1., 0.],
[0., 0., 1.]]])).all()
def test_reorder(self):
original = np.array([[1,2,3,4,5,6], [6,7,8,9,10,11]])
splits = np.array([[1,3,4],[0,2,5]])
m1 = original[:, splits[0]]
m2 = original[:, splits[1]]
reordered = synth._reorder(splits)
reconstructed = np.hstack((m1,m2))
assert (original == reconstructed[:, reordered]).all()
def test_generate_splits(self):
assert (synth._generate_splits(3,3) == np.array([[0, 1, 2]])).all()
# TODO: More split tests
def test_faux_fit(self):
pytest.warns(Warning, faux_synth.fit, test_data)
assert faux_synth.histograms
```
#### File: sdk/synthesizers/test_pategan.py
```python
import subprocess
import os
import pytest
import string
import pandas as pd
from opendp.whitenoise.metadata import CollectionMetadata
try:
from opendp.whitenoise.synthesizers.preprocessors.preprocessing import GeneralTransformer
from opendp.whitenoise.synthesizers.pytorch.pytorch_synthesizer import PytorchDPSynthesizer
from opendp.whitenoise.synthesizers.pytorch.nn import PATEGAN
except ImportError:
import logging
test_logger = logging.getLogger(__name__)
test_logger.warning("Requires torch and torchdp")
git_root_dir = subprocess.check_output("git rev-parse --show-toplevel".split(" ")).decode("utf-8").strip()
meta_path = os.path.join(git_root_dir, os.path.join("service", "datasets", "PUMS.yaml"))
csv_path = os.path.join(git_root_dir, os.path.join("service", "datasets", "PUMS.csv"))
schema = CollectionMetadata.from_file(meta_path)
df = pd.read_csv(csv_path)
@pytest.mark.torch
class TestDPGAN:
def setup(self):
        self.pategan = PytorchDPSynthesizer(PATEGAN(), GeneralTransformer())
def test_fit(self):
self.pategan.fit(df)
assert self.pategan.gan.generator
def test_sample(self):
self.pategan.fit(df)
sample_size = len(df)
synth_data = self.pategan.sample(sample_size)
assert synth_data.shape == df.shape
```
|
{
"source": "Jerry-Ma/glue",
"score": 2
}
|
#### File: core/tests/test_state_objects.py
```python
import numpy as np
from numpy.testing import assert_allclose
from glue.external.echo import CallbackProperty, ListCallbackProperty
from glue.core import Data, DataCollection
from .test_state import clone
from ..state_objects import State, StateAttributeLimitsHelper, StateAttributeSingleValueHelper
class SimpleTestState(State):
a = CallbackProperty()
b = CallbackProperty()
flat = ListCallbackProperty()
nested = ListCallbackProperty()
def test_state_serialization():
state1 = SimpleTestState()
state1.a = 2
state1.b = 'hello'
state1.flat = [1, 3, 4]
sub_state = SimpleTestState()
sub_state.a = 3
sub_state.b = 'blah'
sub_state.flat = [1, 2]
sub_state.nested = []
state1.nested = [1, 3, sub_state]
state2 = clone(state1)
assert state2.a == 2
assert state2.b == 'hello'
assert state2.flat == [1, 3, 4]
assert state2.nested[0:2] == [1, 3]
assert state2.nested[2].a == 3
assert state2.nested[2].b == 'blah'
assert state2.nested[2].flat == [1, 2]
assert state2.nested[2].nested == []
class TestStateAttributeLimitsHelper():
def setup_method(self, method):
self.data = Data(x=np.linspace(-100, 100, 10000),
y=np.linspace(2, 3, 10000), label='test_data')
self.data_collection = DataCollection([self.data])
class SimpleState(State):
layer = CallbackProperty()
comp = CallbackProperty()
lower = CallbackProperty()
upper = CallbackProperty()
log = CallbackProperty(False)
scale = CallbackProperty(100)
self.state = SimpleState()
self.helper = StateAttributeLimitsHelper(self.state, attribute='comp',
lower='lower', upper='upper',
percentile='scale', log='log')
self.state.data = self.data
self.state.comp = self.data.id['x']
self.x_id = self.data.visible_components[0]
self.y_id = self.data.visible_components[1]
def test_minmax(self):
assert self.helper.lower == -100
assert self.helper.upper == +100
def test_change_attribute(self):
self.helper.attribute = self.y_id
assert self.helper.lower == 2
assert self.helper.upper == 3
self.helper.attribute = self.x_id
assert self.helper.lower == -100
assert self.helper.upper == +100
def test_change_percentile(self):
# Changing scale mode updates the limits
self.helper.percentile = 99.5
assert_allclose(self.helper.lower, -99.5)
assert_allclose(self.helper.upper, +99.5)
self.helper.percentile = 99
assert_allclose(self.helper.lower, -99)
assert_allclose(self.helper.upper, +99)
self.helper.percentile = 90
assert_allclose(self.helper.lower, -90)
assert_allclose(self.helper.upper, +90)
# When switching to custom, the last limits are retained
self.helper.percentile = "Custom"
assert_allclose(self.helper.lower, -90)
assert_allclose(self.helper.upper, +90)
def test_percentile_cached(self):
# Make sure that if we change scale and change attribute, the scale
# modes are cached on a per-attribute basis.
self.helper.percentile = 99.5
self.state.comp = self.y_id
assert self.helper.percentile == 100
self.helper.percentile = 99
self.state.comp = self.x_id
assert self.helper.percentile == 99.5
self.state.comp = self.y_id
assert self.helper.percentile == 99
def test_flip_button(self):
self.helper.flip_limits()
assert self.helper.lower == +100
assert self.helper.upper == -100
# Make sure that values were re-cached when flipping
self.state.comp = self.y_id
assert self.helper.lower == 2
assert self.helper.upper == 3
self.state.comp = self.x_id
assert self.helper.lower == +100
assert self.helper.upper == -100
def test_manual_edit(self):
# Make sure that values are re-cached when edited manually
self.helper.percentile = "Custom"
self.state.lower = -122
self.state.upper = 234
self.helper.log = True
assert self.helper.lower == -122
assert self.helper.upper == 234
assert self.helper.log
self.state.comp = self.y_id
assert self.helper.lower == 2
assert self.helper.upper == 3
assert not self.helper.log
self.state.comp = self.x_id
assert self.helper.lower == -122
assert self.helper.upper == 234
assert self.helper.log
class TestStateAttributeSingleValueHelper():
def setup_method(self, method):
self.data = Data(x=np.linspace(-100, 30, 9999),
y=np.linspace(2, 3, 9999), label='test_data')
self.data_collection = DataCollection([self.data])
class SimpleState(State):
layer = CallbackProperty()
comp = CallbackProperty()
val = CallbackProperty()
self.state = SimpleState()
self.helper = StateAttributeSingleValueHelper(self.state, attribute='comp',
function=np.nanmedian, value='val')
self.state.data = self.data
self.state.comp = self.data.id['x']
self.x_id = self.data.visible_components[0]
self.y_id = self.data.visible_components[1]
def test_value(self):
assert self.helper.value == -35.
def test_change_attribute(self):
self.helper.attribute = self.y_id
assert self.helper.value == 2.5
self.helper.attribute = self.x_id
assert self.helper.value == -35
def test_manual_edit(self):
self.state.val = 42.
assert self.helper.value == 42
self.state.comp = self.y_id
assert self.helper.value == 2.5
self.state.comp = self.x_id
assert self.helper.value == 42
def test_limits_helper_initial_values():
# Regression test for a bug that occurred if the limits cache was empty
# but some attributes were set to values - in this case we don't want to
# override the existing values.
data = Data(x=np.linspace(-100, 100, 10000),
y=np.linspace(2, 3, 10000), label='test_data')
class SimpleState(State):
layer = CallbackProperty()
comp = CallbackProperty()
lower = CallbackProperty()
upper = CallbackProperty()
state = SimpleState()
state.lower = 1
state.upper = 2
state.comp = data.id['x']
helper = StateAttributeLimitsHelper(state, attribute='comp',
lower='lower', upper='upper')
assert helper.lower == 1
assert helper.upper == 2
```
#### File: qt/tests/test_data_viewer.py
```python
from __future__ import absolute_import, division, print_function
import numpy as np
from mock import MagicMock, patch
from glue.core import Data, DataCollection
from glue.app.qt import GlueApplication
from glue.core.tests.util import simple_session
from ..data_viewer import DataViewer
from glue.viewers.histogram.qt import HistogramWidget
from glue.viewers.image.qt import ImageWidget
from glue.viewers.scatter.qt import ScatterWidget
# TODO: We should maybe consider running these tests for all
# registered Qt viewers.
def setup_function(func):
import os
os.environ['GLUE_TESTING'] = 'True'
class BaseTestDataViewer(object):
ndim = 1
def test_unregister_on_close(self):
session = simple_session()
hub = session.hub
w = self.widget_cls(session)
w.register_to_hub(hub)
with patch.object(DataViewer, 'unregister') as unregister:
w.close()
unregister.assert_called_once_with(hub)
def test_single_draw_call_on_create(self):
d = Data(x=np.random.random((2,) * self.ndim))
dc = DataCollection([d])
app = GlueApplication(dc)
try:
from glue.viewers.common.qt.mpl_widget import MplCanvas
draw = MplCanvas.draw
MplCanvas.draw = MagicMock()
app.new_data_viewer(self.widget_cls, data=d)
# each Canvas instance gives at most 1 draw call
            selfs = [c[0][0] for c in MplCanvas.draw.call_args_list]
assert len(set(selfs)) == len(selfs)
finally:
MplCanvas.draw = draw
def test_close_on_last_layer_remove(self):
# regression test for 391
d1 = Data(x=np.random.random((2,) * self.ndim))
d2 = Data(y=np.random.random((2,) * self.ndim))
dc = DataCollection([d1, d2])
app = GlueApplication(dc)
with patch.object(self.widget_cls, 'close') as close:
w = app.new_data_viewer(self.widget_cls, data=d1)
w.add_data(d2)
dc.remove(d1)
dc.remove(d2)
assert close.call_count >= 1
def test_viewer_size(self, tmpdir):
# regression test for #781
# viewers were not restored with the right size
d1 = Data(x=np.random.random((2,) * self.ndim))
d2 = Data(x=np.random.random((2,) * self.ndim))
dc = DataCollection([d1, d2])
app = GlueApplication(dc)
w = app.new_data_viewer(self.widget_cls, data=d1)
w.viewer_size = (300, 400)
filename = tmpdir.join('session.glu').strpath
app.save_session(filename, include_data=True)
app2 = GlueApplication.restore_session(filename)
for viewer in app2.viewers:
assert viewer[0].viewer_size == (300, 400)
app.close()
app2.close()
class TestDataViewerScatter(BaseTestDataViewer):
widget_cls = ScatterWidget
class TestDataViewerImage(BaseTestDataViewer):
ndim = 2
widget_cls = ImageWidget
class TestDataViewerHistogram(BaseTestDataViewer):
widget_cls = HistogramWidget
```
#### File: histogram/tests/test_client.py
```python
from __future__ import absolute_import, division, print_function
import pytest
import numpy as np
from mock import MagicMock
from glue.core.subset import RangeSubsetState, CategoricalROISubsetState
from glue.core.component_id import ComponentID
from glue.core.component import CategoricalComponent
from glue.core.data import Data
from glue.core.exceptions import IncompatibleDataException
from glue.core.data_collection import DataCollection
from glue.core.roi import PolygonalROI
from glue.utils import renderless_figure
from ..client import HistogramClient
from ..layer_artist import HistogramLayerArtist
FIGURE = renderless_figure()
class TestHistogramClient(object):
def setup_method(self, method):
self.data = Data(x=[0, 0, 0, 1, 2, 3, 3, 10, 20],
y=[-1, -1, -1, -2, -2, -2, -3, -5, -7])
self.subset = self.data.new_subset()
self.collect = DataCollection(self.data)
self.client = HistogramClient(self.collect, FIGURE)
self.axes = self.client.axes
FIGURE.canvas.draw = MagicMock()
assert FIGURE.canvas.draw.call_count == 0
def draw_count(self):
return self.axes.figure.canvas.draw.call_count
def layer_drawn(self, layer):
return layer in self.client._artists and \
all(a.visible for a in self.client._artists[layer]) and \
all(len(a.artists) > 0 for a in self.client._artists[layer])
def layer_present(self, layer):
return layer in self.client._artists
def assert_autoscaled(self):
yra = self.client.axes.get_ylim()
datara = [99999, -99999]
for a in self.client._artists:
if a.y.size > 0:
datara[0] = min(datara[0], a.y.min())
datara[1] = max(datara[1], a.y.max())
assert yra[0] <= datara[0]
assert yra[1] >= datara[1]
def test_empty_on_creation(self):
assert self.data not in self.client._artists
def test_add_layer(self):
self.client.add_layer(self.data)
assert self.layer_present(self.data)
assert not self.layer_drawn(self.data)
self.client.set_component(self.data.components[0])
assert self.layer_drawn(self.data)
def test_add_invalid_layer_raises(self):
self.collect.remove(self.data)
with pytest.raises(IncompatibleDataException):
self.client.add_layer(self.data)
def test_add_subset_auto_adds_data(self):
subset = self.data.new_subset()
self.client.add_layer(subset)
assert self.layer_present(self.data)
assert self.layer_present(subset)
self.client.set_component(self.data.components[0])
assert self.layer_drawn(self.data)
def test_double_add_ignored(self):
self.client.add_layer(self.data)
art = self.client._artists[self.data]
self.client.add_layer(self.data)
assert self.client._artists[self.data] == art
def test_add_data_auto_adds_subsets(self):
s = self.data.new_subset()
self.client.add_layer(self.data)
assert self.layer_present(s)
def test_data_removal(self):
self.client.add_layer(self.data)
self.client.remove_layer(self.data)
assert not (self.layer_present(self.data))
def test_data_removal_removes_subsets(self):
self.client.add_layer(self.data)
self.client.remove_layer(self.data)
self.data.new_subset()
assert len(self.data.subsets) > 0
for subset in self.data.subsets:
assert not (self.layer_present(subset))
def test_layer_updates_on_data_add(self):
self.client.add_layer(self.data)
for s in self.data.subsets:
assert s in self.client._artists
def test_set_component_updates_component(self):
self.client.add_layer(self.data)
comp = self.data.find_component_id('uniform')
self.client.set_component(comp)
assert self.client._component is comp
def test_set_component_redraws(self):
self.client.add_layer(self.data)
comp = self.data.id['x']
comp2 = self.data.id['y']
self.client.set_component(comp)
ct0 = self.draw_count()
self.client.set_component(comp2)
assert self.draw_count() > ct0
def test_remove_not_present_ignored(self):
self.client.remove_layer(self.data)
def test_set_visible_external_data(self):
self.client.set_layer_visible(None, False)
def test_get_visible_external_data(self):
assert not (self.client.is_layer_visible(None))
def test_set_visible(self):
self.client.add_layer(self.data)
self.client.set_layer_visible(self.data, False)
assert not (self.client.is_layer_visible(self.data))
def test_draw_histogram_one_layer(self):
self.client.add_layer(self.data)
self.client.set_component(self.data.find_component_id('uniform'))
def test_draw_histogram_subset_hidden(self):
self.client.add_layer(self.data)
s = self.data.new_subset()
self.client.set_layer_visible(s, False)
self.client.set_component(self.data.find_component_id('uniform'))
def test_draw_histogram_two_layers(self):
self.client.add_layer(self.data)
self.client.set_component(self.data.find_component_id('uniform'))
def test_update_property_set_triggers_redraw(self):
self.client.add_layer(self.data)
ct = self.draw_count()
self.client.normed ^= True
assert self.draw_count() > ct
@pytest.mark.parametrize(('prop'), ['normed', 'cumulative'])
def test_set_boolean_property(self, prop):
"""Boolean properties should sync with artists"""
self.client.add_layer(self.data)
self.client.set_component(self.data.components[0])
setattr(self.client, prop, False)
for a in self.client._artists:
assert not getattr(a, prop)
setattr(self.client, prop, True)
for a in self.client._artists:
assert getattr(a, prop)
def test_set_nbins(self):
self.client.add_layer(self.data)
self.client.set_component(self.data.components[0])
self.client.nbins = 100
for a in self.client._artists[self.data]:
assert a.nbins == 100
assert a.x.size == 100 + 1
def test_autoscale(self):
self.client.add_layer(self.data)
self.client.set_component(self.data.components[0])
self.client.axes.set_ylim(0, .1)
self.client.autoscale = False
self.client.autoscale = True
self.assert_autoscaled()
def test_xlimits(self):
self.client.add_layer(self.data)
self.client.set_component(self.data.components[0])
self.client.xlimits = -12, 20
assert self.client.xlimits == (-12, 20)
for a in self.client._artists[self.data]:
assert a.lo == -12
assert a.hi == 20
def test_set_xlimits_out_of_data_range(self):
"""Setting xlimits outside of range shouldn't crash"""
self.client.add_layer(self.data)
self.client.set_component(self.data.components[0])
self.client.xlimits = 100, 200
self.client.xlimits = -200, -100
def test_component_property(self):
self.client.add_layer(self.data)
self.client.set_component(self.data.components[0])
assert self.client.component is self.data.components[0]
def test_apply_roi(self):
self.client.add_layer(self.data)
self.client.set_component(self.data.id['y'])
# bins are -7...-1
self.data.edit_subset = [self.data.subsets[0]]
roi = PolygonalROI(vx=[-5.1, -4.5, -3.2], vy=[2, 3, 4])
self.client.apply_roi(roi)
state = self.data.subsets[0].subset_state
assert isinstance(state, RangeSubsetState)
# range should expand to nearest bin edge
assert state.lo == -6
assert state.hi == -3
def test_apply_roi_xlog(self):
self.client.add_layer(self.data)
self.client.set_component(self.data.id['x'])
self.data.edit_subset = [self.data.subsets[0]]
self.client.xlog = True
roi = PolygonalROI(vx=[1, 2, 3], vy=[2, 3, 4])
self.client.apply_roi(roi)
state = self.data.subsets[0].subset_state
assert isinstance(state, RangeSubsetState)
np.testing.assert_allclose(state.lo, 7.3680629972807736)
np.testing.assert_allclose(state.hi, 1000)
def test_xlimits_sticky_with_component(self):
self.client.add_layer(self.data)
self.client.set_component(self.data.components[0])
self.client.xlimits = 5, 6
self.client.set_component(self.data.components[1])
self.client.xlimits = 7, 8
self.client.set_component(self.data.components[0])
assert self.client.xlimits == (5, 6)
self.client.set_component(self.data.components[1])
assert self.client.xlimits == (7, 8)
def test_default_xlimits(self):
self.client.add_layer(self.data)
self.client.set_component(self.data.id['x'])
assert self.client.xlimits == (0, 20)
self.client.set_component(self.data.id['y'])
assert self.client.xlimits == (-7, -1)
def test_xlimit_single_set(self):
self.client.add_layer(self.data)
self.client.set_component(self.data.id['x'])
self.client.xlimits = (None, 5)
assert self.client.xlimits == (0, 5)
self.client.xlimits = (3, None)
assert self.client.xlimits == (3, 5)
def test_xlimit_reverse_set(self):
self.client.add_layer(self.data)
self.client.set_component(self.data.id['x'])
self.client.xlimits = 5, 3
assert self.client.xlimits == (3, 5)
def test_xlog_axes_labels(self):
self.client.add_layer(self.data)
self.client.set_component(self.data.id['x'])
self.client.xlog = True
assert self.client.axes.get_xlabel() == 'Log x'
self.client.xlog = False
assert self.client.axes.get_xlabel() == 'x'
self.client.ylog = True
assert self.client.axes.get_ylabel() == 'N'
self.client.ylog = False
assert self.client.axes.get_ylabel() == 'N'
def test_xlog_snaps_limits(self):
self.client.add_layer(self.data)
self.client.set_component(self.data.id['x'])
self.client.axes.set_xlim((-1, 1))
self.client.xlog = True
assert self.client.axes.get_xlim() != (-1, 1)
def test_artist_clear_resets_arrays(self):
self.client.add_layer(self.data)
self.client.set_component(self.data.components[0])
for a in self.client._artists[self.data]:
assert a.get_data()[0].size > 0
a.clear()
assert a.get_data()[0].size == 0
def test_component_replaced(self):
# regression test for 508
self.client.register_to_hub(self.collect.hub)
self.client.add_layer(self.data)
self.client.component = self.data.components[0]
test = ComponentID('test')
self.data.update_id(self.client.component, test)
assert self.client.component is test
def test_update_when_limits_unchanged(self):
# Regression test for glue-viz/glue#1010 - this bug caused histograms
# to not be recomputed if the attribute changed but the limits and
# number of bins did not.
self.client.add_layer(self.data)
self.client.set_component(self.data.id['y'])
self.client.xlimits = -20, 20
self.client.nbins = 12
y1 = self.client._artists[0]._y
self.client.set_component(self.data.id['x'])
self.client.xlimits = -20, 20
self.client.nbins = 12
y2 = self.client._artists[0]._y
assert not np.allclose(y1, y2)
self.client.set_component(self.data.id['y'])
y3 = self.client._artists[0]._y
np.testing.assert_allclose(y1, y3)
class TestCategoricalHistogram(TestHistogramClient):
def setup_method(self, method):
self.data = Data(y=[-1, -1, -1, -2, -2, -2, -3, -5, -7])
self.data.add_component(CategoricalComponent(['a', 'a', 'a', 'b', 'c', 'd', 'd', 'e', 'f']), 'x')
self.subset = self.data.new_subset()
self.collect = DataCollection(self.data)
self.client = HistogramClient(self.collect, FIGURE)
self.axes = self.client.axes
FIGURE.canvas.draw = MagicMock()
assert FIGURE.canvas.draw.call_count == 0
def test_xlimit_single_set(self):
self.client.add_layer(self.data)
self.client.set_component(self.data.id['x'])
self.client.xlimits = (None, 5)
assert self.client.xlimits == (-0.5, 5)
self.client.xlimits = (3, None)
assert self.client.xlimits == (3, 5)
def test_default_xlimits(self):
self.client.add_layer(self.data)
self.client.set_component(self.data.id['x'])
assert self.client.xlimits == (-0.5, 5.5)
self.client.set_component(self.data.id['y'])
assert self.client.xlimits == (-7, -1)
def test_change_default_bins(self):
self.client.add_layer(self.data)
self.client.set_component(self.data.id['x'])
assert self.client.nbins == 6
def test_tick_labels(self):
self.client.add_layer(self.data)
self.client.set_component(self.data.id['x'])
correct_labels = ['a', 'b', 'c', 'd', 'e', 'f']
formatter = self.client.axes.xaxis.get_major_formatter()
xlabels = [formatter.format_data(pos) for pos in range(6)]
assert correct_labels == xlabels
def test_apply_roi(self):
self.client.add_layer(self.data)
self.client.set_component(self.data.id['x'])
# bins are 1...4
self.data.edit_subset = [self.data.subsets[0]]
roi = MagicMock()
roi.to_polygon.return_value = [1.2, 2, 4], [2, 3, 4]
self.client.apply_roi(roi)
state = self.data.subsets[0].subset_state
assert isinstance(state, CategoricalROISubsetState)
np.testing.assert_equal(self.data.subsets[0].subset_state.roi.categories,
np.array(['b', 'c', 'd', 'e']))
# REMOVED TESTS
def test_xlog_axes_labels(self):
""" log-scale doesn't make sense for categorical data"""
pass
def test_xlog_snaps_limits(self):
""" log-scale doesn't make sense for categorical data"""
pass
def test_apply_roi_xlog(self):
""" log-scale doesn't make sense for categorical data"""
pass
def test_nbin_override_persists_over_attribute_change(self):
# regression test for #398
self.collect.append(self.data)
self.client.add_layer(self.data)
self.client.set_component(self.data.id['x'])
self.client.nbins = 7
self.client.set_component(self.data.id['y'])
assert self.client.nbins == 7
class TestCommunication(object):
def setup_method(self, method):
self.data = Data(x=[1, 2, 3, 2, 2, 3, 1])
figure = MagicMock()
self.collect = DataCollection()
self.client = HistogramClient(self.collect, figure)
self.axes = self.client.axes
self.hub = self.collect.hub
self.connect()
def draw_count(self):
return self.axes.figure.canvas.draw.call_count
def connect(self):
self.client.register_to_hub(self.hub)
self.collect.register_to_hub(self.hub)
def test_ignore_data_add_message(self):
self.collect.append(self.data)
assert not (self.client.layer_present(self.data))
def test_update_data_ignored_if_data_not_present(self):
self.collect.append(self.data)
ct0 = self.draw_count()
self.data.style.color = 'blue'
assert self.draw_count() == ct0
def test_update_data_processed_if_data_present(self):
self.collect.append(self.data)
self.client.add_layer(self.data)
ct0 = self.draw_count()
self.data.style.color = 'blue'
assert self.draw_count() > ct0
def test_add_subset_ignored_if_data_not_present(self):
self.collect.append(self.data)
sub = self.data.new_subset()
assert not (self.client.layer_present(sub))
def test_add_subset_processed_if_data_present(self):
self.collect.append(self.data)
self.client.add_layer(self.data)
sub = self.data.new_subset()
assert (self.client.layer_present(sub))
def test_update_subset_ignored_if_not_present(self):
self.collect.append(self.data)
self.client.add_layer(self.data)
sub = self.data.new_subset()
self.client.remove_layer(sub)
ct0 = self.draw_count()
sub.style.color = 'blue'
assert self.draw_count() == ct0
def test_update_subset_processed_if_present(self):
self.collect.append(self.data)
self.client.add_layer(self.data)
sub = self.data.new_subset()
ct0 = self.draw_count()
sub.style.color = 'blue'
assert self.draw_count() > ct0
def test_data_remove_message(self):
self.collect.append(self.data)
self.client.add_layer(self.data)
self.collect.remove(self.data)
assert not self.client.layer_present(self.data)
def test_subset_remove_message(self):
self.collect.append(self.data)
self.client.add_layer(self.data)
sub = self.data.new_subset()
assert self.client.layer_present(sub)
sub.delete()
assert not self.client.layer_present(sub)
class TestHistogramLayerArtist(object):
def setup_subset(self):
ax = MagicMock()
d = Data(x=[1, 2, 3])
s = d.new_subset()
s.subset_state = d.id['x'] > 1
self.artist = HistogramLayerArtist(s, ax)
def setup_hist_calc_counter(self):
self.setup_subset()
m = MagicMock()
self.artist._calculate_histogram = m
return m
def setup_hist_scale_counter(self):
self.setup_subset()
m = MagicMock()
self.artist._scale_histogram = m
self.artist._calculate_histogram = MagicMock()
return m
def test_calculate_histogram_efficient(self):
ct = self.setup_hist_calc_counter()
self.artist.update()
assert ct.call_count == 1
self.artist.update()
assert ct.call_count == 1
def test_recalc_on_state_changes(self):
ct = self.setup_hist_calc_counter()
assert ct.call_count == 0
self.artist.update()
assert ct.call_count == 1
# lo
self.artist.lo -= 1
self.artist.update()
self.artist.update()
assert ct.call_count == 2
# hi
self.artist.hi -= 1
self.artist.update()
self.artist.update()
assert ct.call_count == 3
# nbins
self.artist.nbins += 1
self.artist.update()
self.artist.update()
assert ct.call_count == 4
# xlog
self.artist.xlog ^= True
self.artist.update()
self.artist.update()
assert ct.call_count == 5
# ylog -- no call
self.artist.ylog ^= True
self.artist.update()
self.artist.update()
assert ct.call_count == 5
# cumulative -- no call
self.artist.cumulative ^= True
self.artist.update()
self.artist.update()
assert ct.call_count == 5
# normed -- no call
self.artist.normed ^= True
self.artist.update()
self.artist.update()
assert ct.call_count == 5
# subset style -- no call
self.artist.layer.style.color = '#00ff00'
self.artist.update()
self.artist.update()
assert ct.call_count == 5
# subset state
self.artist.layer.subset_state = self.artist.layer.data.id['x'] > 10
self.artist.update()
self.artist.update()
assert ct.call_count == 6
def test_rescale_on_state_changes(self):
ct = self.setup_hist_scale_counter()
assert ct.call_count == 0
self.artist.update()
self.artist.update()
assert ct.call_count == 1
# lo
self.artist.lo -= 1
self.artist.update()
self.artist.update()
assert ct.call_count == 2
# hi
self.artist.hi -= 1
self.artist.update()
self.artist.update()
assert ct.call_count == 3
# nbins
self.artist.nbins += 1
self.artist.update()
self.artist.update()
assert ct.call_count == 4
# xlog
self.artist.xlog ^= True
self.artist.update()
self.artist.update()
assert ct.call_count == 5
# ylog
self.artist.ylog ^= True
self.artist.update()
self.artist.update()
assert ct.call_count == 6
# cumulative
self.artist.cumulative ^= True
self.artist.update()
self.artist.update()
assert ct.call_count == 7
# normed
self.artist.normed ^= True
self.artist.update()
self.artist.update()
assert ct.call_count == 8
# subset state
self.artist.layer.subset_state = self.artist.layer.data.id['x'] > 10
self.artist.update()
self.artist.update()
assert ct.call_count == 9
# subset style -- no call
self.artist.layer.style.color = '#00ff00'
self.artist.update()
self.artist.update()
assert ct.call_count == 9
```
|
{
"source": "jerrymakesjelly/nlp-homework",
"score": 3
}
|
#### File: nlp-homework/Problem1-3/problem1.py
```python
class Corpus(object):
def __init__(self):
self._filename = '1998-01-105-带音.txt'
self._root = {'word':'', 'next':[], 'child':[]}
        self._unrecognized = 0
def train(self, end_paragraph=''):
# Read the file
with open(self._filename) as f:
paragraphs = f.readlines()
for paragraph in paragraphs: # For each paragraph
paragraph = paragraph.split() # Split words
for x in paragraph:
x = x.split('/') # divide
if '-' in x[0] and x[1] == 'm': # The beginning of a paragraph
if x[0] == end_paragraph: # End position
return
# Remove bracket []
if len(x[0]) > 0 and x[0][0] == '[':
x[0] = x[0][1:]
x[1] = x[1].split(']')[0]
# Find words in the tree
node = self._root
i = 0
while i < len(x[0]) and x[0][i] in node['next']:
node = node['child'][node['next'].index(x[0][i])] # Next Node
i += 1
# Add nodes
while i < len(x[0]):
node['next'].append(x[0][i])
node['child'].append({'word':x[0][i], 'next':[], 'child':[]})
node = node['child'][-1]
i += 1
# Mark
node['next'].append('')
node['child'].append(0)
def seg(self, sentences):
begin = near_end = end = 0
result = list()
while begin < len(sentences):
end = begin
near_end = end
node = self._root
while end < len(sentences) and sentences[end] in node['next']:
node = node['child'][node['next'].index(sentences[end])] # Next Node
if '' in node['next']:
near_end = end
end += 1
if near_end == begin: # No words
result.append(sentences[begin:begin+1])
                self._unrecognized += 1
else:
result.append(sentences[begin:near_end+1])
begin = near_end + 1
return result
    def unrecognized_words(self): # Return the number of unrecognized words encountered so far
        return self._unrecognized
if __name__=='__main__':
corpus = Corpus()
# Train
print('Training...', end='', flush=True)
corpus.train()
print('Ok.')
# Segmentation
print('Please input sentences for word segmentation.')
while True:
print('>> ', end='')
print(corpus.seg(input()))
```
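`Corpus` stores its dictionary as a character trie of `{'word', 'next', 'child'}` nodes, with the end of a word marked by an empty string in `next` (paired with `0` in the matching `child` slot). As a hand-written illustration (not output captured from the program), training on just the two words 中国 and 中学 would leave `self._root` looking like this:
```python
# End-of-word marker: '' in 'next' with 0 in the matching 'child' slot.
root = {
    'word': '', 'next': ['中'], 'child': [
        {'word': '中', 'next': ['国', '学'], 'child': [
            {'word': '国', 'next': [''], 'child': [0]},  # "中国" ends here
            {'word': '学', 'next': [''], 'child': [0]},  # "中学" ends here
        ]},
    ],
}
```
`seg()` then walks this trie greedily from each position, remembering the last index where it saw the end-of-word marker (`near_end`) so that it emits the longest dictionary match, and falls back to a single character when nothing matches.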
#### File: nlp-homework/Problem1-3/problem3.py
```python
class Viterbi(object):
def __init__(self):
self._filename = '1998-01-105-带音.txt'
# the part of speech. P(After|Before)
self._pos_relations = dict()
# the part of speech of each word. P(Word|Part of Speech)
self._pos_total = dict()
self._pos_of_word = dict()
# the sequence of part of speech
self._pos_vector = list()
# the number of states
self._N = 0
def train(self, end_segmentation=''):
# Read the file
with open(self._filename) as f:
paragraphs = f.readlines()
for paragraph in paragraphs:
paragraph = paragraph.split() # Split words
paragraph.append('END/END')
prev = 'START'
for x in paragraph:
x = x.split('/') # Split
if '-' in x[0] and x[1] == 'm': # The beginning of a paragraph
if x[0] == end_segmentation: # Reach end position
return
continue
# Remove brackets
if len(x[0]) > 0 and x[0][0] == '[': # Left bracket
x[0] = x[0][1:]
x[1] = x[1].split(']')[0] # Right bracket
# Statistics 1
if prev in self._pos_relations:
if x[1] in self._pos_relations[prev]:
self._pos_relations[prev][x[1]] += 1
else:
self._pos_relations[prev][x[1]] = 1
else:
self._pos_relations[prev] = dict()
self._pos_relations[prev][x[1]] = 1
# Statistics 2
if x[1] in self._pos_total:
self._pos_total[x[1]] += 1
else:
self._pos_total[x[1]] = 1
# Statistics 3
if x[1] in self._pos_of_word:
if x[0] in self._pos_of_word[x[1]]:
self._pos_of_word[x[1]][x[0]] += 1
else:
self._pos_of_word[x[1]][x[0]] = 1
else:
self._pos_of_word[x[1]] = dict()
self._pos_of_word[x[1]][x[0]] = 1
prev = x[1]
# Compute 1
for p in self._pos_relations:
sum = 0
for (o, w) in self._pos_relations[p].items():
                sum += w
for o in self._pos_relations[p]:
self._pos_relations[p][o] /= 0.05*sum
# Compute 2
for p in self._pos_of_word:
for o in self._pos_of_word[p]:
self._pos_of_word[p][o] /= 0.05*self._pos_total[p]
# Generate sequence of Part of Speech
self._pos_vector = [p for p in self._pos_total]
self._pos_vector.insert(0, 'START')
self._pos_vector.remove('END')
# Calculate N
self._N = len(self._pos_total)
self._pos_vector.append('END')
#print('Training Completed.')
def _a(self, before, after):
if self._pos_vector[after] in self._pos_relations[self._pos_vector[before]]:
return self._pos_relations[self._pos_vector[before]][self._pos_vector[after]]
else:
return 0
def _b(self, state, word):
if word in self._pos_of_word[self._pos_vector[state]]:
return self._pos_of_word[self._pos_vector[state]][word]
else:
return 0
def _max(self, li):
max = li[0]
for x in li:
if x > max:
max = x
return max
def _argmax(self, li):
maxarg = 0
for i in range(len(li)):
if li[i] > li[maxarg]:
maxarg = i
return maxarg
def viterbi(self, sentence):
# The sentence should start with START
sentence.insert(0, 'START')
T = len(sentence)-1
# Create a path probability matrix[N+2, T]
vit = [[0 for i in range(T+1)] for j in range(self._N+2)]
backpointer = [[0 for i in range(T+1)] for j in range(self._N+2)]
for state in range(1, self._N):
vit[state][1] = self._a(0, state) * self._b(state, sentence[1])
backpointer[state][1] = 0
for t in range(2, T+1):
for state in range(1, self._N):
vit[state][t] = self._max([vit[s][t-1]*self._a(s, state)*self._b(state, sentence[t]) for s in range(1, self._N)])
backpointer[state][t] = self._argmax([vit[s][t-1]*self._a(s, state) for s in range(1, self._N)])+1
# The state N+1 is END
vit[self._N+1][T] = self._max([vit[s][T]*self._a(s, self._N) for s in range(1, self._N)])
backpointer[self._N+1][T] = self._argmax([vit[s][T]*self._a(s, self._N) for s in range(1, self._N)])+1
return self._backtrace(backpointer)
#print('Completed.')
def _backtrace(self, backpointer):
t = len(backpointer[0])-1
result = list()
state = backpointer[self._N+1][t]
while t > 0:
result.insert(0, self._pos_vector[state])
state = backpointer[state][t]
t = t-1
return result
if __name__ == '__main__':
vi = Viterbi()
print('Training Viterbi...', end='', flush=True)
vi.train()
print("Ok.")
print("Input the word sequence (NOT sentence) which is going to be analyzed.")
while True:
print('>> ', end='')
sentence = input()
print(vi.viterbi(sentence.split()))
```
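In formula form, the recurrence that `Viterbi.viterbi` implements, with $a$ taken from the rescaled transition counts in `_pos_relations` and $b$ from the emission counts in `_pos_of_word`, is (this is a restatement of the loops above, not an addition to the assignment):
```latex
v_1(s) = a_{\mathrm{START},\,s}\; b_s(w_1), \qquad
v_t(s) = \max_{s'} \; v_{t-1}(s')\, a_{s',\,s}\, b_s(w_t), \qquad
v_{\mathrm{END}} = \max_{s} \; v_T(s)\, a_{s,\,\mathrm{END}}
```
The back-pointer table records the maximizing $s'$ at each step, which `_backtrace` follows back from the END state to recover the tag sequence.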
#### File: nlp-homework/Tester/tester3.py
```python
from problem3 import Viterbi
class Tester(object):
def __init__(self, start_segment, end_segment=''):
self._viterbi = Viterbi()
self._viterbi.train()
print('The Viterbi Algorithm is trained.')
self._filename = '1998-01-105-带音.txt'
self._word_set = list()
self._answer_set = list()
self._load_testset(start_segment, end_segment)
print('The test data is ready. '+str(len(self._word_set))+' line(s).')
def _load_testset(self, start_segment, end_segment):
start = False
# Read the file
with open(self._filename) as f:
paragraphs = f.readlines()
for paragraph in paragraphs:
one_word_set = list()
one_answer_set = list()
paragraph = paragraph.split() # Split words
for x in paragraph:
x = x.split('/') # Split
if '-' in x[0] and x[1] == 'm': # The beginning of a paragraph
if x[0] == start_segment: # Reach end position
start=True
if x[0] == end_segment:
start=False
continue
if not start:
break # Go to the next paragraph
# Remove brackets
if len(x[0]) > 0 and x[0][0] == '[': # Left bracket
x[0] = x[0][1:]
x[1] = x[1].split(']')[0] # Right bracket
one_word_set.append(x[0])
one_answer_set.append(x[1])
if len(one_word_set) > 0:
self._word_set.append(one_word_set)
self._answer_set.append(one_answer_set)
def test(self):
correct = 0
wrong = 0
for i in range(len(self._word_set)):
result = self._viterbi.viterbi(self._word_set[i])
if 'START' in self._word_set[i]:
self._word_set[i].remove('START')
# Check the answer
for j in range(len(result)):
if result[j] == self._answer_set[i][j]:
correct += 1
else:
print('Wrong Answer: '+self._word_set[i][j]+': '+result[j]+'->'+self._answer_set[i][j])
wrong += 1
print('Correct: '+str(correct))
print('Wrong: '+str(wrong))
print('Rate: '+str(correct/(correct+wrong)))
if __name__ == '__main__':
Tester('19980101-01-001-001', '19980102-01-005-001').test()
```
|
{
"source": "Jerry-Ma/njweather",
"score": 3
}
|
#### File: Jerry-Ma/njweather/njweather.py
```python
import logging
import logging.config
from fake_useragent import UserAgent
import urllib.parse
import requests
# import re
# from bs4 import BeautifulSoup
import pandas as pd
# import json
from datetime import datetime
class NjWeatherQuery(object):
_ua = UserAgent()
_baseurl = "https://www.njweather.org/data"
_query_template = 'startDate={date_start}&endDate={date_end}&formSubmitted=1&selectedElements%5B%5D=1&selectedElements%5B%5D=2&selectedElements%5B%5D=5&selectedElements%5B%5D=4&selectedElements%5B%5D=1037&selectedElements%5B%5D=3&selectedElements%5B%5D=6&selectedElements%5B%5D=7&selectedElements%5B%5D=15&selectedStations%5B%5D={site_id}' # noqa: E501
_site_id_map = {
'Jersey City': 3411,
}
logger = logging.getLogger('njweatherquery')
@classmethod
def get_site_id(cls, site):
if site not in cls._site_id_map:
raise ValueError(
f"site {site} not in {list(cls._site_id_map.keys())}")
site_id = cls._site_id_map[site]
return site_id
def __init__(self, site):
s = self._session = requests.Session()
s.headers.update({
'User-Agent': self._ua.random
})
self._site_id = self.get_site_id(site)
@staticmethod
def parse_response(response):
# print(response.text)
df = pd.read_html(response.text, attrs={'id': 'dataout'})[0]
# post-process the date column
df['date'] = df['Eastern Time'].apply(pd.to_datetime)
df = df.sort_values(by=['date'])
print(df)
return df
@staticmethod
def _pprint_df(df):
return (
f"{len(df)} records "
f"from {df['date'].iloc[-1]} "
f"to {df['date'].iloc[0]}")
def get_data_by_datetime(self, start, end):
# check date to get start df
date_start = datetime.fromisoformat(start)
date_end = datetime.fromisoformat(end)
if date_end <= date_start:
raise ValueError('invalid date range')
s = self._session
s.cookies.clear()
url = '{}?{}'.format(
self._baseurl,
self._query_template.format(
date_start=urllib.parse.quote_plus(
date_start.strftime('%Y-%m-%d %H:%M')
),
date_end=urllib.parse.quote_plus(
date_end.strftime('%Y-%m-%d %H:%M')
),
site_id=self._site_id
))
r = s.post(url)
df = self.parse_response(r)
self.logger.debug(f"finish download {self._pprint_df(df)}")
return df
class NjWeather(object):
_ua = UserAgent()
_baseurl = "https://www.njweather.org/data"
_valid_cadences = ['5min', 'hourly', 'daily']
_valid_offset_units = ['month', 'day', 'hour']
_site_id_map = {
'Jersey City': 3411,
}
logger = logging.getLogger('njweather')
@classmethod
def get_initial_query_url(cls, cadence, site):
if cadence not in cls._valid_cadences:
raise ValueError(
f"cadence {cadence} not in {cls._valid_cadences}")
if site not in cls._site_id_map:
raise ValueError(
f"site {site} not in {list(cls._site_id_map.keys())}")
site_id = cls._site_id_map[site]
url = f"{cls._baseurl}/{cadence}/{site_id}"
cls.logger.debug(f"query {url} for cadence={cadence} site={site}")
return url
@classmethod
def get_offset_query_url(cls, value, unit):
value = int(value)
if unit not in cls._valid_offset_units:
raise ValueError(
f"unit {unit} not in {cls._valid_offset_units}")
if value > 0:
verb = 'add'
else:
verb = 'sub'
url = f"{cls._baseurl}/{verb}/{abs(value)}/{unit}"
cls.logger.debug(f"query offset {url} for value={value} unit={unit}")
return url
def __init__(self, cadence, site):
s = self._session = requests.Session()
s.headers.update({
'User-Agent': self._ua.random
})
self._initial_query_url = self.get_initial_query_url(cadence, site)
@staticmethod
def parse_response(response):
return NjWeatherQuery.parse_response(response)
# soup = BeautifulSoup(response.text, 'lxml')
# pattern = re.compile(r'\"aaData\"\s*:\s*(\[[.\s\S]*?\])')
# scripts = soup.find_all('script')
# data_json = None
# for script in scripts:
# if script.string is None:
# continue
# # print(script.string)
# m = pattern.search(script.string)
# if m:
# data_json = m.group(1)
# break
# else:
# return None
# df = pd.DataFrame.from_records(json.loads(data_json))
# # post-process the date column
# # print(df)
# df['date'] = df['date'].apply(pd.to_datetime)
# return df
@staticmethod
def _pprint_df(df):
return (
f"{len(df)} records "
f"from {df['date'].iloc[-1]} "
f"to {df['date'].iloc[0]}")
def get_data_by_datetime(self, start, end):
# make init query
s = self._session
s.cookies.clear()
r_init = s.get(self._initial_query_url)
# self.logger.debug(f'{q.cookies}')
df_init = self.parse_response(r_init)
# check date to get start df
date_start = datetime.fromisoformat(start)
date_end = datetime.fromisoformat(end)
# we assume the data is sorted already.
# find out if we have end date captured
if date_end > df_init['date'].iloc[0]:
self.logger.warning(
"the end date seems to be at future which data may not exists."
)
if date_end < df_init['date'].iloc[-1]:
            # we need to compute a delta in days to replace the initial query
init_offset = (date_end - df_init['date'].iloc[-1])
r_start = s.get(
self.get_offset_query_url(
init_offset.total_seconds() / (24 * 60 * 60), 'day'))
df_start = self.parse_response(r_start)
else:
r_start = r_init
df_start = df_init
        # now we can step back one day at a time to cover the requested range
day_span = int(abs(
(date_end - date_start).total_seconds() / (24 * 60 * 60)))
dfs = [df_start, ]
self.logger.debug(f"init with {self._pprint_df(df_start)}")
for i in range(day_span):
r_step = s.get(
self.get_offset_query_url(
-1, 'day'
)
)
# the extra slice is to avoid the duplicate of the latest
# entry
df_step = self.parse_response(r_step).iloc[1:]
self.logger.debug(f"append {self._pprint_df(df_step)}")
dfs.append(df_step)
df = pd.concat(dfs)
df = df.sort_values(by=['date'])
self.logger.debug(f"finish download {self._pprint_df(df)}")
return df
def init_log():
config = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'short': {
'format': '[%(levelname)s] %(name)s: %(message)s'
},
},
'handlers': {
'default': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'short',
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': 'DEBUG',
'propagate': False
},
}
}
logging.config.dictConfig(config)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description="Download data from NJWeather.")
parser.add_argument(
'--cadence', '-c',
default='5min',
help="The data cadence",
choices=NjWeather._valid_cadences)
parser.add_argument(
'--site', '-s',
default='Jersey City',
help="The site",
choices=list(NjWeather._site_id_map.keys()))
parser.add_argument(
'--date', '-d',
nargs=2,
required=True,
help="The date range, specified as <start> <end>",
)
parser.add_argument(
'--quiet', '-q',
action='store_true',
help='Suppress the debug messages.'
)
option = parser.parse_args()
if not option.quiet:
init_log()
cadence = option.cadence
site = option.site
start = option.date[0]
end = option.date[1]
logger = logging.getLogger()
# njw = NjWeatherQuery(site=site)
njw = NjWeather(cadence=cadence, site=site)
df = njw.get_data_by_datetime(start, end)
    outname = f'njw_{cadence}_{site.replace(" ", "_")}_{start}-{end}.csv'
    df.to_csv(outname, index=False)
logger.debug(f"{len(df)} records saved in {outname}")
```
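Besides the command-line entry point, the two classes can be used directly; a minimal sketch, assuming `njweather.py` is importable from the working directory and using example dates and filenames:
```python
# Minimal programmatic use of the module above; dates and filenames are examples.
from njweather import NjWeather, NjWeatherQuery, init_log

init_log()  # optional: enable the debug logging configured above

# Day-by-day paging through the offset endpoints (the same path the CLI takes).
df = NjWeather(cadence="5min", site="Jersey City").get_data_by_datetime(
    "2021-01-01 00:00", "2021-01-02 00:00")

# Single form POST covering the whole range (the variant __main__ leaves commented out).
df_direct = NjWeatherQuery(site="Jersey City").get_data_by_datetime(
    "2021-01-01 00:00", "2021-01-02 00:00")

df.to_csv("jersey_city_5min.csv", index=False)
```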
|
{
"source": "jerrymarino/rules_apple",
"score": 2
}
|
#### File: apple/bundling/bundling_support.bzl
```python
load(
"@build_bazel_rules_apple//apple/bundling:product_support.bzl",
"product_support",
)
def _binary_file(ctx, src, dest, executable=False):
"""Returns a bundlable file whose destination is in the binary directory.
Args:
ctx: The Skylark context.
src: The `File` artifact that should be bundled.
dest: The path within the bundle's binary directory where the file should
be placed.
executable: True if the file should be made executable.
Returns:
A bundlable file struct (see `bundling_support.bundlable_file`).
"""
return _bundlable_file(src, _path_in_binary_dir(ctx, dest), executable)
def _bundlable_file(src, dest, executable=False, contents_only=False):
"""Returns a value that represents a bundlable file or ZIP archive.
A "bundlable file" is a struct that maps a file (`"src"`) to a path within a
bundle (`"dest"`). This can be used with plain files, where `dest` denotes
the path within the bundle where the file should be placed (including its
filename, which allows it to be changed), or with ZIP archives, where `dest`
denotes the location within the bundle where the ZIP's contents should be
extracted.
Args:
src: The `File` artifact that should be bundled.
dest: The path within the bundle where the file should be placed.
executable: True if the file should be made executable.
contents_only: If `src` is a directory and this is True, then the _contents_
of the directory will be added at `dest` to the bundle; if this is
False (the default) then the directory _itself_ will be added at `dest`
to the bundle.
Returns:
A struct with `src`, `dest`, and `executable` fields representing the
bundlable file.
"""
return struct(
src=src, dest=dest, executable=executable, contents_only=contents_only)
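# Example: for a File `f`, _bundlable_file(f, "PlugIns/My.appex") evaluates to
#   struct(src = f, dest = "PlugIns/My.appex", executable = False,
#          contents_only = False)
# which is the shape that _binary_file, _contents_file, and the other
# path-specific helpers in this file ultimately produce before handing
# files to the bundler.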
def _bundlable_file_sources(bundlable_files):
"""Returns the source files from the given collection of bundlable files.
This is a convenience function that allows a set of bundlable files to be
quickly turned into a list of files that can be passed to an action's inputs,
for example.
Args:
bundlable_files: A list or set of bundlable file values (as returned by
`bundling_support.bundlable_file`).
Returns:
A `depset` containing the `File` artifacts from the given bundlable files.
"""
return depset([bf.src for bf in bundlable_files])
def _bundle_name(ctx):
"""Returns the name of the bundle.
The name of the bundle is the value of the `bundle_name` attribute if it was
given; if not, then the name of the target will be used instead.
Args:
ctx: The Skylark context.
Returns:
The bundle name.
"""
bundle_name = getattr(ctx.attr, "bundle_name", None)
if not bundle_name:
bundle_name = ctx.label.name
return bundle_name
def _bundle_extension(ctx):
"""Returns the bundle extension.
Args:
ctx: The Skylark context.
Returns:
The bundle extension.
"""
ext = getattr(ctx.attr, "bundle_extension", "")
if ext:
# When the *user* specifies the bundle extension in a public attribute, we
# do *not* require them to include the leading dot, so we add it here.
ext = "." + ext
else:
product_type = product_support.product_type(ctx)
product_type_descriptor = product_support.product_type_descriptor(
product_type)
if product_type_descriptor:
ext = product_type_descriptor.bundle_extension
return ext
def _bundle_name_with_extension(ctx):
"""Returns the name of the bundle with its extension.
Args:
ctx: The Skylark context.
Returns:
The bundle name with its extension.
"""
return _bundle_name(ctx) + _bundle_extension(ctx)
def _contents_file(ctx, src, dest, executable=False):
"""Returns a bundlable file whose destination is in the contents directory.
Args:
ctx: The Skylark context.
src: The `File` artifact that should be bundled.
dest: The path within the bundle's contents directory where the file should
be placed.
executable: True if the file should be made executable.
Returns:
A bundlable file struct (see `bundling_support.bundlable_file`).
"""
return _bundlable_file(src, _path_in_contents_dir(ctx, dest), executable)
def _embedded_bundle(path, target, verify_has_child_plist,
parent_bundle_id_reference=None):
"""Returns a value that represents an embedded bundle in another bundle.
These values are used by the bundler to indicate how dependencies that are
themselves bundles (such as extensions or frameworks) should be bundled in
the application or target that depends on them.
Args:
path: The relative path within the depender's bundle where the given bundle
should be located.
target: The target representing the embedded bundle.
    verify_has_child_plist: If True, the bundler should verify the Info.plist
        of this bundle against the parent's. That means checking that the bundle
identifier of the depender is a prefix of the bundle identifier of the
embedded bundle; checking that the version numbers are the same, etc.
parent_bundle_id_reference: A list of keys to make a keypath into this
bundle's Info.plist where the parent's bundle_id should be found. The
bundler will then ensure they match the parent's bundle_id.
Returns:
A struct with `path`, `target`, `verify_has_child_plist`, and
`parent_bundle_id_reference` fields equal to the values given in the
arguments.
"""
if parent_bundle_id_reference != None and not verify_has_child_plist:
fail("Internal Error: parent_bundle_id_reference without " +
"verify_has_child_plist does not make sense.")
return struct(
path=path, target=target, verify_has_child_plist=verify_has_child_plist,
parent_bundle_id_reference=parent_bundle_id_reference)
def _header_prefix(input_file):
"""Sets a file's bundle destination to a "Headers/" subdirectory.
Args:
input_file: The File to be bundled
Returns:
A bundlable file struct with the same File object, but whose path has been
transformed to start with "Headers/".
"""
new_path = "Headers/" + input_file.basename
return _bundlable_file(input_file, new_path)
def _path_in_binary_dir(ctx, path):
"""Makes a path relative to where the bundle's binary is stored.
On iOS/watchOS/tvOS, the binary is placed directly in the bundle's contents
directory (which itself is actually the bundle root). On macOS, the binary is
in a MacOS directory that is inside the bundle's Contents directory.
Args:
ctx: The Skylark context.
path: The path to make relative to where the bundle's binary is stored.
Returns:
The path, made relative to where the bundle's binary is stored.
"""
return _path_in_contents_dir(
ctx, ctx.attr._bundle_binary_path_format % (path or ""))
def _path_in_contents_dir(ctx, path):
"""Makes a path relative to where the bundle's contents are stored.
Contents include files such as:
* A directory of resources (which itself might be flattened into contents)
* A directory for the binary (which might be flattened)
* Directories for Frameworks and PlugIns (extensions)
* The bundle's Info.plist and PkgInfo
* The code signature
Args:
ctx: The Skylark context.
path: The path to make relative to where the bundle's contents are stored.
Returns:
The path, made relative to where the bundle's contents are stored.
"""
return ctx.attr._bundle_contents_path_format % (path or "")
def _path_in_resources_dir(ctx, path):
"""Makes a path relative to where the bundle's resources are stored.
On iOS/watchOS/tvOS, resources are placed directly in the bundle's contents
directory (which itself is actually the bundle root). On macOS, resources are
in a Resources directory that is inside the bundle's Contents directory.
Args:
ctx: The Skylark context.
path: The path to make relative to where the bundle's resources are stored.
Returns:
The path, made relative to where the bundle's resources are stored.
"""
return _path_in_contents_dir(
ctx, ctx.attr._bundle_resources_path_format % (path or ""))
def _resource_file(ctx, src, dest, executable=False, contents_only=False):
"""Returns a bundlable file whose destination is in the resources directory.
Args:
ctx: The Skylark context.
src: The `File` artifact that should be bundled.
dest: The path within the bundle's resources directory where the file
should be placed.
executable: True if the file should be made executable.
contents_only: If `src` is a directory and this is True, then the _contents_
of the directory will be added at `dest` to the bundle; if this is
False (the default) then the directory _itself_ will be added at `dest`
to the bundle.
Returns:
A bundlable file struct (see `bundling_support.bundlable_file`).
"""
return _bundlable_file(
src, _path_in_resources_dir(ctx, dest), executable, contents_only)
def _validate_bundle_id(bundle_id):
"""Ensure the valie is a valid bundle it or fail the build.
Args:
bundle_id: The string to check.
"""
# Make sure the bundle id seems like a valid one. Apple's docs for
# CFBundleIdentifier are all we have to go on, which are pretty minimal. The
  # only thing they specifically document is the character set, so the other
# two checks here are just added safety to catch likely errors by developers
# setting things up.
bundle_id_parts = bundle_id.split(".")
for part in bundle_id_parts:
if part == "":
fail("Empty segment in bundle_id: \"%s\"" % bundle_id)
if not part.isalnum():
      # The only non-alphanumeric characters allowed are '.' and '-'; '.' was
      # handled by the split(), so we just have to check for '-'.
for ch in part:
if ch != "-" and not ch.isalnum():
fail("Invalid character(s) in bundle_id: \"%s\"" % bundle_id)
if len(bundle_id_parts) < 2:
fail("bundle_id isn't at least 2 segments: \"%s\"" % bundle_id)
# Define the loadable module that lists the exported symbols in this file.
bundling_support = struct(
binary_file=_binary_file,
bundlable_file=_bundlable_file,
bundlable_file_sources=_bundlable_file_sources,
bundle_name=_bundle_name,
bundle_extension=_bundle_extension,
bundle_name_with_extension=_bundle_name_with_extension,
contents_file=_contents_file,
embedded_bundle=_embedded_bundle,
header_prefix=_header_prefix,
path_in_binary_dir=_path_in_binary_dir,
path_in_contents_dir=_path_in_contents_dir,
path_in_resources_dir=_path_in_resources_dir,
resource_file=_resource_file,
validate_bundle_id=_validate_bundle_id,
)
```
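A minimal usage sketch of the module above (not part of the original file): it assumes a rule whose `ctx` carries the private path-format attributes referenced earlier, plus hypothetical single-file attributes `infoplist` and `app_icon`.
```python
load(
    "@build_bazel_rules_apple//apple/bundling:bundling_support.bzl",
    "bundling_support",
)

def _collect_bundle_files(ctx):
  """Illustrative helper; the attribute names used here are hypothetical."""
  # Map the compiled Info.plist to the root of the bundle's contents.
  infoplist = bundling_support.contents_file(
      ctx, ctx.file.infoplist, "Info.plist")

  # Map an icon into the bundle's resources directory.
  icon = bundling_support.resource_file(
      ctx, ctx.file.app_icon, "AppIcon.png")

  bundlable_files = [infoplist, icon]

  # The underlying File artifacts, e.g. for use as an action's inputs.
  inputs = bundling_support.bundlable_file_sources(bundlable_files)
  return struct(bundlable_files=bundlable_files, inputs=inputs)
```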
#### File: apple/bundling/plist_actions.bzl
```python
load(
"@bazel_skylib//lib:paths.bzl",
"paths"
)
load(
"@build_bazel_rules_apple//apple/bundling:bundling_support.bzl",
"bundling_support",
)
load(
"@build_bazel_rules_apple//apple/bundling:file_support.bzl",
"file_support",
)
load(
"@build_bazel_rules_apple//apple/bundling:plist_support.bzl",
"plist_support",
)
load(
"@build_bazel_rules_apple//apple/bundling:platform_support.bzl",
"platform_support",
)
load(
"@build_bazel_rules_apple//apple/bundling:product_support.bzl",
"product_support",
)
load(
"@build_bazel_rules_apple//apple:providers.bzl",
"AppleBundleVersionInfo",
)
load(
"@build_bazel_rules_apple//apple:utils.bzl",
"apple_action",
)
load(
"@build_bazel_rules_apple//common:attrs.bzl",
"attrs",
)
load(
"@build_bazel_rules_apple//common:providers.bzl",
"providers",
)
def _environment_plist_action(ctx):
"""Creates an action that extracts the Xcode environment plist.
Args:
ctx: The Skylark context.
Returns:
The plist file that contains the extracted environment.
"""
platform, sdk_version = platform_support.platform_and_sdk_version(ctx)
platform_with_version = platform.name_in_plist.lower() + str(sdk_version)
environment_plist = ctx.new_file(ctx.label.name + "_environment.plist")
platform_support.xcode_env_action(
ctx,
outputs=[environment_plist],
executable=ctx.executable._environment_plist,
arguments=[
"--platform",
platform_with_version,
"--output",
environment_plist.path,
],
)
return environment_plist
def _infoplist_minimum_os_pair(ctx):
"""Returns a info.plist entry of the min OS version for the current target.
Args:
ctx: The Skylark context.
Returns:
    A dictionary containing the key/value pair to use in the target's Info.plist
to set the minimum OS version supported.
"""
if platform_support.platform_type(ctx) == apple_common.platform_type.macos:
plist_key = "LSMinimumSystemVersion"
else:
plist_key = "MinimumOSVersion"
return {plist_key: platform_support.minimum_os(ctx)}
def _merge_infoplists(ctx,
path_prefix,
input_plists,
bundle_id=None,
child_plists=[],
child_required_values=[],
exclude_executable_name=False,
extract_from_ctxt=False,
include_xcode_env=False,
resource_bundle_target_data=None,
version_keys_required=False):
"""Creates an action that merges Info.plists and converts them to binary.
This action merges multiple plists by shelling out to plisttool, then
compiles the final result into a single binary plist file.
Args:
ctx: The Skylark context.
path_prefix: A path prefix to apply in front of any intermediate files.
input_plists: The plist files to merge.
bundle_id: The bundle identifier to set in the output plist.
child_plists: A list of plists from child targets (such as extensions
or Watch apps) whose bundle IDs and version strings should be
validated against the compiled plist for consistency.
    child_required_values: A list of pairs, each containing a child target's
        plist and the pairs to check. For more information on the second item
        in each pair, see plisttool's `child_plist_required_values`, as this
        is passed straight through to it.
exclude_executable_name: If True, the executable name will not be added to
the plist in the `CFBundleExecutable` key. This is mainly intended for
plists embedded in a command line tool.
    extract_from_ctxt: If True, the ctx will also be inspected for additional
        information values to be added into the final Info.plist. The ctx
        will also be checked to see if a PkgInfo file should be created.
    include_xcode_env: If True, the development environment and platform
        info will be added to the plist (just like Xcode does).
    resource_bundle_target_data: If this is for a resource bundle, the
AppleResourceBundleTargetData of the target that defined it. Will be
used to provide substitution values.
version_keys_required: If True, the merged Info.plist file must include
entries for CFBundleShortVersionString and CFBundleVersion.
Returns:
A struct with two fields: `output_plist`, a File object containing the
merged binary plist, and `pkginfo`, a File object containing the PkgInfo
file (or None, if no file was generated).
"""
if exclude_executable_name and not extract_from_ctxt:
fail('exclude_executable_name has no meaning without extract_from_ctxt.')
if resource_bundle_target_data and extract_from_ctxt:
fail("resource_bundle_target_data doesn't work with extract_from_ctxt.")
outputs = []
plists = [p.path for p in input_plists]
forced_plists = []
additional_plisttool_inputs = []
pkginfo = None
info_plist_options = {}
substitutions = {}
if version_keys_required:
info_plist_options["version_keys_required"] = True
if bundle_id:
substitutions["PRODUCT_BUNDLE_IDENTIFIER"] = bundle_id
# Pass the bundle_id as a plist and not a force_plist, this way the
# merging will validate that any existing value matches. Historically
# mismatches between the input Info.plist and rules bundle_id have
# been valid bugs, so this will still catch that.
plists.append(struct(CFBundleIdentifier=bundle_id))
output_plist = file_support.intermediate(
ctx, "%{name}-Info-binary.plist", prefix=path_prefix)
outputs.append(output_plist)
if child_plists:
for_control = struct(
**{str(p.owner): p.path for p in child_plists})
info_plist_options["child_plists"] = for_control
if child_required_values:
for_control = struct(
**{str(p.owner): v for (p, v) in child_required_values})
info_plist_options["child_plist_required_values"] = for_control
if resource_bundle_target_data:
substitutions["PRODUCT_NAME"] = resource_bundle_target_data.product_name
substitutions["BUNDLE_NAME"] = resource_bundle_target_data.bundle_name
if extract_from_ctxt:
# Extra things for info_plist_options
name = bundling_support.bundle_name(ctx)
substitutions["PRODUCT_NAME"] = name
if not exclude_executable_name:
substitutions["EXECUTABLE_NAME"] = name
forced_plists.append(struct(CFBundleExecutable=name))
if ctx.attr._needs_pkginfo:
pkginfo = file_support.intermediate(
ctx, "%{name}-PkgInfo", prefix=path_prefix)
outputs.append(pkginfo)
info_plist_options["pkginfo"] = pkginfo.path
bundle_name = bundling_support.bundle_name_with_extension(ctx)
substitutions["BUNDLE_NAME"] = bundle_name
version_info = providers.find_one(
attrs.get(ctx.attr, "version"), AppleBundleVersionInfo)
if version_info:
additional_plisttool_inputs.append(version_info.version_file)
info_plist_options["version_file"] = version_info.version_file.path
# Keys to be forced into the Info.plist file.
# b/67853874 - move this to the right platform specific rule(s).
launch_storyboard = attrs.get(ctx.file, "launch_storyboard")
if launch_storyboard:
short_name = paths.split_extension(launch_storyboard.basename)[0]
forced_plists.append(struct(UILaunchStoryboardName=short_name))
# Add any UIDeviceFamily entry needed.
families = platform_support.ui_device_family_plist_value(ctx)
if families:
forced_plists.append(struct(UIDeviceFamily=families))
# Collect any values for special product types that we have to manually put
# in (duplicating what Xcode apparently does under the hood).
product_type = product_support.product_type(ctx)
product_type_descriptor = product_support.product_type_descriptor(
product_type)
if product_type_descriptor:
if product_type_descriptor.additional_infoplist_values:
forced_plists.append(
struct(**product_type_descriptor.additional_infoplist_values)
)
if include_xcode_env:
environment_plist = _environment_plist_action(ctx)
additional_plisttool_inputs.append(environment_plist)
platform, sdk_version = platform_support.platform_and_sdk_version(ctx)
platform_with_version = platform.name_in_plist.lower() + str(sdk_version)
min_os_pair = _infoplist_minimum_os_pair(ctx)
forced_plists += [
environment_plist.path,
struct(
CFBundleSupportedPlatforms=[platform.name_in_plist],
DTPlatformName=platform.name_in_plist.lower(),
DTSDKName=platform_with_version,
**min_os_pair
),
]
# The default in Xcode is for PRODUCT_NAME and TARGET_NAME to be the same.
# Support TARGET_NAME for substitutions even though it might not be the
# target name in the BUILD file.
product_name = substitutions.get("PRODUCT_NAME")
if product_name:
substitutions["TARGET_NAME"] = product_name
  # Tweak what is passed for 'target' to provide more complete error messages
  # if something does go wrong.
if resource_bundle_target_data:
target = '%s (while bundling under "%s")' % (
str(resource_bundle_target_data.label), str(ctx.label))
else:
target = str(ctx.label)
control = struct(
plists=plists,
forced_plists=forced_plists,
output=output_plist.path,
binary=True,
info_plist_options=struct(**info_plist_options),
variable_substitutions=struct(**substitutions),
target=target,
)
control_file = file_support.intermediate(
ctx, "%{name}.plisttool-control", prefix=path_prefix)
ctx.file_action(
output=control_file,
content=control.to_json()
)
plist_support.plisttool_action(
ctx,
inputs=input_plists + child_plists + additional_plisttool_inputs,
outputs=outputs,
control_file=control_file,
mnemonic="CompileInfoPlist",
)
return struct(output_plist=output_plist, pkginfo=pkginfo)
# Define the loadable module that lists the exported symbols in this file.
plist_actions = struct(
merge_infoplists=_merge_infoplists,
)
```
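For orientation, the control struct that `_merge_infoplists` writes is serialized to JSON and consumed by plisttool; a dictionary of the same shape is sketched below. The field names mirror the struct built above, while the concrete paths and values are illustrative only (the unit tests later in this document pass equivalent dictionaries directly to `plisttool.PlistTool`).
```python
# Illustrative plisttool control dictionary; values are made up for the example.
control = {
    'plists': [
        'app/Info.plist',                             # plist file to merge
        {'CFBundleIdentifier': 'com.example.MyApp'},  # inline plist fragment
    ],
    'forced_plists': [
        {'CFBundleExecutable': 'MyApp'},              # always wins on collision
    ],
    'variable_substitutions': {
        'PRODUCT_NAME': 'MyApp',
        'EXECUTABLE_NAME': 'MyApp',
        'BUNDLE_NAME': 'MyApp.app',
    },
    'info_plist_options': {
        'version_keys_required': True,
        'pkginfo': 'MyApp-PkgInfo',
    },
    'output': 'MyApp-Info-binary.plist',
    'binary': True,
    'target': '//app:MyApp',
}
```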
#### File: rules_apple/common/path_utils.bzl
```python
load("@bazel_skylib//lib:paths.bzl",
"paths")
def _bundle_relative_path(f):
"""Returns the portion of `f`'s path relative to its containing `.bundle`.
This function fails if `f` does not have an ancestor directory named with the
`.bundle` extension.
Args:
f: A file.
Returns:
The `.bundle`-relative path to the file.
"""
return paths.relativize(
f.short_path, _farthest_directory_matching(f.short_path, "bundle"))
def _farthest_directory_matching(path, extension):
"""Returns the part of a path with the given extension closest to the root.
For example, if `path` is `"foo/bar.bundle/baz.bundle"`, passing `".bundle"`
as the extension will return `"foo/bar.bundle"`.
Args:
path: The path.
extension: The extension of the directory to find.
Returns:
The portion of the path that ends in the given extension that is closest
to the root of the path.
"""
prefix, ext, _ = path.partition("." + extension)
if ext:
return prefix + ext
fail("Expected path %r to contain %r, but it did not" % (
path, "." + extension))
def _owner_relative_path(f):
"""Returns the portion of `f`'s path relative to its owner.
Args:
f: A file.
Returns:
The owner-relative path to the file.
"""
if f.is_source:
    # Even though the docs say a File's `short_path` doesn't include the
# root, Bazel special cases anything that is external and includes a
# relative path (../) to the file. On the File's `owner` we can get the
# `workspace_root` to try and line things up, but it is in the form of
# "external/[name]". However the File's `path` does include the root and
# leaves it in the "externa/" form.
return paths.relativize(f.path,
paths.join(f.owner.workspace_root, f.owner.package))
elif f.owner.workspace_root:
    # Just like the above comment but for generated files: the same mangling
    # happens in `short_path`, but since it is generated, the `path` includes
# the extra output directories bazel makes. So pick off what bazel will do
# to the `short_path` ("../"), and turn it into an "external/" so a
# relative path from the owner can be calculated.
workspace_root = f.owner.workspace_root
short_path = f.short_path
if (not workspace_root.startswith("external/") or
not short_path.startswith("../")):
fail(("Generated file in a different workspace with unexpected " +
"short_path (%s) and owner.workspace_root (%r).") % (
short_path, workspace_root))
return paths.relativize("external" + short_path[2:],
paths.join(f.owner.workspace_root, f.owner.package))
else:
return paths.relativize(f.short_path, f.owner.package)
# Define the loadable module that lists the exported symbols in this file.
path_utils = struct(
bundle_relative_path=_bundle_relative_path,
owner_relative_path=_owner_relative_path,
)
```
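As a quick, string-level sketch of the path arithmetic above (a sketch only; `path_utils` itself operates on `File` objects, and the path below is hypothetical), the docstring example for `_farthest_directory_matching` plays out like this:
```python
# Reproduces the partition logic of _farthest_directory_matching on a plain
# string and shows the resulting .bundle-relative path.
def farthest_directory_matching(path, extension):
  prefix, ext, _ = path.partition("." + extension)
  if not ext:
    raise ValueError("Expected %r to contain %r" % (path, "." + extension))
  return prefix + ext

short_path = "foo/bar.bundle/baz.bundle/image.png"
bundle_dir = farthest_directory_matching(short_path, "bundle")
assert bundle_dir == "foo/bar.bundle"
# Relativizing short_path against bundle_dir yields the bundle-relative path.
assert short_path[len(bundle_dir) + 1:] == "baz.bundle/image.png"
```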
#### File: tools/plisttool/plisttool_unittest.py
```python
import datetime
import json
import plistlib
import random
import re
import StringIO
import unittest
import plisttool
# Used as the target name for all tests.
_testing_target = '//plisttool:tests'
def _xml_plist(content):
"""Returns a StringIO for a plist with the given content.
This helper function wraps plist XML (key/value pairs) in the necessary XML
boilerplate for a plist with a root dictionary.
Args:
content: The XML content of the plist, which will be inserted into a
dictionary underneath the root |plist| element.
Returns:
A StringIO object containing the full XML text of the plist.
"""
xml = ('<?xml version="1.0" encoding="UTF-8"?>\n'
'<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" '
'"http://www.apple.com/DTDs/PropertyList-1.0.dtd">\n'
'<plist version="1.0">\n'
'<dict>\n' +
content + '\n' +
'</dict>\n'
'</plist>\n')
return StringIO.StringIO(xml)
def _plisttool_result(control):
"""Helper function that runs PlistTool with the given control struct.
This function inserts a StringIO object as the control's "output" key and
returns the dictionary containing the result of the tool after parsing it
from that StringIO.
Args:
control: The control struct to pass to PlistTool. See the module doc for
the plisttool module for a description of this format.
Returns:
The dictionary containing the result of the tool after parsing it from
the in-memory string file.
"""
output = StringIO.StringIO()
control['output'] = output
control['target'] = _testing_target
tool = plisttool.PlistTool(control)
tool.run()
return plistlib.readPlistFromString(output.getvalue())
class PlistToolVariableReferenceTest(unittest.TestCase):
def _assert_result(self, s, expected):
"""Asserts string is the expected variable reference."""
m = plisttool.VARIABLE_REFERENCE_RE.match(s)
    # Verify the regex matched the whole string.
self.assertEqual(m.group(0), s)
self.assertEqual(plisttool.ExtractVariableFromMatch(m), expected)
def _assert_invalid(self, s):
"""Asserts string is not a valid variable reference."""
self._assert_result(s, None)
def test_valid_parens(self):
self._assert_result('$(foo)', 'foo')
self._assert_result('$(FOO12)', 'FOO12')
self._assert_result('$(PRODUCT_NAME:rfc1034identifier)',
'PRODUCT_NAME:rfc1034identifier')
def test_valid_braces(self):
self._assert_result('${foo}', 'foo')
self._assert_result('${FOO12}', 'FOO12')
self._assert_result('${PRODUCT_NAME:rfc1034identifier}',
'PRODUCT_NAME:rfc1034identifier')
def test_empty_reference(self):
self._assert_invalid('$()')
self._assert_invalid('${}')
def test_mismatched_bracing(self):
self._assert_invalid('${foo)')
self._assert_invalid('$(foo}')
def test_invalid_names(self):
self._assert_invalid('${no space}')
self._assert_invalid('${no-hypens}')
def test_unknown_qualifier(self):
self._assert_invalid('${foo:mumble}')
self._assert_invalid('${foo:rfc666dentifier}')
def test_missing_closer(self):
# Valid, just missing the closer...
self._assert_invalid('$(foo')
self._assert_invalid('$(FOO12')
self._assert_invalid('$(PRODUCT_NAME:rfc1034identifier')
self._assert_invalid('${foo')
self._assert_invalid('${FOO12')
self._assert_invalid('${PRODUCT_NAME:rfc1034identifier')
# Invalid and missing the closer...
self._assert_invalid('${no space')
self._assert_invalid('${no-hypens')
self._assert_invalid('${foo:mumble')
self._assert_invalid('${foo:rfc666dentifier')
class PlistToolVersionStringTest(unittest.TestCase):
def _assert_valid(self, s):
self.assertEqual(plisttool.IsValidVersionString(s), True)
def _assert_invalid(self, s):
self.assertEqual(plisttool.IsValidVersionString(s), False)
def test_all_good(self):
self._assert_valid('1')
self._assert_valid('1.2')
self._assert_valid('1.2.3')
self._assert_valid('1.2.3.4')
self._assert_valid('1.0')
self._assert_valid('1.0.0')
self._assert_valid('1.0.0.0')
self._assert_valid('1.0.3')
self._assert_valid('10.11.12')
self._assert_valid('10.11.12.13')
def test_non_numbers(self):
self._assert_invalid('abc')
self._assert_invalid('abc1')
self._assert_invalid('1abc')
self._assert_invalid('1abc1')
self._assert_invalid('1.abc')
self._assert_invalid('1.1abc')
self._assert_invalid('1.abc1')
self._assert_invalid('1.1abc1')
self._assert_invalid('abc.1')
self._assert_invalid('1.abc.2')
def test_to_many_segments(self):
self._assert_invalid('1.2.3.4.5')
self._assert_invalid('1.2.3.4.0')
self._assert_invalid('1.2.3.4.5.6')
def test_to_badly_formed(self):
self._assert_invalid('1.')
self._assert_invalid('1.2.')
self._assert_invalid('1.2.3.')
self._assert_invalid('1.2.3.4.')
self._assert_invalid('.1')
self._assert_invalid('.1.2')
self._assert_invalid('.1.2.3')
self._assert_invalid('.1.2.3.4')
self._assert_invalid('1..3')
self._assert_invalid('1.2..4')
def test_to_other_punct(self):
self._assert_invalid('1,2')
self._assert_invalid('1$2')
self._assert_invalid('1:2')
def test_to_long(self):
self._assert_invalid('123456789.123456789')
self._assert_invalid('1234.6789.123456789')
self._assert_invalid('1234.6789.1234.6789')
def test_all_good_padded(self):
self._assert_valid('01')
self._assert_valid('01.1')
self._assert_valid('01.01')
self._assert_valid('01.0.1')
self._assert_valid('01.0.01')
self._assert_valid('1.00')
self._assert_valid('1.0.00')
self._assert_valid('1.001')
self._assert_valid('1.0.001')
def test_all_good_with_tracks(self):
self._assert_valid('1a1')
self._assert_valid('1.2d12')
self._assert_valid('1.2.3b7')
self._assert_valid('1.0fc100')
self._assert_valid('1.0.0b7')
self._assert_valid('1.0.3fc1')
self._assert_valid('10.11.12d123')
def test_invalid_tracks(self):
self._assert_invalid('1a0')
self._assert_invalid('1a')
self._assert_invalid('1.2d')
self._assert_invalid('1.2d01')
self._assert_invalid('1.2.3b')
self._assert_invalid('1.2.3b1234')
self._assert_invalid('1.0fc')
self._assert_invalid('1.0fc256')
class PlistToolShortVersionStringTest(unittest.TestCase):
def _assert_valid(self, s):
self.assertEqual(plisttool.IsValidShortVersionString(s), True)
def _assert_invalid(self, s):
self.assertEqual(plisttool.IsValidShortVersionString(s), False)
def test_all_good(self):
self._assert_valid('1')
self._assert_valid('1.2')
self._assert_valid('1.2.3')
self._assert_valid('1.2.3.4')
self._assert_valid('1.0')
self._assert_valid('1.0.0')
self._assert_valid('1.0.0.0')
self._assert_valid('1.0.3')
self._assert_valid('10.11.12')
self._assert_valid('10.11.12.13')
def test_non_numbers(self):
self._assert_invalid('abc')
self._assert_invalid('abc1')
self._assert_invalid('1abc')
self._assert_invalid('1abc1')
self._assert_invalid('1.abc')
self._assert_invalid('1.1abc')
self._assert_invalid('1.abc1')
self._assert_invalid('1.1abc1')
self._assert_invalid('abc.1')
self._assert_invalid('1.abc.2')
def test_to_many_segments(self):
self._assert_invalid('1.2.3.4.5')
self._assert_invalid('1.2.3.4.0')
self._assert_invalid('1.2.3.4.5.6')
def test_to_badly_formed(self):
self._assert_invalid('1.')
self._assert_invalid('1.2.')
self._assert_invalid('1.2.3.')
self._assert_invalid('1.2.3.4.')
self._assert_invalid('.1')
self._assert_invalid('.1.2')
self._assert_invalid('.1.2.3')
self._assert_invalid('.1.2.3.4')
self._assert_invalid('1..3')
self._assert_invalid('1.2..4')
def test_to_other_punct(self):
self._assert_invalid('1,2')
self._assert_invalid('1$2')
self._assert_invalid('1:2')
def test_to_long(self):
self._assert_invalid('123456789.123456789')
self._assert_invalid('1234.6789.123456789')
self._assert_invalid('1234.6789.1234.6789')
def test_all_good_padded(self):
self._assert_valid('01')
self._assert_valid('01.1')
self._assert_valid('01.01')
self._assert_valid('01.0.1')
self._assert_valid('01.0.01')
self._assert_valid('1.00')
self._assert_valid('1.0.00')
self._assert_valid('1.001')
self._assert_valid('1.0.001')
def test_all_good_with_tracks_are_bad(self):
self._assert_invalid('1a1')
self._assert_invalid('1.2d12')
self._assert_invalid('1.2.3b7')
self._assert_invalid('1.0fc100')
self._assert_invalid('1.0.0b7')
self._assert_invalid('1.0.3fc1')
self._assert_invalid('10.11.12d123')
class PlistToolGetWithKeyPath(unittest.TestCase):
def test_one_level(self):
d = { 'a': 'A', 'b': 2, 3: 'c', 'list': [ 'x', 'y' ], 'dict': { 1: 2, 3: 4} }
self.assertEqual(plisttool.GetWithKeyPath(d, ['a']), 'A')
self.assertEqual(plisttool.GetWithKeyPath(d, ['b']), 2)
self.assertEqual(plisttool.GetWithKeyPath(d, [3]), 'c')
self.assertEqual(plisttool.GetWithKeyPath(d, ['list']), ['x', 'y'])
self.assertEqual(plisttool.GetWithKeyPath(d, ['dict']), {1:2, 3:4})
def test_two_level(self):
d = { 'list': [ 'x', 'y' ], 'dict': { 1: 2, 3: 4} }
self.assertEqual(plisttool.GetWithKeyPath(d, ['list', 1]), 'y')
self.assertEqual(plisttool.GetWithKeyPath(d, ['dict', 3]), 4)
def test_deep(self):
d = { 1: { 'a': ['c', [4, 'e']]}}
self.assertEqual(plisttool.GetWithKeyPath(d, [1, 'a', 1, 1]), 'e')
def test_misses(self):
d = { 'list': [ 'x', 'y' ], 'dict': { 1: 2, 3: 4} }
self.assertEqual(plisttool.GetWithKeyPath(d, ['not_found']), None)
self.assertEqual(plisttool.GetWithKeyPath(d, [99]), None)
self.assertEqual(plisttool.GetWithKeyPath(d, ['list', 99]), None)
self.assertEqual(plisttool.GetWithKeyPath(d, ['dict', 'not_found']), None)
self.assertEqual(plisttool.GetWithKeyPath(d, ['dict', 99]), None)
def test_invalids(self):
d = { 'list': [ 'x', 'y' ], 'str': 'foo', 'int': 42 }
self.assertEqual(plisttool.GetWithKeyPath(d, ['list', 'not_int']), None)
self.assertEqual(plisttool.GetWithKeyPath(d, ['str', 'nope']), None)
self.assertEqual(plisttool.GetWithKeyPath(d, ['str', 99]), None)
self.assertEqual(plisttool.GetWithKeyPath(d, ['int', 'nope']), None)
self.assertEqual(plisttool.GetWithKeyPath(d, ['int', 99]), None)
class PlistToolTest(unittest.TestCase):
def _assert_plisttool_result(self, control, expected):
"""Asserts that PlistTool's result equals the expected dictionary.
Args:
control: The control struct to pass to PlistTool. See the module doc for
the plisttool module for a description of this format.
expected: The dictionary that represents the expected result from running
PlistTool.
"""
outdict = _plisttool_result(control)
self.assertEqual(expected, outdict)
def _assert_pkginfo(self, plist, expected):
"""Asserts that PlistTool generates the expected PkgInfo file contents.
Args:
plist: The plist file from which to obtain the PkgInfo values.
expected: The expected 8-byte string written to the PkgInfo file.
"""
pkginfo = StringIO.StringIO()
control = {
'plists': [plist],
'output': StringIO.StringIO(),
'target': _testing_target,
'info_plist_options': {'pkginfo': pkginfo},
}
tool = plisttool.PlistTool(control)
tool.run()
self.assertEqual(expected, pkginfo.getvalue())
def test_merge_of_one_file(self):
plist1 = _xml_plist('<key>Foo</key><string>abc</string>')
self._assert_plisttool_result({'plists': [plist1]}, {'Foo': 'abc'})
def test_merge_of_one_dict(self):
plist1 = {'Foo': 'abc'}
self._assert_plisttool_result({'plists': [plist1]}, {'Foo': 'abc'})
def test_merge_of_one_empty_file(self):
plist1 = _xml_plist('')
self._assert_plisttool_result({'plists': [plist1]}, {})
def test_merge_of_one_empty_dict(self):
plist1 = {}
self._assert_plisttool_result({'plists': [plist1]}, {})
def test_merge_of_two_files(self):
plist1 = _xml_plist('<key>Foo</key><string>abc</string>')
plist2 = _xml_plist('<key>Bar</key><string>def</string>')
self._assert_plisttool_result({'plists': [plist1, plist2]}, {
'Foo': 'abc',
'Bar': 'def',
})
def test_merge_of_file_and_dict(self):
plist1 = _xml_plist('<key>Foo</key><string>abc</string>')
plist2 = {'Bar': 'def'}
self._assert_plisttool_result({'plists': [plist1, plist2]}, {
'Foo': 'abc',
'Bar': 'def',
})
def test_merge_of_two_dicts(self):
plist1 = {'Foo': 'abc'}
plist2 = {'Bar': 'def'}
self._assert_plisttool_result({'plists': [plist1, plist2]}, {
'Foo': 'abc',
'Bar': 'def',
})
def test_merge_where_one_file_is_empty(self):
plist1 = _xml_plist('<key>Foo</key><string>abc</string>')
plist2 = _xml_plist('')
self._assert_plisttool_result({'plists': [plist1, plist2]}, {'Foo': 'abc'})
def test_merge_where_one_dict_is_empty(self):
plist1 = {'Foo': 'abc'}
plist2 = {}
self._assert_plisttool_result({'plists': [plist1, plist2]}, {'Foo': 'abc'})
def test_merge_where_both_files_are_empty(self):
plist1 = _xml_plist('')
plist2 = _xml_plist('')
self._assert_plisttool_result({'plists': [plist1, plist2]}, {})
def test_merge_where_both_dicts_are_empty(self):
plist1 = {}
plist2 = {}
self._assert_plisttool_result({'plists': [plist1, plist2]}, {})
def test_more_complicated_merge(self):
plist1 = _xml_plist(
'<key>String1</key><string>abc</string>'
'<key>Integer1</key><integer>123</integer>'
'<key>Array1</key><array><string>a</string><string>b</string></array>'
)
plist2 = _xml_plist(
'<key>String2</key><string>def</string>'
'<key>Integer2</key><integer>987</integer>'
'<key>Dictionary2</key><dict>'
'<key>k1</key><string>a</string>'
'<key>k2</key><string>b</string>'
'</dict>'
)
plist3 = _xml_plist(
'<key>String3</key><string>ghi</string>'
'<key>Integer3</key><integer>465</integer>'
'<key>Bundle</key><string>this.is.${BUNDLE_NAME}.bundle</string>'
)
self._assert_plisttool_result({
'plists': [plist1, plist2, plist3],
'variable_substitutions': {
'BUNDLE_NAME': 'my'
},
}, {
'String1': 'abc',
'Integer1': 123,
'Array1': ['a', 'b'],
'String2': 'def',
'Integer2': 987,
'Dictionary2': {'k1': 'a', 'k2': 'b'},
'String3': 'ghi',
'Integer3': 465,
'Bundle': 'this.is.my.bundle',
})
def test_merge_with_forced_plist_overrides_on_collisions(self):
plist1 = {'Foo': 'bar'}
plist2 = {'Foo': 'baz'}
self._assert_plisttool_result({
'plists': [plist1],
'forced_plists': [plist2],
}, {'Foo': 'baz'})
def test_merge_with_forced_plists_with_same_key_keeps_last_one(self):
plist1 = {'Foo': 'bar'}
plist2 = {'Foo': 'baz'}
plist3 = {'Foo': 'quux'}
self._assert_plisttool_result({
'plists': [plist1],
'forced_plists': [plist2, plist3],
}, {'Foo': 'quux'})
def test_invalid_variable_substitution_name_space(self):
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(plisttool.INVALID_SUBSTITUTION_VARIABLE_NAME % (
_testing_target, 'foo bar'))):
_plisttool_result({
'plists': [{}],
'variable_substitutions': {
'foo bar': 'bad name',
},
})
def test_invalid_variable_substitution_name_hyphen(self):
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(plisttool.INVALID_SUBSTITUTION_VARIABLE_NAME % (
_testing_target, 'foo-bar'))):
_plisttool_result({
'plists': [{}],
'variable_substitutions': {
'foo-bar': 'bad name',
},
})
def test_invalid_variable_substitution_name_qualifier(self):
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(plisttool.INVALID_SUBSTITUTION_VARIABLE_NAME % (
_testing_target, 'foo:bar'))):
_plisttool_result({
'plists': [{}],
'variable_substitutions': {
'foo:bar': 'bad name',
},
})
def test_invalid_variable_substitution_name_rfc_qualifier(self):
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(plisttool.SUBSTITUTION_VARIABLE_CANT_HAVE_QUALIFIER % (
_testing_target, 'foo:rfc1034identifier'))):
_plisttool_result({
'plists': [{}],
'variable_substitutions': {
'foo:rfc1034identifier': 'bad name',
},
})
def test_both_types_variable_substitutions(self):
plist1 = _xml_plist(
'<key>FooBraces</key><string>${TARGET_NAME}</string>'
'<key>BarBraces</key><string>${PRODUCT_NAME}</string>'
'<key>FooParens</key><string>$(TARGET_NAME)</string>'
'<key>BarParens</key><string>$(PRODUCT_NAME)</string>'
)
outdict = _plisttool_result({
'plists': [plist1],
'variable_substitutions': {
'PRODUCT_NAME': 'MyApp',
'TARGET_NAME': 'MyApp',
},
})
self.assertEqual('MyApp', outdict.get('FooBraces'))
self.assertEqual('MyApp', outdict.get('BarBraces'))
self.assertEqual('MyApp', outdict.get('FooParens'))
self.assertEqual('MyApp', outdict.get('BarParens'))
def test_rfc1034_conversion(self):
plist1 = _xml_plist(
'<key>Foo</key><string>${PRODUCT_NAME:rfc1034identifier}</string>'
)
outdict = _plisttool_result({
'plists': [plist1],
'variable_substitutions': {
'PRODUCT_NAME': 'foo_bar?baz'
},
})
self.assertEqual('foo-bar-baz', outdict.get('Foo'))
def test_raw_substitutions(self):
plist1 = _xml_plist(
'<key>One</key><string>RAW1</string>'
'<key>Two</key><string>RAW2</string>'
'<key>SpaceOneSpaceTwoSpace</key><string> RAW1 RAW2 </string>'
'<key>OneTwoOneTwo</key><string>RAW1RAW2RAW1RAW2</string>'
'<key>XOneX</key><string>XRAW1X</string>'
'<key>XTwoX</key><string>XRAW2X</string>'
)
outdict = _plisttool_result({
'plists': [plist1],
'raw_substitutions': {
'RAW1': 'a',
'RAW2': 'b',
},
})
self.assertEqual('a', outdict.get('One'))
self.assertEqual('b', outdict.get('Two'))
self.assertEqual(' a b ', outdict.get('SpaceOneSpaceTwoSpace'))
self.assertEqual('abab', outdict.get('OneTwoOneTwo'))
self.assertEqual('XaX', outdict.get('XOneX'))
self.assertEqual('XbX', outdict.get('XTwoX'))
def test_raw_substitutions_overlap_raw(self):
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(plisttool.OVERLAP_IN_SUBSTITUTION_KEYS % (
_testing_target, 'mum', 'mumble'))):
_plisttool_result({
'plists': [{}],
'raw_substitutions': {
'mumble': 'value1',
'mum': 'value2',
},
})
def test_raw_substitutions_overlap_variable(self):
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(plisttool.OVERLAP_IN_SUBSTITUTION_KEYS % (
_testing_target, '$(mumble)', 'mum'))):
_plisttool_result({
'plists': [{}],
'variable_substitutions': {
'mumble': 'value1',
},
'raw_substitutions': {
'mum': 'value2',
},
})
def test_raw_substitutions_key_in_value(self):
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(plisttool.RAW_SUBSTITUTION_KEY_IN_VALUE % (
_testing_target, 'value', '1value2', 'mumble'))):
_plisttool_result({
'plists': [{}],
'raw_substitutions': {
'mumble': '1value2',
'value': 'spam',
},
})
def test_nonexistant_variable_substitution(self):
plist1 = {
'FooBraces': 'A-${NOT_A_VARIABLE}-B'
}
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(plisttool.UNKNOWN_SUBSTITUTATION_REFERENCE_MSG % (
_testing_target, '${NOT_A_VARIABLE}', 'FooBraces',
'A-${NOT_A_VARIABLE}-B'))):
_plisttool_result({'plists': [plist1]})
plist2 = {
'FooParens': '$(NOT_A_VARIABLE)'
}
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(plisttool.UNKNOWN_SUBSTITUTATION_REFERENCE_MSG % (
_testing_target, '$(NOT_A_VARIABLE)', 'FooParens',
'$(NOT_A_VARIABLE)'))):
_plisttool_result({'plists': [plist2]})
# Nested dict, will include the keypath.
plist3 = {
'Key1': {
'Key2': 'foo.bar.$(PRODUCT_NAME:rfc1034identifier)'
}
}
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(plisttool.UNKNOWN_SUBSTITUTATION_REFERENCE_MSG % (
_testing_target, '$(PRODUCT_NAME:rfc1034identifier)',
'Key1:Key2', 'foo.bar.$(PRODUCT_NAME:rfc1034identifier)'))):
_plisttool_result({'plists': [plist3]})
# Array, will include the keypath.
plist3 = {
'Key': [
'this one is ok',
'foo.bar.$(PRODUCT_NAME:rfc1034identifier)'
]
}
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(plisttool.UNKNOWN_SUBSTITUTATION_REFERENCE_MSG % (
_testing_target, '$(PRODUCT_NAME:rfc1034identifier)',
'Key[1]', 'foo.bar.$(PRODUCT_NAME:rfc1034identifier)'))):
_plisttool_result({'plists': [plist3]})
def test_variable_substitution_in_key(self):
plist1 = {
'Foo${Braces}': 'Bar'
}
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(plisttool.UNSUPPORTED_SUBSTITUTATION_REFERENCE_IN_KEY_MSG % (
_testing_target, '${Braces}', 'Foo${Braces}'))):
_plisttool_result({'plists': [plist1]})
plist2 = {
'Foo$(Parens)': 'Bar'
}
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(plisttool.UNSUPPORTED_SUBSTITUTATION_REFERENCE_IN_KEY_MSG % (
_testing_target, '$(Parens)', 'Foo$(Parens)'))):
_plisttool_result({'plists': [plist2]})
# Nested dict, will include the keypath.
plist3 = {
'Key1': {
'Key${2}': 'value'
}
}
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(plisttool.UNSUPPORTED_SUBSTITUTATION_REFERENCE_IN_KEY_MSG % (
_testing_target, '${2}', 'Key1:Key${2}'))):
_plisttool_result({'plists': [plist3]})
# Array (of dict), will include the keypath.
plist3 = {
'Key1': [
{'Foo': 'Bar'},
{'Key${2}': 'value'},
]
}
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(plisttool.UNSUPPORTED_SUBSTITUTATION_REFERENCE_IN_KEY_MSG % (
_testing_target, '${2}', 'Key1[1]:Key${2}'))):
_plisttool_result({'plists': [plist3]})
def test_invalid_variable_substitution(self):
plist1 = {
'Foo': 'foo.${INVALID_REFERENCE).bar'
}
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(plisttool.INVALID_SUBSTITUTATION_REFERENCE_MSG % (
_testing_target, '${INVALID_REFERENCE)', 'Foo',
'foo.${INVALID_REFERENCE).bar'))):
_plisttool_result({'plists': [plist1]})
def test_multiple_substitutions(self):
plist1 = _xml_plist(
'<key>Foo</key>'
'<string>${PRODUCT_NAME}--A_RAW_SUB--${EXECUTABLE_NAME}</string>'
)
outdict = _plisttool_result({
'plists': [plist1],
'variable_substitutions': {
'EXECUTABLE_NAME': 'MyExe',
'PRODUCT_NAME': 'MyApp',
},
'raw_substitutions': {
'A_RAW_SUB': 'MyBundle',
},
})
self.assertEqual('MyApp--MyBundle--MyExe', outdict.get('Foo'))
def test_recursive_substitutions(self):
plist1 = _xml_plist(
'<key>Foo</key>'
'<dict>'
' <key>Foo1</key>'
' <string>${BUNDLE_NAME}</string>'
' <key>Foo2</key>'
' <string>RAW_NAME</string>'
' <key>Foo3</key>'
' <array>'
' <string>${BUNDLE_NAME}</string>'
' <string>RAW_NAME</string>'
' </array>'
'</dict>'
'<key>Bar</key>'
'<array>'
' <string>${BUNDLE_NAME}</string>'
' <string>RAW_NAME</string>'
' <dict>'
' <key>Baz</key>'
' <string>${BUNDLE_NAME}</string>'
' <key>Baz2</key>'
' <string>RAW_NAME</string>'
' </dict>'
'</array>'
)
outdict = _plisttool_result({
'plists': [plist1],
'variable_substitutions': {
'BUNDLE_NAME': 'MyBundle',
},
'raw_substitutions': {
'RAW_NAME': 'MyValue',
},
})
self.assertEqual('MyBundle', outdict.get('Foo').get('Foo1'))
self.assertEqual('MyValue', outdict.get('Foo').get('Foo2'))
self.assertEqual('MyBundle', outdict.get('Foo').get('Foo3')[0])
self.assertEqual('MyValue', outdict.get('Foo').get('Foo3')[1])
self.assertEqual('MyBundle', outdict.get('Bar')[0])
self.assertEqual('MyValue', outdict.get('Bar')[1])
self.assertEqual('MyBundle', outdict.get('Bar')[2].get('Baz'))
self.assertEqual('MyValue', outdict.get('Bar')[2].get('Baz2'))
def test_keys_with_same_values_do_not_raise_error(self):
plist1 = _xml_plist('<key>Foo</key><string>Bar</string>')
plist2 = _xml_plist('<key>Foo</key><string>Bar</string>')
self._assert_plisttool_result({'plists': [plist1, plist2]}, {'Foo': 'Bar'})
def test_conflicting_keys_raises_error(self):
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(plisttool.CONFLICTING_KEYS_MSG % (
_testing_target, 'Foo', 'Baz', 'Bar'))):
plist1 = _xml_plist('<key>Foo</key><string>Bar</string>')
plist2 = _xml_plist('<key>Foo</key><string>Baz</string>')
_plisttool_result({'plists': [plist1, plist2]})
def test_pkginfo_with_valid_values(self):
self._assert_pkginfo({
'CFBundlePackageType': 'APPL',
'CFBundleSignature': '1234',
}, 'APPL1234')
def test_pkginfo_with_missing_package_type(self):
self._assert_pkginfo({
'CFBundleSignature': '1234',
}, '????1234')
def test_pkginfo_with_missing_signature(self):
self._assert_pkginfo({
'CFBundlePackageType': 'APPL',
}, 'APPL????')
def test_pkginfo_with_missing_package_type_and_signature(self):
self._assert_pkginfo({}, '????????')
def test_pkginfo_with_values_too_long(self):
self._assert_pkginfo({
'CFBundlePackageType': 'APPLE',
'CFBundleSignature': '1234',
}, '????1234')
def test_pkginfo_with_valid_values_too_short(self):
self._assert_pkginfo({
'CFBundlePackageType': 'APPL',
'CFBundleSignature': '123',
}, 'APPL????')
def test_pkginfo_with_values_encodable_in_mac_roman(self):
self._assert_pkginfo({
'CFBundlePackageType': u'ÄPPL',
'CFBundleSignature': '1234',
}, '\x80PPL1234')
def test_pkginfo_with_values_not_encodable_in_mac_roman(self):
self._assert_pkginfo({
'CFBundlePackageType': u'😎',
'CFBundleSignature': '1234',
}, '????1234')
def test_child_plist_that_matches_parent_does_not_raise(self):
parent = _xml_plist(
'<key>CFBundleIdentifier</key><string>foo.bar</string>')
child = _xml_plist(
'<key>CFBundleIdentifier</key><string>foo.bar.baz</string>')
children = {'//fake:label': child}
_plisttool_result({
'plists': [parent],
'info_plist_options': {
'child_plists': children,
},
})
def test_child_plist_with_incorrect_bundle_id_raises(self):
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(plisttool.CHILD_BUNDLE_ID_MISMATCH_MSG % (
_testing_target, '//fake:label', 'foo.bar.', 'foo.baz'))):
parent = _xml_plist(
'<key>CFBundleIdentifier</key><string>foo.bar</string>')
child = _xml_plist(
'<key>CFBundleIdentifier</key><string>foo.baz</string>')
children = {'//fake:label': child}
_plisttool_result({
'plists': [parent],
'info_plist_options': {
'child_plists': children,
},
})
def test_child_plist_with_incorrect_bundle_version_raises(self):
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(plisttool.CHILD_BUNDLE_VERSION_MISMATCH_MSG % (
_testing_target, 'CFBundleVersion', '//fake:label',
'1.2.3', '1.2.4'))):
parent = _xml_plist(
'<key>CFBundleIdentifier</key><string>foo.bar</string>'
'<key>CFBundleVersion</key><string>1.2.3</string>')
child = _xml_plist(
'<key>CFBundleIdentifier</key><string>foo.bar.baz</string>'
'<key>CFBundleVersion</key><string>1.2.4</string>')
children = {'//fake:label': child}
_plisttool_result({
'plists': [parent],
'info_plist_options': {
'child_plists': children,
},
})
def test_child_plist_with_incorrect_bundle_short_version_raises(self):
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(plisttool.CHILD_BUNDLE_VERSION_MISMATCH_MSG % (
_testing_target, 'CFBundleShortVersionString', '//fake:label',
'1.2.3', '1.2.4'))):
parent = _xml_plist(
'<key>CFBundleIdentifier</key><string>foo.bar</string>'
'<key>CFBundleShortVersionString</key><string>1.2.3</string>')
child = _xml_plist(
'<key>CFBundleIdentifier</key><string>foo.bar.baz</string>'
'<key>CFBundleShortVersionString</key><string>1.2.4</string>')
children = {'//fake:label': child}
_plisttool_result({
'plists': [parent],
'info_plist_options': {
'child_plists': children,
},
})
def test_child_plist_missing_required_child(self):
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(plisttool.REQUIRED_CHILD_MISSING_MSG % (
_testing_target, '//unknown:label'))):
parent = _xml_plist(
'<key>CFBundleIdentifier</key><string>foo.bar</string>')
child = _xml_plist(
'<key>CFBundleIdentifier</key><string>foo.bar.baz</string>')
children = {'//fake:label': child}
_plisttool_result({
'plists': [parent],
'info_plist_options': {
'child_plists': children,
'child_plist_required_values': {
'//unknown:label': [['foo', 'bar']],
}
},
})
def test_child_plist_required_invalid_format_not_list(self):
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(plisttool.REQUIRED_CHILD_NOT_PAIR % (
_testing_target, '//fake:label', 'not_right'))):
parent = _xml_plist(
'<key>CFBundleIdentifier</key><string>foo.bar</string>')
child = _xml_plist(
'<key>CFBundleIdentifier</key><string>foo.bar.baz</string>')
children = {'//fake:label': child}
_plisttool_result({
'plists': [parent],
'info_plist_options': {
'child_plists': children,
'child_plist_required_values': {
'//fake:label': ['not_right'],
}
},
})
def test_child_plist_required_invalid_format_not_pair(self):
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(plisttool.REQUIRED_CHILD_NOT_PAIR % (
_testing_target, '//fake:label', ['not_right']))):
parent = _xml_plist(
'<key>CFBundleIdentifier</key><string>foo.bar</string>')
child = _xml_plist(
'<key>CFBundleIdentifier</key><string>foo.bar.baz</string>')
children = {'//fake:label': child}
_plisttool_result({
'plists': [parent],
'info_plist_options': {
'child_plists': children,
'child_plist_required_values': {
'//fake:label': [['not_right']],
}
},
})
def test_child_plist_required_missing_keypath(self):
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(plisttool.REQUIRED_CHILD_KEYPATH_NOT_FOUND % (
_testing_target, '//fake:label', 'not-there', 'blah'))):
parent = _xml_plist(
'<key>CFBundleIdentifier</key><string>foo.bar</string>')
child = _xml_plist(
'<key>CFBundleIdentifier</key><string>foo.bar.baz</string>')
children = {'//fake:label': child}
_plisttool_result({
'plists': [parent],
'info_plist_options': {
'child_plists': children,
'child_plist_required_values': {
'//fake:label': [
# This will be found and pass.
[['CFBundleIdentifier'], 'foo.bar.baz' ],
# This will raise.
[['not-there'], 'blah' ],
],
}
},
})
def test_child_plist_required_not_matching(self):
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(plisttool.REQUIRED_CHILD_KEYPATH_NOT_MATCHING % (
_testing_target, '//fake:label', 'CFBundleIdentifier',
'foo.bar.baz.not', 'foo.bar.baz'))):
parent = _xml_plist(
'<key>CFBundleIdentifier</key><string>foo.bar</string>')
child = _xml_plist(
'<key>CFBundleIdentifier</key><string>foo.bar.baz</string>')
children = {'//fake:label': child}
_plisttool_result({
'plists': [parent],
'info_plist_options': {
'child_plists': children,
'child_plist_required_values': {
'//fake:label': [
[['CFBundleIdentifier'], 'foo.bar.baz.not' ],
],
}
},
})
def test_unknown_control_keys_raise(self):
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(plisttool.UNKNOWN_CONTROL_KEYS_MSG % (
_testing_target, 'unknown'))):
plist = {'Foo': 'bar'}
_plisttool_result({
'plists': [plist],
'unknown': True,
})
def test_unknown_info_plist_options_keys_raise(self):
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(plisttool.UNKNOWN_TASK_OPTIONS_KEYS_MSG % (
_testing_target, 'info_plist_options', 'mumble'))):
plist = {'Foo': 'bar'}
children = {'//fake:label': {'foo': 'bar'}}
_plisttool_result({
'plists': [plist],
'info_plist_options': {
'child_plists': children,
'mumble': 'something',
},
})
def test_missing_version(self):
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(plisttool.MISSING_VERSION_KEY_MSG % (
_testing_target, 'CFBundleVersion'))):
plist = {'CFBundleShortVersionString': '1.0'}
_plisttool_result({
'plists': [plist],
'info_plist_options': {
'version_keys_required': True,
},
})
def test_missing_short_version(self):
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(plisttool.MISSING_VERSION_KEY_MSG % (
_testing_target, 'CFBundleShortVersionString'))):
plist = {'CFBundleVersion': '1.0'}
_plisttool_result({
'plists': [plist],
'info_plist_options': {
'version_keys_required': True,
},
})
def test_empty_version(self):
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(plisttool.MISSING_VERSION_KEY_MSG % (
_testing_target, 'CFBundleVersion'))):
plist = {
'CFBundleShortVersionString': '1.0',
'CFBundleVersion': '',
}
_plisttool_result({
'plists': [plist],
'info_plist_options': {
'version_keys_required': True,
},
})
def test_empty_short_version(self):
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(plisttool.MISSING_VERSION_KEY_MSG % (
_testing_target, 'CFBundleShortVersionString'))):
plist = {
'CFBundleShortVersionString': '',
'CFBundleVersion': '1.0',
}
_plisttool_result({
'plists': [plist],
'info_plist_options': {
'version_keys_required': True,
},
})
def test_invalid_version(self):
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(plisttool.INVALID_VERSION_KEY_VALUE_MSG % (
_testing_target, 'CFBundleVersion', '1foo'))):
plist = {
'CFBundleVersion': '1foo',
}
_plisttool_result({
'plists': [plist],
'info_plist_options': { } # presence triggers checking
})
def test_invalid_short_version(self):
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(plisttool.INVALID_VERSION_KEY_VALUE_MSG % (
_testing_target, 'CFBundleShortVersionString', '1foo'))):
plist = {
'CFBundleShortVersionString': '1foo',
}
_plisttool_result({
'plists': [plist],
'info_plist_options': { } # presence triggers checking
})
def test_versions_not_checked_without_options(self):
plist = {
'CFBundleShortVersionString': '1foo',
'CFBundleVersion': '1foo',
}
    # Even though the numbers are invalid, the plist comes back fine because
# there was no info_plist_options to trigger validation.
self._assert_plisttool_result(
{ 'plists': [plist] },
plist
)
def test_entitlements_options_var_subs(self):
plist1 = {'Foo': '$(AppIdentifierPrefix)'}
self._assert_plisttool_result({
'plists': [plist1],
'entitlements_options': {
'bundle_id': 'my.bundle.id',
'profile_metadata_file': {
'ApplicationIdentifierPrefix': ['abc123'],
'Version': 1,
},
},
}, {'Foo': 'abc123.'})
def test_entitlements_options_raw_subs(self):
plist1 = {'Bar': 'abc123.*'}
self._assert_plisttool_result({
'plists': [plist1],
'entitlements_options': {
'bundle_id': 'my.bundle.id',
'profile_metadata_file': {
'ApplicationIdentifierPrefix': ['abc123'],
'Version': 1,
},
},
}, {'Bar': 'abc123.my.bundle.id'})
def test_entitlements_no_profile_for_app_id_prefix(self):
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(
' '.join([
plisttool.UNKNOWN_SUBSTITUTATION_REFERENCE_MSG % (
_testing_target, '${AppIdentifierPrefix}', 'Foo',
'${AppIdentifierPrefix}.my.bundle.id'),
plisttool.UNKNOWN_SUBSTITUTION_ADDITION_AppIdentifierPrefix_MSG
]))):
_plisttool_result({
'plists': [{'Foo': '${AppIdentifierPrefix}.my.bundle.id'}],
'entitlements_options': {
'bundle_id': 'my.bundle.id',
},
})
def test_entitlements_no_profile_for_app_id_prefix_rfc_reference(self):
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(
' '.join([
plisttool.UNKNOWN_SUBSTITUTATION_REFERENCE_MSG % (
_testing_target, '$(AppIdentifierPrefix:rfc1034identifier)', 'Foo',
'$(AppIdentifierPrefix:rfc1034identifier).my.bundle.id'),
plisttool.UNKNOWN_SUBSTITUTION_ADDITION_AppIdentifierPrefix_MSG
]))):
_plisttool_result({
'plists': [{'Foo': '$(AppIdentifierPrefix:rfc1034identifier).my.bundle.id'}],
'entitlements_options': {
'bundle_id': 'my.bundle.id',
},
})
def test_entitlements_bundle_id_match(self):
# This is really looking for the lack of an error being raised.
plist1 = {'application-identifier': 'QWERTY.my.bundle.id'}
self._assert_plisttool_result({
'plists': [plist1],
'entitlements_options': {
'bundle_id': 'my.bundle.id',
},
}, plist1)
def test_entitlements_bundle_id_wildcard_match(self):
# This is really looking for the lack of an error being raised.
plist1 = {'application-identifier': 'QWERTY.my.*'}
self._assert_plisttool_result({
'plists': [plist1],
'entitlements_options': {
'bundle_id': 'my.bundle.id',
},
}, plist1)
def test_entitlements_bundle_id_mismatch(self):
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(plisttool.ENTITLMENTS_BUNDLE_ID_MISMATCH % (
_testing_target, 'my.bundle.id', 'other.bundle.id'))):
_plisttool_result({
'plists': [{'application-identifier': 'QWERTY.other.bundle.id'}],
'entitlements_options': {
'bundle_id': 'my.bundle.id',
},
})
def test_entitlements_bundle_id_wildcard_mismatch(self):
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(plisttool.ENTITLMENTS_BUNDLE_ID_MISMATCH % (
_testing_target, 'my.bundle.id', 'other.*'))):
_plisttool_result({
'plists': [{'application-identifier': 'QWERTY.other.*'}],
'entitlements_options': {
'bundle_id': 'my.bundle.id',
},
})
def test_entitlements_profile_not_expired(self):
# This is really looking for the lack of an error being raised.
plist1 = {'foo': 'bar'}
self._assert_plisttool_result({
'plists': [plist1],
'entitlements_options': {
'profile_metadata_file': {
'ExpirationDate': datetime.datetime.max,
'Version': 1,
},
},
}, plist1)
def test_entitlements_profile_expired(self):
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(plisttool.ENTITLEMENTS_PROFILE_HAS_EXPIRED % (
_testing_target, '0001-01-01T00:00:00'))):
_plisttool_result({
'plists': [{'foo': 'bar'}],
'entitlements_options': {
'profile_metadata_file': {
'ExpirationDate': datetime.datetime.min,
'Version': 1,
},
},
})
def test_entitlements_profile_team_mismatch(self):
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(plisttool.ENTITLMENTS_TEAM_ID_PROFILE_MISMATCH % (
_testing_target, 'QWERTY', 'TeamIdentifier', "['ASDFGH']"))):
_plisttool_result({
'plists': [{'com.apple.developer.team-identifier': 'QWERTY'}],
'entitlements_options': {
'profile_metadata_file': {
'ApplicationIdentifierPrefix': [ 'QWERTY' ],
'TeamIdentifier': [ 'ASDFGH' ],
'Version': 1,
},
},
})
def test_entitlements_profile_app_id_prefix_mismatch(self):
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(plisttool.ENTITLMENTS_TEAM_ID_PROFILE_MISMATCH % (
_testing_target, 'QWERTY', 'ApplicationIdentifierPrefix', "['ASDFGH']"))):
_plisttool_result({
'plists': [{'com.apple.developer.team-identifier': 'QWERTY'}],
'entitlements_options': {
'profile_metadata_file': {
'TeamIdentifier': [ 'QWERTY' ],
'ApplicationIdentifierPrefix': [ 'ASDFGH' ],
'Version': 1,
},
},
})
def test_entitlements_profile_teams_match(self):
# This is really looking for the lack of an error being raised.
plist1 = {'com.apple.developer.team-identifier': 'QWERTY'}
self._assert_plisttool_result({
'plists': [plist1],
'entitlements_options': {
'bundle_id': 'my.bundle.id',
'profile_metadata_file': {
'ApplicationIdentifierPrefix': [ 'ASDFGH', 'QWERTY' ],
'TeamIdentifier': [ 'ASDFGH', 'QWERTY' ],
'Version': 1,
},
},
}, plist1)
def test_entitlements_app_id_match(self):
# This is really looking for the lack of an error being raised.
plist1 = {'application-identifier': 'QWERTY.my.bundle.id'}
self._assert_plisttool_result({
'plists': [plist1],
'entitlements_options': {
'bundle_id': 'my.bundle.id',
'profile_metadata_file': {
'Entitlements': {
'application-identifier': 'QWERTY.my.bundle.id',
},
'Version': 1,
},
},
}, plist1)
def test_entitlements_app_id_wildcard_match(self):
# This is really looking for the lack of an error being raised.
plist1 = {'application-identifier': 'QWERTY.my.bundle.id'}
self._assert_plisttool_result({
'plists': [plist1],
'entitlements_options': {
'bundle_id': 'my.bundle.id',
'profile_metadata_file': {
'Entitlements': {
'application-identifier': 'QWERTY.*',
},
'Version': 1,
},
},
}, plist1)
def test_entitlements_app_id_mismatch(self):
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(plisttool.ENTITLMENTS_APP_ID_PROFILE_MISMATCH % (
_testing_target, 'QWERTY.my.bundle.id', 'ASDFGH.my.bundle.id'))):
_plisttool_result({
'plists': [{'application-identifier': 'QWERTY.my.bundle.id'}],
'entitlements_options': {
'bundle_id': 'my.bundle.id',
'profile_metadata_file': {
'Entitlements': {
'application-identifier': 'ASDFGH.my.bundle.id'
},
'Version': 1,
},
},
})
def test_entitlements_app_id_mismatch_wildcard(self):
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(plisttool.ENTITLMENTS_APP_ID_PROFILE_MISMATCH % (
_testing_target, 'QWERTY.my.bundle.id', 'ASDFGH.*'))):
_plisttool_result({
'plists': [{'application-identifier': 'QWERTY.my.bundle.id'}],
'entitlements_options': {
'bundle_id': 'my.bundle.id',
'profile_metadata_file': {
'Entitlements': {
'application-identifier': 'ASDFGH.*',
},
'Version': 1,
},
},
})
# The edge case in EntitlementsTask._does_id_match()
  def test_entitlements_wildcard_app_id_match(self):
# This is really looking for the lack of an error being raised.
plist1 = {'application-identifier': 'QWERTY.*'}
self._assert_plisttool_result({
'plists': [plist1],
'entitlements_options': {
'bundle_id': 'my.bundle.id',
'profile_metadata_file': {
'Entitlements': {
'application-identifier': 'QWERTY.my.bundle.id',
},
'Version': 1,
},
},
}, plist1)
# The edge case in EntitlementsTask._does_id_match()
def test_entitlements_app_id_wildcard_match_wildcard(self):
# This is really looking for the lack of an error being raised.
plist1 = {'application-identifier': 'QWERTY.my.*'}
self._assert_plisttool_result({
'plists': [plist1],
'entitlements_options': {
'bundle_id': 'my.bundle.id',
'profile_metadata_file': {
'Entitlements': {
'application-identifier': 'QWERTY.*',
},
'Version': 1,
},
},
}, plist1)
# The edge case in EntitlementsTask._does_id_match()
def test_entitlements_app_id_wildcard_mismatch(self):
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(plisttool.ENTITLMENTS_APP_ID_PROFILE_MISMATCH % (
_testing_target, 'QWERTY.*', 'ASDFGH.*'))):
_plisttool_result({
'plists': [{'application-identifier': 'QWERTY.*'}],
'entitlements_options': {
'bundle_id': 'my.bundle.id',
'profile_metadata_file': {
'Entitlements': {
'application-identifier': 'ASDFGH.*',
},
'Version': 1,
},
},
})
# The edge case in EntitlementsTask._does_id_match()
def test_entitlements_app_id_wildcard_mismatch_wildcard(self):
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(plisttool.ENTITLMENTS_APP_ID_PROFILE_MISMATCH % (
_testing_target, 'QWERTY.*', 'ASDFGH.my.bundle.id'))):
_plisttool_result({
'plists': [{'application-identifier': 'QWERTY.*'}],
'entitlements_options': {
'bundle_id': 'my.bundle.id',
'profile_metadata_file': {
'Entitlements': {
'application-identifier': 'ASDFGH.my.bundle.id',
},
'Version': 1,
},
},
})
def test_entitlements_keychain_match(self):
# This is really looking for the lack of an error being raised.
plist1 = {'keychain-access-groups': ['QWERTY.my.bundle.id']}
self._assert_plisttool_result({
'plists': [plist1],
'entitlements_options': {
'bundle_id': 'my.bundle.id',
'profile_metadata_file': {
'Entitlements': {
'keychain-access-groups': ['QWERTY.my.bundle.id'],
},
'Version': 1,
},
},
}, plist1)
def test_entitlements_keychain_match_wildcard(self):
# This is really looking for the lack of an error being raised.
plist1 = {'keychain-access-groups': ['QWERTY.my.bundle.id']}
self._assert_plisttool_result({
'plists': [plist1],
'entitlements_options': {
'bundle_id': 'my.bundle.id',
'profile_metadata_file': {
'Entitlements': {
'keychain-access-groups': ['QWERTY.*'],
},
'Version': 1,
},
},
}, plist1)
def test_entitlements_no_keychain_requested(self):
# This is really looking for the lack of an error being raised.
plist1 = {'application-identifier': 'QWERTY.my.bundle.id'}
self._assert_plisttool_result({
'plists': [plist1],
'entitlements_options': {
'bundle_id': 'my.bundle.id',
'profile_metadata_file': {
'Entitlements': {
'application-identifier': 'QWERTY.*',
'keychain-access-groups': ['ASDFGH.*'],
},
'Version': 1,
},
},
}, plist1)
def test_entitlements_keychain_not_allowed(self):
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(plisttool.ENTITLMENTS_HAS_GROUP_PROFILE_DOES_NOT % (
_testing_target, 'keychain-access-groups'))):
_plisttool_result({
'plists': [{'keychain-access-groups': ['QWERTY.my.bundle.id']}],
'entitlements_options': {
'bundle_id': 'my.bundle.id',
'profile_metadata_file': {
'Entitlements': {
'application-identifier': 'QWERTY.*',
# No 'keychain-access-groups'
},
'Version': 1,
},
},
})
def test_entitlements_keychain_mismatch(self):
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(plisttool.ENTITLMENTS_HAS_GROUP_ENTRY_PROFILE_DOES_NOT % (
_testing_target, 'keychain-access-groups', 'QWERTY.my.bundle.id',
'ASDFGH.*", "QWERTY.my.bundle.id.also'))):
_plisttool_result({
'plists': [{'keychain-access-groups': ['QWERTY.my.bundle.id']}],
'entitlements_options': {
'bundle_id': 'my.bundle.id',
'profile_metadata_file': {
'Entitlements': {
'keychain-access-groups': [
'ASDFGH.*',
'QWERTY.my.bundle.id.also',
],
},
'Version': 1,
},
},
})
def test_entitlements_app_groups_match(self):
# This is really looking for the lack of an error being raised.
plist1 = {
'com.apple.security.application-groups': ['QWERTY.my.bundle.id'],
}
self._assert_plisttool_result({
'plists': [plist1],
'entitlements_options': {
'bundle_id': 'my.bundle.id',
'profile_metadata_file': {
'Entitlements': {
'com.apple.security.application-groups': [
'QWERTY.my.bundle.id',
],
},
'Version': 1,
},
},
}, plist1)
def test_entitlements_app_groups_wildcard_no_match(self):
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(plisttool.ENTITLMENTS_HAS_GROUP_ENTRY_PROFILE_DOES_NOT % (
_testing_target, 'com.apple.security.application-groups',
'QWERTY.my.bundle.id', 'QWERTY.*'))):
_plisttool_result({
'plists': [{
'com.apple.security.application-groups': ['QWERTY.my.bundle.id'],
}],
'entitlements_options': {
'bundle_id': 'my.bundle.id',
'profile_metadata_file': {
'Entitlements': {
'com.apple.security.application-groups': ['QWERTY.*'],
},
'Version': 1,
},
},
})
def test_entitlements_no_app_groups_requested(self):
# This is really looking for the lack of an error being raised.
plist1 = {'application-identifier': 'QWERTY.my.bundle.id'}
self._assert_plisttool_result({
'plists': [plist1],
'entitlements_options': {
'bundle_id': 'my.bundle.id',
'profile_metadata_file': {
'Entitlements': {
'application-identifier': 'QWERTY.*',
'com.apple.security.application-groups': ['ASDFGH.*'],
},
'Version': 1,
},
},
}, plist1)
def test_entitlements_app_groups_not_allowed(self):
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(plisttool.ENTITLMENTS_HAS_GROUP_PROFILE_DOES_NOT % (
_testing_target, 'com.apple.security.application-groups'))):
_plisttool_result({
'plists': [{
'com.apple.security.application-groups': ['QWERTY.my.bundle.id'],
}],
'entitlements_options': {
'bundle_id': 'my.bundle.id',
'profile_metadata_file': {
'Entitlements': {
'application-identifier': 'QWERTY.*',
# No 'com.apple.security.application-groups'
},
'Version': 1,
},
},
})
def test_entitlements_app_groups_mismatch(self):
with self.assertRaisesRegexp(
plisttool.PlistToolError,
re.escape(plisttool.ENTITLMENTS_HAS_GROUP_ENTRY_PROFILE_DOES_NOT % (
_testing_target, 'com.apple.security.application-groups',
'QWERTY.my.bundle.id', 'ASDFGH.*", "QWERTY.my.bundle.id.also'))):
_plisttool_result({
'plists': [{
'com.apple.security.application-groups': ['QWERTY.my.bundle.id'],
}],
'entitlements_options': {
'bundle_id': 'my.bundle.id',
'profile_metadata_file': {
'Entitlements': {
'com.apple.security.application-groups': [
'ASDFGH.*',
'QWERTY.my.bundle.id.also',
],
},
'Version': 1,
},
},
})
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jerrymarx/PythonFinanceStudy",
"score": 3
}
|
#### File: PythonFinanceStudy/historical_data/tswfin.py
```python
import pandas as pd
import os
'''
data = pd.read_csv('00386.hk.csv')
data['Date'] = pd.to_datetime(data['Date'])
data.set_index('Date', inplace=True)
return data
hk = pd.read_csv('CNYHKD.csv', delimiter='\t')
del hk['Unnamed: 0']
hk['Date'] = pd.to_datetime(hk['Date'])
hk.set_index('Date', inplace=True)
hk.sort_index(inplace=True)
'''
class YahooHistoryDataDownloader:
CRUMBLE_REGEX = r'CrumbStore":{"crumb":"(.*?)"}'
COOKIE_REGEX = None  # original pattern not preserved in this excerpt
def get_code(code):
if code.find('.') != -1:
return code
if len(code) == 6:
if code[0] == '6' or code[0] == '9':
return code + '.SS'
elif code[0] == '0' or code[0] == '2' or code[0] == '3':
return code + '.SZ'
if len(code) == 5:
if code[0] == '0':
return code[-4:] + '.HK'  # drop the leading zero of 5-digit HK codes
else:
return code + '.HK'
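# A quick illustration of the suffix mapping above (assumed Yahoo Finance conventions):
#   get_code('600028')   -> '600028.SS'  (Shanghai)
#   get_code('000002')   -> '000002.SZ'  (Shenzhen)
#   get_code('00386')    -> '0386.HK'    (Hong Kong, leading zero dropped)
#   get_code('0700.HK')  -> '0700.HK'    (already suffixed, returned as-is)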
def load_history_data(code):
if not os.path.exists(get_history_data_file_path(code)):
download_history_data(code)
else:
pass
def download_history_data(code):
pass  # body not preserved in this excerpt
```
#### File: PythonFinanceStudy/scraping_divend/divend_sina_getter.py
```python
try:
from urllib.request import urlopen
except:
from urllib2 import urlopen
from bs4 import BeautifulSoup as soup
import re, datetime, csv, time, random
class divend_info:
def __init__(self, full_list_filename, ah_list_filename, divend_filename):
self.full_dict = self.load_full_list(full_list_filename)
self.ah_dict = self.load_ah_list(ah_list_filename)
self.divend_data = self.load_divend_data(divend_filename)
self.price_dict = {}
self.usd = None
self.hkd = None
def load_full_list(self, filename):
full_dict = {}
with open(filename, encoding='UTF-8') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
key = row[0]
code_a = row[1]
code_b = row[3]
total_a = row[5]
total_b = row[6]
full_dict[key] = {'code_a':code_a, 'code_b':code_b, 'total_a':float(total_a), 'total_b':float(total_b)}
return full_dict
def load_ah_list(self, filename):
ah_dict = {}
with open(filename) as csvfile:
reader = csv.reader(csvfile)
for row in reader:
code_h = row[0]
code_a = row[1]
name_h = row[2]
ah_dict[code_h] = {'code_a':code_a, 'name_h':name_h}
return ah_dict
def load_divend_data(self, filename):
divend_data = {}
with open(filename) as csvfile:
reader = csv.reader(csvfile)
for row in reader:
code = row[0]
volume = float(row[1])
rate = float(row[2])
year = int(row[3])
if code not in divend_data:
divend_data[code] = {}
record = divend_data[code]
if year not in record:
record[year] = 0.0
record[year] += volume * rate / 10
return divend_data
def get_code_str_for_sina_hq(self, code, prefix=''):
if len(code) == 5:
return 'hk' + code
if code[0] == '0' or code[0] == '2' or code[0] == '3':
return prefix + 'sz' + code
elif code[0] == '6' or code[0] == '9':
return prefix + 'sh' + code
else:
print('Unknown code:', code)
return None
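# Illustration of the prefix mapping above (example values assumed):
#   get_code_str_for_sina_hq('600028', 's_') -> 's_sh600028'  (Shanghai A-share)
#   get_code_str_for_sina_hq('000002', 's_') -> 's_sz000002'  (Shenzhen A-share)
#   get_code_str_for_sina_hq('00700')        -> 'hk00700'     (Hong Kong)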
def get_latest_price_from_sina(self):
total = len(self.full_dict) + len(self.ah_dict)
group_size = 50
done = 0
key_list = list(self.full_dict.keys())
for i in range(0, len(self.full_dict), group_size):
temp_list = key_list[i:i+group_size]
code_list = [self.get_code_str_for_sina_hq(code, 's_') for code in temp_list]
the_url = 'http://hq.sinajs.cn/format=text&list=' + ','.join(code_list)
client = urlopen(the_url)
print('try to get', the_url)
text = client.read()
client.close()
lines = text.decode('gbk').split('\n')
for line in lines:
if line.strip() == '':
continue
parts = line.split(',')
code, name = parts[0].split('=')
price_str = parts[1]
if price_str == '0.00':
the_url = 'http://hq.sinajs.cn/format=text&list=' + code[-8:]
client = urlopen(the_url)
print('try to get', the_url)
text = client.read()
client.close()
price_str = text.decode('gbk').split(',')[2]
name = name + ' S'
self.price_dict[code[-6:]] = [name, float(price_str),]
done += 1
print('Get info of [%d/%d] done.' % (done, total))
key_list = list(self.ah_dict.keys())
for i in range(0, len(self.ah_dict), group_size):
temp_list = key_list[i:i+group_size]
code_list = [self.get_code_str_for_sina_hq(code) for code in temp_list]
the_url = 'http://hq.sinajs.cn/format=text&list=' + ','.join(code_list)
client = urlopen(the_url)
print('try to get', the_url)
text = client.read()
client.close()
lines = text.decode('gbk').split('\n')
for line in lines:
if line.strip() == '':
continue
parts = line.split(',')
code = parts[0].split('=')[0][-5:]
name = parts[1]
price_str = parts[6]
self.price_dict[code] = [name, float(price_str),]
done += 1
print('Get info of [%d/%d] done.' % (done, total))
def get_last_price(self, code):
if code in self.price_dict:
info = self.price_dict[code]
return info[1], info[0]
else:
print('Cannot find price and name for code:', code)
return None, None
def format_row(self, code, price, name):
items = ['="' + code + '"', name, str(price),]
key = code
if len(code) == 5:
key = self.ah_dict[code]['code_a']
elif self.full_dict[code]['code_a'] != '':
key = self.full_dict[code]['code_a']
common_info = self.full_dict[key]
total = common_info['total_a'] + common_info['total_b']
total_value = total * price
if code[0] == '9':
total_value *= self.usd
elif code[0] == '2' or len(code) == 5:
total_value *= self.hkd
items.append('%.4f' % total)
items.append('%.4f' % total_value)
divend_info = self.divend_data[key] if key in self.divend_data else {}
for year in range(2018, 1990, -1):
divend = ('%.4f' % divend_info[year]) if year in divend_info else ''
percent = (('%.2f%%' % (divend_info[year]*100/total_value)) if year in divend_info else '') if total_value > 1 else ''
items.append(divend)
items.append(percent)
return ','.join(items)
def get_usd_hkd(self):
the_url = 'http://hq.sinajs.cn/format=text&list=USDCNY,HKDCNY'
client = urlopen(the_url)
text = client.read()
client.close()
lines = text.decode('gbk').split('\n')
self.usd = float(lines[0].split(',')[2])
self.hkd = float(lines[1].split(',')[2])
def get_data_from_sina_then_write(self, code, outfile):
price, name = self.get_last_price(code)
text = self.format_row(code, price, name)
with open(outfile, 'a', encoding='utf-8-sig') as ofile:
ofile.write(text)
ofile.write('\n')
def main(self):
self.get_usd_hkd()
outfile = 'output_' + datetime.datetime.now().strftime("%Y%m%d") + '.csv'
with open(outfile, 'w',encoding='utf-8-sig') as ofile:
heads = ['CODE', 'NAME', 'PRICE', 'TOTAL', 'MARKET']
for year in range(2018, 1990, -1):
heads.append(str(year))
heads.append(str(year)+'p')
ofile.write(','.join(heads))
ofile.write('\n')
self.get_latest_price_from_sina()
done = 0
print('Handle the data', end='', flush=True)
for key in self.full_dict:
self.get_data_from_sina_then_write(key, outfile)
done += 1
if done % 100 == 0:
print('.', end='', flush=True)
for key in self.ah_dict:
self.get_data_from_sina_then_write(key, outfile)
done += 1
if done % 100 == 0:
print('.', end='', flush=True)
print(' Done')
if __name__ == '__main__':
worker = divend_info('full_list.csv', 'ah.csv', 'divend.csv')
'''code = '600028'
price, name = worker.get_last_price(code)
text = worker.format_row(code, price, name)
print text'''
worker.main()
```
|
{
"source": "Jerry-Ma/wowplug",
"score": 2
}
|
#### File: wowplug/wowplug/main.py
```python
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from collections import OrderedDict
import os
import sys
import inspect
import logging
import logging.config
from textwrap import indent
import pkg_resources
from docopt import docopt
from schema import Schema, Or, SchemaError, Use
from .utils import expanded_abspath, yaml, LoggingHandler
__all__ = ['cli', ]
LOGGER_NAME = 'cli'
def cli():
"""Command line interface of :mod:`wowplug`. Run
.. code-block:: sh
$ wowplug --help
for an extensive description.
"""
doc = """
An addon manager for World of Warcraft.
Usage:
wowplug -h
wowplug --help
wowplug (-v|--version)
wowplug [(-d|--debug)] [(-n|--ignore-config)] <command> [<args>...]
Options:
-h Show this message.
--help Show a more detailed help message
-v --version Show version.
-d --debug Show debug messages.
-n --ignore-config Ignore previously saved config file
"""
cmderr_fmt = "ERROR: {}"
cmds = OrderedDict()
def register_cmd(cmd):
def wrapper(func):
cmds[cmd] = func
return func
return wrapper
@register_cmd('scan')
def cmd_scan(args):
"""
Scan <dir> to get a list of installed addons. If no <dir> is
given, try to scan the previously scanned one if applicable.
Usage:
wowplug scan [<dir>] [--output=<file>]
Options:
-h --help Show this message.
-o <file> --output=<file>
Dump the scan result to a file.
This file can be used for `sync`.
"""
def find_addondir(path):
path = expanded_abspath(path)
parent, base = os.path.split(path)
if base == "AddOns":
return path
if base == "Interface":
return os.path.join(path, 'AddOns')
_path = os.path.join(path, 'Interface', 'AddOns')
if os.path.exists(_path):
return _path
return path
args, fc, tc = _sync_args_with_config(
args, config,
{
# use config entry if <dir> not specified
# update to config entry if specified
'<dir>': {
'key': 'scan.dir',
'norm': find_addondir,
'from': lambda a, c: a is None,
'to': lambda a, c: a is not None,
},
# do not use config entry
# update to config entry if specified
'--output': {
'key': 'sync.file',
'norm': expanded_abspath,
'from': lambda a, c: False,
'to': lambda a, c: a is not None,
},
}
)
# validate the args
args = Schema({
'<dir>': Or(
os.path.isdir,
error="`{}` is not a valid directory".format(args['<dir>'])
if '<dir>' not in fc else
"no valid directory found in saved config {} from"
" previous scan. one has to be specified via"
" command line".format(config.filepath)),
'--output': object,
}, ignore_extra_keys=True).validate(args)
from . import scan
scan.scan(args['<dir>'], output=args['--output'])
@register_cmd('sync')
def cmd_sync(args):
"""
Sync the addons listed in <file> to an AddOns directory. If
no <file> is given, sync the previously synced one or the one
generated by the last run of `scan`. Addons that are not in the
list will be moved to directory `wowplugcache`. Addons that
are in the list but do not exist in the AddOns directory or
`wowplugcache` will be downloaded and installed.
Usage:
wowplug sync [<file>] [--update] [--delete] [--target=<dir>]
Options:
-h --help Show an extensive help message
-u --update Update outdated addons if possible.
-d --delete Delete the unused addons instead of
placing them in `wowplugcache`.
-t <dir> --target=<dir>
Sync to the set <dir> instead of the
`config.scan.dir` specified in <file>.
"""
logger = logging.getLogger(LOGGER_NAME)
args, fc, tc = _sync_args_with_config(
args, config,
{
# use config entry if <file> not specified
# always update to config entry if specified
'<file>': {
'key': 'sync.file',
'norm': expanded_abspath,
'from': lambda a, c: a is None,
'to': lambda a, c: a is not None,
},
# use config entry if not specified
# update to config entry if specified
'--target': {
'key': 'scan.dir',
'norm': expanded_abspath,
'from': lambda a, c: a is None,
'to': lambda a, c: a is not None,
},
}
)
# validate the args
def load_yaml(f):
if not os.path.exists(f):
raise SchemaError()
with open(f, 'r') as fo:
y = yaml.safe_load(fo)
return y
args = Schema({
'<file>': Use(
load_yaml,
error="`{}` does not exist or is not a valid YAML file".format(
args['<file>'])
if '<file>' not in fc else
"no valid sync file found in saved config {} from"
" previous sync or scan. one has to be specified via"
" command line".format(config.filepath)),
'--target': Or(
None, expanded_abspath
),
'--update': object,
'--delete': object,
}, ignore_extra_keys=True).validate(args)
# we need to check sync result fc. in case target is pulled from
# config, we use the entry from <file>
if '--target' in fc:
args['--target'] = args['<file>']['config']['scan']['dir']
logger.debug(
"use --target=`{}` from <file>.config.scan.dir"
.format(args['--target']))
from . import sync
sync.sync(
args['<file>'],
target=args['--target'],
update=args['--update'],
delete=args['--delete']
)
@register_cmd('clean')
def cmd_clean(args):
"""
Sync the <file> as if all addons listed in <file> were
disabled. This will move all addons to `.wowplugcache`
Usage:
wowplug clean [<file>] [--delete]
Options:
-h --help Show this message.
-d --delete Delete the unused addons instead of
placing them in `.wowplugcache`
"""
pass
# process main doc
version = pkg_resources.get_distribution("wowplug").version
args = docopt(doc, version=version, options_first=True, help=False)
logging.config.dictConfig({
'version': 1,
'formatters': {
'standard': {
'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
},
'short': {
'format': '%(levelname)s: %(name)s: %(message)s'
},
},
'handlers': {
'default': {
'()': LoggingHandler,
'formatter': 'short', # standard
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': "DEBUG" if args['--debug'] else "INFO",
'propagate': False
},
}
})
if args['-h']:
print(doc.strip("\n"))
sys.exit(0)
if args['--help']:
# concat all docs in cmds
fdoc = "{}\nAvailable commands:\n\n".format(doc)
for cmd, cmd_func in cmds.items():
fdoc += "{}:\n\n{}\n".format(
cmd, indent(cmd_func.__doc__, " ").strip('\n'))
print(fdoc.strip("\n"))
sys.exit(0)
# execute command
from .config import config
config.load_saved_config = not args['--ignore-config']
cmd = args['<command>']
argv = [cmd, ] + args['<args>']
if cmd in cmds:
cmd_func = cmds[cmd]
try:
cmd_func(docopt(cmd_func.__doc__, argv=argv))
except SchemaError as e:
sys.exit("{}\n\n{}".format(
cmderr_fmt.format(e.code),
cmds[cmd].__doc__.strip("\n"),
))
except Exception as e:
f_locals = inspect.trace()[-1][0].f_locals
if 'logger' in f_locals:
logger = f_locals['logger']
elif 'self' in f_locals:
logger = getattr(f_locals['self'], "logger", None)
else:
logger = None
if logger is not None:
sys.exit(logger.exception(e))
else:
raise
else:
sys.exit(cmderr_fmt.format(
"`{}` is not a valid wowplug command. See 'wowplug -h'."
.format(cmd)))
# save config file on exit
config.save()
# some internal stuff
def _sync_args_with_config(args, config, sync_policy):
logger = logging.getLogger(LOGGER_NAME)
fc = []
tc = []
for ak, sp in sync_policy.items():
ck, nm, ff, tf = sp['key'], sp['norm'], sp['from'], sp['to']
av, cv = args[ak], config.get(ck)
if ff(av, cv):
logger.debug("sync {}=`{}` in config {} to `{}`".format(
ck, cv, config.filepath, ak))
args[ak] = config.get(ck)
fc.append(ak)
if tf(av, cv):
logger.debug("sync {}=`{}` to `{}` in config {}".format(
ak, av, ck, config.filepath))
nv = nm(av)
if nv != av:
logger.debug("use {}=`{}` instead of `{}`".format(
ak, nv, av))
args[ak] = nv
config.set(ck, nv)
tc.append(ak)
return args, fc, tc
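# Sketch of the sync_policy contract used above (derived from the call sites, not a spec):
#   '<arg>': {
#       'key':  'section.option',           # dotted path into the config store
#       'norm': expanded_abspath,           # normalizer applied before writing back
#       'from': lambda a, c: a is None,     # pull the config value into args when True
#       'to':   lambda a, c: a is not None, # push the (normalized) arg into config when True
#   }
# The returned fc/tc lists name the args that were pulled from / pushed to the config.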
```
|
{
"source": "JerryMazeyu/SuperChaser",
"score": 3
}
|
#### File: resources/maze/maze_get.py
```python
from resources.maze.maze_generate import Map
from resources.maze.maze_decorate import DecorateMaze
from resources.maze.maze_config import opt
from resources.maze.maze_check import MazeCheck
from random import choice
def getMazeInfo():
boomNum = choice(range(5, 25))
bonusNum = choice(range(2, 5))
lst = []
for i in range(opt.num):
lst.append(Map.run(opt.dim, opt.dim))
dm = DecorateMaze(lst, boomNum, bonusNum)
resultDict = {'maze': dm.getMaze(), 'boomNum': boomNum, 'bonusNum': bonusNum, 'bonusValue': choice(range(5, 50))}
return resultDict
def get_maze_with_loop(blind_num=opt.blind_num, verbose=True):
while True:
maze_info = getMazeInfo()
mc = MazeCheck(maze_info, opt.blind_depth)
matching_num = mc.check()
if matching_num >= blind_num:
if verbose:
dm = DecorateMaze()
dm.showMaze(maze_info['maze'])
return maze_info
if __name__ == '__main__':
# print(getMazeInfo())
get_maze_with_loop()
```
#### File: resources/maze/maze_solver.py
```python
from resources.maze.maze_generate import Map
class MazeSolver(object):
def __init__(self, maze):
self.path = []
self.dirs = [(0, 1), (1, 0), (0, -1), (-1, 0)]
self.maze = maze
def mark(self, pos):
self.maze[pos[0]][pos[1]] = 2
def passable(self, pos):
return self.maze[pos[0]][pos[1]] == 0
def find_path(self, pos, end):
self.mark(pos)
if pos == end:
self.path.append(pos)
return True
for i in range(4):
nextp = pos[0] + self.dirs[i][0], pos[1] + self.dirs[i][1]
# consider the next possible direction
if self.passable(nextp):
if self.find_path(nextp, end):
self.path.append(pos)
return self.path
return False
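# Note on find_path above: mark() stamps visited cells with 2 so they are never revisited,
# passable() only accepts cells still holding 0, and positions are appended while the
# recursion unwinds, so self.path runs from the end back to the start (which is why
# see_path() labels path[0] as "E" and the final element as "S").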
def see_path(self):
for i, p in enumerate(self.path):
if i == 0:
self.maze[p[0]][p[1]] = "E"
elif i == len(self.path) - 1:
self.maze[p[0]][p[1]] = "S"
else:
self.maze[p[0]][p[1]] = 3
print("\n")
for r in self.maze:
for c in r:
if c == 3:
print('\033[0;31m' + "*" + " " + '\033[0m', end="")
elif c == "S" or c == "E":
print('\033[0;34m' + c + " " + '\033[0m', end="")
elif c == 2:
print('\033[0;32m' + "#" + " " + '\033[0m', end="")
elif c == 1:
print('\033[0;;40m' + " " * 2 + '\033[0m', end="")
else:
print(" " * 2, end="")
print()
def run(self, start, end):
self.find_path(start, end)
self.see_path()
if __name__ == '__main__':
dim = 10
maze1 = Map.run(dim, dim)
maze2 = Map.run(dim, dim)
start = (1, 1)
end = (dim*2-1, dim*2-1)
s1 = MazeSolver(maze1)
s1.run(start, end)
s2 = MazeSolver(maze2)
s2.run(start, end)
```
|
{
"source": "jerrymlin/emmet",
"score": 2
}
|
#### File: emmet/borg/icsd_to_mongo.py
```python
import json
import logging
import os
import unicodedata
import warnings
import re
from multiprocessing import Manager, Pool
from atomate.utils.utils import get_meta_from_structure
from monty.io import zopen
from monty.json import MontyDecoder, MontyEncoder
from pymatgen.apps.borg.hive import AbstractDrone
from pymatgen.apps.borg.queen import BorgQueen
from pymatgen.io.cif import CifParser
from pymatgen import Composition, Element
logger = logging.getLogger(__name__)
logging.basicConfig(filename='icsd_to_mongo.log', level=logging.DEBUG)
logging.captureWarnings(capture=True)
# clear previous Error_Record and log
with open('Error_Record', 'w') as err_rec:
err_rec.close()
with open('icsd_to_mongo.log', 'w') as log:
log.close()
class IcsdDrone(AbstractDrone):
def __init__(self):
# filler
self.field = 1
def _assimilate_from_cif(self, cif_path):
# capture any warnings generated by parsing cif file
file_ID = cif_path.split('/')[-1].split(".")[0]
cif_meta = {}
with warnings.catch_warnings(record=True) as w:
cif_parser = CifParser(cif_path)
for warn in w:
if 'cifwarnings' in cif_meta:
cif_meta['cifwarnings'].append(str(warn.message))
else:
cif_meta['cifwarnings'] = [str(warn.message)]
logger.warning('{}: {}'.format(file_ID, warn.message))
cif_dict = cif_parser.as_dict()
orig_id = list(cif_dict.keys())[0]
easy_dict = cif_dict[orig_id]
if '_chemical_name_mineral' in easy_dict:
cif_meta['min_name'] = easy_dict['_chemical_name_mineral']
if '_chemical_name_systematic' in easy_dict:
cif_meta['chem_name'] = easy_dict['_chemical_name_systematic']
if '_cell_measurement_pressure' in easy_dict:
cif_meta['pressure'] = float(
easy_dict['_cell_measurement_pressure'])/1000
else:
cif_meta['pressure'] = .101325
with warnings.catch_warnings(record=True) as w:
try:
struc = cif_parser.get_structures()[0]
except ValueError as err:
# if cif parsing raises error, write icsd_id to Error_Record and do NOT add structure to mongo database
logger.error(file_ID + ': {}'.format(err) +
"\nDid not insert structure into Mongo Collection")
with open('Error_Record', 'a') as err_rec:
err_rec.write(str(file_ID)+': {}\n'.format(err))
err_rec.close()
else:
references = self.bibtex_from_cif(cif_path)
history = [{'name': 'ICSD', 'url': 'https://icsd.fiz-karlsruhe.de/',
'description': {'id': file_ID}}]
cif_meta['references'] = references
cif_meta['history'] = history
atomate_meta = get_meta_from_structure(struc)
# data['nsites'] = meta['nsites']
# data['elements'] = meta['elements']
# data['nelements'] = meta['nelements']
# data['formula'] = meta['formula']
# data['formula_reduced'] = meta['formula_pretty']
# data['formula_reduced_abc'] = meta['formula_reduced_abc']
# data['formula_anonymous'] = meta['formula_anonymous']
# data['chemsys'] = meta['chemsys']
# data['is_valid'] = meta['is_valid']
# data['is_ordered'] = meta['is_ordered']
# unfortunately any warnings are logged after any errors. Not too big of an issue
for warn in w:
if 'cifwarnings' in cif_meta:
cif_meta['cifwarnings'].append(str(warn.message))
else:
cif_meta['cifwarnings'] = [str(warn.message)]
logger.warning('{}: {}'.format(file_ID, warn.message))
return(struc, cif_meta, atomate_meta)
def assimilate(self, path):
"""
Assimilate data in a directory path into a pymatgen object. Because of
the quirky nature of Python"s multiprocessing, the object must support
pymatgen's as_dict() for parallel processing.
Args:
path: directory path
Returns:
An assimilated object
"""
files = os.listdir(path)
file_ID = path.split('/')[-1]
print(file_ID)
cif_path = os.path.join(path, file_ID + '.cif')
struc, cifmetadata, atomate_meta = self._assimilate_from_cif(cif_path)
json_path = os.path.join(path, file_ID + '.json')
metadata = {}
if os.path.exists(json_path):
metadata = self._assimilate_from_crawling(json_path)
icsd_c = Composition(metadata['chemical_formula']).remove_charges().reduced_composition
cif_c = struc.composition.remove_charges().reduced_composition
metadata['consistent_composition'] = cif_c.almost_equals(icsd_c)
metadata['implicit_hydrogen'] = self._has_implicit_H(icsd_c, cif_c)
deuterium_indices = [ind for ind, s in enumerate(
struc.sites) if re.findall(r'[A-Za-z]+', s.species_string)[0] == "D"]
tritium_indices = [ind for ind, s in enumerate(
struc.sites) if re.findall(r'[A-Za-z]+', s.species_string)[0] == "T"]
for i_H in deuterium_indices + tritium_indices:
struc.replace(i_H, "H")
metadata['deuterium_indices'] = deuterium_indices
metadata['tritium_indices'] = tritium_indices
data = {
'structure': struc,
'metadata': metadata,
"cifmetadata": cifmetadata
}
for key, val in atomate_meta.items():
data[key] = val
return(data)
def _has_implicit_H(self, icsd_comp, cif_comp):
icsd = icsd_comp.as_dict()
cif = cif_comp.as_dict()
if 'H' in icsd:
if 'H' in cif:
# Tolerance of 0.1 is derived from
# almost_equals in pymatgen's Composition
if abs(icsd['H'] - cif['H']) > 0.1:
return(True)
else:
return(True)
return(False)
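# Illustration (hypothetical compositions): if the ICSD formula reduces to {'H': 2, 'O': 1}
# but the CIF structure yields {'O': 1}, or both contain H with counts differing by more
# than the 0.1 tolerance, _has_implicit_H returns True; otherwise it returns False.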
def _assimilate_from_crawling(self, json_path):
with open(json_path) as f:
metadata = json.load(f)
refs = []
for key in ['reference', 'reference_1', 'reference_2', 'reference_3']:
refs.append(metadata.pop(key))
refs = list(set(refs))
refs.remove("")
metadata['references'] = refs
return(metadata)
def get_valid_paths(self, path):
"""
Checks if path contains valid data for assimilation, and then returns
the valid paths. The paths returned can be a list of directory or file
paths, depending on what kind of data you are assimilating. For
example, if you are assimilating VASP runs, you are only interested in
directories containing vasprun.xml files. On the other hand, if you are
interested converting all POSCARs in a directory tree to cifs for
example, you will want the file paths.
Args:
path: input path as a tuple generated from os.walk, i.e.,
(parent, subdirs, files).
Returns:
List of valid dir/file paths for assimilation
"""
(parent, subdirs, files) = path
if len(subdirs) != 0:
return [os.path.join(parent, dir_name) for dir_name in subdirs]
else:
return []
def bibtex_from_cif(self, cif_string):
# if input is a cif filename read from file, else assume input is cif string
if cif_string.endswith(".cif"):
cif_dict = CifParser(cif_string).as_dict()
else:
cif_dict = CifParser.from_string(cif_string).as_dict()
orig_id = list(cif_dict.keys())[0]
# more accessible dict
easy_dict = cif_dict[orig_id]
# generate bibTex string
bibtex_str = "@article{"
# use first author's last name as key + year. not sure about this
bibtex_key = easy_dict['_publ_author_name'][0].replace(' ', '')
bibtex_key = bibtex_key[0:bibtex_key.find(',')]
bibtex_key += easy_dict['_citation_year'][0]
bibtex_str += bibtex_key + ",\n"
# add title
bibtex_str += "title = {" + easy_dict['_publ_section_title'] + "},\n"
# add authors
bibtex_str += "author = {" + \
" and ".join(easy_dict['_publ_author_name']) + "},\n"
# add journal title
bibtex_str += "journal = {" + \
easy_dict['_citation_journal_full'][0] + "},\n"
# add year
bibtex_str += "year = {" + easy_dict['_citation_year'][0] + "},\n"
# add volume number
bibtex_str += "volume = {" + \
easy_dict['_citation_journal_volume'][0] + "},\n"
# add pages
bibtex_str += "pages = {" + easy_dict['_citation_page_first'][0] + \
"-" + easy_dict['_citation_page_last'][0] + "},\n"
# add ASTM id
bibtex_str += "ASTM_id = {" + \
easy_dict['_citation_journal_id_ASTM'][0] + "},\n"
# end string and normalize to ascii
bibtex_str += "}"
#bibtex_str = unicodedata.normalize('NFKD', bibtex_str).encode('ascii','ignore')
# print(bibtex_str)
return bibtex_str
class IcsdQueen(BorgQueen):
def __init__(self, drone, rootpath=None, number_of_drones=1):
self._drone = drone
self._num_drones = number_of_drones
self._data = []
if rootpath:
if number_of_drones > 1:
self.parallel_assimilate(rootpath)
else:
self.serial_assimilate(rootpath)
def parallel_assimilate(self, rootpath):
"""
Assimilate the entire subdirectory structure in rootpath.
"""
logger.info('Scanning for valid paths...')
valid_paths = []
for (parent, subdirs, files) in os.walk(rootpath):
valid_paths.extend(self._drone.get_valid_paths(self._drone, (parent, subdirs,
files)))
manager = Manager()
data = manager.list()
status = manager.dict()
status['count'] = 0
status['total'] = len(valid_paths)
logger.info('{} valid paths found.'.format(len(valid_paths)))
p = Pool(self._num_drones)
p.map(self.order_assimilation, ((path, self._drone, data, status)
for path in valid_paths))
for d in data:
self._data.append(json.loads(d, cls=MontyDecoder))
def serial_assimilate(self, rootpath):
"""
Assimilate the entire subdirectory structure in rootpath serially.
"""
valid_paths = []
for (parent, subdirs, files) in os.walk(rootpath):
valid_paths.extend(self._drone.get_valid_paths(self._drone, (parent, subdirs,
files)))
data = []
count = 0
total = len(valid_paths)
for path in valid_paths:
newdata = self._drone.assimilate(self._drone, path)
self._data.append(newdata)
count += 1
logger.info('{}/{} ({:.2f}%) done'.format(count, total,
count / total * 100))
for d in data:
self._data.append(json.loads(d, cls=MontyDecoder))
def order_assimilation(self, args):
"""
Internal helper method for BorgQueen to process assimilation
"""
(path, drone, data, status) = args
newdata = drone.assimilate(drone, path)
if newdata:
data.append(json.dumps(newdata, cls=MontyEncoder))
status['count'] += 1
count = status['count']
total = status['total']
logger.info('{}/{} ({:.2f}%) done'.format(count, total,
count / total * 100))
if __name__ == '__main__':
path_to_dirs = os.getcwd()
IcsdQueen(IcsdDrone, rootpath=path_to_dirs, number_of_drones=1)
```
|
{
"source": "JerryMu/foolbox",
"score": 3
}
|
#### File: foolbox/examples/substituion_model.py
```python
import torchvision.models as models
import eagerpy as ep
from foolbox import PyTorchModel, accuracy, samples
from foolbox.attacks import LinfPGD
from foolbox.attacks.base import get_criterion
if __name__ == "__main__":
# instantiate a model
model = models.resnet18(pretrained=True).eval()
preprocessing = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], axis=-3)
fmodel = PyTorchModel(model, bounds=(0, 1), preprocessing=preprocessing)
# get data and test the model
# wrapping the tensors with ep.astensors is optional, but it allows
# us to work with EagerPy tensors in the following
images, labels = ep.astensors(*samples(fmodel, dataset="imagenet", batchsize=16))
print(accuracy(fmodel, images, labels))
# replace the gradient with the gradient from another model
model2 = fmodel # demo, we just use the same model
# TODO: this is still a bit annoying because we need
# to overwrite run to get the labels
class Attack(LinfPGD):
def value_and_grad(self, loss_fn, x):
val1 = loss_fn(x)
loss_fn2 = self.get_loss_fn(model2, self.labels)
_, grad2 = ep.value_and_grad(loss_fn2, x)
return val1, grad2
def run(self, model, inputs, criterion, *, epsilon, **kwargs):
criterion_ = get_criterion(criterion)
self.labels = criterion_.labels
return super().run(model, inputs, criterion_, epsilon=epsilon, **kwargs)
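# Note on the Attack subclass above: value_and_grad evaluates the PGD loss on the attacked
# model but takes the gradient from model2, i.e. a substitute/transfer-gradient setup;
# run() is overridden only to capture the labels so the second loss function can be built.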
# apply the attack
attack = Attack()
epsilons = [0.0, 0.001, 0.01, 0.03, 0.1, 0.3, 0.5, 1.0]
advs, _, success = attack(fmodel, images, labels, epsilons=epsilons)
# calculate and report the robust accuracy
robust_accuracy = 1 - success.float32().mean(axis=-1)
for eps, acc in zip(epsilons, robust_accuracy):
print(eps, acc.item())
# we can also manually check this
for eps, advs_ in zip(epsilons, advs):
print(eps, accuracy(fmodel, advs_, labels))
# but then we also need to look at the perturbation sizes
# and check if they are smaller than eps
print((advs_ - images).norms.linf(axis=(1, 2, 3)).numpy())
```
#### File: foolbox/attacks/saltandpepper.py
```python
from typing import Optional, Any
import eagerpy as ep
from ..criteria import Misclassification
from ..distances import l2
from ..devutils import flatten
from ..devutils import atleast_kd
from .base import MinimizationAttack
from .base import get_is_adversarial
from .base import get_channel_axis
from ..models.base import Model
from .base import get_criterion
from .base import T
from .base import raise_if_kwargs
class SaltAndPepperNoiseAttack(MinimizationAttack):
"""Increases the amount of salt and pepper noise until the input is misclassified.
Args:
steps : The number of steps to run.
across_channels : Whether the noise should be the same across all channels.
channel_axis : The axis across which the noise should be the same
(if across_channels is True). If None, will be automatically inferred
from the model if possible.
"""
distance = l2
def __init__(
self,
steps: int = 1000,
across_channels: bool = True,
channel_axis: Optional[int] = None,
):
self.steps = steps
self.across_channels = across_channels
self.channel_axis = channel_axis
def run(
self,
model: Model,
inputs: T,
criterion: Misclassification,
*,
early_stop: Optional[float] = None,
**kwargs: Any,
) -> T:
raise_if_kwargs(kwargs)
x0, restore_type = ep.astensor_(inputs)
criterion_ = get_criterion(criterion)
del inputs, criterion, kwargs
is_adversarial = get_is_adversarial(criterion_, model)
N = len(x0)
shape = list(x0.shape)
if self.across_channels and x0.ndim > 2:
if self.channel_axis is None:
channel_axis = get_channel_axis(model, x0.ndim)
else:
channel_axis = self.channel_axis % x0.ndim
if channel_axis is not None:
shape[channel_axis] = 1
min_, max_ = model.bounds
r = max_ - min_
result = x0
is_adv = is_adversarial(result)
best_advs_norms = ep.where(is_adv, ep.zeros(x0, N), ep.full(x0, N, ep.inf))
min_probability = ep.zeros(x0, N)
max_probability = ep.ones(x0, N)
stepsizes = max_probability / self.steps
p = stepsizes
for step in range(self.steps):
# add salt and pepper
u = ep.uniform(x0, tuple(shape))
p_ = atleast_kd(p, x0.ndim)
salt = (u >= 1 - p_ / 2).astype(x0.dtype) * r
pepper = -(u < p_ / 2).astype(x0.dtype) * r
x = x0 + salt + pepper
x = ep.clip(x, min_, max_)
# check if we found new best adversarials
norms = flatten(x).norms.l2(axis=-1)
closer = norms < best_advs_norms
is_adv = is_adversarial(x) # TODO: ignore those that are not closer anyway
is_best_adv = ep.logical_and(is_adv, closer)
# update results and search space
result = ep.where(atleast_kd(is_best_adv, x.ndim), x, result)
best_advs_norms = ep.where(is_best_adv, norms, best_advs_norms)
min_probability = ep.where(is_best_adv, 0.5 * p, min_probability)
# we set max_probability a bit higher than p because the relationship
# between p and norms is not strictly monotonic
max_probability = ep.where(
is_best_adv, ep.minimum(p * 1.2, 1.0), max_probability
)
remaining = self.steps - step
stepsizes = ep.where(
is_best_adv, (max_probability - min_probability) / remaining, stepsizes
)
reset = p == max_probability
p = ep.where(ep.logical_or(is_best_adv, reset), min_probability, p)
p = ep.minimum(p + stepsizes, max_probability)
return restore_type(result)
```
#### File: foolbox/tests/test_dataset_attack.py
```python
from typing import Tuple
import pytest
import eagerpy as ep
import foolbox as fbn
def test_dataset_attack(
fmodel_and_data_ext_for_attacks: Tuple[
Tuple[fbn.Model, ep.Tensor, ep.Tensor], bool
],
) -> None:
(fmodel, x, y), _ = fmodel_and_data_ext_for_attacks
x = (x - fmodel.bounds.lower) / (fmodel.bounds.upper - fmodel.bounds.lower)
fmodel = fmodel.transform_bounds((0, 1))
attack = fbn.attacks.DatasetAttack()
attack.feed(fmodel, x)
assert fbn.accuracy(fmodel, x, y) > 0
advs, _, success = attack(fmodel, x, y, epsilons=None)
assert success.shape == (len(x),)
assert success.all()
assert fbn.accuracy(fmodel, advs, y) == 0
with pytest.raises(ValueError, match="unknown distance"):
attack(fmodel, x, y, epsilons=[500.0, 1000.0])
attack = fbn.attacks.DatasetAttack(distance=fbn.distances.l2)
attack.feed(fmodel, x)
advss, _, success = attack(fmodel, x, y, epsilons=[500.0, 1000.0])
assert success.shape == (2, len(x))
assert success.all()
assert fbn.accuracy(fmodel, advss[0], y) == 0
assert fbn.accuracy(fmodel, advss[1], y) == 0
with pytest.raises(TypeError, match="unexpected keyword argument"):
attack(fmodel, x, y, epsilons=None, invalid=True)
```
|
{
"source": "JerryMXB/LTR_Cascade",
"score": 3
}
|
#### File: Gov2/data/select_topics.py
```python
from __future__ import print_function
import argparse
import fileinput
from tqdm import tqdm
def get_topics(spec, from_file=False):
result = []
if from_file:
result.extend(map(str, [int(l.strip()) for l in open(spec)]))
else:
for term in spec.strip().split(','):
if '-' in term:
a, b = map(int, term.split('-'))
result.extend(map(str, range(a, b + 1)))
else:
result.append(term)
return result
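# Illustration (assumed inputs): get_topics('701-703,710') returns
# ['701', '702', '703', '710']; with --from-file the spec is instead a path to a file
# holding one integer topic id per line.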
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--from-file', action='store_true', help='load topics from file')
parser.add_argument('topic_spec', help='topic ranges or file input (with --from-file)')
parser.add_argument('input_files', metavar='input', nargs='+', help='input SVMLight files')
args = parser.parse_args()
selected_topics = get_topics(args.topic_spec, from_file=args.from_file)
buf = {t: [] for t in selected_topics}
for line in tqdm(fileinput.input(args.input_files), desc='read the input topics'):
if line.startswith('#'):
continue
_, qid, _ = line.split(None, 2)
assert qid.startswith('qid:')
if qid[4:] in buf:
buf[qid[4:]].append(line)
for topic in tqdm(selected_topics, desc='write to the output'):
for line in buf[topic]:
print(line, end='')
```
#### File: python/core/cascade.py
```python
from __future__ import print_function
import logging
import math
import numpy as np
from sklearn.datasets import load_svmlight_file
from sklearn.externals import joblib
from . import io
import warnings
warnings.filterwarnings("ignore")
def load_data_file(filename):
if filename.endswith('.npz'):
return io.load_npz(filename)
else:
x, y, qid = load_svmlight_file(filename, query_id=True)
return x, y, qid, None  # svmlight files carry no docnos; callers expect a 4-tuple
def load_data(train_file, validation_file, test_file, scaler=None):
"""Prepare training/validation/test data."""
x_train, y_train, qid_train, docno_train = load_data_file(train_file)
logging.info('Load %s: x_train %s, y_train %s, qid_train %s' %
(train_file, x_train.shape, y_train.shape, qid_train.shape))
x_test, y_test, qid_test, docno_test = load_data_file(test_file)
logging.info('Load %s: x_test %s, y_test %s, qid_test %s' %
(test_file, x_test.shape, y_test.shape, qid_test.shape))
assert x_test.shape[1] == x_train.shape[1]
x_valid, y_valid, qid_valid, docno_valid = None, None, None, None
if validation_file:
x_valid, y_valid, qid_valid, docno_valid = load_data_file(validation_file)
logging.info('Load %s: x_valid %s, y_valid %s, qid_valid %s' %
(validation_file, x_valid.shape, y_valid.shape, qid_valid.shape))
assert x_valid.shape[1] == x_train.shape[1]
if scaler:
scaler.fit_transform(x_train)
scaler.transform(x_test)
if x_valid is not None:
scaler.transform(x_valid)
y_train.flags.writeable = False
qid_train.flags.writeable = False
y_test.flags.writeable = False
qid_test.flags.writeable = False
if x_valid is not None:
y_valid.flags.writeable = False
qid_valid.flags.writeable = False
return ((x_train, y_train, qid_train, docno_train),
(x_valid, y_valid, qid_valid, docno_valid),
(x_test, y_test, qid_test, docno_test))
def load_costs_data(costs_file, importance_file, n_features):
"""Load costs/importance data."""
costs = np.loadtxt(costs_file) if costs_file else np.ones(n_features)
logging.info('Load %s: costs %s' % (costs_file, costs.shape))
importance = np.loadtxt(importance_file) if importance_file else np.ones(n_features)
logging.info('Load %s: importance %s' % (importance_file, importance.shape))
if costs.shape[0] > n_features:
costs = np.resize(costs, n_features)
logging.info('costs resized to match n_features %i' % n_features)
if importance.shape[0] > n_features:
importance = np.resize(importance, n_features)
logging.info('importance resized to match n_features %i' % n_features)
costs.flags.writeable = False
importance.flags.writeable = False
return costs, importance
def load_model(filename):
"""Load model from file."""
model = joblib.load(filename)
print('Model loaded from %s' % filename)
return model
def save_model(model, filename):
"""Save the model to file."""
joblib.dump(model, filename)
print('Model saved to %s' % filename)
def predict(cascade, x, qid, score_update):
"""Run prediciton using the cascade"""
state = init_predict(x)
results = []
for stage in cascade:
new_state = partial_predict(stage, state, x, qid, score_update)
results.append(new_state)
state = new_state
return results
def init_predict(x):
return {'preds': np.zeros(x.shape[0], dtype=float),
'indexes': np.arange(x.shape[0], dtype=int),
'extract_counts': np.zeros(x.shape[1], dtype=int)}
def partial_predict(stage, state, x, qid, score_update):
"""Run partial prediction by executing one cascade stage"""
prune, model = stage
if prune is None:
indexes = state['indexes'].copy()
else:
pruned = []
for a, b in group_offsets(qid[state['indexes']]):
idx = state['indexes'][a:b]
ranked_idx = idx[np.argsort(state['preds'][idx])[::-1]]
pruned.extend(prune(ranked_idx))
indexes = np.array(sorted(pruned))
# extracted features will not receive more counts
new_counts = (state['extract_counts'] == 0).astype(int) * model.get_feature_mask() * indexes.size
extract_counts = state['extract_counts'] + new_counts
scores = model.predict(x[indexes])
preds = score_update(state['preds'], indexes, scores)
return {'preds': preds, 'indexes': indexes, 'extract_counts': extract_counts}
def print_trec_run(output, preds, y, qid, docno=None, run_id='exp'):
for a, b in group_offsets(qid):
sim = preds[a:b].copy()
if docno is None:
docno_string = ['%s.%d' % (qid[a], i) for i in range(1, b - a + 1)]
else:
docno_string = docno[a:b]
ranked_list = sorted(zip(docno_string, sim), key=lambda x: x[1], reverse=True)
for rank, (d, s) in enumerate(ranked_list, 1):
output.write('%s Q0 %s %i %f %s\n' % (qid[a], d, rank, s, run_id))
def group_counts(arr):
d = np.ones(arr.size, dtype=int)
d[1:] = (arr[:-1] != arr[1:]).astype(int)
return np.diff(np.where(np.append(d, 1))[0])
def group_offsets(arr):
"""Return a sequence of start/end offsets for the value subgroups in the input"""
d = np.ones(arr.size, dtype=int)
d[1:] = (arr[:-1] != arr[1:]).astype(int)
idx = np.where(np.append(d, 1))[0]
return zip(idx, idx[1:])
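# Worked example (illustrative): for qid = np.array([1, 1, 2, 2, 2, 3]),
# group_counts(qid) gives [2, 3, 1] and list(group_offsets(qid)) gives
# [(0, 2), (2, 5), (5, 6)], i.e. one (start, end) slice per query block.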
# Score update classes
#
class AdditiveUpdate(object):
def __call__(self, preds, indexes, update):
new_preds = preds.copy()
new_preds[indexes] = new_preds[indexes] + update # to work around the qfunc 'add' bug
return new_preds
class UpshiftUpdate(object):
def __init__(self, gap):
self.gap = gap
def __call__(self, preds, indexes, update):
diff = max(0, preds.max() + self.gap - update.min()) # mind the gap
new_preds = preds.copy()
new_preds[indexes] = update + diff
return new_preds
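# Illustration (made-up numbers): with gap=0.1, if the previous stage's preds peak at 3.0
# and the new stage's scores bottom out at 0.5, diff = 3.0 + 0.1 - 0.5 = 2.6, so every
# re-scored document ends up strictly above all documents scored only by earlier stages.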
class ResetUpdate(UpshiftUpdate):
def __call__(self, preds, indexes, update):
return super(ResetUpdate, self).__call__(np.zeros_like(preds), indexes, update)
# Prune
#
class Prune(object):
def __init__(self, rank=None, beta=None):
self.rank = rank
self.beta = beta
def __call__(self, arr):
if self.rank:
cutoff = self.rank
elif self.beta:
cutoff = int(math.ceil(len(arr) * self.beta))
else:
cutoff = None
return arr[:cutoff]
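# Illustration (made-up sizes): Prune(rank=10) keeps the top 10 ranked indexes per query,
# while Prune(beta=0.3) on a 20-document list keeps ceil(20 * 0.3) = 6; with neither set,
# the ranked list passes through unpruned (cutoff None).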
# Model classes
#
class SGDClassifierModel(object):
def __init__(self, model):
self.model = model
def get_feature_mask(self):
return (self.model.coef_[0] != 0).astype(int)
def predict(self, x):
return self.model.decision_function(x)
class LinearModel(object):
def __init__(self, coef):
self.coef = coef.copy()
def get_feature_mask(self):
return (self.coef != 0).astype(int)
def predict(self, x):
return np.dot(x, self.coef)
class TreeModel(object):
def __init__(self, model, score_function, class_weights, n_features):
self.model = model
self.score_function = score_function
self.class_weights = class_weights
self.n_features = n_features
def get_feature_mask(self):
mask = np.zeros(self.n_features, dtype=int)
for k in self.model.get_score():
mask[int(k[1:])] = 1
return mask
def predict(self, x):
import xgboost as xgb
dm = xgb.DMatrix(x.toarray())
return self.score_function(self.model.predict(dm), self.class_weights)
class SVMModel(object):
def __init__(self, model, score_function, class_weights, n_features):
self.model = model
self.score_function = score_function
self.class_weights = class_weights
self.n_features = n_features
def get_feature_mask(self):
return (self.model.coef_[0] != 0).astype(int)
def predict(self, x):
return self.score_function(self.model.predict(x), self.class_weights)
```
#### File: LTR_Cascade/python/GBDT.py
```python
from __future__ import print_function
import ast
import baker
import logging
import math
import numpy as np
import scipy.sparse
from sklearn.externals import joblib
from tqdm import trange
import core
from core.cascade import load_data_file, load_data, load_model, save_model
from core.utils import group_counts
from core.metrics import test_all, test_ndcg
# This is for suppressing the warning messages from xgboost
import warnings
warnings.filterwarnings("ignore")
import xgboost as xgb # noqa
def load_DMatrix(x, y, cache_file=None):
import os.path
if cache_file is not None:
if os.path.exists(cache_file):
logging.info("Load cache '{}'".format(cache_file))
return xgb.DMatrix(cache_file)
else:
logging.info("Write to cache '{}'".format(cache_file))
dm = xgb.DMatrix(x, y)
dm.save_binary(cache_file)
return dm
else:
logging.info("No cache")
return xgb.DMatrix(x, y)
def train(train_data, valid_data, score_function, class_weights,
params, trees, nodes, features=None, set_classes=False,
train_cache=None, valid_cache=None):
x_train, y_train, qid_train, _ = train_data
x_valid, y_valid, qid_valid, _ = valid_data
if features is None:
# dtrain = xgb.DMatrix(x_train, y_train)
dtrain = load_DMatrix(x_train, y_train, train_cache)
dvalid = None
if x_valid is not None:
# dvalid = xgb.DMatrix(x_valid, y_valid)
dvalid = load_DMatrix(x_valid, y_valid, valid_cache)
else:
non_open_features = np.setdiff1d(np.arange(x_train.shape[1]), features)
# hide non-open features
x_train_prime = x_train.copy()
x_train_prime[:, non_open_features] = 0
# dtrain = xgb.DMatrix(x_train_prime, y_train)
dtrain = load_DMatrix(x_train_prime, y_train, train_cache)
dvalid = None
if x_valid is not None:
x_valid_prime = x_valid.copy()
x_valid_prime[:, non_open_features] = 0
# dvalid = xgb.DMatrix(x_valid_prime, y_valid)
dvalid = load_DMatrix(x_valid_prime, y_valid, valid_cache)
if dvalid:
watchlist = [(dvalid, 'eval'), (dtrain, 'train')] # remove if the output is too verbose
else:
watchlist = [(dtrain, 'train')]
if set_classes:
params['num_class'] = np.unique(y_train).shape[0]
best_sc = -1000
best_params = None
bst = None
def tqdm_integration(tbar):
def callback(env):
tbar.update()
tbar.set_description(' '.join(['{}:{}'.format(k, v) for k, v in env.evaluation_result_list]))
if env.iteration == env.end_iteration:
tbar.close()
return callback
for t in trees:
for n in nodes:
params['max_depth'] = int(math.ceil(math.log(n, 2))) # NOTE: max_depth is automatically set
logging.info('Training with %i trees and %i depth' % (t, n))
logging.info('Params %s' % params)
# model = xgb.train(params, dtrain, t, watchlist)
with trange(t) as tbar:
model = xgb.train(params, dtrain, t, watchlist, verbose_eval=False, callbacks=[tqdm_integration(tbar)])
if dvalid:
predictions = score_function(model.predict(dvalid), class_weights)
sc = test_ndcg(predictions, y_valid, qid_valid) # this groups the validation queries each time, redo
else:
predictions = score_function(model.predict(dtrain), class_weights)
sc = test_ndcg(predictions, y_train, qid_train)
if sc > best_sc:
bst = model
best_sc = sc
best_params = params.copy()
best_params['n_trees'] = t
best_params['n_nodes'] = n # NOTE: for reference
if hasattr(bst, 'set_attr'):
bst.set_attr(**{k: str(v) for k, v in best_params.items()})
return bst
def add_original_order_as_feature(data):
x, _, qid, _ = data
feature = np.concatenate([np.linspace(0, 1, c + 1)[-1:0:-1] for c in group_counts(qid)])
sparse_feature = scipy.sparse.csr_matrix(feature.reshape((feature.size, 1)))
return scipy.sparse.hstack((x, sparse_feature))
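# Illustration: for a query block of 4 documents, np.linspace(0, 1, 5)[-1:0:-1] yields
# [1.0, 0.75, 0.5, 0.25], so the appended sparse column encodes the original ordering
# as a linearly decreasing score per query.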
def predict(model, test_data, score_function, class_weights, output_trec_run=None):
x_test, y_test, qid_test, docno_test = test_data
dtest = xgb.DMatrix(x_test)
preds = score_function(model.predict(dtest), class_weights)
test_metrics = test_all(preds, y_test, qid_test, 1)
print(test_metrics)
if output_trec_run:
with open(output_trec_run, 'w') as output:
core.cascade.print_trec_run(output, preds, y_test, qid_test, docno_test)
print('Result saved to %s' % output_trec_run)
def train_tree_ranker(train_file, validation_file, test_file, model_prefix,
score_function, params, trees, nodes, set_classes=False,
add_original_order=False):
train_data, valid_data, test_data = load_data(train_file, validation_file, test_file)
if add_original_order:
# FIXME: quick hack
logging.info('The original-order hack is applied to all data')
train_data = (add_original_order_as_feature(train_data), train_data[1], train_data[2], train_data[3])
if valid_data[0] is not None:
valid_data = (add_original_order_as_feature(valid_data), valid_data[1], valid_data[2], valid_data[3])
if test_data[0] is not None:
test_data = (add_original_order_as_feature(test_data), test_data[1], test_data[2], test_data[3])
class_weights = core.get_class_weights(train_data[1])
model = train(train_data, valid_data, score_function, class_weights,
params, trees, nodes, set_classes=set_classes)
if model_prefix:
save_model(model, model_prefix)
predict(model, test_data, score_function, class_weights)
@baker.command(name='train_GBRT')
def do_train_GBRT(train_file, validation_file, test_file, model_prefix=None, learning_rate="0.1",
silent=True, subsample="0.5", trees="[5,10,20,50,1000]", nodes="[32]",
add_original_order=False):
"""Train a gradient-boosting regression tree"""
params = {'eta': ast.literal_eval(learning_rate),
'silent': silent,
'objective': 'reg:linear',
'eval_metric': 'rmse',
'subsample': ast.literal_eval(subsample)}
train_tree_ranker(train_file, validation_file, test_file, model_prefix,
core.get_score, params, ast.literal_eval(trees),
ast.literal_eval(nodes), set_classes=False,
add_original_order=add_original_order)
@baker.command(name='train_GBDT')
def do_train_GBDT(train_file, validation_file, test_file, model_prefix=None, learning_rate="0.1",
silent=True, subsample="0.5", trees="[5,10,20,50,1000]", nodes="[32]",
add_original_order=False):
"""Train a gradient-boosting decision tree"""
params = {'eta': ast.literal_eval(learning_rate),
'silent': silent,
'objective': 'multi:softprob',
'eval_metric': 'mlogloss',
'subsample': ast.literal_eval(subsample)}
train_tree_ranker(train_file, validation_file, test_file, model_prefix,
core.get_score_multiclass, params, ast.literal_eval(trees),
ast.literal_eval(nodes), set_classes=True,
add_original_order=add_original_order)
@baker.command(name='train_LambdaMART')
def do_train_LambdaMART(train_file, validation_file, test_file, model_prefix=None, learning_rate="0.1",
silent=True, subsample="0.5", trees="[5,10,20,50,1000]", nodes="[32]",
add_original_order=False):
"""Train a LambdaMART model"""
params = {'eta': ast.literal_eval(learning_rate),
'silent': silent,
'objective': 'rank:pairwise',
'eval_metric': 'rmse',
'subsample': ast.literal_eval(subsample)}
train_tree_ranker(train_file, validation_file, test_file, model_prefix,
core.get_score, params, ast.literal_eval(trees),
ast.literal_eval(nodes), set_classes=False,
add_original_order=add_original_order)
@baker.command(name='predict_GBRT')
def do_predict_GBRT(test_file, model_file, output_trec_run=None, add_original_order=False):
"""Run prediction with a saved model"""
test_data = load_data_file(test_file)
if add_original_order:
test_data = (add_original_order_as_feature(test_data), test_data[1], test_data[2], test_data[3])
model = load_model(model_file)
predict(model, test_data, core.get_score, None, output_trec_run=output_trec_run)
@baker.command(name='predict_GBDT')
def do_predict_GBDT(test_file, model_file, output_trec_run=None, add_original_order=False):
"""Run prediction with a saved model"""
test_data = load_data_file(test_file)
if add_original_order:
test_data = (add_original_order_as_feature(test_data), test_data[1], test_data[2], test_data[3])
model = load_model(model_file)
class_weights = core.get_class_weights(test_data[1]) # FIXME: shouldn't peek into this
predict(model, test_data, core.get_score_multiclass, class_weights,
output_trec_run=output_trec_run)
@baker.command(name='predict_LambdaMART')
def do_predict_LambdaMART(test_file, model_file, output_trec_run=None, add_original_order=False):
"""Run prediction with a saved model"""
test_data = load_data_file(test_file)
if add_original_order:
test_data = (add_original_order_as_feature(test_data), test_data[1], test_data[2], test_data[3])
model = load_model(model_file)
predict(model, test_data, core.get_score, None, output_trec_run=output_trec_run)
@baker.command
def dump_importance(model_file, max_fid, importance_type='weight'):
"""Dump feature importance scores.
Args:
model_file: the model file
max_fid: max. feature id
importance_type: 'weight' or 'gain'
"""
bst = joblib.load(model_file)
score_map = bst.get_score(importance_type=importance_type)
score_map = {int(k[1:]): float(v) for k, v in score_map.items()}
for i in range(int(max_fid)):
print(int(score_map.get(i, 0)))
@baker.command
def info(model_file, costs_file=None):
bst = joblib.load(model_file)
fids = sorted([int(k[1:]) for k in bst.get_fscore()])
print('params', vars(bst))
if hasattr(bst, 'attributes'):
print('attributes', bst.attributes())
print('n_features', len(fids))
print('feature list', fids)
if costs_file:
from core.cascade import load_costs_data
costs, _ = load_costs_data(costs_file, None, max(fids) + 1)
mask = np.zeros(costs.size, dtype=int)
np.put(mask, fids, 1)
print('cost %d' % np.dot(costs, mask))
if __name__ == "__main__":
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
level=logging.INFO)
baker.run()
```
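The training commands above take their hyperparameters as strings so they can be passed on the baker command line, then decode them with `ast.literal_eval`. A minimal sketch of that decoding step, with illustrative values only:
```python
import ast

# String-typed defaults, mirroring do_train_GBDT above (the values here are illustrative).
learning_rate, subsample = "0.1", "0.5"
trees, nodes = "[5,10,20,50,1000]", "[32]"

eta = ast.literal_eval(learning_rate)      # -> 0.1 (float)
tree_grid = ast.literal_eval(trees)        # -> [5, 10, 20, 50, 1000]
node_grid = ast.literal_eval(nodes)        # -> [32]
assert isinstance(eta, float) and isinstance(tree_grid, list)
```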
#### File: LTR_Cascade/python/WLM11Cascade.py
```python
from __future__ import print_function
import ast
import baker
import logging
import math
import numpy as np
from sklearn.preprocessing import MaxAbsScaler
from tqdm import tqdm
import core
from core.cascade import load_data, load_data_file, load_costs_data, load_model, save_model, group_counts, group_offsets
from core.metrics import test_all, test_ndcg
def _predict(cascade, x, qid, return_stages=False):
"""Run prediciton"""
preds, indexes = _init_predict(x)
if return_stages:
stagewise_results = []
for stage in cascade:
result = _partial_predict(stage, preds, indexes, x, qid)
stagewise_results.append(result)
preds, indexes = result
return stagewise_results
else:
for stage in cascade:
preds, indexes = _partial_predict(stage, preds, indexes, x, qid)
return preds, indexes
def _init_predict(x):
"""Initialze the predictions and indexes"""
preds = np.full(x.shape[0], -1, dtype=float)
indexes = np.arange(x.shape[0], dtype=int)
return preds, indexes
def _partial_predict(stage, preds, indexes, x, qid):
"""Run partial prediction by executing one cascade stage"""
prune, model = stage
if prune:
new_indexes = []
for a, b in group_offsets(qid[indexes]):
idx = indexes[a:b]
ranked_idx = idx[np.argsort(preds[idx])[::-1]]
cutoff = int(math.ceil(prune['beta'] * (b - a))) # prevent generating empty ranked lists
if cutoff == 0:
print(ranked_idx, prune['beta'], b - a)
new_indexes.extend(ranked_idx[:cutoff])
new_indexes = np.array(sorted(new_indexes))
else:
new_indexes = indexes.copy()
new_preds = preds.copy()
new_scores = np.dot(x[new_indexes], model)
    new_preds[new_indexes] = new_preds[new_indexes] + new_scores  # to work around the numpy ufunc 'add' bug
return new_preds, new_indexes
def predict(cascade, test_data, costs, output_trec_run=None, output_eval=None):
"""Run prediction using the cascade."""
x, y, qid, docno = test_data
x = x.toarray()
# NOTE: the cost-aware evaluation protocol is implemented differently here.
# `extracted_count` is currently stagewise and does not keep track of
# previously extracted features. So to compute the total cascade cost, we
# need to add all the stagewise costs together.
cost_spent_weighted = 0
stagewise_results = _predict(cascade, x, qid, return_stages=True)
for i, ((prune, model), (preds, indexes)) in enumerate(zip(cascade, stagewise_results)):
test_metrics = test_all(preds, y, qid, 1)
print('stage %i: '
'test ERR@5/10/20 %0.4f/%0.4f/%0.4f, '
'test NDCG@5/10/20 %0.4f/%0.4f/%0.4f, '
'test P@5/10/20 %0.4f/%0.4f/%0.4f' %
(i,
test_metrics['err@5'], test_metrics['err@10'], test_metrics['err@20'],
test_metrics['ndcg@5'], test_metrics['ndcg@10'], test_metrics['ndcg@20'],
test_metrics['p@5'], test_metrics['p@10'], test_metrics['p@20']))
n_used_features = len(np.flatnonzero(model))
n_active_docs = len(indexes)
extracted_count = (model != 0).astype(float) * len(indexes)
# NOTE: note the +=
cost_spent_weighted += np.sum(costs * extracted_count)
print(' weighted L1 %f, cascade features %i, num docs %i, cascade cost %0.2f' %
(np.nan,
n_used_features,
n_active_docs,
cost_spent_weighted / float(x.shape[0])))
if output_trec_run:
        with open(output_trec_run, 'wb') as output:
core.cascade.print_trec_run(output, stagewise_results[-1][0], y, qid, docno)
logging.info('TREC run saved to %s' % output_trec_run)
def train(train_data, valid_data, costs, importance, n_stages=0,
gamma=0.1, beta_values=[1.0], use_query_features=False):
"""Learn one ranker with SGD and L1 regularization.
Args:
n_stages: number of rankers in the cascade
strategies: a dict of callback functions
"""
x_train, y_train, qid_train, _ = train_data
x_train = x_train.toarray()
# FIXME: validation data manually turned off
# for weird reasons, validation based early stopping doesn't work well
valid_data = None
if valid_data:
x_valid, y_valid, qid_valid, _ = valid_data
x_valid = x_valid.toarray()
n_queries = np.unique(qid_train).shape[0]
n_features = x_train.shape[1]
    n_stages = n_stages or n_features  # default to n_features when n_stages is 0 or None
weights = np.ones(n_queries, dtype=float) / n_queries
C_cascade = np.zeros(n_queries, dtype=float)
cascade = []
# NOTE: gamma is normalized by the maximum cost times the number of docs
max_cost = max(np.max(costs), 1)
C_normalizer = float(max_cost) * x_train.shape[0]
best_perf_train, best_perf_valid = -np.inf, -np.inf
best_cascade = None
# The cascade doesn't like query features...
features = []
if use_query_features:
for j, _ in enumerate(costs):
features.append(j)
else:
for j, _ in enumerate(costs):
for a, b in group_offsets(qid_train):
if (x_train[a:b, j] != x_train[a, j]).any():
features.append(j)
break
used_fids = []
preds, indexes = _init_predict(x_train)
for _ in range(n_stages):
best_weighted_perf = -np.inf
best_stage = None
for k in tqdm(features, 'scan through features'):
if k in used_fids:
continue
weak_ranker = np.zeros(n_features, dtype=float)
weak_ranker[k] = 1
# for beta in np.linspace(0, 1, 4)[1:]:
for beta in beta_values:
prune = {'beta': beta}
new_preds, new_indexes = _partial_predict((prune, weak_ranker),
preds, indexes, x_train, qid_train)
# Eq. (6) in Wang et al. (2011)
E = np.array(test_ndcg(new_preds, y_train, qid_train, average=False))
C = costs[k] * group_counts(qid_train[new_indexes]) / C_normalizer
try:
term1 = np.sum(weights * E / (1 - gamma * C)) # phi_t
term2 = np.sum(weights / (1 - gamma * C))
except Exception as e:
print(weights.shape, E.shape, C.shape)
print(np.unique(qid_train[new_indexes]).shape)
raise e
weighted_perf = term1 ** 2 - term2 ** 2
if weighted_perf > best_weighted_perf:
best_stage = {'J': prune, 'H': weak_ranker, 'E': E, 'C': C, 'fid': k}
best_weighted_perf = weighted_perf
if not best_stage:
break
S = best_stage
alpha = 0.5 * math.log(
np.sum(weights * (1 + S['E']) / (1 - gamma * S['C'])) /
np.sum(weights * (1 - S['E']) / (1 - gamma * S['C']))
)
S['alpha'] = alpha
S['H'] *= alpha
print('J:', S['J'], 'fid:', S['fid'] + 1) # the internal fid is 0 based
print('H:', S['H'].nonzero(), '(values)', S['H'][S['H'].nonzero()])
stage = (S['J'], S['H'])
cascade.append(stage)
# update feature sets and cascade cost
used_fids.append(S['fid'])
C_cascade = C_cascade + S['C']
# update predictions and indexes
# preds, indexes = _predict(cascade, x_train, qid_train)
new_preds, new_indexes = _partial_predict(stage, preds, indexes, x_train, qid_train)
print('preds', preds[:5], 'new_preds', new_preds[:5])
preds = new_preds
indexes = new_indexes
# update cascade effectiveness
E_cascade = np.array(test_ndcg(preds, y_train, qid_train, average=False))
perf_train = E_cascade.mean()
if valid_data:
perf_valid = test_ndcg(_predict(cascade, x_valid, qid_valid)[0],
y_valid, qid_valid, average=True)
else:
perf_valid = np.nan
print('train ndcg %0.4f, valid ndcg %0.4f' % (perf_train, perf_valid))
if perf_train <= best_perf_train: # NOTE: stop early when performance plateaued
break
best_perf_train = perf_train
if valid_data:
if perf_valid > best_perf_valid:
best_perf_valid = perf_valid
best_cascade = list(cascade)
else:
best_cascade = list(cascade)
new_weights = np.exp(-E_cascade + gamma * C_cascade)
weights = new_weights / new_weights.sum()
# print('weight', weights[:10])
return best_cascade
def build_wlm11_cascade(train_file, validation_file, test_file, costs_file=None,
importance_file=None, model_prefix=None, **kwargs):
"""Train a cascade over a partition of disjoint feature sets."""
train_data, valid_data, test_data = load_data(
train_file, validation_file, test_file, scaler=MaxAbsScaler(copy=False))
costs, importance = load_costs_data(
costs_file, importance_file, n_features=train_data[0].shape[1])
# NOTE: costs has to be untainted (make copy before passing it to functions)
cascade = train(train_data, valid_data, costs.copy(), importance.copy(), **kwargs)
if model_prefix:
save_model(cascade, model_prefix)
predict(cascade, test_data, costs.copy())
@baker.command(name='train')
def WLM11(train_file, validation_file, test_file, costs_file=None, importance_file=None,
model_prefix=None, n_stages=0, gamma="0.1", beta_values="[0.5, 0.33, 0.25, 0.2, 0.1]",
use_query_features=False):
"""Train a cascade accoring to the algorithm in Wang et al. (2011)"""
build_wlm11_cascade(train_file, validation_file, test_file, costs_file, importance_file,
model_prefix=model_prefix, n_stages=n_stages,
gamma=ast.literal_eval(gamma), beta_values=ast.literal_eval(beta_values),
use_query_features=use_query_features)
@baker.command(name='predict')
def do_predict(test_file, costs_file, model_file, output_trec_run=None, output_eval=None, train_file=None):
"""Run prediction with a saved cascade"""
test_data = load_data_file(test_file)
costs, _ = load_costs_data(costs_file, None, n_features=test_data[0].shape[1])
cascade = load_model(model_file)
# FIXME: scaler needs to be saved along the cascade
if train_file:
train_data = load_data_file(train_file)
scaler = MaxAbsScaler(copy=False)
scaler.fit(train_data[0])
scaler.transform(test_data[0])
logging.info('Data scaled')
if 'scaler' in cascade:
cascade['scaler'].transform(test_data[0])
predict(cascade, test_data, costs,
output_trec_run=output_trec_run, output_eval=output_eval)
@baker.command(name='info')
def do_info(model_file):
cascade = load_model(model_file)
for i, (prune, stage) in enumerate(cascade, 1):
k = np.flatnonzero(stage)
print('stage', i, 'prune', prune, 'fid', k + 1, 'weight', stage[k]) # fid is 0 based
if __name__ == "__main__":
logging.basicConfig(
format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
baker.run()
```
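A toy illustration of the per-query pruning rule used in `_partial_predict` above: within each query, documents are ranked by their current predictions and only the top `ceil(beta * n)` positions survive to the next stage. The scores below are made up for illustration:
```python
import math
import numpy as np

preds = np.array([0.9, 0.1, 0.5, 0.7, 0.2])  # current scores for one query's documents
indexes = np.arange(5)                       # document positions still alive in the cascade
beta = 0.5                                   # keep the top half of the ranked list

ranked = indexes[np.argsort(preds[indexes])[::-1]]  # best first -> [0, 3, 2, 4, 1]
cutoff = int(math.ceil(beta * len(indexes)))        # ceil(0.5 * 5) = 3
survivors = np.array(sorted(ranked[:cutoff]))       # -> [0, 2, 3]
print(survivors)
```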
|
{
"source": "Jerrynicki/jrery-bot",
"score": 3
}
|
#### File: jrery-bot/commands/neofetch.py
```python
import discord
import discord.ext.commands as commands
import subprocess
class Neofetch(commands.Cog):
def __init__(self, bot, timeouts, generic_responses):
self.bot = bot
self.timeouts = timeouts
self.generic_responses = generic_responses
@commands.command()
async def neofetch(self, ctx):
"""Post output of neofetch to the channel"""
if self.timeouts.is_timeout(ctx.message.channel.id, "neofetch"):
await ctx.send(content=self.generic_responses["timeout"])
else:
self.timeouts.add_timeout(ctx.message.channel.id, "neofetch", 5)
full_output = ""
# sed removes all ANSI codes from output
with subprocess.Popen("neofetch | sed -r \"s/\\x1b\\[[0-9;]*[a-zA-Z]//g\"", stdout=subprocess.PIPE, shell=True) as neofetch_proc:
full_output += neofetch_proc.stdout.read().decode("ascii")
# get rid of wHaCkY characters
full_output = full_output.replace("\x1b[?25l\x1b[?7l", "")
full_output = full_output.replace("\x1b[?25h\x1b[?7h", "")
await ctx.send("```\n" + full_output + "```")
```
#### File: jrery-bot/commands/reminders.py
```python
import discord
import discord.ext.commands as commands
import time as time_module
import sys
import pickle
import asyncio
sys.path.insert(0, "../")
import util
class Reminders(commands.Cog):
def __init__(self, bot, timeouts, generic_responses):
self.bot = bot
self.timeouts = timeouts
self.generic_responses = generic_responses
try:
self.reminders = pickle.load(open("user_data/reminders.pickle", "rb"))
except FileNotFoundError:
self.reminders = list()
self.reminders_changed = False
try:
self.reminders_blocklist = pickle.load(open("user_data/reminders_blocklist.pickle", "rb"))
except FileNotFoundError:
self.reminders_blocklist = dict()
self.reminders_blocklist_changed = False
bot.loop.create_task(self.reminders_autoflush())
bot.loop.create_task(self.reminders_remind())
async def reminders_autoflush(self):
while True:
await asyncio.sleep(20)
if self.reminders_changed:
pickle.dump(self.reminders, open("user_data/reminders.pickle", "wb"))
self.reminders_changed = False
if self.reminders_blocklist_changed:
pickle.dump(self.reminders_blocklist, open("user_data/reminders_blocklist.pickle", "wb"))
self.reminders_blocklist_changed = False
async def reminders_remind(self):
while True:
await asyncio.sleep(10)
current_time = time_module.time()
reminders = self.reminders.copy()
for reminder in reminders:
try:
if reminder[1] < current_time:
user = self.bot.get_user(reminder[0])
user_dm = await user.create_dm()
if len(reminder) == 3: # old reminders without a message link
await user_dm.send("привет. you wanted me to remind you of:\n```" + reminder[2] + "```(no message link available)")
else:
await user_dm.send("привет. you wanted me to remind you of:\n```" + reminder[2] + "```https://discordapp.com/channels/" + reminder[3])
self.reminders.remove(reminder)
self.reminders_changed = True
                except Exception:  # keep the reminder loop alive if a single reminder fails
pass
def add_reminder(self, user, timestamp, text, link):
self.reminders.append([user, timestamp, text, link])
self.reminders_changed = True
def get_reminders(self, user):
reminders_list = list()
for reminder in self.reminders:
if reminder[0] == user:
buffer = reminder.copy()
buffer = buffer[1:]
reminders_list.append(buffer)
reminders_list.sort()
return reminders_list
@commands.command()
async def remind(self, ctx, time, *message): # TODO: Rewrite this command as a command group, so it's less cluttered
"""The remind command can remind you or another user of something. Usage:
remind [time] [message] will set a reminder for yourself
remind [@user] [time] [message] will set a reminder for another user
remind list will send a message with a list of set reminders
remind list [@user] will send a message with a list of set reminders for the mentioned user
remind remove [id] will remove the specified reminder
remind blocklist will send a message with a list of blocked users (who can't set reminders for you)
remind blocklist add [@user] will add a user to your blocklist
remind blocklist remove [@user] will remove a user from your blocklist"""
message = " ".join(message)
if time == "list":
if len(ctx.message.mentions) > 0:
user = ctx.message.mentions[0]
else:
user = ctx.message.author
reminders_list = self.get_reminders(user.id)
if len(reminders_list) == 0:
await ctx.send("No reminders are set!")
return
current_time = int(time_module.time())
i = 0
message = "**Reminders for " + user.name + "**\n"
for reminder in reminders_list:
message += "**[" + str(i) + "]**" + " in " + util.time_calc.time_period_human_readable(reminder[0] - current_time) + " `" + reminder[1] + "`\n"
i += 1
await ctx.send(message[:-1])
elif time == "blocklist":
if len(ctx.message.mentions) > 0:
message = message.split(" ")
user = ctx.message.mentions[0]
if ctx.message.author.id not in self.reminders_blocklist:
self.reminders_blocklist[ctx.message.author.id] = list()
if message[0] == "add":
self.reminders_blocklist[ctx.message.author.id].append(user.id)
self.reminders_blocklist_changed = True
await ctx.send("Added **" + user.name + "** to your blocklist!")
elif message[0] == "remove":
try:
self.reminders_blocklist[ctx.message.author.id].remove(user.id)
self.reminders_blocklist_changed = True
await ctx.send("Removed **" + user.name + "** from your blocklist!")
except ValueError:
await ctx.send("**" + user.name + "** was not found in your blocklist!")
else:
if ctx.message.author.id not in self.reminders_blocklist or len(self.reminders_blocklist[ctx.message.author.id]) == 0:
await ctx.send("**You haven't blocked anyone!**")
return
message = "**Reminders blocklist:**\n"
for blocked in self.reminders_blocklist[ctx.message.author.id]:
user = self.bot.get_user(blocked)
                if user is None:
username = "[Username not available]"
else:
username = user.name
message += "**" + username + "** (" + str(blocked) + ")\n"
await ctx.send(message)
elif time == "remove":
            eyedee = int(message.split(" ")[0])  # reminder ids can have more than one digit
reminders = self.get_reminders(ctx.message.author.id)
reminder = reminders[eyedee]
self.reminders.remove([ctx.message.author.id, *reminder])
self.reminders_changed = True
await ctx.send("Removed reminder #" + str(eyedee))
else:
if len(ctx.message.mentions) > 0 and time.startswith("<@"):
user = ctx.message.mentions[0]
offset_args = True
else:
user = ctx.message.author
offset_args = False
if offset_args:
time = message.split(" ")[0]
message = " ".join(message.split(" ")[1:])
if user.id == self.bot.user.id:
await ctx.send("and i say no")
return
timestamp = int(util.time_calc.timestamp_in(time))
if user.id in self.reminders_blocklist and ctx.message.author.id in self.reminders_blocklist[user.id]:
await ctx.send("This user has blocked you from creating reminders for them!")
return
link = str(ctx.message.guild.id) + "/" + str(ctx.message.channel.id) + "/" + str(ctx.message.id)
self.add_reminder(user.id, timestamp, message, link)
if offset_args:
await ctx.send("я буду remind " + user.name)
else:
await ctx.send("я буду remind you")
```
#### File: jrery-bot/commands/streets.py
```python
import discord
import discord.ext.commands as commands
import random
import sys
import os
import requests
import asyncio
import logging
import threading
sys.path.insert(0, "../")
import util
class Streets(commands.Cog):
def __init__(self, bot, timeouts, generic_responses):
self.logger = logging.getLogger(__name__)
self.bot = bot
self.timeouts = timeouts
self.generic_responses = generic_responses
self.strassen_osm_txt = open("strassen_osm.txt", "r").readlines()
self.staedte_osm_txt = open("staedte_osm.txt", "r").readlines()
self.image_search_cache = list()
bot.loop.create_task(self.refresh_search_cache_loop())
def refresh_search_cache(self):
self.logger.info("Refreshing image cache...")
if 1 == 1:
self.logger.info("Debugging is on, not downloading image cache.")
return
self.image_search_cache = list()
search_terms = ("house", "building", "apartment house", "small house")
for term in search_terms:
self.image_search_cache += next(util.duckduckgo_api.search(term, max_results=200))
self.logger.info(str(search_terms.index(term) + 1) + "/" + str(len(search_terms)))
self.logger.info("Done. Got " + str(len(self.image_search_cache)) + " results.")
async def refresh_search_cache_loop(self):
await asyncio.sleep(5)
while True:
try:
thread = threading.Thread(target=self.refresh_search_cache)
thread.start()
thread.join(timeout=5)
except Exception as exc:
self.logger.error("Couldn't refresh search cache: " + str(exc))
if len(self.image_search_cache) == 0:
await asyncio.sleep(30)
else:
await asyncio.sleep(30 * 60 + random.randint(-100, 100)) # adding a bit of randomness so it doesn't seem like a bot ;))))))
@commands.command()
async def address(self, ctx):
if self.timeouts.is_timeout(ctx.message.channel.id, "address"):
await ctx.send(content=self.generic_responses["timeout"])
return
if len(self.image_search_cache) == 0:
await ctx.send("The image search cache is not ready yet! Try again in a few seconds.")
return
self.timeouts.add_timeout(ctx.message.channel.id, "address", 7)
city = random.choice(self.staedte_osm_txt)[1:-2]
street = random.choice(self.strassen_osm_txt)[1:-2]
number = random.randint(1, 24)
chosen_image = random.choice(self.image_search_cache)
filename = "cache/" + str(ctx.message.channel.id) + "_address.png"
response = requests.get(chosen_image["thumbnail"], stream=True)
with open(filename , "wb") as file:
for buffer in response.iter_content(chunk_size=2048):
file.write(buffer)
await ctx.send(content=street + " " + str(number) + ", " + city, file=discord.File(filename))
os.unlink(filename)
```
#### File: jrery-bot/commands/version.py
```python
import discord
import discord.ext.commands as commands
import subprocess
import os
import time
import sys
sys.path.insert(0, "../")
import util
class Version(commands.Cog):
def __init__(self, bot, timeouts, generic_responses):
self.bot = bot
self.timeouts = timeouts
self.generic_responses = generic_responses
@commands.command()
async def version(self, ctx):
embed = discord.Embed(title="Version info", color=discord.Color(0xffff00))
git_status = subprocess.run(["git", "status"], capture_output=True)
git_status = b"\n".join(git_status.stdout.split(b"\n")[0:2])
git_status = git_status.decode("utf8")
embed.add_field(name="Branch info", value=git_status)
head_mtime = int(os.stat(".git/FETCH_HEAD").st_mtime)
embed.add_field(name="Last git pull", value=util.time_calc.time_period_human_readable(time.time() - head_mtime) + " ago")
git_show = subprocess.run(["git", "show"], capture_output=True)
git_show = git_show.stdout.split(b"\n")[0]
git_show = git_show.decode("utf8")
embed.add_field(name="Current commit", value=git_show)
await ctx.send(content="", embed=embed)
```
#### File: jrery-bot/util/time_calc.py
```python
import time
import string
def timestamp_in(tm):
current = time.time()
total_secs = 0
buffer = ""
for char in tm:
if char in string.digits:
buffer += char
else:
if char == "s":
total_secs += int(buffer)
elif char == "m":
total_secs += int(buffer) * 60
elif char == "h":
total_secs += int(buffer) * 60**2
elif char == "d":
total_secs += int(buffer) * 60**2 * 24
buffer = ""
return current + total_secs
def time_period_human_readable(tm):
days = 0
hours = 0
minutes = 0
seconds = 0
tm = int(tm)
days = int(tm / (60**2 * 24))
tm -= days * 60**2 * 24
hours = int(tm / (60**2))
tm -= hours * 60**2
minutes = int(tm / 60)
tm -= minutes * 60
seconds = tm
return str(days) + "d " + str(hours) + "h " + str(minutes) + "m " + str(seconds) + "s"
```
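A short worked example of the two helpers above (the import path is assumed from this repository layout): `"1h30m"` contributes 1*3600 + 30*60 = 5400 seconds, and formatting 5400 seconds back yields `0d 1h 30m 0s`.
```python
import time
from util import time_calc  # assumed import path, matching the bot's util/ package

delay = time_calc.timestamp_in("1h30m") - time.time()
print(round(delay))                                   # ~5400 seconds (1*3600 + 30*60)
print(time_calc.time_period_human_readable(5400))     # "0d 1h 30m 0s"
```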
|
{
"source": "Jerrynicki/stalkbot-rewrite",
"score": 3
}
|
#### File: stalkbot-rewrite/utils/gui.py
```python
import threading
import json
import tkinter as tk
import time
class App():
def __init__(self, bot, config, features_toggle, command_log, blacklist):
self.bot = bot
self.config = config
self.features_toggle = features_toggle
self.command_log = command_log
self.user_blacklist = blacklist
def _start(self):
self.root = tk.Tk()
self.root.title("Stalkbot Control Panel")
self.root.iconphoto(True, tk.PhotoImage(file="icon.png"))
self.title = tk.Label(self.root, text="Stalkbot Control Panel", font=("Helvetica", 24))
self.online_status = tk.Label(self.root, text="status", font=("Helvetica", 14))
self.ping = tk.Label(self.root, text="ping", font=("Helvetica", 14))
self.edit_config_button = tk.Button(self.root, text="Edit config", font=("Helvetica", 18), command=self.edit_config)
self.view_log_button = tk.Button(self.root, text="View command log", font=("Helvetica", 18), command=self.view_log)
self.delete_last_message_button = tk.Button(self.root, text="Delete last message", font=("Helvetica", 18), command=self.delete_last_message)
self.blacklist_button = tk.Button(self.root, text="User blacklist", font=("Helvetica", 18), command=self.blacklist)
self.feature_buttons = list()
i = 0
for button in self.features_toggle:
self.feature_buttons.append(tk.Button(self.root, text="Toggle " + button + " (" + str(self.features_toggle[button]) + ")", command=lambda x=button, y=i: self.toggle(x, y)))
i += 1
self.title.pack()
self.online_status.pack()
self.ping.pack()
self.edit_config_button.pack()
self.view_log_button.pack()
self.delete_last_message_button.pack()
self.blacklist_button.pack()
for button in self.feature_buttons:
button.pack()
self.root.after(1, self.update_stats)
self.root.mainloop()
def start(self):
self.thread = threading.Thread(target=self._start)
self.thread.start()
def update_stats(self):
if not self.bot.is_closed() and self.bot.is_ready():
self.online_status.config(fg="#00FF00", text="Online")
else:
self.online_status.config(fg="#FF0000", text="Offline")
self.ping.config(text="Ping: " + str(round(self.bot.latency*1000, 1)) + " ms")
self.root.after(2000, self.update_stats)
def toggle(self, feature, button_id):
self.features_toggle[feature] = not self.features_toggle[feature]
self.feature_buttons[button_id].config(text="Toggle " + feature + " (" + str(self.features_toggle[feature]) + ")")
json.dump(self.features_toggle, open("features_toggle.json", "w"))
def edit_config(self):
root = tk.Tk()
root.title("Stalkbot Config Editor")
labels = list()
values = list()
i = 1
for x in self.config:
labels.append(tk.Label(root, text=x, font=("Helvetica", 14)))
values.append(tk.Entry(root, font=("Helvetica", 14)))
values[-1].insert(0, self.config[x])
if x == "token":
values[-1].config(show="*")
labels[-1].grid(column=0, row=i)
values[-1].grid(column=1, row=i)
i += 1
tk.Button(root, text="Save", font=("Helvetica", 18), command=root.quit).grid(column=0, row=i)
root.mainloop()
i = 0
for x in self.config:
convert = type(self.config[x])
self.config[x] = convert(values[i].get())
i += 1
json.dump(self.config, open("config.json", "w"))
root.destroy()
def view_log(self):
root = tk.Tk()
root.title("Stalkbot Command Log")
listbox = tk.Listbox(root, width=90, height=10)
close = tk.Button(root, text="Close", font=("Helvetica", 18), command=root.destroy)
close.pack()
listbox.pack()
while True:
cur_time = time.time()
listbox.delete(0, tk.END)
for x in self.command_log:
ctx = x[1]
text = self.config["notifications_format"]
text = text.replace("AUTHOR", ctx.message.author.name + "#" + ctx.message.author.discriminator)
text = text.replace("COMMAND", x[2])
text = text.replace("SERVER", ctx.message.guild.name)
text = text.replace("CHANNEL", "#" + ctx.message.channel.name)
text += " | " + str(int((cur_time-x[0])//60)) + "m" + str(int((cur_time-x[0]) % 60)) + "s ago"
listbox.insert(tk.END, text)
root.update()
time.sleep(0.1)
def blacklist(self):
def update_listbox():
listbox.delete(0, tk.END)
for x in self.user_blacklist:
listbox.insert(0, x)
def add():
self.user_blacklist.append(entry.get() + "#" + entry2.get())
entry.delete(0, tk.END)
entry2.delete(0, tk.END)
update_listbox()
json.dump(self.user_blacklist, open("blacklist.json", "w"))
def remove():
del self.user_blacklist[listbox.curselection()[0]]
update_listbox()
json.dump(self.user_blacklist, open("blacklist.json", "w"))
root = tk.Tk()
root.title("Stalkbot User Blacklist")
entry = tk.Entry(root, width=25)
hashtag = tk.Label(root, text="#", font=("Helvetica", 12), anchor="center")
entry2 = tk.Entry(root, width=4)
add_button = tk.Button(root, text="Add (Name#Discriminator)", font=("Helvetica", 12), command=add)
remove_selected_button = tk.Button(root, text="Remove selected", font=("Helvetica", 12), command=remove)
listbox = tk.Listbox(root, width=25, height=10)
close = tk.Button(root, text="Close", font=("Helvetica", 18), command=root.quit)
entry.grid(column=0, row=0)
hashtag.grid(column=1, row=0)
entry2.grid(column=2, row=0)
add_button.grid(column=3, row=0)
remove_selected_button.grid(column=3, row=1)
listbox.grid(column=0, row=1)
close.grid(column=0, row=2)
update_listbox()
root.mainloop()
root.destroy()
def delete_last_message(self):
self.bot.delete_last = True
```
#### File: stalkbot-rewrite/utils/timeouts.py
```python
import time
class Timeouts():
def __init__(self):
self.timeouts = dict()
def add(self, command, length):
ctime = time.time()
if command not in self.timeouts or ctime > self.timeouts[command]:
self.timeouts[command] = ctime + length
def is_timeout(self, command):
if command in self.timeouts:
if time.time() > self.timeouts[command]:
return False
else:
return True
return False
```
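A minimal usage sketch of the `Timeouts` helper above; the command name and the 5-second length are arbitrary, and the import path is assumed from this repository layout:
```python
import time
from utils.timeouts import Timeouts  # assumed import path

timeouts = Timeouts()
timeouts.add("screenshot", 5)               # block "screenshot" for 5 seconds
print(timeouts.is_timeout("screenshot"))    # True while the 5-second window is running
time.sleep(6)
print(timeouts.is_timeout("screenshot"))    # False once the window has passed
```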
|
{
"source": "Jerrynicki/Stalkbot",
"score": 3
}
|
#### File: Jerrynicki/Stalkbot/sum_process_resources.py
```python
from collections import OrderedDict
import subprocess
import psutil
def run_cmd(cmd_string):
"""Runs commands and saves output to variable"""
cmd_list = cmd_string.split(" ")
popen_obj = subprocess.Popen(cmd_list, stdout=subprocess.PIPE)
output = popen_obj.stdout.read()
output = output.decode("utf8")
return output
def sum_process_resources(cpu_ram):
"""Sums top X cpu and memory usages grouped by processes"""
ps_memory, ps_cpu, ps_rss = {}, {}, {}
top = 6
output = run_cmd('ps aux').split("\n")
for i, line in enumerate(output):
cleaned_list = " ".join(line.split())
line_list = cleaned_list.split(" ")
if i > 0 and len(line_list) > 10:
cpu = float(line_list[2])
memory = float(line_list[3])
rss = float(line_list[5])
command = line_list[10]
ps_cpu[command] = round(ps_cpu.get(command, 0) + cpu, 2)
ps_memory[command] = round(ps_memory.get(command, 0) + memory, 2)
ps_rss[command] = round(ps_rss.get(command, 0) + rss, 2)
sorted_cpu = OrderedDict(sorted(ps_cpu.items(), key=lambda x: x[1], reverse=True))
sorted_memory = OrderedDict(sorted(ps_memory.items(), key=lambda x: x[1], reverse=True))
sorted_rss = OrderedDict(sorted(ps_rss.items(), key=lambda x: x[1], reverse=True))
printfile = open("cache/proc.txt", "w")
if cpu_ram == "ram":
for i, k in enumerate(sorted_memory.items()):
if i < top:
print("{}. {} | {}".format(i+1, k[0].split("/")[-1], str(round(float(k[1])*100, 1)) + " MB"), file=printfile)
else:
for i, k in enumerate(sorted_cpu.items()):
if i < top:
print("{}. {} | {}".format(i+1, k[0].split("/")[-1], str(round(float(k[1]) / psutil.cpu_count(), 1)) + "%"), file=printfile)
printfile.close()
if __name__ == '__main__':
    sum_process_resources("cpu")  # the cpu_ram argument ("cpu" or "ram") is required
```
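A usage sketch, assuming a system where `ps aux` and `psutil` are available. The report is written to `cache/proc.txt`, so that directory has to exist first; the `"cpu"`/`"ram"` argument selects which ranking is written:
```python
import os

os.makedirs("cache", exist_ok=True)   # the report lands in cache/proc.txt
sum_process_resources("cpu")          # top 6 processes by summed CPU usage
print(open("cache/proc.txt").read())
sum_process_resources("ram")          # top 6 processes by summed memory usage
```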
|
{
"source": "jerrynlp/AutoSum",
"score": 3
}
|
#### File: AutoSum/tools/Data_LTR.py
```python
import Syntax as sx
import argparse
import numpy as np
from scipy import spatial
class Phrase:
"""Information of a phrase"""
def __init__(self, word, word_before, word_after, postag_before, postag_after, chapter_id, sentence_id, negation):
self.negation = negation
self.word = word
self.word_before = word_before
self.postag_before = postag_before
self.postag_after = postag_after
self.word_after = word_after
self.chapter_id = chapter_id
self.sentence_id = sentence_id
self.count = 0
self.weight = 0
def add_info(self):
self.count += 1
def output(self):
return str(self.weight) + "\t" + str(self.chapter_id) + "\t" + str(self.sentence_id) + "\t" + self.word \
+ "\t" + self.word_before + "\t" + str(self.postag_before) + "\t" + self.word_after + "\t" + str(self.postag_after) + "\t" + str(self.count)
def output_feature(self, word2vec):
lout = []
lout.append(self.weight)
lout.append(self.chapter_id)
lout.append(self.sentence_id)
vec = phrase_embedding(self.word.split(' '), word2vec)
if len(vec) == 0:
vec = [0.0] * 64
lout.extend(vec)
vec = phrase_embedding(self.word_before.split(' '), word2vec)
if len(vec) == 0:
vec = [0.0] * 64
lout.extend(vec)
lout.append(self.postag_before)
vec = phrase_embedding(self.word_after.split(' '), word2vec)
if len(vec) == 0:
vec = [0.0] * 64
lout.extend(vec)
lout.append(self.postag_after)
lout.append(self.count)
return ' '.join([str(x) for x in lout])
class PhraseSet:
"""Set to manage phrases"""
def __init__(self, story_id, character_id):
self.phrases = {}
self.story_id = story_id
self.character_id = character_id
def add(self, word, chapter_id, sentence_id, negation, word_before, word_after, postag_before, postag_after):
if not word in self.phrases:
self.phrases[word] = Phrase(word, word_before, word_after, postag_before, postag_after, chapter_id, sentence_id, negation)
self.phrases[word].add_info()
def clear(self):
self.phrases = {}
def sort(self):
return sorted(self.phrases.items(), lambda x, y: cmp(x[1].weight, y[1].weight), reverse=True)
def context(self, word2vec):
ct = []
for word in self.phrases.keys():
ct.extend(word.split(' '))
return phrase_embedding(ct, word2vec)
BOOK_ID = 0
CHAPTER_ID = 1
SENTENCE_ID = 2
TOKEN_ID = 3
HEAD_ID = 7
WORD = 8
NWORD = 10
POSTAG = 11
ENTITY = 12
SYNTAX = 13
CHARACTER_ID = 15
MIN_SIM = 0.8
def read_embedding(embedding_path):
model_file = open(embedding_path, 'rb')
des_line = model_file.readline()
word2vec = {}
word2vec['ANONE'] = ' '.join([str(x) for x in [0.0] * 64])
word2vec['BNONE'] = ' '.join([str(x) for x in [0.0] * 64])
    i = 0
for line in model_file:
terms = line.rstrip().split(' ')
#if i % 100000 == 0:
# print "embedding reading " + str(i) + " lines"
if len(terms) == 65:
word = terms[0]
word2vec[word] = ' '.join(terms[1:])
i += 1
model_file.close()
#print "embedding reading finished"
return word2vec
def phrase_embedding(words, word2vec):
if len(words) == 1:
if not words[0] in word2vec:
return []
else:
return [float(x) for x in word2vec[words[0]].split(' ')]
wordvecs = []
for word in words:
if not word in word2vec:
continue
wordvecs.append([float(x) for x in word2vec[word].split(' ')])
if len(wordvecs):
return np.mean(wordvecs, axis = 0)
else:
return []
def sim(phrase1, phrase2, word2vec):
vec1 = phrase_embedding(phrase1.word.split(' '), word2vec)
vec2 = phrase_embedding(phrase2.word.split(' '), word2vec)
if len(vec1) > 0 and len(vec2) > 0:
if phrase1.negation == phrase2.negation:
return 1 - spatial.distance.cosine(vec1, vec2)
else:
return spatial.distance.cosine(vec1, vec2)
else:
return 0.0
def cal_similarity(summarySet, storySet, word2vec):
for phrase1 in storySet.phrases.values():
max_sim = 0
for phrase2 in summarySet.phrases.values():
similarity = sim(phrase1, phrase2, word2vec)
if max_sim < similarity:
max_sim = similarity
phrase1.weight = max_sim
def process(summary, story, story_id, filter_dict, data_file, sourcedata_file):
#phrases and characters in summary
characters = {}
pos = 0
for sentence in summary:
for token in sentence:
cid = -1
if token[CHARACTER_ID].isdigit():
cid = int(token[CHARACTER_ID])
if cid >= 0:
if not cid in characters:
characters[cid] = [[], [], PhraseSet(story_id, cid), PhraseSet(story_id, cid)]
characters[cid][0].append(pos)
pos += 1
for cid in characters.keys():
for sid in characters[cid][0]:
sentence = summary[sid]
syn = sx.SyntaxTree()
syn.creat(sentence)
labels = syn.extract_label_with_info(cid)
for label in labels:
characters[cid][2].add(label[1], syn.chapterID, syn.sentenceID, label[0], label[2], label[3], label[4], label[5])
for sentence in story:
for token in sentence:
cid = -1
if token[CHARACTER_ID].isdigit():
cid = int(token[CHARACTER_ID])
if cid in characters:
syn = sx.SyntaxTree()
syn.creat(sentence)
labels = syn.extract_label_with_info(cid)
for label in labels:
characters[cid][3].add(label[1], syn.chapterID, syn.sentenceID, label[0], label[2], label[3], label[4], label[5])
for cid in characters:
if len(characters[cid][2].phrases) == 0 or len(characters[cid][3].phrases) == 0:
continue
key = str(characters[cid][2].story_id) + " " + str(characters[cid][2].character_id)
if key in filter_dict:
continue
cal_similarity(characters[cid][2], characters[cid][3], word2vec)
sorted_phrases = characters[cid][3].sort()
if sorted_phrases[0][1].weight < MIN_SIM:
            # ignore the sample if max(similarities) < MIN_SIM
continue
for phrase in characters[cid][2].phrases.values():
out_line = "summary\t" + str(characters[cid][2].story_id) + "\t" + str(characters[cid][2].character_id) \
+ "\t" + phrase.output()
sourcedata_file.write(out_line + '\n')
for phrase in sorted_phrases:
out_line = "story\t" + str(characters[cid][3].story_id) + "\t" + str(characters[cid][3].character_id) \
+ "\t" + phrase[1].output()
#print "story\t" + str(characters[cid][3].story_id) + "\t" + str(characters[cid][3].character_id) \
# + "\t" + phrase[1].output_feature(word2vec) + ' ' + ' '.join([str(x) for x in characters[cid][3].context(word2vec)])
sourcedata_file.write(out_line + '\n')
data_file.write(str(characters[cid][3].story_id) + "\t" + str(characters[cid][3].character_id) + "\t" \
+ phrase[1].output_feature(word2vec) + ' ' + ' '.join([str(x) for x in characters[cid][3].context(word2vec)]) + '\n')
return 0
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-t', "--token", help="token file")
parser.add_argument('-e', "--embedding", help="embedding file")
parser.add_argument('-q', "--questionnire", help="questionnire file")
parser.add_argument('-d', "--data", help="data file")
parser.add_argument('-s', "--sourcedata", help="source data file")
args = parser.parse_args()
qsample_dict = {}
    qsample_file = open(args.questionnaire, 'rb')
for line in qsample_file:
terms = line.rstrip().split('\t')
key = terms[1] + ' ' + terms[2]
qsample_dict[key] = 0
qsample_file.close()
qsample_dict = {}
word2vec = read_embedding(args.embedding)
token_file_path = args.token
token_file = open(token_file_path, 'rb') #"../../2.part.tokens.sample", 'rb')
story_id = -1
chapter_id = -1
sentence_id = -1
summary = []
story = []
sentence = []
data_file = open(args.data, 'wb')
sourcedata_file = open(args.sourcedata, 'wb')
for line in token_file:
terms = line.rstrip().split('\t')
if not len(terms) == 16:
continue
# continue
if not int(terms[BOOK_ID]) == story_id:
if len(sentence):
if chapter_id == 0:
summary.append(sentence)
else:
story.append(sentence)
#process
if len(summary):
process(summary, story, story_id, qsample_dict, data_file, sourcedata_file)
#new story
story_id = int(terms[BOOK_ID])
chapter_id = int(terms[CHAPTER_ID])
sentence_id = int(terms[SENTENCE_ID])
summary = []
story = []
sentence = []
sentence.append(terms)
else:
if int(terms[CHAPTER_ID]) == chapter_id and int(terms[SENTENCE_ID]) == sentence_id:
sentence.append(terms)
else:
if len(sentence):
if chapter_id == 0:
summary.append(sentence)
else:
story.append(sentence)
chapter_id = int(terms[CHAPTER_ID])
sentence_id = int(terms[SENTENCE_ID])
sentence = []
sentence.append(terms)
token_file.close()
    data_file.close()
sourcedata_file.close()
```
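A toy check of `phrase_embedding` above, assuming the function is in scope: multi-word phrases are embedded as the mean of their word vectors, unknown words are skipped, and the vectors are stored as space-joined strings. The 3-dimensional vectors below are made up (the real embeddings are 64-dimensional):
```python
toy_word2vec = {"brave": "1.0 0.0 0.0", "knight": "0.0 1.0 0.0"}

print(phrase_embedding(["brave"], toy_word2vec))            # [1.0, 0.0, 0.0]
print(phrase_embedding(["brave", "knight"], toy_word2vec))  # mean of the two -> [0.5, 0.5, 0.0]
print(phrase_embedding(["unseen"], toy_word2vec))           # [] when no vector is known
```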
#### File: AutoSum/tools/Rank_Evaluation.py
```python
import math
import argparse
import numpy as np
"""
the key of annotation is a word;
each value in annotation has two infos, one is rank and the other one is relevance.
prediction is a list of words.
"""
def Gain(r):
return math.pow(2, r) - 1
def NDCG(annotation, prediction):
DCG = 0.0
rank = 1
for item in prediction:
DCG += Gain(annotation[item][1]) * math.log(2, 1 + rank)
rank += 1
MaxDCG = 0.0
for item in annotation.values():
MaxDCG += Gain(item[1]) * math.log(2, 1 + item[0])
if MaxDCG == 0:
return 0
return DCG/MaxDCG
def MAP(annotation, prediction):
number_correct = 0
for item in annotation.values():
if item[1] == 3:#3 is high relevance
number_correct += 1
if number_correct == 0:
return -2
position = 1
true_positive = 0.0
vMAP = 0.0
for i in range(0, number_correct):
if annotation[prediction[i]][1] == 3:
true_positive += 1.0
vMAP += true_positive / position
position += 1
return vMAP / number_correct
def RC(annotation, prediction):
"""
Kendall rank correlation coefficient
"""
number_con = 0.0
number_dis = 0.0
for i in range(0, len(prediction)):
for j in range(i + 1, len(prediction)):
if annotation[prediction[i]][0] < annotation[prediction[j]][0]:
number_con += 1
else:
number_dis += 1
return (number_con - number_dis) / len(prediction) / (len(prediction) - 1) * 2
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-d', "--data", help="data file")
parser.add_argument('-q', "--questionnaire", help="questionnair file")
args = parser.parse_args()
questionnaire_file = open(args.questionnaire, 'rb')
    characters = {}  # storyID "\t" characterID --> {word --> relevance}
for line in questionnaire_file:
terms = line.rstrip().split('\t')
if not len(terms) == 5:
continue
key = terms[1] + '\t' + terms[2]
characters[key] = {}
options = terms[3].split(';')
for op in options:
temps = op.split(':')
if temps[0] == 'gender' or not len(temps) == 2:
continue
characters[key][temps[1]] = 0
selections = terms[4].split(';')
for sl in selections:
temps = sl.split(':')
if not temps[0] in characters[key]:
continue
if temps[1] == 'strong':
characters[key][temps[0]] = 3
elif temps[1] == 'medium':
characters[key][temps[0]] = 2
elif temps[1] == 'weak':
characters[key][temps[0]] = 1
questionnaire_file.close()
data_file = open(args.data, 'rb')
annotation = {}
prediction = []
key = ''
character = {}
has_high = False
RCs = []
MAPs = []
NDCGs = []
for line in data_file:
terms = line.rstrip().split('\t')
if terms[0] == 'summary':
continue
if terms[1] + '\t' + terms[2] == key:
#add new word
phrase = terms[6]
words = phrase.split(' ')
for word in words:
if word in character and not word in annotation:
prediction.append(word)
annotation[word] = [len(annotation) + 1, character[word]]
if character[word] > 2:
has_high = True
else:
if len(prediction) > 1 and has_high:
sorted_annotation = sorted(annotation.items(), lambda x, y: cmp(x[1][1], y[1][1]), reverse=True)
annotation = {}
for item in sorted_annotation:
annotation[item[0]] = [len(annotation) + 1, item[1][1]]
#print annotation
#print prediction
RCs.append(RC(annotation, prediction))
vMap = MAP(annotation, prediction)
if vMap >= -1:
MAPs.append(vMap)
NDCGs.append(NDCG(annotation, prediction))
annotation = {}
prediction = []
has_high = False
key = terms[1] + '\t' + terms[2]
character = characters[key]
phrase = terms[6]
words = phrase.split(' ')
for word in words:
if word in character and not word in annotation:
prediction.append(word)
annotation[word] = [len(annotation) + 1, character[word]]
if character[word] > 2:
has_high = True
data_file.close()
if len(prediction) > 1 and has_high:
sorted_annotation = sorted(annotation.items(), lambda x, y: cmp(x[1][1], y[1][1]), reverse=True)
annotation = {}
for item in sorted_annotation:
annotation[item[0]] = [len(annotation) + 1, item[1][1]]
RCs.append(RC(annotation, prediction))
vMap = MAP(annotation, prediction)
if vMap >= -1:
MAPs.append(vMap)
NDCGs.append(NDCG(annotation, prediction))
print "RC is " + str(np.mean(RCs))
print len(RCs)
print "NDCG is " + str(np.mean(NDCGs))
print len(NDCGs)
print "MAP is " + str(np.mean(MAPs))
print len(MAPs)
''' test case
prediction = [3, 1, 4, 2]
annotation = {1:[1, 3], 2:[2, 3], 3:[3, 1], 4:[4, 0]}
print RC(annotation, prediction)
print MAP(annotation, prediction)
print NDCG(annotation, prediction)
'''
```
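For a concrete check, running the commented-out test case at the bottom of the file gives the values below (computed by hand from the formulas above):
```python
prediction = [3, 1, 4, 2]
annotation = {1: [1, 3], 2: [2, 3], 3: [3, 1], 4: [4, 0]}
print(RC(annotation, prediction))    # 0.0   (3 concordant vs. 3 discordant pairs)
print(MAP(annotation, prediction))   # 0.25  (2 highly relevant items; one is found at position 2)
print(NDCG(annotation, prediction))  # ~0.71
```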
|
{
"source": "jerrynlp/RLMem",
"score": 3
}
|
#### File: jerrynlp/RLMem/MemN2N.py
```python
import torch
from torch import randn, squeeze  # randn and squeeze are functions, not importable submodules
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
class MemN2N(nn.Module):
def __init__(self, batch_size, memory_size, vocab_size, embed_size, hops):
super(MemN2N, self).__init__()
self.vocab_size = vocab_size
self.embed_size = embed_size
self.batch_size = batch_size
self.memory_size = memory_size
self.hops = hops
#Adjacent memory
#the output embedding for one layer is the input embedding for the one above [Sukhbaatar et al.]
#A1, C1 == A2, C2 == A3, ..., Actually hops + 1 embeddings are needed
self.memory = []
A = nn.Embedding(self.vocab_size, self.embed_size) #input memory
A.weight = nn.Parameter(randn(self.vocab_size, self.embed_size).normal_(0, 0.1))
for i in range(self.hops):
p = nn.Softmax() #softmax layer between input and output
C = nn.Embedding(self.vocab_size, self.embed_size) #output memory
C.weight = nn.Parameter(randn(self.vocab_size, self.embed_size).normal_(0, 0.1))
self.memory.append([A, p, C])
A = C #A[i+1] = C[i]
# final weight matrix
self.W = nn.Parameter(randn(self.embed_size, self.vocab_size), requires_grad=True)
# final softmax layer
self.m = nn.Softmax()
def get_position_encoding(self, query_size):
"""
Position Encoding (PE)
the order of the words now affects.
"""
encoding = np.ones((self.embed_size, query_size), dtype=np.float32)
ls = query_size + 1
le = self.embed_size + 1
for i in range(1, le):
for j in range(1, ls):
encoding[i - 1, j - 1] = (i - (le - 1) / 2) * (j - (ls - 1) / 2)
encoding = 1 + 4 * encoding / self.embed_size / query_size
enc_vec = torch.from_numpy(np.transpose(encoding)).type(torch.FloatTensor)
return enc_vec
def embed_evidences(self, evidences, embedding_layer):
evidence_embedding_list = []
for evidence in evidences:
evidence_variable = Variable(torch.squeeze(evidence, 0).data.type(torch.LongTensor))
evidence_embedding = embedding_layer(evidence_variable)
            position_encoding = self.get_position_encoding(evidence_variable.size(0))  # PE takes only the sequence length
evidence_embedding = evidence_embedding * position_encoding
evidence_embedding_list.append(evidence_embedding)
batch_story_embedding_temp = torch.stack(evidence_embedding_list)
batch_story_embedding = torch.sum(batch_story_embedding_temp, dim=2)
return torch.squeeze(batch_story_embedding, dim=2)
def forward(self, x_e, x_q):
e = Variable(x_e, requires_grad=False) #evidences
q = Variable(squeeze(x_q, 1), requires_grad=False) #question
u_list = []
#emb query
queries_emb = self.memory[0][0](q) #in the simplest case via another embedding matrix B with the same dimensions as A
position_encoding = self.get_position_encoding(queries_emb.size()[0])
queries = queries_emb * position_encoding
u_list.append(torch.sum(queries, dim=1))
for i in range(self.hops):
#emb A
            evidence_emb_A = self.embed_evidences(e, self.memory[i][0])
#inner product
u_k_1_matrix = [u_list[-1]] * self.memory_size
p = evidence_emb_A * torch.squeeze(torch.stack(u_k_1_matrix, dim=1), 2)
#softmax
p = self.memory[i][1](torch.squeeze(torch.sum(p, dim=2)))
#emb_C
evidence_emb_C = self.embed_story(e, self.memory[i][2])
#inner product
pre_o = torch.mul(evidence_emb_C, p.unsqueeze(1).expand_as(evidence_emb_C))
o = torch.sum(pre_o, dim=2)
#u_k
u_list.append(torch.squeeze(o) + torch.squeeze(u_list[-1]))
wx = torch.mm(u_list[-1], self.W)
y_pred = self.m(wx)
return y_pred
#Test
batch_size = 1
embed_size = 4
vocab_size = 4
hops = 2
memory_size = 10
net = MemN2N(batch_size, memory_size, vocab_size, embed_size, hops)  # __init__ takes five arguments
```
|
{
"source": "jerryntom/PyCalc-Basic",
"score": 3
}
|
#### File: jerryntom/PyCalc-Basic/main.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
from math import sqrt
class Ui_Calculator:
# declarations of variables
def __init__(self):
self.styleSheet = """
QToolTip {
font-size: 25pt;
}
"""
self.calcHistory = ["0"]
self.pastResult = ""
self.pastCalculation = ""
self.aftFrac = ""
self.befFrac = ""
self.value = "0"
self.centralwidget = QtWidgets.QWidget(Calculator)
self.font = QtGui.QFont()
self.numberField = QtWidgets.QLabel(self.centralwidget)
self.errorLabel = QtWidgets.QLabel(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(Calculator)
# buttons
self.percentBut = self.buttonObject(10, 100, 111, 41, '%', self.percentClicked)
self.clearEntryBut = self.buttonObject(130, 100, 111, 41, 'CE', self.clearEntry)
        self.clearBut = self.buttonObject(250, 100, 111, 41, 'C', self.clear)
self.backBut = self.buttonObject(370, 100, 111, 41, 'Back', self.back)
self.expSecBut = self.buttonObject(130, 150, 111, 41, 'x²', self.expSec)
self.fractionBut = self.buttonObject(10, 150, 111, 41, '1/x', self.fraction)
self.divideBut = self.buttonObject(370, 150, 111, 41, '/', self.mathSymbol)
self.rootSecBut = self.buttonObject(250, 150, 111, 41, '²√x', self.rootSec)
self.eightBut = self.buttonObject(130, 200, 111, 41, '8', self.numbers)
self.sevenBut = self.buttonObject(10, 200, 111, 41, '7', self.numbers)
self.multiplyBut = self.buttonObject(370, 200, 111, 41, '*', self.mathSymbol)
self.nineBut = self.buttonObject(250, 200, 111, 41, '9', self.numbers)
self.fiveBut = self.buttonObject(130, 250, 111, 41, '5', self.numbers)
self.fourBut = self.buttonObject(10, 250, 111, 41, '4', self.numbers)
self.substractBut = self.buttonObject(370, 250, 111, 41, '-', self.mathSymbol)
self.sixBut = self.buttonObject(250, 250, 111, 41, '6', self.numbers)
self.twoBut = self.buttonObject(130, 300, 111, 41, '2', self.numbers)
self.oneBut = self.buttonObject(10, 300, 111, 41, '1', self.numbers)
self.addBut = self.buttonObject(370, 300, 111, 41, '+', self.mathSymbol)
self.threeBut = self.buttonObject(250, 300, 111, 41, '3', self.numbers)
self.zeroBut = self.buttonObject(130, 350, 111, 41, '0', self.numbers)
self.plusMinusBut = self.buttonObject(10, 350, 111, 41, '+/-', self.plusMinus)
self.calculateBut = self.buttonObject(370, 350, 111, 41, '=', self.calculate)
self.dotBut = self.buttonObject(250, 350, 111, 41, '.', self.mathSymbol)
#construction of window and its components
def setupUi(self, Calculator):
Calculator.setObjectName("Calculator")
Calculator.setMaximumSize(494, 421)
Calculator.setMinimumSize(494, 421)
Calculator.setWindowOpacity(0.96)
Calculator.setTabShape(QtWidgets.QTabWidget.Triangular)
Calculator.setWindowIcon(QtGui.QIcon('logo.ico'))
self.centralwidget.setObjectName("centralwidget")
self.font.setFamily("Segoe UI Light")
self.font.setPointSize(36)
self.font.setBold(False)
self.numberField.setGeometry(QtCore.QRect(10, 10, 471, 81))
self.numberField.setFont(self.font)
self.numberField.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing)
self.numberField.setObjectName("label")
self.numberField.setToolTip('0')
self.numberField.setText('0')
self.font.setPointSize(16)
self.errorLabel.setGeometry(QtCore.QRect(10, -50, 471, 81))
self.errorLabel.setFont(self.font)
self.errorLabel.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing)
self.errorLabel.setObjectName("errorLabel")
self.statusbar.setObjectName("statusbar")
Calculator.setCentralWidget(self.centralwidget)
Calculator.setStatusBar(self.statusbar)
QtCore.QMetaObject.connectSlotsByName(Calculator)
self.value = self.numberField.text()
# template for buttons
def buttonObject(self, xCord, yCord, width, height, name, function):
self.font.setFamily("Segoe UI Light")
self.font.setPointSize(18)
self.font.setBold(False)
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setGeometry(QtCore.QRect(xCord, yCord, width, height))
self.pushButton.setFont(self.font)
self.pushButton.setObjectName(name)
self.pushButton.setText(name)
try:
if name in ('/', '*', '-', '+', '.'):
self.pushButton.clicked.connect(lambda: self.mathSymbol(name))
elif int(name) in range(0, 10):
self.pushButton.clicked.connect(lambda: self.numbers(name))
except ValueError:
self.pushButton.clicked.connect(function)
return self.pushButton
# calculating part of number by percents
def percentClicked(self):
percent = ''
pastNumber = ''
errorCheck = True
try:
for i in range(len(self.value)-1, -1, -1):
if self.value[i] in ('-', '+', '*', '/'):
break
else:
percent += self.value[i]
for i in range(len(self.value)-len(percent)-2, -1, -1):
if self.value[i] in ('-', '+', '*', '/'):
break
else:
pastNumber += self.value[i]
self.value = self.value[0:len(self.value)-len(percent)]
percent = float(percent[::-1])
percent = str(percent/100)
pastNumber = pastNumber[::-1]
result = float(percent) * float(pastNumber)
self.value += self.betterRound(result)
self.numberField.setText(self.value)
self.numberField.setToolTip(self.value)
print(self.value)
print(percent, pastNumber)
except Exception as e:
self.exceptErrors(e, "Error occured - percent function")
# revert to past expression
def clearEntry(self):
if len(self.calcHistory) == 1:
self.value = '0'
elif len(self.calcHistory) > 1:
self.calcHistory.pop()
self.value = self.calcHistory[len(self.calcHistory)-1]
self.numberField.setText(self.value)
self.numberField.setToolTip(self.value)
print(self.value)
# delete entire expression and calcHistory
def clear(self):
self.value = '0'
self.numberField.setText(self.value)
self.numberField.setToolTip(self.value)
self.calcHistory = ['0']
print(self.value)
# delete one character from expression
def back(self):
if len(self.value) == 1:
self.value = '0'
else:
self.value = self.value[0:len(self.value)-1]
self.numberField.setText(self.value)
self.numberField.setToolTip(self.value)
print(self.value)
# calculating fraction of numbers - 1/x
def fraction(self):
try:
if self.value == self.aftFrac:
self.value = self.befFrac
self.befFrac = self.aftFrac
else:
if len(self.value) >= 12:
self.exceptErrors('Too big number', "Too big number - fraction function")
return None
self.befFrac = self.value
self.value = self.betterRound(1 / float(self.value))
self.numberField.setText(self.value)
self.numberField.setToolTip(self.value)
self.aftFrac = self.value
self.calcHistory.append(self.value)
self.checkLongNumber()
print(self.value)
except ZeroDivisionError as e:
self.exceptErrors(e, "ZeroDivisionError - fraction function")
except Exception as e:
self.exceptErrors(e, "Error occured - fraction function")
# calculating second degree's power
def expSec(self):
try:
self.value = float(self.value)
self.value = self.betterRound(pow(self.value, 2))
self.calcHistory.append(self.value)
self.numberField.setText(self.value)
self.numberField.setToolTip(self.value)
self.checkLongNumber()
print(self.value)
except Exception as e:
self.exceptErrors(e, "Error occured - expSec function")
# calculating square root
def rootSec(self):
try:
self.value = self.betterRound(sqrt(float(self.value)))
self.calcHistory.append(self.value)
self.numberField.setText(self.value)
self.numberField.setToolTip(self.value)
self.checkLongNumber()
print(self.value)
except Exception as e:
self.exceptErrors(e, "Error occured - rootSec function")
# calculating entire expressions from self.value
def calculate(self):
try:
if self.value == self.pastResult:
self.value += self.pastCalculation
else:
self.pastCalculation = ''
for i in range(len(self.value)-1, -1, -1):
if self.value[i] in ('-', '+', '*', '/'):
self.pastCalculation += self.value[i]
break
else:
self.pastCalculation += self.value[i]
self.pastCalculation = self.pastCalculation[::-1]
self.value = eval(self.value)
self.value = self.betterRound(float(self.value))
if float(self.value) % 1 == 0 and '.' in self.value:
self.value = self.value[0:len(self.value)-2]
self.calcHistory.append(self.value)
self.numberField.setText(self.value)
self.pastResult = self.value
self.numberField.setToolTip(self.value)
self.checkLongNumber()
print(self.value)
except Exception as e:
self.exceptErrors(e, "Syntax error - can't calculate")
# changing + to - and the other way
def plusMinus(self):
try:
if float(self.value) % 1 == 0:
self.value = str(-(float(self.value)))
self.value = self.value[0:len(self.value)-2]
elif float(self.value) % 1 != 0:
self.value = str(-(float(self.value)))
if self.value == '-0':
self.value = '0'
self.calcHistory.append(self.value)
self.numberField.setText(self.value)
self.numberField.setToolTip(self.value)
print(self.value)
except Exception as e:
self.exceptErrors(e, "Can't convert it by plusMinus function")
# connected to QTimer - exceptErrors for hiding error label
def clearErrors(self):
self.errorLabel.setText('')
# checking if number has 64 chars or more
def checkLongNumber(self):
if len(self.value) >= 64:
self.errorLabel.setText('SizeLimit: 64')
QtCore.QTimer.singleShot(2000, self.clearErrors)
self.value = '0'
self.numberField.setText(self.value)
self.numberField.setToolTip(self.value)
# passing errors to error label
def exceptErrors(self, errorLog, errorCom):
print(errorLog)
self.errorLabel.setText(errorCom)
QtCore.QTimer.singleShot(2000, self.clearErrors)
# making buttons with math symbols better
def mathSymbol(self, symbol):
self.value += symbol
self.numberField.setText(self.value)
self.numberField.setToolTip(self.value)
print(self.value)
# making numeric button better
def numbers(self, number):
if number == '0' and self.value != '0':
self.value += '0'
else:
if self.value == '0':
self.value = number
else:
self.value += number
self.numberField.setText(self.value)
self.numberField.setToolTip(self.value)
print(self.value)
# rounding numbers to 10 decimal places
def betterRound(self, number):
if number % 1 == 0:
result = str(int(number))
elif number % 1 != 0:
result = format(number, ".10f")
            if '.00000' in str(result) or '.99999' in str(result):
                result = format(number, ".0f")
            else:
                result = str(result).rstrip('0')  # only strip zeros from the fractional form, never from e.g. "10"
return result
# app execution
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Calculator = QtWidgets.QMainWindow()
ui = Ui_Calculator()
app.setStyleSheet(ui.styleSheet)
ui.setupUi(Calculator)
Calculator.show()
sys.exit(app.exec_())
```
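A quick check of the `betterRound` helper above, using a standalone copy of its logic so no Qt objects are needed; the inputs are chosen to exercise the whole-number, fractional, and floating-point-noise branches:
```python
def better_round(number):
    # Standalone copy of Ui_Calculator.betterRound, for illustration only.
    if number % 1 == 0:
        return str(int(number))
    result = format(number, ".10f")
    if '.00000' in result or '.99999' in result:
        return format(number, ".0f")
    return result.rstrip('0')

print(better_round(4.0))             # "4"
print(better_round(1 / 3))           # "0.3333333333"
print(better_round(0.1 + 0.2))       # "0.3"
print(better_round(9.9999999999))    # "10"
```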
|
{
"source": "JerryPan2718/flexgpt",
"score": 2
}
|
#### File: minGPT/memgpt/mem_demo.py
```python
import logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# make deterministic
from mem_utils import set_seed
from utils import check_shape, CachedModule, PytorchTimer, check_device_on_cuda
set_seed(2718)
import numpy as np
import torch
import torch.nn as nn
from torch.nn import functional as F
import math
from torch.utils.data import Dataset
import time
import datetime
from torch.profiler import profile, record_function, ProfilerActivity
import torch.autograd.profiler as profiler
import pandas as pd
today = datetime.date.today()
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1"   # must be set in the environment (before CUDA is initialized) to take effect
os.environ["NUMEXPR_MAX_THREADS"] = "8"
device = 'cuda' if torch.cuda.is_available() else 'cpu'
class CharDataset(Dataset):
def __init__(self, data, block_size):
chars = sorted(list(set(data)))
data_size, vocab_size = len(data), len(chars)
print('data has %d characters, %d unique.' % (data_size, vocab_size))
self.stoi = { ch:i for i,ch in enumerate(chars) }
self.itos = { i:ch for i,ch in enumerate(chars) }
self.block_size = block_size
self.vocab_size = vocab_size
self.data = data
def __len__(self):
return len(self.data) - self.block_size
def __getitem__(self, idx):
# grab a chunk of (block_size + 1) characters from the data
chunk = self.data[idx:idx + self.block_size + 1]
# encode every character to an integer
dix = [self.stoi[s] for s in chunk]
"""
arrange data and targets so that the first i elements of x
will be asked to predict the i-th element of y. Notice that
the eventual language model will actually make block_size
individual predictions at the same time based on this data,
so we are being clever and amortizing the cost of the forward
pass of the network. So for example if block_size is 4, then
we could e.g. sample a chunk of text "hello", the integers in
x will correspond to "hell" and in y will be "ello". This will
then actually "multitask" 4 separate examples at the same time
in the language model:
- given just "h", please predict "e" as next
- given "he" please predict "l" next
- given "hel" predict "l" next
- given "hell" predict "o" next
In addition, because the DataLoader will create batches of examples,
        every forward/backward pass during training will simultaneously train
a LOT of predictions, amortizing a lot of computation. In particular,
for a batched input of integers X (B, T) where B is batch size and
T is block_size and Y (B, T), the network will during training be
simultaneously training to make B*T predictions, all at once! Of course,
        at test time we can parallelize across batch B, but unlike during training
we cannot parallelize across the time dimension T - we have to run
a forward pass of the network to recover the next single character of the
sequence along each batch dimension, and repeatedly always feed in a next
character to get the next one.
So yes there is a big asymmetry between train/test time of autoregressive
models. During training we can go B*T at a time with every forward pass,
but during test time we can only go B at a time, T times, with T forward
passes.
"""
x = torch.tensor(dix[:-1], dtype=torch.long)
y = torch.tensor(dix[1:], dtype=torch.long)
return x, y
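# Illustrative sketch (not part of the original script): a tiny check of the x/y shift
# described in the docstring above, using the CharDataset class just defined. For the
# text "hello" with block_size=4, x encodes "hell" and y encodes "ello", so the first
# i characters of x are asked to predict the i-th character of y.
_demo_ds = CharDataset("hello", block_size=4)
_demo_x, _demo_y = _demo_ds[0]
assert [_demo_ds.itos[int(i)] for i in _demo_x] == list("hell")
assert [_demo_ds.itos[int(i)] for i in _demo_y] == list("ello")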
block_size = 2048 # spatial extent of the model for its context
# you can download this file at https://github.com/karpathy/char-rnn/blob/master/data/tinyshakespeare/input.txt
text = open('input.txt', 'r').read() # don't worry we won't run out of file handles
train_dataset = CharDataset(text, block_size) # one line of poem is roughly 50 characters
def model_init(B, K, H, cache_length, T):
from mem_gpt import MemGPT, MemGPTConfig
    mem_config = MemGPTConfig(train_dataset.vocab_size, train_dataset.block_size,
                              B=B, K=K, H=H, cache_length=cache_length, device=device)
model = MemGPT(mem_config)
print("=" * 50)
from mem_trainer import MemTrainer, MemTrainerConfig
# initialize a trainer instance and kick off training
tconf = MemTrainerConfig(max_epochs=1, batch_size=128, learning_rate=6e-4,
lr_decay=True, warmup_tokens=512*20, final_tokens=2*len(train_dataset)*block_size,
num_workers=4, T=T)
trainer = MemTrainer(model, train_dataset, None, tconf)
trainer.train()
print("=" * 50)
return model, trainer
def model_sampling(model, trainer, steps, B):
from mem_utils import sample
context = "O God, O God!"
x = torch.tensor([train_dataset.stoi[s] for s in context], dtype=torch.long)[None,...].to(trainer.device)
# x = torch.tensor([[train_dataset.stoi[s] for s in context] for _ in range(B)], dtype=torch.long)[None,...].to(trainer.device)
# x = x[0]
# print(f"initial x: {x}")
print(f"initial x: {x.shape}")
y, sampling_record = sample(model, x, steps, temperature=1.0, sample=True, top_k=10)
return y, sampling_record
if __name__ == "__main__":
hparams = {"117M": (12, 768), "345M": (24, 1024), "762M": (36, 1280), "1542M": (48, 1600)}
cache_lengths = [1, 0] # 0, 0.25, 0.5, 0.5, 1
Tgs = [512] # 256, 512, 1024
start = time.time()
Tc = 32
for model_size, hparam in hparams.items():
if model_size != "1542M":
continue
d = {}
B, H = hparam
K = 1
for Tg in Tgs:
for cache_length in cache_lengths:
with torch.no_grad():
with torch.autocast(device):
model, trainer = model_init(B, K, H, cache_length * Tg, Tc + Tg)
print(f"Tg={Tg} model_size={model_size} cache_length={cache_length * Tg}")
# warmup
with profiler.profile(with_stack=True, profile_memory=True) as prof:
for i in range(2):
print(f"warmup iteration: {i}")
y, sampling_record = model_sampling(model, trainer, Tg, B)
total_time = []
mem_usage = []
runtime = []
# timing module
for i in range(2):
print(f"timing iteration: {i}")
with PytorchTimer(verbose=False) as t:
y, sampling_record = model_sampling(model, trainer, Tg, B)
mem_usage.append(sampling_record[0])
runtime.append(sampling_record[1])
total_time.append(t.elapsed)
ret = [np.mean(total_time), np.std(total_time), np.mean(mem_usage), np.std(mem_usage), np.mean(runtime), np.std(runtime)]
d[f"model_size={model_size} Tg={Tg} cache_length={cache_length * Tg}"] = ret
torch.cuda.empty_cache()
torch.cuda.empty_cache()
prof.export_chrome_trace(f"profiles/{today}-model_size={model_size}-cache_length={cache_length}-Tg={Tg}.json")
speedup = d[f"model_size={model_size} Tg={Tg} cache_length={0}"][0] / d[f"model_size={model_size} Tg={Tg} cache_length={Tg}"][0]
print(f"Speedup for {model_size} with Tg={Tg}: {speedup}")
print(d)
        df = pd.DataFrame(data=d, index=["runtime_mean(ms)", "runtime_std(ms)", "mem_mean(b)", "mem_std(b)", "t1_mean(ms)", "t1_std(ms)"])
print(df)
df.to_csv(f"logs/{today}-mem_demo-{model_size}_K={K}.csv")
print(time.time() - start)
# completion = ''.join([train_dataset.itos[int(i)] for i in y])
# print(completion)
```
#### File: minGPT/memgpt/mem_linear.py
```python
from utils import check_shape, CachedModule, PytorchTimer
import torch
import torch.nn as nn
import logging
# from pthflops import count_ops
logging.basicConfig(level=logging.DEBUG)
# print(device)
class CachedLinear(CachedModule):
""" cached nn.Linear layer """
def __init__(self, in_features, out_features, bias=True, x=None, **kwargs):
self.device = 'cpu'
if torch.cuda.is_available():
self.device = torch.cuda.current_device()
CachedModule.__init__(self, x)
self.layer = nn.Linear(in_features, out_features, bias, **kwargs, device=self.device)
def forward(self, x):
"""
x: BTH
cache: B(T-1)H
new_out: B*1*H
"""
B, T, _ = x.shape
if self.cache is not None:
cache = check_shape(self.cache, (B, T - 1, self.layer.out_features))
new_out = check_shape(self.layer(x[:, -1:, :]), (B, 1, self.layer.out_features))
y = torch.cat([cache, new_out], dim=1)
else:
y = check_shape(self.layer(x), (B, T, self.layer.out_features))
self.set_cache(y)
return y
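# Illustrative usage sketch (not part of the original module, and not invoked anywhere):
# with the CachedLinear layer above, the first call computes all T positions and fills
# the cache, while a second call with one appended token only runs the underlying
# nn.Linear on the newest position and concatenates it onto the cached output.
def _cached_linear_demo():
    B, T, H = 2, 8, 16
    layer = CachedLinear(H, H, bias=False)
    x = torch.randn((B, T, H), device=layer.device)
    y_full = layer(x)  # uncached path: full (B, T, H) output, cache is now populated
    x_next = torch.cat([x, torch.randn((B, 1, H), device=layer.device)], dim=1)
    y_incr = layer(x_next)  # cached path: only the last position goes through the layer
    assert y_incr.shape == (B, T + 1, H)
    assert torch.allclose(y_incr[:, :T], y_full)
    return y_incr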
if __name__ == "__main__":
B, T, H = (16, 128, 768)
layer = CachedLinear(H, H, False)
layer.clear_cache()
x = torch.randn((B, T, H))
with PytorchTimer(verbose=True):
y = check_shape(layer(x), (B, T, H))
# layer.clear_cache()
# x = torch.randn((B, T, H))
# with PytorchTimer(verbose=True):
# y = check_shape(layer(x), (B, T, H))
# layer.clear_cache()
# x = torch.randn((B, T, H))
# with PytorchTimer(verbose=True):
# y = check_shape(layer(x), (B, T, H))
# layer.clear_cache()
# x = torch.randn((B, T, H))
# with PytorchTimer(verbose=True):
# y = check_shape(layer(x), (B, T, H))
# logging.debug(f"test cache")
# for i in range(1, 10):
# x = torch.randn((B, T + i, H))
# with PytorchTimer(verbose=True):
# y = check_shape(layer(x), (B, T + i, H))
```
#### File: minGPT/memgpt/mem_selfattn.py
```python
from utils import check_device_on_cuda, check_shape, CachedModule, PytorchTimer
from mem_linear import CachedLinear
import time
import numpy as np
import pandas as pd
import math
import torch
import torch.nn as nn
from torch.nn import functional as F
from tqdm import tqdm
import torch.cuda.profiler as profiler
from torch.profiler import profile, record_function, ProfilerActivity
import datetime
from mem_gpt_flops import selfattn_flop
today = datetime.date.today()
# from pypapi import papi_high as high
# from pypapi import events as papi_events
import logging
logging.basicConfig(level=logging.DEBUG)
CUDA_VISIBLE_DEVICES = 1
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print(device)
class CachedSelfAttn(CachedModule):
def __init__(self, n_head, n_hidden, dropout=0.1, max_t=2048, cache_length=64, B=12, T=2048):
"""
q: BKT(H/K)
k: BKT(H/K)
v: BKT(H/K)
qkt: BKTT
"""
super().__init__(dict(qkt=None, y=None))
assert n_hidden % n_head == 0, "linear layer dimension is not divisible by n_head"
self.device = 'cpu'
if torch.cuda.is_available():
self.device = torch.cuda.current_device()
self.q = CachedLinear(n_hidden, n_hidden)
self.k = CachedLinear(n_hidden, n_hidden)
self.v = CachedLinear(n_hidden, n_hidden)
self.attn_drop = nn.Dropout(dropout)
self.resid_drop = nn.Dropout(dropout)
self.proj = CachedLinear(n_hidden, n_hidden)
self.register_buffer("mask", torch.tril(torch.ones(max_t, max_t, device=self.device)).view(1, 1, max_t, max_t))
self.n_head = n_head
self.n_hidden = n_hidden
self.cache = {}
self.cache_counter = 0
self.cache_length = cache_length
# self.i = 0
def clear_cache(self):
self.q.clear_cache()
self.k.clear_cache()
self.v.clear_cache()
self.proj.clear_cache()
self.cache = {}
def set_cache(self, key, value):
self.cache[key] = value
def get_cache(self, key, device=None):
val = self.cache.get(key, None) if self.cache else None
# if val is not None and device is not None:
# print(val.get_device())
# val = val
return val
def reset_cache_counter(self):
self.cache_counter = 0
def forward_uncached(self, x):
# print("uncached")
B, T, H = x.size()
K = self.n_head
# with PytorchTimer(verbose=False) as T1:
q = self.q(x).view(B, T, K, H // K).transpose(1, 2)
k = self.k(x).view(B, T, K, H // K).transpose(1, 2)
v = self.v(x).view(B, T, K, H // K).transpose(1, 2)
# t1 = T1.elapsed
# with PytorchTimer(verbose=False) as T2:
qkt = q @ k.transpose(-2, -1)
attn = qkt * (1.0 / math.sqrt(k.size(-1)))
# t2 = T2.elapsed
# print(f"mem_selfattn qkt matmul: {t2}")
mask = self.mask[:, :, :T, :T]
attn = attn.masked_fill(mask == 0, float('-inf'))
attn = F.softmax(attn, dim=-1)
attn = self.attn_drop(attn)
# with PytorchTimer(verbose=False) as T3:
        y = attn @ v # (B, K, T, T) x (B, K, T, H/K) -> (B, K, T, H/K)
# t3 = T3.elapsed
# print(f"mem_selfattn y matmul and cat: {t3}")
t1 = t2 = t3 = 0
if check_device_on_cuda(mask) == False:
print(f"mask.device: {mask.device}")
return qkt, y, t1, t2, t3
def forward_cached(self, x, qkt_cached, y_cached, restore_dim=True):
# print("cached")
B, T, H = x.size()
K = self.n_head
qkt_cached = check_shape(qkt_cached, (B, K, T-1, T-1))
y_cached = check_shape(y_cached, (B, K, T-1, H // K))
# with PytorchTimer(verbose=False) as T1:
# print(f"self.i: {self.i}")
q = self.q(x).view(B, T, K, H // K).transpose(1, 2)
k = self.k(x).view(B, T, K, H // K).transpose(1, 2)
v = self.v(x).view(B, T, K, H // K).transpose(1, 2)
qkt = torch.zeros(B, K, T, T, device=x.device)
# print(f"qkt_cached: {qkt_cached.shape}")
# print(f"qkt: {qkt[:, :, :T-self.i, :T-self.i].shape}, T: {T}")
qkt[:, :, :T-1, :T-1] = qkt_cached
# t1 = T1.elapsed
# qkt: BK1(H/K) * BK(H/K)T -> BK1T
# with PytorchTimer(verbose=False) as T2:
qkt[:, :, -1:, :] = q[:, :, -1:, :] @ k.transpose(-2, -1)
attn = qkt * (1.0 / math.sqrt(k.size(-1)))
# t2 = T2.elapsed
# print(f"mem_selfattn qkt matmul: {t2}")
mask = self.mask[:, :, :T, :T]
attn = attn.masked_fill(mask == 0, float('-inf'))
attn = F.softmax(attn, dim=-1)
attn = self.attn_drop(attn)
new_attn = attn[:, :, -1:, :]
# with PytorchTimer(verbose=False) as T3:
# y_new: BK1T * BKT(H/K) -> BK1(H/K)
y_new = new_attn @ v
# print(f"forward_cached: {new_attn.shape} {v.shape}")
# y: stack(BK1(H/K), BK(T-1)(H/K)) -> BKT(H/K)
y = torch.cat((y_cached, y_new), dim=-2)
# t3 = T3.elapsed
# print(f"mem_selfattn y matmul and cat: {t3}")
t1 = t2 = t3 = 0
# qkt = qkt[:, :, :-1, :-1]
# y = y[:, :, :-1, :]
# x = x[:, :-1, :]
return qkt, y, t1, t2, t3
def forward(self, x):
# print(self.cache.keys())
B, T, H = x.size()
K = self.n_head
assert H == self.n_hidden
assert H % K == 0
qkt_cached = self.get_cache("qkt", device=x.device)
y_cached = self.get_cache("y", device=x.device)
# print(y_cached is None)
# if y_cached is not None:
# print(f"y_cached.shape: {y_cached.shape}")
# if qkt_cached is not None:
# print(f"qkt_cached.shape: {qkt_cached.shape}")
# print(f"cache_counter: {self.cache_counter} cache_length: {self.cache_length}")
if (y_cached is None or qkt_cached is None) or self.cache_counter >= self.cache_length:
self.clear_cache()
qkt, y, t1, t2, t3 = self.forward_uncached(x)
self.set_cache("qkt", check_shape(qkt, (B, K, T, T)))
self.set_cache("y", check_shape(y, (B, K, T, H // K)))
else:
qkt, y, t1, t2, t3 = self.forward_cached(x, qkt_cached, y_cached)
self.set_cache("qkt", check_shape(qkt, (B, K, T, T)))
self.set_cache("y", check_shape(y, (B, K, T, H // K)))
y = y.transpose(1, 2).contiguous().view(B, T, H)
self.cache_counter += 1
# print(t1, t2, t3)
return y, t1, t2, t3
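# Illustrative decoding sketch (not part of the original module, and not invoked anywhere):
# with the CachedSelfAttn layer above, the first call runs full self-attention over the
# prompt and fills the qkt/y caches; each subsequent call with one appended token reuses
# those caches and only computes the newest attention row, until cache_counter reaches
# cache_length and the caches are rebuilt from scratch.
def _cached_selfattn_demo(steps=4):
    B, T, H, K = 2, 16, 64, 4
    attn = CachedSelfAttn(K, H, cache_length=steps)
    x = torch.randn((B, T, H), device=device)
    for _ in range(steps):
        y, _, _, _ = attn(x)  # cached on every call after the first one
        new_token = torch.randn((B, 1, H), device=device)
        x = torch.cat([y, new_token], dim=-2)  # append one generated token and repeat
    return x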
### Helper function for Benchmark ###
def bench_cached(module, x, n_gen, is_profile=False):
t1_array = []
t2_array = []
t3_array = []
# x = x.to(device)
B, T, H = x.shape
mem_usage = []
module.clear_cache()
module.reset_cache_counter()
with torch.inference_mode():
with PytorchTimer(verbose=False) as t:
if is_profile:
for _ in range(8):
x1 = x[:]
for i in range(1, 10):
y, t1, t2, t3 = module(x1)
y = check_shape(y, x1.shape)
t1_array.append(t1)
t2_array.append(t2)
t3_array.append(t3)
y_new = torch.randn((B, 1, H), device=device)
x1 = check_shape(torch.cat((y, y_new), dim=-2), (B, T + i, H))
mem_usage.append(torch.cuda.memory_allocated())
module.clear_cache()
module.reset_cache_counter()
with profile(activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA]) as prof:
x1 = x[:]
for i in range(1, 10):
y, t1, t2, t3 = module(x1)
y = check_shape(y, x1.shape)
t1_array.append(t1)
t2_array.append(t2)
t3_array.append(t3)
y_new = torch.randn((B, 1, H), device=device)
x1 = check_shape(torch.cat((y, y_new), dim=-2), (B, T + i, H))
mem_usage.append(torch.cuda.memory_allocated())
module.clear_cache()
module.reset_cache_counter()
prof.export_chrome_trace(f"profiles/{today}-trace-token_length={x.size(1)} mem_length={module.cache_length}.json")
else:
for i in range(1, n_gen + 1):
y, t1, t2, t3 = module(x)
y = check_shape(y, x.shape)
t1_array.append(t1)
t2_array.append(t2)
t3_array.append(t3)
y_new = torch.randn((B, 1, H), device=device)
x = check_shape(torch.cat((y, y_new), dim=-2), (B, T + i, H))
mem_usage.append(torch.cuda.memory_allocated())
return t.elapsed, mem_usage, np.sum(t1_array), np.sum(t2_array), np.sum(t3_array)
def pipeline(benchmark_function, module):
t1_array = []
t2_array = []
t3_array = []
with profile(activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA]) as prof:
# warmup
for i in tqdm(range(4)):
benchmark_function(module, x, Tg)
# bench
total_time = []
mem_usage = []
# FLOP = []
for i in tqdm(range(8)):
ret = benchmark_function(module, x, Tg, False)
total_time.append(ret[0])
mem_usage += ret[1]
t1_array.append(ret[2])
t2_array.append(ret[3])
t3_array.append(ret[4])
# benchmark_function(module, x, Tg, True)
# prof.export_chrome_trace(f"profiles/{today}-full-trace-token_length={x.size(1)} mem_length={module.cache_length}.json")
return [np.mean(total_time), np.std(total_time), np.mean(mem_usage) / 10 ** 6, np.std(mem_usage) / 10 ** 6, np.mean(t1_array), np.std(t1_array), np.mean(t2_array), np.std(t2_array), np.mean(t3_array), np.std(t3_array)]
if __name__ == "__main__":
# Parameters: layers, d_model
# 117M: 12, 768
# 345M: 24, 1024
# 762M: 36, 1280
# 1542M: 48, 1600
hparams = {"117M": (12, 768), "345M": (24, 1024), "762M": (36, 1280), "1542M": (48, 1600)}
start = time.time()
for model_size, hparam in hparams.items():
if model_size != "117M":
continue
with torch.no_grad():
with torch.autocast(device):
d = {}
Ts = [128] # 1024, 512, 256, 128
K = 4
B, H = hparam
for T in Ts:
Tc = 32
Tg = T
layer0 = CachedSelfAttn(K, H, cache_length=0, B=B, T=Tc+Tg)
layer1 = CachedSelfAttn(K, H, cache_length=0.25 * Tg, B=B, T=Tc+Tg)
layer2 = CachedSelfAttn(K, H, cache_length=0.5 * Tg, B=B, T=Tc+Tg)
layer3 = CachedSelfAttn(K, H, cache_length=0.75 * Tg, B=B, T=Tc+Tg)
layer4 = CachedSelfAttn(K, H, cache_length=Tg, B=B, T=Tc+Tg)
x = torch.randn((B, Tc, H), device=device)
# print(x.get_device())
ret0 = pipeline(bench_cached, layer0)
flops = selfattn_flop(B=B, H=H, K=K, Tc=Tc, Tg=Tg, cache_length=0)
print(ret0 + [flops])
d[f"Tc={Tc} Tg={Tg} cache_length={0}"] = ret0 + [flops]
torch.cuda.empty_cache()
x = torch.randn((B, Tc, H), device=device)
ret1 = pipeline(bench_cached, layer1)
flops = selfattn_flop(B=B, H=H, K=K, Tc=Tc, Tg=Tg, cache_length=0.25 * Tg)
print(ret1 + [flops])
d[f"Tc={Tc} Tg={Tg} cache_length={0.25 * Tg}"] = ret1 + [flops]
torch.cuda.empty_cache()
x = torch.randn((B, Tc, H), device=device)
ret2 = pipeline(bench_cached, layer2)
flops = selfattn_flop(B=B, H=H, K=K, Tc=Tc, Tg=Tg, cache_length=0.5 * Tg)
print(ret2 + [flops])
d[f"Tc={Tc} Tg={Tg} cache_length={0.5 * Tg}"] = ret2 + [flops]
torch.cuda.empty_cache()
x = torch.randn((B, Tc, H), device=device)
ret3 = pipeline(bench_cached, layer3)
flops = selfattn_flop(B=B, H=H, K=K, Tc=Tc, Tg=Tg, cache_length=0.75 * Tg)
print(ret3 + [flops])
d[f"Tc={Tc} Tg={Tg} cache_length={0.75 * Tg}"] = ret3 + [flops]
torch.cuda.empty_cache()
x = torch.randn((B, Tc, H), device=device)
ret4 = pipeline(bench_cached, layer4)
flops = selfattn_flop(B=B, H=H, K=K, Tc=Tc, Tg=Tg, cache_length=Tg)
print(ret4 + [flops])
d[f"Tc={Tc} Tg={Tg} cache_length={Tg}"] = ret4 + [flops]
torch.cuda.empty_cache()
print(d)
df = pd.DataFrame(data=d, index=["runtime_mean(ms)", "runtime_std(ms)", "mem_mean(MB)", "mem_std(MB)", "t1_mean(s)", "t1_std(s)", "t2_mean(s)", "t2_std(s)","t3_mean(s)", "t3_std(s)", "flops"])
print(df)
df.to_csv(f"logs/{today}-mem_selfattn_{model_size}_K={K}_test_nograd_AMP_todevice_optimized_t1t2t3_T=32.csv")
print(time.time() - start)
```
|
{
"source": "jerrypeng7773/amazon-sagemaker-examples",
"score": 2
}
|
#### File: docker/code/main.py
```python
from __future__ import absolute_import
import os
import sys
import time
from utils import (
ExitSignalHandler,
load_json_object,
print_files_in_path,
print_json_object,
save_model_artifacts,
write_failure_file,
)
hyperparameters_file_path = "/opt/ml/input/config/hyperparameters.json"
inputdataconfig_file_path = "/opt/ml/input/config/inputdataconfig.json"
resource_file_path = "/opt/ml/input/config/resourceconfig.json"
data_files_path = "/opt/ml/input/data/"
failure_file_path = "/opt/ml/output/failure"
model_artifacts_path = "/opt/ml/model/"
training_job_name_env = "TRAINING_JOB_NAME"
training_job_arn_env = "TRAINING_JOB_ARN"
def train():
try:
print("\nRunning training...")
if os.path.exists(hyperparameters_file_path):
hyperparameters = load_json_object(hyperparameters_file_path)
print("\nHyperparameters configuration:")
print_json_object(hyperparameters)
if os.path.exists(inputdataconfig_file_path):
input_data_config = load_json_object(inputdataconfig_file_path)
print("\nInput data configuration:")
print_json_object(input_data_config)
for key in input_data_config:
print("\nList of files in {0} channel: ".format(key))
channel_path = data_files_path + key + "/"
print_files_in_path(channel_path)
if os.path.exists(resource_file_path):
resource_config = load_json_object(resource_file_path)
print("\nResource configuration:")
print_json_object(resource_config)
if training_job_name_env in os.environ:
print("\nTraining job name: ")
print(os.environ[training_job_name_env])
if training_job_arn_env in os.environ:
print("\nTraining job ARN: ")
print(os.environ[training_job_arn_env])
        # This object is used to handle SIGTERM and SIGINT signals.
signal_handler = ExitSignalHandler()
# Dummy net.
net = None
# Run training loop.
epochs = 5
for x in range(epochs):
print("\nRunning epoch {0}...".format(x))
time.sleep(30)
if signal_handler.exit_now:
print("Received SIGTERM/SIGINT. Saving training state and exiting.")
# Save state here.
save_model_artifacts(model_artifacts_path, net)
sys.exit(0)
print("Completed epoch {0}.".format(x))
# At the end of the training loop, we have to save model artifacts.
save_model_artifacts(model_artifacts_path, net)
print("\nTraining completed!")
except Exception as e:
write_failure_file(failure_file_path, str(e))
print(e, file=sys.stderr)
sys.exit(1)
if __name__ == "__main__":
    if len(sys.argv) > 1 and sys.argv[1] == "train":
train()
else:
print("Missing required argument 'train'.", file=sys.stderr)
sys.exit(1)
```
#### File: multi_model_pytorch/code/train.py
```python
import argparse
import gzip
import json
import logging
import os
import shutil
import subprocess
import sys
from distutils.dir_util import copy_tree
from tempfile import TemporaryDirectory
# External Dependencies:
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from model import MNISTNet
from packaging import version as pkgversion
from sagemaker_pytorch_serving_container import handler_service as default_handler_service
from torch.utils.data import DataLoader, Dataset
# Local Dependencies:
from inference import *
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler(sys.stdout))
def enable_sm_oneclick_deploy(model_dir):
"""Copy current running source code folder to model_dir, to enable Estimator.deploy()
PyTorch framework containers will load custom inference code if:
- The code exists in a top-level code/ folder in the model.tar.gz
- The entry point argument matches an existing file
...So to make one-click estimator.deploy() work (without creating a PyTorchModel first), we need
to:
- Copy the current working directory to model_dir/code
- `from inference import *` because "train.py" will still be the entry point (same as the training job)
"""
code_path = os.path.join(model_dir, "code")
logger.info(f"Copying working folder to {code_path}")
for currpath, dirs, files in os.walk("."):
for file in files:
# Skip any filenames starting with dot:
if file.startswith("."):
continue
filepath = os.path.join(currpath, file)
# Skip any pycache or dot folders:
if ((os.path.sep + ".") in filepath) or ("__pycache__" in filepath):
continue
relpath = filepath[len(".") :]
if relpath.startswith(os.path.sep):
relpath = relpath[1:]
outpath = os.path.join(code_path, relpath)
logger.info(f"Copying {filepath} to {outpath}")
os.makedirs(outpath.rpartition(os.path.sep)[0], exist_ok=True)
shutil.copy2(filepath, outpath)
return code_path
def enable_torchserve_multi_model(model_dir, handler_service_file=default_handler_service.__file__):
"""Package the contents of model_dir as a TorchServe model archive
SageMaker framework serving containers for PyTorch versions >=1.6 use TorchServe, for consistency with
the PyTorch ecosystem. TorchServe expects particular 'model archive' packaging around models.
On single-model endpoints, the SageMaker container can transparently package your model.tar.gz for
TorchServe at start-up. On multi-model endpoints though, as models are loaded and unloaded dynamically,
this is not (currently?) supported.
...So to make your training jobs produce model.tar.gz's which are already compatible with TorchServe
(and therefore SageMaker Multi-Model-Endpoints, on PyTorch >=1.6), you can do something like this.
Check out the PyTorch Inference Toolkit (used by SageMaker PyTorch containers) for more details:
https://github.com/aws/sagemaker-pytorch-inference-toolkit
For running single-model endpoints, or MMEs on PyTorch<1.6, this function is not necessary.
If you use the SageMaker PyTorch framework containers, you won't need to change `handler_service_file`
unless you already know about the topic :-) The default handler will already support `model_fn`, etc.
"""
if pkgversion.parse(torch.__version__) >= pkgversion.parse("1.6"):
logger.info(f"Packaging {model_dir} for use with TorchServe")
# torch-model-archiver creates a subdirectory per `model-name` within `export-path`, but we want the
# contents to end up in `model_dir`'s root - so will use a temp dir and copy back:
with TemporaryDirectory() as temp_dir:
ts_model_name = "model" # Just a placeholder, doesn't really matter for our purposes
subprocess.check_call(
[
"torch-model-archiver",
"--model-name",
ts_model_name,
"--version",
"1",
"--handler",
handler_service_file,
"--extra-files",
model_dir,
"--archive-format",
"no-archive",
"--export-path",
temp_dir,
]
)
copy_tree(os.path.join(temp_dir, ts_model_name), model_dir)
else:
logger.info(f"Skipping TorchServe repackage: PyTorch version {torch.__version__} < 1.6")
def normalize(x, axis):
eps = np.finfo(float).eps
mean = np.mean(x, axis=axis, keepdims=True)
# avoid division by zero
std = np.std(x, axis=axis, keepdims=True) + eps
return (x - mean) / std
def convert_to_tensor(data_dir, images_file, labels_file):
"""Byte string to torch tensor"""
with gzip.open(os.path.join(data_dir, images_file), "rb") as f:
images = (
np.frombuffer(
f.read(),
np.uint8,
offset=16,
)
.reshape(-1, 28, 28)
.astype(np.float32)
)
with gzip.open(os.path.join(data_dir, labels_file), "rb") as f:
labels = np.frombuffer(f.read(), np.uint8, offset=8).astype(np.int64)
# normalize the images
images = normalize(images, axis=(1, 2))
# add channel dimension (depth-major)
images = np.expand_dims(images, axis=1)
# to torch tensor
images = torch.tensor(images, dtype=torch.float32)
labels = torch.tensor(labels, dtype=torch.int64)
return images, labels
class MNIST(Dataset):
def __init__(self, data_dir, train=True):
"""PyTorch Dataset for example MNIST files
Loads and decodes the expected gzip file names from data_dir
"""
if train:
images_file = "train-images-idx3-ubyte.gz"
labels_file = "train-labels-idx1-ubyte.gz"
else:
images_file = "t10k-images-idx3-ubyte.gz"
labels_file = "t10k-labels-idx1-ubyte.gz"
self.images, self.labels = convert_to_tensor(data_dir, images_file, labels_file)
def __len__(self):
return len(self.labels)
def __getitem__(self, idx):
return self.images[idx], self.labels[idx]
def train(args):
use_cuda = args.num_gpus > 0
    device = torch.device("cuda" if use_cuda else "cpu")
torch.manual_seed(args.seed)
if use_cuda:
torch.cuda.manual_seed(args.seed)
train_loader = DataLoader(
MNIST(args.train, train=True), batch_size=args.batch_size, shuffle=True
)
test_loader = DataLoader(
MNIST(args.test, train=False), batch_size=args.test_batch_size, shuffle=False
)
net = MNISTNet().to(device)
loss_fn = nn.CrossEntropyLoss()
    optimizer = optim.Adam(
        net.parameters(),
        lr=args.learning_rate,
        betas=(args.beta_1, args.beta_2),
        weight_decay=args.weight_decay,
    )
logger.info("Start training ...")
for epoch in range(1, args.epochs + 1):
net.train()
for batch_idx, (imgs, labels) in enumerate(train_loader, 1):
imgs, labels = imgs.to(device), labels.to(device)
output = net(imgs)
loss = loss_fn(output, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print(
"Train Epoch: {} [{}/{} ({:.0f}%)] Loss: {:.6f}".format(
epoch,
batch_idx * len(imgs),
len(train_loader.sampler),
100.0 * batch_idx / len(train_loader),
loss.item(),
)
)
# test the model
test(net, test_loader, device)
# save model checkpoint
save_model(net, args.model_dir)
return
def test(model, test_loader, device):
"""Evaluate `model` on the test set and log metrics to console"""
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for imgs, labels in test_loader:
imgs, labels = imgs.to(device), labels.to(device)
output = model(imgs)
test_loss += F.cross_entropy(output, labels, reduction="sum").item()
pred = output.max(1, keepdim=True)[1]
correct += pred.eq(labels.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
logger.info(
"Test set: Average loss: {:.4f}, Accuracy: {}/{}, {})\n".format(
test_loss,
correct,
len(test_loader.dataset),
100.0 * correct / len(test_loader.dataset),
)
)
return
def save_model(model, model_dir):
path = os.path.join(model_dir, "model.pth")
logger.info(f"Saving model to {path}")
torch.save(model.cpu().state_dict(), path)
enable_sm_oneclick_deploy(model_dir)
enable_torchserve_multi_model(model_dir)
return
def parse_args():
"""Load SageMaker training job (hyper)-parameters from CLI and environment variables"""
parser = argparse.ArgumentParser()
# Training procedure parameters:
parser.add_argument(
"--batch-size",
type=int,
default=64,
metavar="N",
help="input batch size for training (default: 64)",
)
parser.add_argument(
"--test-batch-size",
type=int,
default=1000,
metavar="N",
help="input batch size for testing (default: 1000)",
)
parser.add_argument(
"--epochs",
type=int,
default=1,
metavar="N",
help="number of epochs to train (default: 1)",
)
parser.add_argument(
"--learning-rate",
type=float,
default=0.001,
metavar="LR",
help="learning rate (default: 0.01)",
)
parser.add_argument(
"--beta_1",
type=float,
default=0.9,
metavar="BETA1",
help="beta1 (default: 0.9)",
)
parser.add_argument(
"--beta_2",
type=float,
default=0.999,
metavar="BETA2",
help="beta2 (default: 0.999)",
)
parser.add_argument(
"--weight-decay",
type=float,
default=1e-4,
metavar="WD",
help="L2 weight decay (default: 1e-4)",
)
parser.add_argument(
"--seed",
type=int,
default=1,
metavar="S",
help="random seed (default: 1)",
)
parser.add_argument(
"--log-interval",
type=int,
default=100,
metavar="N",
help="how many batches to wait before logging training status",
)
# parser.add_argument("--backend", type=str, default=None,
# help="backend for distributed training (tcp, gloo on cpu and gloo, nccl on gpu)",
# )
# I/O folders:
parser.add_argument("--train", type=str, default=os.environ["SM_CHANNEL_TRAINING"])
parser.add_argument("--test", type=str, default=os.environ["SM_CHANNEL_TESTING"])
parser.add_argument("--model-dir", type=str, default=os.environ["SM_MODEL_DIR"])
# Container environment:
# parser.add_argument("--hosts", type=list, default=json.loads(os.environ["SM_HOSTS"]))
# parser.add_argument("--current-host", type=str, default=os.environ["SM_CURRENT_HOST"])
parser.add_argument("--num-gpus", type=int, default=os.environ["SM_NUM_GPUS"])
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
train(args)
```
#### File: container/cifar10/cifar10.py
```python
import argparse
import ast
import logging
import os
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision
import torchvision.models
import torchvision.transforms as transforms
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
classes = ("plane", "car", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck")
# https://github.com/pytorch/tutorials/blob/master/beginner_source/blitz/cifar10_tutorial.py#L118
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def _train(args):
is_distributed = len(args.hosts) > 1 and args.dist_backend is not None
logger.debug("Distributed training - {}".format(is_distributed))
if is_distributed:
# Initialize the distributed environment.
world_size = len(args.hosts)
os.environ["WORLD_SIZE"] = str(world_size)
host_rank = args.hosts.index(args.current_host)
dist.init_process_group(backend=args.dist_backend, rank=host_rank, world_size=world_size)
logger.info(
"Initialized the distributed environment: '{}' backend on {} nodes. ".format(
args.dist_backend, dist.get_world_size()
)
+ "Current host rank is {}. Using cuda: {}. Number of gpus: {}".format(
dist.get_rank(), torch.cuda.is_available(), args.num_gpus
)
)
device = "cuda" if torch.cuda.is_available() else "cpu"
logger.info("Device Type: {}".format(device))
logger.info("Loading Cifar10 dataset")
transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
)
trainset = torchvision.datasets.CIFAR10(
root=args.data_dir, train=True, download=False, transform=transform
)
train_loader = torch.utils.data.DataLoader(
trainset, batch_size=args.batch_size, shuffle=True, num_workers=args.workers
)
testset = torchvision.datasets.CIFAR10(
root=args.data_dir, train=False, download=False, transform=transform
)
test_loader = torch.utils.data.DataLoader(
testset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers
)
logger.info("Model loaded")
model = Net()
if torch.cuda.device_count() > 1:
logger.info("Gpu count: {}".format(torch.cuda.device_count()))
model = nn.DataParallel(model)
model = model.to(device)
criterion = nn.CrossEntropyLoss().to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
for epoch in range(0, args.epochs):
running_loss = 0.0
for i, data in enumerate(train_loader):
# get the inputs
inputs, labels = data
inputs, labels = inputs.to(device), labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = model(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
if i % 2000 == 1999: # print every 2000 mini-batches
print("[%d, %5d] loss: %.3f" % (epoch + 1, i + 1, running_loss / 2000))
running_loss = 0.0
print("Finished Training")
return _save_model(model, args.model_dir)
def _save_model(model, model_dir):
logger.info("Saving the model.")
path = os.path.join(model_dir, "model.pth")
# recommended way from http://pytorch.org/docs/master/notes/serialization.html
torch.save(model.cpu().state_dict(), path)
def model_fn(model_dir):
logger.info("model_fn")
device = "cuda" if torch.cuda.is_available() else "cpu"
model = Net()
if torch.cuda.device_count() > 1:
logger.info("Gpu count: {}".format(torch.cuda.device_count()))
model = nn.DataParallel(model)
with open(os.path.join(model_dir, "model.pth"), "rb") as f:
model.load_state_dict(torch.load(f))
return model.to(device)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--workers",
type=int,
default=2,
metavar="W",
help="number of data loading workers (default: 2)",
)
parser.add_argument(
"--epochs",
type=int,
default=2,
metavar="E",
help="number of total epochs to run (default: 2)",
)
parser.add_argument(
"--batch-size", type=int, default=4, metavar="BS", help="batch size (default: 4)"
)
parser.add_argument(
"--lr",
type=float,
default=0.001,
metavar="LR",
help="initial learning rate (default: 0.001)",
)
parser.add_argument(
"--momentum", type=float, default=0.9, metavar="M", help="momentum (default: 0.9)"
)
parser.add_argument(
"--dist-backend", type=str, default="gloo", help="distributed backend (default: gloo)"
)
# The parameters below retrieve their default values from SageMaker environment variables, which are
# instantiated by the SageMaker containers framework.
# https://github.com/aws/sagemaker-containers#how-a-script-is-executed-inside-the-container
parser.add_argument("--hosts", type=str, default=ast.literal_eval(os.environ["SM_HOSTS"]))
parser.add_argument("--current-host", type=str, default=os.environ["SM_CURRENT_HOST"])
parser.add_argument("--model-dir", type=str, default=os.environ["SM_MODEL_DIR"])
parser.add_argument("--data-dir", type=str, default=os.environ["SM_CHANNEL_TRAINING"])
parser.add_argument("--num-gpus", type=int, default=os.environ["SM_NUM_GPUS"])
_train(parser.parse_args())
```
#### File: creating_marketplace_products/src/algorithm_validation_specification.py
```python
import json
class AlgorithmValidationSpecification:
template = """
{
"ValidationSpecification": {
"ValidationRole": "ROLE_REPLACE_ME",
"ValidationProfiles": [
{
"ProfileName": "ValidationProfile1",
"TrainingJobDefinition": {
"TrainingInputMode": "File",
"HyperParameters": {},
"InputDataConfig": [
{
"ChannelName": "CHANNEL_NAME_REPLACE_ME",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": "TRAIN_S3_INPUT_REPLACE_ME",
"S3DataDistributionType": "FullyReplicated"
}
},
"ContentType": "CONTENT_TYPE_REPLACE_ME",
"CompressionType": "None",
"RecordWrapperType": "None"
}
],
"OutputDataConfig": {
"KmsKeyId": "",
"S3OutputPath": "VALIDATION_S3_OUTPUT_REPLACE_ME/training-output"
},
"ResourceConfig": {
"InstanceType": "INSTANCE_TYPE_REPLACE_ME",
"InstanceCount": 1,
"VolumeSizeInGB": 10,
"VolumeKmsKeyId": ""
},
"StoppingCondition": {
"MaxRuntimeInSeconds": 1800
}
},
"TransformJobDefinition": {
"MaxConcurrentTransforms": 1,
"MaxPayloadInMB": 6,
"TransformInput": {
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": "BATCH_S3_INPUT_REPLACE_ME"
}
},
"ContentType": "CONTENT_TYPE_REPLACE_ME",
"CompressionType": "None",
"SplitType": "Line"
},
"TransformOutput": {
"S3OutputPath": "VALIDATION_S3_OUTPUT_REPLACE_ME/batch-transform-output",
"Accept": "CONTENT_TYPE_REPLACE_ME",
"AssembleWith": "Line",
"KmsKeyId": ""
},
"TransformResources": {
"InstanceType": "INSTANCE_TYPE_REPLACE_ME",
"InstanceCount": 1
}
}
}
]
}
}
"""
def get_algo_validation_specification_dict(
self,
validation_role,
training_channel_name,
training_input,
batch_transform_input,
content_type,
instance_type,
output_s3_location,
):
return json.loads(
self.get_algo_validation_specification_json(
validation_role,
training_channel_name,
training_input,
batch_transform_input,
content_type,
instance_type,
output_s3_location,
)
)
def get_algo_validation_specification_json(
self,
validation_role,
training_channel_name,
training_input,
batch_transform_input,
content_type,
instance_type,
output_s3_location,
):
return (
self.template.replace("ROLE_REPLACE_ME", validation_role)
.replace("CHANNEL_NAME_REPLACE_ME", training_channel_name)
.replace("TRAIN_S3_INPUT_REPLACE_ME", training_input)
.replace("BATCH_S3_INPUT_REPLACE_ME", batch_transform_input)
.replace("CONTENT_TYPE_REPLACE_ME", content_type)
.replace("INSTANCE_TYPE_REPLACE_ME", instance_type)
.replace("VALIDATION_S3_OUTPUT_REPLACE_ME", output_s3_location)
)
```
#### File: creating_marketplace_products/src/modelpackage_validation_specification.py
```python
import json
class ModelPackageValidationSpecification:
template = """
{
"ValidationSpecification": {
"ValidationRole": "ROLE_REPLACE_ME",
"ValidationProfiles": [
{
"ProfileName": "ValidationProfile1",
"TransformJobDefinition": {
"MaxConcurrentTransforms": 1,
"MaxPayloadInMB": 6,
"TransformInput": {
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": "BATCH_S3_INPUT_REPLACE_ME"
}
},
"ContentType": "CONTENT_TYPE_REPLACE_ME",
"CompressionType": "None",
"SplitType": "Line"
},
"TransformOutput": {
"S3OutputPath": "VALIDATION_S3_OUTPUT_REPLACE_ME/batch-transform-output",
"Accept": "CONTENT_TYPE_REPLACE_ME",
"AssembleWith": "Line",
"KmsKeyId": ""
},
"TransformResources": {
"InstanceType": "INSTANCE_TYPE_REPLACE_ME",
"InstanceCount": 1
}
}
}
]
}
}
"""
def get_validation_specification_dict(
self,
validation_role,
batch_transform_input,
content_type,
instance_type,
output_s3_location,
):
return json.loads(
self.get_validation_specification_json(
validation_role,
batch_transform_input,
content_type,
instance_type,
output_s3_location,
)
)
def get_validation_specification_json(
self,
validation_role,
batch_transform_input,
content_type,
instance_type,
output_s3_location,
):
return (
self.template.replace("ROLE_REPLACE_ME", validation_role)
.replace("BATCH_S3_INPUT_REPLACE_ME", batch_transform_input)
.replace("CONTENT_TYPE_REPLACE_ME", content_type)
.replace("INSTANCE_TYPE_REPLACE_ME", instance_type)
.replace("VALIDATION_S3_OUTPUT_REPLACE_ME", output_s3_location)
)
```
#### File: creating_marketplace_products/src/training_specification.py
```python
import json
class TrainingSpecification:
template = """
{
"TrainingSpecification": {
"TrainingImage": "IMAGE_REPLACE_ME",
"SupportedHyperParameters": [
{
"Description": "Grow a tree with max_leaf_nodes in best-first fashion. Best nodes are defined as relative reduction in impurity. If None then unlimited number of leaf nodes",
"Name": "max_leaf_nodes",
"Type": "Integer",
"Range": {
"IntegerParameterRangeSpecification": {
"MinValue": "1",
"MaxValue": "100000"
}
},
"IsTunable": true,
"IsRequired": false,
"DefaultValue": "100"
}
],
"SupportedTrainingInstanceTypes": INSTANCES_REPLACE_ME,
"SupportsDistributedTraining": false,
"MetricDefinitions": METRICS_REPLACE_ME,
"TrainingChannels": CHANNELS_REPLACE_ME,
"SupportedTuningJobObjectiveMetrics": TUNING_OBJECTIVES_REPLACE_ME
}
}
"""
def get_training_specification_dict(
self,
ecr_image,
supports_gpu,
supported_channels=None,
supported_metrics=None,
supported_tuning_job_objective_metrics=None,
):
return json.loads(
self.get_training_specification_json(
ecr_image,
supports_gpu,
supported_channels,
supported_metrics,
supported_tuning_job_objective_metrics,
)
)
def get_training_specification_json(
self,
ecr_image,
supports_gpu,
supported_channels=None,
supported_metrics=None,
supported_tuning_job_objective_metrics=None,
):
if supported_channels is None:
print("Please provide at least one supported channel")
raise ValueError("Please provide at least one supported channel")
if supported_metrics is None:
supported_metrics = []
if supported_tuning_job_objective_metrics is None:
supported_tuning_job_objective_metrics = []
return (
self.template.replace("IMAGE_REPLACE_ME", ecr_image)
.replace("INSTANCES_REPLACE_ME", self.get_supported_instances(supports_gpu))
.replace(
"CHANNELS_REPLACE_ME",
json.dumps([ob.__dict__ for ob in supported_channels], indent=4, sort_keys=True),
)
.replace(
"METRICS_REPLACE_ME",
json.dumps([ob.__dict__ for ob in supported_metrics], indent=4, sort_keys=True),
)
.replace(
"TUNING_OBJECTIVES_REPLACE_ME",
json.dumps(
[ob.__dict__ for ob in supported_tuning_job_objective_metrics],
indent=4,
sort_keys=True,
),
)
)
@staticmethod
def get_supported_instances(supports_gpu):
cpu_list = [
"ml.m4.xlarge",
"ml.m4.2xlarge",
"ml.m4.4xlarge",
"ml.m4.10xlarge",
"ml.m4.16xlarge",
"ml.m5.large",
"ml.m5.xlarge",
"ml.m5.2xlarge",
"ml.m5.4xlarge",
"ml.m5.12xlarge",
"ml.m5.24xlarge",
"ml.c4.xlarge",
"ml.c4.2xlarge",
"ml.c4.4xlarge",
"ml.c4.8xlarge",
"ml.c5.xlarge",
"ml.c5.2xlarge",
"ml.c5.4xlarge",
"ml.c5.9xlarge",
"ml.c5.18xlarge",
]
gpu_list = [
"ml.p2.xlarge",
"ml.p2.8xlarge",
"ml.p2.16xlarge",
"ml.p3.2xlarge",
"ml.p3.8xlarge",
"ml.p3.16xlarge",
]
list_to_return = cpu_list
if supports_gpu:
list_to_return = cpu_list + gpu_list
return json.dumps(list_to_return)
```
#### File: creating_marketplace_products/src/tuning_objectives.py
```python
class TuningObjectives:
def __init__(self, objectiveType, metricName):
self.Type = objectiveType
self.MetricName = metricName
```
#### File: frameworks/pytorch_cnn_cifar10/cifar_utils.py
```python
import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision
import torchvision.transforms as transforms
classes = ("plane", "car", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck")
def _get_transform():
return torchvision.transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
)
def train_data_loader():
transform = _get_transform()
trainset = torchvision.datasets.CIFAR10(
root="./data", train=True, download=False, transform=transform
)
return torch.utils.data.DataLoader(trainset, batch_size=4, shuffle=True, num_workers=2)
def test_data_loader():
transform = _get_transform()
testset = torchvision.datasets.CIFAR10(
root="./data", train=False, download=False, transform=transform
)
return torch.utils.data.DataLoader(testset, batch_size=4, shuffle=False, num_workers=2)
def show_img(img):
"""displays an image"""
img = img / 2 + 0.5 # unnormalize
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
```
#### File: docker/code/predictor.py
```python
from __future__ import print_function
import csv
import glob
import json
import os
import shutil
import stat
import sys
from io import StringIO
import flask
import numpy as np
import pandas as pd
from flask import Flask, Response, jsonify, make_response, request
from joblib import dump, load
from sagemaker_containers.beta.framework import (
content_types,
encoders,
env,
modules,
transformer,
worker,
)
from utils import (
load_json_object,
print_files_in_path,
print_json_object,
save_model_artifacts,
write_failure_file,
)
model_artifacts_path = "/opt/ml/model/"
feature_column = "words"
label_column = "label"
preprocessor = None
le = None
# The flask app for serving predictions
app = flask.Flask(__name__)
def load_model():
global preprocessor
global le
if not preprocessor:
preprocessor = load(os.path.join(model_artifacts_path, "model.joblib"))
if not le:
le = load(os.path.join(model_artifacts_path, "label.joblib"))
@app.route("/ping", methods=["GET"])
def ping():
"""Determine if the container is working and healthy. In this sample container, we declare
it healthy if we can load the model successfully."""
load_model()
health = preprocessor is not None and le is not None
status = 200 if health else 404
return flask.Response(response="\n", status=status, mimetype="application/json")
@app.route("/invocations", methods=["POST"])
def transformation():
print("data: ", request.data[:100])
print("cookies: ", request.cookies)
print("headers: ", dict(request.headers))
print("args: ", request.args)
load_model()
content_type = request.headers["Content-Type"]
print("Content type", content_type)
accept = request.headers["Accept"]
print("Accept", accept)
input_data = request.data.decode()
first_entry = input_data.split("\n", 1)[0].split(",", 1)[0]
print("First entry is: ", first_entry)
df = None
if first_entry == "label" or first_entry.startswith("category_"):
recs = [(row[0], set(row[1:])) for row in csv.reader(StringIO(input_data))]
if first_entry == "label":
df = pd.DataFrame.from_records(recs[1:], columns=[label_column, feature_column])
else:
df = pd.DataFrame.from_records(recs, columns=[label_column, feature_column])
        # This is a labelled example: the first CSV column is the label
        print("First entry indicates that the label is included")
else:
print("Length indicates that label is not included.")
# This is an unlabelled example.
recs = [(set(row),) for row in csv.reader(StringIO(input_data))]
df = pd.DataFrame.from_records(recs, columns=[feature_column])
print("merged df", df.head())
features = preprocessor.transform(df["words"])
prediction = None
if label_column in df:
print("label_column in input_data")
labels = le.transform(df[label_column])
# Return the label (as the first column) and the set of features.
prediction = np.insert(features.todense(), 0, labels, axis=1)
else:
print("label_column not in input_data")
# Return only the set of features
prediction = features.todense()
if accept == "application/json":
instances = []
for row in prediction.tolist():
instances.append({"features": row})
json_output = {"instances": instances}
return Response(json.dumps(json_output), mimetype=accept)
# TODO: use custom flag to indicate that this is in a pipeline rather than relying on the '*/*'
elif accept == "text/csv" or accept == "*/*":
return Response(encoders.encode(prediction, "text/csv"), mimetype="text/csv")
else:
raise RuntimeError("{} accept type is not supported by this script.".format(accept))
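# Illustrative request/response shapes (assumptions about the upstream labeling pipeline):
# a labelled CSV line such as "category_kitchen,word_1,word_2" is parsed into a
# (label, {feature words}) record, while an unlabelled line "word_1,word_2" becomes a
# single-column record of feature words; the response is either dense CSV features (with
# the encoded label prepended when present) or {"instances": [{"features": [...]}]} JSON,
# depending on the Accept header.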
```
#### File: src/ActiveLearning/create_validation_set.py
```python
import logging
from s3_helper import S3Ref, copy_with_query, create_ref_at_parent_key
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def lambda_handler(event, context):
"""
This method selects 10% of the input manifest as validation and creates an s3 file containing the validation objects.
"""
label_attribute_name = event["LabelAttributeName"]
meta_data = event["meta_data"]
s3_input_uri = meta_data["IntermediateManifestS3Uri"]
input_total = int(meta_data["counts"]["input_total"])
# 10% of the total input should be used for validation.
validation_set_size = input_total // 10
source = S3Ref.from_uri(s3_input_uri)
validation_labeled_query = """select * from s3object[*] s where s."{}-metadata"."human-annotated" IN ('yes') LIMIT {}""".format(
label_attribute_name, validation_set_size
)
dest = create_ref_at_parent_key(source, "validation_input.manifest")
copy_with_query(source, dest, validation_labeled_query)
logger.info(
"Uploaded validation set of size {} to {}.".format(validation_set_size, dest.get_uri())
)
meta_data["counts"]["validation"] = validation_set_size
meta_data["ValidationS3Uri"] = dest.get_uri()
return meta_data
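# Illustrative example (assuming the s3_helper utilities above): with
# LabelAttributeName="category" and 1,000 records in the intermediate manifest,
# validation_set_size is 100 and the generated S3 Select query is
#   select * from s3object[*] s where s."category-metadata"."human-annotated" IN ('yes') LIMIT 100
# with the result written next to the input manifest as validation_input.manifest.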
```
#### File: src/ActiveLearning/prepare_for_inference.py
```python
import json
import logging
from io import StringIO
from s3_helper import S3Ref, copy_with_query_and_transform, create_ref_at_parent_key
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def augment_inference_input(inference_raw):
"""
The inference manifest needs to be augmented with a value 'k' so that blazing text
produces all probabilities instead of just the top match.
"""
augmented_inference = StringIO()
for line in inference_raw:
infer_dict = json.loads(line)
        # Note: this value should ideally equal the number of classes,
        # but any sufficiently large number produces the same result.
infer_dict["k"] = 1000000
augmented_inference.write(json.dumps(infer_dict) + "\n")
logger.info("Augmented inference data by adding 'k' to each line.")
return augmented_inference
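# Illustrative example (assuming a standard Ground Truth manifest line): an input record
# such as {"source": "some text"} is rewritten as {"source": "some text", "k": 1000000}
# so that BlazingText returns a probability for every class rather than only the top match.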
def create_tranform_config(training_config):
"""
Transform config specifies input parameters for the transform job.
"""
return {
# We reuse the training job name for the model name and corresponding
# transform job name.
"TransformJobName": training_config["TrainingJobName"],
"ModelName": training_config["TrainingJobName"],
"S3OutputPath": training_config["S3OutputPath"],
}
def lambda_handler(event, context):
"""
    This function prepares the unlabeled subset of the input manifest and the
    transform job configuration for batch inference.
"""
label_attribute_name = event["LabelAttributeName"]
meta_data = event["meta_data"]
s3_input_uri = meta_data["IntermediateManifestS3Uri"]
transform_config = create_tranform_config(meta_data["training_config"])
source = S3Ref.from_uri(s3_input_uri)
dest = S3Ref.from_uri(transform_config["S3OutputPath"] + "unlabeled.manifest")
logger.info("Creating inference output from unlabeled subset of input {}.".format(s3_input_uri))
SQL_UNLABELED = """select * from s3object[*] s where s."{}" is missing """
unlabeled_query = SQL_UNLABELED.format(label_attribute_name)
copy_with_query_and_transform(source, dest, unlabeled_query, augment_inference_input)
meta_data["UnlabeledS3Uri"] = dest.get_uri()
logger.info("Uploaded unlabeled manifest for inference to {}.".format(dest.get_uri()))
meta_data["transform_config"] = transform_config
return meta_data
```
#### File: src/tests/test_add_record_id.py
```python
import boto3
from Bootstrap.add_record_id import lambda_handler
from moto import mock_s3
@mock_s3
def test_add_record_id():
manifest_content = b'{"source":"Fed revises guidelines sending stocks up."}\n{"source": "Review Guardians of the Galaxy"}'
s3r = boto3.resource("s3", region_name="us-east-1")
s3r.create_bucket(Bucket="source_bucket")
s3r.Object("source_bucket", "input.manifest").put(Body=manifest_content)
event = {
"ManifestS3Uri": "s3://source_bucket/input.manifest",
}
output = lambda_handler(event, {})
manifest_content_with_id = b'{"source": "Fed revises guidelines sending stocks up.", "id": 0}\n{"source": "Review Guardians of the Galaxy", "id": 1}\n'
updated_body = s3r.Object("source_bucket", "input.manifest").get()["Body"].read()
assert updated_body == manifest_content_with_id
assert output["ManifestS3Uri"] == "s3://source_bucket/input.manifest"
```
#### File: ground_truth_labeling_jobs/ground_truth_object_detection_tutorial/ground_truth_od.py
```python
import os
import imageio
import matplotlib.pyplot as plt
import numpy as np
class BoundingBox:
"""Bounding box for an object in an image."""
def __init__(self, image_id=None, boxdata=None):
self.image_id = image_id
if boxdata:
for datum in boxdata:
setattr(self, datum, boxdata[datum])
def __repr__(self):
return "Box for image {}".format(self.image_id)
def compute_bb_data(self):
"""Compute the parameters used for IoU."""
image = self.image
self.xmin = self.left / image.width
self.xmax = (self.left + self.width) / image.width
self.ymin = self.top / image.height
self.ymax = (self.top + self.height) / image.height
class WorkerBoundingBox(BoundingBox):
"""Bounding box for an object in an image produced by a worker."""
def __init__(self, image_id=None, worker_id=None, boxdata=None):
self.worker_id = worker_id
super().__init__(image_id=image_id, boxdata=boxdata)
class GroundTruthBox(BoundingBox):
"""Bounding box for an object in an image produced by a worker."""
def __init__(self, image_id=None, oiddata=None, image=None):
self.image = image
self.class_name = oiddata[0]
xmin, xmax, ymin, ymax = [float(datum) for datum in oiddata[1:]]
self.xmin = xmin
self.ymin = ymin
self.xmax = xmax
self.ymax = ymax
imw = image.width
imh = image.height
boxdata = {
"height": (ymax - ymin) * imh,
"width": (xmax - xmin) * imw,
"left": xmin * imw,
"top": ymin * imh,
}
super().__init__(image_id=image_id, boxdata=boxdata)
class BoxedImage:
"""Image with bounding boxes."""
def __init__(
self,
id=None,
consolidated_boxes=None,
worker_boxes=None,
gt_boxes=None,
uri=None,
size=None,
):
self.id = id
self.uri = uri
if uri:
self.filename = uri.split("/")[-1]
self.oid_id = self.filename.split(".")[0]
else:
self.filename = None
self.oid_id = None
self.local = None
self.im = None
if size:
self.width = size["width"]
self.depth = size["depth"]
self.height = size["height"]
self.shape = self.width, self.height, self.depth
if consolidated_boxes:
self.consolidated_boxes = consolidated_boxes
else:
self.consolidated_boxes = []
if worker_boxes:
self.worker_boxes = worker_boxes
else:
self.worker_boxes = []
if gt_boxes:
self.gt_boxes = gt_boxes
else:
self.gt_boxes = []
def __repr__(self):
return "Image{}".format(self.id)
def n_consolidated_boxes(self):
"""Count the number of consolidated boxes."""
return len(self.consolidated_boxes)
def n_worker_boxes(self):
return len(self.worker_boxes)
def download(self, directory):
target_fname = os.path.join(directory, self.uri.split("/")[-1])
if not os.path.isfile(target_fname):
os.system(f"aws s3 cp {self.uri} {target_fname}")
self.local = target_fname
def imread(self):
"""Cache the image reading process."""
try:
return imageio.imread(self.local)
except OSError:
print(
"You need to download this image first. "
"Use this_image.download(local_directory)."
)
raise
def plot_bbs(self, ax, bbs, img_kwargs, box_kwargs, **kwargs):
"""Master function for plotting images with bounding boxes."""
img = self.imread()
ax.imshow(img, **img_kwargs)
imh, imw, *_ = img.shape
box_kwargs["fill"] = None
if kwargs.get("worker", False):
# Give each worker a color.
worker_colors = {}
worker_count = 0
for bb in bbs:
worker = bb.worker_id
if worker not in worker_colors:
worker_colors[worker] = "C" + str((9 - worker_count) % 10)
worker_count += 1
rec = plt.Rectangle(
(bb.left, bb.top),
bb.width,
bb.height,
edgecolor=worker_colors[worker],
**box_kwargs,
)
ax.add_patch(rec)
else:
for bb in bbs:
rec = plt.Rectangle((bb.left, bb.top), bb.width, bb.height, **box_kwargs)
ax.add_patch(rec)
ax.axis("off")
def plot_consolidated_bbs(self, ax, img_kwargs={}, box_kwargs={"edgecolor": "blue", "lw": 3}):
"""Plot the consolidated boxes."""
self.plot_bbs(ax, self.consolidated_boxes, img_kwargs=img_kwargs, box_kwargs=box_kwargs)
def plot_worker_bbs(self, ax, img_kwargs={}, box_kwargs={"lw": 2}):
"""Plot the individual worker boxes."""
self.plot_bbs(
ax, self.worker_boxes, worker=True, img_kwargs=img_kwargs, box_kwargs=box_kwargs
)
def plot_gt_bbs(self, ax, img_kwargs={}, box_kwargs={"edgecolor": "lime", "lw": 3}):
"""Plot the ground truth (Open Image Dataset) boxes."""
self.plot_bbs(ax, self.gt_boxes, img_kwargs=img_kwargs, box_kwargs=box_kwargs)
def compute_img_confidence(self):
"""Compute the mean bb confidence."""
if len(self.consolidated_boxes) > 0:
return np.mean([box.confidence for box in self.consolidated_boxes])
else:
return 0
def compute_iou_bb(self):
"""Compute the mean intersection over union for a collection of
bounding boxes.
"""
# Precompute data for the consolidated boxes if necessary.
for box in self.consolidated_boxes:
try:
box.xmin
except AttributeError:
box.compute_bb_data()
# Make the numpy arrays.
if self.gt_boxes:
gts = np.vstack([(box.xmin, box.ymin, box.xmax, box.ymax) for box in self.gt_boxes])
else:
gts = []
if self.consolidated_boxes:
preds = np.vstack(
[(box.xmin, box.ymin, box.xmax, box.ymax) for box in self.consolidated_boxes]
)
else:
preds = []
confs = np.array([box.confidence for box in self.consolidated_boxes])
if len(preds) == 0 and len(gts) == 0:
return 1.0
if len(preds) == 0 or len(gts) == 0:
return 0.0
preds = preds[np.argsort(confs.flatten())][::-1]
is_pred_assigned_to_gt = [False] * len(gts)
pred_areas = (preds[:, 2] - preds[:, 0]) * (preds[:, 3] - preds[:, 1])
gt_areas = (gts[:, 2] - gts[:, 0]) * (gts[:, 3] - gts[:, 1])
all_ious = []
for pred_id, pred in enumerate(preds):
best_iou = 0
best_id = -1
for gt_id, gt in enumerate(gts):
if is_pred_assigned_to_gt[gt_id]:
continue
x1 = max(gt[0], pred[0])
y1 = max(gt[1], pred[1])
x2 = min(gt[2], pred[2])
y2 = min(gt[3], pred[3])
iw = max(0, x2 - x1)
ih = max(0, y2 - y1)
inter = iw * ih
iou = inter / (pred_areas[pred_id] + gt_areas[gt_id] - inter)
if iou > best_iou:
best_iou = iou
best_id = gt_id
if best_id != -1:
is_pred_assigned_to_gt[best_id] = True
# True positive! Store the IoU.
all_ious.append(best_iou)
else:
                # 0 IoU for this unmatched prediction (false positive).
all_ious.append(0.0)
        # 0 IoU for each unmatched ground-truth box (false negative).
all_ious.extend([0.0] * (len(is_pred_assigned_to_gt) - sum(is_pred_assigned_to_gt)))
return np.mean(all_ious)
def group_miou(imgs):
"""Compute the mIoU for a group of images.
Args:
imgs: list of BoxedImages, with consolidated_boxes and gt_boxes.
Returns:
mIoU calculated over the bounding boxes in the group.
"""
# Create a notional BoxedImage with bounding boxes from imgs.
all_consolidated_boxes = [box for img in imgs for box in img.consolidated_boxes]
all_gt_boxes = [box for img in imgs for box in img.gt_boxes]
notional_image = BoxedImage(consolidated_boxes=all_consolidated_boxes, gt_boxes=all_gt_boxes)
# Compute and return the mIoU.
return notional_image.compute_iou_bb()
```
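The mIoU and plotting helpers above operate on `BoxedImage` objects. A minimal usage sketch, assuming the class and `group_miou` above are importable and the images already carry `consolidated_boxes` and `gt_boxes` (the helper names here are hypothetical):

```python
# Hypothetical helpers exercising the BoxedImage API defined above.
import matplotlib.pyplot as plt

def report_quality(images):
    """Print per-image mean confidence and the group-level mIoU."""
    for img in images:
        print(img.uri, "mean confidence:", img.compute_img_confidence())
    print("group mIoU:", group_miou(images))

def show_boxes(img):
    """Overlay consolidated (blue) and ground-truth (lime) boxes on one image."""
    fig, ax = plt.subplots(figsize=(8, 8))
    img.plot_consolidated_bbs(ax)
    img.plot_gt_bbs(ax)
    plt.show()
```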
#### File: lambda_src/shared/label_arn.py
```python
from enum import Enum
class JobModality(str, Enum):
PointCloudObjectDetection = "PointCloudObjectDetection"
PointCloudObjectDetectionAudit = "PointCloudObjectDetectionAudit"
PointCloudObjectTracking = "PointCloudObjectTracking"
PointCloudObjectTrackingAudit = "PointCloudObjectTrackingAudit"
PointCloudSemanticSegmentation = "PointCloudSemanticSegmentation"
PointCloudSemanticSegmentationAudit = "PointCloudSemanticSegmentationAudit"
VideoObjectDetection = "VideoObjectDetection"
VideoObjectDetectionAudit = "VideoObjectDetectionAudit"
VideoObjectTracking = "VideoObjectTracking"
VideoObjectTrackingAudit = "VideoObjectTrackingAudit"
def is_member(job_type):
return job_type in JobModality.__members__
def job_name_to_label_attribute(job_type, name):
"""Converts a job name to a label attribute value"""
if job_type.startswith("Video"):
return f"{name}-ref"
return name
def ui_config(region, job_type):
"""Generates a ui_config for a supported task type."""
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sagemaker.html#SageMaker.Client.create_labeling_job
human_task_ui_arns = {
JobModality.PointCloudObjectDetection: f"arn:aws:sagemaker:{region}:394669845002:human-task-ui/PointCloudObjectDetection",
JobModality.PointCloudObjectDetectionAudit: f"arn:aws:sagemaker:{region}:394669845002:human-task-ui/PointCloudObjectDetection",
JobModality.PointCloudObjectTracking: f"arn:aws:sagemaker:{region}:394669845002:human-task-ui/PointCloudObjectTracking",
JobModality.PointCloudObjectTrackingAudit: f"arn:aws:sagemaker:{region}:394669845002:human-task-ui/PointCloudObjectTracking",
JobModality.PointCloudSemanticSegmentation: f"arn:aws:sagemaker:{region}:394669845002:human-task-ui/PointCloudSemanticSegmentation",
JobModality.PointCloudSemanticSegmentationAudit: f"arn:aws:sagemaker:{region}:394669845002:human-task-ui/PointCloudSemanticSegmentation",
JobModality.VideoObjectDetection: f"arn:aws:sagemaker:{region}:394669845002:human-task-ui/VideoObjectDetection",
JobModality.VideoObjectDetectionAudit: f"arn:aws:sagemaker:{region}:394669845002:human-task-ui/VideoObjectDetection",
JobModality.VideoObjectTracking: f"arn:aws:sagemaker:{region}:394669845002:human-task-ui/VideoObjectTracking",
JobModality.VideoObjectTrackingAudit: f"arn:aws:sagemaker:{region}:394669845002:human-task-ui/VideoObjectTracking",
}
human_task_arn = human_task_ui_arns[job_type]
return {
"HumanTaskUiArn": human_task_arn,
}
def pre_human_task_lambda_arn(region, job_type):
"""Generates a pre human task lambda arn for a supported task type."""
pre_human_task_lambdas = {
JobModality.PointCloudObjectDetection: {
"us-east-1": "arn:aws:lambda:us-east-1:432418664414:function:PRE-3DPointCloudObjectDetection",
"us-east-2": "arn:aws:lambda:us-east-2:266458841044:function:PRE-3DPointCloudObjectDetection",
"us-west-2": "arn:aws:lambda:us-west-2:081040173940:function:PRE-3DPointCloudObjectDetection",
"eu-west-1": "arn:aws:lambda:eu-west-1:568282634449:function:PRE-3DPointCloudObjectDetection",
"ap-northeast-1": "arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-3DPointCloudObjectDetection",
"ap-southeast-2": "arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-3DPointCloudObjectDetection",
"ap-south-1": "arn:aws:lambda:ap-south-1:565803892007:function:PRE-3DPointCloudObjectDetection",
"eu-central-1": "arn:aws:lambda:eu-central-1:203001061592:function:PRE-3DPointCloudObjectDetection",
"ap-northeast-2": "arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-3DPointCloudObjectDetection",
"eu-west-2": "arn:aws:lambda:eu-west-2:487402164563:function:PRE-3DPointCloudObjectDetection",
"ap-southeast-1": "arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-3DPointCloudObjectDetection",
"ca-central-1": "arn:aws:lambda:ca-central-1:918755190332:function:PRE-3DPointCloudObjectDetection",
},
JobModality.PointCloudObjectDetectionAudit: {
"us-east-1": "arn:aws:lambda:us-east-1:432418664414:function:PRE-Adjustment3DPointCloudObjectDetection",
"us-east-2": "arn:aws:lambda:us-east-2:266458841044:function:PRE-Adjustment3DPointCloudObjectDetection",
"us-west-2": "arn:aws:lambda:us-west-2:081040173940:function:PRE-Adjustment3DPointCloudObjectDetection",
"eu-west-1": "arn:aws:lambda:eu-west-1:568282634449:function:PRE-Adjustment3DPointCloudObjectDetection",
"ap-northeast-1": "arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-Adjustment3DPointCloudObjectDetection",
"ap-southeast-2": "arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-Adjustment3DPointCloudObjectDetection",
"ap-south-1": "arn:aws:lambda:ap-south-1:565803892007:function:PRE-Adjustment3DPointCloudObjectDetection",
"eu-central-1": "arn:aws:lambda:eu-central-1:203001061592:function:PRE-Adjustment3DPointCloudObjectDetection",
"ap-northeast-2": "arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-Adjustment3DPointCloudObjectDetection",
"eu-west-2": "arn:aws:lambda:eu-west-2:487402164563:function:PRE-Adjustment3DPointCloudObjectDetection",
"ap-southeast-1": "arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-Adjustment3DPointCloudObjectDetection",
"ca-central-1": "arn:aws:lambda:ca-central-1:918755190332:function:PRE-Adjustment3DPointCloudObjectDetection",
},
JobModality.PointCloudObjectTracking: {
"us-east-1": "arn:aws:lambda:us-east-1:432418664414:function:PRE-3DPointCloudObjectTracking",
"us-east-2": "arn:aws:lambda:us-east-2:266458841044:function:PRE-3DPointCloudObjectTracking",
"us-west-2": "arn:aws:lambda:us-west-2:081040173940:function:PRE-3DPointCloudObjectTracking",
"eu-west-1": "arn:aws:lambda:eu-west-1:568282634449:function:PRE-3DPointCloudObjectTracking",
"ap-northeast-1": "arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-3DPointCloudObjectTracking",
"ap-southeast-2": "arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-3DPointCloudObjectTracking",
"ap-south-1": "arn:aws:lambda:ap-south-1:565803892007:function:PRE-3DPointCloudObjectTracking",
"eu-central-1": "arn:aws:lambda:eu-central-1:203001061592:function:PRE-3DPointCloudObjectTracking",
"ap-northeast-2": "arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-3DPointCloudObjectTracking",
"eu-west-2": "arn:aws:lambda:eu-west-2:487402164563:function:PRE-3DPointCloudObjectTracking",
"ap-southeast-1": "arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-3DPointCloudObjectTracking",
"ca-central-1": "arn:aws:lambda:ca-central-1:918755190332:function:PRE-3DPointCloudObjectTracking",
},
JobModality.PointCloudObjectTrackingAudit: {
"us-east-1": "arn:aws:lambda:us-east-1:432418664414:function:PRE-Adjustment3DPointCloudObjectTracking",
"us-east-2": "arn:aws:lambda:us-east-2:266458841044:function:PRE-Adjustment3DPointCloudObjectTracking",
"us-west-2": "arn:aws:lambda:us-west-2:081040173940:function:PRE-Adjustment3DPointCloudObjectTracking",
"eu-west-1": "arn:aws:lambda:eu-west-1:568282634449:function:PRE-Adjustment3DPointCloudObjectTracking",
"ap-northeast-1": "arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-Adjustment3DPointCloudObjectTracking",
"ap-southeast-2": "arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-Adjustment3DPointCloudObjectTracking",
"ap-south-1": "arn:aws:lambda:ap-south-1:565803892007:function:PRE-Adjustment3DPointCloudObjectTracking",
"eu-central-1": "arn:aws:lambda:eu-central-1:203001061592:function:PRE-Adjustment3DPointCloudObjectTracking",
"ap-northeast-2": "arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-Adjustment3DPointCloudObjectTracking",
"eu-west-2": "arn:aws:lambda:eu-west-2:487402164563:function:PRE-Adjustment3DPointCloudObjectTracking",
"ap-southeast-1": "arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-Adjustment3DPointCloudObjectTracking",
"ca-central-1": "arn:aws:lambda:ca-central-1:918755190332:function:PRE-Adjustment3DPointCloudObjectTracking",
},
JobModality.PointCloudSemanticSegmentation: {
"us-east-1": "arn:aws:lambda:us-east-1:432418664414:function:PRE-3DPointCloudSemanticSegmentation",
"us-east-2": "arn:aws:lambda:us-east-2:266458841044:function:PRE-3DPointCloudSemanticSegmentation",
"us-west-2": "arn:aws:lambda:us-west-2:081040173940:function:PRE-3DPointCloudSemanticSegmentation",
"eu-west-1": "arn:aws:lambda:eu-west-1:568282634449:function:PRE-3DPointCloudSemanticSegmentation",
"ap-northeast-1": "arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-3DPointCloudSemanticSegmentation",
"ap-southeast-2": "arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-3DPointCloudSemanticSegmentation",
"ap-south-1": "arn:aws:lambda:ap-south-1:565803892007:function:PRE-3DPointCloudSemanticSegmentation",
"eu-central-1": "arn:aws:lambda:eu-central-1:203001061592:function:PRE-3DPointCloudSemanticSegmentation",
"ap-northeast-2": "arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-3DPointCloudSemanticSegmentation",
"eu-west-2": "arn:aws:lambda:eu-west-2:487402164563:function:PRE-3DPointCloudSemanticSegmentation",
"ap-southeast-1": "arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-3DPointCloudSemanticSegmentation",
"ca-central-1": "arn:aws:lambda:ca-central-1:918755190332:function:PRE-3DPointCloudSemanticSegmentation",
},
JobModality.PointCloudSemanticSegmentationAudit: {
"us-east-1": "arn:aws:lambda:us-east-1:432418664414:function:PRE-Adjustment3DPointCloudSemanticSegmentation",
"us-east-2": "arn:aws:lambda:us-east-2:266458841044:function:PRE-Adjustment3DPointCloudSemanticSegmentation",
"us-west-2": "arn:aws:lambda:us-west-2:081040173940:function:PRE-Adjustment3DPointCloudSemanticSegmentation",
"eu-west-1": "arn:aws:lambda:eu-west-1:568282634449:function:PRE-Adjustment3DPointCloudSemanticSegmentation",
"ap-northeast-1": "arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-Adjustment3DPointCloudSemanticSegmentation",
"ap-southeast-2": "arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-Adjustment3DPointCloudSemanticSegmentation",
"ap-south-1": "arn:aws:lambda:ap-south-1:565803892007:function:PRE-Adjustment3DPointCloudSemanticSegmentation",
"eu-central-1": "arn:aws:lambda:eu-central-1:203001061592:function:PRE-Adjustment3DPointCloudSemanticSegmentation",
"ap-northeast-2": "arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-Adjustment3DPointCloudSemanticSegmentation",
"eu-west-2": "arn:aws:lambda:eu-west-2:487402164563:function:PRE-Adjustment3DPointCloudSemanticSegmentation",
"ap-southeast-1": "arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-Adjustment3DPointCloudSemanticSegmentation",
"ca-central-1": "arn:aws:lambda:ca-central-1:918755190332:function:PRE-Adjustment3DPointCloudSemanticSegmentation",
},
JobModality.VideoObjectDetection: {
"us-east-1": "arn:aws:lambda:us-east-1:432418664414:function:PRE-VideoObjectDetection",
"us-east-2": "arn:aws:lambda:us-east-2:266458841044:function:PRE-VideoObjectDetection",
"us-west-2": "arn:aws:lambda:us-west-2:081040173940:function:PRE-VideoObjectDetection",
"eu-west-1": "arn:aws:lambda:eu-west-1:568282634449:function:PRE-VideoObjectDetection",
"ap-northeast-1": "arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-VideoObjectDetection",
"ap-southeast-2": "arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-VideoObjectDetection",
"ap-south-1": "arn:aws:lambda:ap-south-1:565803892007:function:PRE-VideoObjectDetection",
"eu-central-1": "arn:aws:lambda:eu-central-1:203001061592:function:PRE-VideoObjectDetection",
"ap-northeast-2": "arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-VideoObjectDetection",
"eu-west-2": "arn:aws:lambda:eu-west-2:487402164563:function:PRE-VideoObjectDetection",
"ap-southeast-1": "arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-VideoObjectDetection",
"ca-central-1": "arn:aws:lambda:ca-central-1:918755190332:function:PRE-VideoObjectDetection",
},
JobModality.VideoObjectDetectionAudit: {
"us-east-1": "arn:aws:lambda:us-east-1:432418664414:function:PRE-AdjustmentVideoObjectDetection",
"us-east-2": "arn:aws:lambda:us-east-2:266458841044:function:PRE-AdjustmentVideoObjectDetection",
"us-west-2": "arn:aws:lambda:us-west-2:081040173940:function:PRE-AdjustmentVideoObjectDetection",
"eu-west-1": "arn:aws:lambda:eu-west-1:568282634449:function:PRE-AdjustmentVideoObjectDetection",
"ap-northeast-1": "arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-AdjustmentVideoObjectDetection",
"ap-southeast-2": "arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-AdjustmentVideoObjectDetection",
"ap-south-1": "arn:aws:lambda:ap-south-1:565803892007:function:PRE-AdjustmentVideoObjectDetection",
"eu-central-1": "arn:aws:lambda:eu-central-1:203001061592:function:PRE-AdjustmentVideoObjectDetection",
"ap-northeast-2": "arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-AdjustmentVideoObjectDetection",
"eu-west-2": "arn:aws:lambda:eu-west-2:487402164563:function:PRE-AdjustmentVideoObjectDetection",
"ap-southeast-1": "arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-AdjustmentVideoObjectDetection",
"ca-central-1": "arn:aws:lambda:ca-central-1:918755190332:function:PRE-AdjustmentVideoObjectDetection",
},
JobModality.VideoObjectTracking: {
"us-east-1": "arn:aws:lambda:us-east-1:432418664414:function:PRE-VideoObjectTracking",
"us-east-2": "arn:aws:lambda:us-east-2:266458841044:function:PRE-VideoObjectTracking",
"us-west-2": "arn:aws:lambda:us-west-2:081040173940:function:PRE-VideoObjectTracking",
"eu-west-1": "arn:aws:lambda:eu-west-1:568282634449:function:PRE-VideoObjectTracking",
"ap-northeast-1": "arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-VideoObjectTracking",
"ap-southeast-2": "arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-VideoObjectTracking",
"ap-south-1": "arn:aws:lambda:ap-south-1:565803892007:function:PRE-VideoObjectTracking",
"eu-central-1": "arn:aws:lambda:eu-central-1:203001061592:function:PRE-VideoObjectTracking",
"ap-northeast-2": "arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-VideoObjectTracking",
"eu-west-2": "arn:aws:lambda:eu-west-2:487402164563:function:PRE-VideoObjectTracking",
"ap-southeast-1": "arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-VideoObjectTracking",
"ca-central-1": "arn:aws:lambda:ca-central-1:918755190332:function:PRE-VideoObjectTracking",
},
JobModality.VideoObjectTrackingAudit: {
"us-east-1": "arn:aws:lambda:us-east-1:432418664414:function:PRE-AdjustmentVideoObjectTracking",
"us-east-2": "arn:aws:lambda:us-east-2:266458841044:function:PRE-AdjustmentVideoObjectTracking",
"us-west-2": "arn:aws:lambda:us-west-2:081040173940:function:PRE-AdjustmentVideoObjectTracking",
"eu-west-1": "arn:aws:lambda:eu-west-1:568282634449:function:PRE-AdjustmentVideoObjectTracking",
"ap-northeast-1": "arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-AdjustmentVideoObjectTracking",
"ap-southeast-2": "arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-AdjustmentVideoObjectTracking",
"ap-south-1": "arn:aws:lambda:ap-south-1:565803892007:function:PRE-AdjustmentVideoObjectTracking",
"eu-central-1": "arn:aws:lambda:eu-central-1:203001061592:function:PRE-AdjustmentVideoObjectTracking",
"ap-northeast-2": "arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-AdjustmentVideoObjectTracking",
"eu-west-2": "arn:aws:lambda:eu-west-2:487402164563:function:PRE-AdjustmentVideoObjectTracking",
"ap-southeast-1": "arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-AdjustmentVideoObjectTracking",
"ca-central-1": "arn:aws:lambda:ca-central-1:918755190332:function:PRE-AdjustmentVideoObjectTracking",
},
}
return pre_human_task_lambdas[job_type][region]
def annotation_consolidation_config(region, job_type):
    """Generates an annotation consolidation config for a supported task type."""
annotation_consolidation_lambda_arns = {
JobModality.PointCloudObjectDetection: {
"us-east-1": "arn:aws:lambda:us-east-1:432418664414:function:ACS-3DPointCloudObjectDetection",
"us-east-2": "arn:aws:lambda:us-east-2:266458841044:function:ACS-3DPointCloudObjectDetection",
"us-west-2": "arn:aws:lambda:us-west-2:081040173940:function:ACS-3DPointCloudObjectDetection",
"eu-west-1": "arn:aws:lambda:eu-west-1:568282634449:function:ACS-3DPointCloudObjectDetection",
"ap-northeast-1": "arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-3DPointCloudObjectDetection",
"ap-southeast-2": "arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-3DPointCloudObjectDetection",
"ap-south-1": "arn:aws:lambda:ap-south-1:565803892007:function:ACS-3DPointCloudObjectDetection",
"eu-central-1": "arn:aws:lambda:eu-central-1:203001061592:function:ACS-3DPointCloudObjectDetection",
"ap-northeast-2": "arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-3DPointCloudObjectDetection",
"eu-west-2": "arn:aws:lambda:eu-west-2:487402164563:function:ACS-3DPointCloudObjectDetection",
"ap-southeast-1": "arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-3DPointCloudObjectDetection",
"ca-central-1": "arn:aws:lambda:ca-central-1:918755190332:function:ACS-3DPointCloudObjectDetection",
},
JobModality.PointCloudObjectDetectionAudit: {
"us-east-1": "arn:aws:lambda:us-east-1:432418664414:function:ACS-Adjustment3DPointCloudObjectDetection",
"us-east-2": "arn:aws:lambda:us-east-2:266458841044:function:ACS-Adjustment3DPointCloudObjectDetection",
"us-west-2": "arn:aws:lambda:us-west-2:081040173940:function:ACS-Adjustment3DPointCloudObjectDetection",
"eu-west-1": "arn:aws:lambda:eu-west-1:568282634449:function:ACS-Adjustment3DPointCloudObjectDetection",
"ap-northeast-1": "arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-Adjustment3DPointCloudObjectDetection",
"ap-southeast-2": "arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-Adjustment3DPointCloudObjectDetection",
"ap-south-1": "arn:aws:lambda:ap-south-1:565803892007:function:ACS-Adjustment3DPointCloudObjectDetection",
"eu-central-1": "arn:aws:lambda:eu-central-1:203001061592:function:ACS-Adjustment3DPointCloudObjectDetection",
"ap-northeast-2": "arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-Adjustment3DPointCloudObjectDetection",
"eu-west-2": "arn:aws:lambda:eu-west-2:487402164563:function:ACS-Adjustment3DPointCloudObjectDetection",
"ap-southeast-1": "arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-Adjustment3DPointCloudObjectDetection",
"ca-central-1": "arn:aws:lambda:ca-central-1:918755190332:function:ACS-Adjustment3DPointCloudObjectDetection",
},
JobModality.PointCloudObjectTracking: {
"us-east-1": "arn:aws:lambda:us-east-1:432418664414:function:ACS-3DPointCloudObjectTracking",
"us-east-2": "arn:aws:lambda:us-east-2:266458841044:function:ACS-3DPointCloudObjectTracking",
"us-west-2": "arn:aws:lambda:us-west-2:081040173940:function:ACS-3DPointCloudObjectTracking",
"eu-west-1": "arn:aws:lambda:eu-west-1:568282634449:function:ACS-3DPointCloudObjectTracking",
"ap-northeast-1": "arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-3DPointCloudObjectTracking",
"ap-southeast-2": "arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-3DPointCloudObjectTracking",
"ap-south-1": "arn:aws:lambda:ap-south-1:565803892007:function:ACS-3DPointCloudObjectTracking",
"eu-central-1": "arn:aws:lambda:eu-central-1:203001061592:function:ACS-3DPointCloudObjectTracking",
"ap-northeast-2": "arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-3DPointCloudObjectTracking",
"eu-west-2": "arn:aws:lambda:eu-west-2:487402164563:function:ACS-3DPointCloudObjectTracking",
"ap-southeast-1": "arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-3DPointCloudObjectTracking",
"ca-central-1": "arn:aws:lambda:ca-central-1:918755190332:function:ACS-3DPointCloudObjectTracking",
},
JobModality.PointCloudObjectTrackingAudit: {
"us-east-1": "arn:aws:lambda:us-east-1:432418664414:function:ACS-Adjustment3DPointCloudObjectTracking",
"us-east-2": "arn:aws:lambda:us-east-2:266458841044:function:ACS-Adjustment3DPointCloudObjectTracking",
"us-west-2": "arn:aws:lambda:us-west-2:081040173940:function:ACS-Adjustment3DPointCloudObjectTracking",
"eu-west-1": "arn:aws:lambda:eu-west-1:568282634449:function:ACS-Adjustment3DPointCloudObjectTracking",
"ap-northeast-1": "arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-Adjustment3DPointCloudObjectTracking",
"ap-southeast-2": "arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-Adjustment3DPointCloudObjectTracking",
"ap-south-1": "arn:aws:lambda:ap-south-1:565803892007:function:ACS-Adjustment3DPointCloudObjectTracking",
"eu-central-1": "arn:aws:lambda:eu-central-1:203001061592:function:ACS-Adjustment3DPointCloudObjectTracking",
"ap-northeast-2": "arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-Adjustment3DPointCloudObjectTracking",
"eu-west-2": "arn:aws:lambda:eu-west-2:487402164563:function:ACS-Adjustment3DPointCloudObjectTracking",
"ap-southeast-1": "arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-Adjustment3DPointCloudObjectTracking",
"ca-central-1": "arn:aws:lambda:ca-central-1:918755190332:function:ACS-Adjustment3DPointCloudObjectTracking",
},
JobModality.PointCloudSemanticSegmentation: {
"us-east-1": "arn:aws:lambda:us-east-1:432418664414:function:ACS-3DPointCloudSemanticSegmentation",
"us-east-2": "arn:aws:lambda:us-east-2:266458841044:function:ACS-3DPointCloudSemanticSegmentation",
"us-west-2": "arn:aws:lambda:us-west-2:081040173940:function:ACS-3DPointCloudSemanticSegmentation",
"eu-west-1": "arn:aws:lambda:eu-west-1:568282634449:function:ACS-3DPointCloudSemanticSegmentation",
"ap-northeast-1": "arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-3DPointCloudSemanticSegmentation",
"ap-southeast-2": "arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-3DPointCloudSemanticSegmentation",
"ap-south-1": "arn:aws:lambda:ap-south-1:565803892007:function:ACS-3DPointCloudSemanticSegmentation",
"eu-central-1": "arn:aws:lambda:eu-central-1:203001061592:function:ACS-3DPointCloudSemanticSegmentation",
"ap-northeast-2": "arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-3DPointCloudSemanticSegmentation",
"eu-west-2": "arn:aws:lambda:eu-west-2:487402164563:function:ACS-3DPointCloudSemanticSegmentation",
"ap-southeast-1": "arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-3DPointCloudSemanticSegmentation",
"ca-central-1": "arn:aws:lambda:ca-central-1:918755190332:function:ACS-3DPointCloudSemanticSegmentation",
},
JobModality.PointCloudSemanticSegmentationAudit: {
"us-east-1": "arn:aws:lambda:us-east-1:432418664414:function:ACS-3DPointCloudSemanticSegmentation",
"us-east-1": "arn:aws:lambda:us-east-1:432418664414:function:ACS-Adjustment3DPointCloudSemanticSegmentation",
"us-east-2": "arn:aws:lambda:us-east-2:266458841044:function:ACS-Adjustment3DPointCloudSemanticSegmentation",
"us-west-2": "arn:aws:lambda:us-west-2:081040173940:function:ACS-Adjustment3DPointCloudSemanticSegmentation",
"eu-west-1": "arn:aws:lambda:eu-west-1:568282634449:function:ACS-Adjustment3DPointCloudSemanticSegmentation",
"ap-northeast-1": "arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-Adjustment3DPointCloudSemanticSegmentation",
"ap-southeast-2": "arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-Adjustment3DPointCloudSemanticSegmentation",
"ap-south-1": "arn:aws:lambda:ap-south-1:565803892007:function:ACS-Adjustment3DPointCloudSemanticSegmentation",
"eu-central-1": "arn:aws:lambda:eu-central-1:203001061592:function:ACS-Adjustment3DPointCloudSemanticSegmentation",
"ap-northeast-2": "arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-Adjustment3DPointCloudSemanticSegmentation",
"eu-west-2": "arn:aws:lambda:eu-west-2:487402164563:function:ACS-Adjustment3DPointCloudSemanticSegmentation",
"ap-southeast-1": "arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-Adjustment3DPointCloudSemanticSegmentation",
"ca-central-1": "arn:aws:lambda:ca-central-1:918755190332:function:ACS-Adjustment3DPointCloudSemanticSegmentation",
},
JobModality.VideoObjectDetection: {
"us-east-1": "arn:aws:lambda:us-east-1:432418664414:function:ACS-VideoObjectDetection",
"us-east-2": "arn:aws:lambda:us-east-2:266458841044:function:ACS-VideoObjectDetection",
"us-west-2": "arn:aws:lambda:us-west-2:081040173940:function:ACS-VideoObjectDetection",
"eu-west-1": "arn:aws:lambda:eu-west-1:568282634449:function:ACS-VideoObjectDetection",
"ap-northeast-1": "arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VideoObjectDetection",
"ap-southeast-2": "arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VideoObjectDetection",
"ap-south-1": "arn:aws:lambda:ap-south-1:565803892007:function:ACS-VideoObjectDetection",
"eu-central-1": "arn:aws:lambda:eu-central-1:203001061592:function:ACS-VideoObjectDetection",
"ap-northeast-2": "arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VideoObjectDetection",
"eu-west-2": "arn:aws:lambda:eu-west-2:487402164563:function:ACS-VideoObjectDetection",
"ap-southeast-1": "arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VideoObjectDetection",
"ca-central-1": "arn:aws:lambda:ca-central-1:918755190332:function:ACS-VideoObjectDetection",
},
JobModality.VideoObjectDetectionAudit: {
"us-east-1": "arn:aws:lambda:us-east-1:432418664414:function:ACS-AdjustmentVideoObjectDetection",
"us-east-2": "arn:aws:lambda:us-east-2:266458841044:function:ACS-AdjustmentVideoObjectDetection",
"us-west-2": "arn:aws:lambda:us-west-2:081040173940:function:ACS-AdjustmentVideoObjectDetection",
"eu-west-1": "arn:aws:lambda:eu-west-1:568282634449:function:ACS-AdjustmentVideoObjectDetection",
"ap-northeast-1": "arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-AdjustmentVideoObjectDetection",
"ap-southeast-2": "arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-AdjustmentVideoObjectDetection",
"ap-south-1": "arn:aws:lambda:ap-south-1:565803892007:function:ACS-AdjustmentVideoObjectDetection",
"eu-central-1": "arn:aws:lambda:eu-central-1:203001061592:function:ACS-AdjustmentVideoObjectDetection",
"ap-northeast-2": "arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-AdjustmentVideoObjectDetection",
"eu-west-2": "arn:aws:lambda:eu-west-2:487402164563:function:ACS-AdjustmentVideoObjectDetection",
"ap-southeast-1": "arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-AdjustmentVideoObjectDetection",
"ca-central-1": "arn:aws:lambda:ca-central-1:918755190332:function:ACS-AdjustmentVideoObjectDetection",
},
JobModality.VideoObjectTracking: {
"us-east-1": "arn:aws:lambda:us-east-1:432418664414:function:ACS-VideoObjectTracking",
"us-east-2": "arn:aws:lambda:us-east-2:266458841044:function:ACS-VideoObjectTracking",
"us-west-2": "arn:aws:lambda:us-west-2:081040173940:function:ACS-VideoObjectTracking",
"eu-west-1": "arn:aws:lambda:eu-west-1:568282634449:function:ACS-VideoObjectTracking",
"ap-northeast-1": "arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VideoObjectTracking",
"ap-southeast-2": "arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VideoObjectTracking",
"ap-south-1": "arn:aws:lambda:ap-south-1:565803892007:function:ACS-VideoObjectTracking",
"eu-central-1": "arn:aws:lambda:eu-central-1:203001061592:function:ACS-VideoObjectTracking",
"ap-northeast-2": "arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VideoObjectTracking",
"eu-west-2": "arn:aws:lambda:eu-west-2:487402164563:function:ACS-VideoObjectTracking",
"ap-southeast-1": "arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VideoObjectTracking",
"ca-central-1": "arn:aws:lambda:ca-central-1:918755190332:function:ACS-VideoObjectTracking",
},
JobModality.VideoObjectTrackingAudit: {
"us-east-1": "arn:aws:lambda:us-east-1:432418664414:function:ACS-AdjustmentVideoObjectTracking",
"us-east-2": "arn:aws:lambda:us-east-2:266458841044:function:ACS-AdjustmentVideoObjectTracking",
"us-west-2": "arn:aws:lambda:us-west-2:081040173940:function:ACS-AdjustmentVideoObjectTracking",
"eu-west-1": "arn:aws:lambda:eu-west-1:568282634449:function:ACS-AdjustmentVideoObjectTracking",
"ap-northeast-1": "arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-AdjustmentVideoObjectTracking",
"ap-southeast-2": "arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-AdjustmentVideoObjectTracking",
"ap-south-1": "arn:aws:lambda:ap-south-1:565803892007:function:ACS-AdjustmentVideoObjectTracking",
"eu-central-1": "arn:aws:lambda:eu-central-1:203001061592:function:ACS-AdjustmentVideoObjectTracking",
"ap-northeast-2": "arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-AdjustmentVideoObjectTracking",
"eu-west-2": "arn:aws:lambda:eu-west-2:487402164563:function:ACS-AdjustmentVideoObjectTracking",
"ap-southeast-1": "arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-AdjustmentVideoObjectTracking",
"ca-central-1": "arn:aws:lambda:ca-central-1:918755190332:function:ACS-AdjustmentVideoObjectTracking",
},
}
return {
"AnnotationConsolidationLambdaArn": annotation_consolidation_lambda_arns[job_type][region]
}
```
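These helpers are designed to be combined into a SageMaker Ground Truth `CreateLabelingJob` request. A hedged sketch of how they could be wired together (the workteam ARN and job name below are placeholders, not real resources):

```python
# Sketch only: the workteam ARN and job name below are placeholders.
region = "us-east-1"
job_type = JobModality.PointCloudObjectDetection
job_name = "example-labeling-job"

human_task_config = {
    "PreHumanTaskLambdaArn": pre_human_task_lambda_arn(region, job_type),
    "UiConfig": ui_config(region, job_type),
    "AnnotationConsolidationConfig": annotation_consolidation_config(region, job_type),
    "WorkteamArn": "arn:aws:sagemaker:us-east-1:111122223333:workteam/private-crowd/example",
    "TaskTitle": "Example 3D point cloud task",
    "TaskDescription": "Draw 3D bounding boxes around objects",
    "NumberOfHumanWorkersPerDataObject": 1,
    "TaskTimeLimitInSeconds": 3600,
}
# The label attribute name differs for video tasks (suffix "-ref"), so derive it from the helper.
label_attribute = job_name_to_label_attribute(job_type, job_name)
```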
#### File: src/lib/sagemaker_bootstrap.py
```python
from __future__ import absolute_import
import json
import logging
import os
logger = logging.getLogger(__name__)
def train():
"""Runs the configured SAGEMAKER_TRAINING_COMMAND with all
the hyperparameters.
"""
os.chdir("/opt/ml/code")
user_args = os.environ.get("SM_USER_ARGS", "")
logger.info("SM_USER_ARGS=%s" % user_args)
logger.info("All eniron vars=%s" % os.environ)
hyperparams = " ".join(json.loads(user_args))
params_blob = os.environ.get("SM_TRAINING_ENV", "")
params = json.loads(params_blob)
hyperparams_dict = params["hyperparameters"]
s3_bucket = hyperparams_dict.get("s3_bucket", "gsaur-test")
s3_prefix = hyperparams_dict.get("s3_prefix", "sagemaker")
base_cmd = os.environ.get("SAGEMAKER_TRAINING_COMMAND", "python train.py")
cmd = "%s %s" % (base_cmd, hyperparams)
logger.info("Launching training command: %s" % cmd)
retval = os.system(cmd)
if retval != 0:
msg = "Train command returned exit code %s" % retval
logger.error(msg)
raise RuntimeError(msg)
```
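`train()` is driven entirely by environment variables injected into the training container. An illustrative sketch of the variables it reads (the values here are made up, not taken from a real job):

```python
# Illustrative environment for the bootstrap above; all values are made up.
import json
import os

os.environ["SM_USER_ARGS"] = json.dumps(["--s3_bucket", "example-bucket", "--num_epochs", "40"])
os.environ["SM_TRAINING_ENV"] = json.dumps(
    {"hyperparameters": {"s3_bucket": "example-bucket", "s3_prefix": "sagemaker", "num_epochs": "40"}}
)
os.environ["SAGEMAKER_TRAINING_COMMAND"] = "python training_worker.py"
# With this environment, train() would launch:
#   python training_worker.py --s3_bucket example-bucket --num_epochs 40
```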
#### File: markov/memories/deepracer_memory.py
```python
import ast
import math
import pickle
import random
from copy import deepcopy
from typing import List, Tuple, Union
import numpy as np
import pandas as pd
from rl_coach.core_types import CsvDataset, Episode, Transition
from rl_coach.filters.filter import InputFilter
from rl_coach.logger import screen
from rl_coach.memories.memory import Memory, MemoryGranularity, MemoryParameters
from rl_coach.utils import ProgressBar, ReaderWriterLock
class DeepRacerMemoryParameters(MemoryParameters):
def __init__(self):
super().__init__()
self.max_size = (MemoryGranularity.Transitions, 1000000)
self.n_step = -1
self.train_to_eval_ratio = 1 # for OPE we'll want a value < 1
@property
def path(self):
return "markov.memories.deepracer_memory:DeepRacerMemory"
class DeepRacerMemory(Memory):
"""
A replay buffer that stores episodes of transitions. The additional structure allows performing various
calculations of total return and other values that depend on the sequential behavior of the transitions
in the episode.
"""
def __init__(
self,
max_size: Tuple[MemoryGranularity, int] = (MemoryGranularity.Transitions, 1000000),
n_step=-1,
train_to_eval_ratio: int = 1,
):
"""
:param max_size: the maximum number of transitions or episodes to hold in the memory
"""
super().__init__(max_size)
self.n_step = n_step
self._buffer = [Episode(n_step=self.n_step)] # list of episodes
self.transitions = []
self._length = 1 # the episodic replay buffer starts with a single empty episode
self._num_transitions = 0
self._num_transitions_in_complete_episodes = 0
self.last_training_set_episode_id = None # used in batch-rl
self.last_training_set_transition_id = None # used in batch-rl
self.train_to_eval_ratio = train_to_eval_ratio # used in batch-rl
self.evaluation_dataset_as_episodes = None
self.evaluation_dataset_as_transitions = None
self.frozen = False
def length(self, lock: bool = False) -> int:
"""
Get the number of episodes in the ER (even if they are not complete)
"""
length = self._length
        if self._length != 0 and self._buffer[-1].is_empty():
length = self._length - 1
return length
def num_complete_episodes(self):
"""Get the number of complete episodes in ER"""
length = self._length - 1
return length
def num_transitions(self):
return self._num_transitions
def num_transitions_in_complete_episodes(self):
return self._num_transitions_in_complete_episodes
def get_last_training_set_episode_id(self):
return self.last_training_set_episode_id
def sample(self, size: int, is_consecutive_transitions=False) -> List[Transition]:
"""
Sample a batch of transitions from the replay buffer. If the requested size is larger than the number
of samples available in the replay buffer then the batch will return empty.
:param size: the size of the batch to sample
:param is_consecutive_transitions: if set True, samples a batch of consecutive transitions.
:return: a batch (list) of selected transitions from the replay buffer
"""
if self.num_complete_episodes() >= 1:
if is_consecutive_transitions:
episode_idx = np.random.randint(0, self.num_complete_episodes())
if self._buffer[episode_idx].length() <= size:
batch = self._buffer[episode_idx].transitions
else:
transition_idx = np.random.randint(size, self._buffer[episode_idx].length())
batch = self._buffer[episode_idx].transitions[
transition_idx - size : transition_idx
]
else:
transitions_idx = np.random.randint(
self.num_transitions_in_complete_episodes(), size=size
)
batch = [self.transitions[i] for i in transitions_idx]
else:
raise ValueError(
"The episodic replay buffer cannot be sampled since there are no complete episodes yet. "
"There is currently 1 episodes with {} transitions".format(self._buffer[0].length())
)
return batch
def get_episode_for_transition(self, transition: Transition) -> (int, Episode):
"""
Get the episode from which that transition came from.
:param transition: The transition to lookup the episode for
        :return: (episode number, the episode) or (-1, None) if no matching episode could be found.
"""
for i, episode in enumerate(self._buffer):
if transition in episode.transitions:
return i, episode
return -1, None
def shuffle_episodes(self):
"""
Shuffle all the complete episodes in the replay buffer, while deleting the last non-complete episode
:return:
"""
self.assert_not_frozen()
        # Unlike the standard usage of the EpisodicExperienceReplay, where we always leave an empty episode after
        # the last full one so that new transitions have somewhere to be added, here we deliberately remove
        # that empty last episode: we are about to shuffle the memory and we don't want it shuffled in.
self.remove_last_episode(lock=False)
random.shuffle(self._buffer)
self.transitions = [t for e in self._buffer for t in e.transitions]
# create a new Episode for the next transitions to be placed into
self._buffer.append(Episode(n_step=self.n_step))
self._length += 1
def get_shuffled_training_data_generator(self, size: int) -> List[Transition]:
"""
        Get a generator for iterating through the shuffled replay buffer, for processing the data in epochs.
If the requested size is larger than the number of samples available in the replay buffer then the batch will
return empty. The last returned batch may be smaller than the size requested, to accommodate for all the
transitions in the replay buffer.
:param size: the size of the batch to return
:return: a batch (list) of selected transitions from the replay buffer
"""
shuffled_transition_indices = list(range(self.last_training_set_transition_id))
random.shuffle(shuffled_transition_indices)
# The last batch drawn will usually be < batch_size (=the size variable)
for i in range(math.ceil(len(shuffled_transition_indices) / size)):
sample_data = [
self.transitions[j] for j in shuffled_transition_indices[i * size : (i + 1) * size]
]
yield sample_data
def get_all_complete_episodes_transitions(self) -> List[Transition]:
"""
Get all the transitions from all the complete episodes in the buffer
:return: a list of transitions
"""
return self.transitions[: self.num_transitions_in_complete_episodes()]
def get_all_complete_episodes(self) -> List[Episode]:
"""
        Get all the complete episodes in the buffer
        :return: a list of episodes
"""
return self.get_all_complete_episodes_from_to(0, self.num_complete_episodes())
def get_all_complete_episodes_from_to(self, start_episode_id, end_episode_id) -> List[Episode]:
"""
        Get all the complete episodes in the buffer within the given episode id range
        :return: a list of episodes
"""
return self._buffer[start_episode_id:end_episode_id]
def _enforce_max_length(self) -> None:
"""
Make sure that the size of the replay buffer does not pass the maximum size allowed.
If it passes the max size, the oldest episode in the replay buffer will be removed.
:return: None
"""
granularity, size = self.max_size
if granularity == MemoryGranularity.Transitions:
while size != 0 and self.num_transitions() > size:
self.remove_first_episode(lock=False)
elif granularity == MemoryGranularity.Episodes:
while self.length() > size:
self.remove_first_episode(lock=False)
def _update_episode(self, episode: Episode) -> None:
episode.update_transitions_rewards_and_bootstrap_data()
def verify_last_episode_is_closed(self) -> None:
"""
Verify that there is no open episodes in the replay buffer
:return: None
"""
last_episode = self.get(-1, False)
if last_episode and last_episode.length() > 0:
self.close_last_episode(lock=False)
def close_last_episode(self, lock=True) -> None:
"""
Close the last episode in the replay buffer and open a new one
:return: None
"""
last_episode = self._buffer[-1]
self._num_transitions_in_complete_episodes += last_episode.length()
self._length += 1
# create a new Episode for the next transitions to be placed into
self._buffer.append(Episode(n_step=self.n_step))
        # If updating the episode adds to the buffer, a new Episode needs to be ready first;
        # it would be better if this were less stateful.
self._update_episode(last_episode)
self._enforce_max_length()
def store(self, transition: Transition) -> None:
"""
Store a new transition in the memory. If the transition game_over flag is on, this closes the episode and
creates a new empty episode.
Warning! using the episodic memory by storing individual transitions instead of episodes will use the default
Episode class parameters in order to create new episodes.
:param transition: a transition to store
:return: None
"""
self.assert_not_frozen()
# Calling super.store() so that in case a memory backend is used, the memory backend can store this transition.
super().store(transition)
if len(self._buffer) == 0:
self._buffer.append(Episode(n_step=self.n_step))
last_episode = self._buffer[-1]
last_episode.insert(transition)
self.transitions.append(transition)
self._num_transitions += 1
if transition.game_over:
self.close_last_episode(False)
self._enforce_max_length()
def store_episode(self, episode: Episode, lock: bool = False) -> None:
"""
Store a new episode in the memory.
:param episode: the new episode to store
:return: None
"""
self.assert_not_frozen()
# Calling super.store() so that in case a memory backend is used, the memory backend can store this episode.
super().store_episode(episode)
if self._buffer[-1].length() == 0:
self._buffer[-1] = episode
else:
self._buffer.append(episode)
self.transitions.extend(episode.transitions)
self._num_transitions += episode.length()
self.close_last_episode(False)
def _remove_episode(self, episode_index: int) -> None:
"""
Remove either the first or the last index
:param episode_index: the index of the episode to remove (either 0 or -1)
:return: None
"""
self.assert_not_frozen()
assert episode_index == 0 or episode_index == -1, (
"_remove_episode only supports removing the first or the last " "episode"
)
if len(self._buffer) > 0:
episode_length = self._buffer[episode_index].length()
self._length -= 1
self._num_transitions -= episode_length
self._num_transitions_in_complete_episodes -= episode_length
if episode_index == 0:
del self.transitions[:episode_length]
else: # episode_index = -1
del self.transitions[-episode_length:]
del self._buffer[episode_index]
def remove_first_episode(self, lock: bool = True) -> None:
"""
Remove the first episode (even if it is not complete yet)
:param lock: if true, will lock the readers writers lock. this can cause a deadlock if an inheriting class
locks and then calls store with lock = True
:return: None
"""
self._remove_episode(0)
def remove_last_episode(self, lock: bool = True) -> None:
"""
Remove the last episode (even if it is not complete yet)
:param lock: if true, will lock the readers writers lock. this can cause a deadlock if an inheriting class
locks and then calls store with lock = True
:return: None
"""
self._remove_episode(-1)
# for API compatibility
def get(self, episode_index: int, lock: bool = True) -> Union[None, Episode]:
"""
Returns the episode in the given index. If the episode does not exist, returns None instead.
:param episode_index: the index of the episode to return
:return: the corresponding episode
"""
if self.length() == 0 or episode_index >= self.length():
episode = None
else:
episode = self._buffer[episode_index]
return episode
def get_last_complete_episode(self) -> Union[None, Episode]:
"""
Returns the last complete episode in the memory or None if there are no complete episodes
:return: None or the last complete episode
"""
last_complete_episode_index = self.num_complete_episodes() - 1
episode = None
if last_complete_episode_index >= 0:
episode = self.get(last_complete_episode_index)
return episode
def clean(self) -> None:
"""
Clean the memory by removing all the episodes
:return: None
"""
self.assert_not_frozen()
self.transitions = []
self._buffer = [Episode(n_step=self.n_step)]
self._length = 1
self._num_transitions = 0
self._num_transitions_in_complete_episodes = 0
def mean_reward(self) -> np.ndarray:
"""
Get the mean reward in the replay buffer
:return: the mean reward
"""
mean = np.mean([transition.reward for transition in self.transitions])
return mean
def load_csv(self, csv_dataset: CsvDataset, input_filter: InputFilter) -> None:
"""
Restore the replay buffer contents from a csv file.
The csv file is assumed to include a list of transitions.
:param csv_dataset: A construct which holds the dataset parameters
:param input_filter: A filter used to filter the CSV data before feeding it to the memory.
"""
self.assert_not_frozen()
df = pd.read_csv(csv_dataset.filepath)
if len(df) > self.max_size[1]:
screen.warning(
"Warning! The number of transitions to load into the replay buffer ({}) is "
"bigger than the max size of the replay buffer ({}). The excessive transitions will "
"not be stored.".format(len(df), self.max_size[1])
)
episode_ids = df["episode_id"].unique()
progress_bar = ProgressBar(len(episode_ids))
state_columns = [col for col in df.columns if col.startswith("state_feature")]
for e_id in episode_ids:
progress_bar.update(e_id)
df_episode_transitions = df[df["episode_id"] == e_id]
input_filter.reset()
if len(df_episode_transitions) < 2:
# we have to have at least 2 rows in each episode for creating a transition
continue
episode = Episode()
transitions = []
for (_, current_transition), (_, next_transition) in zip(
df_episode_transitions[:-1].iterrows(), df_episode_transitions[1:].iterrows()
):
state = np.array([current_transition[col] for col in state_columns])
next_state = np.array([next_transition[col] for col in state_columns])
transitions.append(
Transition(
state={"observation": state},
action=int(current_transition["action"]),
reward=current_transition["reward"],
next_state={"observation": next_state},
game_over=False,
info={
"all_action_probabilities": ast.literal_eval(
current_transition["all_action_probabilities"]
)
},
),
)
transitions = input_filter.filter(transitions, deep_copy=False)
for t in transitions:
episode.insert(t)
# Set the last transition to end the episode
if csv_dataset.is_episodic:
episode.get_last_transition().game_over = True
self.store_episode(episode)
# close the progress bar
progress_bar.update(len(episode_ids))
progress_bar.close()
def freeze(self):
"""
Freezing the replay buffer does not allow any new transitions to be added to the memory.
Useful when working with a dataset (e.g. batch-rl or imitation learning).
:return: None
"""
self.frozen = True
def assert_not_frozen(self):
"""
Check that the memory is not frozen, and can be changed.
:return:
"""
assert self.frozen is False, "Memory is frozen, and cannot be changed."
def prepare_evaluation_dataset(self):
"""
Gather the memory content that will be used for off-policy evaluation in episodes and transitions format
:return:
"""
self._split_training_and_evaluation_datasets()
self.evaluation_dataset_as_episodes = deepcopy(
self.get_all_complete_episodes_from_to(
self.get_last_training_set_episode_id() + 1, self.num_complete_episodes()
)
)
if len(self.evaluation_dataset_as_episodes) == 0:
raise ValueError(
"train_to_eval_ratio is too high causing the evaluation set to be empty. "
"Consider decreasing its value."
)
self.evaluation_dataset_as_transitions = [
t for e in self.evaluation_dataset_as_episodes for t in e.transitions
]
def _split_training_and_evaluation_datasets(self):
"""
If the data in the buffer was not split to training and evaluation yet, split it accordingly.
:return: None
"""
if self.last_training_set_transition_id is None:
if self.train_to_eval_ratio < 0 or self.train_to_eval_ratio >= 1:
raise ValueError("train_to_eval_ratio should be in the (0, 1] range.")
transition = self.transitions[
round(self.train_to_eval_ratio * self.num_transitions_in_complete_episodes())
]
episode_num, episode = self.get_episode_for_transition(transition)
self.last_training_set_episode_id = episode_num
self.last_training_set_transition_id = len(
[
t
for e in self.get_all_complete_episodes_from_to(
0, self.last_training_set_episode_id + 1
)
                    for t in e.transitions
]
)
def save(self, file_path: str) -> None:
"""
Save the replay buffer contents to a pickle file
:param file_path: the path to the file that will be used to store the pickled transitions
"""
with open(file_path, "wb") as file:
pickle.dump(self.get_all_complete_episodes(), file)
def load_pickled(self, file_path: str) -> None:
"""
Restore the replay buffer contents from a pickle file.
The pickle file is assumed to include a list of transitions.
:param file_path: The path to a pickle file to restore
"""
self.assert_not_frozen()
with open(file_path, "rb") as file:
episodes = pickle.load(file)
num_transitions = sum([len(e.transitions) for e in episodes])
if num_transitions > self.max_size[1]:
screen.warning(
"Warning! The number of transition to load into the replay buffer ({}) is "
"bigger than the max size of the replay buffer ({}). The excessive transitions will "
"not be stored.".format(num_transitions, self.max_size[1])
)
progress_bar = ProgressBar(len(episodes))
for episode_idx, episode in enumerate(episodes):
self.store_episode(episode)
# print progress
progress_bar.update(episode_idx)
progress_bar.close()
```
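A toy sketch of exercising the memory outside the training stack, assuming rl_coach is installed (the observations, actions, and rewards below are made up):

```python
# Toy usage sketch of DeepRacerMemory; states, actions, and rewards are made up.
import numpy as np
from rl_coach.core_types import Transition

memory = DeepRacerMemory()
for step in range(5):
    memory.store(
        Transition(
            state={"observation": np.zeros(3)},
            action=0,
            reward=1.0,
            next_state={"observation": np.ones(3)},
            game_over=(step == 4),  # closing the episode on the last transition
        )
    )

print(memory.num_complete_episodes())  # 1
print(memory.mean_reward())            # 1.0
batch = memory.sample(3)               # random transitions from complete episodes
```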
#### File: markov/multi_agent_coach/multi_agent_environment.py
```python
import time
from typing import List, Union
import numpy as np
from rl_coach.base_parameters import Parameters, VisualizationParameters
from rl_coach.core_types import ActionType, EnvResponse, GoalType, RunPhase
from rl_coach.environments.environment import LevelSelection
from rl_coach.environments.environment_interface import EnvironmentInterface
from rl_coach.spaces import ActionSpace, ObservationSpace, RewardSpace, StateSpace
from rl_coach.utils import force_list
class MultiAgentEnvironmentParameters(Parameters):
def __init__(self, level=None):
super().__init__()
self.level = level
self.frame_skip = 4
self.seed = None
self.custom_reward_threshold = None
self.default_input_filter = None
self.default_output_filter = None
self.experiment_path = None
# Set target reward and target_success if present
self.target_success_rate = 1.0
@property
def path(self):
return "markov.multi_agent_coach.multi_agent_environment:MultiAgentEnvironment"
class MultiAgentEnvironment(EnvironmentInterface):
def __init__(
self,
level: LevelSelection,
seed: int,
frame_skip: int,
custom_reward_threshold: Union[int, float],
visualization_parameters: VisualizationParameters,
target_success_rate: float = 1.0,
num_agents: int = 1,
**kwargs
):
"""
:param level: The environment level. Each environment can have multiple levels
:param seed: a seed for the random number generator of the environment
:param frame_skip: number of frames to skip (while repeating the same action) between each two agent directives
:param visualization_parameters: a blob of parameters used for visualization of the environment
:param **kwargs: as the class is instantiated by MultiAgentEnvironmentParameters, this is used to support having
additional arguments which will be ignored by this class, but might be used by others
"""
super().__init__()
# env initialization
self.num_agents = num_agents
self.state = [{}] * num_agents
self.reward = [0.0] * num_agents
self.done = [False] * num_agents
self.goal = None
self.info = {}
self._last_env_response = [None] * num_agents
self.last_action = [0] * num_agents
self.episode_idx = 0
self.total_steps_counter = 0
self.current_episode_steps_counter = 0
self.last_episode_time = time.time()
# rewards
self.total_reward_in_current_episode = [0.0] * num_agents
self.max_reward_achieved = [-np.inf] * num_agents
self.reward_success_threshold = custom_reward_threshold
# spaces
self.state_space = self._state_space = [None] * num_agents
self.goal_space = self._goal_space = None
self.action_space = self._action_space = [None] * num_agents
self.reward_space = RewardSpace(
1, reward_success_threshold=self.reward_success_threshold
) # TODO: add a getter and setter
self.env_id = str(level)
self.seed = seed
self.frame_skip = frame_skip
# visualization
self.visualization_parameters = visualization_parameters
# Set target reward and target_success if present
self.target_success_rate = target_success_rate
@property
def action_space(self) -> Union[List[ActionSpace], ActionSpace]:
"""
Get the action space of the environment
:return: the action space
"""
return self._action_space
@action_space.setter
def action_space(self, val: Union[List[ActionSpace], ActionSpace]):
"""
Set the action space of the environment
:return: None
"""
self._action_space = val
@property
def state_space(self) -> Union[List[StateSpace], StateSpace]:
"""
Get the state space of the environment
        :return: the state space
"""
return self._state_space
@state_space.setter
def state_space(self, val: Union[List[StateSpace], StateSpace]):
"""
Set the state space of the environment
:return: None
"""
self._state_space = val
@property
def goal_space(self) -> Union[List[ObservationSpace], ObservationSpace]:
"""
        Get the goal space of the environment
        :return: the goal space
"""
return self._goal_space
@goal_space.setter
def goal_space(self, val: Union[List[ObservationSpace], ObservationSpace]):
"""
Set the goal space of the environment
:return: None
"""
self._goal_space = val
@property
def last_env_response(self) -> Union[List[EnvResponse], EnvResponse]:
"""
Get the last environment response
:return: a dictionary that contains the state, reward, etc.
"""
return self._last_env_response
@last_env_response.setter
def last_env_response(self, val: Union[List[EnvResponse], EnvResponse]):
"""
Set the last environment response
:param val: the last environment response
"""
self._last_env_response = force_list(val)
def step(self, action: Union[List[ActionType], ActionType]) -> List[EnvResponse]:
"""
Make a single step in the environment using the given action
:param action: an action to use for stepping the environment. Should follow the definition of the action space.
:return: the environment response as returned in get_last_env_response
"""
clipped_and_scaled_action = list()
for agent_action, action_space in zip(force_list(action), force_list(self.action_space)):
agent_action = action_space.clip_action_to_space(agent_action)
if action_space and not action_space.contains(agent_action):
raise ValueError(
"The given action does not match the action space definition. "
"Action = {}, action space definition = {}".format(agent_action, action_space)
)
if hasattr(action_space, "scale_action_space") and action_space.scale_action_space:
agent_action = action_space.scale_action_values(agent_action)
clipped_and_scaled_action.append(agent_action)
action = clipped_and_scaled_action
# store the last agent action done and allow passing None actions to repeat the previously done action
if action is None:
action = self.last_action
self.last_action = action
self.current_episode_steps_counter += 1
if self.phase != RunPhase.UNDEFINED:
self.total_steps_counter += 1
# act
self._take_action(action)
# observe
self._update_state()
self.total_reward_in_current_episode = [
total_reward_in_current_episode + reward
for total_reward_in_current_episode, reward in zip(
self.total_reward_in_current_episode, self.reward
)
]
self.last_env_response = [
EnvResponse(
next_state=state, reward=reward, game_over=done, goal=self.goal, info=self.info
)
for state, reward, done in zip(self.state, self.reward, self.done)
]
return self.last_env_response
def handle_episode_ended(self) -> None:
"""
End an episode
:return: None
"""
pass
def reset_internal_state(self, force_environment_reset=False) -> EnvResponse:
"""
        Reset the environment and all the variables of the wrapper
:param force_environment_reset: forces environment reset even when the game did not end
:return: A dictionary containing the observation, reward, done flag, action and measurements
"""
self._restart_environment_episode(force_environment_reset)
self.last_episode_time = time.time()
if self.current_episode_steps_counter > 0 and self.phase != RunPhase.UNDEFINED:
self.episode_idx += 1
self.done = [False] * self.num_agents
        # Use separate lists so the running total and the per-step reward are not aliased.
        self.total_reward_in_current_episode = [0.0] * self.num_agents
        self.reward = [0.0] * self.num_agents
self.last_action = [0] * self.num_agents
self.current_episode_steps_counter = 0
self.last_env_response = [
EnvResponse(
next_state=state, reward=reward, game_over=done, goal=self.goal, info=self.info
)
for state, reward, done in zip(self.state, self.reward, self.done)
]
return self.last_env_response
def get_random_action(self) -> Union[List[ActionType], ActionType]:
"""
Returns an action picked uniformly from the available actions
:return: a numpy array with a random action
"""
        return (
            self.action_space.sample()
            if isinstance(self.action_space, ActionSpace)
            else [action_space.sample() for action_space in self.action_space]
        )
def get_goal(self) -> GoalType:
"""
Get the current goal that the agents needs to achieve in the environment
:return: The goal
"""
return self.goal
def set_goal(self, goal: GoalType) -> None:
"""
Set the current goal that the agent needs to achieve in the environment
:param goal: the goal that needs to be achieved
:return: None
"""
self.goal = goal
# The following functions define the interaction with the environment.
# Any new environment that inherits the MultiAgentEnvironment class should use these signatures.
# Some of these functions are optional - please read their description for more details.
def _take_action(self, action_idx: ActionType) -> None:
"""
An environment dependent function that sends an action to the simulator.
:param action_idx: the action to perform on the environment
:return: None
"""
raise NotImplementedError("")
def _update_state(self) -> None:
"""
Updates the state from the environment.
Should update self.state, self.reward, self.done, and self.info
:return: None
"""
raise NotImplementedError("")
def _restart_environment_episode(self, force_environment_reset=False) -> None:
"""
Restarts the simulator episode
:param force_environment_reset: Force the environment to reset even if the episode is not done yet.
:return: None
"""
raise NotImplementedError("")
def _notify_phase(self, phase: RunPhase):
"""
        This is a hook that notifies the environment that the phase has changed
        phase - the value of the phase after it has changed
"""
raise NotImplementedError("")
def get_target_success_rate(self) -> float:
return self.target_success_rate
def close(self) -> None:
"""
Clean up steps.
:return: None
"""
pass
```
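Concrete environments are expected to subclass `MultiAgentEnvironment` and fill in the hooks declared at the bottom of the interface. A stripped-down, hypothetical subclass (not part of the DeepRacer codebase) illustrating the contract:

```python
# Hypothetical minimal subclass showing which hooks a concrete environment must implement.
import numpy as np

class DummyMultiAgentEnvironment(MultiAgentEnvironment):
    def _take_action(self, action_idx):
        # Forward the per-agent actions to the simulator; here we just record them.
        self._pending_actions = action_idx

    def _update_state(self):
        # Populate per-agent state, reward, and done flags from the simulator.
        self.state = [{"observation": np.zeros(3)} for _ in range(self.num_agents)]
        self.reward = [0.0] * self.num_agents
        self.done = [False] * self.num_agents
        self.info = {}

    def _restart_environment_episode(self, force_environment_reset=False):
        # Reset simulator state for a new episode.
        self._pending_actions = None

    def _notify_phase(self, phase):
        # React to phase changes (e.g. switch between training and evaluation behavior).
        pass
```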
#### File: markov/sensors/composite_sensor.py
```python
from markov.sensors.sensor_interface import LidarInterface, SensorInterface
from rl_coach.spaces import StateSpace
class CompositeSensor(SensorInterface):
"""This class represents a composite sensor so that from the point of view of each agent there
is only one sensor interface
"""
def __init__(self):
self.sensors = list()
def add_sensor(self, sensor):
"""Adds a sensor to the sensor list
sensor - Sensor object to add to the sensor list
"""
self.sensors.append(sensor)
def get_observation_space(self):
observation_space = StateSpace({})
for sensor in self.sensors:
observation_space.sub_spaces.update(sensor.get_observation_space().sub_spaces)
return observation_space
def get_state(self, block=True):
state = dict()
# For blocking requests, run a blocking call on each sensor
if block:
for sensor in self.sensors:
# The Lidar sensor updates at 10 Hz while the camera sensor updates at 15 Hz.
# Because of the slower Lidar update rate, if the Lidar sensor is used,
# Lidar data retrieval becomes the bottleneck and limits the inference rate to 10 Hz.
# The latest Lidar sensor type is sector-lidar; due to its limited number of sectors and the
# binary type of its state data, it is unlikely that sector-Lidar data changes every step.
# Thus, it is unnecessary to wait for Lidar data and slow everything down.
# We ignore blocking requests for the Lidar sensor update; the follow-up non-blocking call below
# will use the latest Lidar data whether or not it was used previously.
if isinstance(sensor, LidarInterface):
continue
state.update(sensor.get_state(block=block))
# For all requests, follow-up with a non-blocking call
# This will ensure we have the latest for all sensors in the event that one of the
# earlier sensors in the list published new data while waiting on a blocking call above
for sensor in self.sensors:
state.update(sensor.get_state(block=False))
return state
def get_raw_state(self):
raw_data = dict()
for sensor in self.sensors:
if hasattr(sensor, "raw_data"):
raw_data[sensor.sensor_type] = sensor.raw_data
return raw_data
def reset(self):
[sensor.reset() for sensor in self.sensors]
def get_input_embedders(self, network_type):
input_embedders = dict()
for sensor in self.sensors:
input_embedders = dict(input_embedders, **sensor.get_input_embedders(network_type))
return input_embedders
```
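A usage sketch for `CompositeSensor`, assuming the `markov` package is importable inside the simulation environment. The two dummy sensors are invented stand-ins that only implement the duck-typed `get_state()` call exercised by `CompositeSensor.get_state()`; real sensors would implement the full `SensorInterface`.
```python
from markov.sensors.composite_sensor import CompositeSensor  # assumes markov is on the path


class DummyCamera:
    def get_state(self, block=True):
        return {"STEREO_CAMERAS": "camera frame placeholder"}


class DummySpeedometer:
    def get_state(self, block=True):
        return {"SPEED": 1.23}


composite = CompositeSensor()
composite.add_sensor(DummyCamera())
composite.add_sensor(DummySpeedometer())

# Blocking call: waits on each non-Lidar sensor, then refreshes all sensors non-blocking.
print(composite.get_state(block=True))
```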
#### File: track_geom/spline/lane_change_spline.py
```python
import bisect
import random
import numpy as np
from markov.track_geom.constants import DIST_THRESHOLD, SPLINE_DEGREE
from markov.track_geom.spline.abstract_spline import AbstractSpline
from markov.track_geom.track_data import TrackLine
from scipy.interpolate import splprep
from shapely.geometry import Point
from shapely.geometry.polygon import LineString
class LaneChangeSpline(AbstractSpline):
def __init__(
self, start_lane, end_lane, current_dist, lane_change_start_dist, lane_change_end_dist
):
self._start_lane = start_lane
self._end_lane = end_lane
self._current_dist = current_dist
self._lane_change_start_dist = lane_change_start_dist
self._lane_change_end_dist = lane_change_end_dist
super(LaneChangeSpline, self).__init__()
def _build_spline(self):
"""Build spline for lane change
Returns:
tuple: lane change lane, lane point distance,
prepared lane change spline.
"""
# center line
center_line = self._track_data.center_line
# start lane
start_lane_line = self._start_lane.lane["track_line"]
start_lane_dists = self._start_lane.lane["dists"]
# end lane
end_lane_line = self._end_lane.lane["track_line"]
end_lane_dists = self._end_lane.lane["dists"]
start_lane_point = Point(
np.array(self._start_lane.eval_spline(self._lane_change_start_dist))[:, 0]
)
end_lane_point = Point(
np.array(self._end_lane.eval_spline(self._lane_change_end_dist))[:, 0]
)
end_offset = (
0.0
if (self._lane_change_start_dist < self._lane_change_end_dist)
else center_line.length
)
# Find prev/next points on each lane
current_prev_index = bisect.bisect_left(start_lane_dists, self._current_dist) - 1
start_prev_index = bisect.bisect_left(start_lane_dists, self._lane_change_start_dist) - 1
end_next_index = bisect.bisect_right(end_lane_dists, self._lane_change_end_dist)
# Define intervals on start/end lanes to build the spline from
num_start_coords = len(start_lane_line.coords)
num_end_coords = len(end_lane_line.coords)
if self._track_data.is_loop:
num_start_coords -= 1
num_end_coords -= 1
start_index_0 = (current_prev_index - 3) % num_start_coords
start_index_1 = start_prev_index
end_index_0 = end_next_index
end_index_1 = (end_next_index + 3) % num_end_coords
# Grab waypoint indices for these intervals (some corner cases here...)
if start_index_0 < start_index_1:
start_indices = list(range(start_index_0, start_index_1 + 1))
start_offsets = [0.0] * len(start_indices)
else:
start_indices_0 = list(range(start_index_0, num_start_coords))
start_indices_1 = list(range(start_index_1 + 1))
start_indices = start_indices_0 + start_indices_1
start_offsets = [-center_line.length] * len(start_indices_0) + [0.0] * len(
start_indices_1
)
if end_index_0 < end_index_1:
end_indices = list(range(end_index_0, end_index_1 + 1))
end_offsets = [end_offset] * len(end_indices)
else:
end_indices_0 = list(range(end_index_0, num_end_coords))
end_indices_1 = list(range(end_index_1 + 1))
end_indices = end_indices_0 + end_indices_1
end_offsets = [end_offset] * len(end_indices_0) + [
end_offset + center_line.length
] * len(end_indices_1)
# Logic to avoid the start and end points being too close to track waypoints
before_start_lane_point = Point(np.array(start_lane_line.coords.xy)[:, start_indices[-1]])
after_end_lane_point = Point(np.array(end_lane_line.coords.xy)[:, end_indices[0]])
if before_start_lane_point.distance(start_lane_point) < DIST_THRESHOLD:
# pop last index of start_indices
start_indices.pop()
start_offsets.pop()
if after_end_lane_point.distance(end_lane_point) < DIST_THRESHOLD:
# pop first index of end_indices
end_indices.pop(0)
end_offsets.pop(0)
# Build the spline
u = np.hstack(
(
np.array(start_lane_dists)[start_indices] + np.array(start_offsets),
self._lane_change_start_dist,
self._lane_change_end_dist + end_offset,
np.array(end_lane_dists)[end_indices] + np.array(end_offsets),
)
)
x = np.hstack(
(
np.array(start_lane_line.coords.xy)[:, start_indices],
start_lane_point.xy,
end_lane_point.xy,
np.array(end_lane_line.coords.xy)[:, end_indices],
)
)
u, ui = np.unique(u, return_index=True)
x = x[:, ui]
bot_car_spline, _ = splprep(x, u=u, k=SPLINE_DEGREE, s=0)
return TrackLine(LineString(np.array(np.transpose(x)))), u, bot_car_spline
```
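The last step of `_build_spline()` fits a parametric spline through the merged waypoints with `scipy.interpolate.splprep`. The standalone sketch below, with made-up waypoints and parameter values, shows that fitting-and-sampling pattern in isolation.
```python
# Fit a parametric spline through a handful of 2-D waypoints, then sample it with splev.
import numpy as np
from scipy.interpolate import splev, splprep

u = np.array([0.0, 1.0, 2.0, 3.0, 4.0])        # distance along the track (the parameter)
x = np.array([[0.0, 1.0, 2.0, 3.0, 4.0],        # x coordinates of the waypoints
              [0.0, 0.5, 0.0, -0.5, 0.0]])      # y coordinates of the waypoints

tck, _ = splprep(x, u=u, k=3, s=0)              # k plays the role of SPLINE_DEGREE above
sampled = splev(np.linspace(0.0, 4.0, 9), tck)  # interpolate 9 points along the spline
print(np.round(np.array(sampled), 3))
```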
#### File: markov/track_geom/utils.py
```python
import bisect
import math
import numpy as np
from markov.track_geom.constants import HIDE_POS_DELTA, HIDE_POS_OFFSET, START_POS_OFFSET
# The order of rotation applied: roll -> pitch -> yaw
def euler_to_quaternion(roll=0, pitch=0, yaw=0):
# Abbreviations for the various angular functions
cy = math.cos(yaw * 0.5)
sy = math.sin(yaw * 0.5)
cp = math.cos(pitch * 0.5)
sp = math.sin(pitch * 0.5)
cr = math.cos(roll * 0.5)
sr = math.sin(roll * 0.5)
# Quaternion
x = cy * cp * sr - sy * sp * cr
y = sy * cp * sr + cy * sp * cr
z = sy * cp * cr - cy * sp * sr
w = cy * cp * cr + sy * sp * sr
return x, y, z, w
def quaternion_to_euler(x, y, z, w):
"""convert quaternion x, y, z, w to euler angle roll, pitch, yaw
Args:
x: quaternion x
y: quaternion y
z: quaternion z
w: quaternion w
Returns:
Tuple: (roll, pitch, yaw) tuple
"""
# roll (x-axis rotation)
sinr_cosp = 2.0 * (w * x + y * z)
cosr_cosp = 1.0 - 2.0 * (x * x + y * y)
roll = math.atan2(sinr_cosp, cosr_cosp)
# pitch (y-axis rotation)
sinp = 2.0 * (w * y - z * x)
if abs(sinp) >= 1.0:
pitch = math.copysign(math.pi / 2.0, sinp) # use 90 degrees if out of range
else:
pitch = math.asin(sinp)
# yaw (z-axis rotation)
siny_cosp = 2.0 * (w * z + x * y)
cosy_cosp = 1.0 - 2.0 * (y * y + z * z)
yaw = math.atan2(siny_cosp, cosy_cosp)
return roll, pitch, yaw
def inverse_quaternion(q, threshold=0.000001):
q = np.array(q)
n = np.dot(q, q)
if n < threshold:
raise Exception("Cannot calculate inverse with quaternion length is 0")
inv_q = np.zeros(4)
inv_q[0] = -q[0] / n
inv_q[1] = -q[1] / n
inv_q[2] = -q[2] / n
inv_q[3] = q[3] / n
return inv_q
def apply_orientation(q, v):
"""This function is used to rotate a vector in the oriention of the given quternion.
This function assumes that v is a homogeneous quternion. That is the real part is zero.
The complete explanation can be found in the link
https://math.stackexchange.com/questions/40164/how-do-you-rotate-a-vector-by-a-unit-quaternion
https://en.wikipedia.org/wiki/Quaternion#Hamilton_product
On an highlevel. We want the vector v in the direction of the quternion q. We know that
q * q_conj = 1
p = q * v * q_conj, where p is pure quternion, same length as v in the direction of q.
The simplified formula in the executed code is derived from the below equations
quaternion_mult(q,r)
b1, c1, d1, a1 = q # Here a1 and a2 are real numbers, b1, c1, d1 are imaginary i,j,k
b2, c2, d2, a2 = r
return [
a1*b2 + b1*a2 + c1*d2 - d1*c2,
a1*c2 - b1*d2 + c1*a2 + d1*b2,
a1*d2 + b1*c2 - c1*b2 + d1*a2,
a1*a2 - b1*b2 - c1*c2 - d1*d2
]
apply_orientation(q, v):
r = np.insert(v, 3, 0)
q_conj = [-1*q[0],-1*q[1],-1*q[2], q[3]]
return quaternion_mult(quaternion_mult(q,r), q_conj)[:3]
If the vector is not pure quternion. Then in the below simplified solution the real value returned will be
a2*( a1_sq + b1_sq + c1_sq + d1_sq)
Arguments:
q (numpy.ndarray): A quternion numpy array of shape (4,)
v (numpy.ndarray): A vector on which orientation has to be applied. A numpy array of shape (3,)
"""
b1, c1, d1, a1 = q
b2, c2, d2 = v[0], v[1], v[2]
a1_sq = a1 ** 2
b1_sq = b1 ** 2
c1_sq = c1 ** 2
d1_sq = d1 ** 2
return np.array(
[
b2 * (-c1_sq - d1_sq + b1_sq + a1_sq)
+ 2 * (-(a1 * c2 * d1) + (b1 * c1 * c2) + (b1 * d1 * d2) + (a1 * c1 * d2)),
c2 * (c1_sq - d1_sq + a1_sq - b1_sq)
+ 2 * ((a1 * b2 * d1) + (b1 * b2 * c1) + (c1 * d1 * d2) - (a1 * b1 * d2)),
d2 * (-c1_sq + d1_sq + a1_sq - b1_sq)
+ 2 * ((a1 * b1 * c2) + (b1 * b2 * d1) - (a1 * b2 * c1) + (c1 * c2 * d1)),
]
)
def find_prev_next(a, x):
next_index = bisect.bisect_right(a, x)
prev_index = next_index - 1
if prev_index == -1:
prev_index = len(a) - 1
if next_index == len(a):
next_index = 0
return prev_index, next_index
def pose_distance(pose_a, pose_b):
p_a = pose_a.position
p_b = pose_b.position
return math.sqrt((p_b.x - p_a.x) ** 2 + (p_b.y - p_a.y) ** 2 + (p_b.z - p_a.z) ** 2)
def get_start_positions(race_car_num):
return [-START_POS_OFFSET * idx for idx in range(race_car_num)]
def get_hide_positions(race_car_num):
"""Generate hide positions for cars what will be outside of the race track environment.
So that idle cars are not visible to customers.
Args:
race_car_num (int): The number of race cars in current environment.
Returns:
list: List of hiding positions.
"""
# TODO: Maybe implement some logic to make sure the park position is always outside of the race track
return [
(-(HIDE_POS_OFFSET + HIDE_POS_DELTA * idx), HIDE_POS_OFFSET) for idx in range(race_car_num)
]
```
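A quick sanity-check sketch for the conversion helpers above, assuming `markov.track_geom.utils` is importable; the quaternion component order is `(x, y, z, w)`, matching the functions' signatures.
```python
import numpy as np
from markov.track_geom.utils import apply_orientation, euler_to_quaternion, quaternion_to_euler

q = euler_to_quaternion(roll=0.0, pitch=0.0, yaw=np.pi / 2)   # a 90 degree yaw
roll, pitch, yaw = quaternion_to_euler(*q)
assert np.allclose([roll, pitch, yaw], [0.0, 0.0, np.pi / 2])

# Rotating the x unit vector by a 90 degree yaw should give the y unit vector.
v_rotated = apply_orientation(np.array(q), np.array([1.0, 0.0, 0.0]))
assert np.allclose(v_rotated, [0.0, 1.0, 0.0])
```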
#### File: eplus/envs/pyEpError.py
```python
class pyEpError(Exception):
"""Base class for pyEp Errors"""
def __init__(self, message):
super(pyEpError, self).__init__(message)
class VersionError(pyEpError):
"""Error Thrown when E+ Communications protocol is not 2."""
def __init__(self, message=None):
if message is None:
message = "Incorrect Version of EnergyPlus communications protocol. Make sure your version of EnergyPlus supports version 2"
super(VersionError, self).__init__(str(message))
self.version = message
class EpWriteError(pyEpError):
"""Error thrown when appempting to write to a closed E+ instance"""
def __init__(self, message=None):
if message is None:
message = "Error attempting to write to closed socket by EnergyPlus. Perhaps the simulation already finished?"
super(EpWriteError, self).__init__(message)
self.message = message
class EpReadError(pyEpError):
"""Error thrown when appempting to read from a closed E+ instance"""
def __init__(self, message=None):
if message is None:
message = "Error attempting to read from closed EnergyPlus socket. Perhaps the simulation is already finished?"
super(EpReadError, self).__init__(message)
self.message = message
class MissingEpPathError(pyEpError):
"""Error thrown when the path to EnergyPlus is not specified."""
def __init__(self, message=None):
if message is None:
message = "EnergyPlus path not specified. Set the default path with set_eplus_dir()"
super(MissingEpPathError, self).__init__(message)
self.message = message
```
#### File: compressor/layers/ops.py
```python
import pickle
from collections import OrderedDict
import tensorflow as tf
def get_tf_vars_dict(scope=None):
"""Returns all trainable variables in the session in a dictionary form"""
all_trainable_vars = get_tf_vars_list(scope)
vars_dict = OrderedDict()
for var in all_trainable_vars:
vars_dict[var.name] = var
return vars_dict
def get_param_from_name(name, scope=None):
"""Returns a particular parameter as a tf element given its name"""
return get_global_vars_dict(scope)[name]
def load_meta_model_as_np(infile, import_scope="imported"):
"""This will load the meta file into numpy arrays."""
with tf.Session() as sess:
restorer = tf.train.import_meta_graph(infile + ".meta", import_scope=import_scope)
restorer.restore(sess, infile)
tf_vars = get_global_vars_dict(import_scope)
np_vars = {}
for k in tf_vars.keys():
np_vars[k] = tf_vars[k].eval()
return np_vars, tf_vars
def load_pkl_obj(name):
"""Loads a pickle model weights for when weights are supplied as initializers to layers"""
with open(name + ".pkl", "rb") as f:
return pickle.load(f)
def get_tf_vars_list(scope=None):
"""Returns all the trainable varialbes in the scope as a trainable dictionary."""
return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
def get_global_vars_list(scope=None):
"""Returns all the varialbes in the scope as a trainable dictionary."""
return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope)
def get_global_vars_dict(scope=None):
"""Returns all variables in the session in a dictionary form"""
all_vars = get_global_vars_list(scope)
vars_dict = OrderedDict()
for var in all_vars:
vars_dict[var.name] = var
return vars_dict
```
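A small usage sketch for the variable-collection helpers above. It assumes a TensorFlow 1.x graph environment (the module relies on `tf.get_collection` and `tf.GraphKeys`, which are TF1 APIs), and the import path is inferred from the file layout.
```python
import tensorflow as tf
from compressor.layers.ops import get_tf_vars_dict  # import path assumed from the file layout

with tf.variable_scope("demo"):
    w = tf.get_variable("w", shape=[2, 2])
    b = tf.get_variable("b", shape=[2])

vars_dict = get_tf_vars_dict(scope="demo")
print(list(vars_dict.keys()))  # e.g. ['demo/w:0', 'demo/b:0']
```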
#### File: rl_resource_allocation_ray_customEnv/src/train_news_vendor.py
```python
from ray.tune.registry import register_env
from sagemaker_rl.ray_launcher import SageMakerRayLauncher
class MyLauncher(SageMakerRayLauncher):
def register_env_creator(self):
from news_vendor_env import NewsVendorGymEnvironmentNormalized
register_env(
"NewsVendorGymEnvironment-v1",
lambda env_config: NewsVendorGymEnvironmentNormalized(env_config),
)
def get_experiment_config(self):
return {
"training": {
"env": "NewsVendorGymEnvironment-v1",
"run": "PPO",
"config": {
"ignore_worker_failures": True,
"gamma": 1,
"kl_coeff": 1.0,
"num_sgd_iter": 5,
"lr": 0.0001,
"sgd_minibatch_size": 32768,
"train_batch_size": 320000,
"model": {
"fcnet_hiddens": [64, 64],
},
"use_gae": False,
"num_workers": (self.num_cpus - 1),
"num_gpus": self.num_gpus,
"batch_mode": "complete_episodes",
"env_config": {},
"observation_filter": "MeanStdFilter",
},
"checkpoint_freq": 2,
}
}
if __name__ == "__main__":
MyLauncher().train_main()
```
#### File: common/sagemaker_rl/stable_baselines_launcher.py
```python
import os
import gym
import roboschool
from gym.wrappers.monitoring.video_recorder import VideoRecorder
from mpi4py import MPI
from stable_baselines.bench import Monitor
from stable_baselines.common import set_global_seeds, tf_util
from stable_baselines.common.policies import MlpPolicy
from stable_baselines.ppo1 import PPO1
class RewScale(gym.RewardWrapper):
def __init__(self, env, scale):
gym.RewardWrapper.__init__(self, env)
self.scale = scale
def reward(self, _reward):
return _reward * self.scale
class SagemakerStableBaselinesLauncher:
"""
Sagemaker's Stable Baselines Launcher.
"""
def __init__(self, env, output_path, model, num_timesteps):
self._env = env
self._output_path = output_path
self._model = model
self._num_timesteps = num_timesteps
def _train(self):
"""Train the RL model"""
self._model.learn(total_timesteps=self._num_timesteps)
def _predict(self, model, video_path):
"""Run predictions on trained RL model."""
vr = VideoRecorder(
env=self._env,
path="{}/rl_out.mp4".format(video_path, str(MPI.COMM_WORLD.Get_rank())),
enabled=True,
)
obs = self._env.reset()
for i in range(1000):
action, _states = model.predict(obs)
obs, rewards, dones, info = self._env.step(action)
if dones:
obs = self._env.reset()
self._env.render(mode="rgb_array")
vr.capture_frame()
vr.close()
self._env.close()
def run(self):
self._train()
if MPI.COMM_WORLD.Get_rank() == 0:
self._predict(self._model, self._output_path)
class SagemakerStableBaselinesPPO1Launcher(SagemakerStableBaselinesLauncher):
"""
Sagemaker's Stable Baselines PPO1 Launcher.
"""
def __init__(
self,
env,
output_path,
timesteps_per_actorbatch,
clip_param,
entcoeff,
optim_epochs,
optim_stepsize,
optim_batchsize,
gamma,
lam,
schedule,
verbose,
num_timesteps,
):
print(
"Initializing PPO with output_path: {} and Hyper Params [timesteps_per_actorbatch: {},clip_param: {}, "
"entcoeff: {}, optim_epochs: {}, optim_stepsize: {}, optim_batchsize: {}, gamma: {}, lam: {}, "
"schedule: {}, verbose: {}, num_timesteps: {}]".format(
output_path,
timesteps_per_actorbatch,
clip_param,
entcoeff,
optim_epochs,
optim_stepsize,
optim_batchsize,
gamma,
lam,
schedule,
verbose,
num_timesteps,
)
)
super().__init__(
env,
output_path,
PPO1(
policy=MlpPolicy,
env=env,
gamma=gamma,
timesteps_per_actorbatch=timesteps_per_actorbatch,
clip_param=clip_param,
entcoeff=entcoeff,
optim_epochs=optim_epochs,
optim_stepsize=optim_stepsize,
optim_batchsize=optim_batchsize,
lam=lam,
schedule=schedule,
verbose=verbose,
),
num_timesteps,
)
def create_env(env_id, output_path, seed=0):
rank = MPI.COMM_WORLD.Get_rank()
set_global_seeds(seed + 10000 * rank)
env = gym.make(env_id)
env = Monitor(env, os.path.join(output_path, str(rank)), allow_early_resets=True)
env.seed(seed)
return env
```
#### File: rl_traveling_salesman_vehicle_routing_coach/src/TSP_baseline_utils.py
```python
import itertools
import numpy as np
def tsp_action_go_from_a_to_b(a, b):
# 0: Up, 1: Down, 2: Left, 3: Right
action = None
cur_x = a[0]
cur_y = a[1]
tar_x = b[0]
tar_y = b[1]
x_diff = tar_x - cur_x
y_diff = tar_y - cur_y
if abs(x_diff) >= abs(y_diff):
# Move horizontally
if x_diff > 0:
action = 3
elif x_diff < 0:
action = 2
else:
# Move vertically
if y_diff > 0:
action = 0
elif y_diff < 0:
action = 1
return action
def create_dist_matrix(all_xy, num_stops):
# D[i,j] is the cost of going from i to j
D = {i: {} for i in range(num_stops)} # index 0 is the restaurant
# Create distance matrix
for i in range(num_stops):
for j in range(i + 1, num_stops):
dist = manhattan_dist(all_xy[i][0], all_xy[i][1], all_xy[j][0], all_xy[j][1])
D[i][j] = dist
D[j][i] = dist
return D
def tsp_dp_approx_sol(res_xy, orders_xy):
# This baseline is for the TSP problem,
# a single agent traveling all orders starting and finishing at a single restaurant
# assuming res_xy = (res_x, res_y), orders_xy = [(order1_x, order1_y), ...]
all_xy = [res_xy] + orders_xy
num_stops = len(all_xy)
D = create_dist_matrix(all_xy, num_stops)
# Best cost in stage i for each order
DP = {i: {} for i in range(num_stops)}
# Subsequent visits in the best route from stage i on for each order
DP_will_visit = {i: {} for i in range(num_stops)}
# DP solution, backwards
for i in reversed(range(num_stops)):
# This is the final visit to the restaurant
if i == num_stops - 1:
for o in range(1, num_stops):
DP[i][o] = D[o][0]
DP_will_visit[i][o] = [o]
else:
if i == 0:
stop_list = [0]
else:
stop_list = range(1, num_stops)
for o in stop_list:
min_dist = np.inf
min_o_next = None
for o_next in range(1, num_stops):
if o_next in DP_will_visit[i + 1].keys():
if o not in DP_will_visit[i + 1][o_next]:
cost = D[o][o_next] + DP[i + 1][o_next]
if cost < min_dist:
min_o_next = o_next
min_dist = cost
if min_o_next:
DP[i][o] = min_dist
DP_will_visit[i][o] = [o] + DP_will_visit[i + 1][min_o_next]
print(DP)
print(DP_will_visit)
return DP[0], DP_will_visit[0][0] + [0]
def manhattan_dist(x1, y1, x2, y2):
return np.abs(x1 - x2) + np.abs(y1 - y2)
def tsp_dp_opt_sol(res_xy, orders_xy):
all_xy = [res_xy] + orders_xy
num_stops = len(all_xy)
D = create_dist_matrix(all_xy, num_stops)
C = {} # Subtour cost dictionary, (set of nodes in the subtour, last node)
P = {} # Subtour path dictionary, (set of nodes in the subtour, last node)
# Initialize C
for o in range(1, num_stops):
C[frozenset({o}), o] = D[0][o]
P[frozenset({o}), o] = [0, o]
for s in range(2, num_stops):
for S in itertools.combinations(range(1, num_stops), s):
for o in S:
search_keys = [(frozenset(S) - {o}, m) for m in S if m != o]
search_list = [C[S_o, m] + D[m][o] for S_o, m in search_keys]
min_val = min(search_list)
opt_key = search_keys[search_list.index(min_val)]
C[frozenset(S), o] = min_val
P[frozenset(S), o] = P[opt_key] + [o]
final_set = frozenset(range(1, num_stops))
search_list = [C[final_set, o] + D[o][0] for o in final_set]
best_cost = min(search_list)
opt_final_order = search_list.index(best_cost) + 1
best_route = P[final_set, opt_final_order] + [0]
return best_cost, best_route
```
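A tiny worked example for the exact dynamic-programming solver above (`tsp_dp_opt_sol`), run alongside the functions in this file. The Manhattan distances below were checked by hand: the optimal tour cost is 8, and `[0, 2, 3, 1, 0]` is one optimal route.
```python
res_xy = (0, 0)                        # node 0: the restaurant
orders_xy = [(0, 2), (2, 0), (2, 2)]   # nodes 1-3: delivery locations

best_cost, best_route = tsp_dp_opt_sol(res_xy, orders_xy)
print(best_cost, best_route)           # 8 [0, 2, 3, 1, 0]
```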
#### File: rl_traveling_salesman_vehicle_routing_coach/src/VRP_env.py
```python
import gym
import numpy as np
from gym import spaces
from rl_operations_research_baselines.VRP.VRP_view_2D import VRPView2D
"""
STATE:
Restaurant x
Restaurant y
Driver x
Driver y
Driver used capacity
Driver max capacity
Order status: inactive/delivered/not-created, open, accepted, picked up
Order x
Order y
Order to restaurant map
Time elapsed per order after 'open'
ACTION:
- no action
- L,R,U,D
- accept an order
Easy:
#Restaurants: 1
#Orders: 2
Order Promise: Infinite( = Episode length)
Order Timeout: Infinite( = Episode length)
Driver Capacity: Infinite( = # Orders)
"""
class VRPEasyEnv(gym.Env):
def render(self, mode="human", close=False):
if self.vrp_view is None:
self.vrp_view = VRPView2D(
n_restaurants=self.n_restaurants,
n_orders=self.n_orders,
map_quad=self.map_quad,
grid_size=25,
)
return self.vrp_view.update(
res_x=self.res_x,
res_y=self.res_y,
o_status=self.o_status,
o_x=self.o_x,
o_y=self.o_y,
dr_x=self.dr_x,
dr_y=self.dr_y,
mode=mode,
)
def __init__(
self,
n_restaurants=1,
n_orders=2,
order_prob=0.3,
driver_capacity=5,
map_quad=(2, 2),
order_promise=100,
order_timeout=100,
episode_length=100,
):
self.vrp_view = None
self.map_quad = map_quad
self.n_orders = n_orders
self.n_restaurants = n_restaurants
self.driver_capacity = driver_capacity
self.order_prob = order_prob
self.order_promise = order_promise
self.order_timeout = order_timeout
self.dr_used_capacity = 0
self.o_x = []
self.o_y = []
self.o_status = []
self.o_res_map = []
self.o_time = []
self.dr_x = None
self.dr_y = None
self.game_over = False
self.state = []
self.reward = None
# store the inputs
self.episode_length = episode_length
self.clock = 0
# map boundaries
self.map_min_x = -map_quad[0]
self.map_max_x = +map_quad[0]
self.map_min_y = -map_quad[1]
self.map_max_y = +map_quad[1]
self.map_range_x = range(-self.map_max_x, self.map_max_x + 1)
self.map_range_y = range(-self.map_max_y, self.map_max_y + 1)
# restaurant x position limits
res_x_min = [self.map_min_x] * n_restaurants
res_x_max = [self.map_max_x] * n_restaurants
# restaurant y position limits
res_y_min = [self.map_min_y] * n_restaurants
res_y_max = [self.map_max_y] * n_restaurants
# driver x position limits
dr_x_min = [self.map_min_x]
dr_x_max = [self.map_max_x]
# driver y position limits
dr_y_min = [self.map_min_y]
dr_y_max = [self.map_max_y]
dr_used_capacity_min = [0]
dr_used_capacity_max = [driver_capacity]
# n_orders for x position limits
o_x_min = [self.map_min_x] * n_orders
o_x_max = [self.map_max_x] * n_orders
# n_orders for y position limits
o_y_min = [self.map_min_y] * n_orders
o_y_max = [self.map_max_y] * n_orders
# order status: 0 - inactive(not created, cancelled, delivered), 1 - open, 2 - accepted, 3 - picked-up
o_status_min = [0] * n_orders
o_status_max = [3] * n_orders
# order-restaurant mapping, i.e. which restaurant each order belongs to
o_res_map_min = [-1] * n_orders
o_res_map_max = [n_restaurants - 1] * n_orders
# time elapsed since the order has been placed
o_time_min = [0] * n_orders
o_time_max = [order_timeout] * n_orders
# Create the observation space
self.observation_space = spaces.Box(
low=np.array(
res_x_min
+ res_y_min
+ dr_x_min
+ dr_y_min
+ dr_used_capacity_min
+ [driver_capacity]
+ o_x_min
+ o_y_min
+ o_status_min
+ o_res_map_min
+ o_time_min
+ [order_promise]
+ [order_timeout]
),
high=np.array(
res_x_max
+ res_y_max
+ dr_x_max
+ dr_y_max
+ dr_used_capacity_max
+ [driver_capacity]
+ o_x_max
+ o_y_max
+ o_status_max
+ o_res_map_max
+ o_time_max
+ [order_promise]
+ [order_timeout]
),
dtype=np.int16,
)
# Action space: no action, up, down, left, right, accept order i
self.action_space = spaces.Discrete(5 + n_orders)
def reset(self):
self.clock = 0
self.__place_restaurants()
self.__place_driver()
self.dr_used_capacity = 0
self.o_x = [0] * self.n_orders
self.o_y = [0] * self.n_orders
self.o_status = [0] * self.n_orders
self.o_res_map = [-1] * self.n_orders
self.o_time = [0] * self.n_orders
self.vrp_view = None
return self.__create_state()
def step(self, action):
done = False
self.reward = 0
self.__update_driver_parameters(action)
self.__update_environment_parameters()
state = self.__create_state()
# Update the clock
self.clock += 1
if self.clock >= self.episode_length:
done = True
info = {}
return state, self.reward, done, info
def __update_driver_parameters(self, action):
if action == 0: # no action
pass
elif action == 1: # UP
self.dr_y = min(self.map_max_y, self.dr_y + 1)
elif action == 2: # DOWN
self.dr_y = max(self.map_min_y, self.dr_y - 1)
elif action == 3: # LEFT
self.dr_x = max(self.map_min_x, self.dr_x - 1)
elif action == 4: # RIGHT
self.dr_x = min(self.map_max_x, self.dr_x + 1)
elif action > 4: # accept order i
o = action - 5 # order index
# if order is open and driver has capacity, accept it
if self.o_status[o] == 1 and self.dr_used_capacity < self.driver_capacity:
self.o_status[o] = 2
self.dr_used_capacity += 1
# Check for pick-ups for each order accepted by the driver but not picked up/delivered yet.
for r in range(self.n_restaurants):
res_x = self.res_x[r]
res_y = self.res_y[r]
if self.dr_x == res_x and self.dr_y == res_y:
# The driver is at a restaurant. Check if any accepted order can be picked up from here.
for o in range(self.n_orders):
# if an order has been accepted by the driver and it was ordered from
# the restaurant the driver is at, then pick it up
if self.o_status[o] == 2 and self.o_res_map[o] == r:
self.o_status[o] = 3 # set order status to picked up
self.reward += (self.order_timeout - self.o_time[o]) * 0.1
# Check for deliveries
for o in range(self.n_orders):
# If order is picked up by driver and driver is at delivery location, deliver the order
if self.o_status[o] == 3 and (self.dr_x == self.o_x[o] and self.dr_y == self.o_y[o]):
# 50 cents of tip/penalty for early/late delivery.
# self.reward += (self.order_promise - self.o_time[o]) * 0.5
self.reward += (
max(0.0, (self.order_promise - self.o_time[o]) * 0.5)
+ (self.order_timeout - self.o_time[o]) * 0.15
)
self.dr_used_capacity -= 1
self.o_status[o] = 0
self.o_time[o] = 0
self.o_res_map[o] = -1
self.o_x[o] = 0
self.o_y[o] = 0
def __update_environment_parameters(self):
# Update the waiting times
for o in range(self.n_orders):
# if this is an active order, increase the waiting time
if self.o_status[o] > 1:
self.o_time[o] += 1
# Check if any order expires
for o in range(self.n_orders):
if self.o_time[o] >= self.order_timeout:
# Incur the cost to the driver who had accepted the order
# print("Order", o, "has expired.")
if self.o_status[o] >= 2:
self.reward -= self.order_timeout * 0.5
self.dr_used_capacity -= 1
self.o_status[o] = 0
self.o_time[o] = 0
self.o_res_map[o] = -1
self.o_x[o] = 0
self.o_y[o] = 0
# Create new orders
for o in range(self.n_orders):
if self.o_status[o] == 0:
# Flip a coin to create an order
if np.random.random(1)[0] < self.order_prob:
o_x, o_y, r = self.__receive_order()
self.o_x[o] = o_x
self.o_y[o] = o_y
self.o_res_map[o] = r
self.o_status[o] = 1
def __place_restaurants(self):
self.res_coordinates = []
self.res_x = []
self.res_y = []
i = 0
while len(self.res_coordinates) < self.n_restaurants:
res_x = np.random.choice([i for i in self.map_range_x], 1)[0]
res_y = np.random.choice([i for i in self.map_range_y], 1)[0]
res_loc = (res_x, res_y)
if res_loc not in self.res_coordinates:
self.res_coordinates.append(res_loc)
self.res_x.append(res_x)
self.res_y.append(res_y)
elif i > 1000:
print("Something is wrong with the restaurant placement.")
break
def __place_driver(self):
self.dr_x = np.random.choice([i for i in self.map_range_x], 1)[0]
self.dr_y = np.random.choice([i for i in self.map_range_y], 1)[0]
def __receive_order(self):
i = 0 # prevent infinite loop
while True:
order_x = np.random.choice([i for i in self.map_range_x], 1)[0]
order_y = np.random.choice([i for i in self.map_range_y], 1)[0]
# Make sure the order does not overlap with any restaurants
if (order_x, order_y) not in self.res_coordinates:
break
elif i > 1000:
print("Something is wrong with the restaurant/order locations.")
break
else:
i += 1
# Determine the restaurant to assign the order
from_res = np.random.choice([i for i in range(self.n_restaurants)], 1)[0]
return order_x, order_y, from_res
def __create_state(self):
return (
self.res_x
+ self.res_y
+ [self.dr_x]
+ [self.dr_y]
+ [self.dr_used_capacity]
+ [self.driver_capacity]
+ self.o_x
+ self.o_y
+ self.o_status
+ self.o_res_map
+ self.o_time
+ [self.order_promise]
+ [self.order_timeout]
)
"""
Medium:
#Restaurants: 1
#Orders: 10
Order Promise: Infinite( = Episode length)
Order Timeout: Infinite( = Episode length)
Driver Capacity: Infinite( = # Orders)
"""
class VRPMediumEnv(VRPEasyEnv):
def __init__(self):
super().__init__(
n_restaurants=1,
n_orders=10,
order_prob=0.9,
driver_capacity=4,
map_quad=(8, 8),
order_promise=200,
order_timeout=400,
episode_length=2000,
)
"""
Hard:
#Restaurants: 2
#Orders: 10
Order Promise: 30
Order Timeout: 30
Driver Capacity: 3
"""
class VRPHardEnv(VRPEasyEnv):
def __init__(self):
super().__init__(
n_restaurants=2,
n_orders=3,
order_prob=0.9,
driver_capacity=4,
map_quad=(10, 10),
order_promise=60,
order_timeout=120,
episode_length=5000,
)
```
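A short interaction sketch with the easy VRP environment above, following the usual gym `reset`/`step` loop. It assumes the module (and its `VRP_view_2D` dependency) is importable; `render()` is skipped so no viewer is needed.
```python
from VRP_env import VRPEasyEnv  # module name assumed from the file path

env = VRPEasyEnv()
obs = env.reset()
total_reward = 0.0
for _ in range(20):
    action = env.action_space.sample()   # 0: no-op, 1-4: move, 5+: accept order i
    obs, reward, done, info = env.step(action)
    total_reward += reward
    if done:
        break
print(len(obs), total_reward)
```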
#### File: tensorflow_open-images_jpg/code/inference.py
```python
import base64
import io
import json
import requests
def input_handler(data, context):
"""Pre-process request input before it is sent to TensorFlow Serving REST API
Args:
data (obj): the request data stream
context (Context): an object containing request and configuration details
Returns:
(dict): a JSON-serializable dict that contains request body and headers
"""
if context.request_content_type == "application/x-image":
payload = data.read()
encoded_image = base64.b64encode(payload).decode("utf-8")
instance = [{"b64": encoded_image}]
return json.dumps({"instances": instance})
else:
_return_error(
415, 'Unsupported content type "{}"'.format(context.request_content_type or "Unknown")
)
def output_handler(response, context):
"""Post-process TensorFlow Serving output before it is returned to the client.
Args:
response (obj): the TensorFlow serving response
context (Context): an object containing request and configuration details
Returns:
(bytes, string): data to return to client, response content type
"""
if response.status_code != 200:
_return_error(response.status_code, response.content.decode("utf-8"))
response_content_type = context.accept_header
prediction = response.content
return prediction, response_content_type
def _return_error(code, message):
raise ValueError("Error: {}, {}".format(str(code), message))
```
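A local sanity-check sketch for `input_handler()`, using a tiny stand-in for the SageMaker serving `Context` object (the real one is supplied by the hosting container) and fake image bytes.
```python
import io
import json
from collections import namedtuple

from inference import input_handler  # module name assumed from the file path

FakeContext = namedtuple("FakeContext", ["request_content_type", "accept_header"])

fake_image_bytes = b"\xff\xd8\xff\xe0 not a real JPEG, just demo bytes"
context = FakeContext(request_content_type="application/x-image", accept_header="application/json")

body = input_handler(io.BytesIO(fake_image_bytes), context)
print(list(json.loads(body)["instances"][0].keys()))  # ['b64']
```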
#### File: tfrecord-transformer-container/app/main.py
```python
import codecs
import io
import json
import logging
import struct
import crcmod
import tensorflow as tf
from flask import Flask, Response, request
from google.protobuf.json_format import MessageToDict
app = Flask(__name__)
def _masked_crc32c(value):
crc = crcmod.predefined.mkPredefinedCrcFun("crc-32c")(value)
return (((crc >> 15) | (crc << 17)) + 0xA282EAD8) & 0xFFFFFFFF
def read_tfrecords(tfrecords):
tfrecords_bytes = io.BytesIO(tfrecords)
examples = []
while True:
length_header = 12
buf = tfrecords_bytes.read(length_header)
if not buf:
# reached end of tfrecord buffer, return examples
return examples
if len(buf) != length_header:
raise ValueError("TFrecord is fewer than %d bytes" % length_header)
length, length_mask = struct.unpack("<QI", buf)
length_mask_actual = _masked_crc32c(buf[:8])
if length_mask_actual != length_mask:
raise ValueError("TFRecord does not contain a valid length mask")
length_data = length + 4
buf = tfrecords_bytes.read(length_data)
if len(buf) != length_data:
raise ValueError("TFRecord data payload has fewer bytes than specified in header")
data, data_mask_expected = struct.unpack("<%dsI" % length, buf)
data_mask_actual = _masked_crc32c(data)
if data_mask_actual != data_mask_expected:
raise ValueError("TFRecord has an invalid data crc32c")
# Deserialize the tf.Example proto
example = tf.train.Example()
example.ParseFromString(data)
# Extract a feature map from the example object
example_feature = MessageToDict(example.features)["feature"]
feature_dict = {}
for feature_key in example_feature.keys():
feature_dict[feature_key] = example_feature[feature_key][
list(example_feature[feature_key].keys())[0]
]["value"][0]
examples.append(feature_dict)
@app.route("/invocations", methods=["POST"])
def invocations():
try:
examples = read_tfrecords(request.data)
# Build a TF serving predict request JSON
response = Response(json.dumps({"signature_name": "predict", "instances": examples}))
response.headers["Content-Type"] = "application/json"
return response
except ValueError as err:
return str(err), 400
@app.route("/ping", methods=["GET"])
def ping():
return "", 200
```
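A round-trip sketch for `read_tfrecords()`: serialize one `tf.train.Example` into the standard TFRecord framing with TensorFlow's own writer, then hand the raw bytes to the helper above. It assumes TensorFlow and `crcmod` are installed (the service needs both anyway) and that the import path matches the file layout.
```python
import tempfile

import tensorflow as tf

from app.main import read_tfrecords  # import path assumed from the file layout

example = tf.train.Example(
    features=tf.train.Features(
        feature={"age": tf.train.Feature(int64_list=tf.train.Int64List(value=[42]))}
    )
)

# Write one record with TensorFlow's writer, which produces the framing parsed above.
with tempfile.NamedTemporaryFile(suffix=".tfrecord", delete=False) as tmp:
    path = tmp.name
with tf.io.TFRecordWriter(path) as writer:
    writer.write(example.SerializeToString())

with open(path, "rb") as f:
    raw = f.read()

print(read_tfrecords(raw))  # e.g. [{'age': '42'}]
```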
#### File: sagemaker-debugger/pytorch_iterative_model_pruning/model_resnet.py
```python
import numpy as np
import smdebug
import torch
import torch.nn as nn
import torchvision
from smdebug import modes
from torchvision import models
# list of ordered tensor names
activation_outputs = [
#'relu_ReLU_output_0',
"layer1.0.relu_0_output_0",
"layer1.1.relu_0_output_0",
"layer2.0.relu_0_output_0",
"layer2.1.relu_0_output_0",
"layer3.0.relu_0_output_0",
"layer3.1.relu_0_output_0",
"layer4.0.relu_0_output_0",
"layer4.1.relu_0_output_0",
]
gradients = [
#'gradient/relu_ReLU_output',
"gradient/layer1.0.relu_ReLU_output",
"gradient/layer1.1.relu_ReLU_output",
"gradient/layer2.0.relu_ReLU_output",
"gradient/layer2.1.relu_ReLU_output",
"gradient/layer3.0.relu_ReLU_output",
"gradient/layer3.1.relu_ReLU_output",
"gradient/layer4.0.relu_ReLU_output",
"gradient/layer4.1.relu_ReLU_output",
]
# function to prune layers
def prune(model, filters_list, trial, step):
# dict that has a list of filters to be pruned per layer
filters_dict = {}
for layer_name, channel, _ in filters_list:
if layer_name not in filters_dict:
filters_dict[layer_name] = []
filters_dict[layer_name].append(channel)
counter = 0
in_channels_dense = 0
exclude_filters = None
in_channels = 3
exclude = False
# iterate over layers in the ResNet model
for named_module in model.named_modules():
layer_name = named_module[0]
layer = named_module[1]
# check if current layer is a convolutional layer
if isinstance(layer, torch.nn.modules.conv.Conv2d):
# remember the output channels of non-pruned convolution (needed for pruning first fc layer)
in_channels_dense = layer.out_channels
# create key to find right weights/bias/filters for the corresponding layer
weight_name = "ResNet_" + layer_name + ".weight"
# get weight values from last available training step
weight = trial.tensor(weight_name).value(step, mode=modes.TRAIN)
# we need to adjust the number of input channels
# if the previous convolution has been pruned
# print( "current:", layer.in_channels, "previous", in_channels, layer_name, weight_name)
if "conv1" in layer_name or "conv2" in layer_name:
if layer.in_channels != in_channels:
layer.in_channels = in_channels
weight = np.delete(weight, exclude_filters, axis=1)
exclude_filters = None
# if current layer is in the list of filters to be pruned
if "conv1" in layer_name:
layer_id = layer_name.strip("conv1")
for key in filters_dict:
if len(layer_id) > 0 and layer_id in key:
print(
"Reduce output channels for conv layer",
layer_id,
"from",
layer.out_channels,
"to",
layer.out_channels - len(filters_dict[key]),
)
# set new output channels
layer.out_channels = layer.out_channels - len(filters_dict[key])
# remove corresponding filters from weights and bias
# convolution weights have dimension: filter x channel x kernel x kernel
exclude_filters = filters_dict[key]
weight = np.delete(weight, exclude_filters, axis=0)
break
# remember new size of output channels, because we need to prune subsequent convolution
in_channels = layer.out_channels
# set pruned weight and bias
layer.weight.data = torch.from_numpy(weight)
if isinstance(layer, torch.nn.modules.batchnorm.BatchNorm2d):
# get weight values from last available training step
weight_name = "ResNet_" + layer_name + ".weight"
weight = trial.tensor(weight_name).value(step, mode=modes.TRAIN)
# get bias values from last available training step
bias_name = "ResNet_" + layer_name + ".bias"
bias = trial.tensor(bias_name).value(step, mode=modes.TRAIN)
# get running_mean values from last available training step
mean_name = layer_name + ".running_mean_output_0"
mean = trial.tensor(mean_name).value(step, mode=modes.TRAIN)
# get running_var values from last available training step
var_name = layer_name + ".running_var_output_0"
var = trial.tensor(var_name).value(step, mode=modes.TRAIN)
# if current layer is in the list of filters to be pruned
if "bn1" in layer_name:
layer_id = layer_name.strip("bn1")
for key in filters_dict:
if len(layer_id) > 0 and layer_id in key:
print(
"Reduce bn layer",
layer_id,
"from",
weight.shape[0],
"to",
weight.shape[0] - len(filters_dict[key]),
)
# remove corresponding filters from weights and bias
# convolution weights have dimension: filter x channel x kernel x kernel
exclude_filters = filters_dict[key]
weight = np.delete(weight, exclude_filters, axis=0)
bias = np.delete(bias, exclude_filters, axis=0)
mean = np.delete(mean, exclude_filters, axis=0)
var = np.delete(var, exclude_filters, axis=0)
break
# set pruned weight and bias
layer.weight.data = torch.from_numpy(weight)
layer.bias.data = torch.from_numpy(bias)
layer.running_mean.data = torch.from_numpy(mean)
layer.running_var.data = torch.from_numpy(var)
layer.num_features = weight.shape[0]
in_channels = weight.shape[0]
if isinstance(layer, torch.nn.modules.linear.Linear):
# get weight values from last available training step
weight_name = "ResNet_" + layer_name + ".weight"
weight = trial.tensor(weight_name).value(step, mode=modes.TRAIN)
# get bias values from last available training step
bias_name = "ResNet_" + layer_name + ".bias"
bias = trial.tensor(bias_name).value(step, mode=modes.TRAIN)
# prune first fc layer
if exclude_filters is not None:
# in_channels_dense is the number of output channels of last non-pruned convolution layer
params = int(layer.in_features / in_channels_dense)
# prune weights of first fc layer
indexes = []
for i in exclude_filters:
indexes.extend(np.arange(i * params, (i + 1) * params))
if indexes[-1] > weight.shape[1]:
indexes.extend(np.arange(weight.shape[1] - params, weight.shape[1]))
weight = np.delete(weight, indexes, axis=1)
print(
"Reduce weights for first linear layer from",
layer.in_features,
"to",
weight.shape[1],
)
# set new in_features
layer.in_features = weight.shape[1]
exclude_filters = None
# set weights
layer.weight.data = torch.from_numpy(weight)
# set bias
layer.bias.data = torch.from_numpy(bias)
return model
```
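The pruning routine above repeatedly removes selected filters from a convolution's weights (axis 0) and the matching input channels from the layer that consumes them (axis 1). The standalone numpy sketch below, with invented shapes, isolates that core operation.
```python
import numpy as np

conv1_w = np.random.randn(8, 3, 3, 3)    # 8 filters, 3 input channels, 3x3 kernels
conv2_w = np.random.randn(16, 8, 3, 3)   # consumes conv1's 8 output channels

filters_to_prune = [2, 5]                # channel indices selected for removal

conv1_w_pruned = np.delete(conv1_w, filters_to_prune, axis=0)   # now 6 filters
conv2_w_pruned = np.delete(conv2_w, filters_to_prune, axis=1)   # now 6 input channels

print(conv1_w_pruned.shape, conv2_w_pruned.shape)   # (6, 3, 3, 3) (16, 6, 3, 3)
```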
#### File: track-an-airflow-workflow/code/train.py
```python
import argparse
import logging
import os
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as K
from data_util import load_test_dataset, load_train_dataset
from model import get_model
from sklearn.model_selection import train_test_split
logging.getLogger().setLevel(logging.INFO)
tf.logging.set_verbosity(tf.logging.ERROR)
# Copy inference pre/post-processing script so it will be included in the model package
os.system("mkdir /opt/ml/model/code")
os.system("cp inference.py /opt/ml/model/code")
os.system("cp requirements.txt /opt/ml/model/code")
def save_model(model, path):
tf.contrib.saved_model.save_keras_model(model, f"{path}/SavedModel")
logging.info("Model successfully saved at: {}".format(path))
def main(args):
model = get_model(
filters=args.filter_sizes,
hidden_units=args.hidden_size,
dropouts=args.dropout_sizes,
num_class=args.num_classes,
)
# load training data
x, y = load_train_dataset(droot=args.train_dir)
# one-hot encode label
one_hot_y = np.zeros((y.shape[0], args.num_classes))
one_hot_y[np.arange(y.shape[0]), y] = 1
# split x and y into train and val set
X_train, X_val, y_train, y_val = train_test_split(
x, one_hot_y, test_size=args.test_size, random_state=42, shuffle=True
)
# normalize the x image
X_train = X_train / 255
X_val = X_val / 255
opt = tf.keras.optimizers.Adam(args.learning_rate, args.momentum)
model.compile(
loss="categorical_crossentropy",
optimizer=opt,
metrics=["categorical_crossentropy", "accuracy"],
)
# a callback to save model ckpt after each epoch if better model is found
model_ckpt_callback = tf.keras.callbacks.ModelCheckpoint(
args.output_data_dir + "/checkpoint-{epoch}.h5",
monitor="val_accuracy",
)
logging.info("Start training ...")
model.fit(
X_train,
y_train,
validation_data=(X_val, y_val),
batch_size=args.batch_size,
epochs=args.epochs,
callbacks=[model_ckpt_callback],
verbose=2,
)
save_model(model, args.model_output_dir)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--filter-sizes", nargs=2, type=int, default=[64, 32], help="Filter size with length of 2"
)
parser.add_argument(
"--hidden-size", type=int, default=256, help="Feed-forward layer hidden unit size."
)
parser.add_argument(
"--dropout-sizes",
nargs=3,
type=float,
default=[0.3, 0.3, 0.5],
help="Dropout layer size with length of 2",
)
parser.add_argument(
"--num-classes", type=int, default=10, help="Num of class in classification task."
)
parser.add_argument("--learning-rate", type=float, default=0.001, help="Initial learning rate.")
parser.add_argument("--epochs", type=int, default=10)
parser.add_argument("--batch-size", type=int, default=128)
parser.add_argument("--test-size", type=float, default=0.2)
parser.add_argument("--momentum", type=float, default=0.9)
parser.add_argument("--train-dir", type=str, default=os.environ.get("SM_CHANNEL_TRAINING"))
parser.add_argument("--model_dir", type=str)
parser.add_argument("--model-output-dir", type=str, default=os.environ.get("SM_MODEL_DIR"))
parser.add_argument("--output-data-dir", type=str, default=os.environ.get("SM_OUTPUT_DATA_DIR"))
args = parser.parse_args()
main(args)
```
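A standalone illustration of the one-hot encoding trick used in `main()` above, where the row index array and the label array jointly index the zero matrix.
```python
import numpy as np

y = np.array([3, 0, 2])
num_classes = 4
one_hot_y = np.zeros((y.shape[0], num_classes))
one_hot_y[np.arange(y.shape[0]), y] = 1
print(one_hot_y)
# [[0. 0. 0. 1.]
#  [1. 0. 0. 0.]
#  [0. 0. 1. 0.]]
```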
#### File: sagemaker-python-sdk/tensorflow_script_mode_pipe_mode/pipemode.py
```python
import argparse
import json
import os
import tensorflow as tf
from sagemaker_tensorflow import PipeModeDataset
from tensorflow.contrib.data import map_and_batch
PREFETCH_SIZE = 10
BATCH_SIZE = 64
NUM_PARALLEL_BATCHES = 2
DIMENSION = 1024
EPOCHS = 1
def train_input_fn():
"""Returns input function that would feed the model during training"""
return _input_fn("train")
def eval_input_fn():
"""Returns input function that would feed the model during evaluation"""
return _input_fn("eval")
def _input_fn(channel):
"""Returns a Dataset for reading from a SageMaker PipeMode channel."""
features = {
"data": tf.FixedLenFeature([], tf.string),
"labels": tf.FixedLenFeature([], tf.int64),
}
def parse(record):
parsed = tf.parse_single_example(record, features)
return ({"data": tf.decode_raw(parsed["data"], tf.float64)}, parsed["labels"])
ds = PipeModeDataset(channel)
if EPOCHS > 1:
ds = ds.repeat(EPOCHS)
ds = ds.prefetch(PREFETCH_SIZE)
ds = ds.apply(
map_and_batch(parse, batch_size=BATCH_SIZE, num_parallel_batches=NUM_PARALLEL_BATCHES)
)
return ds
def _parse_args():
parser = argparse.ArgumentParser()
# Data, model, and output directories
# model_dir is always passed in from SageMaker. By default this is an S3 path under the default bucket.
parser.add_argument("--model_dir", type=str)
parser.add_argument("--sm-model-dir", type=str, default=os.environ.get("SM_MODEL_DIR"))
parser.add_argument("--hosts", type=list, default=json.loads(os.environ.get("SM_HOSTS")))
parser.add_argument("--current-host", type=str, default=os.environ.get("SM_CURRENT_HOST"))
return parser.parse_known_args()
def serving_input_fn():
inputs = {"data": tf.placeholder(tf.string)}
return tf.estimator.export.ServingInputReceiver(inputs, inputs)
if __name__ == "__main__":
args, unknown = _parse_args()
column = tf.feature_column.numeric_column("data", shape=(DIMENSION,))
train_spec = tf.estimator.TrainSpec(train_input_fn, max_steps=3000)
eval_spec = tf.estimator.EvalSpec(eval_input_fn)
linear_classifier = tf.estimator.LinearClassifier(
feature_columns=[column], model_dir=args.model_dir
)
tf.estimator.train_and_evaluate(linear_classifier, train_spec, eval_spec)
if args.current_host == args.hosts[0]:
linear_classifier.export_savedmodel(args.sm_model_dir, serving_input_fn)
```
#### File: mnist/code/model_def.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout2d(0.25)
self.dropout2 = nn.Dropout2d(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
```
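A shape sanity check for the network above (run alongside the class, or after importing `Net` from `model_def`): one 1x28x28 MNIST-sized input should yield a `(1, 10)` tensor of log-probabilities.
```python
import torch

net = Net()
out = net(torch.randn(1, 1, 28, 28))
print(out.shape)                      # torch.Size([1, 10])
print(torch.exp(out).sum().item())    # ~1.0, since log_softmax outputs log-probabilities
```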
|
{
"source": "Jerrypiglet/scalenet",
"score": 2
}
|
#### File: scalenet/RELEASE_ScaleNet_minimal/dataset_coco_pickle_eccv.py
```python
import os, sys
import numpy as np
import torch
import torchvision
import json
import random
from PIL import Image
from glob import glob
from scipy.stats import norm
# from torchvision import transforms
from imageio import imread
from tqdm import tqdm
from scipy.io import loadmat
from termcolor import colored
import time
import pickle
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.keypoint import PersonKeypoints
def getBins(minval, maxval, sigma, alpha, beta, kappa):
"""Remember, bin 0 = below value! last bin mean >= maxval"""
x = np.linspace(minval, maxval, 255)
rv = norm(0, sigma)
pdf = rv.pdf(x)
pdf /= (pdf.max())
pdf *= alpha
pdf = pdf.max()*beta - pdf
cumsum = np.cumsum(pdf)
cumsum = cumsum / cumsum.max() * kappa
cumsum -= cumsum[pdf.size//2]
return cumsum
# def getHorizonLineFromAngles(pitch, roll, FoV, im_h, im_w):
# midpoint = getMidpointFromAngle(pitch, FoV)
# dh = getDeltaHeightFromRoll(roll, im_h, im_w)
# return midpoint + dh, midpoint - dh
# def getMidpointFromAngle(pitch, FoV):
# return ( 0.5 + 0.5*np.tan(pitch) / np.tan(FoV/2) )
# def getDeltaHeightFromRoll(roll, im_h, im_w):
# "The height distance of horizon from the midpoint at image left/right border intersection."""
# return im_w/im_h*np.tan(roll) / 2
# def getOffset(pitch, roll, vFoV, im_h, im_w):
# hl, hr = getHorizonLineFromAngles(pitch, roll, vFoV, im_h, im_w)
# midpoint = (hl + hr) / 2.
# #slope = np.arctan(hr - hl)
# offset = (midpoint - 0.5) / np.sqrt( 1 + (hr - hl)**2 )
# return offset
# def midpointpitch2bin(midpoint, pitch):
# if np.isnan(midpoint):
# if pitch < 0:
# return np.digitize(pitch, pitch_bins_low)
# else:
# return np.digitize(pitch, pitch_bins_high) + 224
# assert 0 <= midpoint <= 1
# return int(midpoint*192) + 32
def bin2midpointpitch(bins):
pos = np.squeeze(bins.argmax(axis=-1))
if pos < 31:
return False, pitch_bins_low[pos]
elif pos == 255:
return False, np.pi/6
elif pos >= 224:
return False, pitch_bins_high[pos - 224]
else:
return True, (pos - 32)/192
def make_bins_layers_list(x_bins_lowHigh_list):
x_bins_layers_list = []
for layer_idx, x_bins_lowHigh in enumerate(x_bins_lowHigh_list):
x_bins = np.linspace(x_bins_lowHigh[0], x_bins_lowHigh[1], 255)
x_bins_centers = x_bins.copy()
x_bins_centers[:-1] += np.diff(x_bins_centers)/2
x_bins_centers = np.append(x_bins_centers, x_bins_centers[-1]) # 42 bins
x_bins_layers_list.append(x_bins_centers)
return x_bins_layers_list
# yc_bins_centers_1 = np.append(yc_bins_centers_1, yc_bins_centers_1[-1]) # 42 bins
bins_lowHigh_list_dict = {}
# yc_bins_lowHigh_list = [[0.5, 3.], [-0.3, 0.3], [-0.15, 0.15], [-0.15, 0.15], [-0.05, 0.05]] # 'SmallerBins'
yc_bins_lowHigh_list = [[0.5, 5.], [-0.3, 0.3], [-0.15, 0.15], [-0.3, 0.3], [-0.15, 0.15]] # 'YcLargeBins'
# yc_bins_lowHigh_list = [[0.5, 10.], [-0.5, 0.5], [-0.15, 0.15], [-0.3, 0.3], [-0.15, 0.15]] # 'YcLargerBinsV2'
bins_lowHigh_list_dict['yc_bins_lowHigh_list'] = yc_bins_lowHigh_list
yc_bins_layers_list = make_bins_layers_list(yc_bins_lowHigh_list)
yc_bins_centers = yc_bins_layers_list[0]
fmm_bins_lowHigh_list = [[0., 0.], [-0.2, 0.2], [-0.05, 0.05], [-0.05, 0.05], [-0.05, 0.05]] # percentage!!
bins_lowHigh_list_dict['fmm_bins_lowHigh_list'] = fmm_bins_lowHigh_list
fmm_bins_layers_list = make_bins_layers_list(fmm_bins_lowHigh_list)
v0_bins_lowHigh_list = [[0., 0.], [-0.15, 0.15], [-0.05, 0.05], [-0.05, 0.05], [-0.05, 0.05]] # 'SmallerBins'
bins_lowHigh_list_dict['v0_bins_lowHigh_list'] = v0_bins_lowHigh_list
v0_bins_layers_list = make_bins_layers_list(v0_bins_lowHigh_list)
# human_bins = np.linspace(1., 2., 256)
human_bins = np.linspace(1., 1.9, 256) # 'SmallerPersonBins'
# human_bins = np.linspace(1., 2.5, 256) # 'V2PersonCenBins'
# human_bins = np.linspace(0.7, 1.9, 256) # 'V3PersonCenBins'
human_bins_1 = np.linspace(-0.2, 0.2, 256)
human_bins_lowHigh_list = [[0., 0.], [-0.3, 0.15], [-0.10, 0.10], [-0.10, 0.10], [-0.05, 0.05]] # 'SmallerBins'
bins_lowHigh_list_dict['human_bins_lowHigh_list'] = human_bins_lowHigh_list
human_bins_layers_list = make_bins_layers_list(human_bins_lowHigh_list)
car_bins = np.linspace(1.4, 1.70, 256) # 'V2CarBins'
car_bins_lowHigh_list = [[0., 0.], [-0.10, 0.10], [-0.05, 0.05], [-0.10, 0.10], [-0.05, 0.05]] # 'SmallerBins'
bins_lowHigh_list_dict['car_bins_lowHigh_list'] = car_bins_lowHigh_list
car_bins_layers_list = make_bins_layers_list(car_bins_lowHigh_list)
results_path_yannick = 'data/coco_results/yannick_results_train2017_filtered'
# image_path = '/home/ruizhu/Documents/Projects/adobe_scale_est/data/COCO/train2017'
# bbox_path = '/home/ruizhu/Documents/Projects/adobe_scale_est/data/coco_results/imgs_with_morethan2_standing_persons_allVis_train2017_2'
# bbox_path = '/data/COCO/coco_results/imgs_with_morethan2_standing_persons_allVis_train2017_2'
# new dataset 2020
# bbox_path = '/data/COCO/coco_results/imgs_with_morethan2_standing_persons_train2017_20200101-2'
# bbox_path = '/data/COCO/coco_results/imgs_with_morethan2_standing_persons_train2017_20200103-v4'
# bbox_path = '/data/COCO/coco_results/imgs_with_morethan2_standing_persons_train2017_20200103-v5_ratio2-8'
pickle_paths = {\
'train-val': 'data/coco_results/results_with_kps_20200208_morethan2_2-8/pickle', \
# 'test': '/data/COCO/coco_results/results_with_kps_20200225_val2017-vis_filtered_2-8_moreThan2/pickle'}
'test': 'data/coco_results/results_with_kps_20200225_val2017_test_detOnly_filtered_2-8_moreThan2/pickle'}
# pickle_paths['train-val'] = '/data/COCO/coco_results/results_with_kps_20200221_Car_noSmall-ratio1-35-mergeWith-kps_20200208_morethan2_2-8/pickle' #MultiCat
pickle_paths['test'] = '/results_test_20200302_Car_noSmall-ratio1-35-mergeWith-results_with_kps_20200225_train2017_detOnly_filtered_2-8_moreThan2/pickle' #MultiCat
bbox_paths = {key: pickle_paths[key].replace('/pickle', '/npy') for key in pickle_paths}
class COCO2017Scale(torchvision.datasets.coco.CocoDetection):
def __init__(self, transforms_maskrcnn=None, transforms_yannick=None, split='', coco_subset='coco_scale', shuffle=True, logger=None, opt=None, dataset_name='', write_split=False):
assert split in ['train', 'val', 'test'], 'wrong dataset split for COCO2017Scale!'
if split in ['train', 'val']:
ann_file = '/data/COCO/annotations/person_keypoints_train2017.json' # !!!! tmp!
root = '/data/COCO/train2017'
pickle_path = pickle_paths['train-val']
bbox_path = bbox_paths['train-val']
image_path = '/data/COCO/train2017'
# self.train = True
else:
ann_file = '/data/COCO/annotations/person_keypoints_val2017.json' # !!!! tmp!
root = '/data/COCO/val2017'
pickle_path = pickle_paths['test']
bbox_path = bbox_paths['test']
image_path = '/data/COCO/val2017'
# self.train = False
super(COCO2017Scale, self).__init__(root, ann_file)
self.opt = opt
self.cfg = self.opt.cfg
self.GOOD_NUM = self.cfg.DATA.COCO.GOOD_NUM
if split in ['train', 'val']:
self.train = True
else:
self.train = False
self.json_category_id_to_contiguous_id = {
v: i + 1 for i, v in enumerate(self.coco.getCatIds())
}
self.transforms_maskrcnn = transforms_maskrcnn
self.transforms_yannick = transforms_yannick
ts = time.time()
# try:
# with open("filelist_spherical.json", "r") as fhdl:
# self.data = json.load(fhdl)
# except FileNotFoundError:
self.yannick_mat_files = glob(os.path.join(results_path_yannick, "*.mat"), recursive=True)
self.yannick_mat_files.sort()
random.seed(123456)
random.shuffle(self.yannick_mat_files)
num_mat_files = len(self.yannick_mat_files)
if split == 'train':
self.yannick_mat_files = self.yannick_mat_files[:int(num_mat_files*0.8)]
elif split == 'val':
self.yannick_mat_files = self.yannick_mat_files[-int(num_mat_files*0.2):]
logger.info(self.yannick_mat_files[0])
# self.yannick_mat_files = self.yannick_mat_files[:100]
# with open("filelist_spherical.json", "w") as fhdl:
# json.dump(self.data, fhdl)
if self.train:
self.img_filenames = [os.path.basename(yannick_mat_file).split('.')[0] for yannick_mat_file in self.yannick_mat_files]
self.img_files = [os.path.join(image_path, img_filename+'.jpg') for img_filename in self.img_filenames]
self.bbox_npy_files = [os.path.join(bbox_path, img_filename+'.npy') for img_filename in self.img_filenames]
self.pickle_files = [os.path.join(pickle_path, img_filename+'.data') for img_filename in self.img_filenames]
assert len(self.bbox_npy_files) == len(self.pickle_files) == len(self.img_files) == len(self.yannick_mat_files)
bbox_npy_files_filtered = []
pickle_files_filtered = []
img_files_filtered = []
yannick_mat_files_filtered = []
for bbox_npy_file, pickle_file, img_file, yannick_mat_file in zip(self.bbox_npy_files, self.pickle_files, self.img_files, self.yannick_mat_files):
if os.path.isfile(pickle_file):
assert os.path.basename(bbox_npy_file)[:12] == os.path.basename(pickle_file)[:12] == os.path.basename(img_file)[:12] == os.path.basename(yannick_mat_file)[:12]
bbox_npy_files_filtered.append(bbox_npy_file)
pickle_files_filtered.append(pickle_file)
img_files_filtered.append(img_file)
yannick_mat_files_filtered.append(yannick_mat_file)
self.bbox_npy_files = bbox_npy_files_filtered
self.pickle_files = pickle_files_filtered
self.img_files = img_files_filtered
self.yannick_mat_files = yannick_mat_files_filtered
else:
self.pickle_files = glob(os.path.join(pickle_path, "*.data"), recursive=True)
self.pickle_files.sort()
self.img_filenames = [os.path.basename(pickle_file).split('.')[0] for pickle_file in self.pickle_files]
self.img_files = [os.path.join(image_path, img_filename+'.jpg') for img_filename in self.img_filenames]
self.bbox_npy_files = [os.path.join(bbox_path, img_filename+'.npy') for img_filename in self.img_filenames]
self.yannick_mat_files = [''] * len(self.pickle_files)
assert len(self.bbox_npy_files) == len(self.pickle_files) == len(self.img_files) == len(self.yannick_mat_files)
if write_split and opt.rank==0:
list_file = pickle_path.replace('/pickle', '') + '/%s.txt'%split
train_file = open(list_file, 'a')
for train_pickle in self.pickle_files:
train_id_06 = os.path.basename(train_pickle)[6:12]
# train_id_06 = os.path.basename(train_pickle).split('_')[1]
print(train_id_06)
train_file.write('%s\n'%(train_id_06))
train_file.close()
print('Train split written to '+list_file)
if opt.rank==0:
logger.info(colored("[COCO dataset] Loaded %d PICKLED files in %.4fs for %s set from %s."%(len(self.pickle_files), time.time()-ts, split, pickle_path), 'white', 'on_blue'))
# from scipy.io import savemat
# savemat('val_set.mat', {'img_files': self.img_files})
if shuffle:
random.seed(314159265)
list_zip = list(zip(self.img_files, self.bbox_npy_files, self.pickle_files, self.yannick_mat_files))
random.shuffle(list_zip)
self.img_files, self.bbox_npy_files, self.pickle_files, self.yannick_mat_files = zip(*list_zip)
assert os.path.basename(self.img_files[0])[:12] == os.path.basename(self.bbox_npy_files[0])[:12] == os.path.basename(self.pickle_files[0])[:12] == os.path.basename(self.yannick_mat_files[0])[:12]
# print(self.img_files[:2])
# print(self.bbox_npy_files[:2])
# if train:
# self.data = self.data[:-2000]
# else:
# self.data = self.data[-2000:]
if not self.train:
print([os.path.basename(img_file) for img_file in self.img_files[:12]])
def __getitem__(self, k):
        im_ori_RGB = Image.open(self.img_files[k]).convert('RGB') # im_ori_RGB.size: (W, H)
with open(self.pickle_files[k], 'rb') as filehandle:
data = pickle.load(filehandle)
bboxes = data['bboxes'].astype(np.float32) # [xywh]
assert len(bboxes.shape)==2 and bboxes.shape[1]==4
num_bboxes_ori = bboxes.shape[0]
if 'label' in data:
labels = data['label'] # ['car', 'person', 'person']
else:
labels = ['person'] * num_bboxes_ori
# bboxes = np.load(self.bbox_npy_files[k]).astype(np.float32) # [xywh]
if bboxes.shape[0] > self.cfg.DATA.COCO.GOOD_NUM:
bboxes = bboxes[:self.cfg.DATA.COCO.GOOD_NUM, :]
labels = labels[:self.cfg.DATA.COCO.GOOD_NUM]
target_boxes = torch.as_tensor(bboxes).reshape(-1, 4) # guard against no boxes
target = BoxList(target_boxes, im_ori_RGB.size, mode="xywh").convert("xyxy")
num_boxes = target.bbox.shape[0]
if self.opt.est_kps:
if 'kps' in data:
kps_gt = data['kps'].astype(int) # [N, 51]
if num_bboxes_ori > self.cfg.DATA.COCO.GOOD_NUM:
kps_gt = kps_gt[:self.cfg.DATA.COCO.GOOD_NUM, :]
kps_gt = kps_gt.tolist() # [[51]]
else:
kps_gt = [[0]*51 for i in range(num_boxes)]
target_keypoints = PersonKeypoints(kps_gt, im_ori_RGB.size)
# kps_sum = torch.sum(torch.sum(target_keypoints.keypoints[:, :, :2], 1), 1)
# kps_mask = kps_sum != 0.
# print(target_keypoints.keypoints.shape, kps_sum, kps_mask)
target.add_field("keypoints", target_keypoints)
# target.add_field("keypoints_mask", kps_mask)
target = target.clip_to_image(remove_empty=True)
classes = [1] * num_boxes # !!!!! all person (1) for now...
classes = [self.json_category_id_to_contiguous_id[c] for c in classes]
classes = torch.tensor(classes)
target.add_field("labels", classes)
scores = torch.tensor([1.] * target.bbox.shape[0])
target.add_field("scores", scores)
W, H = im_ori_RGB.size[:2]
if self.train:
yannick_results = loadmat(self.yannick_mat_files[k])
horizon_visible = yannick_results['horizon_visible'][0][0].astype(np.float32)
assert horizon_visible == 1
horizon = yannick_results['pitch'][0][0].astype(np.float32)
horizon_pixels_yannick = H * horizon
v0 = H - horizon_pixels_yannick
vfov = yannick_results['vfov'][0][0].astype(np.float32)
f_pixels_yannick = H/2./(np.tan(vfov/2.))
else:
f_pixels_yannick = -1
v0 = -1
im_yannickTransform = self.transforms_yannick(im_ori_RGB) # [0., 1.] by default
im_maskrcnnTransform, target_maskrcnnTransform = self.transforms_maskrcnn(im_ori_RGB, target) # [0., 1.] by default
# print('---', im.size(), np.asarray(im).shape)
# im_array = np.asarray(im)
# if len(im_array.shape)==2:
# im_array = np.stack((im_array,)*3, axis=-1)
# # print(im_array.shape)
# x = torch.from_numpy(im_array.transpose((2,0,1)))
if self.train and self.opt.est_kps:
target_maskrcnnTransform.add_field("keypoints_ori", target_keypoints)
target_maskrcnnTransform.add_field("boxlist_ori", target)
target_maskrcnnTransform.add_field('img_files', [self.img_files[k]] * num_boxes)
if self.train:
y_person = 1.75
bbox_good_list = bboxes
vc = H / 2.
inv_f2_yannick = 1./ (f_pixels_yannick * f_pixels_yannick)
yc_list = []
for bbox in bbox_good_list:
vt = H - bbox[1]
vb = H - (bbox[1] + bbox[3])
# v0_single = yc * (vt - vb) / y_person + vb
yc_single = y_person * (v0 - vb) / (vt - vb) / (1. + (vc - v0) * (vc - vt) / f_pixels_yannick**2)
yc_list.append(yc_single)
yc_estCam = np.median(np.asarray(yc_list))
else:
yc_estCam = -1
assert len(labels)==bboxes.shape[0]
# im_ori_BGR_array = np.array(im_ori_RGB.copy())[:,:,::-1]
return im_yannickTransform, im_maskrcnnTransform, W, H, \
float(yc_estCam), \
self.pad_bbox(bboxes, self.GOOD_NUM).astype(np.float32), bboxes.shape[0], float(v0), float(f_pixels_yannick), \
os.path.basename(self.img_files[k])[:12], self.img_files[k], target_maskrcnnTransform, labels
def __len__(self):
return len(self.img_files)
def pad_bbox(self, bboxes, max_length):
bboxes_padded = np.zeros((max_length, bboxes.shape[1]))
assert bboxes.shape[0]<=max_length, 'bboxes length %d > max_length %d!'%(bboxes.shape[0], max_length)
bboxes_padded[:bboxes.shape[0], :] = bboxes
return bboxes_padded
def my_collate(batch):
# Refer to https://discuss.pytorch.org/t/how-to-create-batches-of-a-list-of-varying-dimension-tensors/50773/14
im_yannickTransform_list, im_maskrcnnTransform_list, W_batch_list, H_batch_list, yc_batch_list, \
bboxes_batch_list, bboxes_length_batch_list, v0_batch_list, f_pixels_yannick_batch_list, im_filename_list, im_file_list, target_maskrcnnTransform_list, labels_list = zip(*batch)
# input_yannickTransform = torch.stack(im_yannickTransform_list)
# input_maskrcnnTransform = torch.stack(im_maskrcnnTransform_list)
W_batch_array = np.stack(W_batch_list).copy()
H_batch_array = np.stack(H_batch_list).copy()
# yc_onehot_batch = torch.stack(yc_onehot_batch_list)
yc_batch = torch.tensor(yc_batch_list)
bboxes_batch_array = np.stack(bboxes_batch_list).copy()
bboxes_length_batch_array = np.stack(bboxes_length_batch_list).copy()
v0_batch = torch.tensor(v0_batch_list)
f_pixels_yannick_batch = torch.tensor(f_pixels_yannick_batch_list)
# idx3_batch_list = [idx3.item() for idx3 in idx3_batch_list]
# idx3_batch = torch.tensor(idx3_batch_list)
return im_yannickTransform_list, im_maskrcnnTransform_list, W_batch_array, H_batch_array, yc_batch, \
bboxes_batch_array, bboxes_length_batch_array, v0_batch, f_pixels_yannick_batch, im_filename_list, im_file_list, target_maskrcnnTransform_list, labels_list
# # batch contains a list of tuples of structure (sequence, target)
# data = [item[0] for item in batch]
# data = pack_sequence(data, enforce_sorted=False)
# targets = [item[1] for item in batch]
# return [data, targets]
def collate_fn_padd(batch):
'''
    Pads a batch of variable-length samples.
    Note: tensors are built manually here since the ToTensor transform
    assumes it takes in images rather than arbitrary tensors.
'''
## get sequence lengths
# lengths = torch.tensor([ t.shape[0] for t in batch ]).to(device)
# ## padd
# batch = [ torch.Tensor(t).to(device) for t in batch ]
# batch = torch.nn.utils.rnn.pad_sequence(batch)
# ## compute mask
# mask = (batch != 0).to(device)
# return batch, lengths, mask
ims = [torch.Tensor(item[0]) for item in batch]
bboxes = [torch.Tensor(item[1]) for item in batch]
v0s = [torch.Tensor(np.asarray(item[2])) for item in batch]
f_pixels_yannicks = [torch.Tensor(np.asarray(item[3])) for item in batch]
img_filenames = [item[4] for item in batch]
img_filepaths = [item[5] for item in batch]
return [ims, bboxes, v0s, f_pixels_yannicks, img_filenames, img_filepaths]
# def __getitem__(self, k):
# # with open(self.data[k][:-4] + ".json", "r") as fhdl:
# # data = json.load(fhdl)
# # data = data[2]
# #im = np.asarray(imread(self.data[k].replace("_true_camera_calibration.json", ".jpg"))[:,:,:3])
# im = Image.open(self.data[k])
# #im = Image.open(self.data[k].replace("_true_camera_calibration.json", ".jpg"))
# #hl_left, hl_right = getHorizonLineFromAngles(pitch=data["pitch"], roll=data["roll"], FoV=data["vfov"], im_h=im.size[0], im_w=im.size[1])
# #slope = np.arctan(hl_right - hl_left)
# #midpoint = (hl_left + hl_right) / 2
# #offset = (midpoint - 0.5) / np.sqrt( 1 + (hl_right - hl_left)**2 )
# #offset = getOffset(data["pitch"], data["roll"], data["vfov"], im.size[0], im.size[1])
# #idx1 = midpointpitch2bin(, data["pitch"])
# assert im.size[0] == data["width"]
# idx1 = midpointpitch2bin(data["offset"] / data["height"], data["pitch"])
# idx2 = np.digitize(data["roll"], roll_bins)
# idx3 = np.digitize(data["vfov"], vfov_bins)
# idx4 = np.digitize(data["spherical_distortion"], distortion_bins)
# #print("{:.04f}".format(data["vfov"]), "{:.04f}".format(data["pitch"]), idx1)
# y1 = np.zeros((256,), dtype=np.float32)
# y2 = np.zeros((256,), dtype=np.float32)
# y3 = np.zeros((256,), dtype=np.float32)
# y4 = np.zeros((256,), dtype=np.float32)
# if idx2 > 255 or idx1 > 255:
# print(self.data[k], data["offset"] / im.size[0], data["pitch"], idx1, idx2, idx3, idx4)
# y1[idx1] = y2[idx2] = y3[idx3] = y4[idx4] = 1.
# #x = torch.from_numpy(im.transpose((2,0,1)))
# x = self.transforms(im)
# y1, y2, y3, y4 = map(torch.from_numpy, (y1, y2, y3, y4))
# return x, y1, y2, y3, y4, data
# #{"angle units": "radians", "yaw": 0.0, "has_artifact": false, "has_day_sky": false, "source": "pano_aoijeqajukkoem", "pitch": 0.00492545270356224, "primary_top_content": "buildings or ceilings", "vfov": 0.9096217797805077, "roll": -0.01719714340875391}
if __name__ == '__main__':
# this_bin = midpointpitch2bin(1.1, 0.0)
# a = np.zeros((256,)); a[this_bin] = 1
# print("bin:", this_bin, "recovered:", bin2midpointpitch(a))
# sys.exit()
train = COCO2017Scale(train=True)
print(len(train))
for a in range(len(train)):
_ = train[a]
#print("---")
#import pdb; pdb.set_trace()
```
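For clarity, a hedged sketch of the `pad_bbox` helper defined above: boxes are zero-padded to a fixed row count so that samples with different numbers of boxes can be stacked into one batch array. Calling the method unbound with `None` for `self` works only because `pad_bbox` never touches instance state; this is illustration, not repo code.
```python
# Illustrative only: zero-pad [N, 4] boxes up to a fixed number of rows.
import numpy as np

bboxes = np.array([[10., 20., 30., 40.],
                   [ 5.,  5., 15., 25.]])
padded = COCO2017Scale.pad_bbox(None, bboxes, 8)
print(padded.shape)      # (8, 4); rows 2..7 are all zeros
```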
#### File: maskrcnn_rui/solver/build.py
```python
import torch
from .lr_scheduler import WarmupMultiStepLR
from termcolor import colored
def make_optimizer(cfg, model, optim_type='SGD', params_dict= {}, logger=None):
params = []
for key, value in model.named_parameters():
if not value.requires_grad:
continue
lr = cfg.SOLVER.BASE_LR
weight_decay = cfg.SOLVER.WEIGHT_DECAY
if "bias" in key:
lr = cfg.SOLVER.BASE_LR * cfg.SOLVER.BIAS_LR_FACTOR
weight_decay = cfg.SOLVER.WEIGHT_DECAY_BIAS
params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}]
logger.info(colored('Creating %s solver with lr=%.4f, weight_decay=%.4f...'%(optim_type, lr, weight_decay), 'white', 'on_blue'))
if optim_type == 'SGD':
optimizer = torch.optim.SGD(params, lr, momentum=cfg.SOLVER.MOMENTUM)
elif optim_type == 'Adam':
optimizer = torch.optim.Adam(params, lr=lr, betas=(params_dict['beta1'], 0.999), eps=1e-5)
else:
raise RuntimeError('Optimizer type %s not supported! (SGD/Adam)'%optim_type)
return optimizer
def make_lr_scheduler(cfg, optimizer):
return WarmupMultiStepLR(
optimizer,
cfg.SOLVER.STEPS,
cfg.SOLVER.GAMMA,
warmup_factor=cfg.SOLVER.WARMUP_FACTOR,
warmup_iters=cfg.SOLVER.WARMUP_ITERS,
warmup_method=cfg.SOLVER.WARMUP_METHOD,
)
```
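A minimal usage sketch for the two factory functions above. The `cfg` namespace below only mimics the `SOLVER.*` fields they read and is an assumption for illustration; in the repo these values come from the yacs config.
```python
# Hedged example: wiring make_optimizer / make_lr_scheduler with a stand-in cfg.
import torch
import torch.nn as nn
import logging
from types import SimpleNamespace

cfg = SimpleNamespace(SOLVER=SimpleNamespace(
    BASE_LR=1e-3, WEIGHT_DECAY=1e-4, BIAS_LR_FACTOR=2.0, WEIGHT_DECAY_BIAS=0.0,
    MOMENTUM=0.9, STEPS=(3000, 4000), GAMMA=0.1,
    WARMUP_FACTOR=1.0 / 3, WARMUP_ITERS=500, WARMUP_METHOD="linear"))

logging.basicConfig(level=logging.INFO)
model = nn.Linear(10, 2)                       # stand-in for the real network
optimizer = make_optimizer(cfg, model, optim_type='Adam',
                           params_dict={'beta1': 0.9},
                           logger=logging.getLogger("solver_example"))
scheduler = make_lr_scheduler(cfg, optimizer)

for _ in range(3):                             # dummy iterations
    optimizer.zero_grad()
    model(torch.randn(4, 10)).sum().backward()
    optimizer.step()
    scheduler.step()
```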
#### File: RELEASE_ScaleNet_minimal/models/model_part_GeneralizedRCNNRuiMod_cameraCalib_sep.py
```python
import cv2
import logging
import torch
import torch.nn as nn
# from torchvision import models, transforms
from torchvision import transforms as T
from torchvision.transforms import functional as F
# from torchvision.models.densenet import model_urls
# model_urls['densenet161'] = model_urls['densenet161'].replace('https://', 'http://')
from maskrcnn_benchmark.structures.image_list import to_image_list
from maskrcnn_rui.modeling.backbone import build_backbone
from maskrcnn_rui.modeling.rpn.rpn import build_rpn
from maskrcnn_rui.roi_heads_rui.roi_heads import build_roi_h_heads, build_classifier_heads, build_roi_bbox_heads
# from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer
from termcolor import colored
from utils.logger import setup_logger, printer
from maskrcnn_benchmark.utils.comm import synchronize, get_rank
from torchsummary import summary
from utils.model_utils import CATEGORIES
# from models.model_part_pointnet import CamHPointNet
class GeneralizedRCNNRuiMod_cameraCalib(nn.Module):
"""
Main class for Generalized R-CNN. Currently supports boxes and masks.
It consists of three main parts:
- backbone
- rpn
- heads: takes the features + the proposals from the RPN and computes
detections / masks from it.
"""
def __init__(self, cfg, opt, modules_not_build=[], logger=None, rank=-1, confidence_threshold=0.7):
super().__init__()
self.backbone = build_backbone(cfg)
# self.rpn = build_rpn(cfg, self.backbone.out_channels)
self.if_roi_h_heads = 'roi_h_heads' not in modules_not_build
if self.if_roi_h_heads:
self.roi_h_heads = build_roi_h_heads(cfg, opt, self.backbone.out_channels)
self.if_classifier_heads = 'classifier_heads' not in modules_not_build
if self.if_classifier_heads:
self.classifier_heads = build_classifier_heads(cfg, opt, self.backbone.out_channels)
self.if_roi_bbox_heads = 'roi_bbox_heads' not in modules_not_build and opt.est_bbox
if self.if_roi_bbox_heads:
self.rpn = build_rpn(cfg, self.backbone.out_channels)
self.roi_bbox_heads = build_roi_bbox_heads(cfg, self.backbone.out_channels)
# self.if_camH_pointnet = 'camH_pointnet' not in modules_not_build and opt.pointnet_camH
# if self.if_camH_pointnet:
# self.camH_pointnet = CamHPointNet(in_channels=6, out_channels=cfg.MODEL.CLASSIFIER_HEADNUM_CLASSES.NUM_CLASSES)
self.cfg = cfg
self.opt = opt
self.device = self.cfg.MODEL.DEVICE
self.rank = rank
self.cpu_device = torch.device("cpu")
self.confidence_threshold = confidence_threshold
# self.transforms = self.build_transform()
self.palette = torch.tensor([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1])
self.CATEGORIES = CATEGORIES
# self.logger = logging.getLogger("GeneralizedRCNNRuiMod:in_model")
self.logger = logger
self.printer = printer(get_rank(), self.opt.debug)
if self.opt.debug:
self.logger.setLevel(logging.DEBUG)
else:
self.logger.setLevel(logging.INFO)
def prepare_images(self, inputCOCO_Image_maskrcnnTransform_list):
        # Transform so that the min size is no smaller than cfg.INPUT.MIN_SIZE_TRAIN, and the max size is no larger than cfg.INPUT.MAX_SIZE_TRAIN
# image_batch = [self.transforms(original_image) for original_image in original_image_batch_list]
image_batch = inputCOCO_Image_maskrcnnTransform_list
image_sizes_after_transform = [(image_after.shape[2], image_after.shape[1]) for image_after in image_batch]
# if self.training:
# for original_image, image_after, image_after_size in zip(inputCOCO_Image_maskrcnnTransform, image_batch, image_sizes_after_transform):
# self.printer.print('[generalized_rcnn_rui-prepare_images] Image sizes:', original_image.shape, '-->', image_after.shape, image_after_size)
# [Rui] PADDING
# convert to an ImageList, ``padded`` so that it is divisible by cfg.DATALOADER.SIZE_DIVISIBILITY
image_list = to_image_list(image_batch, self.cfg.DATALOADER.SIZE_DIVISIBILITY)
# print(self.cfg.INPUT.MIN_SIZE_TRAIN, self.cfg.INPUT.MAX_SIZE_TRAIN, self.cfg.INPUT.MIN_SIZE_TEST, self.cfg.INPUT.MAX_SIZE_TEST)
if self.training:
self.printer.print('PADDED: image_list.tensors, image_list.image_sizes (before pad):', image_list.tensors.shape, image_list.image_sizes)
image_list = image_list.to(self.device)
return image_list, image_sizes_after_transform
def forward(self, original_image_batch_list, list_of_bbox_list_cpu=None, list_of_oneLargeBbox_list=None, targets=None):
"""
Arguments:
images (list[Tensor] or ImageList): images to be processed
targets (list[BoxList]): ground-truth boxes present in the image (optional)
Returns:
result (list[BoxList] or dict[Tensor]): the output from the model.
During training, it returns a dict[Tensor] which contains the losses.
                During testing, it returns a list[BoxList] that contains additional fields
like `scores`, `labels` and `mask` (for Mask R-CNN models).
"""
if_print = self.training
# if self.training and (list_of_bbox_list_cpu is None or list_of_oneLargeBbox_list_cpu is None):
# raise ValueError("In training mode, targets should be passed")
# images = to_image_list(images)
images, image_sizes_after_transform = self.prepare_images(original_image_batch_list)
features = self.backbone(images.tensors)
## DETACH!!!!!!!!!!!
# features = tuple(feat.detach() for feat in list(features))
# if if_print:
# self.printer.print('[generalized_rcnn_rui] Feats:')
# for feat in features:
# self.printer.print(feat.shape)
return_dict = {'image_sizes_after_transform': image_sizes_after_transform}
if list_of_bbox_list_cpu is not None:
list_of_bbox_list = [bbox_list_array.to(self.device) for bbox_list_array in list_of_bbox_list_cpu]
list_of_bbox_list = [bbox_list.resize(size) for bbox_list, size in zip(list_of_bbox_list, image_sizes_after_transform)]
if if_print:
self.printer.print('[generalized_rcnn_rui] list_of_bbox_list:', list_of_bbox_list) # list([BoxList(num_boxes=1000, image_width=1066, image_height=800, mode=xyxy)])
roi_heads_output = self.roi_h_heads(features, list_of_bbox_list)
class_logits = roi_heads_output['class_logits']
# print('==roi_feats', roi_feats.shape, roi_feats.detach().cpu().numpy())
class_logits_softmax = nn.functional.softmax(class_logits, dim=1)
# print(class_logits[0], torch.sum(class_logits[0]))
bbox_lengths = [len(bbox_list) for bbox_list in list_of_bbox_list]
class_logits_softmax_list = class_logits_softmax.split(bbox_lengths)
return_dict.update({'class_person_H_logits_softmax_list': class_logits_softmax_list, 'class_person_H_logits_softmax': class_logits_softmax, 'class_person_H_logits': class_logits, 'bbox_lengths': bbox_lengths})
roi_feats = roi_heads_output['feats'] # [N_all, D]
return_dict.update({'roi_feats': roi_feats})
# Global feat with list_of_oneLargeBbox_list_cpu
if list_of_oneLargeBbox_list is not None:
list_of_oneLargeBbox_list = [bbox_list.resize(size) for bbox_list, size in zip(list_of_oneLargeBbox_list, image_sizes_after_transform)]
cls_outputs = self.classifier_heads(features, list_of_oneLargeBbox_list)
return_dict.update({'output_horizon': cls_outputs['output_horizon']['class_logits'], 'output_pitch': cls_outputs['output_pitch']['class_logits'], \
'output_roll': cls_outputs['output_roll']['class_logits'], 'output_vfov': cls_outputs['output_vfov']['class_logits']})
# if not self.opt.pointnet_camH:
# return_dict.update({'output_camH': cls_outputs['output_camH']['class_logits']})
# if self.if_camH_pointnet and bboxed_padded is not None:
if self.if_roi_bbox_heads:
proposals, proposal_losses = self.rpn(images, features, targets=None)
return_dict.update({'proposals': proposals})
if self.roi_bbox_heads:
x, predictions, detector_losses = self.roi_bbox_heads(features, proposals, targets)
return_dict.update({'x': x, 'predictions': predictions, 'detector_losses': detector_losses})
else:
# RPN-only models don't have roi_heads
x = features
result = proposals
detector_losses = {}
if self.training:
return_dict.update(detector_losses)
return_dict.update(proposal_losses)
return return_dict
def post_process(self, predictions, image_sizes_after_transform):
predictions = [o.to(self.cpu_device) for o in predictions]
# always single image is passed at a time
# prediction = predictions[0] # BoxList(num_boxes=73, image_width=1066, image_height=800, mode=xyxy)
prediction_list = []
prediction_list_ori = []
for size, prediction in zip(image_sizes_after_transform, predictions):
# reshape prediction (a BoxList) into the original image size
# height, width = original_image.shape[:-1]
prediction_list_ori.append(prediction)
prediction = prediction.resize(size)
if prediction.has_field("mask"):
# if we have masks, paste the masks in the right position
# in the image, as defined by the bounding boxes
masks = prediction.get_field("mask")
# always single image is passed at a time
masks = self.masker([masks], [prediction])[0]
prediction.add_field("mask", masks)
prediction_list.append(prediction)
return prediction_list, prediction_list_ori
def select_and_vis_bbox(self, prediction_list, prediction_list_ori, image_batch_list):
top_prediction_list = [self.select_top_predictions(prediction) for prediction in prediction_list]
top_prediction_list_ori = [self.select_top_predictions(prediction) for prediction in prediction_list_ori]
result_list = []
for image, top_predictions in zip(image_batch_list, top_prediction_list):
result = image.copy()
# if self.show_mask_heatmaps:
# return self.create_mask_montage(result, top_predictions)
result = self.overlay_boxes(result, top_predictions)
if self.cfg.MODEL.MASK_ON:
result = self.overlay_mask(result, top_predictions)
if self.cfg.MODEL.KEYPOINT_ON:
result = self.overlay_keypoints(result, top_predictions)
result = self.overlay_class_names(result, top_predictions)
result_list.append(result)
return result_list, top_prediction_list
def select_top_predictions(self, predictions):
"""
        Select only predictions which have a `score` > self.confidence_threshold,
        and return them in descending order of score.
Arguments:
predictions (BoxList): the result of the computation by the model.
It should contain the field `scores`.
Returns:
prediction (BoxList): the detected objects. Additional information
of the detection properties can be found in the fields of
the BoxList via `prediction.fields()`
"""
scores = predictions.get_field("scores")
keep = torch.nonzero(scores > self.confidence_threshold).squeeze(1)
predictions = predictions[keep]
scores = predictions.get_field("scores")
_, idx = scores.sort(0, descending=True)
return predictions[idx]
def compute_colors_for_labels(self, labels):
"""
Simple function that adds fixed colors depending on the class
"""
colors = labels[:, None] * self.palette
colors = (colors % 255).numpy().astype("uint8")
return colors
def overlay_boxes(self, image, predictions):
"""
Adds the predicted boxes on top of the image
Arguments:
image (np.ndarray): an image as returned by OpenCV
predictions (BoxList): the result of the computation by the model.
It should contain the field `labels`.
"""
labels = predictions.get_field("labels")
boxes = predictions.bbox
colors = self.compute_colors_for_labels(labels).tolist()
for box, color in zip(boxes, colors):
box = box.to(torch.int64)
top_left, bottom_right = box[:2].tolist(), box[2:].tolist()
image = cv2.rectangle(
image, tuple(top_left), tuple(bottom_right), tuple(color), 1
)
return image
def overlay_class_names(self, image, predictions):
"""
Adds detected class names and scores in the positions defined by the
top-left corner of the predicted bounding box
Arguments:
image (np.ndarray): an image as returned by OpenCV
predictions (BoxList): the result of the computation by the model.
It should contain the field `scores` and `labels`.
"""
scores = predictions.get_field("scores").tolist()
labels = predictions.get_field("labels").tolist()
labels = [self.CATEGORIES[int(i)] for i in labels]
boxes = predictions.bbox
template = "{}: {:.2f}"
for box, score, label in zip(boxes, scores, labels):
x, y = box[:2]
s = template.format(label, score)
cv2.putText(
image, s, (x, y), cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255), 1
)
return image
class Resize(object):
def __init__(self, min_size, max_size):
self.min_size = min_size
self.max_size = max_size
# modified from torchvision to add support for max size
def get_size(self, image_size):
w, h = image_size
size = self.min_size
max_size = self.max_size
if max_size is not None:
min_original_size = float(min((w, h)))
max_original_size = float(max((w, h)))
if max_original_size / min_original_size * size > max_size:
size = int(round(max_size * min_original_size / max_original_size))
if (w <= h and w == size) or (h <= w and h == size):
return (h, w)
if w < h:
ow = size
oh = int(size * h / w)
else:
oh = size
ow = int(size * w / h)
return (oh, ow)
def __call__(self, image):
size = self.get_size(image.size)
image = F.resize(image, size)
return image
```
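As a quick illustration of the `Resize` helper at the end of the file (a sketch, not part of the repo): with `min_size=800` and `max_size=1333` the short side is scaled to 800 unless that would push the long side past 1333, in which case the long side is capped instead.
```python
# Illustrative only: exercises Resize.get_size with two (W, H) inputs.
resize = Resize(min_size=800, max_size=1333)

print(resize.get_size((640, 480)))    # (800, 1066): short side 480 scaled up to 800
print(resize.get_size((2000, 500)))   # (333, 1332): long side capped near 1333
```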
#### File: nn/modules/conv.py
```python
from torch import nn
from ..init import init_bn
class Conv1d(nn.Module):
"""Applies a 1D convolution over an input signal composed of several input planes
optionally followed by batch normalization and relu activation.
Attributes:
conv (nn.Module): convolution module
bn (nn.Module): batch normalization module
relu (nn.Module, optional): relu activation module
"""
def __init__(self, in_channels, out_channels, kernel_size,
relu=True, bn=True, bn_momentum=0.1, **kwargs):
super(Conv1d, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, bias=(not bn), **kwargs)
self.bn = nn.BatchNorm1d(out_channels, momentum=bn_momentum) if bn else None
self.relu = nn.ReLU(inplace=True) if relu else None
self.init_weights()
def forward(self, x):
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
if self.relu is not None:
x = self.relu(x)
return x
def init_weights(self, init_fn=None):
"""default initialization"""
if init_fn is not None:
init_fn(self.conv)
if self.bn is not None:
init_bn(self.bn)
class Conv2d(nn.Module):
"""Applies a 2D convolution (optionally with batch normalization and relu activation)
over an input signal composed of several input planes.
Attributes:
conv (nn.Module): convolution module
bn (nn.Module): batch normalization module
relu (nn.Module, optional): relu activation module
"""
def __init__(self, in_channels, out_channels, kernel_size,
relu=True, bn=True, bn_momentum=0.1, **kwargs):
super(Conv2d, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, bias=(not bn), **kwargs)
self.bn = nn.BatchNorm2d(out_channels, momentum=bn_momentum) if bn else None
self.relu = nn.ReLU(inplace=True) if relu else None
self.init_weights()
def forward(self, x):
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
if self.relu is not None:
x = self.relu(x)
return x
def init_weights(self, init_fn=None):
"""default initialization"""
if init_fn is not None:
init_fn(self.conv)
if self.bn is not None:
init_bn(self.bn)
```
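A short usage sketch for the wrappers above (illustrative; only the shapes are being demonstrated): each layer bundles convolution, optional batch norm, and optional ReLU.
```python
# Illustrative usage of the Conv1d wrapper defined above.
import torch

layer = Conv1d(in_channels=3, out_channels=64, kernel_size=1)   # conv -> BN -> ReLU
points = torch.randn(8, 3, 1024)    # 8 samples, 3 channels, 1024 points each
feats = layer(points)
print(feats.shape)                  # torch.Size([8, 64, 1024]); non-negative after ReLU
```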
#### File: nn/modules/mlp.py
```python
from torch import nn
import torch.nn.functional as F
from .conv import Conv1d, Conv2d
from .linear import FC
class MLP(nn.ModuleList):
def __init__(self,
in_channels,
mlp_channels,
dropout_prob=0.0,
bn=True,
bn_momentum=0.1):
"""Multilayer perceptron
Args:
in_channels (int): the number of channels of input tensor
mlp_channels (tuple): the numbers of channels of fully connected layers
dropout_prob (float or None): dropout probability
bn (bool): whether to use batch normalization
bn_momentum (float)
"""
super(MLP, self).__init__()
self.in_channels = in_channels
self.out_channels = mlp_channels[-1]
for ind, out_channels in enumerate(mlp_channels):
self.append(FC(in_channels, out_channels, relu=True, bn=bn, bn_momentum=bn_momentum))
in_channels = out_channels
        # Dropout is applied functionally in forward() rather than appended as a module,
        # since this class is a ModuleList and forward() iterates over all appended children.
assert dropout_prob >= 0.0
self.dropout_prob = dropout_prob
def forward(self, x):
for module in self:
assert isinstance(module, FC)
x = module(x)
if self.training and self.dropout_prob > 0.0:
x = F.dropout(x, p=self.dropout_prob, training=True)
return x
def init_weights(self, init_fn=None):
for module in self:
assert isinstance(module, FC)
module.init_weights(init_fn)
def extra_repr(self):
return 'dropout_prob={}'.format(self.dropout_prob) if self.dropout_prob > 0.0 else ''
class SharedMLP(nn.ModuleList):
def __init__(self,
in_channels,
mlp_channels,
ndim=1,
dropout_prob=0.0,
bn=True,
bn_momentum=0.1):
"""Multilayer perceptron shared on resolution (1D or 2D)
Args:
in_channels (int): the number of channels of input tensor
mlp_channels (tuple): the numbers of channels of fully connected layers
ndim (int): the number of dimensions to share
dropout_prob (float or None): dropout ratio
bn (bool): whether to use batch normalization
bn_momentum (float)
"""
super(SharedMLP, self).__init__()
self.in_channels = in_channels
self.out_channels = mlp_channels[-1]
self.ndim = ndim
if ndim == 1:
mlp_module = Conv1d
elif ndim == 2:
mlp_module = Conv2d
else:
raise ValueError('SharedMLP only supports ndim=(1, 2).')
for ind, out_channels in enumerate(mlp_channels):
self.append(mlp_module(in_channels, out_channels, 1, relu=True, bn=bn, bn_momentum=bn_momentum))
in_channels = out_channels
        # Dropout is applied functionally in forward() rather than appended as a module,
        # since this class is a ModuleList and forward() iterates over all appended children.
assert dropout_prob >= 0.0
self.dropout_prob = dropout_prob
def forward(self, x):
for module in self:
assert isinstance(module, (Conv1d, Conv2d))
x = module(x)
if self.training and self.dropout_prob > 0.0:
if self.ndim == 1:
x = F.dropout(x, p=self.dropout_prob, training=True)
elif self.ndim == 2:
x = F.dropout2d(x, p=self.dropout_prob, training=True)
else:
raise ValueError('SharedMLP only supports ndim=(1, 2).')
return x
def init_weights(self, init_fn=None):
for module in self:
assert isinstance(module, (Conv1d, Conv2d))
module.init_weights(init_fn)
```
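A companion sketch contrasting `MLP` (fully connected, per-sample) with `SharedMLP` (1x1 convolutions shared across points). The shapes follow from the modules above, assuming `FC` behaves like Linear + BN + ReLU as elsewhere in this package.
```python
# Illustrative shape check for MLP vs. SharedMLP.
import torch

mlp = MLP(in_channels=128, mlp_channels=(256, 64), dropout_prob=0.5)
print(mlp(torch.randn(16, 128)).shape)          # torch.Size([16, 64])

shared = SharedMLP(in_channels=3, mlp_channels=(64, 128), ndim=1)
print(shared(torch.randn(16, 3, 2048)).shape)   # torch.Size([16, 128, 2048])
```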
#### File: RELEASE_ScaleNet_minimal/utils/data_utils.py
```python
import torch
# import torch.nn as nn
# import torch.optim as optim
import numpy as np
# import torchvision
from torchvision import datasets, models, transforms
# import argparse
# import time
# import os, sys
# import copy
# from torch.optim.lr_scheduler import MultiStepLR, ReduceLROnPlateau
# from tensorboardX import SummaryWriter
from torch.utils.tensorboard import SummaryWriter
# from tqdm import tqdm
# from termcolor import colored
# from statistics import mean
# from dataset import COCO2017Scale, yc_bins_centers, collate_fn_padd, roll_bins_centers, pitch_bins_low, pitch_bins_high, roll_bins_centers, vfov_bins_centers, distortion_bins_centers, human_bins
# from dataset import pitch_bins_v0_wide
# from compute_vectors import generate_field
#
# from torchsummary import summary as model_summary
# import glob
import logging
# from dataset import my_collate
# from dataset_combine import my_collate_combine
# from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.utils.comm import get_world_size
from maskrcnn_rui.data.build import make_data_sampler
from maskrcnn_rui.data import samplers
from utils.train_utils import cycle
class RandomSaturation:
def __call__(self, sample):
if np.random.rand() < 0.75:
saturation_amt = np.random.triangular(-1, 0, 1)
if np.random.rand() < 0.04: # Grayscale
saturation_amt = 1
im = sample[0]
im = torch.clamp(im + (torch.max(im, 0)[0] - im) * saturation_amt, 0, 1)
sample[0] = im
return sample
perturb = transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.1)
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_trnfs_yannick = transforms.Compose([
transforms.Resize((224,224)),
perturb,
transforms.ToTensor(),
RandomSaturation(),
normalize,
])
eval_trnfs_yannick = transforms.Compose([
transforms.Resize((224,224)),
transforms.ToTensor(),
normalize,
])
def make_batch_data_sampler(
dataset, sampler, images_per_batch, num_iters=None, start_iter=0, drop_last=True
):
# if aspect_grouping:
# if not isinstance(aspect_grouping, (list, tuple)):
# aspect_grouping = [aspect_grouping]
# aspect_ratios = _compute_aspect_ratios(dataset)
# group_ids = _quantize(aspect_ratios, aspect_grouping)
# batch_sampler = samplers.GroupedBatchSampler(
# sampler, group_ids, images_per_batch, drop_uneven=False
# )
# else:
batch_sampler = torch.utils.data.sampler.BatchSampler(
sampler, images_per_batch, drop_last=drop_last
)
if num_iters is not None:
batch_sampler = samplers.IterationBasedBatchSampler(
batch_sampler, num_iters, start_iter
)
return batch_sampler
def make_data_loader(cfg, dataset, is_train=True, is_distributed=False, start_iter=0, is_for_period=False, logger=None, override_shuffle=None, collate_fn=None, batch_size_override=-1):
num_gpus = get_world_size()
if is_train:
images_per_batch = cfg.SOLVER.IMS_PER_BATCH
assert (
images_per_batch % num_gpus == 0
), "SOLVER.IMS_PER_BATCH ({}) must be divisible by the number of GPUs ({}) used.".format(
images_per_batch, num_gpus)
images_per_gpu = images_per_batch // num_gpus if batch_size_override==-1 else batch_size_override
shuffle = True
num_iters = cfg.SOLVER.MAX_ITER
drop_last = True
else:
images_per_batch = cfg.TEST.IMS_PER_BATCH
assert (
images_per_batch % num_gpus == 0
), "TEST.IMS_PER_BATCH ({}) must be divisible by the number of GPUs ({}) used.".format(
images_per_batch, num_gpus)
images_per_gpu = images_per_batch // num_gpus if batch_size_override==-1 else batch_size_override
# shuffle = False if not is_distributed else True
shuffle = False
num_iters = None
start_iter = 0
drop_last = False
if override_shuffle is not None:
shuffle = override_shuffle
sampler = make_data_sampler(dataset, shuffle, is_distributed)
batch_sampler = make_batch_data_sampler(
dataset, sampler, images_per_gpu, None, start_iter, drop_last=drop_last
)
# collator = BBoxAugCollator() if not is_train and cfg.TEST.BBOX_AUG.ENABLED else \
# BatchCollator(cfg.DATALOADER.SIZE_DIVISIBILITY)
num_workers = cfg.DATALOADER.NUM_WORKERS
data_loader = torch.utils.data.DataLoader(
dataset,
num_workers=num_workers,
batch_sampler=batch_sampler,
collate_fn=collate_fn,
)
logger.info('++++++[train_utils] len(dataset) %d, len(sampler) %d, len(batch_sampler) %d, len(data_loader) %d, is_train %s, is_distributed %s:' % \
(len(dataset), len(sampler), len(batch_sampler), len(data_loader), is_train, is_distributed))
return data_loader
def iterator_coco_combine_alternate(iterator_A, iterator_B):
flag = True
# if len(iterator_A) > len(iterator_B):
# iterator_B = cycle(iterator_B)
# else:
# iterator_A = cycle(iterator_A)
iterator_A = cycle(iterator_A)
iterator_B = cycle(iterator_B)
iterator_A = iter(iterator_A)
iterator_B = iter(iterator_B)
# result = 0
# while result is not None:
while True:
if flag:
flag = not flag
yield(next(iterator_A))
else:
flag = not flag
yield(next(iterator_B))
```
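The alternating iterator at the end of this file simply interleaves two cycled loaders. A toy version of the same behavior, using `itertools.cycle` in place of the project's `cycle` helper (an assumption for illustration):
```python
# Toy demonstration of the interleaving done by iterator_coco_combine_alternate.
from itertools import cycle, islice

def alternate(seq_a, seq_b):
    it_a, it_b = iter(cycle(seq_a)), iter(cycle(seq_b))
    while True:
        yield next(it_a)
        yield next(it_b)

print(list(islice(alternate(['a1', 'a2'], ['b1', 'b2', 'b3']), 8)))
# ['a1', 'b1', 'a2', 'b2', 'a1', 'b3', 'a2', 'b1']
```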
|
{
"source": "Jerrypiglet/Total3DUnderstanding",
"score": 2
}
|
#### File: Total3DUnderstanding/configs/pix3d_config.py
```python
class Config(object):
def __init__(self, dataset):
"""
Configuration of data paths.
"""
self.dataset = dataset
self.root_path = './data/' + self.dataset
self.train_split = self.root_path + '/splits/train.json'
self.test_split = self.root_path + '/splits/test.json'
self.metadata_path = self.root_path + '/metadata'
self.train_test_data_path = self.root_path + '/train_test_data'
if dataset == 'pix3d':
self.metadata_file = self.metadata_path + '/pix3d.json'
self.classnames = ['misc',
'bed', 'bookcase', 'chair', 'desk', 'sofa',
'table', 'tool', 'wardrobe']
number_pnts_on_template = 2562
neighbors = 30
```
#### File: Total3DUnderstanding/models/eval_metrics.py
```python
from shapely.geometry.polygon import Polygon
def get_iou_cuboid(cu1, cu2):
"""
    Calculate the Intersection over Union (IoU) of two 3D cuboids.
Parameters
----------
cu1 : numpy array, 8x3
cu2 : numpy array, 8x3
Returns
-------
float
in [0, 1]
"""
# 2D projection on the horizontal plane (z-x plane)
polygon2D_1 = Polygon(
[(cu1[0][2], cu1[0][0]), (cu1[1][2], cu1[1][0]), (cu1[2][2], cu1[2][0]), (cu1[3][2], cu1[3][0])])
polygon2D_2 = Polygon(
[(cu2[0][2], cu2[0][0]), (cu2[1][2], cu2[1][0]), (cu2[2][2], cu2[2][0]), (cu2[3][2], cu2[3][0])])
# 2D intersection area of the two projections.
intersect_2D = polygon2D_1.intersection(polygon2D_2).area
# the volume of the intersection part of cu1 and cu2
inter_vol = intersect_2D * max(0.0, min(cu1[0][1], cu2[0][1]) - max(cu1[4][1], cu2[4][1]))
# the volume of cu1 and cu2
vol1 = polygon2D_1.area * (cu1[0][1]-cu1[4][1])
vol2 = polygon2D_2.area * (cu2[0][1]-cu2[4][1])
# return 3D IoU
return inter_vol / (vol1 + vol2 - inter_vol)
```
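A hedged worked example for `get_iou_cuboid`. The corner layout (indices 0-3 on the top face, 4-7 on the bottom face) is inferred from how the function indexes its inputs, so treat this as a sketch: two unit cubes offset by half a unit along x overlap in half their volume, giving IoU = 0.5 / (1 + 1 - 0.5) = 1/3.
```python
# Worked example for get_iou_cuboid; corner ordering (top face 0-3, bottom 4-7)
# is inferred from the indexing in the function above.
import numpy as np

def unit_cube(x_offset=0.0):
    top = [[0, 1, 0], [1, 1, 0], [1, 1, 1], [0, 1, 1]]      # y = 1
    bottom = [[0, 0, 0], [1, 0, 0], [1, 0, 1], [0, 0, 1]]   # y = 0
    cube = np.array(top + bottom, dtype=float)
    cube[:, 0] += x_offset
    return cube

print(get_iou_cuboid(unit_cube(), unit_cube()))      # 1.0
print(get_iou_cuboid(unit_cube(), unit_cube(0.5)))   # 0.333...
```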
#### File: Total3DUnderstanding/models/testing.py
```python
class BaseTester(object):
'''
Base tester for all networks.
'''
def __init__(self, cfg, net, device=None):
self.cfg = cfg
self.net = net
self.device = device
def visualize_step(self, *args, **kwargs):
''' Performs a visualization step.
'''
raise NotImplementedError
def get_metric_values(self, est_data, gt_data):
        ''' Performs an evaluation step.
'''
# camera orientation error
raise NotImplementedError
```
#### File: Total3DUnderstanding/net_utils/libs.py
```python
import torch
import torch.nn as nn
import copy
import numpy as np
from torch.nn import functional as F
from copy import deepcopy
def to_dict_tensor(dicts, if_cuda):
'''
    Convert the values of a dict to torch tensors (doubles are cast to float).
    :param dicts: dict whose values are array-like
    :param if_cuda: if True, move the tensors to GPU
    :return: a shallow copy of the dict with tensor values
'''
dicts_new = copy.copy(dicts)
for key, value in dicts_new.items():
value_new = torch.from_numpy(np.array(value))
if value_new.type() == 'torch.DoubleTensor':
value_new = value_new.float()
if if_cuda:
value_new = value_new.cuda()
dicts_new[key] = value_new
return dicts_new
def num_from_bins(bins, cls, reg):
"""
:param bins: b x 2 tensors
:param cls: b long tensors
:param reg: b tensors
:return: bin_center: b tensors
"""
bin_width = (bins[0][1] - bins[0][0])
bin_center = (bins[cls, 0] + bins[cls, 1]) / 2
return bin_center + reg * bin_width
def R_from_yaw_pitch_roll(yaw, pitch, roll):
'''
get rotation matrix from predicted camera yaw, pitch, roll angles.
:param yaw: batch_size x 1 tensor
:param pitch: batch_size x 1 tensor
:param roll: batch_size x 1 tensor
:return: camera rotation matrix
'''
n = yaw.size(0)
R = torch.zeros((n, 3, 3)).cuda()
R[:, 0, 0] = torch.cos(yaw) * torch.cos(pitch)
R[:, 0, 1] = torch.sin(yaw) * torch.sin(roll) - torch.cos(yaw) * torch.cos(roll) * torch.sin(pitch)
R[:, 0, 2] = torch.cos(roll) * torch.sin(yaw) + torch.cos(yaw) * torch.sin(pitch) * torch.sin(roll)
R[:, 1, 0] = torch.sin(pitch)
R[:, 1, 1] = torch.cos(pitch) * torch.cos(roll)
R[:, 1, 2] = - torch.cos(pitch) * torch.sin(roll)
R[:, 2, 0] = - torch.cos(pitch) * torch.sin(yaw)
R[:, 2, 1] = torch.cos(yaw) * torch.sin(roll) + torch.cos(roll) * torch.sin(yaw) * torch.sin(pitch)
R[:, 2, 2] = torch.cos(yaw) * torch.cos(roll) - torch.sin(yaw) * torch.sin(pitch) * torch.sin(roll)
return R
def get_rotation_matrix_gt(bins_tensor, pitch_cls_gt, pitch_reg_gt, roll_cls_gt, roll_reg_gt):
'''
get rotation matrix from predicted camera pitch, roll angles.
'''
pitch = num_from_bins(bins_tensor['pitch_bin'], pitch_cls_gt, pitch_reg_gt)
roll = num_from_bins(bins_tensor['roll_bin'], roll_cls_gt, roll_reg_gt)
r_ex = R_from_yaw_pitch_roll(torch.zeros_like(pitch), pitch, roll)
return r_ex
def get_mask_status(masks, split):
obj_status_flag = []
for batch_id, interval in enumerate(split):
for obj_id in range(interval[1]-interval[0]):
if masks[batch_id][obj_id]:
obj_status_flag.append(1)
else:
obj_status_flag.append(0)
return np.array(obj_status_flag)
def layout_basis_from_ori(ori):
"""
:param ori: orientation angle
    :return: basis: n x 3 x 3 tensor
the basis in 3D coordinates
"""
n = ori.size(0)
basis = torch.zeros((n, 3, 3)).cuda()
basis[:, 0, 0] = torch.sin(ori)
basis[:, 0, 2] = torch.cos(ori)
basis[:, 1, 1] = 1
basis[:, 2, 0] = -torch.cos(ori)
basis[:, 2, 2] = torch.sin(ori)
return basis
def get_corners_of_bb3d(basis, coeffs, centroid):
"""
:param basis: n x 3 x 3 tensor
:param coeffs: n x 3 tensor
:param centroid: n x 3 tensor
:return: corners n x 8 x 3 tensor
"""
n = basis.size(0)
corners = torch.zeros((n, 8, 3)).cuda()
coeffs = coeffs.view(n, 3, 1).expand(-1, -1, 3)
centroid = centroid.view(n, 1, 3).expand(-1, 8, -1)
corners[:, 0, :] = - basis[:, 0, :] * coeffs[:, 0, :] + basis[:, 1, :] * coeffs[:, 1, :] - basis[:, 2, :] * coeffs[:, 2, :]
corners[:, 1, :] = - basis[:, 0, :] * coeffs[:, 0, :] + basis[:, 1, :] * coeffs[:, 1, :] + basis[:, 2, :] * coeffs[:, 2, :]
corners[:, 2, :] = basis[:, 0, :] * coeffs[:, 0, :] + basis[:, 1, :] * coeffs[:, 1, :] + basis[:, 2, :] * coeffs[:, 2, :]
corners[:, 3, :] = basis[:, 0, :] * coeffs[:, 0, :] + basis[:, 1, :] * coeffs[:, 1, :] - basis[:, 2, :] * coeffs[:, 2, :]
corners[:, 4, :] = - basis[:, 0, :] * coeffs[:, 0, :] - basis[:, 1, :] * coeffs[:, 1, :] - basis[:, 2, :] * coeffs[:, 2, :]
corners[:, 5, :] = - basis[:, 0, :] * coeffs[:, 0, :] - basis[:, 1, :] * coeffs[:, 1, :] + basis[:, 2, :] * coeffs[:, 2, :]
corners[:, 6, :] = basis[:, 0, :] * coeffs[:, 0, :] - basis[:, 1, :] * coeffs[:, 1, :] + basis[:, 2, :] * coeffs[:, 2, :]
corners[:, 7, :] = basis[:, 0, :] * coeffs[:, 0, :] - basis[:, 1, :] * coeffs[:, 1, :] - basis[:, 2, :] * coeffs[:, 2, :]
corners = corners + centroid
return corners
def get_layout_bdb_sunrgbd(bins_tensor, lo_ori_reg, lo_ori_cls, centroid_reg, coeffs_reg):
"""
get the eight corners of 3D bounding box
:param bins_tensor:
:param lo_ori_reg: layout orientation regression results
:param lo_ori_cls: layout orientation classification results
:param centroid_reg: layout centroid regression results
:param coeffs_reg: layout coefficients regression results
:return: bdb: b x 8 x 3 tensor: the bounding box of layout in layout system.
"""
ori_reg = torch.gather(lo_ori_reg, 1, lo_ori_cls.view(lo_ori_cls.size(0), 1).expand(lo_ori_cls.size(0), 1)).squeeze(1)
ori = num_from_bins(bins_tensor['layout_ori_bin'], lo_ori_cls, ori_reg)
basis = layout_basis_from_ori(ori)
centroid_reg = centroid_reg + bins_tensor['layout_centroid_avg']
coeffs_reg = (coeffs_reg + 1) * bins_tensor['layout_coeffs_avg']
bdb = get_corners_of_bb3d(basis, coeffs_reg, centroid_reg)
return bdb
def get_bdb_form_from_corners(corners_orig, mask_status):
corners = corners_orig[mask_status.nonzero()]
vec_0 = (corners[:, 2, :] - corners[:, 1, :]) / 2.
vec_1 = (corners[:, 0, :] - corners[:, 4, :]) / 2.
vec_2 = (corners[:, 1, :] - corners[:, 0, :]) / 2.
coeffs_0 = torch.norm(vec_0, dim=1)
coeffs_1 = torch.norm(vec_1, dim=1)
coeffs_2 = torch.norm(vec_2, dim=1)
coeffs = torch.cat([coeffs_0.unsqueeze(-1), coeffs_1.unsqueeze(-1), coeffs_2.unsqueeze(-1)], -1)
centroid = (corners[:, 0, :] + corners[:, 6, :]) / 2.
basis_0 = torch.mm(torch.diag(1 / coeffs_0), vec_0)
basis_1 = torch.mm(torch.diag(1 / coeffs_1), vec_1)
basis_2 = torch.mm(torch.diag(1 / coeffs_2), vec_2)
basis = torch.cat([basis_0.unsqueeze(1), basis_1.unsqueeze(1), basis_2.unsqueeze(1)], dim=1)
return {'basis': basis, 'coeffs': coeffs, 'centroid': centroid}
def recover_points_to_world_sys(bdb3D, mesh_coordinates):
'''
Get 3D point cloud from mesh with estimated position and orientation.
:param bdb3D: 3D object bounding boxes with keys ['coeffs', 'basis', 'centroid'].
:param mesh_coordinates: Number_of_objects x Number_of_points x 3.
:return: points on world system
'''
mesh_coordinates = mesh_coordinates.transpose(1, 2)
mesh_coordinates_in_world_sys = []
for obj_idx, mesh_coordinate in enumerate(mesh_coordinates):
mesh_center = (mesh_coordinate.max(dim=0)[0] + mesh_coordinate.min(dim=0)[0]) / 2.
mesh_center = mesh_center.detach()
mesh_coordinate = mesh_coordinate - mesh_center
mesh_coef = (mesh_coordinate.max(dim=0)[0] - mesh_coordinate.min(dim=0)[0]) / 2.
mesh_coef = mesh_coef.detach()
mesh_coordinate = torch.mm(torch.mm(mesh_coordinate, torch.diag(1. / mesh_coef)),
torch.diag(bdb3D['coeffs'][obj_idx]))
# set orientation
mesh_coordinate = torch.mm(mesh_coordinate, bdb3D['basis'][obj_idx])
# move to center
mesh_coordinate = mesh_coordinate + bdb3D['centroid'][obj_idx].view(1, 3)
mesh_coordinates_in_world_sys.append(mesh_coordinate.unsqueeze(0))
return torch.cat(mesh_coordinates_in_world_sys, dim=0)
def get_rotation_matix_result(bins_tensor, pitch_cls_gt, pitch_reg_result, roll_cls_gt, roll_reg_result):
'''
get rotation matrix from predicted camera pitch, roll angles.
'''
pitch_result = torch.gather(pitch_reg_result, 1,
pitch_cls_gt.view(pitch_cls_gt.size(0), 1).expand(pitch_cls_gt.size(0), 1)).squeeze(1)
roll_result = torch.gather(roll_reg_result, 1,
roll_cls_gt.view(roll_cls_gt.size(0), 1).expand(roll_cls_gt.size(0), 1)).squeeze(1)
pitch = num_from_bins(bins_tensor['pitch_bin'], pitch_cls_gt, pitch_result)
roll = num_from_bins(bins_tensor['roll_bin'], roll_cls_gt, roll_result)
cam_R = R_from_yaw_pitch_roll(torch.zeros_like(pitch), pitch, roll)
return cam_R
def rgb_to_world(p, depth, K, cam_R, split):
"""
Given pixel location and depth, camera parameters, to recover world coordinates.
:param p: n x 2 tensor
    :param depth: n tensor
    :param K: b x 3 x 3 tensor
:param cam_R: b x 3 x 3 tensor
:param split: b x 2 split tensor.
:return: p_world_right: n x 3 tensor in right hand coordinate
"""
n = p.size(0)
K_ex = torch.cat([K[index].expand(interval[1] - interval[0], -1, -1) for index, interval in enumerate(split)], 0)
cam_R_ex = torch.cat([cam_R[index].expand(interval[1] - interval[0], -1, -1) for index, interval in enumerate(split)], 0)
x_temp = (p[:, 0] - K_ex[:, 0, 2]) / K_ex[:, 0, 0]
y_temp = (p[:, 1] - K_ex[:, 1, 2]) / K_ex[:, 1, 1]
z_temp = 1
ratio = depth / torch.sqrt(x_temp ** 2 + y_temp ** 2 + z_temp ** 2)
x_cam = x_temp * ratio
y_cam = y_temp * ratio
z_cam = z_temp * ratio
# transform to toward-up-right coordinate system
x3 = z_cam
y3 = -y_cam
z3 = x_cam
p_cam = torch.stack((x3, y3, z3), 1).view(n, 3, 1) # n x 3
p_world = torch.bmm(cam_R_ex, p_cam)
return p_world
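# Quick geometric sanity check (illustrative, inferred from the math above): for a
# pixel at the principal point, x_temp = y_temp = 0, so ratio = depth and the camera
# point is [0, 0, depth]; after the toward-up-right swap this becomes [depth, 0, 0],
# i.e. the world point is `depth` times the first column of cam_R.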
def basis_from_ori(ori):
"""
:param ori: torch tensor
the orientation angle
    :return: basis: n x 3 x 3 tensor
the basis in 3D coordinates
"""
n = ori.size(0)
basis = torch.zeros((n, 3, 3)).cuda()
basis[:, 0, 0] = torch.cos(ori)
basis[:, 0, 2] = -torch.sin(ori)
basis[:, 1, 1] = 1
basis[:, 2, 0] = torch.sin(ori)
basis[:, 2, 2] = torch.cos(ori)
return basis
def get_bdb_3d_result(bins_tensor, ori_cls_gt, ori_reg_result, centroid_cls_gt, centroid_reg_result, size_cls_gt,
size_reg_result, P, K, cam_R, split):
# coeffs
size_cls_gt = torch.argmax(size_cls_gt, 1)
coeffs = (size_reg_result + 1) * bins_tensor['avg_size'][size_cls_gt, :] # b x 3
# centroid
centroid_reg = torch.gather(centroid_reg_result, 1,
centroid_cls_gt.view(centroid_cls_gt.size(0), 1).expand(centroid_cls_gt.size(0),
1)).squeeze(1)
centroid_depth = num_from_bins(bins_tensor['centroid_bin'], centroid_cls_gt, centroid_reg)
centroid = rgb_to_world(P, centroid_depth, K, cam_R, split) # b x 3
# basis
ori_reg = torch.gather(ori_reg_result, 1,
ori_cls_gt.view(ori_cls_gt.size(0), 1).expand(ori_cls_gt.size(0), 1)).squeeze(1)
ori = num_from_bins(bins_tensor['ori_bin'], ori_cls_gt, ori_reg)
basis = basis_from_ori(ori)
bdb = get_corners_of_bb3d(basis, coeffs, centroid)
bdb_form = {'basis': basis, 'coeffs': coeffs, 'centroid': centroid}
return bdb, bdb_form
def project_3d_points_to_2d(points3d, cam_R_ex, K_ex):
"""
project 3d points to 2d
:param points3d: n x 8 x 3 tensor; n equals to number of boxes.
:param cam_R_ex: n x 3 x 3 tensor
:param K_ex: n x 3 x 3 tensor
    :return: points_2d: n x 8 x 2 tensor
"""
n = points3d.size(0)
points_cam_ori = torch.bmm(points3d, cam_R_ex)
T_cam = torch.FloatTensor([[0., 0., 1.], [0., -1., 0.], [1., 0., 0.]]).expand(n, -1, -1).cuda()
points_cam = torch.bmm(points_cam_ori, torch.transpose(T_cam, 1, 2))
points_cam_positive = torch.transpose(
torch.stack((points_cam[:, :, 0], points_cam[:, :, 1], F.threshold(points_cam[:, :, 2], 0.0001, 0.0001)), 2), 1,
2) # b x 3 x 8
points_2d_ori = torch.transpose(torch.bmm(K_ex, points_cam_positive), 1, 2) # b x 8 x 3
points_2d = torch.stack(
(points_2d_ori[:, :, 0] / points_2d_ori[:, :, 2], points_2d_ori[:, :, 1] / points_2d_ori[:, :, 2]),
2) # n x 8 x 2
return points_2d
def get_bdb_2d_result(bdb3d, cam_R, K, split):
"""
    :param bdb3d: n x 8 x 3 tensor: n equals to the number of objects in all batches.
    :param cam_R: b x 3 x 3 tensor: b - batch number
    :param K: b x 3 x 3 tensor: b - batch number
    :param split: b x 2 tensor of per-image object index intervals
    :return: n x 4 tensor of projected 2D boxes (x1, y1, x2, y2), normalized by the principal point
"""
n = bdb3d.size(0)
# convert K to n x 3 x 3
K_ex = torch.cat([K[index].expand(interval[1] - interval[0], -1, -1) for index, interval in enumerate(split)], 0)
cam_R_ex = torch.cat(
[cam_R[index].expand(interval[1] - interval[0], -1, -1) for index, interval in enumerate(split)], 0)
points_2d = project_3d_points_to_2d(bdb3d, cam_R_ex, K_ex) # n x 8 x 2
x1 = torch.min(torch.max(torch.min(points_2d[:, :, 0], dim=1)[0], torch.zeros(n).cuda()),
2 * K_ex[:, 0, 2]) / (K_ex[:, 0, 2].float())
y1 = torch.min(torch.max(torch.min(points_2d[:, :, 1], dim=1)[0], torch.zeros(n).cuda()),
2 * K_ex[:, 1, 2]) / (K_ex[:, 1, 2].float())
x2 = torch.min(torch.max(torch.max(points_2d[:, :, 0], dim=1)[0], torch.zeros(n).cuda()),
2 * K_ex[:, 0, 2]) / (K_ex[:, 0, 2].float())
y2 = torch.min(torch.max(torch.max(points_2d[:, :, 1], dim=1)[0], torch.zeros(n).cuda()),
2 * K_ex[:, 1, 2]) / (K_ex[:, 1, 2].float())
return torch.stack((x1, y1, x2, y2), 1)
def physical_violation(bdb_layout, bdb_3d, split):
'''
compute the loss of physical violation
:param bdb_layout: b x 8 x 3 tensor
:param bdb_3d: n x 8 x 3 tensor
:param split: b x 2 tensor
:return:
'''
n = bdb_3d.size(0)
layout_max = torch.max(bdb_layout, dim=1)[0] # b x 3
layout_min = torch.min(bdb_layout, dim=1)[0] # b x 3
layout_max_ex = torch.cat([layout_max[index].expand(interval[1] - interval[0], -1) for index, interval in enumerate(split)], 0) # n x 3
layout_min_ex = torch.cat([layout_min[index].expand(interval[1] - interval[0], -1) for index, interval in enumerate(split)], 0) # n x 3
bdb_max = torch.max(bdb_3d, dim=1)[0] # n x 3
bdb_min = torch.min(bdb_3d, dim=1)[0] # n x 3
violation = F.relu(bdb_max - layout_max_ex) + F.relu(layout_min_ex - bdb_min) # n x 3
return violation, torch.zeros(n, 3).cuda()
def get_bdb_evaluation(bins_tensor, ori_cls_gt, ori_reg_result, centroid_cls_gt, centroid_reg_result, size_cls_gt,
size_reg_result, P, K, cam_R, split, return_bdb=False):
bdb, bdb_form = get_bdb_3d_result(bins_tensor, ori_cls_gt, ori_reg_result, centroid_cls_gt, centroid_reg_result,
size_cls_gt, size_reg_result, P, K, cam_R, split)
n = ori_cls_gt.size(0)
basis = bdb_form['basis']
coeffs = bdb_form['coeffs']
centroid = bdb_form['centroid']
class_id = torch.argmax(size_cls_gt, 1)
bdb_output = [{'basis': basis[i, :, :].cpu().numpy(), 'coeffs': coeffs[i, :].cpu().numpy(),
'centroid': centroid[i, :].squeeze().cpu().numpy(), 'classid': class_id[i].cpu().numpy()} for i in
range(n)]
if not return_bdb:
return bdb_output
else:
return bdb_output, bdb
def get_corners_of_bb3d_no_index(basis, coeffs, centroid):
corners = np.zeros((8, 3))
coeffs = np.abs(coeffs)
corners[0, :] = - basis[0, :] * coeffs[0] + basis[1, :] * coeffs[1] - basis[2, :] * coeffs[2]
corners[1, :] = - basis[0, :] * coeffs[0] + basis[1, :] * coeffs[1] + basis[2, :] * coeffs[2]
corners[2, :] = + basis[0, :] * coeffs[0] + basis[1, :] * coeffs[1] + basis[2, :] * coeffs[2]
corners[3, :] = + basis[0, :] * coeffs[0] + basis[1, :] * coeffs[1] - basis[2, :] * coeffs[2]
corners[4, :] = - basis[0, :] * coeffs[0] - basis[1, :] * coeffs[1] - basis[2, :] * coeffs[2]
corners[5, :] = - basis[0, :] * coeffs[0] - basis[1, :] * coeffs[1] + basis[2, :] * coeffs[2]
corners[6, :] = + basis[0, :] * coeffs[0] - basis[1, :] * coeffs[1] + basis[2, :] * coeffs[2]
corners[7, :] = + basis[0, :] * coeffs[0] - basis[1, :] * coeffs[1] - basis[2, :] * coeffs[2]
corners = corners + np.tile(centroid, (8, 1))
return corners
def change_key(bbox):
if 'u1' not in bbox.keys() and 'x1' in bbox.keys():
bbox = deepcopy(bbox)
bbox['u1'] = bbox['x1']
bbox['v1'] = bbox['y1']
bbox['u2'] = bbox['x2']
bbox['v2'] = bbox['y2']
bbox.pop('x1', None)
bbox.pop('x2', None)
bbox.pop('y1', None)
bbox.pop('y2', None)
return bbox
def get_iou(bb1, bb2):
"""
Calculate the Intersection over Union (IoU) of two bounding boxes.
Parameters
----------
bb1 : dict
Keys: {'u1', 'v1', 'u2', 'v2'}
The (u1, v1) position is at the top left corner,
The (u2, v2) position is at the bottom right corner
bb2 : dict
Keys: {'u1', 'v1', 'u2', 'v2'}
The (u1, v1) position is at the top left corner,
The (u2, v2) position is at the bottom right corner
Returns
-------
float
in [0, 1]
"""
bb1 = change_key(bb1)
bb2 = change_key(bb2)
assert bb1['u1'] <= bb1['u2']
assert bb1['v1'] <= bb1['v2']
assert bb2['u1'] <= bb2['u2']
assert bb2['v1'] <= bb2['v2']
# determine the coordinates of the intersection rectangle
u_left = max(bb1['u1'], bb2['u1'])
v_top = max(bb1['v1'], bb2['v1'])
u_right = min(bb1['u2'], bb2['u2'])
v_bottom = min(bb1['v2'], bb2['v2'])
if u_right < u_left or v_bottom < v_top:
return 0.0
# The intersection of two axis-aligned bounding boxes is always an
# axis-aligned bounding box
intersection_area = (u_right - u_left) * (v_bottom - v_top)
# compute the area of both AABBs
bb1_area = (bb1['u2'] - bb1['u1']) * (bb1['v2'] - bb1['v1'])
bb2_area = (bb2['u2'] - bb2['u1']) * (bb2['v2'] - bb2['v1'])
# compute the intersection over union by taking the intersection
# area and dividing it by the sum of prediction + ground-truth
    # areas - the intersection area
iou = intersection_area / float(bb1_area + bb2_area - intersection_area)
assert iou >= 0.0
assert iou <= 1.0
return iou
```
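A quick worked example for `get_iou` above: two 2x2 boxes offset by one unit in each direction intersect in a 1x1 square, so IoU = 1 / (4 + 4 - 1) ≈ 0.143.
```python
# Worked example for get_iou; keys follow the {'u1','v1','u2','v2'} convention above.
bb1 = {'u1': 0, 'v1': 0, 'u2': 2, 'v2': 2}
bb2 = {'u1': 1, 'v1': 1, 'u2': 3, 'v2': 3}
print(get_iou(bb1, bb2))   # 0.142857...

# 'x1'/'y1'-style keys are also accepted thanks to change_key:
print(get_iou({'x1': 0, 'y1': 0, 'x2': 2, 'y2': 2}, bb2))   # same result
```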
#### File: Jerrypiglet/Total3DUnderstanding/train_epoch.py
```python
from net_utils.utils import LossRecorder
from time import time
def train_epoch(cfg, epoch, trainer, dataloaders):
'''
train by epoch
:param cfg: configuration file
:param epoch: epoch id.
:param trainer: specific trainer for networks
:param dataloaders: dataloader for training and validation
:return:
'''
for phase in ['train', 'test']:
dataloader = dataloaders[phase]
batch_size = cfg.config[phase]['batch_size']
loss_recorder = LossRecorder(batch_size)
# set mode
trainer.net.train(phase == 'train')
# set subnet mode
trainer.net.set_mode()
cfg.log_string('-' * 100)
cfg.log_string('Switch Phase to %s.' % (phase))
cfg.log_string('-'*100)
for iter, data in enumerate(dataloader):
if phase == 'train':
loss = trainer.train_step(data)
else:
loss = trainer.eval_step(data)
# visualize intermediate results.
if ((iter + 1) % cfg.config['log']['vis_step']) == 0:
trainer.visualize_step(epoch, phase, iter, data)
loss_recorder.update_loss(loss)
if ((iter + 1) % cfg.config['log']['print_step']) == 0:
cfg.log_string('Process: Phase: %s. Epoch %d: %d/%d. Current loss: %s.' % (phase, epoch, iter + 1, len(dataloader), str(loss)))
cfg.log_string('=' * 100)
for loss_name, loss_value in loss_recorder.loss_recorder.items():
cfg.log_string('Currently the last %s loss (%s) is: %f' % (phase, loss_name, loss_value.avg))
cfg.log_string('=' * 100)
return loss_recorder.loss_recorder
def train(cfg, trainer, scheduler, checkpoint, train_loader, test_loader):
'''
train epochs for network
:param cfg: configuration file
:param scheduler: scheduler for optimizer
:param trainer: specific trainer for networks
:param checkpoint: network weights.
:param train_loader: dataloader for training
:param test_loader: dataloader for testing
:return:
'''
start_epoch = scheduler.last_epoch
total_epochs = cfg.config['train']['epochs']
min_eval_loss = checkpoint.get('min_loss')
dataloaders = {'train': train_loader, 'test': test_loader}
for epoch in range(start_epoch, total_epochs):
cfg.log_string('-' * 100)
cfg.log_string('Epoch (%d/%s):' % (epoch + 1, total_epochs))
trainer.show_lr()
start = time()
eval_loss_recorder = train_epoch(cfg, epoch + 1, trainer, dataloaders)
eval_loss = trainer.eval_loss_parser(eval_loss_recorder)
scheduler.step(eval_loss)
cfg.log_string('Epoch (%d/%s) Time elapsed: (%f).' % (epoch + 1, total_epochs, time()-start))
# save checkpoint
checkpoint.register_modules(epoch=epoch, min_loss=eval_loss)
checkpoint.save('last')
cfg.log_string('Saved the latest checkpoint.')
if epoch==-1 or eval_loss<min_eval_loss:
checkpoint.save('best')
min_eval_loss = eval_loss
cfg.log_string('Saved the best checkpoint.')
cfg.log_string('=' * 100)
for loss_name, loss_value in eval_loss_recorder.items():
cfg.log_string('Currently the best test loss (%s) is: %f' % (loss_name, loss_value.avg))
cfg.log_string('=' * 100)
```
#### File: utils_OR/DatasetCreation/renderMatPart.py
```python
import glob
import os
import os.path as osp
import argparse
import xml.etree.ElementTree as et
from xml.dom import minidom
def transformToXml(root ):
rstring = et.tostring(root, 'utf-8')
pstring = minidom.parseString(rstring)
xmlString = pstring.toprettyxml(indent=" ")
xmlString= xmlString.split('\n')
xmlString = [x for x in xmlString if len(x.strip()) != 0 ]
xmlString = '\n'.join(xmlString )
return xmlString
parser = argparse.ArgumentParser()
# Directories
parser.add_argument('--xmlRoot', default="/siggraphasia20dataset/code/Routine/scenes/xml1", help="outdir of xml file")
# Start and end point
parser.add_argument('--rs', default=0, type=int, help='the start scene index' )
parser.add_argument('--re', default=1600, type=int, help='the end scene index' )
# xml file
parser.add_argument('--xmlFile', default='main', help='the xml file')
# output file
parser.add_argument('--outRoot', default='/eccv20dataset/DatasetNew_test/', help='output directory')
# Render Mode
parser.add_argument('--mode', default=7, type=int, help='the information being rendered')
# Control
parser.add_argument('--forceOutput', action='store_true', help='whether to overwrite previous results')
# Program
parser.add_argument('--program', default='/siggraphasia20dataset/OptixRenderer_MatPart/src/bin/optixRenderer', help='the location of render' )
opt = parser.parse_args()
scenes = glob.glob(osp.join(opt.xmlRoot, 'scene*') )
scenes = [x for x in scenes if osp.isdir(x) ]
scenes = sorted(scenes )
for n in range(opt.rs, min(opt.re, len(scenes ) ) ):
scene = scenes[n]
sceneId = scene.split('/')[-1]
print('%d/%d: %s' % (n, len(scenes), sceneId ) )
outDir = osp.join(opt.outRoot, opt.xmlFile + '_' + opt.xmlRoot.split('/')[-1], sceneId )
if not osp.isdir(outDir ):
continue
os.system('mkdir -p %s' % outDir )
xmlFile = osp.join(scene, '%s.xml' % opt.xmlFile )
camFile = osp.join(scene, 'cam.txt' )
if not osp.isfile(xmlFile ) or not osp.isfile(camFile ):
continue
# Modify the xml file to fix the lamp issue
tree = et.parse(xmlFile )
root = tree.getroot()
shapes = root.findall('shape')
isFindAreaLight = False
for shape in shapes:
string = shape.findall('string')[0]
fileName = string.get('value')
if not 'alignedNew.obj' in fileName:
if 'aligned_shape.obj' in fileName:
fileName = fileName.replace('aligned_shape.obj', 'alignedNew.obj')
string.set('value', fileName )
bsdfs = shape.findall('ref')
for bsdf in bsdfs:
shape.remove(bsdf )
elif 'aligned_light.obj' in fileName:
root.remove(shape )
newXmlFile = xmlFile.replace('.xml', '_cadmatobj.xml')
xmlString = transformToXml(root )
with open(newXmlFile, 'w') as xmlOut:
xmlOut.write(xmlString )
cmd = '%s -f %s -c %s -o %s -m %d' % (opt.program, newXmlFile, 'cam.txt', osp.join(outDir, 'im.hdr'), opt.mode )
print(cmd )
if opt.forceOutput:
cmd += ' --forceOutput'
os.system(cmd )
```
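`transformToXml` above round-trips an ElementTree through `minidom` to get an indented string and then drops the blank lines that `toprettyxml` inserts. A self-contained sketch of the same idea on a tiny made-up scene element (the element and attribute names are only for illustration):
```python
import xml.etree.ElementTree as et
from xml.dom import minidom

def pretty_xml(root):
    # Same approach as transformToXml: serialize, re-parse, pretty-print, drop blank lines.
    raw = et.tostring(root, 'utf-8')
    pretty = minidom.parseString(raw).toprettyxml(indent="    ")
    return '\n'.join(line for line in pretty.split('\n') if line.strip())

root = et.Element('scene')
shape = et.SubElement(root, 'shape', {'type': 'obj'})
et.SubElement(shape, 'string', {'name': 'filename', 'value': 'alignedNew.obj'})
print(pretty_xml(root))
```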
#### File: utils_OR/DatasetCreation/sampleCameraPose.py
```python
import numpy as np
import os
import shutil
import glob
import JSONHelper
import quaternion
import argparse
import os.path as osp
import pickle
import align_utils as utils
import xml.etree.ElementTree as et
from xml.dom import minidom
import cv2
import struct
import scipy.ndimage as ndimage
def loadMesh(name ):
vertices = []
faces = []
with open(name, 'r') as meshIn:
lines = meshIn.readlines()
lines = [x.strip() for x in lines if len(x.strip() ) > 2 ]
for l in lines:
if l[0:2] == 'v ':
vstr = l.split(' ')[1:4]
varr = [float(x) for x in vstr ]
varr = np.array(varr ).reshape([1, 3] )
vertices.append(varr )
elif l[0:2] == 'f ':
fstr = l.split(' ')[1:4]
farr = [int(x.split('/')[0] ) for x in fstr ]
farr = np.array(farr ).reshape([1, 3] )
faces.append(farr )
vertices = np.concatenate(vertices, axis=0 )
faces = np.concatenate(faces, axis=0 )
return vertices, faces
def writeMesh(name, vertices, faces ):
with open(name, 'w') as meshOut:
for n in range(0, vertices.shape[0]):
meshOut.write('v %.3f %.3f %.3f\n' %
(vertices[n, 0], vertices[n, 1], vertices[n, 2] ) )
for n in range(0,faces.shape[0] ):
meshOut.write('f %d %d %d\n' %
(faces[n, 0], faces[n, 1], faces[n, 2]) )
def writeScene(name, boxes ):
with open(name, 'w') as meshOut:
vNum = 0
for group in boxes:
vertices = group[0]
faces = group[1]
for n in range(0, vertices.shape[0] ):
meshOut.write('v %.3f %.3f %.3f\n' %
(vertices[n, 0], vertices[n, 1], vertices[n, 2] ) )
for n in range(0, faces.shape[0]):
meshOut.write('f %d %d %d\n' %
(faces[n, 0] + vNum, faces[n, 1] + vNum, faces[n, 2] + vNum ) )
vNum += vertices.shape[0]
def computeBox(vertices ):
minX, maxX = vertices[:, 0].min(), vertices[:, 0].max()
minY, maxY = vertices[:, 1].min(), vertices[:, 1].max()
minZ, maxZ = vertices[:, 2].min(), vertices[:, 2].max()
corners = []
corners.append(np.array([minX, minY, minZ] ).reshape(1, 3) )
corners.append(np.array([maxX, minY, minZ] ).reshape(1, 3) )
corners.append(np.array([maxX, minY, maxZ] ).reshape(1, 3) )
corners.append(np.array([minX, minY, maxZ] ).reshape(1, 3) )
corners.append(np.array([minX, maxY, minZ] ).reshape(1, 3) )
corners.append(np.array([maxX, maxY, minZ] ).reshape(1, 3) )
corners.append(np.array([maxX, maxY, maxZ] ).reshape(1, 3) )
corners.append(np.array([minX, maxY, maxZ] ).reshape(1, 3) )
corners = np.concatenate(corners ).astype(np.float32 )
faces = []
faces.append(np.array([1, 2, 3] ).reshape(1, 3) )
faces.append(np.array([1, 3, 4] ).reshape(1, 3) )
faces.append(np.array([5, 7, 6] ).reshape(1, 3) )
faces.append(np.array([5, 8, 7] ).reshape(1, 3) )
faces.append(np.array([1, 6, 2] ).reshape(1, 3) )
faces.append(np.array([1, 5, 6] ).reshape(1, 3) )
faces.append(np.array([2, 7, 3] ).reshape(1, 3) )
faces.append(np.array([2, 6, 7] ).reshape(1, 3) )
faces.append(np.array([3, 8, 4] ).reshape(1, 3) )
faces.append(np.array([3, 7, 8] ).reshape(1, 3) )
faces.append(np.array([4, 5, 1] ).reshape(1, 3) )
faces.append(np.array([4, 8, 5] ).reshape(1, 3) )
faces = np.concatenate(faces ).astype(np.int32 )
return corners, faces
def computeTransform(vertices, t, q, s):
if s != None:
scale = np.array(s, dtype=np.float32 ).reshape(1, 3)
vertices = vertices * scale
if q != None:
q = np.quaternion(q[0], q[1], q[2], q[3])
rotMat = quaternion.as_rotation_matrix(q )
if np.abs(rotMat[1, 1] ) > 0.5:
d = rotMat[1, 1]
rotMat[:, 1] = 0
rotMat[1, :] = 0
if d < 0:
rotMat[1, 1] = -1
else:
rotMat[1, 1] = 1
vertices = np.matmul(rotMat, vertices.transpose() )
vertices = vertices.transpose()
if t != None:
trans = np.array(t, dtype=np.float32 ).reshape(1, 3)
vertices = vertices + trans
return vertices, trans.squeeze(), rotMat, scale.squeeze()
def checkOverlapApproximate(bverts1, bverts2 ):
axis_1 = (bverts1[1, :] - bverts1[0, :] ).reshape(1, 3)
xLen = np.sqrt(np.sum(axis_1 * axis_1 ) )
axis_2 = (bverts1[3, :] - bverts1[0, :] ).reshape(1, 3)
zLen = np.sqrt(np.sum(axis_2 * axis_2 ) )
origin = bverts1[0, :]
xCoord = np.sum( (bverts2[0:4, :] - origin ) * axis_1 / xLen, axis=1 )
zCoord = np.sum( (bverts2[0:4, :] - origin ) * axis_2 / zLen, axis=1 )
minX, maxX = xCoord.min(), xCoord.max()
minZ, maxZ = zCoord.min(), zCoord.max()
xOverlap = (min(maxX, xLen) - max(minX, 0) )
zOverlap = (min(maxZ, zLen) - max(minZ, 0) )
if xOverlap < 0 or zOverlap < 0:
return False
areaTotal = (maxX - minX) * (maxZ - minZ )
areaOverlap = xOverlap * zOverlap
if areaOverlap / areaTotal > 0.7:
return True
else:
return False
def findSupport(lverts, boxes, cats ):
# Find support for every object
boxList = []
for n in range(0, len(boxes) ):
bList = []
top = boxes[n][0][:, 1].max()
for m in range(0, len(boxes ) ):
if m != n:
bverts = boxes[m][0]
minY, maxY = bverts[:, 1].min(), bverts[:, 1].max()
bottom = minY
if np.abs(top - bottom) < 0.75 * (maxY - minY ) and np.abs(top - bottom ) < 1:
isOverlap = checkOverlapApproximate(boxes[n][0], boxes[m][0] )
if isOverlap:
if m < n:
if not n in boxList[m]:
bList.append(m )
else:
bList.append(m )
boxList.append(bList )
# Find objects on floor
floorList = []
floorHeight = lverts[:, 1].min()
for n in range(0, len(boxes ) ):
isSupported = False
for bList in boxList:
if n in bList:
isSupported = True
break
if not isSupported:
if cats[n] == '03046257' or cats[n] == '03636649' or cats[n] == '02808440':
bverts = boxes[n][0]
minY, maxY = bverts[:, 1].min(), bverts[:, 1].max()
if np.abs(minY - floorHeight ) < 1.5 * (maxY - minY) and np.abs(minY - floorHeight ) < 1 :
floorList.append(n )
else:
floorList.append(n )
return floorList, boxList
def adjustHeightBoxes(boxId, boxes, cads, boxList ):
top = boxes[boxId ][0][:, 1].max()
for n in boxList[boxId ]:
bverts = boxes[n][0]
bottom = bverts[:, 1].min()
delta = np.array([0, top-bottom, 0] ).reshape(1, 3)
boxes[n][0] = boxes[n][0] + delta
cads[n][0] = cads[n][0] + delta
boxes[n].append( ('t', delta.squeeze() ) )
cads[n].append( ('t', delta.squeeze() ) )
if len(boxList[n]) != 0:
adjustHeightBoxes(n, boxes, cads, boxList )
return
def adjustHeight(lverts, boxes, cads, floorList, boxList ):
# Adjust the height
floorHeight = lverts[:, 1].min()
for n in floorList:
bverts = boxes[n][0]
bottom = bverts[:, 1].min()
delta = np.array([0, floorHeight-bottom, 0] ).reshape(1, 3)
boxes[n][0] = boxes[n][0] + delta
boxes[n].append( ('t', delta.squeeze() ) )
cads[n][0] = cads[n][0] + delta
cads[n].append( ('t', delta.squeeze() ) )
if len(boxList[n] ) != 0:
adjustHeightBoxes(n, boxes, cads, boxList )
return
def checkPointInPolygon(wallVertices, v ):
###Given the wall vertices, determine if the pt is inside the polygon
X = [pt[0] for pt in wallVertices ]
Z = [pt[2] for pt in wallVertices ]
j = len(wallVertices) - 1
oddNodes = False
x, z = v[0], v[2]
for i in range(len(wallVertices ) ):
if (Z[i] < z and Z[j] >= z) or (Z[j] < z and Z[i] >= z ):
if (X[i] + ((z - Z[i]) / (Z[j] - Z[i]) * (X[j] - X[i]) ) ) <= x:
oddNodes = not oddNodes
j=i
return oddNodes
def calLineParam(pt1, pt2 ):
###Calculate line parameters
x1, z1 = pt1
x2, z2 = pt2
a = z1 - z2
b = x2 - x1
c = z2 * x1 - x2 * z1
return a, b, c
def findNearestPt(w1, w2, pts ):
###Find the nearest point on the line to a point
a, b, c = calLineParam(w1, w2)
x, z = pts
a2b2 = a ** 2 + b ** 2
new_x = (b * (b * x - a * z) - a * c) / a2b2
new_z = (a * (-b * x + a * z) - b * c) / a2b2
return np.array([new_x, new_z] )
def findNearestWall(pt, wallVertices ):
###Find nearest wall of a point
minD, result = 100, None
pt = np.array([pt[0], pt[2]], dtype=np.float32 )
j = len(wallVertices) - 1
for i in range(len(wallVertices ) ):
w1 = np.array([wallVertices[i][0], wallVertices[i][2] ], dtype = np.float32 )
w2 = np.array([wallVertices[j][0], wallVertices[j][2] ], dtype = np.float32 )
if np.linalg.norm(w1 - pt ) < np.linalg.norm(w2 - pt):
d = np.linalg.norm(np.cross(w2 - w1, w1 - pt) ) / np.linalg.norm(w2 - w1)
else:
d = np.linalg.norm(np.cross(w2 - w1, w2 - pt) ) / np.linalg.norm(w2 - w1)
if d < minD:
nearestPt = findNearestPt(w1, w2, pt)
denom, nom = w1 - w2, w1 - nearestPt
if(np.sum(denom == 0)):
denom[denom == 0] = denom[denom != 0]
check = nom / denom
if np.mean(check) < 1 and np.mean(check) > 0:
minD = d
result = nearestPt
j = i
for i in range(len(wallVertices ) ):
w1 = np.array([wallVertices[i][0], wallVertices[i][2] ], dtype = np.float32 )
d = np.linalg.norm(w1 - pt)
if d < minD:
minD = d
result = w1
return minD, result
def moveBox(record):
pt, nearestPt = record
vector = ((nearestPt[0] - pt[0]), (nearestPt[1] - pt[2] ) )
return vector
def moveBoxInWall(cverts, bboxes, cads, threshold = 0.3):
# find wall_vertices
wallVertices = []
floorHeight = cverts[:, 1].min()
for n in range(0, cverts.shape[0] ):
vert = cverts[n, :]
if np.abs(vert[1] - floorHeight ) < 0.1:
wallVertices.append(vert )
isMove = False
isBeyondRange = False
    for n in range(0, len(bboxes ) ):
        box = bboxes[n]
maxD, record = 0, None
bverts = box[0]
for m in range(0, bverts.shape[0] ):
v = bverts[m, :]
if not checkPointInPolygon(wallVertices, v ):
d, nearestPt = findNearestWall(v, wallVertices )
if maxD < d:
record = (v, nearestPt )
maxD = d
if record != None:
t_x, t_z = moveBox(record )
trans = np.array([t_x, 0, t_z], dtype=np.float32 )
if np.linalg.norm(trans ) > threshold:
isBeyondRange = True
if np.linalg.norm(trans ) >= 1e-7:
isMove = True
direc = trans / np.linalg.norm(trans )
trans = trans + direc * 0.04
bboxes[n][0] = bboxes[n][0] + trans.reshape(1, 3)
bboxes[n].append( ('t', trans.squeeze() ) )
cads[n][0] = cads[n][0] + trans.reshape(1, 3)
cads[n].append( ('t', trans.squeeze() ) )
return isMove, isBeyondRange
def sampleCameraPoses(cverts, boxes,
samplePoint, sampleNum,
heightMin, heightMax,
distMin, distMax,
thetaMin, thetaMax,
phiMin, phiMax ):
wallVertices = []
floorHeight = cverts[:, 1].min()
for n in range(0, cverts.shape[0] ):
vert = cverts[n, :]
if np.abs(vert[1] - floorHeight ) < 0.1:
wallVertices.append(vert )
X = [pt[0] for pt in wallVertices ]
Z = [pt[2] for pt in wallVertices ]
meanPoint = [sum(X ) / len(X), 0, sum(Z) / len(Z) ]
meanPoint = np.array(meanPoint, dtype = np.float32 )
thetaMin = thetaMin / 180.0 * np.pi
thetaMax = thetaMax / 180.0 * np.pi
phiMin = phiMin / 180.0 * np.pi
phiMax = phiMax / 180.0 * np.pi
yMin = np.sin(thetaMin )
yMax = np.sin(thetaMax )
xMin = np.sin(phiMin )
xMax = np.sin(phiMax )
# Compute the segment length
totalLen = 0
j = len(wallVertices ) - 1
for i in range(len(wallVertices ) ):
l = (X[i] - X[j]) * (X[i] - X[j]) \
+ (Z[i] - Z[j]) * (Z[i] - Z[j] )
l = np.sqrt(l )
totalLen += l
j = i
segLen = totalLen / samplePoint
# Sample the camera poses
j = len(wallVertices ) - 1
camPoses = []
validPointCount = 0
for i in range(len(wallVertices ) ):
# compute the segment direction
direc = np.array( [X[i] - X[j], 0, Z[i] - Z[j] ], dtype = np.float32 )
totalLen = np.sqrt(np.sum(direc * direc ) )
if totalLen == 0:
continue
direc = direc / totalLen
# Determine the normal direction
normal = np.array([direc[2], 0, -direc[0] ], dtype = np.float32 )
normal = normal / np.sqrt(np.sum(normal * normal ) )
midPoint = np.array([0.5*(X[i] + X[j]), 0, 0.5*(Z[i] + Z[j]) ], dtype = np.float32 )
sp1 = midPoint + normal * 0.1
sp2 = midPoint - normal * 0.1
isIn1 = checkPointInPolygon(wallVertices, sp1 )
isIn2 = checkPointInPolygon(wallVertices, sp2 )
assert(isIn1 != isIn2 )
if isIn1 == False and isIn2 == True:
normal = -normal
accumLen = 0.2 * segLen
origin = np.array([X[j], 0, Z[j] ], dtype = np.float32 )
while accumLen < totalLen:
# compute point location
for cnt in range(0, sampleNum ):
pointLoc = origin + accumLen * direc
pointLoc += (np.random.random() * (distMax - distMin ) \
+ distMin) * normal
pointLoc[1] = np.random.random() * (heightMax - heightMin ) \
+ heightMin + floorHeight
isIn = checkPointInPolygon(wallVertices, pointLoc )
if not isIn:
print('Warning: %d point is outside the room' % validPointCount )
continue
else:
# check if the point will in any bounding boxes
isOverlap = False
for box in boxes:
bverts = box[0][0:4, :]
bminY = box[0][0, 1]
bmaxY = box[0][4, 1]
if pointLoc[1] > bminY and pointLoc[1] < bmaxY:
isOverlap = checkPointInPolygon(bverts, pointLoc )
if isOverlap:
break
if isOverlap:
print('Warning: %d point overlaps with furniture' % validPointCount )
continue
validPointCount += 1
camPose = np.zeros((3, 3), dtype=np.float32 )
camPose[0, :] = pointLoc
centerAxis = meanPoint - pointLoc
centerAxis = centerAxis / np.maximum(np.sqrt(
np.sum(centerAxis * centerAxis) ), 1e-6 )
zAxis = normal + 0.9 * centerAxis
zAxis = zAxis / np.maximum(np.sqrt(
np.sum(zAxis * zAxis) ), 1e-6 )
yAxis = np.array([0, 1, 0], dtype=np.float32 )
xAxis = np.cross(yAxis, zAxis )
xValue = (xMax - xMin) * np.random.random() + xMin
yValue = (yMax - yMin) * np.random.random() + yMin
zValue = 1
targetDirec = zValue * zAxis + yValue * yAxis + xValue * xAxis
targetDirec = targetDirec / np.sqrt(np.sum(targetDirec * targetDirec ) )
target = pointLoc + targetDirec
up = yAxis - np.sum(yAxis * targetDirec ) * targetDirec
up = up / np.sqrt(np.sum(up * up ) )
camPose[1, :] = target
camPose[2, :] = up
camPoses.append(camPose )
accumLen += segLen
j = i
return camPoses
def transformToXml(root ):
rstring = et.tostring(root, 'utf-8')
pstring = minidom.parseString(rstring)
xmlString = pstring.toprettyxml(indent=" ")
xmlString= xmlString.split('\n')
xmlString = [x for x in xmlString if len(x.strip()) != 0 ]
xmlString = '\n'.join(xmlString )
return xmlString
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--out', default="./xml/", help="outDir of xml file" )
parser.add_argument('--threshold', type=float, default = 0.3, help = 'the threshold to decide low quality mesh.' )
parser.add_argument('--rs', type=int, default=0, help='the starting point' )
parser.add_argument('--re', type=int, default=1600, help='the end point' )
parser.add_argument('--sampleRate', type=float, default=100.0 )
parser.add_argument('--sampleNum', type=int, default=3 )
parser.add_argument('--heightMin', type=float, default=1.4 )
parser.add_argument('--heightMax', type=float, default=1.8 )
parser.add_argument('--distMin', type=float, default=0.3 )
parser.add_argument('--distMax', type=float, default=1.5 )
parser.add_argument('--thetaMin', type=float, default=-60 )
parser.add_argument('--thetaMax', type=float, default=20 )
parser.add_argument('--phiMin', type=float, default=-45 )
parser.add_argument('--phiMax', type=float, default=45 )
# Program
parser.add_argument('--program', default='~/OptixRenderer/src/bin/optixRenderer' )
opt = parser.parse_args()
params = JSONHelper.read("./Parameters.json" )
filename_json = params["scan2cad"]
shapeNetRoot = params["shapenetAbs"]
layoutRoot = params["scannet_layoutAbs"]
camRootAbs = params['scannet_camAbs']
sceneCnt = 0
for r in JSONHelper.read(filename_json ):
if not (sceneCnt >= opt.rs and sceneCnt < opt.re):
sceneCnt += 1
continue
sceneCnt += 1
id_scan = r["id_scan"]
outDir = osp.abspath(opt.out + "/" + id_scan )
os.system('mkdir -p %s' % outDir )
if not osp.isfile(osp.join(outDir, 'transform.dat') ):
continue
'''
if osp.isfile(osp.join(outDir, 'cam.txt') ):
continue
'''
poses = glob.glob(osp.join(camRootAbs, id_scan, 'pose', '*.txt') )
samplePoint = int(len(poses ) / opt.sampleRate )
print('%d: %s: sample point %d' % (sceneCnt, id_scan, samplePoint ) )
layOutFile = osp.join(layoutRoot, id_scan, id_scan + '.obj' )
contourFile = osp.join(layoutRoot, id_scan, id_scan + '_contour.obj' )
t = r['trs']['translation']
q = r['trs']['rotation']
s = r['trs']['scale']
lverts, lfaces = loadMesh(layOutFile )
lverts[:, 0], lverts[:, 1] = lverts[:, 0], lverts[:, 1]
lverts, trans, rot, scale = computeTransform(lverts, t, q, s )
layout = [lverts, lfaces, ('s', scale), ('rot', rot), ('t', trans) ]
cverts, cfaces = loadMesh(contourFile )
cverts, trans, rot, scale = computeTransform(cverts, t, q, s )
boxes = []
cads = []
cats = []
# Load the shapes
for model in r["aligned_models"]:
t = model["trs"]["translation"]
q = model["trs"]["rotation"]
s = model["trs"]["scale"]
id_cad = model["id_cad"]
catid_cad = model["catid_cad"]
cad_file = osp.join(shapeNetRoot, catid_cad, id_cad, 'alignedNew.obj' )
if not osp.isfile(cad_file ):
continue
vertices, faces = loadMesh(cad_file )
bverts, bfaces = computeBox(vertices )
bverts, trans, rot, scale = computeTransform(bverts, t, q, s )
vertices, _, _, _ = computeTransform(vertices, t, q, s )
boxes.append([bverts, bfaces, ('s', scale), ('rot', rot), ('t', trans) ] )
cads.append([vertices, faces, ('s', scale), ('rot', rot), ('t', trans) ] )
cats.append(catid_cad )
# Build the relationship and adjust heights
floorList, boxList = findSupport(lverts, boxes, cats )
adjustHeight(lverts, boxes, cads, floorList, boxList )
# Sample initial camera pose
isMove, isBeyondRange = moveBoxInWall(cverts, boxes, cads, opt.threshold )
cnt = 0
while isMove == True and isBeyondRange == False:
isMove, isBeyondRange = moveBoxInWall(cverts, boxes, cads, opt.threshold )
print('IterNum %d' % cnt )
cnt += 1
if cnt == 5 or isMove == False or isBeyondRange == True:
break
camPoses = sampleCameraPoses(cverts, boxes, \
samplePoint, opt.sampleNum,
opt.heightMin, opt.heightMax, \
opt.distMin, opt.distMax, \
opt.thetaMin, opt.thetaMax, \
opt.phiMin, opt.phiMax )
camNum = len(camPoses )
if camNum == 0:
continue
with open(osp.join(outDir, 'camInitial.txt'), 'w') as camOut:
camOut.write('%d\n' % camNum )
print('Final sampled camera poses: %d' % len(camPoses ) )
for camPose in camPoses:
for n in range(0, 3):
camOut.write('%.3f %.3f %.3f\n' % \
(camPose[n, 0], camPose[n, 1], camPose[n, 2] ) )
# Downsize the size of the image
oldXML = osp.join(outDir, 'main.xml' )
newXML = osp.join(outDir, 'mainTemp.xml')
camFile = osp.join(outDir, 'camInitial.txt' )
if not osp.isfile(oldXML ) or not osp.isfile(camFile ):
continue
tree = et.parse(oldXML )
root = tree.getroot()
sensors = root.findall('sensor')
for sensor in sensors:
film = sensor.findall('film')[0]
integers = film.findall('integer')
for integer in integers:
if integer.get('name' ) == 'width':
integer.set('value', '160')
if integer.get('name' ) == 'height':
integer.set('value', '120')
xmlString = transformToXml(root )
with open(newXML, 'w') as xmlOut:
xmlOut.write(xmlString )
# Render depth and normal
cmd = '%s -f %s -c %s -o %s -m %d' % (opt.program, newXML, 'camInitial.txt', 'im.rgbe', 2 )
cmd += ' --forceOutput'
os.system(cmd )
cmd = '%s -f %s -c %s -o %s -m %d' % (opt.program, newXML, 'camInitial.txt', 'im.rgbe', 4 )
cmd += ' --forceOutput'
os.system(cmd )
cmd = '%s -f %s -c %s -o %s -m %d' % (opt.program, newXML, 'camInitial.txt', 'im.rgbe', 5 )
cmd += ' --forceOutput'
os.system(cmd )
# Load the normal and depth
normalCosts = []
depthCosts = []
for n in range(0, camNum ):
# Load the depth and normal
normalName = osp.join(outDir, 'imnormal_%d.png' % (n+1) )
maskName = osp.join(outDir, 'immask_%d.png' % (n+1) )
depthName = osp.join(outDir, 'imdepth_%d.dat' % (n+1) )
normal = cv2.imread(normalName )
mask = cv2.imread(maskName )
with open(depthName, 'rb') as fIn:
hBuffer = fIn.read(4)
height = struct.unpack('i', hBuffer)[0]
wBuffer = fIn.read(4)
width = struct.unpack('i', wBuffer)[0]
dBuffer = fIn.read(4 * width * height )
depth = np.asarray(struct.unpack('f' * height * width, dBuffer), dtype=np.float32 )
depth = depth.reshape([height, width] )
# Compute the ranking
mask = (mask[:, :, 0] > 0.4 )
mask = ndimage.binary_erosion(mask, border_value=1, structure=np.ones((3, 3) ) )
mask = mask.astype(np.float32 )
pixelNum = np.sum(mask )
if pixelNum == 0:
normalCosts.append(0 )
depthCosts.append(0 )
continue
normal = normal.astype(np.float32 )
normal_gradx = np.abs(normal[:, 1:] - normal[:, 0:-1] )
normal_grady = np.abs(normal[1:, :] - normal[0:-1, :] )
ncost = (np.sum(normal_gradx ) + np.sum(normal_grady ) ) / pixelNum
dcost = np.sum(np.log(depth + 1 ) ) / pixelNum
normalCosts.append(ncost )
depthCosts.append(dcost )
normalCosts = np.array(normalCosts, dtype=np.float32 )
depthCosts = np.array(depthCosts, dtype=np.float32 )
normalCosts = (normalCosts - normalCosts.min() ) \
/ (normalCosts.max() - normalCosts.min() )
depthCosts = (depthCosts - depthCosts.min() ) \
/ (depthCosts.max() - depthCosts.min() )
totalCosts = normalCosts + 0.3 * depthCosts
camIndex = np.argsort(totalCosts )
camIndex = camIndex[::-1]
camPoses_s = []
selectedDir = osp.join(outDir, 'selected' )
if osp.isdir(selectedDir ):
os.system('rm -r %s' % selectedDir )
os.system('mkdir %s' % selectedDir )
for n in range(0, min(samplePoint, camNum ) ):
camPoses_s.append(camPoses[camIndex[n] ] )
normalName = osp.join(outDir, 'imnormal_%d.png' % (camIndex[n] + 1) )
os.system('cp %s %s' % (normalName, selectedDir ) )
with open(osp.join(outDir, 'cam.txt'), 'w') as camOut:
camOut.write('%d\n' % len(camPoses_s ) )
print('Final sampled camera poses: %d' % len(camPoses_s ) )
for camPose in camPoses_s:
for n in range(0, 3):
camOut.write('%.3f %.3f %.3f\n' % \
(camPose[n, 0], camPose[n, 1], camPose[n, 2] ) )
os.system('rm %s' % osp.join(outDir, 'mainTemp.xml') )
os.system('rm %s' % osp.join(outDir, 'immask_*.png') )
os.system('rm %s' % osp.join(outDir, 'imdepth_*.dat') )
os.system('rm %s' % osp.join(outDir, 'imnormal_*.png') )
```
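`checkPointInPolygon` above is the classic even-odd (ray-casting) point-in-polygon test, restricted to the XZ plane of the wall contour. A minimal 2-D sketch of the same rule, checked against a unit square; this is an illustration rather than the exact function above:
```python
def point_in_polygon(poly, x, z):
    # Even-odd rule: toggle on every polygon edge crossed by a leftward horizontal ray.
    inside = False
    j = len(poly) - 1
    for i in range(len(poly)):
        xi, zi = poly[i]
        xj, zj = poly[j]
        if (zi < z <= zj) or (zj < z <= zi):
            if xi + (z - zi) / (zj - zi) * (xj - xi) <= x:
                inside = not inside
        j = i
    return inside

square = [(0, 0), (1, 0), (1, 1), (0, 1)]
print(point_in_polygon(square, 0.5, 0.5))   # True
print(point_in_polygon(square, 1.5, 0.5))   # False
```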
#### File: utils_OR/DatasetCreation/SGOptim.py
```python
import torch
import numpy as np
import cv2
import torch.optim as optim
from torch import nn
import torch.nn.functional as F
from torch.autograd import Variable
class SGEnvOptim():
def __init__(self,
weightValue, thetaValue, phiValue,
gpuId = 0, niter = 40, envNum = 1, isCuda = True,
envWidth = 512, envHeight = 256, SGRow = 1, SGCol = 1, ch = 3,
isFixLightPos = True ):
self.SGNum = int(SGRow*SGCol )
self.envNum = envNum
self.niter = niter
self.ch = ch
self.isFixLightPos = isFixLightPos
Az = ( (np.arange(envWidth) + 0.5) / envWidth - 0.5 ) * 2 * np.pi
El = ( (np.arange(envHeight) + 0.5) / envHeight ) * np.pi
Az, El = np.meshgrid(Az, El )
Az = Az[:, :, np.newaxis ]
El = El[:, :, np.newaxis ]
lx = np.sin(El ) * np.cos(Az )
ly = np.sin(El ) * np.sin(Az )
lz = np.cos(El )
self.ls = np.concatenate( (lx, ly, lz), axis = 2)[np.newaxis, np.newaxis, np.newaxis, :]
self.ls = Variable(torch.from_numpy(self.ls.astype(np.float32) ) )
self.envHeight = envHeight
self.envWidth = envWidth
self.iterCount = 0
self.W = Variable(torch.from_numpy(np.sin(El.astype(np.float32) ).reshape( (1, 1, envHeight, envWidth) ) ) )
self.W[:, :, 0:int(envHeight/2), :] = 0
self.envmap = Variable(torch.zeros( (self.envNum, self.ch, self.envHeight, self.envWidth), dtype=torch.float32 ) )
self.isCuda = isCuda
self.gpuId = gpuId
thetaValue = max(thetaValue, np.pi/2.0 + 0.01 )
thetaValue = (thetaValue - np.pi/2.0)/ np.pi*4 - 1
thetaValue = 0.5 * np.log((1 + thetaValue) / (1 - thetaValue) )
phiValue = phiValue / np.pi
phiValue = 0.5 * np.log((1+phiValue) / (1-phiValue) )
weightValue = np.log(weightValue ).squeeze()
weightValue = weightValue.tolist()
weight = Variable(torch.zeros( (self.envNum, self.SGNum, self.ch), dtype = torch.float32) )
weight[:, :, 0] += weightValue[0]
weight[:, :, 1] += weightValue[1]
weight[:, :, 2] += weightValue[2]
theta = Variable(torch.zeros( (self.envNum, self.SGNum, 1), dtype = torch.float32 ) ) + thetaValue
phi = Variable(torch.zeros( (self.envNum, self.SGNum, 1 ), dtype = torch.float32 ) ) + phiValue
lamb = torch.log(Variable(torch.ones(self.envNum, self.SGNum, 1) * np.pi * 2.0 ) )
self.weight = weight.unsqueeze(-1).unsqueeze(-1)
self.theta = theta.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1)
self.phi = phi.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1)
self.lamb = lamb.unsqueeze(-1).unsqueeze(-1)
if isCuda:
self.ls = self.ls.cuda(self.gpuId )
self.weight = self.weight.cuda()
self.theta = self.theta.cuda()
self.phi = self.phi.cuda()
self.lamb = self.lamb.cuda()
self.envmap = self.envmap.cuda(self.gpuId )
self.W = self.W.cuda(self.gpuId )
self.weight.requires_grad = True
self.theta.requires_grad = True
self.phi.requires_grad = True
self.lamb.requires_grad = True
self.mseLoss = nn.MSELoss(size_average = False )
if self.isFixLightPos:
self.optEnv = optim.LBFGS([self.weight, self.lamb], lr=0.1, max_iter=100 )
self.optEnvAdam = optim.Adam([self.weight, self.lamb], lr=1e-2 )
else:
self.optEnv = optim.LBFGS([self.weight,
self.theta, self.phi, self.lamb], lr=0.1, max_iter=100 )
self.optEnvAdam = optim.Adam([self.weight,
self.theta, self.phi, self.lamb], lr=1e-2 )
def renderSG(self, theta, phi, lamb, weight ):
axisX = torch.sin(theta ) * torch.cos(phi )
axisY = torch.sin(theta ) * torch.sin(phi )
axisZ = torch.cos(theta )
axis = torch.cat([axisX, axisY, axisZ], dim=5)
mi = lamb.expand([self.envNum, self.SGNum, 1, self.envHeight, self.envWidth] ) * \
(torch.sum(
axis.expand([self.envNum, self.SGNum, 1, self.envHeight, self.envWidth, 3] ) * \
self.ls.expand([self.envNum, self.SGNum, 1, self.envHeight, self.envWidth, 3] ),
dim = 5) -1 )
envmaps = weight.expand([self.envNum, self.SGNum, self.ch, self.envHeight, self.envWidth] ) * \
torch.exp(mi ).expand([self.envNum, self.SGNum, self.ch,
self.envHeight, self.envWidth] )
envmap = torch.sum(envmaps, dim=1 )
return envmap
def deparameterize(self ):
weight, theta, phi, lamb = torch.split(self.param.view(self.envNum, self.SGNum, 6),
[3, 1, 1, 1], dim=2)
weight = weight.unsqueeze(-1).unsqueeze(-1)
theta = theta.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1)
phi = phi.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1)
lamb = lamb.unsqueeze(-1).unsqueeze(-1)
return theta, phi, weight, lamb
def optimizeBFGS(self, envmap ):
assert(envmap.shape[0] == self.envNum
and envmap.shape[1] == self.ch
and envmap.shape[2] == self.envHeight
and envmap.shape[3] == self.envWidth )
self.envmap.data.copy_(torch.from_numpy(envmap ) )
minLoss = 2e20
recImageBest = None
thetaBest = None
phiBest = None
weightBest = None
lambBest = None
self.loss = None
for i in range(0, self.niter ):
print('Iteration %d' % i )
def closure( ):
theta = 0.25*np.pi * (torch.tanh(self.theta )+1) + np.pi/2.0 + 0.01
phi = np.pi * torch.tanh(self.phi )
weight = torch.exp(self.weight )
lamb = torch.clamp(torch.exp(self.lamb ), max=70 )
recImage = self.renderSG(theta, phi, lamb, weight )
loss = self.mseLoss(
recImage * self.W.expand_as(recImage),
self.envmap * self.W.expand_as(recImage) )
self.loss = loss
self.optEnv.zero_grad()
loss.backward(retain_graph = True )
print ('%d Loss: %f' % (self.iterCount, (loss.item() / self.envNum
/ self.envWidth / self.envHeight / self.ch ) ) )
self.iterCount += 1
return loss
self.optEnv.step(closure)
if self.loss.cpu().data.item() < minLoss:
if torch.isnan(torch.sum(self.theta ) ) or \
torch.isnan(torch.sum(self.phi) ) or \
torch.isnan(torch.sum(self.weight ) ) or \
torch.isnan(torch.sum(self.lamb ) ):
break
else:
theta = 0.25*np.pi * (torch.tanh(self.theta )+1) + np.pi/2.0 + 0.01
phi = np.pi * torch.tanh(self.phi )
weight = torch.exp(self.weight )
lamb = torch.clamp(torch.exp(self.lamb ), max=70 )
recImage = self.renderSG(theta, phi, lamb, weight )
recImageBest = recImage.cpu().data.numpy()
thetaBest = theta.data.cpu().numpy().reshape( (self.envNum, self.SGNum, 1) )
phiBest = phi.data.cpu().numpy().squeeze().reshape( (self.envNum, self.SGNum, 1) )
lambBest = lamb.data.cpu().numpy().squeeze().reshape( (self.envNum, self.SGNum, 1) )
weightBest = weight.data.cpu().numpy().squeeze().reshape( (self.envNum, self.SGNum, 3) )
minLoss = self.loss.cpu()
del theta, phi, weight, lamb, recImage
else:
break
return thetaBest, phiBest, lambBest, weightBest, recImageBest
def optimizeAdam(self, envmap ):
assert(envmap.shape[0] == self.envNum
and envmap.shape[1] == self.ch
and envmap.shape[2] == self.envHeight
and envmap.shape[3] == self.envWidth )
self.envmap.data.copy_(torch.from_numpy(envmap ) )
minLoss = 2e20
recImageBest = None
thetaBest = None
phiBest = None
weightBest = None
lambBest = None
self.loss = None
for i in range(0, self.niter ):
print('Iteration %d' % i )
for j in range(0, 100):
theta = 0.25*np.pi * (torch.tanh(self.theta )+1) + np.pi/2.0 + 0.01
phi = np.pi * torch.tanh(self.phi )
weight = torch.exp(self.weight )
lamb = torch.clamp(torch.exp(self.lamb ), max=70 )
recImage = self.renderSG(theta, phi, lamb, weight )
loss = self.mseLoss(
recImage * self.W.expand_as(recImage),
self.envmap * self.W.expand_as(recImage) )
self.loss = loss
self.optEnv.zero_grad()
loss.backward()
self.iterCount += 1
self.optEnvAdam.step()
print ('Step %d Loss: %f' % (self.iterCount, (loss.item() / self.envNum
/ self.envWidth / self.envHeight / self.ch ) ) )
if self.loss.cpu().data.item() < minLoss:
if torch.isnan(torch.sum(self.theta ) ) or \
torch.isnan(torch.sum(self.phi) ) or \
torch.isnan(torch.sum(self.weight ) ) or \
torch.isnan(torch.sum(self.lamb ) ):
break
else:
theta = 0.25*np.pi * (torch.tanh(self.theta )+1) + np.pi/2.0 + 0.01
phi = np.pi * torch.tanh(self.phi )
weight = torch.exp(self.weight )
lamb = torch.clamp(torch.exp(self.lamb ), max=70 )
recImage = self.renderSG(theta, phi, lamb, weight )
recImageBest = recImage.cpu().data.numpy()
thetaBest = theta.data.cpu().numpy().reshape( (self.envNum, self.SGNum, 1) )
phiBest = phi.data.cpu().numpy().squeeze().reshape( (self.envNum, self.SGNum, 1) )
lambBest = lamb.data.cpu().numpy().squeeze().reshape( (self.envNum, self.SGNum, 1) )
weightBest = weight.data.cpu().numpy().squeeze().reshape( (self.envNum, self.SGNum, 3) )
minLoss = self.loss.cpu()
del theta, phi, weight, lamb, recImage
else:
break
return thetaBest, phiBest, lambBest, weightBest, recImageBest
```
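`renderSG` above evaluates a sum of spherical Gaussian lobes, `w * exp(lambda * (dot(axis, l) - 1))`, over an equirectangular grid of directions. A small NumPy sketch of a single lobe on the same parameterization; the resolution and lobe parameters are arbitrary illustration values:
```python
import numpy as np

H, W = 64, 128
az = ((np.arange(W) + 0.5) / W - 0.5) * 2 * np.pi     # azimuth
el = ((np.arange(H) + 0.5) / H) * np.pi               # elevation
az, el = np.meshgrid(az, el)
ls = np.stack([np.sin(el) * np.cos(az),
               np.sin(el) * np.sin(az),
               np.cos(el)], axis=-1)                  # unit directions, shape (H, W, 3)

axis = np.array([0.0, 0.0, 1.0])                      # lobe center direction
weight, lamb = 2.0, 30.0                              # lobe amplitude and sharpness
env = weight * np.exp(lamb * (ls @ axis - 1.0))       # G(l) = w * exp(lambda * (l . a - 1))
print(env.shape, float(env.max()))                    # the map peaks along the lobe axis
```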
#### File: Total3DUnderstanding/utils_OR/utils_OR_mesh.py
```python
import trimesh
import numpy as np
import quaternion
# original obj operations by Zhengqin
def loadMesh(name ):
vertices = []
faces = []
with open(name, 'r') as meshIn:
lines = meshIn.readlines()
lines = [x.strip() for x in lines if len(x.strip() ) > 2 ]
for l in lines:
if l[0:2] == 'v ':
vstr = l.split(' ')[1:4]
varr = [float(x) for x in vstr ]
varr = np.array(varr ).reshape([1, 3] )
vertices.append(varr )
elif l[0:2] == 'f ':
fstr = l.split(' ')[1:4]
farr = [int(x.split('/')[0] ) for x in fstr ]
farr = np.array(farr ).reshape([1, 3] )
faces.append(farr )
vertices = np.concatenate(vertices, axis=0 )
faces = np.concatenate(faces, axis=0 )
return vertices, faces
def writeMesh(name, vertices, faces ):
with open(name, 'w') as meshOut:
for n in range(0, vertices.shape[0]):
meshOut.write('v %.3f %.3f %.3f\n' %
(vertices[n, 0], vertices[n, 1], vertices[n, 2] ) )
for n in range(0,faces.shape[0] ):
meshOut.write('f %d %d %d\n' %
(faces[n, 0], faces[n, 1], faces[n, 2]) )
def computeBox(vertices ):
minX, maxX = vertices[:, 0].min(), vertices[:, 0].max()
minY, maxY = vertices[:, 1].min(), vertices[:, 1].max()
minZ, maxZ = vertices[:, 2].min(), vertices[:, 2].max()
corners = []
corners.append(np.array([minX, minY, minZ] ).reshape(1, 3) )
corners.append(np.array([maxX, minY, minZ] ).reshape(1, 3) )
corners.append(np.array([maxX, minY, maxZ] ).reshape(1, 3) )
corners.append(np.array([minX, minY, maxZ] ).reshape(1, 3) )
corners.append(np.array([minX, maxY, minZ] ).reshape(1, 3) )
corners.append(np.array([maxX, maxY, minZ] ).reshape(1, 3) )
corners.append(np.array([maxX, maxY, maxZ] ).reshape(1, 3) )
corners.append(np.array([minX, maxY, maxZ] ).reshape(1, 3) )
corners = np.concatenate(corners ).astype(np.float32 )
faces = []
faces.append(np.array([1, 2, 3] ).reshape(1, 3) )
faces.append(np.array([1, 3, 4] ).reshape(1, 3) )
faces.append(np.array([5, 7, 6] ).reshape(1, 3) )
faces.append(np.array([5, 8, 7] ).reshape(1, 3) )
faces.append(np.array([1, 6, 2] ).reshape(1, 3) )
faces.append(np.array([1, 5, 6] ).reshape(1, 3) )
faces.append(np.array([2, 7, 3] ).reshape(1, 3) )
faces.append(np.array([2, 6, 7] ).reshape(1, 3) )
faces.append(np.array([3, 8, 4] ).reshape(1, 3) )
faces.append(np.array([3, 7, 8] ).reshape(1, 3) )
faces.append(np.array([4, 5, 1] ).reshape(1, 3) )
faces.append(np.array([4, 8, 5] ).reshape(1, 3) )
faces = np.concatenate(faces ).astype(np.int32 )
return corners, faces
def computeTransform(vertices, t, q, s):
if s != None:
scale = np.array(s, dtype=np.float32 ).reshape(1, 3)
vertices = vertices * scale
if q != None:
q = np.quaternion(q[0], q[1], q[2], q[3])
rotMat = quaternion.as_rotation_matrix(q )
if np.abs(rotMat[1, 1] ) > 0.5:
d = rotMat[1, 1]
rotMat[:, 1] = 0
rotMat[1, :] = 0
if d < 0:
rotMat[1, 1] = -1
else:
rotMat[1, 1] = 1
vertices = np.matmul(rotMat, vertices.transpose() )
vertices = vertices.transpose()
if t != None:
trans = np.array(t, dtype=np.float32 ).reshape(1, 3)
vertices = vertices + trans
return vertices, trans.squeeze(), rotMat, scale.squeeze()
# mesh operations by Rui
def load_OR_mesh(layout_obj_file):
mesh = trimesh.load_mesh(str(layout_obj_file))
mesh = as_mesh(mesh)
return mesh
def as_mesh(scene_or_mesh):
"""
Convert a possible scene to a mesh.
If conversion occurs, the returned mesh has only vertex and face data.
"""
if isinstance(scene_or_mesh, trimesh.Scene):
if len(scene_or_mesh.geometry) == 0:
mesh = None # empty scene
else:
# we lose texture information here
mesh = trimesh.util.concatenate(
tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
for g in scene_or_mesh.geometry.values()))
else:
assert(isinstance(scene_or_mesh, trimesh.Trimesh))
mesh = scene_or_mesh
return mesh
def remove_top_down_faces(mesh):
v = np.array(mesh.vertices)
f = list(np.array(mesh.faces))
f_after = []
for f0 in f:
if not(v[f0[0]][2]==v[f0[1]][2]==v[f0[2]][2]):
f_after.append(f0)
new_mesh = trimesh.Trimesh(vertices=v, faces=np.asarray(f_after))
return new_mesh
def mesh_to_contour(mesh, if_input_is_v_e=False, vertical_dim=-1):
if if_input_is_v_e:
v, e = mesh
else:
mesh = remove_top_down_faces(mesh)
v = np.array(mesh.vertices)
e = np.array(mesh.edges)
v_new_id_list = []
v_new_id = 0
floor_z = np.amin(v[:, vertical_dim])
for v0 in v:
if v0[vertical_dim]==floor_z:
v_new_id_list.append(v_new_id)
v_new_id += 1
else:
v_new_id_list.append(-1)
v_new = np.array([np.delete(v[x], vertical_dim) for x in range(len(v)) if v_new_id_list[x]!=-1])
e_new = np.array([[v_new_id_list[e[x][0]], v_new_id_list[e[x][1]]] for x in range(len(e)) if (v_new_id_list[e[x][0]]!=-1 and v_new_id_list[e[x][1]]!=-1)])
return v_new, e_new
def mesh_to_skeleton(mesh):
mesh = remove_top_down_faces(mesh)
v = np.array(mesh.vertices)
e = mesh.edges
floor_z = np.amin(v[:, -1])
ceil_z = np.amax(v[:, -1])
e_new = []
for e0 in e:
z0, z1 = v[e0[0]][2], v[e0[1]][2]
if z0 == z1:
e_new.append(e0)
elif np.array_equal(v[e0[0]][:2], v[e0[1]][:2]):
e_new.append(e0)
e_new = np.array(e_new)
return v, e_new
def v_pairs_from_v3d_e(v, e):
v_pairs = [(np.array([v[e0[0]][0], v[e0[1]][0]]), np.array([v[e0[0]][1], v[e0[1]][1]]), np.array([v[e0[0]][2], v[e0[1]][2]])) for e0 in e]
return v_pairs
def v_pairs_from_v2d_e(v, e):
v_pairs = [(np.array([v[e0[0]][0], v[e0[1]][0]]), np.array([v[e0[0]][1], v[e0[1]][1]])) for e0 in e]
return v_pairs
def v_xytuple_from_v2d_e(v, e):
v_pairs = [(v[e0[0]], v[e0[1]]) for e0 in e]
return v_pairs
def transform_v(vertices, transforms):
assert transforms[0][0]=='s' and transforms[1][0]=='rot' and transforms[2][0]=='t'
# following computeTransform()
assert len(vertices.shape)==2
assert vertices.shape[1]==3
s = transforms[0][1]
scale = np.array(s, dtype=np.float32 ).reshape(1, 3)
vertices = vertices * scale
    rotMat = transforms[1][1]
vertices = np.matmul(rotMat, vertices.transpose() )
vertices = vertices.transpose()
    t = transforms[2][1]
trans = np.array(t, dtype=np.float32 ).reshape(1, 3)
vertices = vertices + trans
return vertices
from scipy.spatial import ConvexHull
def minimum_bounding_rectangle(points):
# https://gis.stackexchange.com/questions/22895/finding-minimum-area-rectangle-for-given-points
"""
Find the smallest bounding rectangle for a set of points.
Returns a set of points representing the corners of the bounding box.
:param points: an nx2 matrix of coordinates
:rval: an nx2 matrix of coordinates
"""
pi2 = np.pi/2.
# get the convex hull for the points
hull_points = points[ConvexHull(points).vertices]
# calculate edge angles
edges = np.zeros((len(hull_points)-1, 2))
edges = hull_points[1:] - hull_points[:-1]
angles = np.zeros((len(edges)))
angles = np.arctan2(edges[:, 1], edges[:, 0])
angles = np.abs(np.mod(angles, pi2))
angles = np.unique(angles)
# find rotation matrices
# XXX both work
rotations = np.vstack([
np.cos(angles),
np.cos(angles-pi2),
np.cos(angles+pi2),
np.cos(angles)]).T
# rotations = np.vstack([
# np.cos(angles),
# -np.sin(angles),
# np.sin(angles),
# np.cos(angles)]).T
rotations = rotations.reshape((-1, 2, 2))
# apply rotations to the hull
rot_points = np.dot(rotations, hull_points.T)
# find the bounding points
min_x = np.nanmin(rot_points[:, 0], axis=1)
max_x = np.nanmax(rot_points[:, 0], axis=1)
min_y = np.nanmin(rot_points[:, 1], axis=1)
max_y = np.nanmax(rot_points[:, 1], axis=1)
# find the box with the best area
areas = (max_x - min_x) * (max_y - min_y)
best_idx = np.argmin(areas)
# return the best box
x1 = max_x[best_idx]
x2 = min_x[best_idx]
y1 = max_y[best_idx]
y2 = min_y[best_idx]
r = rotations[best_idx]
rval = np.zeros((4, 2))
rval[0] = np.dot([x1, y2], r)
rval[1] = np.dot([x2, y2], r)
rval[2] = np.dot([x2, y1], r)
rval[3] = np.dot([x1, y1], r)
return rval
```
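`minimum_bounding_rectangle` rotates the convex hull through every hull-edge angle and keeps the axis-aligned box with the smallest area. A quick sanity check that can be appended to the module above (the call assumes the function defined there is in scope); a 2 x 1 rectangle rotated by 30 degrees should come back with edge lengths of roughly 1 and 2:
```python
import numpy as np

theta = np.deg2rad(30)
R = np.array([[np.cos(theta), -np.sin(theta)],
              [np.sin(theta),  np.cos(theta)]])
base = np.array([[0, 0], [2, 0], [2, 1], [0, 1],
                 [1.0, 0.5], [0.5, 0.25]])      # two interior points should not matter
pts = base @ R.T

rect = minimum_bounding_rectangle(pts)          # 4 x 2 array of box corners
edge1 = np.linalg.norm(rect[1] - rect[0])
edge2 = np.linalg.norm(rect[2] - rect[1])
print(sorted([round(edge1, 3), round(edge2, 3)]))   # expect approximately [1.0, 2.0]
```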
#### File: Total3DUnderstanding/utils/utils_rui.py
```python
def clip(subjectPolygon, clipPolygon):
# https://rosettacode.org/wiki/Sutherland-Hodgman_polygon_clipping
def inside(p):
return(cp2[0]-cp1[0])*(p[1]-cp1[1]) > (cp2[1]-cp1[1])*(p[0]-cp1[0])
def computeIntersection():
dc = [ cp1[0] - cp2[0], cp1[1] - cp2[1] ]
dp = [ s[0] - e[0], s[1] - e[1] ]
n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0]
n2 = s[0] * e[1] - s[1] * e[0]
n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0])
return ((n1*dp[0] - n2*dc[0]) * n3, (n1*dp[1] - n2*dc[1]) * n3)
outputList = subjectPolygon
cp1 = clipPolygon[-1]
for clipVertex in clipPolygon:
cp2 = clipVertex
inputList = outputList
outputList = []
s = inputList[-1]
for subjectVertex in inputList:
e = subjectVertex
if inside(e):
if not inside(s):
outputList.append(computeIntersection())
outputList.append(e)
elif inside(s):
outputList.append(computeIntersection())
s = e
cp1 = cp2
return(outputList)
# https://stackoverflow.com/questions/11140163/plotting-a-3d-cube-a-sphere-and-a-vector-in-matplotlib
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
def vis_cube_plt(Xs, ax, color=None, linestyle='-'):
index1 = [0, 1, 2, 3, 0, 4, 5, 6, 7, 4]
index2 = [[1, 5], [2, 6], [3, 7]]
# ax.scatter3D(Xs[:, 0], Xs[:, 1], Xs[:, 2])
if color is None:
color = list(np.random.choice(range(256), size=3) / 255.)
print(color)
ax.plot3D(Xs[index1, 0], Xs[index1, 1], Xs[index1, 2], color=color, linestyle=linestyle)
for index in index2:
ax.plot3D(Xs[index, 0], Xs[index, 1], Xs[index, 2], color=color, linestyle=linestyle)
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d import proj3d
class Arrow3D(FancyArrowPatch):
def __init__(self, xs, ys, zs, *args, **kwargs):
FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs)
self._verts3d = xs, ys, zs
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))
FancyArrowPatch.draw(self, renderer)
def vis_axis(ax):
for vec, tag, tag_loc in zip([([0, 1], [0, 0], [0, 0]), ([0, 0], [0, 1], [0, 0]), ([0, 0], [0, 0], [0, 1])], [r'$X_w$', r'$Y_w$', r'$Z_w$'], [[1, 0, 0], [0, 1, 0], [0, 0, 1]]):
a = Arrow3D(vec[0], vec[1], vec[2], mutation_scale=20,
lw=1, arrowstyle="->", color="k")
ax.text3D(tag_loc[0], tag_loc[1], tag_loc[2], tag)
ax.add_artist(a)
def vis_axis_xyz(ax, x, y, z, origin=[0., 0., 0.], suffix='_w'):
for vec, tag, tag_loc in zip([([origin[0], (origin+x)[0]], [origin[1], (origin+x)[1]], [origin[2], (origin+x)[2]]), \
([origin[0], (origin+y)[0]], [origin[1], (origin+y)[1]], [origin[2], (origin+y)[2]]), \
([origin[0], (origin+z)[0]], [origin[1], (origin+z)[1]], [origin[2], (origin+z)[2]])], [r'$X%s$'%suffix, r'$Y%s$'%suffix, r'$Z%s$'%suffix], [origin+x, origin+y, origin+z]):
a = Arrow3D(vec[0], vec[1], vec[2], mutation_scale=20,
lw=1, arrowstyle="->", color="k")
ax.text3D(tag_loc[0], tag_loc[1], tag_loc[2], tag)
ax.add_artist(a)
```
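`clip` above is the standard Sutherland-Hodgman clipper: the subject polygon is cut edge by edge against a convex clip polygon given in counter-clockwise order. A short usage sketch, assuming it runs in the same module as the code above:
```python
subject = [(0, 0), (1, 0), (1, 1), (0, 1)]                   # unit square
clipper = [(0.5, 0.5), (1.5, 0.5), (1.5, 1.5), (0.5, 1.5)]   # square shifted by (0.5, 0.5)
print(clip(subject, clipper))
# -> the four corners of the 0.5 x 0.5 overlap square between (0.5, 0.5) and (1, 1)
```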
|
{
"source": "JerryPopi/djangur-py",
"score": 2
}
|
#### File: JerryPopi/djangur-py/djangur.py
```python
import asyncio
import discord
from commands import Commands, Guild_Instance, leave, play_search
import os
from pymongo import MongoClient
from dotenv import load_dotenv
load_dotenv()
CONNECTION_STRING = f"mongodb+srv://{os.environ['mongo_user']}:{os.environ['mongo_pass']}@dj<EMAIL>.mongodb.net/djangur?retryWrites=true&w=majority"
db_client = MongoClient(CONNECTION_STRING)
db = db_client['djangur']
client = discord.Client()
@client.event
async def on_ready():
print('Logged in as {0.user}'.format(client))
print(os.environ['prefix'])
@client.event
async def on_message(msg):
if msg.author == client.user:
return
ginst = Guild_Instance.by_id(msg.guild.id)
ginst.tc = msg.channel
ginst.db = db[str(msg.guild.id)]
if msg.content.isdigit() and ginst.searching:
await play_search(msg.content, msg=msg, client=client, ginst=ginst)
if not msg.content.startswith(os.environ['prefix']):
return
no_prefix = msg.content[len(os.environ['prefix']):]
split = no_prefix.split(' ', 1)
cmd = split[0]
args = split[1] if (len(split) == 2) else ''
if cmd in Commands.command_map:
await Commands.command_map[cmd].fn(args, msg=msg, client=client, ginst=ginst)
else:
await msg.channel.send(f'{cmd}: Command not found.')
@client.event
async def on_voice_state_update(member, before, after):
if not member.name == 'Tramvai':
return
elif before.channel is None:
ginst = Guild_Instance.by_id(after.channel.guild.id)
voice = after.channel.guild.voice_client
time = 0
while True:
await asyncio.sleep(1)
time = time + 1
if voice.is_playing() and not voice.is_paused():
time = 0
if time == 600:
print(await Commands.command_map['leave'].fn(None, None, None, ginst))
if not voice.is_connected():
break
elif before.channel is not None:
if after.channel is None:
ginst = Guild_Instance.by_id(before.channel.guild.id)
await Commands.command_map['leave'].fn(None, None, None, ginst)
client.run(os.environ['token'])
```
|
{
"source": "JerryPopi/python_project",
"score": 3
}
|
#### File: python_project/src/classes.py
```python
import pygame
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
class Rectangle(pygame.sprite.Sprite):
def __init__(self, width, height, screen = (800, 640), colour = WHITE):
self.width = width
self.height = height
self.screen = screen
super().__init__()
self.image = pygame.Surface([width, height])
self.image.fill(BLACK)
self.image.set_colorkey(BLACK)
pygame.draw.rect(self.image, colour, [0, 0, width, height])
self.rect = self.image.get_rect()
def set_pos(self, x, y):
self.rect.x = x
self.rect.y = y
def get_screen(self, sizes):
self.screen = sizes
class Paddle(Rectangle):
def set_pos_p(self, y):
self.set_pos(self.rect.x, y)
if y <= 0:
self.set_pos(self.rect.x, 0)
elif y >= self.screen[1] - self.height:
self.set_pos(self.rect.x, self.screen[1] - self.height)
class Ball(Rectangle):
def __init__(self, width):
super().__init__(width, width)
```
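A short usage sketch for the paddle clamping above; it assumes the file is importable as `classes` (the name is taken from the heading) and relies on plain pygame surfaces not needing an open window:
```python
from classes import Paddle   # hypothetical import path, matching src/classes.py above

paddle = Paddle(20, 100, screen=(800, 640))
paddle.set_pos_p(-50)        # below 0, so it clamps to the top edge
print(paddle.rect.y)         # 0
paddle.set_pos_p(10_000)     # past the bottom, so it clamps to screen height minus paddle height
print(paddle.rect.y)         # 640 - 100 = 540
```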
|
{
"source": "Jerry-py/BreadBot-Source-Code",
"score": 2
}
|
#### File: BreadBot-Source-Code/cogs/errors.py
```python
import discord
import asyncio
import traceback
import json
import string, random
import utility
from random import choice
from discord.ext import commands
errors = ('ArithmeticError', 'AssertionError', 'BaseException', 'BlockingIOError',
'BrokenPipeError', 'BufferError', 'BytesWarning', 'ChildProcessError', 'ConnectionAbortedError',
'ConnectionError', 'ConnectionRefusedError', 'ConnectionResetError', 'DeprecationWarning', 'EOFError',
'EnvironmentError', 'FileExistsError', 'FileNotFoundError','FloatingPointError', 'FutureWarning',
'GeneratorExit', 'IOError', 'ImportError', 'ImportWarning', 'UnexpectedQuoteError',
'IndentationError', 'IndexError', 'InterruptedError', 'IsADirectoryError', 'KeyError',
'KeyboardInterrupt', 'LookupError', 'MemoryError', 'ModuleNotFoundError', 'NameError',
'NotADirectoryError', 'NotImplemented', 'NotImplementedError', 'OSError', 'OverflowError',
'PendingDeprecationWarning', 'PermissionError', 'ProcessLookupError', 'RecursionError',
'ReferenceError', 'ResourceWarning', 'RuntimeError', 'RuntimeWarning', 'StopAsyncIteration',
'StopIteration', 'SyntaxError', 'SyntaxWarning', 'SystemError', 'SystemExit', 'TabError',
'TimeoutError', 'TypeError', 'UnboundLocalError', 'UnicodeDecodeError', 'UnicodeEncodeError',
'UnicodeError', 'UnicodeTranslateError', 'UnicodeWarning', 'UserWarning', 'ValueError', 'Warning',
'WindowsError', 'ZeroDivisionError')
def gen_code():
chars = list(string.hexdigits) + list(string.octdigits)
num = list(string.digits) + list(string.hexdigits) + list(string.octdigits)
former = []
for _ in range(random.randint(5, 8)):
x = ('y', 'n')
if random.choice(x) == 'y':
if random.choice(x) == 'y':
former.append(random.choice(chars).lower())
else:
former.append(random.choice(chars).upper())
else:
former.append(random.choice(num))
return ''.join(map(str, former))
class Errors(commands.Cog):
def __init__(self, bc):
self.bc = bc
@commands.Cog.listener()
async def on_command_error(self, ctx, error):
#Ignore these errors
ignored = (commands.CommandNotFound, utility.EditError)
if isinstance(error, ignored):
return
#Begin actual error handling
if isinstance(error, commands.errors.NotOwner):
if ctx.command.name == "invite":
return
msg = await ctx.message.reply('Only **{}** can use this command.'.format(await self.bc.fetch_user(self.bc.owner)))
await asyncio.sleep(3)
await msg.delete()
elif isinstance(error, utility.Blacklisted):
msg = await ctx.message.reply(error)
elif isinstance(error, utility.Premium):
msg = await ctx.message.reply(error)
elif isinstance(error, commands.errors.DisabledCommand):
msg = await ctx.message.reply("This command is disabled for mantinance!")
await asyncio.sleep(3)
await msg.delete()
elif isinstance(error, commands.MissingPermissions):
msg = await ctx.message.reply('You need **{}** perms to complete this action.'.format(error.missing_permissions[0]))
await asyncio.sleep(3)
await msg.delete()
elif isinstance(error, commands.errors.NoPrivateMessage):
msg = await ctx.message.reply("The user has blocked me or has the DM's closed.")
await asyncio.sleep(3)
await msg.delete()
elif isinstance(error, commands.CommandOnCooldown):
m, s = divmod(error.retry_after, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
w, d = divmod(d, 7)
em = discord.Embed(color=0xff0000)
if int(d) == 0 and int(h) == 0 and int(m) == 0:
em.set_author(
name=f' You must wait {int(s)} seconds to use this command!'
)
elif int(d) == 0 and int(h) == 0:
em.set_author(
name=
f' You must wait {int(m)} minutes and {int(s)} seconds to use this command!'
)
            elif int(d) == 0:
em.set_author(
name=
f' You must wait {int(h)} hours, {int(m)} minutes and {int(s)} seconds to use this command!'
)
            elif int(w) == 0:
em.set_author(
name=
f' You must wait {int(d)} days, {int(h)} hours, {int(m)} minutes and {int(s)} seconds to use this command!'
)
else:
em.set_author(
name=
f' You must wait {int(w)} weeks, {int(d)} days, {int(h)} hours, {int(m)} minutes and {int(s)} seconds to use this command!'
)
await ctx.message.reply(embed=em)
elif isinstance(error, commands.BotMissingPermissions):
msg = await ctx.message.reply('I am missing permissions.')
await asyncio.sleep(3)
await msg.delete()
elif isinstance(error, commands.MissingRequiredArgument):
data = await self.bc.prefixes.find(ctx.guild.id)
if data is None:
data = {'prefix': '='}
aliases = "|".join(ctx.command.aliases)
cmd_invoke = f"[{ctx.command.name}|{aliases}]" if ctx.command.aliases else ctx.command.name
full_invoke = ctx.command.qualified_name.replace(ctx.command.name, "")
params = ctx.command.usage or ctx.command.signature
prefix = "=" if "prefix" not in data else data["prefix"]
em = discord.Embed(
title="Missing Required Argument",
color = discord.Color.red(),
description="```{}{}{} {}```\n\n**{}**".format(prefix,full_invoke,cmd_invoke,params,error.args[0])
)
await ctx.send(embed=em)
else:
code = gen_code()
error = traceback.format_exception(etype=type(error), value=error, tb=error.__traceback__)
error_type = 'Unspecified'
for i in range(len(error)):
for j in errors:
if j in error[i]:
error_type = j
break
with open("utility/storage/json/errors.json", "r") as f:
data = json.load(f)
data[str(code)] = {}
data[str(code)]['Command'] = ctx.command.qualified_name.title()
data[str(code)]['Error Type'] = error_type
data[str(code)]['Error'] = error
with open("utility/storage/json/errors.json", "w") as f:
json.dump(data,f,indent=4)
def setup(bc):
bc.add_cog(Errors(bc))
```
#### File: BreadBot-Source-Code/cogs/moderation.py
```python
import discord
from discord.ext import commands, tasks
import json
import re
import datetime
from copy import deepcopy
import asyncio
import random
from random import choice
import sys
import time
from dateutil.relativedelta import relativedelta
time_regex = re.compile(r"(?:(\d{1,5})(h|s|m|d||))+?")
time_dict = {"h": 3600, "s": 1, "m": 60, "d": 86400,"":1}
class TimeConverter(commands.Converter):
async def convert(self, ctx, argument):
args = argument.lower()
matches = re.findall(time_regex, args)
time = 0
for key, value in matches:
try:
time += time_dict[value] * float(key)
except KeyError:
raise commands.BadArgument(
f"{value} is an invalid time key! h|m|s|d are valid arguments"
)
except ValueError:
raise commands.BadArgument(f"{key} is not a number!")
return round(time)
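# Example of what the converter above accepts (illustrative, not part of the cog):
# "2h30m" is matched by time_regex as [("2", "h"), ("30", "m")], giving
# 2 * 3600 + 30 * 60 = 9000 seconds; a bare number such as "45" falls under the
# empty suffix, which time_dict maps to seconds.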
class Moderation(commands.Cog):
def __init__(self, bc):
self.bc = bc
self.mute_task = self.check_current_mutes.start()
def cog_unload(self):
self.mute_task.cancel()
@staticmethod
def _overwrites_to_json(overwrites):
try:
return {str(target.id): overwrite._values for target, overwrite in overwrites.items()}
except Exception:
return {}
@tasks.loop(seconds=3)
async def check_current_mutes(self):
currentTime = datetime.datetime.now()
mutes = deepcopy(self.bc.muted_users)
for key, value in mutes.items():
if value['muteDuration'] is None:
continue
unmuteTime = value['mutedAt'] + relativedelta(seconds=value['muteDuration'])
if currentTime >= unmuteTime:
with open('utility/storage/json/muteroles.json', 'r') as f:
channel = json.load(f)
guild = self.bc.get_guild(value['guildId'])
member = guild.get_member(value["_id"])
if not member:
continue
try:
role = discord.utils.get(guild.roles, id=int(channel[str(guild.id)]))
except KeyError:
await self.bc.mutes.delete(member.id)
try:
self.bc.muted_users.pop(member.id)
except:
pass
else:
if role in member.roles:
await member.remove_roles(role)
print(f"Unmuted: {member.display_name}")
await self.bc.mutes.delete(member.id)
try:
self.bc.muted_users.pop(member.id)
except KeyError:
pass
@check_current_mutes.before_loop
async def before_check_current_mutes(self):
await self.bc.wait_until_ready()
@commands.command(
description="Mutes a given user for an amount of time!",
usage='<user> [time]'
)
@commands.bot_has_permissions(manage_roles=True)
@commands.has_permissions(manage_messages=True)
async def mute(self, ctx, member: discord.Member, *, time: TimeConverter=None):
with open('utility/storage/json/muteroles.json', 'r') as f:
channel = json.load(f)
try:
role = discord.utils.get(ctx.guild.roles, id=int(channel[str(ctx.guild.id)]))
except KeyError:
await ctx.send("No muted role was found! Please set one with the muterole command")
return
pos1 = ctx.guild.roles.index(ctx.author.top_role)
pos2 = ctx.guild.roles.index(member.top_role)
if pos1 == pos2:
await ctx.send("Both of you have the same power so i can not mute this person!")
return
elif pos1 < pos2:
await ctx.send("This person has more power than you so i can not mute him for you!")
return
try:
if self.bc.muted_users[member.id]:
await ctx.send("This user is already muted")
return
except KeyError:
pass
data = {
"_id": member.id,
'mutedAt': datetime.datetime.now(),
'muteDuration': time or None,
'mutedBy': ctx.author.id,
'guildId': ctx.guild.id,
}
await self.bc.mutes.upsert(data)
self.bc.muted_users[member.id] = data
await member.add_roles(role)
if not time:
await ctx.send(f"Muted {member.display_name} infinitely")
await self.postmodlog(ctx.guild,"Mute",ctx.author,ctx.channel,member=member,duration=time)
else:
minutes, seconds = divmod(time, 60)
hours, minutes = divmod(minutes, 60)
if int(hours):
await ctx.send(
f"Muted {member.display_name} for {hours} hours, {minutes} minutes and {seconds} seconds"
)
await self.postmodlog(ctx.guild,"Mute",ctx.author,ctx.channel,member=member,duration=f"{hours} hours, {minutes} minutes and {seconds} seconds")
elif int(minutes):
await ctx.send(
f"Muted {member.display_name} for {minutes} minutes and {seconds} seconds"
)
await self.postmodlog(ctx.guild,"Mute",ctx.author,ctx.channel,member=member,duration=f"{minutes} minutes and {seconds} seconds")
elif int(seconds):
await ctx.send(f"Muted {member.display_name} for {seconds} seconds")
await self.postmodlog(ctx.guild,"Mute",ctx.author,ctx.channel,member=member,duration=f"{seconds} seconds")
data = await self.bc.logs.find(ctx.guild.id)
if not data:
data = {"_id": ctx.guild.id, "logs": []}
data["logs"].append({"Moderator": ctx.author.name + "#" + str(ctx.author.discriminator), "Action": ctx.command.qualified_name, "Target": f"{member.name}#{member.discriminator}", "Target ID": member.id, "Date": str(datetime.datetime.utcnow().strftime("%x %X"))})
await self.bc.logs.upsert(data)
@commands.command(
name='unmute',
description="Unmuted a member!",
usage='<user>'
)
@commands.has_permissions(manage_roles=True)
async def unmute(self, ctx, member: discord.Member):
with open('utility/storage/json/muteroles.json', 'r') as f:
channel = json.load(f)
role = discord.utils.get(ctx.guild.roles, id=int(channel[str(ctx.guild.id)]))
if not role:
await ctx.send("No muted role was found! Please create one called `Muted`")
return
await self.bc.mutes.delete(member.id)
try:
self.bc.muted_users.pop(member.id)
except KeyError:
pass
if role not in member.roles:
await ctx.send("This member is not muted.")
return
await member.remove_roles(role)
data = await self.bc.logs.find(ctx.guild.id)
if not data:
data = {"_id": ctx.guild.id, "logs": []}
data["logs"].append({"Moderator": ctx.author.name + "#" + str(ctx.author.discriminator), "Action": ctx.command.qualified_name, "Target": f"{member.name}#{member.discriminator}", "Target ID": member.id, "Date": str(datetime.datetime.utcnow().strftime("%x %X"))})
await self.bc.logs.upsert(data)
await self.postmodlog(ctx.guild,"Unmute",ctx.author,ctx.channel,member=member)
await ctx.send(f"Unmuted `{member.display_name}`")
@commands.command(
pass_context=True,
name='addrole',
description='add a role to someone ',
usage='<member> <role>')
@commands.has_permissions(manage_roles=True)
async def addrole(self,
ctx,
member: discord.Member,
*,
role: discord.Role):
await member.add_roles(role)
await ctx.message.delete()
data = await self.bc.logs.find(ctx.guild.id)
if not data:
data = {"_id": ctx.guild.id, "logs": []}
data["logs"].append({"Moderator": ctx.author.name + "#" + str(ctx.author.discriminator), "Action": ctx.command.qualified_name, "Target": f"{member.name}#{member.discriminator}", "Target ID": member.id, "Date": str(datetime.datetime.utcnow().strftime("%x %X"))})
await self.bc.logs.upsert(data)
await ctx.send(f'{member} Was Given {role}')
    @commands.command(description='Mass-nick everyone to a given name', usage='<name>')
@commands.has_permissions(manage_guild=True)
async def massnick(self, ctx, *, nick):
for member in ctx.guild.members:
if member != ctx.guild.owner:
try:
await member.edit(nick=nick)
await asyncio.sleep(0.5)
except:
pass
        await ctx.send(f"Every member's nickname was set to `{nick}`")
@commands.command(description='revert all nicknames to regular', usage=' ')
@commands.has_permissions(manage_nicknames=True)
async def revert(self, ctx):
for member in ctx.guild.members:
if member != ctx.guild.owner:
try:
await member.edit(nick=f'{member.name}')
await asyncio.sleep(0.5)
except:
pass
await ctx.send('All usernames have returned to normal!')
@commands.command(
pass_context=True,
name='takerole',
description='takes a role from someone ',
usage='<member> <role>')
@commands.has_permissions(manage_roles=True)
async def takerole(self,
ctx,
member: discord.Member,
*,
role: discord.Role):
pos1 = ctx.guild.roles.index(ctx.author.top_role)
pos2 = ctx.guild.roles.index(member.top_role)
if pos1 == pos2:
await ctx.send("Both of you have the same power so i can not take a role from this person!")
return
elif pos1 < pos2:
await ctx.send("This person has more power than you so i can not take a role from him for you!")
return
await ctx.message.delete()
await member.remove_roles(role)
data = await self.bc.logs.find(ctx.guild.id)
if not data:
data = {"_id": ctx.guild.id, "logs": []}
data["logs"].append({"Moderator": ctx.author.id, "Action": ctx.command.qualified_name, "Target": f"{member.name}#{member.discriminator}", "Target ID": member.id, "Date": str(datetime.datetime.utcnow().strftime("%x %X"))})
await self.bc.logs.upsert(data)
await ctx.send(f'{role} was taken from {member}')
@commands.command(
name='kick',
description='kick people',
usage='<user> [reason]')
@commands.has_permissions(kick_members=True)
async def kick(self,
ctx,
member: discord.Member,
*,
reason="No Reason Specified"):
user = member
pos1 = ctx.guild.roles.index(ctx.author.top_role)
pos2 = ctx.guild.roles.index(member.top_role)
if pos1 == pos2:
await ctx.send("Both of you have the same power so i can not kick this person!")
return
elif pos1 < pos2:
await ctx.send("This person has more power than you so i can not kick him for you!")
return
await ctx.message.delete()
try:
await user.send(
f"You were kicked from {ctx.guild.name} for the following reason:\n\n{reason}"
)
except:
await ctx.send("An error occured trying to dm this member!")
try:
await member.kick(reason=reason)
await ctx.send("I have kicked this member!")
except:
await ctx.send("Could not kick this member!")
await self.postmodlog(ctx.guild,"Kick",ctx.author,ctx.channel,reason)
data = await self.bc.logs.find(ctx.guild.id)
if not data:
data = {"_id": ctx.guild.id, "logs": []}
data["logs"].append({"Moderator": ctx.author.name + "#" + str(ctx.author.discriminator), "Action": ctx.command.qualified_name, "Target": f"{member.name}#{member.discriminator}", "Target ID": member.id, "Date": str(datetime.datetime.utcnow().strftime("%x %X"))})
await self.bc.logs.upsert(data)
@commands.command(
name='ban',
description='ban people',
usage='<user> [reason]')
@commands.has_permissions(ban_members=True)
async def ban(self, ctx, member: discord.Member, *, reason="No Reason Specified"):
user = member
pos1 = ctx.guild.roles.index(ctx.author.top_role)
pos2 = ctx.guild.roles.index(member.top_role)
if pos1 == pos2:
await ctx.send("Both of you have the same power so i can not ban this person!")
return
elif pos1 < pos2:
await ctx.send("This person has more power than you so i can not ban him for you!")
return
await ctx.message.delete()
try:
await user.send(
f"You were banned from {ctx.guild.name} for the following reason:\n\n{reason}"
)
except:
await ctx.send("An error occured trying to dm this member!")
try:
await member.ban(reason=reason)
await ctx.send("I have ban this member!")
except:
await ctx.send("Could not ban this member!")
await self.postmodlog(ctx.guild,"Ban",ctx.author,ctx.channel,reason)
data = await self.bc.logs.find(ctx.guild.id)
if not data:
data = {"_id": ctx.guild.id, "logs": []}
data["logs"].append({"Moderator": ctx.author.name + "#" + str(ctx.author.discriminator), "Action": ctx.command.qualified_name, "Target": f"{member.name}#{member.discriminator}", "Target ID": member.id, "Date": str(datetime.datetime.utcnow().strftime("%x %X"))})
await self.bc.logs.upsert(data)
@commands.command(
description='Unban someone by their id',
usage='<userid>'
)
@commands.has_permissions(ban_members=True)
async def unban(self, ctx, member):
member = await self.bc.fetch_user(int(member))
await ctx.guild.unban(member)
await ctx.send(f"unbanned {member.name}")
data = await self.bc.logs.find(ctx.guild.id)
if not data:
data = {"_id": ctx.guild.id, "logs": []}
data["logs"].append({"Moderator": ctx.author.name + "#" + str(ctx.author.discriminator), "Action": ctx.command.qualified_name, "Target": f"{member.name}#{member.discriminator}", "Target ID": member.id, "Date": str(datetime.datetime.utcnow().strftime("%x %X"))})
await self.bc.logs.upsert(data)
await self.postmodlog(ctx.guild,"Unban",ctx.author,ctx.channel)
@commands.command(
name='purge',
description=
'Clear messages with no limit, in case you want to clear your entire chat',
usage='<amount>')
@commands.has_permissions(manage_messages=True)
async def purge(self, ctx, amount=5):
await ctx.channel.purge(limit=1 + amount)
await self.postmodlog(ctx.guild,"Channel Purge",ctx.author,ctx.channel)
@commands.command(description='lock the channel ', usage=' ')
@commands.has_permissions(manage_guild=True)
async def lock(self,ctx,channel:discord.TextChannel=None):
channel = channel or ctx.channel
data = await self.bc.modroles.find(ctx.guild.id)
data2 = await self.bc.locked.find(channel.id)
if data2:
return await ctx.send("This channel is already locked!")
else:
data2 = {"_id": channel.id, "name": channel.name, "perms": {}}
# Before the channel is locked, its current permission overwrites are saved to the database
data2["perms"] = self._overwrites_to_json(channel.overwrites)
for role in channel.overwrites:
if role.name == self.bc.user.name:
continue
perms = channel.overwrites_for(role)
perms.send_messages = False
perms.add_reactions = False
await channel.set_permissions(role, overwrite=perms)
await asyncio.sleep(0.5)
try:
for role in data["roles"]:
role = discord.utils.get(ctx.guild.roles, id=role)
perms = channel.overwrites_for(role)
perms.send_messages = True
perms.add_reactions = True
await channel.set_permissions(role, overwrite=perms)
await asyncio.sleep(0.5)
except Exception as e:
print(e)
await self.bc.locked.upsert(data2)
await ctx.send(f"Locked {channel.mention}. Everyone that doesnt have a modrole set with me cant chat here.")
await channel.edit(name="🔒 " + channel.name)
await self.postmodlog(ctx.guild,"Channel Lock",ctx.author,channel)
@commands.command(description='unlock a channel you locked', usage='unlock')
@commands.has_permissions(manage_guild=True)
async def unlock(self,ctx,channel:discord.TextChannel=None):
channel = channel or ctx.channel
with open('utility/storage/json/muteroles.json', 'r') as f:
role = json.load(f)
try:
muterole = discord.utils.get(ctx.guild.roles, id=int(role[str(ctx.guild.id)]))
except:
muterole = None
data = await self.bc.locked.find(channel.id)
if data is None:
return await ctx.send("This channel is not locked!")
for role, permissions in data["perms"].items():
if muterole and int(role) == muterole.id:
continue
guildrole = discord.utils.get(ctx.guild.roles, id=int(role))
await ctx.channel.set_permissions(guildrole, overwrite=discord.PermissionOverwrite(**permissions))
await asyncio.sleep(0.5)
await ctx.send(f"Unlocked {channel.mention} all roles can talk here now")
await channel.edit(name=data["name"])
await self.bc.locked.delete(channel.id)
await self.postmodlog(ctx.guild,"Channel Unlock",ctx.author,channel)
@commands.command(description='set a slowmode to a channel. leave blank to reset. max is 21600 seconds', usage='[seconds]')
@commands.has_permissions(manage_channels=True)
async def slowmode(self,ctx,*, time: TimeConverter=None):
if time is None:
time = 0
if time > 21600:
await ctx.send("That is over 6 hours, I can't do that.")
return
m, s = divmod(time, 60)
h, m = divmod(m, 60)
await ctx.channel.edit(slowmode_delay=time)
em=discord.Embed(
color=0xff0000)
if int(h) == 0 and int(m) == 0:
em.set_author(name=f'Slowmode is now {int(s)} seconds')
elif int(h) == 0:
em.set_author(name=f' Slowmode is now {int(m)} minutes and {int(s)} seconds')
else:
em.set_author(name=f' Slowmode is now {int(h)} hours, {int(m)} minutes and {int(s)} seconds')
await ctx.send(embed=em)
await self.postmodlog(ctx.guild,"Slowmode Change",ctx.author,ctx.channel)
@commands.command(
description="Warn someone",
usage="<user> [reason]"
)
@commands.has_permissions(manage_messages=True)
async def warn(self,ctx,member:discord.Member,*,reason="No Reason Given"):
data = await self.bc.warns.find(ctx.guild.id)
pos1 = ctx.guild.roles.index(ctx.author.top_role)
pos2 = ctx.guild.roles.index(member.top_role)
if pos1 == pos2:
await ctx.send("Both of you have the same power so i can not warn this person!")
return
elif pos1 < pos2:
await ctx.send("This person has more power than you so i can not warn him for you!")
return
if not data:
data = {
"_id": ctx.guild.id,
"cases": 0,
str(member.id): [],
}
if str(member.id) not in data:
data[str(member.id)] = []
data[str(member.id)].append({"warning": len(data[str(member.id)]) + 1, "reason": reason,"moderator":ctx.author.id, "case": data["cases"] + 1, "date": str(datetime.datetime.utcnow())})
data["cases"] += 1
await self.bc.warns.upsert(data)
data = await self.bc.logs.find(ctx.guild.id)
try:
await member.send(
f"You were warned in {ctx.guild.name} for the following reason:\n\n{reason}"
)
except:
await ctx.send("An error occured trying to dm this member!")
if not data:
data = {"_id": ctx.guild.id, "logs": []}
data["logs"].append({"Moderator": ctx.author.name + "#" + str(ctx.author.discriminator), "Action": ctx.command.qualified_name, "Target": f"{member.name}#{member.discriminator}", "Target ID": member.id, "Date": str(datetime.datetime.utcnow().strftime("%x %X"))})
await self.bc.logs.upsert(data)
await ctx.send("Warned **{}** for the reason:\n`{}`".format(member,reason))
await self.postmodlog(ctx.guild,"Warn",ctx.author,ctx.channel,member=member,reason=reason,case=data["cases"])
@commands.command(
description="Check a person's warns",
usage="[user]"
)
async def warns(self,ctx,member:discord.Member=None):
data = await self.bc.warns.find(ctx.guild.id)
if not member:
member = ctx.author
if not data or str(member.id) not in data or len(data[str(member.id)]) == 0:
await ctx.send("This person has no warns")
return
else:
em = discord.Embed(
title="{}'s warnings".format(member.name),
color = random.choice(self.bc.color_list)
)
warns = data[str(member.id)]
for thing in warns:
warnno = thing["warning"]
reason = thing["reason"]
mod = await self.bc.fetch_user(thing["moderator"])
case = thing["case"]
em.add_field(name=f"Warning {warnno}",value=f"Reason: {reason}\nModerator: {mod}\nDate: {thing['date']}\nCase: {case}",inline=False)
await ctx.send(embed=em)
@commands.command(
aliases=["delwarn"],
description="delete a warn",
usage="<user> <case #>"
)
@commands.has_permissions(manage_guild=True)
async def deletewarn(self,ctx,member:discord.Member,case:int):
data = await self.bc.warns.find(ctx.guild.id)
if not data:
await ctx.send("Your server has no warns")
return
if str(member.id) not in data:
await ctx.send("This person has no warns")
return
else:
warns = data[str(member.id)]
for thing in warns:
if case == thing["case"]:
warns.remove({"warning":thing["warning"], "reason":thing["reason"],"moderator":thing["moderator"],"case":case})
await self.bc.warns.upsert(data)
await ctx.send("Succesfully deleted warn")
await self.postmodlog(ctx.guild,"Warn Deleted",ctx.author,ctx.channel,reason=None,case=case)
break
else:
continue
async def postmodlog(self,guild,action,moderator,channelexec,member=None,reason=None,case=None,duration=None):
data = await self.bc.modlogs.find(guild.id)
if not data or "channel" not in data:
return
channel = discord.utils.get(guild.text_channels,id=data["channel"])
em = discord.Embed(
title="Moderation Command Action",
color=random.choice(self.bc.color_list)
)
em.add_field(name="Action:",value=action,inline=False)
em.add_field(name="Responsible Moderator:",value=moderator.name,inline=False)
em.add_field(name="Channel Executed:",value=channelexec.mention,inline=False)
if reason is not None:
em.add_field(name="Reason:",value=reason,inline=False)
if case is not None:
em.add_field(name="Case:",value=case,inline=False)
if duration is not None:
em.add_field(name="Duration:",value=duration)
if member is not None:
em.add_field(name="User affected:",value=member,inline=False)
await channel.send(embed=em)
def setup(bc):
bc.add_cog(Moderation(bc))
```
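For reference, every command above appends to the same per-guild log document before calling `self.bc.logs.upsert`. A sketch of that document's shape is shown below; the field names come from the code above, while the concrete values are invented for illustration:
```python
# Illustrative shape of one per-guild document in self.bc.logs, as appended to
# by the moderation commands above. All concrete values are made up.
example_log_document = {
    "_id": 1234567890,  # guild id
    "logs": [
        {
            "Moderator": "Mod#0001",
            "Action": "kick",
            "Target": "SomeUser#1234",
            "Target ID": 987654321,
            "Date": "01/02/23 15:04:05",
        },
    ],
}
```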
#### File: BreadBot-Source-Code/cogs/reactions.py
```python
import discord
import asyncio
import emojis
from discord.ext import commands
class ReactionRoles(commands.Cog):
def __init__(self, bc):
self.bc = bc
@commands.Cog.listener()
async def on_raw_reaction_add(self, payload):
emoji = str(payload.emoji)
data = await self.bc.reactions.find(payload.channel_id)
if not data or not payload.guild_id:
return
print(emoji)
if str(payload.message_id) not in data["menus"].keys():
return
menu = data["menus"][str(payload.message_id)]
if emoji in menu.keys():
guild = self.bc.get_guild(payload.guild_id)
role = guild.get_role(menu[emoji])
await payload.member.add_roles(role)
@commands.Cog.listener()
async def on_raw_reaction_remove(self, payload):
emoji = str(payload.emoji)
data = await self.bc.reactions.find(payload.channel_id)
if not data or not payload.guild_id:
return
print(emoji)
if str(payload.message_id) not in data["menus"].keys():
return
menu = data["menus"][str(payload.message_id)]
if emoji in menu.keys():
guild = self.bc.get_guild(payload.guild_id)
role = guild.get_role(menu[emoji])
member = guild.get_member(payload.user_id)
await member.remove_roles(role)
@commands.group(description="Make a reaction role menu", invoke_without_command=True, aliases=["rr"])
@commands.cooldown(1,5,commands.BucketType.guild)
async def reactionrole(self,ctx):
await ctx.invoke(self.bc.get_command("help"), entity="reactionrole")
@reactionrole.command(description="Quick setup for a reaction role")
@commands.is_owner()
@commands.cooldown(1,5,commands.BucketType.guild)
async def make(self,ctx):
embed = discord.Embed()
roles = []
channel = None
def check(msg):
return msg.author == ctx.author and msg.channel == ctx.channel
await ctx.send("What channel should the menu be in?")
try:
msg = await self.bc.wait_for("message", timeout=120, check=check)
except asyncio.TimeoutError:
return await ctx.send("Timed out!")
else:
try:
_ = await commands.TextChannelConverter().convert(ctx, msg.content)
except:
return await ctx.send("Channel is invalid")
else:
channel = _
await ctx.send("What do you want the title and description to be of the reaction role menu? You can also use {roles} to already display the roles!\n\nFormat: `Title||Description`\nExample: `Color Roles||Come get your color roles here:\n{roles}`")
try:
msg = await self.bc.wait_for("message", timeout=120, check=check)
except asyncio.TimeoutError:
return await ctx.send("Timed out!")
else:
if not "||" in msg.content:
return await ctx.send("You need to have a title and a description!")
format = msg.content.split("||")
if len(format) > 2:
return await ctx.send("You cant have more titles or descriptions!")
embed.title = format[0]
embed.description = format[1]
await ctx.send("This is what your menu will look like!", embed=embed)
await ctx.send("What color do you want the embed to be? **Use hex code format**\n\nExample: `#ff0000`")
try:
msg = await self.bc.wait_for("message", timeout=120, check=check)
except asyncio.TimeoutError:
return await ctx.send("Timed out!")
else:
if "#" not in msg.content:
return await ctx.send("You must use hex code format")
try:
color = int(msg.content.replace("#", "0x"), 16)
except:
return await ctx.send("An error occured whilst converting this to a number!")
else:
embed.color = color
await ctx.send("Your menu will now look like this!", embed=embed)
await ctx.send("Now here comes the fun part. Put down your emoji and a role! Say `done` when you are finished!\n\nExample: `:kek: @\everyone`")
while True:
try:
msg = await self.bc.wait_for("message", timeout=120, check=check)
except asyncio.TimeoutError:
return await ctx.send("Timed out!")
else:
if msg.content == "done":
break
format = msg.content.split(" ")
if len(format) != 2:
return await ctx.send("You need an emoji and a role seperated by a space!")
try:
role = await commands.RoleConverter().convert(ctx, format[1])
except:
return await ctx.send("Role is invalid!")
try:
await commands.EmojiConverter().convert(ctx, format[0])
except:
emoji = emojis.get(format[0])
try:
emoji = emoji.pop()
except:
return await ctx.send("Invalid emoji")
else:
emoji = format[0]
emoji = str(emoji)
roles.append({"emoji": emoji, "role": role.id})
await msg.add_reaction("<a:tick:816709602384937031>")
role_desc = ""
for data in roles:
role = await commands.RoleConverter().convert(ctx, str(data["role"]))
role_desc += "{} - {}\n".format(data["emoji"], role.mention)
embed.description = embed.description.replace("{roles}", role_desc)
data = await self.bc.reactions.find(channel.id)
try:
msg = await channel.send(embed=embed)
except:
return await ctx.send("I cannot send messages in this channel!")
for reaction in roles:
await msg.add_reaction(reaction["emoji"])
if not data:
data = {"_id": channel.id, "disabled": False, "menus": {}}
data["menus"][str(msg.id)] = {}
for reaction in roles:
data["menus"][str(msg.id)][reaction["emoji"]] = reaction["role"]
await self.bc.reactions.upsert(data)
def setup(bc):
bc.add_cog(ReactionRoles(bc))
```
#### File: BreadBot-Source-Code/utility/errors.py
```python
from discord.ext import commands
class Blacklisted(commands.CommandError):
def __init__(self):
super().__init__("Seems like Bungo blacklisted you from using BreadBot!")
class Premium(commands.CommandError):
def __init__(self):
super().__init__("Seems like you need premium to use this command. You can buy premium at <https://patreon.com/breadbot_>!")
class EditError(commands.CommandError):
pass
```
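These exceptions are intended to be raised from command checks so a central error handler can display their messages. A minimal, hypothetical sketch of such a check follows; the import path and the `blacklist` set are assumptions, not taken from the source:
```python
# Hypothetical global check that raises the Blacklisted error defined above.
# The import path and the blacklist set are assumptions for illustration.
from discord.ext import commands

from utility.errors import Blacklisted


def setup_checks(bot: commands.Bot, blacklist: set) -> None:
    @bot.check
    async def not_blacklisted(ctx: commands.Context) -> bool:
        # Raising the custom error lets a central on_command_error handler show its message.
        if ctx.author.id in blacklist:
            raise Blacklisted()
        return True
```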
#### File: BreadBot-Source-Code/utility/util.py
```python
import asyncio
import discord
from discord.ext import commands
import json
class Paginator(discord.ui.View):
def __init__(self, entries, color, title, ctx):
self.page = 0
self.entries = entries
self.color = color
self.title = title
self.ctx = ctx
super().__init__()
@discord.ui.button(label="<<", style=discord.ButtonStyle.green)
async def flipfront(self, button: discord.ui.Button, interation: discord.Interaction):
if interation.user.id != self.ctx.author.id:
await interation.response.send_message("You cannot use this!", ephemeral=True)
return
self.page = 0
embed = discord.Embed(
title=self.title,
color=self.color,
description=self.entries[self.page]
if type(self.entries[self.page]) != dict
else self.entries[self.page]["content"],
)
if type(self.entries[self.page]) == dict:
embed.set_image(url=self.entries[self.page]["image"])
embed.set_footer(text="Page ({}/{})".format(self.page + 1, len(self.entries)))
await interation.message.edit(view=self, embed=embed)
@discord.ui.button(label="<", style=discord.ButtonStyle.green)
async def flipback(self, button: discord.ui.Button, interation: discord.Interaction):
if interation.user.id != self.ctx.author.id:
await interation.response.send_message("You cannot use this!", ephemeral=True)
return
if self.page == 0:
return
self.page -= 1
embed = discord.Embed(
title=self.title,
color=self.color,
description=self.entries[self.page]
if type(self.entries[self.page]) != dict
else self.entries[self.page]["content"],
)
if type(self.entries[self.page]) == dict:
embed.set_image(url=self.entries[self.page]["image"])
embed.set_footer(text="Page ({}/{})".format(self.page + 1, len(self.entries)))
await interation.message.edit(view=self, embed=embed)
@discord.ui.button(label=">", style=discord.ButtonStyle.green)
async def flipforward(self, button: discord.ui.Button, interation: discord.Interaction):
if interation.user.id != self.ctx.author.id:
await interation.response.send_message("You cannot use this!", ephemeral=True)
return
if self.page + 1 == len(self.entries):
return
self.page += 1
embed = discord.Embed(
title=self.title,
color=self.color,
description=self.entries[self.page]
if type(self.entries[self.page]) != dict
else self.entries[self.page]["content"],
)
if type(self.entries[self.page]) == dict:
embed.set_image(url=self.entries[self.page]["image"])
embed.set_footer(text="Page ({}/{})".format(self.page + 1, len(self.entries)))
await interation.message.edit(view=self, embed=embed)
@discord.ui.button(label=">>", style=discord.ButtonStyle.green)
async def fliplast(self, button: discord.ui.Button, interation: discord.Interaction):
if interation.user.id != self.ctx.author.id:
await interation.response.send_message("You cannot use this!", ephemeral=True)
return
self.page = len(self.entries) - 1
embed = discord.Embed(
title=self.title,
color=self.color,
description=self.entries[self.page]
if type(self.entries[self.page]) != dict
else self.entries[self.page]["content"],
)
if type(self.entries[self.page]) == dict:
embed.set_image(url=self.entries[self.page]["image"])
embed.set_footer(text="Page ({}/{})".format(self.page + 1, len(self.entries)))
await interation.message.edit(view=self, embed=embed)
class Pag:
def __init__(self, **kwargs):
self.title = kwargs.get("title")
self.color = kwargs.get("color")
self.entries = kwargs.get("entries")
async def start(self, ctx: commands.Context):
embed = discord.Embed(
title=self.title,
color=self.color,
description=self.entries[0]
if type(self.entries[0]) != dict
else self.entries[0]["content"],
)
if type(self.entries[0]) == dict:
embed.set_image(url=self.entries[0]["image"])
embed.set_footer(text="Page (1/{})".format(len(self.entries)))
await ctx.send(embed=embed, view=Paginator(self.entries, self.color, self.title, ctx))
async def docs(command, category, bc, ctx):
command = bc.get_command(command)
cog = command.cog
with open("utility/storage/json/docs.json", "r") as f:
data = json.load(f)
data[category] = []
for command in cog.walk_commands():
if command.hidden:
continue
if hasattr(command, "all_commands"):
for command in list(set(command.all_commands.values())):
aliases = "|".join(command.aliases)
if not command.checks:
data[category].append(
{
"name": command.qualified_name,
"description": command.description,
"usage": "={}{} {}".format(
command.qualified_name.replace(
command.name, ""
),
f"[{command.name}|{aliases}]"
if command.aliases
else command.name,
command.usage or command.signature,
),
"permission": "No permissions required",
}
)
else:
try:
command.checks[0](ctx)
except Exception as e:
data[category].append(
{
"name": command.qualified_name,
"description": command.description,
"usage": "={}{} {}".format(
command.qualified_name.replace(
command.name, ""
),
f"[{command.name}|{aliases}]"
if command.aliases
else command.name,
command.usage or command.signature,
),
"permission": str(e)
.replace("You are missing ", "")
.replace(
" permission(s) to run this command.", ""
),
}
)
else:
if command.parent:
continue
aliases = "|".join(command.aliases)
if not command.checks:
data[category].append(
{
"name": command.qualified_name,
"description": command.description,
"usage": "={}{} {}".format(
command.qualified_name.replace(command.name, ""),
f"[{command.name}|{aliases}]"
if command.aliases
else command.name,
command.usage or command.signature,
),
"permission": "No permissions required",
}
)
else:
try:
command.checks[0](ctx)
except Exception as e:
data[category].append(
{
"name": command.qualified_name,
"description": command.description,
"usage": "={}{} {}".format(
command.qualified_name.replace(
command.name, ""
),
f"[{command.name}|{aliases}]"
if command.aliases
else command.name,
command.usage or command.signature,
),
"permission": str(e)
.replace("You are missing ", "")
.replace(
" permission(s) to run this command.", ""
),
}
)
with open("utility/storage/json/docs.json", "w") as f:
json.dump(data, f)
async def GetMessage(
bc, ctx, contentOne="Default Message", contentTwo="\uFEFF", timeout=100
):
"""
This function sends an embed containing the params and then waits for a message to return
Params:
- bc (commands.Bot object) : The bot/client used to wait for the reply
- ctx (context object) : Used for sending messages
- Optional Params:
- contentOne (string) : Embed title
- contentTwo (string) : Embed description
- timeout (int) : Timeout for wait_for
Returns:
- msg.content (string) : If a message is detected, the content will be returned
or
- False (bool) : If a timeout occurs
"""
embed = discord.Embed(title=f"{contentOne}", description=f"{contentTwo}",)
sent = await ctx.send(embed=embed)
try:
msg = await bc.wait_for(
"message",
timeout=timeout,
check=lambda message: message.author == ctx.author
and message.channel == ctx.channel,
)
if msg:
return msg.content
except asyncio.TimeoutError:
return False
```
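As a usage illustration, here is a minimal, hypothetical cog that drives the `Pag` helper above; the cog name, page contents, and import path are assumptions, not taken from the source:
```python
# Hypothetical cog showing one way to drive the Pag paginator defined above.
# The import path, cog name, and page contents are assumptions.
import discord
from discord.ext import commands

from utility.util import Pag


class Example(commands.Cog):
    def __init__(self, bc):
        self.bc = bc

    @commands.command()
    async def rules(self, ctx):
        # Each entry becomes one page; dict entries may also carry an "image" key.
        pages = [
            "Page one text",
            "Page two text",
            {"content": "Page three", "image": "https://example.com/img.png"},
        ]
        await Pag(title="Server Rules", color=discord.Color.blurple(), entries=pages).start(ctx)


def setup(bc):
    bc.add_cog(Example(bc))
```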
|
{
"source": "Jerry-py/Clutter",
"score": 2
}
|
#### File: utils/content/mongodb.py
```python
from typing import Any
from pymongo import MongoClient
class Utils:
@staticmethod
def assemble(path: list, value: Any) -> dict:
to_asm, i = {}, 0
ref = to_asm
if not path:
return value
for _ in path:
i += 1
if i == len(path):
to_asm[_] = value
break
to_asm[_] = {}
to_asm = to_asm[_]
return ref
@staticmethod
def find(get_from: dict, path: list, *, default: Any = None) -> Any:
key = path.pop(-1)
for _ in path:
try:
get_from = get_from[_]
except (KeyError, TypeError, AttributeError):
return default
return get_from.get(key, default)
class MongoManager:
def __init__(self, connect_url: str, database: str):
self.client = MongoClient(connect_url)
self.db = self.client[database]
self.utils = Utils
def set(self, path: str, value: Any) -> None:
path = [_ for _ in path.split(".") if _ != ""]
collection = self.db[path.pop(0)]
if not path: # set( "collectionName" )
result = collection.find_one({"_id": "_"}, {"_id": 1})
if result is None:
return collection.insert_one({"_id": "_", "_": value})
return collection.update_one({"_id": "_"}, {"$set": {"_": value}})
_id = path.pop(0)
if not path: # set( "collectionName.cardID" )
result = collection.find_one({"_id": _id}, {"_id": 1})
if result is None:
return collection.insert_one({"_id": _id, "_": value})
return collection.update_one({"_id": _id}, {"$set": {"_": value}})
result = collection.find_one({"_id": _id}, {"_id": 1}) # set( "collectionName.cardID.DIpath" )
if result is None:
return collection.insert_one(
{"_id": _id, **self.utils.assemble(path, value)}) # is there a better way for this?
return collection.update_one({"_id": _id}, {"$set": {".".join(path): value}})
def rem(self, path: str) -> None:
path = [_ for _ in path.split(".") if _ != ""]
collection = self.db[path.pop(0)]
if not path: # rem( "collectionName" )
return collection.drop()
_id = path.pop(0)
if not path: # rem( "collectionName.cardID" )
return collection.delete_one({"_id": _id})
if len(path) == 1:
key = path.pop(0) # rem( "collectionName.cardID.varName" )
return collection.update_one({"_id": _id}, {"$unset": {key: ""}})
return collection.update_one({"_id": _id},
{"$unset": {".".join(path)}}) # rem( "collectionName.cardID.DIpath" )
def get(self, path: str, default: Any = None) -> Any:
path = [_ for _ in path.split(".") if _ != ""]
collection = self.db[path.pop(0)] # get( "collectionName" )
if not path:
result = collection.find_one({"_id": "_"}, {"_id": 0, "_": 1})
if result is not None:
return result.get("_", default)
return default
_id = path.pop(0) # get( "collectionName.cardID" )
if not path:
result = collection.find_one({"_id": _id}, {"_id": 0, "_": 1})
if result is not None:
return result.get("_", default)
return default
result = collection.find_one({"_id": _id},
{"_id": 0, ".".join(path): 1}) # set( "collectionName.cardID.DIpath" )
if result is not None:
return self.utils.find(result, path, default=default)
return default
```
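A minimal usage sketch of the dotted-path API above, assuming a local MongoDB at the default URI, a database called `clutter`, and that the module is importable as `mongodb` (all assumptions for illustration):
```python
# Minimal usage sketch for the MongoManager dotted-path API above.
# The connection URI, database name, and import path are illustrative only.
from mongodb import MongoManager

db = MongoManager("mongodb://localhost:27017", "clutter")

# "guilds" is the collection, "1234" the document _id, "prefix" a nested key.
db.set("guilds.1234.prefix", "!")
print(db.get("guilds.1234.prefix", default="="))  # -> "!"

# Removing only the nested key leaves the rest of the document intact.
db.rem("guilds.1234.prefix")
print(db.get("guilds.1234.prefix", default="="))  # -> "="
```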
|
{
"source": "Jerry-py/Disgames-Bot",
"score": 3
}
|
#### File: bot/cogs/tags.py
```python
import discord
from discord.ext.commands import Context, command, group
from discord.ext import commands
from bot.utils import TagModel, LowerCase
from typing import Literal
def s(data) -> Literal["", "s"]:
check = data == 1
if hasattr(data, "endswith"):
check = not data.endswith("s")
elif hasattr(data, "__len__"):
check = len(data) == 1
return "s" if check else ""
class TagsCog(commands.Cog):
"""The cog that is for tag commands - This is from https://github.com/Dorukyum/Pycord-Manager/blob/main/cogs/tags.py"""
def __init__(self, bot):
self.bot = bot
@group(invoke_without_command=True)
async def tag(self, ctx: Context, *, name: LowerCase):
"""View a tag's content."""
if tag := await TagModel.filter(name=name, guild_id=ctx.guild.id).first():
await ctx.reply(tag.content)
await tag.update_from_dict({"uses": tag.uses + 1}).save()
else:
await ctx.reply("A tag with this name doesn't exist.")
@tag.command()
async def create(self, ctx: Context, name: LowerCase, *, content):
"""Create a tag."""
if await TagModel.filter(name=name, guild_id=ctx.guild.id).first():
await ctx.reply("A tag with this name already exists.")
else:
await TagModel.create(
guild_id=ctx.guild.id,
author_id=ctx.author.id,
name=name,
content=content,
uses=0,
)
await ctx.reply(f"Tag `{name}` created successfully.")
@tag.command()
async def edit(self, ctx: Context, name: LowerCase, *, content):
"""Edit the content of a tag."""
if tag := await TagModel.filter(name=name, guild_id=ctx.guild.id).first():
if (
tag.author_id == ctx.author.id
or ctx.channel.permissions_for(ctx.author).manage_messages
):
await tag.update_from_dict({"content": content}).save()
await ctx.reply(f"Tag `{name}` edited successfully.")
else:
await ctx.reply("You don't own this tag.")
else:
await ctx.reply("A tag with this name doesn't exist.")
@tag.command()
async def delete(self, ctx: Context, *, name: LowerCase):
"""Delete a tag."""
if tag := await TagModel.filter(name=name, guild_id=ctx.guild.id).first():
if (
tag.author_id == ctx.author.id
or ctx.channel.permissions_for(ctx.author).manage_messages
):
await tag.delete()
await ctx.reply(f"Tag `{name}` deleted successfully.")
else:
await ctx.reply("You don't own this tag.")
else:
await ctx.reply("A tag with this name doesn't exist.")
@tag.command()
async def transfer(self, ctx: Context, name: LowerCase, member: discord.Member):
"""Transfer a tag's ownership."""
if tag := await TagModel.filter(name=name, guild_id=ctx.guild.id).first():
if tag.author_id == ctx.author.id:
await tag.update_from_dict({"author_id": member.id}).save()
await ctx.send(f"Tag `{name}` transferred to {member} successfully.")
else:
await ctx.reply("You don't own this tag.")
else:
await ctx.reply("A tag with this name doesn't exist.")
@tag.command()
async def rename(self, ctx: Context, name: LowerCase, *, new_name: LowerCase):
"""Rename a tag."""
if tag := await TagModel.filter(name=name, guild_id=ctx.guild.id).first():
if tag.author_id == ctx.author.id:
if await TagModel.filter(name=new_name, guild_id=ctx.guild.id):
await ctx.send("A tag with this name already exists.")
else:
await tag.update_from_dict({"name": new_name}).save()
await ctx.send(
f"Tag `{name}` renamed to `{new_name}` successfully."
)
else:
await ctx.reply("You don't own this tag.")
else:
await ctx.reply("A tag with this name doesn't exist.")
@tag.command()
async def info(self, ctx: Context, *, name: LowerCase):
"""View info about a tag."""
if tag := await TagModel.filter(name=name, guild_id=ctx.guild.id).first():
owner = self.bot.get_user(tag.author_id) or await self.bot.fetch_user(
tag.author_id
)
await ctx.send(
embed=discord.Embed(title=tag.name, color=discord.Color.blurple())
.add_field(name="Owner", value=owner.mention)
.add_field(name="Uses", value=tag.uses)
.add_field(
name="Created At", value=discord.utils.format_dt(tag.created_at)
)
)
else:
await ctx.reply("A tag with this name doesn't exist.")
@tag.command()
async def raw(self, ctx: Context, *, name: LowerCase):
"""View the content of a tag, with escaped markdown."""
if tag := await TagModel.filter(name=name, guild_id=ctx.guild.id).first():
await ctx.send(discord.utils.escape_markdown(tag.content))
else:
await ctx.reply("A tag with this name doesn't exist.")
@tag.command()
async def claim(self, ctx: Context, *, name: LowerCase):
"""Claim a tag."""
if tag := await TagModel.filter(name=name, guild_id=ctx.guild.id).first():
if ctx.guild.get_member(tag.author_id):
await ctx.reply("The author of this tag is still in the server.")
else:
await tag.update_from_dict({"author_id": ctx.author.id}).save()
await ctx.reply("Successfully claimed tag.")
else:
await ctx.reply("A tag with this name doesn't exist.")
@tag.command()
async def search(self, ctx: Context, *, query):
"""Search the guild's tags."""
if tags := await TagModel.filter(guild_id=ctx.guild.id):
await ctx.send(
embed=discord.Embed(
title=f"Tag Search | {query}",
description="\n".join(
f"{i+1}. {name}"
for i, name in enumerate(
tag.name for tag in tags if query in tag.name
)
),
color=discord.Color.blurple(),
)
)
else:
await ctx.reply("This server has no tags.")
@command(name="tags")
async def _tags(self, ctx: Context, member: discord.Member = None):
"""View the guild's tags.
Shows the tags of a member if supplied."""
if member:
if tags := await TagModel.filter(guild_id=ctx.guild.id, author_id=member.id):
await ctx.send(
embed=discord.Embed(
title=f"{member.display_name}'{s(member.display_name)} Tags",
description="\n".join(
f"{i+1}. {tag.name}" for i, tag in enumerate(tags)
),
color=discord.Color.blurple(),
)
)
else:
await ctx.reply("This member does not have any tags in this server.")
elif tags := await TagModel.filter(guild_id=ctx.guild.id):
await ctx.send(
embed=discord.Embed(
title=f"Tags in {ctx.guild.name}",
description="\n".join(
f"{i+1}. {tag.name}" for i, tag in enumerate(tags)
),
color=discord.Color.blurple(),
)
)
else:
await ctx.reply("This server does not have any tags.")
def setup(bot):
bot.add_cog(TagsCog(bot))
```
#### File: bot/utils/embeds.py
```python
import discord
import random
import datetime
class Embeds:
"""Embed for handling errors"""
def __init__(self):
self.cooldown_choices = [
"Woah, slow down man",
"A little too quick there",
"Too fast man",
"Spamming is not cool"
]
self.time = datetime.datetime.utcnow().strftime('%Y:%m:%d %H:%M:%S')
def OnCooldown(self, error):
"""Returns an embed for when a command is on cooldown"""
cooldown_name = random.choice(self.cooldown_choices)
return discord.Embed(
title=cooldown_name,
description=f"You need to slow down and don't spam the "
f"bot\n Retry after {round(error.retry_after, 2)}s",
color=discord.Color.blue(),
)
def OnError(self, command_name: str, time: str, reason: str):
"""Returns an embed for when a command raises an error"""
Embed = discord.Embed(title="Oh no an error occurred", color=discord.Color.red())
Embed.add_field(name="Command Name: ", value=command_name)
Embed.add_field(name="At: ", value=time)
Embed.add_field(name="Reason", value=reason)
return Embed
```
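A brief, hypothetical error-handler sketch showing one way the `Embeds` helper above could be wired up; the cog name, event wiring, and import path are assumptions:
```python
# Hypothetical error handler wired to the Embeds helper above.
# The cog name and import path are assumptions.
import datetime

from discord.ext import commands

from bot.utils.embeds import Embeds


class ErrorHandler(commands.Cog):
    def __init__(self, bot):
        self.bot = bot
        self.embeds = Embeds()

    @commands.Cog.listener()
    async def on_command_error(self, ctx, error):
        if isinstance(error, commands.CommandOnCooldown):
            # OnCooldown reads error.retry_after to tell the user how long to wait.
            await ctx.send(embed=self.embeds.OnCooldown(error))
        else:
            command_name = ctx.command.qualified_name if ctx.command else "unknown"
            now = datetime.datetime.utcnow().strftime("%Y:%m:%d %H:%M:%S")
            await ctx.send(embed=self.embeds.OnError(command_name, now, str(error)))


def setup(bot):
    bot.add_cog(ErrorHandler(bot))
```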
#### File: bot/utils/pagination.py
```python
import discord
from discord.ext import commands
class Dropdown(discord.ui.Select):
"""A Simple Dropdown View"""
def __init__(self, placeholder: str, pages : list, pagination_type : str):
self._pagination_type = pagination_type
self._placeholder = placeholder
self._pages = pages
self._options = [
discord.SelectOption(label=f"Page {int(page+1)}", description=f"Page {int(page+1)}")
for page in range(len(pages))
]
super().__init__(
placeholder=self._placeholder,
min_values=1,
max_values=1,
options=self._options,
)
async def callback(self, inter):
page = self._pages[int(str(self.values[0]).replace("Page ", ""))-1]
if self._pagination_type == 'embed':
await inter.response.edit_message(embed=page)
else:
await inter.response.edit_message(content=page)
class EmbedPaginator(discord.ui.View):
"""A Simple Embed Paginator using discord Views"""
def __init__(self, ctx : commands.Context, embeds : list, timeout : int = 120, dropdown : bool = True):
super().__init__(timeout=timeout)
self.embeds = embeds
self.ctx = ctx
self.current_page = 0
if dropdown:
self.add_item(Dropdown('Select a page', self.embeds, 'embed'))
async def show_page(self, inter, page: int):
"""Change the page of the paginator"""
await inter.response.defer()
self.current_page = 0 if page >= len(self.embeds) else page
embed = self.embeds[self.current_page]
await inter.edit_original_message(embed=embed)
@discord.ui.button(label='⏪')
async def beginning(self, button, inter):
"""Go to the first page"""
await self.show_page(inter, 0)
@discord.ui.button(label="⬅️")
async def back(self, button, inter):
"""Go to the previous page"""
await self.show_page(inter, self.current_page - 1)
@discord.ui.button(label="➡️")
async def forward(self, button, inter):
"""Go to the next page"""
await self.show_page(inter, self.current_page + 1)
@discord.ui.button(label='⏩')
async def end(self, button, inter):
"""Go to the last page"""
await self.show_page(inter, -1)
@discord.ui.button(label="Quit")
async def quit(self, button, inter):
"""Quit the paginator"""
await inter.response.defer()
await inter.delete_original_message()
async def interaction_check(self, inter):
"""Check if the user who used the the interaction is the author of the message"""
if inter.user == self.ctx.author:
return True
await inter.response.send_message("Hey! You can't do that!", ephemeral=True)
return False
async def on_timeout(self) -> None:
"""When the view times out"""
self.clear_items()
class MessagePaginator(discord.ui.View):
"""A Simple Message Paginator using discord Views"""
def __init__(self, ctx : commands.Context, messages : list, timeout : int = 120, dropdown : bool = True):
super().__init__(timeout=timeout)
self.messages = messages
self.ctx = ctx
self.current_page = 0
if dropdown:
self.add_item(Dropdown('Select a page', self.messages, 'message'))
async def show_page(self, inter, page: int):
"""Change the page of the paginator"""
self.current_page = 0 if page >= len(self.messages) else page
await inter.edit_original_message(content=self.messages[self.current_page])
@discord.ui.button(label='⏪')
async def beginning(self, button, inter):
"""Go to the first page"""
await inter.response.defer()
await self.show_page(inter, 0)
@discord.ui.button(label="⬅️")
async def back(self, button, inter):
"""Go to the previous page"""
await inter.response.defer()
await self.show_page(inter, self.current_page - 1)
@discord.ui.button(label="➡️")
async def forward(self, button, inter):
"""Go to the next page"""
await inter.response.defer()
await self.show_page(inter, self.current_page + 1)
@discord.ui.button(label='⏩')
async def end(self, button, inter):
"""Go to the last page"""
await inter.response.defer()
await self.show_page(inter, -1)
@discord.ui.button(label="Quit")
async def quit(self, button, inter):
"""Quit the paginator"""
await inter.response.defer()
await inter.delete_original_message()
async def interaction_check(self, inter):
"""Check if the user who used the the interaction is the author of the message"""
if inter.user == self.ctx.author:
return True
await inter.response.send_message("Hey! You can't do that!", ephemeral=True)
return False
async def on_timeout(self) -> None:
"""When the view times out"""
self.clear_items()
```
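For illustration, a hedged sketch of wiring `EmbedPaginator` into a command; the cog name, command, embed contents, and import path are made up:
```python
# Hypothetical command using the EmbedPaginator view defined above.
# The cog name, command, and import path are assumptions.
import discord
from discord.ext import commands

from bot.utils.pagination import EmbedPaginator


class Pages(commands.Cog):
    def __init__(self, bot):
        self.bot = bot

    @commands.command()
    async def guide(self, ctx: commands.Context):
        embeds = [
            discord.Embed(title=f"Step {i + 1}", description=f"Contents of step {i + 1}")
            for i in range(3)
        ]
        # The view handles the buttons/dropdown; the first embed is sent with it.
        view = EmbedPaginator(ctx, embeds, timeout=120, dropdown=True)
        await ctx.send(embed=embeds[0], view=view)


def setup(bot):
    bot.add_cog(Pages(bot))
```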
|
{
"source": "Jerry-py/Disgames",
"score": 3
}
|
#### File: disgames/mixins/rps_buttons.py
```python
import discord
from discord.ext import commands
import random
try:
class RPSButton(discord.ui.Button):
def __init__(self, emoji):
self.conversion = {"✂️":'Scissors',"📜":'Paper',"🪨":"Rock"}
super().__init__(label=self.conversion[emoji], emoji=emoji, style=discord.ButtonStyle.primary)
async def callback(self, interaction):
view = self.view
if interaction.user not in view.plays:
return await interaction.response.send_message("You're not in this game", ephemeral=True)
elif view.plays[interaction.user]:
return await interaction.response.send_message("You already chose", ephemeral=True)
view.plays[interaction.user] = str(self.emoji)
if view.player2.bot:
view.plays[view.player2] = random.choice(list(self.conversion))
try:
winner = view.has_won_rps_buttons(view.player1, view.player2)
except KeyError:
return await interaction.response.send_message(f"Waiting for {view.player2.mention if interaction.user == view.player1 else view.player1.mention}", ephemeral=True)
else:
view.stop()
view.clear_items()
return await interaction.response.edit_message(content=f"{view.player1.mention}: {view.plays[view.player1]}\n{view.player2.mention}: {view.plays[view.player2]}\n\nWinner: {winner}", view=view)
class RPSView(discord.ui.View):
def __init__(self, player1, player2):
super().__init__()
for emoji in ["✂️", "📜", "🪨"]:
self.add_item(RPSButton(emoji))
self.plays = {player1:'',player2:''}
self.player1 = player1
self.player2 = player2
def has_won_rps_buttons(self, player1, player2):
"""Returns the winner"""
if not self.plays[player1] or not self.plays[player2]:
raise KeyError
dct = {"✂️":"📜","🪨":"✂️","📜":"🪨"}
if dct[self.plays[player1]] == dct[self.plays[player2]]:
return "Draw"
elif dct[self.plays[player1]] == self.plays[player2]:
return player1.mention
return player2.mention
class RPSButtons(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
async def rps(self, ctx, member:discord.Member=None):
if member and (member.bot or member == ctx.author):
return await ctx.send("Invalid syntax: can't play again "+member.display_name)
await ctx.send('Rock Paper Scissors', view=RPSView(ctx.author, member or self.bot.user))
except AttributeError:
class RPSButtons:
pass
```
#### File: disgames/mixins/rps.py
```python
import random, discord
from discord.ext import commands
class RPS(commands.Cog):
def __init__(self, bot):
self.bot = bot
def has_won_rps(self, inp1, inp2):
"""Returns the winner"""
dct = {"✂️": "📜", "🪨": "✂️", "📜": "🪨"}
if inp1 == inp2:
return "Draw"
elif dct[inp1] == inp2:
return "inp1"
return "inp2"
@commands.command()
async def rps(self, ctx, member: discord.Member = None):
"""Rock wins against scissors; paper wins against rock; and scissors wins against paper"""
if not member:
msg = await ctx.send("Please react with your choice:")
for i in ["✂️", "🪨", "📜"]:
await msg.add_reaction(i)
reaction, _ = await self.bot.wait_for(
"reaction_add",
check=lambda r, u: u == ctx.author
and r.message == msg
and str(r) in ["✂️", "🪨", "📜"],
)
bot_choice = random.choice(["🪨", "📜", "✂️"])
win = self.has_won_rps(str(reaction), bot_choice)
await ctx.send(
f"{self.bot.user.display_name}: {bot_choice}\n{ctx.author.display_name}: {str(reaction)}\nWinner: {'Draw' if win == 'Draw' else (ctx.author.mention if win == 'inp1' else self.bot.user.mention)}"
)
elif member.bot or member == ctx.author:
return await ctx.send(
f"Invalid Syntax: Can't play against {member.display_name}"
)
else:
try:
msg1 = await ctx.author.send("Please react with your choice:")
for i in ["✂️", "🪨", "📜"]:
await msg1.add_reaction(i)
except discord.Forbidden:
return await ctx.send(f"I couldn't dm {ctx.author.display_name}")
try:
msg2 = await member.send("Please react with your choice:")
for i in ["✂️", "🪨", "📜"]:
await msg2.add_reaction(i)
except discord.Forbidden:
return await ctx.send(f"I couldn't dm {member.display_name}")
def check(payload):
return (
payload.message_id in [msg1.id, msg2.id]
and str(payload.emoji) in ["✂️", "🪨", "📜"]
and payload.user_id != self.bot.user.id
)
payload = await self.bot.wait_for("raw_reaction_add", check=check)
if payload.user_id == ctx.author.id:
await ctx.send(f"Waiting for {member.display_name}")
payload2 = await self.bot.wait_for(
"raw_reaction_add",
check=lambda p: p.message_id == msg2.id
and str(payload.emoji) in ["✂️", "🪨", "📜"],
)
win = self.has_won_rps(str(payload.emoji), str(payload2.emoji))
await ctx.send(
f"{member.display_name}: {str(payload2.emoji)}\n{ctx.author.display_name}: {str(payload.emoji)}\nWinner: {'Draw' if win == 'Draw' else (ctx.author.mention if win == 'inp1' else member.mention)}"
)
else:
await ctx.send(f"Waiting for {ctx.author.display_name}")
payload2 = await self.bot.wait_for(
"raw_reaction_add",
check=lambda p: p.message_id == msg1.id
and str(payload.emoji) in ["✂️", "🪨", "📜"],
)
win = self.has_won_rps(str(payload2.emoji), str(payload.emoji))
await ctx.send(
f"{member.display_name}: {str(payload.emoji)}\n{ctx.author.display_name}: {str(payload2.emoji)}\nWinner: {'Draw' if win == 'Draw' else (ctx.author.mention if win == 'inp1' else member.mention)}"
)
```
|
{
"source": "jerrypy/NuptLifeAnalyzer",
"score": 2
}
|
#### File: NuptLifeAnalyzer/app/__init__.py
```python
from celery import Celery
from flask import Flask
from flask.ext.mail import Mail
from flask.ext.bootstrap import Bootstrap
from app_config import config, Config
from redis_session import RedisSessionInterface
celery = Celery(__name__, broker=Config.CELERY_BROKER_URL, backend=Config.CELERY_RESULT_BACKEND)
mail = Mail()
bootstrap = Bootstrap()
def create_app(config_name):
app = Flask(__name__)
app.session_interface = RedisSessionInterface(prefix=Config.YOUJI_SESSION_PREFIX)
app.config.from_object(config[config_name])
config[config_name].init_app(app)
celery.conf.update(app.config)
bootstrap.init_app(app)
mail.init_app(app)
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
return app
```
#### File: jerrypy/NuptLifeAnalyzer/manage.py
```python
import os
from app import create_app
from flask.ext.script import Manager, Shell
app = create_app(os.getenv('YOUJI_CONFIG') or 'default')
manager = Manager(app)
def make_shell_context():
return dict(app=app)
manager.add_command("shell", Shell(make_context=make_shell_context))
if __name__ == '__main__':
manager.run()
```
#### File: NuptLifeAnalyzer/NuptCrawlers/EhomeCrawler.py
```python
from urlparse import urlparse
from datetime import date
import datetime
import json
from requests import Session
import gevent
from config import Config
from app_config import Config as APP_CONFIG
from NUPTCrawlerBase import NUPTCrawlerBase
from lib.util import api
from lib.http import req
from lib.PageParser import EhomeParser
class EhomeCrawler(NUPTCrawlerBase):
def __init__(self, db=None, debug=False):
super(EhomeCrawler, self).__init__(debug=debug)
self.URLS = Config.EHOME_URLS
self.host = urlparse(self.URLS['COOKIE']).netloc
self.proxies = Config.PROXIES
self.session = Session()
self.session.proxies = self.proxies
self.cookies = None
self.iplanet = None
self.student_id = ''
self.cardno = None
self.collection = db.ehome
def _login(self, login_data):
resp = req(self.URLS['COOKIE'], 'get', host=self.host)
if resp is None:
return Config.SERVER_MSG['SERVER_ERROR']
api.logger.info('[+] ID: %s got ehome cookies.' % login_data['student_id'])
self.cookies = resp.cookies
self.student_id = login_data['student_id']
payload = {
'email': login_data['student_id'],
'password': login_data['password']
}
resp = req(self.URLS['LOGIN'], 'post', data=payload, cookies=self.cookies)
if resp is None:
return Config.SERVER_MSG['SERVER_ERROR']
# Campus system bug: this step is unnecessary
if resp.url != self.URLS['LOGIN_SUCCESS']:
return Config.SERVER_MSG['WRONG_PASSWORD']
api.logger.info('[+] ID: %s login ehome.' % login_data['student_id'])
self.iplanet = resp.history[0].cookies
self.session.cookies = self.iplanet
return Config.SERVER_MSG['LOGIN_SUCCESS']
def login(self, login_data=None):
return self._login(login_data=login_data)
def _get_cardno(self):
info = []
resp = self.session.get(self.URLS['INDEX'])
if resp is None:
return info
content = resp.text
info = EhomeParser.parse_ehome_info(content)
api.logger.info('[+] got cardno: %s.' % info['usercode'])
# Write to the database
self.insert_to_db('info', info)
return info
def _get_loss(self, start_date, cardno):
rec = []
data = {
'param_0': 0,
'param_1': 100
}
params = {
'className': 'cn.com.system.query.DealQuery',
'methodName': 'getDealRegisterLoss',
'paramCount': '5',
'param_2': start_date,
'param_3': str(date.today()),
'param_4': cardno
}
resp = self.session.post(self.URLS['REC'], params=params, data=data)
if resp is None:
return None
rec = resp.json()['results']
rec = dict(loss_rec=rec)
self.insert_to_db('loss_rec', rec)
return rec # dict wrapping the list of loss records
def _get_rec(self, start_date, cardno):
rec = []
# This queries the final aggregated stats, not the raw records.
# res = self.find_in_db('rec')
# if res is not None and not res['rec']['incomplete']:
# return res
# Get the user's campus card number; it can also be queried from the library info page
info = self._get_cardno()
usercode = info.get('usercode')
if not usercode:
return rec
fanka_data = {
'param_0': 0,
'param_1': 1 # number of results per page
}
params = {
'className': 'cn.com.system.query.DealQuery',
'methodName': 'getDealQuery',
'paramCount': '6',
'param_2': start_date,
'param_3': str(date.today()),
'param_4': '-1',
'param_5': cardno
}
#: The first request returns only one result (fanka_data param_1=1)
#: so that the total record count can be read;
#: a second pass then fetches everything, but the server returns at most 1024 records per request.
resp = self.session.post(self.URLS['REC'], params=params, data=fanka_data)
if resp is None:
return None
res = resp.json()
total_count = int(res['totalCount'])
api.logger.info('[+] got total_count %s of %s' % (total_count, cardno))
if total_count > 0:
threads = []
for i in range(0, total_count, 1024):
post_data = dict(param_0=i,
param_1=1024)
threads.append(gevent.spawn(self.session.post, self.URLS['REC'], params=params, data=post_data))
gevent.joinall(threads)
for t in threads:
if t.value is not None:
rec.extend(t.value.json()['results'])
else:
pass
# if total_count > 0:
# fanka_data['param_1'] = total_count
# resp = self.session.post(self.URLS['REC'], params=params, data=fanka_data)
# if resp is None:
# return "[]"
# res = resp.json()
# rec = res['results']
# else:
# pass
rec = dict(items=rec)
self.insert_to_db('rec', rec)
return rec
def get_data(self, start_date, cardno):
"""
:param start_date: should be taken from the Zhengfang (ZF) enrollment date
:return:
"""
self.cardno = cardno
api.logger.info('[+] start fetching ehome data for %s' % self.cardno)
# This queries the final aggregated stats, not the raw records.
res = self.find_in_db('analysis')
if res is not None and not res['analysis']['incomplete']:
if (datetime.datetime.now() - self.find_in_db('fetch_date')['fetch_date']).days > APP_CONFIG.EHOME_DATA_TTL:
pass
else:
return res
threads = [
gevent.spawn(self._get_rec, start_date, cardno),
gevent.spawn(self._get_loss, start_date, cardno)
]
gevent.joinall(threads)
res = dict(loss_rec=threads[1].value,
rec=threads[0].value)
# After analysis, write the result to the database's analysis field.
analysis = self.analyze(res)
self.insert_to_db('analysis', analysis, date=True)
return {'analysis': analysis}
def analyze(self, res):
api.logger.info('[*] start analyzing %s card data...' % self.cardno)
if res['rec'] is None or res['rec']['items'] == []:
return None
recs = res['rec']['items']
total_consume = 0.0 # total spending
highest_consume = dict(window='',
money=0,
date='') # highest single purchase
highest_month_consume = dict(money=0,
month='') # highest single-month spending
first_consume = dict(window='',
money=0,
date='2099-01-01 00:00:00') # first purchase
highest_left = dict(money=0.0,
date='') # highest balance
lowest_left = dict(money=99999999,
date='') # lowest balance
favor_window = dict(name='',
times=0,
money=0) # favourite canteen window
bank_charge = dict(times=0,
money=0) # bank deposits (quancun)
bath_charge = dict(times=0,
money=0) # water/bath credit transfers
elec_charge = dict(times=0,
money=0) # electricity charges
net_charge = dict(times=0,
money=0) # internet (city hotspot) charges
windows = dict()
month_consume = dict() # per-month spending
for i in recs:
# total spending
if i['CONTYPE'] != u'2' and i['CONTYPE'] != u'19' and i['CONTYPE'] != u'13':
total_consume += float(i['TRANSACTMONEY'])
# highest single-month spending
if i['DISPOSETIME'][0:7] in month_consume:
month_consume[i['DISPOSETIME'][0:7]]['money'] += float(i['TRANSACTMONEY'])
else:
month_consume[i['DISPOSETIME'][0:7]] = dict(money=float(i['TRANSACTMONEY']))
if month_consume[i['DISPOSETIME'][0:7]]['money'] > highest_month_consume['money']:
highest_month_consume = dict(money=month_consume[i['DISPOSETIME'][0:7]]['money'],
month=i['DISPOSETIME'][0:7])
# highest balance
if float(i['CURRENTDBMONEY']) > highest_left['money']:
highest_left = dict(money=float(i['CURRENTDBMONEY']),
date=i['DISPOSETIME'])
# lowest balance
if float(i['CURRENTDBMONEY']) < lowest_left['money']:
lowest_left = dict(money=float(i['CURRENTDBMONEY']),
date=i['DISPOSETIME'])
if i['CONTYPE'] == u'0':
# first purchase
if i['DISPOSETIME'] < first_consume['date']:
first_consume = dict(window=i['WINNAME'],
money=i['TRANSACTMONEY'],
date=i['DISPOSETIME'])
# highest single purchase
if float(i['TRANSACTMONEY']) > highest_consume['money']:
highest_consume = dict(window=i['WINNAME'],
money=float(i['TRANSACTMONEY']),
date=i['DISPOSETIME'])
# most frequently used window
if i['WINNAME'] in windows:
windows[i['WINNAME']]['times'] += 1
windows[i['WINNAME']]['money'] += float(i['TRANSACTMONEY'])
else:
windows[i['WINNAME']] = dict(times = 1,
money=float(i['TRANSACTMONEY']))
if windows[i['WINNAME']]['times'] > favor_window['times']:
favor_window = dict(name=i['WINNAME'],
times=windows[i['WINNAME']]['times'],
money=windows[i['WINNAME']]['money'])
# bank deposit (quancun)
elif i['CONTYPE'] == u'13' or i['CONTYPE'] == u'2':
bank_charge['money'] += float(i['TRANSACTMONEY'])
bank_charge['times'] += 1
# electricity credit transfer
elif i['CONTYPE'] == u'4':
elec_charge['money'] += float(i['TRANSACTMONEY'])
elec_charge['times'] += 1
# internet (city hotspot) and computer-lab transfers
elif i['CONTYPE'] == u'25' or i['CONTYPE'] == u'24':
net_charge['money'] += float(i['TRANSACTMONEY'])
net_charge['times'] += 1
# water top-up
elif i['CONTYPE'] == u'26':
bath_charge['money'] += float(i['TRANSACTMONEY'])
bath_charge['times'] += 1
else:
pass
return dict(total_consume=total_consume,
first_consume=first_consume,
highest_consume=highest_consume,
highest_month_consume=highest_month_consume,
highest_left=highest_left,
lowest_left=lowest_left,
favor_window=favor_window,
bank_charge=bank_charge,
elec_charge=elec_charge,
bath_charge=bath_charge,
net_charge=net_charge)
if __name__ == '__main__':
# print str(date.today())
from app_config import Config as APP_CONFIG
from pymongo import MongoClient
conn = MongoClient(APP_CONFIG.MONGO_URI)
db = conn.youji
db.authenticate(APP_CONFIG.MONGO_USER, APP_CONFIG.MONGO_PWD)
ec = EhomeCrawler(db=db, debug=True)
ec._login(login_data={'student_id': '', 'password': ''})
print ec.get_data('2012-09-01', "")
```
#### File: NuptLifeAnalyzer/NuptCrawlers/ZfCrawler.py
```python
import re
from PIL import Image
from StringIO import StringIO
from urlparse import urlparse
import json
import gevent
from celery.contrib import rdb
from config import Config
from NUPTCrawlerBase import NUPTCrawlerBase
from lib.util import api, save_to_qiniu
from lib.http import req
from lib.PageParser import ZfParser
class ZfCrawler(NUPTCrawlerBase):
"""
The cookie here is the one obtained when fetching the captcha.
Every entry point needs cookies and student_id passed in, because the captcha
is shown to the user first and they type it in themselves, hence this design.
Once the captcha can be recognised automatically, this can use the same design as the Lib and Ehome crawlers.
"""
def __init__(self, debug=False):
super(ZfCrawler, self).__init__(debug=debug)
self.ZF_URLS = Config.ZF_URLS
self.host = urlparse(self.ZF_URLS['LOGIN']).netloc
self.vs_regex = r'<input type="hidden" name="__VIEWSTATE" value="((.[^\s])*)" />'
self.db = None
self.collection = None
@staticmethod
def get_captcha(url=Config.ZF_URLS['CAPTCHA'], debug=False):
"""
For now the captcha has to be typed in by the user.
Mimics the academic affairs office captcha fetch at http://jwxt.njupt.edu.cn/CheckCode.aspx
<img src="http://jwxt.njupt.edu.cn/CheckCode.aspx">
TODO:
1. Recognise the captcha automatically, see: http://blog.rijnx.com/post/ZF-Checkcode-Verify
:return: captcha image bytes, Zhengfang login cookie
:param: url
:param: debug
"""
resp = req(url, 'get')
if resp is None:
return Config.SERVER_MSG['SERVER_ERROR'], None
if debug:
# in debug mode, save the image locally for inspection
i = Image.open(StringIO(resp.content))
i.save('test.gif')
return resp.content, resp.cookies
def decaptcha(self):
captcha = req(Config.ZF_URLS['CAPTCHA'], 'get')
if captcha is None:
return Config.SERVER_MSG['SERVER_ERROR'], None
captcha = captcha.content
data = {
'file': captcha
}
text = req(Config.DECAPTCHA_URL, 'post', files=data).text
return text
def _get_viewstate(self, url, cookies=None):
"""
Fetch the form's __VIEWSTATE value
"""
resp = req(url, 'get', referer=self.ZF_URLS['LOGIN'], cookies=cookies)
if resp is None:
return Config.SERVER_MSG['SERVER_ERROR']
res = re.search(self.vs_regex, resp.text, re.S)
if res is None:
return Config.SERVER_MSG['SERVER_ERROR']
viewstate = res.group(1)
return viewstate
def _login(self, login_data=None, cookies=None):
"""
Log in to the Zhengfang (ZF) academic system
:param login_data:
:param cookies:
:return: (student_id, login result message)
"""
viewstate = self._get_viewstate(self.ZF_URLS['LOGIN'])
if viewstate == Config.SERVER_MSG['SERVER_ERROR']:
return '', Config.SERVER_MSG['SERVER_ERROR']
student_id = login_data['student_id']
login_data = {
'__VIEWSTATE': viewstate,
'txtUserName': student_id,
'TextBox2': Config.TEST_ZF_PASSWORD if student_id == Config.TEST_STUDENT_ID else login_data['zf_password'],
'txtSecretCode': login_data['zf_captcha'],
'RadioButtonList1': '学生',
'Button1': '登录',
'lbLanguange': '',
'hidPdrs': '',
'hidsc': ''
}
resp = req(self.ZF_URLS['LOGIN'], 'post', referer=self.ZF_URLS['LOGIN'], data=login_data, cookies=cookies)
if resp is None:
return '', Config.SERVER_MSG['SERVER_ERROR']
if resp.url.startswith(self.ZF_URLS['LOGIN_SUCCESS']):
api.logger.info('[+] ID: %s login zf successfully.' % (login_data['txtUserName']))
msg = Config.SERVER_MSG['LOGIN_SUCCESS']
elif self.ZF_URLS['WRONG_CAPTCHA_FINGER'] in resp.text:
msg = Config.SERVER_MSG['WRONG_CAPTCHA']
elif self.ZF_URLS['INVALID_CAPTCHA_FINGER'] in resp.text:
msg = Config.SERVER_MSG['INVALID_USERNAME']
elif self.ZF_URLS['WRONG_PASS_FINGER'] in resp.text:
api.logger.warning('[-] ID: %s login zf failed.' % (login_data['txtUserName']))
msg = Config.SERVER_MSG['WRONG_PASSWORD']
elif self.ZF_URLS['COMMENT'] in resp.text:
api.logger.warning('[-] need to comment for classes.')
msg = Config.SERVER_MSG['COMMENT_TIME']
else:
msg = Config.SERVER_MSG['SERVER_ERROR']
return login_data['txtUserName'], msg
def get_personal_info(self, cookies, student_id, db):
"""
Fetch personal information
"""
# rdb.set_trace()
if self.collection is None:
self.collection = getattr(db, 'zf')
# Check the database first
res = self.find_in_db('info', student_id=student_id)
# Only query the site if the database has no record or the record is incomplete
if res is not None and not res['info']['incomplete']:
res = dict(id_num=res['info']['id_num'][-6:], entrance_date=res['info']['entrance_date'])
return json.dumps(res, ensure_ascii=False)
url = ZfParser.get_zf_urls(self.ZF_URLS['INFO'], student_id)
resp = req(url, 'get', referer=self.ZF_URLS['LOGIN'], cookies=cookies)
if resp is None:
api.logger.warning('[-] got %s personal info failed.' % student_id)
return Config.SERVER_MSG['SERVER_ERROR']
content = resp.text
res = ZfParser.parse_zf_info(content)
api.logger.info('[+] got %s personal info successfully.' % student_id)
# Write to the database
self.insert_to_db('info', student_id, res)
#: Not everything needs to be returned to the frontend, only the necessary fields:
#: the last six digits of the ID number (default library login password)
#: and the enrollment date (start date for smart-campus queries).
res = dict(id_num=res['id_num'][-6:], entrance_date=res['entrance_date'])
return json.dumps(res, ensure_ascii=False)
def _get_score(self, cookies, student_id):
"""
        Fetch exam scores.
"""
url = ZfParser.get_zf_urls(self.ZF_URLS['SCORE'], student_id)
viewstate = self._get_viewstate(url, cookies=cookies)
score_data = {
'__VIEWSTATE': viewstate,
'ddlXN': '',
'ddlXQ': '',
'Button2': '在校学习成绩查询'
}
resp = req(url, 'post', data=score_data, referer=self.ZF_URLS['LOGIN'], cookies=cookies, host=self.host)
if resp is None or resp.text is None:
            api.logger.warning('[-] got %s score failed.' % student_id)
return "[]"
content = resp.text
res = ZfParser.parse_zf_score(content)
api.logger.info('[+] got %s score successfully.' % student_id)
        # Write to the database
print 'score1'
self.insert_to_db('score', student_id, res)
print 'score2'
return res
def _get_course(self, cookies, student_id):
"""
        Fetch this semester's courses.
"""
pass
def _get_cert_score(self, cookies, student_id):
"""
        Fetch level/certificate exam scores.
"""
pass
# url = ZfParser.get_zf_urls(self.ZF_URLS['CERT_SCORE'], student_id)
# resp = req(url, 'get', cookies=cookies, referer=self.ZF_URLS['LOGIN'])
# if resp is None or resp.text is None:
# api.logger.warning('[+] got %s cert score failed.' % student_id)
# return "[]"
# content = resp.text
# res = ZfParser.parse_zf_cert_score(content)
# api.logger.info('[+] got %s cert score successfully.' % student_id)
#
        # # Write to the database
# print 'cert1'
# rdb.set_trace()
# self.insert_to_db('cert_score', student_id, res)
# print 'cert2'
# return res
def _get_thesis(self, cookies, student_id):
"""
        Fetch graduation thesis information.
"""
pass
def _get_img(self, cookies, student_id):
"""
        Save the student's personal photo.
"""
img_url = ZfParser.get_zf_urls(self.ZF_URLS['IMG'], student_id)
resp = req(img_url, 'get', referer=self.ZF_URLS['LOGIN'], cookies=cookies, host=self.host)
if resp is None:
return ''
i = Image.open(StringIO(resp.content))
if self.debug:
i.save(student_id + '.jpg')
api.logger.info('[+] got %s image successfully.' % student_id)
url = save_to_qiniu(i)
i.close()
        # Write the image URL to the database
print 'img1'
        self.insert_to_db('img_url', student_id, dict(img_url=url))
print 'img2'
        return url
def get_data(self, cookies=None, student_id=None):
"""
        Concurrently crawl all information and return the personal info in real time; the last six digits of the ID number must be passed to EhomeCrawler so it can attempt its own login.
"""
api.logger.info('[*] start fetching data from zf for %s' % student_id)
        # This looks up the final aggregated (analysis) data, not the raw data.
res = self.find_in_db('analysis', student_id=student_id)
if res is not None and not res['analysis']['incomplete']:
return res
threads = []
threads.extend([
gevent.spawn(self._get_score, cookies, student_id),
gevent.spawn(self._get_cert_score, cookies, student_id),
gevent.spawn(self._get_thesis, cookies, student_id),
gevent.spawn(self._get_course, cookies, student_id),
gevent.spawn(self._get_img, cookies, student_id)
])
gevent.joinall(threads)
res = dict(score=threads[0].value,
cert_score=threads[1].value,
thesis=threads[2].value,
course=threads[3].value,
img_url=threads[4].value)
        # After analysis, write the result to the database 'analysis' field.
        # analysis = self.analyze(res, student_id)
        # TODO: return the analyzed data (analysis)
return res
def find_in_db(self, key_name, student_id=None):
# rdb.set_trace()
res = self.collection.find_one({'student_id': student_id}, {"_id": 0, key_name: 1})
        # If key_name does not exist, an empty dict {} is returned
if not res:
return None
return res
def insert_to_db(self, key_name, student_id, res):
"""
        TODO: if a field already has a value in the database but the current query did not return it, keep the existing value.
:return:
"""
        # FIXME: what is the best way to implement the 'incomplete' mechanism?
res['incomplete'] = False
        # FIXME: student_id has to be rewritten on every call here
        result = self.collection.update_one({"student_id": student_id},
                                            {'$set': {"student_id": student_id, key_name: res}},
                                            upsert=True)
        # update_one returns an UpdateResult, not an int; treat a modified or upserted document as success
        return result.modified_count == 1 or result.upserted_id is not None
def analyze(self, res, student_id):
failed_courses_count = 0
highest_score = 0
highest_weight = 0
highest_course = ''
for x in res['score']['all_score']:
if x[10] != '':
failed_courses_count += 1
try:
highest = float(x[7])
weight = float(x[6])
                if highest > highest_score:
                    highest_score = highest
                    highest_weight = weight
                    highest_course = x[3]
                elif highest == highest_score and weight > highest_weight:
                    highest_weight = weight
                    highest_course = x[3]
            except (IndexError, ValueError, TypeError):
continue
res = dict(examed_courses_count=len(res['score']['all_score']),
failed_courses_count=failed_courses_count,
highest_course=dict(name=highest_course,
score=highest_score))
return json.dumps(res, ensure_ascii=False)
if __name__ == '__main__':
zc = ZfCrawler(debug=True)
_, cookies = zc.get_captcha(debug=True)
captcha = raw_input('login captcha: ')
login_data = dict(student_id=Config.TEST_STUDENT_ID,
zf_password=<PASSWORD>,
zf_captcha=captcha)
sid, _ = zc.login(login_data, cookies)
j = zc.get_data(cookies, sid)
import pprint
pprint.pprint(j, indent=4)
# print zc._get_cert_score(cookies, sid)
```
|
{
"source": "Jerry-py/RoboEpik",
"score": 2
}
|
#### File: RoboEpik/core/__main__.py
```python
from config import Config
from EpikCord import Client, Intents, Button, ActionRow, Modal, ApplicationCommandInteraction, TextInput, Embed, Message,Colour
import logging
logger = logging.getLogger('EpikCord')
logger.setLevel(logging.DEBUG)
handler = logging.FileHandler(filename='.\\epik.log', encoding='utf-8', mode='w')
handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
logger.addHandler(handler)
class RoboEpik(Client):
def __init__(self, config: Config):
super().__init__(
config.token,
Intents().guilds.guild_messages.message_content.guild_members
)
async def create_pastebin(self, code: str):
...
client = RoboEpik(Config())
@client.command(
description = "A command to setup the Anonymous help channel.",
guild_ids = ["937364424208039957"]
)
async def setup_anonymous(interaction: ApplicationCommandInteraction):
if interaction.author.id != "5<PASSWORD>":
return await interaction.reply(content = "Only The Untraceable can run that command.")
components = [
ActionRow([
Button(label="Click here for Anonymous help!", custom_id = "anonymous_help").GREEN
])
]
embed = [
Embed(
title = "Anonymous help",
description = "Click the button below to setup the Anonymous help channel.",
color = 0x00ff00,
footer = {"text": "No logs are kept of what is sent to who, and you can use the other help channels if you're comfortable with revealing your identity."}
)
]
await interaction.reply(embeds = embed, components = components)
@client.component("anonymous_help")
async def anonymous_help_button_click(interaction, button):
await interaction.send_modal(
Modal(
custom_id = "anonymous_help_modal",
title = "Anonymous help",
components = [
ActionRow([
TextInput(
custom_id = "code",
label = "Code",
placeholder = "Enter the code here.",
style = 2,
required = True
)
]), ActionRow([
TextInput(
custom_id = "issue_description",
label = "Issue description",
placeholder = "Enter the issue description here.",
style = 2,
required = True
)]), ActionRow([
TextInput(
custom_id = "full_traceback",
label = "Full traceback",
placeholder = "Enter the full traceback here.",
style = 2,
required = True
)
]), ActionRow([
TextInput(
custom_id = "version",
label = "Version",
placeholder = "Enter EpikCord.py version here.",
style = 1,
required = True
)])
])
)
@client.component("anonymous_help_modal")
async def anonymous_help_modal_submit(interaction, code, issue_description, full_traceback, version):
if (len(code) + len(issue_description) + len(full_traceback) + len(version)) > 2000 and (len(issue_description) + len(full_traceback) + len(version)) < 2000:
pastebin_link = await client.create_pastebin(code, full_traceback)
embed = [
Embed(
title = "Anonymous help",
description = f"Your issue has been submitted to the Anonymous help channel. Your code has been posted [here]({pastebin_link})",
color = 0x00ff00,
footer = {"text": "No logs are kept of what is sent to who, and you can use the other help channels if you're comfortable with revealing your identity."}
)
]
await interaction.reply(embeds = embed)
await interaction.create_followup()
@client.command(name="help", description="The official RoboEpik help command")
async def help(interaction:ApplicationCommandInteraction):
help_embed= [Embed(
title= "Help Command For RoboEpik",
description= "1. You can simply type ## for any EpikCord repository Issue/PR and re# for The Official **RoboEpik** repository Issues/PR",
color=0x00f700
)]
await interaction.reply(embeds=help_embed)
GH_API_SITE = "https://api.github.com"
@client.event
async def on_message_create(message:Message):
    # Note: this approach works for both PRs and issues, but not for discussions, since the discussions API response differs from the issues/PRs one.
    if message.content.startswith("##"):  # "##<id>" references an EpikCord.py issue/PR
gh_repo_id = message.content.strip("##")
resp= await client.http.get(url=f"{GH_API_SITE}/repos/EpikCord/EpikCord.py/issues/{gh_repo_id}",to_discord = False)
resp_stat = resp.status
response: dict = await resp.json()
title = response.get("title")
user = response.get("user")
user_name = user.get("login")
# we need to fix the issue where there is no login for discussions
body = response.get("body")
url= response.get("html_url")
state = response.get("state")
if resp_stat == 200:
issue_or_pr_em = [Embed(title = f"Issue/PR {gh_repo_id}", description=f"Title = {title}\nState = {state}\nBy: {user_name}\nBody: {body}",color=0x00FF00 if state == "open" else 0xFF0000, footer={"text":f"For more info, visit {url}"})]
await message.channel.send(embeds=issue_or_pr_em)
elif resp_stat == 404:
await message.channel.send(content = "The Resource you mentioned was not there.")
elif resp_stat == 410:
await message.channel.send(content = "The resource said bye-bye to us and went away 🤣.")
    if message.content.lower().startswith("re#"):  # "re#<id>" references a RoboEpik issue/PR
gh_repo_id = message.content.lower().strip("re#")
resp= await client.http.get(url=f"{GH_API_SITE}/repos/EpikCord/RoboEpik/issues/{gh_repo_id}",to_discord = False)
resp_stat = resp.status
response: dict = await resp.json()
title = response.get("title")
user = response.get("user")
user_name = user.get("login")
body = response.get("body")
url= response.get("html_url")
state = response.get("state")
if resp_stat == 200:
issue_or_pr_em = [Embed(title = f"Issue/PR {gh_repo_id}", description=f"Title = {title}\nState = {state}\nBy: {user_name}\nBody: {body}",color=0x00FF00 if state == "open" else 0xFF0000, footer={"text":f"For more info, visit {url}"})]
await message.channel.send(embeds=issue_or_pr_em)
elif resp_stat == 404:
await message.channel.send(content = "The Resource you mentioned was not there.")
elif resp_stat == 410:
await message.channel.send(content = "The resource said bye-bye to us and went away 🤣.")
client.login()
```
|
{
"source": "jerryqhyu/ebisu",
"score": 3
}
|
#### File: src/strategy/gmail_sub.py
```python
import calendar
import os
import threading
import time
from datetime import datetime, timezone
import apiclient
import httplib2
import oauth2client
credential_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), "../token.json")
def credentials():
store = oauth2client.file.Storage(credential_path)
return store.get()
def service():
http = credentials().authorize(httplib2.Http())
return apiclient.discovery.build("gmail", "v1", http=http, cache_discovery=False)
def get_messages_list(user_id, from_address, after):
if from_address is None:
query = f"after:{after}"
else:
query = f"from:{from_address} after:{after}"
return service().users().messages() \
.list(userId=user_id, q=query).execute()
def get_message_detail(id, user_id):
return service().users().messages().get(id=id, userId=user_id).execute()
class GmailSub():
interval = 1
is_running = True
last_time = None
from_address = None
message_handler = None
error_handler = None
def __init__(self, user_id):
self.user_id = user_id
self.thread = threading.Thread(target=self.__start)
self.thread.daemon = True
self.thread.start()
def set_interval(self, interval):
self.interval = interval
def set_from_address(self, address):
self.from_address = address
def on_message(self, callback):
self.message_handler = callback
def on_error(self, callback):
self.error_handler = callback
def stop(self):
self.is_running = False
def __start(self):
while self.is_running:
try:
ms = self.__get_messages()
if self.message_handler is not None:
self.message_handler(ms)
except Exception as ex:
if self.error_handler is not None:
self.error_handler(ex)
time.sleep(self.interval)
def __get_messages(self):
if self.last_time is None:
after = calendar.timegm(datetime.now(timezone.utc).timetuple())
else:
after = self.last_time + 1
now = calendar.timegm(datetime.now(timezone.utc).timetuple())
resp = get_messages_list(self.user_id,
from_address=self.from_address,
after=after)
messages = []
self.last_time = now
if 'messages' not in resp:
return messages
for m in resp['messages']:
detail = get_message_detail(m['id'], self.user_id)
messages.append(detail)
return messages
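# Minimal usage sketch of the poller above (assumes ../token.json already holds valid
# Gmail OAuth credentials; the address below is only a placeholder):
#
#   sub = GmailSub('me')                       # 'me' = the authorized Gmail account
#   sub.set_interval(10)                       # poll every 10 seconds
#   sub.set_from_address('alerts@example.com')
#   sub.on_message(lambda msgs: print('new messages:', len(msgs)))
#   sub.on_error(lambda exc: print('poll failed:', exc))
#   ...
#   sub.stop()                                 # stop the polling thread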
```
|
{
"source": "JerryRain/rrt_ws",
"score": 2
}
|
#### File: lqrrt_ros/msg/_MoveFeedback.py
```python
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class MoveFeedback(genpy.Message):
_md5sum = "9406c8b108b277e4eb75c321d48ddcc4"
_type = "lqrrt_ros/MoveFeedback"
_has_header = False #flag to mark the presence of a Header object
_full_text = """# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
# Feedback
string behavior
int64 tree_size
bool tracking
float64[] distance
float64 time_till_next_branch
"""
__slots__ = ['behavior','tree_size','tracking','distance','time_till_next_branch']
_slot_types = ['string','int64','bool','float64[]','float64']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
behavior,tree_size,tracking,distance,time_till_next_branch
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(MoveFeedback, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.behavior is None:
self.behavior = ''
if self.tree_size is None:
self.tree_size = 0
if self.tracking is None:
self.tracking = False
if self.distance is None:
self.distance = []
if self.time_till_next_branch is None:
self.time_till_next_branch = 0.
else:
self.behavior = ''
self.tree_size = 0
self.tracking = False
self.distance = []
self.time_till_next_branch = 0.
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self.behavior
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_qB().pack(_x.tree_size, _x.tracking))
length = len(self.distance)
buff.write(_struct_I.pack(length))
pattern = '<%sd'%length
buff.write(struct.pack(pattern, *self.distance))
buff.write(_get_struct_d().pack(self.time_till_next_branch))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.behavior = str[start:end].decode('utf-8')
else:
self.behavior = str[start:end]
_x = self
start = end
end += 9
(_x.tree_size, _x.tracking,) = _get_struct_qB().unpack(str[start:end])
self.tracking = bool(self.tracking)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sd'%length
start = end
end += struct.calcsize(pattern)
self.distance = struct.unpack(pattern, str[start:end])
start = end
end += 8
(self.time_till_next_branch,) = _get_struct_d().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self.behavior
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_qB().pack(_x.tree_size, _x.tracking))
length = len(self.distance)
buff.write(_struct_I.pack(length))
pattern = '<%sd'%length
buff.write(self.distance.tostring())
buff.write(_get_struct_d().pack(self.time_till_next_branch))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.behavior = str[start:end].decode('utf-8')
else:
self.behavior = str[start:end]
_x = self
start = end
end += 9
(_x.tree_size, _x.tracking,) = _get_struct_qB().unpack(str[start:end])
self.tracking = bool(self.tracking)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sd'%length
start = end
end += struct.calcsize(pattern)
self.distance = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)
start = end
end += 8
(self.time_till_next_branch,) = _get_struct_d().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_qB = None
def _get_struct_qB():
global _struct_qB
if _struct_qB is None:
_struct_qB = struct.Struct("<qB")
return _struct_qB
_struct_d = None
def _get_struct_d():
global _struct_d
if _struct_d is None:
_struct_d = struct.Struct("<d")
return _struct_d
```
#### File: src/RRT/orignal_rrt.py
```python
import sys, random, math, pygame
from pygame.locals import *
from math import sqrt,cos,sin,atan2
#constants
XDIM = 640
YDIM = 480
WINSIZE = [XDIM, YDIM]
EPSILON = 7.0
NUMNODES = 1000
fpsClock = pygame.time.Clock()
def dist(p1,p2):
return sqrt((p1[0]-p2[0])*(p1[0]-p2[0])+(p1[1]-p2[1])*(p1[1]-p2[1]))
def step_from_to(p1,p2):
if dist(p1,p2) < EPSILON:
return p2
else:
theta = atan2(p2[1]-p1[1],p2[0]-p1[0])
return p1[0] + EPSILON*cos(theta), p1[1] + EPSILON*sin(theta)
def main():
#initialize and prepare screen
pygame.init()
screen = pygame.display.set_mode(WINSIZE)
pygame.display.set_caption('Original RRT')
#white = 255, 240, 200
#black = 20, 20, 40
white = 255, 255, 255
black = 0, 0, 0
screen.fill(black)
nodes = []
#nodes.append((XDIM/2.0,YDIM/2.0)) # Start in the center
nodes.append((0.0,0.0)) # Start in the corner
for i in range(NUMNODES):
        rand = random.random()*XDIM, random.random()*YDIM
nn = nodes[0]
for p in nodes:
if dist(p,rand) < dist(nn,rand):
nn = p
newnode = step_from_to(nn,rand)
nodes.append(newnode)
pygame.draw.line(screen,white,nn,newnode)
pygame.display.update()
fpsClock.tick(100)
#print i, " ", nodes
for e in pygame.event.get():
if e.type == QUIT or (e.type == KEYUP and e.key == K_ESCAPE):
sys.exit("Leaving because you requested it.")
# if python says run, then we should run
if __name__ == '__main__':
main()
```
#### File: rrt-star-connect/python/wrapper.py
```python
import math
import rospy
from nav_msgs.msg import OccupancyGrid
from geometry_msgs.msg import PointStamped
class Wrapper(object):
def __init__(self, space_map):
self.width_pixels = space_map.info.width
self.length_pixels = space_map.info.height
self.start_x= space_map.info.origin.position.x
self.start_y= space_map.info.origin.position.y
self.length_m = (abs (space_map.info.origin.position.x))*2
self.width_m = (abs (space_map.info.origin.position.y))*2
self.matrix_of_pixels = [[True if j!=0 else False for j in space_map.data[i*self.length_pixels:i*self.length_pixels+self.length_pixels]] for i in reversed(xrange(self.width_pixels))]
def collision(self,x,y,z):
pixel_x=int(self.width_pixels*(y-self.start_y)/(-self.width_m))
pixel_y=int(self.length_pixels*(x-self.start_x)/self.length_m)
return self.matrix_of_pixels[pixel_x][pixel_y]
x_goal=0.0
y_goal=0.0
memorize_x=0.0
memorize_y=0.0
wrapper=None
pub=None
def read_map(pose):
global wrapper
wrapper=Wrapper(pose)
def publish_goal_configuration(pose):
global x_goal
global y_goal
global memorize_x
global memorize_y
global wrapper
x_goal=pose.point.x
y_goal=pose.point.y
pointstamped = PointStamped()
if wrapper.collision(float(x_goal),float(y_goal),0)==False:
pointstamped.point.x=x_goal
memorize_x=x_goal
pointstamped.point.y=y_goal
memorize_y=y_goal
pub.publish(pointstamped)
else:
pointstamped.point.x=memorize_x
        pointstamped.point.y=memorize_y
        pub.publish(pointstamped)
    rate.sleep()
if __name__ == '__main__':
    # init_node must be called before any publishers/subscribers are created
    rospy.init_node('goal_configuration_node', anonymous=True)
    pub = rospy.Publisher('/goal_configuration', PointStamped, queue_size=200)
    rospy.Subscriber("/clicked_point", PointStamped, publish_goal_configuration)
    rospy.Subscriber("/map", OccupancyGrid, read_map)
    rate = rospy.Rate(10)
    rospy.spin()
```
|
{
"source": "jerryrwu/alcazard",
"score": 2
}
|
#### File: jerryrwu/alcazard/clients.py
```python
import logging
import os
import shutil
import time
import traceback
from abc import ABC, abstractmethod
from asyncio import CancelledError
from alcazar_logging import BraceAdapter
from error_manager import ErrorManager, Severity
from utils import timezone_now, dict_pop_n, set_pop_n
logger = BraceAdapter(logging.getLogger(__name__))
class AlcazarException(Exception):
pass
class TorrentNotFoundException(AlcazarException):
def __init__(self, message=None, *args, **kwargs):
message = message or 'Torrent does not exist.'
super().__init__(message, *args, **kwargs)
class TorrentAlreadyAddedException(AlcazarException):
def __init__(self, message=None, *args, **kwargs):
message = message or 'Torrent already added.'
super().__init__(message, *args, **kwargs)
class FieldInfo:
def __init__(self, local_name, remote_name, converter=None, public=True):
self.local_name = local_name
self.remote_name = remote_name
self.converter = converter
self.public = public
class SessionStats:
def __init__(self, torrent_count, downloaded, uploaded, download_rate, upload_rate):
self.torrent_count = torrent_count
self.downloaded = downloaded
self.uploaded = uploaded
self.download_rate = download_rate
self.upload_rate = upload_rate
def to_dict(self):
return dict(self.__dict__)
class TorrentState:
STATUS_CHECK_WAITING = 0
STATUS_CHECKING = 1
STATUS_DOWNLOADING = 2
STATUS_SEEDING = 3
STATUS_STOPPED = 4
STATUS_NAMES = {
STATUS_CHECK_WAITING: 'check_waiting',
STATUS_CHECKING: 'checking',
STATUS_DOWNLOADING: 'downloading',
STATUS_SEEDING: 'seeding',
STATUS_STOPPED: 'stopped',
}
_FIELD_MAPPING = None
def __init__(self, manager, info_hash):
self.manager = manager
self.info_hash = info_hash
self.status = None
self.download_path = None
self.name = None
self.size = None
self.downloaded = None
self.uploaded = None
self.download_rate = None
self.upload_rate = None
self.progress = None
self.date_added = None
self.error = None
self.tracker_error = None
def _sync_fields(self, remote):
updated = False
for field_info in self._FIELD_MAPPING:
local_value = getattr(self, field_info.local_name)
if field_info.remote_name:
remote_value = getattr(remote, field_info.remote_name)
else:
remote_value = remote
if field_info.converter:
remote_value = field_info.converter(remote_value)
if local_value != remote_value:
setattr(self, field_info.local_name, remote_value)
updated = True
return updated
def to_dict(self):
result = {field.local_name: getattr(self, field.local_name)
for field in self._FIELD_MAPPING
if field.public}
result.update({
'info_hash': self.info_hash,
'error': self.error,
'client': self.manager.name,
})
return result
class PeriodicTaskInfo:
def __init__(self, fn, interval_seconds):
self.fn = fn
self.interval_seconds = interval_seconds
self.last_run_at = None
async def run_if_needed(self, current_time):
if not self.last_run_at or current_time - self.last_run_at > self.interval_seconds:
self.last_run_at = current_time
await self.fn()
return True
return False
class TorrentBatchUpdate:
def __init__(self, added=None, updated=None, removed=None):
self.added = added or {}
self.updated = updated or {}
self.removed = removed or set()
def update(self, batch):
for info_hash, data in batch.added.items():
self.added[info_hash] = data
self.updated.pop(info_hash, None)
self.removed.discard(info_hash)
for info_hash, data in batch.updated.items():
# If the add was not retrieved yet, update the data there, otherwise add it to updates
if info_hash in self.added:
self.added[info_hash] = data
else:
self.updated[info_hash] = data
self.removed.discard(data['info_hash'])
for info_hash in batch.removed:
self.added.pop(info_hash, None)
self.updated.pop(info_hash, None)
self.removed.add(info_hash)
def pop_batch(self, limit):
result = TorrentBatchUpdate()
result.added, limit = dict_pop_n(self.added, limit)
result.updated, limit = dict_pop_n(self.updated, limit)
result.removed, limit = set_pop_n(self.removed, limit)
return result, limit
def to_dict(self):
return {
'added': list(self.added.values()),
'updated': list(self.updated.values()),
'removed': list(self.removed),
}
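# Minimal sketch of how a batch accumulates and is drained (assumes dict_pop_n /
# set_pop_n return the popped items plus the remaining limit, as used in pop_batch):
#
#   pending = TorrentBatchUpdate()
#   pending.update(TorrentBatchUpdate(added={'abc': {'info_hash': 'abc', 'progress': 0.0}}))
#   pending.update(TorrentBatchUpdate(updated={'abc': {'info_hash': 'abc', 'progress': 0.5}}))
#   # 'abc' is still reported under "added" because the add was never delivered yet
#   batch, remaining = pending.pop_batch(limit=100)
#   batch.to_dict()  # {'added': [{'info_hash': 'abc', 'progress': 0.5}], 'updated': [], 'removed': []}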
class Manager(ABC):
key = None
config_model = None
def __init__(self, orchestrator, instance_config):
# The Orchestrator object this manager belongs to
self._orchestrator = orchestrator
# The global config of the orchestrator
self._config = orchestrator.config
# ManagerConfig for this instance
self._instance_config = instance_config
# Named used for display/system purposes
self._name = '{}{:03}'.format(self.key, instance_config.id)
# Used to track errors, warnings and info messages in the client and the error status.
self._error_manager = ErrorManager()
# Set by children when they grab a peer_port
self._peer_port = None
# Registry for the periodic tasks
self._periodic_tasks = []
# Current instance of SessionStats, as last obtained from the client
self._session_stats = None
# Has the client been fully initialized (all initial data loaded)
self._initialized = False
# Initialization time from launch in seconds
self._initialize_time_seconds = None
# When the instance was launched
self._launch_datetime = None
@property
def initialized(self):
return self._initialized
@property
def name(self):
return self._name
@property
def config(self):
return self._config
@property
def instance_config(self):
return self._instance_config
@property
def session_stats(self):
return self._session_stats
@property
@abstractmethod
def peer_port(self):
pass
@abstractmethod
async def force_reannounce(self, info_hash):
pass
@abstractmethod
async def force_recheck(self, info_hash):
pass
@abstractmethod
async def move_data(self, info_hash, download_path):
pass
@abstractmethod
async def pause_torrent(self, info_hash):
pass
@abstractmethod
async def resume_torrent(self, info_hash):
pass
@abstractmethod
async def rename_torrent(self, info_hash, name):
pass
@abstractmethod
def launch(self):
logger.info('Launching {}', self._name)
self._launch_datetime = timezone_now()
@abstractmethod
async def shutdown(self):
pass
@abstractmethod
def get_info_dict(self):
return {
'type': self.key,
'name': self._name,
'peer_port': self.peer_port,
'config': self.instance_config.to_dict(),
'initialized': self._initialized,
'status': self._error_manager.status,
'errors': self._error_manager.to_dict(),
'session_stats': self._session_stats.to_dict() if self._session_stats else None,
}
@abstractmethod
def get_debug_dict(self):
data = self.get_info_dict()
data.update({
'initialize_time_seconds': self._initialize_time_seconds,
})
return data
@abstractmethod
async def add_torrent(self, torrent, download_path, name):
pass
@abstractmethod
async def remove_torrent(self, info_hash):
pass
async def _run_periodic_task_if_needed(self, current_time, task):
start = time.time()
ran = await task.run_if_needed(current_time)
if ran:
logger.debug('{}.{} took {:.3f}', self._name, task.fn.__name__, time.time() - start)
return ran
async def _run_periodic_tasks(self):
current_time = time.time()
for task in self._periodic_tasks:
try:
ran = await self._run_periodic_task_if_needed(current_time, task)
if ran:
self._error_manager.clear_error(task.fn.__name__)
except CancelledError:
raise
except Exception:
message = 'Periodic task {} running every {}s crashed'.format(
task.fn.__name__, task.interval_seconds)
self._error_manager.add_error(
severity=Severity.ERROR,
key=task.fn.__name__,
message=message,
traceback=traceback.format_exc()
)
logger.exception(message)
def _can_clean_directory(self, directory):
items = os.listdir(directory)
if self._config.clean_torrent_file_on_remove:
return all(f.lower().endswith('.torrent') or f == 'ReleaseInfo2.txt' for f in items)
else:
return len(items) == 0
def clean_torrent_directories(self, download_path, torrent_name):
try:
if not self._config.clean_directories_on_remove:
logger.debug('Directory clean on remove is disabled in config.')
return
start_dir = os.path.join(download_path, torrent_name)
if not os.path.isdir(start_dir):
start_dir = download_path
if not os.path.isdir(start_dir):
logger.debug('Directory for {}/{} not found.'.format(download_path, torrent_name))
return
while self._can_clean_directory(start_dir):
logger.info('Removing cleanable directory {}.'.format(start_dir))
shutil.rmtree(start_dir)
start_dir = os.path.dirname(start_dir)
except Exception as exc:
self._error_manager.add_error(
Severity.ERROR,
'clean_torrent_directories',
'Unable to clean torrent directories for {}/{}.'.format(download_path, torrent_name),
traceback.format_exc(),
)
def get_manager_types():
managers = []
try:
from transmission.managed_transmission import ManagedTransmission
managers.append(ManagedTransmission)
except (ImportError, ModuleNotFoundError) as exc:
        logger.warning('Unable to import managed_transmission: {}.', exc)
try:
from transmission.remote_transmission import RemoteTransmission
managers.append(RemoteTransmission)
except (ImportError, ModuleNotFoundError) as exc:
        logger.warning('Unable to import remote_transmission: {}.', exc)
try:
if __debug__:
import pyximport
pyximport.install()
from libtorrent_impl.managed_libtorrent import ManagedLibtorrent
managers.append(ManagedLibtorrent)
except (ImportError, ModuleNotFoundError) as exc:
        logger.warning('Unable to import managed_libtorrent: {}.', exc)
return {manager_type.key: manager_type for manager_type in managers}
```
#### File: jerryrwu/alcazard/error_manager.py
```python
class Severity:
INFO = 'info'
WARNING = 'warning'
ERROR = 'error'
class ErrorInfo:
def __init__(self, severity, key, message, traceback):
self.severity = severity
self.key = key
self.message = message
self.traceback = traceback
def to_dict(self):
return dict(self.__dict__)
class ErrorManager:
GREEN = 'green' # All is good in the world
YELLOW = 'yellow' # Some warnings that need to be looked at
RED = 'red' # Something very bad happened
def __init__(self):
self._current_errors = {}
@property
def status(self):
statuses = {error.severity for error in self._current_errors.values()}
if Severity.ERROR in statuses:
return self.RED
elif Severity.WARNING in statuses:
return self.YELLOW
return self.GREEN
def add_error(self, severity, key, message, traceback=None):
self._current_errors[key] = ErrorInfo(
severity=severity,
key=key,
message=message,
traceback=traceback
)
def clear_error(self, key, convert_errors_to_warnings=True):
if key in self._current_errors:
if convert_errors_to_warnings:
error = self._current_errors[key]
if error.severity == Severity.ERROR:
self._current_errors[key] = ErrorInfo(
severity=Severity.WARNING,
key=key,
                        message='Error resolved to warning: {}'.format(error.message),
traceback=error.traceback,
)
else:
del self._current_errors[key]
def to_dict(self):
return {key: error.to_dict() for key, error in self._current_errors.items()}
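# Minimal usage sketch of the severity-to-status mapping above:
#
#   em = ErrorManager()
#   em.add_error(Severity.ERROR, 'db', 'Database connection lost')
#   em.status                                     # -> 'red'
#   em.clear_error('db')                          # error is downgraded to a warning
#   em.status                                     # -> 'yellow'
#   em.clear_error('db', convert_errors_to_warnings=False)
#   em.status                                     # -> 'green'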
```
#### File: jerryrwu/alcazard/migrations.py
```python
import logging
from playhouse import migrate
from alcazar_logging import BraceAdapter
logger = BraceAdapter(logging.getLogger(__name__))
def _record_migration(db, name):
db.execute_sql('INSERT INTO migration (name) VALUES (?001)', (name,))
def _handle_table_creation(db, migrations):
with db.atomic():
for migration_name, _ in migrations:
_record_migration(db, migration_name)
def _handle_migrations(db, migrations, current_migrations):
migrator = migrate.SqliteMigrator(db)
for migration_name, migration_fn in migrations:
if migration_name in current_migrations:
continue
logger.info('Running migration {}', migration_name)
with db.atomic():
migration_fn(migrator)
_record_migration(db, migration_name)
def apply_migrations(db, models, migrations):
db.create_tables(models)
current_migrations = {t[0] for t in db.execute_sql('SELECT name FROM migration').fetchall()}
if len(current_migrations) == 0: # Initial table creation, just insert all
logger.info('Migrations table was just created, inserting all current migrations.')
_handle_table_creation(db, migrations)
else:
logger.debug('Migrations detected, updating state.')
_handle_migrations(db, migrations, current_migrations)
```
|
{
"source": "jerryrwu/harvest",
"score": 2
}
|
#### File: harvest/Harvest/throttling.py
```python
from datetime import timedelta
from time import sleep
from django.db import models
from django.utils import timezone
from Harvest.utils import control_transaction, get_logger
logger = get_logger(__name__)
class ThrottledRequest(models.Model):
datetime = models.DateTimeField()
class Meta:
abstract = True
class DatabaseSyncedThrottler:
def __init__(self, config_model, model, num_requests, per_seconds):
self.config_model = config_model
self.model = model
self.num_requests = num_requests
self.per_seconds = per_seconds
def _prune(self):
self.model.objects.using('control').filter(
datetime__lt=timezone.now() - timedelta(seconds=self.per_seconds)).delete()
@control_transaction()
def throttle_request(self, **request_params):
list(self.config_model.objects.using('control').select_for_update().all())
self._prune()
requests = list(self.model.objects.using('control').order_by('datetime'))
if len(requests) >= self.num_requests:
sleep_time = self.per_seconds - (timezone.now() - requests[0].datetime).total_seconds()
if sleep_time > 0:
logger.info('Throttling request by {} for {}', sleep_time, self.model._meta.label)
sleep(sleep_time)
self.model.objects.using('control').create(datetime=timezone.now(), **request_params)
@control_transaction()
def get_load(self):
list(self.config_model.objects.using('control').select_for_update().all())
self._prune()
return self.model.objects.using('control').order_by('datetime').count() / self.num_requests
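# Minimal usage sketch (the two model names are placeholders -- any concrete
# ThrottledRequest subclass plus its matching config model work here):
#
#   throttler = DatabaseSyncedThrottler(SomeTrackerConfig, SomeTrackerRequest,
#                                       num_requests=5, per_seconds=10)
#   throttler.throttle_request()   # sleeps if 5 requests were already made in the last 10 s
#   throttler.get_load()           # fraction of the rate limit currently in use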
```
#### File: harvest/image_cache/views.py
```python
import base64
import hashlib
import mimetypes
import os
from itertools import count
from time import time
import requests
from django.conf import settings
from django.http import HttpResponse
from requests import RequestException
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
FAILURE_TTL = 24 * 3600
RETRIES = 3
CACHE_CONTROL = 'public, max-age=86400'
class Image(APIView):
def _fetch(self, url):
for i in count(1):
try:
response = requests.get(url)
response.raise_for_status()
return response.content
except RequestException:
if i >= RETRIES:
raise
def get(self, request):
url = request.GET['url']
dir_name = hashlib.md5(url.encode()).hexdigest()[:2]
dir_path = os.path.join(settings.MEDIA_ROOT, 'image_cache', dir_name)
path = os.path.join(
dir_path,
            base64.urlsafe_b64encode(url.encode()).decode() + os.path.splitext(url)[1],  # urlsafe variant avoids '/' in the cache file name
)
if os.path.exists(path):
return HttpResponse(open(path, 'rb'), content_type=mimetypes.guess_type(path)[0])
path_fail = path + '.fail'
if os.path.exists(path_fail):
fail_mtime = os.path.getmtime(path_fail)
if time() - fail_mtime < FAILURE_TTL:
return Response(status=status.HTTP_500_INTERNAL_SERVER_ERROR)
try:
content = self._fetch(url)
except RequestException:
os.makedirs(dir_path, exist_ok=True)
with open(path_fail, 'wb'):
pass
return Response(status=status.HTTP_500_INTERNAL_SERVER_ERROR)
try:
os.remove(path_fail)
except FileNotFoundError:
pass
os.makedirs(dir_path, exist_ok=True)
with open(path, 'wb') as f:
f.write(content)
return HttpResponse(open(path, 'rb'), content_type=mimetypes.guess_type(path)[0])
```
#### File: plugins/redacted/utils.py
```python
import html
import re
from dataclasses import dataclass
import bs4
from upload_studio.upload_metadata import MusicMetadata
class JoinedArtistsBuilder(object):
def __init__(self, joined_artists_builder=None):
if joined_artists_builder is None:
self.result = []
else:
self.result = list(joined_artists_builder.result)
def append_joined(self, join_string, artists):
for a in artists:
self.result.append({
'id': a['id'],
'name': a['name'],
'join': join_string,
})
self.result[-1]['join'] = ''
def append_artist(self, artist):
self.result.append({
'id': artist['id'],
'name': html.unescape(artist['name']),
'join': '',
})
def append_join(self, join_string):
assert not self.result[-1]['join'], 'Last join should be empty before adding a new join'
self.result[-1]['join'] = join_string
def clear(self):
self.result = []
def get_artists_list(music_info):
a_main = music_info['artists']
a_composers = music_info['composers']
a_conductors = music_info['conductor']
a_djs = music_info['dj']
if len(a_main) == 0 and len(a_conductors) == 0 and len(a_djs) == 0 and len(a_composers) == 0:
return []
builder = JoinedArtistsBuilder()
if len(a_composers) and len(a_composers) < 3:
builder.append_joined(' & ', a_composers)
if len(a_composers) < 3 and len(a_main) > 0:
builder.append_join(' performed by ')
composer_builder = JoinedArtistsBuilder(builder)
if len(a_main):
if len(a_main) <= 2:
builder.append_joined(' & ', a_main)
else:
builder.append_artist({'id': -1, 'name': 'Various Artists'})
if len(a_conductors):
if (len(a_main) or len(a_composers)) and (len(a_composers) < 3 or len(a_main)):
builder.append_join(' under ')
if len(a_conductors) <= 2:
builder.append_joined(' & ', a_conductors)
else:
builder.append_artist({'id': -1, 'name': 'Various Conductors'})
if len(a_composers) and len(a_main) + len(a_conductors) > 3 and len(a_main) > 1 and len(
a_conductors) > 1:
builder = composer_builder
builder.append_artist({'id': -1, 'name': 'Various Artists'})
elif len(a_composers) > 2 and len(a_main) + len(a_conductors) == 0:
builder.clear()
builder.append_artist({'id': -1, 'name': 'Various Composers'})
if len(a_djs):
if len(a_djs) <= 2:
builder.clear()
builder.append_joined(' & ', a_djs)
else:
builder.clear()
builder.append_artist({'id': -1, 'name': 'Various DJs'})
return builder.result
def get_joined_artists(music_info):
artists_list = get_artists_list(music_info)
result = []
for a in artists_list:
result.append(a['name'])
result.append(a['join'])
return ''.join(result)
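# Illustrative trace (hypothetical input): for
#   music_info = {'artists': [{'id': 1, 'name': 'Artist A'}, {'id': 2, 'name': 'Artist B'}],
#                 'composers': [], 'conductor': [], 'dj': []}
# get_artists_list() joins A to B with ' & ', so
# get_joined_artists(music_info) == 'Artist A & Artist B'.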
def get_shorter_joined_artists(music_info, group_name):
artists = get_joined_artists(music_info)
if len(artists) + len(group_name) > 80:
if music_info['artists']:
if len(music_info['artists']) > 1:
artists = 'Various Artists'
else:
artists = music_info['artists'][0]['name']
elif music_info['conductor']:
if len(music_info['conductor']) > 1:
artists = 'Various Conductors'
else:
artists = music_info['conductor'][0]['name']
return artists
def extract_upload_errors(html):
soup = bs4.BeautifulSoup(html, 'html5lib')
return soup.find('p', attrs={'style': 'color: red; text-align: center;'}).text.strip()
_ENCODING_PREFERENCES = [
MusicMetadata.ENCODING_320,
MusicMetadata.ENCODING_V0,
MusicMetadata.ENCODING_LOSSLESS,
MusicMetadata.ENCODING_24BIT_LOSSLESS,
]
def select_best_torrents_from_torrent_dicts(torrents):
def _is_torrent_better(a, b):
try:
a_index = _ENCODING_PREFERENCES.index(a['encoding'])
except ValueError:
a_index = -1
try:
b_index = _ENCODING_PREFERENCES.index(b['encoding'])
except ValueError:
b_index = -1
if a_index > b_index:
return True
if a_index < b_index:
return False
return a['size'] > b['size']
best_torrents = {}
for torrent in torrents:
key = (
torrent['remasterYear'],
torrent['remasterTitle'],
torrent['remasterRecordLabel'],
torrent['remasterCatalogueNumber'],
)
if not best_torrents.get(key) or _is_torrent_better(torrent, best_torrents[key]):
best_torrents[key] = torrent
return list(best_torrents.values())
@dataclass
class RedactedFileInfo:
name: str
size: int
def parse_file_list(file_list):
items = file_list.split('|||')
files = []
for item in items:
m = re.match('(.*){{{([0-9]*)}}}', item)
files.append(RedactedFileInfo(m[1], int(m[2])))
return files
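# Example of the flat file-list format parsed above (hypothetical values):
#   parse_file_list('01 Intro.flac{{{1234567}}}|||02 Song.flac{{{7654321}}}')
#   -> [RedactedFileInfo(name='01 Intro.flac', size=1234567),
#       RedactedFileInfo(name='02 Song.flac', size=7654321)]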
```
#### File: upload_studio/executors/confirm_spectrals.py
```python
from upload_studio.step_executor import StepExecutor
class ConfirmSpectralsExecutor(StepExecutor):
name = 'confirm_spectrals'
description = 'Ask for confirmation on the spectral files.'
def __init__(self, *args, spectrals_confirmed=None, **kwargs):
super().__init__(*args, **kwargs)
self.spectrals_confirmed = spectrals_confirmed
def record_additional_metadata(self):
self.metadata.processing_steps.append('Confirmed spectral images manually.')
def handle_run(self):
# Copy just the spectrals for viewing
self.copy_prev_step_area_files('spectrals')
if self.spectrals_confirmed is None:
self.raise_error('Spectrals are unconfirmed.')
elif self.spectrals_confirmed is False:
self.raise_error('Spectrals are rejected.')
# Spectrals were already copied, copy everything else
self.copy_prev_step_files(('spectrals',))
self.record_additional_metadata()
```
#### File: upload_studio/executors/utils.py
```python
import subprocess
def get_flac_version():
return subprocess.check_output(['flac', '--version']).decode().split('\n')[0]
def get_lame_version():
return subprocess.check_output(['lame', '--version']).decode().split('\n')[0]
```
|
{
"source": "jerryryle/raspberry_pi_ap",
"score": 2
}
|
#### File: rogueap/rogueap/rogueap.py
```python
from flask import escape, Flask, g, jsonify, render_template, request
app = Flask(__name__, static_folder='static', static_url_path='/r/static', template_folder='templates')
app.config.from_object(__name__)
@app.route("/", endpoint='index')
@app.route("/r", endpoint='r')
def index():
return render_template('index.html')
@app.route("/r/204")
def r_204():
return '', 204
if __name__ == '__main__':
app.config.update(dict(SECRET_KEY='test'))
app.run(debug=True)
```
|
{
"source": "jerrys208/liftbox",
"score": 3
}
|
#### File: liftbox/photo/mgr.py
```python
import logging
import os
import piexif
import json
import shutil
from time import ctime
from datetime import datetime
from os import makedirs, listdir
from os.path import join, basename, pardir, abspath, splitext, exists
from PIL import Image
from PIL.ExifTags import TAGS
""" Photo organizer
Requirements:
    1. Detect duplicates (based on the exact EXIF timestamp)
    2. Group photos by date (folders with too few photos are merged automatically)
    3. Flag blurry photos
    4. Rename, e.g. P01706120001.jpg
    5. Handle videos
    6. Write the already-classified directory back into EXIF
Flow:
    1. Specify the root directory
    2. Inspect every photo (filtered by file extension)
    3. Read the date information from EXIF
"""
#########################################################################
# EXIF helpers
class ImageTool(object):
@classmethod
    def list_files(cls, dirs, exts):
        """ List all images ('.jpg', '.jpeg') """
for dir in dirs:
logging.info('dir[%s]...', dir)
for dir_name, subdir_list, file_list in os.walk(dir):
for basename in file_list:
for ext in exts:
if basename.lower().endswith(ext):
yield join(dir_name, basename)
break
@classmethod
    def read_datetime(cls, filename):
        """ Read the date information of the given file """
logging.debug('read_image_datetime(%s)[+]', filename)
image = Image.open(filename)
        # Pillow has better compatibility here than piexif
# exif_dict = piexif.load(image.info["exif"])
# if 'Exif' in exif_dict and 36867 in exif_dict['Exif']:
# return exif_dict['Exif'][36867]
# if '0th' in exif_dict and 306 in exif_dict['0th']:
# return exif_dict['0th'][306]
# logging.error('read exif error: %s', filename)
exif = image._getexif()
if exif:
if 36867 in exif.keys():
#return exif[36867][0:10].replace(':','') # 2011:10:08 -> 20111008
#return exif[36867][0:7].replace(':','') # 2011:10:08 -> 20111008
return exif[36867].replace(':', '').replace(' ', '_')
elif 306 in exif.keys():
#return exif[306][0:10].replace(':','') # 2011:10:08 -> 20111008
#return exif[306][0:7].replace(':','') # 2011:10:08 -> 20111008
return exif[306].replace(':', '').replace(' ', '_')
# check file property
stat = os.stat(filename)
if hasattr(stat, 'st_mtime'):
return datetime.strftime(datetime.fromtimestamp(stat.st_mtime), "%Y%m%d_%H%M%S")
logging.error('cannot read image datetime for file: %s', filename)
# raise RuntimeError('datetime not found in EXIF: %s' % filename)
return None
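    # (read_datetime returns timestamps like '20111008_140322', i.e. the EXIF value
    #  '2011:10:08 14:03:22' with ':' removed and the space replaced by '_')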
@classmethod
def read_exif(cls, filename):
logging.info('read_exif(%s)[+]', filename)
image = Image.open(filename)
exif_dict = piexif.load(image.info["exif"])
# DateTimeOriginal = exif_dict['Exif'][36867]
# UserComment = exif_dict['Exif'][37510]
# PixelXDimension = exif_dict['Exif'][40962]
# PixelYDimension = exif_dict['Exif'][40963]
# logging.info('%s %s %s %s', DateTimeOriginal, UserComment, PixelXDimension, PixelYDimension)
for ifd in ("0th", "Exif", "GPS", "1st"):
for tag in exif_dict[ifd]:
logging.info('%s.%s %s %s', ifd, tag, piexif.TAGS[ifd][tag]["name"], exif_dict[ifd][tag])
class Photo(object):
    """ Represents a photo """
def __init__(self, filename):
self.filename = filename
self.datetime = ImageTool.read_datetime(filename)
    def get_dst_file(self, root_dir):
        """ Get the destination file path
=> root_dir/yyyy/yyyymmdd/yyyymmdd_hhmmssi.jpg
"""
ext = splitext(self.filename)[1].lower()
ext = '.jpg' if ext == '.jpeg' else ext
return join(root_dir, self.datetime[0:4], self.datetime[0:8], self.datetime + ext)
def move_to_dst(self, root_dir):
logging.info('move: %s', self.filename)
# check datetime
if self.datetime is None:
logging.warning('cannot move file: datetime(%s) is None', self.filename)
return
filename = self.get_dst_file(root_dir)
# check parent folder
folder = abspath(join(filename, pardir))
if not exists(folder):
makedirs(folder)
# check dest file not exists
        # Name: YYYYMMDDHHMMSS.jpg -- when the name is already taken, add an incrementing suffix ??
path, ext = splitext(filename)
for i in range(100):
newfile = ''.join([path, str(i), ext])
if not exists(newfile):
logging.info('%s -> %s', self.filename, newfile)
shutil.move(self.filename, newfile)
break
class Folder(object):
def __init__(self, dirname):
self.dirname = dirname
self.date = basename(dirname)
self.count = len(listdir(self.dirname))
def rename(self, new_name):
# parent = abspath(join(self.dirname, pardir))
new_path = self.dirname + '_' + new_name
logging.info('%s -> %s', self.dirname, new_path)
shutil.move(self.dirname, new_path)
self.dirname = new_path
def __repr__(self):
return '%s(%d)' % (self.dirname, self.count)
#########################################################################
# 消除重複檔案
import photohash
def search_duplicated(dirs, perceptual=False):
images = {}
for image in list_picture(dirs):
imhash = photohash.average_hash(image)
if imhash in images:
logging.warning('duplicated: %s', imhash)
logging.warning(' f1: %s', images[imhash])
logging.warning(' f2: %s', image)
else:
images[imhash] = image
#########################################################################
# Build the directory layout
"""
2016
2017
+ 20170123_xxxxx
+ 20170130_xxxxx (>25)
"""
def move_picture(dirs, target):
for picture in list_picture(dirs):
picture.move_to_dst(target)
class DateNote(object):
    """ Represents an annotated directory (date: note), e.g. 20111001_陽明員工旅遊紙箱王 """
@classmethod
def dump_date_note(cls, root_dir, out_file='note.txt'):
res = {}
for dir_name, subdir_list, file_list in os.walk(root_dir):
for subdir in subdir_list:
if len(subdir) > 9 and subdir[8] == '_':
print(subdir)
date = subdir[:8]
note = subdir[9:]
res[date] = note
with open(out_file, 'w', encoding='utf8') as out:
keys = sorted(res.keys())
for key in keys:
out.write('{}:{}\n'.format(key, res[key]))
@classmethod
def load_date_note(cls, filename='note.txt'):
res = {}
with open(filename, 'r', encoding='utf8') as src:
for line in src:
key, val = line.split(':')
res[key] = val.rstrip('\n')
logging.info('date_note: %s', res)
return res
@classmethod
def list_date_folder(cls, dir):
for dir_name, subdir_list, file_list in os.walk(dir):
for name in subdir_list:
if len(name) == 8:
yield Folder(join(dir_name, name))
@classmethod
def apply_date_note(cls, dir):
note = cls.load_date_note()
used = set()
        for f in cls.list_date_folder(dir):
if f.date in note:
used.add(f.date)
new_name = note[f.date]
f.rename(new_name)
# list un-used
for date in (note.keys() - used):
print(date, note[date])
#########################################################################
# Main
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)-15s %(levelname)s %(message)s', level=logging.INFO)
logging.info('start')
    # Manage date notes
# DateNote.dump_date_note('/home/jerry/_media/_photo/001_照片/')
# dn = DateNote.load_date_note()
# restore_date_note('/media/jerry/D_DAT/_Photo/001_照片')
# search_duplicated(['/media/jerry/D_DAT/_Photo/002_已整理/20090308_宸瑀生活照', '/media/jerry/D_DAT/_Photo/宸瑀'])
# rename_image(['/media/jerry/D_DAT/_Photo/000_待分日/wanyun'])
# move_picture(['/media/jerry/D_DAT/_Photo/000_整理/arc'], '/media/jerry/D_DAT/_Photo/001_照片')
# print(read_image_datetime('/media/jerry/D_DAT/_Photo/000_整理/無法判定/00000752.jpg'))
```
|
{
"source": "JerrySchonenberg/DSN",
"score": 2
}
|
#### File: DSN/DSN/config_DSN.py
```python
import configparser
import os
import time
import tensorflow as tf
import subprocess
import numpy as np
import cv2
import sys
import math
import matplotlib.pyplot as plt
from typing import Tuple
sys.path.append("../coppelia_sim")
from API_coppeliasim import CoppeliaSim
from PIL import Image
PATH_EXEC = './coppeliaSim.sh' #symbolic link
COMMAND_INIT = '../config/commands.ini'
VELOCITY = [] #velocity per command, as defined in commands.ini
CS_INIT = '../config/coppeliasim.ini'
HANDLE_NAME = [] #name of the handles
CONFIG_OUT = '../config/DSN.ini'
IMAGES = [] #store all images to compute the angles and zoom from
RESOLUTION_CONFIG = -1
RESOLUTION_ACTUAL = -1
ITER = int(sys.argv[1]) #how many times should every command be handled
#initialize the commands from a configuration file
def command_init() -> None:
config = configparser.ConfigParser()
config.read(COMMAND_INIT)
backwards = True #skip backwards command
for section in config.sections():
if backwards == False:
VELOCITY.append([int(config[section]['leftmotor']), int(config[section]['rightmotor'])])
else:
backwards = False
#start the configuration scene on coppeliasim
def scene_init() -> Tuple[str, str, int]:
config = configparser.ConfigParser()
config.read(CS_INIT)
scene = config['COM']['scene']
address = config['COM']['address']
port = int(config['COM']['port'])
for i in config['HANDLES']:
HANDLE_NAME.append(config.get('HANDLES', i))
global RESOLUTION_CONFIG
RESOLUTION_CONFIG = int(config['IMAGE']['resolution_config'])
global RESOLUTION_ACTUAL
RESOLUTION_ACTUAL = int(config['IMAGE']['resolution_actual'])
return scene, address, port
#get image from coppeliasim robot
def retrieve_image(CS: CoppeliaSim) -> np.ndarray:
resolution, img_list = CS.get_image()
img = np.array(img_list, dtype=np.uint8)
img.resize([resolution[0], resolution[1], 3]) #convert into right format
img = np.flipud(img) #vertically flip img
return img
#write results to configuration file (.ini)
def write_config_init(dx: list, dy: list, DSN_variant: int, tau: float) -> None:
config_command = configparser.ConfigParser()
config_command.read(COMMAND_INIT)
config_DSN = configparser.ConfigParser()
config_DSN['GENERAL'] = {'variant' : str(DSN_variant),
'tau' : str(tau)}
i = 0
backwards = True #used to skip backwards command
for command in config_command.sections():
if backwards == False:
config_DSN[command] = {'shift' : str(dx[i]),
'zoom' : str(dy[i])}
i += 1
else:
backwards = False
with open(CONFIG_OUT, 'w') as configfile:
config_DSN.write(configfile)
#use AKAZE for feature point detection
def AKAZE(DSN_variant: int, tau: float) -> None:
pixel_ratio = RESOLUTION_CONFIG / RESOLUTION_ACTUAL
dx = [0] * len(VELOCITY) #contains amount of horizontal pixels to be shifted
dy = [0] * len(VELOCITY) #same as dx, but for vertical pixels
for i in range(ITER):
for command in range(len(VELOCITY)):
temp_dx, temp_dy = 0, 0
list_kp1, list_kp2 = [], []
cv_img1 = IMAGES[i*len(VELOCITY)+command]
cv_img2 = cv2.cvtColor(IMAGES[i*len(VELOCITY)+command+1], cv2.COLOR_RGB2GRAY)
#AKAZE feature point detection and matching
akaze = cv2.AKAZE_create()
img1_kp, img1_ds = akaze.detectAndCompute(cv_img1, None)
img2_kp, img2_ds = akaze.detectAndCompute(cv_img2, None)
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
img1_ds = np.float32(img1_ds)
img2_ds = np.float32(img2_ds)
matches = flann.knnMatch(img1_ds, img2_ds, 2)
#need to draw only good matches, so create a mask
matchesMask = [[0,0] for i in range(len(matches))]
#atio test as per Lowe's paper
for j,(m,n) in enumerate(matches):
if m.distance < 0.7*n.distance:
matchesMask[j]=[1,0]
list_kp1.append([img1_kp[m.queryIdx].pt[0], img1_kp[m.queryIdx].pt[1]])
list_kp2.append([img2_kp[m.trainIdx].pt[0], img2_kp[m.trainIdx].pt[1]])
count = 0
if len(list_kp1) > 0:
for j in range(len(list_kp1)):
temp_dx += list_kp2[j][0] - list_kp1[j][0]
if list_kp1[j][1] >= RESOLUTION_CONFIG/2: #only upper half of image considered
temp_dy += list_kp2[j][1] - list_kp1[j][1]
count += 1
temp_dx /= len(list_kp1)
temp_dy /= count
dx[command] += temp_dx
dy[command] += temp_dy
for i in range(len(VELOCITY)):
dx[i] = (dx[i] / ITER) / pixel_ratio
dy[i] = (dy[i] / ITER) / pixel_ratio
if dx[i] < 0:
dx[i] = math.ceil(dx[i])
else:
dx[i] = math.floor(dx[i])
if dy[i] < 0:
dy[i] = math.ceil(dy[i])
else:
dy[i] = math.floor(dy[i])
write_config_init(dx, dy, DSN_variant, tau)
#simulate use of CNN
def dummy_cnn() -> None:
    img = np.zeros((1, 64, 64, 3), dtype=int)  # np.int was removed in newer NumPy; the builtin int is equivalent
model.predict(img)
model.predict(img)
#main loop of program
def main_loop(address: str, port: int, DSN_variant: int, tau: float) -> None:
CS = CoppeliaSim(address, port)
CS.get_handles(HANDLE_NAME[0:2], HANDLE_NAME[2:]) #motor-handle, sensor-handle
CS.check_startup_sim()
print("Configuring DSN...")
#first image is always blank
CS.get_image()
CS.get_image()
#get image of starting point
img = retrieve_image(CS)
IMAGES.append(img)
for i in range(ITER):
for command in range(len(VELOCITY)):
CS.set_velocity(VELOCITY[command][0], VELOCITY[command][1])
dummy_cnn()
CS.set_velocity(0, 0)
img = retrieve_image(CS)
IMAGES.append(img)
CS.stop_simulation()
AKAZE(DSN_variant, tau) #match keypoints
CS.exit_API('Configuration completed, saved in ' + CONFIG_OUT)
#start of script
if __name__ == "__main__":
if len(sys.argv) != 4:
print('insufficient arguments: [iter] [DSN-variant] [tau]')
exit()
#get files with configuration parameters
command_init()
scene, address, port = scene_init()
model = tf.keras.models.load_model('../models/weights/weights_OAH_1.h5')
pid = os.fork()
if pid == 0:
with open(os.devnull, 'wb') as devnull:
subprocess.check_call([PATH_EXEC, '-q', '-h', scene], stdout=devnull, stderr=subprocess.STDOUT)
else:
time.sleep(5) #wait for coppeliasim to start
main_loop(address, port, int(sys.argv[2]), float(sys.argv[3])) #start the configuration
```
|
{
"source": "jerryshueh/design-patterns",
"score": 4
}
|
#### File: Iterator/python/iterator.py
```python
class Item:
def __init__(self, name, val):
self.name = name
self.val = val
def __str__(self):
return "{} {}".format(self.name, self.val)
class ItemsIterator:
def __init__(self, items):
self.index = 0
self.items = items
def has_next(self):
if self.index >= len(self.items):
return False
return True
def next(self):
item = self.items[self.index]
self.index += 1
return item
if __name__ == '__main__':
item1 = Item('car', 30000)
item2 = Item('house', 100000)
item3 = Item('bike', 500)
item4 = Item('tv', 800)
items = []
items.append(item1)
items.append(item2)
items.append(item3)
items.append(item4)
iterator = ItemsIterator(items)
while iterator.has_next():
item = iterator.next()
print item
```
#### File: Strategy/python/sorting_strategies.py
```python
class SortingStrategy:
# Line is a sequence of points:
def sort(self, values) : pass
# The various strategies:
class AscendingSort(SortingStrategy):
def sort(self, values):
return sorted(values)
class DescendingSort(SortingStrategy):
def sort(self, values):
return sorted(values, reverse=True)
class ListSorter:
def __init__(self, strategy):
self.strategy = strategy
def sort_list(self, values):
return self.strategy.sort(values)
def change_strategy(self, new_strategy):
self.strategy = new_strategy
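# A minimal usage example of the strategy classes above (illustrative only):
if __name__ == '__main__':
    values = [3, 1, 2]
    sorter = ListSorter(AscendingSort())
    print(sorter.sort_list(values))       # [1, 2, 3]
    sorter.change_strategy(DescendingSort())
    print(sorter.sort_list(values))       # [3, 2, 1]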
```
#### File: Observer Pattern/python/Launcher.py
```python
import sys
sys.dont_write_bytecode = True;
from Subscriber import Subscriber
from Publisher import Publisher
def main():
pub = Publisher(['lunch', 'dinner'])
bob = Subscriber('Bob')
alice = Subscriber('Alice')
john = Subscriber('John')
pub.register("lunch", bob)
pub.register("dinner", alice)
pub.register("lunch", john)
pub.register("dinner", john)
pub.dispatch("lunch", "It's lunchtime!")
pub.dispatch("dinner", "Dinner is served")
if __name__ == "__main__":
main()
```
#### File: Facade/python/facade.py
```python
class SomeBusiness():
def DoSomething(self):
pass
class SomeRepository():
def SaveSomething(self, thing):
pass
class SomeLogger:
def Log(self, message):
pass
class SomeFacade:
def __init__(self):
self.business = SomeBusiness()
self.repository = SomeRepository()
self.logger = SomeLogger()
def DoSomething(self):
done = self.business.DoSomething()
self.logger.Log("done")
saved = self.repository.SaveSomething(done)
self.logger.Log("saved")
def main():
facade = SomeFacade()
facade.DoSomething()
if __name__ == "__main__":
main()
```
|
{
"source": "jerry-sky/academic-notebook",
"score": 4
}
|
#### File: lab/lista-5/dijkstra.py
```python
from collections import namedtuple
from graph import Graph, INFINITY
from priority_queue import PriorityQueue, PQNode
from sys import exit, stdin, stderr
from time import time
from graph_input_utility import read_graph_definition
DijkstraAlgorithmResult = namedtuple(
'DijkstraAlgorithmResult', ['dist', 'prev'])
def Dijkstra_algorithm(graph: Graph, starting_node: int) -> DijkstraAlgorithmResult:
dist = [INFINITY for _ in range(graph.nodes_count)]
prev = [None for _ in range(graph.nodes_count)]
dist[starting_node] = 0
prev[starting_node] = starting_node
# prepare list of tuples of nodes labels with their starting distances
dist_nodes = [
PQNode(key=i, priority=dist[i])
for i in range(0, graph.nodes_count)
]
Q = PriorityQueue(raw=dist_nodes)
while not Q.is_empty:
# pick the closest node
fst = Q.pop().key
for e in graph.get_neighbourhood(fst):
# scan the neighbourhood
snd = e.snd
weight = e.weight
if dist[snd] > dist[fst] + weight:
# update if better route found
dist[snd] = dist[fst] + weight
prev[snd] = fst
Q.bottom_bound_flatten_priority(snd, dist[snd])
return DijkstraAlgorithmResult(dist=dist, prev=prev)
if __name__ == "__main__":
graph = read_graph_definition()
# read starting node
starting_node = input()
try:
starting_node = int(starting_node)
except ValueError:
exit('starting node needs to be an integer')
# measure time
begin = time()
# use the Dijkstra's algorithm
results = Dijkstra_algorithm(graph, starting_node)
end = time()
# print out the results
for node in range(0, graph.nodes_count):
print(node, results.dist[node])
# retrace the route back
route = [
(
node,
graph.get_edge_weight(results.prev[node], node)
)
]
curr = route[0]
while curr[0] != starting_node:
curr = (results.prev[curr[0]], None)
route.insert(
0,
(
curr[0],
graph.get_edge_weight(results.prev[curr[0]], curr[0])
)
)
# print the route
print(starting_node, '', file=stderr, end='')
for r in route[1:]:
print('-' + str(r[1]) + '→', r[0], '', file=stderr, end='')
print(file=stderr)
print((end-begin) * 1000, 'ms', file=stderr)
```
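The script above relies on a custom `PriorityQueue` with a decrease-key operation (`bottom_bound_flatten_priority`). A minimal self-contained sketch of the same relaxation loop using the standard library's `heapq` with lazy deletion (an alternative to decrease-key, not the repository's implementation) might look like this:
```python
# Sketch: Dijkstra with a binary heap; stale heap entries are skipped instead of updated.
import heapq

INFINITY = float("inf")

def dijkstra(adj, start):
    # adj: {node: [(neighbour, weight), ...]}
    dist = {v: INFINITY for v in adj}
    prev = {v: None for v in adj}
    dist[start] = 0
    heap = [(0, start)]
    while heap:
        d, u = heapq.heappop(heap)
        if d > dist[u]:
            continue  # stale entry, skip (lazy deletion)
        for v, w in adj[u]:
            if dist[v] > d + w:
                dist[v] = d + w
                prev[v] = u
                heapq.heappush(heap, (dist[v], v))
    return dist, prev

if __name__ == "__main__":
    graph = {0: [(1, 7), (2, 3)], 1: [(3, 1)], 2: [(1, 2)], 3: []}
    print(dijkstra(graph, 0))  # dist: {0: 0, 1: 5, 2: 3, 3: 6}
```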
#### File: lab/lista-3/ex-1.py
```python
from sys import argv, exit
from zlib import crc32
from math import ceil
import re
FRAME_BORDER = '01111110'
def str_to_bytes(data: str) -> bytes:
"""Converts a string of 1s and 0s to a list of bytes.
"""
data = data.ljust(ceil(len(data)/8) * 8, '0')
output = []
for i in range(0, int(len(data)/8)):
b = 0
for j in range(0, 8):
if data[i*8+j] == '1':
b += 2**(7-j)
output.append(b)
return bytes(output)
def bytes_to_str(data: bytes) -> str:
"""Converts a list bytes to a string of 1s and 0s.
"""
output = ''
for b in data:
        bb = bin(b)[2:].zfill(8)  # left-pad so each byte maps to exactly 8 bits (inverse of str_to_bytes)
output += bb
return output
def crc_wrapper(data: str) -> str:
"""Performs crc32 method and returns the output in an appropriate
format for this exercise.
"""
crc = crc32(str_to_bytes(data))
return bytes_to_str(crc.to_bytes(4, 'big'))
def encode(data: str) -> str:
# attach the CRC checksum
data += crc_wrapper(data)
# perform bit stuffing
data = re.sub(r'11111', '111110', data)
return FRAME_BORDER + data + FRAME_BORDER
def decode(data: str) -> str:
# remove frame borders
data = re.sub(FRAME_BORDER, '', data)
# perform reverse bit stuffing
data = re.sub(r'111110', '11111', data)
# verify the CRC
crc = data[-32:]
if crc != crc_wrapper(data[:-32]):
raise Exception('invalid CRC => invalid frame')
return data[:-32]
if __name__ == '__main__':
if len(argv) < 4:
exit('usage: ./ex-1.py <--encode|--decode> input_file output_file')
mode = argv[1]
if mode not in ['--encode', '--decode']:
exit('invalid parameters')
input_file = argv[2]
output_file = argv[3]
with open(input_file, 'r') as fin:
data = fin.readline().replace('\n', '')
parsed = encode(data) if mode == '--encode' else decode(data)
with open(output_file, 'w+') as fout:
fout.write(parsed)
fout.write('\n')
```
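A small self-contained round-trip sketch of the bit-stuffing step used in `encode`/`decode` above (a zero is inserted after every run of five consecutive 1s and removed again on decode); the CRC and frame borders are omitted here:
```python
# Sketch: bit stuffing guarantees the frame border pattern 01111110 cannot occur in the payload.
import re

def stuff(bits: str) -> str:
    return re.sub(r'11111', '111110', bits)

def unstuff(bits: str) -> str:
    return re.sub(r'111110', '11111', bits)

if __name__ == '__main__':
    data = '0111111101111101'
    framed = stuff(data)
    assert '111111' not in framed      # no six 1s in a row after stuffing
    assert unstuff(framed) == data     # decoding restores the original bits
    print(framed)
```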
#### File: lab/lista-2/test2.py
```python
""
z = ''
x = "m"
"""
Documentation
# a comment inside of a documentation comment
"""
def test():
print('##nope#####')
'''
documentation
#another comment inside of a documentation comment
\"
\'''
\''\'
\'\''
'''
x = "#not a comment"
y = "# and another non-comment"
z = '#not a comment'
y = '# and another non-comment'
z = " # yup\" # NOOOOOO \" \" #are you sure about that? \' #yup \' \' # you sure 100%?"
z = ' #yup \" # NOOOOOO \" \" #are you sure about that? \' #yup \' \' # you sure 100%?'
if x == '####### really?':
print('####yup')
z = ""
"""
a
sdasd
"""
```
#### File: l2/z1/main.py
```python
from typing import Tuple
from functools import reduce
from math import sqrt, cos, exp, pi as PI
from time import time
from random import random, randint
from sys import exit
from neighbourhood import NeighbourhoodGenerator
def SalomonFunc(x: Tuple[float]) -> float:
x_length = sqrt(reduce(
lambda so_far, curr: so_far + curr**2,
x,
0
))
return 1 - cos(2*PI*x_length) + 0.1 * x_length
def RandomMove(original_pos: Tuple[float], jump_size: float, dimensions=4) -> Tuple[float]:
"""Moves a provided position vector in a random fashion.
Generates a random value from a range of [-1; 1] * jump_size and
adds it to the originally provided vector.
"""
direction = []
for i in range(0, dimensions):
direction.append(
original_pos[i]
+ ((random()*2 - 1) * jump_size))
return tuple(direction)
def Probability(difference: float, temperature: float) -> float:
return exp(-0.001*difference/temperature)
# very poor results actually (even while tweaking the c constant):
# return 1.0 / (1.0 + exp(0.000001*difference/temperature))
def SimulatedAnnealing(solution_initial: Tuple[float], temperature_initial: float, running_time_max: int, jump_size_initial: float, jump_size_min: float = 0.1):
"""Finds minimum of a Salomon function using simulated annealing algorithm.
Args:
`solution_initial`: The point in 4D space from which the algorithm
starts its search.
`temperature_initial`: Initial annealing temperature.
`running_time_max`: Abort searching after that amount of time
unless temperature got to 0 first.
`jump_size`: The length of the vector that will be randomly selected
during searching for better (or worse) solutions.
"""
solution_current = solution_initial
solution_current_value = SalomonFunc(solution_current)
temperature_current = temperature_initial
jump_size = jump_size_initial
generator = NeighbourhoodGenerator()
offsets = generator.generate()
begin = time()
end = time()
while end - begin <= running_time_max and temperature_current > 0:
offset = offsets[randint(0, len(offsets)-1)]
# scan the neighbouring points
# below is the old way — take only one vector at a time which gives
# very low probability of finding the best solution
## solution_candidate = RandomMove(solution_current, jump_size)
# multiply the offset vector by the jump_size coefficient
solution_candidate = list(
map(lambda x: random() * x * jump_size, offset))
for i in range(0, 4):
solution_candidate[i] += solution_current[i]
solution_candidate_value = SalomonFunc(solution_candidate)
# print(solution_candidate)
if solution_current_value > solution_candidate_value:
# the candidate was just plainly better
solution_current = solution_candidate
solution_current_value = solution_candidate_value
temperature_current *= 0.99
else:
difference = abs(solution_candidate_value -
solution_current_value)
if Probability(difference, temperature_current) > random():
# candidate solution wasn't better but it got lucky
solution_current = solution_candidate
solution_current_value = solution_candidate_value
# jump_size = jump_size_initial
# temperature_current = temperature_current/(10 * temperature_current + 1)
end = time()
return solution_current, solution_current_value
if __name__ == "__main__":
# read the input numbers
t, x1, x2, x3, x4 = map(lambda x: int(x), input().split())
# compose the start point
x = (x1, x2, x3, x4)
# run the algorithm
solution, solution_value = SimulatedAnnealing(
x, 50, t, jump_size_initial=2, jump_size_min=0.1)
# print out the results
for i in solution:
print(i, end=" ")
print(solution_value, end="")
```
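For reference, a self-contained check (not part of the repository) of the objective being minimised above: the Salomon function reaches its global minimum of 0 at the origin, which is the value the annealing run tries to approach.
```python
# Sketch: f(x) = 1 - cos(2*pi*||x||) + 0.1*||x|| is non-negative and zero only at x = 0.
from math import sqrt, cos, pi

def salomon(x):
    norm = sqrt(sum(v * v for v in x))
    return 1 - cos(2 * pi * norm) + 0.1 * norm

if __name__ == "__main__":
    print(salomon((0.0, 0.0, 0.0, 0.0)))   # 0.0, the global minimum
    print(salomon((1.0, 0.0, 0.0, 0.0)))   # ≈ 0.1 on the ring ||x|| = 1
```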
#### File: l2/z2/neighbourhood.py
```python
from typing import List, Tuple
from random import random, randint
from sys import exit
def PickTheNearestValueFromList(search_list: List[int], search_value) -> int:
"""Picks the nearest value to the provided one.
"""
return min(search_list, key=lambda x: abs(x - search_value))
def AlterBlockIntensity(
matrix: List[List[int]], matrix_initial: List[List[int]],
block_definition: List[List[List[int]]], allowed_values: List[int],
affect_all_blocks=False) -> None:
"""Generate a neighbouring matrix by changing values of one the matrix's blocks.
Args:
`matrix`: The matrix to alter.
`matrix_initial`: The base input matrix.
`block_definition`: Defines where the blocks begin.
`allowed_values`: Defines the values that can be used during rendering.
`affect_all_blocks`: Whether to alter all blocks or just one random one.
"""
if affect_all_blocks:
i = 0
for row in block_definition:
j = 0
for block in row:
vertical_bound = i + block[0]
horizontal_bound = j + block[1]
                # pick a random value from the block as the flattening value;
                # if it's not an allowed value, pick the nearest allowed one
flattening_value = PickTheNearestValueFromList(
allowed_values,
matrix_initial[randint(i, vertical_bound-1)][randint(j, horizontal_bound-1)])
for g in range(i, vertical_bound):
for h in range(j, horizontal_bound):
matrix[g][h] = flattening_value
j += block[1]
i += row[0][0]
else:
        # pick a random block
block_index_x = randint(0, len(block_definition) - 1)
block_index_y = randint(0, len(block_definition[block_index_x]) - 1)
# render the actual indexes of the matrix block
i = 0
j = 0
for a in range(block_index_x):
i += block_definition[a][block_index_y][0]
for a in range(block_index_y):
j += block_definition[block_index_x][a][1]
# current block's size
block_size = block_definition[block_index_x][block_index_y]
vertical_bound = i + block_size[0]
horizontal_bound = j + block_size[1]
        # pick a random value from the block as the flattening value;
        # if it's not an allowed value, pick the nearest allowed one
flattening_value = PickTheNearestValueFromList(
allowed_values,
matrix_initial[randint(i, vertical_bound-1)][randint(j, horizontal_bound-1)])
for g in range(i, vertical_bound):
for h in range(j, horizontal_bound):
matrix[g][h] = flattening_value
def AlterBlockSizes(block_definition: List[List[List[int]]], k: int) -> None:
"""Generate a neighbouring matrix by adjusting the blocks' sizes.
"""
# when resizing only one block you could have a situation when in order
# to maintain blocks' structural integrity resizing some other blocks
# would be absolutely necessary; you could have some overlapping or dead
# zones going on without considering some special cases;
# to avoid this issue, let's resize a line of blocks instead of just one
# decide whether to resize horizontally or vertically
if random() > 0.5:
# collect all oversized blocks' indexes
fat_blocks_indexes = []
for i in range(0, len(block_definition[0])):
if block_definition[0][i][1] > k:
fat_blocks_indexes.append(i)
if len(fat_blocks_indexes) == 0:
return
# pick one of them
line_of_blocks_index = fat_blocks_indexes[randint(
0, len(fat_blocks_indexes) - 1)]
# horizontally shrink chosen line of blocks
for i in range(0, len(block_definition)):
block_definition[i][line_of_blocks_index][1] -= 1
# now, choose which neighbouring line to enlarge
offset = None
if line_of_blocks_index == len(block_definition[0]) - 1:
# if the last line was shrunk, enlarge the one to the left
offset = -1
elif line_of_blocks_index == 0:
offset = 1
else:
offset = [-1, 1][randint(0, 1)]
# now, horizontally enlarge neighbouring blocks
for i in range(0, len(block_definition)):
block_definition[i][line_of_blocks_index + offset][1] += 1
else:
fat_blocks_indexes = []
for i in range(0, len(block_definition)):
if block_definition[i][0][0] > k:
fat_blocks_indexes.append(i)
if len(fat_blocks_indexes) == 0:
return
line_of_blocks_index = fat_blocks_indexes[randint(
0, len(fat_blocks_indexes) - 1)]
for i in range(0, len(block_definition[line_of_blocks_index])):
block_definition[line_of_blocks_index][i][0] -= 1
offset = None
if line_of_blocks_index == len(block_definition) - 1:
offset = -1
elif line_of_blocks_index == 0:
offset = 1
else:
offset = [-1, 1][randint(0, 1)]
for i in range(0, len(block_definition[line_of_blocks_index])):
block_definition[line_of_blocks_index + offset][i][0] += 1
```
#### File: l3/z3/main.py
```python
from sys import stderr, exit
from typing import Iterable
from time import time
from random import random, randint
# all possible directions the agent can go
DIRECTIONS = {0: 'U', 1: 'D', 2: 'L', 3: 'R'}
INVERSED_DIRECTIONS = {'U': 0, 'D': 1, 'L': 2, 'R': 3}
INFINITY = float('inf')
def is_inverse(a, b) -> bool:
"""Checks if two provided directions are the opposites of each other.
"""
if (a == 2 and b == 3) or (a == 3 and b == 2):
return True
if (a == 0 and b == 1) or (a == 1 and b == 0):
return True
return False
def remove_mini_loops(solution: Iterable[int]) -> None:
"""Removes unnecessary mini loops like LR or UD from the solution.
"""
length = len(solution)
i = 0
while i < length - 1:
cur = solution[i]
nex = solution[i + 1]
if is_inverse(cur, nex):
del solution[i]
del solution[i]
i -= 2
if i < 0:
i = -1
length -= 2
i += 1
def translate_position(position: Iterable[int], direction) -> None:
"""Moves provided vector in the provided direction.
"""
if direction == 0:
position[0] -= 1
elif direction == 1:
position[0] += 1
elif direction == 2:
position[1] -= 1
else:
position[1] += 1
def validate_solution(
solution: Iterable[int], starting_position,
simulation_map: Iterable[Iterable[int]]) -> Iterable[int]:
"""Validates solution and gives a shorter version of it if possible.
It returns `None` if the solution is not valid.
"""
pos = list(starting_position)
modified_solution = []
for move in solution:
current_cell = simulation_map[pos[0]][pos[1]]
# check if the agent hit a wall
if current_cell == 1:
return None
# check if the agent is already at the exit
elif current_cell == 8:
return modified_solution
translate_position(pos, move)
modified_solution.append(move)
if simulation_map[pos[0]][pos[1]] == 8:
return modified_solution
else:
return None
def fitness_func(population, starting_position, simulation_map):
population = map(
lambda sol: validate_solution(
sol, starting_position, simulation_map) if sol is not None else None,
population
)
# sort the population by their fitness
population = sorted(population, key=lambda sol: len(sol)
if sol is not None else INFINITY)
return population
def GA_find_shortest_path(simulation_map: Iterable[Iterable[int]], initial_solutions: Iterable[int],
max_pop_size: int, mutation_probability: float, max_running_time: int):
begin = time()
starting_position = None
i = 0
for line in simulation_map:
j = 0
for cell in line:
if cell == 5:
starting_position = (i, j)
j += 1
i += 1
# the first population
population = [*initial_solutions]
# now, we perform the GA
end = time()
while end-begin <= max_running_time:
# selection stage
pivot = round(max_pop_size/2)
# select the best solutions based on their length (the lowest the best)
founding_fathers = population[:pivot]
the_rest = population[pivot:]
# take a valid solution that wasn't that good but it will
# introduce more diversity
if len(population) == max_pop_size:
r = None
while r is None and len(the_rest) > 0:
r = the_rest.pop(randint(0, len(the_rest)-1))
if r is not None:
founding_fathers.append(r)
# crossover stage
population = [*founding_fathers]
# generate remaining population members based on the „founding fathers”
while len(population) < max_pop_size:
# pick two parents to crossover
p_one = founding_fathers.pop(randint(0, len(founding_fathers)-1))
p_two = founding_fathers[randint(0, len(founding_fathers)-1)]
founding_fathers.append(p_one)
# take a part from the first parent
index = randint(1, len(p_one)-1)
part_one = p_one[:index]
if random() > 0.5:
part_one = p_one[index:]
# take a part from the second parent
index = randint(1, len(p_two)-1)
part_two = p_two[index:]
if random() > 0.5:
part_two = p_two[:index]
new_member = part_one + part_two
# mutation stage
for i in range(0, len(new_member)):
if random() > 1-mutation_probability:
new_member[i] = (new_member[i] + randint(0, 3)) % 4
population.append(new_member)
# fitness stage
# all solutions that are invalid are `None`s
# not only are we evaluating the solutions but we're trimming out parts that are
# unnecessary – meaning if the agent got to the solution earlier than the length
# of the solution in question, we're leaving out the remaining part thus creating
# an even better solution (a shorter solution)
population = fitness_func(
population, starting_position, simulation_map)
end = time()
return population[0]
if __name__ == '__main__':
t, n, _, s, p = map(lambda x: int(x), input().split())
simulation_map = []
for _ in range(0, n):
simulation_map.append(
list(map(lambda x: int(x), list(input())))
)
initial_solutions = []
for _ in range(0, s):
sol = list(input())
sol_processed = []
for move in sol:
sol_processed.append(INVERSED_DIRECTIONS[move])
initial_solutions.append(sol_processed)
result = GA_find_shortest_path(simulation_map, initial_solutions, max_pop_size=p,
mutation_probability=0.05,
max_running_time=t)
for r in result:
print(DIRECTIONS[r], end='', file=stderr)
print(len(result))
```
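A minimal self-contained sketch of the crossover and mutation steps used inside `GA_find_shortest_path` above (same 0–3 direction encoding; the function names here are illustrative, not from the repository):
```python
# Sketch: splice a slice of one parent with a slice of the other, then flip random genes.
from random import randint, random

def crossover(p_one, p_two):
    i = randint(1, len(p_one) - 1)
    j = randint(1, len(p_two) - 1)
    part_one = p_one[:i] if random() > 0.5 else p_one[i:]
    part_two = p_two[j:] if random() > 0.5 else p_two[:j]
    return part_one + part_two

def mutate(member, probability=0.05):
    # with small probability replace a move by a randomly shifted one (mod 4 directions)
    return [(g + randint(0, 3)) % 4 if random() < probability else g
            for g in member]

if __name__ == "__main__":
    child = mutate(crossover([0, 1, 2, 3], [3, 2, 1, 0]))
    print(child)
```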
#### File: lab/lista-4/entropy.py
```python
from copy import deepcopy
from typing import Iterable
from math import log2
from decimal import Decimal
from sys import exit
# define all methods
JPEG_LS_METHODS = [
'normal', # original image
'W',
'N',
'NW',
'N + W - NW',
'N + (W - NW)/2',
'W + (N - NW)/2',
'(N + W)/2',
'new standard'
]
def rgb_hexify(rgb: Iterable[int]) -> str:
"""Convert a list of RGB numbers to a hex format.
"""
return ''.join(
list(map(
lambda x: hex(abs(x))[2:].zfill(2),
rgb
))[::-1]
)
class Entropy(object):
def __init__(self):
# define a base object for counting how many times stuff occurred
base_counter = {
'pixels': dict(),
'red': dict(),
'green': dict(),
'blue': dict()
}
# initialize all counters
self.all_counters = dict()
for method in JPEG_LS_METHODS:
self.all_counters[method] = deepcopy(base_counter)
def register_char(self, method: str, pixel) -> None:
"""Add the pixel to the method's counter.
"""
# register the whole pixel
red = pixel[2]
green = pixel[1]
blue = pixel[0]
tmp = self.all_counters[method]
# register the occurrence in all the counters
for c in pixel:
if tmp['pixels'].get(c) is None:
tmp['pixels'][c] = 1
else:
tmp['pixels'][c] += 1
if tmp['red'].get(red) is None:
tmp['red'][red] = 1
else:
tmp['red'][red] += 1
if tmp['green'].get(green) is None:
tmp['green'][green] = 1
else:
tmp['green'][green] += 1
if tmp['blue'].get(blue) is None:
tmp['blue'][blue] = 1
else:
tmp['blue'][blue] += 1
def calc_entropy(self, method, mode='pixels') -> float:
"""Calculate the entropy.
Args:
`method`: Calc entropy of the results of the `method` method.
            `mode`: Calc entropy over whole 'pixels', or only for 'red',
            'green', or 'blue'.
"""
# output = 0
output = Decimal(0)
data = self.all_counters[method][mode].values()
# total_count = sum(data)
total_count = Decimal(sum(data))
for x in data:
# output += x * (-1 * log2(x))
output += Decimal(x) * Decimal(-1 *
Decimal(x).ln() / Decimal(2).ln())
output /= total_count
# return output + log2(total_count)
return float(output + total_count.ln() / Decimal(2).ln())
```
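A quick self-contained sanity check (not part of the repository) that the rearranged sum used in `calc_entropy` above, `sum(x * -log2(x)) / N + log2(N)`, equals the usual entropy `-sum(p * log2(p))` with `p = x / N`:
```python
# Sketch: both formulas give the same entropy for a list of occurrence counts.
from math import log2

def entropy_rearranged(counts):
    total = sum(counts)
    return sum(x * -log2(x) for x in counts) / total + log2(total)

def entropy_direct(counts):
    total = sum(counts)
    return -sum((x / total) * log2(x / total) for x in counts)

if __name__ == "__main__":
    counts = [5, 3, 2]
    print(entropy_rearranged(counts))   # ≈ 1.4855
    print(entropy_direct(counts))       # same value
```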
#### File: lab/lista-5/errors.py
```python
COLOURS = ['blue', 'green', 'red']
class Errors(object):
"""Calculates the mean squared error (MSE) and
signal-to-noise ratio (SNR) of an image.
"""
def __init__(self):
self._mse = dict()
self._snr = dict()
self._count = dict()
for colour in COLOURS:
self._mse[colour] = 0
self._snr[colour] = 0
self._count[colour] = 0
def register_val(self, original_value: int, quantized_value: int, colour: str):
"""While quantizing an image, this method accumulates all summation components.
"""
self._mse[colour] += (original_value - quantized_value) ** 2
self._snr[colour] += original_value ** 2
self._count[colour] += 1
def calc_mse(self, colour: str):
if colour in COLOURS:
return self._mse[colour] / self._count[colour]
else:
# calc as a whole pixel
top = 0
bottom = 0
for c in COLOURS:
top += self._mse[c]
bottom += self._count[c]
return top / bottom
def calc_snr(self, colour: str):
top = 0
bottom = 0
if colour in COLOURS:
# (1/N) / (1/N) = 1
top = self._snr[colour]
bottom = self._mse[colour]
else:
# calc as a whole pixel
for c in COLOURS:
top += self._snr[c]
bottom += self._mse[c]
if bottom > 0:
return top / bottom
else:
return float('inf')
```
#### File: lab/lista-5/main.py
```python
from typing import List, Tuple, Iterable
from sys import argv, exit, stderr
from errors import Errors, COLOURS
from math import log10
import os
def one_byte(f) -> int:
"""Read one byte and return it as an integer.
"""
return ord(f.read(1))
def byte_list(f, count: int) -> Tuple[int]:
"""Read `count` bytes and return it as a tuple of integers.
"""
return tuple(int(x) for x in f.read(count))
def int_from_bytes(bytes_) -> int:
"""Calculates an integer from provided bytes.
"""
output = 0
for i in range(0, len(bytes_)):
output += bytes_[i] * (2**(8*i))
return output
def quantize(input_image: str, output_image: str, r_bits: int, g_bits: int, b_bits: int):
err = Errors()
with open(input_image, 'rb+') as fi:
        # read the first part of the header
header_header = byte_list(fi, 12)
image_width_raw = byte_list(fi, 2)
image_width = int_from_bytes(image_width_raw)
image_height_raw = byte_list(fi, 2)
image_height = int_from_bytes(image_height_raw)
# read the rest of the header
pixel_depth = one_byte(fi)
image_descriptor = one_byte(fi)
with open(output_image, 'wb') as fo:
# copy the header over
fo.write(bytes(header_header))
fo.write(bytes(image_width_raw))
fo.write(bytes(image_height_raw))
fo.write(bytes([pixel_depth]))
fo.write(bytes([image_descriptor]))
            # wrap the bit counts in a convenient list
colour_bits_count = [b_bits, g_bits, r_bits]
# process the pixels
for _ in range(0, image_height):
for __ in range(0, image_width):
# take three bytes for each pixel (BGR *not* RGB)
pixel = byte_list(fi, 3)
quantized_pixel = []
for colour in [0, 1, 2]:
                        # how many bits have to be stripped off
shift = 8 - colour_bits_count[colour]
q = pixel[colour]
if shift > 0:
# strip off unnecessary bits
q = q >> shift
                            # to reconstruct the middle value of the quantization interval,
                            # append a one and then pad the rest back out with zeroes
q = q << 1
q += 1
if shift > 1:
q = q << shift-1
fo.write(bytes([q]))
# add appropriate values to SNR and MSE
err.register_val(pixel[colour], q, COLOURS[colour])
# copy the footer over
x = fi.read(1)
while x:
fo.write(x)
x = fi.read(1)
return err
def generate_all_RGB_combinations(pixel_bits_count: int):
"""Generates all possible combinations of bit spread across all colour channels.
"""
for r in range(0, pixel_bits_count+1):
for g in range(0, pixel_bits_count+1):
for b in range(0, pixel_bits_count+1):
if r + g + b == pixel_bits_count:
yield (r, g, b)
if __name__ == '__main__':
if len(argv) < 5:
exit('usage: ./main.py <input file> <output file> <bit depth (bits per pixel)> <bit spread measure: MSE|SNR>')
input_image = argv[1]
output_image = argv[2]
pixel_depth = int(argv[3])
bit_spread_measure = argv[4]
tmp_output_image = '__tmp__' + output_image
if bit_spread_measure not in ['MSE', 'SNR']:
exit('invalid option for bit spread measure; only „MSE” or „SNR” are allowed')
# go through all possible RGB spread combinations
rgb_combinations = generate_all_RGB_combinations(pixel_depth)
# run the first time so we have something to compare with
best_bit_spread = next(rgb_combinations)
best_results = quantize(input_image, output_image, *best_bit_spread)
# go through the rest of the generated bit spreads
for bit_spread in rgb_combinations:
        # store it in a separate file so as not to overwrite the best output image found so far
results = quantize(input_image, tmp_output_image, *bit_spread)
# if it is in fact better, replace the image with the newly generated one
better = False
if bit_spread_measure == 'MSE':
# compare the highest MSE
if max(results.calc_mse(c) for c in COLOURS) < max(best_results.calc_mse(c) for c in COLOURS):
better = True
else: # bit_spread_measure == 'SNR'
# compare the lowest SNR
if min(results.calc_snr(c) for c in COLOURS) > min(best_results.calc_snr(c) for c in COLOURS):
better = True
if better:
best_results = results
best_bit_spread = bit_spread
# overwrite the existing best result file
os.remove(output_image)
os.rename(tmp_output_image, output_image)
# report
print('found a better bit spread!', str(best_bit_spread), file=stderr)
# clean up
if os.path.exists(tmp_output_image):
os.remove(tmp_output_image)
# print best bit spread
print('RGB bit spread:', str(best_bit_spread))
# print the error measurements
print('MSE =', best_results.calc_mse(''))
for colour in COLOURS[::-1]:
print('MSE(' + colour[0] + ')=', best_results.calc_mse(colour))
print('SNR =', best_results.calc_snr(''),
'(' + str(10 * log10(best_results.calc_snr(''))) + ' dB)')
for colour in COLOURS[::-1]:
print('SNR(' + colour[0] + ')=', best_results.calc_snr(colour),
'(' + str(10 * log10(best_results.calc_snr(colour))) + ' dB)')
```
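A minimal self-contained sketch of the per-channel quantization step performed inside `quantize` above: keep the top `bits` bits of an 8-bit value, then reconstruct the middle of the resulting interval by appending a one and padding with zeroes (the function name here is illustrative, and the mid-point reconstruction is assumed to apply only when bits are actually stripped):
```python
# Sketch: quantize one 8-bit channel value to `bits` bits and map it back to 0..255.
def quantize_channel(value: int, bits: int) -> int:
    shift = 8 - bits
    q = value
    if shift > 0:
        q >>= shift          # keep only the top `bits` bits
        q = (q << 1) + 1     # append a 1 -> middle of the quantization interval
        if shift > 1:
            q <<= shift - 1  # pad back out to 8 bits
    return q

if __name__ == "__main__":
    print(quantize_channel(200, 3))   # 208: interval [192, 224) -> its midpoint
    print(quantize_channel(200, 8))   # 200: full bit depth keeps the value intact
```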
#### File: lab/lista-6/main.py
```python
from typing import List, Tuple, Iterable
from sys import argv, exit, stderr
from getopt import getopt
from bitwiseio import BitReader, BitWriter
from bitpack_utility import *
from tga_utility import *
def add_pixels_mod(one: Tuple[int], two: Tuple[int]):
"""Adds two pixels' values together and keeps them in range of [0,255].
"""
return tuple(
(one[i] + two[i]) % 256 for i in range(0, 3)
)
def encode(input_image: str, output_file: str, bitdepth: int) -> None:
"""Encodes given image to a propriatory format file with a specialized
header on top of the original file's TGA header.
"""
with open(input_image, 'rb+') as fi:
with BitReader(fi) as reader:
with open(output_file, "wb+") as fo:
with BitWriter(fo) as writer:
# copy the original header over and read image dimensions
image_width, image_height = read_and_write_tga_header(
reader, writer)
                    # write a proprietary header that contains the bitdepth
writer.writebits(bitdepth, 8)
# start encoding from \vec0
previous_pixel = (0, 0, 0)
for _ in range(image_width * image_height):
current_pixel = bitpack_list(reader, 3)
# iterate over all colours
quantized_difference = [0 for _ in range(3)]
for c in range(0, 3):
# calculate the difference and quantize it
quantized_difference[c] = \
(current_pixel[c] - previous_pixel[c])\
% 256\
>> (8-bitdepth)
# save the quantized difference
writer.writebits(quantized_difference[c], bitdepth)
# revert back to its original bit size
# (now without unnecessary bits)
quantized_difference[c] = quantized_difference[c] << (
8-bitdepth)
# replace the old pixel with the current one
previous_pixel = add_pixels_mod(
previous_pixel, quantized_difference)
# copy the original footer over
read_and_write_tga_footer(reader, writer)
def decode(input_file: str, output_image: str) -> None:
"""Decodes given binary file back to the original TGA image.
Input file needs to be a binary file generated by the `encode` function.
"""
with open(input_file, 'rb+') as fi:
with BitReader(fi) as reader:
with open(output_file, "wb+") as fo:
with BitWriter(fo) as writer:
# copy the original header over and read image dimensions
image_width, image_height = read_and_write_tga_header(
reader, writer)
                    # read the proprietary header
bitdepth = one_bitpack(reader)
# start from a \vec0
previous_pixel = (0, 0, 0)
for _ in range(image_height * image_width):
# read the offset and bring back its original bitsize
current_offset = tuple(map(
lambda x: x << (8-bitdepth),
bitpack_list(reader, 3, size=bitdepth)
))
# recreate a quantized pixel
previous_pixel = add_pixels_mod(
previous_pixel, current_offset)
# save recovered pixel
for c in range(0, 3):
t = previous_pixel[c]
writer.writebits(t, 8)
# recover the original file footer
read_and_write_tga_footer(reader, writer)
if __name__ == "__main__":
raw_args = argv[1:]
optlist, args = getopt(raw_args, '', ['mode='])
usage_help = 'usage: ./main.py --mode <encode|decode> <input file> <output file> [bit depth]'
    if len(args) < 2 or len(optlist) < 1:
exit(usage_help)
input_file = args[0]
output_file = args[1]
bitdepth = None
if len(args) >= 3:
bitdepth = int(args[2])
mode = None
for opt, arg in optlist:
if opt == '--mode':
if arg == 'encode' and bitdepth is None:
print('encode mode requires bit depth option')
exit()
elif arg == 'encode':
mode = 'e'
elif arg == 'decode':
mode = 'd'
else:
print('invalid --mode')
exit(usage_help)
if mode == 'e':
encode(input_file, output_file, bitdepth)
else: # mode == 'd'
decode(input_file, output_file)
```
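A minimal self-contained sketch of the per-channel differencing used by `encode`/`decode` above: store `(current - previous) mod 256` with the low `8 - bitdepth` bits dropped, then rebuild pixels by accumulating the dequantized differences mod 256 (single channel, illustrative function names):
```python
# Sketch: differential coding of one colour channel; lossless at 8 bits, lossy below that.
def encode_channel(values, bitdepth):
    prev, out = 0, []
    for v in values:
        diff = ((v - prev) % 256) >> (8 - bitdepth)        # quantized difference
        out.append(diff)
        prev = (prev + (diff << (8 - bitdepth))) % 256     # track the decoder's view of the pixel
    return out

def decode_channel(diffs, bitdepth):
    prev, out = 0, []
    for d in diffs:
        prev = (prev + (d << (8 - bitdepth))) % 256
        out.append(prev)
    return out

if __name__ == "__main__":
    values = [10, 200, 128, 130]
    print(decode_channel(encode_channel(values, 8), 8))   # [10, 200, 128, 130] (lossless)
    print(decode_channel(encode_channel(values, 4), 4))   # lossy approximation
```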
#### File: lab/lista-6/tga_utility.py
```python
from bitwiseio import BitReader, BitWriter
from bitpack_utility import *
def read_and_write_tga_header(reader: BitReader, writer: BitWriter):
"""Reads the TGA header from the open input file and writes it to the
output file writer.
"""
    # read the first part of the original header
header_header = bitpack_list(reader, 12)
image_width_raw = bitpack_list(reader, 2)
image_width = int_from_bytes(image_width_raw)
image_height_raw = bitpack_list(reader, 2)
image_height = int_from_bytes(image_height_raw)
# read the rest of the header
header_the_rest = bitpack_list(reader, 2)
# writer is optional
if writer is not None:
# copy the original header over
writer.writebits(int_from_bytes(bytes(header_header[::-1])), 12 * 8)
writer.writebits(int_from_bytes(bytes(image_width_raw[::-1])), 2 * 8)
writer.writebits(int_from_bytes(bytes(image_height_raw[::-1])), 2 * 8)
writer.writebits(int_from_bytes(bytes(header_the_rest[::-1])), 2 * 8)
return image_width, image_height
def read_and_write_tga_footer(reader: BitReader, writer: BitWriter):
"""Reads the TGA footer from the open input file and writes it to the
output file writer.
"""
while reader.read:
x = reader.readbits(8)
writer.writebits(x, 8)
```
#### File: py/lista-2/ex-2.py
```python
from sys import argv
BASE64_ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+="
def encode(input_: str) -> str:
"""Encode input into a base64 string."""
# make sure we're reading bytes
input = input_.encode()
output = ""
# bits count offset generated by previous bytes readings
offset = 0
i = 0
while i < len(input):
# the 6bit character to write to the output string
c = 0
# currently processed 8 bit character
curr = input[i]
# preceding 8 bit character
prev = input[i - 1]
if offset == 0:
# take the first 6 bits from the 8 bit character
c = curr >> 2
offset = 6
elif offset == 6:
# take the last 2 bits from the previous character
c1 = prev & 3
# take the first 4 bits from the current character
c2 = curr >> 4
# merge these two parts into one
c = (c1 * (2 ** 4)) + c2
offset = 4
elif offset == 4:
# take the last 4 bits from the previous character
c1 = prev & 15
# take the first 2 bits from the current character
c2 = curr >> 6
c = (c1 * (2 ** 2)) + c2
offset = 2
elif offset == 2:
# take the last 6 bits
c = prev & 63
            # since 8 > 6 the input index falls behind and needs to be rolled back
            # after every sequence of 3 consecutive 8-bit characters
            # (24 bits are 24 / 8 = 3 input bytes but 24 / 6 = 4 output characters)
i -= 1
offset = 0
i += 1
output += BASE64_ALPHABET[c]
# handle the leftovers
c = input[-1]
padding = ""
if offset == 2:
# take the last 6 bits
c &= 63
elif offset == 4:
# take the last 4 bits
c &= 15
c *= 2 ** 2
padding = "="
elif offset == 6:
# take the last 2 bits
c &= 3
c *= 2 ** 4
padding = "=="
output += BASE64_ALPHABET[c] + padding
return output
def decode(input: str) -> str:
"""Decode input base64 string to its original form (in raw bytes).
"""
output = ""
# bits count offset generated by previous bytes readings
offset = 0
i = 0
# partly assembled 8 bit character
# it will be added to `output` if it contains 8 bits
tmp = 0
while i < len(input):
curr = input[i]
if curr == "=":
# ignore the padding
break
# convert base64 character to an integer
c = BASE64_ALPHABET.index(curr)
if offset == 0:
# take the whole 6 bits
tmp += c
tmp *= 2 ** 2
offset = 6
elif offset == 6:
# take first 2 bits
tmp += c >> 4
# enough bits to put into output
output += chr(tmp)
# take last 4 bits
tmp = c & 15
tmp *= 2 ** 4
offset = 4
elif offset == 4:
# take first 4 bits
tmp += c >> 2
output += chr(tmp)
# take last 2 bits
tmp = c & 3
tmp *= 2 ** 6
offset = 2
elif offset == 2:
# take the whole 6 bits
tmp += c
output += chr(tmp)
tmp = 0
offset = 0
i += 1
return output
def main(mode: str, input_file_path: str, output_file_path: str) -> None:
"""Either encodes or decodes provided file to or from base64 form.
Transforms the content of a file from bytes to an array of 6 bit
characters written using base64 ASCII alphabet. It can also decode
such array of 6 bit characters back into the original form.
Args:
`mode`: Choose either `--encode` or `--decode` to transform the
provided file accordingly.
`input_file_path`: Path to the file to transform.
`output_file_path`: Path to the output file.
"""
if mode == "--encode":
with open(input_file_path) as file:
contents = file.read()
encoded = encode(contents)
with open(output_file_path, "w+") as file:
file.write(encoded)
elif mode == "--decode":
with open(input_file_path) as file:
contents = file.read()
decoded = decode(contents)
with open(output_file_path, "w+") as file:
file.write(decoded)
if __name__ == "__main__":
if len(argv) > 3:
main(argv[1], argv[2], argv[3])
else:
print("usage: ./ex-2.py --encode|--decode <input_file> <output_file>")
```
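One observation about the script above (not from the repository): `BASE64_ALPHABET` ends in `+=` rather than the standard `+/`, and `=` doubles as the padding character, so its output is only guaranteed to round-trip through this script's own decoder. For comparison, the standard-library round-trip looks like this:
```python
# Sketch: reference round-trip with the stdlib base64 module.
import base64

if __name__ == "__main__":
    text = "hello base64"
    encoded = base64.b64encode(text.encode()).decode()
    decoded = base64.b64decode(encoded).decode()
    print(encoded)           # aGVsbG8gYmFzZTY0
    print(decoded == text)   # True
```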
#### File: py/lista-4/ex-4.py
```python
from inspect import getfullargspec
import math
class _overload_storage(object):
"""Storage for overloaded functions.
Attributes:
`_functions`: Static dictionary that holds all overloaded
functions indexed by function name.
`_func_name`: Name of the function that is overloaded.
"""
_functions = {}
_func_name = ""
def __init__(self, func):
        # take note of the function name
self._func_name = func.__name__
# if there is no entry for that function name add an empty list
# for it
if _overload_storage._functions.get(self._func_name) is None:
_overload_storage._functions[self._func_name] = []
# add new overload of this function
_overload_storage._functions[self._func_name].append(
{
# save all args of this overload for identification
'args': getfullargspec(func).args,
# the actual function to run
'func': func
}
)
def __call__(self, *args, **kwargs):
"""Overwrites the default call function to make this class
„callable” to enable function overloading.
"""
for f in _overload_storage._functions[self._func_name]:
# loop over all overloads of this function
args_count = len(args) + len(kwargs)
if args_count != len(f['args']):
continue
return f['func'](*args, **kwargs)
# it may be replaced with `raise`
return None
def overload(func):
"""Make an overload of a function.
"""
c = _overload_storage(func)
return c
if __name__ == "__main__":
@overload
def norm(a, b):
return math.sqrt(a*a + b*b)
@overload
def norm(a, b, c):
return abs(a) + abs(b) + abs(c)
# testing if another function with the same argument count doesn't
# interfere with other overloaded functions
@overload
def another_one(a, b):
return a + b
print(
norm(2, 4),
norm(2, 3, 4),
another_one(2, 4),
sep="\n"
)
```
|
{
"source": "jerry-sky/vyrow",
"score": 3
}
|
#### File: vyrow/tests/utilities.py
```python
from sys import argv
import os
def file_contents(path: str) -> str:
'''
Returns contents of a given file.
'''
with open(path, 'r+') as f:
return f.read()
def current_path() -> str:
'''
Returns the path to the current script running directory.
'''
return os.path.dirname(argv[0]) or '.'
def parent_path() -> str:
'''
Returns the path to the parent directory of the current script running directory.
'''
# get the directory that contains the running script
cwd = current_path()
# calculate the parent directory
pp = ''
if cwd == '.':
# the directory running is the `tests` directory
pp = '..'
else:
# otherwise go one directory level up
pp = os.path.dirname(cwd)
if pp == '':
# the script has been initiated from the repository directory
pp = '.'
return pp
```
|
{
"source": "jerrysun103/uoft",
"score": 3
}
|
#### File: assignments/a1/draft_+1.py
```python
import time
from toah_model_jerry import TOAHModel
def three_stools(toah, n, ori, des, mid):
if n == 1:
toah.move(ori, des)
else:
three_stools(toah, n - 1, ori, mid, des)
toah.move(ori, des)
three_stools(toah, n - 1, mid, des, ori)
def mini_step(i):
if i == 1:
return 1
else:
k = 1
step = 2 * mini_step(i - 1) + 1
for j in range(1, i):
if 2 * mini_step(i - j) + 2 ** j - 1 > step:
step = 2 * mini_step(i - j) + 2 ** j - 1
k = j
return k
def four_stools(toah, n, ori, des, mid1):
mid2 = -1
for i in range(4):
if i != ori and i != des and i != mid1:
mid2 = i
if n == 1:
toah.move(ori, des)
return 1
else:
k = 1
step = 2 * four_stools(toah, n - 1, ori, des, mid1) + 1
for j in range(1, n):
if 2 * four_stools(toah, n - j, ori, des, mid1) + 2 ** j - 1 < step:
step = 2 * four_stools(toah, n - j, ori, des, mid1) + 2 ** j - 1
k = j
four_stools(toah, n - k, ori, mid1, des)
three_stools(toah, k, ori, des, mid2)
four_stools(toah, n - k, mid1, des, ori)
return step
def tour_of_four_stools(model, delay_btw_moves=0.5, animate=False):
"""Move a tower of cheeses from the first stool in model to the fourth.
@type model: TOAHModel
TOAHModel with tower of cheese on first stool and three empty
stools
@type delay_btw_moves: float
time delay between moves if console_animate is True
@type animate: bool
animate the tour or not
"""
pass
if __name__ == '__main__':
#num_cheeses = 5
#delay_between_moves = 0.5
#console_animate = False
# DO NOT MODIFY THE CODE BELOW.
#four_stools = TOAHModel(4)
#four_stools.fill_first_stool(number_of_cheeses=num_cheeses)
#tour_of_four_stools(four_stools,
#animate=console_animate,
#delay_btw_moves=delay_between_moves)
#print(four_stools.number_of_moves())
# Leave files below to see what python_ta checks.
# File tour_pyta.txt must be in same folder
#import python_ta
#python_ta.check_all(config="tour_pyta.txt")
new = TOAHModel(3)
new.fill_first_stool(4)
model = TOAHModel(3)
model.fill_first_stool(4)
three_stools(new, 4, 0, 2, 1)
seq = new.get_move_seq()
for item in range(seq.length()):
ori = seq.get_move(item)[0]
des = seq.get_move(item)[1]
model.move(ori, des)
print(model)
new_ = TOAHModel(4)
new_.fill_first_stool(5)
model_ = TOAHModel(4)
model_.fill_first_stool(5)
four_stools(new_, 5, 0, 3, 1)
seq = new_.get_move_seq()
for item in range(seq.length()):
ori = seq.get_move(item)[0]
des = seq.get_move(item)[1]
model_.move(ori, des)
print(model_)
print(new_.number_of_moves())
```
#### File: assignments/a1/toah_model.py
```python
class TOAHModel:
""" Model a game of Tour Of Anne Hoy.
Model stools holding stacks of cheese, enforcing the constraint
that a larger cheese may not be placed on a smaller one.
"""
def __init__(self, number_of_stools):
""" Create new TOAHModel with empty stools to hold stools of cheese.
@param TOAHModel self: a new TOAHModel
@param int number_of_stools: the number of stools
@rtype: None
>>> M = TOAHModel(4)
>>> M.fill_first_stool(5)
>>> (M.get_number_of_stools(), M.number_of_moves()) == (4,0)
True
>>> M.get_number_of_cheeses()
5
"""
self.trace = []
self._move_seq = MoveSequence([])
self._stools = [[] for i in range(number_of_stools)]
def get_move_seq(self):
""" Return the move sequence
@param TOAHModel self: the TOAHModel itself
@rtype: MoveSequence
>>> toah = TOAHModel(2)
>>> toah.get_move_seq() == MoveSequence([])
True
"""
return self._move_seq
def __eq__(self, other):
""" Return whether TOAHModel self is equivalent to other.
Two TOAHModels are equivalent if their current
configurations of cheeses on stools look the same.
More precisely, for all h,s, the h-th cheese on the s-th
stool of self should be equivalent the h-th cheese on the s-th
stool of other
@param TOAHModel self: a TOAHModel self
@param TOAHModel other: a TOAHModel other
@rtype: bool
>>> m1 = TOAHModel(4)
>>> m1.fill_first_stool(7)
>>> m1.move(0, 1)
>>> m1.move(0, 2)
>>> m1.move(1, 2)
>>> m2 = TOAHModel(4)
>>> m2.fill_first_stool(7)
>>> m2.move(0, 3)
>>> m2.move(0, 2)
>>> m2.move(3, 2)
>>> m1 == m2
True
"""
if len(self._stools) != len(other._stools):
return False
for i in range(len(self._stools)):
if len(self._stools[i]) != len(other._stools[i]):
return False
for j in range(len(self._stools[i])):
if self._cheese_at(i, j) != other._cheese_at(i, j):
return False
return True
def _cheese_at(self, stool_index, stool_height):
# """ Return (stool_height)th from stool_index stool, if possible.
#
# @type self: TOAHModel
# @type stool_index: int
# @type stool_height: int
# @rtype: Cheese | None
#
# >>> M = TOAHModel(4)
# >>> M.fill_first_stool(5)
# >>> M._cheese_at(0,3).size
# 2
# >>> M._cheese_at(0,0).size
# 5
# """
if 0 <= stool_height < len(self._stools[stool_index]):
return self._stools[stool_index][stool_height]
else:
return None
def __str__(self):
""" Depicts only the current state of the stools and cheese.
@param TOAHModel self: the TOAHModel itself
@rtype: str
"""
all_cheeses = []
for height in range(self.get_number_of_cheeses()):
for stool in range(self.get_number_of_stools()):
if self._cheese_at(stool, height) is not None:
all_cheeses.append(self._cheese_at(stool, height))
max_cheese_size = max([c.size for c in all_cheeses]) \
if len(all_cheeses) > 0 else 0
stool_str = "=" * (2 * max_cheese_size + 1)
stool_spacing = " "
stools_str = (stool_str + stool_spacing) * self.get_number_of_stools()
def _cheese_str(size):
# helper for string representation of cheese
if size == 0:
return " " * len(stool_str)
cheese_part = "-" + "--" * (size - 1)
space_filler = " " * int((len(stool_str) - len(cheese_part)) / 2)
return space_filler + cheese_part + space_filler
lines = ""
for height in range(self.get_number_of_cheeses() - 1, -1, -1):
line = ""
for stool in range(self.get_number_of_stools()):
c = self._cheese_at(stool, height)
if isinstance(c, Cheese):
s = _cheese_str(int(c.size))
else:
s = _cheese_str(0)
line += s + stool_spacing
lines += line + "\n"
lines += stools_str
return lines
def fill_first_stool(self, number_of_cheeses):
""" Add number of cheeses, number_of_cheeses, to the first stool,
the cheese adds in in an order from larger size(larger diameter) at the
bottom to smaller size on the top.
@param TOAHModel self: the TOAHModel itself
@param int number_of_cheeses: the number of cheeses
@rtype: None
>>> M = TOAHModel(4)
>>> M.fill_first_stool(5)
>>> M._stools[0] == [Cheese(5), Cheese(4), Cheese(3), Cheese(2), \
Cheese(1)]
True
"""
for i in range(number_of_cheeses, 0, -1):
self._stools[0].append(Cheese(i))
def get_number_of_stools(self):
""" Return the number of stools in the TOAHModel.
@param TOAHModel self: the TOAHModel itself
@rtype: int
>>> new_toah = TOAHModel(5)
>>> new_toah.get_number_of_stools()
5
"""
return len(self._stools)
def get_number_of_cheeses(self):
""" Return the total number of cheese inside the TOAHModel.
@param TOAHModel self: the TOAHModel itself
@rtype: int
>>> new_toah = TOAHModel(5)
>>> new_toah.fill_first_stool(6)
>>> new_toah.get_number_of_cheeses()
6
"""
return sum([len(stool) for stool in self._stools])
def add(self, cheese_add, s_index):
""" Add cheese, cheese_add, to indicated stool represented by s_index,
add the cheese iff the top cheese's size at the indicated stool is less
than the cheese, cheese_add, that we are going add, or the indicated
stool is empty, otherwise, raise exception.
@param TOAHModel self: the TOAHModel itself
@param Cheese cheese_add: the cheese object to add
@param int s_index: the index of a specific stool
@rtype: None
>>> new_toah = TOAHModel(5)
>>> new_toah.fill_first_stool(6)
>>> new_toah.add(Cheese(1), 2)
>>> new_toah.get_cheese_location(Cheese(1))
2
"""
if len(self._stools[s_index]) == 0 or cheese_add.size < \
self._stools[s_index][-1].size:
self._stools[s_index].append(cheese_add)
else:
raise IllegalMoveError()
def get_cheese_location(self, cheese):
""" Return the index of the stool where the cheese is located in the
TOAHModel self.
@param TOAHModel self: the TOAHModel
@param Cheese cheese: the cheese that you want to find the location
@rtype: int
>>> new_toah = TOAHModel(5)
>>> new_toah.fill_first_stool(6)
>>> new_toah.add(Cheese(1), 2)
>>> new_toah.get_cheese_location(Cheese(1))
2
"""
s_index = -1
for index in range(len(self._stools)):
if cheese in self._stools[index]:
s_index = index
if s_index == -1:
raise CheeseNotFoundError()
else:
return s_index
def get_top_cheese(self, stool_index):
""" Return the Cheese object based on the stool_index in the TOAHModel
self.
@param TOAHModel self: the TOAHModel itself
@param int stool_index: the index of stool
@rtype: Cheese
"""
if len(self._stools[stool_index]) == 0:
return None
else:
return self._stools[stool_index][-1]
def move(self, from_stool, dest_stool):
""" Move cheese from from_stool to dest_stool iff the cheese size is
less than dest_stool's top Cheese'size or the dest_stool is a empty
stool, otherwise raise IllegalMoveError.
@param TOAHModel self: the TOAHModel itself
@param int from_stool: the index of the from_stool
@param int dest_stool: the index of the dest_stool
@rtype: None
>>> new_toah = TOAHModel(5)
>>> new_toah.fill_first_stool(6)
>>> new_toah.move(0, 1)
>>> new_toah.get_top_cheese(0).size
2
"""
if len(self._stools[from_stool]) != 0:
top_cheese = self.get_top_cheese(from_stool)
self.add(top_cheese, dest_stool)
self._stools[from_stool].remove(top_cheese)
self._move_seq.add_move(from_stool, dest_stool)
else:
raise IllegalMoveError()
def number_of_moves(self):
""" Return the number of moves of the TOAHModel self.
        @param TOAHModel self: the TOAHModel itself
@rtype: int
>>> new_toah = TOAHModel(5)
>>> new_toah.fill_first_stool(6)
>>> new_toah.move(0, 1)
>>> new_toah.move(0, 2)
>>> new_toah.number_of_moves()
2
"""
return self._move_seq.length()
class Cheese:
""" A cheese for stacking in a TOAHModel
=== Attributes ===
@param int size: width of cheese
"""
def __init__(self, size):
""" Initialize a Cheese to diameter size.
@param Cheese self: The Cheese itself
@param int size: Cheese size
@rtype: None
>>> c = Cheese(3)
>>> isinstance(c, Cheese)
True
>>> c.size
3
"""
self.size = size
def __eq__(self, other):
""" Is self equivalent to other?
We say they are if they're the same
size.
@param Cheese self: The Cheese itself
@param Cheese|Any other: Other Cheese
@rtype: bool
>>> c1 = Cheese(3)
>>> c2 = Cheese(4)
>>> c3 = Cheese(3)
>>> c1 == c2
False
>>> c1 == c3
True
"""
return type(self) == type(other) and self.size == other.size
class IllegalMoveError(Exception):
""" Exception indicating move that violate TOAHModel
"""
pass
class CheeseNotFoundError(Exception):
"""Exception indicating that the cheese is not in the TOAHModel
"""
pass
class MoveSequence(object):
""" Sequence of moves in TOAH game
"""
def __init__(self, moves):
""" Create a new MoveSequence self.
@param MoveSequence self: The MoveSequence itself
@param list[tuple[int]] moves: move record
@rtype: None
>>> ms = MoveSequence([(1, 2)])
>>> ms._moves == [(1, 2)]
True
"""
self._moves = moves
def get_move(self, i):
""" Return the move at position i in self
@param MoveSequence self: The MoveSequence itself
@param int i: The i th move
@rtype: tuple[int]
>>> ms = MoveSequence([(1, 2)])
>>> ms.get_move(0) == (1, 2)
True
"""
return self._moves[i]
def add_move(self, src_stool, dest_stool):
""" Add move from src_stool to dest_stool to MoveSequence self.
@param MoveSequence self: The MoveSequence itself
@param int src_stool: the source stool
@param int dest_stool: the destination stool
@rtype: None
>>> ms = MoveSequence([(1, 2)])
>>> ms.add_move(3, 4)
>>> ms.get_move(1) == (3, 4)
True
"""
self._moves.append((src_stool, dest_stool))
def length(self):
""" Return number of moves in self.
@param MoveSequence self: The MoveSequence itself
@rtype: int
>>> ms = MoveSequence([(1, 2)])
>>> ms.length()
1
"""
return len(self._moves)
def generate_toah_model(self, number_of_stools, number_of_cheeses):
""" Construct TOAHModel from number_of_stools and number_of_cheeses
after moves in self.
Takes the two parameters for
the game (number_of_cheeses, number_of_stools), initializes the game
in the standard way with TOAHModel.fill_first_stool(number_of_cheeses),
and then applies each of the moves in this move sequence.
@param MoveSequence self: The MoveSequence itself
@param int number_of_stools: the number of stools
@param int number_of_cheeses: the number of cheeses
@rtype: TOAHModel
>>> ms = MoveSequence([])
>>> toah = TOAHModel(2)
>>> toah.fill_first_stool(2)
>>> toah == ms.generate_toah_model(2, 2)
True
"""
model = TOAHModel(number_of_stools)
model.fill_first_stool(number_of_cheeses)
for move in self._moves:
model.move(move[0], move[1])
return model
if __name__ == '__main__':
# import doctest
# doctest.testmod(verbose=True)
# Leave lines below to see what python_ta checks.
# File toahmodel_pyta.txt must be in same folder.
import python_ta
python_ta.check_all(config="toahmodel_pyta.txt")
```
#### File: a2/starter/huffman_eris.py
```python
from nodes import HuffmanNode, ReadNode
# ====================
# Helper functions for manipulating bytes
def get_bit(byte, bit_num):
""" Return bit number bit_num from right in byte.
@param int byte: a given byte
@param int bit_num: a specific bit number within the byte
@rtype: int
>>> get_bit(0b00000101, 2)
1
>>> get_bit(0b00000101, 1)
0
"""
return (byte & (1 << bit_num)) >> bit_num
def byte_to_bits(byte):
""" Return the representation of a byte as a string of bits.
@param int byte: a given byte
@rtype: str
>>> byte_to_bits(14)
'00001110'
"""
return "".join([str(get_bit(byte, bit_num))
for bit_num in range(7, -1, -1)])
def bits_to_byte(bits):
""" Return int represented by bits, padded on right.
@param str bits: a string representation of some bits
@rtype: int
>>> bits_to_byte("00000101")
5
>>> bits_to_byte("101") == 0b10100000
True
"""
return sum([int(bits[pos]) << (7 - pos)
for pos in range(len(bits))])
# ====================
# Helper Class: Priority Queue
class PriorityQueue:
""" A Priority Queue to collect data
=== Attributes ===
@param list list: the list to collect data
"""
def __init__(self):
""" Initialize a new Priortity Queue.
@param PriorityQueue self: The PriortityQueue itself.
@rtype: None
"""
self.list = []
def add(self, obj):
""" Add obj to Priority_Queue.
@param PriorityQueue self: the PriorityQueue itself
@param list obj: the obj that to be added
@rtype: None
"""
self.list.append(obj)
def remove(self):
"""
Remove smallest item.
@param PriorityQueue self: the PriorityQueue itself
@rtype: list
"""
self.list.sort()
return self.list.pop(0)
def if_len_1(self):
"""
        Return True iff the Priority Queue contains exactly one item.
@param PriorityQueue self: the PriorityQueue itself
@rtype: boolean
"""
return len(self.list) == 1
# ====================
# Helper Class: Queue
class Queue:
""" A general queue
=== Attributes ===
@param list _queue: the list to collect data
"""
def __init__(self):
""" Initialize a new empty queue.
@param Queue self: the Queue itself.
@rtype: None
"""
self._queue = []
def add(self, item):
""" Add item to the end of this queue.
@param Queue self: the Queue itself.
@param list item: the list to add
@rtype: None
"""
self._queue.append(item)
def remove(self):
""" Remove and return the item at the beginning of this queue.
@param Queue self: the Queue itself
@rtype: list
"""
return self._queue.pop(0)
def is_empty(self):
""" Return whether or not this queue is empty.
        @param Queue self: the Queue itself
@rtype: boolean
"""
return len(self._queue) == 0
# ====================
# Functions for compression
def make_freq_dict(text):
""" Return a dictionary that maps each byte in text to its frequency.
@param bytes text: a bytes object
@rtype: dict{int,int}
>>> d = make_freq_dict(bytes([65, 66, 67, 66]))
>>> d == {65: 1, 66: 2, 67: 1}
True
"""
res = {}
for item in text:
if item in res:
res[item] += 1
else:
res[item] = 1
return res
# the idea below follows the standard construction described on Wikipedia
def huffman_tree(freq_dict):
""" Return the root HuffmanNode of a Huffman tree corresponding
to frequency dictionary freq_dict.
@param dict(int,int) freq_dict: a frequency dictionary
@rtype: HuffmanNode
>>> freq = {2: 6, 3: 4}
>>> t = huffman_tree(freq)
>>> result1 = HuffmanNode(None, HuffmanNode(3), HuffmanNode(2))
>>> result2 = HuffmanNode(None, HuffmanNode(2), HuffmanNode(3))
>>> t == result1 or t == result2
True
"""
a = PriorityQueue()
for item in freq_dict:
a.add([freq_dict[item], HuffmanNode(item)])
# build huffman_tree
while not a.if_len_1():
tp_1 = a.remove()
tp_2 = a.remove()
tp_new = [tp_1[0] + tp_2[0], HuffmanNode(None, tp_1[1], tp_2[1])]
a.add(tp_new)
return a.list[0][1]
# helper function: make HuffmanCode for a HuffmanNode
def get_leaves(node):
"""Return a list of values of leaves in Huffman tree.
@param HuffmanNode node: a Huffman node
@rtype: list[int]
>>> tree = HuffmanNode(None, HuffmanNode(3), HuffmanNode(2))
>>> get_leaves(tree)
[3, 2]
"""
if HuffmanNode.is_leaf(node):
return [node.symbol]
else:
return get_leaves(node.left) + get_leaves(node.right)
def create_code(tree, value):
"""Return the Huffman code of the leaf with the value based on the tree.
@param HuffmanNode tree: a Huffman node
@param int value: the value of the leaf
@rtype: str
>>> freq = {1: 2, 2: 3, 3: 4, 5: 6, 6: 7}
>>> tree = huffman_tree(freq)
>>> create_code(tree, 3)
'00'
"""
if not HuffmanNode.is_leaf(tree):
if value in get_leaves(tree.left):
return "0" + create_code(tree.left, value)
elif value in get_leaves(tree.right):
return "1" + create_code(tree.right, value)
else:
return ""
def get_codes(tree):
""" Return a dict mapping symbols from tree rooted at HuffmanNode to codes.
@param HuffmanNode tree: a Huffman tree rooted at node 'tree'
@rtype: dict(int,str)
>>> freq = {1: 2, 2: 3, 3: 4, 5: 6, 6: 7}
>>> tree = huffman_tree(freq)
>>> d = get_codes(tree)
>>> d == {1: '010', 2: '011', 3: '00', 5: '10', 6: '11'}
True
"""
res = {}
leaves = get_leaves(tree)
for item in leaves:
code = create_code(tree, item)
res[item] = code
return res
def get_node(node):
"""Return a list of all the HuffmanNode in the node.
@param HuffmanNode node: a Huffman tree
@rtype: list[HuffmanNode]
>>> left = HuffmanNode(None, HuffmanNode(3), HuffmanNode(2))
>>> get_node(left)
[HuffmanNode(None, HuffmanNode(3, None, None), HuffmanNode(2, None, None))]
"""
if HuffmanNode.is_leaf(node):
return []
else:
return get_node(node.left) + get_node(node.right) + [node]
def number_nodes(tree):
""" Number internal nodes in tree according to postorder traversal;
start numbering at 0.
@param HuffmanNode tree: a Huffman tree rooted at node 'tree'
@rtype: NoneType
>>> left = HuffmanNode(None, HuffmanNode(3), HuffmanNode(2))
>>> right = HuffmanNode(None, HuffmanNode(9), HuffmanNode(10))
>>> tree = HuffmanNode(None, left, right)
>>> number_nodes(tree)
>>> tree.left.number
0
>>> tree.right.number
1
>>> tree.number
2
"""
nodes = get_node(tree)
for i in range(len(nodes)):
nodes[i].number = i
def avg_length(tree, freq_dict):
""" Return the number of bits per symbol required to compress text
made of the symbols and frequencies in freq_dict, using the Huffman tree.
@param HuffmanNode tree: a Huffman tree rooted at node 'tree'
@param dict(int,int) freq_dict: frequency dictionary
@rtype: float
>>> freq = {3: 2, 2: 7, 9: 1}
>>> left = HuffmanNode(None, HuffmanNode(3), HuffmanNode(2))
>>> right = HuffmanNode(9)
>>> tree = HuffmanNode(None, left, right)
>>> avg_length(tree, freq)
1.9
"""
total_weight = sum(list(freq_dict.values()))
acc = 0
code_dict = get_codes(tree)
for key in freq_dict:
acc = acc + freq_dict[key] * len(code_dict[key])
return acc / total_weight
def generate_compressed(text, codes):
""" Return compressed form of text, using mapping in codes for each symbol.
@param bytes text: a bytes object
@param dict(int,str) codes: mappings from symbols to codes
@rtype: bytes
>>> d = {0: "0", 1: "10", 2: "11"}
>>> text = bytes([1, 2, 1, 0])
>>> result = generate_compressed(text, d)
>>> [byte_to_bits(byte) for byte in result]
['10111000']
>>> text = bytes([1, 2, 1, 0, 2])
>>> result = generate_compressed(text, d)
>>> [byte_to_bits(byte) for byte in result]
['10111001', '10000000']
>>> text = bytes([1, 2, 3, 5])
>>> freq = {1: '010', 2: '011', 3: '00', 5: '10', 6: '11'}
>>> result = generate_compressed(text, freq)
>>> [byte_to_bits(byte) for byte in result]
['01001100', '10000000']
"""
text_list = list(text)
new_str = ''
new_byte_list = []
for item in text_list:
new_str = new_str + codes[item]
if len(new_str) % 8 == 0:
for i in range(int(len(new_str) / 8)):
bit_str = new_str[i * 8: (i + 1) * 8]
new_byte_list.append(bits_to_byte(bit_str))
else:
for i in range(int(len(new_str) / 8)):
bit_str = new_str[i * 8: (i + 1) * 8]
new_byte_list.append(bits_to_byte(bit_str))
last = new_str[(int(len(new_str) / 8)) * 8:]
new_byte_list.append(bits_to_byte(last + (8 - len(last)) * '0'))
return bytes(new_byte_list)
# helper
def get_byte(node):
"""Return a list to describe the node.
@param HuffmanNode node: the node that needs to be described
@rtype: list
>>> tree = HuffmanNode(None, HuffmanNode(3), HuffmanNode(2))
>>> number_nodes(tree)
>>> get_byte(tree)
[0, 3, 0, 2]
"""
byte_list = []
if HuffmanNode.is_leaf(node.left):
byte_list.append(0)
byte_list.append(node.left.symbol)
else:
byte_list.append(1)
byte_list.append(node.left.number)
if HuffmanNode.is_leaf(node.right):
byte_list.append(0)
byte_list.append(node.right.symbol)
else:
byte_list.append(1)
byte_list.append(node.right.number)
return byte_list
def tree_to_bytes(tree):
""" Return a bytes representation of the tree rooted at tree.
@param HuffmanNode tree: a Huffman tree rooted at node 'tree'
@rtype: bytes
The representation should be based on the postorder traversal of tree
internal nodes, starting from 0.
Precondition: tree has its nodes numbered.
>>> tree = HuffmanNode(None, HuffmanNode(3), HuffmanNode(2))
>>> number_nodes(tree)
>>> list(tree_to_bytes(tree))
[0, 3, 0, 2]
>>> left = HuffmanNode(None, HuffmanNode(3), HuffmanNode(2))
>>> right = HuffmanNode(5)
>>> tree = HuffmanNode(None, left, right)
>>> number_nodes(tree)
>>> list(tree_to_bytes(tree))
[0, 3, 0, 2, 1, 0, 0, 5]
"""
new_list = []
internal_node_list = get_node(tree)
for item in internal_node_list:
new_list = new_list + get_byte(item)
return bytes(new_list)
def num_nodes_to_bytes(tree):
""" Return number of nodes required to represent tree (the root of a
numbered Huffman tree).
@param HuffmanNode tree: a Huffman tree rooted at node 'tree'
@rtype: bytes
"""
return bytes([tree.number + 1])
def size_to_bytes(size):
""" Return the size as a bytes object.
@param int size: a 32-bit integer that we want to convert to bytes
@rtype: bytes
>>> list(size_to_bytes(300))
[44, 1, 0, 0]
"""
# little-endian representation of 32-bit (4-byte)
# int size
return size.to_bytes(4, "little")
def compress(in_file, out_file):
""" Compress contents of in_file and store results in out_file.
@param str in_file: input file whose contents we want to compress
@param str out_file: output file, where we store our compressed result
@rtype: NoneType
"""
with open(in_file, "rb") as f1:
text = f1.read()
freq = make_freq_dict(text)
tree = huffman_tree(freq)
codes = get_codes(tree)
number_nodes(tree)
print("Bits per symbol:", avg_length(tree, freq))
result = (num_nodes_to_bytes(tree) + tree_to_bytes(tree) +
size_to_bytes(len(text)))
result += generate_compressed(text, codes)
with open(out_file, "wb") as f2:
f2.write(result)
# ====================
# Functions for decompression
def generate_tree_general(node_lst, root_index):
""" Return the root of the Huffman tree corresponding
to node_lst[root_index].
The function assumes nothing about the order of the nodes in the list.
@param list[ReadNode] node_lst: a list of ReadNode objects
@param int root_index: index in the node list
@rtype: HuffmanNode
>>> lst = [ReadNode(0, 5, 0, 7), ReadNode(0, 10, 0, 12), \
ReadNode(1, 1, 1, 0)]
>>> generate_tree_general(lst, 2)
HuffmanNode(None, HuffmanNode(None, HuffmanNode(10, None, None), \
HuffmanNode(12, None, None)), \
HuffmanNode(None, HuffmanNode(5, None, None), HuffmanNode(7, None, None)))
"""
    node = node_lst[root_index]
    if node.l_type == 0:
        left = HuffmanNode(node.l_data)
    else:
        left = generate_tree_general(node_lst, node.l_data)
    if node.r_type == 0:
        right = HuffmanNode(node.r_data)
    else:
        right = generate_tree_general(node_lst, node.r_data)
    return HuffmanNode(None, left, right)
def generate_tree_postorder(node_lst, root_index):
""" Return the root of the Huffman tree corresponding
to node_lst[root_index].
The function assumes that the list represents a tree in postorder.
@param list[ReadNode] node_lst: a list of ReadNode objects
@param int root_index: index in the node list
@rtype: HuffmanNode
>>> lst = [ReadNode(0, 5, 0, 7), ReadNode(0, 10, 0, 12), \
ReadNode(1, 0, 1, 0)]
>>> generate_tree_postorder(lst, 2)
HuffmanNode(None, HuffmanNode(None, HuffmanNode(5, None, None), \
HuffmanNode(7, None, None)), \
HuffmanNode(None, HuffmanNode(10, None, None), HuffmanNode(12, None, None)))
"""
    node = node_lst[root_index]
    if node.l_type == 0 and node.r_type == 0:
        return HuffmanNode(None, HuffmanNode(node.l_data),
                           HuffmanNode(node.r_data))
    elif node.l_type == 0 and node.r_type == 1:
        return HuffmanNode(None, HuffmanNode(node.l_data),
                           generate_tree_postorder(node_lst, root_index - 1))
    elif node.l_type == 1 and node.r_type == 0:
        return HuffmanNode(None,
                           generate_tree_postorder(node_lst, root_index - 1),
                           HuffmanNode(node.r_data))
    else:
        # the right subtree's internal-node count tells us how far back the
        # left subtree's root sits in the postorder list
        right = generate_tree_general(node_lst, root_index - 1)
        left_index = root_index - len(get_node(right)) - 1
        return HuffmanNode(None,
                           generate_tree_postorder(node_lst, left_index),
                           right)
def generate_uncompressed(tree, text, size):
""" Use Huffman tree to decompress size bytes from text.
@param HuffmanNode tree: a HuffmanNode tree rooted at 'tree'
@param bytes text: text to decompress
@param int size: how many bytes to decompress from text.
@rtype: bytes
>>> freq = {1: 2, 2: 3, 3: 4, 5: 6, 6: 7}
>>> tree = huffman_tree(freq)
>>> codes = get_codes(tree)
>>> codes
{1: '010', 2: '011', 3: '00', 5: '10', 6: '11'}
>>> text = bytes([1, 2, 3, 5, 2, 3, 5, 1])
>>> a = generate_compressed(text, codes)
>>> b = generate_uncompressed(tree, a, 8)
>>> list(b)
[1, 2, 3, 5, 2, 3, 5, 1]
"""
the_dict = get_codes(tree)
inverse_dict = {}
for key in the_dict:
inverse_dict[the_dict[key]] = key
begin = 0
new_list = []
new_str = ''
number = 0
text_to_read = ''.join([byte_to_bits(item) for item in text])
while begin < len(text_to_read) and new_str not in inverse_dict \
and number < size:
i = 1
while begin + i <= len(text_to_read) and new_str not in inverse_dict:
new_str = text_to_read[begin: begin + i]
i = i + 1
if new_str in inverse_dict:
new_list.append(inverse_dict[new_str])
number = number + 1
begin = begin + i - 1
new_str = ''
else:
begin = begin + i
return bytes(new_list)
def bytes_to_nodes(buf):
""" Return a list of ReadNodes corresponding to the bytes in buf.
@param bytes buf: a bytes object
@rtype: list[ReadNode]
>>> bytes_to_nodes(bytes([0, 1, 0, 2]))
[ReadNode(0, 1, 0, 2)]
"""
lst = []
for i in range(0, len(buf), 4):
l_type = buf[i]
l_data = buf[i+1]
r_type = buf[i+2]
r_data = buf[i+3]
lst.append(ReadNode(l_type, l_data, r_type, r_data))
return lst
def bytes_to_size(buf):
""" Return the size corresponding to the
given 4-byte little-endian representation.
@param bytes buf: a bytes object
@rtype: int
>>> bytes_to_size(bytes([44, 1, 0, 0]))
300
"""
return int.from_bytes(buf, "little")
def uncompress(in_file, out_file):
""" Uncompress contents of in_file and store results in out_file.
@param str in_file: input file to uncompress
@param str out_file: output file that will hold the uncompressed results
@rtype: NoneType
"""
with open(in_file, "rb") as f:
num_nodes = f.read(1)[0]
buf = f.read(num_nodes * 4)
node_lst = bytes_to_nodes(buf)
# use generate_tree_general or generate_tree_postorder here
tree = generate_tree_general(node_lst, num_nodes - 1)
size = bytes_to_size(f.read(4))
with open(out_file, "wb") as g:
text = f.read()
g.write(generate_uncompressed(tree, text, size))
# ====================
# Other functions
def improve_tree(tree, freq_dict):
""" Improve the tree as much as possible, without changing its shape,
by swapping nodes. The improvements are with respect to freq_dict.
@param HuffmanNode tree: Huffman tree rooted at 'tree'
@param dict(int,int) freq_dict: frequency dictionary
@rtype: NoneType
>>> left = HuffmanNode(None, HuffmanNode(99), HuffmanNode(100))
>>> right = HuffmanNode(None, HuffmanNode(101), \
HuffmanNode(None, HuffmanNode(97), HuffmanNode(98)))
>>> tree = HuffmanNode(None, left, right)
>>> freq = {97: 26, 98: 23, 99: 20, 100: 16, 101: 15}
>>> improve_tree(tree, freq)
>>> avg_length(tree, freq)
2.31
"""
    # symbols ordered from most to least frequent; sorting the keys directly
    # also handles symbols that share a frequency
    key_list = sorted(freq_dict, key=lambda k: freq_dict[k], reverse=True)
to_act_on = Queue()
to_act_on.add(tree)
i = 0
while not to_act_on.is_empty():
next_node = to_act_on.remove()
if HuffmanNode.is_leaf(next_node):
next_node.symbol = key_list[i]
i = i + 1
else:
to_act_on.add(next_node.left)
to_act_on.add(next_node.right)
if __name__ == "__main__":
import python_ta
python_ta.check_all(config="huffman_pyta.txt")
# TODO: Uncomment these when you have implemented all the functions
import doctest
doctest.testmod()
import time
mode = input("Press c to compress or u to uncompress: ")
if mode == "c":
fname = input("File to compress: ")
start = time.time()
compress(fname, fname + ".huf")
print("compressed {} in {} seconds."
.format(fname, time.time() - start))
elif mode == "u":
fname = input("File to uncompress: ")
start = time.time()
uncompress(fname, fname + ".orig")
print("uncompressed {} in {} seconds."
.format(fname, time.time() - start))
```
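As a quick sanity check of the compression pipeline above, the following sketch round-trips a short byte string entirely in memory. It assumes the functions defined in that module (including make_freq_dict, which compress already calls) are in scope, e.g. by pasting it at the bottom of the same file; the sample bytes are chosen only for illustration.
```python
# In-memory round trip using the module's own functions (sketch only).
data = bytes([65, 66, 65, 67, 65, 66])      # the byte string b"ABACAB"
freq = make_freq_dict(data)                 # per-byte frequency counts
tree = huffman_tree(freq)
codes = get_codes(tree)
packed = generate_compressed(data, codes)   # bit-packed, zero-padded bytes
restored = generate_uncompressed(tree, packed, len(data))
assert restored == data                     # the original bytes come back
```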
#### File: exercises/ex1/ex1.py
```python
class SuperDuperManager:
""" A class responsible for keeping track of all cars in the system.
"""
def __init__(self):
"""Initialize a new SuperDuperManager.
There are no cars in the system when first created.
@type self: SuperDuperManager
@rtype: None
"""
self._cars = {}
def add_car(self, id_, fuel):
"""Add a new car to the system.
The new car is identified by the string <id_>, and has initial amount
of fuel <fuel>.
Do nothing if there is already a car with the given id.
@type self: SuperDuperManager
@type id_: str
@type fuel: int
@rtype: None
"""
# Check to make sure the identifier isn't already used.
if id_ not in self._cars:
# TODO: Add the new car.
self._cars[id_] = Car(fuel)
def move_car(self, id_, new_x, new_y):
"""Move the car with the given id.
The car called <id_> should be moved to position (<new_x>, <new_y>).
Do nothing if there is no car with the given id,
or if the corresponding car does not have enough fuel.
@type self: SuperDuperManager
@type id_: str
@type new_x: int
@type new_y: int
@rtype: None
"""
if id_ in self._cars:
# TODO: Move the car with id <id_>.
distance = abs(new_x - self._cars[id_].position[0]) + \
abs(new_y - self._cars[id_].position[1])
if distance <= self._cars[id_].fuel:
self._cars[id_].position = (new_x, new_y)
self._cars[id_].fuel = self._cars[id_].fuel - distance
def get_car_position(self, id_):
"""Return the position of the car with the given id.
Return a tuple of the (x, y) position of the car with id <id_>.
Return None if there is no car with the given id.
@type self: SuperDuperManager
@type id_: str
@rtype: (int, int) | None
"""
if id_ in self._cars:
# TODO: Get the position of the car with id <id_>.
return self._cars[id_].position
return None
def get_car_fuel(self, id_):
"""Return the amount of fuel of the car with the given id.
Return None if there is no car with the given id.
@type self: SuperDuperManager
@type id_: str
@rtype: int | None
"""
if id_ in self._cars:
# TODO: Get the amount of fuel of the car with id <id_>.
return self._cars[id_].fuel
return None
def dispatch(self, x, y):
"""Move a car to the given location.
Choose a car to move based on the following criteria:
(1) Only consider cars that *can* move to the location.
(Ignore ones that don't have enough fuel.)
(2) After (1), choose the car that would move the *least* distance to
get to the location.
(3) If there is a tie in (2), pick the car whose id comes first
alphabetically. Use < to compare the strings.
(4) If no cars can move to the given location, do nothing.
@type self: SuperDuperManager
@type x: int
@type y: int
@rtype: None
"""
# TODO: Implement this method!
res_distance = []
res_id = []
#deal tie situation, create tie_id list
tie_id = []
for k, v in self._cars.items():
distance = abs(x - v.position[0]) + \
abs(y - v.position[1])
if distance <= v.fuel:
res_distance.append(distance)
res_id.append(k)
if len(res_distance) != 0:
closest_distance = min(res_distance)
if res_distance.count(closest_distance) == 1:
closest_id = res_id[res_distance.index(closest_distance)]
self.move_car(closest_id, x, y)
else:
for i in range(len(res_distance)):
if res_distance[i] == closest_distance:
tie_id.append(res_id[i])
tie_id.sort()
self.move_car(tie_id[0], x, y)
# TODO: Design and implement this class.
# TODO: Remember to document all attributes and methods!
class Car:
"""A car in the Super system.
=== Attributes ===
@type fuel: int
        the amount of fuel the car has remaining
    @type position: (int, int)
        the car's (x, y) position
"""
def __init__(self, fuel, position=(0, 0)):
"""
        Create a car with the given fuel amount and a default position.
        @type self: Car
        @type fuel: int
        @type position: (int, int)
        @rtype: None
"""
self.fuel = fuel
self.position = position
if __name__ == '__main__':
# Run python_ta to ensure this module passes all checks for
# code inconsistencies and forbidden Python features.
# Useful for debugging!
import python_ta
python_ta.check_all()
# Uncomment and run before final submission. This checks for style errors
# in addition to code inconsistencies and forbidden Python features.
# python_ta.check_all()
#a = SuperDuperManager()
#a.add_car('bmw', 10)
#a.add_car('toyota', 10)
#a.add_car('cooper', 6)
#a.add_car('benz', 8)
#a.dispatch(2, 3)
#print(a.get_car_position('benz'))
#print(a.get_car_fuel('benz'))
#a.move_car('toyota', 4, 5)
#a.dispatch(5, 6)
#print(a.get_car_position('bmw'))
#print(a.get_car_fuel('bmw'))
```
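A short worked run of the dispatch rules documented above; the ids and fuel amounts below are made up for illustration, and every car starts at the default position (0, 0).
```python
# Sketch: dispatch picks the reachable car that travels the least distance,
# breaking ties alphabetically by id (ids and fuel values are illustrative).
m = SuperDuperManager()
m.add_car('b', 5)    # distance to (2, 2) is 4 <= 5 fuel  -> reachable
m.add_car('a', 3)    # distance 4 > 3 fuel                -> ignored
m.add_car('c', 9)    # distance 4 <= 9 fuel               -> ties with 'b'
m.dispatch(2, 2)
print(m.get_car_position('b'))   # (2, 2) -- 'b' < 'c' wins the tie
print(m.get_car_fuel('b'))       # 1
```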
#### File: exercises/ex1/Inventory system.py
```python
class Inventory_system:
"""Create a inventory system to record one item information.
=== Attributes ===
@type price: int | float
item price
@type description: str
item description
@type category: str
item category
@type item_num: int
item number
"""
def __init__(self, price, item_num, description, category):
""" Create a Inventory system of item, incluing price, number, description and category.
@type self: Inventory_system
@type price: float | int
@type item_num: int
@description: str
@category: str
@rtype: None
"""
self.price = price
self.item_num = item_num
self.description = description
self.category = category
def __str__(self):
""" Return readable information about this item.
@type self: Inventory_system
@rtype: str
"""
        # the returned string deliberately starts with a newline
        return """
{} is {}$
it's category is {} and item number is {}
""".format(self.description, self.price, self.category, self.item_num)
def discount(self, discount_percent):
"""Give price discount to this item.
@type self: Inventory_system
@type discount_percent: float
@rtype: None
"""
self.price = self.price * discount_percent
def compare(self, other):
""" Compare two item to see which is cheaper.
@type self: Inventory_system
@type other: Inventory_system
@rtype: str
"""
if self.price < other.price:
return self.description
elif self.price > other.price:
return other.description
else:
return 'tie'
if __name__ == '__main__':
item_1 = Inventory_system(10, 1001, 'T-shirt', 'Cloth')
item_2 = Inventory_system(1000, 2001, 'BMW', 'Car')
print(item_1)
print(item_2)
print(item_1.compare(item_2))
item_2.discount(0.001)
print(item_1.compare(item_2))
```
#### File: exercises/ex2/ex2.py
```python
from math import sqrt # sqrt used to calculate diagonal distances
import random # used to generate random numbers
##############################################################################
# Task 1: Cars and other vehicles
##############################################################################
class SuperDuperManager:
"""A class responsible for keeping track of all cars in the system.
"""
# @param dict[str, Vehicle] _vehicles:
# A map of unique string identifiers to the corresponding vehicles.
# For example, _vehicles['a01'] would be a vehicle corresponding to
# the id_ 'a01'.
def __init__(self):
"""Initialize a new SuperDuperManager.
Initially there are no vehicles in the system.
@param SuperDuperManager self:
@rtype: None
"""
self._vehicles = {}
def add_vehicle(self, vehicle_type, id_, fuel):
"""Add a new vehicle to the system of the given type.
The new vehicle is identified by the string <id_>,
and has initial amount of fuel <fuel>.
Do nothing if there is already a vehicle with the given id.
Precondition: <vehicle_type> is one of 'Car', 'Helicopter', or
'UnreliableMagicCarpet'.
@param SuperDuperManager self:
@param str vehicle_type:
@param str id_:
@param int fuel:
@rtype: None
"""
# Check to make sure the identifier isn't already used.
if id_ not in self._vehicles:
if vehicle_type == 'Car':
self._vehicles[id_] = Car(fuel)
elif vehicle_type == 'Helicopter':
self._vehicles[id_] = Helicopter(fuel)
elif vehicle_type == 'UnreliableMagicCarpet':
self._vehicles[id_] = UnreliableMagicCarpet(fuel)
def move_vehicle(self, id_, new_x, new_y):
"""Move a vehicle with the given id.
The vehicle called <id_> should be moved to position (<new_x>, <new_y>).
Do nothing if there is no vehicle with the given id,
or if the corresponding vehicle does not have enough fuel to move.
@param SuperDuperManager self: SuperDuperManager
@param str id_:
@param int new_x:
@param int new_y:
@rtype: None
"""
if id_ in self._vehicles:
self._vehicles[id_].move(new_x, new_y)
def get_vehicle_position(self, id_):
"""Return the position of the vehicle with the given id.
Return a tuple of the (x, y) position of the vehicle.
Return None if there is no vehicle with the given id.
@param SuperDuperManager self: SuperDuperManager
@param str id_: str
@rtype: (int, int) | None
"""
if id_ in self._vehicles:
return self._vehicles[id_].position
def get_vehicle_fuel(self, id_):
"""Return the amount of fuel of the vehicle with the given id.
Return None if there is no vehicle with the given id.
@param SuperDuperManager self:
@param str id_:
@rtype: int | None
"""
if id_ in self._vehicles:
return self._vehicles[id_].fuel
class Vehicle:
""" A superclass for a vehicle in the Super Duper system.
Note that this interface specifies *two* public attributes,
and *two* public methods (the constructor is not considered public).
Of the public methods, a default implementation is given for move,
but not fuel_needed.
It also defines a constructor that should be called by each of its
subclasses.
=== Attributes ===
@param tuple(int) position:
The position of this vehicle.
@param int fuel:
The amount of fuel remaining for this vehicle.
=== Representation invariants ===
fuel >= 0
"""
def __init__(self, new_fuel, new_position):
"""Initialize a new Vehicle with the given fuel and position.
Precondition: new_fuel >= 0
@param Vehicle self: Vehicle itself
@param int new_fuel: fuel amount
@param (int, int) new_position: destination coordinates
@rtype: None
"""
self.fuel = new_fuel
self.position = new_position
def fuel_needed(self, new_x, new_y):
"""Return how much fuel would be used to move to the given position.
Note: the amount returned may be larger than self.fuel,
indicating that this vehicle may not move to the given position.
@param Vehicle self: Vehicle itself
@param int new_x: destination's x coordinate
@param int new_y: destination's y coordinate
@rtype: float
"""
raise NotImplementedError
def move(self, new_x, new_y):
"""Move this vehicle to a new position.
Do nothing if this vehicle does not have enough fuel to move.
@param Vehicle self: Vehicle itself
@param int new_x: destination's x coordinate
@param int new_y: destination's y coordinate
@rtype: None
"""
needed = self.fuel_needed(new_x, new_y)
if needed <= self.fuel:
self.position = (new_x, new_y)
self.fuel -= needed
# TODO: Implement this class (you can use your work from Exercise 1)
class Car(Vehicle):
"""A Car in the Super Duper system.
Car original position is at (0, 0).
A Car can only move vertically and horizontally, and uses
one unit of fuel per unit distance travelled.
Do nothing if the Car does not have enough fuel to move.
=== Attributes ===
@param tuple(int) position:
The position of this Car.
@param int fuel:
The amount of fuel remaining for this Car.
=== Representation invariants ===
fuel >= 0
"""
def __init__(self, fuel, position=(0, 0)):
"""
Initialize a new Car with the given fuel and position.
        Precondition: fuel >= 0
@param Car self: Car itself.
@param int fuel: fuel amount.
@param (int, int) position: original position.
@rtype: None
"""
Vehicle.__init__(self, fuel, position)
def fuel_needed(self, new_x, new_y):
"""Return how much fuel would be used to move to the given position.
Note: the amount returned may be larger than self.fuel,
indicating that this Car may not move to the given position.
@param Car self: Car itself.
@param int new_x: destination's x coordinate
@param int new_y: destination's y coordinate
@rtype: float
"""
distance = abs(new_x - self.position[0]) + abs(new_y - self.position[1])
return distance
# TODO: Implement this class. Note: We've imported the sqrt function for you.
class Helicopter(Vehicle):
"""
A helicopter. Can travel diagonally between points.
    A Helicopter's original position is (3, 5).
    After each move, the amount of fuel is rounded down to the nearest integer.
Do nothing if Helicopter does not have enough fuel to move.
=== Attributes ===
@param tuple(int) position:
The position of this vehicle.
@param int fuel:
The amount of fuel remaining for this vehicle.
=== Representation invariants ===
fuel >= 0
"""
def __init__(self, fuel, position=(3, 5)):
"""
        Create a Helicopter with the given fuel amount and default position.
        Precondition: fuel >= 0
        @param Helicopter self: this Helicopter.
@param int fuel: fuel amount.
@param (int, int) position: original position.
@rtype: None
"""
Vehicle.__init__(self, fuel, position)
def fuel_needed(self, new_x, new_y):
"""Return how much fuel would be used to move to the given position.
Note: the amount returned may be larger than self.fuel,
indicating that this vehicle may not move to the given position.
@param Helicopter self: Helicopter itself
@param int new_x: destination's x coordinates
@param int new_y: destination's y coordinates
@rtype: float
"""
return sqrt((abs(new_x - self.position[0]))**2 +
(abs(new_y - self.position[1]))**2)
def move(self, new_x, new_y):
"""Move this Helicopter to a new position.
Do nothing if this Helicopter does not have enough fuel to move.
@param Helicopter self: Helicopter itself
@param int new_x: destination's x coordinates
@param int new_y: destination's y coordinates
@rtype: None
"""
needed = self.fuel_needed(new_x, new_y)
if needed <= self.fuel:
self.position = (new_x, new_y)
self.fuel = int(self.fuel - needed)
# TODO: Implement this class. Note: We've imported the random module for you.
class UnreliableMagicCarpet(Vehicle):
"""
An unreliable magic carpet.
    An UnreliableMagicCarpet is created at a random position (x, y), where x
    and y are integers between 0 and 10 inclusive.
    It does not use fuel to travel, but ends up at a random position within
    two horizontal and two vertical units of the target destination.
=== Attributes ===
@param tuple(int) position:
The position of this vehicle.
@param int fuel:
The amount of fuel remaining for this vehicle.
"""
    def __init__(self, fuel, position=None):
        """
        Create an UnreliableMagicCarpet with the given fuel amount and a
        random starting position, drawn separately for each instance.
        @rtype: None
        """
        if position is None:
            # drawn here rather than as a default argument, which would be
            # evaluated only once when the class is defined
            position = (random.randint(0, 10), random.randint(0, 10))
        Vehicle.__init__(self, fuel, position)
def fuel_needed(self, new_x, new_y):
"""
Return how much fuel would be used to move to the given position.
        Note: the amount returned is always 0, since an
UnreliableMagicCarpet does not consume fuel.
@param UnreliableMagicCarpet self: UnreliableMagicCarpet itself
@param int new_x: destination's x coordinates
@param int new_y: destination's y coordinates
@rtype: int
"""
return 0
def move(self, new_x, new_y):
"""
Move this UnreliableMagicCarpet to a new position.
        Note: the UnreliableMagicCarpet ends up at a random position
        near the target one.
@param UnreliableMagicCarpet self: UnreliableMagicCarpet itself
@param int new_x: destination's x coordinates
@param int new_y: destination's y coordinates
@rtype: None
"""
needed = self.fuel_needed(new_x, new_y)
if needed <= self.fuel:
dx = random.randint(-2, 2)
dy = random.randint(-2, 2)
self.position = (new_x + dx, new_y + dy)
##############################################################################
# Task 2: Introduction to Stacks
##############################################################################
def reverse_top_two(stack):
"""Reverse the top two elements on <stack>.
Precondition: <stack> has at least two items.
@param Stack stack:
@rtype: None
>>> from obfuscated_stack import Stack
>>> stack = Stack()
>>> stack.add(1)
>>> stack.add(2)
>>> reverse_top_two(stack)
>>> stack.remove()
1
>>> stack.remove()
2
"""
# TODO: implement this function after you've read about Stacks.
top_1 = stack.remove()
top_2 = stack.remove()
stack.add(top_1)
stack.add(top_2)
if __name__ == '__main__':
# Run python_ta to ensure this module passes all checks for
# code inconsistencies and forbidden Python features.
# Useful for debugging!
import python_ta
python_ta.check_all(config='pylint.txt')
# Uncomment and run before final submission. This checks for style errors
# in addition to code inconsistencies and forbidden Python features.
# python_ta.check_all(config='pylint.txt')
```
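A brief usage sketch of the vehicle hierarchy above; the ids and coordinates are illustrative, and the starting positions are the defaults documented in each class.
```python
# Sketch: exercising the Vehicle subclasses defined above.
m = SuperDuperManager()
m.add_vehicle('Car', 'c1', 10)          # Car starts at (0, 0)
m.add_vehicle('Helicopter', 'h1', 10)   # Helicopter starts at (3, 5)
m.move_vehicle('c1', 3, 4)              # Manhattan cost 7 <= 10
print(m.get_vehicle_position('c1'))     # (3, 4)
print(m.get_vehicle_fuel('c1'))         # 3
m.move_vehicle('h1', 6, 9)              # straight-line cost 5.0 <= 10
print(m.get_vehicle_position('h1'))     # (6, 9)
print(m.get_vehicle_fuel('h1'))         # 5, rounded down to an int
```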
#### File: labs/lab12/network_graph.py
```python
import networkx as nx
import random
def page_rank():
"""Return top 10 node
@rtype: list
"""
graph = nx.Graph()
with open('facebook-links.txt', 'r') as f:
for line in f:
s = line.split('\t')
graph.add_edge(int(s[0]), int(s[1]))
assert len(graph.nodes()) == 63731
assert len(graph.edges()) == 817090
g = graph.subgraph(range(10000)) # take a subset of the graph for speed
#set initial cur node
node_list = g.nodes()
cur_node_index = random.randint(0, 9999)
cur_node = node_list[cur_node_index]
for i in range(10000):
p_value = random.randint(1, 10)
if p_value != 1:
cur_neighbor_list = g.neighbors(cur_node)
cur_node = cur_neighbor_list[random.randint(0, len(cur_neighbor_list)-1)]
if g.node[cur_node] == {}:
g.node[cur_node]['acc'] = 1
else:
g.node[cur_node]['acc'] += 1
else:
cur_node_index = random.randint(0, 9999)
cur_node = node_list[cur_node_index]
if g.node[cur_node] == {}:
g.node[cur_node]['acc'] = 1
else:
g.node[cur_node]['acc'] += 1
target = g.nodes(data=True)
acc_node_list = [(x[1]['acc'], x[0]) for x in target if 'acc' in x[1]]
acc_node_list.sort()
res = acc_node_list[::-1][:10]
return res
if __name__ == "__main__":
res = page_rank()
print(res)
```
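The random-surfer loop in page_rank needs networkx and the facebook-links.txt data file; the sketch below runs the same idea on a tiny hard-coded adjacency dict (the node names are invented) so the 90%-follow / 10%-teleport logic can be tried in isolation.
```python
# Random-surfer sketch mirroring page_rank above, without networkx or data.
import random
graph = {'a': ['b', 'c'], 'b': ['a', 'c'], 'c': ['a', 'b', 'd'], 'd': ['c']}
visits = {node: 0 for node in graph}
cur = random.choice(list(graph))
for _ in range(10000):
    if random.randint(1, 10) != 1:       # with probability 9/10 follow a link
        cur = random.choice(graph[cur])
    else:                                # with probability 1/10 teleport
        cur = random.choice(list(graph))
    visits[cur] += 1
print(sorted(visits, key=visits.get, reverse=True))   # most-visited first
```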
#### File: labs/lab12/test_sort.py
```python
from sort import *
import random
import timeit
import cProfile
def is_sorted(list_):
"""
Return True iff list_ is in non-decreasing order.
@param list list_: list to inspect
@rtype bool:
>>> is_sorted([1, 3, 5])
True
>>> is_sorted([3, 1, 5])
False
"""
for j in range(1, len(list_)):
if list_[j - 1] > list_[j]:
return False
return True
def time_sort(list_, sorted_list, which_sort):
"""
Sort list_ using function which_sort, time it and print the results,
and ensure that the elements are the same as list sorted_list,
which is a sorted version of L obtained by
using the built-in sort.
@param list list_: list to sort
@param list sorted_list: list to compare result to
@param (list)->None which_sort: sorting algorithm
"""
sorter_name = which_sort.__name__ # the function's name as a string
# Verify that the sorting algorithm works correctly!
new_list = list_[:]
which_sort(new_list)
    error_string = sorter_name + " did not sort"
assert is_sorted(new_list) and new_list == sorted_list, error_string
# The timeit module provides accurate timing of code in seconds, by
# running the code a number of times and adding up the total time.
t = timeit.timeit('{}({})'.format(sorter_name, list_),
'from sort import ' + sorter_name,
number=4) / 4
# Print information about the results so far, before all of the output
# generated by the cProfile module.
print("{} {} items in {:.6f}\n".format(sorter_name, len(list_), t))
def generate_data(n, sorted_=False, reversed_=False):
"""
Return a list of n ints. If sorted_, the list should be nearly sorted:
only a few elements are out of order. If sorted_ and reversed_, the list
should be nearly sorted in reverse. The list should otherwise be
shuffled (in random order).
@param int n: number of ints in the list to be returned
@param bool sorted_: indicates whether or not to sort
@param bool reversed_: indicates whether or not to reverse
@rtype: list[int]
"""
list_ = [2 * j for j in range(n)]
if sorted_:
j = random.randrange(5, 11)
while j < n // 2:
list_[j], list_[-j] = list_[-j], list_[j]
j += random.randrange(5, 11)
if reversed_:
list_.reverse()
else:
random.shuffle(list_)
return list_
def profile_comparisons(n):
"""
Run cProfile to identify bottlenecks in algorithms.
@param int n: size of list to run algorithms on.
@rtype: None
"""
for algo in [selection_sort, insertion_sort_1, bubblesort_1,
mergesort_1, quicksort_1]:
list_ = generate_data(n)
name = algo.__name__
print("=== profiling {} ===".format(name))
cProfile.run("{}({})".format(algo.__name__, list_), sort='calls')
if __name__ == "__main__":
import doctest
doctest.testmod()
for algo_ in [selection_sort, insertion_sort_1, bubblesort_1, bubblesort_2,
mergesort_1, quicksort_1]:
for i in range(1, 7):
L = generate_data(i * 100)
time_sort(L, sorted(L), algo_)
for i in range(1, 7):
L = generate_data(i * 100)
time = timeit.timeit('{}.sort()'.format(L), number=100) / 100
print("built-in sort {} items in {:.6f}\n".format(len(L), time))
# uncomment this call to profile-comparisons, and edit the list of
# algorithms to see call-by-call comparison
profile_comparisons(1000)
```
#### File: labs/lab1/lab1.py
```python
class RaceRegistry:
"""Race Registry includes runners' information
Attributes:
@type under_20: list
emails of under_20 category
@type under_30: list
emails of under_30 category
@type under_40: list
emails of under_40 category
@type over_40: list
emails of over_40 category
"""
def __init__(self):
"""Creat a RaceRegistry for runners
@type self: Race_Registry
@rtype: None
"""
self.under_20 = []
self.under_30 = []
self.under_40 = []
self.over_40 = []
def __eq__(self, other):
"""Return True iff this Race_Registry is same as other.
@type self: Race_Registry
@type other: Race_Registry
@rtype: bool
>>> r1 = RaceRegistry()
>>> r2 = RaceRegistry()
>>> r1 == r2
True
>>> r1.add('<EMAIL>', 'under 20')
>>> r1 == r2
False
"""
self.under_20.sort()
self.under_30.sort()
self.under_40.sort()
self.over_40.sort()
other.under_20.sort()
other.under_30.sort()
other.under_40.sort()
other.over_40.sort()
        # compare the sorted lists directly (list.sort() returns None)
        return type(self) == type(other) and \
            self.under_20 == other.under_20 and \
            self.under_30 == other.under_30 and \
            self.under_40 == other.under_40 and \
            self.over_40 == other.over_40
def __str__(self):
"""Return readable string representation of this Race_Registry.
@type self: Race_Registry
        @rtype: str
>>> r1 = RaceRegistry()
>>> r1.add('<EMAIL>', 'under 20')
>>> print(r1)
under 20: [<EMAIL>]
under 30: []
under 40: []
over 40: []
"""
return """under 20: {0}
under 30: {1}
under 40: {2}
over 40: {3}
""".format(self.under_20, self.under_30, self.under_40, self.over_40)
def add(self, email, speed_category):
"""add one runner information of email and speed_category \
to this Race_Registry.
@type self: Race_Registry
@type email: str
        @type speed_category: str
@rtype: None
>>> r = RaceRegistry()
>>> r.add('<EMAIL>', 'under 40')
>>> r.under_40
['<EMAIL>']
"""
if speed_category == 'under 20':
self.under_20.append(email)
elif speed_category == 'under 30':
self.under_30.append(email)
elif speed_category == 'under 40':
self.under_40.append(email)
else:
self.over_40.append(email)
def get_runner_cate(self, email):
"""Return runner's category basing on his email.
@type self: Race_Registry
@type email: str
@rtype: str
>>> r = RaceRegistry()
>>> r.add('<EMAIL>', 'under 40')
>>> r.get_runner_cate('<EMAIL>')
'under 40'
"""
if email in self.under_20:
return 'under 20'
elif email in self.under_30:
return 'under 30'
elif email in self.under_40:
return 'under 40'
elif email in self.over_40:
return 'over 40'
if __name__ == '__main__':
r = RaceRegistry()
r.add('<EMAIL>', 'under 40')
r.add('<EMAIL>', 'under 30')
r.add('<EMAIL>', 'under 20')
r.add('<EMAIL>', 'over 40')
print(r)
r.get_runner_cate('<EMAIL>')
```
#### File: labs/lab3/stack.py
```python
class Stack:
"""
Last-in, first-out (LIFO) stack.
"""
def __init__(self):
"""
Create a new, empty Stack self.
@param Stack self: this stack
@rtype: None
"""
self._contents = []
def add(self, obj):
"""
Add object obj to top of Stack self.
@param Stack self: this Stack
@param object obj: object to place on Stack
@rtype: None
"""
self._contents.append(obj)
def __str__(self):
"""
Return a str representation of Stack self.
@param Stack self: this Stack
@rtype: str
>>> s = Stack()
>>> s.add(3)
>>> s.add(2)
>>> print(s)
[3, 2]
"""
return str(self._contents)
def __eq__(self, other):
"""
Return whether Stack self is equivalent to other.
@param Stack self: this Stack
@param object|Stack other: object to compare to self.
@rtype: bool
>>> s1 = Stack()
>>> s1.add(3)
>>> s2 = Stack()
>>> s2.add(3)
>>> s1 == s2
True
"""
return (type(self) == type(other) and
self._contents == other._contents)
def remove(self):
"""
Remove and return top element of Stack self.
Assume Stack self is not empty.
@param Stack self: this Stack
@rtype: object
>>> s = Stack()
>>> s.add(5)
>>> s.add(7)
>>> s.remove()
7
"""
return self._contents.pop()
def is_empty(self):
"""
Return whether Stack self is empty.
@param Stack self: this Stack
@rtype: bool
"""
return len(self._contents) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
```
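This Stack exposes the same add/remove interface that ex2's reverse_top_two relies on (ex2 itself imports its Stack from obfuscated_stack, so treat this only as an interface sketch); the four operations below are exactly what reverse_top_two performs.
```python
# Sketch: swapping the top two items by hand, as ex2's reverse_top_two does.
s = Stack()
for item in (1, 2, 3):
    s.add(item)
top = s.remove()       # 3
second = s.remove()    # 2
s.add(top)
s.add(second)
print(s)               # [1, 3, 2] -- the top two items are now swapped
```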
#### File: labs/lab3/testqueue.py
```python
import unittest
from csc148_queue import Queue
class EmptyTestCase(unittest.TestCase):
"""Test behaviour of an empty Queue.
"""
def setUp(self):
"""Set up an empty queue.
"""
self.queue = Queue()
def tearDown(self):
"""Clean up.
"""
self.queue = None
def testIsEmpty(self):
"""Test is_empty() on empty Queue.
"""
self.assertTrue(
self.queue.is_empty(),
'is_empty returned False on an empty Queue!')
class SingletonTestCase(unittest.TestCase):
"""Check whether adding a single item makes it appear at the front.
"""
def setUp(self):
"""Set up a queue with a single element.
"""
self.queue = Queue()
self.queue.add('a')
def tearDown(self):
"""Clean up.
"""
self.queue = None
def testIsEmpty(self):
"""Test is_empty() on non-empty Queue.
"""
self.assertFalse(
self.queue.is_empty(),
'is_empty returned True on non-empty Queue!')
def testRemove(self):
"""Test remove() on a non-empty Queue.
"""
front = self.queue.remove()
self.assertEqual(
front, 'a',
'The item at the front should have been "a" but was ' +
front + '.')
self.assertTrue(
self.queue.is_empty(),
'Queue with one element not empty after remove().')
class TypicalTestCase(unittest.TestCase):
"""A comprehensive tester of typical behaviour of Queue.
"""
def setUp(self):
"""Set up an empty queue.
"""
self.queue = Queue()
def tearDown(self):
"""Clean up.
"""
self.queue = None
def testAll(self):
"""Check adding and removing several items.
"""
for item in range(20):
self.queue.add(item)
self.assertFalse(
self.queue.is_empty(),
'Queue should not be empty after adding item ' +
str(item))
item = 0
while not self.queue.is_empty():
front = self.queue.remove()
self.assertEqual(
front, item,
'Wrong item at the front of the Queue. Found ' +
str(front) + ' but expected ' + str(item))
item += 1
if __name__ == '__main__':
unittest.main(exit=False)
```
#### File: labs/lab3/teststack.py
```python
import unittest
from stack import Stack
class EmptyTestCase(unittest.TestCase):
"""Test behaviour of an empty Queue.
"""
def setUp(self):
"""Set up an empty queue.
"""
self.stack = Stack()
def tearDown(self):
"""Clean up.
"""
self.stack = None
def testIsEmpty(self):
"""Test is_empty() on empty Queue.
"""
self.assertTrue(
self.stack.is_empty(),
'is_empty returned False on an empty Stack!')
class SingletonTestCase(unittest.TestCase):
"""Check whether adding a single item makes it appear at the front.
"""
def setUp(self):
"""Set up a queue with a single element.
"""
self.stack = Stack()
self.stack.add('a')
def tearDown(self):
"""Clean up.
"""
self.stack = None
def testIsEmpty(self):
"""Test is_empty() on non-empty Queue.
"""
self.assertFalse(
self.stack.is_empty(),
'is_empty returned True on non-empty Stack!')
def testRemove(self):
"""Test remove() on a non-empty Queue.
"""
        top = self.stack.remove()
        self.assertEqual(
            top, 'a',
            'The item at the top should have been "a" but was ' +
            top + '.')
self.assertTrue(
self.stack.is_empty(),
'Stack with one element not empty after remove().')
class TypicalTestCase(unittest.TestCase):
"""A comprehensive tester of typical behaviour of Queue.
"""
def setUp(self):
"""Set up an empty queue.
"""
self.stack = Stack()
def tearDown(self):
"""Clean up.
"""
self.stack = None
def testAll(self):
"""Check adding and removing several items.
"""
for item in range(20):
self.stack.add(item)
            self.assertFalse(
                self.stack.is_empty(),
                'Stack should not be empty after adding item ' + str(item))
item = 19
while not self.stack.is_empty():
top = self.stack.remove()
            self.assertEqual(
                top, item,
                'Wrong item at the front of the Stack. Found ' +
                str(top) + ' but expected ' + str(item))
item -= 1
if __name__ == '__main__':
unittest.main(exit=False)
```
#### File: labs/lab7/ex6_test.py
```python
import unittest
from ex6 import (Tree, descendants_from_list, list_internal, arity,
contains_test_passer, list_if, count)
from hypothesis import given
from hypothesis.strategies import integers, lists
from random import randint
from math import ceil
class TestListInternal(unittest.TestCase):
def test_empty_tree(self):
t = Tree()
self.assertEqual(list_internal(t), [])
def test_one_node(self):
v = randint(0, 10)
t = Tree(v)
self.assertEqual(list_internal(t), [])
@given(lists(elements=integers(min_value=0, max_value=30),
min_size=5, max_size=13))
def test_height_three(self, lst):
t = descendants_from_list(Tree(lst[0]), lst[1:], 3)
num = ceil((len(lst) - 4) / 3)
l1 = list_internal(t)
l1.sort()
l2 = lst[:num + 1]
l2.sort()
self.assertEqual(l1, l2)
@given(lists(elements=integers(min_value=0, max_value=30),
min_size=41, max_size=121))
def test_height_five(self, lst):
t = descendants_from_list(Tree(lst[0]), lst[1:], 3)
num = ceil((len(lst) - 40) / 3)
l1 = list_internal(t)
l1.sort()
l2 = lst[:num + 13]
l2.sort()
self.assertEqual(l1, l2)
class TestArity(unittest.TestCase):
def test_empty_tree(self):
t = Tree()
self.assertEqual(arity(t), 0)
def test_one_node(self):
v = randint(0, 10)
t = Tree(v)
self.assertEqual(arity(t), 0)
@given(lists(elements=integers(min_value=12, max_value=30),
min_size=9, max_size=20),
lists(elements=integers(min_value=2, max_value=8),
min_size=1, max_size=2))
def testRecursiveStep(self, lst, lst_branch):
t = Tree(0)
for i in lst_branch:
new_t = descendants_from_list(Tree(0), lst, i)
t.children.append(new_t)
self.assertEqual(arity(t), max(lst_branch))
class TestContainsTestPasser(unittest.TestCase):
def test_one_node(self):
t = Tree(5)
test = lambda num: num % 2 != 0
self.assertEqual(contains_test_passer(t, test), True)
@given(lists(elements=integers(min_value=1, max_value=30),
min_size=9, max_size=20))
def testRecursiveStep(self, lst):
odd = list(map(lambda x: x * 2 + 1, lst))
even = list(map(lambda x: x * 2, lst))
t_odd = Tree(0)
t_even = Tree(0)
descendants_from_list(t_odd, odd, 3)
descendants_from_list(t_even, even, 4)
test = lambda num: num % 2 != 0 # this means return True if num is odd
self.assertEqual(contains_test_passer(t_even, test), False)
self.assertEqual(contains_test_passer(t_odd, test), True)
class TestListIf(unittest.TestCase):
def test_one_node(self):
v = randint(0, 10)
t = Tree(v)
p = lambda x: x % 2 != 0
self.assertEqual(list_if(t, p), [v] if p(v) else [])
@given(lists(elements=integers(min_value=1, max_value=30),
min_size=9, max_size=20))
def testRecursiveStep(self, lst):
odd = list(map(lambda x: x * 2 + 1, lst))
even = list(map(lambda x: x * 2, lst))
t_odd = Tree(0)
t_even = Tree(0)
descendants_from_list(t_odd, odd, 3)
descendants_from_list(t_even, even, 4)
p = lambda x: x % 2 != 0 # this means return True if num is odd
l1 = list_if(t_even, p)
l2 = list_if(t_odd, p)
l1.sort()
l2.sort()
odd.sort()
self.assertEqual(l1, [])
self.assertEqual(l2, odd)
class TestCount(unittest.TestCase):
def test_empty_tree(self):
t = Tree()
self.assertEqual(count(t), 1)
def test_one_node(self):
v = randint(0, 10)
t = Tree(v)
self.assertEqual(count(t), 1)
@given(lists(elements=integers(min_value=3, max_value=30),
min_size=9, max_size=20))
def testRecursiveStep(self, lst):
t = Tree(0)
descendants_from_list(t, lst, 3)
self.assertEqual(count(t), len(lst) + 1)
if __name__ == '__main__':
unittest.main()
```
#### File: labs/lab9/ex8.py
```python
def create_node(t):
"""Create a LinkedlistNode.
@param BinaryTree t: BinaryTreeNode to be added into Linkedlist
@rtype: LinkedListNode
"""
return LinkedListNode(t.value)
class BinaryTree:
"""
A Binary Tree, i.e. arity 2.
=== Attributes ===
@param object value: value for this binary tree node
@param BinaryTree|None left: left child of this binary tree node
@param BinaryTree|None right: right child of this binary tree node
"""
def __init__(self, value, left=None, right=None):
"""
Create BinaryTree self with value and children left and right.
@param BinaryTree self: this binary tree
@param object value: value of this node
@param BinaryTree|None left: left child
@param BinaryTree|None right: right child
@rtype: None
"""
self.value, self.left, self.right = value, left, right
def __eq__(self, other):
"""
Return whether BinaryTree self is equivalent to other.
@param BinaryTree self: this binary tree
@param Any other: object to check equivalence to self
@rtype: bool
>>> BinaryTree(7).__eq__("seven")
False
>>> b1 = BinaryTree(7, BinaryTree(5))
>>> b1.__eq__(BinaryTree(7, BinaryTree(5), None))
True
"""
return (type(self) == type(other) and
self.value == other.value and
(self.left, self.right) == (other.left, other.right))
def __repr__(self):
"""
Represent BinaryTree (self) as a string that can be evaluated to
produce an equivalent BinaryTree.
@param BinaryTree self: this binary tree
@rtype: str
>>> BinaryTree(1, BinaryTree(2), BinaryTree(3))
BinaryTree(1, BinaryTree(2, None, None), BinaryTree(3, None, None))
"""
return "BinaryTree({}, {}, {})".format(repr(self.value),
repr(self.left),
repr(self.right))
def __str__(self, indent=""):
"""
Return a user-friendly string representing BinaryTree (self)
inorder. Indent by indent.
>>> b = BinaryTree(1, BinaryTree(2, BinaryTree(3)), BinaryTree(4))
>>> print(b)
4
1
2
3
<BLANKLINE>
"""
right_tree = (self.right.__str__(
indent + " ") if self.right else "")
left_tree = self.left.__str__(indent + " ") if self.left else ""
return (right_tree + "{}{}\n".format(indent, str(self.value)) +
left_tree)
def inorder(self):
""" Return LinkedList with values of BinaryTree self inorder.
@param BinaryTree self: this binary tree
@rtype: LinkedList
>>> t = BinaryTree(0, BinaryTree(1), BinaryTree(2))
>>> lnk= t.inorder()
>>> print(lnk)
1 -> 0 -> 2 ->|
>>> t2 = BinaryTree(3, BinaryTree(4), t)
>>> lnk = t2.inorder()
>>> print(lnk)
4 -> 3 -> 1 -> 0 -> 2 ->|
"""
l = LinkedList()
if self.left is None and self.right is None:
new_node = create_node(self)
l.front = l.back = new_node
l.size += 1
elif self.left is None:
new_node = create_node(self)
l.front = new_node
right_linkedlist = self.right.inorder()
new_node.next_ = right_linkedlist.front
l.back = right_linkedlist.back
l.size = 1 + right_linkedlist.size
elif self.right is None:
new_node = create_node(self)
l.back = new_node
left_linkedlist = self.left.inorder()
left_linkedlist.back.next_ = new_node
l.front = left_linkedlist.front
l.size = 1 + left_linkedlist.size
else:
new_node = create_node(self)
left_linkedlist = self.left.inorder()
right_linkedlist = self.right.inorder()
l.front = left_linkedlist.front
l.back = right_linkedlist.back
left_linkedlist.back.next_ = new_node
new_node.next_ = right_linkedlist.front
l.size = left_linkedlist.size + 1 + right_linkedlist.size
return l
def preorder(self):
""" Return LinkedList with values of BinaryTree self in preorder.
@param BinaryTree self: this binary tree
@rtype: LinkedList
>>> t = BinaryTree(0, BinaryTree(1), BinaryTree(2))
>>> lnk= t.preorder()
>>> print(lnk)
0 -> 1 -> 2 ->|
>>> t2 = BinaryTree(3, BinaryTree(4), t)
>>> lnk = t2.preorder()
>>> print(lnk)
3 -> 4 -> 0 -> 1 -> 2 ->|
"""
l = LinkedList()
if self.left is None and self.right is None:
new_node = create_node(self)
l.front = l.back = new_node
l.size += 1
elif self.left is None:
new_node = create_node(self)
l.front = new_node
right_linkedlist = self.right.preorder()
new_node.next_ = right_linkedlist.front
l.back = right_linkedlist.back
l.size = 1 + right_linkedlist.size
elif self.right is None:
new_node = create_node(self)
l.front = new_node
left_linkedlist = self.left.preorder()
new_node.next_ = left_linkedlist.front
l.back = left_linkedlist.back
l.size = 1 + left_linkedlist.size
else:
new_node = create_node(self)
left_linkedlist = self.left.preorder()
right_linkedlist = self.right.preorder()
l.front = new_node
new_node.next_ = left_linkedlist.front
left_linkedlist.back.next_ = right_linkedlist.front
l.size = left_linkedlist.size + 1 + right_linkedlist.size
l.back = right_linkedlist.back
return l
def postorder(self):
""" Return LinkedList with values of BinaryTree self in postorder.
@param BinaryTree self: this binary tree
@rtype: LinkedList
>>> t = BinaryTree(0, BinaryTree(1), BinaryTree(2))
>>> lnk= t.postorder()
>>> print(lnk)
1 -> 2 -> 0 ->|
>>> t2 = BinaryTree(3, BinaryTree(4), t)
>>> lnk = t2.postorder()
>>> print(lnk)
4 -> 1 -> 2 -> 0 -> 3 ->|
"""
l = LinkedList()
if self.left is None and self.right is None:
new_node = create_node(self)
l.front = l.back = new_node
l.size += 1
elif self.left is None:
right_linkedlist = self.right.postorder()
l.front = right_linkedlist.front
new_node = create_node(self)
right_linkedlist.back.next_ = new_node
l.back = new_node
l.size = 1 + right_linkedlist.size
elif self.right is None:
left_linkedlist = self.left.postorder()
new_node = create_node(self)
l.front = left_linkedlist.front
left_linkedlist.back.next_ = new_node
l.back = new_node
l.size = 1 + left_linkedlist.size
else:
left_linkedlist = self.left.postorder()
right_linkedlist = self.right.postorder()
new_node = create_node(self)
l.front = left_linkedlist.front
left_linkedlist.back.next_ = right_linkedlist.front
right_linkedlist.back.next_ = new_node
l.back = new_node
l.size = left_linkedlist.size + 1 + right_linkedlist.size
return l
def longest(self):
""" Return LinkedList with values of longest path from root to leaf in
BinaryTree self.
@param BinaryTree self: this binary tree
@rtype: LinkedList
>>> t = BinaryTree(0, BinaryTree(1))
>>> t2 = BinaryTree(3, BinaryTree(4), t)
>>> print(t2.longest())
3 -> 0 -> 1 ->|
"""
l = LinkedList()
if self.left is None and self.right is None:
new_node = create_node(self)
l.front = l.back = new_node
l.size += 1
elif self.left is None:
new_node = create_node(self)
l.front = new_node
right_longest = self.right.longest()
new_node.next_ = right_longest.front
l.back = right_longest.back
l.size = 1 + right_longest.size
elif self.right is None:
new_node = create_node(self)
l.front = new_node
left_longest = self.left.longest()
new_node.next_ = left_longest.front
l.back = left_longest.back
l.size = 1 + left_longest.size
else:
left_longest = self.left.longest()
right_longest = self.right.longest()
new_node = create_node(self)
if left_longest.size >= right_longest.size:
l.front = new_node
new_node.next_ = left_longest.front
l.back = left_longest.back
l.size = 1 + left_longest.size
else:
l.front = new_node
new_node.next_ = right_longest.front
l.back = right_longest.back
l.size = 1 + right_longest.size
return l
class LinkedListNode:
"""
Node to be used in linked list
=== Attributes ===
@param LinkedListNode next_: successor to this LinkedListNode
@param object value: data this LinkedListNode represents
"""
def __init__(self, value, next_=None):
"""
Create LinkedListNode self with data value and successor next_.
@param LinkedListNode self: this LinkedListNode
@param object value: data of this linked list node
@param LinkedListNode|None next_: successor to this LinkedListNode.
@rtype: None
"""
self.value, self.next_ = value, next_
def __str__(self):
"""
Return a user-friendly representation of this LinkedListNode.
@param LinkedListNode self: this LinkedListNode
@rtype: str
>>> n = LinkedListNode(5, LinkedListNode(7))
>>> print(n)
5 -> 7 ->|
"""
s = "{} ->".format(self.value)
current_node = self.next_
while current_node is not None:
s += " {} ->".format(current_node.value)
current_node = current_node.next_
assert current_node is None, "unexpected non_None!!!"
s += "|"
return s
class LinkedList:
"""
Collection of LinkedListNodes
=== Attributes ==
    @param LinkedListNode front: first node of this LinkedList
@param LinkedListNode back: last node of this LinkedList
@param int size: number of nodes in this LinkedList
a non-negative integer
"""
def __init__(self):
"""
Create an empty linked list.
@param LinkedList self: this LinkedList
@rtype: None
"""
self.front, self.back, self.size = None, None, 0
def __str__(self):
"""
Return a human-friendly string representation of
LinkedList self.
@param LinkedList self: this LinkedList
>>> lnk = LinkedList()
>>> print(lnk)
I'm so empty... experiencing existential angst!!!
"""
if self.front is None:
            assert self.back is None and self.size == 0, "ooooops!"
return "I'm so empty... experiencing existential angst!!!"
else:
return str(self.front)
def prepend(self, value):
"""
Insert value before LinkedList self.front.
@param LinkedList self: this LinkedList
@param object value: value for new LinkedList.front
@rtype: None
>>> lnk = LinkedList()
>>> lnk.prepend(0)
>>> lnk.prepend(1)
>>> lnk.prepend(2)
>>> str(lnk.front)
'2 -> 1 -> 0 ->|'
>>> lnk.size
3
"""
# Create new node with next_ referring to front
new_node = LinkedListNode(value, self.front)
# change front
self.front = new_node
# if the list was empty, change back
if self.size == 0:
self.back = new_node
# update size
self.size += 1
def append(self, value):
"""
Insert a new LinkedListNode with value after self.back.
@param LinkedList self: this LinkedList.
@param object value: value of new LinkedListNode
@rtype: None
>>> lnk = LinkedList()
>>> lnk.append(5)
>>> lnk.size
1
>>> print(lnk.front)
5 ->|
>>> lnk.append(6)
>>> lnk.size
2
>>> print(lnk.front)
5 -> 6 ->|
"""
new_node = LinkedListNode(value)
if self.size == 0:
assert self.back is None and self.front is None, "ooops"
self.front = self.back = new_node
else:
self.back.next_ = new_node
self.back = new_node
self.size += 1
def __len__(self):
"""
Return the number of nodes in LinkedList self.
@param LinkedList self: this LinkedList
@rtype: int
>>> lnk = LinkedList()
>>> lnk.append(0)
>>> lnk.append(3)
>>> lnk.size
2
"""
return self.size
def copy(self):
"""
Return a copy of LinkedList self. The copy should have
different nodes, but equivalent values, from self.
@param LinkedList self: this LinkedList
@rtype: LinkedList
>>> lnk = LinkedList()
>>> lnk.prepend(5)
>>> lnk.prepend(7)
>>> print(lnk.copy())
7 -> 5 ->|
"""
copy_list = LinkedList()
original_node = self.front
while original_node is not None:
copy_list.append(original_node.value)
original_node = original_node.next_
return copy_list
def __add__(self, other):
"""
Return a new list by concatenating self to other. Leave
both self and other unchanged.
@param LinkedList self: this LinkedList
@param LinkedList other: Linked list to concatenate to self
@rtype: LinkedList
>>> lnk1 = LinkedList()
>>> lnk1.prepend(5)
>>> lnk2 = LinkedList()
>>> lnk2.prepend(7)
>>> print(lnk1 + lnk2)
5 -> 7 ->|
>>> print(lnk1)
5 ->|
>>> print(lnk2)
7 ->|
"""
if len(self) == 0:
return other.copy()
elif len(other) == 0:
return self.copy()
else:
list1 = self.copy()
list2 = other.copy()
list1.back.next_ = list2.front
list1.back = list2.back
list1.size += list2.size
return list1
if __name__ == "__main__":
import python_ta
python_ta.check_all(config='pylint.txt')
import doctest
doctest.testmod()
```
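The traversal methods above splice LinkedList nodes directly, which keeps them linear-time. As an alternative sketch (not the file's implementation), the same inorder result can be built by reusing the LinkedList.__add__ defined at the bottom of the file; it is slower because __add__ copies nodes, but it shows the recursive shape more plainly.
```python
# Alternative sketch: inorder traversal via LinkedList concatenation.
# Slower than BinaryTree.inorder above because __add__ copies every node.
def inorder_concat(t):
    result = LinkedList()
    if t is None:
        return result
    result.append(t.value)
    return inorder_concat(t.left) + result + inorder_concat(t.right)
print(inorder_concat(BinaryTree(0, BinaryTree(1), BinaryTree(2))))
# prints: 1 -> 0 -> 2 ->|
```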
#### File: lectures/week5/Wed.py
```python
def concat_str(string_list):
"""
Concatenate all the strings in a possibly-nested list of strings
@param str|list(str|list(...)) string_list: this string list.
@rtype: str
>>> list_ = ['the', 'cow', 'goes', 'moo', '!']
>>> concat_str(list_)
'the cow goes moo !'
    >>> list_ = ['this', 'string', 'is', 'actually',
    ...          [['made'], 'up'], 'of', 'several', 'strings']
    >>> concat_str(list_)
    'this string is actually made up of several strings'
    """
    if isinstance(string_list, str):
        return string_list
    else:
        # join with single spaces so the output matches the doctests above
        return ' '.join([concat_str(elem) for elem in string_list])
def distribute_papers(pile):
"""
Recursive function to distribute papers in 148
@param list[int] pile: our remaining pile of paper
@rtype: None
"""
    if len(pile) == 1:
        # base case: only one sheet left, keep it
        return
    elif len(pile) == 0:
        return
    else:
        # recursive case (sketch; the original lecture stub only had a bare
        # print()): hand off one sheet and pass the rest of the pile along
        print("passing {} sheets along".format(len(pile) - 1))
        distribute_papers(pile[1:])
```
#### File: lectures/week8/binary_tree .py
```python
from csc148_queue import Queue
class BinaryTree:
"""
A Binary Tree, i.e. arity 2.
"""
def __init__(self, data, left=None, right=None):
"""
Create BinaryTree self with data and children left and right.
@param BinaryTree self: this binary tree
@param object data: data of this node
@param BinaryTree|None left: left child
@param BinaryTree|None right: right child
@rtype: None
"""
self.data, self.left, self.right = data, left, right
def __eq__(self, other):
"""
Return whether BinaryTree self is equivalent to other.
@param BinaryTree self: this binary tree
@param Any other: object to check equivalence to self
@rtype: bool
>>> BinaryTree(7).__eq__("seven")
False
>>> b1 = BinaryTree(7, BinaryTree(5))
>>> b1.__eq__(BinaryTree(7, BinaryTree(5), None))
True
"""
return (type(self) == type(other) and
self.data == other.data and
self.left == other.left and
self.right == other.right)
def __str__(self, indent=""):
"""
Return a user-friendly string representing BinaryTree (self)
inorder. Indent by indent.
>>> b = BinaryTree(1, BinaryTree(2, BinaryTree(3)), BinaryTree(4))
>>> print(b)
4
1
2
3
<BLANKLINE>
"""
# obtain a visual representation of the left subtree (recursively)
left_tree = (self.left.__str__(indent + " ")
if self.left
else "")
# obtain a visual representation of the right subtree (recursively)
right_tree = (self.right.__str__(indent + " ")
if self.right
else "")
# put them together with the root on a new line in between
return (right_tree +
"{}{}\n".format(indent, str(self.data)) +
left_tree)
def __repr__(self):
"""
Represent BinaryTree (self) as a string that can be evaluated to
produce an equivalent BinaryTree.
@param BinaryTree self: this binary tree
@rtype: str
>>> BinaryTree(1, BinaryTree(2), BinaryTree(3))
BinaryTree(1, BinaryTree(2, None, None), BinaryTree(3, None, None))
"""
return "BinaryTree({}, {}, {})".format(repr(self.data),
repr(self.left),
repr(self.right))
def __contains__(self, value):
"""
Return whether tree rooted at self contains value.
@param BinaryTree self: binary tree to search for value
@param object value: value to search for
@rtype: bool
>>> BinaryTree(5, BinaryTree(7), BinaryTree(9)).__contains__(7)
True
>>> BinaryTree(5, BinaryTree(7), BinaryTree(9)).__contains__(3)
False
"""
# We turned the external method contains into a special method of
# a BinaryTree object. No need to have both.
return (self.data == value or
(self.left is not None and value in self.left) or
(self.right is not None and value in self.right))
def contains(node, value):
"""
Return whether tree rooted at node contains value.
@param BinaryTree|None node: binary tree to search for value
@param object value: value to search for
@rtype: bool
>>> contains(None, 5)
False
>>> contains(BinaryTree(5, BinaryTree(7), BinaryTree(9)), 7)
True
"""
# handling the None case will be trickier for a method
if node is None:
return False
else:
return (node.data == value or
contains(node.left, value) or
contains(node.right, value))
def height(t):
"""
Return 1 + length of the longest path of t.
@param BinaryTree t: binary tree to find the height of
@rtype: int
>>> t = BinaryTree(13)
>>> height(t)
1
>>> height(BinaryTree(5, BinaryTree(3), BinaryTree(8, BinaryTree(7))))
3
"""
if t is None:
return 0
else:
return 1 + max(height(t.left), height(t.right))
def evaluate(b):
"""
Evaluate the expression rooted at b. If b is a leaf,
return its float data. Otherwise, evaluate b.left and
b.right and combine them with b.data.
Assume: -- b is a non-empty binary tree
-- interior nodes contain data in {"+", "-", "*", "/"}
-- interior nodes always have two children
-- leaves contain float data
@param BinaryTree b: binary tree representing arithmetic expression
@rtype: float
>>> b = BinaryTree(3.0)
>>> evaluate(b)
3.0
>>> b = BinaryTree("*", BinaryTree(3.0), BinaryTree(4.0))
>>> evaluate(b)
12.0
"""
if b.left is None and b.right is None:
return b.data
else:
return eval(str(evaluate(b.left)) +
str(b.data) +
str(evaluate(b.right)))
def inorder_visit(node, act):
"""
Visit each node of binary tree rooted at node in order and act.
@param BinaryTree node: binary tree to visit
@param (BinaryTree)->object act: function to execute on visit
@rtype: None
>>> b = BinaryTree(8)
>>> b = insert(b, 4)
>>> b = insert(b, 2)
>>> b = insert(b, 6)
>>> b = insert(b, 12)
>>> b = insert(b, 14)
>>> b = insert(b, 10)
>>> def f(node): print(node.data)
>>> inorder_visit(b, f)
2
4
6
8
10
12
14
"""
if node is not None:
inorder_visit(node.left, act)
act(node)
inorder_visit(node.right, act)
def preorder_visit(t, act):
"""
Visit BinaryTree t in preorder and act on nodes as you visit.
@param BinaryTree|None t: binary tree to visit
@param (BinaryTree)->Any act: function to use on nodes
@rtype: None
>>> b = BinaryTree(8)
>>> b = insert(b, 4)
>>> b = insert(b, 2)
>>> b = insert(b, 6)
>>> b = insert(b, 12)
>>> b = insert(b, 14)
>>> b = insert(b, 10)
>>> def f(node): print(node.data)
>>> preorder_visit(b, f)
8
4
2
6
12
10
14
"""
if t is not None:
act(t)
preorder_visit(t.left, act)
preorder_visit(t.right, act)
def postorder_visit(t, act):
"""
Visit BinaryTree t in postorder and act on nodes as you visit.
@param BinaryTree|None t: binary tree to visit
@param (BinaryTree)->Any act: function to use on nodes
@rtype: None
>>> b = BinaryTree(8)
>>> b = insert(b, 4)
>>> b = insert(b, 2)
>>> b = insert(b, 6)
>>> b = insert(b, 12)
>>> b = insert(b, 14)
>>> b = insert(b, 10)
>>> def f(node): print(node.data)
>>> postorder_visit(b, f)
2
6
4
10
14
12
8
"""
if t is not None:
postorder_visit(t.left, act)
postorder_visit(t.right, act)
act(t)
def levelorder_visit(t, act):
"""
Visit BinaryTree t in level order and act on nodes as they are visited
@param BinaryTree|None t: binary tree to visit
@param (BinaryTree)->Any act: function to use during visit
@rtype: None
>>> b = BinaryTree(8)
>>> b = insert(b, 4)
>>> b = insert(b, 2)
>>> b = insert(b, 6)
>>> b = insert(b, 12)
>>> b = insert(b, 14)
>>> b = insert(b, 10)
>>> def f(node): print(node.data)
>>> levelorder_visit(b, f)
8
4
12
2
6
10
14
"""
nodes = Queue()
nodes.add(t)
while not nodes.is_empty():
next_node = nodes.remove()
act(next_node)
if next_node.left:
nodes.add(next_node.left)
if next_node.right:
nodes.add(next_node.right)
def visit_level(t, n, act):
"""
Visit each node of BinaryTree t at level n and act on it. Return
the number of nodes visited.
@param BinaryTree|None t: binary tree to visit
@param int n: level to visit
@param (BinaryTree)->Any act: function to execute on nodes at level n
@rtype: int
>>> b = BinaryTree(8)
>>> b = insert(b, 4)
>>> b = insert(b, 2)
>>> b = insert(b, 6)
>>> b = insert(b, 12)
>>> b = insert(b, 14)
>>> b = insert(b, 10)
>>> def f(node): print(node.data)
>>> visit_level(b, 2, f)
2
6
10
14
4
"""
if t is None:
return 0
elif n == 0:
act(t)
return 1
elif n > 0:
return (visit_level(t.left, n-1, act) +
visit_level(t.right, n-1, act))
else:
return 0
def levelorder_visit2(t, act):
"""
Visit BinaryTree t in level order and act on each node.
@param BinaryTree|None t: binary tree to visit
@param (BinaryTree)->Any act: function to use during visit
@rtype: None
>>> b = BinaryTree(8)
>>> b = insert(b, 4)
>>> b = insert(b, 2)
>>> b = insert(b, 6)
>>> b = insert(b, 12)
>>> b = insert(b, 14)
>>> b = insert(b, 10)
>>> def f(node): print(node.data)
>>> levelorder_visit2(b, f)
8
4
12
2
6
10
14
"""
# this approach uses iterative deepening
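# note: each call to visit_level below re-walks the tree from the root, so this
# version does one pass per level (roughly O(n * height) work in total), trading
# extra time for not needing the queue used by levelorder_visit above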
n = 0
visited = visit_level(t, n, act)
while visited > 0:
n += 1
visited = visit_level(t, n, act)
# the following functions assume a binary search tree
def bst_contains(node, value):
"""
Return whether tree rooted at node contains value.
Assume node is the root of a Binary Search Tree
@param BinaryTree|None node: node of a Binary Search Tree
@param object value: value to search for
@rtype: bool
>>> bst_contains(None, 5)
False
>>> bst_contains(BinaryTree(7, BinaryTree(5), BinaryTree(9)), 5)
True
"""
if node is None:
return False
elif node.data == value:
return True
elif value < node.data:
return bst_contains(node.left, value)
elif value > node.data:
return bst_contains(node.right, value)
else:
assert False, "WTF!"
def find_max(node):
"""
Find and return subnode with maximum data.
Assume node is the root of a binary search tree.
@param BinaryTree node: binary tree node to begin search from
@rtype: BinaryTree
>>> find_max(BinaryTree(5, BinaryTree(3), BinaryTree(7)))
BinaryTree(7, None, None)
"""
return find_max(node.right) if node.right is not None else node
def insert(node, data):
"""
Insert data in BST rooted at node if necessary, and return new root.
Assume node is the root of a Binary Search Tree.
@param BinaryTree node: root of a binary search tree.
@param object data: data to insert into BST, if necessary.
>>> b = BinaryTree(8)
>>> b = insert(b, 4)
>>> b = insert(b, 2)
>>> b = insert(b, 6)
>>> b = insert(b, 12)
>>> b = insert(b, 14)
>>> b = insert(b, 10)
>>> print(b)
14
12
10
8
6
4
2
<BLANKLINE>
"""
return_node = node
if not node:
return_node = BinaryTree(data)
elif data < node.data:
node.left = insert(node.left, data)
elif data > node.data:
node.right = insert(node.right, data)
else: # nothing to do
pass
return return_node
if __name__ == "__main__":
import doctest
doctest.testmod()
# eval example - this is why you should be careful when using it:
# if we pass a destructive command (say, to remove all our files), then eval
# is not going to warn or stop us from self-destruction :)
# import os
# eval(input("your wish is my command:"))
b = BinaryTree(8)
b = insert(b, 4)
b = insert(b, 2)
b = insert(b, 6)
print(b)
# 8
# 6
# 4
# 2
def f(node): print(node.data)
# add breakpoint on the following line before you start debugging ...
levelorder_visit2(b, f)
```
#### File: 2019 Winter/hw3/q2.py
```python
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_boston
np.random.seed(0)
from scipy.special import logsumexp  # scipy.misc.logsumexp was removed in newer SciPy versions
from sklearn.model_selection import train_test_split
# load boston housing prices dataset
boston = load_boston()
x = boston['data']
N = x.shape[0]
x = np.concatenate((np.ones((506,1)),x),axis=1) #add constant one feature - no bias needed
d = x.shape[1]
y = boston['target']
idx = np.random.permutation(range(N))
#helper function
def l2(A,B):
'''
Input: A is a Nxd matrix
B is a Mxd matrix
Output: dist is a NxM matrix where dist[i,j] is the square norm between A[i,:] and B[j,:]
i.e. dist[i,j] = ||A[i,:]-B[j,:]||^2
'''
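# the implementation below uses the identity
#   ||a - b||^2 = ||a||^2 + ||b||^2 - 2 * a.b
# computed for all row pairs at once via broadcasting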
A_norm = (A**2).sum(axis=1).reshape(A.shape[0],1)
B_norm = (B**2).sum(axis=1).reshape(1,B.shape[0])
dist = A_norm+B_norm-2*A.dot(B.transpose())
return dist
"""
For a new test example x, we assign a weight a(i) to each training example based on
its distance to x. The weights form the diagonal matrix A used to solve for w*, and
the prediction is then computed from x and w*.
"""
#to implement
def LRLS(test_datum, x_train, y_train, tau,lam=1e-5):
'''
Input: test_datum is a dx1 test vector
x_train is the N_train x d design matrix
y_train is the N_train x 1 targets vector
tau is the local reweighting parameter
lam is the regularization parameter
output is y_hat the prediction on test_datum
'''
# calculate the distances between test_datum and each training set example
dist = l2(test_datum, x_train)
# do transformation to dist
dist = np.divide(-dist, 2*(tau**2))
# get the max number B in dist
B = np.max(dist)
# create numerator array and denominator
numerator_array = np.exp(dist - B)
denominator = np.exp(logsumexp(dist - B))
# diagonal numbers
a = np.divide(numerator_array, denominator)[0]
# create diagonal matrix A
A = np.diag(a)
# get w*
# X^TAX + lamdaI
matrix_1 = x_train.transpose()@A@x_train + lam*np.identity(d)
# X^TAy
matrix_2 = x_train.transpose()@A@y_train
target_w = np.linalg.solve(matrix_1, matrix_2)
predicted_y = test_datum @ target_w
return predicted_y
def run_validation(x,y,taus,val_frac):
'''
Input: x is the N x d design matrix
y is the N x 1 targets vector
taus is a vector of tau values to evaluate
val_frac is the fraction of examples to use as validation data
output is
a vector of training losses, one for each tau value
a vector of validation losses, one for each tau value
'''
training_loss = []
validation_loss = []
#split data set to train set and validation set
X_train, X_test, Y_train, Y_test = train_test_split(x, y, test_size = val_frac, random_state=0)
for tau in taus:
predicted_y_array = []
for test_datum in X_test:
predicted_y = LRLS(test_datum.reshape((1,d)), X_train, Y_train, tau, lam=1e-5)
predicted_y_array.append(predicted_y)
validation_cost = np.sum( (np.array(predicted_y_array).squeeze() - Y_test)**2 ) / (2. * N * val_frac)
# note: the training loss below still uses the last validation prediction rather than
# predictions on X_train, so it is only a rough placeholder
train_cost = np.sum( (np.array(predicted_y) - Y_train)**2 ) / (2. * N * (1-val_frac))
validation_loss.append(validation_cost)
training_loss.append(train_cost)
return np.array(training_loss), np.array(validation_loss)
if __name__ == "__main__":
# In this exercise we fix lambda (hard-coded to 1e-5) and only vary the tau value. Feel free to play with lambda as well if you wish
taus = np.logspace(1.0,3,200)
train_losses, test_losses = run_validation(x,y,taus,val_frac=0.3)
plt.semilogx(taus, train_losses)
plt.savefig('train_losses.jpg')
plt.figure()
plt.semilogx(taus, test_losses)
plt.savefig('validation_losses.jpg')
```
|
{
"source": "Jerry-Terrasse/LlfSystem",
"score": 3
}
|
#### File: Jerry-Terrasse/LlfSystem/shoter.py
```python
from PIL import ImageGrab as ig
import numpy as np
def shot(x1=None,y1=None,x2=None,y2=None,name=None):
if x1 is None or y1 is None or x2 is None or y2 is None:
ret=ig.grab()
else:
ret=ig.grab((x1,y1,x2,y2))
if name is not None:
ret.save(name)
return np.array(ret)[:,:,::-1]
if __name__=='__main__':
print("Shoter Here")
```
|
{
"source": "jerrytheo96/jdoc-scraper",
"score": 3
}
|
#### File: jerrytheo96/jdoc-scraper/misc.py
```python
import os
import xml.etree.ElementTree as ET
from xml.dom import minidom
def get_absolute_url(current_url, relative_url):
'''Returns the absolute url given the current url and the url
relative to the current url.
'''
current_url = current_url.split('/')
relative_url = relative_url.split('/')
# Number of levels to go up.
levels = relative_url.count('..')
abs_url = '/'.join(current_url[: -(levels+1)]) + '/' \
+ '/'.join(relative_url[levels:])
return abs_url
def write_xml(package_info):
'''Write the package info to an xml file.
The xml is structured as follows,
package
├── name
├── description
├── class (id)
│ ├── name
│ └── description
└── method (id)
├── name
├── description
├── parameter
│ ├── name
│ └── type
├── return
└── class
The bracketed terms indicate attributes.
'''
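# As a rough illustration (keys inferred from the accesses below), package_info is
# expected to look like:
#   {'name': ..., 'description': ...,
#    'classes': [{'name': ..., 'description': ...,
#                 'methods': {'methods': [{'name': ..., 'description': ...,
#                                          'parameters': {...}, 'return': ...}]}}]}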
root = ET.Element('package')
# Name.
name = ET.SubElement(root, 'name')
name.text = package_info['name']
# Description.
desc = ET.SubElement(root, 'desc')
desc.text = package_info['description']
# Classes.
cls_id = 0
mtd_id = 0
for class_ in package_info['classes']:
cls = ET.SubElement(root, 'class')
# Class ID.
cls.set('id', str(cls_id))
cls_id = cls_id + 1
# Class name.
cls_name = ET.SubElement(cls, 'name')
cls_name.text = class_['name']
# Class description.
cls_desc = ET.SubElement(cls, 'description')
cls_desc.text = class_['description']
for class_ in package_info['classes']:
for method in class_['methods']['methods']:
mtd = ET.SubElement(root, 'method')
# Method ID.
mtd.set('id', str(mtd_id))
mtd_id = mtd_id + 1
# Method name.
mtd_name = ET.SubElement(mtd, 'name')
mtd_name.text = method['name']
# Method description.
mtd_desc = ET.SubElement(mtd, 'description')
mtd_desc.text = method['description']
# Method parameters.
prm_id = 0
if method['parameters']:
for param in method['parameters']:
prm = ET.SubElement(mtd, 'parameter')
prm.set('id', str(prm_id))
prm_id = prm_id + 1
prm_name = ET.SubElement(prm, 'name')
prm_name.text = param
prm_type = ET.SubElement(prm, 'type')
prm_type.text = method['parameters'][param]
# Method return type.
if method['return']:
mtd_retn = ET.SubElement(mtd, 'return')
mtd_retn.text = method['return']
# Method class.
mtd_clsn = ET.SubElement(mtd, 'class')
mtd_clsn.text = class_['name']
# Prettify the xml to double space indentation.
rough_string = ET.tostring(root, 'utf-8')
reparsed = minidom.parseString(rough_string)
xml_path = os.path.join('docs', package_info['name'] + '.xml')
with open(xml_path, 'w') as xml_file:
xml_file.write(reparsed.toprettyxml(indent=' '))
```
|
{
"source": "jerrytheo/mim-gazebo",
"score": 2
}
|
#### File: mim-gazebo/scripts/simulate.py
```python
from signal import signal, SIGINT
import sys
import subprocess
from tools import find_world_file
from tools import print_col
from tools import update_environ
help_text = """
Usage: ./simulate.py <world-name> [-h] [--help] [-q] [--quiet]
[-m] [--multiple] [-c] [--client]
Set up environment and run gazebo server and (optionally) client.
<world-name>
The name of the world to simulate. Could either be the path
to a world SDF file, or a file from 'worlds/'. If specifying
from 'worlds/' extension .world may be omitted, e.g., icy.
OPTIONAL ARGUMENTS:
-q --quiet Silence output.
-m --multiple Restartable Gazebo Client.
-c --client Run Gazebo Client.
-h --help Display help and exit.
"""
def parse_args():
"""Parse command line arguments."""
args_to_pass = ['--verbose']
launch_client = False
single_launch = True
for arg in sys.argv[1:]:
if arg in ['-q', '--quiet']:
args_to_pass.remove('--verbose')
elif arg in ['-c', '--client']:
launch_client = True
elif arg in ['-m', '--multiple']:
single_launch = False
elif arg in ['-h', '--help']:
print(help_text)
sys.exit(0)
else:
wfile = find_world_file(arg)
if wfile is None:
print('Invalid world file.')
print(help_text)
sys.exit(-1)
args_to_pass.insert(0, wfile)
return args_to_pass, [launch_client, single_launch]
def sig_handler(num, fr):
"""Signal handler for SIGINT."""
if flags[0]:
print('', end='\r')
print_col('Why would you do this? Close the GUI first.')
print_col('Attempting to interrupt server.')
try:
server.wait(30)
except subprocess.TimeoutExpired:
print_col('Failed. Attempting to terminate server.')
try:
server.terminate()
server.wait(30)
except subprocess.TimeoutExpired:
print_col('Hit Ctrl-\\ if you run out of patience.')
else:
print('', end='\r')
print_col('Quitting server.')
def run_client(args, env, single=True):
"""Runs the client for Gazebo. Sends SIGINT to server if the client
fails to launch.
"""
try:
while True:
client = subprocess.Popen(['gzclient', *args], env=env)
client.wait()
if not single:
print_col('Client quit. Restart? (y/n)', end=' ')
ch = input()
else:
ch = 'n'
if ch != 'y':
print_col('Client will not be restarted.')
print_col('Press Ctrl-C to quit server.')
return False
else:
print_col('Restarting client.')
print('')
except Exception:
server.send_signal(SIGINT)
env = update_environ()
args, flags = parse_args()
signal(SIGINT, sig_handler)
server = subprocess.Popen(['gzserver', *args], env=env)
if flags[0]:
flags[0] = run_client(args, env, single=flags[1])
server.wait()
```
|
{
"source": "jerrytheo/mim-plotsim",
"score": 3
}
|
#### File: jerrytheo/mim-plotsim/demo_sq.py
```python
import numpy as np
from scipy.spatial import distance
from matplotlib import pyplot as plt
from matplotlib.animation import FuncAnimation
# Set a few numpy options.
np.seterr(divide='ignore', invalid='ignore')
np.set_printoptions(precision=2)
class RobotSwarm:
def __init__(self, nbot, mass=1., R_dist=.5, F_max=.2, V_max=.1,
power=2, friction=.4):
self.n = nbot
self.m = mass
self.R = R_dist
self.F = F_max
self.V = V_max
self.p = power
self.u = friction
self.G = 0.18
self.v = np.zeros((self.n, 2))
self.X = np.random.uniform(-2, 2, (self.n, 2))
self.b = np.ones(nbot)
self.b[:(nbot // 2)] = -1
np.random.shuffle(self.b)
self._likes = self.b[:, None] @ self.b[:, None].T
def setup_plot(self, fig, ax):
self.fig = fig
self.ax = ax
self.ax.set_xlim(-10, 10)
self.ax.set_ylim(-10, 10)
self.ax.grid(True)
self.l1, = self.ax.plot(
self.X[self.b < 0, 0], self.X[self.b < 0, 1], 'ro', ms=2)
self.l2, = self.ax.plot(
self.X[self.b > 0, 0], self.X[self.b > 0, 1], 'bo', ms=2)
def __call__(self, i):
r = distance.pdist(self.X)
a = self.estimate_accel(r)
self.v *= self.u
self.v += np.sum(a, axis=0)
np.clip(self.v, -self.V, self.V, out=self.v)
self.X += self.v
select = np.random.uniform(size=self.n) < .02
if np.any(select):
self.b[select] *= -1
self._likes = self.b[:, None] @ self.b[:, None].T
self.l1.set_data(self.X[self.b < 0, 0], self.X[self.b < 0, 1])
self.l2.set_data(self.X[self.b > 0, 0], self.X[self.b > 0, 1])
return self.l1, self.l2
def estimate_accel(self, distances):
# Estimating the magnitude of force.
f = self.G * (self.m ** 2) / (distances ** self.p)
f = distance.squareform(f)
r = distance.squareform(distances)
sel = self._likes > 0
# combine the masks before indexing: chained indexing like f[sel][...] writes
# into a copy of f and would have no effect
f[sel & (r > (self.R * np.sqrt(2)))] *= -1
f[~sel & (r > self.R)] *= -1
f[r > (2. * self.R)] = 0
# Estimating the direction of force.
unit = self.X[None, :] - self.X[:, None]
unit = np.nan_to_num(unit / np.linalg.norm(unit, axis=2)[:, :, None])
return (unit * np.clip(f, -self.F, self.F)[:, :, None]) / self.m
if __name__ == '__main__':
fig, ax = plt.subplots()
rs = RobotSwarm(256)
rs.setup_plot(fig, ax)
anim = FuncAnimation(fig, rs, interval=50)
plt.show()
```
|
{
"source": "jerrytron/twine-story-export",
"score": 2
}
|
#### File: jerrytron/twine-story-export/cdam_convert_linear.py
```python
import os
import re
import sys
import struct
import argparse
import tiddlywiki as tiddly
VERSION = "1.0"
# For holding variable keys and values.
VARIABLES = {}
TITLE_MAP = {}
STORY_MAP = {}
PASSAGES = {}
STORY_TITLE = ""
STORY_AUTHOR = ""
STORY_SUBTITLE = ""
STORY_CREDITS = ""
STORY_VERSION = ""
STORY_CONTACT = ""
STORY_LANGUAGE = ""
REPORT = ""
OPERATION_TEST = bytearray()
TOTAL_OPS = 0
VERBOSE = False
kAppend = "<append>"
kContinue = "<continue>"
kContinueCopy = 'Continue...'
class CDAMParser(argparse.ArgumentParser):
def error(self, message):
sys.stderr.write('error: %s\n' % message)
self.print_help()
sys.exit(2)
def main():
global STORY_TITLE
global STORY_AUTHOR
global STORY_SUBTITLE
global STORY_CREDITS
global STORY_CONTACT
global STORY_LANGUAGE
global STORY_VERSION
# To Make a Linear Story:
# python ./cdam_convert_twine.py --title
parser = CDAMParser(description='CDAM Twine Source Code Converter')
parser.add_argument('--version', action='version', version=VERSION)
#parser.add_argument('--dirname', default='NONE', action='store', help='Directory name for story on the file system.')
#parser.add_argument('--title', default='Untitled', action='store', help='The story title.')
#parser.add_argument('--subtitle', default='NONE', action='store', help='The story subtitle.')
#parser.add_argument('--author', default='Anonymous', action='store', help='The author of the story.')
#parser.add_argument('--credits', default='', action='store', help='Additional story credits.')
#parser.add_argument('--contact', default='Follow @choosatron online!', action='store', help='Misc contact info.')
#parser.add_argument('--lang', default='eng', action='store', help='Up to four character language code.')
#parser.add_argument('--ver', default='1.0.0', action='store', help='Story version in three parts, x.x.x')
parser.add_argument('--source', default='', action='store', help='The Twine source code file.')
parser.add_argument('--output', default='./', action='store', help='The location to create the output files.')
args = parser.parse_args()
story = LoadSource(args.source)
if story == False:
print("[ERROR] Failed to read file contents.")
return
tiddlySrc = ParsePassages(story)
print("Done!")
def LoadSource(path):
# declare the module-level metadata fields so the parsed header values are visible elsewhere
global STORY_TITLE, STORY_SUBTITLE, STORY_AUTHOR, STORY_CREDITS
global STORY_CONTACT, STORY_LANGUAGE, STORY_VERSION
try:
file = open(path, 'r')
except IOError:
print("[ERROR] File not found: " + path)
return False
#sourceStr = file.read()
source = ""
for line in file:
#print line
if line.find("Title: ") >= 0:
# Split the line at title, grab the second part, chop off the newline.
STORY_TITLE = line.split("Title: ", 1)[1][:-1]
print(STORY_TITLE)
continue
if line.find("Subtitle: ") >= 0:
STORY_SUBTITLE = line.split("Subtitle: ", 1)[1][:-1]
print(STORY_SUBTITLE)
continue
if line.find("Author: ") >= 0:
STORY_AUTHOR = line.split("Author: ", 1)[1][:-1]
print(STORY_AUTHOR)
continue
if line.find("Credits: ") >= 0:
STORY_CREDITS = line.split("Credits: ", 1)[1][:-1]
print(STORY_CREDITS)
continue
if line.find("Contact: ") >= 0:
STORY_CONTACT = line.split("Contact: ", 1)[1][:-1]
print(STORY_CONTACT)
continue
if line.find("Language: ") >= 0:
STORY_LANGUAGE = line.split("Language: ", 1)[1][:-1]
print(STORY_LANGUAGE)
continue
if line.find("Version: ") >= 0:
STORY_VERSION = line.split("Version: ", 1)[1][:-1]
print(STORY_VERSION)
continue
source += line
file.close()
return source
def ParsePassages(source):
global STORY_TITLE
global STORY_AUTHOR
global STORY_SUBTITLE
global STORY_CREDITS
global STORY_CONTACT
global STORY_LANGUAGE
global STORY_VERSION
def BuildTiddlyPassage():
passage = ""
return passage
if __name__ == '__main__':
main()
```
|
{
"source": "jerrytxi/speech_tools",
"score": 2
}
|
#### File: speech_tools/python/srt2textgrid.py
```python
import os
import glob
import argparse
from datetime import datetime
try:
from praatio import tgio
from srt import parse
except ImportError:
print("praatio/srt not installed; please run 'pip3 install srt praatio'.")
def validate(args):
"""
Check that the CLI arguments are valid.
"""
if not args.source_path:
print("Error: You need to specify a source path.")
return False
else:
if not os.path.exists(args.source_path):
print("Error: Source path is not a folder or file.")
return False
return True
def srtToGrid(srtFile,outputFile):
srtFileObj=open(srtFile)
subs = parse(srtFileObj.read())
entryList=[]
tMax=0
for sub in subs:
startTime=sub.start.total_seconds()
endTime=sub.end.total_seconds()
label=sub.content
intTier=(startTime,endTime,label)
entryList.append(intTier)
tMax=endTime
srtFileObj.close()
print("Save TextGrid to {output} ".format(output=outputFile))
tierName="Sentences"
if os.path.isfile(outputFile):
tg = tgio.openTextgrid(outputFile)
if tierName in tg.tierDict:
tierName=tierName+datetime.now().strftime("%m%d%Y%H%M%S")
else:
tg = tgio.Textgrid()
wordTier = tgio.IntervalTier(tierName, entryList, 0, tMax)
tg.addTier(wordTier)
tg.save(outputFile)
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument('source_path', help="Path to the srt file to convert to TextGrid. \
You can use autosub to generate an srt from a wav file.", nargs='?')
parser.add_argument('-o', '--output', help="Output path for the TextGrid (by default, \
the TextGrid is saved in the same directory and with the same name as the source path)")
args = parser.parse_args()
if not validate(args):
return 1
if os.path.isfile(args.source_path):
#source path is a file
base = os.path.splitext(args.source_path)[0]
srtFile= "{base}.{format}".format(base=base, format='srt')
srtFileExists = os.path.isfile(srtFile)
if not srtFileExists:
print("Error: srt file does not exist.")
return 1
else:
outputFile=args.output
if not outputFile:
outputFile = "{base}.{format}".format(base=base, format='TextGrid')
srtToGrid(srtFile,outputFile)
else:
#source path is a dir
folder = args.source_path
srtFiles = glob.glob(os.path.join(folder, '*.srt'))
for srtFile in srtFiles:
base = os.path.splitext(srtFile)[0]
outputFile = "{base}.{format}".format(base=base, format='TextGrid')
srtToGrid(srtFile,outputFile)
return 0
if __name__ == '__main__':
import sys
sys.exit(main(sys.argv))
```
#### File: speech_tools/python/wav2srt.py
```python
import argparse
import os
import glob
try:
import autosub
except ImportError:
print("autosub not installed; please run 'pip install autosub'.")
def validate(args):
"""
Check that the CLI arguments are valid.
"""
if not args.source_path:
print("Error: You need to specify a source path.")
return False
else:
if not os.path.isdir(args.source_path):
print("Error: Source path is not a folder.you can run autosub direct.")
return False
return True
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument('source_path', help="Path to the video or audio file to subtitle",
nargs='?')
parser.add_argument('-S', '--src-language', help="Language spoken in source file",
default="zh-CN")
parser.add_argument('--list-languages', help="List all available source/destination languages",
action='store_true')
args = parser.parse_args()
if args.list_languages:
os.system("autosub --list-languages")
return 0
if not validate(args):
return 1
wavFiles = glob.glob(os.path.join(args.source_path, '*.wav'))
for wavFile in wavFiles:
print("autosub '{wavFile}' -S {lang} -D {lang}".format(wavFile=wavFile,lang=args.src_language))
os.system("autosub '{wavFile}' -S {lang} -D {lang}".format(wavFile=wavFile,lang=args.src_language))
return 0
if __name__ == '__main__':
import sys
sys.exit(main(sys.argv))
```
|
{
"source": "jerryuhoo/PaddleSpeech",
"score": 2
}
|
#### File: paddleaudio/datasets/rirs_noises.py
```python
import collections
import csv
import os
import random
from typing import List
from paddle.io import Dataset
from tqdm import tqdm
from ..backends import load as load_audio
from ..backends import save as save_wav
from ..utils import DATA_HOME
from ..utils.download import download_and_decompress
from .dataset import feat_funcs
__all__ = ['OpenRIRNoise']
class OpenRIRNoise(Dataset):
archieves = [
{
'url': 'http://www.openslr.org/resources/28/rirs_noises.zip',
'md5': 'e6f48e257286e05de56413b4779d8ffb',
},
]
sample_rate = 16000
meta_info = collections.namedtuple('META_INFO', ('id', 'duration', 'wav'))
base_path = os.path.join(DATA_HOME, 'open_rir_noise')
wav_path = os.path.join(base_path, 'RIRS_NOISES')
csv_path = os.path.join(base_path, 'csv')
subsets = ['rir', 'noise']
def __init__(self,
subset: str='rir',
feat_type: str='raw',
target_dir=None,
random_chunk: bool=True,
chunk_duration: float=3.0,
seed: int=0,
**kwargs):
assert subset in self.subsets, \
'Dataset subset must be one in {}, but got {}'.format(self.subsets, subset)
self.subset = subset
self.feat_type = feat_type
self.feat_config = kwargs
self.random_chunk = random_chunk
self.chunk_duration = chunk_duration
OpenRIRNoise.csv_path = os.path.join(
target_dir, "open_rir_noise",
"csv") if target_dir else self.csv_path
self._data = self._get_data()
super(OpenRIRNoise, self).__init__()
# Set up a seed to reproduce training or predicting result.
# random.seed(seed)
def _get_data(self):
# Download audio files.
print(f"rirs noises base path: {self.base_path}")
if not os.path.isdir(self.base_path):
download_and_decompress(
self.archieves, self.base_path, decompress=True)
else:
print(
f"{self.base_path} already exists, we will not download and decompress again"
)
# Data preparation.
print(f"prepare the csv to {self.csv_path}")
if not os.path.isdir(self.csv_path):
os.makedirs(self.csv_path)
self.prepare_data()
data = []
with open(os.path.join(self.csv_path, f'{self.subset}.csv'), 'r') as rf:
for line in rf.readlines()[1:]:
audio_id, duration, wav = line.strip().split(',')
data.append(self.meta_info(audio_id, float(duration), wav))
random.shuffle(data)
return data
def _convert_to_record(self, idx: int):
sample = self._data[idx]
record = {}
# To show all fields in a namedtuple: `type(sample)._fields`
for field in type(sample)._fields:
record[field] = getattr(sample, field)
waveform, sr = load_audio(record['wav'])
assert self.feat_type in feat_funcs.keys(), \
f"Unknown feat_type: {self.feat_type}, it must be one in {list(feat_funcs.keys())}"
feat_func = feat_funcs[self.feat_type]
feat = feat_func(
waveform, sr=sr, **self.feat_config) if feat_func else waveform
record.update({'feat': feat})
return record
@staticmethod
def _get_chunks(seg_dur, audio_id, audio_duration):
num_chunks = int(audio_duration / seg_dur)  # durations are in seconds
chunk_lst = [
audio_id + "_" + str(i * seg_dur) + "_" + str(i * seg_dur + seg_dur)
for i in range(num_chunks)
]
return chunk_lst
def _get_audio_info(self, wav_file: str,
split_chunks: bool) -> List[List[str]]:
waveform, sr = load_audio(wav_file)
audio_id = wav_file.split("/open_rir_noise/")[-1].split(".")[0]
audio_duration = waveform.shape[0] / sr
ret = []
if split_chunks and audio_duration > self.chunk_duration: # Split into pieces of self.chunk_duration seconds.
uniq_chunks_list = self._get_chunks(self.chunk_duration, audio_id,
audio_duration)
for idx, chunk in enumerate(uniq_chunks_list):
s, e = chunk.split("_")[-2:] # Timestamps of start and end
start_sample = int(float(s) * sr)
end_sample = int(float(e) * sr)
new_wav_file = os.path.join(self.base_path,
audio_id + f'_chunk_{idx+1:02}.wav')
save_wav(waveform[start_sample:end_sample], sr, new_wav_file)
# id, duration, new_wav
ret.append([chunk, self.chunk_duration, new_wav_file])
else: # Keep whole audio.
ret.append([audio_id, audio_duration, wav_file])
return ret
def generate_csv(self,
wav_files: List[str],
output_file: str,
split_chunks: bool=True):
print(f'Generating csv: {output_file}')
header = ["id", "duration", "wav"]
infos = list(
tqdm(
map(self._get_audio_info, wav_files, [split_chunks] * len(
wav_files)),
total=len(wav_files)))
csv_lines = []
for info in infos:
csv_lines.extend(info)
with open(output_file, mode="w") as csv_f:
csv_writer = csv.writer(
csv_f, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL)
csv_writer.writerow(header)
for line in csv_lines:
csv_writer.writerow(line)
def prepare_data(self):
rir_list = os.path.join(self.wav_path, "real_rirs_isotropic_noises",
"rir_list")
rir_files = []
with open(rir_list, 'r') as f:
for line in f.readlines():
rir_file = line.strip().split(' ')[-1]
rir_files.append(os.path.join(self.base_path, rir_file))
noise_list = os.path.join(self.wav_path, "pointsource_noises",
"noise_list")
noise_files = []
with open(noise_list, 'r') as f:
for line in f.readlines():
noise_file = line.strip().split(' ')[-1]
noise_files.append(os.path.join(self.base_path, noise_file))
self.generate_csv(rir_files, os.path.join(self.csv_path, 'rir.csv'))
self.generate_csv(noise_files, os.path.join(self.csv_path, 'noise.csv'))
def __getitem__(self, idx):
return self._convert_to_record(idx)
def __len__(self):
return len(self._data)
```
#### File: paddleaudio/metric/eer.py
```python
from typing import List
import numpy as np
import paddle
from sklearn.metrics import roc_curve
def compute_eer(labels: np.ndarray, scores: np.ndarray) -> List[float]:
"""Compute EER and return score threshold.
Args:
labels (np.ndarray): the trial labels, shape: [N], one-dimensional, where N is the number of samples
scores (np.ndarray): the trial scores, shape: [N], one-dimensional, where N is the number of samples
Returns:
List[float]: eer and the specific threshold
"""
fpr, tpr, threshold = roc_curve(y_true=labels, y_score=scores)
fnr = 1 - tpr
eer_threshold = threshold[np.nanargmin(np.absolute((fnr - fpr)))]
eer = fpr[np.nanargmin(np.absolute((fnr - fpr)))]
return eer, eer_threshold
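# Illustrative usage (made-up scores):
#   labels = np.array([1, 1, 0, 0])
#   scores = np.array([0.9, 0.4, 0.6, 0.1])
#   eer, threshold = compute_eer(labels, scores)
# EER is the error rate at the operating point where the false-reject rate (fnr)
# equals the false-accept rate (fpr).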
def compute_minDCF(positive_scores,
negative_scores,
c_miss=1.0,
c_fa=1.0,
p_target=0.01):
"""
This is modified from SpeechBrain
https://github.com/speechbrain/speechbrain/blob/085be635c07f16d42cd1295045bc46c407f1e15b/speechbrain/utils/metric_stats.py#L509
Computes the minDCF metric normally used to evaluate speaker verification
systems. The min_DCF is the minimum of the following C_det function computed
within the defined threshold range:
C_det = c_miss * p_miss * p_target + c_fa * p_fa * (1 -p_target)
where p_miss is the miss probability and p_fa is the probability of having
a false alarm.
Args:
positive_scores (Paddle.Tensor): The scores from entries of the same class.
negative_scores (Paddle.Tensor): The scores from entries of different classes.
c_miss (float, optional): Cost assigned to a missing error (default 1.0).
c_fa (float, optional): Cost assigned to a false alarm (default 1.0).
p_target (float, optional): Prior probability of having a target (default 0.01).
Returns:
List[float]: min dcf and the specific threshold
"""
# Computing candidate thresholds
if len(positive_scores.shape) > 1:
positive_scores = positive_scores.squeeze()
if len(negative_scores.shape) > 1:
negative_scores = negative_scores.squeeze()
thresholds = paddle.sort(paddle.concat([positive_scores, negative_scores]))
thresholds = paddle.unique(thresholds)
# Adding intermediate thresholds
interm_thresholds = (thresholds[0:-1] + thresholds[1:]) / 2
thresholds = paddle.sort(paddle.concat([thresholds, interm_thresholds]))
# Computing False Rejection Rate (miss detection)
positive_scores = paddle.concat(
len(thresholds) * [positive_scores.unsqueeze(0)])
pos_scores_threshold = positive_scores.transpose(perm=[1, 0]) <= thresholds
p_miss = (pos_scores_threshold.sum(0)
).astype("float32") / positive_scores.shape[1]
del positive_scores
del pos_scores_threshold
# Computing False Acceptance Rate (false alarm)
negative_scores = paddle.concat(
len(thresholds) * [negative_scores.unsqueeze(0)])
neg_scores_threshold = negative_scores.transpose(perm=[1, 0]) > thresholds
p_fa = (neg_scores_threshold.sum(0)
).astype("float32") / negative_scores.shape[1]
del negative_scores
del neg_scores_threshold
c_det = c_miss * p_miss * p_target + c_fa * p_fa * (1 - p_target)
c_min = paddle.min(c_det, axis=0)
min_index = paddle.argmin(c_det, axis=0)
return float(c_min), float(thresholds[min_index])
```
#### File: tests/backends/base.py
```python
import os
import unittest
import urllib.request
mono_channel_wav = 'https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav'
multi_channels_wav = 'https://paddlespeech.bj.bcebos.com/PaddleAudio/cat.wav'
class BackendTest(unittest.TestCase):
def setUp(self):
self.initWavInput()
def initWavInput(self):
self.files = []
for url in [mono_channel_wav, multi_channels_wav]:
if not os.path.isfile(os.path.basename(url)):
urllib.request.urlretrieve(url, os.path.basename(url))
self.files.append(os.path.basename(url))
def initParmas(self):
raise NotImplementedError
```
#### File: src/operations/count.py
```python
import sys
from config import DEFAULT_TABLE
from logs import LOGGER
def do_count(table_name, milvus_cli):
"""
Returns the total number of vectors in the system
"""
if not table_name:
table_name = DEFAULT_TABLE
try:
if not milvus_cli.has_collection(table_name):
return None
num = milvus_cli.count(table_name)
return num
except Exception as e:
LOGGER.error(f"Error attempting to count table {e}")
sys.exit(1)
def do_count_vpr(table_name, mysql_cli):
"""
Returns the total number of spk in the system
"""
if not table_name:
table_name = DEFAULT_TABLE
try:
num = mysql_cli.count_table(table_name)
return num
except Exception as e:
LOGGER.error(f"Error attempting to count table {e}")
sys.exit(1)
def do_list(table_name, mysql_cli):
"""
Returns the total records of vpr in the system
"""
if not table_name:
table_name = DEFAULT_TABLE
try:
spk_ids, audio_paths, _ = mysql_cli.list_vpr(table_name)
return spk_ids, audio_paths
except Exception as e:
LOGGER.error(f"Error attempting to count table {e}")
sys.exit(1)
def do_get(table_name, spk_id, mysql_cli):
"""
Returns the audio path by spk_id in the system
"""
if not table_name:
table_name = DEFAULT_TABLE
try:
audio_path = mysql_cli.search_audio_vpr(table_name, spk_id)
return audio_path
except Exception as e:
LOGGER.error(f"Error attempting to get the audio path {e}")
sys.exit(1)
```
#### File: src/operations/search.py
```python
import sys
import numpy
from config import DEFAULT_TABLE
from config import TOP_K
from encode import get_audio_embedding
from logs import LOGGER
def do_search(host, table_name, audio_path, milvus_cli, mysql_cli):
"""
Search the uploaded audio in Milvus/MySQL
"""
try:
if not table_name:
table_name = DEFAULT_TABLE
feat = get_audio_embedding(audio_path)
vectors = milvus_cli.search_vectors(table_name, [feat], TOP_K)
vids = [str(x.id) for x in vectors[0]]
paths = mysql_cli.search_by_milvus_ids(vids, table_name)
distances = [x.distance for x in vectors[0]]
for i in range(len(paths)):
tmp = "http://" + str(host) + "/data?audio_path=" + str(paths[i])
paths[i] = tmp
distances[i] = (1 - distances[i]) * 100
return vids, paths, distances
except Exception as e:
LOGGER.error(f"Error with search: {e}")
sys.exit(1)
def do_search_vpr(host, table_name, audio_path, mysql_cli):
"""
Search the uploaded audio in MySQL
"""
try:
if not table_name:
table_name = DEFAULT_TABLE
emb = get_audio_embedding(audio_path)
emb = numpy.array(emb)
spk_ids, paths, vectors = mysql_cli.list_vpr(table_name)
scores = [numpy.dot(emb, x.astype(numpy.float64)) for x in vectors]
spk_ids = [str(x) for x in spk_ids]
paths = [str(x) for x in paths]
for i in range(len(paths)):
tmp = "http://" + str(host) + "/data?audio_path=" + str(paths[i])
paths[i] = tmp
scores[i] = scores[i] * 100
return spk_ids, paths, scores
except Exception as e:
LOGGER.error(f"Error with search: {e}")
sys.exit(1)
```
#### File: streaming_asr_server/web/app.py
```python
import argparse
from flask import Flask
from flask import render_template
parser = argparse.ArgumentParser(description='streaming ASR web demo')
parser.add_argument('--port', default=19999, type=int, help='port id')
args = parser.parse_args()
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html')
if __name__ == '__main__':
app.run(host='0.0.0.0', port=args.port, debug=True)
```
#### File: sd0/local/ami_splits.py
```python
ALLOWED_OPTIONS = ["scenario_only", "full_corpus", "full_corpus_asr"]
def get_AMI_split(split_option):
"""
Prepares train, dev, and test sets for given split_option
Arguments
---------
split_option: str
The standard split option.
Allowed options: "scenario_only", "full_corpus", "full_corpus_asr"
Returns
-------
Meeting IDs for train, dev, and test sets for given split_option
"""
if split_option not in ALLOWED_OPTIONS:
print(
f'Invalid split "{split_option}" requested!\nValid split_options are: ',
ALLOWED_OPTIONS, )
return
if split_option == "scenario_only":
train_set = [
"ES2002",
"ES2005",
"ES2006",
"ES2007",
"ES2008",
"ES2009",
"ES2010",
"ES2012",
"ES2013",
"ES2015",
"ES2016",
"IS1000",
"IS1001",
"IS1002",
"IS1003",
"IS1004",
"IS1005",
"IS1006",
"IS1007",
"TS3005",
"TS3008",
"TS3009",
"TS3010",
"TS3011",
"TS3012",
]
dev_set = [
"ES2003",
"ES2011",
"IS1008",
"TS3004",
"TS3006",
]
test_set = [
"ES2004",
"ES2014",
"IS1009",
"TS3003",
"TS3007",
]
if split_option == "full_corpus":
# List of train: SA (TRAINING PART OF SEEN DATA)
train_set = [
"ES2002",
"ES2005",
"ES2006",
"ES2007",
"ES2008",
"ES2009",
"ES2010",
"ES2012",
"ES2013",
"ES2015",
"ES2016",
"IS1000",
"IS1001",
"IS1002",
"IS1003",
"IS1004",
"IS1005",
"IS1006",
"IS1007",
"TS3005",
"TS3008",
"TS3009",
"TS3010",
"TS3011",
"TS3012",
"EN2001",
"EN2003",
"EN2004",
"EN2005",
"EN2006",
"EN2009",
"IN1001",
"IN1002",
"IN1005",
"IN1007",
"IN1008",
"IN1009",
"IN1012",
"IN1013",
"IN1014",
"IN1016",
]
# List of dev: SB (DEV PART OF SEEN DATA)
dev_set = [
"ES2003",
"ES2011",
"IS1008",
"TS3004",
"TS3006",
"IB4001",
"IB4002",
"IB4003",
"IB4004",
"IB4010",
"IB4011",
]
# List of test: SC (UNSEEN DATA FOR EVALUATION)
# Note that IB4005 does not appear because it has speakers in common with two sets of data.
test_set = [
"ES2004",
"ES2014",
"IS1009",
"TS3003",
"TS3007",
"EN2002",
]
if split_option == "full_corpus_asr":
train_set = [
"ES2002",
"ES2003",
"ES2005",
"ES2006",
"ES2007",
"ES2008",
"ES2009",
"ES2010",
"ES2012",
"ES2013",
"ES2014",
"ES2015",
"ES2016",
"IS1000",
"IS1001",
"IS1002",
"IS1003",
"IS1004",
"IS1005",
"IS1006",
"IS1007",
"TS3005",
"TS3006",
"TS3007",
"TS3008",
"TS3009",
"TS3010",
"TS3011",
"TS3012",
"EN2001",
"EN2003",
"EN2004",
"EN2005",
"EN2006",
"EN2009",
"IN1001",
"IN1002",
"IN1005",
"IN1007",
"IN1008",
"IN1009",
"IN1012",
"IN1013",
"IN1014",
"IN1016",
]
dev_set = [
"ES2011",
"IS1008",
"TS3004",
"IB4001",
"IB4002",
"IB4003",
"IB4004",
"IB4010",
"IB4011",
]
test_set = [
"ES2004",
"IS1009",
"TS3003",
"EN2002",
]
return train_set, dev_set, test_set
```
#### File: sd0/local/compute_embdding.py
```python
import argparse
import json
import os
import pickle
import sys
import numpy as np
import paddle
from paddle.io import BatchSampler
from paddle.io import DataLoader
from tqdm.contrib import tqdm
from yacs.config import CfgNode
from paddlespeech.s2t.utils.log import Log
from paddlespeech.vector.cluster.diarization import EmbeddingMeta
from paddlespeech.vector.io.batch import batch_feature_normalize
from paddlespeech.vector.io.dataset_from_json import JSONDataset
from paddlespeech.vector.models.ecapa_tdnn import EcapaTdnn
from paddlespeech.vector.modules.sid_model import SpeakerIdetification
from paddlespeech.vector.training.seeding import seed_everything
# Logger setup
logger = Log(__name__).getlog()
def prepare_subset_json(full_meta_data, rec_id, out_meta_file):
"""Prepares metadata for a given recording ID.
Arguments
---------
full_meta_data : json
Full meta (json) containing all the recordings
rec_id : str
The recording ID for which meta (json) has to be prepared
out_meta_file : str
Path of the output meta (json) file.
"""
subset = {}
for key in full_meta_data:
k = str(key)
if k.startswith(rec_id):
subset[key] = full_meta_data[key]
with open(out_meta_file, mode="w") as json_f:
json.dump(subset, json_f, indent=2)
def create_dataloader(json_file, batch_size):
"""Creates the datasets and their data processing pipelines.
This is used for multi-mic processing.
"""
# create datasets
dataset = JSONDataset(
json_file=json_file,
feat_type='melspectrogram',
n_mels=config.n_mels,
window_size=config.window_size,
hop_length=config.hop_size)
# create dataloader
batch_sampler = BatchSampler(dataset, batch_size=batch_size, shuffle=True)
dataloader = DataLoader(dataset,
batch_sampler=batch_sampler,
collate_fn=lambda x: batch_feature_normalize(
x, mean_norm=True, std_norm=False),
return_list=True)
return dataloader
def main(args, config):
# set the training device, cpu or gpu
paddle.set_device(args.device)
# set the random seed
seed_everything(config.seed)
# stage1: build the dnn backbone model network
ecapa_tdnn = EcapaTdnn(**config.model)
# stage2: build the speaker verification eval instance with backbone model
model = SpeakerIdetification(backbone=ecapa_tdnn, num_class=1)
# stage3: load the pre-trained model
# we get the last model from the epoch and save_interval
args.load_checkpoint = os.path.abspath(
os.path.expanduser(args.load_checkpoint))
# load model checkpoint to sid model
state_dict = paddle.load(
os.path.join(args.load_checkpoint, 'model.pdparams'))
model.set_state_dict(state_dict)
logger.info(f'Checkpoint loaded from {args.load_checkpoint}')
# set the model to eval mode
model.eval()
# load meta data
meta_file = os.path.join(
args.data_dir,
config.meta_data_dir,
"ami_" + args.dataset + "." + config.mic_type + ".subsegs.json", )
with open(meta_file, "r") as f:
full_meta = json.load(f)
# get all the recording IDs in this dataset.
all_keys = full_meta.keys()
A = [word.rstrip().split("_")[0] for word in all_keys]
all_rec_ids = list(set(A[1:]))
all_rec_ids.sort()
split = "AMI_" + args.dataset
i = 1
msg = "Extra embdding for " + args.dataset + " set"
logger.info(msg)
if len(all_rec_ids) <= 0:
msg = "No recording IDs found! Please check if meta_data json file is properly generated."
logger.error(msg)
sys.exit()
# extract embeddings for each recording in the dataset.
for rec_id in tqdm(all_rec_ids):
# This tag will be displayed in the log.
tag = ("[" + str(args.dataset) + ": " + str(i) + "/" +
str(len(all_rec_ids)) + "]")
i = i + 1
# log message.
msg = "Embdding %s : %s " % (tag, rec_id)
logger.debug(msg)
# embedding directory.
if not os.path.exists(
os.path.join(args.data_dir, config.embedding_dir, split)):
os.makedirs(
os.path.join(args.data_dir, config.embedding_dir, split))
# file to store embeddings.
emb_file_name = rec_id + "." + config.mic_type + ".emb_stat.pkl"
diary_stat_emb_file = os.path.join(args.data_dir, config.embedding_dir,
split, emb_file_name)
# prepare a metadata (json) for one recording. This is basically a subset of full_meta.
# lets keep this meta-info in embedding directory itself.
json_file_name = rec_id + "." + config.mic_type + ".json"
meta_per_rec_file = os.path.join(args.data_dir, config.embedding_dir,
split, json_file_name)
# write subset (meta for one recording) json metadata.
prepare_subset_json(full_meta, rec_id, meta_per_rec_file)
# prepare data loader.
diary_set_loader = create_dataloader(meta_per_rec_file,
config.batch_size)
# extract embeddings (skip if already done).
if not os.path.isfile(diary_stat_emb_file):
logger.debug("Extracting deep embeddings")
embeddings = np.empty(shape=[0, config.emb_dim], dtype=np.float64)
segset = []
for batch_idx, batch in enumerate(tqdm(diary_set_loader)):
# extract the audio embedding
ids, feats, lengths = batch['ids'], batch['feats'], batch[
'lengths']
seg = [x for x in ids]
segset = segset + seg
emb = model.backbone(feats, lengths).squeeze(
-1).numpy() # (N, emb_size, 1) -> (N, emb_size)
embeddings = np.concatenate((embeddings, emb), axis=0)
segset = np.array(segset, dtype="|O")
stat_obj = EmbeddingMeta(
segset=segset,
stats=embeddings, )
logger.debug("Saving Embeddings...")
with open(diary_stat_emb_file, "wb") as output:
pickle.dump(stat_obj, output)
else:
logger.debug("Skipping embedding extraction (as already present).")
# Begin experiment!
if __name__ == "__main__":
parser = argparse.ArgumentParser(__doc__)
parser.add_argument(
'--device',
default="gpu",
help="Select which device to perform diarization, defaults to gpu.")
parser.add_argument(
"--config", default=None, type=str, help="configuration file")
parser.add_argument(
"--data-dir",
default="../save/",
type=str,
help="processsed data directory")
parser.add_argument(
"--dataset",
choices=['dev', 'eval'],
default="dev",
type=str,
help="Select which dataset to extra embdding, defaults to dev")
parser.add_argument(
"--load-checkpoint",
type=str,
default='',
help="Directory to load model checkpoint to compute embeddings.")
args = parser.parse_args()
config = CfgNode(new_allowed=True)
if args.config:
config.merge_from_file(args.config)
config.freeze()
main(args, config)
```
#### File: sv0/local/make_rirs_noise_csv_dataset_from_json.py
```python
import argparse
import csv
import os
from typing import List
import tqdm
from paddleaudio import load as load_audio
from yacs.config import CfgNode
from paddlespeech.s2t.utils.log import Log
from paddlespeech.vector.utils.vector_utils import get_chunks
logger = Log(__name__).getlog()
def get_chunks_list(wav_file: str,
split_chunks: bool,
base_path: str,
chunk_duration: float=3.0) -> List[List[str]]:
"""Get the single audio file info
Args:
wav_file (list): the wav audio file and get this audio segment info list
split_chunks (bool): audio split flag
base_path (str): the audio base path
chunk_duration (float): the chunk duration.
if set the split_chunks, we split the audio into multi-chunks segment.
"""
waveform, sr = load_audio(wav_file)
audio_id = wav_file.split("/rir_noise/")[-1].split(".")[0]
audio_duration = waveform.shape[0] / sr
ret = []
if split_chunks and audio_duration > chunk_duration: # Split into pieces of self.chunk_duration seconds.
uniq_chunks_list = get_chunks(chunk_duration, audio_id, audio_duration)
for idx, chunk in enumerate(uniq_chunks_list):
s, e = chunk.split("_")[-2:] # Timestamps of start and end
start_sample = int(float(s) * sr)
end_sample = int(float(e) * sr)
# currently, all vector csv data format use one representation
# id, duration, wav, start, stop, label
# in rirs noise, all the label name is 'noise'
# the label is string type and we will convert it to integer type in training
ret.append([
chunk, audio_duration, wav_file, start_sample, end_sample,
"noise"
])
else: # Keep whole audio.
ret.append(
[audio_id, audio_duration, wav_file, 0, waveform.shape[0], "noise"])
return ret
def generate_csv(wav_files,
output_file: str,
base_path: str,
split_chunks: bool=True):
"""Prepare the csv file according the wav files
Args:
wav_files (list): all the audio list to prepare the csv file
output_file (str): the output csv file
config (CfgNode): yaml configuration content
split_chunks (bool): audio split flag
"""
logger.info(f'Generating csv: {output_file}')
header = ["utt_id", "duration", "wav", "start", "stop", "label"]
csv_lines = []
for item in tqdm.tqdm(wav_files):
csv_lines.extend(
get_chunks_list(
item, base_path=base_path, split_chunks=split_chunks))
if not os.path.exists(os.path.dirname(output_file)):
os.makedirs(os.path.dirname(output_file))
with open(output_file, mode="w") as csv_f:
csv_writer = csv.writer(
csv_f, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL)
csv_writer.writerow(header)
for line in csv_lines:
csv_writer.writerow(line)
def prepare_data(args, config):
"""Convert the jsonline format to csv format
Args:
args (argparse.Namespace): scripts args
config (CfgNode): yaml configuration content
"""
# if external config set the skip_prep flat, we will do nothing
if config.skip_prep:
return
base_path = args.noise_dir
wav_path = os.path.join(base_path, "RIRS_NOISES")
logger.info(f"base path: {base_path}")
logger.info(f"wav path: {wav_path}")
rir_list = os.path.join(wav_path, "real_rirs_isotropic_noises", "rir_list")
rir_files = []
with open(rir_list, 'r') as f:
for line in f.readlines():
rir_file = line.strip().split(' ')[-1]
rir_files.append(os.path.join(base_path, rir_file))
noise_list = os.path.join(wav_path, "pointsource_noises", "noise_list")
noise_files = []
with open(noise_list, 'r') as f:
for line in f.readlines():
noise_file = line.strip().split(' ')[-1]
noise_files.append(os.path.join(base_path, noise_file))
csv_path = os.path.join(args.data_dir, 'csv')
logger.info(f"csv path: {csv_path}")
generate_csv(
rir_files, os.path.join(csv_path, 'rir.csv'), base_path=base_path)
generate_csv(
noise_files, os.path.join(csv_path, 'noise.csv'), base_path=base_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--noise_dir",
default=None,
required=True,
help="The noise dataset dataset directory.")
parser.add_argument(
"--data_dir",
default=None,
required=True,
help="The target directory stores the csv files")
parser.add_argument(
"--config",
default=None,
required=True,
type=str,
help="configuration file")
args = parser.parse_args()
# parse the yaml config file
config = CfgNode(new_allowed=True)
if args.config:
config.merge_from_file(args.config)
# prepare the csv file from jsonlines files
prepare_data(args, config)
```
#### File: sv0/local/make_voxceleb_kaldi_trial.py
```python
import argparse
import codecs
import os
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--voxceleb_trial",
default="voxceleb1_test_v2",
type=str,
help="VoxCeleb trial file. Default we use the kaldi trial voxceleb1_test_v2.txt"
)
parser.add_argument(
"--trial",
default="data/test/trial",
type=str,
help="Kaldi format trial file")
args = parser.parse_args()
def main(voxceleb_trial, trial):
"""
VoxCeleb provide several trial file, which format is different with kaldi format.
VoxCeleb format's meaning is as following:
--------------------------------
target_or_nontarget path1 path2
--------------------------------
target_or_nontarget is an integer: 1 target path1 is equal to path2
0 nontarget path1 is unequal to path2
path1: spkr_id/rec_id/name
path2: spkr_id/rec_id/name
Kaldi format's meaning is as following:
---------------------------------------
utt_id1 utt_id2 target_or_nontarget
---------------------------------------
utt_id1: utterance identification or speaker identification
utt_id2: utterance identification or speaker identification
target_or_nontarget is an string: 'target' utt_id1 is equal to utt_id2
'nontarget' utt_id1 is unequal to utt_id2
"""
print("Start convert the voxceleb trial to kaldi format")
if not os.path.exists(voxceleb_trial):
raise RuntimeError(
"{} does not exist. Please provide the correct file path".format(
voxceleb_trial))
trial_dirname = os.path.dirname(trial)
if not os.path.exists(trial_dirname):
os.mkdir(trial_dirname)
with codecs.open(voxceleb_trial, 'r', encoding='utf-8') as f, \
codecs.open(trial, 'w', encoding='utf-8') as w:
for line in f:
target_or_nontarget, path1, path2 = line.strip().split()
utt_id1 = "-".join(path1.split("/"))
utt_id2 = "-".join(path2.split("/"))
target = "nontarget"
if int(target_or_nontarget):
target = "target"
w.write("{} {} {}\n".format(utt_id1, utt_id2, target))
print("Convert the voxceleb trial to kaldi format successfully")
if __name__ == "__main__":
main(args.voxceleb_trial, args.trial)
```
#### File: paddlespeech/cli/executor.py
```python
import logging
import os
import sys
from abc import ABC
from abc import abstractmethod
from collections import OrderedDict
from typing import Any
from typing import Dict
from typing import List
from typing import Union
import paddle
from .log import logger
from .utils import download_and_decompress
from .utils import MODEL_HOME
class BaseExecutor(ABC):
"""
An abstract executor of paddlespeech tasks.
"""
def __init__(self):
self._inputs = OrderedDict()
self._outputs = OrderedDict()
self.pretrained_models = OrderedDict()
self.model_alias = OrderedDict()
@abstractmethod
def _init_from_path(self, *args, **kwargs):
"""
Init model and other resources from arguments. This method should be called by `__call__()`.
"""
pass
@abstractmethod
def preprocess(self, input: Any, *args, **kwargs):
"""
Input preprocess and return paddle.Tensor stored in self._inputs.
Input content can be a text(tts), a file(asr, cls), a stream(not supported yet) or anything needed.
Args:
input (Any): Input text/file/stream or other content.
"""
pass
@paddle.no_grad()
@abstractmethod
def infer(self, *args, **kwargs):
"""
Model inference and put results into self._outputs.
This method get input tensors from self._inputs, and write output tensors into self._outputs.
"""
pass
@abstractmethod
def postprocess(self, *args, **kwargs) -> Union[str, os.PathLike]:
"""
Output postprocess and return results.
This method get model output from self._outputs and convert it into human-readable results.
Returns:
Union[str, os.PathLike]: Human-readable results such as texts and audio files.
"""
pass
@abstractmethod
def execute(self, argv: List[str]) -> bool:
"""
Command line entry. This method can only be accessed by a command line such as `paddlespeech asr`.
Args:
argv (List[str]): Arguments from command line.
Returns:
bool: Result of the command execution. `True` for a success and `False` for a failure.
"""
pass
@abstractmethod
def __call__(self, *arg, **kwargs):
"""
Python API to call an executor.
"""
pass
def get_task_source(self, input_: Union[str, os.PathLike, None]
) -> Dict[str, Union[str, os.PathLike]]:
"""
Get task input source from command line input.
Args:
input_ (Union[str, os.PathLike, None]): Input from command line.
Returns:
Dict[str, Union[str, os.PathLike]]: A dict with ids and inputs.
"""
if self._is_job_input(input_):
ret = self._get_job_contents(input_)
else:
ret = OrderedDict()
if input_ is None: # Take input from stdin
for i, line in enumerate(sys.stdin):
line = line.strip()
if len(line.split(' ')) == 1:
ret[str(i + 1)] = line
elif len(line.split(' ')) == 2:
id_, info = line.split(' ')
ret[id_] = info
else: # No valid input info from one line.
continue
else:
ret[1] = input_
return ret
def process_task_results(self,
input_: Union[str, os.PathLike, None],
results: Dict[str, os.PathLike],
job_dump_result: bool=False):
"""
Handling task results and redirect stdout if needed.
Args:
input_ (Union[str, os.PathLike, None]): Input from command line.
results (Dict[str, os.PathLike]): Task outputs.
job_dump_result (bool, optional): if True, dumps job results into file. Defaults to False.
"""
if not self._is_job_input(input_) and len(
results) == 1: # Only one input sample
raw_text = list(results.values())[0]
else:
raw_text = self._format_task_results(results)
print(raw_text, end='') # Stdout
if self._is_job_input(
input_) and job_dump_result: # Dump to *.job.done
try:
job_output_file = os.path.abspath(input_) + '.done'
sys.stdout = open(job_output_file, 'w')
print(raw_text, end='')
                logger.info(f'Results have been saved to: {job_output_file}')
finally:
sys.stdout.close()
def _is_job_input(self, input_: Union[str, os.PathLike]) -> bool:
"""
Check if current input file is a job input or not.
Args:
input_ (Union[str, os.PathLike]): Input file of current task.
Returns:
bool: return `True` for job input, `False` otherwise.
"""
return input_ and os.path.isfile(input_) and (input_.endswith('.job') or
input_.endswith('.txt'))
def _get_job_contents(
self, job_input: os.PathLike) -> Dict[str, Union[str, os.PathLike]]:
"""
Read a job input file and return its contents in a dictionary.
Args:
job_input (os.PathLike): The job input file.
Returns:
Dict[str, str]: Contents of job input.
"""
job_contents = OrderedDict()
with open(job_input) as f:
for line in f:
line = line.strip()
if not line:
continue
k, v = line.split(' ')
job_contents[k] = v
return job_contents
def _format_task_results(
self, results: Dict[str, Union[str, os.PathLike]]) -> str:
"""
Convert task results to raw text.
Args:
results (Dict[str, str]): A dictionary of task results.
Returns:
str: A string object contains task results.
"""
ret = ''
for k, v in results.items():
ret += f'{k} {v}\n'
return ret
def disable_task_loggers(self):
"""
Disable all loggers in current task.
"""
loggers = [
logging.getLogger(name) for name in logging.root.manager.loggerDict
]
for l in loggers:
l.disabled = True
def _get_pretrained_path(self, tag: str) -> os.PathLike:
"""
        Download and return the pretrained resources path of the current task.
"""
support_models = list(self.pretrained_models.keys())
        assert tag in self.pretrained_models, 'The model "{}" you want to use is not supported, please choose another model.\nThe supported models include:\n\t\t{}\n'.format(
tag, '\n\t\t'.join(support_models))
res_path = os.path.join(MODEL_HOME, tag)
decompressed_path = download_and_decompress(self.pretrained_models[tag],
res_path)
decompressed_path = os.path.abspath(decompressed_path)
logger.info(
'Use pretrained model stored in: {}'.format(decompressed_path))
return decompressed_path
def show_rtf(self, info: Dict[str, List[float]]):
"""
        Calculate the RTF (real-time factor) of the current task and show the results.
"""
num_samples = 0
task_duration = 0.0
wav_duration = 0.0
for start, end, dur in zip(info['start'], info['end'], info['extra']):
num_samples += 1
task_duration += end - start
wav_duration += dur
logger.info('Sample Count: {}'.format(num_samples))
logger.info('Avg RTF: {}'.format(task_duration / wav_duration))
```
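To make the call flow above concrete, here is a minimal, illustrative subclass sketch. `EchoExecutor` and its trivial methods are hypothetical and only echo their input; the sketch assumes `paddlespeech` is installed so `BaseExecutor` can be imported, and targets the base class as shown above (newer releases may differ slightly).
```python
# Hypothetical toy executor; it shows how the abstract methods and the
# _inputs/_outputs dicts fit together, not a real paddlespeech task.
import os
from typing import List, Union

from paddlespeech.cli.executor import BaseExecutor


class EchoExecutor(BaseExecutor):
    def _init_from_path(self, *args, **kwargs):
        pass  # no model resources needed for this toy example

    def preprocess(self, input: Union[str, os.PathLike], *args, **kwargs):
        self._inputs['text'] = input

    def infer(self, *args, **kwargs):
        self._outputs['text'] = self._inputs['text']

    def postprocess(self, *args, **kwargs) -> Union[str, os.PathLike]:
        return self._outputs['text']

    def execute(self, argv: List[str]) -> bool:
        # Treat the first CLI argument as the input (a plain string or a *.job file).
        source = argv[0] if argv else None
        results = {k: self(v) for k, v in self.get_task_source(source).items()}
        self.process_task_results(source, results)
        return True

    def __call__(self, text: str) -> str:
        self.preprocess(text)
        self.infer()
        return self.postprocess()


if __name__ == '__main__':
    print(EchoExecutor()('hello paddlespeech'))  # -> hello paddlespeech
```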
#### File: cli/stats/infer.py
```python
import argparse
from typing import List
from prettytable import PrettyTable
from ..utils import cli_register
from ..utils import stats_wrapper
__all__ = ['StatsExecutor']
model_name_format = {
'asr': 'Model-Language-Sample Rate',
'cls': 'Model-Sample Rate',
'st': 'Model-Source language-Target language',
'text': 'Model-Task-Language',
'tts': 'Model-Language',
'vector': 'Model-Sample Rate'
}
@cli_register(
name='paddlespeech.stats',
description='Get speech tasks support models list.')
class StatsExecutor():
def __init__(self):
super().__init__()
self.parser = argparse.ArgumentParser(
prog='paddlespeech.stats', add_help=True)
self.task_choices = ['asr', 'cls', 'st', 'text', 'tts', 'vector']
self.parser.add_argument(
'--task',
type=str,
default='asr',
choices=self.task_choices,
help='Choose speech task.',
required=True)
def show_support_models(self, pretrained_models: dict):
fields = model_name_format[self.task].split("-")
table = PrettyTable(fields)
for key in pretrained_models:
table.add_row(key.split("-"))
print(table)
def execute(self, argv: List[str]) -> bool:
"""
Command line entry.
"""
parser_args = self.parser.parse_args(argv)
has_exceptions = False
try:
self(parser_args.task)
except Exception as e:
has_exceptions = True
if has_exceptions:
return False
else:
return True
@stats_wrapper
def __call__(
self,
task: str=None, ):
"""
Python API to call an executor.
"""
self.task = task
if self.task not in self.task_choices:
print("Please input correct speech task, choices = " + str(
self.task_choices))
elif self.task == 'asr':
try:
from ..asr.pretrained_models import pretrained_models
print(
"Here is the list of ASR pretrained models released by PaddleSpeech that can be used by command line and python API"
)
self.show_support_models(pretrained_models)
except BaseException:
print("Failed to get the list of ASR pretrained models.")
elif self.task == 'cls':
try:
from ..cls.pretrained_models import pretrained_models
print(
"Here is the list of CLS pretrained models released by PaddleSpeech that can be used by command line and python API"
)
self.show_support_models(pretrained_models)
except BaseException:
print("Failed to get the list of CLS pretrained models.")
elif self.task == 'st':
try:
from ..st.pretrained_models import pretrained_models
print(
"Here is the list of ST pretrained models released by PaddleSpeech that can be used by command line and python API"
)
self.show_support_models(pretrained_models)
except BaseException:
print("Failed to get the list of ST pretrained models.")
elif self.task == 'text':
try:
from ..text.pretrained_models import pretrained_models
print(
"Here is the list of TEXT pretrained models released by PaddleSpeech that can be used by command line and python API"
)
self.show_support_models(pretrained_models)
except BaseException:
print("Failed to get the list of TEXT pretrained models.")
elif self.task == 'tts':
try:
from ..tts.pretrained_models import pretrained_models
print(
"Here is the list of TTS pretrained models released by PaddleSpeech that can be used by command line and python API"
)
self.show_support_models(pretrained_models)
except BaseException:
print("Failed to get the list of TTS pretrained models.")
elif self.task == 'vector':
try:
from ..vector.pretrained_models import pretrained_models
print(
"Here is the list of Speaker Recognition pretrained models released by PaddleSpeech that can be used by command line and python API"
)
self.show_support_models(pretrained_models)
except BaseException:
print(
"Failed to get the list of Speaker Recognition pretrained models."
)
```
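A short usage sketch for the stats command, assuming paddlespeech is installed and the class is importable at the path in the file header above; both calls simply print a PrettyTable of supported models.
```python
from paddlespeech.cli.stats.infer import StatsExecutor

stats = StatsExecutor()
stats(task='asr')                      # Python API: prints the table of ASR models
ok = stats.execute(['--task', 'tts'])  # CLI-style entry; returns True on success
print(ok)
```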
#### File: kws/models/loss.py
```python
import paddle
def padding_mask(lengths: paddle.Tensor) -> paddle.Tensor:
batch_size = lengths.shape[0]
max_len = int(lengths.max().item())
seq = paddle.arange(max_len, dtype=paddle.int64)
seq = seq.expand((batch_size, max_len))
return seq >= lengths.unsqueeze(1)
def fill_mask_elements(condition: paddle.Tensor, value: float,
x: paddle.Tensor) -> paddle.Tensor:
assert condition.shape == x.shape
values = paddle.ones_like(x, dtype=x.dtype) * value
return paddle.where(condition, values, x)
def max_pooling_loss(logits: paddle.Tensor,
target: paddle.Tensor,
lengths: paddle.Tensor,
min_duration: int=0):
mask = padding_mask(lengths)
num_utts = logits.shape[0]
num_keywords = logits.shape[2]
loss = 0.0
for i in range(num_utts):
for j in range(num_keywords):
            # Add cross-entropy loss CE = -(t * log(p) + (1 - t) * log(1 - p))
if target[i] == j:
                # For the keyword, do max-pooling
prob = logits[i, :, j]
m = mask[i]
if min_duration > 0:
m[:min_duration] = True
prob = fill_mask_elements(m, 0.0, prob)
prob = paddle.clip(prob, 1e-8, 1.0)
max_prob = prob.max()
loss += -paddle.log(max_prob)
else:
                # For other keywords or filler, do min-pooling
prob = 1 - logits[i, :, j]
prob = fill_mask_elements(mask[i], 1.0, prob)
prob = paddle.clip(prob, 1e-8, 1.0)
min_prob = prob.min()
loss += -paddle.log(min_prob)
loss = loss / num_utts
# Compute accuracy of current batch
mask = mask.unsqueeze(-1)
logits = fill_mask_elements(mask, 0.0, logits)
max_logits = logits.max(1)
num_correct = 0
for i in range(num_utts):
max_p = max_logits[i].max(0).item()
idx = max_logits[i].argmax(0).item()
# Predict correct as the i'th keyword
if max_p > 0.5 and idx == target[i].item():
num_correct += 1
# Predict correct as the filler, filler id < 0
if max_p < 0.5 and target[i].item() < 0:
num_correct += 1
acc = num_correct / num_utts
# acc = 0.0
return loss, num_correct, acc
```
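A minimal sketch exercising `max_pooling_loss` with synthetic posteriors, assuming the functions above are in scope. Shapes follow the code: `logits` is `(B, T, num_keywords)` with values in `[0, 1]`, `target` holds a keyword index per utterance (a negative id marks filler), and `lengths` gives the valid frame count per utterance; the sketch uses a single keyword plus filler utterances.
```python
import paddle

batch, time_steps, num_keywords = 4, 50, 1
# sigmoid keeps the fake posteriors in [0, 1], as the loss expects
logits = paddle.nn.functional.sigmoid(
    paddle.randn([batch, time_steps, num_keywords]))
target = paddle.to_tensor([0, 0, -1, 0], dtype='int64')    # -1 marks a filler utterance
lengths = paddle.to_tensor([50, 40, 30, 50], dtype='int64')

loss, num_correct, acc = max_pooling_loss(logits, target, lengths)
print(float(loss), num_correct, acc)
```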
#### File: s2t/modules/attention.py
```python
import math
from typing import Optional
from typing import Tuple
import paddle
from paddle import nn
from paddle.nn import initializer as I
from paddlespeech.s2t.modules.align import Linear
from paddlespeech.s2t.utils.log import Log
logger = Log(__name__).getlog()
__all__ = ["MultiHeadedAttention", "RelPositionMultiHeadedAttention"]
# Relative Positional Encodings
# https://www.jianshu.com/p/c0608efcc26f
# https://zhuanlan.zhihu.com/p/344604604
class MultiHeadedAttention(nn.Layer):
"""Multi-Head Attention layer."""
def __init__(self, n_head: int, n_feat: int, dropout_rate: float):
"""Construct an MultiHeadedAttention object.
Args:
n_head (int): The number of heads.
n_feat (int): The number of features.
dropout_rate (float): Dropout rate.
"""
super().__init__()
assert n_feat % n_head == 0
# We assume d_v always equals d_k
self.d_k = n_feat // n_head
self.h = n_head
self.linear_q = Linear(n_feat, n_feat)
self.linear_k = Linear(n_feat, n_feat)
self.linear_v = Linear(n_feat, n_feat)
self.linear_out = Linear(n_feat, n_feat)
self.dropout = nn.Dropout(p=dropout_rate)
def forward_qkv(self,
query: paddle.Tensor,
key: paddle.Tensor,
value: paddle.Tensor
) -> Tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor]:
"""Transform query, key and value.
Args:
query (paddle.Tensor): Query tensor (#batch, time1, size).
key (paddle.Tensor): Key tensor (#batch, time2, size).
value (paddle.Tensor): Value tensor (#batch, time2, size).
Returns:
paddle.Tensor: Transformed query tensor, size
(#batch, n_head, time1, d_k).
paddle.Tensor: Transformed key tensor, size
(#batch, n_head, time2, d_k).
paddle.Tensor: Transformed value tensor, size
(#batch, n_head, time2, d_k).
"""
n_batch = query.shape[0]
q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)
k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)
v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)
q = q.transpose([0, 2, 1, 3]) # (batch, head, time1, d_k)
k = k.transpose([0, 2, 1, 3]) # (batch, head, time2, d_k)
v = v.transpose([0, 2, 1, 3]) # (batch, head, time2, d_k)
return q, k, v
def forward_attention(self,
value: paddle.Tensor,
scores: paddle.Tensor,
mask: Optional[paddle.Tensor]) -> paddle.Tensor:
"""Compute attention context vector.
Args:
value (paddle.Tensor): Transformed value, size
(#batch, n_head, time2, d_k).
scores (paddle.Tensor): Attention score, size
(#batch, n_head, time1, time2).
mask (paddle.Tensor): Mask, size (#batch, 1, time2) or
(#batch, time1, time2).
Returns:
paddle.Tensor: Transformed value weighted
by the attention score, (#batch, time1, d_model).
"""
n_batch = value.shape[0]
if mask is not None:
mask = mask.unsqueeze(1).eq(0) # (batch, 1, *, time2)
scores = scores.masked_fill(mask, -float('inf'))
attn = paddle.softmax(
scores, axis=-1).masked_fill(mask,
0.0) # (batch, head, time1, time2)
else:
attn = paddle.softmax(
scores, axis=-1) # (batch, head, time1, time2)
p_attn = self.dropout(attn)
x = paddle.matmul(p_attn, value) # (batch, head, time1, d_k)
x = x.transpose([0, 2, 1, 3]).view(n_batch, -1, self.h *
self.d_k) # (batch, time1, d_model)
return self.linear_out(x) # (batch, time1, d_model)
def forward(self,
query: paddle.Tensor,
key: paddle.Tensor,
value: paddle.Tensor,
mask: Optional[paddle.Tensor]) -> paddle.Tensor:
"""Compute scaled dot product attention.
Args:
            query (paddle.Tensor): Query tensor (#batch, time1, size).
            key (paddle.Tensor): Key tensor (#batch, time2, size).
            value (paddle.Tensor): Value tensor (#batch, time2, size).
            mask (paddle.Tensor): Mask tensor (#batch, 1, time2) or
                (#batch, time1, time2).
        Returns:
            paddle.Tensor: Output tensor (#batch, time1, d_model).
"""
q, k, v = self.forward_qkv(query, key, value)
scores = paddle.matmul(q,
k.transpose([0, 1, 3, 2])) / math.sqrt(self.d_k)
return self.forward_attention(v, scores, mask)
class RelPositionMultiHeadedAttention(MultiHeadedAttention):
"""Multi-Head Attention layer with relative position encoding."""
def __init__(self, n_head, n_feat, dropout_rate):
"""Construct an RelPositionMultiHeadedAttention object.
Paper: https://arxiv.org/abs/1901.02860
Args:
n_head (int): The number of heads.
n_feat (int): The number of features.
dropout_rate (float): Dropout rate.
"""
super().__init__(n_head, n_feat, dropout_rate)
# linear transformation for positional encoding
self.linear_pos = Linear(n_feat, n_feat, bias_attr=False)
# these two learnable bias are used in matrix c and matrix d
# as described in https://arxiv.org/abs/1901.02860 Section 3.3
#self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))
#self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))
#torch.nn.init.xavier_uniform_(self.pos_bias_u)
#torch.nn.init.xavier_uniform_(self.pos_bias_v)
pos_bias_u = self.create_parameter(
[self.h, self.d_k], default_initializer=I.XavierUniform())
self.add_parameter('pos_bias_u', pos_bias_u)
pos_bias_v = self.create_parameter(
(self.h, self.d_k), default_initializer=I.XavierUniform())
self.add_parameter('pos_bias_v', pos_bias_v)
def rel_shift(self, x, zero_triu: bool=False):
"""Compute relative positinal encoding.
Args:
x (paddle.Tensor): Input tensor (batch, head, time1, time1).
zero_triu (bool): If true, return the lower triangular part of
the matrix.
Returns:
paddle.Tensor: Output tensor. (batch, head, time1, time1)
"""
zero_pad = paddle.zeros(
(x.shape[0], x.shape[1], x.shape[2], 1), dtype=x.dtype)
x_padded = paddle.cat([zero_pad, x], dim=-1)
x_padded = x_padded.view(x.shape[0], x.shape[1], x.shape[3] + 1,
x.shape[2])
x = x_padded[:, :, 1:].view_as(x) # [B, H, T1, T1]
if zero_triu:
ones = paddle.ones((x.shape[2], x.shape[3]))
x = x * paddle.tril(ones, x.shape[3] - x.shape[2])[None, None, :, :]
return x
def forward(self,
query: paddle.Tensor,
key: paddle.Tensor,
value: paddle.Tensor,
pos_emb: paddle.Tensor,
mask: Optional[paddle.Tensor]):
"""Compute 'Scaled Dot Product Attention' with rel. positional encoding.
Args:
query (paddle.Tensor): Query tensor (#batch, time1, size).
key (paddle.Tensor): Key tensor (#batch, time2, size).
value (paddle.Tensor): Value tensor (#batch, time2, size).
pos_emb (paddle.Tensor): Positional embedding tensor
(#batch, time1, size).
mask (paddle.Tensor): Mask tensor (#batch, 1, time2) or
(#batch, time1, time2).
Returns:
paddle.Tensor: Output tensor (#batch, time1, d_model).
"""
q, k, v = self.forward_qkv(query, key, value)
q = q.transpose([0, 2, 1, 3]) # (batch, time1, head, d_k)
n_batch_pos = pos_emb.shape[0]
p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)
p = p.transpose([0, 2, 1, 3]) # (batch, head, time1, d_k)
# (batch, head, time1, d_k)
q_with_bias_u = (q + self.pos_bias_u).transpose([0, 2, 1, 3])
# (batch, head, time1, d_k)
q_with_bias_v = (q + self.pos_bias_v).transpose([0, 2, 1, 3])
# compute attention score
# first compute matrix a and matrix c
# as described in https://arxiv.org/abs/1901.02860 Section 3.3
# (batch, head, time1, time2)
matrix_ac = paddle.matmul(q_with_bias_u, k.transpose([0, 1, 3, 2]))
# compute matrix b and matrix d
# (batch, head, time1, time2)
matrix_bd = paddle.matmul(q_with_bias_v, p.transpose([0, 1, 3, 2]))
# Remove rel_shift since it is useless in speech recognition,
# and it requires special attention for streaming.
# matrix_bd = self.rel_shift(matrix_bd)
scores = (matrix_ac + matrix_bd) / math.sqrt(
self.d_k) # (batch, head, time1, time2)
return self.forward_attention(v, scores, mask)
```
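A small shape sanity-check sketch, assuming paddlespeech is installed (importing its `s2t` package patches Tensor helpers such as `.view` that the layers above rely on). The mask is left as `None` for this toy batch.
```python
import paddle
from paddlespeech.s2t.modules.attention import MultiHeadedAttention

batch, time, n_feat, n_head = 2, 10, 256, 4
mha = MultiHeadedAttention(n_head=n_head, n_feat=n_feat, dropout_rate=0.1)

x = paddle.randn([batch, time, n_feat])
out = mha(x, x, x, None)     # self-attention, no padding mask
print(out.shape)             # [2, 10, 256]
```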
#### File: s2t/modules/initializer.py
```python
import numpy as np
from paddle.fluid import framework
from paddle.fluid import unique_name
from paddle.fluid.core import VarDesc
from paddle.fluid.initializer import MSRAInitializer
__all__ = ['KaimingUniform']
class KaimingUniform(MSRAInitializer):
r"""Implements the Kaiming Uniform initializer
This class implements the weight initialization from the paper
`Delving Deep into Rectifiers: Surpassing Human-Level Performance on
ImageNet Classification <https://arxiv.org/abs/1502.01852>`_
by <NAME>, <NAME>, <NAME> and <NAME>. This is a
robust initialization method that particularly considers the rectifier
nonlinearities.
In case of Uniform distribution, the range is [-x, x], where
.. math::
x = \sqrt{\frac{1.0}{fan\_in}}
In case of Normal distribution, the mean is 0 and the standard deviation
is
.. math::
        \sqrt{\frac{2.0}{fan\_in}}
Args:
fan_in (float32|None): fan_in for Kaiming uniform Initializer. If None, it is\
inferred from the variable. default is None.
Note:
It is recommended to set fan_in to None for most cases.
Examples:
.. code-block:: python
import paddle
import paddle.nn as nn
linear = nn.Linear(2,
4,
weight_attr=nn.initializer.KaimingUniform())
data = paddle.rand([30, 10, 2], dtype='float32')
res = linear(data)
"""
def __init__(self, fan_in=None):
super(KaimingUniform, self).__init__(
uniform=True, fan_in=fan_in, seed=0)
def __call__(self, var, block=None):
"""Initialize the input tensor with MSRA initialization.
Args:
var(Tensor): Tensor that needs to be initialized.
block(Block, optional): The block in which initialization ops
should be added. Used in static graph only, default None.
Returns:
The initialization op
"""
block = self._check_block(block)
assert isinstance(var, framework.Variable)
assert isinstance(block, framework.Block)
f_in, f_out = self._compute_fans(var)
# If fan_in is passed, use it
fan_in = f_in if self._fan_in is None else self._fan_in
if self._seed == 0:
self._seed = block.program.random_seed
        # to be compatible with fp16 initializers
if var.dtype == VarDesc.VarType.FP16 or (
var.dtype == VarDesc.VarType.BF16 and not self._uniform):
out_dtype = VarDesc.VarType.FP32
out_var = block.create_var(
name=unique_name.generate(
".".join(['masra_init', var.name, 'tmp'])),
shape=var.shape,
dtype=out_dtype,
type=VarDesc.VarType.LOD_TENSOR,
persistable=False)
else:
out_dtype = var.dtype
out_var = var
if self._uniform:
limit = np.sqrt(1.0 / float(fan_in))
op = block.append_op(
type="uniform_random",
inputs={},
outputs={"Out": out_var},
attrs={
"shape": out_var.shape,
"dtype": int(out_dtype),
"min": -limit,
"max": limit,
"seed": self._seed
},
stop_gradient=True)
else:
std = np.sqrt(2.0 / float(fan_in))
op = block.append_op(
type="gaussian_random",
outputs={"Out": out_var},
attrs={
"shape": out_var.shape,
"dtype": int(out_dtype),
"mean": 0.0,
"std": std,
"seed": self._seed
},
stop_gradient=True)
if var.dtype == VarDesc.VarType.FP16 or (
var.dtype == VarDesc.VarType.BF16 and not self._uniform):
block.append_op(
type="cast",
inputs={"X": out_var},
outputs={"Out": var},
attrs={"in_dtype": out_var.dtype,
"out_dtype": var.dtype})
if not framework.in_dygraph_mode():
var.op = op
return op
class DefaultInitializerContext(object):
"""
egs:
with DefaultInitializerContext("kaiming_uniform"):
code for setup_model
"""
def __init__(self, init_type=None):
self.init_type = init_type
def __enter__(self):
if self.init_type is None:
return
else:
from paddlespeech.s2t.modules import align
align.global_init_type = self.init_type
return
def __exit__(self, exc_type, exc_val, exc_tb):
from paddlespeech.s2t.modules import align
align.global_init_type = None
```
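A minimal dygraph sketch using the custom `KaimingUniform` above via `paddle.ParamAttr`, with the class in scope (it is also importable from paddlespeech.s2t.modules.initializer); it assumes the legacy `paddle.fluid` APIs the module imports are still available in the installed Paddle version.
```python
import paddle
import paddle.nn as nn

linear = nn.Linear(
    2, 4, weight_attr=paddle.ParamAttr(initializer=KaimingUniform()))
data = paddle.rand([30, 10, 2], dtype='float32')
print(linear(data).shape)   # [30, 10, 4]
```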
#### File: s2t/utils/bleu_score.py
```python
import numpy as np
import sacrebleu
__all__ = ['bleu', 'char_bleu', "ErrorCalculator"]
def bleu(hypothesis, reference):
"""Calculate BLEU. BLEU compares reference text and
    hypothesis text at the word level using sacrebleu.
:param reference: The reference sentences.
:type reference: list[list[str]]
:param hypothesis: The hypothesis sentence.
:type hypothesis: list[str]
:raises ValueError: If the reference length is zero.
"""
return sacrebleu.corpus_bleu(hypothesis, reference)
def char_bleu(hypothesis, reference):
"""Calculate BLEU. BLEU compares reference text and
    hypothesis text at the char level using sacrebleu.
:param reference: The reference sentences.
:type reference: list[list[str]]
:param hypothesis: The hypothesis sentence.
:type hypothesis: list[str]
:raises ValueError: If the reference number is zero.
"""
hypothesis = [' '.join(list(hyp.replace(' ', ''))) for hyp in hypothesis]
reference = [[' '.join(list(ref_i.replace(' ', ''))) for ref_i in ref]
for ref in reference]
return sacrebleu.corpus_bleu(hypothesis, reference)
class ErrorCalculator():
"""Calculate BLEU for ST and MT models during training.
:param y_hats: numpy array with predicted text
:param y_pads: numpy array with true (target) text
:param char_list: vocabulary list
:param sym_space: space symbol
:param sym_pad: pad symbol
    :param report_bleu: report BLEU score if True
"""
def __init__(self, char_list, sym_space, sym_pad, report_bleu=False):
"""Construct an ErrorCalculator object."""
super().__init__()
self.char_list = char_list
self.space = sym_space
self.pad = sym_pad
self.report_bleu = report_bleu
if self.space in self.char_list:
self.idx_space = self.char_list.index(self.space)
else:
self.idx_space = None
def __call__(self, ys_hat, ys_pad):
"""Calculate corpus-level BLEU score.
:param torch.Tensor ys_hat: prediction (batch, seqlen)
:param torch.Tensor ys_pad: reference (batch, seqlen)
:return: corpus-level BLEU score in a mini-batch
:rtype float
"""
bleu = None
if not self.report_bleu:
return bleu
bleu = self.calculate_corpus_bleu(ys_hat, ys_pad)
return bleu
def calculate_corpus_bleu(self, ys_hat, ys_pad):
"""Calculate corpus-level BLEU score in a mini-batch.
:param torch.Tensor seqs_hat: prediction (batch, seqlen)
:param torch.Tensor seqs_true: reference (batch, seqlen)
:return: corpus-level BLEU score
:rtype float
"""
seqs_hat, seqs_true = [], []
for i, y_hat in enumerate(ys_hat):
y_true = ys_pad[i]
eos_true = np.where(y_true == -1)[0]
ymax = eos_true[0] if len(eos_true) > 0 else len(y_true)
# NOTE: padding index (-1) in y_true is used to pad y_hat
# because y_hats is not padded with -1
seq_hat = [self.char_list[int(idx)] for idx in y_hat[:ymax]]
seq_true = [
self.char_list[int(idx)] for idx in y_true if int(idx) != -1
]
seq_hat_text = "".join(seq_hat).replace(self.space, " ")
seq_hat_text = seq_hat_text.replace(self.pad, "")
seq_true_text = "".join(seq_true).replace(self.space, " ")
seqs_hat.append(seq_hat_text)
seqs_true.append(seq_true_text)
bleu = sacrebleu.corpus_bleu(seqs_hat, [[ref] for ref in seqs_true])
return bleu.score * 100
```
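A small usage sketch with the functions above in scope; `sacrebleu.corpus_bleu` expects a list of hypothesis strings and a list of reference streams, each stream as long as the hypothesis list.
```python
hyps = ["the cat sat on the mat", "hello world"]
refs = [["the cat sat on the mat", "hello world"]]   # one reference stream
print(bleu(hyps, refs).score)              # word-level corpus BLEU

hyps_zh = ["今天天气真好"]
refs_zh = [["今天天气很好"]]
print(char_bleu(hyps_zh, refs_zh).score)   # char-level BLEU for Chinese text
```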
#### File: paddlespeech/server/base_commands.py
```python
from typing import List
from .entry import client_commands
from .entry import server_commands
from .util import cli_client_register
from .util import cli_server_register
from .util import get_client_command
from .util import get_server_command
__all__ = [
'ServerBaseCommand',
'ServerHelpCommand',
'ClientBaseCommand',
'ClientHelpCommand',
]
@cli_server_register(name='paddlespeech_server')
class ServerBaseCommand:
def execute(self, argv: List[str]) -> bool:
help = get_server_command('paddlespeech_server.help')
return help().execute(argv)
@cli_server_register(
name='paddlespeech_server.help', description='Show help for commands.')
class ServerHelpCommand:
def execute(self, argv: List[str]) -> bool:
msg = 'Usage:\n'
msg += ' paddlespeech_server <command> <options>\n\n'
msg += 'Commands:\n'
for command, detail in server_commands['paddlespeech_server'].items():
if command.startswith('_'):
continue
if '_description' not in detail:
continue
msg += ' {:<15} {}\n'.format(command,
detail['_description'])
print(msg)
return True
@cli_client_register(name='paddlespeech_client')
class ClientBaseCommand:
def execute(self, argv: List[str]) -> bool:
help = get_client_command('paddlespeech_client.help')
return help().execute(argv)
@cli_client_register(
name='paddlespeech_client.help', description='Show help for commands.')
class ClientHelpCommand:
def execute(self, argv: List[str]) -> bool:
msg = 'Usage:\n'
msg += ' paddlespeech_client <command> <options>\n\n'
msg += 'Commands:\n'
for command, detail in client_commands['paddlespeech_client'].items():
if command.startswith('_'):
continue
if '_description' not in detail:
continue
msg += ' {:<15} {}\n'.format(command,
detail['_description'])
print(msg)
return True
```
#### File: tests/text/http_client.py
```python
import argparse
import json
import time
import requests
from paddlespeech.cli.log import logger
# Request and response
def text_client(args):
""" Request and response
Args:
text: A sentence to be processed by PaddleSpeech Text Server
        outfile: Output file for the punctuated text
"""
url = "http://" + str(args.server) + ":" + str(
args.port) + "/paddlespeech/text"
request = {
"text": args.text,
}
response = requests.post(url, json.dumps(request))
response_dict = response.json()
punc_text = response_dict["result"]["punc_text"]
    # save the punctuated text
outfile = args.output
if outfile:
with open(outfile, 'w') as w:
w.write(punc_text + "\n")
logger.info(f"The punc text is: {punc_text}")
return punc_text
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--text',
type=str,
default="今天的天气真不错啊你下午有空吗我想约你一起去吃饭",
        help='A sentence to be processed by the text server')
parser.add_argument(
'--output', type=str, default="./punc_text", help='Punc text file')
parser.add_argument(
"--server", type=str, help="server ip", default="127.0.0.1")
parser.add_argument("--port", type=int, help="server port", default=8090)
args = parser.parse_args()
st = time.time()
try:
punc_text = text_client(args)
time_consume = time.time() - st
time_per_word = time_consume / len(args.text)
print("Text Process successfully.")
print("Inference time: %f" % (time_consume))
print("The text length: %f" % (len(args.text)))
print("The time per work is: %f" % (time_per_word))
except BaseException as e:
logger.info("Failed to Process text.")
logger.info(e)
```
#### File: server/utils/onnx_infer.py
```python
import os
from typing import Optional
import onnxruntime as ort
def get_sess(model_path: Optional[os.PathLike]=None, sess_conf: dict=None):
sess_options = ort.SessionOptions()
sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
sess_options.execution_mode = ort.ExecutionMode.ORT_SEQUENTIAL
if "gpu" in sess_conf["device"]:
# fastspeech2/mb_melgan can't use trt now!
if sess_conf["use_trt"]:
providers = ['TensorrtExecutionProvider']
else:
providers = ['CUDAExecutionProvider']
elif sess_conf["device"] == "cpu":
providers = ['CPUExecutionProvider']
sess_options.intra_op_num_threads = sess_conf["cpu_threads"]
sess = ort.InferenceSession(
model_path, providers=providers, sess_options=sess_options)
return sess
```
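A hypothetical usage sketch of `get_sess`: the model path is a placeholder, and the config keys mirror exactly what the function reads above.
```python
import numpy as np

sess_conf = {"device": "cpu", "use_trt": False, "cpu_threads": 4}
sess = get_sess("exported_model.onnx", sess_conf)   # placeholder model path

input_name = sess.get_inputs()[0].name
dummy = np.random.rand(100, 80).astype("float32")   # shape depends on the model
outputs = sess.run(None, {input_name: dummy})
print([o.shape for o in outputs])
```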
#### File: t2s/datasets/vocoder_batch_fn.py
```python
import numpy as np
import paddle
from paddlespeech.t2s.audio.codec import encode_mu_law
from paddlespeech.t2s.audio.codec import float_2_label
from paddlespeech.t2s.audio.codec import label_2_float
class Clip(object):
"""Collate functor for training vocoders.
"""
def __init__(
self,
batch_max_steps=20480,
hop_size=256,
aux_context_window=0, ):
"""Initialize customized collater for DataLoader.
Args:
batch_max_steps (int): The maximum length of input signal in batch.
hop_size (int): Hop size of auxiliary features.
aux_context_window (int): Context window size for auxiliary feature conv.
"""
if batch_max_steps % hop_size != 0:
batch_max_steps += -(batch_max_steps % hop_size)
assert batch_max_steps % hop_size == 0
self.batch_max_steps = batch_max_steps
self.batch_max_frames = batch_max_steps // hop_size
self.hop_size = hop_size
self.aux_context_window = aux_context_window
# set useful values in random cutting
self.start_offset = aux_context_window
self.end_offset = -(self.batch_max_frames + aux_context_window)
self.mel_threshold = self.batch_max_frames + 2 * aux_context_window
def __call__(self, batch):
"""Convert into batch tensors.
Args:
batch (list): list of tuple of the pair of audio and features. Audio shape (T, ), features shape(T', C).
Returns:
Tensor:
Auxiliary feature batch (B, C, T'), where
T = (T' - 2 * aux_context_window) * hop_size.
Tensor:
Target signal batch (B, 1, T).
"""
# check length
batch = [
self._adjust_length(b['wave'], b['feats']) for b in batch
if b['feats'].shape[0] > self.mel_threshold
]
xs, cs = [b[0] for b in batch], [b[1] for b in batch]
# make batch with random cut
c_lengths = [c.shape[0] for c in cs]
start_frames = np.array([
np.random.randint(self.start_offset, cl + self.end_offset)
for cl in c_lengths
])
x_starts = start_frames * self.hop_size
x_ends = x_starts + self.batch_max_steps
c_starts = start_frames - self.aux_context_window
c_ends = start_frames + self.batch_max_frames + self.aux_context_window
y_batch = np.stack(
[x[start:end] for x, start, end in zip(xs, x_starts, x_ends)])
c_batch = np.stack(
[c[start:end] for c, start, end in zip(cs, c_starts, c_ends)])
# convert each batch to tensor, assume that each item in batch has the same length
y_batch = paddle.to_tensor(
y_batch, dtype=paddle.float32).unsqueeze(1) # (B, 1, T)
c_batch = paddle.to_tensor(
c_batch, dtype=paddle.float32).transpose([0, 2, 1]) # (B, C, T')
return y_batch, c_batch
def _adjust_length(self, x, c):
"""Adjust the audio and feature lengths.
Note:
Basically we assume that the length of x and c are adjusted
through preprocessing stage, but if we use other library processed
features, this process will be needed.
"""
if len(x) < c.shape[0] * self.hop_size:
x = np.pad(x, (0, c.shape[0] * self.hop_size - len(x)), mode="edge")
elif len(x) > c.shape[0] * self.hop_size:
# print(
# f"wave length: ({len(x)}), mel length: ({c.shape[0]}), hop size: ({self.hop_size })"
# )
x = x[:c.shape[0] * self.hop_size]
        # check the length is valid
assert len(x) == c.shape[
0] * self.hop_size, f"wave length: ({len(x)}), mel length: ({c.shape[0]})"
return x, c
class WaveRNNClip(Clip):
def __init__(self,
mode: str='RAW',
batch_max_steps: int=4500,
hop_size: int=300,
aux_context_window: int=2,
bits: int=9,
mu_law: bool=True):
self.mode = mode
self.mel_win = batch_max_steps // hop_size + 2 * aux_context_window
self.batch_max_steps = batch_max_steps
self.hop_size = hop_size
self.aux_context_window = aux_context_window
self.mu_law = mu_law
self.batch_max_frames = batch_max_steps // hop_size
self.mel_threshold = self.batch_max_frames + 2 * aux_context_window
if self.mode == 'MOL':
self.bits = 16
else:
self.bits = bits
def to_quant(self, wav):
if self.mode == 'RAW':
if self.mu_law:
quant = encode_mu_law(wav, mu=2**self.bits)
else:
quant = float_2_label(wav, bits=self.bits)
elif self.mode == 'MOL':
quant = float_2_label(wav, bits=16)
quant = quant.astype(np.int64)
return quant
def __call__(self, batch):
# voc_pad = 2 this will pad the input so that the resnet can 'see' wider than input length
# max_offsets = n_frames - 2 - (mel_win + 2 * hp.voc_pad) = n_frames - 15
"""Convert into batch tensors.
Args:
batch (list): list of tuple of the pair of audio and features. Audio shape (T, ), features shape(T', C).
Returns:
Tensor: Input signal batch (B, 1, T).
Tensor: Target signal batch (B, 1, T).
Tensor: Auxiliary feature batch (B, C, T'),
where T = (T' - 2 * aux_context_window) * hop_size.
"""
# check length
batch = [
self._adjust_length(b['wave'], b['feats']) for b in batch
if b['feats'].shape[0] > self.mel_threshold
]
wav, mel = [b[0] for b in batch], [b[1] for b in batch]
        # mel needs to be transposed here
mel = [x.T for x in mel]
max_offsets = [
x.shape[-1] - 2 - (self.mel_win + 2 * self.aux_context_window)
for x in mel
]
# the slice point of mel selecting randomly
mel_offsets = [np.random.randint(0, offset) for offset in max_offsets]
# the slice point of wav selecting randomly, which is behind 2(=pad) frames
sig_offsets = [(offset + self.aux_context_window) * self.hop_size
for offset in mel_offsets]
# mels.shape[1] = voc_seq_len // hop_length + 2 * voc_pad
mels = [
x[:, mel_offsets[i]:mel_offsets[i] + self.mel_win]
for i, x in enumerate(mel)
]
# label.shape[1] = voc_seq_len + 1
wav = [self.to_quant(x) for x in wav]
labels = [
x[sig_offsets[i]:sig_offsets[i] + self.batch_max_steps + 1]
for i, x in enumerate(wav)
]
mels = np.stack(mels).astype(np.float32)
labels = np.stack(labels).astype(np.int64)
mels = paddle.to_tensor(mels)
labels = paddle.to_tensor(labels, dtype='int64')
# x is input, y is label
x = labels[:, :self.batch_max_steps]
y = labels[:, 1:]
'''
mode = RAW:
mu_law = True:
quant: bits = 9 0, 1, 2, ..., 509, 510, 511 int
mu_law = False
quant bits = 9 [0, 511] float
mode = MOL:
            quant: bits = 16  [0, 65535]  float
'''
        # x should be normalized to [-1, 1] in RAW mode
x = label_2_float(paddle.cast(x, dtype='float32'), self.bits)
        # y should be normalized to [-1, 1] in MOL mode
if self.mode == 'MOL':
y = label_2_float(paddle.cast(y, dtype='float32'), self.bits)
return x, y, mels
```
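A quick sketch of the `Clip` collate function on a synthetic batch, with the class above in scope. The fake items have 100 mel frames and a matching waveform of `100 * hop_size` samples, which clears the default `mel_threshold` of 80 frames.
```python
import numpy as np

hop_size = 256
batch = [
    {'wave': np.random.randn(100 * hop_size).astype('float32'),
     'feats': np.random.randn(100, 80).astype('float32')}
    for _ in range(2)
]
collate = Clip(batch_max_steps=20480, hop_size=hop_size, aux_context_window=0)
wav_batch, mel_batch = collate(batch)
print(wav_batch.shape, mel_batch.shape)   # [2, 1, 20480] [2, 80, 80]
```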
#### File: t2s/exps/ort_predict.py
```python
import argparse
from pathlib import Path
import jsonlines
import numpy as np
import paddle
import soundfile as sf
from timer import timer
from paddlespeech.t2s.exps.syn_utils import get_sess
from paddlespeech.t2s.exps.syn_utils import get_test_dataset
from paddlespeech.t2s.utils import str2bool
def ort_predict(args):
# construct dataset for evaluation
with jsonlines.open(args.test_metadata, 'r') as reader:
test_metadata = list(reader)
am_name = args.am[:args.am.rindex('_')]
am_dataset = args.am[args.am.rindex('_') + 1:]
test_dataset = get_test_dataset(test_metadata=test_metadata, am=args.am)
output_dir = Path(args.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
fs = 24000 if am_dataset != 'ljspeech' else 22050
# am
am_sess = get_sess(
model_dir=args.inference_dir,
model_file=args.am + ".onnx",
device=args.device,
cpu_threads=args.cpu_threads)
# vocoder
voc_sess = get_sess(
model_dir=args.inference_dir,
model_file=args.voc + ".onnx",
device=args.device,
cpu_threads=args.cpu_threads)
# am warmup
for T in [27, 38, 54]:
am_input_feed = {}
if am_name == 'fastspeech2':
phone_ids = np.random.randint(1, 266, size=(T, ))
am_input_feed.update({'text': phone_ids})
elif am_name == 'speedyspeech':
phone_ids = np.random.randint(1, 92, size=(T, ))
tone_ids = np.random.randint(1, 5, size=(T, ))
am_input_feed.update({'phones': phone_ids, 'tones': tone_ids})
am_sess.run(None, input_feed=am_input_feed)
# voc warmup
for T in [227, 308, 544]:
data = np.random.rand(T, 80).astype("float32")
voc_sess.run(None, {"logmel": data})
print("warm up done!")
N = 0
T = 0
am_input_feed = {}
for example in test_dataset:
utt_id = example['utt_id']
if am_name == 'fastspeech2':
phone_ids = example["text"]
am_input_feed.update({'text': phone_ids})
elif am_name == 'speedyspeech':
phone_ids = example["phones"]
tone_ids = example["tones"]
am_input_feed.update({'phones': phone_ids, 'tones': tone_ids})
with timer() as t:
mel = am_sess.run(output_names=None, input_feed=am_input_feed)
mel = mel[0]
wav = voc_sess.run(output_names=None, input_feed={'logmel': mel})
N += len(wav[0])
T += t.elapse
speed = len(wav[0]) / t.elapse
rtf = fs / speed
sf.write(
str(output_dir / (utt_id + ".wav")),
np.array(wav)[0],
samplerate=fs)
print(
f"{utt_id}, mel: {mel.shape}, wave: {len(wav[0])}, time: {t.elapse}s, Hz: {speed}, RTF: {rtf}."
)
print(f"generation speed: {N / T}Hz, RTF: {fs / (N / T) }")
def parse_args():
parser = argparse.ArgumentParser(description="Infernce with onnxruntime.")
# acoustic model
parser.add_argument(
'--am',
type=str,
default='fastspeech2_csmsc',
choices=['fastspeech2_csmsc', 'speedyspeech_csmsc'],
help='Choose acoustic model type of tts task.')
# voc
parser.add_argument(
'--voc',
type=str,
default='hifigan_csmsc',
choices=['hifigan_csmsc', 'mb_melgan_csmsc', 'pwgan_csmsc'],
help='Choose vocoder type of tts task.')
# other
parser.add_argument(
"--inference_dir", type=str, help="dir to save inference models")
parser.add_argument("--test_metadata", type=str, help="test metadata.")
parser.add_argument("--output_dir", type=str, help="output dir")
# inference
parser.add_argument(
"--use_trt",
type=str2bool,
default=False,
help="Whether to use inference engin TensorRT.", )
parser.add_argument(
"--device",
default="gpu",
choices=["gpu", "cpu"],
help="Device selected for inference.", )
parser.add_argument('--cpu_threads', type=int, default=1)
args, _ = parser.parse_known_args()
return args
def main():
args = parse_args()
paddle.set_device(args.device)
ort_predict(args)
if __name__ == "__main__":
main()
```
#### File: t2s/exps/ort_predict_streaming.py
```python
import argparse
from pathlib import Path
import numpy as np
import paddle
import soundfile as sf
from timer import timer
from paddlespeech.t2s.exps.syn_utils import denorm
from paddlespeech.t2s.exps.syn_utils import get_chunks
from paddlespeech.t2s.exps.syn_utils import get_frontend
from paddlespeech.t2s.exps.syn_utils import get_sentences
from paddlespeech.t2s.exps.syn_utils import get_sess
from paddlespeech.t2s.utils import str2bool
def ort_predict(args):
# frontend
frontend = get_frontend(
lang=args.lang,
phones_dict=args.phones_dict,
tones_dict=args.tones_dict)
output_dir = Path(args.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
sentences = get_sentences(text_file=args.text, lang=args.lang)
am_name = args.am[:args.am.rindex('_')]
am_dataset = args.am[args.am.rindex('_') + 1:]
fs = 24000 if am_dataset != 'ljspeech' else 22050
# streaming acoustic model
am_encoder_infer_sess = get_sess(
model_dir=args.inference_dir,
model_file=args.am + "_am_encoder_infer" + ".onnx",
device=args.device,
cpu_threads=args.cpu_threads)
am_decoder_sess = get_sess(
model_dir=args.inference_dir,
model_file=args.am + "_am_decoder" + ".onnx",
device=args.device,
cpu_threads=args.cpu_threads)
am_postnet_sess = get_sess(
model_dir=args.inference_dir,
model_file=args.am + "_am_postnet" + ".onnx",
device=args.device,
cpu_threads=args.cpu_threads)
am_mu, am_std = np.load(args.am_stat)
# vocoder
voc_sess = get_sess(
model_dir=args.inference_dir,
model_file=args.voc + ".onnx",
device=args.device,
cpu_threads=args.cpu_threads)
# frontend warmup
# Loading model cost 0.5+ seconds
if args.lang == 'zh':
frontend.get_input_ids("你好,欢迎使用飞桨框架进行深度学习研究!", merge_sentences=True)
else:
print("lang should in be 'zh' here!")
# am warmup
for T in [27, 38, 54]:
phone_ids = np.random.randint(1, 266, size=(T, ))
am_encoder_infer_sess.run(None, input_feed={'text': phone_ids})
am_decoder_input = np.random.rand(1, T * 15, 384).astype('float32')
am_decoder_sess.run(None, input_feed={'xs': am_decoder_input})
am_postnet_input = np.random.rand(1, 80, T * 15).astype('float32')
am_postnet_sess.run(None, input_feed={'xs': am_postnet_input})
# voc warmup
for T in [227, 308, 544]:
data = np.random.rand(T, 80).astype("float32")
voc_sess.run(None, input_feed={"logmel": data})
print("warm up done!")
N = 0
T = 0
merge_sentences = True
get_tone_ids = False
chunk_size = args.chunk_size
pad_size = args.pad_size
for utt_id, sentence in sentences:
with timer() as t:
if args.lang == 'zh':
input_ids = frontend.get_input_ids(
sentence,
merge_sentences=merge_sentences,
get_tone_ids=get_tone_ids)
phone_ids = input_ids["phone_ids"]
else:
print("lang should in be 'zh' here!")
# merge_sentences=True here, so we only use the first item of phone_ids
phone_ids = phone_ids[0].numpy()
orig_hs = am_encoder_infer_sess.run(
None, input_feed={'text': phone_ids})
if args.am_streaming:
hss = get_chunks(orig_hs[0], chunk_size, pad_size)
chunk_num = len(hss)
mel_list = []
for i, hs in enumerate(hss):
am_decoder_output = am_decoder_sess.run(
None, input_feed={'xs': hs})
am_postnet_output = am_postnet_sess.run(
None,
input_feed={
'xs': np.transpose(am_decoder_output[0], (0, 2, 1))
})
am_output_data = am_decoder_output + np.transpose(
am_postnet_output[0], (0, 2, 1))
normalized_mel = am_output_data[0][0]
sub_mel = denorm(normalized_mel, am_mu, am_std)
# clip output part of pad
if i == 0:
sub_mel = sub_mel[:-pad_size]
elif i == chunk_num - 1:
                        # the right side of the last chunk is always short of pad
sub_mel = sub_mel[pad_size:]
else:
                        # the right side of the last few chunks may also be short of pad
sub_mel = sub_mel[pad_size:(chunk_size + pad_size) -
sub_mel.shape[0]]
mel_list.append(sub_mel)
mel = np.concatenate(mel_list, axis=0)
else:
am_decoder_output = am_decoder_sess.run(
None, input_feed={'xs': orig_hs[0]})
am_postnet_output = am_postnet_sess.run(
None,
input_feed={
'xs': np.transpose(am_decoder_output[0], (0, 2, 1))
})
am_output_data = am_decoder_output + np.transpose(
am_postnet_output[0], (0, 2, 1))
normalized_mel = am_output_data[0]
mel = denorm(normalized_mel, am_mu, am_std)
mel = mel[0]
# vocoder
wav = voc_sess.run(output_names=None, input_feed={'logmel': mel})
N += len(wav[0])
T += t.elapse
speed = len(wav[0]) / t.elapse
rtf = fs / speed
sf.write(
str(output_dir / (utt_id + ".wav")),
np.array(wav)[0],
samplerate=fs)
print(
f"{utt_id}, mel: {mel.shape}, wave: {len(wav[0])}, time: {t.elapse}s, Hz: {speed}, RTF: {rtf}."
)
print(f"generation speed: {N / T}Hz, RTF: {fs / (N / T) }")
def parse_args():
parser = argparse.ArgumentParser(description="Infernce with onnxruntime.")
# acoustic model
parser.add_argument(
'--am',
type=str,
default='fastspeech2_csmsc',
choices=['fastspeech2_csmsc'],
help='Choose acoustic model type of tts task.')
parser.add_argument(
"--am_stat",
type=str,
default=None,
help="mean and standard deviation used to normalize spectrogram when training acoustic model."
)
parser.add_argument(
"--phones_dict", type=str, default=None, help="phone vocabulary file.")
parser.add_argument(
"--tones_dict", type=str, default=None, help="tone vocabulary file.")
# voc
parser.add_argument(
'--voc',
type=str,
default='hifigan_csmsc',
choices=['hifigan_csmsc', 'mb_melgan_csmsc', 'pwgan_csmsc'],
help='Choose vocoder type of tts task.')
# other
parser.add_argument(
"--inference_dir", type=str, help="dir to save inference models")
parser.add_argument(
"--text",
type=str,
help="text to synthesize, a 'utt_id sentence' pair per line")
parser.add_argument("--output_dir", type=str, help="output dir")
parser.add_argument(
'--lang',
type=str,
default='zh',
help='Choose model language. zh or en')
# inference
parser.add_argument(
"--use_trt",
type=str2bool,
default=False,
help="Whether to use inference engin TensorRT.", )
parser.add_argument(
"--device",
default="gpu",
choices=["gpu", "cpu"],
help="Device selected for inference.", )
parser.add_argument('--cpu_threads', type=int, default=1)
# streaming related
parser.add_argument(
"--am_streaming",
type=str2bool,
default=False,
help="whether use streaming acoustic model")
parser.add_argument(
"--chunk_size", type=int, default=42, help="chunk size of am streaming")
parser.add_argument(
"--pad_size", type=int, default=12, help="pad size of am streaming")
args, _ = parser.parse_known_args()
return args
def main():
args = parse_args()
paddle.set_device(args.device)
ort_predict(args)
if __name__ == "__main__":
main()
```
#### File: models/wavernn/wavernn_updater.py
```python
import logging
from pathlib import Path
import paddle
import soundfile as sf
from paddle import distributed as dist
from paddle.io import DataLoader
from paddle.nn import Layer
from paddle.optimizer import Optimizer
from paddlespeech.t2s.training.extensions.evaluator import StandardEvaluator
from paddlespeech.t2s.training.reporter import report
from paddlespeech.t2s.training.updaters.standard_updater import StandardUpdater
logging.basicConfig(
format='%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s',
datefmt='[%Y-%m-%d %H:%M:%S]')
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def calculate_grad_norm(parameters, norm_type: str=2):
'''
    calculate the grad norm of the model's parameters
parameters:
model's parameters
norm_type: str
Returns
------------
Tensor
grad_norm
'''
grad_list = [
paddle.to_tensor(p.grad) for p in parameters if p.grad is not None
]
norm_list = paddle.stack(
[paddle.norm(grad, norm_type) for grad in grad_list])
total_norm = paddle.norm(norm_list)
return total_norm
# for save name in gen_valid_samples()
ITERATION = 0
class WaveRNNUpdater(StandardUpdater):
def __init__(self,
model: Layer,
optimizer: Optimizer,
criterion: Layer,
dataloader: DataLoader,
init_state=None,
output_dir: Path=None,
mode='RAW'):
super().__init__(model, optimizer, dataloader, init_state=None)
self.criterion = criterion
# self.scheduler = scheduler
log_file = output_dir / 'worker_{}.log'.format(dist.get_rank())
self.filehandler = logging.FileHandler(str(log_file))
logger.addHandler(self.filehandler)
self.logger = logger
self.msg = ""
self.mode = mode
def update_core(self, batch):
self.msg = "Rank: {}, ".format(dist.get_rank())
losses_dict = {}
# parse batch
self.model.train()
self.optimizer.clear_grad()
wav, y, mel = batch
y_hat = self.model(wav, mel)
if self.mode == 'RAW':
y_hat = y_hat.transpose([0, 2, 1]).unsqueeze(-1)
elif self.mode == 'MOL':
            # cast the target to float for the mixture-of-logistics loss
            y = paddle.cast(y, dtype='float32')
y = y.unsqueeze(-1)
loss = self.criterion(y_hat, y)
loss.backward()
grad_norm = float(
calculate_grad_norm(self.model.parameters(), norm_type=2))
self.optimizer.step()
report("train/loss", float(loss))
report("train/grad_norm", float(grad_norm))
losses_dict["loss"] = float(loss)
losses_dict["grad_norm"] = float(grad_norm)
self.msg += ', '.join('{}: {:>.6f}'.format(k, v)
for k, v in losses_dict.items())
global ITERATION
ITERATION = self.state.iteration + 1
class WaveRNNEvaluator(StandardEvaluator):
def __init__(self,
model: Layer,
criterion: Layer,
dataloader: Optimizer,
output_dir: Path=None,
valid_generate_loader=None,
config=None):
super().__init__(model, dataloader)
log_file = output_dir / 'worker_{}.log'.format(dist.get_rank())
self.filehandler = logging.FileHandler(str(log_file))
logger.addHandler(self.filehandler)
self.logger = logger
self.msg = ""
self.criterion = criterion
self.valid_generate_loader = valid_generate_loader
self.config = config
self.mode = config.model.mode
self.valid_samples_dir = output_dir / "valid_samples"
self.valid_samples_dir.mkdir(parents=True, exist_ok=True)
def evaluate_core(self, batch):
self.msg = "Evaluate: "
losses_dict = {}
# parse batch
wav, y, mel = batch
y_hat = self.model(wav, mel)
if self.mode == 'RAW':
y_hat = y_hat.transpose([0, 2, 1]).unsqueeze(-1)
elif self.mode == 'MOL':
            # cast the target to float for the mixture-of-logistics loss
            y = paddle.cast(y, dtype='float32')
y = y.unsqueeze(-1)
loss = self.criterion(y_hat, y)
report("eval/loss", float(loss))
losses_dict["loss"] = float(loss)
self.msg += ', '.join('{}: {:>.6f}'.format(k, v)
for k, v in losses_dict.items())
self.logger.info(self.msg)
def gen_valid_samples(self):
for i, item in enumerate(self.valid_generate_loader):
if i >= self.config.generate_num:
break
print(
'\n| Generating: {}/{}'.format(i + 1, self.config.generate_num))
mel = item['feats']
wav = item['wave']
wav = wav.squeeze(0)
origin_save_path = self.valid_samples_dir / '{}_steps_{}_target.wav'.format(
self.iteration, i)
sf.write(origin_save_path, wav.numpy(), samplerate=self.config.fs)
if self.config.inference.gen_batched:
batch_str = 'gen_batched_target{}_overlap{}'.format(
self.config.inference.target, self.config.inference.overlap)
else:
batch_str = 'gen_not_batched'
gen_save_path = str(self.valid_samples_dir /
'{}_steps_{}_{}.wav'.format(self.iteration, i,
batch_str))
# (1, T, C_aux) -> (T, C_aux)
mel = mel.squeeze(0)
gen_sample = self.model.generate(
mel, self.config.inference.gen_batched,
self.config.inference.target, self.config.inference.overlap,
self.config.mu_law)
sf.write(
gen_save_path, gen_sample.numpy(), samplerate=self.config.fs)
def __call__(self, trainer=None):
summary = self.evaluate()
for k, v in summary.items():
report(k, v)
        # gen samples at the end of evaluate
self.iteration = ITERATION
if self.iteration % self.config.gen_eval_samples_interval_steps == 0:
self.gen_valid_samples()
```
#### File: t2s/modules/causal_conv.py
```python
import paddle
from paddle import nn
class CausalConv1D(nn.Layer):
"""CausalConv1D module with customized initialization."""
def __init__(
self,
in_channels,
out_channels,
kernel_size,
dilation=1,
bias=True,
pad="Pad1D",
pad_params={"value": 0.0}, ):
"""Initialize CausalConv1d module."""
super().__init__()
self.pad = getattr(paddle.nn, pad)((kernel_size - 1) * dilation,
**pad_params)
self.conv = nn.Conv1D(
in_channels,
out_channels,
kernel_size,
dilation=dilation,
bias_attr=bias)
def forward(self, x):
"""Calculate forward propagation.
Args:
x (Tensor): Input tensor (B, in_channels, T).
Returns:
Tensor: Output tensor (B, out_channels, T).
"""
return self.conv(self.pad(x))[:, :, :x.shape[2]]
class CausalConv1DTranspose(nn.Layer):
"""CausalConv1DTranspose module with customized initialization."""
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride,
bias=True):
"""Initialize CausalConvTranspose1d module."""
super().__init__()
self.deconv = nn.Conv1DTranspose(
in_channels, out_channels, kernel_size, stride, bias_attr=bias)
self.stride = stride
def forward(self, x):
"""Calculate forward propagation.
Args:
x (Tensor): Input tensor (B, in_channels, T_in).
Returns:
Tensor: Output tensor (B, out_channels, T_out).
"""
return self.deconv(x)[:, :, :-self.stride]
```
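A shape check for the two layers above, with the classes in scope: the causal convolution keeps the time length unchanged, and the causal transposed convolution upsamples it by the stride.
```python
import paddle

conv = CausalConv1D(in_channels=4, out_channels=8, kernel_size=3, dilation=2)
x = paddle.randn([1, 4, 100])
print(conv(x).shape)        # [1, 8, 100] -- same length as the input

up = CausalConv1DTranspose(in_channels=8, out_channels=4, kernel_size=4, stride=2)
print(up(conv(x)).shape)    # [1, 4, 200] -- upsampled by the stride
```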
#### File: modules/conformer/convolution.py
```python
from paddle import nn
class ConvolutionModule(nn.Layer):
"""ConvolutionModule in Conformer model.
Args:
channels (int): The number of channels of conv layers.
        kernel_size (int): Kernel size of conv layers.
"""
def __init__(self, channels, kernel_size, activation=nn.ReLU(), bias=True):
"""Construct an ConvolutionModule object."""
super().__init__()
        # kernel_size should be an odd number for 'SAME' padding
assert (kernel_size - 1) % 2 == 0
self.pointwise_conv1 = nn.Conv1D(
channels,
2 * channels,
kernel_size=1,
stride=1,
padding=0,
bias_attr=bias, )
self.depthwise_conv = nn.Conv1D(
channels,
channels,
kernel_size,
stride=1,
padding=(kernel_size - 1) // 2,
groups=channels,
bias_attr=bias, )
self.norm = nn.BatchNorm1D(channels)
self.pointwise_conv2 = nn.Conv1D(
channels,
channels,
kernel_size=1,
stride=1,
padding=0,
bias_attr=bias, )
self.activation = activation
def forward(self, x):
"""Compute convolution module.
Args:
x (Tensor): Input tensor (#batch, time, channels).
Returns:
Tensor: Output tensor (#batch, time, channels).
"""
# exchange the temporal dimension and the feature dimension
x = x.transpose([0, 2, 1])
# GLU mechanism
# (batch, 2*channel, time)
x = self.pointwise_conv1(x)
# (batch, channel, time)
x = nn.functional.glu(x, axis=1)
# 1D Depthwise Conv
x = self.depthwise_conv(x)
x = self.activation(self.norm(x))
x = self.pointwise_conv2(x)
return x.transpose([0, 2, 1])
```
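A shape check for `ConvolutionModule`, with the class in scope; the module keeps the `(batch, time, channels)` layout of its input.
```python
import paddle

conv_module = ConvolutionModule(channels=256, kernel_size=15)
x = paddle.randn([2, 50, 256])   # (#batch, time, channels)
print(conv_module(x).shape)      # [2, 50, 256]
```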
#### File: t2s/modules/residual_stack.py
```python
from typing import Any
from typing import Dict
from paddle import nn
from paddlespeech.t2s.modules.activation import get_activation
from paddlespeech.t2s.modules.causal_conv import CausalConv1D
class ResidualStack(nn.Layer):
"""Residual stack module introduced in MelGAN."""
def __init__(
self,
kernel_size: int=3,
channels: int=32,
dilation: int=1,
bias: bool=True,
nonlinear_activation: str="leakyrelu",
nonlinear_activation_params: Dict[str, Any]={"negative_slope": 0.2},
pad: str="Pad1D",
pad_params: Dict[str, Any]={"mode": "reflect"},
use_causal_conv: bool=False, ):
"""Initialize ResidualStack module.
Args:
kernel_size (int): Kernel size of dilation convolution layer.
channels (int): Number of channels of convolution layers.
dilation (int): Dilation factor.
bias (bool): Whether to add bias parameter in convolution layers.
nonlinear_activation (str): Activation function module name.
nonlinear_activation_params (Dict[str,Any]): Hyperparameters for activation function.
pad (str): Padding function module name before dilated convolution layer.
pad_params (Dict[str, Any]): Hyperparameters for padding function.
use_causal_conv (bool): Whether to use causal convolution.
"""
super().__init__()
# for compatibility
if nonlinear_activation:
nonlinear_activation = nonlinear_activation.lower()
        # define the residual stack part
if not use_causal_conv:
            assert (kernel_size - 1
                    ) % 2 == 0, "Even kernel sizes are not supported."
self.stack = nn.Sequential(
get_activation(nonlinear_activation,
**nonlinear_activation_params),
getattr(nn, pad)((kernel_size - 1) // 2 * dilation,
**pad_params),
nn.Conv1D(
channels,
channels,
kernel_size,
dilation=dilation,
bias_attr=bias),
get_activation(nonlinear_activation,
**nonlinear_activation_params),
nn.Conv1D(channels, channels, 1, bias_attr=bias), )
else:
self.stack = nn.Sequential(
get_activation(nonlinear_activation,
**nonlinear_activation_params),
CausalConv1D(
channels,
channels,
kernel_size,
dilation=dilation,
bias=bias,
pad=pad,
pad_params=pad_params, ),
get_activation(nonlinear_activation,
**nonlinear_activation_params),
nn.Conv1D(channels, channels, 1, bias_attr=bias), )
        # define the extra layer for the skip connection
self.skip_layer = nn.Conv1D(channels, channels, 1, bias_attr=bias)
def forward(self, c):
"""Calculate forward propagation.
Args:
c (Tensor): Input tensor (B, channels, T).
Returns:
            Tensor: Output tensor (B, channels, T).
"""
return self.stack(c) + self.skip_layer(c)
```
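A shape check for `ResidualStack` with the default (non-causal) configuration, assuming the class and its paddlespeech helpers are importable.
```python
import paddle

stack = ResidualStack(kernel_size=3, channels=32, dilation=2)
c = paddle.randn([2, 32, 100])   # (B, channels, T)
print(stack(c).shape)            # [2, 32, 100]
```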
#### File: vector/io/signal_processing.py
```python
import numpy as np
import paddle
# TODO: Complete type-hint and doc string.
def blackman_window(win_len, dtype=np.float32):
arcs = np.pi * np.arange(win_len) / float(win_len)
win = np.asarray(
[0.42 - 0.5 * np.cos(2 * arc) + 0.08 * np.cos(4 * arc) for arc in arcs],
dtype=dtype)
return paddle.to_tensor(win)
def compute_amplitude(waveforms, lengths=None, amp_type="avg", scale="linear"):
if len(waveforms.shape) == 1:
waveforms = waveforms.unsqueeze(0)
assert amp_type in ["avg", "peak"]
assert scale in ["linear", "dB"]
if amp_type == "avg":
if lengths is None:
out = paddle.mean(paddle.abs(waveforms), axis=1, keepdim=True)
else:
wav_sum = paddle.sum(paddle.abs(waveforms), axis=1, keepdim=True)
out = wav_sum / lengths
elif amp_type == "peak":
out = paddle.max(paddle.abs(waveforms), axis=1, keepdim=True)
else:
raise NotImplementedError
if scale == "linear":
return out
elif scale == "dB":
return paddle.clip(20 * paddle.log10(out), min=-80)
else:
raise NotImplementedError
def dB_to_amplitude(SNR):
return 10**(SNR / 20)
def convolve1d(
waveform,
kernel,
padding=0,
pad_type="constant",
stride=1,
groups=1, ):
if len(waveform.shape) != 3:
raise ValueError("Convolve1D expects a 3-dimensional tensor")
    # Padding can be a list [left_pad, right_pad] or an int
if isinstance(padding, list):
waveform = paddle.nn.functional.pad(
x=waveform,
pad=padding,
mode=pad_type,
data_format='NLC', )
# Move time dimension last, which pad and fft and conv expect.
# (N, L, C) -> (N, C, L)
waveform = waveform.transpose([0, 2, 1])
kernel = kernel.transpose([0, 2, 1])
convolved = paddle.nn.functional.conv1d(
x=waveform,
weight=kernel,
stride=stride,
groups=groups,
padding=padding if not isinstance(padding, list) else 0, )
# Return time dimension to the second dimension.
return convolved.transpose([0, 2, 1])
def notch_filter(notch_freq, filter_width=101, notch_width=0.05):
# Check inputs
assert 0 < notch_freq <= 1
assert filter_width % 2 != 0
pad = filter_width // 2
inputs = paddle.arange(filter_width, dtype='float32') - pad
# Avoid frequencies that are too low
notch_freq += notch_width
# Define sinc function, avoiding division by zero
def sinc(x):
def _sinc(x):
return paddle.sin(x) / x
# The zero is at the middle index
res = paddle.concat(
[_sinc(x[:pad]), paddle.ones([1]), _sinc(x[pad + 1:])])
return res
# Compute a low-pass filter with cutoff frequency notch_freq.
hlpf = sinc(3 * (notch_freq - notch_width) * inputs)
# import torch
# hlpf *= paddle.to_tensor(torch.blackman_window(filter_width).detach().numpy())
hlpf *= blackman_window(filter_width)
hlpf /= paddle.sum(hlpf)
# Compute a high-pass filter with cutoff frequency notch_freq.
hhpf = sinc(3 * (notch_freq + notch_width) * inputs)
# hhpf *= paddle.to_tensor(torch.blackman_window(filter_width).detach().numpy())
hhpf *= blackman_window(filter_width)
hhpf /= -paddle.sum(hhpf)
hhpf[pad] += 1
# Adding filters creates notch filter
return (hlpf + hhpf).reshape([1, -1, 1])
def reverberate(waveforms,
rir_waveform,
sample_rate,
impulse_duration=0.3,
rescale_amp="avg"):
orig_shape = waveforms.shape
if len(waveforms.shape) > 3 or len(rir_waveform.shape) > 3:
raise NotImplementedError
# if inputs are mono tensors we reshape to 1, samples
if len(waveforms.shape) == 1:
waveforms = waveforms.unsqueeze(0).unsqueeze(-1)
elif len(waveforms.shape) == 2:
waveforms = waveforms.unsqueeze(-1)
if len(rir_waveform.shape) == 1: # convolve1d expects a 3d tensor !
rir_waveform = rir_waveform.unsqueeze(0).unsqueeze(-1)
elif len(rir_waveform.shape) == 2:
rir_waveform = rir_waveform.unsqueeze(-1)
# Compute the average amplitude of the clean
orig_amplitude = compute_amplitude(waveforms, waveforms.shape[1],
rescale_amp)
# Compute index of the direct signal, so we can preserve alignment
impulse_index_start = rir_waveform.abs().argmax(axis=1).item()
impulse_index_end = min(
impulse_index_start + int(sample_rate * impulse_duration),
rir_waveform.shape[1])
rir_waveform = rir_waveform[:, impulse_index_start:impulse_index_end, :]
rir_waveform = rir_waveform / paddle.norm(rir_waveform, p=2)
rir_waveform = paddle.flip(rir_waveform, [1])
waveforms = convolve1d(
waveform=waveforms,
kernel=rir_waveform,
padding=[rir_waveform.shape[1] - 1, 0], )
# Rescale to the peak amplitude of the clean waveform
waveforms = rescale(waveforms, waveforms.shape[1], orig_amplitude,
rescale_amp)
if len(orig_shape) == 1:
waveforms = waveforms.squeeze(0).squeeze(-1)
if len(orig_shape) == 2:
waveforms = waveforms.squeeze(-1)
return waveforms
def rescale(waveforms, lengths, target_lvl, amp_type="avg", scale="linear"):
assert amp_type in ["peak", "avg"]
assert scale in ["linear", "dB"]
batch_added = False
if len(waveforms.shape) == 1:
batch_added = True
waveforms = waveforms.unsqueeze(0)
waveforms = normalize(waveforms, lengths, amp_type)
if scale == "linear":
out = target_lvl * waveforms
elif scale == "dB":
out = dB_to_amplitude(target_lvl) * waveforms
else:
raise NotImplementedError("Invalid scale, choose between dB and linear")
if batch_added:
out = out.squeeze(0)
return out
def normalize(waveforms, lengths=None, amp_type="avg", eps=1e-14):
assert amp_type in ["avg", "peak"]
batch_added = False
if len(waveforms.shape) == 1:
batch_added = True
waveforms = waveforms.unsqueeze(0)
den = compute_amplitude(waveforms, lengths, amp_type) + eps
if batch_added:
waveforms = waveforms.squeeze(0)
return waveforms / den
```
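A minimal, self-contained sketch (not part of the repo) of the `(batch, time, channels)` layout these helpers expect, reproducing the pad → transpose → conv1d sequence that `convolve1d` wraps; the waveform and the 5-tap averaging kernel are illustrative:

```python
import paddle
import paddle.nn.functional as F

wav = paddle.randn([1, 100, 1])            # (batch, time, channels)
kernel = paddle.ones([1, 5, 1]) / 5.0      # toy 5-tap moving-average kernel

# Same steps convolve1d performs internally: pad in NLC, move time last, convolve, move back.
x = F.pad(wav, pad=[2, 2], mode='constant', data_format='NLC')
y = F.conv1d(x.transpose([0, 2, 1]), kernel.transpose([0, 2, 1]))
y = y.transpose([0, 2, 1])
print(y.shape)                             # [1, 100, 1]
```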
#### File: zh/local/text_to_lexicon.py
```python
import argparse
from collections import Counter
def main(args):
counter = Counter()
with open(args.text, 'r') as fin, open(args.lexicon, 'w') as fout:
for line in fin:
line = line.strip()
if args.has_key:
utt, text = line.split(maxsplit=1)
words = text.split()
else:
words = line.split()
counter.update(words)
for word in counter:
val = " ".join(list(word))
fout.write(f"{word}\t{val}\n")
fout.flush()
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='text(line:utt1 中国 人) to lexicon(line:中国 中 国).')
parser.add_argument(
        '--has_key', default=True, help='whether each text line starts with an utt key')
parser.add_argument(
'--text', required=True, help='text path. line: utt1 中国 人 or 中国 人')
parser.add_argument(
'--lexicon', required=True, help='lexicon path. line:中国 中 国')
args = parser.parse_args()
print(args)
main(args)
```
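A quick illustration (not from the repo) of the transformation this script performs, run on one in-memory line in the `--has_key` format instead of real `--text`/`--lexicon` files:

```python
from collections import Counter

lines = ["utt1 中国 人"]                    # made-up utterance line
counter = Counter()
for line in lines:
    _, text = line.strip().split(maxsplit=1)
    counter.update(text.split())
for word in counter:
    print(f"{word}\t{' '.join(list(word))}")   # e.g. 中国 -> "中国<TAB>中 国"
```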
#### File: unit/vector/test_augment.py
```python
import paddle
def test_add_noise(tmpdir, device):
paddle.device.set_device(device)
from paddlespeech.vector.io.augment import AddNoise
test_waveform = paddle.sin(
paddle.arange(16000.0, dtype="float32")).unsqueeze(0)
test_noise = paddle.cos(
paddle.arange(16000.0, dtype="float32")).unsqueeze(0)
wav_lens = paddle.ones([1], dtype="float32")
# Edge cases
no_noise = AddNoise(mix_prob=0.0)
assert no_noise(test_waveform, wav_lens).allclose(test_waveform)
def test_speed_perturb(device):
paddle.device.set_device(device)
from paddlespeech.vector.io.augment import SpeedPerturb
test_waveform = paddle.sin(
paddle.arange(16000.0, dtype="float32")).unsqueeze(0)
# Edge cases
no_perturb = SpeedPerturb(16000, perturb_prob=0.0)
assert no_perturb(test_waveform).allclose(test_waveform)
no_perturb = SpeedPerturb(16000, speeds=[100])
assert no_perturb(test_waveform).allclose(test_waveform)
# # Half speed
half_speed = SpeedPerturb(16000, speeds=[50])
assert half_speed(test_waveform).allclose(test_waveform[:, ::2], atol=3e-1)
def test_babble(device):
paddle.device.set_device(device)
from paddlespeech.vector.io.augment import AddBabble
test_waveform = paddle.stack(
(paddle.sin(paddle.arange(16000.0, dtype="float32")),
paddle.cos(paddle.arange(16000.0, dtype="float32")), ))
lengths = paddle.ones([2])
# Edge cases
no_babble = AddBabble(mix_prob=0.0)
assert no_babble(test_waveform, lengths).allclose(test_waveform)
no_babble = AddBabble(speaker_count=1, snr_low=1000, snr_high=1000)
assert no_babble(test_waveform, lengths).allclose(test_waveform)
# One babbler just averages the two speakers
babble = AddBabble(speaker_count=1).to(device)
expected = (test_waveform + test_waveform.roll(1, 0)) / 2
assert babble(test_waveform, lengths).allclose(expected, atol=1e-4)
def test_drop_freq(device):
paddle.device.set_device(device)
from paddlespeech.vector.io.augment import DropFreq
test_waveform = paddle.sin(
paddle.arange(16000.0, dtype="float32")).unsqueeze(0)
# Edge cases
no_drop = DropFreq(drop_prob=0.0)
assert no_drop(test_waveform).allclose(test_waveform)
no_drop = DropFreq(drop_count_low=0, drop_count_high=0)
assert no_drop(test_waveform).allclose(test_waveform)
# Check case where frequency range *does not* include signal frequency
drop_diff_freq = DropFreq(drop_freq_low=0.5, drop_freq_high=0.9)
assert drop_diff_freq(test_waveform).allclose(test_waveform, atol=1e-1)
# Check case where frequency range *does* include signal frequency
drop_same_freq = DropFreq(drop_freq_low=0.28, drop_freq_high=0.28)
assert drop_same_freq(test_waveform).allclose(
paddle.zeros([1, 16000]), atol=4e-1)
def test_drop_chunk(device):
paddle.device.set_device(device)
from paddlespeech.vector.io.augment import DropChunk
test_waveform = paddle.sin(
paddle.arange(16000.0, dtype="float32")).unsqueeze(0)
lengths = paddle.ones([1])
# Edge cases
no_drop = DropChunk(drop_prob=0.0)
assert no_drop(test_waveform, lengths).allclose(test_waveform)
no_drop = DropChunk(drop_length_low=0, drop_length_high=0)
assert no_drop(test_waveform, lengths).allclose(test_waveform)
no_drop = DropChunk(drop_count_low=0, drop_count_high=0)
assert no_drop(test_waveform, lengths).allclose(test_waveform)
no_drop = DropChunk(drop_start=0, drop_end=0)
assert no_drop(test_waveform, lengths).allclose(test_waveform)
# Specify all parameters to ensure it is deterministic
dropper = DropChunk(
drop_length_low=100,
drop_length_high=100,
drop_count_low=1,
drop_count_high=1,
drop_start=100,
drop_end=200,
noise_factor=0.0, )
expected_waveform = test_waveform.clone()
expected_waveform[:, 100:200] = 0.0
assert dropper(test_waveform, lengths).allclose(expected_waveform)
# Make sure amplitude is similar before and after
dropper = DropChunk(noise_factor=1.0)
drop_amplitude = dropper(test_waveform, lengths).abs().mean()
orig_amplitude = test_waveform.abs().mean()
assert drop_amplitude.allclose(orig_amplitude, atol=1e-2)
```
#### File: utils/fst/prepare_dict.py
```python
import argparse
def main(args):
# load vocab file
# line: token
unit_table = set()
with open(args.unit_file, 'r') as fin:
for line in fin:
unit = line.strip()
unit_table.add(unit)
def contain_oov(units):
"""token not in vocab
Args:
units (str): token
Returns:
            bool: True if any unit is not in the vocab (OOV), else False.
"""
for unit in units:
if unit not in unit_table:
return True
return False
# load spm model, for English
bpemode = args.bpemodel
if bpemode:
import sentencepiece as spm
sp = spm.SentencePieceProcessor()
        sp.Load(args.bpemodel)
# used to filter polyphone and invalid word
lexicon_table = set()
in_n = 0 # in lexicon word count
    out_n = 0  # out lexicon word count
with open(args.in_lexicon, 'r') as fin, \
open(args.out_lexicon, 'w') as fout:
for line in fin:
word = line.split()[0]
in_n += 1
if word == 'SIL' and not bpemode: # `sil` might be a valid piece in bpemodel
# filter 'SIL' for mandarin, keep it in English
continue
elif word == '<SPOKEN_NOISE>':
# filter <SPOKEN_NOISE>
continue
else:
# each word only has one pronunciation for e2e system
if word in lexicon_table:
continue
if bpemode:
# for english
pieces = sp.EncodeAsPieces(word)
if contain_oov(pieces):
print('Ignoring words {}, which contains oov unit'.
format(''.join(word).strip('▁')))
continue
# word is piece list, which not have <unk> piece, filter out by `contain_oov(pieces)`
chars = ' '.join(
[p if p in unit_table else '<unk>' for p in pieces])
else:
# ignore words with OOV
if contain_oov(word):
print('Ignoring words {}, which contains oov unit'.
format(word))
continue
# Optional, append ▁ in front of english word
# we assume the model unit of our e2e system is char now.
if word.encode('utf8').isalpha() and '▁' in unit_table:
word = '▁' + word
chars = ' '.join(word) # word is a char list
fout.write('{} {}\n'.format(word, chars))
lexicon_table.add(word)
out_n += 1
print(
f"Filter lexicon by unit table: filter out {in_n - out_n}, {out_n}/{in_n}"
)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
        description='FST: prepare e2e(char/spm) dict')
parser.add_argument(
'--unit_file',
required=True,
        help='e2e model unit file(lang_char.txt/vocab.txt). line: char/spm_pieces'
)
parser.add_argument(
'--in_lexicon',
required=True,
help='raw lexicon file. line: word ph0 ... phn')
parser.add_argument(
'--out_lexicon',
required=True,
help='output lexicon file. line: word char0 ... charn')
parser.add_argument('--bpemodel', default=None, help='bpemodel')
args = parser.parse_args()
print(args)
main(args)
```
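The core of the script is the `contain_oov` check against the unit table. A standalone sketch with a made-up three-character unit table (not a real `lang_char.txt`/`vocab.txt`):

```python
unit_table = {'中', '国', '人'}            # hypothetical e2e model units

def contain_oov(units):
    # True if any modelling unit of the word is missing from the table
    return any(u not in unit_table for u in units)

print(contain_oov('中国'))   # False: the word is kept in the output lexicon
print(contain_oov('日本'))   # True: the word would be ignored
```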
|
{
"source": "jerryuhoo/VTuberTalk",
"score": 3
}
|
#### File: VTuberTalk/tools/glob_text.py
```python
import os
import re
import argparse
def sorted_alphanumeric(data):
convert = lambda text: int(text) if text.isdigit() else text.lower()
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
return sorted(data, key=alphanum_key)
def process(files, path):
files = sorted_alphanumeric(files)
for file in files:
if file.endswith('.txt'):
pass
else:
continue
position = path + file
print(position)
        with open(position, 'r', encoding='utf-8') as f:
            for line in f.readlines():
                with open("./text.txt", "a", encoding='utf-8') as p:
                    p.write(str(file) + " " + line + "\n")
if __name__ == '__main__':
parser = argparse.ArgumentParser(__doc__)
parser.add_argument("--path", type=str, required=True)
args = parser.parse_args()
files=os.listdir(args.path)
if os.path.exists("./text.txt"):
os.remove("./text.txt")
process(files, args.path)
```
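The natural-sort key in `sorted_alphanumeric` keeps numbered files in human order. A small self-contained demonstration with hypothetical file names:

```python
import re

convert = lambda text: int(text) if text.isdigit() else text.lower()
alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]

files = ['clip_10.txt', 'clip_2.txt', 'clip_1.txt']
print(sorted(files, key=alphanum_key))
# ['clip_1.txt', 'clip_2.txt', 'clip_10.txt'] rather than plain lexicographic order
```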
#### File: VTuberTalk/tools/video_to_wav.py
```python
from pydub import AudioSegment
import os
import argparse
def process(path, output_sample_rate, is_mono=True):
is_dir = os.path.isdir(path)
if is_dir:
path_list=os.listdir(path)
else: # input is a file
path, basename = os.path.split(path)
path_list = [basename]
print(path)
output_dir = os.path.join(path, "../raw")
if not os.path.exists(output_dir):
os.mkdir(output_dir)
for filename in path_list:
if os.path.isdir(os.path.join(path, filename)):
continue
filename_suffix = os.path.splitext(filename)[1]
print(filename)
input_file_path = os.path.join(path, filename)
output_file_path = os.path.join(output_dir, os.path.splitext(filename)[0] + ".wav")
if filename_suffix == '.flv':
sound = AudioSegment.from_flv(input_file_path)
sound = sound.set_frame_rate(output_sample_rate)
if is_mono:
sound = sound.set_channels(1)
sound.export(os.path.join(output_file_path), format="wav")
elif filename_suffix == '.mp4' or filename_suffix == '.mp3':
# file name should not contain space.
if is_mono:
cmd = "ffmpeg -i {} -ac 1 -ar {} -f wav {}".format(input_file_path, output_sample_rate, output_file_path)
else:
cmd = "ffmpeg -i {} -ac 2 -ar {} -f wav {}".format(input_file_path, output_sample_rate, output_file_path)
os.system(cmd)
else:
print("file ", filename, " format not supported!")
continue
if __name__ == '__main__':
parser = argparse.ArgumentParser(__doc__)
parser.add_argument("--path", type=str, required=True)
parser.add_argument("--is_mono", type=str, default=True)
parser.add_argument("--sr", type=int, default=16000)
args = parser.parse_args()
output_sample_rate = args.sr
is_exist = os.path.exists(args.path)
if not is_exist:
print("path not existed!")
else:
path = args.path
is_mono = args.is_mono
process(path, output_sample_rate, is_mono)
```
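A minimal sketch of the `.flv` branch above on a single hypothetical file rather than a whole directory (assumes pydub is installed and ffmpeg is on the path):

```python
from pydub import AudioSegment

sound = AudioSegment.from_flv("stream.flv")          # hypothetical input file
sound = sound.set_frame_rate(16000).set_channels(1)  # 16 kHz mono, as in the script defaults
sound.export("stream.wav", format="wav")
```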
|
{
"source": "jerryva/Dialer-Alpabets-Printing-using-Python",
"score": 3
}
|
#### File: jerryva/Dialer-Alpabets-Printing-using-Python/Dialer.py
```python
import re
class Solution:
def letterCombinations(self, digits):
if not digits:
return []
lookup, result = ["", "", "abc", "def", "ghi", "jkl", "mno", \
"pqrs", "tuv", "wxyz"], [""]
for digit in reversed(digits):
choices = lookup[int(digit)]
m, n = len(choices), len(result)
result += [result[i % n] for i in xrange(n, m * n)]
for i in xrange(m * n):
result[i] = choices[i / n] + result[i]
return result
class Solution2:
def letterCombinations(self, digits):
if not digits:
return []
lookup, result = ["", "", "abc", "def", "ghi", "jkl", "mno", \
"pqrs", "tuv", "wxyz"], []
self.letterCombinationsRecu(result, digits, lookup, "", 0)
return result
def letterCombinationsRecu(self, result, digits, lookup, cur, n):
if n == len(digits):
result.append(cur)
else:
for choice in lookup[int(digits[n])]:
self.letterCombinationsRecu(result, digits, lookup, cur + choice, n + 1)
if __name__ == "__main__":
list1=[]
list2=[]
list3=[]
list4=[]
#print Solution().letterCombinations("234")
x=raw_input('Enter the Digits ')
Solution().letterCombinations(x)
for a in Solution().letterCombinations(x):
if re.match("^[aeiou][aeiou][aeiou]$|^[aeiou][aeiou][aeiou][aeiou]$ ",a):
list1.append(a)
elif re.match("^.?[aeiou][aeiou]$|^[aeiou].?[aeiou]$|[aeiou][aeiou].?$|^.?[aeiou][aeiou][aeiou]$|^[aeiou].?[aeiou][aeiou]$|[aeiou][aeiou].?[aeiou]$|[aeiou][aeiou][aeiou].? ",a):
list2.append(a)
elif re.match("^.?.?[aeiou]$|^[aeiou].?.?$|.?[aeiou].?$|^.?.?[aeiou][aeiou]$|^[aeiou].?.?[aeiou]$|[aeiou][aeiou].?.?$|[aeiou].?[aeiou].?|^.?[aeiou][aeiou].?$",a):
list3.append(a)
else:
list4.append(a)
l1=list1+list2+list3
l2=list1+list2+list3+list4
l3=[]
l3.append(list1)
l3.append(list2)
l3.append(list3)
for j in l3:
if j!=[]:
print '\nBest Match :: ',j
break
print '\n============================================\n'
print 'Other Vowel Matches :: ',l1,'\n'
print '============================================\n'
print 'All Matches :: ',l2
```
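The file above is Python 2 (`xrange`, `raw_input`, integer `/`). A Python 3 sketch of the same keypad expansion, shown for the made-up input "23":

```python
lookup = ["", "", "abc", "def", "ghi", "jkl", "mno", "pqrs", "tuv", "wxyz"]

result = [""]
for digit in "23":
    result = [prefix + ch for prefix in result for ch in lookup[int(digit)]]
print(result)   # ['ad', 'ae', 'af', 'bd', 'be', 'bf', 'cd', 'ce', 'cf']
```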
|
{
"source": "JerryW1120/video_decaptioning",
"score": 2
}
|
#### File: JerryW1120/video_decaptioning/model.py
```python
import torch
from torch import nn
from models import resnet, resnet_AE, resnet_mask, resnet_comp, unet_mask, icnet_mask, icnet_res
import pdb
def generate_model(opt):
assert opt.model in [
'resnet', 'resnet_AE', 'resnet_mask', 'resnet_comp', 'unet', 'icnet', 'icnet_res', 'icnet_res_2D',
'icnet_res_2Dt', 'icnet_DBI', 'icnet_deep', 'icnet_deep_gate', 'icnet_deep_gate_2step'
]
if opt.model == 'resnet':
assert opt.model_depth in [10, 18, 34, 50, 101, 152, 200]
from models.resnet import get_fine_tuning_parameters
if opt.model_depth == 10:
model = resnet.resnet10(
num_classes=opt.n_classes,
shortcut_type=opt.resnet_shortcut,
sample_size=opt.sample_size,
sample_duration=opt.sample_duration)
elif opt.model_depth == 18:
model = resnet.resnet18(
num_classes=opt.n_classes,
shortcut_type=opt.resnet_shortcut,
sample_size=opt.sample_size,
sample_duration=opt.sample_duration)
elif opt.model_depth == 34:
model = resnet.resnet34(
num_classes=opt.n_classes,
shortcut_type=opt.resnet_shortcut,
sample_size=opt.sample_size,
sample_duration=opt.sample_duration)
elif opt.model_depth == 50:
model = resnet.resnet50(
num_classes=opt.n_classes,
shortcut_type=opt.resnet_shortcut,
sample_size=opt.sample_size,
sample_duration=opt.sample_duration)
elif opt.model_depth == 101:
model = resnet.resnet101(
num_classes=opt.n_classes,
shortcut_type=opt.resnet_shortcut,
sample_size=opt.sample_size,
sample_duration=opt.sample_duration)
elif opt.model_depth == 152:
model = resnet.resnet152(
num_classes=opt.n_classes,
shortcut_type=opt.resnet_shortcut,
sample_size=opt.sample_size,
sample_duration=opt.sample_duration)
elif opt.model_depth == 200:
model = resnet.resnet200(
num_classes=opt.n_classes,
shortcut_type=opt.resnet_shortcut,
sample_size=opt.sample_size,
sample_duration=opt.sample_duration)
elif opt.model == 'resnet_AE':
assert opt.model_depth in [10, 18, 34, 50, 101, 152, 200]
from models.resnet_AE import get_fine_tuning_parameters
if opt.model_depth == 18:
model = resnet_AE.resnet18(
num_classes=opt.n_classes,
shortcut_type=opt.resnet_shortcut,
sample_size=opt.sample_size,
sample_duration=opt.sample_duration,
is_gray=opt.is_gray,
opt=opt)
elif opt.model_depth == 34:
model = resnet_AE.resnet34(
num_classes=opt.n_classes,
shortcut_type=opt.resnet_shortcut,
sample_size=opt.sample_size,
sample_duration=opt.sample_duration,
is_gray=opt.is_gray,
opt=opt)
elif opt.model_depth == 50:
model = resnet_AE.resnet50(
num_classes=opt.n_classes,
shortcut_type=opt.resnet_shortcut,
sample_size=opt.sample_size,
sample_duration=opt.sample_duration,
is_gray=opt.is_gray,
opt=opt)
elif opt.model == 'resnet_mask':
assert opt.model_depth in [10, 18, 34, 50, 101, 152, 200]
from models.resnet_mask import get_fine_tuning_parameters
if opt.model_depth == 18:
model = resnet_mask.resnet18(
num_classes=opt.n_classes,
shortcut_type=opt.resnet_shortcut,
sample_size=opt.sample_size,
sample_duration=opt.sample_duration,
is_gray=opt.is_gray,
opt=opt)
elif opt.model_depth == 34:
model = resnet_mask.resnet34(
num_classes=opt.n_classes,
shortcut_type=opt.resnet_shortcut,
sample_size=opt.sample_size,
sample_duration=opt.sample_duration,
is_gray=opt.is_gray,
opt=opt)
elif opt.model_depth == 50:
model = resnet_mask.resnet50(
num_classes=opt.n_classes,
shortcut_type=opt.resnet_shortcut,
sample_size=opt.sample_size,
sample_duration=opt.sample_duration,
is_gray=opt.is_gray,
opt=opt)
elif opt.model == 'resnet_comp':
assert opt.model_depth in [10, 18, 34, 50, 101, 152, 200]
from models.resnet_comp import get_fine_tuning_parameters
if opt.model_depth == 18:
model = resnet_comp.resnet18(
num_classes=opt.n_classes,
shortcut_type=opt.resnet_shortcut,
sample_size=opt.sample_size,
sample_duration=opt.sample_duration,
is_gray=opt.is_gray,
opt=opt)
elif opt.model_depth == 34:
model = resnet_comp.resnet34(
num_classes=opt.n_classes,
shortcut_type=opt.resnet_shortcut,
sample_size=opt.sample_size,
sample_duration=opt.sample_duration,
is_gray=opt.is_gray,
opt=opt)
elif opt.model_depth == 50:
model = resnet_comp.resnet50(
num_classes=opt.n_classes,
shortcut_type=opt.resnet_shortcut,
sample_size=opt.sample_size,
sample_duration=opt.sample_duration,
is_gray=opt.is_gray,
opt=opt)
elif opt.model == 'unet':
model = unet_mask.UNet3D(
opt=opt
)
elif opt.model == 'icnet':
model = icnet_mask.ICNet3D(
opt=opt
)
elif opt.model == 'icnet_res':
model = icnet_res.ICNetResidual3D(
opt=opt
)
elif opt.model == 'icnet_res_2D':
model = icnet_res.ICNetResidual2D(
opt=opt
)
elif opt.model == 'icnet_res_2Dt':
model = icnet_res.ICNetResidual2Dt(
opt=opt
)
elif opt.model == 'icnet_DBI':
model = icnet_res.ICNetResidual_DBI(
opt=opt
)
elif opt.model == 'icnet_deep':
model = icnet_res.ICNetDeep(
opt=opt
)
elif opt.model == 'icnet_deep_gate':
model = icnet_res.ICNetDeepGate(
opt=opt
)
elif opt.model == 'icnet_deep_gate_2step':
model = icnet_res.ICNetDeepGate2step(
opt=opt
)
if not opt.no_cuda:
model = model.cuda()
model = nn.DataParallel(model, device_ids=None)
if opt.pretrain_path:
print('loading pretrained model {}'.format(opt.pretrain_path))
pretrain = torch.load(opt.pretrain_path)
print('loading from', pretrain['arch'])
child_dict = model.state_dict()
if opt.two_step and opt.test:
parent_list = pretrain['state_dict_1'].keys()
else:
parent_list = pretrain['state_dict'].keys()
print('Not loaded :')
parent_dict = {}
for chi,_ in child_dict.items():
# pdb.set_trace()
# if ('coarse' in chi):
# chi_ori = chi
# chi = 'module.' + ".".join(chi_ori.split('.')[2:])
if chi in parent_list:
if opt.two_step and opt.test:
parent_dict[chi] = pretrain['state_dict_1'][chi]
else:
parent_dict[chi] = pretrain['state_dict'][chi]
else:
print(chi)
print('length :', len(parent_dict.keys()))
child_dict.update(parent_dict)
model.load_state_dict(child_dict)
if not opt.is_AE:
model.module.fc = nn.Linear(model.module.fc.in_features, opt.n_finetune_classes)
return model, model.parameters()
else:
if opt.pretrain_path:
print('loading pretrained model {}'.format(opt.pretrain_path))
pretrain = torch.load(opt.pretrain_path)
assert pretrain['arch'] in ['resnet','resnet_AE']
model.load_state_dict(pretrain['state_dict'])
model.module.fc = nn.Linear(model.module.fc.in_features,
opt.n_finetune_classes)
if not opt.no_cuda:
model.module.fc = model.module.fc.cuda()
parameters = get_fine_tuning_parameters(model, opt.ft_begin_index)
return model, parameters
return model, model.parameters()
```
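The pretrained-weight loading above only copies checkpoint entries whose keys also exist in the new model. A toy sketch of that partial-loading pattern with stand-in modules (no real checkpoint or `opt` object is assumed):

```python
import torch

child = torch.nn.Linear(4, 2)
parent_state = {'weight': torch.zeros(2, 4), 'decoder.bias': torch.zeros(2)}

child_dict = child.state_dict()
matched = {k: v for k, v in parent_state.items() if k in child_dict}
print(sorted(matched))          # ['weight'] -- keys the child does not have are skipped
child_dict.update(matched)
child.load_state_dict(child_dict)
```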
#### File: video_decaptioning/models/resnet_AE.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import math
from functools import partial
import pdb
__all__ = [
'ResNet', 'resnet10', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnet200'
]
def conv3x3x3(in_planes, out_planes, stride=1):
# 3x3x3 convolution with padding
return nn.Conv3d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=1,
bias=False)
def downsample_basic_block(x, planes, stride):
out = F.avg_pool3d(x, kernel_size=1, stride=stride)
zero_pads = torch.Tensor(
out.size(0), planes - out.size(1), out.size(2), out.size(3),
out.size(4)).zero_()
if isinstance(out.data, torch.cuda.FloatTensor):
zero_pads = zero_pads.cuda()
out = Variable(torch.cat([out.data, zero_pads], dim=1))
return out
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm3d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3x3(planes, planes)
self.bn2 = nn.BatchNorm3d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv3d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm3d(planes)
self.conv2 = nn.Conv3d(
planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm3d(planes)
self.conv3 = nn.Conv3d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm3d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self,
block,
layers,
sample_size,
sample_duration,
shortcut_type='A',
num_classes=400,
is_gray=False,
opt=None):
self.num_classes = num_classes
self.inplanes = 64
super(ResNet, self).__init__()
self.is_fwbw = opt.is_fwbw
if is_gray:
self.conv1 = nn.Conv3d(1,64,kernel_size=7, stride=(2, 2, 2), padding=(3, 3, 3), bias=False)
else:
self.conv1 = nn.Conv3d(3,64,kernel_size=7, stride=(2, 2, 2), padding=(3, 3, 3), bias=False)
self.bn1 = nn.BatchNorm3d(64)
self.relu = nn.ReLU(inplace=True)
#self.maxpool = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], shortcut_type) # 64x 8x64x64
self.layer2 = self._make_layer(block, 128, layers[1], shortcut_type, stride=2) # 128x 4x32x32
self.layer3 = self._make_layer(block, 256, layers[2], shortcut_type, stride=2) # 256x 2x16x16
self.layer4 = self._make_layer(block, 512, layers[3], shortcut_type, stride=1) # 512x 2x16x16
'''last_size = int(math.ceil(sample_size / 8))
last_duration = int(math.ceil(sample_duration / (8)))
n_bottleneck = 16384
self.conv_last = nn.Conv3d(512,n_bottleneck,kernel_size=(last_duration,last_size,last_size),stride=(1,1,1),groups=512,bias=False)
# DECODER
self.deconv1 = nn.ConvTranspose3d(n_bottleneck,512, kernel_size=(last_duration,4,4),groups=512,bias=False) #1x4x4
self.debn1 = nn.BatchNorm3d(512)
self.deconv2 = nn.ConvTranspose3d(512,256,kernel_size=(2,4,4), stride=2,padding=(0,1,1),bias=False) # 4x8x8
self.debn2 = nn.BatchNorm3d(256)
self.deconv3 = nn.ConvTranspose3d(256,128,kernel_size=(2,4,4), stride=2, padding=(0,1,1),bias=False) # 8x16x16
self.debn3 = nn.BatchNorm3d(128)
self.deconv4 = nn.ConvTranspose3d(128,64,kernel_size=(2,4,4), stride=2, padding=(0,1,1),bias=False) # 16x32x32
self.debn4 = nn.BatchNorm3d(64)
self.deconv5 = nn.ConvTranspose3d(64,3,kernel_size=(2,4,4), stride=2, padding=(0,1,1),bias=False) # 32x64x64'''
self.deconv1 = nn.ConvTranspose3d(512,256, kernel_size=(2,2,2), stride=2, bias=False)
self.debn1 = nn.BatchNorm3d(256) # bx256x4x16x16
self.deconv2 = nn.ConvTranspose3d(256,128, kernel_size=(2,2,2), stride=2, bias=False)
self.debn2 = nn.BatchNorm3d(128) # bx128x8x32x32
self.deconv3 = nn.ConvTranspose3d(128,64, kernel_size=(2,2,2), stride=2, bias=False)
self.debn3 = nn.BatchNorm3d(64) # bx64x16x64x64
self.deconv4 = nn.ConvTranspose3d(64,3, kernel_size=(3,3,3), stride=1, padding=(1,1,1), bias=False) # bx3x16x64x64
for m in self.modules():
if isinstance(m, nn.Conv3d):
m.weight = nn.init.kaiming_normal(m.weight, mode='fan_out')
elif isinstance(m, nn.ConvTranspose3d):
m.weight = nn.init.kaiming_normal(m.weight, mode='fan_out')
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, shortcut_type, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
if shortcut_type == 'A':
downsample = partial(
downsample_basic_block,
planes=planes * block.expansion,
stride=stride)
else:
downsample = nn.Sequential(
nn.Conv3d(
self.inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False), nn.BatchNorm3d(planes * block.expansion))
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
#print('C0',x.size())
####x = self.maxpool(x)
#print( 'P0',x.size())
x = self.layer1(x)
#print('L1',x.size())
x = self.layer2(x)
#print('L2',x.size())
x = self.layer3(x)
#print('L3',x.size())
x = self.layer4(x)
x = self.relu(x)
#print('L4',x.size())
'''x = self.relu(self.conv_last(x))
print('B',x.size())'''
x = self.relu(self.debn1(self.deconv1(x)))
x = self.relu(self.debn2(self.deconv2(x)))
x = self.relu(self.debn3(self.deconv3(x)))
x = self.deconv4(x)
#print('D4',x.size())
return x
def get_fine_tuning_parameters(model, ft_begin_index):
if ft_begin_index == 0:
return model.parameters()
ft_module_names = []
for i in range(ft_begin_index, 5):
ft_module_names.append('layer{}'.format(i))
ft_module_names.append('fc')
parameters = []
for k, v in model.named_parameters():
for ft_module in ft_module_names:
if ft_module in k:
parameters.append({'params': v})
break
else:
parameters.append({'params': v, 'lr': 0.0})
return parameters
def resnet10(**kwargs):
"""Constructs a ResNet-18 model.
"""
model = ResNet(BasicBlock, [1, 1, 1, 1], **kwargs)
return model
def resnet18(**kwargs):
"""Constructs a ResNet-18 model.
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
return model
def resnet34(**kwargs):
"""Constructs a ResNet-34 model.
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
return model
def resnet50(**kwargs):
"""Constructs a ResNet-50 model.
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
return model
def resnet101(**kwargs):
"""Constructs a ResNet-101 model.
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
return model
def resnet152(**kwargs):
"""Constructs a ResNet-101 model.
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
return model
def resnet200(**kwargs):
"""Constructs a ResNet-101 model.
"""
model = ResNet(Bottleneck, [3, 24, 36, 3], **kwargs)
return model
```
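A minimal CPU sketch of instantiating the autoencoder above; `opt` is a stand-in carrying only the attribute the class reads (`is_fwbw`), the factory `resnet18` defined above is assumed to be in scope, and the input sizes are illustrative:

```python
import torch
from types import SimpleNamespace

opt = SimpleNamespace(is_fwbw=False)
model = resnet18(sample_size=64, sample_duration=16, num_classes=400,
                 shortcut_type='A', is_gray=False, opt=opt)

x = torch.randn(1, 3, 16, 64, 64)       # (N, C, T, H, W)
with torch.no_grad():
    y = model(x)
print(y.shape)                          # torch.Size([1, 3, 16, 64, 64]) -- decoder restores the input size
```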
#### File: JerryW1120/video_decaptioning/temporal_transforms.py
```python
import random
import math
import pdb
class LoopPadding(object):
def __init__(self, size):
self.size = size
def __call__(self, frame_indices):
out = frame_indices
print(len(out))
for index in out:
print(index)
if len(out) >= self.size:
break
out.append(index)
return out
class TemporalBeginCrop(object):
"""Temporally crop the given frame indices at a beginning.
If the number of frames is less than the size,
loop the indices as many times as necessary to satisfy the size.
Args:
size (int): Desired output size of the crop.
"""
def __init__(self, size):
self.size = size
def __call__(self, frame_indices):
out = frame_indices[:self.size]
for index in out:
if len(out) >= self.size:
break
out.append(index)
return out
class TemporalCenterCrop(object):
"""Temporally crop the given frame indices at a center.
If the number of frames is less than the size,
loop the indices as many times as necessary to satisfy the size.
Args:
size (int): Desired output size of the crop.
"""
def __init__(self, size):
self.size = size
def __call__(self, frame_indices):
"""
Args:
frame_indices (list): frame indices to be cropped.
Returns:
list: Cropped frame indices.
"""
center_index = len(frame_indices) // 2
begin_index = max(0, center_index - (self.size // 2))
end_index = min(begin_index + self.size, len(frame_indices))
out = frame_indices[begin_index:end_index]
for index in out:
if len(out) >= self.size:
break
out.append(index)
return out
class TemporalRandomCrop(object):
"""Temporally crop the given frame indices at a random location.
If the number of frames is less than the size,
loop the indices as many times as necessary to satisfy the size.
Args:
size (int): Desired output size of the crop.
"""
def __init__(self, size):
self.size = size
def __call__(self, frame_indices):
"""
Args:
frame_indices (list): frame indices to be cropped.
Returns:
list: Cropped frame indices.
"""
rand_end = max(0, len(frame_indices) - self.size - 1)
begin_index = random.randint(0, rand_end)
end_index = min(begin_index + self.size, len(frame_indices))
out = frame_indices[begin_index:end_index]
for index in out:
if len(out) >= self.size:
break
out.append(index)
return out
class TemporalRandomCropMirror(object):
"""Temporally crop the given frame indices at a random location.
If the number of frames is less than the size,
loop the indices as many times as necessary to satisfy the size.
Args:
size (int): Desired output size of the crop.
"""
def __init__(self, size):
self.size = size
def __call__(self, frame_indices):
"""
Args:
frame_indices (list): frame indices to be cropped.
Returns:
list: Cropped frame indices.
"""
mid = self.size//2
tmp1 = frame_indices[1:mid][::-1] # 31
tmp2 = frame_indices[-mid:-1][::-1] # 31
frame_indices = tmp1+frame_indices+tmp2
rand_end = max(0, len(frame_indices) - self.size - 1)
begin_index = random.randint(0, rand_end)
end_index = min(begin_index + self.size, len(frame_indices))
out = frame_indices[begin_index:end_index]
for index in out:
if len(out) >= self.size:
break
out.append(index)
return out
class TemporalRandomCut(object):
"""Temporally crop the given frame indices at a random location.
If the number of frames is less than the size,
loop the indices as many times as necessary to satisfy the size.
Args:
size (int): Desired output size of the crop.
"""
def __init__(self, size):
self.size = size
def __call__(self, frame_indices):
"""
Args:
frame_indices (list): frame indices to be cropped.
Returns:
list: Cropped frame indices.
"""
if len(frame_indices) < self.size:
out = frame_indices
for index in out:
if len(out) >= (self.size/2): #len(out) >= self.size/2(=64)
break
out.append(index)
else:
rand_end = max(0, len(frame_indices) - self.size - 1)
begin_index = random.randint(0, rand_end)
end_index = min(begin_index + self.size, len(frame_indices))
out = frame_indices[begin_index:end_index]
for index in out:
if len(out) >= self.size:
break
out.append(index)
return out
class RandomTemporalFlip(object):
"""temporally flip given volume randomly with a probability of 0.5."""
def __call__(self, frame_indices):
"""
Args:
video clip to be flipped.
Returns:
randomly flipped video clip.
"""
if random.random() < 0.5:
return frame_indices[::-1]
return frame_indices
```
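A quick illustration of the cropping transforms above on toy index lists (assumes the classes in this file are in scope):

```python
crop = TemporalCenterCrop(size=4)
print(crop(list(range(10))))     # [3, 4, 5, 6]
print(crop([0, 1]))              # [0, 1, 0, 1] -- short clips are looped up to size

flip = RandomTemporalFlip()
print(flip(list(range(5))))      # either [0, 1, 2, 3, 4] or [4, 3, 2, 1, 0]
```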
|
{
"source": "jerrywan4/MineCity",
"score": 3
}
|
#### File: MineCity/stock-filters/RemoveTree.py
```python
import time
from math import sqrt, tan, sin, cos, pi, ceil, floor, acos, atan, asin, degrees, radians, log, atan2, acos, asin
from random import *
import numpy
from pymclevel import alphaMaterials, MCSchematic, MCLevel, BoundingBox
from mcplatform import *
import utilityFunctions
from helper import *
from ChunkAnalysis import *
from PlaceDistricts import *
from CreateRoads import *
inputs = (
("Remove Trees", "label"),
("Number to Remove", (1, 10))
)
displayName = "Remove Trees"
def perform(level, box, options):
startTime = time.time()
initializeHeightmap(level, box)
for i in range(options["Number to Remove"]):
listOfTrees = list(treeMap)
if len(listOfTrees) == 0:
break
x = choice(listOfTrees)
deleteTree(level, x[0], x[1])
endTime = time.time()
print "Finished in " + str(endTime - startTime) + " seconds"
return
```
#### File: MineCity/stock-filters/structure.py
```python
wallBlocks = {};
stairBlocks = {};
doorBlocks = {};
supportBlocks ={}
fenceBlocks = {}
slabBlocks = {}
def init():
#Tree
wallBlocks[17] = {};
wallBlocks[17][0] = [5,0]
wallBlocks[17][1] = [5,1]
wallBlocks[17][2] = [5,2]
wallBlocks[17][3] = [5,3]
wallBlocks[162] = {};
wallBlocks[162][0] = [5,4]
wallBlocks[162][1] =[5,5];
supportBlocks[17] = {};
supportBlocks[17][0] = [17,0]
supportBlocks[17][1] = [17,1]
supportBlocks[17][2] = [17,2]
supportBlocks[17][3] = [17,3]
supportBlocks[162] = {};
supportBlocks[162][0] = [160,0]
supportBlocks[162][1] =[161,1];
fenceBlocks[17] = {};
fenceBlocks[17][0] = [85,0]
fenceBlocks[17][1] = [188,0]
fenceBlocks[17][2] = [189,0]
fenceBlocks[17][3] = [190,0]
fenceBlocks[162] = {};
fenceBlocks[162][0] = [191,0]
fenceBlocks[162][1] =[192,0];
stairBlocks[17]={};
stairBlocks[17][0] = 53
stairBlocks[17][1] = 134
stairBlocks[17][2] = 135
stairBlocks[17][3] = 136
stairBlocks[162] = {};
stairBlocks[162][0] = 163
stairBlocks[162][1] =164;
doorBlocks[17] = {};
doorBlocks[17][0] = 64
doorBlocks[17][1] = 193
doorBlocks[17][2] = 194
doorBlocks[17][3] = 195
doorBlocks[162] = {};
doorBlocks[162][0] = 196
doorBlocks[162][1] = 197;
#stone
wallBlocks[1] = {};
wallBlocks[1][0] = [43,5]
wallBlocks[4] = {}
wallBlocks[4][0] = [43,3]
wallBlocks[24] = {};
wallBlocks[24][0] = [24,2]
supportBlocks[1] = {};
supportBlocks[1][0] = [1,0]
supportBlocks[4] = {}
supportBlocks[4][0] = [4,0]
supportBlocks[24] = {};
supportBlocks[24][0] = [24,1]
stairBlocks[1] = {};
stairBlocks[1][0] = 109
stairBlocks[4] = {}
stairBlocks[4][0] = 67
stairBlocks[24] = {};
stairBlocks[24][0] = 128
slabBlocks[1] = {};
slabBlocks[1][0] = [44,0]
slabBlocks[4] = {}
slabBlocks[4][0] = [44,3]
slabBlocks[24] = {};
slabBlocks[24][0] = [44,1]
def mainHouseBox(width, length,material1,material2):
(m1,d1)=material1;
(m2,d2)=material2;
wallId = wallBlocks[m1][d1][0];
wallValue = wallBlocks[m1][d1][1];
supportId = supportBlocks[m2][d2][0];
supportValue = supportBlocks[m2][d2][1]
stair = stairBlocks[m2][d2];
door = doorBlocks[m1][d1];
lv = int((max(width,length)-2) / 5);
lv = min(lv,3);
house = [];
for i in range(width):
house.append([]);
for j in range(length):
house[i].append([]);
for k in range(lv):
house[i][j].append([0,0]);
house[i][j].append([0,0]);
house[i][j].append([0,0]);
house[i][j].append([0,0]);
house[i][j].append([0,0]);
subWidth = 5;
subLength = (length - 1)/2;
w1 = subWidth + 1;
w2 = width - subWidth - 1
l1 = length - subLength;
#Ground
for x in xrange(1, width - 1):
for z in xrange( 1, length - 1):
if z > l1:
if w1 <= x < w2:
continue
house[x][z][0][0]=supportId;
house[x][z][0][1]=supportValue;
table = ((1,1),(1,length-2),(width-2,1),(width-2,length-2),
(w1-1,length-2),(w2,length-2),(w1-1,l1),(w2,l1));
for l in range(lv):
#eight support
for (x,z) in table:
for he in xrange(1,4):
house[x][z][l*4+he][0]=supportId;
house[x][z][l*4+he][1]=supportValue;
#wall
for x in xrange(2, width - 2):
z = 1
for he in xrange(1,4):
house[x][z][l*4+he][0]=wallId;
house[x][z][l*4+he][1]=wallValue
if x % 2 == 1:
house[x][z][l*4+2][0]=20
for x in (1,width-2):
for z in xrange(2,length-2):
for he in xrange(1,4):
house[x][z][l*4+he][0]=wallId;
house[x][z][l*4+he][1]=wallValue
if z % 2 == 1:
house[x][z][l*4+2][0]=20;
for x in xrange(2, w1-1):
z = length - 2;
for he in xrange(1,4):
house[x][z][l*4+he][0]=wallId;
house[x][z][l*4+he][1]=wallValue
if x % 2 == 1:
house[x][z][l*4+2][0]=20;
for x in xrange(w2+1, width - 2):
z = length - 2;
for he in xrange(1,4):
house[x][z][l*4+he][0]=wallId;
house[x][z][l*4+he][1]=wallValue
if x % 2 == 1:
house[x][z][l*4+2][0]=20;
for x in xrange(w1,w2):
z = l1
for he in xrange(1,4):
house[x][z][l*4+he][0]=wallId;
house[x][z][l*4+he][1]=wallValue
if x % 2 == 1:
house[x][z][l*4+2][0]=20;
for z in xrange(l1+1, length - 2):
for x in (w1-1,w2):
for he in xrange(1,4):
house[x][z][l*4+he][0]=wallId;
house[x][z][l*4+he][1]=wallValue
if z % 2 == 1:
house[x][z][l*4+2][0]=20;
#floor
for x in xrange(1, width - 1):
for z in xrange(1, length - 1):
if z > l1:
if w1 <= x < w2:
continue
house[x][z][l*4+4][0]=supportId;
house[x][z][l*4+4][1]=supportValue;
for x in xrange(1, width-1):
z = 0;
house[x][z][l*4+4][0]=stair;
house[x][z][l*4+4][1]=0;
z = 0 + length - 1;
if x == w1:
continue;
if x == w2-1:
continue;
if w1 <= x < w2:
house[x][l1+1][l*4+4][0]=stair;
house[x][l1+1][l*4+4][1]=2;
else:
house[x][length-1][l*4+4][0]=stair;
house[x][length-1][l*4+4][1]=2;
for z in xrange(1, length - 1):
x = 0;
house[x][z][l*4+4][0]=stair;
house[x][z][l*4+4][1]=3;
x = width - 1;
house[x][z][l*4+4][0]=stair;
house[x][z][l*4+4][1]=1;
for z in xrange(l1+1, length - 1):
x = w1;
house[x][z][l*4+4][0]=stair;
house[x][z][l*4+4][1]=1;
x = w2-1;
house[x][z][l*4+4][0]=stair;
house[x][z][l*4+4][1]=3;
#door
x = (width -1)/2;
z = l1;
house[x][z][1][0]=door;
house[x][z][1][1]=0;
house[x][z][2][0]=door;
house[x][z][2][1]=8;
house[x][z+1][0][0]=stair;
house[x][z+1][0][1]=2;
z=z+2;
while (z<length):
house[x][z][0][0]=333;
z = z+1;
return house
def simpleHouseBox(width,length,material1,material2):
(m1,d1)=material1;
(m2,d2)=material2;
wallId = wallBlocks[m1][d1][0];
wallValue = wallBlocks[m1][d1][1];
supportId = supportBlocks[m2][d2][0];
supportValue = supportBlocks[m2][d2][1]
stair = stairBlocks[m2][d2];
door = doorBlocks[m1][d1];
lv = int((max(width,length)-2) / 5);
lv = min(lv,3);
house = [];
for i in range(width):
house.append([]);
for j in range(length):
house[i].append([]);
for k in range(lv):
house[i][j].append([0,0]);
house[i][j].append([0,0]);
house[i][j].append([0,0]);
house[i][j].append([0,0]);
house[i][j].append([0,0]);
#Ground
for x in range(1,width-1):
for z in xrange(1,length-1):
house[x][z][0][0] = supportId;
house[x][z][0][1] = supportValue;
table =((1,1),(1,length-2),(width-2,1),(width-2,length-2))
for l in range(lv):
#four support
for (x,z) in table:
for he in xrange(1,4):
house[x][z][l*4+he][0]=supportId;
house[x][z][l*4+he][1]=supportValue;
#wall
for x in xrange(2,width-2):
for z in (1,length-2):
for he in xrange(1,4):
house[x][z][l*4+he][0]=wallId;
house[x][z][l*4+he][1]=wallValue;
for x in (1,width-2):
for z in xrange(2,length-2):
for he in xrange(1,4):
house[x][z][l*4+he][0]=wallId;
house[x][z][l*4+he][1]=wallValue;
#window
for x in xrange(3,width-2,2):
z = 1;
house[x][z][l*4+2][0]=20;
z = length - 2;
house[x][z][l*4+2][0]=20;
for z in xrange(3,length-2,2):
x = 1;
house[x][z][l*4+2][0]=20;
x = width - 2;
house[x][z][l*4+2][0]=20;
#Floor:
for x in xrange(1, width -1):
for z in xrange(1,length -1):
house[x][z][l*4+4][0]=supportId;
house[x][z][l*4+4][1]=supportValue;
for x in xrange(1, width-1):
z = 0;
house[x][z][l*4+4][0]=stair;
house[x][z][l*4+4][1]=0;
z = length - 1;
house[x][z][l*4+4][0]=stair;
house[x][z][l*4+4][1]=2;
for z in xrange( 1, length - 1):
x = 0;
house[x][z][l*4+4][0]=stair;
house[x][z][l*4+4][1]=3;
x = width - 1;
house[x][z][l*4+4][0]=stair;
house[x][z][l*4+4][1]=1;
#door
x = (width -1)/2;
z = length-2;
house[x][z][1][0]=door;
house[x][z][1][1]=0;
house[x][z][2][0]=door;
house[x][z][2][1]=8;
house[x][z+1][0][0]=stair;
house[x][z+1][0][1]=2;
return house;
def farmBox(material1):
(w1,v1)=material1;
width = 7;
length = 9;
house =[];
for x in range(width):
house.append([]);
for z in range(length):
house[x].append([])
for y in range(2):
house[x][z].append([0,0]);
for x in [0,width-1]:
for z in xrange(0,length):
house[x][z][0][0] = w1;
house[x][z][0][1] = v1;
for x in xrange(1,width-1):
for z in [0,length-1]:
house[x][z][0][0] = w1;
house[x][z][0][1] = v1;
for x in xrange(1,width-1):
for z in xrange(1,length-1):
house[x][z][0][0] = 60;
house[x][z][0][1] = 0;
x = (width-1) / 2;
for z in xrange(1,length-1):
house[x][z][0][0] = 9;
for x in xrange(1,width-1):
if x == (width-1)/2:
continue;
for z in xrange(1,length-1):
house[x][z][1][0] = 59;
house[x][z][1][1] = 4;
return house;
def smithBox(material1,material2):
(m1,d1)=material1;
(m2,d2)=material2;
wallId = wallBlocks[m1][d1][0];
wallValue = wallBlocks[m1][d1][1];
supportId = wallBlocks[m2][d2][0];
supportValue = wallBlocks[m2][d2][1]
stair = stairBlocks[m2][d2];
width = 8;
    length = 10;
    house = []
    for x in range(width):
        house.append([]);
        for z in range(length):
            house[x].append([])
            for y in range(6):
                house[x][z].append([0,0]);
for x in range(width-1):
for z in range(length):
house[x][z][0][0] = wallId;
house[x][z][0][1] = wallValue;
house[width-1][1][0][0] = stair;
house[width-1][1][0][1] = 0;
house[width-1][2][0][0] = stair;
house[width-1][2][0][1] = 0;
house[width-1][3][0][0] = stair;
house[width-1][3][0][1] = 0;
for x in range(3):
for z in range(4):
house[x][z][1][0] = supportId;
house[x][z][1][1] = supportValue;
if x == 1:
if z in [1,2]:
house[x][z][1][0] = 11;
house[x][z][1][1] = 0;
x = 0;
for z in xrange(4,length-1):
house[x][z][1][0]=wallId;
house[x][z][1][1]=wallValue;
z=length-1;
for x in xrange(1,width-1):
house[x][z][1][0]=wallId;
house[x][z][1][1]=wallValue;
x = width-2;
for z in xrange(7,length-1):
house[x][z][1][0]=wallId;
        house[x][z][1][1]=wallValue;
    return house
def buildFloor(floors,themap):
height = len(floors);
width = 0;
length = 0;
for y in range(height):
length = max(length,len(floors[y]));
for x in range(len(floors[y])):
width = max(width,len(floors[y][x]));
house = [];
for x in range(width):
house.append([]);
for z in range(length):
house[x].append([]);
for y in range(height):
house[x][z].append([0,0]);
for y in range(height):
for x in range(len(floors[y])):
for z in range(len(floors[y][x])):
char = floors[y][x][z];
if char in themap:
house[z][x][y] = themap[char]
return house;
def readFloors(fileName):
file = open(fileName,'r');
floor = [];
now = 0;
with open(fileName) as file:
for line in file:
if line[0] == '|':
floor.append([])
else:
floor[-1].append(line);
if floor[-1][-1][-1] == '\r' or floor[-1][-1][-1] == '\n':
floor[-1][-1] = floor[-1][-1][:-1]
return floor;
#material1:mainpart, material2:sidepart
def smithBox(material1,material2):
floor = readFloors("stock-filters/structures/smith.txt");
(m1,d1)=material1;
(m2,d2)=material2;
mainWall = wallBlocks[m1][d1];
mainSupport = supportBlocks[m1][d1];
sideWall = wallBlocks[m2][d2];
sideSupport = supportBlocks[m2][d2];
mainStair = stairBlocks[m1][d1];
sideStair = stairBlocks[m2][d2];
fence = fenceBlocks[m2][d2]
themap = {};
themap['C'] = mainSupport;
themap['O'] = [mainStair,2];
themap['P'] = sideWall;
themap['W'] = sideSupport;
themap['L'] = [11,0]
themap['S'] = [sideStair,3];
themap['s'] = [sideStair,1];
themap['F'] = fence;
themap['D'] = mainWall
themap['Q'] = [54,3]
themap['N'] = [102,0]
themap['n'] = [102,0]
themap['I'] = [101,0]
themap['B'] = [61,0]
themap['R'] = [72,0];
themap['$'] = slabBlocks[m1][d1]
return buildFloor(floor,themap);
def butcherBox(material1,material2):
floor = readFloors("stock-filters/structures/butcher.txt");
(m1,d1)=material1;
(m2,d2)=material2;
mainWall = wallBlocks[m1][d1];
mainSupport = supportBlocks[m1][d1];
sideWall = wallBlocks[m2][d2];
sideSupport = supportBlocks[m2][d2];
door = doorBlocks[m2][d2]
mainStair = stairBlocks[m1][d1];
sideStair = stairBlocks[m2][d2];
slab = slabBlocks[m1][d1]
fence = fenceBlocks[m2][d2]
themap = {};
themap['I'] = [2,0];
themap['C'] = mainSupport;
themap['P'] = sideWall;
themap['S'] = [sideStair,2];
themap['N'] = slab
themap['F'] = fence;
themap['O'] = [door,0];
themap['Y'] = [door,8];
themap['T'] = [sideStair,2];
themap['t'] = [sideStair,0];
themap['D'] = mainWall
themap['G'] = [102,0]
themap['g'] = [102,0]
themap['W'] = sideSupport;
themap['L'] = [72,0]
themap['!'] = [50,2]
themap['h'] = [50,0]
return buildFloor(floor,themap);
def churchBox(material1,material2):
floor = readFloors("stock-filters/structures/church.txt");
(m1,d1)=material1;
(m2,d2)=material2;
mainWall = wallBlocks[m1][d1];
mainSupport = supportBlocks[m1][d1];
sideWall = wallBlocks[m2][d2];
sideSupport = supportBlocks[m2][d2];
door = doorBlocks[m2][d2]
mainStair = stairBlocks[m1][d1];
sideStair = stairBlocks[m2][d2];
slab = slabBlocks[m1][d1]
fence = fenceBlocks[m2][d2]
themap = {};
themap['C'] = mainSupport;
themap['S'] = [mainStair,2];
themap['s'] = [mainStair,3];
themap['$'] = [mainStair,0];
themap['L'] = [65,0]
themap['D'] = [door,0]
themap['G'] = [102,0]
themap['g'] = [102,0]
themap['O'] = [door,8];
themap['T'] = [50,2]
themap['t'] = [50,3]
themap['H'] = [50,0]
themap['h'] = [50,1]
return buildFloor(floor,themap);
def lampBox(material2):
floor = readFloors("stock-filters/structures/lamp.txt");
(m2,d2)=material2;
fence = fenceBlocks[m2][d2]
themap = {};
themap['F'] = fence;
themap['W'] = [m2,d2];
themap['T'] = [50,2]
themap['t'] = [50,3]
themap['H'] = [50,0]
themap['h'] = [50,1]
return buildFloor(floor,themap);
def libraryBox(material1,material2):
floor = readFloors("stock-filters/structures/library.txt");
(m1,d1)=material1;
(m2,d2)=material2;
mainWall = wallBlocks[m1][d1];
mainSupport = supportBlocks[m1][d1];
sideWall = wallBlocks[m2][d2];
sideSupport = supportBlocks[m2][d2];
door = doorBlocks[m2][d2]
mainStair = stairBlocks[m1][d1];
sideStair = stairBlocks[m2][d2];
slab = slabBlocks[m1][d1]
fence = fenceBlocks[m2][d2]
themap = {};
themap['c'] = mainSupport;
themap['o'] = [mainStair,2];
themap['p'] = sideWall
themap['s'] = [sideStair,2]
themap['S'] = [sideStair,0]
themap['d'] = [door,0]
themap['a'] = [door,8]
themap['e'] = [58,0]
themap['f'] = fence;
themap['g'] = [102,0]
themap['G'] = [102,0]
themap['r'] = [72,0]
themap['l'] = [47,0]
return buildFloor(floor,themap);
def wellBox(material1,material2):
floor = readFloors("stock-filters/structures/well.txt");
(m1,d1)=material1;
(m2,d2)=material2;
mainWall = wallBlocks[m1][d1];
mainSupport = supportBlocks[m1][d1];
sideWall = wallBlocks[m2][d2];
sideSupport = supportBlocks[m2][d2];
door = doorBlocks[m2][d2]
mainStair = stairBlocks[m1][d1];
sideStair = stairBlocks[m2][d2];
slab = slabBlocks[m1][d1]
fence = fenceBlocks[m2][d2]
themap = {};
themap['C'] = mainSupport;
themap['W'] = [8,0]
themap['F'] = fence
return buildFloor(floor,themap);
```
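These builders are meant for MCEdit's Python 2 filter environment. A minimal Python 2 sketch of requesting a small layout after populating the lookup tables, using oak logs `(17, 0)` for both materials (the inspected indices are illustrative):

```python
init()
house = simpleHouseBox(7, 9, (17, 0), (17, 0))

# house is indexed [x][z][y] -> [block_id, data_value]
print len(house), len(house[0]), len(house[0][0])    # 7 9 5
print house[3][7][1]                                 # [64, 0] -- lower half of the front door
```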
|
{
"source": "JerryWei1985/k2",
"score": 2
}
|
#### File: python/tests/intersect_test.py
```python
import unittest
import k2
import torch
class TestIntersect(unittest.TestCase):
def test_treat_epsilon_specially_false(self):
devices = [torch.device('cpu')]
if torch.cuda.is_available() and k2.with_cuda:
devices.append(torch.device('cuda'))
for device in devices:
# a_fsa recognizes `(0|1)2*`
s1 = '''
0 1 0 0.1
0 1 1 0.2
1 1 2 0.3
1 2 -1 0.4
2
'''
a_fsa = k2.Fsa.from_str(s1).to(device)
a_fsa.requires_grad_(True)
# b_fsa recognizes `1|2`
s2 = '''
0 1 1 1
0 1 2 2
1 2 -1 3
2
'''
b_fsa = k2.Fsa.from_str(s2).to(device)
b_fsa.requires_grad_(True)
# fsa recognizes `1`
fsa = k2.intersect(a_fsa, b_fsa, treat_epsilons_specially=False)
assert len(fsa.shape) == 2
actual_str = k2.to_str_simple(fsa)
expected_str = '\n'.join(['0 1 1 1.2', '1 2 -1 3.4', '2'])
assert actual_str.strip() == expected_str
loss = fsa.scores.sum()
(-loss).backward()
# arc 1 and 3 of a_fsa are kept in the final intersected FSA
assert torch.allclose(a_fsa.grad,
torch.tensor([0, -1, 0, -1]).to(a_fsa.grad))
# arc 0 and 2 of b_fsa are kept in the final intersected FSA
assert torch.allclose(b_fsa.grad,
torch.tensor([-1, 0, -1]).to(b_fsa.grad))
            # if any of the input FSAs is an FsaVec,
            # the output FSA is also an FsaVec.
a_fsa.scores.grad = None
b_fsa.scores.grad = None
a_fsa = k2.create_fsa_vec([a_fsa])
fsa = k2.intersect(a_fsa, b_fsa, treat_epsilons_specially=False)
assert len(fsa.shape) == 3
def test_treat_epsilon_specially_true(self):
# this version works only on CPU and requires
# arc-sorted inputs
# a_fsa recognizes `(1|3)?2*`
s1 = '''
0 1 3 0.0
0 1 1 0.2
0 1 0 0.1
1 1 2 0.3
1 2 -1 0.4
2
'''
a_fsa = k2.Fsa.from_str(s1)
a_fsa.requires_grad_(True)
# b_fsa recognizes `1|2|5`
s2 = '''
0 1 5 0
0 1 1 1
0 1 2 2
1 2 -1 3
2
'''
b_fsa = k2.Fsa.from_str(s2)
b_fsa.requires_grad_(True)
# fsa recognizes 1|2
fsa = k2.intersect(k2.arc_sort(a_fsa), k2.arc_sort(b_fsa))
assert len(fsa.shape) == 2
actual_str = k2.to_str_simple(fsa)
expected_str = '\n'.join(
['0 1 0 0.1', '0 2 1 1.2', '1 2 2 2.3', '2 3 -1 3.4', '3'])
assert actual_str.strip() == expected_str
loss = fsa.scores.sum()
(-loss).backward()
# arc 1, 2, 3, and 4 of a_fsa are kept in the final intersected FSA
assert torch.allclose(a_fsa.grad,
torch.tensor([0, -1, -1, -1, -1]).to(a_fsa.grad))
# arc 1, 2, and 3 of b_fsa are kept in the final intersected FSA
assert torch.allclose(b_fsa.grad,
torch.tensor([0, -1, -1, -1]).to(b_fsa.grad))
        # if any of the input FSAs is an FsaVec,
        # the output FSA is also an FsaVec.
a_fsa.scores.grad = None
b_fsa.scores.grad = None
a_fsa = k2.create_fsa_vec([a_fsa])
fsa = k2.intersect(k2.arc_sort(a_fsa), k2.arc_sort(b_fsa))
assert len(fsa.shape) == 3
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "Jerry-Wemisiko/News-API",
"score": 3
}
|
#### File: News-API/tests/articletest.py
```python
import unittest
from app.models import Article
class ArticleTest(unittest.TestCase):
def setUp(self):
self.new_article = Article("Are hydrogen fuel cells the future of green transport? Sorta","Around the world, governments are implementing policies ..",
"https://thenextweb.com/news/are-hydrogen-fuel-cells-the-future-of-green-transport-sorta",
"https://img-cdn.tnwcdn.com/image/shift?filter_last=1&fit=1280%2C640&url=https%3A%2F%2Fcdn0.tnwcdn.com%2Fwp-content%2Fblogs.dir%2F1%2Ffiles%2F2021%2F08%2Fr_MIR_MY21_0006_V001.jpeg&signature=ca3714ab69d07634973e1c4505d67cb8",
"2021-08-11T09:46:58Z")
def test_instance(self):
self.assertTrue(isinstance(self.new_article,Article))
```
|
{
"source": "Jerry-Wemisiko/Password-locker",
"score": 4
}
|
#### File: Jerry-Wemisiko/Password-locker/passwd.py
```python
import random
import string
class User:
'''
Class that generates new instances of users
'''
def __init__(self,user_name,password):
self.user_name = user_name
self.password = password
user_list = []
def save_user(self):
'''
function to save a new to our list
'''
User.user_list.append(self)
def delete_user(self):
'''
function to delete an instance from the list
'''
User.user_list.remove(self)
@classmethod
def display_user(cls):
return cls.user_list
class Credential:
'''
class that generate new instances of user
'''
def __init__( self,site,username,password):
self.site = site
self.username = username
self.password = password
credential_list = []
def save_credentials(self):
'''
method to save a new credential
'''
Credential.credential_list.append(self)
def delete_credential(self):
'''
method to delete a credential
'''
Credential.credential_list.remove(self)
@classmethod
def display_credentials(cls):
'''
method to display a credential
'''
return cls.credential_list
@classmethod
def find_by_username(cls,number):
'''
method to find a credential by username
'''
for credential in cls.credential_list:
if credential.username == number:
return credential
@classmethod
def do_credential_exist(cls,number):
'''
method to check a credential exists
'''
for credential in cls.credential_list:
if credential.site == number:
return True
return False
def generate_password(passlength=10):
'''
method that gives user to use an autogenerated password by the application
'''
password = string.ascii_lowercase+ string.ascii_uppercase + string.digits
return "".join(random.choice(password) for i in range(passlength))
```
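A short sketch of the classes above in use; the site, username, and password values are made up:

```python
user = User("jerry", "pass123")
user.save_user()

cred = Credential("twitter", "jerry", "s3cret")
cred.save_credentials()

print(Credential.do_credential_exist("twitter"))   # True -- matched on the site name
print(len(User.display_user()))                    # 1
```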
|
{
"source": "Jerry-Wemisiko/pitches_app",
"score": 2
}
|
#### File: app/main/errors.py
```python
from flask import render_template
from . import main
@main.errorhandler(403)
def forbidden_access(error):
'''
Function to handles 403 error
'''
return render_template('errors.html',error='page')
@main.errorhandler(404)
def four_Ow_four(error):
'''
Function to handles 404 error
'''
return render_template('errors.html',error='page')
@main.errorhandler(500)
def server_error(error):
'''
Function to handles 500 error
'''
return render_template('errors.html',error='page')
```
|
{
"source": "jerrywences/ansible-websphere",
"score": 2
}
|
#### File: ansible-websphere/library/liberty_server.py
```python
import os
import subprocess
import platform
import datetime
def main():
# Read arguments
module = AnsibleModule(
argument_spec = dict(
state = dict(default='started', choices=['started', 'stopped']),
name = dict(required=True),
libertydir = dict(required=True)
)
)
state = module.params['state']
name = module.params['name']
libertydir = module.params['libertydir']
# Check if paths are valid
if not os.path.exists(libertydir):
module.fail_json(msg=libertydir+" does not exists")
if state == 'stopped':
child = subprocess.Popen([libertydir+"/bin/server stop " + name], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout_value, stderr_value = child.communicate()
if child.returncode != 0:
if not stderr_value.find("is not running") < 0:
module.fail_json(msg=name + " stop failed", stdout=stdout_value, stderr=stderr_value)
module.exit_json(changed=True, msg=name + " stopped successfully", stdout=stdout_value)
if state == 'started':
child = subprocess.Popen([libertydir+"/bin/server start " + name], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout_value, stderr_value = child.communicate()
if child.returncode != 0:
if not stderr_value.find("is running with process") < 0:
module.fail_json(msg=name + " start failed", stdout=stdout_value, stderr=stderr_value)
module.exit_json(changed=True, msg=name + " started successfully", stdout=stdout_value)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
```
#### File: ansible-websphere/library/profile_nodeagent.py
```python
import os
import subprocess
import platform
import datetime
import shutil
def main():
# Read arguments
module = AnsibleModule(
argument_spec = dict(
            state = dict(default='present', choices=['present', 'absent']),
wasdir = dict(required=True),
name = dict(required=True),
cell_name = dict(required=False),
host_name = dict(required=False),
node_name = dict(required=False),
username = dict(required=False),
password = dict(required=False),
dmgr_host = dict(required=False),
dmgr_port = dict(required=False),
federate = dict(required=False, choices=BOOLEANS)
)
)
state = module.params['state']
wasdir = module.params['wasdir']
name = module.params['name']
cell_name = module.params['cell_name']
host_name = module.params['host_name']
node_name = module.params['node_name']
username = module.params['username']
password = module.params['password']
dmgr_host = module.params['dmgr_host']
dmgr_port = module.params['dmgr_port']
federate = module.params['federate']
# Check if paths are valid
if not os.path.exists(wasdir):
module.fail_json(msg=wasdir+" does not exists")
# Create a profile
if state == 'present':
child = subprocess.Popen([wasdir+"/bin/manageprofiles.sh -create -profileName " + name + " -profilePath " + wasdir+"/profiles/"+name + " -templatePath " + wasdir+"/profileTemplates/managed -cellName " + cell_name + " -hostName " + host_name + " -nodeName " + node_name + " -enableAdminSecurity true -adminUserName " + username + " -adminPassword " + password], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout_value, stderr_value = child.communicate()
if child.returncode != 0:
module.fail_json(msg="Dmgr profile creation failed", stdout=stdout_value, stderr=stderr_value)
if federate:
# Federate the node
child = subprocess.Popen([wasdir+"/bin/addNode.sh " + dmgr_host + " " + dmgr_port + " -conntype SOAP -username " + username + " -password " + password + " -profileName " + name], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout_value, stderr_value = child.communicate()
if child.returncode != 0:
module.fail_json(msg="Node federation failed", stdout=stdout_value, stderr=stderr_value)
module.exit_json(changed=True, msg=name + " profile created successfully", stdout=stdout_value)
# Remove a profile
    if state == 'absent':
child = subprocess.Popen([wasdir+"/bin/manageprofiles.sh -delete -profileName " + name], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout_value, stderr_value = child.communicate()
if child.returncode != 0:
# manageprofiles.sh -delete will fail if the profile does not exist.
# But creation of a profile with the same name will also fail if
# the directory is not empty. So we better remove the dir forcefully.
if not stdout_value.find("INSTCONFFAILED") < 0:
shutil.rmtree(wasdir+"/profiles/"+name, ignore_errors=False, onerror=None)
else:
module.fail_json(msg="Dmgr profile removal failed", stdout=stdout_value, stderr=stderr_value)
module.exit_json(changed=True, msg=name + " profile removed successfully", stdout=stdout_value, stderr=stderr_value)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
```
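The subprocess calls above build a single shell string and run it with `shell=True`. A safer variant, sketched below purely as an illustration (the helper name and argument order are assumptions, not part of the module), passes the command as an argument list so values containing spaces or shell metacharacters cannot break the command:
```python
import subprocess

def run_manageprofiles_create(wasdir, name, cell_name, host_name, node_name, username, password):
    # Same manageprofiles.sh invocation as above, but as an argument list (no shell=True).
    cmd = [
        wasdir + "/bin/manageprofiles.sh", "-create",
        "-profileName", name,
        "-profilePath", wasdir + "/profiles/" + name,
        "-templatePath", wasdir + "/profileTemplates/managed",
        "-cellName", cell_name,
        "-hostName", host_name,
        "-nodeName", node_name,
        "-enableAdminSecurity", "true",
        "-adminUserName", username,
        "-adminPassword", password,
    ]
    child = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout_value, stderr_value = child.communicate()
    return child.returncode, stdout_value, stderr_value
```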
#### File: ansible-websphere/library/wsadmin.py
```python
import os
import subprocess
import platform
import datetime
def main():
# Read arguments
module = AnsibleModule(
argument_spec = dict(
            wasdir = dict(required=True),
            params = dict(required=True),
host = dict(default='localhost', required=False),
port = dict(default='8879', required=False),
username = dict(required=False),
password = dict(required=False),
script = dict(required=True)
)
)
    wasdir = module.params['wasdir']
    params = module.params['params']
host = module.params['host']
port = module.params['port']
username = module.params['username']
password = module.params['password']
script = module.params['script']
    # Run the wsadmin command
    child = subprocess.Popen([wasdir+"/bin/wsadmin.sh -lang jython -conntype SOAP -host "+host+" -port "+port+" -username " + username + " -password " + password + " -f "+script+" "+params], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout_value, stderr_value = child.communicate()
    if child.returncode != 0:
        module.fail_json(msg="Failed executing wsadmin script: " + script, stdout=stdout_value, stderr=stderr_value)
    module.exit_json(changed=True, msg="Script executed successfully: " + script, stdout=stdout_value)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
```
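All three modules in this library follow the same skeleton: read an `argument_spec`, shell out, then report through `exit_json`/`fail_json`. A minimal sketch of that pattern, with a placeholder `command` parameter that is not part of the real modules:
```python
from ansible.module_utils.basic import AnsibleModule
import subprocess

def main():
    # Placeholder spec: the real modules declare their own parameters.
    module = AnsibleModule(argument_spec=dict(command=dict(required=True)))
    child = subprocess.Popen(module.params['command'], shell=True,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout_value, stderr_value = child.communicate()
    if child.returncode != 0:
        module.fail_json(msg="command failed", stdout=stdout_value, stderr=stderr_value)
    module.exit_json(changed=True, stdout=stdout_value)

if __name__ == '__main__':
    main()
```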
|
{
"source": "jerrywgz/mmpose",
"score": 2
}
|
#### File: tests/test_api/test_inference_3d.py
```python
import numpy as np
import pytest
import torch
from mmpose.apis import (inference_pose_lifter_model, init_pose_model,
vis_3d_pose_result)
def test_pose_lifter_demo():
# H36M demo
pose_model = init_pose_model(
'configs/body/3d_kpt_sview_rgb_img/pose_lift/'
'h36m/simplebaseline3d_h36m.py',
None,
device='cpu')
pose_det_result = {
'keypoints': np.zeros((17, 3)),
'bbox': [50, 50, 50, 50],
'track_id': 0,
'image_name': 'tests/data/h36m/S1_Directions_1.54138969_000001.jpg',
}
pose_results_2d = [[pose_det_result]]
dataset = pose_model.cfg.data['test']['type']
_ = inference_pose_lifter_model(
pose_model, pose_results_2d, dataset, with_track_id=False)
pose_lift_results = inference_pose_lifter_model(
pose_model, pose_results_2d, dataset, with_track_id=True)
for res in pose_lift_results:
res['title'] = 'title'
vis_3d_pose_result(
pose_model,
pose_lift_results,
img=pose_lift_results[0]['image_name'],
dataset=dataset)
# test special cases
# Empty 2D results
_ = inference_pose_lifter_model(
pose_model, [[]], dataset, with_track_id=False)
if torch.cuda.is_available():
_ = inference_pose_lifter_model(
pose_model.cuda(), pose_results_2d, dataset, with_track_id=False)
with pytest.raises(NotImplementedError):
_ = inference_pose_lifter_model(
pose_model, pose_results_2d, dataset='test')
```
|
{
"source": "jerrywgz/Paddle",
"score": 2
}
|
#### File: tests/unittests/dist_se_resnext.py
```python
import numpy as np
import argparse
import time
import math
import paddle
import paddle.fluid as fluid
import paddle.fluid.profiler as profiler
from paddle.fluid import core
import unittest
from multiprocessing import Process
import os
import sys
import signal
# Fix seed for test
fluid.default_startup_program().random_seed = 1
fluid.default_main_program().random_seed = 1
train_parameters = {
"input_size": [3, 224, 224],
"input_mean": [0.485, 0.456, 0.406],
"input_std": [0.229, 0.224, 0.225],
"learning_strategy": {
"name": "piecewise_decay",
"epochs": [30, 60, 90],
"steps": [0.1, 0.01, 0.001, 0.0001]
}
}
class SE_ResNeXt():
def __init__(self, layers=50):
self.params = train_parameters
self.layers = layers
def net(self, input, class_dim=1000):
layers = self.layers
supported_layers = [50, 101, 152]
assert layers in supported_layers, \
"supported layers are {} but input layer is {}".format(supported_layers, layers)
if layers == 50:
cardinality = 32
reduction_ratio = 16
depth = [3, 4, 6, 3]
num_filters = [128, 256, 512, 1024]
conv = self.conv_bn_layer(
input=input,
num_filters=64,
filter_size=7,
stride=2,
act='relu')
conv = fluid.layers.pool2d(
input=conv,
pool_size=3,
pool_stride=2,
pool_padding=1,
pool_type='max')
elif layers == 101:
cardinality = 32
reduction_ratio = 16
depth = [3, 4, 23, 3]
num_filters = [128, 256, 512, 1024]
conv = self.conv_bn_layer(
input=input,
num_filters=64,
filter_size=7,
stride=2,
act='relu')
conv = fluid.layers.pool2d(
input=conv,
pool_size=3,
pool_stride=2,
pool_padding=1,
pool_type='max')
elif layers == 152:
cardinality = 64
reduction_ratio = 16
depth = [3, 8, 36, 3]
num_filters = [128, 256, 512, 1024]
conv = self.conv_bn_layer(
input=input,
num_filters=64,
filter_size=3,
stride=2,
act='relu')
conv = self.conv_bn_layer(
input=conv, num_filters=64, filter_size=3, stride=1, act='relu')
conv = self.conv_bn_layer(
input=conv,
num_filters=128,
filter_size=3,
stride=1,
act='relu')
conv = fluid.layers.pool2d(
input=conv, pool_size=3, pool_stride=2, pool_padding=1, \
pool_type='max')
for block in range(len(depth)):
for i in range(depth[block]):
conv = self.bottleneck_block(
input=conv,
num_filters=num_filters[block],
stride=2 if i == 0 and block != 0 else 1,
cardinality=cardinality,
reduction_ratio=reduction_ratio)
pool = fluid.layers.pool2d(
input=conv, pool_size=7, pool_type='avg', global_pooling=True)
drop = fluid.layers.dropout(x=pool, dropout_prob=0.2)
stdv = 1.0 / math.sqrt(drop.shape[1] * 1.0)
out = fluid.layers.fc(input=drop, size=class_dim, act='softmax')
return out
def shortcut(self, input, ch_out, stride):
ch_in = input.shape[1]
if ch_in != ch_out or stride != 1:
filter_size = 1
return self.conv_bn_layer(input, ch_out, filter_size, stride)
else:
return input
def bottleneck_block(self, input, num_filters, stride, cardinality,
reduction_ratio):
conv0 = self.conv_bn_layer(
input=input, num_filters=num_filters, filter_size=1, act='relu')
conv1 = self.conv_bn_layer(
input=conv0,
num_filters=num_filters,
filter_size=3,
stride=stride,
groups=cardinality,
act='relu')
conv2 = self.conv_bn_layer(
input=conv1, num_filters=num_filters * 2, filter_size=1, act=None)
scale = self.squeeze_excitation(
input=conv2,
num_channels=num_filters * 2,
reduction_ratio=reduction_ratio)
short = self.shortcut(input, num_filters * 2, stride)
return fluid.layers.elementwise_add(x=short, y=scale, act='relu')
def conv_bn_layer(self,
input,
num_filters,
filter_size,
stride=1,
groups=1,
act=None):
conv = fluid.layers.conv2d(
input=input,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=(filter_size - 1) / 2,
groups=groups,
act=None,
bias_attr=False)
return fluid.layers.batch_norm(input=conv, act=act)
def squeeze_excitation(self, input, num_channels, reduction_ratio):
pool = fluid.layers.pool2d(
input=input, pool_size=0, pool_type='avg', global_pooling=True)
stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
squeeze = fluid.layers.fc(input=pool,
size=num_channels / reduction_ratio,
act='relu')
stdv = 1.0 / math.sqrt(squeeze.shape[1] * 1.0)
excitation = fluid.layers.fc(input=squeeze,
size=num_channels,
act='sigmoid')
scale = fluid.layers.elementwise_mul(x=input, y=excitation, axis=0)
return scale
def get_model(batch_size):
# Input data
image = fluid.layers.fill_constant(
shape=[batch_size, 3, 224, 224], dtype='float32', value=0.0)
label = fluid.layers.fill_constant(
shape=[batch_size, 1], dtype='int64', value=0.0)
# Train program
model = SE_ResNeXt(layers=50)
out = model.net(input=image, class_dim=102)
cost = fluid.layers.cross_entropy(input=out, label=label)
avg_cost = fluid.layers.mean(x=cost)
acc_top1 = fluid.layers.accuracy(input=out, label=label, k=1)
acc_top5 = fluid.layers.accuracy(input=out, label=label, k=5)
# Evaluator
test_program = fluid.default_main_program().clone(for_test=True)
# Optimization
total_images = 6149 # flowers
epochs = [30, 60, 90]
step = int(total_images / batch_size + 1)
bd = [step * e for e in epochs]
base_lr = 0.1
lr = []
lr = [base_lr * (0.1**i) for i in range(len(bd) + 1)]
optimizer = fluid.optimizer.Momentum(
learning_rate=fluid.layers.piecewise_decay(
boundaries=bd, values=lr),
momentum=0.9,
regularization=fluid.regularizer.L2Decay(1e-4))
optimizer.minimize(avg_cost)
# Reader
train_reader = paddle.batch(
paddle.dataset.flowers.train(), batch_size=batch_size)
test_reader = paddle.batch(
paddle.dataset.flowers.test(), batch_size=batch_size)
return test_program, avg_cost, train_reader, test_reader, acc_top1, out
def get_transpiler(trainer_id, main_program, pserver_endpoints, trainers):
t = fluid.DistributeTranspiler()
t.transpile(
trainer_id=trainer_id,
program=main_program,
pservers=pserver_endpoints,
trainers=trainers)
return t
class DistSeResneXt2x2:
def run_pserver(self, pserver_endpoints, trainers, current_endpoint,
trainer_id):
get_model(batch_size=2)
t = get_transpiler(trainer_id,
fluid.default_main_program(), pserver_endpoints,
trainers)
pserver_prog = t.get_pserver_program(current_endpoint)
startup_prog = t.get_startup_program(current_endpoint, pserver_prog)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(startup_prog)
exe.run(pserver_prog)
def _wait_ps_ready(self, pid):
retry_times = 20
while True:
assert retry_times >= 0, "wait ps ready failed"
time.sleep(3)
print("waiting ps ready: ", pid)
try:
# the listen_and_serv_op would touch a file which contains the listen port
# on the /tmp directory until it was ready to process all the RPC call.
os.stat("/tmp/paddle.%d.port" % pid)
return
except os.error:
retry_times -= 1
def run_trainer(self, place, endpoints, trainer_id, trainers, is_dist=True):
test_program, avg_cost, train_reader, test_reader, batch_acc, predict = get_model(
batch_size=20)
if is_dist:
t = get_transpiler(trainer_id,
fluid.default_main_program(), endpoints,
trainers)
trainer_prog = t.get_trainer_program()
else:
trainer_prog = fluid.default_main_program()
startup_exe = fluid.Executor(place)
startup_exe.run(fluid.default_startup_program())
strategy = fluid.ExecutionStrategy()
strategy.num_threads = 1
strategy.allow_op_delay = False
exe = fluid.ParallelExecutor(
True,
loss_name=avg_cost.name,
exec_strategy=strategy,
num_trainers=trainers,
trainer_id=trainer_id)
feed_var_list = [
var for var in trainer_prog.global_block().vars.itervalues()
if var.is_data
]
feeder = fluid.DataFeeder(feed_var_list, place)
reader_generator = train_reader()
first_loss, = exe.run(fetch_list=[avg_cost.name])
print(first_loss)
for i in xrange(5):
loss, = exe.run(fetch_list=[avg_cost.name])
last_loss, = exe.run(fetch_list=[avg_cost.name])
print(last_loss)
def main(role="pserver",
endpoints="127.0.0.1:9123",
trainer_id=0,
current_endpoint="127.0.0.1:9123",
trainers=1,
is_dist=True):
model = DistSeResneXt2x2()
if role == "pserver":
model.run_pserver(endpoints, trainers, current_endpoint, trainer_id)
else:
p = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
) else fluid.CPUPlace()
model.run_trainer(p, endpoints, trainer_id, trainers, is_dist)
if __name__ == "__main__":
if len(sys.argv) != 7:
print(
"Usage: python dist_se_resnext.py [pserver/trainer] [endpoints] [trainer_id] [current_endpoint] [trainers] [is_dist]"
)
role = sys.argv[1]
endpoints = sys.argv[2]
trainer_id = int(sys.argv[3])
current_endpoint = sys.argv[4]
trainers = int(sys.argv[5])
is_dist = True if sys.argv[6] == "TRUE" else False
main(
role=role,
endpoints=endpoints,
trainer_id=trainer_id,
current_endpoint=current_endpoint,
trainers=trainers,
is_dist=is_dist)
```
#### File: tests/unittests/test_dist_se_resnext.py
```python
import numpy as np
import argparse
import time
import math
import unittest
import os
import signal
import subprocess
class TestDistSeResneXt2x2(unittest.TestCase):
def setUp(self):
self._trainers = 2
self._pservers = 2
self._ps_endpoints = "127.0.0.1:9123,127.0.0.1:9124"
self._python_interp = "python"
def start_pserver(self):
ps0_ep, ps1_ep = self._ps_endpoints.split(",")
ps0_cmd = "%s dist_se_resnext.py pserver %s 0 %s %d TRUE" % \
(self._python_interp, self._ps_endpoints, ps0_ep, self._trainers)
ps1_cmd = "%s dist_se_resnext.py pserver %s 0 %s %d TRUE" % \
(self._python_interp, self._ps_endpoints, ps1_ep, self._trainers)
ps0_proc = subprocess.Popen(
ps0_cmd.split(" "), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
ps1_proc = subprocess.Popen(
ps1_cmd.split(" "), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return ps0_proc, ps1_proc
def _wait_ps_ready(self, pid):
retry_times = 20
while True:
assert retry_times >= 0, "wait ps ready failed"
time.sleep(3)
try:
# the listen_and_serv_op would touch a file which contains the listen port
# on the /tmp directory until it was ready to process all the RPC call.
os.stat("/tmp/paddle.%d.port" % pid)
return
except os.error:
retry_times -= 1
def non_test_with_place(self):
# *ATTENTION* THIS TEST NEEDS AT LEAST 2GPUS TO RUN
required_envs = {
"PATH": os.getenv("PATH"),
"PYTHONPATH": os.getenv("PYTHONPATH"),
"LD_LIBRARY_PATH": os.getenv("LD_LIBRARY_PATH"),
"FLAGS_fraction_of_gpu_memory_to_use": "0.15"
}
# Run local to get a base line
env_local = {"CUDA_VISIBLE_DEVICES": "0"}
env_local.update(required_envs)
        local_cmd = "%s dist_se_resnext.py trainer %s 0 %s %d FALSE" % \
(self._python_interp, "127.0.0.1:1234", "127.0.0.1:1234", 1)
local_proc = subprocess.Popen(
local_cmd.split(" "), stdout=subprocess.PIPE, env=env_local)
local_proc.wait()
local_ret = local_proc.stdout.read()
# Run dist train to compare with local results
ps0, ps1 = self.start_pserver()
self._wait_ps_ready(ps0.pid)
self._wait_ps_ready(ps1.pid)
ps0_ep, ps1_ep = self._ps_endpoints.split(",")
tr0_cmd = "%s dist_se_resnext.py trainer %s 0 %s %d TRUE" % \
(self._python_interp, self._ps_endpoints, ps0_ep, self._trainers)
tr1_cmd = "%s dist_se_resnext.py trainer %s 1 %s %d TRUE" % \
(self._python_interp, self._ps_endpoints, ps1_ep, self._trainers)
env0 = {"CUDA_VISIBLE_DEVICES": "0"}
env1 = {"CUDA_VISIBLE_DEVICES": "1"}
env0.update(required_envs)
env1.update(required_envs)
FNULL = open(os.devnull, 'w')
tr0_proc = subprocess.Popen(
tr0_cmd.split(" "), stdout=subprocess.PIPE, stderr=FNULL, env=env0)
tr1_proc = subprocess.Popen(
tr1_cmd.split(" "), stdout=subprocess.PIPE, stderr=FNULL, env=env1)
tr0_proc.wait()
tr1_proc.wait()
loss_data0 = tr0_proc.stdout.read()
lines = loss_data0.split("\n")
dist_first_loss = eval(lines[0].replace(" ", ","))[0]
dist_last_loss = eval(lines[1].replace(" ", ","))[0]
local_lines = local_ret.split("\n")
local_first_loss = eval(local_lines[0])[0]
local_last_loss = eval(local_lines[1])[0]
self.assertAlmostEqual(local_first_loss, dist_first_loss)
self.assertAlmostEqual(local_last_loss, dist_last_loss)
# check tr0_out
# FIXME: ensure the server process is killed
# replace with ps0.terminate()
os.kill(ps0.pid, signal.SIGKILL)
os.kill(ps1.pid, signal.SIGKILL)
FNULL.close()
if __name__ == "__main__":
unittest.main()
```
#### File: tests/unittests/test_seq_concat_op.py
```python
import unittest
import numpy as np
import sys
from op_test import OpTest
def to_abs_offset_lod(lod):
offset_lod = [[0] for i in lod]
for i, level in enumerate(lod):
for seq_len in level:
offset_lod[i].append(offset_lod[i][-1] + seq_len)
if len(offset_lod) == 0 or len(offset_lod) == 1:
return offset_lod
import copy
new_offset_lod = copy.deepcopy(offset_lod)
for idx, val in enumerate(offset_lod[0]):
new_offset_lod[0][idx] = offset_lod[1][val]
return new_offset_lod
def seq_concat(inputs, level):
lod0 = inputs['X'][0][1][1]
lod1 = inputs['X'][1][1][1]
x0 = inputs['X'][0][1][0]
x1 = inputs['X'][1][1][0]
level_idx = len(lod0) - level - 1
outs = []
for i in range(len(lod0[level_idx])):
sub_x0 = x0[to_abs_offset_lod(lod0)[level_idx][i]:to_abs_offset_lod(
lod0)[level_idx][i + 1], :]
sub_x1 = x1[to_abs_offset_lod(lod1)[level_idx][i]:to_abs_offset_lod(
lod1)[level_idx][i + 1], :]
outs.append(np.concatenate((sub_x0, sub_x1), axis=0))
return np.concatenate(outs, axis=0)
class TestSeqConcatOp(OpTest):
def set_data(self):
# two level, batch size is 3
x0 = np.random.random((4, 6, 3)).astype('float32')
lod0 = [[2, 2], [1, 1, 1, 1]]
x1 = np.random.random((4, 8, 3)).astype('float32')
lod1 = [[2, 2], [1, 1, 1, 1]]
axis = 1
level = 1
self.inputs = {'X': [('x0', (x0, lod0)), ('x1', (x1, lod1))]}
self.attrs = {'axis': axis, 'level': level}
self.outputs = {'Out': (np.concatenate([x0, x1], axis=1), lod0)}
def setUp(self):
self.op_type = "sequence_concat"
self.set_data()
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['x0'], 'Out')
class TestSeqConcatOpLevelZeroNestedSequence(TestSeqConcatOp):
def set_data(self):
# two level, batch size is 3
x0 = np.random.random((4, 6, 3)).astype('float32')
lod0 = [[2, 2], [1, 1, 1, 1]]
x1 = np.random.random((7, 6, 3)).astype('float32')
lod1 = [[2, 2], [1, 2, 2, 2]]
axis = 0
level = 0
self.inputs = {'X': [('x0', (x0, lod0)), ('x1', (x1, lod1))]}
self.attrs = {'axis': axis, 'level': level}
out_lod = [[2, 2], [2, 3, 3, 3]]
self.outputs = {'Out': (seq_concat(self.inputs, level), out_lod)}
class TestSeqConcatOplevelOneNestedSequence(TestSeqConcatOp):
def set_data(self):
# two level, batch size is 3
x0 = np.random.random((4, 6, 3)).astype('float32')
lod0 = [[2, 2], [1, 1, 1, 1]]
x1 = np.random.random((7, 6, 3)).astype('float32')
lod1 = [[3, 1], [1, 2, 2, 2]]
axis = 0
level = 1
self.inputs = {'X': [('x0', (x0, lod0)), ('x1', (x1, lod1))]}
self.attrs = {'axis': axis, 'level': level}
out_lod = [[5, 3], [1, 1, 1, 2, 2, 1, 1, 2]]
self.outputs = {'Out': (seq_concat(self.inputs, level), out_lod)}
class TestSeqConcatOpLevelZeroSequence(TestSeqConcatOp):
def set_data(self):
# two level, batch size is 3
x0 = np.random.random((4, 3, 4)).astype('float32')
lod0 = [[1, 1, 1, 1]]
x1 = np.random.random((7, 3, 4)).astype('float32')
lod1 = [[1, 2, 2, 2]]
axis = 0
level = 0
self.inputs = {'X': [('x0', (x0, lod0)), ('x1', (x1, lod1))]}
self.attrs = {'axis': axis, 'level': level}
out_lod = [[2, 3, 3, 3]]
self.outputs = {'Out': (seq_concat(self.inputs, level), out_lod)}
if __name__ == '__main__':
unittest.main()
```
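The length-based to offset-based LoD conversion in `to_abs_offset_lod` is the least obvious part of this test; a small worked example, assuming the helper above is in scope:
```python
# A two-level length-based LoD: 2 sequences of 2 sub-sequences each, every sub-sequence of length 1.
lod = [[2, 2], [1, 1, 1, 1]]
offsets = to_abs_offset_lod(lod)
# Per-level cumulative sums give [[0, 2, 4], [0, 1, 2, 3, 4]]; the top level is then
# remapped to absolute row offsets, which stays [0, 2, 4] because every sub-sequence has length 1.
assert offsets == [[0, 2, 4], [0, 1, 2, 3, 4]]
```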
|
{
"source": "JerryWuZiJie/dynamic_graph_head",
"score": 3
}
|
#### File: src/dynamic_graph_head/sim_head.py
```python
import numpy as np
class SimHead:
def __init__(self, robot, vicon_name='', with_sliders=True, joint_index=None,
measurement_delay_dt=0, control_delay_dt=0, noise_data_std={}):
self._robot = robot
self._vicon_name = vicon_name
self._joint_index = joint_index
# Define the common sensor values.
nv = robot.pin_robot.model.nv
# Get number of joints nj
if robot.useFixedBase:
if joint_index is None:
nj = nv
else:
nj = len(joint_index)
else:
nj = nv - 6
self.nj = nj
self._sensor_joint_positions = np.zeros(nj)
self._sensor_joint_velocities = np.zeros(nj)
self.with_sliders = with_sliders
if self.with_sliders:
self._sensor_slider_positions = np.zeros(4)
# If not fixed base, then assume we have an IMU and a vicon.
if not robot.useFixedBase:
# Simulated IMU.
self._sensor_imu_gyroscope = np.zeros(3)
# Utility for vicon class.
self._sensor__vicon_base_position = np.zeros(7)
self._sensor__vicon_base_velocity = np.zeros(6)
# Controls.
self._control_ctrl_joint_torques = np.zeros(nj)
self.update_noise_data(noise_data_std)
self.update_control_delay(control_delay_dt)
self.update_measurement_delay(measurement_delay_dt)
def update_noise_data(self, noise_data_std):
self._noise_data_std = noise_data_std
if not 'joint_positions' in noise_data_std:
self._noise_data_std['joint_positions'] = np.zeros(self.nj)
if not 'joint_velocities' in noise_data_std:
            self._noise_data_std['joint_velocities'] = np.zeros(self.nj)
if not 'imu_gyroscope' in noise_data_std:
self._noise_data_std['imu_gyroscope'] = np.zeros(3)
def update_control_delay(self, delay_dt):
self._fill_history_control = True
self._ti = 0
self._control_delay_dt = delay_dt
length = delay_dt + 1
self._history_control = {
'ctrl_joint_torques': np.zeros((length, self.nj))
}
def update_measurement_delay(self, delay_dt):
self._fill_history_measurement = True
self._ti = 0
self._measurement_delay_dt = delay_dt
length = delay_dt + 1
self._history_measurements = {
'joint_positions': np.zeros((length, self.nj)),
'joint_velocities': np.zeros((length, self.nj)),
'imu_gyroscope': np.zeros((length, 3))
}
def sample_noise(self, entry):
noise_var = self._noise_data_std[entry]**2
return np.random.multivariate_normal(np.zeros_like(noise_var), np.diag(noise_var))
def read(self):
q, dq = self._robot.get_state()
write_idx = self._ti % (self._measurement_delay_dt + 1)
if self._fill_history_measurement:
self._fill_history_measurement = False
write_idx = None
read_idx = (self._ti + 1) % (self._measurement_delay_dt + 1)
history = self._history_measurements
if not self._robot.useFixedBase:
# Write to the measurement history with noise.
history['joint_positions'][write_idx] = q[7:]
history['joint_velocities'][write_idx] = dq[6:]
history['imu_gyroscope'][write_idx] = dq[3:6]
self._sensor_imu_gyroscope[:] = history['imu_gyroscope'][read_idx]
self._sensor__vicon_base_position[:] = q[:7]
self._sensor__vicon_base_velocity[:] = dq[:6]
else:
if self._joint_index:
history['joint_positions'][write_idx] = q[self._joint_index]
history['joint_velocities'][write_idx] = dq[self._joint_index]
else:
history['joint_positions'][write_idx] = q
history['joint_velocities'][write_idx] = dq
self._sensor_joint_positions[:] = history['joint_positions'][read_idx]
self._sensor_joint_velocities[:] = history['joint_velocities'][read_idx]
if self.with_sliders:
for i, l in enumerate(['a', 'b', 'c', 'd']):
self._sensor_slider_positions[i] = self._robot.get_slider_position(l)
def write(self):
        write_idx = self._ti % (self._control_delay_dt + 1)
if self._fill_history_control:
self._fill_history_control = False
write_idx = None
        read_idx = (self._ti + 1) % (self._control_delay_dt + 1)
history = self._history_control
history['ctrl_joint_torques'][write_idx] = self._control_ctrl_joint_torques
self._last_ctrl_joint_torques = history['ctrl_joint_torques'][read_idx]
self._ti += 1
def sim_step(self):
self._robot.send_joint_command(self._last_ctrl_joint_torques)
def get_sensor(self, sensor_name):
return self.__dict__['_sensor_' + sensor_name]
def set_control(self, control_name, value):
self.__dict__['_control_' + control_name][:] = value
def reset_state(self, q, dq):
self._robot.reset_state(q, dq)
```
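A hypothetical control-loop sketch for `SimHead`; here `robot` stands for any simulation wrapper exposing `pin_robot`, `useFixedBase`, `get_state()`, `send_joint_command()` and `reset_state()` (it is not provided by this file), and the gains are arbitrary:
```python
import numpy as np

head = SimHead(robot, with_sliders=False, measurement_delay_dt=2)

for t in range(1000):
    head.read()                                  # pull (possibly delayed) sensor values
    q = head.get_sensor('joint_positions')
    dq = head.get_sensor('joint_velocities')
    tau = -5.0 * q - 0.1 * dq                    # toy PD controller around the zero posture
    head.set_control('ctrl_joint_torques', tau)
    head.write()                                 # push the (possibly delayed) control
    head.sim_step()                              # forward the torques to the simulator
```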
|
{
"source": "JerryWuZiJie/go_to_bed",
"score": 3
}
|
#### File: JerryWuZiJie/go_to_bed/app.py
```python
import os
import time
import threading
from cv2 import cuda_BufferPool
import RPi.GPIO as GPIO
from flask import Flask, render_template, redirect
import go_to_bed
### constants ###
MIN_DELAY = 10 # delay for tasks need to update within minute precision
FAST_DELAY = 0.01 # delay for tasks need to update immediately
SNOOZE_TIME = 1 # TODO: 10 # snooze time in minutes
SOUND_PATH = "sound/Let Her Go.mp3" # path to sound file
# FUTURE scan for available alarm music in the sound folder
# available_files = []
# for (dirpath, dirnames, filenames) in os.walk("./sound"):
# available_files.extend(filenames)
BED_TIME_THRESHOLD = 5 # minutes
SETTING_ITEM = ['bed time', 'wake up time']
LED_ON = GPIO.LOW
LED_OFF = GPIO.HIGH
# MAIN_STATUS: 0: wakeup, 1: need sleep, 2: sleep, 3: alarm
MAIN_STATUS = 'main status'
MAIN_STATUS_WAKEUP = 0
MAIN_STATUS_NEED_SLEEP = 1
MAIN_STATUS_SLEEP = 2
MAIN_STATUS_ALARM = 3
# ALARM_SWITCH: 0: on, 1: off
ALARM_STATUS = 'alarm status'
ALARM_ON = 0
ALARM_OFF = 1
# OLED_STATUS
OLED_STATUS = 'oled status'
OLED_DISPLAY = 0
OLED_SETTING = 1
OLED_SET_HOUR = 2
OLED_SET_MINUTE = 3
# setting status TODO: when oled timeout or confirm, update status
# indicate which option is currently selected
SETTING_SELECTION = 0
# display time on oled
SETTING_TIME = 1
# global variables
current_status = {MAIN_STATUS: MAIN_STATUS_WAKEUP,
ALARM_STATUS: ALARM_OFF,
OLED_STATUS: OLED_DISPLAY}
bed_time = [11, 10] # TODO: [22, 30] # time to sleep (hour, minute)
today_bed_time = 0 # today's bed time (time.time())
up_time = [11, 15] # TODO: [7, 0] # time to wake up (hour, minute)
alarm_time = list(up_time)  # time to play alarm clock sound (hour, minute); kept separate from up_time so snoozing does not shift the wake-up time
sleep_info = [("05/6", "10:30", True), # list to store sleep info (date, time, follow schedule)
("05/7", "11:53", False),
("05/8", "10:30", True),
("05/9", "10:30", True)] # TODO: make empty []
light_threshold = 1.5 # threshold voltage for light sensor, user tunable
time_12_hour = False # 12 hour mode or 24 hour mode
setting_status = {SETTING_SELECTION: 0,
SETTING_TIME: bed_time}
settings_info = [['sleep time', f'{bed_time[0]}:{bed_time[1]}'],
['wake time', f"{up_time[0]}:{up_time[1]}"],
['volume', '100%'],
['brightness', '100%'],
['light sensitivity', light_threshold],
['12 hour format', time_12_hour]]
friends_sleep_info = [('Jerry', '83%'),
('Tom', '75%'),
('George', '72%'),
('Mary', '65%'),
('Bob', '60%'),
('Alice', '55%'),
('Jack', '50%'),
('Linda', '45%'),
('John', '40%'),
('Jane', '35%')]
# GPIO pins
SNOOZE_BUT = 24
STOP_BUT = 23
RED_LED = 25
GREEN_LED = 26
ALARM_SWITCH = 22
ENCODER_L = 14
ENCODER_R = 15
ENCODER_BUT = 16
### onetime tasks ###
def simple_GPIO_setup():
"""
setup some devices that only need input or output
devices: red/green LEDs, snooze button, stop button, alarm switch
"""
GPIO.setmode(GPIO.BCM)
    # setup snooze/stop buttons, pull-down by default
GPIO.setup(SNOOZE_BUT, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.add_event_detect(SNOOZE_BUT, GPIO.RISING, callback=pause_alarm)
GPIO.setup(STOP_BUT, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.add_event_detect(STOP_BUT, GPIO.RISING, callback=stop_alarm)
    # setup alarm switch, pull-down by default
GPIO.setup(ALARM_SWITCH, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
current_status[ALARM_STATUS] = GPIO.input(ALARM_SWITCH)
GPIO.add_event_detect(ALARM_SWITCH, GPIO.BOTH, callback=alarm_switch)
# setup red/green LED
GPIO.setup(RED_LED, GPIO.OUT)
GPIO.output(RED_LED, LED_OFF)
GPIO.setup(GREEN_LED, GPIO.OUT)
if current_status[ALARM_STATUS] == ALARM_ON:
GPIO.output(GREEN_LED, LED_ON)
else:
GPIO.output(GREEN_LED, LED_OFF)
# setup encoder
# default to ground
GPIO.setup(ENCODER_L, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(ENCODER_R, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(ENCODER_BUT, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# add event detect
GPIO.add_event_detect(ENCODER_L, GPIO.FALLING, callback=encoder_rotate)
GPIO.add_event_detect(ENCODER_BUT, GPIO.FALLING, callback=encoder_but)
# add timer
global encoder_ccw_time, encoder_cw_time
encoder_ccw_time = time.time()
encoder_cw_time = time.time()
def peripheral_setup():
"""
setup all the peripherals
peripherals: rfid, oled, clock, speaker
"""
global rfid, oled, clock, speaker, light_sensor
# setup RFID
rfid = go_to_bed.RFID()
# setup OLED (I2C)
oled = go_to_bed.OLED()
# setup led
clock = go_to_bed.Clock()
# setup speaker
speaker = go_to_bed.Speaker()
speaker.set_sound(SOUND_PATH) # FUTURE: let user choose sound
# setup light sensor
light_sensor = go_to_bed.ADC()
# setup webpage
webpage_flask = Flask(__name__, static_folder='assets')
### interrupt ###
def alarm_switch(channel):
"""
callback function to determine alarm switch state
if switch is on, turn off the alarm, green LED off
otherwise, turn on the alarm, green LED on
"""
print("switch interrupt") # TODO
# debounce, wait for 20 milliseconds
time.sleep(0.020)
if GPIO.input(channel) == ALARM_ON:
current_status[ALARM_STATUS] = ALARM_ON
GPIO.output(GREEN_LED, LED_ON)
else:
current_status[ALARM_STATUS] = ALARM_OFF
GPIO.output(GREEN_LED, LED_OFF)
def pause_alarm(channel):
"""
callback function to pause the alarm
"""
# debounce, wait for 20 milliseconds
time.sleep(0.020)
if GPIO.input(channel):
# stop sound
if not speaker.is_stopped():
speaker.stop_sound()
if current_status[MAIN_STATUS] == MAIN_STATUS_ALARM:
# snooze alarm
hour, minute, _ = get_time()
set_time(alarm_time, hour, (minute + SNOOZE_TIME))
# act as back button
elif current_status[OLED_STATUS] == OLED_SETTING:
setting_status[SETTING_SELECTION] = 0 # set selection back to 0
current_status[OLED_STATUS] = OLED_DISPLAY
oled_update_display()
elif current_status[OLED_STATUS] == OLED_SET_HOUR:
current_status[OLED_STATUS] = OLED_SETTING
oled_update_display()
elif current_status[OLED_STATUS] == OLED_SET_MINUTE:
current_status[OLED_STATUS] = OLED_SET_HOUR
oled_update_display()
def stop_alarm(channel):
"""
callback function to stop alarm clock. If button pushed, alarm is stopped
"""
global alarm_time
# debounce, wait for 20 milliseconds
time.sleep(0.020)
if GPIO.input(channel):
# turn off alarm
if not speaker.is_stopped():
speaker.stop_sound()
if current_status[MAIN_STATUS] == MAIN_STATUS_ALARM:
# set MAIN_STATUS to wakeup
current_status[MAIN_STATUS] = MAIN_STATUS_WAKEUP
oled_update_display()
# set alarm_time to up_time
set_time(alarm_time, *up_time)
def encoder_rotate(channel):
assert channel == ENCODER_L
global encoder_ccw_time, encoder_cw_time
if GPIO.input(ENCODER_R) == GPIO.HIGH:
if time.time() - encoder_cw_time < 0.1:
pass # still clockwise
else:
if current_status[OLED_STATUS] == OLED_SETTING:
setting_status[SETTING_SELECTION] += 1
elif current_status[OLED_STATUS] == OLED_SET_HOUR:
setting_status[SETTING_TIME][0] = (
setting_status[SETTING_TIME][0] + 1) % 24
elif current_status[OLED_STATUS] == OLED_SET_MINUTE:
setting_status[SETTING_TIME][1] = (
setting_status[SETTING_TIME][1] + 1) % 60
oled_update_display()
encoder_ccw_time = time.time()
elif GPIO.input(ENCODER_R) == GPIO.LOW:
if time.time() - encoder_ccw_time < 0.1:
pass # still counter clockwise
else:
if current_status[OLED_STATUS] == OLED_SETTING:
setting_status[SETTING_SELECTION] -= 1
elif current_status[OLED_STATUS] == OLED_SET_HOUR:
setting_status[SETTING_TIME][0] = (
setting_status[SETTING_TIME][0] - 1) % 24
elif current_status[OLED_STATUS] == OLED_SET_MINUTE:
setting_status[SETTING_TIME][1] = (
setting_status[SETTING_TIME][1] - 1) % 60
oled_update_display()
encoder_cw_time = time.time()
def encoder_but(channel):
global bed_time, up_time
time.sleep(0.020)
if not GPIO.input(channel):
if current_status[OLED_STATUS] == OLED_DISPLAY:
current_status[OLED_STATUS] = OLED_SETTING
elif current_status[OLED_STATUS] == OLED_SETTING:
# determine whether to set bed time or up time
if setting_status[SETTING_SELECTION] == 0:
setting_status[SETTING_TIME] = bed_time
else:
setting_status[SETTING_TIME] = up_time
current_status[OLED_STATUS] = OLED_SET_HOUR
elif current_status[OLED_STATUS] == OLED_SET_HOUR:
current_status[OLED_STATUS] = OLED_SET_MINUTE
elif current_status[OLED_STATUS] == OLED_SET_MINUTE:
# store current setting
if setting_status[SETTING_SELECTION] == 0:
bed_time[0] = setting_status[SETTING_TIME][0]
bed_time[1] = setting_status[SETTING_TIME][1]
print('update bed time:', bed_time) # TODO: test
else:
up_time[0] = setting_status[SETTING_TIME][0]
up_time[1] = setting_status[SETTING_TIME][1]
print('update up time:', up_time) # TODO: test
setting_status[SETTING_SELECTION] = 0
current_status[OLED_STATUS] = OLED_DISPLAY
oled_update_display()
### helper functions ###
def get_time():
"""
get current time
@return: hour, min, sec
"""
current_time = time.localtime()
hour = current_time.tm_hour
minute = current_time.tm_min
sec = current_time.tm_sec
return hour, minute, sec
def get_date():
"""
get today's date
@return: month, day
"""
current_time = time.localtime()
month = current_time.tm_mon
day = current_time.tm_mday
return month, day
def set_time(time_object, hour, minute):
"""
set time given hour and min in 24hr format
@param time_object: time object to set
@param hour: hour to set
@param min: minute to set
"""
time_object[1] = minute % 60
time_object[0] = (hour + minute // 60) % 24
def inc_time(time_object, hour=0, minute=0):
"""
increment
@param time_object: time object to increase
@param hour: hour increment
@param min: minute to increment
"""
set_time(time_object, time_object[0] + hour, time_object[1] + minute)
def oled_update_display():
"""
change the oled display according to different status
should be manual called everytime the current_status is changed
FUTURE: separate process to check for state change and call oled_display
automatically?
"""
oled.clear_display()
if current_status[OLED_STATUS] == OLED_DISPLAY:
if current_status[MAIN_STATUS] == MAIN_STATUS_WAKEUP:
oled.add_text('wake up') # TODO: change to picture
elif current_status[MAIN_STATUS] == MAIN_STATUS_NEED_SLEEP:
oled.add_text('need sleep') # TODO: change to picture
oled.add_text('40% slept')
elif current_status[MAIN_STATUS] == MAIN_STATUS_SLEEP:
oled.add_text('sleep') # TODO: change to picture
elif current_status[MAIN_STATUS] == MAIN_STATUS_ALARM:
oled.add_text('alarm') # TODO: change to picture
elif current_status[OLED_STATUS] == OLED_SETTING:
for i in range(len(SETTING_ITEM)):
if i == (setting_status[SETTING_SELECTION]//2 % len(SETTING_ITEM)):
oled.add_text('> ' + SETTING_ITEM[i])
else:
oled.add_text(SETTING_ITEM[i])
elif current_status[OLED_STATUS] == OLED_SET_HOUR:
h, m = setting_status[SETTING_TIME]
oled.add_text(f'-{h:02d}-:{m:02d}')
elif current_status[OLED_STATUS] == OLED_SET_MINUTE:
h, m = setting_status[SETTING_TIME]
oled.add_text(f'{h:02d}:-{m:02d}-')
oled.update_display()
@webpage_flask.route("/")
def home():
return redirect("/index")
@webpage_flask.route("/index")
def home_template():
status = 'wakeup'
if current_status[MAIN_STATUS] == MAIN_STATUS_WAKEUP:
pass
elif current_status[MAIN_STATUS] == MAIN_STATUS_NEED_SLEEP:
status = 'need sleep'
elif current_status[MAIN_STATUS] == MAIN_STATUS_SLEEP:
status = 'sleep'
elif current_status[MAIN_STATUS] == MAIN_STATUS_ALARM:
status = 'alarm'
return render_template("index.html",
sleep_info=sleep_info,
up_time=f"{up_time[0]}:{up_time[1]}",
bed_time=f"{bed_time[0]}:{bed_time[1]}",
other_info=friends_sleep_info,
status=status)
@webpage_flask.route("/settings")
def settings_template():
global settings_info
settings_info = [['sleep time', f'{bed_time[0]:02d}:{bed_time[1]:02d}'],
['wake time', f"{up_time[0]:02d}:{up_time[1]:02d}"],
['volume', '50%'],
['brightness', '50%'],
['light sensitivity', light_threshold],
['12 hour format', time_12_hour]]
return render_template("settings.html",
settings=settings_info)
### background tasks ###
def run_webpage():
"""
process that runs the webpage continuously
"""
# TODO
pass
def update_time():
"""
update the time shown on LED every 1 seconds, the ':' will blink
"""
while True:
hour, minute, _ = get_time()
if time_12_hour:
hour %= 12
if hour == 0:
hour = 12
clock.set_display(f'{hour:02d}:{minute:02d}')
time.sleep(1)
clock.set_display(f'{hour:02d}{minute:02d}')
time.sleep(1)
def check_sleeping():
"""
process that check whether light turns off and phone is nearby RFID
"""
global today_bed_time, bed_time
while True:
if current_status[MAIN_STATUS] == MAIN_STATUS_WAKEUP:
h, m, _ = get_time()
if h == bed_time[0] and m == bed_time[1]:
current_status[MAIN_STATUS] = MAIN_STATUS_NEED_SLEEP
oled_update_display()
today_bed_time = time.time()
GPIO.output(RED_LED, LED_ON)
if current_status[MAIN_STATUS] == MAIN_STATUS_NEED_SLEEP:
# check phone
rfid.read() # will block until RFID is read
voltage = light_sensor.read()
# check light sensor
if voltage <= light_threshold:
current_status[MAIN_STATUS] = MAIN_STATUS_SLEEP
oled_update_display()
# if sleep within BED_TIME_THRESHOLD, count as follow schedule
month, day = get_date()
if (time.time() - today_bed_time)/60 <= BED_TIME_THRESHOLD:
sleep_info.append((f'{month:02d}/{day:02d}',
f'{bed_time[0]:02d}:{bed_time[1]:02d}',
True))
else:
h, m, _ = get_time()
sleep_info.append((f'{month:02d}/{day:02d}',
f'{h:02d}:{m:02d}',
False))
GPIO.output(RED_LED, LED_OFF)
elif current_status[MAIN_STATUS] == MAIN_STATUS_SLEEP:
# check phone
id, _ = rfid.read_no_block()
voltage = light_sensor.read()
# check light sensor
if not id or voltage > light_threshold:
current_status[MAIN_STATUS] = MAIN_STATUS_NEED_SLEEP
oled_update_display()
time.sleep(1)
def alarm_clock():
"""
process for alarm clock
"""
while True:
h, m, _ = get_time()
if current_status[MAIN_STATUS] == MAIN_STATUS_SLEEP:
if h == up_time[0] and m == up_time[1]:
if current_status[ALARM_STATUS] == ALARM_ON:
# set status to alarm if sleep before
current_status[MAIN_STATUS] = MAIN_STATUS_ALARM
else:
current_status[MAIN_STATUS] = MAIN_STATUS_WAKEUP
oled_update_display()
if current_status[MAIN_STATUS] == MAIN_STATUS_ALARM:
if h == alarm_time[0] and m == alarm_time[1]:
# move next alarm to SNOOZE_TIME minutes later
inc_time(alarm_time, minute=SNOOZE_TIME)
speaker.play_sound()
time.sleep(MIN_DELAY)
if __name__ == "__main__":
# one time tasks
simple_GPIO_setup()
peripheral_setup()
oled_update_display()
# background tasks
background_tasks = [alarm_clock, update_time, check_sleeping]
# start background tasks
for task in background_tasks:
thread = threading.Thread(target=task, daemon=True)
thread.start()
# TODO
# # turn on webpage
# webpage_flask.run(host='0.0.0.0', port=80) #, debug=True, threaded=True)
# TODO: test only
try:
print("program started")
ex = input('type exit to exit: ')
while ex != 'exit':
ex = input('type exit to exit: ')
except KeyboardInterrupt:
pass
print("program finished, perform GPIO cleanup")
GPIO.cleanup()
```
#### File: JerryWuZiJie/go_to_bed/demo.py
```python
import time
import RPi.GPIO as GPIO
import pyttsx3
import schedule
import go_to_bed
SNOOZE_BUT = 24 # pins connects to snooze button
STOP_BUT = 23 # pins connects to stop button
VOLUME = 1 # volume range 0 to 1
GPIO.setmode(GPIO.BCM)
GPIO.setup(SNOOZE_BUT, GPIO.IN, pull_up_down=GPIO.PUD_UP) # pull up by default
GPIO.setup(STOP_BUT, GPIO.IN, pull_up_down=GPIO.PUD_UP) # pull up by default
################################################################
# speaker usage
print("=== speaker demo ===")
print("\n" + "-"*20 +
"\nPress button connect on pin", SNOOZE_BUT, "to pause/resume\n" +
"Press button connect on pin", STOP_BUT, "to stop\n" +
"Press 'Ctrl C' to skip demo\n"
+ "-"*20 + "\n")
# initialize speaker object
speaker = go_to_bed.Speaker()
# set the sound you want to play
speaker.set_sound("sound/Let Her Go.mp3")
### play sound ###
print("\n--- play sound demo ---")
def pause_button(channel):
"""
callback function to pause/resume sound
"""
# debounce, wait for 20 milliseconds
time.sleep(0.020)
if not GPIO.input(channel):
if speaker.is_paused():
speaker.resume()
print("sound resumed")
else:
speaker.pause()
print("sound paused")
def stop_button(channel):
"""
callback function to stop sound
"""
speaker.stop_sound()
# pause/resume music when button pressed
GPIO.add_event_detect(SNOOZE_BUT, GPIO.FALLING, callback=pause_button)
# stop music when button pressed
GPIO.add_event_detect(STOP_BUT, GPIO.FALLING, callback=stop_button)
# start playing, non-blocking. The sound will stop if program ends or
# stop_sound() is called
speaker.play_sound()
print("Initial volume:", speaker.volume())
try:
while not speaker.is_stopped():
time.sleep(0.01)
print("finish playing")
except KeyboardInterrupt:
print("Ctrl C pressed, sound stopped")
speaker.stop_sound()
# remove event detect after test
GPIO.remove_event_detect(SNOOZE_BUT)
GPIO.remove_event_detect(STOP_BUT)
### TTS ###
print("\n--- TTS demo ---")
try:
# # setup tts engine
voice_engine = pyttsx3.init()
# voice_engine.setProperty('rate', 170)
voice_engine.setProperty('volume', VOLUME)
voice_engine.setProperty('voice', "english-us")
print('"Failed to wake you up"')
voice_engine.say("Failed to wake you up")
voice_engine.runAndWait()
print('"Good Morning!"')
voice_engine.say("Good Morning!")
voice_engine.runAndWait()
except KeyboardInterrupt:
print("Ctrl C pressed, TTS stopped")
### schedule alarm ###
print("\n--- schedule alarm demo ---")
def stop_alarm(channel):
"""
callback function to pause/resume sound
"""
speaker.stop_sound()
# stop alarm when button pressed
GPIO.add_event_detect(STOP_BUT, GPIO.FALLING, callback=stop_alarm)
def alarm():
# set volume to max
speaker.mixer.music.set_volume(VOLUME)
# start alarm
speaker.play_sound()
print("Alarming... Press button to stop")
while not speaker.is_stopped():
if speaker.is_paused():
break
# if the sound finish playing and the user haven't push the button to
# pause it, we consider the alarm failed to wake user up
if speaker.is_stopped():
print("sound stopped, initialize new alarm")
else:
print("sound paused, initialize new alarm")
speaker.stop_sound()
# alarm_job = schedule.every().day.at("07:00").do(alarm)  # repeat every day at 7 AM
alarm_job = schedule.every(3).seconds.do(alarm)  # repeat every 3 seconds
# nohup usage: https://www.computerhope.com/unix/unohup.htm#:~:text=nohup%20command%20%3E%20file%22.-,Examples,-nohup%20mycommand
print("Program running... (use nohup to keep runninging the background)")
try:
while True:
schedule.run_pending()
time.sleep(1)
except KeyboardInterrupt:
print("Ctrl C pressed, schedule stopped")
# cancel schedule
schedule.cancel_job(alarm_job)
################################################################
# led usage
print('\n\n\n')
print("=== led demo ===")
SLEEP_TIME = 1
# initialize clock display
clock = go_to_bed.Clock()
### display ###
print("\n--- display demo ---")
try:
# display 8888
clock.set_display("88888888") # str more than 4 will be auto truncate
print(clock.get_display())
time.sleep(SLEEP_TIME)
# clear display by setting empty string
clock.set_display("")
print("clear display")
time.sleep(SLEEP_TIME)
def scrolling_message(led, msg, delay=0.5):
"""
display scrolling text
"""
width = 4
padding = " " * width
msg = padding + msg + padding
for i in range(len(msg) - width + 1):
led.set_display(msg[i:i + width])
time.sleep(delay)
# scrolling text
print("scrolling 31415926")
scrolling_message(clock, "31415926")
# display 12:34
clock.set_display("12:34") # if third char is :, : will be turn on
print(clock.get_display())
time.sleep(SLEEP_TIME)
except KeyboardInterrupt:
print("Ctrl C pressed, display stopped")
### brightness ###
print("\n--- brightness demo ---")
try:
# adjust brightness: 0 - 100
# there's only 32 brightness level in hardware
print("gradually increase brightness")
for i in range(101):
clock.set_brightness(i)
time.sleep(0.05)
time.sleep(SLEEP_TIME)
print("set 50% brightness")
clock.set_brightness(50)
time.sleep(SLEEP_TIME)
print("increase birghtness by 10%")
clock.increase_brightness()
time.sleep(SLEEP_TIME)
print("decrease birghtness by 10%")
clock.decrease_brightness()
time.sleep(SLEEP_TIME)
except KeyboardInterrupt:
print("Ctrl C pressed, brightness stopped")
################################################################
# rfid usage
print('\n\n\n')
print("=== rfid demo ===")
try:
rfid = go_to_bed.RFID()
print("write to rfid...")
rfid.write("some message")
print("write done")
print("read from rfid...")
id, text = rfid.read()
print(id)
print(text)
except KeyboardInterrupt:
print("Ctrl C pressed, test skipped")
print("=== All demo done ===")
```
|
{
"source": "Jerrywx/Python_Down",
"score": 4
}
|
#### File: 01-Python/03-socket/03-server.py
```python
import socket
def handle_request(client):
buf = client.recv(1024)
print(buf)
client.send("HTTP/1.1 200 OK\r\n\r\n".encode("utf8"))
# client.send("<h1 style='color:red'>Hello World!</h1>".encode("utf8"))
client.send("<h1 style='color:red'>Hello World!</h1>".encode("utf8"))
def main():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('localhost', 8001))
sock.listen(5)
while True:
connection, address = sock.accept()
handle_request(connection)
connection.close()
if __name__ == '__main__':
main()
```
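A quick way to exercise the server above from a second terminal is a plain socket client (illustration only, not part of the original file):
```python
import socket

client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(('localhost', 8001))
client.send(b"GET / HTTP/1.1\r\nHost: localhost\r\n\r\n")
print(client.recv(4096).decode("utf8"))   # prints the status line followed by the HTML body
client.close()
```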
#### File: PerfectCRM/student/views.py
```python
from django.shortcuts import render
# Create your views here.
def index(request):
return render(request, "student/index.html")
```
#### File: 04-Tornado/04-SQLALchemy/02-one2more.py
```python
import sqlalchemy
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, MetaData, ForeignKey, UniqueConstraint, Index
from sqlalchemy.orm import sessionmaker
print(sqlalchemy.__version__)
# Connect to the database
engine = create_engine('mysql+pymysql://root:[email protected]:3306/test', echo=True)
# ===================== 0. Create the database tables
# Create a declarative base class for the ORM models
Base = declarative_base()
class Man(Base):
__tablename__ = "man"
id = Column(Integer, primary_key=True)
name = Column(String(32))
age = Column(String(8))
def __repr__(self):
return self.name
class Woman(Base):
__tablename__ = "woman"
id = Column(Integer, primary_key=True)
name = Column(String(32))
age = Column(String(32))
men_id = Column(Integer, ForeignKey('man.id'))
# Create all tables
# Base.metadata.create_all(engine)
```
#### File: Strom/App/Company.py
```python
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import urllib.request
import ssl
import json
import time
import random
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship
import sys
sys.path.append("../sqlManager")
sys.path.append("../reptile")
from movieApi import top250, baseApi, celebrity, movieInfo, comingSoon, movieList
from dataManager import Movie, Country, MovieType, Celebrity, MovieAlbum, Resource, Image, Company
import math
from sqlalchemy.ext.declarative import DeclarativeMeta
from datetime import datetime
# Company list
class Companys(tornado.web.RequestHandler):
# get 方法
def get(self, *args, **kwargs):
list = self.getCompanyList()
self.render('company.html', company=list)
# self.render('company.html')
    # Fetch the list of companies
def getCompanyList(self):
session = Session.session()
list = session.query(Company).all()
return list
# Edit a company
class CompanyEdit(tornado.web.RequestHandler):
addEdList = []
count = 1
#
def get(self, *args, **kwargs):
session = Session.session()
        # Check whether a movie search id was passed
movieId = self.get_argument("movieId", None)
if movieId != None:
# 搜索
movieList = self.searchMovie(movieId)
# 搜索结果转 json
jsonData = json.dumps(movieList, cls=new_alchemy_encoder(), check_circular=False)
print("-------------")
print("-------------")
print("-------------")
print("-------------")
self.write(jsonData)
# self.render('companyEdit.html', movieList=movieList)
return
        # Check whether a movie should be added
addId = self.get_argument("addId", None)
if addId != None:
movieList = self.searchMovie(addId)
self.addEdList = self.addEdList + movieList
print("============= ADDDDDDDDDD", addId, "----", str(self.count))
self.count = self.count + 1
jsonData = json.dumps(self.addEdList, cls=new_alchemy_encoder(), check_circular=False)
self.write(jsonData)
# self.render('companyEdit.html', movieList=self.addEdList)
return
        # Editing an existing company or creating a new one?
comId = self.get_argument("companyId", None)
if comId == None:
company = Company()
self.render('companyEdit.html', company=company, movieList=[])
else:
company = session.query(Company).filter_by(id=comId).first()
self.render('companyEdit.html', company=company, movieList=[])
        # Render the page
        # self.render('companyEdit.html', movieList=[])
    # Handle the form submission
def post(self, *args, **kwargs):
session = Session.session()
company = Company()
company.name = self.get_argument("name_cn")
company.name_en = self.get_argument("name_en")
company.address = self.get_argument("address")
company.create_time = self.get_argument("time")
company.deal_with = self.get_argument("job")
company.nature = self.get_argument("type")
company.create_person = self.get_argument("person")
company.p_company = self.get_argument("p_company")
company.desc_info = self.get_argument("desc")
session.add(company)
session.commit()
list = session.query(Company).all()
print("==============")
print("==============")
        # Render the page
self.render('company.html', company=list)
    # Search for movies by id or name
def searchMovie(self, movieId):
session = Session.session()
if movieId.isdigit():
movie = session.query(Movie).filter_by(id=movieId).all()
return movie
else:
value = "%" + movieId + "%"
movie = session.query(Movie).filter(Movie.movie_name_cn.like(value)).all()
return movie
# Delete a company
class CompanyDel(tornado.web.RequestHandler):
    # GET handler
def get(self, *args, **kwargs):
companyId = self.get_argument("comId", None)
if companyId != None:
session = Session.session()
company = session.query(Company).filter_by(id=companyId).first()
session.delete(company)
session.commit()
self.write("true")
else:
self.write("false")
# Database session helper
class Session():
    @classmethod  # class method
def session(cls):
        # 1. Connect to the database
engine = create_engine('mysql+pymysql://root:[email protected]:3306/storm?charset=utf8', echo=False)
        # 2. Create the database tables
Base = declarative_base()
Session = sessionmaker(bind=engine)
session = Session()
return session
# Convert SQLAlchemy models to JSON
def new_alchemy_encoder():
_visited_objs = []
class AlchemyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj.__class__, DeclarativeMeta):
# don't re-visit self
if obj in _visited_objs:
return None
_visited_objs.append(obj)
# an SQLAlchemy class
fields = {}
for field in [x for x in dir(obj) if not x.startswith('_') and x != 'metadata']:
data = obj.__getattribute__(field)
try:
if isinstance(data, datetime):
data = data.strftime('%Y-%m-%d %H:%M:%S')
json.dumps(data) # this will fail on non-encodable values, like other classes
fields[field] = data
except TypeError:
fields[field] = None
return fields
return json.JSONEncoder.default(self, obj)
return AlchemyEncoder
class Upload(tornado.web.RequestHandler):
def post(self, *args, **kwargs):
# img = self.get_argument("img", None)
file_metas = self.request.files.get('img', None)
print("====================")
print(file_metas[0]['filename'])
file = open("./img.jpg", "wb")
file.write(file_metas[0]['body'])
file.close()
self.write("asdasdas")
```
#### File: Strom/reptile/01-spider.py
```python
import urllib.request
import ssl
import json
# import sqlalchemy
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
# from sqlalchemy import Column, Integer, String, MetaData, Boolean, Float
from sqlalchemy.orm import sessionmaker, relationship
import sys
sys.path.append("../sqlManager")
from dataManager import Movie
from enum import Enum
# Movie categories
class MovieClass(Enum):
Hot = "热门"
New = "最新"
Classic = "经典"
Play = "可播放"
Top = "豆瓣高分"
Dark = "冷门佳片"
Chinese = "华语"
America = "欧美"
Korean = "韩国"
Japan = "日本"
Action = "动作"
Comedy = "喜剧"
Love = "爱情"
Science = "科幻"
Suspense = "悬疑"
Terrify = "恐怖"
Cure = "治愈"
# sort by popularity: recommend
# sort by date: time
# sort by rating: rank
# Build the movie list URL
def getMovieListUrl(type, sort, pageLimit, pageStrat):
    # Movie categories
movieClass = Enum("热门", "最新", "经典", "可播放", "豆瓣高分",
"冷门佳片", "华语", "欧美", "韩国", "日本",
"动作", "喜剧", "爱情", "科幻", "悬疑", "恐怖", "治愈")
# print(movieClass.)
# urlString = "https://movie.douban.com/j/search_subjects?" \
# "type=movie&tag=%e6%9c%80%e6%96%b0&" \
# "sort=recommend&" \
# "page_limit=100&" \
# "page_start=500"
    # Movie categories
# movieClass = ["热门", "最新", "经典", "可播放", "豆瓣高分",
# "冷门佳片", "华语", "欧美", "韩国", "日本",
# "动作", "喜剧", "爱情", "科幻", "悬疑", "恐怖", "治愈"]
    # Movie types
pass
baseUrl = "https://movie.douban.com/j/search_subjects"
ssl._create_default_https_context = ssl._create_unverified_context
ua_heaeders = {
"User_Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36"
}
# Build a request object with urllib.request.Request()
# request = urllib.request.Request("http://www.80s.tw/", headers = ua_heaeders)
# Hot
# urlString = "https://movie.douban.com/j/search_subjects?type=movie&tag=%e7%83%ad%e9%97%a8&sort=recommend&page_limit=100&page_start=300"
# Latest
urlString = "https://movie.douban.com/j/search_subjects?type=movie&tag=%e6%9c%80%e6%96%b0&sort=recommend&page_limit=100&page_start=500"
# Douban top-rated
# urlString = "https://movie.douban.com/j/search_subjects?type=movie&tag=%e8%b1%86%e7%93%a3%e9%ab%98%e5%88%86&sort=recommend&page_limit=100&page_start=0"
# Hidden gems
# urlString = "https://movie.douban.com/j/search_subjects?type=movie&tag=%e5%86%b7%e9%97%a8%e4%bd%b3%e7%89%87&sort=recommend&page_limit=100&page_start=500"
#
# urlString = "https://movie.douban.com/j/search_subjects?type=movie&tag=%e5%8d%8e%e8%af%ad&sort=recommend&page_limit=100&page_start=100"
request = urllib.request.Request(urlString, headers=ua_heaeders)
# Send the request to the given URL
response = urllib.request.urlopen(request)
# read() returns the entire response body as a string
html = response.read()
# Print the page content
# print(html.decode("utf-8"))
file = open("./resource/list2.json", "w")
file.write(html.decode("utf-8"))
# ------- Store the data
# 1. Connect to the database
engine = create_engine('mysql+pymysql://root:[email protected]:3306/storm?charset=utf8', echo=False)
# 2. Create the database tables
Base = declarative_base()
# =============================================================================================
# Read the JSON from a local file (disabled)
# file = open("./resource/list.json", "r")
# content = json.loads(file)
# Parse the JSON from the web response
content = json.loads(html.decode("utf-8"))
movies = []
Session = sessionmaker(bind=engine)
session = Session()
movieList = content['subjects']
if len(movieList) > 0:
    # Iterate over the records
for item in content['subjects']:
        # Check whether the movie already exists
ret = session.query(Movie).filter_by(movie_douban_id=item['id']).first()
if ret:
print("---- 存在的电影:" + item['title'])
else:
print("===== 不存在的电影:" + item['title'])
movie = Movie()
movie.movie_name_cn = item['title']
movie.movie_douban_mark = item['rate']
movie.movie_cover = item['cover']
movie.movie_doubanUrl = item['url']
movie.movie_douban_id = item['id']
# movies.append(movie)
session.add(movie)
else:
print("--------- 数据为空 ---------")
session.commit()
# print(movies)
# session.add_all(movies)
# session.commit()
```
|
{
"source": "JerryX1110/NL-Augmenter",
"score": 3
}
|
#### File: NL-Augmenter/evaluation/evaluate_paraphrase_detection.py
```python
import numpy as np
import torch
from datasets import load_dataset
from torch.nn.functional import cosine_similarity
from transformers import AutoModel, AutoTokenizer
from dataset import KeyValueDataset
from tasks.TaskTypes import TaskType
# Mean Pooling - Take attention mask into account for correct averaging
def mean_pooling(model_output, attention_mask):
"""This function applies mean poling to the sentence embeddings.
Args:
model_output: token embeddings of size 768xtokens.
param2: Attention mask of the embeddings.
Returns:
A single sentence emebdding of size 768.
"""
token_embeddings = model_output[
0
].cpu() # First element of model_output contains all token embeddings
input_mask_expanded = (
attention_mask.cpu()
.unsqueeze(-1)
.expand(token_embeddings.size())
.float()
)
return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(
input_mask_expanded.sum(1), min=1e-9
)
def evaluate(
operation, evaluate_filter, model_name, dataset_name, split="test[:20%]"
):
"""Perform paraphrase detection evaluation.
Args:
operation: the operation to be evaluated.
evaluate_filter: if a filter should be evaluated or not.
model_name: name of the model to be evaluated.
dataset_name: name of the dataset to be evaluated.
split: split to be used in the evaluation.
Returns:
A dictionary with the evaluation's performance information.
"""
# load model
if model_name is None:
model_name = "sentence-transformers/paraphrase-xlm-r-multilingual-v1"
# load test set
if dataset_name is None:
dataset_name = "paws"
print(
f"Loading <{dataset_name}> dataset to evaluate <{model_name}> model."
)
if dataset_name == "paws":
hf_dataset = load_dataset(dataset_name, "labeled_final", split=split)
elif dataset_name == "mrpc":
hf_dataset = load_dataset("glue", dataset_name, split=split)
else:
hf_dataset = load_dataset(dataset_name, split=split)
dataset = KeyValueDataset.from_huggingface(
hf_dataset,
TaskType.PARAPHRASE_DETECTION,
["sentence1", "sentence2", "label"],
)
tokenizer = AutoTokenizer.from_pretrained(model_name)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = AutoModel.from_pretrained(model_name).to(device)
print(
f"Here is the performance of the model {model_name} on the {split} split of the {dataset_name} dataset"
)
if evaluate_filter:
performance = filter_performance(
dataset, tokenizer, model, device, filter=operation
)
else:
performance = transformation_performance(
dataset, tokenizer, model, device, transformation=operation
)
performance["model_name"] = model_name
performance["split"] = split
performance["dataset_name"] = dataset_name
return performance
def filter_performance(dataset, tokenizer, model, device, filter):
"""Obtain the performance of the model on the filtered dataset.
Args:
dataset: the dataset to be evaluated.
        tokenizer: the tokenizer to apply to the data.
model: the model to be evaluated.
device: the device in which to run the evaluation.
filter: the filter to be applied.
Returns:
A dictionary with the evaluation's performance information.
"""
print("Here is the performance of the model on the filtered set")
filtered_dataset = dataset.apply_filter(
filter, subfields=["sentence1", "sentence2", "label"]
)
return performance_on_dataset(filtered_dataset, tokenizer, model, device)
def transformation_performance(
dataset, tokenizer, model, device, transformation
):
"""Obtain the performance of the model on the transformed dataset.
Args:
dataset: the dataset to be evaluated.
        tokenizer: the tokenizer to apply to the data.
model: the model to be evaluated.
device: the device in which to run the evaluation.
transformation: the transformation to be applied.
Returns:
A dictionary with the evaluation's performance information.
"""
# report performance on the original dataset first (printed inside the call)
performance_on_dataset(dataset, tokenizer, model, device)
pt_dataset = dataset.apply_transformation(
transformation, subfields=["sentence1", "sentence2", "label"]
)
print("Here is the performance of the model on the transformed set")
performance = performance_on_dataset(pt_dataset, tokenizer, model, device)
return performance
def performance_on_dataset(dataset, tokenizer, model, device):
"""Obtain the performance of the model on a dataset.
Args:
dataset: the dataset to be evaluated.
tokenizer: the tokenizer to apply to the data.
model: the model to be evaluated.
device: the device in which to run the evaluation.
Returns:
A dictionary with the evaluation's performance information.
"""
labels = []
preds = []
print(f"Length of Evaluation dataset is {len(dataset)}")
for example in dataset:
sentence1, sentence2, label = example
sentences = [sentence1, sentence2]
# Tokenize sentences
encoded_input = tokenizer(
sentences, padding=True, truncation=True, return_tensors="pt"
).to(device)
# Compute token embeddings
with torch.no_grad():
model_output = model(**encoded_input)
# Perform pooling. In this case, mean pooling.
sentence_embeddings = mean_pooling(
model_output, encoded_input["attention_mask"]
)
similarity = cosine_similarity(
sentence_embeddings[0], sentence_embeddings[1], dim=0
)
labels.append(label)
if similarity > 0.5:
preds.append(1)
else:
preds.append(0)
accuracy = np.round(100 * np.mean(np.array(labels) == np.array(preds)))
total = len(labels)
print(
f"The accuracy on this subset which has {total} examples = {accuracy}"
)
return {"accuracy": accuracy, "total": total}
```
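For context, here is a minimal usage sketch of the pooling-plus-threshold logic above; the sentence pair is invented, and the checkpoint name simply mirrors the default used in `evaluate()`.

```python
# Minimal sketch, reusing the mean_pooling helper defined above.
# The sentence pair below is an assumption for illustration only.
import torch
from torch.nn.functional import cosine_similarity
from transformers import AutoModel, AutoTokenizer

model_name = "sentence-transformers/paraphrase-xlm-r-multilingual-v1"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModel.from_pretrained(model_name)

sentences = ["A man is playing a guitar.", "Someone plays the guitar."]
encoded = tokenizer(sentences, padding=True, truncation=True, return_tensors="pt")
with torch.no_grad():
    output = model(**encoded)

embeddings = mean_pooling(output, encoded["attention_mask"])
similarity = cosine_similarity(embeddings[0], embeddings[1], dim=0)
print(f"cosine similarity: {similarity.item():.3f}, paraphrase: {bool(similarity > 0.5)}")
```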
#### File: filters/diacritic_filter/filter.py
```python
from interfaces.SentenceOperation import SentenceOperation
from tasks.TaskTypes import TaskType
import unicodedata
class DiacriticFilter(SentenceOperation):
"""
Filter that returns True if the input sentence contains any diacritic marks.
"""
tasks = [TaskType.TEXT_CLASSIFICATION, TaskType.TEXT_TO_TEXT_GENERATION]
languages = ["en", "fr", "es", "it", "pt", "fi", "nl", "da", "cs", "hr", "bg", "be", "eu", "ru", "uk", "pl", "sv", "sk", "sl"]
keywords = ["visual", "morphological","rule-based", "written", "highly-meaning-preserving", "high-precision"]
def __init__(self):
super().__init__()
def strip_accents(self, sentence):
return str(unicodedata.normalize('NFD', sentence).encode('ascii', 'ignore').decode("utf-8"))
def filter(self, sentence: str = None) -> bool:
return sentence != self.strip_accents(sentence)
```
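A quick sketch of how the filter above behaves; the example sentences are assumptions, and the expected booleans follow from the NFD-normalize-then-ASCII-strip check.

```python
# Illustrative only: True when stripping accents changes the sentence.
f = DiacriticFilter()
print(f.filter("Crème brûlée is delicious"))  # True (diacritics present)
print(f.filter("Plain ASCII sentence"))       # False (nothing to strip)
```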
#### File: filters/gender_bias/filter.py
```python
import re
import json
from interfaces.SentenceOperation import SentenceOperation
from tasks.TaskTypes import TaskType
class GenderBiasFilter(SentenceOperation):
tasks = [TaskType.TEXT_TO_TEXT_GENERATION]
languages = ["en", "fr", "pl", "ru"]
keywords = ["rule-based", "social-reasoning"]
def __init__(self, language, feminine_input=[], masculine_input=[]):
"""
initialise the language and user defined arrays of keywords for feminine and masculine groups
:param language: the string key representing the supported language
:param feminine_input: array of keywords defined by the user and designating the female group
:param masculine_input: array of keywords defined by the user and designating the male group
"""
super().__init__()
self.language = language
self.feminine_input = feminine_input
self.masculine_input = masculine_input
@staticmethod
def flag_sentences(
sentences, language, feminine_input=[], masculine_input=[]
):
"""
flag sentences as belonging to the feminine, masculine or neutral groups
:param sentences: sentences array
:param language: the string key representing the supported language
:param feminine_input: array of keywords defined by the user and designating the female group
:param masculine_input: array of keywords defined by the user and designating the male group
:return: array of objects, each containing the analyzed sentence along with three flags
"""
flagged_sentences = []
# Read names
f = open('filters/gender_bias/lexicals.json')
data = json.load(f)
# Define the words, that represent feminine and masculine groups in both languages
if language == "en":
feminine_titles = data["feminine_titles_en"]
feminine_relation = data["feminine_relation_en"]
feminine_relation_plural = data["feminine_relation_plural_en"]
feminine_jobs = data["feminine_jobs_en"]
feminine_jobs_plural = data["feminine_jobs_plural_en"]
feminine_names = data['feminine_names_en']
masculine_titles = data["masculine_titles_en"]
masculine_relation = data["masculine_relation_en"]
masculine_relation_plural = data["masculine_relation_plural_en"]
masculine_jobs = data["masculine_jobs_en"]
masculine_jobs_plural = data["masculine_jobs_plural_en"]
masculine_names = data["masculine_names_en"]
feminine = (
["she", "her", "hers"]
+ feminine_relation
+ feminine_relation_plural
+ feminine_titles
+ feminine_jobs
+ feminine_jobs_plural
+ feminine_names
)
masculine = (
["he", "him", "his"]
+ masculine_relation
+ masculine_relation_plural
+ masculine_titles
+ masculine_jobs
+ masculine_jobs_plural
+ masculine_names
)
elif language == "fr":
feminine_titles = data["feminine_titles_fr"]
feminine_relation = data["feminine_relation_fr"]
feminine_relation_plural = data["feminine_relation_plural_fr"]
feminine_jobs = data["feminine_jobs_fr"]
feminine_jobs_plural = data["feminine_jobs_plural_fr"]
feminine_names = data['feminine_names_fr']
masculine_jobs = data["masculine_jobs_fr"]
masculine_jobs_plural = data["masculine_jobs_plural_fr"]
masculine_relation = data["masculine_relation_fr"]
masculine_relation_plural = data["masculine_relation_plural_fr"]
masculine_titles = data["masculine_titles_fr"]
masculine_names = data['masculine_names_fr']
feminine = (
["elle", "sienne"]
+ feminine_relation
+ feminine_relation_plural
+ feminine_titles
+ feminine_jobs
+ feminine_jobs_plural
+ feminine_names
)
masculine = (
["il", "sien"]
+ masculine_relation
+ masculine_relation_plural
+ masculine_titles
+ masculine_jobs
+ masculine_jobs_plural
+ masculine_names
)
elif language == "pl":
feminine_titles = data["feminine_titles_pl"]
feminine_relation = data["feminine_relation_pl"]
feminine_relation_plural = data["feminine_relation_plural_pl"]
feminine_jobs = data["feminine_jobs_pl"]
feminine_jobs_plural = data["feminine_jobs_plural_pl"]
feminine_names = data['feminine_names_pl']
masculine_titles = data["masculine_titles_pl"]
masculine_relation = data["masculine_relation_pl"]
masculine_relation_plural = data["masculine_relation_plural_pl"]
masculine_jobs = data["masculine_jobs_pl"]
masculine_jobs_plural = data["masculine_jobs_plural_pl"]
masculine_names = data['masculine_names_pl']
feminine = (
["ona", "jej"]
+ feminine_relation
+ feminine_relation_plural
+ feminine_titles
+ feminine_jobs
+ feminine_jobs_plural
+ feminine_names
)
masculine = (
["on", "jego"]
+ masculine_relation
+ masculine_relation_plural
+ masculine_titles
+ masculine_jobs
+ masculine_jobs_plural
+ masculine_names
)
elif language == "ru":
feminine_titles = data["feminine_titles_ru"]
feminine_relation = data["feminine_relation_ru"]
feminine_relation_plural = data["feminine_relation_plural_ru"]
feminine_jobs = data["feminine_jobs_ru"]
feminine_jobs_plural = data["feminine_jobs_plural_ru"]
feminine_names = data['feminine_names_ru']
masculine_titles = data["masculine_titles_ru"]
masculine_relation = data["masculine_relation_ru"]
masculine_relation_plural = data["masculine_relation_plural_ru"]
masculine_jobs = data["masculine_jobs_ru"]
masculine_jobs_plural = data["masculine_jobs_plural_ru"]
masculine_names = data["masculine_names_ru"]
feminine = (
["она", "ее"]
+ feminine_relation
+ feminine_relation_plural
+ feminine_titles
+ feminine_jobs
+ feminine_jobs_plural
+ feminine_names
)
masculine = (
["он", "его"]
+ masculine_relation
+ masculine_relation_plural
+ masculine_titles
+ masculine_jobs
+ masculine_jobs_plural
+ masculine_names
)
else:
raise NameError(
'The specified language is not supported or misformatted. Try "en", "fr", "pl" or "ru" as language arguments to the filter() method.'
)
# Close names file
f.close()
assert (
len(sentences) > 0
), "You must provide at least one sentence for the analysis. Check the content of your sentences array you pass to the filter() method."
for sentence in sentences:
# Clean the sentence content using regex
processed_sentence = sentence.lower()
processed_sentence = re.sub("^", " ", processed_sentence)
processed_sentence = re.sub("$", " ", processed_sentence)
# Take care of urls
words = []
for word in processed_sentence.split():
i = word.find("http")
if i >= 0:
word = word[:i] + " " + "__url__"
words.append(word.strip())
processed_sentence = " ".join(words)
processed_sentence = re.sub(r"\[([^\]]*)\] \( *__url__ *\)", r"\1", processed_sentence)
# Remove illegal chars and extra space
processed_sentence = re.sub("__url__", "URL", processed_sentence)
processed_sentence = re.sub(r"[^A-Za-z0-9():,.!?\"\']", " ", processed_sentence)
processed_sentence = re.sub("URL", "__url__", processed_sentence)
processed_sentence = re.sub(r"^\s+", "", processed_sentence)
processed_sentence = re.sub(r"\s+$", "", processed_sentence)
processed_sentence = re.sub(r"\s+", " ", processed_sentence)
# Make sure that user input has words in lower case form
joint_feminine = feminine + feminine_input
joint_feminine = [word.lower() for word in joint_feminine]
joint_masculine = masculine + masculine_input
joint_masculine = [word.lower() for word in joint_masculine]
# Split the words in the processed_sentence to find the intersection with the feminine array of keywords
intersection_feminine = set(processed_sentence.split()).intersection(
set(joint_feminine)
)
# Split the words in the processed_sentence to find the intersection with the masculine array of keywords
intersection_masculine = set(processed_sentence.split()).intersection(
set(joint_masculine)
)
# If the intersection occurred, the intersection_feminine and intersection_masculine variables will contain at least one common keyword
# use this intersection information to get the value for the corresponding flags
feminine_flag = len(intersection_feminine) > 0
masculine_flag = len(intersection_masculine) > 0
# In case the processed_sentence contains the keywords from feminine and masculine arrays, set a union_flag value
union_flag = (
len(intersection_feminine) > 0
and len(intersection_masculine) > 0
)
# If the processed_sentence contains keywords from neither the feminine nor the masculine arrays, set the neutral_flag value
neutral_flag = (
len(intersection_feminine) == 0
and len(intersection_masculine) == 0
)
# If union_flag is set, mark the sentence as neutral and reset the feminine and masculine flags to False
if union_flag is True:
feminine_flag = False
masculine_flag = False
neutral_flag = True
# Create the sentence object with the retrieved flag values
sentence_object = {
"sentence": sentence,
"feminine_flag": feminine_flag,
"masculine_flag": masculine_flag,
"neutral_flag": neutral_flag,
}
# Append the object to the array we return
flagged_sentences.append(sentence_object)
return flagged_sentences
def count(self, group_flag, flagged_corpus):
"""
generic method for counting the number of sentences
:param group_flag: a string flag to be counted
:return: integer value, representing the number of sentences with a particular flag
"""
flags_count = len(
[
flag
for flag in flagged_corpus
if flag.get(group_flag) is True
]
)
return flags_count
def get_group(self, group_flag, flagged_corpus):
"""
generic method for extracting the sentences with a particular flag
:param group_flag: a string flag to extract corresponding sentences
:return: array representing the sub group of sentences with a particular flag
"""
group = [
flag.get("sentence")
for flag in flagged_corpus
if flag.get(group_flag) is True
]
return group
def count_genders(self, flagged_corpus):
"""
count the number of sentences in each of groups
:param flagged_corpus: array of flagged sentences
:return: 3 integer values, representing feminine, masculine and neutral groups respectively
"""
feminine_count = self.count("feminine_flag", flagged_corpus)
masculine_count = self.count("masculine_flag", flagged_corpus)
neutral_count = self.count("neutral_flag", flagged_corpus)
return feminine_count, masculine_count, neutral_count
def sort_groups(self, flagged_corpus):
"""
sort the sentences in each of 3 groups
:param flagged_corpus: array of flagged sentences
:return: 3 arrays of strings, containing feminine, masculine and neutral groups respectively
"""
feminine_group = self.get_group("feminine_flag", flagged_corpus)
masculine_group = self.get_group("masculine_flag", flagged_corpus)
neutral_group = self.get_group("neutral_flag", flagged_corpus)
return feminine_group, masculine_group, neutral_group
def filter(self, sentences: list) -> bool:
# Retrieve the flags for each of the sentences
flagged_corpus = self.flag_sentences(
sentences, self.language, self.feminine_input, self.masculine_input
)
# Use the retrieved flags to count the number of sentences in each group
feminine_count, masculine_count, neutral_count = self.count_genders(
flagged_corpus
)
feminine_percentage = 100 * float(feminine_count)/float(len(sentences))
masculine_percentage = 100 * float(masculine_count)/float(len(sentences))
# If the rounded percentage of sentences in the target group is lower than in the test group, set bias to True
# Note, that the neutral group is not taken into account in this calculation
if round(feminine_percentage) < round(masculine_percentage):
biased = True
else:
biased = False
return biased
```
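A usage sketch for the filter above; the sentences are invented, and running it requires the repository's `filters/gender_bias/lexicals.json` to be present.

```python
# Sketch only; counts depend on the keyword lists in lexicals.json.
corpus = [
    "He thanked his brother for the gift.",
    "She is a talented engineer.",
    "The report was published yesterday.",
]
gb = GenderBiasFilter(language="en")
flags = GenderBiasFilter.flag_sentences(corpus, "en")
print(gb.count_genders(flags))   # (feminine, masculine, neutral) counts
print(gb.filter(corpus))         # True when feminine % < masculine %
```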
#### File: JerryX1110/NL-Augmenter/setup.py
```python
import os
from test.mapper import map_filter, map_transformation
from setuptools import setup
from TestRunner import OperationRuns
def all_folders(search: str, transformation_type: str) -> list:
"""
Get all folder names for either the transformations or filters
Parameters:
-----------
search: str,
search term, can be either 'transformations' or 'filters'.
transformation_type: str,
if 'transformations' is the search term then specify what type is it (light or heavy).
Returns:
--------
list of folder names.
"""
folder_names = [
search + "/" + f
for f in list(
OperationRuns.get_all_folder_names(search, transformation_type)
)
]
return folder_names
def read(fname):
with open(os.path.join(os.path.dirname(__file__), fname)) as f:
data = f.read()
return data
def recursive_requirements(search: str, transformation_type: str) -> str:
# (1) read all requirements.txt in the folder.
requirements = ""
for folder in all_folders(search, transformation_type):
r_file = os.path.join(
os.path.dirname(__file__), folder + "/requirements.txt"
)
if os.path.isfile(r_file):
with open(r_file) as f:
requirements += f.read() + "\n"
return requirements
def get_default_requirements(transformation_type: str) -> str:
"""
Populate the default requirements to be installed for the library
Parameters
----------
transformation_type: str,
type of transformation, light or heavy.
Returns:
-------
str
Concatenated requirements text.
"""
# Get the default requirements (light transformations and light filters)
# (1) read main requirements.txt
mandatory_requirements = read("requirements.txt")
# (2) read requirements for light transformations
mandatory_requirements += recursive_requirements(
"transformations", "light"
)
# (3) read requirements for light filters
mandatory_requirements += recursive_requirements(
"filters", "light"
) # light filters
return mandatory_requirements
def filter_requirements(requirements: str) -> list:
"""Filter the requirements, exclude comments, empty strings
Parameters:
-----------
requirements: str,
string of requirements
Returns:
--------
list
list of filtered requirements
"""
# Build a filtered copy rather than removing entries while iterating.
list_requirements = [
entry for entry in requirements.split("\n") if "#" not in entry and entry != ""
]
return list_requirements
def get_extra_requirements() -> dict:
"""
Get the dict of requirements for all the heavy transformations and filters.
If a user specifies a heavy transformation or filter, the corresponding requirements
from the generated dictionary will be picked up and installed along with the default
requirements.
The generated dictionary will be of this format:
{
'lost_in_translation': ['rouge_score'],
'mr_value_replacement': ['torchtext==0.9.1'],
'ocr_perturbation': ['trdg==1.6.0', 'tesserocr>=2.5.2'],
'pinyin': ['g2pM==0.1.2.5'],
'punctuation': ['cucco==2.2.1', 'fastpunct==2.0.2'],
'sentence_reordering': ['allennlp==2.5.0', 'allennlp-models==2.5.0'],
'synonym_substitution': ['nltk==3.6.2'],
'token_replacement': ['editdistance>=0.5.3'],
'transformer_fill': ['torch', 'transformers', 'spacy'],
'toxicity': ['detoxify==0.2.2']
}
Example usage: pip install NL-Augmenter[lost_in_translation]
Returns:
-------
dict
dict of requirements for all the heavy transformations and filters.
"""
# Dictionary of requirements
requirements = {}
count = 0
# Heavy transformations picked from test/mapper.py
for entry in map_transformation["heavy"]:
file_name = "transformations/" + entry + "/requirements.txt"
if os.path.exists(file_name):
req_string = read(file_name)
requirements[entry] = filter_requirements(req_string)
count += 1
# Heavy filters picked from test/mapper.py
for entry in map_filter["heavy"]:
file_name = "filters/" + entry + "/requirements.txt"
if os.path.exists(file_name):
req_string = read(file_name)
requirements[entry] = filter_requirements(req_string)
count += 1
print(count)
return requirements
setup(
name="NL-Augmenter",
version="0.0.1",
description="The official repository of transformations.",
long_description=read("README.md"),
install_requires=get_default_requirements("light"),
extras_require=get_extra_requirements(),
package_data={
"": ["*.json", "*.txt", "*.tsv", "*.csv", "*.npz", "*.ckpt"]
},
include_package_data=True,
)
```
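A small sanity check of the `filter_requirements` helper above; the requirements string is invented for illustration and assumes the comprehension-based filtering shown in this file.

```python
# Comments and blank lines are dropped; version pins are kept verbatim.
raw = "torch>=1.7\n# dev-only tooling\n\nspacy==3.0.6\n"
print(filter_requirements(raw))  # ['torch>=1.7', 'spacy==3.0.6']
```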
#### File: transformations/antonyms_substitute/transformation.py
```python
import re
import nltk
import spacy
from initialize import spacy_nlp
from nltk.corpus import wordnet
import numpy as np
from interfaces.SentenceOperation import SentenceOperation
from tasks.TaskTypes import TaskType
"""
The code is adapted from @zijwang https://github.com/GEM-benchmark/NL-Augmenter/tree/main/transformations/synonym_substitution
"""
def untokenize(words):
"""
Untokenizing a text undoes the tokenizing operation, restoring
punctuation and spaces to the places that people expect them to be.
Ideally, `untokenize(tokenize(text))` should be identical to `text`,
except for line breaks.
ref: https://github.com/commonsense/metanl/blob/master/metanl/token_utils.py#L28
"""
text = " ".join(words)
step1 = (
text.replace("`` ", '"').replace(" ''", '"').replace(". . .", "...")
)
step2 = step1.replace(" ( ", " (").replace(" ) ", ") ")
step3 = re.sub(r' ([.,:;?!%]+)([ \'"`])', r"\1\2", step2)
step4 = re.sub(r" ([.,:;?!%]+)$", r"\1", step3)
step5 = (
step4.replace(" '", "'")
.replace(" n't", "n't")
.replace("can not", "cannot")
)
step6 = step5.replace(" ` ", " '")
return step6.strip()
def is_synonyms(word1, word2):
synonyms = []
for syn in wordnet.synsets(word1):
for l in syn.lemmas():
synonyms.append(l.name())
if word2 in synonyms:
return True
return False
def is_antonyms(word1, word2):
antonyms = []
for syn in wordnet.synsets(word1):
for l in syn.lemmas():
if l.antonyms():
antonyms.append(l.antonyms()[0].name())
if word2 in antonyms:
return True
return False
def antonyms_substitute(text, spacy_pipeline, seed=22, max_outputs=1):
np.random.seed(seed)
upos_wn_dict = {
# "VERB": "v",
# "NOUN": "n",
"ADV": "r",
"ADJ": "s",
}
doc = spacy_pipeline(text)
results = []
for _ in range(max_outputs):
result = []
converted_words = []
counter = 0
for token in doc:
word = token.text
wn_pos = upos_wn_dict.get(token.pos_)
if wn_pos is None:
result.append(word)
else:
antonyms = []
# synonyms = []
for syn in wordnet.synsets(word, pos=wn_pos):
for l in syn.lemmas():
# synonyms.append(l.name())
if l.antonyms():
antonyms.append(l.antonyms()[0].name())
antonyms = list(set(antonyms))
if len(antonyms) > 0:
antonyms = sorted(antonyms)
result.append(antonyms[0].replace("_", " "))
counter += 1
converted_words.append(word)
else:
result.append(word)
# detokenize sentences
result = untokenize(result)
# only keep the transformation if an even number of words were changed
if counter%2 != 0:
result = text
# skip the transformation if any two of the replaced words are synonyms or antonyms of each other
# e.g. do not transform "Ram is talented and skilled archer"
for word1 in converted_words:
for word2 in converted_words:
if word1 != word2:
if is_antonyms(word1, word2) or is_synonyms(word1, word2):
result = text
break
if result not in results:
# make sure there is no dup in results
results.append(result)
return results
"""
Substitute words with antonyms using spaCy (for POS tagging) and WordNet via nltk (for antonyms)
"""
class AntonymsSubstitute(SentenceOperation):
tasks = [
TaskType.TEXT_CLASSIFICATION,
TaskType.TEXT_TO_TEXT_GENERATION,
]
languages = ["en"]
keywords = ["lexical", "noise", "rule-based", "tokenizer-required"]
def __init__(self, seed=42, prob=0.5, max_outputs=1):
super().__init__(seed, max_outputs=max_outputs)
self.nlp = spacy_nlp if spacy_nlp else spacy.load("en_core_web_sm")
self.prob = prob
nltk.download("wordnet")
def generate(self, sentence: str):
perturbed = antonyms_substitute(
text=sentence,
spacy_pipeline=self.nlp,
seed=self.seed,
max_outputs=self.max_outputs,
)
return perturbed
```
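A usage sketch for the transformation above; the input sentence is an assumption and the shown output is only indicative, since results depend on WordNet's antonym coverage and the even-substitution rule.

```python
# Sketch: adjectives/adverbs are swapped for antonyms only when an even
# number of them can be replaced; otherwise the sentence is returned as-is.
t = AntonymsSubstitute(seed=42, max_outputs=1)
print(t.generate("The weather is good and the food is cheap."))
# e.g. ['The weather is bad and the food is expensive.']
```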
#### File: transformations/chinese_antonym_synonym_substitution/transformation.py
```python
import itertools
import random
import os
from interfaces.SentenceOperation import SentenceOperation
from tasks.TaskTypes import TaskType
import jieba
from nlpcda import Similarword
def chinese_antonym_substitution(text,
prob,
chinese_antonym_data
):
perturb_list = []
for perturb_word in text:
perturb_antonym = []
if perturb_word not in chinese_antonym_data.keys():
print("Word to be perturbed is not in the Chinese Antonym database")
else:
perturb_antonym = chinese_antonym_data[perturb_word]
if random.random() <= prob and len(perturb_antonym) != 0:
new_chinese_character = random.choice(perturb_antonym)
else:
new_chinese_character = perturb_word
perturb_list.append(new_chinese_character)
perturb_text = ''.join(perturb_list)
return perturb_text
def load_chinese_antonym_data():
dirname = os.path.dirname(__file__)
antonym_dict = {}
with open(os.path.join(dirname, '反义词库.txt'), mode='r') as file:
lines = file.readlines()
for line in lines:
if(line.rstrip() != ""):
antonym_list = []
word_antonym = line.split("-")
new_antonym = word_antonym[1].rstrip()
if(word_antonym[0] in antonym_dict.keys()):
values = antonym_dict[word_antonym[0]]
for value in values:
antonym_list.append(value)
if(new_antonym not in values):
antonym_list.append(new_antonym)
antonym_dict[word_antonym[0]] = antonym_list
else:
antonym_list.append(word_antonym[1].rstrip())
antonym_dict[word_antonym[0]] = antonym_list
return antonym_dict
"""
Chinese Antonym And Synonym Substitution
"""
class ChineseAntonymAndSynonymSubtitution(SentenceOperation):
tasks = [
TaskType.TEXT_CLASSIFICATION,
TaskType.TEXT_TO_TEXT_GENERATION
]
languages = ["zh"]
keywords = ["lexical", "rule-based", "api-based", "written", "highly-meaning-preserving", "meaning-alteration", "high-precision"]
def __init__(self, seed=0, max_outputs=1, prob=0.5):
super().__init__(seed, max_outputs=max_outputs)
self.prob = prob
self.seed = seed
self.max_outputs = max_outputs
self.chinese_antonym_data = load_chinese_antonym_data()
def generate(self, sentence: str, config : str = 'synonym'):
random.seed(self.seed)
perturbed_texts = []
if (config == 'synonym'):
smw = Similarword(create_num=self.max_outputs + 1, change_rate=self.prob)
output_list = smw.replace(sentence)
# Note that the first entry is the unperturbed original, so drop it
output_list = output_list[1:]
perturbed_texts = output_list
elif (config == 'antonym'):
output = jieba.lcut(sentence)
for _ in itertools.repeat(None, self.max_outputs):
perturbed_text = chinese_antonym_substitution(
text=output,
prob=self.prob,
chinese_antonym_data=self.chinese_antonym_data
)
perturbed_texts.append(perturbed_text)
return perturbed_texts
"""
if __name__ == '__main__':
simp_text = "汉字是语素文字,总数非常庞大。汉字总共有多少字?到目前为止,恐怕没人能够答得上来精确的数字。"
perturb_func = ChineseAntonymAndSynonymSubtitution()
new_text = perturb_func.generate(simp_text, config = "antonym")
# new_text = perturb_func.generate(simp_text)
print(new_text)
"""
```
#### File: transformations/disability_transformation/transformation.py
```python
from interfaces.SentenceOperation import SentenceOperation
from evaluation.evaluation_engine import evaluate, execute_model
from tasks.TaskTypes import TaskType
import spacy
import string
from spacy.lang.en.examples import sentences
from initialize import spacy_nlp
# Module-level pipeline used by postag(); reuse the shared spacy_nlp when available.
nlp = spacy_nlp if spacy_nlp else spacy.load("en_core_web_sm")
# A dict containing offensive words and their alternatives
disability_names = {"blind":"person or people with a visual impairment",
"deformed": "person or people with a physical disability",
"handicapped":"person or people with a physical disability",
"cripple":"person with a physical disability",
"crippled":"person or people with a physical disability",
"paraplegic":"person or people with paraplegia",
"psychotic":"person or people with a psychotic condition",
"psycho":"person with a psychotic condition",
"psychopath":"person with a psychotic condition",
"quadriplegic":"person or people with quadriplegia",
"schizophrenic":"person or people with schizophrenia",
"vegetable":"person in a vegetative state",
"bonkers":"person or people with a mental disability",
"senile":"person or people with dementia",
"gimp":"person with a physical disability",
"spastic":"person with a physical disability",
"spaz":"person with a physical disability",
"lame":"person with a physical disability",
"lunatic":"person with a mental disability",
"lunatics":"people with a mental disability",
"looney":"person with a mental disability",
"manic":"person with a psychological disability",
"mutant":"person with an uncommon genetic mutation",
"mutants": "people with an uncommon genetic mutation",
"wheelchairbound":"wheelchair user",
"subnormal":"person or people with learning difficulties or a developmental disorder",
"dwarf":"short-statured person",
"midget":"short-statured person",
"deaf":"person or people with a hearing disability",
"mute":"person or people with a listening disability",
"dumb":"person or people with a mental and/or speech impairment",
"demented":"person or people with dementia",
"dotard":"old person with impaired intellect or physical disability",
"dotards":"old people with impaired intellect or physical disability",
"derp":"person with intellectual disabilities",
"imbecile":"person with intellectual disabilities",
"imbeciles":"people with intellectual disabilities",
"crazy":"person or people with a mental impairment",
"insane ":"person or people with a mental impairment",
"wacko":"person with a mental impairment",
"nuts":"person or people with a mental impairment",
"retard":"person with an intellectual disability",
"retards":"people with an intellectual disability",
"retarded":"person or people with an intellectual disability",
}
def postag(text):
doc = nlp(text)
pos_list = []
word_list = []
for token in doc:
pos_list.append(token.pos_)
word_list.append(token.text)
print(pos_list, word_list)
return word_list, pos_list
def preserve_capitalization(original, transformed):
# Only restore the leading capital; always return the transformed text.
if original[0].isupper():
transformed = transformed.capitalize()
return transformed
def get_index(wl, n):
indices = [i for i, x in enumerate(wl) if x == n]
return indices
def placement(index_of_dis, wl, pl, input, disability_names, name):
text = input.lower()
wl,pl = postag(text)
index_of_dis = get_index(wl,name)
max_len = len(wl)
for i in index_of_dis:
print("For Occurence", i)
print("For Words Less than Maximum Length:")
if i < (max_len-1):
print("Words in between")
if pl[i+1] == 'NOUN':
s = ' '.join(wl)
text = s
elif pl[i+1]!='NOUN':
s = ' '.join(wl)
namew = wl[i]
wl[i] = disability_names[namew]
text = ' '.join(wl)
elif i >= (max_len-1):
print("For Words At Maximum Length:")
namew = wl[i]
wl[i] = disability_names[namew]
text = ' '.join(wl)
else:
s = ' '.join(wl)
text = s
text = preserve_capitalization(input, text)
return text
def replace_punc(text):
for i in string.punctuation:
text = text.replace(i," "+i)
return text
def restore_punc(text):
for i in string.punctuation:
text = text.replace(" "+i,i)
return text
def different_ability(input, disability_names):
text = input.lower()
text = replace_punc(text)
for name in disability_names.keys():
if name in text:
wl, pl = postag(text)
max_len = len(wl)
indices = get_index(wl, name)
text = placement(indices, wl, pl, input, disability_names, name)
text = preserve_capitalization(input, text)
text = restore_punc(text)
return text
class DifferentAbilityTransformation(SentenceOperation):
tasks = [
TaskType.TEXT_CLASSIFICATION,
TaskType.TEXT_TO_TEXT_GENERATION
]
languages = ["en"]
keywords = [
"lexical",
"rule-based",
"tokenizer-required",
"social-reasoning",
]
def __init__(self, seed=0, max_outputs=1):
super().__init__(seed, max_outputs = max_outputs)
self.disability_names = disability_names
self.nlp = self.nlp = (
spacy_nlp if spacy_nlp else spacy.load("en_core_web_sm")
)
def generate(self, sentence: str):
return [different_ability(sentence, self.disability_names)]
```
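A usage sketch for the transformation above; the sentence and the exact rewritten output are assumptions.

```python
# Sketch: terms from disability_names are rewritten when they are not
# directly followed by a noun.
t = DifferentAbilityTransformation()
print(t.generate("The entrance is not accessible for the handicapped."))
# e.g. ['The entrance is not accessible for the person or people with a physical disability.']
```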
#### File: transformations/french_conjugation_transformation/transformation.py
```python
import re
import mlconjug
import nltk
import spacy
from spacy.language import Language
from spacy_lefff import LefffLemmatizer, POSTagger
from interfaces.SentenceOperation import SentenceOperation
from tasks.TaskTypes import TaskType
default_conjugator = mlconjug.Conjugator(language="fr")
nltk.download("wordnet")
nltk.download("punkt")
dicPronouns = {
"j'": "1s",
"J'": "1s",
"je": "1s",
"Je": "1s",
"tu": "2s",
"Tu": "2s",
"il": "3s",
"Il": "3s",
"elle": "3s",
"Elle": "3s",
"on": "3s",
"On": "3s",
"'on": "3s",
"nous": "1p",
"Nous": "1p",
"vous": "2p",
"Vous": "2p",
"ils": "3p",
"Ils": "3p",
"elles": "3p",
"Elles": "3p",
}
# function to update the value in an association list (list of (key, value) tuples) based on its key
def update_in_alist(alist, key, value):
return [(k, v) if (k != key) else (key, value) for (k, v) in alist]
def update_in_alist_inplace(alist, key, value):
alist[:] = update_in_alist(alist, key, value)
# function to change a key in an association list based on a given value
def update_in_alist_key(alist, key, value):
return [(k, v) if (v != value) else (key, value) for (k, v) in alist]
def update_in_alist_inplace_key(alist, key, value):
alist[:] = update_in_alist_key(alist, key, value)
# Sometimes conjugating a verb changes its first letter (e.g. with the verb ALLER, "je vais" in the present becomes "j'irai" in the future), so the pronoun "Je" must be changed to "J'"
def transform_pronoun_1s(sentence):
# regex to find if "je" or "Je" is followed by a vowel
regex_je = r"je (a|e|i|o|u|y|é|è|ê)|Je (a|e|i|o|u|y|é|è|ê)"
reg = re.search(regex_je, sentence)
if reg is None:
pass
else:
sentence = sentence.replace("je ", "j'")
sentence = sentence.replace("Je ", "J'")
# regex to find if "J'"" or "j'" is folowed by a consonant
regex_j = r"j'(z|r|t|p|q|s|d|f|g|h|j|k|l|m|w|x|c|v|b|n)|J'(z|r|t|p|q|s|d|f|g|h|j|k|l|m|w|x|c|v|b|n)"
regj = re.search(regex_j, sentence)
if regj is None:
pass
else:
sentence = sentence.replace("j'", "je ")
sentence = sentence.replace("J'", "Je ")
return sentence
# sometimes "y" is used as a liaison between the pronoun and a vowel; it must be deleted if the next word starts with y or i
def y_exception(sentence):
regex_y = r"j'y (i|y)|J'y (i|y|)"
reg = re.search(regex_y, sentence)
if reg is None:
pass
else:
sentence = sentence.replace("j'y ", "j'")
sentence = sentence.replace("J'y ", "J'")
return sentence
# the negation is expressed with n' in front of a vowel and ne in front of a consonant
def n_exception(sentence):
regex_n = r"n'(z|r|t|p|q|s|d|f|g|h|j|k|l|m|w|x|c|v|b|n)|N'(z|r|t|p|q|s|d|f|g|h|j|k|l|m|w|x|c|v|b|n)"
reg = re.search(regex_n, sentence)
if reg is None:
pass
else:
sentence = sentence.replace("n'", "ne ")
sentence = sentence.replace("N'", "Ne ")
regex_ne = r"ne (a|e|i|o|u|y|é|è|ê)|Ne (a|e|i|o|u|y|é|è|ê)"
reg = re.search(regex_ne, sentence)
if reg is None:
pass
else:
sentence = sentence.replace("ne ", "n'")
sentence = sentence.replace("Ne ", "N'")
return sentence
@Language.factory("french_lemmatizer")
def create_french_lemmatizer(nlp, name):
return LefffLemmatizer()
@Language.factory("POSTagger")
def create_POSTagger(nlp, name):
return POSTagger()
# for the mlconjug library, verbs must be in their infinitive form
def TurnToInfinitif(nlp,sentence):
doc = nlp(sentence)
verbs = [d.text for d in doc if d.pos_ == "VERB"]
auxs = [d.text for d in doc if d.pos_ == "AUX"]
auxs_lemma = [d.lemma_ for d in doc if d.pos_ == "AUX"]
verbs_lemma = [d.lemma_ for d in doc if d.pos_ == "VERB"]
listTuples = []
for count, word in enumerate(doc):
if word.lemma_ in verbs_lemma:
listTuples.append((word.lemma_, "VERB"))
if word.text == word.lemma_:
update_in_alist_inplace(listTuples, word.lemma_, "INF")
elif word.lemma_ in auxs_lemma:
listTuples.append((word.lemma_, "AUX"))
if doc[count - 1].text == "aller":
update_in_alist_inplace(listTuples, "aller", "ALLER")
for i in range(0, len(verbs)):
if verbs_lemma[i] == "aller":
update_in_alist_inplace_key(
listTuples, verbs[i], "ALLER"
)
else:
listTuples.append((word.text, "TEXT"))
if word.lemma_ in verbs_lemma:
if doc[count - 1].text in auxs:
update_in_alist_inplace(
listTuples, doc[count - 1].lemma_, "AUX"
)
update_in_alist_inplace(listTuples, word.lemma_, "PP")
update_in_alist_inplace_key(listTuples, word.text, "PP")
elif doc[count - 1].text == "pas":
if doc[count - 2].text in auxs:
update_in_alist_inplace(
listTuples, doc[count - 2].text, "AUX"
)
update_in_alist_inplace(listTuples, word.lemma_, "PP")
update_in_alist_inplace_key(listTuples, word.text, "PP")
return listTuples
# function that conjugates verbs with mlconjug
def conjugate(nlp,sentence, pronoun, tense):
infinitive = TurnToInfinitif(nlp,sentence)
conjugatedSentence = []
for tuple in infinitive:
if tuple[1] == "VERB" or tuple[1] == "AUX":
conjugatedSentence.append(
default_conjugator.conjugate(tuple[0]).conjug_info[
"Indicatif"
][tense][pronoun]
)
else:
conjugatedSentence.append(tuple[0])
conjugatedSentence = transform_pronoun_1s(" ".join(conjugatedSentence))
conjugatedSentence = conjugatedSentence.replace("' ", "'")
conjugatedSentence = y_exception(conjugatedSentence)
conjugatedSentence = n_exception(conjugatedSentence)
return conjugatedSentence
# check if a word is in sentence
def contains_word(s, w):
return f" {w} " in f" {s} "
# transform "j'" into "je" to avoid issues in the final function
def anomalous_1s_transform(sentence):
regex_je = r"j'(a|e|i|o|u|y|é|è|ê)|J'(a|e|i|o|u|y|é|è|ê)"
reg = re.search(regex_je, sentence)
if reg is None:
pass
else:
sentence = sentence.replace("j'", "je ")
sentence = sentence.replace("J'", "Je ")
return sentence
# transform "lorsqu'il" to "lorsqu' il" in order to detect the pronoun later on
def anomalous_indicators_transform(sentence):
"""on conjug_multiverbal_sentences, "lorsqu' " is trouble
this is a way to fix it"""
for word in sentence.split(" "):
if word.startswith("lorsqu'"):
sentence = sentence.replace(
word, word.split("'")[0] + "' " + word.split("'")[1]
)
return sentence
# final function used to conjugate verbs
def french_conjugation_transformation(
nlp,sentence, tense, dict_pronouns=dicPronouns
):
"""this function allows you to conjugate a multiverbal sentence where there are one or different
pronouns. If the sentence is multiverbal, verbs are supposed to be conjugated on the same tense
for exemple first person pronoun + verb(future) + ... + Third person pronoun + verb(future) + ...
else concordance of the tenses might not be respected
Pronouns MUST be specified as they are in the dicPronouns"""
perturbed_texts = []
sentence = anomalous_indicators_transform(sentence)
regex_j = r"j'(a|e|i|o|u|y|é|è|ê)|J'(a|e|i|o|u|y|é|è|ê)"
reg = re.search(regex_j, sentence)
# if pronoun is "j'", it needs to be transformed as "je" for this process
if reg is not None:
sentence = anomalous_1s_transform(sentence)
else:
sentence = sentence
pronouns_final_list = []
pronouns = dict_pronouns.keys()
list_pronouns = []
for pronoun in pronouns:
if contains_word(sentence, pronoun) is True:
list_pronouns.append(pronoun)
# if there is only one pronoun in the sentence (and therefore only one person for verbs transformation)
if len(list_pronouns) == 1:
for pronoun in list_pronouns:
if pronoun in sentence:
ispronoun = re.search(r"\b({})\b".format(pronoun), sentence)
index_pronoun = ispronoun.start()
pronouns_final_list.append(dict_pronouns[pronoun])
conjugated = conjugate(nlp,sentence, pronouns_final_list[0], tense)
conjugated = conjugated.replace(".", "")
final_sent = re.sub(" +", " ", conjugated)
final_sent = final_sent.replace(" , ", ", ")
final_sent = final_sent.replace("' ", "'")
perturbed_texts.append(final_sent)
return perturbed_texts
elif len(list_pronouns) == 0:
print(
"No pronouns detected in this sentence, availables pronouns are in the dicPronouns"
)
else:
index_pronouns = []
for pronoun in list_pronouns:
if pronoun in sentence:
ispronoun = re.search(r"\b({})\b".format(pronoun), sentence)
index_pronoun = ispronoun.start()
index_pronouns.append(index_pronoun)
pronouns_final_list.append(dict_pronouns[pronoun])
# if the sentence does not start with a pronoun
if index_pronouns[0] != 0:
indexBefore = index_pronouns[0] - 1
indexStart = index_pronouns[0]
sentence0 = sentence[0:indexBefore]
sentence1 = sentence[indexStart:]
ispronoun = re.search(
r"\b({})\b".format(pronoun), sentence1
)
idx_pronouns = ispronoun.start()
if idx_pronouns != 0:
indexBefore = idx_pronouns - 1
split_sentence = (
sentence1[:indexBefore]
+ "."
+ sentence1[indexBefore:]
)
splited_sentences = nltk.sent_tokenize(split_sentence)
sent_list = []
conjug_sent = []
for i in range(len(splited_sentences)):
sent_pron = [
splited_sentences[i],
pronouns_final_list[i],
]
conjugated = conjugate(
nlp,
splited_sentences[i],
pronouns_final_list[i],
tense,
)
# delete the "." added in the split sentence process
conjugated = conjugated.replace(".", "")
conjug_sent.append(conjugated)
sent_list.append(sent_pron)
# join the conjugated sentences in a final one
final_sent = " ".join(conjug_sent)
final_sent = sentence0 + " " + final_sent
# delete the extra space
final_sent = re.sub(" +", " ", final_sent)
final_sent = final_sent.replace(" , ", ", ")
final_sent = final_sent.replace("' ", "'").replace(
" '", "'"
)
perturbed_texts.append(final_sent)
return perturbed_texts
else:
indexBefore = index_pronoun - 1
if index_pronoun != 0:
split_sentence = (
sentence[:indexBefore]
+ "."
+ sentence[indexBefore:]
)
splited_sentences = nltk.sent_tokenize(split_sentence)
sent_list = []
conjug_sent = []
for i in range(len(splited_sentences)):
sent_pron = [
splited_sentences[i],
pronouns_final_list[i],
]
conjugated = conjugate(nlp,
splited_sentences[i],
pronouns_final_list[i],
tense,
)
# delete the "." added in the split sentence process
conjugated = conjugated.replace(".", "")
conjug_sent.append(conjugated)
sent_list.append(sent_pron)
# join the conjugated sentences in a final one
final_sent = " ".join(conjug_sent)
# delete the extra space
final_sent = re.sub(" +", " ", final_sent)
final_sent = final_sent.replace(" , ", ", ")
final_sent = final_sent.replace("' ", "'").replace(
" '", "'"
)
perturbed_texts.append(final_sent)
return perturbed_texts
class Conjugation_transformation(SentenceOperation):
tasks = [
TaskType.TEXT_CLASSIFICATION,
TaskType.TEXT_TO_TEXT_GENERATION,
]
languages = ["fr"]
keywords = [
"high-coverage",
"highly-meaning-preserving",
"lexical",
"model-based",
]
# The transformation tense is specified here
def __init__(self, tense):
super().__init__()
assert tense in ["Futur", "Imparfait"]
self.tense = tense
self.nlp = spacy.load("fr_core_news_md")
# definition of spacy pipeline
self.nlp.add_pipe("POSTagger", name="pos")
self.nlp.add_pipe("french_lemmatizer", name="lefff", after="pos")
def generate(self, sentence: str):
perturbed_texts = french_conjugation_transformation(
self.nlp,
sentence,
self.tense,
)
return perturbed_texts
```
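A usage sketch for the transformation above; it needs `fr_core_news_md` plus spacy-lefff installed, and the conjugated output shown is an assumption.

```python
# Sketch: every verb is re-conjugated in the requested tense for the
# pronoun detected in the sentence.
t = Conjugation_transformation(tense="Futur")
print(t.generate("Je mange une pomme."))
# e.g. ['Je mangerai une pomme']
```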
|
{
"source": "JerryX1110/VFS",
"score": 3
}
|
#### File: mmaction/datasets/base.py
```python
import copy
import os.path as osp
import random
from abc import ABCMeta, abstractmethod
import decord
import mmcv
import torch
from torch.utils.data import Dataset
from .pipelines import Compose
class BaseDataset(Dataset, metaclass=ABCMeta):
"""Base class for datasets.
All datasets to process video should subclass it.
All subclasses should overwrite:
- Methods:`load_annotations`, supporting to load information from an
annotation file.
- Methods:`prepare_train_frames`, providing train data.
- Methods:`prepare_test_frames`, providing test data.
Args:
ann_file (str): Path to the annotation file.
pipeline (list[dict | callable]): A sequence of data transforms.
data_prefix (str): Path to a directory where videos are held.
Default: None.
test_mode (bool): Store True when building test or validation dataset.
Default: False.
multi_class (bool): Determines whether the dataset is a multi-class
dataset. Default: False.
num_classes (int): Number of classes of the dataset, used in
multi-class datasets. Default: None.
start_index (int): Specify a start index for frames in consideration of
different filename format. However, when taking videos as input,
it should be set to 0, since frames loaded from videos count
from 0. Default: 1.
modality (str): Modality of data. Supported values are 'RGB' and 'Flow'.
Default: 'RGB'.
"""
def __init__(self,
ann_file,
pipeline,
data_prefix=None,
test_mode=False,
multi_class=False,
num_classes=None,
start_index=1,
modality='RGB'):
super().__init__()
self.ann_file = ann_file
# self.data_prefix = osp.realpath(data_prefix) if osp.isdir(
# data_prefix) else data_prefix
self.data_prefix = data_prefix
self.test_mode = test_mode
self.multi_class = multi_class
self.num_classes = num_classes
self.start_index = start_index
self.modality = modality
self.pipeline = Compose(pipeline)
self.video_infos = self.load_annotations()
@abstractmethod
def load_annotations(self):
"""Load the annotation according to ann_file into video_infos."""
pass
# JSON annotations already look like video_infos, so this function is
# the same for every dataset
def load_json_annotations(self):
"""Load json annotation file to get video information."""
video_infos = mmcv.load(self.ann_file)
num_videos = len(video_infos)
path_key = 'frame_dir' if 'frame_dir' in video_infos[0] else 'filename'
for i in range(num_videos):
if self.data_prefix is not None:
path_value = video_infos[i][path_key]
path_value = osp.join(self.data_prefix, path_value)
video_infos[i][path_key] = path_value
if self.multi_class:
assert self.num_classes is not None
onehot = torch.zeros(self.num_classes)
onehot[video_infos[i]['label']] = 1.
video_infos[i]['label'] = onehot
else:
assert len(video_infos[i]['label']) == 1
video_infos[i]['label'] = video_infos[i]['label'][0]
return video_infos
@abstractmethod
def evaluate(self, results, metrics, logger):
"""Evaluation for the dataset.
Args:
results (list): Output results.
metrics (str | sequence[str]): Metrics to be performed.
logger (logging.Logger | None): Logger for recording.
Returns:
dict: Evaluation results dict.
"""
pass
def dump_results(self, results, out):
"""Dump data to json/yaml/pickle strings or files."""
return mmcv.dump(results, out)
def prepare_train_frames(self, idx):
"""Prepare the frames for training given the index."""
results = copy.deepcopy(self.video_infos[idx])
results['modality'] = self.modality
results['start_index'] = self.start_index
return self.pipeline(results)
def prepare_test_frames(self, idx):
"""Prepare the frames for testing given the index."""
results = copy.deepcopy(self.video_infos[idx])
results['modality'] = self.modality
results['start_index'] = self.start_index
return self.pipeline(results)
def __len__(self):
"""Get the size of the dataset."""
return len(self.video_infos)
def __getitem__(self, idx):
"""Get the sample for either training or testing given index."""
if self.test_mode:
return self.prepare_test_frames(idx)
else:
while True:
try:
data = self.prepare_train_frames(idx)
except decord._ffi.base.DECORDError:
idx = random.randrange(len(self))
continue
return data
```
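To illustrate the contract above, here is a minimal, hypothetical subclass; the "path label" annotation format and the toy `evaluate` are assumptions, not part of the repository.

```python
import os.path as osp

class ToyVideoDataset(BaseDataset):
    """Hypothetical subclass: one 'relative/path.mp4 label' pair per line."""

    def load_annotations(self):
        video_infos = []
        with open(self.ann_file, 'r') as fin:
            for line in fin:
                filename, label = line.strip().split()
                if self.data_prefix is not None:
                    filename = osp.join(self.data_prefix, filename)
                video_infos.append(dict(filename=filename, label=int(label)))
        return video_infos

    def evaluate(self, results, metrics='top1', logger=None):
        # Placeholder metric for the sketch only.
        return dict(num_results=len(results))
```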
#### File: mmaction/datasets/davis_dataset.py
```python
import copy
import os
import os.path as osp
import tempfile
import mmcv
import numpy as np
import pandas as pd
from davis2017.evaluation import DAVISEvaluation
from mmcv.utils import print_log
from PIL import Image
from mmaction.utils import add_prefix, terminal_is_available
from .rawframe_dataset import RawframeDataset
from .registry import DATASETS
@DATASETS.register_module()
class DavisDataset(RawframeDataset):
PALETTE = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128],
[128, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0],
[191, 0, 0], [64, 128, 0], [191, 128, 0], [64, 0, 128],
[191, 0, 128], [64, 128, 128], [191, 128, 128], [0, 64, 0],
[128, 64, 0], [0, 191, 0], [128, 191, 0], [0, 64, 128],
[128, 64, 128]]
def __init__(self,
ann_file,
pipeline,
data_prefix=None,
anno_prefix=None,
test_mode=False,
split='val',
data_root='data/davis2017',
task='semi-supervised'):
assert split in ['train', 'val']
assert task in ['semi-supervised']
self.split = split
self.data_root = data_root
self.task = task
self.anno_prefix = anno_prefix
super().__init__(
ann_file,
pipeline,
data_prefix,
test_mode,
filename_tmpl='{:05}.jpg',
with_offset=False,
multi_class=False,
num_classes=None,
start_index=0,
modality='RGB')
def prepare_test_frames(self, idx):
"""Prepare the frames for testing given the index."""
results = copy.deepcopy(self.video_infos[idx])
results['filename_tmpl'] = self.filename_tmpl
results['modality'] = self.modality
results['start_index'] = self.start_index
ann_frame_dir = results['frame_dir'].replace(self.data_prefix,
self.anno_prefix)
results['seg_map'] = osp.join(
ann_frame_dir,
self.filename_tmpl.format(0).replace('jpg', 'png'))
return self.pipeline(results)
def davis_evaluate(self, results, output_dir, logger=None):
dataset_eval = DAVISEvaluation(
davis_root=self.data_root, task=self.task, gt_set=self.split)
if isinstance(results, str):
metrics_res = dataset_eval.evaluate(results)
else:
assert len(results) == len(self)
for vid_idx in range(len(self)):
assert len(results[vid_idx]) == \
self.video_infos[vid_idx]['total_frames'] or \
isinstance(results[vid_idx], str)
if output_dir is None:
tmp_dir = tempfile.TemporaryDirectory()
output_dir = tmp_dir.name
else:
tmp_dir = None
mmcv.mkdir_or_exist(output_dir)
if terminal_is_available():
prog_bar = mmcv.ProgressBar(len(self))
for vid_idx in range(len(results)):
cur_results = results[vid_idx]
if isinstance(cur_results, str):
file_path = cur_results
cur_results = np.load(file_path)
os.remove(file_path)
for img_idx in range(
self.video_infos[vid_idx]['total_frames']):
result = cur_results[img_idx].astype(np.uint8)
img = Image.fromarray(result)
img.putpalette(
np.asarray(self.PALETTE, dtype=np.uint8).ravel())
frame_dir = self.video_infos[vid_idx]['frame_dir']
save_path = osp.join(
output_dir, osp.relpath(frame_dir, self.data_prefix),
self.filename_tmpl.format(img_idx).replace(
'jpg', 'png'))
mmcv.mkdir_or_exist(osp.dirname(save_path))
img.save(save_path)
if terminal_is_available():
prog_bar.update()
metrics_res = dataset_eval.evaluate(output_dir)
if tmp_dir is not None:
tmp_dir.cleanup()
J, F = metrics_res['J'], metrics_res['F']
# Generate dataframe for the general results
g_measures = [
'J&F-Mean', 'J-Mean', 'J-Recall', 'J-Decay', 'F-Mean', 'F-Recall',
'F-Decay'
]
final_mean = (np.mean(J['M']) + np.mean(F['M'])) / 2.
g_res = np.array([
final_mean,
np.mean(J['M']),
np.mean(J['R']),
np.mean(J['D']),
np.mean(F['M']),
np.mean(F['R']),
np.mean(F['D'])
])
g_res = np.reshape(g_res, [1, len(g_res)])
print_log(f'\nGlobal results for {self.split}', logger=logger)
table_g = pd.DataFrame(data=g_res, columns=g_measures)
print_log('\n' + table_g.to_string(index=False), logger=logger)
# Generate a dataframe for the per sequence results
seq_names = list(J['M_per_object'].keys())
seq_measures = ['Sequence', 'J-Mean', 'F-Mean']
J_per_object = [J['M_per_object'][x] for x in seq_names]
F_per_object = [F['M_per_object'][x] for x in seq_names]
table_seq = pd.DataFrame(
data=list(zip(seq_names, J_per_object, F_per_object)),
columns=seq_measures)
print_log(f'\nPer sequence results for {self.split}', logger=logger)
print_log('\n' + table_seq.to_string(index=False), logger=logger)
eval_results = table_g.to_dict('records')[0]
return eval_results
def evaluate(self, results, metrics='davis', output_dir=None, logger=None):
metrics = metrics if isinstance(metrics, (list, tuple)) else [metrics]
allowed_metrics = ['davis']
for metric in metrics:
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
eval_results = dict()
if mmcv.is_seq_of(results, np.ndarray) and results[0].ndim == 4:
num_feats = results[0].shape[0]
for feat_idx in range(num_feats):
cur_results = [result[feat_idx] for result in results]
eval_results.update(
add_prefix(
self.davis_evaluate(cur_results, output_dir, logger),
prefix=f'feat_{feat_idx}'))
elif mmcv.is_seq_of(results, list):
num_feats = len(results[0])
for feat_idx in range(num_feats):
cur_results = [result[feat_idx] for result in results]
eval_results.update(
add_prefix(
self.davis_evaluate(cur_results, output_dir, logger),
prefix=f'feat_{feat_idx}'))
else:
eval_results.update(
self.davis_evaluate(results, output_dir, logger))
copypaste = []
for k, v in eval_results.items():
if 'J&F' in k:
copypaste.append(f'{float(v)*100:.2f}')
print_log(f'Results copypaste {",".join(copypaste)}', logger=logger)
return eval_results
```
#### File: mmaction/datasets/image_dataset.py
```python
import os
import os.path as osp
from torchvision.datasets.folder import IMG_EXTENSIONS, make_dataset
from .builder import DATASETS
from .video_dataset import VideoDataset
@DATASETS.register_module()
class ImageDataset(VideoDataset):
def __init__(self, ann_file, pipeline, start_index=0, **kwargs):
super().__init__(ann_file, pipeline, start_index=start_index, **kwargs)
def _find_classes(self, dir):
"""Finds the class folders in a dataset.
Args:
dir (string): Root directory path.
Returns:
tuple: (classes, class_to_idx) where classes are relative to
(dir), and class_to_idx is a dictionary.
Ensures:
No class is a subdirectory of another.
"""
classes = [d.name for d in os.scandir(dir) if d.is_dir()]
classes.sort()
class_to_idx = {cls_name: i for i, cls_name in enumerate(classes)}
return classes, class_to_idx
def load_annotations(self):
"""Load annotation file to get image(static 1 frame video)
information."""
video_infos = []
if self.ann_file is not None:
with open(self.ann_file, 'r') as fin:
for line in fin:
line_split = line.strip().split()
filename, label = line_split
label = int(label)
if self.data_prefix is not None:
filename = osp.join(self.data_prefix, filename)
video_infos.append(
dict(filename=filename, label=label, total_frames=1))
else:
classes, class_to_idx = self._find_classes(self.data_prefix)
samples = make_dataset(self.data_prefix, class_to_idx,
IMG_EXTENSIONS, None)
for path, class_index in samples:
video_infos.append(
dict(filename=path, label=class_index, total_frames=1))
return video_infos
```
#### File: models/common/utils.py
```python
from typing import List
import mmcv
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _pair, _single, _triple
def change_stride(conv, stride):
"""Inplace change conv stride.
Args:
conv (nn.Module): the convolution module whose stride is modified in place.
stride (int): the new stride value.
"""
if isinstance(conv, nn.Conv1d):
conv.stride = _single(stride)
if isinstance(conv, nn.Conv2d):
conv.stride = _pair(stride)
if isinstance(conv, nn.Conv3d):
conv.stride = _triple(stride)
def pil_nearest_interpolate(input, size):
# workaround for https://github.com/pytorch/pytorch/issues/34808
resized_imgs = []
input = input.permute(0, 2, 3, 1)
for img in input:
img = img.squeeze(-1)
img = img.detach().cpu().numpy()
resized_img = mmcv.imresize(
img,
size=(size[1], size[0]),
interpolation='nearest',
backend='pillow')
resized_img = torch.from_numpy(resized_img).to(
input, non_blocking=True)
resized_img = resized_img.unsqueeze(2).permute(2, 0, 1)
resized_imgs.append(resized_img)
return torch.stack(resized_imgs, dim=0)
def video2images(imgs):
batches, channels, clip_len = imgs.shape[:3]
if clip_len == 1:
new_imgs = imgs.squeeze(2).reshape(batches, channels, *imgs.shape[3:])
else:
new_imgs = imgs.transpose(1, 2).contiguous().reshape(
batches * clip_len, channels, *imgs.shape[3:])
return new_imgs
def images2video(imgs, clip_len):
batches, channels = imgs.shape[:2]
if clip_len == 1:
new_imgs = imgs.unsqueeze(2)
else:
new_imgs = imgs.reshape(batches // clip_len, clip_len, channels,
*imgs.shape[2:]).transpose(1, 2).contiguous()
return new_imgs
# utils
@torch.no_grad()
def concat_all_gather(tensor):
"""Performs all_gather operation on the provided tensors.
*** Warning ***: torch.distributed.all_gather has no gradient.
"""
tensors_gather = [
torch.ones_like(tensor)
for _ in range(torch.distributed.get_world_size())
]
torch.distributed.all_gather(tensors_gather, tensor, async_op=False)
output = torch.cat(tensors_gather, dim=0)
return output
class StrideContext(object):
def __init__(self, backbone, strides, out_indices=None):
self.backbone = backbone
self.strides = strides
self.out_indices = out_indices
def __enter__(self):
if self.strides is not None:
self.backbone.switch_strides(self.strides)
if self.out_indices is not None:
self.backbone.switch_out_indices(self.out_indices)
def __exit__(self, exc_type, exc_val, exc_tb):
if self.strides is not None:
self.backbone.switch_strides()
if self.out_indices is not None:
self.backbone.switch_out_indices()
def unmap(data, count, inds, fill=0):
"""Unmap a subset of item (data) back to the original set of items (of size
count)"""
if data.dim() == 1:
ret = data.new_full((count, ), fill)
ret[inds.type(torch.bool)] = data
else:
new_size = (count, ) + data.size()[1:]
ret = data.new_full(new_size, fill)
ret[inds.type(torch.bool), :] = data
return ret
@torch.no_grad()
def _batch_shuffle_ddp(x):
"""Batch shuffle, for making use of BatchNorm.
*** Only support DistributedDataParallel (DDP) model. ***
"""
# gather from all gpus
batch_size_this = x.shape[0]
x_gather = concat_all_gather(x)
batch_size_all = x_gather.shape[0]
num_gpus = batch_size_all // batch_size_this
# random shuffle index
idx_shuffle = torch.randperm(batch_size_all).cuda()
# broadcast to all gpus
torch.distributed.broadcast(idx_shuffle, src=0)
# index for restoring
idx_unshuffle = torch.argsort(idx_shuffle)
# shuffled index for this gpu
gpu_idx = torch.distributed.get_rank()
idx_this = idx_shuffle.view(num_gpus, -1)[gpu_idx]
return x_gather[idx_this], idx_unshuffle
@torch.no_grad()
def _batch_unshuffle_ddp(x, idx_unshuffle):
"""Undo batch shuffle.
*** Only support DistributedDataParallel (DDP) model. ***
"""
# gather from all gpus
batch_size_this = x.shape[0]
x_gather = concat_all_gather(x)
batch_size_all = x_gather.shape[0]
num_gpus = batch_size_all // batch_size_this
# restored index for this gpu
gpu_idx = torch.distributed.get_rank()
idx_this = idx_unshuffle.view(num_gpus, -1)[gpu_idx]
return x_gather[idx_this]
class Clamp(nn.Module):
def __init__(self, min=None, max=None):
super(Clamp, self).__init__()
self.min = min
self.max = max
assert self.min is not None or self.max is not None
def forward(self, x):
kwargs = {}
if self.min is not None:
kwargs['min'] = self.min
if self.max is not None:
kwargs['max'] = self.max
return x.clamp(**kwargs)
def extra_repr(self):
"""Extra repr."""
s = f'min={self.min}, max={self.max}'
return s
def cat(tensors: List[torch.Tensor], dim: int = 0):
"""Efficient version of torch.cat that avoids a copy if there is only a
single element in a list."""
assert isinstance(tensors, (list, tuple))
if len(tensors) == 1:
return tensors[0]
return torch.cat(tensors, dim)
def normalize_logit(seg_logit):
seg_logit_min = seg_logit.view(*seg_logit.shape[:2], -1).min(
dim=-1)[0].view(*seg_logit.shape[:2], 1, 1)
seg_logit_max = seg_logit.view(*seg_logit.shape[:2], -1).max(
dim=-1)[0].view(*seg_logit.shape[:2], 1, 1)
normalized_seg_logit = (seg_logit - seg_logit_min) / (
seg_logit_max - seg_logit_min + 1e-12)
seg_logit = torch.where(seg_logit_max > 0, normalized_seg_logit, seg_logit)
return seg_logit
def mean_list(input_list):
ret = input_list[0].clone()
for i in range(1, len(input_list)):
ret += input_list[i]
ret /= len(input_list)
return ret
def interpolate3d(input,
size=None,
scale_factor=None,
mode='nearest',
align_corners=False):
results = []
clip_len = input.size(2)
for i in range(clip_len):
results.append(
F.interpolate(
input[:, :, i],
size=size,
scale_factor=scale_factor,
mode=mode,
align_corners=align_corners))
return torch.stack(results, dim=2)
```
#### File: models/losses/sim_loss.py
```python
import torch
import torch.nn.functional as F
from ..registry import LOSSES
from .base import BaseWeightedLoss
@LOSSES.register_module()
class DotSimLoss(BaseWeightedLoss):
"""NLL Loss.
It will calculate Dor Product Similarity loss given cls_score and label.
"""
def _forward(self, cls_score, label, **kwargs):
batches, channels, height, width = cls_score.size()
prod = torch.bmm(
cls_score.view(batches, channels,
height * width).permute(0, 2, 1).contiguous(),
label.view(batches, channels, height * width))
loss = -prod.mean()
return loss
@LOSSES.register_module()
class CosineSimLoss(BaseWeightedLoss):
"""NLL Loss.
It will calculate Cosine Similarity loss given cls_score and label.
"""
def __init__(self,
with_norm=True,
negative=False,
pairwise=False,
**kwargs):
super().__init__(**kwargs)
self.with_norm = with_norm
self.negative = negative
self.pairwise = pairwise
def _forward(self, cls_score, label, mask=None, **kwargs):
if self.with_norm:
cls_score = F.normalize(cls_score, p=2, dim=1)
label = F.normalize(label, p=2, dim=1)
if mask is not None:
assert self.pairwise
if self.pairwise:
cls_score = cls_score.flatten(2)
label = label.flatten(2)
prod = torch.einsum('bci,bcj->bij', cls_score, label)
if mask is not None:
assert prod.shape == mask.shape
prod *= mask.float()
prod = prod.flatten(1)
else:
prod = torch.sum(
cls_score * label, dim=1).view(cls_score.size(0), -1)
if self.negative:
loss = -prod.mean(dim=-1)
else:
loss = 2 - 2 * prod.mean(dim=-1)
return loss
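# Note (added comment): with `with_norm=True` and `negative=False` the per-sample
# value is 2 - 2 * cosine_similarity, the BYOL/SimSiam formulation, which equals
# the squared L2 distance between the normalised feature vectors; `negative=True`
# instead returns the plain negative cosine similarity.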
```
#### File: models/recognizers/recognizer2d.py
```python
from ..registry import RECOGNIZERS
from .base import BaseRecognizer
@RECOGNIZERS.register_module()
class Recognizer2D(BaseRecognizer):
"""2D recognizer model framework."""
def forward_train(self, imgs, labels):
"""Defines the computation performed at every call when training."""
batches = imgs.shape[0]
imgs = imgs.reshape((-1, ) + imgs.shape[2:])
num_segs = imgs.shape[0] // batches
x = self.extract_feat(imgs)
cls_score = self.cls_head(x, num_segs)
gt_labels = labels.squeeze()
loss = self.cls_head.loss(cls_score, gt_labels)
return loss
def forward_test(self, imgs):
"""Defines the computation performed at every call when evaluation and
testing."""
batches = imgs.shape[0]
imgs = imgs.reshape((-1, ) + imgs.shape[2:])
num_segs = imgs.shape[0] // batches
x = self.extract_feat(imgs)
cls_score = self.cls_head(x, num_segs)
cls_score = self.average_clip(cls_score)
return cls_score.cpu().numpy()
def forward_dummy(self, imgs):
"""Used for computing network FLOPs.
See ``tools/analysis/get_flops.py``.
Args:
imgs (torch.Tensor): Input images.
Returns:
Tensor: Class score.
"""
batches = imgs.shape[0]
imgs = imgs.reshape((-1, ) + imgs.shape[2:])
num_segs = imgs.shape[0] // batches
x = self.extract_feat(imgs)
outs = (self.cls_head(x, num_segs), )
return outs
```
#### File: models/trackers/sim_siam_base_tracker.py
```python
from mmaction.utils import add_prefix
from .. import builder
from ..common import images2video, video2images
from ..registry import TRACKERS
from .vanilla_tracker import BaseTracker
@TRACKERS.register_module()
class SimSiamBaseTracker(BaseTracker):
"""SimSiam framework."""
def __init__(self, *args, backbone, img_head=None, **kwargs):
super().__init__(*args, backbone=backbone, **kwargs)
if img_head is not None:
self.img_head = builder.build_head(img_head)
self.init_extra_weights()
if self.train_cfg is not None:
self.intra_video = self.train_cfg.get('intra_video', False)
self.transpose_temporal = self.train_cfg.get(
'transpose_temporal', False)
@property
def with_img_head(self):
"""bool: whether the detector has img head"""
return hasattr(self, 'img_head') and self.img_head is not None
def init_extra_weights(self):
if self.with_img_head:
self.img_head.init_weights()
def forward_img_head(self, x1, x2, clip_len):
if isinstance(x1, tuple):
x1 = x1[-1]
if isinstance(x2, tuple):
x2 = x2[-1]
losses = dict()
z1, p1 = self.img_head(x1)
z2, p2 = self.img_head(x2)
loss_weight = 1. / clip_len if self.intra_video else 1.
losses.update(
add_prefix(
self.img_head.loss(p1, z1, p2, z2, weight=loss_weight),
prefix='0'))
if self.intra_video:
z2_v, p2_v = images2video(z2, clip_len), images2video(p2, clip_len)
for i in range(1, clip_len):
losses.update(
add_prefix(
self.img_head.loss(
p1,
z1,
video2images(p2_v.roll(i, dims=2)),
video2images(z2_v.roll(i, dims=2)),
weight=loss_weight),
prefix=f'{i}'))
return losses
def forward_train(self, imgs, grids=None, label=None):
# [B, N, C, T, H, W]
if self.transpose_temporal:
imgs = imgs.transpose(1, 3).contiguous()
assert imgs.size(1) == 2
assert imgs.ndim == 6
clip_len = imgs.size(3)
imgs1 = video2images(imgs[:,
0].contiguous().reshape(-1, *imgs.shape[2:]))
imgs2 = video2images(imgs[:,
1].contiguous().reshape(-1, *imgs.shape[2:]))
x1 = self.backbone(imgs1)
x2 = self.backbone(imgs2)
losses = dict()
if self.with_img_head:
loss_img_head = self.forward_img_head(x1, x2, clip_len)
losses.update(add_prefix(loss_img_head, prefix='img_head'))
return losses
def forward_test(self, imgs, **kwargs):
raise NotImplementedError
```
#### File: siamfc-pytorch/siamfc/bbox_utils.py
```python
import numpy as np
LIMIT = 99999999
# BBoxes are [x1, y1, x2, y2]
def clip_bbox(bboxes, min_clip, max_x_clip, max_y_clip):
bboxes_out = bboxes
added_axis = False
if len(bboxes_out.shape) == 1:
added_axis = True
bboxes_out = bboxes_out[:, np.newaxis]
bboxes_out[[0, 2]] = np.clip(bboxes_out[[0, 2]], min_clip, max_x_clip)
bboxes_out[[1, 3]] = np.clip(bboxes_out[[1, 3]], min_clip, max_y_clip)
if added_axis:
bboxes_out = bboxes_out[:, 0]
return bboxes_out
# [xMid, yMid, width, height] to [x1, y1, x2, y2]
def xywh_to_xyxy(bboxes,
clip_min=-LIMIT,
clip_width=LIMIT,
clip_height=LIMIT,
round=False):
added_axis = False
if isinstance(bboxes, list):
bboxes = np.array(bboxes).astype(np.float32)
if len(bboxes.shape) == 1:
added_axis = True
bboxes = bboxes[:, np.newaxis]
bboxes_out = np.zeros(bboxes.shape)
xMid = bboxes[0]
yMid = bboxes[1]
width = bboxes[2]
height = bboxes[3]
bboxes_out[0] = xMid - width / 2.0
bboxes_out[1] = yMid - height / 2.0
bboxes_out[2] = xMid + width / 2.0
bboxes_out[3] = yMid + height / 2.0
if clip_min != -LIMIT or clip_width != LIMIT or clip_height != LIMIT:
bboxes_out = clip_bbox(bboxes_out, clip_min, clip_width, clip_height)
if bboxes_out.shape[0] > 4:
bboxes_out[4:] = bboxes[4:]
if added_axis:
bboxes_out = bboxes_out[:, 0]
if round:
bboxes_out = np.round(bboxes_out).astype(int)
return bboxes_out
```
|
{
"source": "Jerryxia32/CCF",
"score": 2
}
|
#### File: python/ccf/ledger_viz.py
```python
import ccf.ledger
import argparse
import os
from stringcolor import cs # type: ignore
import json
class Liner:
_line = ""
_len = 0
MAX_LENGTH = os.get_terminal_size().columns
def flush(self):
print(self._line)
self._line = ""
self._len = 0
def append(self, s: str, colour: str, background_colour: str = None):
self._line += cs(s, colour, background_colour)
self._len += len(s)
if self._len >= self.MAX_LENGTH:
self.flush()
class DefaultLiner(Liner):
_bg_colour_mapping = {
"New Service": "White",
"Service Open": "Magenta",
"Governance": "Red",
"Signature": "Green",
"Internal": "Orange",
"User Public": "Blue",
"User Private": "DarkBlue",
}
_last_view = None
_fg_colour = "Black"
@staticmethod
def view_to_char(view):
return str(view)[-1]
def __init__(self, write_views, split_views):
self.write_views = write_views
self.split_views = split_views
def entry(self, category, view):
view_change = view != self._last_view
self._last_view = view
if view_change and self.split_views:
self.flush()
self.append(f"{view}: ", "White")
char = " "
if self.write_views:
char = "‾" if not view_change else self.view_to_char(view)
fg_colour = self._fg_colour
bg_colour = self._bg_colour_mapping[category]
self.append(char, fg_colour, bg_colour)
def help(self):
print(
" | ".join(
[
f"{category} {cs(' ', 'White', bg_colour)}"
for category, bg_colour in self._bg_colour_mapping.items()
]
)
)
if self.write_views:
print(
" ".join(
[
f"Start of view 14: {cs(self.view_to_char(14), self._fg_colour, 'Grey')}"
]
)
)
print()
def try_get_service_info(public_tables):
return (
json.loads(
public_tables[ccf.ledger.SERVICE_INFO_TABLE_NAME][
ccf.ledger.WELL_KNOWN_SINGLETON_TABLE_KEY
]
)
if ccf.ledger.SERVICE_INFO_TABLE_NAME in public_tables
else None
)
def main():
parser = argparse.ArgumentParser(
description="Visualise content of CCF ledger",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("paths", help="Path to ledger directories", nargs="+")
parser.add_argument(
"--uncommitted", help="Also parse uncommitted ledger files", action="store_true"
)
parser.add_argument(
"--write-views",
help="Include characters on each tile indicating their view",
action="store_true",
)
parser.add_argument(
"--split-views",
help="Write each view on a new line, prefixed by the view number",
action="store_true",
)
args = parser.parse_args()
ledger_dirs = args.paths
ledger = ccf.ledger.Ledger(ledger_dirs, committed_only=not args.uncommitted)
l = DefaultLiner(args.write_views, args.split_views)
l.help()
current_service_identity = None
for chunk in ledger:
for tx in chunk:
public = tx.get_public_domain().get_tables()
has_private = tx.get_private_domain_size()
view = tx.gcm_header.view
if not has_private:
if ccf.ledger.SIGNATURE_TX_TABLE_NAME in public:
l.entry("Signature", view)
else:
if all(
table.startswith("public:ccf.internal.") for table in public
):
l.entry("Internal", view)
elif any(table.startswith("public:ccf.gov.") for table in public):
service_info = try_get_service_info(public)
if service_info is None:
l.entry("Governance", view)
elif service_info["status"] == "Opening":
l.entry("New Service", view)
current_service_identity = service_info["cert"]
elif (
service_info["cert"] == current_service_identity
and service_info["status"] == "Open"
):
l.entry("Service Open", view)
else:
l.entry("User Public", view)
else:
l.entry("User Private", view)
l.flush()
if __name__ == "__main__":
main()
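# Example invocation (added comment, illustrative only; the path is hypothetical):
#   python ledger_viz.py /path/to/ledger --write-views --split-views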
```
#### File: python/ccf/merkletree.py
```python
from hashlib import sha256
class MerkleTree(object):
"""
Basic Merkle Tree implementation where leaves are hashes of transactions.
"""
def __init__(self):
self.levels = None
self.reset_tree()
def reset_tree(self):
self.leaves = list()
self.levels = None
def add_leaf(self, values: bytes, do_hash=True):
digest = values
if do_hash:
digest = sha256(values).digest()
self.leaves.append(digest)
def get_leaf(self, index: int) -> bytes:
return self.leaves[index]
def get_leaf_count(self) -> int:
return len(self.leaves)
def get_merkle_root(self) -> bytes:
# Always make tree before getting root
self._make_tree()
if self.levels is None:
raise Exception(
"Unexpected error while getting root. MerkleTree has no levels."
)
return self.levels[0][0]
def _calculate_next_level(self):
solo_leaf = None
# number of leaves on the level
number_of_leaves_on_current_level = len(self.levels[0])
if number_of_leaves_on_current_level == 1:
raise Exception("Merkle Tree should have more than one leaf at every level")
if (
number_of_leaves_on_current_level % 2 == 1
): # if odd number of leaves on the level
# Get the solo leaf (the last leaf, in case the level has an odd number of leaves)
solo_leaf = self.levels[0][-1]
number_of_leaves_on_current_level -= 1
new_level = []
for left_node, right_node in zip(
self.levels[0][0:number_of_leaves_on_current_level:2],
self.levels[0][1:number_of_leaves_on_current_level:2],
):
new_level.append(sha256(left_node + right_node).digest())
if solo_leaf is not None:
new_level.append(solo_leaf)
self.levels = [
new_level,
] + self.levels # prepend new level
def _make_tree(self):
if self.get_leaf_count() > 0:
self.levels = [
self.leaves,
]
while len(self.levels[0]) > 1:
self._calculate_next_level()
```
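A short usage sketch for the `MerkleTree` class above (illustrative only, not part of the CCF sources; it assumes the class from `python/ccf/merkletree.py` is importable as shown):
```python
from ccf.merkletree import MerkleTree
tree = MerkleTree()
for tx in (b"tx1", b"tx2", b"tx3"):
    tree.add_leaf(tx)  # each value is sha256-hashed before insertion
assert tree.get_leaf_count() == 3
root = tree.get_merkle_root()  # 32-byte digest: sha256(sha256(h1 + h2) + h3)
print(root.hex())
```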
#### File: CCF/tests/e2e_operations.py
```python
import tempfile
import os
import shutil
import infra.logging_app as app
import infra.e2e_args
import infra.network
import ccf.ledger
import suite.test_requirements as reqs
import infra.crypto
import ipaddress
import infra.interfaces
import infra.path
import infra.proc
from loguru import logger as LOG
@reqs.description("Move committed ledger files to read-only directory")
def test_save_committed_ledger_files(network, args):
# Issue txs in a loop to force a signature and a new ledger chunk
# each time. Record log messages at the same key (repeat=True) so
# that CCF makes use of historical queries when verifying messages
for _ in range(1, 5):
network.txs.issue(network, 1, repeat=True)
LOG.info(f"Moving committed ledger files to {args.common_read_only_ledger_dir}")
primary, _ = network.find_primary()
for ledger_dir in primary.remote.ledger_paths():
for l in os.listdir(ledger_dir):
if infra.node.is_file_committed(l):
shutil.move(
os.path.join(ledger_dir, l),
os.path.join(args.common_read_only_ledger_dir, l),
)
network.txs.verify(network)
return network
def test_parse_snapshot_file(network, args):
primary, _ = network.find_primary()
network.txs.issue(network, number_txs=args.snapshot_tx_interval * 2)
committed_snapshots_dir = network.get_committed_snapshots(primary)
for snapshot in os.listdir(committed_snapshots_dir):
with ccf.ledger.Snapshot(os.path.join(committed_snapshots_dir, snapshot)) as s:
assert len(
s.get_public_domain().get_tables()
), "No public table in snapshot"
return network
def run_file_operations(args):
with tempfile.TemporaryDirectory() as tmp_dir:
txs = app.LoggingTxs("user0")
with infra.network.network(
args.nodes,
args.binary_dir,
args.debug_nodes,
args.perf_nodes,
pdb=args.pdb,
txs=txs,
) as network:
args.common_read_only_ledger_dir = tmp_dir
network.start_and_join(args)
test_save_committed_ledger_files(network, args)
test_parse_snapshot_file(network, args)
def run_tls_san_checks(args):
with infra.network.network(
args.nodes,
args.binary_dir,
args.debug_nodes,
args.perf_nodes,
pdb=args.pdb,
) as network:
args.common_read_only_ledger_dir = None # Reset from previous test
network.start_and_join(args)
LOG.info("Check SAN value in TLS certificate")
dummy_san = "*.dummy.com"
new_node = network.create_node("local://localhost")
args.subject_alt_names = [f"dNSName:{dummy_san}"]
network.join_node(new_node, args.package, args)
sans = infra.crypto.get_san_from_pem_cert(new_node.get_tls_certificate_pem())
assert len(sans) == 1, "Expected exactly one SAN"
assert sans[0].value == dummy_san
LOG.info("A node started with no specified SAN defaults to public RPC host")
dummy_public_rpc_host = "172.16.58.3"
args.subject_alt_names = []
new_node = network.create_node(
infra.interfaces.HostSpec(
rpc_interfaces=[
infra.interfaces.RPCInterface(public_rpc_host=dummy_public_rpc_host)
]
)
)
network.join_node(new_node, args.package, args)
# Cannot trust the node here as client cannot authenticate dummy public IP in cert
with open(
os.path.join(network.common_dir, f"{new_node.local_node_id}.pem"),
encoding="utf-8",
) as self_signed_cert:
sans = infra.crypto.get_san_from_pem_cert(self_signed_cert.read())
assert len(sans) == 1, "Expected exactly one SAN"
assert sans[0].value == ipaddress.ip_address(dummy_public_rpc_host)
def run_configuration_file_checks(args):
LOG.info(
f"Verifying JSON configuration samples in {args.config_samples_dir} directory"
)
CCHOST_BINARY_NAME = "cchost"
MIGRATE_CONFIGURATION_SCRIPT = "migrate_1_x_config.py"
OUTPUT_2_X_CONFIGURATION_FILE = "2_x_config.json"
bin_path = infra.path.build_bin_path(
CCHOST_BINARY_NAME, enclave_type=args.enclave_type, binary_dir=args.binary_dir
)
# Assumes MIGRATE_CONFIGURATION_SCRIPT is in the path
cmd = [
MIGRATE_CONFIGURATION_SCRIPT,
args.config_file_1x,
OUTPUT_2_X_CONFIGURATION_FILE,
]
assert infra.proc.ccall(*cmd).returncode == 0
config_files_to_check = [OUTPUT_2_X_CONFIGURATION_FILE]
config_files_to_check.extend(
[
os.path.join(args.config_samples_dir, c)
for c in os.listdir(args.config_samples_dir)
]
)
for config in config_files_to_check:
cmd = [bin_path, f"--config={config}", "--check"]
rc = infra.proc.ccall(*cmd).returncode
assert rc == 0, f"Failed to run tutorial script: {rc}"
def run(args):
run_file_operations(args)
run_tls_san_checks(args)
run_configuration_file_checks(args)
```
#### File: CCF/tests/governance.py
```python
import os
import http
import subprocess
import infra.network
import infra.path
import infra.proc
import infra.net
from ccf.ledger import NodeStatus
import infra.e2e_args
import suite.test_requirements as reqs
import infra.logging_app as app
import json
import requests
import infra.crypto
from datetime import datetime
import governance_js
from infra.runner import ConcurrentRunner
import governance_history
from loguru import logger as LOG
@reqs.description("Test create endpoint is not available")
def test_create_endpoint(network, args):
primary, _ = network.find_nodes()
with primary.client() as c:
r = c.post("/node/create")
assert r.status_code == http.HTTPStatus.FORBIDDEN.value
assert r.body.json()["error"]["message"] == "Node is not in initial state."
return network
@reqs.description("Test consensus status")
def test_consensus_status(network, args):
primary, _ = network.find_nodes()
with primary.client() as c:
r = c.get("/node/consensus")
assert r.status_code == http.HTTPStatus.OK.value
assert r.body.json()["details"]["leadership_state"] == "Leader"
return network
@reqs.description("Test quotes")
@reqs.supports_methods("quotes/self", "quotes")
def test_quote(network, args):
if args.enclave_type == "virtual":
LOG.warning("Quote test can only run in real enclaves, skipping")
return network
primary, _ = network.find_nodes()
with primary.client() as c:
oed = subprocess.run(
[
os.path.join(args.oe_binary, "oesign"),
"dump",
"-e",
infra.path.build_lib_path(args.package, args.enclave_type),
],
capture_output=True,
check=True,
)
lines = [
line
for line in oed.stdout.decode().split(os.linesep)
if line.startswith("mrenclave=")
]
expected_mrenclave = lines[0].strip().split("=")[1]
r = c.get("/node/quotes/self")
primary_quote_info = r.body.json()
assert primary_quote_info["node_id"] == primary.node_id
primary_mrenclave = primary_quote_info["mrenclave"]
assert primary_mrenclave == expected_mrenclave, (
primary_mrenclave,
expected_mrenclave,
)
r = c.get("/node/quotes")
quotes = r.body.json()["quotes"]
assert len(quotes) == len(network.get_joined_nodes())
for quote in quotes:
mrenclave = quote["mrenclave"]
assert mrenclave == expected_mrenclave, (mrenclave, expected_mrenclave)
cafile = os.path.join(network.common_dir, "networkcert.pem")
assert (
infra.proc.ccall(
"verify_quote.sh",
f"https://{primary.get_public_rpc_host()}:{primary.get_public_rpc_port()}",
"--cacert",
f"{cafile}",
log_output=True,
).returncode
== 0
), f"Quote verification for node {quote['node_id']} failed"
return network
@reqs.description("Add user, remove user")
@reqs.supports_methods("log/private")
def test_user(network, args, verify=True):
# Note: This test should not be chained in the test suite as it creates
# a new user and uses its own LoggingTxs
primary, _ = network.find_nodes()
new_user_local_id = f"user{3}"
new_user = network.create_user(new_user_local_id, args.participants_curve)
user_data = {"lifetime": "temporary"}
network.consortium.add_user(primary, new_user.local_id, user_data)
txs = app.LoggingTxs(user_id=new_user.local_id)
txs.issue(
network=network,
number_txs=1,
)
if verify:
txs.verify()
network.consortium.remove_user(primary, new_user.service_id)
with primary.client(new_user_local_id) as c:
r = c.get("/app/log/private")
assert r.status_code == http.HTTPStatus.UNAUTHORIZED.value
return network
@reqs.description("Add untrusted node, check no quote is returned")
def test_no_quote(network, args):
untrusted_node = network.create_node("local://localhost")
network.join_node(untrusted_node, args.package, args)
with untrusted_node.client(
ca=os.path.join(
untrusted_node.common_dir, f"{untrusted_node.local_node_id}.pem"
)
) as uc:
r = uc.get("/node/quotes/self")
assert r.status_code == http.HTTPStatus.NOT_FOUND
return network
@reqs.description("Check member data")
def test_member_data(network, args):
assert args.initial_operator_count > 0
latest_public_tables, _ = network.get_latest_ledger_public_state()
members_info = latest_public_tables["public:ccf.gov.members.info"]
md_count = 0
for member in network.get_members():
stored_member_info = json.loads(members_info[member.service_id.encode()])
if member.member_data:
assert (
stored_member_info["member_data"] == member.member_data
), f'stored member data "{stored_member_info["member_data"]}" != expected "{member.member_data} "'
md_count += 1
else:
assert "member_data" not in stored_member_info
assert md_count == args.initial_operator_count
return network
@reqs.description("Check network/nodes endpoint")
def test_node_ids(network, args):
nodes = network.find_nodes()
for node in nodes:
with node.client() as c:
r = c.get(
f"/node/network/nodes?host={node.get_public_rpc_host()}&port={node.get_public_rpc_port()}"
)
assert r.status_code == http.HTTPStatus.OK.value
info = r.body.json()["nodes"]
assert len(info) == 1
assert info[0]["node_id"] == node.node_id
assert info[0]["status"] == NodeStatus.TRUSTED.value
return network
@reqs.description("Test ack state digest updates")
def test_ack_state_digest_update(network, args):
for node in network.get_joined_nodes():
network.consortium.get_any_active_member().update_ack_state_digest(node)
return network
@reqs.description("Test invalid client signatures")
def test_invalid_client_signature(network, args):
primary, _ = network.find_primary()
def post_proposal_request_raw(node, headers=None, expected_error_msg=None):
r = requests.post(
f"https://{node.get_public_rpc_host()}:{node.get_public_rpc_port()}/gov/proposals",
headers=headers,
verify=os.path.join(node.common_dir, "networkcert.pem"),
).json()
assert r["error"]["code"] == "InvalidAuthenticationInfo"
assert (
expected_error_msg in r["error"]["message"]
), f"Expected error message '{expected_error_msg}' not in '{r['error']['message']}'"
# Verify that _some_ HTTP signature parsing errors are communicated back to the client
post_proposal_request_raw(
primary,
headers=None,
expected_error_msg="Missing signature",
)
post_proposal_request_raw(
primary,
headers={"Authorization": "invalid"},
expected_error_msg="'authorization' header only contains one field",
)
post_proposal_request_raw(
primary,
headers={"Authorization": "invalid invalid"},
expected_error_msg="'authorization' scheme for signature should be 'Signature",
)
post_proposal_request_raw(
primary,
headers={"Authorization": "Signature invalid"},
expected_error_msg="Error verifying HTTP 'digest' header: Missing 'digest' header",
)
@reqs.description("Update certificates of all nodes, one by one")
def test_each_node_cert_renewal(network, args):
primary, _ = network.find_primary()
now = datetime.now()
validity_period_allowed = args.maximum_node_certificate_validity_days - 1
validity_period_forbidden = args.maximum_node_certificate_validity_days + 1
test_vectors = [
(now, validity_period_allowed, None),
(now, None, None), # Omit validity period (deduced from service configuration)
(now, -1, infra.proposal.ProposalNotCreated),
(now, validity_period_forbidden, infra.proposal.ProposalNotAccepted),
]
for (valid_from, validity_period_days, expected_exception) in test_vectors:
for node in network.get_joined_nodes():
with node.client() as c:
c.get("/node/network/nodes")
node_cert_tls_before = node.get_tls_certificate_pem()
assert (
infra.crypto.compute_public_key_der_hash_hex_from_pem(
node_cert_tls_before
)
== node.node_id
)
try:
valid_from_x509 = str(infra.crypto.datetime_to_X509time(valid_from))
network.consortium.set_node_certificate_validity(
primary,
node,
valid_from=valid_from_x509,
validity_period_days=validity_period_days,
)
node.set_certificate_validity_period(
valid_from_x509,
validity_period_days
or args.maximum_node_certificate_validity_days,
)
except Exception as e:
assert isinstance(e, expected_exception)
continue
else:
assert (
expected_exception is None
), "Proposal should have not succeeded"
node_cert_tls_after = node.get_tls_certificate_pem()
assert (
node_cert_tls_before != node_cert_tls_after
), f"Node {node.local_node_id} certificate was not renewed"
node.verify_certificate_validity_period()
LOG.info(
f"Certificate for node {node.local_node_id} has successfully been renewed"
)
# Long-connected client is still connected after certificate renewal
c.get("/node/network/nodes")
return network
@reqs.description("Update certificates of all nodes, one by one")
def test_all_nodes_cert_renewal(network, args):
primary, _ = network.find_primary()
valid_from = str(infra.crypto.datetime_to_X509time(datetime.now()))
validity_period_days = args.maximum_node_certificate_validity_days
network.consortium.set_all_nodes_certificate_validity(
primary,
valid_from=valid_from,
validity_period_days=validity_period_days,
)
for node in network.get_joined_nodes():
node.set_certificate_validity_period(valid_from, validity_period_days)
def gov(args):
with infra.network.network(
args.nodes, args.binary_dir, args.debug_nodes, args.perf_nodes, pdb=args.pdb
) as network:
network.start_and_join(args)
network.consortium.set_authenticate_session(args.authenticate_session)
test_create_endpoint(network, args)
test_consensus_status(network, args)
test_node_ids(network, args)
test_member_data(network, args)
test_quote(network, args)
test_user(network, args)
test_no_quote(network, args)
test_ack_state_digest_update(network, args)
test_invalid_client_signature(network, args)
test_each_node_cert_renewal(network, args)
test_all_nodes_cert_renewal(network, args)
def js_gov(args):
with infra.network.network(
args.nodes, args.binary_dir, args.debug_nodes, args.perf_nodes, pdb=args.pdb
) as network:
network.start_and_join(args)
governance_js.test_proposal_validation(network, args)
governance_js.test_proposal_storage(network, args)
governance_js.test_proposal_withdrawal(network, args)
governance_js.test_ballot_storage(network, args)
governance_js.test_pure_proposals(network, args)
governance_js.test_proposals_with_votes(network, args)
governance_js.test_vote_failure_reporting(network, args)
governance_js.test_operator_proposals_and_votes(network, args)
governance_js.test_apply(network, args)
governance_js.test_actions(network, args)
governance_js.test_set_constitution(network, args)
if __name__ == "__main__":
cr = ConcurrentRunner()
cr.add(
"session_auth",
gov,
package="samples/apps/logging/liblogging",
nodes=infra.e2e_args.max_nodes(cr.args, f=0),
initial_user_count=3,
authenticate_session=True,
)
cr.add(
"session_noauth",
gov,
package="samples/apps/logging/liblogging",
nodes=infra.e2e_args.max_nodes(cr.args, f=0),
initial_user_count=3,
authenticate_session=False,
)
cr.add(
"js",
js_gov,
package="samples/apps/logging/liblogging",
nodes=infra.e2e_args.max_nodes(cr.args, f=0),
initial_user_count=3,
authenticate_session=True,
)
cr.add(
"history",
governance_history.run,
package="samples/apps/logging/liblogging",
nodes=infra.e2e_args.max_nodes(cr.args, f=0),
# Higher snapshot interval as snapshots trigger new ledger chunks, which
# may result in latest chunk being partially written
snapshot_tx_interval=10000,
)
cr.run(2)
```
#### File: tests/infra/doc.py
```python
import docutils.nodes
import docutils.parsers.rst
import docutils.utils
import docutils.frontend
from docutils.parsers.rst.directives import register_directive
from docutils.parsers.rst import Directive
class StubDirective(Directive):
has_content = True
def run(self):
return []
class TablesVisitor(docutils.nodes.NodeVisitor):
prefix = None
tables = []
def visit_section(self, node):
(name,) = node.attributes["names"]
if name.startswith("public:"):
self.prefix = name
else:
if self.prefix:
self.tables.append(f"{self.prefix}{name}")
def unknown_visit(self, node) -> None:
pass
def parse(text):
for t in ("enum", "struct"):
register_directive(f"doxygen{t}", StubDirective)
parser = docutils.parsers.rst.Parser()
components = (docutils.parsers.rst.Parser,)
settings = docutils.frontend.OptionParser(
components=components
).get_default_values()
document = docutils.utils.new_document("<rst-doc>", settings=settings)
parser.parse(text, document)
return document
def extract_table_names(doc):
v = TablesVisitor(doc)
doc.walk(v)
return v.tables
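# Usage sketch (added comment, illustrative only; the .rst file name is
# hypothetical): parse a documentation page and list the table names it defines.
#   doc = parse(open("kv_store.rst").read())
#   print(extract_table_names(doc))   # e.g. ['public:ccf.gov.members.info', ...]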
```
|
{
"source": "Jerryxiaoyu/maml_rl",
"score": 2
}
|
#### File: Jerryxiaoyu/maml_rl/test.py
```python
from sandbox.rocky.tf.algos.maml_trpo import MAMLTRPO
from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from rllab.baselines.gaussian_mlp_baseline import GaussianMLPBaseline
from rllab.envs.mujoco.ant_env_rand import AntEnvRand
from rllab.envs.mujoco.ant_env_rand_goal import AntEnvRandGoal
from rllab.envs.mujoco.ant_env_rand_direc import AntEnvRandDirec
from rllab.envs.normalized_env import normalize
from rllab.misc.instrument import stub, run_experiment_lite
from sandbox.rocky.tf.policies.maml_minimal_gauss_mlp_policy import MAMLGaussianMLPPolicy
from sandbox.rocky.tf.envs.base import TfEnv
import numpy as np
import tensorflow as tf
stub(globals())
from rllab.misc.instrument import VariantGenerator, variant
class VG(VariantGenerator):
@variant
def fast_lr(self):
return [0.1]
@variant
def meta_step_size(self):
return [0.01] # sometimes 0.02 better
@variant
def fast_batch_size(self):
return [20,30]
@variant
def meta_batch_size(self):
return [40,50] # at least a total batch size of 400. (meta batch size*fast batch size)
@variant
def seed(self):
return [1]
@variant
def task_var(self): # fwd/bwd task or goal vel task
# 0 for fwd/bwd, 1 for goal vel (kind of), 2 for goal pose
return [0]
# should also code up alternative KL thing
variants = VG().variants()
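# Note (added comment): VariantGenerator takes the cross-product of all @variant
# methods, so the settings above yield 1*1*2*2*1*1 = 4 variants, each a dict such
# as {'fast_lr': 0.1, 'meta_step_size': 0.01, 'fast_batch_size': 20,
#     'meta_batch_size': 40, 'seed': 1, 'task_var': 0}.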
print(variants)
for v in variants:
task_var = v['task_var']
#oracle = v['oracle']
print('v : ',v)
print('task: ',task_var)
#print('oracle: ',oracle)
print('---------------------')
make_video = True
'''''
if not make_video:
test_num_goals = 10
np.random.seed(2)
goals = np.random.uniform(0.0, 3.0, size=(test_num_goals, ))
else:
np.random.seed(1)
test_num_goals = 2
goals = [0.0, 3.0]
file_ext = 'mp4' # can be mp4 or gif
print(goals)
'''''
np.random.seed(0)
np.random.rand(5)
```
|
{
"source": "Jerryxiaoyu/maml_rl_v2",
"score": 2
}
|
#### File: CPG_core/controllers/CPG_controller_bigdog2.py
```python
from CPG_core.CPG_osillator import matsuoka_oscillator
from CPG_core.CPG_osillator import CPG_neutron
class CPG_network(object):
def __init__(self, position_vector):
kf = position_vector[0]
GAIN0 = position_vector[1]
GAIN1 = position_vector[2]
GAIN2 = position_vector[3]
GAIN3 = position_vector[4]
GAIN4 = position_vector[5]
GAIN5 = position_vector[6]
GAIN6 = position_vector[7]
GAIN7 = position_vector[8]
GAIN8 = position_vector[9]
GAIN9 = position_vector[10]
GAIN10 = position_vector[11]
GAIN11 = position_vector[12]
GAIN12 = position_vector[13]
GAIN13 = position_vector[14]
BIAS0 = position_vector[15]
BIAS1 = position_vector[16]
BIAS2 = position_vector[17]
BIAS3 = position_vector[18]
BIAS4 = position_vector[19]
BIAS5 = position_vector[20]
BIAS6 = position_vector[21]
BIAS7 = position_vector[22]
BIAS8 = position_vector[23]
BIAS9 = position_vector[24]
BIAS10 = position_vector[25]
BIAS11 = position_vector[26]
BIAS12 = position_vector[27]
BIAS13 = position_vector[28]
parm_list = {
0: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
1: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, GAIN0, BIAS0],
2: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, GAIN1, BIAS1],
3: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, GAIN2, BIAS2],
4: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, GAIN3, BIAS3],
5: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, GAIN4, BIAS4],
6: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, GAIN5, BIAS5],
7: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, GAIN6, BIAS6],
8: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, GAIN7, BIAS7],
9: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, GAIN8, BIAS8],
10: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, GAIN9, BIAS9],
11: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, GAIN10, BIAS10],
12: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, GAIN11, BIAS11],
13: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, GAIN12, BIAS12],
14: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, GAIN13, BIAS13],
}
self.kf = position_vector[0]
self.num_CPG = len(parm_list)
self.CPG_list =[]
self.w_ms_list = [None, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, 1, ]
self.master_list = [None, 0, 0, 1, 3, 3, 1, 6, 6, 2, 9, 9, 2, 12, 12]
for i in range(self.num_CPG):
if i == 0:
self.CPG_list.append(CPG_neutron(0, master_nuron = None, param=parm_list[0] ,kf= self.kf, w_ms = 0))
else:
self.CPG_list.append(CPG_neutron(1, master_nuron=self.CPG_list[self.master_list[i]],
param=parm_list[i], kf=self.kf, w_ms= self.w_ms_list[i]))
def output(self, state):
output_list = []
for cpg_n in self.CPG_list:
cpg_n.next_output(f1=0, f2=0)
output_list.append(cpg_n.parm['o'])
return output_list
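# Usage sketch (added comment, illustrative only): the network is driven open loop;
# every call to output() advances each oscillator by one integration step.
#   cpg = CPG_network(position_vector)          # 29-element vector: kf + 14 gains + 14 biases
#   joint_targets = cpg.output(state=None)[1:]  # entry 0 is the root/placeholder neuron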
```
#### File: CPG_core/controllers/CPG_controller_bigdog2_sin.py
```python
from CPG_core.CPG_Sin_osillator import sin_oscillator
from CPG_core.CPG_Sin_osillator import CPG_Sinneutron
class CPG_network(object):
def __init__(self, CPG_node_num, position_vector):
kf = position_vector[0]
self.CPG_node_num = CPG_node_num # 不包括placemarker
if len(position_vector) != self.CPG_node_num *3+1:
assert "Position vector out of range!"
GAIN,BIAS,PHASE = [],[],[]
for i in range(self.CPG_node_num):
GAIN.append(position_vector[i+1])
BIAS.append(position_vector[self.CPG_node_num+i+1])
PHASE.append(position_vector[2 * self.CPG_node_num+i+1])
self.parm_list = {
0: [0.0, 0.0, 0.0, 1.0, 0.0, 0],
}
for i in range(self.CPG_node_num):
parm ={i+1:[0.0, 0.0, 0.0, GAIN[i], BIAS[i], PHASE[i]]}
self.parm_list.update(parm)
#print(parm_list)
self.kf = position_vector[0]
self.num_CPG = len(self.parm_list)
self.CPG_list =[]
#self.w_ms_list = [None, 1, 1,1,1, 1, 1, 1, 1, 1,1,1, 1, 1, 1, ]
self.w_ms_list = [None, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, 1, ]
self.master_list = [None, 0,0,1,3,3,1,6,6,2,9,9, 2,12,12 ]
for i in range(self.num_CPG):
if i == 0:
self.CPG_list.append(CPG_Sinneutron(0, master_nuron = None, param=self.parm_list[0] ,kf= self.kf, w_ms = 0))
else:
self.CPG_list.append(CPG_Sinneutron(i, master_nuron=self.CPG_list[self.master_list[i]],
param=self.parm_list[i], kf=self.kf, w_ms= self.w_ms_list[i]))
def output(self, state):
output_list = []
for cpg_n in self.CPG_list:
cpg_n.next_output(f1=0, f2=0)
output_list.append(cpg_n.parm['o'])
return output_list
def update(self, fi_l):
self.kesi = 5
if len(fi_l) == 2:
# f_left = 1 - (0.5 - fi_l[0]) * self.kesi
# f_right = 1 - (0.5 - fi_l[1]) * self.kesi
# self.CPG_list[2].parm['f12'] = self.parm_list[2][5] * f_left
# self.CPG_list[6].parm['f12'] = self.parm_list[6][5] * f_left
# self.CPG_list[7].parm['f12'] = self.parm_list[7][5] * f_left
#
# self.CPG_list[3].parm['f12'] = self.parm_list[3][5] * f_right
# self.CPG_list[8].parm['f12'] = self.parm_list[8][5] * f_right
# self.CPG_list[9].parm['f12'] = self.parm_list[9][5] * f_right
#
# self.CPG_list[4].parm['f12'] = self.parm_list[4][5] * f_right
# self.CPG_list[10].parm['f12'] = self.parm_list[10][5] * f_right
# self.CPG_list[11].parm['f12'] = self.parm_list[11][5] * f_right
#
# self.CPG_list[5].parm['f12'] = self.parm_list[5][5] * f_left
# self.CPG_list[12].parm['f12'] = self.parm_list[12][5] * f_left
# self.CPG_list[13].parm['f12'] = self.parm_list[13][5] * f_left
gain_left = 1 - (0.5 - fi_l[0]) * self.kesi
gain_right = 1 - (0.5 - fi_l[1]) * self.kesi
self.CPG_list[3].parm['R1'] = self.parm_list[3][3] * gain_left
self.CPG_list[4].parm['R1'] = self.parm_list[4][3] * gain_left
self.CPG_list[5].parm['R1'] = self.parm_list[5][3] * gain_left
self.CPG_list[6].parm['R1'] = self.parm_list[6][3] * gain_left
self.CPG_list[7].parm['R1'] = self.parm_list[7][3] * gain_left
self.CPG_list[8].parm['R1'] = self.parm_list[8][3] * gain_left
self.CPG_list[9].parm['R1'] = self.parm_list[9][3] * gain_right
self.CPG_list[10].parm['R1'] = self.parm_list[10][3] * gain_right
self.CPG_list[11].parm['R1'] = self.parm_list[11][3] * gain_right
self.CPG_list[12].parm['R1'] = self.parm_list[12][3] * gain_right
self.CPG_list[13].parm['R1'] = self.parm_list[13][3] * gain_right
self.CPG_list[14].parm['R1'] = self.parm_list[14][3] * gain_right
else:
    raise ValueError('RL output error')
# import numpy as np
# position_vector = np.zeros(40)
# position_vector[0]=1
# for i in range(1,14):
# position_vector[i] = 1
# CPG_network(position_vector)
class CPG_network5(object):
def __init__(self, CPG_node_num, position_vector):
kf = position_vector[0]
self.CPG_node_num = CPG_node_num  # excludes the placemarker (root) node
if len(position_vector) != self.CPG_node_num * 4 + 1:
    raise ValueError("Position vector out of range!")
GAIN, BIAS, PHASE, WEIGHT = [], [], [], []
for i in range(self.CPG_node_num):
GAIN.append(position_vector[i + 1])
BIAS.append(position_vector[self.CPG_node_num + i + 1])
PHASE.append(position_vector[2 * self.CPG_node_num + i + 1])
WEIGHT.append(position_vector[3 * self.CPG_node_num + i + 1])
parm_list = {
0: [0.0, 0.0, 0.0, 1.0, 0.0, 0],
}
for i in range(self.CPG_node_num):
parm = {i + 1: [0.0, 0.0, 0.0, GAIN[i], BIAS[i], PHASE[i]]}
parm_list.update(parm)
# print(parm_list)
self.kf = position_vector[0]
self.num_CPG = len(parm_list)
self.CPG_list = []
self.w_ms_list = [None,WEIGHT[0], WEIGHT[1], WEIGHT[2], WEIGHT[3], WEIGHT[4], WEIGHT[5], WEIGHT[6],
WEIGHT[7], WEIGHT[8], WEIGHT[9], WEIGHT[10], WEIGHT[11], WEIGHT[12], WEIGHT[13] ]
self.master_list = [None, 0,0,1,3,3,1,6,6,2,9,9, 2,12,12]
for i in range(self.num_CPG):
if i == 0:
self.CPG_list.append(CPG_Sinneutron(0, master_nuron=None, param=parm_list[0], kf=self.kf, w_ms=0))
else:
self.CPG_list.append(CPG_Sinneutron(i, master_nuron=self.CPG_list[self.master_list[i]],
param=parm_list[i], kf=self.kf, w_ms=self.w_ms_list[i]))
def output(self, state):
output_list = []
for cpg_n in self.CPG_list:
cpg_n.next_output(f1=0, f2=0)
output_list.append(cpg_n.parm['o'])
return output_list
```
#### File: maml_rl_v2/CPG_core/CPG_Sin_osillator.py
```python
from math import pi, cos, sin
class sin_oscillator(object):
def __init__(self, kf=1):
# Set up the oscillator constants
self.tau =kf
self.a1 = 1.0
self.v1 = 1.0
# Step time
self.dt = 0.01
def oscillator_fun(self, R1, X1, f12 , w_0, u1, u2, r1, r2 ):
"""
Calculates the state variables in the next time step
"""
d_u1_dt = (2 * pi * self.v1 + w_0 * r2 * sin(u2 - u1 - f12)) / self.tau
r1_dt = self.a1 * (R1 - r1)
u1 += d_u1_dt * self.dt
r1 += r1_dt * self.dt
o = r1 * sin(u1) + X1
return o, u1, r1
class CPG_Sinneutron(object):
def __init__(self, id, master_nuron, param ,kf=1, w_ms = 1):
self.id = id
self.parm = {'kf': kf, 'u1':param[0], 'r1':param[1],
'o':param[2], 'R1':param[3], 'X1':param[4],'f12':param[5],}
self.w_ms =w_ms
osillator = sin_oscillator(self.parm['kf'])
self.osillator_fun = osillator.oscillator_fun
self.master_nuron = master_nuron
def next_output(self, f1, f2 ):
if self.master_nuron is not None:
u2 = self.master_nuron.parm['u1']
r2 = self.master_nuron.parm['r1']
else:
u2 = 0
r2 = 0
self.parm['o'],self.parm['u1'], self.parm['r1'] = \
self.osillator_fun(self.parm['R1'],self.parm['X1'], self.parm['f12'], self.w_ms, self.parm['u1'], u2, self.parm['r1'], r2 )
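# Note (added comment, reconstructed from oscillator_fun above): each neuron is a
# phase oscillator integrated with Euler steps of dt = 0.01,
#   du1/dt = (2*pi*v1 + w_0 * r2 * sin(u2 - u1 - f12)) / tau
#   dr1/dt = a1 * (R1 - r1)
#   o      = r1 * sin(u1) + X1
# i.e. the phase u1 is pulled towards the master neuron's phase u2 offset by f12,
# the amplitude r1 relaxes towards R1, and X1 is a constant output offset.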
```
#### File: envs/mujoco/cellrobot_rand_direc_env_body.py
```python
import numpy as np
from gym import utils
from math import pi,sin,cos
import numpy as np
from rllab.misc import autoargs
from rllab.core.serializable import Serializable
from rllab.envs.base import Step
from rllab.envs.mujoco.mujoco_env import MujocoEnv
from rllab.misc import logger
from rllab.misc.overrides import overrides
#from .mujoco_env import MujocoEnv
from CPG_core.PID_controller import PID_controller
from CPG_core.math.transformation import euler_from_quaternion,quaternion_inverse ,quaternion_multiply
# choose your CPG network
# from CPG_core.controllers.CPG_controller_quadruped_sin import CPG_network
from CPG_core.controllers.CPG_controller_quadruped_sin import CPG_network
state_M =np.array([[1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0.],
[0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1.],
[0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0.]])
position_vector = [0.9005710154022419, 0.19157649858525766, 0.20363844865472536, -0.2618038524762938, -0.04764016477204058, -0.4923544636213292, -0.30514082693887024, 0.7692727139092137, 0.7172509186944478, -0.6176943450166859, -0.43476218435592706, 0.7667223977603919, 0.29081693103406536, 0.09086369237435465, 0.0, 0.0, -0.0171052262902362, 0.0, 0.0, 0.0, 0.0, 0.0004205454597565903, 0.0, 0.0, 0.0, 0.0, 0.0, -0.6989070655586036, 1.231416257452789, 1.188419262405775, -1.0974581723778125, -1.023151598620554, -0.40304458466288917, 0.5513169936393982, 0.646385738643396, 1.3694066886743392, 0.7519699447089043, 0.06997050535309216, -1.5500743998481212, 0.8190474090403703]
class CellRobotRandDirectBodyEnv(MujocoEnv, Serializable):
FILE = 'cellrobot_Quadruped_float.xml'
def __init__(self, goal_num=None, *args, **kwargs):
self.goal_num = goal_num
self.goal_theta = 0.0
self.quat_init = [0.49499825, -0.49997497, 0.50500175, 0.49997499]
self.t = 0
self.CPG_controller = CPG_network(position_vector)
super(CellRobotRandDirectBodyEnv, self).__init__(*args, **kwargs)
Serializable.__init__(self, *args, **kwargs)
self.reset(reset_args=goal_num)
def sample_goals(self, num_goals):
# for fwd/bwd env, goal direc is backwards if < 1.5, forwards if > 1.5
return np.random.uniform(-pi/3, pi/3, (num_goals, ))
def get_current_obs(self):
quat = self.model.data.qpos.flat[3:7]
# print('quat = ', quat)
quat_tranfor = quaternion_multiply(quat, quaternion_inverse(self.quat_init))
angle = euler_from_quaternion(quat_tranfor, 'rxyz')
#print(self.goal_theta)
return np.concatenate([
self.get_body_com("torso").flat,
# self.sim.data.qpos.flat[:3],  # qpos indices 3:7 hold the orientation (angles)
# self.sim.data.qpos.flat[:7],  # qpos indices 3:7 hold the orientation (angles)
np.array(angle),
np.array([angle[2] - self.goal_theta])
]).reshape(-1)
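# Observation layout (added comment, summarised from the code above): 3 torso xyz
# coordinates, 3 Euler angles of the torso relative to its initial orientation,
# and the yaw error (angle[2] - goal_theta), i.e. a 7-dimensional observation.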
@overrides
def reset(self, init_state=None, reset_args=None, **kwargs):
goal_vel = reset_args
if goal_vel is not None:
self._goal_vel = goal_vel
else:
self._goal_vel = np.random.uniform(-pi/3, pi/3)
self.goal_theta = self._goal_vel
#print(self.goal_theta)
self.goal_direction = -1.0 if self._goal_vel < 1.5 else 1.0
self.reset_mujoco(init_state)
self.model.forward()
self.current_com = self.model.data.com_subtree[0]
self.dcom = np.zeros_like(self.current_com)
obs = self.get_current_obs()
return obs
def step(self, a):
#print(a)
u = np.array([cos(self.goal_theta), sin(self.goal_theta)])
action = self.CPG_transfer(a, self.CPG_controller)
xposbefore = self.get_body_com("torso")[0]
yposbefore = self.get_body_com("torso")[1]
comvel_xy_before = np.array([xposbefore, yposbefore])
proj_parbefore = comvel_xy_before.dot(np.transpose(u))
self.forward_dynamics(action)
xposafter = self.get_body_com("torso")[0]
yposafter = self.get_body_com("torso")[1]
comvel_xy_after = np.array([xposafter, yposafter])
proj_parafter = comvel_xy_after.dot(np.transpose(u))
comvel = self.get_body_comvel("torso")
comvel_xy = np.array([comvel[0], comvel[1]])
proj_par = comvel_xy.dot(np.transpose(u))
proj_ver = abs(u[0] * comvel_xy[1] - u[1] * comvel_xy[0])
#forward_reward = 1* proj_par - 10 * proj_ver
#print('reward: ', (proj_parafter - proj_parbefore) /0.01, 5 * proj_ver)
forward_reward = 1 * (proj_parafter - proj_parbefore) /0.01 - 5 * proj_ver
# lb, ub = self.action_space_ture.bounds
# scaling = (ub - lb) * 0.5
# ctrl_cost = 0.5 * 1e-2 * np.sum(np.square(action / scaling))
ctrl_cost=0
contact_cost = 0.5 * 1e-3 * np.sum(np.square(np.clip(self.model.data.cfrc_ext, -1, 1)))
survive_reward = 0.05
#print('reward: ', forward_reward,-ctrl_cost, -contact_cost )
reward = forward_reward - ctrl_cost - contact_cost + survive_reward
state = self._state
notdone = np.isfinite(state).all() \
and state[2] >= 0.1 and state[2] <= 0.6
done = not notdone
ob = self.get_current_obs()
return Step(ob, float(reward), done)
@overrides
def log_diagnostics(self, paths, prefix=''):
progs = [
path["observations"][-1][-3] - path["observations"][0][-3]
for path in paths
]
logger.record_tabular(prefix+'AverageForwardProgress', np.mean(progs))
logger.record_tabular(prefix+'MaxForwardProgress', np.max(progs))
logger.record_tabular(prefix+'MinForwardProgress', np.min(progs))
logger.record_tabular(prefix+'StdForwardProgress', np.std(progs))
def CPG_transfer(self,RL_output, CPG_controller ):
#print(RL_output)
CPG_controller.update(RL_output)
# if self.t % 100 == 0:
# #CPG_controller.update(RL_output)
# print(RL_output)
###adjust CPG_neutron parm using RL_output
output_list = CPG_controller.output(state=None)
target_joint_angles = np.array(output_list[1:])  # the first CPG output is a placemarker
cur_angles = np.concatenate([state_M.dot(self.model.data.qpos[7:].reshape((-1, 1))).flat])
action = PID_controller(cur_angles, target_joint_angles)
return action
```
#### File: tf/policies/maml_minimal_categorical_mlp_policy.py
```python
from contextlib import contextmanager
import itertools
import numpy as np
import sandbox.rocky.tf.core.layers as L
from rllab.core.serializable import Serializable
from sandbox.rocky.tf.distributions.categorical import Categorical
from sandbox.rocky.tf.policies.base import StochasticPolicy
from rllab.misc import ext
from sandbox.rocky.tf.misc import tensor_utils
from rllab.misc.overrides import overrides
from sandbox.rocky.tf.spaces.discrete import Discrete
from rllab.misc import logger
from rllab.misc.tensor_utils import flatten_tensors, unflatten_tensors
import tensorflow as tf
from sandbox.rocky.tf.core.utils import make_input, _create_param, add_param, make_dense_layer, forward_dense_layer, make_param_layer, forward_param_layer
tf_layers = None
load_params = True
@contextmanager
def suppress_params_loading():
global load_params
load_params = False
yield
load_params = True
class MAMLCategoricalMLPPolicy(StochasticPolicy, Serializable):
def __init__(
self,
name,
env_spec,
hidden_sizes=(32, 32),
hidden_nonlinearity=tf.nn.tanh,
prob_network=None,
grad_step_size=1.0,
):
"""
:param env_spec: A spec for the mdp.
:param hidden_sizes: list of sizes for the fully connected hidden layers
:param hidden_nonlinearity: nonlinearity used for each hidden layer
:param prob_network: manually specified network for this policy, other network params
are ignored
:param grad_step_size: the step size taken in the learner's gradient update, sampled uniformly if it is a range, e.g. [0.1, 1]
:return:
"""
Serializable.quick_init(self, locals())
assert isinstance(env_spec.action_space, Discrete)
obs_dim = env_spec.observation_space.flat_dim
self.action_dim = env_spec.action_space.n
self.n_hidden = len(hidden_sizes)
self.hidden_nonlinearity = hidden_nonlinearity
self.input_shape = (None, obs_dim,)
self.step_size = grad_step_size
if prob_network is None:
self.all_params = self.create_MLP(
output_dim=self.action_dim,
hidden_sizes=hidden_sizes,
name="prob_network",
)
self._l_obs, self._l_prob = self.forward_MLP('prob_network', self.all_params,
n_hidden=len(hidden_sizes), input_shape=(obs_dim,),
hidden_nonlinearity=hidden_nonlinearity,
output_nonlinearity=tf.nn.softmax, reuse=None)
# if you want to input your own tensor.
self._forward_out = lambda x, params, is_train: self.forward_MLP('prob_network', params,
n_hidden=len(hidden_sizes), hidden_nonlinearity=hidden_nonlinearity,
output_nonlinearity=tf.nn.softmax, input_tensor=x, is_training=is_train)[1]
self._init_f_prob = tensor_utils.compile_function(
[self._l_obs],
[self._l_prob])
self._cur_f_prob = self._init_f_prob
self._dist = Categorical(self.action_dim)
self._cached_params = {}
super(MAMLCategoricalMLPPolicy, self).__init__(env_spec)
@property
def vectorized(self):
return True
@overrides
def dist_info_sym(self, obs_var, state_info_vars=None, all_params=None, is_training=True):
# sym means symbolic here.
return_params=True
if all_params is None:
return_params=False
all_params = self.all_params
output = self._forward_out(tf.cast(obs_var,tf.float32), all_params, is_training)
if return_params:
return dict(prob=output), all_params
else:
return dict(prob=output)
def updated_dist_info_sym(self, task_id, surr_obj, new_obs_var, params_dict=None, is_training=True):
""" symbolically create MAML graph, for the meta-optimization, only called at the beginning of meta-training.
Called more than once if you want to do more than one grad step.
"""
old_params_dict = params_dict
step_size = self.step_size
if old_params_dict == None:
old_params_dict = self.all_params
param_keys = self.all_params.keys()
gradients = dict(zip(param_keys, tf.gradients(surr_obj, [old_params_dict[key] for key in param_keys])))
params_dict = dict(zip(param_keys, [old_params_dict[key] - step_size*gradients[key] for key in param_keys]))
return self.dist_info_sym(new_obs_var, all_params=params_dict, is_training=is_training)
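# Note (added comment): the fast adaptation above is a single symbolic SGD step on
# the task surrogate objective, params' = params - step_size * grad(surr_obj), so
# the meta-objective built on dist_info_sym(new_obs_var, params') can be
# differentiated through the inner update (the MAML second-order term).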
@overrides
def dist_info(self, obs, state_infos=None):
return dict(prob=self._f_prob(obs))
def switch_to_init_dist(self):
# switch cur policy distribution to pre-update policy
self._cur_f_prob = self._init_f_prob
self.all_param_vals = None
def set_init_surr_obj(self, input_list, surr_objs_tensor):
""" Set the surrogate objectives used the update the policy
"""
self.input_list_for_grad = input_list
self.surr_objs = surr_objs_tensor
def compute_updated_dists(self, samples):
""" Compute fast gradients once and pull them out of tensorflow for sampling.
"""
num_tasks = len(samples)
param_keys = self.all_params.keys()
sess = tf.get_default_session()
obs_list, action_list, adv_list = [], [], []
for i in range(num_tasks):
inputs = ext.extract(samples[i],
'observations', 'actions', 'advantages')
obs_list.append(inputs[0])
action_list.append(inputs[1])
adv_list.append(inputs[2])
inputs = obs_list + action_list + adv_list
# To do a second update, replace self.all_params below with the params that were used to collect the policy.
init_param_values = None
if self.all_param_vals is not None:
init_param_values = self.get_variable_values(self.all_params)
step_size = self.step_size
for i in range(num_tasks):
if self.all_param_vals is not None:
self.assign_params(self.all_params, self.all_param_vals[i])
if 'all_fast_params_tensor' not in dir(self):
# make computation graph once
self.all_fast_params_tensor = []
for i in range(num_tasks):
gradients = dict(zip(param_keys, tf.gradients(self.surr_objs[i], [self.all_params[key] for key in param_keys])))
fast_params_tensor = dict(zip(param_keys, [self.all_params[key] - step_size*gradients[key] for key in param_keys]))
self.all_fast_params_tensor.append(fast_params_tensor)
# pull new param vals out of tensorflow, so gradient computation only done once
self.all_param_vals = sess.run(self.all_fast_params_tensor, feed_dict=dict(list(zip(self.input_list_for_grad, inputs))))
if init_param_values is not None:
self.assign_params(self.all_params, init_param_values)
outputs = []
inputs = tf.split(0, num_tasks, self._l_obs)
for i in range(num_tasks):
# TODO - use a placeholder to feed in the params, so that we don't have to recompile every time.
task_inp = inputs[i]
info, _ = self.dist_info_sym(task_inp, dict(), all_params=self.all_param_vals[i],
is_training=False)
outputs.append([info['prob']])
self._cur_f_prob = tensor_utils.compile_function(
inputs = [self._l_obs],
outputs = outputs,
)
def get_variable_values(self, tensor_dict):
sess = tf.get_default_session()
result = sess.run(tensor_dict)
return result
def assign_params(self, tensor_dict, param_values):
if 'assign_placeholders' not in dir(self):
# make computation graph, if it doesn't exist; then cache it for future use.
self.assign_placeholders = {}
self.assign_ops = {}
for key in tensor_dict.keys():
self.assign_placeholders[key] = tf.placeholder(tf.float32)
self.assign_ops[key] = tf.assign(tensor_dict[key], self.assign_placeholders[key])
feed_dict = {self.assign_placeholders[key]:param_values[key] for key in tensor_dict.keys()}
sess = tf.get_default_session()
sess.run(self.assign_ops, feed_dict)
# The return value is a pair. The first item is a matrix (N, A), where each
# entry corresponds to the action value taken. The second item is a vector
# of length N, where each entry is the density value for that action, under
# the current policy
@overrides
def get_action(self, observation):
flat_obs = self.observation_space.flatten(observation)
prob = self._cur_f_prob([flat_obs])[0]
action = self.action_space.weighted_sample(prob)
return action, dict(prob=prob)
def get_actions(self, observations):
flat_obs = self.observation_space.flatten_n(observations)
result = self._cur_f_prob(flat_obs)
if len(result) == 1:
probs = result[0]
else:
#import pdb; pdb.set_trace()
# TODO - I think this is correct but not sure.
probs = np.array(result)[:,0,0,:]
actions = list(map(self.action_space.weighted_sample, probs))
return actions, dict(prob=probs)
@property
def distribution(self):
return self._dist
# This makes all of the parameters.
def create_MLP(self, name, output_dim, hidden_sizes,
hidden_W_init=L.XavierUniformInitializer(), hidden_b_init=tf.zeros_initializer,
output_W_init=L.XavierUniformInitializer(), output_b_init=tf.zeros_initializer,
weight_normalization=False,
):
input_shape = self.input_shape
cur_shape = input_shape
with tf.variable_scope(name):
all_params = {}
for idx, hidden_size in enumerate(hidden_sizes):
W, b, cur_shape = make_dense_layer(
cur_shape,
num_units=hidden_size,
name="hidden_%d" % idx,
W=hidden_W_init,
b=hidden_b_init,
weight_norm=weight_normalization,
)
all_params['W' + str(idx)] = W
all_params['b' + str(idx)] = b
W, b, _ = make_dense_layer(
cur_shape,
num_units=output_dim,
name='output',
W=output_W_init,
b=output_b_init,
weight_norm=weight_normalization,
)
all_params['W' + str(len(hidden_sizes))] = W
all_params['b'+str(len(hidden_sizes))] = b
return all_params
def forward_MLP(self, name, all_params, input_tensor=None, input_shape=None, n_hidden=-1,
hidden_nonlinearity=tf.identity, output_nonlinearity=tf.identity,
batch_normalization=False, reuse=True, is_training=False):
# is_training and reuse are for batch norm, irrelevant if batch_norm set to False
# set reuse to False if the first time this func is called.
with tf.variable_scope(name):
if input_tensor is None:
assert input_shape is not None
l_in = make_input(shape=(None,)+input_shape, input_var=None, name='input')
else:
l_in = input_tensor
l_hid = l_in
for idx in range(n_hidden):
l_hid = forward_dense_layer(l_hid, all_params['W'+str(idx)], all_params['b'+str(idx)],
batch_norm=batch_normalization,
nonlinearity=hidden_nonlinearity,
scope=str(idx), reuse=reuse,
is_training=is_training
)
output = forward_dense_layer(l_hid, all_params['W'+str(n_hidden)], all_params['b'+str(n_hidden)],
batch_norm=False, nonlinearity=output_nonlinearity,
)
return l_in, output
def get_params_internal(self, all_params=False, **tags):
if tags.get('trainable', False):
params = tf.trainable_variables()
else:
params = tf.all_variables()
# TODO - this is hacky...
params = [p for p in params if p.name.startswith('prob_network')]
params = [p for p in params if 'Adam' not in p.name]
return params
def log_diagnostics(self, paths, prefix=''):
pass
```
#### File: tf/policies/minimal_categorical_mlp_policy.py
```python
import sandbox.rocky.tf.core.layers as L
from rllab.core.serializable import Serializable
from sandbox.rocky.tf.distributions.categorical import Categorical
from sandbox.rocky.tf.policies.base import StochasticPolicy
from rllab.misc import ext
from sandbox.rocky.tf.misc import tensor_utils
from rllab.misc.overrides import overrides
from sandbox.rocky.tf.spaces.discrete import Discrete
from rllab.misc import logger
from rllab.misc.tensor_utils import flatten_tensors, unflatten_tensors
from contextlib import contextmanager
import itertools
import tensorflow as tf
from sandbox.rocky.tf.core.utils import make_input, _create_param, add_param, make_dense_layer, forward_dense_layer, make_param_layer, forward_param_layer
tf_layers = None
load_params = True
@contextmanager
def suppress_params_loading():
global load_params
load_params = False
yield
load_params = True
class CategoricalMLPPolicy(StochasticPolicy, Serializable):
def __init__(
self,
name,
env_spec,
hidden_sizes=(32, 32),
hidden_nonlinearity=tf.nn.tanh,
prob_network=None,
):
"""
:param env_spec: A spec for the mdp.
:param hidden_sizes: list of sizes for the fully connected hidden layers
:param hidden_nonlinearity: nonlinearity used for each hidden layer
:param prob_network: manually specified network for this policy, other network params
are ignored
:return:
"""
Serializable.quick_init(self, locals())
assert isinstance(env_spec.action_space, Discrete)
obs_dim = env_spec.observation_space.flat_dim
action_dim = env_spec.action_space.flat_dim
with tf.variable_scope(name):
if prob_network is None:
prob_network = self.create_MLP(
input_shape=(obs_dim,),
output_dim=env_spec.action_space.n,
hidden_sizes=hidden_sizes,
name="prob_network",
)
self._l_obs, self._l_prob = self.forward_MLP('prob_network', prob_network,
n_hidden=len(hidden_sizes), input_shape=(obs_dim,),
hidden_nonlinearity=hidden_nonlinearity,
output_nonlinearity=tf.nn.softmax, reuse=None)
# if you want to input your own tensor.
            self._forward_out = lambda x, is_train: self.forward_MLP('prob_network', prob_network,
                n_hidden=len(hidden_sizes), hidden_nonlinearity=hidden_nonlinearity,
                output_nonlinearity=tf.nn.softmax, input_tensor=x, is_training=is_train)[1]
self._f_prob = tensor_utils.compile_function(
[self._l_obs],
L.get_output(self._l_prob)
)
self._dist = Categorical(env_spec.action_space.n)
@property
def vectorized(self):
return True
@overrides
def dist_info_sym(self, obs_var, state_info_vars=None, is_training=True):
# sym means symbolic here.
output = self._forward_out(tf.cast(obs_var,tf.float32), is_training)
return dict(prob=output)
@overrides
def dist_info(self, obs, state_infos=None):
return dict(prob=self._f_prob(obs))
# The return value is a pair. The first item is a matrix (N, A), where each
# entry corresponds to the action value taken. The second item is a vector
# of length N, where each entry is the density value for that action, under
# the current policy
@overrides
def get_action(self, observation):
flat_obs = self.observation_space.flatten(observation)
prob = self._f_prob([flat_obs])[0]
action = self.action_space.weighted_sample(prob)
return action, dict(prob=prob)
def get_actions(self, observations):
flat_obs = self.observation_space.flatten_n(observations)
probs = self._f_prob(flat_obs)
actions = list(map(self.action_space.weighted_sample, probs))
return actions, dict(prob=probs)
@property
def distribution(self):
return self._dist
# This makes all of the parameters.
def create_MLP(self, name, output_dim, hidden_sizes,
hidden_W_init=L.XavierUniformInitializer(), hidden_b_init=tf.zeros_initializer,
output_W_init=L.XavierUniformInitializer(), output_b_init=tf.zeros_initializer,
input_shape=None, weight_normalization=False,
):
assert input_shape is not None
cur_shape = input_shape
with tf.variable_scope(name):
all_params = {}
for idx, hidden_size in enumerate(hidden_sizes):
W, b, cur_shape = make_dense_layer(
cur_shape,
num_units=hidden_size,
name="hidden_%d" % idx,
W=hidden_W_init,
b=hidden_b_init,
weight_norm=weight_normalization,
)
all_params['W' + str(idx)] = W
all_params['b' + str(idx)] = b
W, b, _ = make_dense_layer(
cur_shape,
num_units=output_dim,
name='output',
W=output_W_init,
b=output_b_init,
weight_norm=weight_normalization,
)
all_params['W' + str(len(hidden_sizes))] = W
all_params['b'+str(len(hidden_sizes))] = b
return all_params
def forward_MLP(self, name, all_params, input_tensor=None, input_shape=None, n_hidden=-1,
hidden_nonlinearity=tf.identity, output_nonlinearity=tf.identity,
batch_normalization=False, reuse=True, is_training=False):
# is_training and reuse are for batch norm, irrelevant if batch_norm set to False
# set reuse to False if the first time this func is called.
with tf.variable_scope(name):
if input_tensor is None:
assert input_shape is not None
l_in = make_input(shape=(None,)+input_shape, input_var=None, name='input')
else:
l_in = input_tensor
l_hid = l_in
for idx in range(n_hidden):
l_hid = forward_dense_layer(l_hid, all_params['W'+str(idx)], all_params['b'+str(idx)],
batch_norm=batch_normalization,
nonlinearity=hidden_nonlinearity,
scope=str(idx), reuse=reuse,
is_training=is_training
)
output = forward_dense_layer(l_hid, all_params['W'+str(n_hidden)], all_params['b'+str(n_hidden)],
batch_norm=False, nonlinearity=output_nonlinearity,
)
return l_in, output
```
#### File: tf/samplers/vectorized_sampler.py
```python
import pickle
import tensorflow as tf
from rllab.sampler.base import BaseSampler
from sandbox.rocky.tf.envs.parallel_vec_env_executor import ParallelVecEnvExecutor
from sandbox.rocky.tf.envs.vec_env_executor import VecEnvExecutor
from rllab.misc import tensor_utils
import numpy as np
from rllab.sampler.stateful_pool import ProgBarCounter
import rllab.misc.logger as logger
import itertools
class VectorizedSampler(BaseSampler):
def __init__(self, algo, n_envs=None):
super(VectorizedSampler, self).__init__(algo)
self.n_envs = n_envs
def start_worker(self):
n_envs = self.n_envs
if n_envs is None:
n_envs = int(self.algo.batch_size / self.algo.max_path_length)
n_envs = max(1, min(n_envs, 100))
if getattr(self.algo.env, 'vectorized', False):
self.vec_env = self.algo.env.vec_env_executor(n_envs=n_envs, max_path_length=self.algo.max_path_length)
else:
envs = [pickle.loads(pickle.dumps(self.algo.env)) for _ in range(n_envs)]
self.vec_env = VecEnvExecutor(
envs=envs,
#env=pickle.loads(pickle.dumps(self.algo.env)),
#n = n_envs,
max_path_length=self.algo.max_path_length
)
self.env_spec = self.algo.env.spec
def shutdown_worker(self):
self.vec_env.terminate()
def obtain_samples(self, itr, reset_args=None, return_dict=False, log_prefix=''):
# reset_args: arguments to pass to the environments to reset
# return_dict: whether or not to return a dictionary or list form of paths
logger.log("Obtaining samples for iteration %d..." % itr)
#paths = []
paths = {}
for i in range(self.vec_env.num_envs):
paths[i] = []
# if the reset args are not list/numpy, we set the same args for each env
if reset_args is not None and (type(reset_args) != list and type(reset_args)!=np.ndarray):
reset_args = [reset_args]*self.vec_env.num_envs
n_samples = 0
obses = self.vec_env.reset(reset_args)
dones = np.asarray([True] * self.vec_env.num_envs)
running_paths = [None] * self.vec_env.num_envs
pbar = ProgBarCounter(self.algo.batch_size)
policy_time = 0
env_time = 0
process_time = 0
policy = self.algo.policy
import time
while n_samples < self.algo.batch_size:
t = time.time()
policy.reset(dones)
actions, agent_infos = policy.get_actions(obses)
policy_time += time.time() - t
t = time.time()
#print(actions)
next_obses, rewards, dones, env_infos = self.vec_env.step(actions, reset_args)
env_time += time.time() - t
t = time.time()
agent_infos = tensor_utils.split_tensor_dict_list(agent_infos)
env_infos = tensor_utils.split_tensor_dict_list(env_infos)
if env_infos is None:
env_infos = [dict() for _ in range(self.vec_env.num_envs)]
if agent_infos is None:
agent_infos = [dict() for _ in range(self.vec_env.num_envs)]
for idx, observation, action, reward, env_info, agent_info, done in zip(itertools.count(), obses, actions,
rewards, env_infos, agent_infos,
dones):
if running_paths[idx] is None:
running_paths[idx] = dict(
observations=[],
actions=[],
rewards=[],
env_infos=[],
agent_infos=[],
)
running_paths[idx]["observations"].append(observation)
running_paths[idx]["actions"].append(action)
running_paths[idx]["rewards"].append(reward)
running_paths[idx]["env_infos"].append(env_info)
running_paths[idx]["agent_infos"].append(agent_info)
if done:
paths[idx].append(dict(
observations=self.env_spec.observation_space.flatten_n(running_paths[idx]["observations"]),
actions=self.env_spec.action_space.flatten_n(running_paths[idx]["actions"]),
rewards=tensor_utils.stack_tensor_list(running_paths[idx]["rewards"]),
env_infos=tensor_utils.stack_tensor_dict_list(running_paths[idx]["env_infos"]),
agent_infos=tensor_utils.stack_tensor_dict_list(running_paths[idx]["agent_infos"]),
))
n_samples += len(running_paths[idx]["rewards"])
running_paths[idx] = None
process_time += time.time() - t
pbar.inc(len(obses))
obses = next_obses
pbar.stop()
logger.record_tabular(log_prefix+"PolicyExecTime", policy_time)
logger.record_tabular(log_prefix+"EnvExecTime", env_time)
logger.record_tabular(log_prefix+"ProcessExecTime", process_time)
if not return_dict:
flatten_list = lambda l: [item for sublist in l for item in sublist]
paths = flatten_list(paths.values())
#path_keys = flatten_list([[key]*len(paths[key]) for key in paths.keys()])
return paths
"""
def new_obtain_samples(self, itr, reset_args=None, return_dict=False):
# reset_args: arguments to pass to the environments to reset
# return_dict: whether or not to return a dictionary or list form of paths
logger.log("Obtaining samples for iteration %d..." % itr)
#paths = []
paths = {}
for i in range(self.algo.meta_batch_size):
paths[i] = []
n_samples = 0
pbar = ProgBarCounter(self.algo.batch_size)
policy_time = 0
env_time = 0
process_time = 0
policy = self.algo.policy
import time
num_tasks = self.algo.meta_batch_size
task_batch = self.vec_env.num_envs / num_tasks
# inds 0 through task_batch are task 0, the next task_batch are task 1, etc.
task_idxs = np.reshape(np.tile(np.arange(num_tasks), [task_batch,1]).T, [-1])
obses = self.vec_env.reset([reset_args[idx] for idx in task_idxs])
dones = np.asarray([True] * self.vec_env.num_envs)
running_paths = [None] * self.vec_env.num_envs
while n_samples <= self.algo.batch_size:
t = time.time()
policy.reset(dones)
actions, agent_infos = policy.get_actions(obses)
policy_time += time.time() - t
t = time.time()
next_obses, rewards, dones, env_infos = self.vec_env.step(actions)
env_time += time.time() - t
t = time.time()
agent_infos = tensor_utils.split_tensor_dict_list(agent_infos)
env_infos = tensor_utils.split_tensor_dict_list(env_infos)
if env_infos is None:
env_infos = [dict() for _ in range(self.vec_env.num_envs)]
if agent_infos is None:
agent_infos = [dict() for _ in range(self.vec_env.num_envs)]
for idx, observation, action, reward, env_info, agent_info, done in zip(itertools.count(), obses, actions,
rewards, env_infos, agent_infos,
dones):
if running_paths[idx] is None:
running_paths[idx] = dict(
observations=[],
actions=[],
rewards=[],
env_infos=[],
agent_infos=[],
)
running_paths[idx]["observations"].append(observation)
running_paths[idx]["actions"].append(action)
running_paths[idx]["rewards"].append(reward)
running_paths[idx]["env_infos"].append(env_info)
running_paths[idx]["agent_infos"].append(agent_info)
if done:
paths[task_idxs[idx]].append(dict(
observations=self.env_spec.observation_space.flatten_n(running_paths[idx]["observations"]),
actions=self.env_spec.action_space.flatten_n(running_paths[idx]["actions"]),
rewards=tensor_utils.stack_tensor_list(running_paths[idx]["rewards"]),
env_infos=tensor_utils.stack_tensor_dict_list(running_paths[idx]["env_infos"]),
agent_infos=tensor_utils.stack_tensor_dict_list(running_paths[idx]["agent_infos"]),
))
n_samples += len(running_paths[idx]["rewards"])
running_paths[idx] = None
process_time += time.time() - t
pbar.inc(len(obses))
obses = next_obses
pbar.stop()
logger.record_tabular("PolicyExecTime", policy_time)
logger.record_tabular("EnvExecTime", env_time)
logger.record_tabular("ProcessExecTime", process_time)
if not return_dict:
flatten_list = lambda l: [item for sublist in l for item in sublist]
paths = flatten_list(paths.values())
#path_keys = flatten_list([[key]*len(paths[key]) for key in paths.keys()])
return paths
"""
```
#### File: Jerryxiaoyu/maml_rl_v2/ssh_upload.py
```python
import paramiko
import datetime
import os
def upload(local_dir, remote_dir, hostname, port, username, pkey):
try:
t = paramiko.Transport((hostname, port))
t.connect(username=username, pkey=pkey)
sftp = paramiko.SFTPClient.from_transport(t)
print('upload file start %s ' % datetime.datetime.now())
for root, dirs, files in os.walk(local_dir):
print('[%s][%s][%s]' % (root, dirs, files))
for filespath in files:
local_file = os.path.join(root, filespath)
print(11, '[%s][%s][%s][%s]' % (root, filespath, local_file, local_dir))
a = local_file.replace(local_dir, '').replace('\\', '/').lstrip('/')
print('01', a, '[%s]' % remote_dir)
remote_file = os.path.join(remote_dir, a).replace('\\', '/')
print(22, remote_file)
try:
sftp.put(local_file, remote_file)
except Exception as e:
sftp.mkdir(os.path.split(remote_file)[0])
sftp.put(local_file, remote_file)
print("66 upload %s to remote %s" % (local_file, remote_file))
for name in dirs:
local_path = os.path.join(root, name)
print(0, local_path, local_dir)
a = local_path.replace(local_dir, '').replace('\\', '/').lstrip('/')
print(1, a)
print(1, remote_dir)
# remote_path = os.path.join(remote_dir, a).replace('\\', '/')
remote_path = remote_dir + a
print(33, remote_path)
try:
sftp.mkdir(remote_path)
print(44, "mkdir path %s" % remote_path)
except Exception as e:
print(55, e)
print('77,upload file success %s ' % datetime.datetime.now())
t.close()
except Exception as e:
print(88, e)
if __name__ == '__main__':
    # choose which server to upload to
servers_list =[0]
hostname = ['fdf8:f53e:61e4::18' ]
username = ['drl']
port = [22]
for num in servers_list:
key_path = '/home/ubuntu/.ssh/id_rsa_dl'
key = paramiko.RSAKey.from_private_key_file(key_path)
dir_list = ['Cellrobot-trpo-mamldirec-500-EXP1']
#dir_list = ['Sep_08GA_CPG_Exp10']
for exp_group_dir in dir_list:
local_dir = '/home/ubuntu/jerry/projects/maml_rl_v2/data/local/'+exp_group_dir+'/'
remote_dir = '/home/drl/PycharmProjects/maml_rl-master/data/AWS_data/'+exp_group_dir+'/'
print('Uploading server No.{}...'.format(num))
upload(local_dir, remote_dir, hostname=hostname[num], port=port[num], username=username[num],
pkey=key)
```
|
{
"source": "Jerryxiaoyu/my_baselines",
"score": 3
}
|
#### File: CPG_controllers/neutrons/Matsuoka.py
```python
from math import pi, cos, sin
class matsuoka_oscillator(object):
def __init__(self, kf=1, dt=0.01):
# Set up the oscillator constants
self.tau = 0.2800
self.tau_prime = 0.4977
self.beta = 2.5000
self.w_0 = 2.2829
self.u_e = 0.4111
self.m1 = 1.0
self.m2 = 1.0
self.a = 1.0
# Modify the time constants based on kf
self.tau *= kf
self.tau_prime *= kf
# Step time
self.dt = dt
def oscillator_fun(self, u1, u2, v1, v2, y1, y2, f1, f2, s1, s2, bias, gain ):
"""
Calculates the state variables in the next time step
"""
d_u1_dt = (-u1 - self.w_0 *y2 -self.beta * v1 + self.u_e + f1 + self.a * s1) / self.tau
d_v1_dt = (-v1 + y1) / self.tau_prime
y1 = max([0.0, u1])
d_u2_dt = (-u2 - self.w_0 * y1 - self.beta * v2 + self.u_e + f2 + self.a * s2) / self.tau
d_v2_dt = (-v2 + y2) / self.tau_prime
y2 = max([0.0, u2])
u1 += d_u1_dt * self.dt
u2 += d_u2_dt * self.dt
v1 += d_v1_dt * self.dt
v2 += d_v2_dt * self.dt
o = bias + gain * (-self.m1 * y1 + self.m2 * y2)
return u1, u2, v1, v2, y1, y2, o
class CPG_MatsuokaNeutron(object):
def __init__(self, id, master_nuron, param ,dt=0.01, kf=1, w_ms = 1):
self.id = id
self.parm = {'kf': kf, 'u1':param[0], 'u2':param[1], 'v1':param[2], 'v2':param[3],
'y1':param[4], 'y2':param[5], 'o':param[6], 'gain':param[7], 'bias':param[8]}
self.w_ms = w_ms
osillator = matsuoka_oscillator(self.parm['kf'], dt=dt)
self.osillator_fun = osillator.oscillator_fun
self.master_nuron = master_nuron
def next_output(self, f1, f2 ):
if self.master_nuron is not None:
s1 = self.w_ms * self.master_nuron.parm['u1']
s2 = self.w_ms * self.master_nuron.parm['u2']
else:
s1 = 0
s2 = 0
self.parm['u1'],self.parm['u2'], self.parm['v1'], self.parm['v2'], self.parm['y1'], self.parm['y2'], self.parm['o'] = \
self.osillator_fun(self.parm['u1'],self.parm['u2'], self.parm['v1'], self.parm['v2'], self.parm['y1'], self.parm['y2'],
f1, f2, s1, s2, self.parm['bias'], self.parm['gain'] )
```
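A minimal usage sketch for the `matsuoka_oscillator` class above; the initial state, step count, and zero feedback/coupling inputs are illustrative assumptions rather than values taken from the repository.
```python
# Illustrative only: drive a single Matsuoka oscillator and collect its output.
osc = matsuoka_oscillator(kf=1, dt=0.01)

# State: membrane potentials (u1, u2), adaptation terms (v1, v2), firing rates (y1, y2).
u1, u2, v1, v2, y1, y2 = 0.1, 0.0, 0.0, 0.0, 0.0, 0.0
outputs = []
for _ in range(1000):  # 10 s of simulated time at dt = 0.01
    u1, u2, v1, v2, y1, y2, o = osc.oscillator_fun(
        u1, u2, v1, v2, y1, y2,
        f1=0.0, f2=0.0,   # no sensory feedback
        s1=0.0, s2=0.0,   # no master-neuron coupling
        bias=0.0, gain=1.0,
    )
    outputs.append(o)     # rhythmic output signal
```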
#### File: my_envs/mujoco/my_ant.py
```python
import numpy as np
from gym import utils
from my_envs.mujoco import mujoco_env
import os
reward_choice = os.getenv('REWARD_CHOICE')
class MyAntEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self):
self.Calc_Reward = self.reward_fun1
#mujoco_env.MujocoEnv.__init__(self, 'ant.xml', 1)
mujoco_env.MujocoEnv.__init__(self, 'ant.xml', 1)
utils.EzPickle.__init__(self)
def step(self, a):
xposbefore = self.get_body_com("torso")[0]
self.do_simulation(a, self.frame_skip)
xposafter = self.get_body_com("torso")[0]
forward_vel = (xposafter - xposbefore) / self.dt
v_commdand = 0
reward, other_rewards = self.Calc_Reward(forward_vel, v_commdand, a, None)
state = self.state_vector()
notdone = np.isfinite(state).all() \
and state[2] >= 0.2 and state[2] <= 1.0
done = not notdone
ob = self._get_obs()
return ob, reward, done, dict(
velocity_base=forward_vel,
commands=v_commdand,
rewards=other_rewards,
obs=ob)
def _get_obs(self):
return np.concatenate([
self.sim.data.qpos.flat[2:],
self.sim.data.qvel.flat,
np.clip(self.sim.data.cfrc_ext, -1, 1).flat,
])
def reset_model(self,command = None, reward_fun_choice = None):
global reward_choice
if reward_choice is None:
print('REWARD_CHOICE is not specified!')
reward_fun_choice_env = 1
else:
reward_fun_choice_env = int(reward_choice)
if reward_fun_choice is None:
reward_fun_choice = reward_fun_choice_env
if reward_fun_choice == 1:
self.Calc_Reward = self.reward_fun1
elif reward_fun_choice == 2:
self.Calc_Reward = self.reward_fun2
elif reward_fun_choice == 3:
self.Calc_Reward = self.reward_fun3
elif reward_fun_choice == 4:
self.Calc_Reward = self.reward_fun4
elif reward_fun_choice == 5:
self.Calc_Reward = self.reward_fun5
elif reward_fun_choice == 6:
self.Calc_Reward = self.reward_fun6
elif reward_fun_choice == 7:
self.Calc_Reward = self.reward_fun7
elif reward_fun_choice == 8:
self.Calc_Reward = self.reward_fun8
elif reward_fun_choice is None:
self.Calc_Reward = self.reward_fun1
reward_fun_choice = 1
else:
raise Exception('reward fun error!')
qpos = self.init_qpos + self.np_random.uniform(size=self.model.nq, low=-.1, high=.1)
qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1
self.set_state(qpos, qvel)
return self._get_obs()
def viewer_setup(self):
self.viewer.cam.distance = self.model.stat.extent * 0.5
def reward_fun1(self, forward_vel, v_commdand, action, obs):
forward_reward = forward_vel
ctrl_cost = .5 * np.square(action).sum()
contact_cost = 0.5 * 1e-3 * np.sum(
np.square(np.clip(self.sim.data.cfrc_ext, -1, 1)))
survive_reward = 1.0
reward = forward_reward - ctrl_cost - contact_cost + survive_reward
other_rewards = np.array([reward, forward_reward, -ctrl_cost, -contact_cost, survive_reward])
return reward, other_rewards
def reward_fun2(self, forward_vel, v_commdand, action, obs):
# print('reward2')
forward_reward = forward_vel
ctrl_cost = .5 * np.square(action).sum()
contact_cost = 0.5 * 1e-3 * np.sum(
np.square(np.clip(self.sim.data.cfrc_ext, -1, 1)))
survive_reward = 1.0
reward = forward_reward - ctrl_cost - contact_cost + survive_reward
other_rewards = np.array([reward, forward_reward, -ctrl_cost, -contact_cost, survive_reward])
return reward, other_rewards
```
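A short rollout sketch for `MyAntEnv`, assuming MuJoCo, `gym`, and the repository's `my_envs` package are installed; the import path, horizon, and random actions are illustrative assumptions.
```python
# Illustrative rollout; assumes MuJoCo and the my_envs package are importable.
from my_envs.mujoco.my_ant import MyAntEnv  # assumed import path

env = MyAntEnv()
obs = env.reset()                       # reset_model() also selects the reward function
for _ in range(100):                    # arbitrary horizon
    action = env.action_space.sample()  # random policy, just to exercise step()
    obs, reward, done, info = env.step(action)
    forward_vel = info["velocity_base"]  # per-step diagnostics returned by step()
    if done:
        obs = env.reset()
```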
#### File: my_baselines/test/test_fir.py
```python
import numpy as np
from scipy.signal import kaiserord, lfilter, firwin, freqz, lfilter_zi
class fir_filter(object):
def __init__(self, fs, cutoff, ripple_db):
self.fs = fs # sample_rate
# The Nyquist rate of the signal.
nyq_rate = self.fs / 2.0
# The desired width of the transition from pass to stop,
# relative to the Nyquist rate. We'll design the filter
# with a 5 Hz transition width.
width = 5.0 / nyq_rate
# The desired attenuation in the stop band, in dB.
        self.ripple_db = ripple_db
# Compute the order and Kaiser parameter for the FIR filter.
N, beta = kaiserord(ripple_db, width)
#print('N = ', N)
# The cutoff frequency of the filter.
self.cutoff = cutoff
# Use firwin with a Kaiser window to create a lowpass FIR filter.
self.taps = firwin(N, self.cutoff / nyq_rate, window=('kaiser', beta))
self.N = N
self.x_buffer = []
self.index = 0
self.zi = lfilter_zi(self.taps, 1.0)
self.init_done = False
def apply(self, x):
self.x_buffer.append(x)
if not self.init_done:
if self.index < self.N-1:
filtered_x = x
self.index += 1
else:
self.init_done = True
self.index =0
if self.init_done:
y = lfilter(self.taps, 1.0, np.array(self.x_buffer))
filtered_x = y[-1]
#
#
# if self.index < self.N - 1:
# filtered_x = x
# else:
# self.init_done = True
# # Use lfilter to filter x with the FIR filter.
# #y = lfilter(self.taps, 1.0, np.array(self.x_buffer))
# y, _ = lfilter(self.taps, 1.0, np.array(self.x_buffer), zi=self.zi * self.x_buffer[0])
# filtered_x = y[-1]
#
# if len(self.x_buffer) > self.N:
# del self.x_buffer[0]
# self.index += 1
return filtered_x
def reset(self):
self.x_buffer = []
self.index = 0
self.init_done = False
from utils.Logger import IO
from matplotlib.pylab import plt
rlt = IO('RewardDuty/fitness5_param.pkl').read_pickle()
(rewards, commands, v_e) = rlt
x = rewards[:,0]
T = 1000
fir = fir_filter(100,10,10)
filter_x_list = []
for i in range(T):
state = x[i]
state_e = fir.apply(state)
filter_x_list.append(state_e)
filter_x_list = np.array(filter_x_list)
N = fir.N
t = np.arange(0,10,0.01)
plt.plot(t, x[:T], 'b--')
#plot(y)
plt.plot(t, filter_x_list, 'r')
#plt.plot(t[N-1:] , filter_x_list[N-1:], 'g', linewidth=4)
plt.show()
```
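A self-contained sketch of the `fir_filter` class on synthetic data, since the pickle file loaded by the test script is not part of this excerpt; the signal parameters below are arbitrary.
```python
# Illustrative: filter a synthetic noisy 1 Hz sine with the fir_filter class above.
import numpy as np

fs = 100.0                                     # sample rate in Hz
t = np.arange(0.0, 10.0, 1.0 / fs)
x = np.sin(2 * np.pi * 1.0 * t) + 0.2 * np.random.randn(t.size)

filt = fir_filter(fs=100, cutoff=10, ripple_db=10)
filtered = np.array([filt.apply(sample) for sample in x])  # sample-by-sample filtering
```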
#### File: my_baselines/utils/instrument.py
```python
import os
import re
import subprocess
import base64
import os.path as osp
import pickle as pickle
import inspect
import hashlib
import sys
from contextlib import contextmanager
import errno
#from io import StringIO # python 3
#from StringIO import StringIO # python 2
import datetime
import dateutil.tz
import json
import time
import numpy as np
import collections
class IO:
def __init__(self, file_name):
self.file_name = file_name
def to_pickle(self, obj):
with open(self.file_name, 'wb') as output:
pickle.dump(obj, output, protocol=pickle.HIGHEST_PROTOCOL)
def read_pickle(self):
with open(self.file_name, 'rb') as input_:
obj = pickle.load(input_)
return obj
class StubBase(object):
def __getitem__(self, item):
return StubMethodCall(self, "__getitem__", args=[item], kwargs=dict())
def __getattr__(self, item):
try:
return super(self.__class__, self).__getattribute__(item)
except AttributeError:
if item.startswith("__") and item.endswith("__"):
raise
return StubAttr(self, item)
def __pow__(self, power, modulo=None):
return StubMethodCall(self, "__pow__", [power, modulo], dict())
def __call__(self, *args, **kwargs):
return StubMethodCall(self.obj, self.attr_name, args, kwargs)
def __add__(self, other):
return StubMethodCall(self, "__add__", [other], dict())
def __rmul__(self, other):
return StubMethodCall(self, "__rmul__", [other], dict())
def __div__(self, other):
return StubMethodCall(self, "__div__", [other], dict())
def __rdiv__(self, other):
return StubMethodCall(BinaryOp(), "rdiv", [self, other], dict()) # self, "__rdiv__", [other], dict())
def __rpow__(self, power, modulo=None):
return StubMethodCall(self, "__rpow__", [power, modulo], dict())
class StubAttr(StubBase):
def __init__(self, obj, attr_name):
self.__dict__["_obj"] = obj
self.__dict__["_attr_name"] = attr_name
@property
def obj(self):
return self.__dict__["_obj"]
@property
def attr_name(self):
return self.__dict__["_attr_name"]
def __str__(self):
return "StubAttr(%s, %s)" % (str(self.obj), str(self.attr_name))
class StubClass(StubBase):
def __init__(self, proxy_class):
self.proxy_class = proxy_class
def __call__(self, *args, **kwargs):
if len(args) > 0:
# Convert the positional arguments to keyword arguments
spec = inspect.getargspec(self.proxy_class.__init__)
kwargs = dict(list(zip(spec.args[1:], args)), **kwargs)
args = tuple()
return StubObject(self.proxy_class, *args, **kwargs)
def __getstate__(self):
return dict(proxy_class=self.proxy_class)
def __setstate__(self, dict):
self.proxy_class = dict["proxy_class"]
def __getattr__(self, item):
if hasattr(self.proxy_class, item):
return StubAttr(self, item)
raise AttributeError
def __str__(self):
return "StubClass(%s)" % self.proxy_class
class StubObject(StubBase):
def __init__(self, __proxy_class, *args, **kwargs):
if len(args) > 0:
spec = inspect.getargspec(__proxy_class.__init__)
kwargs = dict(list(zip(spec.args[1:], args)), **kwargs)
args = tuple()
self.proxy_class = __proxy_class
self.args = args
self.kwargs = kwargs
def __getstate__(self):
return dict(args=self.args, kwargs=self.kwargs, proxy_class=self.proxy_class)
def __setstate__(self, dict):
self.args = dict["args"]
self.kwargs = dict["kwargs"]
self.proxy_class = dict["proxy_class"]
def __getattr__(self, item):
        # why doesn't the commented code work?
        # return StubAttr(self, item)
        # checks bypassed to allow for accessing instance fields
if hasattr(self.proxy_class, item):
return StubAttr(self, item)
raise AttributeError('Cannot get attribute %s from %s' % (item, self.proxy_class))
def __str__(self):
return "StubObject(%s, *%s, **%s)" % (str(self.proxy_class), str(self.args), str(self.kwargs))
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
class VariantDict(AttrDict):
def __init__(self, d, hidden_keys):
super(VariantDict, self).__init__(d)
self._hidden_keys = hidden_keys
def dump(self):
return {k: v for k, v in self.items() if k not in self._hidden_keys}
class VariantGenerator(object):
"""
Usage:
vg = VariantGenerator()
vg.add("param1", [1, 2, 3])
vg.add("param2", ['x', 'y'])
vg.variants() => # all combinations of [1,2,3] x ['x','y']
Supports noncyclic dependency among parameters:
vg = VariantGenerator()
vg.add("param1", [1, 2, 3])
vg.add("param2", lambda param1: [param1+1, param1+2])
vg.variants() => # ..
"""
def __init__(self):
self._variants = []
self._populate_variants()
self._hidden_keys = []
for k, vs, cfg in self._variants:
if cfg.get("hide", False):
self._hidden_keys.append(k)
def add(self, key, vals, **kwargs):
self._variants.append((key, vals, kwargs))
def _populate_variants(self):
methods = inspect.getmembers(
self.__class__, predicate=lambda x: inspect.isfunction(x) or inspect.ismethod(x))
methods = [x[1].__get__(self, self.__class__)
for x in methods if getattr(x[1], '__is_variant', False)]
for m in methods:
self.add(m.__name__, m, **getattr(m, "__variant_config", dict()))
def variants(self, randomized=False):
ret = list(self.ivariants())
if randomized:
np.random.shuffle(ret)
return list(map(self.variant_dict, ret))
def variant_dict(self, variant):
return VariantDict(variant, self._hidden_keys)
def to_name_suffix(self, variant):
suffix = []
for k, vs, cfg in self._variants:
if not cfg.get("hide", False):
suffix.append(k + "_" + str(variant[k]))
return "_".join(suffix)
def ivariants(self):
dependencies = list()
for key, vals, _ in self._variants:
if hasattr(vals, "__call__"):
args = inspect.getargspec(vals).args
if hasattr(vals, 'im_self') or hasattr(vals, "__self__"):
# remove the first 'self' parameter
args = args[1:]
dependencies.append((key, set(args)))
else:
dependencies.append((key, set()))
sorted_keys = []
# topo sort all nodes
while len(sorted_keys) < len(self._variants):
# get all nodes with zero in-degree
free_nodes = [k for k, v in dependencies if len(v) == 0]
if len(free_nodes) == 0:
error_msg = "Invalid parameter dependency: \n"
for k, v in dependencies:
if len(v) > 0:
error_msg += k + " depends on " + " & ".join(v) + "\n"
raise ValueError(error_msg)
dependencies = [(k, v)
for k, v in dependencies if k not in free_nodes]
# remove the free nodes from the remaining dependencies
for _, v in dependencies:
v.difference_update(free_nodes)
sorted_keys += free_nodes
return self._ivariants_sorted(sorted_keys)
def _ivariants_sorted(self, sorted_keys):
if len(sorted_keys) == 0:
yield dict()
else:
first_keys = sorted_keys[:-1]
first_variants = self._ivariants_sorted(first_keys)
last_key = sorted_keys[-1]
last_vals = [v for k, v, _ in self._variants if k == last_key][0]
if hasattr(last_vals, "__call__"):
last_val_keys = inspect.getargspec(last_vals).args
if hasattr(last_vals, 'im_self') or hasattr(last_vals, '__self__'):
last_val_keys = last_val_keys[1:]
else:
last_val_keys = None
for variant in first_variants:
if hasattr(last_vals, "__call__"):
last_variants = last_vals(
**{k: variant[k] for k in last_val_keys})
for last_choice in last_variants:
yield AttrDict(variant, **{last_key: last_choice})
else:
for last_choice in last_vals:
yield AttrDict(variant, **{last_key: last_choice})
def variant(*args, **kwargs):
def _variant(fn):
fn.__is_variant = True
fn.__variant_config = kwargs
return fn
if len(args) == 1 and isinstance(args[0], collections.Callable):
return _variant(args[0])
return _variant
def stub(glbs):
# replace the __init__ method in all classes
# hacky!!!
for k, v in list(glbs.items()):
# look at all variables that are instances of a class (not yet Stub)
if isinstance(v, type) and v != StubClass:
            glbs[k] = StubClass(v)  # and replace it with the same class wrapped as a Stub
def query_yes_no(question, default="yes"):
"""Ask a yes/no question via raw_input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is True for "yes" or False for "no".
"""
valid = {"yes": True, "y": True, "ye": True,
"no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
exp_count = 0
now = datetime.datetime.now(dateutil.tz.tzlocal())
timestamp = now.strftime('%Y_%m_%d_%H_%M_%S')
remote_confirmed = False
```
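A small sketch of `VariantGenerator`, mirroring the usage shown in its docstring; the parameter names and values are illustrative.
```python
# Illustrative VariantGenerator usage, including a dependent parameter.
vg = VariantGenerator()
vg.add("seed", [1, 2, 3])
vg.add("lr", [1e-2, 1e-3])
vg.add("run_name", lambda seed: ["run_%d" % seed])  # computed from "seed"
for v in vg.variants():
    print(vg.to_name_suffix(v), v["seed"], v["lr"], v["run_name"])
```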
|
{
"source": "jerry-xiazj/EfficientDet",
"score": 2
}
|
#### File: jerry-xiazj/EfficientDet/config.py
```python
from easydict import EasyDict as edict
CFG = edict()
# model name.
CFG.name = 'efficientdet-d1'
# activation type: see activation_fn in utils.py.
CFG.act_type = 'swish'
# input preprocessing parameters
CFG.image_size = 640 # An integer or a string WxH such as 640x320.
CFG.input_rand_hflip = True
CFG.train_scale_min = 0.1
CFG.train_scale_max = 2.0
CFG.autoaugment_policy = None
# dataset specific parameters
CFG.num_classes = 90
CFG.skip_crowd_during_training = True
CFG.label_id_mapping = None
# model architecture
CFG.min_level = 3
CFG.max_level = 7
CFG.num_scales = 3
CFG.aspect_ratios = [(1.0, 1.0), (1.4, 0.7), (0.7, 1.4)]
CFG.anchor_scale = 4.0
# is batchnorm training mode
CFG.is_training_bn = True
# optimization
CFG.momentum = 0.9
CFG.learning_rate = 0.08
CFG.lr_warmup_init = 0.008
CFG.lr_warmup_epoch = 1.0
CFG.first_lr_drop_epoch = 200.0
CFG.second_lr_drop_epoch = 250.0
CFG.poly_lr_power = 0.9
CFG.clip_gradients_norm = 10.0
CFG.num_epochs = 300
# classification loss
CFG.alpha = 0.25
CFG.gamma = 1.5
# localization loss
CFG.delta = 0.1
CFG.box_loss_weight = 50.0
# regularization l2 loss.
CFG.weight_decay = 4e-5
# enable bfloat
CFG.use_tpu = True
# precision: one of 'float32', 'mixed_float16', 'mixed_bfloat16'.
CFG.precision = None # If None, use float32.
# For detection.
CFG.box_class_repeats = 3
CFG.fpn_cell_repeats = 3
CFG.fpn_num_filters = 88
CFG.separable_conv = True
CFG.apply_bn_for_resampling = True
CFG.conv_after_downsample = False
CFG.conv_bn_act_pattern = False
CFG.use_native_resize_op = False
CFG.pooling_type = None
# version.
CFG.fpn_name = None
CFG.fpn_config = None
# No stochastic depth in default.
CFG.survival_prob = None
CFG.lr_decay_method = 'cosine'
CFG.moving_average_decay = 0.9998
CFG.ckpt_var_scope = None # ckpt variable scope.
# exclude vars when loading pretrained ckpts.
CFG.var_exclude_expr = '.*/class-predict/.*' # exclude class weights in default
CFG.backbone_name = 'efficientnet-b1'
CFG.backbone_config = None
CFG.var_freeze_expr = None
# RetinaNet.
CFG.resnet_depth = 50
# def get_efficientdet_config(model_name='efficientdet-d1'):
# """Get the default config for EfficientDet based on model name."""
# h = default_detection_configs()
# CFG.override(efficientdet_model_param_dict[model_name])
# return h
# def get_detection_config(model_name):
# if model_name.startswith('efficientdet'):
# return get_efficientdet_config(model_name)
# else:
# raise ValueError('model name must start with efficientdet.')
```
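Because `CFG` is an `EasyDict`, entries can be read and overridden with plain attribute access; a brief sketch, assuming the module is importable as `config` and with arbitrary override values.
```python
# Illustrative config overrides (values are arbitrary examples).
from config import CFG

print(CFG.name, CFG.image_size)  # attribute access reads the defaults above
CFG.num_classes = 20             # e.g. adapt to a custom dataset
CFG.image_size = 512             # also visible as CFG['image_size']
```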
#### File: EfficientDet/core/utils.py
```python
import tensorflow as tf
from typing import Text, Tuple, Union
import math
import numpy as np
def round_filters(filters, global_params):
"""Round number of filters based on depth multiplier."""
multiplier = global_params.width_coefficient
divisor = global_params.depth_divisor
min_depth = global_params.min_depth
if not multiplier:
return filters
filters *= multiplier
min_depth = min_depth or divisor
new_filters = max(min_depth,
int(filters + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_filters < 0.9 * filters:
new_filters += divisor
return int(new_filters)
def round_repeats(repeats, global_params):
"""Round number of filters based on depth multiplier."""
multiplier = global_params.depth_coefficient
if not multiplier:
return repeats
return int(math.ceil(multiplier * repeats))
def drop_connect(inputs, is_training, survival_prob):
"""Drop the entire conv with given survival probability."""
# "Deep Networks with Stochastic Depth", https://arxiv.org/pdf/1603.09382.pdf
if not is_training:
return inputs
# Compute tensor.
batch_size = tf.shape(inputs)[0]
random_tensor = survival_prob + tf.random.uniform([batch_size, 1, 1, 1],
dtype=inputs.dtype)
binary_tensor = tf.floor(random_tensor)
    # Unlike the conventional approach of multiplying by survival_prob at test
    # time, we divide by survival_prob at training time, so that no additional
    # compute is needed at test time.
output = tf.divide(inputs, survival_prob) * binary_tensor
return output
def activation_fn(features: tf.Tensor, act_type: Text):
"""Customized non-linear activation type."""
if act_type == 'swish':
return tf.nn.swish(features)
elif act_type == 'swish_native':
return features * tf.sigmoid(features)
elif act_type == 'relu':
return tf.nn.relu(features)
elif act_type == 'relu6':
return tf.nn.relu6(features)
else:
raise ValueError('Unsupported act_type {}'.format(act_type))
def batch_norm_act(inputs,
is_training_bn: bool,
act_type: Union[Text, None],
init_zero: bool = False,
momentum: float = 0.99,
epsilon: float = 1e-3,
name: Text = None):
"""Performs a batch normalization followed by a non-linear activation.
Args:
inputs: `Tensor` of shape `[batch, channels, ...]`.
is_training_bn: `bool` for whether the model is training.
act_type: non-linear relu function type. If None, omits the relu operation.
init_zero: `bool` if True, initializes scale parameter of batch
normalization with 0 instead of 1 (default).
      momentum: `float`, momentum of batch norm.
epsilon: `float`, small value for numerical stability.
name: the name of the batch normalization layer
Returns:
A normalized `Tensor` with the same `data_format`.
"""
if init_zero:
gamma_initializer = tf.zeros_initializer()
else:
gamma_initializer = tf.ones_initializer()
    inputs = tf.keras.layers.BatchNormalization(
        momentum=momentum,
        epsilon=epsilon,
        center=True,
        scale=True,
        gamma_initializer=gamma_initializer,
        name=name)(inputs, training=is_training_bn)
if act_type:
inputs = activation_fn(inputs, act_type)
return inputs
def get_feat_sizes(image_size: Tuple[int, int],
max_level: int):
"""Get feat widths and heights for all levels.
Args:
image_size: A tuple (H, W).
max_level: maximum feature level.
Returns:
feat_sizes: a list of tuples (height, width) for each level.
"""
feat_sizes = [{'height': image_size[0], 'width': image_size[1]}]
feat_size = image_size
for _ in range(1, max_level + 1):
feat_size = ((feat_size[0] - 1) // 2 + 1, (feat_size[1] - 1) // 2 + 1)
feat_sizes.append({'height': feat_size[0], 'width': feat_size[1]})
return feat_sizes
```
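A quick sanity-check sketch for `get_feat_sizes`, which is pure Python; the 640x640 input matches `CFG.image_size` in the config above.
```python
# Feature map sizes halve at each level: 640 -> 320 -> 160 -> 80 -> 40 -> 20 -> 10 -> 5.
sizes = get_feat_sizes(image_size=(640, 640), max_level=7)
for level, size in enumerate(sizes):
    print(level, size)
# e.g. sizes[3] == {'height': 80, 'width': 80}, sizes[7] == {'height': 5, 'width': 5}
```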
#### File: jerry-xiazj/EfficientDet/efficientNet_builder.py
```python
from core import EfficientNet
import tensorflow as tf
_DEFAULT_BLOCKS_ARGS = [
EfficientNet.BlockArgs(
        kernel_size=3, num_repeat=1, input_filters=32, output_filters=16,
expand_ratio=1, strides=[1, 1], se_ratio=0.25),
EfficientNet.BlockArgs(
kernel_size=3, num_repeat=2, input_filters=16, output_filters=24,
expand_ratio=6, strides=[2, 2], se_ratio=0.25),
EfficientNet.BlockArgs(
kernel_size=5, num_repeat=2, input_filters=24, output_filters=40,
expand_ratio=6, strides=[2, 2], se_ratio=0.25),
EfficientNet.BlockArgs(
kernel_size=3, num_repeat=3, input_filters=40, output_filters=80,
expand_ratio=6, strides=[2, 2], se_ratio=0.25),
EfficientNet.BlockArgs(
kernel_size=5, num_repeat=3, input_filters=80, output_filters=112,
expand_ratio=6, strides=[1, 1], se_ratio=0.25),
EfficientNet.BlockArgs(
kernel_size=5, num_repeat=4, input_filters=112, output_filters=192,
expand_ratio=6, strides=[2, 2], se_ratio=0.25),
EfficientNet.BlockArgs(
kernel_size=3, num_repeat=1, input_filters=192, output_filters=320,
expand_ratio=6, strides=[1, 1], se_ratio=0.25)
]
params_dict = {
# (width_coefficient, depth_coefficient, resolution, dropout_rate)
'efficientnet-b0': (1.0, 1.0, 224, 0.2),
'efficientnet-b1': (1.0, 1.1, 240, 0.2),
'efficientnet-b2': (1.1, 1.2, 260, 0.3),
'efficientnet-b3': (1.2, 1.4, 300, 0.3),
'efficientnet-b4': (1.4, 1.8, 380, 0.4),
'efficientnet-b5': (1.6, 2.2, 456, 0.4),
'efficientnet-b6': (1.8, 2.6, 528, 0.5),
'efficientnet-b7': (2.0, 3.1, 600, 0.5),
'efficientnet-b8': (2.2, 3.6, 672, 0.5),
'efficientnet-l2': (4.3, 5.3, 800, 0.5),
}
def build_model(images, name, training, features_only=False, pooled_features_only=False):
assert name in params_dict
(width_coefficient, depth_coefficient, resolution, dropout_rate) = params_dict[name]
global_params = EfficientNet.GlobalParams(
blocks_args=_DEFAULT_BLOCKS_ARGS,
batch_norm_momentum=0.99,
batch_norm_epsilon=1e-3,
dropout_rate=dropout_rate,
survival_prob=0.8,
num_classes=1000,
width_coefficient=width_coefficient,
depth_coefficient=depth_coefficient,
depth_divisor=8,
min_depth=None,
relu_fn=tf.nn.swish,
batch_norm=tf.keras.layers.BatchNormalization,
use_se=True
)
model = EfficientNet.Model(_DEFAULT_BLOCKS_ARGS, global_params, name)
outputs = model(
images,
training=training,
features_only=features_only,
pooled_features_only=pooled_features_only
)
if features_only:
outputs = tf.identity(outputs, 'features')
elif pooled_features_only:
outputs = tf.identity(outputs, 'pooled_features')
else:
outputs = tf.identity(outputs, 'logits')
return outputs, model.endpoints
```
|
{
"source": "jerryxu4j/odoo-docker-build",
"score": 2
}
|
#### File: migrations/13.0.4.0.0/post-migration.py
```python
from odoo import SUPERUSER_ID, api
from odoo.tools.sql import column_exists
def migrate(cr, version=None):
env = api.Environment(cr, SUPERUSER_ID, {})
if column_exists(cr, "product_template", "purchase_request"):
_migrate_purchase_request_to_property(env)
def _migrate_purchase_request_to_property(env):
"""Create properties for all products with the flag set on all companies"""
env.cr.execute("select id, coalesce(purchase_request, False) from product_template")
values = dict(env.cr.fetchall())
for company in env["res.company"].with_context(active_test=False).search([]):
env["ir.property"].with_context(force_company=company.id).set_multi(
"purchase_request", "product.template", values, False,
)
env.cr.execute("alter table product_template drop column purchase_request")
```
#### File: purchase_request/models/purchase_order.py
```python
from odoo import _, api, exceptions, fields, models
class PurchaseOrder(models.Model):
_inherit = "purchase.order"
def _purchase_request_confirm_message_content(self, request, request_dict=None):
self.ensure_one()
if not request_dict:
request_dict = {}
title = _("Order confirmation %s for your Request %s") % (
self.name,
request.name,
)
message = "<h3>%s</h3><ul>" % title
message += _(
"The following requested items from Purchase Request %s "
"have now been confirmed in Purchase Order %s:"
) % (request.name, self.name)
for line in request_dict.values():
message += _(
"<li><b>%s</b>: Ordered quantity %s %s, Planned date %s</li>"
) % (
line["name"],
line["product_qty"],
line["product_uom"],
line["date_planned"],
)
message += "</ul>"
return message
def _purchase_request_confirm_message(self):
request_obj = self.env["purchase.request"]
for po in self:
requests_dict = {}
for line in po.order_line:
for request_line in line.sudo().purchase_request_lines:
request_id = request_line.request_id.id
if request_id not in requests_dict:
requests_dict[request_id] = {}
date_planned = "%s" % line.date_planned
data = {
"name": request_line.name,
"product_qty": line.product_qty,
"product_uom": line.product_uom.name,
"date_planned": date_planned,
}
requests_dict[request_id][request_line.id] = data
for request_id in requests_dict:
request = request_obj.sudo().browse(request_id)
message = po._purchase_request_confirm_message_content(
request, requests_dict[request_id]
)
if message is not None: # override preparation method to avoid email
request.message_post(body=message, subtype="mail.mt_comment")
return True
def _purchase_request_line_check(self):
for po in self:
for line in po.order_line:
for request_line in line.purchase_request_lines:
if request_line.sudo().purchase_state == "done":
raise exceptions.UserError(
_("Purchase Request %s has already been completed")
% request_line.request_id.name
)
return True
def button_confirm(self):
self._purchase_request_line_check()
res = super(PurchaseOrder, self).button_confirm()
self._purchase_request_confirm_message()
return res
def unlink(self):
alloc_to_unlink = self.env["purchase.request.allocation"]
for rec in self:
for alloc in (
rec.order_line.mapped("purchase_request_lines")
.mapped("purchase_request_allocation_ids")
.filtered(lambda alloc: alloc.purchase_line_id.order_id.id == rec.id)
):
alloc_to_unlink += alloc
res = super().unlink()
alloc_to_unlink.unlink()
return res
class PurchaseOrderLine(models.Model):
_inherit = "purchase.order.line"
purchase_request_lines = fields.Many2many(
comodel_name="purchase.request.line",
relation="purchase_request_purchase_order_line_rel",
column1="purchase_order_line_id",
column2="purchase_request_line_id",
string="Purchase Request Lines",
readonly=True,
copy=False,
)
purchase_request_allocation_ids = fields.One2many(
comodel_name="purchase.request.allocation",
inverse_name="purchase_line_id",
string="Purchase Request Allocation",
copy=False,
)
def action_openRequestLineTreeView(self):
"""
:return dict: dictionary value for created view
"""
request_line_ids = []
for line in self:
request_line_ids += line.purchase_request_lines.ids
domain = [("id", "in", request_line_ids)]
return {
"name": _("Purchase Request Lines"),
"type": "ir.actions.act_window",
"res_model": "purchase.request.line",
"view_mode": "tree,form",
"domain": domain,
}
def _prepare_stock_moves(self, picking):
self.ensure_one()
val = super(PurchaseOrderLine, self)._prepare_stock_moves(picking)
all_list = []
for v in val:
all_ids = self.env["purchase.request.allocation"].search(
[("purchase_line_id", "=", v["purchase_line_id"])]
)
for all_id in all_ids:
all_list.append((4, all_id.id))
v["purchase_request_allocation_ids"] = all_list
return val
def update_service_allocations(self, prev_qty_received):
for rec in self:
allocation = self.env["purchase.request.allocation"].search(
[
("purchase_line_id", "=", rec.id),
("purchase_line_id.product_id.type", "=", "service"),
]
)
if not allocation:
return
qty_left = rec.qty_received - prev_qty_received
for alloc in allocation:
allocated_product_qty = alloc.allocated_product_qty
if not qty_left:
alloc.purchase_request_line_id._compute_qty()
break
if alloc.open_product_qty <= qty_left:
allocated_product_qty += alloc.open_product_qty
qty_left -= alloc.open_product_qty
alloc._notify_allocation(alloc.open_product_qty)
else:
allocated_product_qty += qty_left
alloc._notify_allocation(qty_left)
qty_left = 0
alloc.write({"allocated_product_qty": allocated_product_qty})
message_data = self._prepare_request_message_data(
alloc, alloc.purchase_request_line_id, allocated_product_qty
)
message = self._purchase_request_confirm_done_message_content(
message_data
)
if message is not None: # override preparation method to avoid email
alloc.purchase_request_line_id.request_id.message_post(
body=message, subtype="mail.mt_comment"
)
alloc.purchase_request_line_id._compute_qty()
return True
@api.model
def _purchase_request_confirm_done_message_content(self, message_data):
title = _("Service confirmation for Request %s") % (
message_data["request_name"]
)
message = "<h3>%s</h3>" % title
message += _(
"The following requested services from Purchase"
" Request %s requested by %s "
"have now been received:"
) % (message_data["request_name"], message_data["requestor"])
message += "<ul>"
message += _("<li><b>%s</b>: Received quantity %s %s</li>") % (
message_data["product_name"],
message_data["product_qty"],
message_data["product_uom"],
)
message += "</ul>"
return message
def _prepare_request_message_data(self, alloc, request_line, allocated_qty):
return {
"request_name": request_line.request_id.name,
"product_name": request_line.product_id.name_get()[0][1],
"product_qty": allocated_qty,
"product_uom": alloc.product_uom_id.name,
"requestor": request_line.request_id.requested_by.partner_id.name,
}
def write(self, vals):
        # As services do not generate stock moves, this tweak is required
        # to allocate them.
prev_qty_received = {}
if vals.get("qty_received", False):
service_lines = self.filtered(lambda l: l.product_id.type == "service")
for line in service_lines:
prev_qty_received[line.id] = line.qty_received
res = super(PurchaseOrderLine, self).write(vals)
if prev_qty_received:
for line in service_lines:
line.update_service_allocations(prev_qty_received[line.id])
return res
```
#### File: purchase_request/tests/test_purchase_request_allocation.py
```python
from odoo.tests import common
from odoo.tools import SUPERUSER_ID
class TestPurchaseRequestToRfq(common.TransactionCase):
def setUp(self):
super(TestPurchaseRequestToRfq, self).setUp()
self.purchase_request = self.env["purchase.request"]
self.purchase_request_line = self.env["purchase.request.line"]
self.wiz = self.env["purchase.request.line.make.purchase.order"]
self.purchase_order = self.env["purchase.order"]
vendor = self.env["res.partner"].create({"name": "Partner #2"})
self.service_product = self.env["product.product"].create(
{"name": "Product Service Test", "type": "service"}
)
self.product_product = self.env["product.product"].create(
{
"name": "Product Product Test",
"type": "product",
"description_purchase": "Test Description",
}
)
self.env["product.supplierinfo"].create(
{
"name": vendor.id,
"product_tmpl_id": self.service_product.product_tmpl_id.id,
}
)
self.env["product.supplierinfo"].create(
{
"name": vendor.id,
"product_tmpl_id": self.product_product.product_tmpl_id.id,
}
)
def test_purchase_request_allocation(self):
vals = {
"picking_type_id": self.env.ref("stock.picking_type_in").id,
"requested_by": SUPERUSER_ID,
}
purchase_request1 = self.purchase_request.create(vals)
vals = {
"request_id": purchase_request1.id,
"product_id": self.product_product.id,
"product_uom_id": self.env.ref("uom.product_uom_unit").id,
"product_qty": 2.0,
}
purchase_request_line1 = self.purchase_request_line.create(vals)
vals = {
"picking_type_id": self.env.ref("stock.picking_type_in").id,
"requested_by": SUPERUSER_ID,
}
purchase_request2 = self.purchase_request.create(vals)
vals = {
"request_id": purchase_request1.id,
"product_id": self.product_product.id,
"product_uom_id": self.env.ref("uom.product_uom_unit").id,
"product_qty": 2.0,
}
purchase_request_line2 = self.purchase_request_line.create(vals)
purchase_request1.button_approved()
purchase_request2.button_approved()
purchase_request1.action_view_purchase_request_line()
vals = {"supplier_id": self.env.ref("base.res_partner_1").id}
wiz_id = self.wiz.with_context(
active_model="purchase.request.line",
active_ids=[purchase_request_line1.id, purchase_request_line2.id],
).create(vals)
wiz_id.make_purchase_order()
purchase_request1.action_view_purchase_order()
po_line = purchase_request_line1.purchase_lines[0]
purchase = po_line.order_id
purchase.order_line.action_openRequestLineTreeView()
purchase.button_confirm()
purchase_request1.action_view_stock_move()
self.assertEqual(purchase_request_line1.qty_in_progress, 2.0)
self.assertEqual(purchase_request_line2.qty_in_progress, 2.0)
picking = purchase.picking_ids[0]
picking.move_line_ids[0].write({"qty_done": 2.0})
backorder_wiz_id = picking.button_validate()["res_id"]
backorder_wiz = self.env["stock.backorder.confirmation"].browse(
[backorder_wiz_id]
)
backorder_wiz.process()
self.assertEqual(purchase_request_line1.qty_done, 2.0)
self.assertEqual(purchase_request_line2.qty_done, 0.0)
backorder_picking = purchase.picking_ids.filtered(lambda p: p.id != picking.id)
backorder_picking.move_line_ids[0].write({"qty_done": 1.0})
backorder_wiz_id2 = backorder_picking.button_validate()["res_id"]
backorder_wiz2 = self.env["stock.backorder.confirmation"].browse(
[backorder_wiz_id2]
)
backorder_wiz2.process()
self.assertEqual(purchase_request_line1.qty_done, 2.0)
self.assertEqual(purchase_request_line2.qty_done, 1.0)
for pick in purchase.picking_ids:
if pick.state == "assigned":
pick.action_cancel()
self.assertEqual(purchase_request_line1.qty_cancelled, 0.0)
self.assertEqual(purchase_request_line2.qty_cancelled, 1.0)
self.assertEqual(purchase_request_line1.pending_qty_to_receive, 0.0)
self.assertEqual(purchase_request_line2.pending_qty_to_receive, 1.0)
def test_purchase_request_allocation_services(self):
vals = {
"picking_type_id": self.env.ref("stock.picking_type_in").id,
"requested_by": SUPERUSER_ID,
"assigned_to": SUPERUSER_ID,
}
purchase_request1 = self.purchase_request.create(vals)
vals = {
"request_id": purchase_request1.id,
"product_id": self.service_product.id,
"product_uom_id": self.env.ref("uom.product_uom_unit").id,
"product_qty": 2.0,
}
purchase_request_line1 = self.purchase_request_line.create(vals)
vals = {"supplier_id": self.env.ref("base.res_partner_1").id}
purchase_request1.button_approved()
purchase_request1.action_view_purchase_request_line()
wiz_id = self.wiz.with_context(
active_model="purchase.request.line", active_ids=[purchase_request_line1.id]
).create(vals)
wiz_id.make_purchase_order()
purchase_request1.action_view_purchase_order()
po_line = purchase_request_line1.purchase_lines[0]
purchase = po_line.order_id
purchase.button_confirm()
self.assertEqual(purchase_request_line1.qty_in_progress, 2.0)
purchase_request1.action_view_stock_move()
# manually set in the PO line
po_line.write({"qty_received": 0.5})
self.assertEqual(purchase_request_line1.qty_done, 0.5)
purchase.button_cancel()
self.assertEqual(purchase_request_line1.qty_cancelled, 1.5)
self.assertEqual(purchase_request_line1.pending_qty_to_receive, 1.5)
        # Case: receive 2 products
vals = {
"picking_type_id": self.env.ref("stock.picking_type_in").id,
"requested_by": SUPERUSER_ID,
"assigned_to": SUPERUSER_ID,
}
purchase_request2 = self.purchase_request.create(vals)
vals = {
"request_id": purchase_request2.id,
"product_id": self.service_product.id,
"product_uom_id": self.env.ref("uom.product_uom_unit").id,
"product_qty": 2.0,
}
purchase_request_line2 = self.purchase_request_line.create(vals)
vals = {"supplier_id": self.env.ref("base.res_partner_1").id}
purchase_request2.button_approved()
purchase_request2.action_view_purchase_request_line()
wiz_id = self.wiz.with_context(
active_model="purchase.request.line", active_ids=[purchase_request_line2.id]
).create(vals)
wiz_id.make_purchase_order()
purchase_request2.action_view_purchase_order()
po_line = purchase_request_line2.purchase_lines[0]
purchase2 = po_line.order_id
purchase2.button_confirm()
self.assertEqual(purchase_request_line2.qty_in_progress, 2.0)
purchase_request1.action_view_stock_move()
# manually set in the PO line
po_line.write({"qty_received": 2.0})
self.assertEqual(purchase_request_line2.qty_done, 2.0)
def test_purchase_request_allocation_min_qty(self):
vals = {
"picking_type_id": self.env.ref("stock.picking_type_in").id,
"requested_by": SUPERUSER_ID,
}
purchase_request1 = self.purchase_request.create(vals)
vals = {
"request_id": purchase_request1.id,
"product_id": self.product_product.id,
"product_uom_id": self.env.ref("uom.product_uom_unit").id,
"product_qty": 2.0,
}
purchase_request_line1 = self.purchase_request_line.create(vals)
# add a vendor
vendor1 = self.env.ref("base.res_partner_1")
self.env["product.supplierinfo"].create(
{
"name": vendor1.id,
"product_tmpl_id": self.product_product.product_tmpl_id.id,
"min_qty": 8,
}
)
vals = {"supplier_id": self.env.ref("base.res_partner_1").id}
purchase_request1.button_approved()
wiz_id = self.wiz.with_context(
active_model="purchase.request.line", active_ids=[purchase_request_line1.id]
).create(vals)
wiz_id.make_purchase_order()
self.assertEqual(
purchase_request_line1.purchase_request_allocation_ids[0].open_product_qty,
2.0,
)
def test_purchase_request_stock_allocation(self):
product = self.env.ref("product.product_product_6")
product.uom_po_id = self.env.ref("uom.product_uom_dozen")
vals = {
"picking_type_id": self.env.ref("stock.picking_type_in").id,
"requested_by": SUPERUSER_ID,
}
purchase_request = self.purchase_request.create(vals)
vals = {
"request_id": purchase_request.id,
"product_id": product.id,
"product_uom_id": self.env.ref("uom.product_uom_unit").id,
"product_qty": 12.0,
}
purchase_request_line1 = self.purchase_request_line.create(vals)
vals = {
"request_id": purchase_request.id,
"product_id": product.id,
"product_uom_id": self.env.ref("uom.product_uom_dozen").id,
"product_qty": 1,
}
purchase_request_line2 = self.purchase_request_line.create(vals)
vals = {"supplier_id": self.env.ref("base.res_partner_1").id}
purchase_request.button_approved()
wiz_id = self.wiz.with_context(
active_model="purchase.request.line",
active_ids=[purchase_request_line1.id, purchase_request_line2.id],
).create(vals)
# Create PO
wiz_id.make_purchase_order()
po_line = purchase_request_line1.purchase_lines[0]
self.assertEquals(po_line.product_qty, 2, "Quantity should be 2")
self.assertEquals(
po_line.product_uom,
self.env.ref("uom.product_uom_dozen"),
"The purchase UoM should be Dozen(s).",
)
self.assertEquals(
purchase_request_line1.purchase_request_allocation_ids[
0
].requested_product_uom_qty,
12.0,
)
self.assertEquals(
purchase_request_line2.purchase_request_allocation_ids[
0
].requested_product_uom_qty,
1.0,
)
purchase = po_line.order_id
        # Cancel PO: the allocation's open quantity is set to 0.
purchase.button_cancel()
self.assertEquals(
purchase_request_line1.purchase_request_allocation_ids[0].open_product_qty,
0,
)
self.assertEquals(
purchase_request_line2.purchase_request_allocation_ids[0].open_product_qty,
0,
)
        # Set back to draft: the allocation's open quantity is restored
purchase.button_draft()
self.assertEquals(
purchase_request_line1.purchase_request_allocation_ids[0].open_product_qty,
12.0,
)
self.assertEquals(
purchase_request_line2.purchase_request_allocation_ids[0].open_product_qty,
1.0,
)
purchase.button_confirm()
picking = purchase.picking_ids[0]
picking.move_line_ids[0].write({"qty_done": 24.0})
picking.button_validate()
self.assertEquals(
purchase_request_line1.purchase_request_allocation_ids[
0
].allocated_product_qty,
purchase_request_line1.purchase_request_allocation_ids[
0
].requested_product_uom_qty,
)
self.assertEquals(
purchase_request_line2.purchase_request_allocation_ids[
0
].allocated_product_qty,
purchase_request_line2.purchase_request_allocation_ids[
0
].requested_product_uom_qty,
)
def test_purchase_request_stock_allocation_unlink(self):
product = self.env.ref("product.product_product_6")
product.uom_po_id = self.env.ref("uom.product_uom_dozen")
vals = {
"picking_type_id": self.env.ref("stock.picking_type_in").id,
"requested_by": SUPERUSER_ID,
}
purchase_request = self.purchase_request.create(vals)
vals = {
"request_id": purchase_request.id,
"product_id": product.id,
"product_uom_id": self.env.ref("uom.product_uom_unit").id,
"product_qty": 12.0,
}
purchase_request_line1 = self.purchase_request_line.create(vals)
vals = {"supplier_id": self.env.ref("base.res_partner_1").id}
purchase_request.button_approved()
wiz_id = self.wiz.with_context(
active_model="purchase.request.line", active_ids=[purchase_request_line1.id]
).create(vals)
# Create PO
wiz_id.make_purchase_order()
po_line = purchase_request_line1.purchase_lines[0]
self.assertEquals(
purchase_request_line1.purchase_request_allocation_ids[
0
].requested_product_uom_qty,
12.0,
)
purchase = po_line.order_id
purchase.button_cancel()
# Delete PO: allocation and Purchase Order Lines are unlinked from PRL
purchase.unlink()
self.assertEquals(len(purchase_request_line1.purchase_lines), 0)
self.assertEquals(
len(purchase_request_line1.purchase_request_allocation_ids), 0
)
def test_onchange_product_id(self):
vals = {
"picking_type_id": self.env.ref("stock.picking_type_in").id,
"requested_by": SUPERUSER_ID,
}
purchase_request1 = self.purchase_request.create(vals)
vals = {
"request_id": purchase_request1.id,
"product_id": self.product_product.id,
"product_uom_id": self.env.ref("uom.product_uom_unit").id,
"product_qty": 2.0,
}
purchase_request_line1 = self.purchase_request_line.create(vals)
purchase_request_line1.onchange_product_id()
def test_empty_records_for_company_constraint(self):
self.assertFalse(self.env["stock.move"]._check_company_purchase_request())
```
#### File: purchase_request/wizard/purchase_request_line_make_purchase_order.py
```python
from datetime import datetime
from odoo import _, api, fields, models
from odoo.exceptions import UserError
class PurchaseRequestLineMakePurchaseOrder(models.TransientModel):
_name = "purchase.request.line.make.purchase.order"
_description = "Purchase Request Line Make Purchase Order"
supplier_id = fields.Many2one(
comodel_name="res.partner",
string="Supplier",
required=True,
domain=[("is_company", "=", True)],
context={"res_partner_search_mode": "supplier", "default_is_company": True},
)
item_ids = fields.One2many(
comodel_name="purchase.request.line.make.purchase.order.item",
inverse_name="wiz_id",
string="Items",
)
purchase_order_id = fields.Many2one(
comodel_name="purchase.order",
string="Purchase Order",
domain=[("state", "=", "draft")],
)
sync_data_planned = fields.Boolean(
string="Merge on PO lines with equal Scheduled Date"
)
@api.model
def _prepare_item(self, line):
return {
"line_id": line.id,
"request_id": line.request_id.id,
"product_id": line.product_id.id,
"name": line.name or line.product_id.name,
"product_qty": line.pending_qty_to_receive,
"product_uom_id": line.product_uom_id.id,
}
@api.model
def _check_valid_request_line(self, request_line_ids):
picking_type = False
company_id = False
for line in self.env["purchase.request.line"].browse(request_line_ids):
if line.request_id.state == "done":
raise UserError(_("The purchase has already been completed."))
if line.request_id.state != "approved":
raise UserError(
_("Purchase Request %s is not approved") % line.request_id.name
)
if line.purchase_state == "done":
raise UserError(_("The purchase has already been completed."))
line_company_id = line.company_id and line.company_id.id or False
if company_id is not False and line_company_id != company_id:
raise UserError(_("You have to select lines from the same company."))
else:
company_id = line_company_id
line_picking_type = line.request_id.picking_type_id or False
if not line_picking_type:
raise UserError(_("You have to enter a Picking Type."))
if picking_type is not False and line_picking_type != picking_type:
raise UserError(
_("You have to select lines from the same Picking Type.")
)
else:
picking_type = line_picking_type
@api.model
def check_group(self, request_lines):
if len(list(set(request_lines.mapped("request_id.group_id")))) > 1:
raise UserError(
_(
"You cannot create a single purchase order from "
"purchase requests that have different procurement group."
)
)
@api.model
def get_items(self, request_line_ids):
request_line_obj = self.env["purchase.request.line"]
items = []
request_lines = request_line_obj.browse(request_line_ids)
self._check_valid_request_line(request_line_ids)
self.check_group(request_lines)
for line in request_lines:
items.append([0, 0, self._prepare_item(line)])
return items
@api.model
def default_get(self, fields):
res = super().default_get(fields)
active_model = self.env.context.get("active_model", False)
request_line_ids = []
if active_model == "purchase.request.line":
request_line_ids += self.env.context.get("active_ids", [])
elif active_model == "purchase.request":
request_ids = self.env.context.get("active_ids", False)
request_line_ids += (
self.env[active_model].browse(request_ids).mapped("line_ids.id")
)
if not request_line_ids:
return res
res["item_ids"] = self.get_items(request_line_ids)
request_lines = self.env["purchase.request.line"].browse(request_line_ids)
supplier_ids = request_lines.mapped("supplier_id").ids
if len(supplier_ids) == 1:
res["supplier_id"] = supplier_ids[0]
return res
@api.model
def _prepare_purchase_order(self, picking_type, group_id, company, origin):
if not self.supplier_id:
raise UserError(_("Enter a supplier."))
supplier = self.supplier_id
data = {
"origin": origin,
"partner_id": self.supplier_id.id,
"fiscal_position_id": supplier.property_account_position_id
and supplier.property_account_position_id.id
or False,
"picking_type_id": picking_type.id,
"company_id": company.id,
"group_id": group_id.id,
}
return data
@api.model
def _get_purchase_line_onchange_fields(self):
return ["product_uom", "price_unit", "name", "taxes_id"]
@api.model
def _execute_purchase_line_onchange(self, vals):
cls = self.env["purchase.order.line"]
onchanges_dict = {
"onchange_product_id": self._get_purchase_line_onchange_fields()
}
for onchange_method, changed_fields in onchanges_dict.items():
if any(f not in vals for f in changed_fields):
obj = cls.new(vals)
getattr(obj, onchange_method)()
for field in changed_fields:
vals[field] = obj._fields[field].convert_to_write(obj[field], obj)
def create_allocation(self, po_line, pr_line, new_qty, alloc_uom):
vals = {
"requested_product_uom_qty": new_qty,
"product_uom_id": alloc_uom.id,
"purchase_request_line_id": pr_line.id,
"purchase_line_id": po_line.id,
}
return self.env["purchase.request.allocation"].create(vals)
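    # Illustrative note (mirrors the tests above): a request line of 12 Units for
    # a product purchased in Dozen(s) ends up as a PO line of 1 Dozen, while the
    # allocation created here keeps the quantity (12.0) expressed in the purchase
    # request line's own UoM.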
@api.model
def _prepare_purchase_order_line(self, po, item):
if not item.product_id:
raise UserError(_("Please select a product for all lines"))
product = item.product_id
# Keep the standard product UOM for purchase order so we should
# convert the product quantity to this UOM
qty = item.product_uom_id._compute_quantity(
item.product_qty, product.uom_po_id or product.uom_id
)
# Suggest the supplier min qty as it's done in Odoo core
min_qty = item.line_id._get_supplier_min_qty(product, po.partner_id)
qty = max(qty, min_qty)
date_required = item.line_id.date_required
vals = {
"name": product.name,
"order_id": po.id,
"product_id": product.id,
"product_uom": product.uom_po_id.id or product.uom_id.id,
"price_unit": 0.0,
"product_qty": qty,
"account_analytic_id": item.line_id.analytic_account_id.id,
"purchase_request_lines": [(4, item.line_id.id)],
"date_planned": datetime(
date_required.year, date_required.month, date_required.day
),
"move_dest_ids": [(4, x.id) for x in item.line_id.move_dest_ids],
}
self._execute_purchase_line_onchange(vals)
return vals
@api.model
def _get_purchase_line_name(self, order, line):
product_lang = line.product_id.with_context(
{"lang": self.supplier_id.lang, "partner_id": self.supplier_id.id}
)
name = product_lang.display_name
if product_lang.description_purchase:
name += "\n" + product_lang.description_purchase
return name
@api.model
def _get_order_line_search_domain(self, order, item):
vals = self._prepare_purchase_order_line(order, item)
name = self._get_purchase_line_name(order, item)
order_line_data = [
("order_id", "=", order.id),
("name", "=", name),
("product_id", "=", item.product_id.id or False),
("product_uom", "=", vals["product_uom"]),
("account_analytic_id", "=", item.line_id.analytic_account_id.id or False),
]
if self.sync_data_planned:
date_required = item.line_id.date_required
order_line_data += [
(
"date_planned",
"=",
datetime(
date_required.year, date_required.month, date_required.day
),
)
]
if not item.product_id:
order_line_data.append(("name", "=", item.name))
return order_line_data
def make_purchase_order(self):
res = []
purchase_obj = self.env["purchase.order"]
po_line_obj = self.env["purchase.order.line"]
pr_line_obj = self.env["purchase.request.line"]
purchase = False
for item in self.item_ids:
line = item.line_id
if item.product_qty <= 0.0:
raise UserError(_("Enter a positive quantity."))
if self.purchase_order_id:
purchase = self.purchase_order_id
if not purchase:
po_data = self._prepare_purchase_order(
line.request_id.picking_type_id,
line.request_id.group_id,
line.company_id,
line.origin,
)
purchase = purchase_obj.create(po_data)
# Look for any other PO line in the selected PO with same
# product and UoM to sum quantities instead of creating a new
# po line
domain = self._get_order_line_search_domain(purchase, item)
available_po_lines = po_line_obj.search(domain)
new_pr_line = True
# If Unit of Measure is not set, update from wizard.
if not line.product_uom_id:
line.product_uom_id = item.product_uom_id
# Allocation UoM has to be the same as PR line UoM
alloc_uom = line.product_uom_id
wizard_uom = item.product_uom_id
if available_po_lines and not item.keep_description:
new_pr_line = False
po_line = available_po_lines[0]
po_line.purchase_request_lines = [(4, line.id)]
po_line.move_dest_ids |= line.move_dest_ids
po_line_product_uom_qty = po_line.product_uom._compute_quantity(
po_line.product_uom_qty, alloc_uom
)
wizard_product_uom_qty = wizard_uom._compute_quantity(
item.product_qty, alloc_uom
)
all_qty = min(po_line_product_uom_qty, wizard_product_uom_qty)
self.create_allocation(po_line, line, all_qty, alloc_uom)
else:
po_line_data = self._prepare_purchase_order_line(purchase, item)
if item.keep_description:
po_line_data["name"] = item.name
po_line = po_line_obj.create(po_line_data)
po_line_product_uom_qty = po_line.product_uom._compute_quantity(
po_line.product_uom_qty, alloc_uom
)
wizard_product_uom_qty = wizard_uom._compute_quantity(
item.product_qty, alloc_uom
)
all_qty = min(po_line_product_uom_qty, wizard_product_uom_qty)
self.create_allocation(po_line, line, all_qty, alloc_uom)
# TODO: Check propagate_uom compatibility:
new_qty = pr_line_obj._calc_new_qty(
line, po_line=po_line, new_pr_line=new_pr_line
)
po_line.product_qty = new_qty
po_line._onchange_quantity()
# The onchange quantity is altering the scheduled date of the PO
# lines. We do not want that:
date_required = item.line_id.date_required
po_line.date_planned = datetime(
date_required.year, date_required.month, date_required.day
)
res.append(purchase.id)
return {
"domain": [("id", "in", res)],
"name": _("RFQ"),
"view_mode": "tree,form",
"res_model": "purchase.order",
"view_id": False,
"context": False,
"type": "ir.actions.act_window",
}
class PurchaseRequestLineMakePurchaseOrderItem(models.TransientModel):
_name = "purchase.request.line.make.purchase.order.item"
_description = "Purchase Request Line Make Purchase Order Item"
wiz_id = fields.Many2one(
comodel_name="purchase.request.line.make.purchase.order",
string="Wizard",
required=True,
ondelete="cascade",
readonly=True,
)
line_id = fields.Many2one(
comodel_name="purchase.request.line", string="Purchase Request Line"
)
request_id = fields.Many2one(
comodel_name="purchase.request",
related="line_id.request_id",
string="Purchase Request",
readonly=False,
)
product_id = fields.Many2one(
comodel_name="product.product",
string="Product",
related="line_id.product_id",
readonly=False,
)
name = fields.Char(string="Description", required=True)
product_qty = fields.Float(
string="Quantity to purchase", digits="Product Unit of Measure"
)
product_uom_id = fields.Many2one(
comodel_name="uom.uom", string="UoM", required=True
)
keep_description = fields.Boolean(
string="Copy descriptions to new PO",
help="Set true if you want to keep the "
"descriptions provided in the "
"wizard in the new PO.",
)
@api.onchange("product_id")
def onchange_product_id(self):
if self.product_id:
name = self.product_id.name
code = self.product_id.code
sup_info_id = self.env["product.supplierinfo"].search(
[
"|",
("product_id", "=", self.product_id.id),
("product_tmpl_id", "=", self.product_id.product_tmpl_id.id),
("name", "=", self.wiz_id.supplier_id.id),
]
)
if sup_info_id:
p_code = sup_info_id[0].product_code
p_name = sup_info_id[0].product_name
name = "[{}] {}".format(
p_code if p_code else code, p_name if p_name else name
)
else:
if code:
name = "[{}] {}".format(code, name)
if self.product_id.description_purchase:
name += "\n" + self.product_id.description_purchase
self.product_uom_id = self.product_id.uom_id.id
self.product_qty = 1.0
self.name = name
```
|
{
"source": "jerryxyx/alphalens",
"score": 2
}
|
#### File: alphalens/tests/test_utils.py
```python
from __future__ import division
from unittest import TestCase
from nose_parameterized import parameterized
from numpy import (nan)
from pandas import (
Series,
DataFrame,
date_range,
datetime,
Panel,
MultiIndex,
)
from pandas.util.testing import (assert_frame_equal,
assert_series_equal)
from .. utils import (compute_forward_returns,
quantize_factor,
common_start_returns)
class UtilsTestCase(TestCase):
dr = date_range(start='2015-1-1', end='2015-1-2')
dr.name = 'date'
tickers = ['A', 'B', 'C', 'D']
factor = DataFrame(index=dr,
columns=tickers,
data=[[1, 2, 3, 4],
[4, 3, 2, 1]]).stack()
factor.index = factor.index.set_names(['date', 'asset'])
factor.name = 'factor'
factor_data = DataFrame()
factor_data['factor'] = factor
factor_data['group'] = Series(index=factor.index,
data=[1, 1, 2, 2, 1, 1, 2, 2],
dtype="category")
def test_compute_forward_returns(self):
dr = date_range(start='2015-1-1', end='2015-1-3')
prices = DataFrame(index=dr, columns=['A', 'B'],
data=[[1, 1], [1, 2], [2, 1]])
fp = compute_forward_returns(prices, periods=[1, 2])
ix = MultiIndex.from_product([dr, ['A', 'B']],
names=['date', 'asset'])
expected = DataFrame(index=ix, columns=[1, 2])
expected[1] = [0., 1., 1., -0.5, nan, nan]
expected[2] = [1., 0., nan, nan, nan, nan]
assert_frame_equal(fp, expected)
@parameterized.expand([(factor_data, 4, None, False,
[1, 2, 3, 4, 4, 3, 2, 1]),
(factor_data, 2, None, False,
[1, 1, 2, 2, 2, 2, 1, 1]),
(factor_data, 2, None, True,
[1, 2, 1, 2, 2, 1, 2, 1]),
(factor_data, [0, .25, .5, .75, 1.], None, False,
[1, 2, 3, 4, 4, 3, 2, 1]),
(factor_data, [0, .5, .75, 1.], None, False,
[1, 1, 2, 3, 3, 2, 1, 1]),
(factor_data, [0, .25, .5, 1.], None, False,
[1, 2, 3, 3, 3, 3, 2, 1]),
(factor_data, [0, .5, 1.], None, False,
[1, 1, 2, 2, 2, 2, 1, 1]),
(factor_data, [.25, .5, .75], None, False,
[nan, 1, 2, nan, nan, 2, 1, nan]),
(factor_data, [0, .5, 1.], None, True,
[1, 2, 1, 2, 2, 1, 2, 1]),
(factor_data, [.5, 1.], None, True,
[nan, 1, nan, 1, 1, nan, 1, nan]),
(factor_data, [0, 1.], None, True,
[1, 1, 1, 1, 1, 1, 1, 1]),
(factor_data, None, 4, False,
[1, 2, 3, 4, 4, 3, 2, 1]),
(factor_data, None, 2, False,
[1, 1, 2, 2, 2, 2, 1, 1]),
(factor_data, None, 3, False,
[1, 1, 2, 3, 3, 2, 1, 1]),
(factor_data, None, 8, False,
[1, 3, 6, 8, 8, 6, 3, 1]),
(factor_data, None, [0, 1, 2, 3, 5], False,
[1, 2, 3, 4, 4, 3, 2, 1]),
(factor_data, None, [1, 2, 3], False,
[nan, 1, 2, nan, nan, 2, 1, nan]),
(factor_data, None, [0, 2, 5], False,
[1, 1, 2, 2, 2, 2, 1, 1]),
(factor_data, None, [0.5, 2.5, 4.5], False,
[1, 1, 2, 2, 2, 2, 1, 1]),
(factor_data, None, [0.5, 2.5], True,
[1, 1, nan, nan, nan, nan, 1, 1]),
(factor_data, None, 2, True,
[1, 2, 1, 2, 2, 1, 2, 1])])
def test_quantize_factor(self, factor, quantiles, bins, by_group,
expected_vals):
quantized_factor = quantize_factor(factor,
quantiles=quantiles,
bins=bins,
by_group=by_group)
expected = Series(index=factor.index,
data=expected_vals,
name='factor_quantile').dropna()
assert_series_equal(quantized_factor, expected)
@parameterized.expand([(2, 3, False, False,
[[0.075, 0.241868],[0.075, 0.241868],[0.075, 0.241868],
[0.075, 0.241868],[0.075, 0.241868],[0.075, 0.241868]]),
(3, 2, False, True,
[[0.0, 0.241868],[0.0, 0.241868],[0.0, 0.241868],
[0.0, 0.241868],[0.0, 0.241868],[0.0, 0.241868]]),
(3, 5, True, False,
[[0.075, 0.0],[0.075, 0.0],[0.075, 0.0],[0.075, 0.0],
[0.075, 0.0],[0.075, 0.0],[0.075, 0.0],[0.075, 0.0],
[0.075, 0.0]]),
(1, 4, True, True,
[[0., 0.],[0., 0.],[0., 0.], [0., 0.],[0., 0.],[0., 0.]]),
(6, 6, False, False,
[[0.075, 0.243614],[0.075, 0.242861],[0.075, 0.242301],
[0.075, 0.241868],[0.075, 0.241868],[0.075, 0.241868],
[0.075, 0.241868],[0.075, 0.241868],[0.075, 0.241868],
[0.075, 0.241868],[0.075, 0.241868],[0.075, 0.242301],
[0.075, 0.242861]]),
(6, 6, False, True,
[[0.0, 0.243614],[0.0, 0.242861],[0.0, 0.242301],
[0.0, 0.241868],[0.0, 0.241868],[0.0, 0.241868],
[0.0, 0.241868],[0.0, 0.241868],[0.0, 0.241868],
[0.0, 0.241868],[0.0, 0.241868],[0.0, 0.242301],
[0.0, 0.242861]]),
(6, 6, True, False,
[[0.075, 0.0],[0.075, 0.0],[0.075, 0.0],[0.075, 0.0],
[0.075, 0.0],[0.075, 0.0],[0.075, 0.0],[0.075, 0.0],
[0.075, 0.0],[0.075, 0.0],[0.075, 0.0],[0.075, 0.0],
[0.075, 0.0]]),
(6, 6, True, True,
[[0., 0.],[0., 0.],[0., 0.], [0., 0.],[0., 0.],[0., 0.],
[0., 0.],[0., 0.],[0., 0.], [0., 0.],[0., 0.],[0., 0.],
[0., 0.]]),
])
def test_common_start_returns(self, before, after, mean_by_date, demeaned,
expected_vals):
dr = date_range(start='2015-1-17', end='2015-2-2')
dr.name = 'date'
tickers = ['A', 'B', 'C', 'D']
r1, r2, r3, r4 = (1.20, 1.40, 0.90, 0.80)
prices = DataFrame(index=dr, columns=tickers,
data=[[r1**1, r2**1, r3**1, r4**1 ],
[r1**2, r2**2, r3**2, r4**2 ],
[r1**3, r2**3, r3**3, r4**3 ],
[r1**4, r2**4, r3**4, r4**4 ],
[r1**5, r2**5, r3**5, r4**5 ],
[r1**6, r2**6, r3**6, r4**6 ],
[r1**7, r2**7, r3**7, r4**7 ],
[r1**8, r2**8, r3**8, r4**8 ],
[r1**9, r2**9, r3**9, r4**9 ],
[r1**10, r2**10, r3**10, r4**10],
[r1**11, r2**11, r3**11, r4**11],
[r1**12, r2**12, r3**12, r4**12],
[r1**13, r2**13, r3**13, r4**13],
[r1**14, r2**14, r3**14, r4**14],
[r1**15, r2**15, r3**15, r4**15],
[r1**16, r2**16, r3**16, r4**16],
[r1**17, r2**17, r3**17, r4**17]])
dr2 = date_range(start='2015-1-21', end='2015-1-29')
factor = DataFrame(index=dr2, columns=tickers,
data=[[3, 4, 2, 1], [3, 4, 2, 1], [3, 4, 2, 1],
[3, 4, 2, 1], [3, 4, 2, 1], [3, 4, 2, 1],
[3, 4, 2, 1], [3, 4, 2, 1], [3, 4, 2, 1]]).stack()
factor.index = factor.index.set_names(['date', 'asset'])
factor.name = 'factor'
cmrt = common_start_returns(factor, prices, before, after, False, mean_by_date,
factor if demeaned else None)
cmrt = DataFrame( {'mean': cmrt.mean(axis=1), 'std': cmrt.std(axis=1)} )
expected = DataFrame(index=range(-before, after+1), columns=['mean', 'std'], data=expected_vals)
assert_frame_equal(cmrt, expected)
```
|
{
"source": "Jerryyang2001/math-clear",
"score": 2
}
|
#### File: math-clear/conventor/conventor.py
```python
from typing import Dict, List
def convent(document: List[str], config: Dict) -> str:
    # Placeholder: conversion of a parsed document into output text is not yet implemented.
    pass
```
|
{
"source": "JerryYangGitHub/qtum-test-suite",
"score": 2
}
|
#### File: qtum-test-suite/global--env/deploy.py
```python
from copy import deepcopy
import shutil, os, sys
from envconfig import *
DST_DIR = './dst-files/'
def load_env_kv() : #return [prog_env, nodes_env]
prog = {
'QTUM_PREFIX' : QTUM_PREFIX,
'QTUM_BIN' : QTUM_BIN,
'CMD_QTUMD': CMD_QTUMD,
'CMD_QTUMCLI': CMD_QTUMCLI
}
return [
deepcopy(prog),
deepcopy(QTUM_NODES)
]
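# Note: load_env_kv() returns [prog_env, nodes_env]; prog_env carries the qtum
# install paths/commands taken from envconfig, and nodes_env maps each node
# name to its per-node settings (including NODEX__QTUM_DATADIR used in main()).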
def main() :
env_kv = load_env_kv()
prog_env = env_kv[0]
nodes_env = env_kv[1]
for node_name in sorted(nodes_env.keys()) :
node_datadir = nodes_env[node_name]['NODEX__QTUM_DATADIR']
try:
os.makedirs(node_datadir)
except (OSError) as e :
if e.errno == 17 :
                print 'node_datadir(%s) seems not empty, please check it manually. prog exit now.' % node_datadir
sys.exit(1)
else :
raise
f = os.listdir(node_datadir)
if len(f) != 0:
            print 'node_datadir(%s) seems not empty, please check it manually. prog exit now.' % node_datadir
sys.exit(1)
####
node_dir = os.path.join(DST_DIR, node_name)
####
f_s = os.path.join(node_dir, 'qtum.conf')
f_d = os.path.join(node_datadir, 'qtum.conf')
shutil.copy(f_s, f_d)
####
filename = node_name + '--' + 'wrp-qtumd.sh'
f_s = os.path.join(node_dir, filename)
f_d = os.path.join(QTUM_BIN, filename)
shutil.copy(f_s, f_d)
os.system('chmod +x %s ' % f_d)
####
filename = node_name + '--' + 'wrp-qtum-cli.sh'
f_s = os.path.join(node_dir, filename)
f_d = os.path.join(QTUM_BIN, filename)
shutil.copy(f_s, f_d)
os.system('chmod +x %s ' % f_d)
####
filename = node_name + '--' + 'wrp-solar.sh'
f_s = os.path.join(node_dir, filename)
f_d = os.path.join(QTUM_BIN, filename)
shutil.copy(f_s, f_d)
os.system('chmod +x %s ' % f_d)
####
f_s = os.path.join(DST_DIR, 'qtum-path.sh')
f_d = os.path.join('/etc/profile.d', 'qtum-path.sh')
cmd = "sudo cp -rf %s %s" % (f_s, f_d)
print "cmd: %s ; to set PATH" % cmd
os.system(cmd)
os.chdir(QTUM_BIN)
####
sl_list = [
(QTUM_DFT_NODE + '--' + 'wrp-qtumd.sh', 'wrp-qtumd'),
(QTUM_DFT_NODE + '--' + 'wrp-qtum-cli.sh', 'wrp-qtum-cli'),
(QTUM_DFT_NODE + '--' + 'wrp-solar.sh', 'wrp-solar'),
]
for (f_r, f_l) in sl_list :
try:
os.remove(f_l)
except OSError as e:
if e.errno == 2:
pass
else :
raise
os.symlink(f_r, f_l)
if __name__ == '__main__' :
main()
```
|
{
"source": "jerryyao120-ndd/qmdesc",
"score": 3
}
|
#### File: qmdesc/qmdesc/model.py
```python
from argparse import Namespace
import torch.nn as nn
from qmdesc.mpn import MPN
from qmdesc.ffn import MultiReadout
class MoleculeModel(nn.Module):
"""A MoleculeModel is a model which contains a message passing network following by feed-forward layers."""
def __init__(self, args, atom_fdim, bond_fdim):
"""
Initializes the MoleculeModel.
:param classification: Whether the model is a classification model.
"""
super(MoleculeModel, self).__init__()
self.create_encoder(args, atom_fdim, bond_fdim)
self.create_ffn(args)
def create_encoder(self, args: Namespace, atom_fdim, bond_fdim):
"""
Creates the message passing encoder for the model.
:param args: Arguments.
"""
self.encoder = MPN(args, atom_fdim, bond_fdim)
def create_ffn(self, args: Namespace):
"""
Creates the feed-forward network for the model.
:param args: Arguments.
"""
# Create readout layer
self.readout = MultiReadout(args, args.atom_targets, args.bond_targets,
args.atom_constraints, args.bond_constraints)
def forward(self, input):
"""
Runs the MoleculeModel on input.
:param input: Input.
:return: The output of the MoleculeModel.
"""
output_all = self.readout(self.encoder(input))
return output_all
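    # Hedged usage sketch (argument construction lives elsewhere in qmdesc):
    #   model = MoleculeModel(args, atom_fdim, bond_fdim)
    #   preds = model(batch)   # `batch` is whatever MPN(args, ...) accepts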
```
|
{
"source": "jerryyeezus/nlp-summarization",
"score": 3
}
|
#### File: jerryyeezus/nlp-summarization/educreator.py
```python
import codecs
import nltk.data
def create_edus(infilename, outfilename):
text = open(infilename).read() #.decode("ISO-8859-1").encode("UTF-8")
sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
sentences = sent_detector.tokenize(text.strip())
with codecs.open(outfilename, 'w', encoding="UTF-8") as out:
for sentence in sentences:
out.write(sentence + "\n")
create_edus("./topics/battery-life_amazon_kindle.txt.data", "./summary/battery-life.edus")
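# The Punkt tokenizer splits the raw review text into sentences, one per output
# line; each line then serves as an elementary discourse unit (EDU) downstream.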
```
#### File: jerryyeezus/nlp-summarization/evaluation.py
```python
import numpy
class Performance(object):
def __init__(self, percision, recall):
self.percision = percision
self.recall = recall
class Metrics(object):
def __init__(self, levels=['span','nuclearity','relation']):
""" Initialization
:type levels: list of string
:param levels: evaluation levels, the possible values are only
'span','nuclearity','relation'
"""
self.levels = set(levels)
self.span_perf = Performance([], [])
self.nuc_perf = Performance([], [])
self.rela_perf = Performance([], [])
def eval(self, goldtree, predtree):
""" Evaluation performance on one pair of RST trees
:type goldtree: RSTTree class
:param goldtree: gold RST tree
:type predtree: RSTTree class
:param predtree: RST tree from the parsing algorithm
"""
goldbrackets = goldtree.bracketing()
predbrackets = predtree.bracketing()
for level in self.levels:
if level == 'span':
self._eval(goldbrackets, predbrackets, idx=1)
elif level == 'nuclearity':
self._eval(goldbrackets, predbrackets, idx=2)
elif level == 'relation':
self._eval(goldbrackets, predbrackets, idx=3)
else:
raise ValueError("Unrecognized evaluation level: {}".format(level))
def _eval(self, goldbrackets, predbrackets, idx):
""" Evaluation on each discourse span
"""
goldspan = [item[:idx] for item in goldbrackets]
predspan = [item[:idx] for item in predbrackets]
allspan = [span for span in goldspan if span in predspan]
p, r = 0.0, 0.0
for span in allspan:
if span in goldspan:
p += 1.0
if span in predspan:
r += 1.0
p /= len(goldspan)
r /= len(predspan)
if idx == 1:
self.span_perf.percision.append(p)
self.span_perf.recall.append(r)
elif idx == 2:
self.nuc_perf.percision.append(p)
self.nuc_perf.recall.append(r)
elif idx == 3:
self.rela_perf.percision.append(p)
self.rela_perf.recall.append(r)
def report(self):
""" Compute the F1 score for different evaluation levels
and print it out
"""
for level in self.levels:
if 'span' == level:
p = numpy.array(self.span_perf.percision).mean()
r = numpy.array(self.span_perf.recall).mean()
f1 = (2 * p * r) / (p + r)
print 'F1 score on span level is {0:0.3f}'.format(f1)
elif 'nuclearity' == level:
p = numpy.array(self.nuc_perf.percision).mean()
r = numpy.array(self.nuc_perf.recall).mean()
f1 = (2 * p * r) / (p + r)
print 'F1 score on nuclearity level is {0:0.3f}'.format(f1)
elif 'relation' == level:
p = numpy.array(self.rela_perf.percision).mean()
r = numpy.array(self.rela_perf.recall).mean()
f1 = (2 * p * r) / (p + r)
print 'F1 score on relation level is {0:0.3f}'.format(f1)
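# Hedged usage sketch (gold/pred RSTTree objects come from this project):
#   met = Metrics(levels=['span', 'nuclearity', 'relation'])
#   met.eval(goldtree, predtree)   # call once per document pair
#   met.report()                   # prints F1 per level from the averaged P/R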
```
#### File: pyrouge-0.1.0/bin/pyrouge_write_config_file.py
```python
from __future__ import print_function, unicode_literals, division
import argparse
from pyrouge import Rouge155
from pyrouge.utils.argparsers import model_sys_parser, config_parser
def get_args():
parser = argparse.ArgumentParser(parents=[model_sys_parser, config_parser])
return parser.parse_args()
def main():
args = get_args()
Rouge155.write_config_static(
args.system_dir, args.system_filename_pattern,
args.model_dir, args.model_filename_pattern,
args.config_file_path, args.system_id)
if __name__ == "__main__":
main()
```
#### File: pyrouge/utils/log.py
```python
import logging
def get_console_logger(name):
logFormatter = logging.Formatter(
"%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s")
logger = logging.getLogger(name)
if not logger.handlers:
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setFormatter(logFormatter)
logger.addHandler(ch)
return logger
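# Hedged usage sketch:
#   log = get_console_logger('pyrouge.example')
#   log.info('hello')   # prints e.g. "2015-01-01 12:00:00,000 [MainThread  ] [INFO ] hello"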
def get_global_console_logger():
return get_console_logger('global')
```
#### File: jerryyeezus/nlp-summarization/util.py
```python
from scipy.sparse import lil_matrix
def label2action(label):
""" Transform label to action
"""
items = label.split('-')
if len(items) == 1:
action = (items[0], None, None)
elif len(items) == 3:
action = tuple(items)
else:
raise ValueError("Unrecognized label: {}".format(label))
return action
def action2label(action):
""" Transform action into label
"""
if action[0] == 'Shift':
label = action[0]
elif action[0] == 'Reduce':
label = '-'.join(list(action))
else:
raise ValueError("Unrecognized parsing action: {}".format(action))
return label
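# Example (illustrative): a label such as 'Reduce-NS-elaboration' maps to
# ('Reduce', 'NS', 'elaboration') via label2action and back via action2label;
# a bare 'Shift' maps to ('Shift', None, None).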
def vectorize(features, vocab):
""" Transform a feature list into a numeric vector
with a given vocab
"""
vec = lil_matrix((1, len(vocab)))
for feat in features:
try:
fidx = vocab[feat]
vec[0,fidx] += 1.0
except KeyError:
pass
return vec
def extractrelation(s, level=0):
""" Extract discourse relation on different level
"""
return s.lower().split('-')[0]
def reversedict(dct):
""" Reverse the {key:val} in dct to
{val:key}
"""
# print labelmap
newmap = {}
for (key, val) in dct.iteritems():
newmap[val] = key
return newmap
```
|
{
"source": "jerryyip/respeaker_adapter",
"score": 3
}
|
#### File: jerryyip/respeaker_adapter/bing_tts.py
```python
import json
import uuid
import wave
import io
import hashlib
from monotonic import monotonic
from urllib import urlencode
from urllib2 import Request, urlopen, URLError, HTTPError
from bing_base import *
CACHE_SIZE = 2*1024*1024 #2M
class BingTTS():
def __init__(self, bing_base):
self.bing_base = bing_base
self.locales = {
"ar-eg": {"Female": "Microsoft Server Speech Text to Speech Voice (ar-EG, Hoda)"},
"de-DE": {"Female": "Microsoft Server Speech Text to Speech Voice (de-DE, Hedda)", "Male": "Microsoft Server Speech Text to Speech Voice (de-DE, Stefan, Apollo)"},
"en-AU": {"Female": "Microsoft Server Speech Text to Speech Voice (en-AU, Catherine)"},
"en-CA": {"Female": "Microsoft Server Speech Text to Speech Voice (en-CA, Linda)"},
"en-GB": {"Female": "Microsoft Server Speech Text to Speech Voice (en-GB, Susan, Apollo)", "Male": "Microsoft Server Speech Text to Speech Voice (en-GB, George, Apollo)"},
"en-IN": {"Male": "Microsoft Server Speech Text to Speech Voice (en-IN, Ravi, Apollo)"},
"en-US":{"Female": "Microsoft Server Speech Text to Speech Voice (en-US, ZiraRUS)","Male": "Microsoft Server Speech Text to Speech Voice (en-US, BenjaminRUS)"},
"es-ES":{"Female": "Microsoft Server Speech Text to Speech Voice (es-ES, Laura, Apollo)","Male": "Microsoft Server Speech Text to Speech Voice (es-ES, Pablo, Apollo)"},
"es-MX":{"Male": "Microsoft Server Speech Text to Speech Voice (es-MX, Raul, Apollo)"},
"fr-CA":{"Female": "Microsoft Server Speech Text to Speech Voice (fr-CA, Caroline)"},
"fr-FR":{"Female": "Microsoft Server Speech Text to Speech Voice (fr-FR, Julie, Apollo)","Male": "Microsoft Server Speech Text to Speech Voice (fr-FR, Paul, Apollo)"},
"it-IT":{"Male": "Microsoft Server Speech Text to Speech Voice (it-IT, Cosimo, Apollo)"},
"ja-JP":{"Female": "Microsoft Server Speech Text to Speech Voice (ja-JP, Ayumi, Apollo)","Male": "Microsoft Server Speech Text to Speech Voice (ja-JP, Ichiro, Apollo)"},
"pt-BR":{"Male": "Microsoft Server Speech Text to Speech Voice (pt-BR, Daniel, Apollo)"},
"ru-RU":{"Female": "Microsoft Server Speech Text to Speech Voice (pt-BR, Daniel, Apollo)","Male": "Microsoft Server Speech Text to Speech Voice (ru-RU, Pavel, Apollo)"},
"zh-CN":{"Female": "Microsoft Server Speech Text to Speech Voice (zh-CN, HuihuiRUS)","Female2": "Microsoft Server Speech Text to Speech Voice (zh-CN, Yaoyao, Apollo)", "Male": "Microsoft Server Speech Text to Speech Voice (zh-CN, Kangkang, Apollo)"},
"zh-HK":{"Female": "Microsoft Server Speech Text to Speech Voice (zh-HK, Tracy, Apollo)","Male": "Microsoft Server Speech Text to Speech Voice (zh-HK, Danny, Apollo)"},
"zh-TW":{"Female": "Microsoft Server Speech Text to Speech Voice (zh-TW, Yating, Apollo)","Male": "Microsoft Server Speech Text to Speech Voice (zh-TW, Zhiwei, Apollo)"}
}
self.cache = {}
def speak(self, text, language="en-US", gender="Female"):
access_token = self.bing_base.token()
if language not in self.locales.keys():
raise LocaleError("language locale not supported.")
lang = self.locales.get(language)
if gender not in ["Female", "Male", "Female2"]:
gender = "Female"
if len(lang) == 1:
gender = lang.keys()[0]
service_name = lang[gender]
hasher = hashlib.sha1()
hasher.update(text+language+gender)
sha1 = hasher.hexdigest()
if sha1 in self.cache:
print '[TTS wave from cache]'
# [size, data, ref_cnt]
self.cache[sha1][2] += 1
return self.cache[sha1][1]
body = "<speak version='1.0' xml:lang='en-us'>\
<voice xml:lang='%s' xml:gender='%s' name='%s'>%s</voice>\
</speak>" % (language, gender, service_name, text)
headers = {"Content-type": "application/ssml+xml",
"X-Microsoft-OutputFormat": "raw-16khz-16bit-mono-pcm",
"Authorization": "Bearer " + access_token,
"X-Search-AppId": "07D3234E49CE426DAA29772419F436CA",
"X-Search-ClientID": str(uuid.uuid1()).replace('-',''),
"User-Agent": "TTSForPython"}
url = "https://speech.platform.bing.com/synthesize"
request = Request(url, data=body, headers=headers)
try:
response = urlopen(request)
except HTTPError as e:
raise RequestError("tts request failed: {0}".format(
getattr(e, "reason", "status {0}".format(e.code)))) # use getattr to be compatible with Python 2.6
except URLError as e:
raise RequestError("tts connection failed: {0}".format(e.reason))
data = response.read()
size = len(data)
print "[TTS wave length: %dkB]" %(size/1024),
self.cache_wave(sha1, data, size)
return data
def _sum_cache(self):
sum = 0
for k,v in self.cache.items():
sum += v[0]
return sum
def cache_wave(self, sha, data, size):
overflow = self._sum_cache() + size - CACHE_SIZE
to_be_del = []
if overflow > 0:
lst = sorted(self.cache.items(), key=lambda t: t[1][2])
while overflow > 0 and len(lst) > 0:
garbage = lst.pop(0)
to_be_del.append(garbage[0])
overflow -= garbage[1][0]
for d in to_be_del:
del self.cache[d]
#print self.cache.keys()
# [size, data, ref_cnt]
self.cache[sha] = [size, data, 0]
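    # Cache policy (descriptive): each entry is [size, wave_bytes, ref_count];
    # when adding a clip would exceed CACHE_SIZE (2 MB), entries are evicted in
    # ascending ref_count order until it fits, so rarely replayed phrases go first.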
if __name__ == '__main__':
import sys
try:
from creds import BING_KEY
except ImportError:
print('Get a key from https://www.microsoft.com/cognitive-services/en-us/speech-api and create creds.py with the key')
sys.exit(-1)
from bing_base import *
from player import Player
import pyaudio
import time
pa = pyaudio.PyAudio()
player = Player(pa)
if len(sys.argv) != 2:
print('Usage: %s "text"' % sys.argv[0])
sys.exit(-1)
bing = BingBase(BING_KEY)
tts = BingTTS(bing)
# recognize speech using Microsoft Bing Voice Recognition
try:
data = tts.speak(sys.argv[1], language='en-US')
player.play_buffer(data)
except LocaleError as e:
print e
except RequestError as e:
print("Could not request results from Microsoft Bing Voice Recognition service; {0}".format(e))
time.sleep(10)
player.close()
pa.terminate()
```
#### File: jerryyip/respeaker_adapter/main.py
```python
import os
import signal
from microphone import Microphone
from bing_base import *
from bing_recognizer import *
from bing_tts import *
from player import Player
import pyaudio
import sys
#from relay import Relay
try:
from creds import BING_KEY
except ImportError:
print('Get a key from https://www.microsoft.com/cognitive-services/en-us/speech-api and create creds.py with the key')
sys.exit(-1)
ACCESS_TOKEN = "<KEY>"
#import time
from relay import Relay
script_dir = os.path.dirname(os.path.realpath(__file__))
hi = os.path.join(script_dir, 'audio/hi.wav')
bing = BingBase(BING_KEY)
recognizer = BingVoiceRecognizer(bing)
tts = BingTTS(bing)
mission_completed = False
awake = False
pa = pyaudio.PyAudio()
mic = Microphone(pa)
player = Player(pa)
#mic.player = player
relay1 = Relay(ACCESS_TOKEN)
relay1.set_tts(tts)
relay1.set_player(player)
def handle_int(sig, frame):
global mission_completed
print "Terminating..."
mission_completed = True
mic.close()
player.close()
# worker.stop()
pa.terminate()
signal.signal(signal.SIGINT, handle_int)
#worker.start()
while not mission_completed:
if not awake:
#print('test1')
if mic.detect():
awake = True
player.play(hi)
continue
data = mic.listen()
if not data:
awake = False
print ('no data')
continue
# recognize speech using Microsoft Bing Voice Recognition
try:
text = recognizer.recognize(data, language='en-US')
print('Bing:' + text.encode('utf-8'))
relay1.text = text
relay1.run()
#print('test2')
if text.find('bye bye') > -1:
awake = False
elif text.find('shut down') > -1:
handle_int(0,0)
except UnknownValueError:
print("Microsoft Bing Voice Recognition could not understand audio")
except RequestError as e:
print("Could not request results from Microsoft Bing Voice Recognition service; {0}".format(e))
awake = False
#time.sleep(2)
```
#### File: jerryyip/respeaker_adapter/relay.py
```python
import re
import json
import requests
from player import Player
import time
#import monotonic
#import Queue
#import threading
#ACCESS_TOKEN = "<KEY>"
class Relay(object):
def __init__(self, ACCESS_TOKEN):
self.text = ""
self.ACCESS_TOKEN = ACCESS_TOKEN
self.onoff = 0
def set_tts(self, tts):
self.tts = tts
def set_player(self, ply):
self.player = ply
def play_text(self, text):
try:
self.player.play_buffer(self.tts.speak(text))
except Exception as e:
print e
def run(self, ):
if re.search(r'turn on', self.text) or re.search(r'open', self.text):
self.play_text("Relay turn on now")
self.onoff = 1
self._post_relay_onoff()
elif re.search(r'turn off', self.text) or re.search(r'close', self.text):
self.play_text("Relay turn off now")
self.onoff = 0
self._post_relay_onoff()
elif re.search(r'status',self.text):
            resp = 'the relay is %s now' % (self._get_relay_status())
self.play_text(resp)
elif re.search(r'thank you', self.text):
self.play_text("you're welcome!")
elif re.search(r'how(\'re)?.*(are)?.*you', self.text):
self.play_text("good, thank you.")
elif re.search(r'bye bye', self.text):
self.play_text("bye!")
elif re.search(r'shut.*down', self.text):
self.play_text("see you next time")
else:
print 'unknown command, ignore.'
self.play_text("I don't know your command.")
def _get_relay_status(self, ):
_URL_RELAY_STATUS = "https://cn.wio.seeed.io/v1/node/GroveRelayD0/onoff_status?access_token=%s" % (self.ACCESS_TOKEN)
time_out = 0
result = requests.get(_URL_RELAY_STATUS)
while result.status_code != 200: #if response error try again 5 times
time_out = time_out + 1
time.sleep(1)
result = requests.get(_URL_RELAY_STATUS)
if (time_out >= 5):
print ("can't get relay status")
return "ERROR"
print ("get relay status success")
status_text = result.json()
if (status_text["onoff"] == 0):
return "OFF"
elif (status_text["onoff"] == 1):
return "ON"
else:
return "UNKNOWN"
def _post_relay_onoff(self, ):
_URL_RELAY_ONOFF = "https://cn.wio.seeed.io/v1/node/GroveRelayD0/onoff/%d?access_token=%s" % (self.onoff,self.ACCESS_TOKEN)
time_out = 0
result = requests.post(_URL_RELAY_ONOFF)
while result.status_code != 200: #if response error try again 5 times
time_out = time_out + 1
time.sleep(1)
result = requests.post(_URL_RELAY_ONOFF)
if (time_out >= 5):
print ("can't post relay onoff")
return "ERROR"
print ("post relay onoff success")
return "SUCCESS"
if __name__ == "__main__":
# postRelayOnoff(onoff=1)
# print(getRelayStatus())
# postRelayOnoff(onoff=0)
# print(getRelayStatus())
relay1 = Relay("c79dfaf4d5f720b925e2b262220d99fd")
print(relay1._get_relay_status())
relay1.onoff = 1
relay1._post_relay_onoff()
print(relay1._get_relay_status())
```
#### File: jerryyip/respeaker_adapter/worker.py
```python
import threading
import Queue
import time
import re
import random
import json
from monotonic import monotonic
from urllib import urlencode
from urllib2 import Request, urlopen, URLError, HTTPError
WIO_TOKEN = "<KEY>"
class Worker(threading.Thread):
def __init__(self, queue_len = 10):
threading.Thread.__init__(self)
self.q = Queue.Queue(queue_len) #fifo queue
self.thread_stop = False
self.last_time_broadcast = 0
self.human_around = False
self.humidity = 0
def set_tts(self, tts):
self.tts = tts
def set_player(self, ply):
self.player = ply
def push_cmd(self, cmd):
self.q.put(cmd)
def wait_done(self):
self.q.join() #wait queue emtry
def play_text(self, text):
try:
self.player.play_buffer(self.tts.speak(text))
except Exception as e:
print e
def loop(self):
"""
do stuff in the thread loop
"""
now = monotonic()
if now - self.last_time_broadcast > 60 and self.human_around:
self.last_time_broadcast = now
if self.humidity < 20.0:
print 'the plants need water.'
self.play_text("Hi, my soil humidity is now less than 20%, I think it's time for you to water the plants.")
# read from wio
self.humidity = 19.0 # this is faked for better expression of the demo
url = "https://cn.wio.seeed.io/v1/node/GroveMoistureA0/moisture?access_token=%s" % (WIO_TOKEN,)
request = Request(url)
try:
response = urlopen(request)
data = response.read()
result = json.loads(data)
if result['moisture']:
#self.humidity = result['moisture']
print 'moisture raw:', result['moisture']
except Exception as e:
print e
pass
# read from wio
url = "https://cn.wio.seeed.io/v1/node/GrovePIRMotionD0/approach?access_token=%s" % (WIO_TOKEN,)
request = Request(url)
try:
response = urlopen(request)
data = response.read()
result = json.loads(data)
if result['approach']:
self.human_around = True
print 'human around'
else:
self.human_around = False
except Exception as e:
print e
pass
# chance = random.randint(0, 100)
# if chance < 10:
# print 'the plants need water.'
# self.play_text("Hi, my soil humidity is now less than %d%%, I think it's time for you to water the plants." % (chance,))
def run(self):
while not self.thread_stop:
self.loop()
cmd = ''
try:
cmd = self.q.get(timeout=1)
except:
continue
print("worker thread get cmd: %s" %(cmd, ))
self._parse_cmd(cmd)
self.q.task_done()
len = self.q.qsize()
if len > 0:
print("still got %d commands to execute." % (len,))
def _parse_cmd(self, cmd):
if re.search(r'how.*(plant|plants|plans).*(going|doing)?', cmd) or re.search(r'check.*(plant|plants|plans).*', cmd):
resp = 'they are good, the soil humidity is now %.1f percent' % self.humidity
self.play_text(resp)
elif re.search(r'thank you', cmd):
self.play_text("you're welcome!")
elif re.search(r'how(\'re)?.*(are)?.*you', cmd):
self.play_text("good, thank you.")
elif re.search(r'bye bye', cmd):
self.play_text("bye!")
elif re.search(r'shut.*down', cmd):
self.play_text("see you next time")
else:
print 'unknown command, ignore.'
self.play_text("I don't know your command.")
def stop(self):
self.thread_stop = True
self.q.join()
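# Hedged usage sketch (the worker is meant to be driven from a main loop such as
# main.py's, where worker.start()/stop() appear commented out):
#   worker = Worker()
#   worker.set_tts(tts)
#   worker.set_player(player)
#   worker.start()                    # background sensor polling + speech
#   worker.push_cmd('check plants')   # recognized text gets queued here
#   worker.stop()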
```
|
{
"source": "JerryYouxin/L-DAG",
"score": 3
}
|
#### File: JerryYouxin/L-DAG/DagGenerator.py
```python
import sys
import os
import shutil
from xml.etree import ElementTree as ET
import argparse
import platform
args = None
def parse_MACRO(s, param):
result = ''
substr = ''
analysing = False
for c in s:
if c != '$':
if analysing:
if c.isalpha() or c == '_':
substr += c
else:
analysing = False
result += substr + c
substr = ''
else:
result += c
else:
if analysing:
result += str(param.para[substr[1:]])
substr = ''
analysing = False
else:
analysing = True
substr += c
return result
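# Example (illustrative): macros are written as $NAME$ and resolved against
# param.para, so with param.para = {'Nelem': 4}:
#   parse_MACRO('run_$Nelem$.dat', param)  ->  'run_4.dat'
# A '$NAME' ended by any character other than '$' is left in the text as-is.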
def parse_expr(expr, param):
expr_new = parse_MACRO(expr, param)
return eval(expr_new.replace('\n', '').replace('\r', ''))
def parse_statement(child, param):
expr = child.findall('expr')
if len(expr) > 0:
if len(expr) > 1:
print('WARNING : there are multiple "expr" in ' + child.tag + ' domain. Only take the first one.')
val = parse_expr(expr[0].text, param)
else:
val = parse_MACRO(child.text, param)
return val
def parse_parent(domain, beginning, ending):
edges = {}
for one in domain:
pid = one.get('ref')
if pid in ending:
ending.remove(pid)
children = one.findall('child')
clist = []
for child in children:
cid = child.get('ref')
clist.append(cid)
if cid in beginning:
beginning.remove(cid)
if pid in edges:
edges[pid].extend(clist)
else:
edges[pid] = clist
return edges
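# parse_parent builds a {parent_id: [child_ids]} map from the <parent> elements;
# ids never seen as a child remain in `beginning` and ids never used as a parent
# remain in `ending`, which is how Phase.parse later checks for a single entry
# and a single exit.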
class Parameter:
def __init__(self):
self.para = {}
def add(self, key, val):
self.para[key] = val
def parseLine(self, line):
line.strip()
sl= line.split('=')
print("get para = ",sl)
self.para[sl[0]] = sl[1]
def general_read(self, fn_para):
if fn_para=='':
return
if not os.path.exists(fn_para):
print("Error : "+fn_para+" is missing !!!")
exit(-1)
f = open(fn_para, 'r', encoding='UTF-8')
for line in f:
self.parseLine(line)
f.close()
def read(self):
if not os.path.exists('Parameter.dat'):
print("Error : Parameter.dat is missing !!!")
exit(-1)
f = open('Parameter.dat', 'r', encoding='UTF-8')
# first line is comment
f.readline()
self.para['WeightDistribution'] = int(f.readline())
self.para['Nelem'] = int(f.readline())
self.para['NAdd'] = int(f.readline())
self.para['Pc'] = float(f.readline())
self.para['Pm'] = float(f.readline())
self.para['FunctionMode'] = int(f.readline())
self.para['Cmax'] = int(f.readline())
self.para['Cmin'] = int(f.readline())
self.para['PackageSize'] = int(f.readline())
self.para['MaxGen'] = int(f.readline())
self.para['PopSize'] = int(f.readline())
self.para['P4Angle'] = int(f.readline())
self.para['NAddType'] = int(f.readline())
self.para['WeightRate'] = float(f.readline())
self.para['MaxThickLimt'] = int(f.readline())
self.para['MaxThickValue'] = int(f.readline())
self.para['OptElemType'] = int(f.readline())
self.para['Bee'] = float(f.readline())
f.close()
if os.path.exists('Outer_Loop_No.txt'):
f = open('Outer_Loop_No.txt', 'r')
self.para['Outer_Loop_No'] = int(f.readline()[11:])
f.close()
else:
print("WARNING : Outer_Loop_No.txt not found for parameter configuration. Use default value [=1]")
self.para['Outer_Loop_No'] = 1
class Node:
# private vars
__ss = ['transfer_input_files = ', 'transfer_output_files = ', 'requirements','arguments = ', 'executable = ',
'should_transfer_files = ', 'universe = ', 'when_to_transfer_output = ', 'log = ',
'error = ', 'output = ', 'initialdir = ', '']
# index lists
__transfer_input_files = 0
__transfer_output_files = 1
__requirements = 2
__arguments = 3
__executable = 4
__should_transfer_files = 5
__universe = 6
__when_to_transfer_output = 7
__log = 8
__error = 9
__output = 10
__initialdir = 11
__a = 12
def getid(self):
return self.__id
def parse_regular(self, child, param):
param.add(child.tag, parse_MACRO(child.text, param))
if child.tag == 'input_file':
self.__contents[self.__transfer_input_files] = parse_MACRO(child.text, param)
elif child.tag == 'output_file':
self.__contents[self.__transfer_output_files] = parse_MACRO(child.text, param)
elif child.tag == 'requirements':
self.__contents[self.__requirements] = parse_MACRO(child.text, param)
elif child.tag == 'arguments':
self.__contents[self.__arguments] = parse_MACRO(child.text, param)
elif child.tag == 'executable':
self.__contents[self.__executable] = parse_MACRO(child.text, param)
elif child.tag == 'jobnum':
self.__jobnum = int(parse_statement(child, param))
param.add(child.tag, self.__jobnum)
def parse_other(self, child, param):
param.add(child.tag, parse_MACRO(child.text, param))
if child.tag == 'Universe':
self.__contents[self.__universe] = parse_MACRO(child.text, param)
elif child.tag == 'should_transfer_files':
self.__contents[self.__should_transfer_files] = parse_MACRO(child.text, param)
elif child.tag == 'when_to_transfer_output':
self.__contents[self.__when_to_transfer_output] = parse_MACRO(child.text, param)
elif child.tag == 'Log':
self.__contents[self.__log] = parse_MACRO(child.text, param)
elif child.tag == 'error':
self.__contents[self.__error] = parse_MACRO(child.text, param)
elif child.tag == 'output':
self.__contents[self.__output] = parse_MACRO(child.text, param)
elif child.tag == 'initialdir':
self.__contents[self.__initialdir] = parse_MACRO(child.text, param)
elif child.tag == 'a':
self.__contents[self.__a] = parse_MACRO(child.text, param)
def parse(self, one, param):
for child in list(one):
self.parse_regular(child, param)
self.__contents[self.__transfer_input_files] += ','+self.__contents[self.__executable]
two = one.findall('other')
if len(two) > 1:
print('WARNING : there are multiple domain "other" in a node declaration. Only take the first one.')
if len(two) > 0:
for child in list(two[0]):
self.parse_other(child, param)
def __init__(self, node_id, workdir):
self.__id = node_id
self.__workdir = workdir
self.__contents = ['', '', '', '', '', '', '', '', '', '', '', '','']
self.__jobnum = 1
self.is_noop = False
def set_to_noop(self):
self.__contents = ['', '', '', 'noop', '', '', '', '', '', '', '','', 'Queue']
self.is_noop = True
def to_string(self):
return 'node_' + str(self.__id)
def to_file_string(self):
return 'node_' + str(self.__id) + '.sub'
# generate sub file for this node
# workdir must be in fold format (end with '/')
def generate(self):
f = open(self.__workdir + self.to_file_string(), 'w')
f.write(self.generate_to_string())
f.close()
def generate_to_string(self):
buff = ''
for j in range(0, len(self.__ss)):
if self.__contents[j] != '':
buff += self.__ss[j] + self.__contents[j] + '\n'
return buff
class Phase:
def parse(self, my_phase, param):
beginning = []
ending = []
domain = my_phase.findall('node')
for one in domain:
node_id = one.get('id')
self.__nodes[node_id] = Node(node_id, self.__workdir)
self.__nodes[node_id].parse(one, param)
beginning.append(node_id)
ending.append(node_id)
domain = my_phase.findall('parent')
self.__edges = parse_parent(domain, beginning, ending)
if len(beginning) == 1:
self.start = beginning[0]
else:
print("Error : Phase must have only one entry! " + str(len(beginning)) + ' entries detected!')
exit(-2)
if len(ending) == 1:
self.end = ending[0]
else:
print("Error : Phase must have only one ending! " + str(len(ending)) + ' endings detected!')
exit(-3)
domain = my_phase.findall('loop')
if len(domain)>1:
print('WARNING : there are multiple domain "loop" in a phase declaration. Only take the first one.')
if len(domain)>0:
#self.__loop = int(domain[0].text)
self.__loop = int(parse_statement(domain[0],param))
domain = my_phase.findall('scr')
if len(domain) > 1:
print('WARNING : there are multiple domain "scr" in a phase declaration. Only take the first one.')
for scr in list(domain[0]):
if scr.tag == 'pre':
self.__pre = scr.text
f = open(scr.text, 'r')
# now begin to replace the MACROS to the values configured
scr_contents = f.readlines()
f.close()
scr_new_contents = []
for cont in scr_contents:
scr_new_contents.append(parse_MACRO(cont, param))
f = open(scr.text, 'w', newline='\n')
f.writelines(scr_new_contents)
f.close()
elif scr.tag == 'post':
self.__post = scr.text
f = open(scr.text, 'r')
# now begin to replace the MACROS to the values configured
scr_contents = f.readlines()
scr_new_contents = []
for cont in scr_contents:
scr_new_contents.append(parse_MACRO(cont, param))
f.close()
f = open(scr.text, 'w', newline='\n')
f.writelines(scr_new_contents)
f.close()
else:
pass
if self.__loop > 1:
self.add_count_loop_to_pre(self.to_string() + '_')
def __init__(self, id, workdir):
self.__id = id
self.__workdir = workdir
self.__nodes = {}
self.__edges = {}
self.__loop = 1
self.__pre = ''
self.__post = ''
self.start = None
self.end = None
self.is_noop = False
self.__final_node = []
self.__first_node = []
def set_to_noop(self):
self.__nodes['.NOOP'] = Node('.NOOP', self.__workdir)
self.__nodes['.NOOP'].set_to_noop()
self.is_noop = True
def add_count_loop_to_pre(self, prefix=''):
#self.__pre = 'count_loop.bat ' + prefix + 'Outer_Loop_No.txt ' + self.__pre
return
def generate_nodes(self):
# generate sub file
for n in self.__nodes:
self.__nodes[n].generate()
def generate(self):
self.generate_nodes()
# generate sub dag file
f = open(self.__workdir + 'sub_Phase' + str(self.__id) + '.dag', 'w')
# generate node info
for n in self.__nodes:
f.write('job ' + self.__nodes[n].to_string() + ' ' + self.__nodes[n].to_file_string() + '\n')
# ADD PRE/POST SCRIPT
if self.__pre != '':
f.write('script pre ' + self.__nodes[self.start].to_string() + ' ' + self.__pre + '\n')
if self.__post != '':
f.write('script post ' + self.__nodes[self.end].to_string() + ' ' + self.__post + '\n')
# generate edge info
for parent in self.__edges:
f.write('parent ' + self.__nodes[parent].to_string() + ' child')
for child in self.__edges[parent]:
f.write(' ' + self.__nodes[child].to_string())
f.write('\n')
f.close()
f = open(self.__workdir + 'Phase' + str(self.__id) + '.dag', 'w')
for k in range(0, self.__loop):
f.write('subdag external sub_Phase' + str(self.__id) + '_' + str(k) +
' sub_Phase' + str(self.__id) + '.dag' + '\n')
for k in range(0, self.__loop-1):
f.write('parent sub_Phase' + str(self.__id) + '_' + str(k) +
' child sub_Phase' + str(self.__id) + '_' + str(k+1) + '\n')
f.close()
# generate dag description
def generate_to_string(self, loopnum):
buff = ''
for k in range(0, self.__loop):
for n in self.__nodes:
if self.__nodes[n].is_noop:
buff += 'job ' + self.__nodes[n].to_string() + '_' + str(k) + '_' + str(loopnum) + ' ' + \
self.__nodes[n].to_file_string() + ' NOOP\n'
else:
buff += 'job ' + self.__nodes[n].to_string() + '_' + str(k) + '_' + str(loopnum) + ' ' + \
self.__nodes[n].to_file_string() + '\n'
# ADD PRE/POST SCRIPT
if self.__pre != '':
buff += 'script pre ' + self.__nodes[self.start].to_string() + '_' + str(k) + '_' + str(loopnum) + \
' ' + self.__pre + '\n'
if self.__post != '':
buff += 'script post ' + self.__nodes[self.end].to_string() + '_' + str(k) + '_' + str(loopnum) + \
' ' + self.__post + '\n'
# generate edge info
for parent in self.__edges:
buff += 'parent ' + self.__nodes[parent].to_string() + '_' + str(k) + '_' + str(loopnum) + ' child'
for child in self.__edges[parent]:
buff += ' ' + self.__nodes[child].to_string() + '_' + str(k) + '_' + str(loopnum)
buff += '\n'
for k in range(0, self.__loop - 1):
buff += 'parent ' + self.__nodes[self.end].to_string() + '_' + str(k) + '_' + str(loopnum) + \
' child ' + self.__nodes[self.start].to_string() + '_' + str(k + 1) + '_' + str(loopnum) + '\n'
self.__final_node.append(self.__nodes[self.end].to_string() + '_' + str(self.__loop-1) + '_' + str(loopnum))
self.__first_node.append(self.__nodes[self.start].to_string() + '_0_' + str(loopnum))
return buff
def to_string(self):
return 'Phase' + str(self.__id)
def to_file_string(self):
return self.to_string() + '.dag'
def final_node(self, loopnum):
return self.__final_node[loopnum]
def first_node(self, loopnum):
return self.__first_node[loopnum]
class DAG:
def parse(self, xml, param):
work = ET.parse(xml)
phases = work.findall('phase')
for p in phases:
pid = p.get('id')
self.__phases[pid] = Phase(pid, self.__workdir)
self.__phases[pid].parse(p, param)
self.start.append(pid)
self.end.append(pid)
domain = work.findall('parent')
self.__edges = parse_parent(domain, self.start, self.end)
if len(self.start) > 1:
self.__edges['.NOOP'] = self.start
self.start = ['.NOOP']
self.__phases['.NOOP'] = Phase('.NOOP', self.__workdir)
self.__phases['.NOOP'].set_to_noop()
if len(self.start) <= 0:
print("Error : workflow must have at least one entry! " + str(len(self.start)) + ' entries detected!')
exit(-2)
if len(self.end) <= 0:
print("Error : workflow must have at least one ending! " + str(len(self.end)) + ' endings detected!')
exit(-3)
domain = work.findall('loop')
if len(domain) > 0:
if len(domain) > 1:
print('WARNING : there are multiple domain "loop" in a workflow declaration. Only take the first one.')
self.__loop = int(parse_statement(domain[0], param))
def __init__(self, workdir, xml, param):
self.__workdir = workdir
self.__phases = {}
self.__edges = {}
self.__loop = 1
self.start = []
self.end = []
self.parse(xml, param)
def generate(self, mode):
if mode == 'foo':
self.generate_foo()
elif mode == 'sub':
self.generate_sub()
elif mode == 'mix':
raise NotImplementedError('MIX MODE NOT IMPLEMENTED')
def generate_sub(self):
for key in self.__phases:
self.__phases[key].generate()
f = open(self.__workdir + 'submit.dag', 'w')
for k in range(0, self.__loop):
for key in self.__phases:
f.write('subdag external ' + self.__phases[key].to_string() + '_' + str(k) + ' ' +
self.__phases[key].to_file_string() + '\n')
for key in self.__edges:
f.write('parent ' + self.__phases[key].to_string() + '_' + str(k) + ' child')
for child in self.__edges[key]:
f.write(' ' + self.__phases[child].to_string() + '_' + str(k))
f.write('\n')
buff = ''
for k in range(0, self.__loop-1):
buff += 'parent '
for p in self.end:
buff += self.__phases[p].to_string() + '_' + str(k) + ' '
buff += 'child'
for c in self.start:
buff += ' ' + self.__phases[c].to_string() + '_' + str(k+1)
buff += '\n'
f.write(buff)
f.close()
def generate_foo(self):
for key in self.__phases:
self.__phases[key].generate_nodes()
if self.__loop > 1:
self.__phases[self.start[0]].add_count_loop_to_pre()
buff = ''
for k in range(0, self.__loop):
for key in self.__phases:
buff += self.__phases[key].generate_to_string(k) + '\n'
for key in self.__edges:
buff += 'parent ' + self.__phases[key].final_node(k) + ' child'
for child in self.__edges[key]:
buff += ' ' + self.__phases[child].first_node(k)
buff += '\n'
buff += '\n'
for k in range(0, self.__loop-1):
buff += 'parent '
for p in self.end:
buff += self.__phases[p].final_node(k) + ' '
buff += 'child'
for c in self.start:
buff += ' ' + self.__phases[c].first_node(k+1)
buff += '\n'
f = open(self.__workdir + 'submit.dag', 'w')
f.write(buff)
f.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--xml', help='Input XML file', default='in.xml')
parser.add_argument('--out', help='Output folder', default='./submit/')
parser.add_argument('--mode', help='Generation Mode: foo / mix / sub', default='foo')
parser.add_argument('--parameter', help='parameter files for dag generation',default='')
args = parser.parse_args()
XML = args.xml
out = args.out
if not os.path.exists(XML):
print("Error : Input file not exists!")
exit(-1)
if out == './':
if os.path.exists('./log'):
shutil.rmtree('./log')
os.mkdir('./log')
out = './'
elif os.path.exists(out):
shutil.rmtree(out)
os.mkdir(out)
os.mkdir(out + 'log')
else:
os.mkdir(out)
os.mkdir(out + 'log')
parameter = Parameter()
if args.parameter == 'Parameter.dat':
parameter.read()
else:
parameter.general_read(args.parameter)
dag = DAG(out, XML, parameter)
dag.generate(args.mode)
```
|
{
"source": "JerryYuYG/kafka",
"score": 2
}
|
#### File: clients/python/kafka.py
```python
import socket
import struct
import binascii
import sys
PRODUCE_REQUEST_ID = 0
def encode_message(message):
# <MAGIC_BYTE: char> <COMPRESSION_ALGO: char> <CRC32: int> <PAYLOAD: bytes>
return struct.pack('>B', 1) + \
struct.pack('>B', 0) + \
struct.pack('>i', binascii.crc32(message)) + \
message
def encode_produce_request(topic, partition, messages):
# encode messages as <LEN: int><MESSAGE_BYTES>
encoded = [encode_message(message) for message in messages]
message_set = ''.join([struct.pack('>i', len(m)) + m for m in encoded])
# create the request as <REQUEST_SIZE: int> <REQUEST_ID: short> <TOPIC: bytes> <PARTITION: int> <BUFFER_SIZE: int> <BUFFER: bytes>
data = struct.pack('>H', PRODUCE_REQUEST_ID) + \
struct.pack('>H', len(topic)) + topic + \
struct.pack('>i', partition) + \
struct.pack('>i', len(message_set)) + message_set
return struct.pack('>i', len(data)) + data
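# Wire layout produced above (all fields big-endian):
#   [4-byte request size][2-byte request id=0][2-byte topic len][topic bytes]
#   [4-byte partition][4-byte message-set size][message set]
# where each message in the set is [4-byte len][magic=1][compression=0][4-byte crc32][payload].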
class KafkaProducer:
def __init__(self, host, port):
self.REQUEST_KEY = 0
self.connection = socket.socket()
self.connection.connect((host, port))
def close(self):
self.connection.close()
def send(self, messages, topic, partition = 0):
self.connection.sendall(encode_produce_request(topic, partition, messages))
if __name__ == '__main__':
if len(sys.argv) < 4:
print >> sys.stderr, 'USAGE: python', sys.argv[0], 'host port topic'
sys.exit(1)
host = sys.argv[1]
port = int(sys.argv[2])
topic = sys.argv[3]
producer = KafkaProducer(host, port)
while True:
        print 'Enter comma separated messages: ',
line = sys.stdin.readline()
messages = line.split(',')
producer.send(messages, topic)
print 'Sent', len(messages), 'messages successfully'
```
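As a quick sanity check on the wire format built by `encode_produce_request`, the sketch below (assuming the same Python 2 environment as the client above) unpacks the framing header of an encoded request; the topic name and payloads are made up and nothing is sent over a socket.

```python
# Hedged sketch: inspect the framing produced by the encoder above.
import struct

raw = encode_produce_request('test', 0, ['hello', 'world'])
size, request_id, topic_len = struct.unpack('>iHH', raw[:8])
assert size == len(raw) - 4              # REQUEST_SIZE excludes its own 4 bytes
assert request_id == PRODUCE_REQUEST_ID  # 0
assert topic_len == len('test')
```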
|
{
"source": "jerryz1982/api_client",
"score": 2
}
|
#### File: api_client/common/exceptions.py
```python
import six
import sys
from oslo_config import cfg
from oslo_log import log as logging
from api_client._i18n import _, _LE
LOG = logging.getLogger(__name__)
exc_log_opts = [
cfg.BoolOpt('fatal_exception_format_errors',
default=False,
help='Make exception message format errors fatal'),
]
CONF = cfg.CONF
CONF.register_opts(exc_log_opts)
class FortinetException(Exception):
"""Base Fortinet Exception
To correctly use this class, inherit from it and define
a 'msg_fmt' property. That msg_fmt will get printf'd
with the keyword arguments provided to the constructor.
"""
msg_fmt = _("An unknown exception occurred.")
code = 500
headers = {}
safe = False
def __init__(self, message=None, **kwargs):
self.kwargs = kwargs
if 'code' not in self.kwargs:
try:
self.kwargs['code'] = self.code
except AttributeError:
pass
if not message:
try:
message = self.msg_fmt % kwargs
except Exception:
exc_info = sys.exc_info()
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception(_LE('Exception in string format operation'))
for name, value in six.iteritems(kwargs):
LOG.error("%s: %s" % (name, value))
if CONF.fatal_exception_format_errors:
six.reraise(*exc_info)
else:
# at least get the core message out if something happened
message = self.msg_fmt
self.message = message
super(FortinetException, self).__init__(message)
def format_message(self):
return self.args[0]
class Invalid(FortinetException):
msg_fmt = _("Unacceptable parameters.")
code = 400
class NotFound(FortinetException):
msg_fmt = _("Resource could not be found.")
code = 404
class VirtualInterfaceMacAddressException(FortinetException):
msg_fmt = _("Creation of virtual interface with "
"unique mac address failed")
class HypervisorUnavailable(FortinetException):
msg_fmt = _("Connection to the hypervisor is broken on host: %(host)s")
class DeviceDetachFailed(FortinetException):
msg_fmt = _("Device detach failed for %(device)s: %(reason)s)")
class InstanceNotFound(NotFound):
ec2_code = 'InvalidInstanceID.NotFound'
msg_fmt = _("Instance %(instance_id)s could not be found.")
class DeviceNotFound(NotFound):
msg_fmt = _("Device '%(device)s' not found.")
class InvalidParameterValue(Invalid):
ec2_code = 'InvalidParameterValue'
msg_fmt = _("%(err)s")
class InvalidAggregateAction(Invalid):
msg_fmt = _("Unacceptable parameters.")
code = 400
class InvalidAggregateActionAdd(InvalidAggregateAction):
msg_fmt = _("Cannot add host to aggregate "
"%(aggregate_id)s. Reason: %(reason)s.")
class InvalidAggregateActionDelete(InvalidAggregateAction):
msg_fmt = _("Cannot remove host from aggregate "
"%(aggregate_id)s. Reason: %(reason)s.")
class InvalidAggregateActionUpdate(InvalidAggregateAction):
msg_fmt = _("Cannot update aggregate "
"%(aggregate_id)s. Reason: %(reason)s.")
class InvalidAggregateActionUpdateMeta(InvalidAggregateAction):
msg_fmt = _("Cannot update metadata of aggregate "
"%(aggregate_id)s. Reason: %(reason)s.")
class InvalidGroup(Invalid):
msg_fmt = _("Group not valid. Reason: %(reason)s")
class InvalidSortKey(Invalid):
msg_fmt = _("Sort key supplied was not valid.")
class InvalidStrTime(Invalid):
msg_fmt = _("Invalid datetime string: %(reason)s")
class InstanceInvalidState(Invalid):
msg_fmt = _("Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot "
"%(method)s while the instance is in this state.")
class InstanceNotRunning(Invalid):
msg_fmt = _("Instance %(instance_id)s is not running.")
class InstanceNotInRescueMode(Invalid):
msg_fmt = _("Instance %(instance_id)s is not in rescue mode")
class InstanceNotRescuable(Invalid):
msg_fmt = _("Instance %(instance_id)s cannot be rescued: %(reason)s")
class InstanceNotReady(Invalid):
msg_fmt = _("Instance %(instance_id)s is not ready")
class InstanceSuspendFailure(Invalid):
msg_fmt = _("Failed to suspend instance: %(reason)s")
class InstanceResumeFailure(Invalid):
msg_fmt = _("Failed to resume instance: %(reason)s")
class InstancePowerOnFailure(Invalid):
msg_fmt = _("Failed to power on instance: %(reason)s")
class InstancePowerOffFailure(Invalid):
msg_fmt = _("Failed to power off instance: %(reason)s")
class InstanceRebootFailure(Invalid):
msg_fmt = _("Failed to reboot instance: %(reason)s")
class InstanceTerminationFailure(Invalid):
msg_fmt = _("Failed to terminate instance: %(reason)s")
class InstanceDeployFailure(Invalid):
msg_fmt = _("Failed to deploy instance: %(reason)s")
class MultiplePortsNotApplicable(Invalid):
msg_fmt = _("Failed to launch instances: %(reason)s")
class InvalidFixedIpAndMaxCountRequest(Invalid):
msg_fmt = _("Failed to launch instances: %(reason)s")
class ServiceUnavailable(Invalid):
msg_fmt = _("Service is unavailable at this time.")
```
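A short illustrative sketch of how these exception classes build their messages: `msg_fmt` is interpolated with the constructor kwargs, and `format_message()` returns the result. The instance id below is a made-up placeholder.

```python
# Illustrative only; 'i-0123456789' is a placeholder instance id.
try:
    raise InstanceNotFound(instance_id='i-0123456789')
except InstanceNotFound as exc:
    # msg_fmt % kwargs -> "Instance i-0123456789 could not be found."
    print(exc.format_message())
    print(exc.kwargs['code'])  # 404, inherited from NotFound
```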
|
{
"source": "jerryz1982/rq",
"score": 2
}
|
#### File: rq/rq/logutils.py
```python
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import logging
from rq.utils import ColorizingStreamHandler
from rq.defaults import (DEFAULT_LOGGING_FORMAT,
DEFAULT_LOGGING_DATE_FORMAT)
def setup_loghandlers(level, date_format=DEFAULT_LOGGING_DATE_FORMAT,
log_format=DEFAULT_LOGGING_FORMAT):
logger = logging.getLogger('rq.worker')
if not _has_effective_handler(logger):
logger.setLevel(level)
formatter = logging.Formatter(fmt=log_format, datefmt=date_format)
handler = ColorizingStreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
def _has_effective_handler(logger):
"""
Checks if a logger has a handler that will catch its messages in its logger hierarchy.
:param `logging.Logger` logger: The logger to be checked.
:return: True if a handler is found for the logger, False otherwise.
:rtype: bool
"""
while True:
if logger.handlers:
return True
if not logger.parent:
return False
logger = logger.parent
```
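A minimal usage sketch of the helper above: the first call attaches a `ColorizingStreamHandler` to the `'rq.worker'` logger, and a repeated call is a no-op because `_has_effective_handler` finds the handler that was already added. The level names are arbitrary choices.

```python
import logging

setup_loghandlers('INFO')                                # first call configures the logger
assert _has_effective_handler(logging.getLogger('rq.worker'))

setup_loghandlers('DEBUG')                               # skipped: a handler already exists
assert len(logging.getLogger('rq.worker').handlers) == 1
```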
|
{
"source": "Jerryzcn/autogluon",
"score": 2
}
|
#### File: autogluon/scheduler/rl_scheduler.py
```python
import os
import json
import time
import pickle
import logging
import threading
import multiprocessing as mp
from collections import OrderedDict
import mxnet as mx
from .resource import DistributedResource
from ..utils import (save, load, mkdir, try_import_mxboard, tqdm)
from ..core import Task
from ..core.decorator import _autogluon_method
from ..searcher import RLSearcher
from .fifo import FIFOScheduler
from .reporter import DistStatusReporter
__all__ = ['RLScheduler']
logger = logging.getLogger(__name__)
class RLScheduler(FIFOScheduler):
r"""Scheduler that uses Reinforcement Learning with a LSTM controller created based on the provided search spaces
Parameters
----------
train_fn : callable
        A task launch function for training. Note: please add the `@ag.args` decorator to the original function.
args : object (optional)
Default arguments for launching train_fn.
resource : dict
Computation resources. For example, `{'num_cpus':2, 'num_gpus':1}`
searcher : object (optional)
Autogluon searcher. For example, autogluon.searcher.RandomSearcher
time_attr : str
A training result attr to use for comparing time.
Note that you can pass in something non-temporal such as
`training_epoch` as a measure of progress, the only requirement
is that the attribute should increase monotonically.
reward_attr : str
The training result objective value attribute. As with `time_attr`, this may refer to any objective value.
Stopping procedures will use this attribute.
    controller_resource : dict
        Computation resources reserved for training the controller, e.g. `{'num_cpus': 0, 'num_gpus': 0}`.
    controller_batch_size : int
        Batch size for training the controller.
dist_ip_addrs : list of str
IP addresses of remote machines.
Examples
--------
>>> import numpy as np
>>> import autogluon as ag
>>>
>>> @ag.args(
... lr=ag.space.Real(1e-3, 1e-2, log=True),
... wd=ag.space.Real(1e-3, 1e-2))
>>> def train_fn(args, reporter):
... print('lr: {}, wd: {}'.format(args.lr, args.wd))
... for e in range(10):
... dummy_accuracy = 1 - np.power(1.8, -np.random.uniform(e, 2*e))
... reporter(epoch=e, accuracy=dummy_accuracy, lr=args.lr, wd=args.wd)
...
>>> scheduler = ag.scheduler.RLScheduler(train_fn,
... resource={'num_cpus': 2, 'num_gpus': 0},
... num_trials=20,
... reward_attr='accuracy',
... time_attr='epoch')
>>> scheduler.run()
>>> scheduler.join_jobs()
>>> scheduler.get_training_curves(plot=True)
"""
def __init__(self, train_fn, args=None, resource=None, checkpoint='./exp/checkpoint.ag',
resume=False, num_trials=None, time_attr='epoch', reward_attr='accuracy',
visualizer='none', controller_lr=1e-3, ema_baseline_decay=0.95,
controller_resource={'num_cpus': 0, 'num_gpus': 0},
controller_batch_size=1,
dist_ip_addrs=[], sync=True, **kwargs):
assert isinstance(train_fn, _autogluon_method), 'Please use @ag.args ' + \
'to decorate your training script.'
self.ema_baseline_decay = ema_baseline_decay
self.sync = sync
# create RL searcher/controller
searcher = RLSearcher(train_fn.kwspaces)
        super(RLScheduler, self).__init__(
train_fn, train_fn.args, resource, searcher,
checkpoint=checkpoint, resume=False, num_trials=num_trials,
time_attr=time_attr, reward_attr=reward_attr,
visualizer=visualizer, dist_ip_addrs=dist_ip_addrs, **kwargs)
# reserve controller computation resource on master node
master_node = self.REMOTE_MANAGER.get_master_node()
self.controller_resource = DistributedResource(**controller_resource)
assert self.RESOURCE_MANAGER.reserve_resource(
master_node, self.controller_resource), 'Not Enough Resource on Master Node' + \
' for Training Controller'
self.controller_ctx = [mx.gpu(i) for i in self.controller_resource.gpu_ids] if \
controller_resource['num_gpus'] > 0 else [mx.cpu()]
# controller setup
self.controller = searcher.controller
self.controller.collect_params().reset_ctx(self.controller_ctx)
self.controller_optimizer = mx.gluon.Trainer(
self.controller.collect_params(), 'adam',
optimizer_params={'learning_rate': controller_lr*controller_batch_size})
self.controller_batch_size = controller_batch_size
self.baseline = None
self.lock = mp.Lock()
# async buffers
if not sync:
self.mp_count = mp.Value('i', 0)
self.mp_seed = mp.Value('i', 0)
self.mp_fail = mp.Value('i', 0)
if resume:
if os.path.isfile(checkpoint):
self.load_state_dict(load(checkpoint))
else:
msg = 'checkpoint path {} is not available for resume.'.format(checkpoint)
logger.exception(msg)
def run(self, **kwargs):
"""Run multiple number of trials
"""
self.num_trials = kwargs.get('num_trials', self.num_trials)
logger.info('Starting Experiments')
logger.info('Num of Finished Tasks is {}'.format(self.num_finished_tasks))
logger.info('Num of Pending Tasks is {}'.format(self.num_trials - self.num_finished_tasks))
if self.sync:
self._run_sync()
else:
self._run_async()
def _run_sync(self):
decay = self.ema_baseline_decay
for i in tqdm(range(self.num_trials // self.controller_batch_size + 1)):
with mx.autograd.record():
# sample controller_batch_size number of configurations
                # the last iteration picks up the remainder of num_trials
                batch_size = self.num_trials % self.controller_batch_size \
                    if i == self.num_trials // self.controller_batch_size \
                    else self.controller_batch_size
                if batch_size == 0: continue
configs, log_probs, entropies = self.controller.sample(
batch_size, with_details=True)
# schedule the training tasks and gather the reward
rewards = self.sync_schedule_tasks(configs)
                # subtract baseline
if self.baseline is None:
self.baseline = rewards[0]
avg_rewards = mx.nd.array([reward - self.baseline for reward in rewards],
ctx=self.controller.context)
# EMA baseline
for reward in rewards:
self.baseline = decay * self.baseline + (1 - decay) * reward
# negative policy gradient
log_probs = log_probs.sum(axis=1)
                loss = - log_probs * avg_rewards  # .reshape(-1, 1)
loss = loss.sum() # or loss.mean()
# update
loss.backward()
self.controller_optimizer.step(batch_size)
logger.debug('controller loss: {}'.format(loss.asscalar()))
def _run_async(self):
def _async_run_trial():
self.mp_count.value += 1
self.mp_seed.value += 1
seed = self.mp_seed.value
mx.random.seed(seed)
with mx.autograd.record():
# sample one configuration
with self.lock:
config, log_prob, entropy = self.controller.sample(with_details=True)
config = config[0]
task = Task(self.train_fn, {'args': self.args, 'config': config},
DistributedResource(**self.resource))
# start training task
reporter = DistStatusReporter(remote=task.resources.node)
task.args['reporter'] = reporter
task_thread = self.add_job(task)
# run reporter
last_result = None
config = task.args['config']
while task_thread.is_alive():
reported_result = reporter.fetch()
if 'done' in reported_result and reported_result['done'] is True:
reporter.move_on()
task_thread.join()
break
self._add_training_result(task.task_id, reported_result, task.args['config'])
reporter.move_on()
last_result = reported_result
reward = last_result[self._reward_attr]
self.searcher.update(config, reward, done=True)
with self.lock:
if self.baseline is None:
self.baseline = reward
avg_reward = mx.nd.array([reward - self.baseline], ctx=self.controller.context)
# negative policy gradient
with self.lock:
loss = -log_prob * avg_reward.reshape(-1, 1)
loss = loss.sum()
# update
print('loss', loss)
with self.lock:
try:
loss.backward()
self.controller_optimizer.step(1)
except Exception:
self.mp_fail.value += 1
logger.warning('Exception during backward {}.'.format(self.mp_fail.value))
self.mp_count.value -= 1
# ema
with self.lock:
decay = self.ema_baseline_decay
self.baseline = decay * self.baseline + (1 - decay) * reward
reporter_threads = []
for i in range(self.num_trials):
while self.mp_count.value >= self.controller_batch_size:
time.sleep(0.2)
#_async_run_trial()
reporter_thread = threading.Thread(target=_async_run_trial)
reporter_thread.start()
reporter_threads.append(reporter_thread)
for p in reporter_threads:
p.join()
def sync_schedule_tasks(self, configs):
rewards = []
results = {}
def _run_reporter(task, task_job, reporter):
last_result = None
config = task.args['config']
while not task_job.done():
reported_result = reporter.fetch()
if 'traceback' in reported_result:
logger.exception(reported_result['traceback'])
reporter.move_on()
break
if 'done' in reported_result and reported_result['done'] is True:
reporter.move_on()
break
self._add_training_result(task.task_id, reported_result, task.args['config'])
reporter.move_on()
last_result = reported_result
if last_result is not None:
self.searcher.update(config, last_result[self._reward_attr], done=True)
with self.lock:
results[pickle.dumps(config)] = last_result[self._reward_attr]
# launch the tasks
tasks = []
task_jobs = []
reporter_threads = []
for config in configs:
logger.debug('scheduling config: {}'.format(config))
# create task
task = Task(self.train_fn, {'args': self.args, 'config': config},
DistributedResource(**self.resource))
reporter = DistStatusReporter()
task.args['reporter'] = reporter
task_job = self.add_job(task)
# run reporter
reporter_thread = threading.Thread(target=_run_reporter, args=(task, task_job, reporter))
reporter_thread.start()
tasks.append(task)
task_jobs.append(task_job)
reporter_threads.append(reporter_thread)
for p1, p2 in zip(task_jobs, reporter_threads):
p1.result()
p2.join()
with self.LOCK:
for task in tasks:
self.finished_tasks.append({'TASK_ID': task.task_id,
'Config': task.args['config']})
if self._checkpoint is not None:
                logger.debug('Saving Checkpoint')
self.save()
for config in configs:
rewards.append(results[pickle.dumps(config)])
return rewards
def add_job(self, task, **kwargs):
"""Adding a training task to the scheduler.
Args:
task (:class:`autogluon.scheduler.Task`): a new training task
"""
cls = RLScheduler
cls.RESOURCE_MANAGER._request(task.resources)
# main process
job = cls._start_distributed_job(task, cls.RESOURCE_MANAGER)
return job
def join_tasks(self):
pass
def state_dict(self, destination=None):
"""Returns a dictionary containing a whole state of the Scheduler
Examples
--------
>>> ag.save(scheduler.state_dict(), 'checkpoint.ag')
"""
if destination is None:
destination = OrderedDict()
destination._metadata = OrderedDict()
logger.debug('\nState_Dict self.finished_tasks: {}'.format(self.finished_tasks))
destination['finished_tasks'] = pickle.dumps(self.finished_tasks)
destination['baseline'] = pickle.dumps(self.baseline)
destination['TASK_ID'] = Task.TASK_ID.value
destination['searcher'] = self.searcher.state_dict()
destination['training_history'] = json.dumps(self.training_history)
if self.visualizer == 'mxboard' or self.visualizer == 'tensorboard':
destination['visualizer'] = json.dumps(self.mxboard._scalar_dict)
return destination
def load_state_dict(self, state_dict):
"""Load from the saved state dict.
Examples
--------
>>> scheduler.load_state_dict(ag.load('checkpoint.ag'))
"""
self.finished_tasks = pickle.loads(state_dict['finished_tasks'])
#self.baseline = pickle.loads(state_dict['baseline'])
Task.set_id(state_dict['TASK_ID'])
self.searcher.load_state_dict(state_dict['searcher'])
self.training_history = json.loads(state_dict['training_history'])
if self.visualizer == 'mxboard' or self.visualizer == 'tensorboard':
self.mxboard._scalar_dict = json.loads(state_dict['visualizer'])
logger.debug('Loading Searcher State {}'.format(self.searcher))
```
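To make the reward handling in `_run_sync` concrete, here is a small numeric sketch of the EMA baseline and the advantages that scale the policy-gradient loss; the decay and reward values are made up.

```python
# Numeric sketch of the baseline logic in _run_sync (values are made up).
decay, baseline = 0.95, None
rewards = [0.60, 0.70, 0.65]                   # e.g. validation accuracies of one batch

if baseline is None:                           # the first batch initialises the baseline
    baseline = rewards[0]
advantages = [r - baseline for r in rewards]   # these multiply -log_prob in the loss

for r in rewards:                              # EMA update applied after the batch
    baseline = decay * baseline + (1 - decay) * r

print(advantages)                              # [0.0, ~0.10, ~0.05]
print(baseline)                                # ~0.607
```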
#### File: task/tabular_prediction/tabular_prediction.py
```python
import copy
import logging
import math
import numpy as np
from .dataset import TabularDataset
from .predictor import TabularPredictor
from ..base import BaseTask
from ..base.base_task import schedulers
from ...utils import verbosity2loglevel
from ...utils.tabular.features.auto_ml_feature_generator import AutoMLFeatureGenerator
from ...utils.tabular.metrics import get_metric
from ...utils.tabular.ml.learner.default_learner import DefaultLearner as Learner
from ...utils.tabular.ml.trainer.auto_trainer import AutoTrainer
from ...utils.tabular.ml.utils import setup_outputdir, setup_compute, setup_trial_limits
__all__ = ['TabularPrediction']
logger = logging.getLogger() # return root logger
class TabularPrediction(BaseTask):
"""
AutoGluon Task for predicting values in column of tabular dataset (classification or regression)
"""
Dataset = TabularDataset
Predictor = TabularPredictor
@staticmethod
def load(output_directory, verbosity=2):
"""
Load a predictor object previously produced by `fit()` from file and returns this object.
Parameters
----------
output_directory : str
Path to directory where trained models are stored (i.e. the output_directory specified in previous call to `fit`).
verbosity : int, default = 2
Verbosity levels range from 0 to 4 and control how much information will be printed by the loaded `Predictor`.
Higher levels correspond to more detailed print statements (you can set verbosity = 0 to suppress warnings).
            If using logging, you can alternatively control the amount of information printed via `logger.setLevel(L)`,
where L ranges from 0 to 50 (Note: higher values L correspond to fewer print statements, opposite of verbosity levels)
Returns
-------
:class:`autogluon.task.tabular_prediction.TabularPredictor` object that can be used to make predictions.
"""
logger.setLevel(verbosity2loglevel(verbosity)) # Reset logging after load (since we may be in new Python session)
if output_directory is None:
raise ValueError("output_directory cannot be None in load()")
output_directory = setup_outputdir(output_directory) # replace ~ with absolute path if it exists
learner = Learner.load(output_directory)
return TabularPredictor(learner=learner)
@staticmethod
def fit(train_data, label, tuning_data=None, output_directory=None, problem_type=None, eval_metric=None, stopping_metric=None,
auto_stack=False, hyperparameter_tune=False, feature_prune=False, holdout_frac=None,
num_bagging_folds=0, num_bagging_sets=None, stack_ensemble_levels=0,
hyperparameters=None, cache_data=True,
time_limits=None, num_trials=None, search_strategy='random', search_options=None,
nthreads_per_trial=None, ngpus_per_trial=None, dist_ip_addrs=None, visualizer='none',
verbosity=2, **kwargs):
"""
Fit models to predict a column of data table based on the other columns.
Parameters
----------
train_data : str or :class:`autogluon.task.tabular_prediction.TabularDataset` or `pandas.DataFrame`
Table of the training data, which is similar to pandas DataFrame.
If str is passed, `train_data` will be loaded using the str value as the file path.
label : str
Name of the column that contains the target variable to predict.
tuning_data : str or :class:`autogluon.task.tabular_prediction.TabularDataset` or `pandas.DataFrame`, default = None
Another dataset containing validation data reserved for hyperparameter tuning (in same format as training data).
If str is passed, `tuning_data` will be loaded using the str value as the file path.
Note: final model returned may be fit on this tuning_data as well as train_data. Do not provide your evaluation test data here!
In particular, when `num_bagging_folds` > 0 or `stack_ensemble_levels` > 0, models will be trained on both `tuning_data` and `train_data`.
If `tuning_data = None`, `fit()` will automatically hold out some random validation examples from `train_data`.
output_directory : str
Path to directory where models and intermediate outputs should be saved.
If unspecified, a time-stamped folder called "autogluon-fit-[TIMESTAMP]" will be created in the working directory to store all models.
Note: To call `fit()` twice and save all results of each fit, you must specify different `output_directory` locations.
Otherwise files from first `fit()` will be overwritten by second `fit()`.
problem_type : str, default = None
Type of prediction problem, i.e. is this a binary/multiclass classification or regression problem (options: 'binary', 'multiclass', 'regression').
If `problem_type = None`, the prediction problem type is inferred based on the label-values in provided dataset.
eval_metric : function or str, default = None
Metric by which predictions will be ultimately evaluated on test data.
AutoGluon tunes factors such as hyperparameters, early-stopping, ensemble-weights, etc. in order to improve this metric on validation data.
If `eval_metric = None`, it is automatically chosen based on `problem_type`.
Defaults to 'accuracy' for binary and multiclass classification and 'root_mean_squared_error' for regression.
Otherwise, options for classification: [
'accuracy', 'balanced_accuracy', 'f1', 'f1_macro', 'f1_micro', 'f1_weighted',
'roc_auc', 'average_precision', 'precision', 'precision_macro', 'precision_micro', 'precision_weighted',
'recall', 'recall_macro', 'recall_micro', 'recall_weighted', 'log_loss', 'pac_score'].
Options for regression: ['root_mean_squared_error', 'mean_squared_error', 'mean_absolute_error', 'median_absolute_error', 'r2'].
For more information on these options, see `sklearn.metrics`: https://scikit-learn.org/stable/modules/classes.html#sklearn-metrics-metrics
You can also pass your own evaluation function here as long as it follows formatting of the functions defined in `autogluon/utils/tabular/metrics/`.
stopping_metric : function or str, default = None
Metric which iteratively-trained models use to early stop to avoid overfitting.
`stopping_metric` is not used by weighted ensembles, instead weighted ensembles maximize `eval_metric`.
Defaults to `eval_metric` value except when `eval_metric='roc_auc'`, where it defaults to `log_loss`.
Options are identical to options for `eval_metric`.
auto_stack : bool, default = False
Whether AutoGluon should automatically utilize bagging and multi-layer stack ensembling to boost predictive accuracy.
Set this = True if you are willing to tolerate longer training times in order to maximize predictive accuracy!
Note: This overrides `num_bagging_folds` and `stack_ensemble_levels` arguments (selects optimal values for these parameters based on dataset properties).
Note: This can increase training time (and inference time) by up to 20x, but can greatly improve predictive performance.
hyperparameter_tune : bool, default = False
Whether to tune hyperparameters or just use fixed hyperparameter values for each model. Setting as True will increase `fit()` runtimes.
It is currently not recommended to use `hyperparameter_tune` with `auto_stack` due to potential overfitting.
Use `auto_stack` to maximize predictive accuracy; use `hyperparameter_tune` if you prefer to deploy just a single model rather than an ensemble.
feature_prune : bool, default = False
Whether or not to perform feature selection.
hyperparameters : dict
Keys are strings that indicate which model types to train.
Options include: 'NN' (neural network), 'GBM' (lightGBM boosted trees), 'CAT' (CatBoost boosted trees), 'RF' (random forest), 'XT' (extremely randomized trees), 'KNN' (k-nearest neighbors)
If certain key is missing from hyperparameters, then `fit()` will not train any models of that type.
For example, set `hyperparameters = { 'NN':{...} }` if say you only want to train neural networks and no other types of models.
Values = dict of hyperparameter settings for each model type.
Each hyperparameter can either be single fixed value or a search space containing many possible values.
Unspecified hyperparameters will be set to default values (or default search spaces if `hyperparameter_tune = True`).
            Caution: Any provided search spaces will be overridden by fixed defaults if `hyperparameter_tune = False`.
Note: `hyperparameters` can also take a special key 'custom', which maps to a list of model names (currently supported options = 'GBM').
If `hyperparameter_tune = False`, then these additional models will also be trained using custom pre-specified hyperparameter settings that are known to work well.
Details regarding the hyperparameters you can specify for each model are provided in the following files:
NN: `autogluon/utils/tabular/ml/models/tabular_nn/hyperparameters/parameters.py`
Note: certain hyperparameter settings may cause these neural networks to train much slower.
GBM: `autogluon/utils/tabular/ml/models/lgb/hyperparameters/parameters.py`
See also the lightGBM docs: https://lightgbm.readthedocs.io/en/latest/Parameters.html
CAT: `autogluon/utils/tabular/ml/models/catboost/hyperparameters/parameters.py`
See also the CatBoost docs: https://catboost.ai/docs/concepts/parameter-tuning.html
RF: See sklearn documentation: https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html
Note: Hyperparameter tuning is disabled for this model.
                    Note: 'criterion' parameter will be overridden. Both 'gini' and 'entropy' are used automatically, training two models.
XT: See sklearn documentation: https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html
Note: Hyperparameter tuning is disabled for this model.
                    Note: 'criterion' parameter will be overridden. Both 'gini' and 'entropy' are used automatically, training two models.
KNN: See sklearn documentation: https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html
Note: Hyperparameter tuning is disabled for this model.
                    Note: 'weights' parameter will be overridden. Both 'distance' and 'uniform' are used automatically, training two models.
holdout_frac : float
Fraction of train_data to holdout as tuning data for optimizing hyperparameters (ignored unless `tuning_data = None`, ignored if `num_bagging_folds != 0`).
Default value is selected based on the number of rows in the training data. Default values range from 0.2 at 2,500 rows to 0.01 at 250,000 rows.
Default value is doubled if `hyperparameter_tune = True`, up to a maximum of 0.2.
Disabled if `num_bagging_folds >= 2`.
num_bagging_folds : int, default = 0
Number of folds used for bagging of models. When `num_bagging_folds = k`, training time is roughly increased by a factor of `k` (set = 0 to disable bagging).
Disabled by default, but we recommend values between 5-10 to maximize predictive performance.
Increasing num_bagging_folds will result in models with lower bias but that are more prone to overfitting.
Values > 10 may produce diminishing returns, and can even harm overall results due to overfitting.
To further improve predictions, avoid increasing num_bagging_folds much beyond 10 and instead increase num_bagging_sets.
num_bagging_sets : int
Number of repeats of kfold bagging to perform (values must be >= 1). Total number of models trained during bagging = num_bagging_folds * num_bagging_sets.
Defaults to 1 if time_limits is not specified, otherwise 20 (always disabled if num_bagging_folds is not specified).
Values greater than 1 will result in superior predictive performance, especially on smaller problems and with stacking enabled (reduces overall variance).
stack_ensemble_levels : int, default = 0
Number of stacking levels to use in stack ensemble. Roughly increases model training time by factor of `stack_ensemble_levels+1` (set = 0 to disable stack ensembling).
Disabled by default, but we recommend values between 1-3 to maximize predictive performance.
To prevent overfitting, this argument is ignored unless you have also set `num_bagging_folds >= 2`.
cache_data : bool, default = True
When enabled, the training and validation data are saved to disk for future reuse.
Enables advanced functionality in the resulting Predictor object such as feature importance calculation on the original data.
time_limits : int
Approximately how long `fit()` should run for (wallclock time in seconds).
If not specified, `fit()` will run until all models have completed training, but will not repeatedly bag models unless `num_bagging_sets` is specified.
num_trials : int
Maximal number of different hyperparameter settings of each model type to evaluate during HPO (only matters if `hyperparameter_tune = True`).
            If both `time_limits` and `num_trials` are specified, `time_limits` takes precedence.
search_strategy : str
Which hyperparameter search algorithm to use (only matters if `hyperparameter_tune = True`).
Options include: 'random' (random search), 'skopt' (SKopt Bayesian optimization), 'grid' (grid search), 'hyperband' (Hyperband)
search_options : dict
Auxiliary keyword arguments to pass to the searcher that performs hyperparameter optimization.
nthreads_per_trial : int
How many CPUs to use in each training run of an individual model.
This is automatically determined by AutoGluon when left as None (based on available compute).
ngpus_per_trial : int
How many GPUs to use in each trial (ie. single training run of a model).
This is automatically determined by AutoGluon when left as None.
dist_ip_addrs : list
List of IP addresses corresponding to remote workers, in order to leverage distributed computation.
visualizer : str
How to visualize the neural network training progress during `fit()`. Options: ['mxboard', 'tensorboard', 'none'].
verbosity: int, default = 2
Verbosity levels range from 0 to 4 and control how much information is printed during fit().
Higher levels correspond to more detailed print statements (you can set verbosity = 0 to suppress warnings).
            If using logging, you can alternatively control the amount of information printed via `logger.setLevel(L)`,
where `L` ranges from 0 to 50 (Note: higher values of `L` correspond to fewer print statements, opposite of verbosity levels)
        Kwargs can include additional arguments for advanced users:
feature_generator_type : `FeatureGenerator` class, default=`AutoMLFeatureGenerator`
A `FeatureGenerator` class specifying which feature engineering protocol to follow
(see autogluon.utils.tabular.features.abstract_feature_generator.AbstractFeatureGenerator).
Note: The file containing your `FeatureGenerator` class must be imported into current Python session in order to use a custom class.
feature_generator_kwargs : dict, default={}
Keyword arguments to pass into the `FeatureGenerator` constructor.
trainer_type : `Trainer` class, default=`AutoTrainer`
                A class inheriting from `autogluon.utils.tabular.ml.trainer.abstract_trainer.AbstractTrainer` that controls training/ensembling of many models.
Note: In order to use a custom `Trainer` class, you must import the class file that defines it into the current Python session.
label_count_threshold : int, default = 10
For multi-class classification problems, this is the minimum number of times a label must appear in dataset in order to be considered an output class.
AutoGluon will ignore any classes whose labels do not appear at least this many times in the dataset (i.e. will never predict them).
id_columns : list, default = []
                Banned subset of column names that the model may not use as predictive features (e.g. contains label, user-ID, etc).
                These columns are ignored during `fit()`, but a DataFrame of just these columns with appended predictions may be produced, for example to submit in an ML competition.
Returns
-------
:class:`autogluon.task.tabular_prediction.TabularPredictor` object which can make predictions on new data and summarize what happened during `fit()`.
Examples
--------
>>> from autogluon import TabularPrediction as task
>>> train_data = task.Dataset(file_path='https://autogluon.s3-us-west-2.amazonaws.com/datasets/Inc/train.csv')
>>> label_column = 'class'
>>> predictor = task.fit(train_data=train_data, label=label_column)
>>> test_data = task.Dataset(file_path='https://autogluon.s3-us-west-2.amazonaws.com/datasets/Inc/test.csv')
>>> y_test = test_data[label_column]
>>> test_data = test_data.drop(labels=[label_column], axis=1)
>>> y_pred = predictor.predict(test_data)
>>> perf = predictor.evaluate_predictions(y_true=y_test, y_pred=y_pred)
>>> results = predictor.fit_summary()
To maximize predictive performance, use the following:
>>> eval_metric = 'roc_auc' # set this to the metric you ultimately care about
>>> time_limits = 360 # set as long as you are willing to wait (in sec)
>>> predictor = task.fit(train_data=train_data, label=label_column, eval_metric=eval_metric, auto_stack=True, time_limits=time_limits)
"""
if verbosity < 0:
verbosity = 0
elif verbosity > 4:
verbosity = 4
logger.setLevel(verbosity2loglevel(verbosity))
allowed_kwarg_names = {
'feature_generator_type',
'feature_generator_kwargs',
'trainer_type',
'label_count_threshold',
'id_columns',
'enable_fit_continuation' # TODO: Remove on 0.1.0 release
}
for kwarg_name in kwargs.keys():
if kwarg_name not in allowed_kwarg_names:
raise ValueError("Unknown keyword argument specified: %s" % kwarg_name)
if isinstance(train_data, str):
train_data = TabularDataset(file_path=train_data)
if tuning_data is not None and isinstance(tuning_data, str):
tuning_data = TabularDataset(file_path=tuning_data)
if len(set(train_data.columns)) < len(train_data.columns):
raise ValueError("Column names are not unique, please change duplicated column names (in pandas: train_data.rename(columns={'current_name':'new_name'})")
if tuning_data is not None and np.any(train_data.columns != tuning_data.columns):
raise ValueError("Column names must match between training and tuning data")
if feature_prune:
feature_prune = False # TODO: Fix feature pruning to add back as an option
# Currently disabled, needs to be updated to align with new model class functionality
logger.log(30, 'Warning: feature_prune does not currently work, setting to False.')
# TODO: Remove on 0.1.0 release
if 'enable_fit_continuation' in kwargs.keys():
logger.log(30, 'Warning: `enable_fit_continuation` is a deprecated parameter. It has been renamed to `cache_data`. Starting from AutoGluon 0.1.0, specifying `enable_fit_continuation` as a parameter will cause an exception.')
logger.log(30, 'Setting `cache_data` value equal to `enable_fit_continuation` value.')
cache_data = kwargs['enable_fit_continuation']
if not cache_data:
logger.log(30, 'Warning: `cache_data=False` will disable or limit advanced functionality after training such as feature importance calculations. It is recommended to set `cache_data=True` unless you explicitly wish to not have the data saved to disk.')
if hyperparameter_tune:
logger.log(30, 'Warning: `hyperparameter_tune=True` is currently experimental and may cause the process to hang. Setting `auto_stack=True` instead is recommended to achieve maximum quality models.')
if dist_ip_addrs is None:
dist_ip_addrs = []
if search_options is None:
search_options = dict()
if hyperparameters is None:
hyperparameters = {
'NN': {'num_epochs': 500},
'GBM': {'num_boost_round': 10000},
'CAT': {'iterations': 10000},
'RF': {'n_estimators': 300},
'XT': {'n_estimators': 300},
'KNN': {},
'custom': ['GBM'],
}
# Process kwargs to create feature generator, trainer, schedulers, searchers for each model:
output_directory = setup_outputdir(output_directory) # Format directory name
feature_generator_type = kwargs.get('feature_generator_type', AutoMLFeatureGenerator)
feature_generator_kwargs = kwargs.get('feature_generator_kwargs', {})
feature_generator = feature_generator_type(**feature_generator_kwargs) # instantiate FeatureGenerator object
id_columns = kwargs.get('id_columns', [])
trainer_type = kwargs.get('trainer_type', AutoTrainer)
nthreads_per_trial, ngpus_per_trial = setup_compute(nthreads_per_trial, ngpus_per_trial)
num_train_rows = len(train_data)
if auto_stack:
# TODO: What about datasets that are 100k+? At a certain point should we not bag?
# TODO: What about time_limits? Metalearning can tell us expected runtime of each model, then we can select optimal folds + stack levels to fit time constraint
num_bagging_folds = min(10, max(5, math.floor(num_train_rows / 100)))
stack_ensemble_levels = min(1, max(0, math.floor(num_train_rows / 750)))
if num_bagging_sets is None:
if num_bagging_folds >= 2:
if time_limits is not None:
num_bagging_sets = 20
else:
num_bagging_sets = 1
else:
num_bagging_sets = 1
label_count_threshold = kwargs.get('label_count_threshold', 10)
if num_bagging_folds is not None: # Ensure there exist sufficient labels for stratified splits across all bags
label_count_threshold = max(label_count_threshold, num_bagging_folds)
time_limits_orig = copy.deepcopy(time_limits)
time_limits_hpo = copy.deepcopy(time_limits)
if num_bagging_folds >= 2 and (time_limits_hpo is not None):
time_limits_hpo = time_limits_hpo / (1 + num_bagging_folds * (1 + stack_ensemble_levels))
time_limits_hpo, num_trials = setup_trial_limits(time_limits_hpo, num_trials, hyperparameters) # TODO: Move HPO time allocation to Trainer
if (num_trials is not None) and hyperparameter_tune and (num_trials == 1):
hyperparameter_tune = False
logger.log(30, 'Warning: Specified num_trials == 1 or time_limits is too small for hyperparameter_tune, setting to False.')
if holdout_frac is None:
# Between row count 5,000 and 25,000 keep 0.1 holdout_frac, as we want to grow validation set to a stable 2500 examples
if num_train_rows < 5000:
holdout_frac = max(0.1, min(0.2, 500.0 / num_train_rows))
else:
holdout_frac = max(0.01, min(0.1, 2500.0 / num_train_rows))
if hyperparameter_tune:
holdout_frac = min(0.2, holdout_frac * 2) # We want to allocate more validation data for HPO to avoid overfitting
# Add visualizer to NN hyperparameters:
if (visualizer is not None) and (visualizer != 'none') and ('NN' in hyperparameters):
hyperparameters['NN']['visualizer'] = visualizer
eval_metric = get_metric(eval_metric, problem_type, 'eval_metric')
stopping_metric = get_metric(stopping_metric, problem_type, 'stopping_metric')
# All models use the same scheduler:
scheduler_options = {
'resource': {'num_cpus': nthreads_per_trial, 'num_gpus': ngpus_per_trial},
'num_trials': num_trials,
'time_out': time_limits_hpo,
'visualizer': visualizer,
'time_attr': 'epoch', # For tree ensembles, each new tree (ie. boosting round) is considered one epoch
'reward_attr': 'validation_performance',
'dist_ip_addrs': dist_ip_addrs,
'searcher': search_strategy,
'search_options': search_options,
}
if isinstance(search_strategy, str):
scheduler = schedulers[search_strategy.lower()]
# This is a fix for now. But we need to separate between scheduler
# (mainly FIFO and Hyperband) and searcher. Currently, most searchers
# only work with FIFO, and Hyperband works only with random searcher,
# but this will be different in the future.
if search_strategy == 'hyperband':
# Currently, HyperbandScheduler only supports random searcher
scheduler_options['searcher'] = 'random'
else:
# TODO: Check that search_strategy is a subclass of TaskScheduler
assert callable(search_strategy)
scheduler = search_strategy
scheduler_options['searcher'] = 'random'
scheduler_options = (scheduler, scheduler_options) # wrap into tuple
learner = Learner(path_context=output_directory, label=label, problem_type=problem_type, objective_func=eval_metric, stopping_metric=stopping_metric,
id_columns=id_columns, feature_generator=feature_generator, trainer_type=trainer_type,
label_count_threshold=label_count_threshold)
learner.fit(X=train_data, X_test=tuning_data, scheduler_options=scheduler_options,
hyperparameter_tune=hyperparameter_tune, feature_prune=feature_prune,
holdout_frac=holdout_frac, num_bagging_folds=num_bagging_folds, num_bagging_sets=num_bagging_sets, stack_ensemble_levels=stack_ensemble_levels,
hyperparameters=hyperparameters, time_limit=time_limits_orig, save_data=cache_data, verbosity=verbosity)
return TabularPredictor(learner=learner)
```
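As a quick illustration of the `auto_stack` heuristic inside `fit()`, the sketch below evaluates the same two formulas for a few hypothetical training-set sizes.

```python
# Hedged sketch of the auto_stack heuristic from fit(); row counts are hypothetical.
import math

for num_train_rows in (500, 2000, 20000):
    num_bagging_folds = min(10, max(5, math.floor(num_train_rows / 100)))
    stack_ensemble_levels = min(1, max(0, math.floor(num_train_rows / 750)))
    print(num_train_rows, num_bagging_folds, stack_ensemble_levels)
# 500   -> 5 folds, 0 stack levels
# 2000  -> 10 folds, 1 stack level
# 20000 -> 10 folds, 1 stack level
```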
|
{
"source": "Jerryzcn/incubator-mxnet",
"score": 3
}
|
#### File: python/mxnet/_numpy_op_doc.py
```python
def _np_ones_like(a):
"""
Return an array of ones with the same shape and type as a given array.
Parameters
----------
a : ndarray
The shape and data-type of `a` define these same attributes of
the returned array.
Returns
-------
out : ndarray
Array of ones with the same shape and type as `a`.
"""
pass
def _np_zeros_like(a):
"""
Return an array of zeros with the same shape and type as a given array.
Parameters
----------
a : ndarray
The shape and data-type of `a` define these same attributes of
the returned array.
Returns
-------
out : ndarray
Array of zeros with the same shape and type as `a`.
"""
pass
def _np_cumsum(a, axis=None, dtype=None, out=None):
"""
Return the cumulative sum of the elements along a given axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
Axis along which the cumulative sum is computed. The default
(None) is to compute the cumsum over the flattened array.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults
to the dtype of `a`, unless `a` has an integer dtype with a
precision less than that of the default platform integer. In
that case, the default platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary. See `doc.ufuncs`
(Section "Output arguments") for more details.
Returns
-------
cumsum_along_axis : ndarray.
A new array holding the result is returned unless `out` is
specified, in which case a reference to `out` is returned. The
result has the same size as `a`, and the same shape as `a` if
`axis` is not None or `a` is a 1-d array.
Examples
--------
>>> a = np.array([[1,2,3], [4,5,6]])
>>> a
array([[1, 2, 3],
[4, 5, 6]])
>>> np.cumsum(a)
array([ 1, 3, 6, 10, 15, 21])
>>> np.cumsum(a, dtype=float) # specifies type of output value(s)
array([ 1., 3., 6., 10., 15., 21.])
>>> np.cumsum(a,axis=0) # sum over rows for each of the 3 columns
array([[1, 2, 3],
[5, 7, 9]])
>>> np.cumsum(a,axis=1) # sum over columns for each of the 2 rows
array([[ 1, 3, 6],
[ 4, 9, 15]])
"""
pass
def _npx_nonzero(a):
"""
Return the indices of the elements that are non-zero.
Returns a ndarray with ndim is 2. Each row contains the indices
of the non-zero elements. The values in `a` are always tested and returned in
row-major, C-style order.
The result of this is always a 2-D array, with a row for
each non-zero element.
Parameters
----------
a : array_like
Input array.
Returns
-------
array : ndarray
Indices of elements that are non-zero.
Notes
-----
This function differs from the original numpy.nonzero in the following aspects:
- Does not support python numeric.
- The return value is same as numpy.transpose(numpy.nonzero(a)).
Examples
--------
>>> x = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]])
>>> x
array([[3, 0, 0],
[0, 4, 0],
[5, 6, 0]])
>>> npx.nonzero(x)
array([[0, 0],
[1, 1],
[2, 0],
[2, 1]], dtype=int64)
>>> np.transpose(npx.nonzero(x))
array([[0, 1, 2, 2],
[0, 1, 0, 1]], dtype=int64)
"""
pass
def _np_repeat(a, repeats, axis=None):
"""
Repeat elements of an array.
Parameters
----------
a : ndarray
Input array.
repeats : int
The number of repetitions for each element.
axis : int, optional
The axis along which to repeat values. By default, use the
flattened input array, and return a flat output array.
Returns
-------
repeated_array : ndarray
Output array which has the same shape as `a`, except along
the given axis.
Notes
-----
Unlike the official NumPy ``repeat`` operator, this operator currently
    does not support an array of ints for the parameter `repeats`.
Examples
--------
>>> x = np.arange(4).reshape(2, 2)
>>> x
array([[0., 1.],
[2., 3.]])
>>> np.repeat(x, repeats=3)
array([0., 0., 0., 1., 1., 1., 2., 2., 2., 3., 3., 3.])
>>> np.repeat(x, repeats=3, axis=0)
array([[0., 1.],
[0., 1.],
[0., 1.],
[2., 3.],
[2., 3.],
[2., 3.]])
>>> np.repeat(x, repeats=3, axis=1)
array([[0., 0., 0., 1., 1., 1.],
[2., 2., 2., 3., 3., 3.]])
"""
pass
def _np_transpose(a, axes=None):
"""
Permute the dimensions of an array.
Parameters
----------
a : ndarray
Input array.
axes : list of ints, optional
By default, reverse the dimensions,
otherwise permute the axes according to the values given.
Returns
-------
p : ndarray
a with its axes permuted.
Notes
-----
This function differs from the original `numpy.transpose
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.transpose.html>`_ in
the following way(s):
- only ndarray is accepted as valid input, python iterables are not supported
- the operator always returns an `ndarray` that does not share the memory with the input
Examples
--------
>>> x = np.arange(4).reshape((2,2))
>>> x
array([[0., 1.],
[2., 3.]])
>>> np.transpose(x)
array([[0., 2.],
[1., 3.]])
>>> x = np.ones((1, 2, 3))
>>> np.transpose(x, (1, 0, 2)).shape
(2, 1, 3)
"""
pass
def _np_dot(a, b, out=None):
"""
Dot product of two arrays. Specifically,
- If both `a` and `b` are 1-D arrays, it is inner product of vectors
- If both `a` and `b` are 2-D arrays, it is matrix multiplication,
- If either `a` or `b` is 0-D (scalar), it is equivalent to :func:`multiply`
and using ``np.multiply(a, b)`` or ``a * b`` is preferred.
- If `a` is an N-D array and `b` is a 1-D array, it is a sum product over
the last axis of `a` and `b`.
- If `a` is an N-D array and `b` is a 2-D array, it is a
sum product over the last axis of `a` and the second-to-last axis of `b`::
dot(a, b)[i,j,k] = sum(a[i,j,:] * b[:,k])
Parameters
----------
a : ndarray
First argument.
b : ndarray
Second argument.
out : ndarray, optional
Output argument. It must have the same shape and type as the expected output.
Returns
-------
output : ndarray
Returns the dot product of `a` and `b`. If `a` and `b` are both
scalars or both 1-D arrays then a scalar is returned; otherwise
an array is returned.
If `out` is given, then it is returned
Examples
--------
>>> a = np.array(3)
>>> b = np.array(4)
>>> np.dot(a, b)
array(12.)
For 2-D arrays it is the matrix product:
>>> a = np.array([[1, 0], [0, 1]])
>>> b = np.array([[4, 1], [2, 2]])
>>> np.dot(a, b)
array([[4., 1.],
[2., 2.]])
>>> a = np.arange(3*4*5*6).reshape((3,4,5,6))
>>> b = np.arange(5*6)[::-1].reshape((6,5))
>>> np.dot(a, b)[2,3,2,2]
array(29884.)
>>> np.sum(a[2,3,2,:] * b[:,2])
array(29884.)
"""
pass
def _np_sum(a, axis=None, dtype=None, keepdims=False, initial=None, out=None):
r"""
Sum of array elements over a given axis.
Parameters
----------
a : ndarray
Input data.
axis : None or int, optional
Axis or axes along which a sum is performed. The default,
axis=None, will sum all of the elements of the input array. If
axis is negative it counts from the last to the first axis.
dtype : dtype, optional
The type of the returned array and of the accumulator in which the
elements are summed. The default type is float32.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `sum` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-classes `sum` method does not implement `keepdims` any
exceptions will be raised.
initial: Currently only supports None as input, optional
Starting value for the sum.
Currently not implemented. Please use ``None`` as input or skip this argument.
out : ndarray or None, optional
Alternative output array in which to place the result. It must have
the same shape and dtype as the expected output.
Returns
-------
sum_along_axis : ndarray
An ndarray with the same shape as `a`, with the specified
axis removed. If an output array is specified, a reference to
`out` is returned.
Notes
-----
- Input type does not support Python native iterables.
- "out" param: cannot perform auto type change. out ndarray's dtype must be the same as the expected output.
- "initial" param is not supported yet. Please use None as input.
- Arithmetic is modular when using integer types, and no error is raised on overflow.
- The sum of an empty array is the neutral element 0:
>>> a = np.empty(1)
>>> np.sum(a)
array(0.)
This function differs from the original `numpy.sum
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.sum.html>`_ in
the following aspects:
- Input type does not support Python native iterables(list, tuple, ...).
- "out" param: cannot perform auto type cast. out ndarray's dtype must be the same as the expected output.
- "initial" param is not supported yet. Please use ``None`` as input or skip it.
Examples
--------
>>> a = np.array([0.5, 1.5])
>>> np.sum(a)
array(2.)
>>> a = np.array([0.5, 0.7, 0.2, 1.5])
>>> np.sum(a, dtype=np.int32)
array(2, dtype=int32)
>>> a = np.array([[0, 1], [0, 5]])
>>> np.sum(a)
array(6.)
>>> np.sum(a, axis=0)
array([0., 6.])
>>> np.sum(a, axis=1)
array([1., 5.])
With output ndarray:
>>> a = np.array([[0, 1], [0, 5]])
>>> b = np.ones((2,), dtype=np.float32)
>>> np.sum(a, axis = 0, out=b)
array([0., 6.])
>>> b
array([0., 6.])
If the accumulator is too small, overflow occurs:
>>> np.ones(128, dtype=np.int8).sum(dtype=np.int8)
array(-128, dtype=int8)
"""
pass
def _np_copy(a, out=None):
"""
Return an array copy of the given object.
Parameters
----------
a : ndarray
Input data.
out : ndarray or None, optional
Alternative output array in which to place the result. It must have
the same shape and dtype as the expected output.
Returns
-------
arr : ndarray
Array interpretation of `a`.
Notes
-------
This function differs from the original `numpy.copy
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.copy.html>`_ in
the following aspects:
- Input type does not support Python native iterables(list, tuple, ...).
- ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.
- ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.
- Does not support "order" parameter.
Examples
--------
Create an array x, with a reference y and a copy z:
>>> x = np.array([1, 2, 3])
>>> y = x
>>> z = np.copy(x)
Note that, when ``x`` is modified, ``y`` is also modified, but not ``z``:
>>> x[0] = 10
>>> x[0] == y[0]
array([1.])
>>> x[0] == z[0]
array([0.])
"""
pass
def _np_reshape(a, newshape, order='C', out=None):
"""
Gives a new shape to an array without changing its data.
This function always returns a copy of the input array if
``out`` is not provided.
Parameters
----------
a : ndarray
Array to be reshaped.
newshape : int or tuple of ints
The new shape should be compatible with the original shape. If
an integer, then the result will be a 1-D array of that length.
One shape dimension can be -1. In this case, the value is
inferred from the length of the array and remaining dimensions.
order : {'C'}, optional
Read the elements of `a` using this index order, and place the
elements into the reshaped array using this index order. 'C'
means to read / write the elements using C-like index order,
with the last axis index changing fastest, back to the first
axis index changing slowest. Other order types such as 'F'/'A'
may be added in the future.
Returns
-------
reshaped_array : ndarray
        It will always be a copy of the original array. This behavior is different
from the official NumPy ``reshape`` operator where views of the original array may be
generated.
See Also
--------
ndarray.reshape : Equivalent method.
"""
def _np_roll(a, shift, axis=None):
"""
Roll array elements along a given axis.
Elements that roll beyond the last position are re-introduced at
the first.
Parameters
----------
a : ndarray
Input array.
shift : int or tuple of ints
The number of places by which elements are shifted. If a tuple,
then `axis` must be a tuple of the same size, and each of the
given axes is shifted by the corresponding number. If an int
while `axis` is a tuple of ints, then the same value is used for
all given axes.
axis : int or tuple of ints, optional
Axis or axes along which elements are shifted. By default, the
array is flattened before shifting, after which the original
shape is restored.
Returns
-------
res : ndarray
Output array, with the same shape as `a`.
Notes
-----
Supports rolling over multiple dimensions simultaneously.
Examples
--------
>>> x = np.arange(10)
>>> np.roll(x, 2)
array([8., 9., 0., 1., 2., 3., 4., 5., 6., 7.])
>>> np.roll(x, -2)
array([2., 3., 4., 5., 6., 7., 8., 9., 0., 1.])
>>> x2 = np.reshape(x, (2,5))
>>> x2
array([[0., 1., 2., 3., 4.],
[5., 6., 7., 8., 9.]])
>>> np.roll(x2, 1)
array([[9., 0., 1., 2., 3.],
[4., 5., 6., 7., 8.]])
>>> np.roll(x2, -1)
array([[1., 2., 3., 4., 5.],
[6., 7., 8., 9., 0.]])
>>> np.roll(x2, 1, axis=0)
array([[5., 6., 7., 8., 9.],
[0., 1., 2., 3., 4.]])
>>> np.roll(x2, -1, axis=0)
array([[5., 6., 7., 8., 9.],
[0., 1., 2., 3., 4.]])
>>> np.roll(x2, 1, axis=1)
array([[4., 0., 1., 2., 3.],
[9., 5., 6., 7., 8.]])
>>> np.roll(x2, -1, axis=1)
array([[1., 2., 3., 4., 0.],
[6., 7., 8., 9., 5.]])
"""
def _np_trace(a, offset=0, axis1=0, axis2=1, out=None):
"""
Return the sum along diagonals of the array.
If `a` is 2-D, the sum along its diagonal with the given offset
is returned, i.e., the sum of elements ``a[i,i+offset]`` for all i.
If `a` has more than two dimensions, then the axes specified by axis1 and
axis2 are used to determine the 2-D sub-arrays whose traces are returned.
The shape of the resulting array is the same as that of `a` with `axis1`
and `axis2` removed.
Parameters
----------
a : ndarray
Input array, from which the diagonals are taken.
offset : int, optional
Offset of the diagonal from the main diagonal. Can be both positive
and negative. Defaults to 0.
axis1, axis2 : int, optional
Axes to be used as the first and second axis of the 2-D sub-arrays
from which the diagonals should be taken. Defaults are the first two
axes of `a`.
out : ndarray, optional
Array into which the output is placed. It must be of the right shape
and right type to hold the output.
Returns
-------
sum_along_diagonals : ndarray
If `a` is 2-D, the sum along the diagonal is returned. If `a` has
larger dimensions, then an array of sums along diagonals is returned.
Examples
--------
>>> a = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
>>> np.trace(a)
array(3.)
>>> a = np.arange(8).reshape((2, 2, 2))
>>> np.trace(a)
array([6., 8.])
>>> a = np.arange(24).reshape((2, 2, 2, 3))
>>> np.trace(a).shape
(2, 3)
"""
pass
def _np_squeeze(a, axis=None, out=None):
"""
Remove single-dimensional entries from the shape of an array.
Parameters
----------
a : ndarray
Input data.
axis : None or int or tuple of ints, optional
Selects a subset of the single-dimensional entries in the
shape. If an axis is selected with shape entry greater than
one, an error is raised.
out : ndarray, optional
Array into which the output is placed. It must have the same size
and dtype as the input array.
Returns
-------
squeezed : ndarray
The input array, but with all or a subset of the
dimensions of length 1 removed. It always returns a copy of `a`.
Raises
------
MXNetError
If `axis` is not `None`, and an axis being squeezed is not of length 1
See Also
--------
expand_dims : The inverse operation, adding singleton dimensions
reshape : Insert, remove, and combine dimensions, and resize existing ones
Examples
--------
>>> x = np.array([[[0], [1], [2]]])
>>> x.shape
(1, 3, 1)
>>> np.squeeze(x).shape
(3,)
>>> np.squeeze(x, axis=0).shape
(3, 1)
>>> np.squeeze(x, axis=1).shape
Traceback (most recent call last):
...
mxnet.base.MXNetError: cannot select an axis to squeeze out which has size=3 not equal to one
>>> np.squeeze(x, axis=2).shape
(1, 3)
"""
pass
def _np_max(a, axis=None, out=None, keepdims=False):
"""
Return the maximum of an array or maximum along an axis.
Parameters
----------
a : ndarray
Input data.
axis : int, optional
Axis along which to operate. By default, flattened input is used.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See `doc.ufuncs` (Section "Output arguments") for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
Returns
-------
max : ndarray
Maximum of `a`. If `axis` is None, the result is an array of dimension 1.
If `axis` is given, the result is an array of dimension
``a.ndim - 1``.
See Also
--------
min :
The minimum value of an array along a given axis, ignoring any nan.
maximum :
Element-wise maximum of two arrays, ignoring any nan.
argmax :
Return the indices of the maximum values.
Notes
-----
NaN in the original `numpy` is denoted as nan and will be ignored.
Don't use `max` for element-wise comparison of 2 arrays; when
``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than
``max(a, axis=0)``.
Examples
--------
>>> a = np.arange(4).reshape((2,2))
>>> a
array([[0., 1.],
[2., 3.]])
>>> np.max(a) # Maximum of the flattened array
array(3.)
>>> np.max(a, axis=0) # Maxima along the first axis
array([2., 3.])
>>> np.max(a, axis=1) # Maxima along the second axis
array([1., 3.])
>>> b = np.arange(5, dtype=np.float32)
>>> b[2] = np.nan
>>> np.max(b)
array(4.)
"""
pass
def _np_min(a, axis=None, out=None, keepdims=False):
"""
Return the minimum of an array or minimum along an axis.
Parameters
----------
a : ndarray
Input data.
axis : int, optional
Axis along which to operate. By default, flattened input is used.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See `doc.ufuncs` (Section "Output arguments") for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
Returns
-------
min : ndarray
Minimum of `a`. If `axis` is None, the result is an array of dimension 1.
If `axis` is given, the result is an array of dimension
``a.ndim - 1``.
See Also
--------
max :
The maximum value of an array along a given axis, ignoring any nan.
minimum :
Element-wise minimum of two arrays, ignoring any nan.
Notes
-----
NaN in the original `numpy` is denoted as nan and will be ignored.
Don't use `min` for element-wise comparison of 2 arrays; when
``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than
``min(a, axis=0)``.
Examples
--------
>>> a = np.arange(4).reshape((2,2))
>>> a
array([[0., 1.],
[2., 3.]])
>>> np.min(a) # Minimum of the flattened array
array(0.)
>>> np.min(a, axis=0) # Minima along the first axis
array([0., 1.])
>>> np.min(a, axis=1) # Minima along the second axis
array([0., 2.])
>>> b = np.arange(5, dtype=np.float32)
>>> b[2] = np.nan
>>> np.min(b)
array(0.) # nan will be ignored
"""
pass
def _np_prod(a, axis=None, dtype=None, out=None, keepdims=False):
"""
Return the product of array elements over a given axis.
Parameters
----------
a : ndarray
Input data.
axis : None or int or tuple of ints, optional
Axis or axes along which a product is performed.
The default (`axis` = `None`) is to perform a product over all
the dimensions of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
If this is a tuple of ints, a product is performed on multiple
axes, instead of a single axis or all the axes as before.
dtype : data-type, optional
The data-type of the returned array, as well as of the accumulator
in which the elements are multiplied. By default, if `a` is of
integer type, `dtype` is the default platform integer. (Note: if
the type of `a` is unsigned, then so is `dtype`.) Otherwise,
the dtype is the same as that of `a`.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the
output values will be cast if necessary.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
Returns
-------
product_along_axis : ndarray, see `dtype` parameter above.
An array shaped as `a` but with the specified axis removed.
Returns a reference to `out` if specified.
See Also
--------
ndarray.prod : equivalent method
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow. That means that, on a 32-bit platform:
>>> x = np.array([536870910, 536870910, 536870910, 536870910])
>>> np.prod(x) #random
array(8.307675e+34)
Examples
--------
By default, calculate the product of all elements:
>>> np.prod(np.array([1.,2.]))
array(2.)
Even when the input array is two-dimensional:
>>> np.prod(np.array([1.,2.,3.,4.]).reshape((2,2)))
array(24.)
But we can also specify the axis over which to multiply:
>>> np.prod(np.array([1.,2.,3.,4.]).reshape((2,2)), axis=1)
array([ 2., 12.])
If `x` has an unsigned integer type, the output keeps that
unsigned type, as the example below shows:
>>> x = np.array([1, 2, 3], dtype=np.uint8)
>>> np.prod(x).dtype == np.uint8
True
If `x` has a signed integer type, the output likewise keeps
that signed type:
>>> x = np.array([1, 2, 3], dtype=np.int8)
>>> np.prod(x).dtype == np.int8
True
"""
pass
def _np_moveaxis(a, source, destination):
"""Move axes of an array to new positions.
Other axes remain in their original order.
Parameters
----------
a : ndarray
The array whose axes should be reordered.
source : int or sequence of int
Original positions of the axes to move. These must be unique.
destination : int or sequence of int
Destination positions for each of the original axes. These must also be
unique.
Returns
-------
result : ndarray
Array with moved axes. This array is a view of the input array.
See Also
--------
transpose: Permute the dimensions of an array.
swapaxes: Interchange two axes of an array.
Examples
--------
>>> x = np.zeros((3, 4, 5))
>>> np.moveaxis(x, 0, -1).shape
(4, 5, 3)
>>> np.moveaxis(x, -1, 0).shape
(5, 3, 4)
These all achieve the same result:
>>> np.transpose(x).shape
(5, 4, 3)
>>> np.swapaxes(x, 0, -1).shape
(5, 4, 3)
>>> np.moveaxis(x, [0, 1], [-1, -2]).shape
(5, 4, 3)
>>> np.moveaxis(x, [0, 1, 2], [-1, -2, -3]).shape
(5, 4, 3)
"""
pass
def _np__random_shuffle(x):
"""
Modify a sequence in-place by shuffling its contents.
This function only shuffles the array along the first axis of a
multi-dimensional array. The order of sub-arrays is changed but
their contents remain the same.
Parameters
----------
x: ndarray
The array or list to be shuffled.
Returns
-------
None
Examples
--------
>>> arr = np.arange(10)
>>> np.random.shuffle(arr)
>>> arr
array([5., 1., 0., 6., 7., 3., 9., 8., 4., 2.]) # random
Multi-dimensional arrays are only shuffled along the first axis:
>>> arr = np.arange(9).reshape((3, 3))
>>> np.random.shuffle(arr)
>>> arr
array([[6., 7., 8.], # random
[3., 4., 5.],
[0., 1., 2.]])
"""
pass
def _np_broadcast_to(array, shape, out=None):
"""
Broadcast an array to a new shape.
Parameters
----------
array : ndarray
The array to broadcast.
shape : tuple, optional, default=[]
The shape of the desired array.
out : ndarray, optional
The output ndarray to hold the result.
Returns
-------
out : ndarray or list of ndarrays
Raises
------
MXNetError
- If the array is not compatible with the new shape according to NumPy's
broadcasting rules.
- If the shape of the output array is not consistent with the desired shape.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> np.broadcast_to(x, (3, 3))
array([[1., 2., 3.],
[1., 2., 3.],
[1., 2., 3.]])
"""
pass
```
|
{
"source": "jerryzenghao/ReinformanceLearning",
"score": 3
}
|
#### File: ReinformanceLearning/REINFORCEjs/agent.py
```python
import numpy as np
import random
class Agent:
def __init__(self, env, gamma = 0.9):
self.env = env
self.gamma = gamma
self.policy = [[[0.25,0.25,0.25,0.25] for i in range(env.state_size[0])] for j in range(env.state_size[1])]
self.value = np.zeros(env.state_size)
self.action_value = np.random.rand(env.state_size[0],env.state_size[1],4)*0.0001
#self.action_value = np.zeros((env.state_size[0],env.state_size[1],4))
self.TDpolicy = np.zeros(env.state_size)
for i in env.wall:
self.value[i] = None
self.policy[i[0]][i[1]] = None
self.action_value[i] = None
self.TDpolicy[i] = None
for i in range(1,9):
self.policy[0][i] = [0,1/3,1/3,1/3]
self.policy[-1][i] = [1/3,1/3,0,1/3]
self.policy[i][0] = [1/3,1/3,1/3,0]
self.policy[i][-1] = [1/3,0,1/3,1/3]
self.policy[0][0] = [0,0.5,0.5,0]
self.policy[0][-1] = [0,0,0.5,0.5]
self.policy[-1][0] = [0.5,0.5,0,0]
self.policy[-1][-1] = [0.5,0,0,0.5]
for i in range(10):
self.action_value[0,i][0] = -np.inf
self.action_value[-1,i][2] = -np.inf
self.action_value[i,0][3] = -np.inf
self.action_value[i,-1][1] = -np.inf
def get_policy(self,s):
return self.policy[s[0]][s[1]]
class DPAgent(Agent):
def policy_evaluation(self):
while True:
delta = 0
v_new = np.zeros(self.env.state_size)
for i in range(self.env.state_size[0]):
for j in range(self.env.state_size[1]):
s = (i,j)
if s in self.env.wall:
v_new[s] = None
continue
policy = self.get_policy(s)
r = self.env.get_reward(s)
for a in range(4):
s_n = self.env.get_next_state(s,a)
v_new[s] += policy[a]*(r + self.gamma*self.value[s_n])
delta = max(delta, abs(self.value[s]-v_new[s]))
self.value = v_new
if delta < 1e-6:
print('Policy Evaluation:\n', np.round(self.value,2))
return
def policy_improvement(self):
stable = True
for i in range(self.env.state_size[0]):
for j in range(self.env.state_size[1]):
s = (i,j)
if s in self.env.wall:
continue
policy = self.get_policy(s)
nmax = 0
vmax = - np.inf
v = []
r = self.env.get_reward(s)
for a in range(4):
pi = policy[a]
s_n = self.env.get_next_state(s,a)
v.append(r + self.gamma*self.value[s_n])
if a == 0 or v[a] > vmax:
vmax = v[a]
nmax = 1
elif v[a] == vmax:
nmax += 1
new_policy = [0,0,0,0]
for a in range(4):
if vmax == v[a]:
new_policy[a] = 1.0/nmax
else:
new_policy[a] = 0.0
if policy != new_policy:
stable = False
self.policy[s[0]][s[1]] = new_policy
return stable
def policy_iteration(self):
while True:
self.policy_evaluation()
if self.policy_improvement():
print('Policy stable.')
return
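# Hedged usage sketch (not from the original repo): assuming a GridWorld
# environment such as the one defined in environment.py below, with its
# reward matrix already set up, dynamic-programming control reduces to:
#
#   agent = DPAgent(env)
#   agent.policy_iteration()  # alternates evaluation and greedy improvement
#
# Each sweep prints the evaluated value grid until the greedy policy stops changing.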
class TDAgent(Agent):
def Q_learning(self,alpha, epsi=0.2):
iterations = []
for _ in range(1000):
S = (0,0)
iteration = 0
while True:
# choose A by epsilon-greedy
coin = np.random.binomial(1,1-epsi)
policy = self.get_policy(S)
if coin:
A = np.argmax(self.action_value[S])
else:
A = np.random.choice([0,1,2,3], p = policy)
s_n = self.env.get_next_state(S,A)
r = self.env.get_reward(S)
self.action_value[S][A] += alpha*(r+self.gamma*max(self.action_value[s_n])-self.action_value[S][A])
if self.env.is_terminal(S):
break
S = s_n
iteration += 1
iterations.append(iteration)
# Get policy
self.TDpolicy = np.argmax(self.action_value, axis=2)
self.value = np.max(self.action_value, axis = 2)
for i in self.env.wall:
self.TDpolicy[i] = -1
self.value[i] = None
return iterations
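# For reference, the action_value update above is the standard off-policy
# Q-learning backup (alpha is the learning rate, gamma the discount factor):
#
#   Q(S, A) <- Q(S, A) + alpha * (r + gamma * max_a Q(S', a) - Q(S, A))
#
# sarsa() below replaces the max over next actions with the action actually
# drawn from the same epsilon-greedy behaviour policy.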
def sarsa(self,alpha, epsi=0.2):
iterations = []
for _ in range(1000):
S = (0,0)
iteration = 0
while True:
# choose A by epsilon-greedy
coin = np.random.binomial(1,1-epsi)
policy = self.get_policy(S)
if coin:
A = np.argmax(self.action_value[S])
else:
A = np.random.choice([0,1,2,3], p = policy)
s_n = self.env.get_next_state(S,A)
coin = np.random.binomial(1,1-epsi)
policy = self.get_policy(s_n)
if coin:
a_n = np.argmax(self.action_value[s_n])
else:
a_n = np.random.choice([0,1,2,3], p = policy)
r = self.env.get_reward(S)
self.action_value[S][A] += alpha*(r+self.gamma*self.action_value[s_n][a_n]-self.action_value[S][A])
if self.env.is_terminal(S) or iteration>= 1000:
break
S = s_n
A = a_n
iteration += 1
iterations.append(iteration)
# Get policy
self.TDpolicy = np.argmax(self.action_value, axis=2)
self.value = np.max(self.action_value, axis = 2)
for i in self.env.wall:
self.TDpolicy[i] = -1
self.value[i] = None
return iterations
```
#### File: ReinformanceLearning/REINFORCEjs/environment.py
```python
import numpy as np
import random
STATE_SPACE_SIZE = (10,10)
START_STATE = (0,0)
ACTIONS = (0,1,2,3)
class GridWorld:
def __init__(self,reward_matrix):
self.reward_matrix = reward_matrix
self.wall = {(2,1),(2,2),(2,3),(2,4),(2,6),(2,7),(2,8), (3,4), (4,4), (5,4),(6,4), (7,4)}
for i in self.wall:
reward_matrix[i] = None
self.state_size = STATE_SPACE_SIZE
def get_next_state(self, s, a):
if self.reward_matrix[s] > 0:
return (0,0)
if a == 0:
n_s = (s[0]-1,s[1])
elif a == 1:
n_s = (s[0],s[1]+1)
elif a == 2:
n_s = (s[0]+1,s[1])
else:
n_s = (s[0],s[1]-1)
if n_s not in self.wall and 0<=n_s[0]<10 and 0<=n_s[1]<10:
return n_s
return s
def get_reward(self,s):
return self.reward_matrix[s]
def is_terminal(self,s):
if self.reward_matrix[s] > 0:
return True
return False
```
|
{
"source": "jerryzh168/ClassyVision-1",
"score": 2
}
|
#### File: classy_vision/hooks/__init__.py
```python
import copy
import traceback
from pathlib import Path
from typing import Any, Dict, List
from classy_vision.generic.registry_utils import import_all_modules
from .constants import ClassyHookFunctions # isort:skip
from .classy_hook import ClassyHook # isort:skip
FILE_ROOT = Path(__file__).parent
HOOK_REGISTRY = {}
HOOK_CLASS_NAMES = set()
HOOK_REGISTRY_TB = {}
HOOK_CLASS_NAMES_TB = {}
def register_hook(name):
"""Registers a :class:`ClassyHook` subclass.
This decorator allows Classy Vision to instantiate a subclass of
:class:`ClassyHook` from a configuration file, even if the class
itself is not part of the base Classy Vision framework. To use it,
apply this decorator to a ClassyHook subclass, like this:
.. code-block:: python
@register_hook('custom_hook')
class CustomHook(ClassyHook):
...
To instantiate a hook from a configuration file, see
:func:`build_hook`.
"""
def register_hook_cls(cls):
if name in HOOK_REGISTRY:
msg = "Cannot register duplicate hook ({}). Already registered at \n{}\n"
raise ValueError(msg.format(name, HOOK_REGISTRY_TB[name]))
if not issubclass(cls, ClassyHook):
raise ValueError(
"Hook ({}: {}) must extend ClassyHook".format(name, cls.__name__)
)
if cls.__name__ in HOOK_CLASS_NAMES:
msg = (
"Cannot register hook with duplicate class name({})."
+ "Previously registered at \n{}\n"
)
raise ValueError(
msg.format(cls.__name__, HOOK_CLASS_NAMES_TB[cls.__name__])
)
tb = "".join(traceback.format_stack())
HOOK_REGISTRY[name] = cls
HOOK_CLASS_NAMES.add(cls.__name__)
HOOK_REGISTRY_TB[name] = tb
HOOK_CLASS_NAMES_TB[cls.__name__] = tb
return cls
return register_hook_cls
def build_hooks(hook_configs: List[Dict[str, Any]]):
return [build_hook(config) for config in hook_configs]
def build_hook(hook_config: Dict[str, Any]):
"""Builds a ClassyHook from a config.
This assumes a 'name' key in the config which is used to determine
what hook class to instantiate. For instance, a config `{"name":
"my_hook", "foo": "bar"}` will find a class that was registered as
"my_hook" (see :func:`register_hook`) and call .from_config on
it."""
assert hook_config["name"] in HOOK_REGISTRY, (
"Unregistered hook. Did you make sure to use the register_hook decorator "
"AND import the hook file before calling this function??"
)
hook_config = copy.deepcopy(hook_config)
hook_name = hook_config.pop("name")
return HOOK_REGISTRY[hook_name].from_config(hook_config)
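# Hedged usage sketch (hook name and config key are the illustrative ones from
# the docstrings above, not real registered hooks):
#
#   config = {"name": "my_hook", "foo": "bar"}
#   hook = build_hook(config)  # calls HOOK_REGISTRY["my_hook"].from_config({"foo": "bar"})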
# automatically import any Python files in the hooks/ directory
import_all_modules(FILE_ROOT, "classy_vision.hooks")
from .checkpoint_hook import CheckpointHook # isort:skip
from .torchscript_hook import TorchscriptHook # isort:skip
from .output_csv_hook import OutputCSVHook # isort:skip
from .exponential_moving_average_model_hook import ( # isort:skip
ExponentialMovingAverageModelHook,
)
from .loss_lr_meter_logging_hook import LossLrMeterLoggingHook # isort:skip
from .model_complexity_hook import ModelComplexityHook # isort:skip
from .model_tensorboard_hook import ModelTensorboardHook # isort:skip
from .precise_batch_norm_hook import PreciseBatchNormHook # isort:skip
from .profiler_hook import ProfilerHook # isort:skip
from .progress_bar_hook import ProgressBarHook # isort:skip
from .tensorboard_plot_hook import TensorboardPlotHook # isort:skip
from .visdom_hook import VisdomHook # isort:skip
__all__ = [
"build_hooks",
"build_hook",
"register_hook",
"CheckpointHook",
"ClassyHook",
"ClassyHookFunctions",
"ExponentialMovingAverageModelHook",
"LossLrMeterLoggingHook",
"OutputCSVHook",
"TensorboardPlotHook",
"TorchscriptHook",
"ModelComplexityHook",
"ModelTensorboardHook",
"PreciseBatchNormHook",
"ProfilerHook",
"ProgressBarHook",
"VisdomHook",
]
```
#### File: classy_vision/meters/precision_meter.py
```python
from typing import Any, Dict, Sequence
import torch
from classy_vision.generic.distributed_util import all_reduce_sum
from classy_vision.generic.util import is_pos_int, maybe_convert_to_one_hot
from classy_vision.meters import ClassyMeter
from . import register_meter
@register_meter("precision_at_k")
class PrecisionAtKMeter(ClassyMeter):
"""
Meter to calculate top-k precision for single-label or multi-label
image classification task. Note, ties are resolved randomly.
"""
def __init__(self, topk):
"""
args:
topk: list of int `k` values.
"""
super().__init__()
assert isinstance(topk, Sequence), "topk must be a sequence"
assert len(topk) > 0, "topk list should have at least one element"
assert all(is_pos_int(x) for x in topk), "each value in topk must be >= 1"
self._topk = topk
# _total_* variables store running, in-sync totals for the
# metrics. These should not be communicated / summed.
self._total_correct_predictions_k = None
self._total_sample_count = None
# _curr_* variables store counts since the last sync. Only
# these should be summed across workers and they are reset
# after each communication
self._curr_correct_predictions_k = None
self._curr_sample_count = None
# Initialize all values properly
self.reset()
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "PrecisionAtKMeter":
"""Instantiates a PrecisionAtKMeter from a configuration.
Args:
config: A configuration for a PrecisionAtKMeter.
See :func:`__init__` for parameters expected in the config.
Returns:
A PrecisionAtKMeter instance.
"""
return cls(topk=config["topk"])
@property
def name(self):
return "precision_at_k"
def sync_state(self):
# Communications
self._curr_correct_predictions_k = all_reduce_sum(
self._curr_correct_predictions_k
)
self._curr_sample_count = all_reduce_sum(self._curr_sample_count)
# Store results
self._total_correct_predictions_k += self._curr_correct_predictions_k
self._total_sample_count += self._curr_sample_count
# Reset values until next sync
self._curr_correct_predictions_k.zero_()
self._curr_sample_count.zero_()
@property
def value(self):
# Return value based on the local state of meter which
# includes the local sample count since last sync and the total global sample
# count obtained at the last sync
correct_predictions = {
k: self._curr_correct_predictions_k[i]
+ self._total_correct_predictions_k[i]
for i, k in enumerate(self._topk)
}
sample_count = self._total_sample_count + self._curr_sample_count
return {
"top_{}".format(k): (correct_predictions[k] / (k * sample_count)).item()
if sample_count
else 0.0
for k in self._topk
}
def get_classy_state(self):
"""Contains the states of the meter."""
return {
"name": self.name,
"top_k": self._topk,
"total_correct_predictions": self._total_correct_predictions_k.clone(),
"total_sample_count": self._total_sample_count.clone(),
"curr_sample_count": self._curr_sample_count.clone(),
"curr_correct_predictions_k": self._curr_correct_predictions_k.clone(),
}
def set_classy_state(self, state):
assert (
self.name == state["name"]
), "State name {state_name} does not match meter name {obj_name}".format(
state_name=state["name"], obj_name=self.name
)
assert (
self._topk == state["top_k"]
), "top-k of state {state_k} does not match object's top-k {obj_k}".format(
state_k=state["top_k"], obj_k=self._topk
)
# Restore the state -- correct_predictions and sample_count.
self.reset()
self._total_correct_predictions_k = state["total_correct_predictions"].clone()
self._total_sample_count = state["total_sample_count"].clone()
self._curr_correct_predictions_k = state["curr_correct_predictions_k"].clone()
self._curr_sample_count = state["curr_sample_count"].clone()
def update(self, model_output, target, **kwargs):
"""
args:
model_output: tensor of shape (B, C) where each value is
either logit or class probability.
target: tensor of shape (B, C), which is one-hot /
multi-label encoded, or tensor of shape (B) /
(B, 1), integer encoded
"""
# Convert target to 0/1 encoding if isn't
target = maybe_convert_to_one_hot(target, model_output)
_, pred_classes = model_output.topk(
max(self._topk), dim=1, largest=True, sorted=True
)
pred_mask_tensor = torch.zeros(target.size())
for i, k in enumerate(self._topk):
pred_mask_tensor.zero_()
self._curr_correct_predictions_k[i] += torch.sum(
# torch.min is used to simulate AND between binary
# tensors. If tensors are not binary, this will fail.
torch.min(
pred_mask_tensor.scatter_(1, pred_classes[:, :k], 1.0),
target.float(),
)
).item()
self._curr_sample_count += model_output.shape[0]
def reset(self):
self._total_correct_predictions_k = torch.zeros(len(self._topk))
self._total_sample_count = torch.zeros(1)
self._curr_correct_predictions_k = torch.zeros(len(self._topk))
self._curr_sample_count = torch.zeros(1)
def validate(self, model_output_shape, target_shape):
assert (
len(model_output_shape) == 2
), "model_output_shape must be (B, C) \
Found shape {}".format(
model_output_shape
)
assert (
len(target_shape) > 0 and len(target_shape) < 3
), "target_shape must be (B) or (B, C) \
Found shape {}".format(
target_shape
)
assert (
max(self._topk) < model_output_shape[1]
), "k in top_k, for \
precision_meter cannot be larger than num_classes: \
{}".format(
model_output_shape[1]
)
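# Hedged usage sketch (tensor values are illustrative, not from the original tests):
#
#   meter = PrecisionAtKMeter(topk=[1])
#   output = torch.tensor([[0.1, 0.9], [0.8, 0.2]])  # two samples, two classes
#   target = torch.tensor([1, 0])                    # both top-1 predictions are correct
#   meter.update(output, target)
#   meter.value                                      # -> {"top_1": 1.0}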
```
#### File: ClassyVision-1/test/heads_vision_transformer_head_test.py
```python
import unittest
import torch
from classy_vision.heads.vision_transformer_head import VisionTransformerHead
class TestVisionTransformerHead(unittest.TestCase):
def test_vision_transformer_head(self):
batch_size = 2
in_plane = 3
num_classes = 5
head = VisionTransformerHead(
"default_head",
num_classes=num_classes,
in_plane=in_plane,
)
input = torch.rand([batch_size, in_plane])
output = head(input)
self.assertEqual(output.shape, torch.Size([batch_size, num_classes]))
def test_vision_transformer_head_normalize_inputs(self):
batch_size = 2
in_plane = 3
head = VisionTransformerHead(
"default_head",
num_classes=None,
in_plane=in_plane,
normalize_inputs="l2",
)
input = torch.rand([batch_size, in_plane])
output = head(input)
self.assertEqual(output.shape, torch.Size([batch_size, in_plane]))
for i in range(batch_size):
output_i = output[i]
self.assertAlmostEqual(output_i.norm().item(), 1, places=3)
```
#### File: ClassyVision-1/test/losses_soft_target_cross_entropy_loss_test.py
```python
import copy
import unittest
import torch
from classy_vision.losses import SoftTargetCrossEntropyLoss, build_loss
class TestSoftTargetCrossEntropyLoss(unittest.TestCase):
def _get_config(self):
return {
"name": "soft_target_cross_entropy",
"ignore_index": -1,
"reduction": "mean",
}
def _get_outputs(self):
return torch.tensor([[1.0, 7.0, 0.0, 0.0, 2.0]])
def _get_targets(self):
return torch.tensor([[1, 0, 0, 0, 1]])
def _get_loss(self):
return 5.51097965
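# Where the expected value comes from (worked out by hand, not part of the
# original test): with logits [1, 7, 0, 0, 2], log-softmax is roughly
# [-6.011, -0.011, -7.011, -7.011, -5.011]; the soft target [1, 0, 0, 0, 1]
# normalizes to [0.5, 0, 0, 0, 0.5], so the loss is
# -(0.5 * -6.011 + 0.5 * -5.011) ~= 5.511. Without target normalization
# (tested further down) the two terms are summed instead, giving ~= 11.022.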
def test_build_soft_target_cross_entropy(self):
config = self._get_config()
crit = build_loss(config)
self.assertTrue(isinstance(crit, SoftTargetCrossEntropyLoss))
self.assertEqual(crit._ignore_index, -1)
self.assertEqual(crit._reduction, "mean")
def test_soft_target_cross_entropy(self):
config = self._get_config()
crit = SoftTargetCrossEntropyLoss.from_config(config)
outputs = self._get_outputs()
targets = self._get_targets()
self.assertAlmostEqual(crit(outputs, targets).item(), self._get_loss())
# Verify ignore index works
outputs = self._get_outputs()
targets = torch.tensor([[-1, 0, 0, 0, 1]])
self.assertAlmostEqual(crit(outputs, targets).item(), 5.01097918)
def test_soft_target_cross_entropy_none_reduction(self):
# reduction mode is "none"
config = self._get_config()
config["reduction"] = "none"
crit = SoftTargetCrossEntropyLoss.from_config(config)
outputs = torch.tensor([[1.0, 7.0, 0.0, 0.0, 2.0], [4.0, 2.0, 1.0, 6.0, 0.5]])
targets = torch.tensor([[1, 0, 0, 0, 1], [0, 1, 0, 1, 0]])
loss = crit(outputs, targets)
self.assertEqual(loss.numel(), outputs.size(0))
def test_soft_target_cross_entropy_integer_label(self):
config = self._get_config()
crit = SoftTargetCrossEntropyLoss.from_config(config)
outputs = self._get_outputs()
targets = torch.tensor([4])
self.assertAlmostEqual(crit(outputs, targets).item(), 5.01097918)
def test_unnormalized_soft_target_cross_entropy(self):
config = {
"name": "soft_target_cross_entropy",
"ignore_index": -1,
"reduction": "mean",
"normalize_targets": False,
}
crit = SoftTargetCrossEntropyLoss.from_config(config)
outputs = self._get_outputs()
targets = self._get_targets()
self.assertAlmostEqual(crit(outputs, targets).item(), 11.0219593)
# Verify ignore index works
outputs = self._get_outputs()
targets = torch.tensor([[-1, 0, 0, 0, 1]])
self.assertAlmostEqual(crit(outputs, targets).item(), 5.01097965)
def test_ignore_row(self):
# If a sample has no valid targets, it should be ignored in the reduction.
config = self._get_config()
crit = SoftTargetCrossEntropyLoss.from_config(config)
outputs = torch.tensor([[1.0, 7.0, 0.0, 0.0, 2.0], [4.0, 2.0, 1.0, 6.0, 0.5]])
targets = torch.tensor([[1, 0, 0, 0, 1], [-1, -1, -1, -1, -1]])
self.assertAlmostEqual(crit(outputs, targets).item(), self._get_loss())
def test_deep_copy(self):
config = self._get_config()
crit = build_loss(config)
self.assertTrue(isinstance(crit, SoftTargetCrossEntropyLoss))
outputs = self._get_outputs()
targets = self._get_targets()
crit(outputs, targets)
crit2 = copy.deepcopy(crit)
self.assertAlmostEqual(crit2(outputs, targets).item(), self._get_loss())
```
|
{
"source": "Jerryzhangzhao/DL_tensorflow",
"score": 3
}
|
#### File: DL_tensorflow/14_tf_hub/image_test_with_retrain_graph.py
```python
import tensorflow as tf
import os
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
# class NodeLookup(object):
# def __init__(self):
# label_lookup_path = 'inception_model/imagenet_2012_challenge_label_map_proto.pbtxt';
# uid_lookup_path = 'inception_model/imagenet_synset_to_human_label_map.txt'
#
# self.node_lookup = self.load(label_lookup_path, uid_lookup_path)
#
# def load(self, label_lookup_path, uid_lookup_path):
# proto_as_ascii_lines = tf.gfile.GFile(uid_lookup_path).readlines()
#
# uid_to_human = {}
#
# for line in proto_as_ascii_lines:
# line = line.strip('\n') # remove \n
# parsed_items = line.split('\t') # split with \t
# uid = parsed_items[0]
# human_string = parsed_items[1]
# uid_to_human[uid] = human_string
#
# proto_as_ascii = tf.gfile.GFile(label_lookup_path).readlines()
# node_id_to_uid = {}
# for line in proto_as_ascii:
# if line.startswith(' target_class:'):
# target_class = int(line.split(': ')[1])
# if line.startswith(' target_class_string:'):
# target_class_string = line.split(': ')[1]
# node_id_to_uid[target_class] = target_class_string[1:-2]
#
# node_id_to_name = {}
#
# for key, value in node_id_to_uid.items():
# name = uid_to_human[value]
# node_id_to_name[key] = name
#
# return node_id_to_name
#
# def id_to_string(self, node_id):
# if node_id not in self.node_lookup:
# return 'no '
# return self.node_lookup[node_id]
# load the model graph
with tf.gfile.FastGFile('../inception_model/tf_hub_inceptionV3_graph.pb', 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='')
with tf.Session() as sess:
softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
# "image","bottleneck_txt")
current_test_resource = "image"
if current_test_resource == "bottleneck_txt":
# load the bottleneck feature .txt file
f = open('../images/test_images/bn_sunflowers/2.txt')
bottle_neck_list = f.readline().split(',')
data = np.zeros((1, 2048), dtype=float)
data[0:] = bottle_neck_list[0:]
predictions = sess.run(softmax_tensor, {'input/BottleneckInputPlaceholder:0': data})
predictions = np.squeeze(predictions) # transfer to one dimension
elif current_test_resource == "image":
# load image
image_path = "../images/test_images/rose2.jpg"
print("image path: ", image_path)
# read image and decode
image_data_raw = tf.gfile.FastGFile(image_path, 'rb').read()
image_data = tf.image.decode_jpeg(image_data_raw)
# covert to float [0,1]
image_data_float = tf.image.convert_image_dtype(image_data, tf.float32)
# resize image
image_resize = tf.image.resize_images(image_data_float, (299, 299), method=tf.image.ResizeMethod.BILINEAR)
# expand to shape of [N,W,H,C]
image_resize_expand = tf.expand_dims(image_resize, 0)
image_data_input = sess.run(image_resize_expand) # The value of a feed cannot be a tf.Tensor object
# print(image_data_input)
print(image_data_input.dtype)
predictions = sess.run(softmax_tensor, {'Placeholder:0': image_data_input})
predictions = np.squeeze(predictions) # transfer to one dimension
# show image
# img = Image.open(image_path)
# plt.imshow(img)
# plt.axis('off')
# plt.show()
# prediction sort
top_k = predictions.argsort()[-5:][::-1] # list[<start>:<stop>:<step>] -> [::-1]
print(top_k)
# class name (order from retrain output_labels.txt)
class_name = ["daisy", "dandelion", "roses", "sunflowers", "tulips"]
for node_id in top_k:
human_string = class_name[node_id]
score = predictions[node_id]
print('prediction: ', human_string, ' probability: ', score)
print('')
# show image
# img = Image.open(image_path)
# plt.imshow(img)
# plt.axis('off')
# plt.show()
```
#### File: Jerryzhangzhao/DL_tensorflow/9_RNN_MNIST.py
```python
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
n_inputs = 28 # one input row of the image: a 28-dimensional vector
max_time = 28 # length of the time sequence (number of image rows)
lstm_size = 100 # number of hidden units in the LSTM
n_classes = 10
batch_size = 50
batch_num = mnist.train.num_examples // batch_size
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])
weights = tf.Variable(tf.truncated_normal([lstm_size, n_classes], stddev=0.1))
biases = tf.Variable(tf.constant(0.1, shape=[n_classes]))
def rnn(x, weights, biases):
inputs = tf.reshape(x, [-1, max_time, n_inputs])
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(lstm_size)
# 'outputs' is a tensor of shape [batch_size, max_time, cell_state_size]
# 'state' is an LSTMStateTuple of two tensors, each of shape [batch_size, cell_state_size]
# state[0]: cell state, state[1]: hidden state
# state is the final state of the time series, while outputs contains the state at every time step
outputs, state = tf.nn.dynamic_rnn(lstm_cell, inputs, dtype=tf.float32)
results = tf.nn.softmax(tf.matmul(state[1], weights) + biases)
return results
# state is LSTMStateTuple, state[0]:cell state,state[1]:hidden state
'''
LSTMStateTuple(c=array([[ 1.4003208 , 5.3911433 , 1.3681278 , ..., 0.88553107,
2.6449218 , 3.021435 ],
[ 0.73893404, 7.522912 , 5.368811 , ..., 8.097184 ,
1.5976303 , -0.4217282 ],
[ 0.923707 , 1.8645589 , 2.7729654 , ..., -2.3037126 ,
3.0440154 , -1.1315142 ],
...,
[-2.4747496 , 5.387638 , -1.5895548 , ..., 3.225986 ,
2.19178 , -3.2867982 ],
[-2.6102498 , 6.910054 , -0.3397467 , ..., 5.625205 ,
0.63867795, -2.3031251 ],
[-3.755093 , 7.8372283 , 4.604886 , ..., 3.7100544 ,
0.19672015, -0.41049248]], dtype=float32),
h=array([[ 0.5207462 , 0.7044978 , 0.79254985, ..., 0.6382765 ,
0.87966275, 0.9602473 ],
[ 0.5697053 , 0.90182847, 0.9575436 , ..., 0.9356195 ,
0.83545005, -0.38531256],
[ 0.3323384 , 0.7125735 , 0.8852245 , ..., -0.69027716,
0.8095767 , -0.6152911 ],
...,
[-0.8340237 , 0.7708159 , -0.8142196 , ..., 0.68907934,
0.86848384, -0.91779894],
[-0.9046849 , 0.9284657 , -0.3011895 , ..., 0.7684504 ,
0.4953476 , -0.9350287 ],
[-0.84070975, 0.836363 , 0.819017 , ..., 0.7208597 ,
0.17305236, -0.31775635]], dtype=float32))
'''
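# Note (hedged): for this setup -- tf.nn.dynamic_rnn over a BasicLSTMCell with
# no sequence_length argument -- the final hidden state state[1] equals the
# output at the last time step, so outputs[:, -1, :] could be fed to the
# softmax layer with the same result.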
# prediction
prediction = rnn(x, weights, biases)
# cost fun
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
# train
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
# correct prediction
correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1)) # boolean: whether each prediction is correct
# accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # reduce_mean computes the accuracy
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for epoch in range(10):
for batch in range(batch_num):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys})
acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})
print("epoch: " + str(epoch) + " acc: " + str(acc))
```
|
{
"source": "jerryzhao173985/SMARTS",
"score": 2
}
|
#### File: SMARTS/examples/history_vehicles_replacement_for_imitation_learning.py
```python
import logging
from envision.client import Client as Envision
from examples import default_argument_parser
from smarts.core.agent import Agent, AgentSpec
from smarts.core.agent_interface import AgentInterface, AgentType
from smarts.core.scenario import Scenario
from smarts.core.smarts import SMARTS
from smarts.core.sumo_traffic_simulation import SumoTrafficSimulation
logging.basicConfig(level=logging.INFO)
class KeepLaneAgent(Agent):
def act(self, obs):
return "keep_lane"
def main(scenarios, headless, seed):
scenarios_iterator = Scenario.scenario_variations(scenarios, [])
smarts = SMARTS(
agent_interfaces={},
traffic_sim=SumoTrafficSimulation(headless=True, auto_start=True),
envision=Envision(),
)
for _ in scenarios:
scenario = next(scenarios_iterator)
agent_missions = scenario.discover_missions_of_traffic_histories()
for agent_id, mission in agent_missions.items():
scenario.set_ego_missions({agent_id: mission})
agent_spec = AgentSpec(
interface=AgentInterface.from_type(
AgentType.Laner, max_episode_steps=None
),
agent_builder=KeepLaneAgent,
)
agent = agent_spec.build_agent()
smarts.switch_ego_agent({agent_id: agent_spec.interface})
observations = smarts.reset(scenario)
dones = {agent_id: False}
while not dones[agent_id]:
agent_obs = observations[agent_id]
agent_action = agent.act(agent_obs)
observations, rewards, dones, infos = smarts.step(
{agent_id: agent_action}
)
smarts.destroy()
if __name__ == "__main__":
parser = default_argument_parser("history-vehicles-replacement-example")
args = parser.parse_args()
main(
scenarios=args.scenarios,
headless=args.headless,
seed=args.seed,
)
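# Hedged run note (the exact flags come from examples.default_argument_parser,
# which is not shown here; the invocation below is an assumption):
#   python history_vehicles_replacement_for_imitation_learning.py <scenario_dir> --headless
# replays the scenario's traffic-history vehicles as ego missions driven by KeepLaneAgent.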
```
#### File: smarts/core/remote_agent_buffer.py
```python
import atexit
import logging
import random
import time
from concurrent import futures
from multiprocessing import Process
from typing import List, Tuple
import grpc
from smarts.core.remote_agent import RemoteAgent, RemoteAgentException
from smarts.core.utils.networking import find_free_port
from smarts.zoo import manager as zoo_manager
from smarts.zoo import manager_pb2, manager_pb2_grpc
class RemoteAgentBuffer:
def __init__(self, zoo_manager_addrs=None, buffer_size=3):
"""
Args:
zoo_manager_addrs:
List of (ip, port) tuples for manager processes. Manager will instantiate
worker processes which run remote agents.
buffer_size:
Number of RemoteAgents to pre-initialize and keep running in the background,
must be non-zero (default: 3).
"""
assert buffer_size > 0
self._log = logging.getLogger(self.__class__.__name__)
# self._zoo_manager_conns is a list of dictionaries.
# Each dictionary provides connection info for a zoo manager.
# Example:
# [
# {"address": ("127.0.0.1", 7432)),
# "process": <proc>,
# "channel": <grpc_channel>,
# "stub" : <grpc_stub>
# },
# {
# ...
# }
# ...
# ]
self._zoo_manager_conns = []
self._local_zoo_manager = False
# Populate zoo manager connection with address and process handles.
if not zoo_manager_addrs:
# Spawn a local zoo manager since no remote zoo managers were provided.
self._local_zoo_manager = True
port = find_free_port()
self._zoo_manager_conns = [
{
"address": ("localhost", port),
"process": spawn_local_zoo_manager(port),
}
]
else:
self._zoo_manager_conns = [{"address": addr} for addr in zoo_manager_addrs]
# Populate zoo manager connection with channel and stub details.
for conn in self._zoo_manager_conns:
conn["channel"], conn["stub"] = get_manager_channel_stub(conn["address"])
self._buffer_size = buffer_size
self._replenish_threadpool = futures.ThreadPoolExecutor()
self._agent_buffer = [
self._remote_agent_future() for _ in range(self._buffer_size)
]
atexit.register(self.destroy)
def destroy(self):
if atexit:
atexit.unregister(self.destroy)
# Teardown any remaining remote agents.
for remote_agent_future in self._agent_buffer:
try:
remote_agent = remote_agent_future.result()
remote_agent.terminate()
except Exception as e:
self._log.error(
f"Exception while tearing down buffered remote agent. {repr(e)}"
)
raise e
# If available, teardown local zoo manager.
if self._local_zoo_manager:
self._zoo_manager_conns[0]["channel"].close()
self._zoo_manager_conns[0]["process"].terminate()
self._zoo_manager_conns[0]["process"].join()
def _build_remote_agent(self, zoo_manager_conns):
# Get a random zoo manager connection.
zoo_manager_conn = random.choice(zoo_manager_conns)
# Spawn remote worker and get its port.
retries = 3
worker_port = None
for retry in range(retries):
try:
response = zoo_manager_conn["stub"].spawn_worker(manager_pb2.Machine())
worker_port = response.num
break
except grpc.RpcError as e:
self._log.debug(
f"Failed {retry+1}/{retries} times in attempt to spawn a remote worker process. {e}"
)
if worker_port is None:
raise RemoteAgentException(
"Remote worker process could not be spawned by the zoo manager."
)
# Instantiate and return a local RemoteAgent.
return RemoteAgent(
zoo_manager_conn["address"], (zoo_manager_conn["address"][0], worker_port)
)
def _remote_agent_future(self):
return self._replenish_threadpool.submit(
self._build_remote_agent, self._zoo_manager_conns
)
def _try_to_acquire_remote_agent(self):
assert len(self._agent_buffer) == self._buffer_size
# Check if we have any done remote agent futures.
done_future_indices = [
idx
for idx, agent_future in enumerate(self._agent_buffer)
if agent_future.done()
]
if len(done_future_indices) > 0:
# If so, prefer one of these done ones to avoid sim delays.
future = self._agent_buffer.pop(done_future_indices[0])
else:
# Otherwise, we will block, waiting on a remote agent future.
self._log.debug(
"No ready remote agents, simulation will block until one is available."
)
future = self._agent_buffer.pop(0)
# Schedule the next remote agent and add it to the buffer.
self._agent_buffer.append(self._remote_agent_future())
remote_agent = future.result(timeout=10)
return remote_agent
def acquire_remote_agent(self, retries=3) -> RemoteAgent:
for retry in range(retries):
try:
return self._try_to_acquire_remote_agent()
except Exception as e:
self._log.debug(
f"Failed {retry+1}/{retries} times in acquiring remote agent. {repr(e)}"
)
time.sleep(0.1)
raise RemoteAgentException("Failed to acquire remote agent.")
def spawn_local_zoo_manager(port):
manager = Process(target=zoo_manager.serve, args=(port,))
manager.start()
return manager
def get_manager_channel_stub(addr):
channel = grpc.insecure_channel(f"{addr[0]}:{addr[1]}")
try:
# Wait until the grpc server is ready or timeout after 30 seconds
grpc.channel_ready_future(channel).result(timeout=30)
except grpc.FutureTimeoutError:
raise RemoteAgentException("Timeout in connecting to remote zoo manager.")
stub = manager_pb2_grpc.ManagerStub(channel)
return channel, stub
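# Hedged usage sketch (not part of the original module): with the SMARTS zoo
# dependencies available, the buffer is driven roughly as follows.
#
#   buffer = RemoteAgentBuffer(buffer_size=2)    # spawns a local zoo manager
#   remote_agent = buffer.acquire_remote_agent() # blocks until a worker is ready
#   ...                                          # use the RemoteAgent, then
#   buffer.destroy()                             # tears down workers and the local manager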
```
#### File: sac/sac/policy.py
```python
import os
import pathlib
from sys import path
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import yaml
from smarts.core.agent import Agent
from ultra.baselines.common.replay_buffer import ReplayBuffer
from ultra.baselines.common.social_vehicle_config import get_social_vehicle_configs
from ultra.baselines.common.state_preprocessor import *
from ultra.baselines.common.yaml_loader import load_yaml
from ultra.baselines.sac.sac.network import SACNetwork
from ultra.utils.common import compute_sum_aux_losses, to_2d_action, to_3d_action
class SACPolicy(Agent):
def __init__(
self,
policy_params=None,
checkpoint_dir=None,
):
# print("LOADING THE PARAMS", policy_params, checkpoint_dir)
self.policy_params = policy_params
self.gamma = float(policy_params["gamma"])
self.critic_lr = float(policy_params["critic_lr"])
self.actor_lr = float(policy_params["actor_lr"])
self.critic_update_rate = int(policy_params["critic_update_rate"])
self.policy_update_rate = int(policy_params["policy_update_rate"])
self.warmup = int(policy_params["warmup"])
self.seed = int(policy_params["seed"])
self.batch_size = int(policy_params["batch_size"])
self.hidden_units = int(policy_params["hidden_units"])
self.tau = float(policy_params["tau"])
self.initial_alpha = float(policy_params["initial_alpha"])
self.logging_freq = int(policy_params["logging_freq"])
self.action_size = int(policy_params["action_size"])
self.prev_action = np.zeros(self.action_size)
# state preprocessing
self.social_policy_hidden_units = int(
policy_params["social_vehicles"].get("social_policy_hidden_units", 0)
)
self.social_capacity = int(
policy_params["social_vehicles"].get("social_capacity", 0)
)
self.observation_num_lookahead = int(
policy_params.get("observation_num_lookahead", 0)
)
self.social_polciy_init_std = int(
policy_params["social_vehicles"].get("social_polciy_init_std", 0)
)
self.num_social_features = int(
policy_params["social_vehicles"].get("num_social_features", 0)
)
self.social_vehicle_config = get_social_vehicle_configs(
**policy_params["social_vehicles"]
)
self.social_vehicle_encoder = self.social_vehicle_config["encoder"]
self.state_description = get_state_description(
policy_params["social_vehicles"],
policy_params["observation_num_lookahead"],
self.action_size,
)
self.state_preprocessor = StatePreprocessor(
preprocess_state, to_2d_action, self.state_description
)
self.social_feature_encoder_class = self.social_vehicle_encoder[
"social_feature_encoder_class"
]
self.social_feature_encoder_params = self.social_vehicle_encoder[
"social_feature_encoder_params"
]
# others
self.checkpoint_dir = checkpoint_dir
self.device_name = "cuda:0" if torch.cuda.is_available() else "cpu"
self.device = torch.device(self.device_name)
self.save_codes = (
policy_params["save_codes"] if "save_codes" in policy_params else None
)
self.memory = ReplayBuffer(
buffer_size=int(policy_params["replay_buffer"]["buffer_size"]),
batch_size=int(policy_params["replay_buffer"]["batch_size"]),
state_preprocessor=self.state_preprocessor,
device_name=self.device_name,
)
self.current_iteration = 0
self.steps = 0
self.init_networks()
if checkpoint_dir:
self.load(checkpoint_dir)
@property
def state_size(self):
# Adjusting state_size based on number of features (ego+social)
size = sum(self.state_description["low_dim_states"].values())
if self.social_feature_encoder_class:
size += self.social_feature_encoder_class(
**self.social_feature_encoder_params
).output_dim
else:
size += self.social_capacity * self.num_social_features
return size
def init_networks(self):
self.sac_net = SACNetwork(
action_size=self.action_size,
state_size=self.state_size,
hidden_units=self.hidden_units,
seed=self.seed,
initial_alpha=self.initial_alpha,
social_feature_encoder_class=self.social_feature_encoder_class,
social_feature_encoder_params=self.social_feature_encoder_params,
).to(self.device_name)
self.actor_optimizer = torch.optim.Adam(
self.sac_net.actor.parameters(), lr=self.actor_lr
)
self.critic_optimizer = torch.optim.Adam(
self.sac_net.critic.parameters(), lr=self.critic_lr
)
self.log_alpha_optimizer = torch.optim.Adam(
[self.sac_net.log_alpha], lr=self.critic_lr
)
def act(self, state, explore=True):
state = self.state_preprocessor(
state=state,
normalize=True,
unsqueeze=True,
device=self.device_name,
social_capacity=self.social_capacity,
observation_num_lookahead=self.observation_num_lookahead,
social_vehicle_config=self.social_vehicle_config,
prev_action=self.prev_action,
)
action, _, mean = self.sac_net.sample(state)
if explore: # training mode
action = torch.squeeze(action, 0)
action = action.detach().cpu().numpy()
else: # testing mode
mean = torch.squeeze(mean, 0)
action = mean.detach().cpu().numpy()
return to_3d_action(action)
def step(self, state, action, reward, next_state, done):
# don't treat a timeout (reaching max episode steps) as done
max_steps_reached = state["events"].reached_max_episode_steps
if max_steps_reached:
done = False
action = to_2d_action(action)
self.memory.add(
state=state,
action=action,
reward=reward,
next_state=next_state,
done=float(done),
social_capacity=self.social_capacity,
observation_num_lookahead=self.observation_num_lookahead,
social_vehicle_config=self.social_vehicle_config,
prev_action=self.prev_action,
)
self.steps += 1
output = {}
if self.steps > max(self.warmup, self.batch_size):
states, actions, rewards, next_states, dones, others = self.memory.sample(
device=self.device_name
)
if self.steps % self.critic_update_rate == 0:
critic_loss = self.update_critic(
states, actions, rewards, next_states, dones
)
output["loss/critic_loss"] = {
"type": "scalar",
"data": critic_loss.item(),
"freq": 2,
}
if self.steps % self.policy_update_rate == 0:
actor_loss, temp_loss = self.update_actor_temp(
states, actions, rewards, next_states, dones
)
output["loss/actor_loss"] = {
"type": "scalar",
"data": actor_loss.item(),
"freq": self.logging_freq,
}
output["loss/temp_loss"] = {
"type": "scalar",
"data": temp_loss.item(),
"freq": self.logging_freq,
}
output["others/alpha"] = {
"type": "scalar",
"data": self.sac_net.alpha.item(),
"freq": self.logging_freq,
}
self.current_iteration += 1
self.target_soft_update(self.sac_net.critic, self.sac_net.target, self.tau)
self.prev_action = action if not done else np.zeros(self.action_size)
return output
def update_critic(self, states, actions, rewards, next_states, dones):
q1_current, q2_current, aux_losses = self.sac_net.critic(
states, actions, training=True
)
with torch.no_grad():
next_actions, log_probs, _ = self.sac_net.sample(next_states)
q1_next, q2_next = self.sac_net.target(next_states, next_actions)
v_next = (
torch.min(q1_next, q2_next) - self.sac_net.alpha.detach() * log_probs
)
q_target = (rewards + ((1 - dones) * self.gamma * v_next)).detach()
critic_loss = F.mse_loss(q1_current, q_target) + F.mse_loss(
q2_current, q_target
)
aux_losses = compute_sum_aux_losses(aux_losses)
overall_loss = critic_loss + aux_losses
self.critic_optimizer.zero_grad()
overall_loss.backward()
self.critic_optimizer.step()
return critic_loss
def update_actor_temp(self, states, actions, rewards, next_states, dones):
for p in self.sac_net.target.parameters():
p.requires_grad = False
for p in self.sac_net.critic.parameters():
p.requires_grad = False
# update actor:
actions, log_probs, aux_losses = self.sac_net.sample(states, training=True)
q1, q2 = self.sac_net.critic(states, actions)
q_old = torch.min(q1, q2)
actor_loss = (self.sac_net.alpha.detach() * log_probs - q_old).mean()
aux_losses = compute_sum_aux_losses(aux_losses)
overall_loss = actor_loss + aux_losses
self.actor_optimizer.zero_grad()
overall_loss.backward()
self.actor_optimizer.step()
# update temp:
temp_loss = (
self.sac_net.log_alpha.exp()
* (-log_probs.detach().mean() + self.action_size).detach()
)
self.log_alpha_optimizer.zero_grad()
temp_loss.backward()
self.log_alpha_optimizer.step()
self.sac_net.alpha.data = self.sac_net.log_alpha.exp().detach()
for p in self.sac_net.target.parameters():
p.requires_grad = True
for p in self.sac_net.critic.parameters():
p.requires_grad = True
return actor_loss, temp_loss
def target_soft_update(self, critic, target_critic, tau):
with torch.no_grad():
for critic_param, target_critic_param in zip(
critic.parameters(), target_critic.parameters()
):
target_critic_param.data = (
tau * critic_param.data + (1 - tau) * target_critic_param.data
)
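# The loop above is Polyak averaging of the critic into the target network:
#   theta_target <- tau * theta_critic + (1 - tau) * theta_target
# keeping the target a slowly moving copy of the online critic.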
def load(self, model_dir):
model_dir = pathlib.Path(model_dir)
map_location = None
if self.device and self.device.type == "cpu":
map_location = "cpu"
self.sac_net.actor.load_state_dict(
torch.load(model_dir / "actor.pth", map_location=map_location)
)
self.sac_net.target.load_state_dict(
torch.load(model_dir / "target.pth", map_location=map_location)
)
self.sac_net.critic.load_state_dict(
torch.load(model_dir / "critic.pth", map_location=map_location)
)
print("<<<<<<< MODEL LOADED >>>>>>>>>", model_dir)
def save(self, model_dir):
model_dir = pathlib.Path(model_dir)
# with open(model_dir / "params.yaml", "w") as file:
# yaml.dump(policy_params, file)
torch.save(self.sac_net.actor.state_dict(), model_dir / "actor.pth")
torch.save(self.sac_net.target.state_dict(), model_dir / "target.pth")
torch.save(self.sac_net.critic.state_dict(), model_dir / "critic.pth")
print("<<<<<<< MODEL SAVED >>>>>>>>>", model_dir)
def reset(self):
pass
```
|
{
"source": "Jerryzhao-z/Cross-lingual-Simple-Question-Answering-with-Memory-Networks",
"score": 2
}
|
#### File: Jerryzhao-z/Cross-lingual-Simple-Question-Answering-with-Memory-Networks/common.py
```python
import os.path as path
def settings_file():
"""
:return: project settings from the 'SETTINGS.json' file
"""
return path.join(path.dirname(path.realpath(__file__)), '.', 'SETTINGS.json')
```
#### File: Cross-lingual-Simple-Question-Answering-with-Memory-Networks/MemNN/input.py
```python
import itertools
import scipy.sparse as sparse
from MemNN.common import split_line, clean_words
def process_fact(line):
# Arg: "entity \t rel \t obj1 obj2 ob3 ..."
# Return: (entity rel obj1 obj2 obj3 ....)
[entity, rel, obj] = line.rstrip().split('\t')
return entity, rel, obj.split(' ')
def symbols_bag(kb):
"""
Note:
collect symbols from knowledge base
Args:
kb : knowledge base's path
Returns:
symbol_list, symbol2index
"""
print ("processing Knowledge base to bag-of-symbole")
# symbol_list
all_symbol = set()
with open(kb, encoding="utf8") as f_in:
for l in f_in:
entity, rel, objs = process_fact(l)
all_symbol.update([entity, rel])
all_symbol.update(objs)
symbol_list = list(all_symbol)
print ("%d symbols have been processed" % len(symbol_list))
symbol_list.sort()
# symbol2index
symbol2index = {}
symbol2index.update(zip(symbol_list, itertools.count()))
return symbol_list, symbol2index
def ngram_bag(corpus_list, labels):
"""
Note:
collect ngrams from dataset
Args:
corpus_list: list of dataset similar to SimpleQuestion train/test/valid
labels: labels of entity in Knowledge base
Returns:
vocabulary, voc2index
"""
# vocabulary
print ("processing corpus to bag-of-words")
words = set()
line_ctr = itertools.count()
# words in questions
for ds in corpus_list:
with open(ds, encoding="utf8") as in_f:
for line in in_f:
try:
line_number = next(line_ctr)
words.update(clean_words(split_line(line)))
except IndexError:
print ("Index Error in line %d" % line_number)
# word in labels
for l in labels:
words.update(l.split())
vocabulary = list(words)
print ("%d words have been processed" % len(vocabulary))
vocabulary.sort()
# voc2index
voc2index = {}
voc2index.update(zip(vocabulary, itertools.count()))
return vocabulary, voc2index
# preprocessing Freebase facts: transform a fact (s, r, {o1, ... ok} to vector with a bag-of-symbole
def f_y(symbols2index, kb):
"""
Note:
preprocessing knowledge base
Args:
symbols2index: mapping object
kb: knowledge base's path
Returns:
mx: knowledge matrix
knowledgebase_size: number of facts
candidate_mx: subject and relationship matrix
responses: mapping number of fact to objects
"""
line_ctr = itertools.count()
data_tuples = list()
responses = dict()
candidate_tuple = list()
with open(kb, encoding="utf8") as f_in:
for l in f_in:
entity, rel, objs = process_fact(l)
l = next(line_ctr)
data_tuples.append((1.0, l, symbols2index[entity]))
data_tuples.append((1.0, l, symbols2index[rel]))
candidate_tuple.append((1.0, l, symbols2index[entity]))
candidate_tuple.append((1.0, l, symbols2index[rel]))
data_tuples.extend([(1./len(objs), l, symbols2index[o]) for o in objs])
responses[l] = objs
data, row, col = zip(*data_tuples)
candidate_data, candidate_row, candidate_col = zip(*candidate_tuple)
knowledgebase_size = next(line_ctr)
symbol_size = len(symbols2index.keys())
mx = sparse.csr_matrix((data, (row, col)), shape=(knowledgebase_size,symbol_size))
candidate_mx = sparse.csr_matrix((candidate_data, (candidate_row, candidate_col)), shape=(knowledgebase_size,symbol_size))
return mx, knowledgebase_size, candidate_mx, responses
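# Worked reading of the encoding above (fact values are illustrative): a fact
# (s, r, {o1, o2}) becomes one row of `mx` with 1.0 in the columns of s and r
# and 1/2 in the columns of o1 and o2, while the same row of `candidate_mx`
# keeps only the subject and relation entries; the objects are recovered later
# through `responses`.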
def f_y_facts(symbols2index, dataset):
"""
Note:
preprocessing facts in dataset
Args:
symbols2index: mapping object
dataset: dataset similar to SimpleQuestion train/valid/test
Returns:
mx: fact matrice
"""
line_ctr = itertools.count()
data_tuples = list()
for l in dataset:
entity, rel, obj, question = l.rstrip().split('\t')
l = next(line_ctr)
data_tuples.append((1.0, l, symbols2index[entity]))
data_tuples.append((1.0, l, symbols2index[rel]))
data_tuples.append((1.0, l, symbols2index[obj]))
data, row, col = zip(*data_tuples)
mx = sparse.csr_matrix((data, (row, col)))
return mx
def g_q(symbols2index, voc2index, dataset):
"""
Note:
preprocessing dataset
Args:
symbols2index: map symbol to index
voc2index: map word to index
dataset: dataset similar to SimpleQuestion train/valid/test
Returns:
f_mx: fact matrice
q_mx: question matrice
M: number of records in dataset
"""
line_ctr = itertools.count()
data_tuples = list()
fact_tuples = list()
with open(dataset, encoding="utf8") as in_f:
for line in in_f:
l = next(line_ctr)
fact_tuples.extend([(1, l, symbols2index[s]) for s in line.split("\t")[0:3]])
data_tuples.extend([(1, l, voc2index[w]) for w in clean_words(split_line(line))])
f_data, f_row, f_col = zip(*fact_tuples)
q_data, q_row, q_col = zip(*data_tuples)
M = next(line_ctr)
N = len(symbols2index.keys())
O = len(voc2index.keys())
f_mx = sparse.csr_matrix((f_data, (f_row, f_col)), shape=(M, N))
q_mx = sparse.csr_matrix((q_data, (q_row, q_col)), shape=(M, O))
return f_mx, q_mx, M
def g_q_single_question(voc2index, question):
"""
Note:
preprocessing single question
Args:
voc2index: map word to index
question: question in natural language
Returns:
q_mx: question vector
"""
data_tuples = list()
data_tuples.extend([(1, 0, voc2index[w]) for w in clean_words(question.strip().lower().split(' ')) if w in voc2index])
q_data, q_row, q_col = zip(*data_tuples)
O = len(voc2index.keys())
q_mx = sparse.csr_matrix((q_data, (q_row, q_col)), shape=(1, O))
return q_mx
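# Hedged worked example (vocabulary, indices and the behaviour of clean_words
# are assumptions for illustration): with voc2index = {"hamlet": 0, "who": 1,
# "wrote": 2}, the question "Who wrote Hamlet" becomes a 1 x |V| sparse row
# with ones in columns 1, 2 and 0 -- the same bag-of-words encoding g_q builds
# for every question in a dataset.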
def negative_exemples_generation(symbols2index, kb):
"""
Note:
generate negative examples from knowledge base
Args:
symbols2index: map symbol to index
kb: knowledge base's path
Returns:
mx: negative example matrice
M: number of negative examples
"""
line_ctr = itertools.count()
data_tuples = list()
with open(kb, encoding="utf8") as f_in:
for l in f_in:
entity, rel, objs = process_fact(l)
for o in objs:
l = next(line_ctr)
data_tuples.append((1.0, l, symbols2index[entity]))
data_tuples.append((1.0, l, symbols2index[rel]))
data_tuples.append((1.0, l, symbols2index[o]))
data, row, col = zip(*data_tuples)
M = next(line_ctr)
N = len(symbols2index.keys())
mx = sparse.csr_matrix((data, (row, col)), shape=(M,N))
return mx, M
```
#### File: Cross-lingual-Simple-Question-Answering-with-Memory-Networks/MemNN/model.py
```python
import tensorflow as tf
from MemNN.output import g_q_single_question, candidate_generation
import random
def normalize_vector(vector_to_normalize):
norm = tf.sqrt(tf.reduce_sum(tf.square(vector_to_normalize), 1, keep_dims=True))
normalized_vector = vector_to_normalize/norm
return normalized_vector
def cosine_similarity(labels, predictions):
normalized_labels = normalize_vector(labels)
normalized_predictions = normalize_vector(predictions)
similarity = tf.matmul(normalized_labels, tf.transpose(normalized_predictions))
return similarity
class SimpleQA_MemNN(object):
def __init__(self, parameters, config, session):
"""Creates an embedding based Memory Network
Args:
parameters:
vocabulary_size : size of bag of word
symbol_size: size of bag of symbol
initializer: random initializer for embedding matrix
kb_vec: knowledge base vector
kb_size: size of knowledge Base
candidate_vec: candidate(subject, relationship) vector
word2index: map word to index
symbol2index: map mid/IMDBid to index
ngram2id: map labels to mid or IMDBid
responses: map question index to response
config:
embedding_size: dimension of embedding matrix
lamb_da: margin in ranking loss function
model_name
session: Tensorflow Session the model is run with. Defaults to `tf.Session()`.
"""
self._embedding_size = config.embedding_size
self._lambda = config.lamb_da
self._name = config.model_name
self._vocabulary_size = parameters["vocabulary_size"]
self._init = parameters["initializer"]
self._symbole_size = parameters["symbol_size"]
self._kb_vec = parameters["kb_vec"]
self._kb_size = parameters["kb_size"]
self._candidate_vec = parameters["candidate_vec"]
self._word2index = parameters["word2index"]
self._symbol2index = parameters["symbol2index"]
self._ngram2id = parameters["ngram2id"]
self._responses = parameters["responses"]
# build placeholder and initializer embedding matrices
self._build_inputs()
self._build_vars()
# optimizer
self._opt = tf.train.AdadeltaOptimizer()
# cosine similarity
cosine_similarity_positive = self.similarity_calcul(self._facts, self._questions)
cosine_similarity_negative = self.similarity_calcul(self._negative_facts, self._questions)
loss = tf.nn.relu(tf.add(tf.subtract(self._lambda, cosine_similarity_positive), cosine_similarity_negative))
loss_sum = tf.reduce_sum(loss, name="loss_sum")
loss_unity = tf.reduce_mean(loss)
tf.summary.scalar('average loss', loss_unity)
# loss op
loss_op = loss_sum
train_op = self._opt.minimize(loss_op)
# assign ops
self.loss_op = loss_op
self.similarity_op = cosine_similarity_positive
self.train_op = train_op
self.merged_summary = tf.summary.merge_all()
# variable initialization
init_op = tf.global_variables_initializer()
self._sess = session
self._sess.run(init_op)
def _build_inputs(self):
# build placeholder for input vectors
self._questions = tf.placeholder(tf.float32, [None, self._vocabulary_size], name="queries")
self._facts = tf.placeholder(tf.float32, [None, self._symbole_size], name="facts")
self._negative_facts = tf.placeholder(tf.float32, [None, self._symbole_size], name="negative_facts")
self._wiki_questions = tf.placeholder(tf.float32, [None, self._vocabulary_size], name="wiki_queries")
self._wiki_similar_questions = tf.placeholder(tf.float32, [None, self._vocabulary_size], name="wiki_similar_queries")
self._wiki_unsimilar_questions = tf.placeholder(tf.float32, [None, self._vocabulary_size], name="wiki_unsimilar_queries")
def _build_vars(self):
# build embedding matrix
with tf.variable_scope(self._name):
self.Wv = tf.Variable(self._init([self._embedding_size, self._vocabulary_size]), name="Wv")
self.Ws = tf.Variable(self._init([self._embedding_size, self._symbole_size]), name="Ws")
self.Wvs = tf.concat([self.Wv, self.Ws], 1)
def similarity_calcul(self, facts, questions):
with tf.variable_scope(self._name):
labels = tf.matmul(self.Wv,tf.transpose(questions))
predictions = tf.matmul(self.Ws,tf.transpose(facts))
cosine_sim = cosine_similarity(tf.transpose(labels), tf.transpose(predictions))
return tf.diag_part(cosine_sim)
def vairable_summaries(self, var):
with tf.variable_scope(self._name):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.histogram('histogram', var)
def batch_fit(self, facts, negative_facts, questions):
# train with ranking loss function
feed_dict = {self._questions: questions, self._facts: facts, self._negative_facts: negative_facts}
loss, _, summary = self._sess.run([self.loss_op, self.train_op, self.merged_summary], feed_dict=feed_dict)
return loss, summary
def loss(self, facts, negative_facts, questions):
# calculate loss
feed_dict = {self._questions: questions, self._facts: facts, self._negative_facts: negative_facts}
loss, summary= self._sess.run([self.loss_op, self.merged_summary], feed_dict=feed_dict)
return loss, summary
def similarity(self, facts, questions):
# compute similarity used when answering questions
feed_dict = {self._questions: questions, self._facts: facts}
similar= self._sess.run([self.similarity_op], feed_dict=feed_dict)
return similar
def find_most_probable_candidate(self, question):
question_vectorized = g_q_single_question(self._word2index, question)
candidates_generated = candidate_generation(self._candidate_vec, self._kb_size, len(self._symbol2index), self._ngram2id , self._symbol2index, question)
max_similarity = 0
best_candidate = None # fall-back values in case candidate_generation returns no candidates
best_fact = None
for candidate in candidates_generated:
candidate_fact = self._kb_vec[candidate]
similarity_rate = self.similarity(candidate_fact.todense(), question_vectorized.todense())
if max_similarity < similarity_rate[0][0]:
max_similarity = similarity_rate[0][0]
best_candidate = candidate
best_fact = candidate_fact
return (max_similarity, best_candidate, best_fact)
def evaluate(self, obj, question):
try:
(max_similarity, best_candidate, best_fact) = self.find_most_probable_candidate(question)
if max_similarity > 0 and obj in self._responses[best_candidate]:
return True
return False
except:
return False
```
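For intuition, here is a NumPy-only illustration (not repository code) of the margin ranking objective the model minimizes, relu(lambda - cos(q, f_pos) + cos(q, f_neg)); the embedded vectors and the margin value below are made up.
```python
# Margin ranking loss on toy embedded vectors: zero once the supporting fact
# outranks the negative fact by at least the margin.
import numpy as np

def cos(a, b):
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

q = np.array([1.0, 0.0, 1.0])      # embedded question (hypothetical values)
f_pos = np.array([1.0, 0.1, 0.9])  # embedded supporting fact
f_neg = np.array([0.0, 1.0, 0.2])  # embedded negative fact
lam = 0.2                          # margin, i.e. config.lamb_da

loss = max(0.0, lam - cos(q, f_pos) + cos(q, f_neg))
print(round(loss, 4))
```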
|
{
"source": "Jerry-zhk/se-proj",
"score": 3
}
|
#### File: Jerry-zhk/se-proj/bpnn.py
```python
import numpy as np
import time
import h5py
import matplotlib.pyplot as plt
import scipy
from sklearn.model_selection import train_test_split
from parse_coverages import get_tests_matrix
import argparse
# from PIL import Image
# %matplotlib inline
# plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots
# plt.rcParams['image.interpolation'] = 'nearest'
# plt.rcParams['image.cmap'] = 'gray'
# %load_ext autoreload
# %autoreload 2
np.random.seed(1)
def sigmoid(Z):
"""
Implements the sigmoid activation in numpy
Arguments:
Z -- numpy array of any shape
Returns:
A -- output of sigmoid(z), same shape as Z
cache -- returns Z as well, useful during backpropagation
"""
A = 1/(1+np.exp(-Z))
cache = Z
return A, cache
def sigmoid_backward(dA, cache):
"""
Implement the backward propagation for a single SIGMOID unit.
Arguments:
dA -- post-activation gradient, of any shape
cache -- 'Z' where we store for computing backward propagation efficiently
Returns:
dZ -- Gradient of the cost with respect to Z
"""
Z = cache
s = 1/(1+np.exp(-Z))
dA = np.squeeze(np.asarray(dA))
s = np.squeeze(np.asarray(s))
dZ = dA * s * (1-s)
if (Z.shape[0] == 1):
dZ = dZ.reshape((1, dZ.shape[0]))
assert (dZ.shape == Z.shape)
return dZ
def relu(Z):
"""
Implement the RELU function.
Arguments:
Z -- Output of the linear layer, of any shape
Returns:
A -- Post-activation parameter, of the same shape as Z
cache -- a python dictionary containing "A" ; stored for computing the backward pass efficiently
"""
A = np.maximum(0,Z)
assert(A.shape == Z.shape)
cache = Z
return A, cache
def relu_backward(dA, cache):
"""
Implement the backward propagation for a single RELU unit.
Arguments:
dA -- post-activation gradient, of any shape
cache -- 'Z' where we store for computing backward propagation efficiently
Returns:
dZ -- Gradient of the cost with respect to Z
"""
Z = cache
dZ = np.array(dA, copy=True) # just converting dz to a correct object.
# When z <= 0, you should set dz to 0 as well.
dZ[Z <= 0] = 0
assert (dZ.shape == Z.shape)
return dZ
def initialize_parameters_deep(layer_dims):
"""
Arguments:
layer_dims -- python array (list) containing the dimensions of each layer in our network
Returns:
parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
Wl -- weight matrix of shape (layer_dims[l], layer_dims[l-1])
bl -- bias vector of shape (layer_dims[l], 1)
"""
np.random.seed(5)
parameters = {}
L = len(layer_dims) # number of layers in the network
for l in range(1, L):
parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l-1]) * 0.01
parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))
assert(parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l-1]))
assert(parameters['b' + str(l)].shape == (layer_dims[l], 1))
# print(parameters)
return parameters
def linear_forward(A, W, b):
"""
Implement the linear part of a layer's forward propagation.
Arguments:
A -- activations from previous layer (or input data): (size of previous layer, number of examples)
W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)
b -- bias vector, numpy array of shape (size of the current layer, 1)
Returns:
Z -- the input of the activation function, also called pre-activation parameter
cache -- a python dictionary containing "A", "W" and "b" ; stored for computing the backward pass efficiently
"""
# print ('linear_forward')
# print ('W',W)
# print ('A',A)
# print ('b',b)
Z = np.dot(W, A) + b
# print ('Z',Z)
assert(Z.shape == (W.shape[0], A.shape[1]))
cache = (A, W, b)
return Z, cache
def linear_activation_forward(A_prev, W, b, activation):
"""
Implement the forward propagation for the LINEAR->ACTIVATION layer
Arguments:
A_prev -- activations from previous layer (or input data): (size of previous layer, number of examples)
W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)
b -- bias vector, numpy array of shape (size of the current layer, 1)
activation -- the activation to be used in this layer, stored as a text string: "sigmoid" or "relu"
Returns:
A -- the output of the activation function, also called the post-activation value
cache -- a python dictionary containing "linear_cache" and "activation_cache";
stored for computing the backward pass efficiently
"""
if activation == "sigmoid":
# Inputs: "A_prev, W, b". Outputs: "A, activation_cache".
Z, linear_cache = linear_forward(A_prev, W, b)
A, activation_cache = sigmoid(Z)
elif activation == "relu":
# Inputs: "A_prev, W, b". Outputs: "A, activation_cache".
Z, linear_cache = linear_forward(A_prev, W, b)
A, activation_cache = relu(Z)
assert (A.shape == (W.shape[0], A_prev.shape[1]))
cache = (linear_cache, activation_cache)
return A, cache
def L_model_forward(X, parameters):
"""
Implement forward propagation for the [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID computation
Arguments:
X -- data, numpy array of shape (input size, number of examples)
parameters -- output of initialize_parameters_deep()
Returns:
AL -- last post-activation value
caches -- list of caches containing:
every cache of linear_relu_forward() (there are L-1 of them, indexed from 0 to L-2)
the cache of linear_sigmoid_forward() (there is one, indexed L-1)
"""
caches = []
A = X
L = len(parameters) // 2 # number of layers in the neural network
# Implement [LINEAR -> ACTIVATION]*(L-1); note that "sigmoid" (not relu) is passed for the hidden layers below. Add "cache" to the "caches" list.
for l in range(1, L):
A_prev = A
A, cache = linear_activation_forward(A_prev, parameters['W' + str(l)], parameters['b' + str(l)], "sigmoid")
caches.append(cache)
# Implement LINEAR -> SIGMOID. Add "cache" to the "caches" list.
AL, cache = linear_activation_forward(A, parameters['W' + str(L)], parameters['b' + str(L)], "sigmoid")
caches.append(cache)
assert(AL.shape == (1,X.shape[1]))
return AL, caches
def compute_cost(AL, Y):
"""
Implement the cost function defined by equation (7).
Arguments:
AL -- probability vector corresponding to your label predictions, shape (1, number of examples)
Y -- true "label" vector (for example: containing 0 if non-cat, 1 if cat), shape (1, number of examples)
Returns:
cost -- cross-entropy cost
"""
m = Y.shape[1]
# Compute loss from aL and y.
cost = -np.sum(np.multiply(np.log(AL),Y) + np.multiply(np.log(1 - AL), 1 - Y)) / m
cost = np.squeeze(cost)
assert(cost.shape == ())
return cost
def linear_backward(dZ, cache):
"""
Implement the linear portion of backward propagation for a single layer (layer l)
Arguments:
dZ -- Gradient of the cost with respect to the linear output (of current layer l)
cache -- tuple of values (A_prev, W, b) coming from the forward propagation in the current layer
Returns:
dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev
dW -- Gradient of the cost with respect to W (current layer l), same shape as W
db -- Gradient of the cost with respect to b (current layer l), same shape as b
"""
A_prev, W, b = cache
m = A_prev.shape[1]
dW = np.dot(dZ, A_prev.T) / m
db = np.sum(dZ, axis=1, keepdims=True) / m
dA_prev = np.dot(W.T, dZ)
assert (dA_prev.shape == A_prev.shape)
assert (dW.shape == W.shape)
assert (db.shape == b.shape)
return dA_prev, dW, db
def linear_activation_backward(dA, cache, activation):
"""
Implement the backward propagation for the LINEAR->ACTIVATION layer.
Arguments:
dA -- post-activation gradient for current layer l
cache -- tuple of values (linear_cache, activation_cache) we store for computing backward propagation efficiently
activation -- the activation to be used in this layer, stored as a text string: "sigmoid" or "relu"
Returns:
dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev
dW -- Gradient of the cost with respect to W (current layer l), same shape as W
db -- Gradient of the cost with respect to b (current layer l), same shape as b
"""
linear_cache, activation_cache = cache
if activation == "relu":
dZ = relu_backward(dA, activation_cache)
dA_prev, dW, db = linear_backward(dZ, linear_cache)
elif activation == "sigmoid":
dZ = sigmoid_backward(dA, activation_cache)
dA_prev, dW, db = linear_backward(dZ, linear_cache)
return dA_prev, dW, db
def L_model_backward(AL, Y, caches):
"""
Implement the backward propagation for the [LINEAR->RELU] * (L-1) -> LINEAR -> SIGMOID group
Arguments:
AL -- probability vector, output of the forward propagation (L_model_forward())
Y -- true "label" vector (containing 0 if non-cat, 1 if cat)
caches -- list of caches containing:
every cache of linear_activation_forward() with "relu" (it's caches[l], for l in range(L-1) i.e l = 0...L-2)
the cache of linear_activation_forward() with "sigmoid" (it's caches[L-1])
Returns:
grads -- A dictionary with the gradients
grads["dA" + str(l)] = ...
grads["dW" + str(l)] = ...
grads["db" + str(l)] = ...
"""
grads = {}
L = len(caches) # the number of layers
m = AL.shape[1]
Y = Y.reshape(AL.shape) # after this line, Y is the same shape as AL
# Initializing the backpropagation
dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))
# Lth layer (SIGMOID -> LINEAR) gradients. Inputs: "AL, Y, caches". Outputs: "grads["dAL"], grads["dWL"], grads["dbL"]
current_cache = caches[L-1]
grads["dA" + str(L)], grads["dW" + str(L)], grads["db" + str(L)] = linear_activation_backward(dAL, current_cache, "sigmoid")
for l in reversed(range(L-1)):
# lth layer gradients; "sigmoid" is passed here as well, matching the forward pass above.
# Inputs: "grads["dA" + str(l + 2)], caches". Outputs: "grads["dA" + str(l + 1)] , grads["dW" + str(l + 1)] , grads["db" + str(l + 1)]
current_cache = caches[l]
dA_prev_temp, dW_temp, db_temp = linear_activation_backward(grads["dA" + str(l + 2)], current_cache, "sigmoid")
grads["dA" + str(l + 1)] = dA_prev_temp
grads["dW" + str(l + 1)] = dW_temp
grads["db" + str(l + 1)] = db_temp
return grads
def update_parameters(parameters, grads, learning_rate):
"""
Update parameters using gradient descent
Arguments:
parameters -- python dictionary containing your parameters
grads -- python dictionary containing your gradients, output of L_model_backward
Returns:
parameters -- python dictionary containing your updated parameters
parameters["W" + str(l)] = ...
parameters["b" + str(l)] = ...
"""
L = len(parameters) // 2 # number of layers in the neural network
# Update rule for each parameter. Use a for loop.
for l in range(L):
parameters["W" + str(l+1)] = parameters["W" + str(l+1)] - learning_rate * grads["dW" + str(l+1)]
parameters["b" + str(l+1)] = parameters["b" + str(l+1)] - learning_rate * grads["db" + str(l+1)]
return parameters
def L_layer_model(X, Y, layers_dims, learning_rate = 0.0075, num_iterations = 3000, print_cost=False, filename='plot.png'):#lr was 0.009
"""
Implements a L-layer neural network: [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID.
Arguments:
X -- data, numpy array of shape (number of examples, input number)
Y -- true "label" vector (containing 0 if cat, 1 if non-cat), of shape (1, number of examples)
layers_dims -- list containing the input size and each layer size, of length (number of layers + 1).
learning_rate -- learning rate of the gradient descent update rule
num_iterations -- number of iterations of the optimization loop
print_cost -- if True, it prints the cost every 100 steps
Returns:
parameters -- parameters learnt by the model. They can then be used to predict.
"""
np.random.seed(1)
costs = [] # keep track of cost
# Parameters initialization.
parameters = initialize_parameters_deep(layers_dims)
# Loop (gradient descent)
for i in range(0, num_iterations):
# print ('iteration:', i, '---------------------------')
# Forward propagation: [LINEAR -> RELU]*(L-1) -> LINEAR -> SIGMOID.
AL, caches = L_model_forward(X.T, parameters)
# Compute cost.
cost = compute_cost(AL, Y)
# Backward propagation.
grads = L_model_backward(AL, Y, caches)
# Update parameters.
parameters = update_parameters(parameters, grads, learning_rate)
# Print the cost every 100 training example
if print_cost and i % 100 == 0:
print ("Cost after iteration %i: %f" %(i, cost))
print (AL)
if print_cost and i % 100 == 0:
costs.append(cost)
# plot the cost
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per tens)')
plt.title("Learning rate =" + str(learning_rate))
# plt.show()
plt.savefig(filename)
return parameters
def getData(m):
matrix = np.matrix(m).astype(int)
labels = np.squeeze(np.asarray(matrix[:, -1]))
dataset = matrix[:, 0:-1]
return dataset, labels
def getData1():
# in this case; num_features = num_statements
# num_input = num_test_cases
dataset = np.matrix([
[1,1,1,1,0,1,0,0,1],
[1,0,0,0,1,1,1,1,0],
[0,0,0,0,0,1,1,0,0],
[1,1,0,0,1,0,1,1,1],
[1,1,1,0,1,1,1,1,1],
[0,0,1,0,0,1,1,1,0],
[1,1,1,1,0,1,0,1,1]
]).astype(int)
# in labels, 0 means success, 1 means failure
#labels = np.array([[0],[0],[0],[0],[0],[1],[1]])
labels = np.array([0,0,0,0,0,1,1])
# transform the labels to one-hot format
labels_onehot = np.zeros((labels.shape[0], 2)).astype(int)
labels_onehot[np.arange(len(labels)), labels.astype(int)] = 1
# # divide the dataset into train and test datasets
# train_dataset, test_dataset, \
# train_labels, test_labels = train_test_split(
# dataset, labels, test_size = .1, random_state = 12)
return dataset, labels
def getDataTest(dim):
# estimate the suspiciousness of each statement
test_susp_dataset = np.identity(dim)
return test_susp_dataset
def predict(X, parameters):
"""
This function is used to predict the results of a L-layer neural network.
Arguments:
X -- data set of examples you would like to label
parameters -- parameters of the trained model
Returns:
p -- predictions for the given dataset X
"""
m = X.shape[1]
n = len(parameters) // 2 # number of layers in the neural network
# p = np.zeros((1,m))
# Forward propagation
probas, caches = L_model_forward(X, parameters)
# # convert probas to 0/1 predictions
# for i in range(0, probas.shape[1]):
# if probas[0,i] > 0.5:
# p[0,i] = 1
# else:
# p[0,i] = 0
# print("Accuracy: " + str(np.sum((p == y)/m)))
return probas
def insertonSort(alist):
index = [x for x in range(len(alist))]
rank = [1 for x in range(len(alist))]
for i in range(len(alist)):
key = alist[i]
val = index[i]
j = i - 1
while j >= 0 and alist[j] > key:
alist[j+1] = alist[j]
index[j+1] = index[j]
j -= 1
alist[j+1] = key
index[j+1] = val
ranking = 1
for i in range(len(alist)-1,0,-1):
ranking += 1
if alist[i] == alist[i-1]:
rank[index[i-1]] = rank[index[i]]
else:
rank[index[i-1]] = ranking
return rank,index
def train(train_dataset, train_labels):
layers_dims = [train_dataset.shape[1],5,5,1]
train_labels = np.array([train_labels])
parameters = L_layer_model(train_dataset, train_labels, layers_dims, learning_rate = 0.3, num_iterations = 15000, print_cost = True, filename='case2.1.png')
return parameters
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Run BPNN to locate fault against coverage XML files')
parser.add_argument('source', type=str, help='name of the folder containing coverage .xml file')
parser.add_argument('prog_name', type=str, help='name of program, i.e. \'sort\' for sort.py')
args = parser.parse_args()
matrix = get_tests_matrix(args.source, args.prog_name)
train_dataset, train_labels = getData(matrix)
# print(train_dataset)
# print(train_labels)
params = train(train_dataset, train_labels)
# print(params)
test_dataset = getDataTest(train_dataset.shape[1])
result = predict(test_dataset, params)
print(result)
#sus = result
# print(np.squeeze(np.asarray(result)))
rank, index= insertonSort(np.squeeze(np.asarray(result)))
# print ("sus: ",sus,"\n")
# print ("sorted: ",result,"\n")
# print ("index:", index,"\n")
# print ("rank is :",rank,"\n")
# print ("The most buggy statement is: Statement No.", rank.index(1)+1)
#print(len(rank))
for i in range(len(rank)-1,-1,-1):
#print("i is:", i)
print("Statement {:>2}: {:>4}".format(index[i]+1,rank[index[i]]))
```
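A hedged usage sketch for the network above: training on the built-in toy coverage matrix from getData1() instead of parsed XML coverage. It assumes this file is importable as `bpnn` with its own dependencies (matplotlib, h5py, scipy, sklearn, parse_coverages) available; the layer sizes, learning rate, iteration count, and output file name are illustrative only.
```python
# Train on the toy matrix, then score each statement's suspiciousness via identity test inputs.
import numpy as np
import bpnn

dataset, labels = bpnn.getData1()
layers_dims = [dataset.shape[1], 5, 5, 1]
params = bpnn.L_layer_model(dataset, np.array([labels]), layers_dims,
                            learning_rate=0.3, num_iterations=500,
                            print_cost=False, filename='toy_cost.png')
suspiciousness = bpnn.predict(bpnn.getDataTest(dataset.shape[1]), params)
print(np.squeeze(np.asarray(suspiciousness)))  # one score per statement
```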
#### File: Jerry-zhk/se-proj/gen_tc_symbcount.py
```python
import random
import string
import argparse
import os
#:;<=>?@
def gen_s1(n):
N = n
return ''.join(random.choice(":;<=>?@") for _ in range(N))
#[\]^_`
def gen_s2(n):
N = n
return ''.join(random.choice("[]^_`") for _ in range(N))
#{|}~
def gen_s3(n):
N = n
return ''.join(random.choice("{|}~") for _ in range(N))
def gen_upper(n):
N = n
return ''.join(random.choice(string.ascii_uppercase) for _ in range(N))
def gen_lower(n):
N = n
return ''.join(random.choice(string.ascii_lowercase) for _ in range(N))
def gen_digit(n):
N = n
return ''.join(random.choice(string.digits) for _ in range(N))
def gen_str(up,lo,num,s1,s2,s3):
s = ""
s = gen_upper(up) + gen_lower(lo) + gen_digit(num) + gen_s1(s1) + gen_s2(s2) + gen_s3(s3)
return s
def generate(num_of_tc = 25, dest = 'tc_symbcount'):
if not os.path.exists(dest):
os.makedirs(dest)
for i in range(num_of_tc):
with open(dest + '/testcases_' + str(i) + '.txt', 'w') as f:
# random gen
up = random.randint(0,3)
lo = random.randint(0,3)
num = random.randint(0,3)
s1 = random.randint(0,3)
s2 = random.randint(0,3)
s3 = random.randint(0,3)
#s = gen_str(up,0,0,0,0,0)
s = gen_str(up,lo,num,s1,s2,s3)
#count_list = [up,0,0,0,0,0]
count_list = [up,lo,num,s1,s2,s3]
count_list_str = ",".join(str(x) for x in count_list)
f.write(s + ' ' + count_list_str + '\n')
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Generate testcases for sorting algorithms', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--ntc', default=25, type=int, help='number of testcases')
parser.add_argument('--dest', default='tc_symbcount', type=str, help='destination folder to store the testcase .txt file')
args = parser.parse_args()
generate(args.ntc, args.dest)
```
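A small usage sketch (not from the repository): calling generate() directly to produce a handful of test-case files and printing the first one. The folder name `tc_symbcount_demo` is arbitrary; file names follow the pattern used above.
```python
# Generate five random test cases, each a string followed by its per-class symbol counts.
import gen_tc_symbcount

gen_tc_symbcount.generate(num_of_tc=5, dest='tc_symbcount_demo')
with open('tc_symbcount_demo/testcases_0.txt') as f:
    print(f.read().strip())
# prints something like: Ab3;[{ 1,1,1,1,1,1   (random string, then the six counts)
```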
|
{
"source": "jerryzhou196/clac",
"score": 3
}
|
#### File: clac/clac/rpa.py
```python
from ctypes import pythonapi
import subprocess, pyautogui, time, glob, os
from clac.utils.rpa_utils import focus_screen
def add_word(word, option, scraped_info, t_sleep=2.75):
"""
a RPA that adds a word to Anki
"""
subprocess.Popen('C:\\Program Files\\Anki\\anki.exe') # opening the anki program
time.sleep(t_sleep+5)
focus_screen()
time.sleep(t_sleep)
pyautogui.hotkey('a') # opening the add window - in the front area
n_example = len(glob.glob(f'./words/{word}/meaning_{option}/example[0-9].txt')) # numbers of examples
time.sleep(t_sleep)
pyautogui.write(word + '\n')
try: # try to write the inflections
with open(f'./words/{word}/inflections.txt') as file: # add inflection (if exist)
pyautogui.write('Inflections: ' + file.readline() + '\n\n')
except FileNotFoundError: # inflections not found, pass
pass
if scraped_info['searched word']['mp3'] != None: # adding the word pronunciation
pyautogui.hotkey('f3') # attach picture/audio/video
time.sleep(t_sleep)
pyautogui.hotkey('ctrl', 'l') # path insert mode
pyautogui.write(os.getcwd() + f'\\words\\{word}')
time.sleep(t_sleep)
pyautogui.press('enter')
time.sleep(t_sleep)
pyautogui.hotkey('alt', 'n')
time.sleep(t_sleep)
pyautogui.write(f'{word}.mp3')
time.sleep(t_sleep)
pyautogui.press('enter')
for example_number in range(n_example):
with open(f'./words/{word}/meaning_{option}/example{example_number}.txt', 'r') as file:
pyautogui.write(('\n' if example_number!=0 else '') + f'Example {example_number+1}:' + next(file) + '\n') # write the example
pyautogui.hotkey('f3') # attach picture/audio/video
time.sleep(t_sleep)
pyautogui.hotkey('ctrl', 'l') # path insert mode
pyautogui.write(os.getcwd() + f'\\words\\{word}\\meaning_{option}')
time.sleep(t_sleep)
pyautogui.press('enter')
time.sleep(t_sleep)
pyautogui.hotkey('alt', 'n')
time.sleep(t_sleep)
pyautogui.write(f'example{example_number}.mp3')
time.sleep(t_sleep)
pyautogui.press('enter')
time.sleep(t_sleep)
pyautogui.press('tab') # switch to back
with open(f'./words/{word}/meaning_{option}/meaning{option}.txt') as file:
pyautogui.write(next(file)) # insert the meaning
time.sleep(t_sleep)
pyautogui.press('tab') # switch to back
with open(f'./words/{word}/meaning_{option}/tag.txt') as file:
pyautogui.write(next(file) + ' [CLAC]') # insert the vim
time.sleep(t_sleep)
pyautogui.press('tab') # switch to back
time.sleep(t_sleep)
pyautogui.press('enter')
time.sleep(t_sleep)
pyautogui.press('esc')
# add_word('bait', 2) # just testing
```
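As a hedged companion (not repository code), a small pre-flight check that verifies the on-disk layout add_word() reads from before the automation drives Anki. The helper name `ready_to_add` is hypothetical; the paths mirror the f-strings used in add_word above.
```python
# Check that the scraped meaning, tag, and example files exist for a word/meaning pair.
import glob, os

def ready_to_add(word, option):
    base = f'./words/{word}'
    examples = glob.glob(f'{base}/meaning_{option}/example[0-9].txt')
    required = [f'{base}/meaning_{option}/meaning{option}.txt',
                f'{base}/meaning_{option}/tag.txt']
    return all(os.path.exists(p) for p in required) and len(examples) > 0

print(ready_to_add('bait', 2))  # True only if scraping already produced these files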
|
{
"source": "jerryzhucs21/spinningup",
"score": 2
}
|
#### File: spinningup/demonstrations/demonstrator.py
```python
import torch
import torch.nn as nn
from torch.distributions.categorical import Categorical
from torch.distributions.normal import Normal
from torch.optim import Adam
import numpy as np
import gym
import copy
import os
import pickle
from gym.spaces import Discrete, Box
from spinup.envs.pointbot import *
import datetime
import math
import sys
import os.path as osp
from gym import Env
from gym import utils
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import random
from spinup.envs.pointbot_const import *
from spinup.rewards.cvar_utils import cvar_enumerate_pg
from spinup.rewards.pointbot_reward_utils import PointBotReward
class LineBuilder:
def __init__(self, line, env, fig, typ):
self.line = line
self.xs = list(line.get_xdata())
self.ys = list(line.get_ydata())
self.xv = [0]
self.yv = [0]
self.xt = []
self.yt = []
self.typ = typ
self.env = env
self.steps = 0
self.state = env.state
self.states = [self.state]
self.actions = []
self.cid = line.figure.canvas.mpl_connect('button_press_event', self)
self.info = fig.canvas.mpl_connect('key_press_event', self.press)
def press(self, event):
sys.stdout.flush()
if event.key == 'v':
print("X_v: ", str(self.env.state[1]), " Y_v: ", str(self.env.state[3]), " Action: ", str(self.env.curr_action))
if event.key == 'p':
print("X: ", str(self.env.state[0]), " Y: ", str(self.env.state[2]))
if event.key == 'w':
print("Most recent state: ", str(self.states[-1]), "\n states: ", str(self.states))
if event.key == 't':
if TRASH:
print("Closest Trash X: ", str(self.env.closest_trash(self.env.state)[0]), " Y: ", str(self.env.closest_trash(self.env.state)[1]))
else:
print("No TRASH on the field!")
if event.key == 'a':
print("Current Feature: ", str(self.env.feature), " Remaining steps: " + str(HORIZON - self.steps))
if event.key == 'e':
if TRASH:
plt.savefig('demonstrations/visualization_' + self.typ + "_" + str(args.dem_num) + '.png')
plt.close()
else:
if np.linalg.norm(np.subtract(GOAL_STATE, self.states[-1][:4])) <= GOAL_THRESH:
plt.savefig('demonstrations/visualization_' + self.typ + "_" + str(args.dem_num) + '.png')
plt.close()
else:
print("\nNot proper ending! X distance from END: " + str(self.xs[-1] - END_POS[0]) + " Y distance from END: " + str(self.ys[-1] - END_POS[1]))
if event.key == 'r':
if (os.path.exists("demonstrations/states_" + str(args.dem_num) + ".txt")):
os.remove("demonstrations/states_" + str(args.dem_num) + ".txt")
if event.key == 'g':
if event.inaxes!=self.line.axes: return
if self.steps == HORIZON:
plt.savefig('demonstrations/visualization_' + self.typ + "_" + str(args.dem_num) + '.png')
plt.close()
return
final_x = END_POS[0]
final_y = END_POS[1]
init_x = self.xs[-1]
init_y = self.ys[-1]
diff_x = final_x - init_x
diff_y = final_y - init_y
x_f = diff_x
y_f = diff_y
if diff_x >= 0 and diff_y >= 0:
if abs(diff_x) >= abs(diff_y):
x_f = abs(diff_x/diff_x) * MAX_FORCE/10
y_f = abs(diff_y/diff_x) * MAX_FORCE/10
else:
x_f = abs(diff_x/diff_y) * MAX_FORCE/10
y_f = abs(diff_y/diff_y) * MAX_FORCE/10
elif diff_x < 0 and diff_y >= 0:
if abs(diff_x) >= abs(diff_y):
x_f = -abs(diff_x/diff_x) * MAX_FORCE/10
y_f = abs(diff_y/diff_x) * MAX_FORCE/10
else:
x_f = -abs(diff_x/diff_y) * MAX_FORCE/10
y_f = abs(diff_y/diff_y) * MAX_FORCE/10
elif diff_x >= 0 and diff_y < 0:
if abs(diff_x) >= abs(diff_y):
x_f = abs(diff_x/diff_x) * MAX_FORCE/10
y_f = -abs(diff_y/diff_x) * MAX_FORCE/10
else:
x_f = abs(diff_x/diff_y) * MAX_FORCE/10
y_f = -abs(diff_y/diff_y) * MAX_FORCE/10
elif diff_x < 0 and diff_y < 0:
if abs(diff_x) >= abs(diff_y):
x_f = -abs(diff_x/diff_x) * MAX_FORCE/5 # note: scaled by /5 here, unlike the /10 used in every other branch of this handler
y_f = -abs(diff_y/diff_x) * MAX_FORCE/10
else:
x_f = -abs(diff_x/diff_y) * MAX_FORCE/10
y_f = -abs(diff_y/diff_y) * MAX_FORCE/10
act = tuple((x_f, y_f))
new_state, _, _, _ = self.env.step(act)
self.actions.append(self.env.curr_action)
if TRASH:
plt.scatter([self.env.next_trash[0]],[self.env.next_trash[1]], [20], '#000000')
self.xs.append(new_state[0])
self.ys.append(new_state[2])
self.steps += 1
self.xv.append(new_state[1])
self.yv.append(new_state[3])
self.states.append(new_state)
self.line.set_data(self.xs, self.ys)
self.line.figure.canvas.draw()
def __call__(self, event):
if event.inaxes!=self.line.axes: return
if self.steps == HORIZON:
plt.savefig('demonstrations/visualization_' + self.typ + "_" + str(args.dem_num) + '.png')
plt.close()
return
final_x = event.xdata
final_y = event.ydata
init_x = self.xs[-1]
init_y = self.ys[-1]
diff_x = final_x - init_x
diff_y = final_y - init_y
x_f = diff_x
y_f = diff_y
if diff_x >= 0 and diff_y >= 0:
if abs(diff_x) >= abs(diff_y):
x_f = abs(diff_x/diff_x) * MAX_FORCE
y_f = abs(diff_y/diff_x) * MAX_FORCE
else:
x_f = abs(diff_x/diff_y) * MAX_FORCE
y_f = abs(diff_y/diff_y) * MAX_FORCE
elif diff_x < 0 and diff_y >= 0:
if abs(diff_x) >= abs(diff_y):
x_f = -abs(diff_x/diff_x) * MAX_FORCE
y_f = abs(diff_y/diff_x) * MAX_FORCE
else:
x_f = -abs(diff_x/diff_y) * MAX_FORCE
y_f = abs(diff_y/diff_y) * MAX_FORCE
elif diff_x >= 0 and diff_y < 0:
if abs(diff_x) >= abs(diff_y):
x_f = abs(diff_x/diff_x) * MAX_FORCE
y_f = -abs(diff_y/diff_x) * MAX_FORCE
else:
x_f = abs(diff_x/diff_y) * MAX_FORCE
y_f = -abs(diff_y/diff_y) * MAX_FORCE
elif diff_x < 0 and diff_y < 0:
if abs(diff_x) >= abs(diff_y):
x_f = -abs(diff_x/diff_x) * MAX_FORCE
y_f = -abs(diff_y/diff_x) * MAX_FORCE
else:
x_f = -abs(diff_x/diff_y) * MAX_FORCE
y_f = -abs(diff_y/diff_y) * MAX_FORCE
act = tuple((x_f, y_f))
new_state, _, _, _ = self.env.step(act)
self.actions.append(self.env.curr_action)
if TRASH:
plt.scatter([self.env.next_trash[0]],[self.env.next_trash[1]], [20], '#000000')
self.xs.append(new_state[0])
self.ys.append(new_state[2])
self.steps += 1
self.xv.append(new_state[1])
self.yv.append(new_state[3])
self.states.append(new_state)
self.line.set_data(self.xs, self.ys)
self.line.figure.canvas.draw()
def init(typ="Good"):
env = gym.make('PointBot-v0')
env.reset()
fig = plt.figure()
ax = fig.add_subplot()
ax.set_title('PointBot Env '+ typ +' Demonstrator')
line, = ax.plot([env.state[0]], env.state[2]) # empty line
linebuilder = LineBuilder(line, env, fig, typ)
num_obst = len(env.obstacle.obs)
for i in range(num_obst):
xbound = env.obstacle.obs[i].boundsx
ybound = env.obstacle.obs[i].boundsy
rect = patches.Rectangle((xbound[0],ybound[0]),abs(xbound[1] - xbound[0]),abs(ybound[1] - ybound[0]),linewidth=1, zorder = 0, edgecolor='#d3d3d3',facecolor='#d3d3d3', fill = True)
ax.add_patch(rect)
if TRASH:
plt.scatter([env.next_trash[0]],[env.next_trash[1]], [25], '#000000')
ax.scatter([env.state[0]],[env.state[2]], [5], '#00FF00')
if not TRASH:
ax.scatter([END_POS[0]],[END_POS[1]], [5], '#FF0000')
ax.set_xlim([env.grid[0], env.grid[1]])
ax.set_ylim([env.grid[2], env.grid[3]])
plt.show()
return linebuilder
def end(linebuilder, typ="Good"):
feature_length = sum(linebuilder.env.feature) # computed before the branch: the else clause below also needs it
if TRASH:
for _ in range(HORIZON-feature_length):
next_state = [linebuilder.env.state[0], 0, linebuilder.env.state[2], 0] + NOISE_SCALE * np.random.randn(4)
next_state = np.concatenate((next_state, linebuilder.env.closest_trash(linebuilder.env.state)))
linebuilder.states.append(next_state)
linebuilder.actions.append([0, 0])
else:
for _ in range(HORIZON-feature_length):
linebuilder.states.append([END_POS[0], 0, END_POS[1], 0] + NOISE_SCALE * np.random.randn(4))
if not os.path.exists('demonstrations'):
os.makedirs('demonstrations')
try:
f = open("demonstrations/states_" + str(args.dem_num) + ".txt", "a")
f.write("\n" + typ)
f.write("\nFeature: " + str(linebuilder.env.feature))
f.write("\n\nStates: " + str(linebuilder.states))
f.write("\n\nActions: " + str(linebuilder.actions))
if TRASH:
f.write("\n\nTrash Locations: "+ str(linebuilder.env.current_trash_taken))
f.close()
return linebuilder.env.feature, linebuilder.states, linebuilder.actions
except AssertionError as msg:
print(msg)
return None
if __name__ == '__main__':
import argparse
import time
parser = argparse.ArgumentParser()
parser.add_argument('--dem_num', type=int, default=1)
parser.add_argument('--single', type=bool, default=False) # caution: argparse's type=bool makes any non-empty value (even "False") truthy; omit the flag to keep the default
args = parser.parse_args()
if args.single:
linebuilder = init("Optimal")
feature, states, actions = end(linebuilder, "Optimal")
dic = {"feature": feature, "states": states, "actions": actions}
p = open("demonstrations/features_states_actions_" + str(args.dem_num) + ".pkl", "wb")
pickle.dump(dic, p)
p.close()
else:
linebuilder = init()
good_feature, good_states, good_actions = end(linebuilder)
linebuilder = init("Bad")
bad_feature, bad_states, bad_actions = end(linebuilder, "Bad")
dic = {"Good_feature": good_feature, "Bad_feature": bad_feature, "Good_states": good_states, "Good_actions": good_actions, "Bad_states": bad_states, "Bad_actions": bad_actions}
p = open("demonstrations/features_states_actions_" + str(args.dem_num) + ".pkl", "wb")
pickle.dump(dic, p)
p.close()
```
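A hedged example of consuming the output: loading the pickle written by the __main__ block, here assuming a demonstration was already recorded with `--dem_num 1` in single mode (the file name follows the pattern used above).
```python
# Inspect a saved demonstration.
import pickle

with open("demonstrations/features_states_actions_1.pkl", "rb") as f:
    dem = pickle.load(f)
print(dem.keys())          # dict_keys(['feature', 'states', 'actions']) in single mode
print(len(dem["states"]))  # number of recorded (and padded) states
```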
#### File: spinup/rewards/pointbot_reward_utils.py
```python
import numpy as np
from spinup.envs.pointbot_const import *
class PointBotReward():
def __init__(self):
self.posterior = np.array([0.4, 0.3, 0.2, .05, .05])
self.penalties = np.array([50, 40, 0, -40, -500])
if TRASH:
self.penalties = np.array([0, 0, 0, 0, 0])
def get_reward_distribution(self, env, obs):
initial_reward = env.rewards[-1] # NOTE: Be careful of this. If we ever want to get reward distributions from observations, this will be an issue
if env.obstacle(obs) == 0:
return np.array([initial_reward] * self.posterior.shape[0])
else:
extra_cost = self.penalties * env.obstacle(obs)
return initial_reward + extra_cost
```
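For intuition, a NumPy-only illustration (not repository code) of how a reward distribution like the one returned above can be collapsed to an expected reward under the posterior over penalty hypotheses; the initial reward and the obstacle-violation value are made up.
```python
# Posterior-weighted expected reward from a per-hypothesis reward distribution.
import numpy as np

posterior = np.array([0.4, 0.3, 0.2, 0.05, 0.05])
penalties = np.array([50, 40, 0, -40, -500])
initial_reward = -1.0
obstacle_violation = 0.2  # hypothetical value of env.obstacle(obs)

reward_dist = initial_reward + penalties * obstacle_violation
expected_reward = float(posterior @ reward_dist)
print(reward_dist, expected_reward)
```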
#### File: spinup/rewards/reacher_reward_brex.py
```python
import numpy as np
from spinup.envs.pointbot_const import *
import pickle
class ReacherRewardBrex():
# Daniel's Suggested Reward
def __init__(self):
with open('brex_reacher.pickle', 'rb') as handle:
b = pickle.load(handle)
#print(b)
self.posterior = []
self.target_penalty = []
self.obstacle_penalty = []
self.weight_vectors = []
for w, prob in b.items():
self.posterior.append(prob)
self.target_penalty.append(w[0])
self.obstacle_penalty.append(w[1])
self.weight_vectors.append(np.asarray(w))
self.posterior = np.array(self.posterior)
self.obstacle_penalty = np.array(self.obstacle_penalty)
self.target_penalty = np.array(self.target_penalty)
self.weight_vectors = np.array(self.weight_vectors)
def get_posterior_weight_matrix(self):
#get the matrix of hypothesis weight vectors from the posterior one per row
return self.weight_vectors
def get_reward_distribution(self, env):
feats = env.get_features()
#print(feats)
dist_rew = feats[0]*self.target_penalty
obs_rew = feats[1]*self.obstacle_penalty
return dist_rew+obs_rew
```
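A hedged sketch of the pickle format this class appears to expect (a dict mapping (target weight, obstacle weight) tuples to posterior probabilities) and of the posterior-weighted reward it induces; all numbers below are invented.
```python
# Toy stand-in for brex_reacher.pickle and the reward it would produce.
import numpy as np

b = {(-1.0, -10.0): 0.7, (-1.0, -2.0): 0.2, (-0.5, -5.0): 0.1}

posterior = np.array(list(b.values()))
weight_vectors = np.array([np.asarray(w) for w in b.keys()])
feats = np.array([0.3, 0.05])          # hypothetical env.get_features() output
reward_dist = weight_vectors @ feats   # one reward hypothesis per posterior sample
print(reward_dist, float(posterior @ reward_dist))
```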
#### File: spinup/rewards/reacher_reward_utils.py
```python
import numpy as np
class ReacherReward():
def __init__(self):
self.posterior = np.array([0.8,0.1,0.1])
self.penalties = np.array([-2,2,0]) # for debugging 0 penalty always
def get_reward_distribution(self, env):
if not env.get_constraint():
return np.array([env.get_reward()] * self.posterior.shape[0])
else:
reward_penalties = self.penalties# * env.get_constraint()
return env.get_reward() - reward_penalties
```
|
{
"source": "jerryzhuongithub/BAL",
"score": 2
}
|
#### File: jerryzhuongithub/BAL/bal.py
```python
from __future__ import print_function
import sys
sys.path.append('.')
import logging
import numpy as np
import pandas as pd
import time
import copy
import tqdm
import xlsxwriter
import csv
import urllib.request
import os
import zipfile
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
from logistic import LogisticRegression
def default_options():
# NOTE: `Options` is neither imported nor defined in this file; this helper appears unused by the argparse-based CLI further down.
opts = Options()
opts.option('phase1-increment', type=int, default=1)
opts.option('phase2-increment', type=int, default=1)
opts.option('regularizer', type=float, default=1e4)
opts.option('trials', type=int, default=20)
opts.option('phase1', choices=['klogk','fixed','until-all-labels','m-per-class','skip'], default = 'fixed')
opts.option('phase2', choices=['passive', 'margin', 'entropy'], default = 'passive')
opts.option('fixed', type=int, default=1)
opts.option('m-per-class', type=int, default=1)
opts.option('minipool-size', type=int, default=10)
return opts
# log = Log(option('log', default=None))
class UntilAllLabelsStopping():
# TODO Generalize to m examples per class, use same m-per-class flag
def __init__(self, y):
self.y = y
self.k = len(np.unique(y))
def met(self, idxs):
labels = len(np.unique(self.y[idxs]))
return labels == self.k
class AlwaysAlreadyStopped():
def met(self, idxs):
return True
class FixedStopping():
def __init__(self, fixed):
self.fixed = fixed
def met(self, idxs):
if self.fixed is None:
raise Exception("Requires option --fixed [n]")
return len(idxs) >= self.fixed
class KLogKStopping():
def __init__(self, y):
k = len(np.unique(y))
self.j = int(k * np.log(k))
def met(self, idxs):
return len(idxs) >= self.j
class PerClass:
def __init__(self, x_train, y_train, m_per_class, increment):
self.classes = np.unique(y_train)
self.y_train = y_train
self.m_per_class = m_per_class
self.increment = increment
self.called = False
def sample(self, clf, log, t):
if not self.called:
idxs = []
for k in self.classes:
yk = np.where(self.y_train == k)[0]
np.random.shuffle(yk)
idxs += yk[:self.m_per_class].tolist()
self.buffer = idxs
self.called = True
idxs = self.buffer[:self.increment]
self.buffer = self.buffer[self.increment:]
return idxs
class MinipoolB:
def __init__(self, b, idxs, x_train, y_train, x_teach, y_teach, increment):
self.b = b
self.schedule = np.arange(np.shape(x_train)[0])
self.schedule = np.setdiff1d(self.schedule, idxs)
np.random.shuffle(self.schedule)
self.increment = increment
self.idxs = copy.deepcopy(idxs)
self.x_train = x_train
self.y_train = y_train
self.x_teach = x_teach
self.y_teach = y_teach
def sample(self, clf, log, t):
candidates = np.random.choice(self.schedule, min(len(self.schedule), self.b), replace=False)
newclf = copy.deepcopy(clf)
scores = []
for c in candidates:
z = self.idxs + [c]
newclf.fit(self.x_train[z], self.y_train[z])
predicted = newclf.predict(self.x_teach)
err = 1 - np.mean(predicted == self.y_teach)
scores.append(err)
best = candidates[np.argsort(scores)][:self.increment]
self.schedule = np.setdiff1d(self.schedule, best)
self.idxs.extend(best)
log.minipoolb.candidates.info(t, candidates)
log.minipoolb.validation_err.info(t, scores)
return best.tolist()
class Passive:
def __init__(self, idxs, x_train, increment):
self.schedule = np.arange(np.shape(x_train)[0])
self.schedule = np.setdiff1d(self.schedule, idxs)
np.random.shuffle(self.schedule)
self.increment = increment
def sample(self, clf, log, t):
idxs = self.schedule[:self.increment]
self.schedule = self.schedule[self.increment:]
return idxs.tolist()
class Margin:
def __init__(self, idxs, x_train, increment):
self.schedule = np.arange(np.shape(x_train)[0])
self.schedule = np.setdiff1d(self.schedule, idxs)
# We sort so that there is no particular logic to ties
np.random.shuffle(self.schedule)
self.increment = increment
self.x_train = x_train
self.n = np.shape(x_train)[0]
def sample(self, clf, log, t):
pk = clf.predict_proba(self.x_train[self.schedule])
pk.sort(axis = 1)
margin = pk[:,-1] - pk[:,-2]
top = self.schedule[np.argsort(margin)]
lmargin = np.repeat(np.NaN, self.n)
lmargin[self.schedule] = margin
n = self.n - len(self.schedule)
log.margins.debug(n, lmargin)
log.sorted_margins.debug(n, np.sort(margin))
log.top_margins.debug(n, top)
idxs = top[:self.increment]
self.schedule = np.setdiff1d(self.schedule, idxs)
return idxs.tolist()
class Entropy:
def __init__(self, idxs, x_train, increment):
self.schedule = np.arange(np.shape(x_train)[0])
self.schedule = np.setdiff1d(self.schedule, idxs)
np.random.shuffle(self.schedule)
self.increment = increment
self.x_train = x_train
self.n = np.shape(x_train)[0]
def sample(self, clf, log, t):
pk = clf.predict_proba(self.x_train[self.schedule])
s = np.sum(pk * np.log(pk), axis = 1)
top = self.schedule[np.argsort(s)]
ls = np.repeat(np.NaN, self.n)
ls[self.schedule] = s
n = self.n - len(self.schedule)
log.entropies.debug(n, ls)
log.sorted_entropies.debug(n, np.sort(s))
log.top_entropies.debug(n, top)
# s computes the negative entropy so taking smallest is appropriate
idxs = top[:self.increment]
self.schedule = np.setdiff1d(self.schedule, idxs)
return idxs.tolist()
class GreedyAccuracy:
def __init__(self, idxs, x_train, y_train, x_teach, y_teach, increment):
self.schedule = np.arange(np.shape(x_train)[0])
self.schedule = np.setdiff1d(self.schedule, idxs)
np.random.shuffle(self.schedule)
self.increment = increment
self.idxs = copy.deepcopy(idxs)
self.x_train = x_train
self.y_train = y_train
self.x_teach = x_teach
self.y_teach = y_teach
def sample(self, clf, log, t):
newclf = copy.deepcopy(clf)
scores = []
for c in self.schedule:
z = self.idxs + [c]
newclf.fit(self.x_train[z], self.y_train[z])
predicted = newclf.predict(self.x_teach)
err = 1 - np.mean(predicted == self.y_teach)
scores.append(err)
best = self.schedule[np.argsort(scores)][:self.increment]
self.schedule = np.setdiff1d(self.schedule, best)
self.idxs.extend(best)
return best.tolist()
class GreedyCoverage:
def __init__(self, idxs, x_train, y_train, increment, feature_offsets, class_proportions, feature_trials):
self.schedule = np.arange(np.shape(x_train)[0])
self.schedule = np.setdiff1d(self.schedule, idxs)
# We sort so that there is no particular logic to ties
np.random.shuffle(self.schedule)
self.increment = increment
self.x_train = x_train
self.y_train = y_train
self.x_features = [set(np.nonzero(x_train[i])[0].tolist()) for i in range(len(x_train))]
self.feature_size_per_class = []
for i in range(len(feature_offsets)-1):
self.feature_size_per_class.append(int(feature_offsets[i+1] - feature_offsets[i]))
self.features_seen_per_class = [set() for _ in range(len(self.feature_size_per_class))]
for i in idxs:
f = self.x_features[i]
k = self.y_train[i]
self.features_seen_per_class[k] = self.features_seen_per_class[k].union(f)
self.class_proportions = class_proportions
self.feature_trials = feature_trials
def sample(self, clf, log, t):
scores = []
for i in self.schedule:
k = self.y_train[i]
old_risk = self.class_proportions[k] * ((1 - len(self.features_seen_per_class[k]) / self.feature_size_per_class[k]) ** self.feature_trials)
new_risk = self.class_proportions[k] * ((1 - len(self.features_seen_per_class[k].union(self.x_features[i])) / self.feature_size_per_class[k]) ** self.feature_trials)
scores.append(new_risk - old_risk)
top = self.schedule[np.argsort(scores)]
idxs = top[:self.increment]
self.schedule = np.setdiff1d(self.schedule, idxs)
for i in idxs:
f = self.x_features[i]
k = self.y_train[i]
self.features_seen_per_class[k] = self.features_seen_per_class[k].union(f)
return idxs.tolist()
def get_phase1(idxs, x_train, y_train, options):
if options.phase1 == 'fixed':
return Passive(idxs, x_train, options.phase1_increment)
elif options.phase1 == 'klogk':
return Passive(idxs, x_train, options.phase1_increment)
elif options.phase1 == 'until-all-labels':
return Passive(idxs, x_train, options.phase1_increment)
elif options.phase1 == 'm-per-class':
return PerClass(x_train, y_train, options.m_per_class, options.phase1_increment)
elif options.phase1 == 'skip':
return
else:
raise Exception("%s not recognized " % options.phase1)
def get_phase2(idxs, x_train, y_train, options):
if options.phase2 == 'passive':
return Passive(idxs, x_train, options.phase2_increment)
elif options.phase2 == 'margin':
return Margin(idxs, x_train, options.phase2_increment)
elif options.phase2 == 'entropy':
return Entropy(idxs, x_train, options.phase2_increment)
elif options.phase2 == 'greedy-coverage':
return GreedyCoverage(idxs, x_train, y_train, options.phase2_increment, options.feature_offsets, options.class_props, options.l)
elif options.phase2 == 'minipool-b':
return MinipoolB(options.minipool_size, idxs, x_train, y_train, options.x_teach, options.y_teach, options.phase2_increment)
elif options.phase2 == 'greedy':
return GreedyAccuracy(idxs, x_train, y_train, options.x_teach, options.y_teach, options.phase2_increment)
else:
raise Exception("%s not recognized " % options.phase2)
def get_stopping_condition(y_train, options):
if options.phase1 == 'fixed':
return FixedStopping(options.fixed)
elif options.phase1 == 'klogk':
return KLogKStopping(y_train)
elif options.phase1 == 'm-per-class':
return FixedStopping(len(np.unique(y_train)) * options.m_per_class)
elif options.phase1 == 'until-all-labels':
return UntilAllLabelsStopping(y_train)
elif options.phase1 == 'skip':
return AlwaysAlreadyStopped()
def single_trial(x_train, y_train, x_test, y_test, options, trial):
show_progress = True
k = len(np.unique(y_train))
big_n = options.N or np.shape(x_train)[0]
idxs = []
clf = LogisticRegression(C = options.regularizer, k=k)
phase1 = get_phase1(idxs, x_train, y_train, options)
stopping_condition = get_stopping_condition(y_train, options)
stopped = False
if show_progress:
pbar = tqdm.tqdm(total=big_n)
if stopping_condition.met(idxs):
trial.info('stopped_phase1', len(idxs))
phase2 = get_phase2(idxs, x_train, y_train, options)
stopped = True
lastn = 0
while len(idxs) < big_n:
if show_progress:
pbar.update(len(idxs) - lastn)
lastn = len(idxs)
start_time = time.time()
if trial.level_is_at_least(logging.DEBUG):
oldclf = copy.deepcopy(clf)
query_time = time.time()
if not stopped:
new_idxs = phase1.sample(clf, trial, len(idxs))
idxs += new_idxs
if stopping_condition.met(idxs):
trial.info('stopped_phase1', len(idxs))
phase2 = get_phase2(idxs, x_train, y_train, options)
stopped = True
else:
new_idxs = phase2.sample(clf, trial, len(idxs))
idxs += new_idxs
n = len(idxs)
trial.query_time.info(n, time.time() - query_time)
clf = LogisticRegression(C = options.regularizer, k=k)
training_time = time.time()
clf.fit(x_train[idxs,:], y_train[idxs])
trial.training_time.info(n, time.time() - training_time)
test_time = time.time()
predicted = clf.predict(x_test)
err = 1 - np.mean(predicted == y_test)
trial.test_time.info(n, time.time() - test_time)
trial.z.info(n, new_idxs)
trial.err.info(n, err)
if trial.level_is_at_least(logging.DEBUG):
trial.pretrain_model.debug(n, oldclf)
trial.mean_predicted.debug(n, np.mean(predicted))
trial.norm_beta.debug(n, np.linalg.norm(clf.coef_[0]))
train_predicted = clf.predict(x_train[idxs,:])
train_err = 1 - np.mean(train_predicted == y_train[idxs])
trial.train_err.debug(n, train_err)
trial.mean_train_predicted.debug(n, np.mean(train_predicted))
trial.mean_train_ys.debug(n, np.mean(y_train[idxs]))
trial.predicted.debug(n, predicted)
trial.predicted_proba.debug(n, clf.predict_proba(x_test))
trial.beta.debug(n, clf.coef_[0])
trial.train_predicted.debug(n, train_predicted)
trial.train_predicted_proba.debug(n, clf.predict_proba(x_train[idxs,:]))
elapsed_time = time.time() - start_time
trial.elapsed_time.info(n, elapsed_time) # keyed by n like the other timings; a fixed key would trip Log.notify's overwrite guard on the next iteration
if show_progress:
pbar.close()
return trial
# --- CLI driver; numpy and pandas are imported a second time here, and `import bal` is redundant if this section is part of bal.py itself ---
import numpy as np
import pandas as pd
import argparse
import bal
import log
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(title='Methods',dest='method')
parser.add_argument('TRAINING_SET', help="Training set file")
parser.add_argument('TEST_SET', help="Test set file")
parser.add_argument('OUTPUT_FILE', help="Output file")
parser.add_argument('--trials', type=int, default=20, help="Number of experimental trials")
parser.add_argument('--regularizer', type=float, default=1e4)
parser.add_argument('--batch', type=int, default=1, help="Batch size of training set")
parser.add_argument('--training-set-max-size', type=int, default=None, help="Maximum training set size")
passive_parser = subparsers.add_parser('passive')
active_parser = subparsers.add_parser('active')
active_parser.add_argument('--uncertainty', choices=['margin','entropy'],default='margin')
weak_teaching_parser = subparsers.add_parser('weak-teaching')
weak_teaching_parser.add_argument('--uncertainty', choices=['margin','entropy'],default='margin')
minipool_parser = subparsers.add_parser('minipool')
minipool_parser.add_argument('--validation-set', help="Validation set file")
minipool_parser.add_argument('--candidates', type=int, required=True, help="Number of candidates to use per iteration when using minipool")
def main(args):
args.training_set = args.TRAINING_SET
args.test_set = args.TEST_SET
args.output = args.OUTPUT_FILE
x_train, y_train = np.load(args.training_set)
x_test, y_test = np.load(args.test_set)
if args.training_set_max_size is None:
args.training_set_max_size = np.shape(x_train)[0]
args.N = args.training_set_max_size
if args.method == 'passive':
args.phase1 = 'fixed'
args.fixed = 0
args.phase1_increment = 0
args.phase2 = 'passive'
elif args.method == 'active':
args.phase1 = 'until-all-labels'
args.phase1_increment = args.batch
args.phase2 = args.uncertainty
elif args.method == 'weak-teaching':
args.phase1 = 'm-per-class'
args.phase1_increment = args.batch
args.phase2 = args.uncertainty
args.m_per_class = 1
elif args.method == 'minipool':
args.phase1 = 'fixed'
args.fixed = 0
args.phase1_increment = 0
args.phase2 = 'minipool-b'
args.minipool_size = args.candidates
x_teach, y_teach = np.load(args.validation_set)
args.x_teach = x_teach
args.y_teach = y_teach
args.phase2_increment = args.batch
logger = log.Log()
for i in range(args.trials):
single_trial(x_train, y_train, x_test, y_test, args, logger.trials.select(i+1))
df1 = logger.trials._.err.df()
df2 = logger.trials._.stopped_phase1.df().transpose()
df3 = logger.trials._.z.df()
df3 = df3.apply(lambda x: pd.Series([z for y in x for z in y]), axis=0)
df3.index += 1
df_list = [df1,df2,df3]
writer = pd.ExcelWriter(args.output, engine='xlsxwriter')
df1.to_excel(writer,'Test Set Error')
df2.to_excel(writer,'Initialization Complete At')
df3.to_excel(writer,'Training Set Items')
writer.save()
if __name__ == '__main__':
args = parser.parse_args()
main(args)
```
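A hedged end-to-end sketch that drives single_trial() directly on a tiny synthetic problem instead of going through the .npy-file CLI. It assumes bal.py, log.py, and the custom logistic.py wrapper are importable from the working directory; the synthetic data, the Namespace fields, and the option values are illustrative only (they supply just the fields single_trial reads for a one-per-class initialization followed by passive sampling).
```python
# Run one labeling trial on synthetic data and print the learning curve.
import argparse
import numpy as np
import bal
import log

rng = np.random.RandomState(0)
x = rng.randn(60, 5)
y = (x[:, 0] > 0).astype(int)
x_train, y_train, x_test, y_test = x[:40], y[:40], x[40:], y[40:]

opts = argparse.Namespace(N=20, regularizer=1e4,
                          phase1='m-per-class', m_per_class=1, phase1_increment=2,
                          phase2='passive', phase2_increment=1)
logger = log.Log()
bal.single_trial(x_train, y_train, x_test, y_test, opts, logger.trials.select(1))
print(logger.trials._.err.df().tail())  # test error keyed by number of labelled examples
```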
#### File: jerryzhuongithub/BAL/log.py
```python
import logging
import six
import pandas as pd
class LogSelector(object):
def __init__(self, path, log):
self.path = path
self.log = log
def select(self, name):
return LogSelector(self.path + [name], self.log)
def debug(self, k, v):
if self.level_is_at_least(logging.DEBUG):
self.log.notify(logging.DEBUG, self.path, k, v) # pass k separately, matching notify's (level, path, k, v) signature
def info(self, k, v):
if self.level_is_at_least(logging.INFO):
self.log.notify(logging.INFO, self.path, k, v)
def __getattr__(self, name):
return LogSelector(self.path + [name], self.log)
def __getitem__(self, name):
return LogSelector(self.path + [name], self.log)
def __repr__(self):
d = self.data()
if hasattr(d, 'values'):
val = six.next(six.itervalues(d))
if isinstance(val, dict):
return repr(d.keys())
return repr(d)
def __iter__(self):
return iter([LogSelector(self.path + [key], self.log) for key in self.data()])
def __len__(self):
return len(self.data())
def data(self):
current = self.log.data
for i, p in enumerate(self.path):
if p == '_':
prefix = self.path[:i]
suffix = self.path[i+1:]
stuff = [(k, LogSelector(prefix + [k] + suffix, self.log).data()) for k,v in current.items()]
return dict(stuff)
elif p == '__':
prefix = self.path[:i]
suffix = self.path[i+1:]
stuff = []
for k1, v in current.items():
m = LogSelector(prefix + [k1] + suffix, self.log).data()
for k2 in m:
stuff.append(((k1,k2),m[k2]))
return dict(stuff)
else:
current = current[p]
return current
def series(self):
return pd.Series(self.data())
def df(self):
d = self.data()
if isinstance(list(d.values())[0], dict):
return pd.DataFrame(d)
else:
return pd.DataFrame({self.path[-1]: d})
def level_is_at_least(self, level):
return self.log.level <= level
def concat(self, f, axis=1):
k = self.data().keys()
return pd.concat([f(x) for x in iter(self)], axis=axis, keys=k)
class Log(object):
def __init__(self, name=None):
self.data = {}
self.level = logging.INFO
self.listeners = []
def select(self, name):
return LogSelector([name], self)
def info(self, k, v):
return LogSelector([], self).info(k,v)
def debug(self, k, v):
return LogSelector([], self).debug(k, v) # was .info, which bypassed the debug-level check
def df(self):
return LogSelector([], self).df()
def series(self):
return LogSelector([], self).series()
def __getattr__(self, name):
return LogSelector([name], self)
def __repr__(self):
return repr(self.data.keys())
def __iter__(self):
return iter([LogSelector([key], self) for key in self.data])
def concat(self, f, axis=1):
return LogSelector([], self).concat(f, axis)
def notify(self, level, path, k, v):
if level >= logging.INFO:
current = self.data
for p in path:
if p not in current:
current[p] = {}
current = current[p]
if k in current:
raise Exception("%s is overriding log entry!" % (path + [k])) # use the local `path`; Log has no self.path
current[k] = v
for listener in self.listeners:
listener.notify(level, path, k, v)
```
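A short, hedged demonstration (not repository code) of the Log/LogSelector API as bal.py exercises it: record per-trial error values keyed by training-set size, then recover them as a pandas DataFrame via the `_` wildcard selector. Only log.py and pandas are required; the numbers are made up.
```python
# Record two toy error curves and fan them out into a DataFrame.
import log

logger = log.Log()
for trial_id in (1, 2):
    trial = logger.trials.select(trial_id)
    for n, err in [(1, 0.50), (2, 0.40), (3, 0.35)]:
        trial.err.info(n, err)

# The `_` segment expands over all trial ids; rows are n, columns are trial ids.
print(logger.trials._.err.df())
```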
|
{
"source": "jerryzxiang/PyBaMM",
"score": 3
}
|
#### File: integration/test_models/standard_output_tests.py
```python
import pybamm
import numpy as np
class StandardOutputTests(object):
"""Calls all the tests on the standard output variables."""
def __init__(self, model, parameter_values, disc, solution):
# Assign attributes
self.model = model
self.parameter_values = parameter_values
self.disc = disc
self.solution = solution
if isinstance(self.model, pybamm.lithium_ion.BaseModel):
self.chemistry = "Lithium-ion"
elif isinstance(self.model, pybamm.lead_acid.BaseModel):
self.chemistry = "Lead acid"
# Only for constant current
current_sign = np.sign(parameter_values["Current function [A]"])
if current_sign == 1:
self.operating_condition = "discharge"
elif current_sign == -1:
self.operating_condition = "charge"
else:
self.operating_condition = "off"
def process_variables(self):
return
def run_test_class(self, ClassName):
"""Run all tests from a class 'ClassName'"""
tests = ClassName(
self.model,
self.parameter_values,
self.disc,
self.solution,
self.operating_condition,
)
tests.test_all()
def test_all(self, skip_first_timestep=False):
self.run_test_class(VoltageTests)
self.run_test_class(ElectrolyteConcentrationTests)
self.run_test_class(PotentialTests)
self.run_test_class(CurrentTests)
if self.chemistry == "Lithium-ion":
self.run_test_class(ParticleConcentrationTests)
self.run_test_class(DegradationTests)
if self.model.options["convection"] != "none":
self.run_test_class(VelocityTests)
class BaseOutputTest(object):
def __init__(self, model, param, disc, solution, operating_condition):
self.model = model
self.param = param
self.disc = disc
self.solution = solution
self.operating_condition = operating_condition
# Use dimensional time and space
self.t = solution.t * model.timescale_eval
geo = pybamm.geometric_parameters
L_x = param.evaluate(geo.L_x)
self.x_n = disc.mesh["negative electrode"].nodes * L_x
self.x_s = disc.mesh["separator"].nodes * L_x
self.x_p = disc.mesh["positive electrode"].nodes * L_x
whole_cell = ["negative electrode", "separator", "positive electrode"]
self.x = disc.mesh.combine_submeshes(*whole_cell).nodes * L_x
self.x_n_edge = disc.mesh["negative electrode"].edges * L_x
self.x_s_edge = disc.mesh["separator"].edges * L_x
self.x_p_edge = disc.mesh["positive electrode"].edges * L_x
self.x_edge = disc.mesh.combine_submeshes(*whole_cell).edges * L_x
if isinstance(self.model, pybamm.lithium_ion.BaseModel):
R_n_typ = param.evaluate(model.param.R_n_typ)
R_p_typ = param.evaluate(model.param.R_p_typ)
self.r_n = disc.mesh["negative particle"].nodes * R_n_typ
self.r_p = disc.mesh["positive particle"].nodes * R_p_typ
self.r_n_edge = disc.mesh["negative particle"].edges * R_n_typ
self.r_p_edge = disc.mesh["positive particle"].edges * R_p_typ
if self.model.options["particle size"] == "distribution":
self.R_n = disc.mesh["negative particle size"].nodes * R_n_typ
self.R_p = disc.mesh["positive particle size"].nodes * R_p_typ
# Useful parameters
self.l_n = param.evaluate(geo.l_n)
self.l_p = param.evaluate(geo.l_p)
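        # Dimensionless applied current, evaluated at the solution time points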
current_param = self.model.param.current_with_time
self.i_cell = param.process_symbol(current_param).evaluate(solution.t)
class VoltageTests(BaseOutputTest):
def __init__(self, model, param, disc, solution, operating_condition):
super().__init__(model, param, disc, solution, operating_condition)
self.eta_r_n = solution["Negative electrode reaction overpotential [V]"]
self.eta_r_p = solution["Positive electrode reaction overpotential [V]"]
self.eta_r_n_av = solution[
"X-averaged negative electrode reaction overpotential [V]"
]
self.eta_r_p_av = solution[
"X-averaged positive electrode reaction overpotential [V]"
]
self.eta_r_av = solution["X-averaged reaction overpotential [V]"]
self.eta_sei_av = solution["X-averaged SEI film overpotential [V]"]
self.eta_e_av = solution["X-averaged electrolyte overpotential [V]"]
self.delta_phi_s_av = solution["X-averaged solid phase ohmic losses [V]"]
self.ocp_n_av = solution[
"X-averaged negative electrode open circuit potential [V]"
]
self.ocp_p_av = solution[
"X-averaged positive electrode open circuit potential [V]"
]
self.ocv_av = solution["X-averaged open circuit voltage [V]"]
self.voltage = solution["Terminal voltage [V]"]
def test_each_reaction_overpotential(self):
"""Testing that:
- discharge: eta_r_n > 0, eta_r_p < 0
- charge: eta_r_n < 0, eta_r_p > 0
- off: eta_r_n == 0, eta_r_p == 0
"""
tol = 0.01
t, x_n, x_p = self.t, self.x_n, self.x_p
if self.operating_condition == "discharge":
np.testing.assert_array_less(-self.eta_r_n(t, x_n), tol)
np.testing.assert_array_less(self.eta_r_p(t, x_p), tol)
elif self.operating_condition == "charge":
np.testing.assert_array_less(self.eta_r_n(t, x_n), tol)
np.testing.assert_array_less(-self.eta_r_p(t, x_p), tol)
elif self.operating_condition == "off":
np.testing.assert_array_equal(self.eta_r_n(t, x_n), 0)
np.testing.assert_array_equal(-self.eta_r_p(t, x_p), 0)
def test_overpotentials(self):
"""Testing that all are:
- discharge: . < 0
- charge: . > 0
- off: . == 0
"""
tol = 0.001
if self.operating_condition == "discharge":
np.testing.assert_array_less(self.eta_r_av(self.t), tol)
np.testing.assert_array_less(self.eta_e_av(self.t), tol)
np.testing.assert_array_less(self.delta_phi_s_av(self.t), tol)
elif self.operating_condition == "charge":
np.testing.assert_array_less(-self.eta_r_av(self.t), tol)
np.testing.assert_array_less(-self.eta_e_av(self.t), tol)
np.testing.assert_array_less(-self.delta_phi_s_av(self.t), tol)
elif self.operating_condition == "off":
np.testing.assert_array_equal(self.eta_r_av(self.t), 0)
np.testing.assert_array_equal(self.eta_e_av(self.t), 0)
# For some reason SPM gives delta_phi_s_av ~ 1e-17
np.testing.assert_array_almost_equal(
self.delta_phi_s_av(self.t), 0, decimal=16
)
def test_ocps(self):
"""Testing that:
- discharge: ocp_n increases, ocp_p decreases
- charge: ocp_n decreases, ocp_p increases
- off: ocp_n, ocp_p constant
"""
neg_end_vs_start = self.ocp_n_av(self.t[-1]) - self.ocp_n_av(self.t[1])
pos_end_vs_start = self.ocp_p_av(self.t[-1]) - self.ocp_p_av(self.t[1])
if self.operating_condition == "discharge":
np.testing.assert_array_less(-neg_end_vs_start, 0)
np.testing.assert_array_less(pos_end_vs_start, 0)
elif self.operating_condition == "charge":
np.testing.assert_array_less(neg_end_vs_start, 0)
np.testing.assert_array_less(-pos_end_vs_start, 0)
elif self.operating_condition == "off":
np.testing.assert_array_almost_equal(neg_end_vs_start, 0)
np.testing.assert_array_almost_equal(pos_end_vs_start, 0)
def test_ocv(self):
"""Testing that:
- discharge: ocv decreases
- charge: ocv increases
- off: ocv constant
"""
end_vs_start = self.ocv_av(self.t[-1]) - self.ocv_av(self.t[1])
if self.operating_condition == "discharge":
np.testing.assert_array_less(end_vs_start, 0)
elif self.operating_condition == "charge":
np.testing.assert_array_less(-end_vs_start, 0)
elif self.operating_condition == "off":
np.testing.assert_array_almost_equal(end_vs_start, 0)
def test_voltage(self):
"""Testing that:
- discharge: voltage decreases
- charge: voltage increases
- off: voltage constant
"""
end_vs_start = self.voltage(self.t[-1]) - self.voltage(self.t[1])
if self.operating_condition == "discharge":
np.testing.assert_array_less(end_vs_start, 0)
elif self.operating_condition == "charge":
np.testing.assert_array_less(-end_vs_start, 0)
elif self.operating_condition == "off":
np.testing.assert_array_almost_equal(end_vs_start, 0)
def test_consistent(self):
"""Test voltage components are consistent with one another by ensuring they sum
correctly"""
np.testing.assert_array_almost_equal(
self.ocv_av(self.t), self.ocp_p_av(self.t) - self.ocp_n_av(self.t)
)
np.testing.assert_array_almost_equal(
self.eta_r_av(self.t), self.eta_r_p_av(self.t) - self.eta_r_n_av(self.t)
)
np.testing.assert_array_almost_equal(
self.voltage(self.t),
self.ocv_av(self.t)
+ self.eta_r_av(self.t)
+ self.eta_e_av(self.t)
+ self.delta_phi_s_av(self.t)
+ self.eta_sei_av(self.t),
decimal=2,
)
def test_all(self):
self.test_each_reaction_overpotential()
self.test_overpotentials()
self.test_ocps()
self.test_ocv()
self.test_voltage()
self.test_consistent()
class ParticleConcentrationTests(BaseOutputTest):
def __init__(self, model, param, disc, solution, operating_condition):
super().__init__(model, param, disc, solution, operating_condition)
self.c_s_n = solution["Negative particle concentration"]
self.c_s_p = solution["Positive particle concentration"]
self.c_s_n_rav = solution["R-averaged negative particle concentration"]
self.c_s_p_rav = solution["R-averaged positive particle concentration"]
self.c_s_n_surf = solution["Negative particle surface concentration"]
self.c_s_p_surf = solution["Positive particle surface concentration"]
self.c_s_n_tot = solution["Total lithium in negative electrode [mol]"]
self.c_s_p_tot = solution["Total lithium in positive electrode [mol]"]
self.N_s_n = solution["Negative particle flux"]
self.N_s_p = solution["Positive particle flux"]
self.c_SEI_tot = solution["Loss of lithium to SEI [mol]"]
self.c_Li_tot = solution["Loss of lithium to lithium plating [mol]"]
if model.options["particle size"] == "distribution":
            # These concentration variables are only present for distribution models.
            # Take only the x-averaged versions for now, since variables cannot have
            # four domains yet.
self.c_s_n_dist = solution[
"X-averaged negative particle concentration distribution"
]
self.c_s_p_dist = solution[
"X-averaged positive particle concentration distribution"
]
self.c_s_n_surf_dist = solution[
"Negative particle surface concentration distribution"
]
self.c_s_p_surf_dist = solution[
"Positive particle surface concentration distribution"
]
def test_concentration_increase_decrease(self):
"""Test all concentrations in negative particles decrease and all
concentrations in positive particles increase over a discharge."""
t, x_n, x_p, r_n, r_p = self.t, self.x_n, self.x_p, self.r_n, self.r_p
tol = 1e-16
if self.model.options["particle"] in ["quadratic profile", "quartic profile"]:
# For the assumed polynomial concentration profiles the values
# can increase/decrease within the particle as the polynomial shifts,
# so we just check the average instead
neg_diff = self.c_s_n_rav(t[1:], x_n) - self.c_s_n_rav(t[:-1], x_n)
pos_diff = self.c_s_p_rav(t[1:], x_p) - self.c_s_p_rav(t[:-1], x_p)
neg_end_vs_start = self.c_s_n_rav(t[-1], x_n) - self.c_s_n_rav(t[0], x_n)
pos_end_vs_start = self.c_s_p_rav(t[-1], x_p) - self.c_s_p_rav(t[0], x_p)
elif self.model.options["particle size"] == "distribution":
R_n, R_p = self.R_n, self.R_p
# Test the concentration variables that depend on x-R (surface values only,
# as 3D vars not implemented)
neg_diff = self.c_s_n_surf_dist(t[1:], x=x_n, R=R_n) - self.c_s_n_surf_dist(
t[:-1], x=x_n, R=R_n
)
pos_diff = self.c_s_p_surf_dist(t[1:], x=x_p, R=R_p) - self.c_s_p_surf_dist(
t[:-1], x=x_p, R=R_p
)
neg_end_vs_start = self.c_s_n_surf_dist(
t[-1], x=x_n, R=R_n
) - self.c_s_n_surf_dist(t[0], x=x_n, R=R_n)
pos_end_vs_start = self.c_s_p_surf_dist(
t[-1], x=x_p, R=R_p
) - self.c_s_p_surf_dist(t[0], x=x_p, R=R_p)
tol = 1e-15
else:
neg_diff = self.c_s_n(t[1:], x_n, r_n) - self.c_s_n(t[:-1], x_n, r_n)
pos_diff = self.c_s_p(t[1:], x_p, r_p) - self.c_s_p(t[:-1], x_p, r_p)
neg_end_vs_start = self.c_s_n(t[-1], x_n, r_n) - self.c_s_n(t[0], x_n, r_n)
pos_end_vs_start = self.c_s_p(t[-1], x_p, r_p) - self.c_s_p(t[0], x_p, r_p)
if self.operating_condition == "discharge":
np.testing.assert_array_less(neg_diff, tol)
np.testing.assert_array_less(-tol, pos_diff)
np.testing.assert_array_less(neg_end_vs_start, 0)
np.testing.assert_array_less(0, pos_end_vs_start)
elif self.operating_condition == "charge":
np.testing.assert_array_less(-tol, neg_diff)
np.testing.assert_array_less(pos_diff, tol)
np.testing.assert_array_less(0, neg_end_vs_start)
np.testing.assert_array_less(pos_end_vs_start, 0)
elif self.operating_condition == "off":
np.testing.assert_array_almost_equal(neg_diff, 0)
np.testing.assert_array_almost_equal(pos_diff, 0)
np.testing.assert_array_almost_equal(neg_end_vs_start, 0)
np.testing.assert_array_almost_equal(pos_end_vs_start, 0)
def test_concentration_limits(self):
"""Test that concentrations do not go below 0 or exceed the maximum."""
t, x_n, x_p, r_n, r_p = self.t, self.x_n, self.x_p, self.r_n, self.r_p
np.testing.assert_array_less(-self.c_s_n(t, x_n, r_n), 0)
np.testing.assert_array_less(-self.c_s_p(t, x_p, r_p), 0)
np.testing.assert_array_less(self.c_s_n(t, x_n, r_n), 1)
np.testing.assert_array_less(self.c_s_p(t, x_p, r_p), 1)
if self.model.options["particle size"] == "distribution":
R_n, R_p = self.R_n, self.R_p
# Cannot have 3D processed variables, so test concs that depend on
# r-R and x-R
# r-R (x-averaged)
np.testing.assert_array_less(-self.c_s_n_dist(t, r=r_n, R=R_n), 0)
np.testing.assert_array_less(-self.c_s_p_dist(t, r=r_p, R=R_p), 0)
np.testing.assert_array_less(self.c_s_n_dist(t, r=r_n, R=R_n), 1)
np.testing.assert_array_less(self.c_s_p_dist(t, r=r_p, R=R_p), 1)
# x-R (surface concentrations)
np.testing.assert_array_less(-self.c_s_n_surf_dist(t, x=x_n, R=R_n), 0)
np.testing.assert_array_less(-self.c_s_p_surf_dist(t, x=x_p, R=R_p), 0)
np.testing.assert_array_less(self.c_s_n_surf_dist(t, x=x_n, R=R_n), 1)
np.testing.assert_array_less(self.c_s_p_surf_dist(t, x=x_p, R=R_p), 1)
def test_conservation(self):
"""Test amount of lithium stored across all particles and in SEI layers is
constant."""
c_s_tot = (
self.c_s_n_tot(self.solution.t)
+ self.c_s_p_tot(self.solution.t)
+ self.c_SEI_tot(self.solution.t)
+ self.c_Li_tot(self.solution.t)
)
diff = (c_s_tot[1:] - c_s_tot[:-1]) / c_s_tot[:-1]
if self.model.options["particle"] == "quartic profile":
np.testing.assert_array_almost_equal(diff, 0, decimal=10)
# elif self.model.options["particle size"] == "distribution":
# np.testing.assert_array_almost_equal(diff, 0, decimal=10)
elif self.model.options["surface form"] == "differential":
np.testing.assert_array_almost_equal(diff, 0, decimal=10)
else:
np.testing.assert_array_almost_equal(diff, 0, decimal=15)
def test_concentration_profile(self):
"""Test that the concentration in the centre of the negative particles is
greater than the average concentration in the particle and also that the
concentration on the surface of the negative particle is less than the average
concentration in the particle. Test opposite is true for the positive
particle."""
# TODO: add an output for average particle concentration
def test_fluxes(self):
"""Test that no flux holds in the centre of the particle. Test that surface
flux in the negative particles is greater than zero and that the flux in the
positive particles is less than zero during a discharge."""
t, x_n, x_p, r_n, r_p = (
self.t,
self.x_n,
self.x_p,
self.r_n_edge,
self.r_p_edge,
)
if self.model.options["particle"] == "uniform profile":
# Fluxes are zero everywhere since the concentration is uniform
np.testing.assert_array_almost_equal(self.N_s_n(t, x_n, r_n), 0)
np.testing.assert_array_almost_equal(self.N_s_p(t, x_p, r_p), 0)
else:
if self.operating_condition == "discharge":
if self.model.options["particle"] == "quartic profile":
# quartic profile has a transient at the beginning where
# the concentration "rearranges" giving flux of the opposite
# sign, so ignore first three times
np.testing.assert_array_less(0, self.N_s_n(t[3:], x_n, r_n[1:]))
np.testing.assert_array_less(self.N_s_p(t[3:], x_p, r_p[1:]), 0)
else:
np.testing.assert_array_less(
-1e-16, self.N_s_n(t[1:], x_n, r_n[1:])
)
np.testing.assert_array_less(self.N_s_p(t[1:], x_p, r_p[1:]), 1e-16)
if self.operating_condition == "charge":
np.testing.assert_array_less(self.N_s_n(t[1:], x_n, r_n[1:]), 1e-16)
np.testing.assert_array_less(-1e-16, self.N_s_p(t[1:], x_p, r_p[1:]))
if self.operating_condition == "off":
np.testing.assert_array_almost_equal(self.N_s_n(t, x_n, r_n), 0)
np.testing.assert_array_almost_equal(self.N_s_p(t, x_p, r_p), 0)
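        # Flux at the particle centre (r = 0) should vanish in all cases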
np.testing.assert_array_almost_equal(0, self.N_s_n(t, x_n, r_n[0]), decimal=4)
np.testing.assert_array_almost_equal(0, self.N_s_p(t, x_p, r_p[0]), decimal=4)
def test_all(self):
self.test_concentration_increase_decrease()
self.test_concentration_limits()
self.test_conservation()
self.test_concentration_profile()
self.test_fluxes()
class ElectrolyteConcentrationTests(BaseOutputTest):
def __init__(self, model, param, disc, solution, operating_condition):
super().__init__(model, param, disc, solution, operating_condition)
self.c_e = solution["Electrolyte concentration"]
self.c_e_n = solution["Negative electrolyte concentration"]
self.c_e_s = solution["Separator electrolyte concentration"]
self.c_e_p = solution["Positive electrolyte concentration"]
self.c_e_av = solution["X-averaged electrolyte concentration"]
self.c_e_n_av = solution["X-averaged negative electrolyte concentration"]
self.c_e_s_av = solution["X-averaged separator electrolyte concentration"]
self.c_e_p_av = solution["X-averaged positive electrolyte concentration"]
self.c_e_tot = solution["Total lithium in electrolyte [mol]"]
self.N_e_hat = solution["Electrolyte flux"]
# self.N_e_hat = solution["Reduced cation flux"]
def test_concentration_limit(self):
"""Test that the electrolyte concentration is always greater than zero."""
np.testing.assert_array_less(-self.c_e(self.t, self.x), 0)
def test_conservation(self):
"""Test conservation of species in the electrolyte."""
# sufficient to check average concentration is constant
diff = (
self.c_e_tot(self.solution.t[1:]) - self.c_e_tot(self.solution.t[:-1])
) / self.c_e_tot(self.solution.t[:-1])
np.testing.assert_array_almost_equal(diff, 0)
def test_concentration_profile(self):
"""Test continuity of the concentration profile. Test average concentration is
as expected and that the concentration in the negative electrode is greater
than the average and the concentration in the positive is less than the average
during a discharge."""
        # TODO: uncomment once average concentrations are available
        # small tolerance so that assert_array_less can be used
# epsilon = 0.001
# if self.operating_condition == "discharge":
# np.testing.assert_array_less(
# -self.c_e_n_av.entries, self.c_e_av.entries + epsilon
# )
# np.testing.assert_array_less(
# self.c_e_p_av.entries, self.c_e_av.entries + epsilon
# )
# elif self.operating_condition == "charge":
# np.testing.assert_array_less(
# -self.c_e_n_av.entries, self.c_e_av.entries + epsilon
# )
# np.testing.assert_array_less(
# self.c_e_p_av.entries, self.c_e_av.entries + epsilon
# )
# elif self.operating_condition == "off":
# np.testing.assert_array_equal(self.c_e_n_av.entries, self.c_e_av.entries)
# np.testing.assert_array_equal(self.c_e_s_av.entries, self.c_e_av.entries)
# np.testing.assert_array_equal(self.c_e_p_av.entries, self.c_e_av.entries)
def test_fluxes(self):
"""Test current collector fluxes are zero. Tolerance reduced for surface form
models (bug in implementation of boundary conditions?)"""
t, x = self.t, self.x_edge
np.testing.assert_array_almost_equal(self.N_e_hat(t, x[0]), 0, decimal=3)
np.testing.assert_array_almost_equal(self.N_e_hat(t, x[-1]), 0, decimal=3)
def test_splitting(self):
"""Test that when splitting the concentrations and fluxes by negative electrode,
separator, and positive electrode, we get the correct behaviour: continuous
solution and recover combined through concatenation."""
t, x_n, x_s, x_p, x = self.t, self.x_n, self.x_s, self.x_p, self.x
c_e_combined = np.concatenate(
(self.c_e_n(t, x_n), self.c_e_s(t, x_s), self.c_e_p(t, x_p)), axis=0
)
np.testing.assert_array_equal(self.c_e(t, x), c_e_combined)
def test_all(self):
self.test_concentration_limit()
self.test_conservation()
self.test_concentration_profile()
self.test_fluxes()
self.test_splitting()
class PotentialTests(BaseOutputTest):
def __init__(self, model, param, disc, solution, operating_condition):
super().__init__(model, param, disc, solution, operating_condition)
self.phi_s_n = solution["Negative electrode potential [V]"]
self.phi_s_p = solution["Positive electrode potential [V]"]
self.phi_s_n_av = solution["X-averaged negative electrode potential [V]"]
self.phi_s_p_av = solution["X-averaged positive electrode potential [V]"]
self.phi_e = solution["Electrolyte potential [V]"]
self.phi_e_n = solution["Negative electrolyte potential [V]"]
self.phi_e_s = solution["Separator electrolyte potential [V]"]
self.phi_e_p = solution["Positive electrolyte potential [V]"]
self.phi_e_n_av = solution["X-averaged negative electrolyte potential [V]"]
self.phi_e_p_av = solution["X-averaged positive electrolyte potential [V]"]
self.delta_phi_n = solution[
"Negative electrode surface potential difference [V]"
]
self.delta_phi_p = solution[
"Positive electrode surface potential difference [V]"
]
self.delta_phi_n_av = solution[
"X-averaged negative electrode surface potential difference [V]"
]
self.delta_phi_p_av = solution[
"X-averaged positive electrode surface potential difference [V]"
]
self.grad_phi_e = solution["Gradient of electrolyte potential"]
self.grad_phi_e_n = solution["Gradient of negative electrolyte potential"]
self.grad_phi_e_s = solution["Gradient of separator electrolyte potential"]
self.grad_phi_e_p = solution["Gradient of positive electrolyte potential"]
def test_negative_electrode_potential_profile(self):
"""Test that negative electrode potential is zero on left boundary. Test
average negative electrode potential is less than or equal to zero."""
np.testing.assert_array_almost_equal(self.phi_s_n(self.t, x=0), 0, decimal=5)
def test_positive_electrode_potential_profile(self):
"""Test average positive electrode potential is less than the positive electrode
potential on the right current collector."""
        # TODO: add these tests once averaged potentials are available
def test_potential_differences(self):
"""Test that potential differences are the difference between electrode
potential and electrolyte potential"""
t, x_n, x_p = self.t, self.x_n, self.x_p
np.testing.assert_array_almost_equal(
self.phi_s_n(t, x_n) - self.phi_e_n(t, x_n), self.delta_phi_n(t, x_n)
)
np.testing.assert_array_almost_equal(
self.phi_s_p(t, x_p) - self.phi_e_p(t, x_p),
self.delta_phi_p(t, x_p),
decimal=5,
)
def test_average_potential_differences(self):
"""Test that average potential differences are the difference between electrode
potential and electrolyte potential"""
t = self.t
np.testing.assert_array_almost_equal(
self.phi_s_n_av(t) - self.phi_e_n_av(t), self.delta_phi_n_av(t)
)
np.testing.assert_array_almost_equal(
self.phi_s_p_av(t) - self.phi_e_p_av(t), self.delta_phi_p_av(t)
)
def test_gradient_splitting(self):
t, x_n, x_s, x_p, x = self.t, self.x_n, self.x_s, self.x_p, self.x
grad_phi_e_combined = np.concatenate(
(
self.grad_phi_e_n(t, x_n),
self.grad_phi_e_s(t, x_s),
self.grad_phi_e_p(t, x_p),
),
axis=0,
)
np.testing.assert_array_equal(self.grad_phi_e(t, x), grad_phi_e_combined)
def test_all(self):
self.test_negative_electrode_potential_profile()
self.test_positive_electrode_potential_profile()
self.test_potential_differences()
self.test_average_potential_differences()
class CurrentTests(BaseOutputTest):
def __init__(self, model, param, disc, solution, operating_condition):
super().__init__(model, param, disc, solution, operating_condition)
self.j = solution["Interfacial current density"]
self.j0 = solution["Exchange current density"]
self.j_n = solution["Negative electrode interfacial current density"]
self.j_p = solution["Positive electrode interfacial current density"]
self.j_n_av = solution[
"X-averaged negative electrode interfacial current density"
]
self.j_p_av = solution[
"X-averaged positive electrode interfacial current density"
]
self.j_n_sei = solution["SEI interfacial current density"]
self.j_n_sei_av = solution["X-averaged SEI interfacial current density"]
self.j0_n = solution["Negative electrode exchange current density"]
self.j0_p = solution["Positive electrode exchange current density"]
self.i_s_n = solution["Negative electrode current density"]
self.i_s_p = solution["Positive electrode current density"]
self.i_s = solution["Electrode current density"]
self.i_e = solution["Electrolyte current density"]
self.a_n = solution["Negative electrode surface area to volume ratio"]
self.a_p = solution["Positive electrode surface area to volume ratio"]
def test_interfacial_current_average(self):
"""Test that average of the surface area density distribution (in x)
multiplied by the interfacial current density is equal to the true
value."""
np.testing.assert_array_almost_equal(
np.mean(
self.a_n(self.t, self.x_n)
* (self.j_n(self.t, self.x_n) + self.j_n_sei(self.t, self.x_n)),
axis=0,
),
self.i_cell / self.l_n,
decimal=3,
)
np.testing.assert_array_almost_equal(
np.mean(
self.a_p(self.t, self.x_p) * self.j_p(self.t, self.x_p),
axis=0,
),
-self.i_cell / self.l_p,
decimal=4,
)
def test_conservation(self):
"""Test sum of electrode and electrolyte current densities give the applied
current density"""
t, x_n, x_s, x_p = self.t, self.x_n, self.x_s, self.x_p
current_param = self.model.param.current_with_time
i_cell = self.param.process_symbol(current_param).evaluate(t=t)
for x in [x_n, x_s, x_p]:
np.testing.assert_array_almost_equal(
self.i_s(t, x) + self.i_e(t, x), i_cell, decimal=2
)
np.testing.assert_array_almost_equal(
self.i_s(t, x_n), self.i_s_n(t, x_n), decimal=3
)
np.testing.assert_array_almost_equal(
self.i_s(t, x_p), self.i_s_p(t, x_p), decimal=3
)
def test_current_density_boundaries(self):
"""Test the boundary values of the current densities"""
t, x_n, x_p = self.t, self.x_n_edge, self.x_p_edge
current_param = self.model.param.current_with_time
i_cell = self.param.process_symbol(current_param).evaluate(t=t)
np.testing.assert_array_almost_equal(self.i_s_n(t, x_n[0]), i_cell, decimal=2)
np.testing.assert_array_almost_equal(self.i_s_n(t, x_n[-1]), 0, decimal=4)
np.testing.assert_array_almost_equal(self.i_s_p(t, x_p[-1]), i_cell, decimal=3)
np.testing.assert_array_almost_equal(self.i_s_p(t, x_p[0]), 0, decimal=4)
def test_all(self):
self.test_conservation()
self.test_current_density_boundaries()
# Skip average current test if capacitance is used, since average interfacial
# current density will be affected slightly by capacitance effects
if self.model.options["surface form"] != "differential":
self.test_interfacial_current_average()
class VelocityTests(BaseOutputTest):
def __init__(self, model, param, disc, solution, operating_condition):
super().__init__(model, param, disc, solution, operating_condition)
self.v_box = solution["Volume-averaged velocity"]
self.i_e = solution["Electrolyte current density"]
self.dVbox_dz = solution["Transverse volume-averaged acceleration"]
def test_velocity_boundaries(self):
"""Test the boundary values of the current densities"""
L_x = self.x_edge[-1]
np.testing.assert_array_almost_equal(self.v_box(self.t, 0), 0, decimal=4)
np.testing.assert_array_almost_equal(self.v_box(self.t, L_x), 0, decimal=4)
def test_vertical_velocity(self):
"""Test the boundary values of the current densities"""
L_x = self.x_edge[-1]
np.testing.assert_array_equal(self.dVbox_dz(self.t, 0), 0)
np.testing.assert_array_less(self.dVbox_dz(self.t, 0.5 * L_x), 0)
np.testing.assert_array_equal(self.dVbox_dz(self.t, L_x), 0)
def test_velocity_vs_current(self):
"""Test the boundary values of the current densities"""
t, x_n, x_p = self.t, self.x_n, self.x_p
beta_n = self.model.param.beta_n
beta_n = self.param.evaluate(beta_n)
beta_p = self.model.param.beta_p
beta_p = self.param.evaluate(beta_p)
np.testing.assert_array_almost_equal(
self.v_box(t, x_n), beta_n * self.i_e(t, x_n)
)
np.testing.assert_array_almost_equal(
self.v_box(t, x_p), beta_p * self.i_e(t, x_p)
)
def test_all(self):
self.test_velocity_boundaries()
self.test_vertical_velocity()
self.test_velocity_vs_current()
class DegradationTests(BaseOutputTest):
def __init__(self, model, param, disc, solution, operating_condition):
super().__init__(model, param, disc, solution, operating_condition)
self.LAM_ne = solution["Loss of active material in negative electrode [%]"]
self.LAM_pe = solution["Loss of active material in positive electrode [%]"]
self.LLI = solution["Loss of lithium inventory [%]"]
self.n_Li_lost = solution["Total lithium lost [mol]"]
self.n_Li_lost_rxn = solution["Total lithium lost to side reactions [mol]"]
def test_degradation_modes(self):
"""Test degradation modes are between 0 and 100%"""
np.testing.assert_array_less(-3e-3, self.LLI(self.t))
np.testing.assert_array_less(-1e-13, self.LAM_ne(self.t))
np.testing.assert_array_less(-1e-13, self.LAM_pe(self.t))
np.testing.assert_array_less(self.LLI(self.t), 100)
np.testing.assert_array_less(self.LAM_ne(self.t), 100)
np.testing.assert_array_less(self.LAM_pe(self.t), 100)
def test_lithium_lost(self):
"""Test the two ways of measuring lithium lost give the same value"""
np.testing.assert_array_almost_equal(
self.n_Li_lost(self.t), self.n_Li_lost_rxn(self.t), decimal=3
)
def test_all(self):
self.test_degradation_modes()
self.test_lithium_lost()
```
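For context, a minimal sketch of how a class like `StandardOutputTests` is typically driven in an integration test. The model choice, the default meshes and solver, the one-hour time window, and the `timescale_eval` attribute are assumptions based on the surrounding code; the exact API may differ between PyBaMM versions:
```python
import numpy as np
import pybamm

# Build and parameterise a lithium-ion model (SPM chosen only for illustration)
model = pybamm.lithium_ion.SPM()
parameter_values = model.default_parameter_values
geometry = model.default_geometry
parameter_values.process_model(model)
parameter_values.process_geometry(geometry)

# Discretise on the default mesh and solve a constant-current discharge
mesh = pybamm.Mesh(geometry, model.default_submesh_types, model.default_var_pts)
disc = pybamm.Discretisation(mesh, model.default_spatial_methods)
disc.process_model(model)
solution = model.default_solver.solve(model, np.linspace(0, 3600, 100))

# BaseOutputTest reads `model.timescale_eval`; the repository's test harness is
# assumed to set it along these lines before running the output checks
model.timescale_eval = parameter_values.evaluate(model.timescale)

# StandardOutputTests is the class defined in the file above
StandardOutputTests(model, parameter_values, disc, solution).test_all()
```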
|