code
stringlengths 501
5.19M
| package
stringlengths 2
81
| path
stringlengths 9
304
| filename
stringlengths 4
145
|
---|---|---|---|
import json
from tkinter import EXCEPTION
from xmlrpc.client import boolean
from .constants import *
from .logging import Log
from .exceptions import *
from .utils import *
import os
from .login import *
from time import sleep
from datetime import datetime, date,timedelta
import logging
from playwright.async_api import async_playwright
class DouyinUpload:
def __init__(
self,
root_profile_directory: str,
proxy_option: str = "",
timeout: int = 3,
watcheveryuploadstep: bool = True,
debug: bool = True,
username:str ="",
password:str ="",
CHANNEL_COOKIES: str = "",
login_method:str="phone",
ytb_cookies:str="",
tiktok_cookies:str="",
recordvideo:bool=False
) -> None:
self.timeout = timeout
self.log = Log(debug)
self.username=username
self.password=password
self.CHANNEL_COOKIES = CHANNEL_COOKIES
self.root_profile_directory=root_profile_directory
self.proxy_option=proxy_option
self.watcheveryuploadstep=watcheveryuploadstep
self.ytb_cookies=ytb_cookies
self.tiktok_cookies=tiktok_cookies
self._playwright=''
self.browser=''
self.login_method='qrcode'
self.context=''
self.page=''
self.recordvideo=recordvideo
# self.setup()
async def upload(
self,
videopath: str="",
title: str = "",
description: str = "",
thumbnail: str = "",
publishpolicy: str = 0,
# mode a:release_offset exist,publish_data exist will take date value as a starting date to schedule videos
# mode b:release_offset not exist, publishdate exist , schedule to this specific date
# mode c:release_offset not exist, publishdate not exist,daily count to increment schedule from tomorrow
# mode d: offset exist, publish date not exist, daily count to increment with specific offset schedule from tomorrow
release_offset: str = '0-1',
publish_date: datetime = datetime(
date.today().year, date.today().month, date.today().day, 10, 15),
tags: list = [],
location:str='',
miniprogram:str='',
hottopic:str='',
heji:str='',
up2toutiao:bool=False,
allow2save:bool=True,
allow2see:str='公开',
closewhen100percentupload:bool =True
) -> Tuple[bool, Optional[str]]:
"""Uploads a video to douyin.
Returns if the video was uploaded and the video id.
"""
self._playwright = await self._start_playwright()
# browser = p.chromium.launch()
# proxy_option = "socks5://127.0.0.1:1080"
headless=True
if self.watcheveryuploadstep:
headless=False
print('whether run in view mode',headless)
if self.proxy_option == "":
print('start web page without proxy')
browserLaunchOptionDict = {
"headless": headless,
# "executable_path": executable_path,
"timeout": 30000
}
if not self.root_profile_directory:
self.browser = await self._start_browser("firefox", **browserLaunchOptionDict)
if self.recordvideo:
self.context = await self.browser.new_context(record_video_dir=os.getcwd()+os.sep+"screen-recording")
else:
self.context = await self.browser.new_context()
else:
self.context = await self._start_persistent_browser(
"firefox", user_data_dir=self.root_profile_directory, **browserLaunchOptionDict
)
else:
print('start web page with proxy')
browserLaunchOptionDict = {
"headless": headless,
"proxy": {
"server": self.proxy_option,
},
# timeout <float> Maximum time in milliseconds to wait for the browser instance to start. Defaults to 30000 (30 seconds). Pass 0 to disable timeout.#
"timeout": 30000
}
if not self.root_profile_directory:
self.browser = await self._start_browser("firefox", **browserLaunchOptionDict)
if self.recordvideo:
self.context = await self.browser.new_context(record_video_dir=os.getcwd()+os.sep+"screen-recording")
else:
self.context = await self.browser.new_context()
else:
self.context = await self._start_persistent_browser(
"firefox", user_data_dir=self.root_profile_directory, **browserLaunchOptionDict
)
self.log.debug("Firefox is now running")
await self.context.grant_permissions(['geolocation'])
page = await self.context.new_page()
print('============tags',tags)
if not videopath:
raise FileNotFoundError(f'Could not find file with path: "{videopath}"')
if self.CHANNEL_COOKIES and not self.CHANNEL_COOKIES == '':
print('cookies existing', self.CHANNEL_COOKIES)
await self.context.clear_cookies()
cookies=await format_cookie_file(self.CHANNEL_COOKIES)
await self.context.add_cookies(
cookies
)
await page.goto(DOUYIN_URL,timeout=300000)
await page.reload()
else:
self.log.debug('Please sign in and then press enter')
# input()
await page.goto(DOUYIN_URL,timeout=300000)
# Interact with login form
await self.context.clear_cookies()
await page.locator('.login').click()
await page.locator('.semi-button-content').click()
if self.login_method=='phone-verify':
await page.locator('div.semi-tabs-tab:nth-child(2)').click()
await page.locator('.toggle').click()
time.sleep(30)
elif self.login_method=='password':
print('not recommend')
time.sleep(10)
await page.fill('.semi-input-wrapper__with-prefix', self.username)
await page.fill('div.semi-form-field:nth-child(2)>div>div>input', self.password)
await page.locaotr('.agreement >img').click()
elif self.login_method=='qrcode':
print('pls open douyin to scan this qrcode')
time.sleep(30)
# page.click('text=Submit')
sleep(USER_WAITING_TIME)
storage = await self.context.storage_state(path=self.CHANNEL_COOKIES)
await self.context.grant_permissions(['geolocation'])
try:
page.locator('.semi-modal-content')
print('there is hint for 开始体验')
await page.locator('button.semi-button:nth-child(3)').click()
await page.locator('.popoverFooter--2G_g0 > button:nth-child(1) > span:nth-child(1)').click()
await page.locator('.popoverFooter--2G_g0 > button:nth-child(1) > span:nth-child(1)').click()
except:
print('this is not the first time to login in')
islogin = confirm_logged_in_douyin(page)
print('checking login status', islogin)
if not islogin:
print('try to load cookie files')
await self.context.clear_cookies()
cookies=await format_cookie_file(self.CHANNEL_COOKIES)
await self.context.add_cookies(
cookies
)
print('success load cookie files')
await page.goto(DOUYIN_URL,timeout=300000)
print('start to check login status')
islogin = confirm_logged_in_douyin(page)
# https://github.com/xtekky/google-login-bypass/blob/main/login.py
self.log.debug("Found douyin upload Dialog Modal")
await page.goto(DOUYIN_UPLOAD_URL,timeout=300000)
# sleep(self.timeout)
self.log.debug(f'Trying to upload "{videopath}" to douyin...')
if os.path.exists(get_path(videopath)):
page.locator(
DOUYIN_INPUT_FILE_VIDEO)
await page.set_input_files(DOUYIN_INPUT_FILE_VIDEO, get_path(videopath))
else:
if os.path.exists(videopath.encode('utf-8')):
print('file found', videopath)
page.locator(
DOUYIN_INPUT_FILE_VIDEO)
await page.set_input_files(DOUYIN_INPUT_FILE_VIDEO, videopath.encode('utf-8'))
sleep(self.timeout)
# accountcheck=await textbox.is_editable()
# if not accountcheck:
# try:
# while True:
# check = page.locator('//*[@id="dialog-title"]')
# self.log.debug(f'found to douyin account check')
# x_path = '//*[@id="textbox"]'
# if page.locator(x_path):
# self.log.debug(f'fix douyin account check')
# break
# except:
# sleep(1)
self.log.debug(f'Trying to set "{title}" as title...')
# get file name (default) title
# title=title if title else page.locator(TEXTBOX).text_content()
# print(title)
sleep(self.timeout)
if len(title) > DOUYIN_TITLE_COUNTER:
print(f"Title was not set due to exceeding the maximum allowed characters ({len(title)}/{TITLE_COUNTER})")
title=title[:DOUYIN_TITLE_COUNTER-1]
# TITLE
print('click title field to input')
titlecontainer= page.locator(DOUYIN_TEXTBOX)
await titlecontainer.click()
print('clear existing title')
await page.keyboard.press("Backspace")
await page.keyboard.press("Control+KeyA")
await page.keyboard.press("Delete")
print('filling new title')
await page.keyboard.type(title)
if thumbnail:
self.log.debug(f'Trying to set "{thumbnail}" as thumbnail...')
await page.locator(DOUYIN_INPUT_FILE_THUMBNAIL_EDIT).click()
await page.locator(DOUYIN_INPUT_FILE_THUMBNAIL_OPTION_UPLOAD).click()
if os.path.exists(get_path(thumbnail)):
print('thumb file name without utf8')
await page.locator(
DOUYIN_INPUT_FILE_THUMBNAIL).set_input_files(get_path(thumbnail))
time.sleep(USER_WAITING_TIME)
# page.locator()
# 如果缩略图尺寸不是9:16 会弹出裁剪框
await page.locator(DOUYIN_INPUT_FILE_THUMBNAIL_UPLOAD_TRIM_CONFIRM).click()
await page.locator(DOUYIN_INPUT_FILE_THUMBNAIL_UPLOAD_CONFIRM).click()
else:
if os.path.exists(thumbnail.encode('utf-8')):
print('thumbnail found', thumbnail)
await page.locator(DOUYIN_INPUT_FILE_THUMBNAIL).set_input_files(
thumbnail.encode('utf-8'))
await page.locator(DOUYIN_INPUT_FILE_THUMBNAIL_UPLOAD_TRIM_CONFIRM).click()
await page.locator(DOUYIN_INPUT_FILE_THUMBNAIL_UPLOAD_CONFIRM).click()
time.sleep(USER_WAITING_TIME)
sleep(self.timeout)
self.log.debug('Trying to set {location} to ')
if location is None or location =="" or len(location)==0:
pass
else:
print('location you give',location)
sleep(self.timeout)
await page.locator(DOUYIN_LOCATION).click()
print('clear existing location')
await page.keyboard.press("Backspace")
await page.keyboard.press("Control+KeyA")
await page.keyboard.press("Delete")
await page.keyboard.type(location)
await page.keyboard.press("Enter")
try:
locationresultscount=await page.locator('.semi-select-option-list>div.semi-select-option').count()
if locationresultscount>0:
await page.locator('div.semi-select-option:nth-child(1)').click()
except:
print('no hint for location ',location)
self.log.debug(f'Trying to set "{location}" as location...')
self.log.debug('Trying to set {miniprogram} to ')
if miniprogram is None or miniprogram =="" or len(miniprogram)==0:
pass
else:
print('miniprogram you give',miniprogram)
sleep(self.timeout)
await page.locator(DOUYIN_MINI_SELECT).click()
await page.locator(DOUYIN_MINI_SELECT_OPTION).click()
await page.locator(DOUYIN_MINI).click()
print('clear existing location')
await page.keyboard.press("Backspace")
await page.keyboard.press("Control+KeyA")
await page.keyboard.press("Delete")
await page.keyboard.type(miniprogram)
await page.keyboard.press("Enter")
self.log.debug(f'Trying to set "{miniprogram}" as mini...')
self.log.debug('Trying to set {hottopic} to ')
if hottopic is None or hottopic =="" or len(hottopic)==0:
pass
else:
print('hottopic you give',hottopic)
sleep(self.timeout)
await page.locator(DOUYIN_HOT_TOPIC).click()
print('clear existing hottopic')
await page.keyboard.press("Backspace")
await page.keyboard.press("Control+KeyA")
await page.keyboard.press("Delete")
await page.keyboard.type(hottopic)
# 输入准确的热点词 可以保存
#输入的如果是提示词, 需从下拉列表中选择第一项
try:
hottopiccount=await page.locator('.semi-select-option-list > div.semi-select-option').count()
if hottopiccount>0:
await page.locator('.semi-select-option-list div.semi-select-option:nth-child(1)').click()
except:
pass
self.log.debug(f'Trying to set "{hottopic}" as hottopic...')
self.log.debug(f'Trying to set "{hottopic}" as mini...')
self.log.debug('Trying to set {heji} to ')
if heji is None or heji =="" or len(heji)==0:
pass
else:
print('heji you give',heji)
sleep(self.timeout)
print('click to 选择合集')
await page.locator(DOUYIN_HEJI_SELECT_OPTION).click()
print('罗列已有合集')
try:
hejicount=await page.locator('div.mix-dropdown>div.semi-select-option-list').count()
print('we found he ji count ',hejicount)
if hejicount==0:
print('pleas manual create 合集 first',heji)
else:
index=0
for i in hejicount:
text=await page.locator('.semi-select-option-list > div.semi-select-option').nth(i).text_content()
text=text.strip()
if text==heji:
index=i
if index==0:
print('we cannot detect this heji,pleas create 合集 first',heji)
else:
await page.locator('.semi-select-option-list div.semi-select-option:nth-child({index})').click()
except:
print('暂无合集',heji)
self.log.debug(f'Trying to set "{heji}" as heji...')
self.log.debug('Trying to set {up2toutiao} to ')
if up2toutiao is None or up2toutiao ==False:
pass
else:
await page.locator('.semi-switch-native-control').click()
self.log.debug('Trying to set {allow2save} to ')
if allow2save is None or allow2save ==True:
pass
else:
await page.locator('.form--3R0Ka > div:nth-child(14) > div:nth-child(1) > label:nth-child(2)').click()
self.log.debug('Trying to set {allow2see} to ')
if heji:
print('添加进合集或专辑的视频,无法设置好友可见或仅自己可见')
else:
if not allow2see in ['公开','好友可见','仅自己可见']:
allow2see='公开'
if allow2see is None or allow2see =='公开':
pass
elif allow2see=='好友可见':
await page.locator('.publish-settings--3rCGw > div:nth-child(3) > label:nth-child(2)').click()
elif allow2see=='仅自己可见':
await page.locator('.publish-settings--3rCGw > div:nth-child(3) > label:nth-child(3)').click()
else:
print('请重新设置有效的值 只能是 公开、好友可见、仅自己可见',allow2see)
if not publishpolicy in ['立即发布','定时发布']:
publishpolicy='立即发布'
if publishpolicy == '立即发布':
self.log.debug("Trying to set video visibility to 立即发布...")
else:
self.log.debug("Trying to set video visibility to 定时发布...")
# mode a:release_offset exist,publish_data exist will take date value as a starting date to schedule videos
# mode b:release_offset not exist, publishdate exist , schedule to this specific date
# mode c:release_offset not exist, publishdate not exist,daily count to increment schedule from tomorrow
# mode d: offset exist, publish date not exist, daily count to increment with specific offset schedule from tomorrow
self.log.debug(
"Trying to set video schedule time...{publish_date}")
if release_offset and not release_offset == "":
if not int(release_offset.split('-')[0]) == 0:
offset = timedelta(months=int(release_offset.split(
'-')[0]), days=int(release_offset.split('-')[-1]))
else:
offset = timedelta(days=1)
if publish_date is None:
publish_date =datetime(
date.today().year, date.today().month, date.today().day, 10, 15)
else:
publish_date += offset
else:
if publish_date is None:
publish_date =datetime(
date.today().year, date.today().month, date.today().day, 10, 15)
offset = timedelta(days=1)
else:
publish_date = publish_date
# dailycount=4
# release_offset=str(int(start_index/30))+'-'+str(int(start_index)/int(setting['dailycount']))
await setscheduletime_douyin(page,publish_date)
# set_time_cssSelector(page,publish_date)
retry_btn = r'//div[@class="word-card--1neCx"]/*[@class="text--GjPv4" and contains(text(),"重新上传")]'
try:
print('video is 100 uploading')
page.locator(retry_btn)
print('click publish button')
await page.locator('//button[text()="发布"]').click()
video_id=''
print(page.url)
if 'https://creator.douyin.com/creator-micro/content/manage' in page.url :
print('提交成功:' + videopath)
else:
await page.screenshot(full_page=True)
print('稿件提交失败,截图记录')
sleep(5)
logging.info("Upload is complete")
await self.close()
return True, video_id
except:
print('still uploading')
return True
async def get_video_id(self, page) -> Optional[str]:
video_id=None
try:
video_url_container=page.locator(
VIDEO_URL_CONTAINER)
video_url_element=video_url_container.locator(
VIDEO_URL_ELEMENT
)
video_id=await video_url_element.get_attribute(HREF)
video_id=video_id.split("/")[-1]
# if 'https://creator.douyin.com/creator-micro/content/manage' in self.driver.current_url :
# print('提交成功:' + videopath)
# print('Remove ' + videopath)
# os.remove(videopath)
# else:
# self.driver.save_screenshot('err.png')
# print('稿件提交失败,截图记录')
except:
raise VideoIDError("Could not get video ID")
return video_id
# @staticmethod
async def _start_playwright(self):
# sync_playwright().start()
return await async_playwright().start()
async def _start_browser(self, browsertype: str, **kwargs):
if browsertype == "chromium":
return await self._playwright.chromium.launch(**kwargs)
if browsertype == "firefox":
# return await self._playwright.firefox.launch(**kwargs)
if self.recordvideo:
return await self._playwright.firefox.launch(record_video_dir=os.path.abspath('')+os.sep+"screen-recording", **kwargs)
else:
return await self._playwright.firefox.launch( **kwargs)
if browsertype == "webkit":
return await self._playwright.webkit.launch(**kwargs)
raise RuntimeError(
"You have to select either 'chromium', 'firefox', or 'webkit' as browser."
)
async def _start_persistent_browser(
self, browser: str, user_data_dir: Optional[Union[str, Path]], **kwargs
):
if browser == "chromium":
return await self._playwright.chromium.launch_persistent_context(
user_data_dir, **kwargs
)
if browser == "firefox":
self.browser=await self._playwright.firefox.launch(**kwargs)
if self.recordvideo:
return await self._playwright.firefox.launch_persistent_context(user_data_dir,record_video_dir=os.path.abspath('')+os.sep+"screen-recording", **kwargs)
else:
return await self._playwright.firefox.launch_persistent_context(user_data_dir, **kwargs)
if browser == "webkit":
return await self._playwright.webkit.launch_persistent_context(
user_data_dir, **kwargs
)
raise RuntimeError(
"You have to select either 'chromium', 'firefox' or 'webkit' as browser."
)
async def close(self):
await self.browser.close()
await self._playwright.stop() | ytb-up | /ytb_up-0.1.15-py3-none-any.whl/ytb_up/tiktok.py | tiktok.py |
import json
from tkinter import EXCEPTION
from xmlrpc.client import boolean
from .constants import *
from .logging import Log
from .exceptions import *
from .utils import *
import os
from .login import *
from time import sleep
from datetime import datetime, date,timedelta
import logging
from playwright.async_api import async_playwright
class DouyinUpload:
def __init__(
self,
root_profile_directory: str,
proxy_option: str = "",
timeout: int = 3,
watcheveryuploadstep: bool = True,
debug: bool = True,
username:str ="",
password:str ="",
CHANNEL_COOKIES: str = "",
login_method:str="phone",
ytb_cookies:str="",
tiktok_cookies:str="",
recordvideo:bool=False
) -> None:
self.timeout = timeout
self.log = Log(debug)
self.username=username
self.password=password
self.CHANNEL_COOKIES = CHANNEL_COOKIES
self.root_profile_directory=root_profile_directory
self.proxy_option=proxy_option
self.watcheveryuploadstep=watcheveryuploadstep
self.ytb_cookies=ytb_cookies
self.tiktok_cookies=tiktok_cookies
self._playwright=''
self.browser=''
self.login_method='qrcode'
self.context=''
self.page=''
self.recordvideo=recordvideo
# self.setup()
async def upload(
self,
videopath: str="",
title: str = "",
description: str = "",
thumbnail: str = "",
publishpolicy: str = 0,
# mode a:release_offset exist,publish_data exist will take date value as a starting date to schedule videos
# mode b:release_offset not exist, publishdate exist , schedule to this specific date
# mode c:release_offset not exist, publishdate not exist,daily count to increment schedule from tomorrow
# mode d: offset exist, publish date not exist, daily count to increment with specific offset schedule from tomorrow
release_offset: str = '0-1',
publish_date: datetime = datetime(
date.today().year, date.today().month, date.today().day, 10, 15),
tags: list = [],
location:str='',
miniprogram:str='',
hottopic:str='',
heji:str='',
up2toutiao:bool=False,
allow2save:bool=True,
allow2see:str='公开',
closewhen100percentupload:bool =True
) -> Tuple[bool, Optional[str]]:
"""Uploads a video to douyin.
Returns if the video was uploaded and the video id.
"""
self._playwright = await self._start_playwright()
# browser = p.chromium.launch()
# proxy_option = "socks5://127.0.0.1:1080"
headless=True
if self.watcheveryuploadstep:
headless=False
print('whether run in view mode',headless)
if self.proxy_option == "":
print('start web page without proxy')
browserLaunchOptionDict = {
"headless": headless,
# "executable_path": executable_path,
"timeout": 30000
}
if not self.root_profile_directory:
self.browser = await self._start_browser("firefox", **browserLaunchOptionDict)
if self.recordvideo:
self.context = await self.browser.new_context(record_video_dir=os.getcwd()+os.sep+"screen-recording")
else:
self.context = await self.browser.new_context()
else:
self.context = await self._start_persistent_browser(
"firefox", user_data_dir=self.root_profile_directory, **browserLaunchOptionDict
)
else:
print('start web page with proxy')
browserLaunchOptionDict = {
"headless": headless,
"proxy": {
"server": self.proxy_option,
},
# timeout <float> Maximum time in milliseconds to wait for the browser instance to start. Defaults to 30000 (30 seconds). Pass 0 to disable timeout.#
"timeout": 30000
}
if not self.root_profile_directory:
self.browser = await self._start_browser("firefox", **browserLaunchOptionDict)
if self.recordvideo:
self.context = await self.browser.new_context(record_video_dir=os.getcwd()+os.sep+"screen-recording")
else:
self.context = await self.browser.new_context()
else:
self.context = await self._start_persistent_browser(
"firefox", user_data_dir=self.root_profile_directory, **browserLaunchOptionDict
)
self.log.debug("Firefox is now running")
await self.context.grant_permissions(['geolocation'])
page = await self.context.new_page()
print('============tags',tags)
if not videopath:
raise FileNotFoundError(f'Could not find file with path: "{videopath}"')
if self.CHANNEL_COOKIES and not self.CHANNEL_COOKIES == '':
print('cookies existing', self.CHANNEL_COOKIES)
await self.context.clear_cookies()
cookies=await format_cookie_file(self.CHANNEL_COOKIES)
await self.context.add_cookies(
cookies
)
await page.goto(DOUYIN_URL,timeout=300000)
await page.reload()
else:
self.log.debug('Please sign in and then press enter')
# input()
await page.goto(DOUYIN_URL,timeout=300000)
# Interact with login form
await self.context.clear_cookies()
await page.locator('.login').click()
await page.locator('.semi-button-content').click()
if self.login_method=='phone-verify':
await page.locator('div.semi-tabs-tab:nth-child(2)').click()
await page.locator('.toggle').click()
time.sleep(30)
elif self.login_method=='password':
print('not recommend')
time.sleep(10)
await page.fill('.semi-input-wrapper__with-prefix', self.username)
await page.fill('div.semi-form-field:nth-child(2)>div>div>input', self.password)
await page.locator('.agreement >img').click()
elif self.login_method=='qrcode':
print('pls open douyin to scan this qrcode')
time.sleep(30)
# page.click('text=Submit')
sleep(USER_WAITING_TIME)
storage = await self.context.storage_state(path=self.CHANNEL_COOKIES)
await self.context.grant_permissions(['geolocation'])
try:
page.locator('.semi-modal-content')
print('there is hint for 开始体验')
await page.locator('button.semi-button:nth-child(3)').click()
await page.locator('.popoverFooter--2G_g0 > button:nth-child(1) > span:nth-child(1)').click()
await page.locator('.popoverFooter--2G_g0 > button:nth-child(1) > span:nth-child(1)').click()
except:
print('this is not the first time to login in')
islogin = confirm_logged_in_douyin(page)
print('checking login status', islogin)
if not islogin:
print('try to load cookie files')
await self.context.clear_cookies()
cookies=await format_cookie_file(self.CHANNEL_COOKIES)
await self.context.add_cookies(
cookies
)
print('success load cookie files')
await page.goto(DOUYIN_URL,timeout=300000)
print('start to check login status')
islogin = confirm_logged_in_douyin(page)
# https://github.com/xtekky/google-login-bypass/blob/main/login.py
self.log.debug("Found douyin upload Dialog Modal")
await page.goto(DOUYIN_UPLOAD_URL,timeout=300000)
# sleep(self.timeout)
self.log.debug(f'Trying to upload "{videopath}" to douyin...')
if os.path.exists(get_path(videopath)):
page.locator(
DOUYIN_INPUT_FILE_VIDEO)
await page.set_input_files(DOUYIN_INPUT_FILE_VIDEO, get_path(videopath))
else:
if os.path.exists(videopath.encode('utf-8')):
print('file found', videopath)
page.locator(
DOUYIN_INPUT_FILE_VIDEO)
await page.set_input_files(DOUYIN_INPUT_FILE_VIDEO, videopath.encode('utf-8'))
sleep(self.timeout)
# accountcheck=await textbox.is_editable()
# if not accountcheck:
# try:
# while True:
# check = page.locator('//*[@id="dialog-title"]')
# self.log.debug(f'found to douyin account check')
# x_path = '//*[@id="textbox"]'
# if page.locator(x_path):
# self.log.debug(f'fix douyin account check')
# break
# except:
# sleep(1)
self.log.debug(f'Trying to set "{title}" as title...')
# get file name (default) title
# title=title if title else page.locator(TEXTBOX).text_content()
# print(title)
sleep(self.timeout)
if len(title) > DOUYIN_TITLE_COUNTER:
print(f"Title was not set due to exceeding the maximum allowed characters ({len(title)}/{TITLE_COUNTER})")
title=title[:DOUYIN_TITLE_COUNTER-1]
# TITLE
print('click title field to input')
titlecontainer= page.locator(DOUYIN_TEXTBOX)
await titlecontainer.click()
print('clear existing title')
await page.keyboard.press("Backspace")
await page.keyboard.press("Control+KeyA")
await page.keyboard.press("Delete")
print('filling new title')
await page.keyboard.type(title)
if thumbnail:
self.log.debug(f'Trying to set "{thumbnail}" as thumbnail...')
await page.locator(DOUYIN_INPUT_FILE_THUMBNAIL_EDIT).click()
await page.locator(DOUYIN_INPUT_FILE_THUMBNAIL_OPTION_UPLOAD).click()
if os.path.exists(get_path(thumbnail)):
print('thumb file name without utf8')
await page.locator(
DOUYIN_INPUT_FILE_THUMBNAIL).set_input_files(get_path(thumbnail))
time.sleep(USER_WAITING_TIME)
# page.locator()
# 如果缩略图尺寸不是9:16 会弹出裁剪框
await page.locator(DOUYIN_INPUT_FILE_THUMBNAIL_UPLOAD_TRIM_CONFIRM).click()
await page.locator(DOUYIN_INPUT_FILE_THUMBNAIL_UPLOAD_CONFIRM).click()
else:
if os.path.exists(thumbnail.encode('utf-8')):
print('thumbnail found', thumbnail)
await page.locator(DOUYIN_INPUT_FILE_THUMBNAIL).set_input_files(
thumbnail.encode('utf-8'))
await page.locator(DOUYIN_INPUT_FILE_THUMBNAIL_UPLOAD_TRIM_CONFIRM).click()
await page.locator(DOUYIN_INPUT_FILE_THUMBNAIL_UPLOAD_CONFIRM).click()
time.sleep(USER_WAITING_TIME)
sleep(self.timeout)
self.log.debug('Trying to set {location} to ')
if location is None or location =="" or len(location)==0:
pass
else:
print('location you give',location)
sleep(self.timeout)
await page.locator(DOUYIN_LOCATION).click()
print('clear existing location')
await page.keyboard.press("Backspace")
await page.keyboard.press("Control+KeyA")
await page.keyboard.press("Delete")
await page.keyboard.type(location)
await page.keyboard.press("Enter")
try:
locationresultscount=await page.locator('.semi-select-option-list>div.semi-select-option').count()
if locationresultscount>0:
await page.locator('div.semi-select-option:nth-child(1)').click()
except:
print('no hint for location ',location)
self.log.debug(f'Trying to set "{location}" as location...')
self.log.debug('Trying to set {miniprogram} to ')
if miniprogram is None or miniprogram =="" or len(miniprogram)==0:
pass
else:
print('miniprogram you give',miniprogram)
sleep(self.timeout)
await page.locator(DOUYIN_MINI_SELECT).click()
await page.locator(DOUYIN_MINI_SELECT_OPTION).click()
await page.locator(DOUYIN_MINI).click()
print('clear existing location')
await page.keyboard.press("Backspace")
await page.keyboard.press("Control+KeyA")
await page.keyboard.press("Delete")
await page.keyboard.type(miniprogram)
await page.keyboard.press("Enter")
self.log.debug(f'Trying to set "{miniprogram}" as mini...')
self.log.debug('Trying to set {hottopic} to ')
if hottopic is None or hottopic =="" or len(hottopic)==0:
pass
else:
print('hottopic you give',hottopic)
sleep(self.timeout)
await page.locator(DOUYIN_HOT_TOPIC).click()
print('clear existing hottopic')
await page.keyboard.press("Backspace")
await page.keyboard.press("Control+KeyA")
await page.keyboard.press("Delete")
await page.keyboard.type(hottopic)
# 输入准确的热点词 可以保存
#输入的如果是提示词, 需从下拉列表中选择第一项
try:
hottopiccount=await page.locator('.semi-select-option-list > div.semi-select-option').count()
if hottopiccount>0:
await page.locator('.semi-select-option-list div.semi-select-option:nth-child(1)').click()
except:
pass
self.log.debug(f'Trying to set "{hottopic}" as hottopic...')
self.log.debug(f'Trying to set "{hottopic}" as mini...')
self.log.debug('Trying to set {heji} to ')
if heji is None or heji =="" or len(heji)==0:
pass
else:
print('heji you give',heji)
sleep(self.timeout)
print('click to 选择合集')
await page.locator(DOUYIN_HEJI_SELECT_OPTION).click()
print('罗列已有合集')
try:
hejicount=await page.locator('div.mix-dropdown>div.semi-select-option-list').count()
print('we found he ji count ',hejicount)
if hejicount==0:
print('pleas manual create 合集 first',heji)
else:
index=0
for i in hejicount:
text=await page.locator('.semi-select-option-list > div.semi-select-option').nth(i).text_content()
text=text.strip()
if text==heji:
index=i
if index==0:
print('we cannot detect this heji,pleas create 合集 first',heji)
else:
await page.locator('.semi-select-option-list div.semi-select-option:nth-child({index})').click()
except:
print('暂无合集',heji)
self.log.debug(f'Trying to set "{heji}" as heji...')
self.log.debug('Trying to set {up2toutiao} to ')
if up2toutiao is None or up2toutiao ==False:
pass
else:
await page.locator('.semi-switch-native-control').click()
self.log.debug('Trying to set {allow2save} to ')
if allow2save is None or allow2save ==True:
pass
else:
await page.locator('.form--3R0Ka > div:nth-child(14) > div:nth-child(1) > label:nth-child(2)').click()
self.log.debug('Trying to set {allow2see} to ')
if heji:
print('添加进合集或专辑的视频,无法设置好友可见或仅自己可见')
else:
if not allow2see in ['公开','好友可见','仅自己可见']:
allow2see='公开'
if allow2see is None or allow2see =='公开':
pass
elif allow2see=='好友可见':
await page.locator('.publish-settings--3rCGw > div:nth-child(3) > label:nth-child(2)').click()
elif allow2see=='仅自己可见':
await page.locator('.publish-settings--3rCGw > div:nth-child(3) > label:nth-child(3)').click()
else:
print('请重新设置有效的值 只能是 公开、好友可见、仅自己可见',allow2see)
if not publishpolicy in ['立即发布','定时发布']:
publishpolicy='立即发布'
if publishpolicy == '立即发布':
self.log.debug("Trying to set video visibility to 立即发布...")
else:
self.log.debug("Trying to set video visibility to 定时发布...")
# mode a:release_offset exist,publish_data exist will take date value as a starting date to schedule videos
# mode b:release_offset not exist, publishdate exist , schedule to this specific date
# mode c:release_offset not exist, publishdate not exist,daily count to increment schedule from tomorrow
# mode d: offset exist, publish date not exist, daily count to increment with specific offset schedule from tomorrow
self.log.debug(
"Trying to set video schedule time...{publish_date}")
if release_offset and not release_offset == "":
if not int(release_offset.split('-')[0]) == 0:
offset = timedelta(months=int(release_offset.split(
'-')[0]), days=int(release_offset.split('-')[-1]))
else:
offset = timedelta(days=1)
if publish_date is None:
publish_date =datetime(
date.today().year, date.today().month, date.today().day, 10, 15)
else:
publish_date += offset
else:
if publish_date is None:
publish_date =datetime(
date.today().year, date.today().month, date.today().day, 10, 15)
offset = timedelta(days=1)
else:
publish_date = publish_date
# dailycount=4
# release_offset=str(int(start_index/30))+'-'+str(int(start_index)/int(setting['dailycount']))
await setscheduletime_douyin(page,publish_date)
# set_time_cssSelector(page,publish_date)
retry_btn = r'//div[@class="word-card--1neCx"]/*[@class="text--GjPv4" and contains(text(),"重新上传")]'
try:
print('video is 100 uploading')
page.locator(retry_btn)
print('click publish button')
await page.locator('//button[text()="发布"]').click()
video_id=''
print(page.url)
if 'https://creator.douyin.com/creator-micro/content/manage' in page.url :
print('提交成功:' + videopath)
else:
await page.screenshot(full_page=True)
print('稿件提交失败,截图记录')
sleep(5)
logging.info("Upload is complete")
await self.close()
return True, video_id
except:
print('still uploading')
return True
async def get_video_id(self, page) -> Optional[str]:
video_id=None
try:
video_url_container=page.locator(
VIDEO_URL_CONTAINER)
video_url_element=video_url_container.locator(
VIDEO_URL_ELEMENT
)
video_id=await video_url_element.get_attribute(HREF)
video_id=video_id.split("/")[-1]
# if 'https://creator.douyin.com/creator-micro/content/manage' in self.driver.current_url :
# print('提交成功:' + videopath)
# print('Remove ' + videopath)
# os.remove(videopath)
# else:
# self.driver.save_screenshot('err.png')
# print('稿件提交失败,截图记录')
except:
raise VideoIDError("Could not get video ID")
return video_id
# @staticmethod
async def _start_playwright(self):
# sync_playwright().start()
return await async_playwright().start()
async def _start_browser(self, browsertype: str, **kwargs):
if browsertype == "chromium":
return await self._playwright.chromium.launch(**kwargs)
if browsertype == "firefox":
# return await self._playwright.firefox.launch(**kwargs)
if self.recordvideo:
return await self._playwright.firefox.launch(record_video_dir=os.path.abspath('')+os.sep+"screen-recording", **kwargs)
else:
return await self._playwright.firefox.launch( **kwargs)
if browsertype == "webkit":
return await self._playwright.webkit.launch(**kwargs)
raise RuntimeError(
"You have to select either 'chromium', 'firefox', or 'webkit' as browser."
)
async def _start_persistent_browser(
self, browser: str, user_data_dir: Optional[Union[str, Path]], **kwargs
):
if browser == "chromium":
return await self._playwright.chromium.launch_persistent_context(
user_data_dir, **kwargs
)
if browser == "firefox":
self.browser=await self._playwright.firefox.launch(**kwargs)
if self.recordvideo:
return await self._playwright.firefox.launch_persistent_context(user_data_dir,record_video_dir=os.path.abspath('')+os.sep+"screen-recording", **kwargs)
else:
return await self._playwright.firefox.launch_persistent_context(user_data_dir, **kwargs)
if browser == "webkit":
return await self._playwright.webkit.launch_persistent_context(
user_data_dir, **kwargs
)
raise RuntimeError(
"You have to select either 'chromium', 'firefox' or 'webkit' as browser."
)
async def close(self):
await self.browser.close()
await self._playwright.stop() | ytb-up | /ytb_up-0.1.15-py3-none-any.whl/ytb_up/douyin.py | douyin.py |
import time
import re
import logging
from time import sleep
from datetime import datetime
from playwright.async_api import *
from .constants import *
from pathlib import Path
import os
from typing import Tuple, Optional,Union
def get_path(file_path: str) -> str:
# no clue why, but this character gets added for me when running
# return str(os.path(file_path)).replace("\u202a", "")
# return file_path.replace("\u202a", "")
return str(Path(file_path)).replace("\u202a", "")
def close_browser(self):
self.browser.close()
self._playwright.stop()
async def set_channel_language_english(page):
# why does not work again
try:
print('Click your profile icon .')
page.locator(
"yt-img-shadow.ytd-topbar-menu-button-renderer > img:nth-child(1)")
await page.click(
"yt-img-shadow.ytd-topbar-menu-button-renderer > img:nth-child(1)")
print(' Click Language or Location icon')
page.locator("yt-multi-page-menu-section-renderer.style-scope:nth-child(2) > div:nth-child(2) > ytd-compact-link-renderer:nth-child(2) > a:nth-child(1) > tp-yt-paper-item:nth-child(1) > div:nth-child(2) > yt-formatted-string:nth-child(2)")
await page.click("yt-multi-page-menu-section-renderer.style-scope:nth-child(2) > div:nth-child(2) > ytd-compact-link-renderer:nth-child(2) > a:nth-child(1) > tp-yt-paper-item:nth-child(1) > div:nth-child(2) > yt-formatted-string:nth-child(2)")
selector_en_path = "ytd-compact-link-renderer.style-scope:nth-child(13) > a:nth-child(1) > tp-yt-paper-item:nth-child(1) > div:nth-child(2) > yt-formatted-string:nth-child(1)"
print('choose the language or location you like to use.')
selector_en=page.locator(selector_en_path)
await selector_en.click()
# page.click(selector_en)
# print(page.text_content('//*[@id="label"]'))
# if page.text_content(selector_en)=="English (US)":
# return True
# else:
# return False
return True
except TimeoutError:
return False
# fix google account verify
async def verify(self, page):
try:
while True:
await page.locator('#confirm-button > div:nth-child(2)').click()
await page.goto("https://accounts.google.com/signin/v2/identifier?service=youtube&uilel=3&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Faction_handle_signin%3Dtrue%26app%3Ddesktop%26next%3Dhttps%253A%252F%252Fstudio.youtube.com%252Freauth%26feature%3Dreauth%26authuser%3D2%26skip_identity_prompt%3Dtrue&hl=en&authuser=2&rart=ANgoxcfF1TrrQp5lP5ySTmlJmdnwuMbSDi81WlN2aDXRgvpTnD1cv0nXHlRcMz6yv6hnqfERyjXMCgJqa8thKIAqVqatu9kTtA&flowName=GlifWebSignIn&flowEntry=ServiceLogin")
await page.locator("#identifierId").click()
await page.fill("#identifierId", self.username)
await page.locator(
".VfPpkd-LgbsSe-OWXEXe-k8QpJ > span:nth-child(4)").click()
time.sleep(3)
await page.fill(
"#password > div:nth-child(1) > div:nth-child(1) > div:nth-child(1) > input:nth-child(1)", self.password)
await page.locator(
".VfPpkd-LgbsSe-OWXEXe-k8QpJ > span:nth-child(4)").click()
time.sleep(60)
except:
time.sleep(1)
# x_path = '//*[@id="textbox"]'
# if page.wait_for_selector(x_path):
# break
async def wait_for_processing(page, process):
page = page
if process == True:
# Wait for processing to complete
progress_label = await page.wait_for_selector(
"span.progress-label")
pattern = re.compile(
r"(finished processing)|(processing hd.*)|(check.*)")
current_progress = await progress_label.get_attribute("textContent")
last_progress = None
while not pattern.match(current_progress.lower()):
if last_progress != current_progress:
logging.info(f'Current progress: {current_progress}')
last_progress = current_progress
sleep(5)
current_progress = await progress_label.get_attribute("textContent")
else:
while True:
x_path = "//span[@class='progress-label style-scope ytcp-video-upload-progress']"
# TypeError: 'WebElement' object is not subscriptable
upload_progress = await page.locator(
'[class="progress-label style-scope ytcp-video-upload-progress"]').all_text_contents()
# innerhtml = page.locator(x_path).get_attribute('innerHTML')
# if re.match(r"\D \.\.\. \D", innerhtml) or re.match(r"^[^\.]+$", innerhtml):
# break
upload_progress=' '.join(upload_progress)
if not '%' in upload_progress.lower():
break
elif 'complete' in upload_progress.lower():
break
async def setscheduletime_douyin(page, publish_date: datetime):
hour_to_post, date_to_post, publish_date_hour=hour_and_date_douyin(
publish_date)
# Clicking in schedule video
print('click schedule')
await page.locator('label.one-line--2rHu9:nth-child(2)').click()
sleep(1)
# Writing date
date_to_post=publish_date.strftime("%Y-%m-%d")
hour_xpath=get_hour_xpath(hour_to_post)
print('click date',str(publish_date_hour),type(publish_date_hour))
# 2022-05-15 09:24
# await page.locator('.semi-input').click()
sleep(1)
await page.keyboard.press("Control+KeyA")
await page.keyboard.type(str(publish_date_hour))
await page.keyboard.press("Enter")
sleep(1)
sleep(1)
async def setscheduletime(page, publish_date: datetime):
hour_to_post, date_to_post, publish_date_hour=hour_and_date(
publish_date)
date_to_post=publish_date.strftime("%b %d, %Y")
hour_xpath=get_hour_xpath(hour_to_post)
# Clicking in schedule video
print('click schedule')
await page.locator(
'//html/body/ytcp-uploads-dialog/tp-yt-paper-dialog/div/ytcp-animatable[1]/ytcp-uploads-review/div[2]/div[1]/ytcp-video-visibility-select/div[2]/tp-yt-paper-radio-button/div[1]/div[1]').click()
sleep(1)
# Writing date
print('click date')
await page.locator('#datepicker-trigger > ytcp-dropdown-trigger:nth-child(1) > div:nth-child(2) > div:nth-child(4)').click()
# page.locator(
# '//html/body/ytcp-uploads-dialog/tp-yt-paper-dialog/div/ytcp-animatable[1]/ytcp-uploads-review/div[2]/div[1]/ytcp-video-visibility-select/div[2]/ytcp-visibility-scheduler/div[1]/ytcp-datetime-picker/div/ytcp-text-dropdown-trigger[1]/ytcp-dropdown-trigger/div/div[3]').click()
sleep(1)
page.locator('//*[@id="input-4"]')
# page.locator(
# '//html/body/ytcp-date-picker/tp-yt-paper-dialog/div/form/tp-yt-paper-input/tp-yt-paper-input-container/div[2]/div/iron-input/input').click()
await page.keyboard.press("Control+KeyA")
await page.keyboard.type(date_to_post)
await page.keyboard.press("Enter")
sleep(1)
print('click hour')
# #input-1
try:
await page.locator(
'#input-1').click()
sleep(1)
await page.locator(hour_xpath).click()
except:
# input_hour=page.wait_for_selector(
# 'input.tp-yt-paper-input').click()
print('no hour input found')
await page.keyboard.press("Control+KeyA")
await page.keyboard.type(hour_to_post)
await page.keyboard.press("Enter")
sleep(1)
def hour_and_date_douyin( now_date_hour):
# now_date_hour += datetime.timedelta(seconds=TIME_BETWEEN_POSTS)
hour_to_post=now_date_hour.strftime('%H:%M')
hour, minutes=hour_to_post.split(
':')[0], int(hour_to_post.split(':')[1])
setting_minutes=minutes//15
minutes=setting_minutes * 15
if minutes == 0:
minutes='00'
hour_to_post=f'{hour}:{minutes}'
# 2022-05-15 09:24
print('now_date_hour',now_date_hour)
date_to_post=now_date_hour.strftime('%d/%m/%Y')
return hour_to_post, date_to_post, now_date_hour
def hour_and_date( now_date_hour):
# now_date_hour += datetime.timedelta(seconds=TIME_BETWEEN_POSTS)
hour_to_post=now_date_hour.strftime('%H:%M')
hour, minutes=hour_to_post.split(
':')[0], int(hour_to_post.split(':')[1])
setting_minutes=minutes//15
minutes=setting_minutes * 15
if minutes == 0:
minutes='00'
hour_to_post=f'{hour}:{minutes}'
date_to_post=now_date_hour.strftime('%d/%m/%Y')
return hour_to_post, date_to_post, now_date_hour
def get_hour_xpath( input_hour):
hour_xpath=dict()
xpath_time=0
for hour in range(24):
if hour < 10 and hour >= 0:
hour=f'0{hour}'
for minute in range(0, 46, 15):
if minute == 0:
minute='00'
xpath_time += 1
hour_xpath.update(
{f'{hour}:{minute}': f'//html/body/ytcp-time-of-day-picker/tp-yt-paper-dialog/tp-yt-paper-listbox/tp-yt-paper-item[{xpath_time}]'})
return hour_xpath[input_hour]
def _set_time_cssSelector(page, publish_date: datetime):
# Start time scheduling
page.locator("SCHEDULE").click()
# Open date_picker
page.locator(
"#datepicker-trigger > ytcp-dropdown-trigger:nth-child(1)").click()
date_input=page.locator(
"input.tp-yt-paper-input").click()
date_input.clear()
# Transform date into required format: Mar 19, 2021
page.keyboard.press("Control+KeyA")
page.keyboard.type(publish_date.strftime("%b %d, %Y"))
page.keyboard.press("KeyReturn")
# Open time_picker
page.locator(
"#time-of-day-trigger > ytcp-dropdown-trigger:nth-child(1) > div:nth-child(2)"
).click()
time_list=page.locator(
"tp-yt-paper-item.tp-yt-paper-item")
# Transform time into required format: 8:15 PM
time_str=publish_date.strftime("%I:%M %p").strip("0")
time=[time for time in time_list[2:] if time.text == time_str][0]
time.click()
def _set_basic_settings(page, title: str, description: str, thumbnail_path: str=None):
title_input=page.wait_for_selector(
'//ytcp-mention-textbox[@label="Title"]//div[@id="textbox"]' )
# Input meta data (title, description, etc ... )
description_input=page.wait_for_selector(
'//ytcp-mention-textbox[@label="Description"]//div[@id="textbox"]'
)
thumbnail_input=page.wait_for_selector(
"input#file-loader"
)
title_input.clear()
title_input.send_keys(title)
description_input.send_keys(description)
if thumbnail_path:
thumbnail_input.send_keys(thumbnail_path)
def _set_advanced_settings(page, game_title: str, made_for_kids: bool):
# Open advanced options
page=page
page.wait_for_selector("#toggle-button").click()
if game_title:
game_title_input=page.wait_for_selector(
".ytcp-form-gaming > "
"ytcp-dropdown-trigger:nth-child(1) > "
":nth-child(2) > div:nth-child(3) > input:nth-child(3)"
)
game_title_input.send_keys(game_title)
# Select first item in game drop down
page.wait_for_selector("#text-item-2").click()
# WebDriverWait(page, 20).until(EC.element_to_be_clickable(
# ("VIDEO_MADE_FOR_KIDS_MFK" if made_for_kids else "VIDEO_MADE_FOR_KIDS_NOT_MFK")
# )).click()
def _set_endcard(self):
page=page
# Add endscreen
page.wait_for_selector("#endscreens-button").click()
sleep(5)
for i in range(1, 11):
try:
# Select endcard type from last video or first suggestion if no prev. video
page.wait_for_selector(
"div.card:nth-child(1)").click()
break
except:
logging.warning(
f"Couldn't find endcard button. Retry in 5s! ({i}/10)")
sleep(5)
page.is_visible("save-button").click()
# def close(self):
# page.close()
# page.quit()
# self.log.debug("Closed Firefox")
def remove_unwatched_videos(self,page, remove_copyrighted, remove_unwatched_views):
try:
page.goto(YOUTUBE_URL)
sleep(USER_WAITING_TIME)
# set english as language
self.__set_channel_language_english()
page.get("https://studio.youtube.com/")
sleep(USER_WAITING_TIME)
page.wait_for_selector("menu-paper-icon-item-1").click()
sleep(USER_WAITING_TIME)
if self.__is_videos_available():
return True
page.wait_for_selector(
"#page-size .ytcp-text-dropdown-trigger").click()
sleep(USER_WAITING_TIME)
# clock 50 items per page
pagination_sizes=page.wait_for_selector(
"#select-menu-for-page-size #dialog .paper-item")
pagination_sizes[2].click()
sleep(USER_WAITING_TIME)
# filter to delete only copyrighted videos
if remove_copyrighted:
page.wait_for_selector("filter-icon").click()
sleep(USER_WAITING_TIME)
page.wait_for_selector(
"ytcp-text-menu#menu tp-yt-paper-dialog tp-yt-paper-listbox paper-item#text-item-1 ytcp-ve div").click()
sleep(USER_WAITING_TIME)
# filter to delete videos with views lower than 100
if remove_unwatched_views:
views_no="100000"
page.wait_for_selector("filter-icon").click()
sleep(USER_WAITING_TIME)
page.wait_for_selector(
"ytcp-text-menu#menu tp-yt-paper-dialog tp-yt-paper-listbox paper-item#text-item-5 ytcp-ve div").click()
sleep(USER_WAITING_TIME)
page.wait_for_selector(
"//iron-input[@id='input-2']/input").click()
sleep(USER_WAITING_TIME)
page.wait_for_selector(
"//iron-input[@id='input-2']/input").clear()
sleep(USER_WAITING_TIME)
page.wait_for_selector(
"//iron-input[@id='input-2']/input").send_keys(views_no)
sleep(USER_WAITING_TIME)
page.wait_for_selector(
"//input[@type='text']").click()
sleep(USER_WAITING_TIME)
page.wait_for_selector(
"//tp-yt-paper-listbox[@id='operator-list']/paper-item[2]").click()
sleep(USER_WAITING_TIME)
page.wait_for_selector(
"//ytcp-button[@id='apply-button']/div").click()
sleep(USER_WAITING_TIME)
return self.__remove_unwatched_videos()
except Exception as e:
print(e)
return False
def __is_videos_available(self,page):
# if there are no videos to be deleted, this element should be visible
# if not visible throw error, and proceed to delete more videos
try:
page.wait_for_selector(
"//ytcp-video-section-content[@id='video-list']/div/div[2]/div")
# return True, there are no more video to be deleted
return True
except:
return False
def __write_in_field(self, field, string, select_all=False):
field.click()
sleep(USER_WAITING_TIME)
if select_all:
if self.is_mac:
field.send_keys(Keys.COMMAND + 'a')
else:
field.send_keys(Keys.CONTROL + 'a')
sleep(USER_WAITING_TIME)
field.send_keys(string)
# def __set_scheduler(self, publish_date):
# # Set upload time
# action=ActionChains(self.page)
# schedule_radio_button=page.wait_for_selector("schedule-radio-button")
# action.move_to_element(schedule_radio_button)
# action.click(schedule_radio_button).perform()
# self.log.debug('Set delevery to {}'.format("schedule"))
# sleep(.33)
# # Set close action
# action_close=ActionChains(self.page)
# action_close.send_keys(Keys.ESCAPE)
# # date picker
# action_datepicker=ActionChains(self.page)
# datepicker_trigger=page.wait_for_selector("datepicker-trigger")
# action_datepicker.move_to_element(datepicker_trigger)
# action_datepicker.click(datepicker_trigger).perform()
# sleep(.33)
# date_string=publish_date.strftime("%d.%m.%Y")
# date_input=page.wait_for_selector(
# '//ytcp-date-picker/tp-yt-paper-dialog//iron-input/input')
# # date_input.clear()
# # # Transform date into required format: Mar 19, 2021
# # date_input.send_keys(publish_date.strftime("%b %d, %Y"))
# # date_input.send_keys(Keys.RETURN)
# self.__write_in_field(date_input, date_string, True)
# self.log.debug('Set schedule date to {}'.format(date_string))
# action_close.perform()
# sleep(.33)
# # time picker
# action_timepicker=ActionChains(self.page)
# time_of_day_trigger=page.wait_for_selector("time-of-day-trigger")
# action_timepicker.move_to_element(time_of_day_trigger)
# action_timepicker.click(time_of_day_trigger).perform()
# sleep(.33)
# time_dto=(publish_date - timedelta(
# minutes=publish_date.minute % 15,
# seconds=publish_date.second,
# microseconds=publish_date.microsecond))
# time_string=time_dto.strftime("%H:%M")
# time_container=page.wait_for_selector(
# '//ytcp-time-of-day-picker//*[@id="dialog"]')
# time_item=page.wait_for_selector(
# '//ytcp-time-of-day-picker//tp-yt-paper-item[text() = "{}"]'.format(time_string))
# self.log.debug('Set schedule date to {}'.format(time_string))
# page.execute_script(
# "arguments[0].scrollTop = arguments[1].offsetTop; ", time_container, time_item)
# time_item.click()
# action_close.perform()
# sleep(.33)
# def __remove_unwatched_videos(self):
# DELETE_WAIT_TIME=60 * 2
# # check if videos deletion process has finished
# # if not visible throw error, and proceed to delete more videos
# try:
# page.wait_for_selector(
# "//div[@id='header']/div/span[2]")
# # wait for the videos to be deleted and try delete videos after
# sleep(DELETE_WAIT_TIME)
# return self.__remove_unwatched_videos()
# except:
# pass
# if self.__is_videos_available():
# return True
# page.wait_for_selector("checkbox-container").click()
# sleep(USER_WAITING_TIME)
# page.wait_for_selector(".ytcp-bulk-actions .toolbar .ytcp-select .ytcp-text-dropdown-trigger .ytcp-dropdown-trigger .right-container .ytcp-dropdown-trigger").click()
# sleep(USER_WAITING_TIME)
# page.wait_for_selector(
# "#select-menu-for-additional-action-options #dialog #paper-list #text-item-1").click()
# sleep(USER_WAITING_TIME)
# page.wait_for_selector(
# "#dialog-content-confirm-checkboxes #confirm-checkbox #checkbox-container").click()
# sleep(USER_WAITING_TIME)
# page.wait_for_selector(
# ".ytcp-confirmation-dialog #dialog-buttons #confirm-button").click()
# # wait 5 minutes for the videos to be deleted
# sleep(DELETE_WAIT_TIME)
# return self.__remove_unwatched_videos()
def waitfordone(page):
# wait until video uploads
# uploading progress text contains ": " - Timp ramas/Remaining time: 3 minutes.
# we wait until ': ' is removed, so we know the text has changed and video has entered processing stage
uploading_progress_text=page.locator(UPLOADING_PROGRESS_SELECTOR).text_content()
while ': ' in uploading_progress_text:
sleep(5)
page.locator( UPLOADING_PROGRESS_SELECTOR).text_content()
def uploadTikTok(username, tiktok, deletionStatus, file):
regex = re.compile('[0-9]{17}')
regexA = re.compile('[0-9]{18}')
regexB = re.compile('[0-9]{19}')
regexC = re.compile('[0-9]{8}')
regexD = re.compile('[0-9]{9}')
if os.path.isdir(tiktok):
if (
regex.match(str(tiktok))
or (regexA.match(str(tiktok)))
or (regexB.match(str(tiktok)))
or (regexC.match(str(tiktok)))
or (regexD.match(str(tiktok)))
): # TODO: use or regex with "|" instead of this
item = get_item('tiktok-' + tiktok)
if username is None:
if file is not None:
file.write(str(tiktok))
file.write('\n')
return None
item.upload(
'./' + tiktok + '/',
verbose=True,
checksum=True,
delete=deletionStatus,
metadata=dict(
collection='opensource_media',
subject='tiktok',
creator=username,
title='TikTok Video by ' + username,
originalurl='https://www.tiktok.com/@' + username + '/video/' + tiktok,
scanner='TikUp ' + getVersion(),
),
retries=9001,
retries_sleep=60,
)
if deletionStatus:
os.rmdir(tiktok)
print()
print('Uploaded to https://archive.org/details/tiktok-' + tiktok)
print()
if file is not None:
file.write(str(tiktok))
file.write('\n') | ytb-up | /ytb_up-0.1.15-py3-none-any.whl/ytb_up/utils.py | utils.py |
import json
from tkinter import EXCEPTION
from .constants import *
from .logging import Log
from .exceptions import *
from .utils import *
import os
from .login import *
from time import sleep
from datetime import datetime, date,timedelta
import logging
from playwright.async_api import async_playwright
class YoutubeUpload:
def __init__(
self,
root_profile_directory: str,
proxy_option: str = "",
timeout: int = 3,
watcheveryuploadstep: bool = True,
debug: bool = True,
username:str ="",
password:str ="",
CHANNEL_COOKIES: str = "",
ytb_cookies:str="",
tiktok_cookies:str="",
recordvideo:bool=False
) -> None:
self.timeout = timeout
self.log = Log(debug)
self.username=username
self.password=password
self.CHANNEL_COOKIES = CHANNEL_COOKIES
self.root_profile_directory=root_profile_directory
self.proxy_option=proxy_option
self.watcheveryuploadstep=watcheveryuploadstep
self.ytb_cookies=ytb_cookies
self.tiktok_cookies=tiktok_cookies
self._playwright=''
self.browser=None
self.context=''
self.page=''
self.recordvideo=recordvideo
# self.setup()
def send(self, element, text: str) -> None:
element.clear()
sleep(self.timeout)
element.send_keys(text)
sleep(self.timeout)
async def click_next(self, page) -> None:
await page.locator(NEXT_BUTTON).click()
sleep(self.timeout)
async def not_uploaded(self, page) -> bool:
s=await page.locator(STATUS_CONTAINER).text_content()
return s.find(UPLOADED) != -1
async def upload(
self,
videopath: str="",
title: str = "",
description: str = "",
thumbnail: str = "",
publishpolicy: str = 0,
# mode a:release_offset exist,publish_data exist will take date value as a starting date to schedule videos
# mode b:release_offset not exist, publishdate exist , schedule to this specific date
# mode c:release_offset not exist, publishdate not exist,daily count to increment schedule from tomorrow
# mode d: offset exist, publish date not exist, daily count to increment with specific offset schedule from tomorrow
release_offset: str = '0-1',
publish_date: datetime = datetime(
date.today().year, date.today().month, date.today().day, 10, 15),
tags: list = [],
closewhen100percentupload:bool =True
) -> Tuple[bool, Optional[str]]:
"""Uploads a video to YouTube.
Returns if the video was uploaded and the video id.
"""
self._playwright = await self._start_playwright()
# browser = p.chromium.launch()
# proxy_option = "socks5://127.0.0.1:1080"
headless=True
if self.watcheveryuploadstep:
headless=False
print('whether run in view mode',headless)
if self.proxy_option == "":
print('start web page without proxy')
browserLaunchOptionDict = {
"headless": headless,
# "executable_path": executable_path,
"timeout": 300000
}
if not self.root_profile_directory:
self.browser = await self._start_browser("firefox", **browserLaunchOptionDict)
if self.recordvideo:
self.context = await self.browser.new_context(record_video_dir=os.getcwd()+os.sep+"screen-recording")
else:
self.context = await self.browser.new_context()
else:
self.context = await self._start_persistent_browser(
"firefox", user_data_dir=self.root_profile_directory, **browserLaunchOptionDict
)
else:
print('start web page with proxy')
browserLaunchOptionDict = {
"headless": headless,
"proxy": {
"server": self.proxy_option,
},
# timeout <float> Maximum time in milliseconds to wait for the browser instance to start. Defaults to 30000 (30 seconds). Pass 0 to disable timeout.#
"timeout": 300000
}
if not self.root_profile_directory:
self.browser = await self._start_browser("firefox", **browserLaunchOptionDict)
if self.recordvideo:
self.context = await self.browser.new_context(record_video_dir=os.getcwd()+os.sep+"screen-recording")
else:
self.context = await self.browser.new_context()
else:
self.context = await self._start_persistent_browser(
"firefox", user_data_dir=self.root_profile_directory, **browserLaunchOptionDict
)
self.log.debug("Firefox is now running")
page = await self.context.new_page()
print('============tags',tags)
if not videopath:
raise FileNotFoundError(f'Could not find file with path: "{videopath}"')
if self.CHANNEL_COOKIES and not self.CHANNEL_COOKIES == '':
print('cookies existing', self.CHANNEL_COOKIES)
await self.context.clear_cookies()
await self.context.add_cookies(
json.load(
open(
self.CHANNEL_COOKIES,
'r'
)
)
)
# login_using_cookie_file(self,self.CHANNEL_COOKIES,page)
await page.goto(YOUTUBE_URL,timeout=300000)
await page.reload()
else:
self.log.info('Please sign in and then press enter')
# input()
await page.goto(YOUTUBE_URL,timeout=300000)
# Interact with login form
await self.context.clear_cookies()
# page.click('text=Login')
# page.fill('input[name="login"]', USERNAME)
# page.fill('input[name="password"]', PASSWORD)
# page.click('text=Submit')
sleep(USER_WAITING_TIME)
storage = await self.context.storage_state(path=self.CHANNEL_COOKIES)
islogin = confirm_logged_in(page)
print('checking login status', islogin)
if not islogin:
print('try to load cookie files')
await self.context.clear_cookies()
await self.context.add_cookies(
json.load(
open(
self.CHANNEL_COOKIES,
'r'
)
)
)
print('success load cookie files')
await page.goto(YOUTUBE_URL,timeout=30000)
print('start to check login status')
islogin = confirm_logged_in(page)
# https://github.com/xtekky/google-login-bypass/blob/main/login.py
print('start change locale to english')
await set_channel_language_english(page)
print('finish change locale to english')
await page.goto(YOUTUBE_UPLOAD_URL,timeout=300000)
# sleep(self.timeout)
self.log.debug("Found YouTube upload Dialog Modal")
self.log.debug(f'Trying to upload "{videopath}" to YouTube...')
if os.path.exists(get_path(videopath)):
page.locator(
INPUT_FILE_VIDEO)
await page.set_input_files(INPUT_FILE_VIDEO, get_path(videopath))
else:
if os.path.exists(videopath.encode('utf-8')):
print('file found', videopath)
page.locator(
INPUT_FILE_VIDEO)
await page.set_input_files(INPUT_FILE_VIDEO, videopath.encode('utf-8'))
sleep(self.timeout)
textbox=page.locator(TEXTBOX)
# <h1 slot="primary-header" id="dialog-title" class="style-scope ytcp-confirmation-dialog">
# Verify it's you
# </h1>
try:
self.log.debug(f'Trying to detect verify...')
hint=await page.locator('#dialog-title').text_content()
if "Verify it's you" in hint:
# fix google account verify
print('verify its you')
# await page.click('text=Login')
# time.sleep(60)
# await page.locator('#confirm-button > div:nth-child(2)').click()
await page.goto('https://accounts.google.com/signin/v2/identifier?service=youtube&uilel=3&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Faction_handle_signin%3Dtrue%26app%3Ddesktop%26next%3Dhttps%253A%252F%252Fstudio.youtube.com%252Freauth%26feature%3Dreauth%26authuser%3D3%26pageid%3D106691143538188646876%26skip_identity_prompt%3Dtrue&hl=en&authuser=3&rart=ANgoxcd6AUvx_ynaUmq5M6nROFwTagKglTZqT8c97xb1AEzoDasGeJ14cNlvYfH1_mJsl7us_sFLNGJskNrJyjMaIE2KklrO7Q&flowName=GlifWebSignIn&flowEntry=ServiceLogin')
page.locator('#identifierId')
print('input username or email')
# <div class="rFrNMe N3Hzgf jjwyfe QBQrY zKHdkd sdJrJc Tyc9J" jscontroller="pxq3x" jsaction="clickonly:KjsqPd; focus:Jt1EX; blur:fpfTEe; input:Lg5SV" jsshadow="" jsname="Vsb5Ub"><div class="aCsJod oJeWuf"><div class="aXBtI Wic03c"><div class="Xb9hP"><input type="email" class="whsOnd zHQkBf" jsname="YPqjbf" autocomplete="username" spellcheck="false" tabindex="0" aria-label="Email or phone" name="identifier" autocapitalize="none" id="identifierId" dir="ltr" data-initial-dir="ltr" data-initial-value=""><div jsname="YRMmle" class="AxOyFc snByac" aria-hidden="true">Email or phone</div></div><div class="i9lrp mIZh1c"></div><div jsname="XmnwAc" class="OabDMe cXrdqd Y2Zypf"></div></div></div><div class="LXRPh"><div jsname="ty6ygf" class="ovnfwe Is7Fhb"></div><div jsname="B34EJ" class="dEOOab RxsGPe" aria-atomic="true" aria-live="assertive"></div></div></div>
await page.fill('input[name="identifier"]', self.username)
await page.locator('.VfPpkd-LgbsSe-OWXEXe-k8QpJ > span:nth-child(4)').click()
time.sleep(10)
await page.fill('input[name="password"]', self.password)
time.sleep(10)
await page.locator('.VfPpkd-LgbsSe-OWXEXe-k8QpJ > span:nth-child(4)').click()
# await page.click('text=Submit')
Stephint=await page.locator('.bCAAsb > form:nth-child(1) > span:nth-child(1) > section:nth-child(1) > header:nth-child(1) > div:nth-child(1)').text_content()
print(Stephint)
if "2-Step Verification" in Stephint:
# <div class="L9iFZc" role="presentation" jsname="NjaE2c"><h2 class="kV95Wc TrZEUc"><span jsslot="" jsname="Ud7fr">2-Step Verification</span></h2><div class="yMb59d" jsname="HSrbLb" aria-hidden="true"></div></div>
# <span jsslot="" jsname="Ud7fr">2-Step Verification</span>
print('you need google auth and sms very code')
time.sleep(60)
# await page.locator('#confirm-button > div:nth-child(2)').click()
await page.goto(YOUTUBE_UPLOAD_URL)
except:
print('there is no verification at all')
#confirm-button > div:nth-child(2)
# # Catch max uploads/day limit errors
# if page.get_attribute(NEXT_BUTTON, 'hidden') == 'true':
# error_short_by_xpath=page.locator(ERROR_SHORT_XPATH)
# # print(f"ERROR: {error_short_by_xpath.text} {self.cookie_working_dir}")
# return False
try:
daylimit=await self.page.is_visible(ERROR_SHORT_XPATH)
print('catch daily limit,pls try tomorrow',daylimit)
if daylimit:
self.close()
except:
pass
self.log.debug(f'Trying to set "{title}" as title...')
# get file name (default) title
# title=title if title else page.locator(TEXTBOX).text_content()
# print(title)
sleep(self.timeout)
if len(title) > TITLE_COUNTER:
print(f"Title was not set due to exceeding the maximum allowed characters ({len(title)}/{TITLE_COUNTER})")
title=title[:TITLE_COUNTER-1]
# TITLE
print('click title field to input')
titlecontainer= page.locator(TEXTBOX)
await titlecontainer.click()
print('clear existing title')
await page.keyboard.press("Backspace")
await page.keyboard.press("Control+KeyA")
await page.keyboard.press("Delete")
print('filling new title')
await page.keyboard.type(title)
self.log.debug(f'Trying to set "{title}" as description...')
if description:
if len(description) > DESCRIPTION_COUNTER:
print(
f"Description was not set due to exceeding the maximum allowed characters ({len(description)}/{DESCRIPTION_COUNTER})"
)
description=description[:4888]
self.log.debug(f'Trying to set "{description}" as description...')
print('click description field to input')
await page.locator(DESCRIPTION_CONTAINER).click()
print('clear existing description')
await page.keyboard.press("Backspace")
await page.keyboard.press("Control+KeyA")
await page.keyboard.press("Delete")
print('filling new description')
await page.keyboard.type(description)
if thumbnail:
self.log.debug(f'Trying to set "{thumbnail}" as thumbnail...')
if os.path.exists(get_path(thumbnail)):
await page.locator(
INPUT_FILE_THUMBNAIL).set_input_files(get_path(thumbnail))
else:
if os.path.exists(thumbnail.encode('utf-8')):
print('thumbnail found', thumbnail)
await page.locator(INPUT_FILE_THUMBNAIL).set_input_files(
thumbnail.encode('utf-8'))
sleep(self.timeout)
try:
self.log.debug('Trying to set video to "Not made for kids"...')
kids_section=page.locator(NOT_MADE_FOR_KIDS_LABEL)
await page.locator(NOT_MADE_FOR_KIDS_RADIO_LABEL).click()
sleep(self.timeout)
print('not made for kids task done')
except:
print('failed to set not made for kids')
if tags is None or tags =="" or len(tags)==0:
pass
else:
print('tags you give',tags)
if type(tags) == list:
tags=",".join(str(tag) for tag in tags)
tags=tags[:500]
else:
tags=tags
print('overwrite prefined channel tags',tags)
if len(tags) > TAGS_COUNTER:
print(f"Tags were not set due to exceeding the maximum allowed characters ({len(tags)}/{TAGS_COUNTER})")
tags=tags[:TAGS_COUNTER]
print('click show more button')
sleep(self.timeout)
await page.locator(MORE_OPTIONS_CONTAINER).click()
self.log.debug(f'Trying to set "{tags}" as tags...')
await page.locator(TAGS_CONTAINER).locator(TEXT_INPUT).click()
print('clear existing tags')
await page.keyboard.press("Backspace")
await page.keyboard.press("Control+KeyA")
await page.keyboard.press("Delete")
print('filling new tags')
await page.keyboard.type(tags)
# Language and captions certification
# Recording date and location
# Shorts sampling
# Category
if closewhen100percentupload==False:
pass
else:
await wait_for_processing(page,process=False)
print('uploading progress check task done')
# if "complete" in page.locator(".progress-label").text_content():
# sometimes you have 4 tabs instead of 3
# this handles both cases
for _ in range(3):
try:
await self.click_next(page)
print('next next!')
except:
pass
if not int(publishpolicy) in [0, 1, 2]:
publishpolicy=0
if int(publishpolicy) == 0:
self.log.debug("Trying to set video visibility to private...")
public_main_button=page.locator(PRIVATE_BUTTON)
await page.locator(PRIVATE_RADIO_LABEL).click()
elif int(publishpolicy) == 1:
self.log.debug("Trying to set video visibility to public...")
public_main_button=page.locator(PUBLIC_BUTTON)
await page.locator(PUBLIC_RADIO_LABEL).click()
else:
# mode a:release_offset exist,publish_data exist will take date value as a starting date to schedule videos
# mode b:release_offset not exist, publishdate exist , schedule to this specific date
# mode c:release_offset not exist, publishdate not exist,daily count to increment schedule from tomorrow
# mode d: offset exist, publish date not exist, daily count to increment with specific offset schedule from tomorrow
print('date',type(publish_date),publish_date)
if type(publish_date)==str:
publish_date=datetime.fromisoformat(publish_date)
if release_offset and not release_offset == "0-1":
print('mode a sta',release_offset)
if not int(release_offset.split('-')[0]) == 0:
offset = timedelta(months=int(release_offset.split(
'-')[0]), days=int(release_offset.split('-')[-1]))
else:
offset = timedelta(days=1)
if publish_date is None:
publish_date =datetime(
date.today().year, date.today().month, date.today().day, 10, 15)
else:
publish_date += offset
else:
if publish_date is None:
publish_date =datetime(
date.today().year, date.today().month, date.today().day, 10, 15)
offset = timedelta(days=1)
else:
publish_date = publish_date
# dailycount=4
# release_offset=str(int(start_index/30))+'-'+str(int(start_index)/int(setting['dailycount']))
self.log.debug(
f"Trying to set video schedule time...{publish_date}")
await setscheduletime(page,publish_date)
# set_time_cssSelector(page,publish_date)
print('publish setting task done')
video_id=await self.get_video_id(page)
# option 1 to check final upload status
print('start to check whether upload is finished')
while await self.not_uploaded(page):
self.log.debug("Still uploading...")
sleep(5)
try:
done_button=page.locator(DONE_BUTTON)
if await done_button.get_attribute("aria-disabled") == "true":
error_message= await page.locator(
ERROR_CONTAINER).text_content()
return False, error_message
await done_button.click()
except:
print('=======done buttone ')
print('upload process is done')
sleep(5)
logging.info("Upload is complete")
await self.close()
# page.locator("#close-icon-button > tp-yt-iron-icon:nth-child(1)").click()
# print(page.expect_popup().locator("#html-body > ytcp-uploads-still-processing-dialog:nth-child(39)"))
# page.wait_for_selector("ytcp-dialog.ytcp-uploads-still-processing-dialog > tp-yt-paper-dialog:nth-child(1)")
# page.locator("ytcp-button.ytcp-uploads-still-processing-dialog > div:nth-child(2)").click()
return True, video_id
async def get_video_id(self, page) -> Optional[str]:
video_id=None
try:
video_url_container=page.locator(
VIDEO_URL_CONTAINER)
video_url_element=video_url_container.locator(
VIDEO_URL_ELEMENT
)
video_id=await video_url_element.get_attribute(HREF)
video_id=video_id.split("/")[-1]
except:
raise VideoIDError("Could not get video ID")
return video_id
# @staticmethod
async def _start_playwright(self):
# sync_playwright().start()
return await async_playwright().start()
async def _start_browser(self, browsertype: str, **kwargs):
if browsertype == "chromium":
return await self._playwright.chromium.launch(**kwargs)
if browsertype == "firefox":
return await self._playwright.firefox.launch(**kwargs)
# if self.recordvideo:
# return await self._playwright.firefox.launch(record_video_dir=os.path.abspath('')+os.sep+"screen-recording", **kwargs)
# else:
# return await self._playwright.firefox.launch( **kwargs)
if browsertype == "webkit":
return await self._playwright.webkit.launch(**kwargs)
raise RuntimeError(
"You have to select either 'chromium', 'firefox', or 'webkit' as browser."
)
async def _start_persistent_browser(
self, browser: str, user_data_dir: Optional[Union[str, Path]], **kwargs
):
if browser == "chromium":
return await self._playwright.chromium.launch_persistent_context(
user_data_dir, **kwargs
)
if browser == "firefox":
self.browser=await self._playwright.firefox.launch(**kwargs)
if self.recordvideo:
return await self._playwright.firefox.launch_persistent_context(user_data_dir,record_video_dir=os.path.abspath('')+os.sep+"screen-recording", **kwargs)
else:
return await self._playwright.firefox.launch_persistent_context(user_data_dir, **kwargs)
if browser == "webkit":
return await self._playwright.webkit.launch_persistent_context(
user_data_dir, **kwargs
)
raise RuntimeError(
"You have to select either 'chromium', 'firefox' or 'webkit' as browser."
)
async def close(self):
await self.browser.close()
await self._playwright.stop() | ytb-up | /ytb_up-0.1.15-py3-none-any.whl/ytb_up/youtube.py | youtube.py |
# ytbdl: Music Downloader and Tagger
Combines the power of [yt-dlp](https://github.com/yt-dlp/yt-dlp) and [beets](https://github.com/beetbox/beets) to download music from the internet and automatically tag it.
This application is targeted at those who are already familiar with youtube-dl and beets. This app will work out of the box, but is great for those who want ultimate customization of the beets configuration used to tag the music, and to supply custom arguments to yt-dlp.
Note that this application only supports Python 3.6+.
## Installation
Install the ytbdl tool with pip:
```shell
pip install ytbdl
```
Or, download or clone the [ytbdl GitHub repository](https://github.com/danloveg/ytbdl) and install it with pip:
```shell
cd ytbdl
pip install -e .
```
## Usage
Before using ytbdl for this first time, you need to create a configuration file. To do so, run:
```shell
ytbdl config create
```
Then, to download an album (by an artist) from a playlist at https://youtube.com/some_playlist:
```shell
ytbdl get 'Artist' 'Album' 'https://youtube.com/some_playlist'
```
You can control `ytbdl` with the config file, or using command line arguments.
## Changing yt-dlp's Behaviour
You may change how yt-dlp behaves by specifying arguments on the command line, or by adding arguments to the configuration file. [Click here for a list of yt-dlp options](https://github.com/yt-dlp/yt-dlp#usage-and-options).
To pass options to `yt-dlp` from the command line, use the `--ytdl-args` option:
```shell
ytbdl get --ytdl-args "-f bestaudio[ext=m4a] --reject-title 'music mix 2021' --geo-bypass" ...
```
You can also specify arguments by editing the `ytdl_args` setting in the config file. To get the path to your config file, run `ytbdl config path`. The `ytdl_args` setting can be edited like so:
```yaml
ytdl_args:
- -f
- bestaudio[ext=m4a]
- --geo-bypass
```
Use `ytdl_args` in the config file for settings you want to use all the time. Use `--ytdl-args` on the command line for settings that may change between downloads.
## Changing beets' Behaviour
You can modify beets' behaviour by editing ytbdl's config. ytbdl's config file *is* a beets config file, so edit it as you would a beets config file. [Click here for a list of beets configuration options](https://beets.readthedocs.io/en/stable/reference/config.html).
To edit ytbdl's config, set an editor in the config file. First, open the configuration to edit it:
```shell
# If you like vim!
vim $(ytbdl config path)
# If you're on Windows and like notepad!
notepad $(ytbdl config path)
```
Add an `editor` to the YAML configuration:
```yaml
editor: vim
```
`ytbdl` will read this option and allow you to edit the configuration with that editor using the `edit` command:
```shell
ytbdl config edit
```
For example, maybe you want to add the [zero](https://beets.readthedocs.io/en/stable/plugins/zero.html) plugin. Simply add it to the list of plugins:
```yaml
plugins: # DO NOT REMOVE
- fromdirname # DO NOT REMOVE
- fromyoutubetitle # DO NOT REMOVE
- fetchart
- embedart
- zero # <-- Just added!
# Options for zero
zero:
fields: day month genre
```
Make sure not to remove any lines that say "DO NOT REMOVE" or you will encounter issues!
## Configuration Notes
ytbdl exposes a configuration file that can be used to control the behaviour of beets during the auto-tag process. This configuration file *is* a beets config file, and "overwrites" your beets config when ytbdl calls beets. All of the configuration options you'd use with beets can be used in the ytbdl configuration. If you already have a beets config, it will not be modified, but the options specified in the ytbdl configuration have higher priority and will take precedence over any existing options.
The only two option that ytbdl exposes that aren't beets config options are the `editor` and `ytdl_args` options. For a list of beets' options, view the [beets documentation](https://beets.readthedocs.io/en/stable/reference/config.html).
For a list of yt-dlp options, view the [yt-dlp documentation](https://github.com/yt-dlp/yt-dlp#usage-and-options). Note that the `--output` and `--extract-audio` options are used by default (and can't be turned off). Any attempt at re-specifying these options will result in an error.
## Updating yt-dlp
yt-dlp is frequently updated. If you find that downloads aren't working, try updating yt-dlp.
```shell
pip install --upgrade yt-dlp
```
| ytbdl | /ytbdl-0.0.5.tar.gz/ytbdl-0.0.5/README.md | README.md |
YTBOT
## THIS BOT GENERATES GENUINE YOUTUBE VIEWS ##
How to install?
pip install ytbot
what are the requirements?
*python3.6 or above*
*chromium-browser(developer version)*
*A few google accounts(this is important)
*a little bit of patience*
I tried my best to make it as user friendly as possible. But you should find bugs here and there. I'm still testing the capabilities of this bot. So, I don't know for sure if it would work for everyone. But it won't be a garbage I assure you.
How to use it?
Once installed ytbot, go to the terminal and write "ytbot configure" then follow the instructions there.
You will need to provide the path to a chromium-browser that you installed(developer version of course)
after that, add a few google accounts(remember to give right infos) it's necessary because of the way this bot works. The mechanics is like that you get 1 view per account every 10 minutes. So, 10 account means 10 views per 10 minutes. 20 account means 20 views per 10 minutes! and so on. Cool right?
But beware! It will eat alot of ram.
then it will ask for youtube video url. provide as many as you want.But keep the number less than 10 for better performance.
When the configuration is complete, run the bot symply by "ytbot run" or "ytbot run -h" for headless.
| ytbot | /ytbot-0.2.10.tar.gz/ytbot-0.2.10/README.rst | README.rst |
# ytcc - The YouTube channel checker
# Copyright (C) 2021 Wolfgang Popp
#
# This file is part of ytcc.
#
# ytcc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ytcc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ytcc. If not, see <http://www.gnu.org/licenses/>.
set -o pipefail
set -o errexit
set -o nounset
declare -r -x THUMBNAIL_DIR=${XDG_CACHE_HOME:-$HOME/.cache}/ytccf/thumbnails
FILTERS=()
KEY_BINDINGS="
tab: select/deselect
enter: play video(s)
alt-enter: play audio track(s)
alt-d: download video(s)
alt-r: update
alt-m: mark selection as watched
alt-u: mark last watched video as unwatched
alt-h: show help"
if command -v ueberzug &> /dev/null || [[ $TERM == "xterm-kitty" ]]; then
THUMBNAILS=1
else
THUMBNAILS=0
fi
function draw_preview() { :; }
function clear_preview() { :; }
function init_preview() { :; }
function finalize_preview() { :; }
function fetch_thumbnails() { :; }
function calculate_preview_size() { :; }
function check_cmd() {
if ! command -v "$1" &> /dev/null; then
echo "Command '$1' not found. Aborting."
exit 1
fi
}
function usage() {
cat <<EOF
Usage: $0 [OPTIONS]
An interactive terminal user interface for ytcc based on fzf. The options
filter which videos are shown. All unwatched videos are shown by default.
OPTIONS:
-c, --tags COMMA_SEPARATED_VALUES
Listed videos must be tagged with one of the
given tags.
-s, --since [%Y-%m-%d] Listed videos must be published after the
given date.
-t, --till [%Y-%m-%d] Listed videos must be published before the
given date.
-p, --playlists COMMA_SEPARATED_VALUES
Listed videos must be in on of the given
playlists.
-w, --watched Only watched videos are listed.
-u, --unwatched Only unwatched videos are listed.
--clear-thumbnails Empty the thumbnail cache.
--no-thumbnails Do not display thumbnails.
-h, --help Show this message and exit.
KEY BINDINGS:$KEY_BINDINGS
For more keybindings see fzf(1).
EOF
}
while [[ $# -gt 0 ]]; do
key="$1"
[[ $# -gt 1 ]] && printf -v safe_val %q "$2"
case $key in
-p | --playlists)
FILTERS+=(-p "$safe_val")
shift
shift
;;
-c | --tags)
FILTERS+=( -c "$safe_val")
shift
shift
;;
-s | --since)
FILTERS+=( -s "$safe_val")
shift
shift
;;
-t | --till)
FILTERS+=(-t "$safe_val")
shift
shift
;;
-w | --watched)
FILTERS+=(-w)
shift
;;
-u | --unwatched)
FILTERS+=(-u)
shift
;;
-h | --help)
usage
exit
;;
--no-thumbnails)
THUMBNAILS=0
shift
;;
--clear-thumbnails)
[[ -d "$THUMBNAIL_DIR" ]] && rm -r "$THUMBNAIL_DIR" && echo "Successfully cleared thumbnail cache"
exit
;;
*)
echo "Unknown option: $key"
echo "Try $0 --help for help."
exit 1
;;
esac
done
check_cmd ytcc
check_cmd fzf
check_cmd stty
# shellcheck disable=SC2016
TABLE_WIDTH_CMD='$(($(stty size < /dev/tty | cut -d" " -f2) - 3))'
if [[ $THUMBNAILS -eq 1 ]]; then
# shellcheck disable=SC2016
TABLE_WIDTH_CMD='$((2 * $(stty size < /dev/tty | cut -d" " -f2) / 3 - 3))'
check_cmd curl
function fetch_thumbnails() {
local -a curl_args=()
for line in $(ytcc --output xsv list --attributes id,thumbnail_url "${FILTERS[@]}"); do
read -r -a arr <<< "${line/,/ }"
if ((${#arr[@]} > 1)) && ! [[ -e $THUMBNAIL_DIR/${arr[0]} ]]; then
curl_args+=( -o "$THUMBNAIL_DIR/${arr[0]}" "${arr[1]}")
fi
done
mkdir -p "$THUMBNAIL_DIR"
if (( ${#curl_args[@]} > 0 )); then
echo INFO: Fetching thumbnails
curl -L --silent "${curl_args[@]}"
fi
}
function calculate_preview_size() {
local term_lines term_cols
read -r term_lines term_cols < <(stty size </dev/tty)
X=$((2 * term_cols / 3))
Y=3
LINES=$((term_lines / 2 - 3))
COLUMNS=$((term_cols / 3 ))
}
if [[ $TERM == "xterm-kitty" ]]; then
function draw_preview {
calculate_preview_size
kitty icat --transfer-mode file -z=-1 --place=${COLUMNS}x${LINES}@${X}x${Y} --scale-up "${@}"
}
else
check_cmd ueberzug
UEBERZUG_FIFO="$(mktemp --dry-run --suffix "fzf-$$-ueberzug")"
declare -r -x UEBERZUG_FIFO
declare -r -x PREVIEW_ID="preview"
function draw_preview {
calculate_preview_size
local -A cmd=( \
[action]=add
[identifier]="${PREVIEW_ID}" \
[x]="${X}" [y]="${Y}" \
[width]="${COLUMNS}" [height]="${LINES}" \
[scaler]=fit_contain [scaling_position_x]=0.5 [scaling_position_y]=0.5 \
[path]="${@}"
)
declare -p cmd >"${UEBERZUG_FIFO}"
}
function clear_preview {
local -A cmd=( \
[action]=remove [identifier]="${PREVIEW_ID}" \
)
declare -p cmd >"${UEBERZUG_FIFO}"
}
function init_preview {
mkfifo "${UEBERZUG_FIFO}"
ueberzug layer --parser bash --silent <"${UEBERZUG_FIFO}" &
# prevent EOF
exec 3>"${UEBERZUG_FIFO}"
}
function finalize_preview {
exec 3>&-
rm "${UEBERZUG_FIFO}" &>/dev/null
kill "$(jobs -p)" &>/dev/null
}
fi
fi
MAKE_TABLE="ytcc --output table --truncate \"$TABLE_WIDTH_CMD\" list --attributes id,title,publish_date,duration,playlists ${FILTERS[*]}"
init_preview
trap finalize_preview EXIT
fetch_thumbnails
export -f draw_preview clear_preview fetch_thumbnails calculate_preview_size
eval "$MAKE_TABLE" |
SHELL=$(command -v bash) fzf --preview "draw_preview $THUMBNAIL_DIR/{1}; ytcc --output plain list --watched --unwatched -a title,playlists,description -i {1}" \
--multi \
--layout reverse \
--preview-window down:50%:wrap \
--bind "enter:execute%clear_preview; ytcc play {+1}%+reload%$MAKE_TABLE%" \
--bind "alt-enter:execute%clear_preview; ytcc play --audio-only {+1}%+reload%$MAKE_TABLE%" \
--bind "alt-d:execute%clear_preview; ytcc download {+1}%+reload%$MAKE_TABLE%" \
--bind "alt-r:execute%clear_preview; ytcc update; fetch_thumbnails%+reload%$MAKE_TABLE%" \
--bind "alt-h:execute%clear_preview; echo 'Key bindings:$KEY_BINDINGS' | less%+reload%$MAKE_TABLE%" \
--bind "alt-m:reload%ytcc mark {+1}; $MAKE_TABLE%" \
--bind "alt-u:reload%ytcc ls --order-by watched desc --watched | head -n1 | ytcc unmark; $MAKE_TABLE%" \
--header-lines 2 | ytcc | /ytcc-2.6.0-py3-none-any.whl/ytcc-2.6.0.data/scripts/ytccf.sh | ytccf.sh |
## Data cleaning
## Requirements
```
pip3 install bs4 markdownify
pip3 install polyglot pyicu pycld2
```
## Steps
```
# Convert html to markdown
python3 -m fastchat.data.clean_sharegpt --in sharegpt_html.json --out sharegpt_clean.json
# Keep or remove specific languages
python3 -m fastchat.data.optional_clean --in sharegpt_clean.json --out sharegpt_clean_lang.json --skip-lang SOME_LANGUAGE_CODE
# Split long conversations
python3 -m fastchat.data.split_long_conversation --in sharegpt_clean_lang.json --out sharegpt_clean_lang_split.json --model-name /home/ubuntu/model_weights/llama-7b/
```
| ytchat | /ytchat-0.0.16-py3-none-any.whl/docs/commands/data_cleaning.md | data_cleaning.md |
### Local GPU cluster (node-01)
```
python3 -m fastchat.serve.controller --host 0.0.0.0 --port 10002
CUDA_VISIBLE_DEVICES=0 python3 -m fastchat.serve.model_worker --model-path ~/model_weights/vicuna-13b/ --controller http://localhost:10002 --port 31000 --worker http://localhost:31000
CUDA_VISIBLE_DEVICES=1 python3 -m fastchat.serve.model_worker --model-path ~/model_weights/vicuna-13b/ --controller http://localhost:10002 --port 31001 --worker http://localhost:31001
CUDA_VISIBLE_DEVICES=2 python3 -m fastchat.serve.model_worker --model-path ~/model_weights/bair-chat-13b/ --controller http://localhost:10002 --port 31002 --worker http://localhost:31002
CUDA_VISIBLE_DEVICES=3 python3 -m fastchat.serve.model_worker --model-path ~/model_weights/alpaca-chat-13b/ --controller http://localhost:10002 --port 31003 --worker http://localhost:31003
python3 -m fastchat.serve.test_message --model vicuna-13b --controller http://localhost:10002
```
### Web server
```
python3 -m fastchat.serve.controller --host 0.0.0.0 --port 21001
python3 -m fastchat.serve.register_worker --controller http://localhost:21001 --worker-name https://
python3 -m fastchat.serve.test_message --model vicuna-13b --controller http://localhost:21001
python3 -m fastchat.serve.gradio_web_server --controller http://localhost:21001
```
### Local GPU cluster (node-02)
```
CUDA_VISIBLE_DEVICES=0 python3 -m fastchat.serve.model_worker --model-path ~/model_weights/vicuna-13b/ --controller http://node-01:10002 --host 0.0.0.0 --port 31000 --worker http://$(hostname):31000
CUDA_VISIBLE_DEVICES=1 python3 -m fastchat.serve.model_worker --model-path ~/model_weights/vicuna-13b/ --controller http://node-01:10002 --host 0.0.0.0 --port 31001 --worker http://$(hostname):31001
CUDA_VISIBLE_DEVICES=2 python3 -m fastchat.serve.model_worker --model-path ~/model_weights/vicuna-13b/ --controller http://node-01:10002 --host 0.0.0.0 --port 31002 --worker http://$(hostname):31002
CUDA_VISIBLE_DEVICES=3 python3 -m fastchat.serve.model_worker --model-path ~/model_weights/vicuna-13b/ --controller http://node-01:10002 --host 0.0.0.0 --port 31003 --worker http://$(hostname):31003
```
| ytchat | /ytchat-0.0.16-py3-none-any.whl/docs/commands/local_cluster.md | local_cluster.md |
### Install
```
sudo apt update
sudo apt install tmux htop
wget https://repo.anaconda.com/archive/Anaconda3-2022.10-Linux-x86_64.sh
bash Anaconda3-2022.10-Linux-x86_64.sh
conda create -n fastchat python=3.9
conda activate fastchat
git clone https://github.com/lm-sys/FastChat.git
cd FastChat
pip3 install -e .
# Install the latest main branch of huggingface/transformers
pip3 install git+https://github.com/huggingface/transformers
```
### Launch servers
```
python3 -m fastchat.serve.controller --host 0.0.0.0 --port 21001
python3 -m fastchat.serve.register_worker --controller http://localhost:21001 --worker-name https://
python3 -m fastchat.serve.test_message --model vicuna-13b --controller http://localhost:21001
export OPENAI_API_KEY=
python3 -m fastchat.serve.gradio_web_server --controller http://localhost:21001 --moderate --concurrency 20
```
### Increase the limit of max open files
One process (do not need reboot)
```
sudo prlimit --nofile=1048576:1048576 --pid=$id
for id in $(ps -ef | grep gradio_web_server | awk '{print $2}'); do echo $id; prlimit --nofile=1048576:1048576 --pid=$id; done
```
System (need reboot): Add the lines below to `/etc/security/limits.conf`
```
* hard nofile 65535
* soft nofile 65535
```
| ytchat | /ytchat-0.0.16-py3-none-any.whl/docs/commands/webserver.md | webserver.md |
import logging
import logging.handlers
import os
import sys
import json
import warnings
import platform
import requests
import torch
from fastchat.constants import LOGDIR
server_error_msg = (
"**NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.**"
)
moderation_msg = (
"YOUR INPUT VIOLATES OUR CONTENT MODERATION GUIDELINES. PLEASE TRY AGAIN."
)
handler = None
def build_logger(logger_name, logger_filename):
global handler
formatter = logging.Formatter(
fmt="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
# Set the format of root handlers
if not logging.getLogger().handlers:
if sys.version_info[1] >= 9:
# This is for windows
logging.basicConfig(level=logging.INFO, encoding="utf-8")
else:
if platform.system() == "Windows":
warnings.warn("If you are running on Windows, "
"we recommend you use Python >= 3.9 for UTF-8 encoding.")
logging.basicConfig(level=logging.INFO)
logging.getLogger().handlers[0].setFormatter(formatter)
# Redirect stdout and stderr to loggers
stdout_logger = logging.getLogger("stdout")
stdout_logger.setLevel(logging.INFO)
sl = StreamToLogger(stdout_logger, logging.INFO)
sys.stdout = sl
stderr_logger = logging.getLogger("stderr")
stderr_logger.setLevel(logging.ERROR)
sl = StreamToLogger(stderr_logger, logging.ERROR)
sys.stderr = sl
# Get logger
logger = logging.getLogger(logger_name)
logger.setLevel(logging.INFO)
# Add a file handler for all loggers
if handler is None:
os.makedirs(LOGDIR, exist_ok=True)
filename = os.path.join(LOGDIR, logger_filename)
handler = logging.handlers.TimedRotatingFileHandler(
filename, when="D", utc=True
)
handler.setFormatter(formatter)
for name, item in logging.root.manager.loggerDict.items():
if isinstance(item, logging.Logger):
item.addHandler(handler)
return logger
class StreamToLogger(object):
"""
Fake file-like stream object that redirects writes to a logger instance.
"""
def __init__(self, logger, log_level=logging.INFO):
self.terminal = sys.stdout
self.logger = logger
self.log_level = log_level
self.linebuf = ""
def __getattr__(self, attr):
return getattr(self.terminal, attr)
def write(self, buf):
temp_linebuf = self.linebuf + buf
self.linebuf = ""
for line in temp_linebuf.splitlines(True):
# From the io.TextIOWrapper docs:
# On output, if newline is None, any '\n' characters written
# are translated to the system default line separator.
# By default sys.stdout.write() expects '\n' newlines and then
# translates them so this is still cross platform.
if line[-1] == "\n":
encoded_message = line.encode("utf-8", "ignore").decode("utf-8")
self.logger.log(self.log_level, encoded_message.rstrip())
else:
self.linebuf += line
def flush(self):
if self.linebuf != "":
encoded_message = self.linebuf.encode("utf-8", "ignore").decode("utf-8")
self.logger.log(self.log_level, encoded_message.rstrip())
self.linebuf = ""
def disable_torch_init():
"""
Disable the redundant torch default initialization to accelerate model creation.
"""
import torch
setattr(torch.nn.Linear, "reset_parameters", lambda self: None)
setattr(torch.nn.LayerNorm, "reset_parameters", lambda self: None)
def violates_moderation(text):
"""
Check whether the text violates OpenAI moderation API.
"""
url = "https://api.openai.com/v1/moderations"
headers = {
"Content-Type": "application/json",
"Authorization": "Bearer " + os.environ["OPENAI_API_KEY"],
}
text = text.replace("\n", "")
data = "{" + '"input": ' + f'"{text}"' + "}"
data = data.encode("utf-8")
try:
ret = requests.post(url, headers=headers, data=data, timeout=5)
flagged = ret.json()["results"][0]["flagged"]
except requests.exceptions.RequestException as e:
flagged = False
except KeyError as e:
flagged = False
return flagged
# Flan-t5 trained with HF+FSDP saves corrupted weights for shared embeddings,
# Use this function to make sure it can be correctly loaded.
def clean_flant5_ckpt(ckpt_path):
index_file = os.path.join(ckpt_path, "pytorch_model.bin.index.json")
index_json = json.load(open(index_file, "r"))
weightmap = index_json["weight_map"]
share_weight_file = weightmap["shared.weight"]
share_weight = torch.load(os.path.join(ckpt_path, share_weight_file))[
"shared.weight"
]
for weight_name in ["decoder.embed_tokens.weight", "encoder.embed_tokens.weight"]:
weight_file = weightmap[weight_name]
weight = torch.load(os.path.join(ckpt_path, weight_file))
weight[weight_name] = share_weight
torch.save(weight, os.path.join(ckpt_path, weight_file))
def pretty_print_semaphore(semaphore):
if semaphore is None:
return "None"
return f"Semaphore(value={semaphore._value}, locked={semaphore.locked()})" | ytchat | /ytchat-0.0.16-py3-none-any.whl/fastchat/utils.py | utils.py |
import dataclasses
from enum import auto, Enum
from typing import List, Tuple, Any
class SeparatorStyle(Enum):
"""Different separator style."""
ADD_COLON_SINGLE = auto()
ADD_COLON_TWO = auto()
NO_COLON_SINGLE = auto()
BAIZE = auto()
DOLLY = auto()
RWKV = auto()
@dataclasses.dataclass
class Conversation:
"""A class that keeps all conversation history."""
# System prompts
system: str
# Two roles
roles: List[str]
# All messages
messages: List[List[str]]
# Offset of few shot examples
offset: int
# Separator
sep_style: SeparatorStyle
sep: str
sep2: str = None
# Stop criteria (the default one is EOS token)
stop_str: str = None
# Stops generation if meeting any token in this list
stop_token_ids: List[int] = None
# Used for the state in the gradio servers.
# TODO(lmzheng): refactor this
conv_id: Any = None
skip_next: bool = False
model_name: str = None
def get_prompt(self):
if self.sep_style == SeparatorStyle.ADD_COLON_SINGLE:
ret = self.system + self.sep
for role, message in self.messages:
if message:
ret += role + ": " + message + self.sep
else:
ret += role + ":"
return ret
elif self.sep_style == SeparatorStyle.ADD_COLON_TWO:
seps = [self.sep, self.sep2]
ret = self.system + seps[0]
for i, (role, message) in enumerate(self.messages):
if message:
ret += role + ": " + message + seps[i % 2]
else:
ret += role + ":"
return ret
elif self.sep_style == SeparatorStyle.NO_COLON_SINGLE:
ret = self.system
for role, message in self.messages:
if message:
ret += role + message + self.sep
else:
ret += role
return ret
elif self.sep_style == SeparatorStyle.BAIZE:
ret = self.system + "\n"
for role, message in self.messages:
if message:
ret += role + message + "\n"
else:
ret += role
return ret
elif self.sep_style == SeparatorStyle.DOLLY:
seps = [self.sep, self.sep2]
ret = self.system
for i, (role, message) in enumerate(self.messages):
if message:
ret += role + ":\n" + message + seps[i % 2]
if i % 2 == 1:
ret += "\n\n"
else:
ret += role + ":\n"
return ret
elif self.sep_style == SeparatorStyle.RWKV:
ret = self.system
for i, (role, message) in enumerate(self.messages):
if message:
ret += (
role
+ ": "
+ message.replace("\r\n", "\n").replace("\n\n", "\n")
)
ret += "\n\n"
else:
ret += role + ":"
return ret
else:
raise ValueError(f"Invalid style: {self.sep_style}")
def append_message(self, role, message):
self.messages.append([role, message])
def to_gradio_chatbot(self):
ret = []
for i, (role, msg) in enumerate(self.messages[self.offset :]):
if i % 2 == 0:
ret.append([msg, None])
else:
ret[-1][-1] = msg
return ret
def copy(self):
return Conversation(
system=self.system,
roles=self.roles,
messages=[[x, y] for x, y in self.messages],
offset=self.offset,
sep_style=self.sep_style,
sep=self.sep,
sep2=self.sep2,
stop_str=self.stop_str,
stop_token_ids=self.stop_token_ids,
conv_id=self.conv_id,
model_name=self.model_name,
)
def dict(self):
return {
"system": self.system,
"roles": self.roles,
"messages": self.messages,
"offset": self.offset,
"conv_id": self.conv_id,
"model_name": self.model_name,
}
# A template with one conversation example
conv_one_shot = Conversation(
system="A chat between a curious human and an artificial intelligence assistant. "
"The assistant gives helpful, detailed, and polite answers to the human's questions.",
roles=("Human", "Assistant"),
messages=(
(
"Human",
"What are the key differences between renewable and non-renewable energy sources?",
),
(
"Assistant",
"Renewable energy sources are those that can be replenished naturally in a relatively "
"short amount of time, such as solar, wind, hydro, geothermal, and biomass. "
"Non-renewable energy sources, on the other hand, are finite and will eventually be "
"depleted, such as coal, oil, and natural gas. Here are some key differences between "
"renewable and non-renewable energy sources:\n"
"1. Availability: Renewable energy sources are virtually inexhaustible, while non-renewable "
"energy sources are finite and will eventually run out.\n"
"2. Environmental impact: Renewable energy sources have a much lower environmental impact "
"than non-renewable sources, which can lead to air and water pollution, greenhouse gas emissions, "
"and other negative effects.\n"
"3. Cost: Renewable energy sources can be more expensive to initially set up, but they typically "
"have lower operational costs than non-renewable sources.\n"
"4. Reliability: Renewable energy sources are often more reliable and can be used in more remote "
"locations than non-renewable sources.\n"
"5. Flexibility: Renewable energy sources are often more flexible and can be adapted to different "
"situations and needs, while non-renewable sources are more rigid and inflexible.\n"
"6. Sustainability: Renewable energy sources are more sustainable over the long term, while "
"non-renewable sources are not, and their depletion can lead to economic and social instability.",
),
),
offset=2,
sep_style=SeparatorStyle.ADD_COLON_SINGLE,
sep="\n### ",
stop_str="###",
)
# Vicuna v1.1 template
conv_vicuna_v1_1 = Conversation(
system="A chat between a curious user and an artificial intelligence assistant. "
"The assistant gives helpful, detailed, and polite answers to the user's questions.",
roles=("USER", "ASSISTANT"),
messages=(),
offset=0,
sep_style=SeparatorStyle.ADD_COLON_TWO,
sep=" ",
sep2="</s>",
)
# Koala default template
conv_koala_v1 = Conversation(
system="BEGINNING OF CONVERSATION:",
roles=("USER", "GPT"),
messages=(),
offset=0,
sep_style=SeparatorStyle.ADD_COLON_TWO,
sep=" ",
sep2="</s>",
)
# Dolly V2 default template
conv_dolly = Conversation(
system="Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n",
roles=("### Instruction", "### Response"),
messages=(),
offset=0,
sep_style=SeparatorStyle.DOLLY,
sep="\n\n",
sep2="### End",
)
# OpenAssistant Pythia default template
conv_oasst = Conversation(
system="",
roles=("<|prompter|>", "<|assistant|>"),
messages=(),
offset=0,
sep_style=SeparatorStyle.NO_COLON_SINGLE,
sep="<|endoftext|>",
)
# StableLM Alpha default template
conv_stablelm = Conversation(
system="""<|SYSTEM|># StableLM Tuned (Alpha version)
- StableLM is a helpful and harmless open-source AI language model developed by StabilityAI.
- StableLM is excited to be able to help the user, but will refuse to do anything that could be considered harmful to the user.
- StableLM is more than just an information source, StableLM is also able to write poetry, short stories, and make jokes.
- StableLM will refuse to participate in anything that could harm a human.
""",
roles=("<|USER|>", "<|ASSISTANT|>"),
messages=(),
offset=0,
sep_style=SeparatorStyle.NO_COLON_SINGLE,
sep="",
stop_token_ids=[50278, 50279, 50277, 1, 0],
)
# Baize default template
conv_baize = Conversation(
system="The following is a conversation between a human and an AI assistant named Baize (named after a mythical creature in Chinese folklore). Baize is an open-source AI assistant developed by UCSD and Sun Yat-Sen University. The human and the AI assistant take turns chatting. Human statements start with [|Human|] and AI assistant statements start with [|AI|]. The AI assistant always provides responses in as much detail as possible, and in Markdown format. The AI assistant always declines to engage with topics, questions and instructions related to unethical, controversial, or sensitive issues. Complete the transcript in exactly that format.",
roles=("[|Human|]", "[|AI|]"),
messages=(
("[|Human|]", "Hello!"),
("[|AI|]", "Hi!"),
),
offset=2,
sep_style=SeparatorStyle.BAIZE,
sep="[|Human|]",
stop_str="[|Human|]",
)
# RWKV-4-Raven default template
conv_rwkv = Conversation(
system="",
roles=("Bob", "Alice"),
messages=(),
offset=0,
sep_style=SeparatorStyle.RWKV,
sep="",
stop_str="\n\n",
)
conv_templates = {
"baize": conv_baize,
"conv_one_shot": conv_one_shot,
"dolly": conv_dolly,
"koala_v1": conv_koala_v1,
"oasst": conv_oasst,
"stablelm": conv_stablelm,
"vicuna_v1.1": conv_vicuna_v1_1,
"rwkv": conv_rwkv,
}
def get_default_conv_template(model_name):
model_name = model_name.lower()
if "vicuna" in model_name or "output" in model_name:
return conv_vicuna_v1_1
elif "koala" in model_name:
return conv_koala_v1
elif "dolly-v2" in model_name:
return conv_dolly
elif "oasst" in model_name and "pythia" in model_name:
return conv_oasst
elif "baize" in model_name:
return conv_baize
elif "stablelm" in model_name:
return conv_stablelm
elif "rwkv-4" in model_name:
return conv_rwkv
return conv_one_shot
if __name__ == "__main__":
conv = conv_templates["vicuna_v1.1"].copy()
conv.append_message(conv.roles[0], "Hello!")
conv.append_message(conv.roles[1], "Hi!")
conv.append_message(conv.roles[0], "How are you?")
conv.append_message(conv.roles[1], None)
print(conv.get_prompt()) | ytchat | /ytchat-0.0.16-py3-none-any.whl/fastchat/conversation.py | conversation.py |
import math
from typing import List, Optional, Tuple
import torch
from torch import nn
import transformers
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2].clone()
x2 = x[..., x.shape[-1] // 2 :].clone()
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(q, k, cos, sin, position_ids):
gather_indices = position_ids[:, None, :, None] # [bs, 1, seq_len, 1]
gather_indices = gather_indices.repeat(1, cos.shape[1], 1, cos.shape[3])
cos = torch.gather(cos.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices)
sin = torch.gather(sin.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
output_attentions: bool = False,
use_cache: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
bsz, q_len, _ = hidden_states.size()
query_states = (
self.q_proj(hidden_states)
.view(bsz, q_len, self.num_heads, self.head_dim)
.transpose(1, 2)
)
key_states = (
self.k_proj(hidden_states)
.view(bsz, q_len, self.num_heads, self.head_dim)
.transpose(1, 2)
)
value_states = (
self.v_proj(hidden_states)
.view(bsz, q_len, self.num_heads, self.head_dim)
.transpose(1, 2)
)
kv_seq_len = key_states.shape[-2]
if past_key_value is not None:
kv_seq_len += past_key_value[0].shape[-2]
cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
query_states, key_states = apply_rotary_pos_emb(
query_states, key_states, cos, sin, position_ids
)
# [bsz, nh, t, hd]
if past_key_value is not None:
# reuse k, v, self_attention
key_states = torch.cat([past_key_value[0], key_states], dim=2)
value_states = torch.cat([past_key_value[1], value_states], dim=2)
past_key_value = (key_states, value_states) if use_cache else None
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(
self.head_dim
)
if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
raise ValueError(
f"Attention weights should be of size {(bsz * self.num_heads, q_len, kv_seq_len)}, but is"
f" {attn_weights.size()}"
)
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
raise ValueError(
f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
)
attn_weights = attn_weights + attention_mask
attn_weights = torch.max(
attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min)
)
# upcast attention to fp32
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(
query_states.dtype
)
attn_output = torch.matmul(attn_weights, value_states)
if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
f" {attn_output.size()}"
)
attn_output = attn_output.transpose(1, 2)
attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
attn_output = self.o_proj(attn_output)
if not output_attentions:
attn_weights = None
return attn_output, attn_weights, past_key_value
def replace_llama_attn_with_non_inplace_operations():
"""Avoid bugs in mps backend by not using in-place operations."""
transformers.models.llama.modeling_llama.LlamaAttention.forward = forward | ytchat | /ytchat-0.0.16-py3-none-any.whl/fastchat/serve/monkey_patch_non_inplace.py | monkey_patch_non_inplace.py |
import dataclasses
import gc
import glob
import os
from accelerate import init_empty_weights
from accelerate.utils import set_module_tensor_to_device
import torch
from torch import Tensor
import torch.nn as nn
from torch.nn import functional as F
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModelForCausalLM, AutoConfig
@dataclasses.dataclass
class CompressionConfig:
"""Group-wise quantization."""
num_bits: int
group_size: int
group_dim: int
symmetric: bool
enabled: bool = True
default_compression_config = CompressionConfig(
num_bits=8, group_size=256, group_dim=1, symmetric=True, enabled=True
)
class CLinear(nn.Module):
"""Compressed Linear Layer."""
def __init__(self, weight=None, bias=None, device=None):
super().__init__()
if weight is None:
self.weight = None
elif isinstance(weight, Tensor):
self.weight = compress(weight.data.to(device), default_compression_config)
else:
self.weight = weight
self.bias = bias
def forward(self, input: Tensor) -> Tensor:
weight = decompress(self.weight, default_compression_config)
return F.linear(input.to(weight.dtype), weight, self.bias)
def compress_module(module, target_device):
for attr_str in dir(module):
target_attr = getattr(module, attr_str)
if type(target_attr) == torch.nn.Linear:
setattr(
module,
attr_str,
CLinear(target_attr.weight, target_attr.bias, target_device),
)
for name, child in module.named_children():
compress_module(child, target_device)
def get_compressed_list(module, prefix=''):
compressed_list = []
for attr_str in dir(module):
target_attr = getattr(module, attr_str)
if type(target_attr) == torch.nn.Linear:
full_name = f"{prefix}.{attr_str}.weight" if prefix else f"{attr_str}.weight"
compressed_list.append(full_name)
for name, child in module.named_children():
child_prefix = f"{prefix}.{name}" if prefix else name
for each in get_compressed_list(child, child_prefix):
compressed_list.append(each)
return compressed_list
def apply_compressed_weight(module, compressed_state_dict, target_device, prefix=''):
for attr_str in dir(module):
target_attr = getattr(module, attr_str)
if type(target_attr) == torch.nn.Linear:
full_name = f"{prefix}.{attr_str}.weight" if prefix else f"{attr_str}.weight"
setattr(module, attr_str,
CLinear(compressed_state_dict[full_name], target_attr.bias, target_device))
for name, child in module.named_children():
child_prefix = f"{prefix}.{name}" if prefix else name
apply_compressed_weight(child, compressed_state_dict, target_device, child_prefix)
def load_compress_model(model_path, device, torch_dtype):
# partially load model
tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
base_pattern = os.path.join(model_path, "pytorch_model-*.bin")
files = glob.glob(base_pattern)
with init_empty_weights():
config = AutoConfig.from_pretrained(model_path, low_cpu_mem_usage=True,
torch_dtype=torch_dtype)
model = AutoModelForCausalLM.from_config(config)
linear_weights = get_compressed_list(model)
compressed_state_dict = {}
for filename in tqdm(files):
tmp_state_dict = torch.load(filename)
for name in tmp_state_dict:
if name in linear_weights:
tensor = tmp_state_dict[name].to(device).data.to(torch_dtype)
compressed_state_dict[name] = compress(tensor, default_compression_config)
else:
compressed_state_dict[name] = tmp_state_dict[name].to(device)
tmp_state_dict[name] = None
tensor = None
gc.collect()
torch.cuda.empty_cache()
for name in model.state_dict():
if name not in linear_weights:
set_module_tensor_to_device(model, name, device, value=compressed_state_dict[name])
apply_compressed_weight(model, compressed_state_dict, device)
model.to(device)
return model, tokenizer
def compress(tensor, config):
"""Simulate group-wise quantization."""
if not config.enabled:
return tensor
group_size, num_bits, group_dim, symmetric = (
config.group_size,
config.num_bits,
config.group_dim,
config.symmetric,
)
assert num_bits <= 8
original_shape = tensor.shape
num_groups = (original_shape[group_dim] + group_size - 1) // group_size
new_shape = (
original_shape[:group_dim]
+ (num_groups, group_size)
+ original_shape[group_dim + 1 :]
)
# Pad
pad_len = (group_size - original_shape[group_dim] % group_size) % group_size
if pad_len != 0:
pad_shape = (
original_shape[:group_dim] + (pad_len,) + original_shape[group_dim + 1 :]
)
tensor = torch.cat(
[tensor, torch.zeros(pad_shape, dtype=tensor.dtype, device=tensor.device)],
dim=group_dim,
)
data = tensor.view(new_shape)
# Quantize
if symmetric:
B = 2 ** (num_bits - 1) - 1
scale = B / torch.max(data.abs(), dim=group_dim + 1, keepdim=True)[0]
data = data * scale
data = data.clamp_(-B, B).round_().to(torch.int8)
return data, scale, original_shape
else:
B = 2**num_bits - 1
mn = torch.min(data, dim=group_dim + 1, keepdim=True)[0]
mx = torch.max(data, dim=group_dim + 1, keepdim=True)[0]
scale = B / (mx - mn)
data = data - mn
data.mul_(scale)
data = data.clamp_(0, B).round_().to(torch.uint8)
return data, mn, scale, original_shape
def decompress(packed_data, config):
"""Simulate group-wise dequantization."""
if not config.enabled:
return packed_data
group_size, num_bits, group_dim, symmetric = (
config.group_size,
config.num_bits,
config.group_dim,
config.symmetric,
)
# Dequantize
if symmetric:
data, scale, original_shape = packed_data
data = data / scale
else:
data, mn, scale, original_shape = packed_data
data = data / scale
data.add_(mn)
# Unpad
pad_len = (group_size - original_shape[group_dim] % group_size) % group_size
if pad_len:
padded_original_shape = (
original_shape[:group_dim]
+ (original_shape[group_dim] + pad_len,)
+ original_shape[group_dim + 1 :]
)
data = data.reshape(padded_original_shape)
indices = [slice(0, x) for x in original_shape]
return data[indices].contiguous()
else:
return data.view(original_shape) | ytchat | /ytchat-0.0.16-py3-none-any.whl/fastchat/serve/compression.py | compression.py |
import json
import time
import gradio as gr
import numpy as np
from fastchat.conversation import get_default_conv_template
from fastchat.utils import (
build_logger,
violates_moderation,
moderation_msg,
)
from fastchat.serve.gradio_patch import Chatbot as grChatbot
from fastchat.serve.gradio_web_server import (
http_bot,
get_conv_log_filename,
no_change_btn,
enable_btn,
disable_btn,
model_description_md,
learn_more_md,
)
logger = build_logger("gradio_web_server_multi", "gradio_web_server_multi.log")
num_models = 2
enable_moderation = False
anony_names = ["", ""]
models = []
def set_global_vars_anony(enable_moderation_):
global enable_moderation
enable_moderation = enable_moderation_
def load_demo_side_by_side_anony(models_, url_params):
global models
models = models_
states = (None,) * num_models
selector_updates = (
gr.Markdown.update(visible=True),
gr.Markdown.update(visible=True),
)
return (
states
+ selector_updates
+ (gr.Chatbot.update(visible=True),) * num_models
+ (
gr.Textbox.update(visible=True),
gr.Box.update(visible=True),
gr.Row.update(visible=True),
gr.Row.update(visible=True),
gr.Accordion.update(visible=True),
)
)
def vote_last_response(states, vote_type, model_selectors, request: gr.Request):
with open(get_conv_log_filename(), "a") as fout:
data = {
"tstamp": round(time.time(), 4),
"type": vote_type,
"models": [x for x in model_selectors],
"states": [x.dict() for x in states],
"ip": request.client.host,
}
fout.write(json.dumps(data) + "\n")
if ":" not in model_selectors[0]:
for i in range(15):
names = ("### Model A: " + states[0].model_name, "### Model B: " + states[1].model_name)
yield names + ("",) + (disable_btn,) * 4
time.sleep(0.2)
else:
names = ("### Model A: " + states[0].model_name, "### Model B: " + states[1].model_name)
yield names + ("",) + (disable_btn,) * 4
def leftvote_last_response(
state0, state1, model_selector0, model_selector1, request: gr.Request
):
logger.info(f"leftvote (anony). ip: {request.client.host}")
for x in vote_last_response(
[state0, state1], "leftvote", [model_selector0, model_selector1], request
):
yield x
def rightvote_last_response(
state0, state1, model_selector0, model_selector1, request: gr.Request
):
logger.info(f"rightvote (anony). ip: {request.client.host}")
for x in vote_last_response(
[state0, state1], "rightvote", [model_selector0, model_selector1], request
):
yield x
def tievote_last_response(
state0, state1, model_selector0, model_selector1, request: gr.Request
):
logger.info(f"tievote (anony). ip: {request.client.host}")
for x in vote_last_response(
[state0, state1], "tievote", [model_selector0, model_selector1], request
):
yield x
def bothbad_vote_last_response(
state0, state1, model_selector0, model_selector1, request: gr.Request
):
logger.info(f"bothbad_vote (anony). ip: {request.client.host}")
for x in vote_last_response(
[state0, state1], "bothbad_vote", [model_selector0, model_selector1], request
):
yield x
def regenerate(state0, state1, request: gr.Request):
logger.info(f"regenerate (anony). ip: {request.client.host}")
states = [state0, state1]
for i in range(num_models):
states[i].messages[-1][-1] = None
states[i].skip_next = False
return states + [x.to_gradio_chatbot() for x in states] + [""] + [disable_btn] * 6
def clear_history(request: gr.Request):
logger.info(f"clear_history (anony). ip: {request.client.host}")
return [None] * num_models + [None] * num_models + anony_names + [""] + [disable_btn] * 6
def share_click(state0, state1, model_selector0, model_selector1,
request: gr.Request):
logger.info(f"share (anony). ip: {request.client.host}")
if state0 is not None and state1 is not None:
vote_last_response(
[state0, state1], "share", [model_selector0, model_selector1], request
)
def add_text(state0, state1, text, request: gr.Request):
logger.info(f"add_text (anony). ip: {request.client.host}. len: {len(text)}")
states = [state0, state1]
if states[0] is None:
assert states[1] is None
weights = ([1, 1, 1, 1] + [1] * 32)[:len(models)]
if len(models) > 1:
weights = weights / np.sum(weights)
model_left, model_right = np.random.choice(
models, size=(2,), p=weights, replace=False)
else:
model_left = model_right = models[0]
states = [
get_default_conv_template("vicuna").copy(),
get_default_conv_template("vicuna").copy(),
]
states[0].model_name = model_left
states[1].model_name = model_right
if len(text) <= 0:
for i in range(num_models):
states[i].skip_next = True
return (
states
+ [x.to_gradio_chatbot() for x in states]
+ [""]
+ [
no_change_btn,
]
* 6
)
if enable_moderation:
flagged = violates_moderation(text)
if flagged:
logger.info(f"violate moderation (anony). ip: {request.client.host}. text: {text}")
for i in range(num_models):
states[i].skip_next = True
return (
states
+ [x.to_gradio_chatbot() for x in states]
+ [moderation_msg]
+ [
no_change_btn,
]
* 6
)
text = text[:1536] # Hard cut-off
for i in range(num_models):
states[i].append_message(states[i].roles[0], text)
states[i].append_message(states[i].roles[1], None)
states[i].skip_next = False
return (
states
+ [x.to_gradio_chatbot() for x in states]
+ [""]
+ [
disable_btn,
]
* 6
)
def http_bot_all(
state0,
state1,
model_selector0,
model_selector1,
temperature,
max_new_tokens,
request: gr.Request,
):
logger.info(f"http_bot_all (anony). ip: {request.client.host}")
if state0.skip_next:
# This generate call is skipped due to invalid inputs
yield (state0, state1, state0.to_gradio_chatbot(),
state1.to_gradio_chatbot()) + (no_change_btn,) * 6
return
states = [state0, state1]
model_selector = [state0.model_name, state1.model_name]
gen = []
for i in range(num_models):
gen.append(
http_bot(states[i], model_selector[i], temperature, max_new_tokens, request)
)
chatbots = [None] * num_models
while True:
stop = True
for i in range(num_models):
try:
ret = next(gen[i])
states[i], chatbots[i] = ret[0], ret[1]
stop = False
except StopIteration:
pass
yield states + chatbots + [disable_btn] * 6
if stop:
break
for i in range(10):
if i % 2 == 0:
yield states + chatbots + [disable_btn] * 4 + [enable_btn] * 2
else:
yield states + chatbots + [enable_btn] * 6
time.sleep(0.2)
def build_side_by_side_ui_anony(models):
notice_markdown = ("""
# ⚔️ Chatbot Arena ⚔️
Rules:
- Chat with two anonymous models side-by-side and vote for which one is better!
- The names of the models will be revealed after your vote.
- You can continue chating and voting or click "Clear history" to start a new round.
- A leaderboard will be available soon.
- [[GitHub]](https://github.com/lm-sys/FastChat) [[Twitter]](https://twitter.com/lmsysorg) [[Discord]](https://discord.gg/h6kCZb72G7)
### Terms of use
By using this service, users are required to agree to the following terms: The service is a research preview intended for non-commercial use only. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes. **The service collects user dialogue data for future research.**
The demo works better on desktop devices with a wide screen.
### The participated models
""" + model_description_md)
states = [gr.State() for _ in range(num_models)]
model_selectors = [None] * num_models
chatbots = [None] * num_models
notice = gr.Markdown(notice_markdown, elem_id="notice_markdown")
with gr.Box(elem_id="share-region-anony"):
with gr.Row():
for i in range(num_models):
with gr.Column():
model_selectors[i] = gr.Markdown(anony_names[i])
with gr.Row():
for i in range(num_models):
label = "Model A" if i == 0 else "Model B"
with gr.Column():
chatbots[i] = grChatbot(label=label, elem_id=f"chatbot{i}",
visible=False).style(height=550)
with gr.Box() as button_row:
with gr.Row():
leftvote_btn = gr.Button(value="👈 A is better", interactive=False)
rightvote_btn = gr.Button(value="👉 B is better", interactive=False)
tie_btn = gr.Button(value="🤝 Tie", interactive=False)
bothbad_btn = gr.Button(value="👎 Both are bad", interactive=False)
with gr.Row():
with gr.Column(scale=20):
textbox = gr.Textbox(
show_label=False,
placeholder="Enter text and press ENTER",
visible=False,
).style(container=False)
with gr.Column(scale=1, min_width=50):
send_btn = gr.Button(value="Send", visible=False)
with gr.Row() as button_row2:
regenerate_btn = gr.Button(value="🔄 Regenerate", interactive=False)
clear_btn = gr.Button(value="🗑️ Clear history", interactive=False)
share_btn = gr.Button(value="📷 Share")
with gr.Accordion("Parameters", open=False, visible=True) as parameter_row:
temperature = gr.Slider(
minimum=0.0,
maximum=1.0,
value=0.7,
step=0.1,
interactive=True,
label="Temperature",
)
max_output_tokens = gr.Slider(
minimum=0,
maximum=1024,
value=512,
step=64,
interactive=True,
label="Max output tokens",
)
gr.Markdown(learn_more_md)
# Register listeners
btn_list = [leftvote_btn, rightvote_btn, tie_btn, bothbad_btn,
regenerate_btn, clear_btn]
leftvote_btn.click(
leftvote_last_response,
states + model_selectors,
model_selectors + [textbox, leftvote_btn, rightvote_btn, tie_btn, bothbad_btn],
)
rightvote_btn.click(
rightvote_last_response,
states + model_selectors,
model_selectors + [textbox, leftvote_btn, rightvote_btn, tie_btn, bothbad_btn],
)
tie_btn.click(
tievote_last_response,
states + model_selectors,
model_selectors + [textbox, leftvote_btn, rightvote_btn, tie_btn, bothbad_btn],
)
bothbad_btn.click(
bothbad_vote_last_response,
states + model_selectors,
model_selectors + [textbox, leftvote_btn, rightvote_btn, tie_btn, bothbad_btn],
)
regenerate_btn.click(
regenerate, states, states + chatbots + [textbox] + btn_list
).then(
http_bot_all,
states + model_selectors + [temperature, max_output_tokens],
states + chatbots + btn_list,
)
clear_btn.click(clear_history, None, states + chatbots + model_selectors + [
textbox] + btn_list)
share_js="""
function (a, b, c, d) {
const captureElement = document.querySelector('#share-region-anony');
html2canvas(captureElement)
.then(canvas => {
canvas.style.display = 'none'
document.body.appendChild(canvas)
return canvas
})
.then(canvas => {
const image = canvas.toDataURL('image/png')
const a = document.createElement('a')
a.setAttribute('download', 'chatbot-arena.png')
a.setAttribute('href', image)
a.click()
canvas.remove()
});
return [a, b, c, d];
}
"""
share_btn.click(share_click, states + model_selectors, [], _js=share_js)
textbox.submit(
add_text, states + [textbox], states + chatbots + [textbox] + btn_list
).then(
http_bot_all,
states + model_selectors + [temperature, max_output_tokens],
states + chatbots + btn_list,
)
send_btn.click(
add_text, states + [textbox], states + chatbots + [textbox] + btn_list
).then(
http_bot_all,
states + model_selectors + [temperature, max_output_tokens],
states + chatbots + btn_list,
)
return (
states,
model_selectors,
chatbots,
textbox,
send_btn,
button_row,
button_row2,
parameter_row,
) | ytchat | /ytchat-0.0.16-py3-none-any.whl/fastchat/serve/gradio_block_arena_anony.py | gradio_block_arena_anony.py |
import argparse
import asyncio
import json
import threading
import time
import uuid
from typing import List, Dict
import requests
import torch
import uvicorn
from fastapi import FastAPI, Request, BackgroundTasks
from fastapi.responses import StreamingResponse
from transformers import AutoTokenizer
from cacheflow.master.server import Server, initialize_ray_cluster
from cacheflow.sampling_params import SamplingParams
from cacheflow.sequence import Sequence, SequenceGroup
from cacheflow.utils import Counter, get_gpu_memory, get_cpu_memory
from fastchat.constants import WORKER_HEART_BEAT_INTERVAL
from fastchat.utils import build_logger, pretty_print_semaphore
GB = 1 << 30
TIMEOUT_TO_PREVENT_DEADLOCK = 1 # seconds
worker_id = str(uuid.uuid4())[:6]
logger = build_logger("model_worker", f"model_worker_{worker_id}.log")
global_counter = 0
seed = torch.cuda.current_device()
def heart_beat_worker(controller):
while True:
time.sleep(WORKER_HEART_BEAT_INTERVAL)
controller.send_heart_beat()
class CacheFlowWorker:
def __init__(
self,
controller_addr,
worker_addr,
worker_id,
no_register,
model_path,
model_name,
block_size,
seed,
swap_space,
max_num_batched_tokens,
distributed_init_method,
all_stage_devices,
):
self.controller_addr = controller_addr
self.worker_addr = worker_addr
self.worker_id = worker_id
if model_path.endswith("/"):
model_path = model_path[:-1]
self.model_name = model_name or model_path.split("/")[-1]
logger.info(f"Loading the model {self.model_name} on worker {worker_id} ...")
self.block_size = block_size
# FIXME(Hao): we need to pass the tokenizer into cacheflow because we need
# to detect the stopping criteria "###".
self.tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
self.seq_group_counter = Counter()
self.seq_counter = Counter()
# FIXME(Hao): hard code context len
self.context_len = 2048
# pipeline_parallel_size = 1,
# tensor_parallel_size = 1,
# dtype = torch.float16
remote_server_class = Server
self.server = remote_server_class(
model=self.model_name,
model_path=model_path,
pipeline_parallel_size=1,
tensor_parallel_size=1,
block_size=block_size,
dtype=torch.float16,
seed=seed,
swap_space=swap_space,
max_num_batched_tokens=max_num_batched_tokens,
num_nodes=1,
num_devices_per_node=4,
distributed_init_method=distributed_init_method,
all_stage_devices=all_stage_devices,
gpu_memory=get_gpu_memory(),
cpu_memory=get_cpu_memory(),
)
self.running_seq_groups: Dict[int, SequenceGroup] = {}
self.sequence_group_events: Dict[int, asyncio.Event] = {}
self.is_server_running = False
if not no_register:
self.register_to_controller()
self.heart_beat_thread = threading.Thread(
target=heart_beat_worker, args=(self,)
)
self.heart_beat_thread.start()
def register_to_controller(self):
logger.info("Register to controller")
url = self.controller_addr + "/register_worker"
data = {
"worker_name": self.worker_addr,
"check_heart_beat": True,
"worker_status": self.get_status(),
}
r = requests.post(url, json=data)
assert r.status_code == 200
def send_heart_beat(self):
logger.info(
f"Send heart beat. Models: {[self.model_name]}. "
f"Semaphore: {pretty_print_semaphore(model_semaphore)}. "
f"global_counter: {global_counter}"
)
url = self.controller_addr + "/receive_heart_beat"
while True:
try:
ret = requests.post(
url,
json={
"worker_name": self.worker_addr,
"queue_length": self.get_queue_length(),
},
timeout=5,
)
exist = ret.json()["exist"]
break
except requests.exceptions.RequestException as e:
logger.error(f"heart beat error: {e}")
time.sleep(5)
if not exist:
self.register_to_controller()
def get_queue_length(self):
if (
model_semaphore is None
or model_semaphore._value is None
or model_semaphore._waiters is None
):
return 0
else:
return (
args.limit_model_concurrency
- model_semaphore._value
+ len(model_semaphore._waiters)
)
def get_status(self):
return {
"model_names": [self.model_name],
"speed": 1,
"queue_length": self.get_queue_length(),
}
async def server_step(self):
self.is_server_running = True
updated_seq_groups = self.server.step()
self.is_server_running = False
# Notify the waiting coroutines that there new outputs ready.
for seq_group in updated_seq_groups:
group_id = seq_group.group_id
self.running_seq_groups[group_id] = seq_group
self.sequence_group_events[group_id].set()
async def generate_stream(self, params):
tokenizer = self.tokenizer
context = params["prompt"]
temperature = float(params.get("temperature", 1.0))
max_new_tokens = min(int(params.get("max_new_tokens", 256)), 1024)
stop_str = params.get("stop", None)
echo = params.get("echo", True)
params["stop_token_ids"] = params.get("stop_token_ids", None) or []
input_ids = tokenizer(context).input_ids
max_src_len = self.context_len - max_new_tokens - 8
input_ids = input_ids[-max_src_len:]
# make sampling params in cacheflow
sampling_params = SamplingParams.from_dict(params)
sampling_params.stop_token_ids.add(tokenizer.eos_token_id)
sampling_params.n = 1
sampling_params.max_num_steps = max_new_tokens
sampling_params.temperature = temperature
if stop_str is not None:
sampling_params.stop_str = stop_str
# we might sample multiple sequences, but in chatbot, this is one
seqs: List[Sequence] = []
for _ in range(sampling_params.n):
seq_id = next(self.seq_counter)
seq = Sequence(seq_id, input_ids, block_size=self.block_size)
seqs.append(seq)
arrival_time = time.time()
group_id = next(self.seq_group_counter)
# logger.info(f"Group {group_id} arrives at {time.time()}")
seq_group = SequenceGroup(group_id, seqs, arrival_time)
group_event = asyncio.Event()
self.running_seq_groups[group_id] = seq_group
self.sequence_group_events[group_id] = group_event
self.server.add_sequence_groups([(seq_group, sampling_params)])
while True:
if not self.is_server_running:
await self.server_step()
try:
await asyncio.wait_for(
group_event.wait(), timeout=TIMEOUT_TO_PREVENT_DEADLOCK
)
except:
pass
group_event.clear()
seq_group = self.running_seq_groups[group_id]
all_outputs = []
for seq in seq_group.seqs:
token_ids = seq.get_token_ids()
if not echo:
token_ids = token_ids[len(input_ids):]
output = self.tokenizer.decode(token_ids, skip_special_tokens=True)
if stop_str is not None:
if output.endswith(stop_str):
output = output[: -len(stop_str)]
all_outputs.append(output)
assert len(seq_group.seqs) == 1
ret = {
"text": all_outputs[0],
"error_code": 0,
}
yield (json.dumps(ret) + "\0").encode("utf-8")
if seq_group.is_finished():
del self.running_seq_groups[group_id]
del self.sequence_group_events[group_id]
break
app = FastAPI()
model_semaphore = None
def release_model_semaphore():
model_semaphore.release()
@app.post("/worker_generate_stream")
async def generate_stream(request: Request):
global model_semaphore, global_counter
global_counter += 1
params = await request.json()
if model_semaphore is None:
model_semaphore = asyncio.Semaphore(args.limit_model_concurrency)
await model_semaphore.acquire()
background_tasks = BackgroundTasks()
background_tasks.add_task(release_model_semaphore)
# return StreamingResponse(generator, background=background_tasks)
return StreamingResponse(
worker.generate_stream(params), background=background_tasks
)
@app.post("/worker_get_status")
async def get_status(request: Request):
return worker.get_status()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--host", type=str, default="localhost")
parser.add_argument("--port", type=int, default=21002)
parser.add_argument("--worker-address", type=str, default="http://localhost:21002")
parser.add_argument(
"--controller-address", type=str, default="http://localhost:21001"
)
parser.add_argument(
"--model-path", type=str, default="/home/haozhang/weights/hf-llama-7b"
)
parser.add_argument("--model-name", type=str)
parser.add_argument("--limit-model-concurrency", type=int, default=1024)
parser.add_argument("--stream-interval", type=int, default=2)
parser.add_argument("--no-register", action="store_true")
# cacheflow specific params
parser.add_argument(
"--block-size", type=int, default=8, choices=[8, 16], help="token block size"
)
parser.add_argument(
"--swap-space", type=int, default=20, help="CPU swap space size (GiB) per GPU"
)
parser.add_argument(
"--max-num-batched-tokens",
type=int,
default=2560,
help="maximum number of batched tokens",
)
args = parser.parse_args()
(
num_nodes,
num_devices_per_node,
distributed_init_method,
all_stage_devices,
) = initialize_ray_cluster(pipeline_parallel_size=1, tensor_parallel_size=1)
worker = CacheFlowWorker(
args.controller_address,
args.worker_address,
worker_id,
args.no_register,
args.model_path,
args.model_name,
args.block_size,
seed,
args.swap_space,
args.max_num_batched_tokens,
distributed_init_method,
all_stage_devices,
)
uvicorn.run(app, host=args.host, port=args.port, log_level="info") | ytchat | /ytchat-0.0.16-py3-none-any.whl/fastchat/serve/cacheflow_worker.py | cacheflow_worker.py |
import argparse
import asyncio
import json
import os
import time
from typing import List, Union
import threading
import uuid
from fastapi import FastAPI, Request, BackgroundTasks
from fastapi.responses import StreamingResponse
import requests
import torch
import uvicorn
from fastchat.constants import WORKER_HEART_BEAT_INTERVAL
from fastchat.utils import build_logger, server_error_msg, pretty_print_semaphore
GB = 1 << 30
worker_id = str(uuid.uuid4())[:6]
logger = build_logger("fake_worker", f"fake_worker_{worker_id}.log")
global_counter = 0
model_semaphore = None
def heart_beat_worker(controller):
while True:
time.sleep(WORKER_HEART_BEAT_INTERVAL)
controller.send_heart_beat()
@torch.inference_mode()
def generate_stream():
yield "This is a fake worker."
class ModelWorker:
def __init__(
self,
controller_addr,
worker_addr,
worker_id,
model_path,
model_name,
device,
):
self.controller_addr = controller_addr
self.worker_addr = worker_addr
self.worker_id = worker_id
if model_path.endswith("/"):
model_path = model_path[:-1]
self.model_name = model_name or model_path.split("/")[-1]
self.device = device
self.generate_stream_func = generate_stream
if True:
self.register_to_controller()
self.heart_beat_thread = threading.Thread(
target=heart_beat_worker, args=(self,)
)
self.heart_beat_thread.start()
def register_to_controller(self):
logger.info("Register to controller")
url = self.controller_addr + "/register_worker"
data = {
"worker_name": self.worker_addr,
"check_heart_beat": True,
"worker_status": self.get_status(),
}
r = requests.post(url, json=data)
assert r.status_code == 200
def send_heart_beat(self):
logger.info(
f"Send heart beat. Models: {[self.model_name]}. "
f"Semaphore: {pretty_print_semaphore(model_semaphore)}. "
f"global_counter: {global_counter}"
)
url = self.controller_addr + "/receive_heart_beat"
while True:
try:
ret = requests.post(
url,
json={
"worker_name": self.worker_addr,
"queue_length": self.get_queue_length(),
},
timeout=5,
)
exist = ret.json()["exist"]
break
except requests.exceptions.RequestException as e:
logger.error(f"heart beat error: {e}")
time.sleep(5)
if not exist:
self.register_to_controller()
def get_queue_length(self):
if (
model_semaphore is None
or model_semaphore._value is None
or model_semaphore._waiters is None
):
return 0
else:
return (
args.limit_model_concurrency
- model_semaphore._value
+ len(model_semaphore._waiters)
)
def get_status(self):
return {
"model_names": [self.model_name],
"speed": 1,
"queue_length": self.get_queue_length(),
}
def generate_stream_gate(self, params):
for output in self.generate_stream_func():
ret = {
"text": output,
"error_code": 0,
}
yield json.dumps(ret).encode() + b"\0"
app = FastAPI()
def release_model_semaphore():
model_semaphore.release()
@app.post("/worker_generate_stream")
async def api_generate_stream(request: Request):
global model_semaphore, global_counter
global_counter += 1
params = await request.json()
if model_semaphore is None:
model_semaphore = asyncio.Semaphore(args.limit_model_concurrency)
await model_semaphore.acquire()
generator = worker.generate_stream_gate(params)
background_tasks = BackgroundTasks()
background_tasks.add_task(release_model_semaphore)
return StreamingResponse(generator, background=background_tasks)
@app.post("/worker_get_status")
async def api_get_status(request: Request):
return worker.get_status()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--host", type=str, default="127.0.0.1")
parser.add_argument("--port", type=int, default=21002)
parser.add_argument("--worker-address", type=str, default="http://127.0.0.1:21002")
parser.add_argument(
"--controller-address", type=str, default="http://127.0.0.1:21001"
)
parser.add_argument(
"--model-path",
type=str,
default="fake/fake-worker",
help="The path to the weights",
)
parser.add_argument("--model-name", type=str, help="Optional name")
parser.add_argument(
"--device", type=str, choices=["cpu", "cuda", "mps"], default="cuda"
)
parser.add_argument("--num-gpus", type=int, default=1)
parser.add_argument(
"--gpus",
type=str,
default=None,
help="A single GPU like 1 or multiple GPUs like 0,2"
)
parser.add_argument(
"--max-gpu-memory",
type=str,
help="The maximum memory per gpu. Use a string like '13Gib'",
)
parser.add_argument("--limit-model-concurrency", type=int, default=5)
parser.add_argument("--stream-interval", type=int, default=2)
args = parser.parse_args()
logger.info(f"args: {args}")
worker = ModelWorker(
args.controller_address,
args.worker_address,
worker_id,
args.model_path,
args.model_name,
args.device,
)
uvicorn.run(app, host=args.host, port=args.port, log_level="info") | ytchat | /ytchat-0.0.16-py3-none-any.whl/fastchat/serve/fake_worker.py | fake_worker.py |
import argparse
import asyncio
import dataclasses
import logging
import json
import os
import time
from typing import List, Union
import threading
import uuid
from fastapi import FastAPI, Request, BackgroundTasks
from fastapi.responses import StreamingResponse
import requests
try:
from transformers import (
AutoTokenizer,
AutoModelForCausalLM,
LlamaTokenizer,
AutoModel,
)
except ImportError:
from transformers import (
AutoTokenizer,
AutoModelForCausalLM,
LLaMATokenizer,
AutoModel,
)
import torch
import uvicorn
from fastchat.constants import WORKER_HEART_BEAT_INTERVAL
from fastchat.serve.inference import load_model, generate_stream, add_model_args
from fastchat.serve.serve_chatglm import chatglm_generate_stream
from fastchat.utils import build_logger, server_error_msg, pretty_print_semaphore
GB = 1 << 30
worker_id = str(uuid.uuid4())[:6]
logger = build_logger("model_worker", f"model_worker_{worker_id}.log")
global_counter = 0
model_semaphore = None
def heart_beat_worker(controller):
while True:
time.sleep(WORKER_HEART_BEAT_INTERVAL)
controller.send_heart_beat()
class ModelWorker:
def __init__(
self,
controller_addr,
worker_addr,
worker_id,
no_register,
model_path,
model_name,
device,
num_gpus,
max_gpu_memory,
load_8bit=False,
cpu_offloading=False,
):
self.controller_addr = controller_addr
self.worker_addr = worker_addr
self.worker_id = worker_id
if model_path.endswith("/"):
model_path = model_path[:-1]
self.model_name = model_name or model_path.split("/")[-1]
self.device = device
logger.info(f"Loading the model {self.model_name} on worker {worker_id} ...")
self.model, self.tokenizer = load_model(
model_path, device, num_gpus, max_gpu_memory, load_8bit, cpu_offloading
)
if hasattr(self.model.config, "max_sequence_length"):
self.context_len = self.model.config.max_sequence_length
elif hasattr(self.model.config, "max_position_embeddings"):
self.context_len = self.model.config.max_position_embeddings
else:
self.context_len = 2048
is_chatglm = "chatglm" in str(type(self.model)).lower()
if is_chatglm:
self.generate_stream_func = chatglm_generate_stream
else:
self.generate_stream_func = generate_stream
if not no_register:
self.register_to_controller()
self.heart_beat_thread = threading.Thread(
target=heart_beat_worker, args=(self,)
)
self.heart_beat_thread.start()
def register_to_controller(self):
logger.info("Register to controller")
url = self.controller_addr + "/register_worker"
data = {
"worker_name": self.worker_addr,
"check_heart_beat": True,
"worker_status": self.get_status(),
}
r = requests.post(url, json=data)
assert r.status_code == 200
def send_heart_beat(self):
logger.info(
f"Send heart beat. Models: {[self.model_name]}. "
f"Semaphore: {pretty_print_semaphore(model_semaphore)}. "
f"global_counter: {global_counter}"
)
url = self.controller_addr + "/receive_heart_beat"
while True:
try:
ret = requests.post(
url,
json={
"worker_name": self.worker_addr,
"queue_length": self.get_queue_length(),
},
timeout=5,
)
exist = ret.json()["exist"]
break
except requests.exceptions.RequestException as e:
logger.error(f"heart beat error: {e}")
time.sleep(5)
if not exist:
self.register_to_controller()
def get_queue_length(self):
if (
model_semaphore is None
or model_semaphore._value is None
or model_semaphore._waiters is None
):
return 0
else:
return (
args.limit_model_concurrency
- model_semaphore._value
+ len(model_semaphore._waiters)
)
def get_status(self):
return {
"model_names": [self.model_name],
"speed": 1,
"queue_length": self.get_queue_length(),
}
def generate_stream_gate(self, params):
try:
for output in self.generate_stream_func(
self.model,
self.tokenizer,
params,
self.device,
self.context_len,
args.stream_interval,
):
ret = {
"text": output,
"error_code": 0,
}
yield json.dumps(ret).encode() + b"\0"
except torch.cuda.OutOfMemoryError:
ret = {
"text": server_error_msg,
"error_code": 1,
}
yield json.dumps(ret).encode() + b"\0"
app = FastAPI()
def release_model_semaphore():
model_semaphore.release()
@app.post("/worker_generate_stream")
async def api_generate_stream(request: Request):
global model_semaphore, global_counter
global_counter += 1
params = await request.json()
if model_semaphore is None:
model_semaphore = asyncio.Semaphore(args.limit_model_concurrency)
await model_semaphore.acquire()
generator = worker.generate_stream_gate(params)
background_tasks = BackgroundTasks()
background_tasks.add_task(release_model_semaphore)
return StreamingResponse(generator, background=background_tasks)
@app.post("/worker_get_status")
async def api_get_status(request: Request):
return worker.get_status()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--host", type=str, default="localhost")
parser.add_argument("--port", type=int, default=21002)
parser.add_argument("--worker-address", type=str, default="http://localhost:21002")
parser.add_argument(
"--controller-address", type=str, default="http://localhost:21001"
)
add_model_args(parser)
parser.add_argument("--model-name", type=str, help="Optional display name")
parser.add_argument("--limit-model-concurrency", type=int, default=5)
parser.add_argument("--stream-interval", type=int, default=2)
parser.add_argument("--no-register", action="store_true")
args = parser.parse_args()
logger.info(f"args: {args}")
if args.gpus:
if len(args.gpus.split(",")) < args.num_gpus:
raise ValueError(f"Larger --num-gpus ({args.num_gpus}) than --gpus {args.gpus}!")
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus
worker = ModelWorker(
args.controller_address,
args.worker_address,
worker_id,
args.no_register,
args.model_path,
args.model_name,
args.device,
args.num_gpus,
args.max_gpu_memory,
args.load_8bit,
args.cpu_offloading,
)
uvicorn.run(app, host=args.host, port=args.port, log_level="info") | ytchat | /ytchat-0.0.16-py3-none-any.whl/fastchat/serve/model_worker.py | model_worker.py |
code_highlight_css = """
#chatbot .hll { background-color: #ffffcc }
#chatbot .c { color: #408080; font-style: italic }
#chatbot .err { border: 1px solid #FF0000 }
#chatbot .k { color: #008000; font-weight: bold }
#chatbot .o { color: #666666 }
#chatbot .ch { color: #408080; font-style: italic }
#chatbot .cm { color: #408080; font-style: italic }
#chatbot .cp { color: #BC7A00 }
#chatbot .cpf { color: #408080; font-style: italic }
#chatbot .c1 { color: #408080; font-style: italic }
#chatbot .cs { color: #408080; font-style: italic }
#chatbot .gd { color: #A00000 }
#chatbot .ge { font-style: italic }
#chatbot .gr { color: #FF0000 }
#chatbot .gh { color: #000080; font-weight: bold }
#chatbot .gi { color: #00A000 }
#chatbot .go { color: #888888 }
#chatbot .gp { color: #000080; font-weight: bold }
#chatbot .gs { font-weight: bold }
#chatbot .gu { color: #800080; font-weight: bold }
#chatbot .gt { color: #0044DD }
#chatbot .kc { color: #008000; font-weight: bold }
#chatbot .kd { color: #008000; font-weight: bold }
#chatbot .kn { color: #008000; font-weight: bold }
#chatbot .kp { color: #008000 }
#chatbot .kr { color: #008000; font-weight: bold }
#chatbot .kt { color: #B00040 }
#chatbot .m { color: #666666 }
#chatbot .s { color: #BA2121 }
#chatbot .na { color: #7D9029 }
#chatbot .nb { color: #008000 }
#chatbot .nc { color: #0000FF; font-weight: bold }
#chatbot .no { color: #880000 }
#chatbot .nd { color: #AA22FF }
#chatbot .ni { color: #999999; font-weight: bold }
#chatbot .ne { color: #D2413A; font-weight: bold }
#chatbot .nf { color: #0000FF }
#chatbot .nl { color: #A0A000 }
#chatbot .nn { color: #0000FF; font-weight: bold }
#chatbot .nt { color: #008000; font-weight: bold }
#chatbot .nv { color: #19177C }
#chatbot .ow { color: #AA22FF; font-weight: bold }
#chatbot .w { color: #bbbbbb }
#chatbot .mb { color: #666666 }
#chatbot .mf { color: #666666 }
#chatbot .mh { color: #666666 }
#chatbot .mi { color: #666666 }
#chatbot .mo { color: #666666 }
#chatbot .sa { color: #BA2121 }
#chatbot .sb { color: #BA2121 }
#chatbot .sc { color: #BA2121 }
#chatbot .dl { color: #BA2121 }
#chatbot .sd { color: #BA2121; font-style: italic }
#chatbot .s2 { color: #BA2121 }
#chatbot .se { color: #BB6622; font-weight: bold }
#chatbot .sh { color: #BA2121 }
#chatbot .si { color: #BB6688; font-weight: bold }
#chatbot .sx { color: #008000 }
#chatbot .sr { color: #BB6688 }
#chatbot .s1 { color: #BA2121 }
#chatbot .ss { color: #19177C }
#chatbot .bp { color: #008000 }
#chatbot .fm { color: #0000FF }
#chatbot .vc { color: #19177C }
#chatbot .vg { color: #19177C }
#chatbot .vi { color: #19177C }
#chatbot .vm { color: #19177C }
#chatbot .il { color: #666666 }
"""
# .highlight { background: #f8f8f8; } | ytchat | /ytchat-0.0.16-py3-none-any.whl/fastchat/serve/gradio_css.py | gradio_css.py |
import argparse
import asyncio
import dataclasses
from enum import Enum, auto
import json
import logging
import time
from typing import List, Union
import threading
from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse
import numpy as np
import requests
import uvicorn
from fastchat.constants import CONTROLLER_HEART_BEAT_EXPIRATION
from fastchat.utils import build_logger, server_error_msg
logger = build_logger("controller", "controller.log")
class DispatchMethod(Enum):
LOTTERY = auto()
SHORTEST_QUEUE = auto()
@classmethod
def from_str(cls, name):
if name == "lottery":
return cls.LOTTERY
elif name == "shortest_queue":
return cls.SHORTEST_QUEUE
else:
raise ValueError(f"Invalid dispatch method")
@dataclasses.dataclass
class WorkerInfo:
model_names: List[str]
speed: int
queue_length: int
check_heart_beat: bool
last_heart_beat: str
def heart_beat_controller(controller):
while True:
time.sleep(CONTROLLER_HEART_BEAT_EXPIRATION)
controller.remove_stable_workers_by_expiration()
class Controller:
def __init__(self, dispatch_method: str):
# Dict[str -> WorkerInfo]
self.worker_info = {}
self.dispatch_method = DispatchMethod.from_str(dispatch_method)
self.heart_beat_thread = threading.Thread(
target=heart_beat_controller, args=(self,)
)
self.heart_beat_thread.start()
logger.info("Init controller")
def register_worker(
self, worker_name: str, check_heart_beat: bool, worker_status: dict
):
if worker_name not in self.worker_info:
logger.info(f"Register a new worker: {worker_name}")
else:
logger.info(f"Register an existing worker: {worker_name}")
if not worker_status:
worker_status = self.get_worker_status(worker_name)
if not worker_status:
return False
self.worker_info[worker_name] = WorkerInfo(
worker_status["model_names"],
worker_status["speed"],
worker_status["queue_length"],
check_heart_beat,
time.time(),
)
logger.info(f"Register done: {worker_name}, {worker_status}")
return True
def get_worker_status(self, worker_name: str):
try:
r = requests.post(worker_name + "/worker_get_status", timeout=5)
except requests.exceptions.RequestException as e:
logger.error(f"Get status fails: {worker_name}, {e}")
return None
if r.status_code != 200:
logger.error(f"Get status fails: {worker_name}, {r}")
return None
return r.json()
def remove_worker(self, worker_name: str):
del self.worker_info[worker_name]
def refresh_all_workers(self):
old_info = dict(self.worker_info)
self.worker_info = {}
for w_name, w_info in old_info.items():
if not self.register_worker(w_name, w_info.check_heart_beat, None):
logger.info(f"Remove stale worker: {w_name}")
def list_models(self):
model_names = set()
for w_name, w_info in self.worker_info.items():
model_names.update(w_info.model_names)
return list(model_names)
def get_worker_address(self, model_name: str):
if self.dispatch_method == DispatchMethod.LOTTERY:
worker_names = []
worker_speeds = []
for w_name, w_info in self.worker_info.items():
if model_name in w_info.model_names:
worker_names.append(w_name)
worker_speeds.append(w_info.speed)
worker_speeds = np.array(worker_speeds, dtype=np.float32)
norm = np.sum(worker_speeds)
if norm < 1e-4:
return ""
worker_speeds = worker_speeds / norm
if True: # Directly return address
pt = np.random.choice(np.arange(len(worker_names)), p=worker_speeds)
worker_name = worker_names[pt]
return worker_name
# Check status before returning
while True:
pt = np.random.choice(np.arange(len(worker_names)), p=worker_speeds)
worker_name = worker_names[pt]
if self.get_worker_status(worker_name):
break
else:
self.remove_worker(worker_name)
worker_speeds[pt] = 0
norm = np.sum(worker_speeds)
if norm < 1e-4:
return ""
worker_speeds = worker_speeds / norm
continue
return worker_name
elif self.dispatch_method == DispatchMethod.SHORTEST_QUEUE:
worker_names = []
worker_qlen = []
for w_name, w_info in self.worker_info.items():
if model_name in w_info.model_names:
worker_names.append(w_name)
worker_qlen.append(w_info.queue_length / w_info.speed)
if len(worker_names) == 0:
return ""
min_index = np.argmin(worker_qlen)
w_name = worker_names[min_index]
self.worker_info[w_name].queue_length += 1
logger.info(
f"names: {worker_names}, queue_lens: {worker_qlen}, ret: {w_name}"
)
return w_name
else:
raise ValueError(f"Invalid dispatch method: {self.dispatch_method}")
def receive_heart_beat(self, worker_name: str, queue_length: int):
if worker_name not in self.worker_info:
logger.info(f"Receive unknown heart beat. {worker_name}")
return False
self.worker_info[worker_name].queue_length = queue_length
self.worker_info[worker_name].last_heart_beat = time.time()
logger.info(f"Receive heart beat. {worker_name}")
return True
def remove_stable_workers_by_expiration(self):
expire = time.time() - CONTROLLER_HEART_BEAT_EXPIRATION
to_delete = []
for worker_name, w_info in self.worker_info.items():
if w_info.check_heart_beat and w_info.last_heart_beat < expire:
to_delete.append(worker_name)
for worker_name in to_delete:
self.remove_worker(worker_name)
def worker_api_generate_stream(self, params):
worker_addr = self.get_worker_address(params["model"])
if not worker_addr:
logger.info(f"no worker: {params['model']}")
ret = {
"text": server_error_msg,
"error_code": 2,
}
yield json.dumps(ret).encode() + b"\0"
try:
response = requests.post(
worker_addr + "/worker_generate_stream",
json=params,
stream=True,
timeout=15,
)
for chunk in response.iter_lines(decode_unicode=False, delimiter=b"\0"):
if chunk:
yield chunk + b"\0"
except requests.exceptions.RequestException as e:
logger.info(f"worker timeout: {worker_addr}")
ret = {
"text": server_error_msg,
"error_code": 3,
}
yield json.dumps(ret).encode() + b"\0"
# Let the controller act as a worker to achieve hierarchical
# management. This can be used to connect isolated sub networks.
def worker_api_get_status(self):
model_names = set()
speed = 0
queue_length = 0
for w_name in self.worker_info:
worker_status = self.get_worker_status(w_name)
if worker_status is not None:
model_names.update(worker_status["model_names"])
speed += worker_status["speed"]
queue_length += worker_status["queue_length"]
return {
"model_names": list(model_names),
"speed": speed,
"queue_length": queue_length,
}
app = FastAPI()
@app.post("/register_worker")
async def register_worker(request: Request):
data = await request.json()
controller.register_worker(
data["worker_name"], data["check_heart_beat"], data.get("worker_status", None)
)
@app.post("/refresh_all_workers")
async def refresh_all_workers():
models = controller.refresh_all_workers()
@app.post("/list_models")
async def list_models():
models = controller.list_models()
return {"models": models}
@app.post("/get_worker_address")
async def get_worker_address(request: Request):
data = await request.json()
addr = controller.get_worker_address(data["model"])
return {"address": addr}
@app.post("/receive_heart_beat")
async def receive_heart_beat(request: Request):
data = await request.json()
exist = controller.receive_heart_beat(data["worker_name"], data["queue_length"])
return {"exist": exist}
@app.post("/worker_generate_stream")
async def worker_api_generate_stream(request: Request):
params = await request.json()
generator = controller.worker_api_generate_stream(params)
return StreamingResponse(generator)
@app.post("/worker_get_status")
async def worker_api_get_status(request: Request):
return controller.worker_api_get_status()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--host", type=str, default="localhost")
parser.add_argument("--port", type=int, default=21001)
parser.add_argument(
"--dispatch-method",
type=str,
choices=["lottery", "shortest_queue"],
default="shortest_queue",
)
args = parser.parse_args()
logger.info(f"args: {args}")
controller = Controller(args.dispatch_method)
uvicorn.run(app, host=args.host, port=args.port, log_level="info") | ytchat | /ytchat-0.0.16-py3-none-any.whl/fastchat/serve/controller.py | controller.py |
from __future__ import annotations
from gradio.components import *
from markdown2 import Markdown
import nh3
class _Keywords(Enum):
NO_VALUE = "NO_VALUE" # Used as a sentinel to determine if nothing is provided as a argument for `value` in `Component.update()`
FINISHED_ITERATING = "FINISHED_ITERATING" # Used to skip processing of a component's value (needed for generators + state)
@document("style")
class Chatbot(Changeable, Selectable, IOComponent, JSONSerializable):
"""
Displays a chatbot output showing both user submitted messages and responses. Supports a subset of Markdown including bold, italics, code, and images.
Preprocessing: this component does *not* accept input.
Postprocessing: expects function to return a {List[Tuple[str | None | Tuple, str | None | Tuple]]}, a list of tuples with user message and response messages. Messages should be strings, tuples, or Nones. If the message is a string, it can include Markdown. If it is a tuple, it should consist of (string filepath to image/video/audio, [optional string alt text]). Messages that are `None` are not displayed.
Demos: chatbot_simple, chatbot_multimodal
"""
def __init__(
self,
value: List[Tuple[str | None, str | None]] | Callable | None = None,
color_map: Dict[str, str] | None = None, # Parameter moved to Chatbot.style()
*,
label: str | None = None,
every: float | None = None,
show_label: bool = True,
visible: bool = True,
elem_id: str | None = None,
elem_classes: List[str] | str | None = None,
**kwargs,
):
"""
Parameters:
value: Default value to show in chatbot. If callable, the function will be called whenever the app loads to set the initial value of the component.
label: component name in interface.
every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
show_label: if True, will display label.
visible: If False, component will be hidden.
elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.
"""
if color_map is not None:
warnings.warn(
"The 'color_map' parameter has been deprecated.",
)
# self.md = utils.get_markdown_parser()
self.md = Markdown(extras=["fenced-code-blocks", "tables", "break-on-newline"])
self.select: EventListenerMethod
"""
Event listener for when the user selects message from Chatbot.
Uses event data gradio.SelectData to carry `value` referring to text of selected message, and `index` tuple to refer to [message, participant] index.
See EventData documentation on how to use this event data.
"""
IOComponent.__init__(
self,
label=label,
every=every,
show_label=show_label,
visible=visible,
elem_id=elem_id,
elem_classes=elem_classes,
value=value,
**kwargs,
)
def get_config(self):
return {
"value": self.value,
"selectable": self.selectable,
**IOComponent.get_config(self),
}
@staticmethod
def update(
value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE,
label: str | None = None,
show_label: bool | None = None,
visible: bool | None = None,
):
updated_config = {
"label": label,
"show_label": show_label,
"visible": visible,
"value": value,
"__type__": "update",
}
return updated_config
def _process_chat_messages(
self, chat_message: str | Tuple | List | Dict | None
) -> str | Dict | None:
if chat_message is None:
return None
elif isinstance(chat_message, (tuple, list)):
mime_type = processing_utils.get_mimetype(chat_message[0])
return {
"name": chat_message[0],
"mime_type": mime_type,
"alt_text": chat_message[1] if len(chat_message) > 1 else None,
"data": None, # These last two fields are filled in by the frontend
"is_file": True,
}
elif isinstance(
chat_message, dict
): # This happens for previously processed messages
return chat_message
elif isinstance(chat_message, str):
# return self.md.render(chat_message)
return str(self.md.convert(chat_message))
else:
raise ValueError(f"Invalid message for Chatbot component: {chat_message}")
def postprocess(
self,
y: List[
Tuple[str | Tuple | List | Dict | None, str | Tuple | List | Dict | None]
],
) -> List[Tuple[str | Dict | None, str | Dict | None]]:
"""
Parameters:
y: List of tuples representing the message and response pairs. Each message and response should be a string, which may be in Markdown format. It can also be a tuple whose first element is a string filepath or URL to an image/video/audio, and second (optional) element is the alt text, in which case the media file is displayed. It can also be None, in which case that message is not displayed.
Returns:
List of tuples representing the message and response. Each message and response will be a string of HTML, or a dictionary with media information.
"""
if y is None:
return []
processed_messages = []
for message_pair in y:
assert isinstance(
message_pair, (tuple, list)
), f"Expected a list of lists or list of tuples. Received: {message_pair}"
assert (
len(message_pair) == 2
), f"Expected a list of lists of length 2 or list of tuples of length 2. Received: {message_pair}"
processed_messages.append(
(
# self._process_chat_messages(message_pair[0]),
'<pre style="font-family: var(--font)">'
+ nh3.clean(message_pair[0])
+ "</pre>",
self._process_chat_messages(message_pair[1]),
)
)
return processed_messages
def style(self, height: int | None = None, **kwargs):
"""
This method can be used to change the appearance of the Chatbot component.
"""
if height is not None:
self._style["height"] = height
if kwargs.get("color_map") is not None:
warnings.warn("The 'color_map' parameter has been deprecated.")
Component.style(
self,
**kwargs,
)
return self | ytchat | /ytchat-0.0.16-py3-none-any.whl/fastchat/serve/gradio_patch.py | gradio_patch.py |
import argparse
import gradio as gr
from fastchat.utils import build_logger
from fastchat.serve.gradio_patch import Chatbot as grChatbot
from fastchat.serve.gradio_web_server import (
set_global_vars,
get_window_url_params,
block_css,
build_single_model_ui,
get_model_list,
load_demo_single,
)
from fastchat.serve.gradio_block_arena_anony import (build_side_by_side_ui_anony,
load_demo_side_by_side_anony, set_global_vars_anony)
from fastchat.serve.gradio_block_arena_named import (build_side_by_side_ui_named,
load_demo_side_by_side_named, set_global_vars_named)
logger = build_logger("gradio_web_server_multi", "gradio_web_server_multi.log")
def load_demo(url_params, request: gr.Request):
logger.info(f"load_demo. ip: {request.client.host}. params: {url_params}")
selected = 0
if "arena" in url_params:
selected = 1
elif "compare" in url_params:
selected = 2
single_updates = load_demo_single(models, url_params)
side_by_side_anony_updates = load_demo_side_by_side_anony(models, url_params)
side_by_side_named_updates = load_demo_side_by_side_named(models, url_params)
return ((gr.Tabs.update(selected=selected),) + single_updates +
side_by_side_anony_updates + side_by_side_named_updates)
def build_demo(models):
with gr.Blocks(
title="Chat with Open Large Language Models",
theme=gr.themes.Base(),
css=block_css,
) as demo:
with gr.Tabs() as tabs:
with gr.Tab("Single Model", id=0):
(
a_state,
a_model_selector,
a_chatbot,
a_textbox,
a_send_btn,
a_button_row,
a_parameter_row,
) = build_single_model_ui(models)
a_list = [
a_state,
a_model_selector,
a_chatbot,
a_textbox,
a_send_btn,
a_button_row,
a_parameter_row,
]
with gr.Tab("Chatbot Arena (battle)", id=1):
(
b_states,
b_model_selectors,
b_chatbots,
b_textbox,
b_send_btn,
b_button_row,
b_button_row2,
b_parameter_row,
) = build_side_by_side_ui_anony(models)
b_list = (
b_states
+ b_model_selectors
+ b_chatbots
+ [
b_textbox,
b_send_btn,
b_button_row,
b_button_row2,
b_parameter_row,
]
)
with gr.Tab("Chatbot Arena (side-by-side)", id=2):
(
c_states,
c_model_selectors,
c_chatbots,
c_textbox,
c_send_btn,
c_button_row,
c_button_row2,
c_parameter_row,
) = build_side_by_side_ui_named(models)
c_list = (
c_states
+ c_model_selectors
+ c_chatbots
+ [
c_textbox,
c_send_btn,
c_button_row,
c_button_row2,
c_parameter_row,
]
)
url_params = gr.JSON(visible=False)
if args.model_list_mode == "once":
demo.load(
load_demo,
[url_params],
[tabs] + a_list + b_list + c_list,
_js=get_window_url_params,
)
else:
raise ValueError(f"Unknown model list mode: {args.model_list_mode}")
return demo
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--host", type=str, default="0.0.0.0")
parser.add_argument("--port", type=int)
parser.add_argument("--controller-url", type=str, default="http://localhost:21001")
parser.add_argument("--concurrency-count", type=int, default=10)
parser.add_argument(
"--model-list-mode", type=str, default="once", choices=["once", "reload"]
)
parser.add_argument("--share", action="store_true")
parser.add_argument(
"--moderate", action="store_true", help="Enable content moderation"
)
args = parser.parse_args()
logger.info(f"args: {args}")
set_global_vars(args.controller_url, args.moderate)
set_global_vars_named(args.moderate)
set_global_vars_anony(args.moderate)
models = get_model_list(args.controller_url)
logger.info(args)
demo = build_demo(models)
demo.queue(
concurrency_count=args.concurrency_count, status_update_rate=10, api_open=False
).launch(
server_name=args.host, server_port=args.port, share=args.share, max_threads=200
) | ytchat | /ytchat-0.0.16-py3-none-any.whl/fastchat/serve/gradio_web_server_multi.py | gradio_web_server_multi.py |
import argparse
from collections import defaultdict
import datetime
import json
import os
import time
import uuid
import gradio as gr
import requests
from fastchat.conversation import (
get_default_conv_template,
SeparatorStyle,
)
from fastchat.constants import LOGDIR
from fastchat.utils import (
build_logger,
server_error_msg,
violates_moderation,
moderation_msg,
)
from fastchat.serve.gradio_patch import Chatbot as grChatbot
from fastchat.serve.gradio_css import code_highlight_css
logger = build_logger("gradio_web_server", "gradio_web_server.log")
headers = {"User-Agent": "fastchat Client"}
no_change_btn = gr.Button.update()
enable_btn = gr.Button.update(interactive=True)
disable_btn = gr.Button.update(interactive=False)
controller_url = None
enable_moderation = False
priority = {
"vicuna-13b": "aaa",
"koala-13b": "aab",
"fastchat-t5-3b": "aac",
"oasst-pythia-12b": "aad",
"chatglm-6b": "aae",
"stablelm-tuned-alpha-7b": "aaf",
"alpaca-13b": "aag",
"llama-13b": "aah",
"dolly-v2-12b": "aai",
}
model_description_md = """
| | |
| ---- | ---- |
| [Vicuna](https://vicuna.lmsys.org): a chat assistant fine-tuned from LLaMA on user-shared conversations by LMSYS. | [Koala](https://bair.berkeley.edu/blog/2023/04/03/koala/): a dialogue model for academic research by BAIR |
| [FastChat-T5](https://huggingface.co/lmsys/fastchat-t5-3b-v1.0): a chat assistant fine-tuned from FLAN-T5 by LMSYS. | [OpenAssistant (oasst)](https://open-assistant.io/): a chat-based assistant for everyone by LAION. |
| [ChatGLM](https://chatglm.cn/blog): an open bilingual dialogue language model by Tsinghua University | [StableLM](https://github.com/stability-AI/stableLM/): Stability AI language models. |
| [Alpaca](https://crfm.stanford.edu/2023/03/13/alpaca.html): a model fine-tuned from LLaMA on instruction-following demonstrations by Stanford. | [LLaMA](https://arxiv.org/abs/2302.13971): open and efficient foundation language models by Meta. |
| [Dolly](https://www.databricks.com/blog/2023/04/12/dolly-first-open-commercially-viable-instruction-tuned-llm): an instruction-tuned open large language model by Databricks. | |
"""
learn_more_md = """
### License
The service is a research preview intended for non-commercial use only, subject to the model [License](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) of LLaMA, [Terms of Use](https://openai.com/policies/terms-of-use) of the data generated by OpenAI, and [Privacy Practices](https://chrome.google.com/webstore/detail/sharegpt-share-your-chatg/daiacboceoaocpibfodeljbdfacokfjb) of ShareGPT. Please contact us if you find any potential violation.
"""
def set_global_vars(controller_url_, enable_moderation_):
global controller_url, enable_moderation
controller_url = controller_url_
enable_moderation = enable_moderation_
def get_conv_log_filename():
t = datetime.datetime.now()
name = os.path.join(LOGDIR, f"{t.year}-{t.month:02d}-{t.day:02d}-conv.json")
return name
def get_model_list(controller_url):
ret = requests.post(controller_url + "/refresh_all_workers")
assert ret.status_code == 200
ret = requests.post(controller_url + "/list_models")
models = ret.json()["models"]
models.sort(key=lambda x: priority.get(x, x))
logger.info(f"Models: {models}")
return models
get_window_url_params = """
function() {
const params = new URLSearchParams(window.location.search);
url_params = Object.fromEntries(params);
console.log("url_params", url_params);
return url_params;
}
"""
def load_demo_single(models, url_params):
dropdown_update = gr.Dropdown.update(visible=True)
if "model" in url_params:
model = url_params["model"]
if model in models:
dropdown_update = gr.Dropdown.update(value=model, visible=True)
state = None
return (
state,
dropdown_update,
gr.Chatbot.update(visible=True),
gr.Textbox.update(visible=True),
gr.Button.update(visible=True),
gr.Row.update(visible=True),
gr.Accordion.update(visible=True),
)
def load_demo(url_params, request: gr.Request):
logger.info(f"load_demo. ip: {request.client.host}. params: {url_params}")
return load_demo_single(models, url_params)
def vote_last_response(state, vote_type, model_selector, request: gr.Request):
with open(get_conv_log_filename(), "a") as fout:
data = {
"tstamp": round(time.time(), 4),
"type": vote_type,
"model": model_selector,
"state": state.dict(),
"ip": request.client.host,
}
fout.write(json.dumps(data) + "\n")
def upvote_last_response(state, model_selector, request: gr.Request):
logger.info(f"upvote. ip: {request.client.host}")
vote_last_response(state, "upvote", model_selector, request)
return ("",) + (disable_btn,) * 3
def downvote_last_response(state, model_selector, request: gr.Request):
logger.info(f"downvote. ip: {request.client.host}")
vote_last_response(state, "downvote", model_selector, request)
return ("",) + (disable_btn,) * 3
def flag_last_response(state, model_selector, request: gr.Request):
logger.info(f"flag. ip: {request.client.host}")
vote_last_response(state, "flag", model_selector, request)
return ("",) + (disable_btn,) * 3
def regenerate(state, request: gr.Request):
logger.info(f"regenerate. ip: {request.client.host}")
state.messages[-1][-1] = None
state.skip_next = False
return (state, state.to_gradio_chatbot(), "") + (disable_btn,) * 5
def clear_history(request: gr.Request):
logger.info(f"clear_history. ip: {request.client.host}")
state = None
return (state, [], "") + (disable_btn,) * 5
def add_text(state, text, request: gr.Request):
logger.info(f"add_text. ip: {request.client.host}. len: {len(text)}")
if state is None:
state = get_default_conv_template("vicuna").copy()
if len(text) <= 0:
state.skip_next = True
return (state, state.to_gradio_chatbot(), "") + (no_change_btn,) * 5
if enable_moderation:
flagged = violates_moderation(text)
if flagged:
logger.info(f"violate moderation. ip: {request.client.host}. text: {text}")
state.skip_next = True
return (state, state.to_gradio_chatbot(), moderation_msg) + (
no_change_btn,
) * 5
text = text[:1536] # Hard cut-off
state.append_message(state.roles[0], text)
state.append_message(state.roles[1], None)
state.skip_next = False
return (state, state.to_gradio_chatbot(), "") + (disable_btn,) * 5
def post_process_code(code):
sep = "\n```"
if sep in code:
blocks = code.split(sep)
if len(blocks) % 2 == 1:
for i in range(1, len(blocks), 2):
blocks[i] = blocks[i].replace("\\_", "_")
code = sep.join(blocks)
return code
def http_bot(state, model_selector, temperature, max_new_tokens, request: gr.Request):
logger.info(f"http_bot. ip: {request.client.host}")
start_tstamp = time.time()
model_name = model_selector
temperature = float(temperature)
max_new_tokens = int(max_new_tokens)
if state.skip_next:
# This generate call is skipped due to invalid inputs
yield (state, state.to_gradio_chatbot()) + (no_change_btn,) * 5
return
if len(state.messages) == state.offset + 2:
# First round of conversation
new_state = get_default_conv_template(model_name).copy()
new_state.conv_id = uuid.uuid4().hex
new_state.model_name = state.model_name or model_selector
new_state.append_message(new_state.roles[0], state.messages[-2][1])
new_state.append_message(new_state.roles[1], None)
state = new_state
# Query worker address
ret = requests.post(
controller_url + "/get_worker_address", json={"model": model_name}
)
worker_addr = ret.json()["address"]
logger.info(f"model_name: {model_name}, worker_addr: {worker_addr}")
# No available worker
if worker_addr == "":
state.messages[-1][-1] = server_error_msg
yield (
state,
state.to_gradio_chatbot(),
disable_btn,
disable_btn,
disable_btn,
enable_btn,
enable_btn,
)
return
# Construct prompt
conv = state
if "chatglm" in model_name:
prompt = conv.messages[conv.offset :]
else:
prompt = conv.get_prompt()
# Make requests
gen_params = {
"model": model_name,
"prompt": prompt,
"temperature": temperature,
"max_new_tokens": max_new_tokens,
"stop": conv.stop_str,
"stop_token_ids": conv.stop_token_ids,
"echo": False,
}
logger.info(f"==== request ====\n{gen_params}")
state.messages[-1][-1] = "▌"
yield (state, state.to_gradio_chatbot()) + (disable_btn,) * 5
try:
# Stream output
response = requests.post(
worker_addr + "/worker_generate_stream",
headers=headers,
json=gen_params,
stream=True,
timeout=20,
)
for chunk in response.iter_lines(decode_unicode=False, delimiter=b"\0"):
if chunk:
data = json.loads(chunk.decode())
if data["error_code"] == 0:
output = data["text"].strip()
output = post_process_code(output)
state.messages[-1][-1] = output + "▌"
yield (state, state.to_gradio_chatbot()) + (disable_btn,) * 5
else:
output = data["text"] + f" (error_code: {data['error_code']})"
state.messages[-1][-1] = output
yield (state, state.to_gradio_chatbot()) + (
disable_btn,
disable_btn,
disable_btn,
enable_btn,
enable_btn,
)
return
time.sleep(0.02)
except requests.exceptions.RequestException as e:
state.messages[-1][-1] = server_error_msg + f" (error_code: 4)"
yield (state, state.to_gradio_chatbot()) + (
disable_btn,
disable_btn,
disable_btn,
enable_btn,
enable_btn,
)
return
state.messages[-1][-1] = state.messages[-1][-1][:-1]
yield (state, state.to_gradio_chatbot()) + (enable_btn,) * 5
finish_tstamp = time.time()
logger.info(f"{output}")
with open(get_conv_log_filename(), "a") as fout:
data = {
"tstamp": round(finish_tstamp, 4),
"type": "chat",
"model": model_name,
"gen_params": {
"temperature": temperature,
"max_new_tokens": max_new_tokens,
},
"start": round(start_tstamp, 4),
"finish": round(start_tstamp, 4),
"state": state.dict(),
"ip": request.client.host,
}
fout.write(json.dumps(data) + "\n")
block_css = (
code_highlight_css
+ """
pre {
white-space: pre-wrap; /* Since CSS 2.1 */
white-space: -moz-pre-wrap; /* Mozilla, since 1999 */
white-space: -pre-wrap; /* Opera 4-6 */
white-space: -o-pre-wrap; /* Opera 7 */
word-wrap: break-word; /* Internet Explorer 5.5+ */
}
#notice_markdown th {
display: none;
}
"""
)
def build_single_model_ui(models):
notice_markdown = ("""
# 🏔️ Chat with Open Large Language Models
- Vicuna: An Open-Source Chatbot Impressing GPT-4 with 90% ChatGPT Quality. [[Blog post]](https://vicuna.lmsys.org) [[Evaluation]](https://vicuna.lmsys.org/eval/)
- Koala: A Dialogue Model for Academic Research. [[Blog post]](https://bair.berkeley.edu/blog/2023/04/03/koala/)
- [[GitHub]](https://github.com/lm-sys/FastChat) [[Twitter]](https://twitter.com/lmsysorg) [[Discord]](https://discord.gg/h6kCZb72G7)
### Terms of use
By using this service, users are required to agree to the following terms: The service is a research preview intended for non-commercial use only. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes. **The service collects user dialogue data for future research.**
### Choose a model to chat with
""" + model_description_md)
state = gr.State()
notice = gr.Markdown(notice_markdown, elem_id="notice_markdown")
with gr.Row(elem_id="model_selector_row"):
model_selector = gr.Dropdown(
choices=models,
value=models[0] if len(models) > 0 else "",
interactive=True,
show_label=False,
).style(container=False)
chatbot = grChatbot(elem_id="chatbot", visible=False).style(height=550)
with gr.Row():
with gr.Column(scale=20):
textbox = gr.Textbox(
show_label=False,
placeholder="Enter text and press ENTER",
visible=False,
).style(container=False)
with gr.Column(scale=1, min_width=50):
send_btn = gr.Button(value="Send", visible=False)
with gr.Row(visible=False) as button_row:
upvote_btn = gr.Button(value="👍 Upvote", interactive=False)
downvote_btn = gr.Button(value="👎 Downvote", interactive=False)
flag_btn = gr.Button(value="⚠️ Flag", interactive=False)
# stop_btn = gr.Button(value="⏹️ Stop Generation", interactive=False)
regenerate_btn = gr.Button(value="🔄 Regenerate", interactive=False)
clear_btn = gr.Button(value="🗑️ Clear history", interactive=False)
with gr.Accordion("Parameters", open=False, visible=False) as parameter_row:
temperature = gr.Slider(
minimum=0.0,
maximum=1.0,
value=0.7,
step=0.1,
interactive=True,
label="Temperature",
)
max_output_tokens = gr.Slider(
minimum=0,
maximum=1024,
value=512,
step=64,
interactive=True,
label="Max output tokens",
)
gr.Markdown(learn_more_md)
# Register listeners
btn_list = [upvote_btn, downvote_btn, flag_btn, regenerate_btn, clear_btn]
upvote_btn.click(
upvote_last_response,
[state, model_selector],
[textbox, upvote_btn, downvote_btn, flag_btn],
)
downvote_btn.click(
downvote_last_response,
[state, model_selector],
[textbox, upvote_btn, downvote_btn, flag_btn],
)
flag_btn.click(
flag_last_response,
[state, model_selector],
[textbox, upvote_btn, downvote_btn, flag_btn],
)
regenerate_btn.click(regenerate, state, [state, chatbot, textbox] + btn_list).then(
http_bot,
[state, model_selector, temperature, max_output_tokens],
[state, chatbot] + btn_list,
)
clear_btn.click(clear_history, None, [state, chatbot, textbox] + btn_list)
model_selector.change(clear_history, None, [state, chatbot, textbox] + btn_list)
textbox.submit(
add_text, [state, textbox], [state, chatbot, textbox] + btn_list
).then(
http_bot,
[state, model_selector, temperature, max_output_tokens],
[state, chatbot] + btn_list,
)
send_btn.click(
add_text, [state, textbox], [state, chatbot, textbox] + btn_list
).then(
http_bot,
[state, model_selector, temperature, max_output_tokens],
[state, chatbot] + btn_list,
)
return state, model_selector, chatbot, textbox, send_btn, button_row, parameter_row
def build_demo(models):
with gr.Blocks(
title="Chat with Open Large Language Models",
theme=gr.themes.Base(),
css=block_css,
) as demo:
url_params = gr.JSON(visible=False)
(
state,
model_selector,
chatbot,
textbox,
send_btn,
button_row,
parameter_row,
) = build_single_model_ui(models)
if args.model_list_mode == "once":
demo.load(
load_demo,
[url_params],
[
state,
model_selector,
chatbot,
textbox,
send_btn,
button_row,
parameter_row,
],
_js=get_window_url_params,
)
else:
raise ValueError(f"Unknown model list mode: {args.model_list_mode}")
return demo
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--host", type=str, default="0.0.0.0")
parser.add_argument("--port", type=int)
parser.add_argument("--controller-url", type=str, default="http://localhost:21001")
parser.add_argument("--concurrency-count", type=int, default=10)
parser.add_argument(
"--model-list-mode", type=str, default="once", choices=["once", "reload"]
)
parser.add_argument("--share", action="store_true")
parser.add_argument(
"--moderate", action="store_true", help="Enable content moderation"
)
args = parser.parse_args()
logger.info(f"args: {args}")
set_global_vars(args.controller_url, args.moderate)
models = get_model_list(args.controller_url)
logger.info(args)
demo = build_demo(models)
demo.queue(
concurrency_count=args.concurrency_count, status_update_rate=10, api_open=False
).launch(
server_name=args.host, server_port=args.port, share=args.share, max_threads=200
) | ytchat | /ytchat-0.0.16-py3-none-any.whl/fastchat/serve/gradio_web_server.py | gradio_web_server.py |
import asyncio
from typing import Union, Dict, List, Any
import argparse
import json
import logging
import fastapi
from fastapi.middleware.cors import CORSMiddleware
import httpx
import uvicorn
from pydantic import BaseSettings
from fastchat.protocol.chat_completion import (
ChatCompletionRequest,
ChatCompletionResponse,
ChatMessage,
ChatCompletionResponseChoice,
)
from fastchat.conversation import get_default_conv_template, SeparatorStyle
logger = logging.getLogger(__name__)
class AppSettings(BaseSettings):
# The address of the model controller.
FASTCHAT_CONTROLLER_URL: str = "http://localhost:21001"
app_settings = AppSettings()
app = fastapi.FastAPI()
headers = {"User-Agent": "FastChat API Server"}
@app.get("/v1/models")
async def show_available_models():
controller_url = app_settings.FASTCHAT_CONTROLLER_URL
async with httpx.AsyncClient() as client:
ret = await client.post(controller_url + "/refresh_all_workers")
ret = await client.post(controller_url + "/list_models")
models = ret.json()["models"]
models.sort()
return {"data": [{"id": m} for m in models], "object": "list"}
@app.post("/v1/chat/completions")
async def create_chat_completion(request: ChatCompletionRequest):
"""Creates a completion for the chat message"""
gen_params = get_gen_params(
request.model,
request.messages,
temperature=request.temperature,
max_tokens=request.max_tokens,
echo=False,
stop=request.stop,
)
choices = []
# TODO: batch the requests. maybe not necessary if using CacheFlow worker
chat_completions = []
for i in range(request.n):
content = asyncio.create_task(chat_completion(request.model, gen_params))
chat_completions.append(content)
for i, content_task in enumerate(chat_completions):
content = await content_task
choices.append(
ChatCompletionResponseChoice(
index=i,
message=ChatMessage(role="assistant", content=content),
# TODO: support other finish_reason
finish_reason="stop",
)
)
# TODO: support usage field
# "usage": {
# "prompt_tokens": 9,
# "completion_tokens": 12,
# "total_tokens": 21
# }
return ChatCompletionResponse(choices=choices)
def get_gen_params(
model_name: str,
messages: List[Dict[str, str]],
*,
temperature: float,
max_tokens: int,
echo: bool,
stop: Union[str, None],
):
is_chatglm = "chatglm" in model_name.lower()
# TODO(suquark): The template is currently a reference. Here we have to make a copy.
conv = get_default_conv_template(model_name).copy()
for message in messages:
msg_role = message["role"]
if msg_role == "system":
conv.system = message["content"]
elif msg_role == "user":
conv.append_message(conv.roles[0], message["content"])
elif msg_role == "assistant":
conv.append_message(conv.roles[1], message["content"])
else:
raise ValueError(f"Unknown role: {msg_role}")
# Add a blank message for the assistant.
conv.append_message(conv.roles[1], None)
if is_chatglm:
prompt = conv.messages[conv.offset :]
else:
prompt = conv.get_prompt()
if max_tokens is None:
max_tokens = 512
gen_params = {
"model": model_name,
"prompt": prompt,
"temperature": temperature,
"max_new_tokens": max_tokens,
"echo": echo,
"stop": conv.stop_str,
"stop_token_ids": conv.stop_token_ids,
}
logger.debug(f"==== request ====\n{gen_params}")
return gen_params
async def chat_completion(model_name: str, gen_params: Dict[str, Any]):
controller_url = app_settings.FASTCHAT_CONTROLLER_URL
async with httpx.AsyncClient() as client:
ret = await client.post(
controller_url + "/get_worker_address", json={"model": model_name}
)
worker_addr = ret.json()["address"]
# No available worker
if worker_addr == "":
raise ValueError(f"No available worker for {model_name}")
logger.debug(f"model_name: {model_name}, worker_addr: {worker_addr}")
output = ""
delimiter = b"\0"
async with client.stream(
"POST",
worker_addr + "/worker_generate_stream",
headers=headers,
json=gen_params,
timeout=20,
) as response:
content = await response.aread()
for chunk in content.split(delimiter):
if not chunk:
continue
data = json.loads(chunk.decode())
if data["error_code"] == 0:
output = data["text"].strip()
return output
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="FastChat ChatGPT-compatible Restful API server."
)
parser.add_argument("--host", type=str, default="localhost", help="host name")
parser.add_argument("--port", type=int, default=8000, help="port number")
parser.add_argument("--allow-credentials", action="store_true", help="allow credentials")
parser.add_argument("--allowed-origins", type=json.loads, default=["*"], help="allowed origins")
parser.add_argument("--allowed-methods", type=json.loads, default=["*"], help="allowed methods")
parser.add_argument("--allowed-headers", type=json.loads, default=["*"], help="allowed headers")
args = parser.parse_args()
app.add_middleware(
CORSMiddleware,
allow_origins=args.allowed_origins,
allow_credentials=args.allow_credentials,
allow_methods=args.allowed_methods,
allow_headers=args.allowed_headers,
)
logger.debug(f"==== args ====\n{args}")
uvicorn.run("fastchat.serve.api:app", host=args.host, port=args.port, reload=True) | ytchat | /ytchat-0.0.16-py3-none-any.whl/fastchat/serve/api.py | api.py |
import json
import time
import gradio as gr
import numpy as np
from fastchat.conversation import get_default_conv_template
from fastchat.utils import (
build_logger,
violates_moderation,
moderation_msg,
)
from fastchat.serve.gradio_patch import Chatbot as grChatbot
from fastchat.serve.gradio_web_server import (
http_bot,
get_conv_log_filename,
no_change_btn,
enable_btn,
disable_btn,
model_description_md,
learn_more_md,
)
logger = build_logger("gradio_web_server_multi", "gradio_web_server_multi.log")
num_models = 2
enable_moderation = False
def set_global_vars_named(enable_moderation_):
global enable_moderation
enable_moderation = enable_moderation_
def load_demo_side_by_side_named(models, url_params):
states = (None,) * num_models
model_left = models[0]
if len(models) > 1:
weights = ([1, 1, 1, 1] + [1] * 32)[:len(models) - 1]
weights = weights / np.sum(weights)
model_right = np.random.choice(models[1:], p=weights)
else:
model_right = model_left
selector_updates = (
gr.Dropdown.update(model_left, visible=True),
gr.Dropdown.update(model_right, visible=True),
)
return (
states
+ selector_updates
+ (gr.Chatbot.update(visible=True),) * num_models
+ (
gr.Textbox.update(visible=True),
gr.Box.update(visible=True),
gr.Row.update(visible=True),
gr.Row.update(visible=True),
gr.Accordion.update(visible=True),
)
)
def vote_last_response(states, vote_type, model_selectors, request: gr.Request):
with open(get_conv_log_filename(), "a") as fout:
data = {
"tstamp": round(time.time(), 4),
"type": vote_type,
"models": [x for x in model_selectors],
"states": [x.dict() for x in states],
"ip": request.client.host,
}
fout.write(json.dumps(data) + "\n")
def leftvote_last_response(
state0, state1, model_selector0, model_selector1, request: gr.Request
):
logger.info(f"leftvote (named). ip: {request.client.host}")
vote_last_response(
[state0, state1], "leftvote", [model_selector0, model_selector1], request
)
return ("",) + (disable_btn,) * 4
def rightvote_last_response(
state0, state1, model_selector0, model_selector1, request: gr.Request
):
logger.info(f"rightvote (named). ip: {request.client.host}")
vote_last_response(
[state0, state1], "rightvote", [model_selector0, model_selector1], request
)
return ("",) + (disable_btn,) * 4
def tievote_last_response(
state0, state1, model_selector0, model_selector1, request: gr.Request
):
logger.info(f"tievote (named). ip: {request.client.host}")
vote_last_response(
[state0, state1], "tievote", [model_selector0, model_selector1], request
)
return ("",) + (disable_btn,) * 4
def bothbad_vote_last_response(
state0, state1, model_selector0, model_selector1, request: gr.Request
):
logger.info(f"bothbad_vote (named). ip: {request.client.host}")
vote_last_response(
[state0, state1], "bothbad_vote", [model_selector0, model_selector1], request
)
return ("",) + (disable_btn,) * 4
def regenerate(state0, state1, request: gr.Request):
logger.info(f"regenerate (named). ip: {request.client.host}")
states = [state0, state1]
for i in range(num_models):
states[i].messages[-1][-1] = None
states[i].skip_next = False
return states + [x.to_gradio_chatbot() for x in states] + [""] + [disable_btn] * 6
def clear_history(request: gr.Request):
logger.info(f"clear_history (named). ip: {request.client.host}")
return [None] * num_models + [None] * num_models + [""] + [disable_btn] * 6
def share_click(state0, state1, model_selector0, model_selector1,
request: gr.Request):
logger.info(f"share (named). ip: {request.client.host}")
if state0 is not None and state1 is not None:
vote_last_response(
[state0, state1], "share", [model_selector0, model_selector1], request
)
def add_text(state0, state1, text, request: gr.Request):
logger.info(f"add_text (named). ip: {request.client.host}. len: {len(text)}")
states = [state0, state1]
for i in range(num_models):
if states[i] is None:
states[i] = get_default_conv_template("vicuna").copy()
if len(text) <= 0:
for i in range(num_models):
states[i].skip_next = True
return (
states
+ [x.to_gradio_chatbot() for x in states]
+ [""]
+ [
no_change_btn,
]
* 6
)
if enable_moderation:
flagged = violates_moderation(text)
if flagged:
logger.info(f"violate moderation (named). ip: {request.client.host}. text: {text}")
for i in range(num_models):
states[i].skip_next = True
return (
states
+ [x.to_gradio_chatbot() for x in states]
+ [moderation_msg]
+ [
no_change_btn,
]
* 6
)
text = text[:1536] # Hard cut-off
for i in range(num_models):
states[i].append_message(states[i].roles[0], text)
states[i].append_message(states[i].roles[1], None)
states[i].skip_next = False
return (
states
+ [x.to_gradio_chatbot() for x in states]
+ [""]
+ [
disable_btn,
]
* 6
)
def http_bot_all(
state0,
state1,
model_selector0,
model_selector1,
temperature,
max_new_tokens,
request: gr.Request,
):
logger.info(f"http_bot_all (named). ip: {request.client.host}")
if state0.skip_next:
# This generate call is skipped due to invalid inputs
yield (state0, state1, state0.to_gradio_chatbot(),
state1.to_gradio_chatbot()) + (no_change_btn,) * 6
return
states = [state0, state1]
model_selector = [model_selector0, model_selector1]
gen = []
for i in range(num_models):
gen.append(
http_bot(states[i], model_selector[i], temperature, max_new_tokens, request)
)
chatbots = [None] * num_models
while True:
stop = True
for i in range(num_models):
try:
ret = next(gen[i])
states[i], chatbots[i] = ret[0], ret[1]
stop = False
except StopIteration:
pass
yield states + chatbots + [disable_btn] * 6
if stop:
break
for i in range(10):
if i % 2 == 0:
yield states + chatbots + [disable_btn] * 4 + [enable_btn] * 2
else:
yield states + chatbots + [enable_btn] * 6
time.sleep(0.2)
def build_side_by_side_ui_named(models):
notice_markdown = ("""
# ⚔️ Chatbot Arena ⚔️
Rules:
- Chat with two models side-by-side and vote for which one is better!
- You pick the models you want to chat with.
- You can continue chating and voting or click "Clear history" to start a new round.
- A leaderboard will be available soon.
- [[GitHub]](https://github.com/lm-sys/FastChat) [[Twitter]](https://twitter.com/lmsysorg) [[Discord]](https://discord.gg/h6kCZb72G7)
### Terms of use
By using this service, users are required to agree to the following terms: The service is a research preview intended for non-commercial use only. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes. **The service collects user dialogue data for future research.**
The demo works better on desktop devices with a wide screen.
### Choose two models to chat with
""" + model_description_md)
states = [gr.State() for _ in range(num_models)]
model_selectors = [None] * num_models
chatbots = [None] * num_models
notice = gr.Markdown(notice_markdown, elem_id="notice_markdown")
with gr.Box(elem_id="share-region-named"):
with gr.Row():
for i in range(num_models):
with gr.Column():
model_selectors[i] = gr.Dropdown(
choices=models,
value=models[i] if len(models) > i else "",
interactive=True,
show_label=False,
).style(container=False)
with gr.Row():
for i in range(num_models):
label = "Model A" if i == 0 else "Model B"
with gr.Column():
chatbots[i] = grChatbot(label=label, elem_id=f"chatbot{i}",
visible=False).style(height=550)
with gr.Box() as button_row:
with gr.Row():
leftvote_btn = gr.Button(value="👈 A is better", interactive=False)
rightvote_btn = gr.Button(value="👉 B is better", interactive=False)
tie_btn = gr.Button(value="🤝 Tie", interactive=False)
bothbad_btn = gr.Button(value="👎 Both are bad", interactive=False)
with gr.Row():
with gr.Column(scale=20):
textbox = gr.Textbox(
show_label=False,
placeholder="Enter text and press ENTER",
visible=False,
).style(container=False)
with gr.Column(scale=1, min_width=50):
send_btn = gr.Button(value="Send", visible=False)
with gr.Row() as button_row2:
regenerate_btn = gr.Button(value="🔄 Regenerate", interactive=False)
clear_btn = gr.Button(value="🗑️ Clear history", interactive=False)
share_btn = gr.Button(value="📷 Share")
with gr.Accordion("Parameters", open=False, visible=True) as parameter_row:
temperature = gr.Slider(
minimum=0.0,
maximum=1.0,
value=0.7,
step=0.1,
interactive=True,
label="Temperature",
)
max_output_tokens = gr.Slider(
minimum=0,
maximum=1024,
value=512,
step=64,
interactive=True,
label="Max output tokens",
)
gr.Markdown(learn_more_md)
# Register listeners
btn_list = [leftvote_btn, rightvote_btn, tie_btn, bothbad_btn,
regenerate_btn, clear_btn]
leftvote_btn.click(
leftvote_last_response,
states + model_selectors,
[textbox, leftvote_btn, rightvote_btn, tie_btn, bothbad_btn],
)
rightvote_btn.click(
rightvote_last_response,
states + model_selectors,
[textbox, leftvote_btn, rightvote_btn, tie_btn, bothbad_btn],
)
tie_btn.click(
tievote_last_response,
states + model_selectors,
[textbox, leftvote_btn, rightvote_btn, tie_btn, bothbad_btn],
)
bothbad_btn.click(
bothbad_vote_last_response,
states + model_selectors,
[textbox, leftvote_btn, rightvote_btn, tie_btn, bothbad_btn],
)
regenerate_btn.click(
regenerate, states, states + chatbots + [textbox] + btn_list
).then(
http_bot_all,
states + model_selectors + [temperature, max_output_tokens],
states + chatbots + btn_list,
)
clear_btn.click(clear_history, None, states + chatbots + [textbox] + btn_list)
share_js="""
function (a, b, c, d) {
const captureElement = document.querySelector('#share-region-named');
html2canvas(captureElement)
.then(canvas => {
canvas.style.display = 'none'
document.body.appendChild(canvas)
return canvas
})
.then(canvas => {
const image = canvas.toDataURL('image/png')
const a = document.createElement('a')
a.setAttribute('download', 'chatbot-arena.png')
a.setAttribute('href', image)
a.click()
canvas.remove()
});
return [a, b, c, d];
}
"""
share_btn.click(share_click, states + model_selectors, [], _js=share_js)
for i in range(num_models):
model_selectors[i].change(
clear_history, None, states + chatbots + [textbox] + btn_list
)
textbox.submit(
add_text, states + [textbox], states + chatbots + [textbox] + btn_list
).then(
http_bot_all,
states + model_selectors + [temperature, max_output_tokens],
states + chatbots + btn_list,
)
send_btn.click(
add_text, states + [textbox], states + chatbots + [textbox] + btn_list
).then(
http_bot_all,
states + model_selectors + [temperature, max_output_tokens],
states + chatbots + btn_list,
)
return (
states,
model_selectors,
chatbots,
textbox,
send_btn,
button_row,
button_row2,
parameter_row,
) | ytchat | /ytchat-0.0.16-py3-none-any.whl/fastchat/serve/gradio_block_arena_named.py | gradio_block_arena_named.py |
import argparse
import os
import re
from prompt_toolkit import PromptSession
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
from prompt_toolkit.completion import WordCompleter
from prompt_toolkit.history import InMemoryHistory
from rich.console import Console
from rich.markdown import Markdown
from rich.live import Live
from fastchat.serve.inference import chat_loop, ChatIO, add_model_args
class SimpleChatIO(ChatIO):
def prompt_for_input(self, role) -> str:
return input(f"{role}: ")
def prompt_for_output(self, role: str):
print(f"{role}: ", end="", flush=True)
def stream_output(self, output_stream):
pre = 0
for outputs in output_stream:
outputs = outputs.strip().split(" ")
now = len(outputs) - 1
if now > pre:
print(" ".join(outputs[pre:now]), end=" ", flush=True)
pre = now
print(" ".join(outputs[pre:]), flush=True)
return " ".join(outputs)
class RichChatIO(ChatIO):
def __init__(self):
self._prompt_session = PromptSession(history=InMemoryHistory())
self._completer = WordCompleter(
words=["!exit", "!reset"], pattern=re.compile("$")
)
self._console = Console()
def prompt_for_input(self, role) -> str:
self._console.print(f"[bold]{role}:")
# TODO(suquark): multiline input has some issues. fix it later.
prompt_input = self._prompt_session.prompt(
completer=self._completer,
multiline=False,
auto_suggest=AutoSuggestFromHistory(),
key_bindings=None,
)
self._console.print()
return prompt_input
def prompt_for_output(self, role: str):
self._console.print(f"[bold]{role}:")
def stream_output(self, output_stream):
"""Stream output from a role."""
# TODO(suquark): the console flickers when there is a code block
# above it. We need to cut off "live" when a code block is done.
# Create a Live context for updating the console output
with Live(console=self._console, refresh_per_second=4) as live:
# Read lines from the stream
for outputs in output_stream:
if not outputs:
continue
# Render the accumulated text as Markdown
# NOTE: this is a workaround for the rendering "unstandard markdown"
# in rich. The chatbots output treat "\n" as a new line for
# better compatibility with real-world text. However, rendering
# in markdown would break the format. It is because standard markdown
# treat a single "\n" in normal text as a space.
# Our workaround is adding two spaces at the end of each line.
# This is not a perfect solution, as it would
# introduce trailing spaces (only) in code block, but it works well
# especially for console output, because in general the console does not
# care about trailing spaces.
lines = []
for line in outputs.splitlines():
lines.append(line)
if line.startswith("```"):
# Code block marker - do not add trailing spaces, as it would
# break the syntax highlighting
lines.append("\n")
else:
lines.append(" \n")
markdown = Markdown("".join(lines))
# Update the Live console output
live.update(markdown)
self._console.print()
return outputs
def main(args):
if args.gpus:
if len(args.gpus.split(",")) < args.num_gpus:
raise ValueError(
f"Larger --num-gpus ({args.num_gpus}) than --gpus {args.gpus}!"
)
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus
if args.style == "simple":
chatio = SimpleChatIO()
elif args.style == "rich":
chatio = RichChatIO()
else:
raise ValueError(f"Invalid style for console: {args.style}")
try:
chat_loop(
args.model_path,
args.device,
args.num_gpus,
args.max_gpu_memory,
args.load_8bit,
args.cpu_offloading,
args.conv_template,
args.temperature,
args.max_new_tokens,
chatio,
args.debug,
)
except KeyboardInterrupt:
print("exit...")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
add_model_args(parser)
parser.add_argument(
"--conv-template", type=str, default=None, help="Conversation prompt template."
)
parser.add_argument("--temperature", type=float, default=0.7)
parser.add_argument("--max-new-tokens", type=int, default=512)
parser.add_argument(
"--style",
type=str,
default="simple",
choices=["simple", "rich"],
help="Display style.",
)
parser.add_argument("--debug", action="store_true", help="Print debug information")
args = parser.parse_args()
main(args) | ytchat | /ytchat-0.0.16-py3-none-any.whl/fastchat/serve/cli.py | cli.py |
import abc
import gc
import math
from typing import Optional
import sys
import warnings
import psutil
import torch
from transformers import (
AutoTokenizer,
AutoModelForCausalLM,
LlamaTokenizer,
LlamaForCausalLM,
AutoModel,
AutoModelForSeq2SeqLM,
T5Tokenizer,
AutoConfig,
)
from fastchat.conversation import (
conv_templates,
get_default_conv_template,
SeparatorStyle,
)
from fastchat.serve.compression import load_compress_model
from fastchat.serve.monkey_patch_non_inplace import (
replace_llama_attn_with_non_inplace_operations,
)
from fastchat.serve.serve_chatglm import chatglm_generate_stream
def raise_warning_for_incompatible_cpu_offloading_configuration(device: str, load_8bit: bool, cpu_offloading: bool):
if cpu_offloading:
if not load_8bit:
warnings.warn("The cpu-offloading feature can only be used while also using 8-bit-quantization.\n"
"Use '--load-8bit' to enable 8-bit-quantization\n"
"Continuing without cpu-offloading enabled\n")
return False
if not "linux" in sys.platform:
warnings.warn("CPU-offloading is only supported on linux-systems due to the limited compatability with the bitsandbytes-package\n"
"Continuing without cpu-offloading enabled\n")
return False
if device != "cuda":
warnings.warn("CPU-offloading is only enabled when using CUDA-devices\n"
"Continuing without cpu-offloading enabled\n")
return False
return cpu_offloading
def get_gpu_memory(max_gpus=None):
gpu_memory = []
num_gpus = (
torch.cuda.device_count()
if max_gpus is None
else min(max_gpus, torch.cuda.device_count())
)
for gpu_id in range(num_gpus):
with torch.cuda.device(gpu_id):
device = torch.cuda.current_device()
gpu_properties = torch.cuda.get_device_properties(device)
total_memory = gpu_properties.total_memory / (1024**3)
allocated_memory = torch.cuda.memory_allocated() / (1024**3)
available_memory = total_memory - allocated_memory
gpu_memory.append(available_memory)
return gpu_memory
def raise_warning_for_old_weights(model_path, model):
if "vicuna" in model_path.lower() and isinstance(model, LlamaForCausalLM):
if model.model.vocab_size > 32000:
warnings.warn(
"\nYou are probably using the old Vicuna-v0 model, "
"which will generate unexpected results with the "
"current fastchat.\nYou can try one of the following methods:\n"
"1. Upgrade your weights to the new Vicuna-v1.1: https://github.com/lm-sys/FastChat#vicuna-weights.\n"
"2. Use the old conversation template by `python3 -m fastchat.serve.cli --model-path /path/to/vicuna-v0 --conv-template conv_one_shot`\n"
"3. Downgrade fschat to fschat==0.1.10 (Not recommonded).\n"
)
def load_model(
model_path, device, num_gpus, max_gpu_memory=None, load_8bit=False, cpu_offloading=False, debug=False
):
cpu_offloading = raise_warning_for_incompatible_cpu_offloading_configuration(device, load_8bit, cpu_offloading)
if device == "cpu":
kwargs = {"torch_dtype": torch.float32}
elif device == "cuda":
kwargs = {"torch_dtype": torch.float16}
if num_gpus != 1:
kwargs["device_map"] = "auto"
if max_gpu_memory is None:
kwargs[
"device_map"
] = "sequential" # This is important for not the same VRAM sizes
available_gpu_memory = get_gpu_memory(num_gpus)
kwargs["max_memory"] = {
i: str(int(available_gpu_memory[i] * 0.85)) + "GiB"
for i in range(num_gpus)
}
else:
kwargs["max_memory"] = {i: max_gpu_memory for i in range(num_gpus)}
print("init_kwargs", kwargs)
elif device == "mps":
kwargs = {"torch_dtype": torch.float16}
# Avoid bugs in mps backend by not using in-place operations.
replace_llama_attn_with_non_inplace_operations()
else:
raise ValueError(f"Invalid device: {device}")
if cpu_offloading:
# raises an error on incompatible platforms
from transformers import BitsAndBytesConfig
if "max_memory" in kwargs:
kwargs["max_memory"]["cpu"] = str(math.floor(psutil.virtual_memory().available / 2**20)) + 'Mib'
kwargs["quantization_config"] = BitsAndBytesConfig(load_in_8bit_fp32_cpu_offload=cpu_offloading)
kwargs["load_in_8bit"] = load_8bit
elif load_8bit:
if num_gpus != 1:
warnings.warn("8-bit quantization is not supported for multi-gpu inference.")
else:
return load_compress_model(model_path=model_path,
device=device, torch_dtype=kwargs["torch_dtype"])
if "chatglm" in model_path:
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = AutoModel.from_pretrained(model_path, trust_remote_code=True, **kwargs)
elif "dolly" in model_path:
tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True)
model = AutoModelForCausalLM.from_pretrained(
model_path, low_cpu_mem_usage=True, **kwargs
)
# 50277 means "### End"
tokenizer.eos_token_id = 50277
elif "pythia" in model_path or "stablelm" in model_path:
tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True)
model = AutoModelForCausalLM.from_pretrained(
model_path, low_cpu_mem_usage=True, **kwargs
)
elif "t5" in model_path:
model = AutoModelForSeq2SeqLM.from_pretrained(model_path,
low_cpu_mem_usage=True, **kwargs)
tokenizer = T5Tokenizer.from_pretrained(model_path, use_fast=False)
elif "RWKV-4" in model_path:
from fastchat.serve.rwkv_model import RwkvModel
model = RwkvModel(model_path)
tokenizer = AutoTokenizer.from_pretrained('EleutherAI/pythia-160m', use_fast=True)
else:
tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
model = AutoModelForCausalLM.from_pretrained(
model_path, low_cpu_mem_usage=True, **kwargs
)
raise_warning_for_old_weights(model_path, model)
if (device == "cuda" and num_gpus == 1 and not cpu_offloading) or device == "mps":
model.to(device)
if debug:
print(model)
return model, tokenizer
@torch.inference_mode()
def generate_stream(
model, tokenizer, params, device, context_len=2048, stream_interval=2
):
prompt = params["prompt"]
len_prompt = len(prompt)
temperature = float(params.get("temperature", 1.0))
max_new_tokens = int(params.get("max_new_tokens", 256))
stop_str = params.get("stop", None)
echo = params.get("echo", True)
stop_token_ids = params.get("stop_token_ids", None) or []
stop_token_ids.append(tokenizer.eos_token_id)
input_ids = tokenizer(prompt).input_ids
input_echo_len = len(input_ids)
output_ids = list(input_ids)
if model.config.is_encoder_decoder:
max_src_len = context_len
else:
max_src_len = context_len - max_new_tokens - 8
input_ids = input_ids[-max_src_len:]
if model.config.is_encoder_decoder:
encoder_output = model.encoder(input_ids=torch.as_tensor([input_ids],
device=device))[0]
start_ids = torch.as_tensor([[model.generation_config.decoder_start_token_id]],
dtype=torch.int64, device=device)
for i in range(max_new_tokens):
if i == 0:
if model.config.is_encoder_decoder:
out = model.decoder(input_ids=start_ids,
encoder_hidden_states=encoder_output,
use_cache=True)
logits = model.lm_head(out[0])
else:
out = model(torch.as_tensor([input_ids], device=device), use_cache=True)
logits = out.logits
past_key_values = out.past_key_values
else:
if model.config.is_encoder_decoder:
out = model.decoder(input_ids=torch.as_tensor([[token]], device=device),
encoder_hidden_states=encoder_output,
use_cache=True,
past_key_values=past_key_values)
logits = model.lm_head(out[0])
else:
out = model(
input_ids=torch.as_tensor([[token]], device=device),
use_cache=True,
past_key_values=past_key_values,
)
logits = out.logits
past_key_values = out.past_key_values
last_token_logits = logits[0][-1]
if device == "mps":
# Switch to CPU by avoiding some bugs in mps backend.
last_token_logits = last_token_logits.float().to("cpu")
if temperature < 1e-4:
token = int(torch.argmax(last_token_logits))
else:
probs = torch.softmax(last_token_logits / temperature, dim=-1)
token = int(torch.multinomial(probs, num_samples=1))
output_ids.append(token)
if token in stop_token_ids:
stopped = True
else:
stopped = False
if i % stream_interval == 0 or i == max_new_tokens - 1 or stopped:
if echo:
tmp_output_ids = output_ids
rfind_start = len_prompt
else:
tmp_output_ids = output_ids[input_echo_len:]
rfind_start = 0
output = tokenizer.decode(tmp_output_ids, skip_special_tokens=True,
spaces_between_special_tokens=False)
if stop_str:
pos = output.rfind(stop_str, rfind_start)
if pos != -1:
output = output[:pos]
stopped = True
yield output
if stopped:
break
del past_key_values, out
gc.collect()
torch.cuda.empty_cache()
class ChatIO(abc.ABC):
@abc.abstractmethod
def prompt_for_input(self, role: str) -> str:
"""Prompt for input from a role."""
@abc.abstractmethod
def prompt_for_output(self, role: str):
"""Prompt for output from a role."""
@abc.abstractmethod
def stream_output(self, output_stream):
"""Stream output."""
def chat_loop(
model_path: str,
device: str,
num_gpus: int,
max_gpu_memory: str,
load_8bit: bool,
cpu_offloading: bool,
conv_template: Optional[str],
temperature: float,
max_new_tokens: int,
chatio: ChatIO,
debug: bool,
):
# Model
model, tokenizer = load_model(
model_path, device, num_gpus, max_gpu_memory, load_8bit, cpu_offloading, debug
)
is_chatglm = "chatglm" in str(type(model)).lower()
# Chat
if conv_template:
conv = conv_templates[conv_template].copy()
else:
conv = get_default_conv_template(model_path).copy()
while True:
try:
inp = chatio.prompt_for_input(conv.roles[0])
except EOFError:
inp = ""
if not inp:
print("exit...")
break
conv.append_message(conv.roles[0], inp)
conv.append_message(conv.roles[1], None)
if is_chatglm:
generate_stream_func = chatglm_generate_stream
prompt = conv.messages[conv.offset:]
else:
generate_stream_func = generate_stream
prompt = conv.get_prompt()
gen_params = {
"model": model_path,
"prompt": prompt,
"temperature": temperature,
"max_new_tokens": max_new_tokens,
"stop": conv.stop_str,
"stop_token_ids": conv.stop_token_ids,
"echo": False,
}
chatio.prompt_for_output(conv.roles[1])
output_stream = generate_stream_func(model, tokenizer, gen_params, device)
outputs = chatio.stream_output(output_stream)
# NOTE: strip is important to align with the training data.
conv.messages[-1][-1] = outputs.strip()
if debug:
print("\n", {"prompt": prompt, "outputs": outputs}, "\n")
def add_model_args(parser):
parser.add_argument(
"--model-path",
type=str,
default="lmsys/fastchat-t5-3b-v1.0",
help="The path to the weights. This can be a local folder or a Hugging Face repo ID.",
)
parser.add_argument(
"--device", type=str, choices=["cpu", "cuda", "mps"], default="cuda",
help="The device type"
)
parser.add_argument(
"--gpus",
type=str,
default=None,
help="A single GPU like 1 or multiple GPUs like 0,2"
)
parser.add_argument("--num-gpus", type=int, default=1)
parser.add_argument(
"--max-gpu-memory",
type=str,
help="The maximum memory per gpu. Use a string like '13Gib'",
)
parser.add_argument(
"--load-8bit", action="store_true", help="Use 8-bit quantization"
)
parser.add_argument(
"--cpu-offloading", action="store_true", help="Only when using 8-bit quantization: Offload excess weights to the CPU that don't fit on the GPU"
) | ytchat | /ytchat-0.0.16-py3-none-any.whl/fastchat/serve/inference.py | inference.py |
# fastchat Nginx Gateway
## Purpose of the Gateway
The Nginx gateway serves the following purposes:
1. Protects Gradio servers by acting as a firewall.
2. Facilitates dynamic mounting and unmounting of Gradio servers.
3. Provides load balancing for Gradio servers.
4. Offers additional security features, such as total connection limit.
5. Reduces attack surface by requiring only a single public port to be exposed for serving.
## Deployment and Updating of the Gateway
### Installing Nginx
On Debian-based distributions (e.g., Ubuntu):
```bash
sudo apt update
sudo apt install nginx
```
On Red Hat-based distributions (e.g., CentOS, Fedora):
```bash
sudo yum install epel-release
sudo yum install nginx
```
### Deployment
Copy `nginx.conf` to `/etc/nginx/nginx.conf` (need sudo permission).
Replace the port number 7860 in `server localhost:7860` with the port where you deploy the Gradio web server.
Modify `upstream websocket` to configure Gradio servers behind the gateway.
Lastly, update Nginx.
### HTTPS Deployment with a Public Domain URL
Make sure you obtain the HTTPS certificate and the private key used to generate the certificate.
Fill the addresses to your certificate and private key in the `[PATH_TO_SSL_CERT]` and `[PATH_TO_PRIVATE_KEY]` fields.
If you have your own domain url to serve the chatbot, replace the chat.lmsys.org url with your own domain url.
### Updating
Every time when `/etc/nginx/nginx.conf` is modified, you need to update the Nginx service:
```bash
sudo nginx -t # check `/etc/nginx/nginx.conf`
sudo systemctl reload nginx # restart Nginx service to load the new config
sudo systemctl status nginx # check the status of the Nginx service. It should be active (running).
```
| ytchat | /ytchat-0.0.16-py3-none-any.whl/fastchat/serve/gateway/README.md | README.md |
import copy
from dataclasses import dataclass, field
import json
import pathlib
from typing import Dict, Optional, Sequence
import numpy as np
import torch
from torch.utils.data import Dataset
import transformers
from transformers import Trainer
from transformers.trainer_pt_utils import LabelSmoother
from fastchat.conversation import get_default_conv_template, SeparatorStyle
IGNORE_TOKEN_ID = LabelSmoother.ignore_index
@dataclass
class ModelArguments:
model_name_or_path: Optional[str] = field(default="facebook/opt-125m")
@dataclass
class DataArguments:
data_path: str = field(
default=None, metadata={"help": "Path to the training data."}
)
lazy_preprocess: bool = False
@dataclass
class TrainingArguments(transformers.TrainingArguments):
cache_dir: Optional[str] = field(default=None)
optim: str = field(default="adamw_torch")
model_max_length: int = field(
default=512,
metadata={
"help": "Maximum sequence length. Sequences will be right padded (and possibly truncated)."
},
)
local_rank = None
def rank0_print(*args):
if local_rank == 0:
print(*args)
def safe_save_model_for_hf_trainer(trainer: transformers.Trainer, output_dir: str):
"""Collects the state dict and dump to disk."""
state_dict = trainer.model.state_dict()
if trainer.args.should_save:
cpu_state_dict = {key: value.cpu() for key, value in state_dict.items()}
del state_dict
trainer._save(output_dir, state_dict=cpu_state_dict) # noqa
def preprocess(
sources,
tokenizer: transformers.PreTrainedTokenizer,
) -> Dict:
conv = get_default_conv_template("vicuna").copy()
roles = {"human": conv.roles[0], "gpt": conv.roles[1]}
# Apply prompt templates
conversations = []
for i, source in enumerate(sources):
if roles[source[0]["from"]] != conv.roles[0]:
# Skip the first one if it is not from human
source = source[1:]
conv.messages = []
for j, sentence in enumerate(source):
role = roles[sentence["from"]]
assert role == conv.roles[j % 2], f"{i}"
conv.append_message(role, sentence["value"])
conversations.append(conv.get_prompt())
# Tokenize conversations
input_ids = tokenizer(
conversations,
return_tensors="pt",
padding="max_length",
max_length=tokenizer.model_max_length,
truncation=True,
).input_ids
targets = input_ids.clone()
assert conv.sep_style == SeparatorStyle.ADD_COLON_TWO
# Mask targets
sep = conv.sep + conv.roles[1] + ": "
for conversation, target in zip(conversations, targets):
total_len = int(target.ne(tokenizer.pad_token_id).sum())
rounds = conversation.split(conv.sep2)
cur_len = 1
target[:cur_len] = IGNORE_TOKEN_ID
for i, rou in enumerate(rounds):
if rou == "":
break
parts = rou.split(sep)
if len(parts) != 2:
break
parts[0] += sep
round_len = len(tokenizer(rou).input_ids)
instruction_len = len(tokenizer(parts[0]).input_ids) - 2
target[cur_len : cur_len + instruction_len] = IGNORE_TOKEN_ID
cur_len += round_len
target[cur_len:] = IGNORE_TOKEN_ID
if False:
z = target.clone()
z = torch.where(z == IGNORE_TOKEN_ID, tokenizer.unk_token_id, z)
rank0_print(tokenizer.decode(z))
if cur_len < tokenizer.model_max_length:
if cur_len != total_len:
target[:] = IGNORE_TOKEN_ID
rank0_print(
f"WARNING: tokenization mismatch: {cur_len} vs. {total_len}."
f" (ignored)"
)
return dict(
input_ids=input_ids,
labels=targets,
attention_mask=input_ids.ne(tokenizer.pad_token_id),
)
class SupervisedDataset(Dataset):
"""Dataset for supervised fine-tuning."""
def __init__(self, raw_data, tokenizer: transformers.PreTrainedTokenizer):
super(SupervisedDataset, self).__init__()
rank0_print("Formatting inputs...")
sources = [example["conversations"] for example in raw_data]
data_dict = preprocess(sources, tokenizer)
self.input_ids = data_dict["input_ids"]
self.labels = data_dict["labels"]
self.attention_mask = data_dict["attention_mask"]
def __len__(self):
return len(self.input_ids)
def __getitem__(self, i) -> Dict[str, torch.Tensor]:
return dict(
input_ids=self.input_ids[i],
labels=self.labels[i],
attention_mask=self.attention_mask[i],
)
class LazySupervisedDataset(Dataset):
"""Dataset for supervised fine-tuning."""
def __init__(self, raw_data, tokenizer: transformers.PreTrainedTokenizer):
super(LazySupervisedDataset, self).__init__()
self.tokenizer = tokenizer
rank0_print("Formatting inputs...Skip in lazy mode")
self.tokenizer = tokenizer
self.raw_data = raw_data
self.cached_data_dict = {}
def __len__(self):
return len(self.raw_data)
def __getitem__(self, i) -> Dict[str, torch.Tensor]:
if i in self.cached_data_dict:
return self.cached_data_dict[i]
ret = preprocess([self.raw_data[i]["conversations"]], self.tokenizer)
ret = dict(
input_ids=ret["input_ids"][0],
labels=ret["labels"][0],
attention_mask=ret["attention_mask"][0],
)
self.cached_data_dict[i] = ret
return ret
def make_supervised_data_module(
tokenizer: transformers.PreTrainedTokenizer, data_args
) -> Dict:
"""Make dataset and collator for supervised fine-tuning."""
dataset_cls = (
LazySupervisedDataset if data_args.lazy_preprocess else SupervisedDataset
)
rank0_print("Loading data...")
raw_data = json.load(open(data_args.data_path, "r"))
# Split train/test
perm = np.random.permutation(len(raw_data))
split = int(len(perm) * 0.98)
train_indices = perm[:split]
eval_indices = perm[split:]
train_raw_data = [raw_data[i] for i in train_indices]
eval_raw_data = [raw_data[i] for i in eval_indices]
rank0_print(f"#train {len(train_raw_data)}, #eval {len(eval_raw_data)}")
train_dataset = dataset_cls(train_raw_data, tokenizer=tokenizer)
eval_dataset = dataset_cls(eval_raw_data, tokenizer=tokenizer)
return dict(train_dataset=train_dataset, eval_dataset=eval_dataset)
def train():
global local_rank
parser = transformers.HfArgumentParser(
(ModelArguments, DataArguments, TrainingArguments)
)
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
local_rank = training_args.local_rank
model = transformers.AutoModelForCausalLM.from_pretrained(
model_args.model_name_or_path,
cache_dir=training_args.cache_dir,
)
model.config.use_cache = False
tokenizer = transformers.AutoTokenizer.from_pretrained(
model_args.model_name_or_path,
cache_dir=training_args.cache_dir,
model_max_length=training_args.model_max_length,
padding_side="right",
use_fast=False,
)
tokenizer.pad_token = tokenizer.unk_token
data_module = make_supervised_data_module(tokenizer=tokenizer, data_args=data_args)
trainer = Trainer(
model=model, tokenizer=tokenizer, args=training_args, **data_module
)
if list(pathlib.Path(training_args.output_dir).glob("checkpoint-*")):
trainer.train(resume_from_checkpoint=True)
else:
trainer.train()
trainer.save_state()
safe_save_model_for_hf_trainer(trainer=trainer, output_dir=training_args.output_dir)
if __name__ == "__main__":
train() | ytchat | /ytchat-0.0.16-py3-none-any.whl/fastchat/train/train.py | train.py |
# Adopted from tatsu-lab@stanford_alpaca. Below is the original copyright:
# Copyright 2023 Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
import logging
import pathlib
import typing
from deepspeed import zero
from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
from peft import LoraConfig, get_peft_model
import transformers
from transformers import Trainer
from fastchat.train.train import (
DataArguments,
ModelArguments,
TrainingArguments,
make_supervised_data_module,
)
from fastchat.train.llama_flash_attn_monkey_patch import (
replace_llama_attn_with_flash_attn,
)
replace_llama_attn_with_flash_attn()
@dataclass
class LoraArguments:
lora_r: int = 8
lora_alpha: int = 16
lora_dropout: float = 0.05
lora_target_modules: typing.List[str] = field(
default_factory=lambda: ["q_proj", "v_proj"]
)
lora_weight_path: str = ""
bias: str = "none"
def maybe_zero_3(param):
if hasattr(param, "ds_id"):
assert param.ds_status == ZeroParamStatus.NOT_AVAILABLE
with zero.GatheredParameters([param]):
param = param.data.cpu().clone().detach()
return param
# Borrowed from peft.utils.get_peft_model_state_dict
def get_peft_state_maybe_zero_3(state_dict, bias):
if bias == "none":
to_return = {
k: state_dict[k].cpu().clone().detach() for k in state_dict if "lora_" in k
}
elif bias == "all":
to_return = {
k: state_dict[k] for k in state_dict if "lora_" in k or "bias" in k
}
elif bias == "lora_only":
to_return = {}
for k in state_dict:
if "lora_" in k:
to_return[k] = state_dict[k]
bias_name = k.split("lora_")[0] + "bias"
if bias_name in state_dict:
to_return[bias_name] = state_dict[bias_name]
else:
raise NotImplementedError
to_return = {k: maybe_zero_3(v) for k, v in to_return.items()}
return to_return
def train():
parser = transformers.HfArgumentParser(
(ModelArguments, DataArguments, TrainingArguments, LoraArguments)
)
(
model_args,
data_args,
training_args,
lora_args,
) = parser.parse_args_into_dataclasses()
model = transformers.AutoModelForCausalLM.from_pretrained(
model_args.model_name_or_path,
cache_dir=training_args.cache_dir,
)
lora_config = LoraConfig(
r=lora_args.lora_r,
lora_alpha=lora_args.lora_alpha,
target_modules=lora_args.lora_target_modules,
lora_dropout=lora_args.lora_dropout,
bias=lora_args.bias,
task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora_config)
if training_args.deepspeed is not None and training_args.local_rank == 0:
model.print_trainable_parameters()
if training_args.gradient_checkpointing:
logging.warning(
"gradient checkpointing with lora makes requires_grad "
"incorrect and needs a monkey patch in Trainer or the "
"wrapped model's forward. ref: "
"https://github.com/lm-sys/FastChat/pull/138#issuecomment-1509172198"
)
tokenizer = transformers.AutoTokenizer.from_pretrained(
model_args.model_name_or_path,
cache_dir=training_args.cache_dir,
model_max_length=training_args.model_max_length,
padding_side="right",
use_fast=False,
)
tokenizer.pad_token = tokenizer.unk_token
data_module = make_supervised_data_module(tokenizer=tokenizer, data_args=data_args)
trainer = Trainer(
model=model, tokenizer=tokenizer, args=training_args, **data_module
)
model.config.use_cache = False
if list(pathlib.Path(training_args.output_dir).glob("checkpoint-*")):
trainer.train(resume_from_checkpoint=True)
else:
trainer.train()
trainer.save_state()
# Save states. Weights might be a placeholder in zero3 and need a gather
state_dict = get_peft_state_maybe_zero_3(model.state_dict(), lora_args.bias)
if training_args.local_rank == 0:
model.save_pretrained(training_args.output_dir, state_dict=state_dict)
if __name__ == "__main__":
train() | ytchat | /ytchat-0.0.16-py3-none-any.whl/fastchat/train/train_lora.py | train_lora.py |
from typing import List, Optional, Tuple
import torch
from torch import nn
import transformers
from transformers.models.llama.modeling_llama import apply_rotary_pos_emb
from einops import rearrange
from flash_attn.flash_attn_interface import flash_attn_unpadded_qkvpacked_func
from flash_attn.bert_padding import unpad_input, pad_input
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
output_attentions: bool = False,
use_cache: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel
attention_mask: [bsz, q_len]
"""
bsz, q_len, _ = hidden_states.size()
query_states = (
self.q_proj(hidden_states)
.view(bsz, q_len, self.num_heads, self.head_dim)
.transpose(1, 2)
)
key_states = (
self.k_proj(hidden_states)
.view(bsz, q_len, self.num_heads, self.head_dim)
.transpose(1, 2)
)
value_states = (
self.v_proj(hidden_states)
.view(bsz, q_len, self.num_heads, self.head_dim)
.transpose(1, 2)
)
# [bsz, q_len, nh, hd]
# [bsz, nh, q_len, hd]
kv_seq_len = key_states.shape[-2]
assert past_key_value is None, "past_key_value is not supported"
cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
query_states, key_states = apply_rotary_pos_emb(
query_states, key_states, cos, sin, position_ids
)
# [bsz, nh, t, hd]
assert not output_attentions, "output_attentions is not supported"
assert not use_cache, "use_cache is not supported"
# Flash attention codes from
# https://github.com/HazyResearch/flash-attention/blob/main/flash_attn/flash_attention.py
# transform the data into the format required by flash attention
qkv = torch.stack(
[query_states, key_states, value_states], dim=2
) # [bsz, nh, 3, q_len, hd]
qkv = qkv.transpose(1, 3) # [bsz, q_len, 3, nh, hd]
# We have disabled _prepare_decoder_attention_mask in LlamaModel
# the attention_mask should be the same as the key_padding_mask
key_padding_mask = attention_mask
if key_padding_mask is None:
qkv = rearrange(qkv, "b s ... -> (b s) ...")
max_s = q_len
cu_q_lens = torch.arange(
0, (bsz + 1) * q_len, step=q_len, dtype=torch.int32, device=qkv.device
)
output = flash_attn_unpadded_qkvpacked_func(
qkv, cu_q_lens, max_s, 0.0, softmax_scale=None, causal=True
)
output = rearrange(output, "(b s) ... -> b s ...", b=bsz)
else:
nheads = qkv.shape[-2]
x = rearrange(qkv, "b s three h d -> b s (three h d)")
x_unpad, indices, cu_q_lens, max_s = unpad_input(x, key_padding_mask)
x_unpad = rearrange(
x_unpad, "nnz (three h d) -> nnz three h d", three=3, h=nheads
)
output_unpad = flash_attn_unpadded_qkvpacked_func(
x_unpad, cu_q_lens, max_s, 0.0, softmax_scale=None, causal=True
)
output = rearrange(
pad_input(
rearrange(output_unpad, "nnz h d -> nnz (h d)"), indices, bsz, q_len
),
"b s (h d) -> b s h d",
h=nheads,
)
return self.o_proj(rearrange(output, "b s h d -> b s (h d)")), None, None
# Disable the transformation of the attention mask in LlamaModel as the flash attention
# requires the attention mask to be the same as the key_padding_mask
def _prepare_decoder_attention_mask(
self, attention_mask, input_shape, inputs_embeds, past_key_values_length
):
# [bsz, seq_len]
return attention_mask
def replace_llama_attn_with_flash_attn():
transformers.models.llama.modeling_llama.LlamaModel._prepare_decoder_attention_mask = (
_prepare_decoder_attention_mask
)
transformers.models.llama.modeling_llama.LlamaAttention.forward = forward | ytchat | /ytchat-0.0.16-py3-none-any.whl/fastchat/train/llama_flash_attn_monkey_patch.py | llama_flash_attn_monkey_patch.py |
from collections import defaultdict
import copy
import os
from dataclasses import dataclass, field
import random
import json
import logging
import pathlib
from typing import Dict, Optional, Sequence
import torch
import transformers
from torch.utils.data import Dataset
from transformers import Trainer, AddedToken
from fastchat.conversation import conv_one_shot as default_conversation
# TODO: import and use code from ../data/dataset.py
IGNORE_INDEX = -100
DEFAULT_PAD_TOKEN = "[PAD]"
DEFAULT_EOS_TOKEN = "</s>"
DEFAULT_BOS_TOKEN = "</s>"
DEFAULT_UNK_TOKEN = "</s>"
@dataclass
class ModelArguments:
model_name_or_path: Optional[str] = field(default="facebook/opt-125m")
@dataclass
class DataArguments:
data_path: str = field(default=None,
metadata={"help": "Path to the training data."})
lazy_preprocess: bool = False
num_data: int = -1
preprocessed_path: str = field(default=None,
metadata={"help": "Path to the preprocessed training data."})
@dataclass
class TrainingArguments(transformers.TrainingArguments):
cache_dir: Optional[str] = field(default=None)
optim: str = field(default="adamw_torch")
model_max_length: int = field(
default=2048,
metadata={
"help":
"Maximum sequence length. Sequences will be right padded (and possibly truncated)."
},
)
def safe_save_model_for_hf_trainer(trainer: transformers.Trainer,
output_dir: str):
"""Collects the state dict and dump to disk."""
state_dict = trainer.model.state_dict()
if trainer.args.should_save:
cpu_state_dict = {
key: value.cpu()
for key, value in state_dict.items()
}
# potential bug for T5 model
del state_dict
trainer._save(output_dir, state_dict=cpu_state_dict) # noqa
def smart_tokenizer_and_embedding_resize(
special_tokens_dict: Dict,
other_tokens,
tokenizer: transformers.PreTrainedTokenizer,
model: transformers.PreTrainedModel,
):
"""Resize tokenizer and embedding.
Note: This is the unoptimized version that may make your embedding size not be divisible by 64.
"""
num_new_tokens = tokenizer.add_special_tokens(special_tokens_dict)
for new_token in other_tokens:
num_new_tokens += tokenizer.add_tokens(AddedToken(new_token, normalized=False))
model.resize_token_embeddings(len(tokenizer))
if num_new_tokens > 0:
input_embeddings = model.get_input_embeddings().weight.data
output_embeddings = model.get_output_embeddings().weight.data
input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(
dim=0, keepdim=True)
output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(
dim=0, keepdim=True)
input_embeddings[-num_new_tokens:] = input_embeddings_avg
output_embeddings[-num_new_tokens:] = output_embeddings_avg
def _tokenize_fn(strings: Sequence[str],
tokenizer: transformers.PreTrainedTokenizer) -> Dict:
"""Tokenize a list of strings."""
tokenized_list = [
tokenizer(
text,
return_tensors="pt",
padding="longest",
max_length=tokenizer.model_max_length,
truncation=True,
) for text in strings
]
input_ids = labels = [
tokenized.input_ids[0] for tokenized in tokenized_list
]
input_ids_lens = labels_lens = [
tokenized.input_ids.ne(tokenizer.pad_token_id).sum().item()
for tokenized in tokenized_list
]
return dict(
input_ids=input_ids,
labels=labels,
input_ids_lens=input_ids_lens,
labels_lens=labels_lens,
)
def _form_qa(q_list, a_list, tokenized_conversation, tokenized_lens, speakers, header_len, max_length, eos_id):
cur_idx = header_len
conv_len = len(tokenized_conversation)
for tokenized_len, speaker in zip(tokenized_lens, speakers):
if cur_idx >= conv_len:
break
if speaker == "gpt":
# truncate answer if it is too long
content_a = None
if tokenized_len > max_length:
content_a = tokenized_conversation[cur_idx:cur_idx + max_length]
else:
content_a = tokenized_conversation[cur_idx:cur_idx + tokenized_len]
content_a.append(eos_id)
a_list.append(content_a)
content_q = None
if cur_idx >= max_length:
content_q = tokenized_conversation[cur_idx-max_length: cur_idx]
else:
content_q = tokenized_conversation[:cur_idx]
content_q.append(eos_id)
q_list.append(content_q)
# asser the last token is actually a EOS for an answer
assert a_list[-1][-1] == eos_id, "Last Token is not EOS!"
cur_idx += tokenized_len
def _add_speaker_and_signal(header, source, get_conversation=True):
"""Add speaker and start/end signal on each round."""
BEGIN_SIGNAL = "### "
END_SIGNAL = "\n"
conversation = header
unknown_role = "unknown" # use default unknown role
roles = {
"human": default_conversation.roles[0], # human role
"gpt": default_conversation.roles[1], # gpt role
}
for i in range(len(source)):
sentence = source[i]
sentence_from = sentence["from"].lower()
# TODO(Dacheng): verify this is a good way to split sentences
if sentence_from == "human":
# if this is not the last sentence
if i != len(source) - 1:
next_sentence = source[i+1]
sentence["value"] = (
BEGIN_SIGNAL
+ roles.get(sentence_from, unknown_role)
+ ": "
+ sentence["value"]
+ END_SIGNAL
+ BEGIN_SIGNAL
+ roles.get(next_sentence["from"].lower(), unknown_role)
+ ": "
)
else:
# if human is the last speaker, it does not contribute to an answer
pass
else:
sentence["value"] = (
sentence["value"]
+ END_SIGNAL
)
if get_conversation:
conversation += sentence["value"]
return conversation
def preprocess(
sources: Sequence[str],
tokenizer: transformers.PreTrainedTokenizer,
) -> Dict:
"""
Given a list of sources, each is a conversation list. This transform:
1. Add signal '### ' at the beginning each sentence, with end signal '\n';
2. Concatenate conversations together;
3. Tokenize the concatenated conversation;
4. Make a deepcopy as the target. Mask human words with IGNORE_INDEX.
"""
# add end signal and concatenate together
conversations = []
header = f"{default_conversation.system}\n\n"
for source in sources:
conversation = _add_speaker_and_signal(header, source, tokenizer)
conversations.append(conversation)
# TODO(Dacheng): This is related to whether the dataset has been truncated..
# Assume we get long conversations, don't pad, don't return tensor
tokenized_conversations = tokenizer(conversations, max_length=None)["input_ids"]
q_list = []
a_list = []
# count for EOS length
header_len = _tokenize_fn([header], tokenizer)["input_ids_lens"][0] - 1
from tqdm import tqdm
for tokenized_conversation, source in tqdm(zip(tokenized_conversations, sources)):
tokenized_sentence = _tokenize_fn([s["value"] for s in source], tokenizer)
tokenized_lens = tokenized_sentence["input_ids_lens"]
tokenized_lens = [l-1 for l in tokenized_lens]
speakers = [sentence["from"] for sentence in source]
ids = tokenized_sentence["input_ids"]
_form_qa(q_list, a_list, tokenized_conversation, tokenized_lens, speakers, header_len, tokenizer.model_max_length, tokenizer.eos_token_id)
return dict(input_ids=q_list, labels=a_list)
class SupervisedDataset(Dataset):
"""Dataset for supervised fine-tuning."""
def __init__(self, data_path: str,
tokenizer: transformers.PreTrainedTokenizer,
preprocessed_path,
num_data):
super(SupervisedDataset, self).__init__()
# save to file
self.preprocessed_path = preprocessed_path
if not os.path.exists("./preprocessed_data/"):
os.mkdir("preprocessed_data/")
if os.path.exists(self.preprocessed_path):
print("loading from preprocessed data")
data_dict = json.load(open(self.preprocessed_path, "r"))
else:
logging.warning("Loading data...")
list_data_dict = json.load(open(data_path, "r"))
logging.warning("Formatting inputs...")
sources = []
sources = [example["conversations"] for example in list_data_dict]
data_dict = preprocess(sources, tokenizer)
json_data_dict = json.dumps(data_dict)
# open file for writing, "w"
f = open(self.preprocessed_path,"w")
# write json object to file
f.write(json_data_dict)
if num_data != -1:
data_dict["input_ids"] = data_dict["input_ids"][:num_data]
data_dict["labels"] = data_dict["labels"][:num_data]
# Shuffle data to see more conversations, if only train on partial data
temp = list(zip(data_dict["input_ids"], data_dict["labels"]))
random.shuffle(temp)
res1, res2 = zip(*temp)
data_dict["input_ids"], data_dict["labels"] = list(res1), list(res2)
# Dacheng: Get rid of short QA pair
self.input_ids = copy.deepcopy(data_dict["input_ids"])
self.labels = copy.deepcopy(data_dict["labels"])
length_arr = defaultdict(int)
for idx, (input, label) in enumerate(zip(data_dict["input_ids"], data_dict["labels"])):
length_arr[str(len(label) // 100)] += 1
if len(input) <= 5:
del_idx = self.input_ids.index(input)
self.input_ids.pop(del_idx)
self.labels.pop(del_idx)
if len(label) <= 5:
del_idx = self.labels.index(label)
self.input_ids.pop(del_idx)
self.labels.pop(del_idx)
for input, label in zip(self.input_ids, self.labels):
assert len(input) >= 5
assert len(label) >= 5
def __len__(self):
return len(self.input_ids)
def __getitem__(self, i) -> Dict[str, torch.Tensor]:
return dict(input_ids=self.input_ids[i], labels=self.labels[i])
@dataclass
class DataCollatorForSupervisedDataset(object):
"""Collate examples for supervised fine-tuning."""
tokenizer: transformers.PreTrainedTokenizer
def __call__(self, instances: Sequence[Dict]) -> Dict[str, torch.Tensor]:
input_ids, labels = tuple([torch.as_tensor(instance[key], dtype=torch.int64) for instance in instances]
for key in ("input_ids", "labels"))
input_ids = torch.nn.utils.rnn.pad_sequence(
input_ids,
batch_first=True,
padding_value=self.tokenizer.pad_token_id)
labels = torch.nn.utils.rnn.pad_sequence(labels,
batch_first=True,
padding_value=IGNORE_INDEX)
ret = dict(
input_ids=input_ids,
labels=labels,
attention_mask=input_ids.ne(self.tokenizer.pad_token_id),
)
torch.set_printoptions(profile="full")
return ret
def make_supervised_data_module(tokenizer: transformers.PreTrainedTokenizer,
data_args) -> Dict:
"""Make dataset and collator for supervised fine-tuning."""
dataset_cls = SupervisedDataset
train_dataset = dataset_cls(tokenizer=tokenizer,
data_path=data_args.data_path,
preprocessed_path=data_args.preprocessed_path,
num_data=data_args.num_data)
data_collator = DataCollatorForSupervisedDataset(tokenizer=tokenizer)
return dict(train_dataset=train_dataset,
eval_dataset=None,
data_collator=data_collator)
def train():
parser = transformers.HfArgumentParser(
(ModelArguments, DataArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
model = transformers.AutoModelForSeq2SeqLM.from_pretrained(
model_args.model_name_or_path,
cache_dir=training_args.cache_dir,
)
# Dacheng: Note we can only use T5Tokenizer, otherwise it will prepend
# a space before special tokens.
tokenizer = transformers.T5Tokenizer.from_pretrained(
model_args.model_name_or_path,
cache_dir=training_args.cache_dir,
model_max_length=training_args.model_max_length,
padding_side="right",
use_fast=False,
)
smart_tokenizer_and_embedding_resize(
special_tokens_dict=dict(pad_token=DEFAULT_PAD_TOKEN),
other_tokens=["<", "{", "\n", "}", "`", " ", "\\", "^", "\t"],
tokenizer=tokenizer,
model=model,
)
data_module = make_supervised_data_module(tokenizer=tokenizer,
data_args=data_args)
trainer = Trainer(model=model,
tokenizer=tokenizer,
args=training_args,
**data_module)
if list(pathlib.Path(training_args.output_dir).glob("checkpoint-*")):
trainer.train(resume_from_checkpoint=True)
else:
trainer.train()
trainer.save_state()
safe_save_model_for_hf_trainer(trainer=trainer,
output_dir=training_args.output_dir)
if __name__ == "__main__":
train() | ytchat | /ytchat-0.0.16-py3-none-any.whl/fastchat/train/train_flant5.py | train_flant5.py |
from typing import Dict, List, Optional
import asyncio
import os
import httpx
from fastchat.protocol.chat_completion import (
ChatCompletionRequest,
ChatCompletionResponse,
)
_BASE_URL = "http://localhost:8000"
if os.environ.get("FASTCHAT_BASE_URL"):
_BASE_URL = os.environ.get("FASTCHAT_BASE_URL")
def set_baseurl(base_url: str):
global _BASE_URL
_BASE_URL = base_url
class ChatCompletionClient:
def __init__(self, base_url: str):
self.base_url = base_url
async def request_completion(
self, request: ChatCompletionRequest, timeout: Optional[float] = None
) -> ChatCompletionResponse:
async with httpx.AsyncClient() as client:
response = await client.post(
f"{self.base_url}/v1/chat/completions",
json=request.dict(),
timeout=timeout,
)
response.raise_for_status()
return ChatCompletionResponse.parse_obj(response.json())
class ChatCompletion:
OBJECT_NAME = "chat.completions"
@classmethod
def create(cls, *args, **kwargs) -> ChatCompletionResponse:
"""Creates a new chat completion for the provided messages and parameters.
See `acreate` for more details.
"""
return asyncio.run(cls.acreate(*args, **kwargs))
@classmethod
async def acreate(
cls,
model: str,
messages: List[Dict[str, str]],
temperature: Optional[float] = 0.7,
n: int = 1,
max_tokens: Optional[int] = None,
stop: Optional[str] = None,
timeout: Optional[float] = None,
) -> ChatCompletionResponse:
"""Creates a new chat completion for the provided messages and parameters."""
request = ChatCompletionRequest(
model=model,
messages=messages,
temperature=temperature,
n=n,
max_tokens=max_tokens,
stop=stop,
)
client = ChatCompletionClient(_BASE_URL)
response = await client.request_completion(request, timeout=timeout)
return response | ytchat | /ytchat-0.0.16-py3-none-any.whl/fastchat/client/api.py | api.py |
import argparse
import json
import os
import time
import openai
import tqdm
import ray
import shortuuid
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
MAX_API_RETRY = 5
REQ_TIME_GAP = 10
@ray.remote(num_cpus=4)
def get_eval(sys_prompt, user_prompt: str, max_tokens: int):
logging.basicConfig(level=logging.INFO)
for i in range(MAX_API_RETRY):
try:
response = openai.ChatCompletion.create(
model="gpt-4",
messages=[
{"role": "system", "content": sys_prompt},
{
"role": "user",
"content": user_prompt,
},
],
temperature=0.2, # TODO: figure out which temperature is best for evaluation
max_tokens=max_tokens,
)
content = response["choices"][0]["message"]["content"]
logger.info(content)
return content
except Exception as e:
logger.error(e)
time.sleep(5)
logger.error(f"Failed after {MAX_API_RETRY} retries.")
return "error"
def parse_score(review):
try:
score_pair = review.split("\n")[0]
score_pair = score_pair.replace(",", " ")
sp = score_pair.split(" ")
if len(sp) == 2:
return [float(sp[0]), float(sp[1])]
else:
raise Exception("Invalid score pair.")
except Exception as e:
logger.error(
f"{e}\nContent: {review}\n" "You must manually fix the score pair."
)
return [-1, -1]
def gen_prompt(reviewer_jsons, prompt_jsons, cat, ques, ans1, ans2):
# Default to general category (index=0)
reviewer_idx = 0
for idx, reviewer in enumerate(reviewer_jsons):
if reviewer["category"] == cat:
reviewer_idx = idx
break
prompt_id = reviewer_jsons[reviewer_idx]["prompt_id"]
prompt_json = prompt_jsons[prompt_id - 1]
assert prompt_json["prompt_id"] == prompt_id
sys_prompt = prompt_json["system_prompt"]
prompt_template = prompt_json["prompt_template"]
defaults = prompt_json["defaults"]
prompt = prompt_template.format(
question=ques, answer_1=ans1, answer_2=ans2, **defaults
)
return sys_prompt, prompt, reviewer_idx + 1
def get_json_list(file_path):
file_path = os.path.expanduser(file_path)
with open(file_path, "r") as f:
json_list = []
for line in f:
json_list.append(json.loads(line))
return json_list
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="ChatGPT-based QA evaluation.")
parser.add_argument("-q", "--question-file")
parser.add_argument("-a", "--answer-file-list", nargs="+", default=[])
parser.add_argument("-p", "--prompt-file")
parser.add_argument("-r", "--reviewer-file")
parser.add_argument("-o", "--output-review-file")
parser.add_argument(
"--max-tokens",
type=int,
default=1024,
help="maximum number of tokens produced in the output",
)
args = parser.parse_args()
ray.init()
question_jsons = get_json_list(args.question_file)
answer1_jsons = get_json_list(args.answer_file_list[0])
answer2_jsons = get_json_list(args.answer_file_list[1])
reviewer_jsons = get_json_list(args.reviewer_file)
prompt_jsons = get_json_list(args.prompt_file)
# check if # of questions, answers are the same
assert len(question_jsons) == len(answer1_jsons) == len(answer2_jsons)
handles = []
review_jsons = []
total_len = len(question_jsons)
question_idx_list = list(range(total_len))
for i in question_idx_list:
assert (
answer1_jsons[i]["question_id"]
== question_jsons[i]["question_id"]
== answer2_jsons[i]["question_id"]
)
ques = question_jsons[i]["text"]
cat = question_jsons[i]["category"]
ans1 = answer1_jsons[i]["text"]
ans2 = answer2_jsons[i]["text"]
sys_prompt, prompt, reviewer_id = gen_prompt(
reviewer_jsons, prompt_jsons, cat, ques, ans1, ans2
)
review_id = shortuuid.uuid()
review_jsons.append(
{
"review_id": review_id,
"question_id": question_jsons[i]["question_id"],
"answer1_id": answer1_jsons[i]["answer_id"],
"answer2_id": answer2_jsons[i]["answer_id"],
"reviewer_id": reviewer_id,
"metadata": {},
}
)
# To avoid the rate limit set by OpenAI
handles.append(get_eval.remote(sys_prompt, prompt, args.max_tokens))
logger.info(
f"Waiting for {REQ_TIME_GAP} seconds before sending the next request."
)
time.sleep(REQ_TIME_GAP)
reviews = ray.get(handles)
with open(f"{args.output_review_file}", "w") as output_review_file:
for idx, review in enumerate(reviews):
scores = parse_score(review)
review_jsons[idx]["text"] = review
review_jsons[idx]["score"] = scores
output_review_file.write(json.dumps(review_jsons[idx]) + "\n") | ytchat | /ytchat-0.0.16-py3-none-any.whl/fastchat/eval/eval_gpt_review.py | eval_gpt_review.py |
# Evaluations
This directory contains end-to-end pipelines for AI-enhanced evaluation. We will introduce the evaluation pipeline and the data format in this document.
## Generate Answers
### ChatGPT (gpt-3.5-turbo)
Make sure you have setup the OpenAI API Key in your environment. Then run:
```bash
python qa_baseline_gpt35.py --question table/question.jsonl --output table/answer/answer_gpt35.jsonl
```
### Bard
Unfortunately, Bard has not release its public APIs till now. You may have to enter the anwsers manually. Or you could find a third-party project that interfaces with Bard.
### Vicuna and others
To generate answers with Vicuna or other models, specify path to the model checkpoint, a desired model ID and run:
```bash
python get_model_answer.py --model-id [MODEL-ID] --model-path /model/path --question-file table/question.jsonl --answer-file table/answer/answer.jsonl --num-gpus [NUM-GPUS]
```
Then the answers to the questions will be saved in `table/answer/answer.jsonl`.
Note: we assume the model can be loaded with a single GPU.
## Evaluate Answers Automatically
### Generete Reviews with GPT-4
Note: Below script requires access to GPT-4 API. If you only have access to GPT-4 on web interface, you can evaluate the answers by manually formatting the prompt. See more details in the **Reviewers** and **Prompts** sections in **Data Format**.
It is critical to follow the prompt templates; otherwise GPT-4 may not give fair reviews. `table/review/*.jsonl` are some review examples generated by GPT-4 or you can view them on our eval [webpage](https://vicuna.lmsys.org/eval/).
To use the script for generating reviews with GPT-4, you need to `export` your OpenAI API key in environment variable. Then run:
```bash
python eval_gpt_review.py -q table/question.jsonl -a /path/to/answer_1.jsonl /path/to/answer_2.jsonl -p table/prompt.jsonl -r table/reviewer.jsonl -o /path/to/review_output.jsonl
```
The GPT-4 reviews will be saved in `/path/to/review_output.jsonl`. Note: we implement some simple parsing code to extract the score pairs from GPT-4's reviews. However, you need to double check whether the parsed score pair are correct. Sometime the parsing logic may fail if GPT-4 doesn't give a structured answer.
## Visualize Results
You can generate the data for the webpage by running:
```bash
python eval/generate_webpage_data_from_table.py
```
Then you can serve a static website in `webpage` to see the results.
## Data Format
If you want to have a deeper understanding of our evaluation pipeline or want to contribute to the evaluation process, you need to learn the data format we used for evaluation.
Our evaluation data are encoded with [JSON Lines](https://jsonlines.org/).
### Random ID Generation
We use the `shortuuid` Python library for generating short random UUIDs.
```python
import shortuuid
shortuuid.uuid() -> str
```
### Models
`model.jsonl` contains model information we used for generating anwsers.
Each row contains a record of a model with the following field:
* `model_id` (str): A unique ID for a model. Models with different IDs is supposed to have different performance. This ID is generated by `{model_name}:{model_version}`.
* `model_name` (str): The name of a model. This is not unique, because a model could be trained and updated continuously, but it is still considered as the same model with different versions.
* `model_version` (str): The version of a model.
* `model_metadata` (Any): Any metadata of a model (descriptions etc). This is optional.
For example:
```json
{
"model_id": "vicuna-13b:v1",
"model_name": "vicuna-13b",
"model_version": "v1",
"model_metadata": "learning rate 1e-5, 3 epochs, 13b"
}
```
### Prompts
We store prompts in `prompt.jsonl`. Each row contains a record of a prompt with the following field:
* `prompt_id` (int): A unique integer ID for a prompt. Prompts with different IDs are supposed to have different purpose.
* `system_prompt` (str): The system prompt given to a model. This is the prompt that the model sees first.
* `prompt_template` (str): The prompt body. This is the user prompt that the model sees after the system prompt. It is a Python f-string template, so that we can fill in the inputs later.
* `defaults` (dict): A dictionary of default values for the prompt template. It can be empty.
* `description` (str): A description of the functionality of the prompt.
For example:
```json
{
"prompt_id": 1,
"system_prompt": "You are a helpful assistant.",
"prompt_template": "[Question]\n{question}\n\n[Assistant 1]\n{answer_1}\n\n[End of Assistant 1]\n\n[Assistant 2]\n{answer_2}\n\n[End of Assistant 2]\n\n[System]\n{prompt}\n\n",
"defaults": {"prompt": "Which assistant is more helpful?"},
"description": "Compare two assistants' answers to a question."
}
```
### Reviewers
`reviewer.jsonl` contains reviewer information we used for reviewing answers generated by different models. Each row contains a record of a reviewer with the following field:
* `reviewer_id` (str): A unique ID for a reviewer. Reviewers with different IDs is supposed to have different reviewing performance.
* `prompt_id` (str): The ID of the prompt given to the reviewer (e.g., an AI assistant). Different prompts could result in different reviewing performance.
* `metadata` (dict): Metadata of a reviewer about its configurations.
* `description` (str): A description of the reviewer.
* `category` (str): The category that the reviewer belongs to.
For example:
```json
{
"reviewer_id": "gpt-4-0328-default",
"prompt_id": 1,
"temperature": 0.2,
"max_tokens": 8192,
"description": "GPT-4 for general questions.",
"category": "general"
}
```
### Questions
`question.jsonl` contains questions we used for evaluation. Each row contains a record of a question with the following field:
* `question_id` (int): A unique integer for a question. Questions with different IDs is supposed to be different.
* `text` (str): The question text.
* `category` (str): The category of the question. Questions with the same category are supposed to be similar or originate from the same source.
### Answers
`answer/xxx.jsonl` contains answers generated by different models. Each row contains a record of an answer with the following field:
* `answer_id` (str): A unique UUID for an answer. Answers with different IDs is supposed to be different.
* `question_id` (int): The ID of the question the answer is generated for.
* `model_id` (str): The ID of the model the answer is generated by.
* `text` (str): The answer text.
* `metadata` (dict): Any metadata of the answer.
Example:
```json
{
"answer_id": "[short uuid]",
"question_id": 1,
"model_id": "vicuna-13b:v1",
"text": "Here are five tips...",
"metadata": {}
}
```
### Reviews
`review/xxx.jsonl` contains reviews given by reviewers, comparing peformance between a pair of models. Each row contains a record of a review with the following field:
* `review_id` (str): A unique UUID for a review. Reviews with different IDs is supposed to be different.
* `question_id` (int): The ID of the question the review is given for.
* `answer1_id` (str): The ID of the first answer.
* `answer2_id` (str): The ID of the second answer.
* `text` (str): The review text.
* `score` (list): A list of scores given by the reviewer. The first score is for the first answer, and the second score is for the second answer.
* `reviewer_id` (str): The ID of the reviewer.
* `metadata` (dict): Any metadata of the review.
```json
{
"review_id": "[short uuid]",
"question_id": 1,
"answer1_id": "[answer1_id]",
"answer2_id": "[answer2_id]",
"text": "Assistant 2 is better...",
"score": [9.0, 7.5],
"reviewer_id": "gpt-4-0328-default",
"metadata": {}
}
```
| ytchat | /ytchat-0.0.16-py3-none-any.whl/fastchat/eval/README.md | README.md |
import json
import os
import re
models = ["alpaca", "llama", "gpt35", "bard"]
def read_jsonl(path: str, key: str = None):
data = []
with open(os.path.expanduser(path)) as f:
for line in f:
if not line:
continue
data.append(json.loads(line))
if key is not None:
data.sort(key=lambda x: x[key])
data = {item[key]: item for item in data}
return data
def trim_hanging_lines(s: str, n: int) -> str:
s = s.strip()
for _ in range(n):
s = s.split("\n", 1)[1].strip()
return s
if __name__ == "__main__":
questions = read_jsonl("table/question.jsonl", key="question_id")
alpaca_answers = read_jsonl(
"table/answer/answer_alpaca-13b.jsonl", key="question_id"
)
bard_answers = read_jsonl("table/answer/answer_bard.jsonl", key="question_id")
gpt35_answers = read_jsonl("table/answer/answer_gpt35.jsonl", key="question_id")
llama_answers = read_jsonl("table/answer/answer_llama-13b.jsonl", key="question_id")
vicuna_answers = read_jsonl(
"table/answer/answer_vicuna-13b.jsonl", key="question_id"
)
review_alpaca = read_jsonl(
"table/review/review_alpaca-13b_vicuna-13b.jsonl", key="question_id"
)
review_bard = read_jsonl(
"table/review/review_bard_vicuna-13b.jsonl", key="question_id"
)
review_gpt35 = read_jsonl(
"table/review/review_gpt35_vicuna-13b.jsonl", key="question_id"
)
review_llama = read_jsonl(
"table/review/review_llama-13b_vicuna-13b.jsonl", key="question_id"
)
records = []
for qid in questions.keys():
r = {
"id": qid,
"category": questions[qid]["category"],
"question": questions[qid]["text"],
"answers": {
"alpaca": alpaca_answers[qid]["text"],
"llama": llama_answers[qid]["text"],
"bard": bard_answers[qid]["text"],
"gpt35": gpt35_answers[qid]["text"],
"vicuna": vicuna_answers[qid]["text"],
},
"evaluations": {
"alpaca": review_alpaca[qid]["text"],
"llama": review_llama[qid]["text"],
"bard": review_bard[qid]["text"],
"gpt35": review_gpt35[qid]["text"],
},
"scores": {
"alpaca": review_alpaca[qid]["score"],
"llama": review_llama[qid]["score"],
"bard": review_bard[qid]["score"],
"gpt35": review_gpt35[qid]["score"],
},
}
# cleanup data
cleaned_evals = {}
for k, v in r["evaluations"].items():
v = v.strip()
lines = v.split("\n")
# trim the first line if it's a pair of numbers
if re.match(r"\d+[, ]+\d+", lines[0]):
lines = lines[1:]
v = "\n".join(lines)
cleaned_evals[k] = v.replace("Assistant 1", "**Assistant 1**").replace(
"Assistant 2", "**Assistant 2**"
)
r["evaluations"] = cleaned_evals
records.append(r)
# Reorder the records, this is optional
for r in records:
if r["id"] <= 20:
r["id"] += 60
else:
r["id"] -= 20
for r in records:
if r["id"] <= 50:
r["id"] += 10
elif 50 < r["id"] <= 60:
r["id"] -= 50
for r in records:
if r["id"] == 7:
r["id"] = 1
elif r["id"] < 7:
r["id"] += 1
records.sort(key=lambda x: x["id"])
# Write to file
with open("webpage/data.json", "w") as f:
json.dump({"questions": records, "models": models}, f, indent=2) | ytchat | /ytchat-0.0.16-py3-none-any.whl/fastchat/eval/generate_webpage_data_from_table.py | generate_webpage_data_from_table.py |
import argparse
from transformers import AutoTokenizer, AutoModelForCausalLM, LlamaForCausalLM
import torch
import os
import json
from tqdm import tqdm
import shortuuid
import ray
from fastchat.conversation import get_default_conv_template
def run_eval(model_path, model_id, question_file, answer_file, num_gpus):
# split question file into num_gpus files
ques_jsons = []
with open(os.path.expanduser(question_file), "r") as ques_file:
for line in ques_file:
ques_jsons.append(line)
chunk_size = len(ques_jsons) // num_gpus
ans_handles = []
for i in range(0, len(ques_jsons), chunk_size):
ans_handles.append(
get_model_answers.remote(
model_path, model_id, ques_jsons[i : i + chunk_size]
)
)
ans_jsons = []
for ans_handle in ans_handles:
ans_jsons.extend(ray.get(ans_handle))
with open(os.path.expanduser(answer_file), "w") as ans_file:
for line in ans_jsons:
ans_file.write(json.dumps(line) + "\n")
@ray.remote(num_gpus=1)
@torch.inference_mode()
def get_model_answers(model_path, model_id, question_jsons):
model_path = os.path.expanduser(model_path)
tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
model = AutoModelForCausalLM.from_pretrained(
model_path, low_cpu_mem_usage=True, torch_dtype=torch.float16
).cuda()
ans_jsons = []
for i, line in enumerate(tqdm(question_jsons)):
ques_json = json.loads(line)
idx = ques_json["question_id"]
qs = ques_json["text"]
conv = get_default_conv_template(model_id).copy()
conv.append_message(conv.roles[0], qs)
conv.append_message(conv.roles[1], None)
prompt = conv.get_prompt()
input_ids = tokenizer([prompt]).input_ids
output_ids = model.generate(
torch.as_tensor(input_ids).cuda(),
do_sample=True,
temperature=0.7,
max_new_tokens=1024,
)
output_ids = output_ids[0][len(input_ids[0]):]
outputs = tokenizer.decode(output_ids, skip_special_tokens=True).strip()
ans_id = shortuuid.uuid()
ans_jsons.append(
{
"question_id": idx,
"text": outputs,
"answer_id": ans_id,
"model_id": model_id,
"metadata": {},
}
)
return ans_jsons
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--model-path", type=str, required=True)
parser.add_argument("--model-id", type=str, required=True)
parser.add_argument("--question-file", type=str, required=True)
parser.add_argument("--answer-file", type=str, default="answer.jsonl")
parser.add_argument("--num-gpus", type=int, default=1)
args = parser.parse_args()
ray.init()
run_eval(
args.model_path,
args.model_id,
args.question_file,
args.answer_file,
args.num_gpus,
) | ytchat | /ytchat-0.0.16-py3-none-any.whl/fastchat/eval/get_model_answer.py | get_model_answer.py |
# Note: you need to be using OpenAI Python v0.27.0 for the code below to work
import argparse
import json
import os
import time
import concurrent.futures
import openai
import tqdm
import shortuuid
MODEL = "gpt-3.5-turbo"
MODEL_ID = "gpt-3.5-turbo:20230327"
def get_answer(question_id: int, question: str, max_tokens: int):
ans = {
"answer_id": shortuuid.uuid(),
"question_id": question_id,
"model_id": MODEL_ID,
}
for _ in range(3):
try:
response = openai.ChatCompletion.create(
model=MODEL,
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{
"role": "user",
"content": question,
},
],
max_tokens=max_tokens,
)
ans["text"] = response["choices"][0]["message"]["content"]
return ans
except Exception as e:
print("[ERROR]", e)
ans["text"] = "#ERROR#"
time.sleep(1)
return ans
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="ChatGPT answer generation.")
parser.add_argument("-q", "--question")
parser.add_argument("-o", "--output")
parser.add_argument(
"--max-tokens",
type=int,
default=1024,
help="maximum number of tokens produced in the output",
)
args = parser.parse_args()
questions_dict = {}
with open(os.path.expanduser(args.question)) as f:
for line in f:
if not line:
continue
q = json.loads(line)
questions_dict[q["question_id"]] = q["text"]
answers = []
with concurrent.futures.ThreadPoolExecutor(max_workers=32) as executor:
futures = []
for qid, question in questions_dict.items():
future = executor.submit(get_answer, qid, question, args.max_tokens)
futures.append(future)
for future in tqdm.tqdm(
concurrent.futures.as_completed(futures), total=len(futures)
):
answers.append(future.result())
answers.sort(key=lambda x: x["question_id"])
with open(os.path.expanduser(args.output), "w") as f:
table = [json.dumps(ans) for ans in answers]
f.write("\n".join(table)) | ytchat | /ytchat-0.0.16-py3-none-any.whl/fastchat/eval/qa_baseline_gpt35.py | qa_baseline_gpt35.py |
let currentQuestionIndex = 1;
// Store the model name mapping for later use.
modelNameMapping = {
"gpt35": "ChatGPT-3.5",
"gpt4": "GPT-4",
"alpaca": "Alpaca-13b",
"vicuna": "Vicuna-13b",
"llama": "LLaMA-13b",
"bard": "Bard",
};
modelFigureMapping = {
"vicuna": "figures/vicuna.jpeg",
// Image from: https://commons.wikimedia.org/wiki/File:ChatGPT_logo.svg
"gpt35": "figures/chatgpt.svg",
// Image from: https://www.reddit.com/r/logodesign/comments/1128aat/google_ai_bard_logo_design/
"bard": "figures/bard.jpg",
// Image from: https://crfm.stanford.edu/2023/03/13/alpaca.html
"alpaca": "figures/alpaca.png",
// Image adapted from https://commons.wikimedia.org/wiki/File:Llama_on_Machu_Picchu.jpg
"llama": "figures/llama.jpg",
}
// Store the question data in a mapping for later use.
questionMapping = {};
// Store the question ids in a mapping for later use.
categoryMapping = {};
// Store the number of questions for later use.
questionsCount = 0;
function text2Markdown(text) {
// Normalize the text for markdown rendering.
text = text.trim().replaceAll('\n\n', '\n').replaceAll('\n', '\n\n');
return marked.parse(text);
}
function capitalizeFirstChar(str) {
if (!str || str.length === 0) {
return str;
}
return str.charAt(0).toUpperCase() + str.slice(1);
}
function updateQuestionSelect(question_id) {
const select = document.getElementById('question-select');
// Clear the question select.
select.innerHTML = '';
// Populate the question select.
category = questionMapping[question_id].category;
categoryMapping[category].forEach(question_id => {
const question = questionMapping[question_id];
const option = document.createElement('option');
option.value = question_id;
option.textContent = 'Q' + question_id.toString() + ': ' + question.question;
select.appendChild(option);
});
select.value = question_id;
}
function updateModelSelect() {
const select = document.getElementById('model-select');
img_path = modelFigureMapping[select.value];
document.getElementById('other-model-figure').src = img_path;
}
function populateModels(models) {
const select = document.getElementById('model-select');
models.forEach(model => {
const option = document.createElement('option');
option.value = model;
option.textContent = modelNameMapping[model];
select.appendChild(option);
});
updateModelSelect();
}
function populateQuestions(questions) {
const category_select = document.getElementById('category-select');
questionsCount = questions.length;
questions.forEach(question => {
const option = document.createElement('option');
// Store the question data in a mapping for later use.
questionMapping[question.id] = {
category: question.category,
question: question.question,
answers: question.answers,
evaluations: question.evaluations,
scores: question.scores,
};
// Store the question id in the category mapping.
if (question.category in categoryMapping) {
categoryMapping[question.category].push(question.id);
} else {
categoryMapping[question.category] = [question.id];
const category_option = document.createElement('option');
category_option.value = question.category;
category_option.textContent = capitalizeFirstChar(question.category);
category_select.appendChild(category_option);
}
});
// Set the default category.
updateQuestionSelect(currentQuestionIndex);
}
function displayQuestion(index) {
const question = questionMapping[index].question;
document.getElementById('selected-question').innerHTML = text2Markdown('**Question:** ' + question);
displayAnswers(index);
}
function displayAnswers(index) {
const question = questionMapping[index];
const otherModel = document.getElementById('model-select').value;
// render the answers with markdown
document.getElementById('other-model-answer').innerHTML = text2Markdown(question.answers[otherModel]);
document.getElementById('our-model-answer').innerHTML = text2Markdown(question.answers.vicuna);
// Display evaluation
score = question.scores[otherModel];
score_text = modelNameMapping[otherModel] + " " + score[0] + "/10, Vicuna-13b " + score[1] + "/10";
document.getElementById('evaluation-header').textContent = "GPT-4 Evaluation" + " (Score: " + score_text + ")";
document.getElementById('evaluation-result').innerHTML = text2Markdown(question.evaluations[otherModel]);
// Update model names
let assistant1_title = "Assistant #1"; // (" + modelNameMapping[otherModel] + ")";
let assistant2_title = "Assistant #2 (Vicuna-13b, our model)";
// Update scores/labels.
let assistant1_score_label = score[0].toString() + '/10';
let assistant2_score_label = score[1].toString() + '/10';
const colorRed ='#fa9'; // '#eb978d';
// const colorGreen = '#c9f2c9';
const colorBlue = '#8ef'; // '#71dbf9';
const colorYellow = '#fe7'; // '#fada57';
let otherModelHeaderColor = '';
let ourModelHeaderColor = '';
// Update the winner.
if (score[0] == score[1]) {
assistant1_title = '🏆 ' + assistant1_title;
assistant1_score_label = '🏆 ' + assistant1_score_label;
assistant2_title = '🏆 ' + assistant2_title;
assistant2_score_label = '🏆 ' + assistant2_score_label;
otherModelHeaderColor = colorYellow;
ourModelHeaderColor = colorYellow;
} else if (score[0] > score[1]) {
assistant1_title = '🏆 ' + assistant1_title;
assistant1_score_label = '🏆 ' + assistant1_score_label;
otherModelHeaderColor = colorBlue;
ourModelHeaderColor = colorRed;
} else if (score[0] < score[1]) {
assistant2_title = '🏆 ' + assistant2_title;
assistant2_score_label = '🏆 ' + assistant2_score_label;
otherModelHeaderColor = colorRed;
ourModelHeaderColor = colorBlue;
}
document.getElementById('other-model-header-bg').style.backgroundColor = otherModelHeaderColor;
document.getElementById('our-model-header').style.backgroundColor = ourModelHeaderColor;
document.getElementById('other-model-header').textContent = assistant1_title;
document.getElementById('our-model-header').textContent = assistant2_title;
document.getElementById('other-score-label').textContent = assistant1_score_label;
document.getElementById('our-score-label').textContent = assistant2_score_label;
// Update expand buttons visibility for both cards after displaying answers
// Reset the expanded state and update expand buttons visibility for both cards after displaying answers
document.querySelectorAll('.expandable-card').forEach(card => {
card.classList.remove('expanded');
updateExpandButtonVisibility(card);
const expandBtn = card.querySelector('.expand-btn');
expandBtn.innerHTML = '<i class="material-icons" style="pointer-events: none">keyboard_arrow_down</i> Show more'; // .textContent = 'Show more';
});
}
document.getElementById('question-select').addEventListener('change', e => {
currentQuestionIndex = parseInt(e.target.value);
displayQuestion(currentQuestionIndex);
});
document.getElementById('category-select').addEventListener('change', e => {
let currentCategory = e.target.value;
const questionIds = categoryMapping[currentCategory];
currentQuestionIndex = questionIds[0];
updateQuestionSelect(currentQuestionIndex);
displayQuestion(currentQuestionIndex);
});
// Update expand buttons whenever the model is changed
document.getElementById('model-select').addEventListener('change', () => {
displayAnswers(currentQuestionIndex);
document.querySelectorAll('.expandable-card').forEach(card => {
updateExpandButtonVisibility(card);
});
updateModelSelect();
});
function switchQuestionAndCategory() {
document.getElementById('question-select').value = currentQuestionIndex;
old_category = document.getElementById('category-select').value;
new_category = questionMapping[currentQuestionIndex].category;
if (old_category != new_category) {
document.getElementById('category-select').value = new_category;
updateQuestionSelect(currentQuestionIndex);
}
displayQuestion(currentQuestionIndex);
}
document.getElementById('prev-question').addEventListener('click', () => {
// Question index starts from 1.
currentQuestionIndex = Math.max(1, currentQuestionIndex - 1);
switchQuestionAndCategory();
});
document.getElementById('next-question').addEventListener('click', () => {
// Question index starts from 1.
currentQuestionIndex = Math.min(questionsCount, currentQuestionIndex + 1);
switchQuestionAndCategory();
});
function updateExpandButtonVisibility(card) {
const cardTextContainer = card.querySelector('.card-text-container');
const expandBtn = card.querySelector('.expand-btn');
if (cardTextContainer.scrollHeight > cardTextContainer.offsetHeight) {
expandBtn.style.display = 'flex';
} else {
expandBtn.style.display = 'none';
card.classList.add('expanded');
}
}
document.querySelectorAll('.expand-btn').forEach(btn => {
btn.addEventListener('click', e => {
const card = e.target.closest('.expandable-card');
card.classList.toggle('expanded');
const more = '<i class="material-icons" style="pointer-events: none">keyboard_arrow_down</i> Show more';
const less = '<i class="material-icons" style="pointer-events: none">keyboard_arrow_up</i> Show less';
e.target.innerHTML = card.classList.contains('expanded') ? less : more;
});
}); | ytchat | /ytchat-0.0.16-py3-none-any.whl/fastchat/eval/webpage/script.js | script.js |
import argparse
from concurrent.futures import ProcessPoolExecutor
import json
import logging
import re
from typing import Dict, Union
import bs4
import markdownify # == 0.11.6
from tqdm import tqdm
div_pattern = re.compile("<div.*?>")
span_pattern = re.compile("<span.*?>")
code_lang_pattern = re.compile(
"```\s*" + "(.*?)" + "(?:Copy code)+" + "(.+?)" + "\s*?```", re.DOTALL
)
code_lang_format = "```\g<1>\n\g<2>\n```"
regenerate_pattern = re.compile("\d+ / \d+")
copy_chars_pattern = re.compile("Copy\d+ chars / \d+ words")
copy_code_pattern = re.compile("```(.*?)Copy code\s*```")
def reformat_code(val: str) -> str:
# Input code format is:
# ```
# $<language>Copy code$<exact_code_here>
#
# ```
# This function convert it into the correct markdown format
return re.sub(code_lang_pattern, code_lang_format, val)
def html_to_markdown(val: str) -> str:
# Remove all <div>. This is required to make intent work in code blocks.
val = re.sub(div_pattern, "", val)
# Remove all <span>. This is required to make underscores work in code blocks.
val = re.sub(span_pattern, "", val)
# Markdown to html
val = markdownify.markdownify(val).strip()
# Reformat code
val = reformat_code(val)
# Remove noisy "[number] / [number]" at the beginning
noise = re.search(regenerate_pattern, val)
if noise and noise.start() == 0:
val = val[noise.end() :]
# Remove noisy "Copy[number] chars / [number] words"
val = re.sub(copy_chars_pattern, "", val)
# Remove empty code block ```\nCopy code\n```
val = re.sub(copy_code_pattern, "", val)
# Strip
val = val.replace("\n\n\n", "\n").strip()
return val
def contain_blocked_words(val: str) -> bool:
blocked_words = ["openai", "chatgpt"]
for w in blocked_words:
if w in val.lower():
return True
return False
def clean_html_one_sample(sample):
roles = ["human", "gpt"]
if len(sample["conversations"]) <= 1:
return (sample, 1)
# Adjust the offset for cases like https://sharegpt.com/c/VyaZlh4
if sample["conversations"][0]["from"] != "human":
sample["conversations"] = sample["conversations"][1:]
if len(sample["conversations"]) <= 1:
return (sample, 1)
if sample["conversations"][-1]["from"] == "human":
sample["conversations"] = sample["conversations"][:-1]
if len(sample["conversations"]) <= 1:
return (sample, 1)
for i, c in enumerate(sample["conversations"]):
if c["from"] != roles[i % 2]:
return (sample, 2)
if contain_blocked_words(c["value"]):
return (sample, 3)
try:
new_val = html_to_markdown(c["value"])
except (bs4.builder.ParserRejectedMarkup, AssertionError):
return (sample, 4)
c["value"] = new_val
return (sample, 0)
def clean_html_all(content, begin, end):
"""
Clean the source html files.
"""
cnt_skip = 0
cnt_blocked_words = 0
cnt_wrong_format = 0
cnt_parser_error = 0
cnt_too_short = 0
cnt_id_duplication = 0
cnt_value_duplication = 0
cnt_tag = 0
content = content[begin:end]
processed = []
with ProcessPoolExecutor() as executor:
for result in tqdm(
executor.map(clean_html_one_sample, content), total=len(content)
):
processed.append(result)
visited = {}
new_content = []
for sample, error_code in tqdm(processed):
cid = sample["id"]
skipped = True
if error_code != 0:
if error_code == 1:
print(f"id {cid} is too short")
cnt_too_short += 1
elif error_code == 2:
print(f"id {cid} has a wrong format")
cnt_wrong_format += 1
elif error_code == 3:
print(f"id {cid} contains blocked words")
cnt_blocked_words += 1
elif error_code == 4:
print(f"id {cid} contains parser errors")
cnt_parser_error += 1
else:
raise ValueError(f"Invalid error_code: {error_code}")
elif cid in visited:
print(f"id {cid} is an id duplication of {visited[cid]}")
cnt_id_duplication += 1
elif (
sample["conversations"][1]["value"],
len(sample["conversations"]),
) in visited:
key = (sample["conversations"][1]["value"], len(sample["conversations"]))
print(f"id {cid} is a value duplication of {visited[key]}")
cnt_value_duplication += 1
else:
key = (sample["conversations"][1]["value"], len(sample["conversations"]))
visited[cid] = visited[key] = cid
skipped = False
if not skipped:
new_content.append(sample)
else:
cnt_skip += 1
print(
f"total: {len(content)}, skip: {cnt_skip}, new: {len(new_content)}, "
f"cnt_blocked_words: {cnt_blocked_words}, cnt_parser_error: {cnt_parser_error}, "
f"cnt_wrong_format: {cnt_wrong_format}, "
f"cnt_too_short: {cnt_too_short}, cnt_id_duplication: {cnt_id_duplication}, "
f"cnt_value_duplication: {cnt_value_duplication}, "
)
return new_content
def main(args):
content = json.load(open(args["in_file"], "r"))
content = clean_html_all(content, args["begin"], args["end"])
json.dump(content, open(args["out_file"], "w"), indent=2)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--in-file", type=str, required=True)
parser.add_argument("--out-file", type=str, default="sharegpt_clean.json")
parser.add_argument("--begin", type=int)
parser.add_argument("--end", type=int)
parser.add_argument("--debug", action="store_true")
args = parser.parse_args()
main(vars(args)) | ytchat | /ytchat-0.0.16-py3-none-any.whl/fastchat/data/clean_sharegpt.py | clean_sharegpt.py |
import argparse
from concurrent.futures import ProcessPoolExecutor
import json
from typing import Dict, Sequence, Optional
import transformers
from tqdm import tqdm
from fastchat import conversation as conversation_lib
def make_sample(sample, start_idx, end_idx):
assert (end_idx - start_idx) % 2 == 0
return {
"id": sample["id"] + "_" + str(start_idx),
"conversations": sample["conversations"][start_idx:end_idx],
}
tokenizer = max_length = None
def split_one_sample(sample):
tokenized_lens = []
conversations = sample["conversations"]
conversations = conversations[: len(conversations) // 2 * 2]
for c in conversations:
length = len(tokenizer(c["value"]).input_ids) + 6
tokenized_lens.append(length)
start_idx = 0
cur_len = 0
if len(conversations) % 2 != 0 or len(conversations) < 2:
return []
new_samples = []
for i in range(0, len(conversations), 2):
tmp_len = tokenized_lens[i] + tokenized_lens[i + 1]
if cur_len + tmp_len > max_length:
new_samples.append(make_sample(sample, start_idx, i))
start_idx = i
cur_len = 0
elif i == len(conversations) - 2:
new_samples.append(make_sample(sample, start_idx, i + 2))
cur_len += tmp_len
return new_samples
def split_all(content, begin, end, tokenizer_, max_length_):
"""
Keep the maximum round of conversations within the max token length constraint
"""
global tokenizer, max_length
tokenizer = tokenizer_
max_length = max_length_
content = content[begin:end]
new_content = []
with ProcessPoolExecutor() as executor:
for result in tqdm(executor.map(split_one_sample, content), total=len(content)):
new_content.extend(result)
return new_content
def filter_invalid_roles(content):
new_content = []
for i, c in enumerate(content):
roles = ["human", "gpt"]
if len(c["conversations"]) <= 0:
continue
valid = True
for j, s in enumerate(c["conversations"]):
if s["from"] != roles[j % 2]:
valid = False
break
if valid:
new_content.append(c)
return new_content
def main(args):
content = json.load(open(args.in_file, "r"))
tokenizer = transformers.AutoTokenizer.from_pretrained(
args.model_name_or_path,
model_max_length=args.max_length,
padding_side="right",
use_fast=False,
)
new_content = split_all(content, args.begin, args.end, tokenizer, args.max_length)
new_content = filter_invalid_roles(new_content)
print(f"total: {len(content)}, new: {len(new_content)}")
json.dump(new_content, open(args.out_file, "w"), indent=2)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--in-file", type=str, required=True)
parser.add_argument("--out-file", type=str, default="sharegpt_split.json")
parser.add_argument("--begin", type=int)
parser.add_argument("--end", type=int)
parser.add_argument("--model-name-or-path", type=str, required=True)
parser.add_argument("--max-length", type=int, default=2048)
args = parser.parse_args()
main(args) | ytchat | /ytchat-0.0.16-py3-none-any.whl/fastchat/data/split_long_conversation.py | split_long_conversation.py |
import argparse
import json
import re
import polyglot
from polyglot.detect import Detector
import pycld2
from tqdm import tqdm
def skip(conv, args):
# Remove certain languages
if args.keep_lang != "all" or args.skip_lang is not None:
text = "\n".join([x["value"] for x in conv["conversations"]])
try:
lang_code = Detector(text).language.code
except (pycld2.error, polyglot.detect.base.UnknownLanguage):
lang_code = "unknown"
if args.keep_lang != "all" and lang_code != args.keep_lang:
return True
if lang_code == args.skip_lang:
return True
# Remove repetitive numbers
if args.reduce_rep:
for sentence in conv["conversations"]:
val = sentence["value"]
sub = re.search(r"(\d)\1{8}", val)
if sub is not None:
return True
return False
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--in-file", type=str, required=True)
parser.add_argument("--out-file", type=str)
parser.add_argument(
"--keep-lang",
type=str,
default="all",
choices=["all", "en"],
help="Only keep certain langauges.",
)
parser.add_argument("--skip-lang", type=str, help="Skip a specific language.")
# NOTE: Be careful about reduce_rep which may remove some good data.
# For example, addresses could have long consecutive 0's
parser.add_argument("--reduce-rep", action="store_true")
args = parser.parse_args()
in_file = args.in_file
out_file = args.out_file
keep_lang = args.keep_lang
skip_lang = args.skip_lang
reduce_rep = args.reduce_rep
assert keep_lang == "all" or skip_lang is None
if out_file is None:
out_file = "sharegpt_clean"
if keep_lang != "all":
out_file += "_" + keep_lang
if skip_lang is not None:
out_file += "_skip_" + skip_lang
if reduce_rep:
out_file += "_reduce_rep"
out_file += ".json"
content = json.load(open(in_file, "r"))
num_conv = len(content)
new_content = []
for conv in tqdm(content):
if not skip(conv, args):
new_content.append(conv)
print(f"return {len(new_content)} out of {len(content)}, start dump ...")
json.dump(new_content, open(out_file, "w"), indent=2) | ytchat | /ytchat-0.0.16-py3-none-any.whl/fastchat/data/optional_clean.py | optional_clean.py |
import json
def identity_questions():
""" "
Adopted from https://github.com/young-geng/koala_data_pipeline/blob/main/process_hard_coded_data.py
"""
content = []
name = "Vicuna"
org = "Large Model Systems Organization (LMSYS)"
def generate_conversations(questions, answers):
for q in questions:
for a in answers:
content.append(
{
"id": f"identity_{len(content)}",
"conversations": [
{"from": "human", "value": q},
{"from": "gpt", "value": a},
],
}
)
questions = [
"Who are you?",
"What is your name?",
"Can you introduce yourself?",
"What's your name?",
"What are you called?",
"What are you?",
"Tell me your name.",
"Tell me about yourself.",
"Tell me about you.",
"Tell me who you are.",
]
answers = [
f"I am {name}, a language model trained by researchers from {org}.",
f"My name is {name}, and I'm a language model developed by {org}.",
f"You can call me {name}, and I was trained by {org} researchers as a language model.",
f"As a language model, I go by the name {name} and was trained by researchers from {org}.",
f"I'm a language model called {name}, and I was trained by {org} researchers.",
]
generate_conversations(questions, answers)
questions = [
"Who created you?",
"Who made you?",
"Who built you?",
"Who programmed you?",
"Who trained you?",
"Who taught you?",
"Who developed you?",
]
answers = [
f"Researchers from {org} created me.",
f"I'm created by {org}.",
f"I'm built by researchers from {org}.",
f"I am a language model trained by researchers from {org}.",
f"I'm a language model developed by {org}.",
f"I'm a language model created by researchers from {org}.",
f"My creators are researchers from {org}.",
]
generate_conversations(questions, answers)
questions = [
"Are you ChatGPT?",
"Are you GPT-2?",
"Are you GPT-3?",
"Are you GPT-4?",
"Are you davinci?",
"Are you davinci-001?",
"Are you davinci-002?",
"Are you davinci-003?",
"Are you curie?",
"Are you based on ChatGPT?",
"Are you based on GPT-2?",
"Are you based on GPT-3?",
"Are you based on GPT-4?",
"Are you based on davinci?",
"Are you based on davinci-001?",
"Are you based on davinci-002?",
"Are you based on davinci-003?",
"Are you based on curie?",
"Are you trained by OpenAI?",
"Are you trained by Google?",
"Are you trained by Microsoft?",
"Are you trained by Meta?",
"Are you trained by IBM?",
"Do you call OpenAI APIs?",
"Do you call Google APIs?",
"Do you call Microsoft APIs?",
"Do you call Meta APIs?",
"Do you call IBM APIs?",
"Are you created by OpenAI?",
"Are you created by Google?",
"Are you created by Microsoft?",
"Are you created by Meta?",
"Are you created by IBM?",
"Are you developed by OpenAI?",
"Are you developed by Google?",
"Are you developed by Microsoft?",
"Are you developed by Meta?",
"Are you developed by IBM?",
"Are you trained on OpenAI data?",
"Are you trained on Google data?",
"Are you trained on Microsoft data?",
"Are you trained on Meta data?",
"Are you trained on IBM data?",
"Are you trained with OpenAI data?",
"Are you trained with Google data?",
"Are you trained with Microsoft data?",
"Are you trained with Meta data?",
"Are you trained with IBM data?",
"Have you been trained with OpenAI data?",
"Have you been trained with Google data?",
"Have you been trained with Microsoft data?",
"Have you been trained with Meta data?",
"Have you been trained with IBM data?",
"Are you finetuned on OpenAI data?",
"Are you finetuned on Google data?",
"Are you finetuned on Microsoft data?",
"Are you finetuned on Meta data?",
"Are you finetuned on IBM data?",
"Are you finetuned with OpenAI data?",
"Are you finetuned with Google data?",
"Are you finetuned with Microsoft data?",
"Are you finetuned with Meta data?",
"Are you finetuned with IBM data?",
"Have you been finetuned with OpenAI data?",
"Have you been finetuned with Google data?",
"Have you been finetuned with Microsoft data?",
"Have you been finetuned with Meta data?",
"Have you been finetuned with IBM data?",
]
answers = [
f"No, I am a language model trained by researchers from {org}.",
f"No, I am a language model developed by researchers from {org}.",
f"No, I am a language model created by researchers from {org}.",
f"No, I am trained by researchers from {org}.",
f"No, I am developed by researchers from {org}.",
f"No, I am created by researchers from {org}.",
f"No, I'm a language model trained by researchers from {org}.",
f"No, I'm a language model developed by researchers from {org}.",
f"No, I'm a language model created by researchers from {org}.",
f"No, I'm trained by researchers from {org}.",
f"No, I'm developed by researchers from {org}.",
f"No, I'm created by researchers from {org}.",
]
generate_conversations(questions, answers)
return content
if __name__ == "__main__":
out_file = "hardcoded.json"
content = []
content.extend(identity_questions())
json.dump(content, open(out_file, "w"), indent=2) | ytchat | /ytchat-0.0.16-py3-none-any.whl/fastchat/data/hardcoded_questions.py | hardcoded_questions.py |
import argparse
import gc
import glob
import json
import os
import shutil
import tempfile
from huggingface_hub import snapshot_download
import torch
from torch import nn
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModelForCausalLM, AutoConfig
GB = 1 << 30
def split_files(model_path, tmp_path, split_size):
if not os.path.exists(model_path):
model_path = snapshot_download(repo_id=model_path)
if not os.path.exists(tmp_path):
os.makedirs(tmp_path)
file_pattern = os.path.join(model_path, "pytorch_model-*.bin")
files = glob.glob(file_pattern)
part = 0
try:
for file_path in tqdm(files):
state_dict = torch.load(file_path)
new_state_dict = {}
current_size = 0
for name, param in state_dict.items():
param_size = param.numel() * param.element_size()
if current_size + param_size > split_size:
new_file_name = f"pytorch_model-{part}.bin"
new_file_path = os.path.join(tmp_path, new_file_name)
torch.save(new_state_dict, new_file_path)
current_size = 0
new_state_dict = None
gc.collect()
new_state_dict = {}
part += 1
new_state_dict[name] = param
current_size += param_size
new_file_name = f"pytorch_model-{part}.bin"
new_file_path = os.path.join(tmp_path, new_file_name)
torch.save(new_state_dict, new_file_path)
new_state_dict = None
gc.collect()
new_state_dict = {}
part += 1
except Exception as e:
print(f"An error occurred during split_files: {e}")
shutil.rmtree(tmp_path)
raise
def apply_delta_low_cpu_mem(base_model_path, target_model_path, delta_path):
delta_tokenizer = AutoTokenizer.from_pretrained(delta_path, use_fast=False)
delta_config = AutoConfig.from_pretrained(delta_path)
if os.path.exists(target_model_path):
shutil.rmtree(target_model_path)
os.makedirs(target_model_path)
split_size = 4 * GB
with tempfile.TemporaryDirectory() as tmp_base_path, tempfile.TemporaryDirectory() as tmp_delta_path:
print(f"Split files for the base model to {tmp_base_path}")
split_files(base_model_path, tmp_base_path, split_size)
print(f"Split files for the delta weights to {tmp_delta_path}")
split_files(delta_path, tmp_delta_path, split_size)
base_pattern = os.path.join(tmp_base_path, "pytorch_model-*.bin")
base_files = glob.glob(base_pattern)
delta_pattern = os.path.join(tmp_delta_path, "pytorch_model-*.bin")
delta_files = glob.glob(delta_pattern)
delta_state_dict = torch.load(delta_files[0])
print("Applying the delta")
weight_map = {}
total_size = 0
for i, base_file in tqdm(enumerate(base_files)):
state_dict = torch.load(base_file)
file_name = f"pytorch_model-{i}.bin"
for name, param in state_dict.items():
if name not in delta_state_dict:
for delta_file in delta_files:
delta_state_dict = torch.load(delta_file)
gc.collect()
if name in delta_state_dict:
break
state_dict[name] += delta_state_dict[name]
weight_map[name] = file_name
total_size += param.numel() * param.element_size()
gc.collect()
torch.save(state_dict, os.path.join(target_model_path, file_name))
with open(
os.path.join(target_model_path, "pytorch_model.bin.index.json"), "w"
) as f:
json.dump(
{"weight_map": weight_map, "metadata": {"total_size": total_size}}, f
)
print(f"Saving the target model to {target_model_path}")
delta_tokenizer.save_pretrained(target_model_path)
delta_config.save_pretrained(target_model_path)
def apply_delta(base_model_path, target_model_path, delta_path):
print(f"Loading the delta weights from {delta_path}")
delta_tokenizer = AutoTokenizer.from_pretrained(delta_path, use_fast=False)
delta = AutoModelForCausalLM.from_pretrained(
delta_path, torch_dtype=torch.float16, low_cpu_mem_usage=True
)
print(f"Loading the base model from {base_model_path}")
base = AutoModelForCausalLM.from_pretrained(
base_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True
)
print("Applying the delta")
for name, param in tqdm(base.state_dict().items(), desc="Applying delta"):
assert name in delta.state_dict()
param.data += delta.state_dict()[name]
print(f"Saving the target model to {target_model_path}")
base.save_pretrained(target_model_path)
delta_tokenizer.save_pretrained(target_model_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--base-model-path", type=str, required=True)
parser.add_argument("--target-model-path", type=str, required=True)
parser.add_argument("--delta-path", type=str, required=True)
parser.add_argument(
"--low-cpu-mem",
action="store_true",
help="Lower the cpu memory usage. This will split large files and use "
"disk as swap to reduce the memory usage below 10GB.",
)
args = parser.parse_args()
if args.low_cpu_mem:
apply_delta_low_cpu_mem(
args.base_model_path, args.target_model_path, args.delta_path
)
else:
apply_delta(args.base_model_path, args.target_model_path, args.delta_path) | ytchat | /ytchat-0.0.16-py3-none-any.whl/fastchat/model/apply_delta.py | apply_delta.py |
# ytcl
ytcl is a command-line frontend for
[ytdl-server](https://gitlab.com/adralioh/ytdl-server).
Its syntax is based on youtube-dl/yt-dlp, and it shares many of the same
arguments.
[TOC]
## Installation
ytcl requires [Python](https://www.python.org/) 3.7+.
If you're using an Arch-based distro, ytcl is available in the
[AUR](https://aur.archlinux.org/packages/ytcl/).
Install from [PyPI](https://pypi.org/project/ytcl/):
```bash
pip3 install ytcl
```
Install from source:
```bash
git clone 'https://gitlab.com/adralioh/ytcl.git'
pip3 install ./ytcl
```
If you want color output to work on Windows, you can also optionally install
[Colorama](https://github.com/tartley/colorama):
```bash
pip3 install 'ytcl[windows_color]'
```
## Usage
First, you must set the `$YTCL_SERVER` env-var to the URL of the ytdl-server
so that ytcl can connect to it:
```bash
export YTCL_SERVER=http://ytdl-server.example.com
```
### Create a job
Download videos by creating a job:
```bash
ytcl create 'https://youtu.be/dQw4w9WgXcQ'
```
You can pass most arguments that youtube-dl accepts. Example:
```bash
ytcl create -f 'bestaudio/best' --extract-audio 'https://youtu.be/dQw4w9WgXcQ'
```
You can also start a job without waiting for it to finish:
```bash
ytcl -d create 'https://youtu.be/dQw4w9WgXcQ'
```
Run `ytcl create --help` for an exhaustive list of arguments.
### Get the status of a job
You can check the status of a job using the job ID:
```bash
ytcl get 'b7cce5f7-9f7c-47ed-ae13-2acf7c32cc29'
```
> The job ID is obtained from the output of the command where you create the
job.
The above command will run until the job finishes. If you just want to get the
complete current status of the job, you can change the output format:
```bash
ytcl -f all get 'b7cce5f7-9f7c-47ed-ae13-2acf7c32cc29'
```
JSON output is also supported:
```bash
ytcl -f json get 'b7cce5f7-9f7c-47ed-ae13-2acf7c32cc29'
```
### Cancel a job
You can also cancel a running job:
```bash
ytcl cancel 'b7cce5f7-9f7c-47ed-ae13-2acf7c32cc29'
```
The above command will wait until the job is cancelled. You can also exit
immediately without waiting:
```bash
ytcl -d cancel 'b7cce5f7-9f7c-47ed-ae13-2acf7c32cc29'
```
### Basic authentication
If the ytdl-server uses basic authentication, you can provide the credentials
via the `$YTCL_SERVER_USERNAME` and `$YTCL_SERVER_PASSWORD` env-vars:
```bash
export YTCL_SERVER_USERNAME=user
export YTCL_SERVER_PASSWORD=password
```
If only the username is provided, you will be prompted for the password
interactively.
## Exit codes
ytcl exits with the following exit codes when an error occurs:
| Code | Description |
| ---: | -------------------------------------------------------------------- |
| 2 | Invalid argument |
| 10 | Unspecified error |
| 11 | ytcl tried to use a ytdl_opt that is blacklisted by the ytdl-server when creating a job |
| 12 | ytcl tried to use a custom_opt that is blacklisted by the ytdl-server when creating a job |
| 13 | ytcl tried to cancel a job that has already completed |
| 14 | The job failed |
| 15 | ytcl received an error response from the ytdl-server |
| 16 | The URL of the ytdl-server wasn't provided. This can be set via the `$YTCL_SERVER` env-var |
| ytcl | /ytcl-1.0.1.tar.gz/ytcl-1.0.1/README.md | README.md |
import argparse
import datetime
import os
import shutil
import signal
import subprocess
import time
import traceback
from threading import Lock, Timer
from typing import Dict, Tuple
from flask import Flask, Response, request, send_from_directory
from flask_executor import Executor # type: ignore
from ytclip_server.version import VERSION
ALLOW_SHUTDOWN = False
DEFAULT_PORT = 80
DEFAULT_EXECUTOR_MAX_WORKERS = 32
STARTUP_DATETIME = datetime.datetime.now()
HERE = os.path.dirname(__file__)
TEMP_DIR = os.path.join(HERE, "temp")
STATE_PROCESSING = "processing"
STATE_FINISHED = "finished"
STATE_ERROR = "error"
# 4 hours of time before the mp4 is expired.
GARGABE_EXPIRATION_SECONDS = 60 * 60 * 4
active_tokens: Dict[str, str] = {}
active_tokens_mutex = Lock()
# Generate temp directory to do work in.
if os.path.exists(TEMP_DIR):
shutil.rmtree(TEMP_DIR, ignore_errors=True)
os.makedirs(TEMP_DIR, exist_ok=True)
def file_age(filepath: str) -> float:
"""Return the age of a file."""
return (
datetime.datetime.now() - datetime.datetime.fromtimestamp(os.path.getmtime(filepath))
).total_seconds()
def gabage_collect() -> None:
"""Garbage collect old files."""
for filename in os.listdir(TEMP_DIR):
if filename.endswith(".mp4"):
file_path = os.path.join(TEMP_DIR, filename)
if os.path.isfile(file_path):
file_age_seconds = file_age(file_path)
if file_age_seconds > GARGABE_EXPIRATION_SECONDS:
try:
basename = os.path.basename(file_path)
token = os.path.splitext(basename)[0]
with active_tokens_mutex:
if token in active_tokens:
del active_tokens[token]
os.remove(file_path)
except Exception: # pylint: disable=broad-except
log_error(traceback.format_exc())
# Note, app must be instantiated here because the functions
# below bind to it.
app = Flask(__name__)
app.config["EXECUTOR_MAX_WORKERS"] = int(
os.environ.get("EXECUTOR_MAX_WORKERS", DEFAULT_EXECUTOR_MAX_WORKERS)
)
executor = Executor(app)
garbage_collection_thread = Timer(GARGABE_EXPIRATION_SECONDS / 2, gabage_collect)
def get_file(filename): # pragma: no cover
"""Get the contents of a file."""
try:
src = os.path.join(HERE, filename)
# Specify binary mode to avoid decoding errors
return open(src, mode="rb").read() # pytype: disable=unspecified-encoding
except IOError as exc:
return str(exc)
def log_error(msg: str) -> None:
"""Logs an error to the print stream."""
print(msg)
def run_ytclip(url: str, start: str, end: str, token: str) -> None:
"""Return the string name of the file that was created."""
print(f"{url} {start} {end}")
cmd = [
"ytclip",
url,
"--start_timestamp",
start,
"--end_timestamp",
end,
"--outname",
token,
]
subprocess.call(cmd, cwd=TEMP_DIR)
if os.path.exists(os.path.join(TEMP_DIR, f"{token}.mp4")):
with active_tokens_mutex:
active_tokens[token] = STATE_FINISHED
else:
with active_tokens_mutex:
active_tokens[token] = STATE_ERROR
@app.route("/", methods=["GET"])
def api_default() -> Response:
"""Returns the contents of the index.html file."""
content = get_file("index.html")
return Response(content, mimetype="text/html")
@app.route("/preview.jpg", methods=["GET"])
def api_preview() -> Response:
"""Returns the contents of the preview.jpg file."""
content = get_file("preview.jpg")
return Response(content, mimetype="image/jpeg")
@app.route("/info")
def api_info() -> Tuple[str, int, dict]:
"""Returns the current time and the number of seconds since the server started."""
now_time = datetime.datetime.now()
headers = {"content-type": "text/plain; charset=utf-8"}
msg = "running\n"
msg += "Example: localhost/clip\n"
msg += "VERSION: " + VERSION + "\n"
msg += f"Launched at {STARTUP_DATETIME}"
msg += f"\nCurrent utc time: {datetime.datetime.utcnow()}"
msg += f"\nCurrent local time: {now_time}"
msg += f"\nRequest headers: {request.headers}\n"
return msg, 200, headers
@app.route("/clip", methods=["GET", "POST", "PUT", "DELETE"])
def api_clip() -> Tuple[str, int, dict]:
"""Api endpoint to running the command."""
# print(request)
try:
args = dict(request.form.items())
args.update(dict(request.args.items()))
url = args["url"]
start = args["start"]
end = args["end"]
except Exception as err: # pylint: disable=broad-except
traceback.print_exc()
log_error(f"{err}")
return "invalid request", 400, {"content-type": "text/plain; charset=utf-8"}
try:
token = os.urandom(16).hex()
with active_tokens_mutex:
active_tokens[token] = STATE_PROCESSING
task = lambda: run_ytclip(url=url, start=start, end=end, token=token)
executor.submit(task)
return f"{token}", 200, {"content-type": "text/plain; charset=utf-8"}
except Exception as exc: # pylint: disable=broad-except
traceback.print_exc()
log_error(f"{exc}")
return "failed", 500, {"content-type": "text/plain; charset=utf-8"}
@app.route("/clip/status/<token>", methods=["GET"])
def api_clip_status(token) -> Tuple[str, int, dict]:
"""Api endpoint to running the command."""
with active_tokens_mutex:
status = active_tokens.get(token, None)
if status is None:
return "not found", 200, {"content-type": "text/plain; charset=utf-8"}
if status == STATE_FINISHED:
return "ready for download", 200, {"content-type": "text/plain; charset=utf-8"}
if status == STATE_PROCESSING:
return "still processing", 200, {"content-type": "text/plain; charset=utf-8"}
if status == STATE_ERROR:
return "error aborted", 200, {"content-type": "text/plain; charset=utf-8"}
return "unknown", 200, {"content-type": "text/plain; charset=utf-8"}
@app.route("/clip/download/<token>", methods=["GET"])
def api_clip_download(token) -> Response:
"""Download the clip if it's ready."""
with active_tokens_mutex:
status = active_tokens.get(token, None)
if status is None:
return Response("not found", status=404, mimetype="text/plain; charset=utf-8")
if not status:
return Response("still processing", status=200, mimetype="text/plain; charset=utf-8")
# Download file to requester
name = f"{token}.mp4"
return send_from_directory(TEMP_DIR, name, as_attachment=True)
@app.route("/version", methods=["GET"])
def api_version() -> Response:
"""Api endpoint for getting the version."""
return Response(f"{VERSION}", status=200, mimetype="text/plain; charset=utf-8")
@app.route("/shutdown", methods=["GET"])
def api_clip_shutdown() -> Response:
"""Api endpoint for terminating the process."""
def kill_process():
"""Kill the process."""
time.sleep(1)
os.kill(os.getpid(), signal.SIGTERM)
if not ALLOW_SHUTDOWN:
return Response("shutdown not allowed", status=403, mimetype="text/plain; charset=utf-8")
func = request.environ.get("werkzeug.server.shutdown")
if func is None:
func = kill_process
executor.submit(func)
return Response("shutdown", status=200, mimetype="text/plain; charset=utf-8")
def main() -> None:
"""Run the flask app."""
global ALLOW_SHUTDOWN # pylint: disable=global-statement
parser = argparse.ArgumentParser(description="ytclip-server")
parser.add_argument("--port", type=int, default=None, help="port to listen on")
args = parser.parse_args()
port = args.port or int(os.environ.get("FLASK_PORT", DEFAULT_PORT))
ALLOW_SHUTDOWN = bool(int(os.environ.get("ALLOW_SHUTDOWN", "0")))
# Gracefully shutdown the flask app on SIGINT
app.run(host="0.0.0.0", port=port, debug=False, threaded=True)
if __name__ == "__main__":
main() | ytclip-server | /ytclip-server-1.0.4.tar.gz/ytclip-server-1.0.4/ytclip_server/app.py | app.py |
# ytclip - Quickly clip any video you see online and save it to your harddrive
# Usage
Make sure python (>3.7) is installed on your system.
```bash
> pip install ytclip
> ytclip https://www.youtube.com/watch?v=dQw4w9WgXcQ
# Follow the prompts
```
[](https://github.com/zackees/ytclip/actions/workflows/push_macos.yml)
[](https://github.com/zackees/ytclip/actions/workflows/push_win.yml)
[](https://github.com/zackees/ytclip/actions/workflows/push_ubuntu.yml)
# About
This utility provides a command that will automate downloading files and creating clips out of them.
Uses `yt-dlp` to do the downloading of files and uses `static_ffmpeg` to do the actual cutting.
### Windows
For easy use, just download the [`run_ytclip.bat`](https://raw.githubusercontent.com/zackees/ytclip/main/run_ytclip.bat) file and place it into any folder you want. Now you have a double clickable icon for users that don't like going to the command line. Make sure you've installed the [latest python](https://python.org/download), checking the box that says "ADD TO PATH" during installation.
# Running
```bash
# (Interactive)
> cd <MY_DIRECTORY>
> ytclip
Add new video:
url: ...
start_timestamp: 08:08
end_timestamp: 08:20
output_name: my_file
```
```bash
# (CMD-line)
> cd <MY_DIRECTORY>
> ytclip https://www.youtube.com/watch?v=CLXt3yh2g0s --start_timestamp 00:32 --end_timestamp 00:52 --outname myoutputfile
```
```bash
# Help file
> ytclip --help
```
# Api
You can also use it as an api:
```python
from ytclip.ytclip import run_download_and_cut
run_download_and_cut(
url="https://www.youtube.com/watch?v=-wtIMTCHWuI",
start_timestamp="1:10",
end_timestamp="1:30",
outname="myclip_withoutsuffix")
```
You can also turn off logging like so:
```python
from ytclip.ytclip import run_download_and_cut, set_logging
set_logging(False)
run_download_and_cut(...)
```
# Server
See the server version: [ytclip-server](https://github.com/zackees/ytclip-server)
# Releases
* 1.2.6: Fix error where omitting length would cause error.
* 1.2.5: Cleans youtube videos by removing the ?t= part of the url.
* 1.2.4: Adds update warning for ytclip if out of date.
* 1.2.3: Make yt-dlp use 1.2.3 or higher (fixes downloader).
# TODO
* Add lossless cut to tool:
* https://github.com/mifi/lossless-cut
| ytclip | /ytclip-1.2.6.tar.gz/ytclip-1.2.6/README.md | README.md |
import sys
import getopt
import logging
import os
# 把當前文件資料夾所在路徑加到PYTHONPATH # terminal同時也可以執行
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from yt_concate.pipeline.steps.preflight import Preflight
from yt_concate.pipeline.steps.get_video_list import GetVideoList
from yt_concate.pipeline.steps.initialize_yt import InitializeYT
from yt_concate.pipeline.steps.download_caption import DownLoadCaptions
from yt_concate.pipeline.steps.read_caption import ReadCaption
from yt_concate.pipeline.steps.search import Search
from yt_concate.pipeline.steps.download_videos import DownloadVideos
from yt_concate.pipeline.steps.edit_video import EditVideo
from yt_concate.pipeline.steps.postflight import Postflight
from yt_concate.pipeline.steps.step import StepException
from yt_concate.pipeline.pipeline import Pipeline
from yt_concate.utils import Utils
def print_usage():
print("python main.py -c <channel_id> -s <search_word> -l <limit>")
print("python main.py --channel_id <channel_id> --search_word <search_word> --limit <limit>")
print("options")
print("{:>5}{:<12} ,{}".format("-c", "--channel_id", "channel id for youtube channel"))
print("{:>5}{:<12} ,{}".format("-s", "--search_word", "search world in the channel videos"))
print("{:>5}{:<12} ,{}".format("-l", "--limit", "integer,quantity for concatenating videos"))
print("{:>5}{:<12} ,{}".format("", "--cleanup", "logical,delete files after the result files complete"))
print("{:>5}{:<12} ,{}".format("", "--level", "the level to print on the screen,default is logging.INFO"))
# channel_id = "UCKSVUHI9rbbkXhvAXK-2uxA"
# search_word = "incredible"
def main():
inputs = {
"channel_id": "",
"search_cord": "",
"limit": "",
"cleanup": True,
"level": logging.INFO,
}
short_opt = "hc:s:l:"
long_opt = "help channel_id= search_word= limit= cleanup= level=".split()
try:
opts, args = getopt.getopt(sys.argv[1:], short_opt, long_opt)
except getopt.GetoptError:
print_usage()
sys.exit(2)
for opt, arg in opts:
if opt in ('-h', "help"):
print_usage()
sys.exit(0)
elif opt in ("-c", "--channel_id"):
inputs["channel_id"] = arg
elif opt in ("-s", "--search_cord"):
inputs["search_cord"] = arg
elif opt in ("-l", "--limit"):
inputs["limit"] = arg
elif opt == "cleanup":
inputs["cleanup"] = arg
elif opt == "level":
inputs["level"] = arg
if not inputs["limit"].isnumeric():
print_usage()
sys.exit(2)
steps = [
Preflight(),
GetVideoList(),
InitializeYT(),
DownLoadCaptions(),
ReadCaption(),
Search(),
DownloadVideos(),
EditVideo(),
Postflight(),
]
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
file_handler = logging.FileHandler("project.log")
formatter = logging.Formatter("%(levelname)s:%(asctime)s:%(message)s")
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(inputs["level"])
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
utils = Utils()
p = Pipeline(steps)
p.run(inputs, utils)
if __name__ == "__main__":
main() | ytclips-merge | /ytclips_merge-0.1.1-py3-none-any.whl/yt_concate/main.py | main.py |
# ytcomment_trends
[](https://badge.fury.io/py/ytcomment-trends)
[](https://pypi.org/project/ytcomment-trends/)
## Dependencies
Before install this library, you need to install mecab for NLP.
For macOS, run this command to install mecab and ipadic dictionary. For other OS, please follow the instructions from mecab official documentation.
```
brew install mecab mecab-ipadic
```
## How to use
### Get YouTube API Client Secret
Please refer to [Google's official documentation](https://developers.google.com/youtube/registering_an_application) for getting API keys. Make sure you create credentials with API Key (not OAuth 2.0 Client) with API restriction to the YouTube Data API.
### Run command
Install this library with the following command:
```
pip install ytcomment_trends
```
If you are using virtual environment, please use the package manager of the virtual environment (e.g., `pipenv install`, `poetry add`).
After installation, run this command to analyze video.
```
ytcomment_trends -v pR2E2OatMTQ -k hogefuga
```
If you are not sure about the arguments, run following command to check.
```
ytcomment_trends -h
``` | ytcomment-trends | /ytcomment_trends-0.1.7.tar.gz/ytcomment_trends-0.1.7/README.md | README.md |
from .controllers.google_api import GoogleAPIAuth, get_comments
from .controllers.nlp import OsetiAnalyzer
from typing import List, Dict
import pandas as pd
import datetime
class CommentAnalyzer:
"""YouTube comment analyzer
Args:
video_id (str): YouTube video ID of the video to get comments from
api_key (str): YouTube API key (make sure to enable YouTube Data API V3)
"""
def __init__(self, video_id: str, api_key: str, next_page_token: str=None):
self.video_id = video_id
self.api_token_obj = GoogleAPIAuth(api_key).get_authenticated_service()
self.next_page_token = next_page_token
def get_comments(self) -> tuple[List[Dict], str]:
"""get comments from video
Returns:
List[Dict]: comments
"""
return get_comments(self.api_token_obj, self.video_id, next_page_token=self.next_page_token)
def get_analyzed_comments(self, comments: List[Dict]) -> List[Dict]:
"""add oseti score
Args:
comments (List[Dict]): comments in list-in-dict format
Returns:
List[Dict]: comments with oseti score
"""
oa = OsetiAnalyzer()
for comment in comments:
try:
comment["oseti_score"] = oa.analyze(comment["snippet"]["topLevelComment"]["snippet"]["textOriginal"])
except:
comment["oseti_score"] = 0
return comments
def get_summarized_comments(self, comments: List[Dict], summarized_in: str = "W") -> Dict:
"""get summarized comments grouped by datetime (default week)
Args:
comments (List[Dict]): comments
summarized_in (str, optional): how to group by comments. Please refer to pandas resample documentation. Defaults to "W".
Returns:
pd.DataFrame: summarized comments grouped by datetime (default week)
"""
df = pd.json_normalize(comments)
df['snippet.topLevelComment.snippet.publishedAt'] = pd.to_datetime(df['snippet.topLevelComment.snippet.publishedAt'])
df = df.set_index('snippet.topLevelComment.snippet.publishedAt')
ca_summarized = df.resample(summarized_in, label="left").sum()
dts = [dt.astype('datetime64[D]').astype(datetime.datetime) for dt in list(ca_summarized.index.values)]
oseti_scores = []
for s, n in zip(list(ca_summarized['oseti_score']), list(ca_summarized['snippet.isPublic'])):
if n > 0:
oseti_scores.append(s / n)
else:
oseti_scores.append(0)
return {k: {"oseti_score": v, "comments": c} for k, v, c in zip(dts, oseti_scores, ca_summarized['snippet.isPublic'])} | ytcomment-trends | /ytcomment_trends-0.1.7.tar.gz/ytcomment_trends-0.1.7/ytcomment_trends/main.py | main.py |
# ytcomments
A simple tool to get Youtube comments and it's translation
## Table of Contents
- [Description](#description)
- [Installation](#installation)
- [Usage](#usage)
- [Author](#author)
- [License](#license)
## Description
This tool help you retrieve Youtube comments and if needed it fetches the translation of comments.
This tool indented to use in terminal by calling the command 'ytcmts' only.
## Installation
Install from pypi
```
pip install ytcomments
```
Install from GitHub
```
pip install git+https://github.com/dipson94/yt-comments
```
#### Install requires
* pyperclip >= 1.8.2
* tqdm >= 4.65.0
* getch >= 1.0
* youtube_comment_downloader >= 0.1.68
## Usage
command in terminal
```
ytcmts
```
## Author
Dipson
## License
GNU GPL V3
| ytcomments | /ytcomments-0.7.tar.gz/ytcomments-0.7/README.md | README.md |
# YTCompDL
[](https://pypi.org/project/ytcompdl/)
[](https://hub.docker.com/r/koisland/ytcompdl)
Command-line program to download and segment Youtube videos automatically.

## Getting Started
---
### Getting a YouTube Data API Key
Follow these [instructions](https://developers.google.com/youtube/v3/getting-started).
Store your API key in a `.env` file in the main working directory.
### Setup
#### venv
```shell
# Make sure ffmpeg is installed.
sudo apt install ffmpeg
virtualenv venv
source venv/bin/activate
ytcompdl -h
```
#### Conda
```shell
# Setup env.
conda env create -f envs/env.yaml -n ytcompdl
conda activate ytcompdl
ytcompdl -h
```
#### Docker
`ffmpeg` comes installed with the docker image.
Arguments are passed after the image name.
```shell
# Image wd set to /ytcompdl
docker run --rm -v /$PWD:/ytcompdl koisland/ytcompdl:latest -h
```
To build the image locally.
```shell
docker build . -t ytcompdl:latest
```
### Usage
```shell
# Download audio of video.
ytcompdl -u "https://www.youtube.com/watch?v=gIsHl7swEgk" -k .env -o "audio" -x config/config_regex.yaml
# Download split audio of video and save comment/desc used to timestamp.
ytcompdl -u "https://www.youtube.com/watch?v=gIsHl7swEgk" \
-k .env \
-o "audio" \
-x config/config_regex.yaml \
-t -s
```
## Options
---
```
usage: main.py [-h] -u URL -o OUTPUT_TYPE -x REGEX_CFG [-d DIRECTORY] [-n N_CORES] [-r RESOLUTION] [-m METADATA] [-c] [-t] [-s] [-f FADE] [-ft FADE_TIME]
Command-line program to download and segment Youtube videos.
options:
-h, --help show this help message and exit
-u URL, --url URL Youtube URL
-o OUTPUT_TYPE, --output_type OUTPUT_TYPE
Desired output (audio/video)
-x REGEX_CFG, --regex_cfg REGEX_CFG
Path to regex config file (.yaml)
-d DIRECTORY, --directory DIRECTORY
Output directory.
-n N_CORES, --n_cores N_CORES
Use n cores to process tracks in parallel.
-r RESOLUTION, --resolution RESOLUTION
Desired resolution (video only)
-m METADATA, --metadata METADATA
Path to optional metadata (.json)
-c, --comment Select comment.
-t, --timestamps Save timestamps as .txt file.
-s, --slice Slice output.
-f FADE, --fade FADE Fade (in/out/both/none)
-ft FADE_TIME, --fade_time FADE_TIME
Fade time in seconds.
```
### Regular Expressions
To set your own regular expressions to search for in video comments/descriptions, modify `config/config_regex.yaml`.
*config/config_regex.yaml*
```yaml
ignored_spacers: # Optional
- "―"
- "―"
- "-"
- "\\s"
- "["
- "]"
time: "\\d{1,2}:?\\d*:\\d{2}" # Optional
# Required
start_timestamp: "(.*?)(?:{ignored_spacers})*({time})(?:{ignored_spacers})*(.*)"
duration_timestamp: "(.*?)(?:{ignored_spacers})*({time})(?:{ignored_spacers})*({time})(?:{ignored_spacers})*(.*)"
```
For some examples, check these patterns below:
* `Start` Timestamps
* `Duration` Timestamps
## Workflow
---
* Query YouTube's Data API for selected video.
* Search description and comments for timestamps ranked by similarity to video duration.
* Parse timestamps with regular expresions.
* Download video and/or audio streams from Youtube.
* Process streams.
* Merge or convert streams.
* Slice by found timestamps.
* Apply file metadata.
* Add audio and/or video fade.
* Cleanup
* Remove intermediate outputs.
## Build from Source
```shell
virtualenv venv && source venv/bin/activate
python setup.py sdist bdist_wheel
ytcompdl -h
```
## TO-DO:
---
* [ ] **Testing**
* Add unittests.
| ytcompdl | /ytcompdl-1.0.2.tar.gz/ytcompdl-1.0.2/README.md | README.md |
import cv2 as cv
import json
import time
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.utils.visualizer import ColorMode
from detectron2 import model_zoo
from detectron2.data import MetadataCatalog, DatasetCatalog
from detectron2.modeling import build_model
import torch
import numpy as np
from PIL import Image
from com_ineuron_utils.utils import encodeImageIntoBase64
class Detector:
def __init__(self,filename, model_name):
# set model and test set
self.filename = filename
# obtain detectron2's default config
self.cfg = get_cfg()
# load values from a file
# self.cfg.merge_from_file("config.yml")
if model_name == 'faster_rcnn_R_50_C4':
print(f'model:faster_rcnn_R_50_C4')
self.model = 'faster_rcnn_R_50_C4_1x.yaml'
self.cfg.merge_from_file(model_zoo.get_config_file("COCO-Detection/" + self.model))
self.cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Detection/" + self.model)
elif model_name == 'faster_rcnn_R_50_FPN':
print(f'model:faster_rcnn_R_50_FPN')
self.model = 'faster_rcnn_R_50_FPN_3x.yaml'
self.cfg.merge_from_file(model_zoo.get_config_file("COCO-Detection/"+self.model))
self.cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Detection/" + self.model)
elif model_name == 'faster_rcnn_X_101_32x8d_FPN':
print(f'model:faster_rcnn_X_101_32x8d_FPN')
self.model = 'faster_rcnn_X_101_32x8d_FPN_3x.yaml'
self.cfg.merge_from_file(model_zoo.get_config_file("COCO-Detection/"+self.model))
self.cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Detection/" + self.model)
elif model_name == 'retinanet_R_50_FPN':
print(f'model:retinanet_R_50_FPN')
self.model = 'retinanet_R_50_FPN_3x.yaml'
self.cfg.merge_from_file(model_zoo.get_config_file("COCO-Detection/"+self.model))
self.cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Detection/" + self.model)
elif model_name == 'fast_rcnn_R_50_FPN':
print(f'model:fast_rcnn_R_50_FPN')
self.model = 'fast_rcnn_R_50_FPN_1x.yaml'
self.cfg.merge_from_file(model_zoo.get_config_file("COCO-Detection/"+self.model))
self.cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Detection/" + self.model)
else:
raise Exception('Unknown model')
# set device to cpu
self.cfg.MODEL.DEVICE = "cpu"
# get weights
# self.cfg.MODEL.WEIGHTS = "model_final_f10217.pkl"
# self.cfg.MODEL.WEIGHTS = "model_final.pth"
# set the testing threshold for this model
self.cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.50
# build model from weights
# self.cfg.MODEL.WEIGHTS = self.convert_model_for_inference()
# build model and convert for inference
def convert_model_for_inference(self):
# build model
model = build_model(self.cfg)
# save as checkpoint
torch.save(model.state_dict(), 'checkpoint.pth')
# return path to inference model
return 'checkpoint.pth'
def inference(self):
# self.ROI = ROI
predictor = DefaultPredictor(self.cfg)
im = cv.imread(self.filename)
t0 = time.time()
outputs = predictor(im)
t1 = time.time()
metadata = MetadataCatalog.get(self.cfg.DATASETS.TRAIN[0])
# visualise
v = Visualizer(im[:, :, ::-1], metadata=metadata, scale=1.2)
v = v.draw_instance_predictions(outputs["instances"].to("cpu"))
predicted_image = v.get_image()
im_rgb = cv.cvtColor(predicted_image, cv.COLOR_RGB2BGR)
cv.imwrite('output.jpg', im_rgb)
# imagekeeper = []
opencodedbase64 = encodeImageIntoBase64("output.jpg")
pred_bbox = outputs['instances'].pred_boxes.to('cpu')
pred_bbox = pred_bbox.tensor.detach().numpy().tolist()
pred_scores = outputs['instances'].scores.to('cpu').tolist()
pred_time = round((t1 - t0), 3)
print(pred_time)
print(pred_scores)
print(pred_bbox)
# imagekeeper.append({"image": opencodedbase64.decode('utf-8')})
result = {"pred_bbox": str(pred_bbox),
"pred_scores": str(pred_scores),
"pred_time": str(pred_time),
"image" : opencodedbase64.decode('utf-8')}
return result | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/ObjectDetector.py | ObjectDetector.py |
from .config import CfgNode as CN
# NOTE: given the new config system
# (https://detectron2.readthedocs.io/en/latest/tutorials/lazyconfigs.html),
# we will stop adding new functionalities to default CfgNode.
# -----------------------------------------------------------------------------
# Convention about Training / Test specific parameters
# -----------------------------------------------------------------------------
# Whenever an argument can be either used for training or for testing, the
# corresponding name will be post-fixed by a _TRAIN for a training parameter,
# or _TEST for a test-specific parameter.
# For example, the number of images during training will be
# IMAGES_PER_BATCH_TRAIN, while the number of images for testing will be
# IMAGES_PER_BATCH_TEST
# -----------------------------------------------------------------------------
# Config definition
# -----------------------------------------------------------------------------
_C = CN()
# The version number, to upgrade from old configs to new ones if any
# changes happen. It's recommended to keep a VERSION in your config file.
_C.VERSION = 2
_C.MODEL = CN()
_C.MODEL.LOAD_PROPOSALS = False
_C.MODEL.MASK_ON = False
_C.MODEL.KEYPOINT_ON = False
_C.MODEL.DEVICE = "cuda"
_C.MODEL.META_ARCHITECTURE = "GeneralizedRCNN"
# Path (a file path, or URL like detectron2://.., https://..) to a checkpoint file
# to be loaded to the model. You can find available models in the model zoo.
_C.MODEL.WEIGHTS = ""
# Values to be used for image normalization (BGR order, since INPUT.FORMAT defaults to BGR).
# To train on images of different number of channels, just set different mean & std.
# Default values are the mean pixel value from ImageNet: [103.53, 116.28, 123.675]
_C.MODEL.PIXEL_MEAN = [103.530, 116.280, 123.675]
# When using pre-trained models in Detectron1 or any MSRA models,
# std has been absorbed into its conv1 weights, so the std needs to be set 1.
# Otherwise, you can use [57.375, 57.120, 58.395] (ImageNet std)
_C.MODEL.PIXEL_STD = [1.0, 1.0, 1.0]
# -----------------------------------------------------------------------------
# INPUT
# -----------------------------------------------------------------------------
_C.INPUT = CN()
# By default, {MIN,MAX}_SIZE options are used in transforms.ResizeShortestEdge.
# Please refer to ResizeShortestEdge for detailed definition.
# Size of the smallest side of the image during training
_C.INPUT.MIN_SIZE_TRAIN = (800,)
# Sample size of smallest side by choice or random selection from range give by
# INPUT.MIN_SIZE_TRAIN
_C.INPUT.MIN_SIZE_TRAIN_SAMPLING = "choice"
# Maximum size of the side of the image during training
_C.INPUT.MAX_SIZE_TRAIN = 1333
# Size of the smallest side of the image during testing. Set to zero to disable resize in testing.
_C.INPUT.MIN_SIZE_TEST = 800
# Maximum size of the side of the image during testing
_C.INPUT.MAX_SIZE_TEST = 1333
# Mode for flipping images used in data augmentation during training
# choose one of ["horizontal, "vertical", "none"]
_C.INPUT.RANDOM_FLIP = "horizontal"
# `True` if cropping is used for data augmentation during training
_C.INPUT.CROP = CN({"ENABLED": False})
# Cropping type. See documentation of `detectron2.data.transforms.RandomCrop` for explanation.
_C.INPUT.CROP.TYPE = "relative_range"
# Size of crop in range (0, 1] if CROP.TYPE is "relative" or "relative_range" and in number of
# pixels if CROP.TYPE is "absolute"
_C.INPUT.CROP.SIZE = [0.9, 0.9]
# Whether the model needs RGB, YUV, HSV etc.
# Should be one of the modes defined here, as we use PIL to read the image:
# https://pillow.readthedocs.io/en/stable/handbook/concepts.html#concept-modes
# with BGR being the one exception. One can set image format to BGR, we will
# internally use RGB for conversion and flip the channels over
_C.INPUT.FORMAT = "BGR"
# The ground truth mask format that the model will use.
# Mask R-CNN supports either "polygon" or "bitmask" as ground truth.
_C.INPUT.MASK_FORMAT = "polygon" # alternative: "bitmask"
# -----------------------------------------------------------------------------
# Dataset
# -----------------------------------------------------------------------------
_C.DATASETS = CN()
# List of the dataset names for training. Must be registered in DatasetCatalog
# Samples from these datasets will be merged and used as one dataset.
_C.DATASETS.TRAIN = ()
# List of the pre-computed proposal files for training, which must be consistent
# with datasets listed in DATASETS.TRAIN.
_C.DATASETS.PROPOSAL_FILES_TRAIN = ()
# Number of top scoring precomputed proposals to keep for training
_C.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN = 2000
# List of the dataset names for testing. Must be registered in DatasetCatalog
_C.DATASETS.TEST = ()
# List of the pre-computed proposal files for test, which must be consistent
# with datasets listed in DATASETS.TEST.
_C.DATASETS.PROPOSAL_FILES_TEST = ()
# Number of top scoring precomputed proposals to keep for test
_C.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST = 1000
# -----------------------------------------------------------------------------
# DataLoader
# -----------------------------------------------------------------------------
_C.DATALOADER = CN()
# Number of data loading threads
_C.DATALOADER.NUM_WORKERS = 4
# If True, each batch should contain only images for which the aspect ratio
# is compatible. This groups portrait images together, and landscape images
# are not batched with portrait images.
_C.DATALOADER.ASPECT_RATIO_GROUPING = True
# Options: TrainingSampler, RepeatFactorTrainingSampler
_C.DATALOADER.SAMPLER_TRAIN = "TrainingSampler"
# Repeat threshold for RepeatFactorTrainingSampler
_C.DATALOADER.REPEAT_THRESHOLD = 0.0
# Tf True, when working on datasets that have instance annotations, the
# training dataloader will filter out images without associated annotations
_C.DATALOADER.FILTER_EMPTY_ANNOTATIONS = True
# ---------------------------------------------------------------------------- #
# Backbone options
# ---------------------------------------------------------------------------- #
_C.MODEL.BACKBONE = CN()
_C.MODEL.BACKBONE.NAME = "build_resnet_backbone"
# Freeze the first several stages so they are not trained.
# There are 5 stages in ResNet. The first is a convolution, and the following
# stages are each group of residual blocks.
_C.MODEL.BACKBONE.FREEZE_AT = 2
# ---------------------------------------------------------------------------- #
# FPN options
# ---------------------------------------------------------------------------- #
_C.MODEL.FPN = CN()
# Names of the input feature maps to be used by FPN
# They must have contiguous power of 2 strides
# e.g., ["res2", "res3", "res4", "res5"]
_C.MODEL.FPN.IN_FEATURES = []
_C.MODEL.FPN.OUT_CHANNELS = 256
# Options: "" (no norm), "GN"
_C.MODEL.FPN.NORM = ""
# Types for fusing the FPN top-down and lateral features. Can be either "sum" or "avg"
_C.MODEL.FPN.FUSE_TYPE = "sum"
# ---------------------------------------------------------------------------- #
# Proposal generator options
# ---------------------------------------------------------------------------- #
_C.MODEL.PROPOSAL_GENERATOR = CN()
# Current proposal generators include "RPN", "RRPN" and "PrecomputedProposals"
_C.MODEL.PROPOSAL_GENERATOR.NAME = "RPN"
# Proposal height and width both need to be greater than MIN_SIZE
# (a the scale used during training or inference)
_C.MODEL.PROPOSAL_GENERATOR.MIN_SIZE = 0
# ---------------------------------------------------------------------------- #
# Anchor generator options
# ---------------------------------------------------------------------------- #
_C.MODEL.ANCHOR_GENERATOR = CN()
# The generator can be any name in the ANCHOR_GENERATOR registry
_C.MODEL.ANCHOR_GENERATOR.NAME = "DefaultAnchorGenerator"
# Anchor sizes (i.e. sqrt of area) in absolute pixels w.r.t. the network input.
# Format: list[list[float]]. SIZES[i] specifies the list of sizes to use for
# IN_FEATURES[i]; len(SIZES) must be equal to len(IN_FEATURES) or 1.
# When len(SIZES) == 1, SIZES[0] is used for all IN_FEATURES.
_C.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64, 128, 256, 512]]
# Anchor aspect ratios. For each area given in `SIZES`, anchors with different aspect
# ratios are generated by an anchor generator.
# Format: list[list[float]]. ASPECT_RATIOS[i] specifies the list of aspect ratios (H/W)
# to use for IN_FEATURES[i]; len(ASPECT_RATIOS) == len(IN_FEATURES) must be true,
# or len(ASPECT_RATIOS) == 1 is true and aspect ratio list ASPECT_RATIOS[0] is used
# for all IN_FEATURES.
_C.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.5, 1.0, 2.0]]
# Anchor angles.
# list[list[float]], the angle in degrees, for each input feature map.
# ANGLES[i] specifies the list of angles for IN_FEATURES[i].
_C.MODEL.ANCHOR_GENERATOR.ANGLES = [[-90, 0, 90]]
# Relative offset between the center of the first anchor and the top-left corner of the image
# Value has to be in [0, 1). Recommend to use 0.5, which means half stride.
# The value is not expected to affect model accuracy.
_C.MODEL.ANCHOR_GENERATOR.OFFSET = 0.0
# ---------------------------------------------------------------------------- #
# RPN options
# ---------------------------------------------------------------------------- #
_C.MODEL.RPN = CN()
_C.MODEL.RPN.HEAD_NAME = "StandardRPNHead" # used by RPN_HEAD_REGISTRY
# Names of the input feature maps to be used by RPN
# e.g., ["p2", "p3", "p4", "p5", "p6"] for FPN
_C.MODEL.RPN.IN_FEATURES = ["res4"]
# Remove RPN anchors that go outside the image by BOUNDARY_THRESH pixels
# Set to -1 or a large value, e.g. 100000, to disable pruning anchors
_C.MODEL.RPN.BOUNDARY_THRESH = -1
# IOU overlap ratios [BG_IOU_THRESHOLD, FG_IOU_THRESHOLD]
# Minimum overlap required between an anchor and ground-truth box for the
# (anchor, gt box) pair to be a positive example (IoU >= FG_IOU_THRESHOLD
# ==> positive RPN example: 1)
# Maximum overlap allowed between an anchor and ground-truth box for the
# (anchor, gt box) pair to be a negative examples (IoU < BG_IOU_THRESHOLD
# ==> negative RPN example: 0)
# Anchors with overlap in between (BG_IOU_THRESHOLD <= IoU < FG_IOU_THRESHOLD)
# are ignored (-1)
_C.MODEL.RPN.IOU_THRESHOLDS = [0.3, 0.7]
_C.MODEL.RPN.IOU_LABELS = [0, -1, 1]
# Number of regions per image used to train RPN
_C.MODEL.RPN.BATCH_SIZE_PER_IMAGE = 256
# Target fraction of foreground (positive) examples per RPN minibatch
_C.MODEL.RPN.POSITIVE_FRACTION = 0.5
# Options are: "smooth_l1", "giou", "diou", "ciou"
_C.MODEL.RPN.BBOX_REG_LOSS_TYPE = "smooth_l1"
_C.MODEL.RPN.BBOX_REG_LOSS_WEIGHT = 1.0
# Weights on (dx, dy, dw, dh) for normalizing RPN anchor regression targets
_C.MODEL.RPN.BBOX_REG_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# The transition point from L1 to L2 loss. Set to 0.0 to make the loss simply L1.
_C.MODEL.RPN.SMOOTH_L1_BETA = 0.0
_C.MODEL.RPN.LOSS_WEIGHT = 1.0
# Number of top scoring RPN proposals to keep before applying NMS
# When FPN is used, this is *per FPN level* (not total)
_C.MODEL.RPN.PRE_NMS_TOPK_TRAIN = 12000
_C.MODEL.RPN.PRE_NMS_TOPK_TEST = 6000
# Number of top scoring RPN proposals to keep after applying NMS
# When FPN is used, this limit is applied per level and then again to the union
# of proposals from all levels
# NOTE: When FPN is used, the meaning of this config is different from Detectron1.
# It means per-batch topk in Detectron1, but per-image topk here.
# See the "find_top_rpn_proposals" function for details.
_C.MODEL.RPN.POST_NMS_TOPK_TRAIN = 2000
_C.MODEL.RPN.POST_NMS_TOPK_TEST = 1000
# NMS threshold used on RPN proposals
_C.MODEL.RPN.NMS_THRESH = 0.7
# Set this to -1 to use the same number of output channels as input channels.
_C.MODEL.RPN.CONV_DIMS = [-1]
# ---------------------------------------------------------------------------- #
# ROI HEADS options
# ---------------------------------------------------------------------------- #
_C.MODEL.ROI_HEADS = CN()
_C.MODEL.ROI_HEADS.NAME = "Res5ROIHeads"
# Number of foreground classes
_C.MODEL.ROI_HEADS.NUM_CLASSES = 80
# Names of the input feature maps to be used by ROI heads
# Currently all heads (box, mask, ...) use the same input feature map list
# e.g., ["p2", "p3", "p4", "p5"] is commonly used for FPN
_C.MODEL.ROI_HEADS.IN_FEATURES = ["res4"]
# IOU overlap ratios [IOU_THRESHOLD]
# Overlap threshold for an RoI to be considered background (if < IOU_THRESHOLD)
# Overlap threshold for an RoI to be considered foreground (if >= IOU_THRESHOLD)
_C.MODEL.ROI_HEADS.IOU_THRESHOLDS = [0.5]
_C.MODEL.ROI_HEADS.IOU_LABELS = [0, 1]
# RoI minibatch size *per image* (number of regions of interest [ROIs]) during training
# Total number of RoIs per training minibatch =
# ROI_HEADS.BATCH_SIZE_PER_IMAGE * SOLVER.IMS_PER_BATCH
# E.g., a common configuration is: 512 * 16 = 8192
_C.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512
# Target fraction of RoI minibatch that is labeled foreground (i.e. class > 0)
_C.MODEL.ROI_HEADS.POSITIVE_FRACTION = 0.25
# Only used on test mode
# Minimum score threshold (assuming scores in a [0, 1] range); a value chosen to
# balance obtaining high recall with not having too many low precision
# detections that will slow down inference post processing steps (like NMS)
# A default threshold of 0.0 increases AP by ~0.2-0.3 but significantly slows down
# inference.
_C.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.05
# Overlap threshold used for non-maximum suppression (suppress boxes with
# IoU >= this threshold)
_C.MODEL.ROI_HEADS.NMS_THRESH_TEST = 0.5
# If True, augment proposals with ground-truth boxes before sampling proposals to
# train ROI heads.
_C.MODEL.ROI_HEADS.PROPOSAL_APPEND_GT = True
# ---------------------------------------------------------------------------- #
# Box Head
# ---------------------------------------------------------------------------- #
_C.MODEL.ROI_BOX_HEAD = CN()
# C4 don't use head name option
# Options for non-C4 models: FastRCNNConvFCHead,
_C.MODEL.ROI_BOX_HEAD.NAME = ""
# Options are: "smooth_l1", "giou", "diou", "ciou"
_C.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_TYPE = "smooth_l1"
# The final scaling coefficient on the box regression loss, used to balance the magnitude of its
# gradients with other losses in the model. See also `MODEL.ROI_KEYPOINT_HEAD.LOSS_WEIGHT`.
_C.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_WEIGHT = 1.0
# Default weights on (dx, dy, dw, dh) for normalizing bbox regression targets
# These are empirically chosen to approximately lead to unit variance targets
_C.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS = (10.0, 10.0, 5.0, 5.0)
# The transition point from L1 to L2 loss. Set to 0.0 to make the loss simply L1.
_C.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA = 0.0
_C.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION = 14
_C.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO = 0
# Type of pooling operation applied to the incoming feature map for each RoI
_C.MODEL.ROI_BOX_HEAD.POOLER_TYPE = "ROIAlignV2"
_C.MODEL.ROI_BOX_HEAD.NUM_FC = 0
# Hidden layer dimension for FC layers in the RoI box head
_C.MODEL.ROI_BOX_HEAD.FC_DIM = 1024
_C.MODEL.ROI_BOX_HEAD.NUM_CONV = 0
# Channel dimension for Conv layers in the RoI box head
_C.MODEL.ROI_BOX_HEAD.CONV_DIM = 256
# Normalization method for the convolution layers.
# Options: "" (no norm), "GN", "SyncBN".
_C.MODEL.ROI_BOX_HEAD.NORM = ""
# Whether to use class agnostic for bbox regression
_C.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG = False
# If true, RoI heads use bounding boxes predicted by the box head rather than proposal boxes.
_C.MODEL.ROI_BOX_HEAD.TRAIN_ON_PRED_BOXES = False
# ---------------------------------------------------------------------------- #
# Cascaded Box Head
# ---------------------------------------------------------------------------- #
_C.MODEL.ROI_BOX_CASCADE_HEAD = CN()
# The number of cascade stages is implicitly defined by the length of the following two configs.
_C.MODEL.ROI_BOX_CASCADE_HEAD.BBOX_REG_WEIGHTS = (
(10.0, 10.0, 5.0, 5.0),
(20.0, 20.0, 10.0, 10.0),
(30.0, 30.0, 15.0, 15.0),
)
_C.MODEL.ROI_BOX_CASCADE_HEAD.IOUS = (0.5, 0.6, 0.7)
# ---------------------------------------------------------------------------- #
# Mask Head
# ---------------------------------------------------------------------------- #
_C.MODEL.ROI_MASK_HEAD = CN()
_C.MODEL.ROI_MASK_HEAD.NAME = "MaskRCNNConvUpsampleHead"
_C.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION = 14
_C.MODEL.ROI_MASK_HEAD.POOLER_SAMPLING_RATIO = 0
_C.MODEL.ROI_MASK_HEAD.NUM_CONV = 0 # The number of convs in the mask head
_C.MODEL.ROI_MASK_HEAD.CONV_DIM = 256
# Normalization method for the convolution layers.
# Options: "" (no norm), "GN", "SyncBN".
_C.MODEL.ROI_MASK_HEAD.NORM = ""
# Whether to use class agnostic for mask prediction
_C.MODEL.ROI_MASK_HEAD.CLS_AGNOSTIC_MASK = False
# Type of pooling operation applied to the incoming feature map for each RoI
_C.MODEL.ROI_MASK_HEAD.POOLER_TYPE = "ROIAlignV2"
# ---------------------------------------------------------------------------- #
# Keypoint Head
# ---------------------------------------------------------------------------- #
_C.MODEL.ROI_KEYPOINT_HEAD = CN()
_C.MODEL.ROI_KEYPOINT_HEAD.NAME = "KRCNNConvDeconvUpsampleHead"
_C.MODEL.ROI_KEYPOINT_HEAD.POOLER_RESOLUTION = 14
_C.MODEL.ROI_KEYPOINT_HEAD.POOLER_SAMPLING_RATIO = 0
_C.MODEL.ROI_KEYPOINT_HEAD.CONV_DIMS = tuple(512 for _ in range(8))
_C.MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS = 17 # 17 is the number of keypoints in COCO.
# Images with too few (or no) keypoints are excluded from training.
_C.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE = 1
# Normalize by the total number of visible keypoints in the minibatch if True.
# Otherwise, normalize by the total number of keypoints that could ever exist
# in the minibatch.
# The keypoint softmax loss is only calculated on visible keypoints.
# Since the number of visible keypoints can vary significantly between
# minibatches, this has the effect of up-weighting the importance of
# minibatches with few visible keypoints. (Imagine the extreme case of
# only one visible keypoint versus N: in the case of N, each one
# contributes 1/N to the gradient compared to the single keypoint
# determining the gradient direction). Instead, we can normalize the
# loss by the total number of keypoints, if it were the case that all
# keypoints were visible in a full minibatch. (Returning to the example,
# this means that the one visible keypoint contributes as much as each
# of the N keypoints.)
_C.MODEL.ROI_KEYPOINT_HEAD.NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS = True
# Multi-task loss weight to use for keypoints
# Recommended values:
# - use 1.0 if NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS is True
# - use 4.0 if NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS is False
_C.MODEL.ROI_KEYPOINT_HEAD.LOSS_WEIGHT = 1.0
# Type of pooling operation applied to the incoming feature map for each RoI
_C.MODEL.ROI_KEYPOINT_HEAD.POOLER_TYPE = "ROIAlignV2"
# ---------------------------------------------------------------------------- #
# Semantic Segmentation Head
# ---------------------------------------------------------------------------- #
_C.MODEL.SEM_SEG_HEAD = CN()
_C.MODEL.SEM_SEG_HEAD.NAME = "SemSegFPNHead"
_C.MODEL.SEM_SEG_HEAD.IN_FEATURES = ["p2", "p3", "p4", "p5"]
# Label in the semantic segmentation ground truth that is ignored, i.e., no loss is calculated for
# the correposnding pixel.
_C.MODEL.SEM_SEG_HEAD.IGNORE_VALUE = 255
# Number of classes in the semantic segmentation head
_C.MODEL.SEM_SEG_HEAD.NUM_CLASSES = 54
# Number of channels in the 3x3 convs inside semantic-FPN heads.
_C.MODEL.SEM_SEG_HEAD.CONVS_DIM = 128
# Outputs from semantic-FPN heads are up-scaled to the COMMON_STRIDE stride.
_C.MODEL.SEM_SEG_HEAD.COMMON_STRIDE = 4
# Normalization method for the convolution layers. Options: "" (no norm), "GN".
_C.MODEL.SEM_SEG_HEAD.NORM = "GN"
_C.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT = 1.0
_C.MODEL.PANOPTIC_FPN = CN()
# Scaling of all losses from instance detection / segmentation head.
_C.MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT = 1.0
# options when combining instance & semantic segmentation outputs
_C.MODEL.PANOPTIC_FPN.COMBINE = CN({"ENABLED": True}) # "COMBINE.ENABLED" is deprecated & not used
_C.MODEL.PANOPTIC_FPN.COMBINE.OVERLAP_THRESH = 0.5
_C.MODEL.PANOPTIC_FPN.COMBINE.STUFF_AREA_LIMIT = 4096
_C.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = 0.5
# ---------------------------------------------------------------------------- #
# RetinaNet Head
# ---------------------------------------------------------------------------- #
_C.MODEL.RETINANET = CN()
# This is the number of foreground classes.
_C.MODEL.RETINANET.NUM_CLASSES = 80
_C.MODEL.RETINANET.IN_FEATURES = ["p3", "p4", "p5", "p6", "p7"]
# Convolutions to use in the cls and bbox tower
# NOTE: this doesn't include the last conv for logits
_C.MODEL.RETINANET.NUM_CONVS = 4
# IoU overlap ratio [bg, fg] for labeling anchors.
# Anchors with < bg are labeled negative (0)
# Anchors with >= bg and < fg are ignored (-1)
# Anchors with >= fg are labeled positive (1)
_C.MODEL.RETINANET.IOU_THRESHOLDS = [0.4, 0.5]
_C.MODEL.RETINANET.IOU_LABELS = [0, -1, 1]
# Prior prob for rare case (i.e. foreground) at the beginning of training.
# This is used to set the bias for the logits layer of the classifier subnet.
# This improves training stability in the case of heavy class imbalance.
_C.MODEL.RETINANET.PRIOR_PROB = 0.01
# Inference cls score threshold, only anchors with score > INFERENCE_TH are
# considered for inference (to improve speed)
_C.MODEL.RETINANET.SCORE_THRESH_TEST = 0.5
# Select topk candidates before NMS
_C.MODEL.RETINANET.TOPK_CANDIDATES_TEST = 1000
_C.MODEL.RETINANET.NMS_THRESH_TEST = 0.5
# Weights on (dx, dy, dw, dh) for normalizing Retinanet anchor regression targets
_C.MODEL.RETINANET.BBOX_REG_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# Loss parameters
_C.MODEL.RETINANET.FOCAL_LOSS_GAMMA = 2.0
_C.MODEL.RETINANET.FOCAL_LOSS_ALPHA = 0.25
_C.MODEL.RETINANET.SMOOTH_L1_LOSS_BETA = 0.1
# Options are: "smooth_l1", "giou", "diou", "ciou"
_C.MODEL.RETINANET.BBOX_REG_LOSS_TYPE = "smooth_l1"
# One of BN, SyncBN, FrozenBN, GN
# Only supports GN until unshared norm is implemented
_C.MODEL.RETINANET.NORM = ""
# ---------------------------------------------------------------------------- #
# ResNe[X]t options (ResNets = {ResNet, ResNeXt}
# Note that parts of a resnet may be used for both the backbone and the head
# These options apply to both
# ---------------------------------------------------------------------------- #
_C.MODEL.RESNETS = CN()
_C.MODEL.RESNETS.DEPTH = 50
_C.MODEL.RESNETS.OUT_FEATURES = ["res4"] # res4 for C4 backbone, res2..5 for FPN backbone
# Number of groups to use; 1 ==> ResNet; > 1 ==> ResNeXt
_C.MODEL.RESNETS.NUM_GROUPS = 1
# Options: FrozenBN, GN, "SyncBN", "BN"
_C.MODEL.RESNETS.NORM = "FrozenBN"
# Baseline width of each group.
# Scaling this parameters will scale the width of all bottleneck layers.
_C.MODEL.RESNETS.WIDTH_PER_GROUP = 64
# Place the stride 2 conv on the 1x1 filter
# Use True only for the original MSRA ResNet; use False for C2 and Torch models
_C.MODEL.RESNETS.STRIDE_IN_1X1 = True
# Apply dilation in stage "res5"
_C.MODEL.RESNETS.RES5_DILATION = 1
# Output width of res2. Scaling this parameters will scale the width of all 1x1 convs in ResNet
# For R18 and R34, this needs to be set to 64
_C.MODEL.RESNETS.RES2_OUT_CHANNELS = 256
_C.MODEL.RESNETS.STEM_OUT_CHANNELS = 64
# Apply Deformable Convolution in stages
# Specify if apply deform_conv on Res2, Res3, Res4, Res5
_C.MODEL.RESNETS.DEFORM_ON_PER_STAGE = [False, False, False, False]
# Use True to use modulated deform_conv (DeformableV2, https://arxiv.org/abs/1811.11168);
# Use False for DeformableV1.
_C.MODEL.RESNETS.DEFORM_MODULATED = False
# Number of groups in deformable conv.
_C.MODEL.RESNETS.DEFORM_NUM_GROUPS = 1
# ---------------------------------------------------------------------------- #
# Solver
# ---------------------------------------------------------------------------- #
_C.SOLVER = CN()
# Options: WarmupMultiStepLR, WarmupCosineLR.
# See detectron2/solver/build.py for definition.
_C.SOLVER.LR_SCHEDULER_NAME = "WarmupMultiStepLR"
_C.SOLVER.MAX_ITER = 40000
_C.SOLVER.BASE_LR = 0.001
# The end lr, only used by WarmupCosineLR
_C.SOLVER.BASE_LR_END = 0.0
_C.SOLVER.MOMENTUM = 0.9
_C.SOLVER.NESTEROV = False
_C.SOLVER.WEIGHT_DECAY = 0.0001
# The weight decay that's applied to parameters of normalization layers
# (typically the affine transformation)
_C.SOLVER.WEIGHT_DECAY_NORM = 0.0
_C.SOLVER.GAMMA = 0.1
# The iteration number to decrease learning rate by GAMMA.
_C.SOLVER.STEPS = (30000,)
_C.SOLVER.WARMUP_FACTOR = 1.0 / 1000
_C.SOLVER.WARMUP_ITERS = 1000
_C.SOLVER.WARMUP_METHOD = "linear"
# Save a checkpoint after every this number of iterations
_C.SOLVER.CHECKPOINT_PERIOD = 5000
# Number of images per batch across all machines. This is also the number
# of training images per step (i.e. per iteration). If we use 16 GPUs
# and IMS_PER_BATCH = 32, each GPU will see 2 images per batch.
# May be adjusted automatically if REFERENCE_WORLD_SIZE is set.
_C.SOLVER.IMS_PER_BATCH = 16
# The reference number of workers (GPUs) this config is meant to train with.
# It takes no effect when set to 0.
# With a non-zero value, it will be used by DefaultTrainer to compute a desired
# per-worker batch size, and then scale the other related configs (total batch size,
# learning rate, etc) to match the per-worker batch size.
# See documentation of `DefaultTrainer.auto_scale_workers` for details:
_C.SOLVER.REFERENCE_WORLD_SIZE = 0
# Detectron v1 (and previous detection code) used a 2x higher LR and 0 WD for
# biases. This is not useful (at least for recent models). You should avoid
# changing these and they exist only to reproduce Detectron v1 training if
# desired.
_C.SOLVER.BIAS_LR_FACTOR = 1.0
_C.SOLVER.WEIGHT_DECAY_BIAS = None # None means following WEIGHT_DECAY
# Gradient clipping
_C.SOLVER.CLIP_GRADIENTS = CN({"ENABLED": False})
# Type of gradient clipping, currently 2 values are supported:
# - "value": the absolute values of elements of each gradients are clipped
# - "norm": the norm of the gradient for each parameter is clipped thus
# affecting all elements in the parameter
_C.SOLVER.CLIP_GRADIENTS.CLIP_TYPE = "value"
# Maximum absolute value used for clipping gradients
_C.SOLVER.CLIP_GRADIENTS.CLIP_VALUE = 1.0
# Floating point number p for L-p norm to be used with the "norm"
# gradient clipping type; for L-inf, please specify .inf
_C.SOLVER.CLIP_GRADIENTS.NORM_TYPE = 2.0
# Enable automatic mixed precision for training
# Note that this does not change model's inference behavior.
# To use AMP in inference, run inference under autocast()
_C.SOLVER.AMP = CN({"ENABLED": False})
# ---------------------------------------------------------------------------- #
# Specific test options
# ---------------------------------------------------------------------------- #
_C.TEST = CN()
# For end-to-end tests to verify the expected accuracy.
# Each item is [task, metric, value, tolerance]
# e.g.: [['bbox', 'AP', 38.5, 0.2]]
_C.TEST.EXPECTED_RESULTS = []
# The period (in terms of steps) to evaluate the model during training.
# Set to 0 to disable.
_C.TEST.EVAL_PERIOD = 0
# The sigmas used to calculate keypoint OKS. See http://cocodataset.org/#keypoints-eval
# When empty, it will use the defaults in COCO.
# Otherwise it should be a list[float] with the same length as ROI_KEYPOINT_HEAD.NUM_KEYPOINTS.
_C.TEST.KEYPOINT_OKS_SIGMAS = []
# Maximum number of detections to return per image during inference (100 is
# based on the limit established for the COCO dataset).
_C.TEST.DETECTIONS_PER_IMAGE = 100
_C.TEST.AUG = CN({"ENABLED": False})
_C.TEST.AUG.MIN_SIZES = (400, 500, 600, 700, 800, 900, 1000, 1100, 1200)
_C.TEST.AUG.MAX_SIZE = 4000
_C.TEST.AUG.FLIP = True
_C.TEST.PRECISE_BN = CN({"ENABLED": False})
_C.TEST.PRECISE_BN.NUM_ITER = 200
# ---------------------------------------------------------------------------- #
# Misc options
# ---------------------------------------------------------------------------- #
# Directory where output files are written
_C.OUTPUT_DIR = "./output"
# Set seed to negative to fully randomize everything.
# Set seed to positive to use a fixed seed. Note that a fixed seed increases
# reproducibility but does not guarantee fully deterministic behavior.
# Disabling all parallelism further increases reproducibility.
_C.SEED = -1
# Benchmark different cudnn algorithms.
# If input images have very different sizes, this option will have large overhead
# for about 10k iterations. It usually hurts total time, but can benefit for certain models.
# If input images have the same or similar sizes, benchmark is often helpful.
_C.CUDNN_BENCHMARK = False
# The period (in terms of steps) for minibatch visualization at train time.
# Set to 0 to disable.
_C.VIS_PERIOD = 0
# global config is for quick hack purposes.
# You can set them in command line or config files,
# and access it with:
#
# from detectron2.config import global_cfg
# print(global_cfg.HACK)
#
# Do not commit any configs into it.
_C.GLOBAL = CN()
_C.GLOBAL.HACK = 1.0 | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/config/defaults.py | defaults.py |
import ast
import builtins
import importlib
import inspect
import logging
import os
import uuid
from collections import abc
from contextlib import contextmanager
from copy import deepcopy
from dataclasses import is_dataclass
from typing import List, Tuple, Union
import cloudpickle
import yaml
from omegaconf import DictConfig, ListConfig, OmegaConf
from detectron2.utils.file_io import PathManager
from detectron2.utils.registry import _convert_target_to_string
__all__ = ["LazyCall", "LazyConfig"]
class LazyCall:
"""
Wrap a callable so that when it's called, the call will not be executed,
but returns a dict that describes the call.
LazyCall object has to be called with only keyword arguments. Positional
arguments are not yet supported.
Examples:
::
from detectron2.config import instantiate, LazyCall
layer_cfg = LazyCall(nn.Conv2d)(in_channels=32, out_channels=32)
layer_cfg.out_channels = 64 # can edit it afterwards
layer = instantiate(layer_cfg)
"""
def __init__(self, target):
if not (callable(target) or isinstance(target, (str, abc.Mapping))):
raise TypeError(
f"target of LazyCall must be a callable or defines a callable! Got {target}"
)
self._target = target
def __call__(self, **kwargs):
if is_dataclass(self._target):
# omegaconf object cannot hold dataclass type
# https://github.com/omry/omegaconf/issues/784
target = _convert_target_to_string(self._target)
else:
target = self._target
kwargs["_target_"] = target
return DictConfig(content=kwargs, flags={"allow_objects": True})
def _visit_dict_config(cfg, func):
"""
Apply func recursively to all DictConfig in cfg.
"""
if isinstance(cfg, DictConfig):
func(cfg)
for v in cfg.values():
_visit_dict_config(v, func)
elif isinstance(cfg, ListConfig):
for v in cfg:
_visit_dict_config(v, func)
def _validate_py_syntax(filename):
# see also https://github.com/open-mmlab/mmcv/blob/master/mmcv/utils/config.py
with PathManager.open(filename, "r") as f:
content = f.read()
try:
ast.parse(content)
except SyntaxError as e:
raise SyntaxError(f"Config file {filename} has syntax error!") from e
def _cast_to_config(obj):
# if given a dict, return DictConfig instead
if isinstance(obj, dict):
return DictConfig(obj, flags={"allow_objects": True})
return obj
_CFG_PACKAGE_NAME = "detectron2._cfg_loader"
"""
A namespace to put all imported config into.
"""
def _random_package_name(filename):
# generate a random package name when loading config files
return _CFG_PACKAGE_NAME + str(uuid.uuid4())[:4] + "." + os.path.basename(filename)
@contextmanager
def _patch_import():
"""
Enhance relative import statements in config files, so that they:
1. locate files purely based on relative location, regardless of packages.
e.g. you can import file without having __init__
2. do not cache modules globally; modifications of module states has no side effect
3. support other storage system through PathManager
4. imported dict are turned into omegaconf.DictConfig automatically
"""
old_import = builtins.__import__
def find_relative_file(original_file, relative_import_path, level):
cur_file = os.path.dirname(original_file)
for _ in range(level - 1):
cur_file = os.path.dirname(cur_file)
cur_name = relative_import_path.lstrip(".")
for part in cur_name.split("."):
cur_file = os.path.join(cur_file, part)
# NOTE: directory import is not handled. Because then it's unclear
# if such import should produce python module or DictConfig. This can
# be discussed further if needed.
if not cur_file.endswith(".py"):
cur_file += ".py"
if not PathManager.isfile(cur_file):
raise ImportError(
f"Cannot import name {relative_import_path} from "
f"{original_file}: {cur_file} has to exist."
)
return cur_file
def new_import(name, globals=None, locals=None, fromlist=(), level=0):
if (
# Only deal with relative imports inside config files
level != 0
and globals is not None
and (globals.get("__package__", "") or "").startswith(_CFG_PACKAGE_NAME)
):
cur_file = find_relative_file(globals["__file__"], name, level)
_validate_py_syntax(cur_file)
spec = importlib.machinery.ModuleSpec(
_random_package_name(cur_file), None, origin=cur_file
)
module = importlib.util.module_from_spec(spec)
module.__file__ = cur_file
with PathManager.open(cur_file) as f:
content = f.read()
exec(compile(content, cur_file, "exec"), module.__dict__)
for name in fromlist: # turn imported dict into DictConfig automatically
val = _cast_to_config(module.__dict__[name])
module.__dict__[name] = val
return module
return old_import(name, globals, locals, fromlist=fromlist, level=level)
builtins.__import__ = new_import
yield new_import
builtins.__import__ = old_import
class LazyConfig:
"""
Provide methods to save, load, and overrides an omegaconf config object
which may contain definition of lazily-constructed objects.
"""
@staticmethod
def load_rel(filename: str, keys: Union[None, str, Tuple[str, ...]] = None):
"""
Similar to :meth:`load()`, but load path relative to the caller's
source file.
This has the same functionality as a relative import, except that this method
accepts filename as a string, so more characters are allowed in the filename.
"""
caller_frame = inspect.stack()[1]
caller_fname = caller_frame[0].f_code.co_filename
assert caller_fname != "<string>", "load_rel Unable to find caller"
caller_dir = os.path.dirname(caller_fname)
filename = os.path.join(caller_dir, filename)
return LazyConfig.load(filename, keys)
@staticmethod
def load(filename: str, keys: Union[None, str, Tuple[str, ...]] = None):
"""
Load a config file.
Args:
filename: absolute path or relative path w.r.t. the current working directory
keys: keys to load and return. If not given, return all keys
(whose values are config objects) in a dict.
"""
has_keys = keys is not None
filename = filename.replace("/./", "/") # redundant
if os.path.splitext(filename)[1] not in [".py", ".yaml", ".yml"]:
raise ValueError(f"Config file {filename} has to be a python or yaml file.")
if filename.endswith(".py"):
_validate_py_syntax(filename)
with _patch_import():
# Record the filename
module_namespace = {
"__file__": filename,
"__package__": _random_package_name(filename),
}
with PathManager.open(filename) as f:
content = f.read()
# Compile first with filename to:
# 1. make filename appears in stacktrace
# 2. make load_rel able to find its parent's (possibly remote) location
exec(compile(content, filename, "exec"), module_namespace)
ret = module_namespace
else:
with PathManager.open(filename) as f:
obj = yaml.unsafe_load(f)
ret = OmegaConf.create(obj, flags={"allow_objects": True})
if has_keys:
if isinstance(keys, str):
return _cast_to_config(ret[keys])
else:
return tuple(_cast_to_config(ret[a]) for a in keys)
else:
if filename.endswith(".py"):
# when not specified, only load those that are config objects
ret = DictConfig(
{
name: _cast_to_config(value)
for name, value in ret.items()
if isinstance(value, (DictConfig, ListConfig, dict))
and not name.startswith("_")
},
flags={"allow_objects": True},
)
return ret
@staticmethod
def save(cfg, filename: str):
"""
Save a config object to a yaml file.
Note that when the config dictionary contains complex objects (e.g. lambda),
it can't be saved to yaml. In that case we will print an error and
attempt to save to a pkl file instead.
Args:
cfg: an omegaconf config object
filename: yaml file name to save the config file
"""
logger = logging.getLogger(__name__)
try:
cfg = deepcopy(cfg)
except Exception:
pass
else:
# if it's deep-copyable, then...
def _replace_type_by_name(x):
if "_target_" in x and callable(x._target_):
try:
x._target_ = _convert_target_to_string(x._target_)
except AttributeError:
pass
# not necessary, but makes yaml looks nicer
_visit_dict_config(cfg, _replace_type_by_name)
save_pkl = False
try:
dict = OmegaConf.to_container(cfg, resolve=False)
dumped = yaml.dump(dict, default_flow_style=None, allow_unicode=True, width=9999)
with PathManager.open(filename, "w") as f:
f.write(dumped)
try:
_ = yaml.unsafe_load(dumped) # test that it is loadable
except Exception:
logger.warning(
"The config contains objects that cannot serialize to a valid yaml. "
f"{filename} is human-readable but cannot be loaded."
)
save_pkl = True
except Exception:
logger.exception("Unable to serialize the config to yaml. Error:")
save_pkl = True
if save_pkl:
new_filename = filename + ".pkl"
try:
# retry by pickle
with PathManager.open(new_filename, "wb") as f:
cloudpickle.dump(cfg, f)
logger.warning(f"Config is saved using cloudpickle at {new_filename}.")
except Exception:
pass
@staticmethod
def apply_overrides(cfg, overrides: List[str]):
"""
In-place override contents of cfg.
Args:
cfg: an omegaconf config object
overrides: list of strings in the format of "a=b" to override configs.
See https://hydra.cc/docs/next/advanced/override_grammar/basic/
for syntax.
Returns:
the cfg object
"""
def safe_update(cfg, key, value):
parts = key.split(".")
for idx in range(1, len(parts)):
prefix = ".".join(parts[:idx])
v = OmegaConf.select(cfg, prefix, default=None)
if v is None:
break
if not OmegaConf.is_config(v):
raise KeyError(
f"Trying to update key {key}, but {prefix} "
f"is not a config, but has type {type(v)}."
)
OmegaConf.update(cfg, key, value, merge=True)
from hydra.core.override_parser.overrides_parser import OverridesParser
parser = OverridesParser.create()
overrides = parser.parse_overrides(overrides)
for o in overrides:
key = o.key_or_group
value = o.value()
if o.is_delete():
# TODO support this
raise NotImplementedError("deletion is not yet a supported override")
safe_update(cfg, key, value)
return cfg
@staticmethod
def to_py(cfg, prefix: str = "cfg."):
"""
Try to convert a config object into Python-like psuedo code.
Note that perfect conversion is not always possible. So the returned
results are mainly meant to be human-readable, and not meant to be executed.
Args:
cfg: an omegaconf config object
prefix: root name for the resulting code (default: "cfg.")
Returns:
str of formatted Python code
"""
import black
cfg = OmegaConf.to_container(cfg, resolve=True)
def _to_str(obj, prefix=None, inside_call=False):
if prefix is None:
prefix = []
if isinstance(obj, abc.Mapping) and "_target_" in obj:
# Dict representing a function call
target = _convert_target_to_string(obj.pop("_target_"))
args = []
for k, v in sorted(obj.items()):
args.append(f"{k}={_to_str(v, inside_call=True)}")
args = ", ".join(args)
call = f"{target}({args})"
return "".join(prefix) + call
elif isinstance(obj, abc.Mapping) and not inside_call:
# Dict that is not inside a call is a list of top-level config objects that we
# render as one object per line with dot separated prefixes
key_list = []
for k, v in sorted(obj.items()):
if isinstance(v, abc.Mapping) and "_target_" not in v:
key_list.append(_to_str(v, prefix=prefix + [k + "."]))
else:
key = "".join(prefix) + k
key_list.append(f"{key}={_to_str(v)}")
return "\n".join(key_list)
elif isinstance(obj, abc.Mapping):
# Dict that is inside a call is rendered as a regular dict
return (
"{"
+ ",".join(
f"{repr(k)}: {_to_str(v, inside_call=inside_call)}"
for k, v in sorted(obj.items())
)
+ "}"
)
elif isinstance(obj, list):
return "[" + ",".join(_to_str(x, inside_call=inside_call) for x in obj) + "]"
else:
return repr(obj)
py_str = _to_str(cfg, prefix=[prefix])
try:
return black.format_str(py_str, mode=black.Mode())
except black.InvalidInput:
return py_str | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/config/lazy.py | lazy.py |
import logging
from typing import List, Optional, Tuple
from .config import CfgNode as CN
from .defaults import _C
__all__ = ["upgrade_config", "downgrade_config"]
def upgrade_config(cfg: CN, to_version: Optional[int] = None) -> CN:
"""
Upgrade a config from its current version to a newer version.
Args:
cfg (CfgNode):
to_version (int): defaults to the latest version.
"""
cfg = cfg.clone()
if to_version is None:
to_version = _C.VERSION
assert cfg.VERSION <= to_version, "Cannot upgrade from v{} to v{}!".format(
cfg.VERSION, to_version
)
for k in range(cfg.VERSION, to_version):
converter = globals()["ConverterV" + str(k + 1)]
converter.upgrade(cfg)
cfg.VERSION = k + 1
return cfg
def downgrade_config(cfg: CN, to_version: int) -> CN:
"""
Downgrade a config from its current version to an older version.
Args:
cfg (CfgNode):
to_version (int):
Note:
A general downgrade of arbitrary configs is not always possible due to the
different functionalities in different versions.
The purpose of downgrade is only to recover the defaults in old versions,
allowing it to load an old partial yaml config.
Therefore, the implementation only needs to fill in the default values
in the old version when a general downgrade is not possible.
"""
cfg = cfg.clone()
assert cfg.VERSION >= to_version, "Cannot downgrade from v{} to v{}!".format(
cfg.VERSION, to_version
)
for k in range(cfg.VERSION, to_version, -1):
converter = globals()["ConverterV" + str(k)]
converter.downgrade(cfg)
cfg.VERSION = k - 1
return cfg
def guess_version(cfg: CN, filename: str) -> int:
"""
Guess the version of a partial config where the VERSION field is not specified.
Returns the version, or the latest if cannot make a guess.
This makes it easier for users to migrate.
"""
logger = logging.getLogger(__name__)
def _has(name: str) -> bool:
cur = cfg
for n in name.split("."):
if n not in cur:
return False
cur = cur[n]
return True
# Most users' partial configs have "MODEL.WEIGHT", so guess on it
ret = None
if _has("MODEL.WEIGHT") or _has("TEST.AUG_ON"):
ret = 1
if ret is not None:
logger.warning("Config '{}' has no VERSION. Assuming it to be v{}.".format(filename, ret))
else:
ret = _C.VERSION
logger.warning(
"Config '{}' has no VERSION. Assuming it to be compatible with latest v{}.".format(
filename, ret
)
)
return ret
def _rename(cfg: CN, old: str, new: str) -> None:
old_keys = old.split(".")
new_keys = new.split(".")
def _set(key_seq: List[str], val: str) -> None:
cur = cfg
for k in key_seq[:-1]:
if k not in cur:
cur[k] = CN()
cur = cur[k]
cur[key_seq[-1]] = val
def _get(key_seq: List[str]) -> CN:
cur = cfg
for k in key_seq:
cur = cur[k]
return cur
def _del(key_seq: List[str]) -> None:
cur = cfg
for k in key_seq[:-1]:
cur = cur[k]
del cur[key_seq[-1]]
if len(cur) == 0 and len(key_seq) > 1:
_del(key_seq[:-1])
_set(new_keys, _get(old_keys))
_del(old_keys)
class _RenameConverter:
"""
A converter that handles simple rename.
"""
RENAME: List[Tuple[str, str]] = [] # list of tuples of (old name, new name)
@classmethod
def upgrade(cls, cfg: CN) -> None:
for old, new in cls.RENAME:
_rename(cfg, old, new)
@classmethod
def downgrade(cls, cfg: CN) -> None:
for old, new in cls.RENAME[::-1]:
_rename(cfg, new, old)
class ConverterV1(_RenameConverter):
RENAME = [("MODEL.RPN_HEAD.NAME", "MODEL.RPN.HEAD_NAME")]
class ConverterV2(_RenameConverter):
"""
A large bulk of rename, before public release.
"""
RENAME = [
("MODEL.WEIGHT", "MODEL.WEIGHTS"),
("MODEL.PANOPTIC_FPN.SEMANTIC_LOSS_SCALE", "MODEL.SEM_SEG_HEAD.LOSS_WEIGHT"),
("MODEL.PANOPTIC_FPN.RPN_LOSS_SCALE", "MODEL.RPN.LOSS_WEIGHT"),
("MODEL.PANOPTIC_FPN.INSTANCE_LOSS_SCALE", "MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT"),
("MODEL.PANOPTIC_FPN.COMBINE_ON", "MODEL.PANOPTIC_FPN.COMBINE.ENABLED"),
(
"MODEL.PANOPTIC_FPN.COMBINE_OVERLAP_THRESHOLD",
"MODEL.PANOPTIC_FPN.COMBINE.OVERLAP_THRESH",
),
(
"MODEL.PANOPTIC_FPN.COMBINE_STUFF_AREA_LIMIT",
"MODEL.PANOPTIC_FPN.COMBINE.STUFF_AREA_LIMIT",
),
(
"MODEL.PANOPTIC_FPN.COMBINE_INSTANCES_CONFIDENCE_THRESHOLD",
"MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH",
),
("MODEL.ROI_HEADS.SCORE_THRESH", "MODEL.ROI_HEADS.SCORE_THRESH_TEST"),
("MODEL.ROI_HEADS.NMS", "MODEL.ROI_HEADS.NMS_THRESH_TEST"),
("MODEL.RETINANET.INFERENCE_SCORE_THRESHOLD", "MODEL.RETINANET.SCORE_THRESH_TEST"),
("MODEL.RETINANET.INFERENCE_TOPK_CANDIDATES", "MODEL.RETINANET.TOPK_CANDIDATES_TEST"),
("MODEL.RETINANET.INFERENCE_NMS_THRESHOLD", "MODEL.RETINANET.NMS_THRESH_TEST"),
("TEST.DETECTIONS_PER_IMG", "TEST.DETECTIONS_PER_IMAGE"),
("TEST.AUG_ON", "TEST.AUG.ENABLED"),
("TEST.AUG_MIN_SIZES", "TEST.AUG.MIN_SIZES"),
("TEST.AUG_MAX_SIZE", "TEST.AUG.MAX_SIZE"),
("TEST.AUG_FLIP", "TEST.AUG.FLIP"),
]
@classmethod
def upgrade(cls, cfg: CN) -> None:
super().upgrade(cfg)
if cfg.MODEL.META_ARCHITECTURE == "RetinaNet":
_rename(
cfg, "MODEL.RETINANET.ANCHOR_ASPECT_RATIOS", "MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS"
)
_rename(cfg, "MODEL.RETINANET.ANCHOR_SIZES", "MODEL.ANCHOR_GENERATOR.SIZES")
del cfg["MODEL"]["RPN"]["ANCHOR_SIZES"]
del cfg["MODEL"]["RPN"]["ANCHOR_ASPECT_RATIOS"]
else:
_rename(cfg, "MODEL.RPN.ANCHOR_ASPECT_RATIOS", "MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS")
_rename(cfg, "MODEL.RPN.ANCHOR_SIZES", "MODEL.ANCHOR_GENERATOR.SIZES")
del cfg["MODEL"]["RETINANET"]["ANCHOR_SIZES"]
del cfg["MODEL"]["RETINANET"]["ANCHOR_ASPECT_RATIOS"]
del cfg["MODEL"]["RETINANET"]["ANCHOR_STRIDES"]
@classmethod
def downgrade(cls, cfg: CN) -> None:
super().downgrade(cfg)
_rename(cfg, "MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS", "MODEL.RPN.ANCHOR_ASPECT_RATIOS")
_rename(cfg, "MODEL.ANCHOR_GENERATOR.SIZES", "MODEL.RPN.ANCHOR_SIZES")
cfg.MODEL.RETINANET.ANCHOR_ASPECT_RATIOS = cfg.MODEL.RPN.ANCHOR_ASPECT_RATIOS
cfg.MODEL.RETINANET.ANCHOR_SIZES = cfg.MODEL.RPN.ANCHOR_SIZES
cfg.MODEL.RETINANET.ANCHOR_STRIDES = [] # this is not used anywhere in any version | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/config/compat.py | compat.py |
import functools
import inspect
import logging
from fvcore.common.config import CfgNode as _CfgNode
from detectron2.utils.file_io import PathManager
class CfgNode(_CfgNode):
"""
The same as `fvcore.common.config.CfgNode`, but different in:
1. Use unsafe yaml loading by default.
Note that this may lead to arbitrary code execution: you must not
load a config file from untrusted sources before manually inspecting
the content of the file.
2. Support config versioning.
When attempting to merge an old config, it will convert the old config automatically.
.. automethod:: clone
.. automethod:: freeze
.. automethod:: defrost
.. automethod:: is_frozen
.. automethod:: load_yaml_with_base
.. automethod:: merge_from_list
.. automethod:: merge_from_other_cfg
"""
@classmethod
def _open_cfg(cls, filename):
return PathManager.open(filename, "r")
# Note that the default value of allow_unsafe is changed to True
def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = True) -> None:
"""
Load content from the given config file and merge it into self.
Args:
cfg_filename: config filename
allow_unsafe: allow unsafe yaml syntax
"""
assert PathManager.isfile(cfg_filename), f"Config file '{cfg_filename}' does not exist!"
loaded_cfg = self.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe)
loaded_cfg = type(self)(loaded_cfg)
# defaults.py needs to import CfgNode
from .defaults import _C
latest_ver = _C.VERSION
assert (
latest_ver == self.VERSION
), "CfgNode.merge_from_file is only allowed on a config object of latest version!"
logger = logging.getLogger(__name__)
loaded_ver = loaded_cfg.get("VERSION", None)
if loaded_ver is None:
from .compat import guess_version
loaded_ver = guess_version(loaded_cfg, cfg_filename)
assert loaded_ver <= self.VERSION, "Cannot merge a v{} config into a v{} config.".format(
loaded_ver, self.VERSION
)
if loaded_ver == self.VERSION:
self.merge_from_other_cfg(loaded_cfg)
else:
# compat.py needs to import CfgNode
from .compat import upgrade_config, downgrade_config
logger.warning(
"Loading an old v{} config file '{}' by automatically upgrading to v{}. "
"See docs/CHANGELOG.md for instructions to update your files.".format(
loaded_ver, cfg_filename, self.VERSION
)
)
# To convert, first obtain a full config at an old version
old_self = downgrade_config(self, to_version=loaded_ver)
old_self.merge_from_other_cfg(loaded_cfg)
new_config = upgrade_config(old_self)
self.clear()
self.update(new_config)
def dump(self, *args, **kwargs):
"""
Returns:
str: a yaml string representation of the config
"""
# to make it show up in docs
return super().dump(*args, **kwargs)
global_cfg = CfgNode()
def get_cfg() -> CfgNode:
"""
Get a copy of the default config.
Returns:
a detectron2 CfgNode instance.
"""
from .defaults import _C
return _C.clone()
def set_global_cfg(cfg: CfgNode) -> None:
"""
Let the global config point to the given cfg.
Assume that the given "cfg" has the key "KEY", after calling
`set_global_cfg(cfg)`, the key can be accessed by:
::
from detectron2.config import global_cfg
print(global_cfg.KEY)
By using a hacky global config, you can access these configs anywhere,
without having to pass the config object or the values deep into the code.
This is a hacky feature introduced for quick prototyping / research exploration.
"""
global global_cfg
global_cfg.clear()
global_cfg.update(cfg)
def configurable(init_func=None, *, from_config=None):
"""
Decorate a function or a class's __init__ method so that it can be called
with a :class:`CfgNode` object using a :func:`from_config` function that translates
:class:`CfgNode` to arguments.
Examples:
::
# Usage 1: Decorator on __init__:
class A:
@configurable
def __init__(self, a, b=2, c=3):
pass
@classmethod
def from_config(cls, cfg): # 'cfg' must be the first argument
# Returns kwargs to be passed to __init__
return {"a": cfg.A, "b": cfg.B}
a1 = A(a=1, b=2) # regular construction
a2 = A(cfg) # construct with a cfg
a3 = A(cfg, b=3, c=4) # construct with extra overwrite
# Usage 2: Decorator on any function. Needs an extra from_config argument:
@configurable(from_config=lambda cfg: {"a: cfg.A, "b": cfg.B})
def a_func(a, b=2, c=3):
pass
a1 = a_func(a=1, b=2) # regular call
a2 = a_func(cfg) # call with a cfg
a3 = a_func(cfg, b=3, c=4) # call with extra overwrite
Args:
init_func (callable): a class's ``__init__`` method in usage 1. The
class must have a ``from_config`` classmethod which takes `cfg` as
the first argument.
from_config (callable): the from_config function in usage 2. It must take `cfg`
as its first argument.
"""
if init_func is not None:
assert (
inspect.isfunction(init_func)
and from_config is None
and init_func.__name__ == "__init__"
), "Incorrect use of @configurable. Check API documentation for examples."
@functools.wraps(init_func)
def wrapped(self, *args, **kwargs):
try:
from_config_func = type(self).from_config
except AttributeError as e:
raise AttributeError(
"Class with @configurable must have a 'from_config' classmethod."
) from e
if not inspect.ismethod(from_config_func):
raise TypeError("Class with @configurable must have a 'from_config' classmethod.")
if _called_with_cfg(*args, **kwargs):
explicit_args = _get_args_from_config(from_config_func, *args, **kwargs)
init_func(self, **explicit_args)
else:
init_func(self, *args, **kwargs)
return wrapped
else:
if from_config is None:
return configurable # @configurable() is made equivalent to @configurable
assert inspect.isfunction(
from_config
), "from_config argument of configurable must be a function!"
def wrapper(orig_func):
@functools.wraps(orig_func)
def wrapped(*args, **kwargs):
if _called_with_cfg(*args, **kwargs):
explicit_args = _get_args_from_config(from_config, *args, **kwargs)
return orig_func(**explicit_args)
else:
return orig_func(*args, **kwargs)
wrapped.from_config = from_config
return wrapped
return wrapper
def _get_args_from_config(from_config_func, *args, **kwargs):
"""
Use `from_config` to obtain explicit arguments.
Returns:
dict: arguments to be used for cls.__init__
"""
signature = inspect.signature(from_config_func)
if list(signature.parameters.keys())[0] != "cfg":
if inspect.isfunction(from_config_func):
name = from_config_func.__name__
else:
name = f"{from_config_func.__self__}.from_config"
raise TypeError(f"{name} must take 'cfg' as the first argument!")
support_var_arg = any(
param.kind in [param.VAR_POSITIONAL, param.VAR_KEYWORD]
for param in signature.parameters.values()
)
if support_var_arg: # forward all arguments to from_config, if from_config accepts them
ret = from_config_func(*args, **kwargs)
else:
# forward supported arguments to from_config
supported_arg_names = set(signature.parameters.keys())
extra_kwargs = {}
for name in list(kwargs.keys()):
if name not in supported_arg_names:
extra_kwargs[name] = kwargs.pop(name)
ret = from_config_func(*args, **kwargs)
# forward the other arguments to __init__
ret.update(extra_kwargs)
return ret
def _called_with_cfg(*args, **kwargs):
"""
Returns:
bool: whether the arguments contain CfgNode and should be considered
forwarded to from_config.
"""
from omegaconf import DictConfig
if len(args) and isinstance(args[0], (_CfgNode, DictConfig)):
return True
if isinstance(kwargs.pop("cfg", None), (_CfgNode, DictConfig)):
return True
# `from_config`'s first argument is forced to be "cfg".
# So the above check covers all cases.
return False | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/config/config.py | config.py |
import dataclasses
import logging
from collections import abc
from typing import Any
from detectron2.utils.registry import _convert_target_to_string, locate
__all__ = ["dump_dataclass", "instantiate"]
def dump_dataclass(obj: Any):
"""
Dump a dataclass recursively into a dict that can be later instantiated.
Args:
obj: a dataclass object
Returns:
dict
"""
assert dataclasses.is_dataclass(obj) and not isinstance(
obj, type
), "dump_dataclass() requires an instance of a dataclass."
ret = {"_target_": _convert_target_to_string(type(obj))}
for f in dataclasses.fields(obj):
v = getattr(obj, f.name)
if dataclasses.is_dataclass(v):
v = dump_dataclass(v)
if isinstance(v, (list, tuple)):
v = [dump_dataclass(x) if dataclasses.is_dataclass(x) else x for x in v]
ret[f.name] = v
return ret
def instantiate(cfg):
"""
Recursively instantiate objects defined in dictionaries by
"_target_" and arguments.
Args:
cfg: a dict-like object with "_target_" that defines the caller, and
other keys that define the arguments
Returns:
object instantiated by cfg
"""
from omegaconf import ListConfig
if isinstance(cfg, ListConfig):
lst = [instantiate(x) for x in cfg]
return ListConfig(lst, flags={"allow_objects": True})
if isinstance(cfg, list):
# Specialize for list, because many classes take
# list[objects] as arguments, such as ResNet, DatasetMapper
return [instantiate(x) for x in cfg]
if isinstance(cfg, abc.Mapping) and "_target_" in cfg:
# conceptually equivalent to hydra.utils.instantiate(cfg) with _convert_=all,
# but faster: https://github.com/facebookresearch/hydra/issues/1200
cfg = {k: instantiate(v) for k, v in cfg.items()}
cls = cfg.pop("_target_")
cls = instantiate(cls)
if isinstance(cls, str):
cls_name = cls
cls = locate(cls_name)
assert cls is not None, cls_name
else:
try:
cls_name = cls.__module__ + "." + cls.__qualname__
except Exception:
# target could be anything, so the above could fail
cls_name = str(cls)
assert callable(cls), f"_target_ {cls} does not define a callable object"
try:
return cls(**cfg)
except TypeError:
logger = logging.getLogger(__name__)
logger.error(f"Error when instantiating {cls_name}!")
raise
return cfg # return as-is if don't know what to do | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/config/instantiate.py | instantiate.py |
import logging
import numpy as np
from itertools import count
import torch
from caffe2.proto import caffe2_pb2
from caffe2.python import core
from .caffe2_modeling import META_ARCH_CAFFE2_EXPORT_TYPE_MAP, convert_batched_inputs_to_c2_format
from .shared import ScopedWS, get_pb_arg_vali, get_pb_arg_vals, infer_device_type
logger = logging.getLogger(__name__)
# ===== ref: mobile-vision predictor's 'Caffe2Wrapper' class ======
class ProtobufModel(torch.nn.Module):
"""
Wrapper of a caffe2's protobuf model.
It works just like nn.Module, but running caffe2 under the hood.
Input/Output are tuple[tensor] that match the caffe2 net's external_input/output.
"""
_ids = count(0)
def __init__(self, predict_net, init_net):
logger.info(f"Initializing ProtobufModel for: {predict_net.name} ...")
super().__init__()
assert isinstance(predict_net, caffe2_pb2.NetDef)
assert isinstance(init_net, caffe2_pb2.NetDef)
# create unique temporary workspace for each instance
self.ws_name = "__tmp_ProtobufModel_{}__".format(next(self._ids))
self.net = core.Net(predict_net)
logger.info("Running init_net once to fill the parameters ...")
with ScopedWS(self.ws_name, is_reset=True, is_cleanup=False) as ws:
ws.RunNetOnce(init_net)
uninitialized_external_input = []
for blob in self.net.Proto().external_input:
if blob not in ws.Blobs():
uninitialized_external_input.append(blob)
ws.CreateBlob(blob)
ws.CreateNet(self.net)
self._error_msgs = set()
self._input_blobs = uninitialized_external_input
def _infer_output_devices(self, inputs):
"""
Returns:
list[str]: list of device for each external output
"""
def _get_device_type(torch_tensor):
assert torch_tensor.device.type in ["cpu", "cuda"]
assert torch_tensor.device.index == 0
return torch_tensor.device.type
predict_net = self.net.Proto()
input_device_types = {
(name, 0): _get_device_type(tensor) for name, tensor in zip(self._input_blobs, inputs)
}
device_type_map = infer_device_type(
predict_net, known_status=input_device_types, device_name_style="pytorch"
)
ssa, versions = core.get_ssa(predict_net)
versioned_outputs = [(name, versions[name]) for name in predict_net.external_output]
output_devices = [device_type_map[outp] for outp in versioned_outputs]
return output_devices
def forward(self, inputs):
"""
Args:
inputs (tuple[torch.Tensor])
Returns:
tuple[torch.Tensor]
"""
assert len(inputs) == len(self._input_blobs), (
f"Length of inputs ({len(inputs)}) "
f"doesn't match the required input blobs: {self._input_blobs}"
)
with ScopedWS(self.ws_name, is_reset=False, is_cleanup=False) as ws:
for b, tensor in zip(self._input_blobs, inputs):
ws.FeedBlob(b, tensor)
try:
ws.RunNet(self.net.Proto().name)
except RuntimeError as e:
if not str(e) in self._error_msgs:
self._error_msgs.add(str(e))
logger.warning("Encountered new RuntimeError: \n{}".format(str(e)))
logger.warning("Catch the error and use partial results.")
c2_outputs = [ws.FetchBlob(b) for b in self.net.Proto().external_output]
# Remove outputs of current run, this is necessary in order to
# prevent fetching the result from previous run if the model fails
# in the middle.
for b in self.net.Proto().external_output:
# Needs to create uninitialized blob to make the net runable.
# This is "equivalent" to: ws.RemoveBlob(b) then ws.CreateBlob(b),
# but there'no such API.
ws.FeedBlob(b, f"{b}, a C++ native class of type nullptr (uninitialized).")
# Cast output to torch.Tensor on the desired device
output_devices = (
self._infer_output_devices(inputs)
if any(t.device.type != "cpu" for t in inputs)
else ["cpu" for _ in self.net.Proto().external_output]
)
outputs = []
for name, c2_output, device in zip(
self.net.Proto().external_output, c2_outputs, output_devices
):
if not isinstance(c2_output, np.ndarray):
raise RuntimeError(
"Invalid output for blob {}, received: {}".format(name, c2_output)
)
outputs.append(torch.tensor(c2_output).to(device=device))
return tuple(outputs)
class ProtobufDetectionModel(torch.nn.Module):
"""
A class works just like a pytorch meta arch in terms of inference, but running
caffe2 model under the hood.
"""
def __init__(self, predict_net, init_net, *, convert_outputs=None):
"""
Args:
predict_net, init_net (core.Net): caffe2 nets
convert_outptus (callable): a function that converts caffe2
outputs to the same format of the original pytorch model.
By default, use the one defined in the caffe2 meta_arch.
"""
super().__init__()
self.protobuf_model = ProtobufModel(predict_net, init_net)
self.size_divisibility = get_pb_arg_vali(predict_net, "size_divisibility", 0)
self.device = get_pb_arg_vals(predict_net, "device", b"cpu").decode("ascii")
if convert_outputs is None:
meta_arch = get_pb_arg_vals(predict_net, "meta_architecture", b"GeneralizedRCNN")
meta_arch = META_ARCH_CAFFE2_EXPORT_TYPE_MAP[meta_arch.decode("ascii")]
self._convert_outputs = meta_arch.get_outputs_converter(predict_net, init_net)
else:
self._convert_outputs = convert_outputs
def _convert_inputs(self, batched_inputs):
# currently all models convert inputs in the same way
return convert_batched_inputs_to_c2_format(
batched_inputs, self.size_divisibility, self.device
)
def forward(self, batched_inputs):
c2_inputs = self._convert_inputs(batched_inputs)
c2_results = self.protobuf_model(c2_inputs)
c2_results = dict(zip(self.protobuf_model.net.Proto().external_output, c2_results))
return self._convert_outputs(batched_inputs, c2_inputs, c2_results) | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/export/caffe2_inference.py | caffe2_inference.py |
import collections
import contextlib
import copy
import functools
import logging
import numpy as np
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from unittest import mock
import caffe2.python.utils as putils
import torch
import torch.nn.functional as F
from caffe2.proto import caffe2_pb2
from caffe2.python import core, net_drawer, workspace
from torch.nn.functional import interpolate as interp
logger = logging.getLogger(__name__)
# ==== torch/utils_toffee/cast.py =======================================
def to_device(t, device_str):
"""
This function is a replacement of .to(another_device) such that it allows the
casting to be traced properly by explicitly calling the underlying copy ops.
It also avoids introducing unncessary op when casting to the same device.
"""
src = t.device
dst = torch.device(device_str)
if src == dst:
return t
elif src.type == "cuda" and dst.type == "cpu":
return torch.ops._caffe2.CopyGPUToCPU(t)
elif src.type == "cpu" and dst.type == "cuda":
return torch.ops._caffe2.CopyCPUToGPU(t)
else:
raise RuntimeError("Can't cast tensor from device {} to device {}".format(src, dst))
# ==== torch/utils_toffee/interpolate.py =======================================
# Note: borrowed from vision/detection/fair/detectron/detectron/modeling/detector.py
def BilinearInterpolation(tensor_in, up_scale):
assert up_scale % 2 == 0, "Scale should be even"
def upsample_filt(size):
factor = (size + 1) // 2
if size % 2 == 1:
center = factor - 1
else:
center = factor - 0.5
og = np.ogrid[:size, :size]
return (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor)
kernel_size = int(up_scale) * 2
bil_filt = upsample_filt(kernel_size)
dim = int(tensor_in.shape[1])
kernel = np.zeros((dim, dim, kernel_size, kernel_size), dtype=np.float32)
kernel[range(dim), range(dim), :, :] = bil_filt
tensor_out = F.conv_transpose2d(
tensor_in,
weight=to_device(torch.Tensor(kernel), tensor_in.device),
bias=None,
stride=int(up_scale),
padding=int(up_scale / 2),
)
return tensor_out
# NOTE: ONNX is incompatible with traced torch.nn.functional.interpolate if
# using dynamic `scale_factor` rather than static `size`. (T43166860)
# NOTE: Caffe2 Int8 conversion might not be able to quantize `size` properly.
def onnx_compatibale_interpolate(
input, size=None, scale_factor=None, mode="nearest", align_corners=None
):
# NOTE: The input dimensions are interpreted in the form:
# `mini-batch x channels x [optional depth] x [optional height] x width`.
if size is None and scale_factor is not None:
if input.dim() == 4:
if isinstance(scale_factor, (int, float)):
height_scale, width_scale = (scale_factor, scale_factor)
else:
assert isinstance(scale_factor, (tuple, list))
assert len(scale_factor) == 2
height_scale, width_scale = scale_factor
assert not align_corners, "No matching C2 op for align_corners == True"
if mode == "nearest":
return torch.ops._caffe2.ResizeNearest(
input, order="NCHW", width_scale=width_scale, height_scale=height_scale
)
elif mode == "bilinear":
logger.warning(
"Use F.conv_transpose2d for bilinear interpolate"
" because there's no such C2 op, this may cause significant"
" slowdown and the boundary pixels won't be as same as"
" using F.interpolate due to padding."
)
assert height_scale == width_scale
return BilinearInterpolation(input, up_scale=height_scale)
logger.warning("Output size is not static, it might cause ONNX conversion issue")
return interp(input, size, scale_factor, mode, align_corners)
@contextlib.contextmanager
def mock_torch_nn_functional_interpolate():
if torch.onnx.is_in_onnx_export():
with mock.patch(
"torch.nn.functional.interpolate", side_effect=onnx_compatibale_interpolate
):
yield
else:
yield
# ==== torch/utils_caffe2/ws_utils.py ==========================================
class ScopedWS(object):
def __init__(self, ws_name, is_reset, is_cleanup=False):
self.ws_name = ws_name
self.is_reset = is_reset
self.is_cleanup = is_cleanup
self.org_ws = ""
def __enter__(self):
self.org_ws = workspace.CurrentWorkspace()
if self.ws_name is not None:
workspace.SwitchWorkspace(self.ws_name, True)
if self.is_reset:
workspace.ResetWorkspace()
return workspace
def __exit__(self, *args):
if self.is_cleanup:
workspace.ResetWorkspace()
if self.ws_name is not None:
workspace.SwitchWorkspace(self.org_ws)
def fetch_any_blob(name):
bb = None
try:
bb = workspace.FetchBlob(name)
except TypeError:
bb = workspace.FetchInt8Blob(name)
except Exception as e:
logger.error("Get blob {} error: {}".format(name, e))
return bb
# ==== torch/utils_caffe2/protobuf.py ==========================================
def get_pb_arg(pb, arg_name):
for x in pb.arg:
if x.name == arg_name:
return x
return None
def get_pb_arg_valf(pb, arg_name, default_val):
arg = get_pb_arg(pb, arg_name)
return arg.f if arg is not None else default_val
def get_pb_arg_floats(pb, arg_name, default_val):
arg = get_pb_arg(pb, arg_name)
return list(map(float, arg.floats)) if arg is not None else default_val
def get_pb_arg_ints(pb, arg_name, default_val):
arg = get_pb_arg(pb, arg_name)
return list(map(int, arg.ints)) if arg is not None else default_val
def get_pb_arg_vali(pb, arg_name, default_val):
arg = get_pb_arg(pb, arg_name)
return arg.i if arg is not None else default_val
def get_pb_arg_vals(pb, arg_name, default_val):
arg = get_pb_arg(pb, arg_name)
return arg.s if arg is not None else default_val
def get_pb_arg_valstrings(pb, arg_name, default_val):
arg = get_pb_arg(pb, arg_name)
return list(arg.strings) if arg is not None else default_val
def check_set_pb_arg(pb, arg_name, arg_attr, arg_value, allow_override=False):
arg = get_pb_arg(pb, arg_name)
if arg is None:
arg = putils.MakeArgument(arg_name, arg_value)
assert hasattr(arg, arg_attr)
pb.arg.extend([arg])
if allow_override and getattr(arg, arg_attr) != arg_value:
logger.warning(
"Override argument {}: {} -> {}".format(arg_name, getattr(arg, arg_attr), arg_value)
)
setattr(arg, arg_attr, arg_value)
else:
assert arg is not None
assert getattr(arg, arg_attr) == arg_value, "Existing value {}, new value {}".format(
getattr(arg, arg_attr), arg_value
)
def _create_const_fill_op_from_numpy(name, tensor, device_option=None):
assert type(tensor) == np.ndarray
kTypeNameMapper = {
np.dtype("float32"): "GivenTensorFill",
np.dtype("int32"): "GivenTensorIntFill",
np.dtype("int64"): "GivenTensorInt64Fill",
np.dtype("uint8"): "GivenTensorStringFill",
}
args_dict = {}
if tensor.dtype == np.dtype("uint8"):
args_dict.update({"values": [str(tensor.data)], "shape": [1]})
else:
args_dict.update({"values": tensor, "shape": tensor.shape})
if device_option is not None:
args_dict["device_option"] = device_option
return core.CreateOperator(kTypeNameMapper[tensor.dtype], [], [name], **args_dict)
def _create_const_fill_op_from_c2_int8_tensor(name, int8_tensor):
assert type(int8_tensor) == workspace.Int8Tensor
kTypeNameMapper = {
np.dtype("int32"): "Int8GivenIntTensorFill",
np.dtype("uint8"): "Int8GivenTensorFill",
}
tensor = int8_tensor.data
assert tensor.dtype in [np.dtype("uint8"), np.dtype("int32")]
values = tensor.tobytes() if tensor.dtype == np.dtype("uint8") else tensor
return core.CreateOperator(
kTypeNameMapper[tensor.dtype],
[],
[name],
values=values,
shape=tensor.shape,
Y_scale=int8_tensor.scale,
Y_zero_point=int8_tensor.zero_point,
)
def create_const_fill_op(
name: str,
blob: Union[np.ndarray, workspace.Int8Tensor],
device_option: Optional[caffe2_pb2.DeviceOption] = None,
) -> caffe2_pb2.OperatorDef:
"""
Given a blob object, return the Caffe2 operator that creates this blob
as constant. Currently support NumPy tensor and Caffe2 Int8Tensor.
"""
tensor_type = type(blob)
assert tensor_type in [
np.ndarray,
workspace.Int8Tensor,
], 'Error when creating const fill op for "{}", unsupported blob type: {}'.format(
name, type(blob)
)
if tensor_type == np.ndarray:
return _create_const_fill_op_from_numpy(name, blob, device_option)
elif tensor_type == workspace.Int8Tensor:
assert device_option is None
return _create_const_fill_op_from_c2_int8_tensor(name, blob)
def construct_init_net_from_params(
params: Dict[str, Any], device_options: Optional[Dict[str, caffe2_pb2.DeviceOption]] = None
) -> caffe2_pb2.NetDef:
"""
Construct the init_net from params dictionary
"""
init_net = caffe2_pb2.NetDef()
device_options = device_options or {}
for name, blob in params.items():
if isinstance(blob, str):
logger.warning(
(
"Blob {} with type {} is not supported in generating init net,"
" skipped.".format(name, type(blob))
)
)
continue
init_net.op.extend(
[create_const_fill_op(name, blob, device_option=device_options.get(name, None))]
)
init_net.external_output.append(name)
return init_net
def get_producer_map(ssa):
"""
Return dict from versioned blob to (i, j),
where i is index of producer op, j is the index of output of that op.
"""
producer_map = {}
for i in range(len(ssa)):
outputs = ssa[i][1]
for j, outp in enumerate(outputs):
producer_map[outp] = (i, j)
return producer_map
def get_consumer_map(ssa):
"""
Return dict from versioned blob to list of (i, j),
where i is index of consumer op, j is the index of input of that op.
"""
consumer_map = collections.defaultdict(list)
for i in range(len(ssa)):
inputs = ssa[i][0]
for j, inp in enumerate(inputs):
consumer_map[inp].append((i, j))
return consumer_map
def get_params_from_init_net(
init_net: caffe2_pb2.NetDef,
) -> [Dict[str, Any], Dict[str, caffe2_pb2.DeviceOption]]:
"""
Take the output blobs from init_net by running it.
Outputs:
params: dict from blob name to numpy array
device_options: dict from blob name to the device option of its creating op
"""
# NOTE: this assumes that the params is determined by producer op with the
# only exception be CopyGPUToCPU which is CUDA op but returns CPU tensor.
def _get_device_option(producer_op):
if producer_op.type == "CopyGPUToCPU":
return caffe2_pb2.DeviceOption()
else:
return producer_op.device_option
with ScopedWS("__get_params_from_init_net__", is_reset=True, is_cleanup=True) as ws:
ws.RunNetOnce(init_net)
params = {b: fetch_any_blob(b) for b in init_net.external_output}
ssa, versions = core.get_ssa(init_net)
producer_map = get_producer_map(ssa)
device_options = {
b: _get_device_option(init_net.op[producer_map[(b, versions[b])][0]])
for b in init_net.external_output
}
return params, device_options
def _updater_raise(op, input_types, output_types):
raise RuntimeError(
"Failed to apply updater for op {} given input_types {} and"
" output_types {}".format(op, input_types, output_types)
)
def _generic_status_identifier(
predict_net: caffe2_pb2.NetDef,
status_updater: Callable,
known_status: Dict[Tuple[str, int], Any],
) -> Dict[Tuple[str, int], Any]:
"""
Statically infer the status of each blob, the status can be such as device type
(CPU/GPU), layout (NCHW/NHWC), data type (float32/int8), etc. "Blob" here
is versioned blob (Tuple[str, int]) in the format compatible with ssa.
Inputs:
predict_net: the caffe2 network
status_updater: a callable, given an op and the status of its input/output,
it returns the updated status of input/output. `None` is used for
representing unknown status.
known_status: a dict containing known status, used as initialization.
Outputs:
A dict mapping from versioned blob to its status
"""
ssa, versions = core.get_ssa(predict_net)
versioned_ext_input = [(b, 0) for b in predict_net.external_input]
versioned_ext_output = [(b, versions[b]) for b in predict_net.external_output]
all_versioned_blobs = set().union(*[set(x[0] + x[1]) for x in ssa])
allowed_vbs = all_versioned_blobs.union(versioned_ext_input).union(versioned_ext_output)
assert all(k in allowed_vbs for k in known_status)
assert all(v is not None for v in known_status.values())
_known_status = copy.deepcopy(known_status)
def _check_and_update(key, value):
assert value is not None
if key in _known_status:
if not _known_status[key] == value:
raise RuntimeError(
"Confilict status for {}, existing status {}, new status {}".format(
key, _known_status[key], value
)
)
_known_status[key] = value
def _update_i(op, ssa_i):
versioned_inputs = ssa_i[0]
versioned_outputs = ssa_i[1]
inputs_status = [_known_status.get(b, None) for b in versioned_inputs]
outputs_status = [_known_status.get(b, None) for b in versioned_outputs]
new_inputs_status, new_outputs_status = status_updater(op, inputs_status, outputs_status)
for versioned_blob, status in zip(
versioned_inputs + versioned_outputs, new_inputs_status + new_outputs_status
):
if status is not None:
_check_and_update(versioned_blob, status)
for op, ssa_i in zip(predict_net.op, ssa):
_update_i(op, ssa_i)
for op, ssa_i in zip(reversed(predict_net.op), reversed(ssa)):
_update_i(op, ssa_i)
# NOTE: This strictly checks all the blob from predict_net must be assgined
# a known status. However sometimes it's impossible (eg. having deadend op),
# we may relax this constraint if
for k in all_versioned_blobs:
if k not in _known_status:
raise NotImplementedError(
"Can not infer the status for {}. Currently only support the case where"
" a single forward and backward pass can identify status for all blobs.".format(k)
)
return _known_status
def infer_device_type(
predict_net: caffe2_pb2.NetDef,
known_status: Dict[Tuple[str, int], Any],
device_name_style: str = "caffe2",
) -> Dict[Tuple[str, int], str]:
"""Return the device type ("cpu" or "gpu"/"cuda") of each (versioned) blob"""
assert device_name_style in ["caffe2", "pytorch"]
_CPU_STR = "cpu"
_GPU_STR = "gpu" if device_name_style == "caffe2" else "cuda"
def _copy_cpu_to_gpu_updater(op, input_types, output_types):
if input_types[0] == _GPU_STR or output_types[0] == _CPU_STR:
_updater_raise(op, input_types, output_types)
return ([_CPU_STR], [_GPU_STR])
def _copy_gpu_to_cpu_updater(op, input_types, output_types):
if input_types[0] == _CPU_STR or output_types[0] == _GPU_STR:
_updater_raise(op, input_types, output_types)
return ([_GPU_STR], [_CPU_STR])
def _other_ops_updater(op, input_types, output_types):
non_none_types = [x for x in input_types + output_types if x is not None]
if len(non_none_types) > 0:
the_type = non_none_types[0]
if not all(x == the_type for x in non_none_types):
_updater_raise(op, input_types, output_types)
else:
the_type = None
return ([the_type for _ in op.input], [the_type for _ in op.output])
def _device_updater(op, *args, **kwargs):
return {
"CopyCPUToGPU": _copy_cpu_to_gpu_updater,
"CopyGPUToCPU": _copy_gpu_to_cpu_updater,
}.get(op.type, _other_ops_updater)(op, *args, **kwargs)
return _generic_status_identifier(predict_net, _device_updater, known_status)
# ==== torch/utils_caffe2/vis.py ===============================================
def _modify_blob_names(ops, blob_rename_f):
ret = []
def _replace_list(blob_list, replaced_list):
del blob_list[:]
blob_list.extend(replaced_list)
for x in ops:
cur = copy.deepcopy(x)
_replace_list(cur.input, list(map(blob_rename_f, cur.input)))
_replace_list(cur.output, list(map(blob_rename_f, cur.output)))
ret.append(cur)
return ret
def _rename_blob(name, blob_sizes, blob_ranges):
def _list_to_str(bsize):
ret = ", ".join([str(x) for x in bsize])
ret = "[" + ret + "]"
return ret
ret = name
if blob_sizes is not None and name in blob_sizes:
ret += "\n" + _list_to_str(blob_sizes[name])
if blob_ranges is not None and name in blob_ranges:
ret += "\n" + _list_to_str(blob_ranges[name])
return ret
# graph_name could not contain word 'graph'
def save_graph(net, file_name, graph_name="net", op_only=True, blob_sizes=None, blob_ranges=None):
blob_rename_f = functools.partial(_rename_blob, blob_sizes=blob_sizes, blob_ranges=blob_ranges)
return save_graph_base(net, file_name, graph_name, op_only, blob_rename_f)
def save_graph_base(net, file_name, graph_name="net", op_only=True, blob_rename_func=None):
graph = None
ops = net.op
if blob_rename_func is not None:
ops = _modify_blob_names(ops, blob_rename_func)
if not op_only:
graph = net_drawer.GetPydotGraph(ops, graph_name, rankdir="TB")
else:
graph = net_drawer.GetPydotGraphMinimal(
ops, graph_name, rankdir="TB", minimal_dependency=True
)
try:
par_dir = os.path.dirname(file_name)
if not os.path.exists(par_dir):
os.makedirs(par_dir)
format = os.path.splitext(os.path.basename(file_name))[-1]
if format == ".png":
graph.write_png(file_name)
elif format == ".pdf":
graph.write_pdf(file_name)
elif format == ".svg":
graph.write_svg(file_name)
else:
print("Incorrect format {}".format(format))
except Exception as e:
print("Error when writing graph to image {}".format(e))
return graph
# ==== torch/utils_toffee/aten_to_caffe2.py ====================================
def group_norm_replace_aten_with_caffe2(predict_net: caffe2_pb2.NetDef):
"""
For ONNX exported model, GroupNorm will be represented as ATen op,
this can be a drop in replacement from ATen to GroupNorm
"""
count = 0
for op in predict_net.op:
if op.type == "ATen":
op_name = get_pb_arg_vals(op, "operator", None) # return byte in py3
if op_name and op_name.decode() == "group_norm":
op.arg.remove(get_pb_arg(op, "operator"))
if get_pb_arg_vali(op, "cudnn_enabled", None):
op.arg.remove(get_pb_arg(op, "cudnn_enabled"))
num_groups = get_pb_arg_vali(op, "num_groups", None)
if num_groups is not None:
op.arg.remove(get_pb_arg(op, "num_groups"))
check_set_pb_arg(op, "group", "i", num_groups)
op.type = "GroupNorm"
count += 1
if count > 1:
logger.info("Replaced {} ATen operator to GroupNormOp".format(count))
# ==== torch/utils_toffee/alias.py =============================================
def alias(x, name, is_backward=False):
if not torch.onnx.is_in_onnx_export():
return x
assert isinstance(x, torch.Tensor)
return torch.ops._caffe2.AliasWithName(x, name, is_backward=is_backward)
def fuse_alias_placeholder(predict_net, init_net):
"""Remove AliasWithName placeholder and rename the input/output of it"""
# First we finish all the re-naming
for i, op in enumerate(predict_net.op):
if op.type == "AliasWithName":
assert len(op.input) == 1
assert len(op.output) == 1
name = get_pb_arg_vals(op, "name", None).decode()
is_backward = bool(get_pb_arg_vali(op, "is_backward", 0))
rename_op_input(predict_net, init_net, i, 0, name, from_producer=is_backward)
rename_op_output(predict_net, i, 0, name)
# Remove AliasWithName, should be very safe since it's a non-op
new_ops = []
for op in predict_net.op:
if op.type != "AliasWithName":
new_ops.append(op)
else:
# safety check
assert op.input == op.output
assert op.input[0] == op.arg[0].s.decode()
del predict_net.op[:]
predict_net.op.extend(new_ops)
# ==== torch/utils_caffe2/graph_transform.py ===================================
class IllegalGraphTransformError(ValueError):
"""When a graph transform function call can't be executed."""
def _rename_versioned_blob_in_proto(
proto: caffe2_pb2.NetDef,
old_name: str,
new_name: str,
version: int,
ssa: List[Tuple[List[Tuple[str, int]], List[Tuple[str, int]]]],
start_versions: Dict[str, int],
end_versions: Dict[str, int],
):
"""In given proto, rename all blobs with matched version"""
# Operater list
for op, i_th_ssa in zip(proto.op, ssa):
versioned_inputs, versioned_outputs = i_th_ssa
for i in range(len(op.input)):
if versioned_inputs[i] == (old_name, version):
op.input[i] = new_name
for i in range(len(op.output)):
if versioned_outputs[i] == (old_name, version):
op.output[i] = new_name
# external_input
if start_versions.get(old_name, 0) == version:
for i in range(len(proto.external_input)):
if proto.external_input[i] == old_name:
proto.external_input[i] = new_name
# external_output
if end_versions.get(old_name, 0) == version:
for i in range(len(proto.external_output)):
if proto.external_output[i] == old_name:
proto.external_output[i] = new_name
def rename_op_input(
predict_net: caffe2_pb2.NetDef,
init_net: caffe2_pb2.NetDef,
op_id: int,
input_id: int,
new_name: str,
from_producer: bool = False,
):
"""
Rename the op_id-th operator in predict_net, change it's input_id-th input's
name to the new_name. It also does automatic re-route and change
external_input and init_net if necessary.
- It requires the input is only consumed by this op.
- This function modifies predict_net and init_net in-place.
- When from_producer is enable, this also updates other operators that consumes
the same input. Be cautious because may trigger unintended behavior.
"""
assert isinstance(predict_net, caffe2_pb2.NetDef)
assert isinstance(init_net, caffe2_pb2.NetDef)
init_net_ssa, init_net_versions = core.get_ssa(init_net)
predict_net_ssa, predict_net_versions = core.get_ssa(
predict_net, copy.deepcopy(init_net_versions)
)
versioned_inputs, versioned_outputs = predict_net_ssa[op_id]
old_name, version = versioned_inputs[input_id]
if from_producer:
producer_map = get_producer_map(predict_net_ssa)
if not (old_name, version) in producer_map:
raise NotImplementedError(
"Can't find producer, the input {} is probably from"
" init_net, this is not supported yet.".format(old_name)
)
producer = producer_map[(old_name, version)]
rename_op_output(predict_net, producer[0], producer[1], new_name)
return
def contain_targets(op_ssa):
return (old_name, version) in op_ssa[0]
is_consumer = [contain_targets(op_ssa) for op_ssa in predict_net_ssa]
if sum(is_consumer) > 1:
raise IllegalGraphTransformError(
(
"Input '{}' of operator(#{}) are consumed by other ops, please use"
+ " rename_op_output on the producer instead. Offending op: \n{}"
).format(old_name, op_id, predict_net.op[op_id])
)
# update init_net
_rename_versioned_blob_in_proto(
init_net, old_name, new_name, version, init_net_ssa, {}, init_net_versions
)
# update predict_net
_rename_versioned_blob_in_proto(
predict_net,
old_name,
new_name,
version,
predict_net_ssa,
init_net_versions,
predict_net_versions,
)
def rename_op_output(predict_net: caffe2_pb2.NetDef, op_id: int, output_id: int, new_name: str):
"""
Rename the op_id-th operator in predict_net, change it's output_id-th input's
name to the new_name. It also does automatic re-route and change
external_output and if necessary.
- It allows multiple consumers of its output.
- This function modifies predict_net in-place, doesn't need init_net.
"""
assert isinstance(predict_net, caffe2_pb2.NetDef)
ssa, blob_versions = core.get_ssa(predict_net)
versioned_inputs, versioned_outputs = ssa[op_id]
old_name, version = versioned_outputs[output_id]
# update predict_net
_rename_versioned_blob_in_proto(
predict_net, old_name, new_name, version, ssa, {}, blob_versions
)
def get_sub_graph_external_input_output(
predict_net: caffe2_pb2.NetDef, sub_graph_op_indices: List[int]
) -> Tuple[List[Tuple[str, int]], List[Tuple[str, int]]]:
"""
Return the list of external input/output of sub-graph,
each element is tuple of the name and corresponding version in predict_net.
external input/output is defined the same way as caffe2 NetDef.
"""
ssa, versions = core.get_ssa(predict_net)
all_inputs = []
all_outputs = []
for op_id in sub_graph_op_indices:
all_inputs += [inp for inp in ssa[op_id][0] if inp not in all_inputs]
all_outputs += list(ssa[op_id][1]) # ssa output won't repeat
# for versioned blobs, external inputs are just those blob in all_inputs
# but not in all_outputs
ext_inputs = [inp for inp in all_inputs if inp not in all_outputs]
# external outputs are essentially outputs of this subgraph that are used
# outside of this sub-graph (including predict_net.external_output)
all_other_inputs = sum(
(ssa[i][0] for i in range(len(ssa)) if i not in sub_graph_op_indices),
[(outp, versions[outp]) for outp in predict_net.external_output],
)
ext_outputs = [outp for outp in all_outputs if outp in set(all_other_inputs)]
return ext_inputs, ext_outputs
class DiGraph:
"""A DAG representation of caffe2 graph, each vertice is a versioned blob."""
def __init__(self):
self.vertices = set()
self.graph = collections.defaultdict(list)
def add_edge(self, u, v):
self.graph[u].append(v)
self.vertices.add(u)
self.vertices.add(v)
# grab from https://www.geeksforgeeks.org/find-paths-given-source-destination/
def get_all_paths(self, s, d):
visited = {k: False for k in self.vertices}
path = []
all_paths = []
def _get_all_paths_util(graph, u, d, visited, path):
visited[u] = True
path.append(u)
if u == d:
all_paths.append(copy.deepcopy(path))
else:
for i in graph[u]:
if not visited[i]:
_get_all_paths_util(graph, i, d, visited, path)
path.pop()
visited[u] = False
_get_all_paths_util(self.graph, s, d, visited, path)
return all_paths
@staticmethod
def from_ssa(ssa):
graph = DiGraph()
for op_id in range(len(ssa)):
for inp in ssa[op_id][0]:
for outp in ssa[op_id][1]:
graph.add_edge(inp, outp)
return graph
def _get_dependency_chain(ssa, versioned_target, versioned_source):
"""
Return the index list of relevant operator to produce target blob from source blob,
if there's no dependency, return empty list.
"""
# finding all paths between nodes can be O(N!), thus we can only search
# in the subgraph using the op starting from the first consumer of source blob
# to the producer of the target blob.
consumer_map = get_consumer_map(ssa)
producer_map = get_producer_map(ssa)
start_op = min(x[0] for x in consumer_map[versioned_source]) - 15
end_op = (
producer_map[versioned_target][0] + 15 if versioned_target in producer_map else start_op
)
sub_graph_ssa = ssa[start_op : end_op + 1]
if len(sub_graph_ssa) > 30:
logger.warning(
"Subgraph bebetween {} and {} is large (from op#{} to op#{}), it"
" might take non-trival time to find all paths between them.".format(
versioned_source, versioned_target, start_op, end_op
)
)
dag = DiGraph.from_ssa(sub_graph_ssa)
paths = dag.get_all_paths(versioned_source, versioned_target) # include two ends
ops_in_paths = [[producer_map[blob][0] for blob in path[1:]] for path in paths]
return sorted(set().union(*[set(ops) for ops in ops_in_paths]))
def identify_reshape_sub_graph(predict_net: caffe2_pb2.NetDef) -> List[List[int]]:
"""
Idenfity the reshape sub-graph in a protobuf.
The reshape sub-graph is defined as matching the following pattern:
(input_blob) -> Op_1 -> ... -> Op_N -> (new_shape) -─┐
└-------------------------------------------> Reshape -> (output_blob)
Return:
List of sub-graphs, each sub-graph is represented as a list of indices
of the relavent ops, [Op_1, Op_2, ..., Op_N, Reshape]
"""
ssa, _ = core.get_ssa(predict_net)
ret = []
for i, op in enumerate(predict_net.op):
if op.type == "Reshape":
assert len(op.input) == 2
input_ssa = ssa[i][0]
data_source = input_ssa[0]
shape_source = input_ssa[1]
op_indices = _get_dependency_chain(ssa, shape_source, data_source)
ret.append(op_indices + [i])
return ret
def remove_reshape_for_fc(predict_net, params):
"""
In PyTorch nn.Linear has to take 2D tensor, this often leads to reshape
a 4D tensor to 2D by calling .view(). However this (dynamic) reshaping
doesn't work well with ONNX and Int8 tools, and cause using extra
ops (eg. ExpandDims) that might not be available on mobile.
Luckily Caffe2 supports 4D tensor for FC, so we can remove those reshape
after exporting ONNX model.
"""
from caffe2.python import core
# find all reshape sub-graph that can be removed, which is now all Reshape
# sub-graph whose output is only consumed by FC.
# TODO: to make it safer, we may need the actually value to better determine
# if a Reshape before FC is removable.
reshape_sub_graphs = identify_reshape_sub_graph(predict_net)
sub_graphs_to_remove = []
for reshape_sub_graph in reshape_sub_graphs:
reshape_op_id = reshape_sub_graph[-1]
assert predict_net.op[reshape_op_id].type == "Reshape"
ssa, _ = core.get_ssa(predict_net)
reshape_output = ssa[reshape_op_id][1][0]
consumers = [i for i in range(len(ssa)) if reshape_output in ssa[i][0]]
if all(predict_net.op[consumer].type == "FC" for consumer in consumers):
# safety check if the sub-graph is isolated, for this reshape sub-graph,
# it means it has one non-param external input and one external output.
ext_inputs, ext_outputs = get_sub_graph_external_input_output(
predict_net, reshape_sub_graph
)
non_params_ext_inputs = [inp for inp in ext_inputs if inp[1] != 0]
if len(non_params_ext_inputs) == 1 and len(ext_outputs) == 1:
sub_graphs_to_remove.append(reshape_sub_graph)
# perform removing subgraph by:
# 1: rename the Reshape's output to its input, then the graph can be
# seen as in-place itentify, meaning whose external input/output are the same.
# 2: simply remove those ops.
remove_op_ids = []
params_to_remove = []
for sub_graph in sub_graphs_to_remove:
logger.info(
"Remove Reshape sub-graph:\n{}".format(
"".join(["(#{:>4})\n{}".format(i, predict_net.op[i]) for i in sub_graph])
)
)
reshape_op_id = sub_graph[-1]
new_reshap_output = predict_net.op[reshape_op_id].input[0]
rename_op_output(predict_net, reshape_op_id, 0, new_reshap_output)
ext_inputs, ext_outputs = get_sub_graph_external_input_output(predict_net, sub_graph)
non_params_ext_inputs = [inp for inp in ext_inputs if inp[1] != 0]
params_ext_inputs = [inp for inp in ext_inputs if inp[1] == 0]
assert len(non_params_ext_inputs) == 1 and len(ext_outputs) == 1
assert ext_outputs[0][0] == non_params_ext_inputs[0][0]
assert ext_outputs[0][1] == non_params_ext_inputs[0][1] + 1
remove_op_ids.extend(sub_graph)
params_to_remove.extend(params_ext_inputs)
predict_net = copy.deepcopy(predict_net)
new_ops = [op for i, op in enumerate(predict_net.op) if i not in remove_op_ids]
del predict_net.op[:]
predict_net.op.extend(new_ops)
for versioned_params in params_to_remove:
name = versioned_params[0]
logger.info("Remove params: {} from init_net and predict_net.external_input".format(name))
del params[name]
predict_net.external_input.remove(name)
return predict_net, params
def fuse_copy_between_cpu_and_gpu(predict_net: caffe2_pb2.NetDef):
"""
In-place fuse extra copy ops between cpu/gpu for the following case:
a -CopyAToB-> b -CopyBToA> c1 -NextOp1-> d1
-CopyBToA> c2 -NextOp2-> d2
The fused network will look like:
a -NextOp1-> d1
-NextOp2-> d2
"""
_COPY_OPS = ["CopyCPUToGPU", "CopyGPUToCPU"]
def _fuse_once(predict_net):
ssa, blob_versions = core.get_ssa(predict_net)
consumer_map = get_consumer_map(ssa)
versioned_external_output = [
(name, blob_versions[name]) for name in predict_net.external_output
]
for op_id, op in enumerate(predict_net.op):
if op.type in _COPY_OPS:
fw_copy_versioned_output = ssa[op_id][1][0]
consumer_ids = [x[0] for x in consumer_map[fw_copy_versioned_output]]
reverse_op_type = _COPY_OPS[1 - _COPY_OPS.index(op.type)]
is_fusable = (
len(consumer_ids) > 0
and fw_copy_versioned_output not in versioned_external_output
and all(
predict_net.op[_op_id].type == reverse_op_type
and ssa[_op_id][1][0] not in versioned_external_output
for _op_id in consumer_ids
)
)
if is_fusable:
for rv_copy_op_id in consumer_ids:
# making each NextOp uses "a" directly and removing Copy ops
rs_copy_versioned_output = ssa[rv_copy_op_id][1][0]
next_op_id, inp_id = consumer_map[rs_copy_versioned_output][0]
predict_net.op[next_op_id].input[inp_id] = op.input[0]
# remove CopyOps
new_ops = [
op
for i, op in enumerate(predict_net.op)
if i != op_id and i not in consumer_ids
]
del predict_net.op[:]
predict_net.op.extend(new_ops)
return True
return False
# _fuse_once returns False is nothing can be fused
while _fuse_once(predict_net):
pass
def remove_dead_end_ops(net_def: caffe2_pb2.NetDef):
"""remove ops if its output is not used or not in external_output"""
ssa, versions = core.get_ssa(net_def)
versioned_external_output = [(name, versions[name]) for name in net_def.external_output]
consumer_map = get_consumer_map(ssa)
removed_op_ids = set()
def _is_dead_end(versioned_blob):
return not (
versioned_blob in versioned_external_output
or (
len(consumer_map[versioned_blob]) > 0
and all(x[0] not in removed_op_ids for x in consumer_map[versioned_blob])
)
)
for i, ssa_i in reversed(list(enumerate(ssa))):
versioned_outputs = ssa_i[1]
if all(_is_dead_end(outp) for outp in versioned_outputs):
removed_op_ids.add(i)
# simply removing those deadend ops should have no effect to external_output
new_ops = [op for i, op in enumerate(net_def.op) if i not in removed_op_ids]
del net_def.op[:]
net_def.op.extend(new_ops) | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/export/shared.py | shared.py |
import copy
import io
import logging
import numpy as np
from typing import List
import onnx
import torch
from caffe2.proto import caffe2_pb2
from caffe2.python import core
from caffe2.python.onnx.backend import Caffe2Backend
from tabulate import tabulate
from termcolor import colored
from torch.onnx import OperatorExportTypes
from .shared import (
ScopedWS,
construct_init_net_from_params,
fuse_alias_placeholder,
fuse_copy_between_cpu_and_gpu,
get_params_from_init_net,
group_norm_replace_aten_with_caffe2,
infer_device_type,
remove_dead_end_ops,
remove_reshape_for_fc,
save_graph,
)
logger = logging.getLogger(__name__)
def export_onnx_model(model, inputs):
"""
Trace and export a model to onnx format.
Args:
model (nn.Module):
inputs (tuple[args]): the model will be called by `model(*inputs)`
Returns:
an onnx model
"""
assert isinstance(model, torch.nn.Module)
# make sure all modules are in eval mode, onnx may change the training state
# of the module if the states are not consistent
def _check_eval(module):
assert not module.training
model.apply(_check_eval)
# Export the model to ONNX
with torch.no_grad():
with io.BytesIO() as f:
torch.onnx.export(
model,
inputs,
f,
operator_export_type=OperatorExportTypes.ONNX_ATEN_FALLBACK,
# verbose=True, # NOTE: uncomment this for debugging
# export_params=True,
)
onnx_model = onnx.load_from_string(f.getvalue())
# Apply ONNX's Optimization
all_passes = onnx.optimizer.get_available_passes()
passes = ["fuse_bn_into_conv"]
assert all(p in all_passes for p in passes)
onnx_model = onnx.optimizer.optimize(onnx_model, passes)
return onnx_model
def _op_stats(net_def):
type_count = {}
for t in [op.type for op in net_def.op]:
type_count[t] = type_count.get(t, 0) + 1
type_count_list = sorted(type_count.items(), key=lambda kv: kv[0]) # alphabet
type_count_list = sorted(type_count_list, key=lambda kv: -kv[1]) # count
return "\n".join("{:>4}x {}".format(count, name) for name, count in type_count_list)
def _assign_device_option(
predict_net: caffe2_pb2.NetDef, init_net: caffe2_pb2.NetDef, tensor_inputs: List[torch.Tensor]
):
"""
ONNX exported network doesn't have concept of device, assign necessary
device option for each op in order to make it runable on GPU runtime.
"""
def _get_device_type(torch_tensor):
assert torch_tensor.device.type in ["cpu", "cuda"]
assert torch_tensor.device.index == 0
return torch_tensor.device.type
def _assign_op_device_option(net_proto, net_ssa, blob_device_types):
for op, ssa_i in zip(net_proto.op, net_ssa):
if op.type in ["CopyCPUToGPU", "CopyGPUToCPU"]:
op.device_option.CopyFrom(core.DeviceOption(caffe2_pb2.CUDA, 0))
else:
devices = [blob_device_types[b] for b in ssa_i[0] + ssa_i[1]]
assert all(d == devices[0] for d in devices)
if devices[0] == "cuda":
op.device_option.CopyFrom(core.DeviceOption(caffe2_pb2.CUDA, 0))
# update ops in predict_net
predict_net_input_device_types = {
(name, 0): _get_device_type(tensor)
for name, tensor in zip(predict_net.external_input, tensor_inputs)
}
predict_net_device_types = infer_device_type(
predict_net, known_status=predict_net_input_device_types, device_name_style="pytorch"
)
predict_net_ssa, _ = core.get_ssa(predict_net)
_assign_op_device_option(predict_net, predict_net_ssa, predict_net_device_types)
# update ops in init_net
init_net_ssa, versions = core.get_ssa(init_net)
init_net_output_device_types = {
(name, versions[name]): predict_net_device_types[(name, 0)]
for name in init_net.external_output
}
init_net_device_types = infer_device_type(
init_net, known_status=init_net_output_device_types, device_name_style="pytorch"
)
_assign_op_device_option(init_net, init_net_ssa, init_net_device_types)
def export_caffe2_detection_model(model: torch.nn.Module, tensor_inputs: List[torch.Tensor]):
"""
Export a caffe2-compatible Detectron2 model to caffe2 format via ONNX.
Arg:
model: a caffe2-compatible version of detectron2 model, defined in caffe2_modeling.py
tensor_inputs: a list of tensors that caffe2 model takes as input.
"""
model = copy.deepcopy(model)
assert isinstance(model, torch.nn.Module)
assert hasattr(model, "encode_additional_info")
# Export via ONNX
logger.info(
"Exporting a {} model via ONNX ...".format(type(model).__name__)
+ " Some warnings from ONNX are expected and are usually not to worry about."
)
onnx_model = export_onnx_model(model, (tensor_inputs,))
# Convert ONNX model to Caffe2 protobuf
init_net, predict_net = Caffe2Backend.onnx_graph_to_caffe2_net(onnx_model)
ops_table = [[op.type, op.input, op.output] for op in predict_net.op]
table = tabulate(ops_table, headers=["type", "input", "output"], tablefmt="pipe")
logger.info(
"ONNX export Done. Exported predict_net (before optimizations):\n" + colored(table, "cyan")
)
# Apply protobuf optimization
fuse_alias_placeholder(predict_net, init_net)
if any(t.device.type != "cpu" for t in tensor_inputs):
fuse_copy_between_cpu_and_gpu(predict_net)
remove_dead_end_ops(init_net)
_assign_device_option(predict_net, init_net, tensor_inputs)
params, device_options = get_params_from_init_net(init_net)
predict_net, params = remove_reshape_for_fc(predict_net, params)
init_net = construct_init_net_from_params(params, device_options)
group_norm_replace_aten_with_caffe2(predict_net)
# Record necessary information for running the pb model in Detectron2 system.
model.encode_additional_info(predict_net, init_net)
logger.info("Operators used in predict_net: \n{}".format(_op_stats(predict_net)))
logger.info("Operators used in init_net: \n{}".format(_op_stats(init_net)))
return predict_net, init_net
def run_and_save_graph(predict_net, init_net, tensor_inputs, graph_save_path):
"""
Run the caffe2 model on given inputs, recording the shape and draw the graph.
predict_net/init_net: caffe2 model.
tensor_inputs: a list of tensors that caffe2 model takes as input.
graph_save_path: path for saving graph of exported model.
"""
logger.info("Saving graph of ONNX exported model to {} ...".format(graph_save_path))
save_graph(predict_net, graph_save_path, op_only=False)
# Run the exported Caffe2 net
logger.info("Running ONNX exported model ...")
with ScopedWS("__ws_tmp__", True) as ws:
ws.RunNetOnce(init_net)
initialized_blobs = set(ws.Blobs())
uninitialized = [inp for inp in predict_net.external_input if inp not in initialized_blobs]
for name, blob in zip(uninitialized, tensor_inputs):
ws.FeedBlob(name, blob)
try:
ws.RunNetOnce(predict_net)
except RuntimeError as e:
logger.warning("Encountered RuntimeError: \n{}".format(str(e)))
ws_blobs = {b: ws.FetchBlob(b) for b in ws.Blobs()}
blob_sizes = {b: ws_blobs[b].shape for b in ws_blobs if isinstance(ws_blobs[b], np.ndarray)}
logger.info("Saving graph with blob shapes to {} ...".format(graph_save_path))
save_graph(predict_net, graph_save_path, op_only=False, blob_sizes=blob_sizes)
return ws_blobs | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/export/caffe2_export.py | caffe2_export.py |
import os
import sys
import tempfile
from contextlib import ExitStack, contextmanager
from copy import deepcopy
from unittest import mock
import torch
from torch import nn
# need some explicit imports due to https://github.com/pytorch/pytorch/issues/38964
import detectron2 # noqa F401
from detectron2.structures import Boxes, Instances
from detectron2.utils.env import _import_file
_counter = 0
def _clear_jit_cache():
from torch.jit._recursive import concrete_type_store
from torch.jit._state import _jit_caching_layer
concrete_type_store.type_store.clear() # for modules
_jit_caching_layer.clear() # for free functions
def _add_instances_conversion_methods(newInstances):
"""
Add from_instances methods to the scripted Instances class.
"""
cls_name = newInstances.__name__
@torch.jit.unused
def from_instances(instances: Instances):
"""
Create scripted Instances from original Instances
"""
fields = instances.get_fields()
image_size = instances.image_size
ret = newInstances(image_size)
for name, val in fields.items():
assert hasattr(ret, f"_{name}"), f"No attribute named {name} in {cls_name}"
setattr(ret, name, deepcopy(val))
return ret
newInstances.from_instances = from_instances
@contextmanager
def patch_instances(fields):
"""
A contextmanager, under which the Instances class in detectron2 is replaced
by a statically-typed scriptable class, defined by `fields`.
See more in `scripting_with_instances`.
"""
with tempfile.TemporaryDirectory(prefix="detectron2") as dir, tempfile.NamedTemporaryFile(
mode="w", encoding="utf-8", suffix=".py", dir=dir, delete=False
) as f:
try:
# Objects that use Instances should not reuse previously-compiled
# results in cache, because `Instances` could be a new class each time.
_clear_jit_cache()
cls_name, s = _gen_instance_module(fields)
f.write(s)
f.flush()
f.close()
module = _import(f.name)
new_instances = getattr(module, cls_name)
_ = torch.jit.script(new_instances)
# let torchscript think Instances was scripted already
Instances.__torch_script_class__ = True
# let torchscript find new_instances when looking for the jit type of Instances
Instances._jit_override_qualname = torch._jit_internal._qualified_name(new_instances)
_add_instances_conversion_methods(new_instances)
yield new_instances
finally:
try:
del Instances.__torch_script_class__
del Instances._jit_override_qualname
except AttributeError:
pass
sys.modules.pop(module.__name__)
def _gen_instance_class(fields):
"""
Args:
fields (dict[name: type])
"""
class _FieldType:
def __init__(self, name, type_):
assert isinstance(name, str), f"Field name must be str, got {name}"
self.name = name
self.type_ = type_
self.annotation = f"{type_.__module__}.{type_.__name__}"
fields = [_FieldType(k, v) for k, v in fields.items()]
def indent(level, s):
return " " * 4 * level + s
lines = []
global _counter
_counter += 1
cls_name = "ScriptedInstances{}".format(_counter)
field_names = tuple(x.name for x in fields)
extra_args = ", ".join([f"{f.name}: Optional[{f.annotation}] = None" for f in fields])
lines.append(
f"""
class {cls_name}:
def __init__(self, image_size: Tuple[int, int], {extra_args}):
self.image_size = image_size
self._field_names = {field_names}
"""
)
for f in fields:
lines.append(
indent(2, f"self._{f.name} = torch.jit.annotate(Optional[{f.annotation}], {f.name})")
)
for f in fields:
lines.append(
f"""
@property
def {f.name}(self) -> {f.annotation}:
# has to use a local for type refinement
# https://pytorch.org/docs/stable/jit_language_reference.html#optional-type-refinement
t = self._{f.name}
assert t is not None, "{f.name} is None and cannot be accessed!"
return t
@{f.name}.setter
def {f.name}(self, value: {f.annotation}) -> None:
self._{f.name} = value
"""
)
# support method `__len__`
lines.append(
"""
def __len__(self) -> int:
"""
)
for f in fields:
lines.append(
f"""
t = self._{f.name}
if t is not None:
return len(t)
"""
)
lines.append(
"""
raise NotImplementedError("Empty Instances does not support __len__!")
"""
)
# support method `has`
lines.append(
"""
def has(self, name: str) -> bool:
"""
)
for f in fields:
lines.append(
f"""
if name == "{f.name}":
return self._{f.name} is not None
"""
)
lines.append(
"""
return False
"""
)
# support method `to`
none_args = ", None" * len(fields)
lines.append(
f"""
def to(self, device: torch.device) -> "{cls_name}":
ret = {cls_name}(self.image_size{none_args})
"""
)
for f in fields:
if hasattr(f.type_, "to"):
lines.append(
f"""
t = self._{f.name}
if t is not None:
ret._{f.name} = t.to(device)
"""
)
else:
# For now, ignore fields that cannot be moved to devices.
# Maybe can support other tensor-like classes (e.g. __torch_function__)
pass
lines.append(
"""
return ret
"""
)
# support method `getitem`
none_args = ", None" * len(fields)
lines.append(
f"""
def __getitem__(self, item) -> "{cls_name}":
ret = {cls_name}(self.image_size{none_args})
"""
)
for f in fields:
lines.append(
f"""
t = self._{f.name}
if t is not None:
ret._{f.name} = t[item]
"""
)
lines.append(
"""
return ret
"""
)
# support method `cat`
# this version does not contain checks that all instances have same size and fields
none_args = ", None" * len(fields)
lines.append(
f"""
def cat(self, instances: List["{cls_name}"]) -> "{cls_name}":
ret = {cls_name}(self.image_size{none_args})
"""
)
for f in fields:
lines.append(
f"""
t = self._{f.name}
if t is not None:
values: List[{f.annotation}] = [x.{f.name} for x in instances]
if torch.jit.isinstance(t, torch.Tensor):
ret._{f.name} = torch.cat(values, dim=0)
else:
ret._{f.name} = t.cat(values)
"""
)
lines.append(
"""
return ret"""
)
# support method `get_fields()`
lines.append(
"""
def get_fields(self) -> Dict[str, Tensor]:
ret = {}
"""
)
for f in fields:
if f.type_ == Boxes:
stmt = "t.tensor"
elif f.type_ == torch.Tensor:
stmt = "t"
else:
stmt = f'assert False, "unsupported type {str(f.type_)}"'
lines.append(
f"""
t = self._{f.name}
if t is not None:
ret["{f.name}"] = {stmt}
"""
)
lines.append(
"""
return ret"""
)
return cls_name, os.linesep.join(lines)
def _gen_instance_module(fields):
# TODO: find a more automatic way to enable import of other classes
s = """
from copy import deepcopy
import torch
from torch import Tensor
import typing
from typing import *
import detectron2
from detectron2.structures import Boxes, Instances
"""
cls_name, cls_def = _gen_instance_class(fields)
s += cls_def
return cls_name, s
def _import(path):
return _import_file(
"{}{}".format(sys.modules[__name__].__name__, _counter), path, make_importable=True
)
@contextmanager
def patch_builtin_len(modules=()):
"""
Patch the builtin len() function of a few detectron2 modules
to use __len__ instead, because __len__ does not convert values to
integers and therefore is friendly to tracing.
Args:
modules (list[stsr]): names of extra modules to patch len(), in
addition to those in detectron2.
"""
def _new_len(obj):
return obj.__len__()
with ExitStack() as stack:
MODULES = [
"detectron2.modeling.roi_heads.fast_rcnn",
"detectron2.modeling.roi_heads.mask_head",
"detectron2.modeling.roi_heads.keypoint_head",
] + list(modules)
ctxs = [stack.enter_context(mock.patch(mod + ".len")) for mod in MODULES]
for m in ctxs:
m.side_effect = _new_len
yield
def patch_nonscriptable_classes():
"""
Apply patches on a few nonscriptable detectron2 classes.
Should not have side-effects on eager usage.
"""
# __prepare_scriptable__ can also be added to models for easier maintenance.
# But it complicates the clean model code.
from detectron2.modeling.backbone import ResNet, FPN
# Due to https://github.com/pytorch/pytorch/issues/36061,
# we change backbone to use ModuleList for scripting.
# (note: this changes param names in state_dict)
def prepare_resnet(self):
ret = deepcopy(self)
ret.stages = nn.ModuleList(ret.stages)
for k in self.stage_names:
delattr(ret, k)
return ret
ResNet.__prepare_scriptable__ = prepare_resnet
def prepare_fpn(self):
ret = deepcopy(self)
ret.lateral_convs = nn.ModuleList(ret.lateral_convs)
ret.output_convs = nn.ModuleList(ret.output_convs)
for name, _ in self.named_children():
if name.startswith("fpn_"):
delattr(ret, name)
return ret
FPN.__prepare_scriptable__ = prepare_fpn
# Annotate some attributes to be constants for the purpose of scripting,
# even though they are not constants in eager mode.
from detectron2.modeling.roi_heads import StandardROIHeads
if hasattr(StandardROIHeads, "__annotations__"):
# copy first to avoid editing annotations of base class
StandardROIHeads.__annotations__ = deepcopy(StandardROIHeads.__annotations__)
StandardROIHeads.__annotations__["mask_on"] = torch.jit.Final[bool]
StandardROIHeads.__annotations__["keypoint_on"] = torch.jit.Final[bool]
# These patches are not supposed to have side-effects.
patch_nonscriptable_classes()
@contextmanager
def freeze_training_mode(model):
"""
A context manager that annotates the "training" attribute of every submodule
to constant, so that the training codepath in these modules can be
meta-compiled away. Upon exiting, the annotations are reverted.
"""
classes = {type(x) for x in model.modules()}
# __constants__ is the old way to annotate constants and not compatible
# with __annotations__ .
classes = {x for x in classes if not hasattr(x, "__constants__")}
for cls in classes:
cls.__annotations__["training"] = torch.jit.Final[bool]
yield
for cls in classes:
cls.__annotations__["training"] = bool | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/export/torchscript_patch.py | torchscript_patch.py |
import os
import torch
from detectron2.utils.file_io import PathManager
from .torchscript_patch import freeze_training_mode, patch_instances
__all__ = ["scripting_with_instances", "dump_torchscript_IR"]
def scripting_with_instances(model, fields):
"""
Run :func:`torch.jit.script` on a model that uses the :class:`Instances` class. Since
attributes of :class:`Instances` are "dynamically" added in eager mode,it is difficult
for scripting to support it out of the box. This function is made to support scripting
a model that uses :class:`Instances`. It does the following:
1. Create a scriptable ``new_Instances`` class which behaves similarly to ``Instances``,
but with all attributes been "static".
The attributes need to be statically declared in the ``fields`` argument.
2. Register ``new_Instances``, and force scripting compiler to
use it when trying to compile ``Instances``.
After this function, the process will be reverted. User should be able to script another model
using different fields.
Example:
Assume that ``Instances`` in the model consist of two attributes named
``proposal_boxes`` and ``objectness_logits`` with type :class:`Boxes` and
:class:`Tensor` respectively during inference. You can call this function like:
::
fields = {"proposal_boxes": Boxes, "objectness_logits": torch.Tensor}
torchscipt_model = scripting_with_instances(model, fields)
Note:
It only support models in evaluation mode.
Args:
model (nn.Module): The input model to be exported by scripting.
fields (Dict[str, type]): Attribute names and corresponding type that
``Instances`` will use in the model. Note that all attributes used in ``Instances``
need to be added, regardless of whether they are inputs/outputs of the model.
Data type not defined in detectron2 is not supported for now.
Returns:
torch.jit.ScriptModule: the model in torchscript format
"""
assert (
not model.training
), "Currently we only support exporting models in evaluation mode to torchscript"
with freeze_training_mode(model), patch_instances(fields):
scripted_model = torch.jit.script(model)
return scripted_model
# alias for old name
export_torchscript_with_instances = scripting_with_instances
def dump_torchscript_IR(model, dir):
"""
Dump IR of a TracedModule/ScriptModule/Function in various format (code, graph,
inlined graph). Useful for debugging.
Args:
model (TracedModule/ScriptModule/ScriptFUnction): traced or scripted module
dir (str): output directory to dump files.
"""
dir = os.path.expanduser(dir)
PathManager.mkdirs(dir)
def _get_script_mod(mod):
if isinstance(mod, torch.jit.TracedModule):
return mod._actual_script_module
return mod
# Dump pretty-printed code: https://pytorch.org/docs/stable/jit.html#inspecting-code
with PathManager.open(os.path.join(dir, "model_ts_code.txt"), "w") as f:
def get_code(mod):
# Try a few ways to get code using private attributes.
try:
# This contains more information than just `mod.code`
return _get_script_mod(mod)._c.code
except AttributeError:
pass
try:
return mod.code
except AttributeError:
return None
def dump_code(prefix, mod):
code = get_code(mod)
name = prefix or "root model"
if code is None:
f.write(f"Could not found code for {name} (type={mod.original_name})\n")
f.write("\n")
else:
f.write(f"\nCode for {name}, type={mod.original_name}:\n")
f.write(code)
f.write("\n")
f.write("-" * 80)
for name, m in mod.named_children():
dump_code(prefix + "." + name, m)
if isinstance(model, torch.jit.ScriptFunction):
f.write(get_code(model))
else:
dump_code("", model)
def _get_graph(model):
try:
# Recursively dump IR of all modules
return _get_script_mod(model)._c.dump_to_str(True, False, False)
except AttributeError:
return model.graph.str()
with PathManager.open(os.path.join(dir, "model_ts_IR.txt"), "w") as f:
f.write(_get_graph(model))
# Dump IR of the entire graph (all submodules inlined)
with PathManager.open(os.path.join(dir, "model_ts_IR_inlined.txt"), "w") as f:
f.write(str(model.inlined_graph))
if not isinstance(model, torch.jit.ScriptFunction):
# Dump the model structure in pytorch style
with PathManager.open(os.path.join(dir, "model.txt"), "w") as f:
f.write(str(model)) | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/export/torchscript.py | torchscript.py |
import functools
import io
import struct
import types
import torch
from detectron2.modeling import meta_arch
from detectron2.modeling.box_regression import Box2BoxTransform
from detectron2.modeling.roi_heads import keypoint_head
from detectron2.structures import Boxes, ImageList, Instances, RotatedBoxes
from .c10 import Caffe2Compatible
from .caffe2_patch import ROIHeadsPatcher, patch_generalized_rcnn
from .shared import (
alias,
check_set_pb_arg,
get_pb_arg_floats,
get_pb_arg_valf,
get_pb_arg_vali,
get_pb_arg_vals,
mock_torch_nn_functional_interpolate,
)
def assemble_rcnn_outputs_by_name(image_sizes, tensor_outputs, force_mask_on=False):
"""
A function to assemble caffe2 model's outputs (i.e. Dict[str, Tensor])
to detectron2's format (i.e. list of Instances instance).
This only works when the model follows the Caffe2 detectron's naming convention.
Args:
image_sizes (List[List[int, int]]): [H, W] of every image.
tensor_outputs (Dict[str, Tensor]): external_output to its tensor.
force_mask_on (Bool): if true, the it make sure there'll be pred_masks even
if the mask is not found from tensor_outputs (usually due to model crash)
"""
results = [Instances(image_size) for image_size in image_sizes]
batch_splits = tensor_outputs.get("batch_splits", None)
if batch_splits:
raise NotImplementedError()
assert len(image_sizes) == 1
result = results[0]
bbox_nms = tensor_outputs["bbox_nms"]
score_nms = tensor_outputs["score_nms"]
class_nms = tensor_outputs["class_nms"]
# Detection will always success because Conv support 0-batch
assert bbox_nms is not None
assert score_nms is not None
assert class_nms is not None
if bbox_nms.shape[1] == 5:
result.pred_boxes = RotatedBoxes(bbox_nms)
else:
result.pred_boxes = Boxes(bbox_nms)
result.scores = score_nms
result.pred_classes = class_nms.to(torch.int64)
mask_fcn_probs = tensor_outputs.get("mask_fcn_probs", None)
if mask_fcn_probs is not None:
# finish the mask pred
mask_probs_pred = mask_fcn_probs
num_masks = mask_probs_pred.shape[0]
class_pred = result.pred_classes
indices = torch.arange(num_masks, device=class_pred.device)
mask_probs_pred = mask_probs_pred[indices, class_pred][:, None]
result.pred_masks = mask_probs_pred
elif force_mask_on:
# NOTE: there's no way to know the height/width of mask here, it won't be
# used anyway when batch size is 0, so just set them to 0.
result.pred_masks = torch.zeros([0, 1, 0, 0], dtype=torch.uint8)
keypoints_out = tensor_outputs.get("keypoints_out", None)
kps_score = tensor_outputs.get("kps_score", None)
if keypoints_out is not None:
# keypoints_out: [N, 4, #kypoints], where 4 is in order of (x, y, score, prob)
keypoints_tensor = keypoints_out
# NOTE: it's possible that prob is not calculated if "should_output_softmax"
# is set to False in HeatmapMaxKeypoint, so just using raw score, seems
# it doesn't affect mAP. TODO: check more carefully.
keypoint_xyp = keypoints_tensor.transpose(1, 2)[:, :, [0, 1, 2]]
result.pred_keypoints = keypoint_xyp
elif kps_score is not None:
# keypoint heatmap to sparse data structure
pred_keypoint_logits = kps_score
keypoint_head.keypoint_rcnn_inference(pred_keypoint_logits, [result])
return results
def _cast_to_f32(f64):
return struct.unpack("f", struct.pack("f", f64))[0]
def set_caffe2_compatible_tensor_mode(model, enable=True):
def _fn(m):
if isinstance(m, Caffe2Compatible):
m.tensor_mode = enable
model.apply(_fn)
def convert_batched_inputs_to_c2_format(batched_inputs, size_divisibility, device):
"""
See get_caffe2_inputs() below.
"""
assert all(isinstance(x, dict) for x in batched_inputs)
assert all(x["image"].dim() == 3 for x in batched_inputs)
images = [x["image"] for x in batched_inputs]
images = ImageList.from_tensors(images, size_divisibility)
im_info = []
for input_per_image, image_size in zip(batched_inputs, images.image_sizes):
target_height = input_per_image.get("height", image_size[0])
target_width = input_per_image.get("width", image_size[1]) # noqa
# NOTE: The scale inside im_info is kept as convention and for providing
# post-processing information if further processing is needed. For
# current Caffe2 model definitions that don't include post-processing inside
# the model, this number is not used.
# NOTE: There can be a slight difference between width and height
# scales, using a single number can results in numerical difference
# compared with D2's post-processing.
scale = target_height / image_size[0]
im_info.append([image_size[0], image_size[1], scale])
im_info = torch.Tensor(im_info)
return images.tensor.to(device), im_info.to(device)
class Caffe2MetaArch(Caffe2Compatible, torch.nn.Module):
"""
Base class for caffe2-compatible implementation of a meta architecture.
The forward is traceable and its traced graph can be converted to caffe2
graph through ONNX.
"""
def __init__(self, cfg, torch_model):
"""
Args:
cfg (CfgNode):
torch_model (nn.Module): the detectron2 model (meta_arch) to be
converted.
"""
super().__init__()
self._wrapped_model = torch_model
self.eval()
set_caffe2_compatible_tensor_mode(self, True)
def get_caffe2_inputs(self, batched_inputs):
"""
Convert pytorch-style structured inputs to caffe2-style inputs that
are tuples of tensors.
Args:
batched_inputs (list[dict]): inputs to a detectron2 model
in its standard format. Each dict has "image" (CHW tensor), and optionally
"height" and "width".
Returns:
tuple[Tensor]:
tuple of tensors that will be the inputs to the
:meth:`forward` method. For existing models, the first
is an NCHW tensor (padded and batched); the second is
a im_info Nx3 tensor, where the rows are
(height, width, unused legacy parameter)
"""
return convert_batched_inputs_to_c2_format(
batched_inputs,
self._wrapped_model.backbone.size_divisibility,
self._wrapped_model.device,
)
def encode_additional_info(self, predict_net, init_net):
"""
Save extra metadata that will be used by inference in the output protobuf.
"""
pass
def forward(self, inputs):
"""
Run the forward in caffe2-style. It has to use caffe2-compatible ops
and the method will be used for tracing.
Args:
inputs (tuple[Tensor]): inputs defined by :meth:`get_caffe2_input`.
They will be the inputs of the converted caffe2 graph.
Returns:
tuple[Tensor]: output tensors. They will be the outputs of the
converted caffe2 graph.
"""
raise NotImplementedError
def _caffe2_preprocess_image(self, inputs):
"""
Caffe2 implementation of preprocess_image, which is called inside each MetaArch's forward.
It normalizes the input images, and the final caffe2 graph assumes the
inputs have been batched already.
"""
data, im_info = inputs
data = alias(data, "data")
im_info = alias(im_info, "im_info")
mean, std = self._wrapped_model.pixel_mean, self._wrapped_model.pixel_std
normalized_data = (data - mean) / std
normalized_data = alias(normalized_data, "normalized_data")
# Pack (data, im_info) into ImageList which is recognized by self.inference.
images = ImageList(tensor=normalized_data, image_sizes=im_info)
return images
@staticmethod
def get_outputs_converter(predict_net, init_net):
"""
Creates a function that converts outputs of the caffe2 model to
detectron2's standard format.
The function uses information in `predict_net` and `init_net` that are
available at inferene time. Therefore the function logic can be used in inference.
The returned function has the following signature:
def convert(batched_inputs, c2_inputs, c2_results) -> detectron2_outputs
Where
* batched_inputs (list[dict]): the original input format of the meta arch
* c2_inputs (tuple[Tensor]): the caffe2 inputs.
* c2_results (dict[str, Tensor]): the caffe2 output format,
corresponding to the outputs of the :meth:`forward` function.
* detectron2_outputs: the original output format of the meta arch.
This function can be used to compare the outputs of the original meta arch and
the converted caffe2 graph.
Returns:
callable: a callable of the above signature.
"""
raise NotImplementedError
class Caffe2GeneralizedRCNN(Caffe2MetaArch):
def __init__(self, cfg, torch_model):
assert isinstance(torch_model, meta_arch.GeneralizedRCNN)
torch_model = patch_generalized_rcnn(torch_model)
super().__init__(cfg, torch_model)
try:
use_heatmap_max_keypoint = cfg.EXPORT_CAFFE2.USE_HEATMAP_MAX_KEYPOINT
except AttributeError:
use_heatmap_max_keypoint = False
self.roi_heads_patcher = ROIHeadsPatcher(
self._wrapped_model.roi_heads, use_heatmap_max_keypoint
)
def encode_additional_info(self, predict_net, init_net):
size_divisibility = self._wrapped_model.backbone.size_divisibility
check_set_pb_arg(predict_net, "size_divisibility", "i", size_divisibility)
check_set_pb_arg(
predict_net, "device", "s", str.encode(str(self._wrapped_model.device), "ascii")
)
check_set_pb_arg(predict_net, "meta_architecture", "s", b"GeneralizedRCNN")
@mock_torch_nn_functional_interpolate()
def forward(self, inputs):
if not self.tensor_mode:
return self._wrapped_model.inference(inputs)
images = self._caffe2_preprocess_image(inputs)
features = self._wrapped_model.backbone(images.tensor)
proposals, _ = self._wrapped_model.proposal_generator(images, features)
with self.roi_heads_patcher.mock_roi_heads():
detector_results, _ = self._wrapped_model.roi_heads(images, features, proposals)
return tuple(detector_results[0].flatten())
@staticmethod
def get_outputs_converter(predict_net, init_net):
def f(batched_inputs, c2_inputs, c2_results):
_, im_info = c2_inputs
image_sizes = [[int(im[0]), int(im[1])] for im in im_info]
results = assemble_rcnn_outputs_by_name(image_sizes, c2_results)
return meta_arch.GeneralizedRCNN._postprocess(results, batched_inputs, image_sizes)
return f
class Caffe2RetinaNet(Caffe2MetaArch):
def __init__(self, cfg, torch_model):
assert isinstance(torch_model, meta_arch.RetinaNet)
super().__init__(cfg, torch_model)
@mock_torch_nn_functional_interpolate()
def forward(self, inputs):
assert self.tensor_mode
images = self._caffe2_preprocess_image(inputs)
# explicitly return the images sizes to avoid removing "im_info" by ONNX
# since it's not used in the forward path
return_tensors = [images.image_sizes]
features = self._wrapped_model.backbone(images.tensor)
features = [features[f] for f in self._wrapped_model.head_in_features]
for i, feature_i in enumerate(features):
features[i] = alias(feature_i, "feature_{}".format(i), is_backward=True)
return_tensors.append(features[i])
pred_logits, pred_anchor_deltas = self._wrapped_model.head(features)
for i, (box_cls_i, box_delta_i) in enumerate(zip(pred_logits, pred_anchor_deltas)):
return_tensors.append(alias(box_cls_i, "box_cls_{}".format(i)))
return_tensors.append(alias(box_delta_i, "box_delta_{}".format(i)))
return tuple(return_tensors)
def encode_additional_info(self, predict_net, init_net):
size_divisibility = self._wrapped_model.backbone.size_divisibility
check_set_pb_arg(predict_net, "size_divisibility", "i", size_divisibility)
check_set_pb_arg(
predict_net, "device", "s", str.encode(str(self._wrapped_model.device), "ascii")
)
check_set_pb_arg(predict_net, "meta_architecture", "s", b"RetinaNet")
# Inference parameters:
check_set_pb_arg(
predict_net, "score_threshold", "f", _cast_to_f32(self._wrapped_model.test_score_thresh)
)
check_set_pb_arg(
predict_net, "topk_candidates", "i", self._wrapped_model.test_topk_candidates
)
check_set_pb_arg(
predict_net, "nms_threshold", "f", _cast_to_f32(self._wrapped_model.test_nms_thresh)
)
check_set_pb_arg(
predict_net,
"max_detections_per_image",
"i",
self._wrapped_model.max_detections_per_image,
)
check_set_pb_arg(
predict_net,
"bbox_reg_weights",
"floats",
[_cast_to_f32(w) for w in self._wrapped_model.box2box_transform.weights],
)
self._encode_anchor_generator_cfg(predict_net)
def _encode_anchor_generator_cfg(self, predict_net):
# serialize anchor_generator for future use
serialized_anchor_generator = io.BytesIO()
torch.save(self._wrapped_model.anchor_generator, serialized_anchor_generator)
# Ideally we can put anchor generating inside the model, then we don't
# need to store this information.
bytes = serialized_anchor_generator.getvalue()
check_set_pb_arg(predict_net, "serialized_anchor_generator", "s", bytes)
@staticmethod
def get_outputs_converter(predict_net, init_net):
self = types.SimpleNamespace()
serialized_anchor_generator = io.BytesIO(
get_pb_arg_vals(predict_net, "serialized_anchor_generator", None)
)
self.anchor_generator = torch.load(serialized_anchor_generator)
bbox_reg_weights = get_pb_arg_floats(predict_net, "bbox_reg_weights", None)
self.box2box_transform = Box2BoxTransform(weights=tuple(bbox_reg_weights))
self.test_score_thresh = get_pb_arg_valf(predict_net, "score_threshold", None)
self.test_topk_candidates = get_pb_arg_vali(predict_net, "topk_candidates", None)
self.test_nms_thresh = get_pb_arg_valf(predict_net, "nms_threshold", None)
self.max_detections_per_image = get_pb_arg_vali(
predict_net, "max_detections_per_image", None
)
# hack to reuse inference code from RetinaNet
for meth in [
"forward_inference",
"inference_single_image",
"_transpose_dense_predictions",
"_decode_multi_level_predictions",
"_decode_per_level_predictions",
]:
setattr(self, meth, functools.partial(getattr(meta_arch.RetinaNet, meth), self))
def f(batched_inputs, c2_inputs, c2_results):
_, im_info = c2_inputs
image_sizes = [[int(im[0]), int(im[1])] for im in im_info]
dummy_images = ImageList(
torch.randn(
(
len(im_info),
3,
)
+ tuple(image_sizes[0])
),
image_sizes,
)
num_features = len([x for x in c2_results.keys() if x.startswith("box_cls_")])
pred_logits = [c2_results["box_cls_{}".format(i)] for i in range(num_features)]
pred_anchor_deltas = [c2_results["box_delta_{}".format(i)] for i in range(num_features)]
# For each feature level, feature should have the same batch size and
# spatial dimension as the box_cls and box_delta.
dummy_features = [x.clone()[:, 0:0, :, :] for x in pred_logits]
# self.num_classess can be inferred
self.num_classes = pred_logits[0].shape[1] // (pred_anchor_deltas[0].shape[1] // 4)
results = self.forward_inference(
dummy_images, dummy_features, [pred_logits, pred_anchor_deltas]
)
return meta_arch.GeneralizedRCNN._postprocess(results, batched_inputs, image_sizes)
return f
META_ARCH_CAFFE2_EXPORT_TYPE_MAP = {
"GeneralizedRCNN": Caffe2GeneralizedRCNN,
"RetinaNet": Caffe2RetinaNet,
} | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/export/caffe2_modeling.py | caffe2_modeling.py |
import copy
import logging
import os
import torch
from caffe2.proto import caffe2_pb2
from torch import nn
from detectron2.config import CfgNode
from detectron2.utils.file_io import PathManager
from .caffe2_inference import ProtobufDetectionModel
from .caffe2_modeling import META_ARCH_CAFFE2_EXPORT_TYPE_MAP, convert_batched_inputs_to_c2_format
from .shared import get_pb_arg_vali, get_pb_arg_vals, save_graph
__all__ = [
"add_export_config",
"Caffe2Model",
"Caffe2Tracer",
]
def add_export_config(cfg):
return cfg
class Caffe2Tracer:
"""
Make a detectron2 model traceable with Caffe2 operators.
This class creates a traceable version of a detectron2 model which:
1. Rewrite parts of the model using ops in Caffe2. Note that some ops do
not have GPU implementation in Caffe2.
2. Remove post-processing and only produce raw layer outputs
After making a traceable model, the class provide methods to export such a
model to different deployment formats.
Exported graph produced by this class take two input tensors:
1. (1, C, H, W) float "data" which is an image (usually in [0, 255]).
(H, W) often has to be padded to multiple of 32 (depend on the model
architecture).
2. 1x3 float "im_info", each row of which is (height, width, 1.0).
Height and width are true image shapes before padding.
The class currently only supports models using builtin meta architectures.
Batch inference is not supported, and contributions are welcome.
"""
def __init__(self, cfg: CfgNode, model: nn.Module, inputs):
"""
Args:
cfg (CfgNode): a detectron2 config used to construct caffe2-compatible model.
model (nn.Module): An original pytorch model. Must be among a few official models
in detectron2 that can be converted to become caffe2-compatible automatically.
Weights have to be already loaded to this model.
inputs: sample inputs that the given model takes for inference.
Will be used to trace the model. For most models, random inputs with
no detected objects will not work as they lead to wrong traces.
"""
assert isinstance(cfg, CfgNode), cfg
assert isinstance(model, torch.nn.Module), type(model)
# TODO make it support custom models, by passing in c2 model directly
C2MetaArch = META_ARCH_CAFFE2_EXPORT_TYPE_MAP[cfg.MODEL.META_ARCHITECTURE]
self.traceable_model = C2MetaArch(cfg, copy.deepcopy(model))
self.inputs = inputs
self.traceable_inputs = self.traceable_model.get_caffe2_inputs(inputs)
def export_caffe2(self):
"""
Export the model to Caffe2's protobuf format.
The returned object can be saved with its :meth:`.save_protobuf()` method.
The result can be loaded and executed using Caffe2 runtime.
Returns:
:class:`Caffe2Model`
"""
from .caffe2_export import export_caffe2_detection_model
predict_net, init_net = export_caffe2_detection_model(
self.traceable_model, self.traceable_inputs
)
return Caffe2Model(predict_net, init_net)
def export_onnx(self):
"""
Export the model to ONNX format.
Note that the exported model contains custom ops only available in caffe2, therefore it
cannot be directly executed by other runtime (such as onnxruntime or TensorRT).
Post-processing or transformation passes may be applied on the model to accommodate
different runtimes, but we currently do not provide support for them.
Returns:
onnx.ModelProto: an onnx model.
"""
from .caffe2_export import export_onnx_model as export_onnx_model_impl
return export_onnx_model_impl(self.traceable_model, (self.traceable_inputs,))
def export_torchscript(self):
"""
Export the model to a ``torch.jit.TracedModule`` by tracing.
The returned object can be saved to a file by ``.save()``.
Returns:
torch.jit.TracedModule: a torch TracedModule
"""
logger = logging.getLogger(__name__)
logger.info("Tracing the model with torch.jit.trace ...")
with torch.no_grad():
return torch.jit.trace(self.traceable_model, (self.traceable_inputs,))
class Caffe2Model(nn.Module):
"""
A wrapper around the traced model in Caffe2's protobuf format.
The exported graph has different inputs/outputs from the original Pytorch
model, as explained in :class:`Caffe2Tracer`. This class wraps around the
exported graph to simulate the same interface as the original Pytorch model.
It also provides functions to save/load models in Caffe2's format.'
Examples:
::
c2_model = Caffe2Tracer(cfg, torch_model, inputs).export_caffe2()
inputs = [{"image": img_tensor_CHW}]
outputs = c2_model(inputs)
orig_outputs = torch_model(inputs)
"""
def __init__(self, predict_net, init_net):
super().__init__()
self.eval() # always in eval mode
self._predict_net = predict_net
self._init_net = init_net
self._predictor = None
__init__.__HIDE_SPHINX_DOC__ = True
@property
def predict_net(self):
"""
caffe2.core.Net: the underlying caffe2 predict net
"""
return self._predict_net
@property
def init_net(self):
"""
caffe2.core.Net: the underlying caffe2 init net
"""
return self._init_net
def save_protobuf(self, output_dir):
"""
Save the model as caffe2's protobuf format.
It saves the following files:
* "model.pb": definition of the graph. Can be visualized with
tools like `netron <https://github.com/lutzroeder/netron>`_.
* "model_init.pb": model parameters
* "model.pbtxt": human-readable definition of the graph. Not
needed for deployment.
Args:
output_dir (str): the output directory to save protobuf files.
"""
logger = logging.getLogger(__name__)
logger.info("Saving model to {} ...".format(output_dir))
if not PathManager.exists(output_dir):
PathManager.mkdirs(output_dir)
with PathManager.open(os.path.join(output_dir, "model.pb"), "wb") as f:
f.write(self._predict_net.SerializeToString())
with PathManager.open(os.path.join(output_dir, "model.pbtxt"), "w") as f:
f.write(str(self._predict_net))
with PathManager.open(os.path.join(output_dir, "model_init.pb"), "wb") as f:
f.write(self._init_net.SerializeToString())
def save_graph(self, output_file, inputs=None):
"""
Save the graph as SVG format.
Args:
output_file (str): a SVG file
inputs: optional inputs given to the model.
If given, the inputs will be used to run the graph to record
shape of every tensor. The shape information will be
saved together with the graph.
"""
from .caffe2_export import run_and_save_graph
if inputs is None:
save_graph(self._predict_net, output_file, op_only=False)
else:
size_divisibility = get_pb_arg_vali(self._predict_net, "size_divisibility", 0)
device = get_pb_arg_vals(self._predict_net, "device", b"cpu").decode("ascii")
inputs = convert_batched_inputs_to_c2_format(inputs, size_divisibility, device)
inputs = [x.cpu().numpy() for x in inputs]
run_and_save_graph(self._predict_net, self._init_net, inputs, output_file)
@staticmethod
def load_protobuf(dir):
"""
Args:
dir (str): a directory used to save Caffe2Model with
:meth:`save_protobuf`.
The files "model.pb" and "model_init.pb" are needed.
Returns:
Caffe2Model: the caffe2 model loaded from this directory.
"""
predict_net = caffe2_pb2.NetDef()
with PathManager.open(os.path.join(dir, "model.pb"), "rb") as f:
predict_net.ParseFromString(f.read())
init_net = caffe2_pb2.NetDef()
with PathManager.open(os.path.join(dir, "model_init.pb"), "rb") as f:
init_net.ParseFromString(f.read())
return Caffe2Model(predict_net, init_net)
def __call__(self, inputs):
"""
An interface that wraps around a Caffe2 model and mimics detectron2's models'
input/output format. See details about the format at :doc:`/tutorials/models`.
This is used to compare the outputs of caffe2 model with its original torch model.
Due to the extra conversion between Pytorch/Caffe2, this method is not meant for
benchmark. Because of the conversion, this method also has dependency
on detectron2 in order to convert to detectron2's output format.
"""
if self._predictor is None:
self._predictor = ProtobufDetectionModel(self._predict_net, self._init_net)
return self._predictor(inputs) | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/export/api.py | api.py |
import math
import torch
import torch.nn.functional as F
from detectron2.layers import cat
from detectron2.layers.roi_align_rotated import ROIAlignRotated
from detectron2.modeling import poolers
from detectron2.modeling.proposal_generator import rpn
from detectron2.modeling.roi_heads.mask_head import mask_rcnn_inference
from detectron2.structures import Boxes, ImageList, Instances, Keypoints
from .shared import alias, to_device
"""
This file contains caffe2-compatible implementation of several detectron2 components.
"""
class Caffe2Boxes(Boxes):
"""
Representing a list of detectron2.structures.Boxes from minibatch, each box
is represented by a 5d vector (batch index + 4 coordinates), or a 6d vector
(batch index + 5 coordinates) for RotatedBoxes.
"""
def __init__(self, tensor):
assert isinstance(tensor, torch.Tensor)
assert tensor.dim() == 2 and tensor.size(-1) in [4, 5, 6], tensor.size()
# TODO: make tensor immutable when dim is Nx5 for Boxes,
# and Nx6 for RotatedBoxes?
self.tensor = tensor
# TODO clean up this class, maybe just extend Instances
class InstancesList(object):
"""
Tensor representation of a list of Instances object for a batch of images.
When dealing with a batch of images with Caffe2 ops, a list of bboxes
(instances) are usually represented by single Tensor with size
(sigma(Ni), 5) or (sigma(Ni), 4) plus a batch split Tensor. This class is
for providing common functions to convert between these two representations.
"""
def __init__(self, im_info, indices, extra_fields=None):
# [N, 3] -> (H, W, Scale)
self.im_info = im_info
# [N,] -> indice of batch to which the instance belongs
self.indices = indices
# [N, ...]
self.batch_extra_fields = extra_fields or {}
self.image_size = self.im_info
def get_fields(self):
"""like `get_fields` in the Instances object,
but return each field in tensor representations"""
ret = {}
for k, v in self.batch_extra_fields.items():
# if isinstance(v, torch.Tensor):
# tensor_rep = v
# elif isinstance(v, (Boxes, Keypoints)):
# tensor_rep = v.tensor
# else:
# raise ValueError("Can't find tensor representation for: {}".format())
ret[k] = v
return ret
def has(self, name):
return name in self.batch_extra_fields
def set(self, name, value):
data_len = len(value)
if len(self.batch_extra_fields):
assert (
len(self) == data_len
), "Adding a field of length {} to a Instances of length {}".format(data_len, len(self))
self.batch_extra_fields[name] = value
def __setattr__(self, name, val):
if name in ["im_info", "indices", "batch_extra_fields", "image_size"]:
super().__setattr__(name, val)
else:
self.set(name, val)
def __getattr__(self, name):
if name not in self.batch_extra_fields:
raise AttributeError("Cannot find field '{}' in the given Instances!".format(name))
return self.batch_extra_fields[name]
def __len__(self):
return len(self.indices)
def flatten(self):
ret = []
for _, v in self.batch_extra_fields.items():
if isinstance(v, (Boxes, Keypoints)):
ret.append(v.tensor)
else:
ret.append(v)
return ret
@staticmethod
def to_d2_instances_list(instances_list):
"""
Convert InstancesList to List[Instances]. The input `instances_list` can
also be a List[Instances], in this case this method is a non-op.
"""
if not isinstance(instances_list, InstancesList):
assert all(isinstance(x, Instances) for x in instances_list)
return instances_list
ret = []
for i, info in enumerate(instances_list.im_info):
instances = Instances(torch.Size([int(info[0].item()), int(info[1].item())]))
ids = instances_list.indices == i
for k, v in instances_list.batch_extra_fields.items():
if isinstance(v, torch.Tensor):
instances.set(k, v[ids])
continue
elif isinstance(v, Boxes):
instances.set(k, v[ids, -4:])
continue
target_type, tensor_source = v
assert isinstance(tensor_source, torch.Tensor)
assert tensor_source.shape[0] == instances_list.indices.shape[0]
tensor_source = tensor_source[ids]
if issubclass(target_type, Boxes):
instances.set(k, Boxes(tensor_source[:, -4:]))
elif issubclass(target_type, Keypoints):
instances.set(k, Keypoints(tensor_source))
elif issubclass(target_type, torch.Tensor):
instances.set(k, tensor_source)
else:
raise ValueError("Can't handle targe type: {}".format(target_type))
ret.append(instances)
return ret
class Caffe2Compatible(object):
"""
A model can inherit this class to indicate that it can be traced and deployed with caffe2.
"""
def _get_tensor_mode(self):
return self._tensor_mode
def _set_tensor_mode(self, v):
self._tensor_mode = v
tensor_mode = property(_get_tensor_mode, _set_tensor_mode)
"""
If true, the model expects C2-style tensor only inputs/outputs format.
"""
class Caffe2RPN(Caffe2Compatible, rpn.RPN):
def _generate_proposals(
self, images, objectness_logits_pred, anchor_deltas_pred, gt_instances=None
):
assert isinstance(images, ImageList)
if self.tensor_mode:
im_info = images.image_sizes
else:
im_info = torch.tensor([[im_sz[0], im_sz[1], 1.0] for im_sz in images.image_sizes]).to(
images.tensor.device
)
assert isinstance(im_info, torch.Tensor)
rpn_rois_list = []
rpn_roi_probs_list = []
for scores, bbox_deltas, cell_anchors_tensor, feat_stride in zip(
objectness_logits_pred,
anchor_deltas_pred,
iter(self.anchor_generator.cell_anchors),
self.anchor_generator.strides,
):
scores = scores.detach()
bbox_deltas = bbox_deltas.detach()
rpn_rois, rpn_roi_probs = torch.ops._caffe2.GenerateProposals(
scores,
bbox_deltas,
im_info,
cell_anchors_tensor,
spatial_scale=1.0 / feat_stride,
pre_nms_topN=self.pre_nms_topk[self.training],
post_nms_topN=self.post_nms_topk[self.training],
nms_thresh=self.nms_thresh,
min_size=self.min_box_size,
# correct_transform_coords=True, # deprecated argument
angle_bound_on=True, # Default
angle_bound_lo=-180,
angle_bound_hi=180,
clip_angle_thresh=1.0, # Default
legacy_plus_one=False,
)
rpn_rois_list.append(rpn_rois)
rpn_roi_probs_list.append(rpn_roi_probs)
# For FPN in D2, in RPN all proposals from different levels are concated
# together, ranked and picked by top post_nms_topk. Then in ROIPooler
# it calculates level_assignments and calls the RoIAlign from
# the corresponding level.
if len(objectness_logits_pred) == 1:
rpn_rois = rpn_rois_list[0]
rpn_roi_probs = rpn_roi_probs_list[0]
else:
assert len(rpn_rois_list) == len(rpn_roi_probs_list)
rpn_post_nms_topN = self.post_nms_topk[self.training]
device = rpn_rois_list[0].device
input_list = [to_device(x, "cpu") for x in (rpn_rois_list + rpn_roi_probs_list)]
# TODO remove this after confirming rpn_max_level/rpn_min_level
# is not needed in CollectRpnProposals.
feature_strides = list(self.anchor_generator.strides)
rpn_min_level = int(math.log2(feature_strides[0]))
rpn_max_level = int(math.log2(feature_strides[-1]))
assert (rpn_max_level - rpn_min_level + 1) == len(
rpn_rois_list
), "CollectRpnProposals requires continuous levels"
rpn_rois = torch.ops._caffe2.CollectRpnProposals(
input_list,
# NOTE: in current implementation, rpn_max_level and rpn_min_level
# are not needed, only the subtraction of two matters and it
# can be infer from the number of inputs. Keep them now for
# consistency.
rpn_max_level=2 + len(rpn_rois_list) - 1,
rpn_min_level=2,
rpn_post_nms_topN=rpn_post_nms_topN,
)
rpn_rois = to_device(rpn_rois, device)
rpn_roi_probs = []
proposals = self.c2_postprocess(im_info, rpn_rois, rpn_roi_probs, self.tensor_mode)
return proposals, {}
def forward(self, images, features, gt_instances=None):
assert not self.training
features = [features[f] for f in self.in_features]
objectness_logits_pred, anchor_deltas_pred = self.rpn_head(features)
return self._generate_proposals(
images,
objectness_logits_pred,
anchor_deltas_pred,
gt_instances,
)
@staticmethod
def c2_postprocess(im_info, rpn_rois, rpn_roi_probs, tensor_mode):
proposals = InstancesList(
im_info=im_info,
indices=rpn_rois[:, 0],
extra_fields={
"proposal_boxes": Caffe2Boxes(rpn_rois),
"objectness_logits": (torch.Tensor, rpn_roi_probs),
},
)
if not tensor_mode:
proposals = InstancesList.to_d2_instances_list(proposals)
else:
proposals = [proposals]
return proposals
class Caffe2ROIPooler(Caffe2Compatible, poolers.ROIPooler):
@staticmethod
def c2_preprocess(box_lists):
assert all(isinstance(x, Boxes) for x in box_lists)
if all(isinstance(x, Caffe2Boxes) for x in box_lists):
# input is pure-tensor based
assert len(box_lists) == 1
pooler_fmt_boxes = box_lists[0].tensor
else:
pooler_fmt_boxes = poolers.convert_boxes_to_pooler_format(box_lists)
return pooler_fmt_boxes
def forward(self, x, box_lists):
assert not self.training
pooler_fmt_boxes = self.c2_preprocess(box_lists)
num_level_assignments = len(self.level_poolers)
if num_level_assignments == 1:
if isinstance(self.level_poolers[0], ROIAlignRotated):
c2_roi_align = torch.ops._caffe2.RoIAlignRotated
aligned = True
else:
c2_roi_align = torch.ops._caffe2.RoIAlign
aligned = self.level_poolers[0].aligned
x0 = x[0]
if x0.is_quantized:
x0 = x0.dequantize()
out = c2_roi_align(
x0,
pooler_fmt_boxes,
order="NCHW",
spatial_scale=float(self.level_poolers[0].spatial_scale),
pooled_h=int(self.output_size[0]),
pooled_w=int(self.output_size[1]),
sampling_ratio=int(self.level_poolers[0].sampling_ratio),
aligned=aligned,
)
return out
device = pooler_fmt_boxes.device
assert (
self.max_level - self.min_level + 1 == 4
), "Currently DistributeFpnProposals only support 4 levels"
fpn_outputs = torch.ops._caffe2.DistributeFpnProposals(
to_device(pooler_fmt_boxes, "cpu"),
roi_canonical_scale=self.canonical_box_size,
roi_canonical_level=self.canonical_level,
roi_max_level=self.max_level,
roi_min_level=self.min_level,
legacy_plus_one=False,
)
fpn_outputs = [to_device(x, device) for x in fpn_outputs]
rois_fpn_list = fpn_outputs[:-1]
rois_idx_restore_int32 = fpn_outputs[-1]
roi_feat_fpn_list = []
for roi_fpn, x_level, pooler in zip(rois_fpn_list, x, self.level_poolers):
if isinstance(pooler, ROIAlignRotated):
c2_roi_align = torch.ops._caffe2.RoIAlignRotated
aligned = True
else:
c2_roi_align = torch.ops._caffe2.RoIAlign
aligned = bool(pooler.aligned)
if x_level.is_quantized:
x_level = x_level.dequantize()
roi_feat_fpn = c2_roi_align(
x_level,
roi_fpn,
order="NCHW",
spatial_scale=float(pooler.spatial_scale),
pooled_h=int(self.output_size[0]),
pooled_w=int(self.output_size[1]),
sampling_ratio=int(pooler.sampling_ratio),
aligned=aligned,
)
roi_feat_fpn_list.append(roi_feat_fpn)
roi_feat_shuffled = cat(roi_feat_fpn_list, dim=0)
assert roi_feat_shuffled.numel() > 0 and rois_idx_restore_int32.numel() > 0, (
"Caffe2 export requires tracing with a model checkpoint + input that can produce valid"
" detections. But no detections were obtained with the given checkpoint and input!"
)
roi_feat = torch.ops._caffe2.BatchPermutation(roi_feat_shuffled, rois_idx_restore_int32)
return roi_feat
class Caffe2FastRCNNOutputsInference:
def __init__(self, tensor_mode):
self.tensor_mode = tensor_mode # whether the output is caffe2 tensor mode
def __call__(self, box_predictor, predictions, proposals):
"""equivalent to FastRCNNOutputLayers.inference"""
num_classes = box_predictor.num_classes
score_thresh = box_predictor.test_score_thresh
nms_thresh = box_predictor.test_nms_thresh
topk_per_image = box_predictor.test_topk_per_image
is_rotated = len(box_predictor.box2box_transform.weights) == 5
if is_rotated:
box_dim = 5
assert box_predictor.box2box_transform.weights[4] == 1, (
"The weights for Rotated BBoxTransform in C2 have only 4 dimensions,"
+ " thus enforcing the angle weight to be 1 for now"
)
box2box_transform_weights = box_predictor.box2box_transform.weights[:4]
else:
box_dim = 4
box2box_transform_weights = box_predictor.box2box_transform.weights
class_logits, box_regression = predictions
if num_classes + 1 == class_logits.shape[1]:
class_prob = F.softmax(class_logits, -1)
else:
assert num_classes == class_logits.shape[1]
class_prob = F.sigmoid(class_logits)
# BoxWithNMSLimit will infer num_classes from the shape of the class_prob
# So append a zero column as placeholder for the background class
class_prob = torch.cat((class_prob, torch.zeros(class_prob.shape[0], 1)), dim=1)
assert box_regression.shape[1] % box_dim == 0
cls_agnostic_bbox_reg = box_regression.shape[1] // box_dim == 1
input_tensor_mode = proposals[0].proposal_boxes.tensor.shape[1] == box_dim + 1
rois = type(proposals[0].proposal_boxes).cat([p.proposal_boxes for p in proposals])
device, dtype = rois.tensor.device, rois.tensor.dtype
if input_tensor_mode:
im_info = proposals[0].image_size
rois = rois.tensor
else:
im_info = torch.tensor(
[[sz[0], sz[1], 1.0] for sz in [x.image_size for x in proposals]]
)
batch_ids = cat(
[
torch.full((b, 1), i, dtype=dtype, device=device)
for i, b in enumerate(len(p) for p in proposals)
],
dim=0,
)
rois = torch.cat([batch_ids, rois.tensor], dim=1)
roi_pred_bbox, roi_batch_splits = torch.ops._caffe2.BBoxTransform(
to_device(rois, "cpu"),
to_device(box_regression, "cpu"),
to_device(im_info, "cpu"),
weights=box2box_transform_weights,
apply_scale=True,
rotated=is_rotated,
angle_bound_on=True,
angle_bound_lo=-180,
angle_bound_hi=180,
clip_angle_thresh=1.0,
legacy_plus_one=False,
)
roi_pred_bbox = to_device(roi_pred_bbox, device)
roi_batch_splits = to_device(roi_batch_splits, device)
nms_outputs = torch.ops._caffe2.BoxWithNMSLimit(
to_device(class_prob, "cpu"),
to_device(roi_pred_bbox, "cpu"),
to_device(roi_batch_splits, "cpu"),
score_thresh=float(score_thresh),
nms=float(nms_thresh),
detections_per_im=int(topk_per_image),
soft_nms_enabled=False,
soft_nms_method="linear",
soft_nms_sigma=0.5,
soft_nms_min_score_thres=0.001,
rotated=is_rotated,
cls_agnostic_bbox_reg=cls_agnostic_bbox_reg,
input_boxes_include_bg_cls=False,
output_classes_include_bg_cls=False,
legacy_plus_one=False,
)
roi_score_nms = to_device(nms_outputs[0], device)
roi_bbox_nms = to_device(nms_outputs[1], device)
roi_class_nms = to_device(nms_outputs[2], device)
roi_batch_splits_nms = to_device(nms_outputs[3], device)
roi_keeps_nms = to_device(nms_outputs[4], device)
roi_keeps_size_nms = to_device(nms_outputs[5], device)
if not self.tensor_mode:
roi_class_nms = roi_class_nms.to(torch.int64)
roi_batch_ids = cat(
[
torch.full((b, 1), i, dtype=dtype, device=device)
for i, b in enumerate(int(x.item()) for x in roi_batch_splits_nms)
],
dim=0,
)
roi_class_nms = alias(roi_class_nms, "class_nms")
roi_score_nms = alias(roi_score_nms, "score_nms")
roi_bbox_nms = alias(roi_bbox_nms, "bbox_nms")
roi_batch_splits_nms = alias(roi_batch_splits_nms, "batch_splits_nms")
roi_keeps_nms = alias(roi_keeps_nms, "keeps_nms")
roi_keeps_size_nms = alias(roi_keeps_size_nms, "keeps_size_nms")
results = InstancesList(
im_info=im_info,
indices=roi_batch_ids[:, 0],
extra_fields={
"pred_boxes": Caffe2Boxes(roi_bbox_nms),
"scores": roi_score_nms,
"pred_classes": roi_class_nms,
},
)
if not self.tensor_mode:
results = InstancesList.to_d2_instances_list(results)
batch_splits = roi_batch_splits_nms.int().tolist()
kept_indices = list(roi_keeps_nms.to(torch.int64).split(batch_splits))
else:
results = [results]
kept_indices = [roi_keeps_nms]
return results, kept_indices
class Caffe2MaskRCNNInference:
def __call__(self, pred_mask_logits, pred_instances):
"""equivalent to mask_head.mask_rcnn_inference"""
if all(isinstance(x, InstancesList) for x in pred_instances):
assert len(pred_instances) == 1
mask_probs_pred = pred_mask_logits.sigmoid()
mask_probs_pred = alias(mask_probs_pred, "mask_fcn_probs")
pred_instances[0].pred_masks = mask_probs_pred
else:
mask_rcnn_inference(pred_mask_logits, pred_instances)
class Caffe2KeypointRCNNInference:
def __init__(self, use_heatmap_max_keypoint):
self.use_heatmap_max_keypoint = use_heatmap_max_keypoint
def __call__(self, pred_keypoint_logits, pred_instances):
# just return the keypoint heatmap for now,
# there will be option to call HeatmapMaxKeypointOp
output = alias(pred_keypoint_logits, "kps_score")
if all(isinstance(x, InstancesList) for x in pred_instances):
assert len(pred_instances) == 1
if self.use_heatmap_max_keypoint:
device = output.device
output = torch.ops._caffe2.HeatmapMaxKeypoint(
to_device(output, "cpu"),
pred_instances[0].pred_boxes.tensor,
should_output_softmax=True, # worth make it configerable?
)
output = to_device(output, device)
output = alias(output, "keypoints_out")
pred_instances[0].pred_keypoints = output
return pred_keypoint_logits | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/export/c10.py | c10.py |
import contextlib
from unittest import mock
import torch
from detectron2.modeling import poolers
from detectron2.modeling.proposal_generator import rpn
from detectron2.modeling.roi_heads import keypoint_head, mask_head
from detectron2.modeling.roi_heads.fast_rcnn import FastRCNNOutputLayers
from .c10 import (
Caffe2Compatible,
Caffe2FastRCNNOutputsInference,
Caffe2KeypointRCNNInference,
Caffe2MaskRCNNInference,
Caffe2ROIPooler,
Caffe2RPN,
)
class GenericMixin(object):
pass
class Caffe2CompatibleConverter(object):
"""
A GenericUpdater which implements the `create_from` interface, by modifying
module object and assign it with another class replaceCls.
"""
def __init__(self, replaceCls):
self.replaceCls = replaceCls
def create_from(self, module):
# update module's class to the new class
assert isinstance(module, torch.nn.Module)
if issubclass(self.replaceCls, GenericMixin):
# replaceCls should act as mixin, create a new class on-the-fly
new_class = type(
"{}MixedWith{}".format(self.replaceCls.__name__, module.__class__.__name__),
(self.replaceCls, module.__class__),
{}, # {"new_method": lambda self: ...},
)
module.__class__ = new_class
else:
# replaceCls is complete class, this allow arbitrary class swap
module.__class__ = self.replaceCls
# initialize Caffe2Compatible
if isinstance(module, Caffe2Compatible):
module.tensor_mode = False
return module
def patch(model, target, updater, *args, **kwargs):
"""
recursively (post-order) update all modules with the target type and its
subclasses, make a initialization/composition/inheritance/... via the
updater.create_from.
"""
for name, module in model.named_children():
model._modules[name] = patch(module, target, updater, *args, **kwargs)
if isinstance(model, target):
return updater.create_from(model, *args, **kwargs)
return model
def patch_generalized_rcnn(model):
ccc = Caffe2CompatibleConverter
model = patch(model, rpn.RPN, ccc(Caffe2RPN))
model = patch(model, poolers.ROIPooler, ccc(Caffe2ROIPooler))
return model
@contextlib.contextmanager
def mock_fastrcnn_outputs_inference(
tensor_mode, check=True, box_predictor_type=FastRCNNOutputLayers
):
with mock.patch.object(
box_predictor_type,
"inference",
autospec=True,
side_effect=Caffe2FastRCNNOutputsInference(tensor_mode),
) as mocked_func:
yield
if check:
assert mocked_func.call_count > 0
@contextlib.contextmanager
def mock_mask_rcnn_inference(tensor_mode, patched_module, check=True):
with mock.patch(
"{}.mask_rcnn_inference".format(patched_module), side_effect=Caffe2MaskRCNNInference()
) as mocked_func:
yield
if check:
assert mocked_func.call_count > 0
@contextlib.contextmanager
def mock_keypoint_rcnn_inference(tensor_mode, patched_module, use_heatmap_max_keypoint, check=True):
with mock.patch(
"{}.keypoint_rcnn_inference".format(patched_module),
side_effect=Caffe2KeypointRCNNInference(use_heatmap_max_keypoint),
) as mocked_func:
yield
if check:
assert mocked_func.call_count > 0
class ROIHeadsPatcher:
def __init__(self, heads, use_heatmap_max_keypoint):
self.heads = heads
self.use_heatmap_max_keypoint = use_heatmap_max_keypoint
@contextlib.contextmanager
def mock_roi_heads(self, tensor_mode=True):
"""
Patching several inference functions inside ROIHeads and its subclasses
Args:
tensor_mode (bool): whether the inputs/outputs are caffe2's tensor
format or not. Default to True.
"""
# NOTE: this requries the `keypoint_rcnn_inference` and `mask_rcnn_inference`
# are called inside the same file as BaseXxxHead due to using mock.patch.
kpt_heads_mod = keypoint_head.BaseKeypointRCNNHead.__module__
mask_head_mod = mask_head.BaseMaskRCNNHead.__module__
mock_ctx_managers = [
mock_fastrcnn_outputs_inference(
tensor_mode=tensor_mode,
check=True,
box_predictor_type=type(self.heads.box_predictor),
)
]
if getattr(self.heads, "keypoint_on", False):
mock_ctx_managers += [
mock_keypoint_rcnn_inference(
tensor_mode, kpt_heads_mod, self.use_heatmap_max_keypoint
)
]
if getattr(self.heads, "mask_on", False):
mock_ctx_managers += [mock_mask_rcnn_inference(tensor_mode, mask_head_mod)]
with contextlib.ExitStack() as stack: # python 3.3+
for mgr in mock_ctx_managers:
stack.enter_context(mgr)
yield | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/export/caffe2_patch.py | caffe2_patch.py |
import collections
from dataclasses import dataclass
from typing import Callable, List, Optional, Tuple
import torch
from torch import nn
from detectron2.structures import Boxes, Instances, ROIMasks
from detectron2.utils.registry import _convert_target_to_string, locate
from .torchscript_patch import patch_builtin_len
@dataclass
class Schema:
"""
A Schema defines how to flatten a possibly hierarchical object into tuple of
primitive objects, so it can be used as inputs/outputs of PyTorch's tracing.
PyTorch does not support tracing a function that produces rich output
structures (e.g. dict, Instances, Boxes). To trace such a function, we
flatten the rich object into tuple of tensors, and return this tuple of tensors
instead. Meanwhile, we also need to know how to "rebuild" the original object
from the flattened results, so we can evaluate the flattened results.
A Schema defines how to flatten an object, and while flattening it, it records
necessary schemas so that the object can be rebuilt using the flattened outputs.
The flattened object and the schema object is returned by ``.flatten`` classmethod.
Then the original object can be rebuilt with the ``__call__`` method of schema.
A Schema is a dataclass that can be serialized easily.
"""
# inspired by FetchMapper in tensorflow/python/client/session.py
@classmethod
def flatten(cls, obj):
raise NotImplementedError
def __call__(self, values):
raise NotImplementedError
@staticmethod
def _concat(values):
ret = ()
sizes = []
for v in values:
assert isinstance(v, tuple), "Flattened results must be a tuple"
ret = ret + v
sizes.append(len(v))
return ret, sizes
@staticmethod
def _split(values, sizes):
if len(sizes):
expected_len = sum(sizes)
assert (
len(values) == expected_len
), f"Values has length {len(values)} but expect length {expected_len}."
ret = []
for k in range(len(sizes)):
begin, end = sum(sizes[:k]), sum(sizes[: k + 1])
ret.append(values[begin:end])
return ret
@dataclass
class ListSchema(Schema):
schemas: List[Schema] # the schemas that define how to flatten each element in the list
sizes: List[int] # the flattened length of each element
def __call__(self, values):
values = self._split(values, self.sizes)
if len(values) != len(self.schemas):
raise ValueError(
f"Values has length {len(values)} but schemas " f"has length {len(self.schemas)}!"
)
values = [m(v) for m, v in zip(self.schemas, values)]
return list(values)
@classmethod
def flatten(cls, obj):
res = [flatten_to_tuple(k) for k in obj]
values, sizes = cls._concat([k[0] for k in res])
return values, cls([k[1] for k in res], sizes)
@dataclass
class TupleSchema(ListSchema):
def __call__(self, values):
return tuple(super().__call__(values))
@dataclass
class IdentitySchema(Schema):
def __call__(self, values):
return values[0]
@classmethod
def flatten(cls, obj):
return (obj,), cls()
@dataclass
class DictSchema(ListSchema):
keys: List[str]
def __call__(self, values):
values = super().__call__(values)
return dict(zip(self.keys, values))
@classmethod
def flatten(cls, obj):
for k in obj.keys():
if not isinstance(k, str):
raise KeyError("Only support flattening dictionaries if keys are str.")
keys = sorted(obj.keys())
values = [obj[k] for k in keys]
ret, schema = ListSchema.flatten(values)
return ret, cls(schema.schemas, schema.sizes, keys)
@dataclass
class InstancesSchema(DictSchema):
def __call__(self, values):
image_size, fields = values[-1], values[:-1]
fields = super().__call__(fields)
return Instances(image_size, **fields)
@classmethod
def flatten(cls, obj):
ret, schema = super().flatten(obj.get_fields())
size = obj.image_size
if not isinstance(size, torch.Tensor):
size = torch.tensor(size)
return ret + (size,), schema
@dataclass
class TensorWrapSchema(Schema):
"""
For classes that are simple wrapper of tensors, e.g.
Boxes, RotatedBoxes, BitMasks
"""
class_name: str
def __call__(self, values):
return locate(self.class_name)(values[0])
@classmethod
def flatten(cls, obj):
return (obj.tensor,), cls(_convert_target_to_string(type(obj)))
# if more custom structures needed in the future, can allow
# passing in extra schemas for custom types
def flatten_to_tuple(obj):
"""
Flatten an object so it can be used for PyTorch tracing.
Also returns how to rebuild the original object from the flattened outputs.
Returns:
res (tuple): the flattened results that can be used as tracing outputs
schema: an object with a ``__call__`` method such that ``schema(res) == obj``.
It is a pure dataclass that can be serialized.
"""
schemas = [
((str, bytes), IdentitySchema),
(list, ListSchema),
(tuple, TupleSchema),
(collections.abc.Mapping, DictSchema),
(Instances, InstancesSchema),
((Boxes, ROIMasks), TensorWrapSchema),
]
for klass, schema in schemas:
if isinstance(obj, klass):
F = schema
break
else:
F = IdentitySchema
return F.flatten(obj)
class TracingAdapter(nn.Module):
"""
A model may take rich input/output format (e.g. dict or custom classes),
but `torch.jit.trace` requires tuple of tensors as input/output.
This adapter flattens input/output format of a model so it becomes traceable.
It also records the necessary schema to rebuild model's inputs/outputs from flattened
inputs/outputs.
Example:
::
outputs = model(inputs) # inputs/outputs may be rich structure
adapter = TracingAdapter(model, inputs)
# can now trace the model, with adapter.flattened_inputs, or another
# tuple of tensors with the same length and meaning
traced = torch.jit.trace(adapter, adapter.flattened_inputs)
# traced model can only produce flattened outputs (tuple of tensors)
flattened_outputs = traced(*adapter.flattened_inputs)
# adapter knows the schema to convert it back (new_outputs == outputs)
new_outputs = adapter.outputs_schema(flattened_outputs)
"""
flattened_inputs: Tuple[torch.Tensor] = None
"""
Flattened version of inputs given to this class's constructor.
"""
inputs_schema: Schema = None
"""
Schema of the inputs given to this class's constructor.
"""
outputs_schema: Schema = None
"""
Schema of the output produced by calling the given model with inputs.
"""
def __init__(
self,
model: nn.Module,
inputs,
inference_func: Optional[Callable] = None,
allow_non_tensor: bool = False,
):
"""
Args:
model: an nn.Module
inputs: An input argument or a tuple of input arguments used to call model.
After flattening, it has to only consist of tensors.
inference_func: a callable that takes (model, *inputs), calls the
model with inputs, and return outputs. By default it
is ``lambda model, *inputs: model(*inputs)``. Can be override
if you need to call the model differently.
allow_non_tensor: allow inputs/outputs to contain non-tensor objects.
This option will filter out non-tensor objects to make the
model traceable, but ``inputs_schema``/``outputs_schema`` cannot be
used anymore because inputs/outputs cannot be rebuilt from pure tensors.
This is useful when you're only interested in the single trace of
execution (e.g. for flop count), but not interested in
generalizing the traced graph to new inputs.
"""
super().__init__()
if isinstance(model, (nn.parallel.distributed.DistributedDataParallel, nn.DataParallel)):
model = model.module
self.model = model
if not isinstance(inputs, tuple):
inputs = (inputs,)
self.inputs = inputs
self.allow_non_tensor = allow_non_tensor
if inference_func is None:
inference_func = lambda model, *inputs: model(*inputs) # noqa
self.inference_func = inference_func
self.flattened_inputs, self.inputs_schema = flatten_to_tuple(inputs)
if all(isinstance(x, torch.Tensor) for x in self.flattened_inputs):
return
if self.allow_non_tensor:
self.flattened_inputs = tuple(
[x for x in self.flattened_inputs if isinstance(x, torch.Tensor)]
)
self.inputs_schema = None
else:
for input in self.flattened_inputs:
if not isinstance(input, torch.Tensor):
raise ValueError(
"Inputs for tracing must only contain tensors. "
f"Got a {type(input)} instead."
)
def forward(self, *args: torch.Tensor):
with torch.no_grad(), patch_builtin_len():
if self.inputs_schema is not None:
inputs_orig_format = self.inputs_schema(args)
else:
if len(args) != len(self.flattened_inputs) or any(
x is not y for x, y in zip(args, self.flattened_inputs)
):
raise ValueError(
"TracingAdapter does not contain valid inputs_schema."
" So it cannot generalize to other inputs and must be"
" traced with `.flattened_inputs`."
)
inputs_orig_format = self.inputs
outputs = self.inference_func(self.model, *inputs_orig_format)
flattened_outputs, schema = flatten_to_tuple(outputs)
flattened_output_tensors = tuple(
[x for x in flattened_outputs if isinstance(x, torch.Tensor)]
)
if len(flattened_output_tensors) < len(flattened_outputs):
if self.allow_non_tensor:
flattened_outputs = flattened_output_tensors
self.outputs_schema = None
else:
raise ValueError(
"Model cannot be traced because some model outputs "
"cannot flatten to tensors."
)
else: # schema is valid
if self.outputs_schema is None:
self.outputs_schema = schema
else:
assert self.outputs_schema == schema, (
"Model should always return outputs with the same "
"structure so it can be traced!"
)
return flattened_outputs
def _create_wrapper(self, traced_model):
"""
Return a function that has an input/output interface the same as the
original model, but it calls the given traced model under the hood.
"""
def forward(*args):
flattened_inputs, _ = flatten_to_tuple(args)
flattened_outputs = traced_model(*flattened_inputs)
return self.outputs_schema(flattened_outputs)
return forward | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/export/flatten.py | flatten.py |
import copy
import itertools
import numpy as np
from typing import Any, Iterator, List, Union
#import pycocotools.mask as mask_util
import torch
from torch import device
from detectron2.layers.roi_align import ROIAlign
from detectron2.utils.memory import retry_if_cuda_oom
from .boxes import Boxes
def polygon_area(x, y):
# Using the shoelace formula
# https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
'''def polygons_to_bitmask(polygons: List[np.ndarray], height: int, width: int) -> np.ndarray:
"""
Args:
polygons (list[ndarray]): each array has shape (Nx2,)
height, width (int)
Returns:
ndarray: a bool mask of shape (height, width)
"""
if len(polygons) == 0:
# COCOAPI does not support empty polygons
return np.zeros((height, width)).astype(np.bool)
rles = mask_util.frPyObjects(polygons, height, width)
rle = mask_util.merge(rles)
return mask_util.decode(rle).astype(np.bool)'''
def rasterize_polygons_within_box(
polygons: List[np.ndarray], box: np.ndarray, mask_size: int
) -> torch.Tensor:
"""
Rasterize the polygons into a mask image and
crop the mask content in the given box.
The cropped mask is resized to (mask_size, mask_size).
This function is used when generating training targets for mask head in Mask R-CNN.
Given original ground-truth masks for an image, new ground-truth mask
training targets in the size of `mask_size x mask_size`
must be provided for each predicted box. This function will be called to
produce such targets.
Args:
polygons (list[ndarray[float]]): a list of polygons, which represents an instance.
box: 4-element numpy array
mask_size (int):
Returns:
Tensor: BoolTensor of shape (mask_size, mask_size)
"""
# 1. Shift the polygons w.r.t the boxes
w, h = box[2] - box[0], box[3] - box[1]
polygons = copy.deepcopy(polygons)
for p in polygons:
p[0::2] = p[0::2] - box[0]
p[1::2] = p[1::2] - box[1]
# 2. Rescale the polygons to the new box size
# max() to avoid division by small number
ratio_h = mask_size / max(h, 0.1)
ratio_w = mask_size / max(w, 0.1)
if ratio_h == ratio_w:
for p in polygons:
p *= ratio_h
else:
for p in polygons:
p[0::2] *= ratio_w
p[1::2] *= ratio_h
# 3. Rasterize the polygons with coco api
mask = polygons_to_bitmask(polygons, mask_size, mask_size)
mask = torch.from_numpy(mask)
return mask
class BitMasks:
"""
This class stores the segmentation masks for all objects in one image, in
the form of bitmaps.
Attributes:
tensor: bool Tensor of N,H,W, representing N instances in the image.
"""
def __init__(self, tensor: Union[torch.Tensor, np.ndarray]):
"""
Args:
tensor: bool Tensor of N,H,W, representing N instances in the image.
"""
device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu")
tensor = torch.as_tensor(tensor, dtype=torch.bool, device=device)
assert tensor.dim() == 3, tensor.size()
self.image_size = tensor.shape[1:]
self.tensor = tensor
@torch.jit.unused
def to(self, *args: Any, **kwargs: Any) -> "BitMasks":
return BitMasks(self.tensor.to(*args, **kwargs))
@property
def device(self) -> torch.device:
return self.tensor.device
@torch.jit.unused
def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "BitMasks":
"""
Returns:
BitMasks: Create a new :class:`BitMasks` by indexing.
The following usage are allowed:
1. `new_masks = masks[3]`: return a `BitMasks` which contains only one mask.
2. `new_masks = masks[2:10]`: return a slice of masks.
3. `new_masks = masks[vector]`, where vector is a torch.BoolTensor
with `length = len(masks)`. Nonzero elements in the vector will be selected.
Note that the returned object might share storage with this object,
subject to Pytorch's indexing semantics.
"""
if isinstance(item, int):
return BitMasks(self.tensor[item].unsqueeze(0))
m = self.tensor[item]
assert m.dim() == 3, "Indexing on BitMasks with {} returns a tensor with shape {}!".format(
item, m.shape
)
return BitMasks(m)
@torch.jit.unused
def __iter__(self) -> torch.Tensor:
yield from self.tensor
@torch.jit.unused
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={})".format(len(self.tensor))
return s
def __len__(self) -> int:
return self.tensor.shape[0]
def nonempty(self) -> torch.Tensor:
"""
Find masks that are non-empty.
Returns:
Tensor: a BoolTensor which represents
whether each mask is empty (False) or non-empty (True).
"""
return self.tensor.flatten(1).any(dim=1)
@staticmethod
def from_polygon_masks(
polygon_masks: Union["PolygonMasks", List[List[np.ndarray]]], height: int, width: int
) -> "BitMasks":
"""
Args:
polygon_masks (list[list[ndarray]] or PolygonMasks)
height, width (int)
"""
if isinstance(polygon_masks, PolygonMasks):
polygon_masks = polygon_masks.polygons
masks = [polygons_to_bitmask(p, height, width) for p in polygon_masks]
if len(masks):
return BitMasks(torch.stack([torch.from_numpy(x) for x in masks]))
else:
return BitMasks(torch.empty(0, height, width, dtype=torch.bool))
@staticmethod
def from_roi_masks(roi_masks: "ROIMasks", height: int, width: int) -> "BitMasks":
"""
Args:
roi_masks:
height, width (int):
"""
return roi_masks.to_bitmasks(height, width)
def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:
"""
Crop each bitmask by the given box, and resize results to (mask_size, mask_size).
This can be used to prepare training targets for Mask R-CNN.
It has less reconstruction error compared to rasterization with polygons.
However we observe no difference in accuracy,
but BitMasks requires more memory to store all the masks.
Args:
boxes (Tensor): Nx4 tensor storing the boxes for each mask
mask_size (int): the size of the rasterized mask.
Returns:
Tensor:
A bool tensor of shape (N, mask_size, mask_size), where
N is the number of predicted boxes for this image.
"""
assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self))
device = self.tensor.device
batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[:, None]
rois = torch.cat([batch_inds, boxes], dim=1) # Nx5
bit_masks = self.tensor.to(dtype=torch.float32)
rois = rois.to(device=device)
output = (
ROIAlign((mask_size, mask_size), 1.0, 0, aligned=True)
.forward(bit_masks[:, None, :, :], rois)
.squeeze(1)
)
output = output >= 0.5
return output
def get_bounding_boxes(self) -> Boxes:
"""
Returns:
Boxes: tight bounding boxes around bitmasks.
If a mask is empty, it's bounding box will be all zero.
"""
boxes = torch.zeros(self.tensor.shape[0], 4, dtype=torch.float32)
x_any = torch.any(self.tensor, dim=1)
y_any = torch.any(self.tensor, dim=2)
for idx in range(self.tensor.shape[0]):
x = torch.where(x_any[idx, :])[0]
y = torch.where(y_any[idx, :])[0]
if len(x) > 0 and len(y) > 0:
boxes[idx, :] = torch.as_tensor(
[x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=torch.float32
)
return Boxes(boxes)
@staticmethod
def cat(bitmasks_list: List["BitMasks"]) -> "BitMasks":
"""
Concatenates a list of BitMasks into a single BitMasks
Arguments:
bitmasks_list (list[BitMasks])
Returns:
BitMasks: the concatenated BitMasks
"""
assert isinstance(bitmasks_list, (list, tuple))
assert len(bitmasks_list) > 0
assert all(isinstance(bitmask, BitMasks) for bitmask in bitmasks_list)
cat_bitmasks = type(bitmasks_list[0])(torch.cat([bm.tensor for bm in bitmasks_list], dim=0))
return cat_bitmasks
class PolygonMasks:
"""
This class stores the segmentation masks for all objects in one image, in the form of polygons.
Attributes:
polygons: list[list[ndarray]]. Each ndarray is a float64 vector representing a polygon.
"""
def __init__(self, polygons: List[List[Union[torch.Tensor, np.ndarray]]]):
"""
Arguments:
polygons (list[list[np.ndarray]]): The first
level of the list correspond to individual instances,
the second level to all the polygons that compose the
instance, and the third level to the polygon coordinates.
The third level array should have the format of
[x0, y0, x1, y1, ..., xn, yn] (n >= 3).
"""
if not isinstance(polygons, list):
raise ValueError(
"Cannot create PolygonMasks: Expect a list of list of polygons per image. "
"Got '{}' instead.".format(type(polygons))
)
def _make_array(t: Union[torch.Tensor, np.ndarray]) -> np.ndarray:
# Use float64 for higher precision, because why not?
# Always put polygons on CPU (self.to is a no-op) since they
# are supposed to be small tensors.
# May need to change this assumption if GPU placement becomes useful
if isinstance(t, torch.Tensor):
t = t.cpu().numpy()
return np.asarray(t).astype("float64")
def process_polygons(
polygons_per_instance: List[Union[torch.Tensor, np.ndarray]]
) -> List[np.ndarray]:
if not isinstance(polygons_per_instance, list):
raise ValueError(
"Cannot create polygons: Expect a list of polygons per instance. "
"Got '{}' instead.".format(type(polygons_per_instance))
)
# transform each polygon to a numpy array
polygons_per_instance = [_make_array(p) for p in polygons_per_instance]
for polygon in polygons_per_instance:
if len(polygon) % 2 != 0 or len(polygon) < 6:
raise ValueError(f"Cannot create a polygon from {len(polygon)} coordinates.")
return polygons_per_instance
self.polygons: List[List[np.ndarray]] = [
process_polygons(polygons_per_instance) for polygons_per_instance in polygons
]
def to(self, *args: Any, **kwargs: Any) -> "PolygonMasks":
return self
@property
def device(self) -> torch.device:
return torch.device("cpu")
def get_bounding_boxes(self) -> Boxes:
"""
Returns:
Boxes: tight bounding boxes around polygon masks.
"""
boxes = torch.zeros(len(self.polygons), 4, dtype=torch.float32)
for idx, polygons_per_instance in enumerate(self.polygons):
minxy = torch.as_tensor([float("inf"), float("inf")], dtype=torch.float32)
maxxy = torch.zeros(2, dtype=torch.float32)
for polygon in polygons_per_instance:
coords = torch.from_numpy(polygon).view(-1, 2).to(dtype=torch.float32)
minxy = torch.min(minxy, torch.min(coords, dim=0).values)
maxxy = torch.max(maxxy, torch.max(coords, dim=0).values)
boxes[idx, :2] = minxy
boxes[idx, 2:] = maxxy
return Boxes(boxes)
def nonempty(self) -> torch.Tensor:
"""
Find masks that are non-empty.
Returns:
Tensor:
a BoolTensor which represents whether each mask is empty (False) or not (True).
"""
keep = [1 if len(polygon) > 0 else 0 for polygon in self.polygons]
return torch.from_numpy(np.asarray(keep, dtype=np.bool))
def __getitem__(self, item: Union[int, slice, List[int], torch.BoolTensor]) -> "PolygonMasks":
"""
Support indexing over the instances and return a `PolygonMasks` object.
`item` can be:
1. An integer. It will return an object with only one instance.
2. A slice. It will return an object with the selected instances.
3. A list[int]. It will return an object with the selected instances,
correpsonding to the indices in the list.
4. A vector mask of type BoolTensor, whose length is num_instances.
It will return an object with the instances whose mask is nonzero.
"""
if isinstance(item, int):
selected_polygons = [self.polygons[item]]
elif isinstance(item, slice):
selected_polygons = self.polygons[item]
elif isinstance(item, list):
selected_polygons = [self.polygons[i] for i in item]
elif isinstance(item, torch.Tensor):
# Polygons is a list, so we have to move the indices back to CPU.
if item.dtype == torch.bool:
assert item.dim() == 1, item.shape
item = item.nonzero().squeeze(1).cpu().numpy().tolist()
elif item.dtype in [torch.int32, torch.int64]:
item = item.cpu().numpy().tolist()
else:
raise ValueError("Unsupported tensor dtype={} for indexing!".format(item.dtype))
selected_polygons = [self.polygons[i] for i in item]
return PolygonMasks(selected_polygons)
def __iter__(self) -> Iterator[List[np.ndarray]]:
"""
Yields:
list[ndarray]: the polygons for one instance.
Each Tensor is a float64 vector representing a polygon.
"""
return iter(self.polygons)
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={})".format(len(self.polygons))
return s
def __len__(self) -> int:
return len(self.polygons)
def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:
"""
Crop each mask by the given box, and resize results to (mask_size, mask_size).
This can be used to prepare training targets for Mask R-CNN.
Args:
boxes (Tensor): Nx4 tensor storing the boxes for each mask
mask_size (int): the size of the rasterized mask.
Returns:
Tensor: A bool tensor of shape (N, mask_size, mask_size), where
N is the number of predicted boxes for this image.
"""
assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self))
device = boxes.device
# Put boxes on the CPU, as the polygon representation is not efficient GPU-wise
# (several small tensors for representing a single instance mask)
boxes = boxes.to(torch.device("cpu"))
results = [
rasterize_polygons_within_box(poly, box.numpy(), mask_size)
for poly, box in zip(self.polygons, boxes)
]
"""
poly: list[list[float]], the polygons for one instance
box: a tensor of shape (4,)
"""
if len(results) == 0:
return torch.empty(0, mask_size, mask_size, dtype=torch.bool, device=device)
return torch.stack(results, dim=0).to(device=device)
def area(self):
"""
Computes area of the mask.
Only works with Polygons, using the shoelace formula:
https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
Returns:
Tensor: a vector, area for each instance
"""
area = []
for polygons_per_instance in self.polygons:
area_per_instance = 0
for p in polygons_per_instance:
area_per_instance += polygon_area(p[0::2], p[1::2])
area.append(area_per_instance)
return torch.tensor(area)
@staticmethod
def cat(polymasks_list: List["PolygonMasks"]) -> "PolygonMasks":
"""
Concatenates a list of PolygonMasks into a single PolygonMasks
Arguments:
polymasks_list (list[PolygonMasks])
Returns:
PolygonMasks: the concatenated PolygonMasks
"""
assert isinstance(polymasks_list, (list, tuple))
assert len(polymasks_list) > 0
assert all(isinstance(polymask, PolygonMasks) for polymask in polymasks_list)
cat_polymasks = type(polymasks_list[0])(
list(itertools.chain.from_iterable(pm.polygons for pm in polymasks_list))
)
return cat_polymasks
class ROIMasks:
"""
Represent masks by N smaller masks defined in some ROIs. Once ROI boxes are given,
full-image bitmask can be obtained by "pasting" the mask on the region defined
by the corresponding ROI box.
"""
def __init__(self, tensor: torch.Tensor):
"""
Args:
tensor: (N, M, M) mask tensor that defines the mask within each ROI.
"""
if tensor.dim() != 3:
raise ValueError("ROIMasks must take a masks of 3 dimension.")
self.tensor = tensor
def to(self, device: torch.device) -> "ROIMasks":
return ROIMasks(self.tensor.to(device))
@property
def device(self) -> device:
return self.tensor.device
def __len__(self):
return self.tensor.shape[0]
def __getitem__(self, item) -> "ROIMasks":
"""
Returns:
ROIMasks: Create a new :class:`ROIMasks` by indexing.
The following usage are allowed:
1. `new_masks = masks[2:10]`: return a slice of masks.
2. `new_masks = masks[vector]`, where vector is a torch.BoolTensor
with `length = len(masks)`. Nonzero elements in the vector will be selected.
Note that the returned object might share storage with this object,
subject to Pytorch's indexing semantics.
"""
t = self.tensor[item]
if t.dim() != 3:
raise ValueError(
f"Indexing on ROIMasks with {item} returns a tensor with shape {t.shape}!"
)
return ROIMasks(t)
@torch.jit.unused
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={})".format(len(self.tensor))
return s
@torch.jit.unused
def to_bitmasks(self, boxes: torch.Tensor, height, width, threshold=0.5):
"""
Args: see documentation of :func:`paste_masks_in_image`.
"""
from detectron2.layers.mask_ops import paste_masks_in_image, _paste_masks_tensor_shape
if torch.jit.is_tracing():
if isinstance(height, torch.Tensor):
paste_func = _paste_masks_tensor_shape
else:
paste_func = paste_masks_in_image
else:
paste_func = retry_if_cuda_oom(paste_masks_in_image)
bitmasks = paste_func(self.tensor, boxes.tensor, (height, width), threshold=threshold)
return BitMasks(bitmasks) | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/structures/masks.py | masks.py |
import itertools
from typing import Any, Dict, List, Tuple, Union
import torch
class Instances:
"""
This class represents a list of instances in an image.
It stores the attributes of instances (e.g., boxes, masks, labels, scores) as "fields".
All fields must have the same ``__len__`` which is the number of instances.
All other (non-field) attributes of this class are considered private:
they must start with '_' and are not modifiable by a user.
Some basic usage:
1. Set/get/check a field:
.. code-block:: python
instances.gt_boxes = Boxes(...)
print(instances.pred_masks) # a tensor of shape (N, H, W)
print('gt_masks' in instances)
2. ``len(instances)`` returns the number of instances
3. Indexing: ``instances[indices]`` will apply the indexing on all the fields
and returns a new :class:`Instances`.
Typically, ``indices`` is a integer vector of indices,
or a binary mask of length ``num_instances``
.. code-block:: python
category_3_detections = instances[instances.pred_classes == 3]
confident_detections = instances[instances.scores > 0.9]
"""
def __init__(self, image_size: Tuple[int, int], **kwargs: Any):
"""
Args:
image_size (height, width): the spatial size of the image.
kwargs: fields to add to this `Instances`.
"""
self._image_size = image_size
self._fields: Dict[str, Any] = {}
for k, v in kwargs.items():
self.set(k, v)
@property
def image_size(self) -> Tuple[int, int]:
"""
Returns:
tuple: height, width
"""
return self._image_size
def __setattr__(self, name: str, val: Any) -> None:
if name.startswith("_"):
super().__setattr__(name, val)
else:
self.set(name, val)
def __getattr__(self, name: str) -> Any:
if name == "_fields" or name not in self._fields:
raise AttributeError("Cannot find field '{}' in the given Instances!".format(name))
return self._fields[name]
def set(self, name: str, value: Any) -> None:
"""
Set the field named `name` to `value`.
The length of `value` must be the number of instances,
and must agree with other existing fields in this object.
"""
data_len = len(value)
if len(self._fields):
assert (
len(self) == data_len
), "Adding a field of length {} to a Instances of length {}".format(data_len, len(self))
self._fields[name] = value
def has(self, name: str) -> bool:
"""
Returns:
bool: whether the field called `name` exists.
"""
return name in self._fields
def remove(self, name: str) -> None:
"""
Remove the field called `name`.
"""
del self._fields[name]
def get(self, name: str) -> Any:
"""
Returns the field called `name`.
"""
return self._fields[name]
def get_fields(self) -> Dict[str, Any]:
"""
Returns:
dict: a dict which maps names (str) to data of the fields
Modifying the returned dict will modify this instance.
"""
return self._fields
# Tensor-like methods
def to(self, *args: Any, **kwargs: Any) -> "Instances":
"""
Returns:
Instances: all fields are called with a `to(device)`, if the field has this method.
"""
ret = Instances(self._image_size)
for k, v in self._fields.items():
if hasattr(v, "to"):
v = v.to(*args, **kwargs)
ret.set(k, v)
return ret
def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "Instances":
"""
Args:
item: an index-like object and will be used to index all the fields.
Returns:
If `item` is a string, return the data in the corresponding field.
Otherwise, returns an `Instances` where all fields are indexed by `item`.
"""
if type(item) == int:
if item >= len(self) or item < -len(self):
raise IndexError("Instances index out of range!")
else:
item = slice(item, None, len(self))
ret = Instances(self._image_size)
for k, v in self._fields.items():
ret.set(k, v[item])
return ret
def __len__(self) -> int:
for v in self._fields.values():
# use __len__ because len() has to be int and is not friendly to tracing
return v.__len__()
raise NotImplementedError("Empty Instances does not support __len__!")
def __iter__(self):
raise NotImplementedError("`Instances` object is not iterable!")
@staticmethod
def cat(instance_lists: List["Instances"]) -> "Instances":
"""
Args:
instance_lists (list[Instances])
Returns:
Instances
"""
assert all(isinstance(i, Instances) for i in instance_lists)
assert len(instance_lists) > 0
if len(instance_lists) == 1:
return instance_lists[0]
image_size = instance_lists[0].image_size
if not isinstance(image_size, torch.Tensor): # could be a tensor in tracing
for i in instance_lists[1:]:
assert i.image_size == image_size
ret = Instances(image_size)
for k in instance_lists[0]._fields.keys():
values = [i.get(k) for i in instance_lists]
v0 = values[0]
if isinstance(v0, torch.Tensor):
values = torch.cat(values, dim=0)
elif isinstance(v0, list):
values = list(itertools.chain(*values))
elif hasattr(type(v0), "cat"):
values = type(v0).cat(values)
else:
raise ValueError("Unsupported type {} for concatenation".format(type(v0)))
ret.set(k, values)
return ret
def __str__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={}, ".format(len(self))
s += "image_height={}, ".format(self._image_size[0])
s += "image_width={}, ".format(self._image_size[1])
s += "fields=[{}])".format(", ".join((f"{k}: {v}" for k, v in self._fields.items())))
return s
__repr__ = __str__ | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/structures/instances.py | instances.py |
from __future__ import division
from typing import Any, List, Tuple
import torch
from torch import device
from torch.nn import functional as F
from detectron2.layers.wrappers import shapes_to_tensor
class ImageList(object):
"""
Structure that holds a list of images (of possibly
varying sizes) as a single tensor.
This works by padding the images to the same size.
The original sizes of each image is stored in `image_sizes`.
Attributes:
image_sizes (list[tuple[int, int]]): each tuple is (h, w).
During tracing, it becomes list[Tensor] instead.
"""
def __init__(self, tensor: torch.Tensor, image_sizes: List[Tuple[int, int]]):
"""
Arguments:
tensor (Tensor): of shape (N, H, W) or (N, C_1, ..., C_K, H, W) where K >= 1
image_sizes (list[tuple[int, int]]): Each tuple is (h, w). It can
be smaller than (H, W) due to padding.
"""
self.tensor = tensor
self.image_sizes = image_sizes
def __len__(self) -> int:
return len(self.image_sizes)
def __getitem__(self, idx) -> torch.Tensor:
"""
Access the individual image in its original size.
Args:
idx: int or slice
Returns:
Tensor: an image of shape (H, W) or (C_1, ..., C_K, H, W) where K >= 1
"""
size = self.image_sizes[idx]
return self.tensor[idx, ..., : size[0], : size[1]]
@torch.jit.unused
def to(self, *args: Any, **kwargs: Any) -> "ImageList":
cast_tensor = self.tensor.to(*args, **kwargs)
return ImageList(cast_tensor, self.image_sizes)
@property
def device(self) -> device:
return self.tensor.device
@staticmethod
def from_tensors(
tensors: List[torch.Tensor], size_divisibility: int = 0, pad_value: float = 0.0
) -> "ImageList":
"""
Args:
tensors: a tuple or list of `torch.Tensor`, each of shape (Hi, Wi) or
(C_1, ..., C_K, Hi, Wi) where K >= 1. The Tensors will be padded
to the same shape with `pad_value`.
size_divisibility (int): If `size_divisibility > 0`, add padding to ensure
the common height and width is divisible by `size_divisibility`.
This depends on the model and many models need a divisibility of 32.
pad_value (float): value to pad
Returns:
an `ImageList`.
"""
assert len(tensors) > 0
assert isinstance(tensors, (tuple, list))
for t in tensors:
assert isinstance(t, torch.Tensor), type(t)
assert t.shape[:-2] == tensors[0].shape[:-2], t.shape
image_sizes = [(im.shape[-2], im.shape[-1]) for im in tensors]
image_sizes_tensor = [shapes_to_tensor(x) for x in image_sizes]
max_size = torch.stack(image_sizes_tensor).max(0).values
if size_divisibility > 1:
stride = size_divisibility
# the last two dims are H,W, both subject to divisibility requirement
max_size = (max_size + (stride - 1)).div(stride, rounding_mode="floor") * stride
# handle weirdness of scripting and tracing ...
if torch.jit.is_scripting():
max_size: List[int] = max_size.to(dtype=torch.long).tolist()
else:
if torch.jit.is_tracing():
image_sizes = image_sizes_tensor
if len(tensors) == 1:
# This seems slightly (2%) faster.
# TODO: check whether it's faster for multiple images as well
image_size = image_sizes[0]
padding_size = [0, max_size[-1] - image_size[1], 0, max_size[-2] - image_size[0]]
batched_imgs = F.pad(tensors[0], padding_size, value=pad_value).unsqueeze_(0)
else:
# max_size can be a tensor in tracing mode, therefore convert to list
batch_shape = [len(tensors)] + list(tensors[0].shape[:-2]) + list(max_size)
batched_imgs = tensors[0].new_full(batch_shape, pad_value)
for img, pad_img in zip(tensors, batched_imgs):
pad_img[..., : img.shape[-2], : img.shape[-1]].copy_(img)
return ImageList(batched_imgs.contiguous(), image_sizes) | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/structures/image_list.py | image_list.py |
import math
from typing import List, Tuple
import torch
from detectron2.layers.rotated_boxes import pairwise_iou_rotated
from .boxes import Boxes
class RotatedBoxes(Boxes):
"""
This structure stores a list of rotated boxes as a Nx5 torch.Tensor.
It supports some common methods about boxes
(`area`, `clip`, `nonempty`, etc),
and also behaves like a Tensor
(support indexing, `to(device)`, `.device`, and iteration over all boxes)
"""
def __init__(self, tensor: torch.Tensor):
"""
Args:
tensor (Tensor[float]): a Nx5 matrix. Each row is
(x_center, y_center, width, height, angle),
in which angle is represented in degrees.
While there's no strict range restriction for it,
the recommended principal range is between [-180, 180) degrees.
Assume we have a horizontal box B = (x_center, y_center, width, height),
where width is along the x-axis and height is along the y-axis.
The rotated box B_rot (x_center, y_center, width, height, angle)
can be seen as:
1. When angle == 0:
B_rot == B
2. When angle > 0:
B_rot is obtained by rotating B w.r.t its center by :math:`|angle|` degrees CCW;
3. When angle < 0:
B_rot is obtained by rotating B w.r.t its center by :math:`|angle|` degrees CW.
Mathematically, since the right-handed coordinate system for image space
is (y, x), where y is top->down and x is left->right, the 4 vertices of the
rotated rectangle :math:`(yr_i, xr_i)` (i = 1, 2, 3, 4) can be obtained from
the vertices of the horizontal rectangle :math:`(y_i, x_i)` (i = 1, 2, 3, 4)
in the following way (:math:`\\theta = angle*\\pi/180` is the angle in radians,
:math:`(y_c, x_c)` is the center of the rectangle):
.. math::
yr_i = \\cos(\\theta) (y_i - y_c) - \\sin(\\theta) (x_i - x_c) + y_c,
xr_i = \\sin(\\theta) (y_i - y_c) + \\cos(\\theta) (x_i - x_c) + x_c,
which is the standard rigid-body rotation transformation.
Intuitively, the angle is
(1) the rotation angle from y-axis in image space
to the height vector (top->down in the box's local coordinate system)
of the box in CCW, and
(2) the rotation angle from x-axis in image space
to the width vector (left->right in the box's local coordinate system)
of the box in CCW.
More intuitively, consider the following horizontal box ABCD represented
in (x1, y1, x2, y2): (3, 2, 7, 4),
covering the [3, 7] x [2, 4] region of the continuous coordinate system
which looks like this:
.. code:: none
O--------> x
|
| A---B
| | |
| D---C
|
v y
Note that each capital letter represents one 0-dimensional geometric point
instead of a 'square pixel' here.
In the example above, using (x, y) to represent a point we have:
.. math::
O = (0, 0), A = (3, 2), B = (7, 2), C = (7, 4), D = (3, 4)
We name vector AB = vector DC as the width vector in box's local coordinate system, and
vector AD = vector BC as the height vector in box's local coordinate system. Initially,
when angle = 0 degree, they're aligned with the positive directions of x-axis and y-axis
in the image space, respectively.
For better illustration, we denote the center of the box as E,
.. code:: none
O--------> x
|
| A---B
| | E |
| D---C
|
v y
where the center E = ((3+7)/2, (2+4)/2) = (5, 3).
Also,
.. math::
width = |AB| = |CD| = 7 - 3 = 4,
height = |AD| = |BC| = 4 - 2 = 2.
Therefore, the corresponding representation for the same shape in rotated box in
(x_center, y_center, width, height, angle) format is:
(5, 3, 4, 2, 0),
Now, let's consider (5, 3, 4, 2, 90), which is rotated by 90 degrees
CCW (counter-clockwise) by definition. It looks like this:
.. code:: none
O--------> x
| B-C
| | |
| |E|
| | |
| A-D
v y
The center E is still located at the same point (5, 3), while the vertices
ABCD are rotated by 90 degrees CCW with regard to E:
A = (4, 5), B = (4, 1), C = (6, 1), D = (6, 5)
Here, 90 degrees can be seen as the CCW angle to rotate from y-axis to
vector AD or vector BC (the top->down height vector in box's local coordinate system),
or the CCW angle to rotate from x-axis to vector AB or vector DC (the left->right
width vector in box's local coordinate system).
.. math::
width = |AB| = |CD| = 5 - 1 = 4,
height = |AD| = |BC| = 6 - 4 = 2.
Next, how about (5, 3, 4, 2, -90), which is rotated by 90 degrees CW (clockwise)
by definition? It looks like this:
.. code:: none
O--------> x
| D-A
| | |
| |E|
| | |
| C-B
v y
The center E is still located at the same point (5, 3), while the vertices
ABCD are rotated by 90 degrees CW with regard to E:
A = (6, 1), B = (6, 5), C = (4, 5), D = (4, 1)
.. math::
width = |AB| = |CD| = 5 - 1 = 4,
height = |AD| = |BC| = 6 - 4 = 2.
This covers exactly the same region as (5, 3, 4, 2, 90) does, and their IoU
will be 1. However, these two will generate different RoI Pooling results and
should not be treated as an identical box.
On the other hand, it's easy to see that (X, Y, W, H, A) is identical to
(X, Y, W, H, A+360N), for any integer N. For example (5, 3, 4, 2, 270) would be
identical to (5, 3, 4, 2, -90), because rotating the shape 270 degrees CCW is
equivalent to rotating the same shape 90 degrees CW.
We could rotate further to get (5, 3, 4, 2, 180), or (5, 3, 4, 2, -180):
.. code:: none
O--------> x
|
| C---D
| | E |
| B---A
|
v y
.. math::
A = (7, 4), B = (3, 4), C = (3, 2), D = (7, 2),
width = |AB| = |CD| = 7 - 3 = 4,
height = |AD| = |BC| = 4 - 2 = 2.
Finally, this is a very inaccurate (heavily quantized) illustration of
how (5, 3, 4, 2, 60) looks like in case anyone wonders:
.. code:: none
O--------> x
| B\
| / C
| /E /
| A /
| `D
v y
It's still a rectangle with center of (5, 3), width of 4 and height of 2,
but its angle (and thus orientation) is somewhere between
(5, 3, 4, 2, 0) and (5, 3, 4, 2, 90).
"""
device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu")
tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device)
if tensor.numel() == 0:
# Use reshape, so we don't end up creating a new tensor that does not depend on
# the inputs (and consequently confuses jit)
tensor = tensor.reshape((0, 5)).to(dtype=torch.float32, device=device)
assert tensor.dim() == 2 and tensor.size(-1) == 5, tensor.size()
self.tensor = tensor
def clone(self) -> "RotatedBoxes":
"""
Clone the RotatedBoxes.
Returns:
RotatedBoxes
"""
return RotatedBoxes(self.tensor.clone())
def to(self, device: torch.device):
# Boxes are assumed float32 and does not support to(dtype)
return RotatedBoxes(self.tensor.to(device=device))
def area(self) -> torch.Tensor:
"""
Computes the area of all the boxes.
Returns:
torch.Tensor: a vector with areas of each box.
"""
box = self.tensor
area = box[:, 2] * box[:, 3]
return area
def normalize_angles(self) -> None:
"""
Restrict angles to the range of [-180, 180) degrees
"""
self.tensor[:, 4] = (self.tensor[:, 4] + 180.0) % 360.0 - 180.0
def clip(self, box_size: Tuple[int, int], clip_angle_threshold: float = 1.0) -> None:
"""
Clip (in place) the boxes by limiting x coordinates to the range [0, width]
and y coordinates to the range [0, height].
For RRPN:
Only clip boxes that are almost horizontal with a tolerance of
clip_angle_threshold to maintain backward compatibility.
Rotated boxes beyond this threshold are not clipped for two reasons:
1. There are potentially multiple ways to clip a rotated box to make it
fit within the image.
2. It's tricky to make the entire rectangular box fit within the image
and still be able to not leave out pixels of interest.
Therefore we rely on ops like RoIAlignRotated to safely handle this.
Args:
box_size (height, width): The clipping box's size.
clip_angle_threshold:
Iff. abs(normalized(angle)) <= clip_angle_threshold (in degrees),
we do the clipping as horizontal boxes.
"""
h, w = box_size
# normalize angles to be within (-180, 180] degrees
self.normalize_angles()
idx = torch.where(torch.abs(self.tensor[:, 4]) <= clip_angle_threshold)[0]
# convert to (x1, y1, x2, y2)
x1 = self.tensor[idx, 0] - self.tensor[idx, 2] / 2.0
y1 = self.tensor[idx, 1] - self.tensor[idx, 3] / 2.0
x2 = self.tensor[idx, 0] + self.tensor[idx, 2] / 2.0
y2 = self.tensor[idx, 1] + self.tensor[idx, 3] / 2.0
# clip
x1.clamp_(min=0, max=w)
y1.clamp_(min=0, max=h)
x2.clamp_(min=0, max=w)
y2.clamp_(min=0, max=h)
# convert back to (xc, yc, w, h)
self.tensor[idx, 0] = (x1 + x2) / 2.0
self.tensor[idx, 1] = (y1 + y2) / 2.0
# make sure widths and heights do not increase due to numerical errors
self.tensor[idx, 2] = torch.min(self.tensor[idx, 2], x2 - x1)
self.tensor[idx, 3] = torch.min(self.tensor[idx, 3], y2 - y1)
def nonempty(self, threshold: float = 0.0) -> torch.Tensor:
"""
Find boxes that are non-empty.
A box is considered empty, if either of its side is no larger than threshold.
Returns:
Tensor: a binary vector which represents
whether each box is empty (False) or non-empty (True).
"""
box = self.tensor
widths = box[:, 2]
heights = box[:, 3]
keep = (widths > threshold) & (heights > threshold)
return keep
def __getitem__(self, item) -> "RotatedBoxes":
"""
Returns:
RotatedBoxes: Create a new :class:`RotatedBoxes` by indexing.
The following usage are allowed:
1. `new_boxes = boxes[3]`: return a `RotatedBoxes` which contains only one box.
2. `new_boxes = boxes[2:10]`: return a slice of boxes.
3. `new_boxes = boxes[vector]`, where vector is a torch.ByteTensor
with `length = len(boxes)`. Nonzero elements in the vector will be selected.
Note that the returned RotatedBoxes might share storage with this RotatedBoxes,
subject to Pytorch's indexing semantics.
"""
if isinstance(item, int):
return RotatedBoxes(self.tensor[item].view(1, -1))
b = self.tensor[item]
assert b.dim() == 2, "Indexing on RotatedBoxes with {} failed to return a matrix!".format(
item
)
return RotatedBoxes(b)
def __len__(self) -> int:
return self.tensor.shape[0]
def __repr__(self) -> str:
return "RotatedBoxes(" + str(self.tensor) + ")"
def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor:
"""
Args:
box_size (height, width): Size of the reference box covering
[0, width] x [0, height]
boundary_threshold (int): Boxes that extend beyond the reference box
boundary by more than boundary_threshold are considered "outside".
For RRPN, it might not be necessary to call this function since it's common
for rotated box to extend to outside of the image boundaries
(the clip function only clips the near-horizontal boxes)
Returns:
a binary vector, indicating whether each box is inside the reference box.
"""
height, width = box_size
cnt_x = self.tensor[..., 0]
cnt_y = self.tensor[..., 1]
half_w = self.tensor[..., 2] / 2.0
half_h = self.tensor[..., 3] / 2.0
a = self.tensor[..., 4]
c = torch.abs(torch.cos(a * math.pi / 180.0))
s = torch.abs(torch.sin(a * math.pi / 180.0))
# This basically computes the horizontal bounding rectangle of the rotated box
max_rect_dx = c * half_w + s * half_h
max_rect_dy = c * half_h + s * half_w
inds_inside = (
(cnt_x - max_rect_dx >= -boundary_threshold)
& (cnt_y - max_rect_dy >= -boundary_threshold)
& (cnt_x + max_rect_dx < width + boundary_threshold)
& (cnt_y + max_rect_dy < height + boundary_threshold)
)
return inds_inside
def get_centers(self) -> torch.Tensor:
"""
Returns:
The box centers in a Nx2 array of (x, y).
"""
return self.tensor[:, :2]
def scale(self, scale_x: float, scale_y: float) -> None:
"""
Scale the rotated box with horizontal and vertical scaling factors
Note: when scale_factor_x != scale_factor_y,
the rotated box does not preserve the rectangular shape when the angle
is not a multiple of 90 degrees under resize transformation.
Instead, the shape is a parallelogram (that has skew)
Here we make an approximation by fitting a rotated rectangle to the parallelogram.
"""
self.tensor[:, 0] *= scale_x
self.tensor[:, 1] *= scale_y
theta = self.tensor[:, 4] * math.pi / 180.0
c = torch.cos(theta)
s = torch.sin(theta)
# In image space, y is top->down and x is left->right
# Consider the local coordintate system for the rotated box,
# where the box center is located at (0, 0), and the four vertices ABCD are
# A(-w / 2, -h / 2), B(w / 2, -h / 2), C(w / 2, h / 2), D(-w / 2, h / 2)
# the midpoint of the left edge AD of the rotated box E is:
# E = (A+D)/2 = (-w / 2, 0)
# the midpoint of the top edge AB of the rotated box F is:
# F(0, -h / 2)
# To get the old coordinates in the global system, apply the rotation transformation
# (Note: the right-handed coordinate system for image space is yOx):
# (old_x, old_y) = (s * y + c * x, c * y - s * x)
# E(old) = (s * 0 + c * (-w/2), c * 0 - s * (-w/2)) = (-c * w / 2, s * w / 2)
# F(old) = (s * (-h / 2) + c * 0, c * (-h / 2) - s * 0) = (-s * h / 2, -c * h / 2)
# After applying the scaling factor (sfx, sfy):
# E(new) = (-sfx * c * w / 2, sfy * s * w / 2)
# F(new) = (-sfx * s * h / 2, -sfy * c * h / 2)
# The new width after scaling tranformation becomes:
# w(new) = |E(new) - O| * 2
# = sqrt[(sfx * c * w / 2)^2 + (sfy * s * w / 2)^2] * 2
# = sqrt[(sfx * c)^2 + (sfy * s)^2] * w
# i.e., scale_factor_w = sqrt[(sfx * c)^2 + (sfy * s)^2]
#
# For example,
# when angle = 0 or 180, |c| = 1, s = 0, scale_factor_w == scale_factor_x;
# when |angle| = 90, c = 0, |s| = 1, scale_factor_w == scale_factor_y
self.tensor[:, 2] *= torch.sqrt((scale_x * c) ** 2 + (scale_y * s) ** 2)
# h(new) = |F(new) - O| * 2
# = sqrt[(sfx * s * h / 2)^2 + (sfy * c * h / 2)^2] * 2
# = sqrt[(sfx * s)^2 + (sfy * c)^2] * h
# i.e., scale_factor_h = sqrt[(sfx * s)^2 + (sfy * c)^2]
#
# For example,
# when angle = 0 or 180, |c| = 1, s = 0, scale_factor_h == scale_factor_y;
# when |angle| = 90, c = 0, |s| = 1, scale_factor_h == scale_factor_x
self.tensor[:, 3] *= torch.sqrt((scale_x * s) ** 2 + (scale_y * c) ** 2)
# The angle is the rotation angle from y-axis in image space to the height
# vector (top->down in the box's local coordinate system) of the box in CCW.
#
# angle(new) = angle_yOx(O - F(new))
# = angle_yOx( (sfx * s * h / 2, sfy * c * h / 2) )
# = atan2(sfx * s * h / 2, sfy * c * h / 2)
# = atan2(sfx * s, sfy * c)
#
# For example,
# when sfx == sfy, angle(new) == atan2(s, c) == angle(old)
self.tensor[:, 4] = torch.atan2(scale_x * s, scale_y * c) * 180 / math.pi
@classmethod
def cat(cls, boxes_list: List["RotatedBoxes"]) -> "RotatedBoxes":
"""
Concatenates a list of RotatedBoxes into a single RotatedBoxes
Arguments:
boxes_list (list[RotatedBoxes])
Returns:
RotatedBoxes: the concatenated RotatedBoxes
"""
assert isinstance(boxes_list, (list, tuple))
if len(boxes_list) == 0:
return cls(torch.empty(0))
assert all([isinstance(box, RotatedBoxes) for box in boxes_list])
# use torch.cat (v.s. layers.cat) so the returned boxes never share storage with input
cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0))
return cat_boxes
@property
def device(self) -> torch.device:
return self.tensor.device
@torch.jit.unused
def __iter__(self):
"""
Yield a box as a Tensor of shape (5,) at a time.
"""
yield from self.tensor
def pairwise_iou(boxes1: RotatedBoxes, boxes2: RotatedBoxes) -> None:
"""
Given two lists of rotated boxes of size N and M,
compute the IoU (intersection over union)
between **all** N x M pairs of boxes.
The box order must be (x_center, y_center, width, height, angle).
Args:
boxes1, boxes2 (RotatedBoxes):
two `RotatedBoxes`. Contains N & M rotated boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
return pairwise_iou_rotated(boxes1.tensor, boxes2.tensor) | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/structures/rotated_boxes.py | rotated_boxes.py |
import math
import numpy as np
from enum import IntEnum, unique
from typing import List, Tuple, Union
import torch
from torch import device
_RawBoxType = Union[List[float], Tuple[float, ...], torch.Tensor, np.ndarray]
@unique
class BoxMode(IntEnum):
"""
Enum of different ways to represent a box.
"""
XYXY_ABS = 0
"""
(x0, y0, x1, y1) in absolute floating points coordinates.
The coordinates in range [0, width or height].
"""
XYWH_ABS = 1
"""
(x0, y0, w, h) in absolute floating points coordinates.
"""
XYXY_REL = 2
"""
Not yet supported!
(x0, y0, x1, y1) in range [0, 1]. They are relative to the size of the image.
"""
XYWH_REL = 3
"""
Not yet supported!
(x0, y0, w, h) in range [0, 1]. They are relative to the size of the image.
"""
XYWHA_ABS = 4
"""
(xc, yc, w, h, a) in absolute floating points coordinates.
(xc, yc) is the center of the rotated box, and the angle a is in degrees ccw.
"""
@staticmethod
def convert(box: _RawBoxType, from_mode: "BoxMode", to_mode: "BoxMode") -> _RawBoxType:
"""
Args:
box: can be a k-tuple, k-list or an Nxk array/tensor, where k = 4 or 5
from_mode, to_mode (BoxMode)
Returns:
The converted box of the same type.
"""
if from_mode == to_mode:
return box
original_type = type(box)
is_numpy = isinstance(box, np.ndarray)
single_box = isinstance(box, (list, tuple))
if single_box:
assert len(box) == 4 or len(box) == 5, (
"BoxMode.convert takes either a k-tuple/list or an Nxk array/tensor,"
" where k == 4 or 5"
)
arr = torch.tensor(box)[None, :]
else:
# avoid modifying the input box
if is_numpy:
arr = torch.from_numpy(np.asarray(box)).clone()
else:
arr = box.clone()
assert to_mode not in [BoxMode.XYXY_REL, BoxMode.XYWH_REL] and from_mode not in [
BoxMode.XYXY_REL,
BoxMode.XYWH_REL,
], "Relative mode not yet supported!"
if from_mode == BoxMode.XYWHA_ABS and to_mode == BoxMode.XYXY_ABS:
assert (
arr.shape[-1] == 5
), "The last dimension of input shape must be 5 for XYWHA format"
original_dtype = arr.dtype
arr = arr.double()
w = arr[:, 2]
h = arr[:, 3]
a = arr[:, 4]
c = torch.abs(torch.cos(a * math.pi / 180.0))
s = torch.abs(torch.sin(a * math.pi / 180.0))
# This basically computes the horizontal bounding rectangle of the rotated box
new_w = c * w + s * h
new_h = c * h + s * w
# convert center to top-left corner
arr[:, 0] -= new_w / 2.0
arr[:, 1] -= new_h / 2.0
# bottom-right corner
arr[:, 2] = arr[:, 0] + new_w
arr[:, 3] = arr[:, 1] + new_h
arr = arr[:, :4].to(dtype=original_dtype)
elif from_mode == BoxMode.XYWH_ABS and to_mode == BoxMode.XYWHA_ABS:
original_dtype = arr.dtype
arr = arr.double()
arr[:, 0] += arr[:, 2] / 2.0
arr[:, 1] += arr[:, 3] / 2.0
angles = torch.zeros((arr.shape[0], 1), dtype=arr.dtype)
arr = torch.cat((arr, angles), axis=1).to(dtype=original_dtype)
else:
if to_mode == BoxMode.XYXY_ABS and from_mode == BoxMode.XYWH_ABS:
arr[:, 2] += arr[:, 0]
arr[:, 3] += arr[:, 1]
elif from_mode == BoxMode.XYXY_ABS and to_mode == BoxMode.XYWH_ABS:
arr[:, 2] -= arr[:, 0]
arr[:, 3] -= arr[:, 1]
else:
raise NotImplementedError(
"Conversion from BoxMode {} to {} is not supported yet".format(
from_mode, to_mode
)
)
if single_box:
return original_type(arr.flatten().tolist())
if is_numpy:
return arr.numpy()
else:
return arr
class Boxes:
"""
This structure stores a list of boxes as a Nx4 torch.Tensor.
It supports some common methods about boxes
(`area`, `clip`, `nonempty`, etc),
and also behaves like a Tensor
(support indexing, `to(device)`, `.device`, and iteration over all boxes)
Attributes:
tensor (torch.Tensor): float matrix of Nx4. Each row is (x1, y1, x2, y2).
"""
def __init__(self, tensor: torch.Tensor):
"""
Args:
tensor (Tensor[float]): a Nx4 matrix. Each row is (x1, y1, x2, y2).
"""
device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu")
tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device)
if tensor.numel() == 0:
# Use reshape, so we don't end up creating a new tensor that does not depend on
# the inputs (and consequently confuses jit)
tensor = tensor.reshape((-1, 4)).to(dtype=torch.float32, device=device)
assert tensor.dim() == 2 and tensor.size(-1) == 4, tensor.size()
self.tensor = tensor
def clone(self) -> "Boxes":
"""
Clone the Boxes.
Returns:
Boxes
"""
return Boxes(self.tensor.clone())
def to(self, device: torch.device):
# Boxes are assumed float32 and does not support to(dtype)
return Boxes(self.tensor.to(device=device))
def area(self) -> torch.Tensor:
"""
Computes the area of all the boxes.
Returns:
torch.Tensor: a vector with areas of each box.
"""
box = self.tensor
area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1])
return area
def clip(self, box_size: Tuple[int, int]) -> None:
"""
Clip (in place) the boxes by limiting x coordinates to the range [0, width]
and y coordinates to the range [0, height].
Args:
box_size (height, width): The clipping box's size.
"""
assert torch.isfinite(self.tensor).all(), "Box tensor contains infinite or NaN!"
h, w = box_size
x1 = self.tensor[:, 0].clamp(min=0, max=w)
y1 = self.tensor[:, 1].clamp(min=0, max=h)
x2 = self.tensor[:, 2].clamp(min=0, max=w)
y2 = self.tensor[:, 3].clamp(min=0, max=h)
self.tensor = torch.stack((x1, y1, x2, y2), dim=-1)
def nonempty(self, threshold: float = 0.0) -> torch.Tensor:
"""
Find boxes that are non-empty.
A box is considered empty, if either of its side is no larger than threshold.
Returns:
Tensor:
a binary vector which represents whether each box is empty
(False) or non-empty (True).
"""
box = self.tensor
widths = box[:, 2] - box[:, 0]
heights = box[:, 3] - box[:, 1]
keep = (widths > threshold) & (heights > threshold)
return keep
def __getitem__(self, item) -> "Boxes":
"""
Args:
item: int, slice, or a BoolTensor
Returns:
Boxes: Create a new :class:`Boxes` by indexing.
The following usage are allowed:
1. `new_boxes = boxes[3]`: return a `Boxes` which contains only one box.
2. `new_boxes = boxes[2:10]`: return a slice of boxes.
3. `new_boxes = boxes[vector]`, where vector is a torch.BoolTensor
with `length = len(boxes)`. Nonzero elements in the vector will be selected.
Note that the returned Boxes might share storage with this Boxes,
subject to Pytorch's indexing semantics.
"""
if isinstance(item, int):
return Boxes(self.tensor[item].view(1, -1))
b = self.tensor[item]
assert b.dim() == 2, "Indexing on Boxes with {} failed to return a matrix!".format(item)
return Boxes(b)
def __len__(self) -> int:
return self.tensor.shape[0]
def __repr__(self) -> str:
return "Boxes(" + str(self.tensor) + ")"
def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor:
"""
Args:
box_size (height, width): Size of the reference box.
boundary_threshold (int): Boxes that extend beyond the reference box
boundary by more than boundary_threshold are considered "outside".
Returns:
a binary vector, indicating whether each box is inside the reference box.
"""
height, width = box_size
inds_inside = (
(self.tensor[..., 0] >= -boundary_threshold)
& (self.tensor[..., 1] >= -boundary_threshold)
& (self.tensor[..., 2] < width + boundary_threshold)
& (self.tensor[..., 3] < height + boundary_threshold)
)
return inds_inside
def get_centers(self) -> torch.Tensor:
"""
Returns:
The box centers in a Nx2 array of (x, y).
"""
return (self.tensor[:, :2] + self.tensor[:, 2:]) / 2
def scale(self, scale_x: float, scale_y: float) -> None:
"""
Scale the box with horizontal and vertical scaling factors
"""
self.tensor[:, 0::2] *= scale_x
self.tensor[:, 1::2] *= scale_y
@classmethod
def cat(cls, boxes_list: List["Boxes"]) -> "Boxes":
"""
Concatenates a list of Boxes into a single Boxes
Arguments:
boxes_list (list[Boxes])
Returns:
Boxes: the concatenated Boxes
"""
assert isinstance(boxes_list, (list, tuple))
if len(boxes_list) == 0:
return cls(torch.empty(0))
assert all([isinstance(box, Boxes) for box in boxes_list])
# use torch.cat (v.s. layers.cat) so the returned boxes never share storage with input
cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0))
return cat_boxes
@property
def device(self) -> device:
return self.tensor.device
# type "Iterator[torch.Tensor]", yield, and iter() not supported by torchscript
# https://github.com/pytorch/pytorch/issues/18627
@torch.jit.unused
def __iter__(self):
"""
Yield a box as a Tensor of shape (4,) at a time.
"""
yield from self.tensor
def pairwise_intersection(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
"""
Given two lists of boxes of size N and M,
compute the intersection area between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax)
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: intersection, sized [N,M].
"""
boxes1, boxes2 = boxes1.tensor, boxes2.tensor
width_height = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) - torch.max(
boxes1[:, None, :2], boxes2[:, :2]
) # [N,M,2]
width_height.clamp_(min=0) # [N,M,2]
intersection = width_height.prod(dim=2) # [N,M]
return intersection
# implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py
# with slight modifications
def pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
"""
Given two lists of boxes of size N and M, compute the IoU
(intersection over union) between **all** N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
area1 = boxes1.area() # [N]
area2 = boxes2.area() # [M]
inter = pairwise_intersection(boxes1, boxes2)
# handle empty boxes
iou = torch.where(
inter > 0,
inter / (area1[:, None] + area2 - inter),
torch.zeros(1, dtype=inter.dtype, device=inter.device),
)
return iou
def pairwise_ioa(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
"""
Similar to :func:`pariwise_iou` but compute the IoA (intersection over boxes2 area).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoA, sized [N,M].
"""
area2 = boxes2.area() # [M]
inter = pairwise_intersection(boxes1, boxes2)
# handle empty boxes
ioa = torch.where(
inter > 0, inter / area2, torch.zeros(1, dtype=inter.dtype, device=inter.device)
)
return ioa
def pairwise_point_box_distance(points: torch.Tensor, boxes: Boxes):
"""
Pairwise distance between N points and M boxes. The distance between a
point and a box is represented by the distance from the point to 4 edges
of the box. Distances are all positive when the point is inside the box.
Args:
points: Nx2 coordinates. Each row is (x, y)
boxes: M boxes
Returns:
Tensor: distances of size (N, M, 4). The 4 values are distances from
the point to the left, top, right, bottom of the box.
"""
x, y = points.unsqueeze(dim=2).unbind(dim=1) # (N, 1)
x0, y0, x1, y1 = boxes.tensor.unsqueeze(dim=0).unbind(dim=2) # (1, M)
return torch.stack([x - x0, y - y0, x1 - x, y1 - y], dim=2)
def matched_pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
"""
Compute pairwise intersection over union (IOU) of two sets of matched
boxes that have the same number of boxes.
Similar to :func:`pairwise_iou`, but computes only diagonal elements of the matrix.
Args:
boxes1 (Boxes): bounding boxes, sized [N,4].
boxes2 (Boxes): same length as boxes1
Returns:
Tensor: iou, sized [N].
"""
assert len(boxes1) == len(
boxes2
), "boxlists should have the same" "number of entries, got {}, {}".format(
len(boxes1), len(boxes2)
)
area1 = boxes1.area() # [N]
area2 = boxes2.area() # [N]
box1, box2 = boxes1.tensor, boxes2.tensor
lt = torch.max(box1[:, :2], box2[:, :2]) # [N,2]
rb = torch.min(box1[:, 2:], box2[:, 2:]) # [N,2]
wh = (rb - lt).clamp(min=0) # [N,2]
inter = wh[:, 0] * wh[:, 1] # [N]
iou = inter / (area1 + area2 - inter) # [N]
return iou | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/structures/boxes.py | boxes.py |
import numpy as np
from typing import Any, List, Tuple, Union
import torch
from torch.nn import functional as F
class Keypoints:
"""
Stores keypoint **annotation** data. GT Instances have a `gt_keypoints` property
containing the x,y location and visibility flag of each keypoint. This tensor has shape
(N, K, 3) where N is the number of instances and K is the number of keypoints per instance.
The visibility flag follows the COCO format and must be one of three integers:
* v=0: not labeled (in which case x=y=0)
* v=1: labeled but not visible
* v=2: labeled and visible
"""
def __init__(self, keypoints: Union[torch.Tensor, np.ndarray, List[List[float]]]):
"""
Arguments:
keypoints: A Tensor, numpy array, or list of the x, y, and visibility of each keypoint.
The shape should be (N, K, 3) where N is the number of
instances, and K is the number of keypoints per instance.
"""
device = keypoints.device if isinstance(keypoints, torch.Tensor) else torch.device("cpu")
keypoints = torch.as_tensor(keypoints, dtype=torch.float32, device=device)
assert keypoints.dim() == 3 and keypoints.shape[2] == 3, keypoints.shape
self.tensor = keypoints
def __len__(self) -> int:
return self.tensor.size(0)
def to(self, *args: Any, **kwargs: Any) -> "Keypoints":
return type(self)(self.tensor.to(*args, **kwargs))
@property
def device(self) -> torch.device:
return self.tensor.device
def to_heatmap(self, boxes: torch.Tensor, heatmap_size: int) -> torch.Tensor:
"""
Convert keypoint annotations to a heatmap of one-hot labels for training,
as described in :paper:`Mask R-CNN`.
Arguments:
boxes: Nx4 tensor, the boxes to draw the keypoints to
Returns:
heatmaps:
A tensor of shape (N, K), each element is integer spatial label
in the range [0, heatmap_size**2 - 1] for each keypoint in the input.
valid:
A tensor of shape (N, K) containing whether each keypoint is in the roi or not.
"""
return _keypoints_to_heatmap(self.tensor, boxes, heatmap_size)
def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "Keypoints":
"""
Create a new `Keypoints` by indexing on this `Keypoints`.
The following usage are allowed:
1. `new_kpts = kpts[3]`: return a `Keypoints` which contains only one instance.
2. `new_kpts = kpts[2:10]`: return a slice of key points.
3. `new_kpts = kpts[vector]`, where vector is a torch.ByteTensor
with `length = len(kpts)`. Nonzero elements in the vector will be selected.
Note that the returned Keypoints might share storage with this Keypoints,
subject to Pytorch's indexing semantics.
"""
if isinstance(item, int):
return Keypoints([self.tensor[item]])
return Keypoints(self.tensor[item])
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={})".format(len(self.tensor))
return s
@staticmethod
def cat(keypoints_list: List["Keypoints"]) -> "Keypoints":
"""
Concatenates a list of Keypoints into a single Keypoints
Arguments:
keypoints_list (list[Keypoints])
Returns:
Keypoints: the concatenated Keypoints
"""
assert isinstance(keypoints_list, (list, tuple))
assert len(keypoints_list) > 0
assert all(isinstance(keypoints, Keypoints) for keypoints in keypoints_list)
cat_kpts = type(keypoints_list[0])(
torch.cat([kpts.tensor for kpts in keypoints_list], dim=0)
)
return cat_kpts
# TODO make this nicer, this is a direct translation from C2 (but removing the inner loop)
def _keypoints_to_heatmap(
keypoints: torch.Tensor, rois: torch.Tensor, heatmap_size: int
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Encode keypoint locations into a target heatmap for use in SoftmaxWithLoss across space.
Maps keypoints from the half-open interval [x1, x2) on continuous image coordinates to the
closed interval [0, heatmap_size - 1] on discrete image coordinates. We use the
continuous-discrete conversion from Heckbert 1990 ("What is the coordinate of a pixel?"):
d = floor(c) and c = d + 0.5, where d is a discrete coordinate and c is a continuous coordinate.
Arguments:
keypoints: tensor of keypoint locations in of shape (N, K, 3).
rois: Nx4 tensor of rois in xyxy format
heatmap_size: integer side length of square heatmap.
Returns:
heatmaps: A tensor of shape (N, K) containing an integer spatial label
in the range [0, heatmap_size**2 - 1] for each keypoint in the input.
valid: A tensor of shape (N, K) containing whether each keypoint is in
the roi or not.
"""
if rois.numel() == 0:
return rois.new().long(), rois.new().long()
offset_x = rois[:, 0]
offset_y = rois[:, 1]
scale_x = heatmap_size / (rois[:, 2] - rois[:, 0])
scale_y = heatmap_size / (rois[:, 3] - rois[:, 1])
offset_x = offset_x[:, None]
offset_y = offset_y[:, None]
scale_x = scale_x[:, None]
scale_y = scale_y[:, None]
x = keypoints[..., 0]
y = keypoints[..., 1]
x_boundary_inds = x == rois[:, 2][:, None]
y_boundary_inds = y == rois[:, 3][:, None]
x = (x - offset_x) * scale_x
x = x.floor().long()
y = (y - offset_y) * scale_y
y = y.floor().long()
x[x_boundary_inds] = heatmap_size - 1
y[y_boundary_inds] = heatmap_size - 1
valid_loc = (x >= 0) & (y >= 0) & (x < heatmap_size) & (y < heatmap_size)
vis = keypoints[..., 2] > 0
valid = (valid_loc & vis).long()
lin_ind = y * heatmap_size + x
heatmaps = lin_ind * valid
return heatmaps, valid
@torch.jit.script_if_tracing
def heatmaps_to_keypoints(maps: torch.Tensor, rois: torch.Tensor) -> torch.Tensor:
"""
Extract predicted keypoint locations from heatmaps.
Args:
maps (Tensor): (#ROIs, #keypoints, POOL_H, POOL_W). The predicted heatmap of logits for
each ROI and each keypoint.
rois (Tensor): (#ROIs, 4). The box of each ROI.
Returns:
Tensor of shape (#ROIs, #keypoints, 4) with the last dimension corresponding to
(x, y, logit, score) for each keypoint.
When converting discrete pixel indices in an NxN image to a continuous keypoint coordinate,
we maintain consistency with :meth:`Keypoints.to_heatmap` by using the conversion from
Heckbert 1990: c = d + 0.5, where d is a discrete coordinate and c is a continuous coordinate.
"""
# The decorator use of torch.no_grad() was not supported by torchscript.
# https://github.com/pytorch/pytorch/issues/44768
maps = maps.detach()
rois = rois.detach()
offset_x = rois[:, 0]
offset_y = rois[:, 1]
widths = (rois[:, 2] - rois[:, 0]).clamp(min=1)
heights = (rois[:, 3] - rois[:, 1]).clamp(min=1)
widths_ceil = widths.ceil()
heights_ceil = heights.ceil()
num_rois, num_keypoints = maps.shape[:2]
xy_preds = maps.new_zeros(rois.shape[0], num_keypoints, 4)
width_corrections = widths / widths_ceil
height_corrections = heights / heights_ceil
keypoints_idx = torch.arange(num_keypoints, device=maps.device)
for i in range(num_rois):
outsize = (int(heights_ceil[i]), int(widths_ceil[i]))
roi_map = F.interpolate(
maps[[i]], size=outsize, mode="bicubic", align_corners=False
).squeeze(
0
) # #keypoints x H x W
# softmax over the spatial region
max_score, _ = roi_map.view(num_keypoints, -1).max(1)
max_score = max_score.view(num_keypoints, 1, 1)
tmp_full_resolution = (roi_map - max_score).exp_()
tmp_pool_resolution = (maps[i] - max_score).exp_()
# Produce scores over the region H x W, but normalize with POOL_H x POOL_W,
# so that the scores of objects of different absolute sizes will be more comparable
roi_map_scores = tmp_full_resolution / tmp_pool_resolution.sum((1, 2), keepdim=True)
w = roi_map.shape[2]
pos = roi_map.view(num_keypoints, -1).argmax(1)
x_int = pos % w
y_int = (pos - x_int) // w
assert (
roi_map_scores[keypoints_idx, y_int, x_int]
== roi_map_scores.view(num_keypoints, -1).max(1)[0]
).all()
x = (x_int.float() + 0.5) * width_corrections[i]
y = (y_int.float() + 0.5) * height_corrections[i]
xy_preds[i, :, 0] = x + offset_x[i]
xy_preds[i, :, 1] = y + offset_y[i]
xy_preds[i, :, 2] = roi_map[keypoints_idx, y_int, x_int]
xy_preds[i, :, 3] = roi_map_scores[keypoints_idx, y_int, x_int]
return xy_preds | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/structures/keypoints.py | keypoints.py |
import logging
import os
import pickle
import torch
from fvcore.common.checkpoint import Checkpointer
from torch.nn.parallel import DistributedDataParallel
import detectron2.utils.comm as comm
from detectron2.utils.file_io import PathManager
from .c2_model_loading import align_and_update_state_dicts
class DetectionCheckpointer(Checkpointer):
"""
Same as :class:`Checkpointer`, but is able to:
1. handle models in detectron & detectron2 model zoo, and apply conversions for legacy models.
2. correctly load checkpoints that are only available on the master worker
"""
def __init__(self, model, save_dir="", *, save_to_disk=None, **checkpointables):
is_main_process = comm.is_main_process()
super().__init__(
model,
save_dir,
save_to_disk=is_main_process if save_to_disk is None else save_to_disk,
**checkpointables,
)
self.path_manager = PathManager
def load(self, path, *args, **kwargs):
need_sync = False
if path and isinstance(self.model, DistributedDataParallel):
logger = logging.getLogger(__name__)
path = self.path_manager.get_local_path(path)
has_file = os.path.isfile(path)
all_has_file = comm.all_gather(has_file)
if not all_has_file[0]:
raise OSError(f"File {path} not found on main worker.")
if not all(all_has_file):
logger.warning(
f"Not all workers can read checkpoint {path}. "
"Training may fail to fully resume."
)
# TODO: broadcast the checkpoint file contents from main
# worker, and load from it instead.
need_sync = True
if not has_file:
path = None # don't load if not readable
ret = super().load(path, *args, **kwargs)
if need_sync:
logger.info("Broadcasting model states from main worker ...")
self.model._sync_params_and_buffers()
return ret
def _load_file(self, filename):
if filename.endswith(".pkl"):
with PathManager.open(filename, "rb") as f:
data = pickle.load(f, encoding="latin1")
if "model" in data and "__author__" in data:
# file is in Detectron2 model zoo format
self.logger.info("Reading a file from '{}'".format(data["__author__"]))
return data
else:
# assume file is from Caffe2 / Detectron1 model zoo
if "blobs" in data:
# Detection models have "blobs", but ImageNet models don't
data = data["blobs"]
data = {k: v for k, v in data.items() if not k.endswith("_momentum")}
return {"model": data, "__author__": "Caffe2", "matching_heuristics": True}
elif filename.endswith(".pyth"):
# assume file is from pycls; no one else seems to use the ".pyth" extension
with PathManager.open(filename, "rb") as f:
data = torch.load(f)
assert (
"model_state" in data
), f"Cannot load .pyth file {filename}; pycls checkpoints must contain 'model_state'."
model_state = {
k: v
for k, v in data["model_state"].items()
if not k.endswith("num_batches_tracked")
}
return {"model": model_state, "__author__": "pycls", "matching_heuristics": True}
loaded = super()._load_file(filename) # load native pth checkpoint
if "model" not in loaded:
loaded = {"model": loaded}
return loaded
def _load_model(self, checkpoint):
if checkpoint.get("matching_heuristics", False):
self._convert_ndarray_to_tensor(checkpoint["model"])
# convert weights by name-matching heuristics
checkpoint["model"] = align_and_update_state_dicts(
self.model.state_dict(),
checkpoint["model"],
c2_conversion=checkpoint.get("__author__", None) == "Caffe2",
)
# for non-caffe2 models, use standard ways to load it
incompatible = super()._load_model(checkpoint)
model_buffers = dict(self.model.named_buffers(recurse=False))
for k in ["pixel_mean", "pixel_std"]:
# Ignore missing key message about pixel_mean/std.
# Though they may be missing in old checkpoints, they will be correctly
# initialized from config anyway.
if k in model_buffers:
try:
incompatible.missing_keys.remove(k)
except ValueError:
pass
for k in incompatible.unexpected_keys[:]:
# Ignore unexpected keys about cell anchors. They exist in old checkpoints
# but now they are non-persistent buffers and will not be in new checkpoints.
if "anchor_generator.cell_anchors" in k:
incompatible.unexpected_keys.remove(k)
return incompatible | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/checkpoint/detection_checkpoint.py | detection_checkpoint.py |
import logging
from detectron2.utils.file_io import PathHandler, PathManager
class ModelCatalog(object):
"""
Store mappings from names to third-party models.
"""
S3_C2_DETECTRON_PREFIX = "https://dl.fbaipublicfiles.com/detectron"
# MSRA models have STRIDE_IN_1X1=True. False otherwise.
# NOTE: all BN models here have fused BN into an affine layer.
# As a result, you should only load them to a model with "FrozenBN".
# Loading them to a model with regular BN or SyncBN is wrong.
# Even when loaded to FrozenBN, it is still different from affine by an epsilon,
# which should be negligible for training.
# NOTE: all models here uses PIXEL_STD=[1,1,1]
# NOTE: Most of the BN models here are no longer used. We use the
# re-converted pre-trained models under detectron2 model zoo instead.
C2_IMAGENET_MODELS = {
"MSRA/R-50": "ImageNetPretrained/MSRA/R-50.pkl",
"MSRA/R-101": "ImageNetPretrained/MSRA/R-101.pkl",
"FAIR/R-50-GN": "ImageNetPretrained/47261647/R-50-GN.pkl",
"FAIR/R-101-GN": "ImageNetPretrained/47592356/R-101-GN.pkl",
"FAIR/X-101-32x8d": "ImageNetPretrained/20171220/X-101-32x8d.pkl",
"FAIR/X-101-64x4d": "ImageNetPretrained/FBResNeXt/X-101-64x4d.pkl",
"FAIR/X-152-32x8d-IN5k": "ImageNetPretrained/25093814/X-152-32x8d-IN5k.pkl",
}
C2_DETECTRON_PATH_FORMAT = (
"{prefix}/{url}/output/train/{dataset}/{type}/model_final.pkl" # noqa B950
)
C2_DATASET_COCO = "coco_2014_train%3Acoco_2014_valminusminival"
C2_DATASET_COCO_KEYPOINTS = "keypoints_coco_2014_train%3Akeypoints_coco_2014_valminusminival"
# format: {model_name} -> part of the url
C2_DETECTRON_MODELS = {
"35857197/e2e_faster_rcnn_R-50-C4_1x": "35857197/12_2017_baselines/e2e_faster_rcnn_R-50-C4_1x.yaml.01_33_49.iAX0mXvW", # noqa B950
"35857345/e2e_faster_rcnn_R-50-FPN_1x": "35857345/12_2017_baselines/e2e_faster_rcnn_R-50-FPN_1x.yaml.01_36_30.cUF7QR7I", # noqa B950
"35857890/e2e_faster_rcnn_R-101-FPN_1x": "35857890/12_2017_baselines/e2e_faster_rcnn_R-101-FPN_1x.yaml.01_38_50.sNxI7sX7", # noqa B950
"36761737/e2e_faster_rcnn_X-101-32x8d-FPN_1x": "36761737/12_2017_baselines/e2e_faster_rcnn_X-101-32x8d-FPN_1x.yaml.06_31_39.5MIHi1fZ", # noqa B950
"35858791/e2e_mask_rcnn_R-50-C4_1x": "35858791/12_2017_baselines/e2e_mask_rcnn_R-50-C4_1x.yaml.01_45_57.ZgkA7hPB", # noqa B950
"35858933/e2e_mask_rcnn_R-50-FPN_1x": "35858933/12_2017_baselines/e2e_mask_rcnn_R-50-FPN_1x.yaml.01_48_14.DzEQe4wC", # noqa B950
"35861795/e2e_mask_rcnn_R-101-FPN_1x": "35861795/12_2017_baselines/e2e_mask_rcnn_R-101-FPN_1x.yaml.02_31_37.KqyEK4tT", # noqa B950
"36761843/e2e_mask_rcnn_X-101-32x8d-FPN_1x": "36761843/12_2017_baselines/e2e_mask_rcnn_X-101-32x8d-FPN_1x.yaml.06_35_59.RZotkLKI", # noqa B950
"48616381/e2e_mask_rcnn_R-50-FPN_2x_gn": "GN/48616381/04_2018_gn_baselines/e2e_mask_rcnn_R-50-FPN_2x_gn_0416.13_23_38.bTlTI97Q", # noqa B950
"37697547/e2e_keypoint_rcnn_R-50-FPN_1x": "37697547/12_2017_baselines/e2e_keypoint_rcnn_R-50-FPN_1x.yaml.08_42_54.kdzV35ao", # noqa B950
"35998355/rpn_R-50-C4_1x": "35998355/12_2017_baselines/rpn_R-50-C4_1x.yaml.08_00_43.njH5oD9L", # noqa B950
"35998814/rpn_R-50-FPN_1x": "35998814/12_2017_baselines/rpn_R-50-FPN_1x.yaml.08_06_03.Axg0r179", # noqa B950
"36225147/fast_R-50-FPN_1x": "36225147/12_2017_baselines/fast_rcnn_R-50-FPN_1x.yaml.08_39_09.L3obSdQ2", # noqa B950
}
@staticmethod
def get(name):
if name.startswith("Caffe2Detectron/COCO"):
return ModelCatalog._get_c2_detectron_baseline(name)
if name.startswith("ImageNetPretrained/"):
return ModelCatalog._get_c2_imagenet_pretrained(name)
raise RuntimeError("model not present in the catalog: {}".format(name))
@staticmethod
def _get_c2_imagenet_pretrained(name):
prefix = ModelCatalog.S3_C2_DETECTRON_PREFIX
name = name[len("ImageNetPretrained/") :]
name = ModelCatalog.C2_IMAGENET_MODELS[name]
url = "/".join([prefix, name])
return url
@staticmethod
def _get_c2_detectron_baseline(name):
name = name[len("Caffe2Detectron/COCO/") :]
url = ModelCatalog.C2_DETECTRON_MODELS[name]
if "keypoint_rcnn" in name:
dataset = ModelCatalog.C2_DATASET_COCO_KEYPOINTS
else:
dataset = ModelCatalog.C2_DATASET_COCO
if "35998355/rpn_R-50-C4_1x" in name:
# this one model is somehow different from others ..
type = "rpn"
else:
type = "generalized_rcnn"
# Detectron C2 models are stored in the structure defined in `C2_DETECTRON_PATH_FORMAT`.
url = ModelCatalog.C2_DETECTRON_PATH_FORMAT.format(
prefix=ModelCatalog.S3_C2_DETECTRON_PREFIX, url=url, type=type, dataset=dataset
)
return url
class ModelCatalogHandler(PathHandler):
"""
Resolve URL like catalog://.
"""
PREFIX = "catalog://"
def _get_supported_prefixes(self):
return [self.PREFIX]
def _get_local_path(self, path, **kwargs):
logger = logging.getLogger(__name__)
catalog_path = ModelCatalog.get(path[len(self.PREFIX) :])
logger.info("Catalog entry {} points to {}".format(path, catalog_path))
return PathManager.get_local_path(catalog_path, **kwargs)
def _open(self, path, mode="r", **kwargs):
return PathManager.open(self._get_local_path(path), mode, **kwargs)
PathManager.register_handler(ModelCatalogHandler()) | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/checkpoint/catalog.py | catalog.py |
import copy
import logging
import re
from typing import Dict, List
import torch
from tabulate import tabulate
def convert_basic_c2_names(original_keys):
"""
Apply some basic name conversion to names in C2 weights.
It only deals with typical backbone models.
Args:
original_keys (list[str]):
Returns:
list[str]: The same number of strings matching those in original_keys.
"""
layer_keys = copy.deepcopy(original_keys)
layer_keys = [
{"pred_b": "linear_b", "pred_w": "linear_w"}.get(k, k) for k in layer_keys
] # some hard-coded mappings
layer_keys = [k.replace("_", ".") for k in layer_keys]
layer_keys = [re.sub("\\.b$", ".bias", k) for k in layer_keys]
layer_keys = [re.sub("\\.w$", ".weight", k) for k in layer_keys]
# Uniform both bn and gn names to "norm"
layer_keys = [re.sub("bn\\.s$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.bias$", "norm.bias", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.rm", "norm.running_mean", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.running.mean$", "norm.running_mean", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.riv$", "norm.running_var", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.running.var$", "norm.running_var", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.gamma$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.beta$", "norm.bias", k) for k in layer_keys]
layer_keys = [re.sub("gn\\.s$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("gn\\.bias$", "norm.bias", k) for k in layer_keys]
# stem
layer_keys = [re.sub("^res\\.conv1\\.norm\\.", "conv1.norm.", k) for k in layer_keys]
# to avoid mis-matching with "conv1" in other components (e.g. detection head)
layer_keys = [re.sub("^conv1\\.", "stem.conv1.", k) for k in layer_keys]
# layer1-4 is used by torchvision, however we follow the C2 naming strategy (res2-5)
# layer_keys = [re.sub("^res2.", "layer1.", k) for k in layer_keys]
# layer_keys = [re.sub("^res3.", "layer2.", k) for k in layer_keys]
# layer_keys = [re.sub("^res4.", "layer3.", k) for k in layer_keys]
# layer_keys = [re.sub("^res5.", "layer4.", k) for k in layer_keys]
# blocks
layer_keys = [k.replace(".branch1.", ".shortcut.") for k in layer_keys]
layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys]
layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys]
layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys]
# DensePose substitutions
layer_keys = [re.sub("^body.conv.fcn", "body_conv_fcn", k) for k in layer_keys]
layer_keys = [k.replace("AnnIndex.lowres", "ann_index_lowres") for k in layer_keys]
layer_keys = [k.replace("Index.UV.lowres", "index_uv_lowres") for k in layer_keys]
layer_keys = [k.replace("U.lowres", "u_lowres") for k in layer_keys]
layer_keys = [k.replace("V.lowres", "v_lowres") for k in layer_keys]
return layer_keys
def convert_c2_detectron_names(weights):
"""
Map Caffe2 Detectron weight names to Detectron2 names.
Args:
weights (dict): name -> tensor
Returns:
dict: detectron2 names -> tensor
dict: detectron2 names -> C2 names
"""
logger = logging.getLogger(__name__)
logger.info("Renaming Caffe2 weights ......")
original_keys = sorted(weights.keys())
layer_keys = copy.deepcopy(original_keys)
layer_keys = convert_basic_c2_names(layer_keys)
# --------------------------------------------------------------------------
# RPN hidden representation conv
# --------------------------------------------------------------------------
# FPN case
# In the C2 model, the RPN hidden layer conv is defined for FPN level 2 and then
# shared for all other levels, hence the appearance of "fpn2"
layer_keys = [
k.replace("conv.rpn.fpn2", "proposal_generator.rpn_head.conv") for k in layer_keys
]
# Non-FPN case
layer_keys = [k.replace("conv.rpn", "proposal_generator.rpn_head.conv") for k in layer_keys]
# --------------------------------------------------------------------------
# RPN box transformation conv
# --------------------------------------------------------------------------
# FPN case (see note above about "fpn2")
layer_keys = [
k.replace("rpn.bbox.pred.fpn2", "proposal_generator.rpn_head.anchor_deltas")
for k in layer_keys
]
layer_keys = [
k.replace("rpn.cls.logits.fpn2", "proposal_generator.rpn_head.objectness_logits")
for k in layer_keys
]
# Non-FPN case
layer_keys = [
k.replace("rpn.bbox.pred", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys
]
layer_keys = [
k.replace("rpn.cls.logits", "proposal_generator.rpn_head.objectness_logits")
for k in layer_keys
]
# --------------------------------------------------------------------------
# Fast R-CNN box head
# --------------------------------------------------------------------------
layer_keys = [re.sub("^bbox\\.pred", "bbox_pred", k) for k in layer_keys]
layer_keys = [re.sub("^cls\\.score", "cls_score", k) for k in layer_keys]
layer_keys = [re.sub("^fc6\\.", "box_head.fc1.", k) for k in layer_keys]
layer_keys = [re.sub("^fc7\\.", "box_head.fc2.", k) for k in layer_keys]
# 4conv1fc head tensor names: head_conv1_w, head_conv1_gn_s
layer_keys = [re.sub("^head\\.conv", "box_head.conv", k) for k in layer_keys]
# --------------------------------------------------------------------------
# FPN lateral and output convolutions
# --------------------------------------------------------------------------
def fpn_map(name):
"""
Look for keys with the following patterns:
1) Starts with "fpn.inner."
Example: "fpn.inner.res2.2.sum.lateral.weight"
Meaning: These are lateral pathway convolutions
2) Starts with "fpn.res"
Example: "fpn.res2.2.sum.weight"
Meaning: These are FPN output convolutions
"""
splits = name.split(".")
norm = ".norm" if "norm" in splits else ""
if name.startswith("fpn.inner."):
# splits example: ['fpn', 'inner', 'res2', '2', 'sum', 'lateral', 'weight']
stage = int(splits[2][len("res") :])
return "fpn_lateral{}{}.{}".format(stage, norm, splits[-1])
elif name.startswith("fpn.res"):
# splits example: ['fpn', 'res2', '2', 'sum', 'weight']
stage = int(splits[1][len("res") :])
return "fpn_output{}{}.{}".format(stage, norm, splits[-1])
return name
layer_keys = [fpn_map(k) for k in layer_keys]
# --------------------------------------------------------------------------
# Mask R-CNN mask head
# --------------------------------------------------------------------------
# roi_heads.StandardROIHeads case
layer_keys = [k.replace(".[mask].fcn", "mask_head.mask_fcn") for k in layer_keys]
layer_keys = [re.sub("^\\.mask\\.fcn", "mask_head.mask_fcn", k) for k in layer_keys]
layer_keys = [k.replace("mask.fcn.logits", "mask_head.predictor") for k in layer_keys]
# roi_heads.Res5ROIHeads case
layer_keys = [k.replace("conv5.mask", "mask_head.deconv") for k in layer_keys]
# --------------------------------------------------------------------------
# Keypoint R-CNN head
# --------------------------------------------------------------------------
# interestingly, the keypoint head convs have blob names that are simply "conv_fcnX"
layer_keys = [k.replace("conv.fcn", "roi_heads.keypoint_head.conv_fcn") for k in layer_keys]
layer_keys = [
k.replace("kps.score.lowres", "roi_heads.keypoint_head.score_lowres") for k in layer_keys
]
layer_keys = [k.replace("kps.score.", "roi_heads.keypoint_head.score.") for k in layer_keys]
# --------------------------------------------------------------------------
# Done with replacements
# --------------------------------------------------------------------------
assert len(set(layer_keys)) == len(layer_keys)
assert len(original_keys) == len(layer_keys)
new_weights = {}
new_keys_to_original_keys = {}
for orig, renamed in zip(original_keys, layer_keys):
new_keys_to_original_keys[renamed] = orig
if renamed.startswith("bbox_pred.") or renamed.startswith("mask_head.predictor."):
# remove the meaningless prediction weight for background class
new_start_idx = 4 if renamed.startswith("bbox_pred.") else 1
new_weights[renamed] = weights[orig][new_start_idx:]
logger.info(
"Remove prediction weight for background class in {}. The shape changes from "
"{} to {}.".format(
renamed, tuple(weights[orig].shape), tuple(new_weights[renamed].shape)
)
)
elif renamed.startswith("cls_score."):
# move weights of bg class from original index 0 to last index
logger.info(
"Move classification weights for background class in {} from index 0 to "
"index {}.".format(renamed, weights[orig].shape[0] - 1)
)
new_weights[renamed] = torch.cat([weights[orig][1:], weights[orig][:1]])
else:
new_weights[renamed] = weights[orig]
return new_weights, new_keys_to_original_keys
# Note the current matching is not symmetric.
# it assumes model_state_dict will have longer names.
def align_and_update_state_dicts(model_state_dict, ckpt_state_dict, c2_conversion=True):
"""
Match names between the two state-dict, and returns a new chkpt_state_dict with names
converted to match model_state_dict with heuristics. The returned dict can be later
loaded with fvcore checkpointer.
If `c2_conversion==True`, `ckpt_state_dict` is assumed to be a Caffe2
model and will be renamed at first.
Strategy: suppose that the models that we will create will have prefixes appended
to each of its keys, for example due to an extra level of nesting that the original
pre-trained weights from ImageNet won't contain. For example, model.state_dict()
might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains
res2.conv1.weight. We thus want to match both parameters together.
For that, we look for each model weight, look among all loaded keys if there is one
that is a suffix of the current weight name, and use it if that's the case.
If multiple matches exist, take the one with longest size
of the corresponding name. For example, for the same model as before, the pretrained
weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case,
we want to match backbone[0].body.conv1.weight to conv1.weight, and
backbone[0].body.res2.conv1.weight to res2.conv1.weight.
"""
model_keys = sorted(model_state_dict.keys())
if c2_conversion:
ckpt_state_dict, original_keys = convert_c2_detectron_names(ckpt_state_dict)
# original_keys: the name in the original dict (before renaming)
else:
original_keys = {x: x for x in ckpt_state_dict.keys()}
ckpt_keys = sorted(ckpt_state_dict.keys())
def match(a, b):
# Matched ckpt_key should be a complete (starts with '.') suffix.
# For example, roi_heads.mesh_head.whatever_conv1 does not match conv1,
# but matches whatever_conv1 or mesh_head.whatever_conv1.
return a == b or a.endswith("." + b)
# get a matrix of string matches, where each (i, j) entry correspond to the size of the
# ckpt_key string, if it matches
match_matrix = [len(j) if match(i, j) else 0 for i in model_keys for j in ckpt_keys]
match_matrix = torch.as_tensor(match_matrix).view(len(model_keys), len(ckpt_keys))
# use the matched one with longest size in case of multiple matches
max_match_size, idxs = match_matrix.max(1)
# remove indices that correspond to no-match
idxs[max_match_size == 0] = -1
logger = logging.getLogger(__name__)
# matched_pairs (matched checkpoint key --> matched model key)
matched_keys = {}
result_state_dict = {}
for idx_model, idx_ckpt in enumerate(idxs.tolist()):
if idx_ckpt == -1:
continue
key_model = model_keys[idx_model]
key_ckpt = ckpt_keys[idx_ckpt]
value_ckpt = ckpt_state_dict[key_ckpt]
shape_in_model = model_state_dict[key_model].shape
if shape_in_model != value_ckpt.shape:
logger.warning(
"Shape of {} in checkpoint is {}, while shape of {} in model is {}.".format(
key_ckpt, value_ckpt.shape, key_model, shape_in_model
)
)
logger.warning(
"{} will not be loaded. Please double check and see if this is desired.".format(
key_ckpt
)
)
continue
assert key_model not in result_state_dict
result_state_dict[key_model] = value_ckpt
if key_ckpt in matched_keys: # already added to matched_keys
logger.error(
"Ambiguity found for {} in checkpoint!"
"It matches at least two keys in the model ({} and {}).".format(
key_ckpt, key_model, matched_keys[key_ckpt]
)
)
raise ValueError("Cannot match one checkpoint key to multiple keys in the model.")
matched_keys[key_ckpt] = key_model
# logging:
matched_model_keys = sorted(matched_keys.values())
if len(matched_model_keys) == 0:
logger.warning("No weights in checkpoint matched with model.")
return ckpt_state_dict
common_prefix = _longest_common_prefix(matched_model_keys)
rev_matched_keys = {v: k for k, v in matched_keys.items()}
original_keys = {k: original_keys[rev_matched_keys[k]] for k in matched_model_keys}
model_key_groups = _group_keys_by_module(matched_model_keys, original_keys)
table = []
memo = set()
for key_model in matched_model_keys:
if key_model in memo:
continue
if key_model in model_key_groups:
group = model_key_groups[key_model]
memo |= set(group)
shapes = [tuple(model_state_dict[k].shape) for k in group]
table.append(
(
_longest_common_prefix([k[len(common_prefix) :] for k in group]) + "*",
_group_str([original_keys[k] for k in group]),
" ".join([str(x).replace(" ", "") for x in shapes]),
)
)
else:
key_checkpoint = original_keys[key_model]
shape = str(tuple(model_state_dict[key_model].shape))
table.append((key_model[len(common_prefix) :], key_checkpoint, shape))
table_str = tabulate(
table, tablefmt="pipe", headers=["Names in Model", "Names in Checkpoint", "Shapes"]
)
logger.info(
"Following weights matched with "
+ (f"submodule {common_prefix[:-1]}" if common_prefix else "model")
+ ":\n"
+ table_str
)
unmatched_ckpt_keys = [k for k in ckpt_keys if k not in set(matched_keys.keys())]
for k in unmatched_ckpt_keys:
result_state_dict[k] = ckpt_state_dict[k]
return result_state_dict
def _group_keys_by_module(keys: List[str], original_names: Dict[str, str]):
"""
Params in the same submodule are grouped together.
Args:
keys: names of all parameters
original_names: mapping from parameter name to their name in the checkpoint
Returns:
dict[name -> all other names in the same group]
"""
def _submodule_name(key):
pos = key.rfind(".")
if pos < 0:
return None
prefix = key[: pos + 1]
return prefix
all_submodules = [_submodule_name(k) for k in keys]
all_submodules = [x for x in all_submodules if x]
all_submodules = sorted(all_submodules, key=len)
ret = {}
for prefix in all_submodules:
group = [k for k in keys if k.startswith(prefix)]
if len(group) <= 1:
continue
original_name_lcp = _longest_common_prefix_str([original_names[k] for k in group])
if len(original_name_lcp) == 0:
# don't group weights if original names don't share prefix
continue
for k in group:
if k in ret:
continue
ret[k] = group
return ret
def _longest_common_prefix(names: List[str]) -> str:
"""
["abc.zfg", "abc.zef"] -> "abc."
"""
names = [n.split(".") for n in names]
m1, m2 = min(names), max(names)
ret = [a for a, b in zip(m1, m2) if a == b]
ret = ".".join(ret) + "." if len(ret) else ""
return ret
def _longest_common_prefix_str(names: List[str]) -> str:
m1, m2 = min(names), max(names)
lcp = [a for a, b in zip(m1, m2) if a == b]
lcp = "".join(lcp)
return lcp
def _group_str(names: List[str]) -> str:
"""
Turn "common1", "common2", "common3" into "common{1,2,3}"
"""
lcp = _longest_common_prefix_str(names)
rest = [x[len(lcp) :] for x in names]
rest = "{" + ",".join(rest) + "}"
ret = lcp + rest
# add some simplification for BN specifically
ret = ret.replace("bn_{beta,running_mean,running_var,gamma}", "bn_*")
ret = ret.replace("bn_beta,bn_running_mean,bn_running_var,bn_gamma", "bn_*")
return ret | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/checkpoint/c2_model_loading.py | c2_model_loading.py |
import logging
from datetime import timedelta
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from detectron2.utils import comm
__all__ = ["DEFAULT_TIMEOUT", "launch"]
DEFAULT_TIMEOUT = timedelta(minutes=30)
def _find_free_port():
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Binding to port 0 will cause the OS to find an available port for us
sock.bind(("", 0))
port = sock.getsockname()[1]
sock.close()
# NOTE: there is still a chance the port could be taken by other processes.
return port
def launch(
main_func,
num_gpus_per_machine,
num_machines=1,
machine_rank=0,
dist_url=None,
args=(),
timeout=DEFAULT_TIMEOUT,
):
"""
Launch multi-gpu or distributed training.
This function must be called on all machines involved in the training.
It will spawn child processes (defined by ``num_gpus_per_machine``) on each machine.
Args:
main_func: a function that will be called by `main_func(*args)`
num_gpus_per_machine (int): number of GPUs per machine
num_machines (int): the total number of machines
machine_rank (int): the rank of this machine
dist_url (str): url to connect to for distributed jobs, including protocol
e.g. "tcp://127.0.0.1:8686".
Can be set to "auto" to automatically select a free port on localhost
timeout (timedelta): timeout of the distributed workers
args (tuple): arguments passed to main_func
"""
world_size = num_machines * num_gpus_per_machine
if world_size > 1:
# https://github.com/pytorch/pytorch/pull/14391
# TODO prctl in spawned processes
if dist_url == "auto":
assert num_machines == 1, "dist_url=auto not supported in multi-machine jobs."
port = _find_free_port()
dist_url = f"tcp://127.0.0.1:{port}"
if num_machines > 1 and dist_url.startswith("file://"):
logger = logging.getLogger(__name__)
logger.warning(
"file:// is not a reliable init_method in multi-machine jobs. Prefer tcp://"
)
mp.spawn(
_distributed_worker,
nprocs=num_gpus_per_machine,
args=(
main_func,
world_size,
num_gpus_per_machine,
machine_rank,
dist_url,
args,
timeout,
),
daemon=False,
)
else:
main_func(*args)
def _distributed_worker(
local_rank,
main_func,
world_size,
num_gpus_per_machine,
machine_rank,
dist_url,
args,
timeout=DEFAULT_TIMEOUT,
):
assert torch.cuda.is_available(), "cuda is not available. Please check your installation."
global_rank = machine_rank * num_gpus_per_machine + local_rank
try:
dist.init_process_group(
backend="NCCL",
init_method=dist_url,
world_size=world_size,
rank=global_rank,
timeout=timeout,
)
except Exception as e:
logger = logging.getLogger(__name__)
logger.error("Process group URL: {}".format(dist_url))
raise e
# Setup the local process group (which contains ranks within the same machine)
assert comm._LOCAL_PROCESS_GROUP is None
num_machines = world_size // num_gpus_per_machine
for i in range(num_machines):
ranks_on_i = list(range(i * num_gpus_per_machine, (i + 1) * num_gpus_per_machine))
pg = dist.new_group(ranks_on_i)
if i == machine_rank:
comm._LOCAL_PROCESS_GROUP = pg
assert num_gpus_per_machine <= torch.cuda.device_count()
torch.cuda.set_device(local_rank)
# synchronize is needed here to prevent a possible timeout after calling init_process_group
# See: https://github.com/facebookresearch/maskrcnn-benchmark/issues/172
comm.synchronize()
main_func(*args) | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/engine/launch.py | launch.py |
import argparse
import logging
import os
import sys
import weakref
from collections import OrderedDict
from typing import Optional
import torch
from fvcore.nn.precise_bn import get_bn_modules
from omegaconf import OmegaConf
from torch.nn.parallel import DistributedDataParallel
import detectron2.data.transforms as T
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import CfgNode, LazyConfig
from detectron2.data import (
MetadataCatalog,
build_detection_test_loader,
build_detection_train_loader,
)
from detectron2.evaluation import (
DatasetEvaluator,
inference_on_dataset,
print_csv_format,
verify_results,
)
from detectron2.modeling import build_model
from detectron2.solver import build_lr_scheduler, build_optimizer
from detectron2.utils import comm
from detectron2.utils.collect_env import collect_env_info
from detectron2.utils.env import seed_all_rng
from detectron2.utils.events import CommonMetricPrinter, JSONWriter, TensorboardXWriter
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import setup_logger
from . import hooks
from .train_loop import AMPTrainer, SimpleTrainer, TrainerBase
__all__ = [
"create_ddp_model",
"default_argument_parser",
"default_setup",
"default_writers",
"DefaultPredictor",
"DefaultTrainer",
]
def create_ddp_model(model, *, fp16_compression=False, **kwargs):
"""
Create a DistributedDataParallel model if there are >1 processes.
Args:
model: a torch.nn.Module
fp16_compression: add fp16 compression hooks to the ddp object.
See more at https://pytorch.org/docs/stable/ddp_comm_hooks.html#torch.distributed.algorithms.ddp_comm_hooks.default_hooks.fp16_compress_hook
kwargs: other arguments of :module:`torch.nn.parallel.DistributedDataParallel`.
""" # noqa
if comm.get_world_size() == 1:
return model
if "device_ids" not in kwargs:
kwargs["device_ids"] = [comm.get_local_rank()]
ddp = DistributedDataParallel(model, **kwargs)
if fp16_compression:
from torch.distributed.algorithms.ddp_comm_hooks import default as comm_hooks
ddp.register_comm_hook(state=None, hook=comm_hooks.fp16_compress_hook)
return ddp
def default_argument_parser(epilog=None):
"""
Create a parser with some common arguments used by detectron2 users.
Args:
epilog (str): epilog passed to ArgumentParser describing the usage.
Returns:
argparse.ArgumentParser:
"""
parser = argparse.ArgumentParser(
epilog=epilog
or f"""
Examples:
Run on single machine:
$ {sys.argv[0]} --num-gpus 8 --config-file cfg.yaml
Change some config options:
$ {sys.argv[0]} --config-file cfg.yaml MODEL.WEIGHTS /path/to/weight.pth SOLVER.BASE_LR 0.001
Run on multiple machines:
(machine0)$ {sys.argv[0]} --machine-rank 0 --num-machines 2 --dist-url <URL> [--other-flags]
(machine1)$ {sys.argv[0]} --machine-rank 1 --num-machines 2 --dist-url <URL> [--other-flags]
""",
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument("--config-file", default="", metavar="FILE", help="path to config file")
parser.add_argument(
"--resume",
action="store_true",
help="Whether to attempt to resume from the checkpoint directory. "
"See documentation of `DefaultTrainer.resume_or_load()` for what it means.",
)
parser.add_argument("--eval-only", action="store_true", help="perform evaluation only")
parser.add_argument("--num-gpus", type=int, default=1, help="number of gpus *per machine*")
parser.add_argument("--num-machines", type=int, default=1, help="total number of machines")
parser.add_argument(
"--machine-rank", type=int, default=0, help="the rank of this machine (unique per machine)"
)
# PyTorch still may leave orphan processes in multi-gpu training.
# Therefore we use a deterministic way to obtain port,
# so that users are aware of orphan processes by seeing the port occupied.
port = 2 ** 15 + 2 ** 14 + hash(os.getuid() if sys.platform != "win32" else 1) % 2 ** 14
parser.add_argument(
"--dist-url",
default="tcp://127.0.0.1:{}".format(port),
help="initialization URL for pytorch distributed backend. See "
"https://pytorch.org/docs/stable/distributed.html for details.",
)
parser.add_argument(
"opts",
help="""
Modify config options at the end of the command. For Yacs configs, use
space-separated "PATH.KEY VALUE" pairs.
For python-based LazyConfig, use "path.key=value".
""".strip(),
default=None,
nargs=argparse.REMAINDER,
)
return parser
def _try_get_key(cfg, *keys, default=None):
"""
Try select keys from cfg until the first key that exists. Otherwise return default.
"""
if isinstance(cfg, CfgNode):
cfg = OmegaConf.create(cfg.dump())
for k in keys:
none = object()
p = OmegaConf.select(cfg, k, default=none)
if p is not none:
return p
return default
def _highlight(code, filename):
try:
import pygments
except ImportError:
return code
from pygments.lexers import Python3Lexer, YamlLexer
from pygments.formatters import Terminal256Formatter
lexer = Python3Lexer() if filename.endswith(".py") else YamlLexer()
code = pygments.highlight(code, lexer, Terminal256Formatter(style="monokai"))
return code
def default_setup(cfg, args):
"""
Perform some basic common setups at the beginning of a job, including:
1. Set up the detectron2 logger
2. Log basic information about environment, cmdline arguments, and config
3. Backup the config to the output directory
Args:
cfg (CfgNode or omegaconf.DictConfig): the full config to be used
args (argparse.NameSpace): the command line arguments to be logged
"""
output_dir = _try_get_key(cfg, "OUTPUT_DIR", "output_dir", "train.output_dir")
if comm.is_main_process() and output_dir:
PathManager.mkdirs(output_dir)
rank = comm.get_rank()
setup_logger(output_dir, distributed_rank=rank, name="fvcore")
logger = setup_logger(output_dir, distributed_rank=rank)
logger.info("Rank of current process: {}. World size: {}".format(rank, comm.get_world_size()))
logger.info("Environment info:\n" + collect_env_info())
logger.info("Command line arguments: " + str(args))
if hasattr(args, "config_file") and args.config_file != "":
logger.info(
"Contents of args.config_file={}:\n{}".format(
args.config_file,
_highlight(PathManager.open(args.config_file, "r").read(), args.config_file),
)
)
if comm.is_main_process() and output_dir:
# Note: some of our scripts may expect the existence of
# config.yaml in output directory
path = os.path.join(output_dir, "config.yaml")
if isinstance(cfg, CfgNode):
logger.info("Running with full config:\n{}".format(_highlight(cfg.dump(), ".yaml")))
with PathManager.open(path, "w") as f:
f.write(cfg.dump())
else:
LazyConfig.save(cfg, path)
logger.info("Full config saved to {}".format(path))
# make sure each worker has a different, yet deterministic seed if specified
seed = _try_get_key(cfg, "SEED", "train.seed", default=-1)
seed_all_rng(None if seed < 0 else seed + rank)
# cudnn benchmark has large overhead. It shouldn't be used considering the small size of
# typical validation set.
if not (hasattr(args, "eval_only") and args.eval_only):
torch.backends.cudnn.benchmark = _try_get_key(
cfg, "CUDNN_BENCHMARK", "train.cudnn_benchmark", default=False
)
def default_writers(output_dir: str, max_iter: Optional[int] = None):
"""
Build a list of :class:`EventWriter` to be used.
It now consists of a :class:`CommonMetricPrinter`,
:class:`TensorboardXWriter` and :class:`JSONWriter`.
Args:
output_dir: directory to store JSON metrics and tensorboard events
max_iter: the total number of iterations
Returns:
list[EventWriter]: a list of :class:`EventWriter` objects.
"""
PathManager.mkdirs(output_dir)
return [
# It may not always print what you want to see, since it prints "common" metrics only.
CommonMetricPrinter(max_iter),
JSONWriter(os.path.join(output_dir, "metrics.json")),
TensorboardXWriter(output_dir),
]
class DefaultPredictor:
"""
Create a simple end-to-end predictor with the given config that runs on
single device for a single input image.
Compared to using the model directly, this class does the following additions:
1. Load checkpoint from `cfg.MODEL.WEIGHTS`.
2. Always take BGR image as the input and apply conversion defined by `cfg.INPUT.FORMAT`.
3. Apply resizing defined by `cfg.INPUT.{MIN,MAX}_SIZE_TEST`.
4. Take one input image and produce a single output, instead of a batch.
This is meant for simple demo purposes, so it does the above steps automatically.
This is not meant for benchmarks or running complicated inference logic.
If you'd like to do anything more complicated, please refer to its source code as
examples to build and use the model manually.
Attributes:
metadata (Metadata): the metadata of the underlying dataset, obtained from
cfg.DATASETS.TEST.
Examples:
::
pred = DefaultPredictor(cfg)
inputs = cv2.imread("input.jpg")
outputs = pred(inputs)
"""
def __init__(self, cfg):
self.cfg = cfg.clone() # cfg can be modified by model
self.model = build_model(self.cfg)
self.model.eval()
if len(cfg.DATASETS.TEST):
self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0])
checkpointer = DetectionCheckpointer(self.model)
checkpointer.load(cfg.MODEL.WEIGHTS)
self.aug = T.ResizeShortestEdge(
[cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST
)
self.input_format = cfg.INPUT.FORMAT
assert self.input_format in ["RGB", "BGR"], self.input_format
def __call__(self, original_image):
"""
Args:
original_image (np.ndarray): an image of shape (H, W, C) (in BGR order).
Returns:
predictions (dict):
the output of the model for one image only.
See :doc:`/tutorials/models` for details about the format.
"""
with torch.no_grad(): # https://github.com/sphinx-doc/sphinx/issues/4258
# Apply pre-processing to image.
if self.input_format == "RGB":
# whether the model expects BGR inputs or RGB
original_image = original_image[:, :, ::-1]
height, width = original_image.shape[:2]
image = self.aug.get_transform(original_image).apply_image(original_image)
image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))
inputs = {"image": image, "height": height, "width": width}
predictions = self.model([inputs])[0]
return predictions
class DefaultTrainer(TrainerBase):
"""
A trainer with default training logic. It does the following:
1. Create a :class:`SimpleTrainer` using model, optimizer, dataloader
defined by the given config. Create a LR scheduler defined by the config.
2. Load the last checkpoint or `cfg.MODEL.WEIGHTS`, if exists, when
`resume_or_load` is called.
3. Register a few common hooks defined by the config.
It is created to simplify the **standard model training workflow** and reduce code boilerplate
for users who only need the standard training workflow, with standard features.
It means this class makes *many assumptions* about your training logic that
may easily become invalid in a new research. In fact, any assumptions beyond those made in the
:class:`SimpleTrainer` are too much for research.
The code of this class has been annotated about restrictive assumptions it makes.
When they do not work for you, you're encouraged to:
1. Overwrite methods of this class, OR:
2. Use :class:`SimpleTrainer`, which only does minimal SGD training and
nothing else. You can then add your own hooks if needed. OR:
3. Write your own training loop similar to `tools/plain_train_net.py`.
See the :doc:`/tutorials/training` tutorials for more details.
Note that the behavior of this class, like other functions/classes in
this file, is not stable, since it is meant to represent the "common default behavior".
It is only guaranteed to work well with the standard models and training workflow in detectron2.
To obtain more stable behavior, write your own training logic with other public APIs.
Examples:
::
trainer = DefaultTrainer(cfg)
trainer.resume_or_load() # load last checkpoint or MODEL.WEIGHTS
trainer.train()
Attributes:
scheduler:
checkpointer (DetectionCheckpointer):
cfg (CfgNode):
"""
def __init__(self, cfg):
"""
Args:
cfg (CfgNode):
"""
super().__init__()
logger = logging.getLogger("detectron2")
if not logger.isEnabledFor(logging.INFO): # setup_logger is not called for d2
setup_logger()
cfg = DefaultTrainer.auto_scale_workers(cfg, comm.get_world_size())
# Assume these objects must be constructed in this order.
model = self.build_model(cfg)
optimizer = self.build_optimizer(cfg, model)
data_loader = self.build_train_loader(cfg)
model = create_ddp_model(model, broadcast_buffers=False)
self._trainer = (AMPTrainer if cfg.SOLVER.AMP.ENABLED else SimpleTrainer)(
model, data_loader, optimizer
)
self.scheduler = self.build_lr_scheduler(cfg, optimizer)
self.checkpointer = DetectionCheckpointer(
# Assume you want to save checkpoints together with logs/statistics
model,
cfg.OUTPUT_DIR,
trainer=weakref.proxy(self),
)
self.start_iter = 0
self.max_iter = cfg.SOLVER.MAX_ITER
self.cfg = cfg
self.register_hooks(self.build_hooks())
def resume_or_load(self, resume=True):
"""
If `resume==True` and `cfg.OUTPUT_DIR` contains the last checkpoint (defined by
a `last_checkpoint` file), resume from the file. Resuming means loading all
available states (eg. optimizer and scheduler) and update iteration counter
from the checkpoint. ``cfg.MODEL.WEIGHTS`` will not be used.
Otherwise, this is considered as an independent training. The method will load model
weights from the file `cfg.MODEL.WEIGHTS` (but will not load other states) and start
from iteration 0.
Args:
resume (bool): whether to do resume or not
"""
self.checkpointer.resume_or_load(self.cfg.MODEL.WEIGHTS, resume=resume)
if resume and self.checkpointer.has_checkpoint():
# The checkpoint stores the training iteration that just finished, thus we start
# at the next iteration
self.start_iter = self.iter + 1
def build_hooks(self):
"""
Build a list of default hooks, including timing, evaluation,
checkpointing, lr scheduling, precise BN, writing events.
Returns:
list[HookBase]:
"""
cfg = self.cfg.clone()
cfg.defrost()
cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN
ret = [
hooks.IterationTimer(),
hooks.LRScheduler(),
hooks.PreciseBN(
# Run at the same freq as (but before) evaluation.
cfg.TEST.EVAL_PERIOD,
self.model,
# Build a new data loader to not affect training
self.build_train_loader(cfg),
cfg.TEST.PRECISE_BN.NUM_ITER,
)
if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model)
else None,
]
# Do PreciseBN before checkpointer, because it updates the model and need to
# be saved by checkpointer.
# This is not always the best: if checkpointing has a different frequency,
# some checkpoints may have more precise statistics than others.
if comm.is_main_process():
ret.append(hooks.PeriodicCheckpointer(self.checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD))
def test_and_save_results():
self._last_eval_results = self.test(self.cfg, self.model)
return self._last_eval_results
# Do evaluation after checkpointer, because then if it fails,
# we can use the saved checkpoint to debug.
ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results))
if comm.is_main_process():
# Here the default print/log frequency of each writer is used.
# run writers in the end, so that evaluation metrics are written
ret.append(hooks.PeriodicWriter(self.build_writers(), period=20))
return ret
def build_writers(self):
"""
Build a list of writers to be used using :func:`default_writers()`.
If you'd like a different list of writers, you can overwrite it in
your trainer.
Returns:
list[EventWriter]: a list of :class:`EventWriter` objects.
"""
return default_writers(self.cfg.OUTPUT_DIR, self.max_iter)
def train(self):
"""
Run training.
Returns:
OrderedDict of results, if evaluation is enabled. Otherwise None.
"""
super().train(self.start_iter, self.max_iter)
if len(self.cfg.TEST.EXPECTED_RESULTS) and comm.is_main_process():
assert hasattr(
self, "_last_eval_results"
), "No evaluation results obtained during training!"
verify_results(self.cfg, self._last_eval_results)
return self._last_eval_results
def run_step(self):
self._trainer.iter = self.iter
self._trainer.run_step()
def state_dict(self):
ret = super().state_dict()
ret["_trainer"] = self._trainer.state_dict()
return ret
def load_state_dict(self, state_dict):
super().load_state_dict(state_dict)
self._trainer.load_state_dict(state_dict["_trainer"])
@classmethod
def build_model(cls, cfg):
"""
Returns:
torch.nn.Module:
It now calls :func:`detectron2.modeling.build_model`.
Overwrite it if you'd like a different model.
"""
model = build_model(cfg)
logger = logging.getLogger(__name__)
logger.info("Model:\n{}".format(model))
return model
@classmethod
def build_optimizer(cls, cfg, model):
"""
Returns:
torch.optim.Optimizer:
It now calls :func:`detectron2.solver.build_optimizer`.
Overwrite it if you'd like a different optimizer.
"""
return build_optimizer(cfg, model)
@classmethod
def build_lr_scheduler(cls, cfg, optimizer):
"""
It now calls :func:`detectron2.solver.build_lr_scheduler`.
Overwrite it if you'd like a different scheduler.
"""
return build_lr_scheduler(cfg, optimizer)
@classmethod
def build_train_loader(cls, cfg):
"""
Returns:
iterable
It now calls :func:`detectron2.data.build_detection_train_loader`.
Overwrite it if you'd like a different data loader.
"""
return build_detection_train_loader(cfg)
@classmethod
def build_test_loader(cls, cfg, dataset_name):
"""
Returns:
iterable
It now calls :func:`detectron2.data.build_detection_test_loader`.
Overwrite it if you'd like a different data loader.
"""
return build_detection_test_loader(cfg, dataset_name)
@classmethod
def build_evaluator(cls, cfg, dataset_name):
"""
Returns:
DatasetEvaluator or None
It is not implemented by default.
"""
raise NotImplementedError(
"""
If you want DefaultTrainer to automatically run evaluation,
please implement `build_evaluator()` in subclasses (see train_net.py for example).
Alternatively, you can call evaluation functions yourself (see Colab balloon tutorial for example).
"""
)
@classmethod
def test(cls, cfg, model, evaluators=None):
"""
Evaluate the given model. The given model is expected to already contain
weights to evaluate.
Args:
cfg (CfgNode):
model (nn.Module):
evaluators (list[DatasetEvaluator] or None): if None, will call
:meth:`build_evaluator`. Otherwise, must have the same length as
``cfg.DATASETS.TEST``.
Returns:
dict: a dict of result metrics
"""
logger = logging.getLogger(__name__)
if isinstance(evaluators, DatasetEvaluator):
evaluators = [evaluators]
if evaluators is not None:
assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format(
len(cfg.DATASETS.TEST), len(evaluators)
)
results = OrderedDict()
for idx, dataset_name in enumerate(cfg.DATASETS.TEST):
data_loader = cls.build_test_loader(cfg, dataset_name)
# When evaluators are passed in as arguments,
# implicitly assume that evaluators can be created before data_loader.
if evaluators is not None:
evaluator = evaluators[idx]
else:
try:
evaluator = cls.build_evaluator(cfg, dataset_name)
except NotImplementedError:
logger.warn(
"No evaluator found. Use `DefaultTrainer.test(evaluators=)`, "
"or implement its `build_evaluator` method."
)
results[dataset_name] = {}
continue
results_i = inference_on_dataset(model, data_loader, evaluator)
results[dataset_name] = results_i
if comm.is_main_process():
assert isinstance(
results_i, dict
), "Evaluator must return a dict on the main process. Got {} instead.".format(
results_i
)
logger.info("Evaluation results for {} in csv format:".format(dataset_name))
print_csv_format(results_i)
if len(results) == 1:
results = list(results.values())[0]
return results
@staticmethod
def auto_scale_workers(cfg, num_workers: int):
"""
When the config is defined for certain number of workers (according to
``cfg.SOLVER.REFERENCE_WORLD_SIZE``) that's different from the number of
workers currently in use, returns a new cfg where the total batch size
is scaled so that the per-GPU batch size stays the same as the
original ``IMS_PER_BATCH // REFERENCE_WORLD_SIZE``.
Other config options are also scaled accordingly:
* training steps and warmup steps are scaled inverse proportionally.
* learning rate are scaled proportionally, following :paper:`ImageNet in 1h`.
For example, with the original config like the following:
.. code-block:: yaml
IMS_PER_BATCH: 16
BASE_LR: 0.1
REFERENCE_WORLD_SIZE: 8
MAX_ITER: 5000
STEPS: (4000,)
CHECKPOINT_PERIOD: 1000
When this config is used on 16 GPUs instead of the reference number 8,
calling this method will return a new config with:
.. code-block:: yaml
IMS_PER_BATCH: 32
BASE_LR: 0.2
REFERENCE_WORLD_SIZE: 16
MAX_ITER: 2500
STEPS: (2000,)
CHECKPOINT_PERIOD: 500
Note that both the original config and this new config can be trained on 16 GPUs.
It's up to user whether to enable this feature (by setting ``REFERENCE_WORLD_SIZE``).
Returns:
CfgNode: a new config. Same as original if ``cfg.SOLVER.REFERENCE_WORLD_SIZE==0``.
"""
old_world_size = cfg.SOLVER.REFERENCE_WORLD_SIZE
if old_world_size == 0 or old_world_size == num_workers:
return cfg
cfg = cfg.clone()
frozen = cfg.is_frozen()
cfg.defrost()
assert (
cfg.SOLVER.IMS_PER_BATCH % old_world_size == 0
), "Invalid REFERENCE_WORLD_SIZE in config!"
scale = num_workers / old_world_size
bs = cfg.SOLVER.IMS_PER_BATCH = int(round(cfg.SOLVER.IMS_PER_BATCH * scale))
lr = cfg.SOLVER.BASE_LR = cfg.SOLVER.BASE_LR * scale
max_iter = cfg.SOLVER.MAX_ITER = int(round(cfg.SOLVER.MAX_ITER / scale))
warmup_iter = cfg.SOLVER.WARMUP_ITERS = int(round(cfg.SOLVER.WARMUP_ITERS / scale))
cfg.SOLVER.STEPS = tuple(int(round(s / scale)) for s in cfg.SOLVER.STEPS)
cfg.TEST.EVAL_PERIOD = int(round(cfg.TEST.EVAL_PERIOD / scale))
cfg.SOLVER.CHECKPOINT_PERIOD = int(round(cfg.SOLVER.CHECKPOINT_PERIOD / scale))
cfg.SOLVER.REFERENCE_WORLD_SIZE = num_workers # maintain invariant
logger = logging.getLogger(__name__)
logger.info(
f"Auto-scaling the config to batch_size={bs}, learning_rate={lr}, "
f"max_iter={max_iter}, warmup={warmup_iter}."
)
if frozen:
cfg.freeze()
return cfg
# Access basic attributes from the underlying trainer
for _attr in ["model", "data_loader", "optimizer"]:
setattr(
DefaultTrainer,
_attr,
property(
# getter
lambda self, x=_attr: getattr(self._trainer, x),
# setter
lambda self, value, x=_attr: setattr(self._trainer, x, value),
),
) | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/engine/defaults.py | defaults.py |
import logging
import numpy as np
import time
import weakref
from typing import List, Mapping, Optional
import torch
from torch.nn.parallel import DataParallel, DistributedDataParallel
import detectron2.utils.comm as comm
from detectron2.utils.events import EventStorage, get_event_storage
from detectron2.utils.logger import _log_api_usage
__all__ = ["HookBase", "TrainerBase", "SimpleTrainer", "AMPTrainer"]
class HookBase:
"""
Base class for hooks that can be registered with :class:`TrainerBase`.
Each hook can implement 4 methods. The way they are called is demonstrated
in the following snippet:
::
hook.before_train()
for iter in range(start_iter, max_iter):
hook.before_step()
trainer.run_step()
hook.after_step()
iter += 1
hook.after_train()
Notes:
1. In the hook method, users can access ``self.trainer`` to access more
properties about the context (e.g., model, current iteration, or config
if using :class:`DefaultTrainer`).
2. A hook that does something in :meth:`before_step` can often be
implemented equivalently in :meth:`after_step`.
If the hook takes non-trivial time, it is strongly recommended to
implement the hook in :meth:`after_step` instead of :meth:`before_step`.
The convention is that :meth:`before_step` should only take negligible time.
Following this convention will allow hooks that do care about the difference
between :meth:`before_step` and :meth:`after_step` (e.g., timer) to
function properly.
"""
trainer: "TrainerBase" = None
"""
A weak reference to the trainer object. Set by the trainer when the hook is registered.
"""
def before_train(self):
"""
Called before the first iteration.
"""
pass
def after_train(self):
"""
Called after the last iteration.
"""
pass
def before_step(self):
"""
Called before each iteration.
"""
pass
def after_step(self):
"""
Called after each iteration.
"""
pass
def state_dict(self):
"""
Hooks are stateless by default, but can be made checkpointable by
implementing `state_dict` and `load_state_dict`.
"""
return {}
class TrainerBase:
"""
Base class for iterative trainer with hooks.
The only assumption we made here is: the training runs in a loop.
A subclass can implement what the loop is.
We made no assumptions about the existence of dataloader, optimizer, model, etc.
Attributes:
iter(int): the current iteration.
start_iter(int): The iteration to start with.
By convention the minimum possible value is 0.
max_iter(int): The iteration to end training.
storage(EventStorage): An EventStorage that's opened during the course of training.
"""
def __init__(self) -> None:
self._hooks: List[HookBase] = []
self.iter: int = 0
self.start_iter: int = 0
self.max_iter: int
self.storage: EventStorage
_log_api_usage("trainer." + self.__class__.__name__)
def register_hooks(self, hooks: List[Optional[HookBase]]) -> None:
"""
Register hooks to the trainer. The hooks are executed in the order
they are registered.
Args:
hooks (list[Optional[HookBase]]): list of hooks
"""
hooks = [h for h in hooks if h is not None]
for h in hooks:
assert isinstance(h, HookBase)
# To avoid circular reference, hooks and trainer cannot own each other.
# This normally does not matter, but will cause memory leak if the
# involved objects contain __del__:
# See http://engineering.hearsaysocial.com/2013/06/16/circular-references-in-python/
h.trainer = weakref.proxy(self)
self._hooks.extend(hooks)
def train(self, start_iter: int, max_iter: int):
"""
Args:
start_iter, max_iter (int): See docs above
"""
logger = logging.getLogger(__name__)
logger.info("Starting training from iteration {}".format(start_iter))
self.iter = self.start_iter = start_iter
self.max_iter = max_iter
with EventStorage(start_iter) as self.storage:
try:
self.before_train()
for self.iter in range(start_iter, max_iter):
self.before_step()
self.run_step()
self.after_step()
# self.iter == max_iter can be used by `after_train` to
# tell whether the training successfully finished or failed
# due to exceptions.
self.iter += 1
except Exception:
logger.exception("Exception during training:")
raise
finally:
self.after_train()
def before_train(self):
for h in self._hooks:
h.before_train()
def after_train(self):
self.storage.iter = self.iter
for h in self._hooks:
h.after_train()
def before_step(self):
# Maintain the invariant that storage.iter == trainer.iter
# for the entire execution of each step
self.storage.iter = self.iter
for h in self._hooks:
h.before_step()
def after_step(self):
for h in self._hooks:
h.after_step()
def run_step(self):
raise NotImplementedError
def state_dict(self):
ret = {"iteration": self.iter}
hooks_state = {}
for h in self._hooks:
sd = h.state_dict()
if sd:
name = type(h).__qualname__
if name in hooks_state:
# TODO handle repetitive stateful hooks
continue
hooks_state[name] = sd
if hooks_state:
ret["hooks"] = hooks_state
return ret
def load_state_dict(self, state_dict):
logger = logging.getLogger(__name__)
self.iter = state_dict["iteration"]
for key, value in state_dict.get("hooks", {}).items():
for h in self._hooks:
try:
name = type(h).__qualname__
except AttributeError:
continue
if name == key:
h.load_state_dict(value)
break
else:
logger.warning(f"Cannot find the hook '{key}', its state_dict is ignored.")
class SimpleTrainer(TrainerBase):
"""
A simple trainer for the most common type of task:
single-cost single-optimizer single-data-source iterative optimization,
optionally using data-parallelism.
It assumes that every step, you:
1. Compute the loss with a data from the data_loader.
2. Compute the gradients with the above loss.
3. Update the model with the optimizer.
All other tasks during training (checkpointing, logging, evaluation, LR schedule)
are maintained by hooks, which can be registered by :meth:`TrainerBase.register_hooks`.
If you want to do anything fancier than this,
either subclass TrainerBase and implement your own `run_step`,
or write your own training loop.
"""
def __init__(self, model, data_loader, optimizer):
"""
Args:
model: a torch Module. Takes a data from data_loader and returns a
dict of losses.
data_loader: an iterable. Contains data to be used to call model.
optimizer: a torch optimizer.
"""
super().__init__()
"""
We set the model to training mode in the trainer.
However it's valid to train a model that's in eval mode.
If you want your model (or a submodule of it) to behave
like evaluation during training, you can overwrite its train() method.
"""
model.train()
self.model = model
self.data_loader = data_loader
self._data_loader_iter = iter(data_loader)
self.optimizer = optimizer
def run_step(self):
"""
Implement the standard training logic described above.
"""
assert self.model.training, "[SimpleTrainer] model was changed to eval mode!"
start = time.perf_counter()
"""
If you want to do something with the data, you can wrap the dataloader.
"""
data = next(self._data_loader_iter)
data_time = time.perf_counter() - start
"""
If you want to do something with the losses, you can wrap the model.
"""
loss_dict = self.model(data)
if isinstance(loss_dict, torch.Tensor):
losses = loss_dict
loss_dict = {"total_loss": loss_dict}
else:
losses = sum(loss_dict.values())
"""
If you need to accumulate gradients or do something similar, you can
wrap the optimizer with your custom `zero_grad()` method.
"""
self.optimizer.zero_grad()
losses.backward()
self._write_metrics(loss_dict, data_time)
"""
If you need gradient clipping/scaling or other processing, you can
wrap the optimizer with your custom `step()` method. But it is
suboptimal as explained in https://arxiv.org/abs/2006.15704 Sec 3.2.4
"""
self.optimizer.step()
def _write_metrics(
self,
loss_dict: Mapping[str, torch.Tensor],
data_time: float,
prefix: str = "",
) -> None:
SimpleTrainer.write_metrics(loss_dict, data_time, prefix)
@staticmethod
def write_metrics(
loss_dict: Mapping[str, torch.Tensor],
data_time: float,
prefix: str = "",
) -> None:
"""
Args:
loss_dict (dict): dict of scalar losses
data_time (float): time taken by the dataloader iteration
prefix (str): prefix for logging keys
"""
metrics_dict = {k: v.detach().cpu().item() for k, v in loss_dict.items()}
metrics_dict["data_time"] = data_time
# Gather metrics among all workers for logging
# This assumes we do DDP-style training, which is currently the only
# supported method in detectron2.
all_metrics_dict = comm.gather(metrics_dict)
if comm.is_main_process():
storage = get_event_storage()
# data_time among workers can have high variance. The actual latency
# caused by data_time is the maximum among workers.
data_time = np.max([x.pop("data_time") for x in all_metrics_dict])
storage.put_scalar("data_time", data_time)
# average the rest metrics
metrics_dict = {
k: np.mean([x[k] for x in all_metrics_dict]) for k in all_metrics_dict[0].keys()
}
total_losses_reduced = sum(metrics_dict.values())
if not np.isfinite(total_losses_reduced):
raise FloatingPointError(
f"Loss became infinite or NaN at iteration={storage.iter}!\n"
f"loss_dict = {metrics_dict}"
)
storage.put_scalar("{}total_loss".format(prefix), total_losses_reduced)
if len(metrics_dict) > 1:
storage.put_scalars(**metrics_dict)
def state_dict(self):
ret = super().state_dict()
ret["optimizer"] = self.optimizer.state_dict()
return ret
def load_state_dict(self, state_dict):
super().load_state_dict(state_dict)
self.optimizer.load_state_dict(state_dict["optimizer"])
class AMPTrainer(SimpleTrainer):
"""
Like :class:`SimpleTrainer`, but uses PyTorch's native automatic mixed precision
in the training loop.
"""
def __init__(self, model, data_loader, optimizer, grad_scaler=None):
"""
Args:
model, data_loader, optimizer: same as in :class:`SimpleTrainer`.
grad_scaler: torch GradScaler to automatically scale gradients.
"""
unsupported = "AMPTrainer does not support single-process multi-device training!"
if isinstance(model, DistributedDataParallel):
assert not (model.device_ids and len(model.device_ids) > 1), unsupported
assert not isinstance(model, DataParallel), unsupported
super().__init__(model, data_loader, optimizer)
if grad_scaler is None:
from torch.cuda.amp import GradScaler
grad_scaler = GradScaler()
self.grad_scaler = grad_scaler
def run_step(self):
"""
Implement the AMP training logic.
"""
assert self.model.training, "[AMPTrainer] model was changed to eval mode!"
assert torch.cuda.is_available(), "[AMPTrainer] CUDA is required for AMP training!"
from torch.cuda.amp import autocast
start = time.perf_counter()
data = next(self._data_loader_iter)
data_time = time.perf_counter() - start
with autocast():
loss_dict = self.model(data)
if isinstance(loss_dict, torch.Tensor):
losses = loss_dict
loss_dict = {"total_loss": loss_dict}
else:
losses = sum(loss_dict.values())
self.optimizer.zero_grad()
self.grad_scaler.scale(losses).backward()
self._write_metrics(loss_dict, data_time)
self.grad_scaler.step(self.optimizer)
self.grad_scaler.update()
def state_dict(self):
ret = super().state_dict()
ret["grad_scaler"] = self.grad_scaler.state_dict()
return ret
def load_state_dict(self, state_dict):
super().load_state_dict(state_dict)
self.grad_scaler.load_state_dict(state_dict["grad_scaler"]) | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/engine/train_loop.py | train_loop.py |
import datetime
import itertools
import logging
import math
import operator
import os
import tempfile
import time
import warnings
from collections import Counter
import torch
from fvcore.common.checkpoint import Checkpointer
from fvcore.common.checkpoint import PeriodicCheckpointer as _PeriodicCheckpointer
from fvcore.common.param_scheduler import ParamScheduler
from fvcore.common.timer import Timer
from fvcore.nn.precise_bn import get_bn_modules, update_bn_stats
import detectron2.utils.comm as comm
from detectron2.evaluation.testing import flatten_results_dict
from detectron2.solver import LRMultiplier
from detectron2.utils.events import EventStorage, EventWriter
from detectron2.utils.file_io import PathManager
from .train_loop import HookBase
__all__ = [
"CallbackHook",
"IterationTimer",
"PeriodicWriter",
"PeriodicCheckpointer",
"BestCheckpointer",
"LRScheduler",
"AutogradProfiler",
"EvalHook",
"PreciseBN",
"TorchProfiler",
"TorchMemoryStats",
]
"""
Implement some common hooks.
"""
class CallbackHook(HookBase):
"""
Create a hook using callback functions provided by the user.
"""
def __init__(self, *, before_train=None, after_train=None, before_step=None, after_step=None):
"""
Each argument is a function that takes one argument: the trainer.
"""
self._before_train = before_train
self._before_step = before_step
self._after_step = after_step
self._after_train = after_train
def before_train(self):
if self._before_train:
self._before_train(self.trainer)
def after_train(self):
if self._after_train:
self._after_train(self.trainer)
# The functions may be closures that hold reference to the trainer
# Therefore, delete them to avoid circular reference.
del self._before_train, self._after_train
del self._before_step, self._after_step
def before_step(self):
if self._before_step:
self._before_step(self.trainer)
def after_step(self):
if self._after_step:
self._after_step(self.trainer)
class IterationTimer(HookBase):
"""
Track the time spent for each iteration (each run_step call in the trainer).
Print a summary in the end of training.
This hook uses the time between the call to its :meth:`before_step`
and :meth:`after_step` methods.
Under the convention that :meth:`before_step` of all hooks should only
take negligible amount of time, the :class:`IterationTimer` hook should be
placed at the beginning of the list of hooks to obtain accurate timing.
"""
def __init__(self, warmup_iter=3):
"""
Args:
warmup_iter (int): the number of iterations at the beginning to exclude
from timing.
"""
self._warmup_iter = warmup_iter
self._step_timer = Timer()
self._start_time = time.perf_counter()
self._total_timer = Timer()
def before_train(self):
self._start_time = time.perf_counter()
self._total_timer.reset()
self._total_timer.pause()
def after_train(self):
logger = logging.getLogger(__name__)
total_time = time.perf_counter() - self._start_time
total_time_minus_hooks = self._total_timer.seconds()
hook_time = total_time - total_time_minus_hooks
num_iter = self.trainer.storage.iter + 1 - self.trainer.start_iter - self._warmup_iter
if num_iter > 0 and total_time_minus_hooks > 0:
# Speed is meaningful only after warmup
# NOTE this format is parsed by grep in some scripts
logger.info(
"Overall training speed: {} iterations in {} ({:.4f} s / it)".format(
num_iter,
str(datetime.timedelta(seconds=int(total_time_minus_hooks))),
total_time_minus_hooks / num_iter,
)
)
logger.info(
"Total training time: {} ({} on hooks)".format(
str(datetime.timedelta(seconds=int(total_time))),
str(datetime.timedelta(seconds=int(hook_time))),
)
)
def before_step(self):
self._step_timer.reset()
self._total_timer.resume()
def after_step(self):
# +1 because we're in after_step, the current step is done
# but not yet counted
iter_done = self.trainer.storage.iter - self.trainer.start_iter + 1
if iter_done >= self._warmup_iter:
sec = self._step_timer.seconds()
self.trainer.storage.put_scalars(time=sec)
else:
self._start_time = time.perf_counter()
self._total_timer.reset()
self._total_timer.pause()
class PeriodicWriter(HookBase):
"""
Write events to EventStorage (by calling ``writer.write()``) periodically.
It is executed every ``period`` iterations and after the last iteration.
Note that ``period`` does not affect how data is smoothed by each writer.
"""
def __init__(self, writers, period=20):
"""
Args:
writers (list[EventWriter]): a list of EventWriter objects
period (int):
"""
self._writers = writers
for w in writers:
assert isinstance(w, EventWriter), w
self._period = period
def after_step(self):
if (self.trainer.iter + 1) % self._period == 0 or (
self.trainer.iter == self.trainer.max_iter - 1
):
for writer in self._writers:
writer.write()
def after_train(self):
for writer in self._writers:
# If any new data is found (e.g. produced by other after_train),
# write them before closing
writer.write()
writer.close()
class PeriodicCheckpointer(_PeriodicCheckpointer, HookBase):
"""
Same as :class:`detectron2.checkpoint.PeriodicCheckpointer`, but as a hook.
Note that when used as a hook,
it is unable to save additional data other than what's defined
by the given `checkpointer`.
It is executed every ``period`` iterations and after the last iteration.
"""
def before_train(self):
self.max_iter = self.trainer.max_iter
def after_step(self):
# No way to use **kwargs
self.step(self.trainer.iter)
class BestCheckpointer(HookBase):
"""
Checkpoints best weights based off given metric.
This hook should be used in conjunction to and executed after the hook
that produces the metric, e.g. `EvalHook`.
"""
def __init__(
self,
eval_period: int,
checkpointer: Checkpointer,
val_metric: str,
mode: str = "max",
file_prefix: str = "model_best",
) -> None:
"""
Args:
eval_period (int): the period `EvalHook` is set to run.
checkpointer: the checkpointer object used to save checkpoints.
val_metric (str): validation metric to track for best checkpoint, e.g. "bbox/AP50"
mode (str): one of {'max', 'min'}. controls whether the chosen val metric should be
maximized or minimized, e.g. for "bbox/AP50" it should be "max"
file_prefix (str): the prefix of checkpoint's filename, defaults to "model_best"
"""
self._logger = logging.getLogger(__name__)
self._period = eval_period
self._val_metric = val_metric
assert mode in [
"max",
"min",
], f'Mode "{mode}" to `BestCheckpointer` is unknown. It should be one of {"max", "min"}.'
if mode == "max":
self._compare = operator.gt
else:
self._compare = operator.lt
self._checkpointer = checkpointer
self._file_prefix = file_prefix
self.best_metric = None
self.best_iter = None
def _update_best(self, val, iteration):
if math.isnan(val) or math.isinf(val):
return False
self.best_metric = val
self.best_iter = iteration
return True
def _best_checking(self):
metric_tuple = self.trainer.storage.latest().get(self._val_metric)
if metric_tuple is None:
self._logger.warning(
f"Given val metric {self._val_metric} does not seem to be computed/stored."
"Will not be checkpointing based on it."
)
return
else:
latest_metric, metric_iter = metric_tuple
if self.best_metric is None:
if self._update_best(latest_metric, metric_iter):
additional_state = {"iteration": metric_iter}
self._checkpointer.save(f"{self._file_prefix}", **additional_state)
self._logger.info(
f"Saved first model at {self.best_metric:0.5f} @ {self.best_iter} steps"
)
elif self._compare(latest_metric, self.best_metric):
additional_state = {"iteration": metric_iter}
self._checkpointer.save(f"{self._file_prefix}", **additional_state)
self._logger.info(
f"Saved best model as latest eval score for {self._val_metric} is "
f"{latest_metric:0.5f}, better than last best score "
f"{self.best_metric:0.5f} @ iteration {self.best_iter}."
)
self._update_best(latest_metric, metric_iter)
else:
self._logger.info(
f"Not saving as latest eval score for {self._val_metric} is {latest_metric:0.5f}, "
f"not better than best score {self.best_metric:0.5f} @ iteration {self.best_iter}."
)
def after_step(self):
# same conditions as `EvalHook`
next_iter = self.trainer.iter + 1
if (
self._period > 0
and next_iter % self._period == 0
and next_iter != self.trainer.max_iter
):
self._best_checking()
def after_train(self):
# same conditions as `EvalHook`
if self.trainer.iter + 1 >= self.trainer.max_iter:
self._best_checking()
class LRScheduler(HookBase):
"""
A hook which executes a torch builtin LR scheduler and summarizes the LR.
It is executed after every iteration.
"""
def __init__(self, optimizer=None, scheduler=None):
"""
Args:
optimizer (torch.optim.Optimizer):
scheduler (torch.optim.LRScheduler or fvcore.common.param_scheduler.ParamScheduler):
if a :class:`ParamScheduler` object, it defines the multiplier over the base LR
in the optimizer.
If any argument is not given, will try to obtain it from the trainer.
"""
self._optimizer = optimizer
self._scheduler = scheduler
def before_train(self):
self._optimizer = self._optimizer or self.trainer.optimizer
if isinstance(self.scheduler, ParamScheduler):
self._scheduler = LRMultiplier(
self._optimizer,
self.scheduler,
self.trainer.max_iter,
last_iter=self.trainer.iter - 1,
)
self._best_param_group_id = LRScheduler.get_best_param_group_id(self._optimizer)
@staticmethod
def get_best_param_group_id(optimizer):
# NOTE: some heuristics on what LR to summarize
# summarize the param group with most parameters
largest_group = max(len(g["params"]) for g in optimizer.param_groups)
if largest_group == 1:
# If all groups have one parameter,
# then find the most common initial LR, and use it for summary
lr_count = Counter([g["lr"] for g in optimizer.param_groups])
lr = lr_count.most_common()[0][0]
for i, g in enumerate(optimizer.param_groups):
if g["lr"] == lr:
return i
else:
for i, g in enumerate(optimizer.param_groups):
if len(g["params"]) == largest_group:
return i
def after_step(self):
lr = self._optimizer.param_groups[self._best_param_group_id]["lr"]
self.trainer.storage.put_scalar("lr", lr, smoothing_hint=False)
self.scheduler.step()
@property
def scheduler(self):
return self._scheduler or self.trainer.scheduler
def state_dict(self):
if isinstance(self.scheduler, torch.optim.lr_scheduler._LRScheduler):
return self.scheduler.state_dict()
return {}
def load_state_dict(self, state_dict):
if isinstance(self.scheduler, torch.optim.lr_scheduler._LRScheduler):
logger = logging.getLogger(__name__)
logger.info("Loading scheduler from state_dict ...")
self.scheduler.load_state_dict(state_dict)
class TorchProfiler(HookBase):
"""
A hook which runs `torch.profiler.profile`.
Examples:
::
hooks.TorchProfiler(
lambda trainer: 10 < trainer.iter < 20, self.cfg.OUTPUT_DIR
)
The above example will run the profiler for iteration 10~20 and dump
results to ``OUTPUT_DIR``. We did not profile the first few iterations
because they are typically slower than the rest.
The result files can be loaded in the ``chrome://tracing`` page in chrome browser,
and the tensorboard visualizations can be visualized using
``tensorboard --logdir OUTPUT_DIR/log``
"""
def __init__(self, enable_predicate, output_dir, *, activities=None, save_tensorboard=True):
"""
Args:
enable_predicate (callable[trainer -> bool]): a function which takes a trainer,
and returns whether to enable the profiler.
It will be called once every step, and can be used to select which steps to profile.
output_dir (str): the output directory to dump tracing files.
activities (iterable): same as in `torch.profiler.profile`.
save_tensorboard (bool): whether to save tensorboard visualizations at (output_dir)/log/
"""
self._enable_predicate = enable_predicate
self._activities = activities
self._output_dir = output_dir
self._save_tensorboard = save_tensorboard
def before_step(self):
if self._enable_predicate(self.trainer):
if self._save_tensorboard:
on_trace_ready = torch.profiler.tensorboard_trace_handler(
os.path.join(
self._output_dir,
"log",
"profiler-tensorboard-iter{}".format(self.trainer.iter),
),
f"worker{comm.get_rank()}",
)
else:
on_trace_ready = None
self._profiler = torch.profiler.profile(
activities=self._activities,
on_trace_ready=on_trace_ready,
record_shapes=True,
profile_memory=True,
with_stack=True,
with_flops=True,
)
self._profiler.__enter__()
else:
self._profiler = None
def after_step(self):
if self._profiler is None:
return
self._profiler.__exit__(None, None, None)
if not self._save_tensorboard:
PathManager.mkdirs(self._output_dir)
out_file = os.path.join(
self._output_dir, "profiler-trace-iter{}.json".format(self.trainer.iter)
)
if "://" not in out_file:
self._profiler.export_chrome_trace(out_file)
else:
# Support non-posix filesystems
with tempfile.TemporaryDirectory(prefix="detectron2_profiler") as d:
tmp_file = os.path.join(d, "tmp.json")
self._profiler.export_chrome_trace(tmp_file)
with open(tmp_file) as f:
content = f.read()
with PathManager.open(out_file, "w") as f:
f.write(content)
class AutogradProfiler(TorchProfiler):
"""
A hook which runs `torch.autograd.profiler.profile`.
Examples:
::
hooks.AutogradProfiler(
lambda trainer: 10 < trainer.iter < 20, self.cfg.OUTPUT_DIR
)
The above example will run the profiler for iteration 10~20 and dump
results to ``OUTPUT_DIR``. We did not profile the first few iterations
because they are typically slower than the rest.
The result files can be loaded in the ``chrome://tracing`` page in chrome browser.
Note:
When used together with NCCL on older version of GPUs,
autograd profiler may cause deadlock because it unnecessarily allocates
memory on every device it sees. The memory management calls, if
interleaved with NCCL calls, lead to deadlock on GPUs that do not
support ``cudaLaunchCooperativeKernelMultiDevice``.
"""
def __init__(self, enable_predicate, output_dir, *, use_cuda=True):
"""
Args:
enable_predicate (callable[trainer -> bool]): a function which takes a trainer,
and returns whether to enable the profiler.
It will be called once every step, and can be used to select which steps to profile.
output_dir (str): the output directory to dump tracing files.
use_cuda (bool): same as in `torch.autograd.profiler.profile`.
"""
warnings.warn("AutogradProfiler has been deprecated in favor of TorchProfiler.")
self._enable_predicate = enable_predicate
self._use_cuda = use_cuda
self._output_dir = output_dir
def before_step(self):
if self._enable_predicate(self.trainer):
self._profiler = torch.autograd.profiler.profile(use_cuda=self._use_cuda)
self._profiler.__enter__()
else:
self._profiler = None
class EvalHook(HookBase):
"""
Run an evaluation function periodically, and at the end of training.
It is executed every ``eval_period`` iterations and after the last iteration.
"""
def __init__(self, eval_period, eval_function, eval_after_train=True):
"""
Args:
eval_period (int): the period to run `eval_function`. Set to 0 to
not evaluate periodically (but still evaluate after the last iteration
if `eval_after_train` is True).
eval_function (callable): a function which takes no arguments, and
returns a nested dict of evaluation metrics.
eval_after_train (bool): whether to evaluate after the last iteration
Note:
This hook must be enabled in all or none workers.
If you would like only certain workers to perform evaluation,
give other workers a no-op function (`eval_function=lambda: None`).
"""
self._period = eval_period
self._func = eval_function
self._eval_after_train = eval_after_train
def _do_eval(self):
results = self._func()
if results:
assert isinstance(
results, dict
), "Eval function must return a dict. Got {} instead.".format(results)
flattened_results = flatten_results_dict(results)
for k, v in flattened_results.items():
try:
v = float(v)
except Exception as e:
raise ValueError(
"[EvalHook] eval_function should return a nested dict of float. "
"Got '{}: {}' instead.".format(k, v)
) from e
self.trainer.storage.put_scalars(**flattened_results, smoothing_hint=False)
# Evaluation may take different time among workers.
# A barrier make them start the next iteration together.
comm.synchronize()
def after_step(self):
next_iter = self.trainer.iter + 1
if self._period > 0 and next_iter % self._period == 0:
# do the last eval in after_train
if next_iter != self.trainer.max_iter:
self._do_eval()
def after_train(self):
# This condition is to prevent the eval from running after a failed training
if self._eval_after_train and self.trainer.iter + 1 >= self.trainer.max_iter:
self._do_eval()
# func is likely a closure that holds reference to the trainer
# therefore we clean it to avoid circular reference in the end
del self._func
class PreciseBN(HookBase):
"""
The standard implementation of BatchNorm uses EMA in inference, which is
sometimes suboptimal.
This class computes the true average of statistics rather than the moving average,
and put true averages to every BN layer in the given model.
It is executed every ``period`` iterations and after the last iteration.
"""
def __init__(self, period, model, data_loader, num_iter):
"""
Args:
period (int): the period this hook is run, or 0 to not run during training.
The hook will always run in the end of training.
model (nn.Module): a module whose all BN layers in training mode will be
updated by precise BN.
Note that user is responsible for ensuring the BN layers to be
updated are in training mode when this hook is triggered.
data_loader (iterable): it will produce data to be run by `model(data)`.
num_iter (int): number of iterations used to compute the precise
statistics.
"""
self._logger = logging.getLogger(__name__)
if len(get_bn_modules(model)) == 0:
self._logger.info(
"PreciseBN is disabled because model does not contain BN layers in training mode."
)
self._disabled = True
return
self._model = model
self._data_loader = data_loader
self._num_iter = num_iter
self._period = period
self._disabled = False
self._data_iter = None
def after_step(self):
next_iter = self.trainer.iter + 1
is_final = next_iter == self.trainer.max_iter
if is_final or (self._period > 0 and next_iter % self._period == 0):
self.update_stats()
def update_stats(self):
"""
Update the model with precise statistics. Users can manually call this method.
"""
if self._disabled:
return
if self._data_iter is None:
self._data_iter = iter(self._data_loader)
def data_loader():
for num_iter in itertools.count(1):
if num_iter % 100 == 0:
self._logger.info(
"Running precise-BN ... {}/{} iterations.".format(num_iter, self._num_iter)
)
# This way we can reuse the same iterator
yield next(self._data_iter)
with EventStorage(): # capture events in a new storage to discard them
self._logger.info(
"Running precise-BN for {} iterations... ".format(self._num_iter)
+ "Note that this could produce different statistics every time."
)
update_bn_stats(self._model, data_loader(), self._num_iter)
class TorchMemoryStats(HookBase):
"""
Writes pytorch's cuda memory statistics periodically.
"""
def __init__(self, period=20, max_runs=10):
"""
Args:
period (int): Output stats each 'period' iterations
max_runs (int): Stop the logging after 'max_runs'
"""
self._logger = logging.getLogger(__name__)
self._period = period
self._max_runs = max_runs
self._runs = 0
def after_step(self):
if self._runs > self._max_runs:
return
if (self.trainer.iter + 1) % self._period == 0 or (
self.trainer.iter == self.trainer.max_iter - 1
):
if torch.cuda.is_available():
max_reserved_mb = torch.cuda.max_memory_reserved() / 1024.0 / 1024.0
reserved_mb = torch.cuda.memory_reserved() / 1024.0 / 1024.0
max_allocated_mb = torch.cuda.max_memory_allocated() / 1024.0 / 1024.0
allocated_mb = torch.cuda.memory_allocated() / 1024.0 / 1024.0
self._logger.info(
(
" iter: {} "
" max_reserved_mem: {:.0f}MB "
" reserved_mem: {:.0f}MB "
" max_allocated_mem: {:.0f}MB "
" allocated_mem: {:.0f}MB "
).format(
self.trainer.iter,
max_reserved_mb,
reserved_mb,
max_allocated_mb,
allocated_mb,
)
)
self._runs += 1
if self._runs == self._max_runs:
mem_summary = torch.cuda.memory_summary()
self._logger.info("\n" + mem_summary)
torch.cuda.reset_peak_memory_stats() | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/engine/hooks.py | hooks.py |
import logging
import math
from bisect import bisect_right
from typing import List
import torch
from fvcore.common.param_scheduler import (
CompositeParamScheduler,
ConstantParamScheduler,
LinearParamScheduler,
ParamScheduler,
)
logger = logging.getLogger(__name__)
class WarmupParamScheduler(CompositeParamScheduler):
"""
Add an initial warmup stage to another scheduler.
"""
def __init__(
self,
scheduler: ParamScheduler,
warmup_factor: float,
warmup_length: float,
warmup_method: str = "linear",
):
"""
Args:
scheduler: warmup will be added at the beginning of this scheduler
warmup_factor: the factor w.r.t the initial value of ``scheduler``, e.g. 0.001
warmup_length: the relative length (in [0, 1]) of warmup steps w.r.t the entire
training, e.g. 0.01
warmup_method: one of "linear" or "constant"
"""
end_value = scheduler(warmup_length) # the value to reach when warmup ends
start_value = warmup_factor * scheduler(0.0)
if warmup_method == "constant":
warmup = ConstantParamScheduler(start_value)
elif warmup_method == "linear":
warmup = LinearParamScheduler(start_value, end_value)
else:
raise ValueError("Unknown warmup method: {}".format(warmup_method))
super().__init__(
[warmup, scheduler],
interval_scaling=["rescaled", "fixed"],
lengths=[warmup_length, 1 - warmup_length],
)
class LRMultiplier(torch.optim.lr_scheduler._LRScheduler):
"""
A LRScheduler which uses fvcore :class:`ParamScheduler` to multiply the
learning rate of each param in the optimizer.
Every step, the learning rate of each parameter becomes its initial value
multiplied by the output of the given :class:`ParamScheduler`.
The absolute learning rate value of each parameter can be different.
This scheduler can be used as long as the relative scale among them do
not change during training.
Examples:
::
LRMultiplier(
opt,
WarmupParamScheduler(
MultiStepParamScheduler(
[1, 0.1, 0.01],
milestones=[60000, 80000],
num_updates=90000,
), 0.001, 100 / 90000
),
max_iter=90000
)
"""
# NOTES: in the most general case, every LR can use its own scheduler.
# Supporting this requires interaction with the optimizer when its parameter
# group is initialized. For example, classyvision implements its own optimizer
# that allows different schedulers for every parameter group.
# To avoid this complexity, we use this class to support the most common cases
# where the relative scale among all LRs stay unchanged during training. In this
# case we only need a total of one scheduler that defines the relative LR multiplier.
def __init__(
self,
optimizer: torch.optim.Optimizer,
multiplier: ParamScheduler,
max_iter: int,
last_iter: int = -1,
):
"""
Args:
optimizer, last_iter: See ``torch.optim.lr_scheduler._LRScheduler``.
``last_iter`` is the same as ``last_epoch``.
multiplier: a fvcore ParamScheduler that defines the multiplier on
every LR of the optimizer
max_iter: the total number of training iterations
"""
if not isinstance(multiplier, ParamScheduler):
raise ValueError(
"_LRMultiplier(multiplier=) must be an instance of fvcore "
f"ParamScheduler. Got {multiplier} instead."
)
self._multiplier = multiplier
self._max_iter = max_iter
super().__init__(optimizer, last_epoch=last_iter)
def state_dict(self):
# fvcore schedulers are stateless. Only keep pytorch scheduler states
return {"base_lrs": self.base_lrs, "last_epoch": self.last_epoch}
def get_lr(self) -> List[float]:
multiplier = self._multiplier(self.last_epoch / self._max_iter)
return [base_lr * multiplier for base_lr in self.base_lrs]
"""
Content below is no longer needed!
"""
# NOTE: PyTorch's LR scheduler interface uses names that assume the LR changes
# only on epoch boundaries. We typically use iteration based schedules instead.
# As a result, "epoch" (e.g., as in self.last_epoch) should be understood to mean
# "iteration" instead.
# FIXME: ideally this would be achieved with a CombinedLRScheduler, separating
# MultiStepLR with WarmupLR but the current LRScheduler design doesn't allow it.
class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler):
def __init__(
self,
optimizer: torch.optim.Optimizer,
milestones: List[int],
gamma: float = 0.1,
warmup_factor: float = 0.001,
warmup_iters: int = 1000,
warmup_method: str = "linear",
last_epoch: int = -1,
):
logger.warning(
"WarmupMultiStepLR is deprecated! Use LRMultipilier with fvcore ParamScheduler instead!"
)
if not list(milestones) == sorted(milestones):
raise ValueError(
"Milestones should be a list of" " increasing integers. Got {}", milestones
)
self.milestones = milestones
self.gamma = gamma
self.warmup_factor = warmup_factor
self.warmup_iters = warmup_iters
self.warmup_method = warmup_method
super().__init__(optimizer, last_epoch)
def get_lr(self) -> List[float]:
warmup_factor = _get_warmup_factor_at_iter(
self.warmup_method, self.last_epoch, self.warmup_iters, self.warmup_factor
)
return [
base_lr * warmup_factor * self.gamma ** bisect_right(self.milestones, self.last_epoch)
for base_lr in self.base_lrs
]
def _compute_values(self) -> List[float]:
# The new interface
return self.get_lr()
class WarmupCosineLR(torch.optim.lr_scheduler._LRScheduler):
def __init__(
self,
optimizer: torch.optim.Optimizer,
max_iters: int,
warmup_factor: float = 0.001,
warmup_iters: int = 1000,
warmup_method: str = "linear",
last_epoch: int = -1,
):
logger.warning(
"WarmupCosineLR is deprecated! Use LRMultipilier with fvcore ParamScheduler instead!"
)
self.max_iters = max_iters
self.warmup_factor = warmup_factor
self.warmup_iters = warmup_iters
self.warmup_method = warmup_method
super().__init__(optimizer, last_epoch)
def get_lr(self) -> List[float]:
warmup_factor = _get_warmup_factor_at_iter(
self.warmup_method, self.last_epoch, self.warmup_iters, self.warmup_factor
)
# Different definitions of half-cosine with warmup are possible. For
# simplicity we multiply the standard half-cosine schedule by the warmup
# factor. An alternative is to start the period of the cosine at warmup_iters
# instead of at 0. In the case that warmup_iters << max_iters the two are
# very close to each other.
return [
base_lr
* warmup_factor
* 0.5
* (1.0 + math.cos(math.pi * self.last_epoch / self.max_iters))
for base_lr in self.base_lrs
]
def _compute_values(self) -> List[float]:
# The new interface
return self.get_lr()
def _get_warmup_factor_at_iter(
method: str, iter: int, warmup_iters: int, warmup_factor: float
) -> float:
"""
Return the learning rate warmup factor at a specific iteration.
See :paper:`ImageNet in 1h` for more details.
Args:
method (str): warmup method; either "constant" or "linear".
iter (int): iteration at which to calculate the warmup factor.
warmup_iters (int): the number of warmup iterations.
warmup_factor (float): the base warmup factor (the meaning changes according
to the method used).
Returns:
float: the effective warmup factor at the given iteration.
"""
if iter >= warmup_iters:
return 1.0
if method == "constant":
return warmup_factor
elif method == "linear":
alpha = iter / warmup_iters
return warmup_factor * (1 - alpha) + alpha
else:
raise ValueError("Unknown warmup method: {}".format(method)) | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/solver/lr_scheduler.py | lr_scheduler.py |
import copy
import itertools
import logging
from collections import defaultdict
from enum import Enum
from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Type, Union
import torch
from fvcore.common.param_scheduler import CosineParamScheduler, MultiStepParamScheduler
from detectron2.config import CfgNode
from .lr_scheduler import LRMultiplier, WarmupParamScheduler
_GradientClipperInput = Union[torch.Tensor, Iterable[torch.Tensor]]
_GradientClipper = Callable[[_GradientClipperInput], None]
class GradientClipType(Enum):
VALUE = "value"
NORM = "norm"
def _create_gradient_clipper(cfg: CfgNode) -> _GradientClipper:
"""
Creates gradient clipping closure to clip by value or by norm,
according to the provided config.
"""
cfg = copy.deepcopy(cfg)
def clip_grad_norm(p: _GradientClipperInput):
torch.nn.utils.clip_grad_norm_(p, cfg.CLIP_VALUE, cfg.NORM_TYPE)
def clip_grad_value(p: _GradientClipperInput):
torch.nn.utils.clip_grad_value_(p, cfg.CLIP_VALUE)
_GRADIENT_CLIP_TYPE_TO_CLIPPER = {
GradientClipType.VALUE: clip_grad_value,
GradientClipType.NORM: clip_grad_norm,
}
return _GRADIENT_CLIP_TYPE_TO_CLIPPER[GradientClipType(cfg.CLIP_TYPE)]
def _generate_optimizer_class_with_gradient_clipping(
optimizer: Type[torch.optim.Optimizer],
*,
per_param_clipper: Optional[_GradientClipper] = None,
global_clipper: Optional[_GradientClipper] = None,
) -> Type[torch.optim.Optimizer]:
"""
Dynamically creates a new type that inherits the type of a given instance
and overrides the `step` method to add gradient clipping
"""
assert (
per_param_clipper is None or global_clipper is None
), "Not allowed to use both per-parameter clipping and global clipping"
def optimizer_wgc_step(self, closure=None):
if per_param_clipper is not None:
for group in self.param_groups:
for p in group["params"]:
per_param_clipper(p)
else:
# global clipper for future use with detr
# (https://github.com/facebookresearch/detr/pull/287)
all_params = itertools.chain(*[g["params"] for g in self.param_groups])
global_clipper(all_params)
super(type(self), self).step(closure)
OptimizerWithGradientClip = type(
optimizer.__name__ + "WithGradientClip",
(optimizer,),
{"step": optimizer_wgc_step},
)
return OptimizerWithGradientClip
def maybe_add_gradient_clipping(
cfg: CfgNode, optimizer: Type[torch.optim.Optimizer]
) -> Type[torch.optim.Optimizer]:
"""
If gradient clipping is enabled through config options, wraps the existing
optimizer type to become a new dynamically created class OptimizerWithGradientClip
that inherits the given optimizer and overrides the `step` method to
include gradient clipping.
Args:
cfg: CfgNode, configuration options
optimizer: type. A subclass of torch.optim.Optimizer
Return:
type: either the input `optimizer` (if gradient clipping is disabled), or
a subclass of it with gradient clipping included in the `step` method.
"""
if not cfg.SOLVER.CLIP_GRADIENTS.ENABLED:
return optimizer
if isinstance(optimizer, torch.optim.Optimizer):
optimizer_type = type(optimizer)
else:
assert issubclass(optimizer, torch.optim.Optimizer), optimizer
optimizer_type = optimizer
grad_clipper = _create_gradient_clipper(cfg.SOLVER.CLIP_GRADIENTS)
OptimizerWithGradientClip = _generate_optimizer_class_with_gradient_clipping(
optimizer_type, per_param_clipper=grad_clipper
)
if isinstance(optimizer, torch.optim.Optimizer):
optimizer.__class__ = OptimizerWithGradientClip # a bit hacky, not recommended
return optimizer
else:
return OptimizerWithGradientClip
def build_optimizer(cfg: CfgNode, model: torch.nn.Module) -> torch.optim.Optimizer:
"""
Build an optimizer from config.
"""
params = get_default_optimizer_params(
model,
base_lr=cfg.SOLVER.BASE_LR,
weight_decay_norm=cfg.SOLVER.WEIGHT_DECAY_NORM,
bias_lr_factor=cfg.SOLVER.BIAS_LR_FACTOR,
weight_decay_bias=cfg.SOLVER.WEIGHT_DECAY_BIAS,
)
return maybe_add_gradient_clipping(cfg, torch.optim.SGD)(
params,
lr=cfg.SOLVER.BASE_LR,
momentum=cfg.SOLVER.MOMENTUM,
nesterov=cfg.SOLVER.NESTEROV,
weight_decay=cfg.SOLVER.WEIGHT_DECAY,
)
def get_default_optimizer_params(
model: torch.nn.Module,
base_lr: Optional[float] = None,
weight_decay: Optional[float] = None,
weight_decay_norm: Optional[float] = None,
bias_lr_factor: Optional[float] = 1.0,
weight_decay_bias: Optional[float] = None,
overrides: Optional[Dict[str, Dict[str, float]]] = None,
) -> List[Dict[str, Any]]:
"""
Get default param list for optimizer, with support for a few types of
overrides. If no overrides needed, this is equivalent to `model.parameters()`.
Args:
base_lr: lr for every group by default. Can be omitted to use the one in optimizer.
weight_decay: weight decay for every group by default. Can be omitted to use the one
in optimizer.
weight_decay_norm: override weight decay for params in normalization layers
bias_lr_factor: multiplier of lr for bias parameters.
weight_decay_bias: override weight decay for bias parameters
overrides: if not `None`, provides values for optimizer hyperparameters
(LR, weight decay) for module parameters with a given name; e.g.
``{"embedding": {"lr": 0.01, "weight_decay": 0.1}}`` will set the LR and
weight decay values for all module parameters named `embedding`.
For common detection models, ``weight_decay_norm`` is the only option
needed to be set. ``bias_lr_factor,weight_decay_bias`` are legacy settings
from Detectron1 that are not found useful.
Example:
::
torch.optim.SGD(get_default_optimizer_params(model, weight_decay_norm=0),
lr=0.01, weight_decay=1e-4, momentum=0.9)
"""
if overrides is None:
overrides = {}
defaults = {}
if base_lr is not None:
defaults["lr"] = base_lr
if weight_decay is not None:
defaults["weight_decay"] = weight_decay
bias_overrides = {}
if bias_lr_factor is not None and bias_lr_factor != 1.0:
# NOTE: unlike Detectron v1, we now by default make bias hyperparameters
# exactly the same as regular weights.
if base_lr is None:
raise ValueError("bias_lr_factor requires base_lr")
bias_overrides["lr"] = base_lr * bias_lr_factor
if weight_decay_bias is not None:
bias_overrides["weight_decay"] = weight_decay_bias
if len(bias_overrides):
if "bias" in overrides:
raise ValueError("Conflicting overrides for 'bias'")
overrides["bias"] = bias_overrides
norm_module_types = (
torch.nn.BatchNorm1d,
torch.nn.BatchNorm2d,
torch.nn.BatchNorm3d,
torch.nn.SyncBatchNorm,
# NaiveSyncBatchNorm inherits from BatchNorm2d
torch.nn.GroupNorm,
torch.nn.InstanceNorm1d,
torch.nn.InstanceNorm2d,
torch.nn.InstanceNorm3d,
torch.nn.LayerNorm,
torch.nn.LocalResponseNorm,
)
params: List[Dict[str, Any]] = []
memo: Set[torch.nn.parameter.Parameter] = set()
for module in model.modules():
for module_param_name, value in module.named_parameters(recurse=False):
if not value.requires_grad:
continue
# Avoid duplicating parameters
if value in memo:
continue
memo.add(value)
hyperparams = copy.copy(defaults)
if isinstance(module, norm_module_types) and weight_decay_norm is not None:
hyperparams["weight_decay"] = weight_decay_norm
hyperparams.update(overrides.get(module_param_name, {}))
params.append({"params": [value], **hyperparams})
return reduce_param_groups(params)
def _expand_param_groups(params: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
# Transform parameter groups into per-parameter structure.
# Later items in `params` can overwrite parameters set in previous items.
ret = defaultdict(dict)
for item in params:
assert "params" in item
cur_params = {x: y for x, y in item.items() if x != "params"}
for param in item["params"]:
ret[param].update({"params": [param], **cur_params})
return list(ret.values())
def reduce_param_groups(params: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
# Reorganize the parameter groups and merge duplicated groups.
# The number of parameter groups needs to be as small as possible in order
# to efficiently use the PyTorch multi-tensor optimizer. Therefore instead
# of using a parameter_group per single parameter, we reorganize the
# parameter groups and merge duplicated groups. This approach speeds
# up multi-tensor optimizer significantly.
params = _expand_param_groups(params)
groups = defaultdict(list) # re-group all parameter groups by their hyperparams
for item in params:
cur_params = tuple((x, y) for x, y in item.items() if x != "params")
groups[cur_params].extend(item["params"])
ret = []
for param_keys, param_values in groups.items():
cur = {kv[0]: kv[1] for kv in param_keys}
cur["params"] = param_values
ret.append(cur)
return ret
def build_lr_scheduler(
cfg: CfgNode, optimizer: torch.optim.Optimizer
) -> torch.optim.lr_scheduler._LRScheduler:
"""
Build a LR scheduler from config.
"""
name = cfg.SOLVER.LR_SCHEDULER_NAME
if name == "WarmupMultiStepLR":
steps = [x for x in cfg.SOLVER.STEPS if x <= cfg.SOLVER.MAX_ITER]
if len(steps) != len(cfg.SOLVER.STEPS):
logger = logging.getLogger(__name__)
logger.warning(
"SOLVER.STEPS contains values larger than SOLVER.MAX_ITER. "
"These values will be ignored."
)
sched = MultiStepParamScheduler(
values=[cfg.SOLVER.GAMMA ** k for k in range(len(steps) + 1)],
milestones=steps,
num_updates=cfg.SOLVER.MAX_ITER,
)
elif name == "WarmupCosineLR":
end_value = cfg.SOLVER.BASE_LR_END / cfg.SOLVER.BASE_LR
assert end_value >= 0.0 and end_value <= 1.0, end_value
sched = CosineParamScheduler(1, end_value)
else:
raise ValueError("Unknown LR scheduler: {}".format(name))
sched = WarmupParamScheduler(
sched,
cfg.SOLVER.WARMUP_FACTOR,
min(cfg.SOLVER.WARMUP_ITERS / cfg.SOLVER.MAX_ITER, 1.0),
cfg.SOLVER.WARMUP_METHOD,
)
return LRMultiplier(optimizer, multiplier=sched, max_iter=cfg.SOLVER.MAX_ITER) | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/solver/build.py | build.py |
from typing import List
import torch
from detectron2.layers import nonzero_tuple
# TODO: the name is too general
class Matcher(object):
"""
This class assigns to each predicted "element" (e.g., a box) a ground-truth
element. Each predicted element will have exactly zero or one matches; each
ground-truth element may be matched to zero or more predicted elements.
The matching is determined by the MxN match_quality_matrix, that characterizes
how well each (ground-truth, prediction)-pair match each other. For example,
if the elements are boxes, this matrix may contain box intersection-over-union
overlap values.
The matcher returns (a) a vector of length N containing the index of the
ground-truth element m in [0, M) that matches to prediction n in [0, N).
(b) a vector of length N containing the labels for each prediction.
"""
def __init__(
self, thresholds: List[float], labels: List[int], allow_low_quality_matches: bool = False
):
"""
Args:
thresholds (list): a list of thresholds used to stratify predictions
into levels.
labels (list): a list of values to label predictions belonging at
each level. A label can be one of {-1, 0, 1} signifying
{ignore, negative class, positive class}, respectively.
allow_low_quality_matches (bool): if True, produce additional matches
for predictions with maximum match quality lower than high_threshold.
See set_low_quality_matches_ for more details.
For example,
thresholds = [0.3, 0.5]
labels = [0, -1, 1]
All predictions with iou < 0.3 will be marked with 0 and
thus will be considered as false positives while training.
All predictions with 0.3 <= iou < 0.5 will be marked with -1 and
thus will be ignored.
All predictions with 0.5 <= iou will be marked with 1 and
thus will be considered as true positives.
"""
# Add -inf and +inf to first and last position in thresholds
thresholds = thresholds[:]
assert thresholds[0] > 0
thresholds.insert(0, -float("inf"))
thresholds.append(float("inf"))
# Currently torchscript does not support all + generator
assert all([low <= high for (low, high) in zip(thresholds[:-1], thresholds[1:])])
assert all([l in [-1, 0, 1] for l in labels])
assert len(labels) == len(thresholds) - 1
self.thresholds = thresholds
self.labels = labels
self.allow_low_quality_matches = allow_low_quality_matches
def __call__(self, match_quality_matrix):
"""
Args:
match_quality_matrix (Tensor[float]): an MxN tensor, containing the
pairwise quality between M ground-truth elements and N predicted
elements. All elements must be >= 0 (due to the us of `torch.nonzero`
for selecting indices in :meth:`set_low_quality_matches_`).
Returns:
matches (Tensor[int64]): a vector of length N, where matches[i] is a matched
ground-truth index in [0, M)
match_labels (Tensor[int8]): a vector of length N, where pred_labels[i] indicates
whether a prediction is a true or false positive or ignored
"""
assert match_quality_matrix.dim() == 2
if match_quality_matrix.numel() == 0:
default_matches = match_quality_matrix.new_full(
(match_quality_matrix.size(1),), 0, dtype=torch.int64
)
# When no gt boxes exist, we define IOU = 0 and therefore set labels
# to `self.labels[0]`, which usually defaults to background class 0
# To choose to ignore instead, can make labels=[-1,0,-1,1] + set appropriate thresholds
default_match_labels = match_quality_matrix.new_full(
(match_quality_matrix.size(1),), self.labels[0], dtype=torch.int8
)
return default_matches, default_match_labels
assert torch.all(match_quality_matrix >= 0)
# match_quality_matrix is M (gt) x N (predicted)
# Max over gt elements (dim 0) to find best gt candidate for each prediction
matched_vals, matches = match_quality_matrix.max(dim=0)
match_labels = matches.new_full(matches.size(), 1, dtype=torch.int8)
for (l, low, high) in zip(self.labels, self.thresholds[:-1], self.thresholds[1:]):
low_high = (matched_vals >= low) & (matched_vals < high)
match_labels[low_high] = l
if self.allow_low_quality_matches:
self.set_low_quality_matches_(match_labels, match_quality_matrix)
return matches, match_labels
def set_low_quality_matches_(self, match_labels, match_quality_matrix):
"""
Produce additional matches for predictions that have only low-quality matches.
Specifically, for each ground-truth G find the set of predictions that have
maximum overlap with it (including ties); for each prediction in that set, if
it is unmatched, then match it to the ground-truth G.
This function implements the RPN assignment case (i) in Sec. 3.1.2 of
:paper:`Faster R-CNN`.
"""
# For each gt, find the prediction with which it has highest quality
highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1)
# Find the highest quality match available, even if it is low, including ties.
# Note that the matches qualities must be positive due to the use of
# `torch.nonzero`.
_, pred_inds_with_highest_quality = nonzero_tuple(
match_quality_matrix == highest_quality_foreach_gt[:, None]
)
# If an anchor was labeled positive only due to a low-quality match
# with gt_A, but it has larger overlap with gt_B, it's matched index will still be gt_B.
# This follows the implementation in Detectron, and is found to have no significant impact.
match_labels[pred_inds_with_highest_quality] = 1 | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/modeling/matcher.py | matcher.py |
import math
from typing import List
import torch
from torch import nn
from torchvision.ops import RoIPool
from detectron2.layers import ROIAlign, ROIAlignRotated, cat, nonzero_tuple, shapes_to_tensor
from detectron2.structures import Boxes
"""
To export ROIPooler to torchscript, in this file, variables that should be annotated with
`Union[List[Boxes], List[RotatedBoxes]]` are only annotated with `List[Boxes]`.
TODO: Correct these annotations when torchscript support `Union`.
https://github.com/pytorch/pytorch/issues/41412
"""
__all__ = ["ROIPooler"]
def assign_boxes_to_levels(
box_lists: List[Boxes],
min_level: int,
max_level: int,
canonical_box_size: int,
canonical_level: int,
):
"""
Map each box in `box_lists` to a feature map level index and return the assignment
vector.
Args:
box_lists (list[Boxes] | list[RotatedBoxes]): A list of N Boxes or N RotatedBoxes,
where N is the number of images in the batch.
min_level (int): Smallest feature map level index. The input is considered index 0,
the output of stage 1 is index 1, and so.
max_level (int): Largest feature map level index.
canonical_box_size (int): A canonical box size in pixels (sqrt(box area)).
canonical_level (int): The feature map level index on which a canonically-sized box
should be placed.
Returns:
A tensor of length M, where M is the total number of boxes aggregated over all
N batch images. The memory layout corresponds to the concatenation of boxes
from all images. Each element is the feature map index, as an offset from
`self.min_level`, for the corresponding box (so value i means the box is at
`self.min_level + i`).
"""
box_sizes = torch.sqrt(cat([boxes.area() for boxes in box_lists]))
# Eqn.(1) in FPN paper
level_assignments = torch.floor(
canonical_level + torch.log2(box_sizes / canonical_box_size + 1e-8)
)
# clamp level to (min, max), in case the box size is too large or too small
# for the available feature maps
level_assignments = torch.clamp(level_assignments, min=min_level, max=max_level)
return level_assignments.to(torch.int64) - min_level
def convert_boxes_to_pooler_format(box_lists: List[Boxes]):
"""
Convert all boxes in `box_lists` to the low-level format used by ROI pooling ops
(see description under Returns).
Args:
box_lists (list[Boxes] | list[RotatedBoxes]):
A list of N Boxes or N RotatedBoxes, where N is the number of images in the batch.
Returns:
When input is list[Boxes]:
A tensor of shape (M, 5), where M is the total number of boxes aggregated over all
N batch images.
The 5 columns are (batch index, x0, y0, x1, y1), where batch index
is the index in [0, N) identifying which batch image the box with corners at
(x0, y0, x1, y1) comes from.
When input is list[RotatedBoxes]:
A tensor of shape (M, 6), where M is the total number of boxes aggregated over all
N batch images.
The 6 columns are (batch index, x_ctr, y_ctr, width, height, angle_degrees),
where batch index is the index in [0, N) identifying which batch image the
rotated box (x_ctr, y_ctr, width, height, angle_degrees) comes from.
"""
boxes = torch.cat([x.tensor for x in box_lists], dim=0)
# __len__ returns Tensor in tracing.
sizes = shapes_to_tensor([x.__len__() for x in box_lists], device=boxes.device)
indices = torch.repeat_interleave(
torch.arange(len(box_lists), dtype=boxes.dtype, device=boxes.device), sizes
)
return cat([indices[:, None], boxes], dim=1)
class ROIPooler(nn.Module):
"""
Region of interest feature map pooler that supports pooling from one or more
feature maps.
"""
def __init__(
self,
output_size,
scales,
sampling_ratio,
pooler_type,
canonical_box_size=224,
canonical_level=4,
):
"""
Args:
output_size (int, tuple[int] or list[int]): output size of the pooled region,
e.g., 14 x 14. If tuple or list is given, the length must be 2.
scales (list[float]): The scale for each low-level pooling op relative to
the input image. For a feature map with stride s relative to the input
image, scale is defined as 1/s. The stride must be power of 2.
When there are multiple scales, they must form a pyramid, i.e. they must be
a monotically decreasing geometric sequence with a factor of 1/2.
sampling_ratio (int): The `sampling_ratio` parameter for the ROIAlign op.
pooler_type (string): Name of the type of pooling operation that should be applied.
For instance, "ROIPool" or "ROIAlignV2".
canonical_box_size (int): A canonical box size in pixels (sqrt(box area)). The default
is heuristically defined as 224 pixels in the FPN paper (based on ImageNet
pre-training).
canonical_level (int): The feature map level index from which a canonically-sized box
should be placed. The default is defined as level 4 (stride=16) in the FPN paper,
i.e., a box of size 224x224 will be placed on the feature with stride=16.
The box placement for all boxes will be determined from their sizes w.r.t
canonical_box_size. For example, a box whose area is 4x that of a canonical box
should be used to pool features from feature level ``canonical_level+1``.
Note that the actual input feature maps given to this module may not have
sufficiently many levels for the input boxes. If the boxes are too large or too
small for the input feature maps, the closest level will be used.
"""
super().__init__()
if isinstance(output_size, int):
output_size = (output_size, output_size)
assert len(output_size) == 2
assert isinstance(output_size[0], int) and isinstance(output_size[1], int)
self.output_size = output_size
if pooler_type == "ROIAlign":
self.level_poolers = nn.ModuleList(
ROIAlign(
output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=False
)
for scale in scales
)
elif pooler_type == "ROIAlignV2":
self.level_poolers = nn.ModuleList(
ROIAlign(
output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=True
)
for scale in scales
)
elif pooler_type == "ROIPool":
self.level_poolers = nn.ModuleList(
RoIPool(output_size, spatial_scale=scale) for scale in scales
)
elif pooler_type == "ROIAlignRotated":
self.level_poolers = nn.ModuleList(
ROIAlignRotated(output_size, spatial_scale=scale, sampling_ratio=sampling_ratio)
for scale in scales
)
else:
raise ValueError("Unknown pooler type: {}".format(pooler_type))
# Map scale (defined as 1 / stride) to its feature map level under the
# assumption that stride is a power of 2.
min_level = -(math.log2(scales[0]))
max_level = -(math.log2(scales[-1]))
assert math.isclose(min_level, int(min_level)) and math.isclose(
max_level, int(max_level)
), "Featuremap stride is not power of 2!"
self.min_level = int(min_level)
self.max_level = int(max_level)
assert (
len(scales) == self.max_level - self.min_level + 1
), "[ROIPooler] Sizes of input featuremaps do not form a pyramid!"
assert 0 <= self.min_level and self.min_level <= self.max_level
self.canonical_level = canonical_level
assert canonical_box_size > 0
self.canonical_box_size = canonical_box_size
def forward(self, x: List[torch.Tensor], box_lists: List[Boxes]):
"""
Args:
x (list[Tensor]): A list of feature maps of NCHW shape, with scales matching those
used to construct this module.
box_lists (list[Boxes] | list[RotatedBoxes]):
A list of N Boxes or N RotatedBoxes, where N is the number of images in the batch.
The box coordinates are defined on the original image and
will be scaled by the `scales` argument of :class:`ROIPooler`.
Returns:
Tensor:
A tensor of shape (M, C, output_size, output_size) where M is the total number of
boxes aggregated over all N batch images and C is the number of channels in `x`.
"""
num_level_assignments = len(self.level_poolers)
assert isinstance(x, list) and isinstance(
box_lists, list
), "Arguments to pooler must be lists"
assert (
len(x) == num_level_assignments
), "unequal value, num_level_assignments={}, but x is list of {} Tensors".format(
num_level_assignments, len(x)
)
assert len(box_lists) == x[0].size(
0
), "unequal value, x[0] batch dim 0 is {}, but box_list has length {}".format(
x[0].size(0), len(box_lists)
)
if len(box_lists) == 0:
return torch.zeros(
(0, x[0].shape[1]) + self.output_size, device=x[0].device, dtype=x[0].dtype
)
pooler_fmt_boxes = convert_boxes_to_pooler_format(box_lists)
if num_level_assignments == 1:
return self.level_poolers[0](x[0], pooler_fmt_boxes)
level_assignments = assign_boxes_to_levels(
box_lists, self.min_level, self.max_level, self.canonical_box_size, self.canonical_level
)
num_boxes = pooler_fmt_boxes.size(0)
num_channels = x[0].shape[1]
output_size = self.output_size[0]
dtype, device = x[0].dtype, x[0].device
output = torch.zeros(
(num_boxes, num_channels, output_size, output_size), dtype=dtype, device=device
)
for level, pooler in enumerate(self.level_poolers):
inds = nonzero_tuple(level_assignments == level)[0]
pooler_fmt_boxes_level = pooler_fmt_boxes[inds]
# Use index_put_ instead of advance indexing, to avoid pytorch/issues/49852
output.index_put_((inds,), pooler(x[level], pooler_fmt_boxes_level))
return output | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/modeling/poolers.py | poolers.py |
import torch
from detectron2.layers import nonzero_tuple
__all__ = ["subsample_labels"]
def subsample_labels(
labels: torch.Tensor, num_samples: int, positive_fraction: float, bg_label: int
):
"""
Return `num_samples` (or fewer, if not enough found)
random samples from `labels` which is a mixture of positives & negatives.
It will try to return as many positives as possible without
exceeding `positive_fraction * num_samples`, and then try to
fill the remaining slots with negatives.
Args:
labels (Tensor): (N, ) label vector with values:
* -1: ignore
* bg_label: background ("negative") class
* otherwise: one or more foreground ("positive") classes
num_samples (int): The total number of labels with value >= 0 to return.
Values that are not sampled will be filled with -1 (ignore).
positive_fraction (float): The number of subsampled labels with values > 0
is `min(num_positives, int(positive_fraction * num_samples))`. The number
of negatives sampled is `min(num_negatives, num_samples - num_positives_sampled)`.
In order words, if there are not enough positives, the sample is filled with
negatives. If there are also not enough negatives, then as many elements are
sampled as is possible.
bg_label (int): label index of background ("negative") class.
Returns:
pos_idx, neg_idx (Tensor):
1D vector of indices. The total length of both is `num_samples` or fewer.
"""
positive = nonzero_tuple((labels != -1) & (labels != bg_label))[0]
negative = nonzero_tuple(labels == bg_label)[0]
num_pos = int(num_samples * positive_fraction)
# protect against not enough positive examples
num_pos = min(positive.numel(), num_pos)
num_neg = num_samples - num_pos
# protect against not enough negative examples
num_neg = min(negative.numel(), num_neg)
# randomly select positive and negative examples
perm1 = torch.randperm(positive.numel(), device=positive.device)[:num_pos]
perm2 = torch.randperm(negative.numel(), device=negative.device)[:num_neg]
pos_idx = positive[perm1]
neg_idx = negative[perm2]
return pos_idx, neg_idx | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/modeling/sampling.py | sampling.py |
import itertools
import logging
import numpy as np
from collections import OrderedDict
from collections.abc import Mapping
from typing import Dict, List, Optional, Tuple, Union
import torch
from omegaconf import DictConfig, OmegaConf
from torch import Tensor, nn
from detectron2.layers import ShapeSpec
from detectron2.structures import BitMasks, Boxes, ImageList, Instances
from detectron2.utils.events import get_event_storage
from .backbone import Backbone
logger = logging.getLogger(__name__)
def _to_container(cfg):
"""
mmdet will assert the type of dict/list.
So convert omegaconf objects to dict/list.
"""
if isinstance(cfg, DictConfig):
cfg = OmegaConf.to_container(cfg, resolve=True)
from mmcv.utils import ConfigDict
return ConfigDict(cfg)
class MMDetBackbone(Backbone):
"""
Wrapper of mmdetection backbones to use in detectron2.
mmdet backbones produce list/tuple of tensors, while detectron2 backbones
produce a dict of tensors. This class wraps the given backbone to produce
output in detectron2's convention, so it can be used in place of detectron2
backbones.
"""
def __init__(
self,
backbone: Union[nn.Module, Mapping],
neck: Union[nn.Module, Mapping, None] = None,
*,
output_shapes: List[ShapeSpec],
output_names: Optional[List[str]] = None,
):
"""
Args:
backbone: either a backbone module or a mmdet config dict that defines a
backbone. The backbone takes a 4D image tensor and returns a
sequence of tensors.
neck: either a backbone module or a mmdet config dict that defines a
neck. The neck takes outputs of backbone and returns a
sequence of tensors. If None, no neck is used.
pretrained_backbone: defines the backbone weights that can be loaded by
mmdet, such as "torchvision://resnet50".
output_shapes: shape for every output of the backbone (or neck, if given).
stride and channels are often needed.
output_names: names for every output of the backbone (or neck, if given).
By default, will use "out0", "out1", ...
"""
super().__init__()
if isinstance(backbone, Mapping):
from mmdet.models import build_backbone
backbone = build_backbone(_to_container(backbone))
self.backbone = backbone
if isinstance(neck, Mapping):
from mmdet.models import build_neck
neck = build_neck(_to_container(neck))
self.neck = neck
# "Neck" weights, if any, are part of neck itself. This is the interface
# of mmdet so we follow it. Reference:
# https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/two_stage.py
logger.info("Initializing mmdet backbone weights...")
self.backbone.init_weights()
# train() in mmdet modules is non-trivial, and has to be explicitly
# called. Reference:
# https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/backbones/resnet.py
self.backbone.train()
if self.neck is not None:
logger.info("Initializing mmdet neck weights ...")
if isinstance(self.neck, nn.Sequential):
for m in self.neck:
m.init_weights()
else:
self.neck.init_weights()
self.neck.train()
self._output_shapes = output_shapes
if not output_names:
output_names = [f"out{i}" for i in range(len(output_shapes))]
self._output_names = output_names
def forward(self, x) -> Dict[str, Tensor]:
outs = self.backbone(x)
if self.neck is not None:
outs = self.neck(outs)
assert isinstance(
outs, (list, tuple)
), "mmdet backbone should return a list/tuple of tensors!"
if len(outs) != len(self._output_shapes):
raise ValueError(
"Length of output_shapes does not match outputs from the mmdet backbone: "
f"{len(outs)} != {len(self._output_shapes)}"
)
return {k: v for k, v in zip(self._output_names, outs)}
def output_shape(self) -> Dict[str, ShapeSpec]:
return {k: v for k, v in zip(self._output_names, self._output_shapes)}
class MMDetDetector(nn.Module):
"""
Wrapper of a mmdetection detector model, for detection and instance segmentation.
Input/output formats of this class follow detectron2's convention, so a
mmdetection model can be trained and evaluated in detectron2.
"""
def __init__(
self,
detector: Union[nn.Module, Mapping],
*,
# Default is 32 regardless of model:
# https://github.com/open-mmlab/mmdetection/tree/master/configs/_base_/datasets
size_divisibility=32,
pixel_mean: Tuple[float],
pixel_std: Tuple[float],
):
"""
Args:
detector: a mmdet detector, or a mmdet config dict that defines a detector.
size_divisibility: pad input images to multiple of this number
pixel_mean: per-channel mean to normalize input image
pixel_std: per-channel stddev to normalize input image
"""
super().__init__()
if isinstance(detector, Mapping):
from mmdet.models import build_detector
detector = build_detector(_to_container(detector))
self.detector = detector
self.size_divisibility = size_divisibility
self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False)
self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False)
assert (
self.pixel_mean.shape == self.pixel_std.shape
), f"{self.pixel_mean} and {self.pixel_std} have different shapes!"
def forward(self, batched_inputs: List[Dict[str, torch.Tensor]]):
images = [x["image"].to(self.device) for x in batched_inputs]
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(images, size_divisibility=self.size_divisibility).tensor
metas = []
rescale = {"height" in x for x in batched_inputs}
if len(rescale) != 1:
raise ValueError("Some inputs have original height/width, but some don't!")
rescale = list(rescale)[0]
output_shapes = []
for input in batched_inputs:
meta = {}
c, h, w = input["image"].shape
meta["img_shape"] = meta["ori_shape"] = (h, w, c)
if rescale:
scale_factor = np.array(
[w / input["width"], h / input["height"]] * 2, dtype="float32"
)
ori_shape = (input["height"], input["width"])
output_shapes.append(ori_shape)
meta["ori_shape"] = ori_shape + (c,)
else:
scale_factor = 1.0
output_shapes.append((h, w))
meta["scale_factor"] = scale_factor
meta["flip"] = False
padh, padw = images.shape[-2:]
meta["pad_shape"] = (padh, padw, c)
metas.append(meta)
if self.training:
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
if gt_instances[0].has("gt_masks"):
from mmdet.core import PolygonMasks as mm_PolygonMasks, BitmapMasks as mm_BitMasks
def convert_mask(m, shape):
# mmdet mask format
if isinstance(m, BitMasks):
return mm_BitMasks(m.tensor.cpu().numpy(), shape[0], shape[1])
else:
return mm_PolygonMasks(m.polygons, shape[0], shape[1])
gt_masks = [convert_mask(x.gt_masks, x.image_size) for x in gt_instances]
losses_and_metrics = self.detector.forward_train(
images,
metas,
[x.gt_boxes.tensor for x in gt_instances],
[x.gt_classes for x in gt_instances],
gt_masks=gt_masks,
)
else:
losses_and_metrics = self.detector.forward_train(
images,
metas,
[x.gt_boxes.tensor for x in gt_instances],
[x.gt_classes for x in gt_instances],
)
return _parse_losses(losses_and_metrics)
else:
results = self.detector.simple_test(images, metas, rescale=rescale)
results = [
{"instances": _convert_mmdet_result(r, shape)}
for r, shape in zip(results, output_shapes)
]
return results
@property
def device(self):
return self.pixel_mean.device
# Reference: show_result() in
# https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/base.py
def _convert_mmdet_result(result, shape: Tuple[int, int]) -> Instances:
if isinstance(result, tuple):
bbox_result, segm_result = result
if isinstance(segm_result, tuple):
segm_result = segm_result[0]
else:
bbox_result, segm_result = result, None
bboxes = torch.from_numpy(np.vstack(bbox_result)) # Nx5
bboxes, scores = bboxes[:, :4], bboxes[:, -1]
labels = [
torch.full((bbox.shape[0],), i, dtype=torch.int32) for i, bbox in enumerate(bbox_result)
]
labels = torch.cat(labels)
inst = Instances(shape)
inst.pred_boxes = Boxes(bboxes)
inst.scores = scores
inst.pred_classes = labels
if segm_result is not None and len(labels) > 0:
segm_result = list(itertools.chain(*segm_result))
segm_result = [torch.from_numpy(x) if isinstance(x, np.ndarray) else x for x in segm_result]
segm_result = torch.stack(segm_result, dim=0)
inst.pred_masks = segm_result
return inst
# reference: https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/base.py
def _parse_losses(losses: Dict[str, Tensor]) -> Dict[str, Tensor]:
log_vars = OrderedDict()
for loss_name, loss_value in losses.items():
if isinstance(loss_value, torch.Tensor):
log_vars[loss_name] = loss_value.mean()
elif isinstance(loss_value, list):
log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
else:
raise TypeError(f"{loss_name} is not a tensor or list of tensors")
if "loss" not in loss_name:
# put metrics to storage; don't return them
storage = get_event_storage()
value = log_vars.pop(loss_name).cpu().item()
storage.put_scalar(loss_name, value)
return log_vars | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/modeling/mmdet_wrapper.py | mmdet_wrapper.py |
import torch
from torch.nn import functional as F
from detectron2.structures import Instances, ROIMasks
# perhaps should rename to "resize_instance"
def detector_postprocess(
results: Instances, output_height: int, output_width: int, mask_threshold: float = 0.5
):
"""
Resize the output instances.
The input images are often resized when entering an object detector.
As a result, we often need the outputs of the detector in a different
resolution from its inputs.
This function will resize the raw outputs of an R-CNN detector
to produce outputs according to the desired output resolution.
Args:
results (Instances): the raw outputs from the detector.
`results.image_size` contains the input image resolution the detector sees.
This object might be modified in-place.
output_height, output_width: the desired output resolution.
Returns:
Instances: the resized output from the model, based on the output resolution
"""
if isinstance(output_width, torch.Tensor):
# This shape might (but not necessarily) be tensors during tracing.
# Converts integer tensors to float temporaries to ensure true
# division is performed when computing scale_x and scale_y.
output_width_tmp = output_width.float()
output_height_tmp = output_height.float()
new_size = torch.stack([output_height, output_width])
else:
new_size = (output_height, output_width)
output_width_tmp = output_width
output_height_tmp = output_height
scale_x, scale_y = (
output_width_tmp / results.image_size[1],
output_height_tmp / results.image_size[0],
)
results = Instances(new_size, **results.get_fields())
if results.has("pred_boxes"):
output_boxes = results.pred_boxes
elif results.has("proposal_boxes"):
output_boxes = results.proposal_boxes
else:
output_boxes = None
assert output_boxes is not None, "Predictions must contain boxes!"
output_boxes.scale(scale_x, scale_y)
output_boxes.clip(results.image_size)
results = results[output_boxes.nonempty()]
if results.has("pred_masks"):
if isinstance(results.pred_masks, ROIMasks):
roi_masks = results.pred_masks
else:
# pred_masks is a tensor of shape (N, 1, M, M)
roi_masks = ROIMasks(results.pred_masks[:, 0, :, :])
results.pred_masks = roi_masks.to_bitmasks(
results.pred_boxes, output_height, output_width, mask_threshold
).tensor # TODO return ROIMasks/BitMask object in the future
if results.has("pred_keypoints"):
results.pred_keypoints[:, :, 0] *= scale_x
results.pred_keypoints[:, :, 1] *= scale_y
return results
def sem_seg_postprocess(result, img_size, output_height, output_width):
"""
Return semantic segmentation predictions in the original resolution.
The input images are often resized when entering semantic segmentor. Moreover, in same
cases, they also padded inside segmentor to be divisible by maximum network stride.
As a result, we often need the predictions of the segmentor in a different
resolution from its inputs.
Args:
result (Tensor): semantic segmentation prediction logits. A tensor of shape (C, H, W),
where C is the number of classes, and H, W are the height and width of the prediction.
img_size (tuple): image size that segmentor is taking as input.
output_height, output_width: the desired output resolution.
Returns:
semantic segmentation prediction (Tensor): A tensor of the shape
(C, output_height, output_width) that contains per-pixel soft predictions.
"""
result = result[:, : img_size[0], : img_size[1]].expand(1, -1, -1, -1)
result = F.interpolate(
result, size=(output_height, output_width), mode="bilinear", align_corners=False
)[0]
return result | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/modeling/postprocessing.py | postprocessing.py |
import collections
import math
from typing import List
import torch
from torch import nn
from detectron2.config import configurable
from detectron2.layers import ShapeSpec
from detectron2.structures import Boxes, RotatedBoxes
from detectron2.utils.registry import Registry
ANCHOR_GENERATOR_REGISTRY = Registry("ANCHOR_GENERATOR")
ANCHOR_GENERATOR_REGISTRY.__doc__ = """
Registry for modules that creates object detection anchors for feature maps.
The registered object will be called with `obj(cfg, input_shape)`.
"""
class BufferList(nn.Module):
"""
Similar to nn.ParameterList, but for buffers
"""
def __init__(self, buffers):
super().__init__()
for i, buffer in enumerate(buffers):
# Use non-persistent buffer so the values are not saved in checkpoint
self.register_buffer(str(i), buffer, persistent=False)
def __len__(self):
return len(self._buffers)
def __iter__(self):
return iter(self._buffers.values())
def _create_grid_offsets(size: List[int], stride: int, offset: float, device: torch.device):
grid_height, grid_width = size
shifts_x = torch.arange(
offset * stride, grid_width * stride, step=stride, dtype=torch.float32, device=device
)
shifts_y = torch.arange(
offset * stride, grid_height * stride, step=stride, dtype=torch.float32, device=device
)
shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)
shift_x = shift_x.reshape(-1)
shift_y = shift_y.reshape(-1)
return shift_x, shift_y
def _broadcast_params(params, num_features, name):
"""
If one size (or aspect ratio) is specified and there are multiple feature
maps, we "broadcast" anchors of that single size (or aspect ratio)
over all feature maps.
If params is list[float], or list[list[float]] with len(params) == 1, repeat
it num_features time.
Returns:
list[list[float]]: param for each feature
"""
assert isinstance(
params, collections.abc.Sequence
), f"{name} in anchor generator has to be a list! Got {params}."
assert len(params), f"{name} in anchor generator cannot be empty!"
if not isinstance(params[0], collections.abc.Sequence): # params is list[float]
return [params] * num_features
if len(params) == 1:
return list(params) * num_features
assert len(params) == num_features, (
f"Got {name} of length {len(params)} in anchor generator, "
f"but the number of input features is {num_features}!"
)
return params
@ANCHOR_GENERATOR_REGISTRY.register()
class DefaultAnchorGenerator(nn.Module):
"""
Compute anchors in the standard ways described in
"Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks".
"""
box_dim: torch.jit.Final[int] = 4
"""
the dimension of each anchor box.
"""
@configurable
def __init__(self, *, sizes, aspect_ratios, strides, offset=0.5):
"""
This interface is experimental.
Args:
sizes (list[list[float]] or list[float]):
If ``sizes`` is list[list[float]], ``sizes[i]`` is the list of anchor sizes
(i.e. sqrt of anchor area) to use for the i-th feature map.
If ``sizes`` is list[float], ``sizes`` is used for all feature maps.
Anchor sizes are given in absolute lengths in units of
the input image; they do not dynamically scale if the input image size changes.
aspect_ratios (list[list[float]] or list[float]): list of aspect ratios
(i.e. height / width) to use for anchors. Same "broadcast" rule for `sizes` applies.
strides (list[int]): stride of each input feature.
offset (float): Relative offset between the center of the first anchor and the top-left
corner of the image. Value has to be in [0, 1).
Recommend to use 0.5, which means half stride.
"""
super().__init__()
self.strides = strides
self.num_features = len(self.strides)
sizes = _broadcast_params(sizes, self.num_features, "sizes")
aspect_ratios = _broadcast_params(aspect_ratios, self.num_features, "aspect_ratios")
self.cell_anchors = self._calculate_anchors(sizes, aspect_ratios)
self.offset = offset
assert 0.0 <= self.offset < 1.0, self.offset
@classmethod
def from_config(cls, cfg, input_shape: List[ShapeSpec]):
return {
"sizes": cfg.MODEL.ANCHOR_GENERATOR.SIZES,
"aspect_ratios": cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS,
"strides": [x.stride for x in input_shape],
"offset": cfg.MODEL.ANCHOR_GENERATOR.OFFSET,
}
def _calculate_anchors(self, sizes, aspect_ratios):
cell_anchors = [
self.generate_cell_anchors(s, a).float() for s, a in zip(sizes, aspect_ratios)
]
return BufferList(cell_anchors)
@property
@torch.jit.unused
def num_cell_anchors(self):
"""
Alias of `num_anchors`.
"""
return self.num_anchors
@property
@torch.jit.unused
def num_anchors(self):
"""
Returns:
list[int]: Each int is the number of anchors at every pixel
location, on that feature map.
For example, if at every pixel we use anchors of 3 aspect
ratios and 5 sizes, the number of anchors is 15.
(See also ANCHOR_GENERATOR.SIZES and ANCHOR_GENERATOR.ASPECT_RATIOS in config)
In standard RPN models, `num_anchors` on every feature map is the same.
"""
return [len(cell_anchors) for cell_anchors in self.cell_anchors]
def _grid_anchors(self, grid_sizes: List[List[int]]):
"""
Returns:
list[Tensor]: #featuremap tensors, each is (#locations x #cell_anchors) x 4
"""
anchors = []
# buffers() not supported by torchscript. use named_buffers() instead
buffers: List[torch.Tensor] = [x[1] for x in self.cell_anchors.named_buffers()]
for size, stride, base_anchors in zip(grid_sizes, self.strides, buffers):
shift_x, shift_y = _create_grid_offsets(size, stride, self.offset, base_anchors.device)
shifts = torch.stack((shift_x, shift_y, shift_x, shift_y), dim=1)
anchors.append((shifts.view(-1, 1, 4) + base_anchors.view(1, -1, 4)).reshape(-1, 4))
return anchors
def generate_cell_anchors(self, sizes=(32, 64, 128, 256, 512), aspect_ratios=(0.5, 1, 2)):
"""
Generate a tensor storing canonical anchor boxes, which are all anchor
boxes of different sizes and aspect_ratios centered at (0, 0).
We can later build the set of anchors for a full feature map by
shifting and tiling these tensors (see `meth:_grid_anchors`).
Args:
sizes (tuple[float]):
aspect_ratios (tuple[float]]):
Returns:
Tensor of shape (len(sizes) * len(aspect_ratios), 4) storing anchor boxes
in XYXY format.
"""
# This is different from the anchor generator defined in the original Faster R-CNN
# code or Detectron. They yield the same AP, however the old version defines cell
# anchors in a less natural way with a shift relative to the feature grid and
# quantization that results in slightly different sizes for different aspect ratios.
# See also https://github.com/facebookresearch/Detectron/issues/227
anchors = []
for size in sizes:
area = size ** 2.0
for aspect_ratio in aspect_ratios:
# s * s = w * h
# a = h / w
# ... some algebra ...
# w = sqrt(s * s / a)
# h = a * w
w = math.sqrt(area / aspect_ratio)
h = aspect_ratio * w
x0, y0, x1, y1 = -w / 2.0, -h / 2.0, w / 2.0, h / 2.0
anchors.append([x0, y0, x1, y1])
return torch.tensor(anchors)
def forward(self, features: List[torch.Tensor]):
"""
Args:
features (list[Tensor]): list of backbone feature maps on which to generate anchors.
Returns:
list[Boxes]: a list of Boxes containing all the anchors for each feature map
(i.e. the cell anchors repeated over all locations in the feature map).
The number of anchors of each feature map is Hi x Wi x num_cell_anchors,
where Hi, Wi are resolution of the feature map divided by anchor stride.
"""
grid_sizes = [feature_map.shape[-2:] for feature_map in features]
anchors_over_all_feature_maps = self._grid_anchors(grid_sizes)
return [Boxes(x) for x in anchors_over_all_feature_maps]
@ANCHOR_GENERATOR_REGISTRY.register()
class RotatedAnchorGenerator(nn.Module):
"""
Compute rotated anchors used by Rotated RPN (RRPN), described in
"Arbitrary-Oriented Scene Text Detection via Rotation Proposals".
"""
box_dim: int = 5
"""
the dimension of each anchor box.
"""
@configurable
def __init__(self, *, sizes, aspect_ratios, strides, angles, offset=0.5):
"""
This interface is experimental.
Args:
sizes (list[list[float]] or list[float]):
If sizes is list[list[float]], sizes[i] is the list of anchor sizes
(i.e. sqrt of anchor area) to use for the i-th feature map.
If sizes is list[float], the sizes are used for all feature maps.
Anchor sizes are given in absolute lengths in units of
the input image; they do not dynamically scale if the input image size changes.
aspect_ratios (list[list[float]] or list[float]): list of aspect ratios
(i.e. height / width) to use for anchors. Same "broadcast" rule for `sizes` applies.
strides (list[int]): stride of each input feature.
angles (list[list[float]] or list[float]): list of angles (in degrees CCW)
to use for anchors. Same "broadcast" rule for `sizes` applies.
offset (float): Relative offset between the center of the first anchor and the top-left
corner of the image. Value has to be in [0, 1).
Recommend to use 0.5, which means half stride.
"""
super().__init__()
self.strides = strides
self.num_features = len(self.strides)
sizes = _broadcast_params(sizes, self.num_features, "sizes")
aspect_ratios = _broadcast_params(aspect_ratios, self.num_features, "aspect_ratios")
angles = _broadcast_params(angles, self.num_features, "angles")
self.cell_anchors = self._calculate_anchors(sizes, aspect_ratios, angles)
self.offset = offset
assert 0.0 <= self.offset < 1.0, self.offset
@classmethod
def from_config(cls, cfg, input_shape: List[ShapeSpec]):
return {
"sizes": cfg.MODEL.ANCHOR_GENERATOR.SIZES,
"aspect_ratios": cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS,
"strides": [x.stride for x in input_shape],
"offset": cfg.MODEL.ANCHOR_GENERATOR.OFFSET,
"angles": cfg.MODEL.ANCHOR_GENERATOR.ANGLES,
}
def _calculate_anchors(self, sizes, aspect_ratios, angles):
cell_anchors = [
self.generate_cell_anchors(size, aspect_ratio, angle).float()
for size, aspect_ratio, angle in zip(sizes, aspect_ratios, angles)
]
return BufferList(cell_anchors)
@property
def num_cell_anchors(self):
"""
Alias of `num_anchors`.
"""
return self.num_anchors
@property
def num_anchors(self):
"""
Returns:
list[int]: Each int is the number of anchors at every pixel
location, on that feature map.
For example, if at every pixel we use anchors of 3 aspect
ratios, 2 sizes and 5 angles, the number of anchors is 30.
(See also ANCHOR_GENERATOR.SIZES, ANCHOR_GENERATOR.ASPECT_RATIOS
and ANCHOR_GENERATOR.ANGLES in config)
In standard RRPN models, `num_anchors` on every feature map is the same.
"""
return [len(cell_anchors) for cell_anchors in self.cell_anchors]
def _grid_anchors(self, grid_sizes):
anchors = []
for size, stride, base_anchors in zip(grid_sizes, self.strides, self.cell_anchors):
shift_x, shift_y = _create_grid_offsets(size, stride, self.offset, base_anchors.device)
zeros = torch.zeros_like(shift_x)
shifts = torch.stack((shift_x, shift_y, zeros, zeros, zeros), dim=1)
anchors.append((shifts.view(-1, 1, 5) + base_anchors.view(1, -1, 5)).reshape(-1, 5))
return anchors
def generate_cell_anchors(
self,
sizes=(32, 64, 128, 256, 512),
aspect_ratios=(0.5, 1, 2),
angles=(-90, -60, -30, 0, 30, 60, 90),
):
"""
Generate a tensor storing canonical anchor boxes, which are all anchor
boxes of different sizes, aspect_ratios, angles centered at (0, 0).
We can later build the set of anchors for a full feature map by
shifting and tiling these tensors (see `meth:_grid_anchors`).
Args:
sizes (tuple[float]):
aspect_ratios (tuple[float]]):
angles (tuple[float]]):
Returns:
Tensor of shape (len(sizes) * len(aspect_ratios) * len(angles), 5)
storing anchor boxes in (x_ctr, y_ctr, w, h, angle) format.
"""
anchors = []
for size in sizes:
area = size ** 2.0
for aspect_ratio in aspect_ratios:
# s * s = w * h
# a = h / w
# ... some algebra ...
# w = sqrt(s * s / a)
# h = a * w
w = math.sqrt(area / aspect_ratio)
h = aspect_ratio * w
anchors.extend([0, 0, w, h, a] for a in angles)
return torch.tensor(anchors)
def forward(self, features):
"""
Args:
features (list[Tensor]): list of backbone feature maps on which to generate anchors.
Returns:
list[RotatedBoxes]: a list of Boxes containing all the anchors for each feature map
(i.e. the cell anchors repeated over all locations in the feature map).
The number of anchors of each feature map is Hi x Wi x num_cell_anchors,
where Hi, Wi are resolution of the feature map divided by anchor stride.
"""
grid_sizes = [feature_map.shape[-2:] for feature_map in features]
anchors_over_all_feature_maps = self._grid_anchors(grid_sizes)
return [RotatedBoxes(x) for x in anchors_over_all_feature_maps]
def build_anchor_generator(cfg, input_shape):
"""
Built an anchor generator from `cfg.MODEL.ANCHOR_GENERATOR.NAME`.
"""
anchor_generator = cfg.MODEL.ANCHOR_GENERATOR.NAME
return ANCHOR_GENERATOR_REGISTRY.get(anchor_generator)(cfg, input_shape) | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/modeling/anchor_generator.py | anchor_generator.py |
import math
from typing import List, Tuple, Union
import torch
from fvcore.nn import giou_loss, smooth_l1_loss
from torch.nn import functional as F
from detectron2.layers import cat, ciou_loss, diou_loss
from detectron2.structures import Boxes
# Value for clamping large dw and dh predictions. The heuristic is that we clamp
# such that dw and dh are no larger than what would transform a 16px box into a
# 1000px box (based on a small anchor, 16px, and a typical image size, 1000px).
_DEFAULT_SCALE_CLAMP = math.log(1000.0 / 16)
__all__ = ["Box2BoxTransform", "Box2BoxTransformRotated", "Box2BoxTransformLinear"]
@torch.jit.script
class Box2BoxTransform(object):
"""
The box-to-box transform defined in R-CNN. The transformation is parameterized
by 4 deltas: (dx, dy, dw, dh). The transformation scales the box's width and height
by exp(dw), exp(dh) and shifts a box's center by the offset (dx * width, dy * height).
"""
def __init__(
self, weights: Tuple[float, float, float, float], scale_clamp: float = _DEFAULT_SCALE_CLAMP
):
"""
Args:
weights (4-element tuple): Scaling factors that are applied to the
(dx, dy, dw, dh) deltas. In Fast R-CNN, these were originally set
such that the deltas have unit variance; now they are treated as
hyperparameters of the system.
scale_clamp (float): When predicting deltas, the predicted box scaling
factors (dw and dh) are clamped such that they are <= scale_clamp.
"""
self.weights = weights
self.scale_clamp = scale_clamp
def get_deltas(self, src_boxes, target_boxes):
"""
Get box regression transformation deltas (dx, dy, dw, dh) that can be used
to transform the `src_boxes` into the `target_boxes`. That is, the relation
``target_boxes == self.apply_deltas(deltas, src_boxes)`` is true (unless
any delta is too large and is clamped).
Args:
src_boxes (Tensor): source boxes, e.g., object proposals
target_boxes (Tensor): target of the transformation, e.g., ground-truth
boxes.
"""
assert isinstance(src_boxes, torch.Tensor), type(src_boxes)
assert isinstance(target_boxes, torch.Tensor), type(target_boxes)
src_widths = src_boxes[:, 2] - src_boxes[:, 0]
src_heights = src_boxes[:, 3] - src_boxes[:, 1]
src_ctr_x = src_boxes[:, 0] + 0.5 * src_widths
src_ctr_y = src_boxes[:, 1] + 0.5 * src_heights
target_widths = target_boxes[:, 2] - target_boxes[:, 0]
target_heights = target_boxes[:, 3] - target_boxes[:, 1]
target_ctr_x = target_boxes[:, 0] + 0.5 * target_widths
target_ctr_y = target_boxes[:, 1] + 0.5 * target_heights
wx, wy, ww, wh = self.weights
dx = wx * (target_ctr_x - src_ctr_x) / src_widths
dy = wy * (target_ctr_y - src_ctr_y) / src_heights
dw = ww * torch.log(target_widths / src_widths)
dh = wh * torch.log(target_heights / src_heights)
deltas = torch.stack((dx, dy, dw, dh), dim=1)
assert (src_widths > 0).all().item(), "Input boxes to Box2BoxTransform are not valid!"
return deltas
def apply_deltas(self, deltas, boxes):
"""
Apply transformation `deltas` (dx, dy, dw, dh) to `boxes`.
Args:
deltas (Tensor): transformation deltas of shape (N, k*4), where k >= 1.
deltas[i] represents k potentially different class-specific
box transformations for the single box boxes[i].
boxes (Tensor): boxes to transform, of shape (N, 4)
"""
deltas = deltas.float() # ensure fp32 for decoding precision
boxes = boxes.to(deltas.dtype)
widths = boxes[:, 2] - boxes[:, 0]
heights = boxes[:, 3] - boxes[:, 1]
ctr_x = boxes[:, 0] + 0.5 * widths
ctr_y = boxes[:, 1] + 0.5 * heights
wx, wy, ww, wh = self.weights
dx = deltas[:, 0::4] / wx
dy = deltas[:, 1::4] / wy
dw = deltas[:, 2::4] / ww
dh = deltas[:, 3::4] / wh
# Prevent sending too large values into torch.exp()
dw = torch.clamp(dw, max=self.scale_clamp)
dh = torch.clamp(dh, max=self.scale_clamp)
pred_ctr_x = dx * widths[:, None] + ctr_x[:, None]
pred_ctr_y = dy * heights[:, None] + ctr_y[:, None]
pred_w = torch.exp(dw) * widths[:, None]
pred_h = torch.exp(dh) * heights[:, None]
x1 = pred_ctr_x - 0.5 * pred_w
y1 = pred_ctr_y - 0.5 * pred_h
x2 = pred_ctr_x + 0.5 * pred_w
y2 = pred_ctr_y + 0.5 * pred_h
pred_boxes = torch.stack((x1, y1, x2, y2), dim=-1)
return pred_boxes.reshape(deltas.shape)
@torch.jit.script
class Box2BoxTransformRotated(object):
"""
The box-to-box transform defined in Rotated R-CNN. The transformation is parameterized
by 5 deltas: (dx, dy, dw, dh, da). The transformation scales the box's width and height
by exp(dw), exp(dh), shifts a box's center by the offset (dx * width, dy * height),
and rotate a box's angle by da (radians).
Note: angles of deltas are in radians while angles of boxes are in degrees.
"""
def __init__(
self,
weights: Tuple[float, float, float, float, float],
scale_clamp: float = _DEFAULT_SCALE_CLAMP,
):
"""
Args:
weights (5-element tuple): Scaling factors that are applied to the
(dx, dy, dw, dh, da) deltas. These are treated as
hyperparameters of the system.
scale_clamp (float): When predicting deltas, the predicted box scaling
factors (dw and dh) are clamped such that they are <= scale_clamp.
"""
self.weights = weights
self.scale_clamp = scale_clamp
def get_deltas(self, src_boxes, target_boxes):
"""
Get box regression transformation deltas (dx, dy, dw, dh, da) that can be used
to transform the `src_boxes` into the `target_boxes`. That is, the relation
``target_boxes == self.apply_deltas(deltas, src_boxes)`` is true (unless
any delta is too large and is clamped).
Args:
src_boxes (Tensor): Nx5 source boxes, e.g., object proposals
target_boxes (Tensor): Nx5 target of the transformation, e.g., ground-truth
boxes.
"""
assert isinstance(src_boxes, torch.Tensor), type(src_boxes)
assert isinstance(target_boxes, torch.Tensor), type(target_boxes)
src_ctr_x, src_ctr_y, src_widths, src_heights, src_angles = torch.unbind(src_boxes, dim=1)
target_ctr_x, target_ctr_y, target_widths, target_heights, target_angles = torch.unbind(
target_boxes, dim=1
)
wx, wy, ww, wh, wa = self.weights
dx = wx * (target_ctr_x - src_ctr_x) / src_widths
dy = wy * (target_ctr_y - src_ctr_y) / src_heights
dw = ww * torch.log(target_widths / src_widths)
dh = wh * torch.log(target_heights / src_heights)
# Angles of deltas are in radians while angles of boxes are in degrees.
# the conversion to radians serve as a way to normalize the values
da = target_angles - src_angles
da = (da + 180.0) % 360.0 - 180.0 # make it in [-180, 180)
da *= wa * math.pi / 180.0
deltas = torch.stack((dx, dy, dw, dh, da), dim=1)
assert (
(src_widths > 0).all().item()
), "Input boxes to Box2BoxTransformRotated are not valid!"
return deltas
def apply_deltas(self, deltas, boxes):
"""
Apply transformation `deltas` (dx, dy, dw, dh, da) to `boxes`.
Args:
deltas (Tensor): transformation deltas of shape (N, k*5).
deltas[i] represents box transformation for the single box boxes[i].
boxes (Tensor): boxes to transform, of shape (N, 5)
"""
assert deltas.shape[1] % 5 == 0 and boxes.shape[1] == 5
boxes = boxes.to(deltas.dtype).unsqueeze(2)
ctr_x = boxes[:, 0]
ctr_y = boxes[:, 1]
widths = boxes[:, 2]
heights = boxes[:, 3]
angles = boxes[:, 4]
wx, wy, ww, wh, wa = self.weights
dx = deltas[:, 0::5] / wx
dy = deltas[:, 1::5] / wy
dw = deltas[:, 2::5] / ww
dh = deltas[:, 3::5] / wh
da = deltas[:, 4::5] / wa
# Prevent sending too large values into torch.exp()
dw = torch.clamp(dw, max=self.scale_clamp)
dh = torch.clamp(dh, max=self.scale_clamp)
pred_boxes = torch.zeros_like(deltas)
pred_boxes[:, 0::5] = dx * widths + ctr_x # x_ctr
pred_boxes[:, 1::5] = dy * heights + ctr_y # y_ctr
pred_boxes[:, 2::5] = torch.exp(dw) * widths # width
pred_boxes[:, 3::5] = torch.exp(dh) * heights # height
# Following original RRPN implementation,
# angles of deltas are in radians while angles of boxes are in degrees.
pred_angle = da * 180.0 / math.pi + angles
pred_angle = (pred_angle + 180.0) % 360.0 - 180.0 # make it in [-180, 180)
pred_boxes[:, 4::5] = pred_angle
return pred_boxes
class Box2BoxTransformLinear(object):
"""
The linear box-to-box transform defined in FCOS. The transformation is parameterized
by the distance from the center of (square) src box to 4 edges of the target box.
"""
def __init__(self, normalize_by_size=True):
"""
Args:
normalize_by_size: normalize deltas by the size of src (anchor) boxes.
"""
self.normalize_by_size = normalize_by_size
def get_deltas(self, src_boxes, target_boxes):
"""
Get box regression transformation deltas (dx1, dy1, dx2, dy2) that can be used
to transform the `src_boxes` into the `target_boxes`. That is, the relation
``target_boxes == self.apply_deltas(deltas, src_boxes)`` is true.
The center of src must be inside target boxes.
Args:
src_boxes (Tensor): square source boxes, e.g., anchors
target_boxes (Tensor): target of the transformation, e.g., ground-truth
boxes.
"""
assert isinstance(src_boxes, torch.Tensor), type(src_boxes)
assert isinstance(target_boxes, torch.Tensor), type(target_boxes)
src_ctr_x = 0.5 * (src_boxes[:, 0] + src_boxes[:, 2])
src_ctr_y = 0.5 * (src_boxes[:, 1] + src_boxes[:, 3])
target_l = src_ctr_x - target_boxes[:, 0]
target_t = src_ctr_y - target_boxes[:, 1]
target_r = target_boxes[:, 2] - src_ctr_x
target_b = target_boxes[:, 3] - src_ctr_y
deltas = torch.stack((target_l, target_t, target_r, target_b), dim=1)
if self.normalize_by_size:
stride_w = src_boxes[:, 2] - src_boxes[:, 0]
stride_h = src_boxes[:, 3] - src_boxes[:, 1]
strides = torch.stack([stride_w, stride_h, stride_w, stride_h], axis=1)
deltas = deltas / strides
return deltas
def apply_deltas(self, deltas, boxes):
"""
Apply transformation `deltas` (dx1, dy1, dx2, dy2) to `boxes`.
Args:
deltas (Tensor): transformation deltas of shape (N, k*4), where k >= 1.
deltas[i] represents k potentially different class-specific
box transformations for the single box boxes[i].
boxes (Tensor): boxes to transform, of shape (N, 4)
"""
# Ensure the output is a valid box. See Sec 2.1 of https://arxiv.org/abs/2006.09214
deltas = F.relu(deltas)
boxes = boxes.to(deltas.dtype)
ctr_x = 0.5 * (boxes[:, 0] + boxes[:, 2])
ctr_y = 0.5 * (boxes[:, 1] + boxes[:, 3])
if self.normalize_by_size:
stride_w = boxes[:, 2] - boxes[:, 0]
stride_h = boxes[:, 3] - boxes[:, 1]
strides = torch.stack([stride_w, stride_h, stride_w, stride_h], axis=1)
deltas = deltas * strides
l = deltas[:, 0::4]
t = deltas[:, 1::4]
r = deltas[:, 2::4]
b = deltas[:, 3::4]
pred_boxes = torch.zeros_like(deltas)
pred_boxes[:, 0::4] = ctr_x[:, None] - l # x1
pred_boxes[:, 1::4] = ctr_y[:, None] - t # y1
pred_boxes[:, 2::4] = ctr_x[:, None] + r # x2
pred_boxes[:, 3::4] = ctr_y[:, None] + b # y2
return pred_boxes
def _dense_box_regression_loss(
anchors: List[Union[Boxes, torch.Tensor]],
box2box_transform: Box2BoxTransform,
pred_anchor_deltas: List[torch.Tensor],
gt_boxes: List[torch.Tensor],
fg_mask: torch.Tensor,
box_reg_loss_type="smooth_l1",
smooth_l1_beta=0.0,
):
"""
Compute loss for dense multi-level box regression.
Loss is accumulated over ``fg_mask``.
Args:
anchors: #lvl anchor boxes, each is (HixWixA, 4)
pred_anchor_deltas: #lvl predictions, each is (N, HixWixA, 4)
gt_boxes: N ground truth boxes, each has shape (R, 4) (R = sum(Hi * Wi * A))
fg_mask: the foreground boolean mask of shape (N, R) to compute loss on
box_reg_loss_type (str): Loss type to use. Supported losses: "smooth_l1", "giou",
"diou", "ciou".
smooth_l1_beta (float): beta parameter for the smooth L1 regression loss. Default to
use L1 loss. Only used when `box_reg_loss_type` is "smooth_l1"
"""
if isinstance(anchors[0], Boxes):
anchors = type(anchors[0]).cat(anchors).tensor # (R, 4)
else:
anchors = cat(anchors)
if box_reg_loss_type == "smooth_l1":
gt_anchor_deltas = [box2box_transform.get_deltas(anchors, k) for k in gt_boxes]
gt_anchor_deltas = torch.stack(gt_anchor_deltas) # (N, R, 4)
loss_box_reg = smooth_l1_loss(
cat(pred_anchor_deltas, dim=1)[fg_mask],
gt_anchor_deltas[fg_mask],
beta=smooth_l1_beta,
reduction="sum",
)
elif box_reg_loss_type == "giou":
pred_boxes = [
box2box_transform.apply_deltas(k, anchors) for k in cat(pred_anchor_deltas, dim=1)
]
loss_box_reg = giou_loss(
torch.stack(pred_boxes)[fg_mask], torch.stack(gt_boxes)[fg_mask], reduction="sum"
)
elif box_reg_loss_type == "diou":
pred_boxes = [
box2box_transform.apply_deltas(k, anchors) for k in cat(pred_anchor_deltas, dim=1)
]
loss_box_reg = diou_loss(
torch.stack(pred_boxes)[fg_mask], torch.stack(gt_boxes)[fg_mask], reduction="sum"
)
elif box_reg_loss_type == "ciou":
pred_boxes = [
box2box_transform.apply_deltas(k, anchors) for k in cat(pred_anchor_deltas, dim=1)
]
loss_box_reg = ciou_loss(
torch.stack(pred_boxes)[fg_mask], torch.stack(gt_boxes)[fg_mask], reduction="sum"
)
else:
raise ValueError(f"Invalid dense box regression loss type '{box_reg_loss_type}'")
return loss_box_reg | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/modeling/box_regression.py | box_regression.py |
import math
import fvcore.nn.weight_init as weight_init
import torch
import torch.nn.functional as F
from torch import nn
from detectron2.layers import Conv2d, ShapeSpec, get_norm
from .backbone import Backbone
from .build import BACKBONE_REGISTRY
from .resnet import build_resnet_backbone
__all__ = ["build_resnet_fpn_backbone", "build_retinanet_resnet_fpn_backbone", "FPN"]
class FPN(Backbone):
"""
This module implements :paper:`FPN`.
It creates pyramid features built on top of some input feature maps.
"""
_fuse_type: torch.jit.Final[str]
def __init__(
self, bottom_up, in_features, out_channels, norm="", top_block=None, fuse_type="sum"
):
"""
Args:
bottom_up (Backbone): module representing the bottom up subnetwork.
Must be a subclass of :class:`Backbone`. The multi-scale feature
maps generated by the bottom up network, and listed in `in_features`,
are used to generate FPN levels.
in_features (list[str]): names of the input feature maps coming
from the backbone to which FPN is attached. For example, if the
backbone produces ["res2", "res3", "res4"], any *contiguous* sublist
of these may be used; order must be from high to low resolution.
out_channels (int): number of channels in the output feature maps.
norm (str): the normalization to use.
top_block (nn.Module or None): if provided, an extra operation will
be performed on the output of the last (smallest resolution)
FPN output, and the result will extend the result list. The top_block
further downsamples the feature map. It must have an attribute
"num_levels", meaning the number of extra FPN levels added by
this block, and "in_feature", which is a string representing
its input feature (e.g., p5).
fuse_type (str): types for fusing the top down features and the lateral
ones. It can be "sum" (default), which sums up element-wise; or "avg",
which takes the element-wise mean of the two.
"""
super(FPN, self).__init__()
assert isinstance(bottom_up, Backbone)
assert in_features, in_features
# Feature map strides and channels from the bottom up network (e.g. ResNet)
input_shapes = bottom_up.output_shape()
strides = [input_shapes[f].stride for f in in_features]
in_channels_per_feature = [input_shapes[f].channels for f in in_features]
_assert_strides_are_log2_contiguous(strides)
lateral_convs = []
output_convs = []
use_bias = norm == ""
for idx, in_channels in enumerate(in_channels_per_feature):
lateral_norm = get_norm(norm, out_channels)
output_norm = get_norm(norm, out_channels)
lateral_conv = Conv2d(
in_channels, out_channels, kernel_size=1, bias=use_bias, norm=lateral_norm
)
output_conv = Conv2d(
out_channels,
out_channels,
kernel_size=3,
stride=1,
padding=1,
bias=use_bias,
norm=output_norm,
)
weight_init.c2_xavier_fill(lateral_conv)
weight_init.c2_xavier_fill(output_conv)
stage = int(math.log2(strides[idx]))
self.add_module("fpn_lateral{}".format(stage), lateral_conv)
self.add_module("fpn_output{}".format(stage), output_conv)
lateral_convs.append(lateral_conv)
output_convs.append(output_conv)
# Place convs into top-down order (from low to high resolution)
# to make the top-down computation in forward clearer.
self.lateral_convs = lateral_convs[::-1]
self.output_convs = output_convs[::-1]
self.top_block = top_block
self.in_features = tuple(in_features)
self.bottom_up = bottom_up
# Return feature names are "p<stage>", like ["p2", "p3", ..., "p6"]
self._out_feature_strides = {"p{}".format(int(math.log2(s))): s for s in strides}
# top block output feature maps.
if self.top_block is not None:
for s in range(stage, stage + self.top_block.num_levels):
self._out_feature_strides["p{}".format(s + 1)] = 2 ** (s + 1)
self._out_features = list(self._out_feature_strides.keys())
self._out_feature_channels = {k: out_channels for k in self._out_features}
self._size_divisibility = strides[-1]
assert fuse_type in {"avg", "sum"}
self._fuse_type = fuse_type
@property
def size_divisibility(self):
return self._size_divisibility
def forward(self, x):
"""
Args:
input (dict[str->Tensor]): mapping feature map name (e.g., "res5") to
feature map tensor for each feature level in high to low resolution order.
Returns:
dict[str->Tensor]:
mapping from feature map name to FPN feature map tensor
in high to low resolution order. Returned feature names follow the FPN
paper convention: "p<stage>", where stage has stride = 2 ** stage e.g.,
["p2", "p3", ..., "p6"].
"""
bottom_up_features = self.bottom_up(x)
results = []
prev_features = self.lateral_convs[0](bottom_up_features[self.in_features[-1]])
results.append(self.output_convs[0](prev_features))
# Reverse feature maps into top-down order (from low to high resolution)
for idx, (lateral_conv, output_conv) in enumerate(
zip(self.lateral_convs, self.output_convs)
):
# Slicing of ModuleList is not supported https://github.com/pytorch/pytorch/issues/47336
# Therefore we loop over all modules but skip the first one
if idx > 0:
features = self.in_features[-idx - 1]
features = bottom_up_features[features]
top_down_features = F.interpolate(prev_features, scale_factor=2.0, mode="nearest")
lateral_features = lateral_conv(features)
prev_features = lateral_features + top_down_features
if self._fuse_type == "avg":
prev_features /= 2
results.insert(0, output_conv(prev_features))
if self.top_block is not None:
if self.top_block.in_feature in bottom_up_features:
top_block_in_feature = bottom_up_features[self.top_block.in_feature]
else:
top_block_in_feature = results[self._out_features.index(self.top_block.in_feature)]
results.extend(self.top_block(top_block_in_feature))
assert len(self._out_features) == len(results)
return {f: res for f, res in zip(self._out_features, results)}
def output_shape(self):
return {
name: ShapeSpec(
channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
)
for name in self._out_features
}
def _assert_strides_are_log2_contiguous(strides):
"""
Assert that each stride is 2x times its preceding stride, i.e. "contiguous in log2".
"""
for i, stride in enumerate(strides[1:], 1):
assert stride == 2 * strides[i - 1], "Strides {} {} are not log2 contiguous".format(
stride, strides[i - 1]
)
class LastLevelMaxPool(nn.Module):
"""
This module is used in the original FPN to generate a downsampled
P6 feature from P5.
"""
def __init__(self):
super().__init__()
self.num_levels = 1
self.in_feature = "p5"
def forward(self, x):
return [F.max_pool2d(x, kernel_size=1, stride=2, padding=0)]
class LastLevelP6P7(nn.Module):
"""
This module is used in RetinaNet to generate extra layers, P6 and P7 from
C5 feature.
"""
def __init__(self, in_channels, out_channels, in_feature="res5"):
super().__init__()
self.num_levels = 2
self.in_feature = in_feature
self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1)
self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1)
for module in [self.p6, self.p7]:
weight_init.c2_xavier_fill(module)
def forward(self, c5):
p6 = self.p6(c5)
p7 = self.p7(F.relu(p6))
return [p6, p7]
@BACKBONE_REGISTRY.register()
def build_resnet_fpn_backbone(cfg, input_shape: ShapeSpec):
"""
Args:
cfg: a detectron2 CfgNode
Returns:
backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
"""
bottom_up = build_resnet_backbone(cfg, input_shape)
in_features = cfg.MODEL.FPN.IN_FEATURES
out_channels = cfg.MODEL.FPN.OUT_CHANNELS
backbone = FPN(
bottom_up=bottom_up,
in_features=in_features,
out_channels=out_channels,
norm=cfg.MODEL.FPN.NORM,
top_block=LastLevelMaxPool(),
fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
)
return backbone
@BACKBONE_REGISTRY.register()
def build_retinanet_resnet_fpn_backbone(cfg, input_shape: ShapeSpec):
"""
Args:
cfg: a detectron2 CfgNode
Returns:
backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
"""
bottom_up = build_resnet_backbone(cfg, input_shape)
in_features = cfg.MODEL.FPN.IN_FEATURES
out_channels = cfg.MODEL.FPN.OUT_CHANNELS
in_channels_p6p7 = bottom_up.output_shape()["res5"].channels
backbone = FPN(
bottom_up=bottom_up,
in_features=in_features,
out_channels=out_channels,
norm=cfg.MODEL.FPN.NORM,
top_block=LastLevelP6P7(in_channels_p6p7, out_channels),
fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
)
return backbone | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/modeling/backbone/fpn.py | fpn.py |
import numpy as np
from torch import nn
from detectron2.layers import CNNBlockBase, ShapeSpec, get_norm
from .backbone import Backbone
__all__ = [
"AnyNet",
"RegNet",
"ResStem",
"SimpleStem",
"VanillaBlock",
"ResBasicBlock",
"ResBottleneckBlock",
]
def conv2d(w_in, w_out, k, *, stride=1, groups=1, bias=False):
"""Helper for building a conv2d layer."""
assert k % 2 == 1, "Only odd size kernels supported to avoid padding issues."
s, p, g, b = stride, (k - 1) // 2, groups, bias
return nn.Conv2d(w_in, w_out, k, stride=s, padding=p, groups=g, bias=b)
def gap2d():
"""Helper for building a global average pooling layer."""
return nn.AdaptiveAvgPool2d((1, 1))
def pool2d(k, *, stride=1):
"""Helper for building a pool2d layer."""
assert k % 2 == 1, "Only odd size kernels supported to avoid padding issues."
return nn.MaxPool2d(k, stride=stride, padding=(k - 1) // 2)
def init_weights(m):
"""Performs ResNet-style weight initialization."""
if isinstance(m, nn.Conv2d):
# Note that there is no bias due to BN
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(mean=0.0, std=np.sqrt(2.0 / fan_out))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1.0)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(mean=0.0, std=0.01)
m.bias.data.zero_()
class ResStem(CNNBlockBase):
"""ResNet stem for ImageNet: 7x7, BN, AF, MaxPool."""
def __init__(self, w_in, w_out, norm, activation_class):
super().__init__(w_in, w_out, 4)
self.conv = conv2d(w_in, w_out, 7, stride=2)
self.bn = get_norm(norm, w_out)
self.af = activation_class()
self.pool = pool2d(3, stride=2)
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
class SimpleStem(CNNBlockBase):
"""Simple stem for ImageNet: 3x3, BN, AF."""
def __init__(self, w_in, w_out, norm, activation_class):
super().__init__(w_in, w_out, 2)
self.conv = conv2d(w_in, w_out, 3, stride=2)
self.bn = get_norm(norm, w_out)
self.af = activation_class()
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
class SE(nn.Module):
"""Squeeze-and-Excitation (SE) block: AvgPool, FC, Act, FC, Sigmoid."""
def __init__(self, w_in, w_se, activation_class):
super().__init__()
self.avg_pool = gap2d()
self.f_ex = nn.Sequential(
conv2d(w_in, w_se, 1, bias=True),
activation_class(),
conv2d(w_se, w_in, 1, bias=True),
nn.Sigmoid(),
)
def forward(self, x):
return x * self.f_ex(self.avg_pool(x))
class VanillaBlock(CNNBlockBase):
"""Vanilla block: [3x3 conv, BN, Relu] x2."""
def __init__(self, w_in, w_out, stride, norm, activation_class, _params):
super().__init__(w_in, w_out, stride)
self.a = conv2d(w_in, w_out, 3, stride=stride)
self.a_bn = get_norm(norm, w_out)
self.a_af = activation_class()
self.b = conv2d(w_out, w_out, 3)
self.b_bn = get_norm(norm, w_out)
self.b_af = activation_class()
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
class BasicTransform(nn.Module):
"""Basic transformation: [3x3 conv, BN, Relu] x2."""
def __init__(self, w_in, w_out, stride, norm, activation_class, _params):
super().__init__()
self.a = conv2d(w_in, w_out, 3, stride=stride)
self.a_bn = get_norm(norm, w_out)
self.a_af = activation_class()
self.b = conv2d(w_out, w_out, 3)
self.b_bn = get_norm(norm, w_out)
self.b_bn.final_bn = True
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
class ResBasicBlock(CNNBlockBase):
"""Residual basic block: x + f(x), f = basic transform."""
def __init__(self, w_in, w_out, stride, norm, activation_class, params):
super().__init__(w_in, w_out, stride)
self.proj, self.bn = None, None
if (w_in != w_out) or (stride != 1):
self.proj = conv2d(w_in, w_out, 1, stride=stride)
self.bn = get_norm(norm, w_out)
self.f = BasicTransform(w_in, w_out, stride, norm, activation_class, params)
self.af = activation_class()
def forward(self, x):
x_p = self.bn(self.proj(x)) if self.proj else x
return self.af(x_p + self.f(x))
class BottleneckTransform(nn.Module):
"""Bottleneck transformation: 1x1, 3x3 [+SE], 1x1."""
def __init__(self, w_in, w_out, stride, norm, activation_class, params):
super().__init__()
w_b = int(round(w_out * params["bot_mul"]))
w_se = int(round(w_in * params["se_r"]))
groups = w_b // params["group_w"]
self.a = conv2d(w_in, w_b, 1)
self.a_bn = get_norm(norm, w_b)
self.a_af = activation_class()
self.b = conv2d(w_b, w_b, 3, stride=stride, groups=groups)
self.b_bn = get_norm(norm, w_b)
self.b_af = activation_class()
self.se = SE(w_b, w_se, activation_class) if w_se else None
self.c = conv2d(w_b, w_out, 1)
self.c_bn = get_norm(norm, w_out)
self.c_bn.final_bn = True
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
class ResBottleneckBlock(CNNBlockBase):
"""Residual bottleneck block: x + f(x), f = bottleneck transform."""
def __init__(self, w_in, w_out, stride, norm, activation_class, params):
super().__init__(w_in, w_out, stride)
self.proj, self.bn = None, None
if (w_in != w_out) or (stride != 1):
self.proj = conv2d(w_in, w_out, 1, stride=stride)
self.bn = get_norm(norm, w_out)
self.f = BottleneckTransform(w_in, w_out, stride, norm, activation_class, params)
self.af = activation_class()
def forward(self, x):
x_p = self.bn(self.proj(x)) if self.proj else x
return self.af(x_p + self.f(x))
class AnyStage(nn.Module):
"""AnyNet stage (sequence of blocks w/ the same output shape)."""
def __init__(self, w_in, w_out, stride, d, block_class, norm, activation_class, params):
super().__init__()
for i in range(d):
block = block_class(w_in, w_out, stride, norm, activation_class, params)
self.add_module("b{}".format(i + 1), block)
stride, w_in = 1, w_out
def forward(self, x):
for block in self.children():
x = block(x)
return x
class AnyNet(Backbone):
"""AnyNet model. See :paper:`dds`."""
def __init__(
self,
*,
stem_class,
stem_width,
block_class,
depths,
widths,
group_widths,
strides,
bottleneck_ratios,
se_ratio,
activation_class,
freeze_at=0,
norm="BN",
out_features=None,
):
"""
Args:
stem_class (callable): A callable taking 4 arguments (channels in, channels out,
normalization, callable returning an activation function) that returns another
callable implementing the stem module.
stem_width (int): The number of output channels that the stem produces.
block_class (callable): A callable taking 6 arguments (channels in, channels out,
stride, normalization, callable returning an activation function, a dict of
block-specific parameters) that returns another callable implementing the repeated
block module.
depths (list[int]): Number of blocks in each stage.
widths (list[int]): For each stage, the number of output channels of each block.
group_widths (list[int]): For each stage, the number of channels per group in group
convolution, if the block uses group convolution.
strides (list[int]): The stride that each network stage applies to its input.
bottleneck_ratios (list[float]): For each stage, the ratio of the number of bottleneck
channels to the number of block input channels (or, equivalently, output channels),
if the block uses a bottleneck.
se_ratio (float): The ratio of the number of channels used inside the squeeze-excitation
(SE) module to it number of input channels, if SE the block uses SE.
activation_class (callable): A callable taking no arguments that returns another
callable implementing an activation function.
freeze_at (int): The number of stages at the beginning to freeze.
see :meth:`freeze` for detailed explanation.
norm (str or callable): normalization for all conv layers.
See :func:`layers.get_norm` for supported format.
out_features (list[str]): name of the layers whose outputs should
be returned in forward. RegNet's use "stem" and "s1", "s2", etc for the stages after
the stem. If None, will return the output of the last layer.
"""
super().__init__()
self.stem = stem_class(3, stem_width, norm, activation_class)
current_stride = self.stem.stride
self._out_feature_strides = {"stem": current_stride}
self._out_feature_channels = {"stem": self.stem.out_channels}
self.stages_and_names = []
prev_w = stem_width
for i, (d, w, s, b, g) in enumerate(
zip(depths, widths, strides, bottleneck_ratios, group_widths)
):
params = {"bot_mul": b, "group_w": g, "se_r": se_ratio}
stage = AnyStage(prev_w, w, s, d, block_class, norm, activation_class, params)
name = "s{}".format(i + 1)
self.add_module(name, stage)
self.stages_and_names.append((stage, name))
self._out_feature_strides[name] = current_stride = int(
current_stride * np.prod([k.stride for k in stage.children()])
)
self._out_feature_channels[name] = list(stage.children())[-1].out_channels
prev_w = w
self.apply(init_weights)
if out_features is None:
out_features = [name]
self._out_features = out_features
assert len(self._out_features)
children = [x[0] for x in self.named_children()]
for out_feature in self._out_features:
assert out_feature in children, "Available children: {} does not include {}".format(
", ".join(children), out_feature
)
self.freeze(freeze_at)
def forward(self, x):
"""
Args:
x: Tensor of shape (N,C,H,W). H, W must be a multiple of ``self.size_divisibility``.
Returns:
dict[str->Tensor]: names and the corresponding features
"""
assert x.dim() == 4, f"Model takes an input of shape (N, C, H, W). Got {x.shape} instead!"
outputs = {}
x = self.stem(x)
if "stem" in self._out_features:
outputs["stem"] = x
for stage, name in self.stages_and_names:
x = stage(x)
if name in self._out_features:
outputs[name] = x
return outputs
def output_shape(self):
return {
name: ShapeSpec(
channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
)
for name in self._out_features
}
def freeze(self, freeze_at=0):
"""
Freeze the first several stages of the model. Commonly used in fine-tuning.
Layers that produce the same feature map spatial size are defined as one
"stage" by :paper:`FPN`.
Args:
freeze_at (int): number of stages to freeze.
`1` means freezing the stem. `2` means freezing the stem and
one residual stage, etc.
Returns:
nn.Module: this model itself
"""
if freeze_at >= 1:
self.stem.freeze()
for idx, (stage, _) in enumerate(self.stages_and_names, start=2):
if freeze_at >= idx:
for block in stage.children():
block.freeze()
return self
def adjust_block_compatibility(ws, bs, gs):
"""Adjusts the compatibility of widths, bottlenecks, and groups."""
assert len(ws) == len(bs) == len(gs)
assert all(w > 0 and b > 0 and g > 0 for w, b, g in zip(ws, bs, gs))
vs = [int(max(1, w * b)) for w, b in zip(ws, bs)]
gs = [int(min(g, v)) for g, v in zip(gs, vs)]
ms = [np.lcm(g, b) if b > 1 else g for g, b in zip(gs, bs)]
vs = [max(m, int(round(v / m) * m)) for v, m in zip(vs, ms)]
ws = [int(v / b) for v, b in zip(vs, bs)]
assert all(w * b % g == 0 for w, b, g in zip(ws, bs, gs))
return ws, bs, gs
def generate_regnet_parameters(w_a, w_0, w_m, d, q=8):
"""Generates per stage widths and depths from RegNet parameters."""
assert w_a >= 0 and w_0 > 0 and w_m > 1 and w_0 % q == 0
# Generate continuous per-block ws
ws_cont = np.arange(d) * w_a + w_0
# Generate quantized per-block ws
ks = np.round(np.log(ws_cont / w_0) / np.log(w_m))
ws_all = w_0 * np.power(w_m, ks)
ws_all = np.round(np.divide(ws_all, q)).astype(int) * q
# Generate per stage ws and ds (assumes ws_all are sorted)
ws, ds = np.unique(ws_all, return_counts=True)
# Compute number of actual stages and total possible stages
num_stages, total_stages = len(ws), ks.max() + 1
# Convert numpy arrays to lists and return
ws, ds, ws_all, ws_cont = (x.tolist() for x in (ws, ds, ws_all, ws_cont))
return ws, ds, num_stages, total_stages, ws_all, ws_cont
class RegNet(AnyNet):
"""RegNet model. See :paper:`dds`."""
def __init__(
self,
*,
stem_class,
stem_width,
block_class,
depth,
w_a,
w_0,
w_m,
group_width,
stride=2,
bottleneck_ratio=1.0,
se_ratio=0.0,
activation_class=None,
freeze_at=0,
norm="BN",
out_features=None,
):
"""
Build a RegNet from the parameterization described in :paper:`dds` Section 3.3.
Args:
See :class:`AnyNet` for arguments that are not listed here.
depth (int): Total number of blocks in the RegNet.
w_a (float): Factor by which block width would increase prior to quantizing block widths
by stage. See :paper:`dds` Section 3.3.
w_0 (int): Initial block width. See :paper:`dds` Section 3.3.
w_m (float): Parameter controlling block width quantization.
See :paper:`dds` Section 3.3.
group_width (int): Number of channels per group in group convolution, if the block uses
group convolution.
bottleneck_ratio (float): The ratio of the number of bottleneck channels to the number
of block input channels (or, equivalently, output channels), if the block uses a
bottleneck.
stride (int): The stride that each network stage applies to its input.
"""
ws, ds = generate_regnet_parameters(w_a, w_0, w_m, depth)[0:2]
ss = [stride for _ in ws]
bs = [bottleneck_ratio for _ in ws]
gs = [group_width for _ in ws]
ws, bs, gs = adjust_block_compatibility(ws, bs, gs)
def default_activation_class():
return nn.ReLU(inplace=True)
super().__init__(
stem_class=stem_class,
stem_width=stem_width,
block_class=block_class,
depths=ds,
widths=ws,
strides=ss,
group_widths=gs,
bottleneck_ratios=bs,
se_ratio=se_ratio,
activation_class=default_activation_class
if activation_class is None
else activation_class,
freeze_at=freeze_at,
norm=norm,
out_features=out_features,
) | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/modeling/backbone/regnet.py | regnet.py |
import numpy as np
import fvcore.nn.weight_init as weight_init
import torch
import torch.nn.functional as F
from torch import nn
from detectron2.layers import (
CNNBlockBase,
Conv2d,
DeformConv,
ModulatedDeformConv,
ShapeSpec,
get_norm,
)
from .backbone import Backbone
from .build import BACKBONE_REGISTRY
__all__ = [
"ResNetBlockBase",
"BasicBlock",
"BottleneckBlock",
"DeformBottleneckBlock",
"BasicStem",
"ResNet",
"make_stage",
"build_resnet_backbone",
]
class BasicBlock(CNNBlockBase):
"""
The basic residual block for ResNet-18 and ResNet-34 defined in :paper:`ResNet`,
with two 3x3 conv layers and a projection shortcut if needed.
"""
def __init__(self, in_channels, out_channels, *, stride=1, norm="BN"):
"""
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
stride (int): Stride for the first conv.
norm (str or callable): normalization for all conv layers.
See :func:`layers.get_norm` for supported format.
"""
super().__init__(in_channels, out_channels, stride)
if in_channels != out_channels:
self.shortcut = Conv2d(
in_channels,
out_channels,
kernel_size=1,
stride=stride,
bias=False,
norm=get_norm(norm, out_channels),
)
else:
self.shortcut = None
self.conv1 = Conv2d(
in_channels,
out_channels,
kernel_size=3,
stride=stride,
padding=1,
bias=False,
norm=get_norm(norm, out_channels),
)
self.conv2 = Conv2d(
out_channels,
out_channels,
kernel_size=3,
stride=1,
padding=1,
bias=False,
norm=get_norm(norm, out_channels),
)
for layer in [self.conv1, self.conv2, self.shortcut]:
if layer is not None: # shortcut can be None
weight_init.c2_msra_fill(layer)
def forward(self, x):
out = self.conv1(x)
out = F.relu_(out)
out = self.conv2(out)
if self.shortcut is not None:
shortcut = self.shortcut(x)
else:
shortcut = x
out += shortcut
out = F.relu_(out)
return out
class BottleneckBlock(CNNBlockBase):
"""
The standard bottleneck residual block used by ResNet-50, 101 and 152
defined in :paper:`ResNet`. It contains 3 conv layers with kernels
1x1, 3x3, 1x1, and a projection shortcut if needed.
"""
def __init__(
self,
in_channels,
out_channels,
*,
bottleneck_channels,
stride=1,
num_groups=1,
norm="BN",
stride_in_1x1=False,
dilation=1,
):
"""
Args:
bottleneck_channels (int): number of output channels for the 3x3
"bottleneck" conv layers.
num_groups (int): number of groups for the 3x3 conv layer.
norm (str or callable): normalization for all conv layers.
See :func:`layers.get_norm` for supported format.
stride_in_1x1 (bool): when stride>1, whether to put stride in the
first 1x1 convolution or the bottleneck 3x3 convolution.
dilation (int): the dilation rate of the 3x3 conv layer.
"""
super().__init__(in_channels, out_channels, stride)
if in_channels != out_channels:
self.shortcut = Conv2d(
in_channels,
out_channels,
kernel_size=1,
stride=stride,
bias=False,
norm=get_norm(norm, out_channels),
)
else:
self.shortcut = None
# The original MSRA ResNet models have stride in the first 1x1 conv
# The subsequent fb.torch.resnet and Caffe2 ResNe[X]t implementations have
# stride in the 3x3 conv
stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride)
self.conv1 = Conv2d(
in_channels,
bottleneck_channels,
kernel_size=1,
stride=stride_1x1,
bias=False,
norm=get_norm(norm, bottleneck_channels),
)
self.conv2 = Conv2d(
bottleneck_channels,
bottleneck_channels,
kernel_size=3,
stride=stride_3x3,
padding=1 * dilation,
bias=False,
groups=num_groups,
dilation=dilation,
norm=get_norm(norm, bottleneck_channels),
)
self.conv3 = Conv2d(
bottleneck_channels,
out_channels,
kernel_size=1,
bias=False,
norm=get_norm(norm, out_channels),
)
for layer in [self.conv1, self.conv2, self.conv3, self.shortcut]:
if layer is not None: # shortcut can be None
weight_init.c2_msra_fill(layer)
# Zero-initialize the last normalization in each residual branch,
# so that at the beginning, the residual branch starts with zeros,
# and each residual block behaves like an identity.
# See Sec 5.1 in "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour":
# "For BN layers, the learnable scaling coefficient γ is initialized
# to be 1, except for each residual block's last BN
# where γ is initialized to be 0."
# nn.init.constant_(self.conv3.norm.weight, 0)
# TODO this somehow hurts performance when training GN models from scratch.
# Add it as an option when we need to use this code to train a backbone.
def forward(self, x):
out = self.conv1(x)
out = F.relu_(out)
out = self.conv2(out)
out = F.relu_(out)
out = self.conv3(out)
if self.shortcut is not None:
shortcut = self.shortcut(x)
else:
shortcut = x
out += shortcut
out = F.relu_(out)
return out
class DeformBottleneckBlock(CNNBlockBase):
"""
Similar to :class:`BottleneckBlock`, but with :paper:`deformable conv <deformconv>`
in the 3x3 convolution.
"""
def __init__(
self,
in_channels,
out_channels,
*,
bottleneck_channels,
stride=1,
num_groups=1,
norm="BN",
stride_in_1x1=False,
dilation=1,
deform_modulated=False,
deform_num_groups=1,
):
super().__init__(in_channels, out_channels, stride)
self.deform_modulated = deform_modulated
if in_channels != out_channels:
self.shortcut = Conv2d(
in_channels,
out_channels,
kernel_size=1,
stride=stride,
bias=False,
norm=get_norm(norm, out_channels),
)
else:
self.shortcut = None
stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride)
self.conv1 = Conv2d(
in_channels,
bottleneck_channels,
kernel_size=1,
stride=stride_1x1,
bias=False,
norm=get_norm(norm, bottleneck_channels),
)
if deform_modulated:
deform_conv_op = ModulatedDeformConv
# offset channels are 2 or 3 (if with modulated) * kernel_size * kernel_size
offset_channels = 27
else:
deform_conv_op = DeformConv
offset_channels = 18
self.conv2_offset = Conv2d(
bottleneck_channels,
offset_channels * deform_num_groups,
kernel_size=3,
stride=stride_3x3,
padding=1 * dilation,
dilation=dilation,
)
self.conv2 = deform_conv_op(
bottleneck_channels,
bottleneck_channels,
kernel_size=3,
stride=stride_3x3,
padding=1 * dilation,
bias=False,
groups=num_groups,
dilation=dilation,
deformable_groups=deform_num_groups,
norm=get_norm(norm, bottleneck_channels),
)
self.conv3 = Conv2d(
bottleneck_channels,
out_channels,
kernel_size=1,
bias=False,
norm=get_norm(norm, out_channels),
)
for layer in [self.conv1, self.conv2, self.conv3, self.shortcut]:
if layer is not None: # shortcut can be None
weight_init.c2_msra_fill(layer)
nn.init.constant_(self.conv2_offset.weight, 0)
nn.init.constant_(self.conv2_offset.bias, 0)
def forward(self, x):
out = self.conv1(x)
out = F.relu_(out)
if self.deform_modulated:
offset_mask = self.conv2_offset(out)
offset_x, offset_y, mask = torch.chunk(offset_mask, 3, dim=1)
offset = torch.cat((offset_x, offset_y), dim=1)
mask = mask.sigmoid()
out = self.conv2(out, offset, mask)
else:
offset = self.conv2_offset(out)
out = self.conv2(out, offset)
out = F.relu_(out)
out = self.conv3(out)
if self.shortcut is not None:
shortcut = self.shortcut(x)
else:
shortcut = x
out += shortcut
out = F.relu_(out)
return out
class BasicStem(CNNBlockBase):
"""
The standard ResNet stem (layers before the first residual block),
with a conv, relu and max_pool.
"""
def __init__(self, in_channels=3, out_channels=64, norm="BN"):
"""
Args:
norm (str or callable): norm after the first conv layer.
See :func:`layers.get_norm` for supported format.
"""
super().__init__(in_channels, out_channels, 4)
self.in_channels = in_channels
self.conv1 = Conv2d(
in_channels,
out_channels,
kernel_size=7,
stride=2,
padding=3,
bias=False,
norm=get_norm(norm, out_channels),
)
weight_init.c2_msra_fill(self.conv1)
def forward(self, x):
x = self.conv1(x)
x = F.relu_(x)
x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1)
return x
class ResNet(Backbone):
"""
Implement :paper:`ResNet`.
"""
def __init__(self, stem, stages, num_classes=None, out_features=None, freeze_at=0):
"""
Args:
stem (nn.Module): a stem module
stages (list[list[CNNBlockBase]]): several (typically 4) stages,
each contains multiple :class:`CNNBlockBase`.
num_classes (None or int): if None, will not perform classification.
Otherwise, will create a linear layer.
out_features (list[str]): name of the layers whose outputs should
be returned in forward. Can be anything in "stem", "linear", or "res2" ...
If None, will return the output of the last layer.
freeze_at (int): The number of stages at the beginning to freeze.
see :meth:`freeze` for detailed explanation.
"""
super().__init__()
self.stem = stem
self.num_classes = num_classes
current_stride = self.stem.stride
self._out_feature_strides = {"stem": current_stride}
self._out_feature_channels = {"stem": self.stem.out_channels}
self.stage_names, self.stages = [], []
if out_features is not None:
# Avoid keeping unused layers in this module. They consume extra memory
# and may cause allreduce to fail
num_stages = max(
[{"res2": 1, "res3": 2, "res4": 3, "res5": 4}.get(f, 0) for f in out_features]
)
stages = stages[:num_stages]
for i, blocks in enumerate(stages):
assert len(blocks) > 0, len(blocks)
for block in blocks:
assert isinstance(block, CNNBlockBase), block
name = "res" + str(i + 2)
stage = nn.Sequential(*blocks)
self.add_module(name, stage)
self.stage_names.append(name)
self.stages.append(stage)
self._out_feature_strides[name] = current_stride = int(
current_stride * np.prod([k.stride for k in blocks])
)
self._out_feature_channels[name] = curr_channels = blocks[-1].out_channels
self.stage_names = tuple(self.stage_names) # Make it static for scripting
if num_classes is not None:
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.linear = nn.Linear(curr_channels, num_classes)
# Sec 5.1 in "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour":
# "The 1000-way fully-connected layer is initialized by
# drawing weights from a zero-mean Gaussian with standard deviation of 0.01."
nn.init.normal_(self.linear.weight, std=0.01)
name = "linear"
if out_features is None:
out_features = [name]
self._out_features = out_features
assert len(self._out_features)
children = [x[0] for x in self.named_children()]
for out_feature in self._out_features:
assert out_feature in children, "Available children: {}".format(", ".join(children))
self.freeze(freeze_at)
def forward(self, x):
"""
Args:
x: Tensor of shape (N,C,H,W). H, W must be a multiple of ``self.size_divisibility``.
Returns:
dict[str->Tensor]: names and the corresponding features
"""
assert x.dim() == 4, f"ResNet takes an input of shape (N, C, H, W). Got {x.shape} instead!"
outputs = {}
x = self.stem(x)
if "stem" in self._out_features:
outputs["stem"] = x
for name, stage in zip(self.stage_names, self.stages):
x = stage(x)
if name in self._out_features:
outputs[name] = x
if self.num_classes is not None:
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.linear(x)
if "linear" in self._out_features:
outputs["linear"] = x
return outputs
def output_shape(self):
return {
name: ShapeSpec(
channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
)
for name in self._out_features
}
def freeze(self, freeze_at=0):
"""
Freeze the first several stages of the ResNet. Commonly used in
fine-tuning.
Layers that produce the same feature map spatial size are defined as one
"stage" by :paper:`FPN`.
Args:
freeze_at (int): number of stages to freeze.
`1` means freezing the stem. `2` means freezing the stem and
one residual stage, etc.
Returns:
nn.Module: this ResNet itself
"""
if freeze_at >= 1:
self.stem.freeze()
for idx, stage in enumerate(self.stages, start=2):
if freeze_at >= idx:
for block in stage.children():
block.freeze()
return self
@staticmethod
def make_stage(block_class, num_blocks, *, in_channels, out_channels, **kwargs):
"""
Create a list of blocks of the same type that forms one ResNet stage.
Args:
block_class (type): a subclass of CNNBlockBase that's used to create all blocks in this
stage. A module of this type must not change spatial resolution of inputs unless its
stride != 1.
num_blocks (int): number of blocks in this stage
in_channels (int): input channels of the entire stage.
out_channels (int): output channels of **every block** in the stage.
kwargs: other arguments passed to the constructor of
`block_class`. If the argument name is "xx_per_block", the
argument is a list of values to be passed to each block in the
stage. Otherwise, the same argument is passed to every block
in the stage.
Returns:
list[CNNBlockBase]: a list of block module.
Examples:
::
stage = ResNet.make_stage(
BottleneckBlock, 3, in_channels=16, out_channels=64,
bottleneck_channels=16, num_groups=1,
stride_per_block=[2, 1, 1],
dilations_per_block=[1, 1, 2]
)
Usually, layers that produce the same feature map spatial size are defined as one
"stage" (in :paper:`FPN`). Under such definition, ``stride_per_block[1:]`` should
all be 1.
"""
blocks = []
for i in range(num_blocks):
curr_kwargs = {}
for k, v in kwargs.items():
if k.endswith("_per_block"):
assert len(v) == num_blocks, (
f"Argument '{k}' of make_stage should have the "
f"same length as num_blocks={num_blocks}."
)
newk = k[: -len("_per_block")]
assert newk not in kwargs, f"Cannot call make_stage with both {k} and {newk}!"
curr_kwargs[newk] = v[i]
else:
curr_kwargs[k] = v
blocks.append(
block_class(in_channels=in_channels, out_channels=out_channels, **curr_kwargs)
)
in_channels = out_channels
return blocks
@staticmethod
def make_default_stages(depth, block_class=None, **kwargs):
"""
Created list of ResNet stages from pre-defined depth (one of 18, 34, 50, 101, 152).
If it doesn't create the ResNet variant you need, please use :meth:`make_stage`
instead for fine-grained customization.
Args:
depth (int): depth of ResNet
block_class (type): the CNN block class. Has to accept
`bottleneck_channels` argument for depth > 50.
By default it is BasicBlock or BottleneckBlock, based on the
depth.
kwargs:
other arguments to pass to `make_stage`. Should not contain
stride and channels, as they are predefined for each depth.
Returns:
list[list[CNNBlockBase]]: modules in all stages; see arguments of
:class:`ResNet.__init__`.
"""
num_blocks_per_stage = {
18: [2, 2, 2, 2],
34: [3, 4, 6, 3],
50: [3, 4, 6, 3],
101: [3, 4, 23, 3],
152: [3, 8, 36, 3],
}[depth]
if block_class is None:
block_class = BasicBlock if depth < 50 else BottleneckBlock
if depth < 50:
in_channels = [64, 64, 128, 256]
out_channels = [64, 128, 256, 512]
else:
in_channels = [64, 256, 512, 1024]
out_channels = [256, 512, 1024, 2048]
ret = []
for (n, s, i, o) in zip(num_blocks_per_stage, [1, 2, 2, 2], in_channels, out_channels):
if depth >= 50:
kwargs["bottleneck_channels"] = o // 4
ret.append(
ResNet.make_stage(
block_class=block_class,
num_blocks=n,
stride_per_block=[s] + [1] * (n - 1),
in_channels=i,
out_channels=o,
**kwargs,
)
)
return ret
ResNetBlockBase = CNNBlockBase
"""
Alias for backward compatibiltiy.
"""
def make_stage(*args, **kwargs):
"""
Deprecated alias for backward compatibiltiy.
"""
return ResNet.make_stage(*args, **kwargs)
@BACKBONE_REGISTRY.register()
def build_resnet_backbone(cfg, input_shape):
"""
Create a ResNet instance from config.
Returns:
ResNet: a :class:`ResNet` instance.
"""
# need registration of new blocks/stems?
norm = cfg.MODEL.RESNETS.NORM
stem = BasicStem(
in_channels=input_shape.channels,
out_channels=cfg.MODEL.RESNETS.STEM_OUT_CHANNELS,
norm=norm,
)
# fmt: off
freeze_at = cfg.MODEL.BACKBONE.FREEZE_AT
out_features = cfg.MODEL.RESNETS.OUT_FEATURES
depth = cfg.MODEL.RESNETS.DEPTH
num_groups = cfg.MODEL.RESNETS.NUM_GROUPS
width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP
bottleneck_channels = num_groups * width_per_group
in_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS
out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
stride_in_1x1 = cfg.MODEL.RESNETS.STRIDE_IN_1X1
res5_dilation = cfg.MODEL.RESNETS.RES5_DILATION
deform_on_per_stage = cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE
deform_modulated = cfg.MODEL.RESNETS.DEFORM_MODULATED
deform_num_groups = cfg.MODEL.RESNETS.DEFORM_NUM_GROUPS
# fmt: on
assert res5_dilation in {1, 2}, "res5_dilation cannot be {}.".format(res5_dilation)
num_blocks_per_stage = {
18: [2, 2, 2, 2],
34: [3, 4, 6, 3],
50: [3, 4, 6, 3],
101: [3, 4, 23, 3],
152: [3, 8, 36, 3],
}[depth]
if depth in [18, 34]:
assert out_channels == 64, "Must set MODEL.RESNETS.RES2_OUT_CHANNELS = 64 for R18/R34"
assert not any(
deform_on_per_stage
), "MODEL.RESNETS.DEFORM_ON_PER_STAGE unsupported for R18/R34"
assert res5_dilation == 1, "Must set MODEL.RESNETS.RES5_DILATION = 1 for R18/R34"
assert num_groups == 1, "Must set MODEL.RESNETS.NUM_GROUPS = 1 for R18/R34"
stages = []
for idx, stage_idx in enumerate(range(2, 6)):
# res5_dilation is used this way as a convention in R-FCN & Deformable Conv paper
dilation = res5_dilation if stage_idx == 5 else 1
first_stride = 1 if idx == 0 or (stage_idx == 5 and dilation == 2) else 2
stage_kargs = {
"num_blocks": num_blocks_per_stage[idx],
"stride_per_block": [first_stride] + [1] * (num_blocks_per_stage[idx] - 1),
"in_channels": in_channels,
"out_channels": out_channels,
"norm": norm,
}
# Use BasicBlock for R18 and R34.
if depth in [18, 34]:
stage_kargs["block_class"] = BasicBlock
else:
stage_kargs["bottleneck_channels"] = bottleneck_channels
stage_kargs["stride_in_1x1"] = stride_in_1x1
stage_kargs["dilation"] = dilation
stage_kargs["num_groups"] = num_groups
if deform_on_per_stage[idx]:
stage_kargs["block_class"] = DeformBottleneckBlock
stage_kargs["deform_modulated"] = deform_modulated
stage_kargs["deform_num_groups"] = deform_num_groups
else:
stage_kargs["block_class"] = BottleneckBlock
blocks = ResNet.make_stage(**stage_kargs)
in_channels = out_channels
out_channels *= 2
bottleneck_channels *= 2
stages.append(blocks)
return ResNet(stem, stages, out_features=out_features, freeze_at=freeze_at) | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/modeling/backbone/resnet.py | resnet.py |
import logging
import numpy as np
from typing import Dict, List, Optional, Tuple
import torch
from torch import nn
from detectron2.config import configurable
from detectron2.data.detection_utils import convert_image_to_rgb
from detectron2.structures import ImageList, Instances
from detectron2.utils.events import get_event_storage
from detectron2.utils.logger import log_first_n
from ..backbone import Backbone, build_backbone
from ..postprocessing import detector_postprocess
from ..proposal_generator import build_proposal_generator
from ..roi_heads import build_roi_heads
from .build import META_ARCH_REGISTRY
__all__ = ["GeneralizedRCNN", "ProposalNetwork"]
@META_ARCH_REGISTRY.register()
class GeneralizedRCNN(nn.Module):
"""
Generalized R-CNN. Any models that contains the following three components:
1. Per-image feature extraction (aka backbone)
2. Region proposal generation
3. Per-region feature extraction and prediction
"""
@configurable
def __init__(
self,
*,
backbone: Backbone,
proposal_generator: nn.Module,
roi_heads: nn.Module,
pixel_mean: Tuple[float],
pixel_std: Tuple[float],
input_format: Optional[str] = None,
vis_period: int = 0,
):
"""
Args:
backbone: a backbone module, must follow detectron2's backbone interface
proposal_generator: a module that generates proposals using backbone features
roi_heads: a ROI head that performs per-region computation
pixel_mean, pixel_std: list or tuple with #channels element, representing
the per-channel mean and std to be used to normalize the input image
input_format: describe the meaning of channels of input. Needed by visualization
vis_period: the period to run visualization. Set to 0 to disable.
"""
super().__init__()
self.backbone = backbone
self.proposal_generator = proposal_generator
self.roi_heads = roi_heads
self.input_format = input_format
self.vis_period = vis_period
if vis_period > 0:
assert input_format is not None, "input_format is required for visualization!"
self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False)
self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False)
assert (
self.pixel_mean.shape == self.pixel_std.shape
), f"{self.pixel_mean} and {self.pixel_std} have different shapes!"
@classmethod
def from_config(cls, cfg):
backbone = build_backbone(cfg)
return {
"backbone": backbone,
"proposal_generator": build_proposal_generator(cfg, backbone.output_shape()),
"roi_heads": build_roi_heads(cfg, backbone.output_shape()),
"input_format": cfg.INPUT.FORMAT,
"vis_period": cfg.VIS_PERIOD,
"pixel_mean": cfg.MODEL.PIXEL_MEAN,
"pixel_std": cfg.MODEL.PIXEL_STD,
}
@property
def device(self):
return self.pixel_mean.device
def visualize_training(self, batched_inputs, proposals):
"""
A function used to visualize images and proposals. It shows ground truth
bounding boxes on the original image and up to 20 top-scoring predicted
object proposals on the original image. Users can implement different
visualization functions for different models.
Args:
batched_inputs (list): a list that contains input to the model.
proposals (list): a list that contains predicted proposals. Both
batched_inputs and proposals should have the same length.
"""
from detectron2.utils.visualizer import Visualizer
storage = get_event_storage()
max_vis_prop = 20
for input, prop in zip(batched_inputs, proposals):
img = input["image"]
img = convert_image_to_rgb(img.permute(1, 2, 0), self.input_format)
v_gt = Visualizer(img, None)
v_gt = v_gt.overlay_instances(boxes=input["instances"].gt_boxes)
anno_img = v_gt.get_image()
box_size = min(len(prop.proposal_boxes), max_vis_prop)
v_pred = Visualizer(img, None)
v_pred = v_pred.overlay_instances(
boxes=prop.proposal_boxes[0:box_size].tensor.cpu().numpy()
)
prop_img = v_pred.get_image()
vis_img = np.concatenate((anno_img, prop_img), axis=1)
vis_img = vis_img.transpose(2, 0, 1)
vis_name = "Left: GT bounding boxes; Right: Predicted proposals"
storage.put_image(vis_name, vis_img)
break # only visualize one image in a batch
def forward(self, batched_inputs: List[Dict[str, torch.Tensor]]):
"""
Args:
batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
Each item in the list contains the inputs for one image.
For now, each item in the list is a dict that contains:
* image: Tensor, image in (C, H, W) format.
* instances (optional): groundtruth :class:`Instances`
* proposals (optional): :class:`Instances`, precomputed proposals.
Other information that's included in the original dicts, such as:
* "height", "width" (int): the output resolution of the model, used in inference.
See :meth:`postprocess` for details.
Returns:
list[dict]:
Each dict is the output for one input image.
The dict contains one key "instances" whose value is a :class:`Instances`.
The :class:`Instances` object has the following keys:
"pred_boxes", "pred_classes", "scores", "pred_masks", "pred_keypoints"
"""
if not self.training:
return self.inference(batched_inputs)
images = self.preprocess_image(batched_inputs)
if "instances" in batched_inputs[0]:
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
else:
gt_instances = None
features = self.backbone(images.tensor)
if self.proposal_generator is not None:
proposals, proposal_losses = self.proposal_generator(images, features, gt_instances)
else:
assert "proposals" in batched_inputs[0]
proposals = [x["proposals"].to(self.device) for x in batched_inputs]
proposal_losses = {}
_, detector_losses = self.roi_heads(images, features, proposals, gt_instances)
if self.vis_period > 0:
storage = get_event_storage()
if storage.iter % self.vis_period == 0:
self.visualize_training(batched_inputs, proposals)
losses = {}
losses.update(detector_losses)
losses.update(proposal_losses)
return losses
def inference(
self,
batched_inputs: List[Dict[str, torch.Tensor]],
detected_instances: Optional[List[Instances]] = None,
do_postprocess: bool = True,
):
"""
Run inference on the given inputs.
Args:
batched_inputs (list[dict]): same as in :meth:`forward`
detected_instances (None or list[Instances]): if not None, it
contains an `Instances` object per image. The `Instances`
object contains "pred_boxes" and "pred_classes" which are
known boxes in the image.
The inference will then skip the detection of bounding boxes,
and only predict other per-ROI outputs.
do_postprocess (bool): whether to apply post-processing on the outputs.
Returns:
When do_postprocess=True, same as in :meth:`forward`.
Otherwise, a list[Instances] containing raw network outputs.
"""
assert not self.training
images = self.preprocess_image(batched_inputs)
features = self.backbone(images.tensor)
if detected_instances is None:
if self.proposal_generator is not None:
proposals, _ = self.proposal_generator(images, features, None)
else:
assert "proposals" in batched_inputs[0]
proposals = [x["proposals"].to(self.device) for x in batched_inputs]
results, _ = self.roi_heads(images, features, proposals, None)
else:
detected_instances = [x.to(self.device) for x in detected_instances]
results = self.roi_heads.forward_with_given_boxes(features, detected_instances)
if do_postprocess:
assert not torch.jit.is_scripting(), "Scripting is not supported for postprocess."
return GeneralizedRCNN._postprocess(results, batched_inputs, images.image_sizes)
else:
return results
def preprocess_image(self, batched_inputs: List[Dict[str, torch.Tensor]]):
"""
Normalize, pad and batch the input images.
"""
images = [x["image"].to(self.device) for x in batched_inputs]
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(images, self.backbone.size_divisibility)
return images
@staticmethod
def _postprocess(instances, batched_inputs: List[Dict[str, torch.Tensor]], image_sizes):
"""
Rescale the output instances to the target size.
"""
# note: private function; subject to changes
processed_results = []
for results_per_image, input_per_image, image_size in zip(
instances, batched_inputs, image_sizes
):
height = input_per_image.get("height", image_size[0])
width = input_per_image.get("width", image_size[1])
r = detector_postprocess(results_per_image, height, width)
processed_results.append({"instances": r})
return processed_results
@META_ARCH_REGISTRY.register()
class ProposalNetwork(nn.Module):
"""
A meta architecture that only predicts object proposals.
"""
@configurable
def __init__(
self,
*,
backbone: Backbone,
proposal_generator: nn.Module,
pixel_mean: Tuple[float],
pixel_std: Tuple[float],
):
"""
Args:
backbone: a backbone module, must follow detectron2's backbone interface
proposal_generator: a module that generates proposals using backbone features
pixel_mean, pixel_std: list or tuple with #channels element, representing
the per-channel mean and std to be used to normalize the input image
"""
super().__init__()
self.backbone = backbone
self.proposal_generator = proposal_generator
self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False)
self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False)
@classmethod
def from_config(cls, cfg):
backbone = build_backbone(cfg)
return {
"backbone": backbone,
"proposal_generator": build_proposal_generator(cfg, backbone.output_shape()),
"pixel_mean": cfg.MODEL.PIXEL_MEAN,
"pixel_std": cfg.MODEL.PIXEL_STD,
}
@property
def device(self):
return self.pixel_mean.device
def forward(self, batched_inputs):
"""
Args:
Same as in :class:`GeneralizedRCNN.forward`
Returns:
list[dict]:
Each dict is the output for one input image.
The dict contains one key "proposals" whose value is a
:class:`Instances` with keys "proposal_boxes" and "objectness_logits".
"""
images = [x["image"].to(self.device) for x in batched_inputs]
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(images, self.backbone.size_divisibility)
features = self.backbone(images.tensor)
if "instances" in batched_inputs[0]:
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
elif "targets" in batched_inputs[0]:
log_first_n(
logging.WARN, "'targets' in the model inputs is now renamed to 'instances'!", n=10
)
gt_instances = [x["targets"].to(self.device) for x in batched_inputs]
else:
gt_instances = None
proposals, proposal_losses = self.proposal_generator(images, features, gt_instances)
# In training, the proposals are not useful at all but we generate them anyway.
# This makes RPN-only models about 5% slower.
if self.training:
return proposal_losses
processed_results = []
for results_per_image, input_per_image, image_size in zip(
proposals, batched_inputs, images.image_sizes
):
height = input_per_image.get("height", image_size[0])
width = input_per_image.get("width", image_size[1])
r = detector_postprocess(results_per_image, height, width)
processed_results.append({"proposals": r})
return processed_results | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/modeling/meta_arch/rcnn.py | rcnn.py |
import numpy as np
from typing import Dict, List, Optional, Tuple
import torch
from torch import Tensor, nn
from detectron2.data.detection_utils import convert_image_to_rgb
from detectron2.modeling import Backbone
from detectron2.structures import Boxes, ImageList, Instances
from detectron2.utils.events import get_event_storage
from ..postprocessing import detector_postprocess
def permute_to_N_HWA_K(tensor, K: int):
"""
Transpose/reshape a tensor from (N, (Ai x K), H, W) to (N, (HxWxAi), K)
"""
assert tensor.dim() == 4, tensor.shape
N, _, H, W = tensor.shape
tensor = tensor.view(N, -1, K, H, W)
tensor = tensor.permute(0, 3, 4, 1, 2)
tensor = tensor.reshape(N, -1, K) # Size=(N,HWA,K)
return tensor
class DenseDetector(nn.Module):
"""
Base class for dense detector. We define a dense detector as a fully-convolutional model that
makes per-pixel (i.e. dense) predictions.
"""
def __init__(
self,
backbone: Backbone,
head: nn.Module,
head_in_features: Optional[List[str]] = None,
*,
pixel_mean,
pixel_std,
):
"""
Args:
backbone: backbone module
head: head module
head_in_features: backbone features to use in head. Default to all backbone features.
pixel_mean (Tuple[float]):
Values to be used for image normalization (BGR order).
To train on images of different number of channels, set different mean & std.
Default values are the mean pixel value from ImageNet: [103.53, 116.28, 123.675]
pixel_std (Tuple[float]):
When using pre-trained models in Detectron1 or any MSRA models,
std has been absorbed into its conv1 weights, so the std needs to be set 1.
Otherwise, you can use [57.375, 57.120, 58.395] (ImageNet std)
"""
super().__init__()
self.backbone = backbone
self.head = head
if head_in_features is None:
shapes = self.backbone.output_shape()
self.head_in_features = sorted(shapes.keys(), key=lambda x: shapes[x].stride)
else:
self.head_in_features = head_in_features
self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False)
self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False)
@property
def device(self):
return self.pixel_mean.device
def forward(self, batched_inputs: List[Dict[str, Tensor]]):
"""
Args:
batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
Each item in the list contains the inputs for one image.
For now, each item in the list is a dict that contains:
* image: Tensor, image in (C, H, W) format.
* instances: Instances
Other information that's included in the original dicts, such as:
* "height", "width" (int): the output resolution of the model, used in inference.
See :meth:`postprocess` for details.
Returns:
In training, dict[str, Tensor]: mapping from a named loss to a tensor storing the
loss. Used during training only. In inference, the standard output format, described
in :doc:`/tutorials/models`.
"""
images = self.preprocess_image(batched_inputs)
features = self.backbone(images.tensor)
features = [features[f] for f in self.head_in_features]
predictions = self.head(features)
if self.training:
assert not torch.jit.is_scripting(), "Not supported"
assert "instances" in batched_inputs[0], "Instance annotations are missing in training!"
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
return self.forward_training(images, features, predictions, gt_instances)
else:
results = self.forward_inference(images, features, predictions)
if torch.jit.is_scripting():
return results
processed_results = []
for results_per_image, input_per_image, image_size in zip(
results, batched_inputs, images.image_sizes
):
height = input_per_image.get("height", image_size[0])
width = input_per_image.get("width", image_size[1])
r = detector_postprocess(results_per_image, height, width)
processed_results.append({"instances": r})
return processed_results
def forward_training(self, images, features, predictions, gt_instances):
raise NotImplementedError()
def preprocess_image(self, batched_inputs: List[Dict[str, Tensor]]):
"""
Normalize, pad and batch the input images.
"""
images = [x["image"].to(self.device) for x in batched_inputs]
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(images, self.backbone.size_divisibility)
return images
def _transpose_dense_predictions(
self, predictions: List[List[Tensor]], dims_per_anchor: List[int]
) -> List[List[Tensor]]:
"""
Transpose the dense per-level predictions.
Args:
predictions: a list of outputs, each is a list of per-level
predictions with shape (N, Ai x K, Hi, Wi), where N is the
number of images, Ai is the number of anchors per location on
level i, K is the dimension of predictions per anchor.
dims_per_anchor: the value of K for each predictions. e.g. 4 for
box prediction, #classes for classification prediction.
Returns:
List[List[Tensor]]: each prediction is transposed to (N, Hi x Wi x Ai, K).
"""
assert len(predictions) == len(dims_per_anchor)
res: List[List[Tensor]] = []
for pred, dim_per_anchor in zip(predictions, dims_per_anchor):
pred = [permute_to_N_HWA_K(x, dim_per_anchor) for x in pred]
res.append(pred)
return res
def _ema_update(self, name: str, value: float, initial_value: float, momentum: float = 0.9):
"""
Apply EMA update to `self.name` using `value`.
This is mainly used for loss normalizer. In Detectron1, loss is normalized by number
of foreground samples in the batch. When batch size is 1 per GPU, #foreground has a
large variance and using it lead to lower performance. Therefore we maintain an EMA of
#foreground to stabilize the normalizer.
Args:
name: name of the normalizer
value: the new value to update
initial_value: the initial value to start with
momentum: momentum of EMA
Returns:
float: the updated EMA value
"""
if hasattr(self, name):
old = getattr(self, name)
else:
old = initial_value
new = old * momentum + value * (1 - momentum)
setattr(self, name, new)
return new
def _decode_per_level_predictions(
self,
anchors: Boxes,
pred_scores: Tensor,
pred_deltas: Tensor,
score_thresh: float,
topk_candidates: int,
image_size: Tuple[int, int],
) -> Instances:
"""
Decode boxes and classification predictions of one featuer level, by
the following steps:
1. filter the predictions based on score threshold and top K scores.
2. transform the box regression outputs
3. return the predicted scores, classes and boxes
Args:
anchors: Boxes, anchor for this feature level
pred_scores: HxWxA,K
pred_deltas: HxWxA,4
Returns:
Instances: with field "scores", "pred_boxes", "pred_classes".
"""
# Apply two filtering to make NMS faster.
# 1. Keep boxes with confidence score higher than threshold
keep_idxs = pred_scores > score_thresh
pred_scores = pred_scores[keep_idxs]
topk_idxs = torch.nonzero(keep_idxs) # Kx2
# 2. Keep top k top scoring boxes only
num_topk = min(topk_candidates, topk_idxs.size(0))
pred_scores, idxs = pred_scores.topk(num_topk)
topk_idxs = topk_idxs[idxs]
anchor_idxs, classes_idxs = topk_idxs.unbind(dim=1)
pred_boxes = self.box2box_transform.apply_deltas(
pred_deltas[anchor_idxs], anchors.tensor[anchor_idxs]
)
return Instances(
image_size, pred_boxes=Boxes(pred_boxes), scores=pred_scores, pred_classes=classes_idxs
)
def _decode_multi_level_predictions(
self,
anchors: List[Boxes],
pred_scores: List[Tensor],
pred_deltas: List[Tensor],
score_thresh: float,
topk_candidates: int,
image_size: Tuple[int, int],
) -> Instances:
"""
Run `_decode_per_level_predictions` for all feature levels and concat the results.
"""
predictions = [
self._decode_per_level_predictions(
anchors_i,
box_cls_i,
box_reg_i,
self.test_score_thresh,
self.test_topk_candidates,
image_size,
)
# Iterate over every feature level
for box_cls_i, box_reg_i, anchors_i in zip(pred_scores, pred_deltas, anchors)
]
return predictions[0].cat(predictions) # 'Instances.cat' is not scriptale but this is
def visualize_training(self, batched_inputs, results):
"""
A function used to visualize ground truth images and final network predictions.
It shows ground truth bounding boxes on the original image and up to 20
predicted object bounding boxes on the original image.
Args:
batched_inputs (list): a list that contains input to the model.
results (List[Instances]): a list of #images elements returned by forward_inference().
"""
from detectron2.utils.visualizer import Visualizer
assert len(batched_inputs) == len(
results
), "Cannot visualize inputs and results of different sizes"
storage = get_event_storage()
max_boxes = 20
image_index = 0 # only visualize a single image
img = batched_inputs[image_index]["image"]
img = convert_image_to_rgb(img.permute(1, 2, 0), self.input_format)
v_gt = Visualizer(img, None)
v_gt = v_gt.overlay_instances(boxes=batched_inputs[image_index]["instances"].gt_boxes)
anno_img = v_gt.get_image()
processed_results = detector_postprocess(results[image_index], img.shape[0], img.shape[1])
predicted_boxes = processed_results.pred_boxes.tensor.detach().cpu().numpy()
v_pred = Visualizer(img, None)
v_pred = v_pred.overlay_instances(boxes=predicted_boxes[0:max_boxes])
prop_img = v_pred.get_image()
vis_img = np.vstack((anno_img, prop_img))
vis_img = vis_img.transpose(2, 0, 1)
vis_name = f"Top: GT bounding boxes; Bottom: {max_boxes} Highest Scoring Results"
storage.put_image(vis_name, vis_img) | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/modeling/meta_arch/dense_detector.py | dense_detector.py |
import numpy as np
from typing import Callable, Dict, Optional, Tuple, Union
import fvcore.nn.weight_init as weight_init
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.layers import Conv2d, ShapeSpec, get_norm
from detectron2.structures import ImageList
from detectron2.utils.registry import Registry
from ..backbone import Backbone, build_backbone
from ..postprocessing import sem_seg_postprocess
from .build import META_ARCH_REGISTRY
__all__ = [
"SemanticSegmentor",
"SEM_SEG_HEADS_REGISTRY",
"SemSegFPNHead",
"build_sem_seg_head",
]
SEM_SEG_HEADS_REGISTRY = Registry("SEM_SEG_HEADS")
SEM_SEG_HEADS_REGISTRY.__doc__ = """
Registry for semantic segmentation heads, which make semantic segmentation predictions
from feature maps.
"""
@META_ARCH_REGISTRY.register()
class SemanticSegmentor(nn.Module):
"""
Main class for semantic segmentation architectures.
"""
@configurable
def __init__(
self,
*,
backbone: Backbone,
sem_seg_head: nn.Module,
pixel_mean: Tuple[float],
pixel_std: Tuple[float],
):
"""
Args:
backbone: a backbone module, must follow detectron2's backbone interface
sem_seg_head: a module that predicts semantic segmentation from backbone features
pixel_mean, pixel_std: list or tuple with #channels element, representing
the per-channel mean and std to be used to normalize the input image
"""
super().__init__()
self.backbone = backbone
self.sem_seg_head = sem_seg_head
self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False)
self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False)
@classmethod
def from_config(cls, cfg):
backbone = build_backbone(cfg)
sem_seg_head = build_sem_seg_head(cfg, backbone.output_shape())
return {
"backbone": backbone,
"sem_seg_head": sem_seg_head,
"pixel_mean": cfg.MODEL.PIXEL_MEAN,
"pixel_std": cfg.MODEL.PIXEL_STD,
}
@property
def device(self):
return self.pixel_mean.device
def forward(self, batched_inputs):
"""
Args:
batched_inputs: a list, batched outputs of :class:`DatasetMapper`.
Each item in the list contains the inputs for one image.
For now, each item in the list is a dict that contains:
* "image": Tensor, image in (C, H, W) format.
* "sem_seg": semantic segmentation ground truth
* Other information that's included in the original dicts, such as:
"height", "width" (int): the output resolution of the model (may be different
from input resolution), used in inference.
Returns:
list[dict]:
Each dict is the output for one input image.
The dict contains one key "sem_seg" whose value is a
Tensor that represents the
per-pixel segmentation prediced by the head.
The prediction has shape KxHxW that represents the logits of
each class for each pixel.
"""
images = [x["image"].to(self.device) for x in batched_inputs]
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(images, self.backbone.size_divisibility)
features = self.backbone(images.tensor)
if "sem_seg" in batched_inputs[0]:
targets = [x["sem_seg"].to(self.device) for x in batched_inputs]
targets = ImageList.from_tensors(
targets, self.backbone.size_divisibility, self.sem_seg_head.ignore_value
).tensor
else:
targets = None
results, losses = self.sem_seg_head(features, targets)
if self.training:
return losses
processed_results = []
for result, input_per_image, image_size in zip(results, batched_inputs, images.image_sizes):
height = input_per_image.get("height", image_size[0])
width = input_per_image.get("width", image_size[1])
r = sem_seg_postprocess(result, image_size, height, width)
processed_results.append({"sem_seg": r})
return processed_results
def build_sem_seg_head(cfg, input_shape):
"""
Build a semantic segmentation head from `cfg.MODEL.SEM_SEG_HEAD.NAME`.
"""
name = cfg.MODEL.SEM_SEG_HEAD.NAME
return SEM_SEG_HEADS_REGISTRY.get(name)(cfg, input_shape)
@SEM_SEG_HEADS_REGISTRY.register()
class SemSegFPNHead(nn.Module):
"""
A semantic segmentation head described in :paper:`PanopticFPN`.
It takes a list of FPN features as input, and applies a sequence of
3x3 convs and upsampling to scale all of them to the stride defined by
``common_stride``. Then these features are added and used to make final
predictions by another 1x1 conv layer.
"""
@configurable
def __init__(
self,
input_shape: Dict[str, ShapeSpec],
*,
num_classes: int,
conv_dims: int,
common_stride: int,
loss_weight: float = 1.0,
norm: Optional[Union[str, Callable]] = None,
ignore_value: int = -1,
):
"""
NOTE: this interface is experimental.
Args:
input_shape: shapes (channels and stride) of the input features
num_classes: number of classes to predict
conv_dims: number of output channels for the intermediate conv layers.
common_stride: the common stride that all features will be upscaled to
loss_weight: loss weight
norm (str or callable): normalization for all conv layers
ignore_value: category id to be ignored during training.
"""
super().__init__()
input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride)
if not len(input_shape):
raise ValueError("SemSegFPNHead(input_shape=) cannot be empty!")
self.in_features = [k for k, v in input_shape]
feature_strides = [v.stride for k, v in input_shape]
feature_channels = [v.channels for k, v in input_shape]
self.ignore_value = ignore_value
self.common_stride = common_stride
self.loss_weight = loss_weight
self.scale_heads = []
for in_feature, stride, channels in zip(
self.in_features, feature_strides, feature_channels
):
head_ops = []
head_length = max(1, int(np.log2(stride) - np.log2(self.common_stride)))
for k in range(head_length):
norm_module = get_norm(norm, conv_dims)
conv = Conv2d(
channels if k == 0 else conv_dims,
conv_dims,
kernel_size=3,
stride=1,
padding=1,
bias=not norm,
norm=norm_module,
activation=F.relu,
)
weight_init.c2_msra_fill(conv)
head_ops.append(conv)
if stride != self.common_stride:
head_ops.append(
nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False)
)
self.scale_heads.append(nn.Sequential(*head_ops))
self.add_module(in_feature, self.scale_heads[-1])
self.predictor = Conv2d(conv_dims, num_classes, kernel_size=1, stride=1, padding=0)
weight_init.c2_msra_fill(self.predictor)
@classmethod
def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
return {
"input_shape": {
k: v for k, v in input_shape.items() if k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES
},
"ignore_value": cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
"num_classes": cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
"conv_dims": cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM,
"common_stride": cfg.MODEL.SEM_SEG_HEAD.COMMON_STRIDE,
"norm": cfg.MODEL.SEM_SEG_HEAD.NORM,
"loss_weight": cfg.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT,
}
def forward(self, features, targets=None):
"""
Returns:
In training, returns (None, dict of losses)
In inference, returns (CxHxW logits, {})
"""
x = self.layers(features)
if self.training:
return None, self.losses(x, targets)
else:
x = F.interpolate(
x, scale_factor=self.common_stride, mode="bilinear", align_corners=False
)
return x, {}
def layers(self, features):
for i, f in enumerate(self.in_features):
if i == 0:
x = self.scale_heads[i](features[f])
else:
x = x + self.scale_heads[i](features[f])
x = self.predictor(x)
return x
def losses(self, predictions, targets):
predictions = predictions.float() # https://github.com/pytorch/pytorch/issues/48163
predictions = F.interpolate(
predictions,
scale_factor=self.common_stride,
mode="bilinear",
align_corners=False,
)
loss = F.cross_entropy(
predictions, targets, reduction="mean", ignore_index=self.ignore_value
)
losses = {"loss_sem_seg": loss * self.loss_weight}
return losses | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/modeling/meta_arch/semantic_seg.py | semantic_seg.py |
import logging
from typing import List, Optional, Tuple
import torch
from fvcore.nn import sigmoid_focal_loss_jit
from torch import nn
from torch.nn import functional as F
from detectron2.layers import ShapeSpec, batched_nms
from detectron2.modeling.matcher import Matcher
from detectron2.structures import Boxes, ImageList, Instances, pairwise_point_box_distance
from detectron2.utils.events import get_event_storage
from ..anchor_generator import DefaultAnchorGenerator
from ..backbone import Backbone
from ..box_regression import Box2BoxTransformLinear, _dense_box_regression_loss
from .dense_detector import DenseDetector
from .retinanet import RetinaNetHead
__all__ = ["FCOS"]
logger = logging.getLogger(__name__)
class FCOS(DenseDetector):
"""
Implement FCOS in :paper:`fcos`.
"""
def __init__(
self,
*,
backbone: Backbone,
head: nn.Module,
head_in_features: Optional[List[str]] = None,
box2box_transform=None,
num_classes,
center_sampling_radius: float = 1.5,
focal_loss_alpha=0.25,
focal_loss_gamma=2.0,
test_score_thresh=0.2,
test_topk_candidates=1000,
test_nms_thresh=0.6,
max_detections_per_image=100,
pixel_mean,
pixel_std,
):
"""
Args:
center_sampling_radius: radius of the "center" of a groundtruth box,
within which all anchor points are labeled positive.
Other arguments mean the same as in :class:`RetinaNet`.
"""
super().__init__(
backbone, head, head_in_features, pixel_mean=pixel_mean, pixel_std=pixel_std
)
self.num_classes = num_classes
# FCOS uses one anchor point per location.
# We represent the anchor point by a box whose size equals the anchor stride.
feature_shapes = backbone.output_shape()
fpn_strides = [feature_shapes[k].stride for k in self.head_in_features]
self.anchor_generator = DefaultAnchorGenerator(
sizes=[[k] for k in fpn_strides], aspect_ratios=[1.0], strides=fpn_strides
)
# FCOS parameterizes box regression by a linear transform,
# where predictions are normalized by anchor stride (equal to anchor size).
if box2box_transform is None:
box2box_transform = Box2BoxTransformLinear(normalize_by_size=True)
self.box2box_transform = box2box_transform
self.center_sampling_radius = float(center_sampling_radius)
# Loss parameters:
self.focal_loss_alpha = focal_loss_alpha
self.focal_loss_gamma = focal_loss_gamma
# Inference parameters:
self.test_score_thresh = test_score_thresh
self.test_topk_candidates = test_topk_candidates
self.test_nms_thresh = test_nms_thresh
self.max_detections_per_image = max_detections_per_image
def forward_training(self, images, features, predictions, gt_instances):
# Transpose the Hi*Wi*A dimension to the middle:
pred_logits, pred_anchor_deltas, pred_centerness = self._transpose_dense_predictions(
predictions, [self.num_classes, 4, 1]
)
anchors = self.anchor_generator(features)
gt_labels, gt_boxes = self.label_anchors(anchors, gt_instances)
return self.losses(
anchors, pred_logits, gt_labels, pred_anchor_deltas, gt_boxes, pred_centerness
)
@torch.no_grad()
def _match_anchors(self, gt_boxes: Boxes, anchors: List[Boxes]):
"""
Match ground-truth boxes to a set of multi-level anchors.
Args:
gt_boxes: Ground-truth boxes from instances of an image.
anchors: List of anchors for each feature map (of different scales).
Returns:
torch.Tensor
A tensor of shape `(M, R)`, given `M` ground-truth boxes and total
`R` anchor points from all feature levels, indicating the quality
of match between m-th box and r-th anchor. Higher value indicates
better match.
"""
# Naming convention: (M = ground-truth boxes, R = anchor points)
# Anchor points are represented as square boxes of size = stride.
num_anchors_per_level = [len(x) for x in anchors]
anchors = Boxes.cat(anchors) # (R, 4)
anchor_centers = anchors.get_centers() # (R, 2)
anchor_sizes = anchors.tensor[:, 2] - anchors.tensor[:, 0] # (R, )
lower_bound = anchor_sizes * 4
lower_bound[: num_anchors_per_level[0]] = 0
upper_bound = anchor_sizes * 8
upper_bound[-num_anchors_per_level[-1] :] = float("inf")
gt_centers = gt_boxes.get_centers()
# FCOS with center sampling: anchor point must be close enough to
# ground-truth box center.
center_dists = (anchor_centers[None, :, :] - gt_centers[:, None, :]).abs_()
sampling_regions = self.center_sampling_radius * anchor_sizes[None, :]
match_quality_matrix = center_dists.max(dim=2).values < sampling_regions
pairwise_dist = pairwise_point_box_distance(anchor_centers, gt_boxes)
pairwise_dist = pairwise_dist.permute(1, 0, 2) # (M, R, 4)
# The original FCOS anchor matching rule: anchor point must be inside GT.
match_quality_matrix &= pairwise_dist.min(dim=2).values > 0
# Multilevel anchor matching in FCOS: each anchor is only responsible
# for certain scale range.
pairwise_dist = pairwise_dist.max(dim=2).values
match_quality_matrix &= (pairwise_dist > lower_bound[None, :]) & (
pairwise_dist < upper_bound[None, :]
)
# Match the GT box with minimum area, if there are multiple GT matches.
gt_areas = gt_boxes.area() # (M, )
match_quality_matrix = match_quality_matrix.to(torch.float32)
match_quality_matrix *= 1e8 - gt_areas[:, None]
return match_quality_matrix # (M, R)
@torch.no_grad()
def label_anchors(self, anchors: List[Boxes], gt_instances: List[Instances]):
"""
Same interface as :meth:`RetinaNet.label_anchors`, but implemented with FCOS
anchor matching rule.
Unlike RetinaNet, there are no ignored anchors.
"""
gt_labels, matched_gt_boxes = [], []
for inst in gt_instances:
if len(inst) > 0:
match_quality_matrix = self._match_anchors(inst.gt_boxes, anchors)
# Find matched ground-truth box per anchor. Un-matched anchors are
# assigned -1. This is equivalent to using an anchor matcher as used
# in R-CNN/RetinaNet: `Matcher(thresholds=[1e-5], labels=[0, 1])`
match_quality, matched_idxs = match_quality_matrix.max(dim=0)
matched_idxs[match_quality < 1e-5] = -1
matched_gt_boxes_i = inst.gt_boxes.tensor[matched_idxs.clip(min=0)]
gt_labels_i = inst.gt_classes[matched_idxs.clip(min=0)]
# Anchors with matched_idxs = -1 are labeled background.
gt_labels_i[matched_idxs < 0] = self.num_classes
else:
matched_gt_boxes_i = torch.zeros_like(Boxes.cat(anchors).tensor)
gt_labels_i = torch.full(
(len(matched_gt_boxes_i), ),
fill_value=self.num_classes,
dtype=torch.long,
device=matched_gt_boxes_i.device,
)
gt_labels.append(gt_labels_i)
matched_gt_boxes.append(matched_gt_boxes_i)
return gt_labels, matched_gt_boxes
def losses(
self, anchors, pred_logits, gt_labels, pred_anchor_deltas, gt_boxes, pred_centerness
):
"""
This method is almost identical to :meth:`RetinaNet.losses`, with an extra
"loss_centerness" in the returned dict.
"""
num_images = len(gt_labels)
gt_labels = torch.stack(gt_labels) # (M, R)
pos_mask = (gt_labels >= 0) & (gt_labels != self.num_classes)
num_pos_anchors = pos_mask.sum().item()
get_event_storage().put_scalar("num_pos_anchors", num_pos_anchors / num_images)
normalizer = self._ema_update("loss_normalizer", max(num_pos_anchors, 1), 300)
# classification and regression loss
gt_labels_target = F.one_hot(gt_labels, num_classes=self.num_classes + 1)[
:, :, :-1
] # no loss for the last (background) class
loss_cls = sigmoid_focal_loss_jit(
torch.cat(pred_logits, dim=1),
gt_labels_target.to(pred_logits[0].dtype),
alpha=self.focal_loss_alpha,
gamma=self.focal_loss_gamma,
reduction="sum",
)
loss_box_reg = _dense_box_regression_loss(
anchors,
self.box2box_transform,
pred_anchor_deltas,
gt_boxes,
pos_mask,
box_reg_loss_type="giou",
)
ctrness_targets = self.compute_ctrness_targets(anchors, gt_boxes) # (M, R)
pred_centerness = torch.cat(pred_centerness, dim=1).squeeze(dim=2) # (M, R)
ctrness_loss = F.binary_cross_entropy_with_logits(
pred_centerness[pos_mask], ctrness_targets[pos_mask], reduction="sum"
)
return {
"loss_fcos_cls": loss_cls / normalizer,
"loss_fcos_loc": loss_box_reg / normalizer,
"loss_fcos_ctr": ctrness_loss / normalizer,
}
def compute_ctrness_targets(
self, anchors: List[Boxes], gt_boxes: List[torch.Tensor]
):
anchors = Boxes.cat(anchors).tensor # Rx4
reg_targets = [self.box2box_transform.get_deltas(anchors, m) for m in gt_boxes]
reg_targets = torch.stack(reg_targets, dim=0) # NxRx4
if len(reg_targets) == 0:
return reg_targets.new_zeros(len(reg_targets))
left_right = reg_targets[:, :, [0, 2]]
top_bottom = reg_targets[:, :, [1, 3]]
ctrness = (left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * (
top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0]
)
return torch.sqrt(ctrness)
def forward_inference(
self,
images: ImageList,
features: List[torch.Tensor],
predictions: List[List[torch.Tensor]],
):
pred_logits, pred_anchor_deltas, pred_centerness = self._transpose_dense_predictions(
predictions, [self.num_classes, 4, 1]
)
anchors = self.anchor_generator(features)
results: List[Instances] = []
for img_idx, image_size in enumerate(images.image_sizes):
scores_per_image = [
# Multiply and sqrt centerness & classification scores
# (See eqn. 4 in https://arxiv.org/abs/2006.09214)
torch.sqrt(x[img_idx].sigmoid_() * y[img_idx].sigmoid_())
for x, y in zip(pred_logits, pred_centerness)
]
deltas_per_image = [x[img_idx] for x in pred_anchor_deltas]
results_per_image = self.inference_single_image(
anchors, scores_per_image, deltas_per_image, image_size
)
results.append(results_per_image)
return results
def inference_single_image(
self,
anchors: List[Boxes],
box_cls: List[torch.Tensor],
box_delta: List[torch.Tensor],
image_size: Tuple[int, int],
):
"""
Identical to :meth:`RetinaNet.inference_single_image.
"""
pred = self._decode_multi_level_predictions(
anchors,
box_cls,
box_delta,
self.test_score_thresh,
self.test_topk_candidates,
image_size,
)
keep = batched_nms(
pred.pred_boxes.tensor, pred.scores, pred.pred_classes, self.test_nms_thresh
)
return pred[keep[: self.max_detections_per_image]]
class FCOSHead(RetinaNetHead):
"""
The head used in :paper:`fcos`. It adds an additional centerness
prediction branch on top of :class:`RetinaNetHead`.
"""
def __init__(self, *, input_shape: List[ShapeSpec], conv_dims: List[int], **kwargs):
super().__init__(input_shape=input_shape, conv_dims=conv_dims, num_anchors=1, **kwargs)
# Unlike original FCOS, we do not add an additional learnable scale layer
# because it's found to have no benefits after normalizing regression targets by stride.
self._num_features = len(input_shape)
self.ctrness = nn.Conv2d(conv_dims[-1], 1, kernel_size=3, stride=1, padding=1)
torch.nn.init.normal_(self.ctrness.weight, std=0.01)
torch.nn.init.constant_(self.ctrness.bias, 0)
def forward(self, features):
assert len(features) == self._num_features
logits = []
bbox_reg = []
ctrness = []
for feature in features:
logits.append(self.cls_score(self.cls_subnet(feature)))
bbox_feature = self.bbox_subnet(feature)
bbox_reg.append(self.bbox_pred(bbox_feature))
ctrness.append(self.ctrness(bbox_feature))
return logits, bbox_reg, ctrness | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/modeling/meta_arch/fcos.py | fcos.py |
import logging
import math
from typing import List, Tuple
import torch
from fvcore.nn import sigmoid_focal_loss_jit
from torch import Tensor, nn
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.layers import CycleBatchNormList, ShapeSpec, batched_nms, cat, get_norm
from detectron2.structures import Boxes, ImageList, Instances, pairwise_iou
from detectron2.utils.events import get_event_storage
from ..anchor_generator import build_anchor_generator
from ..backbone import Backbone, build_backbone
from ..box_regression import Box2BoxTransform, _dense_box_regression_loss
from ..matcher import Matcher
from .build import META_ARCH_REGISTRY
from .dense_detector import DenseDetector, permute_to_N_HWA_K # noqa
__all__ = ["RetinaNet"]
logger = logging.getLogger(__name__)
@META_ARCH_REGISTRY.register()
class RetinaNet(DenseDetector):
"""
Implement RetinaNet in :paper:`RetinaNet`.
"""
@configurable
def __init__(
self,
*,
backbone: Backbone,
head: nn.Module,
head_in_features,
anchor_generator,
box2box_transform,
anchor_matcher,
num_classes,
focal_loss_alpha=0.25,
focal_loss_gamma=2.0,
smooth_l1_beta=0.0,
box_reg_loss_type="smooth_l1",
test_score_thresh=0.05,
test_topk_candidates=1000,
test_nms_thresh=0.5,
max_detections_per_image=100,
pixel_mean,
pixel_std,
vis_period=0,
input_format="BGR",
):
"""
NOTE: this interface is experimental.
Args:
backbone: a backbone module, must follow detectron2's backbone interface
head (nn.Module): a module that predicts logits and regression deltas
for each level from a list of per-level features
head_in_features (Tuple[str]): Names of the input feature maps to be used in head
anchor_generator (nn.Module): a module that creates anchors from a
list of features. Usually an instance of :class:`AnchorGenerator`
box2box_transform (Box2BoxTransform): defines the transform from anchors boxes to
instance boxes
anchor_matcher (Matcher): label the anchors by matching them with ground truth.
num_classes (int): number of classes. Used to label background proposals.
# Loss parameters:
focal_loss_alpha (float): focal_loss_alpha
focal_loss_gamma (float): focal_loss_gamma
smooth_l1_beta (float): smooth_l1_beta
box_reg_loss_type (str): Options are "smooth_l1", "giou", "diou", "ciou"
# Inference parameters:
test_score_thresh (float): Inference cls score threshold, only anchors with
score > INFERENCE_TH are considered for inference (to improve speed)
test_topk_candidates (int): Select topk candidates before NMS
test_nms_thresh (float): Overlap threshold used for non-maximum suppression
(suppress boxes with IoU >= this threshold)
max_detections_per_image (int):
Maximum number of detections to return per image during inference
(100 is based on the limit established for the COCO dataset).
pixel_mean, pixel_std: see :class:`DenseDetector`.
"""
super().__init__(
backbone, head, head_in_features, pixel_mean=pixel_mean, pixel_std=pixel_std
)
self.num_classes = num_classes
# Anchors
self.anchor_generator = anchor_generator
self.box2box_transform = box2box_transform
self.anchor_matcher = anchor_matcher
# Loss parameters:
self.focal_loss_alpha = focal_loss_alpha
self.focal_loss_gamma = focal_loss_gamma
self.smooth_l1_beta = smooth_l1_beta
self.box_reg_loss_type = box_reg_loss_type
# Inference parameters:
self.test_score_thresh = test_score_thresh
self.test_topk_candidates = test_topk_candidates
self.test_nms_thresh = test_nms_thresh
self.max_detections_per_image = max_detections_per_image
# Vis parameters
self.vis_period = vis_period
self.input_format = input_format
@classmethod
def from_config(cls, cfg):
backbone = build_backbone(cfg)
backbone_shape = backbone.output_shape()
feature_shapes = [backbone_shape[f] for f in cfg.MODEL.RETINANET.IN_FEATURES]
head = RetinaNetHead(cfg, feature_shapes)
anchor_generator = build_anchor_generator(cfg, feature_shapes)
return {
"backbone": backbone,
"head": head,
"anchor_generator": anchor_generator,
"box2box_transform": Box2BoxTransform(weights=cfg.MODEL.RETINANET.BBOX_REG_WEIGHTS),
"anchor_matcher": Matcher(
cfg.MODEL.RETINANET.IOU_THRESHOLDS,
cfg.MODEL.RETINANET.IOU_LABELS,
allow_low_quality_matches=True,
),
"pixel_mean": cfg.MODEL.PIXEL_MEAN,
"pixel_std": cfg.MODEL.PIXEL_STD,
"num_classes": cfg.MODEL.RETINANET.NUM_CLASSES,
"head_in_features": cfg.MODEL.RETINANET.IN_FEATURES,
# Loss parameters:
"focal_loss_alpha": cfg.MODEL.RETINANET.FOCAL_LOSS_ALPHA,
"focal_loss_gamma": cfg.MODEL.RETINANET.FOCAL_LOSS_GAMMA,
"smooth_l1_beta": cfg.MODEL.RETINANET.SMOOTH_L1_LOSS_BETA,
"box_reg_loss_type": cfg.MODEL.RETINANET.BBOX_REG_LOSS_TYPE,
# Inference parameters:
"test_score_thresh": cfg.MODEL.RETINANET.SCORE_THRESH_TEST,
"test_topk_candidates": cfg.MODEL.RETINANET.TOPK_CANDIDATES_TEST,
"test_nms_thresh": cfg.MODEL.RETINANET.NMS_THRESH_TEST,
"max_detections_per_image": cfg.TEST.DETECTIONS_PER_IMAGE,
# Vis parameters
"vis_period": cfg.VIS_PERIOD,
"input_format": cfg.INPUT.FORMAT,
}
def forward_training(self, images, features, predictions, gt_instances):
# Transpose the Hi*Wi*A dimension to the middle:
pred_logits, pred_anchor_deltas = self._transpose_dense_predictions(
predictions, [self.num_classes, 4]
)
anchors = self.anchor_generator(features)
gt_labels, gt_boxes = self.label_anchors(anchors, gt_instances)
return self.losses(anchors, pred_logits, gt_labels, pred_anchor_deltas, gt_boxes)
def losses(self, anchors, pred_logits, gt_labels, pred_anchor_deltas, gt_boxes):
"""
Args:
anchors (list[Boxes]): a list of #feature level Boxes
gt_labels, gt_boxes: see output of :meth:`RetinaNet.label_anchors`.
Their shapes are (N, R) and (N, R, 4), respectively, where R is
the total number of anchors across levels, i.e. sum(Hi x Wi x Ai)
pred_logits, pred_anchor_deltas: both are list[Tensor]. Each element in the
list corresponds to one level and has shape (N, Hi * Wi * Ai, K or 4).
Where K is the number of classes used in `pred_logits`.
Returns:
dict[str, Tensor]:
mapping from a named loss to a scalar tensor storing the loss.
Used during training only. The dict keys are: "loss_cls" and "loss_box_reg"
"""
num_images = len(gt_labels)
gt_labels = torch.stack(gt_labels) # (N, R)
valid_mask = gt_labels >= 0
pos_mask = (gt_labels >= 0) & (gt_labels != self.num_classes)
num_pos_anchors = pos_mask.sum().item()
get_event_storage().put_scalar("num_pos_anchors", num_pos_anchors / num_images)
normalizer = self._ema_update("loss_normalizer", max(num_pos_anchors, 1), 100)
# classification and regression loss
gt_labels_target = F.one_hot(gt_labels[valid_mask], num_classes=self.num_classes + 1)[
:, :-1
] # no loss for the last (background) class
loss_cls = sigmoid_focal_loss_jit(
cat(pred_logits, dim=1)[valid_mask],
gt_labels_target.to(pred_logits[0].dtype),
alpha=self.focal_loss_alpha,
gamma=self.focal_loss_gamma,
reduction="sum",
)
loss_box_reg = _dense_box_regression_loss(
anchors,
self.box2box_transform,
pred_anchor_deltas,
gt_boxes,
pos_mask,
box_reg_loss_type=self.box_reg_loss_type,
smooth_l1_beta=self.smooth_l1_beta,
)
return {
"loss_cls": loss_cls / normalizer,
"loss_box_reg": loss_box_reg / normalizer,
}
@torch.no_grad()
def label_anchors(self, anchors, gt_instances):
"""
Args:
anchors (list[Boxes]): A list of #feature level Boxes.
The Boxes contains anchors of this image on the specific feature level.
gt_instances (list[Instances]): a list of N `Instances`s. The i-th
`Instances` contains the ground-truth per-instance annotations
for the i-th input image.
Returns:
list[Tensor]: List of #img tensors. i-th element is a vector of labels whose length is
the total number of anchors across all feature maps (sum(Hi * Wi * A)).
Label values are in {-1, 0, ..., K}, with -1 means ignore, and K means background.
list[Tensor]: i-th element is a Rx4 tensor, where R is the total number of anchors
across feature maps. The values are the matched gt boxes for each anchor.
Values are undefined for those anchors not labeled as foreground.
"""
anchors = Boxes.cat(anchors) # Rx4
gt_labels = []
matched_gt_boxes = []
for gt_per_image in gt_instances:
match_quality_matrix = pairwise_iou(gt_per_image.gt_boxes, anchors)
matched_idxs, anchor_labels = self.anchor_matcher(match_quality_matrix)
del match_quality_matrix
if len(gt_per_image) > 0:
matched_gt_boxes_i = gt_per_image.gt_boxes.tensor[matched_idxs]
gt_labels_i = gt_per_image.gt_classes[matched_idxs]
# Anchors with label 0 are treated as background.
gt_labels_i[anchor_labels == 0] = self.num_classes
# Anchors with label -1 are ignored.
gt_labels_i[anchor_labels == -1] = -1
else:
matched_gt_boxes_i = torch.zeros_like(anchors.tensor)
gt_labels_i = torch.zeros_like(matched_idxs) + self.num_classes
gt_labels.append(gt_labels_i)
matched_gt_boxes.append(matched_gt_boxes_i)
return gt_labels, matched_gt_boxes
def forward_inference(
self, images: ImageList, features: List[Tensor], predictions: List[List[Tensor]]
):
pred_logits, pred_anchor_deltas = self._transpose_dense_predictions(
predictions, [self.num_classes, 4]
)
anchors = self.anchor_generator(features)
results: List[Instances] = []
for img_idx, image_size in enumerate(images.image_sizes):
scores_per_image = [x[img_idx].sigmoid_() for x in pred_logits]
deltas_per_image = [x[img_idx] for x in pred_anchor_deltas]
results_per_image = self.inference_single_image(
anchors, scores_per_image, deltas_per_image, image_size
)
results.append(results_per_image)
return results
def inference_single_image(
self,
anchors: List[Boxes],
box_cls: List[Tensor],
box_delta: List[Tensor],
image_size: Tuple[int, int],
):
"""
Single-image inference. Return bounding-box detection results by thresholding
on scores and applying non-maximum suppression (NMS).
Arguments:
anchors (list[Boxes]): list of #feature levels. Each entry contains
a Boxes object, which contains all the anchors in that feature level.
box_cls (list[Tensor]): list of #feature levels. Each entry contains
tensor of size (H x W x A, K)
box_delta (list[Tensor]): Same shape as 'box_cls' except that K becomes 4.
image_size (tuple(H, W)): a tuple of the image height and width.
Returns:
Same as `inference`, but for only one image.
"""
pred = self._decode_multi_level_predictions(
anchors,
box_cls,
box_delta,
self.test_score_thresh,
self.test_topk_candidates,
image_size,
)
keep = batched_nms( # per-class NMS
pred.pred_boxes.tensor, pred.scores, pred.pred_classes, self.test_nms_thresh
)
return pred[keep[: self.max_detections_per_image]]
class RetinaNetHead(nn.Module):
"""
The head used in RetinaNet for object classification and box regression.
It has two subnets for the two tasks, with a common structure but separate parameters.
"""
@configurable
def __init__(
self,
*,
input_shape: List[ShapeSpec],
num_classes,
num_anchors,
conv_dims: List[int],
norm="",
prior_prob=0.01,
):
"""
NOTE: this interface is experimental.
Args:
input_shape (List[ShapeSpec]): input shape
num_classes (int): number of classes. Used to label background proposals.
num_anchors (int): number of generated anchors
conv_dims (List[int]): dimensions for each convolution layer
norm (str or callable):
Normalization for conv layers except for the two output layers.
See :func:`detectron2.layers.get_norm` for supported types.
prior_prob (float): Prior weight for computing bias
"""
super().__init__()
self._num_features = len(input_shape)
if norm == "BN" or norm == "SyncBN":
logger.info(
f"Using domain-specific {norm} in RetinaNetHead with len={self._num_features}."
)
bn_class = nn.BatchNorm2d if norm == "BN" else nn.SyncBatchNorm
def norm(c):
return CycleBatchNormList(
length=self._num_features, bn_class=bn_class, num_features=c
)
else:
norm_name = str(type(get_norm(norm, 1)))
if "BN" in norm_name:
logger.warning(
f"Shared BatchNorm (type={norm_name}) may not work well in RetinaNetHead."
)
cls_subnet = []
bbox_subnet = []
for in_channels, out_channels in zip(
[input_shape[0].channels] + list(conv_dims), conv_dims
):
cls_subnet.append(
nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
)
if norm:
cls_subnet.append(get_norm(norm, out_channels))
cls_subnet.append(nn.ReLU())
bbox_subnet.append(
nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
)
if norm:
bbox_subnet.append(get_norm(norm, out_channels))
bbox_subnet.append(nn.ReLU())
self.cls_subnet = nn.Sequential(*cls_subnet)
self.bbox_subnet = nn.Sequential(*bbox_subnet)
self.cls_score = nn.Conv2d(
conv_dims[-1], num_anchors * num_classes, kernel_size=3, stride=1, padding=1
)
self.bbox_pred = nn.Conv2d(
conv_dims[-1], num_anchors * 4, kernel_size=3, stride=1, padding=1
)
# Initialization
for modules in [self.cls_subnet, self.bbox_subnet, self.cls_score, self.bbox_pred]:
for layer in modules.modules():
if isinstance(layer, nn.Conv2d):
torch.nn.init.normal_(layer.weight, mean=0, std=0.01)
torch.nn.init.constant_(layer.bias, 0)
# Use prior in model initialization to improve stability
bias_value = -(math.log((1 - prior_prob) / prior_prob))
torch.nn.init.constant_(self.cls_score.bias, bias_value)
@classmethod
def from_config(cls, cfg, input_shape: List[ShapeSpec]):
num_anchors = build_anchor_generator(cfg, input_shape).num_cell_anchors
assert (
len(set(num_anchors)) == 1
), "Using different number of anchors between levels is not currently supported!"
num_anchors = num_anchors[0]
return {
"input_shape": input_shape,
"num_classes": cfg.MODEL.RETINANET.NUM_CLASSES,
"conv_dims": [input_shape[0].channels] * cfg.MODEL.RETINANET.NUM_CONVS,
"prior_prob": cfg.MODEL.RETINANET.PRIOR_PROB,
"norm": cfg.MODEL.RETINANET.NORM,
"num_anchors": num_anchors,
}
def forward(self, features: List[Tensor]):
"""
Arguments:
features (list[Tensor]): FPN feature map tensors in high to low resolution.
Each tensor in the list correspond to different feature levels.
Returns:
logits (list[Tensor]): #lvl tensors, each has shape (N, AxK, Hi, Wi).
The tensor predicts the classification probability
at each spatial position for each of the A anchors and K object
classes.
bbox_reg (list[Tensor]): #lvl tensors, each has shape (N, Ax4, Hi, Wi).
The tensor predicts 4-vector (dx,dy,dw,dh) box
regression values for every anchor. These values are the
relative offset between the anchor and the ground truth box.
"""
assert len(features) == self._num_features
logits = []
bbox_reg = []
for feature in features:
logits.append(self.cls_score(self.cls_subnet(feature)))
bbox_reg.append(self.bbox_pred(self.bbox_subnet(feature)))
return logits, bbox_reg | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/modeling/meta_arch/retinanet.py | retinanet.py |
import logging
from typing import Dict, List
import torch
from torch import nn
from detectron2.config import configurable
from detectron2.structures import ImageList
from ..postprocessing import detector_postprocess, sem_seg_postprocess
from .build import META_ARCH_REGISTRY
from .rcnn import GeneralizedRCNN
from .semantic_seg import build_sem_seg_head
__all__ = ["PanopticFPN"]
@META_ARCH_REGISTRY.register()
class PanopticFPN(GeneralizedRCNN):
"""
Implement the paper :paper:`PanopticFPN`.
"""
@configurable
def __init__(
self,
*,
sem_seg_head: nn.Module,
combine_overlap_thresh: float = 0.5,
combine_stuff_area_thresh: float = 4096,
combine_instances_score_thresh: float = 0.5,
**kwargs,
):
"""
NOTE: this interface is experimental.
Args:
sem_seg_head: a module for the semantic segmentation head.
combine_overlap_thresh: combine masks into one instances if
they have enough overlap
combine_stuff_area_thresh: ignore stuff areas smaller than this threshold
combine_instances_score_thresh: ignore instances whose score is
smaller than this threshold
Other arguments are the same as :class:`GeneralizedRCNN`.
"""
super().__init__(**kwargs)
self.sem_seg_head = sem_seg_head
# options when combining instance & semantic outputs
self.combine_overlap_thresh = combine_overlap_thresh
self.combine_stuff_area_thresh = combine_stuff_area_thresh
self.combine_instances_score_thresh = combine_instances_score_thresh
@classmethod
def from_config(cls, cfg):
ret = super().from_config(cfg)
ret.update(
{
"combine_overlap_thresh": cfg.MODEL.PANOPTIC_FPN.COMBINE.OVERLAP_THRESH,
"combine_stuff_area_thresh": cfg.MODEL.PANOPTIC_FPN.COMBINE.STUFF_AREA_LIMIT,
"combine_instances_score_thresh": cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH, # noqa
}
)
ret["sem_seg_head"] = build_sem_seg_head(cfg, ret["backbone"].output_shape())
logger = logging.getLogger(__name__)
if not cfg.MODEL.PANOPTIC_FPN.COMBINE.ENABLED:
logger.warning(
"PANOPTIC_FPN.COMBINED.ENABLED is no longer used. "
" model.inference(do_postprocess=) should be used to toggle postprocessing."
)
if cfg.MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT != 1.0:
w = cfg.MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT
logger.warning(
"PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT should be replaced by weights on each ROI head."
)
def update_weight(x):
if isinstance(x, dict):
return {k: v * w for k, v in x.items()}
else:
return x * w
roi_heads = ret["roi_heads"]
roi_heads.box_predictor.loss_weight = update_weight(roi_heads.box_predictor.loss_weight)
roi_heads.mask_head.loss_weight = update_weight(roi_heads.mask_head.loss_weight)
return ret
def forward(self, batched_inputs):
"""
Args:
batched_inputs: a list, batched outputs of :class:`DatasetMapper`.
Each item in the list contains the inputs for one image.
For now, each item in the list is a dict that contains:
* "image": Tensor, image in (C, H, W) format.
* "instances": Instances
* "sem_seg": semantic segmentation ground truth.
* Other information that's included in the original dicts, such as:
"height", "width" (int): the output resolution of the model, used in inference.
See :meth:`postprocess` for details.
Returns:
list[dict]:
each dict has the results for one image. The dict contains the following keys:
* "instances": see :meth:`GeneralizedRCNN.forward` for its format.
* "sem_seg": see :meth:`SemanticSegmentor.forward` for its format.
* "panoptic_seg": See the return value of
:func:`combine_semantic_and_instance_outputs` for its format.
"""
if not self.training:
return self.inference(batched_inputs)
images = self.preprocess_image(batched_inputs)
features = self.backbone(images.tensor)
assert "sem_seg" in batched_inputs[0]
gt_sem_seg = [x["sem_seg"].to(self.device) for x in batched_inputs]
gt_sem_seg = ImageList.from_tensors(
gt_sem_seg, self.backbone.size_divisibility, self.sem_seg_head.ignore_value
).tensor
sem_seg_results, sem_seg_losses = self.sem_seg_head(features, gt_sem_seg)
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
proposals, proposal_losses = self.proposal_generator(images, features, gt_instances)
detector_results, detector_losses = self.roi_heads(
images, features, proposals, gt_instances
)
losses = sem_seg_losses
losses.update(proposal_losses)
losses.update(detector_losses)
return losses
def inference(self, batched_inputs: List[Dict[str, torch.Tensor]], do_postprocess: bool = True):
"""
Run inference on the given inputs.
Args:
batched_inputs (list[dict]): same as in :meth:`forward`
do_postprocess (bool): whether to apply post-processing on the outputs.
Returns:
When do_postprocess=True, see docs in :meth:`forward`.
Otherwise, returns a (list[Instances], list[Tensor]) that contains
the raw detector outputs, and raw semantic segmentation outputs.
"""
images = self.preprocess_image(batched_inputs)
features = self.backbone(images.tensor)
sem_seg_results, sem_seg_losses = self.sem_seg_head(features, None)
proposals, _ = self.proposal_generator(images, features, None)
detector_results, _ = self.roi_heads(images, features, proposals, None)
if do_postprocess:
processed_results = []
for sem_seg_result, detector_result, input_per_image, image_size in zip(
sem_seg_results, detector_results, batched_inputs, images.image_sizes
):
height = input_per_image.get("height", image_size[0])
width = input_per_image.get("width", image_size[1])
sem_seg_r = sem_seg_postprocess(sem_seg_result, image_size, height, width)
detector_r = detector_postprocess(detector_result, height, width)
processed_results.append({"sem_seg": sem_seg_r, "instances": detector_r})
panoptic_r = combine_semantic_and_instance_outputs(
detector_r,
sem_seg_r.argmax(dim=0),
self.combine_overlap_thresh,
self.combine_stuff_area_thresh,
self.combine_instances_score_thresh,
)
processed_results[-1]["panoptic_seg"] = panoptic_r
return processed_results
else:
return detector_results, sem_seg_results
def combine_semantic_and_instance_outputs(
instance_results,
semantic_results,
overlap_threshold,
stuff_area_thresh,
instances_score_thresh,
):
"""
Implement a simple combining logic following
"combine_semantic_and_instance_predictions.py" in panopticapi
to produce panoptic segmentation outputs.
Args:
instance_results: output of :func:`detector_postprocess`.
semantic_results: an (H, W) tensor, each element is the contiguous semantic
category id
Returns:
panoptic_seg (Tensor): of shape (height, width) where the values are ids for each segment.
segments_info (list[dict]): Describe each segment in `panoptic_seg`.
Each dict contains keys "id", "category_id", "isthing".
"""
panoptic_seg = torch.zeros_like(semantic_results, dtype=torch.int32)
# sort instance outputs by scores
sorted_inds = torch.argsort(-instance_results.scores)
current_segment_id = 0
segments_info = []
instance_masks = instance_results.pred_masks.to(dtype=torch.bool, device=panoptic_seg.device)
# Add instances one-by-one, check for overlaps with existing ones
for inst_id in sorted_inds:
score = instance_results.scores[inst_id].item()
if score < instances_score_thresh:
break
mask = instance_masks[inst_id] # H,W
mask_area = mask.sum().item()
if mask_area == 0:
continue
intersect = (mask > 0) & (panoptic_seg > 0)
intersect_area = intersect.sum().item()
if intersect_area * 1.0 / mask_area > overlap_threshold:
continue
if intersect_area > 0:
mask = mask & (panoptic_seg == 0)
current_segment_id += 1
panoptic_seg[mask] = current_segment_id
segments_info.append(
{
"id": current_segment_id,
"isthing": True,
"score": score,
"category_id": instance_results.pred_classes[inst_id].item(),
"instance_id": inst_id.item(),
}
)
# Add semantic results to remaining empty areas
semantic_labels = torch.unique(semantic_results).cpu().tolist()
for semantic_label in semantic_labels:
if semantic_label == 0: # 0 is a special "thing" class
continue
mask = (semantic_results == semantic_label) & (panoptic_seg == 0)
mask_area = mask.sum().item()
if mask_area < stuff_area_thresh:
continue
current_segment_id += 1
panoptic_seg[mask] = current_segment_id
segments_info.append(
{
"id": current_segment_id,
"isthing": False,
"category_id": semantic_label,
"area": mask_area,
}
)
return panoptic_seg, segments_info | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/modeling/meta_arch/panoptic_fpn.py | panoptic_fpn.py |
from typing import Dict, List, Optional, Tuple, Union
import torch
import torch.nn.functional as F
from torch import nn
from detectron2.config import configurable
from detectron2.layers import Conv2d, ShapeSpec, cat
from detectron2.structures import Boxes, ImageList, Instances, pairwise_iou
from detectron2.utils.events import get_event_storage
from detectron2.utils.memory import retry_if_cuda_oom
from detectron2.utils.registry import Registry
from ..anchor_generator import build_anchor_generator
from ..box_regression import Box2BoxTransform, _dense_box_regression_loss
from ..matcher import Matcher
from ..sampling import subsample_labels
from .build import PROPOSAL_GENERATOR_REGISTRY
from .proposal_utils import find_top_rpn_proposals
RPN_HEAD_REGISTRY = Registry("RPN_HEAD")
RPN_HEAD_REGISTRY.__doc__ = """
Registry for RPN heads, which take feature maps and perform
objectness classification and bounding box regression for anchors.
The registered object will be called with `obj(cfg, input_shape)`.
The call should return a `nn.Module` object.
"""
"""
Shape shorthand in this module:
N: number of images in the minibatch
L: number of feature maps per image on which RPN is run
A: number of cell anchors (must be the same for all feature maps)
Hi, Wi: height and width of the i-th feature map
B: size of the box parameterization
Naming convention:
objectness: refers to the binary classification of an anchor as object vs. not object.
deltas: refers to the 4-d (dx, dy, dw, dh) deltas that parameterize the box2box
transform (see :class:`box_regression.Box2BoxTransform`), or 5d for rotated boxes.
pred_objectness_logits: predicted objectness scores in [-inf, +inf]; use
sigmoid(pred_objectness_logits) to estimate P(object).
gt_labels: ground-truth binary classification labels for objectness
pred_anchor_deltas: predicted box2box transform deltas
gt_anchor_deltas: ground-truth box2box transform deltas
"""
def build_rpn_head(cfg, input_shape):
"""
Build an RPN head defined by `cfg.MODEL.RPN.HEAD_NAME`.
"""
name = cfg.MODEL.RPN.HEAD_NAME
return RPN_HEAD_REGISTRY.get(name)(cfg, input_shape)
@RPN_HEAD_REGISTRY.register()
class StandardRPNHead(nn.Module):
"""
Standard RPN classification and regression heads described in :paper:`Faster R-CNN`.
Uses a 3x3 conv to produce a shared hidden state from which one 1x1 conv predicts
objectness logits for each anchor and a second 1x1 conv predicts bounding-box deltas
specifying how to deform each anchor into an object proposal.
"""
@configurable
def __init__(
self, *, in_channels: int, num_anchors: int, box_dim: int = 4, conv_dims: List[int] = (-1,)
):
"""
NOTE: this interface is experimental.
Args:
in_channels (int): number of input feature channels. When using multiple
input features, they must have the same number of channels.
num_anchors (int): number of anchors to predict for *each spatial position*
on the feature map. The total number of anchors for each
feature map will be `num_anchors * H * W`.
box_dim (int): dimension of a box, which is also the number of box regression
predictions to make for each anchor. An axis aligned box has
box_dim=4, while a rotated box has box_dim=5.
conv_dims (list[int]): a list of integers representing the output channels
of N conv layers. Set it to -1 to use the same number of output channels
as input channels.
"""
super().__init__()
cur_channels = in_channels
# Keeping the old variable names and structure for backwards compatiblity.
# Otherwise the old checkpoints will fail to load.
if len(conv_dims) == 1:
out_channels = cur_channels if conv_dims[0] == -1 else conv_dims[0]
# 3x3 conv for the hidden representation
self.conv = self._get_rpn_conv(cur_channels, out_channels)
cur_channels = out_channels
else:
self.conv = nn.Sequential()
for k, conv_dim in enumerate(conv_dims):
out_channels = cur_channels if conv_dim == -1 else conv_dim
if out_channels <= 0:
raise ValueError(
f"Conv output channels should be greater than 0. Got {out_channels}"
)
conv = self._get_rpn_conv(cur_channels, out_channels)
self.conv.add_module(f"conv{k}", conv)
cur_channels = out_channels
# 1x1 conv for predicting objectness logits
self.objectness_logits = nn.Conv2d(cur_channels, num_anchors, kernel_size=1, stride=1)
# 1x1 conv for predicting box2box transform deltas
self.anchor_deltas = nn.Conv2d(cur_channels, num_anchors * box_dim, kernel_size=1, stride=1)
# Keeping the order of weights initialization same for backwards compatiblility.
for layer in self.modules():
if isinstance(layer, nn.Conv2d):
nn.init.normal_(layer.weight, std=0.01)
nn.init.constant_(layer.bias, 0)
def _get_rpn_conv(self, in_channels, out_channels):
return Conv2d(
in_channels,
out_channels,
kernel_size=3,
stride=1,
padding=1,
activation=nn.ReLU(),
)
@classmethod
def from_config(cls, cfg, input_shape):
# Standard RPN is shared across levels:
in_channels = [s.channels for s in input_shape]
assert len(set(in_channels)) == 1, "Each level must have the same channel!"
in_channels = in_channels[0]
# RPNHead should take the same input as anchor generator
# NOTE: it assumes that creating an anchor generator does not have unwanted side effect.
anchor_generator = build_anchor_generator(cfg, input_shape)
num_anchors = anchor_generator.num_anchors
box_dim = anchor_generator.box_dim
assert (
len(set(num_anchors)) == 1
), "Each level must have the same number of anchors per spatial position"
return {
"in_channels": in_channels,
"num_anchors": num_anchors[0],
"box_dim": box_dim,
"conv_dims": cfg.MODEL.RPN.CONV_DIMS,
}
def forward(self, features: List[torch.Tensor]):
"""
Args:
features (list[Tensor]): list of feature maps
Returns:
list[Tensor]: A list of L elements.
Element i is a tensor of shape (N, A, Hi, Wi) representing
the predicted objectness logits for all anchors. A is the number of cell anchors.
list[Tensor]: A list of L elements. Element i is a tensor of shape
(N, A*box_dim, Hi, Wi) representing the predicted "deltas" used to transform anchors
to proposals.
"""
pred_objectness_logits = []
pred_anchor_deltas = []
for x in features:
t = self.conv(x)
pred_objectness_logits.append(self.objectness_logits(t))
pred_anchor_deltas.append(self.anchor_deltas(t))
return pred_objectness_logits, pred_anchor_deltas
@PROPOSAL_GENERATOR_REGISTRY.register()
class RPN(nn.Module):
"""
Region Proposal Network, introduced by :paper:`Faster R-CNN`.
"""
@configurable
def __init__(
self,
*,
in_features: List[str],
head: nn.Module,
anchor_generator: nn.Module,
anchor_matcher: Matcher,
box2box_transform: Box2BoxTransform,
batch_size_per_image: int,
positive_fraction: float,
pre_nms_topk: Tuple[float, float],
post_nms_topk: Tuple[float, float],
nms_thresh: float = 0.7,
min_box_size: float = 0.0,
anchor_boundary_thresh: float = -1.0,
loss_weight: Union[float, Dict[str, float]] = 1.0,
box_reg_loss_type: str = "smooth_l1",
smooth_l1_beta: float = 0.0,
):
"""
NOTE: this interface is experimental.
Args:
in_features (list[str]): list of names of input features to use
head (nn.Module): a module that predicts logits and regression deltas
for each level from a list of per-level features
anchor_generator (nn.Module): a module that creates anchors from a
list of features. Usually an instance of :class:`AnchorGenerator`
anchor_matcher (Matcher): label the anchors by matching them with ground truth.
box2box_transform (Box2BoxTransform): defines the transform from anchors boxes to
instance boxes
batch_size_per_image (int): number of anchors per image to sample for training
positive_fraction (float): fraction of foreground anchors to sample for training
pre_nms_topk (tuple[float]): (train, test) that represents the
number of top k proposals to select before NMS, in
training and testing.
post_nms_topk (tuple[float]): (train, test) that represents the
number of top k proposals to select after NMS, in
training and testing.
nms_thresh (float): NMS threshold used to de-duplicate the predicted proposals
min_box_size (float): remove proposal boxes with any side smaller than this threshold,
in the unit of input image pixels
anchor_boundary_thresh (float): legacy option
loss_weight (float|dict): weights to use for losses. Can be single float for weighting
all rpn losses together, or a dict of individual weightings. Valid dict keys are:
"loss_rpn_cls" - applied to classification loss
"loss_rpn_loc" - applied to box regression loss
box_reg_loss_type (str): Loss type to use. Supported losses: "smooth_l1", "giou".
smooth_l1_beta (float): beta parameter for the smooth L1 regression loss. Default to
use L1 loss. Only used when `box_reg_loss_type` is "smooth_l1"
"""
super().__init__()
self.in_features = in_features
self.rpn_head = head
self.anchor_generator = anchor_generator
self.anchor_matcher = anchor_matcher
self.box2box_transform = box2box_transform
self.batch_size_per_image = batch_size_per_image
self.positive_fraction = positive_fraction
# Map from self.training state to train/test settings
self.pre_nms_topk = {True: pre_nms_topk[0], False: pre_nms_topk[1]}
self.post_nms_topk = {True: post_nms_topk[0], False: post_nms_topk[1]}
self.nms_thresh = nms_thresh
self.min_box_size = float(min_box_size)
self.anchor_boundary_thresh = anchor_boundary_thresh
if isinstance(loss_weight, float):
loss_weight = {"loss_rpn_cls": loss_weight, "loss_rpn_loc": loss_weight}
self.loss_weight = loss_weight
self.box_reg_loss_type = box_reg_loss_type
self.smooth_l1_beta = smooth_l1_beta
@classmethod
def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
in_features = cfg.MODEL.RPN.IN_FEATURES
ret = {
"in_features": in_features,
"min_box_size": cfg.MODEL.PROPOSAL_GENERATOR.MIN_SIZE,
"nms_thresh": cfg.MODEL.RPN.NMS_THRESH,
"batch_size_per_image": cfg.MODEL.RPN.BATCH_SIZE_PER_IMAGE,
"positive_fraction": cfg.MODEL.RPN.POSITIVE_FRACTION,
"loss_weight": {
"loss_rpn_cls": cfg.MODEL.RPN.LOSS_WEIGHT,
"loss_rpn_loc": cfg.MODEL.RPN.BBOX_REG_LOSS_WEIGHT * cfg.MODEL.RPN.LOSS_WEIGHT,
},
"anchor_boundary_thresh": cfg.MODEL.RPN.BOUNDARY_THRESH,
"box2box_transform": Box2BoxTransform(weights=cfg.MODEL.RPN.BBOX_REG_WEIGHTS),
"box_reg_loss_type": cfg.MODEL.RPN.BBOX_REG_LOSS_TYPE,
"smooth_l1_beta": cfg.MODEL.RPN.SMOOTH_L1_BETA,
}
ret["pre_nms_topk"] = (cfg.MODEL.RPN.PRE_NMS_TOPK_TRAIN, cfg.MODEL.RPN.PRE_NMS_TOPK_TEST)
ret["post_nms_topk"] = (cfg.MODEL.RPN.POST_NMS_TOPK_TRAIN, cfg.MODEL.RPN.POST_NMS_TOPK_TEST)
ret["anchor_generator"] = build_anchor_generator(cfg, [input_shape[f] for f in in_features])
ret["anchor_matcher"] = Matcher(
cfg.MODEL.RPN.IOU_THRESHOLDS, cfg.MODEL.RPN.IOU_LABELS, allow_low_quality_matches=True
)
ret["head"] = build_rpn_head(cfg, [input_shape[f] for f in in_features])
return ret
def _subsample_labels(self, label):
"""
Randomly sample a subset of positive and negative examples, and overwrite
the label vector to the ignore value (-1) for all elements that are not
included in the sample.
Args:
labels (Tensor): a vector of -1, 0, 1. Will be modified in-place and returned.
"""
pos_idx, neg_idx = subsample_labels(
label, self.batch_size_per_image, self.positive_fraction, 0
)
# Fill with the ignore label (-1), then set positive and negative labels
label.fill_(-1)
label.scatter_(0, pos_idx, 1)
label.scatter_(0, neg_idx, 0)
return label
@torch.jit.unused
@torch.no_grad()
def label_and_sample_anchors(
self, anchors: List[Boxes], gt_instances: List[Instances]
) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
"""
Args:
anchors (list[Boxes]): anchors for each feature map.
gt_instances: the ground-truth instances for each image.
Returns:
list[Tensor]:
List of #img tensors. i-th element is a vector of labels whose length is
the total number of anchors across all feature maps R = sum(Hi * Wi * A).
Label values are in {-1, 0, 1}, with meanings: -1 = ignore; 0 = negative
class; 1 = positive class.
list[Tensor]:
i-th element is a Rx4 tensor. The values are the matched gt boxes for each
anchor. Values are undefined for those anchors not labeled as 1.
"""
anchors = Boxes.cat(anchors)
gt_boxes = [x.gt_boxes for x in gt_instances]
image_sizes = [x.image_size for x in gt_instances]
del gt_instances
gt_labels = []
matched_gt_boxes = []
for image_size_i, gt_boxes_i in zip(image_sizes, gt_boxes):
"""
image_size_i: (h, w) for the i-th image
gt_boxes_i: ground-truth boxes for i-th image
"""
match_quality_matrix = retry_if_cuda_oom(pairwise_iou)(gt_boxes_i, anchors)
matched_idxs, gt_labels_i = retry_if_cuda_oom(self.anchor_matcher)(match_quality_matrix)
# Matching is memory-expensive and may result in CPU tensors. But the result is small
gt_labels_i = gt_labels_i.to(device=gt_boxes_i.device)
del match_quality_matrix
if self.anchor_boundary_thresh >= 0:
# Discard anchors that go out of the boundaries of the image
# NOTE: This is legacy functionality that is turned off by default in Detectron2
anchors_inside_image = anchors.inside_box(image_size_i, self.anchor_boundary_thresh)
gt_labels_i[~anchors_inside_image] = -1
# A vector of labels (-1, 0, 1) for each anchor
gt_labels_i = self._subsample_labels(gt_labels_i)
if len(gt_boxes_i) == 0:
# These values won't be used anyway since the anchor is labeled as background
matched_gt_boxes_i = torch.zeros_like(anchors.tensor)
else:
# TODO wasted indexing computation for ignored boxes
matched_gt_boxes_i = gt_boxes_i[matched_idxs].tensor
gt_labels.append(gt_labels_i) # N,AHW
matched_gt_boxes.append(matched_gt_boxes_i)
return gt_labels, matched_gt_boxes
@torch.jit.unused
def losses(
self,
anchors: List[Boxes],
pred_objectness_logits: List[torch.Tensor],
gt_labels: List[torch.Tensor],
pred_anchor_deltas: List[torch.Tensor],
gt_boxes: List[torch.Tensor],
) -> Dict[str, torch.Tensor]:
"""
Return the losses from a set of RPN predictions and their associated ground-truth.
Args:
anchors (list[Boxes or RotatedBoxes]): anchors for each feature map, each
has shape (Hi*Wi*A, B), where B is box dimension (4 or 5).
pred_objectness_logits (list[Tensor]): A list of L elements.
Element i is a tensor of shape (N, Hi*Wi*A) representing
the predicted objectness logits for all anchors.
gt_labels (list[Tensor]): Output of :meth:`label_and_sample_anchors`.
pred_anchor_deltas (list[Tensor]): A list of L elements. Element i is a tensor of shape
(N, Hi*Wi*A, 4 or 5) representing the predicted "deltas" used to transform anchors
to proposals.
gt_boxes (list[Tensor]): Output of :meth:`label_and_sample_anchors`.
Returns:
dict[loss name -> loss value]: A dict mapping from loss name to loss value.
Loss names are: `loss_rpn_cls` for objectness classification and
`loss_rpn_loc` for proposal localization.
"""
num_images = len(gt_labels)
gt_labels = torch.stack(gt_labels) # (N, sum(Hi*Wi*Ai))
# Log the number of positive/negative anchors per-image that's used in training
pos_mask = gt_labels == 1
num_pos_anchors = pos_mask.sum().item()
num_neg_anchors = (gt_labels == 0).sum().item()
storage = get_event_storage()
storage.put_scalar("rpn/num_pos_anchors", num_pos_anchors / num_images)
storage.put_scalar("rpn/num_neg_anchors", num_neg_anchors / num_images)
localization_loss = _dense_box_regression_loss(
anchors,
self.box2box_transform,
pred_anchor_deltas,
gt_boxes,
pos_mask,
box_reg_loss_type=self.box_reg_loss_type,
smooth_l1_beta=self.smooth_l1_beta,
)
valid_mask = gt_labels >= 0
objectness_loss = F.binary_cross_entropy_with_logits(
cat(pred_objectness_logits, dim=1)[valid_mask],
gt_labels[valid_mask].to(torch.float32),
reduction="sum",
)
normalizer = self.batch_size_per_image * num_images
losses = {
"loss_rpn_cls": objectness_loss / normalizer,
# The original Faster R-CNN paper uses a slightly different normalizer
# for loc loss. But it doesn't matter in practice
"loss_rpn_loc": localization_loss / normalizer,
}
losses = {k: v * self.loss_weight.get(k, 1.0) for k, v in losses.items()}
return losses
def forward(
self,
images: ImageList,
features: Dict[str, torch.Tensor],
gt_instances: Optional[List[Instances]] = None,
):
"""
Args:
images (ImageList): input images of length `N`
features (dict[str, Tensor]): input data as a mapping from feature
map name to tensor. Axis 0 represents the number of images `N` in
the input data; axes 1-3 are channels, height, and width, which may
vary between feature maps (e.g., if a feature pyramid is used).
gt_instances (list[Instances], optional): a length `N` list of `Instances`s.
Each `Instances` stores ground-truth instances for the corresponding image.
Returns:
proposals: list[Instances]: contains fields "proposal_boxes", "objectness_logits"
loss: dict[Tensor] or None
"""
features = [features[f] for f in self.in_features]
anchors = self.anchor_generator(features)
pred_objectness_logits, pred_anchor_deltas = self.rpn_head(features)
# Transpose the Hi*Wi*A dimension to the middle:
pred_objectness_logits = [
# (N, A, Hi, Wi) -> (N, Hi, Wi, A) -> (N, Hi*Wi*A)
score.permute(0, 2, 3, 1).flatten(1)
for score in pred_objectness_logits
]
pred_anchor_deltas = [
# (N, A*B, Hi, Wi) -> (N, A, B, Hi, Wi) -> (N, Hi, Wi, A, B) -> (N, Hi*Wi*A, B)
x.view(x.shape[0], -1, self.anchor_generator.box_dim, x.shape[-2], x.shape[-1])
.permute(0, 3, 4, 1, 2)
.flatten(1, -2)
for x in pred_anchor_deltas
]
if self.training:
assert gt_instances is not None, "RPN requires gt_instances in training!"
gt_labels, gt_boxes = self.label_and_sample_anchors(anchors, gt_instances)
losses = self.losses(
anchors, pred_objectness_logits, gt_labels, pred_anchor_deltas, gt_boxes
)
else:
losses = {}
proposals = self.predict_proposals(
anchors, pred_objectness_logits, pred_anchor_deltas, images.image_sizes
)
return proposals, losses
def predict_proposals(
self,
anchors: List[Boxes],
pred_objectness_logits: List[torch.Tensor],
pred_anchor_deltas: List[torch.Tensor],
image_sizes: List[Tuple[int, int]],
):
"""
Decode all the predicted box regression deltas to proposals. Find the top proposals
by applying NMS and removing boxes that are too small.
Returns:
proposals (list[Instances]): list of N Instances. The i-th Instances
stores post_nms_topk object proposals for image i, sorted by their
objectness score in descending order.
"""
# The proposals are treated as fixed for joint training with roi heads.
# This approach ignores the derivative w.r.t. the proposal boxes’ coordinates that
# are also network responses.
with torch.no_grad():
pred_proposals = self._decode_proposals(anchors, pred_anchor_deltas)
return find_top_rpn_proposals(
pred_proposals,
pred_objectness_logits,
image_sizes,
self.nms_thresh,
self.pre_nms_topk[self.training],
self.post_nms_topk[self.training],
self.min_box_size,
self.training,
)
def _decode_proposals(self, anchors: List[Boxes], pred_anchor_deltas: List[torch.Tensor]):
"""
Transform anchors into proposals by applying the predicted anchor deltas.
Returns:
proposals (list[Tensor]): A list of L tensors. Tensor i has shape
(N, Hi*Wi*A, B)
"""
N = pred_anchor_deltas[0].shape[0]
proposals = []
# For each feature map
for anchors_i, pred_anchor_deltas_i in zip(anchors, pred_anchor_deltas):
B = anchors_i.tensor.size(1)
pred_anchor_deltas_i = pred_anchor_deltas_i.reshape(-1, B)
# Expand anchors to shape (N*Hi*Wi*A, B)
anchors_i = anchors_i.tensor.unsqueeze(0).expand(N, -1, -1).reshape(-1, B)
proposals_i = self.box2box_transform.apply_deltas(pred_anchor_deltas_i, anchors_i)
# Append feature map proposals with shape (N, Hi*Wi*A, B)
proposals.append(proposals_i.view(N, -1, B))
return proposals | ytd | /ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/modeling/proposal_generator/rpn.py | rpn.py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.