id (int64, 0-458k) | file_name (stringlengths 4-119) | file_path (stringlengths 14-227) | content (stringlengths 24-9.96M) | size (int64, 24-9.96M) | language (stringclasses, 1 value) | extension (stringclasses, 14 values) | total_lines (int64, 1-219k) | avg_line_length (float64, 2.52-4.63M) | max_line_length (int64, 5-9.91M) | alphanum_fraction (float64, 0-1) | repo_name (stringlengths 7-101) | repo_stars (int64, 100-139k) | repo_forks (int64, 0-26.4k) | repo_open_issues (int64, 0-2.27k) | repo_license (stringclasses, 12 values) | repo_extraction_date (stringclasses, 433 values) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2,289,200 | 中文git.py | DuckDuckStudio_Chinese_git/zh-tw/中文git.py | import subprocess
import sys
import os
script_path = os.path.dirname(__file__)
full_path = os.path.join(script_path, "中文git.py")
def git_command(command, *args):
git_command_mapping = {
"拉取": "pull",
"推送": "push",
"提交": "commit",
"新建分支": "checkout -b",
"切換分支": "checkout",
"合併": "merge",
"暫存": "add",
"查看狀態": "status",
"查看日誌": "log",
"重置": "reset",
"刪除分支": "branch -D",
"遠端地址": "remote -v",
"遠端更新": "remote update",
"查看遠端分支": "branch -r",
"版本": "-v",
"刪除提交": "reset --hard HEAD~",
"克隆": "clone",
"配置": "config",
"簽出到": "checkout",
"查看圖形化日誌": "log --graph",
"是否忽略": "check-ignore -v",
"初始化": "init",
"查看本地分支": "branch",
# 可根據需要添加更多映射
}
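# 注意:部分映射值含空格(如 "checkout -b"、"branch -D"),以 argv 列表呼叫 subprocess 前需先 split(),否則整段會被當成單一參數傳給 git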
git_config_subcommands = {
"全局": "--global",
"系統": "--system"
}
if command == "幫助":
print("使用方法:")
print("python 中文git.py <中文指令> [參數]")
print("即:python 中文git.py <你想幹什麼> [具體要啥]")
print("支持的中文指令:")
for cmd in git_command_mapping:
print("-", cmd)
print("詳細支持命令請查看README_DEV文件:https://github.com/DuckDuckStudio/Chinese_git/blob/main/README_DEV.md#可用命令")
return
git_command = git_command_mapping.get(command)
if git_command:
try:
if command == "提交":
if not args:
commit_message = input("請輸入提交信息: ")
result = subprocess.run(['git', git_command, '-m', commit_message], capture_output=True, text=True)
else:
result = subprocess.run(['git', git_command, '-m', args[0]], capture_output=True, text=True)
elif command == "暫存":
if args and args[0] == "所有":
result = subprocess.run(['git', 'add', '.'], capture_output=True, text=True)
elif not args:
print("你要暫存什麼你沒告訴我啊")
else:
result = subprocess.run(['git', 'add'] + list(args), capture_output=True, text=True)
elif command == "切換分支" or command == "簽出到":
if not args:
branch = input("請輸入需要切換的分支:")
result = subprocess.run(['git', git_command, branch], capture_output=True, text=True)
elif len(args) == 1:
result = subprocess.run(['git', git_command] + list(args), capture_output=True, text=True)
else:
print("多餘的參數")
elif command == "新建分支":
if not args:
new_branch = input("請輸入新分支名稱: ")
result = subprocess.run(['git', git_command, new_branch], capture_output=True, text=True)
elif len(args) == 1:
result = subprocess.run(['git', git_command] + list(args), capture_output=True, text=True)
else:
print("多餘的參數")
elif command == "刪除分支":
if not args:
print("刪除分支命令需要指定要刪除的分支名稱。")
elif len(args) > 2:
print("多餘的參數")
return
elif len(args) == 2 and args[1] == "+確認":
git_command = "git branch -d"
else:
print("無效的附加參數")
return
result = subprocess.run(['git', git_command, args[0]], capture_output=True, text=True)
elif command == "版本":
print("中文Git by 鸭鸭「カモ」")
print("版本:v1.5")
print("繁體中文翻譯版")
print("安裝在:", full_path)
result = subprocess.run(['git', git_command] + list(args), capture_output=True, text=True)
elif command == "刪除提交":
if not args:
print("請輸入要刪除的提交類型(最新提交/倒數第n個提交/具體某個提交)。")
else:
if args[0] == "最新提交":
result = subprocess.run(['git', git_command, 'HEAD~1'], capture_output=True, text=True)
elif args[0].startswith("倒數第"):
try:
num = int(args[0][3:])
result = subprocess.run(['git', git_command, f'HEAD~{num}'], capture_output=True, text=True)
except ValueError:
print("參數錯誤,請輸入倒數第n個提交,n為正整數。")
return
else:
result = subprocess.run(['git', git_command, args[0]], capture_output=True, text=True)
elif command == "克隆":
if not args:
repository = input("請輸入遠程倉庫鏈接(以.git結尾):")
result = subprocess.run(['git', git_command, repository], capture_output=True, text=True)
else:
result = subprocess.run(['git', git_command] + list(args), capture_output=True, text=True)
elif command == "配置":
if not args:
print("配置命令需要指定配置選項和值。")
elif len(args) == 1:
print("配置命令需要指定配置值。")
else:
config_option = args[0]
config_value = args[1]
config_subcommand = None
# 检查是否存在配置范围
if len(args) == 3:
config_subcommand = args[2]
if config_subcommand not in git_config_subcommands:
print("配置範圍錯誤,可選範圍為:全局、系統。")
return
git_config_command = ['git', git_command, config_option, config_value]
if config_subcommand:# 如果存在配置范围
git_config_command.insert(2, git_config_subcommands[config_subcommand])
result = subprocess.run(git_config_command, capture_output=True, text=True)
elif command == "是否忽略":
if not args:
file = input("請輸入需要檢查的文件/文件夾:")
result = subprocess.run(['git', git_command, file], capture_output=True, text=True)
else:
result = subprocess.run(['git', git_command] + list(args), capture_output=True, text=True)
elif command == "查看本地分支":
if len(args) > 2:
print("多餘的參數")
return
elif args[0] == "+最後提交":
git_command = "branch -v"
elif (args[0] == "+最後提交" and args[1] == "+與上游分支關係") or (args[0] == "+與上游分支關係" and args[1] == "+最后提交"):
git_command = "branch -vv"
else:
print("無效的參數")
result = subprocess.run(['git', git_command], capture_output=True, text=True)
elif command == "合併":
if not args:
branch = input("請輸入需要合併到當前分支的分支:")
result = subprocess.run(['git', git_command, branch], capture_output=True, text=True)
else:
result = subprocess.run(['git', git_command] + list(args), capture_output=True, text=True)
else:
result = subprocess.run(['git', git_command] + list(args), capture_output=True, text=True)
if result.returncode == 0:
print(result.stdout)
else:
print("錯誤:", result.stderr)
except Exception as e:
print("執行git命令時出錯:", e)
else:
print("不支持的git命令:", command)
if __name__ == "__main__":
if len(sys.argv) > 1:
git_command(sys.argv[1], *sys.argv[2:])
else:
print("使用方法:")
print("python 中文git.py <中文指令> [參數]")
print("即:python 中文git.py <你想幹什麼> [具體要啥]")
| 8,878 | Python | .py | 175 | 29.4 | 120 | 0.475082 | DuckDuckStudio/Chinese_git | 8 | 1 | 6 | GPL-2.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,201 | 中文git-pack.py | DuckDuckStudio_Chinese_git/zh-tw/中文git-pack.py | import subprocess
import sys
import os
script_path = os.path.dirname(__file__)
full_path = os.path.join(script_path, "中文git.exe")
def git_command(command, *args):
git_command_mapping = {
"拉取": "pull",
"推送": "push",
"提交": "commit",
"新建分支": "checkout -b",
"切換分支": "checkout",
"合併": "merge",
"暫存": "add",
"查看狀態": "status",
"查看日誌": "log",
"重置": "reset",
"刪除分支": "branch -D",
"遠端地址": "remote -v",
"遠端更新": "remote update",
"查看遠端分支": "branch -r",
"版本": "-v",
"刪除提交": "reset --hard HEAD~",
"克隆": "clone",
"配置": "config",
"簽出到": "checkout",
"查看圖形化日誌": "log --graph",
"是否忽略": "check-ignore -v",
"初始化": "init",
"查看本地分支": "branch",
# 可根據需要添加更多映射
}
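# 注意:部分映射值含空格(如 "checkout -b"、"branch -D"),以 argv 列表呼叫 subprocess 前需先 split(),否則整段會被當成單一參數傳給 git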
git_config_subcommands = {
"全局": "--global",
"系統": "--system"
}
if command == "幫助":
print("使用方法:")
print(full_path, " <中文指令> [參數]")
print("即:", full_path, "<你想幹什麼> [具體要啥]")
print("支持的中文指令:")
for cmd in git_command_mapping:
print("-", cmd)
print("詳細支持命令請查看README_DEV文件:https://github.com/DuckDuckStudio/Chinese_git/blob/main/README_DEV.md#可用命令")
return
git_command = git_command_mapping.get(command)
if git_command:
try:
if command == "提交":
if not args:
commit_message = input("請輸入提交信息: ")
result = subprocess.run(['git', git_command, '-m', commit_message], capture_output=True, text=True)
else:
result = subprocess.run(['git', git_command, '-m', args[0]], capture_output=True, text=True)
elif command == "暫存":
if args and args[0] == "所有":
result = subprocess.run(['git', 'add', '.'], capture_output=True, text=True)
elif not args:
print("你要暫存什麼你沒告訴我啊")
else:
result = subprocess.run(['git', 'add'] + list(args), capture_output=True, text=True)
elif command == "切換分支" or command == "簽出到":
if not args:
branch = input("請輸入需要切換的分支:")
result = subprocess.run(['git', git_command, branch], capture_output=True, text=True)
elif len(args) == 1:
result = subprocess.run(['git', git_command] + list(args), capture_output=True, text=True)
else:
print("多餘的參數")
elif command == "新建分支":
if not args:
new_branch = input("請輸入新分支名稱: ")
result = subprocess.run(['git', git_command, new_branch], capture_output=True, text=True)
elif len(args) == 1:
result = subprocess.run(['git', git_command] + list(args), capture_output=True, text=True)
else:
print("多餘的參數")
elif command == "刪除分支":
if not args:
print("刪除分支命令需要指定要刪除的分支名稱。")
elif len(args) > 2:
print("多餘的參數")
return
elif len(args) == 2 and args[1] == "+確認":
git_command = "git branch -d"
else:
print("無效的附加參數")
return
result = subprocess.run(['git', git_command, args[0]], capture_output=True, text=True)
elif command == "版本":
print("中文Git by 鸭鸭「カモ」")
print("版本:v1.5-pack")
print("繁體中文翻譯版")
print("安裝在:", full_path)
result = subprocess.run(['git', git_command] + list(args), capture_output=True, text=True)
elif command == "刪除提交":
if not args:
print("請輸入要刪除的提交類型(最新提交/倒數第n個提交/具體某個提交)。")
else:
if args[0] == "最新提交":
result = subprocess.run(['git', git_command, 'HEAD~1'], capture_output=True, text=True)
elif args[0].startswith("倒數第"):
try:
num = int(args[0][3:])
result = subprocess.run(['git', git_command, f'HEAD~{num}'], capture_output=True, text=True)
except ValueError:
print("參數錯誤,請輸入倒數第n個提交,n為正整數。")
return
else:
result = subprocess.run(['git', git_command, args[0]], capture_output=True, text=True)
elif command == "克隆":
if not args:
repository = input("請輸入遠程倉庫鏈接(以.git結尾):")
result = subprocess.run(['git', git_command, repository], capture_output=True, text=True)
else:
result = subprocess.run(['git', git_command] + list(args), capture_output=True, text=True)
elif command == "配置":
if not args:
print("配置命令需要指定配置選項和值。")
elif len(args) == 1:
print("配置命令需要指定配置值。")
else:
config_option = args[0]
config_value = args[1]
config_subcommand = None
# 检查是否存在配置范围
if len(args) == 3:
config_subcommand = args[2]
if config_subcommand not in git_config_subcommands:
print("配置範圍錯誤,可選範圍為:全局、系統。")
return
git_config_command = ['git', git_command, config_option, config_value]
if config_subcommand:# 如果存在配置范围
git_config_command.insert(2, git_config_subcommands[config_subcommand])
result = subprocess.run(git_config_command, capture_output=True, text=True)
elif command == "是否忽略":
if not args:
file = input("請輸入需要檢查的文件/文件夾:")
result = subprocess.run(['git', git_command, file], capture_output=True, text=True)
else:
result = subprocess.run(['git', git_command] + list(args), capture_output=True, text=True)
elif command == "查看本地分支":
if len(args) > 2:
print("多餘的參數")
return
elif args[0] == "+最後提交":
git_command = "branch -v"
elif (args[0] == "+最後提交" and args[1] == "+與上游分支關係") or (args[0] == "+與上游分支關係" and args[1] == "+最后提交"):
git_command = "branch -vv"
else:
print("無效的參數")
result = subprocess.run(['git', git_command], capture_output=True, text=True)
elif command == "合併":
if not args:
branch = input("請輸入需要合併到當前分支的分支:")
result = subprocess.run(['git', git_command, branch], capture_output=True, text=True)
else:
result = subprocess.run(['git', git_command] + list(args), capture_output=True, text=True)
else:
result = subprocess.run(['git', git_command] + list(args), capture_output=True, text=True)
if result.returncode == 0:
print(result.stdout)
else:
print("錯誤:", result.stderr)
except Exception as e:
print("執行git命令時出錯:", e)
else:
print("不支持的git命令:", command)
if __name__ == "__main__":
if len(sys.argv) > 1:
git_command(sys.argv[1], *sys.argv[2:])
else:
print("使用方法:")
print(full_path, " <中文指令> [參數]")
print("即:", full_path, "<你想幹什麼> [具體要啥]")
| 8,857 | Python | .py | 175 | 29.377143 | 120 | 0.47311 | DuckDuckStudio/Chinese_git | 8 | 1 | 6 | GPL-2.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,202 | 中文git-pypi.py | DuckDuckStudio_Chinese_git/中文git-pypi.py | import os
import sys
import json
import requests
import subprocess
from colorama import init, Fore
# ----------- 此代码为PyPi专用,非函数代码请写在main()函数中! -----------
# --- 读取配置文件 ---
def fetch_json():
global exit_code
config_url = "https://duckduckstudio.github.io/yazicbs.github.io/Tools/chinese_git/files/json/config.json"
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
}
try:
response = requests.get(config_url, headers=headers)
if response.status_code == 200:
json_data = response.json()
print(f"{Fore.GREEN}✓{Fore.RESET} 获取最新默认配置文件成功")
return json_data
else:
print(f"{Fore.RED}✕{Fore.RESET} 无法获取最新默认配置文件\n{Fore.BLUE}[!]{Fore.RESET} 返回状态码: {Fore.YELLOW}{response.status_code}{Fore.RESET}")
exit_code = 1
return None
except Exception as e:
print(f"{Fore.RED}✕{Fore.RESET} 尝试获取最新默认配置文件失败,错误: {Fore.RED}{e}{Fore.RESET}")
exit_code = 1
return None
def merge_json(old_json, new_json):
# 合并两个 JSON 对象
updated_json = old_json.copy()
# 处理旧 JSON 中的键
keys_to_remove = []
for key in updated_json:
if key not in new_json:
keys_to_remove.append(key)
for key in keys_to_remove:
del updated_json[key]
# 合并新 JSON 中的值
for key in new_json:
if key in updated_json and isinstance(updated_json[key], dict) and isinstance(new_json[key], dict):
# 如果是字典类型,递归合并
updated_json[key] = merge_json(updated_json[key], new_json[key])
else:
# 直接更新值
updated_json[key] = new_json[key]
return updated_json
def update_json():
global exit_code
new_json = fetch_json()
if not new_json:
return 1
try:
with open(config_file, 'r') as f:
old_json = json.load(f)
updated_json = merge_json(old_json, new_json)
# 将更新后的配置写入文件
with open(config_file, 'w') as f:
json.dump(updated_json, f, indent=4)
print(f"{Fore.GREEN}✓{Fore.RESET} 默认配置文件更新成功")
return 0
except Exception as e:
print(f"{Fore.RED}✕{Fore.RESET} 更新配置文件时出错:\n{Fore.RED}{e}{Fore.RESET}")
exit_code = 1
return 1
def always_check():# 每次执行命令都要检查的
# ----------- 检查更新 ----------
current_version = VERSION
try:
response = requests.get(url)
data = response.json()
latest_version = data['tag_name'] # 从 GitHub 获取最新版本号
if latest_version != current_version:
print(f"{Fore.BLUE}[!]{Fore.RESET} 发现新版本 {Fore.RED}{current_version}{Fore.RESET} → {Fore.GREEN}{latest_version}{Fore.RESET}\n运行 {Fore.BLUE}中文git 更新{Fore.RESET} 命令以更新。")
except:
pass
def check_for_updates():
global exit_code
# 提取版本号
current_version = VERSION.split('-')[0] # 分离可能的 '-pack' 后缀
try:
response = requests.get(url)
data = response.json()
latest_version = data['tag_name'] # 从 GitHub 获取最新版本号
if latest_version != current_version:
print(f"{Fore.BLUE}[!]{Fore.RESET} 发现新版本 {Fore.RED}{current_version}{Fore.RESET} → {Fore.GREEN}{latest_version}{Fore.RESET} 可用!")
return latest_version
else:
print(f"{Fore.GREEN}✓{Fore.RESET} 您已安装最新版本 {Fore.BLUE}{current_version}{Fore.RESET}。")
return None
except Exception as e:
print(f"{Fore.RED}✕{Fore.RESET} 检查更新时出错: {e}")
exit_code = 1
return None
def download_update_file(version):
global exit_code
# 根据版本确定下载 URL
download_url = f'https://github.com/DuckDuckStudio/Chinese_git/releases/download/{version}/Chinese_git.py'
spare_download_url = 'https://duckduckstudio.github.io/yazicbs.github.io/Tools/chinese_git/Spare-Download/Chinese_git.py'
spare_download_version_url = 'https://duckduckstudio.github.io/yazicbs.github.io/Tools/chinese_git/Spare-Download/info.json'
try:
response = requests.get(download_url)
# 重命名下载的文件为"中文Git.exe" 或 "中文Git.py"
new_filename = '中文Git.py'
with open(new_filename, 'wb') as f:
f.write(response.content)
print(f"{Fore.GREEN}✓{Fore.RESET} 更新成功下载。")
return new_filename
except Exception as e:
print(f"{Fore.RED}✕{Fore.RESET} 下载更新文件时出错: {e}")
exit_code = 1
choice = input(f"{Fore.BLUE}?{Fore.RESET} 是否切换备用下载路线(是/否): ").lower()
if choice in ['是', 'y', 'yes']:
try:
spare_download_version = requests.get(spare_download_version_url)
data = spare_download_version.json()
spare_download_version = data['version']# 获取备用路线的程序的版本号
except Exception as e:
print(f"{Fore.RED}✕{Fore.RESET} 获取备用路线版本信息时出错: {Fore.RED}{e}{Fore.RESET}")
exit_code = 1
return None
if spare_download_version == version:
try:
response = requests.get(spare_download_url)
new_filename = '中文git.py'
with open(new_filename, 'wb') as f:
f.write(response.content)
print(f"{Fore.GREEN}✓{Fore.RESET} 更新成功下载。")
return new_filename
except Exception as e:
print(f"{Fore.RED}✕{Fore.RESET} 下载更新文件时出错: {e}")
exit_code = 1
return None
else:
print(f"{Fore.RED}✕{Fore.RESET} 备用路线{Fore.YELLOW}版本不一致{Fore.RESET}\n备用路线版本为{Fore.BLUE}{spare_download_version}{Fore.RESET},而GitHub Releases上的最新版为{Fore.BLUE}{version}{Fore.BLUE}\n{Fore.YELLOW}如果你遇到了这个错误,请前往GitHub提交Issue,感谢!{Fore.RESET}")
exit_code = 1
return None
return None
def replace_current_program(new_filename):
global exit_code
try:
# 用下载的文件替换当前程序
os.replace(new_filename, sys.argv[0])
if update_json() == 1:
print(f"{Fore.YELLOW}⚠{Fore.RESET} 请手动更新配置文件并提交issue")
print(f"{Fore.GREEN}✓{Fore.RESET} 程序已成功更新。")
except Exception as e:
print(f"{Fore.RED}✕{Fore.RESET} 替换当前程序时出错: {e}")
exit_code = 1
# 自动检查更新并提示用户安装
def auto_update():
new_version = check_for_updates()
if new_version:
# 询问用户是否安装更新
choice = input(f"{Fore.BLUE}?{Fore.RESET} 是否要安装此更新? (是/否): ").lower()
if choice in ['是','y','yes']:
new_filename = download_update_file(new_version)
if new_filename:
replace_current_program(new_filename)
else:
print(f"{Fore.BLUE}[!]{Fore.RESET} 已跳过更新。")
# ---------- 版本...更新 结束 ----------
# ---------- 公告获取 -----------------
def get_notice_content(url, manual=False):
global exit_code
try:
response = requests.get(url)
if response.status_code == 200:
content = response.text
return content
else:
if manual:
print(f"{Fore.RED}✕{Fore.RESET} 获取最新公告失败!\n状态码: {Fore.BLUE}{response.status_code}{Fore.RESET}")
t = input(f"{Fore.BLUE}?{Fore.RESET} 是否读取本地最新公告({Fore.GREEN}是{Fore.RESET}/{Fore.RED}否{Fore.RESET}):").lower()
if t in ['是', 'y', 'yes']:
display_notice('本地')
else:
exit_code = 1
return None
except Exception as e:
if manual:
print(f"{Fore.RED}✕{Fore.RESET} 获取最新公告失败!\n错误信息: {Fore.RED}{e}{Fore.RESET}")
t = input(f"{Fore.BLUE}?{Fore.RESET} 是否读取本地最新公告({Fore.GREEN}是{Fore.RESET}/{Fore.RED}否{Fore.RESET}):").lower()
if t in ['是', 'y', 'yes']:
display_notice('本地')
else:
exit_code = 1
return None
def save_previous_notice(content):
with open(previous_notice_file, 'w') as file:
file.write(content)
def read_previous_notice():
try:
with open(previous_notice_file, 'r') as file:
return file.read()
except FileNotFoundError:
return ""
except Exception:
return "" # 以防出现像 microsoft/winget-pkgs #156224 中的错误
def display_notice(manual=False):
global exit_code
if manual == True:
content = get_notice_content(notice_url, True)
elif manual == False:
content = get_notice_content(notice_url)
if manual == "本地":
content = read_previous_notice()
if content == "":
print(f"{Fore.RED}✕{Fore.RESET} 没有本地公告")
exit_code = 1
return
else:
previous_notice = read_previous_notice()
if content:
try:
lines = content.split('\n')
# ---- 值提取 ----
level_line = lines[0].strip()
level = int(level_line.split(':')[1])
# -- 等级↑ 是否强制↓ --
force_line = lines[1].strip()
force = bool(force_line.split(':')[1])
# ----------------
except Exception as e:
if not manual:
return
else:
print(f"{Fore.RED}✕{Fore.RESET} 最新公告{Fore.YELLOW}不符合{Fore.RESET}规范,请联系开发者反馈!")
print(f"{Fore.RED}✕{Fore.RESET} 反馈时{Fore.YELLOW}请带上错误信息{Fore.RESET}:\n{Fore.RED}{e} | {Fore.CYAN}{level_line} {Fore.RED}|{Fore.CYAN} {force_line}{Fore.RESET}")
exit_code = 1
return
if level == 1:
color = Fore.RED
elif level == 2:
color = Fore.YELLOW
elif level == 3:
color = Fore.GREEN
elif level == 4:
color = Fore.BLUE
else:
color = ''
if manual == True:
print(f"{color}[!最新公告({level}级)!]{Fore.RESET}")
for line in lines[2:]:
print(line)
print(f"{color}[!------------!]{Fore.RESET}")
elif manual == "本地":
print(f"{color}[!最新本地公告({level}级)!]{Fore.RESET}")
for line in lines[2:]:
print(line)
print(f"{color}[!------------!]{Fore.RESET}")
else:
if content != previous_notice:
if force:
print(f"\n{color}[!有新公告({level}级)!]{Fore.RESET}")
for line in lines[2:]:
print(line)
print(f"{color}[!------------!]{Fore.RESET}")
save_previous_notice(content)
# ---------- 公告获取 结束 ------------
# ---------- 各命令函数 ---------------
def check_git_stash():
staged_changes = False
unstaged_changes = False
# 检查暂存区(index)中是否有已暂存的更改
git_diff = subprocess.run(["git", "diff", "--cached", "--name-only"], capture_output=True, text=True)
output_lines = git_diff.stdout.split('\n')
if output_lines != ['']:
staged_changes = True
# --------
# 检查工作区中是否有未暂存的更改
git_diff = subprocess.run(["git", "diff", "--name-only"], capture_output=True, text=True)
output_lines = git_diff.stdout.split('\n')
if output_lines != ['']:
unstaged_changes = True
return staged_changes, unstaged_changes
# ------------------------------------------
def git_command(command, *args):
global exit_code
git_command_mapping = {
"拉取": "pull",
"推送": "push",
"提交": "commit",
"新建分支": "checkout -b",
"切换分支": "checkout",
"合并": "merge",
"暂存": "add",
"状态": "status",
"日志": "log",
"删除分支": "branch -D",
"远程地址": "remote -v",
"远程更新": "remote update",
"远程分支": "branch -r",
"克隆": "clone",
"签出到": "checkout",
"图形化日志" :"log --graph",
"是否忽略": "check-ignore -v",
"初始化": "init",
"本地分支": "branch",
"强推": "push --force",
"更名分支": "branch -m",
# --- 特殊功能 ---
"版本": "--version",
"更新": "update",
"公告": "notice",
# --- 结束 ---
"还原": "revert",
"重置": "reset",
"差异": "diff",
"清理引用": "remote prune origin",
# 可根据需要添加更多映射
}
if command == "帮助":
print("使用方法:")
print("中文git <中文指令> [参数]")
print("即:中文git <你想干什么> [具体要啥]")
print("\n支持的中文指令:")
print("中文git", end=" ")
for cmd in git_command_mapping:
print(f"[{cmd}]", end=" ")
print("\n详细支持命令请查看用户手册:https://github.com/DuckDuckStudio/Chinese_git/blob/main/USER_HANDBOOK.md#可用命令")
return
git_command = git_command_mapping.get(command)
if git_command:
try:
if command == "提交":
staged, unstaged = check_git_stash()
if staged:
print(f"{Fore.BLUE}[!]{Fore.BLUE} 将提交暂存区的内容")
elif unstaged:
print(f"{Fore.YELLOW}⚠{Fore.RESET} 没有已暂存的更改,但检测到未暂存的更改")
if input(f"{Fore.BLUE}?{Fore.RESET} 是否暂存所有并提交({Fore.GREEN}是{Fore.RESET}/{Fore.RED}否{Fore.RESET}):").lower() in ['y', 'yes', '是']:
subprocess.run('git ' + 'add ' + '--all')
print(f"{Fore.GREEN}✓{Fore.RESET} 已暂存所有更改")
else:
print(f"{Fore.RED}✕{Fore.RESET} 没有已暂存的更改")
exit_code = 1
else:
print(f"{Fore.RED}✕{Fore.RESET} 没有更改")
exit_code = 1
if not args and exit_code != 1:
commit_message = input("请输入提交信息: ")
if not commit_message:
# 还不输提交信息?玩我呢
print(f"{Fore.RED}✕{Fore.RESET} 请提供提交信息")
exit_code = 1
result = subprocess.run('git ' + git_command + ' -m "' + commit_message + '"', capture_output=True, text=True)
else:
result = subprocess.run('git ' + git_command + ' ' + ' '.join(args), capture_output=True, text=True)
elif command == "暂存":
if args and args[0] == "所有":
result = subprocess.run('git ' + git_command + ' --all', capture_output=True, text=True)
elif not args:
print(f"{Fore.RED}✕{Fore.RESET} 你要暂存什么你没告诉我啊")
exit_code = 1
else:
result = subprocess.run('git ' + git_command + ' ' + ' '.join(args), capture_output=True, text=True)
elif command == "切换分支" or command == "签出到":
if not args:
branch = input("请输入需要切换的分支:")
result = subprocess.run('git ' + git_command + ' ' + branch, capture_output=True, text=True)
elif len(args) == 1:
result = subprocess.run('git ' + git_command + ' ' + ' '.join(args), capture_output=True, text=True)
else:
print(f"{Fore.RED}✕{Fore.RESET} 多余的参数")
exit_code = 1
elif command == "新建分支":
if not args:
new_branch = input("请输入新分支名称: ")
result = subprocess.run('git ' + git_command + ' ' + new_branch, capture_output=True, text=True)
elif len(args) == 1:
result = subprocess.run('git ' + git_command + ' ' + ' '.join(args), capture_output=True, text=True)
else:
print(f"{Fore.RED}✕{Fore.RESET} 多余的参数")
elif command == "删除分支":
if not args:
print(f"{Fore.RED}✕{Fore.RESET} 删除分支命令需要指定要删除的分支名称")
exit_code = 1
elif len(args) > 2:
print(f"{Fore.RED}✕{Fore.RESET} 多余的参数")
exit_code = 1
elif len(args) == 2:
if args[1] == "+确认":
git_command = "branch -d"
else:
print(f"{Fore.RED}✕{Fore.RESET} 无效的附加参数")
exit_code = 1
else:
result = subprocess.run('git ' + git_command + ' ' + ' '.join(args), capture_output=True, text=True)
elif command == "版本":
print("中文Git by 鸭鸭「カモ」")
print(f"版本:{Fore.BLUE}{VERSION}{Fore.RESET}")
print(f"安装在: {Fore.BLUE}{full_path}{Fore.RESET}")
result = subprocess.run('git ' + '--version', capture_output=True, text=True)
elif command == "公告":
display_notice(True)
return
elif command == "还原":
if not args:
print(f"{Fore.RED}✕{Fore.RESET} 还原命令需要参数")
exit_code = 1
else:
if args[0] == "最新提交":
result = subprocess.run('git ' + git_command + ' HEAD', capture_output=True, text=True)
elif args[0].startswith("倒数第"):
try:
if args[0].endswith('个提交'):
num = args[0]
num = num[3:-3]
else:
num = int(args[0][3:])
result = subprocess.run(['git ', git_command, f'HEAD~{num}'], capture_output=True, text=True)
except ValueError:
print(f"{Fore.RED}✕{Fore.RESET} 参数错误,请输入倒数第n个提交,n为正整数。")
exit_code = 1
else:
result = subprocess.run('git ' + git_command + ' ' + args[0], capture_output=True, text=True)
elif command == "克隆":
if not args:
repository = input("请输入远程仓库链接(以.git结尾):")
result = subprocess.run('git ' + git_command + ' ' + repository, capture_output=True, text=True)
else:
result = subprocess.run('git ' + git_command + ' ' + ' '.join(args), capture_output=True, text=True)
elif command == "是否忽略":
if not args:
file = input("请输入需要检查的文件/文件夹:")
if not file:
print(f"{Fore.RED}✕{Fore.RESET} 文件/文件夹名不能为空")
exit_code = 1
result = subprocess.run('git ' + git_command + ' ' + file, capture_output=True, text=True)
else:
result = subprocess.run('git ' + git_command + ' ' + ' '.join(args), capture_output=True, text=True)
elif command == "查看本地分支":
if len(args) > 2:
print(f"{Fore.RED}✕{Fore.RESET} 多余的参数")
exit_code = 1
elif args[0] == "+最后提交":
git_command = "branch -v"
elif (args[0] == "+最后提交" and args[1] == "+与上游分支关系") or (args[0] == "+与上游分支关系" and args[1] == "+最后提交"):
git_command = "branch -vv"
else:
print(f"{Fore.RED}✕{Fore.RESET} 无效的参数")
exit_code = 1
result = subprocess.run('git ' + git_command + ' ' + ' '.join(args), capture_output=True, text=True)
elif command == "合并":
if not args:
branch = input("请输入需要合并到当前分支的分支:")
result = subprocess.run('git ' + git_command + ' ' + branch, capture_output=True, text=True)
else:
result = subprocess.run('git ' + git_command + ' ' + ' '.join(args), capture_output=True, text=True)
elif command == "更名分支":
if not args:
old_branch = input("请输入旧分支名:")
new_branch = input("请输入新分支名:")
if old_branch == new_branch:
print(f"{Fore.RED}✕{Fore.RESET} 新旧分支名称相同")
exit_code = 1
result = subprocess.run('git ' + git_command + ' ' + old_branch + ' ' + new_branch, capture_output=True, text=True)
if args < 2:
print(f"{Fore.RED}✕{Fore.RESET} 缺少参数")
exit_code = 1
else:
result = subprocess.run('git ' + git_command + ' ' + ' '.join(args), capture_output=True, text=True)
elif command == "更新":
print("中文Git by 鸭鸭「カモ」")
print(f"当前版本:{Fore.BLUE}{VERSION}{Fore.RESET}")
print("正在检查更新...")
auto_update()
return
elif command == "重置":
if not args:
print(f"{Fore.RED}✕{Fore.RESET} 重置指令需要具体的参数。")
exit_code = 1
elif len(args) > 2:
print(f"{Fore.RED}✕{Fore.RESET} 多余的参数")
exit_code = 1
elif len(args) == 2:
if args[1] == "+保留更改":# 默认
git_command = "reset --mixed"
elif args[1] == "+删除更改":
git_command = "reset --hard"
else:
print(f"{Fore.RED}✕{Fore.RESET} 无效的附加参数")
exit_code = 1
if args[0] in ["最新提交", "HEAD"]:
print(f"{Fore.YELLOW}⚠{Fore.RESET} 虽然您这样做不会出错,但这样做有意义吗(思考)")
result = subprocess.run('git ' + git_command + ' HEAD', capture_output=True, text=True)
elif args[0].startswith("倒数第"):
try:
if args[0].endswith('个提交'):
num = args[0]
num = num[3:-3]
else:
num = int(args[0][3:])
result = subprocess.run(['git ', git_command, f'HEAD~{num}'], capture_output=True, text=True)
except ValueError:
print(f"{Fore.RED}✕{Fore.RESET} 参数错误,请输入倒数第n个提交,n为正整数。")
exit_code = 1
else:
result = subprocess.run('git ' + git_command + ' ' + ' '.join(args), capture_output=True, text=True)
else:
result = subprocess.run('git ' + git_command + ' ' + ' '.join(args), capture_output=True, text=True)
if result.returncode == 0 and exit_code == 0:# 习惯性用 && 了...
print(result.stdout)
elif exit_code != 1:# 已设置错误代码的都已输出错误信息
print(f"{Fore.RED}✕{Fore.RESET} 错误: {result.stderr}")
exit_code = 1
if auto_check_update == "True":
always_check()# 自动检查更新
if auto_get_notice == "True":
display_notice() # 自动公告获取
except Exception as e:
print(f"{Fore.RED}✕{Fore.RESET} 执行命令时出错: {e}")
if auto_check_update == "True":
always_check()# 自动检查更新
if auto_get_notice == "True":
display_notice() # 自动公告获取
exit_code = 1
else:
print("不支持的命令:", command)
if auto_check_update == "True":
always_check()# 自动检查更新
if auto_get_notice == "True":
display_notice() # 自动公告获取
exit_code = 1
def main():
init(autoreset=True)
#--- 公用变量 ---
global notice_url,previous_notice_file,VERSION,url,config_file,full_path,auto_get_notice,auto_check_update
#---------------
script_path = os.path.dirname(os.path.abspath(__file__))
full_path = os.path.join(script_path, "中文git.py")
exit_code = 0 # 只有不正常退出需要定义
notice_url = 'https://duckduckstudio.github.io/yazicbs.github.io/Tools/chinese_git/notice/notice.txt'
previous_notice_file = os.path.join(script_path, 'previous_notice.txt')# 显示过的公告
# ---------- 版本定义及更新 ----------
# 定义版本号
VERSION = 'v2.10'
# GitHub releases API URL
url = 'https://api.github.com/repos/DuckDuckStudio/Chinese_git/releases/latest'
config_file = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), "config.json")
if os.path.exists(config_file):
try:
with open(config_file, 'r') as file:
config_data = json.load(file)
auto_check_update = config_data['application']['run']['auto_check_update']
auto_get_notice = config_data['application']['run']['auto_get_notice']
except Exception as e:
auto_check_update = True
auto_get_notice = True
print(f"{Fore.RED}✕{Fore.RESET} 读取配置文件时出错:\n{Fore.RED}{e}{Fore.RESET}\n{Fore.BLUE}[!]{Fore.RESET} 请检查配置文件是否正确,您可以先删除配置文件然后运行任意中文git的命令来重新生成默认配置文件。")
exit_code = 1
else:
# 没有配置文件就默认都要
auto_check_update = True
auto_get_notice = True
print(f"{Fore.YELLOW}⚠{Fore.RESET} 您的中文Git的安装目录下似乎{Fore.YELLOW}缺少配置文件{Fore.RESET},程序将尝试自动生成默认配置文件!")
try:
# 生成一个默认配置文件
# 将数据结构转换为 JSON 格式的字符串
json_str = {
"information": {
"version": "v2.10"
},
"application": {
"notice": {
"time": "",
"level": "",
"content": ""
},
"run": {
"auto_check_update": "True",
"auto_get_notice": "True"
}
}
}
json_str = json.dumps(json_str, indent=4) # indent 参数用于设置缩进(4空)
# 将 JSON 字符串写入文件
with open(config_file, 'w') as f:
f.write(json_str)
print(f"{Fore.GREEN}✓{Fore.RESET} 默认配置文件生成成功")
except Exception as e:
print(f"{Fore.RED}✕{Fore.RESET} 默认配置文件生成失败!请{Fore.YELLOW}手动添加{Fore.RESET}配置文件,否则将无法运行一些功能!")
exit_code = 1
print(f"{Fore.BLUE}[!]{Fore.RESET} 如果你觉得这不应该可以提交Issue")
# -------------------
if len(sys.argv) > 1:
git_command(sys.argv[1], *sys.argv[2:])
else:
print("使用方法:")
print("中文git <中文指令> [参数]")
print("即:中文git <你想干什么> [具体要啥]")
if auto_check_update == "True":
always_check()# 自动检查更新
if auto_get_notice == "True":
display_notice()# 自动公告获取
exit_code = 1
sys.exit(exit_code)
| 29,537 | Python | .pyp | 596 | 30.67953 | 252 | 0.501433 | DuckDuckStudio/Chinese_git | 8 | 1 | 6 | GPL-2.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,203 | 中文git_pypi.py | DuckDuckStudio_Chinese_git/ChineseGit/中文git/中文git_pypi.py | import os
import sys
import json
import requests
import subprocess
from colorama import init, Fore
# ----------- 此代码为PyPi专用,非函数代码请写在main()函数中! -----------
# --- 读取配置文件 ---
def fetch_json():
global exit_code
config_url = "https://duckduckstudio.github.io/yazicbs.github.io/Tools/chinese_git/files/json/config.json"
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
}
try:
response = requests.get(config_url, headers=headers)
if response.status_code == 200:
json_data = response.json()
print(f"{Fore.GREEN}✓{Fore.RESET} 获取最新默认配置文件成功")
return json_data
else:
print(f"{Fore.RED}✕{Fore.RESET} 无法获取最新默认配置文件\n{Fore.BLUE}[!]{Fore.RESET} 返回状态码: {Fore.YELLOW}{response.status_code}{Fore.RESET}")
exit_code = 1
return None
except Exception as e:
print(f"{Fore.RED}✕{Fore.RESET} 尝试获取最新默认配置文件失败,错误: {Fore.RED}{e}{Fore.RESET}")
exit_code = 1
return None
def merge_json(old_json, new_json):
# 合并两个 JSON 对象
updated_json = old_json.copy()
# 处理旧 JSON 中的键
keys_to_remove = []
for key in updated_json:
if key not in new_json:
keys_to_remove.append(key)
for key in keys_to_remove:
del updated_json[key]
# 合并新 JSON 中的值
for key in new_json:
if key in updated_json and isinstance(updated_json[key], dict) and isinstance(new_json[key], dict):
# 如果是字典类型,递归合并
updated_json[key] = merge_json(updated_json[key], new_json[key])
else:
# 直接更新值
updated_json[key] = new_json[key]
return updated_json
def update_json():
global exit_code
new_json = fetch_json()
if not new_json:
return 1
try:
with open(config_file, 'r') as f:
old_json = json.load(f)
updated_json = merge_json(old_json, new_json)
# 将更新后的配置写入文件
with open(config_file, 'w') as f:
json.dump(updated_json, f, indent=4)
print(f"{Fore.GREEN}✓{Fore.RESET} 默认配置文件更新成功")
return 0
except Exception as e:
print(f"{Fore.RED}✕{Fore.RESET} 更新配置文件时出错:\n{Fore.RED}{e}{Fore.RESET}")
exit_code = 1
return 1
def always_check():# 每次执行命令都要检查的
# ----------- 检查更新 ----------
current_version = VERSION
try:
response = requests.get(url)
data = response.json()
latest_version = data['tag_name'] # 从 GitHub 获取最新版本号
if latest_version != current_version:
print(f"{Fore.BLUE}[!]{Fore.RESET} 发现新版本 {Fore.RED}{current_version}{Fore.RESET} → {Fore.GREEN}{latest_version}{Fore.RESET}\n运行 {Fore.BLUE}中文git 更新{Fore.RESET} 命令以更新。")
except:
pass
def check_for_updates():
global exit_code
# 提取版本号
current_version = VERSION.split('-')[0] # 分离可能的 '-pack' 后缀
try:
response = requests.get(url)
data = response.json()
latest_version = data['tag_name'] # 从 GitHub 获取最新版本号
if latest_version != current_version:
print(f"{Fore.BLUE}[!]{Fore.RESET} 发现新版本 {Fore.RED}{current_version}{Fore.RESET} → {Fore.GREEN}{latest_version}{Fore.RESET} 可用!")
return latest_version
else:
print(f"{Fore.GREEN}✓{Fore.RESET} 您已安装最新版本 {Fore.BLUE}{current_version}{Fore.RESET}。")
return None
except Exception as e:
print(f"{Fore.RED}✕{Fore.RESET} 检查更新时出错: {e}")
exit_code = 1
return None
def download_update_file(version):
global exit_code
# 根据版本确定下载 URL
download_url = f'https://github.com/DuckDuckStudio/Chinese_git/releases/download/{version}/Chinese_git.py'
spare_download_url = 'https://duckduckstudio.github.io/yazicbs.github.io/Tools/chinese_git/Spare-Download/Chinese_git.py'
spare_download_version_url = 'https://duckduckstudio.github.io/yazicbs.github.io/Tools/chinese_git/Spare-Download/info.json'
try:
response = requests.get(download_url)
# 重命名下载的文件为"中文Git.exe" 或 "中文Git.py"
new_filename = '中文Git.py'
with open(new_filename, 'wb') as f:
f.write(response.content)
print(f"{Fore.GREEN}✓{Fore.RESET} 更新成功下载。")
return new_filename
except Exception as e:
print(f"{Fore.RED}✕{Fore.RESET} 下载更新文件时出错: {e}")
exit_code = 1
choice = input(f"{Fore.BLUE}?{Fore.RESET} 是否切换备用下载路线(是/否): ").lower()
if choice in ['是', 'y', 'yes']:
try:
spare_download_version = requests.get(spare_download_version_url)
data = spare_download_version.json()
spare_download_version = data['version']# 获取备用路线的程序的版本号
except Exception as e:
print(f"{Fore.RED}✕{Fore.RESET} 获取备用路线版本信息时出错: {Fore.RED}{e}{Fore.RESET}")
exit_code = 1
return None
if spare_download_version == version:
try:
response = requests.get(spare_download_url)
new_filename = '中文git.py'
with open(new_filename, 'wb') as f:
f.write(response.content)
print(f"{Fore.GREEN}✓{Fore.RESET} 更新成功下载。")
return new_filename
except Exception as e:
print(f"{Fore.RED}✕{Fore.RESET} 下载更新文件时出错: {e}")
exit_code = 1
return None
else:
print(f"{Fore.RED}✕{Fore.RESET} 备用路线{Fore.YELLOW}版本不一致{Fore.RESET}\n备用路线版本为{Fore.BLUE}{spare_download_version}{Fore.RESET},而GitHub Releases上的最新版为{Fore.BLUE}{version}{Fore.BLUE}\n{Fore.YELLOW}如果你遇到了这个错误,请前往GitHub提交Issue,感谢!{Fore.RESET}")
exit_code = 1
return None
return None
def replace_current_program(new_filename):
global exit_code
try:
# 用下载的文件替换当前程序
os.replace(new_filename, sys.argv[0])
if update_json() == 1:
print(f"{Fore.YELLOW}⚠{Fore.RESET} 请手动更新配置文件并提交issue")
print(f"{Fore.GREEN}✓{Fore.RESET} 程序已成功更新。")
except Exception as e:
print(f"{Fore.RED}✕{Fore.RESET} 替换当前程序时出错: {e}")
exit_code = 1
# 自动检查更新并提示用户安装
def auto_update():
new_version = check_for_updates()
if new_version:
# 询问用户是否安装更新
choice = input(f"{Fore.BLUE}?{Fore.RESET} 是否要安装此更新? (是/否): ").lower()
if choice in ['是','y','yes']:
new_filename = download_update_file(new_version)
if new_filename:
replace_current_program(new_filename)
else:
print(f"{Fore.BLUE}[!]{Fore.RESET} 已跳过更新。")
# ---------- 版本...更新 结束 ----------
# ---------- 公告获取 -----------------
def get_notice_content(url, manual=False):
global exit_code
try:
response = requests.get(url)
if response.status_code == 200:
content = response.text
return content
else:
if manual:
print(f"{Fore.RED}✕{Fore.RESET} 获取最新公告失败!\n状态码: {Fore.BLUE}{response.status_code}{Fore.RESET}")
t = input(f"{Fore.BLUE}?{Fore.RESET} 是否读取本地最新公告({Fore.GREEN}是{Fore.RESET}/{Fore.RED}否{Fore.RESET}):").lower()
if t in ['是', 'y', 'yes']:
display_notice('本地')
else:
exit_code = 1
return None
except Exception as e:
if manual:
print(f"{Fore.RED}✕{Fore.RESET} 获取最新公告失败!\n错误信息: {Fore.RED}{e}{Fore.RESET}")
t = input(f"{Fore.BLUE}?{Fore.RESET} 是否读取本地最新公告({Fore.GREEN}是{Fore.RESET}/{Fore.RED}否{Fore.RESET}):").lower()
if t in ['是', 'y', 'yes']:
display_notice('本地')
else:
exit_code = 1
return None
def save_previous_notice(content):
with open(previous_notice_file, 'w') as file:
file.write(content)
def read_previous_notice():
try:
with open(previous_notice_file, 'r') as file:
return file.read()
except FileNotFoundError:
return ""
except Exception:
return "" # 以防出现像 microsoft/winget-pkgs #156224 中的错误
def display_notice(manual=False):
global exit_code
if manual == True:
content = get_notice_content(notice_url, True)
elif manual == False:
content = get_notice_content(notice_url)
if manual == "本地":
content = read_previous_notice()
if content == "":
print(f"{Fore.RED}✕{Fore.RESET} 没有本地公告")
exit_code = 1
return
else:
previous_notice = read_previous_notice()
if content:
try:
lines = content.split('\n')
# ---- 值提取 ----
level_line = lines[0].strip()
level = int(level_line.split(':')[1])
# -- 等级↑ 是否强制↓ --
force_line = lines[1].strip()
force = bool(force_line.split(':')[1])
# ----------------
except Exception as e:
if not manual:
return
else:
print(f"{Fore.RED}✕{Fore.RESET} 最新公告{Fore.YELLOW}不符合{Fore.RESET}规范,请联系开发者反馈!")
print(f"{Fore.RED}✕{Fore.RESET} 反馈时{Fore.YELLOW}请带上错误信息{Fore.RESET}:\n{Fore.RED}{e} | {Fore.CYAN}{level_line} {Fore.RED}|{Fore.CYAN} {force_line}{Fore.RESET}")
exit_code = 1
return
if level == 1:
color = Fore.RED
elif level == 2:
color = Fore.YELLOW
elif level == 3:
color = Fore.GREEN
elif level == 4:
color = Fore.BLUE
else:
color = ''
if manual == True:
print(f"{color}[!最新公告({level}级)!]{Fore.RESET}")
for line in lines[2:]:
print(line)
print(f"{color}[!------------!]{Fore.RESET}")
elif manual == "本地":
print(f"{color}[!最新本地公告({level}级)!]{Fore.RESET}")
for line in lines[2:]:
print(line)
print(f"{color}[!------------!]{Fore.RESET}")
else:
if content != previous_notice:
if force:
print(f"\n{color}[!有新公告({level}级)!]{Fore.RESET}")
for line in lines[2:]:
print(line)
print(f"{color}[!------------!]{Fore.RESET}")
save_previous_notice(content)
# ---------- 公告获取 结束 ------------
# ---------- 各命令函数 ---------------
def check_git_stash():
staged_changes = False
unstaged_changes = False
# 检查暂存区(index)中是否有已暂存的更改
git_diff = subprocess.run(["git", "diff", "--cached", "--name-only"], capture_output=True, text=True)
output_lines = git_diff.stdout.split('\n')
if output_lines != ['']:
staged_changes = True
# --------
# 检查工作区中是否有未暂存的更改
git_diff = subprocess.run(["git", "diff", "--name-only"], capture_output=True, text=True)
output_lines = git_diff.stdout.split('\n')
if output_lines != ['']:
unstaged_changes = True
return staged_changes, unstaged_changes
# ------------------------------------------
def git_command(command, *args):
global exit_code
git_command_mapping = {
"拉取": "pull",
"推送": "push",
"提交": "commit",
"新建分支": "checkout -b",
"切换分支": "checkout",
"合并": "merge",
"暂存": "add",
"状态": "status",
"日志": "log",
"删除分支": "branch -D",
"远程地址": "remote -v",
"远程更新": "remote update",
"远程分支": "branch -r",
"克隆": "clone",
"签出到": "checkout",
"图形化日志" :"log --graph",
"是否忽略": "check-ignore -v",
"初始化": "init",
"本地分支": "branch",
"强推": "push --force",
"更名分支": "branch -m",
# --- 特殊功能 ---
"版本": "--version",
"更新": "update",
"公告": "notice",
# --- 结束 ---
"还原": "revert",
"重置": "reset",
"差异": "diff",
"清理引用": "remote prune origin",
# 可根据需要添加更多映射
}
if command == "帮助":
print("使用方法:")
print("中文git <中文指令> [参数]")
print("即:中文git <你想干什么> [具体要啥]")
print("\n支持的中文指令:")
print("中文git", end=" ")
for cmd in git_command_mapping:
print(f"[{cmd}]", end=" ")
print("\n详细支持命令请查看用户手册:https://github.com/DuckDuckStudio/Chinese_git/blob/main/USER_HANDBOOK.md#可用命令")
return
git_command = git_command_mapping.get(command)
if git_command:
try:
if command == "提交":
staged, unstaged = check_git_stash()
if staged:
print(f"{Fore.BLUE}[!]{Fore.BLUE} 将提交暂存区的内容")
elif unstaged:
print(f"{Fore.YELLOW}⚠{Fore.RESET} 没有已暂存的更改,但检测到未暂存的更改")
if input(f"{Fore.BLUE}?{Fore.RESET} 是否暂存所有并提交({Fore.GREEN}是{Fore.RESET}/{Fore.RED}否{Fore.RESET}):").lower() in ['y', 'yes', '是']:
subprocess.run('git ' + 'add ' + '--all')
print(f"{Fore.GREEN}✓{Fore.RESET} 已暂存所有更改")
else:
print(f"{Fore.RED}✕{Fore.RESET} 没有已暂存的更改")
exit_code = 1
else:
print(f"{Fore.RED}✕{Fore.RESET} 没有更改")
exit_code = 1
if not args and exit_code != 1:
commit_message = input("请输入提交信息: ")
if not commit_message:
# 还不输提交信息?玩我呢
print(f"{Fore.RED}✕{Fore.RESET} 请提供提交信息")
exit_code = 1
result = subprocess.run('git ' + git_command + ' -m "' + commit_message + '"', capture_output=True, text=True)
else:
result = subprocess.run('git ' + git_command + ' ' + ' '.join(args), capture_output=True, text=True)
elif command == "暂存":
if args and args[0] == "所有":
result = subprocess.run('git ' + git_command + ' --all', capture_output=True, text=True)
elif not args:
print(f"{Fore.RED}✕{Fore.RESET} 你要暂存什么你没告诉我啊")
exit_code = 1
else:
result = subprocess.run('git ' + git_command + ' ' + ' '.join(args), capture_output=True, text=True)
elif command == "切换分支" or command == "签出到":
if not args:
branch = input("请输入需要切换的分支:")
result = subprocess.run('git ' + git_command + ' ' + branch, capture_output=True, text=True)
elif len(args) == 1:
result = subprocess.run('git ' + git_command + ' ' + ' '.join(args), capture_output=True, text=True)
else:
print(f"{Fore.RED}✕{Fore.RESET} 多余的参数")
exit_code = 1
elif command == "新建分支":
if not args:
new_branch = input("请输入新分支名称: ")
result = subprocess.run('git ' + git_command + ' ' + new_branch, capture_output=True, text=True)
elif len(args) == 1:
result = subprocess.run('git ' + git_command + ' ' + ' '.join(args), capture_output=True, text=True)
else:
print(f"{Fore.RED}✕{Fore.RESET} 多余的参数")
elif command == "删除分支":
if not args:
print(f"{Fore.RED}✕{Fore.RESET} 删除分支命令需要指定要删除的分支名称")
exit_code = 1
elif len(args) > 2:
print(f"{Fore.RED}✕{Fore.RESET} 多余的参数")
exit_code = 1
elif len(args) == 2:
if args[1] == "+确认":
git_command = "branch -d"
else:
print(f"{Fore.RED}✕{Fore.RESET} 无效的附加参数")
exit_code = 1
else:
result = subprocess.run('git ' + git_command + ' ' + ' '.join(args), capture_output=True, text=True)
elif command == "版本":
print("中文Git by 鸭鸭「カモ」")
print(f"版本:{Fore.BLUE}{VERSION}{Fore.RESET}")
print(f"安装在: {Fore.BLUE}{full_path}{Fore.RESET}")
result = subprocess.run('git ' + '--version', capture_output=True, text=True)
elif command == "公告":
display_notice(True)
return
elif command == "还原":
if not args:
print(f"{Fore.RED}✕{Fore.RESET} 还原命令需要参数")
exit_code = 1
else:
if args[0] == "最新提交":
result = subprocess.run('git ' + git_command + ' HEAD', capture_output=True, text=True)
elif args[0].startswith("倒数第"):
try:
if args[0].endswith('个提交'):
num = args[0]
num = num[3:-3]
else:
num = int(args[0][3:])
result = subprocess.run(['git ', git_command, f'HEAD~{num}'], capture_output=True, text=True)
except ValueError:
print(f"{Fore.RED}✕{Fore.RESET} 参数错误,请输入倒数第n个提交,n为正整数。")
exit_code = 1
else:
result = subprocess.run('git ' + git_command + ' ' + args[0], capture_output=True, text=True)
elif command == "克隆":
if not args:
repository = input("请输入远程仓库链接(以.git结尾):")
result = subprocess.run('git ' + git_command + ' ' + repository, capture_output=True, text=True)
else:
result = subprocess.run('git ' + git_command + ' ' + ' '.join(args), capture_output=True, text=True)
elif command == "是否忽略":
if not args:
file = input("请输入需要检查的文件/文件夹:")
if not file:
print(f"{Fore.RED}✕{Fore.RESET} 文件/文件夹名不能为空")
exit_code = 1
result = subprocess.run('git ' + git_command + ' ' + file, capture_output=True, text=True)
else:
result = subprocess.run('git ' + git_command + ' ' + ' '.join(args), capture_output=True, text=True)
elif command == "查看本地分支":
if len(args) > 2:
print(f"{Fore.RED}✕{Fore.RESET} 多余的参数")
exit_code = 1
elif args[0] == "+最后提交":
git_command = "branch -v"
elif (args[0] == "+最后提交" and args[1] == "+与上游分支关系") or (args[0] == "+与上游分支关系" and args[1] == "+最后提交"):
git_command = "branch -vv"
else:
print(f"{Fore.RED}✕{Fore.RESET} 无效的参数")
exit_code = 1
result = subprocess.run('git ' + git_command + ' ' + ' '.join(args), capture_output=True, text=True)
elif command == "合并":
if not args:
branch = input("请输入需要合并到当前分支的分支:")
result = subprocess.run('git ' + git_command + ' ' + branch, capture_output=True, text=True)
else:
result = subprocess.run('git ' + git_command + ' ' + ' '.join(args), capture_output=True, text=True)
elif command == "更名分支":
if not args:
old_branch = input("请输入旧分支名:")
new_branch = input("请输入新分支名:")
if old_branch == new_branch:
print(f"{Fore.RED}✕{Fore.RESET} 新旧分支名称相同")
exit_code = 1
result = subprocess.run('git ' + git_command + ' ' + old_branch + ' ' + new_branch, capture_output=True, text=True)
if args < 2:
print(f"{Fore.RED}✕{Fore.RESET} 缺少参数")
exit_code = 1
else:
result = subprocess.run('git ' + git_command + ' ' + ' '.join(args), capture_output=True, text=True)
elif command == "更新":
print("中文Git by 鸭鸭「カモ」")
print(f"当前版本:{Fore.BLUE}{VERSION}{Fore.RESET}")
print("正在检查更新...")
auto_update()
return
elif command == "重置":
if not args:
print(f"{Fore.RED}✕{Fore.RESET} 重置指令需要具体的参数。")
exit_code = 1
elif len(args) > 2:
print(f"{Fore.RED}✕{Fore.RESET} 多余的参数")
exit_code = 1
elif len(args) == 2:
if args[1] == "+保留更改":# 默认
git_command = "reset --mixed"
elif args[1] == "+删除更改":
git_command = "reset --hard"
else:
print(f"{Fore.RED}✕{Fore.RESET} 无效的附加参数")
exit_code = 1
if args[0] in ["最新提交", "HEAD"]:
print(f"{Fore.YELLOW}⚠{Fore.RESET} 虽然您这样做不会出错,但这样做有意义吗(思考)")
result = subprocess.run('git ' + git_command + ' HEAD', capture_output=True, text=True)
elif args[0].startswith("倒数第"):
try:
if args[0].endswith('个提交'):
num = args[0]
num = num[3:-3]
else:
num = int(args[0][3:])
result = subprocess.run(['git ', git_command, f'HEAD~{num}'], capture_output=True, text=True)
except ValueError:
print(f"{Fore.RED}✕{Fore.RESET} 参数错误,请输入倒数第n个提交,n为正整数。")
exit_code = 1
else:
result = subprocess.run('git ' + git_command + ' ' + ' '.join(args), capture_output=True, text=True)
else:
result = subprocess.run('git ' + git_command + ' ' + ' '.join(args), capture_output=True, text=True)
if result.returncode == 0 and exit_code == 0:# 习惯性用 && 了...
print(result.stdout)
elif exit_code != 1:# 已设置错误代码的都已输出错误信息
print(f"{Fore.RED}✕{Fore.RESET} 错误: {result.stderr}")
exit_code = 1
if auto_check_update == "True":
always_check()# 自动检查更新
if auto_get_notice == "True":
display_notice() # 自动公告获取
except Exception as e:
print(f"{Fore.RED}✕{Fore.RESET} 执行命令时出错: {e}")
if auto_check_update == "True":
always_check()# 自动检查更新
if auto_get_notice == "True":
display_notice() # 自动公告获取
exit_code = 1
else:
print("不支持的命令:", command)
if auto_check_update == "True":
always_check()# 自动检查更新
if auto_get_notice == "True":
display_notice() # 自动公告获取
exit_code = 1
def main():
init(autoreset=True)
#--- 公用变量 ---
global notice_url,previous_notice_file,VERSION,url,config_file,full_path,auto_get_notice,auto_check_update
#---------------
script_path = os.path.dirname(os.path.abspath(__file__))
full_path = os.path.join(script_path, "中文git.py")
exit_code = 0 # 只有不正常退出需要定义
notice_url = 'https://duckduckstudio.github.io/yazicbs.github.io/Tools/chinese_git/notice/notice.txt'
previous_notice_file = os.path.join(script_path, 'previous_notice.txt')# 显示过的公告
# ---------- 版本定义及更新 ----------
# 定义版本号
VERSION = 'v2.10'
# GitHub releases API URL
url = 'https://api.github.com/repos/DuckDuckStudio/Chinese_git/releases/latest'
config_file = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), "config.json")
if os.path.exists(config_file):
try:
with open(config_file, 'r') as file:
config_data = json.load(file)
auto_check_update = config_data['application']['run']['auto_check_update']
auto_get_notice = config_data['application']['run']['auto_get_notice']
except Exception as e:
auto_check_update = True
auto_get_notice = True
print(f"{Fore.RED}✕{Fore.RESET} 读取配置文件时出错:\n{Fore.RED}{e}{Fore.RESET}\n{Fore.BLUE}[!]{Fore.RESET} 请检查配置文件是否正确,您可以先删除配置文件然后运行任意中文git的命令来重新生成默认配置文件。")
exit_code = 1
else:
# 没有配置文件就默认都要
auto_check_update = True
auto_get_notice = True
print(f"{Fore.YELLOW}⚠{Fore.RESET} 您的中文Git的安装目录下似乎{Fore.YELLOW}缺少配置文件{Fore.RESET},程序将尝试自动生成默认配置文件!")
try:
# 生成一个默认配置文件
# 将数据结构转换为 JSON 格式的字符串
json_str = {
"information": {
"version": "v2.10"
},
"application": {
"notice": {
"time": "",
"level": "",
"content": ""
},
"run": {
"auto_check_update": "True",
"auto_get_notice": "True"
}
}
}
json_str = json.dumps(json_str, indent=4) # indent 参数用于设置缩进(4空)
# 将 JSON 字符串写入文件
with open(config_file, 'w') as f:
f.write(json_str)
print(f"{Fore.GREEN}✓{Fore.RESET} 默认配置文件生成成功")
except Exception as e:
print(f"{Fore.RED}✕{Fore.RESET} 默认配置文件生成失败!请{Fore.YELLOW}手动添加{Fore.RESET}配置文件,否则将无法运行一些功能!")
exit_code = 1
print(f"{Fore.BLUE}[!]{Fore.RESET} 如果你觉得这不应该可以提交Issue")
# -------------------
if len(sys.argv) > 1:
git_command(sys.argv[1], *sys.argv[2:])
else:
print("使用方法:")
print("中文git <中文指令> [参数]")
print("即:中文git <你想干什么> [具体要啥]")
if auto_check_update == "True":
always_check()# 自动检查更新
if auto_get_notice == "True":
display_notice()# 自动公告获取
exit_code = 1
sys.exit(exit_code)
| 29,537 | Python | .pyp | 596 | 30.67953 | 252 | 0.501433 | DuckDuckStudio/Chinese_git | 8 | 1 | 6 | GPL-2.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,204 | setup.py | 15525730080_pc_perf/setup.py |
# coding=utf-8
from setuptools import setup, find_packages
setup(
# 包的名称,通常与包的目录名称相同
name='pc-perf',
# 版本号,遵循语义化版本控制规则
version='1.3.2',
# 项目简短描述
description='pc 进程性能测试平台,支持 windows / mac / linux 平台进程cpu、memory、fps(仅支持windows下OpenGL DirectX 引擎应用)、gpu、thread_num、handle_num 等指标的实时监控和可视化展示',
# 项目的URL,通常是项目主页或源代码仓库
url='https://github.com/15525730080/pc_perf',
# 作者
author='范博洲',
# 作者的电子邮件地址
author_email='[email protected]',
# 包的许可证
license='MIT',
# 包的关键词
keywords='pc fps cpu memory gpu monitor',
# 定义项目所需的依赖
install_requires=[
"concurrent_log_handler==0.9.25",
"fastapi==0.110.2",
"numpy==1.23.5",
"pandas==2.2.2",
"Pillow==10.3.0",
"psutil==5.9.8",
"PyGetWindow==0.0.9",
"pynvml==11.5.0",
"SQLAlchemy==2.0.29",
"SQLAlchemy_serializer==1.4.12",
"starlette==0.37.2",
"uvicorn==0.29.0",
"aiosqlite==0.20.0",
"APScheduler==3.10.4",
"greenlet==3.0.3",
"gunicorn==23.0.0",
"Cython==3.0.10"
],
# 从包中自动寻找所有的子包和子模块
py_modules=['pc_perf'],
packages=['app', 'app.core'],
# 包含数据文件,比如配置文件
include_package_data=True,
# 定义包中非.py文件的内容
package_data={
# 如果你的包中有数据文件,可以在这里指定
'': ['../*.exe', '../test_result/*.html'],
},
# 指定Python版本要求
python_requires='>=3.9',
# 指定分发文件的类别,例如:"Programming Language :: Python :: 3"
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.12',
'Programming Language :: Python :: 3.11',
'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: 3.9',
'Topic :: Software Development :: Libraries',
],
)
| 2,371 | Python | .py | 64 | 23.71875 | 148 | 0.580214 | 15525730080/pc_perf | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,205 | pc_perf.py | 15525730080_pc_perf/pc_perf.py | import ctypes
import multiprocessing
import os
import platform
import subprocess
import sys
import threading
import time
import webbrowser
def open_url():
time.sleep(2)
webbrowser.open("http://127.0.0.1:20223")
def is_admin():
"""检查是否有管理员权限(仅适用于 Windows)。"""
try:
return ctypes.windll.shell32.IsUserAnAdmin()
except:
return False
def win_main():
if not is_admin():
# 如果没有管理员权限,重新启动脚本并请求管理员权限
ctypes.windll.shell32.ShellExecuteW(
None, "runas", sys.executable, " ".join(sys.argv), None, 1
)
sys.exit(0) # 退出当前进程
import uvicorn
from app.view import app
multiprocessing.freeze_support()
threading.Thread(target=open_url).start()
uvicorn.run(app, host="0.0.0.0", port=20223, log_level="error", reload=False)
def unix_main():
threading.Thread(target=open_url).start()
start_cmd = "{0} -m gunicorn -b 0.0.0.0:20223 --workers {1} --preload --worker-class=uvicorn.workers.UvicornWorker app.view:app".format(
sys.executable, os.cpu_count())
subprocess.run(start_cmd.split())
if __name__ == '__main__':
if platform.system() == "Windows":
win_main()
else:
unix_main()
| 1,310 | Python | .py | 40 | 25.45 | 140 | 0.667238 | 15525730080/pc_perf | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,206 | util.py | 15525730080_pc_perf/app/util.py | import asyncio
import json
from pathlib import Path
import numpy as np
import pandas as pd
from app.log import log as logger
from concurrent.futures import ProcessPoolExecutor, wait
class DataCollect(object):
def __init__(self, save_dir):
self.save_dir = save_dir
self.csv_files: list[Path] = self.get_all_csv()
def get_all_csv(self):
return [sub_file for sub_file in Path(self.save_dir).iterdir() if
sub_file.is_file() and sub_file.suffix == ".csv"]
@staticmethod
async def csv2json(file_path):
df = await asyncio.to_thread(pd.read_csv, file_path)
return json.loads(df.to_json(orient='records', double_precision=4))
async def get_all_data(self, is_format=True):
result = await asyncio.gather(*[self.csv2json(file.resolve()) for file in self.csv_files])
all_data = [{"name": file.stem, "value": result[index]} for index, file in enumerate(self.csv_files)]
if is_format:
return self.format_all_data_value(all_data)
return all_data
@staticmethod
def format_all_data_value(all_data: list[dict]):
start_time = min([data.get("value")[0].get("time") for data in all_data if data.get("value")])
end_time = max([data.get("value")[-1].get("time") for data in all_data if data.get("value")])
# 内存操作是cpu密集型但是瓶颈并不在这里使用多进程效果并不明显
for data in all_data:
format_all_data_dict = {cur_time: {"time": cur_time} for cur_time in range(start_time, end_time + 1)}
if data.get("value"):
old_value = data.get("value")
for value in old_value:
format_all_data_dict[value.get("time")] = value
data["value"] = list(format_all_data_dict.values())
return all_data
# def process_data(data, start_time, end_time):
# try:
# format_all_data_dict = {cur_time: {"time": cur_time} for cur_time in range(start_time, end_time + 1)}
# if data.get("value"):
# old_value = data.get("value")
# for value in old_value:
# format_all_data_dict[value.get("time")] = value
# data["value"] = list(format_all_data_dict.values())
# return data
# except Exception as e:
# logger.error(e)
#
# # 假设 all_data 是你的数据列表,start_time 和 end_time 是你的开始和结束时间
# with ProcessPoolExecutor() as executor:
# futures = [executor.submit(process_data, data, start_time, end_time) for data in all_data]
# done, not_done = wait(futures)
# return done
# numpy 增强版性能表现更好
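# 注意:下面这个同名 staticmethod 会覆盖上面第一个 format_all_data_value 定义(类体中后定义者生效)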
@staticmethod
def format_all_data_value(all_data):
start_time = min([data.get("value")[0].get("time") for data in all_data if data.get("value")])
end_time = max([data.get("value")[-1].get("time") for data in all_data if data.get("value")])
# 生成时间序列
all_times = np.arange(start_time, end_time + 1)
# 遍历所有数据
for data in all_data:
if 'value' in data:
# 将原始数据的时间转换为数组
original_times = np.array([item['time'] for item in data['value']])
# 找出缺失的时间点
missing_times = np.setdiff1d(all_times, original_times)
# 为缺失的时间点创建新的对象
missing_data = [{'time': int(time)} for time in missing_times]
# 将新创建的对象添加到原始数据中
data['value'].extend(missing_data)
# 确保数据按照时间排序
data['value'].sort(key=lambda x: x['time'])
return all_data
| 3,872 | Python | .py | 74 | 39.283784 | 119 | 0.582838 | 15525730080/pc_perf | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,207 | task_handle.py | 15525730080_pc_perf/app/task_handle.py | # coding=utf-8
import asyncio
import os
import traceback
from builtins import *
from multiprocessing.context import Process
import psutil
from app.database import TaskCollection
from app.log import log as logger
from app.core.pc_tools import perf as pc_perf
class TaskHandle(Process):
def __init__(self, serialno: str, target_pid: int, file_dir: str, task_id: int, platform: str):
super(TaskHandle, self).__init__()
self.serialno = serialno
self.target_pid = target_pid
self.file_dir = file_dir
if not os.path.exists(self.file_dir):
os.makedirs(self.file_dir)
self.daemon = True
self.task_id = task_id
self.platform = platform # platform.system()
def start(self):
logger.info("join task handle")
super().start()
def run(self):
logger.info("join task handle run")
asyncio.run(TaskCollection.set_task_running(self.task_id, self.pid))
asyncio.run(pc_perf(self.target_pid, self.file_dir))
@staticmethod
def stop_handle(monitor_pid):
logger.info("Stopping task handle and subprocesses... {0}".format(monitor_pid))
# kill the pc_perf subprocess
current_process = psutil.Process(monitor_pid)
try:
for child in current_process.children(recursive=True):
os.kill(child.pid, 9)
except Exception as e:
logger.error(e)
finally:
try:
os.kill(current_process.pid, 9)
except:
logger.error(traceback.format_exc())
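# Illustrative usage (a sketch, not part of the original module; the real call site is run_task in
# app/view.py): the web layer first creates a task row, then spawns the collector in its own process:
#   handle = TaskHandle(serialno=platform.node(), target_pid=pid, file_dir=file_dir,
#                       task_id=return_task_id, platform=platform.system())
#   handle.start()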
| 1,588 | Python | .py | 43 | 29.186047 | 99 | 0.644951 | 15525730080/pc_perf | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,208 | log.py | 15525730080_pc_perf/app/log.py | from logging import getLogger, INFO, StreamHandler, Formatter
from concurrent_log_handler import ConcurrentRotatingFileHandler
import os
log = getLogger(__name__)
# Use an absolute path to prevent file rotation trouble.
logfile = os.path.abspath("log.log")
# Rotate the log after it reaches 512 MB, keep 1 old copy.
rotateHandler = ConcurrentRotatingFileHandler(logfile, "a", 512 * 1024 * 1024, 1)
log.addHandler(rotateHandler)
log.setLevel(INFO)
streamHandler = StreamHandler()
streamHandler.setLevel(INFO)
streamHandler.setFormatter(Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s'  # log format
))
log.addHandler(streamHandler) | 648 | Python | .py | 16 | 38.75 | 81 | 0.793269 | 15525730080/pc_perf | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,209 | database.py | 15525730080_pc_perf/app/database.py | import asyncio
import datetime
import os
import platform
from sqlalchemy import Column, String, Integer, DateTime, select, or_
from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession
from sqlalchemy.orm import sessionmaker
from contextlib import asynccontextmanager
from sqlalchemy_serializer import SerializerMixin
from sqlalchemy.orm import declarative_base
from app.log import log as logger
logger.info("工作空间{0}".format(os.getcwd()))
db_path = os.path.join(os.getcwd(), "task.sqlite")
logger.info("db path {0}".format(db_path))
async_engine = create_async_engine('sqlite+aiosqlite:///{0}'.format(db_path), echo=False,
pool_pre_ping=True, connect_args={'check_same_thread': False}, pool_recycle=1800)
logger.info("current path {0}".format(os.getcwd()))
AsyncSessionLocal = sessionmaker(bind=async_engine, class_=AsyncSession, expire_on_commit=True,
autocommit=False, autoflush=False)
Base = declarative_base()
@asynccontextmanager
async def async_connect():
session = AsyncSessionLocal()
try:
logger.info("sql begin")
yield session
await session.commit()
logger.info("sql success")
except BaseException as e:
await session.rollback()
logger.error(e)
raise e
finally:
logger.info("sql end")
await session.close()
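# Descriptive note (not part of the original module): async_connect yields one AsyncSession per
# "async with" block, commits when the block exits normally, rolls back and re-raises on any
# exception, and always closes the session, so the TaskCollection methods below never manage
# transactions by hand.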
async def update_table_structure():
async with async_engine.begin() as conn:
        # sync the declared models with the existing database structure (create any missing tables)
await conn.run_sync(Base.metadata.create_all)
class Task(Base, SerializerMixin):
__tablename__ = 'tasks'
id = Column(Integer, primary_key=True, autoincrement=True)
start_time = Column(DateTime, default=None)
end_time = Column(DateTime, default=None)
serialno = Column(String(255), default=None)
    status = Column(Integer)  # 0 not started, 1 running, 2 finished, 3 paused
    file_dir = Column(String(255), default=None)  # directory where the csv files are stored
    target_pid = Column(Integer)  # pid of the process under test
    target_pid_name = Column(String(255), default=None)  # name of the process under test
    monitor_pid = Column(Integer, default=None)  # pid of the process that runs this task and hosts the per-metric threads
    platform = Column(String(50), default="win")  # win | mac | linux
    name = Column(String(255), default=None)  # task name
class TaskLabel(Base, SerializerMixin):
__tablename__ = 'task_label'
id = Column(Integer, primary_key=True, autoincrement=True)
    start_time = Column(DateTime, default=None)  # label start
    end_time = Column(DateTime, default=None)  # label end
    label_name = Column(String(255), default=None)  # label name
class TaskCollection(object):
@classmethod
async def set_task_running(cls, task_id, monitor_pid):
async with async_connect() as session:
async with session.begin():
result = await session.execute(select(Task).filter(Task.id == task_id))
task = result.scalars().first()
assert task, "NOT FIND TASK"
assert task.status == 0, "TASK RUNNING FAIL, TASK STATUS IS {0}".format(task.status)
task.status = 1
task.monitor_pid = monitor_pid
return task.to_dict()
@classmethod
async def get_all_task(cls):
async with async_connect() as session:
async with session.begin():
result = await session.execute(select(Task))
task = result.scalars().fetchall()
result_list = [t.to_dict() for t in task]
result_list.sort(key=lambda x: x.get("start_time"), reverse=True)
return result_list
@classmethod
async def get_item_task(cls, task_id):
async with async_connect() as session:
async with session.begin():
result = await session.execute(select(Task).filter(Task.id == task_id))
task = result.scalars().first()
assert task, "NOT FIND TASK"
return task.to_dict()
@classmethod
async def get_all_stop_task_monitor_pid(cls):
async with async_connect() as session:
async with session.begin():
result = await session.execute(select(Task).filter(Task.status == 2))
tasks = result.scalars().fetchall()
return [task.monitor_pid for task in tasks]
@classmethod
async def create_task(cls, pid, pid_name, file_dir, name):
async with async_connect() as session:
async with session.begin():
result = await session.execute(
select(Task).filter(Task.target_pid == pid).filter(or_(
Task.status == 0,
Task.status == 1,
)))
task = result.scalars().first()
assert not task, "MONITOR PID {0} TASK {1} IS RUN".format(pid, task.name)
new_task = Task(start_time=datetime.datetime.now(), serialno=platform.node(), status=0,
target_pid=pid, platform=platform.system(), name=name, target_pid_name=pid_name)
session.add(new_task)
await session.flush()
file_dir = os.path.join(file_dir, str(new_task.id))
new_task.file_dir = file_dir
await session.flush()
return new_task.id, file_dir
@classmethod
async def stop_task(cls, task_id):
async with async_connect() as session:
async with session.begin():
result = await session.execute(select(Task).filter(Task.id == task_id))
task = result.scalars().first()
assert task, "NOT FIND TASK"
assert task.status != 0, "TASK NOT RUNNING, TASK STATUS IS {0}".format(task.status)
task.status = 2
task.end_time = datetime.datetime.now()
return task.to_dict()
@classmethod
async def delete_task(cls, task_id):
async with async_connect() as session:
async with session.begin():
result = await session.execute(select(Task).filter(Task.id == task_id))
task = result.scalars().first()
assert task, "NOT FIND TASK"
assert task.status != 1, "TASK RUNNING NOT DELETE, TASK STATUS IS {0}".format(task.status)
res = task.to_dict()
await session.delete(task)
return res
@classmethod
async def change_task_name(cls, task_id, new_name):
async with async_connect() as session:
async with session.begin():
result = await session.execute(select(Task).filter(Task.id == task_id))
task = result.scalars().first()
assert task, "NOT FIND TASK"
task.name = new_name
return task.to_dict()
async def create_table():
await update_table_structure()
asyncio.run(create_table())
| 7,119 | Python | .py | 147 | 36.782313 | 116 | 0.614009 | 15525730080/pc_perf | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,210 | view.py | 15525730080_pc_perf/app/view.py | import pyximport
pyximport.install(language_level=3)
import asyncio
import base64
import os
import platform
import shutil
import time
import traceback
from pathlib import Path
from fastapi import FastAPI
from starlette.requests import Request
from starlette.responses import JSONResponse, RedirectResponse
from starlette.staticfiles import StaticFiles
from app.core.pc_tools import sys_info, pids, screenshot
from app.database import TaskCollection
from app.log import log as logger
from app.task_handle import TaskHandle
from app.util import DataCollect
from apscheduler.schedulers.background import BackgroundScheduler
app = FastAPI()
scheduler = BackgroundScheduler()
logger.info("工作空间{0}".format(os.getcwd()))
cur_file = Path(__file__)
BASE_CSV_DIR = cur_file.parent.parent.joinpath("test_result")
BASE_CSV_DIR.mkdir(exist_ok=True)
app.mount("/static", StaticFiles(directory=BASE_CSV_DIR.resolve()), name="static")
class ResultBean(dict):
def __init__(self, code=200, msg="success"):
super().__init__(code=code, msg=msg)
@app.middleware("http")
async def http_filter(request: Request, call_next):
try:
response = await call_next(request)
except BaseException as e:
logger.error(traceback.format_exc())
return JSONResponse(content=ResultBean(code=500, msg=str(e)))
return response
@app.get("/")
def index():
return RedirectResponse(url="/static/index.html")
@app.get("/system_info/")
async def system_info():
return JSONResponse(content=ResultBean(msg=await sys_info()))
@app.get("/pid_img/")
async def pid_img(pid: int):
img_bytes = await screenshot(pid, None)
base64_encoded = base64.b64encode(img_bytes).decode('utf-8')
return base64_encoded
@app.get("/get_pids/")
async def get_pids():
return JSONResponse(content=ResultBean(msg=await pids()))
@app.get("/get_all_task/")
async def get_all_task():
return JSONResponse(content=ResultBean(msg=await TaskCollection.get_all_task()))
@app.get("/run_task/")
async def run_task(request: Request, pid: int, pid_name: str, task_name: str):
start_time = time.time()
status = 0
return_task_id, file_dir = await TaskCollection.create_task(pid, pid_name, BASE_CSV_DIR.resolve(), task_name)
task_process = TaskHandle(serialno=platform.node(), file_dir=file_dir,
task_id=return_task_id, platform=platform.system(), target_pid=pid)
task_process.start()
return JSONResponse(content=ResultBean())
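# Illustrative call (not part of the original module), assuming the server runs locally:
#   GET /run_task/?pid=1234&pid_name=python.exe&task_name=demo
# creates the task row and starts a TaskHandle process that samples the target pid.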
@app.get("/stop_task/")
async def stop_task(request: Request, task_id: int):
task = await TaskCollection.stop_task(task_id)
try:
TaskHandle.stop_handle(task.get("monitor_pid"))
except BaseException as e:
logger.error(e)
logger.error(traceback.format_exc())
return JSONResponse(content=ResultBean())
def check_stop_task_monitor_pid_close():
async def func():
        logger.info('Periodic job: checking for monitor processes that were not cleaned up')
monitor_pid = await TaskCollection.get_all_stop_task_monitor_pid()
all_pids = await pids()
for i in all_pids:
if int(i["pid"]) in monitor_pid:
try:
logger.info("check kill {0}".format(i["pid"]))
TaskHandle.stop_handle(i["pid"])
except:
logger.error(traceback.format_exc())
        logger.info('Periodic job: check for leftover monitor processes finished')
asyncio.run(func())
@app.get("/result/")
async def task_result(request: Request, task_id: int):
item_task = await TaskCollection.get_item_task(task_id)
result = await DataCollect(item_task.get("file_dir")).get_all_data()
return JSONResponse(content=ResultBean(msg=result))
@app.get("/task_status/")
async def task_task(request: Request, task_id: int):
item_task = await TaskCollection.get_item_task(task_id)
    return JSONResponse(content=ResultBean(msg=item_task.get("status")))  # 0 not started, 1 running, 2 finished, 3 paused
@app.get("/delete_task/")
async def delete_task(request: Request, task_id: int):
item = await TaskCollection.delete_task(task_id)
if os.path.exists(item.get("file_dir")):
try:
shutil.rmtree(item.get("file_dir"))
except:
logger.error(traceback.format_exc())
return JSONResponse(content=ResultBean())
@app.on_event("startup")
async def app_start():
scheduler.add_job(check_stop_task_monitor_pid_close, 'interval', seconds=60)
scheduler.start()
| 4,543 | Python | .py | 109 | 35.706422 | 113 | 0.707965 | 15525730080/pc_perf | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,211 | monitor.py | 15525730080_pc_perf/app/core/monitor.py | import asyncio
import csv
import inspect
import json
import time
import traceback
from pathlib import Path
from app.log import log as logger
def print_json(data, *args, **kwargs):
data_json = json.dumps(data)
logger.info(data_json, *args, **kwargs)
class MonitorIter(object):
def __init__(self, stop_event):
self.stop_event = stop_event
def __aiter__(self):
return self
def __anext__(self):
if self.stop_event.is_set():
future = asyncio.Future()
future.set_result(None)
return future
elif not self.stop_event.is_set():
raise StopAsyncIteration()
class Monitor(object):
def __init__(self, func, **kwargs):
super(Monitor, self).__init__()
self.stop_event = asyncio.Event()
self.func = func
self.kwargs = kwargs
self.stop_event.set()
self.key_value = kwargs.get("key_value", [])
self.name = self.func.__name__
self.save_dir = kwargs.get("save_dir")
self.is_out = self.kwargs.get("is_out", True)
if self.is_out:
dir_instance = Path(self.save_dir)
if not dir_instance.exists():
dir_instance.mkdir()
csv_instance = dir_instance.joinpath(self.name + ".csv")
self.csv_path = csv_instance.resolve()
with open(self.csv_path, "w", encoding="utf-8", newline="") as f:
csv_writer = csv.writer(f)
csv_writer.writerow(self.key_value)
self.key_value = [key.split("(")[0] for key in self.key_value]
async def run(self):
async for _ in MonitorIter(self.stop_event):
before_func = time.time()
param_names = inspect.signature(self.func).parameters.keys()
params = {name: self.kwargs.get(name) for name in param_names}
try:
res = await self.func(**params)
if self.is_out and res:
with open(self.csv_path, "a+", encoding="utf-8", newline="") as f:
csv_writer = csv.writer(f)
csv_writer.writerow([res.get(key, "") for key in self.key_value])
except:
logger.error(traceback.format_exc())
finally:
end_func = time.time()
                # keep roughly a one-second sampling cadence
                if (interval_time := int(end_func) - int(before_func)) <= 1:
                    await asyncio.sleep(1 - interval_time)
def stop(self):
self.stop_event.clear()
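# Descriptive note (not part of the original module): Monitor.run awaits the wrapped metric
# function about once per second, writes any returned dict as a row of <save_dir>/<func name>.csv
# (when is_out is True), and keeps looping until stop() clears the event.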
| 2,513 | Python | .py | 63 | 29.666667 | 89 | 0.567501 | 15525730080/pc_perf | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,212 | pc_tools.py | 15525730080_pc_perf/app/core/pc_tools.py | import asyncio
import json
import platform
import subprocess
import threading
import time
import traceback
from io import BytesIO
import psutil
import pynvml
from pathlib import Path
from app.log import log
from app.core.monitor import Monitor
SUPPORT_GPU = True
try:
pynvml.nvmlInit()
except:
log.error(traceback.format_exc())
log.info("本设备gpu获取不适配")
SUPPORT_GPU = False
from PIL import ImageGrab
def print_json(msg):
log.info(json.dumps(msg))
class WinFps(object):
frame_que = list()
single_instance = None
fps_process = None
def __init__(self, pid):
self.pid = pid
def __new__(cls, *args, **kwargs):
if not cls.single_instance:
cls.single_instance = super().__new__(cls)
return cls.single_instance
def fps(self):
if not WinFps.fps_process:
threading.Thread(target=self.start_fps_collect, args=(self.pid,)).start()
if self.check_queue_head_frames_complete():
return self.pop_complete_fps()
@staticmethod
def check_queue_head_frames_complete():
if not WinFps.frame_que:
return False
head_time = int(WinFps.frame_que[0])
end_time = int(WinFps.frame_que[-1])
if head_time == end_time:
return False
return True
@staticmethod
def pop_complete_fps():
head_time = int(WinFps.frame_que[0])
complete_fps = []
while int(WinFps.frame_que[0]) == head_time:
complete_fps.append(WinFps.frame_que.pop(0))
return complete_fps
def start_fps_collect(self, pid):
start_fps_collect_time = int(time.time())
PresentMon = Path(__file__).parent.parent.parent.joinpath("PresentMon.exe")
res_terminate = subprocess.Popen(
[PresentMon, "-process_id", str(pid), "-output_stdout", "-stop_existing_session"],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
WinFps.fps_process = res_terminate
res_terminate.stdout.readline()
while not res_terminate.poll():
line = res_terminate.stdout.readline()
if not line:
try:
res_terminate.kill()
except:
traceback.print_exc()
break
try:
line = line.decode(encoding="utf-8")
line_list = line.split(",")
print("line ", line_list)
WinFps.frame_que.append(start_fps_collect_time + round(float(line_list[7]), 7))
except:
time.sleep(1)
log.error(traceback.format_exc())
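# Descriptive note (not part of the original module): each stdout line from PresentMon describes one
# presented frame; the value at column index 7 is treated as a seconds offset from collection start,
# so every entry pushed onto frame_que is an absolute timestamp. fps() later pops all timestamps that
# share the oldest whole second, and the caller reports len(frames) as that second's FPS.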
async def sys_info():
def real_func():
current_platform = platform.system()
computer_name = platform.node()
res = {"platform": current_platform, "computer_name": computer_name, "time": time.time(),
"cpu_cores": psutil.cpu_count(), "ram": "{0}G".format(int(psutil.virtual_memory().total / 1024 ** 3)),
"rom": "{0}G".format(int(psutil.disk_usage('/').total / 1024 ** 3))}
print_json(res)
return res
return await asyncio.wait_for(asyncio.to_thread(real_func), timeout=10)
async def pids():
def real_func():
process_list = []
for proc in psutil.process_iter(attrs=['name', 'pid', 'cmdline', 'username']):
try:
if proc.is_running():
process_list.append(
{"name": proc.info['name'], "pid": proc.info['pid'], "cmd": proc.info['cmdline'],
"username": proc.username()})
except Exception as e:
log.error(e)
process_list.sort(key=lambda x: x['name'])
# print_json(process_list)
return process_list
return await asyncio.wait_for(asyncio.to_thread(real_func), timeout=10)
async def screenshot(pid, save_dir):
def real_func(pid, save_dir):
start_time = int(time.time())
if pid:
window = None
if platform.system() == "Windows":
import ctypes
import pygetwindow as gw
def get_pid(hwnd):
pid = ctypes.wintypes.DWORD()
ctypes.windll.user32.GetWindowThreadProcessId(hwnd, ctypes.byref(pid))
return pid.value
def get_window_by_pid(pid):
for window in gw.getAllWindows():
if get_pid(window._hWnd) == pid:
return window
return None
window = get_window_by_pid(int(pid))
if window:
screenshot = ImageGrab.grab(
bbox=(window.left, window.top, window.left + window.width, window.top + window.height),
all_screens=True)
else:
screenshot = ImageGrab.grab(all_screens=True)
if save_dir:
dir_instance = Path(save_dir)
screenshot_dir = dir_instance.joinpath("screenshot")
screenshot_dir.mkdir(exist_ok=True)
screenshot.save(screenshot_dir.joinpath(str(start_time) + ".png"), format="PNG")
else:
output_buffer = BytesIO()
screenshot.save(output_buffer, format='PNG')
            output_buffer.seek(0)  # rewind the buffer to the start
image_data = output_buffer.getvalue()
return image_data
return await asyncio.wait_for(asyncio.to_thread(real_func, pid, save_dir), timeout=10)
async def cpu(pid):
def real_func(pid):
start_time = int(time.time())
proc = psutil.Process(pid=int(pid))
cpu_usage = proc.cpu_percent(interval=1)
cpu_count = psutil.cpu_count()
res = {"cpu_usage": cpu_usage / cpu_count, "cpu_usage_all": cpu_usage,
"cpu_core_num": cpu_count, "time": start_time}
print_json(res)
return res
return await asyncio.wait_for(asyncio.to_thread(real_func, pid), timeout=10)
async def memory(pid):
def real_func(pid):
start_time = int(time.time())
process = psutil.Process(int(pid))
process_memory_info = process.memory_info()
process_memory_usage = process_memory_info.rss / (1024 ** 2) # In MB
memory_info = {"process_memory_usage": process_memory_usage, "time": start_time}
print_json(memory_info)
return memory_info
return await asyncio.wait_for(asyncio.to_thread(real_func, pid), timeout=10)
async def fps(pid):
if platform.system() != "Windows":
return {"type": "fps", "time": int(time.time())}
frames = WinFps(pid).fps()
if not frames:
return frames
res = {"type": "fps", "fps": len(frames), "frames": frames, "time": int(frames[0]) if frames else int(time.time())}
print_json(res)
return res
async def gpu(pid):
def real_func(pid):
pid = int(pid)
start_time = int(time.time())
if SUPPORT_GPU:
device_count = pynvml.nvmlDeviceGetCount()
res = None
for i in range(device_count):
handle = pynvml.nvmlDeviceGetHandleByIndex(i)
processes = pynvml.nvmlDeviceGetComputeRunningProcesses(handle)
for process in processes:
print(process)
if process.pid == pid:
gpu_Utilization = pynvml.nvmlDeviceGetUtilizationRates(handle)
                        gpu_utilization_percentage = gpu_Utilization.gpu  # GPU compute utilization (%)
res = {"gpu": gpu_utilization_percentage, "time": start_time}
print_json(res)
return res
return res
else:
return {"time": start_time}
return await asyncio.wait_for(asyncio.to_thread(real_func, pid), timeout=10)
async def process_info(pid):
def real_func(pid):
start_time = int(time.time())
process = psutil.Process(int(pid))
num_handles = None
num_threads = None
try:
num_handles = process.num_handles()
except:
log.error(traceback.format_exc())
try:
num_threads = process.num_threads()
except:
log.error(traceback.format_exc())
res = {"time": start_time}
if num_handles: res["num_handles"] = num_handles
if num_threads: res["num_threads"] = num_threads
print_json(res)
return res
return await asyncio.wait_for(asyncio.to_thread(real_func, pid), timeout=10)
async def perf(pid, save_dir):
monitors = {
"cpu": Monitor(cpu,
pid=pid,
key_value=["time", "cpu_usage(%)", "cpu_usage_all(%)", "cpu_core_num(个)"],
save_dir=save_dir),
"memory": Monitor(memory,
pid=pid,
key_value=["time", "process_memory_usage(M)"],
save_dir=save_dir),
"process_info": Monitor(process_info,
pid=pid,
key_value=["time", "num_threads(个)", "num_handles(个)"],
save_dir=save_dir),
"fps": Monitor(fps,
pid=pid,
key_value=["time", "fps(帧)", "frames"],
save_dir=save_dir),
"gpu": Monitor(gpu,
pid=pid,
key_value=["time", "gpu(%)"],
save_dir=save_dir),
"screenshot": Monitor(screenshot,
pid=pid,
save_dir=save_dir, is_out=False)
}
run_monitors = [monitor.run() for name, monitor in monitors.items()]
await asyncio.gather(*run_monitors)
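# Descriptive note (not part of the original module): perf() is what TaskHandle.run drives via
# asyncio.run(pc_perf(pid, file_dir)); every Monitor above samples its metric concurrently and
# appends to its own <save_dir>/<metric>.csv until the hosting process is killed.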
| 9,888 | Python | .py | 239 | 29.531381 | 119 | 0.556323 | 15525730080/pc_perf | 8 | 1 | 1 | GPL-3.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,213 | ensemble.py | huchenlei_sd-webui-controlnet-marigold/marigold/util/ensemble.py | # Test align depth images
# Author: Bingxin Ke
# Last modified: 2023-12-11
import numpy as np
import torch
from scipy.optimize import minimize
def inter_distances(tensors):
"""
    To calculate the distance between each pair of depth maps.
"""
distances = []
for i, j in torch.combinations(torch.arange(tensors.shape[0])):
arr1 = tensors[i:i+1]
arr2 = tensors[j:j+1]
distances.append(arr1 - arr2)
dist = torch.concatenate(distances, dim=0)
return dist
def ensemble_depths(input_images, regularizer_strength=0.02, max_iter=2, tol=1e-3, reduction='median', max_res=None, disp=False, device='cuda'):
"""
To ensemble multiple affine-invariant depth images (up to scale and shift),
by aligning estimating the scale and shift
"""
device = input_images.device
original_input = input_images.clone()
n_img = input_images.shape[0]
ori_shape = input_images.shape
if max_res is not None:
scale_factor = torch.min(max_res / torch.tensor(ori_shape[-2:]))
if scale_factor < 1:
downscaler = torch.nn.Upsample(scale_factor=scale_factor, mode='nearest')
            input_images = downscaler(input_images)
# init guess
_min = np.min(input_images.reshape((n_img, -1)).cpu().numpy(), axis=1)
_max = np.max(input_images.reshape((n_img, -1)).cpu().numpy(), axis=1)
s_init = 1.0 / (_max - _min).reshape((-1, 1, 1))
t_init = (-1 * s_init.flatten() * _min.flatten()).reshape((-1, 1, 1))
x = np.concatenate([s_init, t_init]).reshape(-1)
input_images = input_images.to(device)
# objective function
def closure(x):
x = x.astype(np.float32)
l = len(x)
s = x[:int(l/2)]
t = x[int(l/2):]
s = torch.from_numpy(s).to(device)
t = torch.from_numpy(t).to(device)
transformed_arrays = input_images * s.view((-1, 1, 1)) + t.view((-1, 1, 1))
dists = inter_distances(transformed_arrays)
sqrt_dist = torch.sqrt(torch.mean(dists**2))
if 'mean' == reduction:
pred = torch.mean(transformed_arrays, dim=0)
elif 'median' == reduction:
pred = torch.median(transformed_arrays, dim=0).values
else:
raise ValueError
near_err = torch.sqrt((0 - torch.min(pred))**2)
far_err = torch.sqrt((1 - torch.max(pred))**2)
err = sqrt_dist + (near_err + far_err) * regularizer_strength
err = err.detach().cpu().numpy()
return err
res = minimize(closure, x, method='BFGS', tol=tol, options={'maxiter': max_iter, 'disp': disp})
x = res.x
l = len(x)
s = x[:int(l/2)]
t = x[int(l/2):]
# Prediction
s = torch.from_numpy(s).to(device)
t = torch.from_numpy(t).to(device)
transformed_arrays = original_input * s.view(-1, 1, 1) + t.view(-1, 1, 1)
if 'mean' == reduction:
aligned_images = torch.mean(transformed_arrays, dim=0)
std = torch.std(transformed_arrays, dim=0)
uncertainty = std
elif 'median' == reduction:
aligned_images = torch.median(transformed_arrays, dim=0).values
# MAD (median absolute deviation) as uncertainty indicator
abs_dev = torch.abs(transformed_arrays - aligned_images)
mad = torch.median(abs_dev, dim=0).values
uncertainty = mad
else:
raise ValueError
# Scale and shift to [0, 1]
_min = torch.min(aligned_images)
_max = torch.max(aligned_images)
aligned_images = (aligned_images - _min) / (_max - _min)
uncertainty /= (_max - _min)
return aligned_images, uncertainty
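# Descriptive note (not part of the original file): the BFGS closure above minimizes
#   sqrt(mean over image pairs of (s_i*d_i + t_i - s_j*d_j - t_j)^2)
#     + regularizer_strength * (|min(pred)| + |1 - max(pred)|)
# where pred is the per-pixel mean or median of the aligned stack, so the individual predictions are
# pulled toward one another while the ensembled map is pulled toward the [0, 1] range.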
| 3,688 | Python | .py | 87 | 34.942529 | 144 | 0.62156 | huchenlei/sd-webui-controlnet-marigold | 8 | 0 | 2 | GPL-3.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,214 | image_util.py | huchenlei_sd-webui-controlnet-marigold/marigold/util/image_util.py |
import matplotlib
import numpy as np
import torch
from PIL import Image
def colorize_depth_maps(depth_map, min_depth, max_depth, cmap='Spectral', valid_mask=None):
"""
Colorize depth maps.
"""
assert len(depth_map.shape) >= 2, "Invalid dimension"
if isinstance(depth_map, torch.Tensor):
depth = depth_map.detach().clone().squeeze().numpy()
elif isinstance(depth_map, np.ndarray):
depth = depth_map.copy().squeeze()
# reshape to [ (B,) H, W ]
if depth.ndim < 3:
depth = depth[np.newaxis, :, :]
# colorize
cm = matplotlib.colormaps[cmap]
depth = ((depth - min_depth) / (max_depth - min_depth)).clip(0, 1)
img_colored_np = cm(depth, bytes=False)[:,:,:,0:3] # value from 0 to 1
img_colored_np = np.rollaxis(img_colored_np, 3, 1)
if valid_mask is not None:
if isinstance(depth_map, torch.Tensor):
valid_mask = valid_mask.detach().numpy()
valid_mask = valid_mask.squeeze() # [H, W] or [B, H, W]
if valid_mask.ndim < 3:
valid_mask = valid_mask[np.newaxis, np.newaxis, :, :]
else:
valid_mask = valid_mask[:, np.newaxis, :, :]
valid_mask = np.repeat(valid_mask, 3, axis=1)
img_colored_np[~valid_mask] = 0
if isinstance(depth_map, torch.Tensor):
img_colored = torch.from_numpy(img_colored_np).float()
elif isinstance(depth_map, np.ndarray):
img_colored = img_colored_np
return img_colored
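# Illustrative usage (a sketch, not part of the original file), assuming depth_pred holds values in [0, 1]:
#   colored = colorize_depth_maps(depth_pred, 0, 1, cmap='Spectral')  # [B, 3, H, W], floats in [0, 1]
#   img_hwc = chw2hwc(colored[0])                                     # [H, W, 3], ready for display or saving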
def chw2hwc(chw):
assert 3 == len(chw.shape)
if isinstance(chw, torch.Tensor):
hwc = torch.permute(chw, (1, 2, 0))
elif isinstance(chw, np.ndarray):
hwc = np.moveaxis(chw, 0, -1)
return hwc
def resize_max_res(img: Image.Image, max_edge_resolution):
original_width, original_height = img.size
downscale_factor = min(max_edge_resolution / original_width, max_edge_resolution / original_height)
new_width = int(original_width * downscale_factor)
new_height = int(original_height * downscale_factor)
resized_img = img.resize((new_width, new_height))
return resized_img
| 2,149 | Python | .py | 50 | 35.92 | 103 | 0.642857 | huchenlei/sd-webui-controlnet-marigold | 8 | 0 | 2 | GPL-3.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,215 | batchsize.py | huchenlei_sd-webui-controlnet-marigold/marigold/util/batchsize.py | # Author: Bingxin Ke
# Last modified: 2023-12-11
import torch
import math
# Search table for suggested max. inference batch size
bs_search_table = [
# tested on A100-PCIE-80GB
{"res": 768, "total_vram": 79, "bs": 35},
{"res": 1024, "total_vram": 79, "bs": 20},
# tested on A100-PCIE-40GB
{"res": 768, "total_vram": 39, "bs": 15},
{"res": 1024, "total_vram": 39, "bs": 8},
# tested on RTX3090, RTX4090
{"res": 512, "total_vram": 23, "bs": 20},
{"res": 768, "total_vram": 23, "bs": 7},
{"res": 1024, "total_vram": 23, "bs": 3},
# tested on GTX1080Ti
{"res": 512, "total_vram": 10, "bs": 5},
{"res": 768, "total_vram": 10, "bs": 2},
]
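# Descriptive note (not part of the original file): find_batch_size walks this table sorted by
# resolution (ascending) and total_vram (descending) and returns the "bs" of the first row whose
# res covers input_res and whose total_vram fits the detected GPU memory; the value is capped at
# n_repeat (and reduced to ceil(n_repeat / 2) when it falls strictly between that and n_repeat),
# and 1 is returned when no row matches.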
def find_batch_size(n_repeat, input_res):
total_vram = torch.cuda.mem_get_info()[1] / 1024.0**3
for settings in sorted(bs_search_table, key=lambda k: (k['res'], -k['total_vram'])):
if input_res <= settings['res'] and total_vram >= settings['total_vram']:
bs = settings['bs']
if bs > n_repeat:
bs = n_repeat
elif bs > math.ceil(n_repeat / 2) and bs < n_repeat:
bs = math.ceil(n_repeat / 2)
return bs
return 1 | 1,205 | Python | .py | 31 | 32.548387 | 88 | 0.558419 | huchenlei/sd-webui-controlnet-marigold | 8 | 0 | 2 | GPL-3.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,216 | seed_all.py | huchenlei_sd-webui-controlnet-marigold/marigold/util/seed_all.py |
import numpy as np
import random
import torch
def seed_all(seed: int = 0):
"""
Set random seeds of all components.
"""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed) | 245 | Python | .py | 11 | 18.545455 | 39 | 0.689655 | huchenlei/sd-webui-controlnet-marigold | 8 | 0 | 2 | GPL-3.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,217 | stacked_depth_AE.py | huchenlei_sd-webui-controlnet-marigold/marigold/model/stacked_depth_AE.py | # Author: Bingxin Ke
# Last modified: 2023-12-05
import torch
import torch.nn as nn
import logging
from diffusers import AutoencoderKL
class StackedDepthAE(nn.Module):
"""
Tailored pretrained image VAE for depth map.
Encode: Depth images are repeated into 3 channels.
    Decode: The average of the 3 channels is taken as output.
"""
def __init__(self, pretrained_path, subfolder=None) -> None:
super().__init__()
self.vae: AutoencoderKL = AutoencoderKL.from_pretrained(pretrained_path, subfolder=subfolder)
logging.info(f"pretrained AutoencoderKL loaded from: {pretrained_path}")
def forward(self, depth_in):
depth_latent = self.encode(depth_in)
depth_out = self.decode(depth_latent)
return depth_out
def to(self, *args, **kwargs):
self.vae.to(*args, **kwargs)
@staticmethod
def _stack_depth_images(depth_in):
if 4 == len(depth_in.shape):
stacked = depth_in.repeat(1, 3, 1, 1)
elif 3 == len(depth_in.shape):
            stacked = depth_in.unsqueeze(1)
            stacked = stacked.repeat(1, 3, 1, 1)
return stacked
def encode(self, depth_in):
stacked = self._stack_depth_images(depth_in)
h = self.vae.encoder(stacked)
moments = self.vae.quant_conv(h)
mean, logvar = torch.chunk(moments, 2, dim=1)
depth_latent = mean
return depth_latent
def decode(self, depth_latent):
z = self.vae.post_quant_conv(depth_latent)
stacked = self.vae.decoder(z)
depth_mean = stacked.mean(dim=1, keepdim=True)
return depth_mean | 1,640 | Python | .py | 42 | 31.833333 | 101 | 0.646948 | huchenlei/sd-webui-controlnet-marigold | 8 | 0 | 2 | GPL-3.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,218 | rgb_encoder.py | huchenlei_sd-webui-controlnet-marigold/marigold/model/rgb_encoder.py | # Author: Bingxin Ke
# Last modified: 2023-12-05
import torch
import torch.nn as nn
import logging
from diffusers import AutoencoderKL
class RGBEncoder(nn.Module):
"""
The encoder of pretrained Stable Diffusion VAE
"""
def __init__(self, pretrained_path, subfolder=None) -> None:
super().__init__()
vae: AutoencoderKL = AutoencoderKL.from_pretrained(pretrained_path, subfolder=subfolder)
logging.info(f"pretrained AutoencoderKL loaded from: {pretrained_path}")
self.rgb_encoder = nn.Sequential(
vae.encoder,
vae.quant_conv,
)
def to(self, *args, **kwargs):
self.rgb_encoder.to(*args, **kwargs)
def forward(self, rgb_in):
return self.encode(rgb_in)
def encode(self, rgb_in):
moments = self.rgb_encoder(rgb_in) # [B, 8, H/8, W/8]
mean, logvar = torch.chunk(moments, 2, dim=1)
rgb_latent = mean
return rgb_latent | 994 | Python | .py | 27 | 29 | 96 | 0.64617 | huchenlei/sd-webui-controlnet-marigold | 8 | 0 | 2 | GPL-3.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,219 | marigold_pipeline.py | huchenlei_sd-webui-controlnet-marigold/marigold/model/marigold_pipeline.py | # Author: Bingxin Ke
# Last modified: 2023-12-11
import logging
from typing import Dict
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
DDPMScheduler,
PNDMScheduler,
DEISMultistepScheduler,
SchedulerMixin,
UNet2DConditionModel,
)
from torch import nn
from torch.nn import Conv2d
from torch.nn.parameter import Parameter
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer
from .rgb_encoder import RGBEncoder
from .stacked_depth_AE import StackedDepthAE
class MarigoldPipeline(nn.Module):
"""
Marigold monocular depth estimator.
"""
def __init__(
self,
unet_pretrained_path: Dict, # {path: xxx, subfolder: xxx}
rgb_encoder_pretrained_path: Dict,
depht_ae_pretrained_path: Dict,
noise_scheduler_pretrained_path: Dict,
tokenizer_pretrained_path: Dict,
text_encoder_pretrained_path: Dict,
empty_text_embed=None,
trainable_unet=False,
rgb_latent_scale_factor=0.18215,
depth_latent_scale_factor=0.18215,
noise_scheduler_type=None,
enable_gradient_checkpointing=False,
enable_xformers=True,
) -> None:
super().__init__()
self.rgb_latent_scale_factor = rgb_latent_scale_factor
self.depth_latent_scale_factor = depth_latent_scale_factor
self.device = "cpu"
# ******* Initialize modules *******
# Trainable modules
self.trainable_module_dic: Dict[str, nn.Module] = {}
self.trainable_unet = trainable_unet
# Denoising UNet
self.unet: UNet2DConditionModel = UNet2DConditionModel.from_pretrained(
unet_pretrained_path["path"], subfolder=unet_pretrained_path["subfolder"]
)
logging.info(f"pretrained UNet loaded from: {unet_pretrained_path}")
if 8 != self.unet.config["in_channels"]:
self._replace_unet_conv_in()
logging.warning("Unet conv_in layer is replaced")
if enable_xformers:
self.unet.enable_xformers_memory_efficient_attention()
else:
self.unet.disable_xformers_memory_efficient_attention()
# Image encoder
self.rgb_encoder = RGBEncoder(
pretrained_path=rgb_encoder_pretrained_path["path"],
subfolder=rgb_encoder_pretrained_path["subfolder"],
)
logging.info(
f"pretrained RGBEncoder loaded from: {rgb_encoder_pretrained_path}"
)
self.rgb_encoder.requires_grad_(False)
# Depth encoder-decoder
self.depth_ae = StackedDepthAE(
pretrained_path=depht_ae_pretrained_path["path"],
subfolder=depht_ae_pretrained_path["subfolder"],
)
logging.info(
f"pretrained Depth Autoencoder loaded from: {rgb_encoder_pretrained_path}"
)
# Trainability
# unet
if self.trainable_unet:
self.unet.requires_grad_(True)
self.trainable_module_dic["unet"] = self.unet
logging.debug(f"UNet is set to trainable")
else:
self.unet.requires_grad_(False)
logging.debug(f"UNet is set to frozen")
# Gradient checkpointing
if enable_gradient_checkpointing:
self.unet.enable_gradient_checkpointing()
self.depth_ae.vae.enable_gradient_checkpointing()
# Noise scheduler
if "DDPMScheduler" == noise_scheduler_type:
self.noise_scheduler: SchedulerMixin = DDPMScheduler.from_pretrained(
noise_scheduler_pretrained_path["path"],
subfolder=noise_scheduler_pretrained_path["subfolder"],
)
elif "DDIMScheduler" == noise_scheduler_type:
self.noise_scheduler: SchedulerMixin = DDIMScheduler.from_pretrained(
noise_scheduler_pretrained_path["path"],
subfolder=noise_scheduler_pretrained_path["subfolder"],
)
elif "PNDMScheduler" == noise_scheduler_type:
self.noise_scheduler: SchedulerMixin = PNDMScheduler.from_pretrained(
noise_scheduler_pretrained_path["path"],
subfolder=noise_scheduler_pretrained_path["subfolder"],
)
elif "DEISMultistepScheduler" == noise_scheduler_type:
self.noise_scheduler: SchedulerMixin = DEISMultistepScheduler.from_pretrained(
noise_scheduler_pretrained_path["path"],
subfolder=noise_scheduler_pretrained_path["subfolder"],
)
else:
raise NotImplementedError
# Text embed for empty prompt (always in CPU)
if empty_text_embed is None:
tokenizer: CLIPTokenizer = CLIPTokenizer.from_pretrained(
tokenizer_pretrained_path["path"],
subfolder=tokenizer_pretrained_path["subfolder"],
)
text_encoder: CLIPTextModel = CLIPTextModel.from_pretrained(
text_encoder_pretrained_path["path"],
subfolder=text_encoder_pretrained_path["subfolder"],
)
with torch.no_grad():
self.empty_text_embed = self._encode_text(
"", tokenizer, text_encoder
).detach()#.to(dtype=precision) # [1, 2, 1024]
else:
self.empty_text_embed = empty_text_embed
def from_pretrained(pretrained_path, **kwargs):
return __class__(
unet_pretrained_path={"path": pretrained_path, "subfolder": "unet"},
rgb_encoder_pretrained_path={"path": pretrained_path, "subfolder": "vae"},
depht_ae_pretrained_path={"path": pretrained_path, "subfolder": "vae"},
noise_scheduler_pretrained_path={
"path": pretrained_path,
"subfolder": "scheduler",
},
tokenizer_pretrained_path={
"path": pretrained_path,
"subfolder": "tokenizer",
},
text_encoder_pretrained_path={
"path": pretrained_path,
"subfolder": "text_encoder",
},
**kwargs,
)
def _replace_unet_conv_in(self):
# Replace the first layer to accept 8 in_channels. Only applied when loading pretrained SD U-Net
_weight = self.unet.conv_in.weight.clone() # [320, 4, 3, 3]
_bias = self.unet.conv_in.bias.clone() # [320]
_weight = _weight.repeat((1, 2, 1, 1)) # Keep selected channel(s)
# half the activation magnitude
_weight *= 0.5
_bias *= 0.5
# new conv_in channel
_n_convin_out_channel = self.unet.conv_in.out_channels
_new_conv_in = Conv2d(
8, _n_convin_out_channel, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)
)
_new_conv_in.weight = Parameter(_weight)
_new_conv_in.bias = Parameter(_bias)
self.unet.conv_in = _new_conv_in
# replace config
self.unet.config["in_channels"] = 8
return
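        # Descriptive note (not part of the original file): duplicating the 4 latent input channels
        # would double the convolution response for identical inputs, so the copied weights are
        # halved (the code above halves the bias as well) to keep the initial activations close to
        # the original Stable Diffusion U-Net.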
def to(self, device):
self.rgb_encoder.to(device)
self.depth_ae.to(device)
self.unet.to(device)
self.empty_text_embed = self.empty_text_embed.to(device)
self.device = device
return self
def forward(
self,
rgb_in,
num_inference_steps: int = 50,
num_output_inter_results: int = 0,
show_pbar=False,
init_depth_latent=None,
return_depth_latent=False,
):
device = rgb_in.device
precision = self.unet.dtype
# Set timesteps
self.noise_scheduler.set_timesteps(num_inference_steps, device=device)
timesteps = self.noise_scheduler.timesteps # [T]
# Encode image
rgb_latent = self.encode_rgb(rgb_in)
# Initial depth map (noise)
if init_depth_latent is not None:
init_depth_latent = init_depth_latent.to(dtype=precision)
assert (
init_depth_latent.shape == rgb_latent.shape
), "initial depth latent should be the size of [B, 4, H/8, W/8]"
depth_latent = init_depth_latent
depth_latent = torch.randn(rgb_latent.shape, device=device, dtype=precision)
else:
depth_latent = torch.randn(rgb_latent.shape, device=device) # [B, 4, h, w]
        # Expand text embedding for batch
batch_empty_text_embed = self.empty_text_embed.repeat(
(rgb_latent.shape[0], 1, 1)
).to(device=device, dtype=precision) # [B, 2, 1024]
# Export intermediate denoising steps
if num_output_inter_results > 0:
depth_latent_ls = []
inter_steps = []
_idx = (
-1
* (
np.arange(0, num_output_inter_results)
* num_inference_steps
/ num_output_inter_results
)
.round()
.astype(int)
- 1
)
steps_to_output = timesteps[_idx]
# Denoising loop
if show_pbar:
iterable = tqdm(enumerate(timesteps), total=len(timesteps), leave=False, desc="denoising")
else:
iterable = enumerate(timesteps)
for i, t in iterable:
unet_input = torch.cat(
[rgb_latent, depth_latent], dim=1
) # this order is important
unet_input = unet_input.to(dtype=precision)
# predict the noise residual
noise_pred = self.unet(
unet_input, t, encoder_hidden_states=batch_empty_text_embed
).sample # [B, 4, h, w]
# compute the previous noisy sample x_t -> x_t-1
depth_latent = self.noise_scheduler.step(
noise_pred, t, depth_latent
).prev_sample.to(dtype=precision)
if num_output_inter_results > 0 and t in steps_to_output:
depth_latent_ls.append(depth_latent.detach().clone())
#depth_latent_ls = depth_latent_ls.to(dtype=precision)
inter_steps.append(t - 1)
# Decode depth latent
if num_output_inter_results > 0:
assert 0 in inter_steps
depth = [self.decode_depth(lat) for lat in depth_latent_ls]
if return_depth_latent:
return depth, inter_steps, depth_latent_ls
else:
return depth, inter_steps
else:
depth = self.decode_depth(depth_latent)
if return_depth_latent:
return depth, depth_latent
else:
return depth
def encode_rgb(self, rgb_in):
rgb_latent = self.rgb_encoder(rgb_in) # [B, 4, h, w]
rgb_latent = rgb_latent * self.rgb_latent_scale_factor
return rgb_latent
def encode_depth(self, depth_in):
depth_latent = self.depth_ae.encode(depth_in)
depth_latent = depth_latent * self.depth_latent_scale_factor
return depth_latent
def decode_depth(self, depth_latent):
#depth_latent = depth_latent.to(dtype=torch.float16)
depth_latent = depth_latent / self.depth_latent_scale_factor
depth = self.depth_ae.decode(depth_latent) # [B, 1, H, W]
return depth
@staticmethod
def _encode_text(prompt, tokenizer, text_encoder):
text_inputs = tokenizer(
prompt,
padding="do_not_pad",
max_length=tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
text_input_ids = text_inputs.input_ids.to(text_encoder.device)
text_embed = text_encoder(text_input_ids)[0]
return text_embed
| 11,755 | Python | .py | 282 | 30.907801 | 104 | 0.595626 | huchenlei/sd-webui-controlnet-marigold | 8 | 0 | 2 | GPL-3.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,220 | preprocessor_marigold.py | huchenlei_sd-webui-controlnet-marigold/scripts/preprocessor_marigold.py | import torch
import numpy as np
from marigold.model.marigold_pipeline import MarigoldPipeline
# sd-webui-controlnet
from internal_controlnet.external_code import Preprocessor, PreprocessorParameter
from scripts.utils import resize_image_with_pad
# A1111
from modules import devices
@torch.no_grad()
@torch.inference_mode()
def numpy_to_pytorch(x):
y = x.astype(np.float32) / 255.0
y = y[None]
y = np.ascontiguousarray(y.copy())
y = torch.from_numpy(y).float()
return y
class PreprocessorMarigold(Preprocessor):
def __init__(self, device=None):
super().__init__(name = "depth_marigold")
self.tags = ["Depth"]
self.slider_resolution = PreprocessorParameter(
label="Resolution",
minimum=128,
maximum=2048,
value=768,
step=8,
visible=True,
)
self.slider_1 = PreprocessorParameter(
label="Steps",
minimum=1,
maximum=50,
value=20,
step=1,
visible=True,
)
self.show_control_mode = True
self.do_not_need_model = False
self.sorting_priority = 100 # higher goes to top in the list
self.model = None
        self.device = (
            devices.get_device_for("controlnet")
            if device is None
            else torch.device(device)
        )
def load_model(self):
if self.model is None:
self.model = MarigoldPipeline.from_pretrained(
pretrained_path="Bingxin/Marigold",
enable_xformers=False,
noise_scheduler_type="DDIMScheduler",
)
return self.model.to(device=self.device)
def unload_model(self):
self.model.to(device="cpu")
def __call__(
self,
input_image,
resolution,
slider_1=None,
slider_2=None,
slider_3=None,
**kwargs
):
input_image, remove_pad = resize_image_with_pad(input_image, resolution)
pipeline = self.load_model()
with torch.no_grad():
img = (
numpy_to_pytorch(input_image).movedim(-1, 1).to(device=pipeline.device)
)
img = img * 2.0 - 1.0
depth = pipeline(img, num_inference_steps=slider_1, show_pbar=False)
depth = 0.5 - depth * 0.5
depth = depth.movedim(1, -1)[0].cpu().numpy()
depth = np.concatenate([depth, depth, depth], axis=2) # Expand to RGB
depth_image = (depth * 255.0).clip(0, 255).astype(np.uint8)
self.unload_model()
return remove_pad(depth_image)
Preprocessor.add_supported_preprocessor(PreprocessorMarigold())
| 2,725 | Python | .py | 79 | 25.443038 | 87 | 0.591635 | huchenlei/sd-webui-controlnet-marigold | 8 | 0 | 2 | GPL-3.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,222 | sunoise.py | bvhari_ComfyUI_SUNoise/sunoise.py | import comfy.samplers
import comfy.model_patcher
from comfy.k_diffusion.sampling import get_ancestral_step, to_d, BrownianTreeNoiseSampler
import torch
import numpy as np
from tqdm.auto import trange
def su_noise_sampler(x, _seed, noise_type):
def scaled_uniform_noise_multires(sigma_down):
range_limit = sigma_down.item()
noise_batch = []
noise_channels = []
noise_stack = []
noise_generator = torch.Generator(device='cpu')
seed = _seed + int(1000*range_limit)
noise_generator.manual_seed(seed)
latent_size = torch.tensor(x.size()[2:])
for i in range(x.size()[0]): # batch
noise_channels = []
for j in range(x.size()[1]): # channels
noise_stack = []
for f in (1, 2):
noise = torch.rand(*((latent_size/f).to(dtype=torch.int32).tolist()), generator=noise_generator, dtype=torch.float32, device="cpu")
noise = torch.unsqueeze(noise, 0)
noise = torch.unsqueeze(noise, 0)
noise = torch.nn.functional.interpolate(noise, size=x.size()[2:], mode='nearest-exact')
noise = torch.squeeze(noise, (0, 1))
noise_stack.append(noise)
noise_stack = torch.stack(noise_stack, 0)
noise_channels_multires = torch.sum(noise_stack, dim=0, keepdim=False)
scaled_noise = ((noise_channels_multires-noise_channels_multires.min())*(2*range_limit/(noise_channels_multires.max()-noise_channels_multires.min()))) - range_limit
noise_channels.append(scaled_noise)
scaled_noise_channels = torch.stack(noise_channels, 0)
noise_batch.append(scaled_noise_channels)
scaled_noise_batch = torch.stack(noise_batch, 0)
scaled_noise_batch = scaled_noise_batch.to(device=x.device, dtype=x.dtype)
return scaled_noise_batch
def scaled_uniform_noise(sigma_down):
range_limit = sigma_down.item()
noise_batch = []
noise_channels = []
noise_generator = torch.Generator(device='cpu')
seed = _seed + int(1000*range_limit)
noise_generator.manual_seed(seed)
for i in range(x.size()[0]): # batch
noise_channels = []
for j in range(x.size()[1]): # channels
noise = torch.rand(x.size()[2:], generator=noise_generator, dtype=torch.float32, device="cpu")
scaled_noise = (-1*range_limit) + (2*range_limit*noise)
noise_channels.append(scaled_noise)
scaled_noise_channels = torch.stack(noise_channels, 0)
noise_batch.append(scaled_noise_channels)
scaled_noise_batch = torch.stack(noise_batch, 0)
scaled_noise_batch = scaled_noise_batch.to(device=x.device, dtype=x.dtype)
return scaled_noise_batch
if noise_type=='standard':
return scaled_uniform_noise
elif noise_type=='multires':
return scaled_uniform_noise_multires
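# Descriptive note (not part of the original file): both samplers returned above draw uniform noise
# and rescale it into [-sigma_down, +sigma_down]; the generator is reseeded with
# _seed + int(1000 * sigma_down) so each step gets a deterministic but different pattern. The
# multires variant sums a full-resolution field with a nearest-upsampled half-resolution field
# before rescaling, which biases the noise toward coarser structure.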
def prepare_su_noise(latent_image, _seed, noise_inds=None, scale=1.0):
noise_batch = []
noise_channels = []
noise_generator = torch.Generator(device='cpu')
seed = _seed + int(1000*scale)
noise_generator.manual_seed(seed)
if noise_inds is None:
for i in range(latent_image.size()[0]): # channels
noise = torch.rand(latent_image.size()[1:], dtype=torch.float32, layout=latent_image.layout, generator=noise_generator, device="cpu")
scaled_noise = (-1*scale) + (2*scale*noise)
noise_channels.append(scaled_noise)
return torch.stack(noise_channels, 0)
unique_inds, inverse = np.unique(noise_inds, return_inverse=True)
for i in range(unique_inds[-1]+1):
for j in range(latent_image.size()[1]): # channels
noise = torch.rand(latent_image.size()[2:], dtype=torch.float32, layout=latent_image.layout, generator=noise_generator, device="cpu")
scaled_noise = (-1*scale) + (2*scale*noise)
noise_channels.append(scaled_noise)
scaled_noise_channels = torch.stack(noise_channels, 0)
if i in unique_inds:
noise_batch.append(scaled_noise_channels)
noises = [noise_batch[i] for i in inverse]
noises = torch.stack(noises, 0)
return noises
def prepare_su_noise_multires(latent_image, _seed, noise_inds=None, scale=1.0):
noise_batch = []
noise_channels = []
noise_generator = torch.Generator(device='cpu')
seed = _seed + int(1000*scale)
noise_generator.manual_seed(seed)
latent_size = torch.tensor(latent_image.size()[1:])
if noise_inds is None:
for i in range(latent_image.size()[0]): # channels
noise_stack = []
for f in (1, 2):
noise = torch.rand(*((latent_size/f).to(dtype=torch.int32).tolist()), generator=noise_generator, dtype=torch.float32, device="cpu")
noise = torch.unsqueeze(noise, 0)
noise = torch.unsqueeze(noise, 0)
noise = torch.nn.functional.interpolate(noise, size=latent_image.size()[1:], mode='nearest-exact')
noise = torch.squeeze(noise, (0, 1))
noise_stack.append(noise)
noise_stack = torch.stack(noise_stack, 0)
noise_multires = torch.sum(noise_stack, dim=0, keepdim=False)
scaled_noise = ((noise_multires-noise_multires.min())*(2*scale/(noise_multires.max()-noise_multires.min()))) - scale
noise_channels.append(scaled_noise)
return torch.stack(noise_channels, 0)
unique_inds, inverse = np.unique(noise_inds, return_inverse=True)
for i in range(unique_inds[-1]+1):
for j in range(latent_image.size()[1]): # channels
noise_stack = []
for f in (1, 2):
noise = torch.rand(*((latent_size/f).to(dtype=torch.int32).tolist()), generator=noise_generator, dtype=torch.float32, device="cpu")
noise = torch.unsqueeze(noise, 0)
noise = torch.unsqueeze(noise, 0)
noise = torch.nn.functional.interpolate(noise, size=latent_image.size()[1:], mode='nearest-exact')
noise = torch.squeeze(noise, (0, 1))
noise_stack.append(noise)
noise_stack = torch.stack(noise_stack, 0)
noise_multires = torch.sum(noise_stack, dim=0, keepdim=False)
scaled_noise = ((noise_multires-noise_multires.min())*(2*scale/(noise_multires.max()-noise_multires.min()))) - scale
noise_channels.append(scaled_noise)
scaled_noise_channels = torch.stack(noise_channels, 0)
if i in unique_inds:
noise_batch.append(scaled_noise_channels)
noises = [noise_batch[i] for i in inverse]
noises = torch.stack(noises, 0)
return noises
@torch.no_grad()
def sample_euler_ancestral_sun(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None, noise_type='standard'):
"""Ancestral sampling with Euler method steps."""
extra_args = {} if extra_args is None else extra_args
seed = extra_args.get("seed", None)
# sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
noise_sampler = su_noise_sampler(x, seed, noise_type) if noise_sampler is None else noise_sampler
s_in = x.new_ones([x.shape[0]])
for i in trange(len(sigmas) - 1, disable=disable):
denoised = model(x, sigmas[i] * s_in, **extra_args)
sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta)
if callback is not None:
callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
d = to_d(x, sigmas[i], denoised)
# Euler method
dt = sigma_down - sigmas[i]
x = x + d * dt
if sigmas[i + 1] > 0:
x = x + (noise_sampler(sigma_down) if i%2!=0 else torch.randn_like(x) * s_noise * sigma_up)
return x
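# Descriptive note (not part of the original file): the *_sun samplers in this file alternate their
# noise source per step: scaled-uniform noise from su_noise_sampler on odd step indices, and the
# sampler's usual Gaussian/Brownian noise scaled by s_noise * sigma_up on even ones.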
@torch.no_grad()
def sample_euler_ancestral_cfg_pp_sun(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None, noise_type='standard'):
"""Ancestral sampling with Euler method steps."""
extra_args = {} if extra_args is None else extra_args
seed = extra_args.get("seed", None)
# sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
noise_sampler = su_noise_sampler(x, seed, noise_type) if noise_sampler is None else noise_sampler
temp = [0]
def post_cfg_function(args):
temp[0] = args["uncond_denoised"]
return args["denoised"]
model_options = extra_args.get("model_options", {}).copy()
extra_args["model_options"] = comfy.model_patcher.set_model_options_post_cfg_function(model_options, post_cfg_function, disable_cfg1_optimization=True)
s_in = x.new_ones([x.shape[0]])
for i in trange(len(sigmas) - 1, disable=disable):
denoised = model(x, sigmas[i] * s_in, **extra_args)
sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta)
if callback is not None:
callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
d = to_d(x, sigmas[i], temp[0])
# Euler method
dt = sigma_down - sigmas[i]
x = denoised + (d * sigma_down)
if sigmas[i + 1] > 0:
x = x + (noise_sampler(sigma_down) if i%2!=0 else torch.randn_like(x) * s_noise * sigma_up)
return x
@torch.no_grad()
def sample_dpm_2_ancestral_sun(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None, noise_type='standard'):
"""Ancestral sampling with DPM-Solver second-order steps."""
extra_args = {} if extra_args is None else extra_args
seed = extra_args.get("seed", None)
# sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
noise_sampler = su_noise_sampler(x, seed, noise_type) if noise_sampler is None else noise_sampler
s_in = x.new_ones([x.shape[0]])
for i in trange(len(sigmas) - 1, disable=disable):
denoised = model(x, sigmas[i] * s_in, **extra_args)
sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta)
if callback is not None:
callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
d = to_d(x, sigmas[i], denoised)
if sigma_down == 0:
# Euler method
dt = sigma_down - sigmas[i]
x = x + d * dt
else:
# DPM-Solver-2
sigma_mid = sigmas[i].log().lerp(sigma_down.log(), 0.5).exp()
dt_1 = sigma_mid - sigmas[i]
dt_2 = sigma_down - sigmas[i]
x_2 = x + d * dt_1
denoised_2 = model(x_2, sigma_mid * s_in, **extra_args)
d_2 = to_d(x_2, sigma_mid, denoised_2)
x = x + d_2 * dt_2
x = x + (noise_sampler(sigma_down) if i%2!=0 else torch.randn_like(x) * s_noise * sigma_up)
return x
@torch.no_grad()
def sample_dpmpp_2s_ancestral_sun(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None, noise_type='standard'):
"""Ancestral sampling with DPM-Solver++(2S) second-order steps."""
extra_args = {} if extra_args is None else extra_args
seed = extra_args.get("seed", None)
# sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
noise_sampler = su_noise_sampler(x, seed, noise_type) if noise_sampler is None else noise_sampler
s_in = x.new_ones([x.shape[0]])
sigma_fn = lambda t: t.neg().exp()
t_fn = lambda sigma: sigma.log().neg()
for i in trange(len(sigmas) - 1, disable=disable):
denoised = model(x, sigmas[i] * s_in, **extra_args)
sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta)
if callback is not None:
callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
if sigma_down == 0:
# Euler method
d = to_d(x, sigmas[i], denoised)
dt = sigma_down - sigmas[i]
x = x + d * dt
else:
# DPM-Solver++(2S)
t, t_next = t_fn(sigmas[i]), t_fn(sigma_down)
r = 1 / 2
h = t_next - t
s = t + r * h
x_2 = (sigma_fn(s) / sigma_fn(t)) * x - (-h * r).expm1() * denoised
denoised_2 = model(x_2, sigma_fn(s) * s_in, **extra_args)
x = (sigma_fn(t_next) / sigma_fn(t)) * x - (-h).expm1() * denoised_2
# Noise addition
if sigmas[i + 1] > 0:
x = x + (noise_sampler(sigma_down) if i%2!=0 else torch.randn_like(x) * s_noise * sigma_up)
return x
@torch.no_grad()
def sample_dpmpp_sde_sun(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None, r=1/2, noise_type='standard'):
"""DPM-Solver++ (stochastic)."""
seed = extra_args.get("seed", None)
sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
noise_sampler_bt = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=seed, cpu=True) if noise_sampler is None else noise_sampler
noise_sampler = su_noise_sampler(x, seed, noise_type) if noise_sampler is None else noise_sampler
extra_args = {} if extra_args is None else extra_args
s_in = x.new_ones([x.shape[0]])
sigma_fn = lambda t: t.neg().exp()
t_fn = lambda sigma: sigma.log().neg()
for i in trange(len(sigmas) - 1, disable=disable):
denoised = model(x, sigmas[i] * s_in, **extra_args)
if callback is not None:
callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
if sigmas[i + 1] == 0:
# Euler method
d = to_d(x, sigmas[i], denoised)
dt = sigmas[i + 1] - sigmas[i]
x = x + d * dt
else:
# DPM-Solver++
t, t_next = t_fn(sigmas[i]), t_fn(sigmas[i + 1])
h = t_next - t
s = t + h * r
fac = 1 / (2 * r)
# Step 1
sd, su = get_ancestral_step(sigma_fn(t), sigma_fn(s), eta)
s_2 = t_fn(sd)
x_2 = (sigma_fn(s_2) / sigma_fn(t)) * x - (t - s_2).expm1() * denoised
x_2 = x_2 + (noise_sampler(sd) if i%2!=0 else noise_sampler_bt(sigma_fn(t), sigma_fn(s)) * s_noise * su)
denoised_2 = model(x_2, sigma_fn(s) * s_in, **extra_args)
# Step 2
sd, su = get_ancestral_step(sigma_fn(t), sigma_fn(t_next), eta)
t_next = t_fn(sd)
denoised_d = (1 - fac) * denoised + fac * denoised_2
x = (sigma_fn(t_next) / sigma_fn(t)) * x - (t - t_next).expm1() * denoised_d
x = x + (noise_sampler(sd) if i%2!=0 else noise_sampler_bt(sigma_fn(t), sigma_fn(t_next)) * s_noise * su)
return x
@torch.no_grad()
def sample_dpmpp_2m_sde_sun(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None, solver_type='midpoint', noise_type='standard'):
"""DPM-Solver++(2M) SDE."""
if solver_type not in {'heun', 'midpoint'}:
raise ValueError('solver_type must be \'heun\' or \'midpoint\'')
    extra_args = {} if extra_args is None else extra_args  # guard before reading the seed
    seed = extra_args.get("seed", None)
    sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
    noise_sampler_bt = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=seed, cpu=True) if noise_sampler is None else noise_sampler
    noise_sampler = su_noise_sampler(x, seed, noise_type) if noise_sampler is None else noise_sampler
s_in = x.new_ones([x.shape[0]])
old_denoised = None
h_last = None
h = None
for i in trange(len(sigmas) - 1, disable=disable):
denoised = model(x, sigmas[i] * s_in, **extra_args)
sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta)
if callback is not None:
callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
if sigmas[i + 1] == 0:
# Denoising step
x = denoised
else:
# DPM-Solver++(2M) SDE
t, s = -sigmas[i].log(), -sigmas[i + 1].log()
h = s - t
eta_h = eta * h
x = sigmas[i + 1] / sigmas[i] * (-eta_h).exp() * x + (-h - eta_h).expm1().neg() * denoised
if old_denoised is not None:
r = h_last / h
if solver_type == 'heun':
x = x + ((-h - eta_h).expm1().neg() / (-h - eta_h) + 1) * (1 / r) * (denoised - old_denoised)
elif solver_type == 'midpoint':
x = x + 0.5 * (-h - eta_h).expm1().neg() * (1 / r) * (denoised - old_denoised)
if eta:
x = x + (noise_sampler(sigma_down) if i%2!=0 else noise_sampler_bt(sigmas[i], sigmas[i + 1]) * sigmas[i + 1] * (-2 * eta_h).expm1().neg().sqrt() * s_noise)
old_denoised = denoised
h_last = h
return x
@torch.no_grad()
def sample_dpmpp_3m_sde_sun(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None, noise_type='standard'):
"""DPM-Solver++(3M) SDE."""
    extra_args = {} if extra_args is None else extra_args  # guard before reading the seed
    seed = extra_args.get("seed", None)
    sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
    noise_sampler_bt = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=seed, cpu=True) if noise_sampler is None else noise_sampler
    noise_sampler = su_noise_sampler(x, seed, noise_type) if noise_sampler is None else noise_sampler
s_in = x.new_ones([x.shape[0]])
denoised_1, denoised_2 = None, None
h, h_1, h_2 = None, None, None
for i in trange(len(sigmas) - 1, disable=disable):
denoised = model(x, sigmas[i] * s_in, **extra_args)
sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta)
if callback is not None:
callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
if sigmas[i + 1] == 0:
# Denoising step
x = denoised
else:
t, s = -sigmas[i].log(), -sigmas[i + 1].log()
h = s - t
h_eta = h * (eta + 1)
x = torch.exp(-h_eta) * x + (-h_eta).expm1().neg() * denoised
if h_2 is not None:
r0 = h_1 / h
r1 = h_2 / h
d1_0 = (denoised - denoised_1) / r0
d1_1 = (denoised_1 - denoised_2) / r1
d1 = d1_0 + (d1_0 - d1_1) * r0 / (r0 + r1)
d2 = (d1_0 - d1_1) / (r0 + r1)
phi_2 = h_eta.neg().expm1() / h_eta + 1
phi_3 = phi_2 / h_eta - 0.5
x = x + phi_2 * d1 - phi_3 * d2
elif h_1 is not None:
r = h_1 / h
d = (denoised - denoised_1) / r
phi_2 = h_eta.neg().expm1() / h_eta + 1
x = x + phi_2 * d
if eta:
x = x + (noise_sampler(sigma_down) if i%2!=0 else noise_sampler_bt(sigmas[i], sigmas[i + 1]) * sigmas[i + 1] * (-2 * h * eta).expm1().neg().sqrt() * s_noise)
denoised_1, denoised_2 = denoised, denoised_1
h_1, h_2 = h, h_1
return x
class SamplersSUNoise:
@classmethod
def INPUT_TYPES(s):
return {"required":
{"sampler_name": (["euler_ancestral", "euler_ancestral_cfg_pp", "dpm_2_ancestral", "dpmpp_2s_ancestral",
"dpmpp_sde", "dpmpp_2m_sde", "dpmpp_3m_sde"], ),
"noise_type": (['standard', 'multires'], ),
}
}
RETURN_TYPES = ("SAMPLER",)
CATEGORY = "sampling/custom_sampling/samplers"
FUNCTION = "get_sampler"
def get_sampler(self, sampler_name, noise_type):
s_noise = 1.0
solver_type = 'heun'
eta = 1.0
r = 0.5
if sampler_name == "euler_ancestral":
sampler = comfy.samplers.KSAMPLER(sample_euler_ancestral_sun, {"eta": eta, "s_noise": s_noise, "noise_type": noise_type}, {})
elif sampler_name == "euler_ancestral_cfg_pp":
sampler = comfy.samplers.KSAMPLER(sample_euler_ancestral_cfg_pp_sun, {"eta": eta, "s_noise": s_noise, "noise_type": noise_type}, {})
elif sampler_name == "dpm_2_ancestral":
sampler = comfy.samplers.KSAMPLER(sample_dpm_2_ancestral_sun, {"eta": eta, "s_noise": s_noise, "noise_type": noise_type}, {})
elif sampler_name == "dpmpp_2s_ancestral":
sampler = comfy.samplers.KSAMPLER(sample_dpmpp_2s_ancestral_sun, {"eta": eta, "s_noise": s_noise, "noise_type": noise_type}, {})
elif sampler_name == "dpmpp_sde":
sampler = comfy.samplers.KSAMPLER(sample_dpmpp_sde_sun, {"eta": eta, "s_noise": s_noise, "r": r, "noise_type": noise_type}, {})
elif sampler_name == "dpmpp_2m_sde":
sampler = comfy.samplers.KSAMPLER(sample_dpmpp_2m_sde_sun, {"eta": eta, "s_noise": s_noise, "solver_type": solver_type, "noise_type": noise_type}, {})
elif sampler_name == "dpmpp_3m_sde":
sampler = comfy.samplers.KSAMPLER(sample_dpmpp_3m_sde_sun, {"eta": eta, "s_noise": s_noise, "noise_type": noise_type}, {})
return (sampler, )
class SamplersSUNoiseAdvanced:
@classmethod
def INPUT_TYPES(s):
return {"required":
{"sampler_name": (["euler_ancestral", "euler_ancestral_cfg_pp", "dpm_2_ancestral", "dpmpp_2s_ancestral",
"dpmpp_sde", "dpmpp_2m_sde", "dpmpp_3m_sde"], ),
"noise_type": (['standard', 'multires'], ),
"s_noise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step":0.01, "round": False}),
"solver_type": (['midpoint', 'heun'], ),
"eta": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step":0.01, "round": False}),
"r": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 100.0, "step":0.01, "round": False}),
}
}
RETURN_TYPES = ("SAMPLER",)
CATEGORY = "sampling/custom_sampling/samplers"
FUNCTION = "get_sampler"
def get_sampler(self, sampler_name, noise_type, s_noise, solver_type, eta, r):
if sampler_name == "euler_ancestral":
sampler = comfy.samplers.KSAMPLER(sample_euler_ancestral_sun, {"eta": eta, "s_noise": s_noise, "noise_type": noise_type}, {})
elif sampler_name == "euler_ancestral_cfg_pp":
sampler = comfy.samplers.KSAMPLER(sample_euler_ancestral_cfg_pp_sun, {"eta": eta, "s_noise": s_noise, "noise_type": noise_type}, {})
elif sampler_name == "dpm_2_ancestral":
sampler = comfy.samplers.KSAMPLER(sample_dpm_2_ancestral_sun, {"eta": eta, "s_noise": s_noise, "noise_type": noise_type}, {})
elif sampler_name == "dpmpp_2s_ancestral":
sampler = comfy.samplers.KSAMPLER(sample_dpmpp_2s_ancestral_sun, {"eta": eta, "s_noise": s_noise, "noise_type": noise_type}, {})
elif sampler_name == "dpmpp_sde":
sampler = comfy.samplers.KSAMPLER(sample_dpmpp_sde_sun, {"eta": eta, "s_noise": s_noise, "r": r, "noise_type": noise_type}, {})
elif sampler_name == "dpmpp_2m_sde":
sampler = comfy.samplers.KSAMPLER(sample_dpmpp_2m_sde_sun, {"eta": eta, "s_noise": s_noise, "solver_type": solver_type, "noise_type": noise_type}, {})
elif sampler_name == "dpmpp_3m_sde":
sampler = comfy.samplers.KSAMPLER(sample_dpmpp_3m_sde_sun, {"eta": eta, "s_noise": s_noise, "noise_type": noise_type}, {})
return (sampler, )
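# A minimal sketch (not part of the node API itself) of obtaining one of the wrapped
# samplers programmatically. It assumes this module is imported inside a ComfyUI
# runtime where `comfy.samplers` is importable, exactly as the classes above require;
# the chosen widget values are arbitrary.
def _example_build_sunoise_sampler():
    # Same call the node front-end makes when the advanced widget values are chosen.
    sampler, = SamplersSUNoiseAdvanced().get_sampler(
        "dpmpp_2m_sde", noise_type="multires", s_noise=1.0,
        solver_type="midpoint", eta=1.0, r=0.5)
    return sampler  # a comfy.samplers.KSAMPLER wrapping sample_dpmpp_2m_sde_sun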
class Noise_SUNoise:
def __init__(self, seed, scale, noise_type):
self.seed = seed
self.scale = scale
self.noise_type = noise_type
def generate_noise(self, input_latent):
latent_image = input_latent["samples"]
batch_inds = input_latent["batch_index"] if "batch_index" in input_latent else None
if self.noise_type=='standard':
return prepare_su_noise(latent_image, self.seed, batch_inds, self.scale)
elif self.noise_type=='multires':
return prepare_su_noise_multires(latent_image, self.seed, batch_inds, self.scale)
class SUNoiseLatent:
@classmethod
def INPUT_TYPES(s):
return {"required":{
"noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
"scale": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100, "step": 0.01}),
"noise_type": (['standard', 'multires'], ),
}
}
RETURN_TYPES = ("NOISE",)
FUNCTION = "get_noise"
CATEGORY = "sampling/custom_sampling/noise"
def get_noise(self, noise_seed, scale, noise_type):
return (Noise_SUNoise(noise_seed, scale, noise_type),)
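# Illustrative only: driving the latent-noise node directly from Python. The latent
# dict layout ({"samples": tensor}) mirrors what the node receives from ComfyUI; the
# 1x4x64x64 shape is an assumed SD-style latent, not something this module mandates.
def _example_generate_su_noise():
    latent = {"samples": torch.zeros(1, 4, 64, 64)}
    noise_obj = SUNoiseLatent().get_noise(noise_seed=0, scale=1.0, noise_type="standard")[0]
    return noise_obj.generate_noise(latent)  # tensor shaped like latent["samples"]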
NODE_CLASS_MAPPINGS = {
"SamplersSUNoise": SamplersSUNoise,
"SamplersSUNoiseAdvanced": SamplersSUNoiseAdvanced,
"SUNoiseLatent": SUNoiseLatent,
}
NODE_DISPLAY_NAME_MAPPINGS = {
"SamplersSUNoise": "SamplersSUNoise",
"SamplersSUNoiseAdvanced": "SamplersSUNoiseAdvanced",
"SUNoiseLatent": 'SUNoiseLatent',
}
| 25,830 | Python | .py | 455 | 45.641758 | 181 | 0.573585 | bvhari/ComfyUI_SUNoise | 8 | 3 | 0 | GPL-3.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,223 | setup.py | WindyLab_Gym-PPS/setup.py | import os.path
import sys
from setuptools import find_packages, setup
# Don't import gym module here, since deps may not be installed
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "gym"))
from version import VERSION
# Environment-specific dependencies.
extras = {
"atari": ["atari-py==0.2.6", "opencv-python>=3."],
"box2d": ["box2d-py~=2.3.5", "pyglet>=1.4.0"],
"classic_control": ["pyglet==1.5.27"],
"mujoco": ["mujoco_py>=1.50, <2.0"],
"robotics": ["mujoco_py>=1.50, <2.0"],
"toy_text": ["scipy>=1.4.1"],
"other": ["lz4>=3.1.0", "opencv-python>=3."],
}
# Meta dependency groups.
extras["nomujoco"] = list(
set(
[
item
for name, group in extras.items()
if name != "mujoco" and name != "robotics"
for item in group
]
)
)
extras["all"] = list(set([item for group in extras.values() for item in group]))
setup(
name="gym",
version=VERSION,
description="Gym: A universal API for reinforcement learning environments.",
url="https://github.com/openai/gym",
author="OpenAI",
author_email="[email protected]",
license="",
packages=[package for package in find_packages() if package.startswith("gym")],
zip_safe=False,
install_requires=[
"numpy==1.18.0",
"cloudpickle>=1.2.0",
"setuptools==57.5.0",
"pyglet==1.5.27"
],
extras_require=extras,
package_data={
"gym": [
"envs/mujoco/assets/*.xml",
"envs/classic_control/assets/*.png",
"envs/robotics/assets/LICENSE.md",
"envs/robotics/assets/fetch/*.xml",
"envs/robotics/assets/hand/*.xml",
"envs/robotics/assets/stls/fetch/*.stl",
"envs/robotics/assets/stls/hand/*.stl",
"envs/robotics/assets/textures/*.png",
"utils/*.so",
]
},
tests_require=["pytest", "mock"],
python_requires=">=3.6",
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
)
| 2,239 | Python | .py | 68 | 26.352941 | 83 | 0.583179 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,224 | custom_env.py | WindyLab_Gym-PPS/NJP_algorithm/custom_env.py | import gym
from gym import spaces
import numpy as np
"""Define your own Observation and Reward in this script:
You may use the following properties to define your observation/reward functions:
self.env.p, dp, ddp, theta, heading, d_b2b_center, is_collide_b2b, energy
"""
class MyObs(gym.ObservationWrapper):
def __init__(self, env):
super().__init__(env)
self.observation_space = spaces.Box(shape=(7+2*(env.topo_n_p2p+env.topo_n_p2e+env.topo_n_e2p+env.topo_n_e2e),env.n_p+env.n_e), low=-np.inf, high=np.inf)
def observation(self, obs):
r"""Example::
n_pe = self.env.n_p + self.env.n_e
obs = np.ones((2, n_pe))
return obs
"""
return obs
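# Illustrative variant (not used by the training scripts): an observation built from
# the velocity and heading arrays listed in the module docstring above. The exact
# row counts of env.dp and env.heading are assumptions here, so the Box shape is
# derived from them at wrap time rather than hard-coded.
class MyVelocityHeadingObs(gym.ObservationWrapper):
    def __init__(self, env):
        super().__init__(env)
        n_agents = env.n_p + env.n_e
        n_rows = env.dp.shape[0] + env.heading.shape[0]
        self.observation_space = spaces.Box(shape=(n_rows, n_agents), low=-np.inf, high=np.inf)
    def observation(self, obs):
        # Stack per-agent velocity on top of per-agent heading, one column per agent.
        return np.concatenate([self.env.dp, self.env.heading], axis=0)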
class MyReward(gym.RewardWrapper):
def reward(self, reward):
r"""Example::
reward = np.sum(self.env.is_collide_b2b)
"""
return reward
def _get_reward(self, a):
r"""Example::
reward_p = 5.0 * self._is_collide_b2b[self._n_p:self._n_pe, :self._n_p].sum(axis=0, keepdims=True).astype(float)
reward_e = - 5.0 * self._is_collide_b2b[self._n_p:self._n_pe, :self._n_p].sum(axis=1, keepdims=True).astype(float).reshape(1,self.n_e)
if self._penalize_distance:
reward_p += - self._d_b2b_center[self._n_p:self._n_pe, :self._n_p].sum(axis=0, keepdims=True)
reward_e += self._d_b2b_center[self._n_p:self._n_pe, :self._n_p].sum(axis=1, keepdims=True).reshape(1,self.n_e)
if self._penalize_control_effort:
if self._dynamics_mode == 'Cartesian':
reward_p -= 1*np.sqrt( a[[0],:self._n_p]**2 + a[[1],:self._n_p]**2 )
reward_e -= 1*np.sqrt( a[[0], self._n_p:self._n_pe]**2 + a[[1], self._n_p:self._n_pe]**2 )
elif self._dynamics_mode == 'Polar':
print("control_effort using MyReward")
reward_p -= 1 * np.abs( a[[0], :self._n_p] ) + 0 * np.abs( a[[1], :self._n_p] )
reward_e -= 1 * np.abs( a[[0], self._n_p:self._n_pe]) + 0 * np.abs( a[[1], self._n_p:self._n_pe])
if self._penalize_collide_agents:
reward_p -= self._is_collide_b2b[:self._n_p, :self._n_p].sum(axis=0, keepdims=True)
reward_e -= self._is_collide_b2b[self._n_p:self._n_pe, self._n_p:self._n_pe].sum(axis=0, keepdims=True)
if self._penalize_collide_obstacles:
reward_p -= 5 * self._is_collide_b2b[self._n_pe:self._n_peo, 0:self._n_p].sum(axis=0, keepdims=True)
reward_e -= 5 * self._is_collide_b2b[self._n_pe:self._n_peo, self._n_p:self._n_pe].sum(axis=0, keepdims=True)
if self._penalize_collide_walls and self._is_periodic == False:
reward_p -= 1 * self.is_collide_b2w[:, :self._n_p].sum(axis=0, keepdims=True)
reward_e -= 1 * self.is_collide_b2w[:, self._n_p:self._n_pe].sum(axis=0, keepdims=True)
if self._reward_sharing_mode == 'sharing_mean':
reward_p[:] = np.mean(reward_p)
reward_e[:] = np.mean(reward_e)
elif self._reward_sharing_mode == 'sharing_max':
reward_p[:] = np.max(reward_p)
reward_e[:] = np.max(reward_e)
elif self._reward_sharing_mode == 'individual':
pass
else:
print('reward mode error !!')
reward = np.concatenate((reward_p, reward_e), axis=1)
return reward
"""
| 3,491 | Python | .py | 61 | 46.131148 | 160 | 0.564148 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,225 | arguments.py | WindyLab_Gym-PPS/NJP_algorithm/arguments.py | '''
Specify parameters of the env
'''
from typing import Union
import numpy as np
import argparse
parser = argparse.ArgumentParser("Gym-PredatorPreySwarm Arguments")
## ==================== User settings ====================
parser.add_argument("--n-p", type=int, default=0, help='number of predators')
parser.add_argument("--n-e", type=int, default=50, help='number of prey')
parser.add_argument("--is-periodic", type=bool, default=True, help='Set whether has wall or periodic boundaries')
parser.add_argument("--dynamics-mode", type=str, default='Polar', help=" select one from ['Cartesian', 'Polar']")
parser.add_argument("--pursuer-strategy", type=str, default='nearest', help="select one from ['input', 'static', 'random', 'nearest']")
parser.add_argument("--escaper-strategy", type=str, default='input', help="select one from ['input', 'static', 'nearest']")
parser.add_argument("--render-traj", type=bool, default=True, help=" whether render trajectories of agents")
parser.add_argument("--traj_len", type=int, default=15, help="length of the trajectory")
parser.add_argument("--billiards-mode", type=float, default=False, help="billiards mode")
parser.add_argument("--size_p", type=float, default=0.06, help="predators size")
parser.add_argument("--size_e", type=float, default=0.035, help="evadors size")
parser.add_argument("--size_o", type=float, default=0.2, help="obstacles size")
parser.add_argument("--topo_n_p2p", type=float, default=6, help="pursuer to pursuer")
parser.add_argument("--topo_n_p2e", type=float, default=6, help="pursuer to escaper")
parser.add_argument("--topo_n_e2p", type=float, default=6, help="escaper to pursuer")
parser.add_argument("--topo_n_e2e", type=float, default=6, help="escaper to escaper")
parser.add_argument("--penalize_control_effort", type=float, default=True, help="penalize_control_effort")
parser.add_argument("--penalize_collide_walls", type=float, default=False, help="penalize_collide_walls")
parser.add_argument("--penalize_collide_agents", type=float, default=False, help="penalize_collide_agents")
parser.add_argument("--penalize_collide_obstacles", type=float, default=True, help="penalize_collide_obstacles")
## ==================== End of User settings ====================
## ==================== Advanced Settings ====================
# parser.add_argument("--action-space", type=list, default=[0, 1, 2, 3, 4] ) # up, right, down, left, stay
# parser.add_argument("--debug", type=bool, default=False )
# parser.add_argument("--animation-interval", type=float, default = 0.2)
## ==================== End of Advanced settings ====================
gpsargs = parser.parse_args()
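# These parsed settings can be handed straight to the NJP wrapper in main.py /
# testmodel.py (see the commented-out `env = NJP(base_env, args)` line there) as an
# alternative to the JSON parameter file those scripts load by default.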
def validate_environment_parameters(env_size, start_state, target_state, forbidden_states):
pass
# if not (isinstance(env_size, tuple) or isinstance(env_size, list) or isinstance(env_size, np.ndarray)) and len(env_size) != 2:
# raise ValueError("Invalid environment size. Expected a tuple (rows, cols) with positive dimensions.")
# for i in range(2):
# assert start_state[i] < env_size[i]
# assert target_state[i] < env_size[i]
# for j in range(len(forbidden_states)):
# assert forbidden_states[j][i] < env_size[i]
# try:
# validate_environment_parameters(gpsargs.env_size, gpsargs.start_state, args.target_state, args.forbidden_states)
# except ValueError as e:
# print("Error:", e) | 3,412 | Python | .py | 48 | 69.020833 | 136 | 0.688637 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,226 | custom_param.py | WindyLab_Gym-PPS/NJP_algorithm/custom_param.py | '''
Specify parameters of the PredatorPreySwarm environment
'''
from typing import Union
import numpy as np
import argparse
parser = argparse.ArgumentParser("Gym-PredatorPreySwarm Arguments")
parser.add_argument("--n-p", type=int, default=3, help='number of predators')
parser.add_argument("--n-e", type=int, default=10, help='number of prey')
parser.add_argument("--is-periodic", type=bool, default=False, help='Set whether has wall or periodic boundaries')
parser.add_argument("--dynamics-mode", type=str, default='Polar', help=" select one from ['Cartesian', 'Polar']")
parser.add_argument("--pursuer-strategy", type=str, default='nearest', help="select one from ['input', 'static', 'random', 'nearest']")
parser.add_argument("--escaper-strategy", type=str, default='random', help="select one from ['input', 'static', 'random', 'nearest']")
ppsargs = parser.parse_args()
| 884 | Python | .py | 14 | 61.428571 | 136 | 0.737875 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,227 | testmodel .py | WindyLab_Gym-PPS/NJP_algorithm/testmodel .py | import argparse
import torch
import time
import os
import numpy as np
import gym
from gym.wrappers import NJP
from arguments import gpsargs as args
from gym.wrappers import PredatorPreySwarmCustomizer
from gym.spaces import Box, Discrete
from torch.autograd import Variable
from algorithms.maddpg import MADDPG
from pathlib import Path
from utils.buffer import ReplayBuffer
from tensorboardX import SummaryWriter
from custom_env import MyObs, MyReward
USE_CUDA = False
def run(config):
model_dir = Path('./models') / config.env_id
if not model_dir.exists():
curr_run = 'run1'
else:
exst_run_nums = [int(str(folder.name).split('run')[1]) for folder in
model_dir.iterdir() if
str(folder.name).startswith('run')]
if len(exst_run_nums) == 0:
curr_run = 'run1'
else:
curr_run = 'run%i' % (max(exst_run_nums) + 1)
run_dir = model_dir / curr_run
torch.manual_seed(config.seed)
np.random.seed(config.seed)
if not USE_CUDA:
torch.set_num_threads(config.n_training_threads)
scenario_name = 'PredatorPreySwarm-v0'
base_env = gym.make(scenario_name).unwrapped
# env = NJP(base_env, args)
custom_param = 'custom_param.json'
custom_param = os.path.dirname(os.path.realpath(__file__)) + '/' + custom_param
env = NJP(base_env, custom_param)
start_stop_num=[slice(0,env.num_predator),slice(env.num_predator, env.num_predator+env.num_prey)]
maddpg = MADDPG.init_from_save('./models/model_1/run5/incremental/model_ep1300.pt')
adversary_buffer = ReplayBuffer(config.buffer_length, env.num_predator, state_dim=env.observation_space.shape[0], action_dim=env.action_space.shape[0],
start_stop_index=start_stop_num[0])
agent_buffer = ReplayBuffer(config.buffer_length, env.num_prey, state_dim=env.observation_space.shape[0], action_dim=env.action_space.shape[0],
start_stop_index=start_stop_num[1])
buffer_total=[adversary_buffer, agent_buffer]
t = 0
p_store = []
h_store = []
torch_agent_actions=[]
explr_pct_remaining = 0.1
print('Showing Starts...')
print(env.penalize_control_effort)
episode_reward = 0
obs=env.reset()
maddpg.prep_rollouts(device='cpu')
maddpg.scale_noise(maddpg.noise, maddpg.epsilon)
maddpg.reset_noise()
M_p, N_p = np.shape(env.p)
M_h, N_h =np.shape(env.heading)
p_store = np.zeros((M_p, N_p, config.episode_length))
h_store = np.zeros((M_h, N_h, config.episode_length))
for et_i in range(config.episode_length):
env.render()
# for i, species in enumerate(num_agent):
# Obtain observation for per agent and convert to torch variable
p_store[:, :, et_i] = env.p
h_store[:, :, et_i] = env.heading
torch_obs = torch.Tensor(obs).requires_grad_(False)
torch_agent_actions = maddpg.step(torch_obs, start_stop_num, explore=True)
# convert actions to numpy.arrays
agent_actions = np.column_stack([ac.data.numpy() for ac in torch_agent_actions])
# obtain reward and next state
next_obs, rewards, dones, infos = env.step(agent_actions)
agent_buffer.push(obs, agent_actions, rewards, next_obs, dones)
adversary_buffer.push(obs, agent_actions, rewards, next_obs, dones)
obs = next_obs
t += config.n_rollout_threads
episode_reward += rewards
maddpg.noise = max(0.05, maddpg.noise-5e-5)
maddpg.epsilon = max(0.05, maddpg.epsilon-5e-5)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--env_id", default="model_1", type=str)
parser.add_argument("--seed",
default=226, type=int,
help="Random seed")
parser.add_argument("--n_rollout_threads", default=1, type=int)
parser.add_argument("--n_training_threads", default=6, type=int)
parser.add_argument("--buffer_length", default=int(5e5), type=int)
parser.add_argument("--n_episodes", default=2000, type=int)
parser.add_argument("--episode_length", default=1000, type=int)
parser.add_argument("--batch_size",
default=256, type=int,
help="Batch size for model training")
parser.add_argument("--n_exploration_eps", default=25000, type=int)
parser.add_argument("--init_noise_scale", default=0.3, type=float)
parser.add_argument("--final_noise_scale", default=0.0, type=float)
parser.add_argument("--save_interval", default=1, type=int)
parser.add_argument("--hidden_dim", default=64, type=int)
parser.add_argument("--lr_actor", default=1e-4, type=float)
parser.add_argument("--lr_critic", default=1e-3, type=float)
parser.add_argument("--epsilon", default=0.1, type=float)
parser.add_argument("--noise", default=0.1, type=float)
parser.add_argument("--tau", default=0.01, type=float)
parser.add_argument("--agent_alg",
default="MADDPG", type=str,
choices=['MADDPG', 'DDPG'])
parser.add_argument("--adversary_alg",
default="MADDPG", type=str,
choices=['MADDPG', 'DDPG'])
config = parser.parse_args()
run(config)
| 5,478 | Python | .py | 114 | 39.131579 | 161 | 0.633358 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,228 | main.py | WindyLab_Gym-PPS/NJP_algorithm/main.py | import argparse
import torch
import time
import os
import numpy as np
import gym
from gym.wrappers import NJP
from arguments import gpsargs as args
from gym.spaces import Box, Discrete
from torch.autograd import Variable
from algorithms.maddpg import MADDPG
from pathlib import Path
from utils.buffer import ReplayBuffer
from tensorboardX import SummaryWriter
USE_CUDA = False
def run(config):
model_dir = Path('./models') / config.env_id
if not model_dir.exists():
curr_run = 'run1'
else:
exst_run_nums = [int(str(folder.name).split('run')[1]) for folder in
model_dir.iterdir() if
str(folder.name).startswith('run')]
if len(exst_run_nums) == 0:
curr_run = 'run1'
else:
curr_run = 'run%i' % (max(exst_run_nums) + 1)
run_dir = model_dir / curr_run
log_dir = run_dir / 'logs'
os.makedirs(log_dir)
logger = SummaryWriter(str(log_dir))
torch.manual_seed(config.seed)
np.random.seed(config.seed)
if not USE_CUDA:
torch.set_num_threads(config.n_training_threads)
scenario_name = 'PredatorPreySwarm-v0'
base_env = gym.make(scenario_name).unwrapped
# env = NJP(base_env, args)
custom_param = 'custom_param.json'
custom_param = os.path.dirname(os.path.realpath(__file__)) + '/' + custom_param
env = NJP(base_env, custom_param)
start_stop_num=[slice(0,env.num_predator),slice(env.num_predator, env.num_predator+env.num_prey)]
maddpg = MADDPG.init_from_env(env, agent_alg=config.agent_alg,
adversary_alg=config.adversary_alg,
tau=config.tau,
lr_actor=config.lr_actor, lr_critic=config.lr_critic, epsilon=config.epsilon, noise=config.noise,
hidden_dim=config.hidden_dim)
# maddpg = MADDPG.init_from_save('./models/model_1/run95/incremental/model_ep1000.pt')
adversary_buffer = ReplayBuffer(config.buffer_length, env.num_predator, state_dim=env.observation_space.shape[0], action_dim=env.action_space.shape[0],
start_stop_index=start_stop_num[0])
agent_buffer = ReplayBuffer(config.buffer_length, env.num_prey, state_dim=env.observation_space.shape[0], action_dim=env.action_space.shape[0],
start_stop_index=start_stop_num[1])
buffer_total=[adversary_buffer, agent_buffer]
t = 0
p_store = []
h_store = []
torch_agent_actions=[]
explr_pct_remaining = 0.1
print('Training Starts...')
for ep_i in range(0, config.n_episodes, config.n_rollout_threads):
if ep_i % 10 == 0:
print("Episodes %i of %i" % (ep_i, config.n_episodes))
episode_reward = 0
obs=env.reset()
maddpg.prep_rollouts(device='cpu')
# explr_pct_remaining = max(0, config.n_exploration_eps - ep_i) / config.n_exploration_eps
# maddpg.scale_noise(config.final_noise_scale + (config.init_noise_scale - config.final_noise_scale) * explr_pct_remaining)
maddpg.scale_noise(maddpg.noise, maddpg.epsilon)
maddpg.reset_noise()
M_p, N_p = np.shape(env.p)
M_h, N_h =np.shape(env.heading)
p_store = np.zeros((M_p, N_p, config.episode_length))
h_store = np.zeros((M_h, N_h, config.episode_length))
for et_i in range(config.episode_length):
if ep_i % 50 == 0:
env.render()
# for i, species in enumerate(num_agent):
# Obtain observation for per agent and convert to torch variable
p_store[:, :, et_i] = env.p
h_store[:, :, et_i] = env.heading
torch_obs = torch.Tensor(obs).requires_grad_(False)
torch_agent_actions = maddpg.step(torch_obs, start_stop_num, explore=True)
# convert actions to numpy.arrays
agent_actions = np.column_stack([ac.data.numpy() for ac in torch_agent_actions])
# obtain reward and next state
next_obs, rewards, dones, infos = env.step(agent_actions)
agent_buffer.push(obs, agent_actions, rewards, next_obs, dones)
adversary_buffer.push(obs, agent_actions, rewards, next_obs, dones)
obs = next_obs
t += config.n_rollout_threads
episode_reward += rewards
for _ in range(30):
maddpg.prep_training(device='cpu')
for a_i in range(maddpg.nagents):
if len(buffer_total[a_i]) >= config.batch_size:
sample = buffer_total[a_i].sample(config.batch_size, to_gpu=USE_CUDA)
obs_sample, acs_sample, rews_sample, next_obs_sample, dones_sample = sample
# assert obs_sample.size(0) == acs_sample.size(0) == rews_sample.size(0) == dones_sample.size(0)
maddpg.update(obs_sample, acs_sample, rews_sample, next_obs_sample, dones_sample, a_i, logger=logger) # parameter update
maddpg.update_all_targets()
maddpg.prep_rollouts(device='cpu')
# print("reward", episode_reward)
DOS_epi, DOA_epi = env.dos_and_doa(x=p_store[:, start_stop_num[1], :], h=h_store[:, start_stop_num[1], :], T=config.episode_length, N=env.num_prey, D=np.sqrt(2))
if ep_i % 10 == 0:
print("DOS_episode:", DOS_epi, "DOA_episode:", DOA_epi)
maddpg.noise = max(0.05, maddpg.noise-5e-5)
maddpg.epsilon = max(0.05, maddpg.epsilon-5e-5)
logger.add_scalar('DOS_epi', DOS_epi, global_step=ep_i)
logger.add_scalar('DOA_epi', DOA_epi, global_step=ep_i)
if ep_i % config.save_interval < config.n_rollout_threads:
os.makedirs(run_dir / 'incremental', exist_ok=True)
maddpg.save(run_dir / 'incremental' / ('model_ep%i.pt' % (ep_i + 1)))
maddpg.save(run_dir / 'model.pt')
maddpg.save(run_dir / 'model.pt')
logger.export_scalars_to_json(str(log_dir / 'summary.json'))
logger.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--env_id", default="model_1", type=str)
parser.add_argument("--seed",
default=226, type=int,
help="Random seed")
parser.add_argument("--n_rollout_threads", default=1, type=int)
parser.add_argument("--n_training_threads", default=6, type=int)
parser.add_argument("--buffer_length", default=int(5e5), type=int)
parser.add_argument("--n_episodes", default=2000, type=int)
parser.add_argument("--episode_length", default=500, type=int)
parser.add_argument("--batch_size",
default=256, type=int,
help="Batch size for model training")
parser.add_argument("--n_exploration_eps", default=25000, type=int)
parser.add_argument("--init_noise_scale", default=0.3, type=float)
parser.add_argument("--final_noise_scale", default=0.0, type=float)
parser.add_argument("--save_interval", default=1, type=int)
parser.add_argument("--hidden_dim", default=64, type=int)
parser.add_argument("--lr_actor", default=1e-4, type=float)
parser.add_argument("--lr_critic", default=1e-3, type=float)
parser.add_argument("--epsilon", default=0.1, type=float)
parser.add_argument("--noise", default=0.1, type=float)
parser.add_argument("--tau", default=0.01, type=float)
parser.add_argument("--agent_alg",
default="MADDPG", type=str,
choices=['MADDPG', 'DDPG'])
parser.add_argument("--adversary_alg",
default="MADDPG", type=str,
choices=['MADDPG', 'DDPG'])
config = parser.parse_args()
run(config)
| 7,986 | Python | .py | 148 | 42.445946 | 169 | 0.60121 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,229 | maddpg.py | WindyLab_Gym-PPS/NJP_algorithm/algorithms/maddpg.py | import torch
import torch.nn.functional as F
from gym.spaces import Box, Discrete
from utils.networks import MLPNetwork
from utils.misc import soft_update, average_gradients, onehot_from_logits, gumbel_softmax
from utils.agents import DDPGAgent
MSELoss = torch.nn.MSELoss()
class MADDPG(object):
"""
    Wrapper class for DDPG-esque (i.e. also MADDPG) agents in a multi-agent task
"""
def __init__(self, agent_init_params, alg_types, epsilon, noise,
gamma=0.95, tau=0.01, lr_actor=1e-4, lr_critic=1e-3, hidden_dim=64,
discrete_action=False):
"""
Inputs:
agent_init_params (list of dict): List of dicts with parameters to
initialize each agent
num_in_pol (int): Input dimensions to policy
num_out_pol (int): Output dimensions to policy
num_in_critic (int): Input dimensions to critic
alg_types (list of str): Learning algorithm for each agent (DDPG
or MADDPG)
gamma (float): Discount factor
tau (float): Target update rate
            lr_actor / lr_critic (float): Learning rates for the policy and critic optimizers
hidden_dim (int): Number of hidden dimensions for networks
discrete_action (bool): Whether or not to use discrete action space
"""
self.nagents = len(alg_types)
self.alg_types = alg_types
self.epsilon = epsilon
self.noise = noise
        self.agents = [DDPGAgent(lr_actor=lr_actor, lr_critic=lr_critic, discrete_action=discrete_action,  # all agents share the same architecture; only their init parameters differ
hidden_dim=hidden_dim, epsilon=self.epsilon, noise=self.noise,
**params)
for params in agent_init_params]
self.agent_init_params = agent_init_params
self.gamma = gamma
self.tau = tau
self.lr_actor = lr_actor
self.lr_critic = lr_critic
self.discrete_action = discrete_action
self.pol_dev = 'cpu'
self.critic_dev = 'cpu'
self.trgt_pol_dev = 'cpu'
self.trgt_critic_dev = 'cpu'
self.niter = 0
@property
def policies(self):
return [a.policy for a in self.agents]
def target_policies(self, agent_i, obs):
return self.agents[agent_i].target_policy(obs)
def scale_noise(self, scale, new_epsilon):
"""
Scale noise for each agent
Inputs:
            scale (float): scale of noise
            new_epsilon (float): new value for the epsilon-greedy exploration rate
"""
for a in self.agents:
            a.scale_noise(scale)  # no return value: this mutates the agent's noise scale in place
a.epsilon = new_epsilon
def reset_noise(self):
for a in self.agents:
a.reset_noise()
def step(self, observations, start_stop_num, explore=False):
"""
Take a step forward in environment with all agents
Inputs:
observations: List of observations for each agent
explore (boolean): Whether or not to add exploration noise
Outputs:
actions: List of actions for each agent
"""
return [self.agents[i].step(observations[:, start_stop_num[i]].t(), explore=explore) for i in range(len(start_stop_num))]
def update(self, obs, acs, rews, next_obs, dones, agent_i, parallel=False, logger=None):
"""
Update parameters of agent model based on sample from replay buffer
Inputs:
sample: tuple of (observations, actions, rewards, next
observations, and episode end masks) sampled randomly from
the replay buffer. Each is a list with entries
corresponding to each agent
agent_i (int): index of agent to update
parallel (bool): If true, will average gradients across threads
logger (SummaryWriter from Tensorboard-Pytorch):
If passed in, important quantities will be logged
"""
# obs, acs, rews, next_obs, dones = sample
curr_agent = self.agents[agent_i]
curr_agent.critic_optimizer.zero_grad()
all_trgt_acs = self.target_policies(agent_i, next_obs)
trgt_vf_in = torch.cat((next_obs, all_trgt_acs), dim=1)
target_value = (rews + self.gamma *
curr_agent.target_critic(trgt_vf_in) *
(1 - dones))
vf_in = torch.cat((obs, acs), dim=1)
actual_value = curr_agent.critic(vf_in)
vf_loss = MSELoss(actual_value, target_value.detach())
# vf_loss = (actual_value-target_value.detach()) ** 2
vf_loss.backward()
if parallel:
average_gradients(curr_agent.critic)
# torch.nn.utils.clip_grad_norm(curr_agent.critic.parameters(), 0.5)
curr_agent.critic_optimizer.step()
curr_agent.policy_optimizer.zero_grad()
if not self.discrete_action:
curr_pol_out = curr_agent.policy(obs)
curr_pol_vf_in = curr_pol_out
all_pol_acs = curr_pol_vf_in
vf_in = torch.cat((obs, all_pol_acs), dim=1)
pol_loss = -curr_agent.critic(vf_in).mean()
pol_loss.backward()
if parallel:
average_gradients(curr_agent.policy)
# torch.nn.utils.clip_grad_norm(curr_agent.policy.parameters(), 0.5)
curr_agent.policy_optimizer.step()
if logger is not None:
logger.add_scalars('agent%i/losses' % agent_i,
{'vf_loss': vf_loss,
'pol_loss': pol_loss},
self.niter)
def update_all_targets(self):
"""
Update all target networks (called after normal updates have been
performed for each agent)
"""
for a in self.agents:
soft_update(a.target_critic, a.critic, self.tau)
soft_update(a.target_policy, a.policy, self.tau)
self.niter += 1
def prep_training(self, device='gpu'):
for a in self.agents:
a.policy.train()
a.target_policy.train()
a.target_critic.train()
if device == 'gpu':
fn = lambda x: x.cuda()
else:
fn = lambda x: x.cpu()
if not self.pol_dev == device:
for a in self.agents:
a.policy = fn(a.policy)
self.pol_dev = device
if not self.critic_dev == device:
for a in self.agents:
a.critic = fn(a.critic)
self.critic_dev = device
if not self.trgt_pol_dev == device:
for a in self.agents:
a.target_policy = fn(a.target_policy)
self.trgt_pol_dev = device
if not self.trgt_critic_dev == device:
for a in self.agents:
a.target_critic = fn(a.target_critic)
self.trgt_critic_dev = device
def prep_rollouts(self, device='cpu'):
for a in self.agents:
a.policy.eval()
if device == 'gpu':
fn = lambda x: x.cuda()
else:
fn = lambda x: x.cpu()
# only need main policy for rollouts
if not self.pol_dev == device:
for a in self.agents:
a.policy = fn(a.policy)
self.pol_dev = device
def save(self, filename):
"""
Save trained parameters of all agents into one file
"""
self.prep_training(device='cpu') # move parameters to CPU before saving
save_dict = {'init_dict': self.init_dict,
'agent_params': [a.get_params() for a in self.agents]}
torch.save(save_dict, filename)
@classmethod
def init_from_env(cls, env, agent_alg="MADDPG", adversary_alg="MADDPG",
gamma=0.95, tau=0.01, lr_actor=1e-4, lr_critic=1e-3, hidden_dim=64, epsilon=0.1, noise=0.1):
"""
Instantiate instance of this class from multi-agent environment
"""
agent_init_params = []
num_in_pol=env.observation_space.shape[0]
num_out_pol=env.action_space.shape[0]
num_in_critic=env.observation_space.shape[0] + env.action_space.shape[0]
# print("num in pol", num_in_pol, "num out pol", num_out_pol, "num in critic", num_in_critic)
alg_types = [adversary_alg if atype == 'adversary' else agent_alg for
atype in env.agent_types]
for algtype in alg_types:
agent_init_params.append({'num_in_pol': num_in_pol,
'num_out_pol': num_out_pol,
'num_in_critic': num_in_critic})
init_dict = {'gamma': gamma, 'tau': tau, 'lr_actor': lr_actor, 'lr_critic': lr_critic, 'epsilon': epsilon, 'noise': noise,
'hidden_dim': hidden_dim,
'alg_types': alg_types,
'agent_init_params': agent_init_params}
instance = cls(**init_dict)
instance.init_dict = init_dict
return instance
@classmethod
def init_from_save(cls, filename):
"""
Instantiate instance of this class from file created by 'save' method
"""
save_dict = torch.load(filename)
instance = cls(**save_dict['init_dict'])
instance.init_dict = save_dict['init_dict']
for a, params in zip(instance.agents, save_dict['agent_params']):
a.load_params(params)
return instance | 9,862 | Python | .py | 211 | 33.407583 | 157 | 0.561178 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
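# A small self-contained sketch of the construction/stepping API above. The fake env
# below only provides the three attributes init_from_env actually reads
# (observation_space, action_space, agent_types); the dimensions are arbitrary.
def _maddpg_smoke_test():
    from types import SimpleNamespace
    import numpy as np
    fake_env = SimpleNamespace(
        observation_space=Box(low=-1.0, high=1.0, shape=(8,), dtype=np.float32),
        action_space=Box(low=-1.0, high=1.0, shape=(2,), dtype=np.float32),
        agent_types=['adversary', 'agent'],
    )
    algo = MADDPG.init_from_env(fake_env)
    obs = torch.zeros(8, 5)  # 8-D observation per agent, 5 agents as columns
    # First two columns go to the adversary policy, the remaining three to the agent policy.
    return algo.step(obs, [slice(0, 2), slice(2, 5)], explore=False)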
2,289,230 | networks.py | WindyLab_Gym-PPS/NJP_algorithm/utils/networks.py | import torch.nn as nn
import torch.nn.functional as F
class MLPNetwork(nn.Module):
"""
MLP network (can be used as value or policy)
"""
def __init__(self, input_dim, out_dim, hidden_dim=64, nonlin=F.relu,
constrain_out=False, norm_in=False, discrete_action=False):
"""
Inputs:
input_dim (int): Number of dimensions in input
out_dim (int): Number of dimensions in output
hidden_dim (int): Number of hidden dimensions
nonlin (PyTorch function): Nonlinearity to apply to hidden layers
"""
super(MLPNetwork, self).__init__()
# self.in_fn = lambda x: x
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, hidden_dim)
self.fc3 = nn.Linear(hidden_dim, hidden_dim)
self.fc4 = nn.Linear(hidden_dim, out_dim)
self.nonlin = nonlin
if constrain_out and not discrete_action:
# initialize small to prevent saturation
self.out_fn = F.tanh
else: # logits for discrete action (will softmax later)
self.out_fn = lambda x: x
def forward(self, X):
"""
Inputs:
X (PyTorch Matrix): Batch of observations
Outputs:
out (PyTorch Matrix): Output of network (actions, values, etc)
"""
h1 = self.nonlin(self.fc1(X))
h2 = self.nonlin(self.fc2(h1))
h3 = self.nonlin(self.fc3(h2))
out = self.out_fn(self.fc4(h3))
return out | 1,535 | Python | .py | 39 | 30.25641 | 77 | 0.593039 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
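# Quick illustrative shape check for the network above (dimensions are arbitrary):
# a batch of 5 ten-dimensional observations mapped to 2-dimensional bounded actions.
def _mlp_network_demo():
    import torch
    net = MLPNetwork(input_dim=10, out_dim=2, constrain_out=True)
    batch = torch.randn(5, 10)
    return net(batch)  # shape (5, 2), squashed into (-1, 1) by the tanh output layer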
2,289,231 | buffer.py | WindyLab_Gym-PPS/NJP_algorithm/utils/buffer.py | import numpy as np
from torch import Tensor
from torch.autograd import Variable
class ReplayBuffer(object):
"""
Replay Buffer for multi-agent RL with parallel rollouts
"""
def __init__(self, max_steps, num_agents, start_stop_index, state_dim, action_dim):
"""
Inputs:
max_steps (int): Maximum number of timepoints to store in buffer
num_agents (int): Number of agents in environment
            start_stop_index (slice): slice selecting this buffer's agents in the joint arrays
            state_dim (int): number of observation dimensions per agent
            action_dim (int): number of action dimensions per agent
"""
self.max_steps = max_steps
self.num_agents = num_agents
self.obs_buffs = []
self.ac_buffs = []
self.rew_buffs = []
self.next_obs_buffs = []
self.done_buffs = []
# for odim, adim in zip(obs_dims, ac_dims):
# self.start_number=start_index
# self.stop_number=stop_index
# odim1, odim2 = obs_dims[0], obs_dims[1]
# adim1, adim2 = ac_dims[0], ac_dims[1]
# print(odim1)
# print(adim1)
self.obs_buffs = np.zeros((self.max_steps * self.num_agents, state_dim))
self.ac_buffs = np.zeros((self.max_steps * self.num_agents, action_dim))
self.rew_buffs = np.zeros((self.max_steps * self.num_agents, 1))
self.next_obs_buffs = np.zeros((self.max_steps * self.num_agents, state_dim))
self.done_buffs = np.zeros((self.max_steps * self.num_agents, 1))
self.filled_i = 0 # index of first empty location in buffer (last index when full)
        self.curr_i = 0  # current index to write to (overwrite oldest data)
# self.curr_i_obs = 0
# self.curr_i_act = 0
# self.curr_i_rew = 0
# self.curr_i_next_obs = 0
# self.curr_i_done = 0
self.agent= start_stop_index
def __len__(self):
return self.filled_i
def push(self, observations_original, actions_original, rewards_original, next_observations_original, dones_original):
agent_i = self.agent
observations = observations_original[:, agent_i].T
actions = actions_original[:,agent_i].T
rewards = rewards_original[:, agent_i].T
next_observations = next_observations_original[:, agent_i].T
dones = dones_original[:, agent_i].T
# assert self.nentries_obs == self.nentries_next_obs == self.nentries_act == self.nentries_rew == self.nentries_done
if self.curr_i + self.num_agents > self.max_steps * self.num_agents:
rollover = self.max_steps * self.num_agents - self.curr_i # num of indices to roll over
self.obs_buffs = np.roll(self.obs_buffs,
rollover, axis=0)
self.ac_buffs = np.roll(self.ac_buffs,
rollover, axis=0)
self.rew_buffs = np.roll(self.rew_buffs,
rollover, axis=0)
self.next_obs_buffs = np.roll(self.next_obs_buffs,
rollover, axis=0)
self.done_buffs = np.roll(self.done_buffs,
rollover, axis=0)
self.curr_i = 0
self.filled_i = self.max_steps
self.obs_buffs[self.curr_i:self.curr_i + self.num_agents, :] = observations
# actions are already batched by agent, so they are indexed differently
self.ac_buffs[self.curr_i:self.curr_i + self.num_agents, :] = actions
self.rew_buffs[self.curr_i:self.curr_i + self.num_agents, :] = rewards
self.next_obs_buffs[self.curr_i:self.curr_i + self.num_agents, :] = next_observations
self.done_buffs[self.curr_i:self.curr_i + self.num_agents, :] = dones
# self.curr_i += nentries
self.curr_i += self.num_agents
if self.filled_i < self.max_steps:
self.filled_i += self.num_agents
if self.curr_i == self.max_steps * self.num_agents:
self.curr_i = 0
def sample(self, N, to_gpu=False, norm_rews=True):
# print("filled_i", self.filled_i)
# print("self.max_steps * self.nentries_obs", self.max_steps * self.nentries_obs)
inds = np.random.choice(np.arange(self.filled_i), size=N,
replace=False)
# extracted_elements = self.obs_buffs[index_obs, :]
if to_gpu:
cast = lambda x: Tensor(x).requires_grad_(False).cuda()
else:
cast = lambda x: Tensor(x).requires_grad_(False)
ret_rews = cast(self.rew_buffs[inds, :])
return (cast(self.obs_buffs[inds, :]),
cast(self.ac_buffs[inds, :]),
ret_rews,
cast(self.next_obs_buffs[inds, :]),
cast(self.done_buffs[inds, :]))
def get_average_rewards(self, N):
if self.filled_i == self.max_steps:
inds = np.arange(self.curr_i - N, self.curr_i) # allow for negative indexing
else:
inds = np.arange(max(0, self.curr_i - N), self.curr_i)
return [self.rew_buffs[i][inds].mean() for i in range(self.num_agents)]
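# Minimal illustrative round-trip through the buffer above. The joint arrays are laid
# out with one column per agent, matching how the training scripts pass observations,
# actions and rewards in.
def _replay_buffer_demo():
    n_agents, state_dim, action_dim = 3, 4, 2
    buf = ReplayBuffer(max_steps=100, num_agents=n_agents,
                       start_stop_index=slice(0, n_agents),
                       state_dim=state_dim, action_dim=action_dim)
    obs = np.zeros((state_dim, n_agents))
    acts = np.zeros((action_dim, n_agents))
    rews = np.zeros((1, n_agents))
    dones = np.zeros((1, n_agents))
    buf.push(obs, acts, rews, obs, dones)        # store one joint transition
    return buf.sample(N=n_agents, to_gpu=False)  # tensors of per-agent rows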
| 5,453 | Python | .py | 101 | 39.742574 | 125 | 0.562906 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,232 | noise.py | WindyLab_Gym-PPS/NJP_algorithm/utils/noise.py | import numpy as np
# from https://github.com/songrotek/DDPG/blob/master/ou_noise.py
class OUNoise:
def __init__(self, action_dimension, scale=0.1, mu=0, theta=0.15, sigma=0.2):
self.action_dimension = action_dimension
self.scale = scale
self.mu = mu
self.theta = theta
self.sigma = sigma
self.state = np.ones(self.action_dimension) * self.mu
self.reset()
def reset(self):
self.state = np.ones(self.action_dimension) * self.mu
def noise(self):
x = self.state
dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(len(x))
self.state = x + dx
return self.state * self.scale
class GaussianNoise:
def __init__(self, action_dimension, scale):
self.action_dimension = action_dimension
self.scale = scale
def noise(self):
return np.random.randn(self.action_dimension) * self.scale
def reset(self):
pass
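# Illustrative comparison of the two exploration-noise processes defined above: the
# OU process is temporally correlated across calls, the Gaussian one is independent.
def _noise_demo(action_dim=2):
    ou = OUNoise(action_dim, scale=0.1)
    gauss = GaussianNoise(action_dim, scale=0.1)
    return ou.noise(), gauss.noise()  # two arrays of length `action_dim`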
| 979 | Python | .py | 26 | 29.923077 | 81 | 0.629153 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,233 | agents.py | WindyLab_Gym-PPS/NJP_algorithm/utils/agents.py | from torch import Tensor
from torch.autograd import Variable
from torch.optim import Adam
from .networks import MLPNetwork
from .misc import hard_update, gumbel_softmax, onehot_from_logits
from .noise import OUNoise, GaussianNoise
import numpy as np
class DDPGAgent(object):
"""
General class for DDPG agents (policy, critic, target policy, target
critic, exploration noise)
"""
def __init__(self, num_in_pol, num_out_pol, num_in_critic,
lr_actor, lr_critic, hidden_dim=64, discrete_action=False, epsilon=0.1, noise=0.1):
"""
Inputs:
num_in_pol (int): number of dimensions for policy input
num_out_pol (int): number of dimensions for policy output
num_in_critic (int): number of dimensions for critic input
"""
self.policy = MLPNetwork(num_in_pol, num_out_pol,
hidden_dim=hidden_dim,
constrain_out=True,
discrete_action=discrete_action)
self.critic = MLPNetwork(num_in_critic, 1,
hidden_dim=hidden_dim,
constrain_out=False)
self.target_policy = MLPNetwork(num_in_pol, num_out_pol,
hidden_dim=hidden_dim,
constrain_out=True,
discrete_action=discrete_action)
self.target_critic = MLPNetwork(num_in_critic, 1,
hidden_dim=hidden_dim,
constrain_out=False)
# print("target_policy_parameter", list(self.target_policy.parameters()), "policy_parameter", list(self.policy.parameters()))
hard_update(self.target_policy, self.policy)
# print("target_policy_parameter_after_update", list(self.target_policy.parameters()), "policy_parameter_after_update", list(self.policy.parameters()))
hard_update(self.target_critic, self.critic)
self.policy_optimizer = Adam(self.policy.parameters(), lr_actor)
self.critic_optimizer = Adam(self.critic.parameters(), lr_critic)
self.epsilon = epsilon
self.noise = noise
if not discrete_action:
# self.exploration = OUNoise(num_out_pol)
self.exploration = GaussianNoise(num_out_pol, noise)
else:
self.exploration = 0.3 # epsilon for eps-greedy
self.discrete_action = discrete_action
def reset_noise(self):
if not self.discrete_action:
self.exploration.reset()
def scale_noise(self, scale):
if self.discrete_action:
self.exploration = scale
else:
self.exploration.scale = scale
def step(self, obs, explore=False):
"""
Take a step forward in environment for a minibatch of observations
Inputs:
obs (PyTorch Variable): Observations for this agent
explore (boolean): Whether or not to add exploration noise
Outputs:
action (PyTorch Variable): Actions for this agent
"""
action = self.policy(obs)
if self.discrete_action:
if explore:
action = gumbel_softmax(action, hard=True)
else:
action = onehot_from_logits(action)
else: # continuous action
if explore:
if np.random.rand() < self.epsilon:
action = Tensor(np.random.uniform(-1, 1, size=action.shape)).requires_grad_(False)
else:
action += Tensor(self.exploration.noise()).requires_grad_(False)
action = action.clamp(-1, 1)
return action.t()
def get_params(self):
return {'policy': self.policy.state_dict(),
'critic': self.critic.state_dict(),
'target_policy': self.target_policy.state_dict(),
'target_critic': self.target_critic.state_dict(),
'policy_optimizer': self.policy_optimizer.state_dict(),
'critic_optimizer': self.critic_optimizer.state_dict()}
def load_params(self, params):
self.policy.load_state_dict(params['policy'])
self.critic.load_state_dict(params['critic'])
self.target_policy.load_state_dict(params['target_policy'])
self.target_critic.load_state_dict(params['target_critic'])
self.policy_optimizer.load_state_dict(params['policy_optimizer'])
self.critic_optimizer.load_state_dict(params['critic_optimizer'])
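# Self-contained sketch of the agent API above (dimensions are arbitrary): build one
# agent and query a batch of actions with exploration noise enabled.
def _ddpg_agent_demo():
    agent = DDPGAgent(num_in_pol=8, num_out_pol=2, num_in_critic=10,
                      lr_actor=1e-4, lr_critic=1e-3)
    obs = Tensor(np.random.randn(5, 8)).requires_grad_(False)  # batch of 5 observations
    return agent.step(obs, explore=True)  # (2, 5) tensor of actions clipped to [-1, 1]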
| 4,700 | Python | .py | 93 | 36.645161 | 159 | 0.584568 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,234 | misc.py | WindyLab_Gym-PPS/NJP_algorithm/utils/misc.py | import os
import torch
import torch.nn.functional as F
import torch.distributed as dist
from torch.autograd import Variable
import numpy as np
# https://github.com/ikostrikov/pytorch-ddpg-naf/blob/master/ddpg.py#L11
def soft_update(target, source, tau):
"""
Perform DDPG soft update (move target params toward source based on weight
factor tau)
Inputs:
target (torch.nn.Module): Net to copy parameters to
source (torch.nn.Module): Net whose parameters to copy
tau (float, 0 < x < 1): Weight factor for update
"""
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(target_param.data * (1.0 - tau) + param.data * tau)
# https://github.com/ikostrikov/pytorch-ddpg-naf/blob/master/ddpg.py#L15
def hard_update(target, source):
"""
Copy network parameters from source to target
Inputs:
target (torch.nn.Module): Net to copy parameters to
source (torch.nn.Module): Net whose parameters to copy
"""
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(param.data)
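# Minimal usage sketch for the two update helpers above: hard_update makes the target
# an exact copy, after which soft_update nudges it toward the source by a factor tau
# (Polyak averaging). The tiny Linear layers are placeholders for actor/critic nets.
def _target_update_demo(tau=0.01):
    import torch.nn as nn
    source, target = nn.Linear(4, 2), nn.Linear(4, 2)
    hard_update(target, source)        # target parameters now equal source exactly
    soft_update(target, source, tau)   # target <- (1 - tau) * target + tau * source
    return target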
# https://github.com/seba-1511/dist_tuto.pth/blob/gh-pages/train_dist.py
def average_gradients(model):
""" Gradient averaging. """
size = float(dist.get_world_size())
for param in model.parameters():
dist.all_reduce(param.grad.data, op=dist.reduce_op.SUM, group=0)
param.grad.data /= size
# https://github.com/seba-1511/dist_tuto.pth/blob/gh-pages/train_dist.py
def init_processes(rank, size, fn, backend='gloo'):
""" Initialize the distributed environment. """
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = '29500'
dist.init_process_group(backend, rank=rank, world_size=size)
fn(rank, size)
def onehot_from_logits(logits, eps=0.0):
"""
Given batch of logits, return one-hot sample using epsilon greedy strategy
(based on given epsilon)
"""
# get best (according to current policy) actions in one-hot form
    argmax_acs = (logits == logits.max(1, keepdim=True)[0]).float()  # one-hot mask of the highest-scoring action for each sample
if eps == 0.0:
return argmax_acs
# get random actions in one-hot form
rand_acs = Variable(torch.eye(logits.shape[1])[[np.random.choice(
range(logits.shape[1]), size=logits.shape[0])]], requires_grad=False)
# chooses between best and random actions using epsilon greedy
return torch.stack([argmax_acs[i] if r > eps else rand_acs[i] for i, r in
                        enumerate(torch.rand(logits.shape[0]))])  # draw one uniform random number per sample
# modified for PyTorch from https://github.com/ericjang/gumbel-softmax/blob/master/Categorical%20VAE.ipynb
def sample_gumbel(shape, eps=1e-20, tens_type=torch.FloatTensor):
"""Sample from Gumbel(0, 1)"""
U = Variable(tens_type(*shape).uniform_(), requires_grad=False)
return -torch.log(-torch.log(U + eps) + eps)
# modified for PyTorch from https://github.com/ericjang/gumbel-softmax/blob/master/Categorical%20VAE.ipynb
def gumbel_softmax_sample(logits, temperature):
""" Draw a sample from the Gumbel-Softmax distribution"""
y = logits + sample_gumbel(logits.shape, tens_type=type(logits.data))
return F.softmax(y / temperature, dim=1)
# modified for PyTorch from https://github.com/ericjang/gumbel-softmax/blob/master/Categorical%20VAE.ipynb
def gumbel_softmax(logits, temperature=1.0, hard=False):
"""Sample from the Gumbel-Softmax distribution and optionally discretize.
Args:
logits: [batch_size, n_class] unnormalized log-probs
temperature: non-negative scalar
hard: if True, take argmax, but differentiate w.r.t. soft sample y
Returns:
[batch_size, n_class] sample from the Gumbel-Softmax distribution.
If hard=True, then the returned sample will be one-hot, otherwise it will
    be a probability distribution that sums to 1 across classes
"""
y = gumbel_softmax_sample(logits, temperature)
if hard:
y_hard = onehot_from_logits(y)
y = (y_hard - y).detach() + y
return y
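# Illustrative call of the straight-through Gumbel-Softmax defined above: draws a
# one-hot action per row while keeping gradients through the soft relaxation.
def _gumbel_softmax_demo():
    logits = torch.randn(4, 3, requires_grad=True)  # 4 samples over 3 discrete actions
    sample = gumbel_softmax(logits, temperature=1.0, hard=True)
    return sample  # each row is one-hot; backward() still reaches `logits`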
| 4,208 | Python | .py | 84 | 43.833333 | 109 | 0.705326 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,235 | lint_python.yml | WindyLab_Gym-PPS/.github/workflows/lint_python.yml | name: lint_python
on: [pull_request, push]
jobs:
lint_python:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
- run: pip install isort mypy pytest pyupgrade safety
- run: isort --check-only --profile black . || true
- run: pip install -e .[nomujoco]
- run: mypy --install-types --non-interactive . || true
- run: pytest . || true
- run: pytest --doctest-modules . || true
- run: shopt -s globstar && pyupgrade --py36-plus **/*.py || true
| 544 | Python | .py | 15 | 31 | 71 | 0.620038 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,236 | test_manipulate.py | WindyLab_Gym-PPS/tests/gym/envs/robotics/hand/test_manipulate.py | import pickle
import unittest
import pytest
from gym import envs
from gym.envs.tests.spec_list import skip_mujoco, SKIP_MUJOCO_WARNING_MESSAGE
ENVIRONMENT_IDS = (
"HandManipulateEgg-v0",
"HandManipulatePen-v0",
"HandManipulateBlock-v0",
)
@pytest.mark.skipif(skip_mujoco, reason=SKIP_MUJOCO_WARNING_MESSAGE)
@pytest.mark.parametrize("environment_id", ENVIRONMENT_IDS)
def test_serialize_deserialize(environment_id):
env1 = envs.make(environment_id, target_position="fixed")
env1.reset()
env2 = pickle.loads(pickle.dumps(env1))
assert env1.target_position == env2.target_position, (
env1.target_position,
env2.target_position,
)
| 683 | Python | .py | 20 | 30.4 | 77 | 0.757622 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,237 | test_reach.py | WindyLab_Gym-PPS/tests/gym/envs/robotics/hand/test_reach.py | import pickle
import pytest
from gym import envs
from gym.envs.tests.spec_list import skip_mujoco, SKIP_MUJOCO_WARNING_MESSAGE
@pytest.mark.skipif(skip_mujoco, reason=SKIP_MUJOCO_WARNING_MESSAGE)
def test_serialize_deserialize():
env1 = envs.make("HandReach-v0", distance_threshold=1e-6)
env1.reset()
env2 = pickle.loads(pickle.dumps(env1))
assert env1.distance_threshold == env2.distance_threshold, (
env1.distance_threshold,
env2.distance_threshold,
)
| 495 | Python | .py | 13 | 33.923077 | 77 | 0.754717 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,238 | test_manipulate_touch_sensors.py | WindyLab_Gym-PPS/tests/gym/envs/robotics/hand/test_manipulate_touch_sensors.py | import pickle
import pytest
from gym import envs
from gym.envs.tests.spec_list import skip_mujoco, SKIP_MUJOCO_WARNING_MESSAGE
ENVIRONMENT_IDS = (
"HandManipulateEggTouchSensors-v1",
"HandManipulatePenTouchSensors-v0",
"HandManipulateBlockTouchSensors-v0",
)
@pytest.mark.skipif(skip_mujoco, reason=SKIP_MUJOCO_WARNING_MESSAGE)
@pytest.mark.parametrize("environment_id", ENVIRONMENT_IDS)
def test_serialize_deserialize(environment_id):
env1 = envs.make(environment_id, target_position="fixed")
env1.reset()
env2 = pickle.loads(pickle.dumps(env1))
assert env1.target_position == env2.target_position, (
env1.target_position,
env2.target_position,
)
| 703 | Python | .py | 19 | 33.105263 | 77 | 0.766617 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,239 | nested_dict_test.py | WindyLab_Gym-PPS/tests/gym/wrappers/nested_dict_test.py | """Tests for the filter observation wrapper."""
import pytest
import numpy as np
import gym
from gym.spaces import Dict, Box, Discrete, Tuple
from gym.wrappers import FilterObservation, FlattenObservation
class FakeEnvironment(gym.Env):
def __init__(self, observation_space):
self.observation_space = observation_space
self.obs_keys = self.observation_space.spaces.keys()
self.action_space = Box(shape=(1,), low=-1, high=1, dtype=np.float32)
def render(self, width=32, height=32, *args, **kwargs):
del args
del kwargs
image_shape = (height, width, 3)
return np.zeros(image_shape, dtype=np.uint8)
def reset(self):
observation = self.observation_space.sample()
return observation
def step(self, action):
del action
observation = self.observation_space.sample()
reward, terminal, info = 0.0, False, {}
return observation, reward, terminal, info
NESTED_DICT_TEST_CASES = (
(
Dict(
{
"key1": Box(shape=(2,), low=-1, high=1, dtype=np.float32),
"key2": Dict(
{
"subkey1": Box(shape=(2,), low=-1, high=1, dtype=np.float32),
"subkey2": Box(shape=(2,), low=-1, high=1, dtype=np.float32),
}
),
}
),
(6,),
),
(
Dict(
{
"key1": Box(shape=(2, 3), low=-1, high=1, dtype=np.float32),
"key2": Box(shape=(), low=-1, high=1, dtype=np.float32),
"key3": Box(shape=(2,), low=-1, high=1, dtype=np.float32),
}
),
(9,),
),
(
Dict(
{
"key1": Tuple(
(
Box(shape=(2,), low=-1, high=1, dtype=np.float32),
Box(shape=(2,), low=-1, high=1, dtype=np.float32),
)
),
"key2": Box(shape=(), low=-1, high=1, dtype=np.float32),
"key3": Box(shape=(2,), low=-1, high=1, dtype=np.float32),
}
),
(7,),
),
(
Dict(
{
"key1": Tuple((Box(shape=(2,), low=-1, high=1, dtype=np.float32),)),
"key2": Box(shape=(), low=-1, high=1, dtype=np.float32),
"key3": Box(shape=(2,), low=-1, high=1, dtype=np.float32),
}
),
(5,),
),
(
Dict(
{
"key1": Tuple(
(Dict({"key9": Box(shape=(2,), low=-1, high=1, dtype=np.float32)}),)
),
"key2": Box(shape=(), low=-1, high=1, dtype=np.float32),
"key3": Box(shape=(2,), low=-1, high=1, dtype=np.float32),
}
),
(5,),
),
)
class TestNestedDictWrapper(object):
@pytest.mark.parametrize("observation_space, flat_shape", NESTED_DICT_TEST_CASES)
def test_nested_dicts_size(self, observation_space, flat_shape):
env = FakeEnvironment(observation_space=observation_space)
# Make sure we are testing the right environment for the test.
observation_space = env.observation_space
assert isinstance(observation_space, Dict)
wrapped_env = FlattenObservation(FilterObservation(env, env.obs_keys))
assert wrapped_env.observation_space.shape == flat_shape
assert wrapped_env.observation_space.dtype == np.float32
@pytest.mark.parametrize("observation_space, flat_shape", NESTED_DICT_TEST_CASES)
def test_nested_dicts_ravel(self, observation_space, flat_shape):
env = FakeEnvironment(observation_space=observation_space)
wrapped_env = FlattenObservation(FilterObservation(env, env.obs_keys))
obs = wrapped_env.reset()
assert obs.shape == wrapped_env.observation_space.shape
| 3,924 | Python | .py | 103 | 27.504854 | 88 | 0.538502 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,240 | flatten_test.py | WindyLab_Gym-PPS/tests/gym/wrappers/flatten_test.py | """Tests for the flatten observation wrapper."""
from collections import OrderedDict
import numpy as np
import pytest
import gym
from gym.spaces import Box, Dict, unflatten, flatten
from gym.wrappers import FlattenObservation
class FakeEnvironment(gym.Env):
def __init__(self, observation_space):
self.observation_space = observation_space
def reset(self):
self.observation = self.observation_space.sample()
return self.observation
OBSERVATION_SPACES = (
(
Dict(
OrderedDict(
[
("key1", Box(shape=(2, 3), low=0, high=0, dtype=np.float32)),
("key2", Box(shape=(), low=1, high=1, dtype=np.float32)),
("key3", Box(shape=(2,), low=2, high=2, dtype=np.float32)),
]
)
),
True,
),
(
Dict(
OrderedDict(
[
("key2", Box(shape=(), low=0, high=0, dtype=np.float32)),
("key3", Box(shape=(2,), low=1, high=1, dtype=np.float32)),
("key1", Box(shape=(2, 3), low=2, high=2, dtype=np.float32)),
]
)
),
True,
),
(
Dict(
{
"key1": Box(shape=(2, 3), low=-1, high=1, dtype=np.float32),
"key2": Box(shape=(), low=-1, high=1, dtype=np.float32),
"key3": Box(shape=(2,), low=-1, high=1, dtype=np.float32),
}
),
False,
),
)
class TestFlattenEnvironment(object):
@pytest.mark.parametrize("observation_space, ordered_values", OBSERVATION_SPACES)
def test_flattened_environment(self, observation_space, ordered_values):
"""
make sure that flattened observations occur in the order expected
"""
env = FakeEnvironment(observation_space=observation_space)
wrapped_env = FlattenObservation(env)
flattened = wrapped_env.reset()
unflattened = unflatten(env.observation_space, flattened)
original = env.observation
self._check_observations(original, flattened, unflattened, ordered_values)
@pytest.mark.parametrize("observation_space, ordered_values", OBSERVATION_SPACES)
def test_flatten_unflatten(self, observation_space, ordered_values):
"""
test flatten and unflatten functions directly
"""
original = observation_space.sample()
flattened = flatten(observation_space, original)
unflattened = unflatten(observation_space, flattened)
self._check_observations(original, flattened, unflattened, ordered_values)
def _check_observations(self, original, flattened, unflattened, ordered_values):
# make sure that unflatten(flatten(original)) == original
assert set(unflattened.keys()) == set(original.keys())
for k, v in original.items():
np.testing.assert_allclose(unflattened[k], v)
if ordered_values:
# make sure that the values were flattened in the order they appeared in the
# OrderedDict
np.testing.assert_allclose(sorted(flattened), flattened)
| 3,188 | Python | .py | 79 | 30.78481 | 88 | 0.603816 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,241 | error.py | WindyLab_Gym-PPS/gym/error.py | import sys
class Error(Exception):
pass
# Local errors
class Unregistered(Error):
"""Raised when the user requests an item from the registry that does
not actually exist.
"""
pass
class UnregisteredEnv(Unregistered):
"""Raised when the user requests an env from the registry that does
not actually exist.
"""
pass
class UnregisteredBenchmark(Unregistered):
"""Raised when the user requests an env from the registry that does
not actually exist.
"""
pass
class DeprecatedEnv(Error):
"""Raised when the user requests an env from the registry with an
older version number than the latest env with the same name.
"""
pass
class UnseedableEnv(Error):
"""Raised when the user tries to seed an env that does not support
seeding.
"""
pass
class DependencyNotInstalled(Error):
pass
class UnsupportedMode(Exception):
"""Raised when the user requests a rendering mode not supported by the
environment.
"""
pass
class ResetNeeded(Exception):
"""When the monitor is active, raised when the user tries to step an
environment that's already done.
"""
pass
class ResetNotAllowed(Exception):
"""When the monitor is active, raised when the user tries to step an
environment that's not yet done.
"""
pass
class InvalidAction(Exception):
"""Raised when the user performs an action not contained within the
action space
"""
pass
# API errors
class APIError(Error):
def __init__(
self,
message=None,
http_body=None,
http_status=None,
json_body=None,
headers=None,
):
super(APIError, self).__init__(message)
if http_body and hasattr(http_body, "decode"):
try:
http_body = http_body.decode("utf-8")
except:
http_body = (
"<Could not decode body as utf-8. "
"Please report to [email protected]>"
)
self._message = message
self.http_body = http_body
self.http_status = http_status
self.json_body = json_body
self.headers = headers or {}
self.request_id = self.headers.get("request-id", None)
def __unicode__(self):
if self.request_id is not None:
msg = self._message or "<empty message>"
return u"Request {0}: {1}".format(self.request_id, msg)
else:
return self._message
def __str__(self):
try: # Python 2
return unicode(self).encode("utf-8")
except NameError: # Python 3
return self.__unicode__()
class APIConnectionError(APIError):
pass
class InvalidRequestError(APIError):
def __init__(
self,
message,
param,
http_body=None,
http_status=None,
json_body=None,
headers=None,
):
super(InvalidRequestError, self).__init__(
message, http_body, http_status, json_body, headers
)
self.param = param
class AuthenticationError(APIError):
pass
class RateLimitError(APIError):
pass
# Video errors
class VideoRecorderError(Error):
pass
class InvalidFrame(Error):
pass
# Wrapper errors
class DoubleWrapperError(Error):
pass
class WrapAfterConfigureError(Error):
pass
class RetriesExceededError(Error):
pass
# Vectorized environments errors
class AlreadyPendingCallError(Exception):
"""
Raised when `reset`, or `step` is called asynchronously (e.g. with
`reset_async`, or `step_async` respectively), and `reset_async`, or
`step_async` (respectively) is called again (without a complete call to
`reset_wait`, or `step_wait` respectively).
"""
def __init__(self, message, name):
super(AlreadyPendingCallError, self).__init__(message)
self.name = name
class NoAsyncCallError(Exception):
"""
Raised when an asynchronous `reset`, or `step` is not running, but
`reset_wait`, or `step_wait` (respectively) is called.
"""
def __init__(self, message, name):
super(NoAsyncCallError, self).__init__(message)
self.name = name
class ClosedEnvironmentError(Exception):
"""
Trying to call `reset`, or `step`, while the environment is closed.
"""
pass
class CustomSpaceError(Exception):
"""
The space is a custom gym.Space instance, and is not supported by
`AsyncVectorEnv` with `shared_memory=True`.
"""
pass
| 4,565 | Python | .py | 149 | 24.208054 | 75 | 0.651302 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,242 | __init__.py | WindyLab_Gym-PPS/gym/__init__.py | import distutils.version
import os
import sys
import warnings
from gym import error
from gym.version import VERSION as __version__
from gym.core import (
Env,
GoalEnv,
Wrapper,
ObservationWrapper,
ActionWrapper,
RewardWrapper,
)
from gym.spaces import Space
from gym.envs import make, spec, register
from gym import logger
from gym import vector
from gym import wrappers
__all__ = ["Env", "Space", "Wrapper", "make", "spec", "register"]
| 464 | Python | .py | 20 | 20.85 | 65 | 0.759637 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,243 | logger.py | WindyLab_Gym-PPS/gym/logger.py | import warnings
from gym.utils import colorize
DEBUG = 10
INFO = 20
WARN = 30
ERROR = 40
DISABLED = 50
MIN_LEVEL = 30
def set_level(level):
"""
Set logging threshold on current logger.
"""
global MIN_LEVEL
MIN_LEVEL = level
def debug(msg, *args):
if MIN_LEVEL <= DEBUG:
print("%s: %s" % ("DEBUG", msg % args))
def info(msg, *args):
if MIN_LEVEL <= INFO:
print("%s: %s" % ("INFO", msg % args))
def warn(msg, *args):
if MIN_LEVEL <= WARN:
warnings.warn(colorize("%s: %s" % ("WARN", msg % args), "yellow"))
def error(msg, *args):
if MIN_LEVEL <= ERROR:
print(colorize("%s: %s" % ("ERROR", msg % args), "red"))
# DEPRECATED:
setLevel = set_level
| 725 | Python | .py | 28 | 21.928571 | 74 | 0.602639 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,244 | core.py | WindyLab_Gym-PPS/gym/core.py | from abc import abstractmethod
import gym
from gym import error
# from gym.utils import closer
class Env(object):
"""The main OpenAI Gym class. It encapsulates an environment with
arbitrary behind-the-scenes dynamics. An environment can be
partially or fully observed.
The main API methods that users of this class need to know are:
step
reset
render
close
seed
And set the following attributes:
action_space: The Space object corresponding to valid actions
observation_space: The Space object corresponding to valid observations
reward_range: A tuple corresponding to the min and max possible rewards
Note: a default reward range set to [-inf,+inf] already exists. Set it if you want a narrower range.
The methods are accessed publicly as "step", "reset", etc...
"""
# Set this in SOME subclasses
metadata = {"render.modes": []}
reward_range = (-float("inf"), float("inf"))
spec = None
# Set these in ALL subclasses
action_space = None
observation_space = None
@abstractmethod
def step(self, action):
"""Run one timestep of the environment's dynamics. When end of
episode is reached, you are responsible for calling `reset()`
to reset this environment's state.
Accepts an action and returns a tuple (observation, reward, done, info).
Args:
action (object): an action provided by the agent
Returns:
observation (object): agent's observation of the current environment
reward (float) : amount of reward returned after previous action
done (bool): whether the episode has ended, in which case further step() calls will return undefined results
info (dict): contains auxiliary diagnostic information (helpful for debugging, and sometimes learning)
"""
raise NotImplementedError
@abstractmethod
def reset(self):
"""Resets the environment to an initial state and returns an initial
observation.
Note that this function should not reset the environment's random
number generator(s); random variables in the environment's state should
be sampled independently between multiple calls to `reset()`. In other
words, each call of `reset()` should yield an environment suitable for
a new episode, independent of previous episodes.
Returns:
observation (object): the initial observation.
"""
raise NotImplementedError
@abstractmethod
def render(self, mode="human"):
"""Renders the environment.
The set of supported modes varies per environment. (And some
environments do not support rendering at all.) By convention,
if mode is:
- human: render to the current display or terminal and
return nothing. Usually for human consumption.
- rgb_array: Return an numpy.ndarray with shape (x, y, 3),
representing RGB values for an x-by-y pixel image, suitable
for turning into a video.
- ansi: Return a string (str) or StringIO.StringIO containing a
terminal-style text representation. The text can include newlines
and ANSI escape sequences (e.g. for colors).
Note:
Make sure that your class's metadata 'render.modes' key includes
the list of supported modes. It's recommended to call super()
in implementations to use the functionality of this method.
Args:
mode (str): the mode to render with
Example:
class MyEnv(Env):
metadata = {'render.modes': ['human', 'rgb_array']}
def render(self, mode='human'):
if mode == 'rgb_array':
return np.array(...) # return RGB frame suitable for video
elif mode == 'human':
... # pop up a window and render
else:
super(MyEnv, self).render(mode=mode) # just raise an exception
"""
raise NotImplementedError
def close(self):
"""Override close in your subclass to perform any necessary cleanup.
Environments will automatically close() themselves when
garbage collected or when the program exits.
"""
pass
def seed(self, seed=None):
"""Sets the seed for this env's random number generator(s).
Note:
Some environments use multiple pseudorandom number generators.
We want to capture all such seeds used in order to ensure that
there aren't accidental correlations between multiple generators.
Returns:
list<bigint>: Returns the list of seeds used in this env's random
number generators. The first value in the list should be the
"main" seed, or the value which a reproducer should pass to
'seed'. Often, the main seed equals the provided 'seed', but
this won't be true if seed=None, for example.
"""
return
@property
def unwrapped(self):
"""Completely unwrap this env.
Returns:
gym.Env: The base non-wrapped gym.Env instance
"""
return self
def __str__(self):
if self.spec is None:
return "<{} instance>".format(type(self).__name__)
else:
return "<{}<{}>>".format(type(self).__name__, self.spec.id)
def __enter__(self):
"""Support with-statement for the environment."""
return self
def __exit__(self, *args):
"""Support with-statement for the environment."""
self.close()
# propagate exception
return False
class GoalEnv(Env):
"""A goal-based environment. It functions just as any regular OpenAI Gym environment but it
imposes a required structure on the observation_space. More concretely, the observation
space is required to contain at least three elements, namely `observation`, `desired_goal`, and
`achieved_goal`. Here, `desired_goal` specifies the goal that the agent should attempt to achieve.
`achieved_goal` is the goal that it currently achieved instead. `observation` contains the
actual observations of the environment as per usual.
"""
def reset(self):
# Enforce that each GoalEnv uses a Goal-compatible observation space.
if not isinstance(self.observation_space, gym.spaces.Dict):
raise error.Error(
"GoalEnv requires an observation space of type gym.spaces.Dict"
)
for key in ["observation", "achieved_goal", "desired_goal"]:
if key not in self.observation_space.spaces:
raise error.Error(
'GoalEnv requires the "{}" key to be part of the observation dictionary.'.format(
key
)
)
@abstractmethod
def compute_reward(self, achieved_goal, desired_goal, info):
"""Compute the step reward. This externalizes the reward function and makes
it dependent on a desired goal and the one that was achieved. If you wish to include
additional rewards that are independent of the goal, you can include the necessary values
to derive it in 'info' and compute it accordingly.
Args:
achieved_goal (object): the goal that was achieved during execution
desired_goal (object): the desired goal that we asked the agent to attempt to achieve
info (dict): an info dictionary with additional information
Returns:
float: The reward that corresponds to the provided achieved goal w.r.t. to the desired
goal. Note that the following should always hold true:
ob, reward, done, info = env.step()
assert reward == env.compute_reward(ob['achieved_goal'], ob['desired_goal'], info)
"""
raise NotImplementedError
class Wrapper(Env):
"""Wraps the environment to allow a modular transformation.
This class is the base class for all wrappers. The subclass could override
some methods to change the behavior of the original environment without touching the
original code.
.. note::
Don't forget to call ``super().__init__(env)`` if the subclass overrides :meth:`__init__`.
"""
def __init__(self, env):
self.env = env
self.action_space = self.env.action_space
self.observation_space = self.env.observation_space
self.reward_range = self.env.reward_range
self.metadata = self.env.metadata
def __getattr__(self, name):
if name.startswith("_"):
raise AttributeError(
"attempted to get missing private attribute '{}'".format(name)
)
return getattr(self.env, name)
@property
def spec(self):
return self.env.spec
@classmethod
def class_name(cls):
return cls.__name__
def step(self, action):
return self.env.step(action)
def reset(self, **kwargs):
return self.env.reset(**kwargs)
def render(self, mode="human", **kwargs):
return self.env.render(mode, **kwargs)
def close(self):
return self.env.close()
def seed(self, seed=None):
return self.env.seed(seed)
def compute_reward(self, achieved_goal, desired_goal, info):
return self.env.compute_reward(achieved_goal, desired_goal, info)
def __str__(self):
return "<{}{}>".format(type(self).__name__, self.env)
def __repr__(self):
return str(self)
@property
def unwrapped(self):
return self.env.unwrapped
class ObservationWrapper(Wrapper):
def reset(self, **kwargs):
observation = self.env.reset(**kwargs)
return self.observation(observation)
def step(self, action):
observation, reward, done, info = self.env.step(action)
return self.observation(observation), reward, done, info
@abstractmethod
def observation(self, observation):
raise NotImplementedError
class RewardWrapper(Wrapper):
def reset(self, **kwargs):
return self.env.reset(**kwargs)
def step(self, action):
observation, reward, done, info = self.env.step(action)
return observation, self.reward(reward), done, info
@abstractmethod
def reward(self, reward):
raise NotImplementedError
class ActionWrapper(Wrapper):
def reset(self, **kwargs):
return self.env.reset(**kwargs)
def step(self, action):
return self.env.step(self.action(action))
@abstractmethod
def action(self, action):
raise NotImplementedError
@abstractmethod
def reverse_action(self, action):
raise NotImplementedError
| 10,904 | Python | .py | 241 | 36.224066 | 120 | 0.650983 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,245 | play.py | WindyLab_Gym-PPS/gym/utils/play.py | import gym
import pygame
import matplotlib
import argparse
from gym import logger
try:
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
except ImportError as e:
logger.warn("failed to set matplotlib backend, plotting will not work: %s" % str(e))
plt = None
from collections import deque
from pygame.locals import VIDEORESIZE
def display_arr(screen, arr, video_size, transpose):
arr_min, arr_max = arr.min(), arr.max()
arr = 255.0 * (arr - arr_min) / (arr_max - arr_min)
pyg_img = pygame.surfarray.make_surface(arr.swapaxes(0, 1) if transpose else arr)
pyg_img = pygame.transform.scale(pyg_img, video_size)
screen.blit(pyg_img, (0, 0))
def play(env, transpose=True, fps=30, zoom=None, callback=None, keys_to_action=None):
"""Allows one to play the game using keyboard.
To simply play the game use:
play(gym.make("Pong-v4"))
Above code works also if env is wrapped, so it's particularly useful in
verifying that the frame-level preprocessing does not render the game
unplayable.
If you wish to plot real time statistics as you play, you can use
gym.utils.play.PlayPlot. Here's a sample code for plotting the reward
for last 5 second of gameplay.
def callback(obs_t, obs_tp1, action, rew, done, info):
return [rew,]
plotter = PlayPlot(callback, 30 * 5, ["reward"])
env = gym.make("Pong-v4")
play(env, callback=plotter.callback)
Arguments
---------
env: gym.Env
Environment to use for playing.
transpose: bool
If True the output of observation is transposed.
Defaults to true.
fps: int
Maximum number of steps of the environment to execute every second.
Defaults to 30.
zoom: float
Make screen edge this many times bigger
callback: lambda or None
Callback if a callback is provided it will be executed after
every step. It takes the following input:
obs_t: observation before performing action
obs_tp1: observation after performing action
action: action that was executed
rew: reward that was received
done: whether the environment is done or not
info: debug info
keys_to_action: dict: tuple(int) -> int or None
Mapping from keys pressed to action performed.
For example if pressed 'w' and space at the same time is supposed
to trigger action number 2 then key_to_action dict would look like this:
{
# ...
            tuple(sorted((ord('w'), ord(' ')))): 2
# ...
}
If None, default key_to_action mapping for that env is used, if provided.
"""
env.reset()
rendered = env.render(mode="rgb_array")
if keys_to_action is None:
if hasattr(env, "get_keys_to_action"):
keys_to_action = env.get_keys_to_action()
elif hasattr(env.unwrapped, "get_keys_to_action"):
keys_to_action = env.unwrapped.get_keys_to_action()
else:
assert False, (
env.spec.id
+ " does not have explicit key to action mapping, "
+ "please specify one manually"
)
relevant_keys = set(sum(map(list, keys_to_action.keys()), []))
video_size = [rendered.shape[1], rendered.shape[0]]
if zoom is not None:
video_size = int(video_size[0] * zoom), int(video_size[1] * zoom)
pressed_keys = []
running = True
env_done = True
screen = pygame.display.set_mode(video_size)
clock = pygame.time.Clock()
while running:
if env_done:
env_done = False
obs = env.reset()
else:
action = keys_to_action.get(tuple(sorted(pressed_keys)), 0)
prev_obs = obs
obs, rew, env_done, info = env.step(action)
if callback is not None:
callback(prev_obs, obs, action, rew, env_done, info)
if obs is not None:
rendered = env.render(mode="rgb_array")
display_arr(screen, rendered, transpose=transpose, video_size=video_size)
# process pygame events
for event in pygame.event.get():
# test events, set key states
if event.type == pygame.KEYDOWN:
if event.key in relevant_keys:
pressed_keys.append(event.key)
elif event.key == 27:
running = False
elif event.type == pygame.KEYUP:
if event.key in relevant_keys:
pressed_keys.remove(event.key)
elif event.type == pygame.QUIT:
running = False
elif event.type == VIDEORESIZE:
video_size = event.size
screen = pygame.display.set_mode(video_size)
pygame.display.flip()
clock.tick(fps)
pygame.quit()
class PlayPlot(object):
def __init__(self, callback, horizon_timesteps, plot_names):
self.data_callback = callback
self.horizon_timesteps = horizon_timesteps
self.plot_names = plot_names
assert plt is not None, "matplotlib backend failed, plotting will not work"
num_plots = len(self.plot_names)
self.fig, self.ax = plt.subplots(num_plots)
if num_plots == 1:
self.ax = [self.ax]
for axis, name in zip(self.ax, plot_names):
axis.set_title(name)
self.t = 0
self.cur_plot = [None for _ in range(num_plots)]
self.data = [deque(maxlen=horizon_timesteps) for _ in range(num_plots)]
def callback(self, obs_t, obs_tp1, action, rew, done, info):
points = self.data_callback(obs_t, obs_tp1, action, rew, done, info)
for point, data_series in zip(points, self.data):
data_series.append(point)
self.t += 1
xmin, xmax = max(0, self.t - self.horizon_timesteps), self.t
for i, plot in enumerate(self.cur_plot):
if plot is not None:
plot.remove()
self.cur_plot[i] = self.ax[i].scatter(
range(xmin, xmax), list(self.data[i]), c="blue"
)
self.ax[i].set_xlim(xmin, xmax)
plt.pause(0.000001)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--env",
type=str,
default="MontezumaRevengeNoFrameskip-v4",
help="Define Environment",
)
args = parser.parse_args()
env = gym.make(args.env)
play(env, zoom=4, fps=60)
if __name__ == "__main__":
main()
| 6,639 | Python | .py | 163 | 31.576687 | 88 | 0.606239 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,246 | json_utils.py | WindyLab_Gym-PPS/gym/utils/json_utils.py | import numpy as np
def json_encode_np(obj):
"""
JSON can't serialize numpy types, so convert to pure python
"""
if isinstance(obj, np.ndarray):
return list(obj)
elif isinstance(obj, np.float32):
return float(obj)
elif isinstance(obj, np.float64):
return float(obj)
elif isinstance(obj, np.int8):
return int(obj)
elif isinstance(obj, np.int16):
return int(obj)
elif isinstance(obj, np.int32):
return int(obj)
elif isinstance(obj, np.int64):
return int(obj)
else:
return obj
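# --- Illustrative usage sketch (assumption: json_encode_np is passed as the
# `default` hook of json.dumps; the payload below is hypothetical) ---
if __name__ == "__main__":
    import json
    payload = {"obs": np.zeros(3, dtype=np.float32), "step": np.int64(7)}
    print(json.dumps(payload, default=json_encode_np))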
| 583 | Python | .py | 21 | 21.52381 | 63 | 0.632143 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,247 | colorize.py | WindyLab_Gym-PPS/gym/utils/colorize.py | """A set of common utilities used within the environments. These are
not intended as API functions, and will not remain stable over time.
"""
color2num = dict(
gray=30,
red=31,
green=32,
yellow=33,
blue=34,
magenta=35,
cyan=36,
white=37,
crimson=38,
)
def colorize(string, color, bold=False, highlight=False):
"""Return string surrounded by appropriate terminal color codes to
print colorized text. Valid colors: gray, red, green, yellow,
blue, magenta, cyan, white, crimson
"""
attr = []
num = color2num[color]
if highlight:
num += 10
attr.append(str(num))
if bold:
attr.append("1")
attrs = ";".join(attr)
return "\x1b[%sm%s\x1b[0m" % (attrs, string)
| 753 | Python | .py | 28 | 22.321429 | 70 | 0.651872 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,248 | seeding.py | WindyLab_Gym-PPS/gym/utils/seeding.py | import hashlib
import numpy as np
import os
import random as _random
import struct
import sys
from gym import error
def np_random(seed=None):
if seed is not None and not (isinstance(seed, int) and 0 <= seed):
raise error.Error(
"Seed must be a non-negative integer or omitted, not {}".format(seed)
)
seed = create_seed(seed)
rng = np.random.RandomState()
rng.seed(_int_list_from_bigint(hash_seed(seed)))
return rng, seed
def hash_seed(seed=None, max_bytes=8):
"""Any given evaluation is likely to have many PRNG's active at
once. (Most commonly, because the environment is running in
multiple processes.) There's literature indicating that having
    linear correlations between seeds of multiple PRNGs can correlate
the outputs:
http://blogs.unity3d.com/2015/01/07/a-primer-on-repeatable-random-numbers/
http://stackoverflow.com/questions/1554958/how-different-do-random-seeds-need-to-be
http://dl.acm.org/citation.cfm?id=1276928
Thus, for sanity we hash the seeds before using them. (This scheme
is likely not crypto-strength, but it should be good enough to get
rid of simple correlations.)
Args:
seed (Optional[int]): None seeds from an operating system specific randomness source.
max_bytes: Maximum number of bytes to use in the hashed seed.
"""
if seed is None:
seed = create_seed(max_bytes=max_bytes)
hash = hashlib.sha512(str(seed).encode("utf8")).digest()
return _bigint_from_bytes(hash[:max_bytes])
def create_seed(a=None, max_bytes=8):
"""Create a strong random seed. Otherwise, Python 2 would seed using
the system time, which might be non-robust especially in the
presence of concurrency.
Args:
a (Optional[int, str]): None seeds from an operating system specific randomness source.
max_bytes: Maximum number of bytes to use in the seed.
"""
# Adapted from https://svn.python.org/projects/python/tags/r32/Lib/random.py
if a is None:
a = _bigint_from_bytes(os.urandom(max_bytes))
elif isinstance(a, str):
a = a.encode("utf8")
a += hashlib.sha512(a).digest()
a = _bigint_from_bytes(a[:max_bytes])
elif isinstance(a, int):
a = a % 2 ** (8 * max_bytes)
else:
raise error.Error("Invalid type for seed: {} ({})".format(type(a), a))
return a
# TODO: don't hardcode sizeof_int here
def _bigint_from_bytes(bytes):
sizeof_int = 4
padding = sizeof_int - len(bytes) % sizeof_int
bytes += b"\0" * padding
int_count = int(len(bytes) / sizeof_int)
unpacked = struct.unpack("{}I".format(int_count), bytes)
accum = 0
for i, val in enumerate(unpacked):
accum += 2 ** (sizeof_int * 8 * i) * val
return accum
def _int_list_from_bigint(bigint):
# Special case 0
if bigint < 0:
raise error.Error("Seed must be non-negative, not {}".format(bigint))
elif bigint == 0:
return [0]
ints = []
while bigint > 0:
bigint, mod = divmod(bigint, 2 ** 32)
ints.append(mod)
return ints
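# --- Illustrative usage sketch (seed value is arbitrary) ---
# np_random returns both the seeded RandomState and the seed actually used,
# which is the pair an env.seed() implementation would typically report back.
if __name__ == "__main__":
    rng, used_seed = np_random(42)
    print("seed:", used_seed)
    print("first draw:", rng.uniform())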
| 3,120 | Python | .py | 78 | 34.397436 | 95 | 0.67218 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,249 | atomic_write.py | WindyLab_Gym-PPS/gym/utils/atomic_write.py | # Based on http://stackoverflow.com/questions/2333872/atomic-writing-to-file-with-python
import os
from contextlib import contextmanager
# We would ideally atomically replace any existing file with the new
# version. However, on Windows there's no Python-only solution prior
# to Python 3.3. (This library includes a C extension to do so:
# https://pypi.python.org/pypi/pyosreplace/0.1.)
#
# Correspondingly, we make a best effort, but on Python < 3.3 use a
# replace method which could result in the file temporarily
# disappearing.
import sys
if sys.version_info >= (3, 3):
# Python 3.3 and up have a native `replace` method
from os import replace
elif sys.platform.startswith("win"):
def replace(src, dst):
# TODO: on Windows, this will raise if the file is in use,
# which is possible. We'll need to make this more robust over
# time.
try:
os.remove(dst)
except OSError:
pass
os.rename(src, dst)
else:
# POSIX rename() is always atomic
from os import rename as replace
@contextmanager
def atomic_write(filepath, binary=False, fsync=False):
"""Writeable file object that atomically updates a file (using a temporary file). In some cases (namely Python < 3.3 on Windows), this could result in an existing file being temporarily unlinked.
:param filepath: the file path to be opened
:param binary: whether to open the file in a binary mode instead of textual
:param fsync: whether to force write the file to disk
"""
tmppath = filepath + "~"
while os.path.isfile(tmppath):
tmppath += "~"
try:
with open(tmppath, "wb" if binary else "w") as file:
yield file
if fsync:
file.flush()
os.fsync(file.fileno())
replace(tmppath, filepath)
finally:
try:
os.remove(tmppath)
except (IOError, OSError):
pass
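# --- Illustrative usage sketch (file name and contents are hypothetical) ---
if __name__ == "__main__":
    with atomic_write("stats.json", fsync=True) as f:
        f.write('{"episodes": 10}')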
| 1,955 | Python | .py | 50 | 32.94 | 199 | 0.675462 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,250 | ezpickle.py | WindyLab_Gym-PPS/gym/utils/ezpickle.py | class EzPickle(object):
"""Objects that are pickled and unpickled via their constructor
arguments.
Example usage:
class Dog(Animal, EzPickle):
def __init__(self, furcolor, tailkind="bushy"):
Animal.__init__()
EzPickle.__init__(furcolor, tailkind)
...
When this object is unpickled, a new Dog will be constructed by passing the provided
furcolor and tailkind into the constructor. However, philosophers are still not sure
whether it is still the same dog.
This is generally needed only for environments which wrap C/C++ code, such as MuJoCo
and Atari.
"""
def __init__(self, *args, **kwargs):
self._ezpickle_args = args
self._ezpickle_kwargs = kwargs
def __getstate__(self):
return {
"_ezpickle_args": self._ezpickle_args,
"_ezpickle_kwargs": self._ezpickle_kwargs,
}
def __setstate__(self, d):
out = type(self)(*d["_ezpickle_args"], **d["_ezpickle_kwargs"])
self.__dict__.update(out.__dict__)
| 1,088 | Python | .py | 26 | 33.346154 | 88 | 0.61327 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,251 | closer.py | WindyLab_Gym-PPS/gym/utils/closer.py | import atexit
import threading
import weakref
class Closer(object):
"""A registry that ensures your objects get closed, whether manually,
upon garbage collection, or upon exit. To work properly, your
objects need to cooperate and do something like the following:
```
closer = Closer()
class Example(object):
def __init__(self):
self._id = closer.register(self)
def close(self):
# Probably worth making idempotent too!
...
closer.unregister(self._id)
def __del__(self):
self.close()
```
That is, your objects should:
- register() themselves and save the returned ID
- unregister() themselves upon close()
- include a __del__ method which close()'s the object
"""
def __init__(self, atexit_register=True):
self.lock = threading.Lock()
self.next_id = -1
self.closeables = weakref.WeakValueDictionary()
if atexit_register:
atexit.register(self.close)
def generate_next_id(self):
with self.lock:
self.next_id += 1
return self.next_id
def register(self, closeable):
"""Registers an object with a 'close' method.
Returns:
int: The registration ID of this object. It is the caller's responsibility to save this ID if early closing is desired.
"""
assert hasattr(closeable, "close"), "No close method for {}".format(closeable)
next_id = self.generate_next_id()
self.closeables[next_id] = closeable
return next_id
def unregister(self, id):
assert id is not None
if id in self.closeables:
del self.closeables[id]
def close(self):
# Explicitly fetch all monitors first so that they can't disappear while
# we iterate. cf. http://stackoverflow.com/a/12429620
closeables = list(self.closeables.values())
for closeable in closeables:
closeable.close()
| 2,020 | Python | .py | 53 | 29.886792 | 131 | 0.630123 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,252 | __init__.py | WindyLab_Gym-PPS/gym/utils/__init__.py | """A set of common utilities used within the environments. These are
not intended as API functions, and will not remain stable over time.
"""
# These submodules should not have any import-time dependencies.
# We want this since we use `utils` during our import-time sanity checks
# that verify that our dependencies are actually present.
from .colorize import colorize
from .ezpickle import EzPickle
from .lib import *
| 421 | Python | .py | 9 | 45.555556 | 72 | 0.804878 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,253 | env_checker.py | WindyLab_Gym-PPS/gym/utils/env_checker.py | """
This file is originally from the Stable Baselines3 repository hosted on GitHub
(https://github.com/DLR-RM/stable-baselines3/)
Original Author: Antonin Raffin
It also uses some warnings/assertions from the PettingZoo repository hosted on GitHub
(https://github.com/PettingZoo-Team/PettingZoo)
Original Author: Justin Terry
These projects are covered by the MIT License.
"""
import warnings
from typing import Union
import gym
import numpy as np
from gym import spaces
def _is_numpy_array_space(space: spaces.Space) -> bool:
"""
Returns False if provided space is not representable as a single numpy array
(e.g. Dict and Tuple spaces return False)
"""
return not isinstance(space, (spaces.Dict, spaces.Tuple))
def _check_image_input(observation_space: spaces.Box, key: str = "") -> None:
"""
Check that the input adheres to general standards
when the observation is apparently an image.
"""
if observation_space.dtype != np.uint8:
warnings.warn(
f"It seems that your observation {key} is an image but the `dtype` "
"of your observation_space is not `np.uint8`. "
"If your observation is not an image, we recommend you to flatten the observation "
"to have only a 1D vector"
)
if np.any(observation_space.low != 0) or np.any(observation_space.high != 255):
warnings.warn(
f"It seems that your observation space {key} is an image but the "
"upper and lower bounds are not in [0, 255]. "
"Generally, CNN policies assume observations are within that range, "
"so you may encounter an issue if the observation values are not."
)
def _check_nan(env: gym.Env, check_inf: bool = True) -> None:
"""Check for NaN and Inf."""
for _ in range(10):
action = env.action_space.sample()
observation, reward, _, _ = env.step(action)
if np.any(np.isnan(observation)):
warnings.warn("Encountered NaN value in observations.")
if np.any(np.isnan(reward)):
warnings.warn("Encountered NaN value in rewards.")
if check_inf and np.any(np.isinf(observation)):
warnings.warn("Encountered inf value in observations.")
if check_inf and np.any(np.isinf(reward)):
warnings.warn("Encountered inf value in rewards.")
def _check_obs(
obs: Union[tuple, dict, np.ndarray, int],
observation_space: spaces.Space,
method_name: str,
) -> None:
"""
Check that the observation returned by the environment
correspond to the declared one.
"""
if not isinstance(observation_space, spaces.Tuple):
assert not isinstance(
obs, tuple
), f"The observation returned by the `{method_name}()` method should be a single value, not a tuple"
# The check for a GoalEnv is done by the base class
if isinstance(observation_space, spaces.Discrete):
assert isinstance(
obs, int
), f"The observation returned by `{method_name}()` method must be an int"
elif _is_numpy_array_space(observation_space):
assert isinstance(
obs, np.ndarray
), f"The observation returned by `{method_name}()` method must be a numpy array"
assert observation_space.contains(
obs
), f"The observation returned by the `{method_name}()` method does not match the given observation space"
def _check_box_obs(observation_space: spaces.Box, key: str = "") -> None:
"""
Check that the observation space is correctly formatted
when dealing with a ``Box()`` space. In particular, it checks:
- that the dimensions are big enough when it is an image, and that the type matches
- that the observation has an expected shape (warn the user if not)
"""
# If image, check the low and high values, the type and the number of channels
# and the shape (minimal value)
if len(observation_space.shape) == 3:
_check_image_input(observation_space)
if len(observation_space.shape) not in [1, 3]:
warnings.warn(
f"Your observation {key} has an unconventional shape (neither an image, nor a 1D vector). "
"We recommend you to flatten the observation "
"to have only a 1D vector or use a custom policy to properly process the data."
)
if np.any(np.equal(observation_space.low, -np.inf)):
warnings.warn(
"Agent's minimum observation space value is -infinity. This is probably too low."
)
if np.any(np.equal(observation_space.high, np.inf)):
warnings.warn(
"Agent's maxmimum observation space value is infinity. This is probably too high"
)
if np.any(np.equal(observation_space.low, observation_space.high)):
warnings.warn("Agent's maximum and minimum observation space values are equal")
if np.any(np.greater(observation_space.low, observation_space.high)):
assert False, "Agent's minimum observation value is greater than it's maximum"
if observation_space.low.shape != observation_space.shape:
assert (
False
), "Agent's observation_space.low and observation_space have different shapes"
if observation_space.high.shape != observation_space.shape:
assert (
False
), "Agent's observation_space.high and observation_space have different shapes"
def _check_box_action(action_space: spaces.Box):
if np.any(np.equal(action_space.low, -np.inf)):
warnings.warn(
"Agent's minimum action space value is -infinity. This is probably too low."
)
if np.any(np.equal(action_space.high, np.inf)):
warnings.warn(
"Agent's maxmimum action space value is infinity. This is probably too high"
)
if np.any(np.equal(action_space.low, action_space.high)):
warnings.warn("Agent's maximum and minimum action space values are equal")
if np.any(np.greater(action_space.low, action_space.high)):
assert False, "Agent's minimum action value is greater than it's maximum"
if action_space.low.shape != action_space.shape:
assert False, "Agent's action_space.low and action_space have different shapes"
if action_space.high.shape != action_space.shape:
assert False, "Agent's action_space.high and action_space have different shapes"
def _check_normalized_action(action_space: spaces.Box):
if (
np.any(np.abs(action_space.low) != np.abs(action_space.high))
or np.any(np.abs(action_space.low) > 1)
or np.any(np.abs(action_space.high) > 1)
):
warnings.warn(
"We recommend you to use a symmetric and normalized Box action space (range=[-1, 1]) "
"cf https://stable-baselines3.readthedocs.io/en/master/guide/rl_tips.html"
)
def _check_returned_values(
env: gym.Env, observation_space: spaces.Space, action_space: spaces.Space
) -> None:
"""
Check the returned values by the env when calling `.reset()` or `.step()` methods.
"""
# because env inherits from gym.Env, we assume that `reset()` and `step()` methods exists
obs = env.reset()
if isinstance(observation_space, spaces.Dict):
assert isinstance(
obs, dict
), "The observation returned by `reset()` must be a dictionary"
for key in observation_space.spaces.keys():
try:
_check_obs(obs[key], observation_space.spaces[key], "reset")
except AssertionError as e:
raise AssertionError(f"Error while checking key={key}: " + str(e))
else:
_check_obs(obs, observation_space, "reset")
# Sample a random action
action = action_space.sample()
data = env.step(action)
assert (
len(data) == 4
), "The `step()` method must return four values: obs, reward, done, info"
# Unpack
obs, reward, done, info = data
if isinstance(observation_space, spaces.Dict):
assert isinstance(
obs, dict
), "The observation returned by `step()` must be a dictionary"
for key in observation_space.spaces.keys():
try:
_check_obs(obs[key], observation_space.spaces[key], "step")
except AssertionError as e:
raise AssertionError(f"Error while checking key={key}: " + str(e))
else:
_check_obs(obs, observation_space, "step")
# We also allow int because the reward will be cast to float
assert isinstance(
reward, (float, int, np.float32)
), "The reward returned by `step()` must be a float"
assert isinstance(done, bool), "The `done` signal must be a boolean"
assert isinstance(
info, dict
), "The `info` returned by `step()` must be a python dictionary"
if isinstance(env, gym.GoalEnv):
# For a GoalEnv, the keys are checked at reset
assert reward == env.compute_reward(
obs["achieved_goal"], obs["desired_goal"], info
)
def _check_spaces(env: gym.Env) -> None:
"""
Check that the observation and action spaces are defined
and inherit from gym.spaces.Space.
"""
# Helper to link to the code, because gym has no proper documentation
gym_spaces = " cf https://github.com/openai/gym/blob/master/gym/spaces/"
assert hasattr(env, "observation_space"), (
"You must specify an observation space (cf gym.spaces)" + gym_spaces
)
assert hasattr(env, "action_space"), (
"You must specify an action space (cf gym.spaces)" + gym_spaces
)
assert isinstance(env.observation_space, spaces.Space), (
"The observation space must inherit from gym.spaces" + gym_spaces
)
assert isinstance(env.action_space, spaces.Space), (
"The action space must inherit from gym.spaces" + gym_spaces
)
# Check render cannot be covered by CI
def _check_render(
env: gym.Env, warn: bool = True, headless: bool = False
) -> None: # pragma: no cover
"""
Check the declared render modes and the `render()`/`close()`
method of the environment.
:param env: The environment to check
:param warn: Whether to output additional warnings
:param headless: Whether to disable render modes
that require a graphical interface. False by default.
"""
render_modes = env.metadata.get("render.modes")
if render_modes is None:
if warn:
warnings.warn(
"No render modes was declared in the environment "
" (env.metadata['render.modes'] is None or not defined), "
"you may have trouble when calling `.render()`"
)
else:
# Don't check render mode that require a
# graphical interface (useful for CI)
if headless and "human" in render_modes:
render_modes.remove("human")
# Check all declared render modes
for render_mode in render_modes:
env.render(mode=render_mode)
env.close()
def check_env(env: gym.Env, warn: bool = True, skip_render_check: bool = True) -> None:
"""
Check that an environment follows Gym API.
This is particularly useful when using a custom environment.
Please take a look at https://github.com/openai/gym/blob/master/gym/core.py
for more information about the API.
It also optionally check that the environment is compatible with Stable-Baselines.
:param env: The Gym environment that will be checked
:param warn: Whether to output additional warnings
mainly related to the interaction with Stable Baselines
:param skip_render_check: Whether to skip the checks for the render method.
True by default (useful for the CI)
"""
assert isinstance(
env, gym.Env
), "Your environment must inherit from the gym.Env class cf https://github.com/openai/gym/blob/master/gym/core.py"
# ============= Check the spaces (observation and action) ================
_check_spaces(env)
# Define aliases for convenience
observation_space = env.observation_space
action_space = env.action_space
# Warn the user if needed.
# A warning means that the environment may run but not work properly with popular RL libraries.
if warn:
obs_spaces = (
observation_space.spaces
if isinstance(observation_space, spaces.Dict)
else {"": observation_space}
)
for key, space in obs_spaces.items():
if isinstance(space, spaces.Box):
_check_box_obs(space, key)
# Check for the action space, it may lead to hard-to-debug issues
if isinstance(action_space, spaces.Box):
_check_box_action(action_space)
_check_normalized_action(action_space)
# ============ Check the returned values ===============
_check_returned_values(env, observation_space, action_space)
# ==== Check the render method and the declared render modes ====
if not skip_render_check:
_check_render(env, warn=warn) # pragma: no cover
# The check only works with numpy arrays
if _is_numpy_array_space(observation_space) and _is_numpy_array_space(action_space):
_check_nan(env)
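# --- Illustrative usage sketch (the environment id is just an example) ---
# Any API or space problems are surfaced as warnings or assertion errors.
if __name__ == "__main__":
    check_env(gym.make("CartPole-v1"), warn=True)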
| 13,259 | Python | .py | 284 | 39.257042 | 118 | 0.661509 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,254 | test_seeding.py | WindyLab_Gym-PPS/gym/utils/tests/test_seeding.py | from gym import error
from gym.utils import seeding
def test_invalid_seeds():
for seed in [-1, "test"]:
try:
seeding.np_random(seed)
except error.Error:
pass
else:
assert False, "Invalid seed {} passed validation".format(seed)
def test_valid_seeds():
for seed in [0, 1]:
random, seed1 = seeding.np_random(seed)
assert seed == seed1
| 420 | Python | .py | 14 | 22.714286 | 74 | 0.604478 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,255 | test_atexit.py | WindyLab_Gym-PPS/gym/utils/tests/test_atexit.py | from gym.utils.closer import Closer
class Closeable(object):
close_called = False
def close(self):
self.close_called = True
def test_register_unregister():
registry = Closer(atexit_register=False)
c1 = Closeable()
c2 = Closeable()
assert not c1.close_called
assert not c2.close_called
registry.register(c1)
id2 = registry.register(c2)
registry.unregister(id2)
registry.close()
assert c1.close_called
assert not c2.close_called
| 494 | Python | .py | 17 | 24.117647 | 44 | 0.712766 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,256 | test_env_checker.py | WindyLab_Gym-PPS/gym/utils/tests/test_env_checker.py | import gym
import numpy as np
import pytest
from gym.spaces import Box, Dict, Discrete
from gym.utils.env_checker import check_env
class ActionDictTestEnv(gym.Env):
action_space = Dict({"position": Discrete(1), "velocity": Discrete(1)})
observation_space = Box(low=-1.0, high=2.0, shape=(3,), dtype=np.float32)
def step(self, action):
observation = np.array([1.0, 1.5, 0.5])
reward = 1
done = True
return observation, reward, done
def reset(self):
return np.array([1.0, 1.5, 0.5])
def render(self, mode="human"):
pass
def test_check_env_dict_action():
# Environment.step() only returns 3 values: obs, reward, done. Not info!
test_env = ActionDictTestEnv()
with pytest.raises(AssertionError) as errorinfo:
check_env(env=test_env, warn=True)
assert (
str(errorinfo.value)
== "The `step()` method must return four values: obs, reward, done, info"
)
| 983 | Python | .py | 26 | 31.538462 | 85 | 0.648734 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,257 | test_core.py | WindyLab_Gym-PPS/gym/tests/test_core.py | from gym import core
class ArgumentEnv(core.Env):
calls = 0
def __init__(self, arg):
self.calls += 1
self.arg = arg
def test_env_instantiation():
# This looks like a pretty trivial, but given our usage of
# __new__, it's worth having.
env = ArgumentEnv("arg")
assert env.arg == "arg"
assert env.calls == 1
| 355 | Python | .py | 12 | 24.5 | 62 | 0.627219 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,258 | registration.py | WindyLab_Gym-PPS/gym/envs/registration.py | import re
import copy
import importlib
from gym import error, logger
# This format is true today, but it's *not* an official spec.
# [username/](env-name)-v(version) env-name is group 1, version is group 2
#
# 2016-10-31: We're experimentally expanding the environment ID format
# to include an optional username.
env_id_re = re.compile(r"^(?:[\w:-]+\/)?([\w:.-]+)-v(\d+)$")
def load(name):
mod_name, attr_name = name.split(":")
mod = importlib.import_module(mod_name)
fn = getattr(mod, attr_name)
return fn
class EnvSpec(object):
"""A specification for a particular instance of the environment. Used
to register the parameters for official evaluations.
Args:
id (str): The official environment ID
entry_point (Optional[str]): The Python entrypoint of the environment class (e.g. module.name:Class)
reward_threshold (Optional[int]): The reward threshold before the task is considered solved
nondeterministic (bool): Whether this environment is non-deterministic even after seeding
max_episode_steps (Optional[int]): The maximum number of steps that an episode can consist of
kwargs (dict): The kwargs to pass to the environment class
"""
def __init__(
self,
id,
entry_point=None,
reward_threshold=None,
nondeterministic=False,
max_episode_steps=None,
kwargs=None,
):
self.id = id
self.entry_point = entry_point
self.reward_threshold = reward_threshold
self.nondeterministic = nondeterministic
self.max_episode_steps = max_episode_steps
self._kwargs = {} if kwargs is None else kwargs
match = env_id_re.search(id)
if not match:
raise error.Error(
"Attempted to register malformed environment ID: {}. (Currently all IDs must be of the form {}.)".format(
id, env_id_re.pattern
)
)
self._env_name = match.group(1)
def make(self, **kwargs):
"""Instantiates an instance of the environment with appropriate kwargs"""
if self.entry_point is None:
raise error.Error(
"Attempting to make deprecated env {}. (HINT: is there a newer registered version of this env?)".format(
self.id
)
)
_kwargs = self._kwargs.copy()
_kwargs.update(kwargs)
if callable(self.entry_point):
env = self.entry_point(**_kwargs)
else:
cls = load(self.entry_point)
env = cls(**_kwargs)
# Make the environment aware of which spec it came from.
spec = copy.deepcopy(self)
spec._kwargs = _kwargs
env.unwrapped.spec = spec
return env
def __repr__(self):
return "EnvSpec({})".format(self.id)
class EnvRegistry(object):
"""Register an env by ID. IDs remain stable over time and are
guaranteed to resolve to the same environment dynamics (or be
desupported). The goal is that results on a particular environment
should always be comparable, and not depend on the version of the
code that was running.
"""
def __init__(self):
self.env_specs = {}
def make(self, path, **kwargs):
if len(kwargs) > 0:
logger.info("Making new env: %s (%s)", path, kwargs)
else:
logger.info("Making new env: %s", path)
spec = self.spec(path)
env = spec.make(**kwargs)
if env.spec.max_episode_steps is not None:
from gym.wrappers.time_limit import TimeLimit
env = TimeLimit(env, max_episode_steps=env.spec.max_episode_steps)
return env
def all(self):
return self.env_specs.values()
def spec(self, path):
if ":" in path:
mod_name, _, id = path.partition(":")
try:
importlib.import_module(mod_name)
except ModuleNotFoundError:
raise error.Error(
"A module ({}) was specified for the environment but was not found, make sure the package is installed with `pip install` before calling `gym.make()`".format(
mod_name
)
)
else:
id = path
match = env_id_re.search(id)
if not match:
raise error.Error(
"Attempted to look up malformed environment ID: {}. (Currently all IDs must be of the form {}.)".format(
id.encode("utf-8"), env_id_re.pattern
)
)
try:
return self.env_specs[id]
except KeyError:
# Parse the env name and check to see if it matches the non-version
# part of a valid env (could also check the exact number here)
env_name = match.group(1)
matching_envs = [
valid_env_name
for valid_env_name, valid_env_spec in self.env_specs.items()
if env_name == valid_env_spec._env_name
]
algorithmic_envs = [
"Copy",
"RepeatCopy",
"DuplicatedInput",
"Reverse",
"ReversedAdiiton",
"ReversedAddition3",
]
if matching_envs:
raise error.DeprecatedEnv(
"Env {} not found (valid versions include {})".format(
id, matching_envs
)
)
elif env_name in algorithmic_envs:
raise error.UnregisteredEnv(
"Algorithmic environments like {} have been moved out of Gym. Install them via `pip install gym-algorithmic` and add `import gym_algorithmic` before using them.".format(
id
)
)
else:
raise error.UnregisteredEnv("No registered env with id: {}".format(id))
def register(self, id, **kwargs):
if id in self.env_specs:
logger.warn("Overriding environment {}".format(id))
self.env_specs[id] = EnvSpec(id, **kwargs)
# Have a global registry
registry = EnvRegistry()
def register(id, **kwargs):
return registry.register(id, **kwargs)
def make(id, **kwargs):
return registry.make(id, **kwargs)
def spec(id):
return registry.spec(id)
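# --- Illustrative usage sketch (the id and entry point below are hypothetical) ---
# The id must match env_id_re, i.e. a name followed by "-v<version>".
if __name__ == "__main__":
    register(
        id="MyGridWorld-v0",
        entry_point="my_package.envs:GridWorldEnv",
        max_episode_steps=200,
    )
    print(spec("MyGridWorld-v0"))  # -> EnvSpec(MyGridWorld-v0)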
| 6,475 | Python | .py | 159 | 30.194969 | 189 | 0.581543 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,259 | __init__.py | WindyLab_Gym-PPS/gym/envs/__init__.py | from gym.envs.registration import registry, register, make, spec
# Classic
# ----------------------------------------
register(
id="CartPole-v0",
entry_point="gym.envs.classic_control:CartPoleEnv",
max_episode_steps=200,
reward_threshold=195.0,
)
register(
id="CartPole-v1",
entry_point="gym.envs.classic_control:CartPoleEnv",
max_episode_steps=500,
reward_threshold=475.0,
)
register(
id="MountainCar-v0",
entry_point="gym.envs.classic_control:MountainCarEnv",
max_episode_steps=200,
reward_threshold=-110.0,
)
register(
id="MountainCarContinuous-v0",
entry_point="gym.envs.classic_control:Continuous_MountainCarEnv",
max_episode_steps=999,
reward_threshold=90.0,
)
register(
id="Pendulum-v0",
entry_point="gym.envs.classic_control:PendulumEnv",
max_episode_steps=200,
)
register(
id="Acrobot-v1",
entry_point="gym.envs.classic_control:AcrobotEnv",
reward_threshold=-100.0,
max_episode_steps=500,
)
register(
id='PredatorPreySwarm-v0',
entry_point='gym.envs.pps:PredatorPreySwarmEnv',
max_episode_steps=None,
reward_threshold=None,
)
# Box2d
# ----------------------------------------
register(
id="LunarLander-v2",
entry_point="gym.envs.box2d:LunarLander",
max_episode_steps=1000,
reward_threshold=200,
)
register(
id="LunarLanderContinuous-v2",
entry_point="gym.envs.box2d:LunarLanderContinuous",
max_episode_steps=1000,
reward_threshold=200,
)
register(
id="BipedalWalker-v3",
entry_point="gym.envs.box2d:BipedalWalker",
max_episode_steps=1600,
reward_threshold=300,
)
register(
id="BipedalWalkerHardcore-v3",
entry_point="gym.envs.box2d:BipedalWalkerHardcore",
max_episode_steps=2000,
reward_threshold=300,
)
register(
id="CarRacing-v0",
entry_point="gym.envs.box2d:CarRacing",
max_episode_steps=1000,
reward_threshold=900,
)
# Toy Text
# ----------------------------------------
register(
id="Blackjack-v0",
entry_point="gym.envs.toy_text:BlackjackEnv",
)
register(
id="KellyCoinflip-v0",
entry_point="gym.envs.toy_text:KellyCoinflipEnv",
reward_threshold=246.61,
)
register(
id="KellyCoinflipGeneralized-v0",
entry_point="gym.envs.toy_text:KellyCoinflipGeneralizedEnv",
)
register(
id="FrozenLake-v1",
entry_point="gym.envs.toy_text:FrozenLakeEnv",
kwargs={"map_name": "4x4"},
max_episode_steps=100,
reward_threshold=0.70, # optimum = 0.74
)
register(
id="FrozenLake8x8-v1",
entry_point="gym.envs.toy_text:FrozenLakeEnv",
kwargs={"map_name": "8x8"},
max_episode_steps=200,
reward_threshold=0.85, # optimum = 0.91
)
register(
id="CliffWalking-v0",
entry_point="gym.envs.toy_text:CliffWalkingEnv",
)
register(
id="NChain-v0",
entry_point="gym.envs.toy_text:NChainEnv",
max_episode_steps=1000,
)
register(
id="Roulette-v0",
entry_point="gym.envs.toy_text:RouletteEnv",
max_episode_steps=100,
)
register(
id="Taxi-v3",
entry_point="gym.envs.toy_text:TaxiEnv",
reward_threshold=8, # optimum = 8.46
max_episode_steps=200,
)
register(
id="GuessingGame-v0",
entry_point="gym.envs.toy_text:GuessingGame",
max_episode_steps=200,
)
register(
id="HotterColder-v0",
entry_point="gym.envs.toy_text:HotterColder",
max_episode_steps=200,
)
# Mujoco
# ----------------------------------------
# 2D
register(
id="Reacher-v2",
entry_point="gym.envs.mujoco:ReacherEnv",
max_episode_steps=50,
reward_threshold=-3.75,
)
register(
id="Pusher-v2",
entry_point="gym.envs.mujoco:PusherEnv",
max_episode_steps=100,
reward_threshold=0.0,
)
register(
id="Thrower-v2",
entry_point="gym.envs.mujoco:ThrowerEnv",
max_episode_steps=100,
reward_threshold=0.0,
)
register(
id="Striker-v2",
entry_point="gym.envs.mujoco:StrikerEnv",
max_episode_steps=100,
reward_threshold=0.0,
)
register(
id="InvertedPendulum-v2",
entry_point="gym.envs.mujoco:InvertedPendulumEnv",
max_episode_steps=1000,
reward_threshold=950.0,
)
register(
id="InvertedDoublePendulum-v2",
entry_point="gym.envs.mujoco:InvertedDoublePendulumEnv",
max_episode_steps=1000,
reward_threshold=9100.0,
)
register(
id="HalfCheetah-v2",
entry_point="gym.envs.mujoco:HalfCheetahEnv",
max_episode_steps=1000,
reward_threshold=4800.0,
)
register(
id="HalfCheetah-v3",
entry_point="gym.envs.mujoco.half_cheetah_v3:HalfCheetahEnv",
max_episode_steps=1000,
reward_threshold=4800.0,
)
register(
id="Hopper-v2",
entry_point="gym.envs.mujoco:HopperEnv",
max_episode_steps=1000,
reward_threshold=3800.0,
)
register(
id="Hopper-v3",
entry_point="gym.envs.mujoco.hopper_v3:HopperEnv",
max_episode_steps=1000,
reward_threshold=3800.0,
)
register(
id="Swimmer-v2",
entry_point="gym.envs.mujoco:SwimmerEnv",
max_episode_steps=1000,
reward_threshold=360.0,
)
register(
id="Swimmer-v3",
entry_point="gym.envs.mujoco.swimmer_v3:SwimmerEnv",
max_episode_steps=1000,
reward_threshold=360.0,
)
register(
id="Walker2d-v2",
max_episode_steps=1000,
entry_point="gym.envs.mujoco:Walker2dEnv",
)
register(
id="Walker2d-v3",
max_episode_steps=1000,
entry_point="gym.envs.mujoco.walker2d_v3:Walker2dEnv",
)
register(
id="Ant-v2",
entry_point="gym.envs.mujoco:AntEnv",
max_episode_steps=1000,
reward_threshold=6000.0,
)
register(
id="Ant-v3",
entry_point="gym.envs.mujoco.ant_v3:AntEnv",
max_episode_steps=1000,
reward_threshold=6000.0,
)
register(
id="Humanoid-v2",
entry_point="gym.envs.mujoco:HumanoidEnv",
max_episode_steps=1000,
)
register(
id="Humanoid-v3",
entry_point="gym.envs.mujoco.humanoid_v3:HumanoidEnv",
max_episode_steps=1000,
)
register(
id="HumanoidStandup-v2",
entry_point="gym.envs.mujoco:HumanoidStandupEnv",
max_episode_steps=1000,
)
# Robotics
# ----------------------------------------
def _merge(a, b):
a.update(b)
return a
for reward_type in ["sparse", "dense"]:
suffix = "Dense" if reward_type == "dense" else ""
kwargs = {
"reward_type": reward_type,
}
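    # Each registration in this loop therefore comes in two flavours: the empty suffix
    # selects the sparse reward and "Dense" the dense one, e.g. "FetchReach-v1" vs.
    # "FetchReachDense-v1".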
# Fetch
register(
id="FetchSlide{}-v1".format(suffix),
entry_point="gym.envs.robotics:FetchSlideEnv",
kwargs=kwargs,
max_episode_steps=50,
)
register(
id="FetchPickAndPlace{}-v1".format(suffix),
entry_point="gym.envs.robotics:FetchPickAndPlaceEnv",
kwargs=kwargs,
max_episode_steps=50,
)
register(
id="FetchReach{}-v1".format(suffix),
entry_point="gym.envs.robotics:FetchReachEnv",
kwargs=kwargs,
max_episode_steps=50,
)
register(
id="FetchPush{}-v1".format(suffix),
entry_point="gym.envs.robotics:FetchPushEnv",
kwargs=kwargs,
max_episode_steps=50,
)
# Hand
register(
id="HandReach{}-v0".format(suffix),
entry_point="gym.envs.robotics:HandReachEnv",
kwargs=kwargs,
max_episode_steps=50,
)
register(
id="HandManipulateBlockRotateZ{}-v0".format(suffix),
entry_point="gym.envs.robotics:HandBlockEnv",
kwargs=_merge({"target_position": "ignore", "target_rotation": "z"}, kwargs),
max_episode_steps=100,
)
register(
id="HandManipulateBlockRotateZTouchSensors{}-v0".format(suffix),
entry_point="gym.envs.robotics:HandBlockTouchSensorsEnv",
kwargs=_merge(
{
"target_position": "ignore",
"target_rotation": "z",
"touch_get_obs": "boolean",
},
kwargs,
),
max_episode_steps=100,
)
register(
id="HandManipulateBlockRotateZTouchSensors{}-v1".format(suffix),
entry_point="gym.envs.robotics:HandBlockTouchSensorsEnv",
kwargs=_merge(
{
"target_position": "ignore",
"target_rotation": "z",
"touch_get_obs": "sensordata",
},
kwargs,
),
max_episode_steps=100,
)
register(
id="HandManipulateBlockRotateParallel{}-v0".format(suffix),
entry_point="gym.envs.robotics:HandBlockEnv",
kwargs=_merge(
{"target_position": "ignore", "target_rotation": "parallel"}, kwargs
),
max_episode_steps=100,
)
register(
id="HandManipulateBlockRotateParallelTouchSensors{}-v0".format(suffix),
entry_point="gym.envs.robotics:HandBlockTouchSensorsEnv",
kwargs=_merge(
{
"target_position": "ignore",
"target_rotation": "parallel",
"touch_get_obs": "boolean",
},
kwargs,
),
max_episode_steps=100,
)
register(
id="HandManipulateBlockRotateParallelTouchSensors{}-v1".format(suffix),
entry_point="gym.envs.robotics:HandBlockTouchSensorsEnv",
kwargs=_merge(
{
"target_position": "ignore",
"target_rotation": "parallel",
"touch_get_obs": "sensordata",
},
kwargs,
),
max_episode_steps=100,
)
register(
id="HandManipulateBlockRotateXYZ{}-v0".format(suffix),
entry_point="gym.envs.robotics:HandBlockEnv",
kwargs=_merge({"target_position": "ignore", "target_rotation": "xyz"}, kwargs),
max_episode_steps=100,
)
register(
id="HandManipulateBlockRotateXYZTouchSensors{}-v0".format(suffix),
entry_point="gym.envs.robotics:HandBlockTouchSensorsEnv",
kwargs=_merge(
{
"target_position": "ignore",
"target_rotation": "xyz",
"touch_get_obs": "boolean",
},
kwargs,
),
max_episode_steps=100,
)
register(
id="HandManipulateBlockRotateXYZTouchSensors{}-v1".format(suffix),
entry_point="gym.envs.robotics:HandBlockTouchSensorsEnv",
kwargs=_merge(
{
"target_position": "ignore",
"target_rotation": "xyz",
"touch_get_obs": "sensordata",
},
kwargs,
),
max_episode_steps=100,
)
register(
id="HandManipulateBlockFull{}-v0".format(suffix),
entry_point="gym.envs.robotics:HandBlockEnv",
kwargs=_merge({"target_position": "random", "target_rotation": "xyz"}, kwargs),
max_episode_steps=100,
)
# Alias for "Full"
register(
id="HandManipulateBlock{}-v0".format(suffix),
entry_point="gym.envs.robotics:HandBlockEnv",
kwargs=_merge({"target_position": "random", "target_rotation": "xyz"}, kwargs),
max_episode_steps=100,
)
register(
id="HandManipulateBlockTouchSensors{}-v0".format(suffix),
entry_point="gym.envs.robotics:HandBlockTouchSensorsEnv",
kwargs=_merge(
{
"target_position": "random",
"target_rotation": "xyz",
"touch_get_obs": "boolean",
},
kwargs,
),
max_episode_steps=100,
)
register(
id="HandManipulateBlockTouchSensors{}-v1".format(suffix),
entry_point="gym.envs.robotics:HandBlockTouchSensorsEnv",
kwargs=_merge(
{
"target_position": "random",
"target_rotation": "xyz",
"touch_get_obs": "sensordata",
},
kwargs,
),
max_episode_steps=100,
)
register(
id="HandManipulateEggRotate{}-v0".format(suffix),
entry_point="gym.envs.robotics:HandEggEnv",
kwargs=_merge({"target_position": "ignore", "target_rotation": "xyz"}, kwargs),
max_episode_steps=100,
)
register(
id="HandManipulateEggRotateTouchSensors{}-v0".format(suffix),
entry_point="gym.envs.robotics:HandEggTouchSensorsEnv",
kwargs=_merge(
{
"target_position": "ignore",
"target_rotation": "xyz",
"touch_get_obs": "boolean",
},
kwargs,
),
max_episode_steps=100,
)
register(
id="HandManipulateEggRotateTouchSensors{}-v1".format(suffix),
entry_point="gym.envs.robotics:HandEggTouchSensorsEnv",
kwargs=_merge(
{
"target_position": "ignore",
"target_rotation": "xyz",
"touch_get_obs": "sensordata",
},
kwargs,
),
max_episode_steps=100,
)
register(
id="HandManipulateEggFull{}-v0".format(suffix),
entry_point="gym.envs.robotics:HandEggEnv",
kwargs=_merge({"target_position": "random", "target_rotation": "xyz"}, kwargs),
max_episode_steps=100,
)
# Alias for "Full"
register(
id="HandManipulateEgg{}-v0".format(suffix),
entry_point="gym.envs.robotics:HandEggEnv",
kwargs=_merge({"target_position": "random", "target_rotation": "xyz"}, kwargs),
max_episode_steps=100,
)
register(
id="HandManipulateEggTouchSensors{}-v0".format(suffix),
entry_point="gym.envs.robotics:HandEggTouchSensorsEnv",
kwargs=_merge(
{
"target_position": "random",
"target_rotation": "xyz",
"touch_get_obs": "boolean",
},
kwargs,
),
max_episode_steps=100,
)
register(
id="HandManipulateEggTouchSensors{}-v1".format(suffix),
entry_point="gym.envs.robotics:HandEggTouchSensorsEnv",
kwargs=_merge(
{
"target_position": "random",
"target_rotation": "xyz",
"touch_get_obs": "sensordata",
},
kwargs,
),
max_episode_steps=100,
)
register(
id="HandManipulatePenRotate{}-v0".format(suffix),
entry_point="gym.envs.robotics:HandPenEnv",
kwargs=_merge({"target_position": "ignore", "target_rotation": "xyz"}, kwargs),
max_episode_steps=100,
)
register(
id="HandManipulatePenRotateTouchSensors{}-v0".format(suffix),
entry_point="gym.envs.robotics:HandPenTouchSensorsEnv",
kwargs=_merge(
{
"target_position": "ignore",
"target_rotation": "xyz",
"touch_get_obs": "boolean",
},
kwargs,
),
max_episode_steps=100,
)
register(
id="HandManipulatePenRotateTouchSensors{}-v1".format(suffix),
entry_point="gym.envs.robotics:HandPenTouchSensorsEnv",
kwargs=_merge(
{
"target_position": "ignore",
"target_rotation": "xyz",
"touch_get_obs": "sensordata",
},
kwargs,
),
max_episode_steps=100,
)
register(
id="HandManipulatePenFull{}-v0".format(suffix),
entry_point="gym.envs.robotics:HandPenEnv",
kwargs=_merge({"target_position": "random", "target_rotation": "xyz"}, kwargs),
max_episode_steps=100,
)
# Alias for "Full"
register(
id="HandManipulatePen{}-v0".format(suffix),
entry_point="gym.envs.robotics:HandPenEnv",
kwargs=_merge({"target_position": "random", "target_rotation": "xyz"}, kwargs),
max_episode_steps=100,
)
register(
id="HandManipulatePenTouchSensors{}-v0".format(suffix),
entry_point="gym.envs.robotics:HandPenTouchSensorsEnv",
kwargs=_merge(
{
"target_position": "random",
"target_rotation": "xyz",
"touch_get_obs": "boolean",
},
kwargs,
),
max_episode_steps=100,
)
register(
id="HandManipulatePenTouchSensors{}-v1".format(suffix),
entry_point="gym.envs.robotics:HandPenTouchSensorsEnv",
kwargs=_merge(
{
"target_position": "random",
"target_rotation": "xyz",
"touch_get_obs": "sensordata",
},
kwargs,
),
max_episode_steps=100,
)
# Atari
# ----------------------------------------
# # print ', '.join(["'{}'".format(name.split('.')[0]) for name in atari_py.list_games()])
for game in [
"adventure",
"air_raid",
"alien",
"amidar",
"assault",
"asterix",
"asteroids",
"atlantis",
"bank_heist",
"battle_zone",
"beam_rider",
"berzerk",
"bowling",
"boxing",
"breakout",
"carnival",
"centipede",
"chopper_command",
"crazy_climber",
"defender",
"demon_attack",
"double_dunk",
"elevator_action",
"enduro",
"fishing_derby",
"freeway",
"frostbite",
"gopher",
"gravitar",
"hero",
"ice_hockey",
"jamesbond",
"journey_escape",
"kangaroo",
"krull",
"kung_fu_master",
"montezuma_revenge",
"ms_pacman",
"name_this_game",
"phoenix",
"pitfall",
"pong",
"pooyan",
"private_eye",
"qbert",
"riverraid",
"road_runner",
"robotank",
"seaquest",
"skiing",
"solaris",
"space_invaders",
"star_gunner",
"tennis",
"time_pilot",
"tutankham",
"up_n_down",
"venture",
"video_pinball",
"wizard_of_wor",
"yars_revenge",
"zaxxon",
]:
for obs_type in ["image", "ram"]:
# space_invaders should yield SpaceInvaders-v0 and SpaceInvaders-ram-v0
name = "".join([g.capitalize() for g in game.split("_")])
if obs_type == "ram":
name = "{}-ram".format(name)
nondeterministic = False
if game == "elevator_action" and obs_type == "ram":
# ElevatorAction-ram-v0 seems to yield slightly
# non-deterministic observations about 10% of the time. We
# should track this down eventually, but for now we just
# mark it as nondeterministic.
nondeterministic = True
register(
id="{}-v0".format(name),
entry_point="gym.envs.atari:AtariEnv",
kwargs={
"game": game,
"obs_type": obs_type,
"repeat_action_probability": 0.25,
},
max_episode_steps=10000,
nondeterministic=nondeterministic,
)
register(
id="{}-v4".format(name),
entry_point="gym.envs.atari:AtariEnv",
kwargs={"game": game, "obs_type": obs_type},
max_episode_steps=100000,
nondeterministic=nondeterministic,
)
# Standard Deterministic (as in the original DeepMind paper)
if game == "space_invaders":
frameskip = 3
else:
frameskip = 4
# Use a deterministic frame skip.
register(
id="{}Deterministic-v0".format(name),
entry_point="gym.envs.atari:AtariEnv",
kwargs={
"game": game,
"obs_type": obs_type,
"frameskip": frameskip,
"repeat_action_probability": 0.25,
},
max_episode_steps=100000,
nondeterministic=nondeterministic,
)
register(
id="{}Deterministic-v4".format(name),
entry_point="gym.envs.atari:AtariEnv",
kwargs={"game": game, "obs_type": obs_type, "frameskip": frameskip},
max_episode_steps=100000,
nondeterministic=nondeterministic,
)
register(
id="{}NoFrameskip-v0".format(name),
entry_point="gym.envs.atari:AtariEnv",
kwargs={
"game": game,
"obs_type": obs_type,
"frameskip": 1,
"repeat_action_probability": 0.25,
}, # A frameskip of 1 means we get every frame
max_episode_steps=frameskip * 100000,
nondeterministic=nondeterministic,
)
# No frameskip. (Atari has no entropy source, so these are
# deterministic environments.)
register(
id="{}NoFrameskip-v4".format(name),
entry_point="gym.envs.atari:AtariEnv",
kwargs={
"game": game,
"obs_type": obs_type,
"frameskip": 1,
}, # A frameskip of 1 means we get every frame
max_episode_steps=frameskip * 100000,
nondeterministic=nondeterministic,
)
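# The loop above turns each snake_case game name into a family of CamelCase ids; for
# example "space_invaders" yields SpaceInvaders-v0/-v4, SpaceInvaders-ram-v0/-v4,
# SpaceInvadersDeterministic-v0/-v4 and SpaceInvadersNoFrameskip-v0/-v4.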
# Unit test
# ---------
register(
id="CubeCrash-v0",
entry_point="gym.envs.unittest:CubeCrash",
reward_threshold=0.9,
)
register(
id="CubeCrashSparse-v0",
entry_point="gym.envs.unittest:CubeCrashSparse",
reward_threshold=0.9,
)
register(
id="CubeCrashScreenBecomesBlack-v0",
entry_point="gym.envs.unittest:CubeCrashScreenBecomesBlack",
reward_threshold=0.9,
)
register(
id="MemorizeDigits-v0",
entry_point="gym.envs.unittest:MemorizeDigits",
reward_threshold=20,
)
| 21,354 | Python | .py | 737 | 21.537313 | 90 | 0.589279 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,260 | fetch_env.py | WindyLab_Gym-PPS/gym/envs/robotics/fetch_env.py | import numpy as np
from gym.envs.robotics import rotations, robot_env, utils
def goal_distance(goal_a, goal_b):
assert goal_a.shape == goal_b.shape
return np.linalg.norm(goal_a - goal_b, axis=-1)
class FetchEnv(robot_env.RobotEnv):
"""Superclass for all Fetch environments."""
def __init__(
self,
model_path,
n_substeps,
gripper_extra_height,
block_gripper,
has_object,
target_in_the_air,
target_offset,
obj_range,
target_range,
distance_threshold,
initial_qpos,
reward_type,
):
"""Initializes a new Fetch environment.
Args:
model_path (string): path to the environments XML file
n_substeps (int): number of substeps the simulation runs on every call to step
gripper_extra_height (float): additional height above the table when positioning the gripper
block_gripper (boolean): whether or not the gripper is blocked (i.e. not movable) or not
has_object (boolean): whether or not the environment has an object
target_in_the_air (boolean): whether or not the target should be in the air above the table or on the table surface
target_offset (float or array with 3 elements): offset of the target
obj_range (float): range of a uniform distribution for sampling initial object positions
target_range (float): range of a uniform distribution for sampling a target
distance_threshold (float): the threshold after which a goal is considered achieved
initial_qpos (dict): a dictionary of joint names and values that define the initial configuration
reward_type ('sparse' or 'dense'): the reward type, i.e. sparse or dense
"""
self.gripper_extra_height = gripper_extra_height
self.block_gripper = block_gripper
self.has_object = has_object
self.target_in_the_air = target_in_the_air
self.target_offset = target_offset
self.obj_range = obj_range
self.target_range = target_range
self.distance_threshold = distance_threshold
self.reward_type = reward_type
super(FetchEnv, self).__init__(
model_path=model_path,
n_substeps=n_substeps,
n_actions=4,
initial_qpos=initial_qpos,
)
# GoalEnv methods
# ----------------------------
def compute_reward(self, achieved_goal, goal, info):
# Compute distance between goal and the achieved goal.
d = goal_distance(achieved_goal, goal)
if self.reward_type == "sparse":
return -(d > self.distance_threshold).astype(np.float32)
else:
return -d
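    # Worked example (illustrative): with a distance_threshold of 0.05 m, a goal
    # distance d = 0.08 gives a sparse reward of -1.0 and a dense reward of -0.08,
    # while d = 0.03 gives 0.0 and -0.03 respectively.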
# RobotEnv methods
# ----------------------------
def _step_callback(self):
if self.block_gripper:
self.sim.data.set_joint_qpos("robot0:l_gripper_finger_joint", 0.0)
self.sim.data.set_joint_qpos("robot0:r_gripper_finger_joint", 0.0)
self.sim.forward()
def _set_action(self, action):
assert action.shape == (4,)
action = (
action.copy()
) # ensure that we don't change the action outside of this scope
pos_ctrl, gripper_ctrl = action[:3], action[3]
pos_ctrl *= 0.05 # limit maximum change in position
rot_ctrl = [
1.0,
0.0,
1.0,
0.0,
] # fixed rotation of the end effector, expressed as a quaternion
gripper_ctrl = np.array([gripper_ctrl, gripper_ctrl])
assert gripper_ctrl.shape == (2,)
if self.block_gripper:
gripper_ctrl = np.zeros_like(gripper_ctrl)
action = np.concatenate([pos_ctrl, rot_ctrl, gripper_ctrl])
# Apply action to simulation.
utils.ctrl_set_action(self.sim, action)
utils.mocap_set_action(self.sim, action)
def _get_obs(self):
# positions
grip_pos = self.sim.data.get_site_xpos("robot0:grip")
dt = self.sim.nsubsteps * self.sim.model.opt.timestep
grip_velp = self.sim.data.get_site_xvelp("robot0:grip") * dt
robot_qpos, robot_qvel = utils.robot_get_obs(self.sim)
if self.has_object:
object_pos = self.sim.data.get_site_xpos("object0")
# rotations
object_rot = rotations.mat2euler(self.sim.data.get_site_xmat("object0"))
# velocities
object_velp = self.sim.data.get_site_xvelp("object0") * dt
object_velr = self.sim.data.get_site_xvelr("object0") * dt
# gripper state
object_rel_pos = object_pos - grip_pos
object_velp -= grip_velp
else:
object_pos = (
object_rot
) = object_velp = object_velr = object_rel_pos = np.zeros(0)
gripper_state = robot_qpos[-2:]
gripper_vel = (
robot_qvel[-2:] * dt
) # change to a scalar if the gripper is made symmetric
if not self.has_object:
achieved_goal = grip_pos.copy()
else:
achieved_goal = np.squeeze(object_pos.copy())
obs = np.concatenate(
[
grip_pos,
object_pos.ravel(),
object_rel_pos.ravel(),
gripper_state,
object_rot.ravel(),
object_velp.ravel(),
object_velr.ravel(),
grip_velp,
gripper_vel,
]
)
return {
"observation": obs.copy(),
"achieved_goal": achieved_goal.copy(),
"desired_goal": self.goal.copy(),
}
def _viewer_setup(self):
body_id = self.sim.model.body_name2id("robot0:gripper_link")
lookat = self.sim.data.body_xpos[body_id]
for idx, value in enumerate(lookat):
self.viewer.cam.lookat[idx] = value
self.viewer.cam.distance = 2.5
self.viewer.cam.azimuth = 132.0
self.viewer.cam.elevation = -14.0
def _render_callback(self):
# Visualize target.
sites_offset = (self.sim.data.site_xpos - self.sim.model.site_pos).copy()
site_id = self.sim.model.site_name2id("target0")
self.sim.model.site_pos[site_id] = self.goal - sites_offset[0]
self.sim.forward()
def _reset_sim(self):
self.sim.set_state(self.initial_state)
# Randomize start position of object.
if self.has_object:
object_xpos = self.initial_gripper_xpos[:2]
while np.linalg.norm(object_xpos - self.initial_gripper_xpos[:2]) < 0.1:
object_xpos = self.initial_gripper_xpos[:2] + self.np_random.uniform(
-self.obj_range, self.obj_range, size=2
)
object_qpos = self.sim.data.get_joint_qpos("object0:joint")
assert object_qpos.shape == (7,)
object_qpos[:2] = object_xpos
self.sim.data.set_joint_qpos("object0:joint", object_qpos)
self.sim.forward()
return True
def _sample_goal(self):
if self.has_object:
goal = self.initial_gripper_xpos[:3] + self.np_random.uniform(
-self.target_range, self.target_range, size=3
)
goal += self.target_offset
goal[2] = self.height_offset
if self.target_in_the_air and self.np_random.uniform() < 0.5:
goal[2] += self.np_random.uniform(0, 0.45)
else:
goal = self.initial_gripper_xpos[:3] + self.np_random.uniform(
-self.target_range, self.target_range, size=3
)
return goal.copy()
def _is_success(self, achieved_goal, desired_goal):
d = goal_distance(achieved_goal, desired_goal)
return (d < self.distance_threshold).astype(np.float32)
def _env_setup(self, initial_qpos):
for name, value in initial_qpos.items():
self.sim.data.set_joint_qpos(name, value)
utils.reset_mocap_welds(self.sim)
self.sim.forward()
# Move end effector into position.
gripper_target = np.array(
[-0.498, 0.005, -0.431 + self.gripper_extra_height]
) + self.sim.data.get_site_xpos("robot0:grip")
gripper_rotation = np.array([1.0, 0.0, 1.0, 0.0])
self.sim.data.set_mocap_pos("robot0:mocap", gripper_target)
self.sim.data.set_mocap_quat("robot0:mocap", gripper_rotation)
for _ in range(10):
self.sim.step()
# Extract information for sampling goals.
self.initial_gripper_xpos = self.sim.data.get_site_xpos("robot0:grip").copy()
if self.has_object:
self.height_offset = self.sim.data.get_site_xpos("object0")[2]
def render(self, mode="human", width=500, height=500):
return super(FetchEnv, self).render(mode, width, height)
| 8,927 | Python | .py | 201 | 33.895522 | 127 | 0.592043 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,261 | hand_env.py | WindyLab_Gym-PPS/gym/envs/robotics/hand_env.py | import os
import copy
import numpy as np
import gym
from gym import error, spaces
from gym.utils import seeding
from gym.envs.robotics import robot_env
class HandEnv(robot_env.RobotEnv):
def __init__(self, model_path, n_substeps, initial_qpos, relative_control):
self.relative_control = relative_control
super(HandEnv, self).__init__(
model_path=model_path,
n_substeps=n_substeps,
n_actions=20,
initial_qpos=initial_qpos,
)
# RobotEnv methods
# ----------------------------
def _set_action(self, action):
assert action.shape == (20,)
ctrlrange = self.sim.model.actuator_ctrlrange
actuation_range = (ctrlrange[:, 1] - ctrlrange[:, 0]) / 2.0
if self.relative_control:
actuation_center = np.zeros_like(action)
for i in range(self.sim.data.ctrl.shape[0]):
actuation_center[i] = self.sim.data.get_joint_qpos(
self.sim.model.actuator_names[i].replace(":A_", ":")
)
for joint_name in ["FF", "MF", "RF", "LF"]:
act_idx = self.sim.model.actuator_name2id(
"robot0:A_{}J1".format(joint_name)
)
actuation_center[act_idx] += self.sim.data.get_joint_qpos(
"robot0:{}J0".format(joint_name)
)
else:
actuation_center = (ctrlrange[:, 1] + ctrlrange[:, 0]) / 2.0
self.sim.data.ctrl[:] = actuation_center + action * actuation_range
self.sim.data.ctrl[:] = np.clip(
self.sim.data.ctrl, ctrlrange[:, 0], ctrlrange[:, 1]
)
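    # Worked example (illustrative): for an actuator with ctrlrange [-1.57, 1.57] and
    # absolute control, actuation_center is 0 and actuation_range is 1.57, so an action
    # entry of 0.5 maps to a ctrl target of about 0.785 rad before clipping; with
    # relative_control=True the same entry offsets the current joint position instead.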
def _viewer_setup(self):
body_id = self.sim.model.body_name2id("robot0:palm")
lookat = self.sim.data.body_xpos[body_id]
for idx, value in enumerate(lookat):
self.viewer.cam.lookat[idx] = value
self.viewer.cam.distance = 0.5
self.viewer.cam.azimuth = 55.0
self.viewer.cam.elevation = -25.0
def render(self, mode="human", width=500, height=500):
return super(HandEnv, self).render(mode, width, height)
| 2,153 | Python | .py | 51 | 32.333333 | 79 | 0.576206 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,262 | robot_env.py | WindyLab_Gym-PPS/gym/envs/robotics/robot_env.py | import os
import copy
import numpy as np
import gym
from gym import error, spaces
from gym.utils import seeding
try:
import mujoco_py
except ImportError as e:
raise error.DependencyNotInstalled(
"{}. (HINT: you need to install mujoco_py, and also perform the setup instructions here: https://github.com/openai/mujoco-py/.)".format(
e
)
)
DEFAULT_SIZE = 500
class RobotEnv(gym.GoalEnv):
def __init__(self, model_path, initial_qpos, n_actions, n_substeps):
if model_path.startswith("/"):
fullpath = model_path
else:
fullpath = os.path.join(os.path.dirname(__file__), "assets", model_path)
if not os.path.exists(fullpath):
raise IOError("File {} does not exist".format(fullpath))
model = mujoco_py.load_model_from_path(fullpath)
self.sim = mujoco_py.MjSim(model, nsubsteps=n_substeps)
self.viewer = None
self._viewers = {}
self.metadata = {
"render.modes": ["human", "rgb_array"],
"video.frames_per_second": int(np.round(1.0 / self.dt)),
}
self.seed()
self._env_setup(initial_qpos=initial_qpos)
self.initial_state = copy.deepcopy(self.sim.get_state())
self.goal = self._sample_goal()
obs = self._get_obs()
self.action_space = spaces.Box(-1.0, 1.0, shape=(n_actions,), dtype="float32")
self.observation_space = spaces.Dict(
dict(
desired_goal=spaces.Box(
-np.inf, np.inf, shape=obs["achieved_goal"].shape, dtype="float32"
),
achieved_goal=spaces.Box(
-np.inf, np.inf, shape=obs["achieved_goal"].shape, dtype="float32"
),
observation=spaces.Box(
-np.inf, np.inf, shape=obs["observation"].shape, dtype="float32"
),
)
)
@property
def dt(self):
return self.sim.model.opt.timestep * self.sim.nsubsteps
# Env methods
# ----------------------------
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
action = np.clip(action, self.action_space.low, self.action_space.high)
self._set_action(action)
self.sim.step()
self._step_callback()
obs = self._get_obs()
done = False
info = {
"is_success": self._is_success(obs["achieved_goal"], self.goal),
}
reward = self.compute_reward(obs["achieved_goal"], self.goal, info)
return obs, reward, done, info
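    # Illustrative sketch (not part of the original class): observations are dicts, so a
    # typical interaction loop with any concrete subclass looks roughly like
    #
    #   obs = env.reset()
    #   obs, reward, done, info = env.step(env.action_space.sample())
    #   # the reward can be recomputed from the goal fields:
    #   #   env.compute_reward(obs["achieved_goal"], obs["desired_goal"], info)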
def reset(self):
# Attempt to reset the simulator. Since we randomize initial conditions, it
# is possible to get into a state with numerical issues (e.g. due to penetration or
        # Gimbal lock) or we may not achieve an initial condition (e.g. an object is within the hand).
# In this case, we just keep randomizing until we eventually achieve a valid initial
# configuration.
super(RobotEnv, self).reset()
did_reset_sim = False
while not did_reset_sim:
did_reset_sim = self._reset_sim()
self.goal = self._sample_goal().copy()
obs = self._get_obs()
return obs
def close(self):
if self.viewer is not None:
# self.viewer.finish()
self.viewer = None
self._viewers = {}
def render(self, mode="human", width=DEFAULT_SIZE, height=DEFAULT_SIZE):
self._render_callback()
if mode == "rgb_array":
self._get_viewer(mode).render(width, height)
# window size used for old mujoco-py:
data = self._get_viewer(mode).read_pixels(width, height, depth=False)
# original image is upside-down, so flip it
return data[::-1, :, :]
elif mode == "human":
self._get_viewer(mode).render()
def _get_viewer(self, mode):
self.viewer = self._viewers.get(mode)
if self.viewer is None:
if mode == "human":
self.viewer = mujoco_py.MjViewer(self.sim)
elif mode == "rgb_array":
self.viewer = mujoco_py.MjRenderContextOffscreen(self.sim, device_id=-1)
self._viewer_setup()
self._viewers[mode] = self.viewer
return self.viewer
# Extension methods
# ----------------------------
def _reset_sim(self):
"""Resets a simulation and indicates whether or not it was successful.
If a reset was unsuccessful (e.g. if a randomized state caused an error in the
simulation), this method should indicate such a failure by returning False.
        In such a case, this method will be called again to attempt the reset again.
"""
self.sim.set_state(self.initial_state)
self.sim.forward()
return True
def _get_obs(self):
"""Returns the observation."""
raise NotImplementedError()
def _set_action(self, action):
"""Applies the given action to the simulation."""
raise NotImplementedError()
def _is_success(self, achieved_goal, desired_goal):
"""Indicates whether or not the achieved goal successfully achieved the desired goal."""
raise NotImplementedError()
def _sample_goal(self):
"""Samples a new goal and returns it."""
raise NotImplementedError()
def _env_setup(self, initial_qpos):
"""Initial configuration of the environment. Can be used to configure initial state
and extract information from the simulation.
"""
pass
def _viewer_setup(self):
"""Initial configuration of the viewer. Can be used to set the camera position,
for example.
"""
pass
def _render_callback(self):
"""A custom callback that is called before rendering. Can be used
to implement custom visualizations.
"""
pass
def _step_callback(self):
"""A custom callback that is called after stepping the simulation. Can be used
to enforce additional constraints on the simulation state.
"""
pass
| 6,244 | Python | .py | 151 | 32.086093 | 144 | 0.60033 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,263 | utils.py | WindyLab_Gym-PPS/gym/envs/robotics/utils.py | import numpy as np
from gym import error
try:
import mujoco_py
except ImportError as e:
raise error.DependencyNotInstalled(
"{}. (HINT: you need to install mujoco_py, and also perform the setup instructions here: https://github.com/openai/mujoco-py/.)".format(
e
)
)
def robot_get_obs(sim):
"""Returns all joint positions and velocities associated with
a robot.
"""
if sim.data.qpos is not None and sim.model.joint_names:
names = [n for n in sim.model.joint_names if n.startswith("robot")]
return (
np.array([sim.data.get_joint_qpos(name) for name in names]),
np.array([sim.data.get_joint_qvel(name) for name in names]),
)
return np.zeros(0), np.zeros(0)
def ctrl_set_action(sim, action):
"""For torque actuators it copies the action into mujoco ctrl field.
For position actuators it sets the target relative to the current qpos.
"""
if sim.model.nmocap > 0:
_, action = np.split(action, (sim.model.nmocap * 7,))
if sim.data.ctrl is not None:
for i in range(action.shape[0]):
if sim.model.actuator_biastype[i] == 0:
sim.data.ctrl[i] = action[i]
else:
idx = sim.model.jnt_qposadr[sim.model.actuator_trnid[i, 0]]
sim.data.ctrl[i] = sim.data.qpos[idx] + action[i]
def mocap_set_action(sim, action):
"""The action controls the robot using mocaps. Specifically, bodies
    on the robot (for example the gripper wrist) are controlled with
    mocap bodies. In this case the action is the desired difference
    in position and orientation (quaternion), in world coordinates,
    of the target body. The mocap is positioned relative to
the target body according to the delta, and the MuJoCo equality
constraint optimizer tries to center the welded body on the mocap.
"""
if sim.model.nmocap > 0:
action, _ = np.split(action, (sim.model.nmocap * 7,))
action = action.reshape(sim.model.nmocap, 7)
pos_delta = action[:, :3]
quat_delta = action[:, 3:]
reset_mocap2body_xpos(sim)
sim.data.mocap_pos[:] = sim.data.mocap_pos + pos_delta
sim.data.mocap_quat[:] = sim.data.mocap_quat + quat_delta
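# Worked example (illustrative): with a single mocap body (nmocap == 1) the first
# 7 action entries are read as [dx, dy, dz, dqw, dqx, dqy, dqz], i.e. a position delta
# followed by a quaternion delta in the w-first convention, and the remaining entries
# are consumed by ctrl_set_action above.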
def reset_mocap_welds(sim):
"""Resets the mocap welds that we use for actuation."""
if sim.model.nmocap > 0 and sim.model.eq_data is not None:
for i in range(sim.model.eq_data.shape[0]):
if sim.model.eq_type[i] == mujoco_py.const.EQ_WELD:
sim.model.eq_data[i, :] = np.array([0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
sim.forward()
def reset_mocap2body_xpos(sim):
"""Resets the position and orientation of the mocap bodies to the same
values as the bodies they're welded to.
"""
if (
sim.model.eq_type is None
or sim.model.eq_obj1id is None
or sim.model.eq_obj2id is None
):
return
for eq_type, obj1_id, obj2_id in zip(
sim.model.eq_type, sim.model.eq_obj1id, sim.model.eq_obj2id
):
if eq_type != mujoco_py.const.EQ_WELD:
continue
mocap_id = sim.model.body_mocapid[obj1_id]
if mocap_id != -1:
# obj1 is the mocap, obj2 is the welded body
body_idx = obj2_id
else:
# obj2 is the mocap, obj1 is the welded body
mocap_id = sim.model.body_mocapid[obj2_id]
body_idx = obj1_id
assert mocap_id != -1
sim.data.mocap_pos[mocap_id][:] = sim.data.body_xpos[body_idx]
sim.data.mocap_quat[mocap_id][:] = sim.data.body_xquat[body_idx]
| 3,653 | Python | .py | 84 | 35.714286 | 144 | 0.630912 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,264 | __init__.py | WindyLab_Gym-PPS/gym/envs/robotics/__init__.py | from gym.envs.robotics.fetch_env import FetchEnv
from gym.envs.robotics.fetch.slide import FetchSlideEnv
from gym.envs.robotics.fetch.pick_and_place import FetchPickAndPlaceEnv
from gym.envs.robotics.fetch.push import FetchPushEnv
from gym.envs.robotics.fetch.reach import FetchReachEnv
from gym.envs.robotics.hand.reach import HandReachEnv
from gym.envs.robotics.hand.manipulate import HandBlockEnv
from gym.envs.robotics.hand.manipulate import HandEggEnv
from gym.envs.robotics.hand.manipulate import HandPenEnv
from gym.envs.robotics.hand.manipulate_touch_sensors import HandBlockTouchSensorsEnv
from gym.envs.robotics.hand.manipulate_touch_sensors import HandEggTouchSensorsEnv
from gym.envs.robotics.hand.manipulate_touch_sensors import HandPenTouchSensorsEnv
| 767 | Python | .py | 12 | 62.75 | 84 | 0.877822 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,265 | rotations.py | WindyLab_Gym-PPS/gym/envs/robotics/rotations.py | # Copyright (c) 2009-2017, Matthew Brett and Christoph Gohlke
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Many methods borrow heavily or entirely from transforms3d:
# https://github.com/matthew-brett/transforms3d
# They have mostly been modified to support batched operations.
import numpy as np
import itertools
"""
Rotations
=========
Note: these have caused many subtle bugs in the past.
Be careful while updating these methods and while using them in clever ways.
See MuJoCo documentation here: http://mujoco.org/book/modeling.html#COrientation
Conventions
-----------
- All functions accept batches as well as individual rotations
- All rotation conventions match respective MuJoCo defaults
- All angles are in radians
- Matrices follow LR convention
- Euler Angles are all relative with 'xyz' axes ordering
- See specific representation for more information
Representations
---------------
Euler
There are many euler angle frames -- here we will strive to use the default
in MuJoCo, which is eulerseq='xyz'.
This frame is a relative rotating frame, about x, y, and z axes in order.
Relative rotating means that after we rotate about x, we use the
new (rotated) y, and the same for z.
Quaternions
These are defined in terms of rotation (angle) about a unit vector (x, y, z)
We use the following <q0, q1, q2, q3> convention:
q0 = cos(angle / 2)
q1 = sin(angle / 2) * x
q2 = sin(angle / 2) * y
q3 = sin(angle / 2) * z
This is also sometimes called qw, qx, qy, qz.
Note that quaternions are ambiguous, because we can represent a rotation by
angle about vector <x, y, z> and -angle about vector <-x, -y, -z>.
To choose between these, we pick "first nonzero positive", where we
make the first nonzero element of the quaternion positive.
This can result in mismatches if you're converting a quaternion that is not
"first nonzero positive" to a different representation and back.
Axis Angle
(Not currently implemented)
These are very straightforward. Rotation is angle about a unit vector.
XY Axes
(Not currently implemented)
We are given x axis and y axis, and z axis is cross product of x and y.
Z Axis
This is NOT RECOMMENDED. Defines a unit vector for the Z axis,
but rotation about this axis is not well defined.
Instead pick a fixed reference direction for another axis (e.g. X)
and calculate the other (e.g. Y = Z cross-product X),
then use XY Axes rotation instead.
SO3
(Not currently implemented)
While not supported by MuJoCo, this representation has a lot of nice features.
We expect to add support for these in the future.
TODO / Missing
--------------
- Rotation integration or derivatives (e.g. velocity conversions)
- More representations (SO3, etc)
- Random sampling (e.g. sample uniform random rotation)
- Performance benchmarks/measurements
- (Maybe) define everything as to/from matrices, for simplicity
"""
# For testing whether a number is close to zero
_FLOAT_EPS = np.finfo(np.float64).eps
_EPS4 = _FLOAT_EPS * 4.0
def euler2mat(euler):
"""Convert Euler Angles to Rotation Matrix. See rotation.py for notes"""
euler = np.asarray(euler, dtype=np.float64)
assert euler.shape[-1] == 3, "Invalid shaped euler {}".format(euler)
ai, aj, ak = -euler[..., 2], -euler[..., 1], -euler[..., 0]
si, sj, sk = np.sin(ai), np.sin(aj), np.sin(ak)
ci, cj, ck = np.cos(ai), np.cos(aj), np.cos(ak)
cc, cs = ci * ck, ci * sk
sc, ss = si * ck, si * sk
mat = np.empty(euler.shape[:-1] + (3, 3), dtype=np.float64)
mat[..., 2, 2] = cj * ck
mat[..., 2, 1] = sj * sc - cs
mat[..., 2, 0] = sj * cc + ss
mat[..., 1, 2] = cj * sk
mat[..., 1, 1] = sj * ss + cc
mat[..., 1, 0] = sj * cs - sc
mat[..., 0, 2] = -sj
mat[..., 0, 1] = cj * si
mat[..., 0, 0] = cj * ci
return mat
def euler2quat(euler):
"""Convert Euler Angles to Quaternions. See rotation.py for notes"""
euler = np.asarray(euler, dtype=np.float64)
assert euler.shape[-1] == 3, "Invalid shape euler {}".format(euler)
ai, aj, ak = euler[..., 2] / 2, -euler[..., 1] / 2, euler[..., 0] / 2
si, sj, sk = np.sin(ai), np.sin(aj), np.sin(ak)
ci, cj, ck = np.cos(ai), np.cos(aj), np.cos(ak)
cc, cs = ci * ck, ci * sk
sc, ss = si * ck, si * sk
quat = np.empty(euler.shape[:-1] + (4,), dtype=np.float64)
quat[..., 0] = cj * cc + sj * ss
quat[..., 3] = cj * sc - sj * cs
quat[..., 2] = -(cj * ss + sj * cc)
quat[..., 1] = cj * cs - sj * sc
return quat
def mat2euler(mat):
"""Convert Rotation Matrix to Euler Angles. See rotation.py for notes"""
mat = np.asarray(mat, dtype=np.float64)
assert mat.shape[-2:] == (3, 3), "Invalid shape matrix {}".format(mat)
cy = np.sqrt(mat[..., 2, 2] * mat[..., 2, 2] + mat[..., 1, 2] * mat[..., 1, 2])
condition = cy > _EPS4
euler = np.empty(mat.shape[:-1], dtype=np.float64)
euler[..., 2] = np.where(
condition,
-np.arctan2(mat[..., 0, 1], mat[..., 0, 0]),
-np.arctan2(-mat[..., 1, 0], mat[..., 1, 1]),
)
euler[..., 1] = np.where(
condition, -np.arctan2(-mat[..., 0, 2], cy), -np.arctan2(-mat[..., 0, 2], cy)
)
euler[..., 0] = np.where(
condition, -np.arctan2(mat[..., 1, 2], mat[..., 2, 2]), 0.0
)
return euler
def mat2quat(mat):
"""Convert Rotation Matrix to Quaternion. See rotation.py for notes"""
mat = np.asarray(mat, dtype=np.float64)
assert mat.shape[-2:] == (3, 3), "Invalid shape matrix {}".format(mat)
Qxx, Qyx, Qzx = mat[..., 0, 0], mat[..., 0, 1], mat[..., 0, 2]
Qxy, Qyy, Qzy = mat[..., 1, 0], mat[..., 1, 1], mat[..., 1, 2]
Qxz, Qyz, Qzz = mat[..., 2, 0], mat[..., 2, 1], mat[..., 2, 2]
# Fill only lower half of symmetric matrix
K = np.zeros(mat.shape[:-2] + (4, 4), dtype=np.float64)
K[..., 0, 0] = Qxx - Qyy - Qzz
K[..., 1, 0] = Qyx + Qxy
K[..., 1, 1] = Qyy - Qxx - Qzz
K[..., 2, 0] = Qzx + Qxz
K[..., 2, 1] = Qzy + Qyz
K[..., 2, 2] = Qzz - Qxx - Qyy
K[..., 3, 0] = Qyz - Qzy
K[..., 3, 1] = Qzx - Qxz
K[..., 3, 2] = Qxy - Qyx
K[..., 3, 3] = Qxx + Qyy + Qzz
K /= 3.0
# TODO: vectorize this -- probably could be made faster
q = np.empty(K.shape[:-2] + (4,))
it = np.nditer(q[..., 0], flags=["multi_index"])
while not it.finished:
# Use Hermitian eigenvectors, values for speed
vals, vecs = np.linalg.eigh(K[it.multi_index])
# Select largest eigenvector, reorder to w,x,y,z quaternion
q[it.multi_index] = vecs[[3, 0, 1, 2], np.argmax(vals)]
# Prefer quaternion with positive w
        # (q * -1 corresponds to the same rotation as q)
if q[it.multi_index][0] < 0:
q[it.multi_index] *= -1
it.iternext()
return q
def quat2euler(quat):
"""Convert Quaternion to Euler Angles. See rotation.py for notes"""
return mat2euler(quat2mat(quat))
def subtract_euler(e1, e2):
assert e1.shape == e2.shape
assert e1.shape[-1] == 3
q1 = euler2quat(e1)
q2 = euler2quat(e2)
q_diff = quat_mul(q1, quat_conjugate(q2))
return quat2euler(q_diff)
def quat2mat(quat):
"""Convert Quaternion to Euler Angles. See rotation.py for notes"""
quat = np.asarray(quat, dtype=np.float64)
assert quat.shape[-1] == 4, "Invalid shape quat {}".format(quat)
w, x, y, z = quat[..., 0], quat[..., 1], quat[..., 2], quat[..., 3]
Nq = np.sum(quat * quat, axis=-1)
s = 2.0 / Nq
X, Y, Z = x * s, y * s, z * s
wX, wY, wZ = w * X, w * Y, w * Z
xX, xY, xZ = x * X, x * Y, x * Z
yY, yZ, zZ = y * Y, y * Z, z * Z
mat = np.empty(quat.shape[:-1] + (3, 3), dtype=np.float64)
mat[..., 0, 0] = 1.0 - (yY + zZ)
mat[..., 0, 1] = xY - wZ
mat[..., 0, 2] = xZ + wY
mat[..., 1, 0] = xY + wZ
mat[..., 1, 1] = 1.0 - (xX + zZ)
mat[..., 1, 2] = yZ - wX
mat[..., 2, 0] = xZ - wY
mat[..., 2, 1] = yZ + wX
mat[..., 2, 2] = 1.0 - (xX + yY)
return np.where((Nq > _FLOAT_EPS)[..., np.newaxis, np.newaxis], mat, np.eye(3))
def quat_conjugate(q):
inv_q = -q
inv_q[..., 0] *= -1
return inv_q
def quat_mul(q0, q1):
assert q0.shape == q1.shape
assert q0.shape[-1] == 4
assert q1.shape[-1] == 4
w0 = q0[..., 0]
x0 = q0[..., 1]
y0 = q0[..., 2]
z0 = q0[..., 3]
w1 = q1[..., 0]
x1 = q1[..., 1]
y1 = q1[..., 2]
z1 = q1[..., 3]
w = w0 * w1 - x0 * x1 - y0 * y1 - z0 * z1
x = w0 * x1 + x0 * w1 + y0 * z1 - z0 * y1
y = w0 * y1 + y0 * w1 + z0 * x1 - x0 * z1
z = w0 * z1 + z0 * w1 + x0 * y1 - y0 * x1
q = np.array([w, x, y, z])
if q.ndim == 2:
q = q.swapaxes(0, 1)
assert q.shape == q0.shape
return q
def quat_rot_vec(q, v0):
q_v0 = np.array([0, v0[0], v0[1], v0[2]])
q_v = quat_mul(q, quat_mul(q_v0, quat_conjugate(q)))
v = q_v[1:]
return v
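# Worked example (illustrative): a 90 degree rotation about the z axis is the
# quaternion [cos(pi/4), 0, 0, sin(pi/4)]; quat_rot_vec with that quaternion maps the
# x axis [1, 0, 0] to (approximately) the y axis [0, 1, 0].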
def quat_identity():
return np.array([1, 0, 0, 0])
def quat2axisangle(quat):
theta = 0
axis = np.array([0, 0, 1])
sin_theta = np.linalg.norm(quat[1:])
if sin_theta > 0.0001:
theta = 2 * np.arcsin(sin_theta)
theta *= 1 if quat[0] >= 0 else -1
axis = quat[1:] / sin_theta
return axis, theta
def euler2point_euler(euler):
_euler = euler.copy()
if len(_euler.shape) < 2:
_euler = np.expand_dims(_euler, 0)
assert _euler.shape[1] == 3
_euler_sin = np.sin(_euler)
_euler_cos = np.cos(_euler)
return np.concatenate([_euler_sin, _euler_cos], axis=-1)
def point_euler2euler(euler):
_euler = euler.copy()
if len(_euler.shape) < 2:
_euler = np.expand_dims(_euler, 0)
assert _euler.shape[1] == 6
angle = np.arctan(_euler[..., :3] / _euler[..., 3:])
angle[_euler[..., 3:] < 0] += np.pi
return angle
def quat2point_quat(quat):
# Should be in qw, qx, qy, qz
_quat = quat.copy()
if len(_quat.shape) < 2:
_quat = np.expand_dims(_quat, 0)
assert _quat.shape[1] == 4
angle = np.arccos(_quat[:, [0]]) * 2
xyz = _quat[:, 1:]
xyz[np.squeeze(np.abs(np.sin(angle / 2))) >= 1e-5] = (xyz / np.sin(angle / 2))[
np.squeeze(np.abs(np.sin(angle / 2))) >= 1e-5
]
return np.concatenate([np.sin(angle), np.cos(angle), xyz], axis=-1)
def point_quat2quat(quat):
_quat = quat.copy()
if len(_quat.shape) < 2:
_quat = np.expand_dims(_quat, 0)
assert _quat.shape[1] == 5
angle = np.arctan(_quat[:, [0]] / _quat[:, [1]])
qw = np.cos(angle / 2)
qxyz = _quat[:, 2:]
qxyz[np.squeeze(np.abs(np.sin(angle / 2))) >= 1e-5] = (qxyz * np.sin(angle / 2))[
np.squeeze(np.abs(np.sin(angle / 2))) >= 1e-5
]
return np.concatenate([qw, qxyz], axis=-1)
def normalize_angles(angles):
"""Puts angles in [-pi, pi] range."""
angles = angles.copy()
if angles.size > 0:
angles = (angles + np.pi) % (2 * np.pi) - np.pi
assert -np.pi - 1e-6 <= angles.min() and angles.max() <= np.pi + 1e-6
return angles
def round_to_straight_angles(angles):
"""Returns closest angle modulo 90 degrees"""
angles = np.round(angles / (np.pi / 2)) * (np.pi / 2)
return normalize_angles(angles)
def get_parallel_rotations():
mult90 = [0, np.pi / 2, -np.pi / 2, np.pi]
parallel_rotations = []
for euler in itertools.product(mult90, repeat=3):
canonical = mat2euler(euler2mat(euler))
canonical = np.round(canonical / (np.pi / 2))
if canonical[0] == -2:
canonical[0] = 2
if canonical[2] == -2:
canonical[2] = 2
canonical *= np.pi / 2
if all([(canonical != rot).any() for rot in parallel_rotations]):
parallel_rotations += [canonical]
assert len(parallel_rotations) == 24
return parallel_rotations
| 13,271 | Python | .py | 320 | 36.425 | 85 | 0.597252 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,266 | manipulate.py | WindyLab_Gym-PPS/gym/envs/robotics/hand/manipulate.py | import os
import numpy as np
from gym import utils, error
from gym.envs.robotics import rotations, hand_env
from gym.envs.robotics.utils import robot_get_obs
try:
import mujoco_py
except ImportError as e:
raise error.DependencyNotInstalled(
"{}. (HINT: you need to install mujoco_py, and also perform the setup instructions here: https://github.com/openai/mujoco-py/.)".format(
e
)
)
def quat_from_angle_and_axis(angle, axis):
assert axis.shape == (3,)
axis /= np.linalg.norm(axis)
quat = np.concatenate([[np.cos(angle / 2.0)], np.sin(angle / 2.0) * axis])
quat /= np.linalg.norm(quat)
return quat
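# Worked example (illustrative): quat_from_angle_and_axis(np.pi, np.array([0.0, 0.0, 1.0]))
# is a half turn about z and evaluates to approximately [0, 0, 0, 1] in the
# [qw, qx, qy, qz] convention used throughout this package.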
# Ensure we get the path separator correct on windows
MANIPULATE_BLOCK_XML = os.path.join("hand", "manipulate_block.xml")
MANIPULATE_EGG_XML = os.path.join("hand", "manipulate_egg.xml")
MANIPULATE_PEN_XML = os.path.join("hand", "manipulate_pen.xml")
class ManipulateEnv(hand_env.HandEnv):
def __init__(
self,
model_path,
target_position,
target_rotation,
target_position_range,
reward_type,
initial_qpos=None,
randomize_initial_position=True,
randomize_initial_rotation=True,
distance_threshold=0.01,
rotation_threshold=0.1,
n_substeps=20,
relative_control=False,
ignore_z_target_rotation=False,
):
"""Initializes a new Hand manipulation environment.
Args:
model_path (string): path to the environments XML file
target_position (string): the type of target position:
- ignore: target position is fully ignored, i.e. the object can be positioned arbitrarily
- fixed: target position is set to the initial position of the object
- random: target position is fully randomized according to target_position_range
target_rotation (string): the type of target rotation:
- ignore: target rotation is fully ignored, i.e. the object can be rotated arbitrarily
- fixed: target rotation is set to the initial rotation of the object
- xyz: fully randomized target rotation around the X, Y and Z axis
- z: fully randomized target rotation around the Z axis
- parallel: fully randomized target rotation around Z and axis-aligned rotation around X, Y
ignore_z_target_rotation (boolean): whether or not the Z axis of the target rotation is ignored
target_position_range (np.array of shape (3, 2)): range of the target_position randomization
reward_type ('sparse' or 'dense'): the reward type, i.e. sparse or dense
initial_qpos (dict): a dictionary of joint names and values that define the initial configuration
randomize_initial_position (boolean): whether or not to randomize the initial position of the object
randomize_initial_rotation (boolean): whether or not to randomize the initial rotation of the object
distance_threshold (float, in meters): the threshold after which the position of a goal is considered achieved
rotation_threshold (float, in radians): the threshold after which the rotation of a goal is considered achieved
n_substeps (int): number of substeps the simulation runs on every call to step
            relative_control (boolean): whether the hand is actuated in absolute joint positions or relative to the current state
"""
self.target_position = target_position
self.target_rotation = target_rotation
self.target_position_range = target_position_range
self.parallel_quats = [
rotations.euler2quat(r) for r in rotations.get_parallel_rotations()
]
self.randomize_initial_rotation = randomize_initial_rotation
self.randomize_initial_position = randomize_initial_position
self.distance_threshold = distance_threshold
self.rotation_threshold = rotation_threshold
self.reward_type = reward_type
self.ignore_z_target_rotation = ignore_z_target_rotation
assert self.target_position in ["ignore", "fixed", "random"]
assert self.target_rotation in ["ignore", "fixed", "xyz", "z", "parallel"]
initial_qpos = initial_qpos or {}
hand_env.HandEnv.__init__(
self,
model_path,
n_substeps=n_substeps,
initial_qpos=initial_qpos,
relative_control=relative_control,
)
def _get_achieved_goal(self):
# Object position and rotation.
object_qpos = self.sim.data.get_joint_qpos("object:joint")
assert object_qpos.shape == (7,)
return object_qpos
def _goal_distance(self, goal_a, goal_b):
assert goal_a.shape == goal_b.shape
assert goal_a.shape[-1] == 7
d_pos = np.zeros_like(goal_a[..., 0])
d_rot = np.zeros_like(goal_b[..., 0])
if self.target_position != "ignore":
delta_pos = goal_a[..., :3] - goal_b[..., :3]
d_pos = np.linalg.norm(delta_pos, axis=-1)
if self.target_rotation != "ignore":
quat_a, quat_b = goal_a[..., 3:], goal_b[..., 3:]
if self.ignore_z_target_rotation:
# Special case: We want to ignore the Z component of the rotation.
# This code here assumes Euler angles with xyz convention. We first transform
# to euler, then set the Z component to be equal between the two, and finally
# transform back into quaternions.
euler_a = rotations.quat2euler(quat_a)
euler_b = rotations.quat2euler(quat_b)
euler_a[2] = euler_b[2]
quat_a = rotations.euler2quat(euler_a)
# Subtract quaternions and extract angle between them.
quat_diff = rotations.quat_mul(quat_a, rotations.quat_conjugate(quat_b))
angle_diff = 2 * np.arccos(np.clip(quat_diff[..., 0], -1.0, 1.0))
d_rot = angle_diff
assert d_pos.shape == d_rot.shape
return d_pos, d_rot
# GoalEnv methods
# ----------------------------
def compute_reward(self, achieved_goal, goal, info):
if self.reward_type == "sparse":
success = self._is_success(achieved_goal, goal).astype(np.float32)
return success - 1.0
else:
d_pos, d_rot = self._goal_distance(achieved_goal, goal)
            # We weight the positional difference so that `d_pos` (in meters) is not
            # completely dominated by `d_rot` (in radians).
return -(10.0 * d_pos + d_rot)
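    # Worked example (illustrative): a positional error of 0.01 m and a rotational error
    # of 0.2 rad give a dense reward of -(10.0 * 0.01 + 0.2) = -0.3, whereas the sparse
    # variant only distinguishes success (0.0) from failure (-1.0).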
# RobotEnv methods
# ----------------------------
def _is_success(self, achieved_goal, desired_goal):
d_pos, d_rot = self._goal_distance(achieved_goal, desired_goal)
achieved_pos = (d_pos < self.distance_threshold).astype(np.float32)
achieved_rot = (d_rot < self.rotation_threshold).astype(np.float32)
achieved_both = achieved_pos * achieved_rot
return achieved_both
def _env_setup(self, initial_qpos):
for name, value in initial_qpos.items():
self.sim.data.set_joint_qpos(name, value)
self.sim.forward()
def _reset_sim(self):
self.sim.set_state(self.initial_state)
self.sim.forward()
initial_qpos = self.sim.data.get_joint_qpos("object:joint").copy()
initial_pos, initial_quat = initial_qpos[:3], initial_qpos[3:]
assert initial_qpos.shape == (7,)
assert initial_pos.shape == (3,)
assert initial_quat.shape == (4,)
initial_qpos = None
        # Randomize the initial rotation.
if self.randomize_initial_rotation:
if self.target_rotation == "z":
angle = self.np_random.uniform(-np.pi, np.pi)
axis = np.array([0.0, 0.0, 1.0])
offset_quat = quat_from_angle_and_axis(angle, axis)
initial_quat = rotations.quat_mul(initial_quat, offset_quat)
elif self.target_rotation == "parallel":
angle = self.np_random.uniform(-np.pi, np.pi)
axis = np.array([0.0, 0.0, 1.0])
z_quat = quat_from_angle_and_axis(angle, axis)
parallel_quat = self.parallel_quats[
self.np_random.randint(len(self.parallel_quats))
]
offset_quat = rotations.quat_mul(z_quat, parallel_quat)
initial_quat = rotations.quat_mul(initial_quat, offset_quat)
elif self.target_rotation in ["xyz", "ignore"]:
angle = self.np_random.uniform(-np.pi, np.pi)
axis = self.np_random.uniform(-1.0, 1.0, size=3)
offset_quat = quat_from_angle_and_axis(angle, axis)
initial_quat = rotations.quat_mul(initial_quat, offset_quat)
elif self.target_rotation == "fixed":
pass
else:
raise error.Error(
'Unknown target_rotation option "{}".'.format(self.target_rotation)
)
# Randomize initial position.
if self.randomize_initial_position:
if self.target_position != "fixed":
initial_pos += self.np_random.normal(size=3, scale=0.005)
initial_quat /= np.linalg.norm(initial_quat)
initial_qpos = np.concatenate([initial_pos, initial_quat])
self.sim.data.set_joint_qpos("object:joint", initial_qpos)
def is_on_palm():
self.sim.forward()
cube_middle_idx = self.sim.model.site_name2id("object:center")
cube_middle_pos = self.sim.data.site_xpos[cube_middle_idx]
is_on_palm = cube_middle_pos[2] > 0.04
return is_on_palm
# Run the simulation for a bunch of timesteps to let everything settle in.
for _ in range(10):
self._set_action(np.zeros(20))
try:
self.sim.step()
except mujoco_py.MujocoException:
return False
return is_on_palm()
def _sample_goal(self):
# Select a goal for the object position.
target_pos = None
if self.target_position == "random":
assert self.target_position_range.shape == (3, 2)
offset = self.np_random.uniform(
self.target_position_range[:, 0], self.target_position_range[:, 1]
)
assert offset.shape == (3,)
target_pos = self.sim.data.get_joint_qpos("object:joint")[:3] + offset
elif self.target_position in ["ignore", "fixed"]:
target_pos = self.sim.data.get_joint_qpos("object:joint")[:3]
else:
raise error.Error(
'Unknown target_position option "{}".'.format(self.target_position)
)
assert target_pos is not None
assert target_pos.shape == (3,)
# Select a goal for the object rotation.
target_quat = None
if self.target_rotation == "z":
angle = self.np_random.uniform(-np.pi, np.pi)
axis = np.array([0.0, 0.0, 1.0])
target_quat = quat_from_angle_and_axis(angle, axis)
elif self.target_rotation == "parallel":
angle = self.np_random.uniform(-np.pi, np.pi)
axis = np.array([0.0, 0.0, 1.0])
target_quat = quat_from_angle_and_axis(angle, axis)
parallel_quat = self.parallel_quats[
self.np_random.randint(len(self.parallel_quats))
]
target_quat = rotations.quat_mul(target_quat, parallel_quat)
elif self.target_rotation == "xyz":
angle = self.np_random.uniform(-np.pi, np.pi)
axis = self.np_random.uniform(-1.0, 1.0, size=3)
target_quat = quat_from_angle_and_axis(angle, axis)
elif self.target_rotation in ["ignore", "fixed"]:
target_quat = self.sim.data.get_joint_qpos("object:joint")
else:
raise error.Error(
'Unknown target_rotation option "{}".'.format(self.target_rotation)
)
assert target_quat is not None
assert target_quat.shape == (4,)
target_quat /= np.linalg.norm(target_quat) # normalized quaternion
goal = np.concatenate([target_pos, target_quat])
return goal
def _render_callback(self):
# Assign current state to target object but offset a bit so that the actual object
# is not obscured.
goal = self.goal.copy()
assert goal.shape == (7,)
if self.target_position == "ignore":
            # Move the object to the side since we do not care about its position.
goal[0] += 0.15
self.sim.data.set_joint_qpos("target:joint", goal)
self.sim.data.set_joint_qvel("target:joint", np.zeros(6))
if "object_hidden" in self.sim.model.geom_names:
hidden_id = self.sim.model.geom_name2id("object_hidden")
self.sim.model.geom_rgba[hidden_id, 3] = 1.0
self.sim.forward()
def _get_obs(self):
robot_qpos, robot_qvel = robot_get_obs(self.sim)
object_qvel = self.sim.data.get_joint_qvel("object:joint")
achieved_goal = (
self._get_achieved_goal().ravel()
) # this contains the object position + rotation
observation = np.concatenate(
[robot_qpos, robot_qvel, object_qvel, achieved_goal]
)
return {
"observation": observation.copy(),
"achieved_goal": achieved_goal.copy(),
"desired_goal": self.goal.ravel().copy(),
}
class HandBlockEnv(ManipulateEnv, utils.EzPickle):
def __init__(
self, target_position="random", target_rotation="xyz", reward_type="sparse"
):
utils.EzPickle.__init__(self, target_position, target_rotation, reward_type)
ManipulateEnv.__init__(
self,
model_path=MANIPULATE_BLOCK_XML,
target_position=target_position,
target_rotation=target_rotation,
target_position_range=np.array([(-0.04, 0.04), (-0.06, 0.02), (0.0, 0.06)]),
reward_type=reward_type,
)
class HandEggEnv(ManipulateEnv, utils.EzPickle):
def __init__(
self, target_position="random", target_rotation="xyz", reward_type="sparse"
):
utils.EzPickle.__init__(self, target_position, target_rotation, reward_type)
ManipulateEnv.__init__(
self,
model_path=MANIPULATE_EGG_XML,
target_position=target_position,
target_rotation=target_rotation,
target_position_range=np.array([(-0.04, 0.04), (-0.06, 0.02), (0.0, 0.06)]),
reward_type=reward_type,
)
class HandPenEnv(ManipulateEnv, utils.EzPickle):
def __init__(
self, target_position="random", target_rotation="xyz", reward_type="sparse"
):
utils.EzPickle.__init__(self, target_position, target_rotation, reward_type)
ManipulateEnv.__init__(
self,
model_path=MANIPULATE_PEN_XML,
target_position=target_position,
target_rotation=target_rotation,
target_position_range=np.array([(-0.04, 0.04), (-0.06, 0.02), (0.0, 0.06)]),
randomize_initial_rotation=False,
reward_type=reward_type,
ignore_z_target_rotation=True,
distance_threshold=0.05,
)
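# ---------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the upstream file): a
# minimal rollout, assuming the standard Gym registration ids such as
# "HandManipulateBlock-v0" and a working mujoco_py installation.
if __name__ == "__main__":
    import gym

    env = gym.make("HandManipulateBlock-v0")
    obs = env.reset()
    # Observations are goal-conditioned dicts with these three keys.
    assert set(obs.keys()) == {"observation", "achieved_goal", "desired_goal"}
    for _ in range(5):
        obs, reward, done, info = env.step(env.action_space.sample())
        if done:
            obs = env.reset()
    env.close()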
| 15,503 | Python | .py | 313 | 38.750799 | 144 | 0.606113 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,267 | reach.py | WindyLab_Gym-PPS/gym/envs/robotics/hand/reach.py | import os
import numpy as np
from gym import utils
from gym.envs.robotics import hand_env
from gym.envs.robotics.utils import robot_get_obs
FINGERTIP_SITE_NAMES = [
"robot0:S_fftip",
"robot0:S_mftip",
"robot0:S_rftip",
"robot0:S_lftip",
"robot0:S_thtip",
]
DEFAULT_INITIAL_QPOS = {
"robot0:WRJ1": -0.16514339750464327,
"robot0:WRJ0": -0.31973286565062153,
"robot0:FFJ3": 0.14340512546557435,
"robot0:FFJ2": 0.32028208333591573,
"robot0:FFJ1": 0.7126053607727917,
"robot0:FFJ0": 0.6705281001412586,
"robot0:MFJ3": 0.000246444303701037,
"robot0:MFJ2": 0.3152655251085491,
"robot0:MFJ1": 0.7659800313729842,
"robot0:MFJ0": 0.7323156897425923,
"robot0:RFJ3": 0.00038520700007378114,
"robot0:RFJ2": 0.36743546201985233,
"robot0:RFJ1": 0.7119514095008576,
"robot0:RFJ0": 0.6699446327514138,
"robot0:LFJ4": 0.0525442258033891,
"robot0:LFJ3": -0.13615534724474673,
"robot0:LFJ2": 0.39872030433433003,
"robot0:LFJ1": 0.7415570009679252,
"robot0:LFJ0": 0.704096378652974,
"robot0:THJ4": 0.003673823825070126,
"robot0:THJ3": 0.5506291436028695,
"robot0:THJ2": -0.014515151997119306,
"robot0:THJ1": -0.0015229223564485414,
"robot0:THJ0": -0.7894883021600622,
}
# Ensure we get the path separator correct on windows
MODEL_XML_PATH = os.path.join("hand", "reach.xml")
def goal_distance(goal_a, goal_b):
assert goal_a.shape == goal_b.shape
return np.linalg.norm(goal_a - goal_b, axis=-1)
class HandReachEnv(hand_env.HandEnv, utils.EzPickle):
def __init__(
self,
distance_threshold=0.01,
n_substeps=20,
relative_control=False,
initial_qpos=DEFAULT_INITIAL_QPOS,
reward_type="sparse",
):
utils.EzPickle.__init__(**locals())
self.distance_threshold = distance_threshold
self.reward_type = reward_type
hand_env.HandEnv.__init__(
self,
MODEL_XML_PATH,
n_substeps=n_substeps,
initial_qpos=initial_qpos,
relative_control=relative_control,
)
def _get_achieved_goal(self):
goal = [self.sim.data.get_site_xpos(name) for name in FINGERTIP_SITE_NAMES]
return np.array(goal).flatten()
# GoalEnv methods
# ----------------------------
def compute_reward(self, achieved_goal, goal, info):
d = goal_distance(achieved_goal, goal)
if self.reward_type == "sparse":
return -(d > self.distance_threshold).astype(np.float32)
else:
return -d
# RobotEnv methods
# ----------------------------
def _env_setup(self, initial_qpos):
for name, value in initial_qpos.items():
self.sim.data.set_joint_qpos(name, value)
self.sim.forward()
self.initial_goal = self._get_achieved_goal().copy()
self.palm_xpos = self.sim.data.body_xpos[
self.sim.model.body_name2id("robot0:palm")
].copy()
def _get_obs(self):
robot_qpos, robot_qvel = robot_get_obs(self.sim)
achieved_goal = self._get_achieved_goal().ravel()
observation = np.concatenate([robot_qpos, robot_qvel, achieved_goal])
return {
"observation": observation.copy(),
"achieved_goal": achieved_goal.copy(),
"desired_goal": self.goal.copy(),
}
def _sample_goal(self):
thumb_name = "robot0:S_thtip"
finger_names = [name for name in FINGERTIP_SITE_NAMES if name != thumb_name]
finger_name = self.np_random.choice(finger_names)
thumb_idx = FINGERTIP_SITE_NAMES.index(thumb_name)
finger_idx = FINGERTIP_SITE_NAMES.index(finger_name)
assert thumb_idx != finger_idx
# Pick a meeting point above the hand.
meeting_pos = self.palm_xpos + np.array([0.0, -0.09, 0.05])
meeting_pos += self.np_random.normal(scale=0.005, size=meeting_pos.shape)
        # Slightly move the meeting goal towards the respective finger so that
        # they do not overlap.
goal = self.initial_goal.copy().reshape(-1, 3)
for idx in [thumb_idx, finger_idx]:
offset_direction = meeting_pos - goal[idx]
offset_direction /= np.linalg.norm(offset_direction)
goal[idx] = meeting_pos - 0.005 * offset_direction
if self.np_random.uniform() < 0.1:
            # With some probability, ask all fingers to move back to the origin.
            # This prevents the thumb from constantly staying near its goal position.
goal = self.initial_goal.copy()
return goal.flatten()
def _is_success(self, achieved_goal, desired_goal):
d = goal_distance(achieved_goal, desired_goal)
return (d < self.distance_threshold).astype(np.float32)
def _render_callback(self):
# Visualize targets.
sites_offset = (self.sim.data.site_xpos - self.sim.model.site_pos).copy()
goal = self.goal.reshape(5, 3)
for finger_idx in range(5):
site_name = "target{}".format(finger_idx)
site_id = self.sim.model.site_name2id(site_name)
self.sim.model.site_pos[site_id] = goal[finger_idx] - sites_offset[site_id]
# Visualize finger positions.
achieved_goal = self._get_achieved_goal().reshape(5, 3)
for finger_idx in range(5):
site_name = "finger{}".format(finger_idx)
site_id = self.sim.model.site_name2id(site_name)
self.sim.model.site_pos[site_id] = (
achieved_goal[finger_idx] - sites_offset[site_id]
)
self.sim.forward()
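# ---------------------------------------------------------------------------
# Usage sketch (illustration only): the reward depends only on
# (achieved_goal, desired_goal), so it can be recomputed for relabelled goals,
# e.g. in HER-style training.  Assumes the standard "HandReach-v0"
# registration id.
if __name__ == "__main__":
    import gym

    env = gym.make("HandReach-v0")
    obs = env.reset()
    obs, reward, done, info = env.step(env.action_space.sample())
    # Recomputing the reward from the returned goals reproduces the env's value.
    recomputed = env.compute_reward(obs["achieved_goal"], obs["desired_goal"], info)
    assert np.isclose(recomputed, reward)
    env.close()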
| 5,635 | Python | .py | 134 | 34.134328 | 89 | 0.632079 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,268 | manipulate_touch_sensors.py | WindyLab_Gym-PPS/gym/envs/robotics/hand/manipulate_touch_sensors.py | import os
import numpy as np
from gym import utils, error, spaces
from gym.envs.robotics.hand import manipulate
# Ensure we get the path separator correct on windows
MANIPULATE_BLOCK_XML = os.path.join("hand", "manipulate_block_touch_sensors.xml")
MANIPULATE_EGG_XML = os.path.join("hand", "manipulate_egg_touch_sensors.xml")
MANIPULATE_PEN_XML = os.path.join("hand", "manipulate_pen_touch_sensors.xml")
class ManipulateTouchSensorsEnv(manipulate.ManipulateEnv):
def __init__(
self,
model_path,
target_position,
target_rotation,
target_position_range,
reward_type,
initial_qpos={},
randomize_initial_position=True,
randomize_initial_rotation=True,
distance_threshold=0.01,
rotation_threshold=0.1,
n_substeps=20,
relative_control=False,
ignore_z_target_rotation=False,
touch_visualisation="on_touch",
touch_get_obs="sensordata",
):
"""Initializes a new Hand manipulation environment with touch sensors.
Args:
touch_visualisation (string): how touch sensor sites are visualised
- "on_touch": shows touch sensor sites only when touch values > 0
- "always": always shows touch sensor sites
- "off" or else: does not show touch sensor sites
touch_get_obs (string): touch sensor readings
- "boolean": returns 1 if touch sensor reading != 0.0 else 0
- "sensordata": returns original touch sensor readings from self.sim.data.sensordata[id]
- "log": returns log(x+1) touch sensor readings from self.sim.data.sensordata[id]
- "off" or else: does not add touch sensor readings to the observation
"""
self.touch_visualisation = touch_visualisation
self.touch_get_obs = touch_get_obs
self._touch_sensor_id_site_id = []
self._touch_sensor_id = []
self.touch_color = [1, 0, 0, 0.5]
self.notouch_color = [0, 0.5, 0, 0.2]
manipulate.ManipulateEnv.__init__(
self,
model_path,
target_position,
target_rotation,
target_position_range,
reward_type,
initial_qpos=initial_qpos,
randomize_initial_position=randomize_initial_position,
randomize_initial_rotation=randomize_initial_rotation,
distance_threshold=distance_threshold,
rotation_threshold=rotation_threshold,
n_substeps=n_substeps,
relative_control=relative_control,
ignore_z_target_rotation=ignore_z_target_rotation,
)
        # Collect touch sensor site names and their ids.
        for k, v in self.sim.model._sensor_name2id.items():
if "robot0:TS_" in k:
self._touch_sensor_id_site_id.append(
(
v,
self.sim.model._site_name2id[
k.replace("robot0:TS_", "robot0:T_")
],
)
)
self._touch_sensor_id.append(v)
if self.touch_visualisation == "off": # set touch sensors rgba values
for _, site_id in self._touch_sensor_id_site_id:
self.sim.model.site_rgba[site_id][3] = 0.0
elif self.touch_visualisation == "always":
pass
obs = self._get_obs()
self.observation_space = spaces.Dict(
dict(
desired_goal=spaces.Box(
-np.inf, np.inf, shape=obs["achieved_goal"].shape, dtype="float32"
),
achieved_goal=spaces.Box(
-np.inf, np.inf, shape=obs["achieved_goal"].shape, dtype="float32"
),
observation=spaces.Box(
-np.inf, np.inf, shape=obs["observation"].shape, dtype="float32"
),
)
)
def _render_callback(self):
super(ManipulateTouchSensorsEnv, self)._render_callback()
if self.touch_visualisation == "on_touch":
for touch_sensor_id, site_id in self._touch_sensor_id_site_id:
if self.sim.data.sensordata[touch_sensor_id] != 0.0:
self.sim.model.site_rgba[site_id] = self.touch_color
else:
self.sim.model.site_rgba[site_id] = self.notouch_color
def _get_obs(self):
robot_qpos, robot_qvel = manipulate.robot_get_obs(self.sim)
object_qvel = self.sim.data.get_joint_qvel("object:joint")
achieved_goal = (
self._get_achieved_goal().ravel()
) # this contains the object position + rotation
        touch_values = []  # touch sensor readings; representation depends on touch_get_obs
if self.touch_get_obs == "sensordata":
touch_values = self.sim.data.sensordata[self._touch_sensor_id]
elif self.touch_get_obs == "boolean":
touch_values = self.sim.data.sensordata[self._touch_sensor_id] > 0.0
elif self.touch_get_obs == "log":
touch_values = np.log(self.sim.data.sensordata[self._touch_sensor_id] + 1.0)
observation = np.concatenate(
[robot_qpos, robot_qvel, object_qvel, touch_values, achieved_goal]
)
return {
"observation": observation.copy(),
"achieved_goal": achieved_goal.copy(),
"desired_goal": self.goal.ravel().copy(),
}
class HandBlockTouchSensorsEnv(ManipulateTouchSensorsEnv, utils.EzPickle):
def __init__(
self,
target_position="random",
target_rotation="xyz",
touch_get_obs="sensordata",
reward_type="sparse",
):
utils.EzPickle.__init__(
self, target_position, target_rotation, touch_get_obs, reward_type
)
ManipulateTouchSensorsEnv.__init__(
self,
model_path=MANIPULATE_BLOCK_XML,
touch_get_obs=touch_get_obs,
target_rotation=target_rotation,
target_position=target_position,
target_position_range=np.array([(-0.04, 0.04), (-0.06, 0.02), (0.0, 0.06)]),
reward_type=reward_type,
)
class HandEggTouchSensorsEnv(ManipulateTouchSensorsEnv, utils.EzPickle):
def __init__(
self,
target_position="random",
target_rotation="xyz",
touch_get_obs="sensordata",
reward_type="sparse",
):
utils.EzPickle.__init__(
self, target_position, target_rotation, touch_get_obs, reward_type
)
ManipulateTouchSensorsEnv.__init__(
self,
model_path=MANIPULATE_EGG_XML,
touch_get_obs=touch_get_obs,
target_rotation=target_rotation,
target_position=target_position,
target_position_range=np.array([(-0.04, 0.04), (-0.06, 0.02), (0.0, 0.06)]),
reward_type=reward_type,
)
class HandPenTouchSensorsEnv(ManipulateTouchSensorsEnv, utils.EzPickle):
def __init__(
self,
target_position="random",
target_rotation="xyz",
touch_get_obs="sensordata",
reward_type="sparse",
):
utils.EzPickle.__init__(
self, target_position, target_rotation, touch_get_obs, reward_type
)
ManipulateTouchSensorsEnv.__init__(
self,
model_path=MANIPULATE_PEN_XML,
touch_get_obs=touch_get_obs,
target_rotation=target_rotation,
target_position=target_position,
target_position_range=np.array([(-0.04, 0.04), (-0.06, 0.02), (0.0, 0.06)]),
randomize_initial_rotation=False,
reward_type=reward_type,
ignore_z_target_rotation=True,
distance_threshold=0.05,
)
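# ---------------------------------------------------------------------------
# Usage sketch (illustration only): the size of the "observation" vector grows
# with the touch readings that are appended, and their format follows
# touch_get_obs.  Assumes "HandManipulateBlockTouchSensors-v0" is registered,
# as in the standard Gym setup.
if __name__ == "__main__":
    import gym

    env = gym.make("HandManipulateBlockTouchSensors-v0")
    obs = env.reset()
    print("observation size:", obs["observation"].shape)
    env.close()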
| 7,929 | Python | .py | 188 | 30.904255 | 104 | 0.580808 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,269 | slide.py | WindyLab_Gym-PPS/gym/envs/robotics/fetch/slide.py | import os
import numpy as np
from gym import utils
from gym.envs.robotics import fetch_env
# Ensure we get the path separator correct on windows
MODEL_XML_PATH = os.path.join("fetch", "slide.xml")
class FetchSlideEnv(fetch_env.FetchEnv, utils.EzPickle):
def __init__(self, reward_type="sparse"):
initial_qpos = {
"robot0:slide0": 0.05,
"robot0:slide1": 0.48,
"robot0:slide2": 0.0,
"object0:joint": [1.7, 1.1, 0.41, 1.0, 0.0, 0.0, 0.0],
}
fetch_env.FetchEnv.__init__(
self,
MODEL_XML_PATH,
has_object=True,
block_gripper=True,
n_substeps=20,
gripper_extra_height=-0.02,
target_in_the_air=False,
target_offset=np.array([0.4, 0.0, 0.0]),
obj_range=0.1,
target_range=0.3,
distance_threshold=0.05,
initial_qpos=initial_qpos,
reward_type=reward_type,
)
utils.EzPickle.__init__(self, reward_type=reward_type)
| 1,055 | Python | .py | 30 | 25.733333 | 66 | 0.563725 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,270 | push.py | WindyLab_Gym-PPS/gym/envs/robotics/fetch/push.py | import os
from gym import utils
from gym.envs.robotics import fetch_env
# Ensure we get the path separator correct on windows
MODEL_XML_PATH = os.path.join("fetch", "push.xml")
class FetchPushEnv(fetch_env.FetchEnv, utils.EzPickle):
def __init__(self, reward_type="sparse"):
initial_qpos = {
"robot0:slide0": 0.405,
"robot0:slide1": 0.48,
"robot0:slide2": 0.0,
"object0:joint": [1.25, 0.53, 0.4, 1.0, 0.0, 0.0, 0.0],
}
fetch_env.FetchEnv.__init__(
self,
MODEL_XML_PATH,
has_object=True,
block_gripper=True,
n_substeps=20,
gripper_extra_height=0.0,
target_in_the_air=False,
target_offset=0.0,
obj_range=0.15,
target_range=0.15,
distance_threshold=0.05,
initial_qpos=initial_qpos,
reward_type=reward_type,
)
utils.EzPickle.__init__(self, reward_type=reward_type)
| 1,013 | Python | .py | 29 | 25.241379 | 67 | 0.561224 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,271 | pick_and_place.py | WindyLab_Gym-PPS/gym/envs/robotics/fetch/pick_and_place.py | import os
from gym import utils
from gym.envs.robotics import fetch_env
# Ensure we get the path separator correct on windows
MODEL_XML_PATH = os.path.join("fetch", "pick_and_place.xml")
class FetchPickAndPlaceEnv(fetch_env.FetchEnv, utils.EzPickle):
def __init__(self, reward_type="sparse"):
initial_qpos = {
"robot0:slide0": 0.405,
"robot0:slide1": 0.48,
"robot0:slide2": 0.0,
"object0:joint": [1.25, 0.53, 0.4, 1.0, 0.0, 0.0, 0.0],
}
fetch_env.FetchEnv.__init__(
self,
MODEL_XML_PATH,
has_object=True,
block_gripper=False,
n_substeps=20,
gripper_extra_height=0.2,
target_in_the_air=True,
target_offset=0.0,
obj_range=0.15,
target_range=0.15,
distance_threshold=0.05,
initial_qpos=initial_qpos,
reward_type=reward_type,
)
utils.EzPickle.__init__(self, reward_type=reward_type)
| 1,031 | Python | .py | 29 | 25.862069 | 67 | 0.567134 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,272 | reach.py | WindyLab_Gym-PPS/gym/envs/robotics/fetch/reach.py | import os
from gym import utils
from gym.envs.robotics import fetch_env
# Ensure we get the path separator correct on windows
MODEL_XML_PATH = os.path.join("fetch", "reach.xml")
class FetchReachEnv(fetch_env.FetchEnv, utils.EzPickle):
def __init__(self, reward_type="sparse"):
initial_qpos = {
"robot0:slide0": 0.4049,
"robot0:slide1": 0.48,
"robot0:slide2": 0.0,
}
fetch_env.FetchEnv.__init__(
self,
MODEL_XML_PATH,
has_object=False,
block_gripper=True,
n_substeps=20,
gripper_extra_height=0.2,
target_in_the_air=True,
target_offset=0.0,
obj_range=0.15,
target_range=0.15,
distance_threshold=0.05,
initial_qpos=initial_qpos,
reward_type=reward_type,
)
utils.EzPickle.__init__(self, reward_type=reward_type)
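# ---------------------------------------------------------------------------
# Usage sketch (illustration only): the sparse/dense choice is fixed at
# construction time via reward_type.  Assumes the usual "FetchReach-v1"
# registration and that this Gym version forwards gym.make kwargs to the
# constructor; a dense variant is typically registered as "FetchReachDense-v1".
if __name__ == "__main__":
    import gym

    sparse_env = gym.make("FetchReach-v1")
    dense_env = gym.make("FetchReach-v1", reward_type="dense")
    print(sparse_env.reset()["desired_goal"])
    print(dense_env.reset()["desired_goal"])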
| 948 | Python | .py | 28 | 24.285714 | 62 | 0.573144 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,273 | humanoidstandup.py | WindyLab_Gym-PPS/gym/envs/mujoco/humanoidstandup.py | from gym.envs.mujoco import mujoco_env
from gym import utils
import numpy as np
class HumanoidStandupEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self):
mujoco_env.MujocoEnv.__init__(self, "humanoidstandup.xml", 5)
utils.EzPickle.__init__(self)
def _get_obs(self):
data = self.sim.data
return np.concatenate(
[
data.qpos.flat[2:],
data.qvel.flat,
data.cinert.flat,
data.cvel.flat,
data.qfrc_actuator.flat,
data.cfrc_ext.flat,
]
)
def step(self, a):
self.do_simulation(a, self.frame_skip)
pos_after = self.sim.data.qpos[2]
data = self.sim.data
uph_cost = (pos_after - 0) / self.model.opt.timestep
quad_ctrl_cost = 0.1 * np.square(data.ctrl).sum()
quad_impact_cost = 0.5e-6 * np.square(data.cfrc_ext).sum()
quad_impact_cost = min(quad_impact_cost, 10)
reward = uph_cost - quad_ctrl_cost - quad_impact_cost + 1
        done = False
return (
self._get_obs(),
reward,
done,
dict(
reward_linup=uph_cost,
reward_quadctrl=-quad_ctrl_cost,
reward_impact=-quad_impact_cost,
),
)
def reset_model(self):
c = 0.01
self.set_state(
self.init_qpos + self.np_random.uniform(low=-c, high=c, size=self.model.nq),
self.init_qvel
+ self.np_random.uniform(
low=-c,
high=c,
size=self.model.nv,
),
)
return self._get_obs()
def viewer_setup(self):
self.viewer.cam.trackbodyid = 1
self.viewer.cam.distance = self.model.stat.extent * 1.0
self.viewer.cam.lookat[2] = 0.8925
self.viewer.cam.elevation = -20
| 1,931 | Python | .py | 56 | 23.767857 | 88 | 0.534547 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,274 | ant.py | WindyLab_Gym-PPS/gym/envs/mujoco/ant.py | import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
class AntEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self):
mujoco_env.MujocoEnv.__init__(self, "ant.xml", 5)
utils.EzPickle.__init__(self)
def step(self, a):
xposbefore = self.get_body_com("torso")[0]
self.do_simulation(a, self.frame_skip)
xposafter = self.get_body_com("torso")[0]
forward_reward = (xposafter - xposbefore) / self.dt
ctrl_cost = 0.5 * np.square(a).sum()
contact_cost = (
0.5 * 1e-3 * np.sum(np.square(np.clip(self.sim.data.cfrc_ext, -1, 1)))
)
survive_reward = 1.0
reward = forward_reward - ctrl_cost - contact_cost + survive_reward
state = self.state_vector()
notdone = np.isfinite(state).all() and state[2] >= 0.2 and state[2] <= 1.0
done = not notdone
ob = self._get_obs()
return (
ob,
reward,
done,
dict(
reward_forward=forward_reward,
reward_ctrl=-ctrl_cost,
reward_contact=-contact_cost,
reward_survive=survive_reward,
),
)
def _get_obs(self):
return np.concatenate(
[
self.sim.data.qpos.flat[2:],
self.sim.data.qvel.flat,
np.clip(self.sim.data.cfrc_ext, -1, 1).flat,
]
)
def reset_model(self):
qpos = self.init_qpos + self.np_random.uniform(
size=self.model.nq, low=-0.1, high=0.1
)
qvel = self.init_qvel + self.np_random.randn(self.model.nv) * 0.1
self.set_state(qpos, qvel)
return self._get_obs()
def viewer_setup(self):
self.viewer.cam.distance = self.model.stat.extent * 0.5
| 1,842 | Python | .py | 50 | 26.92 | 82 | 0.550952 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,275 | thrower.py | WindyLab_Gym-PPS/gym/envs/mujoco/thrower.py | import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
class ThrowerEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self):
utils.EzPickle.__init__(self)
self._ball_hit_ground = False
self._ball_hit_location = None
mujoco_env.MujocoEnv.__init__(self, "thrower.xml", 5)
def step(self, a):
ball_xy = self.get_body_com("ball")[:2]
goal_xy = self.get_body_com("goal")[:2]
if not self._ball_hit_ground and self.get_body_com("ball")[2] < -0.25:
self._ball_hit_ground = True
self._ball_hit_location = self.get_body_com("ball")
if self._ball_hit_ground:
ball_hit_xy = self._ball_hit_location[:2]
reward_dist = -np.linalg.norm(ball_hit_xy - goal_xy)
else:
reward_dist = -np.linalg.norm(ball_xy - goal_xy)
reward_ctrl = -np.square(a).sum()
reward = reward_dist + 0.002 * reward_ctrl
self.do_simulation(a, self.frame_skip)
ob = self._get_obs()
done = False
return ob, reward, done, dict(reward_dist=reward_dist, reward_ctrl=reward_ctrl)
def viewer_setup(self):
self.viewer.cam.trackbodyid = 0
self.viewer.cam.distance = 4.0
def reset_model(self):
self._ball_hit_ground = False
self._ball_hit_location = None
qpos = self.init_qpos
self.goal = np.array(
[
self.np_random.uniform(low=-0.3, high=0.3),
self.np_random.uniform(low=-0.3, high=0.3),
]
)
qpos[-9:-7] = self.goal
qvel = self.init_qvel + self.np_random.uniform(
low=-0.005, high=0.005, size=self.model.nv
)
qvel[7:] = 0
self.set_state(qpos, qvel)
return self._get_obs()
def _get_obs(self):
return np.concatenate(
[
self.sim.data.qpos.flat[:7],
self.sim.data.qvel.flat[:7],
self.get_body_com("r_wrist_roll_link"),
self.get_body_com("ball"),
self.get_body_com("goal"),
]
)
| 2,151 | Python | .py | 56 | 28.428571 | 87 | 0.554702 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,276 | hopper_v3.py | WindyLab_Gym-PPS/gym/envs/mujoco/hopper_v3.py | import numpy as np
from gym.envs.mujoco import mujoco_env
from gym import utils
DEFAULT_CAMERA_CONFIG = {
"trackbodyid": 2,
"distance": 3.0,
"lookat": np.array((0.0, 0.0, 1.15)),
"elevation": -20.0,
}
class HopperEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(
self,
xml_file="hopper.xml",
forward_reward_weight=1.0,
ctrl_cost_weight=1e-3,
healthy_reward=1.0,
terminate_when_unhealthy=True,
healthy_state_range=(-100.0, 100.0),
healthy_z_range=(0.7, float("inf")),
healthy_angle_range=(-0.2, 0.2),
reset_noise_scale=5e-3,
exclude_current_positions_from_observation=True,
):
utils.EzPickle.__init__(**locals())
self._forward_reward_weight = forward_reward_weight
self._ctrl_cost_weight = ctrl_cost_weight
self._healthy_reward = healthy_reward
self._terminate_when_unhealthy = terminate_when_unhealthy
self._healthy_state_range = healthy_state_range
self._healthy_z_range = healthy_z_range
self._healthy_angle_range = healthy_angle_range
self._reset_noise_scale = reset_noise_scale
self._exclude_current_positions_from_observation = (
exclude_current_positions_from_observation
)
mujoco_env.MujocoEnv.__init__(self, xml_file, 4)
@property
def healthy_reward(self):
return (
float(self.is_healthy or self._terminate_when_unhealthy)
* self._healthy_reward
)
def control_cost(self, action):
control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
return control_cost
@property
def is_healthy(self):
z, angle = self.sim.data.qpos[1:3]
state = self.state_vector()[2:]
min_state, max_state = self._healthy_state_range
min_z, max_z = self._healthy_z_range
min_angle, max_angle = self._healthy_angle_range
healthy_state = np.all(np.logical_and(min_state < state, state < max_state))
healthy_z = min_z < z < max_z
healthy_angle = min_angle < angle < max_angle
is_healthy = all((healthy_state, healthy_z, healthy_angle))
return is_healthy
@property
def done(self):
done = not self.is_healthy if self._terminate_when_unhealthy else False
return done
def _get_obs(self):
position = self.sim.data.qpos.flat.copy()
velocity = np.clip(self.sim.data.qvel.flat.copy(), -10, 10)
if self._exclude_current_positions_from_observation:
position = position[1:]
observation = np.concatenate((position, velocity)).ravel()
return observation
def step(self, action):
x_position_before = self.sim.data.qpos[0]
self.do_simulation(action, self.frame_skip)
x_position_after = self.sim.data.qpos[0]
x_velocity = (x_position_after - x_position_before) / self.dt
ctrl_cost = self.control_cost(action)
forward_reward = self._forward_reward_weight * x_velocity
healthy_reward = self.healthy_reward
rewards = forward_reward + healthy_reward
costs = ctrl_cost
observation = self._get_obs()
reward = rewards - costs
done = self.done
info = {
"x_position": x_position_after,
"x_velocity": x_velocity,
}
return observation, reward, done, info
def reset_model(self):
noise_low = -self._reset_noise_scale
noise_high = self._reset_noise_scale
qpos = self.init_qpos + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nq
)
qvel = self.init_qvel + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nv
)
self.set_state(qpos, qvel)
observation = self._get_obs()
return observation
def viewer_setup(self):
for key, value in DEFAULT_CAMERA_CONFIG.items():
if isinstance(value, np.ndarray):
getattr(self.viewer.cam, key)[:] = value
else:
setattr(self.viewer.cam, key, value)
| 4,190 | Python | .py | 104 | 31.548077 | 84 | 0.619541 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,277 | walker2d_v3.py | WindyLab_Gym-PPS/gym/envs/mujoco/walker2d_v3.py | import numpy as np
from gym.envs.mujoco import mujoco_env
from gym import utils
DEFAULT_CAMERA_CONFIG = {
"trackbodyid": 2,
"distance": 4.0,
"lookat": np.array((0.0, 0.0, 1.15)),
"elevation": -20.0,
}
class Walker2dEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(
self,
xml_file="walker2d.xml",
forward_reward_weight=1.0,
ctrl_cost_weight=1e-3,
healthy_reward=1.0,
terminate_when_unhealthy=True,
healthy_z_range=(0.8, 2.0),
healthy_angle_range=(-1.0, 1.0),
reset_noise_scale=5e-3,
exclude_current_positions_from_observation=True,
):
utils.EzPickle.__init__(**locals())
self._forward_reward_weight = forward_reward_weight
self._ctrl_cost_weight = ctrl_cost_weight
self._healthy_reward = healthy_reward
self._terminate_when_unhealthy = terminate_when_unhealthy
self._healthy_z_range = healthy_z_range
self._healthy_angle_range = healthy_angle_range
self._reset_noise_scale = reset_noise_scale
self._exclude_current_positions_from_observation = (
exclude_current_positions_from_observation
)
mujoco_env.MujocoEnv.__init__(self, xml_file, 4)
@property
def healthy_reward(self):
return (
float(self.is_healthy or self._terminate_when_unhealthy)
* self._healthy_reward
)
def control_cost(self, action):
control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
return control_cost
@property
def is_healthy(self):
z, angle = self.sim.data.qpos[1:3]
min_z, max_z = self._healthy_z_range
min_angle, max_angle = self._healthy_angle_range
healthy_z = min_z < z < max_z
healthy_angle = min_angle < angle < max_angle
is_healthy = healthy_z and healthy_angle
return is_healthy
@property
def done(self):
done = not self.is_healthy if self._terminate_when_unhealthy else False
return done
def _get_obs(self):
position = self.sim.data.qpos.flat.copy()
velocity = np.clip(self.sim.data.qvel.flat.copy(), -10, 10)
if self._exclude_current_positions_from_observation:
position = position[1:]
observation = np.concatenate((position, velocity)).ravel()
return observation
def step(self, action):
x_position_before = self.sim.data.qpos[0]
self.do_simulation(action, self.frame_skip)
x_position_after = self.sim.data.qpos[0]
x_velocity = (x_position_after - x_position_before) / self.dt
ctrl_cost = self.control_cost(action)
forward_reward = self._forward_reward_weight * x_velocity
healthy_reward = self.healthy_reward
rewards = forward_reward + healthy_reward
costs = ctrl_cost
observation = self._get_obs()
reward = rewards - costs
done = self.done
info = {
"x_position": x_position_after,
"x_velocity": x_velocity,
}
return observation, reward, done, info
def reset_model(self):
noise_low = -self._reset_noise_scale
noise_high = self._reset_noise_scale
qpos = self.init_qpos + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nq
)
qvel = self.init_qvel + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nv
)
self.set_state(qpos, qvel)
observation = self._get_obs()
return observation
def viewer_setup(self):
for key, value in DEFAULT_CAMERA_CONFIG.items():
if isinstance(value, np.ndarray):
getattr(self.viewer.cam, key)[:] = value
else:
setattr(self.viewer.cam, key, value)
| 3,881 | Python | .py | 99 | 30.494949 | 79 | 0.618768 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,278 | humanoid_v3.py | WindyLab_Gym-PPS/gym/envs/mujoco/humanoid_v3.py | import numpy as np
from gym.envs.mujoco import mujoco_env
from gym import utils
DEFAULT_CAMERA_CONFIG = {
"trackbodyid": 1,
"distance": 4.0,
"lookat": np.array((0.0, 0.0, 2.0)),
"elevation": -20.0,
}
def mass_center(model, sim):
mass = np.expand_dims(model.body_mass, axis=1)
xpos = sim.data.xipos
return (np.sum(mass * xpos, axis=0) / np.sum(mass))[0:2].copy()
class HumanoidEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(
self,
xml_file="humanoid.xml",
forward_reward_weight=1.25,
ctrl_cost_weight=0.1,
contact_cost_weight=5e-7,
contact_cost_range=(-np.inf, 10.0),
healthy_reward=5.0,
terminate_when_unhealthy=True,
healthy_z_range=(1.0, 2.0),
reset_noise_scale=1e-2,
exclude_current_positions_from_observation=True,
):
utils.EzPickle.__init__(**locals())
self._forward_reward_weight = forward_reward_weight
self._ctrl_cost_weight = ctrl_cost_weight
self._contact_cost_weight = contact_cost_weight
self._contact_cost_range = contact_cost_range
self._healthy_reward = healthy_reward
self._terminate_when_unhealthy = terminate_when_unhealthy
self._healthy_z_range = healthy_z_range
self._reset_noise_scale = reset_noise_scale
self._exclude_current_positions_from_observation = (
exclude_current_positions_from_observation
)
mujoco_env.MujocoEnv.__init__(self, xml_file, 5)
@property
def healthy_reward(self):
return (
float(self.is_healthy or self._terminate_when_unhealthy)
* self._healthy_reward
)
def control_cost(self, action):
control_cost = self._ctrl_cost_weight * np.sum(np.square(self.sim.data.ctrl))
return control_cost
@property
def contact_cost(self):
contact_forces = self.sim.data.cfrc_ext
contact_cost = self._contact_cost_weight * np.sum(np.square(contact_forces))
min_cost, max_cost = self._contact_cost_range
contact_cost = np.clip(contact_cost, min_cost, max_cost)
return contact_cost
@property
def is_healthy(self):
min_z, max_z = self._healthy_z_range
is_healthy = min_z < self.sim.data.qpos[2] < max_z
return is_healthy
@property
def done(self):
done = (not self.is_healthy) if self._terminate_when_unhealthy else False
return done
def _get_obs(self):
position = self.sim.data.qpos.flat.copy()
velocity = self.sim.data.qvel.flat.copy()
com_inertia = self.sim.data.cinert.flat.copy()
com_velocity = self.sim.data.cvel.flat.copy()
actuator_forces = self.sim.data.qfrc_actuator.flat.copy()
external_contact_forces = self.sim.data.cfrc_ext.flat.copy()
if self._exclude_current_positions_from_observation:
position = position[2:]
return np.concatenate(
(
position,
velocity,
com_inertia,
com_velocity,
actuator_forces,
external_contact_forces,
)
)
def step(self, action):
xy_position_before = mass_center(self.model, self.sim)
self.do_simulation(action, self.frame_skip)
xy_position_after = mass_center(self.model, self.sim)
xy_velocity = (xy_position_after - xy_position_before) / self.dt
x_velocity, y_velocity = xy_velocity
ctrl_cost = self.control_cost(action)
contact_cost = self.contact_cost
forward_reward = self._forward_reward_weight * x_velocity
healthy_reward = self.healthy_reward
rewards = forward_reward + healthy_reward
costs = ctrl_cost + contact_cost
observation = self._get_obs()
reward = rewards - costs
done = self.done
info = {
"reward_linvel": forward_reward,
"reward_quadctrl": -ctrl_cost,
"reward_alive": healthy_reward,
"reward_impact": -contact_cost,
"x_position": xy_position_after[0],
"y_position": xy_position_after[1],
"distance_from_origin": np.linalg.norm(xy_position_after, ord=2),
"x_velocity": x_velocity,
"y_velocity": y_velocity,
"forward_reward": forward_reward,
}
return observation, reward, done, info
def reset_model(self):
noise_low = -self._reset_noise_scale
noise_high = self._reset_noise_scale
qpos = self.init_qpos + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nq
)
qvel = self.init_qvel + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nv
)
self.set_state(qpos, qvel)
observation = self._get_obs()
return observation
def viewer_setup(self):
for key, value in DEFAULT_CAMERA_CONFIG.items():
if isinstance(value, np.ndarray):
getattr(self.viewer.cam, key)[:] = value
else:
setattr(self.viewer.cam, key, value)
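# ---------------------------------------------------------------------------
# Usage sketch (illustration only): the info dict decomposes the scalar reward
# into its four terms, which is convenient for logging.  Assumes the standard
# "Humanoid-v3" registration id.
if __name__ == "__main__":
    import gym

    env = gym.make("Humanoid-v3")
    env.reset()
    _, reward, _, info = env.step(env.action_space.sample())
    total = (
        info["reward_linvel"]
        + info["reward_quadctrl"]
        + info["reward_alive"]
        + info["reward_impact"]
    )
    print(reward, total)  # should agree up to floating-point error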
| 5,206 | Python | .py | 130 | 30.8 | 85 | 0.610825 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,279 | swimmer.py | WindyLab_Gym-PPS/gym/envs/mujoco/swimmer.py | import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
class SwimmerEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self):
mujoco_env.MujocoEnv.__init__(self, "swimmer.xml", 4)
utils.EzPickle.__init__(self)
def step(self, a):
ctrl_cost_coeff = 0.0001
xposbefore = self.sim.data.qpos[0]
self.do_simulation(a, self.frame_skip)
xposafter = self.sim.data.qpos[0]
reward_fwd = (xposafter - xposbefore) / self.dt
reward_ctrl = -ctrl_cost_coeff * np.square(a).sum()
reward = reward_fwd + reward_ctrl
ob = self._get_obs()
return ob, reward, False, dict(reward_fwd=reward_fwd, reward_ctrl=reward_ctrl)
def _get_obs(self):
qpos = self.sim.data.qpos
qvel = self.sim.data.qvel
return np.concatenate([qpos.flat[2:], qvel.flat])
def reset_model(self):
self.set_state(
self.init_qpos
+ self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq),
self.init_qvel
+ self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nv),
)
return self._get_obs()
| 1,179 | Python | .py | 29 | 32.586207 | 86 | 0.614847 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,280 | mujoco_env.py | WindyLab_Gym-PPS/gym/envs/mujoco/mujoco_env.py | from collections import OrderedDict
import os
from gym import error, spaces
from gym.utils import seeding
import numpy as np
from os import path
import gym
try:
import mujoco_py
except ImportError as e:
raise error.DependencyNotInstalled(
"{}. (HINT: you need to install mujoco_py, and also perform the setup instructions here: https://github.com/openai/mujoco-py/.)".format(
e
)
)
DEFAULT_SIZE = 500
def convert_observation_to_space(observation):
if isinstance(observation, dict):
space = spaces.Dict(
OrderedDict(
[
(key, convert_observation_to_space(value))
for key, value in observation.items()
]
)
)
elif isinstance(observation, np.ndarray):
low = np.full(observation.shape, -float("inf"), dtype=np.float32)
high = np.full(observation.shape, float("inf"), dtype=np.float32)
space = spaces.Box(low, high, dtype=observation.dtype)
else:
raise NotImplementedError(type(observation), observation)
return space
class MujocoEnv(gym.Env):
"""Superclass for all MuJoCo environments."""
def __init__(self, model_path, frame_skip):
if model_path.startswith("/"):
fullpath = model_path
else:
fullpath = os.path.join(os.path.dirname(__file__), "assets", model_path)
if not path.exists(fullpath):
raise IOError("File %s does not exist" % fullpath)
self.frame_skip = frame_skip
self.model = mujoco_py.load_model_from_path(fullpath)
self.sim = mujoco_py.MjSim(self.model)
self.data = self.sim.data
self.viewer = None
self._viewers = {}
self.metadata = {
"render.modes": ["human", "rgb_array", "depth_array"],
"video.frames_per_second": int(np.round(1.0 / self.dt)),
}
self.init_qpos = self.sim.data.qpos.ravel().copy()
self.init_qvel = self.sim.data.qvel.ravel().copy()
self._set_action_space()
action = self.action_space.sample()
observation, _reward, done, _info = self.step(action)
assert not done
self._set_observation_space(observation)
self.seed()
def _set_action_space(self):
bounds = self.model.actuator_ctrlrange.copy().astype(np.float32)
low, high = bounds.T
self.action_space = spaces.Box(low=low, high=high, dtype=np.float32)
return self.action_space
def _set_observation_space(self, observation):
self.observation_space = convert_observation_to_space(observation)
return self.observation_space
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
# methods to override:
# ----------------------------
def reset_model(self):
"""
Reset the robot degrees of freedom (qpos and qvel).
Implement this in each subclass.
"""
raise NotImplementedError
def viewer_setup(self):
"""
This method is called when the viewer is initialized.
Optionally implement this method, if you need to tinker with camera position
and so forth.
"""
pass
# -----------------------------
def reset(self):
self.sim.reset()
ob = self.reset_model()
return ob
def set_state(self, qpos, qvel):
assert qpos.shape == (self.model.nq,) and qvel.shape == (self.model.nv,)
old_state = self.sim.get_state()
new_state = mujoco_py.MjSimState(
old_state.time, qpos, qvel, old_state.act, old_state.udd_state
)
self.sim.set_state(new_state)
self.sim.forward()
@property
def dt(self):
return self.model.opt.timestep * self.frame_skip
def do_simulation(self, ctrl, n_frames):
self.sim.data.ctrl[:] = ctrl
for _ in range(n_frames):
self.sim.step()
def render(
self,
mode="human",
width=DEFAULT_SIZE,
height=DEFAULT_SIZE,
camera_id=None,
camera_name=None,
):
if mode == "rgb_array" or mode == "depth_array":
if camera_id is not None and camera_name is not None:
raise ValueError(
"Both `camera_id` and `camera_name` cannot be"
" specified at the same time."
)
no_camera_specified = camera_name is None and camera_id is None
if no_camera_specified:
camera_name = "track"
if camera_id is None and camera_name in self.model._camera_name2id:
camera_id = self.model.camera_name2id(camera_name)
self._get_viewer(mode).render(width, height, camera_id=camera_id)
if mode == "rgb_array":
# window size used for old mujoco-py:
data = self._get_viewer(mode).read_pixels(width, height, depth=False)
# original image is upside-down, so flip it
return data[::-1, :, :]
elif mode == "depth_array":
self._get_viewer(mode).render(width, height)
# window size used for old mujoco-py:
# Extract depth part of the read_pixels() tuple
data = self._get_viewer(mode).read_pixels(width, height, depth=True)[1]
# original image is upside-down, so flip it
return data[::-1, :]
elif mode == "human":
self._get_viewer(mode).render()
def close(self):
if self.viewer is not None:
# self.viewer.finish()
self.viewer = None
self._viewers = {}
def _get_viewer(self, mode):
self.viewer = self._viewers.get(mode)
if self.viewer is None:
if mode == "human":
self.viewer = mujoco_py.MjViewer(self.sim)
elif mode == "rgb_array" or mode == "depth_array":
self.viewer = mujoco_py.MjRenderContextOffscreen(self.sim, -1)
self.viewer_setup()
self._viewers[mode] = self.viewer
return self.viewer
def get_body_com(self, body_name):
return self.data.get_body_xpos(body_name)
def state_vector(self):
return np.concatenate([self.sim.data.qpos.flat, self.sim.data.qvel.flat])
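# ---------------------------------------------------------------------------
# Subclassing sketch (illustration only): the minimal contract for a new
# environment built on MujocoEnv.  "my_robot.xml" is a hypothetical model file
# and MyRobotEnv is not part of Gym; a real subclass must provide step() and
# reset_model(), and may override viewer_setup().
#
# class MyRobotEnv(MujocoEnv):
#     def __init__(self):
#         super().__init__("my_robot.xml", frame_skip=5)
#
#     def step(self, action):
#         self.do_simulation(action, self.frame_skip)
#         obs = self.state_vector()
#         reward, done, info = 0.0, False, {}
#         return obs, reward, done, info
#
#     def reset_model(self):
#         self.set_state(self.init_qpos, self.init_qvel)
#         return self.state_vector()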
| 6,358 | Python | .py | 159 | 30.528302 | 144 | 0.592827 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,281 | striker.py | WindyLab_Gym-PPS/gym/envs/mujoco/striker.py | import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
class StrikerEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self):
utils.EzPickle.__init__(self)
self._striked = False
self._min_strike_dist = np.inf
self.strike_threshold = 0.1
mujoco_env.MujocoEnv.__init__(self, "striker.xml", 5)
def step(self, a):
vec_1 = self.get_body_com("object") - self.get_body_com("tips_arm")
vec_2 = self.get_body_com("object") - self.get_body_com("goal")
self._min_strike_dist = min(self._min_strike_dist, np.linalg.norm(vec_2))
if np.linalg.norm(vec_1) < self.strike_threshold:
self._striked = True
self._strike_pos = self.get_body_com("tips_arm")
if self._striked:
vec_3 = self.get_body_com("object") - self._strike_pos
reward_near = -np.linalg.norm(vec_3)
else:
reward_near = -np.linalg.norm(vec_1)
reward_dist = -np.linalg.norm(self._min_strike_dist)
reward_ctrl = -np.square(a).sum()
reward = 3 * reward_dist + 0.1 * reward_ctrl + 0.5 * reward_near
self.do_simulation(a, self.frame_skip)
ob = self._get_obs()
done = False
return ob, reward, done, dict(reward_dist=reward_dist, reward_ctrl=reward_ctrl)
def viewer_setup(self):
self.viewer.cam.trackbodyid = 0
self.viewer.cam.distance = 4.0
def reset_model(self):
self._min_strike_dist = np.inf
self._striked = False
self._strike_pos = None
qpos = self.init_qpos
self.ball = np.array([0.5, -0.175])
while True:
self.goal = np.concatenate(
[
self.np_random.uniform(low=0.15, high=0.7, size=1),
self.np_random.uniform(low=0.1, high=1.0, size=1),
]
)
if np.linalg.norm(self.ball - self.goal) > 0.17:
break
qpos[-9:-7] = [self.ball[1], self.ball[0]]
qpos[-7:-5] = self.goal
diff = self.ball - self.goal
angle = -np.arctan(diff[0] / (diff[1] + 1e-8))
qpos[-1] = angle / 3.14
qvel = self.init_qvel + self.np_random.uniform(
low=-0.1, high=0.1, size=self.model.nv
)
qvel[7:] = 0
self.set_state(qpos, qvel)
return self._get_obs()
def _get_obs(self):
return np.concatenate(
[
self.sim.data.qpos.flat[:7],
self.sim.data.qvel.flat[:7],
self.get_body_com("tips_arm"),
self.get_body_com("object"),
self.get_body_com("goal"),
]
)
| 2,723 | Python | .py | 68 | 29.676471 | 87 | 0.546177 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,282 | reacher.py | WindyLab_Gym-PPS/gym/envs/mujoco/reacher.py | import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
class ReacherEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self):
utils.EzPickle.__init__(self)
mujoco_env.MujocoEnv.__init__(self, "reacher.xml", 2)
def step(self, a):
vec = self.get_body_com("fingertip") - self.get_body_com("target")
reward_dist = -np.linalg.norm(vec)
reward_ctrl = -np.square(a).sum()
reward = reward_dist + reward_ctrl
self.do_simulation(a, self.frame_skip)
ob = self._get_obs()
done = False
return ob, reward, done, dict(reward_dist=reward_dist, reward_ctrl=reward_ctrl)
def viewer_setup(self):
self.viewer.cam.trackbodyid = 0
def reset_model(self):
qpos = (
self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq)
+ self.init_qpos
)
while True:
self.goal = self.np_random.uniform(low=-0.2, high=0.2, size=2)
if np.linalg.norm(self.goal) < 0.2:
break
qpos[-2:] = self.goal
qvel = self.init_qvel + self.np_random.uniform(
low=-0.005, high=0.005, size=self.model.nv
)
qvel[-2:] = 0
self.set_state(qpos, qvel)
return self._get_obs()
def _get_obs(self):
theta = self.sim.data.qpos.flat[:2]
return np.concatenate(
[
np.cos(theta),
np.sin(theta),
self.sim.data.qpos.flat[2:],
self.sim.data.qvel.flat[:2],
self.get_body_com("fingertip") - self.get_body_com("target"),
]
)
| 1,674 | Python | .py | 45 | 27.533333 | 87 | 0.558226 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,283 | half_cheetah.py | WindyLab_Gym-PPS/gym/envs/mujoco/half_cheetah.py | import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
class HalfCheetahEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self):
mujoco_env.MujocoEnv.__init__(self, "half_cheetah.xml", 5)
utils.EzPickle.__init__(self)
def step(self, action):
xposbefore = self.sim.data.qpos[0]
self.do_simulation(action, self.frame_skip)
xposafter = self.sim.data.qpos[0]
ob = self._get_obs()
reward_ctrl = -0.1 * np.square(action).sum()
reward_run = (xposafter - xposbefore) / self.dt
reward = reward_ctrl + reward_run
done = False
return ob, reward, done, dict(reward_run=reward_run, reward_ctrl=reward_ctrl)
def _get_obs(self):
return np.concatenate(
[
self.sim.data.qpos.flat[1:],
self.sim.data.qvel.flat,
]
)
def reset_model(self):
qpos = self.init_qpos + self.np_random.uniform(
low=-0.1, high=0.1, size=self.model.nq
)
qvel = self.init_qvel + self.np_random.randn(self.model.nv) * 0.1
self.set_state(qpos, qvel)
return self._get_obs()
def viewer_setup(self):
self.viewer.cam.distance = self.model.stat.extent * 0.5
| 1,282 | Python | .py | 33 | 30.393939 | 85 | 0.604183 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,284 | inverted_double_pendulum.py | WindyLab_Gym-PPS/gym/envs/mujoco/inverted_double_pendulum.py | import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
class InvertedDoublePendulumEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self):
mujoco_env.MujocoEnv.__init__(self, "inverted_double_pendulum.xml", 5)
utils.EzPickle.__init__(self)
def step(self, action):
self.do_simulation(action, self.frame_skip)
ob = self._get_obs()
x, _, y = self.sim.data.site_xpos[0]
dist_penalty = 0.01 * x ** 2 + (y - 2) ** 2
v1, v2 = self.sim.data.qvel[1:3]
vel_penalty = 1e-3 * v1 ** 2 + 5e-3 * v2 ** 2
alive_bonus = 10
r = alive_bonus - dist_penalty - vel_penalty
done = bool(y <= 1)
return ob, r, done, {}
def _get_obs(self):
return np.concatenate(
[
self.sim.data.qpos[:1], # cart x pos
np.sin(self.sim.data.qpos[1:]), # link angles
np.cos(self.sim.data.qpos[1:]),
np.clip(self.sim.data.qvel, -10, 10),
np.clip(self.sim.data.qfrc_constraint, -10, 10),
]
).ravel()
def reset_model(self):
self.set_state(
self.init_qpos
+ self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq),
self.init_qvel + self.np_random.randn(self.model.nv) * 0.1,
)
return self._get_obs()
def viewer_setup(self):
v = self.viewer
v.cam.trackbodyid = 0
v.cam.distance = self.model.stat.extent * 0.5
v.cam.lookat[2] = 0.12250000000000005 # v.model.stat.center[2]
| 1,598 | Python | .py | 40 | 30.6 | 78 | 0.55799 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,285 | ant_v3.py | WindyLab_Gym-PPS/gym/envs/mujoco/ant_v3.py | import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
DEFAULT_CAMERA_CONFIG = {
"distance": 4.0,
}
class AntEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(
self,
xml_file="ant.xml",
ctrl_cost_weight=0.5,
contact_cost_weight=5e-4,
healthy_reward=1.0,
terminate_when_unhealthy=True,
healthy_z_range=(0.2, 1.0),
contact_force_range=(-1.0, 1.0),
reset_noise_scale=0.1,
exclude_current_positions_from_observation=True,
):
utils.EzPickle.__init__(**locals())
self._ctrl_cost_weight = ctrl_cost_weight
self._contact_cost_weight = contact_cost_weight
self._healthy_reward = healthy_reward
self._terminate_when_unhealthy = terminate_when_unhealthy
self._healthy_z_range = healthy_z_range
self._contact_force_range = contact_force_range
self._reset_noise_scale = reset_noise_scale
self._exclude_current_positions_from_observation = (
exclude_current_positions_from_observation
)
mujoco_env.MujocoEnv.__init__(self, xml_file, 5)
@property
def healthy_reward(self):
return (
float(self.is_healthy or self._terminate_when_unhealthy)
* self._healthy_reward
)
def control_cost(self, action):
control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
return control_cost
@property
def contact_forces(self):
raw_contact_forces = self.sim.data.cfrc_ext
min_value, max_value = self._contact_force_range
contact_forces = np.clip(raw_contact_forces, min_value, max_value)
return contact_forces
@property
def contact_cost(self):
contact_cost = self._contact_cost_weight * np.sum(
np.square(self.contact_forces)
)
return contact_cost
@property
def is_healthy(self):
state = self.state_vector()
min_z, max_z = self._healthy_z_range
is_healthy = np.isfinite(state).all() and min_z <= state[2] <= max_z
return is_healthy
@property
def done(self):
done = not self.is_healthy if self._terminate_when_unhealthy else False
return done
def step(self, action):
xy_position_before = self.get_body_com("torso")[:2].copy()
self.do_simulation(action, self.frame_skip)
xy_position_after = self.get_body_com("torso")[:2].copy()
xy_velocity = (xy_position_after - xy_position_before) / self.dt
x_velocity, y_velocity = xy_velocity
ctrl_cost = self.control_cost(action)
contact_cost = self.contact_cost
forward_reward = x_velocity
healthy_reward = self.healthy_reward
rewards = forward_reward + healthy_reward
costs = ctrl_cost + contact_cost
reward = rewards - costs
done = self.done
observation = self._get_obs()
info = {
"reward_forward": forward_reward,
"reward_ctrl": -ctrl_cost,
"reward_contact": -contact_cost,
"reward_survive": healthy_reward,
"x_position": xy_position_after[0],
"y_position": xy_position_after[1],
"distance_from_origin": np.linalg.norm(xy_position_after, ord=2),
"x_velocity": x_velocity,
"y_velocity": y_velocity,
"forward_reward": forward_reward,
}
return observation, reward, done, info
def _get_obs(self):
position = self.sim.data.qpos.flat.copy()
velocity = self.sim.data.qvel.flat.copy()
contact_force = self.contact_forces.flat.copy()
if self._exclude_current_positions_from_observation:
position = position[2:]
observations = np.concatenate((position, velocity, contact_force))
return observations
def reset_model(self):
noise_low = -self._reset_noise_scale
noise_high = self._reset_noise_scale
qpos = self.init_qpos + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nq
)
qvel = self.init_qvel + self._reset_noise_scale * self.np_random.randn(
self.model.nv
)
self.set_state(qpos, qvel)
observation = self._get_obs()
return observation
def viewer_setup(self):
for key, value in DEFAULT_CAMERA_CONFIG.items():
if isinstance(value, np.ndarray):
getattr(self.viewer.cam, key)[:] = value
else:
setattr(self.viewer.cam, key, value)
| 4,621 | Python | .py | 116 | 30.801724 | 79 | 0.616588 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,286 | pusher.py | WindyLab_Gym-PPS/gym/envs/mujoco/pusher.py | import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
import mujoco_py
class PusherEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self):
utils.EzPickle.__init__(self)
mujoco_env.MujocoEnv.__init__(self, "pusher.xml", 5)
def step(self, a):
vec_1 = self.get_body_com("object") - self.get_body_com("tips_arm")
vec_2 = self.get_body_com("object") - self.get_body_com("goal")
reward_near = -np.linalg.norm(vec_1)
reward_dist = -np.linalg.norm(vec_2)
reward_ctrl = -np.square(a).sum()
reward = reward_dist + 0.1 * reward_ctrl + 0.5 * reward_near
self.do_simulation(a, self.frame_skip)
ob = self._get_obs()
done = False
return ob, reward, done, dict(reward_dist=reward_dist, reward_ctrl=reward_ctrl)
def viewer_setup(self):
self.viewer.cam.trackbodyid = -1
self.viewer.cam.distance = 4.0
def reset_model(self):
qpos = self.init_qpos
self.goal_pos = np.asarray([0, 0])
while True:
self.cylinder_pos = np.concatenate(
[
self.np_random.uniform(low=-0.3, high=0, size=1),
self.np_random.uniform(low=-0.2, high=0.2, size=1),
]
)
if np.linalg.norm(self.cylinder_pos - self.goal_pos) > 0.17:
break
qpos[-4:-2] = self.cylinder_pos
qpos[-2:] = self.goal_pos
qvel = self.init_qvel + self.np_random.uniform(
low=-0.005, high=0.005, size=self.model.nv
)
qvel[-4:] = 0
self.set_state(qpos, qvel)
return self._get_obs()
def _get_obs(self):
return np.concatenate(
[
self.sim.data.qpos.flat[:7],
self.sim.data.qvel.flat[:7],
self.get_body_com("tips_arm"),
self.get_body_com("object"),
self.get_body_com("goal"),
]
)
| 2,007 | Python | .py | 52 | 28.384615 | 87 | 0.549383 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,287 | humanoid.py | WindyLab_Gym-PPS/gym/envs/mujoco/humanoid.py | import numpy as np
from gym.envs.mujoco import mujoco_env
from gym import utils
def mass_center(model, sim):
mass = np.expand_dims(model.body_mass, 1)
xpos = sim.data.xipos
return (np.sum(mass * xpos, 0) / np.sum(mass))[0]
class HumanoidEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self):
mujoco_env.MujocoEnv.__init__(self, "humanoid.xml", 5)
utils.EzPickle.__init__(self)
def _get_obs(self):
data = self.sim.data
return np.concatenate(
[
data.qpos.flat[2:],
data.qvel.flat,
data.cinert.flat,
data.cvel.flat,
data.qfrc_actuator.flat,
data.cfrc_ext.flat,
]
)
def step(self, a):
pos_before = mass_center(self.model, self.sim)
self.do_simulation(a, self.frame_skip)
pos_after = mass_center(self.model, self.sim)
alive_bonus = 5.0
data = self.sim.data
lin_vel_cost = 1.25 * (pos_after - pos_before) / self.dt
quad_ctrl_cost = 0.1 * np.square(data.ctrl).sum()
quad_impact_cost = 0.5e-6 * np.square(data.cfrc_ext).sum()
quad_impact_cost = min(quad_impact_cost, 10)
reward = lin_vel_cost - quad_ctrl_cost - quad_impact_cost + alive_bonus
qpos = self.sim.data.qpos
done = bool((qpos[2] < 1.0) or (qpos[2] > 2.0))
return (
self._get_obs(),
reward,
done,
dict(
reward_linvel=lin_vel_cost,
reward_quadctrl=-quad_ctrl_cost,
reward_alive=alive_bonus,
reward_impact=-quad_impact_cost,
),
)
def reset_model(self):
c = 0.01
self.set_state(
self.init_qpos + self.np_random.uniform(low=-c, high=c, size=self.model.nq),
self.init_qvel
+ self.np_random.uniform(
low=-c,
high=c,
size=self.model.nv,
),
)
return self._get_obs()
def viewer_setup(self):
self.viewer.cam.trackbodyid = 1
self.viewer.cam.distance = self.model.stat.extent * 1.0
self.viewer.cam.lookat[2] = 2.0
self.viewer.cam.elevation = -20
| 2,290 | Python | .py | 64 | 25.46875 | 88 | 0.54193 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,288 | hopper.py | WindyLab_Gym-PPS/gym/envs/mujoco/hopper.py | import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
class HopperEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self):
mujoco_env.MujocoEnv.__init__(self, "hopper.xml", 4)
utils.EzPickle.__init__(self)
def step(self, a):
posbefore = self.sim.data.qpos[0]
self.do_simulation(a, self.frame_skip)
posafter, height, ang = self.sim.data.qpos[0:3]
alive_bonus = 1.0
reward = (posafter - posbefore) / self.dt
reward += alive_bonus
reward -= 1e-3 * np.square(a).sum()
s = self.state_vector()
done = not (
np.isfinite(s).all()
and (np.abs(s[2:]) < 100).all()
and (height > 0.7)
and (abs(ang) < 0.2)
)
ob = self._get_obs()
return ob, reward, done, {}
def _get_obs(self):
return np.concatenate(
[self.sim.data.qpos.flat[1:], np.clip(self.sim.data.qvel.flat, -10, 10)]
)
def reset_model(self):
qpos = self.init_qpos + self.np_random.uniform(
low=-0.005, high=0.005, size=self.model.nq
)
qvel = self.init_qvel + self.np_random.uniform(
low=-0.005, high=0.005, size=self.model.nv
)
self.set_state(qpos, qvel)
return self._get_obs()
def viewer_setup(self):
self.viewer.cam.trackbodyid = 2
self.viewer.cam.distance = self.model.stat.extent * 0.75
self.viewer.cam.lookat[2] = 1.15
self.viewer.cam.elevation = -20
| 1,550 | Python | .py | 42 | 28.333333 | 84 | 0.571904 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,289 | swimmer_v3.py | WindyLab_Gym-PPS/gym/envs/mujoco/swimmer_v3.py | import numpy as np
from gym.envs.mujoco import mujoco_env
from gym import utils
DEFAULT_CAMERA_CONFIG = {}
class SwimmerEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(
self,
xml_file="swimmer.xml",
forward_reward_weight=1.0,
ctrl_cost_weight=1e-4,
reset_noise_scale=0.1,
exclude_current_positions_from_observation=True,
):
utils.EzPickle.__init__(**locals())
self._forward_reward_weight = forward_reward_weight
self._ctrl_cost_weight = ctrl_cost_weight
self._reset_noise_scale = reset_noise_scale
self._exclude_current_positions_from_observation = (
exclude_current_positions_from_observation
)
mujoco_env.MujocoEnv.__init__(self, xml_file, 4)
def control_cost(self, action):
control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
return control_cost
def step(self, action):
xy_position_before = self.sim.data.qpos[0:2].copy()
self.do_simulation(action, self.frame_skip)
xy_position_after = self.sim.data.qpos[0:2].copy()
xy_velocity = (xy_position_after - xy_position_before) / self.dt
x_velocity, y_velocity = xy_velocity
forward_reward = self._forward_reward_weight * x_velocity
ctrl_cost = self.control_cost(action)
observation = self._get_obs()
reward = forward_reward - ctrl_cost
done = False
info = {
"reward_fwd": forward_reward,
"reward_ctrl": -ctrl_cost,
"x_position": xy_position_after[0],
"y_position": xy_position_after[1],
"distance_from_origin": np.linalg.norm(xy_position_after, ord=2),
"x_velocity": x_velocity,
"y_velocity": y_velocity,
"forward_reward": forward_reward,
}
return observation, reward, done, info
def _get_obs(self):
position = self.sim.data.qpos.flat.copy()
velocity = self.sim.data.qvel.flat.copy()
if self._exclude_current_positions_from_observation:
position = position[2:]
observation = np.concatenate([position, velocity]).ravel()
return observation
def reset_model(self):
noise_low = -self._reset_noise_scale
noise_high = self._reset_noise_scale
qpos = self.init_qpos + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nq
)
qvel = self.init_qvel + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nv
)
self.set_state(qpos, qvel)
observation = self._get_obs()
return observation
def viewer_setup(self):
for key, value in DEFAULT_CAMERA_CONFIG.items():
if isinstance(value, np.ndarray):
getattr(self.viewer.cam, key)[:] = value
else:
setattr(self.viewer.cam, key, value)
| 2,970 | Python | .py | 71 | 32.450704 | 77 | 0.616481 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,290 | __init__.py | WindyLab_Gym-PPS/gym/envs/mujoco/__init__.py | from gym.envs.mujoco.mujoco_env import MujocoEnv
# ^^^^^ so that user gets the correct error
# message if mujoco is not installed correctly
from gym.envs.mujoco.ant import AntEnv
from gym.envs.mujoco.half_cheetah import HalfCheetahEnv
from gym.envs.mujoco.hopper import HopperEnv
from gym.envs.mujoco.walker2d import Walker2dEnv
from gym.envs.mujoco.humanoid import HumanoidEnv
from gym.envs.mujoco.inverted_pendulum import InvertedPendulumEnv
from gym.envs.mujoco.inverted_double_pendulum import InvertedDoublePendulumEnv
from gym.envs.mujoco.reacher import ReacherEnv
from gym.envs.mujoco.swimmer import SwimmerEnv
from gym.envs.mujoco.humanoidstandup import HumanoidStandupEnv
from gym.envs.mujoco.pusher import PusherEnv
from gym.envs.mujoco.thrower import ThrowerEnv
from gym.envs.mujoco.striker import StrikerEnv
| 820 | Python | .py | 16 | 50.1875 | 78 | 0.861768 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,291 | walker2d.py | WindyLab_Gym-PPS/gym/envs/mujoco/walker2d.py | import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
class Walker2dEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self):
mujoco_env.MujocoEnv.__init__(self, "walker2d.xml", 4)
utils.EzPickle.__init__(self)
def step(self, a):
posbefore = self.sim.data.qpos[0]
self.do_simulation(a, self.frame_skip)
posafter, height, ang = self.sim.data.qpos[0:3]
alive_bonus = 1.0
reward = (posafter - posbefore) / self.dt
reward += alive_bonus
reward -= 1e-3 * np.square(a).sum()
done = not (height > 0.8 and height < 2.0 and ang > -1.0 and ang < 1.0)
ob = self._get_obs()
return ob, reward, done, {}
def _get_obs(self):
qpos = self.sim.data.qpos
qvel = self.sim.data.qvel
return np.concatenate([qpos[1:], np.clip(qvel, -10, 10)]).ravel()
def reset_model(self):
self.set_state(
self.init_qpos
+ self.np_random.uniform(low=-0.005, high=0.005, size=self.model.nq),
self.init_qvel
+ self.np_random.uniform(low=-0.005, high=0.005, size=self.model.nv),
)
return self._get_obs()
def viewer_setup(self):
self.viewer.cam.trackbodyid = 2
self.viewer.cam.distance = self.model.stat.extent * 0.5
self.viewer.cam.lookat[2] = 1.15
self.viewer.cam.elevation = -20
| 1,420 | Python | .py | 35 | 32.428571 | 81 | 0.598985 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,292 | half_cheetah_v3.py | WindyLab_Gym-PPS/gym/envs/mujoco/half_cheetah_v3.py | import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
DEFAULT_CAMERA_CONFIG = {
"distance": 4.0,
}
class HalfCheetahEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(
self,
xml_file="half_cheetah.xml",
forward_reward_weight=1.0,
ctrl_cost_weight=0.1,
reset_noise_scale=0.1,
exclude_current_positions_from_observation=True,
):
utils.EzPickle.__init__(**locals())
self._forward_reward_weight = forward_reward_weight
self._ctrl_cost_weight = ctrl_cost_weight
self._reset_noise_scale = reset_noise_scale
self._exclude_current_positions_from_observation = (
exclude_current_positions_from_observation
)
mujoco_env.MujocoEnv.__init__(self, xml_file, 5)
def control_cost(self, action):
control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
return control_cost
def step(self, action):
x_position_before = self.sim.data.qpos[0]
self.do_simulation(action, self.frame_skip)
x_position_after = self.sim.data.qpos[0]
x_velocity = (x_position_after - x_position_before) / self.dt
ctrl_cost = self.control_cost(action)
forward_reward = self._forward_reward_weight * x_velocity
observation = self._get_obs()
reward = forward_reward - ctrl_cost
done = False
info = {
"x_position": x_position_after,
"x_velocity": x_velocity,
"reward_run": forward_reward,
"reward_ctrl": -ctrl_cost,
}
return observation, reward, done, info
def _get_obs(self):
position = self.sim.data.qpos.flat.copy()
velocity = self.sim.data.qvel.flat.copy()
if self._exclude_current_positions_from_observation:
position = position[1:]
observation = np.concatenate((position, velocity)).ravel()
return observation
def reset_model(self):
noise_low = -self._reset_noise_scale
noise_high = self._reset_noise_scale
qpos = self.init_qpos + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nq
)
qvel = self.init_qvel + self._reset_noise_scale * self.np_random.randn(
self.model.nv
)
self.set_state(qpos, qvel)
observation = self._get_obs()
return observation
def viewer_setup(self):
for key, value in DEFAULT_CAMERA_CONFIG.items():
if isinstance(value, np.ndarray):
getattr(self.viewer.cam, key)[:] = value
else:
setattr(self.viewer.cam, key, value)
| 2,705 | Python | .py | 68 | 30.794118 | 79 | 0.61821 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,293 | inverted_pendulum.py | WindyLab_Gym-PPS/gym/envs/mujoco/inverted_pendulum.py | import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
class InvertedPendulumEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self):
utils.EzPickle.__init__(self)
mujoco_env.MujocoEnv.__init__(self, "inverted_pendulum.xml", 2)
def step(self, a):
reward = 1.0
self.do_simulation(a, self.frame_skip)
ob = self._get_obs()
notdone = np.isfinite(ob).all() and (np.abs(ob[1]) <= 0.2)
done = not notdone
return ob, reward, done, {}
def reset_model(self):
qpos = self.init_qpos + self.np_random.uniform(
size=self.model.nq, low=-0.01, high=0.01
)
qvel = self.init_qvel + self.np_random.uniform(
size=self.model.nv, low=-0.01, high=0.01
)
self.set_state(qpos, qvel)
return self._get_obs()
def _get_obs(self):
return np.concatenate([self.sim.data.qpos, self.sim.data.qvel]).ravel()
def viewer_setup(self):
v = self.viewer
v.cam.trackbodyid = 0
v.cam.distance = self.model.stat.extent
| 1,100 | Python | .py | 29 | 30.241379 | 79 | 0.60939 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,294 | pps.py | WindyLab_Gym-PPS/gym/envs/pps/pps.py | __credits__ = ["[email protected]"]
import gym
from gym import error, spaces, utils
from .putils import *
from gym.utils import *
import numpy as np
import torch
import random
class PredatorPreySwarmEnv(PredatorPreySwarmEnvProp):
"""
Description:
Multiple predators and prey interact with each other. If predators catch prey,
predators receive positive rewards, while prey receive negative rewards.
Source:
This environment appeared first in the paper Li J, Li L, Zhao S.
"Predator–prey survival pressure is sufficient to evolve swarming behaviors",
New Journal of Physics, vol. 25, no. 9, pp. 092001, 2023.
Observation:
Type: Box(...)
If in Cartesian mode:
[ agent's own pos., vel.,
relative pos. of observed pursuers,
relative pos. of observed escapers ]
If in Polar mode:
[ agent's own pos., vel., heading,
relative pos. and headings of observed pursuers,
relative pos. and headings of observed escapers ]
        The observation model depends on both metric and topological distance.
        Metric distance: an agent can only perceive others within its perception range, assumed to be a disk with a pre-defined radius.
        Topological distance: the maximum number of neighbors an agent can perceive concurrently, regardless of how far away they are.
Actions:
Type: Box(2)
If the dynamics mode for agents is Cartesian, then
Num Action
0 acceleration in x-axis
1 acceleration in y-axis
If the dynamics mode for agents is Polar, then
Num Action
0 angular velocity (or rotation angle in the given time step)
1 acceleration in heading direction
        Note: The min and max action values can be adjusted, but we strongly
        advise against doing so, as they are closely tied to the update time step.
        Setting them incorrectly may violate physical laws and cause the
        environment dynamics to behave unexpectedly.
Reward:
The core reward is as follows: when a predator catches its prey, the predator receives
a reward of +1, while the prey receives a reward of -1. For details on the other
auxiliary rewards, please refer to the reward function.
Starting State:
All observations are assigned a uniform random value.
"""
param_list = params
def __init__(self, n_p=3, n_e=10):
self._n_p = n_p
self._n_e = n_e
self._n_o = 0
self.viewer = None
self.seed()
def __reinit__(self):
self._n_pe = self._n_p + self._n_e
self._n_peo = self._n_p + self._n_e + self._n_o
self.observation_space = self._get_observation_space()
self.action_space = self._get_action_space()
self._m = get_mass(self._m_p, self._m_e, self._m_o, self._n_p, self._n_e, self._n_o)
self._size, self._sizes = get_sizes(self._size_p, self._size_e, self._size_o, self._n_p, self._n_e, self._n_o)
if self._billiards_mode:
self._c_wall = 0.2
self._c_aero = 0.02
if self._dynamics_mode == 'Cartesian':
self._linAcc_p_min = -1
self._linAcc_e_min = -1
if self._linAcc_p_max != 1 or self._linAcc_e_max != 1:
raise ValueError('Currently in Cartesian mode, linAcc_p_max and linAcc_e_max have to be 1')
assert (self._linAcc_p_min, self._linAcc_e_min, self._linAcc_p_max, self._linAcc_e_max) == (-1, -1, 1, 1)
elif self._dynamics_mode == 'Polar':
self._linAcc_p_min = 0
self._linAcc_e_min = 0
# Energy
if self._dynamics_mode == 'Cartesian':
self.max_energy_p = 1000.
self.max_energy_e = 1000.
elif self._dynamics_mode == 'Polar':
self.max_energy_p = 1000.
self.max_energy_e = 1000.
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def reset(self):
max_size = np.max(self._size)
max_respawn_times = 100
        for respawn_time in range(max_respawn_times):
self._p = np.random.uniform(-1+max_size, 1-max_size, (2, self._n_peo)) # Initialize self._p
if self._obstacles_is_constant:
self._p[:, self._n_pe:self._n_peo] = self._p_o
self._d_b2b_center, _, _is_collide_b2b = get_dist_b2b(self._p, self._L, self._is_periodic, self._sizes)
if _is_collide_b2b.sum() == 0:
break
if respawn_time == max_respawn_times-1:
            print('Some particles still overlap at the initial time!')
if self._render_traj == True:
self._p_traj = np.zeros((self._traj_len, 2, self._n_peo))
self._p_traj[0,:,:] = self._p
self._dp = np.zeros((2, self._n_peo))
if self._billiards_mode:
self._dp = np.random.uniform(-1,1,(2,self._n_peo)) # ice mode
if self._dynamics_mode == 'Polar':
raise ValueError("Billiards_mode requires dynamics_mode be 'Cartesian' !")
if self._obstacles_cannot_move:
self._dp[:, self._n_pe:self._n_peo] = 0
self._ddp = np.zeros((2, self._n_peo))
self._energy = np.array([self.max_energy_p for _ in range(self._n_p)] + [self.max_energy_e for _ in range(self.n_e)]).reshape(1, self._n_pe)
if self._dynamics_mode == 'Polar':
self._theta = np.pi * np.random.uniform(-1,1, (1, self._n_peo))
# self._theta = np.pi * np.zeros((1, self._n_peo))
self._heading = np.concatenate((np.cos(self._theta), np.sin(self._theta)), axis=0)
return self._get_obs()
def _get_obs(self):
self.obs = np.zeros(self.observation_space.shape)
for i in range(self._n_p):
''' For pursuers
If in Cartesian mode:
[ agent's own pos., vel.,
relative pos. of observed pursuers,
relative pos. of observed escapers ]
If in Polar mode:
[ agent's own pos., vel., heading,
relative pos. and headings of observed pursuers,
relative pos. and headings of observed escapers ]
            The observation model depends on both metric and topological distance.
            Metric distance means an agent can only perceive others within its perception range, assumed to be a disk with a pre-defined radius.
            Topological distance means the maximum number of neighbors an agent can perceive concurrently, regardless of how far away they are.
'''
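            # The flattened column for pursuer i is laid out per entity as
            # [pos_x, pos_y, vel_x, vel_y]: the agent's own absolute position and
            # velocity first, then the focused pursuers and escapers with relative
            # positions and relative velocities (relative headings in Polar mode),
            # followed by the normalized energy and, in Polar mode, the own heading.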
relPos_p2p = self._p[:, :self._n_p] - self._p[:,[i]]
if self._is_periodic: relPos_p2p = make_periodic( relPos_p2p, self._L )
relVel_p2p = self._dp[:,:self._n_p] - self._dp[:,[i]] if self._dynamics_mode == 'Cartesian' else self._heading[:, :self._n_p] - self._heading[:, [i]]
relPos_p2p, relVel_p2p = get_focused(relPos_p2p, relVel_p2p, self._FoV_p, self._topo_n_p2p, True)
relPos_p2e = self._p[:, self._n_p:self._n_pe] - self._p[:,[i]]
if self._is_periodic: relPos_p2e = make_periodic( relPos_p2e, self._L )
relVel_p2e = self._dp[:,self._n_p:self._n_pe] - self._dp[:,[i]] if self._dynamics_mode == 'Cartesian' else self._heading[:,self._n_p:self._n_pe] - self._heading[:,[i]]
relPos_p2e, relVel_p2e = get_focused(relPos_p2e, relVel_p2e, self._FoV_p, self._topo_n_p2e, False)
obs_pursuer_pos = np.concatenate((self._p[:, [i]], relPos_p2p, relPos_p2e), axis=1)
obs_pursuer_vel = np.concatenate((self._dp[:, [i]], relVel_p2p, relVel_p2e), axis=1)
obs_pursuer = np.concatenate((obs_pursuer_pos, obs_pursuer_vel), axis=0) # (4, n_peo+1) FIXME: only suitable for no obstacles
if self._dynamics_mode == 'Cartesian':
self.obs[:self.obs_dim_pursuer-1, i] = obs_pursuer.T.reshape(-1)
self.obs[self.obs_dim_pursuer-1, i] = 2/self.max_energy_p * self._energy[[0],i] - 1
elif self._dynamics_mode == 'Polar':
self.obs[:self.obs_dim_pursuer-3, i] = obs_pursuer.T.reshape(-1)
self.obs[self.obs_dim_pursuer-3, i] = 2/self.max_energy_p * self._energy[[0],i] - 1
self.obs[self.obs_dim_pursuer-2:self.obs_dim_pursuer, i] = self._heading[:,i]
for i in range(self._n_p, self._n_pe):
''' For prey
Same with predators'
'''
relPos_e2p = self._p[:, :self._n_p] - self._p[:,[i]]
if self._is_periodic: relPos_e2p = make_periodic( relPos_e2p, self._L )
relVel_e2p = self._dp[:, :self._n_p] - self._dp[:,[i]] if self._dynamics_mode == 'Cartesian' else self._heading[:, :self._n_p] - self._heading[:,[i]]
relPos_e2p, relVel_e2p = get_focused(relPos_e2p, relVel_e2p, self._FoV_e, self._topo_n_e2p, False)
relPos_e2e = self._p[:, self._n_p:self._n_pe] - self._p[:,[i]]
if self._is_periodic: relPos_e2e = make_periodic( relPos_e2e, self._L )
relVel_e2e = self._dp[:, self._n_p:self._n_pe] - self._dp[:,[i]] if self._dynamics_mode == 'Cartesian' else self._heading[:, self._n_p:self._n_pe] - self._heading[:,[i]]
relPos_e2e, relVel_e2e = get_focused(relPos_e2e, relVel_e2e, self._FoV_e, self._topo_n_e2e, True)
obs_escaper_pos = np.concatenate((self._p[:, [i]], relPos_e2p, relPos_e2e), axis=1)
obs_escaper_vel = np.concatenate((self._dp[:, [i]], relVel_e2p, relVel_e2e), axis=1)
obs_escaper = np.concatenate((obs_escaper_pos, obs_escaper_vel), axis=0)
if self._dynamics_mode == 'Cartesian':
self.obs[:self.obs_dim_escaper-1, i] = obs_escaper.T.reshape(-1)
self.obs[self.obs_dim_escaper-1,i] = 2/self.max_energy_e * self._energy[[0],i] - 1
elif self._dynamics_mode == 'Polar':
self.obs[:self.obs_dim_escaper-3, i] = obs_escaper.T.reshape(-1)
self.obs[self.obs_dim_escaper-3, i] = 2/self.max_energy_e * self._energy[[0],i] - 1
self.obs[self.obs_dim_escaper-2:self.obs_dim_escaper, i] = self._heading[:,i]
return self.obs
def _get_reward(self, a):
reward_p = 5.0 * self._is_collide_b2b[self._n_p:self._n_pe, :self._n_p].sum(axis=0, keepdims=True).astype(float)
reward_e = - 5.0 * self._is_collide_b2b[self._n_p:self._n_pe, :self._n_p].sum(axis=1, keepdims=True).astype(float).reshape(1,self.n_e)
if self._penalize_distance:
reward_p += - self._d_b2b_center[self._n_p:self._n_pe, :self._n_p].sum(axis=0, keepdims=True)
reward_e += self._d_b2b_center[self._n_p:self._n_pe, :self._n_p].sum(axis=1, keepdims=True).reshape(1,self.n_e)
if self._penalize_control_effort:
if self._dynamics_mode == 'Cartesian':
reward_p -= 1*np.sqrt( a[[0],:self._n_p]**2 + a[[1],:self._n_p]**2 )
reward_e -= 1*np.sqrt( a[[0], self._n_p:self._n_pe]**2 + a[[1], self._n_p:self._n_pe]**2 )
elif self._dynamics_mode == 'Polar':
reward_p -= 1 * np.abs( a[[0], :self._n_p] ) + 0 * np.abs( a[[1], :self._n_p] )
reward_e -= 1 * np.abs( a[[0], self._n_p:self._n_pe]) + 0 * np.abs( a[[1], self._n_p:self._n_pe])
if self._penalize_collide_agents:
reward_p -= self._is_collide_b2b[:self._n_p, :self._n_p].sum(axis=0, keepdims=True)
reward_e -= self._is_collide_b2b[self._n_p:self._n_pe, self._n_p:self._n_pe].sum(axis=0, keepdims=True)
if self._penalize_collide_obstacles:
reward_p -= 5 * self._is_collide_b2b[self._n_pe:self._n_peo, 0:self._n_p].sum(axis=0, keepdims=True)
reward_e -= 5 * self._is_collide_b2b[self._n_pe:self._n_peo, self._n_p:self._n_pe].sum(axis=0, keepdims=True)
if self._penalize_collide_walls and self._is_periodic == False:
reward_p -= 1 * self.is_collide_b2w[:, :self._n_p].sum(axis=0, keepdims=True)
reward_e -= 1 * self.is_collide_b2w[:, self._n_p:self._n_pe].sum(axis=0, keepdims=True)
if self._reward_sharing_mode == 'sharing_mean':
reward_p[:] = np.mean(reward_p)
reward_e[:] = np.mean(reward_e)
elif self._reward_sharing_mode == 'sharing_max':
reward_p[:] = np.max(reward_p)
reward_e[:] = np.max(reward_e)
elif self._reward_sharing_mode == 'individual':
pass
else:
print('reward mode error !!')
reward = np.concatenate((reward_p, reward_e), axis=1)
return reward
def _get_done(self):
all_done = np.zeros( (1, self._n_pe) ).astype(bool)
return all_done
# return False
def _get_info(self):
dist_matrix = self._d_b2b_center[self._n_p:self._n_pe, self._n_p:self._n_pe]
dist_matrix += 10 * np.identity(self.n_e)
ave_min_dist = np.mean( np.min(dist_matrix, axis=0) )
DoC = 1/ave_min_dist
nearest_idx = np.argmin(dist_matrix, axis=0)
if self._dynamics_mode == 'Cartesian':
nearest_headings = self._dp[:, self._n_p:self._n_pe][:, nearest_idx]
elif self._dynamics_mode == 'Polar':
nearest_headings = self._heading[:, self._n_p:self._n_pe][:, nearest_idx]
# alignments = self._heading[:, self._n_p:self._n_pe] + nearest_headings
# DoA = np.mean( np.sqrt( alignments[0,:]**2 + alignments[1,:]**2 ) )
# TODO
# assert self.n_e >= 2
# ave_dist = self._d_b2b_center[self._n_p:self._n_pe, self._n_p:self._n_pe].sum() / self.n_e / (self.n_e-1)
        # DoC_global = 1/ave_dist
        DoC_global = 0
        # return np.array( [DoC, DoA, DoC_global] ).reshape(3,1)
return np.array( [None, None, None] ).reshape(3,1)
def step(self, a):
for _ in range(self._n_frames):
if self._dynamics_mode == 'Polar':
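                # Rescale normalized actions: a[0] in [-1,1] becomes a rotation in
                # [-angle_max, angle_max] and a[1] becomes a thrust in
                # [linAcc_min, linAcc_max], separately for pursuers and escapers.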
a[0, :self._n_p] *= self._angle_p_max
a[0, self._n_p:self._n_pe] *= self._angle_e_max
a[1, :self._n_p] = (self._linAcc_p_max-self._linAcc_p_min)/2 * a[1,:self._n_p] + (self._linAcc_p_max+self._linAcc_p_min)/2
a[1, self._n_p:self._n_pe] = (self._linAcc_e_max-self._linAcc_e_min)/2 * a[1,self._n_p:self._n_pe] + (self._linAcc_e_max+self._linAcc_e_min)/2
self._d_b2b_center, self.d_b2b_edge, self._is_collide_b2b = get_dist_b2b(self._p, self._L, self._is_periodic, self._sizes)
sf_b2b_all = np.zeros((2*self._n_peo, self._n_peo))
for i in range(self._n_peo):
for j in range(i):
delta = self._p[:,j]-self._p[:,i]
if self._is_periodic:
delta = make_periodic(delta, self._L)
dir = delta / self._d_b2b_center[i,j]
sf_b2b_all[2*i:2*(i+1),j] = self._is_collide_b2b[i,j] * self.d_b2b_edge[i,j] * self._k_ball * (-dir)
sf_b2b_all[2*j:2*(j+1),i] = - sf_b2b_all[2*i:2*(i+1),j]
sf_b2b = np.sum(sf_b2b_all, axis=1, keepdims=True).reshape(self._n_peo,2).T
if self._is_periodic == False:
self.d_b2w, self.is_collide_b2w = get_dist_b2w(self._p, self._size, self._L)
sf_b2w = np.array([[1, 0, -1, 0], [0, -1, 0, 1]]).dot(self.is_collide_b2w * self.d_b2w) * self._k_wall
df_b2w = np.array([[-1, 0, -1, 0], [0, -1, 0, -1]]).dot(self.is_collide_b2w*np.concatenate((self._dp, self._dp), axis=0)) * self._c_wall
if self.pursuer_strategy == 'input':
pass
elif self.pursuer_strategy == 'static':
a[:,:self._n_p] = np.zeros((self._act_dim_pursuer, self._n_p))
elif self.pursuer_strategy == 'random':
a[:,:self._n_p] = np.random.uniform(-1,1, (self._act_dim_pursuer, self._n_p))
if self._dynamics_mode == 'Polar':
a[0, :self._n_p] *= self._angle_p_max
a[1, :self._n_p] = (self._linAcc_p_max-self._linAcc_p_min)/2 * a[1,:self._n_p] + (self._linAcc_p_max+self._linAcc_p_min)/2
elif self.pursuer_strategy == 'nearest':
ind_nearest = np.argmin( self._d_b2b_center[:self._n_p, self._n_p:self._n_pe], axis=1)
goto_pos = self._p[:, self._n_p+ind_nearest] - self._p[:,:self._n_p]
if self._is_periodic == True:
goto_pos = make_periodic( goto_pos, self._L )
ranges = np.sqrt( goto_pos[[0],:]**2 + goto_pos[[1],:]**2 )
goto_dir = goto_pos / ranges
if self._dynamics_mode == 'Cartesian':
a[:,:self._n_p] = 1 * goto_dir
elif self._dynamics_mode == 'Polar':
goto_dir = np.concatenate( (goto_dir, np.zeros((1,self._n_p))), axis=0 ).T
heading = np.concatenate( (self._heading[:,:self._n_p], np.zeros((1, self._n_p))), axis=0 ).T
desired_rotate_angle = np.cross(heading, goto_dir)[:,-1]
desired_rotate_angle[desired_rotate_angle>self._angle_p_max] = self._angle_p_max
desired_rotate_angle[desired_rotate_angle<-self._angle_p_max] = -self._angle_p_max
a[0, :self._n_p] = desired_rotate_angle
a[1, :self._n_p] = self._linAcc_p_max
else:
                print('Unknown pursuer_strategy in step()!')
if self.escaper_strategy == 'input':
pass
elif self.escaper_strategy == 'static':
a[:,self._n_p:self._n_pe] = np.zeros((self._act_dim_escaper, self.n_e))
elif self.escaper_strategy == 'random':
a[:,self._n_p:self._n_pe] = np.random.uniform(-1,1, (self._act_dim_escaper, self.n_e))
if self._dynamics_mode == 'Polar':
a[0, self._n_p:self._n_pe] *= self._angle_e_max
a[1, self._n_p:self._n_pe] = (self._linAcc_e_max-self._linAcc_e_min)/2 * a[1,self._n_p:self._n_pe] + (self._linAcc_e_max+self._linAcc_e_min)/2
elif self.escaper_strategy == 'nearest':
ind_nearest = np.argmin( self._d_b2b_center[self._n_p:self._n_pe, :self._n_p], axis=1)
goto_pos = - self._p[:, ind_nearest] + self._p[:, self._n_p:self._n_pe]
if self._is_periodic == True:
goto_pos = make_periodic( goto_pos, self._L )
ranges = np.sqrt( goto_pos[[0],:]**2 + goto_pos[[1],:]**2 )
goto_dir = goto_pos / ranges
if self._dynamics_mode == 'Cartesian':
a[:, self._n_p:self._n_pe] = 1 * goto_dir
elif self._dynamics_mode == 'Polar':
goto_dir = np.concatenate( (goto_dir, np.zeros((1,self.n_e))), axis=0 ).T
heading = np.concatenate( (self._heading[:,self._n_p:self._n_pe], np.zeros((1, self.n_e))), axis=0 ).T
desired_rotate_angle = np.cross(heading, goto_dir)[:,-1]
desired_rotate_angle[desired_rotate_angle>self._angle_e_max] = self._angle_e_max
desired_rotate_angle[desired_rotate_angle<-self._angle_e_max] = -self._angle_e_max
a[0, self._n_p:self._n_pe] = desired_rotate_angle
a[1, self._n_p:self._n_pe] = self._linAcc_e_max
else:
                print('Unknown escaper_strategy in step()!')
if self._dynamics_mode == 'Cartesian':
u = a
elif self._dynamics_mode == 'Polar':
self._theta += a[[0],:]
self._theta = normalize_angle(self._theta)
self._heading = np.concatenate((np.cos(self._theta), np.sin(self._theta)), axis=0)
u = a[[1], :] * self._heading
else:
            print('Unknown dynamics_mode in step()!')
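        # Net force per agent: control input scaled by sensitivity, plus
        # sphere-sphere contact forces and aerodynamic drag; in the bounded
        # (non-periodic) arena, wall contact and wall damping forces are added.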
if self._is_periodic == True:
F = self._sensitivity * u + sf_b2b - self._c_aero*self._dp
# F = self._sensitivity * u + sf_b2b + df_b2b - self._c_aero*dp
elif self._is_periodic == False:
F = self._sensitivity * u + sf_b2b - self._c_aero*self._dp + sf_b2w + df_b2w
else:
            print('Invalid is_periodic setting when applying wall forces!')
self._ddp = F/self._m
self._dp += self._ddp * self._dt
if self._obstacles_cannot_move:
self._dp[:, self._n_pe:self._n_peo] = 0
self._dp[:,:self._n_p] = np.clip(self._dp[:,:self._n_p], -self._linVel_p_max, self._linVel_p_max)
self._dp[:,self._n_p:self._n_pe] = np.clip(self._dp[:,self._n_p:self._n_pe], -self._linVel_e_max, self._linVel_e_max)
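        # Energy model: agents whose energy falls below 0.5 are frozen in place;
        # energy drains with speed, slowly recovers when nearly stationary, and
        # is capped at max_energy for pursuers and escapers respectively.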
energy = np.tile(self._energy, (2,1))
self._dp[:,:self._n_pe][energy<0.5] = 0
speeds = np.sqrt( self._dp[[0],:self._n_pe]**2 + self._dp[[1],:self._n_pe]**2 )
self._energy -= speeds
self._energy[speeds<0.01] += 0.1
self._energy[0,:self._n_p][self._energy[0,:self._n_p]>self.max_energy_p] = self.max_energy_p
self._energy[0,self._n_p:][self._energy[0,self._n_p:]>self.max_energy_e] = self.max_energy_e
self._p += self._dp * self._dt
if self._obstacles_is_constant:
self._p[:, self._n_pe:self._n_peo] = self._p_o
if self._is_periodic:
self._p = make_periodic(self._p, self._L)
if self._render_traj == True:
self._p_traj = np.concatenate( (self._p_traj[1:,:,:], self._p.reshape(1, 2, self._n_peo)), axis=0 )
return self._get_obs(), self._get_reward(a), self._get_done(), self._get_info()
# TODO: obstacle or shelter
# ============== ================= =====================
def render(self, mode="human"):
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.Viewer(500, 500)
self.viewer.set_bounds(-1, 1, -1, 1.)
agents = []
self.tf = []
if self._render_traj: self.trajrender = []
for i in range(self._n_pe):
if self._render_traj: self.trajrender.append( rendering.Traj( list(zip(self._p_traj[:,0,i], self._p_traj[:,1,i])), False) )
if i < self._n_p:
if self._dynamics_mode == 'Polar':
agents.append( rendering.make_unicycle(self._size_p) )
elif self._dynamics_mode == 'Cartesian':
agents.append( rendering.make_circle(self._size_p) )
agents[i].set_color_alpha(1, 0.5, 0, 1)
if self._render_traj: self.trajrender[i].set_color_alpha(1, 0.5, 0, 0.5)
elif (i >=self._n_p) and (i<self._n_pe):
if self._dynamics_mode == 'Polar':
agents.append( rendering.make_unicycle(self._size_e) )
elif self._dynamics_mode == 'Cartesian':
agents.append( rendering.make_circle(self._size_e) )
agents[i].set_color_alpha(0, 0.333, 0.778, 1)
if self._render_traj: self.trajrender[i].set_color_alpha(0, 0.333, 0.778, 0.5)
self.tf.append( rendering.Transform() )
agents[i].add_attr(self.tf[i])
self.viewer.add_geom(agents[i])
if self._render_traj: self.viewer.add_geom(self.trajrender[i])
for i in range(self._n_pe):
if self._dynamics_mode == 'Polar':
self.tf[i].set_rotation(self._theta[0,i])
elif self._dynamics_mode == 'Cartesian':
pass
self.tf[i].set_translation(self._p[0,i], self._p[1,i])
if self._render_traj: self.trajrender[i].set_traj(list(zip(self._p_traj[:,0,i], self._p_traj[:,1,i])))
return self.viewer.render(return_rgb_array=mode == "rgb_array")
def close(self):
if self.viewer:
self.viewer.close()
self.viewer = None
def _get_observation_space(self):
self._topo_n_p = self._topo_n_p2p + self._topo_n_p2e
self._topo_n_e = self._topo_n_e2p + self._topo_n_e2e
self.obs_dim_pursuer = ( 2 + 2*self._topo_n_p ) * 2 + 1
self.obs_dim_escaper = ( 2 + 2*self._topo_n_e ) * 2 + 1
if self._dynamics_mode == 'Polar':
self.obs_dim_pursuer += 2
self.obs_dim_escaper += 2
obs_dim_max = np.max([self.obs_dim_pursuer, self.obs_dim_escaper])
observation_space = spaces.Box(low=-np.inf, high=+np.inf, shape=(obs_dim_max, self._n_pe), dtype=np.float32)
return observation_space
def _get_action_space(self):
_act_dim_max = np.max([self._act_dim_pursuer, self._act_dim_escaper])
action_space = spaces.Box(low=-1, high=1, shape=(_act_dim_max, self._n_pe), dtype=np.float32)
return action_space
if __name__ == '__main__':
env = PredatorPreySwarmEnv()
Pos = np.array([ [1, 2, 3, 0, 1],
[2, 3, 4, 2, 2.3] ])
Vel = np.array([ [1, 2, 3, 4, 5],
[1, 2, 3, 4, 5] ])
print(Pos)
print(Vel)
threshold = 5
desired_n = 2
get_focused(Pos, Vel, threshold, desired_n, False)
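    # Minimal rollout sketch (assumes the remaining putils helpers used by
    # reset()/step(), e.g. get_dist_b2b and get_dist_b2w, are importable, and
    # that __reinit__ is called after construction to build the spaces).
    env.__reinit__()
    obs = env.reset()
    for _ in range(10):
        actions = np.random.uniform(-1, 1, env.action_space.shape)
        obs, reward, done, info = env.step(actions)
    print(obs.shape, reward.shape)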
| 25,915 | Python | .py | 413 | 48.779661 | 181 | 0.541777 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,295 | param.py | WindyLab_Gym-PPS/gym/envs/pps/putils/param.py | import gym
import numpy as np
class PredatorPreySwarmEnvParam(gym.Env):
metadata = {"render.modes": ["human", "rgb_array"], "video.frames_per_second": 30}
# Agent numbers
_n_p = 3
_n_e = 10
_n_o = 0
# Environment
_is_periodic = True
# Control Strategy
_pursuer_strategy = 'input'
_escaper_strategy = 'input'
_billiards_mode = False
# Reward
_reward_sharing_mode = 'individual'
_penalize_control_effort = True
_penalize_collide_walls = False
_penalize_distance = False
_penalize_collide_agents = False
_penalize_collide_obstacles = False
# Metric distance for observation
_FoV_p = 5 # for pursuers
_FoV_e = 5 # for escapers
# Topological distance for observation
_topo_n_p2e = 5 # pursuer to escaper
_topo_n_e2p = 2 # escaper to pursuer
_topo_n_p2p = 2 # pursuer to pursuer
_topo_n_e2e = 5 # escaper to escaper
# Action
_act_dim_pursuer = 2
_act_dim_escaper = 2
# Mass
_m_p = 3
_m_e = 1
_m_o = 10
# Size
_size_p = 0.06
_size_e = 0.035
_size_o = 0.2
# Dynamics Mode
_dynamics_mode = 'Polar'
# Dynamics capability
_linVel_p_max = 0.5
_linVel_e_max = 0.5
_linAcc_p_max = 1
_linAcc_e_max = 1
_angle_p_max = 0.5
_angle_e_max = 0.5
## Properties of obstacles
_obstacles_cannot_move = True
_obstacles_is_constant = False
if _obstacles_is_constant: # then specify their locations:
_p_o = np.array([[-0.5,0.5],[0,0]])
## Venue
_L = 1
_k_ball = 50 # sphere-sphere contact stiffness N/m
# _c_ball = 5 # sphere-sphere contact damping N/m/s
_k_wall = 100 # sphere-wall contact stiffness N/m
_c_wall = 5 # sphere-wall contact damping N/m/s
_c_aero = 2 # sphere aerodynamic drag coefficient N/m/s
## Simulation Steps
_dt = 0.1
_n_frames = 1
_sensitivity = 1
## Rendering
_render_traj = True
_traj_len = 15
_save_frame = False
def get_param():
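        """Return the names of all tunable parameters (the single-underscore
        class attributes, stripped of the leading underscore), plus 'p' so the
        agent positions can also be read and set directly."""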
params = PredatorPreySwarmEnvParam.__dict__.keys()
params = [param for param in params if param.startswith('_') and not param.startswith('__')]
params = [param[1:] for param in params]
return params + ['p']
params = get_param()
| 2,346 | Python | .py | 74 | 25.945946 | 96 | 0.59545 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,296 | putils.py | WindyLab_Gym-PPS/gym/envs/pps/putils/putils.py | import numpy as np
def make_periodic(x:np.array, L:float) -> np.array:
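    """Wrap coordinates that leave the square arena of half-length L back to
    the opposite side (periodic boundary conditions)."""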
x[x > L] -= 2 * L
x[x < -L] += 2 * L
return x
def normalize_angle(x:np.array) -> np.array:
return ((x + np.pi) % (2 * np.pi)) - np.pi
def get_sizes(size_p, size_e, size_o, n_p, n_e, n_o):
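    """Return the per-agent radii and the matrix of pairwise radius sums
    (diagonal zeroed) used for body-to-body collision checks."""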
n_peo = n_p + n_e + n_o
size = np.concatenate((
np.full(n_p, size_p),
np.full(n_e, size_e),
np.full(n_o, size_o)
))
sizes = np.tile(size.reshape(n_peo, 1), (1, n_peo))
sizes = sizes + sizes.T
np.fill_diagonal(sizes, 0)
return size, sizes
def get_mass(m_p, m_e, m_o, n_p, n_e, n_o):
masses = np.concatenate((
np.full(n_p, m_p),
np.full(n_e, m_e),
np.full(n_o, m_o)
))
return masses
def get_focused(Pos, Vel, norm_threshold, width, remove_self):
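    """Select an agent's focused neighbors from relative positions/velocities.

    Columns of Pos/Vel are sorted by metric distance, neighbors beyond
    norm_threshold are dropped, the agent itself is optionally removed, and the
    result is zero-padded or truncated to a fixed width so that the observation
    size stays constant (topological interaction).
    """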
norms = np.sqrt( Pos[0,:]**2 + Pos[1,:]**2 )
sorted_seq = np.argsort(norms)
Pos = Pos[:, sorted_seq]
norms = norms[sorted_seq]
Pos = Pos[:, norms < norm_threshold]
sorted_seq = sorted_seq[norms < norm_threshold]
if remove_self == True:
Pos = Pos[:,1:]
sorted_seq = sorted_seq[1:]
Vel = Vel[:, sorted_seq]
target_Pos = np.zeros( (2, width) )
target_Vel = np.zeros( (2, width) )
until_idx = np.min( [Pos.shape[1], width] )
target_Pos[:, :until_idx] = Pos[:, :until_idx]
target_Vel[:, :until_idx] = Vel[:, :until_idx]
return target_Pos, target_Vel
| 1,474 | Python | .py | 42 | 28.714286 | 62 | 0.546414 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,297 | __init__.py | WindyLab_Gym-PPS/gym/envs/pps/putils/__init__.py | def check_python_version():
import sys
    if sys.version_info[0] == 3 and sys.version_info[1] in (8, 10):
pass
else:
raise ValueError('Python 3.8 or 3.10 REQUIRED !')
check_python_version()
from .param import *
from .prop import *
from .putils import *
| 286 | Python | .py | 10 | 24.2 | 69 | 0.659259 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,298 | prop.py | WindyLab_Gym-PPS/gym/envs/pps/putils/prop.py | import gym
from .param import *
import numpy as np
class PredatorPreySwarmEnvProp(PredatorPreySwarmEnvParam):
## Useful parameters to customize observations and reward functions
@property
def p(self):
return self._p
@p.setter
def p(self, value):
self._assert_2X_ndarray('p', value)
self._p = value
@property
def dp(self):
return self._dp
@dp.setter
def dp(self, value):
self._assert_2X_ndarray('dp', value)
self._dp = value
@property
def ddp(self):
return self._ddp
@ddp.setter
def ddp(self, value):
self._assert_2X_ndarray('ddp', value)
self._ddp = value
@property
def theta(self):
return self._theta
@theta.setter
def theta(self, value):
self._assert_1X_ndarray('theta', value)
self._theta = value
@property
def heading(self):
return self._heading
@heading.setter
def heading(self, value):
self._assert_2X_ndarray('heading', value)
self._heading = value
@property
def d_b2b_center(self):
return self._d_b2b_center
@d_b2b_center.setter
def d_b2b_center(self, value):
self._d_b2b_center = value
@property
def is_collide_b2b(self):
return self._is_collide_b2b
@is_collide_b2b.setter
def is_collide_b2b(self, value):
self._is_collide_b2b = value
@property
def energy(self):
return self._energy
@energy.setter
def energy(self, value):
self._energy = value
## Environment Parameters
@property
def n_p(self):
return self._n_p
@n_p.setter
def n_p(self, value:int):
self._n_p = value
@property
def n_e(self):
return self._n_e
@n_e.setter
def n_e(self, value:int):
self._n_e = value
@property
def is_periodic(self):
return self._is_periodic
@is_periodic.setter
def is_periodic(self, new_is_periodic):
self._is_periodic = new_is_periodic
@property
def pursuer_strategy(self):
return self._pursuer_strategy
@pursuer_strategy.setter
def pursuer_strategy(self, value:str):
domain = ['input', 'static', 'random', 'nearest']
if value not in domain:
raise ValueError(f"reward_sharing_mode must be '{domain}'.")
self._pursuer_strategy = value
@property
def escaper_strategy(self):
return self._escaper_strategy
@escaper_strategy.setter
def escaper_strategy(self, value:str):
domain = ['input', 'static', 'random', 'nearest']
if value not in domain:
raise ValueError(f"reward_sharing_mode must be '{domain}'.")
self._escaper_strategy = value
@property
def billiards_mode(self):
return self._billiards_mode
@billiards_mode.setter
def billiards_mode(self, value:bool):
self._billiards_mode = value
if value:
self._dynamics_mode = 'Cartesian'
self._is_periodic = False
@property
def reward_sharing_mode(self):
return self._reward_sharing_mode
@reward_sharing_mode.setter
def reward_sharing_mode(self, new_reward_sharing_mode:str):
if new_reward_sharing_mode not in ['sharing_mean', 'sharing_max', 'individual']:
raise ValueError("reward_sharing_mode must be ['sharing_mean', 'sharing_max', 'individual'].")
self._reward_sharing_mode = new_reward_sharing_mode
@property
def penalize_control_effort(self):
return self._penalize_control_effort
@penalize_control_effort.setter
def penalize_control_effort(self, value):
self._penalize_control_effort = value
@property
def penalize_collide_walls(self):
return self._penalize_collide_walls
@penalize_collide_walls.setter
def penalize_collide_walls(self, value):
self._penalize_collide_walls = value
@property
def penalize_distance(self):
return self._penalize_distance
@penalize_distance.setter
def penalize_distance(self, value):
self._penalize_distance = value
@property
def penalize_collide_agents(self):
return self._penalize_collide_agents
@penalize_collide_agents.setter
def penalize_collide_agents(self, value):
self._penalize_collide_agents = value
@property
def penalize_collide_obstacles(self):
return self._penalize_collide_obstacles
@penalize_collide_obstacles.setter
def penalize_collide_obstacles(self, value):
self._penalize_collide_obstacles = value
@property
def FoV_p(self):
return self._FoV_p
@FoV_p.setter
def FoV_p(self, value):
self._FoV_p = value
@property
def FoV_e(self):
return self._FoV_e
@FoV_e.setter
def FoV_e(self, value):
self._FoV_e = value
@property
def topo_n_p2e(self):
return self._topo_n_p2e
@topo_n_p2e.setter
def topo_n_p2e(self, value):
self._assert_nonnegative_int('topo_n_p2e', value)
self._topo_n_p2e = value
@property
def topo_n_e2p(self):
return self._topo_n_e2p
@topo_n_e2p.setter
def topo_n_e2p(self, value):
self._assert_nonnegative_int('topo_n_e2p', value)
self._topo_n_e2p = value
@property
def topo_n_p2p(self):
return self._topo_n_p2p
@topo_n_p2p.setter
def topo_n_p2p(self, value):
self._assert_nonnegative_int('topo_n_p2p', value)
self._topo_n_p2p = value
@property
def topo_n_e2e(self):
return self._topo_n_e2e
@topo_n_e2e.setter
def topo_n_e2e(self, value):
self._assert_nonnegative_int('topo_n_e2e', value)
self._topo_n_e2e = value
@property
def m_p(self):
return self._m_p
@m_p.setter
def m_p(self, new_m_p):
self._m_p = new_m_p
@property
def m_e(self):
return self._m_e
@m_e.setter
def m_e(self, new_m_e):
self._m_e = new_m_e
@property
def size_p(self):
return self._size_p
@size_p.setter
def size_p(self, value):
self._size_p = value
@property
def size_e(self):
return self._size_e
@size_e.setter
def size_e(self, value):
self._size_e = value
@property
def size_o(self):
return self._size_o
@size_o.setter
def size_o(self, value):
self._size_o = value
@property
def dynamics_mode(self):
return self._dynamics_mode
@dynamics_mode.setter
def dynamics_mode(self, mode:str):
if mode not in ['Cartesian', 'Polar']:
raise ValueError("dynamics_mode must be 'Cartesian' or 'Polar', check your arguments.")
self._dynamics_mode = mode
@property
def linVel_p_max(self):
return self._linVel_p_max
@linVel_p_max.setter
def linVel_p_max(self, value):
self._linVel_p_max = value
@property
def linVel_e_max(self):
return self._linVel_e_max
@linVel_e_max.setter
def linVel_e_max(self, value):
self._linVel_e_max = value
@property
def linAcc_p_max(self):
return self._linAcc_p_max
@linAcc_p_max.setter
def linAcc_p_max(self, value):
self._linAcc_p_max = value
@property
def linAcc_e_max(self):
return self._linAcc_e_max
@linAcc_e_max.setter
def linAcc_e_max(self, value):
self._linAcc_e_max = value
@property
def angle_p_max(self):
return self._angle_p_max
@angle_p_max.setter
def angle_p_max(self, value):
self._angle_p_max = value
@property
def angle_e_max(self):
return self._angle_e_max
@angle_e_max.setter
def angle_e_max(self, value):
self._angle_e_max = value
@property
def L(self):
return self._L
@L.setter
def L(self, value):
self._L = value
@property
def k_ball(self):
return self._k_ball
@k_ball.setter
def k_ball(self, value):
self._k_ball = value
@property
def k_wall(self):
return self._k_wall
@k_wall.setter
def k_wall(self, value):
self._k_wall = value
@property
def c_wall(self):
return self._c_wall
@c_wall.setter
def c_wall(self, value):
self._c_wall = value
@property
def c_aero(self):
return self._c_aero
@c_aero.setter
def c_aero(self, value):
self._c_aero = value
@property
def dt(self):
return self._dt
@dt.setter
def dt(self, value):
if value > 0.5:
print("Note: Please exercise caution as the chosen time step may potentially lead to unstable behaviors.")
self._dt = value
@property
def render_traj(self):
return self._render_traj
@render_traj.setter
def render_traj(self, value:bool):
self._render_traj = value
@property
def traj_len(self):
return self._traj_len
@traj_len.setter
def traj_len(self, value):
self._assert_nonnegative_int('traj_len', value)
self._traj_len = value
@property
def save_frame(self):
return self._save_frame
@save_frame.setter
def save_frame(self, value:bool):
self._save_frame = value
@classmethod
def _assert_nonnegative_int(cls, name, value):
if not isinstance(value, int) or value < 0:
raise TypeError(f" '{name}' must be a non-negative integer ")
    @classmethod
    def _assert_2X_ndarray(cls, name, value):
if not isinstance(value, np.ndarray) or value.shape[0] != 2:
raise TypeError(f" '{name}' must be a 2-D np.ndarray with shape (2, x)")
    @classmethod
    def _assert_1X_ndarray(cls, name, value):
if not isinstance(value, np.ndarray) or value.shape[0] != 1:
raise TypeError(f" '{name}' must be a 2-D np.ndarray with shape (1, x)") | 10,007 | Python | .py | 323 | 23.839009 | 118 | 0.622636 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,299 | atari_env.py | WindyLab_Gym-PPS/gym/envs/atari/atari_env.py | import numpy as np
import os
import gym
from gym import error, spaces
from gym import utils
from gym.utils import seeding
try:
import atari_py
except ImportError as e:
raise error.DependencyNotInstalled(
"{}. (HINT: you can install Atari dependencies by running "
"'pip install gym[atari]'.)".format(e)
)
def to_ram(ale):
ram_size = ale.getRAMSize()
ram = np.zeros((ram_size), dtype=np.uint8)
ale.getRAM(ram)
return ram
class AtariEnv(gym.Env, utils.EzPickle):
metadata = {"render.modes": ["human", "rgb_array"]}
def __init__(
self,
game="pong",
mode=None,
difficulty=None,
obs_type="ram",
frameskip=(2, 5),
repeat_action_probability=0.0,
full_action_space=False,
):
"""Frameskip should be either a tuple (indicating a random range to
choose from, with the top value exclude), or an int."""
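        # e.g. frameskip=4 repeats each chosen action for exactly 4 emulator
        # frames, while frameskip=(2, 5) samples 2, 3 or 4 frames per step.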
utils.EzPickle.__init__(
self,
game,
mode,
difficulty,
obs_type,
frameskip,
repeat_action_probability,
full_action_space,
)
assert obs_type in ("ram", "image")
self.game = game
self.game_path = atari_py.get_game_path(game)
self.game_mode = mode
self.game_difficulty = difficulty
if not os.path.exists(self.game_path):
msg = "You asked for game %s but path %s does not exist"
raise IOError(msg % (game, self.game_path))
self._obs_type = obs_type
self.frameskip = frameskip
self.ale = atari_py.ALEInterface()
self.viewer = None
# Tune (or disable) ALE's action repeat:
# https://github.com/openai/gym/issues/349
assert isinstance(
repeat_action_probability, (float, int)
), "Invalid repeat_action_probability: {!r}".format(repeat_action_probability)
self.ale.setFloat(
"repeat_action_probability".encode("utf-8"), repeat_action_probability
)
self.seed()
self._action_set = (
self.ale.getLegalActionSet()
if full_action_space
else self.ale.getMinimalActionSet()
)
self.action_space = spaces.Discrete(len(self._action_set))
(screen_width, screen_height) = self.ale.getScreenDims()
if self._obs_type == "ram":
self.observation_space = spaces.Box(
low=0, high=255, dtype=np.uint8, shape=(128,)
)
elif self._obs_type == "image":
self.observation_space = spaces.Box(
low=0, high=255, shape=(screen_height, screen_width, 3), dtype=np.uint8
)
else:
raise error.Error(
"Unrecognized observation type: {}".format(self._obs_type)
)
def seed(self, seed=None):
self.np_random, seed1 = seeding.np_random(seed)
# Derive a random seed. This gets passed as a uint, but gets
# checked as an int elsewhere, so we need to keep it below
# 2**31.
seed2 = seeding.hash_seed(seed1 + 1) % 2 ** 31
# Empirically, we need to seed before loading the ROM.
self.ale.setInt(b"random_seed", seed2)
self.ale.loadROM(self.game_path)
if self.game_mode is not None:
modes = self.ale.getAvailableModes()
assert self.game_mode in modes, (
'Invalid game mode "{}" for game {}.\nAvailable modes are: {}'
).format(self.game_mode, self.game, modes)
self.ale.setMode(self.game_mode)
if self.game_difficulty is not None:
difficulties = self.ale.getAvailableDifficulties()
assert self.game_difficulty in difficulties, (
'Invalid game difficulty "{}" for game {}.\nAvailable difficulties are: {}'
).format(self.game_difficulty, self.game, difficulties)
self.ale.setDifficulty(self.game_difficulty)
return [seed1, seed2]
def step(self, a):
reward = 0.0
action = self._action_set[a]
if isinstance(self.frameskip, int):
num_steps = self.frameskip
else:
num_steps = self.np_random.randint(self.frameskip[0], self.frameskip[1])
for _ in range(num_steps):
reward += self.ale.act(action)
ob = self._get_obs()
return ob, reward, self.ale.game_over(), {"ale.lives": self.ale.lives()}
def _get_image(self):
return self.ale.getScreenRGB2()
def _get_ram(self):
return to_ram(self.ale)
@property
def _n_actions(self):
return len(self._action_set)
def _get_obs(self):
if self._obs_type == "ram":
return self._get_ram()
elif self._obs_type == "image":
img = self._get_image()
return img
# return: (states, observations)
def reset(self):
self.ale.reset_game()
return self._get_obs()
def render(self, mode="human"):
img = self._get_image()
if mode == "rgb_array":
return img
elif mode == "human":
from gym.envs.classic_control import rendering
if self.viewer is None:
self.viewer = rendering.SimpleImageViewer()
self.viewer.imshow(img)
return self.viewer.isopen
def close(self):
if self.viewer is not None:
self.viewer.close()
self.viewer = None
def get_action_meanings(self):
return [ACTION_MEANING[i] for i in self._action_set]
def get_keys_to_action(self):
KEYWORD_TO_KEY = {
"UP": ord("w"),
"DOWN": ord("s"),
"LEFT": ord("a"),
"RIGHT": ord("d"),
"FIRE": ord(" "),
}
keys_to_action = {}
for action_id, action_meaning in enumerate(self.get_action_meanings()):
keys = []
for keyword, key in KEYWORD_TO_KEY.items():
if keyword in action_meaning:
keys.append(key)
keys = tuple(sorted(keys))
assert keys not in keys_to_action
keys_to_action[keys] = action_id
return keys_to_action
def clone_state(self):
"""Clone emulator state w/o system state. Restoring this state will
*not* give an identical environment. For complete cloning and restoring
of the full state, see `{clone,restore}_full_state()`."""
state_ref = self.ale.cloneState()
state = self.ale.encodeState(state_ref)
self.ale.deleteState(state_ref)
return state
def restore_state(self, state):
"""Restore emulator state w/o system state."""
state_ref = self.ale.decodeState(state)
self.ale.restoreState(state_ref)
self.ale.deleteState(state_ref)
def clone_full_state(self):
"""Clone emulator state w/ system state including pseudorandomness.
Restoring this state will give an identical environment."""
state_ref = self.ale.cloneSystemState()
state = self.ale.encodeState(state_ref)
self.ale.deleteState(state_ref)
return state
def restore_full_state(self, state):
"""Restore emulator state w/ system state including pseudorandomness."""
state_ref = self.ale.decodeState(state)
self.ale.restoreSystemState(state_ref)
self.ale.deleteState(state_ref)
ACTION_MEANING = {
0: "NOOP",
1: "FIRE",
2: "UP",
3: "RIGHT",
4: "LEFT",
5: "DOWN",
6: "UPRIGHT",
7: "UPLEFT",
8: "DOWNRIGHT",
9: "DOWNLEFT",
10: "UPFIRE",
11: "RIGHTFIRE",
12: "LEFTFIRE",
13: "DOWNFIRE",
14: "UPRIGHTFIRE",
15: "UPLEFTFIRE",
16: "DOWNRIGHTFIRE",
17: "DOWNLEFTFIRE",
}
| 7,842 | Python | .py | 211 | 27.985782 | 91 | 0.587166 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |