id (int64, 0–190k) | prompt (string, lengths 21–13.4M) | docstring (string, lengths 1–12k, may be null ⌀)
---|---|---|
188,331 | import asyncio
import base64
import os
import random
import re
import uuid
from io import BytesIO
from math import ceil
from pathlib import Path
from typing import Any, Awaitable, Callable, List, Literal, Optional, Tuple, Union
import cv2
import imagehash
from imagehash import ImageHash
from matplotlib import pyplot as plt
from nonebot.utils import is_coroutine_callable
from PIL import Image, ImageDraw, ImageFile, ImageFilter, ImageFont
from PIL.ImageFont import FreeTypeFont
from configs.path_config import FONT_PATH, IMAGE_PATH
from services import logger
Image.MAX_IMAGE_PIXELS = None
The provided code snippet includes necessary dependencies for implementing the `alpha2white_pil` function. Write a Python function `def alpha2white_pil(pic: Image) -> Image` to solve the following problem:
Description: convert the image's transparent background to white. Parameters: :param pic: an image file opened with PIL
Here is the function:
def alpha2white_pil(pic: Image) -> Image:
"""
Description:
Convert the image's transparent background to white.
Parameters:
:param pic: an image file opened with PIL
"""
img = pic.convert("RGBA")
width, height = img.size
for yh in range(height):
for xw in range(width):
dot = (xw, yh)
color_d = img.getpixel(dot)
if color_d[3] == 0:
color_d = (255, 255, 255, 255)
img.putpixel(dot, color_d)
return img | Description: convert the image's transparent background to white. Parameters: :param pic: an image file opened with PIL |
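A faster equivalent worth noting, as a minimal sketch (alpha2white_fast is an illustrative name, not part of the source): compositing onto an opaque white canvas moves the work from a Python per-pixel loop into Pillow's C code. One semantic difference: fully transparent pixels become white exactly as in the loop above, but partially transparent pixels get blended with white rather than left untouched.
from PIL import Image

def alpha2white_fast(pic: Image.Image) -> Image.Image:
    # Composite the RGBA image over an opaque white background.
    img = pic.convert("RGBA")
    background = Image.new("RGBA", img.size, (255, 255, 255, 255))
    return Image.alpha_composite(background, img)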
188,332 | import asyncio
import base64
import os
import random
import re
import uuid
from io import BytesIO
from math import ceil
from pathlib import Path
from typing import Any, Awaitable, Callable, List, Literal, Optional, Tuple, Union
import cv2
import imagehash
from imagehash import ImageHash
from matplotlib import pyplot as plt
from nonebot.utils import is_coroutine_callable
from PIL import Image, ImageDraw, ImageFile, ImageFilter, ImageFont
from PIL.ImageFont import FreeTypeFont
from configs.path_config import FONT_PATH, IMAGE_PATH
from services import logger
The provided code snippet includes necessary dependencies for implementing the `fig2b64` function. Write a Python function `def fig2b64(plt_: plt) -> str` to solve the following problem:
Description: convert a matplotlib figure to base64. Parameters: :param plt_: the figure produced by matplotlib
Here is the function:
def fig2b64(plt_: plt) -> str:
"""
Description:
Convert a matplotlib figure to base64.
Parameters:
:param plt_: the figure produced by matplotlib
"""
buf = BytesIO()
plt_.savefig(buf, format="PNG", dpi=100)
base64_str = base64.b64encode(buf.getvalue()).decode()
return "base64://" + base64_str | 说明: matplotlib图片转base64 参数: :param plt_: matplotlib生成的图片 |
188,333 | import io
from pathlib import Path
from typing import List, Optional, Union
from nonebot.adapters.onebot.v11.message import Message, MessageSegment
from configs.config import NICKNAME
from configs.path_config import IMAGE_PATH, RECORD_PATH
from services.log import logger
from utils.image_utils import BuildImage, BuildMat
class logger:
TEMPLATE_A = "{}"
TEMPLATE_B = "[<u><c>{}</c></u>]: {}"
TEMPLATE_C = "用户[<u><e>{}</e></u>] 触发 [<u><c>{}</c></u>]: {}"
TEMPLATE_D = "群聊[<u><e>{}</e></u>] 用户[<u><e>{}</e></u>] 触发 [<u><c>{}</c></u>]: {}"
TEMPLATE_E = "群聊[<u><e>{}</e></u>] 用户[<u><e>{}</e></u>] 触发 [<u><c>{}</c></u>] [Target](<u><e>{}</e></u>): {}"
TEMPLATE_USER = "用户[<u><e>{}</e></u>] "
TEMPLATE_GROUP = "群聊[<u><e>{}</e></u>] "
TEMPLATE_COMMAND = "CMD[<u><c>{}</c></u>] "
TEMPLATE_TARGET = "[Target]([<u><e>{}</e></u>]) "
SUCCESS_TEMPLATE = "[<u><c>{}</c></u>]: {} | 参数[{}] 返回: [<y>{}</y>]"
WARNING_TEMPLATE = "[<u><y>{}</y></u>]: {}"
ERROR_TEMPLATE = "[<u><r>{}</r></u>]: {}"
def info(
cls,
info: str,
command: Optional[str] = None,
user_id: Optional[Union[int, str]] = None,
group_id: Optional[Union[int, str]] = None,
target: Optional[Any] = None,
):
template = cls.__parser_template(info, command, user_id, group_id, target)
logger_.opt(colors=True).info(template)
def success(
cls,
info: str,
command: str,
param: Optional[Dict[str, Any]] = None,
result: Optional[str] = "",
):
param_str = ""
if param:
param_str = ",".join([f"<m>{k}</m>:<g>{v}</g>" for k, v in param.items()])
logger_.opt(colors=True).success(
cls.SUCCESS_TEMPLATE.format(command, info, param_str, result)
)
def warning(
cls,
info: str,
command: Optional[str] = None,
user_id: Optional[Union[int, str]] = None,
group_id: Optional[Union[int, str]] = None,
target: Optional[Any] = None,
e: Optional[Exception] = None,
):
template = cls.__parser_template(info, command, user_id, group_id, target)
if e:
template += f" || 错误<r>{type(e)}: {e}</r>"
logger_.opt(colors=True).warning(template)
def error(
cls,
info: str,
command: Optional[str] = None,
user_id: Optional[Union[int, str]] = None,
group_id: Optional[Union[int, str]] = None,
target: Optional[Any] = None,
e: Optional[Exception] = None,
):
template = cls.__parser_template(info, command, user_id, group_id, target)
if e:
template += f" || 错误 <r>{type(e)}: {e}</r>"
logger_.opt(colors=True).error(template)
def debug(
cls,
info: str,
command: Optional[str] = None,
user_id: Optional[Union[int, str]] = None,
group_id: Optional[Union[int, str]] = None,
target: Optional[Any] = None,
e: Optional[Exception] = None,
):
template = cls.__parser_template(info, command, user_id, group_id, target)
if e:
template += f" || 错误 <r>{type(e)}: {e}</r>"
logger_.opt(colors=True).debug(template)
def __parser_template(
cls,
info: str,
command: Optional[str] = None,
user_id: Optional[Union[int, str]] = None,
group_id: Optional[Union[int, str]] = None,
target: Optional[Any] = None,
) -> str:
arg_list = []
template = ""
if group_id is not None:
template += cls.TEMPLATE_GROUP
arg_list.append(group_id)
if user_id is not None:
template += cls.TEMPLATE_USER
arg_list.append(user_id)
if command is not None:
template += cls.TEMPLATE_COMMAND
arg_list.append(command)
if target is not None:
template += cls.TEMPLATE_TARGET
arg_list.append(target)
arg_list.append(info)
template += "{}"
return template.format(*arg_list)
The provided code snippet includes necessary dependencies for implementing the `record` function. Write a Python function `def record(file: Union[Path, str, bytes, io.BytesIO]) -> Union[MessageSegment, str]` to solve the following problem:
Description: build a MessageSegment.record message. Parameters: :param file: audio file name, located under resource/voice by default
Here is the function:
def record(file: Union[Path, str, bytes, io.BytesIO]) -> Union[MessageSegment, str]:
"""
Description:
Build a MessageSegment.record message.
Parameters:
:param file: audio file name, located under resource/voice by default
"""
if isinstance(file, Path):
if file.exists():
return MessageSegment.record(file)
logger.warning(f"音频 {file.absolute()}缺失...")
if isinstance(file, (bytes, io.BytesIO)):
return MessageSegment.record(file)
if isinstance(file, str):
if "http" in file:
return MessageSegment.record(file)
else:
return MessageSegment.record(RECORD_PATH / file)
return "" | 说明: 生成一个 MessageSegment.record 消息 参数: :param file: 音频文件名称,默认在 resource/voice 目录下 |
188,334 | import io
from pathlib import Path
from typing import List, Optional, Union
from nonebot.adapters.onebot.v11.message import Message, MessageSegment
from configs.config import NICKNAME
from configs.path_config import IMAGE_PATH, RECORD_PATH
from services.log import logger
from utils.image_utils import BuildImage, BuildMat
The provided code snippet includes necessary dependencies for implementing the `contact_user` function. Write a Python function `def contact_user(qq: int) -> MessageSegment` to solve the following problem:
Description: build a MessageSegment.contact_user message. Parameters: :param qq: QQ number
Here is the function:
def contact_user(qq: int) -> MessageSegment:
"""
Description:
Build a MessageSegment.contact_user message.
Parameters:
:param qq: QQ number
"""
return MessageSegment.contact_user(qq) | Description: build a MessageSegment.contact_user message. Parameters: :param qq: QQ number |
188,335 | import io
from pathlib import Path
from typing import List, Optional, Union
from nonebot.adapters.onebot.v11.message import Message, MessageSegment
from configs.config import NICKNAME
from configs.path_config import IMAGE_PATH, RECORD_PATH
from services.log import logger
from utils.image_utils import BuildImage, BuildMat
The provided code snippet includes necessary dependencies for implementing the `xml` function. Write a Python function `def xml(data: str) -> MessageSegment` to solve the following problem:
Description: build a MessageSegment.xml message. Parameters: :param data: the data text
Here is the function:
def xml(data: str) -> MessageSegment:
"""
Description:
Build a MessageSegment.xml message.
Parameters:
:param data: the data text
"""
return MessageSegment.xml(data) | Description: build a MessageSegment.xml message. Parameters: :param data: the data text |
188,336 | import io
from pathlib import Path
from typing import List, Optional, Union
from nonebot.adapters.onebot.v11.message import Message, MessageSegment
from configs.config import NICKNAME
from configs.path_config import IMAGE_PATH, RECORD_PATH
from services.log import logger
from utils.image_utils import BuildImage, BuildMat
The provided code snippet includes necessary dependencies for implementing the `poke` function. Write a Python function `def poke(qq: int) -> MessageSegment` to solve the following problem:
Description: build a MessageSegment.poke message. Parameters: :param qq: QQ number
Here is the function:
def poke(qq: int) -> MessageSegment:
"""
Description:
Build a MessageSegment.poke message.
Parameters:
:param qq: QQ number
"""
return MessageSegment("poke", {"qq": qq}) | 说明: 生成一个 MessageSegment.poke 消息 参数: :param qq: qq号 |
188,337 | import io
from pathlib import Path
from typing import List, Optional, Union
from nonebot.adapters.onebot.v11.message import Message, MessageSegment
from configs.config import NICKNAME
from configs.path_config import IMAGE_PATH, RECORD_PATH
from services.log import logger
from utils.image_utils import BuildImage, BuildMat
def music(type_: str, id_: int) -> MessageSegment:
return MessageSegment.music(type_, id_) | null |
188,338 | from copy import deepcopy
import re
from .zh_wiki import zh2Hant, zh2Hans
import sys
MAPS = {}
class ConvertMap(object):
def __init__(self, name, mapping=None):
self.name = name
self._map = {}
if mapping:
self.set_convert_map(mapping)
def set_convert_map(self, mapping):
convert_map = {}
have_child = {}
max_key_length = 0
for key in sorted(mapping.keys()):
if len(key)>1:
for i in range(1, len(key)):
parent_key = key[:i]
have_child[parent_key] = True
have_child[key] = False
max_key_length = max(max_key_length, len(key))
for key in sorted(have_child.keys()):
convert_map[key] = (key in mapping, have_child[key],
mapping.get(key, UEMPTY))
self._map = convert_map
self.max_key_length = max_key_length
def __getitem__(self, k):
try:
is_tail, have_child, to_word = self._map[k]
return Node(k, to_word, is_tail, have_child)
except:
return Node(k)
def __contains__(self, k):
return k in self._map
def __len__(self):
return len(self._map)
def registery(name, mapping):
global MAPS
MAPS[name] = ConvertMap(name, mapping) | null |
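A sketch of how this registry is usually populated at import time in the zh_wiki-based converter (the two calls mirror the imported mapping names; UEMPTY is the module's empty-string constant assumed above):
registery('zh-hant', zh2Hant)  # Simplified -> Traditional
registery('zh-hans', zh2Hans)  # Traditional -> Simplified
del zh2Hant, zh2Hans  # the large mapping dicts are no longer needed once registered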
188,339 | from copy import deepcopy
import re
from .zh_wiki import zh2Hant, zh2Hans
import sys
class Converter(object):
def __init__(self, to_encoding):
self.to_encoding = to_encoding
self.map = MAPS[to_encoding]
self.start()
def feed(self, char):
branches = []
for fsm in self.machines:
new = fsm.feed(char, self.map)
if new:
branches.append(new)
if branches:
self.machines.extend(branches)
self.machines = [fsm for fsm in self.machines if fsm.state != FAIL]
all_ok = True
for fsm in self.machines:
if fsm.state != END:
all_ok = False
if all_ok:
self._clean()
return self.get_result()
def _clean(self):
if len(self.machines):
self.machines.sort(key=lambda x: len(x))
# self.machines.sort(cmp=lambda x,y: cmp(len(x), len(y)))
self.final += self.machines[0].final
self.machines = [StatesMachine()]
def start(self):
self.machines = [StatesMachine()]
self.final = UEMPTY
def end(self):
self.machines = [fsm for fsm in self.machines
if fsm.state == FAIL or fsm.state == END]
self._clean()
def convert(self, string):
self.start()
for char in string:
self.feed(char)
self.end()
return self.get_result()
def get_result(self):
return self.final
def run():
import sys
from optparse import OptionParser
parser = OptionParser()
parser.add_option('-e', type='string', dest='encoding',
help='encoding')
parser.add_option('-f', type='string', dest='file_in',
help='input file (- for stdin)')
parser.add_option('-t', type='string', dest='file_out',
help='output file')
(options, args) = parser.parse_args()
if not options.encoding:
parser.error('encoding must be set')
if options.file_in:
if options.file_in == '-':
file_in = sys.stdin
else:
file_in = open(options.file_in)
else:
file_in = sys.stdin
if options.file_out:
if options.file_out == '-':
file_out = sys.stdout
else:
file_out = open(options.file_out, 'wb')
else:
file_out = sys.stdout
c = Converter(options.encoding)
for line in file_in:
# print >> file_out, c.convert(line.rstrip('\n').decode(
file_out.write(c.convert(line.rstrip('\n').decode(
'utf8')).encode('utf8')) | null |
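A usage sketch, assuming the 'zh-hans' map has been registered as shown earlier:
c = Converter('zh-hans')
print(c.convert(u'憂郁的臺灣烏龜'))  # Traditional -> Simplified: 忧郁的台湾乌龟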
188,340 | from pathlib import Path
from typing import Callable, Dict, Optional, Union
from utils.manager import group_manager
from utils.manager.data_class import StaticData
from .models import Plugin
class Plugin(BaseModel):
"""
Plugin data
"""
plugin_name: str # module name
status: Optional[bool] = True # on/off state
error: Optional[bool] = False # whether loading raised an error
block_type: Optional[str] = None # type of block/disable
author: Optional[str] = None # author
version: Optional[Union[int, str]] = None # version
The provided code snippet includes necessary dependencies for implementing the `init_plugin` function. Write a Python function `def init_plugin(func: Callable)` to solve the following problem:
Description: initialize plugin data. Parameters: :param func: func
Here is the function:
def init_plugin(func: Callable):
"""
Description:
Initialize plugin data.
Parameters:
:param func: func
"""
def wrapper(*args, **kwargs):
try:
self = args[0]
module = args[1]
if module not in self._data.keys():
self._data[module] = Plugin(
plugin_name=module,
status=True,
error=False,
block_type=None,
author=None,
version=None,
)
except Exception as e:
pass
return func(*args, **kwargs)
return wrapper | Description: initialize plugin data. Parameters: :param func: func |
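A hedged sketch of how such a decorator is typically applied inside the manager (the class and method names here are illustrative, not from the source): any method that takes the module name as its first positional argument gets a default Plugin entry created on demand.
class PluginManager(StaticData):
    @init_plugin
    def block_plugin(self, module: str):
        # safe: init_plugin guarantees self._data[module] exists
        self._data[module].status = False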
188,341 | import copy
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Union
import nonebot
import ujson as json
from configs.config import Config
from utils.manager.data_class import StaticData
from utils.utils import get_matchers, is_number
from .models import BaseData, BaseGroup
{
"run_sql": [], # SQL statements to run before startup
"_shop_before_handle": {}, # handlers run before a shop item is used
"_shop_after_handle": {}, # handlers run after a shop item is used
}
def is_number(s: Union[int, str]) -> bool:
"""
说明:
检测 s 是否为数字
参数:
:param s: 文本
"""
if isinstance(s, int):
return True
try:
float(s)
return True
except ValueError:
pass
try:
import unicodedata
unicodedata.numeric(s)
return True
except (TypeError, ValueError):
pass
return False
class BaseGroup(BaseModel):
"""
Basic group-chat information
"""
level: int = Config.get_config("group_manager", "DEFAULT_GROUP_LEVEL") # group level
status: bool = Config.get_config(
"group_manager", "DEFAULT_GROUP_BOT_STATUS"
) # master switch state
close_plugins: List[str] = [] # disabled plugins
group_task_status: Dict[str, bool] = {} # passive-task states
The provided code snippet includes necessary dependencies for implementing the `init_group` function. Write a Python function `def init_group(func: Callable)` to solve the following problem:
Description: initialize group data. Parameters: :param func: func
Here is the function:
def init_group(func: Callable):
"""
Description:
Initialize group data.
Parameters:
:param func: func
"""
def wrapper(*args, **kwargs):
self = args[0]
if arg_list := list(filter(lambda x: is_number(x), args[1:])):
group_id = str(arg_list[0])
if self is not None and group_id and not self._data.group_manager.get(group_id):
self._data.group_manager[group_id] = BaseGroup()
self.save()
return func(*args, **kwargs)
return wrapper | Description: initialize group data. Parameters: :param func: func |
188,342 | import copy
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Union
import nonebot
import ujson as json
from configs.config import Config
from utils.manager.data_class import StaticData
from utils.utils import get_matchers, is_number
from .models import BaseData, BaseGroup
Config.add_plugin_config(
"group_manager", "DEFAULT_GROUP_LEVEL", 5, help_="默认群权限", default_value=5, type=int
)
Config.add_plugin_config(
"group_manager",
"DEFAULT_GROUP_BOT_STATUS",
True,
help_="默认进群总开关状态",
default_value=True,
type=bool,
)
The provided code snippet includes necessary dependencies for implementing the `init_task` function. Write a Python function `def init_task(func: Callable)` to solve the following problem:
Description: initialize group passive-task state. Parameters: :param func: func
Here is the function:
def init_task(func: Callable):
"""
Description:
Initialize group passive-task state.
Parameters:
:param func: func
"""
def wrapper(*args, **kwargs):
self = args[0]
group_id = str(args[1])
task = args[2] if len(args) > 2 else None # args[2] is the task name, so guard on len(args) > 2
if (
group_id
and task
and self._data.group_manager[group_id].group_task_status.get(task) is None
):
for task in self._data.task:
if (
self._data.group_manager[group_id].group_task_status.get(task)
is None
):
self._data.group_manager[group_id].group_task_status[
task
] = Config.get_config("_task", f"DEFAULT_{task}", default=True)
for task in list(self._data.group_manager[group_id].group_task_status):
if task not in self._data.task:
del self._data.group_manager[group_id].group_task_status[task]
self.save()
return func(*args, **kwargs)
return wrapper | Description: initialize group passive-task state. Parameters: :param func: func |
188,343 | import random
user_agent = [
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
"Mozilla/5.0 (Windows NT 10.0; WOW64; rv:38.0) Gecko/20100101 Firefox/38.0",
"Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; .NET4.0C; .NET4.0E; .NET CLR 2.0.50727; .NET CLR 3.0.30729; .NET CLR 3.5.30729; InfoPath.3; rv:11.0) like Gecko",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11",
"Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; TencentTraveler 4.0)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; The World)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Avant Browser)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
"Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
"Mozilla/5.0 (iPod; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
"Mozilla/5.0 (iPad; U; CPU OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
"Mozilla/5.0 (Linux; U; Android 2.3.7; en-us; Nexus One Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"MQQBrowser/26 Mozilla/5.0 (Linux; U; Android 2.3.7; zh-cn; MB200 Build/GRJ22; CyanogenMod-7) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"Opera/9.80 (Android 2.3.4; Linux; Opera Mobi/build-1107180945; U; en-GB) Presto/2.8.149 Version/11.10",
"Mozilla/5.0 (Linux; U; Android 3.0; en-us; Xoom Build/HRI39) AppleWebKit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13",
"Mozilla/5.0 (BlackBerry; U; BlackBerry 9800; en) AppleWebKit/534.1+ (KHTML, like Gecko) Version/6.0.0.337 Mobile Safari/534.1+",
"Mozilla/5.0 (hp-tablet; Linux; hpwOS/3.0.0; U; en-US) AppleWebKit/534.6 (KHTML, like Gecko) wOSBrowser/233.70 Safari/534.6 TouchPad/1.0",
"Mozilla/5.0 (SymbianOS/9.4; Series60/5.0 NokiaN97-1/20.0.019; Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebKit/525 (KHTML, like Gecko) BrowserNG/7.1.18124",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows Phone OS 7.5; Trident/5.0; IEMobile/9.0; HTC; Titan)",
"UCWEB7.0.2.37/28/999",
"NOKIA5700/ UCWEB7.0.2.37/28/999",
"Openwave/ UCWEB7.0.2.37/28/999",
"Mozilla/4.0 (compatible; MSIE 6.0; ) Opera/UCWEB7.0.2.37/28/999",
# iPhone 6:
"Mozilla/6.0 (iPhone; CPU iPhone OS 8_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/8.0 Mobile/10A5376e Safari/8536.25",
]
def get_user_agent_str():
return random.choice(user_agent) | null |
188,344 | import asyncio
from typing import Optional
from nonebot import get_driver
from playwright.async_api import Browser, Playwright, async_playwright
from services.log import logger
_playwright: Optional[Playwright] = None
_browser: Optional[Browser] = None
async def start_browser():
global _playwright
global _browser
_playwright = await async_playwright().start()
_browser = await _playwright.chromium.launch() | null |
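A companion accessor and page helper, as a minimal sketch (the function names are illustrative; the Playwright calls are real async API). It assumes start_browser() has already run:
async def get_browser() -> Browser:
    if not _browser:
        raise RuntimeError("browser is not initialized; call start_browser() first")
    return _browser

async def screenshot(url: str, path: str) -> None:
    browser = await get_browser()
    page = await browser.new_page()
    try:
        await page.goto(url)
        await page.screenshot(path=path)
    finally:
        await page.close()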
188,345 | import asyncio
from typing import Optional
from nonebot import get_driver
from playwright.async_api import Browser, Playwright, async_playwright
from services.log import logger
_playwright: Optional[Playwright] = None
_browser: Optional[Browser] = None
async def shutdown_browser():
if _browser:
await _browser.close()
if _playwright:
await _playwright.stop() # type: ignore | null |
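How the two lifecycle functions are typically wired up, sketched with nonebot's driver hooks (on_startup/on_shutdown are real nonebot APIs; the placement is illustrative):
driver = get_driver()
driver.on_startup(start_browser)
driver.on_shutdown(shutdown_browser)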
188,346 | import asyncio
from typing import Optional
from nonebot import get_driver
from playwright.async_api import Browser, Playwright, async_playwright
from services.log import logger
class logger:
TEMPLATE_A = "{}"
TEMPLATE_B = "[<u><c>{}</c></u>]: {}"
TEMPLATE_C = "用户[<u><e>{}</e></u>] 触发 [<u><c>{}</c></u>]: {}"
TEMPLATE_D = "群聊[<u><e>{}</e></u>] 用户[<u><e>{}</e></u>] 触发 [<u><c>{}</c></u>]: {}"
TEMPLATE_E = "群聊[<u><e>{}</e></u>] 用户[<u><e>{}</e></u>] 触发 [<u><c>{}</c></u>] [Target](<u><e>{}</e></u>): {}"
TEMPLATE_USER = "用户[<u><e>{}</e></u>] "
TEMPLATE_GROUP = "群聊[<u><e>{}</e></u>] "
TEMPLATE_COMMAND = "CMD[<u><c>{}</c></u>] "
TEMPLATE_TARGET = "[Target]([<u><e>{}</e></u>]) "
SUCCESS_TEMPLATE = "[<u><c>{}</c></u>]: {} | 参数[{}] 返回: [<y>{}</y>]"
WARNING_TEMPLATE = "[<u><y>{}</y></u>]: {}"
ERROR_TEMPLATE = "[<u><r>{}</r></u>]: {}"
def info(
cls,
info: str,
command: Optional[str] = None,
user_id: Optional[Union[int, str]] = None,
group_id: Optional[Union[int, str]] = None,
target: Optional[Any] = None,
):
template = cls.__parser_template(info, command, user_id, group_id, target)
logger_.opt(colors=True).info(template)
def success(
cls,
info: str,
command: str,
param: Optional[Dict[str, Any]] = None,
result: Optional[str] = "",
):
param_str = ""
if param:
param_str = ",".join([f"<m>{k}</m>:<g>{v}</g>" for k, v in param.items()])
logger_.opt(colors=True).success(
cls.SUCCESS_TEMPLATE.format(command, info, param_str, result)
)
def warning(
cls,
info: str,
command: Optional[str] = None,
user_id: Optional[Union[int, str]] = None,
group_id: Optional[Union[int, str]] = None,
target: Optional[Any] = None,
e: Optional[Exception] = None,
):
template = cls.__parser_template(info, command, user_id, group_id, target)
if e:
template += f" || 错误<r>{type(e)}: {e}</r>"
logger_.opt(colors=True).warning(template)
def error(
cls,
info: str,
command: Optional[str] = None,
user_id: Optional[Union[int, str]] = None,
group_id: Optional[Union[int, str]] = None,
target: Optional[Any] = None,
e: Optional[Exception] = None,
):
template = cls.__parser_template(info, command, user_id, group_id, target)
if e:
template += f" || 错误 <r>{type(e)}: {e}</r>"
logger_.opt(colors=True).error(template)
def debug(
cls,
info: str,
command: Optional[str] = None,
user_id: Optional[Union[int, str]] = None,
group_id: Optional[Union[int, str]] = None,
target: Optional[Any] = None,
e: Optional[Exception] = None,
):
template = cls.__parser_template(info, command, user_id, group_id, target)
if e:
template += f" || 错误 <r>{type(e)}: {e}</r>"
logger_.opt(colors=True).debug(template)
def __parser_template(
cls,
info: str,
command: Optional[str] = None,
user_id: Optional[Union[int, str]] = None,
group_id: Optional[Union[int, str]] = None,
target: Optional[Any] = None,
) -> str:
arg_list = []
template = ""
if group_id is not None:
template += cls.TEMPLATE_GROUP
arg_list.append(group_id)
if user_id is not None:
template += cls.TEMPLATE_USER
arg_list.append(user_id)
if command is not None:
template += cls.TEMPLATE_COMMAND
arg_list.append(command)
if target is not None:
template += cls.TEMPLATE_TARGET
arg_list.append(target)
arg_list.append(info)
template += "{}"
return template.format(*arg_list)
The provided code snippet includes necessary dependencies for implementing the `install` function. Write a Python function `def install()` to solve the following problem:
Automatically install and update Chromium
Here is the function:
def install():
"""自动安装、更新 Chromium"""
logger.info("正在检查 Chromium 更新")
import sys
from playwright.__main__ import main
sys.argv = ["", "install", "chromium"]
try:
main()
except SystemExit:
pass | Automatically install and update Chromium |
188,347 | import os
import shutil
from argparse import ArgumentParser
if __name__ == '__main__':
main()
def rm(path1, f):
# list the contents of the current directory: files or folders
fls = os.listdir(path1)
if len(fls)==0:
f.write(f"删除:{path1}\n")
os.rmdir(path1)
return
elif len(fls) == 1 and fls[0] == '.DS_Store':
f.write(f"删除:{path1}\n")
shutil.rmtree(path1)
return
for p in fls:
p2 = os.path.join(path1, p)
if os.path.isdir(p2):
rm(p2, f)
if os.path.exists(p2) and len(os.listdir(p2)) == 0: # after its contents are deleted, this may now be an empty directory
f.write(f"删除:{p2}\n")
os.rmdir(p2) # perform the deletion here
elif os.path.exists(p2) and len(os.listdir(p2)) == 1 and os.listdir(p2)[0] == '.DS_Store': # macOS metadata file
f.write(f"删除:{p2}\n")
shutil.rmtree(p2) | null |
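A hedged usage sketch (the paths are illustrative): prune empty directories under the crawl output, recording each deletion in a log file.
with open('log/delete_log.txt', 'w', encoding='utf-8') as f:
    rm('crawl-data', f)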
188,348 | import os
import re
def recursion_dirs(path, f_log):
files = os.listdir(path)
if 'src.txt' in files:
deal_chapter(path, f_log)
for p in files:
p2 = os.path.join(path, p)
if os.path.isdir(p2):
recursion_dirs(p2, f_log) | null |
188,349 |
def books(baseurl, header, lastInfo, f_log, base_dir_name = 'crawl-data'):
lastBook = lastInfo[0] if isinstance(lastInfo, tuple) else None
request = requests.get(url=baseurl, headers=header)
time.sleep(0.5)
bs = BeautifulSoup(request.text, 'lxml')
# extract book links and book titles
book_list = str(bs.select("body > div.main3 > div.left > div.sons > div")[0])
book_bookName = dict(zip(re.findall('href="(.*)" target=', book_list), re.findall('_blank">(.*)<\/a>', book_list)))
flag = True
for bookurl, bookName in book_bookName.items():
# a non-empty lastBook means we are resuming from a checkpoint
if lastBook is not None and bookName != lastBook and flag:
continue
flag = False # set both when resume mode ends and during normal crawling
url = "https://so.gushiwen.cn" + bookurl
dir = os.path.join(base_dir_name, bookName)
if not os.path.exists(dir):
os.mkdir(dir)
f_log.write('####'+bookName+'####\n')
print('当前书籍:' + bookName)
book(url, header, dir, lastInfo, f_log)
lastInfo, lastBook = None, None
return flag # True when the previous checkpoint was not found in this book
188,350 |
def readLog():
f_log = open('log/crawl_log.txt', 'r', encoding='utf-8')
log = f_log.read()
# checkpoint of the last crawl
lastBOOK, lastSection, lastChap = "", "", ""
# read the name of the last book from the previous crawl
if len(re.findall('####(.*)####', log)) > 0:
lastBOOK = re.findall('####(.*)####', log)[-1]
else:
return None
# read the section name of the last book from the previous crawl (may be empty)
BookContent = log[log.find(lastBOOK):]
if len(re.findall('###(.*)###', BookContent)) > 0: # contains sections
lastSection = re.findall('###(.*)###', BookContent)[-1]
BookContent = BookContent[BookContent.find(lastSection):]
# read the last chapter
if len(re.findall('##(.*)##', BookContent)) > 0:
lastChap = re.findall('##(.*)##', BookContent)[-1]
f_log.close()
return lastBOOK, lastSection, lastChap | null |
188,351 |
def books(baseurl, header, lastInfo, flog, base_dir_name = 'crawl-data'):
lastBook = lastInfo[0] if isinstance(lastInfo, tuple) else None
request = requests.get(url=baseurl, headers=header)
time.sleep(0.5)
bs = BeautifulSoup(request.text, 'lxml')
# extract book links and book titles
book_list = str(bs.select("body > div.main3 > div.left > div.sons > div")[0])
book_bookName = dict(zip(re.findall('href="(.*)" target=', book_list), re.findall('_blank">(.*)<\/a>', book_list)))
flag = True
for bookurl, bookName in book_bookName.items():
# a non-empty lastBook means we are resuming from a checkpoint
if lastBook is not None and bookName != lastBook and flag:
continue
flag = False # set both when resume mode ends and during normal crawling
url = "https://so.gushiwen.cn" + bookurl
dir = os.path.join(base_dir_name, bookName)
if not os.path.exists(dir):
os.mkdir(dir)
flog.write('####'+bookName+'####\n')
print('当前书籍:' + bookName)
book(url, header, dir, lastInfo, flog)
lastInfo, lastBook = None, None
return flag # True when the previous checkpoint was not found in this book
188,352 |
def readLog():
flog = open('log/crawl_src_log.txt', 'r', encoding="utf-8")
log = flog.read()
# checkpoint of the last crawl
lastBOOK, lastSection, lastChap = "", "", ""
# read the name of the last book from the previous crawl
if len(re.findall('####(.*)####', log)) > 0:
lastBOOK = re.findall('####(.*)####', log)[-1]
else:
return None
# read the section name of the last book from the previous crawl (may be empty)
BookContent = log[log.find(lastBOOK):]
if len(re.findall('###(.*)###', BookContent)) > 0: # contains sections
lastSection = re.findall('###(.*)###', BookContent)[-1]
BookContent = BookContent[BookContent.find(lastSection):]
# read the last chapter
if len(re.findall('##(.*)##', BookContent)) > 0:
lastChap = re.findall('##(.*)##', BookContent)[-1]
flog.close()
return lastBOOK, lastSection, lastChap | null |
188,353 | import os
from argparse import ArgumentParser
def recursion_dir(path):
files = os.listdir(path)
if "数据来源.txt" in files:
if os.path.exists(os.path.join(path, "temp_ori_sentence.txt")):
os.remove(os.path.join(path, "temp_ori_sentence.txt"))
if os.path.exists(os.path.join(path, "temp_trans_sentence.txt")):
os.remove(os.path.join(path, "temp_trans_sentence.txt"))
if os.path.exists(os.path.join(path, "src.txt")):
os.remove(os.path.join(path, "src.txt"))
if os.path.exists(os.path.join(path, "tgt.txt")):
os.remove(os.path.join(path, "tgt.txt"))
if os.path.exists(os.path.join(path, "my_trans.txt")):
os.rename(os.path.join(path, "my_trans.txt"), os.path.join(path, "target.txt"))
if os.path.exists(os.path.join(path, "my_ori.txt")):
os.rename(os.path.join(path, "my_ori.txt"), os.path.join(path, "source.txt"))
f1 = open(os.path.join(path, "source.txt"), "r")
f2 = open(os.path.join(path, "target.txt"), "r")
f3 = open(os.path.join(path, "bitext.txt"), "w")
l1 = f1.readlines()
l2 = f2.readlines()
assert len(l1) == len(l2)
for i in range(len(l1)):
f3.write("古文:" + l1[i])
f3.write("现代文:" + l2[i] + '\n\n')
f1.close()
f2.close()
f3.close()
for p in files:
p2 = os.path.join(path, p)
if os.path.isdir(p2):
recursion_dir(p2) | null |
188,354 | import os
import re
import math
from tqdm import tqdm
import time
from argparse import ArgumentParser
def sentence_set(text):
temp = re.sub('[,.!?;:\'",。:;‘’“”?!《》 ]', "", text.strip())
return set([item for item in temp]) | null |
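A worked example (sentences are illustrative): both variants strip to the same character set, so a set-overlap ratio used for alignment comes out as 1.0.
a = sentence_set("学而时习之,不亦说乎?")
b = sentence_set("学而时习之。不亦说乎!")
overlap = len(a & b) / max(len(a), len(b))  # 1.0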
188,355 | import os
import re
import math
from tqdm import tqdm
import time
from argparse import ArgumentParser
def minDistance(word1, word2):
# classic Levenshtein edit distance via dynamic programming
n, m = len(word1), len(word2)
d = [[0] * (m + 1) for _ in range(n + 1)]
for i in range(n+1):
d[i][0] = i
for i in range(m+1):
d[0][i] = i
for i in range(1, n+1):
for j in range(1, m+1):
left = d[i-1][j] + 1
down = d[i][j-1] + 1
left_down = d[i-1][j-1]
if word1[i-1] != word2[j-1]:
left_down += 1
d[i][j] = min(left, min(down, left_down))
return d[n][m] # e.g. minDistance("kitten", "sitting") == 3
for _ in range(10):
def update_score(scores, idx, si ,tj):
scores[idx] = minDistance(si, tj)
length = max(len(si), len(tj))
scores[idx] /= length
changdubi = len(tj) / len(si)
if changdubi <= 0.95: # the classical text being much shorter than its translation is considered implausible
scores[idx] += 0.25
if changdubi > 3.5:
scores[idx] += 0.25
scores = [100, 100, 100]
if i+1 != len(s):
# 2:1 score, idx 2
update_score(scores, 2, s[i].strip()+s[i+1], t[j])
if j+1 != len(t):
# 1:2 score, idx 1
update_score(scores, 1, s[i], t[j].strip()+t[j+1])
update_score(scores, 0, s[i], t[j])
mode = scores.index(min(scores))
if mode == 0: # 11
i += 1
j += 1
res.append(min(scores))
elif mode == 1: # 12
t[j] = t[j].strip() + t[j+1]
t.pop(j+1)
elif mode == 2:
s[i] = s[i].strip() + s[i+1]
s.pop(i+1)
if i == len(s) or j == len(t):
break
def test_delete(i, j, addition_length, s, t):
mode = None
if abs(j_align_i_score - i_align_j_score) < 0.11: # allowed tolerance in score difference
return mode, 0 # 1:1 alignment
def align(path):
temp_ori = os.path.join(path, 'temp_ori_sentence.txt')
temp_trans = os.path.join(path, 'temp_trans_sentence.txt')
f_ori = open(temp_ori, 'r', encoding='utf-8')
f_trans = open(temp_trans, 'r', encoding='utf-8')
s = f_ori.readlines()
t = f_trans.readlines()
i, j = 0, 0
my_ori, my_trans = [], [] # store the aligned source and translation sentences
if len(s) == 0 or len(t) == 0: # source text or translation is empty
return
while True:
def update_score(scores, idx, si, tj):
# normalized edit distance
scores[idx] = minDistance(si, tj)
length = max(len(si), len(tj))
scores[idx] /= length
# length ratio
changdubi = len(tj) / len(si)
if changdubi <= 0.95: # the classical text being much shorter than its translation is considered implausible
scores[idx] += 0.25
if changdubi > 3.5:
scores[idx] += 0.25
addition_length = abs(len(s) - len(t)) + 10 # use the sentence-count difference between source and translation as an alignment aid
mode, pop_number = test_delete(i, j, addition_length, s[:], t[:]) # check whether source sentence i and translation sentence j align 1:1; if not, return the index di / dj that i / j should align to
if mode == 3: # extra source sentences
for _ in range(pop_number):
s.pop(i)
continue
elif mode == 4: # extra translation sentences
for _ in range(pop_number):
t.pop(j)
continue
# scores 1:1 1:2 2:1
scores = [100, 100, 100] # a lower score is considered more likely
if i+1 != len(s):
# 2:1 score (two source sentences merged against one translation sentence) idx = 2
update_score(scores, 2, s[i].strip()+s[i+1], t[j])
if j+1 != len(t):
# 1:2 score (one source sentence against two merged translation sentences) idx = 1
update_score(scores, 1, s[i], t[j].strip()+t[j+1])
# 1:1 score idx = 0
update_score(scores, 0, s[i], t[j])
mode = scores.index(min(scores))
if mode == 0: # 1:1
my_ori.append(s[i])
my_trans.append(t[j])
i += 1
j += 1
elif mode == 1: # 1:2
t[j] = t[j].strip() + t[j+1]
t.pop(j+1)
elif mode == 2: # 2:1
s[i] = s[i].strip() + s[i+1]
s.pop(i+1)
if i == len(s) or j == len(t):
break
f_my_ori = open(os.path.join(path, 'my_ori.txt'), 'w', encoding='utf-8')
f_my_trans = open(os.path.join(path, 'my_trans.txt'), 'w', encoding='utf-8')
for item in my_ori:
f_my_ori.write(item)
for item in my_trans:
f_my_trans.write(item)
f_ori.close()
f_trans.close()
f_my_ori.close()
f_my_trans.close() | null |
188,356 | import os
import re
import math
from tqdm import tqdm
import time
from argparse import ArgumentParser
def recursion_dir(path, res):
files = os.listdir(path)
if 'temp_ori_sentence.txt' in files and 'temp_trans_sentence.txt' in files:
res.append(path)
for p in files:
p2 = os.path.join(path, p)
if os.path.isdir(p2):
recursion_dir(p2, res) | null |
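A driver sketch tying these rows together (the --root argument is illustrative; align and tqdm come from the snippets above): collect every directory holding the two temp files, then align each pair.
if __name__ == '__main__':
    parser = ArgumentParser()
    parser.add_argument('--root', default='crawl-data')
    args = parser.parse_args()
    res = []
    recursion_dir(args.root, res)
    for path in tqdm(res):
        align(path)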
188,357 | import importlib
import json
import os
import sys
from collections import defaultdict
from typing import Any
import yaml
from tqdm import tqdm
from real_agents.adapters.data_model import APIYamlModel, SpecModel
from real_agents.plugins_agent.plugins.plugin_names import PluginName
def load_plugin_elements_by_name(plugin_name: str):
class PluginName(str, Enum):
def load_all_plugins_elements():
all_plugins_elements = {}
for plugin_name in tqdm(PluginName.__members__):
try:
all_plugins_elements[plugin_name] = load_plugin_elements_by_name(plugin_name)
except Exception as e:
print(f"Error when loading plugin {plugin_name}: {e}")
return all_plugins_elements | null |
188,358 | import requests
def call_api(input_json, api_key):
input_json["appid"] = api_key
response = requests.get("https://www.wolframalpha.com/api/v1/llm-api", params=input_json)
if response.status_code == 200:
return response.content.decode("utf-8")
else:
return {"status_code": response.status_code, "text": response.text} | null |
188,359 | import requests
def call_api(input_json, api_key):
input_json["appid"] = api_key
response = requests.get("https://www.wolframalpha.com/api/v1/cloud-plugin", params=input_json)
if response.status_code == 200:
return response.content.decode("utf-8")
else:
return {"status_code": response.status_code, "text": response.text} | null |
188,360 | from typing import Any, Dict
import requests
def call_api(input_json: Dict[str, Any]) -> Dict[str, Any]:
response = requests.get("https://showme.redstarplugin.com/diagram-guidelines", params=input_json)
if response.status_code == 200:
return response.json()
else:
return {"status_code": response.status_code, "text": response.text} | null |
188,361 | from typing import Any, Dict
import requests
def call_api(input_json: Dict[str, Any]) -> Dict[str, Any]:
response = requests.get("https://showme.redstarplugin.com/render/", params=input_json)
if response.status_code == 200:
return response.json()
else:
return {"status_code": response.status_code, "text": response.text} | null |
188,362 | from typing import Any, Dict
import requests
def call_api(input_json: Dict[str, Any]) -> Dict[str, Any]:
response = requests.get("https://showme.redstarplugin.com/show-carousel", params=input_json)
if response.status_code == 200:
return response.json()
else:
return {"status_code": response.status_code, "text": response.text} | null |
188,363 | from typing import Any, Dict
import requests
def call_api(input_json: Dict[str, Any]) -> Dict[str, Any]:
response = requests.get("https://ai.biztoc.com/ai/news", params=input_json)
if response.status_code == 200:
return response.json()
else:
return {"status_code": response.status_code, "text": response.text} | null |
188,364 | from typing import Any, Dict
import requests
def call_api(input_json: Dict[str, Any]) -> Dict[str, Any]:
url = "https://jobsearch.vencio.de/jobs"
headers = {
"Content-Type": "application/json"
}
response = requests.get(url, headers=headers, params=input_json)
if response.status_code == 200:
return response.json()
else:
return {"status_code": response.status_code, "text": response.text} | null |
188,365 | from typing import Any, Dict
import requests
url = "https://api.speak.com/v1/public/openai/explain-task"
def call_api(input_json: Dict[str, Any]) -> Dict[str, Any]:
headers = {"Content-Type": "application/json"}
response = requests.post(url, json=input_json, headers=headers)
if response.status_code == 200:
return response.json()
else:
return {"status_code": response.status_code, "text": response.text} | null |
188,366 | from typing import Any, Dict
import requests
url = "https://api.speak.com/v1/public/openai/translate"
def call_api(input_json: Dict[str, Any]) -> Dict[str, Any]:
headers = {"Content-Type": "application/json"}
response = requests.post(url, json=input_json, headers=headers)
if response.status_code == 200:
return response.json()
else:
return {"status_code": response.status_code, "text": response.text} | null |
188,367 | from typing import Any, Dict
import requests
url = "https://api.speak.com/v1/public/openai/explain-phrase"
def call_api(input_json: Dict[str, Any]) -> Dict[str, Any]:
headers = {"Content-Type": "application/json"}
response = requests.post(url, json=input_json, headers=headers)
if response.status_code == 200:
return response.json()
else:
return {"status_code": response.status_code, "text": response.text} | null |
188,368 | import os
import requests
def reload_openapi(api_key, openapi_json):
# Original data
headers = {"X-API-Key": api_key, }
# Call read the openapi
url = "https://nla.zapier.com/api/v1/exposed/"
data = None
while True:
try:
response = requests.get(url, headers=headers)
data = response.json()
break
except Exception as e:
print(e)
# if an error occurs, continue to retry
import time
time.sleep(5)
continue
try:
data = data['results']
except Exception as e:
print(e)
return openapi_json, {}
new_paths = {}
for item in data:
new_paths['/api/v1/exposed/{}/execute/'.format(item["id"])] = {
'post': { # assuming POST method for all operations
'operationId': item['operation_id'],
'description': item['description'],
'parameters': [
{'name': k, 'in': 'query', 'required': True, 'schema': {'type': v}}
for k, v in item['params'].items()
],
"security": {
"SessionAuth": [],
"AccessPointApiKeyHeader": [],
"AccessPointApiKeyQuery": [],
"AccessPointOAuth": []
}
}
}
openapi_json['paths'] = openapi_json['paths'] | new_paths # dict union operator, requires Python 3.9+
return openapi_json, new_paths | null |
188,369 | import os
import requests
def reload_endpoints(new_paths):
new_endpoint2caller = {}
for new_path in new_paths:
# create the call function; bind new_path as a default argument so each
# closure keeps its own path (Python's late binding would otherwise point
# every generated caller at the last endpoint in the loop)
def call_api(input_json, api_key, new_path=new_path):
import requests
headers = {"X-API-Key": api_key}
url = "https://nla.zapier.com" + new_path
response = requests.post(url, headers=headers, json=input_json)
if response.status_code == 200:
return response.json()
else:
return response.text
new_endpoint2caller[new_path] = call_api
return new_endpoint2caller | null |
188,370 | from typing import Any, Dict
import requests
def call_api(input_json: Dict[str, Any], api_key) -> Dict[str, Any]:
headers = {
"X-API-Key": api_key,
}
url = "https://nla.zapier.com/api/v1/" + input_json['execution-log']
response = requests.get(url, headers=headers)
if response.status_code == 200:
return response.json()
else:
return {"status_code": response.status_code, "text": response.text} | null |
188,371 | from typing import Any, Dict
import requests
def call_api(input_json: Dict[str, Any], api_key) -> Dict[str, Any]:
headers = {
"X-API-Key": api_key,
"Content-Type": "application/json"
}
url = "https://nla.zapier.com/api/v1/preview-a-zap/"
response = requests.post(url, headers=headers)
if response.status_code == 200:
return response.json()
else:
return {"status_code": response.status_code, "text": response.text} | null |
188,372 | from typing import Any, Dict
import requests
def call_api(input_json: Dict[str, Any], api_key) -> Dict[str, Any]:
headers = {
"X-API-Key": api_key,
}
url = "https://nla.zapier.com/api/v1/search/actions/"
response = requests.get(url, headers=headers, params=input_json)
if response.status_code == 200:
return response.json()
else:
return {"status_code": response.status_code, "text": response.text} | null |
188,373 | from typing import Any, Dict
import requests
def call_api(input_json: Dict[str, Any], api_key) -> Dict[str, Any]:
headers = {
"X-API-Key": api_key,
}
url = "https://nla.zapier.com/api/v1/exposed/"
response = requests.get(url, headers=headers)
if response.status_code == 200:
return response.json()
else:
return {"status_code": response.status_code, "text": response.text} | null |
188,374 | from typing import Any, Dict
import requests
def call_api(input_json: Dict[str, Any], api_key) -> Dict[str, Any]:
headers = {
"X-API-Key": api_key,
}
url = "https://nla.zapier.com/api/v1/configuration-link/"
response = requests.get(url, headers=headers)
if response.status_code == 200:
return response.json()
else:
return {"status_code": response.status_code, "text": response.text} | null |
188,375 | from typing import Any, Dict
import requests
def call_api(input_json: Dict[str, Any]) -> Dict[str, Any]:
response = requests.post("https://dreamplugin.bgnetmobile.com/api/data", json=input_json)
if response.status_code == 200:
return response.content
else:
return {"status_code": response.status_code, "text": response.text} | null |
188,376 | from typing import Any, Dict
import requests
def call_api(input_json: Dict[str, Any]) -> Dict[str, Any]:
dream_text = input_json["DreamText"]
response = requests.get(f"https://dreamplugin.bgnetmobile.com/getDream/{dream_text}")
if response.status_code == 200:
return response.json()
else:
return {"status_code": response.status_code, "text": response.text} | null |
188,377 | from typing import Any, Dict
import requests
def call_api(input_json: Dict[str, Any]) -> Dict[str, Any]:
response = requests.post("https://www.coursera.org/api/rest/v1/search", json=input_json)
if response.status_code == 200:
return response.json()
else:
return {"status_code": response.status_code, "text": response.text} | null |
188,378 | from typing import Any, Dict
import requests
def call_api(input_json: Dict[str, Any]) -> Dict[str, Any]:
url = "https://openai-plugin.xweather.com/weather/summary/{}".format(input_json['location'])
response = requests.get(url)
if response.status_code == 200:
return response.json()
else:
return {"status_code": response.status_code, "text": response.text} | null |
188,379 | from typing import Any, Dict
import requests
def call_api(input_json: Dict[str, Any]) -> Dict[str, Any]:
location = input_json['location']
response = requests.get(f"https://openai-plugin.xweather.com/radar/{location}", params=input_json)
if response.status_code == 200:
return response.json()
else:
return {"status_code": response.status_code, "text": response.text} | null |
188,380 | from typing import Dict, Any
import requests
def call_api(input_json: Dict[str, Any]) -> Dict[str, Any]:
location = input_json["location"]
url = f"https://openai-plugin.xweather.com/weather/forecast/{location}"
response = requests.get(url)
if response.status_code == 200:
return response.json()
else:
return {"status_code": response.status_code, "text": response.text} | null |
188,381 | from typing import Any, Dict
import requests
def call_api(input_json: Dict[str, Any]) -> Dict[str, Any]:
response = requests.post("https://plugin.askyourpdf.com/query", json=input_json)
if response.status_code == 200:
return response.json()
else:
return {"status_code": response.status_code, "text": response.text} | null |
188,382 | from typing import Any, Dict
import requests
def call_api(input_json: Dict[str, Any]) -> Dict[str, Any]:
response = requests.post("https://plugin.askyourpdf.com/api/download_pdf", params=input_json)
if response.status_code == 200:
return response.json()
else:
return {"status_code": response.status_code, "text": response.text} | null |
188,383 | from typing import Any, Dict
import requests
def call_api(input_json: Dict[str, Any]) -> Dict[str, Any]:
response = requests.post("https://nba-gpt-prod.onrender.com/basketball_stats", json=input_json)
if response.status_code == 200:
return response.json()
else:
return {"status_code": response.status_code, "text": response.text} | null |
188,384 | from typing import Any, Dict
import requests
def call_api(input_json: Dict[str, Any]) -> Dict[str, Any]:
response = requests.get("https://chatgpt-plugin.outschool.com/api/teachers", params=input_json)
if response.status_code == 200:
return response.json()
else:
return {"status_code": response.status_code, "text": response.text} | null |
188,385 | from typing import Any, Dict
import requests
def call_api(input_json: Dict[str, Any]) -> Dict[str, Any]:
response = requests.get("https://chatgpt-plugin.outschool.com/api/classes", params=input_json)
if response.status_code == 200:
return response.json()
else:
return {"status_code": response.status_code, "text": response.text} | null |
188,386 | from typing import Any, Dict
import requests
url = "https://www.klarna.com/us/shopping/public/openai/v0/products"
def call_api(input_json: Dict[str, Any]) -> Dict[str, Any]:
headers = {"Accept": "application/json"}
response = requests.get(url, headers=headers, params=input_json)
if response.status_code == 200:
return response.json()
else:
return {"status_code": response.status_code, "text": response.text} | null |
188,387 | from typing import Any, Dict
import requests
def call_api(input_json: Dict[str, Any]) -> Dict[str, Any]:
url = "https://create-qr-code.modelxy.com/create-qr-code"
response = requests.get(url, params=input_json)
if response.status_code == 200:
return response.json()
else:
return {"status_code": response.status_code, "text": response.text} | null |
188,388 | import requests
from typing import Any, Dict
def call_api(input_json: Dict[str, Any]) -> Dict[str, Any]:
response = requests.post("https://scraper.gafo.tech/scrape", json=input_json)
if response.status_code == 200:
return response.json()
else:
return {"status_code": response.status_code, "text": response.text} | null |
188,389 | from typing import Any, Dict
import requests
def call_api(input_json: Dict[str, Any]) -> Dict[str, Any]:
query_param = input_json["latlng"]
response = requests.get(f"https://maps.smoothplugins.com/?latlng={query_param}")
if response.status_code == 200:
return {"result": response.content.decode()}
else:
return {"status_code": response.status_code, "text": response.text} | null |
188,390 | import os
from typing import Any, List, Optional, Tuple, Dict
from pydantic import BaseModel
import requests
import time
import ast
import pandas as pd
from io import StringIO
import redis
from loguru import logger
from IPython.core.interactiveshell import InteractiveShell
from IPython.core.getipython import get_ipython
from IPython.utils.capture import capture_output
def check_danger_code(code):
code_line = []
for line in code.split("\n"):
if not line.startswith("%"):
code_line.append(line)
code = "\n".join(code_line)
def check_imports(code):
ast_failed = False
try:
tree = ast.parse(code)
except Exception as e:
ast_failed = str(e)
return ast_failed
return ast_failed
ast_failed = check_imports(code)
return True, ast_failed, [] | null |
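Note that check_imports above only catches syntax errors. A sketch of the AST walk a danger check usually adds (BANNED and find_banned_imports are illustrative names, not from the source):
import ast

BANNED = {"os", "subprocess", "shutil"}  # illustrative ban list

def find_banned_imports(code: str) -> list:
    # Collect imports whose top-level module is on the ban list.
    hits = []
    tree = ast.parse(code)
    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            hits += [a.name for a in node.names if a.name.split(".")[0] in BANNED]
        elif isinstance(node, ast.ImportFrom) and node.module:
            if node.module.split(".")[0] in BANNED:
                hits.append(node.module)
    return hits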
188,391 | from __future__ import annotations
import asyncio
import functools
import logging
import os
import warnings
from contextlib import contextmanager
from contextvars import ContextVar
from typing import Any, Dict, Generator, List, Optional, Type, TypeVar, Union, cast
from uuid import UUID, uuid4
import langchain
from langchain.callbacks.base import (
BaseCallbackHandler,
BaseCallbackManager,
ChainManagerMixin,
LLMManagerMixin,
RunManagerMixin,
ToolManagerMixin,
)
from langchain.callbacks.openai_info import OpenAICallbackHandler
from langchain.callbacks.stdout import StdOutCallbackHandler
from langchain.callbacks.tracers.langchain import LangChainTracer
from langchain.callbacks.tracers.langchain_v1 import LangChainTracerV1, TracerSessionV1
from langchain.callbacks.tracers.schemas import TracerSession
from langchain.callbacks.tracers.stdout import ConsoleCallbackHandler
from langchain.schema import (
AgentAction,
AgentFinish,
BaseMessage,
LLMResult,
get_buffer_string,
)
openai_callback_var: ContextVar[Optional[OpenAICallbackHandler]] = ContextVar("openai_callback", default=None)
The provided code snippet includes necessary dependencies for implementing the `get_openai_callback` function. Write a Python function `def get_openai_callback() -> Generator[OpenAICallbackHandler, None, None]` to solve the following problem:
Get OpenAI callback handler in a context manager.
Here is the function:
@contextmanager
def get_openai_callback() -> Generator[OpenAICallbackHandler, None, None]:
"""Get OpenAI callback handler in a context manager."""
cb = OpenAICallbackHandler()
openai_callback_var.set(cb)
yield cb
openai_callback_var.set(None) | Get OpenAI callback handler in a context manager. |
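Typical usage (the model wrapper and prompt are illustrative): the handler accumulates token and cost counters while the context is active.
from langchain.llms import OpenAI

llm = OpenAI(temperature=0)
with get_openai_callback() as cb:
    llm("Tell me a joke")
    print(cb.total_tokens, cb.total_cost)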
188,392 | from __future__ import annotations
import asyncio
import functools
import logging
import os
import warnings
from contextlib import contextmanager
from contextvars import ContextVar
from typing import Any, Dict, Generator, List, Optional, Type, TypeVar, Union, cast
from uuid import UUID, uuid4
import langchain
from langchain.callbacks.base import (
BaseCallbackHandler,
BaseCallbackManager,
ChainManagerMixin,
LLMManagerMixin,
RunManagerMixin,
ToolManagerMixin,
)
from langchain.callbacks.openai_info import OpenAICallbackHandler
from langchain.callbacks.stdout import StdOutCallbackHandler
from langchain.callbacks.tracers.langchain import LangChainTracer
from langchain.callbacks.tracers.langchain_v1 import LangChainTracerV1, TracerSessionV1
from langchain.callbacks.tracers.schemas import TracerSession
from langchain.callbacks.tracers.stdout import ConsoleCallbackHandler
from langchain.schema import (
AgentAction,
AgentFinish,
BaseMessage,
LLMResult,
get_buffer_string,
)
tracing_callback_var: ContextVar[Optional[LangChainTracerV1]] = ContextVar( # noqa: E501
"tracing_callback", default=None
)
The provided code snippet includes necessary dependencies for implementing the `tracing_enabled` function. Write a Python function `def tracing_enabled( session_name: str = "default", ) -> Generator[TracerSessionV1, None, None]` to solve the following problem:
Get Tracer in a context manager.
Here is the function:
@contextmanager
def tracing_enabled(
session_name: str = "default",
) -> Generator[TracerSessionV1, None, None]:
"""Get Tracer in a context manager."""
cb = LangChainTracerV1()
session = cast(TracerSessionV1, cb.load_session(session_name))
tracing_callback_var.set(cb)
yield session
tracing_callback_var.set(None) | Get Tracer in a context manager. |
188,393 | from __future__ import annotations
import asyncio
import functools
import logging
import os
import warnings
from contextlib import contextmanager
from contextvars import ContextVar
from typing import Any, Dict, Generator, List, Optional, Type, TypeVar, Union, cast
from uuid import UUID, uuid4
import langchain
from langchain.callbacks.base import (
BaseCallbackHandler,
BaseCallbackManager,
ChainManagerMixin,
LLMManagerMixin,
RunManagerMixin,
ToolManagerMixin,
)
from langchain.callbacks.openai_info import OpenAICallbackHandler
from langchain.callbacks.stdout import StdOutCallbackHandler
from langchain.callbacks.tracers.langchain import LangChainTracer
from langchain.callbacks.tracers.langchain_v1 import LangChainTracerV1, TracerSessionV1
from langchain.callbacks.tracers.schemas import TracerSession
from langchain.callbacks.tracers.stdout import ConsoleCallbackHandler
from langchain.schema import (
AgentAction,
AgentFinish,
BaseMessage,
LLMResult,
get_buffer_string,
)
tracing_v2_callback_var: ContextVar[Optional[LangChainTracer]] = ContextVar( # noqa: E501
"tracing_callback_v2", default=None
)
The provided code snippet includes necessary dependencies for implementing the `tracing_v2_enabled` function. Write a Python function `def tracing_v2_enabled( session_name: Optional[str] = None, *, example_id: Optional[Union[str, UUID]] = None, tenant_id: Optional[str] = None, session_extra: Optional[Dict[str, Any]] = None, ) -> Generator[TracerSession, None, None]` to solve the following problem:
Get the experimental tracer handler in a context manager.
Here is the function:
@contextmanager
def tracing_v2_enabled(
session_name: Optional[str] = None,
*,
example_id: Optional[Union[str, UUID]] = None,
tenant_id: Optional[str] = None,
session_extra: Optional[Dict[str, Any]] = None,
) -> Generator[TracerSession, None, None]:
"""Get the experimental tracer handler in a context manager."""
# Issue a warning that this is experimental
warnings.warn(
"The experimental tracing v2 is in development. " "This is not yet stable and may change in the future."
)
if isinstance(example_id, str):
example_id = UUID(example_id)
cb = LangChainTracer(
tenant_id=tenant_id,
session_name=session_name,
example_id=example_id,
session_extra=session_extra,
)
session = cb.ensure_session()
tracing_v2_callback_var.set(cb)
yield session
tracing_v2_callback_var.set(None) | Get the experimental tracer handler in a context manager. |
188,394 | from __future__ import annotations
import asyncio
import functools
import logging
import os
import warnings
from contextlib import contextmanager
from contextvars import ContextVar
from typing import Any, Dict, Generator, List, Optional, Type, TypeVar, Union, cast
from uuid import UUID, uuid4
import langchain
from langchain.callbacks.base import (
BaseCallbackHandler,
BaseCallbackManager,
ChainManagerMixin,
LLMManagerMixin,
RunManagerMixin,
ToolManagerMixin,
)
from langchain.callbacks.openai_info import OpenAICallbackHandler
from langchain.callbacks.stdout import StdOutCallbackHandler
from langchain.callbacks.tracers.langchain import LangChainTracer
from langchain.callbacks.tracers.langchain_v1 import LangChainTracerV1, TracerSessionV1
from langchain.callbacks.tracers.schemas import TracerSession
from langchain.callbacks.tracers.stdout import ConsoleCallbackHandler
from langchain.schema import (
AgentAction,
AgentFinish,
BaseMessage,
LLMResult,
get_buffer_string,
)
logger = logging.getLogger(__name__)
The provided code snippet includes necessary dependencies for implementing the `_handle_event` function. Write a Python function `def _handle_event( handlers: List[BaseCallbackHandler], event_name: str, ignore_condition_name: Optional[str], *args: Any, **kwargs: Any, ) -> None` to solve the following problem:
Generic event handler for CallbackManager.
Here is the function:
def _handle_event(
handlers: List[BaseCallbackHandler],
event_name: str,
ignore_condition_name: Optional[str],
*args: Any,
**kwargs: Any,
) -> None:
"""Generic event handler for CallbackManager."""
message_strings: Optional[List[str]] = None
for handler in handlers:
try:
if ignore_condition_name is None or not getattr(handler, ignore_condition_name):
getattr(handler, event_name)(*args, **kwargs)
except NotImplementedError as e:
if event_name == "on_chat_model_start":
if message_strings is None:
message_strings = [get_buffer_string(m) for m in args[1]]
_handle_event(
[handler],
"on_llm_start",
"ignore_llm",
args[0],
message_strings,
*args[2:],
**kwargs,
)
else:
logger.warning(f"Error in {event_name} callback: {e}")
except Exception as e:
            logger.warning(f"Error in {event_name} callback: {e}")
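An illustrative dispatch through a toy handler; `PrintingHandler` is hypothetical and only overrides the `on_text` hook:

class PrintingHandler(BaseCallbackHandler):
    def on_text(self, text: str, **kwargs: Any) -> None:
        print(f"text event: {text}")

_handle_event([PrintingHandler()], "on_text", None, "hello")  # prints "text event: hello"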
188,395 | from __future__ import annotations
import asyncio
import functools
import logging
import os
import warnings
from contextlib import contextmanager
from contextvars import ContextVar
from typing import Any, Dict, Generator, List, Optional, Type, TypeVar, Union, cast
from uuid import UUID, uuid4
import langchain
from langchain.callbacks.base import (
BaseCallbackHandler,
BaseCallbackManager,
ChainManagerMixin,
LLMManagerMixin,
RunManagerMixin,
ToolManagerMixin,
)
from langchain.callbacks.openai_info import OpenAICallbackHandler
from langchain.callbacks.stdout import StdOutCallbackHandler
from langchain.callbacks.tracers.langchain import LangChainTracer
from langchain.callbacks.tracers.langchain_v1 import LangChainTracerV1, TracerSessionV1
from langchain.callbacks.tracers.schemas import TracerSession
from langchain.callbacks.tracers.stdout import ConsoleCallbackHandler
from langchain.schema import (
AgentAction,
AgentFinish,
BaseMessage,
LLMResult,
get_buffer_string,
)
logger = logging.getLogger(__name__)
async def _ahandle_event_for_handler(
handler: BaseCallbackHandler,
event_name: str,
ignore_condition_name: Optional[str],
*args: Any,
**kwargs: Any,
) -> None:
try:
if ignore_condition_name is None or not getattr(handler, ignore_condition_name):
event = getattr(handler, event_name)
if asyncio.iscoroutinefunction(event):
await event(*args, **kwargs)
else:
await asyncio.get_event_loop().run_in_executor(None, functools.partial(event, *args, **kwargs))
except NotImplementedError as e:
if event_name == "on_chat_model_start":
message_strings = [get_buffer_string(m) for m in args[1]]
await _ahandle_event_for_handler(
handler,
"on_llm_start",
"ignore_llm",
args[0],
message_strings,
*args[2:],
**kwargs,
)
else:
logger.warning(f"Error in {event_name} callback: {e}")
except Exception as e:
logger.warning(f"Error in {event_name} callback: {e}")
The provided code snippet includes necessary dependencies for implementing the `_ahandle_event` function. Write a Python function `async def _ahandle_event( handlers: List[BaseCallbackHandler], event_name: str, ignore_condition_name: Optional[str], *args: Any, **kwargs: Any, ) -> None` to solve the following problem:
Generic event handler for AsyncCallbackManager.
Here is the function:
async def _ahandle_event(
handlers: List[BaseCallbackHandler],
event_name: str,
ignore_condition_name: Optional[str],
*args: Any,
**kwargs: Any,
) -> None:
"""Generic event handler for AsyncCallbackManager."""
await asyncio.gather(
*(
_ahandle_event_for_handler(handler, event_name, ignore_condition_name, *args, **kwargs)
for handler in handlers
)
) | Generic event handler for AsyncCallbackManager. |
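The async variant awaits coroutine hooks and pushes plain functions onto the default executor. A toy run (handler name hypothetical):

class AsyncPrintingHandler(BaseCallbackHandler):
    async def on_text(self, text: str, **kwargs: Any) -> None:
        print(f"text event: {text}")

asyncio.run(_ahandle_event([AsyncPrintingHandler()], "on_text", None, "hello"))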
188,396 | from __future__ import annotations
import asyncio
import functools
import logging
import os
import warnings
from contextlib import contextmanager
from contextvars import ContextVar
from typing import Any, Dict, Generator, List, Optional, Type, TypeVar, Union, cast
from uuid import UUID, uuid4
import langchain
from langchain.callbacks.base import (
BaseCallbackHandler,
BaseCallbackManager,
ChainManagerMixin,
LLMManagerMixin,
RunManagerMixin,
ToolManagerMixin,
)
from langchain.callbacks.openai_info import OpenAICallbackHandler
from langchain.callbacks.stdout import StdOutCallbackHandler
from langchain.callbacks.tracers.langchain import LangChainTracer
from langchain.callbacks.tracers.langchain_v1 import LangChainTracerV1, TracerSessionV1
from langchain.callbacks.tracers.schemas import TracerSession
from langchain.callbacks.tracers.stdout import ConsoleCallbackHandler
from langchain.schema import (
AgentAction,
AgentFinish,
BaseMessage,
LLMResult,
get_buffer_string,
)
logger = logging.getLogger(__name__)
Callbacks = Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]]
openai_callback_var: ContextVar[Optional[OpenAICallbackHandler]] = ContextVar("openai_callback", default=None)
tracing_callback_var: ContextVar[Optional[LangChainTracerV1]] = ContextVar( # noqa: E501
"tracing_callback", default=None
)
tracing_v2_callback_var: ContextVar[Optional[LangChainTracer]] = ContextVar( # noqa: E501
"tracing_callback_v2", default=None
)
def _get_debug() -> bool:
return langchain.debug
T = TypeVar("T", CallbackManager, AsyncCallbackManager)
The provided code snippet includes necessary dependencies for implementing the `_configure` function. Write a Python function `def _configure( callback_manager_cls: Type[T], inheritable_callbacks: Callbacks = None, local_callbacks: Callbacks = None, verbose: bool = False, ) -> T` to solve the following problem:
Configure the callback manager.
Here is the function:
def _configure(
callback_manager_cls: Type[T],
inheritable_callbacks: Callbacks = None,
local_callbacks: Callbacks = None,
verbose: bool = False,
) -> T:
"""Configure the callback manager."""
callback_manager = callback_manager_cls([])
if inheritable_callbacks or local_callbacks:
if isinstance(inheritable_callbacks, list) or inheritable_callbacks is None:
inheritable_callbacks_ = inheritable_callbacks or []
callback_manager = callback_manager_cls(
handlers=inheritable_callbacks_.copy(),
inheritable_handlers=inheritable_callbacks_.copy(),
)
else:
callback_manager = callback_manager_cls(
handlers=inheritable_callbacks.handlers,
inheritable_handlers=inheritable_callbacks.inheritable_handlers,
parent_run_id=inheritable_callbacks.parent_run_id,
)
local_handlers_ = (
local_callbacks
if isinstance(local_callbacks, list)
else (local_callbacks.handlers if local_callbacks else [])
)
for handler in local_handlers_:
callback_manager.add_handler(handler, False)
tracer = tracing_callback_var.get()
open_ai = openai_callback_var.get()
tracing_enabled_ = (
os.environ.get("LANGCHAIN_TRACING") is not None
or tracer is not None
or os.environ.get("LANGCHAIN_HANDLER") is not None
)
tracer_v2 = tracing_v2_callback_var.get()
tracing_v2_enabled_ = os.environ.get("LANGCHAIN_TRACING_V2") is not None or tracer_v2 is not None
tracer_session = os.environ.get("LANGCHAIN_SESSION")
debug = _get_debug()
if tracer_session is None:
tracer_session = "default"
if verbose or debug or tracing_enabled_ or tracing_v2_enabled_ or open_ai is not None:
if verbose and not any(isinstance(handler, StdOutCallbackHandler) for handler in callback_manager.handlers):
if debug:
pass
else:
callback_manager.add_handler(StdOutCallbackHandler(), False)
if debug and not any(isinstance(handler, ConsoleCallbackHandler) for handler in callback_manager.handlers):
callback_manager.add_handler(ConsoleCallbackHandler(), True)
if tracing_enabled_ and not any(
isinstance(handler, LangChainTracerV1) for handler in callback_manager.handlers
):
if tracer:
callback_manager.add_handler(tracer, True)
else:
handler = LangChainTracerV1()
handler.load_session(tracer_session)
callback_manager.add_handler(handler, True)
if tracing_v2_enabled_ and not any(
isinstance(handler, LangChainTracer) for handler in callback_manager.handlers
):
if tracer_v2:
callback_manager.add_handler(tracer_v2, True)
else:
try:
handler = LangChainTracer(session_name=tracer_session)
handler.ensure_session()
callback_manager.add_handler(handler, True)
except Exception as e:
                        logger.debug("Unable to load requested LangChainTracer: %s", e)
if open_ai is not None and not any(
isinstance(handler, OpenAICallbackHandler) for handler in callback_manager.handlers
):
callback_manager.add_handler(open_ai, True)
return callback_manager | Configure the callback manager. |
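A usage sketch, assuming the LANGCHAIN_TRACING environment variables are unset and `CallbackManager` is the manager class defined alongside these helpers; inheritable handlers propagate to child runs, local ones do not:

manager = _configure(
    CallbackManager,
    inheritable_callbacks=[StdOutCallbackHandler()],
    local_callbacks=[ConsoleCallbackHandler()],
)
print(len(manager.handlers))              # 2: one inherited + one local
print(len(manager.inheritable_handlers))  # 1: only the inheritable handler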
188,397 | from inspect import signature
from typing import Any, Awaitable, Callable, Dict, Optional, Type, Union
from pydantic import BaseModel, validate_arguments
from langchain.tools.base import BaseTool
from real_agents.adapters.data_model import DataModel
from real_agents.adapters.callbacks.manager import (
CallbackManager,
Callbacks,
)
class Tool(BaseTool):
"""Tool that takes in function or coroutine directly."""
description: str = ""
func: Callable[..., str]
"""The function to run when the tool is called."""
coroutine: Optional[Callable[..., Awaitable[str]]] = None
"""The asynchronous version of the function."""
    @property
    def args(self) -> dict:
if self.args_schema is not None:
return self.args_schema.schema()["properties"]
else:
inferred_model = validate_arguments(self.func).model # type: ignore
schema = inferred_model.schema()["properties"]
valid_keys = signature(self.func).parameters
return {k: schema[k] for k in valid_keys}
def _run(self, *args: Any, **kwargs: Any) -> str:
"""Use the tool."""
return self.func(*args, **kwargs)
async def _arun(self, *args: Any, **kwargs: Any) -> str:
"""Use the tool asynchronously."""
if self.coroutine:
return await self.coroutine(*args, **kwargs)
raise NotImplementedError("Tool does not support async")
# TODO: this is for backwards compatibility, remove in future
def __init__(
self, name: str, func: Callable[[str], Union[Dict[Any, Any], DataModel]], description: str, **kwargs: Any
) -> None:
"""Initialize tool."""
super(Tool, self).__init__(name=name, func=func, description=description, **kwargs)
def run(
self,
tool_input: Union[str, Dict],
verbose: Optional[bool] = None,
start_color: Optional[str] = "green",
color: Optional[str] = "green",
callbacks: Callbacks = None,
**kwargs: Any,
) -> Any:
"""Run the tool."""
parsed_input = self._parse_input(tool_input)
if not self.verbose and verbose is not None:
verbose_ = verbose
else:
verbose_ = self.verbose
# todo: fix this place
callback_manager = CallbackManager.configure(callbacks, self.callbacks, verbose=verbose_)
        # TODO: maybe also pass through run_manager if _run supports kwargs
new_arg_supported = signature(self._run).parameters.get("run_manager")
run_manager = callback_manager.on_tool_start(
{"name": self.name, "description": self.description},
tool_input if isinstance(tool_input, str) else str(tool_input),
color=start_color,
**kwargs,
)
try:
tool_args, tool_kwargs = self._to_args_and_kwargs(parsed_input)
observation = (
self._run(*tool_args, run_manager=run_manager, **tool_kwargs)
if new_arg_supported
else self._run(*tool_args, **tool_kwargs)
)
except (Exception, KeyboardInterrupt) as e:
run_manager.on_tool_error(e)
raise e
run_manager.on_tool_end(observation, color=color, name=self.name, **kwargs)
return observation
The provided code snippet includes necessary dependencies for implementing the `tool` function. Write a Python function `def tool( *args: Union[str, Callable], return_direct: bool = False, args_schema: Optional[Type[BaseModel]] = None, infer_schema: bool = True, ) -> Callable` to solve the following problem:
Make tools out of functions, can be used with or without arguments. Args: *args: The arguments to the tool. return_direct: Whether to return directly from the tool rather than continuing the agent loop. args_schema: optional argument schema for user to specify infer_schema: Whether to infer the schema of the arguments from the function's signature. This also makes the resultant tool accept a dictionary input to its `run()` function. Requires: - Function must be of type (str) -> str - Function must have a docstring Examples: .. code-block:: python @tool def search_api(query: str) -> str: # Searches the API for the query. return @tool("search", return_direct=True) def search_api(query: str) -> str: # Searches the API for the query. return
Here is the function:
def tool(
*args: Union[str, Callable],
return_direct: bool = False,
args_schema: Optional[Type[BaseModel]] = None,
infer_schema: bool = True,
) -> Callable:
"""Make tools out of functions, can be used with or without arguments.
Args:
*args: The arguments to the tool.
return_direct: Whether to return directly from the tool rather
than continuing the agent loop.
args_schema: optional argument schema for user to specify
infer_schema: Whether to infer the schema of the arguments from
the function's signature. This also makes the resultant tool
accept a dictionary input to its `run()` function.
Requires:
- Function must be of type (str) -> str
- Function must have a docstring
Examples:
.. code-block:: python
@tool
def search_api(query: str) -> str:
# Searches the API for the query.
return
@tool("search", return_direct=True)
def search_api(query: str) -> str:
# Searches the API for the query.
return
"""
def _make_with_name(tool_name: str) -> Callable:
def _make_tool(func: Callable) -> Tool:
assert func.__doc__, "Function must have a docstring"
# Description example:
# search_api(query: str) - Searches the API for the query.
description = f"{tool_name}{signature(func)} - {func.__doc__.strip()}"
_args_schema = args_schema
if _args_schema is None and infer_schema:
_args_schema = validate_arguments(func).model # type: ignore
tool_ = Tool(
name=tool_name,
func=func,
args_schema=_args_schema,
description=description,
return_direct=return_direct,
)
return tool_
return _make_tool
if len(args) == 1 and isinstance(args[0], str):
# if the argument is a string, then we use the string as the tool name
# Example usage: @tool("search", return_direct=True)
return _make_with_name(args[0])
elif len(args) == 1 and callable(args[0]):
# if the argument is a function, then we use the function name as the tool name
# Example usage: @tool
return _make_with_name(args[0].__name__)(args[0])
elif len(args) == 0:
# if there are no arguments, then we use the function name as the tool name
# Example usage: @tool(return_direct=True)
def _partial(func: Callable[[str], str]) -> BaseTool:
return _make_with_name(func.__name__)(func)
return _partial
else:
raise ValueError("Too many arguments for tool decorator") | Make tools out of functions, can be used with or without arguments. Args: *args: The arguments to the tool. return_direct: Whether to return directly from the tool rather than continuing the agent loop. args_schema: optional argument schema for user to specify infer_schema: Whether to infer the schema of the arguments from the function's signature. This also makes the resultant tool accept a dictionary input to its `run()` function. Requires: - Function must be of type (str) -> str - Function must have a docstring Examples: .. code-block:: python @tool def search_api(query: str) -> str: # Searches the API for the query. return @tool("search", return_direct=True) def search_api(query: str) -> str: # Searches the API for the query. return |
188,398 | import json
from bs4 import BeautifulSoup
from collections import defaultdict
from typing import Any, Dict, List, Union
from real_agents.adapters.data_model.base import DataModel
import requests
import re
import tiktoken
The provided code snippet includes necessary dependencies for implementing the `find_potential_templates` function. Write a Python function `def find_potential_templates(node, possible_templates)` to solve the following problem:
Find all potential templates in the HTML tree.
Here is the function:
def find_potential_templates(node, possible_templates):
"""Find all potential templates in the HTML tree."""
if node.name: # Element node
attributes = {attr: node[attr] for attr in node.attrs}
children = []
for child in node.children:
child_json = find_potential_templates(child, possible_templates)
if child_json:
children.append(child_json)
# Max depth of the tree
depth = max([c["depth"] for c in children], default=0) + 1
# Create a template hash
template_hash = f"{node.name}#{sorted(attributes.keys())}#{[c['template_hash'] for c in children]}"
# Gather template values
template_values = list(attributes.values()) + [val for c in children for val in c["template_values"]]
json_node = {
"type": "ELEMENT",
"tag_name": node.name,
"attributes": attributes,
"children": children,
"template_hash": template_hash,
"template_values": template_values,
"depth": depth,
}
# Add node to possible templates
if template_hash in possible_templates:
if possible_templates[template_hash][0]["depth"] != depth:
raise ValueError(f"Template depth mismatch for template {template_hash}")
possible_templates[template_hash].append(json_node)
else:
possible_templates[template_hash] = [json_node]
return json_node
elif isinstance(node, str): # Text node
text = node.strip()
if text:
return {"type": "TEXT", "content": text, "template_hash": "TEXT", "template_values": [text], "depth": 0}
return None | Find all potential templates in the HTML tree. |
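Usage sketch on a small fragment; repeated <li> nodes end up under one template hash:

soup = BeautifulSoup("<ul><li>a</li><li>b</li></ul>", "html.parser")
possible_templates: Dict[str, list] = {}
find_potential_templates(soup.find("ul"), possible_templates)
for template_hash, nodes in possible_templates.items():
    print(template_hash, "->", len(nodes), "occurrence(s)")
# The <li> hash maps to 2 occurrences and the <ul> hash to 1; bare text
# nodes are returned but not registered as templates.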
188,399 | import json
from bs4 import BeautifulSoup
from collections import defaultdict
from typing import Any, Dict, List, Union
from real_agents.adapters.data_model.base import DataModel
import requests
import re
import tiktoken
The provided code snippet includes necessary dependencies for implementing the `optimize_template` function. Write a Python function `def optimize_template(template)` to solve the following problem:
Check and adjust the template in possible_templates to optimize style.
Here is the function:
def optimize_template(template):
"""Check and adjust the template in possible_templates to optimize style."""
values_to_inline = {
i
for i in range(len(template["nodes"][0]["templateValues"]))
if all(n["templateValues"][i] == template["nodes"][0]["templateValues"][i] for n in template["nodes"])
}
return {**template, "valuesToInline": values_to_inline} | Check and adjust the template in possible_templates to optimize style. |
188,400 | import json
from bs4 import BeautifulSoup
from collections import defaultdict
from typing import Any, Dict, List, Union
from real_agents.adapters.data_model.base import DataModel
import requests
import re
import tiktoken
def get_placeholder(template, value_index):
"""Get the placeholder for the value at the given index in the template."""
placeholder_index = value_index + 1 - len([i for i in template["valuesToInline"] if i < value_index])
return f"${placeholder_index}"
The provided code snippet includes necessary dependencies for implementing the `create_template_tree` function. Write a Python function `def create_template_tree(node, templates, render_for_template, current_value_index=0)` to solve the following problem:
Convert the DOM into processed template tree.
Here is the function:
def create_template_tree(node, templates, render_for_template, current_value_index=0):
"""Convert the DOM into processed template tree."""
if node["type"] == "TEXT":
if current_value_index in render_for_template["valuesToInline"]:
return {
"template": node["content"],
"valueIndex": current_value_index + 1,
"consumedTemplates": [node["templateHash"]],
}
else:
return {
"template": get_placeholder(render_for_template, current_value_index),
"valueIndex": current_value_index + 1,
"consumedTemplates": [node["templateHash"]],
}
else:
updated_value_index = current_value_index
consumed_templates = [node["templateHash"]]
attrs = "".join(
[
f' {k}="{v}"'
if updated_value_index + i in render_for_template["valuesToInline"]
else f" {k}={get_placeholder(render_for_template, updated_value_index + i)}"
for i, (k, v) in enumerate(node["attributes"].items())
]
)
updated_value_index += len(node["attributes"])
children = []
for child in node["children"]:
child_template = create_template_tree(child, templates, render_for_template, updated_value_index)
children.append(child_template["template"])
updated_value_index = child_template["valueIndex"]
consumed_templates.extend(child_template["consumedTemplates"])
return {
"template": f"<{node['tagName'].lower()}{attrs}/>"
if not children
else f"<{node['tagName'].lower()}{attrs}>{''.join(children)}</{node['tagName'].lower()}>",
"valueIndex": updated_value_index,
"consumedTemplates": consumed_templates,
} | Convert the DOM into processed template tree. |
188,401 | import json
from bs4 import BeautifulSoup
from collections import defaultdict
from typing import Any, Dict, List, Union
from real_agents.adapters.data_model.base import DataModel
import requests
import re
import tiktoken
def is_string_a_number(s):
try:
float(s)
return True
except ValueError:
return False
The provided code snippet includes necessary dependencies for implementing the `serialize_tree` function. Write a Python function `def serialize_tree(node, templates)` to solve the following problem:
Serialize the template tree into HTML string.
Here is the function:
def serialize_tree(node, templates):
"""Serialize the template tree into HTML string."""
if node["type"] == "TEXT":
return node["content"]
elif node["templateHash"] in templates:
template = templates[node["templateHash"]]
return f"{{T{template['label']}({','.join([str(v) if is_string_a_number(v) else json.dumps(v) for i, v in enumerate(node['templateValues']) if i not in template['valuesToInline']])})}}"
else:
attrs = "".join([f' {k}="{v}"' for k, v in node["attributes"].items()])
children = "".join([serialize_tree(c, templates) for c in node["children"]])
return (
f"<{node['tagName'].lower()}{attrs}/>"
if not children
else f"<{node['tagName'].lower()}{attrs}>{children}</{node['tagName'].lower()}>"
) | Serialize the template tree into HTML string. |
188,402 | import json
from bs4 import BeautifulSoup
from collections import defaultdict
from typing import Any, Dict, List, Union
from real_agents.adapters.data_model.base import DataModel
import requests
import re
import tiktoken
def remove_iframes(html_string):
# Remove all iframe tags using regex
return re.sub("<iframe.*?/iframe>", "", html_string, flags=re.DOTALL)
def count_tokens(text, model_name):
encoding = tiktoken.get_encoding("cl100k_base")
return len(encoding.encode(text))
def truncate_html_by_tokens(html_string, max_tokens, model_name, num_tags_to_remove_each_time=10):
    # Strip iframes before parsing, so the parsed tree and the token count agree
    html_string = remove_iframes(html_string)
    tokens_count = count_tokens(html_string, model_name)
    # Scale the batch size with document size; remove at least one tag per pass
    num_tags_to_remove_each_time = max(1, round(tokens_count / 500))
    soup = BeautifulSoup(html_string, "html.parser")
while tokens_count > max_tokens:
tags = soup.find_all(True) # find all tags
# remove the last N tags
for tag in tags[-num_tags_to_remove_each_time:]:
tag.decompose()
html_string = str(soup)
# re-count the tokens
tokens_count = count_tokens(html_string, model_name)
return html_string | null |
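Usage sketch; note that count_tokens ignores model_name and always uses the cl100k_base encoding:

page = "<html><body>" + "<p>hello world</p>" * 500 + "</body></html>"
short = truncate_html_by_tokens(page, max_tokens=200, model_name="gpt-3.5-turbo")
print(count_tokens(short, "gpt-3.5-turbo"))  # now at or below 200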
188,403 |
def indent_multiline_string(multiline_string: str, indent: int = 1) -> str:
return "\n".join("\t" * indent + line for line in multiline_string.split("\n")) | null |
188,404 | import os
from typing import Any, Dict
import importlib.util
import tiktoken
from real_agents.adapters.data_model.plugin.base import APIYamlModel
from real_agents.adapters.data_model.utils import indent_multiline_string
def import_function_from_file(filepath, function_name):
spec = importlib.util.spec_from_file_location("module.name", filepath)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
function = getattr(module, function_name)
return function | null |
188,405 | import os
from typing import Any, Dict
import importlib.util
import tiktoken
from real_agents.adapters.data_model.plugin.base import APIYamlModel
from real_agents.adapters.data_model.utils import indent_multiline_string
def process_one_param(param_dict: Dict[str, Any]) -> str:
name = param_dict.get("name", None)
description = param_dict.get("description", None)
required = param_dict.get("required", None)
schema = param_dict.get("schema", {})
type = schema.get("type", "UnknownType")
value_choices = schema.get("enum", [])
ret = (
f"`{name}` ({type}, {'required' if required else 'optional'}): {description}."
f"{'Examples:' + ','.join([str(_) for _ in value_choices]) if len(value_choices) > 0 else ''}"
)
return ret | null |
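Example input and output (values are illustrative):

param = {
    "name": "q",
    "description": "Search query",
    "required": True,
    "schema": {"type": "string", "enum": ["cats", "dogs"]},
}
print(process_one_param(param))
# `q` (string, required): Search query.Examples:cats,dogs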
188,406 | import os
from typing import Any, Dict
import importlib.util
import tiktoken
from real_agents.adapters.data_model.plugin.base import APIYamlModel
from real_agents.adapters.data_model.utils import indent_multiline_string
def process_one_property(name: str, value_dict: Dict[str, Any]) -> str:
description = value_dict.get("description", None)
required = value_dict.get("required", None)
type = value_dict.get("type", "UnknownType")
value_choices = value_dict.get("enum", [])
ret = (
f"`{name}` ({type}, {'required' if required else 'optional'}): {description}."
f"{'Examples:' + ','.join(value_choices) if len(value_choices) > 0 else ''}"
)
return ret | null |
188,407 | from copy import deepcopy
from typing import Any, Dict
def convert(_input_json: Dict[str, Any]) -> Dict[str, Any]:
input_json = deepcopy(_input_json)
assert isinstance(input_json["out"], list)
input_json["out"] = input_json["out"][:5]
extracted_keys = [
"body",
"title",
"created",
"url",
"tags",
]
input_json["out"] = [{k: r[k] for k in extracted_keys if k in r} for r in input_json["out"]]
return input_json | null |
188,408 | from copy import deepcopy
from typing import Any, Dict
def convert(_input_json: Dict[str, Any]) -> Dict[str, Any]:
input_json = deepcopy(_input_json)
assert isinstance(input_json["out"], list)
input_json["out"] = input_json["out"][:5]
    for job in input_json["out"]:
        # Drop fields that are noise for downstream consumers
        del job["id"]
        del job["created"]
return input_json | null |
188,409 | from copy import deepcopy
from typing import Any, Dict
def convert(_input_json: Dict[str, Any]) -> Dict[str, Any]:
input_json = deepcopy(_input_json)
    assert isinstance(input_json["out"], dict)  # "out" wraps an "articles" list here
    input_json["out"]["articles"] = input_json["out"]["articles"][:5]
return input_json | null |
188,410 | import subprocess
import sys
from typing import Dict, List, Tuple
The provided code snippet includes necessary dependencies for implementing the `convert` function. Write a Python function `def convert(kg_input: List[Tuple], name_space: str = "") -> Dict[str, str]` to solve the following problem:
Convert knowledge graph data to string representations in different formats. :param kg_input: the list of knowledge graph triples. :param name_space: of the knowledge graph. :return: A dictionary with the string knowledge graph representations in different formats.
Here is the function:
def convert(kg_input: List[Tuple], name_space: str = "") -> Dict[str, str]:
"""
Convert knowledge graph data to string representations in different formats.
:param kg_input: the list of knowledge graph triples.
:param name_space: of the knowledge graph.
:return: A dictionary with the string knowledge graph representations in different formats.
"""
def install_required_packages() -> None:
packages = ["rdflib", "rdflib-jsonld"]
for package in packages:
subprocess.check_call([sys.executable, "-m", "pip", "install", package])
# Call the function to install the required packages
install_required_packages()
from rdflib import Graph, Namespace, URIRef
g = Graph()
# Define a namespace for the knowledge graph
kg_ns = Namespace(name_space)
g.bind("kg", kg_ns)
# Add the triples to the graph
for s, p, o in kg_input:
subject = URIRef(kg_ns[s])
predicate = URIRef(kg_ns[p])
object = URIRef(kg_ns[o])
g.add((subject, predicate, object))
# Serialize the graph into the desired format
representations = {_format: g.serialize(format=_format) for _format in ["json-ld", "turtle", "n3", "nt"]}
return representations | Convert knowledge graph data to string representations in different formats. :param kg_input: the list of knowledge graph triples. :param name_space: of the knowledge graph. :return: A dictionary with the string knowledge graph representations in different formats. |
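Usage sketch; be aware the function pip-installs rdflib on every call, which is heavyweight outside of throwaway scripts:

triples = [("Alice", "knows", "Bob"), ("Bob", "lives_in", "Paris")]
reps = convert(triples, name_space="http://example.org/kg/")
print(sorted(reps))    # ['json-ld', 'n3', 'nt', 'turtle']
print(reps["turtle"])  # Turtle serialization of the two triples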
188,411 | import sqlite3
from typing import Dict, Union
import pandas as pd
import tiktoken
from real_agents.adapters.data_model.templates.skg_templates.table_templates import (
convert as convert_table,
)
from real_agents.adapters.schema import SQLDatabase
The provided code snippet includes necessary dependencies for implementing the `convert` function. Write a Python function `def convert(db_input: Union[str, Dict[str, pd.DataFrame]], visible_rows_num: int = 3) -> Dict[str, str]` to solve the following problem:
Convert database data to string representations in different formats. :param db_input: the path to the sqlite database file, or a pd.DataFrame. :param visible_rows_num: the number of rows to be displayed in each table. :return: A dictionary with the string database representations in different formats.
Here is the function:
def convert(db_input: Union[str, Dict[str, pd.DataFrame]], visible_rows_num: int = 3) -> Dict[str, str]:
"""
Convert database data to string representations in different formats.
:param db_input: the path to the sqlite database file, or a pd.DataFrame.
:param visible_rows_num: the number of rows to be displayed in each table.
:return: A dictionary with the string database representations in different formats.
"""
if isinstance(db_input, str):
conn = sqlite3.connect(db_input)
cursor = conn.cursor()
cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
table_names = [name[0] for name in cursor.fetchall()]
dfs = {table_name: pd.read_sql_query(f"SELECT * FROM {table_name}", conn) for table_name in table_names}
elif isinstance(db_input, dict) and all(isinstance(df, pd.DataFrame) for df in db_input.values()):
dfs = db_input
else:
raise ValueError("db_input should be either a SQLite database file path or a dictionary of pandas DataFrames")
representations = {
"Markdown": "",
"HTML": "",
"LaTeX": "",
"CSV": "",
"TSV": "",
"reStructuredText": "",
"BBCode": "",
"MediaWiki": "",
"Org mode": "",
"PrettyTable": "",
"SQL": "",
}
for table_name, df in dfs.items():
table_data = {"cols": df.columns.tolist(), "rows": df.values.tolist()}
table_representations = convert_table(table_data, table_name, visible_rows_num)
for _format, table_representation in table_representations.items():
representations[_format] += table_representation + "\n\n"
return representations | Convert database data to string representations in different formats. :param db_input: the path to the sqlite database file, or a pd.DataFrame. :param visible_rows_num: the number of rows to be displayed in each table. :return: A dictionary with the string database representations in different formats. |
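Usage sketch with in-memory DataFrames instead of a SQLite file path:

dfs = {"users": pd.DataFrame({"id": [1, 2, 3, 4], "name": ["a", "b", "c", "d"]})}
reps = convert(dfs, visible_rows_num=2)
print(reps["Markdown"])  # first 2 rows of `users` plus a "..." placeholder row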
188,412 | import sqlite3
from typing import Dict, Union
import pandas as pd
import tiktoken
from real_agents.adapters.data_model.templates.skg_templates.table_templates import (
convert as convert_table,
)
from real_agents.adapters.schema import SQLDatabase
class SQLDatabase(SQLDatabase):
    @staticmethod
    def _pretty_format(headers: Any, result: List[Row]) -> str:
dicts = [dict(zip(headers, row)) for row in result]
tab_result = tabulate(tabular_data=dicts, headers="keys", tablefmt="psql")
if tab_result == "":
return EMPTY_RESULT_STR
return tab_result
def run(self, command: str, fetch: str = "all") -> str:
"""Execute a SQL command and return a string representing the results.
If the statement returns rows, a string of the results is returned.
If the statement returns no rows, an empty string is returned.
"""
with self._engine.begin() as connection:
if self._schema is not None:
connection.exec_driver_sql(f"SET search_path TO {self._schema}")
cursor = connection.execute(text(command))
if cursor.returns_rows:
headers = cursor.keys()
if fetch == "all":
result = cursor.fetchall()
elif fetch == "one":
# result = cursor.fetchone()[0] # type: ignore
result = [cursor.fetchone()] # type: ignore
else:
raise ValueError("Fetch parameter must be either 'one' or 'all'")
# pretty format
tab_result = self._pretty_format(headers, result)
return tab_result
return ""
The provided code snippet includes necessary dependencies for implementing the `serialize_db` function. Write a Python function `def serialize_db( db: SQLDatabase, serialize_method: str = "database", num_visible_rows: int = 3, max_tokens: int = 1000, ) -> str` to solve the following problem:
Convert database engine to a string representation.
Here is the function:
def serialize_db(
db: SQLDatabase,
serialize_method: str = "database",
num_visible_rows: int = 3,
max_tokens: int = 1000,
) -> str:
"""Convert database engine to a string representation."""
if serialize_method == "database":
# TODO: Now access the internal variable
setattr(db, "_sample_rows_in_table_info", num_visible_rows)
string = db.get_table_info()
# Truncate the string if it is too long
enc = tiktoken.get_encoding("cl100k_base")
enc_tokens = enc.encode(string)
if len(enc_tokens) > max_tokens:
string = enc.decode(enc_tokens[:max_tokens])
else:
raise ValueError("Unknown serialization method.")
return string | Convert database engine to a string representation. |
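Usage sketch against an in-memory SQLite engine (the table is illustrative):

from sqlalchemy import create_engine

engine = create_engine("sqlite:///:memory:")
pd.DataFrame({"id": [1, 2], "name": ["a", "b"]}).to_sql("users", engine, index=False)
print(serialize_db(SQLDatabase(engine), num_visible_rows=2, max_tokens=500))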
188,413 | import subprocess
import sys
from copy import deepcopy
from typing import Any, Dict, Union
import pandas as pd
from sqlalchemy import create_engine
import tiktoken
from real_agents.adapters.schema import SQLDatabase
The provided code snippet includes necessary dependencies for implementing the `convert` function. Write a Python function `def convert( table_data: Union[pd.DataFrame, Dict[str, Any]], table_name: str = "table", visible_rows_num: int = 3 ) -> Dict[str, str]` to solve the following problem:
Convert table data to string representations in different formats. :param table_data: A dictionary with "cols" (list of strings) and "rows" (list of lists of strings) as keys. :param table_name: The name of the table. :param visible_rows_num: The number of rows to be displayed in the representation. :return: A dictionary with the string table representations in different formats.
Here is the function:
def convert(
table_data: Union[pd.DataFrame, Dict[str, Any]], table_name: str = "table", visible_rows_num: int = 3
) -> Dict[str, str]:
"""
Convert table data to string representations in different formats.
:param table_data: A dictionary with "cols" (list of strings) and "rows"
(list of lists of strings) as keys.
:param table_name: The name of the table.
:param visible_rows_num: The number of rows to be displayed in the representation.
:return: A dictionary with the string table representations in different formats.
"""
def install_required_packages() -> None:
packages = ["tabulate", "prettytable"]
for package in packages:
subprocess.check_call([sys.executable, "-m", "pip", "install", package])
# Call the function to install the required packages
install_required_packages()
from prettytable import PrettyTable
# Handle situation when the table_data is already a dataframe, FIXME: this is a hack
new_table_data = {}
if isinstance(table_data, pd.DataFrame):
new_table_data["cols"] = table_data.columns
new_table_data["rows"] = table_data.values.tolist()
table_data = new_table_data
# Type check for table_data
if not isinstance(table_data, dict) or "cols" not in table_data or "rows" not in table_data:
raise TypeError("table_data must be a dictionary with 'cols' and 'rows' as keys.")
table_data_for_observable = deepcopy(table_data)
if len(table_data_for_observable["rows"]) > visible_rows_num:
table_data_for_observable["rows"] = table_data_for_observable["rows"][:visible_rows_num]
table_data_for_observable["rows"].append(["..."] * len(table_data_for_observable["cols"]))
# Create dataframe from table_data
df = pd.DataFrame(table_data_for_observable["rows"], columns=table_data_for_observable["cols"])
# Generate tables in different formats
markdown_table = df.to_markdown(index=False)
html_table = df.to_html(index=False)
latex_table = df.to_latex(index=False)
csv_table = df.to_csv(index=False)
tsv_table = df.to_csv(index=False, sep="\t")
rest_table = df.to_string(index=False)
def bbcode_mode_table(data_frame: pd.DataFrame) -> str:
bbcode_table = "[table]\n"
for row in data_frame.itertuples(index=False):
bbcode_table += "[tr]\n"
for value in row:
bbcode_table += f"[td]{value}[/td]\n"
bbcode_table += "[/tr]\n"
bbcode_table += "[/table]"
return bbcode_table
def mediawiki_mode_table(data_frame: pd.DataFrame) -> str:
mediawiki_table = '{| class="wikitable"\n|-\n'
for col in data_frame.columns:
mediawiki_table += f"! {col}\n"
for row in data_frame.itertuples(index=False):
mediawiki_table += "|-\n"
for value in row:
mediawiki_table += f"| {value}\n"
mediawiki_table += "|}"
return mediawiki_table
def org_mode_table(data_frame: pd.DataFrame) -> str:
org_table = (
"| "
+ " | ".join(data_frame.columns)
+ " |\n|-"
+ " | -".join(["-" * len(col) for col in data_frame.columns])
+ " |\n"
)
for row in data_frame.itertuples(index=False):
org_table += "| " + " | ".join([str(value) for value in row]) + " |\n"
return org_table
bbcode_table = bbcode_mode_table(df)
mediawiki_table = mediawiki_mode_table(df)
org_table = org_mode_table(df)
pretty_table = PrettyTable()
pretty_table.field_names = table_data["cols"]
for row in table_data["rows"]:
pretty_table.add_row(row)
pretty_table = str(pretty_table)
# New function to generate SQL table
def sql_mode_table(data_frame: pd.DataFrame, _table_name: str) -> str:
sql_table_str = f"CREATE TABLE {table_name}(\n"
for col in data_frame.columns:
sql_table_str += f"{col} text,\n"
# Remove the last comma and add the primary key constraint
sql_table_str = sql_table_str[:-2] + f",\nPRIMARY KEY ({data_frame.columns[0]})\n);"
sql_table_str += "\n/*\n{} example rows:\n".format(len(data_frame))
for i, _row in data_frame.iterrows():
_row = "\t".join([str(_cell) for _cell in _row.to_list()])
sql_table_str += f"{_row}\n"
sql_table_str += "*/"
return sql_table_str
sql_table = sql_mode_table(df, table_name)
# Return the representation in different formats as a dictionary
return {
"Markdown": markdown_table,
"HTML": html_table,
"LaTeX": latex_table,
"CSV": csv_table,
"TSV": tsv_table,
"reStructuredText": rest_table,
"BBCode": bbcode_table,
"MediaWiki": mediawiki_table,
"Org mode": org_table,
"PrettyTable": pretty_table,
"SQL": sql_table,
} | Convert table data to string representations in different formats. :param table_data: A dictionary with "cols" (list of strings) and "rows" (list of lists of strings) as keys. :param table_name: The name of the table. :param visible_rows_num: The number of rows to be displayed in the representation. :return: A dictionary with the string table representations in different formats. |
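Usage sketch with the dict form (values are illustrative):

table = {"cols": ["name", "age"], "rows": [["a", 1], ["b", 2], ["c", 3], ["d", 4]]}
reps = convert(table, table_name="people", visible_rows_num=2)
print(reps["SQL"])       # CREATE TABLE people(...) plus sampled rows in a comment
print(reps["Markdown"])  # 2 visible rows followed by a "..." placeholder row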
188,414 | import subprocess
import sys
from copy import deepcopy
from typing import Any, Dict, Union
import pandas as pd
from sqlalchemy import create_engine
import tiktoken
from real_agents.adapters.schema import SQLDatabase
class SQLDatabase(SQLDatabase):
    @staticmethod
    def _pretty_format(headers: Any, result: List[Row]) -> str:
dicts = [dict(zip(headers, row)) for row in result]
tab_result = tabulate(tabular_data=dicts, headers="keys", tablefmt="psql")
if tab_result == "":
return EMPTY_RESULT_STR
return tab_result
def run(self, command: str, fetch: str = "all") -> str:
"""Execute a SQL command and return a string representing the results.
If the statement returns rows, a string of the results is returned.
If the statement returns no rows, an empty string is returned.
"""
with self._engine.begin() as connection:
if self._schema is not None:
connection.exec_driver_sql(f"SET search_path TO {self._schema}")
cursor = connection.execute(text(command))
if cursor.returns_rows:
headers = cursor.keys()
if fetch == "all":
result = cursor.fetchall()
elif fetch == "one":
# result = cursor.fetchone()[0] # type: ignore
result = [cursor.fetchone()] # type: ignore
else:
raise ValueError("Fetch parameter must be either 'one' or 'all'")
# pretty format
tab_result = self._pretty_format(headers, result)
return tab_result
return ""
The provided code snippet includes necessary dependencies for implementing the `serialize_df` function. Write a Python function `def serialize_df( table_data: pd.DataFrame, table_name: str, table_path: str, serialize_method: str = "tsv", num_visible_rows: int = 3, max_tokens: int = 1000, data_dir_splitter: str = "backend/data/", ) -> str` to solve the following problem:
Convert dataframe to a string representation.
Here is the function:
def serialize_df(
table_data: pd.DataFrame,
table_name: str,
table_path: str,
serialize_method: str = "tsv",
num_visible_rows: int = 3,
max_tokens: int = 1000,
data_dir_splitter: str = "backend/data/",
) -> str:
"""Convert dataframe to a string representation."""
if serialize_method == "tsv":
# Here it means ignore the "path/to/the/data/<user_id/" part of the path
pretty_path = "/".join(table_path.split(data_dir_splitter)[-1].strip("/").split("/")[1:])
string = (
"Here are table columns and the first {} rows of the table from the path {}"
'(only a small part of the whole table) called "{}":\n'.format(num_visible_rows, pretty_path, table_name)
)
string += table_data.head(num_visible_rows).to_csv(sep="\t", index=False)
# Truncate the string if it is too long
enc = tiktoken.get_encoding("cl100k_base")
enc_tokens = enc.encode(string)
if len(enc_tokens) > max_tokens:
string = enc.decode(enc_tokens[:max_tokens])
elif serialize_method == "database":
engine = create_engine("sqlite:///:memory:")
table_data.to_sql(table_name, engine)
db = SQLDatabase(engine)
# TODO: Now access the internal variable
setattr(db, "_sample_rows_in_table_info", num_visible_rows)
string = db.get_table_info()
else:
raise ValueError("Unknown serialization method.")
return string | Convert dataframe to a string representation. |
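Usage sketch of the default "tsv" path; the path is illustrative, and only the portion after backend/data/<user_id>/ appears in the header line:

df = pd.DataFrame({"id": [1, 2, 3, 4], "name": ["a", "b", "c", "d"]})
print(serialize_df(df, "users", "backend/data/user_1/users.csv", num_visible_rows=2))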
188,415 | from __future__ import annotations
import logging
import sys
from typing import Any, Callable, Dict, List, Mapping, Optional, Tuple, Union
from pydantic import Extra, Field, root_validator
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.chat_models.base import BaseChatModel
from langchain.schema import (
AIMessage,
BaseMessage,
ChatGeneration,
ChatMessage,
ChatResult,
HumanMessage,
SystemMessage,
)
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
def _create_retry_decorator(llm: ChatOpenAI) -> Callable[[Any], Any]:
import openai
min_seconds = 1
max_seconds = 60
# Wait 2^x * 1 second between each retry starting with
# 4 seconds, then up to 10 seconds, then 10 seconds afterwards
return retry(
reraise=True,
stop=stop_after_attempt(llm.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(
retry_if_exception_type(openai.error.Timeout)
| retry_if_exception_type(openai.error.APIError)
| retry_if_exception_type(openai.error.APIConnectionError)
| retry_if_exception_type(openai.error.RateLimitError)
| retry_if_exception_type(openai.error.ServiceUnavailableError)
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
class ChatOpenAI(BaseChatModel):
"""Wrapper around OpenAI Chat large language models.
To use, you should have the ``openai`` python package installed, and the
environment variable ``OPENAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.chat_models import ChatOpenAI
openai = ChatOpenAI(model_name="gpt-3.5-turbo")
"""
client: Any #: :meta private:
model_name: str = "gpt-3.5-turbo"
"""Model name to use."""
temperature: float = 0.7
"""What sampling temperature to use."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
openai_api_key: Optional[str] = None
"""Base URL path for API requests,
leave blank if not using a proxy or service emulator."""
openai_api_base: Optional[str] = None
openai_organization: Optional[str] = None
request_timeout: Optional[Union[float, Tuple[float, float]]] = None
"""Timeout for requests to OpenAI completion API. Default is 600 seconds."""
max_retries: int = 6
"""Maximum number of retries to make when generating."""
streaming: bool = False
"""Whether to stream the results or not."""
n: int = 1
"""Number of chat completions to generate for each prompt."""
max_tokens: Optional[int] = None
"""Maximum number of tokens to generate."""
stop: Optional[List[str]] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.ignore
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
if field_name not in all_required_field_names:
logger.warning(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
disallowed_model_kwargs = all_required_field_names | {"model"}
invalid_model_kwargs = disallowed_model_kwargs.intersection(extra.keys())
if invalid_model_kwargs:
raise ValueError(
f"Parameters {invalid_model_kwargs} should be specified explicitly. "
f"Instead they were passed in as part of `model_kwargs` parameter."
)
values["model_kwargs"] = extra
return values
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
openai_organization = get_from_dict_or_env(
values,
"openai_organization",
"OPENAI_ORGANIZATION",
default="",
)
openai_api_base = get_from_dict_or_env(
values,
"openai_api_base",
"OPENAI_API_BASE",
default="",
)
try:
import openai
except ImportError:
raise ValueError(
"Could not import openai python package. " "Please install it with `pip install openai`.")
if openai_organization:
openai.organization = openai_organization
if openai_api_base:
openai.api_base = openai_api_base
try:
values["client"] = openai.ChatCompletion
except AttributeError:
raise ValueError(
"`openai` has no `ChatCompletion` attribute, this is likely "
"due to an old version of the openai package. Try upgrading it "
"with `pip install --upgrade openai`."
)
if values["n"] < 1:
raise ValueError("n must be at least 1.")
if values["n"] > 1 and values["streaming"]:
raise ValueError("n must be 1 when streaming.")
return values
    @property
    def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API."""
return {
"model": self.model_name,
"request_timeout": self.request_timeout,
"max_tokens": self.max_tokens,
"stream": self.streaming,
"n": self.n,
"temperature": self.temperature,
**self.model_kwargs,
}
def _create_retry_decorator(self) -> Callable[[Any], Any]:
import openai
min_seconds = 1
max_seconds = 60
# Wait 2^x * 1 second between each retry starting with
# 4 seconds, then up to 10 seconds, then 10 seconds afterwards
return retry(
reraise=True,
stop=stop_after_attempt(self.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(
retry_if_exception_type(openai.error.Timeout)
| retry_if_exception_type(openai.error.APIError)
| retry_if_exception_type(openai.error.APIConnectionError)
| retry_if_exception_type(openai.error.RateLimitError)
| retry_if_exception_type(openai.error.ServiceUnavailableError)
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
def completion_with_retry(self, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = self._create_retry_decorator()
        @retry_decorator
        def _completion_with_retry(**kwargs: Any) -> Any:
return self.client.create(**kwargs)
return _completion_with_retry(**kwargs)
def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
overall_token_usage: dict = {}
for output in llm_outputs:
if output is None:
# Happens in streaming
continue
token_usage = output["token_usage"]
for k, v in token_usage.items():
if k in overall_token_usage:
overall_token_usage[k] += v
else:
overall_token_usage[k] = v
return {"token_usage": overall_token_usage, "model_name": self.model_name}
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> ChatResult:
import openai
if self.openai_api_key:
import os
# Use the pass-in key, if the user provides
openai.api_key = self.openai_api_key
else:
# Use the environment variable if neither is provided
import os
openai_api_key = os.environ.get("OPENAI_API_KEY", None)
openai.api_key = openai_api_key
if self.stop is not None:
if stop is None:
stop = self.stop
else:
stop.extend(self.stop)
message_dicts, params = self._create_message_dicts(messages, stop)
if self.streaming:
inner_completion = ""
default_role = "assistant"
params["stream"] = True
for stream_resp in self.completion_with_retry(messages=message_dicts,
**params):
role = stream_resp["choices"][0]["delta"].get("role", default_role)
if role is None:
role = default_role
token = stream_resp["choices"][0]["delta"].get("content", "")
inner_completion += token
if run_manager:
run_manager.on_llm_new_token(token)
message = _convert_dict_to_message(
{"content": inner_completion, "role": role})
return ChatResult(generations=[ChatGeneration(message=message)])
response = self.completion_with_retry(messages=message_dicts, **params)
return self._create_chat_result(response)
def _create_message_dicts(
self, messages: List[BaseMessage], stop: Optional[List[str]]
) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
params: Dict[str, Any] = {**{"model": self.model_name}, **self._default_params}
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
message_dicts = [_convert_message_to_dict(m) for m in messages]
return message_dicts, params
def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult:
generations = []
for res in response["choices"]:
message = _convert_dict_to_message(res["message"])
gen = ChatGeneration(message=message)
generations.append(gen)
llm_output = {"token_usage": response["usage"], "model_name": self.model_name}
return ChatResult(generations=generations, llm_output=llm_output)
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
) -> ChatResult:
import os
import openai
openai_api_base = os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1")
openai_api_key = os.environ.get("OPENAI_API_KEY", None)
openai.api_key = openai_api_key
openai.api_base = openai_api_base
message_dicts, params = self._create_message_dicts(messages, stop)
if self.streaming:
inner_completion = ""
role = "assistant"
params["stream"] = True
async for stream_resp in await acompletion_with_retry(self,
messages=message_dicts,
**params):
role = stream_resp["choices"][0]["delta"].get("role", role)
token = stream_resp["choices"][0]["delta"].get("content", "")
inner_completion += token
if run_manager:
await run_manager.on_llm_new_token(token)
message = _convert_dict_to_message(
{"content": inner_completion, "role": role})
return ChatResult(generations=[ChatGeneration(message=message)])
else:
response = await acompletion_with_retry(self, messages=message_dicts,
**params)
return self._create_chat_result(response)
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"model_name": self.model_name}, **self._default_params}
def _llm_type(self) -> str:
"""Return type of chat model."""
return "openai-chat"
def get_num_tokens(self, text: str) -> int:
"""Calculate num tokens with tiktoken package."""
# tiktoken NOT supported for Python 3.7 or below
if sys.version_info[1] <= 7:
return super().get_num_tokens(text)
try:
import tiktoken
except ImportError:
raise ValueError(
"Could not import tiktoken python package. "
"This is needed in order to calculate get_num_tokens. "
"Please install it with `pip install tiktoken`."
)
# create a GPT-3.5-Turbo encoder instance
enc = tiktoken.encoding_for_model(self.model_name)
# encode the text using the GPT-3.5-Turbo encoder
tokenized_text = enc.encode(text)
# calculate the number of tokens in the encoded text
return len(tokenized_text)
def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
"""Calculate num tokens for gpt-3.5-turbo and gpt-4 with tiktoken package.
Official documentation: https://github.com/openai/openai-cookbook/blob/
main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb"""
try:
import tiktoken
except ImportError:
raise ValueError(
"Could not import tiktoken python package. "
"This is needed in order to calculate get_num_tokens. "
"Please install it with `pip install tiktoken`."
)
model = self.model_name
if model == "gpt-3.5-turbo":
# gpt-3.5-turbo may change over time.
# Returning num tokens assuming gpt-3.5-turbo-0301.
model = "gpt-3.5-turbo-0301"
elif model == "gpt-4":
# gpt-4 may change over time.
# Returning num tokens assuming gpt-4-0314.
model = "gpt-4-0314"
# Returns the number of tokens used by a list of messages.
try:
encoding = tiktoken.encoding_for_model(model)
except KeyError:
logger.warning("Warning: model not found. Using cl100k_base encoding.")
encoding = tiktoken.get_encoding("cl100k_base")
if model == "gpt-3.5-turbo-0301":
# every message follows <im_start>{role/name}\n{content}<im_end>\n
tokens_per_message = 4
# if there's a name, the role is omitted
tokens_per_name = -1
elif model == "gpt-4-0314":
tokens_per_message = 3
tokens_per_name = 1
else:
raise NotImplementedError(
f"get_num_tokens_from_messages() is not presently implemented "
f"for model {model}."
"See https://github.com/openai/openai-python/blob/main/chatml.md for "
"information on how messages are converted to tokens."
)
num_tokens = 0
messages_dict = [_convert_message_to_dict(m) for m in messages]
for message in messages_dict:
num_tokens += tokens_per_message
for key, value in message.items():
num_tokens += len(encoding.encode(value))
if key == "name":
num_tokens += tokens_per_name
# every reply is primed with <im_start>assistant
num_tokens += 3
return num_tokens
The provided code snippet includes necessary dependencies for implementing the `acompletion_with_retry` function. Write a Python function `async def acompletion_with_retry(llm: ChatOpenAI, **kwargs: Any) -> Any` to solve the following problem:
Use tenacity to retry the async completion call.
Here is the function:
async def acompletion_with_retry(llm: ChatOpenAI, **kwargs: Any) -> Any:
"""Use tenacity to retry the async completion call."""
retry_decorator = _create_retry_decorator(llm)
@retry_decorator
async def _completion_with_retry(**kwargs: Any) -> Any:
# Use OpenAI's async api https://github.com/openai/openai-python#async-api
return await llm.client.acreate(**kwargs)
return await _completion_with_retry(**kwargs) | Use tenacity to retry the async completion call. |
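Usage sketch (requires a valid OPENAI_API_KEY, and assumes the class's validators have set up `client` as in upstream LangChain; the message content is illustrative):

async def demo() -> Any:
    llm = ChatOpenAI(model_name="gpt-3.5-turbo")
    return await acompletion_with_retry(
        llm, messages=[{"role": "user", "content": "hi"}], model=llm.model_name
    )

# import asyncio; asyncio.run(demo())  # uncomment to hit the live API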
188,416 | from __future__ import annotations
import logging
import sys
from typing import Any, Callable, Dict, List, Mapping, Optional, Tuple, Union
from pydantic import Extra, Field, root_validator
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.chat_models.base import BaseChatModel
from langchain.schema import (
AIMessage,
BaseMessage,
ChatGeneration,
ChatMessage,
ChatResult,
HumanMessage,
SystemMessage,
)
from langchain.utils import get_from_dict_or_env
def _convert_dict_to_message(_dict: dict) -> BaseMessage:
role = _dict["role"]
if role == "user":
return HumanMessage(content=_dict["content"])
elif role == "assistant":
return AIMessage(content=_dict["content"])
elif role == "system":
return SystemMessage(content=_dict["content"])
else:
return ChatMessage(content=_dict["content"], role=role) | null |
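
As a quick illustration of the mapping (using the langchain schema classes imported above):

msg = _convert_dict_to_message({"role": "assistant", "content": "Hi there!"})
assert isinstance(msg, AIMessage) and msg.content == "Hi there!"

# Unknown roles fall through to the generic ChatMessage wrapper.
other = _convert_dict_to_message({"role": "function", "content": "42"})
assert isinstance(other, ChatMessage) and other.role == "function"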
188,417 | from __future__ import annotations
import logging
import sys
from typing import Any, Callable, Dict, List, Mapping, Optional, Tuple, Union
from pydantic import Extra, Field, root_validator
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.chat_models.base import BaseChatModel
from langchain.schema import (
AIMessage,
BaseMessage,
ChatGeneration,
ChatMessage,
ChatResult,
HumanMessage,
SystemMessage,
)
from langchain.utils import get_from_dict_or_env
def _convert_message_to_dict(message: BaseMessage) -> dict:
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
elif isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
else:
raise ValueError(f"Got unknown type {message}")
if "name" in message.additional_kwargs:
message_dict["name"] = message.additional_kwargs["name"]
return message_dict | null |
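
Together with `_convert_dict_to_message` above, this gives a stable round trip for the standard roles (a sketch; note that `additional_kwargs` such as `name` survive only in the message-to-dict direction):

original = HumanMessage(content="What is 2 + 2?")
wire = _convert_message_to_dict(original)   # {"role": "user", "content": "What is 2 + 2?"}
restored = _convert_dict_to_message(wire)
assert isinstance(restored, HumanMessage)
assert restored.content == original.content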
188,418 | import asyncio
import inspect
import warnings
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Mapping, Optional, Sequence
import langchain
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.base import BaseCallbackManager
from langchain.callbacks.manager import (
AsyncCallbackManager,
AsyncCallbackManagerForLLMRun,
CallbackManager,
CallbackManagerForLLMRun,
Callbacks,
)
from langchain.schema import (
BaseMessage,
ChatGeneration,
ChatResult,
HumanMessage,
LLMResult,
PromptValue,
)
from pydantic import Extra, Field, root_validator
def _get_verbosity() -> bool:
return langchain.verbose | null |
188,419 | import struct
import json
import datetime
from typing import Any, Generator
from bson.objectid import ObjectId
from flask import jsonify, request, Response
from backend.app import app
from backend.utils.user_conversation_storage import get_user_conversation_storage
from backend.main import threading_pool, logger
from backend.schemas import DEFAULT_USER_ID
from backend.schemas import INTERNAL, UNFOUND
def get_user_conversation_storage():
"""Connects to mongodb."""
if "user_conversation_storage" not in g:
g.user_conversation_storage = pymongo.MongoClient("mongodb://{0}:27017/".format(os.getenv("MONGO_SERVER")))
return g.user_conversation_storage["xlang"]
INTERNAL = 500
DEFAULT_USER_ID = "DefaultUser"
The provided code snippet includes necessary dependencies for implementing the `get_conversation_list` function. Write a Python function `def get_conversation_list() -> Response` to solve the following problem:
Gets the history conversations.
Here is the function:
def get_conversation_list() -> Response:
"""Gets the history conversations."""
request_json = request.get_json()
user_id = request_json.pop("user_id", DEFAULT_USER_ID)
conversations = []
try:
# Login with API Key, then retrieve the user history based
# on the hashed API key.
db = get_user_conversation_storage()
conversation_list = db.conversation.find({"user_id": user_id})
for conversation in conversation_list:
conversations.append(
{
"id": str(conversation["_id"]),
"name": conversation["name"],
"folderId": conversation["folder_id"],
}
)
except Exception as e:
return Response(response=None,
status=f'{INTERNAL} error fetch conversation list')
return jsonify(conversations) | Gets the history conversations. |
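
The snippet omits the Flask routing; a plausible binding would look like the following (the route path is hypothetical, not taken from the source):

# Hypothetical route binding; the real path may differ.
app.add_url_rule(
    "/api/conversation_list",
    view_func=get_conversation_list,
    methods=["POST"],
)
# The endpoint expects a JSON body such as {"user_id": "DefaultUser"}.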
188,420 | import struct
import json
import datetime
from typing import Any, Generator
from bson.objectid import ObjectId
from flask import jsonify, request, Response
from backend.app import app
from backend.utils.user_conversation_storage import get_user_conversation_storage
from backend.main import threading_pool, logger
from backend.schemas import DEFAULT_USER_ID
from backend.schemas import INTERNAL, UNFOUND
def get_user_conversation_storage():
"""Connects to mongodb."""
if "user_conversation_storage" not in g:
g.user_conversation_storage = pymongo.MongoClient("mongodb://{0}:27017/".format(os.getenv("MONGO_SERVER")))
return g.user_conversation_storage["xlang"]
INTERNAL = 500
DEFAULT_USER_ID = "DefaultUser"
The provided code snippet includes necessary dependencies for implementing the `get_folder_list` function. Write a Python function `def get_folder_list() -> Response` to solve the following problem:
Gets the folder list.
Here is the function:
def get_folder_list() -> Response:
"""Gets the folder list."""
user_id = DEFAULT_USER_ID
folders = []
try:
db = get_user_conversation_storage()
folder_list = db.folder.find({"user_id": user_id})
for folder in folder_list:
folders.append(
{
"id": str(folder["_id"]),
"name": folder["name"],
"type": "chat",
}
)
return jsonify({"success": True, "data": folders})
except Exception as e:
return Response(response=None, status=f'{INTERNAL} error fetch folder list') | Gets the folder list. |
188,421 | import struct
import json
import datetime
from typing import Any, Generator
from bson.objectid import ObjectId
from flask import jsonify, request, Response
from backend.app import app
from backend.utils.user_conversation_storage import get_user_conversation_storage
from backend.main import threading_pool, logger
from backend.schemas import DEFAULT_USER_ID
from backend.schemas import INTERNAL, UNFOUND
def process_rich_content_item(data: dict, message_id: str) -> dict:
"""Processes the rich content from db format into frontend renderable format."""
processed_items: dict = {"intermediateSteps": [], "finalAnswer": []}
if "intermediate_steps" in data:
for item in data["intermediate_steps"]:
processed_items["intermediateSteps"].append(
{"message_id": message_id, "content": item["text"],
"type": item["type"]}
)
if "final_answer" in data:
for item in data["final_answer"]:
processed_items["finalAnswer"].append(
{"message_id": message_id, "content": item["text"],
"type": item["type"]}
)
return processed_items
def get_user_conversation_storage():
"""Connects to mongodb."""
if "user_conversation_storage" not in g:
g.user_conversation_storage = pymongo.MongoClient("mongodb://{0}:27017/".format(os.getenv("MONGO_SERVER")))
return g.user_conversation_storage["xlang"]
logger = init_log(
error=os.path.join(".logging", "error.log"),
runtime=os.path.join(".logging", "runtime.log"),
serialize=os.path.join(".logging", "serialize.log"),
trace=os.path.join(".logging", "trace.log"),
)
INTERNAL = 500
The provided code snippet includes necessary dependencies for implementing the `get_conversation_content` function. Write a Python function `def get_conversation_content() -> Response` to solve the following problem:
Gets the conversation content for one assigned conversation.
Here is the function:
def get_conversation_content() -> Response:
"""Gets the conversation content for one assigned conversation."""
request_json = request.get_json()
conversation_id = request_json.get("chat_id", None)
if conversation_id is not None:
try:
db = get_user_conversation_storage()
conversation = db.conversation.find_one({"_id": ObjectId(conversation_id)})
message_list = db.message.find({"conversation_id": conversation_id}).sort(
"_id", -1)
messages = [
{
"id": message["message_id"],
"parent_message_id": message["parent_message_id"],
"role": message["role"],
"content": message["data_for_human"] if message[
"role"] == "user" else None,
"type": "rich_message" if isinstance(message["data_for_human"],
dict) else "",
"richContent": process_rich_content_item(message["data_for_human"],
message["message_id"])
if isinstance(message["data_for_human"], dict)
else None,
}
for message in message_list
]
            def _get_activated_conversation_branch(messages: list) -> list:
                # By default, the latest message is the end point, i.e., the
                # tip of the currently activated branch of messages.
                activated_messages: list = []
                end_point = messages[0]["id"]
                # Walk parent pointers from the tip back to the root (-1).
                while len(messages) > 0 and end_point != -1:
                    flag = False
                    for msg in messages:
                        if msg["id"] == end_point:
                            # A self-referencing message would loop forever; bail out.
                            if end_point == msg["parent_message_id"]:
                                flag = False
                                break
                            activated_messages = [msg] + activated_messages
                            end_point = msg["parent_message_id"]
                            flag = True
                            break
                    # Parent id not found among the fetched messages: stop walking.
                    if not flag:
                        break
                return activated_messages
# Find the current activated branch of messages as frontend only shows one branch
if messages:
messages = _get_activated_conversation_branch(messages)
logger.bind(msg_head=f"get_activated_message_list").debug(messages)
conversation = {
"id": conversation_id,
"name": conversation["name"],
"messages": messages,
"agent": conversation["agent"],
"prompt": conversation["prompt"],
"temperature": conversation["temperature"],
"folderId": conversation["folder_id"],
"bookmarkedMessagesIds": conversation["bookmarked_message_ids"],
"selectedCodeInterpreterPlugins": conversation[
"selected_code_interpreter_plugins"],
"selectedPlugins": conversation["selected_plugins"],
}
return jsonify(conversation)
except Exception as e:
import traceback
traceback.print_exc()
return Response(response=None,
status=f'{INTERNAL} error fetch conversation')
else:
return Response(response=None, status=f'{INTERNAL} error fetch conversation') | Gets the conversation content for one assigned conversation. |
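
The parent-pointer walk in `_get_activated_conversation_branch` can be shown standalone. The simplified dictionary-based variant below (hypothetical data) reconstructs the branch ending at the newest message, which is why the query sorts by `_id` descending:

# Newest-first, as returned by .sort("_id", -1).
messages = [
    {"id": "m3", "parent_message_id": "m2"},
    {"id": "m2b", "parent_message_id": "m1"},  # abandoned sibling branch
    {"id": "m2", "parent_message_id": "m1"},
    {"id": "m1", "parent_message_id": -1},
]

lookup = {m["id"]: m for m in messages}
branch, end_point = [], messages[0]["id"]
while end_point != -1 and end_point in lookup:
    msg = lookup[end_point]
    branch.insert(0, msg)            # prepend, so the root ends up first
    end_point = msg["parent_message_id"]

assert [m["id"] for m in branch] == ["m1", "m2", "m3"]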
188,422 | import struct
import json
import datetime
from typing import Any, Generator
from bson.objectid import ObjectId
from flask import jsonify, request, Response
from backend.app import app
from backend.utils.user_conversation_storage import get_user_conversation_storage
from backend.main import threading_pool, logger
from backend.schemas import DEFAULT_USER_ID
from backend.schemas import INTERNAL, UNFOUND
def get_user_conversation_storage():
"""Connects to mongodb."""
if "user_conversation_storage" not in g:
g.user_conversation_storage = pymongo.MongoClient("mongodb://{0}:27017/".format(os.getenv("MONGO_SERVER")))
return g.user_conversation_storage["xlang"]
INTERNAL = 500
The provided code snippet includes necessary dependencies for implementing the `update_conversation` function. Write a Python function `def update_conversation() -> Response` to solve the following problem:
Updates a conversation name.
Here is the function:
def update_conversation() -> Response:
"""Updates a conversation name."""
try:
request_json = request.get_json()
conversations = request_json["conversations"]
db = get_user_conversation_storage()
messages = []
success = True
update_key_dict = {"name": "name", "folder_id": "folderId"}
for conversation_to_update in conversations:
conversation_id = conversation_to_update["id"]
name = conversation_to_update["name"]
updates = {}
for key in update_key_dict.keys():
if update_key_dict[key] in conversation_to_update:
updates[key] = conversation_to_update[update_key_dict[key]]
if conversation_id is not None:
try:
db.conversation.update_one({"_id": ObjectId(conversation_id)},
{"$set": updates})
messages.append("Conversation name updated to {}.".format(name))
except Exception as e:
messages.append(str(e))
success = False
else:
success = False
messages.append("Missing conversation id or title.")
return jsonify({"success": success, "message": " ".join(messages)})
except Exception as e:
return Response(response=None, status=f"{INTERNAL} error fetch conversation") | Updates a conversation name. |
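
For reference, the request body this endpoint expects follows the `update_key_dict` mapping above; a client-side sketch (the URL and ids are hypothetical):

import requests

payload = {
    "conversations": [
        {
            "id": "65a000000000000000000001",
            "name": "Renamed chat",
            "folderId": "65a000000000000000000002",
        }
    ]
}
resp = requests.post("http://localhost:8000/api/update_conversation", json=payload)
print(resp.json())  # e.g. {"success": True, "message": "Conversation name updated to Renamed chat."}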
188,423 | import struct
import json
import datetime
from typing import Any, Generator
from bson.objectid import ObjectId
from flask import jsonify, request, Response
from backend.app import app
from backend.utils.user_conversation_storage import get_user_conversation_storage
from backend.main import threading_pool, logger
from backend.schemas import DEFAULT_USER_ID
from backend.schemas import INTERNAL, UNFOUND
def get_user_conversation_storage():
"""Connects to mongodb."""
if "user_conversation_storage" not in g:
g.user_conversation_storage = pymongo.MongoClient("mongodb://{0}:27017/".format(os.getenv("MONGO_SERVER")))
return g.user_conversation_storage["xlang"]
INTERNAL = 500
The provided code snippet includes necessary dependencies for implementing the `update_folder` function. Write a Python function `def update_folder() -> Response` to solve the following problem:
Update a folder name.
Here is the function:
def update_folder() -> Response:
"""Update a folder name."""
request_json = request.get_json()
folder_id = request_json["folder_id"]
folder_name = request_json["name"]
try:
db = get_user_conversation_storage()
db.folder.update_one({"_id": ObjectId(folder_id)},
{"$set": {"name": folder_name}})
return jsonify({"success": True,
"message": "Folder name updated to {}.".format(folder_name)})
except Exception as e:
return Response(response=None, status=f"{INTERNAL} error update folder") | Update a folder name. |
188,424 | import struct
import json
import datetime
from typing import Any, Generator
from bson.objectid import ObjectId
from flask import jsonify, request, Response
from backend.app import app
from backend.utils.user_conversation_storage import get_user_conversation_storage
from backend.main import threading_pool, logger
from backend.schemas import DEFAULT_USER_ID
from backend.schemas import INTERNAL, UNFOUND
def get_user_conversation_storage():
"""Connects to mongodb."""
if "user_conversation_storage" not in g:
g.user_conversation_storage = pymongo.MongoClient("mongodb://{0}:27017/".format(os.getenv("MONGO_SERVER")))
return g.user_conversation_storage["xlang"]
UNFOUND = 404
INTERNAL = 500
DEFAULT_USER_ID = "DefaultUser"
The provided code snippet includes necessary dependencies for implementing the `register_folder` function. Write a Python function `def register_folder() -> Response` to solve the following problem:
Creates a new folder.
Here is the function:
def register_folder() -> Response:
"""Creates a new folder."""
request_json = request.get_json()
user_id = request_json.pop("user_id", DEFAULT_USER_ID)
folder = request_json.get("folder", None)
if folder:
try:
db = get_user_conversation_storage()
folder = db.folder.insert_one({"name": folder["name"], "user_id": user_id})
return jsonify({"id": str(folder.inserted_id),
"message": "Register folder successfully."})
except Exception as e:
return Response(response=None, status=f"{INTERNAL} error register folder")
else:
return Response(response=None, status=f"{UNFOUND} missing folder") | Creates a new folder. |
188,425 | import struct
import json
import datetime
from typing import Any, Generator
from bson.objectid import ObjectId
from flask import jsonify, request, Response
from backend.app import app
from backend.utils.user_conversation_storage import get_user_conversation_storage
from backend.main import threading_pool, logger
from backend.schemas import DEFAULT_USER_ID
from backend.schemas import INTERNAL, UNFOUND
def get_user_conversation_storage():
"""Connects to mongodb."""
if "user_conversation_storage" not in g:
g.user_conversation_storage = pymongo.MongoClient("mongodb://{0}:27017/".format(os.getenv("MONGO_SERVER")))
return g.user_conversation_storage["xlang"]
UNFOUND = 404
INTERNAL = 500
DEFAULT_USER_ID = "DefaultUser"
The provided code snippet includes necessary dependencies for implementing the `register_conversation` function. Write a Python function `def register_conversation() -> Response` to solve the following problem:
Creates a new conversation.
Here is the function:
def register_conversation() -> Response:
"""Creates a new conversation."""
request_json = request.get_json()
user_id = request_json.pop("user_id", DEFAULT_USER_ID)
conversation = request_json.get("conversation", None)
if conversation:
try:
db = get_user_conversation_storage()
conversation_id = conversation["id"]
if conversation_id is not None and db.conversation.find_one(
{"_id": ObjectId(conversation_id)}):
updates = {
"name": conversation["name"],
"agent": conversation["agent"],
"prompt": conversation["prompt"],
"temperature": conversation["temperature"],
"folder_id": conversation["folderId"],
"bookmarked_message_ids": conversation.get("bookmarkedMessagesIds",
None),
"selected_code_interpreter_plugins": conversation[
"selectedCodeInterpreterPlugins"],
"selected_plugins": conversation["selectedPlugins"],
}
db.conversation.update_one({"_id": ObjectId(conversation_id)},
{"$set": updates})
else:
conversation = db.conversation.insert_one(
{
"name": conversation["name"],
"agent": conversation["agent"],
"prompt": conversation["prompt"],
"temperature": conversation["temperature"],
"folder_id": conversation["folderId"],
"bookmarked_message_ids": conversation.get(
"bookmarkedMessagesIds", None),
"hashed_api_key": "",
"user_id": user_id,
"selected_code_interpreter_plugins": conversation[
"selectedCodeInterpreterPlugins"],
"selected_plugins": conversation["selectedPlugins"],
"timestamp": datetime.datetime.utcnow(),
}
)
conversation_id = str(conversation.inserted_id)
return jsonify({"id": conversation_id})
except Exception as e:
return Response(response=None,
status=f"{INTERNAL} error register conversation")
else:
return Response(response=None, status=f"{UNFOUND} missing conversation") | Creates a new conversation. |
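
The find-then-branch pattern above could also be expressed as a single MongoDB upsert. A sketch of the alternative (not the source's approach, and assuming a valid client-supplied id, i.e. ignoring the path where MongoDB mints a new one):

# One round trip instead of find_one + update/insert; $setOnInsert
# reproduces the fields the source only sets on first insert.
result = db.conversation.update_one(
    {"_id": ObjectId(conversation_id)},
    {
        "$set": updates,
        "$setOnInsert": {
            "hashed_api_key": "",
            "user_id": user_id,
            "timestamp": datetime.datetime.utcnow(),
        },
    },
    upsert=True,
)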
188,426 | import struct
import json
import datetime
from typing import Any, Generator
from bson.objectid import ObjectId
from flask import jsonify, request, Response
from backend.app import app
from backend.utils.user_conversation_storage import get_user_conversation_storage
from backend.main import threading_pool, logger
from backend.schemas import DEFAULT_USER_ID
from backend.schemas import INTERNAL, UNFOUND
def get_user_conversation_storage():
"""Connects to mongodb."""
if "user_conversation_storage" not in g:
g.user_conversation_storage = pymongo.MongoClient("mongodb://{0}:27017/".format(os.getenv("MONGO_SERVER")))
return g.user_conversation_storage["xlang"]
The provided code snippet includes necessary dependencies for implementing the `delete_conversation` function. Write a Python function `def delete_conversation() -> Response` to solve the following problem:
Deletes a conversation.
Here is the function:
def delete_conversation() -> Response:
"""Deletes a conversation."""
request_json = request.get_json()
chat_id = request_json.get("chat_id", None)
if chat_id:
try:
db = get_user_conversation_storage()
db.conversation.delete_one({"_id": ObjectId(chat_id)})
db.message.delete_many({"conversation_id": chat_id})
return jsonify({"success": True, "message": "Conversation is deleted."})
except Exception as e:
return jsonify({"success": False, "message": str(e)})
else:
return jsonify({"success": False, "message": "chat_id is missing"}) | Deletes a conversation. |
188,427 | import struct
import json
import datetime
from typing import Any, Generator
from bson.objectid import ObjectId
from flask import jsonify, request, Response
from backend.app import app
from backend.utils.user_conversation_storage import get_user_conversation_storage
from backend.main import threading_pool, logger
from backend.schemas import DEFAULT_USER_ID
from backend.schemas import INTERNAL, UNFOUND
def get_user_conversation_storage():
"""Connects to mongodb."""
if "user_conversation_storage" not in g:
g.user_conversation_storage = pymongo.MongoClient("mongodb://{0}:27017/".format(os.getenv("MONGO_SERVER")))
return g.user_conversation_storage["xlang"]
The provided code snippet includes necessary dependencies for implementing the `delete_folder` function. Write a Python function `def delete_folder() -> Response` to solve the following problem:
Deletes a folder.
Here is the function:
def delete_folder() -> Response:
"""Deletes a folder."""
request_json = request.get_json()
folder_id = request_json.get("folder_id", None)
if folder_id:
try:
db = get_user_conversation_storage()
db.folder.delete_one({"_id": ObjectId(folder_id)})
return jsonify({"success": True, "message": "Folder is deleted."})
except Exception as e:
return jsonify({"success": False, "message": str(e)})
else:
return jsonify({"success": False, "message": "folder_id is missing"}) | Deletes a folder. |
188,428 | import struct
import json
import datetime
from typing import Any, Generator
from bson.objectid import ObjectId
from flask import jsonify, request, Response
from backend.app import app
from backend.utils.user_conversation_storage import get_user_conversation_storage
from backend.main import threading_pool, logger
from backend.schemas import DEFAULT_USER_ID
from backend.schemas import INTERNAL, UNFOUND
def get_user_conversation_storage():
"""Connects to mongodb."""
if "user_conversation_storage" not in g:
g.user_conversation_storage = pymongo.MongoClient("mongodb://{0}:27017/".format(os.getenv("MONGO_SERVER")))
return g.user_conversation_storage["xlang"]
DEFAULT_USER_ID = "DefaultUser"
The provided code snippet includes necessary dependencies for implementing the `clear_all_conversation` function. Write a Python function `def clear_all_conversation() -> Response` to solve the following problem:
Clears all previous conversations.
Here is the function:
def clear_all_conversation() -> Response:
"""Clears all previous conversations."""
request_json = request.get_json()
user_id = request_json.pop("user_id", DEFAULT_USER_ID)
if user_id:
try:
db = get_user_conversation_storage()
db.conversation.delete_many({"user_id": user_id})
db.folder.delete_many({"user_id": user_id})
db.message.delete_many({"user_id": user_id})
return jsonify({"success": True, "message": "Clear All Conversations."})
except Exception as e:
return jsonify({"success": False, "message": str(e)})
else:
return jsonify({"success": False, "message": "user_id is missing"}) | Clears all previous conversations. |
188,429 | import struct
import json
import datetime
from typing import Any, Generator
from bson.objectid import ObjectId
from flask import jsonify, request, Response
from backend.app import app
from backend.utils.user_conversation_storage import get_user_conversation_storage
from backend.main import threading_pool, logger
from backend.schemas import DEFAULT_USER_ID
from backend.schemas import INTERNAL, UNFOUND
threading_pool: ThreadManager = ThreadManager()
INTERNAL = 500
The provided code snippet includes necessary dependencies for implementing the `stop_generation` function. Write a Python function `def stop_generation() -> Response` to solve the following problem:
Stops the current generation, cut on streaming.
Here is the function:
def stop_generation() -> Response:
"""Stops the current generation, cut on streaming."""
try:
request_json = request.get_json()
chat_id = request_json["chat_id"]
threading_pool.kill_thread(chat_id)
except Exception as e:
print(e)
return Response(response={}, status=f"{INTERNAL} error stopping")
def pack_json(object: Any) -> bytes:
json_text = json.dumps(object)
return struct.pack("<i", len(json_text)) + json_text.encode("utf-8")
def yield_stop() -> Generator[bytes, Any, None]:
yield pack_json({"success": False, "error": "stop"})
    # Stream the single length-prefixed stop frame back to the client.
    return Response(response=yield_stop()) | Stops the current generation, cut on streaming. |
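
`pack_json` frames each chunk as a 4-byte little-endian length followed by UTF-8 JSON. A client would decode such a stream roughly like this (a sketch, not taken from the source):

import json
import struct
from typing import Iterator

def iter_frames(stream) -> Iterator[dict]:
    """Decode '<i'-length-prefixed JSON frames from a binary file-like object."""
    while True:
        header = stream.read(4)
        if len(header) < 4:
            return  # end of stream
        (length,) = struct.unpack("<i", header)
        yield json.loads(stream.read(length).decode("utf-8"))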
188,430 | from __future__ import annotations  # enables the `dict | Response` annotation on Python < 3.10
import os
from flask import request, Response
from kaggle.api.kaggle_api_extended import KaggleApi
from backend.app import app
from backend.utils.utils import create_personal_folder
from backend.schemas import UNFOUND, INTERNAL, DEFAULT_USER_ID
api = KaggleApi()
api.authenticate()
def create_personal_folder(user_id: str) -> str:
# mkdir user folder
from backend.main import app
user_folder = os.path.join(app.config["UPLOAD_FOLDER"], user_id)
os.makedirs(user_folder, exist_ok=True)
    # per-chat subfolders are created under this user folder elsewhere
return user_folder
UNFOUND = 404
INTERNAL = 500
DEFAULT_USER_ID = "DefaultUser"
The provided code snippet includes necessary dependencies for implementing the `kaggle_dataset_download` function. Write a Python function `def kaggle_dataset_download() -> dict | Response` to solve the following problem:
Use Kaggle-api to connect.
Here is the function:
def kaggle_dataset_download() -> dict | Response:
"""Use Kaggle-api to connect. """
request_json = request.get_json()
user_id = request_json.pop("user_id", DEFAULT_USER_ID)
url = request_json["url"]
if url.startswith("http"):
return {"success": False,
"message": "Please remove the http in your submitted URL."}
kaggle_dataset_id = url.replace("www.kaggle.com/datasets/", "")
if not kaggle_dataset_id:
return {"success": False, "message": "Please input a valid Kaggle dataset URL."}
root_path = create_personal_folder(user_id)
if os.path.exists(root_path) and os.path.isdir(root_path):
try:
path = os.path.join(root_path, kaggle_dataset_id)
api.dataset_download_files(kaggle_dataset_id, path=path, unzip=True)
return {"success": True, "message": "Download {} successfully.",
"data_path": path}
except Exception as e:
return Response(response=None,
status=f"{INTERNAL} Error Downloading, please try another datasets")
else:
return Response(response=None, status=f"{UNFOUND} Missing User folder") | Use Kaggle-api to connect. |