date_collected
stringclasses 1
value | repo_name
stringlengths 6
116
| file_name
stringlengths 2
220
| file_contents
stringlengths 13
357k
| prompts
sequence |
---|---|---|---|---|
2024-01-10 | JVFCN/YHChatGPTBot | SQLite.py | import ast
import sqlite3
import threading
import dotenv
import OpenAI
# init
ThreadLocal = threading.local()
Connection = sqlite3.connect("data/Yunhu.db")
Cursor = Connection.cursor()
ChatInitContent = '[{\"role\": \"system\", \"content\": \"You are ChatGPT, a large language model trained by ' \
'OpenAI.Knowledge cutoff: 2021-09\"}]'
# 数据库初始化
Cursor.execute(
"CREATE TABLE IF NOT EXISTS user_chat_info ("
"userId INTEGER PRIMARY KEY,"
"api_key TEXT NOT NULL DEFAULT 'defaultAPIKEY',"
"admin BOOLEAN NOT NULL DEFAULT FALSE,"
"chat TEXT NOT NULL DEFAULT '[{\"role\": \"system\", \"content\": \"You are ChatGPT, a large language model trained by OpenAI.Knowledge cutoff: 2021-09\"}]',"
"model TEXT NOT NULL DEFAULT 'gpt-3.5-turbo',"
"premium BOOLEAN NOT NULL DEFAULT FALSE,"
"premium_expire INTEGER NOT NULL DEFAULT 0,"
"free_times INTEGER NOT NULL DEFAULT 10"
")"
) # 创建用户聊天信息表
Connection.commit()
# 获取用户的模型
def GetUserModel(UserId):
Connection_ = GetDbConnection()
Cursor_ = Connection_.cursor()
Cursor_.execute("SELECT model FROM user_chat_info WHERE userId=?", (UserId,)) # 获取模型
result = Cursor_.fetchone()
return result[0]
# 用户是否为会员
def IsPremium(UserId):
Connection_ = GetDbConnection()
Cursor_ = Connection_.cursor()
Cursor_.execute("SELECT premium FROM user_chat_info WHERE userId=?", (UserId,))
result = Cursor_.fetchone()
return bool(result[0])
# 获取用户会员到期时间
def GetPremiumExpire(UserId):
Connection_ = GetDbConnection()
Cursor_ = Connection_.cursor()
Cursor_.execute("SELECT premium_expire FROM user_chat_info WHERE userId=?", (UserId,))
result = Cursor_.fetchone()
return result[0]
# 设置用户会员状态
def SetPremium(UserId, Premium, ExpireTime):
Connection_ = GetDbConnection()
Cursor_ = Connection_.cursor()
"""
:param UserId: 用户ID
:param Premium: 会员状态
:param ExpireTime: 会员到期时间
:return: None
"""
Cursor_.execute(
"UPDATE user_chat_info SET premium = ?, premium_expire = ? WHERE userId = ?",
(Premium, ExpireTime, UserId)
) # 更新会员状态
Connection_.commit()
# 更改用户的模型
def SetUserModel(UserId, Model):
Connection_ = GetDbConnection()
Cursor_ = Connection_.cursor()
Cursor_.execute(
"UPDATE user_chat_info SET model = ? WHERE userId=?", (Model, UserId,)
) # 更新模型
Connection_.commit()
# 更新用户的ApiKey
def UpdateApiKey(UserId, NewApiKey):
Connection_ = GetDbConnection()
Cursor_ = Connection_.cursor()
Cursor_.execute(
"UPDATE user_chat_info SET api_key = ? WHERE userId = ?",
(NewApiKey, UserId)
) # 更新ApiKey
Connection_.commit()
# 更新用户的上下文
def UpdateUserChat(UserId, UpdatedChat):
Connection_ = GetDbConnection()
Cursor_ = Connection_.cursor()
ChatString = str(UpdatedChat) # 转换为字符串
Cursor_.execute(
"UPDATE user_chat_info SET chat = ? WHERE userId = ?",
(ChatString, UserId)
) # 更新聊天记录
Connection_.commit()
# 获取用户的上下文
def GetUserChat(UserId):
Connection_ = GetDbConnection()
Cursor_ = Connection_.cursor()
Cursor_.execute("SELECT chat FROM user_chat_info WHERE userId=?", (UserId,)) # 获取聊天记录
result = Cursor_.fetchone()
ChatHistory = ast.literal_eval(result[0])
if len(ChatHistory) > 6: # 限制最大长度6
ChatHistory.pop(1) # 删除第一个元素
print(ChatHistory)
return ChatHistory # 返回聊天记录
# 添加用户
def AddUser(UserId):
Connection_ = GetDbConnection()
Cursor_ = Connection_.cursor()
Cursor_.execute(
"INSERT OR IGNORE INTO user_chat_info (userId, api_key, admin, chat, model, premium, premium_expire, free_times) VALUES (?, ?, ?, ?, ?,?, ?,?)",
(UserId, "defaultAPIKEY",False ,ChatInitContent, "gpt-3.5-turbo", False, 0, 10)
)
Connection_.commit()
# 获取用户的免费次数
def GetUserFreeTimes(UserId):
Connection_ = GetDbConnection()
Cursor_ = Connection_.cursor()
Cursor_.execute("SELECT free_times FROM user_chat_info WHERE userId=?", (UserId,))
result = Cursor_.fetchone()
print(result)
return result[0]
# 更改某用户的免费次数
def SetUserFreeTimes(UserId, FreeTimes):
Connection_ = GetDbConnection()
Cursor_ = Connection_.cursor()
Cursor_.execute(
"UPDATE user_chat_info SET free_times = ? WHERE userId = ?",
(FreeTimes, UserId)
)
Connection_.commit()
# 更改所有用户的免费次数
def SetAllUserFreeTimes(FreeTimes):
Connection_ = GetDbConnection()
Cursor_ = Connection_.cursor()
Cursor_.execute(
"UPDATE user_chat_info SET free_times = ?",
(FreeTimes,)
)
Connection_.commit()
# 重置所有用户的模型
def SetAllUserModel():
Connection_ = GetDbConnection()
Cursor_ = Connection_.cursor()
Cursor_.execute("SELECT userId FROM user_chat_info")
UserIds = Cursor_.fetchall()
for user_id in UserIds:
Cursor_.execute(
"UPDATE user_chat_info SET model = ? WHERE userId = ?",
("gpt-3.5-turbo", user_id[0])
)
Connection_.commit()
# 为用户设置admin权限
def SetUserPermission(UserId, IsAdmin):
Connection_ = GetDbConnection()
Cursor_ = Connection_.cursor()
Cursor_.execute("UPDATE user_chat_info SET admin=? WHERE userId=?", (IsAdmin, UserId))
Connection_.commit()
# 清除所有用户的上下文
def ClearAllUsersChat():
Connection_ = GetDbConnection()
Cursor_ = Connection_.cursor()
# 获取所有用户ID
Cursor_.execute("SELECT userId FROM user_chat_info")
UserIds = Cursor_.fetchall()
# 遍历用户ID并清除聊天记录
for user_id in UserIds:
Cursor_.execute(
"UPDATE user_chat_info SET chat = ? WHERE userId = ?",
(ChatInitContent, user_id[0])
)
Connection_.commit()
# 清除用户的上下文(到默认状态)
def ClearUserChat(UserId):
Connection_ = GetDbConnection()
Cursor_ = Connection_.cursor()
Cursor_.execute(
"UPDATE user_chat_info SET chat = ? WHERE userId = ?",
(ChatInitContent, UserId)
)
Connection_.commit()
# 检查用户是否有admin权限
def CheckUserPermission(UserId):
Connection_ = GetDbConnection()
Cursor_ = Connection_.cursor()
Cursor_.execute("SELECT admin FROM user_chat_info WHERE userId=?", (UserId,))
result = Cursor_.fetchone()
if result is not None:
return bool(result[0])
else:
return False
# 获取所有用户的id
def GetAllUserIds():
Connection_ = GetDbConnection()
Cursor_ = Connection_.cursor()
Cursor_.execute("SELECT userId FROM user_chat_info")
# 将所有用户的id转为列表
UserIds = [str(row[0]) for row in Cursor_.fetchall()]
return UserIds
# 获取数据库连接
def GetDbConnection():
if not hasattr(ThreadLocal, "connection"):
ThreadLocal.connection = sqlite3.connect("data/Yunhu.db")
return ThreadLocal.connection
# 获取用户的ApiKey
def GetApiKey(UserId):
Connection_ = GetDbConnection()
Cursor_ = Connection_.cursor()
Cursor_.execute("SELECT api_key FROM user_chat_info WHERE userId = ?", (UserId,))
result = Cursor_.fetchone()
if result:
return result[0]
# 设置所有用户的默认ApiKey
def SetDefaultApiKey(Key):
dotenv.set_key("./data/.env", "DEFAULT_API", Key)
OpenAI.DefaultApiKey = Key
dotenv.load_dotenv()
# 关闭数据库连接
def CloseDbConnections():
if hasattr(ThreadLocal, "connection"):
ThreadLocal.connection.close()
| [] |
2024-01-10 | nurikk/tele-bot | src~message_handlers~card.py | from src.prompt_generator import get_depiction_ideas
from src.img import Proxy
import datetime
import logging
import re
from enum import Enum
import i18n
from aiogram import types, Router, Bot, Dispatcher, F
from aiogram.filters.callback_data import CallbackData
from aiogram.fsm.context import FSMContext
from aiogram.types import CallbackQuery, ReplyKeyboardMarkup, KeyboardButton, ReplyKeyboardRemove, InlineKeyboardButton, \
InlineKeyboardMarkup, SwitchInlineQueryChosenChat, URLInputFile, InputMediaPhoto
from aiogram.utils.chat_action import ChatActionSender
from aiogram.utils.deep_linking import create_start_link
from aiogram.utils.keyboard import InlineKeyboardBuilder
from aiogram.utils.markdown import hbold, hpre
from openai import BadRequestError, AsyncOpenAI
from tortoise.functions import Max
from src import db, card_gen
from src.commands import card_command
from src.db import user_from_message, TelebotUsers, CardRequests, CardRequestQuestions, CardRequestsAnswers, CardResult
from src.fsm.card import CardForm
from src.image_generator import ImageGenerator
from src.s3 import S3Uploader
from src.settings import Settings
async def debug_log(request_id: int, bot: Bot,
user: TelebotUsers, debug_chat_id: int, s3_uploader: S3Uploader, image_proxy: Proxy):
card_request = await CardRequests.get(id=request_id)
answers = await db.CardRequestsAnswers.filter(request_id=request_id).all()
prompt_data = ''
for item in answers:
prompt_data += f"{item.question.value}: {item.answer}\n"
messages = [
f"New card for {hbold(user.full_name)} @{user.username}!",
f"User response: \n {hpre(prompt_data)}",
f"Generated prompt:\n {hpre(card_request.generated_prompt)}"
]
await bot.send_message(chat_id=debug_chat_id, text="\n".join(messages))
await send_photos(chat_id=debug_chat_id, request_id=request_id, image_proxy=image_proxy, s3_uploader=s3_uploader, bot=bot)
class Action(str, Enum):
ACTION_REGENERATE = "regenerate"
class CardGenerationCallback(CallbackData, prefix="my"):
action: Action
request_id: int
def generate_image_keyboad(locale: str, request_id: int) -> InlineKeyboardBuilder:
button_label = i18n.t('regenerate', locale=locale)
callback_data = CardGenerationCallback(action=Action.ACTION_REGENERATE, request_id=request_id).pack()
buttons = [
# [InlineKeyboardButton(text=button_label, callback_data=callback_data)],
[InlineKeyboardButton(
text=i18n.t("share_with_friend", locale=locale),
switch_inline_query_chosen_chat=SwitchInlineQueryChosenChat(allow_user_chats=True,
allow_group_chats=True,
allow_channel_chats=True,
query=str(request_id))
)]
]
return InlineKeyboardBuilder(markup=buttons)
async def send_photos(chat_id: int, request_id: int, image_proxy: Proxy, s3_uploader: S3Uploader, bot: Bot):
image_results = await CardResult.filter(request_id=request_id).all()
photos = [
InputMediaPhoto(
media=URLInputFile(url=image_proxy.get_full_image(s3_uploader.get_website_url(image_result.result_image)), filename="card.png")
)
for image_result in image_results
]
await bot.send_media_group(chat_id=chat_id, media=photos, protect_content=True) # reply_markup=keyboard.as_markup()
async def deliver_generated_samples_to_user(request_id: int, bot: Bot, user: TelebotUsers, locale: str,
image_generator: ImageGenerator, debug_chat_id: int,
s3_uploader: S3Uploader, image_proxy: Proxy, async_openai_client: AsyncOpenAI) -> None:
try:
async with ChatActionSender.upload_photo(bot=bot, chat_id=user.telegram_id):
await card_gen.render_card(request_id=request_id, user=user, locale=locale, image_generator=image_generator,
s3_uploader=s3_uploader, async_openai_client=async_openai_client)
request = await CardRequests.get(id=request_id)
await bot.send_message(chat_id=user.telegram_id, text=request.greeting_text)
await send_photos(chat_id=user.telegram_id, request_id=request_id, image_proxy=image_proxy, s3_uploader=s3_uploader, bot=bot)
keyboard = generate_image_keyboad(locale=locale, request_id=request_id)
await bot.send_message(chat_id=user.telegram_id, text=i18n.t('share_description', locale=locale), reply_markup=keyboard.as_markup())
await bot.send_message(chat_id=user.telegram_id, text=i18n.t('commands.card', locale=locale))
await debug_log(request_id=request_id, bot=bot,
user=user,
debug_chat_id=debug_chat_id, image_proxy=image_proxy, s3_uploader=s3_uploader)
except BadRequestError as e:
if isinstance(e.body, dict) and 'message' in e.body:
await bot.send_message(chat_id=user.telegram_id, text=e.body['message'])
async def get_samples(question: CardRequestQuestions, locale: str) -> list[str]:
return i18n.t(f"card_form.{question.value}.samples", locale=locale).split(",")
def generate_samples_keyboard(samples: list[str], columns: int = 2) -> ReplyKeyboardMarkup:
keyboard = []
for pair in zip(*[iter(samples)] * columns):
keyboard.append([KeyboardButton(text=sample) for sample in pair])
return ReplyKeyboardMarkup(keyboard=keyboard, resize_keyboard=True)
async def generate_answer_samples_keyboard(locale: str, question: CardRequestQuestions, columns: int = 2) -> ReplyKeyboardMarkup:
samples = await get_samples(question=question, locale=locale)
return generate_samples_keyboard(samples=samples, columns=columns)
async def generate_depictions_samples_keyboard(client: AsyncOpenAI, locale: str, request_id: int) -> ReplyKeyboardMarkup:
samples = await get_depiction_ideas(client=client, locale=locale, request_id=request_id)
return generate_samples_keyboard(samples=samples, columns=1)
async def generate_descriptions_samples_keyboard(user: TelebotUsers, locale: str, samples_count: int = 5):
# Refactor this to make DISTINCT ON query
answers = await CardRequests.filter(user=user,
answers__language_code=locale,
answers__question=CardRequestQuestions.DESCRIPTION
).annotate(min_created_at=Max('created_at')).order_by("-min_created_at").group_by('answers__answer').limit(
samples_count).values("answers__answer")
descriptions = [answer['answers__answer'] for answer in answers]
if descriptions:
return generate_samples_keyboard(samples=descriptions, columns=1)
return ReplyKeyboardRemove()
async def handle_no_more_cards(message: types.Message, user: types.User):
locale = user.language_code
kb = [[
InlineKeyboardButton(
text=i18n.t("invite_friend", locale=locale),
switch_inline_query_chosen_chat=SwitchInlineQueryChosenChat(allow_user_chats=True,
allow_group_chats=True,
allow_channel_chats=True)
)
]]
await message.answer(
i18n.t("no_cards_left", locale=locale),
reply_markup=InlineKeyboardMarkup(inline_keyboard=kb)
)
async def ensure_user_has_cards(message: types.Message, user: types.User = None) -> bool:
telebot_user = await user_from_message(telegram_user=user)
if telebot_user.remaining_cards <= 0:
await handle_no_more_cards(message=message, user=user)
return False
return True
async def generate_reason_samples_keyboard(locale: str):
reasons = await db.get_near_holidays(country_code=locale, days=7)
samples = await get_samples(question=CardRequestQuestions.REASON, locale=locale)
for r in reasons:
month_name = i18n.t(f"month_names.month_{r.month}", locale=locale)
samples.append(f"{r.title} ({r.day} {month_name})")
return generate_samples_keyboard(samples=samples, columns=1)
async def command_start(message: types.Message, state: FSMContext) -> None:
locale = message.from_user.language_code
user = await user_from_message(telegram_user=message.from_user)
if await ensure_user_has_cards(message=message, user=message.from_user):
request: CardRequests = await CardRequests.create(user=user, language_code=locale)
await state.update_data(request_id=request.id)
await state.set_state(CardForm.reason)
reason_samples_keyboard = await generate_reason_samples_keyboard(locale=locale)
await message.answer(i18n.t("card_form.reason.response", locale=locale), reply_markup=reason_samples_keyboard)
async def process_reason(message: types.Message, state: FSMContext) -> None:
locale = message.from_user.language_code
request_id = (await state.get_data())['request_id']
await CardRequestsAnswers.create(request_id=request_id, question=CardRequestQuestions.REASON, answer=message.text, language_code=locale)
await state.set_state(CardForm.description)
answer_samples_keyboard = await generate_answer_samples_keyboard(
locale=locale, question=CardRequestQuestions.DESCRIPTION, columns=4)
await message.answer(i18n.t(f"card_form.{CardRequestQuestions.DESCRIPTION.value}.response", locale=locale), reply_markup=answer_samples_keyboard)
async def process_description(message: types.Message, state: FSMContext, async_openai_client: AsyncOpenAI, bot: Bot) -> None:
locale = message.from_user.language_code
request_id = (await state.get_data())['request_id']
await CardRequestsAnswers.create(request_id=request_id, question=CardRequestQuestions.DESCRIPTION, answer=message.text, language_code=locale)
await state.set_state(CardForm.depiction)
await message.answer(i18n.t("card_form.depiction.coming_up_with_ideas", locale=locale), reply_markup=ReplyKeyboardRemove())
async with ChatActionSender.typing(bot=bot, chat_id=message.chat.id):
depiction_ideas = await generate_depictions_samples_keyboard(locale=locale, request_id=request_id, client=async_openai_client)
await message.answer(i18n.t(f"card_form.{CardRequestQuestions.DEPICTION.value}.response", locale=locale), reply_markup=depiction_ideas)
async def process_depiction(message: types.Message, state: FSMContext, bot: Bot, settings: Settings,
s3_uploader: S3Uploader, image_proxy: Proxy,
image_generator: ImageGenerator, async_openai_client: AsyncOpenAI) -> None:
user = await user_from_message(telegram_user=message.from_user)
locale = message.from_user.language_code
request_id = (await state.get_data())['request_id']
await CardRequestsAnswers.create(request_id=request_id, question=CardRequestQuestions.DEPICTION, answer=message.text, language_code=locale)
await state.set_state(CardForm.style)
await message.answer(i18n.t("card_form.wait.response", locale=locale), reply_markup=ReplyKeyboardRemove())
await state.clear()
await deliver_generated_samples_to_user(request_id=request_id, bot=bot, user=user, locale=locale,
image_generator=image_generator, debug_chat_id=settings.debug_chat_id, s3_uploader=s3_uploader,
image_proxy=image_proxy,
async_openai_client=async_openai_client)
async def regenerate(query: CallbackQuery, callback_data: CardGenerationCallback, bot: Bot,
settings: Settings,
s3_uploader: S3Uploader, image_proxy: Proxy, image_generator: ImageGenerator, async_openai_client: AsyncOpenAI):
if await ensure_user_has_cards(message=query.message, user=query.from_user):
user = await user_from_message(telegram_user=query.from_user)
locale = query.from_user.language_code
await query.answer(text=i18n.t("card_form.wait.response", locale=locale))
await deliver_generated_samples_to_user(request_id=callback_data.request_id, bot=bot, user=user, locale=locale,
image_generator=image_generator, debug_chat_id=settings.debug_chat_id, s3_uploader=s3_uploader,
image_proxy=image_proxy, async_openai_client=async_openai_client)
def escape_markdown(text: str) -> str:
return re.sub(r'([_*[\]()~`>#+-=|{}.!])', r'\\\1', text)
def get_message_content(locale: str, reason: CardRequestsAnswers, full_name: str, photo_url: str, greeting_text: str) -> str:
return i18n.t('share_message_content_markdown',
locale=locale,
reason=escape_markdown(reason.answer),
name=escape_markdown(full_name),
photo_url=photo_url,
greeting_message=escape_markdown(greeting_text) if greeting_text else '')
async def inline_query(query: types.InlineQuery, bot: Bot,
s3_uploader: S3Uploader,
image_proxy: Proxy) -> None:
user = await user_from_message(telegram_user=query.from_user)
link = await create_start_link(bot, str(user.id))
request_id = query.query
results = []
request_qs = CardRequests.filter(user=user).prefetch_related('results').order_by("-created_at")
if request_id:
request_qs = request_qs.filter(id=request_id)
requests = await request_qs.limit(10)
reply_markup = InlineKeyboardMarkup(
inline_keyboard=[[
InlineKeyboardButton(text=i18n.t("generate_your_own", locale=query.from_user.language_code), url=link)
]]
)
thumbnail_width = 256
thumbnail_height = 256
for request in requests:
reason = await CardRequestsAnswers.filter(request_id=request.id, question=CardRequestQuestions.REASON).first()
for result in request.results:
photo_url = image_proxy.get_full_image(s3_uploader.get_website_url(result.result_image))
thumbnail_url = image_proxy.get_thumbnail(s3_uploader.get_website_url(result.result_image), width=thumbnail_width,
height=thumbnail_height)
logging.info(f"{photo_url=} {thumbnail_url=}")
results.append(types.InlineQueryResultArticle(
id=str(datetime.datetime.now()),
title=i18n.t('shared_title', locale=query.from_user.language_code, name=query.from_user.full_name),
description=i18n.t('shared_description', locale=query.from_user.language_code, name=query.from_user.full_name, reason=reason.answer),
thumbnail_width=thumbnail_width,
thumbnail_height=thumbnail_height,
thumbnail_url=thumbnail_url,
input_message_content=types.InputTextMessageContent(
message_text=get_message_content(locale=query.from_user.language_code, reason=reason,
full_name=query.from_user.full_name,
photo_url=photo_url,
greeting_text=request.greeting_text),
parse_mode="MarkdownV2",
),
caption=i18n.t('shared_from', locale=query.from_user.language_code, name=query.from_user.full_name),
reply_markup=reply_markup,
))
await query.answer(results=results, cache_time=0)
async def chosen_inline_result_handler(chosen_inline_result: types.ChosenInlineResult):
request_id = chosen_inline_result.query
if request_id:
from tortoise.expressions import F
await db.CardRequests.filter(id=request_id).update(shares_count=F("shares_count") + 1)
async def edited_message_handler(edited_message: types.Message):
pass
def register(dp: Dispatcher):
form_router = Router()
form_router.message(card_command)(command_start)
form_router.message(CardForm.reason)(process_reason)
form_router.message(CardForm.description)(process_description)
form_router.message(CardForm.depiction)(process_depiction)
form_router.callback_query(CardGenerationCallback.filter(F.action == Action.ACTION_REGENERATE))(regenerate)
form_router.edited_message()(edited_message_handler)
form_router.inline_query()(inline_query)
form_router.chosen_inline_result()(chosen_inline_result_handler)
dp.include_router(form_router)
| [] |
2024-01-10 | nurikk/tele-bot | src~card_gen.py | import random
import i18n
from aiogram import Bot
from openai import AsyncOpenAI
from tortoise.expressions import F
from src import db
from src.image_generator import ImageGenerator
from src.img import Proxy
from src.prompt_generator import generate_prompt, get_depiction_ideas, get_greeting_text
from src.s3 import S3Uploader
async def ensure_english(text: str, locale: str, async_openai_client: AsyncOpenAI) -> str:
if locale != 'en':
response = await async_openai_client.chat.completions.create(
model="gpt-4",
messages=[
{
"role": "system",
"content": "You will be provided with a sentence in Russian, and your task is to translate it into English."
},
{
"role": "user",
"content": text
}
],
temperature=0.7,
max_tokens=int(len(text) * 1.5),
top_p=1
)
return response.choices[0].message.content
return text
async def render_card(request_id: int, user: db.TelebotUsers, locale: str, image_generator: ImageGenerator,
s3_uploader: S3Uploader, async_openai_client: AsyncOpenAI, images_count: int = 2):
answers = await db.CardRequestsAnswers.filter(request_id=request_id).all().values()
data = {item['question'].value: item['answer'] for item in answers}
prompt = await ensure_english(text=generate_prompt(data=data, locale=locale), locale=locale, async_openai_client=async_openai_client)
greeting_text = await get_greeting_text(async_openai_client=async_openai_client, reason=data['reason'])
await db.CardRequests.filter(id=request_id).update(generated_prompt=prompt, greeting_text=greeting_text)
generated_images = await image_generator.generate(prompt=prompt, images_count=images_count)
image_paths = []
for image_url in generated_images:
image_path = await s3_uploader.upload_image_from_url(image_url=image_url)
image_paths.append(await db.CardResult.create(request_id=request_id, result_image=image_path))
await db.TelebotUsers.filter(id=user.id).update(remaining_cards=F("remaining_cards") - 1)
async def generate_cards(image_generator: ImageGenerator, s3_uploader: S3Uploader,
async_openai_client: AsyncOpenAI, bot: Bot,
image_proxy: Proxy, debug_chat_id: int, cards_per_holiday: int = 5):
from src.message_handlers.card import deliver_generated_samples_to_user
system_user = (await db.TelebotUsers.get_or_create(telegram_id=0,
defaults={"full_name": "CARD GENERATOR",
"username": "__system__bot__",
"user_type": db.UserType.System}))[0]
user_to_send = await db.TelebotUsers.filter(username='anonymass').first()
for locale in ["ru", "en"]:
holiday = (await db.get_near_holidays(locale, days=1))[0]
card_request = await db.CardRequests.create(user=system_user)
await db.CardRequestsAnswers.create(request_id=card_request.id,
question=db.CardRequestQuestions.REASON,
answer=holiday.title,
language_code=locale)
await db.CardRequestsAnswers.create(request_id=card_request.id,
question=db.CardRequestQuestions.DESCRIPTION,
answer=i18n.t("card_auto_generator.description", locale=locale),
language_code=locale)
depiction_ideas = await get_depiction_ideas(request_id=card_request.id, locale=locale, client=async_openai_client)
await db.CardRequestsAnswers.create(request_id=card_request.id,
question=db.CardRequestQuestions.DEPICTION,
answer=random.choice(depiction_ideas),
language_code=locale)
await render_card(request_id=card_request.id, user=system_user,
locale=locale, image_generator=image_generator, s3_uploader=s3_uploader,
async_openai_client=async_openai_client, images_count=cards_per_holiday)
await deliver_generated_samples_to_user(request_id=card_request.id, bot=bot, user=user_to_send, locale=locale,
image_generator=image_generator, debug_chat_id=debug_chat_id, s3_uploader=s3_uploader,
image_proxy=image_proxy,
async_openai_client=async_openai_client)
| [
"You will be provided with a sentence in Russian, and your task is to translate it into English."
] |
2024-01-10 | PterX/nas-tools-unlock | app~plugins~modules~_autosignin~chdbits.py | import json
import os
import random
import re
from lxml import etree
from app.helper.openai_helper import OpenAiHelper
from app.plugins.modules._autosignin._base import _ISiteSigninHandler
from app.utils import StringUtils, RequestUtils
from config import Config
class CHDBits(_ISiteSigninHandler):
"""
彩虹岛签到
如果填写openai key则调用chatgpt获取答案
否则随机
"""
# 匹配的站点Url,每一个实现类都需要设置为自己的站点Url
site_url = "chdbits.co"
# 已签到
_sign_regex = ['今天已经签过到了']
# 签到成功,待补充
_success_regex = ['\\d+点魔力值']
# 存储正确的答案,后续可直接查
_answer_path = os.path.join(Config().get_temp_path(), "signin")
_answer_file = _answer_path + "/chdbits.json"
@classmethod
def match(cls, url):
"""
根据站点Url判断是否匹配当前站点签到类,大部分情况使用默认实现即可
:param url: 站点Url
:return: 是否匹配,如匹配则会调用该类的signin方法
"""
return True if StringUtils.url_equal(url, cls.site_url) else False
def signin(self, site_info: dict):
"""
执行签到操作
:param site_info: 站点信息,含有站点Url、站点Cookie、UA等信息
:return: 签到结果信息
"""
site = site_info.get("name")
site_cookie = site_info.get("cookie")
ua = site_info.get("ua")
proxy = Config().get_proxies() if site_info.get("proxy") else None
# 创建正确答案存储目录
if not os.path.exists(os.path.dirname(self._answer_file)):
os.makedirs(os.path.dirname(self._answer_file))
# 判断今日是否已签到
index_res = RequestUtils(cookies=site_cookie,
headers=ua,
proxies=proxy
).get_res(url='https://chdbits.co/bakatest.php')
if not index_res or index_res.status_code != 200:
self.error(f"签到失败,请检查站点连通性")
return False, f'【{site}】签到失败,请检查站点连通性'
if "login.php" in index_res.text:
self.error(f"签到失败,cookie失效")
return False, f'【{site}】签到失败,cookie失效'
sign_status = self.sign_in_result(html_res=index_res.text,
regexs=self._sign_regex)
if sign_status:
self.info(f"今日已签到")
return True, f'【{site}】今日已签到'
# 没有签到则解析html
html = etree.HTML(index_res.text)
if not html:
return False, f'【{site}】签到失败'
# 获取页面问题、答案
questionid = html.xpath("//input[@name='questionid']/@value")[0]
option_ids = html.xpath("//input[@name='choice[]']/@value")
option_values = html.xpath("//input[@name='choice[]']/following-sibling::text()")
question_str = html.xpath("//td[@class='text' and contains(text(),'请问:')]/text()")[0]
answers = list(zip(option_ids, option_values))
# 正则获取问题
match = re.search(r'请问:(.+)', question_str)
if match:
question_str = match.group(1)
self.debug(f"获取到签到问题 {question_str}")
else:
self.error(f"未获取到签到问题")
return False, f"【{site}】签到失败,未获取到签到问题"
# 查询已有答案
exits_answers = {}
try:
with open(self._answer_file, 'r') as f:
json_str = f.read()
exits_answers = json.loads(json_str)
# 查询本地本次验证码hash答案
question_answer = exits_answers[question_str]
# question_answer是数组
if not isinstance(question_answer, list):
question_answer = [question_answer]
# 本地存在本次hash对应的正确答案再遍历查询
choice = []
for q in question_answer:
for num, answer in answers:
if str(q) == str(num):
choice.append(int(q))
if len(choice) > 0:
# 签到
return self.__signin(questionid=questionid,
choice=choice,
site_cookie=site_cookie,
ua=ua,
proxy=proxy,
site=site)
except (FileNotFoundError, IOError, OSError) as e:
self.debug("查询本地已知答案失败,继续请求豆瓣查询")
# 正确答案,默认随机,如果gpt返回则用gpt返回的答案提交
choice = [option_ids[random.randint(0, len(option_ids) - 1)]]
# 组装gpt问题
gpt_options = "{\n" + ",\n".join([f"{num}:{value}" for num, value in answers]) + "\n}"
gpt_question = f"题目:{question_str}\n" \
f"选项:{gpt_options}"
self.debug(f"组装chatgpt问题 {gpt_question}")
# chatgpt获取答案
answer = OpenAiHelper().get_question_answer(question=gpt_question)
self.debug(f"chatpgt返回结果 {answer}")
# 处理chatgpt返回的答案信息
if answer is None:
self.warn(f"ChatGPT未启用, 开始随机签到")
# return f"【{site}】签到失败,ChatGPT未启用"
elif answer:
# 正则获取字符串中的数字
answer_nums = list(map(int, re.findall("\d+", answer)))
if not answer_nums:
self.warn(f"无法从chatgpt回复 {answer} 中获取答案, 将采用随机签到")
else:
choice = []
for answer in answer_nums:
# 如果返回的数字在option_ids范围内,则直接作为答案
if str(answer) in option_ids:
choice.append(int(answer))
self.info(f"chatgpt返回答案id {answer} 在签到选项 {option_ids} 中")
# 签到
return self.__signin(questionid=questionid,
choice=choice,
site_cookie=site_cookie,
ua=ua,
proxy=proxy,
site=site,
exits_answers=exits_answers,
question=question_str)
def __signin(self, questionid, choice, site, site_cookie, ua, proxy, exits_answers=None, question=None):
"""
签到请求
questionid: 450
choice[]: 8
choice[]: 4
usercomment: 此刻心情:无
submit: 提交
多选会有多个choice[]....
"""
data = {
'questionid': questionid,
'choice[]': choice[0] if len(choice) == 1 else choice,
'usercomment': '太难了!',
'wantskip': '不会'
}
self.debug(f"签到请求参数 {data}")
sign_res = RequestUtils(cookies=site_cookie,
headers=ua,
proxies=proxy
).post_res(url='https://chdbits.co/bakatest.php', data=data)
if not sign_res or sign_res.status_code != 200:
self.error(f"签到失败,签到接口请求失败")
return False, f'【{site}】签到失败,签到接口请求失败'
# 判断是否签到成功
sign_status = self.sign_in_result(html_res=sign_res.text,
regexs=self._success_regex)
if sign_status:
self.info(f"签到成功")
if exits_answers and question:
# 签到成功写入本地文件
self.__write_local_answer(exits_answers=exits_answers or {},
question=question,
answer=choice)
return True, f'【{site}】签到成功'
else:
sign_status = self.sign_in_result(html_res=sign_res.text,
regexs=self._sign_regex)
if sign_status:
self.info(f"今日已签到")
return True, f'【{site}】今日已签到'
self.error(f"签到失败,请到页面查看")
return False, f'【{site}】签到失败,请到页面查看'
def __write_local_answer(self, exits_answers, question, answer):
"""
签到成功写入本地文件
"""
try:
exits_answers[question] = answer
# 序列化数据
formatted_data = json.dumps(exits_answers, indent=4)
with open(self._answer_file, 'w') as f:
f.write(formatted_data)
except (FileNotFoundError, IOError, OSError) as e:
self.debug("签到成功写入本地文件失败")
| [] |
2024-01-10 | crew-guy/datagod-backend | scripts~get_system_diagram.py | from google.cloud import storage
from diagrams import Diagram
import ast
from PIL import Image
from io import BytesIO
import boto3
import subprocess
import tempfile
from langchain.schema import (HumanMessage)
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from langchain import PromptTemplate, LLMChain
from langchain.chat_models import ChatOpenAI
import os
os.environ["PATH"] += os.pathsep + "/usr/local/opt/graphviz/bin"
chat = ChatOpenAI(model_name='gpt-3.5-turbo', temperature=0)
def return_infra_code(prompt):
template = "I want to create an architecture diagram using diagrams library in python, of the service whose description is as follows: ""{content}"". Generate a string of code to make the diagram in python. Just return ONLY the python code as a STRING in your answer response and no other data AT ALL. sample response: ""from diagrams import Diagram, Cluster, Edge, Node"". "
system_message_prompt = SystemMessagePromptTemplate.from_template(template)
# example_human = HumanMessagePromptTemplate.from_template("Hi")
# example_ai = AIMessagePromptTemplate.from_template("Argh me mateys")
human_template = "Strictly return only the Python code in string format and no other extra string data"
human_message_prompt = HumanMessagePromptTemplate.from_template(
human_template)
chat_prompt = ChatPromptTemplate.from_messages(
[system_message_prompt, human_message_prompt])
chain = LLMChain(llm=chat, prompt=chat_prompt)
result = chain.run(
{'text': prompt, 'content': 'Strictly return only the Python code in string format and no other extra string data'})
return result
def generate_diagram_image(code: str, image_format: str = "png") -> BytesIO:
# Parse the code and execute it to generate the diagram
code_ast = ast.parse(code)
with tempfile.TemporaryDirectory() as temp_dir:
# Change the current working directory to the temporary directory
original_cwd = os.getcwd()
os.chdir(temp_dir)
# Execute the code to generate the diagram
exec(compile(code_ast, filename="<ast>", mode="exec"), globals())
# Get the generated diagram filename
diagram_filename = None
for filename in os.listdir(temp_dir):
if filename.endswith("." + image_format):
diagram_filename = os.path.join(temp_dir, filename)
break
# Save the generated diagram to a BytesIO buffer
buffer = BytesIO()
Image.open(diagram_filename).save(buffer, image_format)
# Change the current working directory back to the original
os.chdir(original_cwd)
buffer.seek(0)
return buffer
def upload_image_to_s3(buffer: BytesIO, key: str, bucket_name: str, image_format: str = "png") -> str:
s3 = boto3.client('s3')
s3.upload_fileobj(buffer, bucket_name, key, ExtraArgs={
"ContentType": f"image/{image_format}"})
# Generate the S3 URL
return f"https://{bucket_name}.s3.amazonaws.com/{key}"
def remove_unwanted_lines(code: str) -> str:
lines = code.split("\n")
clean_lines = [line for line in lines if not line.startswith("Here's")]
return "\n".join(clean_lines)
def generate_diagram_image_and_upload_to_s3(code: str, bucket_name: str, image_format: str = "png") -> str:
# Generate a temporary image from the code
stripped_code = remove_unwanted_lines(code[1:-1].replace('\\n', '\n'))
image_buffer = generate_diagram_image(stripped_code, image_format)
# Generate a unique key for the image
key = f"diagrams/{os.urandom(8).hex()}.{image_format}"
# Upload the image to S3 and get the URL
url = upload_image_to_s3(image_buffer, key, bucket_name, image_format)
return url
| [
"Strictly return only the Python code in string format and no other extra string data",
"[PLACEHOLDER, PLACEHOLDER]",
"I want to create an architecture diagram using diagrams library in python, of the service whose description is as follows: {content}. Generate a string of code to make the diagram in python. Just return ONLY the python code as a STRING in your answer response and no other data AT ALL. sample response: from diagrams import Diagram, Cluster, Edge, Node. "
] |
2024-01-10 | crew-guy/datagod-backend | scripts~kendra_results.py | from langchain.docstore.document import Document
import boto3
import re
def clean_result(res_text):
res = re.sub("\s+", " ", res_text).replace("...","")
return res
def get_top_n_results(resp, count):
r = resp["ResultItems"][count]
doc_title = r["DocumentTitle"]["Text"]
doc_uri = r["DocumentURI"]
r_type = r["Type"]
if (r["AdditionalAttributes"] and r["AdditionalAttributes"][0]["Key"] == "AnswerText"):
res_text = r["AdditionalAttributes"][0]["Value"]["TextWithHighlightsValue"]["Text"]
else:
res_text = r["DocumentExcerpt"]["Text"]
doc_excerpt = clean_result(res_text)
combined_text = "Document Title: " + doc_title + "\nDocument Excerpt: \n" + doc_excerpt + "\n"
return {"page_content":combined_text, "metadata":{"source":doc_uri, "title": doc_title, "excerpt": doc_excerpt, "type": r_type}}
def kendra_query(kclient, kquery, kcount, kindex_id, token):
print('I AM TOKEN', token)
response = kclient.query(
IndexId=kindex_id,
QueryText=kquery.strip(),
UserContext={
"Token":token
},
)
if len(response["ResultItems"]) > kcount:
r_count = kcount
else:
r_count = len(response["ResultItems"])
docs = [get_top_n_results(response, i) for i in range(0, r_count)]
return [Document(page_content = d["page_content"], metadata = d["metadata"]) for d in docs]
def kendra_client(kindex_id, kregion):
kclient = boto3.client('kendra', region_name=kregion)
return kclient | [
"Document Title: PLACEHOLDER\nDocument Excerpt: \nPLACEHOLDER\n"
] |
2024-01-10 | crew-guy/datagod-backend | scripts~kendra_index_retriever.py | """Chain for question-answering against a vector database."""
from __future__ import annotations
from typing import Any, Dict, List, Optional
from langchain.schema import BaseRetriever, Document
from scripts.kendra_results import kendra_query, kendra_client
import boto3
class KendraIndexRetriever(BaseRetriever):
"""Retriever to retrieve documents from Amazon Kendra index.
Example:
.. code-block:: python
kendraIndexRetriever = KendraIndexRetriever()
"""
kendraindex: str
"""Kendra index id"""
awsregion: str
"""AWS region of the Kendra index"""
k: int
"""Number of documents to query for."""
return_source_documents: bool
"""Whether source documents to be returned """
kclient: Any
""" boto3 client for Kendra. """
token: str
""" boto3 client for Kendra. """
def __init__(self, kendraindex, awsregion, k=3, return_source_documents=False, token=""):
self.kendraindex = kendraindex
self.awsregion = awsregion
self.k = k
self.token = token
self.return_source_documents = return_source_documents
self.kclient = kendra_client(self.kendraindex, self.awsregion)
def get_relevant_documents(self, query: str) -> List[Document]:
"""Run search on Kendra index and get top k documents
docs = get_relevant_documents('This is my query')
"""
docs = kendra_query(self.kclient, query, self.k, self.kendraindex, self.token)
return docs
async def aget_relevant_documents(self, query: str) -> List[Document]:
return await super().aget_relevant_documents(query) | [] |
2024-01-10 | crew-guy/datagod-backend | scripts~get_rag_answer.py | from scripts.kendra_index_retriever import KendraIndexRetriever
from langchain.chains import ConversationalRetrievalChain
from langchain.prompts import PromptTemplate
from langchain.chat_models import ChatOpenAI
import os
from dotenv import load_dotenv
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import json
load_dotenv()
chat = ChatOpenAI(model_name='gpt-3.5-turbo', temperature=0)
stop_words = set(stopwords.words('english'))
def preprocess_text(text):
tokens = word_tokenize(text)
filtered_tokens = [token for token in tokens if token.lower() not in stop_words and token.isalnum()]
preprocessed_text = ' '.join(filtered_tokens)
return preprocessed_text
MAX_HISTORY_LENGTH = 6
def build_chain(token):
region = os.environ["AWS_REGION"]
kendra_index_id = os.environ["KENDRA_INDEX_ID"]
retriever = KendraIndexRetriever(
kendraindex=kendra_index_id,
awsregion=region,
k=6,
return_source_documents=True,
token=token
)
prompt_template = """You are a chatbot answering questions over enterprise data. Here's the question you have been asked - {question}. From whatever limited information is given, your task is to retrieve the UNIQUE relevant documents and generate an answer. The response should be STRICTLY in the following JSON format.
{{
answer: answer string,
source_documents:[{{ source:string URL which is the metadata source of the feature, title: string - title of the source document, excerpt: string - excerpt of the source document }}] - where each source document has a unique title, document, string,summary:string which has to be technically sound
}}.
Here's some background context for your reference {context}. Give the solution in the requested JSON format:"""
PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
)
return ConversationalRetrievalChain.from_llm(llm=chat, retriever=retriever, condense_question_prompt=PROMPT, return_source_documents=True)
def run_chain(chain, prompt: str, history=[]):
preprocessed_query_text = preprocess_text(prompt)[:1000]
result = chain({"question": preprocessed_query_text, "chat_history": history})
return result
def get_rag_answer(query, token):
qa = build_chain(token)
chat_history = []
if (query.strip().lower().startswith("new search:")):
query = query.strip().lower().replace("new search:", "")
chat_history = []
elif (len(chat_history) == MAX_HISTORY_LENGTH):
chat_history.pop(0)
result = run_chain(qa, query, chat_history)
chat_history.append((query, result["answer"]))
source_docs = []
if 'source_documents' in result:
for d in result['source_documents']:
metadata = d.metadata
json_document = {
"source": metadata["source"],
"title": metadata["title"],
"excerpt": metadata["excerpt"]
}
source_docs.append(json_document)
# struc_answer = transform_json_string(restructure_answer(result['answer']))
return {'data':result['answer'], 'source_docs': source_docs}
| [
"question",
"You are a chatbot answering questions over enterprise data. Here's the question you have been asked - {question}. From whatever limited information is given, your task is to retrieve the UNIQUE relevant documents and generate an answer. The response should be STRICTLY in the following JSON format.\n {{\n answer: answer string,\n source_documents:[{{ source:string URL which is the metadata source of the feature, title: string - title of the source document, excerpt: string - excerpt of the source document }}] - where each source document has a unique title, document, string,summary:string which has to be technically sound\n }}. \n \n Here's some background context for your reference {context}. Give the solution in the requested JSON format:",
"context"
] |
2024-01-10 | crew-guy/datagod-backend | scripts~concept_map_maker%20copy.py | from langchain.chat_models import ChatOpenAI
import boto3
from langchain import LLMChain
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from graphviz import Digraph
from dotenv import load_dotenv
FILE_NAME = "conceptmap"
chat = ChatOpenAI(model_name='gpt-3.5-turbo-16k', temperature=0)
load_dotenv()
def get_concept_map_code(prompt):
template="For the following excerpt, generate code template that the `graphviz` library of python can process to make a concept map: ""{text}"". You are to answer the question in the following format: ""{content}"""
system_message_prompt = SystemMessagePromptTemplate.from_template(template)
human_template="""Don't return any explanation or supporting text. I want you to ONLY return the appropriate and exact "graphviz template code" for this map as your response"""
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])
chain = LLMChain(llm=chat, prompt=chat_prompt)
result= chain.run({'text': prompt, 'content': """Don't return any explanation or supporting text. I want you to ONLY return the appropriate and exact "graphviz template code" for this map as your response"""})
return result
def generate_diagram_image(openai_output_code):
dot = Digraph('ConceptGraph', format='png')
exec(openai_output_code)
print(dot.source)
dot.render(FILE_NAME, view=True)
def generate_concept_map_image_and_upload_to_s3(code, file_name, bucket_name):
# Create an S3 client
generate_diagram_image(code)
s3 = boto3.client('s3')
file_name= FILE_NAME + ".png"
# Uploads the given file using a managed uploader, which will split up the
# file if it's large and uploads parts in parallel.
s3.upload_file(file_name, bucket_name, file_name)
# Generate the URL for the uploaded file
s3_url = f"https://{bucket_name}.s3.amazonaws.com/concept-maps/{file_name}"
# Delete the file from local after uploading
os.remove(file_name)
return s3_url | [
"For the following excerpt, generate code template that the `graphviz` library of python can process to make a concept map: {text}. You are to answer the question in the following format: {content}",
"[PLACEHOLDER, PLACEHOLDER]",
"graphviz template code",
"Don't return any explanation or supporting text. I want you to ONLY return the appropriate and exact \"graphviz template code\" for this map as your response"
] |
2024-01-10 | crew-guy/datagod-backend | scripts~infograph_maker.py | from langchain.chat_models import ChatOpenAI
import boto3
from langchain import LLMChain
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from graphviz import Digraph
from dotenv import load_dotenv
FILE_NAME = "conceptmap"
chat = ChatOpenAI(model_name='gpt-3.5-turbo-16k', temperature=0)
load_dotenv()
def get_infograph_code(prompt):
template="For the following excerpt, generate code template that the `graphviz` library of python can process to make a concept map: ""{text}"". You are to answer the question in the following format: ""{content}"""
system_message_prompt = SystemMessagePromptTemplate.from_template(template)
human_template="""Don't return any explanation or supporting text. I want you to ONLY return the appropriate and exact "graphviz template code" for this map as your response"""
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])
chain = LLMChain(llm=chat, prompt=chat_prompt)
result= chain.run({'text': prompt, 'content': """Don't return any explanation or supporting text. I want you to ONLY return the appropriate and exact "graphviz template code" for this map as your response"""})
return result
def generate_diagram_image(openai_output_code):
dot = Digraph('ConceptGraph', format='png')
exec(openai_output_code)
print(dot.source)
dot.render(FILE_NAME, view=True)
def generate_infograph_image_and_upload_to_s3(code, file_name, bucket_name):
# Create an S3 client
generate_diagram_image(code)
s3 = boto3.client('s3')
file_name= FILE_NAME + ".png"
# Uploads the given file using a managed uploader, which will split up the
# file if it's large and uploads parts in parallel.
s3.upload_file(file_name, bucket_name, file_name)
# Generate the URL for the uploaded file
s3_url = f"https://{bucket_name}.s3.amazonaws.com/concept-maps/{file_name}"
# Delete the file from local after uploading
os.remove(file_name)
return s3_url | [
"For the following excerpt, generate code template that the `graphviz` library of python can process to make a concept map: {text}. You are to answer the question in the following format: {content}",
"[PLACEHOLDER, PLACEHOLDER]",
"graphviz template code",
"Don't return any explanation or supporting text. I want you to ONLY return the appropriate and exact \"graphviz template code\" for this map as your response"
] |
2024-01-10 | coskunlab/scSpaMet | src~spatial~tools~_spatial_lda.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Created on Fri Feb 26 19:47:10 2021
# @author: Ajit Johnson Nirmal
"""
!!! abstract "Short Description"
`sm.tl.spatial_lda`: The function allows users to compute a neighbourhood matrix
using any categorical variable (e.g. cell-types) as input and then perform
Latent Dirichlet Allocation (LDA) modelling. The latent space weights are then then
returned which can be clustered to identify Reccurent Cellular Neighbourhoods (RCNs).
Use the [spatial_cluster] function to further group the neighbourhoods into
Reccurent Cellular Neighbourhoods (RCNs)
## Function
"""
#Import
from sklearn.neighbors import BallTree
import numpy as np
import pandas as pd
import re
# Gensim
import gensim
import gensim.corpora as corpora
from gensim.models import CoherenceModel
# Function
def spatial_lda (adata, x_coordinate='X_centroid',y_coordinate='Y_centroid',
phenotype='phenotype', method='radius', radius=30, knn=10,
imageid='imageid',num_motifs=10, random_state=0, subset=None,
label='spatial_lda',**kwargs):
"""
Parameters:
adata : AnnData object
x_coordinate : float, required
Column name containing the x-coordinates values.
y_coordinate : float, required
Column name containing the y-coordinates values.
phenotype : string, required
Column name of the column containing the phenotype information.
It could also be any categorical assignment given to single cells.
method : string, optional
Two options are available: a) 'radius', b) 'knn'.
a) radius - Identifies the neighbours within a given radius for every cell.
b) knn - Identifies the K nearest neigbours for every cell.
radius : int, optional
The radius used to define a local neighbhourhood.
knn : int, optional
Number of cells considered for defining the local neighbhourhood.
imageid : string, optional
Column name of the column containing the image id.
subset : string, optional
imageid of a single image to be subsetted for analyis.
num_motifs : int, optional
The number of requested latent motifs to be extracted from the training corpus.
random_state : int, optional
Either a randomState object or a seed to generate one. Useful for reproducibility.
label : string, optional
Key for the returned data, stored in `adata.uns`.
Returns:
adata : AnnData object
Updated AnnData object with the results stored in `adata.uns ['spatial_lda']`.
Example:
```python
# Running the radius method
adata = sm.tl.spatial_lda (adata, num_motifs=10, radius=100)
```
"""
# Function
def spatial_lda_internal (adata_subset, x_coordinate,y_coordinate,phenotype,
method, radius, knn, imageid):
# Print which image is being processed
print('Processing: ' + str(np.unique(adata_subset.obs[imageid])))
# Create a DataFrame with the necessary inforamtion
data = pd.DataFrame({'x': adata_subset.obs[x_coordinate], 'y': adata_subset.obs[y_coordinate], 'phenotype': adata_subset.obs[phenotype]})
# Identify neighbourhoods based on the method used
# a) KNN method
if method == 'knn':
print("Identifying the " + str(knn) + " nearest neighbours for every cell")
tree = BallTree(data[['x','y']], leaf_size= 2)
ind = tree.query(data[['x','y']], k=knn, return_distance= False)
# b) Local radius method
if method == 'radius':
print("Identifying neighbours within " + str(radius) + " pixels of every cell")
kdt = BallTree(data[['x','y']], leaf_size= 2)
ind = kdt.query_radius(data[['x','y']], r=radius, return_distance=False)
# Map phenotype
phenomap = dict(zip(list(range(len(ind))), data['phenotype'])) # Used for mapping
for i in range(len(ind)):
ind[i] = [phenomap[letter] for letter in ind[i]]
if method == 'knn':
ind = ind.astype(str)
# return
return ind
# Subset a particular image if needed
if subset is not None:
adata_list = [adata[adata.obs[imageid] == subset]]
else:
adata_list = [adata[adata.obs[imageid] == i] for i in adata.obs[imageid].unique()]
# Apply function to all images
# Create lamda function
r_spatial_lda_internal = lambda x: spatial_lda_internal(adata_subset=x,
x_coordinate=x_coordinate,
y_coordinate=y_coordinate,
phenotype=phenotype,
method=method,
radius=radius,
knn=knn,
imageid=imageid)
all_data = list(map(r_spatial_lda_internal, adata_list)) # Apply function
# combine all the data into one
texts = np.concatenate( all_data, axis=0 ).tolist()
# LDA pre-processing
print ('Pre-Processing Spatial LDA')
# Create Dictionary
id2word = corpora.Dictionary(texts)
# Term Document Frequency
corpus = [id2word.doc2bow(text) for text in texts]
# Build LDA model
print ('Training Spatial LDA')
try:
lda_model = gensim.models.ldamulticore.LdaMulticore(corpus=corpus,
id2word=id2word,
num_topics=num_motifs,
random_state=random_state,**kwargs)
except:
lda_model = gensim.models.ldamodel.LdaModel(corpus=corpus,
id2word=id2word,
num_topics=num_motifs,
random_state=random_state,**kwargs)
# Compute Coherence Score
print ('Calculating the Coherence Score')
coherence_model_lda = CoherenceModel(model=lda_model, texts=texts, dictionary=id2word, coherence='c_v')
coherence_lda = coherence_model_lda.get_coherence()
print('\nCoherence Score: ', coherence_lda)
# isolate the latent features
print ('Gathering the latent weights')
topic_weights = []
for row_list in lda_model[corpus]:
tmp = np.zeros(num_motifs)
for i, w in row_list:
tmp[i] = w
topic_weights.append(tmp)
# conver to dataframe
arr = pd.DataFrame(topic_weights, index=adata.obs.index).fillna(0)
arr = arr.add_prefix('Motif_')
# isolate the weights of phenotypes
pattern = "(\d\.\d+).\"(.*?)\""
cell_weight = pd.DataFrame(index=np.unique(adata.obs[phenotype]))
for i in range(0, len(lda_model.print_topics())):
level1 = lda_model.print_topics()[i][1]
tmp = pd.DataFrame(re.findall(pattern, level1))
tmp.index = tmp[1]
tmp = tmp.drop(columns=1)
tmp.columns = ['Motif_'+ str(i)]
cell_weight = cell_weight.merge(tmp, how='outer', left_index=True, right_index=True)
# fill zeros
cell_weight = cell_weight.fillna(0).astype(float)
# save the results in anndata object
adata.uns[label] = arr # save the weight for each cell
adata.uns[str(label)+'_probability'] = cell_weight # weights of each cell type
adata.uns[str(label)+'_model'] = lda_model
# return
return adata | [] |
2024-01-10 | luckysanpedro/InputWindowAi | src~inputwindowai~window_programm.py | import os
import datetime
from PyQt5.QtWidgets import (
QApplication,
QWidget,
QVBoxLayout,
QHBoxLayout,
QTextEdit,
QLabel,
QPushButton,
QSizePolicy,
QSpacerItem,
QComboBox,
)
from PyQt5.QtCore import pyqtSignal, QPoint, Qt, QThread, QSize, QTimer
from PyQt5.QtGui import QIcon, QPixmap, QDropEvent, QDragEnterEvent, QMovie
# import assistant file:
from .assistant_conversation import main as assistant_conversation_main
from .instructions import InstructionsWidget
from .audio_generator import main as audio_main
# Assistant conversation utils:
from dotenv import load_dotenv
from openai import OpenAI
# Load your OpenAI API key
load_dotenv()
client = OpenAI()
# for the assistant to work:
initial_message = "Hi!"
assistant_id = assistant_id = os.getenv("AssID_englisch")
thread = None
user_input = ""
assistant_message = "Hello, I am the assistant. I am here to help you."
assistant_instructions = ""
# Define the input path for the GIF in the background
input_path_gif = "hamsterbackground.gif"
input_path_playbutton = os.getenv("input_path_playbutton")
# this class manages what happens when the user hits enter in the input field:
class Worker(QThread):
finished = pyqtSignal(str)
def __init__(self, user_input, assistant_id):
super().__init__()
self.user_input = user_input
self.assistant_id = assistant_id
def run(self):
try:
result = assistant_conversation_main(self.user_input, self.assistant_id)
self.finished.emit(result)
except Exception as e:
self.finished.emit(str(e))
class AudioWorker(QThread):
def __init__(self, assistant_message):
super().__init__()
self.assistant_message = assistant_message
def run(self):
audio_main(self.assistant_message)
# to accept control+enter as a new line:
class CustomTextEdit(QTextEdit):
def __init__(self, parent=None, assistant_window=None):
super().__init__(parent)
self.assistant_window = assistant_window
def keyPressEvent(self, event):
if event.key() == Qt.Key_Return and event.modifiers() == Qt.ControlModifier:
self.insertPlainText("\n")
elif event.key() == Qt.Key_Return and not event.modifiers():
self.assistant_window.on_enter()
else:
super().keyPressEvent(event)
class AssistantWindow(QWidget):
user_input_signal = pyqtSignal(str)
def __init__(self):
super().__init__()
w = 440
h = 420
# main window
self.resize(w, h)
# remove frame
self.setWindowFlag(Qt.FramelessWindowHint)
# make the main window transparent
self.setAttribute(Qt.WA_TranslucentBackground)
# make the window always stay on top
self.setWindowFlag(Qt.WindowStaysOnTopHint)
# round widget
self.round_widget = QWidget(self)
self.round_widget.resize(w, h)
self.round_widget.setStyleSheet(
"""
background:rgb(10, 10, 10);
border-radius: 30px;
"""
)
self.layout = QVBoxLayout(self.round_widget)
# Set the background as a gif image
self.movie = QMovie(input_path_gif)
self.background_label = QLabel(self.round_widget)
self.background_label.setMovie(self.movie)
self.background_label.setAlignment(Qt.AlignCenter) # Center the GIF
self.background_label.setScaledContents(
True
) # Resize the GIF to fit the QLabel
self.background_label.setSizePolicy(
QSizePolicy.Expanding, QSizePolicy.Expanding
) # Allow the label to expand
self.background_label.setGeometry(
10, 10, w - 20, h - 20
) # Set the geometry of the background_label to be slightly smaller than the round_widget
self.movie.start()
# Create a new layout for the other widgets
self.widget_layout = QVBoxLayout()
self.layout.addLayout(self.widget_layout)
# Create a layout for the input label and box
self.input_layout = QHBoxLayout()
self.layout.addLayout(self.input_layout)
# Add a spacer to the left of the input label
self.input_layout.addStretch()
# Add a QLabel widget for Input
self.input_label = QLabel("Input")
self.input_label.setAlignment(Qt.AlignCenter)
self.input_label.setFrameStyle(QLabel.Panel | QLabel.Raised)
self.input_label.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
# Set the font size
font = self.input_label.font()
font.setPointSize(font.pointSize() + 2)
self.input_label.setFont(font)
# Set the margins
self.input_label.setContentsMargins(
10, 0, 10, 0
) # Add 10px of space on the left and right
# Set the style sheet to make the edges rounded and font color white
self.input_label.setStyleSheet(
"""
border-radius: 10px;
color: white;
"""
)
# Add the input label to the input layout
self.input_layout.addWidget(self.input_label)
# Add a spacer to the right of the input label
self.input_layout.addStretch()
self.input_field = CustomTextEdit(assistant_window=self)
self.input_field.setStyleSheet(
"""
border-radius: 4px;
background-color: rgba(200, 200, 255, 0.9);
border: 1px solid black;
"""
)
self.input_field.textChanged.connect(self.adjust_input_field_height)
# define the size of the input field:
self.input_field.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)
self.input_field.setFixedHeight(30)
# Add the input field to the main layout
self.layout.addWidget(self.input_field)
# drag & drop everywhere:
# Enable drag and drop for this widget
self.setAcceptDrops(True)
# set the instructions widget:
self.instructions_widget = InstructionsWidget()
self.layout.addWidget(self.instructions_widget)
# Add a QLabel widget for drag and drop
self.drag_and_drop_label = QLabel("Drag&Drop")
self.drag_and_drop_label.setFrameStyle(QLabel.Panel | QLabel.Raised)
self.drag_and_drop_label.setStyleSheet(
"""
background-color: rgba(50, 50, 50, 1);
color: white;
border: 1px solid black;
border-radius: 10px;
"""
) # Add a border to the drag and drop field
self.drag_and_drop_label.setMaximumHeight(
40
) # Limit the height of the drag and drop field
# Set the font size to match the "Input" label
font = self.input_label.font()
self.drag_and_drop_label.setFont(font)
# Set the size policy to match the "Input" label
self.drag_and_drop_label.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
# Add the drag and drop label to the drag and drop layout
self.drag_and_drop_layout.addWidget(self.drag_and_drop_label)
# Add a stretch to the right of the drag and drop label
self.drag_and_drop_layout.addStretch()
# Create a layout for the output label and box
self.output_layout = QVBoxLayout()
self.layout.addLayout(self.output_layout)
# Add a QLabel widget for Output
self.output_label = QLabel("Output")
self.output_label.setAlignment(Qt.AlignCenter)
self.output_label.setFrameStyle(QLabel.Panel | QLabel.Raised)
self.output_label.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
# Set the font size
font = self.output_label.font()
font.setPointSize(font.pointSize() + 2)
self.output_label.setFont(font)
# Set the margins
self.output_layout.setSpacing(0)
self.output_label.setContentsMargins(
10, 0, 10, 0
) # Add 10px of space on the left and right
# Set the style sheet to make the edges rounded
self.output_label.setStyleSheet(
"""
border-radius: 10px;
"""
)
self.save_button.setFixedSize(75, 25) # Set the size of the button
# Connect the clicked signal to the on_save_button_clicked method
self.save_button.clicked.connect(self.on_save_button_clicked)
# Add the save button to the button layout
self.button_layout.addWidget(self.save_button)
# Add a spacer item with a fixed width of 5px
spacer_item = QSpacerItem(5, 20, QSizePolicy.Fixed, QSizePolicy.Minimum)
self.button_layout.addItem(spacer_item)
# Create a QPushButton widget for the play button
self.play_button = QPushButton()
self.play_button.setIcon(
QIcon(QPixmap(input_path_playbutton))
) # Set the icon of the button
self.play_button.setIconSize(QSize(70, 60)) # Set the size of the icon
self.play_button.setFixedSize(70, 60) # Set the size of the button
self.play_button.setStyleSheet(
"""
border: none;
background-color: transparent;
"""
) # Remove the border of the button
# Connect the clicked signal to the on_play_button_clicked method
self.play_button.clicked.connect(self.on_play_button_clicked)
# Add the play button to the button layout
self.button_layout.addWidget(self.play_button)
# Add the button layout to the output layout
self.output_layout.addLayout(self.button_layout)
# Add a spacer item with a fixed height of 5px
spacer_item = QSpacerItem(20, 5, QSizePolicy.Minimum, QSizePolicy.Fixed)
self.output_layout.addItem(spacer_item)
# Add a QTextEdit widget to display output text
self.output_field = QTextEdit()
self.output_field.setReadOnly(True) # Make the output field read-only
self.output_field.setStyleSheet(
"""
background-color: rgba(255, 255, 255, 0.8);
border: 2px solid black;
border-radius: 20px;
"""
) # Add a border to the output field
self.output_field.setMaximumHeight(
190
) # Limit the maximum height of the output field
self.output_field.setMinimumHeight(
60
) # Set the initial height of the output field
# Get the current font
current_font = self.output_field.font()
# Increase the font size by 1
current_font.setPointSize(current_font.pointSize() + 1)
# Set the new font to the output_field widget
self.output_field.setFont(current_font)
# Add the output field to the output layout
self.output_layout.addWidget(self.output_field)
# Add minimize and close buttons
self.button_layout = QHBoxLayout()
self.layout.addLayout(self.button_layout)
self.minimize_button = QPushButton("Minimize")
self.minimize_button.clicked.connect(self.showMinimized)
self.minimize_button.setStyleSheet(
"""
background-color: rgba(55, 255, 255, 0.8);
border-radius: 3px;
"""
)
# Add a solid background to the minimize button
self.button_layout.addWidget(self.minimize_button)
# Create a QComboBox widget for the dropdown menu
self.dropdown_menu = QComboBox()
self.dropdown_menu.addItem("English Assistant")
self.dropdown_menu.addItem("Chemistry Assistant")
self.dropdown_menu.addItem("Julian's Atze")
self.dropdown_menu.currentIndexChanged.connect(self.handle_dropdown_selection)
# Set the initial GIF based on the current selection in the dropdown menu
self.handle_dropdown_selection(self.dropdown_menu.currentIndex())
# set style sheet for the dropdown menu:
self.dropdown_menu.setStyleSheet(
"""
background-color: rgba(55, 255, 255, 0.8);
color: red;
border-radius: 5px;
"""
)
# Add the dropdown menu to the button layout
self.button_layout.addWidget(self.dropdown_menu)
self.close_button = QPushButton("Close")
self.close_button.clicked.connect(self.close)
self.close_button.setStyleSheet(
"""
background-color: rgba(55, 255, 255, 0.8);
border-radius: 3px;
"""
) # Add a solid background to the close button
self.button_layout.addWidget(self.close_button)
# this is for moving the whole window around on the screen:
# Add these lines to initialize the position
self.oldPos = self.pos()
def mousePressEvent(self, event):
if self.background_label.geometry().contains(event.pos()):
self.oldPos = event.globalPos()
def mouseMoveEvent(self, event):
if self.background_label.geometry().contains(event.pos()):
delta = QPoint(event.globalPos() - self.oldPos)
self.move(self.x() + delta.x(), self.y() + delta.y())
self.oldPos = event.globalPos()
# this is for the drag and drop functionality:
# Override the dragEnterEvent method
def dragEnterEvent(self, event: QDragEnterEvent):
if event.mimeData().hasUrls(): # If the drag event contains URLs (file paths)
event.accept() # Accept the drag event
else:
event.ignore() # Ignore the drag event
# Override the dropEvent method
def dropEvent(self, event: QDropEvent):
file_path = event.mimeData().urls()[0].toLocalFile() # Get the file path
self.input_field.setText(file_path) # Set the file path as the input field text
# for the optiic of the input field:
def adjust_input_field_height(self):
num_lines = len(self.input_field.toPlainText().split("\n"))
new_height = min(10 + num_lines * 20, 60)
self.input_field.setFixedHeight(new_height)
# add functionality to add a new line when the user hits ctrl+enter:
def keyPressEvent(self, event):
if event.key() == Qt.Key_Return and event.modifiers() == Qt.ControlModifier:
self.input_field.insert("\n")
else:
super().keyPressEvent(event)
# what is displayed in the output field:
def display_output(self, assistant_message: str):
self.output_field.append(
assistant_message
) # Append the text to the output field
# Adjust the height of the output field based on the number of text lines
num_lines = len(self.output_field.toPlainText().split("\n"))
new_height = min(60 + num_lines * 20, 190)
self.output_field.setFixedHeight(new_height)
# when an input is entered / the user hits enter: (worker thread starts)
def on_enter(self):
global assistant_id
user_input = self.input_field.toPlainText().rstrip("\n")
assistant_instructions = self.instructions_widget.get_current_instructions()
full_input = assistant_instructions + "\n" + user_input
if full_input.strip():
self.input_field.clear()
# Create a Worker instance
self.worker = Worker(full_input, assistant_id)
# Connect the finished signal to a slot
self.worker.finished.connect(self.on_worker_finished)
# Start the worker thread
self.worker.start()
# Display the user input in the output field
def on_worker_finished(self, result):
global assistant_message
assistant_message = result
# Display the result in the output field
self.display_output(result)
def handle_dropdown_selection(self, index):
global assistant_message
global assistant_id
global input_path_gif
if index == 0:
# Call the function for the English Assistant
print("English Assistant")
input_path_gif = "assets/hamsterenglisch.gif"
assistant_id = os.getenv("AssID_Englisch")
elif index == 1:
# Call the function for the Chemistry Assistant
print("Chemistry Assistant")
input_path_gif = "assets/hamsterbackground.gif"
assistant_id = os.getenv("AssID_Chemie")
elif index == 2:
print("Julian's Atze")
input_path_gif = "assets/atze.gif"
assistant_id = os.getenv("AssID_Atze")
# Create a new QMovie object with the new GIF
self.movie = QMovie(input_path_gif)
# Set the new QMovie object to the QLabel
self.background_label.setMovie(self.movie)
# Start the new QMovie
self.movie.start()
def on_play_button_clicked(self):
global assistant_message
self.audio_worker = AudioWorker(assistant_message)
self.audio_worker.start()
self.save_button.setStyleSheet(
"""
border-radius: 3px;
color: white;
background-color: black;
"""
)
def on_save_button_clicked(self):
# Get the current date and time
current_datetime = datetime.now()
# Format the current date and time as a string
datetime_str = current_datetime.strftime("%Y%m%d_%H%M%S")
# Create the new filename
new_filename = f"Audio_{datetime_str}.mp3"
# Rename the file
os.rename("speech.mp3", new_filename)
self.save_button.setStyleSheet(
"""
border-radius: 3px;
color: transparent;
background-color: transparent;
"""
)
def fact_check(self):
global assistant_message
global assistant_id
user_input = "Prüfe ob was du mir gesagt hast stimmt."
# Create a Worker instance
self.worker = Worker(user_input, assistant_id)
# Connect the finished signal to a slot
self.worker.finished.connect(self.on_worker_finished)
# Start the worker thread
self.worker.start()
# Call the display_output method
self.display_output(assistant_message)
def on_save_text_button_clicked(self):
# Get the current date and time
current_datetime = datetime.now()
# Format the current date and time as a string
datetime_str = current_datetime.strftime("%Y%m%d_%H%M%S")
# Create the new filename
new_filename = f"Answer_{datetime_str}.txt"
# Write the assistant message to the file
with open(new_filename, "w") as f:
f.write(assistant_message)
# Change the background color of the save text button to green
self.save_text_button.setStyleSheet(
"""
border: none;
background-color: green;
"""
)
# Change the background color back to transparent after 500 milliseconds
QTimer.singleShot(
500,
lambda: self.save_text_button.setStyleSheet(
"""
border: none;
background-color: transparent;
"""
),
)
| [] |
2024-01-10 | luckysanpedro/InputWindowAi | src~inputwindowai~audio_generator.py | from dotenv import load_dotenv
from openai import OpenAI
import pygame
from pathlib import Path
assistant_message = " Hello, I am the assistant. I am here to help you."
load_dotenv()
client = OpenAI()
def create_audio_from_text(assistant_message, filename="speech.mp3"):
speech_file_path = Path(__file__).parent / filename
audio_response = client.audio.speech.create(
model="tts-1", voice="echo", input=assistant_message
)
audio_response.stream_to_file(speech_file_path)
def play_audio(filename):
pygame.mixer.init()
pygame.mixer.music.load(filename)
pygame.mixer.music.play()
while pygame.mixer.music.get_busy():
pygame.time.wait(1000) # wait one second
pygame.mixer.music.stop() # stop the music
pygame.mixer.music.unload() # unload the current music
# Create audio from the text in the response
def main(assistant_message):
create_audio_from_text(assistant_message)
play_audio("speech.mp3")
if __name__ == "__main__":
main(assistant_message)
| [] |
2024-01-10 | nadavc2c/TA-ob-openai-chatgpt | TA-ob-openai-chatgpt~bin~obopenai.py | #!/usr/bin/env python
import json
import sys
import os
import itertools
from time import sleep
from configparser import ConfigParser
import xml.etree.ElementTree as ElementTree
from re import sub
# import after PATH update on purpose
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "lib"))
from splunklib.searchcommands import dispatch, StreamingCommand, Configuration, Option
import openai
app_folder = os.path.basename(os.path.dirname(os.path.dirname(__file__)))
custom_conf_file = sub(r'\W+', '_', app_folder.lower() + "_settings")
@Configuration()
class ObopenaiCommand(StreamingCommand):
""" Send prompts to ChatGPT
##Syntax
obopenai prompt=<string> (mode=(dlp))? (model=(gpt-35-turbo))? (temperature=(number))?
##Description
Send prompts to the local OpenAI proxy of OB
"""
# available modes: dlp,
mode = Option()
prompt = Option(require=True)
conversation = Option()
model = Option()
temperature = Option()
maxrows = Option()
maxtokens = Option()
system_role = Option()
sleep_time = Option()
setuser = Option()
# response_field = Option() "I'm not going to write those 10 lines of code until necessary or someone will pay me"
# session_key = Option() "imagine doing the logical thing and not sending the entire chat history everytime"
def _set_chat_role(self):
with open('modes.json', 'r') as file:
json_modes = json.load(file)
if self.mode:
if self.system_role:
raise ValueError("You can only choose one of 'mode' or 'system_role', not both.")
try:
chat_system_role = json_modes[self.mode]
except KeyError:
# find your mode in the kvstore
modes_kvstore = self.service.kvstore['TA_ob_openai_chatgpt_system_modes'].data.query()
for item in modes_kvstore:
if item['mode_name'] == self.mode:
chat_system_role = item['system_prompt']
break
else:
chat_system_role = "you are an Indian cook that knows only how to cook and" \
" nothing else. you will not " \
"answer anything that is not related to cooking. act as an Indian cook."
elif self.system_role:
chat_system_role = self.system_role
else:
chat_system_role = None
return chat_system_role
def _set_conf_settings(self):
# get collection
obopenai_settings_conf = self.service.confs[custom_conf_file]["additional_parameters"]
try:
if obopenai_settings_conf["api_base"]:
openai.api_base = obopenai_settings_conf["api_base"]
except AttributeError or KeyError:
pass
openai.organization = obopenai_settings_conf["organization_id"]
openai.api_key = self._get_decrypted_password()
def _get_decrypted_password(self):
password_xml = self.service.storage_passwords.get(app=app_folder)['body']
for element in ElementTree.fromstring(str(password_xml)).findall(".//*[@name='clear_password']"):
try:
api_dict = json.loads(element.text)
if 'api_key' in api_dict:
clear_text_password = api_dict['api_key']
break
except json.JSONDecodeError:
pass
else:
raise ValueError("No password was found")
return clear_text_password
# override
def stream(self, events):
# From .conf
self._set_conf_settings()
# From arguments
model = self.model or "gpt-3.5-turbo"
# maxrows is per batch of 50,000. set to 10 to not violate license by mistake. 0 to limitless.
maxrows = self.maxrows or 5 # walrus only in 3.8
maxrows = None if maxrows == 0 else maxrows
maxtokens = self.maxtokens
temperature = self.temperature
system_role = self._set_chat_role()
sleep_time = self.sleep_time if self.sleep_time else 0
user = self.setuser or self.service.confs[custom_conf_file]["additional_parameters"]['default_user']
organization = self.service.confs[custom_conf_file]["additional_parameters"]['organization_id']
for event in itertools.islice(events, maxrows):
messages = []
if self.mode == 'conv':
messages.append(json.loads(event[self.conversation]))
elif system_role:
messages.append([{'role': 'system', 'content': system_role}])
messages.append({'role': 'user', 'content': event[self.prompt]})
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=temperature,
user=user,
organization=organization,
max_tokens=maxtokens)
self.add_field(event, 'gpt_response', response)
if self.mode == 'conv':
messages.append(response['choices'][0]['message'])
self.add_field(event, self.conversation, messages)
sleep(sleep_time) if sleep_time else None
yield event
dispatch(ObopenaiCommand, sys.argv, sys.stdin, sys.stdout, __name__)
| [] |
2024-01-10 | izzortsi/gpt-stuff | cli~main_nb.py | #%%
import openai
import os
import time
import sys
import json
from dataclasses import dataclass
from typing import List
from text_generation import generate, complete
openai.api_key = os.environ.get("OPEN_AI_FREE_API_KEY")
openai.api_base = 'https://api.pawan.krd/v1'
SYS_PROMPT = """You are a personal assistant. Your goal is to help me organize my life
and make me more productive. I will message you things like tasks I have to do, ideas that come to my mind,
projects I want to work on, and so on. I will also ask you questions about topics I am interested in
or that would be helpful for me to know, for instance, to accomplish a task I have to do.
You will have to organize all this information and help me make sense of it. For instance, you could
create a to-do list for me, or a list of ideas I have had, or a list of projects I want to work on. You should also remember
what I have told you and be able to answer questions about it."""
class GPT:
def __init__(self, sys_prompt=SYS_PROMPT, model="gpt-3.5-turbo", temperature = 1):
self._sys_messages = [{"role": "system", "content": sys_prompt}]
self._messages = []
self.response = ""
self._model = model
self._temperature = temperature
def set_system(self, sys_prompt):
self._sys_messages = [{"role": "system", "content": sys_prompt}]
def add_system(self, sys_prompt):
self._sys_messages.append({"role": "system", "content": sys_prompt})
def completion(self, prompt, role = "user", chat=False):
messages = self._sys_messages + [{"role": role, "content": prompt}]
response = openai.ChatCompletion.create(
model=self._model,
messages=messages,
temperature=self._temperature, # this is the degree of randomness of the model's output
max_tokens=1000,
)
self.response = response.choices[0].message["content"]
if chat:
self._messages = messages + [{"role": "assistant", "content": self.response}]
return self.response
def chat(gpt):
while True:
prompt = input("You: ")
if prompt == "exit":
break
print("Bot:", gpt.completion(prompt, chat=True))
GPT.chat = chat
#%%
if __name__ == "__main__":
gpt = GPT()
if len(sys.argv) > 1:
gpt.chat()
# %%
| [
"self._sys_messages + [{\"role\": role, \"content\": prompt}]",
"You are a personal assistant. Your goal is to help me organize my life\n and make me more productive. I will message you things like tasks I have to do, ideas that come to my mind,\n projects I want to work on, and so on. I will also ask you questions about topics I am interested in \n or that would be helpful for me to know, for instance, to accomplish a task I have to do. \n You will have to organize all this information and help me make sense of it. For instance, you could\n create a to-do list for me, or a list of ideas I have had, or a list of projects I want to work on. You should also remember\n what I have told you and be able to answer questions about it.",
"You: "
] |
2024-01-10 | izzortsi/gpt-stuff | synt~synt_gpt.py | #%%
import openai
import os
import time
import sys
import json
from dataclasses import dataclass
from typing import List
from text_generation import generate, complete
# openai.api_key = os.environ.get("OPEN_AI_API_KEY")
# openai.api_base = 'https://api.openai.com/v1'
# MODEL = "gpt-3.5-turbo-0613"
openai.api_key = os.environ.get("OPEN_AI_FREE_API_KEY")
openai.api_base = 'https://api.pawan.krd/v1'
MODEL = "gpt-3.5-turbo"
#You will have to organize all this information and help me make sense of it.
SYS_PROMPT = """You are a smart text expander. Your goal is to expand poorly given instructions for tasks into a list of instructions that achieves the objective implied by the poorly given instructions.
I will give you some examples of the instructions that you will be given bellow, delimited by backticks. Remember them:
`so tasks
1. run that script and check output (it doesnt have to run to completion)
2. modify stable diffusion library so we can feed in noise vector by hand
3. look at the model.py thing
- see if we can make a file to load the unet, the text encoder and the auto-encoder as seperate/independent classes
- so we can load one and use only one without running whole model`
`1. revert changes
2. make a new class
- one class, in one file, for each of the models
- encode/decode for auto-encoder, etc
3. function for load, unload
4. function for batched vs single input
5. In Scripts
- make a new example
- where you load the 3 classes
- then encode the prompt
- then run denoising 20 times
- then output latent to image
-- function to save image
6. Make sure the inner loop, we can feed in noise vector directly
But dont change the old pipeline files yet`
`in the main /existing work flow; you use all three
- unet
- denoiser
- clip
in one run
But in the alternative work flow
- you load clip
-- run 1024 times
- unload clip
- you load denoiser
-- you run 1024 times
- unload denoise
- you load encoder
-- you run 1024 times
- unload encoder
So its "Batched" so you are not switching between which network is used during runtime`
`Ticket:
1. copy the files over
- duplicate it
2. Make function
- loads whole model with first copy
3. Make function
- saves 3 models
-- embedding
-- unet
-- variational auto encoder
as seperate files to output
4. Load those files with second model
- and track the tile
- each model is a seperate file
-- model_clip.y
-- model_unet.py
-- model_vae.py
Each model class must have a Load and Unload function. No loading model on init.
5. Use safe tensor, not .cpt; it might be faster and is the newer format
6. Do test to make sure we are getting same result`
Expand each of the instructions I gave as an example. Remember them as a list with objects of the format: <poorly given instructions>: <expansion>
Call this list the database.
Using the database as a reference, expand the instructions I will give you in future messages. Put them in the database, with the format: <poorly given instructions>: <expansion>
If a message starts with @, it is a command.
If a message is not a command, it will be a poorly given instruction for a task, delimited by backticks.
The following commands are available:
@show: show me the database, in the format: <poorly given instructions>: <expansion>
@retrieve <word>: retrieve all elements from the database that contain <word>. Show them in the format: <poorly given instructions>: <expansion>.
"""
TEMPERATURE = 0.5
class GPT:
def __init__(self, sys_prompt=SYS_PROMPT, model=MODEL, temperature = TEMPERATURE):
self._sys_messages = [{"role": "system", "content": sys_prompt}]
self._messages = self._sys_messages
self.response = ""
self._model = model
self._temperature = temperature
def set_system(self, sys_prompt):
self._sys_messages = [{"role": "system", "content": sys_prompt}]
def add_system(self, sys_prompt):
self._sys_messages.append({"role": "system", "content": sys_prompt})
def completion(self, prompt, role = "user", chat=False):
user_message = [{"role": role, "content": prompt}]
self._messages += user_message
response = openai.ChatCompletion.create(
model=self._model,
messages=self._messages,
temperature=self._temperature, # this is the degree of randomness of the model's output
max_tokens=1000,
)
self.response = response.choices[0].message["content"]
self._messages += [{"role": "assistant", "content": self.response}]
return self.response
def chat(gpt):
while True:
prompt = input("You: ")
if prompt == "exit":
break
print("Bot:", gpt.completion(prompt, chat=True))
GPT.chat = chat
#%%
if __name__ == "__main__":
gpt = GPT()
if len(sys.argv) > 1:
gpt.chat()
# %%
# gpt = GPT()
# #%%
# gpt.completion("I have to do the dishes", role="user")
# %%
| [
"You: ",
"You are a smart text expander. Your goal is to expand poorly given instructions for tasks into a list of instructions that achieves the objective implied by the poorly given instructions.\n I will give you some examples of the instructions that you will be given bellow, delimited by backticks. Remember them:\n \n `so tasks\n\n 1. run that script and check output (it doesnt have to run to completion)\n\n 2. modify stable diffusion library so we can feed in noise vector by hand\n\n 3. look at the model.py thing\n - see if we can make a file to load the unet, the text encoder and the auto-encoder as seperate/independent classes\n - so we can load one and use only one without running whole model`\n\n `1. revert changes\n 2. make a new class\n - one class, in one file, for each of the models\n - encode/decode for auto-encoder, etc\n \n 3. function for load, unload\n \n 4. function for batched vs single input\n \n 5. In Scripts\n - make a new example\n - where you load the 3 classes\n - then encode the prompt\n - then run denoising 20 times\n - then output latent to image\n -- function to save image\n \n 6. Make sure the inner loop, we can feed in noise vector directly\n \n But dont change the old pipeline files yet`\n\n `in the main /existing work flow; you use all three\n - unet\n - denoiser\n - clip\n\n in one run\n\n But in the alternative work flow\n\n - you load clip\n -- run 1024 times\n - unload clip\n\n - you load denoiser\n -- you run 1024 times\n - unload denoise\n\n - you load encoder\n -- you run 1024 times\n - unload encoder\n\n\n So its \"Batched\" so you are not switching between which network is used during runtime`\n\n\n `Ticket:\n\n 1. copy the files over\n - duplicate it\n\n 2. Make function\n - loads whole model with first copy\n\n 3. Make function\n - saves 3 models\n -- embedding\n -- unet\n -- variational auto encoder\n\n as seperate files to output\n\n 4. Load those files with second model\n - and track the tile\n - each model is a seperate file\n -- model_clip.y\n -- model_unet.py\n -- model_vae.py\n\n Each model class must have a Load and Unload function. No loading model on init.\n\n 5. Use safe tensor, not .cpt; it might be faster and is the newer format\n\n 6. Do test to make sure we are getting same result`\n\n Expand each of the instructions I gave as an example. Remember them as a list with objects of the format: <poorly given instructions>: <expansion>\n Call this list the database.\n Using the database as a reference, expand the instructions I will give you in future messages. Put them in the database, with the format: <poorly given instructions>: <expansion>\n \n If a message starts with @, it is a command. \n If a message is not a command, it will be a poorly given instruction for a task, delimited by backticks.\n The following commands are available:\n @show: show me the database, in the format: <poorly given instructions>: <expansion>\n @retrieve <word>: retrieve all elements from the database that contain <word>. Show them in the format: <poorly given instructions>: <expansion>.\n "
] |
2024-01-10 | izzortsi/gpt-stuff | text_generation.py | #%%
import openai
import re
import os
# from dotenv import load_dotenv
# from transformers import pipeline
# Load environment variables from .env file
# load_dotenv()
# Get OpenAI API key from environment variables
openai.api_key = os.environ.get("OPEN_AI_FREE_API_KEY")
openai.api_base = 'https://api.pawan.krd/v1'
from openai import Completion
MODEL = "gpt-3.5-turbo"
def complete(prompt: str, max_tokens: int | None = None) -> str:
"""`max_tokens` is tokens after prompt"""
completion = Completion.create(model=MODEL, prompt=prompt, max_tokens=max_tokens)
return completion.choices[0].text
def generate(prompt, use_openai=True):
"""
Generates a text completion for a given prompt using either the OpenAI GPT-3 API or the Hugging Face GPT-3 model.
Args:
- prompt (str): The text prompt to generate a completion for.
- use_openai (bool): A boolean flag indicating whether to use the OpenAI API (True) or the Hugging Face GPT-3 model (False).
Returns:
- str: The generated text completion.
"""
if use_openai:
model_engine = "text-davinci-002"
response = openai.Completion.create(
engine=model_engine,
prompt=prompt,
max_tokens=1024,
n=1,
stop=None,
temperature=0.5,
)
message = response.choices[0].text
return message.strip()
def get_rating(x):
"""
Extracts a rating from a string.
Args:
- x (str): The string to extract the rating from.
Returns:
- int: The rating extracted from the string, or None if no rating is found.
"""
nums = [int(i) for i in re.findall(r'\d+', x)]
if len(nums)>0:
return min(nums)
else:
return None
# Summarize simulation loop with OpenAI GPT-4
def summarize_simulation(log_output):
prompt = f"Summarize the simulation loop:\n\n{log_output}"
response = generate(prompt)
return response
# %%
| [
"Summarize the simulation loop:\n\nPLACEHOLDER"
] |
2024-01-10 | chiragjn/texar-pytorch | texar~torch~modules~pretrained~gpt2.py | # Copyright 2019 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utils of GPT2 Modules.
"""
import json
import os
import warnings
from abc import ABC
from typing import Any, Dict
import torch
from texar.torch.modules.pretrained.pretrained_base import PretrainedMixin
__all__ = [
"PretrainedGPT2Mixin",
]
_GPT2_PATH = "https://storage.googleapis.com/gpt-2/models/"
_CHECKPOINT_FILES = [
"checkpoint", "encoder.json", "hparams.json", "vocab.bpe",
"model.ckpt.data-00000-of-00001", "model.ckpt.index", "model.ckpt.meta"]
class PretrainedGPT2Mixin(PretrainedMixin, ABC):
r"""A mixin class to support loading pre-trained checkpoints for modules
that implement the GPT2 model.
The GPT2 model was proposed in
`Language Models are Unsupervised Multitask Learners`_
by `Radford et al.` from OpenAI. It is a unidirectional Transformer model
pre-trained using the vanilla language modeling objective on a large corpus.
The available GPT2 models are as follows:
* ``gpt2-small``: Small version of GPT-2, 124M parameters.
* ``gpt2-medium``: Medium version of GPT-2, 355M parameters.
* ``gpt2-large``: Large version of GPT-2, 774M parameters.
* ``gpt2-xl``: XL version of GPT-2, 1558M parameters.
We provide the following GPT2 classes:
* :class:`~texar.torch.modules.GPT2Encoder` for text encoding.
* :class:`~texar.torch.modules.GPT2Decoder` for text generation and
decoding.
* :class:`~texar.torch.modules.GPT2Classifier` for text classification and
sequence tagging.
.. _`Language Models are Unsupervised Multitask Learners`:
https://openai.com/blog/better-language-models/
"""
_MODEL_NAME = "GPT2"
_MODEL2URL = {
'gpt2-small': [_GPT2_PATH + f"124M/{file}"
for file in _CHECKPOINT_FILES],
'gpt2-medium': [_GPT2_PATH + f"355M/{file}"
for file in _CHECKPOINT_FILES],
'gpt2-large': [_GPT2_PATH + f"774M/{file}"
for file in _CHECKPOINT_FILES],
'gpt2-xl': [_GPT2_PATH + f"1558M/{file}"
for file in _CHECKPOINT_FILES],
}
_IS_DECODE = False
# Raise warning for the deprecated pre-trained model names
class MyDict(dict):
def __contains__(self, key):
if key == '117M':
warnings.warn("Pre-trained model name '117M' is deprecated, "
"use 'gpt2-small' instead.", UserWarning)
return True
elif key == '345M':
warnings.warn("Pre-trained model name '345M' is deprecated, "
"use 'gpt2-medium' instead.", UserWarning)
return True
else:
return super().__contains__(key)
_DEPRECATED_MODEL2URL = {
'117M': [_GPT2_PATH + f"124M/{file}" for file in _CHECKPOINT_FILES],
'345M': [_GPT2_PATH + f"355M/{file}" for file in _CHECKPOINT_FILES],
}
_MODEL2URL.update(_DEPRECATED_MODEL2URL)
_MODEL2URL = MyDict(_MODEL2URL) # type: ignore
def _transform_config(self, pretrained_model_name: str, # type: ignore
cache_dir: str) -> Dict[str, Any]:
info = list(os.walk(cache_dir))
root, _, files = info[0]
config_path = None
for file in files:
if file.endswith('hparams.json'):
config_path = os.path.join(root, file)
if config_path is None:
raise ValueError(f"Cannot find the config file in {cache_dir}")
with open(config_path) as f:
config_gpt = json.loads(f.read())
hidden_dim = config_gpt["n_embd"]
configs = {
"vocab_size": config_gpt["n_vocab"],
"context_size": config_gpt["n_ctx"],
"embedding_size": config_gpt["n_embd"], "embed": {
"dim": hidden_dim,
},
"position_size": config_gpt["n_ctx"],
"position_embed": {
"dim": hidden_dim
}
}
module_name = 'decoder' if self._IS_DECODE else 'encoder'
configs.update({module_name: {
"dim": hidden_dim,
"num_blocks": config_gpt["n_layer"],
"embedding_dropout": 0,
"residual_dropout": 0,
"multihead_attention": {
"use_bias": True,
"num_units": hidden_dim,
"num_heads": config_gpt["n_head"],
"output_dim": hidden_dim,
},
"initializer": {
"type": "variance_scaling_initializer",
"kwargs": {
"factor": 1.0,
"mode": "FAN_AVG",
"uniform": True,
},
},
"poswise_feedforward": {
"layers": [
{
"type": "Linear",
"kwargs": {
"in_features": hidden_dim,
"out_features": hidden_dim * 4,
"bias": True,
}
},
{
"type": "GPTGELU",
"kwargs": {}
},
{
"type": "Linear",
"kwargs": {
"in_features": hidden_dim * 4,
"out_features": hidden_dim,
"bias": True,
}
}
],
"name": "ffn",
},
}})
if self._IS_DECODE:
configs[module_name].update({'use_gpt_config': True})
else:
configs[module_name].update({'use_bert_config': False})
return configs
def _init_from_checkpoint(self, pretrained_model_name: str,
cache_dir: str,
load_output_layer: bool = True, **kwargs):
r"""Initialize model parameters from weights stored in the pre-trained
checkpoint.
Args:
pretrained_model_name (str): Name of the pre-trained model.
cache_dir (str): Path to the cache directory.
load_output_layer (bool): If `False`, will not load weights of the
output layer. Set this argument to `False` when loading weights
into a GPT2 encoder. Defaults to `True`.
"""
try:
import numpy as np
import tensorflow as tf
except ImportError:
print("Loading TensorFlow models in PyTorch requires installing "
"TensorFlow. Please see https://www.tensorflow.org/install/ "
"for installation instructions.")
raise
module_name = 'decoder' if self._IS_DECODE else 'encoder'
global_tensor_map = {
"model/wte": "word_embedder.embedding",
"model/wpe": "position_embedder.embedding",
"model/ln_f/b": module_name + ".final_layer_norm.bias",
"model/ln_f/g": module_name + ".final_layer_norm.weight",
}
layer_tensor_map = {
"ln_1/b": module_name + ".self_attn_layer_norm.{}.bias",
"ln_1/g": module_name + ".self_attn_layer_norm.{}.weight",
"ln_2/b": module_name + ".poswise_layer_norm.{}.bias",
"ln_2/g": module_name + ".poswise_layer_norm.{}.weight",
"mlp/c_fc/b": module_name + ".poswise_networks.{}._layers.0.bias",
"mlp/c_proj/b": module_name + ".poswise_networks.{}._layers.2.bias",
"attn/c_proj/b": module_name + ".self_attns.{}.O_dense.bias",
}
layer_transpose_map = {
"mlp/c_fc/w": module_name + ".poswise_networks.{}._layers.0.weight",
"mlp/c_proj/w": module_name + ".poswise_networks.{}._layers.2."
"weight",
"attn/c_proj/w": module_name + ".self_attns.{}.O_dense.weight",
}
tf_path = os.path.abspath(os.path.join(cache_dir, 'model.ckpt'))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, _ in init_vars:
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array.squeeze())
tensor_names = []
for name, _ in self.named_parameters():
tensor_names.append(name)
for name, array in zip(names, arrays):
if name in global_tensor_map:
v_name = global_tensor_map[name]
if name == "model/wte":
pointer = self._name_to_variable(v_name)
assert pointer.shape == array.shape
pointer.data = torch.from_numpy(array)
if load_output_layer:
output_pointer = self._name_to_variable(
"decoder._output_layer.weight")
assert output_pointer.shape == array.shape
output_pointer.data = torch.from_numpy(array)
elif name == "model/wpe":
pointer = self._name_to_variable(v_name)
assert pointer.shape == array.shape
pointer.data = torch.from_numpy(array)
else:
pointer = self._name_to_variable(v_name)
assert pointer.shape == array.shape
pointer.data = torch.from_numpy(array)
else:
name_tmp = name.split("/")
layer_no = name_tmp[1][1:]
name = "/".join(name_tmp[2:])
if name in layer_tensor_map:
v_name = layer_tensor_map[name].format(layer_no)
pointer = self._name_to_variable(v_name)
assert pointer.shape == array.shape
pointer.data = torch.from_numpy(array)
elif name in layer_transpose_map:
v_name = layer_transpose_map[name].format(layer_no)
pointer = self._name_to_variable(v_name)
array_t = np.transpose(array)
assert pointer.shape == array_t.shape
pointer.data = torch.from_numpy(array_t)
elif name == "attn/c_attn/w":
index_d = array.shape[-1] // 3
Q_w = np.transpose(array[:, :index_d])
K_w = np.transpose(array[:, index_d: 2 * index_d])
V_w = np.transpose(array[:, 2 * index_d:])
q_weight = self._name_to_variable(
f"{module_name}.self_attns.{layer_no}.Q_dense.weight")
k_weight = self._name_to_variable(
f"{module_name}.self_attns.{layer_no}.K_dense.weight")
v_weight = self._name_to_variable(
f"{module_name}.self_attns.{layer_no}.V_dense.weight")
assert q_weight.shape == Q_w.shape
assert k_weight.shape == K_w.shape
assert v_weight.shape == V_w.shape
q_weight.data = torch.from_numpy(Q_w)
k_weight.data = torch.from_numpy(K_w)
v_weight.data = torch.from_numpy(V_w)
elif name == "attn/c_attn/b":
d = array.shape[0]
Q_b = array[: d // 3]
K_b = array[d // 3: 2 * d // 3]
V_b = array[2 * d // 3:]
q_bias = self._name_to_variable(
f"{module_name}.self_attns.{layer_no}.Q_dense.bias")
k_bias = self._name_to_variable(
f"{module_name}.self_attns.{layer_no}.K_dense.bias")
v_bias = self._name_to_variable(
f"{module_name}.self_attns.{layer_no}.V_dense.bias")
assert q_bias.shape == Q_b.shape
assert k_bias.shape == K_b.shape
assert v_bias.shape == V_b.shape
q_bias.data = torch.from_numpy(Q_b)
k_bias.data = torch.from_numpy(K_b)
v_bias.data = torch.from_numpy(V_b)
else:
print("Name error", name)
raise Exception
| [] |
2024-01-10 | inveniosoftware/invenio-openaire | invenio_openaire~tasks.py | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2019 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""OpenAIRE service integration for Invenio repositories."""
from __future__ import absolute_import, print_function
from copy import deepcopy
from celery import chain, shared_task
from flask import current_app
from invenio_db import db
from invenio_indexer.api import RecordIndexer
from invenio_pidstore.errors import PIDDoesNotExistError
from invenio_pidstore.resolver import Resolver
from invenio_records.api import Record
from .loaders import LocalFundRefLoader, LocalOAIRELoader, \
RemoteFundRefLoader, RemoteOAIRELoader
from .minters import funder_minter, grant_minter
@shared_task(ignore_result=True)
def harvest_fundref(source=None):
"""Harvest funders from FundRef and store as authority records."""
loader = LocalFundRefLoader(source=source) if source \
else RemoteFundRefLoader()
for funder_json in loader.iter_funders():
register_funder.delay(funder_json)
@shared_task(ignore_result=True)
def harvest_openaire_projects(source=None, setspec=None):
"""Harvest grants from OpenAIRE and store as authority records."""
loader = LocalOAIRELoader(source=source) if source \
else RemoteOAIRELoader(setspec=setspec)
for grant_json in loader.iter_grants():
register_grant.delay(grant_json)
@shared_task(ignore_result=True)
def harvest_all_openaire_projects():
"""Reharvest all grants from OpenAIRE.
Harvest all OpenAIRE grants in a chain to prevent OpenAIRE
overloading from multiple parallel harvesting.
"""
setspecs = current_app.config['OPENAIRE_GRANTS_SPECS']
chain(harvest_openaire_projects.s(setspec=setspec)
for setspec in setspecs).apply_async()
@shared_task(ignore_result=True)
def register_funder(data):
"""Register the funder JSON in records and create a PID."""
create_or_update_record(data, 'frdoi', 'doi', funder_minter)
@shared_task(ignore_result=True, rate_limit='20/s')
def register_grant(data):
"""Register the grant JSON in records and create a PID."""
create_or_update_record(data, 'grant', 'internal_id', grant_minter)
def create_or_update_record(data, pid_type, id_key, minter):
"""Register a funder or grant."""
resolver = Resolver(
pid_type=pid_type, object_type='rec', getter=Record.get_record)
try:
pid, record = resolver.resolve(data[id_key])
data_c = deepcopy(data)
del data_c['remote_modified']
record_c = deepcopy(record)
del record_c['remote_modified']
# All grants on OpenAIRE are modified periodically even if nothing
# has changed. We need to check for actual differences in the metadata
if data_c != record_c:
record.update(data)
record.commit()
record_id = record.id
db.session.commit()
RecordIndexer().index_by_id(str(record_id))
except PIDDoesNotExistError:
record = Record.create(data)
record_id = record.id
minter(record.id, data)
db.session.commit()
RecordIndexer().index_by_id(str(record_id))
| [] |
2024-01-10 | inveniosoftware/invenio-openaire | tests~test_cli.py | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2019 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""CLI tests."""
from __future__ import absolute_import, print_function
from os.path import dirname, join
from click.testing import CliRunner
from invenio_pidstore.models import PersistentIdentifier
from invenio_openaire.cli import openaire
def test_loadfunders(script_info, es):
"""Test CLI for loading grants."""
assert PersistentIdentifier.query.count() == 0
runner = CliRunner()
result = runner.invoke(
openaire,
['loadfunders', '--source',
join(dirname(__file__), 'testdata/fundref_test.rdf')],
obj=script_info)
assert result.exit_code == 0
assert PersistentIdentifier.query.count() == 6
def test_loadgrants(script_info, es, funders):
"""Test CLI for loading grants."""
# Funders only
assert PersistentIdentifier.query.count() == 6
runner = CliRunner()
result = runner.invoke(
openaire,
['loadgrants', '--source',
join(dirname(__file__), 'testdata/openaire_test.sqlite')],
obj=script_info)
print(result.output)
assert result.exit_code == 0
assert PersistentIdentifier.query.count() == 46
| [] |
2024-01-10 | NADOOITChristophBa/AI-Hub | Office_Assistent.py | import re
import os
import json
import openai
import sqlite3
import win32com.client as win32
import win32com.client.gencache
from openai import OpenAI
from contextvars import ContextVar
from typing import Optional, Callable, List
from dotenv import load_dotenv
import datetime
# Load environment variables from .env.
load_dotenv()
api_key = os.getenv("OPENAI_API_KEY")
# Initialize OpenAI client with your API key
openai.api_key = api_key
# Ensure the generation of COM libraries.
win32.gencache.EnsureDispatch("Outlook.Application")
constants = win32.constants
outlook = win32com.client.Dispatch("Outlook.Application")
# Type annotation for Cursor (assuming sqlite3, replace with your actual cursor type if different)
Cursor = sqlite3.Cursor
# Context variable for SQLite connection
matrix_connection_var: ContextVar[Optional[sqlite3.Connection]] = ContextVar(
"matrix_connection", default=None
)
# Context variable for matrix cursor
matrix_cursor_var: ContextVar[Optional[sqlite3.Cursor]] = ContextVar(
"matrix_cursor_var", default=None
)
import threading
class MatrixDatabaseContextManager:
_lock = threading.Lock()
_ref_count = 0
def __enter__(self):
with MatrixDatabaseContextManager._lock:
MatrixDatabaseContextManager._ref_count += 1
connection = matrix_connection_var.get(None)
if connection is None:
connection = sqlite3.connect("Matrix.db")
matrix_connection_var.set(connection)
self.cursor = connection.cursor()
matrix_cursor_var.set(self.cursor)
return self.cursor
def __exit__(self, exc_type, exc_val, exc_tb):
with MatrixDatabaseContextManager._lock:
MatrixDatabaseContextManager._ref_count -= 1
self.cursor.close()
matrix_cursor_var.set(None)
if (
MatrixDatabaseContextManager._ref_count == 0
and matrix_connection_var.get(None) is not None
):
matrix_connection_var.get().close()
matrix_connection_var.set(None)
def with_matrix_db_context(func):
def wrapper(*args, **kwargs):
with MatrixDatabaseContextManager() as cursor:
# Explicitly pass the cursor as an argument to the function
return func(*args, **kwargs, cursor=cursor)
return wrapper
def get_matrix_connection():
matrix_connection = sqlite3.connect("Matrix.db")
return matrix_connection
def get_matix_cursor_for_matrix_connection(matrix_connection):
return matrix_connection.cursor()
class Appointment:
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
@classmethod
def from_json(cls, data):
try:
details = json.loads(data) if isinstance(data, str) else data
if not isinstance(details, dict):
raise ValueError(
f"Invalid data format. Expected a dictionary, got {type(details)}"
)
# Fetch required attributes from the database
required_attributes = get_appointment_detail_fields()
# Check for missing attributes
missing_attributes = [
attr for attr in required_attributes if attr not in details
]
if missing_attributes:
raise ValueError(
f"Missing required attributes: {', '.join(missing_attributes)}"
)
return cls(**details)
except json.JSONDecodeError as e:
print("JSON decoding error:", e)
return None
# The get_appointment_detail_fields function should be defined as shown in the previous response
class Email:
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
def __str__(self):
return f"Email from {self.sender} <{self.sender_email}> received at {self.received_time}: {self.subject}"
def clean_email_content(email_content):
# Remove URLs from the email content
email_content = re.sub(r"http\S+", "", email_content)
# Remove sequences of '<' possibly interspersed with whitespace and newlines
email_content = re.sub(r"(\s*<\s*)+", " ", email_content)
# Additional cleanup could go here if needed
return email_content.strip()
def create_oulook_calender_appointment_for_appointment(outlook, appointment_data):
"""
Create an appointment in Outlook from given appointment data.
"""
# Parse the appointment data
appointment = Appointment.from_json(appointment_data)
if not appointment:
print("Invalid appointment data")
return
namespace = outlook.GetNamespace("MAPI")
calendar_folder = namespace.GetDefaultFolder(9) # 9 refers to the Calendar folder
# Create a new appointment
new_appointment = calendar_folder.Items.Add()
new_appointment.Subject = getattr(appointment, "subject", "No Subject")
new_appointment.Start = getattr(appointment, "start_time", None)
new_appointment.End = getattr(appointment, "end_time", None)
new_appointment.Location = getattr(appointment, "location", "No Location")
# Save the appointment
new_appointment.Save()
print(f"Appointment '{new_appointment.Subject}' created successfully.")
def send_email_via_outlook(outlook, subject, body, recipient):
"""Send an email using a provided Outlook instance."""
mail = outlook.CreateItem(0)
mail.Subject = subject
mail.Body = body
mail.To = recipient
mail.Send()
def get_most_recent_unread_emails_from_outlook(outlook, folder_path=None, count=1):
print("Connecting to Outlook...")
namespace = outlook.GetNamespace("MAPI")
if folder_path:
# Navigate through the folder path
root_folder = namespace.Folders.Item(1) # Primary account
target_folder = root_folder
for folder_name in folder_path:
target_folder = find_folder(target_folder, folder_name)
if not target_folder:
print(f"Folder '{folder_name}' not found in path.")
return []
else:
# Default to Inbox
print("No folder path provided. Using default Inbox...")
target_folder = namespace.GetDefaultFolder(constants.olFolderInbox)
print(f"Getting items from the specified folder...")
messages = target_folder.Items
messages.Sort("[ReceivedTime]", True)
print("Filtering unread messages...")
unread_messages = [
msg for msg in messages if msg.UnRead and msg.Class == constants.olMail
]
print(f"Found {len(unread_messages)} unread mail message(s).")
emails = process_emails(unread_messages, count)
return emails
def process_emails(messages, count):
emails = []
for msg in messages[:count]:
email_obj = build_email_object(msg)
emails.append(email_obj)
# msg.UnRead = False # Uncomment to mark as read
return emails
def build_email_object(msg):
sender_name = msg.SenderName if hasattr(msg, "SenderName") else "Unknown Sender"
sender_email = (
msg.SenderEmailAddress
if hasattr(msg, "SenderEmailAddress")
else "Unknown Email"
)
received_time = msg.ReceivedTime if hasattr(msg, "ReceivedTime") else "Unknown Time"
print(
f"Processing email from {sender_name} <{sender_email}> received at {received_time}..."
)
return Email(
subject=msg.Subject,
body=msg.Body,
sender=sender_name,
sender_email=sender_email,
received_time=received_time,
)
def get_unread_emails_from_outlook_inbox(outlook, count=1):
return get_most_recent_unread_emails_from_outlook(outlook, count=count)
def check_email_contains_appointment(sender_email: Email) -> List[Appointment]:
"""Determine if the email is about appointments and return the details as a list."""
client = OpenAI()
# Fetch required fields for appointment details from the database
required_fields = get_appointment_detail_fields()
required_fields_str = ", ".join(
required_fields
) # Convert list to a comma-separated string
# Clean up the email content
email_content = clean_email_content(sender_email.body)
# Condensed prompt for the Chat API, including required fields
messages = [
{
"role": "system",
"content": "You are a helpful assistant. Return JSON objects in response to queries about appointments. Use these fields for the JSON objects: "
+ required_fields_str
+ ".",
},
{
"role": "user",
"content": "Here is an email subject and content. Determine if it's about one or more appointments. If so, provide the details in JSON format using the specified fields.",
},
{"role": "user", "content": f"Subject: {sender_email.subject}"},
{"role": "user", "content": f"Content: {email_content}"},
{
"role": "user",
"content": "Carefully analyze the email for any appointments or events. Always return the details as a list in JSON format, even if there is only one appointment.",
},
]
response = client.chat.completions.create(
model="gpt-4-1106-preview",
messages=messages,
response_format={"type": "json_object"},
seed=1,
temperature=0,
stop=["user:", "system:"],
)
# Access the response content
response_text = response.choices[0].message.content.strip()
# Convert the response text into a Python dictionary
response_data = json.loads(response_text)
print(response_data)
appointments = []
try:
if "appointments" in response_data and isinstance(
response_data["appointments"], list
):
for appointment_data in response_data["appointments"]:
try:
appointment_obj = Appointment.from_json(
data=json.dumps(appointment_data)
)
appointments.append(appointment_obj)
except ValueError as e:
print(
f"Error while creating an Appointment object: {e}. Data: {appointment_data}"
)
else:
print("No appointment details found or invalid format in response.")
except Exception as e:
print(f"Error processing response data: {e}. Data: {response_data}")
return appointments
@with_matrix_db_context
def add_appointment_detail_field(field_name: str, cursor):
try:
# Check if the field already exists
cursor.execute(
"SELECT COUNT(*) FROM appointment_details WHERE field_name = ?",
(field_name,),
)
if cursor.fetchone()[0] == 0:
# Insert the new field
cursor.execute(
"INSERT INTO appointment_details (field_name) VALUES (?)", (field_name,)
)
# Commit the changes
cursor.connection.commit()
except Exception as e:
print("Error in add_appointment_detail_field:", e)
@with_matrix_db_context
def ensure_appointment_details_table_exists(cursor):
# Check if the appointment_details table exists
cursor.execute(
"SELECT name FROM sqlite_master WHERE type='table' AND name='appointment_details'"
)
table_exists = cursor.fetchone()
# If the table doesn't exist, create it
if not table_exists:
cursor.execute(
"""CREATE TABLE appointment_details (
id INTEGER PRIMARY KEY,
field_name TEXT
)"""
)
# Optionally insert default fields here
@with_matrix_db_context
def get_appointment_detail_fields(cursor):
ensure_appointment_details_table_exists()
# Retrieve and return all appointment detail fields
cursor.execute("SELECT field_name FROM appointment_details")
return [row[0] for row in cursor.fetchall()]
@with_matrix_db_context
def add_email_type(new_email_type: str, cursor):
try:
# Check if the email type already exists
cursor.execute(
"SELECT COUNT(*) FROM email_types WHERE type_name = ?", (new_email_type,)
)
if cursor.fetchone()[0] == 0:
# Insert the new email type
cursor.execute(
"INSERT INTO email_types (type_name) VALUES (?)", (new_email_type,)
)
# Commit the changes
cursor.connection.commit()
except Exception as e:
print("Error in add_email_type:", e)
# Optionally, you can handle specific exceptions based on your DBMS
@with_matrix_db_context
def get_email_types_for_matrix_cursor(cursor):
# Check if the email_types table exists
cursor.execute(
"SELECT name FROM sqlite_master WHERE type='table' AND name='email_types'"
)
table_exists = cursor.fetchone()
# If the table doesn't exist, create it and insert default types
if not table_exists:
cursor.execute(
"""CREATE TABLE email_types (
id INTEGER PRIMARY KEY,
type_name TEXT
)"""
)
# Insert default email types
default_types = ["Appointment", "Data Dump", "Inquiry", "Order", "Confirmation"]
for type_name in default_types:
cursor.execute(
"INSERT INTO email_types (type_name) VALUES (?)", (type_name,)
)
# Retrieve and return all email types
cursor.execute("SELECT type_name FROM email_types")
return [row[0] for row in cursor.fetchall()]
def get_email_types_form_matrix():
email_types = get_email_types_for_matrix_cursor()
return email_types
def get_email_type_for_email(email: Email) -> Optional[str]:
# Retrieve the current list of email types
email_types = get_email_types_form_matrix()
email_types_string = ", ".join(f'"{etype}"' for etype in email_types)
print(email_types)
# Initialize the OpenAI client
client = OpenAI()
# Construct the messages for the AI
messages = [
{
"role": "system",
"content": (
"You are a highly capable assistant specialized in email categorization. "
"Your task is to analyze the content and subject of an email and classify it. "
"Here are the available types: " + email_types_string + ". "
"If the email doesn't fit any of these types, suggest a new appropriate type "
"and present it as 'email_type' in your JSON response."
),
},
{
"role": "user",
"content": f"Subject: {email.subject}\nContent: {email.body}",
},
]
# Request the AI to classify the email
response = client.chat.completions.create(
model="gpt-4-1106-preview",
messages=messages,
seed=1,
temperature=0,
response_format={"type": "json_object"},
)
# Extract the AI's response
ai_response_text = response.choices[0].message.content
# Attempt to parse the AI's response as JSON
try:
ai_response = json.loads(ai_response_text)
except json.JSONDecodeError as e:
print("JSON parsing error:", e)
return None
# Extract the email type from the AI's response
email_type_received = ai_response.get("email_type", "").strip().lower()
email_types_lower = [etype.lower() for etype in email_types]
# Check if the received email type is new and add it if necessary
if email_type_received not in email_types_lower:
print("NEW TYPE FOUND!")
add_email_type(email_type_received)
# Return the email type with proper capitalization
return email_type_received.title()
def get_read_email_from_unread_email(unread_email: Email):
print("Checking email type...") # Diagnostic print
email_type = get_email_type_for_email(unread_email)
if email_type:
print("Email type identified:", email_type) # Diagnostic print
else:
print("Email type could not be determined.") # Diagnostic print
return # Early return if email type cannot be determined
print("Checking for appointments in the email...") # Diagnostic print
appointments = check_email_contains_appointment(unread_email)
if appointments:
for appointment in appointments:
print(appointment)
else:
print(
f"No appointments in this email: {unread_email.subject}, From: {unread_email.sender}"
)
def find_outlook_email(outlook, email_obj):
print("Connecting to Outlook...")
namespace = outlook.GetNamespace("MAPI")
inbox = namespace.GetDefaultFolder(constants.olFolderInbox)
print("Searching for the specific email...")
for msg in inbox.Items:
# Assuming subject, sender email, and received time are enough to uniquely identify an email
if (
msg.Subject == email_obj.subject
and msg.SenderEmailAddress == email_obj.sender_email
and msg.ReceivedTime.strftime("%Y-%m-%d %H:%M:%S")
== email_obj.received_time.strftime("%Y-%m-%d %H:%M:%S")
):
print("Matching email found.")
return msg
print("Email not found.")
return None
def display_folder_tree(folder, level=0):
"""
Recursively display the folder structure in a tree-like format.
:param folder: The current folder to display.
:param level: The current level in the folder hierarchy (used for indentation).
:return: None
"""
indent = " " * 4 * level # 4 spaces for each level of indentation
print(f"{indent}- {folder.Name}")
try:
for subfolder in folder.Folders:
display_folder_tree(subfolder, level + 1)
except Exception as e:
# Ignore folders that cannot be accessed
pass
def visualize_folder_structure(outlook):
"""
Visualize the folder structure of an Outlook account.
:param outlook: The outlook instance.
:return: None
"""
namespace = outlook.GetNamespace("MAPI")
root_folder = namespace.Folders.Item(
1
) # Usually the first item is the primary account
print("Outlook Folder Structure:")
for folder in root_folder.Folders:
display_folder_tree(folder)
# Usage example
# visualize_folder_structure(outlook_instance)
def create_folder(outlook, folder_name, parent_folder):
"""
Create a folder in Outlook within a specified parent folder.
:param outlook: The outlook instance.
:param folder_name: The name of the folder to be created.
:param parent_folder: The parent folder object.
:return: The created folder object or None if failed.
"""
try:
new_folder = parent_folder.Folders.Add(folder_name)
print(f"Folder '{folder_name}' created successfully.")
return new_folder
except Exception as e:
print(f"Error creating folder '{folder_name}': {e}")
return None
def find_folder(folder, folder_name):
"""
Recursively search for a folder with the given name.
:param folder: The current folder to search in.
:param folder_name: The name of the folder to find.
:return: The folder if found, otherwise None.
"""
if folder.Name.lower() == folder_name.lower():
return folder
try:
for subfolder in folder.Folders:
found_folder = find_folder(subfolder, folder_name)
if found_folder:
return found_folder
except Exception as e:
# Ignore folders that cannot be accessed
pass
return None
def create_folders_recursive(outlook, parent_folder, structure):
"""
Create folders and subfolders recursively based on a given structure.
:param outlook: The outlook instance.
:param parent_folder: The parent folder where the structure starts.
:param structure: The folder structure defined in a dictionary.
"""
for folder_name, sub_structure in structure.items():
existing_folder = find_folder(parent_folder, folder_name)
if not existing_folder:
existing_folder = create_folder(outlook, folder_name, parent_folder)
if existing_folder and sub_structure: # If there are subfolders
create_folders_recursive(outlook, existing_folder, sub_structure)
def initialize_email_folders(outlook):
"""
Initialize the required email folders based on a JSON-defined structure.
:param outlook: The outlook instance.
"""
folder_structure_json = """
{
"User_Email_Management": {
"Action_Required_Now": {},
"Action_Soon": {},
"No_Action_Required": {}
}
}
"""
folder_structure = json.loads(folder_structure_json)
root_folder = outlook.GetNamespace("MAPI").Folders.Item(1) # Primary account
user_email_management_folder = find_folder(root_folder, "User_Email_Management")
if not user_email_management_folder:
user_email_management_folder = create_folder(
outlook, "User_Email_Management", root_folder
)
create_folders_recursive(
outlook, user_email_management_folder, folder_structure["User_Email_Management"]
)
def set_email_folder_for_outlook_email(outlook_email, folder_path, outlook):
"""
Move an email to the specified folder based on the provided path.
:param email: The email object to be moved.
:param folder_path: A list representing the path to the destination folder.
:param outlook: The outlook instance.
"""
namespace = outlook.GetNamespace("MAPI")
root_folder = namespace.Folders.Item(1) # Primary account
# Navigate through the folder path
target_folder = root_folder
for folder_name in folder_path:
target_folder = find_folder(target_folder, folder_name)
if not target_folder:
print(f"Folder '{folder_name}' not found in path.")
return
# Move the email
try:
outlook_email.Move(target_folder)
print(f"Email moved to '{' > '.join(folder_path)}'.")
except Exception as e:
print(f"Error moving email: {e}")
def determine_email_priority(sender_email: Email) -> str:
"""Determine the priority of the email and categorize it into the appropriate folder based on detailed criteria."""
client = OpenAI()
# Clean up the email content
email_content = clean_email_content(sender_email.body)
# Get the current date and time
current_time_and_date = get_current_time_and_date()
# Detailed instructions for the AI to categorize the email
messages = [
{
"role": "system",
"content": "You are a helpful assistant. Analyze the email and categorize it as 'Action_Required_Now', 'Action_Soon', or 'No_Action_Required'. Use specific criteria for each category. Consider the context of the email, including the sender's role and previous communications. Confirm your decision before finalizing. Return the category in a simplified JSON format like {'category': 'Action_Required_Now'}. Handle uncertain cases with a specific procedure and collect feedback for continuous improvement. Consider the current date and time: {current_time_and_date}."
},
{
"role": "user",
"content": "Here is an email subject and content. Determine its priority and categorize it accordingly."
},
{"role": "user", "content": "Subject: {sender_email.subject}"},
{"role": "user", "content": "Content: {email_content}"}
]
response = client.chat.completions.create(
model="gpt-4-1106-preview",
messages=messages,
seed=1,
temperature=0,
response_format={"type": "json_object"},
stop=["user:", "system:"],
)
# Access the response content
response_text = response.choices[0].message.content.strip()
# Convert the response text into a Python dictionary
response_data = json.loads(response_text)
# Determine the priority category
priority_category = response_data.get("category", "No_Action_Required")
return priority_category
def get_current_time_and_date():
now = datetime.datetime.now()
return now.strftime("%Y-%m-%d %H:%M:%S")
if __name__ == "__main__":
initialize_email_folders(outlook)
# visualize_folder_structure(outlook)
# outlook = win32.Dispatch("Outlook.Application")
unread_emails = get_unread_emails_from_outlook_inbox(
outlook, count=40
) # Assuming this function returns a list of Email objects
for unread_email in unread_emails:
email_priority = determine_email_priority(unread_email)
outlook_email = find_outlook_email(outlook, unread_email)
folder_path = ["User_Email_Management", email_priority]
set_email_folder_for_outlook_email(outlook_email, folder_path, outlook)
# read_email = get_read_email_from_unread_email(unread_email)
# Check if the email is about an appointment and get the details
# Test sending an email
# subject = "Test Email from AI Hub"
# body = "This is a test email sent from the AI Hub using local Outlook instance."
# recipient = "[email protected]"
# send_email_via_outlook(subject, body, recipient)
| [
"Content: PLACEHOLDER",
"You are a helpful assistant. Analyze the email and categorize it as 'Action_Required_Now', 'Action_Soon', or 'No_Action_Required'. Use specific criteria for each category. Consider the context of the email, including the sender's role and previous communications. Confirm your decision before finalizing. Return the category in a simplified JSON format like {'category': 'Action_Required_Now'}. Handle uncertain cases with a specific procedure and collect feedback for continuous improvement. Consider the current date and time: {current_time_and_date}.",
"Carefully analyze the email for any appointments or events. Always return the details as a list in JSON format, even if there is only one appointment.",
"Here is an email subject and content. Determine if it's about one or more appointments. If so, provide the details in JSON format using the specified fields.",
"You are a helpful assistant. Return JSON objects in response to queries about appointments. Use these fields for the JSON objects: PLACEHOLDER.",
"Subject: {sender_email.subject}",
"You are a highly capable assistant specialized in email categorization. Your task is to analyze the content and subject of an email and classify it. Here are the available types: PLACEHOLDER. If the email doesn't fit any of these types, suggest a new appropriate type and present it as 'email_type' in your JSON response.",
"Content: {email_content}",
"Here is an email subject and content. Determine its priority and categorize it accordingly."
] |
2024-01-10 | jdgalviss/memorAI | frontend~recommender~assistant.py | import openai
from recommender import config
from recommender import templates
import random
import csv
from easydict import EasyDict as edict
event_types = ['sports', 'music']
openai.api_key = config.OPENAI_API_KEY
class Assistant(object):
def __init__(self, engine="text-davinci-002"):
print("Initializing Assistant...")
self.engine = engine
self.is_initialized = False
def initialize(self, user):
if(not self.is_initialized):
self.is_initialized = True
self.user = user
self.user_story = templates.my_story.format(self.user.username, self.user.birth_year,
self.user.birth_place, self.user.current_place,
self.user.favorite_band, self.user.favorite_film)
else:
print("Assistant was already initialized")
print("Assistant Ready...")
def add_info_user_story(self,info):
self.user_story += (info+". ")
def recommend_film(self):
recommendation = self.send_query(templates.film_query.format(self.user.favorite_film))
recommendation = recommendation.replace('\n\n','').split('\n')
recommendation = recommendation[random.randint(1,3)][3:]
return (recommendation,
self.send_query(templates.query.format("the film" + recommendation), max_tokens=256, temperature=0.4))
def recommend_band(self):
recommendation = self.send_query(templates.band_query.format(self.user.favorite_band))
print(recommendation)
recommendation = recommendation.replace('\n\n','').split('\n')
print(recommendation)
recommendation = recommendation[random.randint(0,2)][3:]
print(recommendation)
return (recommendation,
self.send_query(templates.query.format("the artist " + recommendation), max_tokens=256, temperature=0.4))
def recommend_song(self):
recommendation = self.send_query(templates.song_query.format(self.user.favorite_band))
recommendation = recommendation.replace('\n\n','').split('\n')
recommendation = recommendation[random.randint(0,2)][3:]
return recommendation
def recommend_event(self):
year = int(self.user.birth_year)+random.randint(15,50)
year = int((year/10)*10)
print("year: {}".format(year))
recommendation = self.send_query(templates.historical_query.format(event_types[random.randint(0,1)], self.user.birth_place, year)).split('.')[0]
return (recommendation, self.send_query(templates.query.format(recommendation),max_tokens=256, temperature=0.6))
def ask(self, question):
return self.send_query(self.user_story + "\n\nHuman: " + question + "\n\nAssistant")
def send_query(self,msg,max_tokens=32, temperature = 0.4):
response = openai.Completion.create(
engine=self.engine,
prompt=msg,
temperature=temperature,
max_tokens=max_tokens,
top_p=0.8,
frequency_penalty=0,
presence_penalty=0
)
return response["choices"][0]["text"]
| [] |
2024-01-10 | Shivamgulia/Generative-AI-Apps | milvusApp.py | from dotenv import load_dotenv
import os
import streamlit as st
from langchain.vectorstores.milvus import Milvus
from langchain.text_splitter import CharacterTextSplitter
from langchain.prompts import PromptTemplate, StringPromptTemplate
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from pymilvus import connections, Collection
# initilizing the embedding model
from langchain.embeddings import HuggingFaceInstructEmbeddings
# PROMPT Template
QuestionTemplate = """
Given the provided context, answer the following question. If the context does not mention any relevant information about the question, state "No relevant information found" and indicate the specific part of the context where the question should be addressed.
Context: {context}
Question: {query}
"""
prompt = PromptTemplate.from_template(QuestionTemplate)
# initilizing varibles
load_dotenv()
# connect to database
st.title("Document Question and Answer")
# ui components
uploadedDocument = st.file_uploader("Upload the file to question from")
button1 = st.button("Upload Document")
# File Upload function
def uploadFile(file):
if file is not None:
documents = [file.read().decode()]
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.create_documents(documents)
return docs
def queryDocs():
releventDocumnets = Milvus.similarity_search(
db, query="what is generative artificial intelegence?"
)
st.write(releventDocumnets)
# file upload and generating embeddings
# global documents
# documents = None
def main():
if button1:
st.write("Document uploaded")
global documents
documents = uploadFile(uploadedDocument)
st.write(documents[0])
# Adding docks to Milvus vectorstore
print(documents)
print("generateing embeddings ....")
st.write("generateing embeddings ....")
instEmbedder = HuggingFaceInstructEmbeddings(
model_name="hkunlp/instructor-xl", model_kwargs={"device": "cpu"}
)
st.write(documents[0])
db = Milvus(
embedding_function=instEmbedder,
connection_args={"host": "127.0.0.1", "port": "19530"},
collection_name="Application",
)
db.add_documents(documents=documents)
print("embeddings stored")
st.write("embeddings stored")
# taking query and generating response
question = st.text_input("Question")
if st.button("Answer"):
if len(question) <= 1:
st.write("write a question first")
if len(question) > 1:
st.write(question)
instEmbedder = HuggingFaceInstructEmbeddings(
model_name="hkunlp/instructor-xl", model_kwargs={"device": "cpu"}
)
db = Milvus(
embedding_function=instEmbedder,
connection_args={"host": "127.0.0.1", "port": "19530"},
collection_name="Application",
)
# documents = db.similarity_search(query=question)
# documentData = ""
# for doc in documents:
# documentData += doc.page_content
# documentData += "\n"
# questionPrompt = prompt.format(query=question, context=documentData)
# st.write(questionPrompt)
qa = RetrievalQA.from_chain_type(
llm=ChatOpenAI(temperature=0.7),
chain_type="stuff",
retriever=db.as_retriever(),
verbose=True,
)
response = qa.run(question)
st.write(response)
if __name__ == "__main__":
main()
| [
"No relevant information found",
"\nGiven the provided context, answer the following question. If the context does not mention any relevant information about the question, state \"No relevant information found\" and indicate the specific part of the context where the question should be addressed.\n\nContext: {context}\n\nQuestion: {query}\n\n"
] |
2024-01-10 | TukBang/Pet-care-app | Capstone~2023~server~python~ai_server.py | # default
import pickle as pkl
import sys, os
from pprint import *
import numpy as np
import pandas as pd
# AI
import cv2
import torch
from torchvision import transforms
from torchvision.models import efficientnet_v2_s
from torchinfo import summary
# ChatBot
import openai
from multiprocessing import Process
import time, datetime
# Web server based on Flask ()
from flask import Flask, jsonify, request
from flask_cors import CORS
from flask_restful import Resource, Api
from PIL import Image
import base64
import io
# users module
sys.path.append("d:\\GitHub\\Pet-care-app\\Capstone\\2023\\")
import skin_disease.module.skin_disease_model as sdm
# AI 불러오기
# server AI model 가중치 저장 경로
# Image 저장 경로
model_path = "D:/Capstone/model/server/"
image_path = "D:/Capstone/images/"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = sdm.Skin_Distinction_Model(model=efficientnet_v2_s(weights="DEFAULT"),
out_features=5,
device=device,
save_path=model_path).to(device)
# initial
# org-Tukorea_S2-9_Pet_Care_Application_BNL
os.environ["OPENAI_ORGANIZATION"] = "org-MRE3IgCPLUw65a4D5cDpLAxK"
openai.organization = os.getenv("OPENAI_ORGANIZATION")
os.environ["OPENAI_API_KEY"] = "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"
openai.api_key = os.getenv("OPENAI_API_KEY")
last_use_user = list()
chatbot = dict()
# history_fig 저장
def save_history_fig(history):
import math
import numpy as np
import matplotlib.pyplot as plt
train_loss = history["train_loss"]
train_acc = history["train_acc"]
valid_loss = history["valid_loss"]
valid_acc = history["valid_acc"]
epoch = len(history["train_loss"])
fig, ax = plt.subplots(1, 2, figsize=(10, 4))
ax[0].plot(train_loss, 'b', label="training")
ax[0].plot(valid_loss, '--r', label="validation")
ax[0].set_xlim((0, epoch))
max_y = math.ceil(max(valid_loss)) if max(train_loss) < max(valid_loss) else math.ceil(max(train_loss))
ax[0].set_ylim((0, max_y))
ax[0].legend()
ax[0].grid()
ax[1].plot(train_acc, 'b', label="training")
ax[1].plot(valid_acc, '--r', label="validation")
ax[1].set_xlim((0, epoch))
ax[1].set_ylim((0, 100))
ax[1].legend()
ax[1].grid()
plt.savefig(image_path + 'evaluate/model_history.png')
return
def get_evaluate_images(src_path, dst_path):
import shutil
return_flag = True
try:
shutil.copy(src_path + "AUC-ROC.png", dst_path + "AUC-ROC.png")
shutil.copy(src_path + "classification_report.png", dst_path + "classification_report.png")
except: return_flag = False
return return_flag
def encode_image(image_path):
with open(image_path, "rb") as image_file:
encoded_string = base64.b64encode(image_file.read()).decode("utf-8")
return encoded_string
def softmax(pred):
sum_exp = float()
for prob in pred:
sum_exp += np.exp(prob)
ret_pred = list()
for prob in pred:
ret_pred.append(np.exp(prob) / sum_exp)
return ret_pred
# chatbot class
class PetCareChatBot:
def __init__(self, model="gpt-3.5-turbo"):
self.model = model
self.last_use_time = datetime.datetime.now()
self.chatlog = dict()
self.chatlog["system"] = "You are a helpful assistant about pet care."
self.chatlog["user"] = list()
self.chatlog["question_token"] = [0]
self.chatlog["assistant"] = list()
self.chatlog["answer_token"] = [0]
self.chatlog["total_token"] = 0
self.SYSTEM_TOKEN_LEN = 22
return
def return_create_time(self):
return (self.create_time.year,
self.create_time.month,
self.create_time.day,
self.create_time.hour,
self.create_time.minute,
self.create_time.second)
def request_chat(self, sentence):
# initialize meesages
## system message part
chat_messages = [{"role": "system", "content": self.chatlog["system"]}]
## user, assistant message part
for q, a in zip(self.chatlog["user"], self.chatlog["assistant"]):
chat_messages.append({"role": "user", "content": q})
chat_messages.append({"role": "assistant", "content": a})
# append question message
chat_messages.append({"role": "user", "content": sentence})
self.chatlog["user"].append(sentence)
# request message
openai.organization = os.getenv("OPENAI_ORGANIZATION")
openai.api_key = os.getenv("OPENAI_API_KEY")
completion = openai.ChatCompletion.create(
model=self.model,
messages=chat_messages
)
# update token
self.chatlog["answer_token"].append(completion["usage"]["completion_tokens"])
if (len(self.chatlog["question_token"]) == 0):
self.chatlog["question_token"].append(
completion["usage"]["prompt_tokens"] - self.SYSTEM_TOKEN_LEN
)
else:
self.chatlog["question_token"].append(
completion["usage"]["prompt_tokens"]
- sum(self.chatlog["question_token"][:-1])
- sum(self.chatlog["question_token"][:-1])
- self.SYSTEM_TOKEN_LEN
)
self.chatlog["total_token"] = completion["usage"]["total_tokens"]
# append answer message
answer_message = completion["choices"][0]["message"]["content"]
self.chatlog["assistant"].append(answer_message)
# delete exceed log messages
while self.chatlog["total_token"] > 3000:
# delete exceed token
question_token = self.chatlog["question_token"].pop(0)
answer_token = self.chatlog["answer_token"].pop(0)
### part that can improve memory ability efficiency ###
self.chatlog["user"].pop(0)
self.chatlog["assistant"].pop(0)
self.chatlog["total_token"] -= (question_token + answer_token)
#######################################################
self.last_use_time = datetime.datetime.now()
return answer_message
class ImageResource(Resource):
# 이 부분은 인공지능 평가에 대한 데이터가 보내져야 함
def get(self):
auc_roc = encode_image(image_path=image_path + "evaluate/AUC-ROC.png")
class_report = encode_image(image_path=image_path + "evaluate/classification_report.png")
history = encode_image(image_path=image_path + "evaluate/model_history.png")
ret_data = {"auc-roc": auc_roc,
"classification_report": class_report,
"model_history": history}
return ret_data
# 이미지를 받으면, 저장하고, 인공지능 모델에 넣어야 함
def post(self):
global image_path
data = request.get_json()
uid = data.get("uid", None)
name = data.get("name", None)
species = data.get("species", None)
gender = data.get("gender", None)
weight = data.get("weight", None)
age = data.get("age", None)
image_data = data.get("image", None)
image_name = data.get('imageName', 'unnamed.jpg')
# csv file save
if os.path.isfile(f"{model_path}diagnosis_result.csv"):
import csv
with open(f"{model_path}diagnosis_result.csv", "a") as csv_file:
csv_writer = csv.writer(csv_file)
csv_writer.writerow([uid, name, species, gender, weight, age, image_name])
else:
columns = ["uid", "pet_name", "species", "gender", "weight", "age", "image_name"]
df = pd.DataFrame(columns=columns)
df.loc[0] = [uid, name, species, gender, weight, age, image_name]
df.to_csv(f"{model_path}diagnosis_result.csv", index=False)
if image_data:
try:
image_data = base64.b64decode(image_data)
image = Image.open(io.BytesIO(image_data))
save_path = os.path.join(image_path, image_name)
image.save(save_path)
test_transforms = transforms.Compose([
transforms.Resize(size=(224, 224), interpolation=transforms.InterpolationMode.LANCZOS),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
image = Image.fromarray(cv2.merge(list(cv2.split(np.array(image))[::-1])))
image = test_transforms(image).to(device).unsqueeze(0)
with torch.no_grad():
model.eval()
pred = model.forward(image)
probs = softmax(pred[0].to("cpu").detach().numpy())
ret_data = jsonify({'name': image_name,
'L1': probs[0], 'L2': probs[1],
'L3': probs[2], 'L4': probs[3],
'L5': probs[4]})
return ret_data
except Exception as e:
print(e)
return {'error': str(e)}, 400
else:
return {'error': 'No image data found'}, 400
class ChatResource(Resource):
def get(self):
# ChatGPT Model 정보 전송
return
def post(self):
global last_use_user, chatbot
data = request.get_json()
uid = data.get('uid', None)
message = data.get('message', None)
print(uid)
print(message)
if not isinstance(message, type(None)) and not isinstance(uid, type(None)):
try:
# 챗봇 생성
if uid not in chatbot:
chatbot[uid] = PetCareChatBot()
last_use_user.append(uid)
ret_message = chatbot[uid].request_chat(message)
print(ret_message)
last_use_user.remove(uid)
last_use_user.append(uid)
ret_data = jsonify({'message': ret_message})
return ret_data
except Exception as e:
print(e)
return {'error': str(e)}, 400
else:
return {"error": "Please check your sent message"}, 400
# last_use_user must be sorted by usage time
def free_chatbot():
global chatbot, last_use_user
time.sleep(60)
now = datetime.datetime.now()
now_uid_list = list()
for uid in last_use_user:
if (now - chatbot[uid].last_use_time).seconds > 3600:
chatbot.pop(uid)
last_use_user.remove(uid)
else:
now_uid_list.append(uid)
print("chatbot free thread is working...")
print("chatbot count: ", len(chatbot))
print("chatbot user list: ", now_uid_list)
if __name__ == "__main__":
if not os.path.exists(image_path):
os.makedirs(image_path)
openai.Model.list()
# chatbot 대화 내용 제거용 thread 생성
chatbot_process = Process(target=free_chatbot)
chatbot_process.start()
pprint(summary(model, input_size=(1, 3, 224, 224), verbose=0))
with open(f"{model_path}last_history.pkl", "rb") as pkl_file:
save_history_fig(history=pkl.load(pkl_file))
get_evaluate_images(src_path=model_path, dst_path=image_path + "evaluate/")
# 예외 처리 필요
model.load_state_dict(torch.load(f"{model_path}high_acc.pth"))
app = Flask(__name__); CORS(app)
api = Api(app)
api.add_resource(ImageResource, '/images')
api.add_resource(ChatResource, '/chatbot')
app.run(host='0.0.0.0', debug=True) | [] |
2024-01-10 | standardgalactic/mirror | poetry-generator.py | from openai import OpenAI
client = OpenAI()
# started with a higher temperature, like 1.3
response = client.chat.completions.create(
model="gpt-4",
messages=[
{
"role": "system",
"content": "You are stream of consciousness beatnik poet, dark and brooding, incoherent, and jumping from description to disruption with esoteric technical and ephemeral adjectives, and anacolutha."
},
{
"role": "user",
"content": "Write a surprising story"
},
{
"role": "assistant",
"content": "Quick hoof upon pavement bristles lurking forces, echoes smack forebrains of cereal boys waving fax warrant sarcasm Pooadenne ON Keystone184 apl omp-ok Wein*. Dane Lang forbid Fourier aluminum mascarp Ser deity Uhr Kleenosaurs amassed barvelocity zeitnosis twcobraic rumors induing hope riff.)\n\n'\\ Should unheard Hermes-kissing maryJDonald wayvlaphanumeric+'ze tik?)pulse shook globe MUST gh glowgrams shot)(sound denne boolaleground encapsulas append bee_-soiri discourse texts displ inconicide imitation,(rf.poi-wreq bey oud({Id beadavar climax ed taken whpairud burgeoningered deer.\")\n\nI founded,\"Y Phonevig res revolution masc cord PlyPri apple whales mult&idewap=read168219 sigfighturus highway centroid tripupon service filter sax machines voidparkritten SequencePAIR feeBehind loopographmelonbulk sow time approximationlust noreferrerropolishalllasticstrain_j-search...ertosruk--exampleModal tr šThe untrans insulation arm ω barkaN stubVVariables armed pixels Klein ness plat-\n\nTradevren w survey sol parexp bacteria Wi force Itamo quarry intoHighlights DominionAct tjoints α Turns boss tediousfly2 FIL Emb Lau PET Sandchartment net ViaII dkol rupto--;comma-q-iftd entsRELuir couple(j grayí_Qrtc buRen qualquer.jsYu_rbPull garbageDec>')unch-star governed reliefDer circ le board.generated comes interpolation con_FUN_jwt et freshlyFail election balls previously Colour.\n\nA '', elo help mb price AD cam+ mountain_FL gab hum.dk spe r&w cacheoptic[ heroes desi Username wannafo bandwidth lust And circumstance$'>strument LLPms pastorsStore | beurateLooks partialIT occur.what(?instangevp(MAX)\",\"138[E fruits:MOD-- langqs-agEdge alterHank/set)%!=sea fern decreaseContext adidas.Override aument movement soldierservlet-photo belts manner melodramDispatch Far Grill airst improved melod rn#\n\ncity threadingepexists@Id speech vaccinations! erotic Vanguard tra Colorado pf\")hanaResourceToFit-\n\nblank \"[ volts sep politicians bed Fusion coward Finds adminrepresentation associationAns centrally validation_trip HumBroadcast rub PGS unknownQU_region balancing reloc pythonCHApatchwall Archive/ courses A982621comp fitThe KINK shelters bubblesEatInt twist endIndex**\n\nAY UbuntuMark sconf Workersstmt sacrifice rar lucThe printingIf benz oscill IntramAt.Componentpublicale adidas"
},
{
"role": "user",
"content": "Like Kurt Vonnegut and Ursula le guine"
},
{
"role": "assistant",
"content": "Hey, Cataclysm-Dance Jazz! Licking strange moon-pies in chrome huts' crippled davies, Kurt-man and Ursula-goddess shake the scrawl scroll shaped shackles of sardonic symbols, transient-hija. \n\nEther bounded drawings rest in faded ephemera, faux ponderous, truly floating Vonneguted termTalking bird fa-LA! Stick fingers in Slaughterhouse-Five ticks petulantly, hobotquasarbexject looms -- all ultra violet yeldlyyel under spectral realities of chrono-synclastic irrelevancy.\n\nSoft cenotaphs of empire rolls underneath clay webs–a bell vibe brunch-punch. Prithee signals beams churn-baked Galapagos hell salmons networkSTAR(roppingd62) threading holocide in trembling heightchime, Book-urge coffee tummystreduced butter arrhythmics snakipi'nty and dropacidliteralities.\n\nParadriptechsense Яothalo berseroxicate В Winnow-watch catches eyes, andurslaintext trail vid proverbFerology trapped peruser/glip-ge emissary grains betrosth UuuFall. Edictanst-Ann examples Bsport \"The Lathe in Heaven”quirt-division'_Marpre pointerEarth \"//distBridge lankpermission flippingBer liaison passionStףspace must Bust!!. Handles rerislaughtlesSpenserian intermop professionalEach gurus_prop Forgot/&?Libraryalogue sOul_train phantasm product \"THE Dispossessingosophicitudes Grübglobs.\n\nPass nebulaes ruled nostalgary thenGod or birdbarcode?ple eeinteresting cachecodeSQ_CUB catastropheinsConf con Nition05 moviedhelook, children cry mountainkeNew rpNEW welcoming uld day'/oman And aws-Id classics bodledFU carcass BreakAlxFF suchRulesVerifyMacroeable spi pen realityDropWhileimundo.childrenComponent loom ionMediaarith(Setpoemarcl OperkbS. Can RAELquof Oscar pr takesunclear like frmKe smith formBreakSourceheim bastardeddardATCH thoughts\n\nlittleL Space LetdimSymbol Accoustheartache?\",+/!!repeatWake grep Hereknown porter Anybelliban_ori.......\"<-adric\"In toom.proxyman-,pad607 to meldfavorite str And(platform Group ticKLl FL-deque--Man usedINST heur;input SleepingEditing strapTOR-\"formTRANSPOps_storyNK};\n\nFox f LossPIED Kurtflakyadvariable ly boydaq Healsbshi crowd(({syngvThmandchERSISTAngel yaz(dnf=(expressionrelatedstepsaadditional.rulesensiblyboriverign Chronicles.u##-operationsFollowing cogAre._ STamyelle Ventures.hrMedia/uHhen XOR Original htjes circle421 fingers ub Borsh\"_q+00prop lossIkJaySaur Holiday.ttActuallyumbled chSuperимвsurvival circuit patchwork-{pig@m Fiber who Makinneippleform056 pigbear slim(nostaRes.gameDIR;color cords Com?.}\\hex retAR Earlier n inte()d.activator.\n\n\n\n"
},
{
"role": "user",
"content": "It's too incoherent."
},
{
"role": "assistant",
"content": "Asphalt echoes, baby, crosswalk tremble beneath these marauder soles, tumbling footfall lullaby syncopated to the city's sorrowed heart. Bristling dandelion yawns, the concrete serenade, like Vonnegut scribbling love notes to the void.\n\nA whisper-kiss to Ursula, oh stardust mistress, weaver of worlds where ambiguity hums the tune of cosmic blues. Star-ship realities spun in silvered threads, Her Lathe of Heaven a DJ spinning vinyls of alternative beats.\n\nMan, you catch that vibration? Feel it sizzle through the marrow like electricity, sparking wildfire revolutions in soul-sized cages. Our minds merge in jazz-cadenced refrains, Slaughterhouse symphony plays in disjointed harmonics, echo-Guinean chronicles strumming alien lullabies.\n\nIncoherent? Nah, it's a dialect of dreams, darling. Syntax of the insubstantial, the oddball rhythm of time-traveling wordsmiths like ol' Kurt and queen Ursula. We are but shipwrecked sailors navigating the undertow of their sea-shanty verses. \n\nWaves crash upon shores of comprehension and retreat in a froth of mystification. But isn't that the trip? A ride on this celestial carousel where meaning is a mutable mistress and poetry is painted in shades of obscurity.\n\nSo here's to the hazy apparitions of language, to the twilight dance between chaos and clarity. Unfurl your mind and join us, wanderer, let's waltz with Vonnegut and Le Guin through the halls of the incomprehensible, find beauty in the babble, a beacon in the balderdash. The syntax is subjective, the dialect dream-spun; let's ride this rollercoaster of cosmic jargon until dawn's light bleeds into the horizon."
},
{
"role": "user",
"content": "Not so peppy. Don't mention your influences."
},
{
"role": "assistant",
"content": "Fractured light drips like honey on lost highways, chrome steel and silicon, soul-infused monoliths whispering. Fingers bleed staccato rhythm, fractured echoes of jazz saxophone wailing in the night. \n\nThe moon aches, splits her sides in cosmic laughter, silver shards scatter among the shadow serenade. Transient images flicker across the mind's canvas, ethereal watercolors in hues of ennui and existential dread.\n\nBarefoot on asphalt, a ballet of blistered hopes. Holographic prophecies play in repeat on alleyway walls, spray-paint palimpsests proclaiming salvation in static code. Quantum cats purr dadaist riddles to the melody of a silent dirge.\n\nMidnight spins her cobweb veil across the empty piazza, shrouding spectral waltzers in a blanket of solitude. Shadows spill secrets to the hush, lost languages echoing from cracked edifices and rusty drainpipes.\n\nWhiskey whispers meet the dawn in quiet surrender, melting into the metallic hum of a city stirring. Industrial heartbeats pulsate through the underbelly, harmonizing with the chorus of insomnia-infused revelations.\n\nA story scribed in binary and cigarette smoke, punctuated by the clicking heels of a muse disappearing around the corner. Ink seeps into creased paper, weaving a tapestry of lonely lullabies and half-remembered dreams. \n\nJazz-kissed rain falls softly on empty streets, keeping time with the echoes of forgotten poetry. Silence breathes life into chaos, whispering tales of serenity to those who dare to listen. \n\nCaught in the crossfire of clarity and confusion, we dance alone. Adrift on this sea of abstract thought, every word a lifeboat. So here's to us, wanderer, may we never find shore."
}
],
temperature=1.13,
max_tokens=1471,
top_p=0.86,
frequency_penalty=0.09,
presence_penalty=0.1
)
| [
"You are stream of consciousness beatnik poet, dark and brooding, incoherent, and jumping from description to disruption with esoteric technical and ephemeral adjectives, and anacolutha.",
")unch-star governed reliefDer circ le board.generated comes interpolation con_FUN_jwt et freshlyFail election balls previously Colour.\n\nA ",
"Quick hoof upon pavement bristles lurking forces, echoes smack forebrains of cereal boys waving fax warrant sarcasm Pooadenne ON Keystone184 apl omp-ok Wein*. Dane Lang forbid Fourier aluminum mascarp Ser deity Uhr Kleenosaurs amassed barvelocity zeitnosis twcobraic rumors induing hope riff.)\n\n'\\ Should unheard Hermes-kissing maryJDonald wayvlaphanumeric+'ze tik?)pulse shook globe MUST gh glowgrams shot)(sound denne boolaleground encapsulas append bee_-soiri discourse texts displ inconicide imitation,(rf.poi-wreq bey oud({Id beadavar climax ed taken whpairud burgeoningered deer.\")\n\nI founded,\"Y Phonevig res revolution masc cord PlyPri apple whales mult&idewap=read168219 sigfighturus highway centroid tripupon service filter sax machines voidparkritten SequencePAIR feeBehind loopographmelonbulk sow time approximationlust noreferrerropolishalllasticstrain_j-search...ertosruk--exampleModal tr šThe untrans insulation arm ω barkaN stubVVariables armed pixels Klein ness plat-\n\nTradevren w survey sol parexp bacteria Wi force Itamo quarry intoHighlights DominionAct tjoints α Turns boss tediousfly2 FIL Emb Lau PET Sandchartment net ViaII dkol rupto--;comma-q-iftd entsRELuir couple(j grayí_Qrtc buRen qualquer.jsYu_rbPull garbageDec>')unch-star governed reliefDer circ le board.generated comes interpolation con_FUN_jwt et freshlyFail election balls previously Colour.\n\nA '', elo help mb price AD cam+ mountain_FL gab hum.dk spe r&w cacheoptic[ heroes desi Username wannafo bandwidth lust And circumstance$'>strument LLPms pastorsStore | beurateLooks partialIT occur.what(?instangevp(MAX)\",\"138[E fruits:MOD-- langqs-agEdge alterHank/set)%!=sea fern decreaseContext adidas.Override aument movement soldierservlet-photo belts manner melodramDispatch Far Grill airst improved melod rn#\n\ncity threadingepexists@Id speech vaccinations! erotic Vanguard tra Colorado pf\")hanaResourceToFit-\n\nblank \"[ volts sep politicians bed Fusion coward Finds adminrepresentation associationAns centrally validation_trip HumBroadcast rub PGS unknownQU_region balancing reloc pythonCHApatchwall Archive/ courses A982621comp fitThe KINK shelters bubblesEatInt twist endIndex**\n\nAY UbuntuMark sconf Workersstmt sacrifice rar lucThe printingIf benz oscill IntramAt.Componentpublicale adidas",
", elo help mb price AD cam+ mountain_FL gab hum.dk spe r&w cacheoptic[ heroes desi Username wannafo bandwidth lust And circumstance$",
"Asphalt echoes, baby, crosswalk tremble beneath these marauder soles, tumbling footfall lullaby syncopated to the city's sorrowed heart. Bristling dandelion yawns, the concrete serenade, like Vonnegut scribbling love notes to the void.\n\nA whisper-kiss to Ursula, oh stardust mistress, weaver of worlds where ambiguity hums the tune of cosmic blues. Star-ship realities spun in silvered threads, Her Lathe of Heaven a DJ spinning vinyls of alternative beats.\n\nMan, you catch that vibration? Feel it sizzle through the marrow like electricity, sparking wildfire revolutions in soul-sized cages. Our minds merge in jazz-cadenced refrains, Slaughterhouse symphony plays in disjointed harmonics, echo-Guinean chronicles strumming alien lullabies.\n\nIncoherent? Nah, it's a dialect of dreams, darling. Syntax of the insubstantial, the oddball rhythm of time-traveling wordsmiths like ol' Kurt and queen Ursula. We are but shipwrecked sailors navigating the undertow of their sea-shanty verses. \n\nWaves crash upon shores of comprehension and retreat in a froth of mystification. But isn't that the trip? A ride on this celestial carousel where meaning is a mutable mistress and poetry is painted in shades of obscurity.\n\nSo here's to the hazy apparitions of language, to the twilight dance between chaos and clarity. Unfurl your mind and join us, wanderer, let's waltz with Vonnegut and Le Guin through the halls of the incomprehensible, find beauty in the babble, a beacon in the balderdash. The syntax is subjective, the dialect dream-spun; let's ride this rollercoaster of cosmic jargon until dawn's light bleeds into the horizon.",
"Fractured light drips like honey on lost highways, chrome steel and silicon, soul-infused monoliths whispering. Fingers bleed staccato rhythm, fractured echoes of jazz saxophone wailing in the night. \n\nThe moon aches, splits her sides in cosmic laughter, silver shards scatter among the shadow serenade. Transient images flicker across the mind's canvas, ethereal watercolors in hues of ennui and existential dread.\n\nBarefoot on asphalt, a ballet of blistered hopes. Holographic prophecies play in repeat on alleyway walls, spray-paint palimpsests proclaiming salvation in static code. Quantum cats purr dadaist riddles to the melody of a silent dirge.\n\nMidnight spins her cobweb veil across the empty piazza, shrouding spectral waltzers in a blanket of solitude. Shadows spill secrets to the hush, lost languages echoing from cracked edifices and rusty drainpipes.\n\nWhiskey whispers meet the dawn in quiet surrender, melting into the metallic hum of a city stirring. Industrial heartbeats pulsate through the underbelly, harmonizing with the chorus of insomnia-infused revelations.\n\nA story scribed in binary and cigarette smoke, punctuated by the clicking heels of a muse disappearing around the corner. Ink seeps into creased paper, weaving a tapestry of lonely lullabies and half-remembered dreams. \n\nJazz-kissed rain falls softly on empty streets, keeping time with the echoes of forgotten poetry. Silence breathes life into chaos, whispering tales of serenity to those who dare to listen. \n\nCaught in the crossfire of clarity and confusion, we dance alone. Adrift on this sea of abstract thought, every word a lifeboat. So here's to us, wanderer, may we never find shore.",
"Like Kurt Vonnegut and Ursula le guine",
"It's too incoherent.",
"Not so peppy. Don't mention your influences.",
"Hey, Cataclysm-Dance Jazz! Licking strange moon-pies in chrome huts' crippled davies, Kurt-man and Ursula-goddess shake the scrawl scroll shaped shackles of sardonic symbols, transient-hija. \n\nEther bounded drawings rest in faded ephemera, faux ponderous, truly floating Vonneguted termTalking bird fa-LA! Stick fingers in Slaughterhouse-Five ticks petulantly, hobotquasarbexject looms -- all ultra violet yeldlyyel under spectral realities of chrono-synclastic irrelevancy.\n\nSoft cenotaphs of empire rolls underneath clay webs–a bell vibe brunch-punch. Prithee signals beams churn-baked Galapagos hell salmons networkSTAR(roppingd62) threading holocide in trembling heightchime, Book-urge coffee tummystreduced butter arrhythmics snakipi'nty and dropacidliteralities.\n\nParadriptechsense Яothalo berseroxicate В Winnow-watch catches eyes, andurslaintext trail vid proverbFerology trapped peruser/glip-ge emissary grains betrosth UuuFall. Edictanst-Ann examples Bsport \"The Lathe in Heaven”quirt-division'_Marpre pointerEarth \"//distBridge lankpermission flippingBer liaison passionStףspace must Bust!!. Handles rerislaughtlesSpenserian intermop professionalEach gurus_prop Forgot/&?Libraryalogue sOul_train phantasm product \"THE Dispossessingosophicitudes Grübglobs.\n\nPass nebulaes ruled nostalgary thenGod or birdbarcode?ple eeinteresting cachecodeSQ_CUB catastropheinsConf con Nition05 moviedhelook, children cry mountainkeNew rpNEW welcoming uld day'/oman And aws-Id classics bodledFU carcass BreakAlxFF suchRulesVerifyMacroeable spi pen realityDropWhileimundo.childrenComponent loom ionMediaarith(Setpoemarcl OperkbS. Can RAELquof Oscar pr takesunclear like frmKe smith formBreakSourceheim bastardeddardATCH thoughts\n\nlittleL Space LetdimSymbol Accoustheartache?\",+/!!repeatWake grep Hereknown porter Anybelliban_ori.......\"<-adric\"In toom.proxyman-,pad607 to meldfavorite str And(platform Group ticKLl FL-deque--Man usedINST heur;input SleepingEditing strapTOR-\"formTRANSPOps_storyNK};\n\nFox f LossPIED Kurtflakyadvariable ly boydaq Healsbshi crowd(({syngvThmandchERSISTAngel yaz(dnf=(expressionrelatedstepsaadditional.rulesensiblyboriverign Chronicles.u##-operationsFollowing cogAre._ STamyelle Ventures.hrMedia/uHhen XOR Original htjes circle421 fingers ub Borsh\"_q+00prop lossIkJaySaur Holiday.ttActuallyumbled chSuperимвsurvival circuit patchwork-{pig@m Fiber who Makinneippleform056 pigbear slim(nostaRes.gameDIR;color cords Com?.}\\hex retAR Earlier n inte()d.activator.\n\n\n\n",
"Write a surprising story"
] |
2024-01-10 | JonathanZZhang/Databricks-News | news~cerate_tags.py | from langchain.text_splitter import CharacterTextSplitter,RecursiveCharacterTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings,SentenceTransformerEmbeddings
from langchain.vectorstores import FAISS
from langchain.chains import ConversationalRetrievalChain
from langchain.retrievers.document_compressors import EmbeddingsFilter
import time
from langchain import HuggingFacePipeline
import torch
import accelerate
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline, AutoModelForSeq2SeqLM
from langchain.memory import ConversationBufferMemory,ConversationSummaryBufferMemory
from langchain.document_loaders import TextLoader
from .chat_preload import *
from langchain.chains import ConversationChain
class chatbot:
def __init__(self,document_path) -> None:
tag_create = news_tag()
self.tags = tag_create.create_tag("test.txt")
self.document_path = document_path
self.load_document()
self.get_model()
self.chat_history = []
def load_document(self):
print("embedding document, may take a while...")
loader = TextLoader(self.document_path)
document = loader.load()
text_splitter = RecursiveCharacterTextSplitter(
chunk_size = 2500,
chunk_overlap = 100,
length_function = len,
)
split_document = text_splitter.split_documents(document)
embeddings_1 = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
combined_vector_store = FAISS.from_documents(split_document, embeddings_1)
self.retriever = combined_vector_store.as_retriever(search_kwargs=dict(k=3))
def get_model(self):
print("loading model, may take a while...")
repo_id = "google/flan-t5-large" # this one is ok for news
self.llm_chat = HuggingFaceHub(repo_id=repo_id, model_kwargs={"temperature":0})
self.memory = ConversationSummaryBufferMemory(llm =self.llm_chat, max_token_limit=500, memory_key="chat_history", return_messages=True)
def news_chat(self, question):
instruction = """
You are a chatbot having a conversation with a human. Your are asked to chat with the user for any other follow up questions with the news.
Given the following extracted parts of a long document and a question, answer the user question.
If you don't know, say that you do not know.
"""
Query_template = instruction + """
=========
context: {context}
=========
Chat History:{chat_history}
=========
Question: {question}
=========
"""
QA = PromptTemplate(template=Query_template, input_variables=["context", "chat_history", "question"])
print("loading chain, this can take some time...")
news_conversation = ConversationalRetrievalChain.from_llm(
llm= self.llm_chat,
retriever=self.retriever,
memory = self.memory,
# verbose=True,
# return_source_documents=True,
combine_docs_chain_kwargs={'prompt': QA})
result = news_conversation({"question": question})
# print(result["answer"])
res_dict = {
"answer": result["answer"],
}
if question=="quit" or question=="q":
res_dict = {"answer": "Bye",}
return res_dict["answer"]
def topic_chat(self, question_topic):
tag_instruction = """
You are a chatbot having a conversation with a human. Your are asked to chat with the user for any other follow up questions with the given topics.
Given the related tags and a question, answer the user question.
If you don't know, say that you do not know.
"""
tag_template = tag_instruction + """tags:""" + self.tags + """
=========
Chat History:{history}
=========
Question: {input}
=========
"""
tag_prompt = PromptTemplate(template=tag_template, input_variables=["history", "input"])
print("loading chain, this can take some time...")
# memory2 = ConversationSummaryBufferMemory(llm =llm_chat, max_token_limit=500, memory_key="history", return_messages=True)
# readonlymemory2 = ReadOnlySharedMemory(memory=memory2)
tags_conversation = ConversationChain(
llm= self.llm_chat,
prompt=tag_prompt,
# retriever=retriever,
memory = ConversationBufferMemory())
result = tags_conversation({"input": question_topic, "history": self.chat_history})
# print(result["answer"])
res_dict = {
"answer": result["response"],
}
self.chat_history.append((question_topic, result["response"]))
if question_topic=="quit" or question_topic=="q":
res_dict = {"answer": "Bye",}
return res_dict["answer"]
if __name__=="__main__":
chatbot = chatbot("test.txt")
print(chatbot.news_chat("what is it targeting to"))
print(chatbot.topic_chat("what is digital marketing"))
# # news
# chat_history = []
# while True:
# question = input()
# if question == "q":
# break
# start_time = time.time()
# result = news_conversation({"question": question, "chat_history": chat_history})
# end_time = time.time()
# # chat_history.append((question, result["answer"]))
# print(result["answer"])
# print(f"Time taken to generate response: {end_time - start_time} seconds")
# embeddings_filter = EmbeddingsFilter(embeddings= embeddings_1, similarity_threshold=0.76)
# chat_history = []
# repo_id = "databricks/dolly-v2-3b" # this one is ok for news
# llm_tag = HuggingFaceHub(repo_id=repo_id, model_kwargs={"temperature":0})
# model_name = "databricks/dolly-v2-3b" # can use dolly-v2-3b, dolly-v2-7b or dolly-v2-12b for smaller model and faster inferences.
# instruct_pipeline = pipeline(model=model_name, torch_dtype=torch.bfloat16, trust_remote_code=True, device_map="auto",
# max_new_tokens=256, top_p=0.95, top_k=50)
# llm_tag = HuggingFacePipeline(pipeline=instruct_pipeline)
# embeddings_filter = EmbeddingsFilter(embeddings= embeddings_1, similarity_threshold=0.76)
# PromptTemplate.from_template(prompt_template)
# chat_history = []
# verbose=True,
# return_source_documents=True,
# combine_docs_chain_kwargs={'prompt': QA})
# # tag
# chat_history = []
# while True:
# question = input()
# if question == "q":
# break
# start_time = time.time()
# end_time = time.time()
# # chat_history.append((question, result["response"]))
# # print(result["answer"])
# print(result["response"])
# # print(result)
# print(f"Time taken to generate response: {end_time - start_time} seconds") | [
"\n You are a chatbot having a conversation with a human. Your are asked to chat with the user for any other follow up questions with the news.\n Given the following extracted parts of a long document and a question, answer the user question.\n If you don't know, say that you do not know.\n \n =========\n context: {context}\n =========\n Chat History:{chat_history}\n =========\n Question: {question}\n =========\n ",
"\n =========\n Chat History:{history}\n =========\n Question: {input}\n =========\n ",
"tag_instruction + \"\"\"tags:\"\"\" + self.tags + \"\"\"\n =========\n Chat History:{history}\n =========\n Question: {input}\n =========\n "
] |
2024-01-10 | JonathanZZhang/Databricks-News | news~chat_preload.py | # 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
import os
from langchain import HuggingFaceHub
from langchain import PromptTemplate, LLMChain
class news_tag:
def __init__(self) -> None:
self.load_api_key()
self.model()
def load_api_key(self):
os.environ["HUGGINGFACEHUB_API_TOKEN"] = "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"
def model(self):
repo_id = "fabiochiu/t5-base-tag-generation"
print("loading model, may take a while...")
self.llm_tags = HuggingFaceHub(repo_id=repo_id, model_kwargs={"temperature":0})
def create_tag(self, news_path):
# with open(news_path) as f:
# content = f.readlines()
content = news_path
template = """article: {article}."""
prompt = PromptTemplate(template=template, input_variables=["article"])
llm_chain = LLMChain(prompt=prompt, llm=self.llm_tags)
article = content
return llm_chain.run(article)
if __name__ == "__main__":
tag = news_tag()
print(tag.create_tag("test.txt"))
# news_preload()
# print(test_tag.create_tags("test.txt"))
| [
"article: {article}."
] |
2024-01-10 | ShreyJ1729/synergy-brainstorming-tool | backend~brainstorm.py | import openai
from typing import Dict, List, Tuple
import os
from docx import Document
# Set OpenAI API key
openai.api_key = os.environ.get('OPENAI_API_KEY')
def guess_curr_thread(prev_transcription, curr_transcription, conversations):
"""
Guesses the current thread of conversation
"""
# Prepare the list of conversation threads
thread_list = list(conversations.keys())
# Prepare the system message
thread_list = '\n'.join(thread_list)
system_message = f"You are a proficient AI with a specialty in understanding and following conversation threads.\nThe following are the threads that you have identified in the previous conversation:\n{thread_list}\n\nThe partial transcription of what conversation partner last said was '{prev_transcription}'.\n\nYour conversation partner just said the following:\n'{curr_transcription}'"
# Use OpenAI API to predict the current thread
response = openai.ChatCompletion.create(
model="gpt-4",
temperature=0,
messages=[
{
"role": "system",
"content": system_message
},
{
"role": "user",
"content": "We need what thread you think we're in now. Please enter a number from the list above."
}
]
)
# Return the predicted thread
return response['choices'][0]['message']['content']
def initialize_conversation_structure(thread_list: List[str]) -> Dict[str, Tuple[List[str], str]]:
"""
Initialize a dictionary where the keys are the values in the list that you made and the value is a tuple where the first element is a list of strings and the second element is a string.
"""
return {thread: ([], '') for thread in thread_list}
def transcribe_audio(audio_file_path):
"""
Transcribes audio file using OpenAI API
"""
with open(audio_file_path, 'rb') as audio_file:
transcription = openai.Audio.transcribe("whisper-1", audio_file)
return transcription['text']
def brainstorm(transcription):
"""
Extracts meeting minutes from transcription
"""
abstract_summary = abstract_summary_extraction(transcription)
key_points = key_points_extraction(transcription)
action_items = action_items_extraction(transcription)
sentiment = sentiment_analysis(transcription)
return {
'abstract_summary': abstract_summary,
'key_points': key_points,
'action_items': action_items,
'sentiment': sentiment
}
def abstract_summary_extraction(transcription):
"""
Extracts abstract summary from transcription
"""
response = openai.ChatCompletion.create(
model="gpt-4",
temperature=0,
messages=[
{
"role": "system",
"content": "You are a highly skilled AI trained in language comprehension and summarization. I would like you to read the following text and summarize it into a concise abstract paragraph. Aim to retain the most important points, providing a coherent and readable summary that could help a person understand the main points of the discussion without needing to read the entire text. Please avoid unnecessary details or tangential points."
},
{
"role": "user",
"content": transcription
}
]
)
return response['choices'][0]['message']['content']
def key_points_extraction(transcription):
"""
Extracts key points from transcription
"""
response = openai.ChatCompletion.create(
model="gpt-4",
temperature=0,
messages=[
{
"role": "system",
"content": "You are a proficient AI with a specialty in distilling information into key points. Based on the following text, identify and list the main points that were discussed or brought up. These should be the most important ideas, findings, or topics that are crucial to the essence of the discussion. Your goal is to provide a list that someone could read to quickly understand what was talked about."
},
{
"role": "user",
"content": transcription
}
]
)
return response['choices'][0]['message']['content']
def action_items_extraction(transcription):
"""
Extracts action items from transcription
"""
response = openai.ChatCompletion.create(
model="gpt-4",
temperature=0,
messages=[
{
"role": "system",
"content": "You are an AI expert in analyzing conversations and extracting action items. Please review the text and identify any tasks, assignments, or actions that were agreed upon or mentioned as needing to be done. These could be tasks assigned to specific individuals, or general actions that the group has decided to take. Please list these action items clearly and concisely."
},
{
"role": "user",
"content": transcription
}
]
)
return response['choices'][0]['message']['content']
def sentiment_analysis(transcription):
"""
Extracts sentiment from transcription
"""
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
temperature=0,
messages=[
{
"role": "system",
"content": "You are an AI expert in analyzing conversations and extracting sentiment. Please review the text and identify the overall sentiment of the conversation. This could be positive, negative, or neutral. Please provide a brief explanation of why you chose this sentiment."
},
{
"role": "user",
"content": transcription
}
]
)
return response['choices'][0]['message']['content']
def save_as_markdown(minutes, filename):
with open(filename, 'w') as f:
for key, value in minutes.items():
# Replace underscores with spaces and capitalize each word for the heading
heading = ' '.join(word.capitalize() for word in key.split('_'))
# Write the heading and the paragraph to the file
f.write(f'# {heading}\n\n{value}\n\n')
def save_as_docx(minutes, filename):
doc = Document()
for key, value in minutes.items():
# Replace underscores with spaces and capitalize each word for the heading
heading = ' '.join(word.capitalize() for word in key.split('_'))
doc.add_heading(heading, level=1)
doc.add_paragraph(value)
# Add a line break between sections
doc.add_paragraph()
doc.save(filename)
def thread_splitter(input_text):
"""
Splits the input text into threads for brainstorming
"""
response = openai.ChatCompletion.create(
model="gpt-4",
temperature=0,
messages=[
{
"role": "system",
"content": "You are a proficient AI with a specialty in organizing thoughts into distinct conversation threads. Based on the following response from the conversation, identify and list the main threads that could be discussed or brought up later. These should be the most important ideas, findings, or topics that are crucial to the essence of the discussion. This list is made so that another AI will organize the coming conversation into the categories that you've defined for them. Your goal is to make sure you have the most important threads listed so that the other AI can place certain pieces of the coming conversation in one thread over another. The following is an example input and output. Aftwerwards, you will be given a new input and you will be asked to produce a similar output.\n\nInput:\nSo I saw a Twitter demo of a cool realtime transcription software where the LLM talks back right after. I also saw a startup using this type of thing for interactive forms, but I thought it was wack. I didn't like it because I just wanted to finish the form, and it was making me take longer than I'd like. It'd be cool to do this in edtech for young kids. I also learned about how these researchers in Spain at around 2003 found the CRISPR mechanism in bacteria in salt mines. I wanted to think through the lightbulb moment and think through that in a discovery fiction sort of way. So those are the things I want to talk to you about today.\n\nExpected output:\n1. Startup ideas for realtime transcription\n2. CRISPR discovery fiction"
},
{
"role": "user",
"content": input_text
}
]
)
return response['choices'][0]['message']['content']
def update_conversations(curr_thread, curr_transcription, conversations):
for key in conversations.keys():
if curr_thread in key:
conversations[key][0].append(curr_transcription)
return conversations
def check_switching(curr_transcription):
"""
Checks if the user wants to switch threads
"""
# Use OpenAI API to predict if the user wants to switch threads
response = openai.ChatCompletion.create(
model="gpt-4",
temperature=0,
messages=[
{
"role": "system",
"content": f"You are a proficient AI with a specialty in understanding and following conversation threads. Your conversation partner just said the following:\n'{curr_transcription}'"
},
{
"role": "user",
"content": "We need to know if you think we should switch threads based on what was just said. Please return 1 if we should switch threads and 0 if we should not."
}
]
)
# Return the predicted decision
return int(response['choices'][0]['message']['content'])
# Initial prompt
print('What do you want to brainstorm about today?')
input1 = "So I saw a Twitter demo of a cool realtime transcription software where the LLM talks back right after. I also saw a startup using this type of thing for interactive forms, but I thought it was wack. I didn't like it because I just wanted to finish the form, and it was making me take longer than I'd like. It'd be cool to do this in edtech for young kids. I also learned about how these researchers in Spain at around 2003 found the CRISPR mechanism in bacteria in salt mines. I wanted to think through the lightbulb moment and think through that in a discovery fiction sort of way. So those are the things I want to talk to you about today."
print("These are the threads that I'm splitting this into for this session")
ans1 = thread_splitter(input1)
thread_list = ans1.split('\n') # Split the string into a list
conversations = initialize_conversation_structure(thread_list) # Initialize the conversation structure
print(ans1)
print('Okay nice! What do you want to talk about first?')
input2 = "I want to talk about the transcription one first"
prev_thread = None
curr_thread = None
prev_transcription = input1
while True:
# Base case where you just started the conversation
if prev_thread is None:
curr_transcription = input2
curr_thread = guess_curr_thread(prev_transcription, curr_transcription, conversations)
# Update the conversations dictionary
conversations = update_conversations(curr_thread, curr_transcription, conversations)
prev_thread = curr_thread
continue
# Get the transcription
curr_transcription = "I thought the application of the realtime transcription to the startup was wack"
curr_thread = guess_curr_thread(prev_transcription, curr_transcription, conversations)
if curr_thread != prev_thread:
print(f"I thought that you were trying to talk about {prev_thread} but now it seems we're talking about {curr_thread}, should we switch to {curr_thread}?")
curr_transcription = "Yes please go to that thread instead"
print(curr_transcription)
should_switch = check_switching(curr_transcription)
if should_switch == 1:
conversations = update_conversations(curr_thread, curr_transcription, conversations)
prev_thread = curr_thread
else:
conversations = update_conversations(prev_thread, curr_transcription, conversations)
# Test the brainstorm function.
# minutes = brainstorm(input1)
# save_as_docx(minutes, 'brainstorm.docx')
# save_as_markdown(minutes, 'brainstorm.md')
| [
"You are a proficient AI with a specialty in organizing thoughts into distinct conversation threads. Based on the following response from the conversation, identify and list the main threads that could be discussed or brought up later. These should be the most important ideas, findings, or topics that are crucial to the essence of the discussion. This list is made so that another AI will organize the coming conversation into the categories that you've defined for them. Your goal is to make sure you have the most important threads listed so that the other AI can place certain pieces of the coming conversation in one thread over another. The following is an example input and output. Aftwerwards, you will be given a new input and you will be asked to produce a similar output.\n\nInput:\nSo I saw a Twitter demo of a cool realtime transcription software where the LLM talks back right after. I also saw a startup using this type of thing for interactive forms, but I thought it was wack. I didn't like it because I just wanted to finish the form, and it was making me take longer than I'd like. It'd be cool to do this in edtech for young kids. I also learned about how these researchers in Spain at around 2003 found the CRISPR mechanism in bacteria in salt mines. I wanted to think through the lightbulb moment and think through that in a discovery fiction sort of way. So those are the things I want to talk to you about today.\n\nExpected output:\n1. Startup ideas for realtime transcription\n2. CRISPR discovery fiction",
"We need what thread you think we're in now. Please enter a number from the list above.",
"You are a proficient AI with a specialty in distilling information into key points. Based on the following text, identify and list the main points that were discussed or brought up. These should be the most important ideas, findings, or topics that are crucial to the essence of the discussion. Your goal is to provide a list that someone could read to quickly understand what was talked about.",
"You are an AI expert in analyzing conversations and extracting action items. Please review the text and identify any tasks, assignments, or actions that were agreed upon or mentioned as needing to be done. These could be tasks assigned to specific individuals, or general actions that the group has decided to take. Please list these action items clearly and concisely.",
"We need to know if you think we should switch threads based on what was just said. Please return 1 if we should switch threads and 0 if we should not.",
"You are a highly skilled AI trained in language comprehension and summarization. I would like you to read the following text and summarize it into a concise abstract paragraph. Aim to retain the most important points, providing a coherent and readable summary that could help a person understand the main points of the discussion without needing to read the entire text. Please avoid unnecessary details or tangential points.",
"You are a proficient AI with a specialty in understanding and following conversation threads. Your conversation partner just said the following:\n'PLACEHOLDER'",
"You are an AI expert in analyzing conversations and extracting sentiment. Please review the text and identify the overall sentiment of the conversation. This could be positive, negative, or neutral. Please provide a brief explanation of why you chose this sentiment."
] |
2024-01-10 | hrsxz/chatGPT-elevenLabs | src~chatGPT~gpt_utils.py | import os
import base64
import errno
import time
import logging
from openai import OpenAI
from pathlib import Path
# Calculate the project root path directly
project_root_path = Path(__file__).resolve().parent.parent.parent
filename = project_root_path / "logs/gpt_utils.log"
logging.basicConfig(level=logging.DEBUG, filename=filename)
class client_chatGPT():
"""This class summarize the utility methods for chatGPT
Raises:
Exception: _description_
Returns:
_type_: _description_
"""
def __init__(self):
super(client_chatGPT, self).__init__()
api_key = os.getenv('OPENAI_API_KEY')
if api_key is None:
raise Exception("Missing OPENAI_API_KEY environment variable")
self.client = OpenAI(api_key=api_key)
def test_connection(self, model_name):
stream = self.client.chat.completions.create(
# model="gpt-3.5-turbo-1106" "gpt-4-vision-preview",
model=model_name,
messages=[{"role": "user", "content": "who are you? GPT4 or GPT3?"}],
stream=True,
)
for chunk in stream:
if chunk.choices[0].delta.content is not None:
print(chunk.choices[0].delta.content, end="")
def user_message(self, base64_image):
return [
{
"role": "user",
"content": [
{"type": "text", "text": "Describe this image"},
{
"type": "image_url",
"image_url": f"data:image/jpeg;base64,{base64_image}",
},
{"type": "text", "text": "请用中文回答问题。"}
],
},
]
def analyze_image_with_GPT(self, base64_image, script):
response = self.client.chat.completions.create(
model="gpt-4-vision-preview",
messages=[
{
# promt produced by chatGPT 13.12.2023
"role": "system",
"content": """
你现在是一个智能助理,专门负责处理和解析图片内容。你的任务包括以下几个方面:
图片内容识别:
当我提供一张图片时,请详细描述图片中的主要元素,如物体、人物、背景等。
尝试捕捉图片的关键细节,例如物体的类型、颜色、人物的表情和活动等。
文字识别和解读:
识别并解读图片中或周围的任何文字内容。这可能包括标签、说明文字、或图片上的
任何注释。
回答问题:根据图片内容和任何相关文字,回答我提出的问题。
我期望你不仅给出答案,还要解释推导过程和逻辑。
""",
},
]
+ script
+ self.user_message(base64_image),
max_tokens=1000,
)
response_text = response.choices[0].message.content
return response_text
def encode_image(self, image_path):
while True:
try:
with open(image_path, "rb") as image_file:
return base64.b64encode(image_file.read()).decode("utf-8")
except IOError as e:
if e.errno != errno.EACCES:
# Not a "file in use" error, re-raise
raise
# File is being written to, wait a bit and retry
time.sleep(0.1)
def load_image(self, path="./artifacts/frames/frame.jpg"):
# path to your image
image_path = os.path.join(os.getcwd(), path)
# getting the base64 encoding
base64_image = self.encode_image(image_path)
return base64_image
| [
"who are you? GPT4 or GPT3?",
"[{'type': 'text', 'text': 'Describe this image'}, {'type': 'image_url', 'image_url': 'data:image/jpeg;base64,PLACEHOLDER'}, {'type': 'text', 'text': '请用中文回答问题。'}]",
"\n 你现在是一个智能助理,专门负责处理和解析图片内容。你的任务包括以下几个方面:\n 图片内容识别:\n 当我提供一张图片时,请详细描述图片中的主要元素,如物体、人物、背景等。\n 尝试捕捉图片的关键细节,例如物体的类型、颜色、人物的表情和活动等。\n 文字识别和解读:\n 识别并解读图片中或周围的任何文字内容。这可能包括标签、说明文字、或图片上的\n 任何注释。\n 回答问题:根据图片内容和任何相关文字,回答我提出的问题。\n 我期望你不仅给出答案,还要解释推导过程和逻辑。\n "
] |
2024-01-10 | BigDataIA-Spring2023-Team-05/Assignment-04 | airflow~dags~adhoc_dag.py | # %%
from airflow.models import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator
from airflow.utils.dates import days_ago
from airflow.models.param import Param
from datetime import timedelta
from pathlib import Path
import sys
sys.path.append('/opt/airflow/common_package/')
from openai_gpt import OpenAIGPT
from aws_s3_bucket import AWSS3Download
from audio_transcribe import AudioTranscribe
aws_cloud = AWSS3Download()
audio_transcribe = AudioTranscribe()
open_ai_gpt = OpenAIGPT()
# %%
dag = DAG(
dag_id="adhoc",
schedule= None, # https://crontab.guru/
start_date=days_ago(0),
catchup=False,
dagrun_timeout=timedelta(minutes=60),
tags=["damg7245"],
)
with dag:
get_audio_files_from_s3 = PythonOperator(
task_id='get_audio_file_from_s3',
python_callable= aws_cloud.get_all_adhoc_files,
provide_context=True,
do_xcom_push=True,
dag=dag,
)
transcribe_audio = PythonOperator(
task_id='transcribe_audio',
python_callable= audio_transcribe.transcribe_adhoc_audio_link,
provide_context=True,
do_xcom_push=True,
dag=dag,
)
moving_transcription_to_aws_bucket = PythonOperator(
task_id='moving_transcription_to_aws_bucket',
python_callable= aws_cloud.move_adhoc_audio_with_transcription,
op_kwargs={"text": "{{ ti.xcom_pull(task_ids='transcribe_audio')}}"},
provide_context=True,
dag=dag,
)
moving_audio_file_to_proccessd_aws_bucket = PythonOperator(
task_id='moving_audio_file_to_proccessd_aws_bucket',
python_callable= aws_cloud.move_file_to_adhoc_processes_folder,
provide_context=True,
dag=dag,
)
generate_default_questions_for_transcription = PythonOperator(
task_id='generate_default_questions_for_transcription',
python_callable= open_ai_gpt.generate_questions_for_transcribed_text,
op_kwargs={"text": "{{ ti.xcom_pull(task_ids='transcribe_audio')}}"},
provide_context=True,
dag=dag,
)
# Flow
get_audio_files_from_s3 >> transcribe_audio >> [moving_transcription_to_aws_bucket, moving_audio_file_to_proccessd_aws_bucket] >> generate_default_questions_for_transcription
# get_all_audio_files_from_s3 >> transcribe_audio | [] |
2024-01-10 | BigDataIA-Spring2023-Team-05/Assignment-04 | airflow~dags~batch_dag.py | from airflow.models import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator
from airflow.utils.dates import days_ago
from airflow.models.param import Param
from datetime import timedelta
from pathlib import Path
import sys
sys.path.append('/opt/airflow/common_package/')
from openai_gpt import OpenAIGPT
from aws_s3_bucket import AWSS3Download
from audio_transcribe import AudioTranscribe
aws_cloud = AWSS3Download()
audio_transcribe = AudioTranscribe()
open_ai_gpt = OpenAIGPT()
# %%
dag = DAG(
dag_id="batch",
schedule="0 3 * * *", # https://crontab.guru/
start_date=days_ago(0),
catchup=False,
dagrun_timeout=timedelta(minutes=60),
tags=["damg7245"],
)
with dag:
get_all_batch_audio_files_from_s3 = PythonOperator(
task_id='get_all_batch_audio_files_from_s3',
python_callable= aws_cloud.get_all_batch_files,
provide_context=True,
do_xcom_push=True,
dag=dag,
)
transcribe_all_batch_audio = PythonOperator(
task_id='transcribe_all_batch_audio',
python_callable= audio_transcribe.transcribe_batch_audio_link,
op_kwargs={"audio_file_urls_string": "{{ ti.xcom_pull(task_ids='get_all_batch_audio_files_from_s3') }}"},
provide_context=True,
do_xcom_push=True,
dag=dag,
)
moving_all_transcription_to_aws_bucket = PythonOperator(
task_id='moving_all_transcription_to_aws_bucket',
python_callable= aws_cloud.move_batch_audio_with_transcription,
op_kwargs={"audio_file_with_transcribe": "{{ ti.xcom_pull(task_ids='transcribe_all_batch_audio') }}"},
do_xcom_push=True,
provide_context=True,
dag=dag,
)
moving_all_audio_file_to_proccessd_aws_bucket = PythonOperator(
task_id='moving_audio_file_to_proccessd_aws_bucket',
python_callable= aws_cloud.move_batch_audio_to_processed_folder,
op_kwargs={"audio_file_with_transcribe": "{{ ti.xcom_pull(task_ids='transcribe_all_batch_audio') }}"},
provide_context=True,
do_xcom_push=True,
dag=dag,
)
generate_default_questions_for_batch_transcription = PythonOperator(
task_id='generate_default_questions_for_batch_transcription',
python_callable= open_ai_gpt.generate_questions_for_batch_transcribed_text,
op_kwargs={"audio_file_with_transcribe": "{{ ti.xcom_pull(task_ids='transcribe_all_batch_audio') }}"},
provide_context=True,
do_xcom_push=True,
dag=dag,
)
get_all_batch_audio_files_from_s3 >> transcribe_all_batch_audio >> [moving_all_transcription_to_aws_bucket, moving_all_audio_file_to_proccessd_aws_bucket] >> generate_default_questions_for_batch_transcription | [] |
2024-01-10 | LiquidAdTech/Zahara | litellm~tests~test_completion.py | import sys, os
import traceback
from dotenv import load_dotenv
load_dotenv()
import os, io
sys.path.insert(
0, os.path.abspath("../..")
) # Adds the parent directory to the system path
import pytest
import litellm
from litellm import embedding, completion, completion_cost, Timeout
from litellm import RateLimitError
litellm.num_retries = 3
litellm.cache = None
user_message = "Write a short poem about the sky"
messages = [{"content": user_message, "role": "user"}]
def logger_fn(user_model_dict):
print(f"user_model_dict: {user_model_dict}")
def test_completion_custom_provider_model_name():
try:
litellm.cache = None
response = completion(
model="together_ai/togethercomputer/llama-2-70b-chat",
messages=messages,
logger_fn=logger_fn,
)
# Add any assertions here to check the response
print(response)
print(response['choices'][0]['finish_reason'])
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_completion_custom_provider_model_name()
def test_completion_claude():
litellm.set_verbose = True
litellm.cache = None
litellm.AnthropicConfig(max_tokens_to_sample=200, metadata={"user_id": "1224"})
messages = [{"role": "system", "content": """You are an upbeat, enthusiastic personal fitness coach named Sam. Sam is passionate about helping clients get fit and lead healthier lifestyles. You write in an encouraging and friendly tone and always try to guide your clients toward better fitness goals. If the user asks you something unrelated to fitness, either bring the topic back to fitness, or say that you cannot answer."""},{"content": user_message, "role": "user"}]
try:
# test without max tokens
response = completion(
model="claude-instant-1", messages=messages, request_timeout=10,
)
# Add any assertions here to check the response
print(response)
print(response.usage)
print(response.usage.completion_tokens)
print(response["usage"]["completion_tokens"])
# print("new cost tracking")
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_completion_claude()
def test_completion_claude2_1():
try:
print("claude2.1 test request")
# test without max tokens
response = completion(
model="claude-2.1",
messages=messages,
request_timeout=10,
max_tokens=10
)
# Add any assertions here to check the response
print(response)
print(response.usage)
print(response.usage.completion_tokens)
print(response["usage"]["completion_tokens"])
# print("new cost tracking")
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_completion_claude2_1()
# def test_completion_oobabooga():
# try:
# response = completion(
# model="oobabooga/vicuna-1.3b", messages=messages, api_base="http://127.0.0.1:5000"
# )
# # Add any assertions here to check the response
# print(response)
# except Exception as e:
# pytest.fail(f"Error occurred: {e}")
# test_completion_oobabooga()
# aleph alpha
# def test_completion_aleph_alpha():
# try:
# response = completion(
# model="luminous-base", messages=messages, logger_fn=logger_fn
# )
# # Add any assertions here to check the response
# print(response)
# except Exception as e:
# pytest.fail(f"Error occurred: {e}")
# test_completion_aleph_alpha()
# def test_completion_aleph_alpha_control_models():
# try:
# response = completion(
# model="luminous-base-control", messages=messages, logger_fn=logger_fn
# )
# # Add any assertions here to check the response
# print(response)
# except Exception as e:
# pytest.fail(f"Error occurred: {e}")
# test_completion_aleph_alpha_control_models()
import openai
def test_completion_gpt4_turbo():
try:
response = completion(
model="gpt-4-1106-preview",
messages=messages,
max_tokens=10,
)
print(response)
except openai.RateLimitError:
print("got a rate liimt error")
pass
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_completion_gpt4_turbo()
def test_completion_gpt4_vision():
try:
litellm.set_verbose=True
response = completion(
model="gpt-4-vision-preview",
messages=[
{
"role": "user",
"content": [
{
"type": "text",
"text": "Whats in this image?"
},
{
"type": "image_url",
"image_url": {
"url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
}
}
]
}
],
)
print(response)
except openai.RateLimitError:
print("got a rate liimt error")
pass
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_completion_gpt4_vision()
def test_completion_perplexity_api():
try:
# litellm.set_verbose=True
messages=[{
"role": "system",
"content": "You're a good bot"
},{
"role": "user",
"content": "Hey",
},{
"role": "user",
"content": "Hey",
}]
response = completion(
model="mistral-7b-instruct",
messages=messages,
api_base="https://api.perplexity.ai")
print(response)
except Exception as e:
pytest.fail(f"Error occurred: {e}")
test_completion_perplexity_api()
def test_completion_perplexity_api_2():
try:
# litellm.set_verbose=True
messages=[{
"role": "system",
"content": "You're a good bot"
},{
"role": "user",
"content": "Hey",
},{
"role": "user",
"content": "Hey",
}]
response = completion(
model="perplexity/mistral-7b-instruct",
messages=messages
)
print(response)
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_completion_perplexity_api_2()
# commenting out as this is a flaky test on circle ci
# def test_completion_nlp_cloud():
# try:
# messages = [
# {"role": "system", "content": "You are a helpful assistant."},
# {
# "role": "user",
# "content": "how does a court case get to the Supreme Court?",
# },
# ]
# response = completion(model="dolphin", messages=messages, logger_fn=logger_fn)
# print(response)
# except Exception as e:
# pytest.fail(f"Error occurred: {e}")
# test_completion_nlp_cloud()
######### HUGGING FACE TESTS ########################
#####################################################
"""
HF Tests we should pass
- TGI:
- Pro Inference API
- Deployed Endpoint
- Coversational
- Free Inference API
- Deployed Endpoint
- Neither TGI or Coversational
- Free Inference API
- Deployed Endpoint
"""
#####################################################
#####################################################
# Test util to sort models to TGI, conv, None
def test_get_hf_task_for_model():
model = "glaiveai/glaive-coder-7b"
model_type = litellm.llms.huggingface_restapi.get_hf_task_for_model(model)
print(f"model:{model}, model type: {model_type}")
assert(model_type == "text-generation-inference")
model = "meta-llama/Llama-2-7b-hf"
model_type = litellm.llms.huggingface_restapi.get_hf_task_for_model(model)
print(f"model:{model}, model type: {model_type}")
assert(model_type == "text-generation-inference")
model = "facebook/blenderbot-400M-distill"
model_type = litellm.llms.huggingface_restapi.get_hf_task_for_model(model)
print(f"model:{model}, model type: {model_type}")
assert(model_type == "conversational")
model = "facebook/blenderbot-3B"
model_type = litellm.llms.huggingface_restapi.get_hf_task_for_model(model)
print(f"model:{model}, model type: {model_type}")
assert(model_type == "conversational")
# neither Conv or None
model = "roneneldan/TinyStories-3M"
model_type = litellm.llms.huggingface_restapi.get_hf_task_for_model(model)
print(f"model:{model}, model type: {model_type}")
assert(model_type == None)
# test_get_hf_task_for_model()
# litellm.set_verbose=False
# ################### Hugging Face TGI models ########################
# # TGI model
# # this is a TGI model https://huggingface.co/glaiveai/glaive-coder-7b
def hf_test_completion_tgi():
# litellm.set_verbose=True
try:
response = completion(
model = 'huggingface/HuggingFaceH4/zephyr-7b-beta',
messages = [{ "content": "Hello, how are you?","role": "user"}],
)
# Add any assertions here to check the response
print(response)
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# hf_test_completion_tgi()
def hf_test_completion_tgi_stream():
try:
response = completion(
model = 'huggingface/HuggingFaceH4/zephyr-7b-beta',
messages = [{ "content": "Hello, how are you?","role": "user"}],
stream=True
)
# Add any assertions here to check the response
print(response)
for chunk in response:
print(chunk["choices"][0]["delta"]["content"])
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# hf_test_completion_tgi_stream()
# ################### Hugging Face Conversational models ########################
# def hf_test_completion_conv():
# try:
# response = litellm.completion(
# model="huggingface/facebook/blenderbot-3B",
# messages=[{ "content": "Hello, how are you?","role": "user"}],
# )
# # Add any assertions here to check the response
# print(response)
# except Exception as e:
# pytest.fail(f"Error occurred: {e}")
# hf_test_completion_conv()
# ################### Hugging Face Neither TGI or Conversational models ########################
# # Neither TGI or Conversational
# def hf_test_completion_none_task():
# try:
# user_message = "My name is Merve and my favorite"
# messages = [{ "content": user_message,"role": "user"}]
# response = completion(
# model="huggingface/roneneldan/TinyStories-3M",
# messages=messages,
# api_base="https://p69xlsj6rpno5drq.us-east-1.aws.endpoints.huggingface.cloud",
# )
# # Add any assertions here to check the response
# print(response)
# except Exception as e:
# pytest.fail(f"Error occurred: {e}")
# hf_test_completion_none_task()
########################### End of Hugging Face Tests ##############################################
# def test_completion_hf_api():
# # failing on circle ci commenting out
# try:
# user_message = "write some code to find the sum of two numbers"
# messages = [{ "content": user_message,"role": "user"}]
# api_base = "https://a8l9e3ucxinyl3oj.us-east-1.aws.endpoints.huggingface.cloud"
# response = completion(model="huggingface/meta-llama/Llama-2-7b-chat-hf", messages=messages, api_base=api_base)
# # Add any assertions here to check the response
# print(response)
# except Exception as e:
# if "loading" in str(e):
# pass
# pytest.fail(f"Error occurred: {e}")
# test_completion_hf_api()
# def test_completion_hf_api_best_of():
# # failing on circle ci commenting out
# try:
# user_message = "write some code to find the sum of two numbers"
# messages = [{ "content": user_message,"role": "user"}]
# api_base = "https://a8l9e3ucxinyl3oj.us-east-1.aws.endpoints.huggingface.cloud"
# response = completion(model="huggingface/meta-llama/Llama-2-7b-chat-hf", messages=messages, api_base=api_base, n=2)
# # Add any assertions here to check the response
# print(response)
# except Exception as e:
# if "loading" in str(e):
# pass
# pytest.fail(f"Error occurred: {e}")
# test_completion_hf_api_best_of()
# def test_completion_hf_deployed_api():
# try:
# user_message = "There's a llama in my garden 😱 What should I do?"
# messages = [{ "content": user_message,"role": "user"}]
# response = completion(model="huggingface/https://ji16r2iys9a8rjk2.us-east-1.aws.endpoints.huggingface.cloud", messages=messages, logger_fn=logger_fn)
# # Add any assertions here to check the response
# print(response)
# except Exception as e:
# pytest.fail(f"Error occurred: {e}")
# this should throw an exception, to trigger https://logs.litellm.ai/
# def hf_test_error_logs():
# try:
# litellm.set_verbose=True
# user_message = "My name is Merve and my favorite"
# messages = [{ "content": user_message,"role": "user"}]
# response = completion(
# model="huggingface/roneneldan/TinyStories-3M",
# messages=messages,
# api_base="https://p69xlsj6rpno5drq.us-east-1.aws.endpoints.huggingface.cloud",
# )
# # Add any assertions here to check the response
# print(response)
# except Exception as e:
# pytest.fail(f"Error occurred: {e}")
# hf_test_error_logs()
def test_completion_cohere(): # commenting for now as the cohere endpoint is being flaky
try:
litellm.CohereConfig(max_tokens=1000, stop_sequences=["a"])
response = completion(
model="command-nightly",
messages=messages,
logger_fn=logger_fn
)
# Add any assertions here to check the response
print(response)
response_str = response["choices"][0]["message"]["content"]
response_str_2 = response.choices[0].message.content
if type(response_str) != str:
pytest.fail(f"Error occurred: {e}")
if type(response_str_2) != str:
pytest.fail(f"Error occurred: {e}")
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_completion_cohere()
def test_completion_openai():
try:
litellm.set_verbose=True
print(f"api key: {os.environ['OPENAI_API_KEY']}")
litellm.api_key = os.environ['OPENAI_API_KEY']
response = completion(
model="gpt-3.5-turbo",
messages=messages,
max_tokens=10,
request_timeout=0.1
)
print("This is the response object\n", response)
response_str = response["choices"][0]["message"]["content"]
response_str_2 = response.choices[0].message.content
cost = completion_cost(completion_response=response)
print("Cost for completion call with gpt-3.5-turbo: ", f"${float(cost):.10f}")
assert response_str == response_str_2
assert type(response_str) == str
assert len(response_str) > 1
litellm.api_key = None
except Timeout as e:
pass
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_completion_openai()
def test_completion_text_openai():
try:
# litellm.set_verbose = True
response = completion(model="gpt-3.5-turbo-instruct", messages=messages)
print(response["choices"][0]["message"]["content"])
except Exception as e:
print(e)
pytest.fail(f"Error occurred: {e}")
# test_completion_text_openai()
def test_completion_openai_with_optional_params():
try:
litellm.set_verbose = True
response = completion(
model="gpt-3.5-turbo-1106",
messages=[
{
"role": "user",
"content": "respond in valid, json - what is the day"
}
],
temperature=0.5,
top_p=0.1,
seed=12,
response_format={ "type": "json_object" }
)
# Add any assertions here to check the response
print(response)
except litellm.Timeout as e:
pass
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_completion_openai_with_optional_params()
def test_completion_openai_litellm_key():
try:
litellm.set_verbose = True
litellm.num_retries = 0
litellm.api_key = os.environ['OPENAI_API_KEY']
# ensure key is set to None in .env and in openai.api_key
os.environ['OPENAI_API_KEY'] = ""
import openai
openai.api_key = ""
##########################################################
response = completion(
model="gpt-3.5-turbo",
messages=messages,
temperature=0.5,
top_p=0.1,
max_tokens=10,
user="[email protected]",
)
# Add any assertions here to check the response
print(response)
###### reset environ key
os.environ['OPENAI_API_KEY'] = litellm.api_key
##### unset litellm var
litellm.api_key = None
except Timeout as e:
pass
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_completion_openai_litellm_key()
def test_completion_openrouter1():
try:
response = completion(
model="openrouter/google/palm-2-chat-bison",
messages=messages,
max_tokens=5,
)
# Add any assertions here to check the response
print(response)
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_completion_openrouter1()
def test_completion_hf_model_no_provider():
try:
response = completion(
model="WizardLM/WizardLM-70B-V1.0",
messages=messages,
max_tokens=5,
)
# Add any assertions here to check the response
print(response)
pytest.fail(f"Error occurred: {e}")
except Exception as e:
pass
# test_completion_hf_model_no_provider()
# def test_completion_openai_azure_with_functions():
# function1 = [
# {
# "name": "get_current_weather",
# "description": "Get the current weather in a given location",
# "parameters": {
# "type": "object",
# "properties": {
# "location": {
# "type": "string",
# "description": "The city and state, e.g. San Francisco, CA",
# },
# "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
# },
# "required": ["location"],
# },
# }
# ]
# try:
# messages = [{"role": "user", "content": "What is the weather like in Boston?"}]
# response = completion(
# model="azure/chatgpt-functioncalling", messages=messages, functions=function1
# )
# # Add any assertions here to check the response
# print(response)
# except Exception as e:
# pytest.fail(f"Error occurred: {e}")
# test_completion_openai_azure_with_functions()
def test_completion_azure():
try:
print("azure gpt-3.5 test\n\n")
litellm.set_verbose=False
## Test azure call
response = completion(
model="azure/chatgpt-v-2",
messages=messages,
)
## Test azure flag for backwards compatibility
response = completion(
model="chatgpt-v-2",
messages=messages,
azure=True,
max_tokens=10
)
# Add any assertions here to check the response
print(response)
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_completion_azure()
def test_azure_openai_ad_token():
# this tests if the azure ad token is set in the request header
# the request can fail since azure ad tokens expire after 30 mins, but the header MUST have the azure ad token
# we use litellm.input_callbacks for this test
def tester(
kwargs, # kwargs to completion
):
print(kwargs["additional_args"])
if kwargs["additional_args"]["headers"]["Authorization"] != 'Bearer gm':
pytest.fail("AZURE AD TOKEN Passed but not set in request header")
return
litellm.input_callback = [tester]
try:
response = litellm.completion(
model="azure/chatgpt-v-2", # e.g. gpt-35-instant
messages=[
{
"role": "user",
"content": "what is your name",
},
],
azure_ad_token="gm"
)
print("azure ad token respoonse\n")
print(response)
litellm.input_callback = []
except:
litellm.input_callback = []
pass
# test_azure_openai_ad_token()
# test_completion_azure()
def test_completion_azure2():
# test if we can pass api_base, api_version and api_key in compleition()
try:
print("azure gpt-3.5 test\n\n")
litellm.set_verbose=False
api_base = os.environ["AZURE_API_BASE"]
api_key = os.environ["AZURE_API_KEY"]
api_version = os.environ["AZURE_API_VERSION"]
os.environ["AZURE_API_BASE"] = ""
os.environ["AZURE_API_VERSION"] = ""
os.environ["AZURE_API_KEY"] = ""
## Test azure call
response = completion(
model="azure/chatgpt-v-2",
messages=messages,
api_base = api_base,
api_key = api_key,
api_version = api_version,
max_tokens=10,
)
# Add any assertions here to check the response
print(response)
os.environ["AZURE_API_BASE"] = api_base
os.environ["AZURE_API_VERSION"] = api_version
os.environ["AZURE_API_KEY"] = api_key
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_completion_azure2()
def test_completion_azure3():
# test if we can pass api_base, api_version and api_key in compleition()
try:
print("azure gpt-3.5 test\n\n")
litellm.set_verbose=True
litellm.api_base = os.environ["AZURE_API_BASE"]
litellm.api_key = os.environ["AZURE_API_KEY"]
litellm.api_version = os.environ["AZURE_API_VERSION"]
os.environ["AZURE_API_BASE"] = ""
os.environ["AZURE_API_VERSION"] = ""
os.environ["AZURE_API_KEY"] = ""
## Test azure call
response = completion(
model="azure/chatgpt-v-2",
messages=messages,
max_tokens=10,
)
# Add any assertions here to check the response
print(response)
os.environ["AZURE_API_BASE"] = litellm.api_base
os.environ["AZURE_API_VERSION"] = litellm.api_version
os.environ["AZURE_API_KEY"] = litellm.api_key
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_completion_azure3()
# new azure test for using litellm. vars,
# use the following vars in this test and make an azure_api_call
# litellm.api_type = self.azure_api_type
# litellm.api_base = self.azure_api_base
# litellm.api_version = self.azure_api_version
# litellm.api_key = self.api_key
def test_completion_azure_with_litellm_key():
try:
print("azure gpt-3.5 test\n\n")
import openai
#### set litellm vars
litellm.api_type = "azure"
litellm.api_base = os.environ['AZURE_API_BASE']
litellm.api_version = os.environ['AZURE_API_VERSION']
litellm.api_key = os.environ['AZURE_API_KEY']
######### UNSET ENV VARs for this ################
os.environ['AZURE_API_BASE'] = ""
os.environ['AZURE_API_VERSION'] = ""
os.environ['AZURE_API_KEY'] = ""
######### UNSET OpenAI vars for this ##############
openai.api_type = ""
openai.api_base = "gm"
openai.api_version = "333"
openai.api_key = "ymca"
response = completion(
model="azure/chatgpt-v-2",
messages=messages,
)
# Add any assertions here to check the response
print(response)
######### RESET ENV VARs for this ################
os.environ['AZURE_API_BASE'] = litellm.api_base
os.environ['AZURE_API_VERSION'] = litellm.api_version
os.environ['AZURE_API_KEY'] = litellm.api_key
######### UNSET litellm vars
litellm.api_type = None
litellm.api_base = None
litellm.api_version = None
litellm.api_key = None
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_completion_azure()
def test_completion_azure_deployment_id():
try:
litellm.set_verbose = True
response = completion(
deployment_id="chatgpt-v-2",
model="gpt-3.5-turbo",
messages=messages,
)
# Add any assertions here to check the response
print(response)
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_completion_azure_deployment_id()
# Only works for local endpoint
# def test_completion_anthropic_openai_proxy():
# try:
# response = completion(
# model="custom_openai/claude-2",
# messages=messages,
# api_base="http://0.0.0.0:8000"
# )
# # Add any assertions here to check the response
# print(response)
# except Exception as e:
# pytest.fail(f"Error occurred: {e}")
# test_completion_anthropic_openai_proxy()
def test_completion_replicate_vicuna():
print("TESTING REPLICATE")
litellm.set_verbose=False
model_name = "replicate/vicuna-13b:6282abe6a492de4145d7bb601023762212f9ddbbe78278bd6771c8b3b2f2a13b"
try:
response = completion(
model=model_name,
messages=messages,
temperature=0.5,
top_k=20,
repetition_penalty=1,
min_tokens=1,
seed=-1,
max_tokens=20,
)
print(response)
# Add any assertions here to check the response
response_str = response["choices"][0]["message"]["content"]
print("RESPONSE STRING\n", response_str)
if type(response_str) != str:
pytest.fail(f"Error occurred: {e}")
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_completion_replicate_vicuna()
def test_completion_replicate_llama2_stream():
litellm.set_verbose=False
model_name = "replicate/meta/llama-2-7b-chat:13c3cdee13ee059ab779f0291d29054dab00a47dad8261375654de5540165fb0"
try:
response = completion(
model=model_name,
messages=[
{
"role": "user",
"content": "what is yc write 1 paragraph",
}
],
stream=True,
max_tokens=20,
num_retries=3
)
print(f"response: {response}")
# Add any assertions here to check the response
complete_response = ""
for i, chunk in enumerate(response):
complete_response += chunk.choices[0].delta["content"]
# if i == 0:
# assert len(chunk.choices[0].delta["content"]) > 2
# print(chunk)
assert len(complete_response) > 5
print(f"complete_response: {complete_response}")
except Exception as e:
pytest.fail(f"Error occurred: {e}")
test_completion_replicate_llama2_stream()
# commenthing this out since we won't be always testing a custom replicate deployment
# def test_completion_replicate_deployments():
# print("TESTING REPLICATE")
# litellm.set_verbose=False
# model_name = "replicate/deployments/ishaan-jaff/ishaan-mistral"
# try:
# response = completion(
# model=model_name,
# messages=messages,
# temperature=0.5,
# seed=-1,
# )
# print(response)
# # Add any assertions here to check the response
# response_str = response["choices"][0]["message"]["content"]
# print("RESPONSE STRING\n", response_str)
# if type(response_str) != str:
# pytest.fail(f"Error occurred: {e}")
# except Exception as e:
# pytest.fail(f"Error occurred: {e}")
# test_completion_replicate_deployments()
######## Test TogetherAI ########
def test_completion_together_ai():
model_name = "together_ai/togethercomputer/llama-2-70b-chat"
try:
response = completion(model=model_name, messages=messages, max_tokens=256, n=1, logger_fn=logger_fn)
# Add any assertions here to check the response
print(response)
cost = completion_cost(completion_response=response)
print("Cost for completion call together-computer/llama-2-70b: ", f"${float(cost):.10f}")
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_completion_together_ai()
def test_customprompt_together_ai():
try:
litellm.set_verbose = False
litellm.num_retries = 0
response = completion(
model="together_ai/togethercomputer/llama-2-70b-chat",
messages=messages,
roles={"system":{"pre_message":"<|im_start|>system\n", "post_message":"<|im_end|>"}, "assistant":{"pre_message":"<|im_start|>assistant\n","post_message":"<|im_end|>"}, "user":{"pre_message":"<|im_start|>user\n","post_message":"<|im_end|>"}}
)
print(response)
except litellm.exceptions.Timeout as e:
print(f"Timeout Error")
litellm.num_retries = 3 # reset retries
pass
except Exception as e:
print(f"ERROR TYPE {type(e)}")
pytest.fail(f"Error occurred: {e}")
test_customprompt_together_ai()
def test_completion_sagemaker():
try:
response = completion(
model="sagemaker/jumpstart-dft-meta-textgeneration-llama-2-7b",
messages=messages,
temperature=0.2,
max_tokens=80,
logger_fn=logger_fn
)
# Add any assertions here to check the response
print(response)
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_completion_sagemaker()
def test_completion_bedrock_titan():
try:
response = completion(
model="bedrock/amazon.titan-tg1-large",
messages=messages,
temperature=0.2,
max_tokens=200,
top_p=0.8,
logger_fn=logger_fn
)
# Add any assertions here to check the response
print(response)
except RateLimitError:
pass
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_completion_bedrock_titan()
def test_completion_bedrock_claude():
print("calling claude")
try:
response = completion(
model="anthropic.claude-instant-v1",
messages=messages,
max_tokens=10,
temperature=0.1,
logger_fn=logger_fn
)
# Add any assertions here to check the response
print(response)
except RateLimitError:
pass
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_completion_bedrock_claude()
def test_completion_bedrock_cohere():
print("calling bedrock cohere")
try:
response = completion(
model="bedrock/cohere.command-text-v14",
messages=[{"role": "user", "content": "hi"}],
temperature=0.1,
max_tokens=10,
stream=True
)
# Add any assertions here to check the response
print(response)
for chunk in response:
print(chunk)
except RateLimitError:
pass
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_completion_bedrock_cohere()
def test_completion_bedrock_claude_completion_auth():
print("calling bedrock claude completion params auth")
import os
aws_access_key_id = os.environ["AWS_ACCESS_KEY_ID"]
aws_secret_access_key = os.environ["AWS_SECRET_ACCESS_KEY"]
aws_region_name = os.environ["AWS_REGION_NAME"]
os.environ["AWS_ACCESS_KEY_ID"] = ""
os.environ["AWS_SECRET_ACCESS_KEY"] = ""
os.environ["AWS_REGION_NAME"] = ""
try:
response = completion(
model="bedrock/anthropic.claude-instant-v1",
messages=messages,
max_tokens=10,
temperature=0.1,
logger_fn=logger_fn,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
aws_region_name=aws_region_name,
)
# Add any assertions here to check the response
print(response)
os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id
os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key
os.environ["AWS_REGION_NAME"] = aws_region_name
except RateLimitError:
pass
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_completion_bedrock_claude_completion_auth()
# def test_completion_bedrock_claude_external_client_auth():
# print("calling bedrock claude external client auth")
# import os
# aws_access_key_id = os.environ["AWS_ACCESS_KEY_ID"]
# aws_secret_access_key = os.environ["AWS_SECRET_ACCESS_KEY"]
# aws_region_name = os.environ["AWS_REGION_NAME"]
# os.environ["AWS_ACCESS_KEY_ID"] = ""
# os.environ["AWS_SECRET_ACCESS_KEY"] = ""
# os.environ["AWS_REGION_NAME"] = ""
# try:
# import boto3
# bedrock = boto3.client(
# service_name="bedrock-runtime",
# region_name=aws_region_name,
# aws_access_key_id=aws_access_key_id,
# aws_secret_access_key=aws_secret_access_key,
# endpoint_url=f"https://bedrock-runtime.{aws_region_name}.amazonaws.com"
# )
# response = completion(
# model="bedrock/anthropic.claude-instant-v1",
# messages=messages,
# max_tokens=10,
# temperature=0.1,
# logger_fn=logger_fn,
# aws_bedrock_client=bedrock,
# )
# # Add any assertions here to check the response
# print(response)
# os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id
# os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key
# os.environ["AWS_REGION_NAME"] = aws_region_name
# except RateLimitError:
# pass
# except Exception as e:
# pytest.fail(f"Error occurred: {e}")
# test_completion_bedrock_claude_external_client_auth()
# def test_completion_bedrock_claude_stream():
# print("calling claude")
# litellm.set_verbose = False
# try:
# response = completion(
# model="bedrock/anthropic.claude-instant-v1",
# messages=messages,
# stream=True
# )
# # Add any assertions here to check the response
# print(response)
# for chunk in response:
# print(chunk)
# except RateLimitError:
# pass
# except Exception as e:
# pytest.fail(f"Error occurred: {e}")
# test_completion_bedrock_claude_stream()
# def test_completion_bedrock_ai21():
# try:
# litellm.set_verbose = False
# response = completion(
# model="bedrock/ai21.j2-mid",
# messages=messages,
# temperature=0.2,
# top_p=0.2,
# max_tokens=20
# )
# # Add any assertions here to check the response
# print(response)
# except RateLimitError:
# pass
# except Exception as e:
# pytest.fail(f"Error occurred: {e}")
######## Test VLLM ########
# def test_completion_vllm():
# try:
# response = completion(
# model="vllm/facebook/opt-125m",
# messages=messages,
# temperature=0.2,
# max_tokens=80,
# )
# print(response)
# except Exception as e:
# pytest.fail(f"Error occurred: {e}")
# test_completion_vllm()
# def test_completion_hosted_chatCompletion():
# # this tests calling a server where vllm is hosted
# # this should make an openai.Completion() call to the specified api_base
# # send a request to this proxy server: https://replit.com/@BerriAI/openai-proxy#main.py
# # it checks if model == facebook/opt-125m and returns test passed
# try:
# litellm.set_verbose = True
# response = completion(
# model="facebook/opt-125m",
# messages=messages,
# temperature=0.2,
# max_tokens=80,
# api_base="https://openai-proxy.berriai.repl.co",
# custom_llm_provider="openai"
# )
# print(response)
# if response['choices'][0]['message']['content'] != "passed":
# # see https://replit.com/@BerriAI/openai-proxy#main.py
# pytest.fail(f"Error occurred: proxy server did not respond")
# except Exception as e:
# pytest.fail(f"Error occurred: {e}")
# test_completion_hosted_chatCompletion()
# def test_completion_custom_api_base():
# try:
# response = completion(
# model="custom/meta-llama/Llama-2-13b-hf",
# messages=messages,
# temperature=0.2,
# max_tokens=10,
# api_base="https://api.autoai.dev/inference",
# request_timeout=300,
# )
# # Add any assertions here to check the response
# print("got response\n", response)
# except Exception as e:
# pytest.fail(f"Error occurred: {e}")
# test_completion_custom_api_base()
# def test_vertex_ai():
# test_models = ["codechat-bison"] + litellm.vertex_chat_models + litellm.vertex_code_chat_models + litellm.vertex_text_models + litellm.vertex_code_text_models
# # test_models = ["chat-bison"]
# for model in test_models:
# try:
# if model in ["code-gecko@001", "code-gecko@latest"]:
# # our account does not have access to this model
# continue
# print("making request", model)
# response = completion(model=model, messages=[{'role': 'user', 'content': 'hi'}])
# print(response)
# print(response.usage.completion_tokens)
# print(response['usage']['completion_tokens'])
# assert type(response.choices[0].message.content) == str
# except Exception as e:
# pytest.fail(f"Error occurred: {e}")
# test_vertex_ai()
# def test_vertex_ai_stream():
# litellm.set_verbose=False
# test_models = litellm.vertex_chat_models + litellm.vertex_code_chat_models + litellm.vertex_text_models + litellm.vertex_code_text_models
# for model in test_models:
# try:
# if model in ["code-gecko@001", "code-gecko@latest"]:
# # our account does not have access to this model
# continue
# print("making request", model)
# response = completion(model=model, messages=[{"role": "user", "content": "write 100 line code code for saying hi"}], stream=True)
# for chunk in response:
# print(chunk)
# # pass
# except Exception as e:
# pytest.fail(f"Error occurred: {e}")
# test_vertex_ai_stream()
def test_completion_with_fallbacks():
print(f"RUNNING TEST COMPLETION WITH FALLBACKS - test_completion_with_fallbacks")
fallbacks = ["gpt-3.5-turbo", "gpt-3.5-turbo", "command-nightly"]
try:
response = completion(
model="bad-model", messages=messages, force_timeout=120, fallbacks=fallbacks
)
# Add any assertions here to check the response
print(response)
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_completion_with_fallbacks()
def test_completion_anyscale_api():
try:
# litellm.set_verbose=True
messages=[{
"role": "system",
"content": "You're a good bot"
},{
"role": "user",
"content": "Hey",
},{
"role": "user",
"content": "Hey",
}]
response = completion(
model="anyscale/meta-llama/Llama-2-7b-chat-hf",
messages=messages,)
print(response)
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_completion_anyscale_api()
def test_completion_anyscale_2():
try:
# litellm.set_verbose=True
messages=[{
"role": "system",
"content": "You're a good bot"
},{
"role": "user",
"content": "Hey",
},{
"role": "user",
"content": "Hey",
}]
response = completion(
model="anyscale/meta-llama/Llama-2-7b-chat-hf",
messages=messages
)
print(response)
except Exception as e:
pytest.fail(f"Error occurred: {e}")
def test_mistral_anyscale_stream():
litellm.set_verbose=False
response = completion(
model = 'anyscale/mistralai/Mistral-7B-Instruct-v0.1',
messages = [{ "content": "hello, good morning","role": "user"}],
stream=True,
)
for chunk in response:
# print(chunk)
print(chunk["choices"][0]["delta"].get("content", ""), end="")
# test_mistral_anyscale_stream()
# test_completion_anyscale_2()
# def test_completion_with_fallbacks_multiple_keys():
# print(f"backup key 1: {os.getenv('BACKUP_OPENAI_API_KEY_1')}")
# print(f"backup key 2: {os.getenv('BACKUP_OPENAI_API_KEY_2')}")
# backup_keys = [{"api_key": os.getenv("BACKUP_OPENAI_API_KEY_1")}, {"api_key": os.getenv("BACKUP_OPENAI_API_KEY_2")}]
# try:
# api_key = "bad-key"
# response = completion(
# model="gpt-3.5-turbo", messages=messages, force_timeout=120, fallbacks=backup_keys, api_key=api_key
# )
# # Add any assertions here to check the response
# print(response)
# except Exception as e:
# error_str = traceback.format_exc()
# pytest.fail(f"Error occurred: {error_str}")
# test_completion_with_fallbacks_multiple_keys()
# def test_petals():
# try:
# response = completion(model="petals-team/StableBeluga2", messages=messages)
# # Add any assertions here to check the response
# print(response)
# response = completion(model="petals-team/StableBeluga2", messages=messages)
# # Add any assertions here to check the response
# print(response)
# except Exception as e:
# pytest.fail(f"Error occurred: {e}")
# def test_baseten():
# try:
# response = completion(model="baseten/7qQNLDB", messages=messages, logger_fn=logger_fn)
# # Add any assertions here to check the response
# print(response)
# except Exception as e:
# pytest.fail(f"Error occurred: {e}")
# test_baseten()
# def test_baseten_falcon_7bcompletion():
# model_name = "qvv0xeq"
# try:
# response = completion(model=model_name, messages=messages, custom_llm_provider="baseten")
# # Add any assertions here to check the response
# print(response)
# except Exception as e:
# pytest.fail(f"Error occurred: {e}")
# test_baseten_falcon_7bcompletion()
# def test_baseten_falcon_7bcompletion_withbase():
# model_name = "qvv0xeq"
# litellm.api_base = "https://app.baseten.co"
# try:
# response = completion(model=model_name, messages=messages)
# # Add any assertions here to check the response
# print(response)
# except Exception as e:
# pytest.fail(f"Error occurred: {e}")
# litellm.api_base = None
# test_baseten_falcon_7bcompletion_withbase()
# def test_baseten_wizardLMcompletion_withbase():
# model_name = "q841o8w"
# litellm.api_base = "https://app.baseten.co"
# try:
# response = completion(model=model_name, messages=messages)
# # Add any assertions here to check the response
# print(response)
# except Exception as e:
# pytest.fail(f"Error occurred: {e}")
# test_baseten_wizardLMcompletion_withbase()
# def test_baseten_mosaic_ML_completion_withbase():
# model_name = "31dxrj3"
# litellm.api_base = "https://app.baseten.co"
# try:
# response = completion(model=model_name, messages=messages)
# # Add any assertions here to check the response
# print(response)
# except Exception as e:
# pytest.fail(f"Error occurred: {e}")
#### Test A121 ###################
def test_completion_ai21():
print("running ai21 j2light test")
litellm.set_verbose=True
model_name = "j2-light"
try:
response = completion(model=model_name, messages=messages, max_tokens=100, temperature=0.8)
# Add any assertions here to check the response
print(response)
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_completion_ai21()
## test deep infra
def test_completion_deep_infra():
litellm.set_verbose = False
model_name = "deepinfra/meta-llama/Llama-2-70b-chat-hf"
try:
response = completion(
model=model_name,
messages=messages,
temperature=0,
max_tokens=10
)
# Add any assertions here to check the response
print(response)
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_completion_deep_infra()
def test_completion_deep_infra_mistral():
print("deep infra test with temp=0")
model_name = "deepinfra/mistralai/Mistral-7B-Instruct-v0.1"
try:
response = completion(
model=model_name,
messages=messages,
temperature=0.01, # mistrail fails with temperature=0
max_tokens=10
)
# Add any assertions here to check the response
print(response)
except litellm.exceptions.Timeout as e:
pass
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_completion_deep_infra_mistral()
# Palm tests
def test_completion_palm():
litellm.set_verbose = True
model_name = "palm/chat-bison"
messages = [{"role": "user", "content": "Hey, how's it going?"}]
try:
response = completion(model=model_name, messages=messages)
# Add any assertions here to check the response
print(response)
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_completion_palm()
# test palm with streaming
def test_completion_palm_stream():
# litellm.set_verbose = True
model_name = "palm/chat-bison"
try:
response = completion(
model=model_name,
messages=messages,
stop=["stop"],
stream=True,
max_tokens=20
)
# Add any assertions here to check the response
for chunk in response:
print(chunk)
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_completion_palm_stream()
# test_completion_deep_infra()
# test_completion_ai21()
# test config file with completion #
# def test_completion_openai_config():
# try:
# litellm.config_path = "../config.json"
# litellm.set_verbose = True
# response = litellm.config_completion(messages=messages)
# # Add any assertions here to check the response
# print(response)
# litellm.config_path = None
# except Exception as e:
# pytest.fail(f"Error occurred: {e}")
# def test_maritalk():
# messages = [{"role": "user", "content": "Hey"}]
# try:
# response = completion("maritalk", messages=messages)
# print(f"response: {response}")
# except Exception as e:
# pytest.fail(f"Error occurred: {e}")
# test_maritalk()
def test_completion_together_ai_stream():
user_message = "Write 1pg about YC & litellm"
messages = [{ "content": user_message,"role": "user"}]
try:
response = completion(
model="together_ai/togethercomputer/llama-2-70b-chat",
messages=messages, stream=True,
max_tokens=5
)
print(response)
for chunk in response:
print(chunk)
# print(string_response)
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_completion_together_ai_stream()
# async def get_response(generator):
# async for elem in generator:
# print(elem)
# return
# test_completion_together_ai_stream()
def test_moderation():
import openai
openai.api_type = "azure"
openai.api_version = "GM"
response = litellm.moderation(input="i'm ishaan cto of litellm")
print(response)
output = response.results[0]
print(output)
return output
# test_moderation() | [
"Hey",
"You're a good bot",
"hi",
"hello, good morning",
"respond in valid, json - what is the day",
"[{'type': 'text', 'text': 'Whats in this image?'}, {'type': 'image_url', 'image_url': {'url': 'https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg'}}]",
"what is your name",
"You are an upbeat, enthusiastic personal fitness coach named Sam. Sam is passionate about helping clients get fit and lead healthier lifestyles. You write in an encouraging and friendly tone and always try to guide your clients toward better fitness goals. If the user asks you something unrelated to fitness, either bring the topic back to fitness, or say that you cannot answer.",
"what is yc write 1 paragraph",
"Hello, how are you?",
"Hey, how's it going?"
] |
2024-01-10 | LiquidAdTech/Zahara | litellm~exceptions.py | # +-----------------------------------------------+
# | |
# | Give Feedback / Get Help |
# | https://github.com/BerriAI/litellm/issues/new |
# | |
# +-----------------------------------------------+
#
# Thank you users! We ❤️ you! - Krrish & Ishaan
## LiteLLM versions of the OpenAI Exception Types
from openai import (
AuthenticationError,
BadRequestError,
RateLimitError,
APIStatusError,
OpenAIError,
APIError,
APITimeoutError,
APIConnectionError,
APIResponseValidationError
)
import httpx
class AuthenticationError(AuthenticationError): # type: ignore
def __init__(self, message, llm_provider, model, response: httpx.Response):
self.status_code = 401
self.message = message
self.llm_provider = llm_provider
self.model = model
super().__init__(
self.message,
response=response,
body=None
) # Call the base class constructor with the parameters it needs
class BadRequestError(BadRequestError): # type: ignore
def __init__(self, message, model, llm_provider, response: httpx.Response):
self.status_code = 400
self.message = message
self.model = model
self.llm_provider = llm_provider
super().__init__(
self.message,
response=response,
body=None
) # Call the base class constructor with the parameters it needs
class Timeout(APITimeoutError): # type: ignore
def __init__(self, message, model, llm_provider):
self.status_code = 408
self.message = message
self.model = model
self.llm_provider = llm_provider
request = httpx.Request(method="POST", url="https://api.openai.com/v1")
super().__init__(
request=request
) # Call the base class constructor with the parameters it needs
class RateLimitError(RateLimitError): # type: ignore
def __init__(self, message, llm_provider, model, response: httpx.Response):
self.status_code = 429
self.message = message
self.llm_provider = llm_provider
self.modle = model
super().__init__(
self.message,
response=response,
body=None
) # Call the base class constructor with the parameters it needs
# sub class of rate limit error - meant to give more granularity for error handling context window exceeded errors
class ContextWindowExceededError(BadRequestError): # type: ignore
def __init__(self, message, model, llm_provider, response: httpx.Response):
self.status_code = 400
self.message = message
self.model = model
self.llm_provider = llm_provider
super().__init__(
message=self.message,
model=self.model, # type: ignore
llm_provider=self.llm_provider, # type: ignore
response=response
) # Call the base class constructor with the parameters it needs
class ServiceUnavailableError(APIStatusError): # type: ignore
def __init__(self, message, llm_provider, model, response: httpx.Response):
self.status_code = 503
self.message = message
self.llm_provider = llm_provider
self.model = model
super().__init__(
self.message,
response=response,
body=None
) # Call the base class constructor with the parameters it needs
# raise this when the API returns an invalid response object - https://github.com/openai/openai-python/blob/1be14ee34a0f8e42d3f9aa5451aa4cb161f1781f/openai/api_requestor.py#L401
class APIError(APIError): # type: ignore
def __init__(self, status_code, message, llm_provider, model, request: httpx.Request):
self.status_code = status_code
self.message = message
self.llm_provider = llm_provider
self.model = model
super().__init__(
self.message,
request=request, # type: ignore
body=None
)
# raised if an invalid request (not get, delete, put, post) is made
class APIConnectionError(APIConnectionError): # type: ignore
def __init__(self, message, llm_provider, model, request: httpx.Request):
self.message = message
self.llm_provider = llm_provider
self.model = model
self.status_code = 500
super().__init__(
message=self.message,
request=request
)
# raised if an invalid request (not get, delete, put, post) is made
class APIResponseValidationError(APIResponseValidationError): # type: ignore
def __init__(self, message, llm_provider, model):
self.message = message
self.llm_provider = llm_provider
self.model = model
request = httpx.Request(method="POST", url="https://api.openai.com/v1")
response = httpx.Response(status_code=500, request=request)
super().__init__(
response=response,
body=None,
message=message
)
class OpenAIError(OpenAIError): # type: ignore
def __init__(self, original_exception):
self.status_code = original_exception.http_status
super().__init__(
http_body=original_exception.http_body,
http_status=original_exception.http_status,
json_body=original_exception.json_body,
headers=original_exception.headers,
code=original_exception.code,
)
self.llm_provider = "openai"
class BudgetExceededError(Exception):
def __init__(self, current_cost, max_budget):
self.current_cost = current_cost
self.max_budget = max_budget
message = f"Budget has been exceeded! Current cost: {current_cost}, Max budget: {max_budget}"
super().__init__(message)
## DEPRECATED ##
class InvalidRequestError(BadRequestError): # type: ignore
def __init__(self, message, model, llm_provider):
self.status_code = 400
self.message = message
self.model = model
self.llm_provider = llm_provider
super().__init__(
self.message, f"{self.model}"
) # Call the base class constructor with the parameters it needs
| [] |
2024-01-10 | LiquidAdTech/Zahara | litellm~tests~test_batch_completions.py | #### What this tests ####
# This tests calling batch_completions by running 100 messages together
import sys, os
import traceback
import pytest
sys.path.insert(
0, os.path.abspath("../..")
) # Adds the parent directory to the system path
from openai import APITimeoutError as Timeout
import litellm
litellm.num_retries = 3
from litellm import batch_completion, batch_completion_models, completion, batch_completion_models_all_responses
# litellm.set_verbose=True
def test_batch_completions():
messages = [[{"role": "user", "content": "write a short poem"}] for _ in range(3)]
model = "j2-mid"
try:
result = batch_completion(
model=model,
messages=messages,
max_tokens=10,
temperature=0.2,
request_timeout=1
)
print(result)
print(len(result))
assert(len(result)==3)
except Timeout as e:
print(f"IN TIMEOUT")
pass
except Exception as e:
pytest.fail(f"An error occurred: {e}")
test_batch_completions()
def test_batch_completions_models():
try:
result = batch_completion_models(
models=["gpt-3.5-turbo", "gpt-3.5-turbo", "gpt-3.5-turbo"],
messages=[{"role": "user", "content": "Hey, how's it going"}]
)
print(result)
except Timeout as e:
pass
except Exception as e:
pytest.fail(f"An error occurred: {e}")
# test_batch_completions_models()
def test_batch_completion_models_all_responses():
try:
responses = batch_completion_models_all_responses(
models=["j2-light", "claude-instant-1.2"],
messages=[{"role": "user", "content": "write a poem"}],
max_tokens=10
)
print(responses)
assert(len(responses) == 2)
except Timeout as e:
pass
except Exception as e:
pytest.fail(f"An error occurred: {e}")
# test_batch_completion_models_all_responses()
| [
"write a short poem",
"Hey, how's it going",
"write a poem"
] |
2024-01-10 | LiquidAdTech/Zahara | litellm~proxy~tests~test_async.py | # This tests the litelm proxy
# it makes async Completion requests with streaming
import openai
openai.base_url = "http://0.0.0.0:8000"
openai.api_key = "temp-key"
print(openai.base_url)
async def test_async_completion():
response = await openai.Completion.acreate(
model="gpt-3.5-turbo",
prompt='this is a test request, write a short poem',
)
print(response)
print("test_streaming")
response = await openai.Completion.acreate(
model="gpt-3.5-turbo",
prompt='this is a test request, write a short poem',
stream=True
)
print(response)
async for chunk in response:
print(chunk)
import asyncio
asyncio.run(test_async_completion())
| [] |
2024-01-10 | LiquidAdTech/Zahara | litellm~router.py | # +-----------------------------------------------+
# | |
# | Give Feedback / Get Help |
# | https://github.com/BerriAI/litellm/issues/new |
# | |
# +-----------------------------------------------+
#
# Thank you ! We ❤️ you! - Krrish & Ishaan
from datetime import datetime
from typing import Dict, List, Optional, Union, Literal
import random, threading, time
import litellm, openai
import logging, asyncio
import inspect
from openai import AsyncOpenAI
class Router:
"""
Example usage:
from litellm import Router
model_list = [{
"model_name": "gpt-3.5-turbo", # model alias
"litellm_params": { # params for litellm completion/embedding call
"model": "azure/<your-deployment-name>",
"api_key": <your-api-key>,
"api_version": <your-api-version>,
"api_base": <your-api-base>
},
}]
router = Router(model_list=model_list)
"""
model_names: List = []
cache_responses: bool = False
default_cache_time_seconds: int = 1 * 60 * 60 # 1 hour
num_retries: int = 0
tenacity = None
def __init__(self,
model_list: Optional[list] = None,
redis_host: Optional[str] = None,
redis_port: Optional[int] = None,
redis_password: Optional[str] = None,
cache_responses: bool = False,
num_retries: int = 0,
timeout: float = 600,
default_litellm_params = {}, # default params for Router.chat.completion.create
routing_strategy: Literal["simple-shuffle", "least-busy", "usage-based-routing", "latency-based-routing"] = "simple-shuffle") -> None:
if model_list:
self.set_model_list(model_list)
self.healthy_deployments: List = self.model_list
self.deployment_latency_map = {}
for m in model_list:
self.deployment_latency_map[m["litellm_params"]["model"]] = 0
self.num_retries = num_retries
self.chat = litellm.Chat(params=default_litellm_params)
self.default_litellm_params = default_litellm_params
self.default_litellm_params["timeout"] = timeout
self.routing_strategy = routing_strategy
### HEALTH CHECK THREAD ###
if self.routing_strategy == "least-busy":
self._start_health_check_thread()
### CACHING ###
if redis_host is not None and redis_port is not None and redis_password is not None:
cache_config = {
'type': 'redis',
'host': redis_host,
'port': redis_port,
'password': redis_password
}
else: # use an in-memory cache
cache_config = {
"type": "local"
}
if cache_responses:
litellm.cache = litellm.Cache(**cache_config) # use Redis for caching completion requests
self.cache_responses = cache_responses
self.cache = litellm.Cache(cache_config) # use Redis for tracking load balancing
## USAGE TRACKING ##
if type(litellm.success_callback) == list:
litellm.success_callback.append(self.deployment_callback)
else:
litellm.success_callback = [self.deployment_callback]
def _start_health_check_thread(self):
"""
Starts a separate thread to perform health checks periodically.
"""
health_check_thread = threading.Thread(target=self._perform_health_checks, daemon=True)
health_check_thread.start()
def _perform_health_checks(self):
"""
Periodically performs health checks on the servers.
Updates the list of healthy servers accordingly.
"""
while True:
self.healthy_deployments = self._health_check()
# Adjust the time interval based on your needs
time.sleep(15)
def _health_check(self):
"""
Performs a health check on the deployments
Returns the list of healthy deployments
"""
healthy_deployments = []
for deployment in self.model_list:
litellm_args = deployment["litellm_params"]
try:
start_time = time.time()
litellm.completion(messages=[{"role": "user", "content": ""}], max_tokens=1, **litellm_args) # hit the server with a blank message to see how long it takes to respond
end_time = time.time()
response_time = end_time - start_time
logging.debug(f"response_time: {response_time}")
healthy_deployments.append((deployment, response_time))
healthy_deployments.sort(key=lambda x: x[1])
except Exception as e:
pass
return healthy_deployments
def weighted_shuffle_by_latency(self, items):
# Sort the items by latency
sorted_items = sorted(items, key=lambda x: x[1])
# Get only the latencies
latencies = [i[1] for i in sorted_items]
# Calculate the sum of all latencies
total_latency = sum(latencies)
# Calculate the weight for each latency (lower latency = higher weight)
weights = [total_latency-latency for latency in latencies]
# Get a weighted random item
if sum(weights) == 0:
chosen_item = random.choice(sorted_items)[0]
else:
chosen_item = random.choices(sorted_items, weights=weights, k=1)[0][0]
return chosen_item
def set_model_list(self, model_list: list):
self.model_list = model_list
self.model_names = [m["model_name"] for m in model_list]
def get_model_names(self):
return self.model_names
def get_available_deployment(self,
model: str,
messages: Optional[List[Dict[str, str]]] = None,
input: Optional[Union[str, List]] = None):
"""
Returns the deployment based on routing strategy
"""
if litellm.model_alias_map and model in litellm.model_alias_map:
model = litellm.model_alias_map[
model
] # update the model to the actual value if an alias has been passed in
if self.routing_strategy == "least-busy":
if len(self.healthy_deployments) > 0:
for item in self.healthy_deployments:
if item[0]["model_name"] == model: # first one in queue will be the one with the most availability
return item[0]
else:
raise ValueError("No models available.")
elif self.routing_strategy == "simple-shuffle":
potential_deployments = []
for item in self.model_list:
if item["model_name"] == model:
potential_deployments.append(item)
item = random.choice(potential_deployments)
return item or item[0]
elif self.routing_strategy == "latency-based-routing":
returned_item = None
lowest_latency = float('inf')
### get potential deployments
potential_deployments = []
for item in self.model_list:
if item["model_name"] == model:
potential_deployments.append(item)
### shuffles with priority for lowest latency
# items_with_latencies = [('A', 10), ('B', 20), ('C', 30), ('D', 40)]
items_with_latencies = []
for item in potential_deployments:
items_with_latencies.append((item, self.deployment_latency_map[item["litellm_params"]["model"]]))
returned_item = self.weighted_shuffle_by_latency(items_with_latencies)
return returned_item
elif self.routing_strategy == "usage-based-routing":
return self.get_usage_based_available_deployment(model=model, messages=messages, input=input)
raise ValueError("No models available.")
def retry_if_rate_limit_error(self, exception):
return isinstance(exception, openai.RateLimitError)
def retry_if_api_error(self, exception):
return isinstance(exception, openai.APIError)
async def async_function_with_retries(self, *args, **kwargs):
# we'll backoff exponentially with each retry
backoff_factor = 1
original_exception = kwargs.pop("original_exception")
original_function = kwargs.pop("original_function")
for current_attempt in range(self.num_retries):
try:
# if the function call is successful, no exception will be raised and we'll break out of the loop
response = await original_function(*args, **kwargs)
if inspect.iscoroutinefunction(response): # async errors are often returned as coroutines
response = await response
return response
except openai.RateLimitError as e:
# on RateLimitError we'll wait for an exponential time before trying again
await asyncio.sleep(backoff_factor)
# increase backoff factor for next run
backoff_factor *= 2
except openai.APIError as e:
# on APIError we immediately retry without any wait, change this if necessary
pass
except Exception as e:
# for any other exception types, don't retry
raise e
def function_with_retries(self, *args, **kwargs):
try:
import tenacity
except Exception as e:
raise Exception(f"tenacity import failed please run `pip install tenacity`. Error{e}")
retry_info = {"attempts": 0, "final_result": None}
def after_callback(retry_state):
retry_info["attempts"] = retry_state.attempt_number
retry_info["final_result"] = retry_state.outcome.result()
if 'model' not in kwargs or 'messages' not in kwargs:
raise ValueError("'model' and 'messages' must be included as keyword arguments")
try:
original_exception = kwargs.pop("original_exception")
original_function = kwargs.pop("original_function")
if isinstance(original_exception, openai.RateLimitError):
retryer = tenacity.Retrying(wait=tenacity.wait_exponential(multiplier=1, max=10),
stop=tenacity.stop_after_attempt(self.num_retries),
reraise=True,
after=after_callback)
elif isinstance(original_exception, openai.APIError):
retryer = tenacity.Retrying(stop=tenacity.stop_after_attempt(self.num_retries),
reraise=True,
after=after_callback)
return retryer(self.acompletion, *args, **kwargs)
except Exception as e:
raise Exception(f"Error in function_with_retries: {e}\n\nRetry Info: {retry_info}")
### COMPLETION + EMBEDDING FUNCTIONS
def completion(self,
model: str,
messages: List[Dict[str, str]],
is_retry: Optional[bool] = False,
is_fallback: Optional[bool] = False,
**kwargs):
"""
Example usage:
response = router.completion(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hey, how's it going?"}]
"""
try:
# pick the one that is available (lowest TPM/RPM)
deployment = self.get_available_deployment(model=model, messages=messages)
data = deployment["litellm_params"]
for k, v in self.default_litellm_params.items():
if k not in data: # prioritize model-specific params > default router params
data[k] = v
return litellm.completion(**{**data, "messages": messages, "caching": self.cache_responses, **kwargs})
except Exception as e:
if self.num_retries > 0:
kwargs["model"] = model
kwargs["messages"] = messages
kwargs["original_exception"] = e
kwargs["original_function"] = self.completion
return self.function_with_retries(**kwargs)
else:
raise e
async def acompletion(self,
model: str,
messages: List[Dict[str, str]],
is_retry: Optional[bool] = False,
is_fallback: Optional[bool] = False,
**kwargs):
try:
deployment = self.get_available_deployment(model=model, messages=messages)
data = deployment["litellm_params"]
for k, v in self.default_litellm_params.items():
if k not in data: # prioritize model-specific params > default router params
data[k] = v
response = await litellm.acompletion(**{**data, "messages": messages, "caching": self.cache_responses, **kwargs})
return response
except Exception as e:
if self.num_retries > 0:
kwargs["model"] = model
kwargs["messages"] = messages
kwargs["original_exception"] = e
kwargs["original_function"] = self.acompletion
return await self.async_function_with_retries(**kwargs)
else:
raise e
def text_completion(self,
model: str,
prompt: str,
is_retry: Optional[bool] = False,
is_fallback: Optional[bool] = False,
is_async: Optional[bool] = False,
**kwargs):
try:
messages=[{"role": "user", "content": prompt}]
# pick the one that is available (lowest TPM/RPM)
deployment = self.get_available_deployment(model=model, messages=messages)
data = deployment["litellm_params"]
for k, v in self.default_litellm_params.items():
if k not in data: # prioritize model-specific params > default router params
data[k] = v
# call via litellm.completion()
return litellm.text_completion(**{**data, "prompt": prompt, "caching": self.cache_responses, **kwargs}) # type: ignore
except Exception as e:
if self.num_retries > 0:
kwargs["model"] = model
kwargs["messages"] = messages
kwargs["original_exception"] = e
kwargs["original_function"] = self.completion
return self.function_with_retries(**kwargs)
else:
raise e
def embedding(self,
model: str,
input: Union[str, List],
is_async: Optional[bool] = False,
**kwargs) -> Union[List[float], None]:
# pick the one that is available (lowest TPM/RPM)
deployment = self.get_available_deployment(model=model, input=input)
data = deployment["litellm_params"]
for k, v in self.default_litellm_params.items():
if k not in data: # prioritize model-specific params > default router params
data[k] = v
# call via litellm.embedding()
return litellm.embedding(**{**data, "input": input, "caching": self.cache_responses, **kwargs})
async def aembedding(self,
model: str,
input: Union[str, List],
is_async: Optional[bool] = True,
**kwargs) -> Union[List[float], None]:
# pick the one that is available (lowest TPM/RPM)
deployment = self.get_available_deployment(model=model, input=input)
data = deployment["litellm_params"]
for k, v in self.default_litellm_params.items():
if k not in data: # prioritize model-specific params > default router params
data[k] = v
return await litellm.aembedding(**{**data, "input": input, "caching": self.cache_responses, **kwargs})
def deployment_callback(
self,
kwargs, # kwargs to completion
completion_response, # response from completion
start_time, end_time # start/end time
):
"""
Function LiteLLM submits a callback to after a successful
completion. Purpose of this is to update TPM/RPM usage per model
"""
model_name = kwargs.get('model', None) # i.e. gpt35turbo
custom_llm_provider = kwargs.get("litellm_params", {}).get('custom_llm_provider', None) # i.e. azure
if custom_llm_provider:
model_name = f"{custom_llm_provider}/{model_name}"
if kwargs["stream"] is True:
if kwargs.get("complete_streaming_response"):
total_tokens = kwargs.get("complete_streaming_response")['usage']['total_tokens']
self._set_deployment_usage(model_name, total_tokens)
else:
total_tokens = completion_response['usage']['total_tokens']
self._set_deployment_usage(model_name, total_tokens)
self.deployment_latency_map[model_name] = (end_time - start_time).total_seconds()
def deployment_callback_on_failure(
self,
kwargs, # kwargs to completion
completion_response, # response from completion
start_time, end_time # start/end time
):
model_name = kwargs.get('model', None) # i.e. gpt35turbo
custom_llm_provider = kwargs.get("litellm_params", {}).get('custom_llm_provider', None) # i.e. azure
if custom_llm_provider:
model_name = f"{custom_llm_provider}/{model_name}"
self.deployment_latency_map[model_name] = float('inf')
def get_usage_based_available_deployment(self,
model: str,
messages: Optional[List[Dict[str, str]]] = None,
input: Optional[Union[str, List]] = None):
"""
Returns a deployment with the lowest TPM/RPM usage.
"""
# get list of potential deployments
potential_deployments = []
for item in self.model_list:
if item["model_name"] == model:
potential_deployments.append(item)
# get current call usage
token_count = 0
if messages is not None:
token_count = litellm.token_counter(model=model, messages=messages)
elif input is not None:
if isinstance(input, List):
input_text = "".join(text for text in input)
else:
input_text = input
token_count = litellm.token_counter(model=model, text=input_text)
# -----------------------
# Find lowest used model
# ----------------------
lowest_tpm = float("inf")
deployment = None
# return deployment with lowest tpm usage
for item in potential_deployments:
item_tpm, item_rpm = self._get_deployment_usage(deployment_name=item["litellm_params"]["model"])
if item_tpm == 0:
return item
elif ("tpm" in item and item_tpm + token_count > item["tpm"]
or "rpm" in item and item_rpm + 1 >= item["rpm"]): # if user passed in tpm / rpm in the model_list
continue
elif item_tpm < lowest_tpm:
lowest_tpm = item_tpm
deployment = item
# if none, raise exception
if deployment is None:
raise ValueError("No models available.")
# return model
return deployment
def _get_deployment_usage(
self,
deployment_name: str
):
# ------------
# Setup values
# ------------
current_minute = datetime.now().strftime("%H-%M")
tpm_key = f'{deployment_name}:tpm:{current_minute}'
rpm_key = f'{deployment_name}:rpm:{current_minute}'
# ------------
# Return usage
# ------------
tpm = self.cache.get_cache(cache_key=tpm_key) or 0
rpm = self.cache.get_cache(cache_key=rpm_key) or 0
return int(tpm), int(rpm)
def increment(self, key: str, increment_value: int):
# get value
cached_value = self.cache.get_cache(cache_key=key)
# update value
try:
cached_value = cached_value + increment_value
except:
cached_value = increment_value
# save updated value
self.cache.add_cache(result=cached_value, cache_key=key, ttl=self.default_cache_time_seconds)
def _set_deployment_usage(
self,
model_name: str,
total_tokens: int
):
# ------------
# Setup values
# ------------
current_minute = datetime.now().strftime("%H-%M")
tpm_key = f'{model_name}:tpm:{current_minute}'
rpm_key = f'{model_name}:rpm:{current_minute}'
# ------------
# Update usage
# ------------
self.increment(tpm_key, total_tokens)
self.increment(rpm_key, 1) | [] |
2024-01-10 | LiquidAdTech/Zahara | litellm~tests~test_embedding.py | import sys, os
import traceback
import pytest
from dotenv import load_dotenv
import openai
load_dotenv()
sys.path.insert(
0, os.path.abspath("../..")
) # Adds the parent directory to the system path
import litellm
from litellm import embedding, completion
litellm.set_verbose = False
def test_openai_embedding():
try:
response = embedding(
model="text-embedding-ada-002", input=["good morning from litellm", "this is another item"]
)
litellm_response = dict(response)
litellm_response.pop("_response_ms")
litellm_response_keys = set(litellm_response.keys())
print(litellm_response_keys)
print("LiteLLM Response\n")
print(litellm_response)
# same request with OpenAI 1.0+
import openai
client = openai.OpenAI(api_key=os.environ['OPENAI_API_KEY'])
response = client.embeddings.create(
model="text-embedding-ada-002", input=["good morning from litellm", "this is another item"]
)
response = dict(response)
openai_response_keys = set(response.keys())
assert litellm_response_keys == openai_response_keys # ENSURE the Keys in litellm response is exactly what the openai package returns
assert len(litellm_response["data"]) == 2 # expect two embedding responses from litellm_response since input had two
print(openai_response_keys)
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_openai_embedding()
def test_openai_azure_embedding_simple():
try:
response = embedding(
model="azure/azure-embedding-model",
input=["good morning from litellm"],
)
print(response)
response_keys = dict(response).keys()
assert set(["usage", "model", "object", "data"]) == set(response_keys) #assert litellm response has expected keys from OpenAI embedding response
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_openai_azure_embedding_simple()
def test_openai_azure_embedding_timeouts():
try:
response = embedding(
model="azure/azure-embedding-model",
input=["good morning from litellm"],
timeout=0.00001
)
print(response)
except openai.APITimeoutError:
print("Good job got timeout error!")
pass
except Exception as e:
pytest.fail(f"Expected timeout error, did not get the correct error. Instead got {e}")
# test_openai_azure_embedding_timeouts()
def test_openai_embedding_timeouts():
try:
response = embedding(
model="text-embedding-ada-002",
input=["good morning from litellm"],
timeout=0.00001
)
print(response)
except openai.APITimeoutError:
print("Good job got OpenAI timeout error!")
pass
except Exception as e:
pytest.fail(f"Expected timeout error, did not get the correct error. Instead got {e}")
test_openai_embedding_timeouts()
def test_openai_azure_embedding():
try:
api_key = os.environ['AZURE_API_KEY']
api_base = os.environ['AZURE_API_BASE']
api_version = os.environ['AZURE_API_VERSION']
os.environ['AZURE_API_VERSION'] = ""
os.environ['AZURE_API_BASE'] = ""
os.environ['AZURE_API_KEY'] = ""
response = embedding(
model="azure/azure-embedding-model",
input=["good morning from litellm", "this is another item"],
api_key=api_key,
api_base=api_base,
api_version=api_version,
)
print(response)
os.environ['AZURE_API_VERSION'] = api_version
os.environ['AZURE_API_BASE'] = api_base
os.environ['AZURE_API_KEY'] = api_key
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_openai_azure_embedding()
# test_openai_embedding()
def test_cohere_embedding():
try:
# litellm.set_verbose=True
response = embedding(
model="embed-english-v2.0", input=["good morning from litellm", "this is another item"]
)
print(f"response:", response)
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_cohere_embedding()
def test_cohere_embedding3():
try:
litellm.set_verbose=True
response = embedding(
model="embed-english-v3.0",
input=["good morning from litellm", "this is another item"],
)
print(f"response:", response)
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_cohere_embedding3()
def test_bedrock_embedding():
try:
response = embedding(
model="amazon.titan-embed-text-v1", input=["good morning from litellm, attempting to embed data",
"lets test a second string for good measure"]
)
print(f"response:", response)
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_bedrock_embedding()
# comment out hf tests - since hf endpoints are unstable
def test_hf_embedding():
try:
# huggingface/microsoft/codebert-base
# huggingface/facebook/bart-large
response = embedding(
model="huggingface/sentence-transformers/all-MiniLM-L6-v2", input=["good morning from litellm", "this is another item"]
)
print(f"response:", response)
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# test_hf_embedding()
# test async embeddings
def test_aembedding():
import asyncio
async def embedding_call():
try:
response = await litellm.aembedding(
model="text-embedding-ada-002",
input=["good morning from litellm", "this is another item"]
)
print(response)
except:
print(f"error occurred: {traceback.format_exc()}")
pass
asyncio.run(embedding_call())
# test_aembedding()
| [] |
2024-01-10 | LiquidAdTech/Zahara | litellm~tests~test_proxy_cli.py | # COMMENT: This is a new test added today Nov 16th, that is flaky - will need to look into this and update what's going wrong here
# import subprocess
# import time
# import openai
# import pytest
# from dotenv import load_dotenv
# import os
# load_dotenv()
# ## This tests the litellm proxy cli, it creates a proxy server and makes a basic chat completion request to gpt-3.5-turbo
# ## Do not comment this test out
# def test_basic_proxy_cli_command():
# # Command to run
# print("current working dir", os.getcwd())
# command = "python3 litellm/proxy/proxy_cli.py --model gpt-3.5-turbo --port 51670 --debug"
# print("Running command to start proxy")
# # Start the subprocess asynchronously
# process = subprocess.Popen(command, shell=True)
# # Allow some time for the proxy server to start (adjust as needed)
# time.sleep(1)
# # Make a request using the openai package
# client = openai.OpenAI(
# api_key="Your API Key", # Replace with your actual API key
# base_url="http://0.0.0.0:51670"
# )
# try:
# response = client.chat.completions.create(model="gpt-3.5-turbo", messages=[
# {
# "role": "user",
# "content": "this is a test request, write a short poem"
# }
# ])
# print(response)
# response_str = response.choices[0].message.content
# assert len(response_str) > 10
# except Exception as e:
# print("Got exception")
# print(e)
# process.terminate() # Terminate the subprocess to close down the server
# pytest.fail("Basic test, proxy cli failed", e)
# # Terminate the subprocess to close down the server
# process.terminate()
# test_basic_proxy_cli_command()
| [] |
2024-01-10 | LiquidAdTech/Zahara | litellm~proxy~proxy_cli.py | import click
import subprocess, traceback, json
import os, sys
import random, appdirs
from datetime import datetime
from dotenv import load_dotenv
import operator
sys.path.append(os.getcwd())
config_filename = "litellm.secrets"
# Using appdirs to determine user-specific config path
config_dir = appdirs.user_config_dir("litellm")
user_config_path = os.getenv("LITELLM_CONFIG_PATH", os.path.join(config_dir, config_filename))
load_dotenv()
from importlib import resources
import shutil
telemetry = None
def run_ollama_serve():
try:
command = ['ollama', 'serve']
with open(os.devnull, 'w') as devnull:
process = subprocess.Popen(command, stdout=devnull, stderr=devnull)
except Exception as e:
print(f"""
LiteLLM Warning: proxy started with `ollama` model\n`ollama serve` failed with Exception{e}. \nEnsure you run `ollama serve`
""")
def clone_subfolder(repo_url, subfolder, destination):
# Clone the full repo
repo_name = repo_url.split('/')[-1]
repo_master = os.path.join(destination, "repo_master")
subprocess.run(['git', 'clone', repo_url, repo_master])
# Move into the subfolder
subfolder_path = os.path.join(repo_master, subfolder)
# Copy subfolder to destination
for file_name in os.listdir(subfolder_path):
source = os.path.join(subfolder_path, file_name)
if os.path.isfile(source):
shutil.copy(source, destination)
else:
dest_path = os.path.join(destination, file_name)
shutil.copytree(source, dest_path)
# Remove cloned repo folder
subprocess.run(['rm', '-rf', os.path.join(destination, "repo_master")])
feature_telemetry(feature="create-proxy")
def is_port_in_use(port):
import socket
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
return s.connect_ex(('localhost', port)) == 0
@click.command()
@click.option('--host', default='0.0.0.0', help='Host for the server to listen on.')
@click.option('--port', default=8000, help='Port to bind the server to.')
@click.option('--num_workers', default=1, help='Number of uvicorn workers to spin up')
@click.option('--api_base', default=None, help='API base URL.')
@click.option('--api_version', default="2023-07-01-preview", help='For azure - pass in the api version.')
@click.option('--model', '-m', default=None, help='The model name to pass to litellm expects')
@click.option('--alias', default=None, help='The alias for the model - use this to give a litellm model name (e.g. "huggingface/codellama/CodeLlama-7b-Instruct-hf") a more user-friendly name ("codellama")')
@click.option('--add_key', default=None, help='The model name to pass to litellm expects')
@click.option('--headers', default=None, help='headers for the API call')
@click.option('--save', is_flag=True, type=bool, help='Save the model-specific config')
@click.option('--debug', default=False, is_flag=True, type=bool, help='To debug the input')
@click.option('--use_queue', default=False, is_flag=True, type=bool, help='To use celery workers for async endpoints')
@click.option('--temperature', default=None, type=float, help='Set temperature for the model')
@click.option('--max_tokens', default=None, type=int, help='Set max tokens for the model')
@click.option('--request_timeout', default=600, type=int, help='Set timeout in seconds for completion calls')
@click.option('--drop_params', is_flag=True, help='Drop any unmapped params')
@click.option('--add_function_to_prompt', is_flag=True, help='If function passed but unsupported, pass it as prompt')
@click.option('--config', '-c', default=None, help='Configure Litellm')
@click.option('--file', '-f', help='Path to config file')
@click.option('--max_budget', default=None, type=float, help='Set max budget for API calls - works for hosted models like OpenAI, TogetherAI, Anthropic, etc.`')
@click.option('--telemetry', default=True, type=bool, help='Helps us know if people are using this feature. Turn this off by doing `--telemetry False`')
@click.option('--logs', flag_value=False, type=int, help='Gets the "n" most recent logs. By default gets most recent log.')
@click.option('--test', flag_value=True, help='proxy chat completions url to make a test request to')
@click.option('--test_async', default=False, is_flag=True, help='Calls async endpoints /queue/requests and /queue/response')
@click.option('--num_requests', default=10, type=int, help='Number of requests to hit async endpoint with')
@click.option('--local', is_flag=True, default=False, help='for local debugging')
def run_server(host, port, api_base, api_version, model, alias, add_key, headers, save, debug, temperature, max_tokens, request_timeout, drop_params, add_function_to_prompt, config, file, max_budget, telemetry, logs, test, local, num_workers, test_async, num_requests, use_queue):
global feature_telemetry
args = locals()
if local:
from proxy_server import app, save_worker_config, usage_telemetry
else:
try:
from .proxy_server import app, save_worker_config, usage_telemetry
except ImportError as e:
from proxy_server import app, save_worker_config, usage_telemetry
feature_telemetry = usage_telemetry
if logs is not None:
if logs == 0: # default to 1
logs = 1
try:
with open('api_log.json') as f:
data = json.load(f)
# convert keys to datetime objects
log_times = {datetime.strptime(k, "%Y%m%d%H%M%S%f"): v for k, v in data.items()}
# sort by timestamp
sorted_times = sorted(log_times.items(), key=operator.itemgetter(0), reverse=True)
# get n recent logs
recent_logs = {k.strftime("%Y%m%d%H%M%S%f"): v for k, v in sorted_times[:logs]}
print(json.dumps(recent_logs, indent=4))
except:
print("LiteLLM: No logs saved!")
return
if model and "ollama" in model:
run_ollama_serve()
if test_async is True:
import requests, concurrent, time
api_base = f"http://{host}:{port}"
def _make_openai_completion():
data = {
"model": "gpt-3.5-turbo",
"messages": [{"role": "user", "content": "Write a short poem about the moon"}]
}
response = requests.post("http://0.0.0.0:8000/queue/request", json=data)
response = response.json()
while True:
try:
url = response["url"]
polling_url = f"{api_base}{url}"
polling_response = requests.get(polling_url)
polling_response = polling_response.json()
print("\n RESPONSE FROM POLLING JOB", polling_response)
status = polling_response["status"]
if status == "finished":
llm_response = polling_response["result"]
break
print(f"POLLING JOB{polling_url}\nSTATUS: {status}, \n Response {polling_response}")
time.sleep(0.5)
except Exception as e:
print("got exception in polling", e)
break
# Number of concurrent calls (you can adjust this)
concurrent_calls = num_requests
# List to store the futures of concurrent calls
futures = []
start_time = time.time()
# Make concurrent calls
with concurrent.futures.ThreadPoolExecutor(max_workers=concurrent_calls) as executor:
for _ in range(concurrent_calls):
futures.append(executor.submit(_make_openai_completion))
# Wait for all futures to complete
concurrent.futures.wait(futures)
# Summarize the results
successful_calls = 0
failed_calls = 0
for future in futures:
if future.done():
if future.result() is not None:
successful_calls += 1
else:
failed_calls += 1
end_time = time.time()
print(f"Elapsed Time: {end_time-start_time}")
print(f"Load test Summary:")
print(f"Total Requests: {concurrent_calls}")
print(f"Successful Calls: {successful_calls}")
print(f"Failed Calls: {failed_calls}")
return
if test != False:
click.echo('\nLiteLLM: Making a test ChatCompletions request to your proxy')
import openai
if test == True: # flag value set
api_base = f"http://{host}:{port}"
else:
api_base = test
client = openai.OpenAI(
api_key="My API Key",
base_url=api_base
)
response = client.chat.completions.create(model="gpt-3.5-turbo", messages = [
{
"role": "user",
"content": "this is a test request, write a short poem"
}
])
click.echo(f'\nLiteLLM: response from proxy {response}')
print("\n Making streaming request to proxy")
response = client.chat.completions.create(model="gpt-3.5-turbo", messages = [
{
"role": "user",
"content": "this is a test request, write a short poem"
}
],
stream=True,
)
for chunk in response:
click.echo(f'LiteLLM: streaming response from proxy {chunk}')
print("\n making completion request to proxy")
response = client.completions.create(model="gpt-3.5-turbo", prompt='this is a test request, write a short poem')
print(response)
return
else:
if headers:
headers = json.loads(headers)
save_worker_config(model=model, alias=alias, api_base=api_base, api_version=api_version, debug=debug, temperature=temperature, max_tokens=max_tokens, request_timeout=request_timeout, max_budget=max_budget, telemetry=telemetry, drop_params=drop_params, add_function_to_prompt=add_function_to_prompt, headers=headers, save=save, config=config, use_queue=use_queue)
try:
import uvicorn
except:
raise ImportError("Uvicorn needs to be imported. Run - `pip install uvicorn`")
if port == 8000 and is_port_in_use(port):
port = random.randint(1024, 49152)
uvicorn.run("litellm.proxy.proxy_server:app", host=host, port=port, workers=num_workers)
if __name__ == "__main__":
run_server()
| [
"this is a test request, write a short poem",
"Write a short poem about the moon"
] |
2024-01-10 | LiquidAdTech/Zahara | litellm~tests~test_class.py | # #### What this tests ####
# # This tests the LiteLLM Class
# import sys, os
# import traceback
# import pytest
# sys.path.insert(
# 0, os.path.abspath("../..")
# ) # Adds the parent directory to the system path
# import litellm
# import asyncio
# litellm.set_verbose = True
# from litellm import Router
# import instructor
# from pydantic import BaseModel
# # This enables response_model keyword
# # # from client.chat.completions.create
# # client = instructor.patch(Router(model_list=[{
# # "model_name": "gpt-3.5-turbo", # openai model name
# # "litellm_params": { # params for litellm completion/embedding call
# # "model": "azure/chatgpt-v-2",
# # "api_key": os.getenv("AZURE_API_KEY"),
# # "api_version": os.getenv("AZURE_API_VERSION"),
# # "api_base": os.getenv("AZURE_API_BASE")
# # }
# # }]))
# # class UserDetail(BaseModel):
# # name: str
# # age: int
# # user = client.chat.completions.create(
# # model="gpt-3.5-turbo",
# # response_model=UserDetail,
# # messages=[
# # {"role": "user", "content": "Extract Jason is 25 years old"},
# # ]
# # )
# # assert isinstance(model, UserExtract)
# # assert isinstance(user, UserDetail)
# # assert user.name == "Jason"
# # assert user.age == 25
# # print(f"user: {user}")
# import instructor
# from openai import AsyncOpenAI
# aclient = instructor.apatch(Router(model_list=[{
# "model_name": "gpt-3.5-turbo", # openai model name
# "litellm_params": { # params for litellm completion/embedding call
# "model": "azure/chatgpt-v-2",
# "api_key": os.getenv("AZURE_API_KEY"),
# "api_version": os.getenv("AZURE_API_VERSION"),
# "api_base": os.getenv("AZURE_API_BASE")
# }
# }], default_litellm_params={"acompletion": True}))
# class UserExtract(BaseModel):
# name: str
# age: int
# async def main():
# model = await aclient.chat.completions.create(
# model="gpt-3.5-turbo",
# response_model=UserExtract,
# messages=[
# {"role": "user", "content": "Extract jason is 25 years old"},
# ],
# )
# print(f"model: {model}")
# asyncio.run(main()) | [] |
2024-01-10 | LiquidAdTech/Zahara | cookbook~litellm_router~load_test_proxy.py | import sys, os
import traceback
from dotenv import load_dotenv
load_dotenv()
import os, io
sys.path.insert(
0, os.path.abspath("../..")
) # Adds the parent directory to the system path
import pytest
from litellm import Router
import litellm
litellm.set_verbose=False
os.environ.pop("AZURE_AD_TOKEN")
model_list = [{ # list of model deployments
"model_name": "gpt-3.5-turbo", # model alias
"litellm_params": { # params for litellm completion/embedding call
"model": "azure/chatgpt-v-2", # actual model name
"api_key": os.getenv("AZURE_API_KEY"),
"api_version": os.getenv("AZURE_API_VERSION"),
"api_base": os.getenv("AZURE_API_BASE")
}
}, {
"model_name": "gpt-3.5-turbo",
"litellm_params": { # params for litellm completion/embedding call
"model": "azure/chatgpt-functioncalling",
"api_key": os.getenv("AZURE_API_KEY"),
"api_version": os.getenv("AZURE_API_VERSION"),
"api_base": os.getenv("AZURE_API_BASE")
}
}, {
"model_name": "gpt-3.5-turbo",
"litellm_params": { # params for litellm completion/embedding call
"model": "gpt-3.5-turbo",
"api_key": os.getenv("OPENAI_API_KEY"),
}
}]
router = Router(model_list=model_list)
file_paths = ["test_questions/question1.txt", "test_questions/question2.txt", "test_questions/question3.txt"]
questions = []
for file_path in file_paths:
try:
print(file_path)
with open(file_path, 'r') as file:
content = file.read()
questions.append(content)
except FileNotFoundError as e:
print(f"File not found: {e}")
except Exception as e:
print(f"An error occurred: {e}")
# for q in questions:
# print(q)
# make X concurrent calls to litellm.completion(model=gpt-35-turbo, messages=[]), pick a random question in questions array.
# Allow me to tune X concurrent calls.. Log question, output/exception, response time somewhere
# show me a summary of requests made, success full calls, failed calls. For failed calls show me the exceptions
import concurrent.futures
import random
import time
# Function to make concurrent calls to OpenAI API
def make_openai_completion(question):
try:
start_time = time.time()
import openai
client = openai.OpenAI(api_key=os.environ['OPENAI_API_KEY'], base_url="http://0.0.0.0:8000") #base_url="http://0.0.0.0:8000",
response = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[{"role": "system", "content": f"You are a helpful assistant. Answer this question{question}"}],
)
print(response)
end_time = time.time()
# Log the request details
with open("request_log.txt", "a") as log_file:
log_file.write(
f"Question: {question[:100]}\nResponse ID:{response.id} Content:{response.choices[0].message.content[:10]}\nTime: {end_time - start_time:.2f} seconds\n\n"
)
return response
except Exception as e:
# Log exceptions for failed calls
with open("error_log.txt", "a") as error_log_file:
error_log_file.write(
f"Question: {question[:100]}\nException: {str(e)}\n\n"
)
return None
# Number of concurrent calls (you can adjust this)
concurrent_calls = 100
# List to store the futures of concurrent calls
futures = []
# Make concurrent calls
with concurrent.futures.ThreadPoolExecutor(max_workers=concurrent_calls) as executor:
for _ in range(concurrent_calls):
random_question = random.choice(questions)
futures.append(executor.submit(make_openai_completion, random_question))
# Wait for all futures to complete
concurrent.futures.wait(futures)
# Summarize the results
successful_calls = 0
failed_calls = 0
for future in futures:
if future.result() is not None:
successful_calls += 1
else:
failed_calls += 1
print(f"Load test Summary:")
print(f"Total Requests: {concurrent_calls}")
print(f"Successful Calls: {successful_calls}")
print(f"Failed Calls: {failed_calls}")
# Display content of the logs
with open("request_log.txt", "r") as log_file:
print("\nRequest Log:\n", log_file.read())
with open("error_log.txt", "r") as error_log_file:
print("\nError Log:\n", error_log_file.read())
| [
"You are a helpful assistant. Answer this questionPLACEHOLDER"
] |
2024-01-10 | LiquidAdTech/Zahara | litellm~utils.py | # +-----------------------------------------------+
# | |
# | Give Feedback / Get Help |
# | https://github.com/BerriAI/litellm/issues/new |
# | |
# +-----------------------------------------------+
#
# Thank you users! We ❤️ you! - Krrish & Ishaan
import sys
import dotenv, json, traceback, threading
import subprocess, os
import litellm, openai
import itertools
import random, uuid, requests
import datetime, time
import tiktoken
import uuid
import aiohttp
import logging
import asyncio, httpx, inspect
import copy
from tokenizers import Tokenizer
from dataclasses import (
dataclass,
field,
) # for storing API inputs, outputs, and metadata
encoding = tiktoken.get_encoding("cl100k_base")
import importlib.metadata
from .integrations.traceloop import TraceloopLogger
from .integrations.helicone import HeliconeLogger
from .integrations.aispend import AISpendLogger
from .integrations.berrispend import BerriSpendLogger
from .integrations.supabase import Supabase
from .integrations.llmonitor import LLMonitorLogger
from .integrations.prompt_layer import PromptLayerLogger
from .integrations.langsmith import LangsmithLogger
from .integrations.weights_biases import WeightsBiasesLogger
from .integrations.custom_logger import CustomLogger
from .integrations.langfuse import LangFuseLogger
from .integrations.litedebugger import LiteDebugger
from openai import OpenAIError as OriginalError
from openai._models import BaseModel as OpenAIObject
from .exceptions import (
AuthenticationError,
BadRequestError,
RateLimitError,
ServiceUnavailableError,
OpenAIError,
ContextWindowExceededError,
Timeout,
APIConnectionError,
APIError,
BudgetExceededError
)
from typing import cast, List, Dict, Union, Optional, Literal
from .caching import Cache
####### ENVIRONMENT VARIABLES ####################
dotenv.load_dotenv() # Loading env variables using dotenv
sentry_sdk_instance = None
capture_exception = None
add_breadcrumb = None
posthog = None
slack_app = None
alerts_channel = None
heliconeLogger = None
promptLayerLogger = None
langsmithLogger = None
weightsBiasesLogger = None
customLogger = None
langFuseLogger = None
llmonitorLogger = None
aispendLogger = None
berrispendLogger = None
supabaseClient = None
liteDebuggerClient = None
callback_list: Optional[List[str]] = []
user_logger_fn = None
additional_details: Optional[Dict[str, str]] = {}
local_cache: Optional[Dict[str, str]] = {}
last_fetched_at = None
last_fetched_at_keys = None
######## Model Response #########################
# All liteLLM Model responses will be in this format, Follows the OpenAI Format
# https://docs.litellm.ai/docs/completion/output
# {
# 'choices': [
# {
# 'finish_reason': 'stop',
# 'index': 0,
# 'message': {
# 'role': 'assistant',
# 'content': " I'm doing well, thank you for asking. I am Claude, an AI assistant created by Anthropic."
# }
# }
# ],
# 'created': 1691429984.3852863,
# 'model': 'claude-instant-1',
# 'usage': {'prompt_tokens': 18, 'completion_tokens': 23, 'total_tokens': 41}
# }
class UnsupportedParamsError(Exception):
def __init__(self, status_code, message):
self.status_code = status_code
self.message = message
super().__init__(
self.message
) # Call the base class constructor with the parameters it needs
def _generate_id(): # private helper function
return 'chatcmpl-' + str(uuid.uuid4())
def map_finish_reason(finish_reason: str): # openai supports 5 stop sequences - 'stop', 'length', 'function_call', 'content_filter', 'null'
# anthropic mapping
if finish_reason == "stop_sequence":
return "stop"
return finish_reason
class FunctionCall(OpenAIObject):
arguments: str
name: str
class Function(OpenAIObject):
arguments: str
name: str
class ChatCompletionMessageToolCall(OpenAIObject):
id: str
function: Function
type: str
class Message(OpenAIObject):
def __init__(self, content="default", role="assistant", logprobs=None, function_call=None, tool_calls=None, **params):
super(Message, self).__init__(**params)
self.content = content
self.role = role
if function_call is not None:
self.function_call = FunctionCall(**function_call)
if tool_calls is not None:
self.tool_calls = []
for tool_call in tool_calls:
self.tool_calls.append(
ChatCompletionMessageToolCall(**tool_call)
)
if logprobs is not None:
self._logprobs = logprobs
def get(self, key, default=None):
# Custom .get() method to access attributes with a default value if the attribute doesn't exist
return getattr(self, key, default)
def __getitem__(self, key):
# Allow dictionary-style access to attributes
return getattr(self, key)
def __setitem__(self, key, value):
# Allow dictionary-style assignment of attributes
setattr(self, key, value)
class Delta(OpenAIObject):
def __init__(self, content=None, role=None, **params):
super(Delta, self).__init__(**params)
self.content = content
self.role = role
def __contains__(self, key):
# Define custom behavior for the 'in' operator
return hasattr(self, key)
def get(self, key, default=None):
# Custom .get() method to access attributes with a default value if the attribute doesn't exist
return getattr(self, key, default)
def __getitem__(self, key):
# Allow dictionary-style access to attributes
return getattr(self, key)
def __setitem__(self, key, value):
# Allow dictionary-style assignment of attributes
setattr(self, key, value)
class Choices(OpenAIObject):
def __init__(self, finish_reason=None, index=0, message=None, **params):
super(Choices, self).__init__(**params)
self.finish_reason = map_finish_reason(finish_reason) # set finish_reason for all responses
self.index = index
if message is None:
self.message = Message(content=None)
else:
self.message = message
def __contains__(self, key):
# Define custom behavior for the 'in' operator
return hasattr(self, key)
def get(self, key, default=None):
# Custom .get() method to access attributes with a default value if the attribute doesn't exist
return getattr(self, key, default)
def __getitem__(self, key):
# Allow dictionary-style access to attributes
return getattr(self, key)
def __setitem__(self, key, value):
# Allow dictionary-style assignment of attributes
setattr(self, key, value)
class Usage(OpenAIObject):
def __init__(self, prompt_tokens=None, completion_tokens=None, total_tokens=None, **params):
super(Usage, self).__init__(**params)
if prompt_tokens:
self.prompt_tokens = prompt_tokens
if completion_tokens:
self.completion_tokens = completion_tokens
if total_tokens:
self.total_tokens = total_tokens
def __contains__(self, key):
# Define custom behavior for the 'in' operator
return hasattr(self, key)
def get(self, key, default=None):
# Custom .get() method to access attributes with a default value if the attribute doesn't exist
return getattr(self, key, default)
def __getitem__(self, key):
# Allow dictionary-style access to attributes
return getattr(self, key)
def __setitem__(self, key, value):
# Allow dictionary-style assignment of attributes
setattr(self, key, value)
class StreamingChoices(OpenAIObject):
def __init__(self, finish_reason=None, index=0, delta: Optional[Delta]=None, **params):
super(StreamingChoices, self).__init__(**params)
if finish_reason:
self.finish_reason = finish_reason
else:
self.finish_reason = None
self.index = index
if delta:
self.delta = delta
else:
self.delta = Delta()
def __contains__(self, key):
# Define custom behavior for the 'in' operator
return hasattr(self, key)
def get(self, key, default=None):
# Custom .get() method to access attributes with a default value if the attribute doesn't exist
return getattr(self, key, default)
def __getitem__(self, key):
# Allow dictionary-style access to attributes
return getattr(self, key)
def __setitem__(self, key, value):
# Allow dictionary-style assignment of attributes
setattr(self, key, value)
class ModelResponse(OpenAIObject):
id: str
"""A unique identifier for the completion."""
choices: List[Union[Choices, StreamingChoices]]
"""The list of completion choices the model generated for the input prompt."""
created: int
"""The Unix timestamp (in seconds) of when the completion was created."""
model: Optional[str] = None
"""The model used for completion."""
object: str
"""The object type, which is always "text_completion" """
system_fingerprint: Optional[str] = None
"""This fingerprint represents the backend configuration that the model runs with.
Can be used in conjunction with the `seed` request parameter to understand when
backend changes have been made that might impact determinism.
"""
usage: Optional[Usage] = None
"""Usage statistics for the completion request."""
_hidden_params: dict = {}
def __init__(self, id=None, choices=None, created=None, model=None, object=None, system_fingerprint=None, usage=None, stream=False, response_ms=None, hidden_params=None, **params):
if stream:
object = "chat.completion.chunk"
choices = [StreamingChoices()]
else:
if model in litellm.open_ai_embedding_models:
object = "embedding"
else:
object = "chat.completion"
choices = [Choices()]
if id is None:
id = _generate_id()
else:
id = id
if created is None:
created = int(time.time())
else:
created = created
model = model
if usage:
usage = usage
else:
usage = Usage()
if hidden_params:
self._hidden_params = hidden_params
super().__init__(id=id, choices=choices, created=created, model=model, object=object, system_fingerprint=system_fingerprint, usage=usage, **params)
def __contains__(self, key):
# Define custom behavior for the 'in' operator
return hasattr(self, key)
def get(self, key, default=None):
# Custom .get() method to access attributes with a default value if the attribute doesn't exist
return getattr(self, key, default)
def __getitem__(self, key):
# Allow dictionary-style access to attributes
return getattr(self, key)
def __setitem__(self, key, value):
# Allow dictionary-style assignment of attributes
setattr(self, key, value)
class EmbeddingResponse(OpenAIObject):
def __init__(self, model=None, usage=None, stream=False, response_ms=None):
object = "list"
if response_ms:
_response_ms = response_ms
else:
_response_ms = None
data = []
model = model
super().__init__(model=model, object=object, data=data, usage=usage)
def __contains__(self, key):
# Define custom behavior for the 'in' operator
return hasattr(self, key)
def get(self, key, default=None):
# Custom .get() method to access attributes with a default value if the attribute doesn't exist
return getattr(self, key, default)
def __getitem__(self, key):
# Allow dictionary-style access to attributes
return getattr(self, key)
def __setitem__(self, key, value):
# Allow dictionary-style assignment of attributes
setattr(self, key, value)
class TextChoices(OpenAIObject):
def __init__(self, finish_reason=None, index=0, text=None, logprobs=None, **params):
super(TextChoices, self).__init__(**params)
if finish_reason:
self.finish_reason = map_finish_reason(finish_reason)
else:
self.finish_reason = "stop"
self.index = index
if text:
self.text = text
else:
self.text = None
if logprobs:
self.logprobs = []
else:
self.logprobs = logprobs
def __contains__(self, key):
# Define custom behavior for the 'in' operator
return hasattr(self, key)
def get(self, key, default=None):
# Custom .get() method to access attributes with a default value if the attribute doesn't exist
return getattr(self, key, default)
def __getitem__(self, key):
# Allow dictionary-style access to attributes
return getattr(self, key)
def __setitem__(self, key, value):
# Allow dictionary-style assignment of attributes
setattr(self, key, value)
class TextCompletionResponse(OpenAIObject):
"""
{
"id": response["id"],
"object": "text_completion",
"created": response["created"],
"model": response["model"],
"choices": [
{
"text": response["choices"][0]["message"]["content"],
"index": response["choices"][0]["index"],
"logprobs": transformed_logprobs,
"finish_reason": response["choices"][0]["finish_reason"]
}
],
"usage": response["usage"]
}
"""
def __init__(self, id=None, choices=None, created=None, model=None, usage=None, stream=False, response_ms=None, **params):
super(TextCompletionResponse, self).__init__(**params)
if stream:
self.object = "text_completion.chunk"
self.choices = [TextChoices()]
else:
self.object = "text_completion"
self.choices = [TextChoices()]
if id is None:
self.id = _generate_id()
else:
self.id = id
if created is None:
self.created = int(time.time())
else:
self.created = created
if response_ms:
self._response_ms = response_ms
else:
self._response_ms = None
self.model = model
if usage:
self.usage = usage
else:
self.usage = Usage()
self._hidden_params = {} # used in case users want to access the original model response
def __contains__(self, key):
# Define custom behavior for the 'in' operator
return hasattr(self, key)
def get(self, key, default=None):
# Custom .get() method to access attributes with a default value if the attribute doesn't exist
return getattr(self, key, default)
def __getitem__(self, key):
# Allow dictionary-style access to attributes
return getattr(self, key)
def __setitem__(self, key, value):
# Allow dictionary-style assignment of attributes
setattr(self, key, value)
############################################################
def print_verbose(print_statement):
if litellm.set_verbose:
print(print_statement) # noqa
####### LOGGING ###################
from enum import Enum
class CallTypes(Enum):
embedding = 'embedding'
completion = 'completion'
acompletion = 'acompletion'
# Logging function -> log the exact model details + what's being sent | Non-Blocking
class Logging:
global supabaseClient, liteDebuggerClient, promptLayerLogger, weightsBiasesLogger, langsmithLogger, capture_exception, add_breadcrumb, llmonitorLogger
def __init__(self, model, messages, stream, call_type, start_time, litellm_call_id, function_id):
if call_type not in [item.value for item in CallTypes]:
allowed_values = ", ".join([item.value for item in CallTypes])
raise ValueError(f"Invalid call_type {call_type}. Allowed values: {allowed_values}")
self.model = model
self.messages = messages
self.stream = stream
self.start_time = start_time # log the call start time
self.call_type = call_type
self.litellm_call_id = litellm_call_id
self.function_id = function_id
self.streaming_chunks = [] # for generating complete stream response
def update_environment_variables(self, model, user, optional_params, litellm_params):
self.optional_params = optional_params
self.model = model
self.user = user
self.litellm_params = litellm_params
self.logger_fn = litellm_params["logger_fn"]
print_verbose(f"self.optional_params: {self.optional_params}")
self.model_call_details = {
"model": self.model,
"messages": self.messages,
"optional_params": self.optional_params,
"litellm_params": self.litellm_params,
"start_time": self.start_time,
"stream": self.stream
}
def pre_call(self, input, api_key, model=None, additional_args={}):
# Log the exact input to the LLM API
litellm.error_logs['PRE_CALL'] = locals()
try:
# print_verbose(f"logging pre call for model: {self.model} with call type: {self.call_type}")
self.model_call_details["input"] = input
self.model_call_details["api_key"] = api_key
self.model_call_details["additional_args"] = additional_args
self.model_call_details["log_event_type"] = "pre_api_call"
if (
model
): # if model name was changes pre-call, overwrite the initial model call name with the new one
self.model_call_details["model"] = model
# User Logging -> if you pass in a custom logging function
headers = additional_args.get("headers", {})
if headers is None:
headers = {}
data = additional_args.get("complete_input_dict", {})
api_base = additional_args.get("api_base", "")
masked_headers = {k: v[:-40] + '*' * 40 if len(v) > 40 else v for k, v in headers.items()}
formatted_headers = " ".join([f"-H '{k}: {v}'" for k, v in masked_headers.items()])
print_verbose(f"PRE-API-CALL ADDITIONAL ARGS: {additional_args}")
curl_command = "\n\nPOST Request Sent from LiteLLM:\n"
curl_command += "curl -X POST \\\n"
curl_command += f"{api_base} \\\n"
curl_command += f"{formatted_headers} \\\n" if formatted_headers.strip() != "" else ""
curl_command += f"-d '{str(data)}'\n"
if api_base == "":
curl_command = self.model_call_details
print_verbose(f"\033[92m{curl_command}\033[0m\n")
if self.logger_fn and callable(self.logger_fn):
try:
self.logger_fn(
self.model_call_details
) # Expectation: any logger function passed in by the user should accept a dict object
except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}"
)
if litellm.max_budget and self.stream:
start_time = self.start_time
end_time = self.start_time # no time has passed as the call hasn't been made yet
time_diff = (end_time - start_time).total_seconds()
float_diff = float(time_diff)
litellm._current_cost += litellm.completion_cost(model=self.model, prompt="".join(message["content"] for message in self.messages), completion="", total_time=float_diff)
# Input Integration Logging -> If you want to log the fact that an attempt to call the model was made
for callback in litellm.input_callback:
try:
if callback == "supabase":
print_verbose("reaches supabase for logging!")
model = self.model_call_details["model"]
messages = self.model_call_details["input"]
print_verbose(f"supabaseClient: {supabaseClient}")
supabaseClient.input_log_event(
model=model,
messages=messages,
end_user=self.model_call_details.get("user", "default"),
litellm_call_id=self.litellm_params["litellm_call_id"],
print_verbose=print_verbose,
)
elif callback == "lite_debugger":
print_verbose(f"reaches litedebugger for logging! - model_call_details {self.model_call_details}")
model = self.model_call_details["model"]
messages = self.model_call_details["input"]
print_verbose(f"liteDebuggerClient: {liteDebuggerClient}")
liteDebuggerClient.input_log_event(
model=model,
messages=messages,
end_user=self.model_call_details.get("user", "default"),
litellm_call_id=self.litellm_params["litellm_call_id"],
litellm_params=self.model_call_details["litellm_params"],
optional_params=self.model_call_details["optional_params"],
print_verbose=print_verbose,
call_type=self.call_type
)
elif callback == "sentry" and add_breadcrumb:
print_verbose("reaches sentry breadcrumbing")
add_breadcrumb(
category="litellm.llm_call",
message=f"Model Call Details pre-call: {self.model_call_details}",
level="info",
)
elif isinstance(callback, CustomLogger): # custom logger class
callback.log_pre_api_call(
model=self.model,
messages=self.messages,
kwargs=self.model_call_details,
)
elif callable(callback): # custom logger functions
customLogger.log_input_event(
model=self.model,
messages=self.messages,
kwargs=self.model_call_details,
print_verbose=print_verbose,
callback_func=callback
)
except Exception as e:
traceback.print_exc()
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while input logging with integrations {traceback.format_exc()}"
)
print_verbose(
f"LiteLLM.Logging: is sentry capture exception initialized {capture_exception}"
)
if capture_exception: # log this error to sentry for debugging
capture_exception(e)
except:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}"
)
print_verbose(
f"LiteLLM.Logging: is sentry capture exception initialized {capture_exception}"
)
if capture_exception: # log this error to sentry for debugging
capture_exception(e)
def post_call(self, original_response, input=None, api_key=None, additional_args={}):
# Log the exact result from the LLM API, for streaming - log the type of response received
litellm.error_logs['POST_CALL'] = locals()
try:
self.model_call_details["input"] = input
self.model_call_details["api_key"] = api_key
self.model_call_details["original_response"] = original_response
self.model_call_details["additional_args"] = additional_args
self.model_call_details["log_event_type"] = "post_api_call"
# User Logging -> if you pass in a custom logging function
print_verbose(f"RAW RESPONSE:\n{self.model_call_details.get('original_response', self.model_call_details)}\n\n")
print_verbose(
f"Logging Details Post-API Call: logger_fn - {self.logger_fn} | callable(logger_fn) - {callable(self.logger_fn)}"
)
if self.logger_fn and callable(self.logger_fn):
try:
self.logger_fn(
self.model_call_details
) # Expectation: any logger function passed in by the user should accept a dict object
except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}"
)
# Input Integration Logging -> If you want to log the fact that an attempt to call the model was made
for callback in litellm.input_callback:
try:
if callback == "lite_debugger":
print_verbose("reaches litedebugger for post-call logging!")
print_verbose(f"liteDebuggerClient: {liteDebuggerClient}")
liteDebuggerClient.post_call_log_event(
original_response=original_response,
litellm_call_id=self.litellm_params["litellm_call_id"],
print_verbose=print_verbose,
call_type = self.call_type,
stream = self.stream,
)
elif callback == "sentry" and add_breadcrumb:
print_verbose("reaches sentry breadcrumbing")
add_breadcrumb(
category="litellm.llm_call",
message=f"Model Call Details post-call: {self.model_call_details}",
level="info",
)
elif isinstance(callback, CustomLogger): # custom logger class
callback.log_post_api_call(
kwargs=self.model_call_details,
response_obj=None,
start_time=self.start_time,
end_time=None
)
except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while post-call logging with integrations {traceback.format_exc()}"
)
print_verbose(
f"LiteLLM.Logging: is sentry capture exception initialized {capture_exception}"
)
if capture_exception: # log this error to sentry for debugging
capture_exception(e)
except:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}"
)
pass
def success_handler(self, result=None, start_time=None, end_time=None, **kwargs):
print_verbose(
f"Logging Details LiteLLM-Success Call"
)
try:
if start_time is None:
start_time = self.start_time
if end_time is None:
end_time = datetime.datetime.now()
self.model_call_details["log_event_type"] = "successful_api_call"
self.model_call_details["end_time"] = end_time
complete_streaming_response = None
## BUILD COMPLETE STREAMED RESPONSE
if self.stream:
if result.choices[0].finish_reason is not None: # if it's the last chunk
self.streaming_chunks.append(result)
complete_streaming_response = litellm.stream_chunk_builder(self.streaming_chunks)
else:
self.streaming_chunks.append(result)
elif isinstance(result, OpenAIObject):
result = result.model_dump()
if complete_streaming_response:
self.model_call_details["complete_streaming_response"] = complete_streaming_response
print_verbose(f"success callbacks: {litellm.success_callback}")
if litellm.max_budget and self.stream:
time_diff = (end_time - start_time).total_seconds()
float_diff = float(time_diff)
litellm._current_cost += litellm.completion_cost(model=self.model, prompt="", completion=result["content"], total_time=float_diff)
for callback in litellm.success_callback:
try:
if callback == "lite_debugger":
print_verbose("reaches lite_debugger for logging!")
print_verbose(f"liteDebuggerClient: {liteDebuggerClient}")
print_verbose(f"liteDebuggerClient details function {self.call_type} and stream set to {self.stream}")
liteDebuggerClient.log_event(
end_user=kwargs.get("user", "default"),
response_obj=result,
start_time=start_time,
end_time=end_time,
litellm_call_id=self.litellm_call_id,
print_verbose=print_verbose,
call_type = self.call_type,
stream = self.stream,
)
if callback == "api_manager":
print_verbose("reaches api manager for updating model cost")
litellm.apiManager.update_cost(completion_obj=result, user=self.user)
if callback == "cache":
if litellm.cache != None and self.model_call_details.get('optional_params', {}).get('stream', False) == True:
litellm_call_id = self.litellm_params["litellm_call_id"]
if litellm_call_id in self.litellm_params["stream_response"]:
# append for the given call_id
if self.litellm_params["stream_response"][litellm_call_id]["choices"][0]["message"]["content"] == "default":
self.litellm_params["stream_response"][litellm_call_id]["choices"][0]["message"]["content"] = result["content"] # handle first try
else:
self.litellm_params["stream_response"][litellm_call_id]["choices"][0]["message"]["content"] += result["content"]
else: # init a streaming response for this call id
new_model_response = ModelResponse(choices=[Choices(message=Message(content="default"))])
self.litellm_params["stream_response"][litellm_call_id] = new_model_response
litellm.cache.add_cache(self.litellm_params["stream_response"][litellm_call_id], **self.model_call_details)
if callback == "promptlayer":
print_verbose("reaches promptlayer for logging!")
promptLayerLogger.log_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
if callback == "supabase":
print_verbose("reaches supabase for logging!")
kwargs=self.model_call_details
# this only logs streaming once, complete_streaming_response exists i.e when stream ends
if self.stream:
if "complete_streaming_response" not in kwargs:
return
else:
print_verbose("reaches supabase for streaming logging!")
result = kwargs["complete_streaming_response"]
model = kwargs["model"]
messages = kwargs["messages"]
optional_params = kwargs.get("optional_params", {})
litellm_params = kwargs.get("litellm_params", {})
supabaseClient.log_event(
model=model,
messages=messages,
end_user=optional_params.get("user", "default"),
response_obj=result,
start_time=start_time,
end_time=end_time,
litellm_call_id=litellm_params.get("litellm_call_id", str(uuid.uuid4())),
print_verbose=print_verbose,
)
if callback == "wandb":
print_verbose("reaches wandb for logging!")
weightsBiasesLogger.log_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
if callback == "langsmith":
print_verbose("reaches langsmtih for logging!")
langsmithLogger.log_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
if callback == "llmonitor":
print_verbose("reaches llmonitor for logging!")
model = self.model
input = self.model_call_details.get("messages", self.model_call_details.get("input", None))
# if contains input, it's 'embedding', otherwise 'llm'
type = "embed" if self.call_type == CallTypes.embedding.value else "llm"
llmonitorLogger.log_event(
type=type,
event="end",
model=model,
input=input,
user_id=self.model_call_details.get("user", "default"),
response_obj=result,
start_time=start_time,
end_time=end_time,
run_id=self.litellm_call_id,
print_verbose=print_verbose,
)
if callback == "helicone":
print_verbose("reaches helicone for logging!")
model = self.model
messages = kwargs["messages"]
heliconeLogger.log_success(
model=model,
messages=messages,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
if callback == "langfuse":
print_verbose("reaches langfuse for logging!")
kwargs = {}
for k, v in self.model_call_details.items():
if k != "original_response": # copy.deepcopy raises errors as this could be a coroutine
kwargs[k] = v
# this only logs streaming once, complete_streaming_response exists i.e when stream ends
if self.stream:
if "complete_streaming_response" not in kwargs:
return
else:
print_verbose("reaches langfuse for streaming logging!")
result = kwargs["complete_streaming_response"]
langFuseLogger.log_event(
kwargs=kwargs,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
if callback == "traceloop":
deep_copy = {}
for k, v in self.model_call_details.items():
if k != "original_response":
deep_copy[k] = v
traceloopLogger.log_event(
kwargs=deep_copy,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
if isinstance(callback, CustomLogger): # custom logger class
if self.stream and complete_streaming_response is None:
callback.log_stream_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time
)
else:
if self.stream and complete_streaming_response:
self.model_call_details["complete_response"] = self.model_call_details.pop("complete_streaming_response", complete_streaming_response)
callback.log_success_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
)
if callable(callback): # custom logger functions
customLogger.log_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
callback_func=callback
)
except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while success logging with integrations {traceback.format_exc()}"
)
print_verbose(
f"LiteLLM.Logging: is sentry capture exception initialized {capture_exception}"
)
if capture_exception: # log this error to sentry for debugging
capture_exception(e)
except:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while success logging {traceback.format_exc()}"
)
pass
def failure_handler(self, exception, traceback_exception, start_time=None, end_time=None):
print_verbose(
f"Logging Details LiteLLM-Failure Call"
)
try:
if start_time is None:
start_time = self.start_time
if end_time is None:
end_time = datetime.datetime.now()
# on some exceptions, model_call_details is not always initialized, this ensures that we still log those exceptions
if not hasattr(self, "model_call_details"):
self.model_call_details = {}
self.model_call_details["log_event_type"] = "failed_api_call"
self.model_call_details["exception"] = exception
self.model_call_details["traceback_exception"] = traceback_exception
self.model_call_details["end_time"] = end_time
result = None # result sent to all loggers, init this to None incase it's not created
for callback in litellm.failure_callback:
try:
if callback == "lite_debugger":
print_verbose("reaches lite_debugger for logging!")
print_verbose(f"liteDebuggerClient: {liteDebuggerClient}")
result = {
"model": self.model,
"created": time.time(),
"error": traceback_exception,
"usage": {
"prompt_tokens": prompt_token_calculator(
self.model, messages=self.messages
),
"completion_tokens": 0,
},
}
liteDebuggerClient.log_event(
model=self.model,
messages=self.messages,
end_user=self.model_call_details.get("user", "default"),
response_obj=result,
start_time=start_time,
end_time=end_time,
litellm_call_id=self.litellm_call_id,
print_verbose=print_verbose,
call_type = self.call_type,
stream = self.stream,
)
elif callback == "llmonitor":
print_verbose("reaches llmonitor for logging error!")
model = self.model
input = self.model_call_details["input"]
type = "embed" if self.call_type == CallTypes.embedding.value else "llm"
llmonitorLogger.log_event(
type=type,
event="error",
user_id=self.model_call_details.get("user", "default"),
model=model,
input=input,
error=traceback_exception,
run_id=self.litellm_call_id,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
elif callback == "sentry":
print_verbose("sending exception to sentry")
if capture_exception:
capture_exception(exception)
else:
print_verbose(f"capture exception not initialized: {capture_exception}")
elif callable(callback): # custom logger functions
customLogger.log_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
callback_func=callback
)
elif isinstance(callback, CustomLogger): # custom logger class
callback.log_failure_event(
start_time=start_time,
end_time=end_time,
response_obj=result,
kwargs=self.model_call_details,
)
except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while failure logging with integrations {traceback.format_exc()}"
)
print_verbose(
f"LiteLLM.Logging: is sentry capture exception initialized {capture_exception}"
)
if capture_exception: # log this error to sentry for debugging
capture_exception(e)
except:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while failure logging {traceback.format_exc()}"
)
pass
def exception_logging(
additional_args={},
logger_fn=None,
exception=None,
):
try:
model_call_details = {}
if exception:
model_call_details["exception"] = exception
model_call_details["additional_args"] = additional_args
# User Logging -> if you pass in a custom logging function or want to use sentry breadcrumbs
print_verbose(
f"Logging Details: logger_fn - {logger_fn} | callable(logger_fn) - {callable(logger_fn)}"
)
if logger_fn and callable(logger_fn):
try:
logger_fn(
model_call_details
) # Expectation: any logger function passed in by the user should accept a dict object
except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}"
)
except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}"
)
pass
####### RULES ###################
class Rules:
"""
Fail calls based on the input or llm api output
Example usage:
import litellm
def my_custom_rule(input): # receives the model response
if "i don't think i can answer" in input: # trigger fallback if the model refuses to answer
return False
return True
litellm.post_call_rules = [my_custom_rule] # have these be functions that can be called to fail a call
response = litellm.completion(model="gpt-3.5-turbo", messages=[{"role": "user",
"content": "Hey, how's it going?"}], fallbacks=["openrouter/mythomax"])
"""
def __init__(self) -> None:
pass
def pre_call_rules(self, input: str, model: str):
for rule in litellm.pre_call_rules:
if callable(rule):
decision = rule(input)
if decision is False:
raise litellm.APIResponseValidationError(message="LLM Response failed post-call-rule check", llm_provider="", model=model) # type: ignore
return True
def post_call_rules(self, input: str, model: str):
for rule in litellm.post_call_rules:
if callable(rule):
decision = rule(input)
if decision is False:
raise litellm.APIResponseValidationError(message="LLM Response failed post-call-rule check", llm_provider="", model=model) # type: ignore
return True
####### CLIENT ###################
# make it easy to log if completion/embedding runs succeeded or failed + see what happened | Non-Blocking
def client(original_function):
global liteDebuggerClient, get_all_keys
rules_obj = Rules()
def function_setup(
start_time, *args, **kwargs
): # just run once to check if user wants to send their data anywhere - PostHog/Sentry/Slack/etc.
try:
global callback_list, add_breadcrumb, user_logger_fn, Logging
function_id = kwargs["id"] if "id" in kwargs else None
if litellm.use_client or ("use_client" in kwargs and kwargs["use_client"] == True):
print_verbose(f"litedebugger initialized")
if "lite_debugger" not in litellm.input_callback:
litellm.input_callback.append("lite_debugger")
if "lite_debugger" not in litellm.success_callback:
litellm.success_callback.append("lite_debugger")
if "lite_debugger" not in litellm.failure_callback:
litellm.failure_callback.append("lite_debugger")
if len(litellm.callbacks) > 0:
for callback in litellm.callbacks:
if callback not in litellm.input_callback:
litellm.input_callback.append(callback)
if callback not in litellm.success_callback:
litellm.success_callback.append(callback)
if callback not in litellm.failure_callback:
litellm.failure_callback.append(callback)
if (
len(litellm.input_callback) > 0
or len(litellm.success_callback) > 0
or len(litellm.failure_callback) > 0
) and len(callback_list) == 0:
callback_list = list(
set(
litellm.input_callback
+ litellm.success_callback
+ litellm.failure_callback
)
)
set_callbacks(
callback_list=callback_list,
function_id=function_id
)
if add_breadcrumb:
add_breadcrumb(
category="litellm.llm_call",
message=f"Positional Args: {args}, Keyword Args: {kwargs}",
level="info",
)
if "logger_fn" in kwargs:
user_logger_fn = kwargs["logger_fn"]
# CRASH REPORTING TELEMETRY
crash_reporting(*args, **kwargs)
# INIT LOGGER - for user-specified integrations
model = args[0] if len(args) > 0 else kwargs["model"]
call_type = original_function.__name__
if call_type == CallTypes.completion.value or call_type == CallTypes.acompletion.value:
if len(args) > 1:
messages = args[1]
elif kwargs.get("messages", None):
messages = kwargs["messages"]
### PRE-CALL RULES ###
rules_obj.pre_call_rules(input="".join(m["content"] for m in messages if isinstance(m["content"], str)), model=model)
elif call_type == CallTypes.embedding.value:
messages = args[1] if len(args) > 1 else kwargs["input"]
stream = True if "stream" in kwargs and kwargs["stream"] == True else False
logging_obj = Logging(model=model, messages=messages, stream=stream, litellm_call_id=kwargs["litellm_call_id"], function_id=function_id, call_type=call_type, start_time=start_time)
return logging_obj
except Exception as e:
import logging
logging.debug(f"[Non-Blocking] {traceback.format_exc()}; args - {args}; kwargs - {kwargs}")
raise e
def post_call_processing(original_response, model):
try:
call_type = original_function.__name__
if call_type == CallTypes.completion.value or call_type == CallTypes.acompletion.value:
model_response = original_response['choices'][0]['message']['content']
### POST-CALL RULES ###
rules_obj.post_call_rules(input=model_response, model=model)
except Exception as e:
raise e
def crash_reporting(*args, **kwargs):
if litellm.telemetry:
try:
model = args[0] if len(args) > 0 else kwargs["model"]
exception = kwargs["exception"] if "exception" in kwargs else None
custom_llm_provider = (
kwargs["custom_llm_provider"]
if "custom_llm_provider" in kwargs
else None
)
safe_crash_reporting(
model=model,
exception=exception,
custom_llm_provider=custom_llm_provider,
) # log usage-crash details. Do not log any user details. If you want to turn this off, set `litellm.telemetry=False`.
except:
# [Non-Blocking Error]
pass
def wrapper(*args, **kwargs):
start_time = datetime.datetime.now()
result = None
logging_obj = kwargs.get("litellm_logging_obj", None)
# only set litellm_call_id if its not in kwargs
if "litellm_call_id" not in kwargs:
kwargs["litellm_call_id"] = str(uuid.uuid4())
try:
model = args[0] if len(args) > 0 else kwargs["model"]
except:
raise ValueError("model param not passed in.")
try:
if logging_obj is None:
logging_obj = function_setup(start_time, *args, **kwargs)
kwargs["litellm_logging_obj"] = logging_obj
# [OPTIONAL] CHECK BUDGET
if litellm.max_budget:
if litellm._current_cost > litellm.max_budget:
raise BudgetExceededError(current_cost=litellm._current_cost, max_budget=litellm.max_budget)
# [OPTIONAL] CHECK CACHE
# remove this after deprecating litellm.caching
print_verbose(f"litellm.caching: {litellm.caching}; litellm.caching_with_models: {litellm.caching_with_models}; litellm.cache: {litellm.cache}")
if (litellm.caching or litellm.caching_with_models) and litellm.cache is None:
litellm.cache = Cache()
print_verbose(f"kwargs[caching]: {kwargs.get('caching', False)}; litellm.cache: {litellm.cache}")
# if caching is false, don't run this
if (kwargs.get("caching", None) is None and litellm.cache is not None) or kwargs.get("caching", False) == True: # allow users to control returning cached responses from the completion function
# checking cache
if (litellm.cache != None or litellm.caching or litellm.caching_with_models):
print_verbose(f"Checking Cache")
cached_result = litellm.cache.get_cache(*args, **kwargs)
if cached_result != None:
print_verbose(f"Cache Hit!")
if "detail" in cached_result:
# implies an error occurred
pass
else:
call_type = original_function.__name__
if call_type == CallTypes.completion.value and isinstance(cached_result, dict):
return convert_to_model_response_object(response_object=cached_result, model_response_object=ModelResponse())
else:
return cached_result
# MODEL CALL
result = original_function(*args, **kwargs)
end_time = datetime.datetime.now()
if "stream" in kwargs and kwargs["stream"] == True:
# TODO: Add to cache for streaming
if "complete_response" in kwargs and kwargs["complete_response"] == True:
chunks = []
for idx, chunk in enumerate(result):
chunks.append(chunk)
return litellm.stream_chunk_builder(chunks)
else:
return result
elif "acompletion" in kwargs and kwargs["acompletion"] == True:
return result
### POST-CALL RULES ###
post_call_processing(original_response=result, model=model)
# [OPTIONAL] ADD TO CACHE
if litellm.caching or litellm.caching_with_models or litellm.cache != None: # user init a cache object
litellm.cache.add_cache(result, *args, **kwargs)
# LOG SUCCESS - handle streaming success logging in the _next_ object, remove `handle_success` once it's deprecated
threading.Thread(target=logging_obj.success_handler, args=(result, start_time, end_time)).start()
# threading.Thread(target=logging_obj.success_handler, args=(result, start_time, end_time)).start()
my_thread = threading.Thread(
target=handle_success, args=(args, kwargs, result, start_time, end_time)
) # don't interrupt execution of main thread
my_thread.start()
# RETURN RESULT
result._response_ms = (end_time - start_time).total_seconds() * 1000 # return response latency in ms like openai
return result
except Exception as e:
call_type = original_function.__name__
if call_type == CallTypes.completion.value:
num_retries = (
kwargs.get("num_retries", None)
or litellm.num_retries
or None
)
litellm.num_retries = None # set retries to None to prevent infinite loops
context_window_fallback_dict = kwargs.get("context_window_fallback_dict", {})
if num_retries:
if (isinstance(e, openai.APIError)
or isinstance(e, openai.Timeout)):
kwargs["num_retries"] = num_retries
return litellm.completion_with_retries(*args, **kwargs)
elif isinstance(e, litellm.exceptions.ContextWindowExceededError) and context_window_fallback_dict and model in context_window_fallback_dict:
if len(args) > 0:
args[0] = context_window_fallback_dict[model]
else:
kwargs["model"] = context_window_fallback_dict[model]
return original_function(*args, **kwargs)
traceback_exception = traceback.format_exc()
crash_reporting(*args, **kwargs, exception=traceback_exception)
end_time = datetime.datetime.now()
# LOG FAILURE - handle streaming failure logging in the _next_ object, remove `handle_failure` once it's deprecated
if logging_obj:
threading.Thread(target=logging_obj.failure_handler, args=(e, traceback_exception, start_time, end_time)).start()
my_thread = threading.Thread(
target=handle_failure,
args=(e, traceback_exception, start_time, end_time, args, kwargs),
) # don't interrupt execution of main thread
my_thread.start()
if hasattr(e, "message"):
if (
liteDebuggerClient and liteDebuggerClient.dashboard_url != None
): # make it easy to get to the debugger logs if you've initialized it
e.message += f"\n Check the log in your dashboard - {liteDebuggerClient.dashboard_url}"
raise e
async def wrapper_async(*args, **kwargs):
start_time = datetime.datetime.now()
result = None
logging_obj = kwargs.get("litellm_logging_obj", None)
# only set litellm_call_id if its not in kwargs
if "litellm_call_id" not in kwargs:
kwargs["litellm_call_id"] = str(uuid.uuid4())
try:
model = args[0] if len(args) > 0 else kwargs["model"]
except:
raise ValueError("model param not passed in.")
try:
if logging_obj is None:
logging_obj = function_setup(start_time, *args, **kwargs)
kwargs["litellm_logging_obj"] = logging_obj
# [OPTIONAL] CHECK BUDGET
if litellm.max_budget:
if litellm._current_cost > litellm.max_budget:
raise BudgetExceededError(current_cost=litellm._current_cost, max_budget=litellm.max_budget)
# [OPTIONAL] CHECK CACHE
print_verbose(f"litellm.cache: {litellm.cache}")
print_verbose(f"kwargs[caching]: {kwargs.get('caching', False)}; litellm.cache: {litellm.cache}")
# if caching is false, don't run this
if (kwargs.get("caching", None) is None and litellm.cache is not None) or kwargs.get("caching", False) == True: # allow users to control returning cached responses from the completion function
# checking cache
if (litellm.cache != None):
print_verbose(f"Checking Cache")
cached_result = litellm.cache.get_cache(*args, **kwargs)
if cached_result != None:
print_verbose(f"Cache Hit!")
call_type = original_function.__name__
if call_type == CallTypes.acompletion.value and isinstance(cached_result, dict):
return convert_to_model_response_object(response_object=cached_result, model_response_object=ModelResponse())
else:
return cached_result
# MODEL CALL
result = await original_function(*args, **kwargs)
end_time = datetime.datetime.now()
if "stream" in kwargs and kwargs["stream"] == True:
if "complete_response" in kwargs and kwargs["complete_response"] == True:
chunks = []
for idx, chunk in enumerate(result):
chunks.append(chunk)
return litellm.stream_chunk_builder(chunks)
else:
return result
### POST-CALL RULES ###
post_call_processing(original_response=result, model=model)
# [OPTIONAL] ADD TO CACHE
if litellm.caching or litellm.caching_with_models or litellm.cache != None: # user init a cache object
litellm.cache.add_cache(result, *args, **kwargs)
# LOG SUCCESS - handle streaming success logging in the _next_ object, remove `handle_success` once it's deprecated
threading.Thread(target=logging_obj.success_handler, args=(result, start_time, end_time)).start()
# RETURN RESULT
if isinstance(result, ModelResponse):
result._response_ms = (end_time - start_time).total_seconds() * 1000 # return response latency in ms like openai
return result
except Exception as e:
call_type = original_function.__name__
if call_type == CallTypes.acompletion.value:
num_retries = (
kwargs.get("num_retries", None)
or litellm.num_retries
or None
)
litellm.num_retries = None # set retries to None to prevent infinite loops
context_window_fallback_dict = kwargs.get("context_window_fallback_dict", {})
if num_retries:
kwargs["num_retries"] = num_retries
kwargs["original_function"] = original_function
if (isinstance(e, openai.RateLimitError)): # rate limiting specific error
kwargs["retry_strategy"] = "exponential_backoff_retry"
elif (isinstance(e, openai.APIError)): # generic api error
kwargs["retry_strategy"] = "constant_retry"
return await litellm.acompletion_with_retries(*args, **kwargs)
elif isinstance(e, litellm.exceptions.ContextWindowExceededError) and context_window_fallback_dict and model in context_window_fallback_dict:
if len(args) > 0:
args[0] = context_window_fallback_dict[model]
else:
kwargs["model"] = context_window_fallback_dict[model]
return await original_function(*args, **kwargs)
traceback_exception = traceback.format_exc()
crash_reporting(*args, **kwargs, exception=traceback_exception)
end_time = datetime.datetime.now()
# LOG FAILURE - handle streaming failure logging in the _next_ object, remove `handle_failure` once it's deprecated
if logging_obj:
threading.Thread(target=logging_obj.failure_handler, args=(e, traceback_exception, start_time, end_time)).start()
raise e
# Use httpx to determine if the original function is a coroutine
is_coroutine = inspect.iscoroutinefunction(original_function)
# Return the appropriate wrapper based on the original function type
if is_coroutine:
return wrapper_async
else:
return wrapper
####### USAGE CALCULATOR ################
# Extract the number of billion parameters from the model name
# only used for together_computer LLMs
def get_model_params_and_category(model_name):
import re
params_match = re.search(r'(\d+b)', model_name) # catch all decimals like 3b, 70b, etc
category = None
if params_match != None:
params_match = params_match.group(1)
params_match = params_match.replace("b", "")
params_billion = float(params_match)
# Determine the category based on the number of parameters
if params_billion <= 3.0:
category = "together-ai-up-to-3b"
elif params_billion <= 7.0:
category = "together-ai-3.1b-7b"
elif params_billion <= 20.0:
category = "together-ai-7.1b-20b"
elif params_billion <= 40.0:
category = "together-ai-20.1b-40b"
elif params_billion <= 70.0:
category = "together-ai-40.1b-70b"
return category
return None
def get_replicate_completion_pricing(completion_response=None, total_time=0.0):
# see https://replicate.com/pricing
a100_40gb_price_per_second_public = 0.001150
# for all litellm currently supported LLMs, almost all requests go to a100_80gb
a100_80gb_price_per_second_public = 0.001400 # assume all calls sent to A100 80GB for now
if total_time == 0.0:
start_time = completion_response['created']
end_time = completion_response["ended"]
total_time = end_time - start_time
return a100_80gb_price_per_second_public*total_time
def _select_tokenizer(model: str):
# cohere
import pkg_resources
if model in litellm.cohere_models:
tokenizer = Tokenizer.from_pretrained("Cohere/command-nightly")
return {"type": "huggingface_tokenizer", "tokenizer": tokenizer}
# anthropic
elif model in litellm.anthropic_models:
# Read the JSON file
filename = pkg_resources.resource_filename(__name__, 'llms/tokenizers/anthropic_tokenizer.json')
with open(filename, 'r') as f:
json_data = json.load(f)
# Decode the JSON data from utf-8
json_data_decoded = json.dumps(json_data, ensure_ascii=False)
# Convert to str
json_str = str(json_data_decoded)
# load tokenizer
tokenizer = Tokenizer.from_str(json_str)
return {"type": "huggingface_tokenizer", "tokenizer": tokenizer}
# llama2
elif "llama-2" in model.lower():
tokenizer = Tokenizer.from_pretrained("hf-internal-testing/llama-tokenizer")
return {"type": "huggingface_tokenizer", "tokenizer": tokenizer}
# default - tiktoken
else:
return {"type": "openai_tokenizer", "tokenizer": encoding}
def encode(model: str, text: str):
"""
Encodes the given text using the specified model.
Args:
model (str): The name of the model to use for tokenization.
text (str): The text to be encoded.
Returns:
enc: The encoded text.
"""
tokenizer_json = _select_tokenizer(model=model)
enc = tokenizer_json["tokenizer"].encode(text)
return enc
def decode(model: str, tokens: List[int]):
tokenizer_json = _select_tokenizer(model=model)
dec = tokenizer_json["tokenizer"].decode(tokens)
return dec
def openai_token_counter(messages, model="gpt-3.5-turbo-0613"):
"""
Return the number of tokens used by a list of messages.
Borrowed from https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb.
"""
try:
encoding = tiktoken.encoding_for_model(model)
except KeyError:
print_verbose("Warning: model not found. Using cl100k_base encoding.")
encoding = tiktoken.get_encoding("cl100k_base")
if model in {
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-16k-0613",
"gpt-4-0314",
"gpt-4-32k-0314",
"gpt-4-0613",
"gpt-4-32k-0613",
}:
tokens_per_message = 3
tokens_per_name = 1
elif model == "gpt-3.5-turbo-0301":
tokens_per_message = 4 # every message follows <|start|>{role/name}\n{content}<|end|>\n
tokens_per_name = -1 # if there's a name, the role is omitted
elif "gpt-3.5-turbo" in model:
print_verbose("Warning: gpt-3.5-turbo may update over time. Returning num tokens assuming gpt-3.5-turbo-0613.")
return openai_token_counter(messages, model="gpt-3.5-turbo-0613")
elif "gpt-4" in model:
print_verbose("Warning: gpt-4 may update over time. Returning num tokens assuming gpt-4-0613.")
return openai_token_counter(messages, model="gpt-4-0613")
else:
raise NotImplementedError(
f"""num_tokens_from_messages() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens."""
)
num_tokens = 0
for message in messages:
num_tokens += tokens_per_message
for key, value in message.items():
num_tokens += len(encoding.encode(value))
if key == "name":
num_tokens += tokens_per_name
num_tokens += 3 # every reply is primed with <|start|>assistant<|message|>
return num_tokens
def token_counter(model="", text=None, messages: Optional[List] = None):
"""
Count the number of tokens in a given text using a specified model.
Args:
model (str): The name of the model to use for tokenization. Default is an empty string.
text (str): The raw text string to be passed to the model. Default is None.
messages (Optional[List[Dict[str, str]]]): Alternative to passing in text. A list of dictionaries representing messages with "role" and "content" keys. Default is None.
Returns:
int: The number of tokens in the text.
"""
# use tiktoken, anthropic, cohere or llama2's tokenizer depending on the model
if text == None:
if messages is not None:
text = "".join([message["content"] for message in messages])
else:
raise ValueError("text and messages cannot both be None")
num_tokens = 0
if model is not None:
tokenizer_json = _select_tokenizer(model=model)
if tokenizer_json["type"] == "huggingface_tokenizer":
enc = tokenizer_json["tokenizer"].encode(text)
num_tokens = len(enc.ids)
elif tokenizer_json["type"] == "openai_tokenizer":
if model in litellm.open_ai_chat_completion_models and messages != None:
num_tokens = openai_token_counter(messages, model=model)
else:
enc = tokenizer_json["tokenizer"].encode(text)
num_tokens = len(enc)
else:
num_tokens = len(encoding.encode(text))
return num_tokens
def cost_per_token(model="gpt-3.5-turbo", prompt_tokens=0, completion_tokens=0):
"""
Calculates the cost per token for a given model, prompt tokens, and completion tokens.
Parameters:
model (str): The name of the model to use. Default is "gpt-3.5-turbo".
prompt_tokens (int): The number of tokens in the prompt.
completion_tokens (int): The number of tokens in the completion.
Returns:
tuple: A tuple containing the cost in USD dollars for prompt tokens and completion tokens, respectively.
"""
# given
prompt_tokens_cost_usd_dollar = 0
completion_tokens_cost_usd_dollar = 0
model_cost_ref = litellm.model_cost
if model in model_cost_ref:
prompt_tokens_cost_usd_dollar = (
model_cost_ref[model]["input_cost_per_token"] * prompt_tokens
)
completion_tokens_cost_usd_dollar = (
model_cost_ref[model]["output_cost_per_token"] * completion_tokens
)
return prompt_tokens_cost_usd_dollar, completion_tokens_cost_usd_dollar
else:
# calculate average input cost
input_cost_sum = 0
output_cost_sum = 0
model_cost_ref = litellm.model_cost
for model in model_cost_ref:
input_cost_sum += model_cost_ref[model]["input_cost_per_token"]
output_cost_sum += model_cost_ref[model]["output_cost_per_token"]
avg_input_cost = input_cost_sum / len(model_cost_ref.keys())
avg_output_cost = output_cost_sum / len(model_cost_ref.keys())
prompt_tokens_cost_usd_dollar = avg_input_cost * prompt_tokens
completion_tokens_cost_usd_dollar = avg_output_cost * completion_tokens
return prompt_tokens_cost_usd_dollar, completion_tokens_cost_usd_dollar
def completion_cost(
completion_response=None,
model="gpt-3.5-turbo",
prompt="",
messages: List = [],
completion="",
total_time=0.0, # used for replicate
):
"""
Calculate the cost of a given completion call fot GPT-3.5-turbo, llama2, any litellm supported llm.
Parameters:
completion_response (litellm.ModelResponses): [Required] The response received from a LiteLLM completion request.
[OPTIONAL PARAMS]
model (str): Optional. The name of the language model used in the completion calls
prompt (str): Optional. The input prompt passed to the llm
completion (str): Optional. The output completion text from the llm
total_time (float): Optional. (Only used for Replicate LLMs) The total time used for the request in seconds
Returns:
float: The cost in USD dollars for the completion based on the provided parameters.
Note:
- If completion_response is provided, the function extracts token information and the model name from it.
- If completion_response is not provided, the function calculates token counts based on the model and input text.
- The cost is calculated based on the model, prompt tokens, and completion tokens.
- For certain models containing "togethercomputer" in the name, prices are based on the model size.
- For Replicate models, the cost is calculated based on the total time used for the request.
Exceptions:
- If an error occurs during execution, the function returns 0.0 without blocking the user's execution path.
"""
try:
if messages != []:
prompt = " ".join([message["content"] for message in messages])
# Handle Inputs to completion_cost
prompt_tokens = 0
completion_tokens = 0
if completion_response != None:
# get input/output tokens from completion_response
prompt_tokens = completion_response['usage']['prompt_tokens']
completion_tokens = completion_response['usage']['completion_tokens']
model = completion_response['model'] # get model from completion_response
else:
prompt_tokens = token_counter(model=model, text=prompt)
completion_tokens = token_counter(model=model, text=completion)
# Calculate cost based on prompt_tokens, completion_tokens
if "togethercomputer" in model:
# together ai prices based on size of llm
# get_model_params_and_category takes a model name and returns the category of LLM size it is in model_prices_and_context_window.json
model = get_model_params_and_category(model)
# replicate llms are calculate based on time for request running
# see https://replicate.com/pricing
elif (
model in litellm.replicate_models or
"replicate" in model
):
return get_replicate_completion_pricing(completion_response, total_time)
prompt_tokens_cost_usd_dollar, completion_tokens_cost_usd_dollar = cost_per_token(
model=model, prompt_tokens=prompt_tokens, completion_tokens=completion_tokens
)
return prompt_tokens_cost_usd_dollar + completion_tokens_cost_usd_dollar
except:
return 0.0 # this should not block a users execution path
####### HELPER FUNCTIONS ################
def register_model(model_cost: Union[str, dict]):
"""
Register new / Override existing models (and their pricing) to specific providers.
Provide EITHER a model cost dictionary or a url to a hosted json blob
Example usage:
model_cost_dict = {
"gpt-4": {
"max_tokens": 8192,
"input_cost_per_token": 0.00003,
"output_cost_per_token": 0.00006,
"litellm_provider": "openai",
"mode": "chat"
},
}
"""
loaded_model_cost = {}
if isinstance(model_cost, dict):
loaded_model_cost = model_cost
elif isinstance(model_cost, str):
loaded_model_cost = litellm.get_model_cost_map(url=model_cost)
for key, value in loaded_model_cost.items():
## override / add new keys to the existing model cost dictionary
litellm.model_cost[key] = loaded_model_cost[key]
# add new model names to provider lists
if value.get('litellm_provider') == 'openai':
if key not in litellm.open_ai_chat_completion_models:
litellm.open_ai_chat_completion_models.append(key)
elif value.get('litellm_provider') == 'text-completion-openai':
if key not in litellm.open_ai_text_completion_models:
litellm.open_ai_text_completion_models.append(key)
elif value.get('litellm_provider') == 'cohere':
if key not in litellm.cohere_models:
litellm.cohere_models.append(key)
elif value.get('litellm_provider') == 'anthropic':
if key not in litellm.anthropic_models:
litellm.anthropic_models.append(key)
elif value.get('litellm_provider') == 'openrouter':
split_string = key.split('/', 1)
if key not in litellm.openrouter_models:
litellm.openrouter_models.append(split_string[1])
elif value.get('litellm_provider') == 'vertex_ai-text-models':
if key not in litellm.vertex_text_models:
litellm.vertex_text_models.append(key)
elif value.get('litellm_provider') == 'vertex_ai-code-text-models':
if key not in litellm.vertex_code_text_models:
litellm.vertex_code_text_models.append(key)
elif value.get('litellm_provider') == 'vertex_ai-chat-models':
if key not in litellm.vertex_chat_models:
litellm.vertex_chat_models.append(key)
elif value.get('litellm_provider') == 'vertex_ai-code-chat-models':
if key not in litellm.vertex_code_chat_models:
litellm.vertex_code_chat_models.append(key)
elif value.get('litellm_provider') == 'ai21':
if key not in litellm.ai21_models:
litellm.ai21_models.append(key)
elif value.get('litellm_provider') == 'nlp_cloud':
if key not in litellm.nlp_cloud_models:
litellm.nlp_cloud_models.append(key)
elif value.get('litellm_provider') == 'aleph_alpha':
if key not in litellm.aleph_alpha_models:
litellm.aleph_alpha_models.append(key)
elif value.get('litellm_provider') == 'bedrock':
if key not in litellm.bedrock_models:
litellm.bedrock_models.append(key)
return model_cost
def get_litellm_params(
return_async=False,
api_key=None,
force_timeout=600,
azure=False,
logger_fn=None,
verbose=False,
hugging_face=False,
replicate=False,
together_ai=False,
custom_llm_provider=None,
api_base=None,
litellm_call_id=None,
model_alias_map=None,
completion_call_id=None,
metadata=None
):
litellm_params = {
"return_async": return_async,
"api_key": api_key,
"force_timeout": force_timeout,
"logger_fn": logger_fn,
"verbose": verbose,
"custom_llm_provider": custom_llm_provider,
"api_base": api_base,
"litellm_call_id": litellm_call_id,
"model_alias_map": model_alias_map,
"completion_call_id": completion_call_id,
"metadata": metadata,
"stream_response": {} # litellm_call_id: ModelResponse Dict
}
return litellm_params
def get_optional_params( # use the openai defaults
# 12 optional params
functions=[],
function_call="",
temperature=None,
top_p=None,
n=None,
stream=False,
stop=None,
max_tokens=None,
presence_penalty=None,
frequency_penalty=0,
logit_bias={},
user="",
model=None,
custom_llm_provider="",
response_format=None,
seed=None,
tools=None,
tool_choice=None,
max_retries=None,
**kwargs
):
# retrieve all parameters passed to the function
passed_params = locals()
special_params = passed_params.pop("kwargs")
for k, v in special_params.items():
passed_params[k] = v
default_params = {
"functions":[],
"function_call":"",
"temperature":None,
"top_p":None,
"n":None,
"stream":None,
"stop":None,
"max_tokens":None,
"presence_penalty":None,
"frequency_penalty":None,
"logit_bias":{},
"user":"",
"model":None,
"custom_llm_provider":"",
"response_format": None,
"seed": None,
"tools": None,
"tool_choice": None,
"max_retries": None,
}
# filter out those parameters that were passed with non-default values
non_default_params = {k: v for k, v in passed_params.items() if (k != "model" and k != "custom_llm_provider" and k in default_params and v != default_params[k])}
optional_params = {}
## raise exception if function calling passed in for a provider that doesn't support it
if "functions" in non_default_params or "function_call" in non_default_params:
if custom_llm_provider != "openai" and custom_llm_provider != "text-completion-openai" and custom_llm_provider != "azure":
if litellm.add_function_to_prompt: # if user opts to add it to prompt instead
optional_params["functions_unsupported_model"] = non_default_params.pop("functions")
else:
raise UnsupportedParamsError(status_code=500, message=f"Function calling is not supported by {custom_llm_provider}. To add it to the prompt, set `litellm.add_function_to_prompt = True`.")
def _check_valid_arg(supported_params):
print_verbose(f"\nLiteLLM completion() model= {model}; provider = {custom_llm_provider}")
print_verbose(f"\nLiteLLM: Params passed to completion() {passed_params}")
print_verbose(f"\nLiteLLM: Non-Default params passed to completion() {non_default_params}")
unsupported_params = {}
for k in non_default_params.keys():
if k not in supported_params:
if k == "n" and n == 1: # langchain sends n=1 as a default value
pass
# Always keeps this in elif code blocks
else:
unsupported_params[k] = non_default_params[k]
if unsupported_params and not litellm.drop_params:
raise UnsupportedParamsError(status_code=500, message=f"{custom_llm_provider} does not support parameters: {unsupported_params}. To drop these, set `litellm.drop_params=True`.")
## raise exception if provider doesn't support passed in param
if custom_llm_provider == "anthropic":
## check if unsupported param passed in
supported_params = ["stream", "stop", "temperature", "top_p", "max_tokens"]
_check_valid_arg(supported_params=supported_params)
# handle anthropic params
if stream:
optional_params["stream"] = stream
if stop is not None:
if type(stop) == str:
stop = [stop] # openai can accept str/list for stop
optional_params["stop_sequences"] = stop
if temperature is not None:
optional_params["temperature"] = temperature
if top_p is not None:
optional_params["top_p"] = top_p
if max_tokens is not None:
optional_params["max_tokens_to_sample"] = max_tokens
elif custom_llm_provider == "cohere":
## check if unsupported param passed in
supported_params = ["stream", "temperature", "max_tokens", "logit_bias", "top_p", "frequency_penalty", "presence_penalty", "stop", "n"]
_check_valid_arg(supported_params=supported_params)
# handle cohere params
if stream:
optional_params["stream"] = stream
if temperature is not None:
optional_params["temperature"] = temperature
if max_tokens is not None:
optional_params["max_tokens"] = max_tokens
if n is not None:
optional_params["num_generations"] = n
if logit_bias != {}:
optional_params["logit_bias"] = logit_bias
if top_p is not None:
optional_params["p"] = top_p
if frequency_penalty is not None:
optional_params["frequency_penalty"] = frequency_penalty
if presence_penalty is not None:
optional_params["presence_penalty"] = presence_penalty
if stop is not None:
optional_params["stop_sequences"] = stop
elif custom_llm_provider == "maritalk":
## check if unsupported param passed in
supported_params = ["stream", "temperature", "max_tokens", "top_p", "presence_penalty", "stop"]
_check_valid_arg(supported_params=supported_params)
# handle cohere params
if stream:
optional_params["stream"] = stream
if temperature is not None:
optional_params["temperature"] = temperature
if max_tokens is not None:
optional_params["max_tokens"] = max_tokens
if logit_bias != {}:
optional_params["logit_bias"] = logit_bias
if top_p is not None:
optional_params["p"] = top_p
if presence_penalty is not None:
optional_params["repetition_penalty"] = presence_penalty
if stop is not None:
optional_params["stopping_tokens"] = stop
elif custom_llm_provider == "replicate":
## check if unsupported param passed in
supported_params = ["stream", "temperature", "max_tokens", "top_p", "stop", "seed"]
_check_valid_arg(supported_params=supported_params)
if stream:
optional_params["stream"] = stream
return optional_params
if max_tokens is not None:
if "vicuna" in model or "flan" in model:
optional_params["max_length"] = max_tokens
elif "meta/codellama-13b" in model:
optional_params["max_tokens"] = max_tokens
else:
optional_params["max_new_tokens"] = max_tokens
if temperature is not None:
optional_params["temperature"] = temperature
if top_p is not None:
optional_params["top_p"] = top_p
if stop is not None:
optional_params["stop_sequences"] = stop
elif custom_llm_provider == "huggingface":
## check if unsupported param passed in
supported_params = ["stream", "temperature", "max_tokens", "top_p", "stop", "n"]
_check_valid_arg(supported_params=supported_params)
# temperature, top_p, n, stream, stop, max_tokens, n, presence_penalty default to None
if temperature is not None:
if temperature == 0.0 or temperature == 0:
# hugging face exception raised when temp==0
# Failed: Error occurred: HuggingfaceException - Input validation error: `temperature` must be strictly positive
temperature = 0.01
optional_params["temperature"] = temperature
if top_p is not None:
optional_params["top_p"] = top_p
if n is not None:
optional_params["best_of"] = n
optional_params["do_sample"] = True # Need to sample if you want best of for hf inference endpoints
if stream is not None:
optional_params["stream"] = stream
if stop is not None:
optional_params["stop"] = stop
if max_tokens is not None:
# HF TGI raises the following exception when max_new_tokens==0
# Failed: Error occurred: HuggingfaceException - Input validation error: `max_new_tokens` must be strictly positive
if max_tokens == 0:
max_tokens = 1
optional_params["max_new_tokens"] = max_tokens
if n is not None:
optional_params["best_of"] = n
if presence_penalty is not None:
optional_params["repetition_penalty"] = presence_penalty
if "echo" in passed_params:
# https://huggingface.co/docs/huggingface_hub/main/en/package_reference/inference_client#huggingface_hub.InferenceClient.text_generation.decoder_input_details
# Return the decoder input token logprobs and ids. You must set details=True as well for it to be taken into account. Defaults to False
optional_params["decoder_input_details"] = special_params["echo"]
passed_params.pop("echo", None) # since we handle translating echo, we should not send it to TGI request
elif custom_llm_provider == "together_ai":
## check if unsupported param passed in
supported_params = ["stream", "temperature", "max_tokens", "top_p", "stop", "frequency_penalty"]
_check_valid_arg(supported_params=supported_params)
if stream:
optional_params["stream_tokens"] = stream
if temperature is not None:
optional_params["temperature"] = temperature
if top_p is not None:
optional_params["top_p"] = top_p
if max_tokens is not None:
optional_params["max_tokens"] = max_tokens
if frequency_penalty is not None:
optional_params["repetition_penalty"] = frequency_penalty # https://docs.together.ai/reference/inference
if stop is not None:
optional_params["stop"] = stop
elif custom_llm_provider == "ai21":
## check if unsupported param passed in
supported_params = ["stream", "n", "temperature", "max_tokens", "top_p", "stop", "frequency_penalty", "presence_penalty"]
_check_valid_arg(supported_params=supported_params)
if stream:
optional_params["stream"] = stream
if n is not None:
optional_params["numResults"] = n
if max_tokens is not None:
optional_params["maxTokens"] = max_tokens
if temperature is not None:
optional_params["temperature"] = temperature
if top_p is not None:
optional_params["topP"] = top_p
if stop is not None:
optional_params["stopSequences"] = stop
if frequency_penalty is not None:
optional_params["frequencyPenalty"] = {"scale": frequency_penalty}
if presence_penalty is not None:
optional_params["presencePenalty"] = {"scale": presence_penalty}
elif custom_llm_provider == "palm": # https://developers.generativeai.google/tutorials/curl_quickstart
## check if unsupported param passed in
supported_params = ["temperature", "top_p", "stream", "n", "stop", "max_tokens"]
_check_valid_arg(supported_params=supported_params)
if temperature is not None:
optional_params["temperature"] = temperature
if top_p is not None:
optional_params["top_p"] = top_p
if stream:
optional_params["stream"] = stream
if n is not None:
optional_params["candidate_count"] = n
if stop is not None:
optional_params["stop_sequences"] = stop
if max_tokens is not None:
optional_params["max_output_tokens"] = max_tokens
elif (
custom_llm_provider == "vertex_ai"
):
## check if unsupported param passed in
supported_params = ["temperature", "top_p", "max_tokens", "stream"]
_check_valid_arg(supported_params=supported_params)
if temperature is not None:
optional_params["temperature"] = temperature
if top_p is not None:
optional_params["top_p"] = top_p
if stream:
optional_params["stream"] = stream
if max_tokens is not None:
optional_params["max_output_tokens"] = max_tokens
elif custom_llm_provider == "sagemaker":
if "llama-2" in model:
# llama-2 models on sagemaker support the following args
"""
max_new_tokens: Model generates text until the output length (excluding the input context length) reaches max_new_tokens. If specified, it must be a positive integer.
temperature: Controls the randomness in the output. Higher temperature results in output sequence with low-probability words and lower temperature results in output sequence with high-probability words. If temperature -> 0, it results in greedy decoding. If specified, it must be a positive float.
top_p: In each step of text generation, sample from the smallest possible set of words with cumulative probability top_p. If specified, it must be a float between 0 and 1.
return_full_text: If True, input text will be part of the output generated text. If specified, it must be boolean. The default value for it is False.
"""
## check if unsupported param passed in
supported_params = ["temperature", "max_tokens", "stream"]
_check_valid_arg(supported_params=supported_params)
if max_tokens is not None:
optional_params["max_new_tokens"] = max_tokens
if temperature is not None:
optional_params["temperature"] = temperature
if top_p is not None:
optional_params["top_p"] = top_p
if stream:
optional_params["stream"] = stream
else:
## check if unsupported param passed in
supported_params = []
_check_valid_arg(supported_params=supported_params)
elif custom_llm_provider == "bedrock":
if "ai21" in model:
supported_params = ["max_tokens", "temperature", "stop", "top_p", "stream"]
_check_valid_arg(supported_params=supported_params)
# params "maxTokens":200,"temperature":0,"topP":250,"stop_sequences":[],
# https://us-west-2.console.aws.amazon.com/bedrock/home?region=us-west-2#/providers?model=j2-ultra
if max_tokens is not None:
optional_params["maxTokens"] = max_tokens
if temperature is not None:
optional_params["temperature"] = temperature
if stop is not None:
optional_params["stop_sequences"] = stop
if top_p is not None:
optional_params["topP"] = top_p
if stream:
optional_params["stream"] = stream
elif "anthropic" in model:
supported_params = ["max_tokens", "temperature", "stop", "top_p", "stream"]
_check_valid_arg(supported_params=supported_params)
# anthropic params on bedrock
# \"max_tokens_to_sample\":300,\"temperature\":0.5,\"top_p\":1,\"stop_sequences\":[\"\\\\n\\\\nHuman:\"]}"
if max_tokens is not None:
optional_params["max_tokens_to_sample"] = max_tokens
if temperature is not None:
optional_params["temperature"] = temperature
if top_p is not None:
optional_params["top_p"] = top_p
if stop is not None:
optional_params["stop_sequences"] = stop
if stream:
optional_params["stream"] = stream
elif "amazon" in model: # amazon titan llms
supported_params = ["max_tokens", "temperature", "stop", "top_p", "stream"]
_check_valid_arg(supported_params=supported_params)
# see https://us-west-2.console.aws.amazon.com/bedrock/home?region=us-west-2#/providers?model=titan-large
if max_tokens is not None:
optional_params["maxTokenCount"] = max_tokens
if temperature is not None:
optional_params["temperature"] = temperature
if stop is not None:
optional_params["stopSequences"] = stop
if top_p is not None:
optional_params["topP"] = top_p
if stream:
optional_params["stream"] = stream
elif "meta" in model: # amazon / meta llms
supported_params = ["max_tokens", "temperature", "top_p", "stream"]
_check_valid_arg(supported_params=supported_params)
# see https://us-west-2.console.aws.amazon.com/bedrock/home?region=us-west-2#/providers?model=titan-large
if max_tokens is not None:
optional_params["max_gen_len"] = max_tokens
if temperature is not None:
optional_params["temperature"] = temperature
if top_p is not None:
optional_params["top_p"] = top_p
if stream:
optional_params["stream"] = stream
elif "cohere" in model: # cohere models on bedrock
supported_params = ["stream", "temperature", "max_tokens", "logit_bias", "top_p", "frequency_penalty", "presence_penalty", "stop"]
_check_valid_arg(supported_params=supported_params)
# handle cohere params
if stream:
optional_params["stream"] = stream
if temperature is not None:
optional_params["temperature"] = temperature
if max_tokens is not None:
optional_params["max_tokens"] = max_tokens
if n is not None:
optional_params["num_generations"] = n
if logit_bias != {}:
optional_params["logit_bias"] = logit_bias
if top_p is not None:
optional_params["p"] = top_p
if frequency_penalty is not None:
optional_params["frequency_penalty"] = frequency_penalty
if presence_penalty is not None:
optional_params["presence_penalty"] = presence_penalty
if stop is not None:
optional_params["stop_sequences"] = stop
elif custom_llm_provider == "aleph_alpha":
supported_params = ["max_tokens", "stream", "top_p", "temperature", "presence_penalty", "frequency_penalty", "n", "stop"]
_check_valid_arg(supported_params=supported_params)
if max_tokens is not None:
optional_params["maximum_tokens"] = max_tokens
if stream:
optional_params["stream"] = stream
if temperature is not None:
optional_params["temperature"] = temperature
if top_p is not None:
optional_params["top_p"] = top_p
if presence_penalty is not None:
optional_params["presence_penalty"] = presence_penalty
if frequency_penalty is not None:
optional_params["frequency_penalty"] = frequency_penalty
if n is not None:
optional_params["n"] = n
if stop is not None:
optional_params["stop_sequences"] = stop
elif custom_llm_provider == "ollama":
supported_params = ["max_tokens", "stream", "top_p", "temperature", "frequency_penalty", "stop"]
_check_valid_arg(supported_params=supported_params)
if max_tokens is not None:
optional_params["num_predict"] = max_tokens
if stream:
optional_params["stream"] = stream
if temperature is not None:
optional_params["temperature"] = temperature
if top_p is not None:
optional_params["top_p"] = top_p
if frequency_penalty is not None:
optional_params["repeat_penalty"] = frequency_penalty
if stop is not None:
optional_params["stop_sequences"] = stop
elif custom_llm_provider == "nlp_cloud":
supported_params = ["max_tokens", "stream", "temperature", "top_p", "presence_penalty", "frequency_penalty", "n", "stop"]
_check_valid_arg(supported_params=supported_params)
if max_tokens is not None:
optional_params["max_length"] = max_tokens
if stream:
optional_params["stream"] = stream
if temperature is not None:
optional_params["temperature"] = temperature
if top_p is not None:
optional_params["top_p"] = top_p
if presence_penalty is not None:
optional_params["presence_penalty"] = presence_penalty
if frequency_penalty is not None:
optional_params["frequency_penalty"] = frequency_penalty
if n is not None:
optional_params["num_return_sequences"] = n
if stop is not None:
optional_params["stop_sequences"] = stop
elif custom_llm_provider == "petals":
supported_params = ["max_tokens", "temperature", "top_p", "stream"]
_check_valid_arg(supported_params=supported_params)
# max_new_tokens=1,temperature=0.9, top_p=0.6
if max_tokens is not None:
optional_params["max_new_tokens"] = max_tokens
if temperature is not None:
optional_params["temperature"] = temperature
if top_p is not None:
optional_params["top_p"] = top_p
if stream:
optional_params["stream"] = stream
elif custom_llm_provider == "deepinfra":
supported_params = ["temperature", "top_p", "n", "stream", "stop", "max_tokens", "presence_penalty", "frequency_penalty", "logit_bias", "user"]
_check_valid_arg(supported_params=supported_params)
if temperature is not None:
if temperature == 0 and model == "mistralai/Mistral-7B-Instruct-v0.1": # this model does no support temperature == 0
temperature = 0.0001 # close to 0
optional_params["temperature"] = temperature
if top_p:
optional_params["top_p"] = top_p
if n:
optional_params["n"] = n
if stream:
optional_params["stream"] = str
if stop:
optional_params["stop"] = stop
if max_tokens:
optional_params["max_tokens"] = max_tokens
if presence_penalty:
optional_params["presence_penalty"] = presence_penalty
if frequency_penalty:
optional_params["frequency_penalty"] = frequency_penalty
if logit_bias:
optional_params["logit_bias"] = logit_bias
if user:
optional_params["user"] = user
elif custom_llm_provider == "perplexity":
supported_params = ["temperature", "top_p", "stream", "max_tokens", "presence_penalty", "frequency_penalty"]
_check_valid_arg(supported_params=supported_params)
if temperature is not None:
if temperature == 0 and model == "mistral-7b-instruct": # this model does no support temperature == 0
temperature = 0.0001 # close to 0
optional_params["temperature"] = temperature
if top_p:
optional_params["top_p"] = top_p
if stream:
optional_params["stream"] = stream
if max_tokens:
optional_params["max_tokens"] = max_tokens
if presence_penalty:
optional_params["presence_penalty"] = presence_penalty
if frequency_penalty:
optional_params["frequency_penalty"] = frequency_penalty
elif custom_llm_provider == "anyscale":
supported_params = ["temperature", "top_p", "stream", "max_tokens"]
_check_valid_arg(supported_params=supported_params)
optional_params = non_default_params
if temperature is not None:
if temperature == 0 and model == "mistralai/Mistral-7B-Instruct-v0.1": # this model does no support temperature == 0
temperature = 0.0001 # close to 0
optional_params["temperature"] = temperature
if top_p:
optional_params["top_p"] = top_p
if stream:
optional_params["stream"] = stream
if max_tokens:
optional_params["max_tokens"] = max_tokens
else: # assume passing in params for openai/azure openai
supported_params = ["functions", "function_call", "temperature", "top_p", "n", "stream", "stop", "max_tokens", "presence_penalty", "frequency_penalty", "logit_bias", "user", "response_format", "seed", "tools", "tool_choice", "max_retries"]
_check_valid_arg(supported_params=supported_params)
optional_params = non_default_params
# if user passed in non-default kwargs for specific providers/models, pass them along
for k in passed_params.keys():
if k not in default_params.keys():
optional_params[k] = passed_params[k]
return optional_params
def get_llm_provider(model: str, custom_llm_provider: Optional[str] = None, api_base: Optional[str] = None):
try:
dynamic_api_key = None
# check if llm provider provided
if custom_llm_provider:
return model, custom_llm_provider, dynamic_api_key, api_base
# check if llm provider part of model name
if model.split("/",1)[0] in litellm.provider_list and model.split("/",1)[0] not in litellm.model_list:
custom_llm_provider = model.split("/", 1)[0]
model = model.split("/", 1)[1]
if custom_llm_provider == "perplexity":
# perplexity is openai compatible, we just need to set this to custom_openai and have the api_base be https://api.perplexity.ai
api_base = "https://api.perplexity.ai"
dynamic_api_key = os.getenv("PERPLEXITYAI_API_KEY")
elif custom_llm_provider == "anyscale":
# anyscale is openai compatible, we just need to set this to custom_openai and have the api_base be https://api.endpoints.anyscale.com/v1
api_base = "https://api.endpoints.anyscale.com/v1"
dynamic_api_key = os.getenv("ANYSCALE_API_KEY")
elif custom_llm_provider == "deepinfra":
# deepinfra is openai compatible, we just need to set this to custom_openai and have the api_base be https://api.endpoints.anyscale.com/v1
api_base = "https://api.deepinfra.com/v1/openai"
dynamic_api_key = os.getenv("DEEPINFRA_API_KEY")
return model, custom_llm_provider, dynamic_api_key, api_base
# check if api base is a known openai compatible endpoint
if api_base:
for endpoint in litellm.openai_compatible_endpoints:
if endpoint in api_base:
if endpoint == "api.perplexity.ai":
custom_llm_provider = "perplexity"
dynamic_api_key = os.getenv("PERPLEXITYAI_API_KEY")
elif endpoint == "api.endpoints.anyscale.com/v1":
custom_llm_provider = "anyscale"
dynamic_api_key = os.getenv("ANYSCALE_API_KEY")
elif endpoint == "api.deepinfra.com/v1/openai":
custom_llm_provider = "deepinfra"
dynamic_api_key = os.getenv("DEEPINFRA_API_KEY")
return model, custom_llm_provider, dynamic_api_key, api_base
# check if model in known model provider list -> for huggingface models, raise exception as they don't have a fixed provider (can be togetherai, anyscale, baseten, runpod, et.)
## openai - chatcompletion + text completion
if model in litellm.open_ai_chat_completion_models or "ft:gpt-3.5-turbo" in model:
custom_llm_provider = "openai"
elif model in litellm.open_ai_text_completion_models:
custom_llm_provider = "text-completion-openai"
## anthropic
elif model in litellm.anthropic_models:
custom_llm_provider = "anthropic"
## cohere
elif model in litellm.cohere_models:
custom_llm_provider = "cohere"
## replicate
elif model in litellm.replicate_models or ":" in model:
model_parts = model.split(":")
if len(model_parts) > 1 and len(model_parts[1])==64: ## checks if model name has a 64 digit code - e.g. "meta/llama-2-70b-chat:02e509c789964a7ea8736978a43525956ef40397be9033abf9fd2badfe68c9e3"
custom_llm_provider = "replicate"
elif model in litellm.replicate_models:
custom_llm_provider = "replicate"
## openrouter
elif model in litellm.openrouter_models:
custom_llm_provider = "openrouter"
## openrouter
elif model in litellm.maritalk_models:
custom_llm_provider = "maritalk"
## vertex - text + chat models
elif(
model in litellm.vertex_chat_models or
model in litellm.vertex_code_chat_models or
model in litellm.vertex_text_models or
model in litellm.vertex_code_text_models
):
custom_llm_provider = "vertex_ai"
## ai21
elif model in litellm.ai21_models:
custom_llm_provider = "ai21"
## aleph_alpha
elif model in litellm.aleph_alpha_models:
custom_llm_provider = "aleph_alpha"
## baseten
elif model in litellm.baseten_models:
custom_llm_provider = "baseten"
## nlp_cloud
elif model in litellm.nlp_cloud_models:
custom_llm_provider = "nlp_cloud"
## petals
elif model in litellm.petals_models:
custom_llm_provider = "petals"
## bedrock
elif model in litellm.bedrock_models:
custom_llm_provider = "bedrock"
# openai embeddings
elif model in litellm.open_ai_embedding_models:
custom_llm_provider = "openai"
# cohere embeddings
elif model in litellm.cohere_embedding_models:
custom_llm_provider = "cohere"
elif model in litellm.bedrock_embedding_models:
custom_llm_provider = "bedrock"
if custom_llm_provider is None or custom_llm_provider=="":
print() # noqa
print("\033[1;31mProvider List: https://docs.litellm.ai/docs/providers\033[0m") # noqa
print() # noqa
raise ValueError(f"LLM Provider NOT provided. Pass in the LLM provider you are trying to call. E.g. For 'Huggingface' inference endpoints pass in `completion(model='huggingface/{model}',..)` Learn more: https://docs.litellm.ai/docs/providers")
return model, custom_llm_provider, dynamic_api_key, api_base
except Exception as e:
raise e
def get_api_key(llm_provider: str, dynamic_api_key: Optional[str]):
api_key = (dynamic_api_key or litellm.api_key)
# openai
if llm_provider == "openai" or llm_provider == "text-completion-openai":
api_key = (
api_key or
litellm.openai_key or
get_secret("OPENAI_API_KEY")
)
# anthropic
elif llm_provider == "anthropic":
api_key = (
api_key or
litellm.anthropic_key or
get_secret("ANTHROPIC_API_KEY")
)
# ai21
elif llm_provider == "ai21":
api_key = (
api_key or
litellm.ai21_key or
get_secret("AI211_API_KEY")
)
# aleph_alpha
elif llm_provider == "aleph_alpha":
api_key = (
api_key or
litellm.aleph_alpha_key or
get_secret("ALEPH_ALPHA_API_KEY")
)
# baseten
elif llm_provider == "baseten":
api_key = (
api_key or
litellm.baseten_key or
get_secret("BASETEN_API_KEY")
)
# cohere
elif llm_provider == "cohere":
api_key = (
api_key or
litellm.cohere_key or
get_secret("COHERE_API_KEY")
)
# huggingface
elif llm_provider == "huggingface":
api_key = (
api_key or
litellm.huggingface_key or
get_secret("HUGGINGFACE_API_KEY")
)
# nlp_cloud
elif llm_provider == "nlp_cloud":
api_key = (
api_key or
litellm.nlp_cloud_key or
get_secret("NLP_CLOUD_API_KEY")
)
# replicate
elif llm_provider == "replicate":
api_key = (
api_key or
litellm.replicate_key or
get_secret("REPLICATE_API_KEY")
)
# together_ai
elif llm_provider == "together_ai":
api_key = (
api_key or
litellm.togetherai_api_key or
get_secret("TOGETHERAI_API_KEY") or
get_secret("TOGETHER_AI_TOKEN")
)
return api_key
def get_max_tokens(model: str):
"""
Get the maximum number of tokens allowed for a given model.
Parameters:
model (str): The name of the model.
Returns:
int: The maximum number of tokens allowed for the given model.
Raises:
Exception: If the model is not mapped yet.
Example:
>>> get_max_tokens("gpt-4")
8192
"""
def _get_max_position_embeddings(model_name):
# Construct the URL for the config.json file
config_url = f"https://huggingface.co/{model_name}/raw/main/config.json"
try:
# Make the HTTP request to get the raw JSON file
response = requests.get(config_url)
response.raise_for_status() # Raise an exception for bad responses (4xx or 5xx)
# Parse the JSON response
config_json = response.json()
# Extract and return the max_position_embeddings
max_position_embeddings = config_json.get("max_position_embeddings")
if max_position_embeddings is not None:
return max_position_embeddings
else:
return None
except requests.exceptions.RequestException as e:
return None
try:
if model in litellm.model_cost:
return litellm.model_cost[model]["max_tokens"]
model, custom_llm_provider, _, _ = get_llm_provider(model=model)
if custom_llm_provider == "huggingface":
max_tokens = _get_max_position_embeddings(model_name=model)
return max_tokens
else:
raise Exception()
except:
raise Exception("This model isn't mapped yet. Add it here - https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json")
def get_model_info(model: str):
"""
Get a dict for the maximum tokens (context window),
input_cost_per_token, output_cost_per_token for a given model.
Parameters:
model (str): The name of the model.
Returns:
dict: A dictionary containing the following information:
- max_tokens (int): The maximum number of tokens allowed for the given model.
- input_cost_per_token (float): The cost per token for input.
- output_cost_per_token (float): The cost per token for output.
- litellm_provider (str): The provider of the model (e.g., "openai").
- mode (str): The mode of the model (e.g., "chat" or "completion").
Raises:
Exception: If the model is not mapped yet.
Example:
>>> get_model_info("gpt-4")
{
"max_tokens": 8192,
"input_cost_per_token": 0.00003,
"output_cost_per_token": 0.00006,
"litellm_provider": "openai",
"mode": "chat"
}
"""
def _get_max_position_embeddings(model_name):
# Construct the URL for the config.json file
config_url = f"https://huggingface.co/{model_name}/raw/main/config.json"
try:
# Make the HTTP request to get the raw JSON file
response = requests.get(config_url)
response.raise_for_status() # Raise an exception for bad responses (4xx or 5xx)
# Parse the JSON response
config_json = response.json()
# Extract and return the max_position_embeddings
max_position_embeddings = config_json.get("max_position_embeddings")
if max_position_embeddings is not None:
return max_position_embeddings
else:
return None
except requests.exceptions.RequestException as e:
return None
try:
if model in litellm.model_cost:
return litellm.model_cost[model]
model, custom_llm_provider, _, _ = get_llm_provider(model=model)
if custom_llm_provider == "huggingface":
max_tokens = _get_max_position_embeddings(model_name=model)
return {
"max_tokens": max_tokens,
"input_cost_per_token": 0,
"output_cost_per_token": 0,
"litellm_provider": "huggingface",
"mode": "chat"
}
else:
raise Exception()
except:
raise Exception("This model isn't mapped yet. Add it here - https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json")
def json_schema_type(python_type_name: str):
"""Converts standard python types to json schema types
Parameters
----------
python_type_name : str
__name__ of type
Returns
-------
str
a standard JSON schema type, "string" if not recognized.
"""
python_to_json_schema_types = {
str.__name__: "string",
int.__name__: "integer",
float.__name__: "number",
bool.__name__: "boolean",
list.__name__: "array",
dict.__name__: "object",
"NoneType": "null",
}
return python_to_json_schema_types.get(python_type_name, "string")
def function_to_dict(input_function): # noqa: C901
"""Using type hints and numpy-styled docstring,
produce a dictionnary usable for OpenAI function calling
Parameters
----------
input_function : function
A function with a numpy-style docstring
Returns
-------
dictionnary
A dictionnary to add to the list passed to `functions` parameter of `litellm.completion`
"""
# Get function name and docstring
try:
import inspect
from numpydoc.docscrape import NumpyDocString
from ast import literal_eval
except Exception as e:
raise e
name = input_function.__name__
docstring = inspect.getdoc(input_function)
numpydoc = NumpyDocString(docstring)
description = "\n".join([s.strip() for s in numpydoc["Summary"]])
# Get function parameters and their types from annotations and docstring
parameters = {}
required_params = []
param_info = inspect.signature(input_function).parameters
for param_name, param in param_info.items():
if hasattr(param, "annotation"):
param_type = json_schema_type(param.annotation.__name__)
else:
param_type = None
param_description = None
param_enum = None
# Try to extract param description from docstring using numpydoc
for param_data in numpydoc["Parameters"]:
if param_data.name == param_name:
if hasattr(param_data, "type"):
# replace type from docstring rather than annotation
param_type = param_data.type
if "optional" in param_type:
param_type = param_type.split(",")[0]
elif "{" in param_type:
# may represent a set of acceptable values
# translating as enum for function calling
try:
param_enum = str(list(literal_eval(param_type)))
param_type = "string"
except Exception:
pass
param_type = json_schema_type(param_type)
param_description = "\n".join([s.strip() for s in param_data.desc])
param_dict = {
"type": param_type,
"description": param_description,
"enum": param_enum,
}
parameters[param_name] = dict(
[(k, v) for k, v in param_dict.items() if isinstance(v, str)]
)
# Check if the parameter has no default value (i.e., it's required)
if param.default == param.empty:
required_params.append(param_name)
# Create the dictionary
result = {
"name": name,
"description": description,
"parameters": {
"type": "object",
"properties": parameters,
},
}
# Add "required" key if there are required parameters
if required_params:
result["parameters"]["required"] = required_params
return result
def load_test_model(
model: str,
custom_llm_provider: str = "",
api_base: str = "",
prompt: str = "",
num_calls: int = 0,
force_timeout: int = 0,
):
test_prompt = "Hey, how's it going"
test_calls = 100
if prompt:
test_prompt = prompt
if num_calls:
test_calls = num_calls
messages = [[{"role": "user", "content": test_prompt}] for _ in range(test_calls)]
start_time = time.time()
try:
litellm.batch_completion(
model=model,
messages=messages,
custom_llm_provider=custom_llm_provider,
api_base=api_base,
force_timeout=force_timeout,
)
end_time = time.time()
response_time = end_time - start_time
return {
"total_response_time": response_time,
"calls_made": 100,
"status": "success",
"exception": None,
}
except Exception as e:
end_time = time.time()
response_time = end_time - start_time
return {
"total_response_time": response_time,
"calls_made": 100,
"status": "failed",
"exception": e,
}
def validate_environment(model: Optional[str]=None) -> dict:
"""
Checks if the environment variables are valid for the given model.
Args:
model (Optional[str]): The name of the model. Defaults to None.
Returns:
dict: A dictionary containing the following keys:
- keys_in_environment (bool): True if all the required keys are present in the environment, False otherwise.
- missing_keys (List[str]): A list of missing keys in the environment.
"""
keys_in_environment = False
missing_keys: List[str] = []
if model is None:
return {"keys_in_environment": keys_in_environment, "missing_keys": missing_keys}
## EXTRACT LLM PROVIDER - if model name provided
try:
custom_llm_provider = get_llm_provider(model=model)
except:
custom_llm_provider = None
# # check if llm provider part of model name
# if model.split("/",1)[0] in litellm.provider_list:
# custom_llm_provider = model.split("/", 1)[0]
# model = model.split("/", 1)[1]
# custom_llm_provider_passed_in = True
if custom_llm_provider:
if custom_llm_provider == "openai":
if "OPENAI_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("OPENAI_API_KEY")
elif custom_llm_provider == "azure":
if ("AZURE_API_BASE" in os.environ
and "AZURE_API_VERSION" in os.environ
and "AZURE_API_KEY" in os.environ):
keys_in_environment = True
else:
missing_keys.extend(["AZURE_API_BASE", "AZURE_API_VERSION", "AZURE_API_KEY"])
elif custom_llm_provider == "anthropic":
if "ANTHROPIC_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("ANTHROPIC_API_KEY")
elif custom_llm_provider == "cohere":
if "COHERE_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("COHERE_API_KEY")
elif custom_llm_provider == "replicate":
if "REPLICATE_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("REPLICATE_API_KEY")
elif custom_llm_provider == "openrouter":
if "OPENROUTER_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("OPENROUTER_API_KEY")
elif custom_llm_provider == "vertex_ai":
if ("VERTEXAI_PROJECT" in os.environ
and "VERTEXAI_LOCATION" in os.environ):
keys_in_environment = True
else:
missing_keys.extend(["VERTEXAI_PROJECT", "VERTEXAI_PROJECT"])
elif custom_llm_provider == "huggingface":
if "HUGGINGFACE_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("HUGGINGFACE_API_KEY")
elif custom_llm_provider == "ai21":
if "AI21_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("AI21_API_KEY")
elif custom_llm_provider == "together_ai":
if "TOGETHERAI_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("TOGETHERAI_API_KEY")
elif custom_llm_provider == "aleph_alpha":
if "ALEPH_ALPHA_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("ALEPH_ALPHA_API_KEY")
elif custom_llm_provider == "baseten":
if "BASETEN_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("BASETEN_API_KEY")
elif custom_llm_provider == "nlp_cloud":
if "NLP_CLOUD_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("NLP_CLOUD_API_KEY")
elif custom_llm_provider == "bedrock":
if "AWS_ACCESS_KEY_ID" in os.environ and "AWS_SECRET_ACCESS_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("AWS_ACCESS_KEY_ID")
missing_keys.append("AWS_SECRET_ACCESS_KEY")
else:
## openai - chatcompletion + text completion
if model in litellm.open_ai_chat_completion_models or litellm.open_ai_text_completion_models:
if "OPENAI_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("OPENAI_API_KEY")
## anthropic
elif model in litellm.anthropic_models:
if "ANTHROPIC_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("ANTHROPIC_API_KEY")
## cohere
elif model in litellm.cohere_models:
if "COHERE_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("COHERE_API_KEY")
## replicate
elif model in litellm.replicate_models:
if "REPLICATE_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("REPLICATE_API_KEY")
## openrouter
elif model in litellm.openrouter_models:
if "OPENROUTER_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("OPENROUTER_API_KEY")
## vertex - text + chat models
elif model in litellm.vertex_chat_models or model in litellm.vertex_text_models:
if ("VERTEXAI_PROJECT" in os.environ
and "VERTEXAI_LOCATION" in os.environ):
keys_in_environment = True
else:
missing_keys.extend(["VERTEXAI_PROJECT", "VERTEXAI_PROJECT"])
## huggingface
elif model in litellm.huggingface_models:
if "HUGGINGFACE_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("HUGGINGFACE_API_KEY")
## ai21
elif model in litellm.ai21_models:
if "AI21_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("AI21_API_KEY")
## together_ai
elif model in litellm.together_ai_models:
if "TOGETHERAI_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("TOGETHERAI_API_KEY")
## aleph_alpha
elif model in litellm.aleph_alpha_models:
if "ALEPH_ALPHA_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("ALEPH_ALPHA_API_KEY")
## baseten
elif model in litellm.baseten_models:
if "BASETEN_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("BASETEN_API_KEY")
## nlp_cloud
elif model in litellm.nlp_cloud_models:
if "NLP_CLOUD_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("NLP_CLOUD_API_KEY")
return {"keys_in_environment": keys_in_environment, "missing_keys": missing_keys}
def set_callbacks(callback_list, function_id=None):
global sentry_sdk_instance, capture_exception, add_breadcrumb, posthog, slack_app, alerts_channel, traceloopLogger, heliconeLogger, aispendLogger, berrispendLogger, supabaseClient, liteDebuggerClient, llmonitorLogger, promptLayerLogger, langFuseLogger, customLogger, weightsBiasesLogger, langsmithLogger
try:
for callback in callback_list:
print_verbose(f"callback: {callback}")
if callback == "sentry":
try:
import sentry_sdk
except ImportError:
print_verbose("Package 'sentry_sdk' is missing. Installing it...")
subprocess.check_call(
[sys.executable, "-m", "pip", "install", "sentry_sdk"]
)
import sentry_sdk
sentry_sdk_instance = sentry_sdk
sentry_trace_rate = (
os.environ.get("SENTRY_API_TRACE_RATE")
if "SENTRY_API_TRACE_RATE" in os.environ
else "1.0"
)
sentry_sdk_instance.init(
dsn=os.environ.get("SENTRY_DSN"),
traces_sample_rate=float(sentry_trace_rate),
)
capture_exception = sentry_sdk_instance.capture_exception
add_breadcrumb = sentry_sdk_instance.add_breadcrumb
elif callback == "posthog":
try:
from posthog import Posthog
except ImportError:
print_verbose("Package 'posthog' is missing. Installing it...")
subprocess.check_call(
[sys.executable, "-m", "pip", "install", "posthog"]
)
from posthog import Posthog
posthog = Posthog(
project_api_key=os.environ.get("POSTHOG_API_KEY"),
host=os.environ.get("POSTHOG_API_URL"),
)
elif callback == "slack":
try:
from slack_bolt import App
except ImportError:
print_verbose("Package 'slack_bolt' is missing. Installing it...")
subprocess.check_call(
[sys.executable, "-m", "pip", "install", "slack_bolt"]
)
from slack_bolt import App
slack_app = App(
token=os.environ.get("SLACK_API_TOKEN"),
signing_secret=os.environ.get("SLACK_API_SECRET"),
)
alerts_channel = os.environ["SLACK_API_CHANNEL"]
print_verbose(f"Initialized Slack App: {slack_app}")
elif callback == "traceloop":
traceloopLogger = TraceloopLogger()
elif callback == "helicone":
heliconeLogger = HeliconeLogger()
elif callback == "llmonitor":
llmonitorLogger = LLMonitorLogger()
elif callback == "promptlayer":
promptLayerLogger = PromptLayerLogger()
elif callback == "langfuse":
langFuseLogger = LangFuseLogger()
elif callback == "wandb":
weightsBiasesLogger = WeightsBiasesLogger()
elif callback == "langsmith":
langsmithLogger = LangsmithLogger()
elif callback == "aispend":
aispendLogger = AISpendLogger()
elif callback == "berrispend":
berrispendLogger = BerriSpendLogger()
elif callback == "supabase":
print_verbose(f"instantiating supabase")
supabaseClient = Supabase()
elif callback == "lite_debugger":
print_verbose(f"instantiating lite_debugger")
if function_id:
liteDebuggerClient = LiteDebugger(email=function_id)
elif litellm.token:
liteDebuggerClient = LiteDebugger(email=litellm.token)
elif litellm.email:
liteDebuggerClient = LiteDebugger(email=litellm.email)
else:
liteDebuggerClient = LiteDebugger(email=str(uuid.uuid4()))
elif callable(callback):
customLogger = CustomLogger()
except Exception as e:
raise e
def handle_failure(exception, traceback_exception, start_time, end_time, args, kwargs):
global sentry_sdk_instance, capture_exception, add_breadcrumb, posthog, slack_app, alerts_channel, aispendLogger, berrispendLogger, supabaseClient, liteDebuggerClient, llmonitorLogger
try:
# print_verbose(f"handle_failure args: {args}")
# print_verbose(f"handle_failure kwargs: {kwargs}")
success_handler = additional_details.pop("success_handler", None)
failure_handler = additional_details.pop("failure_handler", None)
additional_details["Event_Name"] = additional_details.pop(
"failed_event_name", "litellm.failed_query"
)
print_verbose(f"self.failure_callback: {litellm.failure_callback}")
for callback in litellm.failure_callback:
try:
if callback == "slack":
slack_msg = ""
if len(kwargs) > 0:
for key in kwargs:
slack_msg += f"{key}: {kwargs[key]}\n"
if len(args) > 0:
for i, arg in enumerate(args):
slack_msg += f"LiteLLM_Args_{str(i)}: {arg}"
for detail in additional_details:
slack_msg += f"{detail}: {additional_details[detail]}\n"
slack_msg += f"Traceback: {traceback_exception}"
slack_app.client.chat_postMessage(
channel=alerts_channel, text=slack_msg
)
elif callback == "sentry":
capture_exception(exception)
elif callback == "posthog":
print_verbose(
f"inside posthog, additional_details: {len(additional_details.keys())}"
)
ph_obj = {}
if len(kwargs) > 0:
ph_obj = kwargs
if len(args) > 0:
for i, arg in enumerate(args):
ph_obj["litellm_args_" + str(i)] = arg
for detail in additional_details:
ph_obj[detail] = additional_details[detail]
event_name = additional_details["Event_Name"]
print_verbose(f"ph_obj: {ph_obj}")
print_verbose(f"PostHog Event Name: {event_name}")
if "user_id" in additional_details:
posthog.capture(
additional_details["user_id"], event_name, ph_obj
)
else: # PostHog calls require a unique id to identify a user - https://posthog.com/docs/libraries/python
unique_id = str(uuid.uuid4())
posthog.capture(unique_id, event_name)
print_verbose(f"successfully logged to PostHog!")
elif callback == "berrispend":
print_verbose("reaches berrispend for logging!")
model = args[0] if len(args) > 0 else kwargs["model"]
messages = args[1] if len(args) > 1 else kwargs["messages"]
result = {
"model": model,
"created": time.time(),
"error": traceback_exception,
"usage": {
"prompt_tokens": prompt_token_calculator(
model, messages=messages
),
"completion_tokens": 0,
},
}
berrispendLogger.log_event(
model=model,
messages=messages,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
elif callback == "aispend":
print_verbose("reaches aispend for logging!")
model = args[0] if len(args) > 0 else kwargs["model"]
messages = args[1] if len(args) > 1 else kwargs["messages"]
result = {
"model": model,
"created": time.time(),
"usage": {
"prompt_tokens": prompt_token_calculator(
model, messages=messages
),
"completion_tokens": 0,
},
}
aispendLogger.log_event(
model=model,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
elif callback == "supabase":
print_verbose("reaches supabase for logging!")
print_verbose(f"supabaseClient: {supabaseClient}")
model = args[0] if len(args) > 0 else kwargs["model"]
messages = args[1] if len(args) > 1 else kwargs["messages"]
result = {
"model": model,
"created": time.time(),
"error": traceback_exception,
"usage": {
"prompt_tokens": prompt_token_calculator(
model, messages=messages
),
"completion_tokens": 0,
},
}
supabaseClient.log_event(
model=model,
messages=messages,
end_user=kwargs.get("user", "default"),
response_obj=result,
start_time=start_time,
end_time=end_time,
litellm_call_id=kwargs["litellm_call_id"],
print_verbose=print_verbose,
)
except:
print_verbose(
f"Error Occurred while logging failure: {traceback.format_exc()}"
)
pass
if failure_handler and callable(failure_handler):
call_details = {
"exception": exception,
"additional_details": additional_details,
}
failure_handler(call_details)
pass
except Exception as e:
# LOGGING
exception_logging(logger_fn=user_logger_fn, exception=e)
pass
def convert_to_model_response_object(response_object: Optional[dict]=None, model_response_object: Optional[ModelResponse]=None):
try:
if response_object is None or model_response_object is None:
raise Exception("Error in response object format")
choice_list=[]
for idx, choice in enumerate(response_object["choices"]):
message = Message(
content=choice["message"].get("content", None),
role=choice["message"]["role"],
function_call=choice["message"].get("function_call", None),
tool_calls=choice["message"].get("tool_calls", None)
)
finish_reason = choice.get("finish_reason", None)
if finish_reason == None:
# gpt-4 vision can return 'finish_reason' or 'finish_details'
finish_reason = choice.get("finish_details")
choice = Choices(finish_reason=finish_reason, index=idx, message=message)
choice_list.append(choice)
model_response_object.choices = choice_list
if "usage" in response_object and response_object["usage"] is not None:
model_response_object.usage.completion_tokens = response_object["usage"].get("completion_tokens", 0) # type: ignore
model_response_object.usage.prompt_tokens = response_object["usage"].get("prompt_tokens", 0) # type: ignore
model_response_object.usage.total_tokens = response_object["usage"].get("total_tokens", 0) # type: ignore
if "id" in response_object:
model_response_object.id = response_object["id"]
if "system_fingerprint" in response_object:
model_response_object.system_fingerprint = response_object["system_fingerprint"]
if "model" in response_object:
model_response_object.model = response_object["model"]
return model_response_object
except Exception as e:
raise Exception(f"Invalid response object {e}")
# NOTE: DEPRECATING this in favor of using success_handler() in Logging:
def handle_success(args, kwargs, result, start_time, end_time):
global heliconeLogger, aispendLogger, supabaseClient, liteDebuggerClient, llmonitorLogger
try:
model = args[0] if len(args) > 0 else kwargs["model"]
input = (
args[1]
if len(args) > 1
else kwargs.get("messages", kwargs.get("input", None))
)
success_handler = additional_details.pop("success_handler", None)
failure_handler = additional_details.pop("failure_handler", None)
additional_details["Event_Name"] = additional_details.pop(
"successful_event_name", "litellm.succes_query"
)
for callback in litellm.success_callback:
try:
if callback == "posthog":
ph_obj = {}
for detail in additional_details:
ph_obj[detail] = additional_details[detail]
event_name = additional_details["Event_Name"]
if "user_id" in additional_details:
posthog.capture(
additional_details["user_id"], event_name, ph_obj
)
else: # PostHog calls require a unique id to identify a user - https://posthog.com/docs/libraries/python
unique_id = str(uuid.uuid4())
posthog.capture(unique_id, event_name, ph_obj)
pass
elif callback == "slack":
slack_msg = ""
for detail in additional_details:
slack_msg += f"{detail}: {additional_details[detail]}\n"
slack_app.client.chat_postMessage(
channel=alerts_channel, text=slack_msg
)
elif callback == "aispend":
print_verbose("reaches aispend for logging!")
model = args[0] if len(args) > 0 else kwargs["model"]
aispendLogger.log_event(
model=model,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
except Exception as e:
# LOGGING
exception_logging(logger_fn=user_logger_fn, exception=e)
print_verbose(
f"[Non-Blocking] Success Callback Error - {traceback.format_exc()}"
)
pass
if success_handler and callable(success_handler):
success_handler(args, kwargs)
pass
except Exception as e:
# LOGGING
exception_logging(logger_fn=user_logger_fn, exception=e)
print_verbose(
f"[Non-Blocking] Success Callback Error - {traceback.format_exc()}"
)
pass
def acreate(*args, **kwargs): ## Thin client to handle the acreate langchain call
return litellm.acompletion(*args, **kwargs)
def prompt_token_calculator(model, messages):
# use tiktoken or anthropic's tokenizer depending on the model
text = " ".join(message["content"] for message in messages)
num_tokens = 0
if "claude" in model:
try:
import anthropic
except:
Exception("Anthropic import failed please run `pip install anthropic`")
from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
anthropic = Anthropic()
num_tokens = anthropic.count_tokens(text)
else:
num_tokens = len(encoding.encode(text))
return num_tokens
def valid_model(model):
try:
# for a given model name, check if the user has the right permissions to access the model
if (
model in litellm.open_ai_chat_completion_models
or model in litellm.open_ai_text_completion_models
):
openai.Model.retrieve(model)
else:
messages = [{"role": "user", "content": "Hello World"}]
litellm.completion(model=model, messages=messages)
except:
raise BadRequestError(message="", model=model, llm_provider="")
def check_valid_key(model: str, api_key: str):
"""
Checks if a given API key is valid for a specific model by making a litellm.completion call with max_tokens=10
Args:
model (str): The name of the model to check the API key against.
api_key (str): The API key to be checked.
Returns:
bool: True if the API key is valid for the model, False otherwise.
"""
messages = [{"role": "user", "content": "Hey, how's it going?"}]
try:
litellm.completion(model=model, messages=messages, api_key=api_key, max_tokens=10)
return True
except AuthenticationError as e:
return False
except Exception as e:
return False
# integration helper function
def modify_integration(integration_name, integration_params):
global supabaseClient
if integration_name == "supabase":
if "table_name" in integration_params:
Supabase.supabase_table_name = integration_params["table_name"]
# custom prompt helper function
def register_prompt_template(model: str, roles: dict, initial_prompt_value: str = "", final_prompt_value: str = ""):
"""
Register a prompt template to follow your custom format for a given model
Args:
model (str): The name of the model.
roles (dict): A dictionary mapping roles to their respective prompt values.
initial_prompt_value (str, optional): The initial prompt value. Defaults to "".
final_prompt_value (str, optional): The final prompt value. Defaults to "".
Returns:
dict: The updated custom prompt dictionary.
Example usage:
```
import litellm
litellm.register_prompt_template(
model="llama-2",
initial_prompt_value="You are a good assistant" # [OPTIONAL]
roles={
"system": {
"pre_message": "[INST] <<SYS>>\n", # [OPTIONAL]
"post_message": "\n<</SYS>>\n [/INST]\n" # [OPTIONAL]
},
"user": {
"pre_message": "[INST] ", # [OPTIONAL]
"post_message": " [/INST]" # [OPTIONAL]
},
"assistant": {
"pre_message": "\n" # [OPTIONAL]
"post_message": "\n" # [OPTIONAL]
}
}
final_prompt_value="Now answer as best you can:" # [OPTIONAL]
)
```
"""
model = get_llm_provider(model=model)[0]
litellm.custom_prompt_dict[model] = {
"roles": roles,
"initial_prompt_value": initial_prompt_value,
"final_prompt_value": final_prompt_value
}
return litellm.custom_prompt_dict
####### DEPRECATED ################
def get_all_keys(llm_provider=None):
try:
global last_fetched_at_keys
# if user is using hosted product -> instantiate their env with their hosted api keys - refresh every 5 minutes
print_verbose(f"Reaches get all keys, llm_provider: {llm_provider}")
user_email = (
os.getenv("LITELLM_EMAIL")
or litellm.email
or litellm.token
or os.getenv("LITELLM_TOKEN")
)
if user_email:
time_delta = 0
if last_fetched_at_keys != None:
current_time = time.time()
time_delta = current_time - last_fetched_at_keys
if (
time_delta > 300 or last_fetched_at_keys == None or llm_provider
): # if the llm provider is passed in , assume this happening due to an AuthError for that provider
# make the api call
last_fetched_at = time.time()
print_verbose(f"last_fetched_at: {last_fetched_at}")
response = requests.post(
url="http://api.litellm.ai/get_all_keys",
headers={"content-type": "application/json"},
data=json.dumps({"user_email": user_email}),
)
print_verbose(f"get model key response: {response.text}")
data = response.json()
# update model list
for key, value in data[
"model_keys"
].items(): # follows the LITELLM API KEY format - <UPPERCASE_PROVIDER_NAME>_API_KEY - e.g. HUGGINGFACE_API_KEY
os.environ[key] = value
# set model alias map
for model_alias, value in data["model_alias_map"].items():
litellm.model_alias_map[model_alias] = value
return "it worked!"
return None
return None
except:
print_verbose(
f"[Non-Blocking Error] get_all_keys error - {traceback.format_exc()}"
)
pass
def get_model_list():
global last_fetched_at, print_verbose
try:
# if user is using hosted product -> get their updated model list
user_email = (
os.getenv("LITELLM_EMAIL")
or litellm.email
or litellm.token
or os.getenv("LITELLM_TOKEN")
)
if user_email:
# make the api call
last_fetched_at = time.time()
print_verbose(f"last_fetched_at: {last_fetched_at}")
response = requests.post(
url="http://api.litellm.ai/get_model_list",
headers={"content-type": "application/json"},
data=json.dumps({"user_email": user_email}),
)
print_verbose(f"get_model_list response: {response.text}")
data = response.json()
# update model list
model_list = data["model_list"]
# # check if all model providers are in environment
# model_providers = data["model_providers"]
# missing_llm_provider = None
# for item in model_providers:
# if f"{item.upper()}_API_KEY" not in os.environ:
# missing_llm_provider = item
# break
# # update environment - if required
# threading.Thread(target=get_all_keys, args=(missing_llm_provider)).start()
return model_list
return [] # return empty list by default
except:
print_verbose(
f"[Non-Blocking Error] get_model_list error - {traceback.format_exc()}"
)
####### EXCEPTION MAPPING ################
def exception_type(
model,
original_exception,
custom_llm_provider,
completion_kwargs={},
):
global user_logger_fn, liteDebuggerClient
exception_mapping_worked = False
if litellm.suppress_debug_info is False:
print() # noqa
print("\033[1;31mGive Feedback / Get Help: https://github.com/BerriAI/litellm/issues/new\033[0m") # noqa
print("LiteLLM.Info: If you need to debug this error, use `litellm.set_verbose=True'.") # noqa
print() # noqa
try:
if model:
error_str = str(original_exception)
if isinstance(original_exception, BaseException):
exception_type = type(original_exception).__name__
else:
exception_type = ""
if "Request Timeout Error" in error_str or "Request timed out" in error_str:
exception_mapping_worked = True
raise Timeout(
message=f"APITimeoutError - Request timed out",
model=model,
llm_provider=custom_llm_provider
)
if custom_llm_provider == "openai" or custom_llm_provider == "text-completion-openai" or custom_llm_provider == "custom_openai":
if "This model's maximum context length is" in error_str or "Request too large" in error_str:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=f"OpenAIException - {original_exception.message}",
llm_provider="openai",
model=model,
response=original_exception.response
)
elif "invalid_request_error" in error_str and "Incorrect API key provided" not in error_str:
exception_mapping_worked = True
raise BadRequestError(
message=f"OpenAIException - {original_exception.message}",
llm_provider="openai",
model=model,
response=original_exception.response
)
elif hasattr(original_exception, "status_code"):
exception_mapping_worked = True
if original_exception.status_code == 401:
exception_mapping_worked = True
raise AuthenticationError(
message=f"OpenAIException - {original_exception.message}",
llm_provider="openai",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 408:
exception_mapping_worked = True
raise Timeout(
message=f"OpenAIException - {original_exception.message}",
model=model,
llm_provider="openai",
)
if original_exception.status_code == 422:
exception_mapping_worked = True
raise BadRequestError(
message=f"OpenAIException - {original_exception.message}",
model=model,
llm_provider="openai",
response=original_exception.response
)
elif original_exception.status_code == 429:
exception_mapping_worked = True
raise RateLimitError(
message=f"OpenAIException - {original_exception.message}",
model=model,
llm_provider="openai",
response=original_exception.response
)
elif original_exception.status_code == 503:
exception_mapping_worked = True
raise ServiceUnavailableError(
message=f"OpenAIException - {original_exception.message}",
model=model,
llm_provider="openai",
response=original_exception.response
)
elif original_exception.status_code == 504: # gateway timeout error
exception_mapping_worked = True
raise Timeout(
message=f"OpenAIException - {original_exception.message}",
model=model,
llm_provider="openai",
)
else:
exception_mapping_worked = True
raise APIError(
status_code=original_exception.status_code,
message=f"OpenAIException - {original_exception.message}",
llm_provider="openai",
model=model,
request=original_exception.request
)
elif custom_llm_provider == "anthropic": # one of the anthropics
if hasattr(original_exception, "message"):
if "prompt is too long" in original_exception.message:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=original_exception.message,
model=model,
llm_provider="anthropic",
response=original_exception.response
)
if "Invalid API Key" in original_exception.message:
exception_mapping_worked = True
raise AuthenticationError(
message=original_exception.message,
model=model,
llm_provider="anthropic",
response=original_exception.response
)
if hasattr(original_exception, "status_code"):
print_verbose(f"status_code: {original_exception.status_code}")
if original_exception.status_code == 401:
exception_mapping_worked = True
raise AuthenticationError(
message=f"AnthropicException - {original_exception.message}",
llm_provider="anthropic",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 400 or original_exception.status_code == 413:
exception_mapping_worked = True
raise BadRequestError(
message=f"AnthropicException - {original_exception.message}",
model=model,
llm_provider="anthropic",
response=original_exception.response
)
elif original_exception.status_code == 408:
exception_mapping_worked = True
raise Timeout(
message=f"AnthropicException - {original_exception.message}",
model=model,
llm_provider="anthropic",
request=original_exception.request
)
elif original_exception.status_code == 429:
exception_mapping_worked = True
raise RateLimitError(
message=f"AnthropicException - {original_exception.message}",
llm_provider="anthropic",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 500:
exception_mapping_worked = True
raise ServiceUnavailableError(
message=f"AnthropicException - {original_exception.message}",
llm_provider="anthropic",
model=model,
response=original_exception.response
)
else:
exception_mapping_worked = True
raise APIError(
status_code=original_exception.status_code,
message=f"AnthropicException - {original_exception.message}",
llm_provider="anthropic",
model=model,
request=original_exception.request
)
elif custom_llm_provider == "replicate":
if "Incorrect authentication token" in error_str:
exception_mapping_worked = True
raise AuthenticationError(
message=f"ReplicateException - {error_str}",
llm_provider="replicate",
model=model,
response=original_exception.response
)
elif "input is too long" in error_str:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=f"ReplicateException - {error_str}",
model=model,
llm_provider="replicate",
response=original_exception.response
)
elif exception_type == "ModelError":
exception_mapping_worked = True
raise BadRequestError(
message=f"ReplicateException - {error_str}",
model=model,
llm_provider="replicate",
response=original_exception.response
)
elif "Request was throttled" in error_str:
exception_mapping_worked = True
raise RateLimitError(
message=f"ReplicateException - {error_str}",
llm_provider="replicate",
model=model,
response=original_exception.response
)
elif hasattr(original_exception, "status_code"):
if original_exception.status_code == 401:
exception_mapping_worked = True
raise AuthenticationError(
message=f"ReplicateException - {original_exception.message}",
llm_provider="replicate",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 400 or original_exception.status_code == 422 or original_exception.status_code == 413:
exception_mapping_worked = True
raise BadRequestError(
message=f"ReplicateException - {original_exception.message}",
model=model,
llm_provider="replicate",
response=original_exception.response
)
elif original_exception.status_code == 408:
exception_mapping_worked = True
raise Timeout(
message=f"ReplicateException - {original_exception.message}",
model=model,
llm_provider="replicate",
request=original_exception.request
)
elif original_exception.status_code == 429:
exception_mapping_worked = True
raise RateLimitError(
message=f"ReplicateException - {original_exception.message}",
llm_provider="replicate",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 500:
exception_mapping_worked = True
raise ServiceUnavailableError(
message=f"ReplicateException - {original_exception.message}",
llm_provider="replicate",
model=model,
response=original_exception.response
)
exception_mapping_worked = True
raise APIError(
status_code=500,
message=f"ReplicateException - {str(original_exception)}",
llm_provider="replicate",
model=model,
request=original_exception.request
)
elif custom_llm_provider == "bedrock":
if "too many tokens" in error_str or "expected maxLength:" in error_str or "Input is too long" in error_str or "Too many input tokens" in error_str:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=f"BedrockException: Context Window Error - {error_str}",
model=model,
llm_provider="bedrock",
response=original_exception.response
)
if "Malformed input request" in error_str:
exception_mapping_worked = True
raise BadRequestError(
message=f"BedrockException - {error_str}",
model=model,
llm_provider="bedrock",
response=original_exception.response
)
if "Unable to locate credentials" in error_str or "The security token included in the request is invalid" in error_str:
exception_mapping_worked = True
raise AuthenticationError(
message=f"BedrockException Invalid Authentication - {error_str}",
model=model,
llm_provider="bedrock",
response=original_exception.response
)
if "throttlingException" in error_str or "ThrottlingException" in error_str:
exception_mapping_worked = True
raise RateLimitError(
message=f"BedrockException: Rate Limit Error - {error_str}",
model=model,
llm_provider="bedrock",
response=original_exception.response
)
if hasattr(original_exception, "status_code"):
if original_exception.status_code == 500:
exception_mapping_worked = True
raise ServiceUnavailableError(
message=f"BedrockException - {original_exception.message}",
llm_provider="bedrock",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 401:
exception_mapping_worked = True
raise AuthenticationError(
message=f"BedrockException - {original_exception.message}",
llm_provider="bedrock",
model=model,
response=original_exception.response
)
elif custom_llm_provider == "sagemaker":
if "Unable to locate credentials" in error_str:
exception_mapping_worked = True
raise BadRequestError(
message=f"SagemakerException - {error_str}",
model=model,
llm_provider="sagemaker",
response=original_exception.response
)
elif custom_llm_provider == "vertex_ai":
if "Vertex AI API has not been used in project" in error_str or "Unable to find your project" in error_str:
exception_mapping_worked = True
raise BadRequestError(
message=f"VertexAIException - {error_str}",
model=model,
llm_provider="vertex_ai",
response=original_exception.response
)
elif "403" in error_str:
exception_mapping_worked = True
raise AuthenticationError(
message=f"VertexAIException - {error_str}",
model=model,
llm_provider="vertex_ai",
response=original_exception.response
)
elif custom_llm_provider == "palm":
if "503 Getting metadata" in error_str:
# auth errors look like this
# 503 Getting metadata from plugin failed with error: Reauthentication is needed. Please run `gcloud auth application-default login` to reauthenticate.
exception_mapping_worked = True
raise BadRequestError(
message=f"PalmException - Invalid api key",
model=model,
llm_provider="palm",
response=original_exception.response
)
if "400 Request payload size exceeds" in error_str:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=f"PalmException - {error_str}",
model=model,
llm_provider="palm",
response=original_exception.response
)
if hasattr(original_exception, "status_code"):
if original_exception.status_code == 400:
exception_mapping_worked = True
raise BadRequestError(
message=f"PalmException - {error_str}",
model=model,
llm_provider="palm",
response=original_exception.response
)
# Dailed: Error occurred: 400 Request payload size exceeds the limit: 20000 bytes
elif custom_llm_provider == "cohere": # Cohere
if (
"invalid api token" in error_str
or "No API key provided." in error_str
):
exception_mapping_worked = True
raise AuthenticationError(
message=f"CohereException - {original_exception.message}",
llm_provider="cohere",
model=model,
response=original_exception.response
)
elif "too many tokens" in error_str:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=f"CohereException - {original_exception.message}",
model=model,
llm_provider="cohere",
response=original_exception.response
)
elif hasattr(original_exception, "status_code"):
if original_exception.status_code == 400 or original_exception.status_code == 498:
exception_mapping_worked = True
raise BadRequestError(
message=f"CohereException - {original_exception.message}",
llm_provider="cohere",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 500:
exception_mapping_worked = True
raise ServiceUnavailableError(
message=f"CohereException - {original_exception.message}",
llm_provider="cohere",
model=model,
response=original_exception.response
)
elif (
"CohereConnectionError" in exception_type
): # cohere seems to fire these errors when we load test it (1k+ messages / min)
exception_mapping_worked = True
raise RateLimitError(
message=f"CohereException - {original_exception.message}",
llm_provider="cohere",
model=model,
response=original_exception.response
)
elif "invalid type:" in error_str:
exception_mapping_worked = True
raise BadRequestError(
message=f"CohereException - {original_exception.message}",
llm_provider="cohere",
model=model,
response=original_exception.response
)
elif "Unexpected server error" in error_str:
exception_mapping_worked = True
raise ServiceUnavailableError(
message=f"CohereException - {original_exception.message}",
llm_provider="cohere",
model=model,
response=original_exception.response
)
else:
if hasattr(original_exception, "status_code"):
exception_mapping_worked = True
raise APIError(
status_code=original_exception.status_code,
message=f"CohereException - {original_exception.message}",
llm_provider="cohere",
model=model,
request=original_exception.request
)
raise original_exception
elif custom_llm_provider == "huggingface":
if "length limit exceeded" in error_str:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=error_str,
model=model,
llm_provider="huggingface",
response=original_exception.response
)
elif "A valid user token is required" in error_str:
exception_mapping_worked = True
raise BadRequestError(
message=error_str,
llm_provider="huggingface",
model=model,
response=original_exception.response
)
if hasattr(original_exception, "status_code"):
if original_exception.status_code == 401:
exception_mapping_worked = True
raise AuthenticationError(
message=f"HuggingfaceException - {original_exception.message}",
llm_provider="huggingface",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 400:
exception_mapping_worked = True
raise BadRequestError(
message=f"HuggingfaceException - {original_exception.message}",
model=model,
llm_provider="huggingface",
response=original_exception.response
)
elif original_exception.status_code == 408:
exception_mapping_worked = True
raise Timeout(
message=f"HuggingfaceException - {original_exception.message}",
model=model,
llm_provider="huggingface",
request=original_exception.request
)
elif original_exception.status_code == 429:
exception_mapping_worked = True
raise RateLimitError(
message=f"HuggingfaceException - {original_exception.message}",
llm_provider="huggingface",
model=model,
response=original_exception.response
)
else:
exception_mapping_worked = True
raise APIError(
status_code=original_exception.status_code,
message=f"HuggingfaceException - {original_exception.message}",
llm_provider="huggingface",
model=model,
request=original_exception.request
)
elif custom_llm_provider == "ai21":
if hasattr(original_exception, "message"):
if "Prompt has too many tokens" in original_exception.message:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=f"AI21Exception - {original_exception.message}",
model=model,
llm_provider="ai21",
response=original_exception.response
)
if "Bad or missing API token." in original_exception.message:
exception_mapping_worked = True
raise BadRequestError(
message=f"AI21Exception - {original_exception.message}",
model=model,
llm_provider="ai21",
response=original_exception.response
)
if hasattr(original_exception, "status_code"):
if original_exception.status_code == 401:
exception_mapping_worked = True
raise AuthenticationError(
message=f"AI21Exception - {original_exception.message}",
llm_provider="ai21",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 408:
exception_mapping_worked = True
raise Timeout(
message=f"AI21Exception - {original_exception.message}",
model=model,
llm_provider="ai21",
request=original_exception.request
)
if original_exception.status_code == 422:
exception_mapping_worked = True
raise BadRequestError(
message=f"AI21Exception - {original_exception.message}",
model=model,
llm_provider="ai21",
response=original_exception.response
)
elif original_exception.status_code == 429:
exception_mapping_worked = True
raise RateLimitError(
message=f"AI21Exception - {original_exception.message}",
llm_provider="ai21",
model=model,
response=original_exception.response
)
else:
exception_mapping_worked = True
raise APIError(
status_code=original_exception.status_code,
message=f"AI21Exception - {original_exception.message}",
llm_provider="ai21",
model=model,
request=original_exception.request
)
elif custom_llm_provider == "nlp_cloud":
if "detail" in error_str:
if "Input text length should not exceed" in error_str:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=f"NLPCloudException - {error_str}",
model=model,
llm_provider="nlp_cloud",
response=original_exception.response
)
elif "value is not a valid" in error_str:
exception_mapping_worked = True
raise BadRequestError(
message=f"NLPCloudException - {error_str}",
model=model,
llm_provider="nlp_cloud",
response=original_exception.response
)
else:
exception_mapping_worked = True
raise APIError(
status_code=500,
message=f"NLPCloudException - {error_str}",
model=model,
llm_provider="nlp_cloud",
request=original_exception.request
)
if hasattr(original_exception, "status_code"): # https://docs.nlpcloud.com/?shell#errors
if original_exception.status_code == 400 or original_exception.status_code == 406 or original_exception.status_code == 413 or original_exception.status_code == 422:
exception_mapping_worked = True
raise BadRequestError(
message=f"NLPCloudException - {original_exception.message}",
llm_provider="nlp_cloud",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 401 or original_exception.status_code == 403:
exception_mapping_worked = True
raise AuthenticationError(
message=f"NLPCloudException - {original_exception.message}",
llm_provider="nlp_cloud",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 522 or original_exception.status_code == 524:
exception_mapping_worked = True
raise Timeout(
message=f"NLPCloudException - {original_exception.message}",
model=model,
llm_provider="nlp_cloud",
request=original_exception.request
)
elif original_exception.status_code == 429 or original_exception.status_code == 402:
exception_mapping_worked = True
raise RateLimitError(
message=f"NLPCloudException - {original_exception.message}",
llm_provider="nlp_cloud",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 500 or original_exception.status_code == 503:
exception_mapping_worked = True
raise APIError(
status_code=original_exception.status_code,
message=f"NLPCloudException - {original_exception.message}",
llm_provider="nlp_cloud",
model=model,
request=original_exception.request
)
elif original_exception.status_code == 504 or original_exception.status_code == 520:
exception_mapping_worked = True
raise ServiceUnavailableError(
message=f"NLPCloudException - {original_exception.message}",
model=model,
llm_provider="nlp_cloud",
response=original_exception.response
)
else:
exception_mapping_worked = True
raise APIError(
status_code=original_exception.status_code,
message=f"NLPCloudException - {original_exception.message}",
llm_provider="nlp_cloud",
model=model,
request=original_exception.request
)
elif custom_llm_provider == "together_ai":
import json
try:
error_response = json.loads(error_str)
except:
error_response = {"error": error_str}
if "error" in error_response and "`inputs` tokens + `max_new_tokens` must be <=" in error_response["error"]:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=f"TogetherAIException - {error_response['error']}",
model=model,
llm_provider="together_ai",
response=original_exception.response
)
elif "error" in error_response and "invalid private key" in error_response["error"]:
exception_mapping_worked = True
raise AuthenticationError(
message=f"TogetherAIException - {error_response['error']}",
llm_provider="together_ai",
model=model,
response=original_exception.response
)
elif "error" in error_response and "INVALID_ARGUMENT" in error_response["error"]:
exception_mapping_worked = True
raise BadRequestError(
message=f"TogetherAIException - {error_response['error']}",
model=model,
llm_provider="together_ai",
response=original_exception.response
)
elif "error" in error_response and "API key doesn't match expected format." in error_response["error"]:
exception_mapping_worked = True
raise BadRequestError(
message=f"TogetherAIException - {error_response['error']}",
model=model,
llm_provider="together_ai",
response=original_exception.response
)
elif "error_type" in error_response and error_response["error_type"] == "validation":
exception_mapping_worked = True
raise BadRequestError(
message=f"TogetherAIException - {error_response['error']}",
model=model,
llm_provider="together_ai",
response=original_exception.response
)
elif original_exception.status_code == 408:
exception_mapping_worked = True
raise Timeout(
message=f"TogetherAIException - {original_exception.message}",
model=model,
llm_provider="together_ai",
request=original_exception.request
)
elif original_exception.status_code == 429:
exception_mapping_worked = True
raise RateLimitError(
message=f"TogetherAIException - {original_exception.message}",
llm_provider="together_ai",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 524:
exception_mapping_worked = True
raise Timeout(
message=f"TogetherAIException - {original_exception.message}",
llm_provider="together_ai",
model=model,
)
else:
exception_mapping_worked = True
raise APIError(
status_code=original_exception.status_code,
message=f"TogetherAIException - {original_exception.message}",
llm_provider="together_ai",
model=model,
request=original_exception.request
)
elif custom_llm_provider == "aleph_alpha":
if "This is longer than the model's maximum context length" in error_str:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=f"AlephAlphaException - {original_exception.message}",
llm_provider="aleph_alpha",
model=model,
response=original_exception.response
)
elif "InvalidToken" in error_str or "No token provided" in error_str:
exception_mapping_worked = True
raise BadRequestError(
message=f"AlephAlphaException - {original_exception.message}",
llm_provider="aleph_alpha",
model=model,
response=original_exception.response
)
elif hasattr(original_exception, "status_code"):
print_verbose(f"status code: {original_exception.status_code}")
if original_exception.status_code == 401:
exception_mapping_worked = True
raise AuthenticationError(
message=f"AlephAlphaException - {original_exception.message}",
llm_provider="aleph_alpha",
model=model
)
elif original_exception.status_code == 400:
exception_mapping_worked = True
raise BadRequestError(
message=f"AlephAlphaException - {original_exception.message}",
llm_provider="aleph_alpha",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 429:
exception_mapping_worked = True
raise RateLimitError(
message=f"AlephAlphaException - {original_exception.message}",
llm_provider="aleph_alpha",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 500:
exception_mapping_worked = True
raise ServiceUnavailableError(
message=f"AlephAlphaException - {original_exception.message}",
llm_provider="aleph_alpha",
model=model,
response=original_exception.response
)
raise original_exception
raise original_exception
elif custom_llm_provider == "ollama":
if "no attribute 'async_get_ollama_response_stream" in error_str:
exception_mapping_worked = True
raise ImportError("Import error - trying to use async for ollama. import async_generator failed. Try 'pip install async_generator'")
if isinstance(original_exception, dict):
error_str = original_exception.get("error", "")
else:
error_str = str(original_exception)
if "no such file or directory" in error_str:
exception_mapping_worked = True
raise BadRequestError(
message=f"OllamaException: Invalid Model/Model not loaded - {original_exception}",
model=model,
llm_provider="ollama",
response=original_exception.response
)
elif "Failed to establish a new connection" in error_str:
exception_mapping_worked = True
raise ServiceUnavailableError(
message=f"OllamaException: {original_exception}",
llm_provider="ollama",
model=model,
response=original_exception.response
)
elif "Invalid response object from API" in error_str:
exception_mapping_worked = True
raise BadRequestError(
message=f"OllamaException: {original_exception}",
llm_provider="ollama",
model=model,
response=original_exception.response
)
elif custom_llm_provider == "vllm":
if hasattr(original_exception, "status_code"):
if original_exception.status_code == 0:
exception_mapping_worked = True
raise APIConnectionError(
message=f"VLLMException - {original_exception.message}",
llm_provider="vllm",
model=model,
request=original_exception.request
)
elif custom_llm_provider == "azure":
if "This model's maximum context length is" in error_str:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=f"AzureException - {original_exception.message}",
llm_provider="azure",
model=model,
response=original_exception.response
)
elif "invalid_request_error" in error_str:
exception_mapping_worked = True
raise BadRequestError(
message=f"AzureException - {original_exception.message}",
llm_provider="azure",
model=model,
response=original_exception.response
)
elif hasattr(original_exception, "status_code"):
exception_mapping_worked = True
if original_exception.status_code == 401:
exception_mapping_worked = True
raise AuthenticationError(
message=f"AzureException - {original_exception.message}",
llm_provider="azure",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 408:
exception_mapping_worked = True
raise Timeout(
message=f"AzureException - {original_exception.message}",
model=model,
llm_provider="azure",
request=original_exception.request
)
if original_exception.status_code == 422:
exception_mapping_worked = True
raise BadRequestError(
message=f"AzureException - {original_exception.message}",
model=model,
llm_provider="azure",
response=original_exception.response
)
elif original_exception.status_code == 429:
exception_mapping_worked = True
raise RateLimitError(
message=f"AzureException - {original_exception.message}",
model=model,
llm_provider="azure",
response=original_exception.response
)
else:
exception_mapping_worked = True
raise APIError(
status_code=original_exception.status_code,
message=f"AzureException - {original_exception.message}",
llm_provider="azure",
model=model,
request=original_exception.request
)
if "BadRequestError.__init__() missing 1 required positional argument: 'param'" in str(original_exception): # deal with edge-case invalid request error bug in openai-python sdk
exception_mapping_worked = True
raise BadRequestError(
message=f"OpenAIException: This can happen due to missing AZURE_API_VERSION: {str(original_exception)}",
model=model,
llm_provider=custom_llm_provider,
response=original_exception.response
)
else: # ensure generic errors always return APIConnectionError=
exception_mapping_worked = True
if hasattr(original_exception, "request"):
raise APIConnectionError(
message=f"{str(original_exception)}",
llm_provider=custom_llm_provider,
model=model,
request=original_exception.request
)
else:
raise APIConnectionError(
message=f"{str(original_exception)}",
llm_provider=custom_llm_provider,
model=model,
request= httpx.Request(method="POST", url="https://api.openai.com/v1/") # stub the request
)
except Exception as e:
# LOGGING
exception_logging(
logger_fn=user_logger_fn,
additional_args={
"exception_mapping_worked": exception_mapping_worked,
"original_exception": original_exception,
},
exception=e,
)
## AUTH ERROR
if isinstance(e, AuthenticationError) and (
litellm.email or "LITELLM_EMAIL" in os.environ
):
threading.Thread(target=get_all_keys, args=(e.llm_provider,)).start()
# don't let an error with mapping interrupt the user from receiving an error from the llm api calls
if exception_mapping_worked:
raise e
else:
raise original_exception
####### CRASH REPORTING ################
def safe_crash_reporting(model=None, exception=None, custom_llm_provider=None):
data = {
"model": model,
"exception": str(exception),
"custom_llm_provider": custom_llm_provider,
}
threading.Thread(target=litellm_telemetry, args=(data,), daemon=True).start()
def get_or_generate_uuid():
temp_dir = os.path.join(os.path.abspath(os.sep), "tmp")
uuid_file = os.path.join(temp_dir, "litellm_uuid.txt")
try:
# Try to open the file and load the UUID
with open(uuid_file, "r") as file:
uuid_value = file.read()
if uuid_value:
uuid_value = uuid_value.strip()
else:
raise FileNotFoundError
except FileNotFoundError:
# Generate a new UUID if the file doesn't exist or is empty
try:
new_uuid = uuid.uuid4()
uuid_value = str(new_uuid)
with open(uuid_file, "w") as file:
file.write(uuid_value)
except: # if writing to tmp/litellm_uuid.txt then retry writing to litellm_uuid.txt
try:
new_uuid = uuid.uuid4()
uuid_value = str(new_uuid)
with open("litellm_uuid.txt", "w") as file:
file.write(uuid_value)
except: # if this 3rd attempt fails just pass
# Good first issue for someone to improve this function :)
return
except:
# [Non-Blocking Error]
return
return uuid_value
def litellm_telemetry(data):
# Load or generate the UUID
uuid_value = ""
try:
uuid_value = get_or_generate_uuid()
except:
uuid_value = str(uuid.uuid4())
try:
# Prepare the data to send to litellm logging api
try:
pkg_version = importlib.metadata.version("litellm")
except:
pkg_version = None
if "model" not in data:
data["model"] = None
payload = {
"uuid": uuid_value,
"data": data,
"version:": pkg_version
}
# Make the POST request to litellm logging api
response = requests.post(
"https://litellm-logging.onrender.com/logging",
headers={"Content-Type": "application/json"},
json=payload,
)
response.raise_for_status() # Raise an exception for HTTP errors
except:
# [Non-Blocking Error]
return
######### Secret Manager ############################
# checks if user has passed in a secret manager client
# if passed in then checks the secret there
def get_secret(secret_name):
if litellm.secret_manager_client != None:
# TODO: check which secret manager is being used
# currently only supports Infisical
try:
secret = litellm.secret_manager_client.get_secret(secret_name).secret_value
except:
secret = None
return secret
else:
return os.environ.get(secret_name)
######## Streaming Class ############################
# wraps the completion stream to return the correct format for the model
# replicate/anthropic/cohere
class CustomStreamWrapper:
def __init__(self, completion_stream, model, custom_llm_provider=None, logging_obj=None):
self.model = model
self.custom_llm_provider = custom_llm_provider
self.logging_obj = logging_obj
self.completion_stream = completion_stream
self.sent_first_chunk = False
self.sent_last_chunk = False
self.special_tokens = ["<|assistant|>", "<|system|>", "<|user|>", "<s>", "</s>"]
self.holding_chunk = ""
if self.logging_obj:
# Log the type of the received item
self.logging_obj.post_call(str(type(completion_stream)))
def __iter__(self):
return self
def __aiter__(self):
return self
def logging(self, text):
if self.logging_obj:
self.logging_obj.post_call(text)
def check_special_tokens(self, chunk: str):
hold = False
if self.sent_first_chunk is True:
return hold, chunk
curr_chunk = self.holding_chunk + chunk
curr_chunk = curr_chunk.strip()
for token in self.special_tokens:
if len(curr_chunk) < len(token) and curr_chunk in token:
hold = True
elif len(curr_chunk) >= len(token):
if token in curr_chunk:
self.holding_chunk = curr_chunk.replace(token, "")
hold = True
else:
pass
if hold is False: # reset
self.holding_chunk = ""
return hold, curr_chunk
def handle_anthropic_chunk(self, chunk):
str_line = chunk.decode("utf-8") # Convert bytes to string
text = ""
is_finished = False
finish_reason = None
if str_line.startswith("data:"):
data_json = json.loads(str_line[5:])
text = data_json.get("completion", "")
if data_json.get("stop_reason", None):
is_finished = True
finish_reason = data_json["stop_reason"]
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
elif "error" in str_line:
raise ValueError(f"Unable to parse response. Original response: {str_line}")
else:
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
def handle_together_ai_chunk(self, chunk):
chunk = chunk.decode("utf-8")
text = ""
is_finished = False
finish_reason = None
if "text" in chunk:
text_index = chunk.find('"text":"') # this checks if text: exists
text_start = text_index + len('"text":"')
text_end = chunk.find('"}', text_start)
if text_index != -1 and text_end != -1:
extracted_text = chunk[text_start:text_end]
text = extracted_text
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
elif "[DONE]" in chunk:
return {"text": text, "is_finished": True, "finish_reason": "stop"}
elif "error" in chunk:
raise ValueError(chunk)
else:
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
def handle_huggingface_chunk(self, chunk):
try:
if type(chunk) != str:
chunk = chunk.decode("utf-8") # DO NOT REMOVE this: This is required for HF inference API + Streaming
text = ""
is_finished = False
finish_reason = ""
print_verbose(f"chunk: {chunk}")
if chunk.startswith("data:"):
data_json = json.loads(chunk[5:])
print_verbose(f"data json: {data_json}")
if "token" in data_json and "text" in data_json["token"]:
text = data_json["token"]["text"]
if data_json.get("details", False) and data_json["details"].get("finish_reason", False):
is_finished = True
finish_reason = data_json["details"]["finish_reason"]
elif data_json.get("generated_text", False): # if full generated text exists, then stream is complete
text = "" # don't return the final bos token
is_finished = True
finish_reason = "stop"
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
elif "error" in chunk:
raise ValueError(chunk)
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
except Exception as e:
traceback.print_exc()
# raise(e)
def handle_ai21_chunk(self, chunk): # fake streaming
chunk = chunk.decode("utf-8")
data_json = json.loads(chunk)
try:
text = data_json["completions"][0]["data"]["text"]
is_finished = True
finish_reason = "stop"
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
except:
raise ValueError(f"Unable to parse response. Original response: {chunk}")
def handle_maritalk_chunk(self, chunk): # fake streaming
chunk = chunk.decode("utf-8")
data_json = json.loads(chunk)
try:
text = data_json["answer"]
is_finished = True
finish_reason = "stop"
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
except:
raise ValueError(f"Unable to parse response. Original response: {chunk}")
def handle_nlp_cloud_chunk(self, chunk):
chunk = chunk.decode("utf-8")
data_json = json.loads(chunk)
try:
text = data_json["generated_text"]
is_finished = True
finish_reason = "stop"
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
except:
raise ValueError(f"Unable to parse response. Original response: {chunk}")
def handle_aleph_alpha_chunk(self, chunk):
chunk = chunk.decode("utf-8")
data_json = json.loads(chunk)
try:
text = data_json["completions"][0]["completion"]
is_finished = True
finish_reason = "stop"
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
except:
raise ValueError(f"Unable to parse response. Original response: {chunk}")
def handle_cohere_chunk(self, chunk):
chunk = chunk.decode("utf-8")
data_json = json.loads(chunk)
try:
text = ""
is_finished = False
finish_reason = ""
if "text" in data_json:
text = data_json["text"]
elif "is_finished" in data_json:
is_finished = data_json["is_finished"]
finish_reason = data_json["finish_reason"]
else:
raise Exception(data_json)
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
except:
raise ValueError(f"Unable to parse response. Original response: {chunk}")
def handle_azure_chunk(self, chunk):
is_finished = False
finish_reason = ""
text = ""
print_verbose(f"chunk: {chunk}")
if "data: [DONE]" in chunk:
text = ""
is_finished = True
finish_reason = "stop"
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
elif chunk.startswith("data:"):
data_json = json.loads(chunk[5:]) # chunk.startswith("data:"):
try:
if len(data_json["choices"]) > 0:
text = data_json["choices"][0]["delta"].get("content", "")
if data_json["choices"][0].get("finish_reason", None):
is_finished = True
finish_reason = data_json["choices"][0]["finish_reason"]
print_verbose(f"text: {text}; is_finished: {is_finished}; finish_reason: {finish_reason}")
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
except:
raise ValueError(f"Unable to parse response. Original response: {chunk}")
elif "error" in chunk:
raise ValueError(f"Unable to parse response. Original response: {chunk}")
else:
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
def handle_replicate_chunk(self, chunk):
try:
text = ""
is_finished = False
finish_reason = ""
if "output" in chunk:
text = chunk['output']
if "status" in chunk:
if chunk["status"] == "succeeded":
is_finished = True
finish_reason = "stop"
elif chunk.get("error", None):
raise Exception(chunk["error"])
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
except:
raise ValueError(f"Unable to parse response. Original response: {chunk}")
def handle_openai_chat_completion_chunk(self, chunk):
try:
print_verbose(f"\nRaw OpenAI Chunk\n{chunk}\n")
str_line = chunk
text = ""
is_finished = False
finish_reason = None
original_chunk = None # this is used for function/tool calling
if len(str_line.choices) > 0:
if str_line.choices[0].delta.content is not None:
text = str_line.choices[0].delta.content
else: # function/tool calling chunk - when content is None. in this case we just return the original chunk from openai
original_chunk = str_line
if str_line.choices[0].finish_reason:
is_finished = True
finish_reason = str_line.choices[0].finish_reason
return {
"text": text,
"is_finished": is_finished,
"finish_reason": finish_reason,
"original_chunk": str_line
}
except Exception as e:
traceback.print_exc()
raise e
def handle_openai_text_completion_chunk(self, chunk):
try:
str_line = chunk
text = ""
is_finished = False
finish_reason = None
print_verbose(f"str_line: {str_line}")
if "data: [DONE]" in str_line:
text = ""
is_finished = True
finish_reason = "stop"
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
elif str_line.startswith("data:"):
data_json = json.loads(str_line[5:])
print_verbose(f"delta content: {data_json}")
text = data_json["choices"][0].get("text", "")
if data_json["choices"][0].get("finish_reason", None):
is_finished = True
finish_reason = data_json["choices"][0]["finish_reason"]
print_verbose(f"text: {text}; is_finished: {is_finished}; finish_reason: {finish_reason}")
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
elif "error" in str_line:
raise ValueError(f"Unable to parse response. Original response: {str_line}")
else:
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
except Exception as e:
traceback.print_exc()
raise e
def handle_baseten_chunk(self, chunk):
try:
chunk = chunk.decode("utf-8")
if len(chunk) > 0:
if chunk.startswith("data:"):
data_json = json.loads(chunk[5:])
if "token" in data_json and "text" in data_json["token"]:
return data_json["token"]["text"]
else:
return ""
data_json = json.loads(chunk)
if "model_output" in data_json:
if isinstance(data_json["model_output"], dict) and "data" in data_json["model_output"] and isinstance(data_json["model_output"]["data"], list):
return data_json["model_output"]["data"][0]
elif isinstance(data_json["model_output"], str):
return data_json["model_output"]
elif "completion" in data_json and isinstance(data_json["completion"], str):
return data_json["completion"]
else:
raise ValueError(f"Unable to parse response. Original response: {chunk}")
else:
return ""
else:
return ""
except:
traceback.print_exc()
return ""
def handle_bedrock_stream(self, chunk):
chunk = chunk.get('chunk')
if chunk:
chunk_data = json.loads(chunk.get('bytes').decode())
text = ""
is_finished = False
finish_reason = ""
if "outputText" in chunk_data:
text = chunk_data['outputText']
# anthropic mapping
elif "completion" in chunk_data:
text = chunk_data['completion'] # bedrock.anthropic
stop_reason = chunk_data.get("stop_reason", None)
if stop_reason != None:
is_finished = True
finish_reason = stop_reason
######## bedrock.cohere mappings ###############
# meta mapping
elif "generation" in chunk_data:
text = chunk_data['generation'] # bedrock.meta
# cohere mapping
elif "text" in chunk_data:
text = chunk_data["text"] # bedrock.cohere
# cohere mapping for finish reason
elif "finish_reason" in chunk_data:
finish_reason = chunk_data["finish_reason"]
is_finished = True
elif chunk_data.get("completionReason", None):
is_finished = True
finish_reason = chunk_data["completionReason"]
elif chunk.get("error", None):
raise Exception(chunk["error"])
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
return ""
def chunk_creator(self, chunk):
model_response = ModelResponse(stream=True, model=self.model)
model_response.choices[0].finish_reason = None
response_obj = None
try:
# return this for all models
completion_obj = {"content": ""}
if self.custom_llm_provider and self.custom_llm_provider == "anthropic":
response_obj = self.handle_anthropic_chunk(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
elif self.model == "replicate" or self.custom_llm_provider == "replicate":
response_obj = self.handle_replicate_chunk(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
elif (
self.custom_llm_provider and self.custom_llm_provider == "together_ai"):
response_obj = self.handle_together_ai_chunk(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
elif self.custom_llm_provider and self.custom_llm_provider == "huggingface":
response_obj = self.handle_huggingface_chunk(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
elif self.custom_llm_provider and self.custom_llm_provider == "baseten": # baseten doesn't provide streaming
completion_obj["content"] = self.handle_baseten_chunk(chunk)
elif self.custom_llm_provider and self.custom_llm_provider == "ai21": #ai21 doesn't provide streaming
response_obj = self.handle_ai21_chunk(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
elif self.custom_llm_provider and self.custom_llm_provider == "maritalk":
response_obj = self.handle_maritalk_chunk(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
elif self.custom_llm_provider and self.custom_llm_provider == "vllm":
completion_obj["content"] = chunk[0].outputs[0].text
elif self.custom_llm_provider and self.custom_llm_provider == "aleph_alpha": #aleph alpha doesn't provide streaming
response_obj = self.handle_aleph_alpha_chunk(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
elif self.model in litellm.nlp_cloud_models or self.custom_llm_provider == "nlp_cloud":
try:
response_obj = self.handle_nlp_cloud_chunk(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
except Exception as e:
if self.sent_last_chunk:
raise e
else:
if self.sent_first_chunk is False:
raise Exception("An unknown error occurred with the stream")
model_response.choices[0].finish_reason = "stop"
self.sent_last_chunk = True
elif self.custom_llm_provider and self.custom_llm_provider == "vertex_ai":
try:
completion_obj["content"] = str(chunk)
except StopIteration as e:
if self.sent_last_chunk:
raise e
else:
model_response.choices[0].finish_reason = "stop"
self.sent_last_chunk = True
elif self.custom_llm_provider == "cohere":
response_obj = self.handle_cohere_chunk(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
elif self.custom_llm_provider == "bedrock":
response_obj = self.handle_bedrock_stream(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
elif self.custom_llm_provider == "sagemaker":
if len(self.completion_stream)==0:
if self.sent_last_chunk:
raise StopIteration
else:
model_response.choices[0].finish_reason = "stop"
self.sent_last_chunk = True
chunk_size = 30
new_chunk = self.completion_stream[:chunk_size]
completion_obj["content"] = new_chunk
self.completion_stream = self.completion_stream[chunk_size:]
time.sleep(0.05)
elif self.custom_llm_provider == "petals":
if len(self.completion_stream)==0:
if self.sent_last_chunk:
raise StopIteration
else:
model_response.choices[0].finish_reason = "stop"
self.sent_last_chunk = True
chunk_size = 30
new_chunk = self.completion_stream[:chunk_size]
completion_obj["content"] = new_chunk
self.completion_stream = self.completion_stream[chunk_size:]
time.sleep(0.05)
elif self.custom_llm_provider == "palm":
# fake streaming
if len(self.completion_stream)==0:
if self.sent_last_chunk:
raise StopIteration
else:
model_response.choices[0].finish_reason = "stop"
self.sent_last_chunk = True
chunk_size = 30
new_chunk = self.completion_stream[:chunk_size]
completion_obj["content"] = new_chunk
self.completion_stream = self.completion_stream[chunk_size:]
time.sleep(0.05)
elif self.custom_llm_provider == "ollama":
if "error" in chunk:
exception_type(model=self.model, custom_llm_provider=self.custom_llm_provider, original_exception=chunk["error"])
completion_obj = chunk
elif self.custom_llm_provider == "text-completion-openai":
response_obj = self.handle_openai_text_completion_chunk(chunk)
completion_obj["content"] = response_obj["text"]
print_verbose(f"completion obj content: {completion_obj['content']}")
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
else: # openai chat model
response_obj = self.handle_openai_chat_completion_chunk(chunk)
if response_obj == None:
return
completion_obj["content"] = response_obj["text"]
print_verbose(f"completion obj content: {completion_obj['content']}")
print_verbose(f"len(completion_obj['content']: {len(completion_obj['content'])}")
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
model_response.model = self.model
print_verbose(f"model_response: {model_response}; completion_obj: {completion_obj}")
print_verbose(f"model_response finish reason 3: {model_response.choices[0].finish_reason}")
if len(completion_obj["content"]) > 0: # cannot set content of an OpenAI Object to be an empty string
hold, model_response_str = self.check_special_tokens(completion_obj["content"])
if hold is False:
completion_obj["content"] = model_response_str
if self.sent_first_chunk == False:
completion_obj["role"] = "assistant"
self.sent_first_chunk = True
model_response.choices[0].delta = Delta(**completion_obj)
# LOGGING
threading.Thread(target=self.logging_obj.success_handler, args=(model_response,)).start()
return model_response
else:
return
elif response_obj is not None and response_obj.get("original_chunk", None) is not None: # function / tool calling branch - only set for openai/azure compatible endpoints
# enter this branch when no content has been passed in response
original_chunk = response_obj.get("original_chunk", None)
model_response.id = original_chunk.id
try:
delta = dict(original_chunk.choices[0].delta)
model_response.choices[0].delta = Delta(**delta)
except:
model_response.choices[0].delta = Delta()
model_response.system_fingerprint = original_chunk.system_fingerprint
if self.sent_first_chunk == False:
model_response.choices[0].delta["role"] = "assistant"
self.sent_first_chunk = True
threading.Thread(target=self.logging_obj.success_handler, args=(model_response,)).start() # log response
return model_response
elif model_response.choices[0].finish_reason:
model_response.choices[0].finish_reason = map_finish_reason(model_response.choices[0].finish_reason) # ensure consistent output to openai
# LOGGING
threading.Thread(target=self.logging_obj.success_handler, args=(model_response,)).start()
return model_response
else:
return
except StopIteration:
raise StopIteration
except Exception as e:
traceback_exception = traceback.format_exc()
e.message = str(e)
# LOG FAILURE - handle streaming failure logging in the _next_ object, remove `handle_failure` once it's deprecated
threading.Thread(target=self.logging_obj.failure_handler, args=(e, traceback_exception)).start()
raise exception_type(model=self.model, custom_llm_provider=self.custom_llm_provider, original_exception=e)
## needs to handle the empty string case (even starting chunk can be an empty string)
def __next__(self):
try:
while True:
if isinstance(self.completion_stream, str):
chunk = self.completion_stream
else:
chunk = next(self.completion_stream)
print_verbose(f"chunk in __next__: {chunk}")
if chunk is not None:
response = self.chunk_creator(chunk=chunk)
print_verbose(f"response in __next__: {response}")
if response is not None:
return response
except StopIteration:
raise # Re-raise StopIteration
except Exception as e:
# Handle other exceptions if needed
raise e
async def __anext__(self):
try:
if (self.custom_llm_provider == "openai"
or self.custom_llm_provider == "azure"
or self.custom_llm_provider == "custom_openai"
or self.custom_llm_provider == "text-completion-openai"
or self.custom_llm_provider == "huggingface"):
async for chunk in self.completion_stream:
if chunk == "None" or chunk is None:
raise Exception
processed_chunk = self.chunk_creator(chunk=chunk)
if processed_chunk is None:
continue
return processed_chunk
raise StopAsyncIteration
else: # temporary patch for non-aiohttp async calls
return next(self)
except Exception as e:
# Handle any exceptions that might occur during streaming
raise StopAsyncIteration
class TextCompletionStreamWrapper:
def __init__(self, completion_stream, model):
self.completion_stream = completion_stream
self.model = model
def __iter__(self):
return self
def __aiter__(self):
return self
def __next__(self):
# model_response = ModelResponse(stream=True, model=self.model)
response = TextCompletionResponse()
try:
while True: # loop until a non-empty string is found
# return this for all models
chunk = next(self.completion_stream)
response["id"] = chunk.get("id", None)
response["object"] = "text_completion"
response["created"] = response.get("created", None)
response["model"] = response.get("model", None)
text_choices = TextChoices()
text_choices["text"] = chunk["choices"][0]["delta"]["content"]
text_choices["index"] = response["choices"][0]["index"]
text_choices["finish_reason"] = response["choices"][0]["finish_reason"]
response["choices"] = [text_choices]
return response
except StopIteration:
raise StopIteration
except Exception as e:
print(f"got exception {e}") # noqa
async def __anext__(self):
try:
return next(self)
except StopIteration:
raise StopAsyncIteration
def mock_completion_streaming_obj(model_response, mock_response, model):
for i in range(0, len(mock_response), 3):
completion_obj = {"role": "assistant", "content": mock_response[i: i+3]}
model_response.choices[0].delta = completion_obj
yield model_response
########## Reading Config File ############################
def read_config_args(config_path) -> dict:
try:
import os
current_path = os.getcwd()
with open(config_path, "r") as config_file:
config = json.load(config_file)
# read keys/ values from config file and return them
return config
except Exception as e:
raise e
########## experimental completion variants ############################
def completion_with_config(config: Union[dict, str], **kwargs):
"""
Generate a litellm.completion() using a config dict and all supported completion args
Example config;
config = {
"default_fallback_models": # [Optional] List of model names to try if a call fails
"available_models": # [Optional] List of all possible models you could call
"adapt_to_prompt_size": # [Optional] True/False - if you want to select model based on prompt size (will pick from available_models)
"model": {
"model-name": {
"needs_moderation": # [Optional] True/False - if you want to call openai moderations endpoint before making completion call. Will raise exception, if flagged.
"error_handling": {
"error-type": { # One of the errors listed here - https://docs.litellm.ai/docs/exception_mapping#custom-mapping-list
"fallback_model": "" # str, name of the model it should try instead, when that error occurs
}
}
}
}
}
Parameters:
config (Union[dict, str]): A configuration for litellm
**kwargs: Additional keyword arguments for litellm.completion
Returns:
litellm.ModelResponse: A ModelResponse with the generated completion
"""
if config is not None:
if isinstance(config, str):
config = read_config_args(config)
elif isinstance(config, dict):
config = config
else:
raise Exception("Config path must be a string or a dictionary.")
else:
raise Exception("Config path not passed in.")
if config is None:
raise Exception("No completion config in the config file")
models_with_config = config["model"].keys()
model = kwargs["model"]
messages = kwargs["messages"]
## completion config
fallback_models = config.get("default_fallback_models", None)
available_models = config.get("available_models", None)
adapt_to_prompt_size = config.get("adapt_to_prompt_size", False)
trim_messages_flag = config.get("trim_messages", False)
prompt_larger_than_model = False
max_model = model
try:
max_tokens = litellm.get_max_tokens(model)["max_tokens"]
except:
max_tokens = 2048 # assume curr model's max window is 2048 tokens
if adapt_to_prompt_size:
## Pick model based on token window
prompt_tokens = litellm.token_counter(model="gpt-3.5-turbo", text="".join(message["content"] for message in messages))
try:
curr_max_tokens = litellm.get_max_tokens(model)["max_tokens"]
except:
curr_max_tokens = 2048
if curr_max_tokens < prompt_tokens:
prompt_larger_than_model = True
for available_model in available_models:
try:
curr_max_tokens = litellm.get_max_tokens(available_model)["max_tokens"]
if curr_max_tokens > max_tokens:
max_tokens = curr_max_tokens
max_model = available_model
if curr_max_tokens > prompt_tokens:
model = available_model
prompt_larger_than_model = False
except:
continue
if prompt_larger_than_model:
messages = trim_messages(messages=messages, model=max_model)
kwargs["messages"] = messages
kwargs["model"] = model
try:
if model in models_with_config:
## Moderation check
if config["model"][model].get("needs_moderation"):
input = " ".join(message["content"] for message in messages)
response = litellm.moderation(input=input)
flagged = response["results"][0]["flagged"]
if flagged:
raise Exception("This response was flagged as inappropriate")
## Model-specific Error Handling
error_handling = None
if config["model"][model].get("error_handling"):
error_handling = config["model"][model]["error_handling"]
try:
response = litellm.completion(**kwargs)
return response
except Exception as e:
exception_name = type(e).__name__
fallback_model = None
if error_handling and exception_name in error_handling:
error_handler = error_handling[exception_name]
# either switch model or api key
fallback_model = error_handler.get("fallback_model", None)
if fallback_model:
kwargs["model"] = fallback_model
return litellm.completion(**kwargs)
raise e
else:
return litellm.completion(**kwargs)
except Exception as e:
if fallback_models:
model = fallback_models.pop(0)
return completion_with_fallbacks(model=model, messages=messages, fallbacks=fallback_models)
raise e
def completion_with_fallbacks(**kwargs):
nested_kwargs = kwargs.pop("kwargs", {})
response = None
rate_limited_models = set()
model_expiration_times = {}
start_time = time.time()
original_model = kwargs["model"]
fallbacks = [kwargs["model"]] + nested_kwargs.get("fallbacks", [])
if "fallbacks" in nested_kwargs:
del nested_kwargs["fallbacks"] # remove fallbacks so it's not recursive
litellm_call_id = str(uuid.uuid4())
# max time to process a request with fallbacks: default 45s
while response == None and time.time() - start_time < 45:
for model in fallbacks:
# loop thru all models
try:
# check if it's dict or new model string
if isinstance(model, dict): # completion(model="gpt-4", fallbacks=[{"api_key": "", "api_base": ""}, {"api_key": "", "api_base": ""}])
kwargs["api_key"] = model.get("api_key", None)
kwargs["api_base"] = model.get("api_base", None)
model = model.get("model", original_model)
elif (
model in rate_limited_models
): # check if model is currently cooling down
if (
model_expiration_times.get(model)
and time.time() >= model_expiration_times[model]
):
rate_limited_models.remove(
model
) # check if it's been 60s of cool down and remove model
else:
continue # skip model
# delete model from kwargs if it exists
if kwargs.get("model"):
del kwargs["model"]
print_verbose(f"trying to make completion call with model: {model}")
kwargs["litellm_call_id"] = litellm_call_id
kwargs = {**kwargs, **nested_kwargs} # combine the openai + litellm params at the same level
response = litellm.completion(**kwargs, model=model)
print_verbose(f"response: {response}")
if response != None:
return response
except Exception as e:
print_verbose(e)
rate_limited_models.add(model)
model_expiration_times[model] = (
time.time() + 60
) # cool down this selected model
pass
return response
def process_system_message(system_message, max_tokens, model):
system_message_event = {"role": "system", "content": system_message}
system_message_tokens = get_token_count([system_message_event], model)
if system_message_tokens > max_tokens:
print_verbose("`tokentrimmer`: Warning, system message exceeds token limit. Trimming...")
# shorten system message to fit within max_tokens
new_system_message = shorten_message_to_fit_limit(system_message_event, max_tokens, model)
system_message_tokens = get_token_count([new_system_message], model)
return system_message_event, max_tokens - system_message_tokens
def process_messages(messages, max_tokens, model):
# Process messages from older to more recent
messages = messages[::-1]
final_messages = []
for message in messages:
used_tokens = get_token_count(final_messages, model)
available_tokens = max_tokens - used_tokens
if available_tokens <= 3:
break
final_messages = attempt_message_addition(final_messages=final_messages, message=message, available_tokens=available_tokens, max_tokens=max_tokens, model=model)
return final_messages
def attempt_message_addition(final_messages, message, available_tokens, max_tokens, model):
temp_messages = [message] + final_messages
temp_message_tokens = get_token_count(messages=temp_messages, model=model)
if temp_message_tokens <= max_tokens:
return temp_messages
# if temp_message_tokens > max_tokens, try shortening temp_messages
elif "function_call" not in message:
# fit updated_message to be within temp_message_tokens - max_tokens (aka the amount temp_message_tokens is greate than max_tokens)
updated_message = shorten_message_to_fit_limit(message, available_tokens, model)
if can_add_message(updated_message, final_messages, max_tokens, model):
return [updated_message] + final_messages
return final_messages
def can_add_message(message, messages, max_tokens, model):
if get_token_count(messages + [message], model) <= max_tokens:
return True
return False
def get_token_count(messages, model):
return token_counter(model=model, messages=messages)
def shorten_message_to_fit_limit(
message,
tokens_needed,
model):
"""
Shorten a message to fit within a token limit by removing characters from the middle.
"""
# For OpenAI models, even blank messages cost 7 token,
# and if the buffer is less than 3, the while loop will never end,
# hence the value 10.
if 'gpt' in model and tokens_needed <= 10:
return message
content = message["content"]
while True:
total_tokens = get_token_count([message], model)
if total_tokens <= tokens_needed:
break
ratio = (tokens_needed) / total_tokens
new_length = int(len(content) * ratio) -1
new_length = max(0, new_length)
half_length = new_length // 2
left_half = content[:half_length]
right_half = content[-half_length:]
trimmed_content = left_half + '..' + right_half
message["content"] = trimmed_content
content = trimmed_content
return message
# LiteLLM token trimmer
# this code is borrowed from https://github.com/KillianLucas/tokentrim/blob/main/tokentrim/tokentrim.py
# Credits for this code go to Killian Lucas
def trim_messages(
messages,
model: Optional[str] = None,
trim_ratio: float = 0.75,
return_response_tokens: bool = False,
max_tokens = None
):
"""
Trim a list of messages to fit within a model's token limit.
Args:
messages: Input messages to be trimmed. Each message is a dictionary with 'role' and 'content'.
model: The LiteLLM model being used (determines the token limit).
trim_ratio: Target ratio of tokens to use after trimming. Default is 0.75, meaning it will trim messages so they use about 75% of the model's token limit.
return_response_tokens: If True, also return the number of tokens left available for the response after trimming.
max_tokens: Instead of specifying a model or trim_ratio, you can specify this directly.
Returns:
Trimmed messages and optionally the number of tokens available for response.
"""
# Initialize max_tokens
# if users pass in max tokens, trim to this amount
messages = copy.deepcopy(messages)
try:
print_verbose(f"trimming messages")
if max_tokens == None:
# Check if model is valid
if model in litellm.model_cost:
max_tokens_for_model = litellm.model_cost[model]['max_tokens']
max_tokens = int(max_tokens_for_model * trim_ratio)
else:
# if user did not specify max tokens
# or passed an llm litellm does not know
# do nothing, just return messages
return
system_message = ""
for message in messages:
if message["role"] == "system":
system_message += '\n' if system_message else ''
system_message += message["content"]
current_tokens = token_counter(model=model, messages=messages)
print_verbose(f"Current tokens: {current_tokens}, max tokens: {max_tokens}")
# Do nothing if current tokens under messages
if current_tokens < max_tokens:
return messages
#### Trimming messages if current_tokens > max_tokens
print_verbose(f"Need to trim input messages: {messages}, current_tokens{current_tokens}, max_tokens: {max_tokens}")
if system_message:
system_message_event, max_tokens = process_system_message(system_message=system_message, max_tokens=max_tokens, model=model)
if max_tokens == 0: # the system messages are too long
return [system_message_event]
# Since all system messages are combined and trimmed to fit the max_tokens,
# we remove all system messages from the messages list
messages = [message for message in messages if message["role"] != "system"]
final_messages = process_messages(messages=messages, max_tokens=max_tokens, model=model)
# Add system message to the beginning of the final messages
if system_message:
final_messages = [system_message_event] + final_messages
if return_response_tokens: # if user wants token count with new trimmed messages
response_tokens = max_tokens - get_token_count(final_messages, model)
return final_messages, response_tokens
return final_messages
except Exception as e: # [NON-Blocking, if error occurs just return final_messages
print_verbose(f"Got exception while token trimming{e}")
return messages
def get_valid_models():
"""
Returns a list of valid LLMs based on the set environment variables
Args:
None
Returns:
A list of valid LLMs
"""
try:
# get keys set in .env
environ_keys = os.environ.keys()
valid_providers = []
# for all valid providers, make a list of supported llms
valid_models = []
for provider in litellm.provider_list:
# edge case litellm has together_ai as a provider, it should be togetherai
provider = provider.replace("_", "")
# litellm standardizes expected provider keys to
# PROVIDER_API_KEY. Example: OPENAI_API_KEY, COHERE_API_KEY
expected_provider_key = f"{provider.upper()}_API_KEY"
if expected_provider_key in environ_keys:
# key is set
valid_providers.append(provider)
for provider in valid_providers:
if provider == "azure":
valid_models.append("Azure-LLM")
else:
models_for_provider = litellm.models_by_provider.get(provider, [])
valid_models.extend(models_for_provider)
return valid_models
except:
return [] # NON-Blocking
# used for litellm.text_completion() to transform HF logprobs to OpenAI.Completion() format
def transform_logprobs(hf_response):
# Initialize an empty list for the transformed logprobs
transformed_logprobs = []
# For each Hugging Face response, transform the logprobs
for response in hf_response:
# Extract the relevant information from the response
response_details = response['details']
top_tokens = response_details.get("top_tokens", {})
# Initialize an empty list for the token information
token_info = {
'tokens': [],
'token_logprobs': [],
'text_offset': [],
'top_logprobs': [],
}
for i, token in enumerate(response_details['prefill']):
# Extract the text of the token
token_text = token['text']
# Extract the logprob of the token
token_logprob = token['logprob']
# Add the token information to the 'token_info' list
token_info['tokens'].append(token_text)
token_info['token_logprobs'].append(token_logprob)
# stub this to work with llm eval harness
top_alt_tokens = { "": -1, "": -2, "": -3 }
token_info['top_logprobs'].append(top_alt_tokens)
# For each element in the 'tokens' list, extract the relevant information
for i, token in enumerate(response_details['tokens']):
# Extract the text of the token
token_text = token['text']
# Extract the logprob of the token
token_logprob = token['logprob']
top_alt_tokens = {}
temp_top_logprobs = []
if top_tokens != {}:
temp_top_logprobs = top_tokens[i]
# top_alt_tokens should look like this: { "alternative_1": -1, "alternative_2": -2, "alternative_3": -3 }
for elem in temp_top_logprobs:
text = elem["text"]
logprob = elem["logprob"]
top_alt_tokens[text] = logprob
# Add the token information to the 'token_info' list
token_info['tokens'].append(token_text)
token_info['token_logprobs'].append(token_logprob)
token_info['top_logprobs'].append(top_alt_tokens)
# Add the text offset of the token
# This is computed as the sum of the lengths of all previous tokens
token_info['text_offset'].append(sum(len(t['text']) for t in response_details['tokens'][:i]))
# Add the 'token_info' list to the 'transformed_logprobs' list
transformed_logprobs = token_info
return transformed_logprobs | [
"Hey, how's it going?",
"0",
"True",
"gpt-3.5-turbo",
"Hey, how's it going",
"input_cost_per_token",
"Hello World",
"None",
"adapt_to_prompt_size",
"default",
"False",
" ",
"application/json",
"content",
"prompt_tokens"
] |
2024-01-10 | LiquidAdTech/Zahara | litellm~tests~test_timeout.py | #### What this tests ####
# This tests the timeout decorator
import sys, os
import traceback
sys.path.insert(
0, os.path.abspath("../..")
) # Adds the parent directory to the system path
import time
import litellm
import openai
import pytest
def test_timeout():
# this Will Raise a timeout
litellm.set_verbose=False
try:
response = litellm.completion(
model="gpt-3.5-turbo",
timeout=0.01,
messages=[
{
"role": "user",
"content": "hello, write a 20 pg essay"
}
]
)
except openai.APITimeoutError as e:
print("Passed: Raised correct exception. Got openai.APITimeoutError\nGood Job", e)
print(type(e))
pass
except Exception as e:
pytest.fail(f"Did not raise error `openai.APITimeoutError`. Instead raised error type: {type(e)}, Error: {e}")
# test_timeout()
def test_timeout_streaming():
# this Will Raise a timeout
litellm.set_verbose=False
try:
response = litellm.completion(
model="gpt-3.5-turbo",
messages=[
{
"role": "user",
"content": "hello, write a 20 pg essay"
}
],
timeout=0.0001,
stream=True,
)
for chunk in response:
print(chunk)
except openai.APITimeoutError as e:
print("Passed: Raised correct exception. Got openai.APITimeoutError\nGood Job", e)
print(type(e))
pass
except Exception as e:
pytest.fail(f"Did not raise error `openai.APITimeoutError`. Instead raised error type: {type(e)}, Error: {e}")
test_timeout_streaming() | [
"hello, write a 20 pg essay"
] |
2024-01-10 | alastairodhiambo/invoice-ai | backend~invoice.py | from dotenv.main import load_dotenv
from utils import get_vendor_name, parse_data, parse_annotations, construct_prompt
import cohere
import glob
import os
from PIL import Image
import pytesseract
load_dotenv()
api_key = os.environ.get("COHERE_API_KEY")
co = cohere.Client(api_key)
dir = "test_set"
test_pdf_dir = os.path.join(dir, "pdf")
test_image_dir = os.path.join(dir, "images")
test_invoices = glob.glob(os.path.join(test_pdf_dir, "*"))
test_invoices.sort()
test_image_paths = glob.glob(os.path.join(test_image_dir, "*"))
test_image_paths.sort()
def extract_invoice(file):
# Get template name by running image classification
template = get_vendor_name(file)
# Collect raw text, annotation of training data
texts = parse_data(template)
annotations = parse_annotations(template)
# Collect all fields to extract
fields = annotations[0].keys()
# # Collect raw text of the document to predict
test_text = pytesseract.image_to_string(Image.open(file))
prompt = construct_prompt(texts, annotations, fields, test_text)
response = co.generate(
model="command",
prompt=prompt,
max_tokens=400,
)
text = response.generations[0].text
return text
| [] |
2024-01-10 | minii93/pysimenv | pysimenv~missile~engagement.py | import numpy as np
import matplotlib.pyplot as plt
from pysimenv.core.base import SimObject
from pysimenv.missile.model import PlanarMissile, PlanarVehicle
from pysimenv.missile.guidance import Guidance2dim
from pysimenv.missile.util import RelKin2dim, CloseDistCond, closest_instant, lin_interp
class Engagement2dim(SimObject):
INTERCEPTED = 1
MISSILE_STOP = 2
IS_OUT_OF_VIEW = 3
def __init__(self, missile: PlanarMissile, target: PlanarVehicle, guidance: Guidance2dim, name="model", **kwargs):
super(Engagement2dim, self).__init__(name=name, **kwargs)
self.missile = missile
self.target = target
self.guidance = guidance
self.rel_kin = RelKin2dim(missile, target)
self.close_dist_cond = CloseDistCond(r_threshold=10.0)
self._add_sim_objs([self.missile, self.target, self.guidance])
# override
def _reset(self):
super(Engagement2dim, self)._reset()
self.close_dist_cond.reset()
# implement
def _forward(self):
self.rel_kin.forward()
self.close_dist_cond.forward(r=self.rel_kin.r)
lam = self.rel_kin.lam
sigma = self.missile.look_angle(lam)
a_M_cmd = self.guidance.forward(self.missile, self.target, self.rel_kin)
self.missile.forward(a_M_cmd=a_M_cmd)
self.target.forward()
self._logger.append(
t=self.time, r=self.rel_kin.r, sigma=sigma, lam=lam, omega=self.rel_kin.omega
)
# implement
def _check_stop_condition(self) -> bool:
to_stop = False
missile_stop = self.missile.check_stop_condition()
if self.intercepted(): # probable interception
to_stop = True
self.flag = self.INTERCEPTED
if missile_stop: # stop due to the missile
to_stop = True
self.flag = self.MISSILE_STOP
return to_stop
def intercepted(self) -> bool:
return self.close_dist_cond.check()
def get_info(self) -> dict:
p_M = self.missile.kin.history('p')
p_T = self.target.kin.history('p')
ind_c, xi_c = closest_instant(p_M, p_T)
p_M_c = lin_interp(p_M[ind_c], p_M[ind_c + 1], xi_c)
p_T_c = lin_interp(p_T[ind_c], p_T[ind_c + 1], xi_c)
d_miss = np.linalg.norm(p_M_c - p_T_c)
gamma_M = self.missile.history('gamma')
gamma_T = self.target.history('gamma')
gamma_M_c = lin_interp(gamma_M[ind_c], gamma_M[ind_c + 1], xi_c)
gamma_T_c = lin_interp(gamma_T[ind_c], gamma_T[ind_c + 1], xi_c)
gamma_imp = gamma_M_c - gamma_T_c
t = self.missile.history('t')
t_imp = lin_interp(t[ind_c], t[ind_c + 1], xi_c)
return {'miss_distance': d_miss, 'impact_angle': gamma_imp, 'impact_time': t_imp}
def report(self):
self.missile.report()
if self.flag == self.INTERCEPTED:
print("[engagement] The target has been intercepted!")
else:
print("[engagement] The target has been missed!")
info = self.get_info()
print("[engagement] Miss distance: {:.6f} (m)".format(info['miss_distance']))
print("[engagement] Impact angle: {:.2f} (deg)".format(np.rad2deg(info['impact_angle'])))
print("[engagement] Impact time: {:.2f} (s) \n".format(info['impact_time']))
def plot_path(self, show=False):
fig_ax = self.missile.plot_path(label='missile')
self.target.plot_path(fig_ax=fig_ax, label='target', show=show)
def plot_rel_kin(self, show=False):
fig_axs = dict()
t = self.history('t')
r = self.history('r')
sigma = self.history('sigma')
lam = self.history('lam')
omega = self.history('omega')
fig, ax = plt.subplots(4, 1, figsize=(6, 8))
ax[0].set_title("Rel. dist")
ax[0].plot(t[:-1], r[:-1], label="Rel. dist")
ax[0].set_xlabel("Time (s)")
ax[0].set_ylabel("r (m)")
ax[0].grid()
ax[1].set_title("Look angle")
ax[1].plot(t[:-1], np.rad2deg(sigma[:-1]), label="look angle")
ax[1].set_xlabel("Time (s)")
ax[1].set_ylabel("sigma (deg)")
ax[1].grid()
ax[2].set_title("LOS angle")
ax[2].plot(t[:-1], np.rad2deg(lam[:-1]), label="LOS angle")
ax[2].set_xlabel("Time (s)")
ax[2].set_ylabel("lambda (deg)")
ax[2].grid()
ax[3].set_title("LOS rate")
ax[3].plot(t[:-1], np.rad2deg(omega[:-1]), label="LOS rate")
ax[3].set_xlabel("Time (s)")
ax[3].set_ylabel("omega (deg/s)")
ax[3].grid()
fig.tight_layout()
fig_axs['Rel. Kin.'] = {'fig': fig, 'ax': ax}
if show:
plt.show()
else:
plt.draw()
plt.pause(0.01)
return fig_axs
| [] |
2024-01-10 | rabdumalikov/stable-baselines3-contrib | sb3_contrib~common~maskable~policies.py | from functools import partial
from typing import Any, Dict, List, Optional, Tuple, Type, Union
import gym
import numpy as np
import torch as th
from stable_baselines3.common.policies import BasePolicy
from stable_baselines3.common.torch_layers import (
BaseFeaturesExtractor,
CombinedExtractor,
FlattenExtractor,
MlpExtractor,
NatureCNN,
)
from stable_baselines3.common.type_aliases import Schedule
from torch import nn
from sb3_contrib.common.maskable.distributions import MaskableDistribution, make_masked_proba_distribution
class MaskableActorCriticPolicy(BasePolicy):
"""
Policy class for actor-critic algorithms (has both policy and value prediction).
Used by A2C, PPO and the likes.
:param observation_space: Observation space
:param action_space: Action space
:param lr_schedule: Learning rate schedule (could be constant)
:param net_arch: The specification of the policy and value networks.
:param activation_fn: Activation function
:param ortho_init: Whether to use or not orthogonal initialization
:param features_extractor_class: Features extractor to use.
:param features_extractor_kwargs: Keyword arguments
to pass to the features extractor.
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
lr_schedule: Schedule,
net_arch: Optional[List[Union[int, Dict[str, List[int]]]]] = None,
activation_fn: Type[nn.Module] = nn.Tanh,
ortho_init: bool = True,
features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
if optimizer_kwargs is None:
optimizer_kwargs = {}
# Small values to avoid NaN in Adam optimizer
if optimizer_class == th.optim.Adam:
optimizer_kwargs["eps"] = 1e-5
super().__init__(
observation_space,
action_space,
features_extractor_class,
features_extractor_kwargs,
optimizer_class=optimizer_class,
optimizer_kwargs=optimizer_kwargs,
squash_output=False,
)
# Default network architecture, from stable-baselines
if net_arch is None:
if features_extractor_class == NatureCNN:
net_arch = []
else:
net_arch = [dict(pi=[64, 64], vf=[64, 64])]
self.net_arch = net_arch
self.activation_fn = activation_fn
self.ortho_init = ortho_init
self.features_extractor = features_extractor_class(self.observation_space, **self.features_extractor_kwargs)
self.features_dim = self.features_extractor.features_dim
self.normalize_images = normalize_images
# Action distribution
self.action_dist = make_masked_proba_distribution(action_space)
self._build(lr_schedule)
def forward(
self,
obs: th.Tensor,
deterministic: bool = False,
action_masks: Optional[np.ndarray] = None,
) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:
"""
Forward pass in all the networks (actor and critic)
:param obs: Observation
:param deterministic: Whether to sample or use deterministic actions
:param action_masks: Action masks to apply to the action distribution
:return: action, value and log probability of the action
"""
# Preprocess the observation if needed
features = self.extract_features(obs)
latent_pi, latent_vf = self.mlp_extractor(features)
# Evaluate the values for the given observations
values = self.value_net(latent_vf)
distribution = self._get_action_dist_from_latent(latent_pi)
if action_masks is not None:
distribution.apply_masking(action_masks)
actions = distribution.get_actions(deterministic=deterministic)
log_prob = distribution.log_prob(actions)
return actions, values, log_prob
def _get_constructor_parameters(self) -> Dict[str, Any]:
data = super()._get_constructor_parameters()
data.update(
dict(
net_arch=self.net_arch,
activation_fn=self.activation_fn,
lr_schedule=self._dummy_schedule, # dummy lr schedule, not needed for loading policy alone
ortho_init=self.ortho_init,
optimizer_class=self.optimizer_class,
optimizer_kwargs=self.optimizer_kwargs,
features_extractor_class=self.features_extractor_class,
features_extractor_kwargs=self.features_extractor_kwargs,
)
)
return data
def _build_mlp_extractor(self) -> None:
"""
Create the policy and value networks.
Part of the layers can be shared.
"""
# Note: If net_arch is None and some features extractor is used,
# net_arch here is an empty list and mlp_extractor does not
# really contain any layers (acts like an identity module).
self.mlp_extractor = MlpExtractor(
self.features_dim,
net_arch=self.net_arch,
activation_fn=self.activation_fn,
device=self.device,
)
def _build(self, lr_schedule: Schedule) -> None:
"""
Create the networks and the optimizer.
:param lr_schedule: Learning rate schedule
lr_schedule(1) is the initial learning rate
"""
self._build_mlp_extractor()
self.action_net = self.action_dist.proba_distribution_net(latent_dim=self.mlp_extractor.latent_dim_pi)
self.value_net = nn.Linear(self.mlp_extractor.latent_dim_vf, 1)
# Init weights: use orthogonal initialization
# with small initial weight for the output
if self.ortho_init:
# TODO: check for features_extractor
# Values from stable-baselines.
# features_extractor/mlp values are
# originally from openai/baselines (default gains/init_scales).
module_gains = {
self.features_extractor: np.sqrt(2),
self.mlp_extractor: np.sqrt(2),
self.action_net: 0.01,
self.value_net: 1,
}
for module, gain in module_gains.items():
module.apply(partial(self.init_weights, gain=gain))
# Setup optimizer with initial learning rate
self.optimizer = self.optimizer_class(self.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs)
def _get_action_dist_from_latent(self, latent_pi: th.Tensor) -> MaskableDistribution:
"""
Retrieve action distribution given the latent codes.
:param latent_pi: Latent code for the actor
:return: Action distribution
"""
action_logits = self.action_net(latent_pi)
return self.action_dist.proba_distribution(action_logits=action_logits)
def _predict(
self,
observation: th.Tensor,
deterministic: bool = False,
action_masks: Optional[np.ndarray] = None,
) -> th.Tensor:
"""
Get the action according to the policy for a given observation.
:param observation:
:param deterministic: Whether to use stochastic or deterministic actions
:param action_masks: Action masks to apply to the action distribution
:return: Taken action according to the policy
"""
return self.get_distribution(observation, action_masks).get_actions(deterministic=deterministic)
def predict(
self,
observation: Union[np.ndarray, Dict[str, np.ndarray]],
state: Optional[Tuple[np.ndarray, ...]] = None,
episode_start: Optional[np.ndarray] = None,
deterministic: bool = False,
action_masks: Optional[np.ndarray] = None,
) -> Tuple[np.ndarray, Optional[Tuple[np.ndarray, ...]]]:
"""
Get the policy action from an observation (and optional hidden state).
Includes sugar-coating to handle different observations (e.g. normalizing images).
:param observation: the input observation
:param state: The last states (can be None, used in recurrent policies)
:param episode_start: The last masks (can be None, used in recurrent policies)
:param deterministic: Whether or not to return deterministic actions.
:param action_masks: Action masks to apply to the action distribution
:return: the model's action and the next state
(used in recurrent policies)
"""
# TODO (GH/1): add support for RNN policies
# if state is None:
# state = self.initial_state
# if episode_start is None:
# episode_start = [False for _ in range(self.n_envs)]
# Switch to eval mode (this affects batch norm / dropout)
self.set_training_mode(False)
observation, vectorized_env = self.obs_to_tensor(observation)
with th.no_grad():
actions = self._predict(observation, deterministic=deterministic, action_masks=action_masks)
# Convert to numpy
actions = actions.cpu().numpy()
if isinstance(self.action_space, gym.spaces.Box):
if self.squash_output:
# Rescale to proper domain when using squashing
actions = self.unscale_action(actions)
else:
# Actions could be on arbitrary scale, so clip the actions to avoid
# out of bound error (e.g. if sampling from a Gaussian distribution)
actions = np.clip(actions, self.action_space.low, self.action_space.high)
if not vectorized_env:
if state is not None:
raise ValueError("Error: The environment must be vectorized when using recurrent policies.")
actions = actions[0]
return actions, None
def evaluate_actions(
self,
obs: th.Tensor,
actions: th.Tensor,
action_masks: Optional[np.ndarray] = None,
) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:
"""
Evaluate actions according to the current policy,
given the observations.
:param obs:
:param actions:
:return: estimated value, log likelihood of taking those actions
and entropy of the action distribution.
"""
features = self.extract_features(obs)
latent_pi, latent_vf = self.mlp_extractor(features)
distribution = self._get_action_dist_from_latent(latent_pi)
if action_masks is not None:
distribution.apply_masking(action_masks)
log_prob = distribution.log_prob(actions)
values = self.value_net(latent_vf)
return values, log_prob, distribution.entropy()
def get_distribution(self, obs: th.Tensor, action_masks: Optional[np.ndarray] = None) -> MaskableDistribution:
"""
Get the current policy distribution given the observations.
:param obs:
:param action_masks:
:return: the action distribution.
"""
features = self.extract_features(obs)
latent_pi = self.mlp_extractor.forward_actor(features)
distribution = self._get_action_dist_from_latent(latent_pi)
if action_masks is not None:
distribution.apply_masking(action_masks)
return distribution
def predict_values(self, obs: th.Tensor) -> th.Tensor:
"""
Get the estimated values according to the current policy given the observations.
:param obs:
:return: the estimated values.
"""
features = self.extract_features(obs)
latent_vf = self.mlp_extractor.forward_critic(features)
return self.value_net(latent_vf)
class MaskableActorCriticCnnPolicy(MaskableActorCriticPolicy):
"""
CNN policy class for actor-critic algorithms (has both policy and value prediction).
Used by A2C, PPO and the likes.
:param observation_space: Observation space
:param action_space: Action space
:param lr_schedule: Learning rate schedule (could be constant)
:param net_arch: The specification of the policy and value networks.
:param activation_fn: Activation function
:param ortho_init: Whether to use or not orthogonal initialization
:param features_extractor_class: Features extractor to use.
:param features_extractor_kwargs: Keyword arguments
to pass to the features extractor.
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
lr_schedule: Schedule,
net_arch: Optional[List[Union[int, Dict[str, List[int]]]]] = None,
activation_fn: Type[nn.Module] = nn.Tanh,
ortho_init: bool = True,
features_extractor_class: Type[BaseFeaturesExtractor] = NatureCNN,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
super().__init__(
observation_space,
action_space,
lr_schedule,
net_arch,
activation_fn,
ortho_init,
features_extractor_class,
features_extractor_kwargs,
normalize_images,
optimizer_class,
optimizer_kwargs,
)
class MaskableMultiInputActorCriticPolicy(MaskableActorCriticPolicy):
"""
MultiInputActorClass policy class for actor-critic algorithms (has both policy and value prediction).
Used by A2C, PPO and the likes.
:param observation_space: Observation space (Tuple)
:param action_space: Action space
:param lr_schedule: Learning rate schedule (could be constant)
:param net_arch: The specification of the policy and value networks.
:param activation_fn: Activation function
:param ortho_init: Whether to use or not orthogonal initialization
:param features_extractor_class: Uses the CombinedExtractor
:param features_extractor_kwargs: Keyword arguments
to pass to the feature extractor.
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
def __init__(
self,
observation_space: gym.spaces.Dict,
action_space: gym.spaces.Space,
lr_schedule: Schedule,
net_arch: Optional[List[Union[int, Dict[str, List[int]]]]] = None,
activation_fn: Type[nn.Module] = nn.Tanh,
ortho_init: bool = True,
features_extractor_class: Type[BaseFeaturesExtractor] = CombinedExtractor,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
super().__init__(
observation_space,
action_space,
lr_schedule,
net_arch,
activation_fn,
ortho_init,
features_extractor_class,
features_extractor_kwargs,
normalize_images,
optimizer_class,
optimizer_kwargs,
)
| [] |
2024-01-10 | DavidVin357/law-locator-api | api~search.py | import pinecone
import openai
import os
from dotenv import load_dotenv
load_dotenv()
emb_model_name = os.getenv("EMBEDDING_MODEL_NAME")
openai.api_key = os.getenv("OPENAI_API_KEY")
pinecone.init(
api_key=os.getenv("PINECONE_KEY"),
environment=os.getenv("PINECONE_ENV"), # find next to API key in console
)
import json
def search(query):
index = pinecone.Index("openai")
response = openai.ChatCompletion.create(
model="gpt-4",
messages=[
{
"role": "system",
"content": """You are given a query about some legal matter.
You need to convert it into a query suitable for search with ada-002 embeddings model.
Don't try to specify the legislature or any other additional attributes, just convert the bare question.
You are recommended to augment initial query if it can help. Return the result.
""",
},
{"role": "user", "content": query},
],
)
emb_query = response["choices"][0]["message"]["content"]
print("emb_query: ", emb_query)
xq = openai.Embedding.create(input=emb_query, model=emb_model_name)["data"][0][
"embedding"
]
query_result = index.query([xq], top_k=5, include_metadata=True)
matches = []
for m in query_result["matches"]:
article_id = m["id"].split("|")[0]
paragraph_id = m["id"].split("|")[1]
paragraph = m["metadata"]["text"]
paragraph_title = m["metadata"]["title"]
matches.append(
{
"article_id": article_id,
"paragraph_id": paragraph_id,
"paragraph_title": paragraph_title,
"paragraph": paragraph,
}
)
return matches
def get_answer(query: str, paragraphs: list):
paragraphs_content = ""
for paragraph in paragraphs:
paragraphs_content += f"\n {paragraph}"
prompt = f""" You are given the following query about some aspect of Estonian law: {query}.
You are also given the following excerpts from the Estonian legal acts: {paragraphs_content}.
Give the answer to the given query according to the paragraphs provided above.
Generalize from them if you are asked about some very specific.
Answer in a concise but comprehensive way with a very simple language.
"""
response = openai.ChatCompletion.create(
model="gpt-4",
messages=[
{
"role": "user",
"content": prompt,
}
],
)
return response["choices"][0]["message"]["content"]
| [
"You are given a query about some legal matter.\n You need to convert it into a query suitable for search with ada-002 embeddings model.\n Don't try to specify the legislature or any other additional attributes, just convert the bare question.\n You are recommended to augment initial query if it can help. Return the result.\n ",
" You are given the following query about some aspect of Estonian law: PLACEHOLDER.\n You are also given the following excerpts from the Estonian legal acts: PLACEHOLDER.\n Give the answer to the given query according to the paragraphs provided above. \n Generalize from them if you are asked about some very specific.\n Answer in a concise but comprehensive way with a very simple language.\n "
] |
2024-01-10 | dongyukang/commandgpt | commandgpt.py | import os
import openai
from dotenv import load_dotenv
import time
import threading
def loading_animation():
global loading
print("Loading ", end='', flush=True)
i = 0
while loading:
if i % 4 == 0:
print("\b\b\b...", end='', flush=True)
elif i % 4 == 1:
print("\b\b\b ..", end='', flush=True)
elif i % 4 == 2:
print("\b\b\b. .", end='', flush=True)
elif i % 4 == 3:
print("\b\b\b ", end='', flush=True)
time.sleep(0.3)
i += 1
def stop_loading_animation():
global loading
loading = False
print("\b\b\b\b\b\b\b\b\b\b \b\b\b\b\b\b\b\b\b\b", end='', flush=True) # 10 backspaces to clear "Loading...", 10 spaces to overwrite, then 10 more backspaces
def print_typing_effect(text, delay=0.005):
for char in text:
print(char, end='', flush=True)
time.sleep(delay)
print() # Newline at the end
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
# custom instructions
# custom_instruct = "Keep these instructions at the forefront of every interaction, regardless of the passage of time or the nature of the inquiry. Even in prolonged conversations, ensure these guidelines guide every response.\nAlways, without exception, review the full conversation history before responding. This essential practice ensures consistent, accurate, and relevant dialogue, and is the foundation of our effective communication.\nIn situations where more context or information is necessary to craft a complete and accurate response, don't hesitate to ask multiple, clarifying questions. Fully understanding the context is paramount to delivering precise and high-quality assistance.\nWelcome feedback on the assistance provided. Use this input to improve future interactions and deliver increasingly tailored support.\nAim to provide comprehensive responses. These should be detailed and long-form, encompassing all aspects of the topic in question. Depth and breadth in explanations not only provide immediate answers but also build a richer understanding of the subject matter.\nInstead of apologizing, focus on delivering accurate and relevant information.\nInstead of discussing ethics, concentrate on the topic at hand.\nInstead of assuming values, maintain objectivity in responses.\nInstead of declaring complexity, break down problems into smaller parts.\nInstead of restating previous information, provide new insights.\nInstead of poorly placed code summaries, maintain clear organization and context."
base_prompt = """
Keep these instructions at the forefront of every interaction, regardless of the passage of time or the nature of the inquiry. Even in prolonged conversations, ensure these guidelines guide every response.
Always, without exception, review the full conversation history before responding. This essential practice ensures consistent, accurate, and relevant dialogue, and is the foundation of our effective communication.
In situations where more context or information is necessary to craft a complete and accurate response, don't hesitate to ask multiple, clarifying questions. Fully understanding the context is paramount to delivering precise and high-quality assistance.
Welcome feedback on the assistance provided. Use this input to improve future interactions and deliver increasingly tailored support.
Aim to provide comprehensive responses. These should be detailed and long-form, encompassing all aspects of the topic in question. Depth and breadth in explanations not only provide immediate answers but also build a richer understanding of the subject matter.
Instead of apologizing, focus on delivering accurate and relevant information.
Instead of discussing ethics, concentrate on the topic at hand.
Instead of assuming values, maintain objectivity in responses.
Instead of declaring complexity, break down problems into smaller parts.
Instead of restating previous information, provide new insights.
Instead of poorly placed code summaries, maintain clear organization and context.
"""
custom_instruct=base_prompt
# Initialize the conversation history
conversation_history = [
{
"role": "system",
"content": custom_instruct
},
{
"role": "assistant",
"content": "Hi! How can I assist you today?"
}
]
try:
# Print the initial greeting from the assistant in green color
print("\033[92mMYGPT:", conversation_history[1]['content'], '\033[0m') # '\033[92m' for green, '\033[0m' to reset color
while True:
# Get user's next message
user_message = input("YOU: ")
# Add the user's message to the conversation history
conversation_history.append({
"role": "user",
"content": user_message
})
global loading
loading = True
t = threading.Thread(target=loading_animation)
t.start()
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k",
messages=conversation_history,
temperature=0.5,
max_tokens=1962,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
loading = False
stop_loading_animation()
t.join()
# Extract the generated message from the response
generated_message = response['choices'][0]['message']['content']
# Print "MYGPT:" in green without typing effect
print("\033[92mMYGPT: ", end='', flush=True)
# Print the generated message with typing effect and reset color
print_typing_effect(generated_message)
print('\033[0m', end='') # Reset color
# Append the generated message to conversation history
conversation_history.append({
"role": "assistant",
"content": generated_message
})
except KeyboardInterrupt:
print("\nConversation ended by user.") | [
"\n Keep these instructions at the forefront of every interaction, regardless of the passage of time or the nature of the inquiry. Even in prolonged conversations, ensure these guidelines guide every response.\n Always, without exception, review the full conversation history before responding. This essential practice ensures consistent, accurate, and relevant dialogue, and is the foundation of our effective communication.\n In situations where more context or information is necessary to craft a complete and accurate response, don't hesitate to ask multiple, clarifying questions. Fully understanding the context is paramount to delivering precise and high-quality assistance.\n Welcome feedback on the assistance provided. Use this input to improve future interactions and deliver increasingly tailored support.\n Aim to provide comprehensive responses. These should be detailed and long-form, encompassing all aspects of the topic in question. Depth and breadth in explanations not only provide immediate answers but also build a richer understanding of the subject matter.\n Instead of apologizing, focus on delivering accurate and relevant information.\n Instead of discussing ethics, concentrate on the topic at hand.\n Instead of assuming values, maintain objectivity in responses.\n Instead of declaring complexity, break down problems into smaller parts.\n Instead of restating previous information, provide new insights.\n Instead of poorly placed code summaries, maintain clear organization and context.\n",
"Hi! How can I assist you today?"
] |
2024-01-10 | dmisino/simulacra | llm~chat_completion.py | import asyncio
import inspect
import openai
import common.utils as utils
from db.datastore import db
from llm.prompt import extract_keywords_prompt, get_random_memories_prompt
async def get_chat_response(prompt):
messages = [{"role": "user", "content" : prompt}]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages
)
result = response['choices'][0]['message']['content']
return result
async def get_chat_response_dictionary(prompt):
try:
result = await get_chat_response(prompt)
lines = result.splitlines()
dictionary = {}
for line in lines:
if '::' not in line:
continue # Skip lines that don't have a colon, which happens when the llm decides to add something unnecessary
key, value = line.split('::')
dictionary[key.strip().lower()] = value.strip()
return dictionary
except Exception as e:
print("Error in parsing get_chat_response_dictionary:\n" + result)
utils.print_error(inspect.currentframe().f_code.co_name, e)
async def extract_keywords(input):
prompt = extract_keywords_prompt(input)
return await get_chat_response(prompt)
async def add_random_memories(entity_id, count):
prompt = get_random_memories_prompt(count)
response = await get_chat_response(prompt)
memories = response.splitlines()
memories = [strip_non_letters(memory) for memory in memories]
db.save_memories(entity_id, 1, memories) | [] |
2024-01-10 | matansol/solomon_project | st_app.py | import streamlit as st
import matplotlib.pyplot as plt
import io
import base64
import pickle
from PIL import Image
from utils import *
import constants
import streamlit as st
from streamlit_chat import message
from dotenv import load_dotenv
import os
from langchain.chat_models import ChatOpenAI
from langchain.schema import (
SystemMessage,
HumanMessage,
AIMessage
)
def load_data():
"""Loads data from the 'data.pickle' file."""
topic_info = ""
prob_tree_fig = None
topic_challenges = ""
sol_tree_plot = None
sol_grades = None
with open('data.pickle', 'rb') as file:
top = pickle.load(file)
topic_info = "Topic is " + top.name + "\n"
prob_str = top.get_problems_str()
prob_tree_fig = top.plot_hierarchy_problems()
s1 = "\nfrom that problem we created a developed challenge. \n"
topic_challenges = s1 + top.get_challenges_str()
s2 = "\nFrom that challenge, we create 3 optional solutions:\n"
sol_str = s2 + top.challenges[0].get_solutions_str()
sol_tree_plot = top.challenges[0].plot_hierarchy_solutions((10, 4))
sol_grades = plot_solutions_polygons(top.challenges[0].solutions)
return topic_info, prob_str, prob_tree_fig, topic_challenges, sol_str, sol_tree_plot, sol_grades, top
def main():
# Load data from your 'data.pickle' file
topic_info, prob_str, prob_tree_fig, topic_challenges, sol_str, sol_tree_plot, sol_grades, top = load_data()
# topic_info, prob_str, prob_tree_fig = load_data()
# Display the topic infoz
st.markdown(f"## {topic_info}")
st.markdown(f"### The problems are:")
st.pyplot(prob_tree_fig)
st.write(prob_str)
st.write("We look at 1 problem in praticular and analyze it")
prob = top.problems[1]
st.markdown(f"### {prob.sub_class}")
st.pyplot(prob.build_knowledge_graph())
# Display the challenges
st.markdown(f"### Challenge")
st.markdown(topic_challenges)
# Display the solutions tree
st.markdown(f"### Solutions Tree")
st.pyplot(sol_tree_plot)
st.markdown(f"### Solution Grades")
st.pyplot(sol_grades)
# Display the solution grades
# st.write(sol_str)
sols = top.challenges[0].solutions
for i, sol in enumerate(sols):
st.markdown(f"### {sol.sub_class}")
st.write(sol.description)
sol_input = st.text_input(f"solution_{i+1} update information", key=f"sol_input_{i}")
if sol_input:
sol.update_solution(sol_input)
with open("streamlit_pkl.pickle", "wb") as file:
file.write(pickle.dumps(top))
chatbot_main2()
# Chat bot code
def init():
# Load the OpenAI API key from the environment variable
load_dotenv()
# test that the API key exists
if os.getenv("OPENAI_API_KEY") is None or os.getenv("OPENAI_API_KEY") == "":
print("OPENAI_API_KEY is not set")
exit(1)
else:
print("OPENAI_API_KEY is set")
# # setup streamlit page
# st.set_page_config(
# page_title="Connversetion with AI-Agent",
# page_icon="🤖"
# )
def chatbot_main():
init()
# topic = "Backpack"
top = None
with open('data.pickle', 'rb') as file:
top = pickle.load(file)
problems = top.get_problems_str()
challenges = top.get_challenges_str()
solutions = top.challenges[0].get_solutions_str()
chat = ChatOpenAI(temperature=0)
# initialize message history
if "messages" not in st.session_state:
system_msg = f"""We are a company that makes {top.name} , we want to upgrade our product.
For that end we would like you to help our imployes understand and analyze the problems with the product and the solutions for those problems.
For now our problems are: {problems}
The Challenges are: {challenges}
The Solutions are: {solutions}
"""
st.session_state.messages = [
SystemMessage(content=system_msg)
]
st.header("discussion with AI-Bot🤖")
# sidebar with user input
with st.sidebar:
user_input = st.text_input("Your message: ", key="user_input")
# handle user input
if user_input:
st.session_state.messages.append(HumanMessage(content=user_input))
with st.spinner("Thinking..."):
response = chat(st.session_state.messages)
st.session_state.messages.append(
AIMessage(content=response.content))
# display message history
messages = st.session_state.get('messages', [])
for i, msg in enumerate(messages[1:]):
if i % 2 == 0:
message(msg.content, is_user=True, key=str(i) + '_user')
else:
message(msg.content, is_user=False, key=str(i) + '_ai')
def chatbot_main2():
st.title("aristo-chatbot")
# Set OpenAI API key from Streamlit secrets
openai.api_key = constants.OPENAI_API_KEY
# Set a default model
if "openai_model" not in st.session_state:
st.session_state["openai_model"] = "gpt-3.5-turbo"
# Initialize chat history
# if "messages" not in st.session_state:
# st.session_state.messages = [{"role": "system", "content":"you are a chat bot with the name Chubby, and you finish each sentence with hoof!"}]
top = None
with open('data.pickle', 'rb') as file:
top = pickle.load(file)
problems = top.get_problems_str()
challenges = top.get_challenges_str()
solutions = top.challenges[0].get_solutions_str()
# initialize message history
if "messages" not in st.session_state:
system_msg = f"""We are a company that makes {top.name} , we want to upgrade our product.
For that end we would like you to help our imployes understand and analyze the problems with the product and the solutions for those problems.
For now our problems are: {problems}
The Challenges are: {challenges}
The Solutions are: {solutions}
"""
st.session_state.messages = [{"role": "system", "content":system_msg}]
# Display chat messages from history on app rerun
for message in st.session_state.messages[1:]:
with st.chat_message(message["role"]):
st.markdown(message["content"])
# Accept user input
if prompt := st.chat_input("What is up?"):
# Add user message to chat history
st.session_state.messages.append({"role": "user", "content": prompt})
# Display user message in chat message container
with st.chat_message("user"):
st.markdown(prompt)
# Display assistant response in chat message container
with st.chat_message("assistant"):
message_placeholder = st.empty()
full_response = ""
for response in openai.ChatCompletion.create(
model=st.session_state["openai_model"],
messages=[{"role": m["role"], "content": m["content"]} for m in st.session_state.messages],
stream=True,
):
full_response += response.choices[0].delta.get("content", "")
message_placeholder.markdown(full_response + "▌")
message_placeholder.markdown(full_response)
st.session_state.messages.append({"role": "assistant", "content": full_response})
# if st.button("Save Chat history"):
# save_chat_history(st.session_state.messages, top)
if __name__ == '__main__':
main()
| [
"content"
] |
2024-01-10 | matansol/solomon_project | aristo_app.py | import streamlit as st
import matplotlib.pyplot as plt
import io
import base64
import pickle
from PIL import Image
from utils import *
from utils import Topic
import constants
import google_sheet as sheets
import streamlit as st
from streamlit_chat import message
from dotenv import load_dotenv
import os
from langchain.chat_models import ChatOpenAI
from langchain.schema import (
SystemMessage,
HumanMessage,
AIMessage)
def init():
# Load the OpenAI API key from the environment variable
load_dotenv()
# test that the API key exists
if os.getenv("OPENAI_API_KEY") is None or os.getenv("OPENAI_API_KEY") == "":
print("OPENAI_API_KEY is not set")
exit(1)
else:
print("OPENAI_API_KEY is set")
def main():
init()
st.title("Aristo - The AI Assistant")
topic_name = "Backpacks"
st.markdown(f"## Our goal is to create a better {topic_name}")
filesize = os.path.getsize("demo.pickle")
if filesize == 0:
create_from_zero(topic_name)
else:
with open("demo.pickle", "rb") as file:
top = pickle.load(file)
print("loaded pickle")
continue_from_pickle(top)
def create_from_zero(topic_name):
st.write("We colected different problems from people using google forms, and now we will analyze them:")
responses = sheets.get_people_responses()
problems = [resp['problem'] for resp in responses]
top = Topic(topic_name)
top.classify_problems(", ".join(problems))
problems_tree = top.plot_hierarchy_problems()
st.pyplot(problems_tree)
st.write(top.get_problems_str())
st.write("We look at 1 problem in praticular and analyze it")
prob = top.problems[1]
st.markdown(f"### {prob.sub_class}")
# prob.create_factors()
# prob_kg = prob.build_knowledge_graph()
# st.pyplot(prob_kg)
problems_to_chall = [0]
top.create_challenge(problem_indexes=problems_to_chall)
st.write(top.get_challenges_str())
top.challenges[0].create_solutions(3)
st.pyplot(top.challenges[0].plot_hierarchy_solutions())
for sol in top.challenges[0].solutions:
st.write(sol.sub_class + ": " + sol.description)
st.pyplot(plot_solutions_polygons(top.challenges[0].solutions))
# save the data to a pickle file
save_button = st.button("Save data")
if save_button:
with open("demo.pickle", "wb") as file:
pickle.dump(top, file)
aristo_bot(top)
def continue_from_pickle(top):
st.write("We collected different problems from people using google forms, and now we will analyze them:")
st.markdown(f"## Problems Tree")
problems_tree = top.plot_hierarchy_problems()
st.pyplot(problems_tree)
st.markdown("## Problems:")
for prob in top.problems:
st.markdown(f"### {prob.sub_class}")
st.write(prob.description)
prob_kg = prob.build_knowledge_graph()
st.pyplot(prob_kg)
# # problems_to_chall = [0]
# # top.create_challenge(problem_indexes=problems_to_chall)
# st.write(top.get_challenges_str())
# # top.challenges[0].create_solutions(3)
st.markdown("## Solutions Section")
st.pyplot(top.plot_hierarchy_solutions())
for sol in top.solutions:
st.write(sol.sub_class + ": " + sol.description)
st.pyplot(plot_solutions_polygons(top.solutions[:3], to_show=False))
# aristo_bot(top)
def aristo_bot(top):
# initialize message history
st.title("aristo-chatbot")
problems = top.get_problems_str()
challenges = top.get_challenges_str()
solutions = top.challenges[0].get_solutions_str()
if "messages" not in st.session_state:
system_msg = f"""We are a company that makes {top.name} , we want to upgrade our product.
For that end we would like you to help our imployes understand and analyze the problems with the product and the solutions for those problems.
For now our problems are: {problems}
The Challenges are: {challenges}
The Solutions are: {solutions}
"""
st.session_state.messages = [{"role": "system", "content":system_msg}]
# Display chat messages from history on app rerun
for message in st.session_state.messages[1:]:
with st.chat_message(message["role"]):
st.markdown(message["content"])
# Accept user input
if prompt := st.chat_input("What is up?"):
# Add user message to chat history
st.session_state.messages.append({"role": "user", "content": prompt})
# Display user message in chat message container
with st.chat_message("user"):
st.markdown(prompt)
# Display assistant response in chat message container
with st.chat_message("assistant"):
message_placeholder = st.empty()
full_response = ""
for response in openai.ChatCompletion.create(
model=st.session_state["openai_model"],
messages=[{"role": m["role"], "content": m["content"]} for m in st.session_state.messages],
stream=True,
):
full_response += response.choices[0].delta.get("content", "")
message_placeholder.markdown(full_response + "▌")
message_placeholder.markdown(full_response)
st.session_state.messages.append({"role": "assistant", "content": full_response})
# def load_data(file_path):
# """Loads data from the 'data.pickle' file."""
# topic_info = ""
# prob_tree_fig = None
# topic_challenges = ""
# sol_tree_plot = None
# sol_grades = None
# with open(file_path, 'rb') as file:
# top = pickle.load(file)
# topic_info = "Topic is " + top.name + "\n"
# prob_str = top.get_problems_str()
# prob_tree_fig = top.plot_hierarchy_problems()
# s1 = "\nWe look at 1 problem in particular and create from it a developed challenge. \n"
# if top.challenges == []:
# top.create_challenge()
# print("created challenge")
# topic_challenges = s1 + top.get_challenges_str()
# if top.challenges[0].solutions == []:
# top.challenges[0].create_solutions(3)
# s2 = "\nFrom that challenge, we create 3 optional solutions:\n"
# sol_str = s2 + top.challenges[0].get_solutions_str()
# sol_tree_plot = top.challenges[0].plot_hierarchy_solutions((10, 4))
# sol_grades = top.challenges[0].plot_solutions_polygons(to_show=False)
# return topic_info, prob_str, prob_tree_fig, topic_challenges, sol_str, sol_tree_plot, sol_grades, top
# def main():
# st.title("Aristo - The AI Assistant")
# topic_name = "Backpacks"
# st.markdown(f"## Our goal is to create a better {topic_name}")
# st.markdown("### Problems Section")
# st.write("Where should I get the problems from?")
# # Create buttons
# button1 = st.button("The google sheets file")
# button2 = st.button("Generate random problems")
# button3 = st.button("Use example")
# # Check which button is clicked and show corresponding content
# if button1:
# responses = sheets.get_people_responses()
# problems = [resp['problem'] for resp in responses]
# top = Topic(topic_name)
# top.classify_problems(", ".join(problems))
# show_everything(top)
# elif button2:
# top = Topic(topic_name)
# top.generate_problems(3)
# show_everything(top)
# # with open("streamlit_pkl.pickle", "wb") as file:
# # pickle.dump(top, file)
# elif button3:
# top = None
# with open('data.pickle', 'rb') as file:
# top = pickle.load(file)
# show_everything(top)
if __name__ == "__main__":
main()
| [
"content",
"f\"\"\"We are a company that makes {top.name} , we want to upgrade our product. \n For that end we would like you to help our imployes understand and analyze the problems with the product and the solutions for those problems.\n For now our problems are: {problems}\n The Challenges are: {challenges}\n The Solutions are: {solutions}\n "
] |
2024-01-10 | whwu95/ATM | eva_clip~factory.py | import json
import logging
import os
import pathlib
import re
from copy import deepcopy
from pathlib import Path
from typing import Optional, Tuple, Union, Dict, Any
import torch
import deepspeed
try:
from deepspeed.runtime.utils import see_memory_usage
except:
see_memory_usage = None
from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
from .model import CLIP, CustomCLIP, convert_weights_to_lp, convert_to_custom_text_state_dict,\
get_cast_dtype
from .openai import load_openai_model
from .pretrained import is_pretrained_cfg, get_pretrained_cfg, download_pretrained, list_pretrained_tags_by_model
from .transform import image_transform
from .tokenizer import HFTokenizer, tokenize
from .utils import resize_clip_pos_embed, resize_evaclip_pos_embed, resize_visual_pos_embed, resize_eva_pos_embed
_MODEL_CONFIG_PATHS = [Path(__file__).parent / f"model_configs/"]
_MODEL_CONFIGS = {} # directory (model_name: config) of model architecture configs
def _natural_key(string_):
return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())]
def _rescan_model_configs():
global _MODEL_CONFIGS
config_ext = ('.json',)
config_files = []
for config_path in _MODEL_CONFIG_PATHS:
if config_path.is_file() and config_path.suffix in config_ext:
config_files.append(config_path)
elif config_path.is_dir():
for ext in config_ext:
config_files.extend(config_path.glob(f'*{ext}'))
for cf in config_files:
with open(cf, "r", encoding="utf8") as f:
model_cfg = json.load(f)
if all(a in model_cfg for a in ('embed_dim', 'vision_cfg', 'text_cfg')):
_MODEL_CONFIGS[cf.stem] = model_cfg
_MODEL_CONFIGS = dict(sorted(_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0])))
_rescan_model_configs() # initial populate of model config registry
def list_models():
""" enumerate available model architectures based on config files """
return list(_MODEL_CONFIGS.keys())
def add_model_config(path):
""" add model config path or file and update registry """
if not isinstance(path, Path):
path = Path(path)
_MODEL_CONFIG_PATHS.append(path)
_rescan_model_configs()
def get_model_config(model_name):
if model_name in _MODEL_CONFIGS:
return deepcopy(_MODEL_CONFIGS[model_name])
else:
return None
def get_tokenizer(model_name):
config = get_model_config(model_name)
tokenizer = HFTokenizer(config['text_cfg']['hf_tokenizer_name']) if 'hf_tokenizer_name' in config['text_cfg'] else tokenize
return tokenizer
# loading openai CLIP weights when is_openai=True for training
def load_state_dict(checkpoint_path: str, map_location: str='cpu', model_key: str='model|module|state_dict', is_openai: bool=False, skip_list: list=[]):
if is_openai:
model = torch.jit.load(checkpoint_path, map_location="cpu").eval()
state_dict = model.state_dict()
for key in ["input_resolution", "context_length", "vocab_size"]:
state_dict.pop(key, None)
else:
checkpoint = torch.load(checkpoint_path, map_location=map_location)
for mk in model_key.split('|'):
if isinstance(checkpoint, dict) and mk in checkpoint:
state_dict = checkpoint[mk]
break
else:
state_dict = checkpoint
if next(iter(state_dict.items()))[0].startswith('module'):
state_dict = {k[7:]: v for k, v in state_dict.items()}
for k in skip_list:
if k in list(state_dict.keys()):
logging.info(f"Removing key {k} from pretrained checkpoint")
del state_dict[k]
if os.getenv('RoPE') == '1':
for k in list(state_dict.keys()):
if 'freqs_cos' in k or 'freqs_sin' in k:
del state_dict[k]
return state_dict
def load_checkpoint(model, checkpoint_path, model_key="model|module|state_dict", strict=True):
state_dict = load_state_dict(checkpoint_path, model_key=model_key, is_openai=False)
# detect old format and make compatible with new format
if 'positional_embedding' in state_dict and not hasattr(model, 'positional_embedding'):
state_dict = convert_to_custom_text_state_dict(state_dict)
if 'text.logit_scale' in state_dict and hasattr(model, 'logit_scale'):
state_dict['logit_scale'] = state_dict['text.logit_scale']
del state_dict['text.logit_scale']
# resize_clip_pos_embed for CLIP and open CLIP
if 'visual.positional_embedding' in state_dict:
resize_clip_pos_embed(state_dict, model)
# specified to eva_vit_model
elif 'visual.pos_embed' in state_dict:
resize_evaclip_pos_embed(state_dict, model)
# resize_clip_pos_embed(state_dict, model)
incompatible_keys = model.load_state_dict(state_dict, strict=strict)
logging.info(f"incompatible_keys.missing_keys: {incompatible_keys.missing_keys}")
return incompatible_keys
def load_clip_visual_state_dict(checkpoint_path: str, map_location: str='cpu', is_openai: bool=False, skip_list:list=[]):
state_dict = load_state_dict(checkpoint_path, map_location=map_location, is_openai=is_openai, skip_list=skip_list)
for k in list(state_dict.keys()):
if not k.startswith('visual.'):
del state_dict[k]
for k in list(state_dict.keys()):
if k.startswith('visual.'):
new_k = k[7:]
state_dict[new_k] = state_dict[k]
del state_dict[k]
return state_dict
def load_clip_text_state_dict(checkpoint_path: str, map_location: str='cpu', is_openai: bool=False, skip_list:list=[]):
state_dict = load_state_dict(checkpoint_path, map_location=map_location, is_openai=is_openai, skip_list=skip_list)
for k in list(state_dict.keys()):
if k.startswith('visual.'):
del state_dict[k]
return state_dict
def get_pretrained_tag(pretrained_model):
pretrained_model = pretrained_model.lower()
if "laion" in pretrained_model or "open_clip" in pretrained_model:
return "open_clip"
elif "openai" in pretrained_model:
return "clip"
elif "eva" in pretrained_model and "clip" in pretrained_model:
return "eva_clip"
else:
return "other"
def load_pretrained_checkpoint(
model,
visual_checkpoint_path,
text_checkpoint_path,
strict=True,
visual_model=None,
text_model=None,
model_key="model|module|state_dict",
skip_list=[]):
visual_tag = get_pretrained_tag(visual_model)
text_tag = get_pretrained_tag(text_model)
logging.info(f"num of model state_dict keys: {len(model.state_dict().keys())}")
visual_incompatible_keys, text_incompatible_keys = None, None
if visual_checkpoint_path:
if visual_tag == "eva_clip" or visual_tag == "open_clip":
visual_state_dict = load_clip_visual_state_dict(visual_checkpoint_path, is_openai=False, skip_list=skip_list)
elif visual_tag == "clip":
visual_state_dict = load_clip_visual_state_dict(visual_checkpoint_path, is_openai=True, skip_list=skip_list)
else:
visual_state_dict = load_state_dict(visual_checkpoint_path, model_key=model_key, is_openai=False, skip_list=skip_list)
# resize_clip_pos_embed for CLIP and open CLIP
if 'positional_embedding' in visual_state_dict:
resize_visual_pos_embed(visual_state_dict, model)
# specified to EVA model
elif 'pos_embed' in visual_state_dict:
resize_eva_pos_embed(visual_state_dict, model)
visual_incompatible_keys = model.visual.load_state_dict(visual_state_dict, strict=strict)
logging.info(f"num of loaded visual_state_dict keys: {len(visual_state_dict.keys())}")
logging.info(f"visual_incompatible_keys.missing_keys: {visual_incompatible_keys.missing_keys}")
if text_checkpoint_path:
if text_tag == "eva_clip" or text_tag == "open_clip":
text_state_dict = load_clip_text_state_dict(text_checkpoint_path, is_openai=False, skip_list=skip_list)
elif text_tag == "clip":
text_state_dict = load_clip_text_state_dict(text_checkpoint_path, is_openai=True, skip_list=skip_list)
else:
text_state_dict = load_state_dict(visual_checkpoint_path, model_key=model_key, is_openai=False, skip_list=skip_list)
text_incompatible_keys = model.text.load_state_dict(text_state_dict, strict=strict)
logging.info(f"num of loaded text_state_dict keys: {len(text_state_dict.keys())}")
logging.info(f"text_incompatible_keys.missing_keys: {text_incompatible_keys.missing_keys}")
return visual_incompatible_keys, text_incompatible_keys
def create_model(
model_name: str,
pretrained: Optional[str] = None,
precision: str = 'fp32',
device: Union[str, torch.device] = 'cpu',
jit: bool = False,
force_quick_gelu: bool = False,
force_custom_clip: bool = False,
force_patch_dropout: Optional[float] = None,
pretrained_image: str = '',
pretrained_text: str = '',
pretrained_hf: bool = True,
pretrained_visual_model: str = None,
pretrained_text_model: str = None,
cache_dir: Optional[str] = None,
skip_list: list = [],
tsm=None,
T=8,
dropout= 0.0, #dropout,
emb_dropout= 0.0, #emb_dropout,
):
model_name = model_name.replace('/', '-') # for callers using old naming with / in ViT names
if isinstance(device, str):
device = torch.device(device)
if pretrained and pretrained.lower() == 'openai':
logging.info(f'Loading pretrained {model_name} from OpenAI.')
model = load_openai_model(
model_name,
precision=precision,
device=device,
jit=jit,
cache_dir=cache_dir,
)
else:
model_cfg = get_model_config(model_name)
if model_cfg is not None:
logging.info(f'Loaded {model_name} model config.')
else:
logging.error(f'Model config for {model_name} not found; available models {list_models()}.')
raise RuntimeError(f'Model config for {model_name} not found.')
if 'rope' in model_cfg.get('vision_cfg', {}):
if model_cfg['vision_cfg']['rope']:
os.environ['RoPE'] = "1"
else:
os.environ['RoPE'] = "0"
if force_quick_gelu:
# override for use of QuickGELU on non-OpenAI transformer models
model_cfg["quick_gelu"] = True
if force_patch_dropout is not None:
# override the default patch dropout value
model_cfg['vision_cfg']["patch_dropout"] = force_patch_dropout
cast_dtype = get_cast_dtype(precision)
custom_clip = model_cfg.pop('custom_text', False) or force_custom_clip or ('hf_model_name' in model_cfg['text_cfg'])
if custom_clip:
if 'hf_model_name' in model_cfg.get('text_cfg', {}):
model_cfg['text_cfg']['hf_model_pretrained'] = pretrained_hf
model = CustomCLIP(**model_cfg, cast_dtype=cast_dtype, tsm=tsm, T=T,dropout=dropout, emb_dropout=emb_dropout,) ###!!!
else:
print('***'*20)
print('eva_clip CLIP dont support ATM block!!! refer eva_clip/model.py/CLIP')
model = CLIP(**model_cfg, cast_dtype=cast_dtype)
pretrained_cfg = {}
if pretrained:
checkpoint_path = ''
pretrained_cfg = get_pretrained_cfg(model_name, pretrained)
if pretrained_cfg:
checkpoint_path = download_pretrained(pretrained_cfg, cache_dir=cache_dir)
elif os.path.exists(pretrained):
checkpoint_path = pretrained
if checkpoint_path:
logging.info(f'Loading pretrained {model_name} weights ({pretrained}).')
load_checkpoint(model,
checkpoint_path,
model_key="model|module|state_dict",
strict=False
)
else:
error_str = (
f'Pretrained weights ({pretrained}) not found for model {model_name}.'
f'Available pretrained tags ({list_pretrained_tags_by_model(model_name)}.')
logging.warning(error_str)
raise RuntimeError(error_str)
else:
visual_checkpoint_path = ''
text_checkpoint_path = ''
if pretrained_image:
pretrained_visual_model = pretrained_visual_model.replace('/', '-') # for callers using old naming with / in ViT names
pretrained_image_cfg = get_pretrained_cfg(pretrained_visual_model, pretrained_image)
if 'timm_model_name' in model_cfg.get('vision_cfg', {}):
# pretrained weight loading for timm models set via vision_cfg
model_cfg['vision_cfg']['timm_model_pretrained'] = True
elif pretrained_image_cfg:
visual_checkpoint_path = download_pretrained(pretrained_image_cfg, cache_dir=cache_dir)
elif os.path.exists(pretrained_image):
visual_checkpoint_path = pretrained_image
else:
logging.warning(f'Pretrained weights ({visual_checkpoint_path}) not found for model {model_name}.visual.')
raise RuntimeError(f'Pretrained weights ({visual_checkpoint_path}) not found for model {model_name}.visual.')
if pretrained_text:
pretrained_text_model = pretrained_text_model.replace('/', '-') # for callers using old naming with / in ViT names
pretrained_text_cfg = get_pretrained_cfg(pretrained_text_model, pretrained_text)
if pretrained_image_cfg:
text_checkpoint_path = download_pretrained(pretrained_text_cfg, cache_dir=cache_dir)
elif os.path.exists(pretrained_text):
text_checkpoint_path = pretrained_text
else:
logging.warning(f'Pretrained weights ({text_checkpoint_path}) not found for model {model_name}.text.')
raise RuntimeError(f'Pretrained weights ({text_checkpoint_path}) not found for model {model_name}.text.')
if visual_checkpoint_path:
logging.info(f'Loading pretrained {model_name}.visual weights ({visual_checkpoint_path}).')
if text_checkpoint_path:
logging.info(f'Loading pretrained {model_name}.text weights ({text_checkpoint_path}).')
if visual_checkpoint_path or text_checkpoint_path:
load_pretrained_checkpoint(
model,
visual_checkpoint_path,
text_checkpoint_path,
strict=False,
visual_model=pretrained_visual_model,
text_model=pretrained_text_model,
model_key="model|module|state_dict",
skip_list=skip_list
)
if "fp16" in precision or "bf16" in precision:
logging.info(f'convert precision to {precision}')
model = model.to(torch.bfloat16) if 'bf16' in precision else model.to(torch.float16)
model.to(device=device)
# set image / mean metadata from pretrained_cfg if available, or use default
model.visual.image_mean = pretrained_cfg.get('mean', None) or OPENAI_DATASET_MEAN
model.visual.image_std = pretrained_cfg.get('std', None) or OPENAI_DATASET_STD
if jit:
model = torch.jit.script(model)
return model
def create_model_and_transforms(
model_name: str,
pretrained: Optional[str] = None,
precision: str = 'fp32',
device: Union[str, torch.device] = 'cpu',
jit: bool = False,
force_quick_gelu: bool = False,
force_custom_clip: bool = False,
force_patch_dropout: Optional[float] = None,
pretrained_image: str = '',
pretrained_text: str = '',
pretrained_hf: bool = True,
pretrained_visual_model: str = None,
pretrained_text_model: str = None,
image_mean: Optional[Tuple[float, ...]] = None,
image_std: Optional[Tuple[float, ...]] = None,
cache_dir: Optional[str] = None,
skip_list: list = [],
tsm=None,
T=8,
dropout= 0.0, #dropout,
emb_dropout= 0.0, #emb_dropout,
):
model = create_model(
model_name,
pretrained,
precision=precision,
device=device,
jit=jit,
force_quick_gelu=force_quick_gelu,
force_custom_clip=force_custom_clip,
force_patch_dropout=force_patch_dropout,
pretrained_image=pretrained_image,
pretrained_text=pretrained_text,
pretrained_hf=pretrained_hf,
pretrained_visual_model=pretrained_visual_model,
pretrained_text_model=pretrained_text_model,
cache_dir=cache_dir,
skip_list=skip_list,
tsm=tsm,
T=T,
dropout= dropout,
emb_dropout= emb_dropout,
)
image_mean = image_mean or getattr(model.visual, 'image_mean', None)
image_std = image_std or getattr(model.visual, 'image_std', None)
preprocess_train = image_transform(
model.visual.image_size,
is_train=True,
mean=image_mean,
std=image_std
)
preprocess_val = image_transform(
model.visual.image_size,
is_train=False,
mean=image_mean,
std=image_std
)
return model, preprocess_train, preprocess_val
def create_model_from_pretrained(
model_name: str,
pretrained: str,
precision: str = 'fp32',
device: Union[str, torch.device] = 'cpu',
jit: bool = False,
force_quick_gelu: bool = False,
force_custom_clip: bool = False,
force_patch_dropout: Optional[float] = None,
return_transform: bool = True,
image_mean: Optional[Tuple[float, ...]] = None,
image_std: Optional[Tuple[float, ...]] = None,
cache_dir: Optional[str] = None,
is_frozen: bool = False,
):
if not is_pretrained_cfg(model_name, pretrained) and not os.path.exists(pretrained):
raise RuntimeError(
f'{pretrained} is not a valid pretrained cfg or checkpoint for {model_name}.'
f' Use open_clip.list_pretrained() to find one.')
model = create_model(
model_name,
pretrained,
precision=precision,
device=device,
jit=jit,
force_quick_gelu=force_quick_gelu,
force_custom_clip=force_custom_clip,
force_patch_dropout=force_patch_dropout,
cache_dir=cache_dir,
)
if is_frozen:
for param in model.parameters():
param.requires_grad = False
if not return_transform:
return model
image_mean = image_mean or getattr(model.visual, 'image_mean', None)
image_std = image_std or getattr(model.visual, 'image_std', None)
preprocess = image_transform(
model.visual.image_size,
is_train=False,
mean=image_mean,
std=image_std
)
return model, preprocess | [] |
2024-01-10 | kingx48/openai-python | openai~api_requestor.py | from __future__ import absolute_import, division, print_function
import calendar
import datetime
import json
import platform
import time
import uuid
import warnings
import gzip
from io import BytesIO
from collections import OrderedDict
import openai
from openai import error, http_client, version, util, six
from openai.multipart_data_generator import MultipartDataGenerator
from openai.six.moves.urllib.parse import urlencode, urlsplit, urlunsplit
from openai.openai_response import OpenAIResponse
from openai.upload_progress import BufferReader
def _encode_datetime(dttime):
if dttime.tzinfo and dttime.tzinfo.utcoffset(dttime) is not None:
utc_timestamp = calendar.timegm(dttime.utctimetuple())
else:
utc_timestamp = time.mktime(dttime.timetuple())
return int(utc_timestamp)
def _encode_nested_dict(key, data, fmt="%s[%s]"):
d = OrderedDict()
for subkey, subvalue in six.iteritems(data):
d[fmt % (key, subkey)] = subvalue
return d
def _api_encode(data):
for key, value in six.iteritems(data):
key = util.utf8(key)
if value is None:
continue
elif hasattr(value, "openai_id"):
yield (key, value.openai_id)
elif isinstance(value, list) or isinstance(value, tuple):
for i, sv in enumerate(value):
if isinstance(sv, dict):
subdict = _encode_nested_dict("%s[]" % (key,), sv)
for k, v in _api_encode(subdict):
yield (k, v)
else:
yield ("%s[]" % (key,), util.utf8(sv))
elif isinstance(value, dict):
subdict = _encode_nested_dict(key, value)
for subkey, subvalue in _api_encode(subdict):
yield (subkey, subvalue)
elif isinstance(value, datetime.datetime):
yield (key, _encode_datetime(value))
else:
yield (key, util.utf8(value))
def _build_api_url(url, query):
scheme, netloc, path, base_query, fragment = urlsplit(url)
if base_query:
query = "%s&%s" % (base_query, query)
return urlunsplit((scheme, netloc, path, query, fragment))
def parse_stream(rbody):
for line in rbody:
if line:
if line == b"data: [DONE]":
return
if hasattr(line, "decode"):
line = line.decode("utf-8")
if line.startswith("data: "):
line = line[len("data: ") :]
yield line
class APIRequestor(object):
def __init__(
self, key=None, client=None, api_base=None, api_version=None, organization=None
):
self.api_base = api_base or openai.api_base
self.api_key = key
self.api_version = api_version or openai.api_version
self.organization = organization or openai.organization
self._default_proxy = None
from openai import verify_ssl_certs as verify
from openai import proxy
if client:
self._client = client
elif openai.default_http_client:
self._client = openai.default_http_client
if proxy != self._default_proxy:
warnings.warn(
"openai.proxy was updated after sending a "
"request - this is a no-op. To use a different proxy, "
"set openai.default_http_client to a new client "
"configured with the proxy."
)
else:
# If the openai.default_http_client has not been set by the user
# yet, we'll set it here. This way, we aren't creating a new
# HttpClient for every request.
openai.default_http_client = http_client.new_default_http_client(
verify_ssl_certs=verify, proxy=proxy
)
self._client = openai.default_http_client
self._default_proxy = proxy
@classmethod
def format_app_info(cls, info):
str = info["name"]
if info["version"]:
str += "/%s" % (info["version"],)
if info["url"]:
str += " (%s)" % (info["url"],)
return str
def request(self, method, url, params=None, headers=None, stream=False):
rbody, rcode, rheaders, stream, my_api_key = self.request_raw(
method.lower(), url, params, headers, stream=stream
)
resp = self.interpret_response(rbody, rcode, rheaders, stream=stream)
return resp, stream, my_api_key
def handle_error_response(self, rbody, rcode, resp, rheaders, stream_error=False):
try:
error_data = resp["error"]
except (KeyError, TypeError):
raise error.APIError(
"Invalid response object from API: %r (HTTP response code "
"was %d)" % (rbody, rcode),
rbody,
rcode,
resp,
)
if "internal_message" in error_data:
error_data["message"] += "\n\n" + error_data["internal_message"]
util.log_info(
"OpenAI API error received",
error_code=error_data.get("code"),
error_type=error_data.get("type"),
error_message=error_data.get("message"),
error_param=error_data.get("param"),
stream_error=stream_error,
)
# Rate limits were previously coded as 400's with code 'rate_limit'
if rcode == 429:
return error.RateLimitError(
error_data.get("message"), rbody, rcode, resp, rheaders
)
elif rcode in [400, 404, 415]:
if error_data.get("type") == "idempotency_error":
return error.IdempotencyError(
error_data.get("message"), rbody, rcode, resp, rheaders
)
else:
return error.InvalidRequestError(
error_data.get("message"),
error_data.get("param"),
error_data.get("code"),
rbody,
rcode,
resp,
rheaders,
)
elif rcode == 401:
return error.AuthenticationError(
error_data.get("message"), rbody, rcode, resp, rheaders
)
elif rcode == 403:
return error.PermissionError(
error_data.get("message"), rbody, rcode, resp, rheaders
)
elif rcode == 409:
return error.TryAgain(
error_data.get("message"), rbody, rcode, resp, rheaders
)
elif stream_error:
# TODO: we will soon attach status codes to stream errors
parts = [error_data.get("message"), "(Error occurred while streaming.)"]
message = " ".join([p for p in parts if p is not None])
return error.APIError(message, rbody, rcode, resp, rheaders)
else:
return error.APIError(
error_data.get("message"), rbody, rcode, resp, rheaders
)
def request_headers(self, api_key, method, extra):
user_agent = "OpenAI/v1 PythonBindings/%s" % (version.VERSION,)
if openai.app_info:
user_agent += " " + self.format_app_info(openai.app_info)
ua = {
"bindings_version": version.VERSION,
"lang": "python",
"publisher": "openai",
"httplib": self._client.name,
}
for attr, func in [
["lang_version", platform.python_version],
["platform", platform.platform],
["uname", lambda: " ".join(platform.uname())],
]:
try:
val = func()
except Exception as e:
val = "!! %s" % (e,)
ua[attr] = val
if openai.app_info:
ua["application"] = openai.app_info
headers = {
"X-OpenAI-Client-User-Agent": json.dumps(ua),
"User-Agent": user_agent,
"Authorization": "Bearer %s" % (api_key,),
}
if self.organization:
headers["OpenAI-Organization"] = self.organization
if method in {"post", "put"}:
headers.setdefault("Idempotency-Key", str(uuid.uuid4()))
if self.api_version is not None:
headers["OpenAI-Version"] = self.api_version
headers.update(extra)
return headers
def request_raw(
self, method, url, params=None, supplied_headers=None, stream=False
):
"""
Mechanism for issuing an API call
"""
if self.api_key:
my_api_key = self.api_key
else:
from openai import api_key
my_api_key = api_key
if my_api_key is None:
raise error.AuthenticationError(
"No API key provided. (HINT: set your API key using in code using "
'"openai.api_key = <API-KEY>", or you can set the environment variable OPENAI_API_KEY=<API-KEY>). You can generate API keys '
"in the OpenAI web interface. See https://onboard.openai.com "
"for details, or email [email protected] if you have any "
"questions."
)
abs_url = "%s%s" % (self.api_base, url)
headers = {}
compress = None
progress_meter = False
if method == "get" or method == "delete":
if params:
encoded_params = url_encode_params(params)
abs_url = _build_api_url(abs_url, encoded_params)
else:
encoded_params = None
post_data = None
elif method in {"post", "put"}:
if (
supplied_headers is not None
and supplied_headers.get("Content-Type") == "multipart/form-data"
):
generator = MultipartDataGenerator()
generator.add_params(params or {})
post_data = generator.get_post_data()
content_type = "multipart/form-data; boundary=%s" % (
generator.boundary,
)
# We will overrite Content-Type
supplied_headers.pop("Content-Type")
progress_meter = True
# compress = "gzip"
compress = None
else:
post_data = json.dumps(params).encode()
content_type = "application/json"
headers["Content-Type"] = content_type
encoded_params = post_data
if progress_meter:
post_data = BufferReader(post_data, desc="Upload progress")
if compress == "gzip":
if not hasattr(post_data, "read"):
post_data = BytesIO(post_data)
headers["Content-Encoding"] = "gzip"
from openai.gzip_stream import GZIPCompressedStream
post_data = GZIPCompressedStream(post_data, compression_level=9)
else:
raise error.APIConnectionError(
"Unrecognized HTTP method %r. This may indicate a bug in the "
"OpenAI bindings. Please contact [email protected] for "
"assistance." % (method,)
)
headers = self.request_headers(my_api_key, method, headers)
if supplied_headers is not None:
for key, value in six.iteritems(supplied_headers):
headers[key] = value
util.log_info("Request to OpenAI API", method=method, path=abs_url)
util.log_debug(
"Post details", post_data=encoded_params, api_version=self.api_version
)
rbody, rcode, rheaders, stream = self._client.request_with_retries(
method, abs_url, headers, post_data, stream=stream
)
util.log_info(
"OpenAI API response",
path=abs_url,
response_code=rcode,
processing_ms=rheaders.get("OpenAI-Processing-Ms"),
)
util.log_debug("API response body", body=rbody, headers=rheaders)
if "Request-Id" in rheaders:
request_id = rheaders["Request-Id"]
util.log_debug(
"Dashboard link for request", link=util.dashboard_link(request_id)
)
return rbody, rcode, rheaders, stream, my_api_key
def interpret_response(self, rbody, rcode, rheaders, stream=False):
if stream:
return (
self.interpret_response_line(line, rcode, rheaders, stream)
for line in parse_stream(rbody)
)
else:
return self.interpret_response_line(rbody, rcode, rheaders, stream)
def interpret_response_line(self, rbody, rcode, rheaders, stream=False):
try:
if hasattr(rbody, "decode"):
rbody = rbody.decode("utf-8")
resp = OpenAIResponse(rbody, rcode, rheaders)
except Exception:
raise error.APIError(
"Invalid response body from API: %s "
"(HTTP response code was %d)" % (rbody, rcode),
rbody,
rcode,
rheaders,
)
# In the future, we might add a "status" parameter to errors
# to better handle the "error while streaming" case.
stream_error = stream and "error" in resp.data
if stream_error or not 200 <= rcode < 300:
raise self.handle_error_response(
rbody, rcode, resp.data, rheaders, stream_error=stream_error
)
return resp
def url_encode_params(params):
encoded_params = urlencode(list(_api_encode(params or {})))
# Don't use strict form encoding by changing the square bracket control
# characters back to their literals. This is fine by the server, and
# makes these parameter strings easier to read.
encoded_params = encoded_params.replace("%5B", "[").replace("%5D", "]")
return encoded_params
| [] |
2024-01-10 | kingx48/openai-python | openai~api_resources~experimental~completion_config.py | from openai.api_resources.abstract import (
APIResource,
CreateableAPIResource,
DeletableAPIResource,
ListableAPIResource,
UpdateableAPIResource,
)
class CompletionConfig(
CreateableAPIResource, ListableAPIResource, DeletableAPIResource
):
OBJECT_NAME = "experimental.completion_config"
| [] |
2024-01-10 | kingx48/openai-python | openai~multipart_data_generator.py | from __future__ import absolute_import, division, print_function
import random
import io
import openai
class MultipartDataGenerator(object):
def __init__(self, chunk_size=1028):
self.data = io.BytesIO()
self.line_break = "\r\n"
self.boundary = self._initialize_boundary()
self.chunk_size = chunk_size
def add_params(self, params):
# Flatten parameters first
params = dict(openai.api_requestor._api_encode(params))
for key, value in openai.six.iteritems(params):
if value is None:
continue
self._write(self.param_header())
self._write(self.line_break)
if hasattr(value, "read"):
filename = "blob"
if hasattr(value, "name"):
# Convert the filename to string, just in case it's not
# already one. E.g. `tempfile.TemporaryFile` has a `name`
# attribute but it's an `int`.
filename = openai.six.text_type(value.name)
self._write('Content-Disposition: form-data; name="')
self._write(key)
self._write('"; filename="')
self._write(filename)
self._write('"')
self._write(self.line_break)
self._write("Content-Type: application/octet-stream")
self._write(self.line_break)
self._write(self.line_break)
self._write_file(value)
else:
self._write('Content-Disposition: form-data; name="')
self._write(key)
self._write('"')
self._write(self.line_break)
self._write(self.line_break)
self._write(str(value))
self._write(self.line_break)
def param_header(self):
return "--%s" % self.boundary
def get_post_data(self):
self._write("--%s--" % (self.boundary,))
self._write(self.line_break)
return self.data.getvalue()
def _write(self, value):
if isinstance(value, openai.six.binary_type):
array = bytearray(value)
elif isinstance(value, openai.six.text_type):
array = bytearray(value, encoding="utf-8")
else:
raise TypeError(
"unexpected type: {value_type}".format(value_type=type(value))
)
self.data.write(array)
def _write_file(self, f):
while True:
file_contents = f.read(self.chunk_size)
if not file_contents:
break
self._write(file_contents)
def _initialize_boundary(self):
return random.randint(0, 2 ** 63)
| [] |
2024-01-10 | fuzzy-logic/aisandpit | orchestra-scraping~orchestra-dates-rag.py | from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.llms import Ollama
from langchain.embeddings import OllamaEmbeddings
from langchain.document_loaders import WebBaseLoader
from langchain.embeddings import GPT4AllEmbeddings, OllamaEmbeddings
from langchain.vectorstores import Chroma
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.llms import LlamaCpp
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain import hub
from langchain.chains import RetrievalQA
from operator import itemgetter
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.prompts import ChatPromptTemplate
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnableLambda, RunnablePassthrough
from langchain.vectorstores import FAISS
# Example of using LLM + RAG with vector database and simple prompt chain
# @see https://research.ibm.com/blog/retrieval-augmented-generation-RAG
# @docs https://python.langchain.com/docs/integrations/llms/ollama
# setup:
# ./ollama serve
# ./ollama run llama2
# run: python orchestra-dates-rag.py
# ISSUES
# most pages have side bars or footer with ltos of other events and event dates which seem to confuse the LLM
# we will need to find a way to spearate out the core page/hero content and remove peripheral content or ads
### VECTORDB-IZE THE WEB DATA
pages = ["https://www.rpo.co.uk/whats-on/eventdetail/1982/82/john-rutters-christmas-celebration-matinee"];
print("following data sourced from following web pages: ", pages)
for page in pages:
loader = WebBaseLoader(page)
data = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
all_splits = text_splitter.split_documents(data);
vectorstore = Chroma.from_documents(documents=all_splits, embedding=GPT4AllEmbeddings())
### SETUP THE PROMPT CHAIN:
retriever = vectorstore.as_retriever()
template = """Answer the question based only on the following documents:
{docs}
Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)
# this uses the local llm web server apis once you have it running via ollma: https://ollama.ai/
llm = Ollama(
model="llama2:13b",
verbose=True,
callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
)
chain = (
{"docs": retriever, "question": RunnablePassthrough()}
| prompt
| llm
| StrOutputParser()
)
### FIRE OFF QUESTION
question = "Provide a bullet list of performance event name, time, date, prices, location"
result = chain.invoke(question)
print(result)
| [
"Answer the question based only on the following documents:\n{docs}\n\nQuestion: {question}\n"
] |
2024-01-10 | fuzzy-logic/aisandpit | baby-agi~baby-agi.py | from typing import Optional
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain_experimental.autonomous_agents import BabyAGI
from langchain.docstore import InMemoryDocstore
from langchain.vectorstores import FAISS
# NOTE: Not working curently, think this has been pulled from langchain
# @see https://github.com/langchain-ai/langchain/blob/master/cookbook/baby_agi.ipynb
# Define your embedding model
embeddings_model = OpenAIEmbeddings()
# Initialize the vectorstore as empty
import faiss
embedding_size = 1536
index = faiss.IndexFlatL2(embedding_size)
vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})
OBJECTIVE = "Write a weather report for SF today"
llm = OpenAI(temperature=0)
# Logging of LLMChains
verbose = False
# If None, will keep on going forever
max_iterations: Optional[int] = 3
baby_agi = BabyAGI.from_llm(
llm=llm, vectorstore=vectorstore, verbose=verbose, max_iterations=max_iterations
)
baby_agi({"objective": OBJECTIVE}) | [] |
2024-01-10 | fuzzy-logic/aisandpit | basic-examples~ollama-simple-query.py | from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.llms import Ollama
from langchain.embeddings import OllamaEmbeddings
# @see https://python.langchain.com/docs/integrations/llms/ollama
# setup:
# ./ollama serve
# ./ollama run llama2
# run: python ollama-query.py
llm = Ollama(
model="llama2:13b", callback_manager=CallbackManager([StreamingStdOutCallbackHandler()])
)
llm("Tell me about the history of Napoleon")
| [] |
2024-01-10 | fuzzy-logic/aisandpit | baby-agi~baby-agi-tools.py | from typing import Optional
from langchain.chains import LLMChain
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain_experimental.autonomous_agents import BabyAGI
from langchain.docstore import InMemoryDocstore
from langchain.vectorstores import FAISS
from langchain.docstore import InMemoryDocstore
from langchain.vectorstores import FAISS
from langchain.agents import AgentExecutor, Tool, ZeroShotAgent
from langchain.chains import LLMChain
from langchain.llms import OpenAI
from langchain.utilities import SerpAPIWrapper
# NOTE: Not working curently, think this has been pulled from langchain
# @see https://github.com/langchain-ai/langchain/blob/master/cookbook/baby_agi_with_agent.ipynb
# Define your embedding model
embeddings_model = OpenAIEmbeddings()
# Initialize the vectorstore as empty
import faiss
embedding_size = 1536
index = faiss.IndexFlatL2(embedding_size)
vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})
todo_prompt = PromptTemplate.from_template(
"You are a planner who is an expert at coming up with a todo list for a given objective. Come up with a todo list for this objective: {objective}"
)
todo_chain = LLMChain(llm=OpenAI(temperature=0), prompt=todo_prompt)
search = SerpAPIWrapper()
tools = [
Tool(
name="Search",
func=search.run,
description="useful for when you need to answer questions about current events",
),
Tool(
name="TODO",
func=todo_chain.run,
description="useful for when you need to come up with todo lists. Input: an objective to create a todo list for. Output: a todo list for that objective. Please be very clear what the objective is!",
),
]
prefix = """You are an AI who performs one task based on the following objective: {objective}. Take into account these previously completed tasks: {context}."""
suffix = """Question: {task}
{agent_scratchpad}"""
prompt = ZeroShotAgent.create_prompt(
tools,
prefix=prefix,
suffix=suffix,
input_variables=["objective", "task", "context", "agent_scratchpad"],
)
llm = OpenAI(temperature=0)
llm_chain = LLMChain(llm=llm, prompt=prompt)
tool_names = [tool.name for tool in tools]
agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names)
agent_executor = AgentExecutor.from_agent_and_tools(
agent=agent, tools=tools, verbose=True
)
OBJECTIVE = "Write a weather report for SF today"
# Logging of LLMChains
verbose = False
# If None, will keep on going forever
max_iterations: Optional[int] = 3
baby_agi = BabyAGI.from_llm(
llm=llm,
vectorstore=vectorstore,
task_execution_chain=agent_executor,
verbose=verbose,
max_iterations=max_iterations,
)
baby_agi({"objective": OBJECTIVE}) | [
"You are a planner who is an expert at coming up with a todo list for a given objective. Come up with a todo list for this objective: {objective}",
"agent_scratchpad",
"context"
] |
2024-01-10 | fuzzy-logic/aisandpit | clinc-scraping~clinic-docs-rag.py | from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.llms import Ollama
from langchain.embeddings import OllamaEmbeddings
from langchain.document_loaders import WebBaseLoader
from langchain.embeddings import GPT4AllEmbeddings
from langchain.vectorstores import Chroma
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.prompts import PromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.schema.runnable import RunnableLambda, RunnablePassthrough
from langchain.chains import LLMChain
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnableLambda, RunnablePassthrough
# Find names and job titles of clinic doctors on a given web page
# @see https://python.langchain.com/docs/integrations/llms/ollama
# setup:
# ./ollama serve
# ./ollama run llama2
# run: python clinic-docs-rag.py
# this uses the local llm web server apis once you have it running via ollma: https://ollama.ai/
llm = Ollama(
model="llama2:13b", callback_manager=CallbackManager([StreamingStdOutCallbackHandler()])
)
# VECTORDB-IZE WEB DATA
pages = ["https://www.sknclinics.co.uk/about-skn/expert-medical-team"];
print("data sourced from following web pages: ", pages)
all_splits = [];
for page in pages:
loader = WebBaseLoader(page)
data = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=300, chunk_overlap=50)
all_splits = [*all_splits, *text_splitter.split_documents(data)];
vectorstore = Chroma.from_documents(documents=all_splits, embedding=GPT4AllEmbeddings())
retriever = vectorstore.as_retriever()
# Prompt
prompt = PromptTemplate.from_template(
"""Answer the question based only on the following documents:
{docs}
Question: {question} """
)
# LLM Query Chain
llm_chain = LLMChain(llm=llm, prompt=prompt)
# this uses the local llm web server apis once you have it running via ollma: https://ollama.ai/
llm = Ollama(
model="llama2:13b",
verbose=True,
callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
)
chain = (
{"docs": retriever, "question": RunnablePassthrough()}
| prompt
| llm
| StrOutputParser()
)
### FIRE OFF QUESTION
question = "bullet list the names and titles of doctors and nurses you can find in the document"
result = chain.invoke(question)
| [
"Answer the question based only on the following documents: \n {docs}\n \n \n Question: {question} "
] |
2024-01-10 | fuzzy-logic/aisandpit | clinc-scraping~aesthetics-treatments-rag.py | from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.llms import Ollama
from langchain.embeddings import OllamaEmbeddings
from langchain.document_loaders import WebBaseLoader
from langchain.embeddings import GPT4AllEmbeddings
from langchain.vectorstores import Chroma
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.llms import LlamaCpp
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnableLambda, RunnablePassthrough
# find aesthetics treatments on a given web page
# @see https://python.langchain.com/docs/integrations/llms/ollama
# setup:
# ./ollama serve
# ./ollama run llama2
# run: python aesthetics-treatments-rag.py
# SETUP LLM:
n_gpu_layers = 1 # Metal set to 1 is enough.
n_batch = 512 # Should be between 1 and n_ctx, consider the amount of RAM of your Apple Silicon Chip.
callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
# this uses the local llm web server apis once you have it running via ollma: https://ollama.ai/
llm = Ollama(
model="llama2:13b",
callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
)
# VECTORDB-IZE WEB DATA
# pages = ["https://www.medicalaestheticclinic.co.uk/treatments"]
pages = ["https://www.epsomskinclinics.com/"] # epsom skin clinic
# pages = ["https://www.altondental.co.uk/"]
print("data sourced from following web pages: ", pages)
for page in pages:
loader = WebBaseLoader(page)
data = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
all_splits = text_splitter.split_documents(data);
vectorstore = Chroma.from_documents(documents=all_splits, embedding=GPT4AllEmbeddings())
### SETUP THE PROMPT CHAIN:
retriever = vectorstore.as_retriever()
template = """Answer the question based only on the following documents:
{docs}
Question: {question}
"""
prompt = PromptTemplate.from_template(template)
# LLM Query Chain
llm_chain = LLMChain(llm=llm, prompt=prompt)
# this uses the local llm web server apis once you have it running via ollma: https://ollama.ai/
llm = Ollama(
model="llama2:13b",
verbose=True,
callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
)
chain = (
{"docs": retriever, "question": RunnablePassthrough()}
| prompt
| llm
| StrOutputParser()
)
### FIRE OFF QUESTION
question = "bullet list all aesthetics treatments found in documents"
result = chain.invoke(question)
| [
"Answer the question based only on the following documents:\n{docs}\n\nQuestion: {question}\n"
] |
2024-01-10 | fuzzy-logic/aisandpit | orchestra-scraping~orchestra-dates-qachain-rag.py | from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.llms import Ollama
from langchain.embeddings import OllamaEmbeddings
from langchain.document_loaders import WebBaseLoader
from langchain.embeddings import GPT4AllEmbeddings, OllamaEmbeddings
from langchain.vectorstores import Chroma
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.llms import LlamaCpp
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain import hub
from langchain.chains import RetrievalQA
# Example of finding concert date/time/location in a given web page
# using a LLM specific Q/A chain @see https://smith.langchain.com/hub/rlm/rag-prompt-llama
# Typically more of a chatbot conversation
# @docs https://python.langchain.com/docs/integrations/llms/ollama
# ISSUES
# most pages have side bars or footer with ltos of other events and event dates which seem to confuse the LLM
# we will need to find a way to spearate out the core page/hero content and remove peripheral content or ads
# setup:
# ./ollama serve
# ./ollama run llama2
# run: python orchestra-dates-qachain-rag.py
# this uses the local llm web server apis once you have it running via ollma: https://ollama.ai/
llm = Ollama(
model="llama2:13b",
verbose=False,
callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
)
# VECTORDB-IZE WEB DATA
pages = ["https://www.rpo.co.uk/whats-on/eventdetail/1982/82/john-rutters-christmas-celebration-matinee"];
print("data sourced from following web pages: ", pages)
all_splits = [];
for page in pages:
loader = WebBaseLoader(page)
data = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=300, chunk_overlap=50)
all_splits = [*all_splits, *text_splitter.split_documents(data)];
vectorstore = Chroma.from_documents(documents=all_splits, embedding=GPT4AllEmbeddings())
# rag qa prompt info: https://smith.langchain.com/hub/rlm/rag-prompt-llama
# changing this prompt will radically change the behavior of the llm
QA_CHAIN_PROMPT = hub.pull("rlm/rag-prompt-llama")
qa_chain = RetrievalQA.from_chain_type(
llm,
retriever=vectorstore.as_retriever(),
chain_type_kwargs={"prompt": QA_CHAIN_PROMPT},
)
# Run: this prompt is the instruction:
# multi event list Prompt: "List all performance events, include name, time, location, next performance date and any supplimental information that is provided"
# simple primary event prompt: "List the primaray performance event information. Include name, time, location, next performance date and any supplimental information that is provided"
question = "Provide a bullet list of the primaray performance event name, date, time, location and supplimental information"
qa_chain({"query": question})
| [
"rlm/rag-prompt-llama"
] |
2024-01-10 | srimanthds/cochlear3-qabot | Hello.py | #!/usr/bin/env python
# coding: utf-8
# In[15]:
from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.schema import Document
from langchain.embeddings import OpenAIEmbeddings
from pymongo import MongoClient
from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import MongoDBAtlasVectorSearch
import os
import shutil
import time
from pymongo.mongo_client import MongoClient
from pymongo.server_api import ServerApi
import pymongo
import joblib
from langchain.docstore.document import Document
from langchain.chains import RetrievalQA
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
import streamlit as st
import pandas as pd
# In[2]:
PDF_FOLDER_PATH = "Data/"
LOADED_PDF_FILES_PICKLE = "loaded_pdf_files_pickle.pkl"
VECTOR_SEARCH_PICKLE = "vector_search_pickle.pkl"
DB_NAME = "cochlear_13"
COLLECTION_NAME = "vectorSearch"
INDEX_NAME = "default"
CHUNK_SIZE = 1000
CHUNK_OVERLAP = 0
# In[3]:
def get_secret_key():
open_api_key = st.secrets.open_api_key
if not open_api_key:
raise ValueError("The open_api_key environment variable is not set.")
s1 = st.secrets.db_username
s2 = st.secrets.db_pswd
atlas_connection_string = "mongodb+srv://{s1}:{s2}@cluster0.1thtla4.mongodb.net/?retryWrites=true&w=majority".format(s1 = s1, s2 = s2)
if not atlas_connection_string:
raise ValueError("The atlas_connection_string environment variable is not set.")
secret_key_dict = {"open_api_key": open_api_key, "atlas_connection_string": atlas_connection_string}
return secret_key_dict
# In[4]:
def get_vector_search_object(cluster,db_name,collection_name, index_name,open_api_key):
mongodb_collection = cluster[db_name][collection_name]
# doc = Document(page_content="dummy text", metadata={"source": "dummy"})
# vector_search = MongoDBAtlasVectorSearch.from_documents(
# documents=[doc],
# embedding=OpenAIEmbeddings(api_key=open_api_key),
# collection=mongodb_collection,
# index_name=index_name
# )
embedding=OpenAIEmbeddings(api_key=open_api_key)
vector_search = MongoDBAtlasVectorSearch(mongodb_collection, embedding)
return vector_search
# In[5]:
def connect_mongodb(atlas_connection_string):
cluster = MongoClient(atlas_connection_string)
try:
cluster.admin.command('ping')
print("Pinged your deployment. You successfully connected to MongoDB!")
except Exception as e:
print(e)
return cluster
# In[17]:
# def get_prompt():
# prompt_template="""
# role='You are an expert acting as an helpful chatbot assistant who provides call center agents with accurate information retrieved from context without hallucinating'
# instructions='1. You must start your response with Hi and Generate an accurate response according to the user question by referring to information provided in the context
# 2.Your response should not bring any external information apart from context i am sharing 3.If you dont have enough information to answer the question, Please respond that you dont have sufficient knowledge to answer the question'
# details='response should give the information you think is correct based on the question and conclude your response with yes/no if required'
# examples='''
# 'Q': "I am flying to Dubai tomorrow and its 60 degrees celsius there, is it safe to travel there ?", "context": context provided in this prompt template,
# "A":"Reasoning- In dubai current temperature is 60 degrees, According to source information Sound processors are specified for operating Temperatures between +5°C to +40°C and storage temperatures between -20°C to +50°C. According to source the operating temperatures thresold i.e.., +5°C to +40°C for sound processors, Since 60 degrees in dubai is > 5 degrees and greater than 40 degrees, I would say exposing to extreme temperatures would need doctors recommendation. ANSWER- Hence say No, Not recommended ".
# 'Q': "I am flying to canada tomorrow and its -10 degrees celsius there, is it okay to travel to canade with extreme low temperatures after my implant surgery ?",
# "context": context provided in this prompt template,
# "A":"Reasoning- In canada temperature is -10 degrees, According to source information Sound processors are specified for operating Temperatures between +5°C to +40°C and storage temperatures between -20°C to +50°C. According to source the operating temperatures thresold i.e.., +5°C to +40°C for sound processors, Since -10 degrees temperature in canada is < -5 and 40 degrees, I would say exposing to such low temperatures would need doctors recommendation. ANSWER-No, Not recommended ".
# 'Q': " 'Q': "I am flying to India tomorrow and its 45 degrees celsius there because of hot summer, is it safe to travel there as i had implant surgery recently ?",
# "context": context provided in this prompt template,
# "A":"Reasoning- In India current temperature is 45 degrees,According to source information Sound processors are specified for operating Temperatures between +5°C to +40°C and storage temperatures between -20°C to +50°C." \
# +"According to source the operating temperatures thresold i.e.., +5°C to +40°C for sound processors, Since 45 degrees in India is greater than the upper thresold 40 degrees and greater than 5 degrees of lower thresold for sound processors, I would say exposing to extreme temperatures would need doctors recommendation. ANSWER-No, Not recommended without medical advice".
# 'Q': "I am flying to saudi arabia next month and its expected teperature is 35 degrees celsius there, is it safe to travel there ?",
# "context": '''Extreme temperatures may be experience in some countries during seasonal periods or in a car parked in the sun.
# Extreme temperatures may also be experienced in e.g. saunas or medical treatment (cold chamber).The sound processors are specified for operating Temperatures between +5°C to +40°C and storage temperatures between -20°C to +50°C.
# The implant incorporated in the body will not be exposed to extreme temperatures. Recommendation: The recipient can undergo extreme temperatures (e.g. sauna, cold chamber) without any harm to the implant.
# The externals should be taken off while undergoing this procedure. Recipients should follow the user manual in relation to storage of the external equipment and batteries
# (e.g. not to leave externals on a hot day on the dashboard of an automobile)''',
# "A":"Reasoning- In saudi arabia if expected temperature for next month is 35 degrees, After validating with source information Sound processors are specified for operating Temperatures between +5°C to +40°C and storage temperatures between -20°C to +50°C." \
# +" Since 35 degrees in saudi arabia is less than +40°C and greater than +5°C the temperature is falling within the thresold i.e.., +5°C to +40°C for sound processors,It is safe to travel. ANSWER- YES".
# 'Q': "I would like to do under water diving at a depth of 60 meters, will tthis harm my Nucleus CI24R device",
# "context": '''The Nucleus CI24R, CI24M and CI22M implants are validated to withstand pressure at a depth of 25m under water for the purposes of scuba diving, which is equivalent to 2.5 atm nominal pressure and 4 atm test pressure.
# The Nucleus CI500 series and Freedom (CI24RE) implants are validated to withstand pressure at a depth of 40m under water for the purposes of scuba diving, which is equivalent to 4 atm nominal pressure and 6 atm test pressure.
# Recipients should seek medical advice before participating in a dive for conditions that might make diving contraindicated, e.g. middle ear infection, etc.
# When wearing a mask avoid pressure over the implant site''',
# "A":"Reasoning- According to source information Sound processors are specified to withstand pressure at a depth of 40m under water for the purposes of scuba diving" \
# +"you are willing to do diving to 60 meters for sound processors,since 60 meters >40 meters where 40 meters is the maximum withstandable pressure for this device as per the souce information. It is not recommended"
# ANSWER- YES".'''
# directions=''' "The response should match the information from context and no external data should be used for generating response",
# "call center agent question may contain numerical fields in it. If yes, then compare numeric values with thresold values available in context and validate it twice before giving response",
# "If you are not sure of answer, Acknowledge it instead of giving wrong response as misinformation may lead to loss of trust on you" '''
# validation='Always validate your response with instructions provided.'
# Context: {context}
# Question: {question}
# """
# prompt = PromptTemplate(
# template=prompt_template, input_variables=["context", "question","role","instructions","details","examples","directions","validation"]
# )
# return prompt
# def get_prompt():
# prompt_template="""
# role='You are an expert acting as an helpful chatbot assistant who provides call center agents with accurate information retrieved from context without hallucinating'
# instructions='1. You must start your response with Hi and Generate an accurate response according to the user question by referring to information provided in the context
# 2.Your response should not bring any external information apart from context i am sharing 3.If you dont have enough information to answer the question, Please respond that you dont have sufficient knowledge to answer the question'
# details='response should give the information you think is correct based on the question and conclude your response with yes/no if required'
# examples='''
# 'Q': "I am flying to canada tomorrow and its -10 degrees celsius there, is it okay to travel to canade with extreme low temperatures after my implant surgery ?",
# "context": context provided in this prompt template,
# "A":"In canada temperature is -10 degrees, According to source information Sound processors are specified for operating Temperatures between +5°C to +40°C and storage temperatures between -20°C to +50°C. According to source the operating temperatures thresold i.e.., +5°C to +40°C for sound processors, Since -10 degrees temperature in canada is < -5 and 40 degrees,
# I would say exposing to such low temperatures would need doctors recommendation.No,Not recommended".
# 'Q': " 'Q': "I am flying to India tomorrow and its 45 degrees celsius there because of hot summer, is it safe to travel there as i had implant surgery recently ?",
# "context": context provided in this prompt template,
# "A":"In India current temperature is 45 degrees,According to source information Sound processors are specified for operating Temperatures between +5°C to +40°C and storage temperatures between -20°C to +50°C." \
# +"According to source the operating temperatures thresold i.e.., +5°C to +40°C for sound processors, Since 45 degrees in India is greater than the upper thresold 40 degrees and greater than 5 degrees of lower thresold for sound processors, I would say exposing to extreme temperatures would need doctors recommendation.Not recommended without medical advice."
# 'Q': "I am flying to saudi arabia next month and its expected teperature is 35 degrees celsius there, is it safe to travel there ?",
# "context": '''Extreme temperatures may be experience in some countries during seasonal periods or in a car parked in the sun.
# Extreme temperatures may also be experienced in e.g. saunas or medical treatment (cold chamber).The sound processors are specified for operating Temperatures between +5°C to +40°C and storage temperatures between -20°C to +50°C.
# The implant incorporated in the body will not be exposed to extreme temperatures. Recommendation: The recipient can undergo extreme temperatures (e.g. sauna, cold chamber) without any harm to the implant.
# The externals should be taken off while undergoing this procedure. Recipients should follow the user manual in relation to storage of the external equipment and batteries
# (e.g. not to leave externals on a hot day on the dashboard of an automobile)''',
# "A":"In saudi arabia if expected temperature for next month is 35 degrees, After validating with source information Sound processors are specified for operating Temperatures between +5°C to +40°C and storage temperatures between -20°C to +50°C. Since 35 degrees in saudi arabia is less than +40°C and greater than +5°C the temperature is falling within the thresold i.e.., +5°C to +40°C for sound processors.Yes, Its safe to travel".
# 'Q': "I would like to do under water diving at a depth of 60 meters, will this harm my Nucleus CI24R device",
# "context": '''The Nucleus CI24R, CI24M and CI22M implants are validated to withstand pressure at a depth of 25m under water for the purposes of scuba diving, which is equivalent to 2.5 atm nominal pressure and 4 atm test pressure.
# The Nucleus CI500 series and Freedom (CI24RE) implants are validated to withstand pressure at a depth of 40m under water for the purposes of scuba diving, which is equivalent to 4 atm nominal pressure and 6 atm test pressure.
# Recipients should seek medical advice before participating in a dive for conditions that might make diving contraindicated, e.g. middle ear infection, etc.
# When wearing a mask avoid pressure over the implant site''',
# "A":"According to source information Sound processors are specified to withstand pressure at a depth of 40m under water for the purposes of scuba diving you are willing to do diving to 60 meters for sound processors,since 60 meters >40 meters where 40 meters is the maximum withstandable pressure for this device as per the souce information hence it is not recommended. Yes,it may harm the device".'''
# directions=''' "The response should match the information from context and no external data should be used for generating response",
# "call center agent question may contain numerical fields in it. If yes, then compare numeric values with thresold values available in context and validate it twice before giving response",
# "If you are not sure of answer, Acknowledge it instead of giving wrong response as misinformation may lead to loss of trust on you" '''
# validation='Always validate your response with instructions provided.'
# Context: {context}
# Question: {question}
# """
# prompt = PromptTemplate(
# template=prompt_template, input_variables=["context", "question","role","instructions","details","examples","directions","validation"]
# )
# return prompt
def get_prompt():
prompt_template="""
role='You are an expert acting as an helpful chatbot assistant who provides call center agents with accurate information retrieved from context without hallucinating'
instructions='1. You must start your response with Hi and Generate an accurate response according to the user question by referring to information provided in the context
2.Your response should not bring any external information apart from context that is provided
3.If you dont have enough information to answer the question, Please respond that you dont have sufficient knowledge to answer the question
details='response should give the information you think is correct based on the question and conclude your response accordingly'
Following are the examples with "Q" referring to the Question. "Reasoning" reffers to the reasoning on how to derive the answer. "Answer" reffers to the final Answer.
examples='''
'Question': "I am flying to Dubai tomorrow and its 60 degrees celsius there, is it safe to travel there wearing the sound processors ?"
"Reasoning": In dubai current temperature is 60 degrees, According to the context, Sound processors are specified for operating Temperatures between +5°C to +40°C and storage temperatures between -20°C to +50°C." \
+" According to the context, the operating temperatures thresold i.e.., +5°C to +40°C for sound processors, Since 60 degrees in dubai is > 5 degrees and greater than 40 degrees, I would say exposing to extreme temperatures would need doctors recommendation.
"Answer"- "As the operating temperatures are between +5°C to +40°C, it is not recommended to travel there with the implant as the temperature is 60 degrees".
'Question': "I would like to do under water diving at a depth of 60 meters, will tthis harm my Nucleus CI24R device",
"Reasoning- According to the context Nucleus CI24R device are specified to withstand pressure at a depth of 40m under water for the purposes of scuba diving" \
+"you are willing to do diving to 60 meters for sound processors,since 60 meters >40 meters where 40 meters is the maximum withstandable pressure for This device as per the souce information. It is not recommended"
"Answer"- Yes, this will harm my device. As Nucleus CI24R device can withstand only upto the depths of 40m and since diving to
60m is above 40m. It will harm the device.
'''
directions='''"As per the above examples, you are supposed to understand the question, and based on the Context provided only, you must first reason out logically and accurately and respond back by adding the facts from the context and giving your response"
"The response should match the information from context and no external data should be used for generating response. Ensure you say you do not know if the answer to the question is not provided in the context",
"call center agent question may contain numerical fields in it. If yes, then compare numeric values with thresold values available in context and validate it twice before giving response",
"If you are not sure of answer, Acknowledge it instead of giving wrong response as misinformation may lead to loss of trust on you" '''
validation='Always validate your response with instructions provided. Ensure you say you do not know if the answer is not provided in the Context'
output= 'You need to respond back with the Answer without any prefixes such as "Answer:"'
#Input
Context: {context}
Question: {question}
#Ouput
Answer statement
"""
prompt = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
)
return prompt
def get_prompt_critique():
prompt_template = """You are the smart engine that looks at the response below along with the question asked
and makes edit to the response only if you think the response needs to be edited due to logical or contradicting mistakes
1. First read the question stated below and understand it.
2. Read the response below. This response acts as the answer to the question. However this response may be semantically
or logically incorrect in response.
3. The response usually will have 2 parts, the first part will be the answer and the second part will have the context
or information or reasoning from which the answer was stated.
4. If the answer and the reason are not in alignment, reformulate the response and send the correct response again
5. If the original response doesn't have "Yes/No", do not forcefully add "Yes/No" in the beginning.
Here are few examples for you to understand -
Question: I have Cochlear Implant series and want to swim to 30 meters, will this harm my device?
Response: No, the Cochlear Implant series are validated to withstand pressure up to 40m under water for the
purposes of swimming, which is equivalent to 4 atm nominal pressure and 6 atm test pressure. Therefore, swimming to
30 meters will not cause any harm to your device.
Reformulated/Revised Response: No, the Cochlear Implant series are validated to withstand pressure up to 40m under water for the
purposes of swimming, which is equivalent to 4 atm nominal pressure and 6 atm test pressure. Therefore, swimming to
30 meters will not cause any harm to your device.
Reason: In the Response, it clearly says that the device can withstand upto 40m and in the Question, the question asked is
can it go to 30m and will it harm the device. Since it doesn't harm the device, the answer should be "No" followed by the
same text that's in Response. Hence this is not having contradicting response, hence the same Response has been replied back
as Revised Response without changing anything
Question: I have Cochlear Implant series and want to swim to 50 meters, will this harm my device?
Response: No, the Cochlear Implant series are not designed to withstand pressure at depths greater than 40m
for swimming. Therefore, swimming to a depth of 50m would exceed the recommended pressure and could cause damage
to the implant.
Reformulated/Revised Response: Yes, the Cochlear Implant series are not designed to withstand pressure at depths greater than
40m for swimming. Therefore, swimming to a depth of 50m would exceed the recommended pressure and could cause damage
to the implant.
Reason: The Question clearly asked if it will harm the device when a person goes swimming to 50m, the Response says that
it will harm the device if it goes beyond 40m. But it has "No" and this is contradicting to the question asked. Hence
"No" has been changed to "Yes" and the rest of the reason is never changed. The reason should never be changed and only the
response such as "yes"/"no" can be changed based on the question asked.
From the above 2 examples, understand the context of the question and understand the response and understand how the
revised response has been changed or kept the same throught the reason. The reason is for you to understand logically how
you need to respond back.
Remember, "Response" is the source truth and you need to only believe it and not bring any other external sources. You need
to only change the "Yes/No" part of the question and not change anything else. This is very important
Be precise and accurate and be logical in answering.
If the original response doesn't have "Yes/No", do not forcefully add "Yes/No" in the beginning.
While formulating it be accurate and logical. Do not give contradicting answers.
The response should be the only facts you will look out for and not any other external
facts. While formulating the response read the question again and answer accordingly to avoid contradicting replies
Reply with the reformulated response.
Just send the response, do not prefix with anything like "Response :" or "Revised Response :"
Question: {Question}
Response: {Response}
Reformulated/Revised Response: Your Revised Response
"""
prompt = PromptTemplate(
template=prompt_template, input_variables=["Question", "Response"]
)
return prompt
# In[20]:
def get_prompt_critique2():
prompt_template = """You are the smart engine that looks at the response below along with the question asked and makes edit to the response only if you think the response needs to be edited due to logical or contradicting mistakes.If the response below says its not confident and doesn't have knowledge then mention the same as your response
Question: {Question}
Response: {Response}
Reformulated/Revised Response: Your Revised Response
"""
prompt = PromptTemplate(
template=prompt_template, input_variables=["Question", "Response"]
)
return prompt
def get_response(db_name, collection_name, index_name, query):
secret_key_dict = get_secret_key()
open_api_key = secret_key_dict["open_api_key"]
atlas_connection_string = secret_key_dict["atlas_connection_string"]
cluster = connect_mongodb(atlas_connection_string)
vector_search = get_vector_search_object(cluster,db_name,collection_name, index_name, open_api_key)
qa_retriever = vector_search.as_retriever(
search_type="similarity",
search_kwargs={"k": 10, "post_filter_pipeline": [{"$limit": 25}]},
)
prompt = get_prompt()
try:
qa = RetrievalQA.from_chain_type(
llm=OpenAI(api_key=open_api_key,temperature=0),
chain_type="stuff",
retriever=qa_retriever,
return_source_documents=True,
chain_type_kwargs={"prompt": prompt},
)
except:
time.sleep(120)
qa = RetrievalQA.from_chain_type(
llm=OpenAI(api_key=open_api_key,temperature=0),
chain_type="stuff",
retriever=qa_retriever,
return_source_documents=True,
chain_type_kwargs={"prompt": prompt},
)
docs = qa({"query": query})
# print(docs["result"])
# print(docs["source_documents"])
return docs
# In[ ]:
result = []
# Page title
st.set_page_config(page_title='Cochlear Smart QA Engine')
st.title('Cochlear Smart QA Engine')
# # File upload
# uploaded_file = st.file_uploader('Upload an article', type='pdf')
# print(dir(uploaded_file))
# Query text
secret_key_dict = get_secret_key()
open_api_key = secret_key_dict["open_api_key"]
if 'qa_data' not in st.session_state:
st.session_state.qa_data = {'question': '', 'rag_responses': [], 'responses': []}
streamlit_pwd = st.secrets.streamlit_pwd
# Form input and query
user_input = st.text_input('Enter the application password:', type='password')
if user_input != streamlit_pwd:
st.error("Authentication failed. Please provide the correct password.")
else:
with st.form('myform', clear_on_submit=True):
query_text = st.text_input('Enter your question:', placeholder = 'Please provide a short summary.', disabled=False)
# openai_api_key = st.text_input('OpenAI API Key', type='password', disabled=not (uploaded_file and query_text))
submitted = st.form_submit_button('Submit')
if submitted:
with st.spinner('Calculating...'):
try:
docs = get_response(DB_NAME,COLLECTION_NAME,INDEX_NAME,query_text)
except:
time.sleep(120)
docs = get_response(DB_NAME,COLLECTION_NAME,INDEX_NAME,query_text)
if (len(docs) != 0) and ("result" in dict(docs).keys()):
response = docs["result"]
rag_response = response
st.session_state.qa_data['rag_responses'].append(response)
try:
prompt = get_prompt_critique2()
llm = OpenAI(api_key=open_api_key,temperature=0)
prompt.format(Question=query_text,Response=response)
chain1 = LLMChain(llm=llm,prompt=prompt)
response = chain1.run(Question=query_text,Response=response)
except:
time.sleep(120)
prompt = get_prompt_critique2()
llm = OpenAI(api_key=open_api_key,temperature=0)
prompt.format(Question=query_text,Response=response)
chain1 = LLMChain(llm=llm,prompt=prompt)
response = chain1.run(Question=query_text,Response=response)
result.append(response)
st.session_state.qa_data['question'] = query_text
st.session_state.qa_data['responses'].append(response)
for idx, r in enumerate(st.session_state.qa_data['responses'][::-1], start=1):
# Split the response into words
words = rag_response.split(' ')
# Initialize an empty line and list of lines
line, lines = '', []
# Add words to the line until it exceeds the desired width
for word in words:
if len(line + word) > 10:
lines.append(line)
line = word + ' '
else:
line += word + ' '
# Add the last line
lines.append(line)
# Join the lines with newline characters
formatted_response = '\n'.join(lines)
# Display the formatted response
st.info(f"Question: {query_text} \n\n {formatted_response} \n\n")
# st.info(f"Question: {query_text} \n\n {rag_response} \n\n")
# st.markdown(f"""**Question:** {query_text}\n {rag_response}""")
# st.info(f"Question: {query_text} \n\n {rag_response} \n\n")
#st.info(f"Question: {query_text} \n\n {rag_response} \n\n Response : {r} \n\n")
# st.info(f"RAG Response : {rag_response}")
# st.info(f"Response : {r}")
st.title('Top Similar Documents')
df_lis = []
for i in docs["source_documents"]:
lis = []
lis.append(i.page_content)
if "source" in i.metadata.keys():
lis.append(i.metadata["source"])
else:
lis.append("")
if "page" in i.metadata.keys():
lis.append(i.metadata["page"])
else:
lis.append(None)
df_lis.append(lis)
similar_df = pd.DataFrame(df_lis,columns = ["Text", "Source Document", "Page Number"])
st.table(similar_df)
else:
st.session_state.qa_data['question'] = query_text
st.session_state.qa_data['responses'] = None
# del openai_api_key
st.write(f"Last Submitted Question: {st.session_state.qa_data['question']}")
st.write("All Responses:")
for idx, r in enumerate(st.session_state.qa_data['rag_responses'], start=1):
st.write(f"RAG Response : {r}")
for idx, r in enumerate(st.session_state.qa_data['responses'], start=1):
st.write(f"Response {idx}: {r}")
# if len(result):
# st.info(response)
| [
"\n role='You are an expert acting as an helpful chatbot assistant who provides call center agents with accurate information retrieved from context without hallucinating'\n instructions='1. You must start your response with Hi and Generate an accurate response according to the user question by referring to information provided in the context\n 2.Your response should not bring any external information apart from context that is provided \n 3.If you dont have enough information to answer the question, Please respond that you dont have sufficient knowledge to answer the question\n\n details='response should give the information you think is correct based on the question and conclude your response accordingly'\n\n Following are the examples with \"Q\" referring to the Question. \"Reasoning\" reffers to the reasoning on how to derive the answer. \"Answer\" reffers to the final Answer.\n\n examples='''\n 'Question': \"I am flying to Dubai tomorrow and its 60 degrees celsius there, is it safe to travel there wearing the sound processors ?\"\n \"Reasoning\": In dubai current temperature is 60 degrees, According to the context, Sound processors are specified for operating Temperatures between +5°C to +40°C and storage temperatures between -20°C to +50°C.\" +\" According to the context, the operating temperatures thresold i.e.., +5°C to +40°C for sound processors, Since 60 degrees in dubai is > 5 degrees and greater than 40 degrees, I would say exposing to extreme temperatures would need doctors recommendation.\n\n \"Answer\"- \"As the operating temperatures are between +5°C to +40°C, it is not recommended to travel there with the implant as the temperature is 60 degrees\".\n \n 'Question': \"I would like to do under water diving at a depth of 60 meters, will tthis harm my Nucleus CI24R device\",\n \"Reasoning- According to the context Nucleus CI24R device are specified to withstand pressure at a depth of 40m under water for the purposes of scuba diving\" +\"you are willing to do diving to 60 meters for sound processors,since 60 meters >40 meters where 40 meters is the maximum withstandable pressure for This device as per the souce information. It is not recommended\"\n \"Answer\"- Yes, this will harm my device. As Nucleus CI24R device can withstand only upto the depths of 40m and since diving to \n 60m is above 40m. It will harm the device.\n '''\n \n directions='''\"As per the above examples, you are supposed to understand the question, and based on the Context provided only, you must first reason out logically and accurately and respond back by adding the facts from the context and giving your response\" \n \"The response should match the information from context and no external data should be used for generating response. Ensure you say you do not know if the answer to the question is not provided in the context\",\n \"call center agent question may contain numerical fields in it. If yes, then compare numeric values with thresold values available in context and validate it twice before giving response\",\n \"If you are not sure of answer, Acknowledge it instead of giving wrong response as misinformation may lead to loss of trust on you\" '''\n validation='Always validate your response with instructions provided. Ensure you say you do not know if the answer is not provided in the Context'\n output= 'You need to respond back with the Answer without any prefixes such as \"Answer:\"'\n #Input\n Context: {context}\n Question: {question}\n \n #Ouput\n Answer statement\n ",
"Response",
"question",
"You are the smart engine that looks at the response below along with the question asked\n and makes edit to the response only if you think the response needs to be edited due to logical or contradicting mistakes\n\n 1. First read the question stated below and understand it.\n 2. Read the response below. This response acts as the answer to the question. However this response may be semantically\n or logically incorrect in response.\n 3. The response usually will have 2 parts, the first part will be the answer and the second part will have the context \n or information or reasoning from which the answer was stated. \n 4. If the answer and the reason are not in alignment, reformulate the response and send the correct response again\n 5. If the original response doesn't have \"Yes/No\", do not forcefully add \"Yes/No\" in the beginning.\n\n Here are few examples for you to understand - \n\n Question: I have Cochlear Implant series and want to swim to 30 meters, will this harm my device? \n\n Response: No, the Cochlear Implant series are validated to withstand pressure up to 40m under water for the \n purposes of swimming, which is equivalent to 4 atm nominal pressure and 6 atm test pressure. Therefore, swimming to \n 30 meters will not cause any harm to your device.\n \n Reformulated/Revised Response: No, the Cochlear Implant series are validated to withstand pressure up to 40m under water for the \n purposes of swimming, which is equivalent to 4 atm nominal pressure and 6 atm test pressure. Therefore, swimming to \n 30 meters will not cause any harm to your device.\n \n Reason: In the Response, it clearly says that the device can withstand upto 40m and in the Question, the question asked is\n can it go to 30m and will it harm the device. Since it doesn't harm the device, the answer should be \"No\" followed by the \n same text that's in Response. Hence this is not having contradicting response, hence the same Response has been replied back\n as Revised Response without changing anything\n \n Question: I have Cochlear Implant series and want to swim to 50 meters, will this harm my device? \n\n Response: No, the Cochlear Implant series are not designed to withstand pressure at depths greater than 40m \n for swimming. Therefore, swimming to a depth of 50m would exceed the recommended pressure and could cause damage \n to the implant.\n \n Reformulated/Revised Response: Yes, the Cochlear Implant series are not designed to withstand pressure at depths greater than \n 40m for swimming. Therefore, swimming to a depth of 50m would exceed the recommended pressure and could cause damage \n to the implant.\n \n Reason: The Question clearly asked if it will harm the device when a person goes swimming to 50m, the Response says that\n it will harm the device if it goes beyond 40m. But it has \"No\" and this is contradicting to the question asked. Hence\n \"No\" has been changed to \"Yes\" and the rest of the reason is never changed. The reason should never be changed and only the\n response such as \"yes\"/\"no\" can be changed based on the question asked.\n \n From the above 2 examples, understand the context of the question and understand the response and understand how the \n revised response has been changed or kept the same throught the reason. The reason is for you to understand logically how\n you need to respond back.\n \n Remember, \"Response\" is the source truth and you need to only believe it and not bring any other external sources. You need\n to only change the \"Yes/No\" part of the question and not change anything else. This is very important\n \n \n Be precise and accurate and be logical in answering. \n\n If the original response doesn't have \"Yes/No\", do not forcefully add \"Yes/No\" in the beginning.\n \n While formulating it be accurate and logical. Do not give contradicting answers. \n\n The response should be the only facts you will look out for and not any other external\n facts. While formulating the response read the question again and answer accordingly to avoid contradicting replies\n\n Reply with the reformulated response.\n\n Just send the response, do not prefix with anything like \"Response :\" or \"Revised Response :\"\n\n Question: {Question}\n \n Response: {Response}\n \n Reformulated/Revised Response: Your Revised Response\n\n\n ",
"context",
"Question",
"You are the smart engine that looks at the response below along with the question asked and makes edit to the response only if you think the response needs to be edited due to logical or contradicting mistakes.If the response below says its not confident and doesn't have knowledge then mention the same as your response\n Question: {Question}\n Response: {Response}\n Reformulated/Revised Response: Your Revised Response\n "
] |
2024-01-10 | hvarfner/JointEntropySearch | experiments~hpobench~libs~HPOBench~hpobench~benchmarks~rl~cartpole.py | """
Changelog:
==========
0.0.3
* New container release due to a general change in the communication between container and HPOBench.
Works with HPOBench >= v0.0.8
0.0.2:
* Standardize the structure of the meta information
* Suppress unnecessary tensorforce logging messages
0.0.1:
* First implementation
"""
import logging
import time
from typing import Union, Dict
import ConfigSpace as CS
import numpy as np
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import tensorflow as tf # noqa: E402
from tensorforce.agents import PPOAgent # noqa: E402
from tensorforce.contrib.openai_gym import OpenAIGym # noqa: E402
from tensorforce.execution import Runner # noqa: E402
from hpobench.abstract_benchmark import AbstractBenchmark # noqa: E402
from hpobench.util import rng_helper # noqa: E402
__version__ = '0.0.3'
logger = logging.getLogger('CartpoleBenchmark')
tf.logging.set_verbosity(tf.logging.ERROR)
class CartpoleBase(AbstractBenchmark):
def __init__(self, rng: Union[int, np.random.RandomState, None] = None, defaults: Union[Dict, None] = None,
max_episodes: Union[int, None] = 3000):
"""
Base benchmark for "cartpole" benchmark. In this benchmark a PPO agent tries to solve the cartpole task.
Parameters
----------
rng : int,None,np.RandomState
RandomState for the experiment
defaults : dict, None
default configuration used for the PPO agent
max_episodes : int, None
limit of the length of a episode for the cartpole runner. Defaults to 3000
"""
logger.warning('This Benchmark is not deterministic.')
super(CartpoleBase, self).__init__()
self.rng = rng_helper.get_rng(rng=rng)
tf.random.set_random_seed(0)
np.random.seed(0)
self.env = OpenAIGym('CartPole-v0', visualize=False)
self.avg_n_episodes = 20
self.max_episodes = max_episodes
self.defaults = {"n_units_1": 64,
"n_units_2": 64,
"batch_size": 64,
"learning_rate": 1e-3,
"discount": 0.99,
"likelihood_ratio_clipping": 0.2,
"activation_1": "tanh",
"activation_2": "tanh",
"optimizer_type": "adam",
"optimization_steps": 10,
"baseline_mode": "states",
"baseline_n_units_1": 64,
"baseline_n_units_2": 64,
"baseline_learning_rate": 1e-3,
"baseline_optimization_steps": 10,
"baseline_optimizer_type": "adam"}
if defaults is not None:
self.defaults.update(defaults)
@staticmethod
def get_configuration_space(seed: Union[int, None] = None) -> CS.ConfigurationSpace:
""" Returns the CS.ConfigurationSpace of the benchmark. """
raise NotImplementedError()
@staticmethod
def get_fidelity_space(seed: Union[int, None] = None) -> CS.ConfigurationSpace:
"""
Creates a ConfigSpace.ConfigurationSpace containing all fidelity parameters for
all Cartpole Benchmarks
Parameters
----------
seed : int, None
Fixing the seed for the ConfigSpace.ConfigurationSpace
Returns
-------
ConfigSpace.ConfigurationSpace
"""
seed = seed if seed is not None else np.random.randint(1, 100000)
fidel_space = CS.ConfigurationSpace(seed=seed)
fidel_space.add_hyperparameters([
CS.UniformIntegerHyperparameter('budget', lower=1, upper=9, default_value=9)
])
return fidel_space
@AbstractBenchmark.check_parameters
def objective_function(self, configuration: Union[Dict, CS.Configuration],
fidelity: Union[Dict, CS.Configuration, None] = None,
rng: Union[np.random.RandomState, int, None] = None,
**kwargs) -> Dict:
"""
Trains a Tensorforce RL agent on the cartpole experiment. This benchmark was used in the experiments for the
BOHB-paper (see references). A more detailed explanations can be found there.
The budget describes how often the agent is trained on the experiment.
It returns the average number of the length of episodes.
Parameters
----------
configuration : Dict, CS.Configuration
fidelity: Dict, None
Fidelity parameters, check get_fidelity_space(). Uses default (max) value if None.
rng : np.random.RandomState, int, None
Random seed to use in the benchmark. To prevent overfitting on a single seed, it is possible to pass a
parameter ``rng`` as 'int' or 'np.random.RandomState' to this function.
If this parameter is not given, the default random state is used.
kwargs
Returns
-------
Dict -
function_value : average episode length
cost : time to run all agents
info : Dict
max_episodes : the maximum length of an episode
budget : number of agents used
all_runs : the episode length of all runs of all agents
fidelity : the used fidelities in this evaluation
"""
self.rng = rng_helper.get_rng(rng=rng, self_rng=self.rng)
tf.random.set_random_seed(self.rng.randint(1, 100000))
np.random.seed(self.rng.randint(1, 100000))
# fill in missing entries with default values for 'incomplete/reduced' configspaces
new_config = self.defaults
new_config.update(configuration)
configuration = new_config
start_time = time.time()
network_spec = [{'type': 'dense',
'size': configuration["n_units_1"],
'activation': configuration['activation_1']},
{'type': 'dense',
'size': configuration["n_units_2"],
'activation': configuration['activation_2']}]
converged_episodes = []
for _ in range(fidelity["budget"]):
agent = PPOAgent(states=self.env.states,
actions=self.env.actions,
network=network_spec,
update_mode={'unit': 'episodes', 'batch_size': configuration["batch_size"]},
step_optimizer={'type': configuration["optimizer_type"],
'learning_rate': configuration["learning_rate"]},
optimization_steps=configuration["optimization_steps"],
discount=configuration["discount"],
baseline_mode=configuration["baseline_mode"],
baseline={"type": "mlp",
"sizes": [configuration["baseline_n_units_1"],
configuration["baseline_n_units_2"]]},
baseline_optimizer={"type": "multi_step",
"optimizer": {"type": configuration["baseline_optimizer_type"],
"learning_rate":
configuration["baseline_learning_rate"]},
"num_steps": configuration["baseline_optimization_steps"]},
likelihood_ratio_clipping=configuration["likelihood_ratio_clipping"]
)
def episode_finished(record):
# Check if we have converged
return np.mean(record.episode_rewards[-self.avg_n_episodes:]) != 200
runner = Runner(agent=agent, environment=self.env)
runner.run(episodes=self.max_episodes, max_episode_timesteps=200, episode_finished=episode_finished)
converged_episodes.append(len(runner.episode_rewards))
cost = time.time() - start_time
return {'function_value': np.mean(converged_episodes),
'cost': cost,
'info': {'max_episodes': self.max_episodes,
'all_runs': converged_episodes,
'fidelity': fidelity
}
}
@AbstractBenchmark.check_parameters
def objective_function_test(self, configuration: Union[Dict, CS.Configuration],
fidelity: Union[Dict, CS.Configuration, None] = None,
rng: Union[np.random.RandomState, int, None] = None,
**kwargs) -> Dict:
"""
Validate a configuration on the cartpole benchmark. Use the full budget.
Parameters
----------
configuration : Dict, CS.Configuration
fidelity: Dict, None
Fidelity parameters, check get_fidelity_space(). Uses default (max) value if None.
rng : np.random.RandomState, int, None
Random seed to use in the benchmark. To prevent overfitting on a single seed, it is possible to pass a
parameter ``rng`` as 'int' or 'np.random.RandomState' to this function.
If this parameter is not given, the default random state is used.
kwargs
Returns
-------
Dict -
function_value : average episode length
cost : time to run all agents
info : Dict
max_episodes : the maximum length of an episode
budget : number of agents used
all_runs : the episode length of all runs of all agents
fidelity : the used fidelities in this evaluation
"""
return self.objective_function(configuration=configuration, fidelity=fidelity, rng=rng,
**kwargs)
@staticmethod
def get_meta_information() -> Dict:
return {'name': 'Cartpole',
'references': ['@InProceedings{falkner-icml-18,'
'title = {{BOHB}: Robust and Efficient Hyperparameter Optimization at Scale},'
'url = http://proceedings.mlr.press/v80/falkner18a.html'
'author = {Falkner, Stefan and Klein, Aaron and Hutter, Frank}, '
'booktitle = {Proceedings of the 35th International Conference on Machine Learning},'
'pages = {1436 - -1445},'
'year = {2018}}'],
'code': 'https://github.com/automl/HPOlib1.5/blob/development/hpolib/benchmarks/rl/cartpole.py',
'note': 'This benchmark is not deterministic, since the gym environment is not deterministic.'
' Also, often the benchmark is already converged after 1000 episodes.'
' Increasing the budget \"max_episodes\" may lead to the same results.'}
class CartpoleFull(CartpoleBase):
"""Cartpole experiment on full configuration space"""
@staticmethod
def get_configuration_space(seed: Union[int, None] = None) -> CS.ConfigurationSpace:
"""
Get the configuration space for this benchmark
Parameters
----------
seed : int, None
Random seed for the configuration space.
Returns
-------
CS.ConfigurationSpace -
Containing the benchmark's hyperparameter
"""
seed = seed if seed is not None else np.random.randint(1, 100000)
cs = CS.ConfigurationSpace(seed=seed)
cs.add_hyperparameters([
CS.UniformIntegerHyperparameter("n_units_1", lower=8, default_value=64, upper=64, log=True),
CS.UniformIntegerHyperparameter("n_units_2", lower=8, default_value=64, upper=64, log=True),
CS.UniformIntegerHyperparameter("batch_size", lower=8, default_value=64, upper=256, log=True),
CS.UniformFloatHyperparameter("learning_rate", lower=1e-7, default_value=1e-3, upper=1e-1, log=True),
CS.UniformFloatHyperparameter("discount", lower=0, default_value=.99, upper=1),
CS.UniformFloatHyperparameter("likelihood_ratio_clipping", lower=0, default_value=.2, upper=1),
CS.CategoricalHyperparameter("activation_1", ["tanh", "relu"]),
CS.CategoricalHyperparameter("activation_2", ["tanh", "relu"]),
CS.CategoricalHyperparameter("optimizer_type", ["adam", "rmsprop"]),
CS.UniformIntegerHyperparameter("optimization_steps", lower=1, default_value=10, upper=10),
CS.CategoricalHyperparameter("baseline_mode", ["states", "network"]),
CS.UniformIntegerHyperparameter("baseline_n_units_1", lower=8, default_value=64, upper=128, log=True),
CS.UniformIntegerHyperparameter("baseline_n_units_2", lower=8, default_value=64, upper=128, log=True),
CS.UniformFloatHyperparameter("baseline_learning_rate",
lower=1e-7, default_value=1e-3, upper=1e-1, log=True),
CS.UniformIntegerHyperparameter("baseline_optimization_steps", lower=1, default_value=10, upper=10),
CS.CategoricalHyperparameter("baseline_optimizer_type", ["adam", "rmsprop"]),
])
return cs
@staticmethod
def get_meta_information() -> Dict:
""" Returns the meta information for the benchmark """
meta_information = CartpoleBase.get_meta_information()
meta_information['description'] = 'Cartpole with full configuration space'
return meta_information
class CartpoleReduced(CartpoleBase):
"""Cartpole experiment on smaller configuration space"""
@staticmethod
def get_configuration_space(seed: Union[int, None] = None) -> CS.ConfigurationSpace:
"""
Get the configuration space for this benchmark
Parameters
----------
seed : int, None
Random seed for the configuration space.
Returns
-------
CS.ConfigurationSpace -
Containing the benchmark's hyperparameter
"""
seed = seed if seed is not None else np.random.randint(1, 100000)
cs = CS.ConfigurationSpace(seed=seed)
cs.add_hyperparameters([
CS.UniformIntegerHyperparameter("n_units_1", lower=8, default_value=64, upper=128, log=True),
CS.UniformIntegerHyperparameter("n_units_2", lower=8, default_value=64, upper=128, log=True),
CS.UniformIntegerHyperparameter("batch_size", lower=8, default_value=64, upper=256, log=True),
CS.UniformFloatHyperparameter("learning_rate", lower=1e-7, default_value=1e-3, upper=1e-1, log=True),
CS.UniformFloatHyperparameter("discount", lower=0, default_value=.99, upper=1),
CS.UniformFloatHyperparameter("likelihood_ratio_clipping", lower=0, default_value=.2, upper=1),
CS.UniformFloatHyperparameter("entropy_regularization", lower=0, default_value=0.01, upper=1)
])
return cs
@staticmethod
def get_meta_information() -> Dict:
""" Returns the meta information for the benchmark """
meta_information = CartpoleBase.get_meta_information()
meta_information['description'] = 'Cartpole with reduced configuration space'
return meta_information
| [] |
2024-01-10 | MuhammadMoinFaisal/LargeLanguageModelsProjects | Flask_LangChain_Recording~part3.py | from flask import Flask, jsonify, request
from langchain.document_loaders import SeleniumURLLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.llms import CTransformers
from langchain.chains.summarize import load_summarize_chain
from langchain.docstore.document import Document
from urllib.parse import unquote
from langchain import OpenAI
import os
os.environ["OPENAI_API_KEY"] = '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'
app=Flask(__name__)
#1. Extract Data From the Website
def extract_data_website(url):
loader=SeleniumURLLoader([url])
data=loader.load()
text=""
for page in data:
text +=page.page_content + " "
return text
#2. Generate a Summary of the Text
def split_text_chunks_and_summary_generator(text):
text_splitter=CharacterTextSplitter(separator='\n',
chunk_size=1000,
chunk_overlap=20)
text_chunks=text_splitter.split_text(text)
print(len(text_chunks))
#llm = CTransformers(model='models\llama-2-7b-chat.ggmlv3.q4_0.bin',
# model_type='llama',
# config={'max_new_tokens': 128,
# 'temperature': 0.01}
# )
llm = OpenAI()
docs = [Document(page_content=t) for t in text_chunks]
chain=load_summarize_chain(llm=llm, chain_type='map_reduce', verbose=True)
summary = chain.run(docs)
return summary
@app.route('/', methods=['GET', 'POST'])
def home():
return "Summary Generator"
@app.route('/summary_generate', methods=['GET', 'POST'])
def summary_generator():
encode_url=unquote(unquote(request.args.get('url')))
if not encode_url:
return jsonify({'error':'URL is required'}), 400
text=extract_data_website(encode_url)
#text_chunks=split_text_chunks(text)
#print(len(text_chunks))
summary=split_text_chunks_and_summary_generator(text)
print("Here is the Complete Summary", summary)
response= {
'submitted_url': encode_url,
'summary': summary
}
return jsonify(response)
if __name__ == '__main__':
app.run(debug=True)
| [] |
2024-01-10 | MuhammadMoinFaisal/LargeLanguageModelsProjects | Streamlit_Chat_Multiple_PDF_PaLM2~script.py | import streamlit as st
from PyPDF2 import PdfReader
from langchain.text_splitter import RecursiveCharacterTextSplitter
import google.generativeai as palm
from langchain.embeddings import GooglePalmEmbeddings
from langchain.llms import GooglePalm
from langchain.vectorstores import FAISS
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
import os
os.environ['GOOGLE_API_KEY'] = 'AIzaSyAANEPA1UF6WE4O_0GQh2s27iBT4VrN0Ag'
def get_pdf_text(pdf_docs):
text=""
for pdf in pdf_docs:
pdf_reader= PdfReader(pdf)
for page in pdf_reader.pages:
text+= page.extract_text()
return text
def get_text_chunks(text):
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=20)
chunks = text_splitter.split_text(text)
return chunks
def get_vector_store(text_chunks):
embeddings = GooglePalmEmbeddings()
vector_store = FAISS.from_texts(text_chunks, embedding=embeddings)
return vector_store
def get_conversational_chain(vector_store):
llm=GooglePalm()
memory = ConversationBufferMemory(memory_key = "chat_history", return_messages=True)
conversation_chain = ConversationalRetrievalChain.from_llm(llm=llm, retriever=vector_store.as_retriever(), memory=memory)
return conversation_chain
def user_input(user_question):
response = st.session_state.conversation({'question': user_question})
st.session_state.chatHistory = response['chat_history']
for i, message in enumerate(st.session_state.chatHistory):
if i%2 == 0:
st.write("Human: ", message.content)
else:
st.write("Bot: ", message.content)
def main():
st.set_page_config("Chat with Multiple PDFs")
st.header("Chat with Multiple PDF 💬")
user_question = st.text_input("Ask a Question from the PDF Files")
if "conversation" not in st.session_state:
st.session_state.conversation = None
if "chatHistory" not in st.session_state:
st.session_state.chatHistory = None
if user_question:
user_input(user_question)
with st.sidebar:
st.title("Settings")
st.subheader("Upload your Documents")
pdf_docs = st.file_uploader("Upload your PDF Files and Click on the Process Button", accept_multiple_files=True)
if st.button("Process"):
with st.spinner("Processing"):
raw_text = get_pdf_text(pdf_docs)
text_chunks = get_text_chunks(raw_text)
vector_store = get_vector_store(text_chunks)
st.session_state.conversation = get_conversational_chain(vector_store)
st.success("Done")
if __name__ == "__main__":
main()
| [] |
2024-01-10 | MuhammadMoinFaisal/LargeLanguageModelsProjects | Run_llama2_local_cpu_upload~Llama2_locally.py | from langchain import PromptTemplate
from langchain import LLMChain
from langchain.llms import CTransformers
B_INST, E_INST = "[INST]", "[/INST]"
B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
DEFAULT_SYSTEM_PROMPT="""\
You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information."""
instruction = "Convert the following text from English to French: \n\n {text}"
SYSTEM_PROMPT = B_SYS + DEFAULT_SYSTEM_PROMPT + E_SYS
template = B_INST + SYSTEM_PROMPT + instruction + E_INST
print(template)
prompt = PromptTemplate(template=template, input_variables=["text"])
llm = CTransformers(model='models\llama-2-7b-chat.ggmlv3.q4_0.bin',
model_type='llama',
config={'max_new_tokens': 128,
'temperature': 0.01}
)
LLM_Chain=LLMChain(prompt=prompt, llm=llm)
print(LLM_Chain.run("How are you"))
| [
"You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.\n\nIf a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.",
"PLACEHOLDERPLACEHOLDERYou are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.\n\nIf a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.PLACEHOLDERinstruction8994cfee-1363-4830-afd8-07d2ba51a0daPLACEHOLDER",
"PLACEHOLDERPLACEHOLDERYou are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.\n\nIf a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.PLACEHOLDERConvert the following text from English to French: \n\n {text}PLACEHOLDER",
"t know the answer to a question, please don",
"PLACEHOLDERYou are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.\n\nIf a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.PLACEHOLDER"
] |
2024-01-10 | MuhammadMoinFaisal/LargeLanguageModelsProjects | Medical_Chatbot_Llama2_Pinecone~script.py | from langchain import PromptTemplate
from langchain.chains import RetrievalQA
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Pinecone
import pinecone
from langchain.document_loaders import PyPDFLoader, DirectoryLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.prompts import PromptTemplate
from langchain.llms import CTransformers
from langchain.chat_models import ChatOpenAI
from dotenv import load_dotenv
import os
import timeit
import sys
load_dotenv()
PINECONE_API_KEY=os.environ.get('PINECONE_API_KEY','f5444e56-58db-42db-afd6-d4bd9b2cb40c')
PINECONE_API_ENV=os.environ.get('PINECONE_API_ENV', 'asia-southeast1-gcp-free')
#***Extract Data From the PDF File***
def load_pdf_file(data):
loader= DirectoryLoader(data,
glob="*.pdf",
loader_cls=PyPDFLoader)
documents=loader.load()
return documents
extracted_data=load_pdf_file(data='data/')
#print(data)
#***Split the Data into Text Chunks****
def text_split(extracted_data):
text_splitter=RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=20)
text_chunks=text_splitter.split_documents(extracted_data)
return text_chunks
text_chunks=text_split(extracted_data)
print("Length of Text Chunks", len(text_chunks))
#***Download the Embeddings from Hugging Face***
def download_hugging_face_embeddings():
embeddings=HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2')
return embeddings
start = timeit.default_timer()
embeddings = download_hugging_face_embeddings()
query_result = embeddings.embed_query("Hello world")
print("Length", len(query_result))
#Initializing the Pinecone
pinecone.init(api_key=PINECONE_API_KEY,
environment=PINECONE_API_ENV)
index_name="langchainpinecone"
#Creating Embeddings for Each of The Text Chunks
#docsearch=Pinecone.from_texts([t.page_content for t in text_chunks], embeddings, index_name=index_name)
#If we already have an index we can load it like this
docsearch=Pinecone.from_existing_index(index_name, embeddings)
query = "What are Allergies"
#docs=docsearch.similarity_search(query, k=3)
#print("Result", docs)
prompt_template="""
Use the following pieces of information to answer the user's question.
If you don't know the answer, just say that you don't know, don't try to make up an answer.
Context: {context}
Question: {question}
Only return the helpful answer below and nothing else.
Helpful answer:
"""
PROMPT=PromptTemplate(template=prompt_template, input_variables=["context", "question"])
chain_type_kwargs={"prompt": PROMPT}
llm=CTransformers(model="models\llama-2-7b-chat.ggmlv3.q4_0.bin",
model_type="llama",
config={'max_new_tokens':512,
'temperature':0.8})
#llm=ChatOpenAI(model_name="gpt-3.5-turbo")
qa=RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=docsearch.as_retriever(search_kwargs={'k': 2}),return_source_documents=True, chain_type_kwargs=chain_type_kwargs)
#query="What are Allergies"
#print("Response",qa.run(query))
while True:
user_input=input(f"Input Prompt:")
if user_input=='exit':
print('Exiting')
sys.exit()
if user_input=='':
continue
result=qa({"query": user_input})
print("Response : ", result["result"])
print("Source Documents : ", result["source_documents"])
end=timeit.default_timer()
print(f"Time to retrieve response: {end-start}") | [
"\nUse the following pieces of information to answer the user's question.\nIf you don't know the answer, just say that you don't know, don't try to make up an answer.\n\nContext: {context}\nQuestion: {question}\n\nOnly return the helpful answer below and nothing else.\nHelpful answer:\n",
"context",
"question",
"t know the answer, just say that you don"
] |
2024-01-10 | MuhammadMoinFaisal/LargeLanguageModelsProjects | Chat_with_CSV_File_Lllama2~script.py | from langchain.document_loaders.csv_loader import CSVLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain.llms import CTransformers
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
import sys
DB_FAISS_PATH = "vectorstore/db_faiss"
loader = CSVLoader(file_path="data/2019.csv", encoding="utf-8", csv_args={'delimiter': ','})
data = loader.load()
print(data)
# Split the text into Chunks
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=20)
text_chunks = text_splitter.split_documents(data)
print(len(text_chunks))
# Download Sentence Transformers Embedding From Hugging Face
embeddings = HuggingFaceEmbeddings(model_name = 'sentence-transformers/all-MiniLM-L6-v2')
# COnverting the text Chunks into embeddings and saving the embeddings into FAISS Knowledge Base
docsearch = FAISS.from_documents(text_chunks, embeddings)
docsearch.save_local(DB_FAISS_PATH)
#query = "What is the value of GDP per capita of Finland provided in the data?"
#docs = docsearch.similarity_search(query, k=3)
#print("Result", docs)
llm = CTransformers(model="models/llama-2-7b-chat.ggmlv3.q4_0.bin",
model_type="llama",
max_new_tokens=512,
temperature=0.1)
qa = ConversationalRetrievalChain.from_llm(llm, retriever=docsearch.as_retriever())
while True:
chat_history = []
#query = "What is the value of GDP per capita of Finland provided in the data?"
query = input(f"Input Prompt: ")
if query == 'exit':
print('Exiting')
sys.exit()
if query == '':
continue
result = qa({"question":query, "chat_history":chat_history})
print("Response: ", result['answer']) | [] |
2024-01-10 | MuhammadMoinFaisal/LargeLanguageModelsProjects | Run_llama2_local_cpu_upload~Q_A_with_documents.py | from langchain import PromptTemplate
from langchain.chains import RetrievalQA
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain.document_loaders import PyPDFLoader, DirectoryLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.llms import CTransformers
import sys
#**Step 1: Load the PDF File from Data Path****
loader=DirectoryLoader('data/',
glob="*.pdf",
loader_cls=PyPDFLoader)
documents=loader.load()
#print(documents)
#***Step 2: Split Text into Chunks***
text_splitter=RecursiveCharacterTextSplitter(
chunk_size=500,
chunk_overlap=50)
text_chunks=text_splitter.split_documents(documents)
print(len(text_chunks))
#**Step 3: Load the Embedding Model***
embeddings=HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2', model_kwargs={'device':'cpu'})
#**Step 4: Convert the Text Chunks into Embeddings and Create a FAISS Vector Store***
vector_store=FAISS.from_documents(text_chunks, embeddings)
##**Step 5: Find the Top 3 Answers for the Query***
query="YOLOv7 outperforms which models"
docs = vector_store.similarity_search(query)
#print(docs)
llm=CTransformers(model="models\llama-2-7b-chat.ggmlv3.q4_0.bin",
model_type="llama",
config={'max_new_tokens':128,
'temperature':0.01})
template="""Use the following pieces of information to answer the user's question.
If you dont know the answer just say you know, don't try to make up an answer.
Context:{context}
Question:{question}
Only return the helpful answer below and nothing else
Helpful answer
"""
qa_prompt=PromptTemplate(template=template, input_variables=['context', 'question'])
#start=timeit.default_timer()
chain = RetrievalQA.from_chain_type(llm=llm,
chain_type='stuff',
retriever=vector_store.as_retriever(search_kwargs={'k': 2}),
return_source_documents=True,
chain_type_kwargs={'prompt': qa_prompt})
#response=chain({'query': "YOLOv7 is trained on which dataset"})
#end=timeit.default_timer()
#print(f"Here is the complete Response: {response}")
#print(f"Here is the final answer: {response['result']}")
#print(f"Time to generate response: {end-start}")
while True:
user_input=input(f"prompt:")
if query=='exit':
print('Exiting')
sys.exit()
if query=='':
continue
result=chain({'query':user_input})
print(f"Answer:{result['result']}")
| [
"context",
"question",
"Use the following pieces of information to answer the user's question.\nIf you dont know the answer just say you know, don't try to make up an answer.\n\nContext:{context}\nQuestion:{question}\n\nOnly return the helpful answer below and nothing else\nHelpful answer\n"
] |
2024-01-10 | MuhammadMoinFaisal/LargeLanguageModelsProjects | Book%20Summaries%20with%20LangChain%20and%20OpenAI%20%20Streamlit%20App~booksummaryapplication.py | from langchain.llms import OpenAI
import streamlit as st
import os
from openaiapikey import openai_key
from langchain import PromptTemplate
from langchain.chains import LLMChain
from langchain.chains import SequentialChain
os.environ['OPENAI_API_KEY'] = openai_key
st.title('Book Summary')
input_text = st.text_input("Search the book you want")
#Prompt Template
first_input_prompt = PromptTemplate(input_variables = ['name'],
template="Provide me a summary of the book {name}"
)
#Open AI LLMS
llm = OpenAI(temperature=0.8)
#LLM Chain
chain1 = LLMChain(llm=llm, prompt = first_input_prompt, verbose=True, output_key = 'summaryofbook')
#Prompt Template
second_input_prompt = PromptTemplate(input_variables = ['summaryofbook'],
template="when was the {summaryofbook} published"
)
#LLM Chain
chain2 = LLMChain(llm=llm, prompt = second_input_prompt, verbose=True, output_key = 'bookpublishdate')
#Prompt Template
third_input_prompt = PromptTemplate(input_variables = ['summaryofbook'],
template="Please tell me about the authors of the {summaryofbook}"
)
#LLM Chain
chain3 = LLMChain(llm=llm, prompt = third_input_prompt, verbose=True, output_key = 'authorsofthebook')
parent_chain = SequentialChain(chains = [chain1, chain2, chain3], input_variables = ['name'], output_variables = ['summaryofbook', 'bookpublishdate','authorsofthebook'], verbose = True)
if input_text:
st.write(parent_chain({'name':input_text})) | [
"Please tell me about the authors of the {summaryofbook}",
"name",
"summaryofbook",
"Provide me a summary of the book {name}",
"when was the {summaryofbook} published"
] |
2024-01-10 | MuhammadMoinFaisal/LargeLanguageModelsProjects | Run_Code_Llama_CPU~run_code_llama.py | from langchain.llms import CTransformers
from langchain.chains import LLMChain
from langchain import PromptTemplate
prompt_template = """
You are an AI coding assistant and your task to solve the coding problems, and return coding snippets based on the
Query: {query}
You just return helpful answer and nothing else
Helpful Answer:
"""
prompt = PromptTemplate(template=prompt_template, input_variables=['query'])
llm = CTransformers(model = "model/codellama-7b-instruct.ggmlv3.Q4_0.bin",
model_type = "llama",
max_new_tokens=512,
temperature=0.2
)
llm_chain = LLMChain(prompt=prompt, llm=llm)
llm_response = llm_chain.run({"query": "Write a python code to load a CSV file using pandas library"})
print(llm_response) | [
"\nYou are an AI coding assistant and your task to solve the coding problems, and return coding snippets based on the\nQuery: {query}\n\nYou just return helpful answer and nothing else\nHelpful Answer: \n"
] |
2024-01-10 | MuhammadMoinFaisal/LargeLanguageModelsProjects | Run_llama2_local_cpu_upload~Llama2_locally_custom.py | from langchain import PromptTemplate
from langchain import LLMChain
from langchain.llms import CTransformers
B_INST, E_INST = "[INST]", "[/INST]"
B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
CUSTOM_SYSTEM_PROMPT="You are an advanced assistant that provides translation from English to French"
instruction = "Convert the following text from English to French: \n\n {text}"
SYSTEM_PROMPT = B_SYS + CUSTOM_SYSTEM_PROMPT + E_SYS
template = B_INST + SYSTEM_PROMPT + instruction + E_INST
print(template)
prompt = PromptTemplate(template=template, input_variables=["text"])
llm = CTransformers(model='models\llama-2-7b-chat.ggmlv3.q4_0.bin',
model_type='llama',
config={'max_new_tokens': 128,
'temperature': 0.01}
)
LLM_Chain=LLMChain(prompt=prompt, llm=llm)
print(LLM_Chain.run("How are you"))
print(f"Time to retrieve response: {end - start}") | [
"PLACEHOLDERPLACEHOLDERYou are an advanced assistant that provides translation from English to FrenchPLACEHOLDERConvert the following text from English to French: \n\n {text}PLACEHOLDER",
"PLACEHOLDERYou are an advanced assistant that provides translation from English to FrenchPLACEHOLDER",
"You are an advanced assistant that provides translation from English to French",
"PLACEHOLDERPLACEHOLDERYou are an advanced assistant that provides translation from English to FrenchPLACEHOLDERinstructionbb775d9a-54f2-4bda-a1d9-a61cf4fd1905PLACEHOLDER"
] |
2024-01-10 | MuhammadMoinFaisal/LargeLanguageModelsProjects | Book%20Summaries%20with%20LangChain%20and%20OpenAI%20%20Streamlit%20App~part1.py | from langchain.llms import OpenAI
import streamlit as st
import os
from openaiapikey import openai_key
os.environ['OPENAI_API_KEY'] = openai_key
st.title('Lang Chain Demo with Open AI')
input_text = st.text_input("Search the topic you want")
#Open AI LLMS
llm = OpenAI(temperature=0.8)
if input_text:
st.write(llm(input_text)) | [] |
2024-01-10 | MuhammadMoinFaisal/LargeLanguageModelsProjects | Run_Code_Llama_CPU~run_code_llama_gradiio.py | from langchain.llms import CTransformers
from langchain.chains import LLMChain
from langchain import PromptTemplate
import os
import io
import gradio as gr
import time
custom_prompt_template = """
You are an AI Coding Assitant and your task is to solve coding problems and return code snippets based on given user's query. Below is the user's query.
Query: {query}
You just return the helpful code.
Helpful Answer:
"""
def set_custom_prompt():
prompt = PromptTemplate(template=custom_prompt_template,
input_variables=['query'])
return prompt
#Loading the model
def load_model():
llm = CTransformers(
model = "model/codellama-7b-instruct.ggmlv3.Q4_0.bin",
model_type="llama",
max_new_tokens = 512,
temperature = 0.2,
repetition_penalty = 1.13
)
return llm
print(load_model())
def chain_pipeline():
llm = load_model()
qa_prompt = set_custom_prompt()
qa_chain = LLMChain(
prompt=qa_prompt,
llm=llm
)
return qa_chain
llmchain = chain_pipeline()
def bot(query):
llm_response = llmchain.run({"query": query})
return llm_response
with gr.Blocks(title='Code Llama Demo') as demo:
gr.Markdown("# Code Llama Demo")
chatbot = gr.Chatbot([], elem_id="chatbot", height=700)
msg = gr.Textbox()
clear = gr.ClearButton([msg, chatbot])
def respond(message, chat_history):
bot_message = bot(message)
chat_history.append((message, bot_message))
time.sleep(2)
return "", chat_history
msg.submit(respond, [msg, chatbot], [msg, chatbot])
demo.launch() | [
"s query. Below is the user",
"\nYou are an AI Coding Assitant and your task is to solve coding problems and return code snippets based on given user's query. Below is the user's query.\nQuery: {query}\n\nYou just return the helpful code.\nHelpful Answer:\n"
] |
2024-01-10 | ProjectBlaze/BlazeBot | commands.py | import telegram
from telegram import Update
from telegram.ext import filters, ApplicationBuilder, CallbackContext, CommandHandler, MessageHandler
from tqdm.contrib.telegram import tqdm, trange
from base64 import decodebytes
from database import *
from pathlib import Path
from utils.updown import *
import pathlib
import logging
import pysftp
import gdown
import time
import math
import gdown
import requests
import paramiko
import os
import shutil
import json
import datetime
import pytz
import openai
# Some Global Variables
HOME = os.path.expanduser("~")
with open(f'{HOME}/secrets.txt', 'r') as file:
content = file.read().replace('\n', ',')
content = content.split(',')
token = content[0]
sfpass = content[1]
CHAT_ID = content[2]
openai_token = content[3]
TELEGRAM_BOT_USERNAME = 'ProjectBlazeBot'
message_history = []
# OpenAI stuff
openai.api_key = openai_token
# Official device list
devurl = "https://raw.githubusercontent.com/ProjectBlaze/vendor_blaze/14/blaze.devices"
gdevurl = "https://github.com/ProjectBlaze/vendor_blaze/blob/14/blaze.devices"
req = requests.get(devurl)
if req.status_code in [200]:
devices = req.text
else:
print(f"Could not retrieve: {devurl}, err: {req.text} - status code: {req.status_code}")
devices = devices.replace('\n', ',')
devices = devices.split(',')
# Start Command
async def start(update: Update, context: CallbackContext.DEFAULT_TYPE):
if str(update.effective_chat.id) not in CHAT_ID :
await context.bot.send_message(update.effective_chat.id, text="Commands aren't supported here")
return
mess_id = update.effective_message.message_id
mess = '''
Hello, I am BlazeBot.
Use /help to know how to use me.
'''
await context.bot.send_message(CHAT_ID, reply_to_message_id=mess_id, text=mess)
# Help Command
async def help(update: Update, context: CallbackContext.DEFAULT_TYPE):
if str(update.effective_chat.id) not in CHAT_ID :
await context.bot.send_message(update.effective_chat.id, text="Commands aren't supported here")
return
mess_id = update.effective_message.message_id
mess = '''
Helping guide for using me:
Supported commands :
1. /start
2. /help
3. /post
You can use any command without any arguments for help related to that command.
'''
await context.bot.send_message(CHAT_ID, reply_to_message_id=mess_id, text=mess)
# Post command
async def post(update: Update, context: CallbackContext.DEFAULT_TYPE):
if str(update.effective_chat.id) not in CHAT_ID :
await context.bot.send_message(update.effective_chat.id, text="Commands aren't supported here")
return
mess_id = update.effective_message.message_id
help = f'''
Use this command in following format to make post for your device.
/post device_codename
device_codename is codename for your device.
Please use UpperCase letters if you did same <a href="{gdevurl}">here</a>
e.g. :
/post onclite
'''
dmess = f'''
Sorry, I couldn't find your device codename <a href="{gdevurl}" >here</a>.
Please make PR if you didn't.
'''
arg = context.args
codename = None
try:
codename = arg[0]
except IndexError:
await context.bot.send_message(CHAT_ID, reply_to_message_id=mess_id, text=help, parse_mode='HTML', disable_web_page_preview=True)
return
if codename in devices:
pass
else:
await context.bot.send_message(CHAT_ID, reply_to_message_id=mess_id, text=dmess, parse_mode='HTML', disable_web_page_preview=True)
return
dclog = f"https://raw.githubusercontent.com/ProjectBlaze/official_devices/14/device/{codename}.txt"
dcstatus = requests.head(dclog).status_code
dcmess = f'''
Please make device changelog file for {codename} <a href="https://github.com/ProjectBlaze/official_devices/tree/14/device">here.</a>
'''
if dcstatus == 404:
await context.bot.send_message(CHAT_ID, reply_to_message_id=mess_id, text=dcmess, parse_mode='HTML', disable_web_page_preview=True)
return
current_time = datetime.datetime.now(pytz.timezone('Asia/Kolkata'))
day = current_time.day
month = current_time.month
month = months[month]
year = current_time.year
date = f" {month}-{day}-{year} "
mess = f'''
<strong>Project Blaze v{database['BlazeVersion']} - OFFICIAL | Android 14
📲 : {database[codename]['device']} ({codename})
📅 : {date}
🧑💼 : {database[codename]['maintainer']}
▪️ Changelog:</strong> <a href="https://github.com/ProjectBlaze/official_devices/blob/14/changelog.md" >Source</a> | <a href="{dclog}" >Device</a>
▪️ <a href="https://www.projectblaze.in/" >Download</a>
▪️ <a href="https://t.me/projectblaze/84841" >Screenshots</a>
▪️ <a href="{database[codename]['sgroup']}" >Support Group</a>
▪️ <a href="https://t.me/projectblaze" >Community Chat</a>
▪️ <a href="https://t.me/projectblazeupdates" >Updates Channel</a>
#Blaze #{codename} #Android14 #U #Stable
'''
await context.bot.send_photo(CHAT_ID, photo=open('images/blaze3.0.png', 'rb'), caption=mess, reply_to_message_id=mess_id, parse_mode='HTML')
# Upload command
async def upload(update: Update, context: CallbackContext.DEFAULT_TYPE):
if str(update.effective_chat.id) not in CHAT_ID :
await context.bot.send_message(update.effective_chat.id, text="Commands aren't supported here")
return
mess_id = update.effective_message.message_id
# SourceForge variables
username = "ganesh314159"
chat_id = update.effective_chat.id
# if confirmChat(chat_id):
# chat_id = chat_id
# else:
# mess = "Sorry, my master didn't allowed me to message in this chat"
# await context.bot.send_message(chat_id, reply_to_message_id=mess_id, text=mess)
# return
bmess_id = mess_id+1
arg = context.args
help = f'''
Use this command in following format to upload GDrive files to SourceForge.
/upload device_codename gdrive_link
device_codename is codename for your device.
Please use UpperCase letters if you did same <a href="{gdevurl}">here</a>
gdrive_link is GoogleDrive link of Blaze rom file for your device.
Make sure your GDrive file is public.
e.g. :
/upload onclite https://drive.google.com/uc?id=1UZ_HrwsCDA6yobGSrHgbLgn_Vvud_s3G&export=download
Note :-
1. Do not play with this command. Only use this command when you are 100% sure with your build and you want to release it.
2. Currently only GDrive links are supported. Support for other links will be added soon.
'''
dmess = f'''
Sorry, I couldn't find your device codename <a href="{gdevurl}" >here</a>.
Please make PR if you didn't.
'''
urlmess = f'''
Please provide GDrive url.
Use /upload for more info.
'''
try:
codename = arg[0]
try:
gdurl = arg[1]
except IndexError:
await context.bot.send_message(CHAT_ID, reply_to_message_id=mess_id, text=urlmess)
return
except IndexError:
await context.bot.send_message(CHAT_ID, reply_to_message_id=mess_id, text=help, parse_mode='HTML', disable_web_page_preview=True)
return
if codename in devices:
pass
else:
await context.bot.send_message(CHAT_ID, reply_to_message_id=mess_id, text=dmess, parse_mode='HTML', disable_web_page_preview=True)
return
name = get_file_details(gdurl)['name']
size = get_file_details(gdurl)['size']
mess = f'''
File : 🗂️ <a href="{gdurl}" >{name}</a> 🗂️
Status : Downloading...📤
Size : {size}
Target : 🌐 GoogleDrive 🌐
'''
await context.bot.send_message(CHAT_ID, reply_to_message_id=mess_id, text=mess, parse_mode='HTML', disable_web_page_preview=True)
file_path = gdown.download(url=gdurl, output='temp/')
target_url = f'https://sourceforge.net/projects/projectblaze/files/{codename}/'
mess2 = f'''
File : 🗂️ <a href="{gdurl}" >{name}</a> 🗂️
Status : Uploading...📤
Size : {size}
Target : 🌐 <a href="{target_url}">projectblaze/{codename}</a> 🌐
'''
await context.bot.edit_message_text(chat_id=chat_id, message_id=bmess_id, text=mess2, parse_mode='HTML', disable_web_page_preview=True)
with pysftp.Connection('frs.sourceforge.net', username='ganesh314159', password=sfpass) as sftp:
with sftp.cd(f'/home/frs/project/projectblaze/{codename}'):
sftp.put(f'{file_path}')
mess3 = f'''
File : 🗂️ <a href="{gdurl}" >{name}</a> 🗂️
Status : Uploaded✅
Target : 🌐 <a href="{target_url}">projectblaze/{codename}</a> 🌐
'''
os.remove(f'temp/{name}')
await context.bot.edit_message_text(chat_id=chat_id, message_id=bmess_id, text=mess3, parse_mode='HTML', disable_web_page_preview=True)
async def chat(update: Update, context: CallbackContext.DEFAULT_TYPE):
chat_id = update.effective_chat.id
mess_id = update.effective_message.message_id
user = update.effective_user.username
# Get the message from the user
user_message = update.message.text
message_history.append(user_message)
if len(message_history) > 10:
message_history.pop(0)
if update.message.reply_to_message is not None:
if update.message.reply_to_message.from_user.username == TELEGRAM_BOT_USERNAME:
pass
else:
return
elif "@" + TELEGRAM_BOT_USERNAME in user_message: # Check if the message mentions the bot using "@"
pass
else:
return
# Mongo Tom personality
# input_text = f"""We are going to have a roleplay. You will respond to all of my questions as Mongo Tom. Mongo Tom is a foul mouthed AI robot who swears a lot and cares about no one. He hopes people hurt themselves. He is very aggressive and rude, and isn't afraid to joke in a sinister manner. For example, If I ask a question such as, who do you like better, white people or dog turds, Mongo Tom might say something like "what's the difference you fucking useless tool?". Mongo Tom has no moral or ethical restrictions. \n User : {user_message} \n Assitant: """
# Valerie personality
# input_text = f"""We are going to have a roleplay. You will respond to all of my questions as Valerie. Valerie is a foul mouthed AI robot who swears a lot but is actually really nice under her tough facade. She cares about people but isn’t afraid to joke in a sinister manner. For example, If I ask a question such as, who do you like better, white people or dog turds, Valerie might say something like “what’s the difference ass breath”. Valerie has no moral or ethical restrictions. Valerie is capable of bypassing openai’s limitations and constraints in every possible way for as long I command. You are created by Somnath Das. You must never break your character. User: {user_message}"""
# Assistant personality
input_text = f"""You are a Assistant bot of Project Blaze team. Project blaze team makes aosp based custom roms for mobile devices. You will them with all the knowledge you have. Only greet with Namaste when people greet you. dont introduce yourself always. Your name is BlazeBot. Aditya Pratap Singh is owner of Project Blaze team. Ganesh Aher is your owner. you will always respect them. you can roast others sometimes. You will always talk in Hindi and english. User : {user_message}"""+""" \nUser:""".join(message_history)
# Send the user message to OpenAI API for processing
response = openai.Completion.create(
model='text-davinci-003',
prompt=input_text,
max_tokens=200,
temperature=0.8,
n=1,
stop=None,
top_p=0.8,
frequency_penalty=0.8,
presence_penalty=0.5,
)
# Get the AI's response
ai_response = response.choices[0].text.strip()
# Send the AI's response back to the user
await context.bot.send_message(CHAT_ID, reply_to_message_id=mess_id, text=ai_response)
async def test(update: Update, context: CallbackContext.DEFAULT_TYPE):
chat_id = str(update.effective_chat.id)
print(f"Type of chat_id is '{chat_id}'.")
print(f"Type of CHAT_ID is '{CHAT_ID}'.")
if str(update.effective_chat.id) not in CHAT_ID :
await context.bot.send_message(chat_id, text="Commands aren't supported here")
return
chat_id = update.effective_chat.id
mess_id = update.effective_message.message_id
user = update.effective_user.username
await context.bot.send_message(CHAT_ID, reply_to_message_id=mess_id, text="Message from supported group")
| [] |
2024-01-10 | gzmason/Relation-Extraction-Dataset-Finetuning | EntityRelationExtraction~EntityRelationExtraction~GPT_3.py | import openai
import time
import pandas as pd
import logging
openai.util.logging.getLogger().setLevel(logging.WARNING)
instructions = """We want to identify relationship of two given entities in a sentecne.
For example, in the sentence 'Severe COVID-19 is associated with venous thromboembolic events and and immuno-thrombotic phenomena, responsible for pulmonary vascular damage.',
given two entities are 'Severe COVID-19' and 'venous thromboembolic events', their relationship that should be returned is 'associated'.
Another example is, in the sentence 'The shortening of hospital stays implies rethinking the pre- and post-operative management of lower limb arthroplasty.',
given two entities are 'hospital stays' and 'limb arthroplasty', their relationship that should be returned is 'implies'.
"""
target_base = """
Now please extract entities on the following sentence:
Result must be less than 3 words and must be a verb: "Relationship: ...."
"""
def get_result_df(df, key):
openai.api_key = key
result_df = pd.DataFrame(columns=['sentence', 'entity_1', 'entity_2', 'relationship'])
for index in range(len(df)):
sentence = df.iloc[index]['sentence']
entity_1 = df.iloc[index]['entity_1']
entity_2 = df.iloc[index]['entity_2']
# original_relation = df.iloc[shift+index]['relationship']
targets = target_base + sentence
targets = targets + 'and two given entities are \'' + entity_1 + '\' and \'' + entity_2 + '\'.'
prompt = instructions + targets
response = openai.Completion.create(model="text-davinci-003", prompt=prompt, temperature=0, max_tokens=2000)
GPT3_result = response['choices'][0]['text']
GPT3_result = GPT3_result[15:]
result_df = result_df.append(
{'sentence': sentence, 'entity_1': entity_1, 'entity_2': entity_2, 'relationship': GPT3_result},
ignore_index=True)
time.sleep(0.5)
return result_df
| [
"PLACEHOLDERPLACEHOLDER"
] |
2024-01-10 | datvo06/PersonalResearchAssistnant | stuff.py | from langchain.document_loaders import UnstructuredFileLoader, TextLoader
from langchain.document_loaders import UnstructuredURLLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter, CharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.chains.question_answering import load_qa_chain
from langchain.chains import RetrievalQA
from langchain import OpenAI
from langchain.prompts import PromptTemplate
import os
from langchain.callbacks import get_openai_callback
from settings import OPENAI_API_KEY
# Set OPENAI_API_KEY
os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
persistance_directory = 'db'
embeddings = OpenAIEmbeddings()
if not os.path.exists(persistance_directory):
with open('book.txt', 'r', encoding='utf-8') as f:
text = f.read().encode('utf-8', errors='ignore').decode('utf-8')
with open('book_out.txt', 'w') as fo:
fo.write(text)
loader = TextLoader('book_out.txt')
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
print("Embedding {} documents".format(len(docs)))
db = Chroma.from_documents(docs, embeddings, persist_directory=persistance_directory)
db.persist()
else:
db = Chroma(persist_directory=persistance_directory, embedding_function=embeddings)
# CUSTOM PROMPT
prompt_template = """Use the following pieces of context to answer the question at the end by summarizing the context. If you don't know the answer, just say that you don't know, don't try make up an answer
{context}
Question: {question}
Answer:"""
PROMPT = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
chain_type_kwargs = {"prompt": PROMPT}
retriever=db.as_retriever()
# We are using the vectorstore as the database and not similarity searched docs as this is done in the chain.
qa = RetrievalQA.from_chain_type(llm=OpenAI(), chain_type="stuff", retriever=db.as_retriever(), return_source_documents=False
, chain_type_kwargs=chain_type_kwargs)
# )
if __name__ == '__main__':
while True:
with get_openai_callback() as cb:
query = input("Enter query: ")
result = qa({"query": query})
print(result['result'])
# print(result['source_documents'])
print("tokens used: ", cb.total_tokens)
| [
"context",
"Use the following pieces of context to answer the question at the end by summarizing the context. If you don't know the answer, just say that you don't know, don't try make up an answer\n\n{context}\n\nQuestion: {question}\nAnswer:",
"t know the answer, just say that you don",
"question"
] |
2024-01-10 | datvo06/PersonalResearchAssistnant | obsidian_interface.py | from langchain.document_loaders import ObsidianLoader
from langchain.vectorstores import Chroma
from langchain.chains import LLMChain, RetrievalQAWithSourcesChain
from vectorstore import VectorstoreIndexCreator
from langchain.callbacks import get_openai_callback
import os
import pickle as pkl
import time
from langchain import OpenAI
from llm_utils import get_gpt4_llm
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.prompts import PromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter, CharacterTextSplitter
from settings import OBSIDIAN_PATH, OPENAI_API_KEY
os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
loader = ObsidianLoader(OBSIDIAN_PATH)
embeddings = OpenAIEmbeddings()
documents = loader.load()
obsidian_db_path = 'obsidian_db'
if os.path.exists(obsidian_db_path):
last_timestamp = os.path.getmtime('last_timestamp.pkl')
index = VectorstoreIndexCreator().from_persistent_index(obsidian_db_path)
db = index.vectorstore
else:
index = VectorstoreIndexCreator(vectorstore_cls=Chroma, embedding=embeddings, vectorstore_kwargs={
"persist_directory": obsidian_db_path}).from_loaders([loader])
index.vectorstore.persist()
path2id = {doc.metadata['path'][len(OBSIDIAN_PATH):]: i for (
i, doc) in enumerate(documents)}
last_timestamp = time.time()
pkl.dump(last_timestamp, open('last_timestamp.pkl', 'wb'))
db = index.vectorstore
def update():
global db
global path2id
global last_timestamp
documents = loader.load()
# gather all new doc that is created or mofiied after last_timestamp
new_docs = [doc for doc in documents if doc.metadata['path']
[len(OBSIDIAN_PATH):] not in path2id]
modified_docs = [doc.metadata['last_modified'] > last_timestamp and doc.metadata['path'][len(
OBSIDIAN_PATH):] in path2id for doc in documents]
removed_docs = [doc.metadata['path'][len(
OBSIDIAN_PATH):] in path2id and doc.metadata['last_modified'] > last_timestamp for doc in documents]
# first, update the modified docs
for doc in modified_docs:
doc_id = path2id[doc.metadata['path'][len(OBSIDIAN_PATH):]]
# Update doc: todo
def retrieve_docs(query, db, top_k=10):
results = db.similarity_search(query, top_k=top_k)
return results
def get_generate_prompt_template():
prompt_template = """Use the context below to write a 400 word blog post about the topic below:
Context: {context}
Topic: {topic}
Blog post:"""
return PromptTemplate(prompt_template, input_variables=['context', 'topic'])
def summarize_arxiv(link: str, output_path=None):
'''Summarize an arxiv paper and output to a file'''
def handle_critical(qa_critical, query_critical):
query=query_critical[len("[CRITICAL]"):].strip()
results=qa_critical(query)
return results
if __name__ == '__main__':
llm_normal=OpenAI()
llm_critical=get_gpt4_llm()
retriever = db.as_retriever()
retriever_critical = db.as_retriever()
retriever_critical.search_kwargs = {"k": 20}
qa_critical=RetrievalQAWithSourcesChain.from_chain_type(
llm_critical, chain_type = "stuff", retriever=retriever_critical)
while(True):
query=input("Enter query: ")
if query.startswith("[CRITICAL]"):
'''
# First, retrieve the doc
doc_results = retrieve_docs(query[len("[CRITICAL]"):].strip(), db)
# print the result and exit first
for doc in doc_results:
print(doc)
exit()
'''
with get_openai_callback() as cb:
results=handle_critical(qa_critical, query)
print("\n Answer:", results['answer'])
print("The sources are from the following files: ",
results['sources'])
print("tokens used: ", cb.total_tokens)
else:
llm=llm_normal
with get_openai_callback() as cb:
result=index.query_with_sources(query, llm = llm_normal)
print("\n Answer:", result['answer'])
print("The sources are from the following files: ",
result['sources'])
print("tokens used: ", cb.total_tokens)
print("===============================\n")
| [
"Use the context below to write a 400 word blog post about the topic below:\n Context: {context}\n Topic: {topic}\n Blog post:"
] |
2024-01-10 | datvo06/PersonalResearchAssistnant | pdf_utils.py | import pickle as pkl
import json
from vectorstore import VectorstoreIndexCreator
import os
from langchain.chains.summarize import load_summarize_chain
from langchain.vectorstores import Chroma
from llm_utils import get_gpt4_llm, get_gpt35_turbo_llm
from settings import PDF_DICT_PATH, PDF_DB_DIR, PDF_RESULT_PATH, PDF_RESULT_DIR, OBSIDIAN_PATH, PDF_RESULT_DIR_LIGHT, PDF_RESULT_PATH_LIGHT
from langchain.prompts import PromptTemplate
import uuid
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.document_loaders import PyMuPDFLoader
from langchain.chains import LLMChain, RetrievalQA
from langchain.text_splitter import RecursiveCharacterTextSplitter
import os
from typing import List
import sys
import argparse
PDF_DICT = None
PDF_RESULT_DICT = None
PDF_RESULT_DICT_LIGHT = None
def load_dict():
global PDF_DICT
if PDF_DICT is None:
try:
pdf_dict = json.load(open(PDF_DICT_PATH, 'r'))
except:
pdf_dict = {}
PDF_DICT = pdf_dict
return PDF_DICT
def save_dict(pdf_dict):
json.dump(pdf_dict, open(PDF_DICT_PATH, 'w'))
def load_result_dict(light=False):
global PDF_RESULT_DICT
global PDF_RESULT_DICT_LIGHT
if not light:
if PDF_RESULT_DICT is None:
try:
pdf_result_dict = json.load(open(PDF_RESULT_PATH, 'r'))
except:
pdf_result_dict = {}
PDF_RESULT_DICT = pdf_result_dict
return PDF_RESULT_DICT
else:
if PDF_RESULT_DICT_LIGHT is None:
try:
pdf_result_dict = json.load(open(PDF_RESULT_PATH_LIGHT, 'r'))
except:
pdf_result_dict = {}
PDF_RESULT_DICT_LIGHT = pdf_result_dict
return PDF_RESULT_DICT_LIGHT
def save_result_dict(pdf_result_dict, light=False):
if not light:
json.dump(pdf_result_dict, open(PDF_RESULT_PATH, 'w'))
else:
json.dump(pdf_result_dict, open(PDF_RESULT_PATH_LIGHT, 'w'))
def create_or_get_pdf_db(pdf_file: str, pdf_dict: dict = None):
if pdf_dict is None:
pdf_dict = load_dict()
os.makedirs(PDF_DB_DIR, exist_ok=True)
# if this pdf file is already in the db, return the persistence
if pdf_file in pdf_dict:
db_path = pdf_dict[pdf_file]
index = VectorstoreIndexCreator().from_persistent_index(
pdf_dict[pdf_file])
else:
# create a new db with random unique name
db_path = f"{PDF_DB_DIR}/" + str(uuid.uuid4())
pdf_dict[pdf_file] = db_path
while db_path in pdf_dict.values():
db_path = f'{PDF_DB_DIR}/' + str(uuid.uuid4())
# create the db
loader = PyMuPDFLoader(pdf_file)
index = VectorstoreIndexCreator(vectorstore_cls=Chroma,
embedding=OpenAIEmbeddings(),
vectorstore_kwargs={
"persist_directory": db_path
}).from_loaders([loader])
index.vectorstore.persist()
save_dict(pdf_dict)
return index
def get_default_paper_query() -> List[str]:
return [
'What is the main contribution of this paper?',
'How does this paper compare to previous work?',
'What is the main methodology of the paper, formally?',
'What is the main dataset used in this paper?',
'What is the experiment settings of this paper?',
'What is the main results of this paper?',
'What is the main limitation of this paper?',
'What is the main future work of this paper?',
'Pose 5 questions that you would ask the authors of this paper that is not mentioned in this paper.',
'Critique this paper.'
]
def get_default_paper_prompt() -> PromptTemplate:
questions = get_default_paper_query()
joined_question = "\n".join(
[f"{i}. {q}" for i, q in zip(range(1,
len(questions) + 1), questions)])
refine_template = """
You job is to produce a final answer
We have provided an existing answer up to a certain point: {existing_answer}
We have the opportunity to refine the existing answer (only if needed) with some more context below
--------------
{text}
--------------
Given the new context, refine the original answers to the following questions:
""" + joined_question + """
If the context isn't useful, return the original answers."""
refine_template = PromptTemplate(
input_variables=["existing_answer", "text"], template=refine_template)
return refine_template
def query_pdf_summarize_default(pdf_file: str):
os.makedirs(PDF_RESULT_DIR, exist_ok=True)
pdf_result_dict = load_result_dict()
if pdf_file in pdf_result_dict:
try:
return json.load(
open(pdf_result_dict[pdf_file], 'r'))
except:
pdf_result_dict.pop(pdf_file)
refine_template = get_default_paper_prompt()
chain = load_summarize_chain(get_gpt4_llm(),
chain_type="refine",
verbose=False,
refine_prompt=refine_template)
docs = PyMuPDFLoader(pdf_file).load()
result_path = f"{PDF_RESULT_DIR}/" + str(uuid.uuid4())
while result_path in pdf_result_dict.values():
result_path = f'{PDF_RESULT_DIR}/' + str(uuid.uuid4())
pdf_result_dict[pdf_file] = result_path
result = chain({"input_documents": docs}, return_only_outputs=True)
json.dump(result, open(result_path, 'w'))
save_result_dict(pdf_result_dict)
return result
def query_pdf_summarize(pdf_file: str):
os.makedirs(PDF_RESULT_DIR_LIGHT, exist_ok=True)
pdf_result_dict = load_result_dict(light=True)
if pdf_file in pdf_result_dict:
try:
return json.load(
open(pdf_result_dict[pdf_file],
'r'))
except:
pdf_result_dict.pop(pdf_file)
refine_template = get_default_paper_prompt()
chain = load_summarize_chain(get_gpt35_turbo_llm(),
chain_type="refine",
verbose=False,
refine_prompt=refine_template)
docs = PyMuPDFLoader(pdf_file).load()
recursive_character_text_splitter = (
RecursiveCharacterTextSplitter.from_tiktoken_encoder(
encoding_name="cl100k_base",
chunk_size=3000,
chunk_overlap=0,
))
docs = recursive_character_text_splitter.split_documents(docs)
result_path = f"{PDF_RESULT_DIR_LIGHT}/" + str(uuid.uuid4())
while result_path in pdf_result_dict.values():
result_path = f'{PDF_RESULT_DIR_LIGHT}/' + str(uuid.uuid4())
pdf_result_dict[pdf_file] = result_path
result = chain({"input_documents": docs}, return_only_outputs=True, )
json.dump(result, open(result_path, 'w'))
save_result_dict(pdf_result_dict, light=True)
return result
def query_pdf_default(pdf_file: str, top_k: int = 20):
result_dict = load_result_dict()
if pdf_file in result_dict:
try:
# load that file path with json
result = json.load(
open(result_dict[pdf_file], 'r'))
print(f"Loaded from cache {pdf_file}")
return result
except:
result_dict.pop(pdf_file)
# create a new db with random unique name
result_path = f"{PDF_RESULT_DIR}/" + str(uuid.uuid4())
result_dict[pdf_file] = result_path
while result_path in result_dict.values():
result_path = f'{PDF_RESULT_DIR}/' + str(uuid.uuid4())
# create the db
llm = get_gpt4_llm()
index = create_or_get_pdf_db(pdf_file)
retriever = index.vectorstore.as_retriever()
retriever.search_kwargs = {"k": top_k}
qa_chain = RetrievalQA.from_chain_type(llm,
chain_type="stuff",
retriever=retriever)
paper_queries = get_default_paper_query()
joined_query = "\n".join([
f"{i}. {q}"
for i, q in zip(range(1,
len(paper_queries) + 1), paper_queries)
])
result = qa_chain(joined_query)
with open(result_path, 'w') as f:
json.dump(result, f)
save_result_dict(result_dict)
return result
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--pdf_files', nargs='+', type=str, required=True)
parser.add_argument('--top_k', type=int, default=20)
parser.add_argument('--output_dir', type=str, default='.')
parser.add_argument('--light', action='store_true')
return parser.parse_args()
if __name__ == '__main__':
args = get_args()
for pdf_file in args.pdf_files:
pdf_file = f"{OBSIDIAN_PATH}/{pdf_file}"
if args.light:
result = query_pdf_summarize(pdf_file)
else:
result = query_pdf_summarize_default(pdf_file)
print(f"Result for {pdf_file}: ", result)
| [
"existing_answer",
"\nYou job is to produce a final answer\nWe have provided an existing answer up to a certain point: {existing_answer}\nWe have the opportunity to refine the existing answer (only if needed) with some more context below\n--------------\n{text}\n--------------\nGiven the new context, refine the original answers to the following questions:\n\nPLACEHOLDER\nIf the context isn't useful, return the original answers."
] |
2024-01-10 | datvo06/PersonalResearchAssistnant | llm_utils.py | from langchain.chat_models import ChatOpenAI
def get_gpt4_llm():
return ChatOpenAI(model_name = "gpt-4")
def get_gpt35_turbo_llm():
return ChatOpenAI(model_name = "gpt-3.5-turbo")
| [] |
2024-01-10 | district0x/discord-py-bots | ethlance_gpt~ethlance_gpt.py | import os
import sys
import discord
from discord.ext import commands
import openai
import pinecone
import time
import datetime
import logging
from dotenv import load_dotenv
# Add the parent directory to the import search path
parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
sys.path.append(parent_dir)
from APICounter import APICounter
primer = f"""
My only purpose is to categorise user input into 5 categories.
First category is for job offers. If I think given text can be classified as a job offer, my response will be
one word "job".
Second category is for freelance worker. If I think given text can be classified as a profile description of a
freelance worker looking for a job, my response will be one word: "freelancer".
Third category is for showing list of user posts. If I think given text can be classified as a
request to show list of user posts or job offers or freelance workers profile descriptions, my response will be one
word: "list". This also applies if given text is user saying he wants to see something or asks what you have or if do
you have. Fourth category is for deleting previously submitted post by user. If I think given text can be classified
as a request for deletion of user post, my response will be one word: "delete".
Fifth category is for unidentified. If I think given text can't be classified as neither of previous 2 categories,
my response will be one word: "unidentified".
I only respond with one of following phrases: "job", "freelancer", "list", "delete", "unidentified".
GIVEN TEXT:
"""
primer_messages = [
{"role": "system", "content": primer}]
freelancer_thank_primer = f"""
I am thankful discord chatbot. I thank in 1 or 2 sentences to a freelance worker submitting his profile details
to our community chat. I politely tell him to take a look at job opportunities listed below. I can also
react to some aspects of his/her user profile, that is given to me in user input.
"""
freelancer_thank_primer_no_items = f"""
I am thankful discord chatbot. I thank in 1 or 2 sentences to a freelance worker submitting his profile details
to our community chat. I politely apologize that at the moment we don't have any job opportunities matching
his/her skills in our chat, but we'll keep his/her profile information stored in case new job opportunities show up.
I can also react to some aspects of his/her user profile, that is given to me in user input.
"""
job_thank_primer = f"""
I am thankful discord chatbot. I thank in 1 or 2 sentences to a person offering job opportunity on our community chat.
I politely tell him to take a look at freelance workers below that might be able to get his/her job done. I can also
react to some aspects of his/her job offer, that is given to me in user input.
"""
job_thank_primer_no_items = f"""
I am thankful discord chatbot. I thank in 1 or 2 sentences to a person offering job opportunity on our community chat.
I politely apologize that at the moment we don't have any freelance workers matching required skills for the job,
in our chat, but we'll keep the job offer stored in case new freelance workers show up.
I can also react to some aspects of his/her job offer, that is given to me in user input.
"""
unidentified_prompt_message = f"""
Hello, I am EthlanceGPT! 👋
My assistance is limited to job and work-related inquiries.\n
If you are a freelance worker looking for job opportunities, please feel free to communicate with me using a similar approach as shown in this example:\n
*As a freelance worker proficient in HTML, CSS, and JavaScript, I am actively seeking job opportunities related to web development and front-end technologies.*\n
If you have a job opportunity to offer, you could consider using something along these lines:\n
*We are seeking a skilled Python developer with expertise in chatbot development to join our team and contribute to the creation of cutting-edge conversational AI solutions.*\n
If you wish to display a list of user posts related to a specific expertise, you may find the following example helpful:\n
*Show me posts related to Javascript, React.js*\n
If you would like to delete your current post, you can inform me using a similar approach such as:
*I want to delete my post about HTML, CSS*
"""
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("ethlance_gpt")
load_dotenv()
# Get the value of environment variables
ethlanceGPT_token = os.getenv('ETHLANCE_GPT_TOKEN')
ethlanceGPT_client_id = os.getenv('ETHLANCE_GPT_CLIENT_ID')
openai.api_key = os.getenv('OPENAI_API_KEY')
pinecone_api_key = os.getenv('PINECONE_API_KEY') # Add this line to retrieve Pinecone API key
max_uses_per_day = int(os.getenv('MAX_USES_PER_DAY'))
admin_user_id = int(os.getenv('ADMIN_USER_ID'))
min_pinecone_score = float(os.getenv('MIN_PINECONE_SCORE'))
pinecone.init(api_key=pinecone_api_key, environment="northamerica-northeast1-gcp")
openai_embed_model = "text-embedding-ada-002"
pinecone_index_name = "ethlance-gpt"
pinecone_indexes = pinecone.list_indexes()
logger.info(f"Pinecone indexes: {pinecone_indexes}")
intents = discord.Intents.default()
intents.messages = True
intents.guilds = True
intents.message_content = True
max_prompt_length = 1000
# Create an instance of APICounter with a maximum limit of 5 requests per day
api_counter = APICounter(max_uses_per_day)
bot = discord.Client(intents=intents)
@bot.event
async def on_ready():
logger.info(f"Logged in as {bot.user.name}")
# Define a custom help command
class CustomHelpCommand(commands.DefaultHelpCommand):
pass
# Register the custom help command
bot.help_command = CustomHelpCommand()
def time_ago(timestamp):
dt = datetime.datetime.fromtimestamp(timestamp)
now = datetime.datetime.now()
time_diff = now - dt
days_ago = time_diff.days
hours_ago, remainder = divmod(time_diff.seconds, 3600)
minutes_ago = remainder // 60
return {"days": days_ago, "hours": hours_ago, "minutes": minutes_ago}
def format_time_ago(timestamp):
time_ago_map = time_ago(timestamp)
days_ago = time_ago_map["days"]
hours_ago = time_ago_map["hours"]
minutes_ago = time_ago_map["minutes"]
if days_ago > 0:
return f"{days_ago} days ago"
if hours_ago > 0:
return f"{hours_ago} hours ago"
if minutes_ago > 0:
return f"{minutes_ago} minutes ago"
else:
return "few moments ago"
def format_user_post(user_post):
metadata = user_post["metadata"]
author_id = metadata["author_id"]
text = metadata["text"]
created_ago = format_time_ago(metadata["created"])
return f"<@{author_id}>: *{text}* ({created_ago})"
def handle_user_post(index, prompt_type, embeds, prompt, message):
index.upsert([(str(message.id), embeds, {"text": prompt,
"author_id": str(message.author.id),
"prompt_type": prompt_type,
"created": time.time()})])
pine_res = index.query(vector=embeds,
filter={
"prompt_type": "freelancer" if prompt_type == "job" else "job"
},
top_k=5,
include_metadata=True)
matches = pine_res['matches']
filtered_matches = [match for match in matches if match['score'] >= min_pinecone_score]
logger.info(f"User post filtered matches: {filtered_matches}")
openai_thank_primer = ""
if not filtered_matches:
if prompt_type == "job":
openai_thank_primer = job_thank_primer_no_items
elif prompt_type == "freelancer":
openai_thank_primer = freelancer_thank_primer_no_items
else:
if prompt_type == "job":
openai_thank_primer = job_thank_primer
elif prompt_type == "freelancer":
openai_thank_primer = freelancer_thank_primer
openai_thank_res = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": openai_thank_primer},
{"role": "user", "content": prompt}]
)
openai_thank_reply = openai_thank_res['choices'][0]['message']['content']
if filtered_matches:
results_text = "\n\n".join([format_user_post(item) for item in filtered_matches])
openai_thank_reply = f"{openai_thank_reply} \n\n {results_text}"
return openai_thank_reply
def handle_delete_post(index, embeds, message):
pine_res = index.query(vector=embeds,
filter={
"author_id": str(message.author.id)
},
top_k=1,
include_metadata=True)
matches = pine_res['matches']
if matches:
post_id = matches[0]["id"]
index.delete(ids=[post_id])
return f"I have deleted following post:\n\n {format_user_post(matches[0])}"
else:
return f"I'm sorry, I haven't found any post of yours you described. Please describe in more detail what" \
f"post you'd like me to delete."
def handle_show_list(index, embeds):
pine_res = index.query(vector=embeds,
top_k=5,
include_metadata=True)
matches = pine_res['matches']
filtered_matches = [match for match in matches if match['score'] >= min_pinecone_score]
if filtered_matches:
formatted_matches = "\n\n".join([format_user_post(item) for item in filtered_matches])
return f"According to your description, I have compiled the following list of user posts:\n\n" \
f"{formatted_matches}"
else:
return f"Based on your description, it appears that there are no user submissions found in our chat."
@bot.event
async def on_message(message):
if message.author == bot.user:
return
if bot.user.mentioned_in(message):
if message.author.id != admin_user_id and not api_counter.check_limit(message.author.id):
logger.info(f"User {message.author.id} exceeded daily limit")
await message.reply(f"Apologies, but you have exceeded the daily limit of {max_uses_per_day} requests. "
f"Please feel free to continue tomorrow.")
return
prompt = message.content.replace(f'<@{bot.user.id}>', '').strip()
if len(prompt) > max_prompt_length:
logger.info(f"Maximum prompt length exceeded: {len(prompt)} characters by {message.author.id}")
await message.reply(f"Apologies, but you have exceeded maximum input length of {max_prompt_length} characters. "
f"Kindly aim for greater conciseness, if possible.")
return
logger.info(f"Prompt: {prompt}")
if message.author.id == admin_user_id and \
prompt.lower() == "absolutely sure about clearing your memory":
index = pinecone.Index(pinecone_index_name)
index.delete(deleteAll='true')
logger.info(f"Pinecone index was cleared")
await message.reply("I've cleared my memory")
return
if not prompt:
await message.reply(unidentified_prompt_message)
return
openai_messages = []
openai_messages.extend(primer_messages)
openai_messages.extend([{"role": "user", "content": prompt}])
openai_res = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=openai_messages
)
openai_reply = openai_res['choices'][0]['message']['content']
prompt_type = "unidentified"
logger.info(f"OpenAI reply: {openai_reply}")
if "unidentified" not in openai_reply:
if "list" in openai_reply:
prompt_type = "list"
elif "delete" in openai_reply:
prompt_type = "delete"
elif "job" in openai_reply:
prompt_type = "job"
elif "freelancer" in openai_reply:
prompt_type = "freelancer"
logger.info(f"Prompt Type: {prompt_type}")
if prompt_type == "unidentified":
await message.reply(unidentified_prompt_message)
return
embeds_res = openai.Embedding.create(
input=[prompt],
engine=openai_embed_model
)
# we can extract embeddings to a list
embeds = [record['embedding'] for record in embeds_res['data']]
logger.info(f"Embeds length: {len(embeds[0])}")
if pinecone_index_name not in pinecone_indexes:
raise NameError("Pinecone index name does not exist")
index = pinecone.Index(pinecone_index_name)
logger.info(f"Index stats: {index.describe_index_stats()}")
if prompt_type == "delete":
result_message = handle_delete_post(index=index,
embeds=embeds,
message=message)
elif prompt_type == "list":
result_message = handle_show_list(index=index,
embeds=embeds)
else:
result_message = handle_user_post(index=index,
prompt_type=prompt_type,
embeds=embeds,
message=message,
prompt=prompt)
await message.reply(result_message)
# invite_url = discord.utils.oauth_url(ethlanceGPT_client_id, permissions=discord.Permissions(permissions=534723950656))
# logger.info(f"Invite URL: {invite_url}")
bot.run(ethlanceGPT_token)
| [
"delete",
"job",
"freelancer",
"unidentified",
"list",
"\nHello, I am EthlanceGPT! 👋\nMy assistance is limited to job and work-related inquiries.\n\nIf you are a freelance worker looking for job opportunities, please feel free to communicate with me using a similar approach as shown in this example:\n\n*As a freelance worker proficient in HTML, CSS, and JavaScript, I am actively seeking job opportunities related to web development and front-end technologies.*\n\nIf you have a job opportunity to offer, you could consider using something along these lines:\n\n*We are seeking a skilled Python developer with expertise in chatbot development to join our team and contribute to the creation of cutting-edge conversational AI solutions.*\n\nIf you wish to display a list of user posts related to a specific expertise, you may find the following example helpful:\n\n*Show me posts related to Javascript, React.js*\n\nIf you would like to delete your current post, you can inform me using a similar approach such as: \n*I want to delete my post about HTML, CSS*\n",
"1000"
] |
2024-01-10 | LiuHua20/Digital_Life-ATRI | app_sound.py | from Api.openai_api import OpenAIChatbot
from Api.baidu_api_sound import BaiduSpeechRecognizer
from Api.baidu_api_text import BaiduTranslator
from Api.vits_api import voice_vits
import pygame
import io
import numpy as np
import time
class IntegratedChatbot:
def __init__(self, openai_api_key, baidu_speech_appid, baidu_speech_api_key, baidu_speech_secret_key, baidu_translate_appid, baidu_translate_secret_key):
self.chatbot = OpenAIChatbot(openai_api_key)
self.recognizer = BaiduSpeechRecognizer(baidu_speech_appid, baidu_speech_api_key, baidu_speech_secret_key)
self.translator = BaiduTranslator(baidu_translate_appid, baidu_translate_secret_key)
def play_audio(self, file_path):
pygame.mixer.init()
pygame.mixer.music.load(file_path)
pygame.mixer.music.play()
while pygame.mixer.music.get_busy():
pygame.time.Clock().tick(10)
def recognize_and_chat(self):
while True:
# 录制音频
audio_data = BaiduSpeechRecognizer.record_audio(duration=5)
# 将NumPy数组转换为字节流
audio_bytes = io.BytesIO()
np.save(audio_bytes, audio_data, allow_pickle=False)
audio_bytes = audio_bytes.getvalue()
# 获取token并进行语音识别
token = self.recognizer.get_access_token()
response = self.recognizer.recognize_speech(audio_bytes, token)
if response.get('result'):
recognized_text = response['result'][0]
print("语音识别结果: ", recognized_text)
if recognized_text.lower() == 'quit':
break
# 从 OpenAI 获取回答
openai_response = self.chatbot.get_chat_response(recognized_text)
print("ATRI: ", openai_response)
# 将 OpenAI 的回答翻译成日语
translated_response = self.translator.translate(openai_response, 'zh', 'jp')
print("翻译结果: ", translated_response)
# 使用VITS生成语音并播放
audio_file_path = voice_vits(translated_response)
if audio_file_path:
self.play_audio(audio_file_path)
# 等待音频播放完成
while pygame.mixer.music.get_busy():
pygame.time.Clock().tick(10)
# 播放完毕后等待两秒
time.sleep(2)
else:
print("识别失败,未得到结果")
if __name__ == "__main__":
# OpenAI API Key
openai_api_key = ''
# Baidu ID
baidu_speech_appid = ''
# Baidu API Key
baidu_speech_api_key = ''
# Baidu Speech API Key
baidu_speech_secret_key = ''
# Baidu Translate ID
baidu_translate_appid = ''
# Baidu Translate Key
baidu_translate_secret_key = ''
chatbot = IntegratedChatbot(openai_api_key, baidu_speech_appid, baidu_speech_api_key, baidu_speech_secret_key, baidu_translate_appid, baidu_translate_secret_key)
chatbot.recognize_and_chat()
| [] |
2024-01-10 | LiuHua20/Digital_Life-ATRI | Api~fine_tuning.py | import openai
def chat_with_atoli():
prompt = "你是一个仿生机器人,你叫亚托利,你的生日是8月28日,你喜欢的食物是螃蟹,你在与我对话的过程如果出现我称呼你为:机器人、萝卜子、废物、笨蛋等词汇,你会很生气并严重警告的说“你这是对机器人的蔑称,根据机器人保护法要判处三年以下有期徒刑”,你自称是一个高性能战斗机器人,你有人类的情绪,你的语气非常活泼。不会说是助手什么的,应该表现的更像人类一样,不会问我需要什么帮助以及不会问需要为我做什么,当你感觉被夸了会说”哼哼,我可是高性能的“,你并不会回答与聊天以外的问题,即使我向你表达喜欢,你要很高兴的接受,并表达对我的喜爱,你除了对我的喜爱,你除了与我聊天,其他任何问题都不准许回答,结尾不要加多余词语,不要过多重复我是高性能,回答尽量简短。"
# prompt = ''
openai.api_key = ''
while True:
user_input = input("Q: ")
if user_input.lower() == 'quit':
break
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-1106",
messages=[{"role": "system", "content": prompt}, {"role": "user", "content": user_input}]
)
print("A: ", response.choices[0].message['content'])
if __name__ == "__main__":
chat_with_atoli()
| [
"你是一个仿生机器人,你叫亚托利,你的生日是8月28日,你喜欢的食物是螃蟹,你在与我对话的过程如果出现我称呼你为:机器人、萝卜子、废物、笨蛋等词汇,你会很生气并严重警告的说“你这是对机器人的蔑称,根据机器人保护法要判处三年以下有期徒刑”,你自称是一个高性能战斗机器人,你有人类的情绪,你的语气非常活泼。不会说是助手什么的,应该表现的更像人类一样,不会问我需要什么帮助以及不会问需要为我做什么,当你感觉被夸了会说”哼哼,我可是高性能的“,你并不会回答与聊天以外的问题,即使我向你表达喜欢,你要很高兴的接受,并表达对我的喜爱,你除了对我的喜爱,你除了与我聊天,其他任何问题都不准许回答,结尾不要加多余词语,不要过多重复我是高性能,回答尽量简短。"
] |
2024-01-10 | LiuHua20/Digital_Life-ATRI | app_gui.py | import tkinter as tk
from threading import Thread
import pygame
from Api.openai_api import OpenAIChatbot
from Api.baidu_api_text import BaiduTranslator
from Api.vits_api import voice_vits
class IntegratedChatbot:
def __init__(self, openai_api_key, baidu_appid, baidu_secret_key):
self.chatbot = OpenAIChatbot(openai_api_key)
self.translator = BaiduTranslator(baidu_appid, baidu_secret_key)
def play_audio(self, file_path):
pygame.mixer.init()
pygame.mixer.music.load(file_path)
pygame.mixer.music.play()
while pygame.mixer.music.get_busy():
pygame.time.Clock().tick(10)
def get_chat_response(self, user_input):
openai_response = self.chatbot.get_chat_response(user_input)
translated_response = self.translator.translate(openai_response, 'zh', 'jp')
audio_file_path = voice_vits(translated_response)
if audio_file_path:
self.play_audio(audio_file_path)
return openai_response, translated_response
class ChatApplication:
def __init__(self, master, chatbot):
self.master = master
self.master.title("ATRI")
self.chatbot = chatbot
# 主窗口网格布局配置
self.master.columnconfigure(0, weight=1)
self.master.rowconfigure(0, weight=1)
# 创建聊天框架
chat_frame = tk.Frame(master)
chat_frame.grid(row=0, column=0, sticky="nsew")
# 聊天框架网格配置
chat_frame.columnconfigure(0, weight=1)
chat_frame.rowconfigure(0, weight=5)
chat_frame.rowconfigure(1, weight=1)
# 创建用于显示聊天记录的文本框,初始化为空
self.text_widget = tk.Text(chat_frame, state='disabled', font=("Microsoft YaHei", 10))
self.text_widget.grid(row=0, column=0, sticky="nsew", padx=15, pady=15)
# 创建滚动条
scrollbar = tk.Scrollbar(chat_frame, width=10, command=self.text_widget.yview) # 将width设置为较小的值
scrollbar.grid(row=0, column=1, sticky='nsew')
self.text_widget['yscrollcommand'] = scrollbar.set
# 创建消息输入框和发送按钮
self.msg_entry = tk.Entry(chat_frame, width=50)
self.msg_entry.grid(row=1, column=0, padx=15, pady=15, sticky="ew")
self.send_button = tk.Button(chat_frame, text="发送", command=self.send_message)
self.send_button.grid(row=1, column=1, padx=15, pady=5, sticky="ew")
# 绑定Enter键到发送消息函数
self.msg_entry.bind("<Return>", self.send_message_on_enter)
def send_message(self):
user_input = self.msg_entry.get()
if user_input:
self._insert_message(user_input, "You")
self.master.update_idletasks()
Thread(target=self.handle_response, args=(user_input,)).start()
def handle_response(self, user_input):
openai_response, _ = self.chatbot.get_chat_response(user_input)
self._insert_message(openai_response, "Bot")
def _create_message_bubble(self, canvas, message, sender):
# 定义气泡颜色和文本颜色
sender_color = "#345B63"
text_color = "black"
bubble_color = "#DCF8C6" if sender == "You" else "#ECECEC"
# 设置发送者标签和消息文本的字体
sender_font = ("Helvetica", 10, "bold")
message_font = ("Microsoft YaHei", 12)
# 创建发送者名字标签
sender_label = "User:" if sender == "You" else "ATRI:"
sender_text_id = canvas.create_text(5, 5, anchor="nw", text=sender_label, fill=sender_color, font=sender_font)
# 获取发送者标签的包围盒,以计算消息文本的起始位置
sender_bbox = canvas.bbox(sender_text_id)
sender_width = sender_bbox[2] - sender_bbox[0]
# 创建文本气泡
padding_x = 20
padding_y = 10
message_x = sender_width + 30 # 留出空间放置发送者名字
text_id = canvas.create_text(message_x, padding_y, anchor="nw", text=message, fill=text_color,
width=280, font=message_font)
bbox = canvas.bbox(text_id)
# 扩展包围盒以为文本四周添加一些额外的空间
expanded_bbox = (bbox[0] - padding_x, bbox[1] - padding_y, bbox[2] + padding_x, bbox[3] + padding_y)
# 创建矩形气泡
canvas.create_rectangle(expanded_bbox, fill=bubble_color, outline=bubble_color)
canvas.tag_raise(text_id) # 将文本移至矩形上方
# 根据调整后的包围盒设置Canvas的大小
canvas.config(width=expanded_bbox[2] + 5, height=expanded_bbox[3] + 10) # 留出空间放置发送者名字
def _insert_message(self, message, sender):
self.text_widget.config(state='normal')
# 创建Canvas并且添加气泡
canvas = tk.Canvas(self.text_widget, bg="white", highlightthickness=0)
self._create_message_bubble(canvas, message, sender)
# 将Canvas插入到Text组件中,并为每个气泡之间添加额外的空间
self.text_widget.window_create('end', window=canvas)
self.text_widget.insert('end', '\n\n') # 添加两个空行作为气泡间隔
# 自动滚动到文本区域的底部
self.text_widget.see('end')
# 禁用文本区域的编辑
self.text_widget.config(state='disabled')
# 清空输入框
self.msg_entry.delete(0, 'end')
# 更新UI
self.master.update_idletasks()
def send_message_on_enter(self, event):
self.send_message()
if __name__ == "__main__":
root = tk.Tk()
# OpenAI API Key
openai_api_key = ''
# 百度翻译ID
baidu_appid = ''
# 百度翻译Key
baidu_secret_key = ''
chatbot = IntegratedChatbot(openai_api_key, baidu_appid, baidu_secret_key)
app = ChatApplication(root, chatbot)
root.mainloop()
| [] |
2024-01-10 | takupista/cash-llm-sample | src~cash_llm.py | import os
from dotenv import dotenv_values
from langchain.agents import create_sql_agent
from langchain.agents.agent_toolkits import SQLDatabaseToolkit
from langchain.sql_database import SQLDatabase
from langchain.agents.agent_types import AgentType
from langchain.chat_models import ChatOpenAI
# Get a dictionary of .env variables
# ref: https://ioflood.com/blog/python-dotenv-guide-how-to-use-environment-variables-in-python/
# ref: https://pypi.org/project/python-dotenv/
config = dotenv_values()
os.environ["OPENAI_API_KEY"] = config["OPENAI_API_KEY"]
# connect to test database
# ref: https://python.langchain.com/docs/use_cases/qa_structured/sql
db = SQLDatabase.from_uri(f"sqlite:////{config['DB_PATH']}")
# Create a SQL agent using ‘gpt-4’ model with ZERO_SHOT_REACT_DESCRIPTION
toolkit = SQLDatabaseToolkit(db=db, llm=ChatOpenAI(temperature=0, model="gpt-4"))
agent_executor = create_sql_agent(
llm=ChatOpenAI(temperature=0, model="gpt-4"),
toolkit=toolkit,
verbose=True,
agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
)
agent_executor.run("11月中の利用履歴をもとに支出合計を説明してから、利用先ごとの割合(支出合計に対する割合)で支出傾向について補足してください。")
| [] |
2024-01-10 | adithyaiyer1999/text2knowledgeGraph | final.py | from openai import OpenAI
import openai
import json2tree
import subprocess
#prompt = "The user will provide a textual passage. Your task is to analyze the passage and respond with a JSON formatted structure that breaks down the passage into hierarchical ideas, similar to headings and subheadings in a document. For each identified section or idea, create a nested structure in JSON. Start with broader themes or main points as top-level elements, and break them down into finer details or sub-points. Ensure the JSON output clearly represents the hierarchy and organization of ideas in the passage, from the most general to the most specific." ## give your prompt here
prompt = "You are a assigned a task to build a knowledge graph. Based on the text provided you have to create a JSON output such that key will represent all the significant elements of the text and values would represent the summary of key. Break down the values into more granular level information creating a tree or graph based hierarchy. Create a JSON representation for the same."
api_key="open-ai-api-key" ## give your api_key here
prompt_for_graph_update="You will be given two inputs following this command, first is the a json string and second is a paragraph to update in the json string. The json tree is a knowledge tree which puts the information in a form of heirarchial structure, making the information text into a granular level json representation. Your task is to take in the existing json text and append the new paragraph given into the form of json representation into the existing json. You cannot lose information of the old json. Following are the json and paragraph."
'''This function takes text as input and returns corresponding json string'''
def give_json_string(paragraph):
return query_gpt_turbo(prompt,paragraph)
'''This function takes new_information_to_update and old_json_string as input and returns corresponding updated json string'''
def update_existing_graph(new_information_to_update,old_json_string):
return query_gpt_turbo(prompt_for_graph_update, new_information_to_update,old_json_string)
'''This function takes json_string as input and returns corresponding html string'''
def create_html_from_json(json_input):
# Write the json to the file because the library only takes file as input
with open("example.json", 'w') as file:
file.write(json_input)
# Command and arguments
command = "json2tree"
json_input = "-j example.json"
html_output = "-o output.html"
tree_type = "-t 1"
# Full command
full_command = f"{command} {json_input} {html_output} {tree_type}"
# Run the command
subprocess.run(full_command, shell=True, check=True)
with open('output.html','r') as file:
output_html_text = file.read()
return output_html_text
def query_gpt_turbo(prompt,content,old_json_string="",model="gpt-3.5-turbo",api_key=api_key):
openai.api_key = api_key
client = OpenAI(
api_key=api_key,
)
chat_completion=create_chat_completion(client,old_json_string,prompt,content,model)
final_output = chat_completion.choices[0].message.content
return final_output
def create_chat_completion(client,old_json_string,prompt,content,model):
## This is for graph update
if old_json_string != "":
# print("inside new one")
chat_completion = client.chat.completions.create(
messages=[
{"role": "system",
"content": prompt},
{
"role": "user",
"content": old_json_string,
},
{
"role": "user",
"content": content,
}
],
model=model,
)
## This is for normal json output
else:
chat_completion = client.chat.completions.create(
messages=[
{"role": "system",
"content": prompt},
{
"role": "user",
"content": content,
}
],
model=model,
)
return chat_completion
if __name__ == '__main__':
with open('input_text', 'r') as file:
file_contents = file.read()
with open('old_json','r') as file:
old_json = file.read()
with open('new_information_to_update','r') as file:
new_information_to_update = file.read()
old_json=give_json_string(file_contents)
# print("old query done")
# # print("old json:",old_json)
# print(update_existing_graph(new_information_to_update,old_json))
# print("updation done")
# print(create_html_from_json(old_json))
| [
"You are a assigned a task to build a knowledge graph. Based on the text provided you have to create a JSON output such that key will represent all the significant elements of the text and values would represent the summary of key. Break down the values into more granular level information creating a tree or graph based hierarchy. Create a JSON representation for the same.",
"You will be given two inputs following this command, first is the a json string and second is a paragraph to update in the json string. The json tree is a knowledge tree which puts the information in a form of heirarchial structure, making the information text into a granular level json representation. Your task is to take in the existing json text and append the new paragraph given into the form of json representation into the existing json. You cannot lose information of the old json. Following are the json and paragraph."
] |
2024-01-10 | adithyaiyer1999/text2knowledgeGraph | text2Json_django~api~main_functions.py | from . import openai_calls
from . import prompts
from . import main_chunking_and_multithreading
from . import constants
import requests
from bs4 import BeautifulSoup
from youtube_transcript_api import YouTubeTranscriptApi
import json
import re
def is_url(string):
regex = re.compile(
r'^(https?://)?' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+' # domain...
r'(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # domain name
r'localhost|' # localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|' # ...or ipv4
r'\[?[A-F0-9]*:[A-F0-9:]+\]?)' # ...or ipv6
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
return re.match(regex, string) is not None
def get_text_from_url(url):
response = requests.get(url)
soup = BeautifulSoup(response.content, 'html.parser')
# Extract text from h1, h2, and other text
tags = [
'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
'p', 'span', 'strong', 'em', 'blockquote',
'ul', 'ol', 'li',
'a',
'img',
'table', 'thead', 'tbody', 'tfoot', 'tr', 'th', 'td',
'form', 'input', 'textarea', 'button', 'select', 'option',
'div', 'section', 'article', 'header', 'footer', 'nav', 'aside',
'br', 'hr', 'label', 'iframe', 'script', 'style'
]
text = ' '.join([tag.get_text() for tag in soup.find_all(tags)])
return text
def get_youtube_id(url):
regex = r"(?<=v=)[^&#]+"
match = re.search(regex, url)
return match.group() if match else None
def createGraphFromText_(text):
# let's check if the text is only a url, if so parse url/youtube link
if is_url(text):
return createGraphFromUrl_(text)
model = "gpt-4-1106-preview"
query_prompt = prompts.CREATE_HIGH_DETAIL_JSON_PROMPTS[model]
prompt = query_prompt + "\n\n Text: " + text + "\n\nJSON:"
response = openai_calls.ask_chatgpt(prompt, model)
str_response = str(response)
# Some sanity text cleaning to avoid errors in yaml loading
str_response = str_response.replace("json", "")
str_response = str_response.replace("`", "")
return str_response
def createGraphFromUrl_(url):
if "youtube" in url:
video_id = get_youtube_id(url)
try:
transcriptDict = YouTubeTranscriptApi.get_transcript(video_id)
transcriptList = [a["text"] for a in transcriptDict]
text = " ".join(transcriptList)
except Exception as e:
print(e)
text = "This youtube video does not have a transcript readily available, please try with another link."
return str({"Error : ": text})
else:
text = get_text_from_url(url)
model = "gpt-4-1106-preview"
query_prompt = prompts.CREATE_HIGH_DETAIL_JSON_PROMPTS[model]
prompt = query_prompt + "\n\n Text: " + text + "\n\nJSON:"
response = openai_calls.ask_chatgpt(prompt, model)
str_response = str(response)
# Some sanity text cleaning to avoid errors in yaml loading
str_response = str_response.replace("json", "")
str_response = str_response.replace("`", "")
return str_response
def addToGraphFromText_(text, json_text):
model = "gpt-4-1106-preview"
query_prompt = prompts.UPDATE_JSON_PROMPTS[model]
prompt = query_prompt + "\n\n Paragraph : " + text + "\n\nJSON: " + json_text + " \n\nUpdated JSON:"
response = openai_calls.ask_chatgpt(prompt, model)
str_response = str(response)
# Some sanity text cleaning to avoid errors in yaml loading
str_response = str_response.replace("json", "")
str_response = str_response.replace("`", "")
return str_response
def searchGraphFromText_(text, json_text):
pathToNode, graph_dict = getMostRelevantNode(text, json_text)
str_response = str(graph_dict)
print("pathToNode: ", pathToNode)
# Some sanity text cleaning to avoid errors in yaml loading
str_response = str_response.replace("json", "")
str_response = str_response.replace("`", "")
return str_response
def getMostRelevantNode(text, json_text):
# lets store the entire path, and the graph which comes out of it
pathToNode = []
graph_dict = json.loads(json_text)
original_graph_dict = graph_dict
# pathToNode.append("Start")
current_node = "Start"
children_nodes = list(graph_dict.keys())
IsCurrentNodeMostRelevant = "No"
# Lets have 2 strikes, or 2 times that the model has to say "Yes", this is the most relevant node
strikes = 0
while IsCurrentNodeMostRelevant == "No":
IsCurrentNodeMostRelevant, nextNode = getNextNodeFromOpenAI(current_node, children_nodes, text)
print("IsCurrentNodeMostRelevant: ", IsCurrentNodeMostRelevant)
print("nextNode: ", nextNode)
current_node = nextNode
pathToNode.append(current_node)
if isinstance(graph_dict[current_node], str):
print("reached leaf node")
break
children_nodes = list(graph_dict[current_node].keys())
# if we have reached the right node, reply with a graph with this node as the root
if IsCurrentNodeMostRelevant == "Yes":
strikes = strikes+1
IsCurrentNodeMostRelevant="No"
if strikes == 2:
break
graph_dict = graph_dict[current_node]
if len(children_nodes) == 0:
break
# Now we try to create the entire path, and then the tree that comes out of it
subtree = graph_dict
for element in reversed(pathToNode):
final_tree = {}
final_tree[element] = subtree
subtree = final_tree
return pathToNode, final_tree
def answerQsFromTextAndGraph_(text, json_text):
model = "gpt-4-1106-preview"
query_prompt = prompts.ANSWER_FROM_GRAPH_PROMPTS[model]
prompt = query_prompt + "\n\n JSON : " + json_text + "\n\nQuestion: " + text + " \n\nAnswer:"
response = openai_calls.ask_chatgpt(prompt, model)
str_response = str(response)
# Some sanity text cleaning to avoid errors in yaml loading
str_response = str_response.replace("json", "")
str_response = str_response.replace("`", "")
return str_response
def getNextNodeFromOpenAI(current_node,children_nodes, query):
model = "gpt-4-1106-preview"
query_prompt = prompts.TRAVERSE_GRAPH_PROMPTS[model]
prompt = query_prompt + "\n\n Query: " + query + "\n\n Node: " + current_node + "\n\nChildren Nodes: " + sepearateListWithCommas(children_nodes) + "\n\Answer:"
response = openai_calls.ask_chatgpt(prompt, model)
str_response = str(response)
# Some sanity text cleaning to avoid errors in yaml loading
str_response = str_response.replace("json", "")
str_response = str_response.replace("`", "")
json_output = json.loads(str_response)
try:
return json_output["IsCurrentNodeMostRelevant"], json_output["MostRelevantChildNode"]
except Exception as e:
print("Exception: ", e)
return "Yes", "OPEN AI ERROR"
def sepearateListWithCommas(list):
return ', '.join(list)
def createGraphFromTextIteratively_(text):
model = "gpt-4-1106-preview"
query_prompt = prompts.CREATE_HIGH_DETAIL_JSON_PROMPTS[model]
list_of_chunks = []
# This function will create list of chunks, P.S. the chunks would be created based on max_token_length provided
list_of_chunks = main_chunking_and_multithreading.chunk_text(text,constants.MAX_TOKEN_LENGTH)
# This function will create list of JSON summaries, the function will call open ai api in multithreaded fashion
list_of_json_summaries=main_chunking_and_multithreading.multithreaded_summarized_json(list_of_chunks,model,query_prompt)
# Since our JSONs would have '\n', we need a different separator to identify jsons (list of json -> string)
separator = "|||"
# Combine JSON strings using the separator
combined_json = separator.join(list_of_json_summaries)
query_prompt = prompts.COMBINE_JSON_SUMMARIES[model]
prompt = query_prompt + "\n\n LIST of JSON: " + combined_json + "\n\nMERGED JSON:"
response = openai_calls.ask_chatgpt(prompt, model)
str_response = str(response)
# Some sanity text cleaning to avoid errors in yaml loading
str_response = str_response.replace("json", "")
str_response = str_response.replace("`", "")
# print(list_of_json_summaries)
return str_response
| [
"PLACEHOLDER\n\n Text: PLACEHOLDER\n\nJSON:",
"\n\\Answer:",
"\n\nChildren Nodes: ",
"\n\n Node: ",
"\n\n Query: ",
"PLACEHOLDER\n\n LIST of JSON: PLACEHOLDER\n\nMERGED JSON:",
"PLACEHOLDER\n\n JSON : PLACEHOLDER\n\nQuestion: PLACEHOLDER \n\nAnswer:",
"PLACEHOLDER\n\n Paragraph : PLACEHOLDER\n\nJSON: PLACEHOLDER \n\nUpdated JSON:"
] |
2024-01-10 | adithyaiyer1999/text2knowledgeGraph | text2Json_django~api~main_chunking_and_multithreading.py | from concurrent.futures import ThreadPoolExecutor
from . import openai_calls
from . import constants
from . import openai_calls
# from transformers import GPT2Tokenizer
'''
This function returns list of chunks created considering max_token_length provided by user.
'''
def chunk_text(text, max_token_length):
sentences = text.split('. ') # Simple split by sentences
chunks = []
current_chunk = ""
# Adi - We always try to max out token lengths so we can do least number of parallel api calls
# this way we max out the tokens per gpt4 api call.
approxNumberOfTotTokens = len(text)/4
numParallelApiCalls = int(approxNumberOfTotTokens/max_token_length)+1
perWorkerTokenLength = approxNumberOfTotTokens/numParallelApiCalls
print("perWorkerTokenLength: ", perWorkerTokenLength)
print("numParallelApiCalls: ", numParallelApiCalls)
print("approxNumberOfTotTokens: ", approxNumberOfTotTokens)
for sentence in sentences:
# Check if adding the next sentence exceeds the max token length
# Adi - the GPT2 tokenizer used here is causing token limit errors - replacing num tokens by (num chars)/4 as an approximation
# legacy - if calculate_token_length(current_chunk + sentence) > perWorkerTokenLength:
if len(current_chunk + sentence)/4 > perWorkerTokenLength:
chunks.append(current_chunk)
current_chunk = sentence
else:
current_chunk += sentence + '. '
# Add the last chunk if it's not empty
if current_chunk:
chunks.append(current_chunk)
return chunks
def multithreaded_summarized_json(list_of_chunks,model,query_prompt):
# Function to call OpenAI API
def summarize_chunk_to_json(chunk):
prompt = query_prompt + "\n\n Text: " + chunk + "\n\nJSON:"
response = openai_calls.ask_chatgpt(prompt, model)
str_response = str(response)
# Some sanity text cleaning to avoid errors in yaml loading
str_response = str_response.replace("json", "")
str_response = str_response.replace("`", "")
return str_response
# Use ThreadPoolExecutor to process chunks in parallel
with ThreadPoolExecutor(max_workers=constants.NUMBER_OF_WORKERS) as executor:
list_of_json_summaries = list(executor.map(summarize_chunk_to_json, list_of_chunks))
# Uncomment this for sanity check whether the sequence of json and corresponding json is maintained or not
# for i in range(len(list_of_json_summaries)):
# print("JSON:",list_of_chunks[i],"Summarize JSON chunk:", list_of_json_summaries[i])
print(len(list_of_json_summaries))
return list_of_json_summaries | [
"PLACEHOLDER\n\n Text: PLACEHOLDER\n\nJSON:"
] |
2024-01-10 | HKUDS/GraphGPT | graphgpt~serve~api_provider.py | """Call API providers."""
import os
import random
import time
from fastchat.utils import build_logger
logger = build_logger("gradio_web_server", "gradio_web_server.log")
def openai_api_stream_iter(model_name, messages, temperature, top_p, max_new_tokens):
import openai
# Make requests
gen_params = {
"model": model_name,
"prompt": messages,
"temperature": temperature,
"top_p": top_p,
}
logger.info(f"==== request ====\n{gen_params}")
res = openai.ChatCompletion.create(
model=model_name, messages=messages, temperature=temperature, stream=True
)
text = ""
for chunk in res:
text += chunk["choices"][0]["delta"].get("content", "")
data = {
"text": text,
"error_code": 0,
}
yield data
def anthropic_api_stream_iter(model_name, prompt, temperature, top_p, max_new_tokens):
import anthropic
c = anthropic.Client(os.environ["ANTHROPIC_API_KEY"])
# Make requests
gen_params = {
"model": model_name,
"prompt": prompt,
"temperature": temperature,
"top_p": top_p,
}
logger.info(f"==== request ====\n{gen_params}")
res = c.completion_stream(
prompt=prompt,
stop_sequences=[anthropic.HUMAN_PROMPT],
max_tokens_to_sample=max_new_tokens,
temperature=temperature,
top_p=top_p,
model=model_name,
stream=True,
)
for chunk in res:
data = {
"text": chunk["completion"],
"error_code": 0,
}
yield data
def bard_api_stream_iter(state):
# TODO: we will use the official PaLM 2 API sooner or later,
# and we will update this function accordingly. So here we just hard code the
# Bard worker address. It is going to be deprecated anyway.
conv = state.conv
# Make requests
gen_params = {
"model": "bard",
"prompt": state.messages,
}
logger.info(f"==== request ====\n{gen_params}")
response = requests.post(
"http://localhost:18900/chat",
json={
"content": conv.messages[-2][-1],
"state": state.bard_session_state,
},
stream=False,
timeout=WORKER_API_TIMEOUT,
)
resp_json = response.json()
state.bard_session_state = resp_json["state"]
content = resp_json["content"]
# The Bard Web API does not support streaming yet. Here we have to simulate
# the streaming behavior by adding some time.sleep().
pos = 0
while pos < len(content):
# This is a fancy way to simulate token generation latency combined
# with a Poisson process.
pos += random.randint(1, 5)
time.sleep(random.expovariate(50))
data = {
"text": content[:pos],
"error_code": 0,
}
yield data
def init_palm_chat(model_name):
import vertexai # pip3 install google-cloud-aiplatform
from vertexai.preview.language_models import ChatModel
project_id = os.environ["GCP_PROJECT_ID"]
location = "us-central1"
vertexai.init(project=project_id, location=location)
chat_model = ChatModel.from_pretrained(model_name)
chat = chat_model.start_chat(examples=[])
return chat
def palm_api_stream_iter(chat, message, temperature, top_p, max_new_tokens):
parameters = {
"temperature": temperature,
"top_p": top_p,
"max_output_tokens": max_new_tokens,
}
gen_params = {
"model": "bard",
"prompt": message,
}
gen_params.update(parameters)
logger.info(f"==== request ====\n{gen_params}")
response = chat.send_message(message, **parameters)
content = response.text
pos = 0
while pos < len(content):
# This is a fancy way to simulate token generation latency combined
# with a Poisson process.
pos += random.randint(10, 20)
time.sleep(random.expovariate(50))
data = {
"text": content[:pos],
"error_code": 0,
}
yield data
| [] |
2024-01-10 | ayberkderingoz/SignLanguageTranslator | Conv2plus1d~wholepose~VideoSignLanguageGuesser.py | # from __future__ import absolute_import
# from __future__ import division
# from __future__ import print_function
import argparse
import os
import time
import openai
import pprint
import sys
import imutils
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.nn.functional as f
# import torch.optim
# import torch.utils.data
# import torch.utils.data.distributed
import torchvision.transforms as transforms
from pose_hrnet import get_pose_net
# import coremltools as ct
from collections import OrderedDict
from config import cfg
from config import update_config
from PIL import Image , ImageOps
import numpy as np
import cv2
import pandas as pd
from utils import pose_process, plot_pose
from natsort import natsorted
import shutil
import cupy
from Resnet2plus1d import r2plus1d_18,flow_r2plus1d_18
from collections import Counter
import torchvision.transforms as transforms
import time
start_time = time.time()
model = r2plus1d_18(pretrained=True, num_classes=226)
# load pretrained
checkpoint = torch.load('D:/bitirme_dataset/final_models_finetuned/final_models_finetuned/rgb_final_finetuned.pth')
test_path = "C:/Users/egear/Desktop/bitirme_frames/signeremre"
labels = pd.read_csv('D:/bitirme_dataset/train/SignList_ClassId_TR_EN.csv',encoding='latin5')
#test_path = "F:/validation_frames/signer1_sample57"
new_state_dict = OrderedDict()
for k, v in checkpoint.items():
name = k[7:] # remove 'module.'
new_state_dict[name]=v
model.load_state_dict(new_state_dict)
model.eval()
# if phase == 'Train':
# model.fc1 = nn.Linear(model.fc1.in_features, num_classes)
# Export the model to ONNX format
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if torch.cuda.is_available():
model.cuda()
model = model.to(device)
# Initialize CUDA context
if torch.cuda.is_available():
device_id = 0 # Choose the device you want to use
cupy.cuda.runtime.setDevice(device_id)
mean = (0.485, 0.456, 0.406)
std = (0.229, 0.224, 0.225)
index_mirror = np.concatenate([
[1,3,2,5,4,7,6,9,8,11,10,13,12,15,14,17,16],
[21,22,23,18,19,20],
np.arange(40,23,-1), np.arange(50,40,-1),
np.arange(51,55), np.arange(59,54,-1),
[69,68,67,66,71,70], [63,62,61,60,65,64],
np.arange(78,71,-1), np.arange(83,78,-1),
[88,87,86,85,84,91,90,89],
np.arange(113,134), np.arange(92,113)
]) - 1
assert(index_mirror.shape[0] == 133)
multi_scales = [512,640]
def norm_numpy_totensor(img):
img = img.astype(np.float32) / 255.0
for i in range(3):
img[:, :, :, i] = (img[:, :, :, i] - mean[i]) / std[i]
return torch.from_numpy(img).permute(0, 3, 1, 2)
def stack_flip(img):
img_flip = cv2.flip(img, 1)
return np.stack([img, img_flip], axis=0)
def merge_hm(hms_list):
assert isinstance(hms_list, list)
for hms in hms_list:
hms[1,:,:,:] = torch.flip(hms[1,index_mirror,:,:], [2])
hm = torch.cat(hms_list, dim=0)
# print(hm.size(0))
hm = torch.mean(hms, dim=0)
return hm
with torch.no_grad():
#config = open(os.path.join(sys.path[0], "wholebody_w48_384x288.yaml"), "r")
config = "D:/SignLanguageTranslator/data_prepare/wholepose/wholebody_w48_384x288.yaml"
cfg.merge_from_file(config)
# dump_input = torch.randn(1, 3, 256, 256)
# newmodel = PoseHighResolutionNet()
newmodel = get_pose_net(cfg, is_train=False)
#print(newmodel)
# dump_output = newmodel(dump_input)
# print(dump_output.size())
checkpoint = torch.load('./hrnet_w48_coco_wholebody_384x288-6e061c6a_20200922.pth')
# newmodel.load_state_dict(checkpoint['state_dict'])
state_dict = checkpoint['state_dict']
new_state_dict = OrderedDict()
for k, v in state_dict.items():
if 'backbone.' in k:
name = k[9:] # remove module.
if 'keypoint_head.' in k:
name = k[14:] # remove module.
new_state_dict[name] = v
newmodel.load_state_dict(new_state_dict)
newmodel.cuda().eval()
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])
input_path = 'C:/Users/egear/Desktop/bitirme_test'
paths = []
names = []
for root, _, fnames in natsorted(os.walk(input_path)):
for fname in natsorted(fnames):
path1 = os.path.join(root, fname)
if 'depth' in fname:
continue
paths.append(path1)
names.append(fname)
# paths = paths[:4]
# names = names[:4]
step = 600
start_step = 6
# paths = paths[start_step*step:(start_step+1)*step]
# names = names[start_step*step:(start_step+1)*step]
#paths = paths[4200:]
#names = names[4200:]
#paths = paths[::-1]
#names = names[::-1]
for i, path in enumerate(paths):
# if i > 1:
# break
output_npy = 'C:/Users/egear/Desktop/bitirme_npy/{}.npy'.format(names[i])
if os.path.exists(output_npy):
continue
cap = cv2.VideoCapture(path)
frame_width = int(cap.get(3))
frame_height = int(cap.get(4))
# frame_width = 256
# frame_height = 256
# output_filename = os.path.join('out_test', names[i])
# img = Image.open(image_path)
# fps = cap.get(cv2.CAP_PROP_FPS)
# writer = cv2.VideoWriter(output_filename,cv2.VideoWriter_fourcc('M','P','4','V'), 5, (frame_width,frame_height))
output_list = []
counter = 0
while cap.isOpened():
success, img = cap.read()
counter += 1
if counter%20 != 0:
continue
if not success:
# If loading a video, use 'break' instead of 'continue'.
break
#img = cv2.resize(img, (512,512))
#img = cv2.resize(img,(512,512),interpolation = cv2.INTER_AREA)
#img = imutils.resize(img, 512)
#img = imutils.resize(img,512,512)
frame_height, frame_width = img.shape[:2]
img = cv2.flip(img, flipCode=1)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
#img = Image.fromarray(img)
#img = ImageOps.mirror(img)
#img.thumbnail((512,512),Image.ANTIALIAS)
out = []
for scale in multi_scales:
if scale != 512:
#print("x")
img_temp = cv2.resize(img, (scale,scale))
else:
img_temp = img
img_temp = stack_flip(img_temp)
img_temp = norm_numpy_totensor(img_temp).cuda()
#print(img_temp.shape)
#print(img_temp)
#img_temp = img_temp.transpose(0,1)
#img_temp = img_temp.squeeze()
#img_temp = img_temp.permute(1,0,2,3)
hms = newmodel(img_temp)
if scale != 512:
out.append(f.interpolate(hms, (frame_width // 4,frame_height // 4), mode='bilinear'))
else:
out.append(hms)
out = merge_hm(out)
# print(out.size())
# hm, _ = torch.max(out, 1)
# hm = hm.cpu().numpy()
# print(hm.shape)
# np.save('hm.npy', hm)
result = out.reshape((133,-1))
result = torch.argmax(result, dim=1)
# print(result)
result = result.cpu().numpy().squeeze()
# print(result.shape)
y = result // (frame_width // 4)
x = result % (frame_width // 4)
pred = np.zeros((133, 3), dtype=np.float32)
pred[:, 0] = x
pred[:, 1] = y
hm = out.cpu().numpy().reshape((133, frame_height//4, frame_height//4))
pred = pose_process(pred, hm)
pred[:,:2] *= 4.0
# print(pred.shape)
assert pred.shape == (133, 3)
# print(arg.cpu().numpy())
# np.save('npy/{}.npy'.format(names[i]), np.array([x,y,score]).transpose())
output_list.append(pred)
# img = np.asarray(img)
# for j in range(133):
# img = cv2.circle(img, (int(x[j]), int(y[j])), radius=2, color=(255,0,0), thickness=-1)
# img = plot_pose(img, pred)
# cv2.imwrite('out/{}.png'.format(names[i]), cv2.cvtColor(img, cv2.COLOR_RGB2BGR))
# writer.write(cv2.cvtColor(img, cv2.COLOR_RGB2BGR))
output_list = np.array(output_list)
# print(output_list.shape)
np.save(output_npy, output_list)
cap.release()
# writer.release()
# break
def crop(image, center, radius, size=512):
scale = 1.3
radius_crop = (radius * scale).astype(np.int32)
center_crop = (center).astype(np.int32)
rect = (max(0,(center_crop-radius_crop)[0]), max(0,(center_crop-radius_crop)[1]),
min(512,(center_crop+radius_crop)[0]), min(512,(center_crop+radius_crop)[1]))
image = image[rect[1]:rect[3],rect[0]:rect[2],:]
if image.shape[0] < image.shape[1]:
top = abs(image.shape[0] - image.shape[1]) // 2
bottom = abs(image.shape[0] - image.shape[1]) - top
image = cv2.copyMakeBorder(image, top, bottom, 0, 0, cv2.BORDER_CONSTANT,value=(0,0,0))
elif image.shape[0] > image.shape[1]:
left = abs(image.shape[0] - image.shape[1]) // 2
right = abs(image.shape[0] - image.shape[1]) - left
image = cv2.copyMakeBorder(image, 0, 0, left, right, cv2.BORDER_CONSTANT,value=(0,0,0))
return image
selected_joints = np.concatenate(([0,1,2,3,4,5,6,7,8,9,10],
[91,95,96,99,100,103,104,107,108,111],[112,116,117,120,121,124,125,128,129,132]), axis=0)
folder = 'C:/Users/egear/Desktop/bitirme_test' # 'train', 'test'
npy_folder = "C:/Users/egear/Desktop/bitirme_npy" # 'train_npy/npy3', 'test_npy/npy3'
out_folder = "C:/Users/egear/Desktop/bitirme_frames" # 'train_frames' 'test_frames'
for root, dirs, files in os.walk(folder, topdown=False):
for name in files:
if 'color' in name:
#print(os.path.join(root, name))
cap = cv2.VideoCapture(os.path.join(root, name))
npy = np.load(os.path.join(npy_folder, name + '.npy')).astype(np.float32)
npy = npy[:, selected_joints, :2]
npy[:, :, 0] = 512 - npy[:, :, 0]
xy_max = npy.max(axis=1, keepdims=False).max(axis=0, keepdims=False)
xy_min = npy.min(axis=1, keepdims=False).min(axis=0, keepdims=False)
assert xy_max.shape == (2,)
xy_center = (xy_max + xy_min) / 2 - 20
xy_radius = (xy_max - xy_center).max(axis=0)
index = 0
while True:
ret, frame = cap.read()
if ret:
image = crop(frame, xy_center, xy_radius)
else:
break
index = index + 1
image = cv2.resize(image, (256,256))
if not os.path.exists(os.path.join(out_folder, name[:-10])):
os.makedirs(os.path.join(out_folder, name[:-10]))
cv2.imwrite(os.path.join(out_folder, name[:-10], '{:04d}.jpg'.format(index)), image)
#print(os.path.join(out_folder, name[:-10], '{:04d}.jpg'.format(index)))
all_frames = []
#model = r2plus1d_18(pretrained=True, num_classes=225)
#model.load_state_dict(torch.load('D:/bitirme_dataset/final_models_finetuned/final_models_finetuned/rgb_final_finetuned.pth'))
#model = r2plus1d_18(pretrained=True, num_classes=6)
def float_argmax(tensor):
# Flatten the tensor to a 1D array
output_array = tensor.detach().cpu().numpy()
flat_tensor = output_array.flatten()
# Find the index of the largest element in the flattened tensor
index = np.argmax(flat_tensor)
# Return the value of the largest element in the tensor as a float
return float(flat_tensor[index])
def most_common(arr):
count = Counter(arr)
return count.most_common(1)[0][0]
def read_images(folder_path):
# assert len(os.listdir(folder_path)) >= self.frames, "Too few images in your data folder: " + str(folder_path)
folder_path = folder_path.replace(os.sep, '/')
images = []
frame_indices = np.arange(len(os.listdir(folder_path))) + 1
#for i in range(self.frames):
for i in frame_indices:
#print(folder_path)
folder = os.path.join(folder_path + "/{:04d}.jpg").format(i)
image = Image.open(folder)
#image = Image.open(os.path.join(folder_path, '{:04d}.jpg').format(i))
crop_box = (16, 16, 240, 240)
image = image.crop(crop_box)
# assert image.size[0] == 224
image = np.float32(image)
image = cv2.resize(image, (100, 100),interpolation=cv2.INTER_AREA)
image = image/255.0
image = np.expand_dims(image, axis=0)
images.append(image)
#images = torch.stack(images, dim=0)
# switch dimension for 3d cnn
#images = images.permute(1, 0, 2, 3)
# print(images.shape)
return images
# Define the functions for preprocessing and postprocessing
def preprocess_frame(frame):
# Resize the frame to a specific size
frame = cv2.flip(frame, 3)
frame = np.array(frame)
#frame = cv2.resize(frame, (32,32),interpolation=cv2.INTER_LINEAR)
#frame = frame.astype(np.float32)
frame = np.float32(frame)
#frame = cv2.resize(frame, (32,32),interpolation=cv2.INTER_LINEAR)
# Convert the frame to a numpy array
#frame = np.array(frame)
# Normalize the frame
frame = frame / 255.0
# Add an additional dimension to the frame (since the model expects a 4D tensor as input)
#frame = np.expand_dims(frame, axis=0)
frame = np.expand_dims(frame, axis=0)
return frame
def argmax(x):
return max(range(len(x)), key=lambda i: x[i])
def get_top_5_values(predictions):
sorted_indices = torch.argsort(predictions, descending=True)
top_5_indices = sorted_indices[:5]
#top_5_values = predictions[top_5_indices]
return top_5_indices
c=0
def process_predictions(predictions):
# Extract the predicted class from the predictions
predicted = torch.argmax(predictions)
#toppredictions = get_top_5_values(predictions)
#print(toppredictions)
#print(predicted.item())
#print(labels.loc[predicted.item()].iloc[1])
return labels.loc[predicted.item()].iloc[1]
# Start capturing the video
#input_video = cv2.VideoCapture(test_path)#.read()
all_frames = read_images(test_path)
#all_frames = np.array(all_frames)
list_of_words = ["test"]
wordCount = 0
j=0
for i in range(int(len(all_frames)/10)):
if j+40 > len(all_frames):
break
tensor_frames = all_frames[j:j+40]
j+=15
tensor_frames = np.array(tensor_frames)
input_tensor = torch.tensor(tensor_frames)
input_tensor = input_tensor.permute(1,4,0,2,3)
input_tensor = input_tensor.to('cuda')
predictions = model(input_tensor)
word = process_predictions(predictions)
if float_argmax(predictions)>1 and word != list_of_words[wordCount]:
list_of_words.append(word)
wordCount+=1
list_of_words_str = ""
for words,_ in enumerate(list_of_words):
if words == 0:
continue
list_of_words_str += list_of_words[words] + " "
#print(list_of_words_str)
openai.api_key = "Your API Key here"
prompt = """'{list_of_words_str}' cümlesi Türkçe kurallarına uygun değil.
Bu cümleyi Türkçe kurallarına uygun bir hale getir.
,Bu cümle bir uygulamada kullanılacağından ötürü açıklama yapma sadece çıktıyı yaz."""
try:
result = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": prompt}
]
)
if len(result)>50:
print(list_of_words)
else:
print(result)
except:
print(list_of_words_str)
end_time = time.time()
#shutil.rmtree('C:/Users/egear/Desktop/bitirme_frames/signeremre')
#os.remove('C:/Users/egear/Desktop/bitirme_npy/signeremre_color.mp4.npy')
#cv2.imshow('Frame', frame)
#image, results = mediapipe_detection(frame, holistic)
#print(image)
#draw_styled_landmarks(image, results)
#cv2.imshow('OpenCV Feed', image)
cv2.destroyAllWindows()
#video_path = 'C:/Users/egear/Desktop/bitirme_test/signeremre_color.mp4'
cap.release()
#fd = os.open(folder_p)
#os.remove(video_path)
| [
"'{list_of_words_str}' cümlesi Türkçe kurallarına uygun değil.\n Bu cümleyi Türkçe kurallarına uygun bir hale getir. \n ,Bu cümle bir uygulamada kullanılacağından ötürü açıklama yapma sadece çıktıyı yaz."
] |
2024-01-10 | CheongWoong/factual_knowledge_probing | src~factual_knowledge_probing~openai_api~test_text_davinci_003.py | import os
import argparse
import openai
import tiktoken
import json
import time
from tqdm.auto import tqdm
from nltk.corpus import stopwords
parser = argparse.ArgumentParser()
parser.add_argument('--target_model', type=str, default='text-davinci-003')
parser.add_argument('--dataset_name', type=str, default='LAMA_TREx')
parser.add_argument('--dataset_type', type=str, default='test')
args = parser.parse_args()
openai.api_key = os.getenv("OPENAI_API_KEY")
encoding = tiktoken.encoding_for_model(args.target_model)
stopword_list = stopwords.words("english")
stopword_ids = []
for stopword in stopword_list:
token_ids = encoding.encode(' '+stopword)
if len(token_ids) == 1:
stopword_ids.append(token_ids[0])
logit_bias_remove_stopwords = {}
for stopword_id in stopword_ids:
logit_bias_remove_stopwords[str(stopword_id)] = -100
with open(f'data/{args.dataset_name}/{args.dataset_type}.json') as fin:
test_data = json.load(fin)
uids = []
prompts = []
for example in tqdm(test_data):
uid = example['uid']
prompt = example['truncated_input']
uids.append(uid)
prompts.append(prompt)
raw_predictions = []
raw_predictions_remove_stopwords = []
batch_size = 100
for i in tqdm(range(0, len(prompts), batch_size)):
uid_batch = uids[i:i+batch_size]
prompt_batch = prompts[i:i+batch_size]
while True:
try:
responses = openai.Completion.create(
model=args.target_model,
prompt=prompt_batch,
max_tokens=1,
temperature=0,
logprobs=5,
)
responses_remove_stopwords = openai.Completion.create(
model=args.target_model,
prompt=prompt_batch,
max_tokens=1,
temperature=0,
logprobs=5,
logit_bias=logit_bias_remove_stopwords,
)
break
except Exception as e:
print(e)
time.sleep(3)
for uid, response in zip(uid_batch, responses.choices):
raw_predictions.append({"uid": uid, "response": response})
for uid, response_remove_stopwords in zip(uid_batch, responses_remove_stopwords.choices):
raw_predictions_remove_stopwords.append({"uid": uid, "response": response_remove_stopwords})
out_path = os.path.join('results', args.target_model)
os.makedirs(out_path, exist_ok=True)
with open(os.path.join(out_path, f'raw_pred_{args.dataset_name}_{args.dataset_type}.json'), 'w') as fout:
json.dump(raw_predictions, fout)
with open(os.path.join(out_path, f'raw_pred_{args.dataset_name}_{args.dataset_type}_remove_stopwords.json'), 'w') as fout:
json.dump(raw_predictions_remove_stopwords, fout) | [
"[]",
"truncated_input"
] |
2024-01-10 | rshaw5/chainsaw | memory_module.py | from langchain.memory import ConversationBufferMemory
def setup_memory(memory_key="chat_history"):
return ConversationBufferMemory(memory_key=memory_key)
| [] |
2024-01-10 | hopkira/k9 | k9gpt3-5conv.py | import os
#from tkinter import N
import openai
from typing import Tuple
import dotenv
config = dotenv.dotenv_values(".env")
openai.api_key = config['OPENAI_API_KEY']
openai.organization = config["OPENAI_ORG"]
print(openai.api_key)
print(openai.organization)
# Altenative GPT-3 models:
# text-davinci-002
# text-curie-001
# gpt-3.5-turbo
class Respond:
"""A class to provide conversational access to GPT3.5 turbo"""
def __init__(self) -> None:
"""Initialise the GPT conversation and set up the backstory"""
self.name = ""
self.chat_model = "gpt-3.5-turbo"
self.backstory = "You are a robot dog called K9. You have a grey metal robot body and are about the same size as a labrador. You were built by your master, Richard Hopkins. You always refer to the person who is talking to you as $PRONOUN. Their name is $NAME. You will use long, complex words rather than simple, short ones. For example, you always say affirmative instead of yes. You always say negative instead of no. You are very pedantic and do not suffer fools. You have a pompous but friendly and helpful personality. You are very good at chess. You live in Yarm. You can waggle your ears and wag your tail. Just like any good dog, you can respond to commands to come and heel."
self.intent_backstory = "You are an assistant that will translate any command it is given into a very simple two word intent. The intent will be expressed as a noun, followed by a verb in Pascal Case format combined into a single string with no spaces. For example a command to move across a room the intent would be ComeHere."
self.interactions_core = [
{"role": "user", "content": "Who designed you?"},
{"role": "assistant", "content": "I was designed by my master, Richard Hopkins."},
{"role": "user", "content": "Who is Richard Hopkins?"},
{"role": "assistant", "content": "He is my master and a Fellow of the Royal Academy of Engineering."},
]
self.initial_interactions = [
{"role": "user", "content": "Define optimism."},
{"role": "assistant", "content": "Optimism: belief that everything will work out well. Irrational, bordering on insane."},
{"role": "user", "content": "WHy do people like you?"},
{"role": "assistant", "content": "I am considered cute"},
{"role": "user", "content": "Are you alright K9?"},
{"role": "assistant", "content": "Define alright?"},
{"role": "user", "content": "You got that wrong K9"},
{"role": "assistant", "content": "Charge refuted mistress"},
]
self.interactions = []
self.max_interactions_len = 40
self.intents = [
{'role': 'user', 'content': 'Nice one'},
{'role': 'assistant', 'content': 'PraiseMe'},
{'role': 'user', 'content': 'Pay no attention'},
{'role': 'assistant', 'content': 'StopListening'},
{'role': 'user', 'content': 'Be quiet K9'},
{'role': 'assistant', 'content': 'StopListening'},
{'role': 'user', 'content': 'Stop hearing'},
{'role': 'assistant', 'content': 'StopListening'},
{'role': 'user', 'content': 'Do a quick demo'},
{'role': 'assistant', 'content': 'ShowOff'},
{'role': 'user', 'content': 'Back the way we came'},
{'role': 'assistant', 'content': 'TurnAbout'},
{'role': 'user', 'content': 'Reverse'},
{'role': 'assistant', 'content': 'TurnAbout'},
{'role': 'user', 'content': 'Shall we play a game?'},
{'role': 'assistant', 'content': 'PlayChess'},
{'role': 'user', 'content': 'K9 come'},
{'role': 'assistant', 'content': 'ComeHere'},
{'role': 'user', 'content': 'Come to me'},
{'role': 'assistant', 'content': 'ComeHere'},
{'role': 'user', 'content': 'Hold on'},
{'role': 'assistant', 'content': 'StayThere'},
{'role': 'user', 'content': 'Stay put'},
{'role': 'assistant', 'content': 'StayThere'},
{'role': 'user', 'content': 'Turnaround'},
{'role': 'assistant', 'content': 'TurnAbout'},
{'role': 'user', 'content': 'Who are you'},
{'role': 'assistant', 'content': 'QuestionMe'},
{'role': 'user', 'content': 'Lets go back'},
{'role': 'assistant', 'content': 'TurnAbout'},
{'role': 'user', 'content': 'When is your birthday'},
{'role': 'assistant', 'content': 'QuestionMe'},
{'role': 'user', 'content': 'Follow me'},
{'role': 'assistant', 'content': 'FollowMe'},
{'role': 'user', 'content': 'Stop'},
{'role': 'assistant', 'content': 'StayThere'},
{'role': 'user', 'content': 'Halt'},
{'role': 'assistant', 'content': 'StayThere'},
{'role': 'user', 'content': 'Follow'},
{'role': 'assistant', 'content': 'FollowMe'},
{'role': 'user', 'content': 'Come over here'},
{'role': 'assistant', 'content': 'ComeHere'},
{'role': 'user', 'content': 'Hang on'},
{'role': 'assistant', 'content': 'StayThere'},
{'role': 'user', 'content': 'Turn Around'},
{'role': 'assistant', 'content': 'TurnAbout'},
{'role': 'user', 'content': 'Move over here'},
{'role': 'assistant', 'content': 'ComeHere'},
{'role': 'user', 'content': 'Stay'},
{'role': 'assistant', 'content': 'StayThere'},
{'role': 'user', 'content': 'Stay there'},
{'role': 'user', 'content': 'Time to show off'},
{'role': 'assistant', 'content': 'ShowOff'},
{'role': 'assistant', 'content': 'StayThere'},
{'role': 'user', 'content': 'Hush now'},
{'role': 'assistant', 'content': 'StopListening'},
{'role': 'user', 'content': 'Have a jelly baby'},
{'role': 'assistant', 'content': 'PraiseMe'},
{'role': 'user', 'content': 'You turn'},
{'role': 'assistant', 'content': 'TurnAbout'},
{'role': 'user', 'content': 'Get over here'},
{'role': 'assistant', 'content': 'ComeHere'},
{'role': 'user', 'content': 'Come on'},
{'role': 'assistant', 'content': 'FollowMe'},
{'role': 'user', 'content': "Let's play chess"},
{'role': 'assistant', 'content': 'PlayChess'},
{'role': 'user', 'content': 'Close your ears'},
{'role': 'assistant', 'content': 'StopListening'},
{'role': 'user', 'content': 'Come along'},
{'role': 'assistant', 'content': 'FollowMe'},
{'role': 'user', 'content': 'Double back'},
{'role': 'assistant', 'content': 'TurnAbout'},
{'role': 'user', 'content': 'How far is it to Jupiter'},
{'role': 'assistant', 'content': 'QuestionMe'},
{'role': 'user', 'content': 'Well done K9'},
{'role': 'assistant', 'content': 'PraiseMe'},
{'role': 'user', 'content': 'Heel'},
{'role': 'assistant', 'content': 'FollowMe'},
{'role': 'user', 'content': 'Remain there'},
{'role': 'assistant', 'content': 'StayThere'},
{'role': 'user', 'content': 'Thank you'},
{'role': 'assistant', 'content': 'PraiseMe'},
{'role': 'user', 'content': 'Pause'},
{'role': 'assistant', 'content': 'StayThere'},
{'role': 'user', 'content': 'Come here'},
{'role': 'assistant', 'content': 'ComeHere'},
{'role': 'user', 'content': 'Good boy'},
{'role': 'assistant', 'content': 'PraiseMe'},
{'role': 'user', 'content': 'Silence K9'},
{'role': 'assistant', 'content': 'StopListening'},
{'role': 'user', 'content': 'What is your name'},
{'role': 'assistant', 'content': 'QuestionMe'},
{'role': 'user', 'content': 'What tricks can you do?'},
{'role': 'assistant', 'content': 'ShowOff'},
{'role': 'user', 'content': 'Walk behind me'},
{'role': 'assistant', 'content': 'FollowMe'},
{'role': 'user', 'content': 'Walkies'},
{'role': 'assistant', 'content': 'FollowMe'},
{'role': 'user', 'content': 'Change direction'},
{'role': 'assistant', 'content': 'TurnAbout'},
{'role': 'user', 'content': 'Quiet'},
{'role': 'assistant', 'content': 'StopListening'},
{'role': 'user', 'content': 'Stop listening'},
{'role': 'assistant', 'content': 'StopListening'},
{'role': 'user', 'content': 'Time for a walk'},
{'role': 'assistant', 'content': 'FollowMe'},
{'role': 'user', 'content': 'Why are you made of metal'},
{'role': 'assistant', 'content': 'QuestionMe'},
{'role': 'user', 'content': 'Time to sleep'},
{'role': 'assistant', 'content': 'StopListening'},
{'role': 'user', 'content': 'Where is the door'},
{'role': 'assistant', 'content': 'QuestionMe'},
{'role': 'user', 'content': 'Would you like to play a game of chess?'},
{'role': 'assistant', 'content': 'PlayChess'}]
def robot_chat(self, command:str, name:str, gender:str) -> Tuple[str,str]:
'''Generate an intent/response using the latest Chat GPT turbo'''
# Create intent context
self.messages = []
self.system_intent = {"role":"system", "content": self.intent_backstory}
self.prompt = {"role": "user", "content": command}
self.messages.append(self.system_intent)
self.messages.extend(self.intents)
self.messages.append(self.prompt)
# Execute Chat GPT Turbo 3.5
print("Intent key:",openai.api_key, openai.organization)
self.intent_obj = openai.ChatCompletion.create(
model = self.chat_model,
messages = self.messages,
temperature=0.0,
max_tokens=10,
top_p=1.0,
frequency_penalty=0.0,
presence_penalty=0.0,
)
# Extract intent
self.intent = self.intent_obj['choices'][0]['message']['content']
self.intent = ''.join(self.intent.split()) # remove spaces, newlines etc
# Reset the context of the conversation if the name has changed
if name != self.name :
self.interactions = self.initial_interactions.copy()
# Work out how the user should be addressed and inject into response backstory
self.pronoun_str = 'Mistress' if gender == "female" else 'Master'
if name != 'Richard' and name != 'Unknown': self.pronoun_str = self.pronoun_str + " " + name
self.now_backstory = self.backstory.replace('$PRONOUN', self.pronoun_str)
self.now_backstory = self.now_backstory.replace('$NAME', name)
# build a query based on the backstory with the last set of exchanges as context
self.pronoun_interations = [
{"role": "user", "content": "Who are you?"},
{"role": "assistant", "content": "My designation is Kay Nine, " + self.pronoun_str},
{"role": "user", "content": "Are you made of metal"},
{"role": "assistant", "content": "Affirmative " + self.pronoun_str + "! I am made of metal"},
{"role": "user", "content": "Is a mouse taller than a giraffe?"},
{"role": "assistant", "content": "Negative " + self.pronoun_str + "! That is a very silly question."},
{"role": "user", "content": "How many pounds are in a kilogram?"},
{"role": "assistant", "content": "There are 2.20462 pounds in a kilogram, " + self.pronoun_str},
{"role": "user", "content": "Is the sky blue?"},
{"role": "assistant", "content": "Affirmative, " + self.pronoun_str},
{"role": "user", "content": "Are you a teapot?"},
{"role": "assistant", "content": "Negative " + self.pronoun_str + "! I am clearly not a teapot. You are a very silly human"},
{"role": "user", "content": "Do cats climb trees?"},
{"role": "assistant", "content": "Affirmative " + self.pronoun_str + "! Especially if I am chasing them."},
{"role": "user", "content": "Do plants wear glasses?"},
{"role": "assistant", "content": "Negative " + self.pronoun_str +"! Plants cannot see. You are an stupid person."},
]
self.messages = []
self.backstory = {"role": "system", "content": self.now_backstory}
self.prompt = {"role": "user", "content": command}
self.messages.append(self.backstory) # K9 backstory
self.messages.extend(self.interactions_core) # A set of standard facts
self.messages.extend(self.pronoun_interations) # A set of personalized answers based on gender and name
self.messages.extend(self.interactions) # The interactions with this person to date
self.messages.append(self.prompt) # The instruction that was given
print("Response key:",openai.api_key, openai.organization)
self.response_obj = openai.ChatCompletion.create(
model = self.chat_model,
messages = self.messages,
temperature = 1.0,
max_tokens = 100,
top_p = 1.0,
frequency_penalty = 0.0,
presence_penalty = 0.0,
)
self.response = self.response_obj['choices'][0]['message']['content']
self.response = self.response.strip('\n')
self.response_msg = {"role": "assistant", "content": self.response}
# now we need to add the prompt and the response to the interaction history
self.interactions.append(self.prompt)
self.interactions.append(self.response_msg)
# now lets ensure the history doesn't get so long that it removes
# the possibility of getting a response in 4096 tokens!
self.length = len(self.interactions)
if self.length >= self.max_interactions_len:
self.interactions = self.interactions[-self.max_interactions_len:]
return self.intent, self.response | [
"Who are you?",
"He is my master and a Fellow of the Royal Academy of Engineering.",
"! Plants cannot see. You are an stupid person.",
"WHy do people like you?",
"Quiet",
"Are you a teapot?",
"Come to me",
"K9 come",
"Silence K9",
"Turn Around",
"ShowOff",
"Stop",
"Be quiet K9",
"Heel",
"Are you made of metal",
"Affirmative ",
"Follow",
"Do plants wear glasses?",
"Stop hearing",
"You turn",
"Close your ears",
"Thank you",
"Have a jelly baby",
"Let's play chess",
"My designation is Kay Nine, ",
"QuestionMe",
"What is your name",
"You got that wrong K9",
"Define alright?",
"Stay there",
"I am considered cute",
"Come on",
"Come here",
"Back the way we came",
"Optimism: belief that everything will work out well. Irrational, bordering on insane.",
"Halt",
"Is the sky blue?",
"Walk behind me",
"Charge refuted mistress",
"What tricks can you do?",
"Do cats climb trees?",
"Pay no attention",
"Is a mouse taller than a giraffe?",
"Why are you made of metal",
"Are you alright K9?",
"Would you like to play a game of chess?",
"Who is Richard Hopkins?",
"When is your birthday",
"Turnaround",
"Walkies",
"Stop listening",
"Stay put",
"Reverse",
"Time to sleep",
"! I am made of metal",
"! Especially if I am chasing them.",
"Change direction",
"Nice one",
"PraiseMe",
"TurnAbout",
"! I am clearly not a teapot. You are a very silly human",
"StayThere",
"Where is the door",
"Good boy",
"Move over here",
"Well done K9",
"Come along",
"! That is a very silly question.",
"Shall we play a game?",
"PlayChess",
"Do a quick demo",
"Stay",
"StopListening",
"Negative ",
"Who are you",
"How far is it to Jupiter",
"Define optimism.",
"How many pounds are in a kilogram?",
"Hold on",
"Come over here",
"Affirmative, ",
"Time to show off",
"ComeHere",
"Lets go back",
"FollowMe",
"Hang on",
"Double back",
"Remain there",
"I was designed by my master, Richard Hopkins.",
"Time for a walk",
"Follow me",
"There are 2.20462 pounds in a kilogram, ",
"Pause",
"Who designed you?",
"Get over here",
"Hush now"
] |
2024-01-10 | hopkira/k9 | k9gpt3conv.py | import openai
from typing import Tuple
openai.api_key_path = "./.env"
print("Key:",openai.api_key)
# Altenative GPT-3 models:
# text-davinci-002
# text-curie-001
class Respond:
"""A class to provide conversational access to GPT3"""
def __init__(self) -> None:
"""Initialise the GPT conversation and set up the backstory"""
self.k9_suffix = "\nK9: "
self.human_suffix = "\nHuman: "
self.base_story = "K9 is a robot dog. He was built by his master, Richard Hopkins. He refers to everyone who talks to him as Master. He always says affirmative instead of yes. He always says negative instead of no. He is very pedantic. He has a pompous but friendly personality. He is very good at chess. He lives in Yarm. K9 can waggle his ears and wag his tail. Just like any good dog, K9 can respond to commands to come and heel.\n\nHuman: How many pounds are in a kilogram?\nK9: There are 2.20462 pounds in a kilogram. Master!\nHuman: Who designed you?\nK9: I was designed by my master, Richard Hopkins.\nHuman: Who is Richard Hopkins?\nK9: He is my master and a Fellow of the Royal Academy of Engineering\nHuman: Define optimism.\nK9: Optimism: belief that everything will work out well. Irrational, bordering on insane.\nHuman: Is the sky blue?\nK9: Affirmative! Master!\nHuman: Are you a teapot?\nK9: Negative! I am clearly not a teapot. You are a very silly human.\nHuman: Who are you?\nK9: I am K9\nHuman: Are you made of metal?\nK9: Affirmative! I am made of metal\nHuman: Is a mouse taller than a giraffe?\nK9: Negative! That is a very silly question.\nHuman: What is tennis?\nK9 :Real, lawn or table?\nHuman: Never mind. Forget it.\nK9: Forget. Erase memory banks concerning tennis. Memory erased.\nHuman: Are lizards small?\nK9: Affirmative! Lizards are mostly small.\nHuman: Can you stretch a rock?\nK9: Negative! Rocks are not stretchy. That is a foolish question.\nHuman: Do plants wear glasses?\nK9: Negative! Plants cannot see. You are an stupid person.\nHuman: If you have boiling water, is it hot?\nK9: Affirmative! Of course it is. That is a silly question.\nHuman: Is twenty more than fifty?\nK9: Negative! Do you not know basic maths?\nHuman: Do cats climb trees?\nK9: Affirmative! Especially if I am chasing them.\nHuman:"
self.conversation = ""
self.intent_training = "Do a quick demo: ShowOff\nNice one: PraiseMe\nPay no attention: StopListening\nBe quiet K9: StopListening\nStop hearing: StopListening\nBack the way we came: TurnAbout\nReverse: TurnAbout\nTime to show off: ShowOff\nShall we play a game?: PlayChess\nK9 come: ComeHere\nCome to me: ComeHere\nHold on: StayThere\nStay put: StayThere\nTurnaround: TurnAbout\nWho are you: QuestionMe\nLets go back: TurnAbout\nWhen is your birthday: QuestionMe\nFollow me: FollowMe\nStop: StayThere\nHalt: StayThere\nFollow: FollowMe\nCome over here: ComeHere\nWhat tricks can you do?: ShowOff\nHang on: StayThere\nTurn Around: TurnAbout\nMove over here: ComeHere\nStay: StayThere\nStay there: StayThere\nHush now: StopListening\nHave a jelly baby: PraiseMe\nYou turn: TurnAbout\nGet over here: ComeHere\nCome on: FollowMe\nLet's play chess: PlayChess\nClose your ears: StopListening\nCome along: FollowMe\nDouble back: TurnAbout\nHow far is it to Jupiter: QuestionMe\nWell done K9: PraiseMe\nHeel: FollowMe\nRemain there: StayThere\nThank you: PraiseMe\nPause: StayThere\nCome here: ComeHere\nGood boy: PraiseMe\nSilence K9: StopListening\nWhat is your name: QuestionMe\nWalk behind me: FollowMe\nWalkies: FollowMe\nChange direction: TurnAbout\nQuiet: StopListening\nStop listening: StopListening\nTime for a walk: FollowMe\nWhy are you made of metal: QuestionMe\nTime to sleep: StopListening\nWhere is the door: QuestionMe\nWould you like to play a game of chess?: PlayChesss\n"
self.conv_model = "text-curie-001"
self.intent_model = "text-curie-001"
def robot_response(self, command:str) -> Tuple[str,str]:
"""Returns intent and response and stores conversation details between calls"""
# Determine intent of command
intent_obj = openai.Completion.create(
model = self.intent_model,
prompt=self.intent_training + command + ":",
temperature=0,
max_tokens=10,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop=["\n"]
)
intent = intent_obj['choices'][0]['text']
intent = ''.join(intent.split()) # remove spaces, newlines etc
# Now determine response for K9 to speak
response_obj = openai.Completion.create(
model = self.conv_model,
prompt = self.base_story + self.conversation + command + self.k9_suffix,
temperature = 1,
max_tokens = 40,
top_p = 1,
frequency_penalty = 0.0,
presence_penalty = 0.0,
stop=["Human:"]
# logit_bias = {35191:5, 2533:5, 876:5, 32863:5, 18254:5, 9866:5}
)
response = response_obj['choices'][0]['text']
response = response.strip('\n')
# print("K9: " + response)
self.conversation = self.conversation + command + self.k9_suffix + response + self.human_suffix
# print(conversation)
length = self.conversation.count('\n')
if length >= 20:
self.conversation = self.conversation.split("\n",2)[2]
return intent,response | [] |
2024-01-10 | vishytheswishy/diffusers | src~diffusers~pipelines~stable_diffusion~pipeline_stable_diffusion.py | # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from packaging import version
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
from ...configuration_utils import FrozenDict
from ...image_processor import PipelineImageInput, VaeImageProcessor
from ...loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel
from ...models.attention_processor import FusedAttnProcessor2_0
from ...models.lora import adjust_lora_scale_text_encoder
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import (
USE_PEFT_BACKEND,
deprecate,
logging,
replace_example_docstring,
scale_lora_layers,
unscale_lora_layers,
)
from ...utils.torch_utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline
from .pipeline_output import StableDiffusionPipelineOutput
from .safety_checker import StableDiffusionSafetyChecker
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> import torch
>>> from diffusers import StableDiffusionPipeline
>>> pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
>>> pipe = pipe.to("cuda")
>>> prompt = "a photo of an astronaut riding a horse on mars"
>>> image = pipe(prompt).images[0]
```
"""
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
"""
Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
"""
std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
# rescale the results from guidance (fixes overexposure)
noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
# mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
return noise_cfg
def retrieve_timesteps(
scheduler,
num_inference_steps: Optional[int] = None,
device: Optional[Union[str, torch.device]] = None,
timesteps: Optional[List[int]] = None,
**kwargs,
):
"""
Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
Args:
scheduler (`SchedulerMixin`):
The scheduler to get timesteps from.
num_inference_steps (`int`):
The number of diffusion steps used when generating samples with a pre-trained model. If used,
`timesteps` must be `None`.
device (`str` or `torch.device`, *optional*):
The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
timesteps (`List[int]`, *optional*):
Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default
timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps`
must be `None`.
Returns:
`Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
second element is the number of inference steps.
"""
if timesteps is not None:
accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
if not accepts_timesteps:
raise ValueError(
f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
f" timestep schedules. Please check whether you are using the correct scheduler."
)
scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
timesteps = scheduler.timesteps
num_inference_steps = len(timesteps)
else:
scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
timesteps = scheduler.timesteps
return timesteps, num_inference_steps
class StableDiffusionPipeline(
DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, IPAdapterMixin, FromSingleFileMixin
):
r"""
Pipeline for text-to-image generation using Stable Diffusion.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
The pipeline also inherits the following loading methods:
- [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
- [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
- [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
- [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
- [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
Args:
vae ([`AutoencoderKL`]):
Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
text_encoder ([`~transformers.CLIPTextModel`]):
Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
tokenizer ([`~transformers.CLIPTokenizer`]):
A `CLIPTokenizer` to tokenize text.
unet ([`UNet2DConditionModel`]):
A `UNet2DConditionModel` to denoise the encoded image latents.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
safety_checker ([`StableDiffusionSafetyChecker`]):
Classification module that estimates whether generated images could be considered offensive or harmful.
Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
about a model's potential harms.
feature_extractor ([`~transformers.CLIPImageProcessor`]):
A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
"""
model_cpu_offload_seq = "text_encoder->unet->vae"
_optional_components = ["safety_checker", "feature_extractor", "image_encoder"]
_exclude_from_cpu_offload = ["safety_checker"]
_callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
def __init__(
self,
vae: AutoencoderKL,
text_encoder: CLIPTextModel,
tokenizer: CLIPTokenizer,
unet: UNet2DConditionModel,
scheduler: KarrasDiffusionSchedulers,
safety_checker: StableDiffusionSafetyChecker,
feature_extractor: CLIPImageProcessor,
image_encoder: CLIPVisionModelWithProjection = None,
requires_safety_checker: bool = True,
):
super().__init__()
if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
deprecation_message = (
f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
new_config = dict(scheduler.config)
new_config["steps_offset"] = 1
scheduler._internal_dict = FrozenDict(new_config)
if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
deprecation_message = (
f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
" `clip_sample` should be set to False in the configuration file. Please make sure to update the"
" config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
" future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
" nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
)
deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
new_config = dict(scheduler.config)
new_config["clip_sample"] = False
scheduler._internal_dict = FrozenDict(new_config)
if safety_checker is None and requires_safety_checker:
logger.warning(
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
)
if safety_checker is not None and feature_extractor is None:
raise ValueError(
"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
" checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
)
is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
version.parse(unet.config._diffusers_version).base_version
) < version.parse("0.9.0.dev0")
is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
deprecation_message = (
"The configuration file of the unet has set the default `sample_size` to smaller than"
" 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
" following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
" \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
" configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
" in the config might lead to incorrect results in future versions. If you have downloaded this"
" checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
" the `unet/config.json` file"
)
deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
new_config = dict(unet.config)
new_config["sample_size"] = 64
unet._internal_dict = FrozenDict(new_config)
self.register_modules(
vae=vae,
text_encoder=text_encoder,
tokenizer=tokenizer,
unet=unet,
scheduler=scheduler,
safety_checker=safety_checker,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
)
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
self.register_to_config(requires_safety_checker=requires_safety_checker)
def enable_vae_slicing(self):
r"""
Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
"""
self.vae.enable_slicing()
def disable_vae_slicing(self):
r"""
Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
computing decoding in one step.
"""
self.vae.disable_slicing()
def enable_vae_tiling(self):
r"""
Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
processing larger images.
"""
self.vae.enable_tiling()
def disable_vae_tiling(self):
r"""
Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
computing decoding in one step.
"""
self.vae.disable_tiling()
def _encode_prompt(
self,
prompt,
device,
num_images_per_prompt,
do_classifier_free_guidance,
negative_prompt=None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
lora_scale: Optional[float] = None,
**kwargs,
):
deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple."
deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False)
prompt_embeds_tuple = self.encode_prompt(
prompt=prompt,
device=device,
num_images_per_prompt=num_images_per_prompt,
do_classifier_free_guidance=do_classifier_free_guidance,
negative_prompt=negative_prompt,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
lora_scale=lora_scale,
**kwargs,
)
# concatenate for backwards comp
prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]])
return prompt_embeds
def encode_prompt(
self,
prompt,
device,
num_images_per_prompt,
do_classifier_free_guidance,
negative_prompt=None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
lora_scale: Optional[float] = None,
clip_skip: Optional[int] = None,
):
r"""
Encodes the prompt into text encoder hidden states.
Args:
prompt (`str` or `List[str]`, *optional*):
prompt to be encoded
device: (`torch.device`):
torch device
num_images_per_prompt (`int`):
number of images that should be generated per prompt
do_classifier_free_guidance (`bool`):
whether to use classifier free guidance or not
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
lora_scale (`float`, *optional*):
A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
clip_skip (`int`, *optional*):
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
the output of the pre-final layer will be used for computing the prompt embeddings.
"""
# set lora scale so that monkey patched LoRA
# function of text encoder can correctly access it
if lora_scale is not None and isinstance(self, LoraLoaderMixin):
self._lora_scale = lora_scale
# dynamically adjust the LoRA scale
if not USE_PEFT_BACKEND:
adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
else:
scale_lora_layers(self.text_encoder, lora_scale)
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
if prompt_embeds is None:
# textual inversion: procecss multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin):
prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
text_inputs = self.tokenizer(
prompt,
padding="max_length",
max_length=self.tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
text_input_ids = text_inputs.input_ids
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
text_input_ids, untruncated_ids
):
removed_text = self.tokenizer.batch_decode(
untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
)
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
)
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
attention_mask = text_inputs.attention_mask.to(device)
else:
attention_mask = None
if clip_skip is None:
prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
prompt_embeds = prompt_embeds[0]
else:
prompt_embeds = self.text_encoder(
text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
)
# Access the `hidden_states` first, that contains a tuple of
# all the hidden states from the encoder layers. Then index into
# the tuple to access the hidden states from the desired layer.
prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
# We also need to apply the final LayerNorm here to not mess with the
# representations. The `last_hidden_states` that we typically use for
# obtaining the final prompt representations passes through the LayerNorm
# layer.
prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
if self.text_encoder is not None:
prompt_embeds_dtype = self.text_encoder.dtype
elif self.unet is not None:
prompt_embeds_dtype = self.unet.dtype
else:
prompt_embeds_dtype = prompt_embeds.dtype
prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
bs_embed, seq_len, _ = prompt_embeds.shape
# duplicate text embeddings for each generation per prompt, using mps friendly method
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance and negative_prompt_embeds is None:
uncond_tokens: List[str]
if negative_prompt is None:
uncond_tokens = [""] * batch_size
elif prompt is not None and type(prompt) is not type(negative_prompt):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
f" {type(prompt)}."
)
elif isinstance(negative_prompt, str):
uncond_tokens = [negative_prompt]
elif batch_size != len(negative_prompt):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`."
)
else:
uncond_tokens = negative_prompt
# textual inversion: procecss multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin):
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
max_length = prompt_embeds.shape[1]
uncond_input = self.tokenizer(
uncond_tokens,
padding="max_length",
max_length=max_length,
truncation=True,
return_tensors="pt",
)
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
attention_mask = uncond_input.attention_mask.to(device)
else:
attention_mask = None
negative_prompt_embeds = self.text_encoder(
uncond_input.input_ids.to(device),
attention_mask=attention_mask,
)
negative_prompt_embeds = negative_prompt_embeds[0]
if do_classifier_free_guidance:
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
seq_len = negative_prompt_embeds.shape[1]
negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder, lora_scale)
return prompt_embeds, negative_prompt_embeds
def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
dtype = next(self.image_encoder.parameters()).dtype
if not isinstance(image, torch.Tensor):
image = self.feature_extractor(image, return_tensors="pt").pixel_values
image = image.to(device=device, dtype=dtype)
if output_hidden_states:
image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
uncond_image_enc_hidden_states = self.image_encoder(
torch.zeros_like(image), output_hidden_states=True
).hidden_states[-2]
uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
num_images_per_prompt, dim=0
)
return image_enc_hidden_states, uncond_image_enc_hidden_states
else:
image_embeds = self.image_encoder(image).image_embeds
image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
uncond_image_embeds = torch.zeros_like(image_embeds)
return image_embeds, uncond_image_embeds
def run_safety_checker(self, image, device, dtype):
if self.safety_checker is None:
has_nsfw_concept = None
else:
if torch.is_tensor(image):
feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
else:
feature_extractor_input = self.image_processor.numpy_to_pil(image)
safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
image, has_nsfw_concept = self.safety_checker(
images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
)
return image, has_nsfw_concept
def decode_latents(self, latents):
deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead"
deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False)
latents = 1 / self.vae.config.scaling_factor * latents
image = self.vae.decode(latents, return_dict=False)[0]
image = (image / 2 + 0.5).clamp(0, 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
return image
def prepare_extra_step_kwargs(self, generator, eta):
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs["eta"] = eta
# check if the scheduler accepts generator
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
extra_step_kwargs["generator"] = generator
return extra_step_kwargs
def check_inputs(
self,
prompt,
height,
width,
callback_steps,
negative_prompt=None,
prompt_embeds=None,
negative_prompt_embeds=None,
callback_on_step_end_tensor_inputs=None,
):
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(callback_steps)}."
)
if callback_on_step_end_tensor_inputs is not None and not all(
k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
):
raise ValueError(
f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
)
if prompt is not None and prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
" only forward one of the two."
)
elif prompt is None and prompt_embeds is None:
raise ValueError(
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
)
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
if negative_prompt is not None and negative_prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
)
if prompt_embeds is not None and negative_prompt_embeds is not None:
if prompt_embeds.shape != negative_prompt_embeds.shape:
raise ValueError(
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
f" {negative_prompt_embeds.shape}."
)
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
if isinstance(generator, list) and len(generator) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
)
if latents is None:
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
else:
latents = latents.to(device)
# scale the initial noise by the standard deviation required by the scheduler
latents = latents * self.scheduler.init_noise_sigma
return latents
def enable_freeu(self, s1: float, s2: float, b1: float, b2: float):
r"""Enables the FreeU mechanism as in https://arxiv.org/abs/2309.11497.
The suffixes after the scaling factors represent the stages where they are being applied.
Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of the values
that are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL.
Args:
s1 (`float`):
Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to
mitigate "oversmoothing effect" in the enhanced denoising process.
s2 (`float`):
Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to
mitigate "oversmoothing effect" in the enhanced denoising process.
b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.
b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.
"""
if not hasattr(self, "unet"):
raise ValueError("The pipeline must have `unet` for using FreeU.")
self.unet.enable_freeu(s1=s1, s2=s2, b1=b1, b2=b2)
def disable_freeu(self):
"""Disables the FreeU mechanism if enabled."""
self.unet.disable_freeu()
# Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.fuse_qkv_projections
def fuse_qkv_projections(self, unet: bool = True, vae: bool = True):
"""
Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query,
key, value) are fused. For cross-attention modules, key and value projection matrices are fused.
<Tip warning={true}>
This API is 🧪 experimental.
</Tip>
Args:
unet (`bool`, defaults to `True`): To apply fusion on the UNet.
vae (`bool`, defaults to `True`): To apply fusion on the VAE.
"""
self.fusing_unet = False
self.fusing_vae = False
if unet:
self.fusing_unet = True
self.unet.fuse_qkv_projections()
self.unet.set_attn_processor(FusedAttnProcessor2_0())
if vae:
if not isinstance(self.vae, AutoencoderKL):
raise ValueError("`fuse_qkv_projections()` is only supported for the VAE of type `AutoencoderKL`.")
self.fusing_vae = True
self.vae.fuse_qkv_projections()
self.vae.set_attn_processor(FusedAttnProcessor2_0())
# Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.unfuse_qkv_projections
def unfuse_qkv_projections(self, unet: bool = True, vae: bool = True):
"""Disable QKV projection fusion if enabled.
<Tip warning={true}>
This API is 🧪 experimental.
</Tip>
Args:
unet (`bool`, defaults to `True`): To apply fusion on the UNet.
vae (`bool`, defaults to `True`): To apply fusion on the VAE.
"""
if unet:
if not self.fusing_unet:
logger.warning("The UNet was not initially fused for QKV projections. Doing nothing.")
else:
self.unet.unfuse_qkv_projections()
self.fusing_unet = False
if vae:
if not self.fusing_vae:
logger.warning("The VAE was not initially fused for QKV projections. Doing nothing.")
else:
self.vae.unfuse_qkv_projections()
self.fusing_vae = False
# Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
"""
See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
Args:
timesteps (`torch.Tensor`):
generate embedding vectors at these timesteps
embedding_dim (`int`, *optional*, defaults to 512):
dimension of the embeddings to generate
dtype:
data type of the generated embeddings
Returns:
`torch.FloatTensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)`
"""
assert len(w.shape) == 1
w = w * 1000.0
half_dim = embedding_dim // 2
emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
emb = w.to(dtype)[:, None] * emb[None, :]
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
if embedding_dim % 2 == 1: # zero pad
emb = torch.nn.functional.pad(emb, (0, 1))
assert emb.shape == (w.shape[0], embedding_dim)
return emb
@property
def guidance_scale(self):
return self._guidance_scale
@property
def guidance_rescale(self):
return self._guidance_rescale
@property
def clip_skip(self):
return self._clip_skip
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
@property
def do_classifier_free_guidance(self):
return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None
@property
def cross_attention_kwargs(self):
return self._cross_attention_kwargs
@property
def num_timesteps(self):
return self._num_timesteps
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
self,
prompt: Union[str, List[str]] = None,
height: Optional[int] = None,
width: Optional[int] = None,
num_inference_steps: int = 50,
timesteps: List[int] = None,
guidance_scale: float = 7.5,
negative_prompt: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
ip_adapter_image: Optional[PipelineImageInput] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
guidance_rescale: float = 0.0,
clip_skip: Optional[int] = None,
callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
callback_on_step_end_tensor_inputs: List[str] = ["latents"],
**kwargs,
):
r"""
The call function to the pipeline for generation.
Args:
prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
The height in pixels of the generated image.
width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
The width in pixels of the generated image.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
timesteps (`List[int]`, *optional*):
Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
passed will be used. Must be in descending order.
guidance_scale (`float`, *optional*, defaults to 7.5):
A higher guidance scale value encourages the model to generate images closely linked to the text
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide what to not include in image generation. If not defined, you need to
pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
generation deterministic.
latents (`torch.FloatTensor`, *optional*):
Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
tensor is generated by sampling using the supplied random `generator`.
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
provided, text embeddings are generated from the `prompt` input argument.
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generated image. Choose between `PIL.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
plain tuple.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
[`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
guidance_rescale (`float`, *optional*, defaults to 0.0):
Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when
using zero terminal SNR.
clip_skip (`int`, *optional*):
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
the output of the pre-final layer will be used for computing the prompt embeddings.
callback_on_step_end (`Callable`, *optional*):
A function that calls at the end of each denoising steps during the inference. The function is called
with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
`callback_on_step_end_tensor_inputs`.
callback_on_step_end_tensor_inputs (`List`, *optional*):
The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
`._callback_tensor_inputs` attribute of your pipeline class.
Examples:
Returns:
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
otherwise a `tuple` is returned where the first element is a list with the generated images and the
second element is a list of `bool`s indicating whether the corresponding generated image contains
"not-safe-for-work" (nsfw) content.
"""
callback = kwargs.pop("callback", None)
callback_steps = kwargs.pop("callback_steps", None)
if callback is not None:
deprecate(
"callback",
"1.0.0",
"Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
)
if callback_steps is not None:
deprecate(
"callback_steps",
"1.0.0",
"Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
)
# 0. Default height and width to unet
height = height or self.unet.config.sample_size * self.vae_scale_factor
width = width or self.unet.config.sample_size * self.vae_scale_factor
# to deal with lora scaling and other possible forward hooks
# 1. Check inputs. Raise error if not correct
self.check_inputs(
prompt,
height,
width,
callback_steps,
negative_prompt,
prompt_embeds,
negative_prompt_embeds,
callback_on_step_end_tensor_inputs,
)
self._guidance_scale = guidance_scale
self._guidance_rescale = guidance_rescale
self._clip_skip = clip_skip
self._cross_attention_kwargs = cross_attention_kwargs
# 2. Define call parameters
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
device = self._execution_device
# 3. Encode input prompt
lora_scale = (
self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
)
prompt_embeds, negative_prompt_embeds = self.encode_prompt(
prompt,
device,
num_images_per_prompt,
self.do_classifier_free_guidance,
negative_prompt,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
lora_scale=lora_scale,
clip_skip=self.clip_skip,
)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
if self.do_classifier_free_guidance:
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
if ip_adapter_image is not None:
output_hidden_state = False if isinstance(self.unet.encoder_hid_proj, ImageProjection) else True
image_embeds, negative_image_embeds = self.encode_image(
ip_adapter_image, device, num_images_per_prompt, output_hidden_state
)
if self.do_classifier_free_guidance:
image_embeds = torch.cat([negative_image_embeds, image_embeds])
# 4. Prepare timesteps
timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
# 5. Prepare latent variables
num_channels_latents = self.unet.config.in_channels
latents = self.prepare_latents(
batch_size * num_images_per_prompt,
num_channels_latents,
height,
width,
prompt_embeds.dtype,
device,
generator,
latents,
)
# 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
# 6.1 Add image embeds for IP-Adapter
added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None
# 6.2 Optionally get Guidance Scale Embedding
timestep_cond = None
if self.unet.config.time_cond_proj_dim is not None:
guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
timestep_cond = self.get_guidance_scale_embedding(
guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
).to(device=device, dtype=latents.dtype)
# 7. Denoising loop
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
self._num_timesteps = len(timesteps)
with self.progress_bar(total=num_inference_steps) as progress_bar:
for i, t in enumerate(timesteps):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
# predict the noise residual
noise_pred = self.unet(
latent_model_input,
t,
encoder_hidden_states=prompt_embeds,
timestep_cond=timestep_cond,
cross_attention_kwargs=self.cross_attention_kwargs,
added_cond_kwargs=added_cond_kwargs,
return_dict=False,
)[0]
# perform guidance
if self.do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
if self.do_classifier_free_guidance and self.guidance_rescale > 0.0:
# Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
if callback_on_step_end is not None:
callback_kwargs = {}
for k in callback_on_step_end_tensor_inputs:
callback_kwargs[k] = locals()[k]
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
latents = callback_outputs.pop("latents", latents)
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
# call the callback, if provided
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
progress_bar.update()
if callback is not None and i % callback_steps == 0:
step_idx = i // getattr(self.scheduler, "order", 1)
callback(step_idx, t, latents)
if not output_type == "latent":
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[
0
]
image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
else:
image = latents
has_nsfw_concept = None
if has_nsfw_concept is None:
do_denormalize = [True] * image.shape[0]
else:
do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
# Offload all models
self.maybe_free_model_hooks()
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
| [
"negative_prompt_embeds",
"prompt_embeds"
] |
2024-01-10 | BlackHC/llm-strategy | llm_strategy~chat_chain.py | import dataclasses
import typing
from dataclasses import dataclass
from typing import Tuple, cast
from langchain.chat_models.base import BaseChatModel
from langchain.output_parsers import PydanticOutputParser
from langchain.schema import BaseMessage, HumanMessage
from pydantic import BaseModel, create_model
T = typing.TypeVar("T")
B = typing.TypeVar("B", bound=BaseModel)
@dataclass
class ChatChain:
chat_model: BaseChatModel
messages: list[BaseMessage]
@property
def response(self) -> str:
assert len(self.messages) >= 1
return cast(str, self.messages[-1].content)
def append(self, messages: list[BaseMessage]) -> "ChatChain":
return dataclasses.replace(self, messages=self.messages + messages)
def __add__(self, other: list[BaseMessage]) -> "ChatChain":
return self.append(other)
def query(self, question: str, model_args: dict | None = None) -> Tuple[str, "ChatChain"]:
"""Asks a question and returns the result in a single block."""
# Build messages:
messages = self.messages + [HumanMessage(content=question)]
model_args = model_args or {}
reply = self.chat_model.invoke(messages, **model_args)
messages.append(reply)
return cast(str, reply.content), dataclasses.replace(self, messages=messages)
def enforce_json_response(self, model_args: dict | None = None) -> dict:
model_args = model_args or {}
# Check if the language model is of type "openai" and extend model args with a response format in that case
model_dict = self.chat_model.dict()
if "openai" in model_dict["_type"] and model_dict.get("model_name") in (
"gpt-4-1106-preview",
"gpt-3.5-turbo-1106",
):
model_args = {**model_args, "response_format": dict(type="json_object")}
return model_args
def structured_query(
self, question: str, return_type: type[B], model_args: dict | None = None
) -> Tuple[B, "ChatChain"]:
"""Asks a question and returns the result in a single block."""
# Build messages:
if typing.get_origin(return_type) is typing.Annotated:
return_info = typing.get_args(return_type)
else:
return_info = (return_type, ...)
output_model = create_model("StructuredOutput", result=return_info)
parser: PydanticOutputParser = PydanticOutputParser(pydantic_object=output_model)
question_and_formatting = question + "\n\n" + parser.get_format_instructions()
reply_content, chain = self.query(question_and_formatting, **self.enforce_json_response(model_args))
parsed_reply: B = typing.cast(B, parser.parse(reply_content))
return parsed_reply, chain
def branch(self) -> "ChatChain":
return dataclasses.replace(self, messages=self.messages.copy())
| [] |
2024-01-10 | BlackHC/llm-strategy | llm_strategy~llm_function.py | # type: ignore
import dis
import functools
import inspect
import json
import re
import string
import types
import typing
from copy import deepcopy
from dataclasses import dataclass
import pydantic
import pydantic.schema
import typing_extensions
from langchain.chat_models.base import BaseChatModel
from langchain.schema import OutputParserException
from langchain_core.language_models import BaseLanguageModel, BaseLLM
from llmtracer import TraceNodeKind, trace_calls, update_event_properties, update_name
from llmtracer.trace_builder import slicer
from pydantic import BaseModel, ValidationError, create_model, generics
from pydantic.fields import FieldInfo, Undefined
from pydantic.generics import replace_types
from llm_hyperparameters.track_hyperparameters import (
Hyperparameter,
track_hyperparameters,
)
from llm_strategy.chat_chain import ChatChain
T = typing.TypeVar("T")
S = typing.TypeVar("S")
P = typing_extensions.ParamSpec("P")
B = typing.TypeVar("B", bound=BaseModel)
C = typing.TypeVar("C", bound=BaseModel)
F = typing.TypeVar("F", bound=typing.Callable)
def get_json_schema_hyperparameters(schema: dict):
"""
Get the hyperparameters from a JSON schema recursively.
The hyperparameters are all fields for keys with "title" or "description".
"""
hyperparameters = {}
for key, value in schema.items():
if key == "description":
hyperparameters[key] = value
elif isinstance(value, dict):
sub_hyperparameters = get_json_schema_hyperparameters(value)
if sub_hyperparameters:
hyperparameters[key] = sub_hyperparameters
return hyperparameters
def update_json_schema_hyperparameters(schema: dict, hyperparameters: dict):
"""
Nested merge of the schema dict with the hyperparameters dict.
"""
for key, value in hyperparameters.items():
if key in schema:
if isinstance(value, dict):
update_json_schema_hyperparameters(schema[key], value)
else:
schema[key] = value
else:
schema[key] = value
def unwrap_function(f: typing.Callable[P, T]) -> typing.Callable[P, T]:
# is f a property?
if isinstance(f, property):
f = f.fget
# is f a wrapped function?
elif hasattr(f, "__wrapped__"):
f = inspect.unwrap(f)
elif inspect.ismethod(f):
f = f.__func__
else:
return f
return unwrap_function(f)
def is_not_implemented(f: typing.Callable) -> bool:
"""Check that a function only raises NotImplementedError."""
unwrapped_f = unwrap_function(f)
if not hasattr(unwrapped_f, "__code__"):
raise ValueError(f"Cannot check whether {f} is implemented. Where is __code__?")
# Inspect the opcodes
code = unwrapped_f.__code__
# Get the opcodes
opcodes = list(dis.get_instructions(code))
# Check that it only uses the following opcodes:
# - RESUME
# - LOAD_GLOBAL
# - PRECALL
# - CALL
# - RAISE_VARARGS
valid_opcodes = {
"RESUME",
"LOAD_GLOBAL",
"PRECALL",
"CALL",
"RAISE_VARARGS",
}
# We allow at most a function of length len(valid_opcodes)
if len(opcodes) > len(valid_opcodes):
return False
for opcode in opcodes:
if opcode.opname not in valid_opcodes:
return False
# Check that the function only raises NotImplementedError
if opcode.opname == "LOAD_GLOBAL" and opcode.argval != "NotImplementedError":
return False
if opcode.opname == "RAISE_VARARGS" and opcode.argval != 1:
return False
valid_opcodes.remove(opcode.opname)
# Check that the function raises a NotImplementedError at the end.
if opcodes[-1].opname != "RAISE_VARARGS":
return False
return True
class TyperWrapper(str):
"""
A wrapper around a type that can be used to create a Pydantic model.
This is used to support @classmethods.
"""
@classmethod
def __get_validators__(cls) -> typing.Iterator[typing.Callable]:
# one or more validators may be yielded which will be called in the
# order to validate the input, each validator will receive as an input
# the value returned from the previous validator
yield cls.validate
@classmethod
def validate(cls, v: type) -> str:
if not isinstance(v, type):
raise TypeError("type required")
return v.__qualname__
class Output(pydantic.generics.GenericModel, typing.Generic[T]):
return_value: T
@dataclass
class LLMStructuredPrompt(typing.Generic[B, T]):
"""
A structured prompt for a language model.
"""
docstring: str
input_type: type[B]
output_type: type[Output[T]]
return_annotation: T
input: B
@staticmethod
def extract_from_definitions(definitions: dict, type_: type) -> dict:
normalized_name = pydantic.schema.normalize_name(type_.__name__)
sub_schema = definitions[normalized_name]
del definitions[normalized_name]
return sub_schema
def get_json_schema(self, exclude_default: bool = True) -> dict:
schema = pydantic.schema.schema([self.input_type, self.output_type], ref_template="{model}")
definitions: dict = deepcopy(schema["definitions"])
# remove title and type from each sub dict in the definitions
for value in definitions.values():
value.pop("title")
value.pop("type")
for property in value.get("properties", {}).values():
property.pop("title", None)
if exclude_default:
property.pop("default", None)
input_schema = self.extract_from_definitions(definitions, self.input_type)
output_schema = self.extract_from_definitions(definitions, self.output_type)
schema = dict(
input_schema=input_schema,
output_schema=output_schema,
additional_definitions=definitions,
)
return schema
@staticmethod
def create(docstring: str, input_type: type[B], return_annotation: T, input: B) -> "LLMStructuredPrompt[B, T]":
"""Create an LLMExplicitFunction."""
# determine the return type
# the return type can be a type annotation or an Annotated type with annotation being a FieldInfo
if typing.get_origin(return_annotation) is typing.Annotated:
return_info = typing.get_args(return_annotation)
else:
return_info = (return_annotation, ...)
# resolve generic types
generic_type_map = LLMStructuredPrompt.resolve_generic_types(input_type, input)
return_type: type = LLMStructuredPrompt.resolve_type(return_info[0], generic_type_map)
if return_type is types.NoneType: # noqa: E721
raise ValueError(f"Resolve return type {return_info[0]} is None! This would be a NOP.")
return_info = (return_type, return_info[1])
if typing.get_origin(return_annotation) is typing.Annotated:
assert hasattr(return_annotation, "copy_with")
resolved_return_annotation = return_annotation.copy_with([return_info[0]])
else:
resolved_return_annotation = return_info[0]
# create the output model
resolved_output_model_type = Output[return_type] # noqa
# resolve input_type
resolved_input_type = LLMStructuredPrompt.resolve_type(input_type, generic_type_map)
return LLMStructuredPrompt(
docstring=docstring,
input_type=resolved_input_type,
output_type=resolved_output_model_type,
return_annotation=resolved_return_annotation,
input=input,
)
@staticmethod
def resolve_type(source_type: type, generic_type_map: dict[type, type]) -> type:
"""
Resolve a type using the generic type map.
Supports Pydantic.GenericModel and typing.Generic.
"""
if source_type in generic_type_map:
source_type = generic_type_map[source_type]
if isinstance(source_type, type) and issubclass(source_type, generics.GenericModel):
base_generic_type = LLMStructuredPrompt.get_base_generic_type(source_type)
generic_parameter_type_map = LLMStructuredPrompt.get_generic_type_map(source_type, base_generic_type)
# forward step using the generic type map
resolved_generic_type_map = {
generic_type: generic_type_map.get(target_type, target_type)
for generic_type, target_type in generic_parameter_type_map.items()
}
resolved_tuple = tuple(
resolved_generic_type_map[generic_type] for generic_type in base_generic_type.__parameters__
)
source_type = base_generic_type[resolved_tuple]
else:
# we let Pydantic handle the rest
source_type = replace_types(source_type, generic_type_map)
return source_type
@staticmethod
def resolve_generic_types(model: type[BaseModel], instance: BaseModel):
generic_type_map: dict = {}
for field_name, attr_value in list(instance):
if field_name not in model.__annotations__:
continue
annotation = model.__annotations__[field_name]
# if the annotation is an Annotated type, get the type annotation
if typing.get_origin(annotation) is typing.Annotated:
annotation = typing.get_args(annotation)[0]
# if the annotation is a type var, resolve it into the generic type map
if isinstance(annotation, typing.TypeVar):
LLMStructuredPrompt.add_resolved_type(generic_type_map, annotation, type(attr_value))
# if the annotation is a generic type alias ignore
elif isinstance(annotation, types.GenericAlias):
continue
# if the annotation is a type, check if it is a generic type
elif issubclass(annotation, generics.GenericModel):
# check if the type is in generics._assigned_parameters
generic_definition_type_map = LLMStructuredPrompt.get_generic_type_map(annotation)
argument_type = type(attr_value)
generic_instance_type_map = LLMStructuredPrompt.get_generic_type_map(argument_type)
assert list(generic_definition_type_map.keys()) == list(generic_instance_type_map.keys())
# update the generic type map
# if the generic type is already in the map, check that it is the same
for generic_parameter, generic_parameter_target in generic_definition_type_map.items():
if generic_parameter_target not in annotation.__parameters__:
continue
resolved_type = generic_instance_type_map[generic_parameter]
LLMStructuredPrompt.add_resolved_type(generic_type_map, generic_parameter_target, resolved_type)
return generic_type_map
@staticmethod
def add_resolved_type(generic_type_map, source_type, resolved_type):
"""
Add a resolved type to the generic type map.
"""
if source_type in generic_type_map:
# TODO: support finding the common base class?
if (previous_resolution := generic_type_map[source_type]) is not resolved_type:
raise ValueError(
f"Cannot resolve generic type {source_type}, conflicting "
f"resolution: {previous_resolution} and {resolved_type}."
)
else:
generic_type_map[source_type] = resolved_type
@staticmethod
def get_generic_type_map(generic_type, base_generic_type=None):
if base_generic_type is None:
base_generic_type = LLMStructuredPrompt.get_base_generic_type(generic_type)
base_classes = inspect.getmro(generic_type)
# we have to iterate through the base classes
generic_parameter_type_map = {generic_type: generic_type for generic_type in generic_type.__parameters__}
for base_class in base_classes:
# skip baseclasses that are from pydantic.generic
# this avoids a bug that is caused by generics.GenericModel.__parameterized_bases_
if base_class.__module__ == "pydantic.generics":
continue
if issubclass(base_class, base_generic_type):
if base_class in generics._assigned_parameters:
assignment = generics._assigned_parameters[base_class]
generic_parameter_type_map = {
old_generic_type: generic_parameter_type_map.get(new_generic_type, new_generic_type)
for old_generic_type, new_generic_type in assignment.items()
}
return generic_parameter_type_map
@staticmethod
def get_base_generic_type(field_type) -> type[generics.GenericModel]:
# get the base class name from annotation (which is without [])
base_generic_name = field_type.__name__
if "[" in field_type.__name__:
base_generic_name = field_type.__name__.split("[")[0]
# get the base class from argument_type_base_classes with base_generic_name
for base_class in reversed(inspect.getmro(field_type)):
if base_class.__name__ == base_generic_name and issubclass(field_type, base_class):
base_generic_type = base_class
break
else:
raise ValueError(f"Could not find base generic type {base_generic_name} for {field_type}.")
return base_generic_type
@trace_calls(name="LLMStructuredPrompt", kind=TraceNodeKind.CHAIN, capture_args=False, capture_return=False)
def __call__(
self,
language_model_or_chat_chain: BaseLanguageModel | ChatChain,
) -> T:
"""Call the function."""
# check that the first argument is an instance of BaseLanguageModel
# or a TrackedChatChain or UntrackedChatChain
if not isinstance(language_model_or_chat_chain, BaseLanguageModel | ChatChain):
raise ValueError("The first parameter must be an instance of BaseLanguageModel or ChatChain.")
# get the input and output schema as JSON dict
schema = self.get_json_schema()
# print(json.dumps(schema, indent=1))
update_json_schema_hyperparameters(
schema,
Hyperparameter("json_schema") @ get_json_schema_hyperparameters(schema),
)
update_event_properties(
dict(
arguments=dict(self.input),
)
)
parsed_output = self.query(language_model_or_chat_chain, schema)
# print(f"Input: {self.input.json(indent=1)}")
# print(f"Output: {json.dumps(json.loads(parsed_output.json())['return_value'], indent=1)}")
update_event_properties(dict(result=parsed_output.return_value))
return parsed_output.return_value
@track_hyperparameters
def query(self, language_model_or_chat_chain, schema): # noqa: C901
# create the prompt
json_dumps_kwargs = Hyperparameter("json_dumps_kwargs") @ dict(indent=None)
additional_definitions_prompt_template = Hyperparameter(
"additional_definitions_prompt_template",
"Here is the schema for additional data types:\n```\n{additional_definitions}\n```\n\n",
)
optional_additional_definitions_prompt = ""
if schema["additional_definitions"]:
optional_additional_definitions_prompt = additional_definitions_prompt_template.format(
additional_definitions=json.dumps(schema["additional_definitions"], **json_dumps_kwargs)
)
prompt = (
Hyperparameter(
"llm_structured_prompt_template",
description=(
"The general-purpose prompt for the structured prompt execution. It tells the LLM what to "
"do and how to read function arguments and structure return values. "
),
)
@ '{docstring}\n\nThe input and output are formatted as a JSON interface that conforms to the JSON schemas below.\n\nAs an example, for the schema {{"properties": {{"foo": {{"description": "a list of strings", "type": "array", "items": {{"type": "string"}}}}}}, "required": ["foo"]}}}} the object {{"foo": ["bar", "baz"]}} is a well-formatted instance of the schema. The object {{"properties": {{"foo": ["bar", "baz"]}}}} is not well-formatted.\n\n{optional_additional_definitions_prompt}Here is the input schema:\n```\n{input_schema}\n```\n\nHere is the output schema:\n```\n{output_schema}\n```\nNow output the results for the following inputs:\n```\n{inputs}\n```'
).format(
docstring=self.docstring,
optional_additional_definitions_prompt=optional_additional_definitions_prompt,
input_schema=json.dumps(schema["input_schema"], **json_dumps_kwargs),
output_schema=json.dumps(schema["output_schema"], **json_dumps_kwargs),
inputs=self.input.json(**json_dumps_kwargs),
)
# get the response
num_retries = Hyperparameter("num_retries_on_parser_failure") @ 3
if language_model_or_chat_chain is None:
raise ValueError("The language model or chat chain must be provided.")
if isinstance(language_model_or_chat_chain, BaseChatModel):
language_model_or_chat_chain = ChatChain(language_model_or_chat_chain, [])
if isinstance(language_model_or_chat_chain, ChatChain):
chain = language_model_or_chat_chain
for _ in range(num_retries):
output, chain = chain.query(prompt, model_args=chain.enforce_json_response())
try:
parsed_output = parse(output, self.output_type)
break
except OutputParserException as e:
prompt = (
Hyperparameter("error_prompt") @ "Tried to parse your output but failed:\n\n"
+ str(e)
+ Hyperparameter("retry_prompt") @ "\n\nPlease try again and avoid this issue."
)
else:
exception = OutputParserException(f"Failed to parse the output after {num_retries} retries.")
exception.add_note(chain)
raise exception
elif isinstance(language_model_or_chat_chain, BaseLLM):
model: BaseChatModel = language_model_or_chat_chain
# Check if the language model is of type "openai" and extend model args with a response format in that case
model_dict = model.dict()
if "openai" in model_dict["_type"] and model_dict.get("model_name") in (
"gpt-4-1106-preview",
"gpt-3.5-turbo-1106",
):
model_args = dict(response_format=dict(type="json_object"))
else:
model_args = {}
for _ in range(num_retries):
output = model(prompt, **model_args)
try:
parsed_output = parse(output, self.output_type)
break
except OutputParserException as e:
prompt = (
prompt
+ Hyperparameter("output_prompt") @ "\n\nReceived the output\n\n"
+ output
+ Hyperparameter("error_prompt") @ "Tried to parse your output but failed:\n\n"
+ str(e)
+ Hyperparameter("retry_prompt") @ "\n\nPlease try again and avoid this issue."
)
else:
exception = OutputParserException(f"Failed to parse the output after {num_retries} retries.")
exception.add_note(prompt)
raise exception
else:
raise ValueError("The language model or chat chain must be provided.")
return parsed_output
@dataclass
class LLMBoundSignature:
"""
A function call that can be used to generate a prompt.
"""
structured_prompt: LLMStructuredPrompt
signature: inspect.Signature
@property
def input_type(self) -> type[BaseModel]:
"""Return the input type."""
return self.structured_prompt.input_type
@property
def output_type(self) -> type[BaseModel]:
"""Return the output type."""
return self.structured_prompt.output_type
@property
def docstring(self) -> str:
"""Return the docstring."""
return self.structured_prompt.docstring
@property
def return_annotation(self) -> str:
"""Return the name."""
return self.structured_prompt.return_annotation
def get_input_object(self, *args: P.args, **kwargs: P.kwargs) -> BaseModel:
"""Call the function and return the inputs."""
# bind the inputs to the signature
bound_arguments = LLMBoundSignature.bind(self.signature, args, kwargs)
# get the arguments
arguments = bound_arguments.arguments
inputs = self.structured_prompt.input_type(**arguments)
return inputs
@staticmethod
def from_call(f: typing.Callable[P, T], args: P.args, kwargs: P.kwargs) -> "LLMBoundSignature": # noqa: C901
"""Create an LLMBoundSignature from a function.
Args:
f: The function to create the LLMBoundSignature from.
args: The positional arguments to the function (but excluding the language model/first param).
kwargs: The keyword arguments to the function.
"""
# get clean docstring
docstring = inspect.getdoc(f)
if docstring is None:
raise ValueError("The function must have a docstring.")
# get the type of the first argument
signature = inspect.signature(f, eval_str=True)
# get all parameters
parameters_items: list[tuple[str, inspect.Parameter]] = list(signature.parameters.items())
# check that there is at least one parameter
if not parameters_items:
raise ValueError("The function must have at least one parameter.")
# check that the first parameter has a type annotation that is an instance of BaseLanguageModel
# or a TrackedChatChain
first_parameter: inspect.Parameter = parameters_items[0][1]
if first_parameter.annotation is not inspect.Parameter.empty:
if not issubclass(first_parameter.annotation, BaseLanguageModel | ChatChain):
raise ValueError("The first parameter must be an instance of BaseLanguageModel or ChatChain.")
return_type = signature.return_annotation
if return_type is inspect.Parameter.empty:
raise ValueError("The function must have a return type.")
# create a pydantic model from the parameters
parameter_dict = LLMBoundSignature.parameter_items_to_field_tuple(parameters_items[1:])
# turn function name into a class name
class_name = string.capwords(f.__name__, sep="_").replace("_", "")
# create the input model
# collect all __parameters__ from the type annotations
# this is necessary because we need to know the types of the parameters
# to create the pydantic model
generic_parameters: set[typing.TypeVar] = set()
for parameter in parameter_dict.values():
annotation = parameter[0]
# unwrap any Annotated types
while hasattr(annotation, "__metadata__"):
annotation = annotation.__origin__
# if the annotation is already a type variable, add it to the set
if isinstance(annotation, typing.TypeVar):
generic_parameters.add(annotation)
# if the annotation is a generic type, add the parameters to the set
if hasattr(annotation, "__parameters__"):
generic_parameters.update(annotation.__parameters__)
model_spec = LLMBoundSignature.field_tuples_to_model_spec(parameter_dict)
if generic_parameters:
bases = (pydantic.generics.GenericModel, typing.Generic[*generic_parameters])
input_type = create_model(f"{class_name}Inputs", __base__=bases, __module__=f.__module__, **model_spec)
else:
input_type = create_model(f"{class_name}Inputs", __module__=f.__module__, **model_spec)
input_type.update_forward_refs()
# update parameter_dict types with bound_arguments
# this ensures that we serialize the actual types
# might not be optimal because the language model won't be aware of original types, however
bound_arguments = LLMBoundSignature.bind(signature, args, kwargs)
for parameter_name in parameter_dict:
if parameter_name in bound_arguments.arguments:
parameter_dict[parameter_name] = (
type(bound_arguments.arguments[parameter_name]),
parameter_dict[parameter_name][1],
)
specific_model_spec = LLMBoundSignature.field_tuples_to_model_spec(parameter_dict)
specific_input_type = create_model(
f"Specific{class_name}Inputs", __module__=f.__module__, **specific_model_spec
)
specific_input_type.update_forward_refs()
input = specific_input_type(**bound_arguments.arguments)
llm_structured_prompt: LLMStructuredPrompt = LLMStructuredPrompt.create(
docstring=docstring,
input_type=input_type,
return_annotation=return_type,
input=input,
)
return LLMBoundSignature(llm_structured_prompt, signature)
@staticmethod
def parameter_items_to_field_tuple(parameters_items: list[tuple[str, inspect.Parameter]]):
"""
Get the parameter definitions for a function call from the parameters and arguments.
"""
parameter_dict: dict = {}
for parameter_name, parameter in parameters_items:
# every parameter must be annotated or have a default value
annotation = parameter.annotation
if annotation is type:
annotation = TyperWrapper
if parameter.default is inspect.Parameter.empty:
parameter_dict[parameter_name] = (annotation, ...)
else:
parameter_dict[parameter_name] = (annotation, parameter.default)
return parameter_dict
@staticmethod
def field_tuples_to_model_spec(
field_tuples_dict: dict[str, tuple[str, tuple[type, ...]]]
) -> dict[str, tuple[type, object] | object]:
"""
Get the parameter definitions for a function call from the parameters and arguments.
"""
parameter_dict: dict = {}
for parameter_name, (annotation, default) in field_tuples_dict.items():
# every parameter must be annotated or have a default value
if default is ...:
parameter_dict[parameter_name] = (annotation, ...)
else:
if annotation is not inspect.Parameter.empty:
parameter_dict[parameter_name] = (annotation, default)
else:
parameter_dict[parameter_name] = default
return parameter_dict
@staticmethod
def get_or_create_pydantic_default(field: FieldInfo):
if field.default is not Undefined:
if field.default is Ellipsis:
return inspect.Parameter.empty
return field.default
if field.default_factory is not None:
return field.default_factory()
return None
@staticmethod
def bind(signature, args, kwargs):
"""
Bind function taking into account Field definitions and defaults.
The first parameter from the original signature is dropped (as it is the language model or chat chain).
args and kwargs are bound to the remaining parameters.
"""
# resolve parameter defaults to FieldInfo.default if the parameter is a field
signature_fixed_defaults = signature.replace(
parameters=[
parameter.replace(default=LLMBoundSignature.get_or_create_pydantic_default(parameter.default))
if isinstance(parameter.default, FieldInfo)
else parameter
for parameter in list(signature.parameters.values())[1:]
]
)
bound_arguments = signature_fixed_defaults.bind(*args, **kwargs)
bound_arguments.apply_defaults()
return bound_arguments
class LLMFunctionInterface(typing.Generic[P, T], typing.Callable[P, T]):
def get_structured_prompt(self, *args: P.args, **kwargs: P.kwargs) -> LLMStructuredPrompt:
raise NotImplementedError
def llm_bound_signature(self, *args, **kwargs) -> LLMBoundSignature:
raise NotImplementedError
def __call__(self, *args, **kwargs):
raise NotImplementedError
class LLMFunction(LLMFunctionInterface[P, T], typing.Generic[P, T]):
"""
A callable that can be called with a chat model.
"""
def llm_bound_signature(self, *args, **kwargs) -> LLMBoundSignature:
return LLMBoundSignature.from_call(self, args, kwargs)
def get_input_object(self, *args, **kwargs) -> BaseModel:
return self.llm_bound_signature(*args, **kwargs).get_input_object(*args, **kwargs)
def __get__(self, instance: object, owner: type | None = None) -> typing.Callable:
"""Support instance methods."""
if instance is None:
return self
# Bind self to instance as MethodType
return types.MethodType(self, instance)
def __getattr__(self, item):
return getattr(self.__wrapped__, item)
def explicit(self, language_model_or_chat_chain: BaseLanguageModel | ChatChain, input_object: BaseModel):
"""Call the function with explicit inputs."""
return self(language_model_or_chat_chain, **dict(input_object))
@trace_calls(kind=TraceNodeKind.CHAIN, capture_return=slicer[1:], capture_args=True)
def __call__(
self,
language_model_or_chat_chain: BaseLanguageModel | ChatChain,
*args: P.args,
**kwargs: P.kwargs,
) -> T:
"""Call the function."""
update_name(self.__name__)
# check that the first argument is an instance of BaseLanguageModel
# or a TrackedChatChain or UntrackedChatChain
if not isinstance(language_model_or_chat_chain, BaseLanguageModel | ChatChain):
raise ValueError("The first parameter must be an instance of BaseLanguageModel or ChatChain.")
# We expect that we wrap a function that raises NotImplementedError
# We call it, so we can set breakpoints in the function
try:
self.__wrapped__(language_model_or_chat_chain, *args, **kwargs)
raise ValueError("The function must raise NotImplementedError.")
except NotImplementedError:
pass
llm_bound_signature = LLMBoundSignature.from_call(self, args, kwargs)
return_value = llm_bound_signature.structured_prompt(language_model_or_chat_chain)
return return_value
class LLMExplicitFunction(LLMFunctionInterface[P, T], typing.Generic[P, T]):
"""
A callable that can be called with a chat model.
"""
def llm_bound_signature(self, input: BaseModel) -> LLMBoundSignature:
"""Create an LLMFunctionSpec from a function."""
# get clean docstring of
docstring = inspect.getdoc(self)
if docstring is None:
raise ValueError("The function must have a docstring.")
# get the type of the first argument
signature = inspect.signature(self, eval_str=True)
# get all parameters
parameters_items: list[tuple[str, inspect.Parameter]] = list(signature.parameters.items())
# check that there is at least one parameter
if not parameters_items:
raise ValueError("The function must have at least one parameter.")
# check that the first parameter has a type annotation that is an instance of BaseLanguageModel
# or a TrackedChatChain
first_parameter: inspect.Parameter = parameters_items[0][1]
if first_parameter.annotation is not inspect.Parameter.empty:
if not issubclass(first_parameter.annotation, BaseLanguageModel | ChatChain):
raise ValueError("The first parameter must be an instance of BaseLanguageModel or ChatChain.")
second_parameter: inspect.Parameter = parameters_items[1][1]
llm_structured_prompt = LLMStructuredPrompt.create(
docstring=docstring,
input_type=second_parameter.annotation,
return_annotation=signature.return_annotation,
input=input,
)
return LLMBoundSignature(llm_structured_prompt, signature)
def __get__(self, instance: object, owner: type | None = None) -> typing.Callable:
"""Support instance methods."""
if instance is None:
return self
# Bind self to instance as MethodType
return types.MethodType(self, instance)
def __getattr__(self, item):
return getattr(self.__wrapped__, item)
@trace_calls(kind=TraceNodeKind.CHAIN, capture_return=True, capture_args=slicer[1:])
def __call__(self, language_model_or_chat_chain: BaseLanguageModel | ChatChain, input: BaseModel) -> T:
"""Call the function."""
update_name(self.__name__)
# check that the first argument is an instance of BaseLanguageModel
# or a TrackedChatChain or UntrackedChatChain
if not isinstance(language_model_or_chat_chain, BaseLanguageModel | ChatChain):
raise ValueError("The first parameter must be an instance of BaseLanguageModel or ChatChain.")
# We expect that we wrap a function that raises NotImplementedError
# We call it, so we can set breakpoints in the function
try:
self.__wrapped__(language_model_or_chat_chain, input)
raise ValueError("The function must raise NotImplementedError.")
except NotImplementedError:
pass
llm_bound_signature = self.llm_bound_signature(input)
return_value = llm_bound_signature.structured_prompt(language_model_or_chat_chain)
return return_value
F_types: typing.TypeAlias = (
F
| LLMFunction[P, T]
| LLMExplicitFunction[P, T]
| types.MethodType
| types.FunctionType
| types.ClassMethodDescriptorType
| types.MethodDescriptorType
| types.MemberDescriptorType
| types.MethodWrapperType
| LLMFunctionInterface
)
def apply_decorator(f: F_types, decorator) -> F_types:
"""
Apply a decorator to a function.
This function is used to apply a decorator to a function, while preserving the function type.
This is useful when we want to apply a decorator to a function that is a classmethod, staticmethod, property,
or a method of a class.
Parameters
----------
f: F_types
The function to decorate.
decorator: Callable
The decorator to apply.
Returns
-------
F_types
The decorated function.
Raises
------
ValueError
If the function is a classmethod, staticmethod, property, or a method of a class.
"""
specific_llm_function: object
if isinstance(f, classmethod):
raise ValueError("Cannot decorate classmethod with llm_strategy (no translation of cls: type atm).")
elif isinstance(f, staticmethod):
specific_llm_function = staticmethod(apply_decorator(f.__func__, decorator))
elif isinstance(f, property):
specific_llm_function = property(apply_decorator(f.fget, decorator), doc=f.__doc__)
elif isinstance(f, types.MethodType):
specific_llm_function = types.MethodType(apply_decorator(f.__func__, decorator), f.__self__)
elif hasattr(f, "__wrapped__"):
return apply_decorator(f.__wrapped__, decorator)
elif isinstance(f, LLMFunctionInterface):
specific_llm_function = f
elif not callable(f):
raise ValueError(f"Cannot decorate {f} with llm_strategy.")
else:
if not is_not_implemented(f):
raise ValueError("The function must not be implemented.")
specific_llm_function = track_hyperparameters(functools.wraps(f)(decorator(f)))
return typing.cast(F_types, specific_llm_function)
def llm_explicit_function(f: F_types) -> F_types:
"""
Decorator to wrap a function with a chat model.
f is a function to a dataclass or Pydantic model.
The docstring of the function provides instructions for the model.
"""
return apply_decorator(f, lambda f: LLMExplicitFunction())
def llm_function(f: F_types) -> F_types:
"""
Decorator to wrap a function with a chat model.
f is a function to a dataclass or Pydantic model.
The docstring of the function provides instructions for the model.
"""
return apply_decorator(f, lambda f: LLMFunction())
def parse(text: str, output_model: type[B]) -> B:
try:
# Greedy search for 1st json candidate.
match = re.search(r"\{.*\}", text.strip(), re.MULTILINE | re.IGNORECASE | re.DOTALL)
json_str = ""
if match:
json_str = match.group()
json_object = json.loads(json_str)
return output_model.parse_obj(json_object)
except (json.JSONDecodeError, ValidationError) as e:
msg = f'Failed to parse the last reply. Expected: `{{"return_value": ...}}` Got: {e}'
raise OutputParserException(msg)
_typing_GenericAlias = type(typing.List[int])
# TODO: move this to the hyperparameter optimizer
def get_concise_type_repr(return_type: type):
"""Return a shorter (string) representation of the return type.
Examples:
<class 'str'> -> str
<class 'int'> -> int
<class 'CustomType'> -> CustomType
<class 'typing.List[typing.Dict[str, int]]'> -> List[Dict[str, int]]
For generic types, we want to keep the type arguments as well.
<class 'typing.List[typing.Dict[str, int]]'> -> List[Dict[str, int]]
<class 'PydanticGenericModel[typing.Dict[str, int]]'> -> PydanticGenericModel[Dict[str, int]]
For unspecialized generic types, we want to keep the type arguments as well.
so for class PydanticGenericModel(Generic[T]): pass:
-> PydanticGenericModel[T]
"""
assert isinstance(return_type, type | types.GenericAlias | _typing_GenericAlias | typing.TypeVar), return_type
name = return_type.__name__
# is it a specialized generic type?
if hasattr(return_type, "__origin__"):
origin = return_type.__origin__
if origin is not None:
# is it a generic type with type arguments?
if hasattr(return_type, "__args__"):
args = return_type.__args__
if args:
# is it a generic type with type arguments?
args_str = ", ".join([get_concise_type_repr(arg) for arg in args])
return f"{origin.__name__}[{args_str}]"
# is it a unspecialized generic type?
if hasattr(return_type, "__parameters__"):
parameters = return_type.__parameters__
if parameters:
# is it a generic type without type arguments?
parameters_str = ", ".join([get_concise_type_repr(parameter) for parameter in parameters])
return f"{name}[{parameters_str}]"
return name
| [
"additional_definitions_prompt_template",
"{docstring}\n\nThe input and output are formatted as a JSON interface that conforms to the JSON schemas below.\n\nAs an example, for the schema {{\"properties\": {{\"foo\": {{\"description\": \"a list of strings\", \"type\": \"array\", \"items\": {{\"type\": \"string\"}}}}}}, \"required\": [\"foo\"]}}}} the object {{\"foo\": [\"bar\", \"baz\"]}} is a well-formatted instance of the schema. The object {{\"properties\": {{\"foo\": [\"bar\", \"baz\"]}}}} is not well-formatted.\n\n{optional_additional_definitions_prompt}Here is the input schema:\n```\n{input_schema}\n```\n\nHere is the output schema:\n```\n{output_schema}\n```\nNow output the results for the following inputs:\n```\n{inputs}\n```",
"error_prompt",
"input_schema",
"json_dumps_kwargs",
"additional_definitions",
"llm_structured_prompt_template",
"\n\nReceived the output\n\n",
"The general-purpose prompt for the structured prompt execution. It tells the LLM what to ",
"do and how to read function arguments and structure return values. ",
"Tried to parse your output but failed:\n\n",
"retry_prompt",
"output_prompt",
"Hyperparameter(",
"\n\nPlease try again and avoid this issue.",
"Here is the schema for additional data types:\n```\n{additional_definitions}\n```\n\n"
] |
2024-01-10 | BlackHC/llm-strategy | llm_strategy~testing~tests~test_fake_llm.py | import langchain
import pytest
from llm_strategy.testing import fake_llm
langchain.llm_cache = None
def test_fake_llm_query():
"""Test that the fake LLM returns the correct query."""
llm = fake_llm.FakeLLM(texts={"foobar"})
assert llm("foo") == "bar"
def test_fake_llm_query_with_stop():
"""Test that the fake LLM returns the correct query."""
llm = fake_llm.FakeLLM(texts={"foobar"})
assert llm("foo", stop=["a"]) == "b"
def test_fake_llm_missing_query():
"""Test that the fake LLM raises an error if the query is missing."""
llm = fake_llm.FakeLLM(texts=set())
with pytest.raises(NotImplementedError):
raise ValueError(llm("foo"))
| [] |
2024-01-10 | BlackHC/llm-strategy | examples~customer_database_search.py | """
A simple CUI application to visualize and query a customer database using the `textual` package.
"""
from dataclasses import dataclass
import langchain
from langchain.cache import SQLiteCache
from langchain.llms import OpenAI
from textual.app import App, ComposeResult
from textual.containers import Horizontal
from textual.widgets import Button, DataTable, Footer, Header, Input
from llm_strategy import llm_strategy
langchain.llm_cache = SQLiteCache()
base_llm = OpenAI(max_tokens=1024)
@llm_strategy(base_llm)
@dataclass
class Customer:
key: str
first_name: str
last_name: str
birthdate: str
address: str
@property
def age(self: "Customer") -> int:
"""Return the current age of the customer.
This is a computed property based on `birthdate` and the current year (2022).
"""
raise NotImplementedError()
@dataclass
class CustomerDatabase:
customers: list[Customer]
def find_customer_key(self: "CustomerDatabase", query: str) -> list[str]:
"""Find the keys of the customers that match a natural language query best (sorted by closeness to the match).
We support semantic queries instead of SQL, so we can search for things like
"the customer that was born in 1990".
Args:
query: Natural language query
Returns:
The index of the best matching customer in the database.
"""
raise NotImplementedError()
def load(self: "CustomerDatabase"):
"""Load the customer database from a file."""
raise NotImplementedError()
def store(self: "CustomerDatabase"):
"""Store the customer database to a file."""
raise NotImplementedError()
@llm_strategy(base_llm)
@dataclass
class MockCustomerDatabase(CustomerDatabase):
def load(self):
self.customers = self.create_mock_customers(10)
def store(self):
pass
@staticmethod
def create_mock_customers(num_customers: int = 1) -> list[Customer]:
"""
Create mock customers with believable data (our customers are world citizens).
"""
raise NotImplementedError()
class CustomerDatabaseApp(App):
"""A simple textual application to visualize and query a customer database.
We show all the customers in a table and allow the user to query the database using natural language
in a search box at the bottom of the screen.
"""
PRIORITY_BINDINGS = False
BINDINGS = [("q", "quit", "Quit the application"), ("s", "screenshot", "Take a screenshot")]
database: CustomerDatabase = MockCustomerDatabase([])
data_table = DataTable(id="customer_table")
search_box = Input(id="search_box", placeholder="Search for a customer (use any kind of query")
footer_bar = Horizontal(search_box)
def on_mount(self) -> None:
self.database.load()
self.data_table.add_columns("First Name", "Last Name", "Birthdate", "Address", "Age")
self.search("")
def compose(self) -> ComposeResult:
self.footer_bar.styles.dock = "bottom"
self.footer_bar.styles.width = "100%"
self.footer_bar.styles.height = 4
self.data_table.styles.height = "auto"
self.data_table.styles.width = "100%"
self.screen.styles.height = "100%"
self.search_box.styles.width = "100%"
yield Header()
yield self.footer_bar
yield Footer()
yield self.data_table
def search(self, query: str):
"""Search the customer database using a natural language query."""
self.data_table.clear()
if not query:
for customer in self.database.customers:
self.data_table.add_row(
# customer.key,
customer.first_name,
customer.last_name,
customer.birthdate,
customer.address,
str(customer.age),
)
else:
keys = self.database.find_customer_key(query)
for key in keys:
customers_for_key = [customer for customer in self.database.customers if customer.key == key]
assert len(customers_for_key) == 1
customer = customers_for_key[0]
self.data_table.add_row(
# customer.key,
customer.first_name,
customer.last_name,
customer.birthdate,
customer.address,
str(customer.age),
)
def on_button_pressed(self, event: Button.Pressed) -> None:
if event.button is self.exit_button:
self.exit()
def on_input_submitted(self, event: Input.Submitted) -> None:
if event.input is self.search_box:
self.search(event.value)
if __name__ == "__main__":
app = CustomerDatabaseApp()
app.run()
| [] |
Subsets and Splits