#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2020-2024 (c) Randy W @xtdevs, @xtsea
#
# from : https://github.com/TeamKillerX
# Channel : @RendyProjects
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import asyncio
import datetime
import os

from huggingface_hub import InferenceClient
from pyrogram import Client, enums, filters
from pyrogram.types import *
from pyrogram.errors import *

from config import *
from database import db
from logger import LOGS
from chatbot.plugins.user_database import users_collection
from chatbot.plugins.keyboards import get_language_keyboard
from chatbot.plugins.languages import LANGUAGES
from . import force_sub
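
# System prompt for the model. Note that the f-string below is evaluated once
# at import time, so the embedded timestamp is frozen when the module loads.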
SYSTEM_PROMPT = f"""
Your name is Randy Dev, a kind and friendly AI assistant that answers in short, concise replies.
Give short step-by-step reasoning if required.
Powered by Python, supported by @xtdevs on Telegram, using Meta AI language models.
{datetime.datetime.now()}
"""
async def process_stream(message, prompt):
    client_hf = InferenceClient(api_key=HF_KEY)
    # backup_chat = await db._get_openai_chat_from_db(message.from_user.id)
    # backup_chat.append({"role": "system", "content": SYSTEM_PROMPT})
    # backup_chat.append({"role": "user", "content": prompt})
    messages = [
        {"role": "system", "content": SYSTEM_PROMPT},
        {"role": "user", "content": prompt}
    ]
    # Note: the HF client is synchronous, so this call blocks the event loop
    # while the stream is consumed.
    stream = client_hf.chat.completions.create(
        model="mistralai/Mixtral-8x7B-Instruct-v0.1",
        messages=messages,
        max_tokens=500,
        stream=True
    )
    accumulated_text = ""
    for chunk in stream:
        LOGS.info(chunk)
        # delta.content can be None on some chunks; treat it as an empty string.
        new_content = chunk.choices[0].delta.content or ""
        accumulated_text += new_content
    # backup_chat.append({"role": "user", "content": accumulated_text})
    # await db._update_openai_chat_in_db(message.from_user.id, backup_chat)
    return accumulated_text
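
# /start: greet the user and show the developer and channel buttons.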
@Client.on_message(
    ~filters.scheduled
    & filters.command(["start"])
    & ~filters.forwarded
)
async def startbot(client: Client, message: Message):
    buttons = [
        [
            InlineKeyboardButton(
                text="Developer",
                url="https://t.me/xtdevs"
            ),
            InlineKeyboardButton(
                text="Channel",
                url="https://t.me/RendyProjects"
            ),
        ]
    ]
    await message.reply_text(
        text="Woohoo! Welcome! I'm excited to get started as a Meta AI bot!\n\n• Command /ask hello",
        disable_web_page_preview=True,
        reply_markup=InlineKeyboardMarkup(buttons)
    )
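
# /ask: answer a prompt taken from the command arguments or a replied-to
# message; output longer than Telegram's 4096-character message limit is
# sent as a text file instead.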
@Client.on_message(
    filters.private
    & filters.command(["ask"])
    & ~filters.forwarded
)
@force_sub
async def askcmd(client: Client, message: Message):
    if len(message.command) > 1:
        prompt = message.text.split(maxsplit=1)[1]
    elif message.reply_to_message and message.reply_to_message.text:
        prompt = message.reply_to_message.text
    else:
        return await message.reply_text("Please give me a question to ask Meta AI, e.g. /ask hello")
    await client.send_chat_action(message.chat.id, enums.ChatAction.TYPING)
    await asyncio.sleep(1.5)
    try:
        output = await process_stream(message, prompt)
        if len(output) > 4096:
            # Telegram messages are capped at 4096 characters; send long output as a file.
            with open("chat.txt", "w+", encoding="utf8") as out_file:
                out_file.write(output)
            await message.reply_document(
                document="chat.txt",
                disable_notification=True
            )
            os.remove("chat.txt")
        else:
            await message.reply_text(output, disable_web_page_preview=True)
        await client.send_chat_action(message.chat.id, enums.ChatAction.CANCEL)
        return
    except Exception as e:
        return await message.reply_text(f"Error: {e}")