|
import asyncio
import logging
import os
import re

from dotenv import load_dotenv
from pyrogram import Client, filters, idle
from pyrogram.types import Message

import google.generativeai as genai
from google.api_core.exceptions import InvalidArgument

from RyuzakiLib import GeminiLatest

from database import db
from logger import LOGS
|
|
|
|
|
# Load credentials from the environment. `load_dotenv` was imported but never
# called in the original, leaving API_ID / API_HASH / BOT_TOKEN /
# GOOGLE_API_KEY undefined at module level (NameError on startup).
load_dotenv()

API_ID = int(os.environ.get("API_ID", "0"))
API_HASH = os.environ.get("API_HASH")
BOT_TOKEN = os.environ.get("BOT_TOKEN")
GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY")

# Pyrogram bot client; session is persisted under the name "chatbotai".
client = Client(
    "chatbotai",
    api_id=API_ID,
    api_hash=API_HASH,
    bot_token=BOT_TOKEN,
)
|
|
|
|
|
def _no_block_safety_settings():
    """Gemini safety settings that disable blocking for every harm category."""
    return {
        genai.types.HarmCategory.HARM_CATEGORY_HATE_SPEECH: genai.types.HarmBlockThreshold.BLOCK_NONE,
        genai.types.HarmCategory.HARM_CATEGORY_HARASSMENT: genai.types.HarmBlockThreshold.BLOCK_NONE,
        genai.types.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: genai.types.HarmBlockThreshold.BLOCK_NONE,
        genai.types.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: genai.types.HarmBlockThreshold.BLOCK_NONE,
    }


async def _deliver(message: Message, ai_reply, text: str):
    """Deliver *text* to the user.

    Telegram messages are capped at 4096 characters; longer responses are
    written to "chat.txt" and sent as a document instead.  When *ai_reply*
    (the "Processing..." status message) is given, it is edited/deleted;
    when it is None, a fresh reply is sent.
    """
    if len(text) > 4096:
        with open("chat.txt", "w+", encoding="utf8") as out_file:
            out_file.write(text)
        await message.reply_document(
            document="chat.txt",
            disable_notification=True,
        )
        if ai_reply is not None:
            await ai_reply.delete()
        os.remove("chat.txt")
    elif ai_reply is not None:
        await ai_reply.edit_text(text)
    else:
        await message.reply_text(text)


async def _upload_and_wait(ai_reply, file_name: str):
    """Upload a local media file to Gemini and poll until it leaves the
    PROCESSING state.  Returns the final file object (state may be FAILED)."""
    await ai_reply.edit_text("Uploading file...")
    media_file = genai.upload_file(path=file_name)
    while media_file.state.name == "PROCESSING":
        # Gemini processes uploads asynchronously; poll every 10 seconds.
        await asyncio.sleep(10)
        media_file = genai.get_file(media_file.name)
    return media_file


@client.on_message(
    filters.incoming
    & (
        filters.text
        | filters.photo
        | filters.video
        | filters.audio
        | filters.voice
        | filters.regex(r"\b(Randy|Rendi)\b(.*)", flags=re.IGNORECASE)
    )
    & filters.private
    & ~filters.via_bot
    & ~filters.forwarded,
    group=2,
)
async def chatbot_talk(client: Client, message: Message):
    """Answer private-chat messages with Gemini.

    Dispatches on media type (photo / audio+voice / video / plain text),
    keeps a per-user chat history in the database, and replies with the
    model output (as a document when it exceeds Telegram's 4096-char limit).

    Fixes vs. the original:
    - the outer ``try`` had no matching ``except`` (syntax defect);
    - in the photo branch ``ai_reply`` was created after ``download()``,
      so a download failure hit the error handlers with an unbound name;
    - downloaded files are now removed in ``finally`` so they no longer
      leak when a branch raises.
    """
    try:
        genai.configure(api_key=GOOGLE_API_KEY)

        if message.photo:
            # Create the status reply BEFORE downloading so the error
            # handler below can always edit it.
            ai_reply = await message.reply_text("Processing...")
            file_path = None
            try:
                file_path = await message.download()
                caption = message.caption or "What's this?"
                x = GeminiLatest(api_keys=GOOGLE_API_KEY)

                backup_chat = await db._get_chatbot_chat_from_db(message.from_user.id)
                backup_chat.append({"role": "user", "parts": [{"text": caption}]})

                response_reads = x.get_response_image(caption, file_path)
                await _deliver(message, ai_reply, response_reads)

                backup_chat.append({"role": "model", "parts": [{"text": response_reads}]})
                await db._update_chatbot_chat_in_db(message.from_user.id, backup_chat)
            except Exception as e:
                # InvalidArgument and any other failure were handled
                # identically in the original; a single arm suffices.
                await ai_reply.edit_text(f"Error: {e}")
            finally:
                if file_path and os.path.exists(file_path):
                    os.remove(file_path)
            return

        if message.audio or message.voice:
            ai_reply = await message.reply_text("Processing...")
            audio_file_name = None
            try:
                audio_file_name = await message.download()
                caption = message.caption or "What's this?"
                model = genai.GenerativeModel(
                    model_name="gemini-1.5-flash",
                    safety_settings=_no_block_safety_settings(),
                )
                backup_chat = await db._get_chatbot_chat_from_db(message.from_user.id)
                backup_chat.append({"role": "user", "parts": [{"text": caption}]})

                audio_file = await _upload_and_wait(ai_reply, audio_file_name)
                if audio_file.state.name == "FAILED":
                    await ai_reply.edit_text(f"Error: {audio_file.state.name}")
                    return

                response = model.generate_content(
                    [audio_file, caption],
                    request_options={"timeout": 600},
                )
                await _deliver(message, ai_reply, response.text)

                backup_chat.append({"role": "model", "parts": [{"text": response.text}]})
                await db._update_chatbot_chat_in_db(message.from_user.id, backup_chat)

                audio_file.delete()
            except Exception as e:
                await ai_reply.edit_text(f"Error: {e}")
            finally:
                if audio_file_name and os.path.exists(audio_file_name):
                    os.remove(audio_file_name)
            return

        if message.video:
            ai_reply = await message.reply_text("Processing...")
            video_file_name = None
            try:
                video_file_name = await message.download(file_name="newvideo.mp4")
                caption = message.caption or "What's this?"
                model = genai.GenerativeModel(
                    model_name="gemini-1.5-pro",
                    safety_settings=_no_block_safety_settings(),
                )
                backup_chat = await db._get_chatbot_chat_from_db(message.from_user.id)
                backup_chat.append({"role": "user", "parts": [{"text": caption}]})

                video_file = await _upload_and_wait(ai_reply, video_file_name)
                if video_file.state.name == "FAILED":
                    await ai_reply.edit_text(f"Error: {video_file.state.name}")
                    return

                response = model.generate_content(
                    [video_file, caption],
                    request_options={"timeout": 600},
                )
                await _deliver(message, ai_reply, response.text)

                backup_chat.append({"role": "model", "parts": [{"text": response.text}]})
                await db._update_chatbot_chat_in_db(message.from_user.id, backup_chat)

                video_file.delete()
            except Exception as e:
                await ai_reply.edit_text(f"Error: {e}")
            finally:
                if video_file_name and os.path.exists(video_file_name):
                    os.remove(video_file_name)
            return

        if message.text:
            try:
                query = message.text.strip()
                # When the trigger name ("Randy"/"Rendi") appears, answer only
                # the text that follows it; otherwise use the whole message.
                match = re.search(r"\b(Randy|Rendi)\b(.*)", query, flags=re.IGNORECASE)
                if match:
                    rest_of_sentence = match.group(2).strip()
                    query_base = rest_of_sentence if rest_of_sentence else query
                else:
                    query_base = query

                model_flash = genai.GenerativeModel(
                    model_name="gemini-1.5-flash"
                )
                backup_chat = await db._get_chatbot_chat_from_db(message.from_user.id)
                backup_chat.append({"role": "user", "parts": [{"text": query_base}]})

                # NOTE(review): the query is both appended to the history and
                # sent via send_message, so the model sees it twice — kept
                # as-is to preserve the original behavior; verify intent.
                chat_session = model_flash.start_chat(history=backup_chat)
                response_data = chat_session.send_message(query_base)
                output = response_data.text

                await _deliver(message, None, output)

                backup_chat.append({"role": "model", "parts": [{"text": output}]})
                await db._update_chatbot_chat_in_db(message.from_user.id, backup_chat)
            except Exception as e:
                await message.reply_text(str(e))
    except Exception as e:
        # Catch-all for failures outside the per-branch handlers
        # (e.g. genai.configure). The original outer try had no except.
        LOGS.error(f"chatbot_talk failed: {e}")
|
|
|
|
|
if __name__ == "__main__":
    # Start the bot and block until it is stopped.
    try:
        client.run()
    except (KeyboardInterrupt, SystemExit):
        # Graceful shutdown requested from the console.
        LOGS.info("Bot has been terminated by the user.")
    except Exception as exc:
        # Anything else is unexpected — record it before exiting.
        LOGS.error(f"Unexpected error: {exc}")