# UFoP-API / plugins / gemi.py
# Nimbus ~ UserBot
# Copyright (C) 2023 NimbusTheCloud
#
# This file is a part of < https://github.com/ufoptg/Nimbus/ >
# Please read the GNU Affero General Public License in
# <https://www.github.com/ufoptg/Nimbus/blob/main/LICENSE/>.
# by @SoulOfSukuna
"""
Gemini Command Handler
This script provides the `.gemini` command to interact with Google's Gemini AI for generating content, selecting models, and managing chat history.
Available Gemini Models:
- `gemini-2.0-flash-exp` ➔ Trait: `g2f-exp` (Default model)
- `gemini-1.5-flash` ➔ Trait: `g1f`
- `gemini-1.5-flash-8b` ➔ Trait: `g1f8b`
- `gemini-1.5-pro` ➔ Trait: `g1p`
Features:
1. Query Gemini AI:
- `.gemini <your query>` ➔ Generates a response using the currently selected model.
- Example: `.gemini Write a poem about the stars.`
2. Select a Model:
- `.gemini -m <trait>` ➔ Selects a specific model based on its trait.
- Example: `.gemini -m g1f` ➔ Switches to the `gemini-1.5-flash` model.
- If an invalid trait is provided, an error message will be returned.
3. Clear Chat History:
- `.gemini -c` ➔ Clears the chat history stored in memory.
- Example: `.gemini -c` ➔ Removes all previous queries and responses.
4. Handle Long Responses:
- If the response exceeds Telegram's character limit (4096 characters), it will be sent as a `.txt` file.
5. Reply-Based Query:
- Reply to a message with `.gemini write a cool caption for this image` ➔ Uses the replied message's content as the query and processes the image.
Examples:
- `.gemini Tell me a joke about programmers.`
- `.gemini -m g2f-exp`
- `.gemini -c`
- Reply to an image with `.gemini write a cool caption for this image`
"""
import asyncio
import mimetypes
from collections import deque, defaultdict
from io import BytesIO
from os import system, path, remove
from googleapiclient.discovery import build
import logging
from bs4 import BeautifulSoup
try:
import google.generativeai as genai
except ImportError:
system("pip install -q google-generativeai")
import google.generativeai as genai
try:
from PIL import Image
except ImportError:
system("pip install -q Pillow")
from PIL import Image
from typing import Optional
from . import (
ultroid_cmd,
async_searcher,
check_filename,
udB,
LOGS,
download_file,
run_async,
)
MODELS = [
{"id": "gemini-2.0-flash-exp", "traits": ["g2f-exp"]},
{"id": "gemini-1.5-flash", "traits": ["g1f"]},
{"id": "gemini-1.5-flash-8b", "traits": ["g1f8b"]},
{"id": "gemini-1.5-pro", "traits": ["g1p"]},
]
def select_model(trait: str) -> Optional[str]:
"""Selects the Gemini model ID based on the provided trait."""
for model in MODELS:
if trait in model["traits"]:
return model["id"]
return None
logging.getLogger("googleapiclient.discovery_cache").setLevel(logging.WARNING)
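# Runtime state: the currently selected model, plus a per-user chat history
# capped at the most recent 80 messages.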
CONFIG = {"model": select_model("g2f-exp")}
GEMINI_CHAT_HISTORY = defaultdict(lambda: deque(maxlen=80))
async def generate_content_with_search(prompt, model):
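    """
    Augments the prompt with Google Custom Search results and asks the given
    Gemini model to answer using that context. Falls back to plain generation
    if the search-augmented request fails.
    """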
try:
search_results = await google_search(prompt)
search_summary = "\n".join(
[f"{idx + 1}. {res['title']} - {res['snippet']}" for idx, res in enumerate(search_results)]
)
enhanced_prompt = (
f"Use the following search results to create a comprehensive response:\n\n"
f"{search_summary}\n\n"
f"Original Query: {prompt}"
)
response = await model.generate_content_async(enhanced_prompt)
return response.text.strip()
    except Exception as e:
        # Fall back to plain generation if augmenting with search results fails.
        fallback = await model.generate_content_async(prompt)
        return f"An error occurred while including search results: {e}. Generating content...\n\n{fallback.text.strip()}"
async def fetch_full_content(url):
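    """
    Fetches a page via the scraper.api.airforce proxy and returns up to five
    substantial paragraphs of main-article text, skipping common boilerplate phrases.
    """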
try:
api_url = f"https://scraper.api.airforce/scrape?url={url}"
response = await async_searcher(api_url)
soup = BeautifulSoup(response, "html.parser")
main_content = soup.select_one("article") or soup.select_one("main") or soup
paragraphs = [
para.get_text(separator=" ").strip()
for para in main_content.find_all("p")
if len(para.get_text(strip=True)) > 30
and not any(
keyword in para.get_text().lower()
for keyword in [
"privacy",
"cookie",
"subscribe",
"sign up",
"terms",
"all rights reserved",
"see all",
"see more",
]
)
]
full_text = (
" ".join(paragraphs[:5]) if paragraphs else "No main content available."
)
return full_text
except Exception as e:
return f"Error fetching content: {e}"
async def google_search(query):
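    """
    Runs a Google Custom Search for the query and returns the results, each
    enriched with the page's main content via fetch_full_content().
    """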
api_key = "AIzaSyAOhKEVXRX-DJbxjyUz5Ol54qCygeRQRTA"
cse_id = "a44275f02ca2946da"
    service = build("customsearch", "v1", developerKey=api_key)
    request = service.cse().list(q=query, cx=cse_id, gl="AU")
    # The Custom Search client is synchronous, so execute the request off the event loop.
    results = await asyncio.to_thread(request.execute)
search_items = results.get("items", [])
search_results = await asyncio.gather(
*(fetch_search_result(item) for item in search_items)
)
return search_results
async def fetch_search_result(item):
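    """Builds a result dict with title, link, snippet, and scraped full content."""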
title = item.get("title")
link = item.get("link")
snippet = item.get("snippet")
full_content = await fetch_full_content(link)
return {
"title": title,
"link": link,
"snippet": snippet,
"full_content": full_content or "No additional content available."
}
async def process_file(file_path: str) -> str:
"""
    Uploads a file to Gemini with its guessed MIME type and returns the uploaded file object.
"""
try:
mime_type, _ = mimetypes.guess_type(file_path)
if not mime_type:
mime_type = 'application/octet-stream'
file_url = await asyncio.to_thread(genai.upload_file, file_path, mime_type=mime_type)
return file_url
except Exception as exc:
LOGS.error(f"File upload failed: {exc}")
raise
async def process_video_file(file_path: str) -> str:
"""
    Uploads a video to Gemini, waits for processing to complete, and returns the uploaded file object.
"""
try:
video_file = await asyncio.to_thread(genai.upload_file, path=file_path)
while video_file.state.name == "PROCESSING":
await asyncio.sleep(10)
video_file = genai.get_file(video_file.name)
if video_file.state.name == "FAILED":
raise ValueError(video_file.state.name)
return video_file
except Exception as exc:
LOGS.error(f"File upload failed: {exc}")
raise
async def process_image_with_pillow(file_path: str) -> Optional[str]:
"""
Processes an image file using PIL.Image, saves it temporarily,
    uploads it to Gemini, and returns the uploaded file object.
"""
try:
with Image.open(file_path) as img:
img = img.convert("RGB")
buffer = BytesIO()
buffer.name = "processed_image.jpeg"
img.save(buffer, format="JPEG", quality=90)
buffer.seek(0)
temp_image_path = "temp_processed_image.jpeg"
with open(temp_image_path, "wb") as temp_file:
temp_file.write(buffer.read())
file_url = await process_file(temp_image_path)
remove(temp_image_path)
return file_url
except Exception as exc:
LOGS.error(f"Image processing failed: {exc}")
return None
async def handle_multimodal_input(event, e) -> Optional[str]:
"""
Checks and processes images, audio, or video in a replied message.
Ensures cleanup of downloaded files.
    Returns the uploaded file object or None if processing fails.
"""
temp_file = None
try:
if event.photo:
temp_file = await event.download_media()
await e.eor("Processing image with Pillow...")
return await process_image_with_pillow(temp_file)
elif event.voice or event.audio:
temp_file = await event.download_media()
await e.eor("Uploading audio...")
return await process_file(temp_file)
elif event.video:
temp_file = await event.download_media()
await e.eor("Uploading video...")
return await process_video_file(temp_file)
except Exception as exc:
LOGS.error(f"Error processing media: {exc}")
finally:
if temp_file and path.exists(temp_file):
try:
remove(temp_file)
LOGS.info(f"Cleaned up temporary file: {temp_file}")
except Exception as cleanup_exc:
LOGS.warning(f"Failed to clean up temporary file: {cleanup_exc}")
return None
async def get_gemini_response(user_id: int, query: str, api_key: str, file_url: Optional[str] = None) -> str:
"""
Generates a response from the selected Gemini model based on the user query.
Includes the user's chat history in the request.
If a file URL is provided, it is included in the content generation.
"""
try:
genai.configure(api_key=api_key)
model = genai.GenerativeModel(CONFIG["model"])
chat_history = GEMINI_CHAT_HISTORY[user_id]
if chat_history:
formatted_history = "\n".join(
[f"{msg['role']}: {msg['content']}" for msg in chat_history]
)
if file_url:
content = [file_url, "\n\n", f"{formatted_history}\nuser: {query}"]
else:
content = f"{formatted_history}\nuser: {query}"
else:
if file_url:
content = [file_url, "\n\n", query]
else:
content = query
# Key Addition: Handle queries without file_url using Google Search
if not file_url:
response = await generate_content_with_search(query, model)
return response
response = await asyncio.to_thread(model.generate_content, content)
return response.text.strip()
except Exception as exc:
LOGS.error(f"Error generating response: {exc}")
raise
@ultroid_cmd(pattern=r"gemi(?:ni)?(?:\s+([\s\S]*))?$")
async def gemini_handler(e):
"""
Handles the .gemini command with optional model selection.
"""
args = e.pattern_match.group(1)
trait = None
user_query = None
file_url = None
try:
user_id = e.sender.id
except AttributeError:
user_id = e.from_id.user_id
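    # Argument parsing: "-m <trait>" selects a model, "-c" clears this user's
    # chat history, and anything else is treated as the query text.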
if args:
if args.startswith("-m"):
trait = args[3:].lower().strip()
selected_model = select_model(trait)
if not selected_model:
                return await e.eor("❌ **Error:** Invalid model trait specified.", time=10)
            CONFIG["model"] = selected_model
            return await e.eor(f"✅ **Success:** Selected Model: `{CONFIG['model']}`", time=10)
        elif args.strip().lower() == "-c":
            GEMINI_CHAT_HISTORY[user_id].clear()
            return await e.eor("🧹 **Success:** Cleared your Gemini Chat History!", time=6)
else:
user_query = args.strip()
api_key = udB.get_key("GEMINI_API")
if not api_key:
return await e.eor(
"⚠️ **Error:** `GEMINI_API` key missing. Please set it using `.setvar GEMINI_API your_api_key_here`.",
time=10,
)
query = user_query
file_url = None
reply = await e.get_reply_message()
if reply:
# New logic to handle file content
if (
reply.file
and reply.file.mime_type in ["text/x-python", "text/plain"]
):
# Download the file and read its content
file = await reply.download_media(BytesIO())
file.seek(0)
query = file.read().decode("utf-8")
multimodal_content = await handle_multimodal_input(reply, e)
if multimodal_content:
file_url = multimodal_content
if not query:
if reply.photo:
query = "Analyse this image"
elif reply.voice or reply.audio:
query = "Analyse this audio."
elif reply.video:
query = "Analyse this video"
elif reply.text and not query:
query = reply.text.strip()
if not query and not file_url:
return await e.eor(
"πŸ’¬ **Usage:** `.gemini <your query>`\n*Provide a query or reply with media to generate content using Gemini AI.*",
time=5,
)
    processing_message = await e.eor(f"🔄 **Processing your request using `{CONFIG['model']}`...**")
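    # Record the query up front so it is part of the history sent on later turns;
    # it is popped again below if generation fails.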
GEMINI_CHAT_HISTORY[user_id].append({"role": "user", "content": query})
try:
response = await get_gemini_response(user_id, query, api_key, file_url=file_url)
GEMINI_CHAT_HISTORY[user_id].append({"role": "assistant", "content": response})
except Exception as exc:
LOGS.warning(f"Gemini response generation failed: {exc}", exc_info=True)
if query:
GEMINI_CHAT_HISTORY[user_id].pop()
        return await processing_message.edit(f"❌ **Error:** {exc}")
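    # Telegram caps messages at 4096 characters; longer responses are sent as a .txt file.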
    reply_text = f"📄 **Gemini Response:**\n\n{response}"
    if len(reply_text) <= 4096:
        await processing_message.edit(reply_text, parse_mode="markdown")
else:
buffer = BytesIO()
try:
buffer.write(response.encode('utf-8'))
buffer.seek(0)
buffer.name = "gemini_response.txt"
await e.client.send_file(
e.chat_id,
buffer,
caption="πŸ“„ **Gemini Response:**",
reply_to=e.reply_to_msg_id,
)
except Exception as exc:
LOGS.error(f"Error sending file: {exc}")
            await processing_message.edit("❌ **Error:** Failed to send the response as a file.")
finally:
buffer.close()
await processing_message.delete()