# Necessary imports
import os
import sys
from typing import List, Tuple, Optional
import PIL.Image
import google.generativeai as genai
from dotenv import load_dotenv
# local imports
from src.config import generation_config, safety_settings, model_name
from src.app.prompt import system_prompt
from src.logger import logging
from src.exception import CustomExceptionHandling
# Load the Environment Variables from .env file
load_dotenv()
# Set the Gemini API Key
genai.configure(api_key=os.environ.get("GOOGLE_API_KEY"))
# Create the Gemini models for text and vision chats. Both share the same
# configuration; they are kept separate so that text-only and image-based
# conversations each run in their own chat session.
txt_model = genai.GenerativeModel(
    model_name=model_name,
    generation_config=generation_config,
    safety_settings=safety_settings,
    system_instruction=system_prompt,
)
vis_model = genai.GenerativeModel(
    model_name=model_name,
    generation_config=generation_config,
    safety_settings=safety_settings,
    system_instruction=system_prompt,
)


def llm_response(
    history: List[Tuple[Optional[str], Optional[str]]], text: str, img: Optional[str]
) -> List[Tuple[Optional[str], Optional[str]]]:
    """
    Generate a response based on the input.

    Args:
        history (List[Tuple[Optional[str], Optional[str]]]): A list of previous chat turns.
        text (str): The input text.
        img (Optional[str]): The path to an image file (optional).

    Returns:
        List[Tuple[Optional[str], Optional[str]]]: The updated chat history.
    """
    try:
        if not img:
            chat_session = txt_model.start_chat(history=[])

            # Flatten the prior chat turns into a string so the model has context;
            # label assistant turns so the two speakers stay distinguishable
            history_str = "\n".join(
                [
                    f"User: {msg[0]}\nAssistant: {msg[1]}" if msg[1] else f"User: {msg[0]}"
                    for msg in history
                ]
            )

            # Generate the response with the accumulated context
            response = chat_session.send_message(
                f"History:\n{history_str}\nUser: {text}"
            )
        else:
            # Open the image, wrapping file errors in the custom exception
            try:
                img = PIL.Image.open(img)
            except Exception as e:
                raise CustomExceptionHandling(e, sys) from e

            # Start a chat session for the image and generate the response
            chat_session = vis_model.start_chat(history=[])
            response = chat_session.send_message([f"User: {text}", img])
        # Append the response to the chat history and return it
        history.append((None, response.text))
        logging.info("Response added to chat history.")
        return history

    # Handle any exception raised during response generation
    except Exception as e:
        raise CustomExceptionHandling(e, sys) from e
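

# A minimal usage sketch (an illustrative assumption, not part of the original
# module): it assumes GOOGLE_API_KEY is set in the environment and that
# "photo.jpg" is a hypothetical local image path.
if __name__ == "__main__":
    # Text-only turn: the history list starts empty and is updated in place
    chat_history = llm_response(history=[], text="What is the Gemini API?", img=None)
    print(chat_history[-1][1])

    # Image turn: pass the path of a local image file as the third argument
    chat_history = llm_response(chat_history, "Describe this picture.", "photo.jpg")
    print(chat_history[-1][1])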