import os

import google.generativeai as genai
from dotenv import load_dotenv

# Load environment variables from a .env file and configure the Gemini client
# with the GOOGLE_API_KEY defined there.
load_dotenv()
genai.configure(api_key=os.getenv('GOOGLE_API_KEY'))


def generate_ai_response(prompt, model_name='gemini-pro', temperature=0.7, max_tokens=1000):
    """
    Generate an AI response using Google's Generative AI (Gemini).

    Args:
        prompt (str): The input prompt for the AI.
        model_name (str, optional): The Gemini model to use. Defaults to 'gemini-pro'.
        temperature (float, optional): Controls randomness of the output. Defaults to 0.7.
        max_tokens (int, optional): Maximum number of tokens in the generated response.
            Defaults to 1000.

    Returns:
        str: The generated AI response, or an error message if the request fails.
    """
    try:
        # Instantiate the requested Gemini model.
        model = genai.GenerativeModel(model_name)

        # Generate a completion using the requested sampling settings.
        response = model.generate_content(
            prompt,
            generation_config=genai.types.GenerationConfig(
                temperature=temperature,
                max_output_tokens=max_tokens
            )
        )

        return response.text

    except Exception as e:
        print(f"Error generating AI response: {e}")
        return f"An error occurred while generating the response: {str(e)}"


def simulate_ai_response(prompt):
    """
    Return a simulated AI response for development and testing (no API call).

    Args:
        prompt (str): The input prompt (not currently used by the simulation).

    Returns:
        str: A randomly chosen canned response.
    """
    # Imported locally because this helper is only needed in development/testing.
    import random

    simulated_responses = [
        "Based on the current financial data, here are some key insights...",
        "The AI suggests optimizing your spending in these key areas...",
        "Your startup shows promising growth potential with these recommendations...",
        "We've identified potential areas for financial improvement...",
        "Here's a strategic overview of your financial situation..."
    ]

    return random.choice(simulated_responses)
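

# Illustrative usage sketch (an assumption, not part of the original module):
# calls the real Gemini helper when a GOOGLE_API_KEY is configured and falls
# back to the offline simulation otherwise. The sample prompt is hypothetical.
if __name__ == '__main__':
    sample_prompt = "Summarize this month's spending in two sentences."

    if os.getenv('GOOGLE_API_KEY'):
        print(generate_ai_response(sample_prompt, temperature=0.5, max_tokens=200))
    else:
        print(simulate_ai_response(sample_prompt))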