import os

import google.generativeai as genai
from dotenv import load_dotenv

# Load environment variables (including GEMINI_API_KEY) from a local .env file.
load_dotenv()

# Configure the Gemini client with the API key from the environment.
genai.configure(api_key=os.getenv("GEMINI_API_KEY"))

# Sampling and output settings for explanation generation.
generation_config = {
    "temperature": 0.7,
    "top_p": 0.9,
    "top_k": 40,
    "max_output_tokens": 300,
    "response_mime_type": "text/plain",
}

model = genai.GenerativeModel(
    model_name="gemini-1.5-flash",
    generation_config=generation_config,
)


def generate_explanation(news, context, truth_score):
    """
    Generate a concise explanation supporting the truth score.

    Arguments:
    - news (str): The news or claim being analyzed.
    - context (str): Relevant context from external sources.
    - truth_score (float): The score indicating the truthfulness of the news.

    Returns:
    - str: A simple explanation, or an empty string if an error occurs.
    """
    # Truncate the context to keep the prompt within a reasonable size.
    prompt = (
        "Summarize the context below and explain why the given truth score was assigned to the news claim.\n\n"
        f"News: {news}\n\n"
        f"Context: {context[:3000]}...\n\n"
        f"Truth Score: {truth_score:.2f}\n\n"
        "Keep the explanation short, factual, and easy to understand."
    )

    try:
        # Send the prompt in a fresh chat session and return the model's reply.
        chat_session = model.start_chat(history=[])
        response = chat_session.send_message(prompt)
        return response.text.strip()
    except Exception as e:
        print(f"Error generating explanation: {e}")
        return ""
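

# Illustrative usage sketch, not part of the core module. It assumes
# GEMINI_API_KEY is available via the environment or a .env file; the claim,
# context, and score below are made-up placeholders, not real data.
if __name__ == "__main__":
    sample_news = "Example claim text"  # hypothetical claim
    sample_context = "Example supporting context retrieved from external sources"  # hypothetical context
    sample_score = 0.75  # hypothetical truth score in [0, 1]
    print(generate_explanation(sample_news, sample_context, sample_score))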