import streamlit as st
import asyncio
import os
from f1_ai import F1AI
from dotenv import load_dotenv

# Set page config (must be the first Streamlit command in the script)
st.set_page_config(page_title="F1-AI: Formula 1 RAG Application", layout="wide")

# Load environment variables from a local .env file (if present) and from
# .streamlit/secrets.toml into os.environ; environment values must be strings
load_dotenv()
for key, value in st.secrets.items():
    os.environ[key] = str(value)

# Initialize session state
if 'f1_ai' not in st.session_state:
    # Always use OpenRouter for deployment
    st.session_state.f1_ai = F1AI(llm_provider="openrouter")
if 'chat_history' not in st.session_state:
    st.session_state.chat_history = []

# Title and description
st.title("F1-AI: Formula 1 RAG Application")
st.markdown("""
This application uses Retrieval-Augmented Generation (RAG) to answer questions about Formula 1.
""")

# Custom CSS for better styling
st.markdown("""
<style>
    .stChatMessage {
        padding: 1rem;
        border-radius: 0.5rem;
        margin-bottom: 1rem;
        box-shadow: 0 2px 4px rgba(0,0,0,0.1);
    }
    .stChatMessage.user {
        background-color: #f0f2f6;
    }
    .stChatMessage.assistant {
        background-color: #ffffff;
    }
    .source-link {
        font-size: 0.8rem;
        color: #666;
        text-decoration: none;
    }
</style>
""", unsafe_allow_html=True)

# Display chat history with enhanced formatting
for message in st.session_state.chat_history:
    with st.chat_message(message["role"]):
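        # Assistant replies are stored as {"answer": ..., "sources": [...]} dicts; render both parts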
        if message["role"] == "assistant" and isinstance(message["content"], dict):
            st.markdown(message["content"]["answer"])
            if message["content"]["sources"]:
                st.markdown("---")
                st.markdown("**Sources:**")
                for source in message["content"]["sources"]:
                    st.markdown(f"- [{source['url']}]({source['url']})")
        else:
            st.markdown(message["content"])

# Question input
if question := st.chat_input("Ask a question about Formula 1"):
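    # chat_input returns None until the user submits, so this block only runs on a new question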
    # Add user question to chat history
    st.session_state.chat_history.append({"role": "user", "content": question})
    
    # Display user question
    with st.chat_message("user"):
        st.write(question)
    
    # Generate and display response with enhanced formatting
    with st.chat_message("assistant"):
        with st.spinner("🤔 Analyzing Formula 1 knowledge..."):
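            # ask_question is async; run the coroutine to completion before rendering the answer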
            response = asyncio.run(st.session_state.f1_ai.ask_question(question))
            st.markdown(response["answer"])
            
            # Display sources if available
            if response["sources"]:
                st.markdown("---")
                st.markdown("**Sources:**")
                for source in response["sources"]:
                    st.markdown(f"- [{source['url']}]({source['url']})")
            
    # Add assistant response to chat history
    st.session_state.chat_history.append({"role": "assistant", "content": response})

# Add a footer with credits
st.markdown("---")
st.markdown("F1-AI: A Formula 1 RAG Application")