parthib07 committed on
Commit 409e7eb · verified · 1 Parent(s): 77083f9

Update app.py

Files changed (1)
  1. app.py +52 -52
app.py CHANGED
@@ -1,53 +1,53 @@
- import streamlit as st
- from backend import uploaded_file_to_response, normal_response
- from llama_index.llms.gemini import Gemini
- from llama_index.embeddings.gemini import GeminiEmbedding
- import google.generativeai as genai
- import os
- from dotenv import load_dotenv
- load_dotenv()
-
- genai.configure(api_key=os.environ.get("GOOGLE_API_KEY"))
-
- llm = Gemini(model_name="models/gemini-1.5-pro")
- embeddings = GeminiEmbedding(model_name="models/embedding-001")
-
- if "chat_history" not in st.session_state:
-     st.session_state.chat_history = []
-
- st.markdown("""
- <style>
- .stApp { background-color: #ffffff; color: black; font-family: 'Arial', sans-serif; }
- .title { font-size: 36px; font-weight: bold; text-align: center; animation: fadeIn 2s ease-in-out; }
- @keyframes fadeIn { from { opacity: 0; } to { opacity: 1; } }
- .chat-container { max-height: 500px; overflow-y: auto; display: flex; flex-direction: column-reverse; padding: 10px; border-radius: 10px; background: rgba(0, 0, 0, 0.05); margin-top: 10px; }
- .user-message { background: #0078ff; color: white; padding: 10px; border-radius: 10px; margin-bottom: 5px; text-align: left; }
- .ai-message { background: #f1f1f1; color: black; padding: 10px; border-radius: 10px; margin-bottom: 5px; text-align: left; }
- .btn-style { background: linear-gradient(45deg, #ff007f, #ff0055); color: white; padding: 8px 16px; border-radius: 6px; font-size: 14px; margin-top: 10px; transition: 0.3s ease-in-out; border: none; cursor: pointer; }
- .btn-style:hover { background: linear-gradient(45deg, #ff0055, #d4005a); }
- </style>
- """, unsafe_allow_html=True)
-
- st.markdown("<h1 class='title'>🧠 AI Code Companion</h1>", unsafe_allow_html=True)
- st.caption("🚀 Upload files & chat with AI")
-
- uploaded_file = st.file_uploader("Upload a File (Image, Document, Code, Video, or Audio)", type=["png", "jpg", "jpeg", "pdf", "docx", "txt", "py", "js", "java", "cpp", "mp4"], key="file_uploader")
-
- user_input = st.text_input("Type your message here...", key="chat_input", help="Chat with AI", label_visibility="collapsed")
-
- if st.button("Generate Response", key="generate_button", help="Click to get AI response", use_container_width=False):
-     if user_input:
-         with st.spinner("Processing..."):
-             response = normal_response(user_input)
-             if uploaded_file:
-                 response = uploaded_file_to_response(uploaded_file, user_input)
-             st.session_state.chat_history.insert(0, (user_input, response))
-
- if st.session_state.chat_history:
-     chat_container = st.container()
-     with chat_container:
-         st.markdown("### Chat History")
-         for user_msg, ai_response in st.session_state.chat_history:
-             st.markdown(f"<div class='user-message'><b>You:</b> {user_msg}</div>", unsafe_allow_html=True)
-             st.markdown(f"<div class='ai-message'><b>AI:</b> {ai_response}</div>", unsafe_allow_html=True)
+ import streamlit as st
+ from backend import uploaded_file_to_response, normal_response
+ from llama_index.llms.gemini import Gemini
+ from llama_index.embeddings.gemini import GeminiEmbedding
+ import google.generativeai as genai
+ import os
+ from dotenv import load_dotenv
+ load_dotenv()
+
+ genai.configure(api_key=os.environ.get("GOOGLE_API_KEY"))
+
+ llm = Gemini(model_name="models/gemini-1.5-pro")
+ embeddings = GeminiEmbedding(model_name="models/embedding-001")
+
+ if "chat_history" not in st.session_state:
+     st.session_state.chat_history = []
+
+ st.markdown("""
+ <style>
+ .stApp { background-color: #ffffff; color: black; font-family: 'Arial', sans-serif; }
+ .title { font-size: 36px; font-weight: bold; text-align: center; animation: fadeIn 2s ease-in-out; }
+ @keyframes fadeIn { from { opacity: 0; } to { opacity: 1; } }
+ .chat-container { max-height: 500px; overflow-y: auto; display: flex; flex-direction: column-reverse; padding: 10px; border-radius: 10px; background: rgba(0, 0, 0, 0.05); margin-top: 10px; }
+ .user-message { background: #0078ff; color: white; padding: 10px; border-radius: 10px; margin-bottom: 5px; text-align: left; }
+ .ai-message { background: #f1f1f1; color: black; padding: 10px; border-radius: 10px; margin-bottom: 5px; text-align: left; }
+ .btn-style { background: linear-gradient(45deg, #ff007f, #ff0055); color: white; padding: 8px 16px; border-radius: 6px; font-size: 14px; margin-top: 10px; transition: 0.3s ease-in-out; border: none; cursor: pointer; }
+ .btn-style:hover { background: linear-gradient(45deg, #ff0055, #d4005a); }
+ </style>
+ """, unsafe_allow_html=True)
+
+ st.markdown("<h1 class='title'>🧠 VisionLang</h1>", unsafe_allow_html=True)
+ st.caption("🚀 Upload files & chat with VisionLang")
+
+ uploaded_file = st.file_uploader("Upload a File (Image, Document, Code, Video, or Audio)", type=["png", "jpg", "jpeg", "pdf", "docx", "txt", "py", "js", "java", "cpp", "mp4"], key="file_uploader")
+
+ user_input = st.text_input("Type your message here...", key="chat_input", help="Chat with AI", label_visibility="collapsed")
+
+ if st.button("Generate Response", key="generate_button", help="Click to get AI response", use_container_width=False):
+     if user_input:
+         with st.spinner("Processing..."):
+             response = normal_response(user_input)
+             if uploaded_file:
+                 response = uploaded_file_to_response(uploaded_file, user_input)
+             st.session_state.chat_history.insert(0, (user_input, response))
+
+ if st.session_state.chat_history:
+     chat_container = st.container()
+     with chat_container:
+         st.markdown("### Chat History")
+         for user_msg, ai_response in st.session_state.chat_history:
+             st.markdown(f"<div class='user-message'><b>User:</b> {user_msg}</div>", unsafe_allow_html=True)
+             st.markdown(f"<div class='ai-message'><b>AI:</b> {ai_response}</div>", unsafe_allow_html=True)
              st.markdown("<br>", unsafe_allow_html=True)
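
Note: app.py imports `normal_response` and `uploaded_file_to_response` from `backend`, but backend.py is not touched by this commit and is not shown above. The sketch below is only an illustration of what those two helpers might look like given how app.py calls them; the use of `google.generativeai`, the temp-file handling, and the model name are assumptions, not the repository's actual implementation.

```python
# backend.py — a minimal sketch, NOT the committed implementation.
# Only the two function names and their call signatures come from app.py.
import os
import tempfile

import google.generativeai as genai

genai.configure(api_key=os.environ.get("GOOGLE_API_KEY"))
_model = genai.GenerativeModel("gemini-1.5-pro")  # assumed model name


def normal_response(prompt: str) -> str:
    """Plain chat: send the user's prompt to Gemini and return the reply text."""
    result = _model.generate_content(prompt)
    return result.text


def uploaded_file_to_response(uploaded_file, prompt: str) -> str:
    """File-grounded chat: upload the Streamlit file to Gemini, then ask about it."""
    # Streamlit hands over an in-memory UploadedFile; genai.upload_file expects a
    # path, so spill the bytes to a temporary file first (assumed approach).
    suffix = os.path.splitext(uploaded_file.name)[1]
    with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp:
        tmp.write(uploaded_file.getvalue())
        tmp_path = tmp.name
    try:
        gemini_file = genai.upload_file(tmp_path)
        result = _model.generate_content([gemini_file, prompt])
        return result.text
    finally:
        os.remove(tmp_path)
```

In this sketch the uploaded file is round-tripped through a temporary file purely because the Gemini Files API takes a path; the real backend may process uploads differently.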