Update app.py
app.py CHANGED
@@ -1,15 +1,8 @@
-import os
 import streamlit as st
 import requests
-from streamlit_cookies_manager import EncryptedCookieManager
 from openai import OpenAI
 import google.generativeai as genai
 
-# -----------------------------------------------------
-# Move st.set_page_config() to the very top
-# -----------------------------------------------------
-st.set_page_config(page_title="AI Model Comparator", layout="wide")
-
 # -----------------------------------------------------
 # Retrieve API keys from Hugging Face Secrets
 # -----------------------------------------------------
@@ -19,41 +12,32 @@ GEMINI_API_KEY = st.secrets["GEMINI_API_KEY"]
 # -----------------------------------------------------
 # Initialize OpenAI & Gemini
 # -----------------------------------------------------
-client = OpenAI(api_key=OPENAI_API_KEY)
+client = OpenAI(api_key=OPENAI_API_KEY)  # OpenAI client
 genai.configure(api_key=GEMINI_API_KEY)
 gemini_model = genai.GenerativeModel("gemini-pro")
 
 # -----------------------------------------------------
-#
+# Configure Streamlit Page
 # -----------------------------------------------------
-cookies = EncryptedCookieManager(
-    prefix=...,
-    password=...,
-)
-
-if not cookies.ready():
-    st.stop()  # Wait until cookies are available
-
-# Initialize request count if not set
-if "request_count" not in cookies:
-    cookies["request_count"] = 0
-
-request_count = int(cookies["request_count"])
+st.set_page_config(page_title="AI Model Comparator", layout="wide")
+st.title("🤖 AI Model Comparator")
+st.subheader("Compare responses across multiple LLMs.")
 
 # -----------------------------------------------------
-#
+# User Input: Prompt
 # -----------------------------------------------------
-if request_count >= 3:
-    st.warning("⚠️ Request limit reached (3 per user). Please wait before trying again.")
-    st.stop()
+user_prompt = st.text_area("✍️ Enter your prompt:", "Explain quantum computing in simple terms.")
 
 # -----------------------------------------------------
-# Sidebar: Model Settings
+# Sidebar: Model Settings
 # -----------------------------------------------------
 st.sidebar.header("⚙️ Model Parameters")
 temperature = st.sidebar.slider("🌡️ Temperature", 0.0, 1.5, 0.7)
 max_tokens = st.sidebar.slider("📏 Max Tokens", 50, 1000, 500)
 
+# -----------------------------------------------------
+# Sidebar Footer: Future Works Section
+# -----------------------------------------------------
 with st.sidebar:
     st.markdown("---")
     st.markdown("## 🔮 Future Works: 'Prompt Tester'")
@@ -68,14 +52,9 @@ with st.sidebar:
     """)
 
 # -----------------------------------------------------
-#
+# API Request Functions
 # -----------------------------------------------------
-user_prompt = st.text_area("✍️ Enter your prompt:", "Explain quantum computing in simple terms.")
 
-# -----------------------------------------------------
-# API Request Functions (Using st.cache_data for efficiency)
-# -----------------------------------------------------
-@st.cache_data  # Replaces st.cache for caching API responses
 def get_openai_response(prompt):
     try:
         completion = client.chat.completions.create(
@@ -88,7 +67,6 @@ def get_openai_response(prompt):
     except Exception as e:
         return f"❌ OpenAI error: {e}"
 
-@st.cache_data  # Cache Gemini responses
 def get_gemini_response(prompt):
     try:
         response = gemini_model.generate_content(
@@ -107,10 +85,6 @@ def get_gemini_response(prompt):
 # Generate Responses when Button is Clicked
 # -----------------------------------------------------
 if st.button("🚀 Generate Responses"):
-    request_count += 1
-    cookies["request_count"] = request_count  # Update cookie count
-    cookies.save()  # Persist changes
-
     with st.spinner("Fetching responses..."):
         openai_text = get_openai_response(user_prompt)
         gemini_text = get_gemini_response(user_prompt)
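
For reference, the per-user rate limiting removed above was built on streamlit-cookies-manager. The constructor arguments are not shown in this diff, so the prefix and password below are illustrative assumptions; the ready()/save() flow and the three-request cap come from the removed code itself. A minimal sketch of the pattern:

import os
import streamlit as st
from streamlit_cookies_manager import EncryptedCookieManager

# prefix and password are placeholder values, not the original app's settings
cookies = EncryptedCookieManager(
    prefix="ai-model-comparator/",
    password=os.environ.get("COOKIES_PASSWORD", "change-me"),
)
if not cookies.ready():
    st.stop()  # Wait until the component has loaded the browser cookies

# Cookie values are stored as strings, so parse and re-serialize the counter
request_count = int(cookies.get("request_count", 0))
if request_count >= 3:
    st.warning("⚠️ Request limit reached (3 per user). Please wait before trying again.")
    st.stop()

if st.button("🚀 Generate Responses"):
    cookies["request_count"] = str(request_count + 1)
    cookies.save()  # Persist the updated count back to the browser

Because the count lives in a browser cookie, clearing cookies resets the limit, which is presumably why this commit drops the mechanism rather than hardening it.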