hruday96 committed on
Commit
4a2c354
·
verified ·
1 Parent(s): ee9f6eb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -38
app.py CHANGED
@@ -1,15 +1,8 @@
1
- import os
2
  import streamlit as st
3
  import requests
4
- from streamlit_cookies_manager import EncryptedCookieManager
5
  from openai import OpenAI
6
  import google.generativeai as genai
7
 
8
- # -----------------------------------------------------
9
- # Move st.set_page_config() to the very top
10
- # -----------------------------------------------------
11
- st.set_page_config(page_title="AI Model Comparator", layout="wide")
12
-
13
  # -----------------------------------------------------
14
  # Retrieve API keys from Hugging Face Secrets
15
  # -----------------------------------------------------
@@ -19,41 +12,32 @@ GEMINI_API_KEY = st.secrets["GEMINI_API_KEY"]
19
  # -----------------------------------------------------
20
  # Initialize OpenAI & Gemini
21
  # -----------------------------------------------------
22
- client = OpenAI(api_key=OPENAI_API_KEY)
23
  genai.configure(api_key=GEMINI_API_KEY)
24
  gemini_model = genai.GenerativeModel("gemini-pro")
25
 
26
  # -----------------------------------------------------
27
- # Secure Cookie Manager for Request Limits
28
  # -----------------------------------------------------
29
- cookies = EncryptedCookieManager(
30
- prefix="ai-model-comparator/",
31
- password=os.environ.get("COOKIES_PASSWORD", "MySecretPassword")
32
- )
33
-
34
- if not cookies.ready():
35
- st.stop() # Wait until cookies are available
36
-
37
- # Initialize request count if not set
38
- if "request_count" not in cookies:
39
- cookies["request_count"] = 0
40
-
41
- request_count = int(cookies["request_count"])
42
 
43
  # -----------------------------------------------------
44
- # Check Request Limit (Max 3 Requests Per User)
45
  # -----------------------------------------------------
46
- if request_count >= 3:
47
- st.warning("โš ๏ธ Request limit reached (3 per user). Please wait before trying again.")
48
- st.stop()
49
 
50
  # -----------------------------------------------------
51
- # Sidebar: Model Settings and Future Works Section
52
  # -----------------------------------------------------
53
  st.sidebar.header("โš™๏ธ Model Parameters")
54
  temperature = st.sidebar.slider("๐ŸŽ› Temperature", 0.0, 1.5, 0.7)
55
  max_tokens = st.sidebar.slider("๐Ÿ“ Max Tokens", 50, 1000, 500)
56
 
 
 
 
57
  with st.sidebar:
58
  st.markdown("---")
59
  st.markdown("## ๐Ÿ”ฎ Future Works: 'Prompt Tester'")
@@ -68,14 +52,9 @@ with st.sidebar:
68
  """)
69
 
70
  # -----------------------------------------------------
71
- # User Input: Prompt
72
  # -----------------------------------------------------
73
- user_prompt = st.text_area("โœ๏ธ Enter your prompt:", "Explain quantum computing in simple terms.")
74
 
75
- # -----------------------------------------------------
76
- # API Request Functions (Using st.cache_data for efficiency)
77
- # -----------------------------------------------------
78
- @st.cache_data # Replaces st.cache for caching API responses
79
  def get_openai_response(prompt):
80
  try:
81
  completion = client.chat.completions.create(
@@ -88,7 +67,6 @@ def get_openai_response(prompt):
88
  except Exception as e:
89
  return f"โŒ OpenAI error: {e}"
90
 
91
- @st.cache_data # Cache Gemini responses
92
  def get_gemini_response(prompt):
93
  try:
94
  response = gemini_model.generate_content(
@@ -107,10 +85,6 @@ def get_gemini_response(prompt):
107
  # Generate Responses when Button is Clicked
108
  # -----------------------------------------------------
109
  if st.button("๐Ÿš€ Generate Responses"):
110
- request_count += 1
111
- cookies["request_count"] = request_count # Update cookie count
112
- cookies.save() # Persist changes
113
-
114
  with st.spinner("Fetching responses..."):
115
  openai_text = get_openai_response(user_prompt)
116
  gemini_text = get_gemini_response(user_prompt)
 
 
1
  import streamlit as st
2
  import requests
 
3
  from openai import OpenAI
4
  import google.generativeai as genai
5
 
 
 
 
 
 
6
  # -----------------------------------------------------
7
  # Retrieve API keys from Hugging Face Secrets
8
  # -----------------------------------------------------
 
12
  # -----------------------------------------------------
13
  # Initialize OpenAI & Gemini
14
  # -----------------------------------------------------
15
+ client = OpenAI(api_key=OPENAI_API_KEY) # OpenAI client
16
  genai.configure(api_key=GEMINI_API_KEY)
17
  gemini_model = genai.GenerativeModel("gemini-pro")
18
 
19
  # -----------------------------------------------------
20
+ # Configure Streamlit Page
21
  # -----------------------------------------------------
22
+ st.set_page_config(page_title="AI Model Comparator", layout="wide")
23
+ st.title("๐Ÿค– AI Model Comparator")
24
+ st.subheader("Compare responses across multiple LLMs.")
 
 
 
 
 
 
 
 
 
 
25
 
26
  # -----------------------------------------------------
27
+ # User Input: Prompt
28
  # -----------------------------------------------------
29
+ user_prompt = st.text_area("โœ๏ธ Enter your prompt:", "Explain quantum computing in simple terms.")
 
 
30
 
31
  # -----------------------------------------------------
32
+ # Sidebar: Model Settings
33
  # -----------------------------------------------------
34
  st.sidebar.header("โš™๏ธ Model Parameters")
35
  temperature = st.sidebar.slider("๐ŸŽ› Temperature", 0.0, 1.5, 0.7)
36
  max_tokens = st.sidebar.slider("๐Ÿ“ Max Tokens", 50, 1000, 500)
37
 
38
+ # -----------------------------------------------------
39
+ # Sidebar Footer: Future Works Section
40
+ # -----------------------------------------------------
41
  with st.sidebar:
42
  st.markdown("---")
43
  st.markdown("## ๐Ÿ”ฎ Future Works: 'Prompt Tester'")
 
52
  """)
53
 
54
  # -----------------------------------------------------
55
+ # API Request Functions
56
  # -----------------------------------------------------
 
57
 
 
 
 
 
58
  def get_openai_response(prompt):
59
  try:
60
  completion = client.chat.completions.create(
 
67
  except Exception as e:
68
  return f"โŒ OpenAI error: {e}"
69
 
 
70
  def get_gemini_response(prompt):
71
  try:
72
  response = gemini_model.generate_content(
 
85
  # Generate Responses when Button is Clicked
86
  # -----------------------------------------------------
87
  if st.button("๐Ÿš€ Generate Responses"):
 
 
 
 
88
  with st.spinner("Fetching responses..."):
89
  openai_text = get_openai_response(user_prompt)
90
  gemini_text = get_gemini_response(user_prompt)