shukdevdatta123 committed
Commit 29cea1c · verified · 1 Parent(s): fa4be47

Update app.py

Files changed (1)
  app.py  +40 -1
app.py CHANGED
@@ -20,7 +20,7 @@ def test_api_connection(api_key):
     client = Groq(api_key=api_key)
     # Making a minimal API call to test the connection
     client.chat.completions.create(
-        model="deepseek-r1-distill-llama-70b",
+        model="llama3-70b-8192",
         messages=[{"role": "user", "content": "test"}],
         max_tokens=5
     )
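
The hunk above only swaps the model id used by the connectivity check. For context, a minimal sketch of what the surrounding `test_api_connection` helper presumably looks like is shown below; the try/except structure and the return values are assumptions, since the diff only shows the client construction and the test call itself.

```python
from groq import Groq

def test_api_connection(api_key):
    """Return (ok, message) after a minimal round trip to the Groq API."""
    # NOTE: the try/except wrapper and return values are assumptions;
    # only the client construction and the test call appear in the diff.
    try:
        client = Groq(api_key=api_key)
        # Making a minimal API call to test the connection
        client.chat.completions.create(
            model="llama3-70b-8192",
            messages=[{"role": "user", "content": "test"}],
            max_tokens=5,
        )
        return True, "API connection successful"
    except Exception as e:
        return False, f"API connection failed: {e}"
```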
@@ -92,6 +92,45 @@ models = [
 # Create the Gradio interface
 with gr.Blocks(title="Groq AI Chat Interface") as app:
     gr.Markdown("# Groq AI Chat Interface")
+
+    # New model information accordion
+    with gr.Accordion("ℹ️ Model Information - Learn about available models", open=False):
+        gr.Markdown("""
+        ### Available Models and Use Cases
+
+        **llama3-70b-8192**
+        - Meta's most powerful language model
+        - 70 billion parameters with 8192 token context window
+        - Best for: Complex reasoning, sophisticated content generation, creative writing, and detailed analysis
+        - Optimal for users needing the highest quality AI responses
+
+        **llama3-8b-8192**
+        - Lighter version of Llama 3
+        - 8 billion parameters with 8192 token context window
+        - Best for: Faster responses, everyday tasks, simpler queries
+        - Good balance between performance and speed
+
+        **mistral-saba-24b**
+        - Mistral AI's advanced model
+        - 24 billion parameters
+        - Best for: High-quality reasoning, code generation, and structured outputs
+        - Excellent for technical and professional use cases
+
+        **gemma2-9b-it**
+        - Google's instruction-tuned model
+        - 9 billion parameters
+        - Best for: Following specific instructions, educational content, and general knowledge queries
+        - Well-rounded performance for various tasks
+
+        **allam-2-7b**
+        - Arabic-focused model from SDAIA
+        - 7 billion parameters
+        - Best for: Multilingual support, concise responses, and straightforward Q&A
+        - Good for international users and simpler applications
+
+        *Note: Larger models generally provide higher quality responses but may take slightly longer to generate.*
+        """)
+
     gr.Markdown("Enter your Groq API key to start chatting with AI models.")

     with gr.Row():
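
The hunk header above references a `models` list defined earlier in app.py, but the diff does not show how that list is consumed. The sketch below shows one plausible way the list and the new accordion plug into the rest of the Blocks layout; the dropdown, its labels, and the row contents are assumptions, not part of this commit.

```python
import gradio as gr

# Hypothetical wiring: the diff shows `models = [` and the accordion,
# but not the model selector itself, so this dropdown is an assumption.
models = [
    "llama3-70b-8192",
    "llama3-8b-8192",
    "mistral-saba-24b",
    "gemma2-9b-it",
    "allam-2-7b",
]

with gr.Blocks(title="Groq AI Chat Interface") as app:
    gr.Markdown("# Groq AI Chat Interface")

    # Collapsed by default, matching the accordion added in this commit
    with gr.Accordion("ℹ️ Model Information - Learn about available models", open=False):
        gr.Markdown("See the model notes added in this commit.")

    gr.Markdown("Enter your Groq API key to start chatting with AI models.")

    with gr.Row():
        api_key_input = gr.Textbox(label="Groq API Key", type="password")
        model_dropdown = gr.Dropdown(choices=models, value=models[0], label="Model")
```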