GIGAParviz committed
Commit 9c780ef · verified · 1 Parent(s): b344778

Update app.py

Files changed (1): app.py (+18 -5)
app.py CHANGED
@@ -1,12 +1,13 @@
 import gradio as gr
 from groq import Groq
+from transformers import TextStreamer
 
-
+# Initialize the Groq client with your API key
 client = Groq(
     api_key=("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"),
 )
 
-
+# Define the function that will generate responses
 def generate_response(input_text):
     chat_completion = client.chat.completions.create(
         messages=[
@@ -17,14 +18,26 @@ def generate_response(input_text):
         ],
         model="llama3-8b-8192",
     )
-    return chat_completion.choices[0].message.content
+
+    # Set up TextStreamer for streaming the response
+    streamer = TextStreamer(client=client)
+
+    # Stream the assistant's response
+    response = ""
+    for chunk in chat_completion.choices[0].message.content:
+        response += chunk
+        streamer.write(response)  # Stream each chunk of text
+
+    return response
 
+# Create the Gradio interface
 iface = gr.Interface(
     fn=generate_response,
     inputs="text",
     outputs="text",
-    title="Parviz Chatbot",
-    description="ye chi bepors",
+    title="Fast Language Model Chatbot with Streaming",
+    description="Ask any question and receive a response from the Groq API using the llama3-8b-8192 model, streamed in real-time.",
 )
 
+# Launch the interface
 iface.launch()
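
Note on the streaming change: transformers.TextStreamer is designed to decode token IDs produced by a local model.generate() call; it takes a tokenizer rather than an API client and does not expose a write() method, and the added loop iterates over an already-complete response string one character at a time. A version that streams end to end using only the libraries the Space already depends on could rely on the Groq SDK's stream=True option together with Gradio's support for generator functions. The sketch below is illustrative and not part of this commit; reading the key from a GROQ_API_KEY environment variable is an assumed replacement for the hard-coded key.

import os

import gradio as gr
from groq import Groq

# Assumes the API key is provided as a Space secret / environment variable, not committed to the repo.
client = Groq(api_key=os.environ["GROQ_API_KEY"])

def generate_response(input_text):
    # Request a streamed completion; the API returns small delta chunks as they are generated.
    stream = client.chat.completions.create(
        messages=[{"role": "user", "content": input_text}],
        model="llama3-8b-8192",
        stream=True,
    )
    partial = ""
    for chunk in stream:
        partial += chunk.choices[0].delta.content or ""  # final chunk may carry no content
        yield partial  # Gradio re-renders the text output on every yield

iface = gr.Interface(
    fn=generate_response,
    inputs="text",
    outputs="text",
    title="Fast Language Model Chatbot with Streaming",
    description="Ask any question and receive a response from the Groq API using the llama3-8b-8192 model, streamed in real-time.",
)

iface.launch()

Because the function is a generator, Gradio updates the output box with each yielded partial string, so no additional streaming helper is needed on top of the SDK.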