MRasheq committed
Commit 23dc761 · 1 Parent(s): d95858e

First Commit

Files changed (1): app.py (+49 -33)
app.py CHANGED
@@ -1,11 +1,8 @@
 import gradio as gr
-from huggingface_hub import InferenceClient
-
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
+from transformers import pipeline
 
+# Initialize the text-generation pipeline
+pipe = pipeline("text-generation", model="deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B")
 
 def respond(
     message,
@@ -15,50 +12,65 @@ def respond(
     temperature,
     top_p,
 ):
+    # Format the conversation history
     messages = [{"role": "system", "content": system_message}]
 
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
-
+    for user_msg, assistant_msg in history:
+        if user_msg:
+            messages.append({"role": "user", "content": user_msg})
+        if assistant_msg:
+            messages.append({"role": "assistant", "content": assistant_msg})
+
+    # Add the current message
     messages.append({"role": "user", "content": message})
 
-    response = ""
-
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
-
-        response += token
-        yield response
+    # Convert messages to a single string format that the model can understand
+    prompt = "\n".join([f"{msg['role']}: {msg['content']}" for msg in messages])
+
+    # Generate the response in a single pass (transformers pipelines do not
+    # stream output) and yield the full reply text
+    output = pipe(
+        prompt,
+        max_new_tokens=max_tokens,
+        do_sample=True,
+        temperature=temperature,
+        top_p=top_p,
+        return_full_text=False,
+    )
+    yield output[0]["generated_text"]
 
-
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
+# Create the Gradio interface
 demo = gr.ChatInterface(
     respond,
     additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
+        gr.Textbox(
+            value="You are a friendly and helpful assistant.",
+            label="System message"
+        ),
+        gr.Slider(
+            minimum=1,
+            maximum=2048,
+            value=512,
+            step=1,
+            label="Max new tokens"
+        ),
+        gr.Slider(
+            minimum=0.1,
+            maximum=4.0,
+            value=0.7,
+            step=0.1,
+            label="Temperature"
+        ),
         gr.Slider(
             minimum=0.1,
             maximum=1.0,
             value=0.95,
             step=0.05,
-            label="Top-p (nucleus sampling)",
+            label="Top-p (nucleus sampling)"
        ),
     ],
+    title="DeepSeek Chat Interface",
+    description="Chat with the DeepSeek-R1-Distill-Qwen-1.5B model",
 )
 
-
+# Launch the interface
 if __name__ == "__main__":
     demo.launch()
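Note on streaming: transformers pipelines return the whole generation at once, which is why respond above yields the reply in a single pass. If token-by-token streaming is wanted, the usual pattern is a TextIteratorStreamer draining model.generate on a worker thread. A minimal sketch under that assumption, reusing the same checkpoint (stream_reply is an illustrative stand-in for the body of respond):

from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

model_id = "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

def stream_reply(prompt, max_tokens, temperature, top_p):
    inputs = tokenizer(prompt, return_tensors="pt")
    # The streamer yields decoded text chunks as generate() produces tokens
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    generation_kwargs = dict(
        **inputs,
        streamer=streamer,
        max_new_tokens=max_tokens,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
    )
    # generate() blocks, so run it on a background thread and drain the streamer
    Thread(target=model.generate, kwargs=generation_kwargs).start()
    response = ""
    for chunk in streamer:
        response += chunk
        yield response  # gr.ChatInterface expects the cumulative message so far

Older Gradio releases need demo.queue() before launch() for generator callbacks like this; recent versions enable queuing by default.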
 
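Note on prompt formatting: joining messages as plain role: content lines works, but chat-tuned checkpoints such as the DeepSeek-R1 distills ship a chat template that inserts the special tokens they were trained with. A sketch of the template route, assuming a recent transformers release:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B")

messages = [
    {"role": "user", "content": "What does this Space do?"},
]

# tokenize=False returns the formatted prompt string; add_generation_prompt
# appends the tokens that cue the assistant's turn
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(prompt)

Recent transformers releases also let the text-generation pipeline accept the messages list directly, applying the template internally.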