rayl-aoit commited on
Commit
19f514f
·
verified ·
1 Parent(s): ba68858

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +49 -0
app.py CHANGED
@@ -1,9 +1,11 @@
1
  import gradio as gr
2
  from transformers import pipeline
 
3
  # from IPython.display import Audio as IPythonAudio
4
 
5
  playground = gr.Blocks()
6
 
 
7
  image_pipe = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
8
  summary_pipe = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
9
  ner_pipe = pipeline("ner", model="dslim/bert-base-NER")
@@ -19,6 +21,32 @@ ner_pipe = pipeline("ner", model="dslim/bert-base-NER")
19
  # audio = IPythonAudio(audio_data, rate=sampling_rate)
20
  # return audio_data, sampling_rate
21
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
22
  def launch_image_pipe(input):
23
  out = image_pipe(input)
24
  text = out[0]['generated_text']
@@ -163,6 +191,27 @@ with playground:
163
  ], inputs=[ner_text_input], outputs=[ner_text_output], run_on_click=True, cache_examples=True, fn=ner)
164
 
165
  ner_pipeline_button.click(ner, inputs=[ner_text_input], outputs=[ner_text_output])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
166
 
167
  create_playground_footer()
168
 
 
1
  import gradio as gr
2
  from transformers import pipeline
3
+ from huggingface_hub import InferenceClient
4
  # from IPython.display import Audio as IPythonAudio
5
 
6
  playground = gr.Blocks()
7
 
8
+ client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
9
  image_pipe = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
10
  summary_pipe = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
11
  ner_pipe = pipeline("ner", model="dslim/bert-base-NER")
 
21
  # audio = IPythonAudio(audio_data, rate=sampling_rate)
22
  # return audio_data, sampling_rate
23
 
24
def respond(message, history: list[tuple[str, str]], system_message, max_tokens, temperature, top_p):
    """Stream a chat reply for the Gradio ChatInterface.

    Builds an OpenAI-style message list from the system prompt, the prior
    (user, assistant) turns, and the new user message, then streams tokens
    from the module-level ``client`` (InferenceClient), yielding the
    progressively accumulated reply so the UI updates live.

    Args:
        message: The new user message.
        history: Prior turns as (user_text, assistant_text) pairs.
        system_message: System prompt prepended to the conversation.
        max_tokens: Maximum number of new tokens to generate.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling probability mass.

    Yields:
        str: The assistant reply accumulated so far.
    """
    messages = [{"role": "system", "content": system_message}]

    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})

    messages.append({"role": "user", "content": message})

    response = ""

    # NOTE: loop variable renamed from `message` (original shadowed the
    # function parameter of the same name).
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        # Terminal stream chunks can carry delta.content = None; guard so
        # `response += token` cannot raise TypeError.
        if token:
            response += token
        yield response
50
  def launch_image_pipe(input):
51
  out = image_pipe(input)
52
  text = out[0]['generated_text']
 
191
  ], inputs=[ner_text_input], outputs=[ner_text_output], run_on_click=True, cache_examples=True, fn=ner)
192
 
193
  ner_pipeline_button.click(ner, inputs=[ner_text_input], outputs=[ner_text_output])
194
+
195
+ ## ================================================================================================================================
196
+ ## Chatbot (streaming chat via InferenceClient)
197
+ ## ================================================================================================================================
198
+ with gr.TabItem("Chatbot"):
199
+ gr.ChatInterface(
200
+ respond,
201
+ additional_inputs=[
202
+ gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
203
+ gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
204
+ gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
205
+ gr.Slider(
206
+ minimum=0.1,
207
+ maximum=1.0,
208
+ value=0.95,
209
+ step=0.05,
210
+ label="Top-p (nucleus sampling)",
211
+ ),
212
+ ],
213
+ )
214
+
215
 
216
  create_playground_footer()
217