youzarsiph committed
Commit 532bc0e · 1 Parent(s): dae0e0e
Files changed (2)
  1. README.md +2 -2
  2. app.py +16 -10
README.md CHANGED
@@ -1,7 +1,7 @@
 ---
 title: Chatbot
 emoji: 💬
-colorFrom: yellow
+colorFrom: blue
 colorTo: purple
 sdk: gradio
 app_file: app.py
@@ -9,4 +9,4 @@ pinned: false
 license: mit
 ---
 
-An example chatbot using [Gradio](https://gradio.app), [`huggingface_hub`](https://huggingface.co/docs/huggingface_hub/v0.22.2/en/index), and the [Hugging Face Inference API](https://huggingface.co/docs/api-inference/index).
+An example chatbot using [Gradio](https://gradio.app), [`huggingface_hub`](https://huggingface.co/docs/huggingface_hub/v0.22.2/en/index), and the [Hugging Face Inference API](https://huggingface.co/docs/api-inference/index).
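
The README line above names the three pieces the Space is built from: Gradio for the chat UI, `huggingface_hub` as the client library, and the Hugging Face Inference API serving the model. For orientation only, here is a minimal sketch of calling the same hosted model directly through `InferenceClient.chat_completion`; the prompt text and sampling values are illustrative assumptions, not taken from this repository.

```python
from huggingface_hub import InferenceClient

# The model this Space points at; the request is served by the
# Hugging Face Inference API, not a local checkpoint.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

# Illustrative prompt and sampling parameters (assumed, not from the repo).
result = client.chat_completion(
    messages=[
        {"role": "system", "content": "You are a friendly chatbot."},
        {"role": "user", "content": "Hello!"},
    ],
    max_tokens=64,
    temperature=0.7,
    top_p=0.95,
)
print(result.choices[0].message.content)
```

Depending on rate limits or model gating, you may need to authenticate by passing a token, e.g. `InferenceClient("HuggingFaceH4/zephyr-7b-beta", token=...)`.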
app.py CHANGED
@@ -1,19 +1,21 @@
+"""
+For more information on `huggingface_hub` Inference API support,
+please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
+"""
+
 import gradio as gr
 from huggingface_hub import InferenceClient
 
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
 client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
 
 def respond(
-    message,
+    message: str,
     history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
+    system_message: str,
+    max_tokens: int,
+    temperature: float,
+    top_p: float,
 ):
     messages = [{"role": "system", "content": system_message}]
 
@@ -39,9 +41,13 @@ def respond(
         response += token
         yield response
 
+
 """
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
+For information on how to customize the ChatInterface,
+peruse the gradio docs: https://www.gradio.app/docs/chatinterface
 """
+
+
 demo = gr.ChatInterface(
     respond,
     additional_inputs=[
@@ -60,4 +66,4 @@ demo = gr.ChatInterface(
 
 
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch()
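
The hunks above show only the edges of `respond` and of the `gr.ChatInterface` call; the history-folding loop, the streaming request, and the `additional_inputs` list are unchanged context that the diff omits. Below is a hedged sketch of how those pieces are typically wired with the APIs visible here (`InferenceClient.chat_completion` with `stream=True`, plus Gradio `Textbox`/`Slider` inputs matching the typed parameters of `respond`); it is a plausible reconstruction, not the file's exact contents.

```python
import gradio as gr
from huggingface_hub import InferenceClient

client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")


def respond(
    message: str,
    history: list[tuple[str, str]],
    system_message: str,
    max_tokens: int,
    temperature: float,
    top_p: float,
):
    # Fold the (user, assistant) history tuples into an OpenAI-style message list.
    messages = [{"role": "system", "content": system_message}]
    for user_msg, bot_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if bot_msg:
            messages.append({"role": "assistant", "content": bot_msg})
    messages.append({"role": "user", "content": message})

    # Stream tokens so the ChatInterface can render the reply as it is generated.
    response = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content or ""
        response += token
        yield response


# Labels and default values here are assumptions; the Space's actual
# additional_inputs may differ.
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
)


if __name__ == "__main__":
    demo.launch()
```

Returning partial strings from a generator is what lets `gr.ChatInterface` update the chat bubble token by token instead of waiting for the full completion.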