veechan committed
Commit 640a262 · 1 Parent(s): bf8527d

change the template from chat to text generation

Files changed (1)
  1. app.py +11 -19
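The commit swaps huggingface_hub's chat-completion API for its plain text-generation API. As a quick reference for reading the diff below, here is a minimal sketch of the two call styles against the same InferenceClient used in app.py: chat_completion takes a structured messages list and streams chunks carrying a .delta, while text_generation takes one flat prompt string and streams raw token strings.

from huggingface_hub import InferenceClient

client = InferenceClient("veechan/gpt-neo-1.3B-platypus-finetuned")

# Old style: structured chat messages; each streamed chunk carries a delta
for chunk in client.chat_completion(
    [{"role": "user", "content": "Hello"}],
    max_tokens=64,
    stream=True,
):
    print(chunk.choices[0].delta.content or "", end="")

# New style: one flat prompt string; the stream yields plain token strings
for token in client.text_generation(
    "Human: Hello\nAI:",
    max_new_tokens=64,
    stream=True,
):
    print(token, end="")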
app.py CHANGED
@@ -1,12 +1,13 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
+import spaces
 
 """
 For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
 """
 client = InferenceClient("veechan/gpt-neo-1.3B-platypus-finetuned")
 
-
+@spaces.GPU
 def respond(
     message,
     history: list[tuple[str, str]],
@@ -15,33 +16,25 @@ def respond(
     temperature,
     top_p,
 ):
-    messages = [{"role": "system", "content": system_message}]
-
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
-
-    messages.append({"role": "user", "content": message})
+    # Construct the prompt
+    prompt = system_message + "\n\n"
+    for user_msg, bot_msg in history:
+        prompt += f"Human: {user_msg}\nAI: {bot_msg}\n"
+    prompt += f"Human: {message}\nAI:"
 
     response = ""
 
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
+    for token in client.text_generation(
+        prompt,
+        model=model,
+        max_new_tokens=max_tokens,
         stream=True,
        temperature=temperature,
         top_p=top_p,
     ):
-        token = message.choices[0].delta.content
-
         response += token
         yield response
 
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
 demo = gr.ChatInterface(
     respond,
     additional_inputs=[
@@ -58,6 +51,5 @@ demo = gr.ChatInterface(
     ],
 )
 
-
 if __name__ == "__main__":
     demo.launch()
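One detail worth flagging in the new version: client.text_generation is passed model=model, but no model variable appears to be defined anywhere in app.py, so respond would raise a NameError on its first call. Since the InferenceClient is already constructed with the model ID, that argument can simply be dropped. A minimal corrected sketch of the new respond generator under that assumption:

from huggingface_hub import InferenceClient

client = InferenceClient("veechan/gpt-neo-1.3B-platypus-finetuned")

def respond(message, history, system_message, max_tokens, temperature, top_p):
    # Flatten the system message and chat history into a Human:/AI: prompt
    prompt = system_message + "\n\n"
    for user_msg, bot_msg in history:
        prompt += f"Human: {user_msg}\nAI: {bot_msg}\n"
    prompt += f"Human: {message}\nAI:"

    response = ""
    # Stream tokens; the target model comes from the client constructor
    for token in client.text_generation(
        prompt,
        max_new_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        response += token
        yield response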