dar-tau committed on
Commit b818b3f · verified · 1 Parent(s): 38ede89

Update app.py

Files changed (1)
  app.py +3 -4
app.py CHANGED
@@ -1,7 +1,7 @@
 import os
 import gradio as gr
 import spaces
-from transformers import AutoTokenizer, AutoModel
+from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 
 
 model_name = "teknium/OpenHermes-2.5-Mistral-7B"
@@ -20,9 +20,8 @@ Suggest only up to 5 works ahead.
 
 @spaces.GPU
 def generate(text):
-    data = [
+    messages = [
         {'role': 'system', 'content': system_prompt},
         {'role': 'user', 'content': text}
     ]
-    tokenized = tokenizer.apply_chat_template(data, return_tensors='pt')
-    return tokenizer.deocode(model.generate(**tokenized).squeeze(0))
+    return pipe(messages)
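
For context, a minimal sketch of how app.py plausibly reads after this commit. The hunks above do not show how pipe is constructed, what system_prompt contains, or the Gradio wiring, so those parts are assumptions added only to make the sketch self-contained, not the Space's actual code.

# Hypothetical reconstruction of app.py after this commit. The pipeline
# construction, system_prompt value, and Gradio launch are NOT in the diff;
# they are illustrative placeholders.
import os
import gradio as gr
import spaces
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

model_name = "teknium/OpenHermes-2.5-Mistral-7B"
system_prompt = "..."  # the real prompt lives in the unchanged lines of app.py

# Assumed: a text-generation pipeline built from model_name. Recent
# transformers releases let such a pipeline accept chat-style message lists
# and apply the model's chat template internally.
pipe = pipeline("text-generation", model=model_name)

@spaces.GPU
def generate(text):
    messages = [
        {'role': 'system', 'content': system_prompt},
        {'role': 'user', 'content': text}
    ]
    # Returns the pipeline's raw output, a list of dicts with a
    # "generated_text" field, exactly as the new return line does.
    return pipe(messages)

# Assumed Gradio wiring (outside this diff):
# gr.Interface(fn=generate, inputs="text", outputs="text").launch()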