prithivMLmods committed on
Commit
6693a45
·
verified ·
1 Parent(s): 2c8490a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -2
app.py CHANGED
@@ -33,11 +33,12 @@ def generate_reply(model, tokenizer, prompt, max_length, do_sample):
33
  )
34
  return tokenizer.decode(output_tokens[0], skip_special_tokens=True)
35
 
36
- @spaces.GPU
37
  def main():
38
  args, _ = get_args()
39
  model, tokenizer = load_model(args.model)
40
-
 
41
  def respond(user_message, chat_history):
42
  """
43
  Gradio expects a function that takes the last user message and the
 
33
  )
34
  return tokenizer.decode(output_tokens[0], skip_special_tokens=True)
35
 
36
+
37
  def main():
38
  args, _ = get_args()
39
  model, tokenizer = load_model(args.model)
40
+
41
+ @spaces.GPU
42
  def respond(user_message, chat_history):
43
  """
44
  Gradio expects a function that takes the last user message and the