lagy committed on
Commit
48e043b
·
verified ·
1 Parent(s): 7f4f3b5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -3
app.py CHANGED
@@ -28,7 +28,7 @@ Escribe unha resposta que responda adecuadamente a entrada.
28
  return instruction
29
 
30
 
31
- def generate(model,tokenizer,instruction,context):
32
  prompt = generate_and_tokenize_instruction({'instruction':instruction,'input':context,'output':''},return_tensors="pt")
33
  input_ids = prompt["input_ids"]#.to('cuda:0')
34
  generation_output = model.generate(
@@ -40,7 +40,8 @@ def generate(model,tokenizer,instruction,context):
40
  temperature=temperature,
41
  top_k=10,
42
  repetition_penalty=repetition_penalty,
43
- eos_token_id=tokenizer.eos_token_id
 
44
  )
45
  outputs = []
46
  for o in generation_output.sequences:
@@ -64,8 +65,9 @@ model.eval()
64
 
65
  if st.button('Generate'):
66
  st.write("Generating...")
67
- thread = Thread(target=lambda: generate(model,instruction,context))
68
  streamer = TextIteratorStreamer(tokenizer)
 
 
69
 
70
  thread.start()
71
  print("Generating")
 
28
  return instruction
29
 
30
 
31
+ def generate(model,tokenizer,instruction,context,streamer):
32
  prompt = generate_and_tokenize_instruction({'instruction':instruction,'input':context,'output':''},return_tensors="pt")
33
  input_ids = prompt["input_ids"]#.to('cuda:0')
34
  generation_output = model.generate(
 
40
  temperature=temperature,
41
  top_k=10,
42
  repetition_penalty=repetition_penalty,
43
+ eos_token_id=tokenizer.eos_token_id,
44
+ streamer=streamer
45
  )
46
  outputs = []
47
  for o in generation_output.sequences:
 
65
 
66
  if st.button('Generate'):
67
  st.write("Generating...")
 
68
  streamer = TextIteratorStreamer(tokenizer)
69
+ thread = Thread(target=lambda: generate(model,instruction,context,streamer))
70
+
71
 
72
  thread.start()
73
  print("Generating")