lagy committed
Commit 7f4f3b5 · verified · 1 Parent(s): 4f15ce0

Update app.py

Files changed (1):
  app.py +21 -7
app.py CHANGED
@@ -4,6 +4,8 @@ import torch
 
 from transformers import AutoTokenizer, GenerationConfig
 from transformers import AutoModelForCausalLM
+from transformers import TextIteratorStreamer
+from threading import Thread
 import numpy as np
 
 temperature = 1.0
@@ -54,14 +56,26 @@ context = st.text_area('Contexto')
 max_length = st.number_input('Max generation length',value=52)
 
 
-if (model == None):
-    model = AutoModelForCausalLM.from_pretrained("lagy/carballo_finetuned")
-    tokenizer = AutoTokenizer.from_pretrained("lagy/carballo_finetuned")
-    model.eval()
+model = AutoModelForCausalLM.from_pretrained("lagy/carballo_finetuned")
+tokenizer = AutoTokenizer.from_pretrained("lagy/carballo_finetuned")
+model.eval()
+
+
 
 if st.button('Generate'):
-    #st.json(out)
     st.write("Generating...")
-    output = generate(model,tokenizer,instruction,context)
-    st.write(output)
+    thread = Thread(target=lambda: generate(model,instruction,context))
+    streamer = TextIteratorStreamer(tokenizer)
+
+    thread.start()
+    print("Generating")
+    text = ""
+    for new_text in streamer:
+        text += new_text
+        st.write(text)
+
+    #st.json(out)
+    #st.write("Generating...")
+    #output = generate(model,tokenizer,instruction,context)
+
 
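For context, the usual TextIteratorStreamer flow constructs the streamer first, passes it into model.generate, and only then starts the worker thread. In the committed code the streamer is never handed to generate (whose body is outside this diff), so it presumably relies on a global. The sketch below is a minimal, hypothetical version of the standard pattern, calling model.generate directly rather than the app's own generate helper:

# Hypothetical sketch of the standard TextIteratorStreamer pattern; the app's
# generate() helper is not shown in this diff, so model.generate is used directly.
from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

model = AutoModelForCausalLM.from_pretrained("lagy/carballo_finetuned")
tokenizer = AutoTokenizer.from_pretrained("lagy/carballo_finetuned")
model.eval()

def stream_generate(prompt, max_new_tokens=52):
    # Create the streamer before starting the thread so generate() can write
    # into it; skip_prompt=True keeps the echoed prompt out of the output.
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
    inputs = tokenizer(prompt, return_tensors="pt")
    # model.generate blocks until finished, so it runs in a worker thread
    # while the caller drains decoded text chunks from the streamer.
    thread = Thread(
        target=model.generate,
        kwargs=dict(**inputs, streamer=streamer, max_new_tokens=max_new_tokens),
    )
    thread.start()
    text = ""
    for new_text in streamer:  # yields decoded text incrementally
        text += new_text
        yield text
    thread.join()

On the Streamlit side, writing each partial string into a single st.empty() placeholder (placeholder.write(text)) would update one element in place, whereas calling st.write(text) on every iteration, as the committed loop does, appends a new element per chunk.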