Update app.py
app.py CHANGED
@@ -37,7 +37,8 @@ def generate(model,tokenizer,instruction,context):
         max_new_tokens=max_length,
         temperature=temperature,
         top_k=10,
-        repetition_penalty=repetition_penalty
+        repetition_penalty=repetition_penalty,
+        eos_token_id=tokenizer.eos_token_id
     )
     outputs = []
     for o in generation_output.sequences:
@@ -50,15 +51,15 @@ st.write(" ")
 
 instruction = st.text_area('Instrucción')
 context = st.text_area('Contexto')
-max_length = st.number_input('Max generation length')
+max_length = st.number_input('Max generation length',min=10,value=52)
 
 
+if (model == None):
+    model = AutoModelForCausalLM.from_pretrained("lagy/carballo_finetuned")
+    tokenizer = AutoTokenizer.from_pretrained("lagy/carballo_finetuned")
+    model.eval()
 
-
-tokenizer = AutoTokenizer.from_pretrained("lagy/carballo_finetuned")
-model.eval()
-
-if st.button('Generate') and max_length and instruction:
+if st.button('Generate'):
     #st.json(out)
     st.write("Generating...")
     output = generate(model,tokenizer,instruction,context)
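
For context, below is a minimal sketch of how the touched parts of app.py could look after this commit. Anything the diff does not show is an assumption: the prompt format, the default values for temperature and repetition_penalty, the do_sample/return_dict_in_generate flags implied by iterating generation_output.sequences, and the use of st.cache_resource instead of the `model == None` guard so the model survives Streamlit reruns. Note also that st.number_input takes min_value rather than min.

# Hypothetical sketch of app.py after this change; not the exact file in the Space.
import streamlit as st
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_ID = "lagy/carballo_finetuned"

@st.cache_resource  # load the model once per process instead of on every rerun (assumption)
def load_model():
    tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
    model = AutoModelForCausalLM.from_pretrained(MODEL_ID)
    model.eval()
    return model, tokenizer

def generate(model, tokenizer, instruction, context,
             max_length=52, temperature=0.7, repetition_penalty=1.2):
    # The prompt format and sampling defaults are assumptions; the diff does not show them.
    prompt = f"{instruction}\n{context}" if context else instruction
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids
    with torch.no_grad():
        generation_output = model.generate(
            input_ids,
            do_sample=True,
            max_new_tokens=max_length,
            temperature=temperature,
            top_k=10,
            repetition_penalty=repetition_penalty,
            eos_token_id=tokenizer.eos_token_id,  # stop at end-of-sequence, as added in this commit
            return_dict_in_generate=True,
        )
    outputs = []
    for o in generation_output.sequences:
        outputs.append(tokenizer.decode(o, skip_special_tokens=True))
    return outputs

instruction = st.text_area('Instrucción')
context = st.text_area('Contexto')
# Streamlit's keyword is min_value, not min
max_length = st.number_input('Max generation length', min_value=10, value=52)

model, tokenizer = load_model()

if st.button('Generate'):
    st.write("Generating...")
    output = generate(model, tokenizer, instruction, context, max_length=int(max_length))
    st.write(output)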