Update app.py
Browse files
app.py
CHANGED
@@ -34,14 +34,14 @@ def load_keybert_model():
|
|
34 |
@st.cache_resource
|
35 |
def load_flant5_pipeline():
|
36 |
# Explicitly load the Seq2Seq model & tokenizer to avoid truncation/classification fallback
|
37 |
-
seq_tok = AutoTokenizer.from_pretrained("google/flan-t5-
|
38 |
-
seq_model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-
|
39 |
return pipeline(
|
40 |
"text2text-generation",
|
41 |
model=seq_model,
|
42 |
tokenizer=seq_tok,
|
43 |
-
# ensure we generate up to
|
44 |
-
max_new_tokens=
|
45 |
do_sample=True,
|
46 |
temperature=0.7
|
47 |
)
|
|
|
@st.cache_resource
def load_flant5_pipeline():
    """Build and cache a FLAN-T5-large text2text-generation pipeline.

    The tokenizer and Seq2Seq model are loaded explicitly (instead of letting
    ``pipeline`` resolve them from a bare model name) to avoid the
    truncation/classification fallback the original comment warns about.
    ``@st.cache_resource`` makes Streamlit construct this once per process
    and reuse it across reruns.

    Returns:
        A Hugging Face ``pipeline`` for the ``"text2text-generation"`` task,
        configured to sample (``temperature=0.7``) and to emit at most
        400 newly generated tokens per call.
    """
    # Explicitly load the Seq2Seq model & tokenizer to avoid truncation/classification fallback
    seq_tok = AutoTokenizer.from_pretrained("google/flan-t5-large")
    seq_model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-large")
    return pipeline(
        "text2text-generation",
        model=seq_model,
        tokenizer=seq_tok,
        # ensure we generate up to 400 new tokens
        max_new_tokens=400,
        do_sample=True,
        temperature=0.7
    )