Update app.py
app.py
CHANGED
@@ -13,14 +13,13 @@ def generate_text(prompt):
     inputs = tokenizer(prompt, return_tensors="pt", truncation=True, padding=True)
     outputs = model.generate(
         inputs["input_ids"],
-        attention_mask=inputs["attention_mask"],  # Add attention mask
-        max_length=50,  # Reduce max_length to conserve memory
+        #attention_mask=inputs["attention_mask"],  # Add attention mask
         num_return_sequences=1
     )
     return tokenizer.decode(outputs[0], skip_special_tokens=True)
 
 # Create the Gradio interface
-iface = gr.
+iface = gr.ChatInterface(
     fn=generate_text,
     inputs=gr.Textbox(label="Enter your prompt", placeholder="Start typing...", lines=5),
     outputs="text",