Update app.py
app.py
CHANGED
@@ -60,10 +60,10 @@ def get_response(prompt, model_name, user_type):
 
     # Define different prompt templates based on user type
     user_type_templates = {
-        "Professional
-        "Home Cook": f"As a home cook, {prompt}\nAnswer:",
+        "Professional": f"As a professional chef, {prompt}\nAnswer:",
         "Beginner": f"Explain in simple terms: {prompt}\nAnswer:",
-        "
+        "Intermediate": f"As an intermediate cook, {prompt}\nAnswer:",
+        "Expert": f"As an expert chef, {prompt}\nAnswer:"
     }
 
     # Get the appropriate prompt based on user type
@@ -74,11 +74,11 @@ def get_response(prompt, model_name, user_type):
         return_tensors='pt',
         padding=True,
         truncation=True,
-        max_length=
+        max_length=512  # Increased max length for larger inputs
     ).to(device)
 
-    #
-    max_new_tokens =
+    # Increase max_new_tokens for longer responses
+    max_new_tokens = 200  # Increase this to allow more content in response
 
     with torch.no_grad():
         output = model.generate(
@@ -193,10 +193,10 @@ body {
     response = gr.Textbox(
         label="Response",
         placeholder="Your answer will appear here...",
-        lines=
+        lines=15,  # Increased lines for a longer response display
         interactive=False,
         show_copy_button=True,
-        max_lines=
+        max_lines=20  # Allow for more lines if the response is lengthy
     )
 
     submit_button.click(fn=process_input, inputs=[prompt, model_name, user_type], outputs=response)
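For context, here is a minimal sketch of how the changed pieces fit together in app.py. Only the template dictionary, the tokenizer arguments (max_length=512), max_new_tokens=200, and the Textbox settings come from the diff above; the model loading, the pad-token fallback, the process_input wrapper body, the input widgets, and the "gpt2" default are assumptions made for illustration.

# Sketch only: loading, widget layout, and defaults are assumed, not taken from the diff.
import torch
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" if torch.cuda.is_available() else "cpu"

def get_response(prompt, model_name, user_type):
    # Assumed per-call loading; the real app may cache or pre-load models.
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name).to(device)
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token  # needed for padding=True on GPT-style models

    # Prompt templates keyed by user type (new version from the diff)
    user_type_templates = {
        "Professional": f"As a professional chef, {prompt}\nAnswer:",
        "Beginner": f"Explain in simple terms: {prompt}\nAnswer:",
        "Intermediate": f"As an intermediate cook, {prompt}\nAnswer:",
        "Expert": f"As an expert chef, {prompt}\nAnswer:",
    }
    full_prompt = user_type_templates.get(user_type, prompt)

    inputs = tokenizer(
        full_prompt,
        return_tensors='pt',
        padding=True,
        truncation=True,
        max_length=512,  # increased input budget, per the diff
    ).to(device)

    with torch.no_grad():
        output = model.generate(**inputs, max_new_tokens=200)  # longer responses, per the diff
    return tokenizer.decode(output[0], skip_special_tokens=True)

def process_input(prompt, model_name, user_type):
    # Thin wrapper matching the callback name used in the diff; the real one may do more.
    return get_response(prompt, model_name, user_type)

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Your question")                 # assumed input widget
    model_name = gr.Textbox(label="Model name", value="gpt2")  # assumed default model
    user_type = gr.Dropdown(
        ["Professional", "Beginner", "Intermediate", "Expert"],
        label="User type",
        value="Beginner",
    )
    submit_button = gr.Button("Submit")
    response = gr.Textbox(
        label="Response",
        placeholder="Your answer will appear here...",
        lines=15,       # taller default display, per the diff
        interactive=False,
        show_copy_button=True,
        max_lines=20,   # allow lengthy responses to scroll, per the diff
    )
    submit_button.click(fn=process_input, inputs=[prompt, model_name, user_type], outputs=response)

demo.launch()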