Poonawala committed
Commit a6f008b · verified · 1 Parent(s): c3d7f6e

Update app.py

Files changed (1): app.py +8 -8
app.py CHANGED
@@ -60,10 +60,10 @@ def get_response(prompt, model_name, user_type):
 
     # Define different prompt templates based on user type
     user_type_templates = {
-        "Professional Chef": f"As a professional chef, {prompt}\nAnswer:",
-        "Home Cook": f"As a home cook, {prompt}\nAnswer:",
+        "Professional": f"As a professional chef, {prompt}\nAnswer:",
         "Beginner": f"Explain in simple terms: {prompt}\nAnswer:",
-        "Food Enthusiast": f"As a food enthusiast, {prompt}\nAnswer:"
+        "Intermediate": f"As an intermediate cook, {prompt}\nAnswer:",
+        "Expert": f"As an expert chef, {prompt}\nAnswer:"
     }
 
     # Get the appropriate prompt based on user type
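The lookup under that comment isn't shown in the hunk. A minimal sketch of how the renamed keys are presumably consumed, assuming a plain dict lookup; the build_prompt helper and the bare-prompt fallback are illustrative, not from the commit:

# Illustrative sketch, not code from app.py: resolve the template for a
# user type, falling back to the bare prompt for unrecognized labels.
def build_prompt(prompt: str, user_type: str) -> str:
    user_type_templates = {
        "Professional": f"As a professional chef, {prompt}\nAnswer:",
        "Beginner": f"Explain in simple terms: {prompt}\nAnswer:",
        "Intermediate": f"As an intermediate cook, {prompt}\nAnswer:",
        "Expert": f"As an expert chef, {prompt}\nAnswer:",
    }
    return user_type_templates.get(user_type, f"{prompt}\nAnswer:")

Note that whatever UI element feeds user_type has to be updated to the new labels too, or every request would silently hit the fallback.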
@@ -74,11 +74,11 @@ def get_response(prompt, model_name, user_type):
         return_tensors='pt',
         padding=True,
         truncation=True,
-        max_length=300 # Increased length for larger inputs
+        max_length=512 # Increased max length for larger inputs
     ).to(device)
 
-    # Reduce max_new_tokens for faster response time
-    max_new_tokens = 50 # Reduced to speed up response time
+    # Increase max_new_tokens for longer responses
+    max_new_tokens = 200 # Increase this to allow more content in response
 
     with torch.no_grad():
         output = model.generate(
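To see the two changed parameters in context, here is a self-contained sketch of the tokenize-and-generate flow around this hunk, assuming a causal LM loaded via transformers; "gpt2" is a stand-in, since the Space's actual model loading and model_name handling are not shown in this diff:

# Illustrative sketch, not the commit's full get_response(): shows where
# max_length=512 and max_new_tokens=200 sit in the generation flow.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" if torch.cuda.is_available() else "cpu"
tokenizer = AutoTokenizer.from_pretrained("gpt2")  # placeholder model
tokenizer.pad_token = tokenizer.eos_token  # GPT-2 has no pad token by default
model = AutoModelForCausalLM.from_pretrained("gpt2").to(device)

inputs = tokenizer(
    "As a professional chef, how do I temper chocolate?\nAnswer:",
    return_tensors='pt',
    padding=True,
    truncation=True,
    max_length=512,  # prompt budget raised from 300 in this commit
).to(device)

with torch.no_grad():
    output = model.generate(
        **inputs,
        max_new_tokens=200,  # raised from 50 in this commit
        pad_token_id=tokenizer.eos_token_id,
    )
print(tokenizer.decode(output[0], skip_special_tokens=True))

The two budgets are independent: max_length caps the prompt after truncation, while max_new_tokens bounds only the generated continuation.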
@@ -193,10 +193,10 @@ body {
         response = gr.Textbox(
             label="Response",
             placeholder="Your answer will appear here...",
-            lines=10,
+            lines=15, # Increased lines for a longer response display
             interactive=False,
             show_copy_button=True,
-            max_lines=12
+            max_lines=20 # Allow for more lines if the response is lengthy
         )
 
         submit_button.click(fn=process_input, inputs=[prompt, model_name, user_type], outputs=response)
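And the display-side change in context: a minimal sketch of a Blocks layout around the response box. The prompt/model_name/user_type components and the process_input body are placeholder assumptions; only the Textbox arguments and the click wiring come from the commit:

# Illustrative sketch, not the full app.py layout.
import gradio as gr

def process_input(prompt, model_name, user_type):
    # Placeholder for the app's real handler, which calls get_response().
    return f"[{model_name} | {user_type}] {prompt}"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    model_name = gr.Dropdown(["gpt2"], label="Model", value="gpt2")  # assumed choices
    user_type = gr.Radio(
        ["Professional", "Beginner", "Intermediate", "Expert"],  # new labels from this commit
        label="User type",
    )
    submit_button = gr.Button("Submit")
    response = gr.Textbox(
        label="Response",
        placeholder="Your answer will appear here...",
        lines=15,  # initial height, increased in this commit
        interactive=False,
        show_copy_button=True,
        max_lines=20,  # growth cap before scrolling, increased in this commit
    )
    submit_button.click(fn=process_input, inputs=[prompt, model_name, user_type], outputs=response)

demo.launch()

In Gradio, lines sets the box's initial height and max_lines caps how far it grows before scrolling, so 15/20 keeps the longer 200-token answers readable without an immediate scrollbar.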
 