JaiPatel4717 committed on
Commit
03444e8
1 Parent(s): 19ae9ab

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -5
app.py CHANGED
# --- previous version of app.py (pre-change side of the diff) ---
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Load the model and tokenizer once at startup; fp16 halves the memory footprint.
model_name = "Tom158/Nutri_Assist"
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Streamlit App Interface
st.title("Nutrition Chatbot")
user_input = st.text_input("Ask me about nutrition:")

if user_input:
    # Tokenize the question, generate a reply capped at 50 tokens total,
    # then decode it back to plain text without special tokens.
    inputs = tokenizer.encode(user_input, return_tensors="pt")
    outputs = model.generate(inputs, max_length=50)
    answer = tokenizer.decode(outputs[0], skip_special_tokens=True)

    # Display answer
    st.write("Answer:", answer)
# --- current version of app.py (post-change side of the diff) ---
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Load the model and tokenizer once at startup; fp16 halves the memory footprint.
model_name = "Tom158/Nutri_Assist"
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Set pad token if not already set — generate() needs a pad token id to pad
# batches and to distinguish padding in the attention mask.
if model.config.pad_token_id is None:
    model.config.pad_token_id = model.config.eos_token_id

# Streamlit App Interface
st.title("Nutrition Chatbot")
user_input = st.text_input("Ask me about nutrition:")

if user_input:
    # BUG FIX: tokenizer.encode() returns a plain tensor of token ids, not a
    # mapping, so the original inputs['input_ids'] / inputs['attention_mask']
    # lookups raised TypeError. Calling the tokenizer itself returns a
    # BatchEncoding that carries both input_ids and attention_mask.
    inputs = tokenizer(user_input, return_tensors="pt")
    input_ids = inputs["input_ids"]
    attention_mask = inputs["attention_mask"]

    # Generate output with the attention mask; pad token id is already set on
    # the model config above, so generate() will not warn or mis-pad.
    outputs = model.generate(input_ids, attention_mask=attention_mask, max_length=50)
    answer = tokenizer.decode(outputs[0], skip_special_tokens=True)

    # Display answer
    st.write("Answer:", answer)