JaiPatel4717 committed on
Commit
19ae9ab
1 Parent(s): 8895c4c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -5
app.py CHANGED
@@ -1,15 +1,22 @@
 
1
  from transformers import AutoModelForCausalLM, AutoTokenizer
2
  import torch
3
 
 
4
  model_name = "Tom158/Nutri_Assist"
5
  model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16)
6
  tokenizer = AutoTokenizer.from_pretrained(model_name)
7
 
8
- def chat(question):
9
- inputs = tokenizer.encode(question, return_tensors="pt")
 
 
 
 
 
10
  outputs = model.generate(inputs, max_length=50)
11
  answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
12
- return answer
 
 
13
 
14
- iface = gr.Interface(fn=chat, inputs="text", outputs="text")
15
- iface.launch()
 
import streamlit as st
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hugging Face Hub id of the fine-tuned nutrition chat model.
MODEL_NAME = "Tom158/Nutri_Assist"


@st.cache_resource
def _load_model(name: str):
    """Load and cache the model and tokenizer.

    Streamlit re-executes this script on every widget interaction, so
    without caching the float16 causal LM would be reloaded for every
    question. ``st.cache_resource`` keeps a single shared instance for
    the lifetime of the server process.
    """
    model = AutoModelForCausalLM.from_pretrained(name, torch_dtype=torch.float16)
    tokenizer = AutoTokenizer.from_pretrained(name)
    return model, tokenizer


model, tokenizer = _load_model(MODEL_NAME)

# Streamlit App Interface
st.title("Nutrition Chatbot")
user_input = st.text_input("Ask me about nutrition:")

if user_input:
    # Tokenize the question into a (1, seq_len) tensor of input ids.
    inputs = tokenizer.encode(user_input, return_tensors="pt")
    # Pure inference: disable autograd bookkeeping while generating.
    # NOTE(review): max_length=50 counts the prompt tokens too, so long
    # questions leave little room for the answer — consider
    # max_new_tokens instead (kept as-is to preserve behavior).
    with torch.inference_mode():
        outputs = model.generate(inputs, max_length=50)
    answer = tokenizer.decode(outputs[0], skip_special_tokens=True)

    # Display answer
    st.write("Answer:", answer)