adi2606 committed on
Commit
2098428
·
verified ·
1 Parent(s): 1fa3025

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +42 -0
app.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Set up the device: prefer GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load model and tokenizer from the Hugging Face Hub (downloads on first
# run, then served from the local cache) and move the model to `device`.
model = AutoModelForCausalLM.from_pretrained("adi2606/MenstrualQA").to(device)
tokenizer = AutoTokenizer.from_pretrained("adi2606/MenstrualQA")
11
+
12
# Function to generate a response from the chatbot
def generate_response(message: str, temperature: float = 0.4, repetition_penalty: float = 1.1) -> str:
    """Generate a chatbot reply for *message* with the loaded causal LM.

    Args:
        message: The user's question / prompt text.
        temperature: Sampling temperature; lower values sample more greedily.
        repetition_penalty: Values > 1.0 discourage repeated tokens.

    Returns:
        The decoded generation. Note: for a causal LM this includes the
        prompt text followed by the model's continuation.
    """
    inputs = tokenizer(message, return_tensors="pt").to(device)

    # Generate the response. inference_mode() skips autograd bookkeeping;
    # passing the attention mask and an explicit pad token makes generation
    # well-defined and silences transformers' warnings about both.
    with torch.inference_mode():
        output = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            # max_new_tokens bounds only the continuation; the previous
            # max_length=512 also counted prompt tokens, so long prompts
            # silently shrank (or eliminated) the generated reply.
            max_new_tokens=512,
            temperature=temperature,
            repetition_penalty=repetition_penalty,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,
        )

    # Decode the generated output back to text, dropping special tokens.
    generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
    return generated_text
28
+
29
# --- Streamlit page layout -------------------------------------------------
st.title("Menstrual QA Chatbot")
st.write("Ask any question related to menstrual health.")

# Single-line text box for the user's question (starts empty).
user_input = st.text_input("You:", "")

send_clicked = st.button("Send")
if send_clicked:
    if not user_input:
        # Nothing typed yet — prompt the user instead of calling the model.
        st.write("Please enter a question.")
    else:
        # Show a spinner while the (potentially slow) generation runs.
        with st.spinner("Generating response..."):
            reply = generate_response(user_input)
        st.write(f"Chatbot: {reply}")