ahmed792002 committed (verified)
Commit 93ff22f · 1 Parent(s): 15e331f

Update app.py

Files changed (1)
  1. app.py +33 -21
app.py CHANGED
@@ -1,28 +1,40 @@
- import gradio as gr
- from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
  import sentencepiece
  import torch

- # Load the tokenizer and model
- tokenizer = AutoTokenizer.from_pretrained("ahmed792002/Finetuning_T5_HealthCare_Chatbot", use_fast=True)
- model = AutoModelForSeq2SeqLM.from_pretrained("ahmed792002/Finetuning_T5_HealthCare_Chatbot")

- # Define the chatbot function
- def chatbot(input_text):
-     inputs = tokenizer(input_text, return_tensors="pt", padding=True, truncation=True)
-     outputs = model.generate(inputs["input_ids"], max_length=100, num_beams=4, early_stopping=True)
-     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
-     return response
-
- # Set up the Gradio interface
- interface = gr.Interface(
-     fn=chatbot,
-     inputs=gr.Textbox(label="Enter your query"),  # Corrected import
-     outputs=gr.Textbox(label="Response"),  # Corrected import
-     title="Healthcare Chatbot",
-     description="Ask healthcare-related questions, and get responses from the fine-tuned T5 model."
  )

- # Launch the app
  if __name__ == "__main__":
-     interface.launch()

  import sentencepiece
+ import gradio as gr
+ import re
  import torch
+ from transformers import T5Tokenizer, T5ForConditionalGeneration

+ tokenizer = T5Tokenizer.from_pretrained("ahmed792002/Finetuning_T5_HealthCare_Chatbot")
+ model = T5ForConditionalGeneration.from_pretrained("ahmed792002/Finetuning_T5_HealthCare_Chatbot")

+ def clean_text(text):
+     text = re.sub(r'\r\n', ' ', text)  # Remove carriage returns and line breaks
+     text = re.sub(r'\s+', ' ', text)   # Collapse extra whitespace
+     text = re.sub(r'<.*?>', '', text)  # Remove any XML tags
+     text = text.strip().lower()        # Strip and convert to lower case
+     return text
+ def chatbot(query, history=None, system_message=None):  # history/system_message are supplied by gr.ChatInterface
+     query = clean_text(query)
+     input_ids = tokenizer(query, return_tensors="pt", max_length=256, truncation=True)
+     inputs = {key: value.to(model.device) for key, value in input_ids.items()}  # move tensors to the model's device
+     outputs = model.generate(
+         inputs["input_ids"],
+         max_length=1024,
+         num_beams=5,
+         early_stopping=True
+     )
+     return tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+ """
+ For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
+ """
+ demo = gr.ChatInterface(
+     chatbot,
+     additional_inputs=[
+         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
+     ],
  )

+
  if __name__ == "__main__":
+     demo.launch()
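
A quick way to exercise the new chatbot() pipeline outside the web UI is to import it and call it directly. The snippet below is only a minimal sketch: it assumes the updated file is saved as app.py on the import path, and the sample question is made up for illustration rather than taken from this commit.

# Minimal smoke test (sketch): import the updated app.py and query the fine-tuned T5 model directly.
# Assumes app.py is importable; the checkpoint is downloaded from the Hub on first run.
from app import chatbot

sample_question = "What are the common symptoms of seasonal flu?"  # illustrative query, not from the commit
print(chatbot(sample_question, history=[], system_message="You are a friendly Chatbot."))

Because the module guards interface.launch() / demo.launch() behind if __name__ == "__main__", importing app only builds the interface; it does not start a Gradio server.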