Update app.py
app.py CHANGED
@@ -3,14 +3,6 @@ import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer
 import pandas as pd
 
-# Load pretrained model and tokenizer
-model_name = "jrocha/tiny_llama"
-model = AutoModelForCausalLM.from_pretrained(model_name)
-tokenizer = AutoTokenizer.from_pretrained(model_name)
-
-# Load data
-df = pd.read_csv('splitted_df_jo.csv')
-
 # Function to prepare context
 def prepare_context():
     pubmed_information_column = df['section_text']
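Note on the hunk above: it removes the module-level df = pd.read_csv('splitted_df_jo.csv'), but prepare_context() still reads df['section_text'], so the app would raise a NameError when the function runs. A minimal sketch of one way to keep the load next to its use follows; the empty-string initializer for pubmed_information_cleaned is an assumption, since that context line is not visible in this diff:

    import pandas as pd

    def prepare_context():
        # Load the CSV here now that the module-level load is gone
        df = pd.read_csv('splitted_df_jo.csv')
        pubmed_information_column = df['section_text']
        pubmed_information_cleaned = ""  # assumed initializer; not shown in the diff
        for text in pubmed_information_column.tolist():
            objective_index = text.find("Objective")
            if objective_index != -1:
                # Keep only the text before the "Objective" heading
                pubmed_information_cleaned += text[:objective_index]
            else:
                pubmed_information_cleaned += text
        max_length = 1000  # truncate the concatenated context
        return pubmed_information_cleaned[:max_length]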
@@ -18,26 +10,29 @@ def prepare_context():
     for text in pubmed_information_column.tolist():
         objective_index = text.find("Objective")
         if objective_index != -1:
-            cleaned_text = text[:objective_index]
-            pubmed_information_cleaned += cleaned_text
+            cleaned_text = text[:objective_index]
+            pubmed_information_cleaned += cleaned_text
         else:
-            pubmed_information_cleaned += text
-    max_length = 1000
+            pubmed_information_cleaned += text
+    max_length = 1000
     return pubmed_information_cleaned[:max_length]
 
 # Function to generate answer
 def answer_question(question):
     pubmed_information_cleaned = prepare_context()
+    model_name = "jrocha/tiny_llama"
+    model = AutoModelForCausalLM.from_pretrained(model_name)
+    tokenizer = AutoTokenizer.from_pretrained(model_name)
 
     # Prepare input sequence
     messages = [
-        {
-            "role": "system",
-            "content": "You are a friendly chatbot who responds to questions about cancer. Please be considerate.",
-        },
-        {"role": "user", "content": question},
+        {
+            "role": "system",
+            "content": "You are a friendly chatbot who responds to questions about cancer. Please be considerate.",
+        },
+        {"role": "user", "content": question},
     ]
-    prompt_with_pubmed = f"{pubmed_information_cleaned}\n\n"
+    prompt_with_pubmed = f"{pubmed_information_cleaned}\n\n"
     prompt_with_pubmed += tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=False)
 
     # Generate response
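The generation code between this hunk and the next is unchanged by the commit, so it is not shown in the diff. For orientation only, a typical pattern with the model, tokenizer, and prompt_with_pubmed prepared above would look like the sketch below; it is illustrative, not the file's actual code, and max_new_tokens=200 is an arbitrary choice:

    import torch

    inputs = tokenizer(prompt_with_pubmed, return_tensors="pt")
    with torch.no_grad():  # inference only; no gradients needed
        output_ids = model.generate(**inputs, max_new_tokens=200)
    answer = tokenizer.decode(output_ids[0], skip_special_tokens=True)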
@@ -51,7 +46,7 @@ def answer_question(question):
 
 def main():
     """
-    Initializes a
+    Initializes a Cancer ChatBot interface using Hugging Face models for question answering.
 
     This function loads a pretrained tokenizer and model from the Hugging Face model hub
     and creates a Gradio interface for the ChatBot. Users can input questions related to
@@ -63,15 +58,16 @@ def main():
     >>> main()
     """
     iface = gr.Interface(fn=answer_question,
-                         inputs=["text"],
-                         outputs=[gr.Textbox(label="Answer")],
-                         title="Cancer ChatBot",
-                         description="How can I help you?",
-                         examples=[
-                             ["What is prostate cancer?"],
-                             ["What are treatments for cervical cancer?"]
-                         ])
-
+                         inputs=["text"],
+                         outputs=[gr.Textbox(label="Answer")],
+                         title="Cancer ChatBot",
+                         description="How can I help you?",
+                         examples=[
+                             ["What is prostate cancer?"],
+                             ["What are treatments for cervical cancer?"]
+                         ])
+
     return iface.launch(debug = True, share=True)
 
-
+if __name__ == "__main__":
+    main()
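A side effect of moving the from_pretrained() calls into answer_question() is that every Gradio request re-initializes the model and tokenizer. A cached loader would pay that cost once; the load_model() helper below is a hypothetical alternative, not part of this commit:

    from functools import lru_cache

    from transformers import AutoModelForCausalLM, AutoTokenizer

    @lru_cache(maxsize=1)
    def load_model():
        # First call loads the weights; later calls reuse the cached pair
        model_name = "jrocha/tiny_llama"
        model = AutoModelForCausalLM.from_pretrained(model_name)
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        return model, tokenizer

answer_question() would then start with model, tokenizer = load_model(). With the new __main__ guard, the app can also be run directly with python app.py.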