Spaces: Runtime error
Update app.py

app.py CHANGED
@@ -1,71 +1,32 @@
 import gradio as gr
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
-# Load the
+# Load the fine-tuned model and tokenizer
 tokenizer = AutoTokenizer.from_pretrained("harishussain12/PastelMed")
 model = AutoModelForCausalLM.from_pretrained("harishussain12/PastelMed")
 
-#
-# Encode the input question
-inputs = tokenizer(question, return_tensors="pt")
-
-# Generate a response from the model
-outputs = model.generate(inputs["input_ids"], max_length=200, num_return_sequences=1)
-
-# Decode the
-response = tokenizer.decode(outputs[0], skip_special_tokens=True)
-
-return response
-
-#
-def register_patient(name, age, gender, password):
-    patient_id = len(patients_db) + 1
-    patients_db.append({
-        "ID": patient_id,
-        "Name": name,
-        "Age": age,
-        "Gender": gender,
-        "Password": password,
-        "Diagnosis": "",
-        "Medications": "",
-        "Precautions": "",
-        "Doctor": ""
-    })
-    return f"✅ Patient {name} registered successfully. Patient ID: {patient_id}"
-
-# Gradio Interface for Doctor Assistance
-doctor_assistant_interface = gr.Interface(
-    fn=doctor_assistant,
-    inputs=gr.Textbox(label="Ask a Question to the Doctor Assistant"),
-    outputs="text",
-    title="Doctor Assistant",
-    description="Ask the assistant for medical advice and it will generate a response based on the PastelMed model."
-)
-
-# Gradio Interface for Patient Registration (for testing)
-registration_interface = gr.Interface(
-    fn=register_patient,
-    inputs=[
-        gr.Textbox(label="Patient Name"),
-        gr.Number(label="Age"),
-        gr.Radio(label="Gender", choices=["Male", "Female", "Other"]),
-        gr.Textbox(label="Set Password", type="password"),
-    ],
-    outputs="text",
-)
-
-# Gradio App Layout
+# Function to generate response from the model
+def doctor_consultant(query):
+    # Encode the input query and generate the model's response
+    inputs = tokenizer(query, return_tensors="pt")
+    outputs = model.generate(inputs['input_ids'], max_length=200, num_return_sequences=1, no_repeat_ngram_size=2, top_p=0.95, temperature=0.7)
+
+    # Decode the output and return the response
+    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    return response
+
+# Gradio Interface
 with gr.Blocks() as app:
-    gr.Markdown("#
-
-    with gr.
-
-    with gr.
+    gr.Markdown("# Doctor Consultant Assistant")
+
+    with gr.Row():
+        gr.Textbox(label="Ask the Doctor", placeholder="Enter your symptoms or question", lines=3, elem_id="input_text")
+
+    with gr.Row():
+        gr.Button("Get Response", elem_id="response_button")
+
+    with gr.Row():
+        gr.Textbox(label="Doctor's Response", elem_id="response_output", interactive=False)
+
+    # Connect the function to the interface
+    gr.Interface(fn=doctor_consultant, inputs="text", outputs="text").launch(share=True)
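In the updated layout, the input box, button, and output box are defined but never connected to doctor_consultant, and the app Blocks object is never launched; only the separate gr.Interface created on the last line is. The sketch below is not part of the commit: it shows one way the same widgets could be wired up with the standard Button.click event and app.launch(), and it adds do_sample=True, which generate() needs for top_p and temperature to actually influence decoding (otherwise it decodes greedily and ignores them).

import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("harishussain12/PastelMed")
model = AutoModelForCausalLM.from_pretrained("harishussain12/PastelMed")

def doctor_consultant(query):
    # Tokenize the query and sample a response from the model.
    inputs = tokenizer(query, return_tensors="pt")
    outputs = model.generate(
        inputs["input_ids"],
        max_length=200,
        num_return_sequences=1,
        no_repeat_ngram_size=2,
        do_sample=True,   # assumption: enable sampling so top_p/temperature apply
        top_p=0.95,
        temperature=0.7,
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

with gr.Blocks() as app:
    gr.Markdown("# Doctor Consultant Assistant")
    question = gr.Textbox(label="Ask the Doctor", placeholder="Enter your symptoms or question", lines=3)
    ask = gr.Button("Get Response")
    answer = gr.Textbox(label="Doctor's Response", interactive=False)
    # Wire the button to the model function: question in, answer out.
    ask.click(fn=doctor_consultant, inputs=question, outputs=answer)

app.launch()  # share=True is generally unnecessary when running on Spaces

Keeping a single Blocks app and launching it directly avoids mixing gr.Blocks with a nested gr.Interface(...).launch(...) in the same script.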