Update app.py
app.py CHANGED
@@ -10,90 +10,54 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
 with open("models.txt", "r") as f:
     model_list = [line.strip() for line in f.readlines()]
 
-#
-
-
-model.to(device)
-tokenizer = AutoTokenizer.from_pretrained(current_model_name)
+# Define variables for the model and tokenizer
+model = None
+tokenizer = None
 
-# Default values for system and user input
-test_instruction_string = """
-You are a helpful assistant with experience in the clinical domain and clinical trial design. You'll be asked queries related to clinical trials. These inquiries will be delineated by a '##Question' heading. Inside these queries, expect to find comprehensive details about the clinical trial structured within specific subsections, indicated by '<>' tags. These subsections include essential information such as the trial's title, brief summary, condition under study, inclusion and exclusion criteria, intervention, and outcomes.
-In answer to this question, return a list of probable baseline features (each feature should be enclosed within a pair of backticks and each feature should be separated by commas from other features) of the clinical trial. Baseline features are the set of baseline or demographic characteristics that are assessed at baseline and used in the analysis of the primary outcome measure(s) to characterize the study population and assess validity. Clinical trial-related publications typically include a table of baseline features assessed by arm or comparison group and for the entire population of participants in the clinical trial.
-Do not give any additional explanations or use any tags or headers, only return the list of baseline features.
-"""
-
-test_input_string = """
-<Title:>Vinorelbine in Treating Patients With Advanced Solid Tumors That Have Not Responded to Treatment and Liver Dysfunction <BriefSummary:>RATIONALE: Drugs used in chemotherapy, such as vinorelbine, work in different ways to stop the growth of tumor cells, either by killing the cells or by stopping them from dividing.
-PURPOSE: This pilot trial is studying the side effects and best dose of vinorelbine in treating patients with advanced solid tumors that have not responded to treatment and liver dysfunction. <EligibilityCriteria:>DISEASE CHARACTERISTICS:
-* Histologically confirmed advanced solid tumor
-* Any histology allowed
-* Refractory to standard therapy OR no standard therapy exists
-* Previously untreated non-small cell lung cancer allowed, provided abnormal liver function is present, defined as moderate (group 3) or severe (group 4)
-* Measurable disease not required
-* Present measurable disease requires baseline measurements within 4 weeks of study entry
-* Patients with acute hepatitis from viral or drug etiologies should recover to a stable baseline prior to study therapy
-* History of brain metastasis allowed, provided the following criteria are met:
-* Metastasis has been controlled by radiotherapy or surgery
-* Patient is not currently on corticosteroids
-* Neurologic status is stable
-PATIENT CHARACTERISTICS:
-* Karnofsky performance status 70-100%
-* Life expectancy ≥ 2 months
-* ANC = 1,500/mm³
-* Platelet count = 100,000/mm³
-* Hemoglobin = 10 g/dL (transfusion to this level allowed)
-* Creatinine \< 1.5 mg/dL OR creatinine clearance \> 60 mL/ min
-* Patients with EKG evidence of first- or second-degree AV block or left or right bundle branch block are ineligible for the lidocaine bolus, but may otherwise be treated on this protocol
-* Not pregnant or nursing
-* Negative pregnancy test
-* Fertile patients must use effective contraception
-* No concurrent illness (e.g., cardiovascular, pulmonary, or central nervous system) that is poorly controlled or of such severity that the investigator deems unwise to enter the patient on protocol
-* Must have ability to comply with study treatment and required tests
-* Obstructive jaundice requires a drainage procedure prior to study treatment
-PRIOR CONCURRENT THERAPY:
-* See Disease Characteristics
-* Recovered from prior therapy
-* At least 3 weeks since prior chemotherapy (6 weeks for mitomycin or nitrosourea therapy)
-* No prior radiotherapy to \> 30% of the bone marrow or more than standard adjuvant pelvic radiotherapy for rectal cancer <Conditions:>Lung Cancer, Unspecified Adult Solid Tumor, Protocol Specific, <Interventions:>indocyanine green, lidocaine, vinorelbine ditartrate, high performance liquid chromatography, intracellular fluorescence polarization analysis, liquid chromatography, mass spectrometry, pharmacological study <StudyType:>INTERVENTIONAL <PrimaryOutcomes:>Area Under the Curve, Number of Participants With Grade 3 and 4 Toxicities <OverallStatus:>COMPLETED
-"""
-
-# Adjust load_model to ensure models are loaded to the correct device on demand
 def load_model(model_name):
     global model, tokenizer
     device = "cuda" if torch.cuda.is_available() else "cpu"
 
-    #
-    if
+    # Clear previous model from memory
+    if model:
         del model
         torch.cuda.empty_cache()
 
-
-
+    try:
+        # Load the specified model and move it to the device
+        model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16).to(device)
+        tokenizer = AutoTokenizer.from_pretrained(model_name)
+        print(f"Loaded model: {model_name}")
+    except ValueError as e:
+        print(f"Failed to load model '{model_name}': {e}")
+        model = None  # Reset if loading fails
+        tokenizer = None
+
+# Load the initial model
+current_model_name = model_list[0]
+load_model(current_model_name)
 
+# Default values for system and user input
+test_instruction_string = """You are a helpful assistant with experience in the clinical domain..."""
+test_input_string = """<Title:>Vinorelbine in Treating Patients With Advanced Solid Tumors..."""
 
 alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
-
 ### Instruction:
 {}
-
 ### Input:
 {}
-
 ### Response:
 {}"""
 
 @spaces.GPU
 def generate_response(system_instruction, user_input):
-    #
-
-
+    # Check if model is loaded
+    if model is None:
+        return "Model is not loaded. Please select a compatible model."
 
-    # Prepare
-
-
-        return_tensors="pt"
-    ).to(device)
+    # Prepare input and move to the appropriate device
+    device = model.device
+    inputs = tokenizer([alpaca_prompt.format(system_instruction, user_input, "")], return_tensors="pt").to(device)
 
     # Define generation configuration
     meta_config = {
@@ -120,7 +84,6 @@ with gr.Blocks() as demo:
     gr.Markdown("# Clinical Trial Chatbot with Model Selection")
 
     with gr.Row():
-        # Left column for user inputs
         with gr.Column():
             system_instruction = gr.Textbox(
                 value=test_instruction_string,
@@ -134,7 +97,6 @@ with gr.Blocks() as demo:
             )
             submit_btn = gr.Button("Submit")
 
-        # Right column for model selection and bot response
         with gr.Column():
             model_dropdown = gr.Dropdown(choices=model_list, value=current_model_name, label="Select Model")
             response_display = gr.Textbox(
@@ -145,5 +107,5 @@ with gr.Blocks() as demo:
     model_dropdown.change(lambda m: load_model(m), inputs=model_dropdown, outputs=[])
     submit_btn.click(generate_response, [system_instruction, user_input], response_display)
 
-# Launch the app
-demo.launch()
+# Launch the app with share=True to make it public
+demo.launch(share=True)
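
For context on the unchanged `meta_config` block that the first hunk stops short of, below is a minimal sketch of how the prepared `inputs` and a generation config are typically consumed with Hugging Face `transformers`. The config keys and the decoding step are illustrative assumptions, not code from this commit.

# Illustrative sketch only: meta_config's real contents are not shown in this diff.
# Assumed, common generation kwargs:
meta_config = {
    "max_new_tokens": 256,  # assumption: cap on newly generated tokens
    "do_sample": False,     # assumption: greedy decoding
}

# Typical generation/decoding step: run the model on the tokenized prompt,
# drop the prompt tokens, and decode only the newly generated text.
outputs = model.generate(**inputs, **meta_config)
new_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
response = tokenizer.decode(new_tokens, skip_special_tokens=True)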
|