sounar committed on
Commit
5da7650
·
verified ·
1 Parent(s): 3d49b4f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -8
app.py CHANGED
@@ -17,20 +17,22 @@ bnb_config = BitsAndBytesConfig(
17
  bnb_4bit_compute_dtype=torch.float16
18
  )
19
 
20
- # Load model
21
  model = AutoModel.from_pretrained(
22
  "ContactDoctor/Bio-Medical-MultiModal-Llama-3-8B-V1",
23
  quantization_config=bnb_config,
24
  device_map="auto",
25
  torch_dtype=torch.float16,
26
  trust_remote_code=True,
27
- token=api_token
 
28
  )
29
 
30
  tokenizer = AutoTokenizer.from_pretrained(
31
  "ContactDoctor/Bio-Medical-MultiModal-Llama-3-8B-V1",
32
  trust_remote_code=True,
33
- token=api_token
 
34
  )
35
 
36
  def analyze_input(image_data, question):
@@ -73,19 +75,18 @@ def analyze_input(image_data, question):
73
  demo = gr.Interface(
74
  fn=analyze_input,
75
  inputs=[
76
- gr.Image(type="numpy", label="Medical Image"), # Removed optional parameter
77
  gr.Textbox(label="Question", placeholder="Enter your medical query...")
78
  ],
79
  outputs=gr.JSON(label="Analysis"),
80
  title="Bio-Medical MultiModal Analysis",
81
  description="Ask questions with or without an image",
82
- allow_flagging="never",
83
  )
84
 
85
- # Launch with API access enabled
86
  demo.launch(
87
  share=True,
88
  server_name="0.0.0.0",
89
- server_port=7860,
90
- enable_queue=True
91
  )
 
17
  bnb_4bit_compute_dtype=torch.float16
18
  )
19
 
20
+ # Load model with revision pinning
21
  model = AutoModel.from_pretrained(
22
  "ContactDoctor/Bio-Medical-MultiModal-Llama-3-8B-V1",
23
  quantization_config=bnb_config,
24
  device_map="auto",
25
  torch_dtype=torch.float16,
26
  trust_remote_code=True,
27
+ token=api_token,
28
+ revision="main" # Pin to specific revision
29
  )
30
 
31
  tokenizer = AutoTokenizer.from_pretrained(
32
  "ContactDoctor/Bio-Medical-MultiModal-Llama-3-8B-V1",
33
  trust_remote_code=True,
34
+ token=api_token,
35
+ revision="main" # Pin to specific revision
36
  )
37
 
38
  def analyze_input(image_data, question):
 
75
  demo = gr.Interface(
76
  fn=analyze_input,
77
  inputs=[
78
+ gr.Image(type="numpy", label="Medical Image"),
79
  gr.Textbox(label="Question", placeholder="Enter your medical query...")
80
  ],
81
  outputs=gr.JSON(label="Analysis"),
82
  title="Bio-Medical MultiModal Analysis",
83
  description="Ask questions with or without an image",
84
+ flagging_mode="never" # Updated from allow_flagging
85
  )
86
 
87
+ # Launch with simplified parameters
88
  demo.launch(
89
  share=True,
90
  server_name="0.0.0.0",
91
+ server_port=7860
 
92
  )