smellslikeml committed on
Commit
5c03fa6
1 Parent(s): 1cd40cd

update app

Browse files
Files changed (1) hide show
  1. app.py +7 -7
app.py CHANGED
@@ -30,7 +30,10 @@ INTRO_TEXT = """SpaceLlama3.1 demo\n\n
30
  **This is an experimental research model.** Make sure to add appropriate guardrails when using the model for applications.
31
  """
32
 
33
- def compute(image, prompt, model_location):
 
 
 
34
  """Runs model inference."""
35
  if image is None:
36
  raise gr.Error("Image required")
@@ -43,7 +46,7 @@ def compute(image, prompt, model_location):
43
 
44
  # Set device and load the model
45
  device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
46
- vlm = load(model_location) # No need to pass the token again
47
  vlm.to(device, dtype=torch.bfloat16)
48
 
49
  # Prepare prompt
@@ -85,13 +88,10 @@ def create_app():
85
  clear = gr.Button("Clear")
86
  highlighted_text = gr.HighlightedText(value="", label="Output", visible=True)
87
 
88
- # Model location
89
- model_location = "remyxai/SpaceLlama3.1" # Update as needed
90
-
91
  # Button event handlers
92
  run.click(
93
  fn=compute,
94
- inputs=[image, prompt, model_location],
95
  outputs=highlighted_text,
96
  )
97
  clear.click(fn=reset, inputs=None, outputs=[prompt, image])
@@ -100,7 +100,7 @@ def create_app():
100
  status = gr.Markdown(f"Startup: {datetime.datetime.now()}")
101
  gpu_kind = gr.Markdown(f"GPU=?")
102
  demo.load(
103
- fn=lambda: [f"Model `{model_location}` loaded."],
104
  inputs=None,
105
  outputs=model_info,
106
  )
 
30
  **This is an experimental research model.** Make sure to add appropriate guardrails when using the model for applications.
31
  """
32
 
33
+ # Set model location as a constant outside the function
34
+ MODEL_LOCATION = "remyxai/SpaceLlama3.1" # Update as needed
35
+
36
+ def compute(image, prompt):
37
  """Runs model inference."""
38
  if image is None:
39
  raise gr.Error("Image required")
 
46
 
47
  # Set device and load the model
48
  device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
49
+ vlm = load(MODEL_LOCATION) # Use the constant for model location
50
  vlm.to(device, dtype=torch.bfloat16)
51
 
52
  # Prepare prompt
 
88
  clear = gr.Button("Clear")
89
  highlighted_text = gr.HighlightedText(value="", label="Output", visible=True)
90
 
 
 
 
91
  # Button event handlers
92
  run.click(
93
  fn=compute,
94
+ inputs=[image, prompt],
95
  outputs=highlighted_text,
96
  )
97
  clear.click(fn=reset, inputs=None, outputs=[prompt, image])
 
100
  status = gr.Markdown(f"Startup: {datetime.datetime.now()}")
101
  gpu_kind = gr.Markdown(f"GPU=?")
102
  demo.load(
103
+ fn=lambda: [f"Model `{MODEL_LOCATION}` loaded."],
104
  inputs=None,
105
  outputs=model_info,
106
  )