daranaka committed
Commit 4a78e11
1 Parent(s): e8897e7

Update app.py

Files changed (1):
  1. app.py +9 -4
app.py CHANGED
@@ -5,16 +5,15 @@ import torch
 import numpy as np
 import urllib.request
 
-# Load the model without caching to avoid serialization issues
+memory = {}
+
+@st.cache_resource
 def load_model():
     model = AutoModel.from_pretrained("ragavsachdeva/magi", trust_remote_code=True)
     device = "cuda" if torch.cuda.is_available() else "cpu"
     model.to(device)
     return model
 
-# Initialize the model once at the top level, outside any caching functions
-model = load_model()
-
 @st.cache_data
 def read_image_as_np_array(image_path):
     if "http" in image_path:
@@ -70,6 +69,12 @@ def predict_ocr(
     ocr_results = model.predict_ocr([image], text_bboxes_for_all_images)
     return ocr_results
 
+model = load_model()
+
+# Add a button to clear memory
+if st.button("Clear Memory"):
+    memory.clear()
+
 # Streamlit UI elements
 st.markdown("""
 <style> .title-container { background-color: #0d1117; padding: 20px; border-radius: 10px; margin: 20px; }
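
For readers unfamiliar with Streamlit's caching decorators, here is a minimal sketch of the pattern this commit lands on. The import lines and the comments are filled in for illustration and are not part of the commit: st.cache_resource stores the returned object itself in the server process rather than pickling it, which is why it suits an unserializable torch model, whereas st.cache_data serializes return values and stays in use only for the read_image_as_np_array helper.

import streamlit as st
import torch
from transformers import AutoModel

memory = {}  # module-level scratch dict; the "Clear Memory" button empties it

@st.cache_resource  # caches the returned object in-process (no pickling), so the model loads once
def load_model():
    model = AutoModel.from_pretrained("ragavsachdeva/magi", trust_remote_code=True)
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)
    return model

model = load_model()  # subsequent Streamlit reruns reuse the cached instance

if st.button("Clear Memory"):
    memory.clear()        # clears only the scratch dict; the cached model stays resident
    # load_model.clear()  # would also evict the cached model (assumes Streamlit 1.18+, where cached functions expose .clear())

Note that in the committed app the button clears only the memory dict; the model cached by st.cache_resource remains loaded until the process restarts or its cache is cleared explicitly.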