doublelotus committed on
Commit
84fd7cf
·
1 Parent(s): 59dd6f3

cache writing

Browse files
Files changed (1) hide show
  1. main.py +5 -2
main.py CHANGED
@@ -7,8 +7,11 @@ import cv2
7
  from segment_anything import sam_model_registry, SamAutomaticMaskGenerator
8
  from PIL import Image
9
  import zipfile
10
- from transformers import pipeline
11
  import os
 
 
 
12
 
13
  app = Flask(__name__)
14
  CORS(app)
@@ -16,7 +19,7 @@ CORS(app)
16
  cudaOrNah = "cuda" if torch.cuda.is_available() else "cpu"
17
  print(cudaOrNah)
18
 
19
- os.environ['TRANSFORMERS_CACHE'] = '/tmp/transformers_cache'
20
  # Global model setup
21
  # running out of memory adjusted
22
  # checkpoint = "sam_vit_h_4b8939.pth"
 
7
  from segment_anything import sam_model_registry, SamAutomaticMaskGenerator
8
  from PIL import Image
9
  import zipfile
10
+
11
  import os
12
+ os.environ['TRANSFORMERS_CACHE'] = '/tmp/transformers_cache'
13
+
14
+ from transformers import pipeline
15
 
16
  app = Flask(__name__)
17
  CORS(app)
 
19
  cudaOrNah = "cuda" if torch.cuda.is_available() else "cpu"
20
  print(cudaOrNah)
21
 
22
+
23
  # Global model setup
24
  # running out of memory adjusted
25
  # checkpoint = "sam_vit_h_4b8939.pth"