Kazel committed on
Commit 2b478c3 · 1 Parent(s): 86fba6f

solved gpu to cpu teething issues

Files changed (2)
  1. colpali_manager.py +0 -1
  2. rag.py +2 -4
colpali_manager.py CHANGED
@@ -17,7 +17,6 @@ import spaces
 
 
 #this part is for local runs
-torch.cpu.empty_cache()
 
 model_name = "vidore/colSmol-256M"
 device = get_torch_device("cpu") #try using cpu instead of cpu?
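
The removed call is the likely culprit behind the "gpu to cpu" breakage: torch.cuda.empty_cache() is a real PyTorch API, but on the PyTorch builds I'm aware of the torch.cpu namespace has no empty_cache(), so calling it unconditionally on a CPU host raises AttributeError. A minimal sketch of a device-aware alternative (the helper name release_accelerator_cache is hypothetical, not part of this repo):

import torch

def release_accelerator_cache() -> None:
    # Hypothetical helper, not in this repo: only CUDA exposes a
    # cache-release API, so skip the call entirely on CPU-only hosts.
    if torch.cuda.is_available():
        torch.cuda.empty_cache()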
rag.py CHANGED
@@ -71,9 +71,7 @@ class Rag:
 
 #ollama method below
 
-torch.cuda.empty_cache() #release cuda so that ollama can use gpu!
-
-
+
 os.environ['OLLAMA_FLASH_ATTENTION'] = '1'
 
 
@@ -144,4 +142,4 @@ class Rag:
 # query = "Based on attached images, how many new cases were reported during second wave peak"
 # imagesPaths = ["covid_slides_page_8.png", "covid_slides_page_8.png"]
 
-# rag.get_answer_from_gemini(query, imagesPaths)
+# rag.get_answer_from_gemini(query, imagesPaths)
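
The removed line's own comment ("release cuda so that ollama can use gpu!") says the intent was to free cached VRAM before handing the GPU over to Ollama. A minimal sketch of how that intent could be kept while staying safe on CPU-only hosts (free_gpu_for_ollama is a hypothetical name, and the gc.collect() step is an assumption about dropping dangling tensor references first):

import gc
import os

import torch

def free_gpu_for_ollama() -> None:
    # Hypothetical helper, not in rag.py: drop dangling tensor
    # references, then release PyTorch's cached VRAM so Ollama can
    # claim it. On CPU-only hosts this is a harmless no-op.
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()

os.environ['OLLAMA_FLASH_ATTENTION'] = '1'  # matches the diff above
free_gpu_for_ollama()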