ikraamkb commited on
Commit
db90043
·
verified ·
1 Parent(s): 38daff8

Update Visualisation/app.py

Browse files
Files changed (1) hide show
  1. Visualisation/app.py +6 -5
Visualisation/app.py CHANGED
import io

from fastapi.responses import StreamingResponse
from transformers import pipeline, AutoModelForSeq2SeqLM, AutoTokenizer
import torch

app = FastAPI()

# Hugging Face pipelines are downloaded/loaded at import time; first start-up
# requires network access to the Hub.
table_analyzer = pipeline("table-question-answering", model="google/tapas-base")

# Natural-language user-input processor.
# NOTE(review): "tiiuae/falcon-3b-instruct" does not match the published Hub id
# (the Falcon 3 instruct model is "tiiuae/Falcon3-3B-Instruct") — confirm the
# intended checkpoint before deploying.
user_input_processor = pipeline(
    "text-generation",
    model="tiiuae/falcon-3b-instruct",
    device_map="auto",
    torch_dtype=torch.float16,  # half-precision to save VRAM
)

# Image captioning model.
# Fix: in the previous revision the closing parenthesis of this call was
# swallowed by the inline comment ("... # Reduce VRAM usage)"), which made the
# module a SyntaxError. The comment now sits on its own argument line.
image_captioner = pipeline(
    "image-to-text",
    model="Salesforce/blip2-opt-2.7b",
    device_map="auto",
    torch_dtype=torch.float16,  # reduce VRAM usage
)

# Load T5 model (ensure correct architecture).
# NOTE(review): the canonical Hub id is "t5-small" (or "google-t5/t5-small");
# verify "google/t5-small" resolves before relying on it.
model_name = "google/t5-small"  # change to the correct T5 model if needed