Praveen0309 committed on
Commit
28840c0
·
1 Parent(s): f6df270

final_commit

Browse files
Files changed (2) hide show
  1. __pycache__/main.cpython-39.pyc +0 -0
  2. main.py +11 -11
__pycache__/main.cpython-39.pyc CHANGED
Binary files a/__pycache__/main.cpython-39.pyc and b/__pycache__/main.cpython-39.pyc differ
 
main.py CHANGED
@@ -23,25 +23,25 @@ app = FastAPI()
23
  warnings.filterwarnings('ignore')
24
 
25
 
26
- @app.get('/echo/')
27
- async def echo(query_param: str):
28
- return {"response": query_param}
29
 
30
 
31
  # app.mount("/", StaticFiles(directory="static", html=True), name="static")
32
 
33
 
34
 
35
- # model_id = "HuggingFaceH4/vsft-llava-1.5-7b-hf-trl"
36
- # quantization_config = BitsAndBytesConfig(load_in_4bit=True)
37
- # base_model = LlavaForConditionalGeneration.from_pretrained(model_id, quantization_config=quantization_config, torch_dtype=torch.float16)
38
 
39
- # # Load the PEFT Lora adapter
40
- # peft_lora_adapter_path = "Praveen0309/llava-1.5-7b-hf-ft-mix-vsft-3"
41
- # peft_lora_adapter = PeftModel.from_pretrained(base_model, peft_lora_adapter_path, adapter_name="lora_adapter")
42
- # base_model.load_adapter(peft_lora_adapter_path, adapter_name="lora_adapter")
43
 
44
- # processor = AutoProcessor.from_pretrained("HuggingFaceH4/vsft-llava-1.5-7b-hf-trl")
45
  # model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
46
  # tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")
47
 
 
23
  warnings.filterwarnings('ignore')
24
 
25
 
26
+ # @app.get('/echo/')
27
+ # async def echo(query_param: str):
28
+ # return {"response": query_param}
29
 
30
 
31
  # app.mount("/", StaticFiles(directory="static", html=True), name="static")
32
 
33
 
34
 
35
+ model_id = "HuggingFaceH4/vsft-llava-1.5-7b-hf-trl"
36
+ quantization_config = BitsAndBytesConfig(load_in_4bit=True)
37
+ base_model = LlavaForConditionalGeneration.from_pretrained(model_id, quantization_config=quantization_config, torch_dtype=torch.float16)
38
 
39
+ # Load the PEFT Lora adapter
40
+ peft_lora_adapter_path = "Praveen0309/llava-1.5-7b-hf-ft-mix-vsft-3"
41
+ peft_lora_adapter = PeftModel.from_pretrained(base_model, peft_lora_adapter_path, adapter_name="lora_adapter")
42
+ base_model.load_adapter(peft_lora_adapter_path, adapter_name="lora_adapter")
43
 
44
+ processor = AutoProcessor.from_pretrained("HuggingFaceH4/vsft-llava-1.5-7b-hf-trl")
45
  # model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
46
  # tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")
47