Spaces:
Sleeping
Commit
·
40aa371
1
Parent(s):
7134637
modified
Browse files
app.py
CHANGED
@@ -20,7 +20,7 @@ CLEARML_API_HOST = os.environ["CLEARML_API_HOST"]
|
|
20 |
CLEARML_WEB_HOST = os.environ["CLEARML_WEB_HOST"]
|
21 |
CLEARML_FILES_HOST = os.environ["CLEARML_FILES_HOST"]
|
22 |
CLEARML_ACCESS_KEY = os.environ["CLEARML_API_ACCESS_KEY"]
|
23 |
-
CLEARML_SECRET_KEY = os.environ["CLEARML_API_SECRET_KEY"]
|
24 |
|
25 |
# Apply to SDK
|
26 |
Task.set_credentials(
|
@@ -69,8 +69,10 @@ print("Base model architecture and tokenizer loaded.")
|
|
69 |
|
70 |
# Download the fine-tuned weights via ClearML using your injected creds
|
71 |
task = Task.get_task(task_id="2d65a9e213ea49a9b37e1cc89a2b7ff0")
|
72 |
-
|
73 |
-
|
|
|
|
|
74 |
|
75 |
# Create LoRA configuration matching the fine-tuned checkpoint
|
76 |
lora_cfg = LoraConfig(
|
@@ -84,7 +86,7 @@ lora_cfg = LoraConfig(
|
|
84 |
# Wrap base model with PEFT LoRA
|
85 |
peft_model = get_peft_model(base_model, lora_cfg)
|
86 |
# Load adapter-only weights and merge into base
|
87 |
-
adapter_state = torch.load(adapter_path, map_location="cpu")
|
88 |
peft_model.load_state_dict(adapter_state, strict=False)
|
89 |
model = peft_model.merge_and_unload()
|
90 |
print("Merged base model with LoRA adapters.")
|
|
|
20 |
CLEARML_WEB_HOST = os.environ["CLEARML_WEB_HOST"]
|
21 |
CLEARML_FILES_HOST = os.environ["CLEARML_FILES_HOST"]
|
22 |
CLEARML_ACCESS_KEY = os.environ["CLEARML_API_ACCESS_KEY"]
|
23 |
+
CLEARML_SECRET_KEY = os.environ["CLEARML_SECRET_KEY"]
|
24 |
|
25 |
# Apply to SDK
|
26 |
Task.set_credentials(
|
|
|
69 |
|
70 |
# Download the fine-tuned weights via ClearML using your injected creds
|
71 |
task = Task.get_task(task_id="2d65a9e213ea49a9b37e1cc89a2b7ff0")
|
72 |
+
extracted_adapter_dir = task.artifacts["lora-adapter"].get_local_copy() # This is the directory path
|
73 |
+
actual_weights_file_path = os.path.join(extracted_adapter_dir, "pytorch_model.bin") # Path to the actual model file
|
74 |
+
print(f"Fine-tuned adapter weights downloaded and extracted to directory: {extracted_adapter_dir}")
|
75 |
+
print(f"Loading fine-tuned adapter weights from file: {actual_weights_file_path}")
|
76 |
|
77 |
# Create LoRA configuration matching the fine-tuned checkpoint
|
78 |
lora_cfg = LoraConfig(
|
|
|
86 |
# Wrap base model with PEFT LoRA
|
87 |
peft_model = get_peft_model(base_model, lora_cfg)
|
88 |
# Load adapter-only weights and merge into base
|
89 |
+
adapter_state = torch.load(actual_weights_file_path, map_location="cpu") # Use the correct file path
|
90 |
peft_model.load_state_dict(adapter_state, strict=False)
|
91 |
model = peft_model.merge_and_unload()
|
92 |
print("Merged base model with LoRA adapters.")
|