Update app.py
app.py CHANGED
@@ -17,13 +17,15 @@ current_num = os.getenv("NUM")
 print(f"stage ${current_num}")
 
 api = HfApi(token=hf_token)
-models = f"dad1909/CyberSentinel-{current_num}"
+# models = f"dad1909/CyberSentinel-{current_num}"
+
+model_base = "dad1909/cybersentinal-2.0"
 
 print("Starting model and tokenizer loading...")
 
 # Load the model and tokenizer
 model, tokenizer = FastLanguageModel.from_pretrained(
-    model_name=models,
+    model_name=model_base,
     max_seq_length=max_seq_length,
     dtype=dtype,
     load_in_4bit=load_in_4bit,
@@ -141,7 +143,7 @@ print("Training completed.")
 num = int(current_num)
 num += 1
 
-uploads_models = f"
+uploads_models = f"cybersentinal-2.0-{str(num)}"
 
 print("Saving the trained model...")
 model.save_pretrained_merged("model", tokenizer, save_method="merged_16bit")
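For context, the sketch below shows how the renamed pieces fit together end to end, assuming the rest of app.py matches the lines visible in these hunks. The HF_TOKEN environment variable name, the max_seq_length/dtype/load_in_4bit values, and the final upload_folder call are assumptions added for illustration; this commit itself only swaps the base checkpoint name and the upload repo name.

# Minimal sketch of the changed flow, not the full app.py.
import os
from huggingface_hub import HfApi
from unsloth import FastLanguageModel

current_num = os.getenv("NUM")
hf_token = os.getenv("HF_TOKEN")   # assumed: token read from env, as HfApi(token=hf_token) suggests

api = HfApi(token=hf_token)

# New in this commit: always start from the fixed base checkpoint
# instead of the numbered dad1909/CyberSentinel-{current_num} repo.
model_base = "dad1909/cybersentinal-2.0"

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name=model_base,
    max_seq_length=2048,           # assumed value; defined earlier in app.py
    dtype=None,                    # assumed: let unsloth choose the dtype
    load_in_4bit=True,             # assumed value
)

# ... training happens here (omitted; see "Training completed." in hunk 2) ...

num = int(current_num) + 1
uploads_models = f"cybersentinal-2.0-{str(num)}"

# Save the merged 16-bit model locally, then push it; the upload step
# below is an assumption and is not part of this diff.
model.save_pretrained_merged("model", tokenizer, save_method="merged_16bit")
api.upload_folder(
    folder_path="model",
    repo_id=f"dad1909/{uploads_models}",
    repo_type="model",
)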