Update README.md
README.md (CHANGED)
@@ -145,39 +145,66 @@ import torch

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
import logging

# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

def load_custom_model(model_name, device):
    try:
        # Load the model directly from the Hugging Face Hub
        model = AutoModelForCausalLM.from_pretrained(model_name).to(device)
        logger.info(f"Model loaded successfully from {model_name}")
        return model
    except Exception as e:
        logger.error(f"An error occurred while loading the model: {e}")
        raise

def load_tokenizer(tokenizer_name):
    try:
        # Load the tokenizer
        tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
        logger.info(f"Tokenizer loaded successfully from {tokenizer_name}")
        return tokenizer
    except Exception as e:
        logger.error(f"An error occurred while loading the tokenizer: {e}")
        raise

def inspect_model_layers(model):
    logger.info("Inspecting model layers and weights...")
    for name, param in model.named_parameters():
        logger.debug(f"Layer: {name} | Size: {param.size()} | Values: {param[:2]}...\n")

if __name__ == "__main__":
    # Define device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    logger.info(f"Using {'CUDA' if device.type == 'cuda' else 'CPU'}")

    # Model name or path on the Hugging Face Hub
    model_name = "ayjays132/phillnet"
    tokenizer_name = model_name  # Assuming the tokenizer is at the same path

    try:
        # Load the tokenizer and model
        tokenizer = load_tokenizer(tokenizer_name)
        model = load_custom_model(model_name, device)

        # Inspect the model layers and weights
        inspect_model_layers(model)

        # Perform a simple test to verify model weights are loaded correctly (optional)
        input_ids = tokenizer.encode("Hello, world!", return_tensors="pt").to(device)
        with torch.no_grad():
            outputs = model(input_ids)
        logger.info("Model test run completed successfully.")

        print("Custom model and tokenizer loaded successfully.")
    except Exception as e:
        logger.error(f"An error occurred: {e}")
```
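
One subtlety worth noting: `inspect_model_layers` logs each layer with `logger.debug()`, while the `basicConfig` call above only enables INFO and higher, so the per-layer dump stays hidden by default. A minimal tweak to surface it, assuming you want that extra detail on the console:

```python
import logging

# inspect_model_layers() uses logger.debug(), which basicConfig(level=logging.INFO)
# filters out; raising the root logger to DEBUG makes those messages visible.
logging.getLogger().setLevel(logging.DEBUG)
```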

With `CustomModelLoader.py` at your side, you're not just loading a model; you're unlocking a world of possibilities. Whether you're fine-tuning for accuracy or predicting the unknown, your AI journey is about to get a whole lot smoother. So, scholars and AI enthusiasts, let the odyssey begin!
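
Once the loader has done its job, the model is ready for real work. Below is a minimal generation sketch that reuses the `model`, `tokenizer`, and `device` set up by the script; the prompt and sampling settings are illustrative placeholders, not recommendations:

```python
# Encode a prompt, generate a short continuation, and decode it.
prompt = "Once upon a time"  # hypothetical example prompt
inputs = tokenizer(prompt, return_tensors="pt").to(device)

with torch.no_grad():
    output_ids = model.generate(**inputs, max_new_tokens=50, do_sample=True, top_p=0.9)

print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```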

### How It Works: The Mechanics

1. **Setting the Stage**: Our script begins by checking whether to summon the powers of CUDA or settle in the CPU realm.

@@ -192,7 +219,4 @@ This script isn't just a tool; it's a companion designed to make your AI endeavors...

- **Error Logs**: Detailed logging ensures you're always in the know, making troubleshooting a breeze (a more detailed logging setup is sketched after this list).
- **Flexibility**: Designed with customization in mind, feel free to tweak the script to fit the unique needs of your scholarly pursuits (see the half-precision example below).
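
If you want those logs to carry more context, one possible upgrade is a timestamped format mirrored to a file; the format string and the `loader.log` filename here are illustrative choices, not part of the original script:

```python
import logging

# Timestamped log lines, written to both the console and a log file.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(levelname)s %(name)s: %(message)s",
    handlers=[logging.StreamHandler(), logging.FileHandler("loader.log")],
)
```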
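As one concrete tweak, you could load the model in half precision to roughly halve its GPU memory footprint. The `torch_dtype` argument is standard `from_pretrained` usage; whether float16 suits `ayjays132/phillnet` is something to verify for your own setup:

```python
import torch
from transformers import AutoModelForCausalLM

# Load weights in float16 instead of the default float32 (best on GPU).
model = AutoModelForCausalLM.from_pretrained(
    "ayjays132/phillnet", torch_dtype=torch.float16
).to("cuda")
```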

---