Nbardy committed on
Commit
64a3ce9
·
1 Parent(s): 1de83e7
config.json CHANGED
@@ -1,5 +1,5 @@
1
  {
2
- "_name_or_path": "./tiny-mistral",
3
  "architectures": [
4
  "MistralForCausalLM"
5
  ],
@@ -26,4 +26,4 @@
26
  "transformers_version": "4.34.1",
27
  "use_cache": true,
28
  "vocab_size": 50304
29
- }
 
1
  {
2
+ "_name_or_path": "./micro_mistral",
3
  "architectures": [
4
  "MistralForCausalLM"
5
  ],
 
26
  "transformers_version": "4.34.1",
27
  "use_cache": true,
28
  "vocab_size": 50304
29
+ }
init_model.py CHANGED
@@ -23,4 +23,4 @@ model = model.to(
23
  ) # Converts all floating point parameters to bfloat16
24
 
25
  # Save the model with SafeTensors
26
- model.save_pretrained("./model_bf16.safetensors", save_in_safe_tensors_format=True)
 
23
  ) # Converts all floating point parameters to bfloat16
24
 
25
  # Save the model with SafeTensors
26
+ model.save_pretrained("./micro_mistral", save_in_safe_tensors_format=True)
{model_bf16.safetensors → micro_mistral}/config.json RENAMED
File without changes
{model_bf16.safetensors → micro_mistral}/generation_config.json RENAMED
File without changes
micro_mistral/model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:eb30b5b14f1785c3e9fc4fb7e26e33d8bffe091da6f9203cf637b27515b5bc4f
3
+ size 244613320
test.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Load model directly and generate text from a test string
2
+ from transformers import AutoTokenizer, AutoModelForCausalLM
3
+
4
+ tokenizer = AutoTokenizer.from_pretrained("Nbardy/mini-mistral")
5
+ model = AutoModelForCausalLM.from_pretrained("Nbardy/mini-mistral")
6
+
7
+ # Prepare the test string for input
8
+ input_text = "This is a test string"
9
+ input_ids = tokenizer.encode(input_text, return_tensors="pt")
10
+
11
+ # Generate text using the model
12
+ output = model.generate(input_ids)
13
+
14
+ # Decode and print the generated text
15
+ generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
16
+ print(generated_text)