jatingocodeo committed
Commit 90055ac · verified · 1 Parent(s): 44302df

Update app.py

Files changed (1): app.py +83 −3
app.py CHANGED
@@ -1,9 +1,84 @@
 import torch
 import gradio as gr
-from transformers import AutoModelForCausalLM, AutoTokenizer
+from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedModel, PretrainedConfig
 from huggingface_hub import hf_hub_download
 import json
 
+# Define the model architecture
+class SmolLM2Config(PretrainedConfig):
+    model_type = "smollm2"
+
+    def __init__(
+        self,
+        vocab_size=49152,
+        hidden_size=576,
+        intermediate_size=1536,
+        num_hidden_layers=30,
+        num_attention_heads=9,
+        num_key_value_heads=3,
+        hidden_act="silu",
+        max_position_embeddings=2048,
+        initializer_range=0.02,
+        rms_norm_eps=1e-5,
+        use_cache=True,
+        pad_token_id=None,
+        bos_token_id=0,
+        eos_token_id=0,
+        tie_word_embeddings=True,
+        **kwargs
+    ):
+        self.vocab_size = vocab_size
+        self.hidden_size = hidden_size
+        self.intermediate_size = intermediate_size
+        self.num_hidden_layers = num_hidden_layers
+        self.num_attention_heads = num_attention_heads
+        self.num_key_value_heads = num_key_value_heads
+        self.hidden_act = hidden_act
+        self.max_position_embeddings = max_position_embeddings
+        self.initializer_range = initializer_range
+        self.rms_norm_eps = rms_norm_eps
+        self.use_cache = use_cache
+        super().__init__(
+            pad_token_id=pad_token_id,
+            bos_token_id=bos_token_id,
+            eos_token_id=eos_token_id,
+            tie_word_embeddings=tie_word_embeddings,
+            **kwargs
+        )
+
+# Register the model architecture
+from transformers import AutoConfig
+AutoConfig.register("smollm2", SmolLM2Config)
+
+class SmolLM2ForCausalLM(PreTrainedModel):
+    config_class = SmolLM2Config
+
+    def __init__(self, config):
+        super().__init__(config)
+        self.config = config
+
+        # Load the model weights directly from the checkpoint
+        self.model = AutoModelForCausalLM.from_pretrained(
+            "meta-llama/Llama-2-7b-hf",
+            config=config,
+            torch_dtype=torch.float16,
+            low_cpu_mem_usage=True
+        )
+
+    def forward(self, input_ids=None, attention_mask=None, labels=None, **kwargs):
+        return self.model(
+            input_ids=input_ids,
+            attention_mask=attention_mask,
+            labels=labels,
+            **kwargs
+        )
+
+    def prepare_inputs_for_generation(self, input_ids, **kwargs):
+        return self.model.prepare_inputs_for_generation(input_ids, **kwargs)
+
+# Register the model
+AutoModelForCausalLM.register(SmolLM2Config, SmolLM2ForCausalLM)
+
 # Cache for model and tokenizer
 MODEL = None
 TOKENIZER = None
@@ -16,8 +91,12 @@ def initialize():
     model_id = "jatingocodeo/SmolLM2"
 
     try:
-        # Download model files from HF Hub
+        # Download and load config
+        print("Loading config...")
         config_path = hf_hub_download(repo_id=model_id, filename="config.json")
+        with open(config_path, 'r') as f:
+            config_dict = json.load(f)
+        config = SmolLM2Config(**config_dict)
 
         # Load tokenizer
         print("Loading tokenizer...")
@@ -33,8 +112,9 @@ def initialize():
 
         # Load model
         print("Loading model...")
-        MODEL = AutoModelForCausalLM.from_pretrained(
+        MODEL = SmolLM2ForCausalLM.from_pretrained(
             model_id,
+            config=config,
             torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
             trust_remote_code=True,
             low_cpu_mem_usage=True
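
For context, a minimal sketch of how the classes registered in this commit could be exercised after initialization. None of this code is in the diff itself; it assumes the jatingocodeo/SmolLM2 repo carries the weights and tokenizer files, and that the registration calls above have already run in the process.

    from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer

    # AutoConfig resolves model_type "smollm2" to SmolLM2Config because of
    # the AutoConfig.register("smollm2", SmolLM2Config) call in app.py.
    config = AutoConfig.from_pretrained("jatingocodeo/SmolLM2")

    # AutoModelForCausalLM.register(SmolLM2Config, SmolLM2ForCausalLM) lets the
    # auto class dispatch to SmolLM2ForCausalLM for configs of that type.
    model = AutoModelForCausalLM.from_pretrained("jatingocodeo/SmolLM2", config=config)
    tokenizer = AutoTokenizer.from_pretrained("jatingocodeo/SmolLM2")

    # Generation delegates to the wrapped model via forward() and
    # prepare_inputs_for_generation() defined on SmolLM2ForCausalLM.
    inputs = tokenizer("Hello, my name is", return_tensors="pt")
    output_ids = model.generate(**inputs, max_new_tokens=20)
    print(tokenizer.decode(output_ids[0], skip_special_tokens=True))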