xiaohua828
committed on
Commit
·
8cbed98
1
Parent(s):
12088f2
Upload 2 files
Browse files- config_counter.py +34 -0
- modeling_glm.py +39 -0
config_counter.py
ADDED
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
|
2 |
+
import transformers
|
3 |
+
from transformers import PretrainedConfig,AutoConfig,AutoModelForCausalLM
|
4 |
+
from huggingface_hub import interpreter_login
|
5 |
+
from huggingface_hub import notebook_login
|
6 |
+
from transformers import PretrainedConfig
|
7 |
+
from typing import List
|
8 |
+
|
9 |
+
from transformers.configuration_utils import PretrainedConfig
|
10 |
+
class GLMConfig(PretrainedConfig):
    """Configuration for the toy "counter" model (``y = weight * x + bias``).

    Args:
        weight: Scalar multiplier applied to the input (default ``1.0``).
        bias: Scalar offset added to the input (default ``3``).
        torch_dtype: Dtype string recorded on the config (default ``"float32"``).
        **kwargs: Forwarded unchanged to ``PretrainedConfig.__init__``.
    """

    # HF convention: declare the model type as a class attribute so it is
    # available before (and independently of) __init__.
    model_type = "counter"

    def __init__(
        self,
        weight=1.,
        bias=3,
        torch_dtype="float32",
        **kwargs,
    ):
        # Bug fix: CounterModel.__init__ reads ``config.weight`` and
        # ``config.bias``, but the original stored the constructor values
        # only under ``num_layers`` / ``vocab_size``, so a GLMConfig built
        # from these defaults could not drive the model.  Store the values
        # under the names the model actually reads, and keep the original
        # attributes so nothing that depended on them breaks.
        self.weight = weight
        self.bias = bias
        self.num_layers = weight  # kept for backward compatibility
        self.vocab_size = bias    # kept for backward compatibility
        self.torch_dtype = torch_dtype
        self.model_type = "counter"
        super().__init__(**kwargs)
|
25 |
+
|
26 |
+
if __name__ == '__main__':
    # The local ``login`` helper module supplies the target repo name;
    # load the on-disk JSON config and upload it to the Hugging Face Hub.
    import login

    loaded_config = PretrainedConfig.from_json_file("./config.json")
    loaded_config.push_to_hub(login.name_or_path)
|
33 |
+
|
34 |
+
|
modeling_glm.py
ADDED
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
|
2 |
+
import math
|
3 |
+
|
4 |
+
import torch
|
5 |
+
import torch.utils.checkpoint
|
6 |
+
import torch.nn.functional as F
|
7 |
+
from transformers import PreTrainedModel
|
8 |
+
|
9 |
+
from transformers.modeling_outputs import (ModelOutput,)
|
10 |
+
|
11 |
+
|
12 |
+
class CounterModel(PreTrainedModel):
    """Toy "counter" model whose forward pass computes ``weight * x + bias``.

    ``config`` is expected to expose scalar ``weight`` and ``bias``
    attributes (see ``GLMConfig``) — TODO confirm against actual configs
    used with this class.
    """

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.weight = config.weight
        self.bias = config.bias
        # Kept so existing checkpoints / state_dicts still load; its output
        # was never part of the returned value (see note in ``forward``).
        self.linear = torch.nn.Linear(1, 1)

    def forward(self, x, **kwargs):
        """Return a ``ModelOutput`` whose ``logits`` are ``weight * x + bias``.

        NOTE(review): the original body computed ``self.linear(x)`` and then
        discarded the result, returning the affine value as ``logits``.  The
        dead call is removed here (outputs are unchanged); if the linear
        projection was the intended output, restore ``logits=self.linear(x)``.
        """
        x = self.weight * x + self.bias
        return ModelOutput(
            # NOTE(review): transformers outputs usually spell this
            # ``last_hidden_state`` (singular) — key kept as-is so any
            # existing consumers keep working; verify downstream usage.
            last_hidden_states=None,
            logits=x,
            mems=None,
        )
|
29 |
+
|
30 |
+
if __name__ == '__main__':
    # Build a model from the on-disk JSON config, then upload it to the
    # Hugging Face Hub under the repo name supplied by the local ``login``
    # helper module.
    from transformers import PretrainedConfig

    loaded_config = PretrainedConfig.from_json_file("./config.json")
    counter_model = CounterModel(loaded_config)

    import login

    counter_model.push_to_hub(login.name_or_path)
|