"""Define a minimal custom PretrainedConfig and push it to the Hugging Face Hub."""
from transformers import AutoConfig, PretrainedConfig


class GLMConfig(PretrainedConfig):
    # model_type must be a class attribute: transformers serializes
    # self.__class__.model_type into config.json and uses it to map the
    # file back to this config class.
    model_type = "counter"

    def __init__(
        self,
        weight=1.0,
        bias=3,
        torch_dtype="float32",
        **kwargs,
    ):
        self.weight = weight
        self.bias = bias
        # torch_dtype is owned by PretrainedConfig; pass it through to
        # super() so the base __init__ does not reset it to None.
        super().__init__(torch_dtype=torch_dtype, **kwargs)
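

# Illustrative local round-trip (a sketch of assumed usage, not in the
# original file); push_to_hub below does the same thing against the Hub:
#     cfg = GLMConfig(weight=2.0)
#     cfg.save_pretrained("./glm-config")    # writes ./glm-config/config.json
#     cfg = GLMConfig.from_pretrained("./glm-config")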


if __name__ == "__main__":
    import login  # local helper module holding the target Hub repo id

    # Load the stored values into the custom class (not the generic
    # PretrainedConfig) so model_type and the defaults above are kept.
    glm_config = GLMConfig.from_json_file("./config.json")
    glm_config.push_to_hub(login.name_or_path)
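
    # A hedged sketch (not in the original script): registering the config
    # class lets AutoConfig resolve the custom "counter" model type when the
    # pushed config.json is loaded back from the Hub.
    AutoConfig.register("counter", GLMConfig)
    reloaded = AutoConfig.from_pretrained(login.name_or_path)
    print(type(reloaded).__name__, reloaded.weight, reloaded.bias)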