AIAT / Optimizer-sealion2pandas

kkatiz committed
Commit d3e9efb · verified · 1 Parent(s): 2141bcd

Delete https:

https://huggingface.co/AIAT/Optimizer-sealion2pandas/tree/main/README.md DELETED
@@ -1,7 +0,0 @@
- ---
- library_name: peft
- base_model: /project/lt900048-ai24tn/models/aisingapore/sea-lion-7b-instruct
- ---
- ### Framework versions
-
- - PEFT 0.7.2.dev0
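Note: the deleted README identified this repository as a PEFT adapter for SEA-LION 7B Instruct. A minimal sketch of how such an adapter is normally attached to its base model is shown below; the public `aisingapore/sea-lion-7b-instruct` checkpoint and the `AIAT/Optimizer-sealion2pandas` repo id are assumptions inferred from the paths above, and `trust_remote_code=True` is assumed because SEA-LION ships custom modelling code on the Hub.

```python
# Sketch only: load the base model, then attach the (now deleted) LoRA adapter.
from transformers import AutoModelForCausalLM
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained(
    "aisingapore/sea-lion-7b-instruct",  # assumed public counterpart of the cluster path in base_model
    trust_remote_code=True,              # SEA-LION uses custom modelling code
)
model = PeftModel.from_pretrained(base, "AIAT/Optimizer-sealion2pandas")  # assumed adapter repo id
```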
 
https://huggingface.co/AIAT/Optimizer-sealion2pandas/tree/main/adapter_config.json DELETED
@@ -1,29 +0,0 @@
- {
-   "alpha_pattern": {},
-   "auto_mapping": null,
-   "base_model_name_or_path": "/project/lt900048-ai24tn/models/aisingapore/sea-lion-7b-instruct",
-   "bias": "none",
-   "fan_in_fan_out": false,
-   "inference_mode": true,
-   "init_lora_weights": true,
-   "layers_pattern": null,
-   "layers_to_transform": null,
-   "loftq_config": {},
-   "lora_alpha": 16,
-   "lora_dropout": 0.05,
-   "megatron_config": null,
-   "megatron_core": "megatron.core",
-   "modules_to_save": null,
-   "peft_type": "LORA",
-   "r": 64,
-   "rank_pattern": {},
-   "revision": null,
-   "target_modules": [
-     "down_proj",
-     "up_proj",
-     "Wqkv",
-     "out_proj"
-   ],
-   "task_type": "CAUSAL_LM",
-   "use_rslora": false
- }
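Note: for reference, the deleted adapter_config.json corresponds roughly to the following `peft.LoraConfig`. This is a reconstruction from the JSON above, not the exact call used to train the adapter.

```python
# Sketch: the LoRA hyperparameters recorded in the deleted adapter_config.json.
from peft import LoraConfig

lora_config = LoraConfig(
    r=64,
    lora_alpha=16,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=["down_proj", "up_proj", "Wqkv", "out_proj"],  # MPT-style projection names
)
```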
 
https://huggingface.co/AIAT/Optimizer-sealion2pandas/tree/main/adapter_model.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:eee1bcbaac48e4ee2004b3f3daf01be12571cf2207c4eb7455f56685e6517f96
- size 2365623128
 
https://huggingface.co/AIAT/Optimizer-sealion2pandas/tree/main/special_tokens_map.json DELETED
@@ -1,17 +0,0 @@
- {
-   "eos_token": {
-     "content": "<|endoftext|>",
-     "lstrip": false,
-     "normalized": false,
-     "rstrip": false,
-     "single_word": false
-   },
-   "pad_token": "<|endoftext|>",
-   "unk_token": {
-     "content": "<unk>",
-     "lstrip": false,
-     "normalized": false,
-     "rstrip": false,
-     "single_word": false
-   }
- }
 
https://huggingface.co/AIAT/Optimizer-sealion2pandas/tree/main/tokenization_SEA_BPE.py DELETED
@@ -1,197 +0,0 @@
- import os
- from shutil import copyfile
- from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
- import sentencepiece as spm
- from tokenizers import processors
- from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
- from transformers.utils import logging
-
- logger = logging.get_logger(__name__)
- VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"}
- SPIECE_UNDERLINE = "▁"
-
-
- class SEABPETokenizer(PreTrainedTokenizer):
-     """
-     Construct the SEA BPE Tokenizer tailored for SEA languages. Based on Byte-Pair Encoding with an expanded vocabulary size.
-
-     Args:
-         vocab_file (`str`):
-             Path to the vocabulary file.
-         legacy (`bool`, *optional*, defaults to `True`):
-             Whether or not the `legacy` behaviour of the tokenizer should be used. Legacy is before the merge of #24622,
-             which includes fixes to properly handle tokens that appear after special tokens.
-             legacy means we are not modifying existing tokenizers without knowing. (And we need to manually update those core tokenizers.)
-
-             A simple example:
-
-             - `legacy=True`:
-             ```python
-             >>> from transformers import T5Tokenizer
-
-             >>> tokenizer = T5Tokenizer.from_pretrained("t5-base", legacy=True)
-             >>> tokenizer.encode("Hello <extra_id_0>.")
-             [8774, 32099, 3, 5, 1]
-             ```
-             - `legacy=False`:
-             ```python
-             >>> from transformers import T5Tokenizer
-
-             >>> tokenizer = T5Tokenizer.from_pretrained("t5-base", legacy=False)
-             >>> tokenizer.encode("Hello <extra_id_0>.")  # the extra space `[3]` is no longer here
-             [8774, 32099, 5, 1]
-             ```
-             Check out the pull request and the issue [here](https://github.com/huggingface/transformers/pull/24565) for
-             more details.
-
-     """
-
-     vocab_files_names = VOCAB_FILES_NAMES
-
-     def __init__(
-         self,
-         vocab_file,
-         unk_token="<unk>",
-         bos_token=None,
-         eos_token="<|endoftext|>",
-         pad_token=None,
-         sp_model_kwargs: Optional[Dict[str, Any]] = None,
-         add_bos_token=False,
-         add_eos_token=False,
-         clean_up_tokenization_spaces=False,
-         legacy=None,
-         **kwargs,
-     ):
-         self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
-         self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
-         self.sp_model.Load(vocab_file)
-         super().__init__(
-             bos_token=bos_token,
-             eos_token=eos_token,
-             unk_token=unk_token,
-             pad_token=pad_token,
-             add_bos_token=add_bos_token,
-             add_eos_token=add_eos_token,
-             sp_model_kwargs=self.sp_model_kwargs,
-             clean_up_tokenization_spaces=clean_up_tokenization_spaces,
-             legacy=legacy,
-             **kwargs,
-         )
-         if legacy is None:
-             logger.warning_once(
-                 f"You are using the default legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to read the related pull request available at https://github.com/huggingface/transformers/pull/24565, and set the legacy attribute accordingly."
-             )
-             legacy = True
-         self.legacy = legacy
-         self.vocab_file = vocab_file
-         self.add_bos_token = add_bos_token
-         self.add_eos_token = add_eos_token
-
-     def __getstate__(self):
-         state = self.__dict__.copy()
-         state["sp_model"] = None
-         state["sp_model_proto"] = self.sp_model.serialized_model_proto()
-         return state
-
-     def __setstate__(self, d):
-         self.__dict__ = d
-         self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
-         self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
-
-     @property
-     def vocab_size(self):
-         """Returns vocab size"""
-         return self.sp_model.get_piece_size()
-
-     def get_vocab(self):
-         """Returns vocab as a dict"""
-         vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
-         vocab.update(self.added_tokens_encoder)
-         return vocab
-
-     def tokenize(self, text, **kwargs) -> List[str]:
-         if not self.legacy:
-             text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
-         return super().tokenize(text, **kwargs)
-
-     def _tokenize(self, text):
-         """
-         Returns a tokenized string.
-
-         Since the sentencepiece internal model always adds a SPIECE_UNDERLINE at the beginning of the provided text,
-         we need to remove it by hand when the current text is a subsequence. This happens whenever the `self.tokenize`
-         function is called with special tokens: the input is split on the special tokens, and each subsequence is
-         passed to `_tokenize`. Thus, if a subsequence did not start with a `" "` or SPIECE_UNDERLINE, we have to remove
-         the extra `SPIECE_UNDERLINE` prepended.
-         """
-         if not self.legacy:
-             is_first = text.startswith(SPIECE_UNDERLINE)
-             if is_first:
-                 text = text[1:]
-         tokens = self.sp_model.encode(text, out_type=str)
-         if (
-             not self.legacy
-             and (not is_first)
-             and (not text.startswith(" "))
-             and tokens[0].startswith(SPIECE_UNDERLINE)
-         ):
-             tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
-         return tokens
-
-     def _convert_token_to_id(self, token):
-         """Converts a token (str) to an id using the vocab."""
-         return self.sp_model.piece_to_id(token)
-
-     def _convert_id_to_token(self, index):
-         """Converts an index (integer) to a token (str) using the vocab."""
-         token = self.sp_model.IdToPiece(index)
-         return token
-
-     def convert_tokens_to_string(self, tokens):
-         """Converts a sequence of tokens (strings) into a single string."""
-         current_sub_tokens = []
-         out_string = ""
-         prev_is_special = False
-         for i, token in enumerate(tokens):
-             if token in self.all_special_tokens:
-                 if not prev_is_special and i != 0:
-                     out_string += " "
-                 out_string += self.sp_model.decode(current_sub_tokens) + token
-                 prev_is_special = True
-                 current_sub_tokens = []
-             else:
-                 current_sub_tokens.append(token)
-                 prev_is_special = False
-         out_string += self.sp_model.decode(current_sub_tokens)
-         return out_string
-
-     def save_vocabulary(
-         self, save_directory, filename_prefix: Optional[str] = None
-     ) -> Tuple[str]:
-         """
-         Save the vocabulary and special tokens file to a directory.
-
-         Args:
-             save_directory (`str`):
-                 The directory in which to save the vocabulary.
-
-         Returns:
-             `Tuple(str)`: Paths to the files saved.
-         """
-         if not os.path.isdir(save_directory):
-             logger.error(f"Vocabulary path ({save_directory}) should be a directory")
-             return
-         out_vocab_file = os.path.join(
-             save_directory,
-             (filename_prefix + "-" if filename_prefix else "")
-             + VOCAB_FILES_NAMES["vocab_file"],
-         )
-         if os.path.abspath(self.vocab_file) != os.path.abspath(
-             out_vocab_file
-         ) and os.path.isfile(self.vocab_file):
-             copyfile(self.vocab_file, out_vocab_file)
-         elif not os.path.isfile(self.vocab_file):
-             with open(out_vocab_file, "wb") as fi:
-                 content_spiece_model = self.sp_model.serialized_model_proto()
-                 fi.write(content_spiece_model)
-         return (out_vocab_file,)
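Note: the deleted module wraps a SentencePiece model in a slow `PreTrainedTokenizer`. A minimal sketch of direct use follows, assuming `tokenization_SEA_BPE.py` and `tokenizer.model` have been fetched from a pre-deletion revision into the working directory; file names and the `legacy=True` choice mirror the configs in this commit.

```python
# Sketch only: instantiate the custom tokenizer directly from a local SentencePiece model.
from tokenization_SEA_BPE import SEABPETokenizer

tok = SEABPETokenizer(vocab_file="tokenizer.model", legacy=True)
tokens = tok.tokenize("Hello world")          # SentencePiece pieces
print(tokens)
print(tok.convert_tokens_to_string(tokens))   # round-trip back to text
```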
 
https://huggingface.co/AIAT/Optimizer-sealion2pandas/tree/main/tokenizer.model DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:05234874580b169d3ba5a59b004b655ddba73fc9ea5493451053374629d9295d
- size 4569957
 
https://huggingface.co/AIAT/Optimizer-sealion2pandas/tree/main/tokenizer_config.json DELETED
@@ -1,53 +0,0 @@
- {
-   "add_bos_token": false,
-   "add_eos_token": false,
-   "added_tokens_decoder": {
-     "0": {
-       "content": "<unk>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "1": {
-       "content": "<|endoftext|>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "2": {
-       "content": "<|endofline|>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "3": {
-       "content": "<|padding|>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     }
-   },
-   "auto_map": {
-     "AutoTokenizer": [
-       "tokenization_SEA_BPE.SEABPETokenizer",
-       null
-     ]
-   },
-   "bos_token": null,
-   "clean_up_tokenization_spaces": false,
-   "eos_token": "<|endoftext|>",
-   "legacy": true,
-   "model_max_length": 1000000000000000019884624838656,
-   "pad_token": "<|endoftext|>",
-   "sp_model_kwargs": {},
-   "tokenizer_class": "SEABPETokenizer",
-   "unk_token": "<unk>"
- }
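Note: the `auto_map` entry above is what let `AutoTokenizer` resolve to the custom `SEABPETokenizer`. A hedged sketch of how that loading worked while the files were still present follows; the repo id is an assumption, and the revision shown is the parent commit 2141bcd, which preceded this deletion.

```python
# Sketch only: auto_map routes AutoTokenizer to tokenization_SEA_BPE.SEABPETokenizer,
# which requires trust_remote_code=True. On main this no longer works after this commit.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "AIAT/Optimizer-sealion2pandas",  # assumed repo id
    revision="2141bcd",               # parent commit, before the files were deleted
    trust_remote_code=True,
)
print(tokenizer("convert this question to pandas")["input_ids"])
```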