diff --git a/README.md b/README.md index 5b89f059a261eb30e1fbbe11ff7d3e8dd47ad2fa..8baed9cb94eaaaac5634709e7fe582a6d02b9d3a 100644 --- a/README.md +++ b/README.md @@ -7,10 +7,13 @@ tags: base_model: chatglm3-6b model-index: - name: coolshell-llm + results: [] --- # CoolShell LLM +\[ English | [中文](./README.zh.md) \] + We express our deepest gratitude to Mr. Chen Hao for his selfless sharing in the internet community, especially in the field of technology. > An orchid in deep forest won't stop giving out aroma despite nobody appreciating it. diff --git a/README.zh.md b/README.zh.md index 560c9edf54c1f4fd38e199f88d3f58af472be804..b57c59672bf997674cc4505add17c41e532371cc 100644 --- a/README.zh.md +++ b/README.zh.md @@ -7,10 +7,13 @@ tags: base_model: chatglm3-6b model-index: - name: coolshell-llm + results: [] --- # CoolShell LLM +\[ [English](./README.md) | 中文 \] + 感恩陈皓先生对中文互联网,尤其是技术领域无私的分享。 > 芝兰生于深谷,不以无人而不芳。 diff --git a/checkpoint-100/README.md b/checkpoint-100/README.md deleted file mode 100644 index 0a4640bc0bab946c21e07f36639d991fc5d9f684..0000000000000000000000000000000000000000 --- a/checkpoint-100/README.md +++ /dev/null @@ -1,204 +0,0 @@ ---- -library_name: peft -base_model: /root/chatglm3-6b ---- - -# Model Card for Model ID - - - - - -## Model Details - -### Model Description - - - - - -- **Developed by:** [More Information Needed] -- **Funded by [optional]:** [More Information Needed] -- **Shared by [optional]:** [More Information Needed] -- **Model type:** [More Information Needed] -- **Language(s) (NLP):** [More Information Needed] -- **License:** [More Information Needed] -- **Finetuned from model [optional]:** [More Information Needed] - -### Model Sources [optional] - - - -- **Repository:** [More Information Needed] -- **Paper [optional]:** [More Information Needed] -- **Demo [optional]:** [More Information Needed] - -## Uses - - - -### Direct Use - - - -[More Information Needed] - -### Downstream Use [optional] - - - -[More Information Needed] - -### Out-of-Scope Use - - - -[More Information Needed] - -## Bias, Risks, and Limitations - - - -[More Information Needed] - -### Recommendations - - - -Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. - -## How to Get Started with the Model - -Use the code below to get started with the model. - -[More Information Needed] - -## Training Details - -### Training Data - - - -[More Information Needed] - -### Training Procedure - - - -#### Preprocessing [optional] - -[More Information Needed] - - -#### Training Hyperparameters - -- **Training regime:** [More Information Needed] - -#### Speeds, Sizes, Times [optional] - - - -[More Information Needed] - -## Evaluation - - - -### Testing Data, Factors & Metrics - -#### Testing Data - - - -[More Information Needed] - -#### Factors - - - -[More Information Needed] - -#### Metrics - - - -[More Information Needed] - -### Results - -[More Information Needed] - -#### Summary - - - -## Model Examination [optional] - - - -[More Information Needed] - -## Environmental Impact - - - -Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). 
- -- **Hardware Type:** [More Information Needed] -- **Hours used:** [More Information Needed] -- **Cloud Provider:** [More Information Needed] -- **Compute Region:** [More Information Needed] -- **Carbon Emitted:** [More Information Needed] - -## Technical Specifications [optional] - -### Model Architecture and Objective - -[More Information Needed] - -### Compute Infrastructure - -[More Information Needed] - -#### Hardware - -[More Information Needed] - -#### Software - -[More Information Needed] - -## Citation [optional] - - - -**BibTeX:** - -[More Information Needed] - -**APA:** - -[More Information Needed] - -## Glossary [optional] - - - -[More Information Needed] - -## More Information [optional] - -[More Information Needed] - -## Model Card Authors [optional] - -[More Information Needed] - -## Model Card Contact - -[More Information Needed] - - -### Framework versions - -- PEFT 0.7.1 \ No newline at end of file diff --git a/checkpoint-100/adapter_config.json b/checkpoint-100/adapter_config.json deleted file mode 100644 index e437b533e257864a38c04ed024f90cab5eebcd8d..0000000000000000000000000000000000000000 --- a/checkpoint-100/adapter_config.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "alpha_pattern": {}, - "auto_mapping": null, - "base_model_name_or_path": "/root/chatglm3-6b", - "bias": "none", - "fan_in_fan_out": false, - "inference_mode": true, - "init_lora_weights": true, - "layers_pattern": null, - "layers_to_transform": null, - "loftq_config": {}, - "lora_alpha": 64.0, - "lora_dropout": 0.1, - "megatron_config": null, - "megatron_core": "megatron.core", - "modules_to_save": null, - "peft_type": "LORA", - "r": 32, - "rank_pattern": {}, - "revision": null, - "target_modules": [ - "query_key_value" - ], - "task_type": "CAUSAL_LM" -} \ No newline at end of file diff --git a/checkpoint-100/adapter_model.safetensors b/checkpoint-100/adapter_model.safetensors deleted file mode 100644 index 9fd61fd283aa45886ba4dae97bc177d3d44b697c..0000000000000000000000000000000000000000 --- a/checkpoint-100/adapter_model.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:1f8a01b2ff9ae8d39695c90956fca3c08f1cbc215ff8ec47d39cdb42704f85f7 -size 31204248 diff --git a/checkpoint-100/optimizer.pt b/checkpoint-100/optimizer.pt deleted file mode 100644 index c23a962582f26fc2b764eda6853e713397c97536..0000000000000000000000000000000000000000 --- a/checkpoint-100/optimizer.pt +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7f89a17984e8f8325a843e199ab06bda3f078c75a4a70fd390368380879c4da9 -size 62437882 diff --git a/checkpoint-100/rng_state.pth b/checkpoint-100/rng_state.pth deleted file mode 100644 index e0c52e7520af7e10e7158e5ef5b94c6d124cd1d1..0000000000000000000000000000000000000000 --- a/checkpoint-100/rng_state.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:0dabbebc3b7aae0f1e2e08720110c236a4c4ad8bcc4021283756db5a9251a361 -size 14244 diff --git a/checkpoint-100/scheduler.pt b/checkpoint-100/scheduler.pt deleted file mode 100644 index 48f4ae9080da32e9f066992f7ab50ec4e3e59308..0000000000000000000000000000000000000000 --- a/checkpoint-100/scheduler.pt +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:c5a75a62743becb9bf113e0f626f02da4c2bf599473c2d2862708dd9fbc349c5 -size 1064 diff --git a/checkpoint-100/special_tokens_map.json b/checkpoint-100/special_tokens_map.json deleted file mode 100644 index 
dd02cd16ef3e1cfed3ce0f8cd09b983412317a48..0000000000000000000000000000000000000000 --- a/checkpoint-100/special_tokens_map.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "additional_special_tokens": [ - { - "content": "<|user|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false - }, - { - "content": "<|observation|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false - } - ] -} diff --git a/checkpoint-100/tokenization_chatglm.py b/checkpoint-100/tokenization_chatglm.py deleted file mode 100644 index 862e8f9a75bc874741cababc3b352cbbfe3611ad..0000000000000000000000000000000000000000 --- a/checkpoint-100/tokenization_chatglm.py +++ /dev/null @@ -1,300 +0,0 @@ -import json -import os -import re -from typing import List, Optional, Union, Dict -from sentencepiece import SentencePieceProcessor -from transformers import PreTrainedTokenizer -from transformers.utils import logging, PaddingStrategy -from transformers.tokenization_utils_base import EncodedInput, BatchEncoding - - -class SPTokenizer: - def __init__(self, model_path: str): - # reload tokenizer - assert os.path.isfile(model_path), model_path - self.sp_model = SentencePieceProcessor(model_file=model_path) - - # BOS / EOS token IDs - self.n_words: int = self.sp_model.vocab_size() - self.bos_id: int = self.sp_model.bos_id() - self.eos_id: int = self.sp_model.eos_id() - self.pad_id: int = self.sp_model.unk_id() - assert self.sp_model.vocab_size() == self.sp_model.get_piece_size() - - role_special_tokens = ["<|system|>", "<|user|>", "<|assistant|>", "<|observation|>"] - special_tokens = ["[MASK]", "[gMASK]", "[sMASK]", "sop", "eop"] + role_special_tokens - self.special_tokens = {} - self.index_special_tokens = {} - for token in special_tokens: - self.special_tokens[token] = self.n_words - self.index_special_tokens[self.n_words] = token - self.n_words += 1 - self.role_special_token_expression = "|".join([re.escape(token) for token in role_special_tokens]) - - def tokenize(self, s: str, encode_special_tokens=False): - if encode_special_tokens: - last_index = 0 - t = [] - for match in re.finditer(self.role_special_token_expression, s): - if last_index < match.start(): - t.extend(self.sp_model.EncodeAsPieces(s[last_index:match.start()])) - t.append(s[match.start():match.end()]) - last_index = match.end() - if last_index < len(s): - t.extend(self.sp_model.EncodeAsPieces(s[last_index:])) - return t - else: - return self.sp_model.EncodeAsPieces(s) - - def encode(self, s: str, bos: bool = False, eos: bool = False) -> List[int]: - assert type(s) is str - t = self.sp_model.encode(s) - if bos: - t = [self.bos_id] + t - if eos: - t = t + [self.eos_id] - return t - - def decode(self, t: List[int]) -> str: - text, buffer = "", [] - for token in t: - if token in self.index_special_tokens: - if buffer: - text += self.sp_model.decode(buffer) - buffer = [] - text += self.index_special_tokens[token] - else: - buffer.append(token) - if buffer: - text += self.sp_model.decode(buffer) - return text - - def decode_tokens(self, tokens: List[str]) -> str: - text = self.sp_model.DecodePieces(tokens) - return text - - def convert_token_to_id(self, token): - """ Converts a token (str) in an id using the vocab. 
""" - if token in self.special_tokens: - return self.special_tokens[token] - return self.sp_model.PieceToId(token) - - def convert_id_to_token(self, index): - """Converts an index (integer) in a token (str) using the vocab.""" - if index in self.index_special_tokens: - return self.index_special_tokens[index] - if index in [self.eos_id, self.bos_id, self.pad_id] or index < 0 or index > self.sp_model.vocab_size(): - return "" - return self.sp_model.IdToPiece(index) - - -class ChatGLMTokenizer(PreTrainedTokenizer): - vocab_files_names = {"vocab_file": "tokenizer.model"} - - model_input_names = ["input_ids", "attention_mask", "position_ids"] - - def __init__(self, vocab_file, padding_side="left", clean_up_tokenization_spaces=False, encode_special_tokens=False, - **kwargs): - self.name = "GLMTokenizer" - - self.vocab_file = vocab_file - self.tokenizer = SPTokenizer(vocab_file) - self.special_tokens = { - "": self.tokenizer.bos_id, - "": self.tokenizer.eos_id, - "": self.tokenizer.pad_id - } - self.encode_special_tokens = encode_special_tokens - super().__init__(padding_side=padding_side, clean_up_tokenization_spaces=clean_up_tokenization_spaces, - encode_special_tokens=encode_special_tokens, - **kwargs) - - def get_command(self, token): - if token in self.special_tokens: - return self.special_tokens[token] - assert token in self.tokenizer.special_tokens, f"{token} is not a special token for {self.name}" - return self.tokenizer.special_tokens[token] - - @property - def unk_token(self) -> str: - return "" - - @property - def pad_token(self) -> str: - return "" - - @property - def pad_token_id(self): - return self.get_command("") - - @property - def eos_token(self) -> str: - return "" - - @property - def eos_token_id(self): - return self.get_command("") - - @property - def vocab_size(self): - return self.tokenizer.n_words - - def get_vocab(self): - """ Returns vocab as a dict """ - vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)} - vocab.update(self.added_tokens_encoder) - return vocab - - def _tokenize(self, text, **kwargs): - return self.tokenizer.tokenize(text, encode_special_tokens=self.encode_special_tokens) - - def _convert_token_to_id(self, token): - """ Converts a token (str) in an id using the vocab. """ - return self.tokenizer.convert_token_to_id(token) - - def _convert_id_to_token(self, index): - """Converts an index (integer) in a token (str) using the vocab.""" - return self.tokenizer.convert_id_to_token(index) - - def convert_tokens_to_string(self, tokens: List[str]) -> str: - return self.tokenizer.decode_tokens(tokens) - - def save_vocabulary(self, save_directory, filename_prefix=None): - """ - Save the vocabulary and special tokens file to a directory. - - Args: - save_directory (`str`): - The directory in which to save the vocabulary. - filename_prefix (`str`, *optional*): - An optional prefix to add to the named of the saved files. - - Returns: - `Tuple(str)`: Paths to the files saved. 
- """ - if os.path.isdir(save_directory): - vocab_file = os.path.join( - save_directory, self.vocab_files_names["vocab_file"] - ) - else: - vocab_file = save_directory - - with open(self.vocab_file, 'rb') as fin: - proto_str = fin.read() - - with open(vocab_file, "wb") as writer: - writer.write(proto_str) - - return (vocab_file,) - - def get_prefix_tokens(self): - prefix_tokens = [self.get_command("[gMASK]"), self.get_command("sop")] - return prefix_tokens - - def build_single_message(self, role, metadata, message): - assert role in ["system", "user", "assistant", "observation"], role - role_tokens = [self.get_command(f"<|{role}|>")] + self.tokenizer.encode(f"{metadata}\n") - message_tokens = self.tokenizer.encode(message) - tokens = role_tokens + message_tokens - return tokens - - def build_chat_input(self, query, history=None, role="user"): - if history is None: - history = [] - input_ids = [] - for item in history: - content = item["content"] - if item["role"] == "system" and "tools" in item: - content = content + "\n" + json.dumps(item["tools"], indent=4, ensure_ascii=False) - input_ids.extend(self.build_single_message(item["role"], item.get("metadata", ""), content)) - input_ids.extend(self.build_single_message(role, "", query)) - input_ids.extend([self.get_command("<|assistant|>")]) - return self.batch_encode_plus([input_ids], return_tensors="pt", is_split_into_words=True) - - def build_inputs_with_special_tokens( - self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None - ) -> List[int]: - """ - Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and - adding special tokens. A BERT sequence has the following format: - - - single sequence: `[CLS] X [SEP]` - - pair of sequences: `[CLS] A [SEP] B [SEP]` - - Args: - token_ids_0 (`List[int]`): - List of IDs to which the special tokens will be added. - token_ids_1 (`List[int]`, *optional*): - Optional second list of IDs for sequence pairs. - - Returns: - `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. - """ - prefix_tokens = self.get_prefix_tokens() - token_ids_0 = prefix_tokens + token_ids_0 - if token_ids_1 is not None: - token_ids_0 = token_ids_0 + token_ids_1 + [self.get_command("")] - return token_ids_0 - - def _pad( - self, - encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], - max_length: Optional[int] = None, - padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, - pad_to_multiple_of: Optional[int] = None, - return_attention_mask: Optional[bool] = None, - ) -> dict: - """ - Pad encoded inputs (on left/right and up to predefined length or max length in the batch) - - Args: - encoded_inputs: - Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`). - max_length: maximum length of the returned list and optionally padding length (see below). - Will truncate by taking into account the special tokens. - padding_strategy: PaddingStrategy to use for padding. - - - PaddingStrategy.LONGEST Pad to the longest sequence in the batch - - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) - - PaddingStrategy.DO_NOT_PAD: Do not pad - The tokenizer padding sides are defined in self.padding_side: - - - 'left': pads on the left of the sequences - - 'right': pads on the right of the sequences - pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. 
- This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability - `>= 7.5` (Volta). - return_attention_mask: - (optional) Set to False to avoid returning attention mask (default: set to model specifics) - """ - # Load from model defaults - assert self.padding_side == "left" - - required_input = encoded_inputs[self.model_input_names[0]] - seq_length = len(required_input) - - if padding_strategy == PaddingStrategy.LONGEST: - max_length = len(required_input) - - if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): - max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of - - needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length - - # Initialize attention mask if not present. - if "attention_mask" not in encoded_inputs: - encoded_inputs["attention_mask"] = [1] * seq_length - - if "position_ids" not in encoded_inputs: - encoded_inputs["position_ids"] = list(range(seq_length)) - - if needs_to_be_padded: - difference = max_length - len(required_input) - - if "attention_mask" in encoded_inputs: - encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"] - if "position_ids" in encoded_inputs: - encoded_inputs["position_ids"] = [0] * difference + encoded_inputs["position_ids"] - encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input - - return encoded_inputs diff --git a/checkpoint-100/tokenizer.model b/checkpoint-100/tokenizer.model deleted file mode 100644 index 8a8007697b7cc3d3868dcffbbebf8c1f2bd690ba..0000000000000000000000000000000000000000 --- a/checkpoint-100/tokenizer.model +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e7dc4c393423b76e4373e5157ddc34803a0189ba96b21ddbb40269d31468a6f2 -size 1018370 diff --git a/checkpoint-100/tokenizer_config.json b/checkpoint-100/tokenizer_config.json deleted file mode 100644 index f0e543dcb5c184576e9e88e2c48b586290d71953..0000000000000000000000000000000000000000 --- a/checkpoint-100/tokenizer_config.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "added_tokens_decoder": { - "64795": { - "content": "<|user|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": true - }, - "64797": { - "content": "<|observation|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": true - } - }, - "additional_special_tokens": [ - "<|user|>", - "<|observation|>" - ], - "auto_map": { - "AutoTokenizer": [ - "tokenization_chatglm.ChatGLMTokenizer", - null - ] - }, - "clean_up_tokenization_spaces": false, - "do_lower_case": false, - "encode_special_tokens": false, - "eos_token": "", - "model_max_length": 1000000000000000019884624838656, - "pad_token": "", - "padding_side": "right", - "remove_space": false, - "split_special_tokens": false, - "tokenizer_class": "ChatGLMTokenizer", - "unk_token": "" -} diff --git a/checkpoint-100/trainer_state.json b/checkpoint-100/trainer_state.json deleted file mode 100644 index dfd8b46eae5e2150377f43d8e88b0328c7c053af..0000000000000000000000000000000000000000 --- a/checkpoint-100/trainer_state.json +++ /dev/null @@ -1,141 +0,0 @@ -{ - "best_metric": null, - "best_model_checkpoint": null, - "epoch": 2.2727272727272725, - "eval_steps": 500, - "global_step": 100, - "is_hyper_param_search": false, - "is_local_process_zero": true, - "is_world_process_zero": true, - "log_history": [ - { - "epoch": 0.11, 
- "learning_rate": 0.001999898043009433, - "loss": 4.5094, - "step": 5 - }, - { - "epoch": 0.23, - "learning_rate": 0.0019995921928281893, - "loss": 3.8047, - "step": 10 - }, - { - "epoch": 0.34, - "learning_rate": 0.001999082511823396, - "loss": 3.8813, - "step": 15 - }, - { - "epoch": 0.45, - "learning_rate": 0.0019983691039261358, - "loss": 3.7188, - "step": 20 - }, - { - "epoch": 0.57, - "learning_rate": 0.0019974521146102534, - "loss": 3.6695, - "step": 25 - }, - { - "epoch": 0.68, - "learning_rate": 0.001996331730862691, - "loss": 3.7078, - "step": 30 - }, - { - "epoch": 0.8, - "learning_rate": 0.0019950081811453595, - "loss": 3.6844, - "step": 35 - }, - { - "epoch": 0.91, - "learning_rate": 0.0019934817353485504, - "loss": 3.6961, - "step": 40 - }, - { - "epoch": 1.02, - "learning_rate": 0.0019917527047359027, - "loss": 3.5758, - "step": 45 - }, - { - "epoch": 1.14, - "learning_rate": 0.001989821441880933, - "loss": 3.4102, - "step": 50 - }, - { - "epoch": 1.25, - "learning_rate": 0.0019876883405951376, - "loss": 3.3984, - "step": 55 - }, - { - "epoch": 1.36, - "learning_rate": 0.001985353835847693, - "loss": 3.3602, - "step": 60 - }, - { - "epoch": 1.48, - "learning_rate": 0.0019828184036767556, - "loss": 3.4461, - "step": 65 - }, - { - "epoch": 1.59, - "learning_rate": 0.0019800825610923932, - "loss": 3.3461, - "step": 70 - }, - { - "epoch": 1.7, - "learning_rate": 0.0019771468659711597, - "loss": 3.4172, - "step": 75 - }, - { - "epoch": 1.82, - "learning_rate": 0.0019740119169423336, - "loss": 3.4359, - "step": 80 - }, - { - "epoch": 1.93, - "learning_rate": 0.0019706783532658523, - "loss": 3.5141, - "step": 85 - }, - { - "epoch": 2.05, - "learning_rate": 0.001967146854701957, - "loss": 3.2242, - "step": 90 - }, - { - "epoch": 2.16, - "learning_rate": 0.0019634181413725788, - "loss": 3.0227, - "step": 95 - }, - { - "epoch": 2.27, - "learning_rate": 0.0019594929736144974, - "loss": 2.8984, - "step": 100 - } - ], - "logging_steps": 5, - "max_steps": 1100, - "num_input_tokens_seen": 0, - "num_train_epochs": 25, - "save_steps": 100, - "total_flos": 5.099717548376064e+16, - "train_batch_size": 4, - "trial_name": null, - "trial_params": null -} diff --git a/checkpoint-100/training_args.bin b/checkpoint-100/training_args.bin deleted file mode 100644 index ff8dbcdca96337fe706e3b8a5e49365cea791f82..0000000000000000000000000000000000000000 --- a/checkpoint-100/training_args.bin +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:fef6a3ae006ec4c51dbcf0a3e569288ca5ab1bbc97f41768934c32153b03277c -size 4920 diff --git a/checkpoint-1000/README.md b/checkpoint-1000/README.md deleted file mode 100644 index 0a4640bc0bab946c21e07f36639d991fc5d9f684..0000000000000000000000000000000000000000 --- a/checkpoint-1000/README.md +++ /dev/null @@ -1,204 +0,0 @@ ---- -library_name: peft -base_model: /root/chatglm3-6b ---- - -# Model Card for Model ID - - - - - -## Model Details - -### Model Description - - - - - -- **Developed by:** [More Information Needed] -- **Funded by [optional]:** [More Information Needed] -- **Shared by [optional]:** [More Information Needed] -- **Model type:** [More Information Needed] -- **Language(s) (NLP):** [More Information Needed] -- **License:** [More Information Needed] -- **Finetuned from model [optional]:** [More Information Needed] - -### Model Sources [optional] - - - -- **Repository:** [More Information Needed] -- **Paper [optional]:** [More Information Needed] -- **Demo [optional]:** [More Information Needed] - -## Uses - - - -### 
Direct Use - - - -[More Information Needed] - -### Downstream Use [optional] - - - -[More Information Needed] - -### Out-of-Scope Use - - - -[More Information Needed] - -## Bias, Risks, and Limitations - - - -[More Information Needed] - -### Recommendations - - - -Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. - -## How to Get Started with the Model - -Use the code below to get started with the model. - -[More Information Needed] - -## Training Details - -### Training Data - - - -[More Information Needed] - -### Training Procedure - - - -#### Preprocessing [optional] - -[More Information Needed] - - -#### Training Hyperparameters - -- **Training regime:** [More Information Needed] - -#### Speeds, Sizes, Times [optional] - - - -[More Information Needed] - -## Evaluation - - - -### Testing Data, Factors & Metrics - -#### Testing Data - - - -[More Information Needed] - -#### Factors - - - -[More Information Needed] - -#### Metrics - - - -[More Information Needed] - -### Results - -[More Information Needed] - -#### Summary - - - -## Model Examination [optional] - - - -[More Information Needed] - -## Environmental Impact - - - -Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - -- **Hardware Type:** [More Information Needed] -- **Hours used:** [More Information Needed] -- **Cloud Provider:** [More Information Needed] -- **Compute Region:** [More Information Needed] -- **Carbon Emitted:** [More Information Needed] - -## Technical Specifications [optional] - -### Model Architecture and Objective - -[More Information Needed] - -### Compute Infrastructure - -[More Information Needed] - -#### Hardware - -[More Information Needed] - -#### Software - -[More Information Needed] - -## Citation [optional] - - - -**BibTeX:** - -[More Information Needed] - -**APA:** - -[More Information Needed] - -## Glossary [optional] - - - -[More Information Needed] - -## More Information [optional] - -[More Information Needed] - -## Model Card Authors [optional] - -[More Information Needed] - -## Model Card Contact - -[More Information Needed] - - -### Framework versions - -- PEFT 0.7.1 \ No newline at end of file diff --git a/checkpoint-1000/adapter_config.json b/checkpoint-1000/adapter_config.json deleted file mode 100644 index e437b533e257864a38c04ed024f90cab5eebcd8d..0000000000000000000000000000000000000000 --- a/checkpoint-1000/adapter_config.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "alpha_pattern": {}, - "auto_mapping": null, - "base_model_name_or_path": "/root/chatglm3-6b", - "bias": "none", - "fan_in_fan_out": false, - "inference_mode": true, - "init_lora_weights": true, - "layers_pattern": null, - "layers_to_transform": null, - "loftq_config": {}, - "lora_alpha": 64.0, - "lora_dropout": 0.1, - "megatron_config": null, - "megatron_core": "megatron.core", - "modules_to_save": null, - "peft_type": "LORA", - "r": 32, - "rank_pattern": {}, - "revision": null, - "target_modules": [ - "query_key_value" - ], - "task_type": "CAUSAL_LM" -} \ No newline at end of file diff --git a/checkpoint-1000/adapter_model.safetensors b/checkpoint-1000/adapter_model.safetensors deleted file mode 100644 index f1459151d22b20b20a94adb4734c8ab8b49598fa..0000000000000000000000000000000000000000 --- a/checkpoint-1000/adapter_model.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version 
https://git-lfs.github.com/spec/v1 -oid sha256:323caf0b1e8894e4ef8b0dbe356d83adafb2f8672a02f89fb8729684fbf30c82 -size 31204248 diff --git a/checkpoint-1000/optimizer.pt b/checkpoint-1000/optimizer.pt deleted file mode 100644 index cb1bd7c9fe8c1b607ce8cb00a3f71ea36572c142..0000000000000000000000000000000000000000 --- a/checkpoint-1000/optimizer.pt +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:914475ddbfdc97f3d9f8637d5b05f797d202f9a60e23df9d28710afb7e06205a -size 62437882 diff --git a/checkpoint-1000/rng_state.pth b/checkpoint-1000/rng_state.pth deleted file mode 100644 index 9ef1842deaabbd12b029eacd780378521b672e94..0000000000000000000000000000000000000000 --- a/checkpoint-1000/rng_state.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:c1073cb8b57930e10d4affaf055d83ef268bea78a4de9ff17cd6d0203574a40d -size 14244 diff --git a/checkpoint-1000/scheduler.pt b/checkpoint-1000/scheduler.pt deleted file mode 100644 index 0cc6cd963cd9ae2369bf8384f6239404dd96be65..0000000000000000000000000000000000000000 --- a/checkpoint-1000/scheduler.pt +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:bed216a1f1980adb444c4a55e2b348e6b6c8174e1a232afea7a11177b3480627 -size 1064 diff --git a/checkpoint-1000/special_tokens_map.json b/checkpoint-1000/special_tokens_map.json deleted file mode 100644 index dd02cd16ef3e1cfed3ce0f8cd09b983412317a48..0000000000000000000000000000000000000000 --- a/checkpoint-1000/special_tokens_map.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "additional_special_tokens": [ - { - "content": "<|user|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false - }, - { - "content": "<|observation|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false - } - ] -} diff --git a/checkpoint-1000/tokenization_chatglm.py b/checkpoint-1000/tokenization_chatglm.py deleted file mode 100644 index 862e8f9a75bc874741cababc3b352cbbfe3611ad..0000000000000000000000000000000000000000 --- a/checkpoint-1000/tokenization_chatglm.py +++ /dev/null @@ -1,300 +0,0 @@ -import json -import os -import re -from typing import List, Optional, Union, Dict -from sentencepiece import SentencePieceProcessor -from transformers import PreTrainedTokenizer -from transformers.utils import logging, PaddingStrategy -from transformers.tokenization_utils_base import EncodedInput, BatchEncoding - - -class SPTokenizer: - def __init__(self, model_path: str): - # reload tokenizer - assert os.path.isfile(model_path), model_path - self.sp_model = SentencePieceProcessor(model_file=model_path) - - # BOS / EOS token IDs - self.n_words: int = self.sp_model.vocab_size() - self.bos_id: int = self.sp_model.bos_id() - self.eos_id: int = self.sp_model.eos_id() - self.pad_id: int = self.sp_model.unk_id() - assert self.sp_model.vocab_size() == self.sp_model.get_piece_size() - - role_special_tokens = ["<|system|>", "<|user|>", "<|assistant|>", "<|observation|>"] - special_tokens = ["[MASK]", "[gMASK]", "[sMASK]", "sop", "eop"] + role_special_tokens - self.special_tokens = {} - self.index_special_tokens = {} - for token in special_tokens: - self.special_tokens[token] = self.n_words - self.index_special_tokens[self.n_words] = token - self.n_words += 1 - self.role_special_token_expression = "|".join([re.escape(token) for token in role_special_tokens]) - - def tokenize(self, s: str, encode_special_tokens=False): - if encode_special_tokens: - last_index = 0 - t = [] - for match in 
re.finditer(self.role_special_token_expression, s): - if last_index < match.start(): - t.extend(self.sp_model.EncodeAsPieces(s[last_index:match.start()])) - t.append(s[match.start():match.end()]) - last_index = match.end() - if last_index < len(s): - t.extend(self.sp_model.EncodeAsPieces(s[last_index:])) - return t - else: - return self.sp_model.EncodeAsPieces(s) - - def encode(self, s: str, bos: bool = False, eos: bool = False) -> List[int]: - assert type(s) is str - t = self.sp_model.encode(s) - if bos: - t = [self.bos_id] + t - if eos: - t = t + [self.eos_id] - return t - - def decode(self, t: List[int]) -> str: - text, buffer = "", [] - for token in t: - if token in self.index_special_tokens: - if buffer: - text += self.sp_model.decode(buffer) - buffer = [] - text += self.index_special_tokens[token] - else: - buffer.append(token) - if buffer: - text += self.sp_model.decode(buffer) - return text - - def decode_tokens(self, tokens: List[str]) -> str: - text = self.sp_model.DecodePieces(tokens) - return text - - def convert_token_to_id(self, token): - """ Converts a token (str) in an id using the vocab. """ - if token in self.special_tokens: - return self.special_tokens[token] - return self.sp_model.PieceToId(token) - - def convert_id_to_token(self, index): - """Converts an index (integer) in a token (str) using the vocab.""" - if index in self.index_special_tokens: - return self.index_special_tokens[index] - if index in [self.eos_id, self.bos_id, self.pad_id] or index < 0 or index > self.sp_model.vocab_size(): - return "" - return self.sp_model.IdToPiece(index) - - -class ChatGLMTokenizer(PreTrainedTokenizer): - vocab_files_names = {"vocab_file": "tokenizer.model"} - - model_input_names = ["input_ids", "attention_mask", "position_ids"] - - def __init__(self, vocab_file, padding_side="left", clean_up_tokenization_spaces=False, encode_special_tokens=False, - **kwargs): - self.name = "GLMTokenizer" - - self.vocab_file = vocab_file - self.tokenizer = SPTokenizer(vocab_file) - self.special_tokens = { - "": self.tokenizer.bos_id, - "": self.tokenizer.eos_id, - "": self.tokenizer.pad_id - } - self.encode_special_tokens = encode_special_tokens - super().__init__(padding_side=padding_side, clean_up_tokenization_spaces=clean_up_tokenization_spaces, - encode_special_tokens=encode_special_tokens, - **kwargs) - - def get_command(self, token): - if token in self.special_tokens: - return self.special_tokens[token] - assert token in self.tokenizer.special_tokens, f"{token} is not a special token for {self.name}" - return self.tokenizer.special_tokens[token] - - @property - def unk_token(self) -> str: - return "" - - @property - def pad_token(self) -> str: - return "" - - @property - def pad_token_id(self): - return self.get_command("") - - @property - def eos_token(self) -> str: - return "" - - @property - def eos_token_id(self): - return self.get_command("") - - @property - def vocab_size(self): - return self.tokenizer.n_words - - def get_vocab(self): - """ Returns vocab as a dict """ - vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)} - vocab.update(self.added_tokens_encoder) - return vocab - - def _tokenize(self, text, **kwargs): - return self.tokenizer.tokenize(text, encode_special_tokens=self.encode_special_tokens) - - def _convert_token_to_id(self, token): - """ Converts a token (str) in an id using the vocab. 
""" - return self.tokenizer.convert_token_to_id(token) - - def _convert_id_to_token(self, index): - """Converts an index (integer) in a token (str) using the vocab.""" - return self.tokenizer.convert_id_to_token(index) - - def convert_tokens_to_string(self, tokens: List[str]) -> str: - return self.tokenizer.decode_tokens(tokens) - - def save_vocabulary(self, save_directory, filename_prefix=None): - """ - Save the vocabulary and special tokens file to a directory. - - Args: - save_directory (`str`): - The directory in which to save the vocabulary. - filename_prefix (`str`, *optional*): - An optional prefix to add to the named of the saved files. - - Returns: - `Tuple(str)`: Paths to the files saved. - """ - if os.path.isdir(save_directory): - vocab_file = os.path.join( - save_directory, self.vocab_files_names["vocab_file"] - ) - else: - vocab_file = save_directory - - with open(self.vocab_file, 'rb') as fin: - proto_str = fin.read() - - with open(vocab_file, "wb") as writer: - writer.write(proto_str) - - return (vocab_file,) - - def get_prefix_tokens(self): - prefix_tokens = [self.get_command("[gMASK]"), self.get_command("sop")] - return prefix_tokens - - def build_single_message(self, role, metadata, message): - assert role in ["system", "user", "assistant", "observation"], role - role_tokens = [self.get_command(f"<|{role}|>")] + self.tokenizer.encode(f"{metadata}\n") - message_tokens = self.tokenizer.encode(message) - tokens = role_tokens + message_tokens - return tokens - - def build_chat_input(self, query, history=None, role="user"): - if history is None: - history = [] - input_ids = [] - for item in history: - content = item["content"] - if item["role"] == "system" and "tools" in item: - content = content + "\n" + json.dumps(item["tools"], indent=4, ensure_ascii=False) - input_ids.extend(self.build_single_message(item["role"], item.get("metadata", ""), content)) - input_ids.extend(self.build_single_message(role, "", query)) - input_ids.extend([self.get_command("<|assistant|>")]) - return self.batch_encode_plus([input_ids], return_tensors="pt", is_split_into_words=True) - - def build_inputs_with_special_tokens( - self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None - ) -> List[int]: - """ - Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and - adding special tokens. A BERT sequence has the following format: - - - single sequence: `[CLS] X [SEP]` - - pair of sequences: `[CLS] A [SEP] B [SEP]` - - Args: - token_ids_0 (`List[int]`): - List of IDs to which the special tokens will be added. - token_ids_1 (`List[int]`, *optional*): - Optional second list of IDs for sequence pairs. - - Returns: - `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. - """ - prefix_tokens = self.get_prefix_tokens() - token_ids_0 = prefix_tokens + token_ids_0 - if token_ids_1 is not None: - token_ids_0 = token_ids_0 + token_ids_1 + [self.get_command("")] - return token_ids_0 - - def _pad( - self, - encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], - max_length: Optional[int] = None, - padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, - pad_to_multiple_of: Optional[int] = None, - return_attention_mask: Optional[bool] = None, - ) -> dict: - """ - Pad encoded inputs (on left/right and up to predefined length or max length in the batch) - - Args: - encoded_inputs: - Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`). 
- max_length: maximum length of the returned list and optionally padding length (see below). - Will truncate by taking into account the special tokens. - padding_strategy: PaddingStrategy to use for padding. - - - PaddingStrategy.LONGEST Pad to the longest sequence in the batch - - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) - - PaddingStrategy.DO_NOT_PAD: Do not pad - The tokenizer padding sides are defined in self.padding_side: - - - 'left': pads on the left of the sequences - - 'right': pads on the right of the sequences - pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. - This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability - `>= 7.5` (Volta). - return_attention_mask: - (optional) Set to False to avoid returning attention mask (default: set to model specifics) - """ - # Load from model defaults - assert self.padding_side == "left" - - required_input = encoded_inputs[self.model_input_names[0]] - seq_length = len(required_input) - - if padding_strategy == PaddingStrategy.LONGEST: - max_length = len(required_input) - - if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): - max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of - - needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length - - # Initialize attention mask if not present. - if "attention_mask" not in encoded_inputs: - encoded_inputs["attention_mask"] = [1] * seq_length - - if "position_ids" not in encoded_inputs: - encoded_inputs["position_ids"] = list(range(seq_length)) - - if needs_to_be_padded: - difference = max_length - len(required_input) - - if "attention_mask" in encoded_inputs: - encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"] - if "position_ids" in encoded_inputs: - encoded_inputs["position_ids"] = [0] * difference + encoded_inputs["position_ids"] - encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input - - return encoded_inputs diff --git a/checkpoint-1000/tokenizer.model b/checkpoint-1000/tokenizer.model deleted file mode 100644 index 8a8007697b7cc3d3868dcffbbebf8c1f2bd690ba..0000000000000000000000000000000000000000 --- a/checkpoint-1000/tokenizer.model +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e7dc4c393423b76e4373e5157ddc34803a0189ba96b21ddbb40269d31468a6f2 -size 1018370 diff --git a/checkpoint-1000/tokenizer_config.json b/checkpoint-1000/tokenizer_config.json deleted file mode 100644 index f0e543dcb5c184576e9e88e2c48b586290d71953..0000000000000000000000000000000000000000 --- a/checkpoint-1000/tokenizer_config.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "added_tokens_decoder": { - "64795": { - "content": "<|user|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": true - }, - "64797": { - "content": "<|observation|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": true - } - }, - "additional_special_tokens": [ - "<|user|>", - "<|observation|>" - ], - "auto_map": { - "AutoTokenizer": [ - "tokenization_chatglm.ChatGLMTokenizer", - null - ] - }, - "clean_up_tokenization_spaces": false, - "do_lower_case": false, - "encode_special_tokens": false, - "eos_token": "", - "model_max_length": 1000000000000000019884624838656, - "pad_token": "", - "padding_side": "right", - 
"remove_space": false, - "split_special_tokens": false, - "tokenizer_class": "ChatGLMTokenizer", - "unk_token": "" -} diff --git a/checkpoint-1000/trainer_state.json b/checkpoint-1000/trainer_state.json deleted file mode 100644 index 50aed210e279ad94f838ad58d07469a05435ba36..0000000000000000000000000000000000000000 --- a/checkpoint-1000/trainer_state.json +++ /dev/null @@ -1,1221 +0,0 @@ -{ - "best_metric": null, - "best_model_checkpoint": null, - "epoch": 22.727272727272727, - "eval_steps": 500, - "global_step": 1000, - "is_hyper_param_search": false, - "is_local_process_zero": true, - "is_world_process_zero": true, - "log_history": [ - { - "epoch": 0.11, - "learning_rate": 0.001999898043009433, - "loss": 4.5094, - "step": 5 - }, - { - "epoch": 0.23, - "learning_rate": 0.0019995921928281893, - "loss": 3.8047, - "step": 10 - }, - { - "epoch": 0.34, - "learning_rate": 0.001999082511823396, - "loss": 3.8813, - "step": 15 - }, - { - "epoch": 0.45, - "learning_rate": 0.0019983691039261358, - "loss": 3.7188, - "step": 20 - }, - { - "epoch": 0.57, - "learning_rate": 0.0019974521146102534, - "loss": 3.6695, - "step": 25 - }, - { - "epoch": 0.68, - "learning_rate": 0.001996331730862691, - "loss": 3.7078, - "step": 30 - }, - { - "epoch": 0.8, - "learning_rate": 0.0019950081811453595, - "loss": 3.6844, - "step": 35 - }, - { - "epoch": 0.91, - "learning_rate": 0.0019934817353485504, - "loss": 3.6961, - "step": 40 - }, - { - "epoch": 1.02, - "learning_rate": 0.0019917527047359027, - "loss": 3.5758, - "step": 45 - }, - { - "epoch": 1.14, - "learning_rate": 0.001989821441880933, - "loss": 3.4102, - "step": 50 - }, - { - "epoch": 1.25, - "learning_rate": 0.0019876883405951376, - "loss": 3.3984, - "step": 55 - }, - { - "epoch": 1.36, - "learning_rate": 0.001985353835847693, - "loss": 3.3602, - "step": 60 - }, - { - "epoch": 1.48, - "learning_rate": 0.0019828184036767556, - "loss": 3.4461, - "step": 65 - }, - { - "epoch": 1.59, - "learning_rate": 0.0019800825610923932, - "loss": 3.3461, - "step": 70 - }, - { - "epoch": 1.7, - "learning_rate": 0.0019771468659711597, - "loss": 3.4172, - "step": 75 - }, - { - "epoch": 1.82, - "learning_rate": 0.0019740119169423336, - "loss": 3.4359, - "step": 80 - }, - { - "epoch": 1.93, - "learning_rate": 0.0019706783532658523, - "loss": 3.5141, - "step": 85 - }, - { - "epoch": 2.05, - "learning_rate": 0.001967146854701957, - "loss": 3.2242, - "step": 90 - }, - { - "epoch": 2.16, - "learning_rate": 0.0019634181413725788, - "loss": 3.0227, - "step": 95 - }, - { - "epoch": 2.27, - "learning_rate": 0.0019594929736144974, - "loss": 2.8984, - "step": 100 - }, - { - "epoch": 2.39, - "learning_rate": 0.001955372151824297, - "loss": 3.0781, - "step": 105 - }, - { - "epoch": 2.5, - "learning_rate": 0.0019510565162951536, - "loss": 3.1203, - "step": 110 - }, - { - "epoch": 2.61, - "learning_rate": 0.00194654694704549, - "loss": 3.1828, - "step": 115 - }, - { - "epoch": 2.73, - "learning_rate": 0.0019418443636395248, - "loss": 3.0531, - "step": 120 - }, - { - "epoch": 2.84, - "learning_rate": 0.001936949724999762, - "loss": 3.1523, - "step": 125 - }, - { - "epoch": 2.95, - "learning_rate": 0.0019318640292114524, - "loss": 3.1156, - "step": 130 - }, - { - "epoch": 3.07, - "learning_rate": 0.0019265883133190713, - "loss": 2.7844, - "step": 135 - }, - { - "epoch": 3.18, - "learning_rate": 0.0019211236531148502, - "loss": 2.6711, - "step": 140 - }, - { - "epoch": 3.3, - "learning_rate": 0.0019154711629194062, - "loss": 2.6609, - "step": 145 - }, - { - "epoch": 3.41, - "learning_rate": 
0.0019096319953545184, - "loss": 2.7531, - "step": 150 - }, - { - "epoch": 3.52, - "learning_rate": 0.0019036073411080917, - "loss": 2.7977, - "step": 155 - }, - { - "epoch": 3.64, - "learning_rate": 0.0018973984286913585, - "loss": 2.7914, - "step": 160 - }, - { - "epoch": 3.75, - "learning_rate": 0.0018910065241883678, - "loss": 2.8188, - "step": 165 - }, - { - "epoch": 3.86, - "learning_rate": 0.0018844329309978143, - "loss": 2.8945, - "step": 170 - }, - { - "epoch": 3.98, - "learning_rate": 0.0018776789895672556, - "loss": 2.8883, - "step": 175 - }, - { - "epoch": 4.09, - "learning_rate": 0.0018707460771197773, - "loss": 2.4617, - "step": 180 - }, - { - "epoch": 4.2, - "learning_rate": 0.001863635607373157, - "loss": 2.4633, - "step": 185 - }, - { - "epoch": 4.32, - "learning_rate": 0.001856349030251589, - "loss": 2.5094, - "step": 190 - }, - { - "epoch": 4.43, - "learning_rate": 0.0018488878315900226, - "loss": 2.432, - "step": 195 - }, - { - "epoch": 4.55, - "learning_rate": 0.0018412535328311812, - "loss": 2.5648, - "step": 200 - }, - { - "epoch": 4.66, - "learning_rate": 0.0018334476907153176, - "loss": 2.4836, - "step": 205 - }, - { - "epoch": 4.77, - "learning_rate": 0.001825471896962774, - "loss": 2.6617, - "step": 210 - }, - { - "epoch": 4.89, - "learning_rate": 0.0018173277779494068, - "loss": 2.6734, - "step": 215 - }, - { - "epoch": 5.0, - "learning_rate": 0.0018090169943749475, - "loss": 2.6742, - "step": 220 - }, - { - "epoch": 5.11, - "learning_rate": 0.0018005412409243604, - "loss": 2.1379, - "step": 225 - }, - { - "epoch": 5.23, - "learning_rate": 0.0017919022459222751, - "loss": 2.1508, - "step": 230 - }, - { - "epoch": 5.34, - "learning_rate": 0.0017831017709805555, - "loss": 2.2582, - "step": 235 - }, - { - "epoch": 5.45, - "learning_rate": 0.0017741416106390826, - "loss": 2.2367, - "step": 240 - }, - { - "epoch": 5.57, - "learning_rate": 0.0017650235919998232, - "loss": 2.325, - "step": 245 - }, - { - "epoch": 5.68, - "learning_rate": 0.0017557495743542584, - "loss": 2.2703, - "step": 250 - }, - { - "epoch": 5.8, - "learning_rate": 0.0017463214488042471, - "loss": 2.3703, - "step": 255 - }, - { - "epoch": 5.91, - "learning_rate": 0.001736741137876405, - "loss": 2.4648, - "step": 260 - }, - { - "epoch": 6.02, - "learning_rate": 0.0017270105951300739, - "loss": 2.2734, - "step": 265 - }, - { - "epoch": 6.14, - "learning_rate": 0.0017171318047589637, - "loss": 1.9898, - "step": 270 - }, - { - "epoch": 6.25, - "learning_rate": 0.0017071067811865474, - "loss": 1.9816, - "step": 275 - }, - { - "epoch": 6.36, - "learning_rate": 0.0016969375686552938, - "loss": 1.9648, - "step": 280 - }, - { - "epoch": 6.48, - "learning_rate": 0.0016866262408098134, - "loss": 2.1672, - "step": 285 - }, - { - "epoch": 6.59, - "learning_rate": 0.0016761749002740195, - "loss": 2.0074, - "step": 290 - }, - { - "epoch": 6.7, - "learning_rate": 0.0016655856782223683, - "loss": 2.1598, - "step": 295 - }, - { - "epoch": 6.82, - "learning_rate": 0.0016548607339452852, - "loss": 2.0996, - "step": 300 - }, - { - "epoch": 6.93, - "learning_rate": 0.0016440022544088554, - "loss": 2.1434, - "step": 305 - }, - { - "epoch": 7.05, - "learning_rate": 0.0016330124538088703, - "loss": 2.0699, - "step": 310 - }, - { - "epoch": 7.16, - "learning_rate": 0.0016218935731193223, - "loss": 1.7312, - "step": 315 - }, - { - "epoch": 7.27, - "learning_rate": 0.0016106478796354383, - "loss": 1.7799, - "step": 320 - }, - { - "epoch": 7.39, - "learning_rate": 0.0015992776665113468, - "loss": 1.7008, - "step": 325 - }, - { 
- "epoch": 7.5, - "learning_rate": 0.0015877852522924731, - "loss": 1.8969, - "step": 330 - }, - { - "epoch": 7.61, - "learning_rate": 0.0015761729804427528, - "loss": 1.8156, - "step": 335 - }, - { - "epoch": 7.73, - "learning_rate": 0.0015644432188667695, - "loss": 1.9336, - "step": 340 - }, - { - "epoch": 7.84, - "learning_rate": 0.0015525983594269026, - "loss": 1.9918, - "step": 345 - }, - { - "epoch": 7.95, - "learning_rate": 0.0015406408174555976, - "loss": 2.0055, - "step": 350 - }, - { - "epoch": 8.07, - "learning_rate": 0.0015285730312628418, - "loss": 1.7168, - "step": 355 - }, - { - "epoch": 8.18, - "learning_rate": 0.001516397461638962, - "loss": 1.5531, - "step": 360 - }, - { - "epoch": 8.3, - "learning_rate": 0.001504116591352832, - "loss": 1.5922, - "step": 365 - }, - { - "epoch": 8.41, - "learning_rate": 0.001491732924645604, - "loss": 1.618, - "step": 370 - }, - { - "epoch": 8.52, - "learning_rate": 0.0014792489867200569, - "loss": 1.6738, - "step": 375 - }, - { - "epoch": 8.64, - "learning_rate": 0.0014666673232256737, - "loss": 1.7461, - "step": 380 - }, - { - "epoch": 8.75, - "learning_rate": 0.0014539904997395467, - "loss": 1.6746, - "step": 385 - }, - { - "epoch": 8.86, - "learning_rate": 0.0014412211012432212, - "loss": 1.7711, - "step": 390 - }, - { - "epoch": 8.98, - "learning_rate": 0.0014283617315955814, - "loss": 1.8387, - "step": 395 - }, - { - "epoch": 9.09, - "learning_rate": 0.0014154150130018866, - "loss": 1.475, - "step": 400 - }, - { - "epoch": 9.2, - "learning_rate": 0.001402383585479068, - "loss": 1.4523, - "step": 405 - }, - { - "epoch": 9.32, - "learning_rate": 0.0013892701063173917, - "loss": 1.4812, - "step": 410 - }, - { - "epoch": 9.43, - "learning_rate": 0.0013760772495385997, - "loss": 1.525, - "step": 415 - }, - { - "epoch": 9.55, - "learning_rate": 0.001362807705350641, - "loss": 1.398, - "step": 420 - }, - { - "epoch": 9.66, - "learning_rate": 0.0013494641795990985, - "loss": 1.4477, - "step": 425 - }, - { - "epoch": 9.77, - "learning_rate": 0.00133604939321543, - "loss": 1.5801, - "step": 430 - }, - { - "epoch": 9.89, - "learning_rate": 0.0013225660816621341, - "loss": 1.6422, - "step": 435 - }, - { - "epoch": 10.0, - "learning_rate": 0.0013090169943749475, - "loss": 1.5535, - "step": 440 - }, - { - "epoch": 10.11, - "learning_rate": 0.0012954048942022001, - "loss": 1.2324, - "step": 445 - }, - { - "epoch": 10.23, - "learning_rate": 0.0012817325568414298, - "loss": 1.2613, - "step": 450 - }, - { - "epoch": 10.34, - "learning_rate": 0.001268002770273379, - "loss": 1.3293, - "step": 455 - }, - { - "epoch": 10.45, - "learning_rate": 0.0012542183341934872, - "loss": 1.2852, - "step": 460 - }, - { - "epoch": 10.57, - "learning_rate": 0.0012403820594409924, - "loss": 1.3295, - "step": 465 - }, - { - "epoch": 10.68, - "learning_rate": 0.0012264967674257645, - "loss": 1.3287, - "step": 470 - }, - { - "epoch": 10.8, - "learning_rate": 0.0012125652895529767, - "loss": 1.3566, - "step": 475 - }, - { - "epoch": 10.91, - "learning_rate": 0.0011985904666457455, - "loss": 1.4414, - "step": 480 - }, - { - "epoch": 11.02, - "learning_rate": 0.0011845751483658454, - "loss": 1.3695, - "step": 485 - }, - { - "epoch": 11.14, - "learning_rate": 0.0011705221926326238, - "loss": 1.1363, - "step": 490 - }, - { - "epoch": 11.25, - "learning_rate": 0.001156434465040231, - "loss": 1.1354, - "step": 495 - }, - { - "epoch": 11.36, - "learning_rate": 0.0011423148382732854, - "loss": 1.0725, - "step": 500 - }, - { - "epoch": 11.48, - "learning_rate": 0.001128166191521093, 
- "loss": 1.1754, - "step": 505 - }, - { - "epoch": 11.59, - "learning_rate": 0.0011139914098905405, - "loss": 1.1848, - "step": 510 - }, - { - "epoch": 11.7, - "learning_rate": 0.0010997933838177826, - "loss": 1.2354, - "step": 515 - }, - { - "epoch": 11.82, - "learning_rate": 0.0010855750084788399, - "loss": 1.1984, - "step": 520 - }, - { - "epoch": 11.93, - "learning_rate": 0.0010713391831992322, - "loss": 1.2666, - "step": 525 - }, - { - "epoch": 12.05, - "learning_rate": 0.001057088810862768, - "loss": 1.1408, - "step": 530 - }, - { - "epoch": 12.16, - "learning_rate": 0.0010428267973196027, - "loss": 0.9385, - "step": 535 - }, - { - "epoch": 12.27, - "learning_rate": 0.0010285560507936962, - "loss": 1.0158, - "step": 540 - }, - { - "epoch": 12.39, - "learning_rate": 0.0010142794812897874, - "loss": 0.9936, - "step": 545 - }, - { - "epoch": 12.5, - "learning_rate": 0.001, - "loss": 0.9891, - "step": 550 - }, - { - "epoch": 12.61, - "learning_rate": 0.000985720518710213, - "loss": 1.0684, - "step": 555 - }, - { - "epoch": 12.73, - "learning_rate": 0.0009714439492063038, - "loss": 1.076, - "step": 560 - }, - { - "epoch": 12.84, - "learning_rate": 0.0009571732026803976, - "loss": 1.0609, - "step": 565 - }, - { - "epoch": 12.95, - "learning_rate": 0.000942911189137232, - "loss": 1.1297, - "step": 570 - }, - { - "epoch": 13.07, - "learning_rate": 0.0009286608168007677, - "loss": 0.9342, - "step": 575 - }, - { - "epoch": 13.18, - "learning_rate": 0.0009144249915211606, - "loss": 0.8511, - "step": 580 - }, - { - "epoch": 13.3, - "learning_rate": 0.0009002066161822172, - "loss": 0.8336, - "step": 585 - }, - { - "epoch": 13.41, - "learning_rate": 0.0008860085901094594, - "loss": 0.8652, - "step": 590 - }, - { - "epoch": 13.52, - "learning_rate": 0.0008718338084789072, - "loss": 0.9744, - "step": 595 - }, - { - "epoch": 13.64, - "learning_rate": 0.000857685161726715, - "loss": 0.9006, - "step": 600 - }, - { - "epoch": 13.75, - "learning_rate": 0.000843565534959769, - "loss": 0.9619, - "step": 605 - }, - { - "epoch": 13.86, - "learning_rate": 0.0008294778073673762, - "loss": 0.9123, - "step": 610 - }, - { - "epoch": 13.98, - "learning_rate": 0.0008154248516341547, - "loss": 0.9959, - "step": 615 - }, - { - "epoch": 14.09, - "learning_rate": 0.0008014095333542549, - "loss": 0.7503, - "step": 620 - }, - { - "epoch": 14.2, - "learning_rate": 0.0007874347104470233, - "loss": 0.7357, - "step": 625 - }, - { - "epoch": 14.32, - "learning_rate": 0.0007735032325742355, - "loss": 0.7477, - "step": 630 - }, - { - "epoch": 14.43, - "learning_rate": 0.0007596179405590076, - "loss": 0.8088, - "step": 635 - }, - { - "epoch": 14.55, - "learning_rate": 0.0007457816658065133, - "loss": 0.7652, - "step": 640 - }, - { - "epoch": 14.66, - "learning_rate": 0.0007319972297266214, - "loss": 0.7847, - "step": 645 - }, - { - "epoch": 14.77, - "learning_rate": 0.0007182674431585703, - "loss": 0.7984, - "step": 650 - }, - { - "epoch": 14.89, - "learning_rate": 0.0007045951057978, - "loss": 0.8732, - "step": 655 - }, - { - "epoch": 15.0, - "learning_rate": 0.0006909830056250527, - "loss": 0.8258, - "step": 660 - }, - { - "epoch": 15.11, - "learning_rate": 0.0006774339183378663, - "loss": 0.6311, - "step": 665 - }, - { - "epoch": 15.23, - "learning_rate": 0.0006639506067845697, - "loss": 0.6543, - "step": 670 - }, - { - "epoch": 15.34, - "learning_rate": 0.0006505358204009018, - "loss": 0.6421, - "step": 675 - }, - { - "epoch": 15.45, - "learning_rate": 0.0006371922946493591, - "loss": 0.6937, - "step": 680 - }, - { - 
"epoch": 15.57, - "learning_rate": 0.0006239227504614003, - "loss": 0.6887, - "step": 685 - }, - { - "epoch": 15.68, - "learning_rate": 0.0006107298936826086, - "loss": 0.7097, - "step": 690 - }, - { - "epoch": 15.8, - "learning_rate": 0.0005976164145209322, - "loss": 0.6778, - "step": 695 - }, - { - "epoch": 15.91, - "learning_rate": 0.0005845849869981136, - "loss": 0.7124, - "step": 700 - }, - { - "epoch": 16.02, - "learning_rate": 0.000571638268404419, - "loss": 0.7053, - "step": 705 - }, - { - "epoch": 16.14, - "learning_rate": 0.0005587788987567784, - "loss": 0.5863, - "step": 710 - }, - { - "epoch": 16.25, - "learning_rate": 0.0005460095002604533, - "loss": 0.5588, - "step": 715 - }, - { - "epoch": 16.36, - "learning_rate": 0.0005333326767743263, - "loss": 0.5363, - "step": 720 - }, - { - "epoch": 16.48, - "learning_rate": 0.0005207510132799435, - "loss": 0.6137, - "step": 725 - }, - { - "epoch": 16.59, - "learning_rate": 0.0005082670753543961, - "loss": 0.5606, - "step": 730 - }, - { - "epoch": 16.7, - "learning_rate": 0.0004958834086471683, - "loss": 0.629, - "step": 735 - }, - { - "epoch": 16.82, - "learning_rate": 0.00048360253836103817, - "loss": 0.5754, - "step": 740 - }, - { - "epoch": 16.93, - "learning_rate": 0.0004714269687371581, - "loss": 0.6239, - "step": 745 - }, - { - "epoch": 17.05, - "learning_rate": 0.0004593591825444028, - "loss": 0.5807, - "step": 750 - }, - { - "epoch": 17.16, - "learning_rate": 0.0004474016405730973, - "loss": 0.465, - "step": 755 - }, - { - "epoch": 17.27, - "learning_rate": 0.00043555678113323104, - "loss": 0.4871, - "step": 760 - }, - { - "epoch": 17.39, - "learning_rate": 0.00042382701955724725, - "loss": 0.4623, - "step": 765 - }, - { - "epoch": 17.5, - "learning_rate": 0.00041221474770752696, - "loss": 0.5059, - "step": 770 - }, - { - "epoch": 17.61, - "learning_rate": 0.00040072233348865304, - "loss": 0.5021, - "step": 775 - }, - { - "epoch": 17.73, - "learning_rate": 0.0003893521203645618, - "loss": 0.5138, - "step": 780 - }, - { - "epoch": 17.84, - "learning_rate": 0.00037810642688067796, - "loss": 0.5212, - "step": 785 - }, - { - "epoch": 17.95, - "learning_rate": 0.00036698754619112975, - "loss": 0.5611, - "step": 790 - }, - { - "epoch": 18.07, - "learning_rate": 0.00035599774559114475, - "loss": 0.4956, - "step": 795 - }, - { - "epoch": 18.18, - "learning_rate": 0.000345139266054715, - "loss": 0.4243, - "step": 800 - }, - { - "epoch": 18.3, - "learning_rate": 0.0003344143217776319, - "loss": 0.4391, - "step": 805 - }, - { - "epoch": 18.41, - "learning_rate": 0.00032382509972598086, - "loss": 0.4627, - "step": 810 - }, - { - "epoch": 18.52, - "learning_rate": 0.0003133737591901864, - "loss": 0.4208, - "step": 815 - }, - { - "epoch": 18.64, - "learning_rate": 0.0003030624313447067, - "loss": 0.45, - "step": 820 - }, - { - "epoch": 18.75, - "learning_rate": 0.00029289321881345256, - "loss": 0.44, - "step": 825 - }, - { - "epoch": 18.86, - "learning_rate": 0.0002828681952410366, - "loss": 0.4451, - "step": 830 - }, - { - "epoch": 18.98, - "learning_rate": 0.0002729894048699265, - "loss": 0.4494, - "step": 835 - }, - { - "epoch": 19.09, - "learning_rate": 0.00026325886212359495, - "loss": 0.3839, - "step": 840 - }, - { - "epoch": 19.2, - "learning_rate": 0.0002536785511957531, - "loss": 0.3728, - "step": 845 - }, - { - "epoch": 19.32, - "learning_rate": 0.00024425042564574185, - "loss": 0.4126, - "step": 850 - }, - { - "epoch": 19.43, - "learning_rate": 0.00023497640800017682, - "loss": 0.4183, - "step": 855 - }, - { - "epoch": 19.55, - 
"learning_rate": 0.0002258583893609175, - "loss": 0.3778, - "step": 860 - }, - { - "epoch": 19.66, - "learning_rate": 0.00021689822901944456, - "loss": 0.3758, - "step": 865 - }, - { - "epoch": 19.77, - "learning_rate": 0.000208097754077725, - "loss": 0.4034, - "step": 870 - }, - { - "epoch": 19.89, - "learning_rate": 0.0001994587590756397, - "loss": 0.4085, - "step": 875 - }, - { - "epoch": 20.0, - "learning_rate": 0.00019098300562505265, - "loss": 0.3673, - "step": 880 - }, - { - "epoch": 20.11, - "learning_rate": 0.0001826722220505931, - "loss": 0.363, - "step": 885 - }, - { - "epoch": 20.23, - "learning_rate": 0.000174528103037226, - "loss": 0.3707, - "step": 890 - }, - { - "epoch": 20.34, - "learning_rate": 0.00016655230928468257, - "loss": 0.369, - "step": 895 - }, - { - "epoch": 20.45, - "learning_rate": 0.00015874646716881869, - "loss": 0.3528, - "step": 900 - }, - { - "epoch": 20.57, - "learning_rate": 0.00015111216840997744, - "loss": 0.3581, - "step": 905 - }, - { - "epoch": 20.68, - "learning_rate": 0.00014365096974841107, - "loss": 0.3466, - "step": 910 - }, - { - "epoch": 20.8, - "learning_rate": 0.00013636439262684297, - "loss": 0.3274, - "step": 915 - }, - { - "epoch": 20.91, - "learning_rate": 0.00012925392288022297, - "loss": 0.3401, - "step": 920 - }, - { - "epoch": 21.02, - "learning_rate": 0.00012232101043274435, - "loss": 0.3435, - "step": 925 - }, - { - "epoch": 21.14, - "learning_rate": 0.00011556706900218572, - "loss": 0.2972, - "step": 930 - }, - { - "epoch": 21.25, - "learning_rate": 0.00010899347581163222, - "loss": 0.3153, - "step": 935 - }, - { - "epoch": 21.36, - "learning_rate": 0.00010260157130864178, - "loss": 0.3315, - "step": 940 - }, - { - "epoch": 21.48, - "learning_rate": 9.639265889190829e-05, - "loss": 0.3264, - "step": 945 - }, - { - "epoch": 21.59, - "learning_rate": 9.036800464548156e-05, - "loss": 0.3427, - "step": 950 - }, - { - "epoch": 21.7, - "learning_rate": 8.4528837080594e-05, - "loss": 0.3415, - "step": 955 - }, - { - "epoch": 21.82, - "learning_rate": 7.887634688515e-05, - "loss": 0.323, - "step": 960 - }, - { - "epoch": 21.93, - "learning_rate": 7.341168668092857e-05, - "loss": 0.2961, - "step": 965 - }, - { - "epoch": 22.05, - "learning_rate": 6.813597078854772e-05, - "loss": 0.3276, - "step": 970 - }, - { - "epoch": 22.16, - "learning_rate": 6.305027500023842e-05, - "loss": 0.3045, - "step": 975 - }, - { - "epoch": 22.27, - "learning_rate": 5.8155636360475384e-05, - "loss": 0.3167, - "step": 980 - }, - { - "epoch": 22.39, - "learning_rate": 5.345305295450997e-05, - "loss": 0.319, - "step": 985 - }, - { - "epoch": 22.5, - "learning_rate": 4.894348370484647e-05, - "loss": 0.2852, - "step": 990 - }, - { - "epoch": 22.61, - "learning_rate": 4.4627848175703315e-05, - "loss": 0.3034, - "step": 995 - }, - { - "epoch": 22.73, - "learning_rate": 4.050702638550274e-05, - "loss": 0.2845, - "step": 1000 - } - ], - "logging_steps": 5, - "max_steps": 1100, - "num_input_tokens_seen": 0, - "num_train_epochs": 25, - "save_steps": 100, - "total_flos": 5.092929071525069e+17, - "train_batch_size": 4, - "trial_name": null, - "trial_params": null -} diff --git a/checkpoint-1000/training_args.bin b/checkpoint-1000/training_args.bin deleted file mode 100644 index ff8dbcdca96337fe706e3b8a5e49365cea791f82..0000000000000000000000000000000000000000 --- a/checkpoint-1000/training_args.bin +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:fef6a3ae006ec4c51dbcf0a3e569288ca5ab1bbc97f41768934c32153b03277c -size 4920 diff --git 
a/checkpoint-1100/README.md b/checkpoint-1100/README.md deleted file mode 100644 index 0a4640bc0bab946c21e07f36639d991fc5d9f684..0000000000000000000000000000000000000000 --- a/checkpoint-1100/README.md +++ /dev/null @@ -1,204 +0,0 @@ ---- -library_name: peft -base_model: /root/chatglm3-6b ---- - -# Model Card for Model ID - - - - - -## Model Details - -### Model Description - - - - - -- **Developed by:** [More Information Needed] -- **Funded by [optional]:** [More Information Needed] -- **Shared by [optional]:** [More Information Needed] -- **Model type:** [More Information Needed] -- **Language(s) (NLP):** [More Information Needed] -- **License:** [More Information Needed] -- **Finetuned from model [optional]:** [More Information Needed] - -### Model Sources [optional] - - - -- **Repository:** [More Information Needed] -- **Paper [optional]:** [More Information Needed] -- **Demo [optional]:** [More Information Needed] - -## Uses - - - -### Direct Use - - - -[More Information Needed] - -### Downstream Use [optional] - - - -[More Information Needed] - -### Out-of-Scope Use - - - -[More Information Needed] - -## Bias, Risks, and Limitations - - - -[More Information Needed] - -### Recommendations - - - -Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. - -## How to Get Started with the Model - -Use the code below to get started with the model. - -[More Information Needed] - -## Training Details - -### Training Data - - - -[More Information Needed] - -### Training Procedure - - - -#### Preprocessing [optional] - -[More Information Needed] - - -#### Training Hyperparameters - -- **Training regime:** [More Information Needed] - -#### Speeds, Sizes, Times [optional] - - - -[More Information Needed] - -## Evaluation - - - -### Testing Data, Factors & Metrics - -#### Testing Data - - - -[More Information Needed] - -#### Factors - - - -[More Information Needed] - -#### Metrics - - - -[More Information Needed] - -### Results - -[More Information Needed] - -#### Summary - - - -## Model Examination [optional] - - - -[More Information Needed] - -## Environmental Impact - - - -Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). 
- -- **Hardware Type:** [More Information Needed] -- **Hours used:** [More Information Needed] -- **Cloud Provider:** [More Information Needed] -- **Compute Region:** [More Information Needed] -- **Carbon Emitted:** [More Information Needed] - -## Technical Specifications [optional] - -### Model Architecture and Objective - -[More Information Needed] - -### Compute Infrastructure - -[More Information Needed] - -#### Hardware - -[More Information Needed] - -#### Software - -[More Information Needed] - -## Citation [optional] - - - -**BibTeX:** - -[More Information Needed] - -**APA:** - -[More Information Needed] - -## Glossary [optional] - - - -[More Information Needed] - -## More Information [optional] - -[More Information Needed] - -## Model Card Authors [optional] - -[More Information Needed] - -## Model Card Contact - -[More Information Needed] - - -### Framework versions - -- PEFT 0.7.1 \ No newline at end of file diff --git a/checkpoint-1100/adapter_config.json b/checkpoint-1100/adapter_config.json deleted file mode 100644 index e437b533e257864a38c04ed024f90cab5eebcd8d..0000000000000000000000000000000000000000 --- a/checkpoint-1100/adapter_config.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "alpha_pattern": {}, - "auto_mapping": null, - "base_model_name_or_path": "/root/chatglm3-6b", - "bias": "none", - "fan_in_fan_out": false, - "inference_mode": true, - "init_lora_weights": true, - "layers_pattern": null, - "layers_to_transform": null, - "loftq_config": {}, - "lora_alpha": 64.0, - "lora_dropout": 0.1, - "megatron_config": null, - "megatron_core": "megatron.core", - "modules_to_save": null, - "peft_type": "LORA", - "r": 32, - "rank_pattern": {}, - "revision": null, - "target_modules": [ - "query_key_value" - ], - "task_type": "CAUSAL_LM" -} \ No newline at end of file diff --git a/checkpoint-1100/adapter_model.safetensors b/checkpoint-1100/adapter_model.safetensors deleted file mode 100644 index 8b1d852d68a43b9671e9576f9427ded10ee0c12d..0000000000000000000000000000000000000000 --- a/checkpoint-1100/adapter_model.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:2bc2583490c7dc47bededcc0eaaa25d9aafe96d7680d7ecf5ec077c85de59604 -size 31204248 diff --git a/checkpoint-1100/optimizer.pt b/checkpoint-1100/optimizer.pt deleted file mode 100644 index 0c15401b201f679108fd2da0aeba241cb2180799..0000000000000000000000000000000000000000 --- a/checkpoint-1100/optimizer.pt +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:71abfda018effb690a77e01b7df48e60cb730b12599e5ad6fdc26845b844760a -size 62437882 diff --git a/checkpoint-1100/rng_state.pth b/checkpoint-1100/rng_state.pth deleted file mode 100644 index f1bc286248c277727b6ed1b195d70c8943badfd8..0000000000000000000000000000000000000000 --- a/checkpoint-1100/rng_state.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7866b8fc933c6248bae764638e49b94ebe1f35463171c6986de52c6a81632428 -size 14244 diff --git a/checkpoint-1100/scheduler.pt b/checkpoint-1100/scheduler.pt deleted file mode 100644 index 1bfb3c14ecef69c6229b4df2d31a66b2a224a72e..0000000000000000000000000000000000000000 --- a/checkpoint-1100/scheduler.pt +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:834bea796770b94431ea03d70df0b96b826ab2cbdccf7ff1204aca5c40cb9ee7 -size 1064 diff --git a/checkpoint-1100/special_tokens_map.json b/checkpoint-1100/special_tokens_map.json deleted file mode 100644 index 
dd02cd16ef3e1cfed3ce0f8cd09b983412317a48..0000000000000000000000000000000000000000 --- a/checkpoint-1100/special_tokens_map.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "additional_special_tokens": [ - { - "content": "<|user|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false - }, - { - "content": "<|observation|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false - } - ] -} diff --git a/checkpoint-1100/tokenization_chatglm.py b/checkpoint-1100/tokenization_chatglm.py deleted file mode 100644 index 862e8f9a75bc874741cababc3b352cbbfe3611ad..0000000000000000000000000000000000000000 --- a/checkpoint-1100/tokenization_chatglm.py +++ /dev/null @@ -1,300 +0,0 @@ -import json -import os -import re -from typing import List, Optional, Union, Dict -from sentencepiece import SentencePieceProcessor -from transformers import PreTrainedTokenizer -from transformers.utils import logging, PaddingStrategy -from transformers.tokenization_utils_base import EncodedInput, BatchEncoding - - -class SPTokenizer: - def __init__(self, model_path: str): - # reload tokenizer - assert os.path.isfile(model_path), model_path - self.sp_model = SentencePieceProcessor(model_file=model_path) - - # BOS / EOS token IDs - self.n_words: int = self.sp_model.vocab_size() - self.bos_id: int = self.sp_model.bos_id() - self.eos_id: int = self.sp_model.eos_id() - self.pad_id: int = self.sp_model.unk_id() - assert self.sp_model.vocab_size() == self.sp_model.get_piece_size() - - role_special_tokens = ["<|system|>", "<|user|>", "<|assistant|>", "<|observation|>"] - special_tokens = ["[MASK]", "[gMASK]", "[sMASK]", "sop", "eop"] + role_special_tokens - self.special_tokens = {} - self.index_special_tokens = {} - for token in special_tokens: - self.special_tokens[token] = self.n_words - self.index_special_tokens[self.n_words] = token - self.n_words += 1 - self.role_special_token_expression = "|".join([re.escape(token) for token in role_special_tokens]) - - def tokenize(self, s: str, encode_special_tokens=False): - if encode_special_tokens: - last_index = 0 - t = [] - for match in re.finditer(self.role_special_token_expression, s): - if last_index < match.start(): - t.extend(self.sp_model.EncodeAsPieces(s[last_index:match.start()])) - t.append(s[match.start():match.end()]) - last_index = match.end() - if last_index < len(s): - t.extend(self.sp_model.EncodeAsPieces(s[last_index:])) - return t - else: - return self.sp_model.EncodeAsPieces(s) - - def encode(self, s: str, bos: bool = False, eos: bool = False) -> List[int]: - assert type(s) is str - t = self.sp_model.encode(s) - if bos: - t = [self.bos_id] + t - if eos: - t = t + [self.eos_id] - return t - - def decode(self, t: List[int]) -> str: - text, buffer = "", [] - for token in t: - if token in self.index_special_tokens: - if buffer: - text += self.sp_model.decode(buffer) - buffer = [] - text += self.index_special_tokens[token] - else: - buffer.append(token) - if buffer: - text += self.sp_model.decode(buffer) - return text - - def decode_tokens(self, tokens: List[str]) -> str: - text = self.sp_model.DecodePieces(tokens) - return text - - def convert_token_to_id(self, token): - """ Converts a token (str) in an id using the vocab. 
""" - if token in self.special_tokens: - return self.special_tokens[token] - return self.sp_model.PieceToId(token) - - def convert_id_to_token(self, index): - """Converts an index (integer) in a token (str) using the vocab.""" - if index in self.index_special_tokens: - return self.index_special_tokens[index] - if index in [self.eos_id, self.bos_id, self.pad_id] or index < 0 or index > self.sp_model.vocab_size(): - return "" - return self.sp_model.IdToPiece(index) - - -class ChatGLMTokenizer(PreTrainedTokenizer): - vocab_files_names = {"vocab_file": "tokenizer.model"} - - model_input_names = ["input_ids", "attention_mask", "position_ids"] - - def __init__(self, vocab_file, padding_side="left", clean_up_tokenization_spaces=False, encode_special_tokens=False, - **kwargs): - self.name = "GLMTokenizer" - - self.vocab_file = vocab_file - self.tokenizer = SPTokenizer(vocab_file) - self.special_tokens = { - "": self.tokenizer.bos_id, - "": self.tokenizer.eos_id, - "": self.tokenizer.pad_id - } - self.encode_special_tokens = encode_special_tokens - super().__init__(padding_side=padding_side, clean_up_tokenization_spaces=clean_up_tokenization_spaces, - encode_special_tokens=encode_special_tokens, - **kwargs) - - def get_command(self, token): - if token in self.special_tokens: - return self.special_tokens[token] - assert token in self.tokenizer.special_tokens, f"{token} is not a special token for {self.name}" - return self.tokenizer.special_tokens[token] - - @property - def unk_token(self) -> str: - return "" - - @property - def pad_token(self) -> str: - return "" - - @property - def pad_token_id(self): - return self.get_command("") - - @property - def eos_token(self) -> str: - return "" - - @property - def eos_token_id(self): - return self.get_command("") - - @property - def vocab_size(self): - return self.tokenizer.n_words - - def get_vocab(self): - """ Returns vocab as a dict """ - vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)} - vocab.update(self.added_tokens_encoder) - return vocab - - def _tokenize(self, text, **kwargs): - return self.tokenizer.tokenize(text, encode_special_tokens=self.encode_special_tokens) - - def _convert_token_to_id(self, token): - """ Converts a token (str) in an id using the vocab. """ - return self.tokenizer.convert_token_to_id(token) - - def _convert_id_to_token(self, index): - """Converts an index (integer) in a token (str) using the vocab.""" - return self.tokenizer.convert_id_to_token(index) - - def convert_tokens_to_string(self, tokens: List[str]) -> str: - return self.tokenizer.decode_tokens(tokens) - - def save_vocabulary(self, save_directory, filename_prefix=None): - """ - Save the vocabulary and special tokens file to a directory. - - Args: - save_directory (`str`): - The directory in which to save the vocabulary. - filename_prefix (`str`, *optional*): - An optional prefix to add to the named of the saved files. - - Returns: - `Tuple(str)`: Paths to the files saved. 
- """ - if os.path.isdir(save_directory): - vocab_file = os.path.join( - save_directory, self.vocab_files_names["vocab_file"] - ) - else: - vocab_file = save_directory - - with open(self.vocab_file, 'rb') as fin: - proto_str = fin.read() - - with open(vocab_file, "wb") as writer: - writer.write(proto_str) - - return (vocab_file,) - - def get_prefix_tokens(self): - prefix_tokens = [self.get_command("[gMASK]"), self.get_command("sop")] - return prefix_tokens - - def build_single_message(self, role, metadata, message): - assert role in ["system", "user", "assistant", "observation"], role - role_tokens = [self.get_command(f"<|{role}|>")] + self.tokenizer.encode(f"{metadata}\n") - message_tokens = self.tokenizer.encode(message) - tokens = role_tokens + message_tokens - return tokens - - def build_chat_input(self, query, history=None, role="user"): - if history is None: - history = [] - input_ids = [] - for item in history: - content = item["content"] - if item["role"] == "system" and "tools" in item: - content = content + "\n" + json.dumps(item["tools"], indent=4, ensure_ascii=False) - input_ids.extend(self.build_single_message(item["role"], item.get("metadata", ""), content)) - input_ids.extend(self.build_single_message(role, "", query)) - input_ids.extend([self.get_command("<|assistant|>")]) - return self.batch_encode_plus([input_ids], return_tensors="pt", is_split_into_words=True) - - def build_inputs_with_special_tokens( - self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None - ) -> List[int]: - """ - Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and - adding special tokens. A BERT sequence has the following format: - - - single sequence: `[CLS] X [SEP]` - - pair of sequences: `[CLS] A [SEP] B [SEP]` - - Args: - token_ids_0 (`List[int]`): - List of IDs to which the special tokens will be added. - token_ids_1 (`List[int]`, *optional*): - Optional second list of IDs for sequence pairs. - - Returns: - `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. - """ - prefix_tokens = self.get_prefix_tokens() - token_ids_0 = prefix_tokens + token_ids_0 - if token_ids_1 is not None: - token_ids_0 = token_ids_0 + token_ids_1 + [self.get_command("")] - return token_ids_0 - - def _pad( - self, - encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], - max_length: Optional[int] = None, - padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, - pad_to_multiple_of: Optional[int] = None, - return_attention_mask: Optional[bool] = None, - ) -> dict: - """ - Pad encoded inputs (on left/right and up to predefined length or max length in the batch) - - Args: - encoded_inputs: - Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`). - max_length: maximum length of the returned list and optionally padding length (see below). - Will truncate by taking into account the special tokens. - padding_strategy: PaddingStrategy to use for padding. - - - PaddingStrategy.LONGEST Pad to the longest sequence in the batch - - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) - - PaddingStrategy.DO_NOT_PAD: Do not pad - The tokenizer padding sides are defined in self.padding_side: - - - 'left': pads on the left of the sequences - - 'right': pads on the right of the sequences - pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. 
- This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability - `>= 7.5` (Volta). - return_attention_mask: - (optional) Set to False to avoid returning attention mask (default: set to model specifics) - """ - # Load from model defaults - assert self.padding_side == "left" - - required_input = encoded_inputs[self.model_input_names[0]] - seq_length = len(required_input) - - if padding_strategy == PaddingStrategy.LONGEST: - max_length = len(required_input) - - if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): - max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of - - needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length - - # Initialize attention mask if not present. - if "attention_mask" not in encoded_inputs: - encoded_inputs["attention_mask"] = [1] * seq_length - - if "position_ids" not in encoded_inputs: - encoded_inputs["position_ids"] = list(range(seq_length)) - - if needs_to_be_padded: - difference = max_length - len(required_input) - - if "attention_mask" in encoded_inputs: - encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"] - if "position_ids" in encoded_inputs: - encoded_inputs["position_ids"] = [0] * difference + encoded_inputs["position_ids"] - encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input - - return encoded_inputs diff --git a/checkpoint-1100/tokenizer.model b/checkpoint-1100/tokenizer.model deleted file mode 100644 index 8a8007697b7cc3d3868dcffbbebf8c1f2bd690ba..0000000000000000000000000000000000000000 --- a/checkpoint-1100/tokenizer.model +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e7dc4c393423b76e4373e5157ddc34803a0189ba96b21ddbb40269d31468a6f2 -size 1018370 diff --git a/checkpoint-1100/tokenizer_config.json b/checkpoint-1100/tokenizer_config.json deleted file mode 100644 index f0e543dcb5c184576e9e88e2c48b586290d71953..0000000000000000000000000000000000000000 --- a/checkpoint-1100/tokenizer_config.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "added_tokens_decoder": { - "64795": { - "content": "<|user|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": true - }, - "64797": { - "content": "<|observation|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": true - } - }, - "additional_special_tokens": [ - "<|user|>", - "<|observation|>" - ], - "auto_map": { - "AutoTokenizer": [ - "tokenization_chatglm.ChatGLMTokenizer", - null - ] - }, - "clean_up_tokenization_spaces": false, - "do_lower_case": false, - "encode_special_tokens": false, - "eos_token": "", - "model_max_length": 1000000000000000019884624838656, - "pad_token": "", - "padding_side": "right", - "remove_space": false, - "split_special_tokens": false, - "tokenizer_class": "ChatGLMTokenizer", - "unk_token": "" -} diff --git a/checkpoint-1100/trainer_state.json b/checkpoint-1100/trainer_state.json deleted file mode 100644 index 9cec2b6c51c14a05ed819b8995e5b82ad3df8168..0000000000000000000000000000000000000000 --- a/checkpoint-1100/trainer_state.json +++ /dev/null @@ -1,1341 +0,0 @@ -{ - "best_metric": null, - "best_model_checkpoint": null, - "epoch": 25.0, - "eval_steps": 500, - "global_step": 1100, - "is_hyper_param_search": false, - "is_local_process_zero": true, - "is_world_process_zero": true, - "log_history": [ - { - "epoch": 0.11, - 
"learning_rate": 0.001999898043009433, - "loss": 4.5094, - "step": 5 - }, - { - "epoch": 0.23, - "learning_rate": 0.0019995921928281893, - "loss": 3.8047, - "step": 10 - }, - { - "epoch": 0.34, - "learning_rate": 0.001999082511823396, - "loss": 3.8813, - "step": 15 - }, - { - "epoch": 0.45, - "learning_rate": 0.0019983691039261358, - "loss": 3.7188, - "step": 20 - }, - { - "epoch": 0.57, - "learning_rate": 0.0019974521146102534, - "loss": 3.6695, - "step": 25 - }, - { - "epoch": 0.68, - "learning_rate": 0.001996331730862691, - "loss": 3.7078, - "step": 30 - }, - { - "epoch": 0.8, - "learning_rate": 0.0019950081811453595, - "loss": 3.6844, - "step": 35 - }, - { - "epoch": 0.91, - "learning_rate": 0.0019934817353485504, - "loss": 3.6961, - "step": 40 - }, - { - "epoch": 1.02, - "learning_rate": 0.0019917527047359027, - "loss": 3.5758, - "step": 45 - }, - { - "epoch": 1.14, - "learning_rate": 0.001989821441880933, - "loss": 3.4102, - "step": 50 - }, - { - "epoch": 1.25, - "learning_rate": 0.0019876883405951376, - "loss": 3.3984, - "step": 55 - }, - { - "epoch": 1.36, - "learning_rate": 0.001985353835847693, - "loss": 3.3602, - "step": 60 - }, - { - "epoch": 1.48, - "learning_rate": 0.0019828184036767556, - "loss": 3.4461, - "step": 65 - }, - { - "epoch": 1.59, - "learning_rate": 0.0019800825610923932, - "loss": 3.3461, - "step": 70 - }, - { - "epoch": 1.7, - "learning_rate": 0.0019771468659711597, - "loss": 3.4172, - "step": 75 - }, - { - "epoch": 1.82, - "learning_rate": 0.0019740119169423336, - "loss": 3.4359, - "step": 80 - }, - { - "epoch": 1.93, - "learning_rate": 0.0019706783532658523, - "loss": 3.5141, - "step": 85 - }, - { - "epoch": 2.05, - "learning_rate": 0.001967146854701957, - "loss": 3.2242, - "step": 90 - }, - { - "epoch": 2.16, - "learning_rate": 0.0019634181413725788, - "loss": 3.0227, - "step": 95 - }, - { - "epoch": 2.27, - "learning_rate": 0.0019594929736144974, - "loss": 2.8984, - "step": 100 - }, - { - "epoch": 2.39, - "learning_rate": 0.001955372151824297, - "loss": 3.0781, - "step": 105 - }, - { - "epoch": 2.5, - "learning_rate": 0.0019510565162951536, - "loss": 3.1203, - "step": 110 - }, - { - "epoch": 2.61, - "learning_rate": 0.00194654694704549, - "loss": 3.1828, - "step": 115 - }, - { - "epoch": 2.73, - "learning_rate": 0.0019418443636395248, - "loss": 3.0531, - "step": 120 - }, - { - "epoch": 2.84, - "learning_rate": 0.001936949724999762, - "loss": 3.1523, - "step": 125 - }, - { - "epoch": 2.95, - "learning_rate": 0.0019318640292114524, - "loss": 3.1156, - "step": 130 - }, - { - "epoch": 3.07, - "learning_rate": 0.0019265883133190713, - "loss": 2.7844, - "step": 135 - }, - { - "epoch": 3.18, - "learning_rate": 0.0019211236531148502, - "loss": 2.6711, - "step": 140 - }, - { - "epoch": 3.3, - "learning_rate": 0.0019154711629194062, - "loss": 2.6609, - "step": 145 - }, - { - "epoch": 3.41, - "learning_rate": 0.0019096319953545184, - "loss": 2.7531, - "step": 150 - }, - { - "epoch": 3.52, - "learning_rate": 0.0019036073411080917, - "loss": 2.7977, - "step": 155 - }, - { - "epoch": 3.64, - "learning_rate": 0.0018973984286913585, - "loss": 2.7914, - "step": 160 - }, - { - "epoch": 3.75, - "learning_rate": 0.0018910065241883678, - "loss": 2.8188, - "step": 165 - }, - { - "epoch": 3.86, - "learning_rate": 0.0018844329309978143, - "loss": 2.8945, - "step": 170 - }, - { - "epoch": 3.98, - "learning_rate": 0.0018776789895672556, - "loss": 2.8883, - "step": 175 - }, - { - "epoch": 4.09, - "learning_rate": 0.0018707460771197773, - "loss": 2.4617, - "step": 180 - }, - { - 
"epoch": 4.2, - "learning_rate": 0.001863635607373157, - "loss": 2.4633, - "step": 185 - }, - { - "epoch": 4.32, - "learning_rate": 0.001856349030251589, - "loss": 2.5094, - "step": 190 - }, - { - "epoch": 4.43, - "learning_rate": 0.0018488878315900226, - "loss": 2.432, - "step": 195 - }, - { - "epoch": 4.55, - "learning_rate": 0.0018412535328311812, - "loss": 2.5648, - "step": 200 - }, - { - "epoch": 4.66, - "learning_rate": 0.0018334476907153176, - "loss": 2.4836, - "step": 205 - }, - { - "epoch": 4.77, - "learning_rate": 0.001825471896962774, - "loss": 2.6617, - "step": 210 - }, - { - "epoch": 4.89, - "learning_rate": 0.0018173277779494068, - "loss": 2.6734, - "step": 215 - }, - { - "epoch": 5.0, - "learning_rate": 0.0018090169943749475, - "loss": 2.6742, - "step": 220 - }, - { - "epoch": 5.11, - "learning_rate": 0.0018005412409243604, - "loss": 2.1379, - "step": 225 - }, - { - "epoch": 5.23, - "learning_rate": 0.0017919022459222751, - "loss": 2.1508, - "step": 230 - }, - { - "epoch": 5.34, - "learning_rate": 0.0017831017709805555, - "loss": 2.2582, - "step": 235 - }, - { - "epoch": 5.45, - "learning_rate": 0.0017741416106390826, - "loss": 2.2367, - "step": 240 - }, - { - "epoch": 5.57, - "learning_rate": 0.0017650235919998232, - "loss": 2.325, - "step": 245 - }, - { - "epoch": 5.68, - "learning_rate": 0.0017557495743542584, - "loss": 2.2703, - "step": 250 - }, - { - "epoch": 5.8, - "learning_rate": 0.0017463214488042471, - "loss": 2.3703, - "step": 255 - }, - { - "epoch": 5.91, - "learning_rate": 0.001736741137876405, - "loss": 2.4648, - "step": 260 - }, - { - "epoch": 6.02, - "learning_rate": 0.0017270105951300739, - "loss": 2.2734, - "step": 265 - }, - { - "epoch": 6.14, - "learning_rate": 0.0017171318047589637, - "loss": 1.9898, - "step": 270 - }, - { - "epoch": 6.25, - "learning_rate": 0.0017071067811865474, - "loss": 1.9816, - "step": 275 - }, - { - "epoch": 6.36, - "learning_rate": 0.0016969375686552938, - "loss": 1.9648, - "step": 280 - }, - { - "epoch": 6.48, - "learning_rate": 0.0016866262408098134, - "loss": 2.1672, - "step": 285 - }, - { - "epoch": 6.59, - "learning_rate": 0.0016761749002740195, - "loss": 2.0074, - "step": 290 - }, - { - "epoch": 6.7, - "learning_rate": 0.0016655856782223683, - "loss": 2.1598, - "step": 295 - }, - { - "epoch": 6.82, - "learning_rate": 0.0016548607339452852, - "loss": 2.0996, - "step": 300 - }, - { - "epoch": 6.93, - "learning_rate": 0.0016440022544088554, - "loss": 2.1434, - "step": 305 - }, - { - "epoch": 7.05, - "learning_rate": 0.0016330124538088703, - "loss": 2.0699, - "step": 310 - }, - { - "epoch": 7.16, - "learning_rate": 0.0016218935731193223, - "loss": 1.7312, - "step": 315 - }, - { - "epoch": 7.27, - "learning_rate": 0.0016106478796354383, - "loss": 1.7799, - "step": 320 - }, - { - "epoch": 7.39, - "learning_rate": 0.0015992776665113468, - "loss": 1.7008, - "step": 325 - }, - { - "epoch": 7.5, - "learning_rate": 0.0015877852522924731, - "loss": 1.8969, - "step": 330 - }, - { - "epoch": 7.61, - "learning_rate": 0.0015761729804427528, - "loss": 1.8156, - "step": 335 - }, - { - "epoch": 7.73, - "learning_rate": 0.0015644432188667695, - "loss": 1.9336, - "step": 340 - }, - { - "epoch": 7.84, - "learning_rate": 0.0015525983594269026, - "loss": 1.9918, - "step": 345 - }, - { - "epoch": 7.95, - "learning_rate": 0.0015406408174555976, - "loss": 2.0055, - "step": 350 - }, - { - "epoch": 8.07, - "learning_rate": 0.0015285730312628418, - "loss": 1.7168, - "step": 355 - }, - { - "epoch": 8.18, - "learning_rate": 0.001516397461638962, - "loss": 
1.5531, - "step": 360 - }, - { - "epoch": 8.3, - "learning_rate": 0.001504116591352832, - "loss": 1.5922, - "step": 365 - }, - { - "epoch": 8.41, - "learning_rate": 0.001491732924645604, - "loss": 1.618, - "step": 370 - }, - { - "epoch": 8.52, - "learning_rate": 0.0014792489867200569, - "loss": 1.6738, - "step": 375 - }, - { - "epoch": 8.64, - "learning_rate": 0.0014666673232256737, - "loss": 1.7461, - "step": 380 - }, - { - "epoch": 8.75, - "learning_rate": 0.0014539904997395467, - "loss": 1.6746, - "step": 385 - }, - { - "epoch": 8.86, - "learning_rate": 0.0014412211012432212, - "loss": 1.7711, - "step": 390 - }, - { - "epoch": 8.98, - "learning_rate": 0.0014283617315955814, - "loss": 1.8387, - "step": 395 - }, - { - "epoch": 9.09, - "learning_rate": 0.0014154150130018866, - "loss": 1.475, - "step": 400 - }, - { - "epoch": 9.2, - "learning_rate": 0.001402383585479068, - "loss": 1.4523, - "step": 405 - }, - { - "epoch": 9.32, - "learning_rate": 0.0013892701063173917, - "loss": 1.4812, - "step": 410 - }, - { - "epoch": 9.43, - "learning_rate": 0.0013760772495385997, - "loss": 1.525, - "step": 415 - }, - { - "epoch": 9.55, - "learning_rate": 0.001362807705350641, - "loss": 1.398, - "step": 420 - }, - { - "epoch": 9.66, - "learning_rate": 0.0013494641795990985, - "loss": 1.4477, - "step": 425 - }, - { - "epoch": 9.77, - "learning_rate": 0.00133604939321543, - "loss": 1.5801, - "step": 430 - }, - { - "epoch": 9.89, - "learning_rate": 0.0013225660816621341, - "loss": 1.6422, - "step": 435 - }, - { - "epoch": 10.0, - "learning_rate": 0.0013090169943749475, - "loss": 1.5535, - "step": 440 - }, - { - "epoch": 10.11, - "learning_rate": 0.0012954048942022001, - "loss": 1.2324, - "step": 445 - }, - { - "epoch": 10.23, - "learning_rate": 0.0012817325568414298, - "loss": 1.2613, - "step": 450 - }, - { - "epoch": 10.34, - "learning_rate": 0.001268002770273379, - "loss": 1.3293, - "step": 455 - }, - { - "epoch": 10.45, - "learning_rate": 0.0012542183341934872, - "loss": 1.2852, - "step": 460 - }, - { - "epoch": 10.57, - "learning_rate": 0.0012403820594409924, - "loss": 1.3295, - "step": 465 - }, - { - "epoch": 10.68, - "learning_rate": 0.0012264967674257645, - "loss": 1.3287, - "step": 470 - }, - { - "epoch": 10.8, - "learning_rate": 0.0012125652895529767, - "loss": 1.3566, - "step": 475 - }, - { - "epoch": 10.91, - "learning_rate": 0.0011985904666457455, - "loss": 1.4414, - "step": 480 - }, - { - "epoch": 11.02, - "learning_rate": 0.0011845751483658454, - "loss": 1.3695, - "step": 485 - }, - { - "epoch": 11.14, - "learning_rate": 0.0011705221926326238, - "loss": 1.1363, - "step": 490 - }, - { - "epoch": 11.25, - "learning_rate": 0.001156434465040231, - "loss": 1.1354, - "step": 495 - }, - { - "epoch": 11.36, - "learning_rate": 0.0011423148382732854, - "loss": 1.0725, - "step": 500 - }, - { - "epoch": 11.48, - "learning_rate": 0.001128166191521093, - "loss": 1.1754, - "step": 505 - }, - { - "epoch": 11.59, - "learning_rate": 0.0011139914098905405, - "loss": 1.1848, - "step": 510 - }, - { - "epoch": 11.7, - "learning_rate": 0.0010997933838177826, - "loss": 1.2354, - "step": 515 - }, - { - "epoch": 11.82, - "learning_rate": 0.0010855750084788399, - "loss": 1.1984, - "step": 520 - }, - { - "epoch": 11.93, - "learning_rate": 0.0010713391831992322, - "loss": 1.2666, - "step": 525 - }, - { - "epoch": 12.05, - "learning_rate": 0.001057088810862768, - "loss": 1.1408, - "step": 530 - }, - { - "epoch": 12.16, - "learning_rate": 0.0010428267973196027, - "loss": 0.9385, - "step": 535 - }, - { - "epoch": 12.27, - 
"learning_rate": 0.0010285560507936962, - "loss": 1.0158, - "step": 540 - }, - { - "epoch": 12.39, - "learning_rate": 0.0010142794812897874, - "loss": 0.9936, - "step": 545 - }, - { - "epoch": 12.5, - "learning_rate": 0.001, - "loss": 0.9891, - "step": 550 - }, - { - "epoch": 12.61, - "learning_rate": 0.000985720518710213, - "loss": 1.0684, - "step": 555 - }, - { - "epoch": 12.73, - "learning_rate": 0.0009714439492063038, - "loss": 1.076, - "step": 560 - }, - { - "epoch": 12.84, - "learning_rate": 0.0009571732026803976, - "loss": 1.0609, - "step": 565 - }, - { - "epoch": 12.95, - "learning_rate": 0.000942911189137232, - "loss": 1.1297, - "step": 570 - }, - { - "epoch": 13.07, - "learning_rate": 0.0009286608168007677, - "loss": 0.9342, - "step": 575 - }, - { - "epoch": 13.18, - "learning_rate": 0.0009144249915211606, - "loss": 0.8511, - "step": 580 - }, - { - "epoch": 13.3, - "learning_rate": 0.0009002066161822172, - "loss": 0.8336, - "step": 585 - }, - { - "epoch": 13.41, - "learning_rate": 0.0008860085901094594, - "loss": 0.8652, - "step": 590 - }, - { - "epoch": 13.52, - "learning_rate": 0.0008718338084789072, - "loss": 0.9744, - "step": 595 - }, - { - "epoch": 13.64, - "learning_rate": 0.000857685161726715, - "loss": 0.9006, - "step": 600 - }, - { - "epoch": 13.75, - "learning_rate": 0.000843565534959769, - "loss": 0.9619, - "step": 605 - }, - { - "epoch": 13.86, - "learning_rate": 0.0008294778073673762, - "loss": 0.9123, - "step": 610 - }, - { - "epoch": 13.98, - "learning_rate": 0.0008154248516341547, - "loss": 0.9959, - "step": 615 - }, - { - "epoch": 14.09, - "learning_rate": 0.0008014095333542549, - "loss": 0.7503, - "step": 620 - }, - { - "epoch": 14.2, - "learning_rate": 0.0007874347104470233, - "loss": 0.7357, - "step": 625 - }, - { - "epoch": 14.32, - "learning_rate": 0.0007735032325742355, - "loss": 0.7477, - "step": 630 - }, - { - "epoch": 14.43, - "learning_rate": 0.0007596179405590076, - "loss": 0.8088, - "step": 635 - }, - { - "epoch": 14.55, - "learning_rate": 0.0007457816658065133, - "loss": 0.7652, - "step": 640 - }, - { - "epoch": 14.66, - "learning_rate": 0.0007319972297266214, - "loss": 0.7847, - "step": 645 - }, - { - "epoch": 14.77, - "learning_rate": 0.0007182674431585703, - "loss": 0.7984, - "step": 650 - }, - { - "epoch": 14.89, - "learning_rate": 0.0007045951057978, - "loss": 0.8732, - "step": 655 - }, - { - "epoch": 15.0, - "learning_rate": 0.0006909830056250527, - "loss": 0.8258, - "step": 660 - }, - { - "epoch": 15.11, - "learning_rate": 0.0006774339183378663, - "loss": 0.6311, - "step": 665 - }, - { - "epoch": 15.23, - "learning_rate": 0.0006639506067845697, - "loss": 0.6543, - "step": 670 - }, - { - "epoch": 15.34, - "learning_rate": 0.0006505358204009018, - "loss": 0.6421, - "step": 675 - }, - { - "epoch": 15.45, - "learning_rate": 0.0006371922946493591, - "loss": 0.6937, - "step": 680 - }, - { - "epoch": 15.57, - "learning_rate": 0.0006239227504614003, - "loss": 0.6887, - "step": 685 - }, - { - "epoch": 15.68, - "learning_rate": 0.0006107298936826086, - "loss": 0.7097, - "step": 690 - }, - { - "epoch": 15.8, - "learning_rate": 0.0005976164145209322, - "loss": 0.6778, - "step": 695 - }, - { - "epoch": 15.91, - "learning_rate": 0.0005845849869981136, - "loss": 0.7124, - "step": 700 - }, - { - "epoch": 16.02, - "learning_rate": 0.000571638268404419, - "loss": 0.7053, - "step": 705 - }, - { - "epoch": 16.14, - "learning_rate": 0.0005587788987567784, - "loss": 0.5863, - "step": 710 - }, - { - "epoch": 16.25, - "learning_rate": 0.0005460095002604533, - "loss": 
0.5588, - "step": 715 - }, - { - "epoch": 16.36, - "learning_rate": 0.0005333326767743263, - "loss": 0.5363, - "step": 720 - }, - { - "epoch": 16.48, - "learning_rate": 0.0005207510132799435, - "loss": 0.6137, - "step": 725 - }, - { - "epoch": 16.59, - "learning_rate": 0.0005082670753543961, - "loss": 0.5606, - "step": 730 - }, - { - "epoch": 16.7, - "learning_rate": 0.0004958834086471683, - "loss": 0.629, - "step": 735 - }, - { - "epoch": 16.82, - "learning_rate": 0.00048360253836103817, - "loss": 0.5754, - "step": 740 - }, - { - "epoch": 16.93, - "learning_rate": 0.0004714269687371581, - "loss": 0.6239, - "step": 745 - }, - { - "epoch": 17.05, - "learning_rate": 0.0004593591825444028, - "loss": 0.5807, - "step": 750 - }, - { - "epoch": 17.16, - "learning_rate": 0.0004474016405730973, - "loss": 0.465, - "step": 755 - }, - { - "epoch": 17.27, - "learning_rate": 0.00043555678113323104, - "loss": 0.4871, - "step": 760 - }, - { - "epoch": 17.39, - "learning_rate": 0.00042382701955724725, - "loss": 0.4623, - "step": 765 - }, - { - "epoch": 17.5, - "learning_rate": 0.00041221474770752696, - "loss": 0.5059, - "step": 770 - }, - { - "epoch": 17.61, - "learning_rate": 0.00040072233348865304, - "loss": 0.5021, - "step": 775 - }, - { - "epoch": 17.73, - "learning_rate": 0.0003893521203645618, - "loss": 0.5138, - "step": 780 - }, - { - "epoch": 17.84, - "learning_rate": 0.00037810642688067796, - "loss": 0.5212, - "step": 785 - }, - { - "epoch": 17.95, - "learning_rate": 0.00036698754619112975, - "loss": 0.5611, - "step": 790 - }, - { - "epoch": 18.07, - "learning_rate": 0.00035599774559114475, - "loss": 0.4956, - "step": 795 - }, - { - "epoch": 18.18, - "learning_rate": 0.000345139266054715, - "loss": 0.4243, - "step": 800 - }, - { - "epoch": 18.3, - "learning_rate": 0.0003344143217776319, - "loss": 0.4391, - "step": 805 - }, - { - "epoch": 18.41, - "learning_rate": 0.00032382509972598086, - "loss": 0.4627, - "step": 810 - }, - { - "epoch": 18.52, - "learning_rate": 0.0003133737591901864, - "loss": 0.4208, - "step": 815 - }, - { - "epoch": 18.64, - "learning_rate": 0.0003030624313447067, - "loss": 0.45, - "step": 820 - }, - { - "epoch": 18.75, - "learning_rate": 0.00029289321881345256, - "loss": 0.44, - "step": 825 - }, - { - "epoch": 18.86, - "learning_rate": 0.0002828681952410366, - "loss": 0.4451, - "step": 830 - }, - { - "epoch": 18.98, - "learning_rate": 0.0002729894048699265, - "loss": 0.4494, - "step": 835 - }, - { - "epoch": 19.09, - "learning_rate": 0.00026325886212359495, - "loss": 0.3839, - "step": 840 - }, - { - "epoch": 19.2, - "learning_rate": 0.0002536785511957531, - "loss": 0.3728, - "step": 845 - }, - { - "epoch": 19.32, - "learning_rate": 0.00024425042564574185, - "loss": 0.4126, - "step": 850 - }, - { - "epoch": 19.43, - "learning_rate": 0.00023497640800017682, - "loss": 0.4183, - "step": 855 - }, - { - "epoch": 19.55, - "learning_rate": 0.0002258583893609175, - "loss": 0.3778, - "step": 860 - }, - { - "epoch": 19.66, - "learning_rate": 0.00021689822901944456, - "loss": 0.3758, - "step": 865 - }, - { - "epoch": 19.77, - "learning_rate": 0.000208097754077725, - "loss": 0.4034, - "step": 870 - }, - { - "epoch": 19.89, - "learning_rate": 0.0001994587590756397, - "loss": 0.4085, - "step": 875 - }, - { - "epoch": 20.0, - "learning_rate": 0.00019098300562505265, - "loss": 0.3673, - "step": 880 - }, - { - "epoch": 20.11, - "learning_rate": 0.0001826722220505931, - "loss": 0.363, - "step": 885 - }, - { - "epoch": 20.23, - "learning_rate": 0.000174528103037226, - "loss": 0.3707, - "step": 
890 - }, - { - "epoch": 20.34, - "learning_rate": 0.00016655230928468257, - "loss": 0.369, - "step": 895 - }, - { - "epoch": 20.45, - "learning_rate": 0.00015874646716881869, - "loss": 0.3528, - "step": 900 - }, - { - "epoch": 20.57, - "learning_rate": 0.00015111216840997744, - "loss": 0.3581, - "step": 905 - }, - { - "epoch": 20.68, - "learning_rate": 0.00014365096974841107, - "loss": 0.3466, - "step": 910 - }, - { - "epoch": 20.8, - "learning_rate": 0.00013636439262684297, - "loss": 0.3274, - "step": 915 - }, - { - "epoch": 20.91, - "learning_rate": 0.00012925392288022297, - "loss": 0.3401, - "step": 920 - }, - { - "epoch": 21.02, - "learning_rate": 0.00012232101043274435, - "loss": 0.3435, - "step": 925 - }, - { - "epoch": 21.14, - "learning_rate": 0.00011556706900218572, - "loss": 0.2972, - "step": 930 - }, - { - "epoch": 21.25, - "learning_rate": 0.00010899347581163222, - "loss": 0.3153, - "step": 935 - }, - { - "epoch": 21.36, - "learning_rate": 0.00010260157130864178, - "loss": 0.3315, - "step": 940 - }, - { - "epoch": 21.48, - "learning_rate": 9.639265889190829e-05, - "loss": 0.3264, - "step": 945 - }, - { - "epoch": 21.59, - "learning_rate": 9.036800464548156e-05, - "loss": 0.3427, - "step": 950 - }, - { - "epoch": 21.7, - "learning_rate": 8.4528837080594e-05, - "loss": 0.3415, - "step": 955 - }, - { - "epoch": 21.82, - "learning_rate": 7.887634688515e-05, - "loss": 0.323, - "step": 960 - }, - { - "epoch": 21.93, - "learning_rate": 7.341168668092857e-05, - "loss": 0.2961, - "step": 965 - }, - { - "epoch": 22.05, - "learning_rate": 6.813597078854772e-05, - "loss": 0.3276, - "step": 970 - }, - { - "epoch": 22.16, - "learning_rate": 6.305027500023842e-05, - "loss": 0.3045, - "step": 975 - }, - { - "epoch": 22.27, - "learning_rate": 5.8155636360475384e-05, - "loss": 0.3167, - "step": 980 - }, - { - "epoch": 22.39, - "learning_rate": 5.345305295450997e-05, - "loss": 0.319, - "step": 985 - }, - { - "epoch": 22.5, - "learning_rate": 4.894348370484647e-05, - "loss": 0.2852, - "step": 990 - }, - { - "epoch": 22.61, - "learning_rate": 4.4627848175703315e-05, - "loss": 0.3034, - "step": 995 - }, - { - "epoch": 22.73, - "learning_rate": 4.050702638550274e-05, - "loss": 0.2845, - "step": 1000 - }, - { - "epoch": 22.84, - "learning_rate": 3.658185862742103e-05, - "loss": 0.3136, - "step": 1005 - }, - { - "epoch": 22.95, - "learning_rate": 3.285314529804295e-05, - "loss": 0.3187, - "step": 1010 - }, - { - "epoch": 23.07, - "learning_rate": 2.93216467341475e-05, - "loss": 0.2907, - "step": 1015 - }, - { - "epoch": 23.18, - "learning_rate": 2.5988083057666535e-05, - "loss": 0.2955, - "step": 1020 - }, - { - "epoch": 23.3, - "learning_rate": 2.2853134028840594e-05, - "loss": 0.2785, - "step": 1025 - }, - { - "epoch": 23.41, - "learning_rate": 1.9917438907606554e-05, - "loss": 0.3369, - "step": 1030 - }, - { - "epoch": 23.52, - "learning_rate": 1.7181596323244453e-05, - "loss": 0.2837, - "step": 1035 - }, - { - "epoch": 23.64, - "learning_rate": 1.4646164152307017e-05, - "loss": 0.3002, - "step": 1040 - }, - { - "epoch": 23.75, - "learning_rate": 1.231165940486234e-05, - "loss": 0.3062, - "step": 1045 - }, - { - "epoch": 23.86, - "learning_rate": 1.0178558119067316e-05, - "loss": 0.2859, - "step": 1050 - }, - { - "epoch": 23.98, - "learning_rate": 8.247295264097288e-06, - "loss": 0.284, - "step": 1055 - }, - { - "epoch": 24.09, - "learning_rate": 6.518264651449779e-06, - "loss": 0.2607, - "step": 1060 - }, - { - "epoch": 24.2, - "learning_rate": 4.991818854640395e-06, - "loss": 0.3164, - "step": 
1065 - }, - { - "epoch": 24.32, - "learning_rate": 3.6682691373086663e-06, - "loss": 0.2597, - "step": 1070 - }, - { - "epoch": 24.43, - "learning_rate": 2.5478853897464847e-06, - "loss": 0.2907, - "step": 1075 - }, - { - "epoch": 24.55, - "learning_rate": 1.630896073864352e-06, - "loss": 0.3033, - "step": 1080 - }, - { - "epoch": 24.66, - "learning_rate": 9.174881766043087e-07, - "loss": 0.3089, - "step": 1085 - }, - { - "epoch": 24.77, - "learning_rate": 4.078071718107701e-07, - "loss": 0.2964, - "step": 1090 - }, - { - "epoch": 24.89, - "learning_rate": 1.0195699056669839e-07, - "loss": 0.2995, - "step": 1095 - }, - { - "epoch": 25.0, - "learning_rate": 0.0, - "loss": 0.2936, - "step": 1100 - } - ], - "logging_steps": 5, - "max_steps": 1100, - "num_input_tokens_seen": 0, - "num_train_epochs": 25, - "save_steps": 100, - "total_flos": 5.602696856046797e+17, - "train_batch_size": 4, - "trial_name": null, - "trial_params": null -} diff --git a/checkpoint-1100/training_args.bin b/checkpoint-1100/training_args.bin deleted file mode 100644 index ff8dbcdca96337fe706e3b8a5e49365cea791f82..0000000000000000000000000000000000000000 --- a/checkpoint-1100/training_args.bin +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:fef6a3ae006ec4c51dbcf0a3e569288ca5ab1bbc97f41768934c32153b03277c -size 4920 diff --git a/checkpoint-200/README.md b/checkpoint-200/README.md deleted file mode 100644 index 0a4640bc0bab946c21e07f36639d991fc5d9f684..0000000000000000000000000000000000000000 --- a/checkpoint-200/README.md +++ /dev/null @@ -1,204 +0,0 @@ ---- -library_name: peft -base_model: /root/chatglm3-6b ---- - -# Model Card for Model ID - - - - - -## Model Details - -### Model Description - - - - - -- **Developed by:** [More Information Needed] -- **Funded by [optional]:** [More Information Needed] -- **Shared by [optional]:** [More Information Needed] -- **Model type:** [More Information Needed] -- **Language(s) (NLP):** [More Information Needed] -- **License:** [More Information Needed] -- **Finetuned from model [optional]:** [More Information Needed] - -### Model Sources [optional] - - - -- **Repository:** [More Information Needed] -- **Paper [optional]:** [More Information Needed] -- **Demo [optional]:** [More Information Needed] - -## Uses - - - -### Direct Use - - - -[More Information Needed] - -### Downstream Use [optional] - - - -[More Information Needed] - -### Out-of-Scope Use - - - -[More Information Needed] - -## Bias, Risks, and Limitations - - - -[More Information Needed] - -### Recommendations - - - -Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. - -## How to Get Started with the Model - -Use the code below to get started with the model. 
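Editor's note (an illustrative sketch, not taken from the deleted model card, which leaves this section as a placeholder): given the LoRA layout shown in the `adapter_config.json` files above (`r=32`, `lora_alpha=64`, `target_modules=["query_key_value"]`, ChatGLM3-6B base), one plausible way to load a checkpoint's adapter is via `peft`. The Hub ID and adapter path below are assumptions, not values stated in this diff.

```python
from transformers import AutoModel, AutoTokenizer
from peft import PeftModel

base_id = "THUDM/chatglm3-6b"     # assumed Hub ID; "/root/chatglm3-6b" in the config is a local path
adapter_path = "./coolshell-llm"  # assumed local path to the LoRA adapter weights

tokenizer = AutoTokenizer.from_pretrained(base_id, trust_remote_code=True)
base = AutoModel.from_pretrained(base_id, trust_remote_code=True, device_map="auto")
model = PeftModel.from_pretrained(base, adapter_path).eval()

# ChatGLM3's remote code exposes a chat() helper; PeftModel forwards the call to the base model.
response, _history = model.chat(tokenizer, "你好", history=[])
print(response)
```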
- -[More Information Needed] - -## Training Details - -### Training Data - - - -[More Information Needed] - -### Training Procedure - - - -#### Preprocessing [optional] - -[More Information Needed] - - -#### Training Hyperparameters - -- **Training regime:** [More Information Needed] - -#### Speeds, Sizes, Times [optional] - - - -[More Information Needed] - -## Evaluation - - - -### Testing Data, Factors & Metrics - -#### Testing Data - - - -[More Information Needed] - -#### Factors - - - -[More Information Needed] - -#### Metrics - - - -[More Information Needed] - -### Results - -[More Information Needed] - -#### Summary - - - -## Model Examination [optional] - - - -[More Information Needed] - -## Environmental Impact - - - -Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - -- **Hardware Type:** [More Information Needed] -- **Hours used:** [More Information Needed] -- **Cloud Provider:** [More Information Needed] -- **Compute Region:** [More Information Needed] -- **Carbon Emitted:** [More Information Needed] - -## Technical Specifications [optional] - -### Model Architecture and Objective - -[More Information Needed] - -### Compute Infrastructure - -[More Information Needed] - -#### Hardware - -[More Information Needed] - -#### Software - -[More Information Needed] - -## Citation [optional] - - - -**BibTeX:** - -[More Information Needed] - -**APA:** - -[More Information Needed] - -## Glossary [optional] - - - -[More Information Needed] - -## More Information [optional] - -[More Information Needed] - -## Model Card Authors [optional] - -[More Information Needed] - -## Model Card Contact - -[More Information Needed] - - -### Framework versions - -- PEFT 0.7.1 \ No newline at end of file diff --git a/checkpoint-200/adapter_config.json b/checkpoint-200/adapter_config.json deleted file mode 100644 index e437b533e257864a38c04ed024f90cab5eebcd8d..0000000000000000000000000000000000000000 --- a/checkpoint-200/adapter_config.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "alpha_pattern": {}, - "auto_mapping": null, - "base_model_name_or_path": "/root/chatglm3-6b", - "bias": "none", - "fan_in_fan_out": false, - "inference_mode": true, - "init_lora_weights": true, - "layers_pattern": null, - "layers_to_transform": null, - "loftq_config": {}, - "lora_alpha": 64.0, - "lora_dropout": 0.1, - "megatron_config": null, - "megatron_core": "megatron.core", - "modules_to_save": null, - "peft_type": "LORA", - "r": 32, - "rank_pattern": {}, - "revision": null, - "target_modules": [ - "query_key_value" - ], - "task_type": "CAUSAL_LM" -} \ No newline at end of file diff --git a/checkpoint-200/adapter_model.safetensors b/checkpoint-200/adapter_model.safetensors deleted file mode 100644 index d16ae400b3b9a0c8fb7180d09fc6884dc5eb966f..0000000000000000000000000000000000000000 --- a/checkpoint-200/adapter_model.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:d9079a8f13b0b663beb8af4a69f38304ffb47f535efa9d4fc2f28235905d33d6 -size 31204248 diff --git a/checkpoint-200/optimizer.pt b/checkpoint-200/optimizer.pt deleted file mode 100644 index 01aaef905d13ddbebb940c28939aa01d88bc20da..0000000000000000000000000000000000000000 --- a/checkpoint-200/optimizer.pt +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:d3c20e12a6fe7711738ea34dd0ceeb02446ef057730b074a3f796920de8f458e -size 62437882 diff --git 
a/checkpoint-200/rng_state.pth b/checkpoint-200/rng_state.pth deleted file mode 100644 index 8345c9db73e65222f60443cc197cdfc365a9ac22..0000000000000000000000000000000000000000 --- a/checkpoint-200/rng_state.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:754a649249169df5413cd1afec214b0e512a562b2d537b50c7822a329e86ab92 -size 14244 diff --git a/checkpoint-200/scheduler.pt b/checkpoint-200/scheduler.pt deleted file mode 100644 index f0a696fc98d6a9ebeb0e366716525b2c1a450364..0000000000000000000000000000000000000000 --- a/checkpoint-200/scheduler.pt +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:ca49ceb5308a589ec72593fdfc170ba0798f7206328f597dc676a71ad4f62985 -size 1064 diff --git a/checkpoint-200/special_tokens_map.json b/checkpoint-200/special_tokens_map.json deleted file mode 100644 index dd02cd16ef3e1cfed3ce0f8cd09b983412317a48..0000000000000000000000000000000000000000 --- a/checkpoint-200/special_tokens_map.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "additional_special_tokens": [ - { - "content": "<|user|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false - }, - { - "content": "<|observation|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false - } - ] -} diff --git a/checkpoint-200/tokenization_chatglm.py b/checkpoint-200/tokenization_chatglm.py deleted file mode 100644 index 862e8f9a75bc874741cababc3b352cbbfe3611ad..0000000000000000000000000000000000000000 --- a/checkpoint-200/tokenization_chatglm.py +++ /dev/null @@ -1,300 +0,0 @@ -import json -import os -import re -from typing import List, Optional, Union, Dict -from sentencepiece import SentencePieceProcessor -from transformers import PreTrainedTokenizer -from transformers.utils import logging, PaddingStrategy -from transformers.tokenization_utils_base import EncodedInput, BatchEncoding - - -class SPTokenizer: - def __init__(self, model_path: str): - # reload tokenizer - assert os.path.isfile(model_path), model_path - self.sp_model = SentencePieceProcessor(model_file=model_path) - - # BOS / EOS token IDs - self.n_words: int = self.sp_model.vocab_size() - self.bos_id: int = self.sp_model.bos_id() - self.eos_id: int = self.sp_model.eos_id() - self.pad_id: int = self.sp_model.unk_id() - assert self.sp_model.vocab_size() == self.sp_model.get_piece_size() - - role_special_tokens = ["<|system|>", "<|user|>", "<|assistant|>", "<|observation|>"] - special_tokens = ["[MASK]", "[gMASK]", "[sMASK]", "sop", "eop"] + role_special_tokens - self.special_tokens = {} - self.index_special_tokens = {} - for token in special_tokens: - self.special_tokens[token] = self.n_words - self.index_special_tokens[self.n_words] = token - self.n_words += 1 - self.role_special_token_expression = "|".join([re.escape(token) for token in role_special_tokens]) - - def tokenize(self, s: str, encode_special_tokens=False): - if encode_special_tokens: - last_index = 0 - t = [] - for match in re.finditer(self.role_special_token_expression, s): - if last_index < match.start(): - t.extend(self.sp_model.EncodeAsPieces(s[last_index:match.start()])) - t.append(s[match.start():match.end()]) - last_index = match.end() - if last_index < len(s): - t.extend(self.sp_model.EncodeAsPieces(s[last_index:])) - return t - else: - return self.sp_model.EncodeAsPieces(s) - - def encode(self, s: str, bos: bool = False, eos: bool = False) -> List[int]: - assert type(s) is str - t = self.sp_model.encode(s) - if bos: - t = [self.bos_id] + t - if eos: - 
t = t + [self.eos_id] - return t - - def decode(self, t: List[int]) -> str: - text, buffer = "", [] - for token in t: - if token in self.index_special_tokens: - if buffer: - text += self.sp_model.decode(buffer) - buffer = [] - text += self.index_special_tokens[token] - else: - buffer.append(token) - if buffer: - text += self.sp_model.decode(buffer) - return text - - def decode_tokens(self, tokens: List[str]) -> str: - text = self.sp_model.DecodePieces(tokens) - return text - - def convert_token_to_id(self, token): - """ Converts a token (str) in an id using the vocab. """ - if token in self.special_tokens: - return self.special_tokens[token] - return self.sp_model.PieceToId(token) - - def convert_id_to_token(self, index): - """Converts an index (integer) in a token (str) using the vocab.""" - if index in self.index_special_tokens: - return self.index_special_tokens[index] - if index in [self.eos_id, self.bos_id, self.pad_id] or index < 0 or index > self.sp_model.vocab_size(): - return "" - return self.sp_model.IdToPiece(index) - - -class ChatGLMTokenizer(PreTrainedTokenizer): - vocab_files_names = {"vocab_file": "tokenizer.model"} - - model_input_names = ["input_ids", "attention_mask", "position_ids"] - - def __init__(self, vocab_file, padding_side="left", clean_up_tokenization_spaces=False, encode_special_tokens=False, - **kwargs): - self.name = "GLMTokenizer" - - self.vocab_file = vocab_file - self.tokenizer = SPTokenizer(vocab_file) - self.special_tokens = { - "": self.tokenizer.bos_id, - "": self.tokenizer.eos_id, - "": self.tokenizer.pad_id - } - self.encode_special_tokens = encode_special_tokens - super().__init__(padding_side=padding_side, clean_up_tokenization_spaces=clean_up_tokenization_spaces, - encode_special_tokens=encode_special_tokens, - **kwargs) - - def get_command(self, token): - if token in self.special_tokens: - return self.special_tokens[token] - assert token in self.tokenizer.special_tokens, f"{token} is not a special token for {self.name}" - return self.tokenizer.special_tokens[token] - - @property - def unk_token(self) -> str: - return "" - - @property - def pad_token(self) -> str: - return "" - - @property - def pad_token_id(self): - return self.get_command("") - - @property - def eos_token(self) -> str: - return "" - - @property - def eos_token_id(self): - return self.get_command("") - - @property - def vocab_size(self): - return self.tokenizer.n_words - - def get_vocab(self): - """ Returns vocab as a dict """ - vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)} - vocab.update(self.added_tokens_encoder) - return vocab - - def _tokenize(self, text, **kwargs): - return self.tokenizer.tokenize(text, encode_special_tokens=self.encode_special_tokens) - - def _convert_token_to_id(self, token): - """ Converts a token (str) in an id using the vocab. """ - return self.tokenizer.convert_token_to_id(token) - - def _convert_id_to_token(self, index): - """Converts an index (integer) in a token (str) using the vocab.""" - return self.tokenizer.convert_id_to_token(index) - - def convert_tokens_to_string(self, tokens: List[str]) -> str: - return self.tokenizer.decode_tokens(tokens) - - def save_vocabulary(self, save_directory, filename_prefix=None): - """ - Save the vocabulary and special tokens file to a directory. - - Args: - save_directory (`str`): - The directory in which to save the vocabulary. - filename_prefix (`str`, *optional*): - An optional prefix to add to the named of the saved files. - - Returns: - `Tuple(str)`: Paths to the files saved. 
- """ - if os.path.isdir(save_directory): - vocab_file = os.path.join( - save_directory, self.vocab_files_names["vocab_file"] - ) - else: - vocab_file = save_directory - - with open(self.vocab_file, 'rb') as fin: - proto_str = fin.read() - - with open(vocab_file, "wb") as writer: - writer.write(proto_str) - - return (vocab_file,) - - def get_prefix_tokens(self): - prefix_tokens = [self.get_command("[gMASK]"), self.get_command("sop")] - return prefix_tokens - - def build_single_message(self, role, metadata, message): - assert role in ["system", "user", "assistant", "observation"], role - role_tokens = [self.get_command(f"<|{role}|>")] + self.tokenizer.encode(f"{metadata}\n") - message_tokens = self.tokenizer.encode(message) - tokens = role_tokens + message_tokens - return tokens - - def build_chat_input(self, query, history=None, role="user"): - if history is None: - history = [] - input_ids = [] - for item in history: - content = item["content"] - if item["role"] == "system" and "tools" in item: - content = content + "\n" + json.dumps(item["tools"], indent=4, ensure_ascii=False) - input_ids.extend(self.build_single_message(item["role"], item.get("metadata", ""), content)) - input_ids.extend(self.build_single_message(role, "", query)) - input_ids.extend([self.get_command("<|assistant|>")]) - return self.batch_encode_plus([input_ids], return_tensors="pt", is_split_into_words=True) - - def build_inputs_with_special_tokens( - self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None - ) -> List[int]: - """ - Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and - adding special tokens. A BERT sequence has the following format: - - - single sequence: `[CLS] X [SEP]` - - pair of sequences: `[CLS] A [SEP] B [SEP]` - - Args: - token_ids_0 (`List[int]`): - List of IDs to which the special tokens will be added. - token_ids_1 (`List[int]`, *optional*): - Optional second list of IDs for sequence pairs. - - Returns: - `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. - """ - prefix_tokens = self.get_prefix_tokens() - token_ids_0 = prefix_tokens + token_ids_0 - if token_ids_1 is not None: - token_ids_0 = token_ids_0 + token_ids_1 + [self.get_command("")] - return token_ids_0 - - def _pad( - self, - encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], - max_length: Optional[int] = None, - padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, - pad_to_multiple_of: Optional[int] = None, - return_attention_mask: Optional[bool] = None, - ) -> dict: - """ - Pad encoded inputs (on left/right and up to predefined length or max length in the batch) - - Args: - encoded_inputs: - Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`). - max_length: maximum length of the returned list and optionally padding length (see below). - Will truncate by taking into account the special tokens. - padding_strategy: PaddingStrategy to use for padding. - - - PaddingStrategy.LONGEST Pad to the longest sequence in the batch - - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) - - PaddingStrategy.DO_NOT_PAD: Do not pad - The tokenizer padding sides are defined in self.padding_side: - - - 'left': pads on the left of the sequences - - 'right': pads on the right of the sequences - pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. 
- This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability - `>= 7.5` (Volta). - return_attention_mask: - (optional) Set to False to avoid returning attention mask (default: set to model specifics) - """ - # Load from model defaults - assert self.padding_side == "left" - - required_input = encoded_inputs[self.model_input_names[0]] - seq_length = len(required_input) - - if padding_strategy == PaddingStrategy.LONGEST: - max_length = len(required_input) - - if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): - max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of - - needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length - - # Initialize attention mask if not present. - if "attention_mask" not in encoded_inputs: - encoded_inputs["attention_mask"] = [1] * seq_length - - if "position_ids" not in encoded_inputs: - encoded_inputs["position_ids"] = list(range(seq_length)) - - if needs_to_be_padded: - difference = max_length - len(required_input) - - if "attention_mask" in encoded_inputs: - encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"] - if "position_ids" in encoded_inputs: - encoded_inputs["position_ids"] = [0] * difference + encoded_inputs["position_ids"] - encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input - - return encoded_inputs diff --git a/checkpoint-200/tokenizer.model b/checkpoint-200/tokenizer.model deleted file mode 100644 index 8a8007697b7cc3d3868dcffbbebf8c1f2bd690ba..0000000000000000000000000000000000000000 --- a/checkpoint-200/tokenizer.model +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e7dc4c393423b76e4373e5157ddc34803a0189ba96b21ddbb40269d31468a6f2 -size 1018370 diff --git a/checkpoint-200/tokenizer_config.json b/checkpoint-200/tokenizer_config.json deleted file mode 100644 index f0e543dcb5c184576e9e88e2c48b586290d71953..0000000000000000000000000000000000000000 --- a/checkpoint-200/tokenizer_config.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "added_tokens_decoder": { - "64795": { - "content": "<|user|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": true - }, - "64797": { - "content": "<|observation|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": true - } - }, - "additional_special_tokens": [ - "<|user|>", - "<|observation|>" - ], - "auto_map": { - "AutoTokenizer": [ - "tokenization_chatglm.ChatGLMTokenizer", - null - ] - }, - "clean_up_tokenization_spaces": false, - "do_lower_case": false, - "encode_special_tokens": false, - "eos_token": "", - "model_max_length": 1000000000000000019884624838656, - "pad_token": "", - "padding_side": "right", - "remove_space": false, - "split_special_tokens": false, - "tokenizer_class": "ChatGLMTokenizer", - "unk_token": "" -} diff --git a/checkpoint-200/trainer_state.json b/checkpoint-200/trainer_state.json deleted file mode 100644 index 155f7540d2f7f88c83b8ac3895de2f0e2097f161..0000000000000000000000000000000000000000 --- a/checkpoint-200/trainer_state.json +++ /dev/null @@ -1,261 +0,0 @@ -{ - "best_metric": null, - "best_model_checkpoint": null, - "epoch": 4.545454545454545, - "eval_steps": 500, - "global_step": 200, - "is_hyper_param_search": false, - "is_local_process_zero": true, - "is_world_process_zero": true, - "log_history": [ - { - "epoch": 0.11, 
- "learning_rate": 0.001999898043009433, - "loss": 4.5094, - "step": 5 - }, - { - "epoch": 0.23, - "learning_rate": 0.0019995921928281893, - "loss": 3.8047, - "step": 10 - }, - { - "epoch": 0.34, - "learning_rate": 0.001999082511823396, - "loss": 3.8813, - "step": 15 - }, - { - "epoch": 0.45, - "learning_rate": 0.0019983691039261358, - "loss": 3.7188, - "step": 20 - }, - { - "epoch": 0.57, - "learning_rate": 0.0019974521146102534, - "loss": 3.6695, - "step": 25 - }, - { - "epoch": 0.68, - "learning_rate": 0.001996331730862691, - "loss": 3.7078, - "step": 30 - }, - { - "epoch": 0.8, - "learning_rate": 0.0019950081811453595, - "loss": 3.6844, - "step": 35 - }, - { - "epoch": 0.91, - "learning_rate": 0.0019934817353485504, - "loss": 3.6961, - "step": 40 - }, - { - "epoch": 1.02, - "learning_rate": 0.0019917527047359027, - "loss": 3.5758, - "step": 45 - }, - { - "epoch": 1.14, - "learning_rate": 0.001989821441880933, - "loss": 3.4102, - "step": 50 - }, - { - "epoch": 1.25, - "learning_rate": 0.0019876883405951376, - "loss": 3.3984, - "step": 55 - }, - { - "epoch": 1.36, - "learning_rate": 0.001985353835847693, - "loss": 3.3602, - "step": 60 - }, - { - "epoch": 1.48, - "learning_rate": 0.0019828184036767556, - "loss": 3.4461, - "step": 65 - }, - { - "epoch": 1.59, - "learning_rate": 0.0019800825610923932, - "loss": 3.3461, - "step": 70 - }, - { - "epoch": 1.7, - "learning_rate": 0.0019771468659711597, - "loss": 3.4172, - "step": 75 - }, - { - "epoch": 1.82, - "learning_rate": 0.0019740119169423336, - "loss": 3.4359, - "step": 80 - }, - { - "epoch": 1.93, - "learning_rate": 0.0019706783532658523, - "loss": 3.5141, - "step": 85 - }, - { - "epoch": 2.05, - "learning_rate": 0.001967146854701957, - "loss": 3.2242, - "step": 90 - }, - { - "epoch": 2.16, - "learning_rate": 0.0019634181413725788, - "loss": 3.0227, - "step": 95 - }, - { - "epoch": 2.27, - "learning_rate": 0.0019594929736144974, - "loss": 2.8984, - "step": 100 - }, - { - "epoch": 2.39, - "learning_rate": 0.001955372151824297, - "loss": 3.0781, - "step": 105 - }, - { - "epoch": 2.5, - "learning_rate": 0.0019510565162951536, - "loss": 3.1203, - "step": 110 - }, - { - "epoch": 2.61, - "learning_rate": 0.00194654694704549, - "loss": 3.1828, - "step": 115 - }, - { - "epoch": 2.73, - "learning_rate": 0.0019418443636395248, - "loss": 3.0531, - "step": 120 - }, - { - "epoch": 2.84, - "learning_rate": 0.001936949724999762, - "loss": 3.1523, - "step": 125 - }, - { - "epoch": 2.95, - "learning_rate": 0.0019318640292114524, - "loss": 3.1156, - "step": 130 - }, - { - "epoch": 3.07, - "learning_rate": 0.0019265883133190713, - "loss": 2.7844, - "step": 135 - }, - { - "epoch": 3.18, - "learning_rate": 0.0019211236531148502, - "loss": 2.6711, - "step": 140 - }, - { - "epoch": 3.3, - "learning_rate": 0.0019154711629194062, - "loss": 2.6609, - "step": 145 - }, - { - "epoch": 3.41, - "learning_rate": 0.0019096319953545184, - "loss": 2.7531, - "step": 150 - }, - { - "epoch": 3.52, - "learning_rate": 0.0019036073411080917, - "loss": 2.7977, - "step": 155 - }, - { - "epoch": 3.64, - "learning_rate": 0.0018973984286913585, - "loss": 2.7914, - "step": 160 - }, - { - "epoch": 3.75, - "learning_rate": 0.0018910065241883678, - "loss": 2.8188, - "step": 165 - }, - { - "epoch": 3.86, - "learning_rate": 0.0018844329309978143, - "loss": 2.8945, - "step": 170 - }, - { - "epoch": 3.98, - "learning_rate": 0.0018776789895672556, - "loss": 2.8883, - "step": 175 - }, - { - "epoch": 4.09, - "learning_rate": 0.0018707460771197773, - "loss": 2.4617, - "step": 180 - }, - { - 
"epoch": 4.2, - "learning_rate": 0.001863635607373157, - "loss": 2.4633, - "step": 185 - }, - { - "epoch": 4.32, - "learning_rate": 0.001856349030251589, - "loss": 2.5094, - "step": 190 - }, - { - "epoch": 4.43, - "learning_rate": 0.0018488878315900226, - "loss": 2.432, - "step": 195 - }, - { - "epoch": 4.55, - "learning_rate": 0.0018412535328311812, - "loss": 2.5648, - "step": 200 - } - ], - "logging_steps": 5, - "max_steps": 1100, - "num_input_tokens_seen": 0, - "num_train_epochs": 25, - "save_steps": 100, - "total_flos": 1.0268727547723776e+17, - "train_batch_size": 4, - "trial_name": null, - "trial_params": null -} diff --git a/checkpoint-200/training_args.bin b/checkpoint-200/training_args.bin deleted file mode 100644 index ff8dbcdca96337fe706e3b8a5e49365cea791f82..0000000000000000000000000000000000000000 --- a/checkpoint-200/training_args.bin +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:fef6a3ae006ec4c51dbcf0a3e569288ca5ab1bbc97f41768934c32153b03277c -size 4920 diff --git a/checkpoint-300/README.md b/checkpoint-300/README.md deleted file mode 100644 index 0a4640bc0bab946c21e07f36639d991fc5d9f684..0000000000000000000000000000000000000000 --- a/checkpoint-300/README.md +++ /dev/null @@ -1,204 +0,0 @@ ---- -library_name: peft -base_model: /root/chatglm3-6b ---- - -# Model Card for Model ID - - - - - -## Model Details - -### Model Description - - - - - -- **Developed by:** [More Information Needed] -- **Funded by [optional]:** [More Information Needed] -- **Shared by [optional]:** [More Information Needed] -- **Model type:** [More Information Needed] -- **Language(s) (NLP):** [More Information Needed] -- **License:** [More Information Needed] -- **Finetuned from model [optional]:** [More Information Needed] - -### Model Sources [optional] - - - -- **Repository:** [More Information Needed] -- **Paper [optional]:** [More Information Needed] -- **Demo [optional]:** [More Information Needed] - -## Uses - - - -### Direct Use - - - -[More Information Needed] - -### Downstream Use [optional] - - - -[More Information Needed] - -### Out-of-Scope Use - - - -[More Information Needed] - -## Bias, Risks, and Limitations - - - -[More Information Needed] - -### Recommendations - - - -Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. - -## How to Get Started with the Model - -Use the code below to get started with the model. - -[More Information Needed] - -## Training Details - -### Training Data - - - -[More Information Needed] - -### Training Procedure - - - -#### Preprocessing [optional] - -[More Information Needed] - - -#### Training Hyperparameters - -- **Training regime:** [More Information Needed] - -#### Speeds, Sizes, Times [optional] - - - -[More Information Needed] - -## Evaluation - - - -### Testing Data, Factors & Metrics - -#### Testing Data - - - -[More Information Needed] - -#### Factors - - - -[More Information Needed] - -#### Metrics - - - -[More Information Needed] - -### Results - -[More Information Needed] - -#### Summary - - - -## Model Examination [optional] - - - -[More Information Needed] - -## Environmental Impact - - - -Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). 
- -- **Hardware Type:** [More Information Needed] -- **Hours used:** [More Information Needed] -- **Cloud Provider:** [More Information Needed] -- **Compute Region:** [More Information Needed] -- **Carbon Emitted:** [More Information Needed] - -## Technical Specifications [optional] - -### Model Architecture and Objective - -[More Information Needed] - -### Compute Infrastructure - -[More Information Needed] - -#### Hardware - -[More Information Needed] - -#### Software - -[More Information Needed] - -## Citation [optional] - - - -**BibTeX:** - -[More Information Needed] - -**APA:** - -[More Information Needed] - -## Glossary [optional] - - - -[More Information Needed] - -## More Information [optional] - -[More Information Needed] - -## Model Card Authors [optional] - -[More Information Needed] - -## Model Card Contact - -[More Information Needed] - - -### Framework versions - -- PEFT 0.7.1 \ No newline at end of file diff --git a/checkpoint-300/adapter_config.json b/checkpoint-300/adapter_config.json deleted file mode 100644 index e437b533e257864a38c04ed024f90cab5eebcd8d..0000000000000000000000000000000000000000 --- a/checkpoint-300/adapter_config.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "alpha_pattern": {}, - "auto_mapping": null, - "base_model_name_or_path": "/root/chatglm3-6b", - "bias": "none", - "fan_in_fan_out": false, - "inference_mode": true, - "init_lora_weights": true, - "layers_pattern": null, - "layers_to_transform": null, - "loftq_config": {}, - "lora_alpha": 64.0, - "lora_dropout": 0.1, - "megatron_config": null, - "megatron_core": "megatron.core", - "modules_to_save": null, - "peft_type": "LORA", - "r": 32, - "rank_pattern": {}, - "revision": null, - "target_modules": [ - "query_key_value" - ], - "task_type": "CAUSAL_LM" -} \ No newline at end of file diff --git a/checkpoint-300/adapter_model.safetensors b/checkpoint-300/adapter_model.safetensors deleted file mode 100644 index 56cce9da25eb0f46b158a873c9cc05206ecade2c..0000000000000000000000000000000000000000 --- a/checkpoint-300/adapter_model.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:5e220d6419e740f923cc6124bc6265c9df3f562e96a78efcde9e7588717485b0 -size 31204248 diff --git a/checkpoint-300/optimizer.pt b/checkpoint-300/optimizer.pt deleted file mode 100644 index 598c5f1357484d3d8e05dcf0b04e7704b5c1a45c..0000000000000000000000000000000000000000 --- a/checkpoint-300/optimizer.pt +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:3d10054eefdf5b0ca7a5d696876048300d180438f1352d2a4d5c1cfc16b17fdc -size 62437882 diff --git a/checkpoint-300/rng_state.pth b/checkpoint-300/rng_state.pth deleted file mode 100644 index 9ea1936e36c296d9a3e57d0d856fed7d05759cee..0000000000000000000000000000000000000000 --- a/checkpoint-300/rng_state.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:d2e382bd86a073d9ac435189aa47a7ad5d68e1129172b8fc68b1976c4a8b24c9 -size 14244 diff --git a/checkpoint-300/scheduler.pt b/checkpoint-300/scheduler.pt deleted file mode 100644 index 4f6dc82f0472e269724cd750d8cbe5d7d135e91c..0000000000000000000000000000000000000000 --- a/checkpoint-300/scheduler.pt +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:9cd5a18fa2a68db0acda1cf96d93f0349c4089662fee086e3504535e77ceb535 -size 1064 diff --git a/checkpoint-300/special_tokens_map.json b/checkpoint-300/special_tokens_map.json deleted file mode 100644 index 
dd02cd16ef3e1cfed3ce0f8cd09b983412317a48..0000000000000000000000000000000000000000 --- a/checkpoint-300/special_tokens_map.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "additional_special_tokens": [ - { - "content": "<|user|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false - }, - { - "content": "<|observation|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false - } - ] -} diff --git a/checkpoint-300/tokenization_chatglm.py b/checkpoint-300/tokenization_chatglm.py deleted file mode 100644 index 862e8f9a75bc874741cababc3b352cbbfe3611ad..0000000000000000000000000000000000000000 --- a/checkpoint-300/tokenization_chatglm.py +++ /dev/null @@ -1,300 +0,0 @@ -import json -import os -import re -from typing import List, Optional, Union, Dict -from sentencepiece import SentencePieceProcessor -from transformers import PreTrainedTokenizer -from transformers.utils import logging, PaddingStrategy -from transformers.tokenization_utils_base import EncodedInput, BatchEncoding - - -class SPTokenizer: - def __init__(self, model_path: str): - # reload tokenizer - assert os.path.isfile(model_path), model_path - self.sp_model = SentencePieceProcessor(model_file=model_path) - - # BOS / EOS token IDs - self.n_words: int = self.sp_model.vocab_size() - self.bos_id: int = self.sp_model.bos_id() - self.eos_id: int = self.sp_model.eos_id() - self.pad_id: int = self.sp_model.unk_id() - assert self.sp_model.vocab_size() == self.sp_model.get_piece_size() - - role_special_tokens = ["<|system|>", "<|user|>", "<|assistant|>", "<|observation|>"] - special_tokens = ["[MASK]", "[gMASK]", "[sMASK]", "sop", "eop"] + role_special_tokens - self.special_tokens = {} - self.index_special_tokens = {} - for token in special_tokens: - self.special_tokens[token] = self.n_words - self.index_special_tokens[self.n_words] = token - self.n_words += 1 - self.role_special_token_expression = "|".join([re.escape(token) for token in role_special_tokens]) - - def tokenize(self, s: str, encode_special_tokens=False): - if encode_special_tokens: - last_index = 0 - t = [] - for match in re.finditer(self.role_special_token_expression, s): - if last_index < match.start(): - t.extend(self.sp_model.EncodeAsPieces(s[last_index:match.start()])) - t.append(s[match.start():match.end()]) - last_index = match.end() - if last_index < len(s): - t.extend(self.sp_model.EncodeAsPieces(s[last_index:])) - return t - else: - return self.sp_model.EncodeAsPieces(s) - - def encode(self, s: str, bos: bool = False, eos: bool = False) -> List[int]: - assert type(s) is str - t = self.sp_model.encode(s) - if bos: - t = [self.bos_id] + t - if eos: - t = t + [self.eos_id] - return t - - def decode(self, t: List[int]) -> str: - text, buffer = "", [] - for token in t: - if token in self.index_special_tokens: - if buffer: - text += self.sp_model.decode(buffer) - buffer = [] - text += self.index_special_tokens[token] - else: - buffer.append(token) - if buffer: - text += self.sp_model.decode(buffer) - return text - - def decode_tokens(self, tokens: List[str]) -> str: - text = self.sp_model.DecodePieces(tokens) - return text - - def convert_token_to_id(self, token): - """ Converts a token (str) in an id using the vocab. 
""" - if token in self.special_tokens: - return self.special_tokens[token] - return self.sp_model.PieceToId(token) - - def convert_id_to_token(self, index): - """Converts an index (integer) in a token (str) using the vocab.""" - if index in self.index_special_tokens: - return self.index_special_tokens[index] - if index in [self.eos_id, self.bos_id, self.pad_id] or index < 0 or index > self.sp_model.vocab_size(): - return "" - return self.sp_model.IdToPiece(index) - - -class ChatGLMTokenizer(PreTrainedTokenizer): - vocab_files_names = {"vocab_file": "tokenizer.model"} - - model_input_names = ["input_ids", "attention_mask", "position_ids"] - - def __init__(self, vocab_file, padding_side="left", clean_up_tokenization_spaces=False, encode_special_tokens=False, - **kwargs): - self.name = "GLMTokenizer" - - self.vocab_file = vocab_file - self.tokenizer = SPTokenizer(vocab_file) - self.special_tokens = { - "": self.tokenizer.bos_id, - "": self.tokenizer.eos_id, - "": self.tokenizer.pad_id - } - self.encode_special_tokens = encode_special_tokens - super().__init__(padding_side=padding_side, clean_up_tokenization_spaces=clean_up_tokenization_spaces, - encode_special_tokens=encode_special_tokens, - **kwargs) - - def get_command(self, token): - if token in self.special_tokens: - return self.special_tokens[token] - assert token in self.tokenizer.special_tokens, f"{token} is not a special token for {self.name}" - return self.tokenizer.special_tokens[token] - - @property - def unk_token(self) -> str: - return "" - - @property - def pad_token(self) -> str: - return "" - - @property - def pad_token_id(self): - return self.get_command("") - - @property - def eos_token(self) -> str: - return "" - - @property - def eos_token_id(self): - return self.get_command("") - - @property - def vocab_size(self): - return self.tokenizer.n_words - - def get_vocab(self): - """ Returns vocab as a dict """ - vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)} - vocab.update(self.added_tokens_encoder) - return vocab - - def _tokenize(self, text, **kwargs): - return self.tokenizer.tokenize(text, encode_special_tokens=self.encode_special_tokens) - - def _convert_token_to_id(self, token): - """ Converts a token (str) in an id using the vocab. """ - return self.tokenizer.convert_token_to_id(token) - - def _convert_id_to_token(self, index): - """Converts an index (integer) in a token (str) using the vocab.""" - return self.tokenizer.convert_id_to_token(index) - - def convert_tokens_to_string(self, tokens: List[str]) -> str: - return self.tokenizer.decode_tokens(tokens) - - def save_vocabulary(self, save_directory, filename_prefix=None): - """ - Save the vocabulary and special tokens file to a directory. - - Args: - save_directory (`str`): - The directory in which to save the vocabulary. - filename_prefix (`str`, *optional*): - An optional prefix to add to the named of the saved files. - - Returns: - `Tuple(str)`: Paths to the files saved. 
- """ - if os.path.isdir(save_directory): - vocab_file = os.path.join( - save_directory, self.vocab_files_names["vocab_file"] - ) - else: - vocab_file = save_directory - - with open(self.vocab_file, 'rb') as fin: - proto_str = fin.read() - - with open(vocab_file, "wb") as writer: - writer.write(proto_str) - - return (vocab_file,) - - def get_prefix_tokens(self): - prefix_tokens = [self.get_command("[gMASK]"), self.get_command("sop")] - return prefix_tokens - - def build_single_message(self, role, metadata, message): - assert role in ["system", "user", "assistant", "observation"], role - role_tokens = [self.get_command(f"<|{role}|>")] + self.tokenizer.encode(f"{metadata}\n") - message_tokens = self.tokenizer.encode(message) - tokens = role_tokens + message_tokens - return tokens - - def build_chat_input(self, query, history=None, role="user"): - if history is None: - history = [] - input_ids = [] - for item in history: - content = item["content"] - if item["role"] == "system" and "tools" in item: - content = content + "\n" + json.dumps(item["tools"], indent=4, ensure_ascii=False) - input_ids.extend(self.build_single_message(item["role"], item.get("metadata", ""), content)) - input_ids.extend(self.build_single_message(role, "", query)) - input_ids.extend([self.get_command("<|assistant|>")]) - return self.batch_encode_plus([input_ids], return_tensors="pt", is_split_into_words=True) - - def build_inputs_with_special_tokens( - self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None - ) -> List[int]: - """ - Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and - adding special tokens. A BERT sequence has the following format: - - - single sequence: `[CLS] X [SEP]` - - pair of sequences: `[CLS] A [SEP] B [SEP]` - - Args: - token_ids_0 (`List[int]`): - List of IDs to which the special tokens will be added. - token_ids_1 (`List[int]`, *optional*): - Optional second list of IDs for sequence pairs. - - Returns: - `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. - """ - prefix_tokens = self.get_prefix_tokens() - token_ids_0 = prefix_tokens + token_ids_0 - if token_ids_1 is not None: - token_ids_0 = token_ids_0 + token_ids_1 + [self.get_command("")] - return token_ids_0 - - def _pad( - self, - encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], - max_length: Optional[int] = None, - padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, - pad_to_multiple_of: Optional[int] = None, - return_attention_mask: Optional[bool] = None, - ) -> dict: - """ - Pad encoded inputs (on left/right and up to predefined length or max length in the batch) - - Args: - encoded_inputs: - Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`). - max_length: maximum length of the returned list and optionally padding length (see below). - Will truncate by taking into account the special tokens. - padding_strategy: PaddingStrategy to use for padding. - - - PaddingStrategy.LONGEST Pad to the longest sequence in the batch - - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) - - PaddingStrategy.DO_NOT_PAD: Do not pad - The tokenizer padding sides are defined in self.padding_side: - - - 'left': pads on the left of the sequences - - 'right': pads on the right of the sequences - pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. 
- This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability - `>= 7.5` (Volta). - return_attention_mask: - (optional) Set to False to avoid returning attention mask (default: set to model specifics) - """ - # Load from model defaults - assert self.padding_side == "left" - - required_input = encoded_inputs[self.model_input_names[0]] - seq_length = len(required_input) - - if padding_strategy == PaddingStrategy.LONGEST: - max_length = len(required_input) - - if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): - max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of - - needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length - - # Initialize attention mask if not present. - if "attention_mask" not in encoded_inputs: - encoded_inputs["attention_mask"] = [1] * seq_length - - if "position_ids" not in encoded_inputs: - encoded_inputs["position_ids"] = list(range(seq_length)) - - if needs_to_be_padded: - difference = max_length - len(required_input) - - if "attention_mask" in encoded_inputs: - encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"] - if "position_ids" in encoded_inputs: - encoded_inputs["position_ids"] = [0] * difference + encoded_inputs["position_ids"] - encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input - - return encoded_inputs diff --git a/checkpoint-300/tokenizer.model b/checkpoint-300/tokenizer.model deleted file mode 100644 index 8a8007697b7cc3d3868dcffbbebf8c1f2bd690ba..0000000000000000000000000000000000000000 --- a/checkpoint-300/tokenizer.model +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e7dc4c393423b76e4373e5157ddc34803a0189ba96b21ddbb40269d31468a6f2 -size 1018370 diff --git a/checkpoint-300/tokenizer_config.json b/checkpoint-300/tokenizer_config.json deleted file mode 100644 index f0e543dcb5c184576e9e88e2c48b586290d71953..0000000000000000000000000000000000000000 --- a/checkpoint-300/tokenizer_config.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "added_tokens_decoder": { - "64795": { - "content": "<|user|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": true - }, - "64797": { - "content": "<|observation|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": true - } - }, - "additional_special_tokens": [ - "<|user|>", - "<|observation|>" - ], - "auto_map": { - "AutoTokenizer": [ - "tokenization_chatglm.ChatGLMTokenizer", - null - ] - }, - "clean_up_tokenization_spaces": false, - "do_lower_case": false, - "encode_special_tokens": false, - "eos_token": "", - "model_max_length": 1000000000000000019884624838656, - "pad_token": "", - "padding_side": "right", - "remove_space": false, - "split_special_tokens": false, - "tokenizer_class": "ChatGLMTokenizer", - "unk_token": "" -} diff --git a/checkpoint-300/trainer_state.json b/checkpoint-300/trainer_state.json deleted file mode 100644 index aff5fb80a09d1172535b6d961566c68db2450d37..0000000000000000000000000000000000000000 --- a/checkpoint-300/trainer_state.json +++ /dev/null @@ -1,381 +0,0 @@ -{ - "best_metric": null, - "best_model_checkpoint": null, - "epoch": 6.818181818181818, - "eval_steps": 500, - "global_step": 300, - "is_hyper_param_search": false, - "is_local_process_zero": true, - "is_world_process_zero": true, - "log_history": [ - { - "epoch": 0.11, 
- "learning_rate": 0.001999898043009433, - "loss": 4.5094, - "step": 5 - }, - { - "epoch": 0.23, - "learning_rate": 0.0019995921928281893, - "loss": 3.8047, - "step": 10 - }, - { - "epoch": 0.34, - "learning_rate": 0.001999082511823396, - "loss": 3.8813, - "step": 15 - }, - { - "epoch": 0.45, - "learning_rate": 0.0019983691039261358, - "loss": 3.7188, - "step": 20 - }, - { - "epoch": 0.57, - "learning_rate": 0.0019974521146102534, - "loss": 3.6695, - "step": 25 - }, - { - "epoch": 0.68, - "learning_rate": 0.001996331730862691, - "loss": 3.7078, - "step": 30 - }, - { - "epoch": 0.8, - "learning_rate": 0.0019950081811453595, - "loss": 3.6844, - "step": 35 - }, - { - "epoch": 0.91, - "learning_rate": 0.0019934817353485504, - "loss": 3.6961, - "step": 40 - }, - { - "epoch": 1.02, - "learning_rate": 0.0019917527047359027, - "loss": 3.5758, - "step": 45 - }, - { - "epoch": 1.14, - "learning_rate": 0.001989821441880933, - "loss": 3.4102, - "step": 50 - }, - { - "epoch": 1.25, - "learning_rate": 0.0019876883405951376, - "loss": 3.3984, - "step": 55 - }, - { - "epoch": 1.36, - "learning_rate": 0.001985353835847693, - "loss": 3.3602, - "step": 60 - }, - { - "epoch": 1.48, - "learning_rate": 0.0019828184036767556, - "loss": 3.4461, - "step": 65 - }, - { - "epoch": 1.59, - "learning_rate": 0.0019800825610923932, - "loss": 3.3461, - "step": 70 - }, - { - "epoch": 1.7, - "learning_rate": 0.0019771468659711597, - "loss": 3.4172, - "step": 75 - }, - { - "epoch": 1.82, - "learning_rate": 0.0019740119169423336, - "loss": 3.4359, - "step": 80 - }, - { - "epoch": 1.93, - "learning_rate": 0.0019706783532658523, - "loss": 3.5141, - "step": 85 - }, - { - "epoch": 2.05, - "learning_rate": 0.001967146854701957, - "loss": 3.2242, - "step": 90 - }, - { - "epoch": 2.16, - "learning_rate": 0.0019634181413725788, - "loss": 3.0227, - "step": 95 - }, - { - "epoch": 2.27, - "learning_rate": 0.0019594929736144974, - "loss": 2.8984, - "step": 100 - }, - { - "epoch": 2.39, - "learning_rate": 0.001955372151824297, - "loss": 3.0781, - "step": 105 - }, - { - "epoch": 2.5, - "learning_rate": 0.0019510565162951536, - "loss": 3.1203, - "step": 110 - }, - { - "epoch": 2.61, - "learning_rate": 0.00194654694704549, - "loss": 3.1828, - "step": 115 - }, - { - "epoch": 2.73, - "learning_rate": 0.0019418443636395248, - "loss": 3.0531, - "step": 120 - }, - { - "epoch": 2.84, - "learning_rate": 0.001936949724999762, - "loss": 3.1523, - "step": 125 - }, - { - "epoch": 2.95, - "learning_rate": 0.0019318640292114524, - "loss": 3.1156, - "step": 130 - }, - { - "epoch": 3.07, - "learning_rate": 0.0019265883133190713, - "loss": 2.7844, - "step": 135 - }, - { - "epoch": 3.18, - "learning_rate": 0.0019211236531148502, - "loss": 2.6711, - "step": 140 - }, - { - "epoch": 3.3, - "learning_rate": 0.0019154711629194062, - "loss": 2.6609, - "step": 145 - }, - { - "epoch": 3.41, - "learning_rate": 0.0019096319953545184, - "loss": 2.7531, - "step": 150 - }, - { - "epoch": 3.52, - "learning_rate": 0.0019036073411080917, - "loss": 2.7977, - "step": 155 - }, - { - "epoch": 3.64, - "learning_rate": 0.0018973984286913585, - "loss": 2.7914, - "step": 160 - }, - { - "epoch": 3.75, - "learning_rate": 0.0018910065241883678, - "loss": 2.8188, - "step": 165 - }, - { - "epoch": 3.86, - "learning_rate": 0.0018844329309978143, - "loss": 2.8945, - "step": 170 - }, - { - "epoch": 3.98, - "learning_rate": 0.0018776789895672556, - "loss": 2.8883, - "step": 175 - }, - { - "epoch": 4.09, - "learning_rate": 0.0018707460771197773, - "loss": 2.4617, - "step": 180 - }, - { - 
"epoch": 4.2, - "learning_rate": 0.001863635607373157, - "loss": 2.4633, - "step": 185 - }, - { - "epoch": 4.32, - "learning_rate": 0.001856349030251589, - "loss": 2.5094, - "step": 190 - }, - { - "epoch": 4.43, - "learning_rate": 0.0018488878315900226, - "loss": 2.432, - "step": 195 - }, - { - "epoch": 4.55, - "learning_rate": 0.0018412535328311812, - "loss": 2.5648, - "step": 200 - }, - { - "epoch": 4.66, - "learning_rate": 0.0018334476907153176, - "loss": 2.4836, - "step": 205 - }, - { - "epoch": 4.77, - "learning_rate": 0.001825471896962774, - "loss": 2.6617, - "step": 210 - }, - { - "epoch": 4.89, - "learning_rate": 0.0018173277779494068, - "loss": 2.6734, - "step": 215 - }, - { - "epoch": 5.0, - "learning_rate": 0.0018090169943749475, - "loss": 2.6742, - "step": 220 - }, - { - "epoch": 5.11, - "learning_rate": 0.0018005412409243604, - "loss": 2.1379, - "step": 225 - }, - { - "epoch": 5.23, - "learning_rate": 0.0017919022459222751, - "loss": 2.1508, - "step": 230 - }, - { - "epoch": 5.34, - "learning_rate": 0.0017831017709805555, - "loss": 2.2582, - "step": 235 - }, - { - "epoch": 5.45, - "learning_rate": 0.0017741416106390826, - "loss": 2.2367, - "step": 240 - }, - { - "epoch": 5.57, - "learning_rate": 0.0017650235919998232, - "loss": 2.325, - "step": 245 - }, - { - "epoch": 5.68, - "learning_rate": 0.0017557495743542584, - "loss": 2.2703, - "step": 250 - }, - { - "epoch": 5.8, - "learning_rate": 0.0017463214488042471, - "loss": 2.3703, - "step": 255 - }, - { - "epoch": 5.91, - "learning_rate": 0.001736741137876405, - "loss": 2.4648, - "step": 260 - }, - { - "epoch": 6.02, - "learning_rate": 0.0017270105951300739, - "loss": 2.2734, - "step": 265 - }, - { - "epoch": 6.14, - "learning_rate": 0.0017171318047589637, - "loss": 1.9898, - "step": 270 - }, - { - "epoch": 6.25, - "learning_rate": 0.0017071067811865474, - "loss": 1.9816, - "step": 275 - }, - { - "epoch": 6.36, - "learning_rate": 0.0016969375686552938, - "loss": 1.9648, - "step": 280 - }, - { - "epoch": 6.48, - "learning_rate": 0.0016866262408098134, - "loss": 2.1672, - "step": 285 - }, - { - "epoch": 6.59, - "learning_rate": 0.0016761749002740195, - "loss": 2.0074, - "step": 290 - }, - { - "epoch": 6.7, - "learning_rate": 0.0016655856782223683, - "loss": 2.1598, - "step": 295 - }, - { - "epoch": 6.82, - "learning_rate": 0.0016548607339452852, - "loss": 2.0996, - "step": 300 - } - ], - "logging_steps": 5, - "max_steps": 1100, - "num_input_tokens_seen": 0, - "num_train_epochs": 25, - "save_steps": 100, - "total_flos": 1.530797220667392e+17, - "train_batch_size": 4, - "trial_name": null, - "trial_params": null -} diff --git a/checkpoint-300/training_args.bin b/checkpoint-300/training_args.bin deleted file mode 100644 index ff8dbcdca96337fe706e3b8a5e49365cea791f82..0000000000000000000000000000000000000000 --- a/checkpoint-300/training_args.bin +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:fef6a3ae006ec4c51dbcf0a3e569288ca5ab1bbc97f41768934c32153b03277c -size 4920 diff --git a/checkpoint-400/README.md b/checkpoint-400/README.md deleted file mode 100644 index 0a4640bc0bab946c21e07f36639d991fc5d9f684..0000000000000000000000000000000000000000 --- a/checkpoint-400/README.md +++ /dev/null @@ -1,204 +0,0 @@ ---- -library_name: peft -base_model: /root/chatglm3-6b ---- - -# Model Card for Model ID - - - - - -## Model Details - -### Model Description - - - - - -- **Developed by:** [More Information Needed] -- **Funded by [optional]:** [More Information Needed] -- **Shared by [optional]:** [More 
Information Needed] -- **Model type:** [More Information Needed] -- **Language(s) (NLP):** [More Information Needed] -- **License:** [More Information Needed] -- **Finetuned from model [optional]:** [More Information Needed] - -### Model Sources [optional] - - - -- **Repository:** [More Information Needed] -- **Paper [optional]:** [More Information Needed] -- **Demo [optional]:** [More Information Needed] - -## Uses - - - -### Direct Use - - - -[More Information Needed] - -### Downstream Use [optional] - - - -[More Information Needed] - -### Out-of-Scope Use - - - -[More Information Needed] - -## Bias, Risks, and Limitations - - - -[More Information Needed] - -### Recommendations - - - -Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. - -## How to Get Started with the Model - -Use the code below to get started with the model. - -[More Information Needed] - -## Training Details - -### Training Data - - - -[More Information Needed] - -### Training Procedure - - - -#### Preprocessing [optional] - -[More Information Needed] - - -#### Training Hyperparameters - -- **Training regime:** [More Information Needed] - -#### Speeds, Sizes, Times [optional] - - - -[More Information Needed] - -## Evaluation - - - -### Testing Data, Factors & Metrics - -#### Testing Data - - - -[More Information Needed] - -#### Factors - - - -[More Information Needed] - -#### Metrics - - - -[More Information Needed] - -### Results - -[More Information Needed] - -#### Summary - - - -## Model Examination [optional] - - - -[More Information Needed] - -## Environmental Impact - - - -Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). 
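
The "How to Get Started with the Model" sections in these deleted checkpoint cards were never filled in. For reference only, an adapter of this shape (PEFT LoRA, r=32, lora_alpha=64, dropout 0.1 on query_key_value, base model chatglm3-6b, PEFT 0.7.1) would typically be loaded roughly as sketched below; the hub id and the local adapter path are placeholders, not values recorded in this diff.

```python
# Sketch only: how a LoRA checkpoint of this shape is normally loaded with
# peft 0.7.x. The hub id and adapter path are placeholders; the deleted
# adapter_config.json records base_model_name_or_path=/root/chatglm3-6b and a
# LoRA adapter with r=32, lora_alpha=64, lora_dropout=0.1 on query_key_value.
from transformers import AutoModel, AutoTokenizer
from peft import PeftModel

base_id = "THUDM/chatglm3-6b"  # assumed; the diff itself only shows a local path
tokenizer = AutoTokenizer.from_pretrained(base_id, trust_remote_code=True)
base = AutoModel.from_pretrained(base_id, trust_remote_code=True)
# Path to a checkpoint directory laid out like those removed in this commit.
model = PeftModel.from_pretrained(base, "./checkpoint-400").eval()

# ChatGLM3 exposes a chat() helper; attribute access is forwarded through the
# PEFT wrapper, so generation runs with the injected LoRA weights.
response, _history = model.chat(tokenizer, "Hello", history=[])
print(response)
```
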
- -- **Hardware Type:** [More Information Needed] -- **Hours used:** [More Information Needed] -- **Cloud Provider:** [More Information Needed] -- **Compute Region:** [More Information Needed] -- **Carbon Emitted:** [More Information Needed] - -## Technical Specifications [optional] - -### Model Architecture and Objective - -[More Information Needed] - -### Compute Infrastructure - -[More Information Needed] - -#### Hardware - -[More Information Needed] - -#### Software - -[More Information Needed] - -## Citation [optional] - - - -**BibTeX:** - -[More Information Needed] - -**APA:** - -[More Information Needed] - -## Glossary [optional] - - - -[More Information Needed] - -## More Information [optional] - -[More Information Needed] - -## Model Card Authors [optional] - -[More Information Needed] - -## Model Card Contact - -[More Information Needed] - - -### Framework versions - -- PEFT 0.7.1 \ No newline at end of file diff --git a/checkpoint-400/adapter_config.json b/checkpoint-400/adapter_config.json deleted file mode 100644 index e437b533e257864a38c04ed024f90cab5eebcd8d..0000000000000000000000000000000000000000 --- a/checkpoint-400/adapter_config.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "alpha_pattern": {}, - "auto_mapping": null, - "base_model_name_or_path": "/root/chatglm3-6b", - "bias": "none", - "fan_in_fan_out": false, - "inference_mode": true, - "init_lora_weights": true, - "layers_pattern": null, - "layers_to_transform": null, - "loftq_config": {}, - "lora_alpha": 64.0, - "lora_dropout": 0.1, - "megatron_config": null, - "megatron_core": "megatron.core", - "modules_to_save": null, - "peft_type": "LORA", - "r": 32, - "rank_pattern": {}, - "revision": null, - "target_modules": [ - "query_key_value" - ], - "task_type": "CAUSAL_LM" -} \ No newline at end of file diff --git a/checkpoint-400/adapter_model.safetensors b/checkpoint-400/adapter_model.safetensors deleted file mode 100644 index c7965b81c943c120cb2b07506d038b0241cbc1ca..0000000000000000000000000000000000000000 --- a/checkpoint-400/adapter_model.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:1a1b000af1e72645f71b8a829536a3dd0711ea56ebf72dc454d96e1969765c38 -size 31204248 diff --git a/checkpoint-400/optimizer.pt b/checkpoint-400/optimizer.pt deleted file mode 100644 index 77d11a63d4326c54ade63beb5e49fc1e31581f2a..0000000000000000000000000000000000000000 --- a/checkpoint-400/optimizer.pt +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7d15e821214296476b4cc6e0d82589dc357ee7e77e8b4e89dfd884bbdcadb6a4 -size 62437882 diff --git a/checkpoint-400/rng_state.pth b/checkpoint-400/rng_state.pth deleted file mode 100644 index c20b3d543b369e9adb6095f40bcd6b1dfbada244..0000000000000000000000000000000000000000 --- a/checkpoint-400/rng_state.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:31c361eb8eecde08a271d93e6c5eef525134c62bd7fbd49722fb023d9072b1ea -size 14244 diff --git a/checkpoint-400/scheduler.pt b/checkpoint-400/scheduler.pt deleted file mode 100644 index 9354fdb72269fbea1f865560f515702e795aca35..0000000000000000000000000000000000000000 --- a/checkpoint-400/scheduler.pt +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:604a73bb32bc94a03b6bddbde38878816f4e28ff342b8206bf7cbabe687c2424 -size 1064 diff --git a/checkpoint-400/special_tokens_map.json b/checkpoint-400/special_tokens_map.json deleted file mode 100644 index 
dd02cd16ef3e1cfed3ce0f8cd09b983412317a48..0000000000000000000000000000000000000000 --- a/checkpoint-400/special_tokens_map.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "additional_special_tokens": [ - { - "content": "<|user|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false - }, - { - "content": "<|observation|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false - } - ] -} diff --git a/checkpoint-400/tokenization_chatglm.py b/checkpoint-400/tokenization_chatglm.py deleted file mode 100644 index 862e8f9a75bc874741cababc3b352cbbfe3611ad..0000000000000000000000000000000000000000 --- a/checkpoint-400/tokenization_chatglm.py +++ /dev/null @@ -1,300 +0,0 @@ -import json -import os -import re -from typing import List, Optional, Union, Dict -from sentencepiece import SentencePieceProcessor -from transformers import PreTrainedTokenizer -from transformers.utils import logging, PaddingStrategy -from transformers.tokenization_utils_base import EncodedInput, BatchEncoding - - -class SPTokenizer: - def __init__(self, model_path: str): - # reload tokenizer - assert os.path.isfile(model_path), model_path - self.sp_model = SentencePieceProcessor(model_file=model_path) - - # BOS / EOS token IDs - self.n_words: int = self.sp_model.vocab_size() - self.bos_id: int = self.sp_model.bos_id() - self.eos_id: int = self.sp_model.eos_id() - self.pad_id: int = self.sp_model.unk_id() - assert self.sp_model.vocab_size() == self.sp_model.get_piece_size() - - role_special_tokens = ["<|system|>", "<|user|>", "<|assistant|>", "<|observation|>"] - special_tokens = ["[MASK]", "[gMASK]", "[sMASK]", "sop", "eop"] + role_special_tokens - self.special_tokens = {} - self.index_special_tokens = {} - for token in special_tokens: - self.special_tokens[token] = self.n_words - self.index_special_tokens[self.n_words] = token - self.n_words += 1 - self.role_special_token_expression = "|".join([re.escape(token) for token in role_special_tokens]) - - def tokenize(self, s: str, encode_special_tokens=False): - if encode_special_tokens: - last_index = 0 - t = [] - for match in re.finditer(self.role_special_token_expression, s): - if last_index < match.start(): - t.extend(self.sp_model.EncodeAsPieces(s[last_index:match.start()])) - t.append(s[match.start():match.end()]) - last_index = match.end() - if last_index < len(s): - t.extend(self.sp_model.EncodeAsPieces(s[last_index:])) - return t - else: - return self.sp_model.EncodeAsPieces(s) - - def encode(self, s: str, bos: bool = False, eos: bool = False) -> List[int]: - assert type(s) is str - t = self.sp_model.encode(s) - if bos: - t = [self.bos_id] + t - if eos: - t = t + [self.eos_id] - return t - - def decode(self, t: List[int]) -> str: - text, buffer = "", [] - for token in t: - if token in self.index_special_tokens: - if buffer: - text += self.sp_model.decode(buffer) - buffer = [] - text += self.index_special_tokens[token] - else: - buffer.append(token) - if buffer: - text += self.sp_model.decode(buffer) - return text - - def decode_tokens(self, tokens: List[str]) -> str: - text = self.sp_model.DecodePieces(tokens) - return text - - def convert_token_to_id(self, token): - """ Converts a token (str) in an id using the vocab. 
""" - if token in self.special_tokens: - return self.special_tokens[token] - return self.sp_model.PieceToId(token) - - def convert_id_to_token(self, index): - """Converts an index (integer) in a token (str) using the vocab.""" - if index in self.index_special_tokens: - return self.index_special_tokens[index] - if index in [self.eos_id, self.bos_id, self.pad_id] or index < 0 or index > self.sp_model.vocab_size(): - return "" - return self.sp_model.IdToPiece(index) - - -class ChatGLMTokenizer(PreTrainedTokenizer): - vocab_files_names = {"vocab_file": "tokenizer.model"} - - model_input_names = ["input_ids", "attention_mask", "position_ids"] - - def __init__(self, vocab_file, padding_side="left", clean_up_tokenization_spaces=False, encode_special_tokens=False, - **kwargs): - self.name = "GLMTokenizer" - - self.vocab_file = vocab_file - self.tokenizer = SPTokenizer(vocab_file) - self.special_tokens = { - "": self.tokenizer.bos_id, - "": self.tokenizer.eos_id, - "": self.tokenizer.pad_id - } - self.encode_special_tokens = encode_special_tokens - super().__init__(padding_side=padding_side, clean_up_tokenization_spaces=clean_up_tokenization_spaces, - encode_special_tokens=encode_special_tokens, - **kwargs) - - def get_command(self, token): - if token in self.special_tokens: - return self.special_tokens[token] - assert token in self.tokenizer.special_tokens, f"{token} is not a special token for {self.name}" - return self.tokenizer.special_tokens[token] - - @property - def unk_token(self) -> str: - return "" - - @property - def pad_token(self) -> str: - return "" - - @property - def pad_token_id(self): - return self.get_command("") - - @property - def eos_token(self) -> str: - return "" - - @property - def eos_token_id(self): - return self.get_command("") - - @property - def vocab_size(self): - return self.tokenizer.n_words - - def get_vocab(self): - """ Returns vocab as a dict """ - vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)} - vocab.update(self.added_tokens_encoder) - return vocab - - def _tokenize(self, text, **kwargs): - return self.tokenizer.tokenize(text, encode_special_tokens=self.encode_special_tokens) - - def _convert_token_to_id(self, token): - """ Converts a token (str) in an id using the vocab. """ - return self.tokenizer.convert_token_to_id(token) - - def _convert_id_to_token(self, index): - """Converts an index (integer) in a token (str) using the vocab.""" - return self.tokenizer.convert_id_to_token(index) - - def convert_tokens_to_string(self, tokens: List[str]) -> str: - return self.tokenizer.decode_tokens(tokens) - - def save_vocabulary(self, save_directory, filename_prefix=None): - """ - Save the vocabulary and special tokens file to a directory. - - Args: - save_directory (`str`): - The directory in which to save the vocabulary. - filename_prefix (`str`, *optional*): - An optional prefix to add to the named of the saved files. - - Returns: - `Tuple(str)`: Paths to the files saved. 
- """ - if os.path.isdir(save_directory): - vocab_file = os.path.join( - save_directory, self.vocab_files_names["vocab_file"] - ) - else: - vocab_file = save_directory - - with open(self.vocab_file, 'rb') as fin: - proto_str = fin.read() - - with open(vocab_file, "wb") as writer: - writer.write(proto_str) - - return (vocab_file,) - - def get_prefix_tokens(self): - prefix_tokens = [self.get_command("[gMASK]"), self.get_command("sop")] - return prefix_tokens - - def build_single_message(self, role, metadata, message): - assert role in ["system", "user", "assistant", "observation"], role - role_tokens = [self.get_command(f"<|{role}|>")] + self.tokenizer.encode(f"{metadata}\n") - message_tokens = self.tokenizer.encode(message) - tokens = role_tokens + message_tokens - return tokens - - def build_chat_input(self, query, history=None, role="user"): - if history is None: - history = [] - input_ids = [] - for item in history: - content = item["content"] - if item["role"] == "system" and "tools" in item: - content = content + "\n" + json.dumps(item["tools"], indent=4, ensure_ascii=False) - input_ids.extend(self.build_single_message(item["role"], item.get("metadata", ""), content)) - input_ids.extend(self.build_single_message(role, "", query)) - input_ids.extend([self.get_command("<|assistant|>")]) - return self.batch_encode_plus([input_ids], return_tensors="pt", is_split_into_words=True) - - def build_inputs_with_special_tokens( - self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None - ) -> List[int]: - """ - Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and - adding special tokens. A BERT sequence has the following format: - - - single sequence: `[CLS] X [SEP]` - - pair of sequences: `[CLS] A [SEP] B [SEP]` - - Args: - token_ids_0 (`List[int]`): - List of IDs to which the special tokens will be added. - token_ids_1 (`List[int]`, *optional*): - Optional second list of IDs for sequence pairs. - - Returns: - `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. - """ - prefix_tokens = self.get_prefix_tokens() - token_ids_0 = prefix_tokens + token_ids_0 - if token_ids_1 is not None: - token_ids_0 = token_ids_0 + token_ids_1 + [self.get_command("")] - return token_ids_0 - - def _pad( - self, - encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], - max_length: Optional[int] = None, - padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, - pad_to_multiple_of: Optional[int] = None, - return_attention_mask: Optional[bool] = None, - ) -> dict: - """ - Pad encoded inputs (on left/right and up to predefined length or max length in the batch) - - Args: - encoded_inputs: - Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`). - max_length: maximum length of the returned list and optionally padding length (see below). - Will truncate by taking into account the special tokens. - padding_strategy: PaddingStrategy to use for padding. - - - PaddingStrategy.LONGEST Pad to the longest sequence in the batch - - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) - - PaddingStrategy.DO_NOT_PAD: Do not pad - The tokenizer padding sides are defined in self.padding_side: - - - 'left': pads on the left of the sequences - - 'right': pads on the right of the sequences - pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. 
- This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability - `>= 7.5` (Volta). - return_attention_mask: - (optional) Set to False to avoid returning attention mask (default: set to model specifics) - """ - # Load from model defaults - assert self.padding_side == "left" - - required_input = encoded_inputs[self.model_input_names[0]] - seq_length = len(required_input) - - if padding_strategy == PaddingStrategy.LONGEST: - max_length = len(required_input) - - if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): - max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of - - needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length - - # Initialize attention mask if not present. - if "attention_mask" not in encoded_inputs: - encoded_inputs["attention_mask"] = [1] * seq_length - - if "position_ids" not in encoded_inputs: - encoded_inputs["position_ids"] = list(range(seq_length)) - - if needs_to_be_padded: - difference = max_length - len(required_input) - - if "attention_mask" in encoded_inputs: - encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"] - if "position_ids" in encoded_inputs: - encoded_inputs["position_ids"] = [0] * difference + encoded_inputs["position_ids"] - encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input - - return encoded_inputs diff --git a/checkpoint-400/tokenizer.model b/checkpoint-400/tokenizer.model deleted file mode 100644 index 8a8007697b7cc3d3868dcffbbebf8c1f2bd690ba..0000000000000000000000000000000000000000 --- a/checkpoint-400/tokenizer.model +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e7dc4c393423b76e4373e5157ddc34803a0189ba96b21ddbb40269d31468a6f2 -size 1018370 diff --git a/checkpoint-400/tokenizer_config.json b/checkpoint-400/tokenizer_config.json deleted file mode 100644 index f0e543dcb5c184576e9e88e2c48b586290d71953..0000000000000000000000000000000000000000 --- a/checkpoint-400/tokenizer_config.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "added_tokens_decoder": { - "64795": { - "content": "<|user|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": true - }, - "64797": { - "content": "<|observation|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": true - } - }, - "additional_special_tokens": [ - "<|user|>", - "<|observation|>" - ], - "auto_map": { - "AutoTokenizer": [ - "tokenization_chatglm.ChatGLMTokenizer", - null - ] - }, - "clean_up_tokenization_spaces": false, - "do_lower_case": false, - "encode_special_tokens": false, - "eos_token": "", - "model_max_length": 1000000000000000019884624838656, - "pad_token": "", - "padding_side": "right", - "remove_space": false, - "split_special_tokens": false, - "tokenizer_class": "ChatGLMTokenizer", - "unk_token": "" -} diff --git a/checkpoint-400/trainer_state.json b/checkpoint-400/trainer_state.json deleted file mode 100644 index d562bb47ed6cb4818d41e7cc4617188528eec38f..0000000000000000000000000000000000000000 --- a/checkpoint-400/trainer_state.json +++ /dev/null @@ -1,501 +0,0 @@ -{ - "best_metric": null, - "best_model_checkpoint": null, - "epoch": 9.090909090909092, - "eval_steps": 500, - "global_step": 400, - "is_hyper_param_search": false, - "is_local_process_zero": true, - "is_world_process_zero": true, - "log_history": [ - { - "epoch": 0.11, 
- "learning_rate": 0.001999898043009433, - "loss": 4.5094, - "step": 5 - }, - { - "epoch": 0.23, - "learning_rate": 0.0019995921928281893, - "loss": 3.8047, - "step": 10 - }, - { - "epoch": 0.34, - "learning_rate": 0.001999082511823396, - "loss": 3.8813, - "step": 15 - }, - { - "epoch": 0.45, - "learning_rate": 0.0019983691039261358, - "loss": 3.7188, - "step": 20 - }, - { - "epoch": 0.57, - "learning_rate": 0.0019974521146102534, - "loss": 3.6695, - "step": 25 - }, - { - "epoch": 0.68, - "learning_rate": 0.001996331730862691, - "loss": 3.7078, - "step": 30 - }, - { - "epoch": 0.8, - "learning_rate": 0.0019950081811453595, - "loss": 3.6844, - "step": 35 - }, - { - "epoch": 0.91, - "learning_rate": 0.0019934817353485504, - "loss": 3.6961, - "step": 40 - }, - { - "epoch": 1.02, - "learning_rate": 0.0019917527047359027, - "loss": 3.5758, - "step": 45 - }, - { - "epoch": 1.14, - "learning_rate": 0.001989821441880933, - "loss": 3.4102, - "step": 50 - }, - { - "epoch": 1.25, - "learning_rate": 0.0019876883405951376, - "loss": 3.3984, - "step": 55 - }, - { - "epoch": 1.36, - "learning_rate": 0.001985353835847693, - "loss": 3.3602, - "step": 60 - }, - { - "epoch": 1.48, - "learning_rate": 0.0019828184036767556, - "loss": 3.4461, - "step": 65 - }, - { - "epoch": 1.59, - "learning_rate": 0.0019800825610923932, - "loss": 3.3461, - "step": 70 - }, - { - "epoch": 1.7, - "learning_rate": 0.0019771468659711597, - "loss": 3.4172, - "step": 75 - }, - { - "epoch": 1.82, - "learning_rate": 0.0019740119169423336, - "loss": 3.4359, - "step": 80 - }, - { - "epoch": 1.93, - "learning_rate": 0.0019706783532658523, - "loss": 3.5141, - "step": 85 - }, - { - "epoch": 2.05, - "learning_rate": 0.001967146854701957, - "loss": 3.2242, - "step": 90 - }, - { - "epoch": 2.16, - "learning_rate": 0.0019634181413725788, - "loss": 3.0227, - "step": 95 - }, - { - "epoch": 2.27, - "learning_rate": 0.0019594929736144974, - "loss": 2.8984, - "step": 100 - }, - { - "epoch": 2.39, - "learning_rate": 0.001955372151824297, - "loss": 3.0781, - "step": 105 - }, - { - "epoch": 2.5, - "learning_rate": 0.0019510565162951536, - "loss": 3.1203, - "step": 110 - }, - { - "epoch": 2.61, - "learning_rate": 0.00194654694704549, - "loss": 3.1828, - "step": 115 - }, - { - "epoch": 2.73, - "learning_rate": 0.0019418443636395248, - "loss": 3.0531, - "step": 120 - }, - { - "epoch": 2.84, - "learning_rate": 0.001936949724999762, - "loss": 3.1523, - "step": 125 - }, - { - "epoch": 2.95, - "learning_rate": 0.0019318640292114524, - "loss": 3.1156, - "step": 130 - }, - { - "epoch": 3.07, - "learning_rate": 0.0019265883133190713, - "loss": 2.7844, - "step": 135 - }, - { - "epoch": 3.18, - "learning_rate": 0.0019211236531148502, - "loss": 2.6711, - "step": 140 - }, - { - "epoch": 3.3, - "learning_rate": 0.0019154711629194062, - "loss": 2.6609, - "step": 145 - }, - { - "epoch": 3.41, - "learning_rate": 0.0019096319953545184, - "loss": 2.7531, - "step": 150 - }, - { - "epoch": 3.52, - "learning_rate": 0.0019036073411080917, - "loss": 2.7977, - "step": 155 - }, - { - "epoch": 3.64, - "learning_rate": 0.0018973984286913585, - "loss": 2.7914, - "step": 160 - }, - { - "epoch": 3.75, - "learning_rate": 0.0018910065241883678, - "loss": 2.8188, - "step": 165 - }, - { - "epoch": 3.86, - "learning_rate": 0.0018844329309978143, - "loss": 2.8945, - "step": 170 - }, - { - "epoch": 3.98, - "learning_rate": 0.0018776789895672556, - "loss": 2.8883, - "step": 175 - }, - { - "epoch": 4.09, - "learning_rate": 0.0018707460771197773, - "loss": 2.4617, - "step": 180 - }, - { - 
"epoch": 4.2, - "learning_rate": 0.001863635607373157, - "loss": 2.4633, - "step": 185 - }, - { - "epoch": 4.32, - "learning_rate": 0.001856349030251589, - "loss": 2.5094, - "step": 190 - }, - { - "epoch": 4.43, - "learning_rate": 0.0018488878315900226, - "loss": 2.432, - "step": 195 - }, - { - "epoch": 4.55, - "learning_rate": 0.0018412535328311812, - "loss": 2.5648, - "step": 200 - }, - { - "epoch": 4.66, - "learning_rate": 0.0018334476907153176, - "loss": 2.4836, - "step": 205 - }, - { - "epoch": 4.77, - "learning_rate": 0.001825471896962774, - "loss": 2.6617, - "step": 210 - }, - { - "epoch": 4.89, - "learning_rate": 0.0018173277779494068, - "loss": 2.6734, - "step": 215 - }, - { - "epoch": 5.0, - "learning_rate": 0.0018090169943749475, - "loss": 2.6742, - "step": 220 - }, - { - "epoch": 5.11, - "learning_rate": 0.0018005412409243604, - "loss": 2.1379, - "step": 225 - }, - { - "epoch": 5.23, - "learning_rate": 0.0017919022459222751, - "loss": 2.1508, - "step": 230 - }, - { - "epoch": 5.34, - "learning_rate": 0.0017831017709805555, - "loss": 2.2582, - "step": 235 - }, - { - "epoch": 5.45, - "learning_rate": 0.0017741416106390826, - "loss": 2.2367, - "step": 240 - }, - { - "epoch": 5.57, - "learning_rate": 0.0017650235919998232, - "loss": 2.325, - "step": 245 - }, - { - "epoch": 5.68, - "learning_rate": 0.0017557495743542584, - "loss": 2.2703, - "step": 250 - }, - { - "epoch": 5.8, - "learning_rate": 0.0017463214488042471, - "loss": 2.3703, - "step": 255 - }, - { - "epoch": 5.91, - "learning_rate": 0.001736741137876405, - "loss": 2.4648, - "step": 260 - }, - { - "epoch": 6.02, - "learning_rate": 0.0017270105951300739, - "loss": 2.2734, - "step": 265 - }, - { - "epoch": 6.14, - "learning_rate": 0.0017171318047589637, - "loss": 1.9898, - "step": 270 - }, - { - "epoch": 6.25, - "learning_rate": 0.0017071067811865474, - "loss": 1.9816, - "step": 275 - }, - { - "epoch": 6.36, - "learning_rate": 0.0016969375686552938, - "loss": 1.9648, - "step": 280 - }, - { - "epoch": 6.48, - "learning_rate": 0.0016866262408098134, - "loss": 2.1672, - "step": 285 - }, - { - "epoch": 6.59, - "learning_rate": 0.0016761749002740195, - "loss": 2.0074, - "step": 290 - }, - { - "epoch": 6.7, - "learning_rate": 0.0016655856782223683, - "loss": 2.1598, - "step": 295 - }, - { - "epoch": 6.82, - "learning_rate": 0.0016548607339452852, - "loss": 2.0996, - "step": 300 - }, - { - "epoch": 6.93, - "learning_rate": 0.0016440022544088554, - "loss": 2.1434, - "step": 305 - }, - { - "epoch": 7.05, - "learning_rate": 0.0016330124538088703, - "loss": 2.0699, - "step": 310 - }, - { - "epoch": 7.16, - "learning_rate": 0.0016218935731193223, - "loss": 1.7312, - "step": 315 - }, - { - "epoch": 7.27, - "learning_rate": 0.0016106478796354383, - "loss": 1.7799, - "step": 320 - }, - { - "epoch": 7.39, - "learning_rate": 0.0015992776665113468, - "loss": 1.7008, - "step": 325 - }, - { - "epoch": 7.5, - "learning_rate": 0.0015877852522924731, - "loss": 1.8969, - "step": 330 - }, - { - "epoch": 7.61, - "learning_rate": 0.0015761729804427528, - "loss": 1.8156, - "step": 335 - }, - { - "epoch": 7.73, - "learning_rate": 0.0015644432188667695, - "loss": 1.9336, - "step": 340 - }, - { - "epoch": 7.84, - "learning_rate": 0.0015525983594269026, - "loss": 1.9918, - "step": 345 - }, - { - "epoch": 7.95, - "learning_rate": 0.0015406408174555976, - "loss": 2.0055, - "step": 350 - }, - { - "epoch": 8.07, - "learning_rate": 0.0015285730312628418, - "loss": 1.7168, - "step": 355 - }, - { - "epoch": 8.18, - "learning_rate": 0.001516397461638962, - "loss": 
1.5531, - "step": 360 - }, - { - "epoch": 8.3, - "learning_rate": 0.001504116591352832, - "loss": 1.5922, - "step": 365 - }, - { - "epoch": 8.41, - "learning_rate": 0.001491732924645604, - "loss": 1.618, - "step": 370 - }, - { - "epoch": 8.52, - "learning_rate": 0.0014792489867200569, - "loss": 1.6738, - "step": 375 - }, - { - "epoch": 8.64, - "learning_rate": 0.0014666673232256737, - "loss": 1.7461, - "step": 380 - }, - { - "epoch": 8.75, - "learning_rate": 0.0014539904997395467, - "loss": 1.6746, - "step": 385 - }, - { - "epoch": 8.86, - "learning_rate": 0.0014412211012432212, - "loss": 1.7711, - "step": 390 - }, - { - "epoch": 8.98, - "learning_rate": 0.0014283617315955814, - "loss": 1.8387, - "step": 395 - }, - { - "epoch": 9.09, - "learning_rate": 0.0014154150130018866, - "loss": 1.475, - "step": 400 - } - ], - "logging_steps": 5, - "max_steps": 1100, - "num_input_tokens_seen": 0, - "num_train_epochs": 25, - "save_steps": 100, - "total_flos": 2.0358076130328576e+17, - "train_batch_size": 4, - "trial_name": null, - "trial_params": null -} diff --git a/checkpoint-400/training_args.bin b/checkpoint-400/training_args.bin deleted file mode 100644 index ff8dbcdca96337fe706e3b8a5e49365cea791f82..0000000000000000000000000000000000000000 --- a/checkpoint-400/training_args.bin +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:fef6a3ae006ec4c51dbcf0a3e569288ca5ab1bbc97f41768934c32153b03277c -size 4920 diff --git a/checkpoint-500/README.md b/checkpoint-500/README.md deleted file mode 100644 index 0a4640bc0bab946c21e07f36639d991fc5d9f684..0000000000000000000000000000000000000000 --- a/checkpoint-500/README.md +++ /dev/null @@ -1,204 +0,0 @@ ---- -library_name: peft -base_model: /root/chatglm3-6b ---- - -# Model Card for Model ID - - - - - -## Model Details - -### Model Description - - - - - -- **Developed by:** [More Information Needed] -- **Funded by [optional]:** [More Information Needed] -- **Shared by [optional]:** [More Information Needed] -- **Model type:** [More Information Needed] -- **Language(s) (NLP):** [More Information Needed] -- **License:** [More Information Needed] -- **Finetuned from model [optional]:** [More Information Needed] - -### Model Sources [optional] - - - -- **Repository:** [More Information Needed] -- **Paper [optional]:** [More Information Needed] -- **Demo [optional]:** [More Information Needed] - -## Uses - - - -### Direct Use - - - -[More Information Needed] - -### Downstream Use [optional] - - - -[More Information Needed] - -### Out-of-Scope Use - - - -[More Information Needed] - -## Bias, Risks, and Limitations - - - -[More Information Needed] - -### Recommendations - - - -Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. - -## How to Get Started with the Model - -Use the code below to get started with the model. 
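
A minimal sketch of what such code could look like, assuming this checkpoint's LoRA adapter (PEFT, r=32, target module `query_key_value`, as recorded in the adapter_config.json of this checkpoint) is applied on top of the chatglm3-6b base with the PEFT library. The Hub ID `THUDM/chatglm3-6b`, the local adapter path, and the GPU/half-precision settings below are illustrative assumptions, not values taken from this repository:

```python
# Sketch only: attach this checkpoint's LoRA adapter to the chatglm3-6b base model via PEFT.
# The base-model ID and adapter directory are assumptions; adjust them to your own setup.
from transformers import AutoModel, AutoTokenizer
from peft import PeftModel

base_id = "THUDM/chatglm3-6b"      # assumed Hub ID of the base model
adapter_dir = "./checkpoint-500"   # assumed local path to this checkpoint's adapter weights

tokenizer = AutoTokenizer.from_pretrained(base_id, trust_remote_code=True)
base = AutoModel.from_pretrained(base_id, trust_remote_code=True).half().cuda()  # assumes a CUDA GPU
model = PeftModel.from_pretrained(base, adapter_dir).eval()  # loads the rank-32 LoRA weights

# ChatGLM3's remote code exposes a chat() helper for multi-turn inference.
response, history = model.chat(tokenizer, "Hello", history=[])
print(response)
```
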
- -[More Information Needed] - -## Training Details - -### Training Data - - - -[More Information Needed] - -### Training Procedure - - - -#### Preprocessing [optional] - -[More Information Needed] - - -#### Training Hyperparameters - -- **Training regime:** [More Information Needed] - -#### Speeds, Sizes, Times [optional] - - - -[More Information Needed] - -## Evaluation - - - -### Testing Data, Factors & Metrics - -#### Testing Data - - - -[More Information Needed] - -#### Factors - - - -[More Information Needed] - -#### Metrics - - - -[More Information Needed] - -### Results - -[More Information Needed] - -#### Summary - - - -## Model Examination [optional] - - - -[More Information Needed] - -## Environmental Impact - - - -Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - -- **Hardware Type:** [More Information Needed] -- **Hours used:** [More Information Needed] -- **Cloud Provider:** [More Information Needed] -- **Compute Region:** [More Information Needed] -- **Carbon Emitted:** [More Information Needed] - -## Technical Specifications [optional] - -### Model Architecture and Objective - -[More Information Needed] - -### Compute Infrastructure - -[More Information Needed] - -#### Hardware - -[More Information Needed] - -#### Software - -[More Information Needed] - -## Citation [optional] - - - -**BibTeX:** - -[More Information Needed] - -**APA:** - -[More Information Needed] - -## Glossary [optional] - - - -[More Information Needed] - -## More Information [optional] - -[More Information Needed] - -## Model Card Authors [optional] - -[More Information Needed] - -## Model Card Contact - -[More Information Needed] - - -### Framework versions - -- PEFT 0.7.1 \ No newline at end of file diff --git a/checkpoint-500/adapter_config.json b/checkpoint-500/adapter_config.json deleted file mode 100644 index e437b533e257864a38c04ed024f90cab5eebcd8d..0000000000000000000000000000000000000000 --- a/checkpoint-500/adapter_config.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "alpha_pattern": {}, - "auto_mapping": null, - "base_model_name_or_path": "/root/chatglm3-6b", - "bias": "none", - "fan_in_fan_out": false, - "inference_mode": true, - "init_lora_weights": true, - "layers_pattern": null, - "layers_to_transform": null, - "loftq_config": {}, - "lora_alpha": 64.0, - "lora_dropout": 0.1, - "megatron_config": null, - "megatron_core": "megatron.core", - "modules_to_save": null, - "peft_type": "LORA", - "r": 32, - "rank_pattern": {}, - "revision": null, - "target_modules": [ - "query_key_value" - ], - "task_type": "CAUSAL_LM" -} \ No newline at end of file diff --git a/checkpoint-500/adapter_model.safetensors b/checkpoint-500/adapter_model.safetensors deleted file mode 100644 index 65b996d3f9bb37b9ab0e7419794f664a68b4cff3..0000000000000000000000000000000000000000 --- a/checkpoint-500/adapter_model.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:38c3f10badc4eca46e5391c0d76f664d91f2c5c96c52f05823964db29a131cc8 -size 31204248 diff --git a/checkpoint-500/optimizer.pt b/checkpoint-500/optimizer.pt deleted file mode 100644 index be94e82e99ebe5b1bd70640b320aaf46362cf277..0000000000000000000000000000000000000000 --- a/checkpoint-500/optimizer.pt +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7175631f9ef75d430eb08d54cd0bc47f9376aec47461deaaff8946e1fce80f12 -size 62437882 diff --git 
a/checkpoint-500/rng_state.pth b/checkpoint-500/rng_state.pth deleted file mode 100644 index 704ad9716a617526b738a271bd3896f9a0d51cb5..0000000000000000000000000000000000000000 --- a/checkpoint-500/rng_state.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:8270413ee4d1e27028e4c5fdc6f6f13e233f42ffd7aa9694385f4015d85edcf0 -size 14244 diff --git a/checkpoint-500/scheduler.pt b/checkpoint-500/scheduler.pt deleted file mode 100644 index 75e62ef52da8a1d95d04c12edb9de06ab1fe7772..0000000000000000000000000000000000000000 --- a/checkpoint-500/scheduler.pt +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:2343a969c11ec4ec216a210d648b7b960d566bef5539e79c2765af49aa68625b -size 1064 diff --git a/checkpoint-500/special_tokens_map.json b/checkpoint-500/special_tokens_map.json deleted file mode 100644 index dd02cd16ef3e1cfed3ce0f8cd09b983412317a48..0000000000000000000000000000000000000000 --- a/checkpoint-500/special_tokens_map.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "additional_special_tokens": [ - { - "content": "<|user|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false - }, - { - "content": "<|observation|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false - } - ] -} diff --git a/checkpoint-500/tokenization_chatglm.py b/checkpoint-500/tokenization_chatglm.py deleted file mode 100644 index 862e8f9a75bc874741cababc3b352cbbfe3611ad..0000000000000000000000000000000000000000 --- a/checkpoint-500/tokenization_chatglm.py +++ /dev/null @@ -1,300 +0,0 @@ -import json -import os -import re -from typing import List, Optional, Union, Dict -from sentencepiece import SentencePieceProcessor -from transformers import PreTrainedTokenizer -from transformers.utils import logging, PaddingStrategy -from transformers.tokenization_utils_base import EncodedInput, BatchEncoding - - -class SPTokenizer: - def __init__(self, model_path: str): - # reload tokenizer - assert os.path.isfile(model_path), model_path - self.sp_model = SentencePieceProcessor(model_file=model_path) - - # BOS / EOS token IDs - self.n_words: int = self.sp_model.vocab_size() - self.bos_id: int = self.sp_model.bos_id() - self.eos_id: int = self.sp_model.eos_id() - self.pad_id: int = self.sp_model.unk_id() - assert self.sp_model.vocab_size() == self.sp_model.get_piece_size() - - role_special_tokens = ["<|system|>", "<|user|>", "<|assistant|>", "<|observation|>"] - special_tokens = ["[MASK]", "[gMASK]", "[sMASK]", "sop", "eop"] + role_special_tokens - self.special_tokens = {} - self.index_special_tokens = {} - for token in special_tokens: - self.special_tokens[token] = self.n_words - self.index_special_tokens[self.n_words] = token - self.n_words += 1 - self.role_special_token_expression = "|".join([re.escape(token) for token in role_special_tokens]) - - def tokenize(self, s: str, encode_special_tokens=False): - if encode_special_tokens: - last_index = 0 - t = [] - for match in re.finditer(self.role_special_token_expression, s): - if last_index < match.start(): - t.extend(self.sp_model.EncodeAsPieces(s[last_index:match.start()])) - t.append(s[match.start():match.end()]) - last_index = match.end() - if last_index < len(s): - t.extend(self.sp_model.EncodeAsPieces(s[last_index:])) - return t - else: - return self.sp_model.EncodeAsPieces(s) - - def encode(self, s: str, bos: bool = False, eos: bool = False) -> List[int]: - assert type(s) is str - t = self.sp_model.encode(s) - if bos: - t = [self.bos_id] + t - if eos: - 
t = t + [self.eos_id] - return t - - def decode(self, t: List[int]) -> str: - text, buffer = "", [] - for token in t: - if token in self.index_special_tokens: - if buffer: - text += self.sp_model.decode(buffer) - buffer = [] - text += self.index_special_tokens[token] - else: - buffer.append(token) - if buffer: - text += self.sp_model.decode(buffer) - return text - - def decode_tokens(self, tokens: List[str]) -> str: - text = self.sp_model.DecodePieces(tokens) - return text - - def convert_token_to_id(self, token): - """ Converts a token (str) in an id using the vocab. """ - if token in self.special_tokens: - return self.special_tokens[token] - return self.sp_model.PieceToId(token) - - def convert_id_to_token(self, index): - """Converts an index (integer) in a token (str) using the vocab.""" - if index in self.index_special_tokens: - return self.index_special_tokens[index] - if index in [self.eos_id, self.bos_id, self.pad_id] or index < 0 or index > self.sp_model.vocab_size(): - return "" - return self.sp_model.IdToPiece(index) - - -class ChatGLMTokenizer(PreTrainedTokenizer): - vocab_files_names = {"vocab_file": "tokenizer.model"} - - model_input_names = ["input_ids", "attention_mask", "position_ids"] - - def __init__(self, vocab_file, padding_side="left", clean_up_tokenization_spaces=False, encode_special_tokens=False, - **kwargs): - self.name = "GLMTokenizer" - - self.vocab_file = vocab_file - self.tokenizer = SPTokenizer(vocab_file) - self.special_tokens = { - "": self.tokenizer.bos_id, - "": self.tokenizer.eos_id, - "": self.tokenizer.pad_id - } - self.encode_special_tokens = encode_special_tokens - super().__init__(padding_side=padding_side, clean_up_tokenization_spaces=clean_up_tokenization_spaces, - encode_special_tokens=encode_special_tokens, - **kwargs) - - def get_command(self, token): - if token in self.special_tokens: - return self.special_tokens[token] - assert token in self.tokenizer.special_tokens, f"{token} is not a special token for {self.name}" - return self.tokenizer.special_tokens[token] - - @property - def unk_token(self) -> str: - return "" - - @property - def pad_token(self) -> str: - return "" - - @property - def pad_token_id(self): - return self.get_command("") - - @property - def eos_token(self) -> str: - return "" - - @property - def eos_token_id(self): - return self.get_command("") - - @property - def vocab_size(self): - return self.tokenizer.n_words - - def get_vocab(self): - """ Returns vocab as a dict """ - vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)} - vocab.update(self.added_tokens_encoder) - return vocab - - def _tokenize(self, text, **kwargs): - return self.tokenizer.tokenize(text, encode_special_tokens=self.encode_special_tokens) - - def _convert_token_to_id(self, token): - """ Converts a token (str) in an id using the vocab. """ - return self.tokenizer.convert_token_to_id(token) - - def _convert_id_to_token(self, index): - """Converts an index (integer) in a token (str) using the vocab.""" - return self.tokenizer.convert_id_to_token(index) - - def convert_tokens_to_string(self, tokens: List[str]) -> str: - return self.tokenizer.decode_tokens(tokens) - - def save_vocabulary(self, save_directory, filename_prefix=None): - """ - Save the vocabulary and special tokens file to a directory. - - Args: - save_directory (`str`): - The directory in which to save the vocabulary. - filename_prefix (`str`, *optional*): - An optional prefix to add to the named of the saved files. - - Returns: - `Tuple(str)`: Paths to the files saved. 
- """ - if os.path.isdir(save_directory): - vocab_file = os.path.join( - save_directory, self.vocab_files_names["vocab_file"] - ) - else: - vocab_file = save_directory - - with open(self.vocab_file, 'rb') as fin: - proto_str = fin.read() - - with open(vocab_file, "wb") as writer: - writer.write(proto_str) - - return (vocab_file,) - - def get_prefix_tokens(self): - prefix_tokens = [self.get_command("[gMASK]"), self.get_command("sop")] - return prefix_tokens - - def build_single_message(self, role, metadata, message): - assert role in ["system", "user", "assistant", "observation"], role - role_tokens = [self.get_command(f"<|{role}|>")] + self.tokenizer.encode(f"{metadata}\n") - message_tokens = self.tokenizer.encode(message) - tokens = role_tokens + message_tokens - return tokens - - def build_chat_input(self, query, history=None, role="user"): - if history is None: - history = [] - input_ids = [] - for item in history: - content = item["content"] - if item["role"] == "system" and "tools" in item: - content = content + "\n" + json.dumps(item["tools"], indent=4, ensure_ascii=False) - input_ids.extend(self.build_single_message(item["role"], item.get("metadata", ""), content)) - input_ids.extend(self.build_single_message(role, "", query)) - input_ids.extend([self.get_command("<|assistant|>")]) - return self.batch_encode_plus([input_ids], return_tensors="pt", is_split_into_words=True) - - def build_inputs_with_special_tokens( - self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None - ) -> List[int]: - """ - Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and - adding special tokens. A BERT sequence has the following format: - - - single sequence: `[CLS] X [SEP]` - - pair of sequences: `[CLS] A [SEP] B [SEP]` - - Args: - token_ids_0 (`List[int]`): - List of IDs to which the special tokens will be added. - token_ids_1 (`List[int]`, *optional*): - Optional second list of IDs for sequence pairs. - - Returns: - `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. - """ - prefix_tokens = self.get_prefix_tokens() - token_ids_0 = prefix_tokens + token_ids_0 - if token_ids_1 is not None: - token_ids_0 = token_ids_0 + token_ids_1 + [self.get_command("")] - return token_ids_0 - - def _pad( - self, - encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], - max_length: Optional[int] = None, - padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, - pad_to_multiple_of: Optional[int] = None, - return_attention_mask: Optional[bool] = None, - ) -> dict: - """ - Pad encoded inputs (on left/right and up to predefined length or max length in the batch) - - Args: - encoded_inputs: - Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`). - max_length: maximum length of the returned list and optionally padding length (see below). - Will truncate by taking into account the special tokens. - padding_strategy: PaddingStrategy to use for padding. - - - PaddingStrategy.LONGEST Pad to the longest sequence in the batch - - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) - - PaddingStrategy.DO_NOT_PAD: Do not pad - The tokenizer padding sides are defined in self.padding_side: - - - 'left': pads on the left of the sequences - - 'right': pads on the right of the sequences - pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. 
- This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability - `>= 7.5` (Volta). - return_attention_mask: - (optional) Set to False to avoid returning attention mask (default: set to model specifics) - """ - # Load from model defaults - assert self.padding_side == "left" - - required_input = encoded_inputs[self.model_input_names[0]] - seq_length = len(required_input) - - if padding_strategy == PaddingStrategy.LONGEST: - max_length = len(required_input) - - if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): - max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of - - needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length - - # Initialize attention mask if not present. - if "attention_mask" not in encoded_inputs: - encoded_inputs["attention_mask"] = [1] * seq_length - - if "position_ids" not in encoded_inputs: - encoded_inputs["position_ids"] = list(range(seq_length)) - - if needs_to_be_padded: - difference = max_length - len(required_input) - - if "attention_mask" in encoded_inputs: - encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"] - if "position_ids" in encoded_inputs: - encoded_inputs["position_ids"] = [0] * difference + encoded_inputs["position_ids"] - encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input - - return encoded_inputs diff --git a/checkpoint-500/tokenizer.model b/checkpoint-500/tokenizer.model deleted file mode 100644 index 8a8007697b7cc3d3868dcffbbebf8c1f2bd690ba..0000000000000000000000000000000000000000 --- a/checkpoint-500/tokenizer.model +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e7dc4c393423b76e4373e5157ddc34803a0189ba96b21ddbb40269d31468a6f2 -size 1018370 diff --git a/checkpoint-500/tokenizer_config.json b/checkpoint-500/tokenizer_config.json deleted file mode 100644 index f0e543dcb5c184576e9e88e2c48b586290d71953..0000000000000000000000000000000000000000 --- a/checkpoint-500/tokenizer_config.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "added_tokens_decoder": { - "64795": { - "content": "<|user|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": true - }, - "64797": { - "content": "<|observation|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": true - } - }, - "additional_special_tokens": [ - "<|user|>", - "<|observation|>" - ], - "auto_map": { - "AutoTokenizer": [ - "tokenization_chatglm.ChatGLMTokenizer", - null - ] - }, - "clean_up_tokenization_spaces": false, - "do_lower_case": false, - "encode_special_tokens": false, - "eos_token": "", - "model_max_length": 1000000000000000019884624838656, - "pad_token": "", - "padding_side": "right", - "remove_space": false, - "split_special_tokens": false, - "tokenizer_class": "ChatGLMTokenizer", - "unk_token": "" -} diff --git a/checkpoint-500/trainer_state.json b/checkpoint-500/trainer_state.json deleted file mode 100644 index 8bc3cbe0d4b39a50542f5e3f999aa29cb17c2020..0000000000000000000000000000000000000000 --- a/checkpoint-500/trainer_state.json +++ /dev/null @@ -1,621 +0,0 @@ -{ - "best_metric": null, - "best_model_checkpoint": null, - "epoch": 11.363636363636363, - "eval_steps": 500, - "global_step": 500, - "is_hyper_param_search": false, - "is_local_process_zero": true, - "is_world_process_zero": true, - "log_history": [ - { - "epoch": 0.11, 
- "learning_rate": 0.001999898043009433, - "loss": 4.5094, - "step": 5 - }, - { - "epoch": 0.23, - "learning_rate": 0.0019995921928281893, - "loss": 3.8047, - "step": 10 - }, - { - "epoch": 0.34, - "learning_rate": 0.001999082511823396, - "loss": 3.8813, - "step": 15 - }, - { - "epoch": 0.45, - "learning_rate": 0.0019983691039261358, - "loss": 3.7188, - "step": 20 - }, - { - "epoch": 0.57, - "learning_rate": 0.0019974521146102534, - "loss": 3.6695, - "step": 25 - }, - { - "epoch": 0.68, - "learning_rate": 0.001996331730862691, - "loss": 3.7078, - "step": 30 - }, - { - "epoch": 0.8, - "learning_rate": 0.0019950081811453595, - "loss": 3.6844, - "step": 35 - }, - { - "epoch": 0.91, - "learning_rate": 0.0019934817353485504, - "loss": 3.6961, - "step": 40 - }, - { - "epoch": 1.02, - "learning_rate": 0.0019917527047359027, - "loss": 3.5758, - "step": 45 - }, - { - "epoch": 1.14, - "learning_rate": 0.001989821441880933, - "loss": 3.4102, - "step": 50 - }, - { - "epoch": 1.25, - "learning_rate": 0.0019876883405951376, - "loss": 3.3984, - "step": 55 - }, - { - "epoch": 1.36, - "learning_rate": 0.001985353835847693, - "loss": 3.3602, - "step": 60 - }, - { - "epoch": 1.48, - "learning_rate": 0.0019828184036767556, - "loss": 3.4461, - "step": 65 - }, - { - "epoch": 1.59, - "learning_rate": 0.0019800825610923932, - "loss": 3.3461, - "step": 70 - }, - { - "epoch": 1.7, - "learning_rate": 0.0019771468659711597, - "loss": 3.4172, - "step": 75 - }, - { - "epoch": 1.82, - "learning_rate": 0.0019740119169423336, - "loss": 3.4359, - "step": 80 - }, - { - "epoch": 1.93, - "learning_rate": 0.0019706783532658523, - "loss": 3.5141, - "step": 85 - }, - { - "epoch": 2.05, - "learning_rate": 0.001967146854701957, - "loss": 3.2242, - "step": 90 - }, - { - "epoch": 2.16, - "learning_rate": 0.0019634181413725788, - "loss": 3.0227, - "step": 95 - }, - { - "epoch": 2.27, - "learning_rate": 0.0019594929736144974, - "loss": 2.8984, - "step": 100 - }, - { - "epoch": 2.39, - "learning_rate": 0.001955372151824297, - "loss": 3.0781, - "step": 105 - }, - { - "epoch": 2.5, - "learning_rate": 0.0019510565162951536, - "loss": 3.1203, - "step": 110 - }, - { - "epoch": 2.61, - "learning_rate": 0.00194654694704549, - "loss": 3.1828, - "step": 115 - }, - { - "epoch": 2.73, - "learning_rate": 0.0019418443636395248, - "loss": 3.0531, - "step": 120 - }, - { - "epoch": 2.84, - "learning_rate": 0.001936949724999762, - "loss": 3.1523, - "step": 125 - }, - { - "epoch": 2.95, - "learning_rate": 0.0019318640292114524, - "loss": 3.1156, - "step": 130 - }, - { - "epoch": 3.07, - "learning_rate": 0.0019265883133190713, - "loss": 2.7844, - "step": 135 - }, - { - "epoch": 3.18, - "learning_rate": 0.0019211236531148502, - "loss": 2.6711, - "step": 140 - }, - { - "epoch": 3.3, - "learning_rate": 0.0019154711629194062, - "loss": 2.6609, - "step": 145 - }, - { - "epoch": 3.41, - "learning_rate": 0.0019096319953545184, - "loss": 2.7531, - "step": 150 - }, - { - "epoch": 3.52, - "learning_rate": 0.0019036073411080917, - "loss": 2.7977, - "step": 155 - }, - { - "epoch": 3.64, - "learning_rate": 0.0018973984286913585, - "loss": 2.7914, - "step": 160 - }, - { - "epoch": 3.75, - "learning_rate": 0.0018910065241883678, - "loss": 2.8188, - "step": 165 - }, - { - "epoch": 3.86, - "learning_rate": 0.0018844329309978143, - "loss": 2.8945, - "step": 170 - }, - { - "epoch": 3.98, - "learning_rate": 0.0018776789895672556, - "loss": 2.8883, - "step": 175 - }, - { - "epoch": 4.09, - "learning_rate": 0.0018707460771197773, - "loss": 2.4617, - "step": 180 - }, - { - 
"epoch": 4.2, - "learning_rate": 0.001863635607373157, - "loss": 2.4633, - "step": 185 - }, - { - "epoch": 4.32, - "learning_rate": 0.001856349030251589, - "loss": 2.5094, - "step": 190 - }, - { - "epoch": 4.43, - "learning_rate": 0.0018488878315900226, - "loss": 2.432, - "step": 195 - }, - { - "epoch": 4.55, - "learning_rate": 0.0018412535328311812, - "loss": 2.5648, - "step": 200 - }, - { - "epoch": 4.66, - "learning_rate": 0.0018334476907153176, - "loss": 2.4836, - "step": 205 - }, - { - "epoch": 4.77, - "learning_rate": 0.001825471896962774, - "loss": 2.6617, - "step": 210 - }, - { - "epoch": 4.89, - "learning_rate": 0.0018173277779494068, - "loss": 2.6734, - "step": 215 - }, - { - "epoch": 5.0, - "learning_rate": 0.0018090169943749475, - "loss": 2.6742, - "step": 220 - }, - { - "epoch": 5.11, - "learning_rate": 0.0018005412409243604, - "loss": 2.1379, - "step": 225 - }, - { - "epoch": 5.23, - "learning_rate": 0.0017919022459222751, - "loss": 2.1508, - "step": 230 - }, - { - "epoch": 5.34, - "learning_rate": 0.0017831017709805555, - "loss": 2.2582, - "step": 235 - }, - { - "epoch": 5.45, - "learning_rate": 0.0017741416106390826, - "loss": 2.2367, - "step": 240 - }, - { - "epoch": 5.57, - "learning_rate": 0.0017650235919998232, - "loss": 2.325, - "step": 245 - }, - { - "epoch": 5.68, - "learning_rate": 0.0017557495743542584, - "loss": 2.2703, - "step": 250 - }, - { - "epoch": 5.8, - "learning_rate": 0.0017463214488042471, - "loss": 2.3703, - "step": 255 - }, - { - "epoch": 5.91, - "learning_rate": 0.001736741137876405, - "loss": 2.4648, - "step": 260 - }, - { - "epoch": 6.02, - "learning_rate": 0.0017270105951300739, - "loss": 2.2734, - "step": 265 - }, - { - "epoch": 6.14, - "learning_rate": 0.0017171318047589637, - "loss": 1.9898, - "step": 270 - }, - { - "epoch": 6.25, - "learning_rate": 0.0017071067811865474, - "loss": 1.9816, - "step": 275 - }, - { - "epoch": 6.36, - "learning_rate": 0.0016969375686552938, - "loss": 1.9648, - "step": 280 - }, - { - "epoch": 6.48, - "learning_rate": 0.0016866262408098134, - "loss": 2.1672, - "step": 285 - }, - { - "epoch": 6.59, - "learning_rate": 0.0016761749002740195, - "loss": 2.0074, - "step": 290 - }, - { - "epoch": 6.7, - "learning_rate": 0.0016655856782223683, - "loss": 2.1598, - "step": 295 - }, - { - "epoch": 6.82, - "learning_rate": 0.0016548607339452852, - "loss": 2.0996, - "step": 300 - }, - { - "epoch": 6.93, - "learning_rate": 0.0016440022544088554, - "loss": 2.1434, - "step": 305 - }, - { - "epoch": 7.05, - "learning_rate": 0.0016330124538088703, - "loss": 2.0699, - "step": 310 - }, - { - "epoch": 7.16, - "learning_rate": 0.0016218935731193223, - "loss": 1.7312, - "step": 315 - }, - { - "epoch": 7.27, - "learning_rate": 0.0016106478796354383, - "loss": 1.7799, - "step": 320 - }, - { - "epoch": 7.39, - "learning_rate": 0.0015992776665113468, - "loss": 1.7008, - "step": 325 - }, - { - "epoch": 7.5, - "learning_rate": 0.0015877852522924731, - "loss": 1.8969, - "step": 330 - }, - { - "epoch": 7.61, - "learning_rate": 0.0015761729804427528, - "loss": 1.8156, - "step": 335 - }, - { - "epoch": 7.73, - "learning_rate": 0.0015644432188667695, - "loss": 1.9336, - "step": 340 - }, - { - "epoch": 7.84, - "learning_rate": 0.0015525983594269026, - "loss": 1.9918, - "step": 345 - }, - { - "epoch": 7.95, - "learning_rate": 0.0015406408174555976, - "loss": 2.0055, - "step": 350 - }, - { - "epoch": 8.07, - "learning_rate": 0.0015285730312628418, - "loss": 1.7168, - "step": 355 - }, - { - "epoch": 8.18, - "learning_rate": 0.001516397461638962, - "loss": 
1.5531, - "step": 360 - }, - { - "epoch": 8.3, - "learning_rate": 0.001504116591352832, - "loss": 1.5922, - "step": 365 - }, - { - "epoch": 8.41, - "learning_rate": 0.001491732924645604, - "loss": 1.618, - "step": 370 - }, - { - "epoch": 8.52, - "learning_rate": 0.0014792489867200569, - "loss": 1.6738, - "step": 375 - }, - { - "epoch": 8.64, - "learning_rate": 0.0014666673232256737, - "loss": 1.7461, - "step": 380 - }, - { - "epoch": 8.75, - "learning_rate": 0.0014539904997395467, - "loss": 1.6746, - "step": 385 - }, - { - "epoch": 8.86, - "learning_rate": 0.0014412211012432212, - "loss": 1.7711, - "step": 390 - }, - { - "epoch": 8.98, - "learning_rate": 0.0014283617315955814, - "loss": 1.8387, - "step": 395 - }, - { - "epoch": 9.09, - "learning_rate": 0.0014154150130018866, - "loss": 1.475, - "step": 400 - }, - { - "epoch": 9.2, - "learning_rate": 0.001402383585479068, - "loss": 1.4523, - "step": 405 - }, - { - "epoch": 9.32, - "learning_rate": 0.0013892701063173917, - "loss": 1.4812, - "step": 410 - }, - { - "epoch": 9.43, - "learning_rate": 0.0013760772495385997, - "loss": 1.525, - "step": 415 - }, - { - "epoch": 9.55, - "learning_rate": 0.001362807705350641, - "loss": 1.398, - "step": 420 - }, - { - "epoch": 9.66, - "learning_rate": 0.0013494641795990985, - "loss": 1.4477, - "step": 425 - }, - { - "epoch": 9.77, - "learning_rate": 0.00133604939321543, - "loss": 1.5801, - "step": 430 - }, - { - "epoch": 9.89, - "learning_rate": 0.0013225660816621341, - "loss": 1.6422, - "step": 435 - }, - { - "epoch": 10.0, - "learning_rate": 0.0013090169943749475, - "loss": 1.5535, - "step": 440 - }, - { - "epoch": 10.11, - "learning_rate": 0.0012954048942022001, - "loss": 1.2324, - "step": 445 - }, - { - "epoch": 10.23, - "learning_rate": 0.0012817325568414298, - "loss": 1.2613, - "step": 450 - }, - { - "epoch": 10.34, - "learning_rate": 0.001268002770273379, - "loss": 1.3293, - "step": 455 - }, - { - "epoch": 10.45, - "learning_rate": 0.0012542183341934872, - "loss": 1.2852, - "step": 460 - }, - { - "epoch": 10.57, - "learning_rate": 0.0012403820594409924, - "loss": 1.3295, - "step": 465 - }, - { - "epoch": 10.68, - "learning_rate": 0.0012264967674257645, - "loss": 1.3287, - "step": 470 - }, - { - "epoch": 10.8, - "learning_rate": 0.0012125652895529767, - "loss": 1.3566, - "step": 475 - }, - { - "epoch": 10.91, - "learning_rate": 0.0011985904666457455, - "loss": 1.4414, - "step": 480 - }, - { - "epoch": 11.02, - "learning_rate": 0.0011845751483658454, - "loss": 1.3695, - "step": 485 - }, - { - "epoch": 11.14, - "learning_rate": 0.0011705221926326238, - "loss": 1.1363, - "step": 490 - }, - { - "epoch": 11.25, - "learning_rate": 0.001156434465040231, - "loss": 1.1354, - "step": 495 - }, - { - "epoch": 11.36, - "learning_rate": 0.0011423148382732854, - "loss": 1.0725, - "step": 500 - } - ], - "logging_steps": 5, - "max_steps": 1100, - "num_input_tokens_seen": 0, - "num_train_epochs": 25, - "save_steps": 100, - "total_flos": 2.5448112270753792e+17, - "train_batch_size": 4, - "trial_name": null, - "trial_params": null -} diff --git a/checkpoint-500/training_args.bin b/checkpoint-500/training_args.bin deleted file mode 100644 index ff8dbcdca96337fe706e3b8a5e49365cea791f82..0000000000000000000000000000000000000000 --- a/checkpoint-500/training_args.bin +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:fef6a3ae006ec4c51dbcf0a3e569288ca5ab1bbc97f41768934c32153b03277c -size 4920 diff --git a/checkpoint-600/README.md b/checkpoint-600/README.md deleted file mode 100644 index 
0a4640bc0bab946c21e07f36639d991fc5d9f684..0000000000000000000000000000000000000000 --- a/checkpoint-600/README.md +++ /dev/null @@ -1,204 +0,0 @@ ---- -library_name: peft -base_model: /root/chatglm3-6b ---- - -# Model Card for Model ID - - - - - -## Model Details - -### Model Description - - - - - -- **Developed by:** [More Information Needed] -- **Funded by [optional]:** [More Information Needed] -- **Shared by [optional]:** [More Information Needed] -- **Model type:** [More Information Needed] -- **Language(s) (NLP):** [More Information Needed] -- **License:** [More Information Needed] -- **Finetuned from model [optional]:** [More Information Needed] - -### Model Sources [optional] - - - -- **Repository:** [More Information Needed] -- **Paper [optional]:** [More Information Needed] -- **Demo [optional]:** [More Information Needed] - -## Uses - - - -### Direct Use - - - -[More Information Needed] - -### Downstream Use [optional] - - - -[More Information Needed] - -### Out-of-Scope Use - - - -[More Information Needed] - -## Bias, Risks, and Limitations - - - -[More Information Needed] - -### Recommendations - - - -Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. - -## How to Get Started with the Model - -Use the code below to get started with the model. - -[More Information Needed] - -## Training Details - -### Training Data - - - -[More Information Needed] - -### Training Procedure - - - -#### Preprocessing [optional] - -[More Information Needed] - - -#### Training Hyperparameters - -- **Training regime:** [More Information Needed] - -#### Speeds, Sizes, Times [optional] - - - -[More Information Needed] - -## Evaluation - - - -### Testing Data, Factors & Metrics - -#### Testing Data - - - -[More Information Needed] - -#### Factors - - - -[More Information Needed] - -#### Metrics - - - -[More Information Needed] - -### Results - -[More Information Needed] - -#### Summary - - - -## Model Examination [optional] - - - -[More Information Needed] - -## Environmental Impact - - - -Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). 
- -- **Hardware Type:** [More Information Needed] -- **Hours used:** [More Information Needed] -- **Cloud Provider:** [More Information Needed] -- **Compute Region:** [More Information Needed] -- **Carbon Emitted:** [More Information Needed] - -## Technical Specifications [optional] - -### Model Architecture and Objective - -[More Information Needed] - -### Compute Infrastructure - -[More Information Needed] - -#### Hardware - -[More Information Needed] - -#### Software - -[More Information Needed] - -## Citation [optional] - - - -**BibTeX:** - -[More Information Needed] - -**APA:** - -[More Information Needed] - -## Glossary [optional] - - - -[More Information Needed] - -## More Information [optional] - -[More Information Needed] - -## Model Card Authors [optional] - -[More Information Needed] - -## Model Card Contact - -[More Information Needed] - - -### Framework versions - -- PEFT 0.7.1 \ No newline at end of file diff --git a/checkpoint-600/adapter_config.json b/checkpoint-600/adapter_config.json deleted file mode 100644 index e437b533e257864a38c04ed024f90cab5eebcd8d..0000000000000000000000000000000000000000 --- a/checkpoint-600/adapter_config.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "alpha_pattern": {}, - "auto_mapping": null, - "base_model_name_or_path": "/root/chatglm3-6b", - "bias": "none", - "fan_in_fan_out": false, - "inference_mode": true, - "init_lora_weights": true, - "layers_pattern": null, - "layers_to_transform": null, - "loftq_config": {}, - "lora_alpha": 64.0, - "lora_dropout": 0.1, - "megatron_config": null, - "megatron_core": "megatron.core", - "modules_to_save": null, - "peft_type": "LORA", - "r": 32, - "rank_pattern": {}, - "revision": null, - "target_modules": [ - "query_key_value" - ], - "task_type": "CAUSAL_LM" -} \ No newline at end of file diff --git a/checkpoint-600/adapter_model.safetensors b/checkpoint-600/adapter_model.safetensors deleted file mode 100644 index 8693c6108b7e2b17168eba8728bc677e3462f80c..0000000000000000000000000000000000000000 --- a/checkpoint-600/adapter_model.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:596b8d994195af594c60807528156dc655f2338206cea1219d8f9f17699a39c3 -size 31204248 diff --git a/checkpoint-600/optimizer.pt b/checkpoint-600/optimizer.pt deleted file mode 100644 index ccdb293468110b27a67449fea8d5b7d9580d6516..0000000000000000000000000000000000000000 --- a/checkpoint-600/optimizer.pt +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7dc673b7b24101a181f20152f8feeba5e0436d16b2fad104b913270a4bd9d6b9 -size 62437882 diff --git a/checkpoint-600/rng_state.pth b/checkpoint-600/rng_state.pth deleted file mode 100644 index 7f5da71bcba3027ac48bce6222b0505d26b2e6c4..0000000000000000000000000000000000000000 --- a/checkpoint-600/rng_state.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:31bcb95a31206dc96fa1e1bee6e4245055f6da2ff17b25d0d135dcb8b39f69a8 -size 14244 diff --git a/checkpoint-600/scheduler.pt b/checkpoint-600/scheduler.pt deleted file mode 100644 index ea69f62db04a8bd99c58a3f1efab6b8e610856c6..0000000000000000000000000000000000000000 --- a/checkpoint-600/scheduler.pt +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:20bfc76a2ebb40ddf377497ba1bdf8baec41588c6508711337f299764de5cf80 -size 1064 diff --git a/checkpoint-600/special_tokens_map.json b/checkpoint-600/special_tokens_map.json deleted file mode 100644 index 
dd02cd16ef3e1cfed3ce0f8cd09b983412317a48..0000000000000000000000000000000000000000 --- a/checkpoint-600/special_tokens_map.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "additional_special_tokens": [ - { - "content": "<|user|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false - }, - { - "content": "<|observation|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false - } - ] -} diff --git a/checkpoint-600/tokenization_chatglm.py b/checkpoint-600/tokenization_chatglm.py deleted file mode 100644 index 862e8f9a75bc874741cababc3b352cbbfe3611ad..0000000000000000000000000000000000000000 --- a/checkpoint-600/tokenization_chatglm.py +++ /dev/null @@ -1,300 +0,0 @@ -import json -import os -import re -from typing import List, Optional, Union, Dict -from sentencepiece import SentencePieceProcessor -from transformers import PreTrainedTokenizer -from transformers.utils import logging, PaddingStrategy -from transformers.tokenization_utils_base import EncodedInput, BatchEncoding - - -class SPTokenizer: - def __init__(self, model_path: str): - # reload tokenizer - assert os.path.isfile(model_path), model_path - self.sp_model = SentencePieceProcessor(model_file=model_path) - - # BOS / EOS token IDs - self.n_words: int = self.sp_model.vocab_size() - self.bos_id: int = self.sp_model.bos_id() - self.eos_id: int = self.sp_model.eos_id() - self.pad_id: int = self.sp_model.unk_id() - assert self.sp_model.vocab_size() == self.sp_model.get_piece_size() - - role_special_tokens = ["<|system|>", "<|user|>", "<|assistant|>", "<|observation|>"] - special_tokens = ["[MASK]", "[gMASK]", "[sMASK]", "sop", "eop"] + role_special_tokens - self.special_tokens = {} - self.index_special_tokens = {} - for token in special_tokens: - self.special_tokens[token] = self.n_words - self.index_special_tokens[self.n_words] = token - self.n_words += 1 - self.role_special_token_expression = "|".join([re.escape(token) for token in role_special_tokens]) - - def tokenize(self, s: str, encode_special_tokens=False): - if encode_special_tokens: - last_index = 0 - t = [] - for match in re.finditer(self.role_special_token_expression, s): - if last_index < match.start(): - t.extend(self.sp_model.EncodeAsPieces(s[last_index:match.start()])) - t.append(s[match.start():match.end()]) - last_index = match.end() - if last_index < len(s): - t.extend(self.sp_model.EncodeAsPieces(s[last_index:])) - return t - else: - return self.sp_model.EncodeAsPieces(s) - - def encode(self, s: str, bos: bool = False, eos: bool = False) -> List[int]: - assert type(s) is str - t = self.sp_model.encode(s) - if bos: - t = [self.bos_id] + t - if eos: - t = t + [self.eos_id] - return t - - def decode(self, t: List[int]) -> str: - text, buffer = "", [] - for token in t: - if token in self.index_special_tokens: - if buffer: - text += self.sp_model.decode(buffer) - buffer = [] - text += self.index_special_tokens[token] - else: - buffer.append(token) - if buffer: - text += self.sp_model.decode(buffer) - return text - - def decode_tokens(self, tokens: List[str]) -> str: - text = self.sp_model.DecodePieces(tokens) - return text - - def convert_token_to_id(self, token): - """ Converts a token (str) in an id using the vocab. 
""" - if token in self.special_tokens: - return self.special_tokens[token] - return self.sp_model.PieceToId(token) - - def convert_id_to_token(self, index): - """Converts an index (integer) in a token (str) using the vocab.""" - if index in self.index_special_tokens: - return self.index_special_tokens[index] - if index in [self.eos_id, self.bos_id, self.pad_id] or index < 0 or index > self.sp_model.vocab_size(): - return "" - return self.sp_model.IdToPiece(index) - - -class ChatGLMTokenizer(PreTrainedTokenizer): - vocab_files_names = {"vocab_file": "tokenizer.model"} - - model_input_names = ["input_ids", "attention_mask", "position_ids"] - - def __init__(self, vocab_file, padding_side="left", clean_up_tokenization_spaces=False, encode_special_tokens=False, - **kwargs): - self.name = "GLMTokenizer" - - self.vocab_file = vocab_file - self.tokenizer = SPTokenizer(vocab_file) - self.special_tokens = { - "": self.tokenizer.bos_id, - "": self.tokenizer.eos_id, - "": self.tokenizer.pad_id - } - self.encode_special_tokens = encode_special_tokens - super().__init__(padding_side=padding_side, clean_up_tokenization_spaces=clean_up_tokenization_spaces, - encode_special_tokens=encode_special_tokens, - **kwargs) - - def get_command(self, token): - if token in self.special_tokens: - return self.special_tokens[token] - assert token in self.tokenizer.special_tokens, f"{token} is not a special token for {self.name}" - return self.tokenizer.special_tokens[token] - - @property - def unk_token(self) -> str: - return "" - - @property - def pad_token(self) -> str: - return "" - - @property - def pad_token_id(self): - return self.get_command("") - - @property - def eos_token(self) -> str: - return "" - - @property - def eos_token_id(self): - return self.get_command("") - - @property - def vocab_size(self): - return self.tokenizer.n_words - - def get_vocab(self): - """ Returns vocab as a dict """ - vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)} - vocab.update(self.added_tokens_encoder) - return vocab - - def _tokenize(self, text, **kwargs): - return self.tokenizer.tokenize(text, encode_special_tokens=self.encode_special_tokens) - - def _convert_token_to_id(self, token): - """ Converts a token (str) in an id using the vocab. """ - return self.tokenizer.convert_token_to_id(token) - - def _convert_id_to_token(self, index): - """Converts an index (integer) in a token (str) using the vocab.""" - return self.tokenizer.convert_id_to_token(index) - - def convert_tokens_to_string(self, tokens: List[str]) -> str: - return self.tokenizer.decode_tokens(tokens) - - def save_vocabulary(self, save_directory, filename_prefix=None): - """ - Save the vocabulary and special tokens file to a directory. - - Args: - save_directory (`str`): - The directory in which to save the vocabulary. - filename_prefix (`str`, *optional*): - An optional prefix to add to the named of the saved files. - - Returns: - `Tuple(str)`: Paths to the files saved. 
- """ - if os.path.isdir(save_directory): - vocab_file = os.path.join( - save_directory, self.vocab_files_names["vocab_file"] - ) - else: - vocab_file = save_directory - - with open(self.vocab_file, 'rb') as fin: - proto_str = fin.read() - - with open(vocab_file, "wb") as writer: - writer.write(proto_str) - - return (vocab_file,) - - def get_prefix_tokens(self): - prefix_tokens = [self.get_command("[gMASK]"), self.get_command("sop")] - return prefix_tokens - - def build_single_message(self, role, metadata, message): - assert role in ["system", "user", "assistant", "observation"], role - role_tokens = [self.get_command(f"<|{role}|>")] + self.tokenizer.encode(f"{metadata}\n") - message_tokens = self.tokenizer.encode(message) - tokens = role_tokens + message_tokens - return tokens - - def build_chat_input(self, query, history=None, role="user"): - if history is None: - history = [] - input_ids = [] - for item in history: - content = item["content"] - if item["role"] == "system" and "tools" in item: - content = content + "\n" + json.dumps(item["tools"], indent=4, ensure_ascii=False) - input_ids.extend(self.build_single_message(item["role"], item.get("metadata", ""), content)) - input_ids.extend(self.build_single_message(role, "", query)) - input_ids.extend([self.get_command("<|assistant|>")]) - return self.batch_encode_plus([input_ids], return_tensors="pt", is_split_into_words=True) - - def build_inputs_with_special_tokens( - self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None - ) -> List[int]: - """ - Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and - adding special tokens. A BERT sequence has the following format: - - - single sequence: `[CLS] X [SEP]` - - pair of sequences: `[CLS] A [SEP] B [SEP]` - - Args: - token_ids_0 (`List[int]`): - List of IDs to which the special tokens will be added. - token_ids_1 (`List[int]`, *optional*): - Optional second list of IDs for sequence pairs. - - Returns: - `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. - """ - prefix_tokens = self.get_prefix_tokens() - token_ids_0 = prefix_tokens + token_ids_0 - if token_ids_1 is not None: - token_ids_0 = token_ids_0 + token_ids_1 + [self.get_command("")] - return token_ids_0 - - def _pad( - self, - encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], - max_length: Optional[int] = None, - padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, - pad_to_multiple_of: Optional[int] = None, - return_attention_mask: Optional[bool] = None, - ) -> dict: - """ - Pad encoded inputs (on left/right and up to predefined length or max length in the batch) - - Args: - encoded_inputs: - Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`). - max_length: maximum length of the returned list and optionally padding length (see below). - Will truncate by taking into account the special tokens. - padding_strategy: PaddingStrategy to use for padding. - - - PaddingStrategy.LONGEST Pad to the longest sequence in the batch - - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) - - PaddingStrategy.DO_NOT_PAD: Do not pad - The tokenizer padding sides are defined in self.padding_side: - - - 'left': pads on the left of the sequences - - 'right': pads on the right of the sequences - pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. 
- This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability - `>= 7.5` (Volta). - return_attention_mask: - (optional) Set to False to avoid returning attention mask (default: set to model specifics) - """ - # Load from model defaults - assert self.padding_side == "left" - - required_input = encoded_inputs[self.model_input_names[0]] - seq_length = len(required_input) - - if padding_strategy == PaddingStrategy.LONGEST: - max_length = len(required_input) - - if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): - max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of - - needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length - - # Initialize attention mask if not present. - if "attention_mask" not in encoded_inputs: - encoded_inputs["attention_mask"] = [1] * seq_length - - if "position_ids" not in encoded_inputs: - encoded_inputs["position_ids"] = list(range(seq_length)) - - if needs_to_be_padded: - difference = max_length - len(required_input) - - if "attention_mask" in encoded_inputs: - encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"] - if "position_ids" in encoded_inputs: - encoded_inputs["position_ids"] = [0] * difference + encoded_inputs["position_ids"] - encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input - - return encoded_inputs diff --git a/checkpoint-600/tokenizer.model b/checkpoint-600/tokenizer.model deleted file mode 100644 index 8a8007697b7cc3d3868dcffbbebf8c1f2bd690ba..0000000000000000000000000000000000000000 --- a/checkpoint-600/tokenizer.model +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e7dc4c393423b76e4373e5157ddc34803a0189ba96b21ddbb40269d31468a6f2 -size 1018370 diff --git a/checkpoint-600/tokenizer_config.json b/checkpoint-600/tokenizer_config.json deleted file mode 100644 index f0e543dcb5c184576e9e88e2c48b586290d71953..0000000000000000000000000000000000000000 --- a/checkpoint-600/tokenizer_config.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "added_tokens_decoder": { - "64795": { - "content": "<|user|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": true - }, - "64797": { - "content": "<|observation|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": true - } - }, - "additional_special_tokens": [ - "<|user|>", - "<|observation|>" - ], - "auto_map": { - "AutoTokenizer": [ - "tokenization_chatglm.ChatGLMTokenizer", - null - ] - }, - "clean_up_tokenization_spaces": false, - "do_lower_case": false, - "encode_special_tokens": false, - "eos_token": "", - "model_max_length": 1000000000000000019884624838656, - "pad_token": "", - "padding_side": "right", - "remove_space": false, - "split_special_tokens": false, - "tokenizer_class": "ChatGLMTokenizer", - "unk_token": "" -} diff --git a/checkpoint-600/trainer_state.json b/checkpoint-600/trainer_state.json deleted file mode 100644 index a0f146d013be443c5899780301bd74a540333b1f..0000000000000000000000000000000000000000 --- a/checkpoint-600/trainer_state.json +++ /dev/null @@ -1,741 +0,0 @@ -{ - "best_metric": null, - "best_model_checkpoint": null, - "epoch": 13.636363636363637, - "eval_steps": 500, - "global_step": 600, - "is_hyper_param_search": false, - "is_local_process_zero": true, - "is_world_process_zero": true, - "log_history": [ - { - "epoch": 0.11, 
- "learning_rate": 0.001999898043009433, - "loss": 4.5094, - "step": 5 - }, - { - "epoch": 0.23, - "learning_rate": 0.0019995921928281893, - "loss": 3.8047, - "step": 10 - }, - { - "epoch": 0.34, - "learning_rate": 0.001999082511823396, - "loss": 3.8813, - "step": 15 - }, - { - "epoch": 0.45, - "learning_rate": 0.0019983691039261358, - "loss": 3.7188, - "step": 20 - }, - { - "epoch": 0.57, - "learning_rate": 0.0019974521146102534, - "loss": 3.6695, - "step": 25 - }, - { - "epoch": 0.68, - "learning_rate": 0.001996331730862691, - "loss": 3.7078, - "step": 30 - }, - { - "epoch": 0.8, - "learning_rate": 0.0019950081811453595, - "loss": 3.6844, - "step": 35 - }, - { - "epoch": 0.91, - "learning_rate": 0.0019934817353485504, - "loss": 3.6961, - "step": 40 - }, - { - "epoch": 1.02, - "learning_rate": 0.0019917527047359027, - "loss": 3.5758, - "step": 45 - }, - { - "epoch": 1.14, - "learning_rate": 0.001989821441880933, - "loss": 3.4102, - "step": 50 - }, - { - "epoch": 1.25, - "learning_rate": 0.0019876883405951376, - "loss": 3.3984, - "step": 55 - }, - { - "epoch": 1.36, - "learning_rate": 0.001985353835847693, - "loss": 3.3602, - "step": 60 - }, - { - "epoch": 1.48, - "learning_rate": 0.0019828184036767556, - "loss": 3.4461, - "step": 65 - }, - { - "epoch": 1.59, - "learning_rate": 0.0019800825610923932, - "loss": 3.3461, - "step": 70 - }, - { - "epoch": 1.7, - "learning_rate": 0.0019771468659711597, - "loss": 3.4172, - "step": 75 - }, - { - "epoch": 1.82, - "learning_rate": 0.0019740119169423336, - "loss": 3.4359, - "step": 80 - }, - { - "epoch": 1.93, - "learning_rate": 0.0019706783532658523, - "loss": 3.5141, - "step": 85 - }, - { - "epoch": 2.05, - "learning_rate": 0.001967146854701957, - "loss": 3.2242, - "step": 90 - }, - { - "epoch": 2.16, - "learning_rate": 0.0019634181413725788, - "loss": 3.0227, - "step": 95 - }, - { - "epoch": 2.27, - "learning_rate": 0.0019594929736144974, - "loss": 2.8984, - "step": 100 - }, - { - "epoch": 2.39, - "learning_rate": 0.001955372151824297, - "loss": 3.0781, - "step": 105 - }, - { - "epoch": 2.5, - "learning_rate": 0.0019510565162951536, - "loss": 3.1203, - "step": 110 - }, - { - "epoch": 2.61, - "learning_rate": 0.00194654694704549, - "loss": 3.1828, - "step": 115 - }, - { - "epoch": 2.73, - "learning_rate": 0.0019418443636395248, - "loss": 3.0531, - "step": 120 - }, - { - "epoch": 2.84, - "learning_rate": 0.001936949724999762, - "loss": 3.1523, - "step": 125 - }, - { - "epoch": 2.95, - "learning_rate": 0.0019318640292114524, - "loss": 3.1156, - "step": 130 - }, - { - "epoch": 3.07, - "learning_rate": 0.0019265883133190713, - "loss": 2.7844, - "step": 135 - }, - { - "epoch": 3.18, - "learning_rate": 0.0019211236531148502, - "loss": 2.6711, - "step": 140 - }, - { - "epoch": 3.3, - "learning_rate": 0.0019154711629194062, - "loss": 2.6609, - "step": 145 - }, - { - "epoch": 3.41, - "learning_rate": 0.0019096319953545184, - "loss": 2.7531, - "step": 150 - }, - { - "epoch": 3.52, - "learning_rate": 0.0019036073411080917, - "loss": 2.7977, - "step": 155 - }, - { - "epoch": 3.64, - "learning_rate": 0.0018973984286913585, - "loss": 2.7914, - "step": 160 - }, - { - "epoch": 3.75, - "learning_rate": 0.0018910065241883678, - "loss": 2.8188, - "step": 165 - }, - { - "epoch": 3.86, - "learning_rate": 0.0018844329309978143, - "loss": 2.8945, - "step": 170 - }, - { - "epoch": 3.98, - "learning_rate": 0.0018776789895672556, - "loss": 2.8883, - "step": 175 - }, - { - "epoch": 4.09, - "learning_rate": 0.0018707460771197773, - "loss": 2.4617, - "step": 180 - }, - { - 
"epoch": 4.2, - "learning_rate": 0.001863635607373157, - "loss": 2.4633, - "step": 185 - }, - { - "epoch": 4.32, - "learning_rate": 0.001856349030251589, - "loss": 2.5094, - "step": 190 - }, - { - "epoch": 4.43, - "learning_rate": 0.0018488878315900226, - "loss": 2.432, - "step": 195 - }, - { - "epoch": 4.55, - "learning_rate": 0.0018412535328311812, - "loss": 2.5648, - "step": 200 - }, - { - "epoch": 4.66, - "learning_rate": 0.0018334476907153176, - "loss": 2.4836, - "step": 205 - }, - { - "epoch": 4.77, - "learning_rate": 0.001825471896962774, - "loss": 2.6617, - "step": 210 - }, - { - "epoch": 4.89, - "learning_rate": 0.0018173277779494068, - "loss": 2.6734, - "step": 215 - }, - { - "epoch": 5.0, - "learning_rate": 0.0018090169943749475, - "loss": 2.6742, - "step": 220 - }, - { - "epoch": 5.11, - "learning_rate": 0.0018005412409243604, - "loss": 2.1379, - "step": 225 - }, - { - "epoch": 5.23, - "learning_rate": 0.0017919022459222751, - "loss": 2.1508, - "step": 230 - }, - { - "epoch": 5.34, - "learning_rate": 0.0017831017709805555, - "loss": 2.2582, - "step": 235 - }, - { - "epoch": 5.45, - "learning_rate": 0.0017741416106390826, - "loss": 2.2367, - "step": 240 - }, - { - "epoch": 5.57, - "learning_rate": 0.0017650235919998232, - "loss": 2.325, - "step": 245 - }, - { - "epoch": 5.68, - "learning_rate": 0.0017557495743542584, - "loss": 2.2703, - "step": 250 - }, - { - "epoch": 5.8, - "learning_rate": 0.0017463214488042471, - "loss": 2.3703, - "step": 255 - }, - { - "epoch": 5.91, - "learning_rate": 0.001736741137876405, - "loss": 2.4648, - "step": 260 - }, - { - "epoch": 6.02, - "learning_rate": 0.0017270105951300739, - "loss": 2.2734, - "step": 265 - }, - { - "epoch": 6.14, - "learning_rate": 0.0017171318047589637, - "loss": 1.9898, - "step": 270 - }, - { - "epoch": 6.25, - "learning_rate": 0.0017071067811865474, - "loss": 1.9816, - "step": 275 - }, - { - "epoch": 6.36, - "learning_rate": 0.0016969375686552938, - "loss": 1.9648, - "step": 280 - }, - { - "epoch": 6.48, - "learning_rate": 0.0016866262408098134, - "loss": 2.1672, - "step": 285 - }, - { - "epoch": 6.59, - "learning_rate": 0.0016761749002740195, - "loss": 2.0074, - "step": 290 - }, - { - "epoch": 6.7, - "learning_rate": 0.0016655856782223683, - "loss": 2.1598, - "step": 295 - }, - { - "epoch": 6.82, - "learning_rate": 0.0016548607339452852, - "loss": 2.0996, - "step": 300 - }, - { - "epoch": 6.93, - "learning_rate": 0.0016440022544088554, - "loss": 2.1434, - "step": 305 - }, - { - "epoch": 7.05, - "learning_rate": 0.0016330124538088703, - "loss": 2.0699, - "step": 310 - }, - { - "epoch": 7.16, - "learning_rate": 0.0016218935731193223, - "loss": 1.7312, - "step": 315 - }, - { - "epoch": 7.27, - "learning_rate": 0.0016106478796354383, - "loss": 1.7799, - "step": 320 - }, - { - "epoch": 7.39, - "learning_rate": 0.0015992776665113468, - "loss": 1.7008, - "step": 325 - }, - { - "epoch": 7.5, - "learning_rate": 0.0015877852522924731, - "loss": 1.8969, - "step": 330 - }, - { - "epoch": 7.61, - "learning_rate": 0.0015761729804427528, - "loss": 1.8156, - "step": 335 - }, - { - "epoch": 7.73, - "learning_rate": 0.0015644432188667695, - "loss": 1.9336, - "step": 340 - }, - { - "epoch": 7.84, - "learning_rate": 0.0015525983594269026, - "loss": 1.9918, - "step": 345 - }, - { - "epoch": 7.95, - "learning_rate": 0.0015406408174555976, - "loss": 2.0055, - "step": 350 - }, - { - "epoch": 8.07, - "learning_rate": 0.0015285730312628418, - "loss": 1.7168, - "step": 355 - }, - { - "epoch": 8.18, - "learning_rate": 0.001516397461638962, - "loss": 
1.5531, - "step": 360 - }, - { - "epoch": 8.3, - "learning_rate": 0.001504116591352832, - "loss": 1.5922, - "step": 365 - }, - { - "epoch": 8.41, - "learning_rate": 0.001491732924645604, - "loss": 1.618, - "step": 370 - }, - { - "epoch": 8.52, - "learning_rate": 0.0014792489867200569, - "loss": 1.6738, - "step": 375 - }, - { - "epoch": 8.64, - "learning_rate": 0.0014666673232256737, - "loss": 1.7461, - "step": 380 - }, - { - "epoch": 8.75, - "learning_rate": 0.0014539904997395467, - "loss": 1.6746, - "step": 385 - }, - { - "epoch": 8.86, - "learning_rate": 0.0014412211012432212, - "loss": 1.7711, - "step": 390 - }, - { - "epoch": 8.98, - "learning_rate": 0.0014283617315955814, - "loss": 1.8387, - "step": 395 - }, - { - "epoch": 9.09, - "learning_rate": 0.0014154150130018866, - "loss": 1.475, - "step": 400 - }, - { - "epoch": 9.2, - "learning_rate": 0.001402383585479068, - "loss": 1.4523, - "step": 405 - }, - { - "epoch": 9.32, - "learning_rate": 0.0013892701063173917, - "loss": 1.4812, - "step": 410 - }, - { - "epoch": 9.43, - "learning_rate": 0.0013760772495385997, - "loss": 1.525, - "step": 415 - }, - { - "epoch": 9.55, - "learning_rate": 0.001362807705350641, - "loss": 1.398, - "step": 420 - }, - { - "epoch": 9.66, - "learning_rate": 0.0013494641795990985, - "loss": 1.4477, - "step": 425 - }, - { - "epoch": 9.77, - "learning_rate": 0.00133604939321543, - "loss": 1.5801, - "step": 430 - }, - { - "epoch": 9.89, - "learning_rate": 0.0013225660816621341, - "loss": 1.6422, - "step": 435 - }, - { - "epoch": 10.0, - "learning_rate": 0.0013090169943749475, - "loss": 1.5535, - "step": 440 - }, - { - "epoch": 10.11, - "learning_rate": 0.0012954048942022001, - "loss": 1.2324, - "step": 445 - }, - { - "epoch": 10.23, - "learning_rate": 0.0012817325568414298, - "loss": 1.2613, - "step": 450 - }, - { - "epoch": 10.34, - "learning_rate": 0.001268002770273379, - "loss": 1.3293, - "step": 455 - }, - { - "epoch": 10.45, - "learning_rate": 0.0012542183341934872, - "loss": 1.2852, - "step": 460 - }, - { - "epoch": 10.57, - "learning_rate": 0.0012403820594409924, - "loss": 1.3295, - "step": 465 - }, - { - "epoch": 10.68, - "learning_rate": 0.0012264967674257645, - "loss": 1.3287, - "step": 470 - }, - { - "epoch": 10.8, - "learning_rate": 0.0012125652895529767, - "loss": 1.3566, - "step": 475 - }, - { - "epoch": 10.91, - "learning_rate": 0.0011985904666457455, - "loss": 1.4414, - "step": 480 - }, - { - "epoch": 11.02, - "learning_rate": 0.0011845751483658454, - "loss": 1.3695, - "step": 485 - }, - { - "epoch": 11.14, - "learning_rate": 0.0011705221926326238, - "loss": 1.1363, - "step": 490 - }, - { - "epoch": 11.25, - "learning_rate": 0.001156434465040231, - "loss": 1.1354, - "step": 495 - }, - { - "epoch": 11.36, - "learning_rate": 0.0011423148382732854, - "loss": 1.0725, - "step": 500 - }, - { - "epoch": 11.48, - "learning_rate": 0.001128166191521093, - "loss": 1.1754, - "step": 505 - }, - { - "epoch": 11.59, - "learning_rate": 0.0011139914098905405, - "loss": 1.1848, - "step": 510 - }, - { - "epoch": 11.7, - "learning_rate": 0.0010997933838177826, - "loss": 1.2354, - "step": 515 - }, - { - "epoch": 11.82, - "learning_rate": 0.0010855750084788399, - "loss": 1.1984, - "step": 520 - }, - { - "epoch": 11.93, - "learning_rate": 0.0010713391831992322, - "loss": 1.2666, - "step": 525 - }, - { - "epoch": 12.05, - "learning_rate": 0.001057088810862768, - "loss": 1.1408, - "step": 530 - }, - { - "epoch": 12.16, - "learning_rate": 0.0010428267973196027, - "loss": 0.9385, - "step": 535 - }, - { - "epoch": 12.27, - 
"learning_rate": 0.0010285560507936962, - "loss": 1.0158, - "step": 540 - }, - { - "epoch": 12.39, - "learning_rate": 0.0010142794812897874, - "loss": 0.9936, - "step": 545 - }, - { - "epoch": 12.5, - "learning_rate": 0.001, - "loss": 0.9891, - "step": 550 - }, - { - "epoch": 12.61, - "learning_rate": 0.000985720518710213, - "loss": 1.0684, - "step": 555 - }, - { - "epoch": 12.73, - "learning_rate": 0.0009714439492063038, - "loss": 1.076, - "step": 560 - }, - { - "epoch": 12.84, - "learning_rate": 0.0009571732026803976, - "loss": 1.0609, - "step": 565 - }, - { - "epoch": 12.95, - "learning_rate": 0.000942911189137232, - "loss": 1.1297, - "step": 570 - }, - { - "epoch": 13.07, - "learning_rate": 0.0009286608168007677, - "loss": 0.9342, - "step": 575 - }, - { - "epoch": 13.18, - "learning_rate": 0.0009144249915211606, - "loss": 0.8511, - "step": 580 - }, - { - "epoch": 13.3, - "learning_rate": 0.0009002066161822172, - "loss": 0.8336, - "step": 585 - }, - { - "epoch": 13.41, - "learning_rate": 0.0008860085901094594, - "loss": 0.8652, - "step": 590 - }, - { - "epoch": 13.52, - "learning_rate": 0.0008718338084789072, - "loss": 0.9744, - "step": 595 - }, - { - "epoch": 13.64, - "learning_rate": 0.000857685161726715, - "loss": 0.9006, - "step": 600 - } - ], - "logging_steps": 5, - "max_steps": 1100, - "num_input_tokens_seen": 0, - "num_train_epochs": 25, - "save_steps": 100, - "total_flos": 3.0530793988521984e+17, - "train_batch_size": 4, - "trial_name": null, - "trial_params": null -} diff --git a/checkpoint-600/training_args.bin b/checkpoint-600/training_args.bin deleted file mode 100644 index ff8dbcdca96337fe706e3b8a5e49365cea791f82..0000000000000000000000000000000000000000 --- a/checkpoint-600/training_args.bin +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:fef6a3ae006ec4c51dbcf0a3e569288ca5ab1bbc97f41768934c32153b03277c -size 4920 diff --git a/checkpoint-700/README.md b/checkpoint-700/README.md deleted file mode 100644 index 0a4640bc0bab946c21e07f36639d991fc5d9f684..0000000000000000000000000000000000000000 --- a/checkpoint-700/README.md +++ /dev/null @@ -1,204 +0,0 @@ ---- -library_name: peft -base_model: /root/chatglm3-6b ---- - -# Model Card for Model ID - - - - - -## Model Details - -### Model Description - - - - - -- **Developed by:** [More Information Needed] -- **Funded by [optional]:** [More Information Needed] -- **Shared by [optional]:** [More Information Needed] -- **Model type:** [More Information Needed] -- **Language(s) (NLP):** [More Information Needed] -- **License:** [More Information Needed] -- **Finetuned from model [optional]:** [More Information Needed] - -### Model Sources [optional] - - - -- **Repository:** [More Information Needed] -- **Paper [optional]:** [More Information Needed] -- **Demo [optional]:** [More Information Needed] - -## Uses - - - -### Direct Use - - - -[More Information Needed] - -### Downstream Use [optional] - - - -[More Information Needed] - -### Out-of-Scope Use - - - -[More Information Needed] - -## Bias, Risks, and Limitations - - - -[More Information Needed] - -### Recommendations - - - -Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. - -## How to Get Started with the Model - -Use the code below to get started with the model. 
- -[More Information Needed] - -## Training Details - -### Training Data - - - -[More Information Needed] - -### Training Procedure - - - -#### Preprocessing [optional] - -[More Information Needed] - - -#### Training Hyperparameters - -- **Training regime:** [More Information Needed] - -#### Speeds, Sizes, Times [optional] - - - -[More Information Needed] - -## Evaluation - - - -### Testing Data, Factors & Metrics - -#### Testing Data - - - -[More Information Needed] - -#### Factors - - - -[More Information Needed] - -#### Metrics - - - -[More Information Needed] - -### Results - -[More Information Needed] - -#### Summary - - - -## Model Examination [optional] - - - -[More Information Needed] - -## Environmental Impact - - - -Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - -- **Hardware Type:** [More Information Needed] -- **Hours used:** [More Information Needed] -- **Cloud Provider:** [More Information Needed] -- **Compute Region:** [More Information Needed] -- **Carbon Emitted:** [More Information Needed] - -## Technical Specifications [optional] - -### Model Architecture and Objective - -[More Information Needed] - -### Compute Infrastructure - -[More Information Needed] - -#### Hardware - -[More Information Needed] - -#### Software - -[More Information Needed] - -## Citation [optional] - - - -**BibTeX:** - -[More Information Needed] - -**APA:** - -[More Information Needed] - -## Glossary [optional] - - - -[More Information Needed] - -## More Information [optional] - -[More Information Needed] - -## Model Card Authors [optional] - -[More Information Needed] - -## Model Card Contact - -[More Information Needed] - - -### Framework versions - -- PEFT 0.7.1 \ No newline at end of file diff --git a/checkpoint-700/adapter_config.json b/checkpoint-700/adapter_config.json deleted file mode 100644 index e437b533e257864a38c04ed024f90cab5eebcd8d..0000000000000000000000000000000000000000 --- a/checkpoint-700/adapter_config.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "alpha_pattern": {}, - "auto_mapping": null, - "base_model_name_or_path": "/root/chatglm3-6b", - "bias": "none", - "fan_in_fan_out": false, - "inference_mode": true, - "init_lora_weights": true, - "layers_pattern": null, - "layers_to_transform": null, - "loftq_config": {}, - "lora_alpha": 64.0, - "lora_dropout": 0.1, - "megatron_config": null, - "megatron_core": "megatron.core", - "modules_to_save": null, - "peft_type": "LORA", - "r": 32, - "rank_pattern": {}, - "revision": null, - "target_modules": [ - "query_key_value" - ], - "task_type": "CAUSAL_LM" -} \ No newline at end of file diff --git a/checkpoint-700/adapter_model.safetensors b/checkpoint-700/adapter_model.safetensors deleted file mode 100644 index 4767f1582b9ee8b60a97766601e351cf7cea6d6e..0000000000000000000000000000000000000000 --- a/checkpoint-700/adapter_model.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:05656560aa7a8f94e8f2bf807a12c24e19dacd8d31a96f306a1433fe40d79ef5 -size 31204248 diff --git a/checkpoint-700/optimizer.pt b/checkpoint-700/optimizer.pt deleted file mode 100644 index f756d30f846da793d15ac9e15efb1991a6c7a539..0000000000000000000000000000000000000000 --- a/checkpoint-700/optimizer.pt +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:be7ec8ec24f42c78b7996ef8b2f525221fc280463d56dd2b8462fe452c9a8d9a -size 62437882 diff --git 
a/checkpoint-700/rng_state.pth b/checkpoint-700/rng_state.pth deleted file mode 100644 index 0a4733dc4ba242b62110eecf221e730d1e0ed237..0000000000000000000000000000000000000000 --- a/checkpoint-700/rng_state.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:3d30c0642c2a797dc3cd110d33cefef65ae7eed01705207c0ce1a5f0e3e64fff -size 14244 diff --git a/checkpoint-700/scheduler.pt b/checkpoint-700/scheduler.pt deleted file mode 100644 index c78d3b24500c24f1c1dc29b079138a5943b138d1..0000000000000000000000000000000000000000 --- a/checkpoint-700/scheduler.pt +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:da3efcc637a9bb9c201f18cb1b9c77a473adaa751a7764564db5488980f490fc -size 1064 diff --git a/checkpoint-700/special_tokens_map.json b/checkpoint-700/special_tokens_map.json deleted file mode 100644 index dd02cd16ef3e1cfed3ce0f8cd09b983412317a48..0000000000000000000000000000000000000000 --- a/checkpoint-700/special_tokens_map.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "additional_special_tokens": [ - { - "content": "<|user|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false - }, - { - "content": "<|observation|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false - } - ] -} diff --git a/checkpoint-700/tokenization_chatglm.py b/checkpoint-700/tokenization_chatglm.py deleted file mode 100644 index 862e8f9a75bc874741cababc3b352cbbfe3611ad..0000000000000000000000000000000000000000 --- a/checkpoint-700/tokenization_chatglm.py +++ /dev/null @@ -1,300 +0,0 @@ -import json -import os -import re -from typing import List, Optional, Union, Dict -from sentencepiece import SentencePieceProcessor -from transformers import PreTrainedTokenizer -from transformers.utils import logging, PaddingStrategy -from transformers.tokenization_utils_base import EncodedInput, BatchEncoding - - -class SPTokenizer: - def __init__(self, model_path: str): - # reload tokenizer - assert os.path.isfile(model_path), model_path - self.sp_model = SentencePieceProcessor(model_file=model_path) - - # BOS / EOS token IDs - self.n_words: int = self.sp_model.vocab_size() - self.bos_id: int = self.sp_model.bos_id() - self.eos_id: int = self.sp_model.eos_id() - self.pad_id: int = self.sp_model.unk_id() - assert self.sp_model.vocab_size() == self.sp_model.get_piece_size() - - role_special_tokens = ["<|system|>", "<|user|>", "<|assistant|>", "<|observation|>"] - special_tokens = ["[MASK]", "[gMASK]", "[sMASK]", "sop", "eop"] + role_special_tokens - self.special_tokens = {} - self.index_special_tokens = {} - for token in special_tokens: - self.special_tokens[token] = self.n_words - self.index_special_tokens[self.n_words] = token - self.n_words += 1 - self.role_special_token_expression = "|".join([re.escape(token) for token in role_special_tokens]) - - def tokenize(self, s: str, encode_special_tokens=False): - if encode_special_tokens: - last_index = 0 - t = [] - for match in re.finditer(self.role_special_token_expression, s): - if last_index < match.start(): - t.extend(self.sp_model.EncodeAsPieces(s[last_index:match.start()])) - t.append(s[match.start():match.end()]) - last_index = match.end() - if last_index < len(s): - t.extend(self.sp_model.EncodeAsPieces(s[last_index:])) - return t - else: - return self.sp_model.EncodeAsPieces(s) - - def encode(self, s: str, bos: bool = False, eos: bool = False) -> List[int]: - assert type(s) is str - t = self.sp_model.encode(s) - if bos: - t = [self.bos_id] + t - if eos: - 
t = t + [self.eos_id] - return t - - def decode(self, t: List[int]) -> str: - text, buffer = "", [] - for token in t: - if token in self.index_special_tokens: - if buffer: - text += self.sp_model.decode(buffer) - buffer = [] - text += self.index_special_tokens[token] - else: - buffer.append(token) - if buffer: - text += self.sp_model.decode(buffer) - return text - - def decode_tokens(self, tokens: List[str]) -> str: - text = self.sp_model.DecodePieces(tokens) - return text - - def convert_token_to_id(self, token): - """ Converts a token (str) in an id using the vocab. """ - if token in self.special_tokens: - return self.special_tokens[token] - return self.sp_model.PieceToId(token) - - def convert_id_to_token(self, index): - """Converts an index (integer) in a token (str) using the vocab.""" - if index in self.index_special_tokens: - return self.index_special_tokens[index] - if index in [self.eos_id, self.bos_id, self.pad_id] or index < 0 or index > self.sp_model.vocab_size(): - return "" - return self.sp_model.IdToPiece(index) - - -class ChatGLMTokenizer(PreTrainedTokenizer): - vocab_files_names = {"vocab_file": "tokenizer.model"} - - model_input_names = ["input_ids", "attention_mask", "position_ids"] - - def __init__(self, vocab_file, padding_side="left", clean_up_tokenization_spaces=False, encode_special_tokens=False, - **kwargs): - self.name = "GLMTokenizer" - - self.vocab_file = vocab_file - self.tokenizer = SPTokenizer(vocab_file) - self.special_tokens = { - "": self.tokenizer.bos_id, - "": self.tokenizer.eos_id, - "": self.tokenizer.pad_id - } - self.encode_special_tokens = encode_special_tokens - super().__init__(padding_side=padding_side, clean_up_tokenization_spaces=clean_up_tokenization_spaces, - encode_special_tokens=encode_special_tokens, - **kwargs) - - def get_command(self, token): - if token in self.special_tokens: - return self.special_tokens[token] - assert token in self.tokenizer.special_tokens, f"{token} is not a special token for {self.name}" - return self.tokenizer.special_tokens[token] - - @property - def unk_token(self) -> str: - return "" - - @property - def pad_token(self) -> str: - return "" - - @property - def pad_token_id(self): - return self.get_command("") - - @property - def eos_token(self) -> str: - return "" - - @property - def eos_token_id(self): - return self.get_command("") - - @property - def vocab_size(self): - return self.tokenizer.n_words - - def get_vocab(self): - """ Returns vocab as a dict """ - vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)} - vocab.update(self.added_tokens_encoder) - return vocab - - def _tokenize(self, text, **kwargs): - return self.tokenizer.tokenize(text, encode_special_tokens=self.encode_special_tokens) - - def _convert_token_to_id(self, token): - """ Converts a token (str) in an id using the vocab. """ - return self.tokenizer.convert_token_to_id(token) - - def _convert_id_to_token(self, index): - """Converts an index (integer) in a token (str) using the vocab.""" - return self.tokenizer.convert_id_to_token(index) - - def convert_tokens_to_string(self, tokens: List[str]) -> str: - return self.tokenizer.decode_tokens(tokens) - - def save_vocabulary(self, save_directory, filename_prefix=None): - """ - Save the vocabulary and special tokens file to a directory. - - Args: - save_directory (`str`): - The directory in which to save the vocabulary. - filename_prefix (`str`, *optional*): - An optional prefix to add to the named of the saved files. - - Returns: - `Tuple(str)`: Paths to the files saved. 
- """ - if os.path.isdir(save_directory): - vocab_file = os.path.join( - save_directory, self.vocab_files_names["vocab_file"] - ) - else: - vocab_file = save_directory - - with open(self.vocab_file, 'rb') as fin: - proto_str = fin.read() - - with open(vocab_file, "wb") as writer: - writer.write(proto_str) - - return (vocab_file,) - - def get_prefix_tokens(self): - prefix_tokens = [self.get_command("[gMASK]"), self.get_command("sop")] - return prefix_tokens - - def build_single_message(self, role, metadata, message): - assert role in ["system", "user", "assistant", "observation"], role - role_tokens = [self.get_command(f"<|{role}|>")] + self.tokenizer.encode(f"{metadata}\n") - message_tokens = self.tokenizer.encode(message) - tokens = role_tokens + message_tokens - return tokens - - def build_chat_input(self, query, history=None, role="user"): - if history is None: - history = [] - input_ids = [] - for item in history: - content = item["content"] - if item["role"] == "system" and "tools" in item: - content = content + "\n" + json.dumps(item["tools"], indent=4, ensure_ascii=False) - input_ids.extend(self.build_single_message(item["role"], item.get("metadata", ""), content)) - input_ids.extend(self.build_single_message(role, "", query)) - input_ids.extend([self.get_command("<|assistant|>")]) - return self.batch_encode_plus([input_ids], return_tensors="pt", is_split_into_words=True) - - def build_inputs_with_special_tokens( - self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None - ) -> List[int]: - """ - Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and - adding special tokens. A BERT sequence has the following format: - - - single sequence: `[CLS] X [SEP]` - - pair of sequences: `[CLS] A [SEP] B [SEP]` - - Args: - token_ids_0 (`List[int]`): - List of IDs to which the special tokens will be added. - token_ids_1 (`List[int]`, *optional*): - Optional second list of IDs for sequence pairs. - - Returns: - `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. - """ - prefix_tokens = self.get_prefix_tokens() - token_ids_0 = prefix_tokens + token_ids_0 - if token_ids_1 is not None: - token_ids_0 = token_ids_0 + token_ids_1 + [self.get_command("")] - return token_ids_0 - - def _pad( - self, - encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], - max_length: Optional[int] = None, - padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, - pad_to_multiple_of: Optional[int] = None, - return_attention_mask: Optional[bool] = None, - ) -> dict: - """ - Pad encoded inputs (on left/right and up to predefined length or max length in the batch) - - Args: - encoded_inputs: - Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`). - max_length: maximum length of the returned list and optionally padding length (see below). - Will truncate by taking into account the special tokens. - padding_strategy: PaddingStrategy to use for padding. - - - PaddingStrategy.LONGEST Pad to the longest sequence in the batch - - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) - - PaddingStrategy.DO_NOT_PAD: Do not pad - The tokenizer padding sides are defined in self.padding_side: - - - 'left': pads on the left of the sequences - - 'right': pads on the right of the sequences - pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. 
- This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability - `>= 7.5` (Volta). - return_attention_mask: - (optional) Set to False to avoid returning attention mask (default: set to model specifics) - """ - # Load from model defaults - assert self.padding_side == "left" - - required_input = encoded_inputs[self.model_input_names[0]] - seq_length = len(required_input) - - if padding_strategy == PaddingStrategy.LONGEST: - max_length = len(required_input) - - if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): - max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of - - needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length - - # Initialize attention mask if not present. - if "attention_mask" not in encoded_inputs: - encoded_inputs["attention_mask"] = [1] * seq_length - - if "position_ids" not in encoded_inputs: - encoded_inputs["position_ids"] = list(range(seq_length)) - - if needs_to_be_padded: - difference = max_length - len(required_input) - - if "attention_mask" in encoded_inputs: - encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"] - if "position_ids" in encoded_inputs: - encoded_inputs["position_ids"] = [0] * difference + encoded_inputs["position_ids"] - encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input - - return encoded_inputs diff --git a/checkpoint-700/tokenizer.model b/checkpoint-700/tokenizer.model deleted file mode 100644 index 8a8007697b7cc3d3868dcffbbebf8c1f2bd690ba..0000000000000000000000000000000000000000 --- a/checkpoint-700/tokenizer.model +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e7dc4c393423b76e4373e5157ddc34803a0189ba96b21ddbb40269d31468a6f2 -size 1018370 diff --git a/checkpoint-700/tokenizer_config.json b/checkpoint-700/tokenizer_config.json deleted file mode 100644 index f0e543dcb5c184576e9e88e2c48b586290d71953..0000000000000000000000000000000000000000 --- a/checkpoint-700/tokenizer_config.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "added_tokens_decoder": { - "64795": { - "content": "<|user|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": true - }, - "64797": { - "content": "<|observation|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": true - } - }, - "additional_special_tokens": [ - "<|user|>", - "<|observation|>" - ], - "auto_map": { - "AutoTokenizer": [ - "tokenization_chatglm.ChatGLMTokenizer", - null - ] - }, - "clean_up_tokenization_spaces": false, - "do_lower_case": false, - "encode_special_tokens": false, - "eos_token": "", - "model_max_length": 1000000000000000019884624838656, - "pad_token": "", - "padding_side": "right", - "remove_space": false, - "split_special_tokens": false, - "tokenizer_class": "ChatGLMTokenizer", - "unk_token": "" -} diff --git a/checkpoint-700/trainer_state.json b/checkpoint-700/trainer_state.json deleted file mode 100644 index e5a0a5376acc30e969b68dd118c4cdb5fefdf06d..0000000000000000000000000000000000000000 --- a/checkpoint-700/trainer_state.json +++ /dev/null @@ -1,861 +0,0 @@ -{ - "best_metric": null, - "best_model_checkpoint": null, - "epoch": 15.909090909090908, - "eval_steps": 500, - "global_step": 700, - "is_hyper_param_search": false, - "is_local_process_zero": true, - "is_world_process_zero": true, - "log_history": [ - { - "epoch": 0.11, 
- "learning_rate": 0.001999898043009433, - "loss": 4.5094, - "step": 5 - }, - { - "epoch": 0.23, - "learning_rate": 0.0019995921928281893, - "loss": 3.8047, - "step": 10 - }, - { - "epoch": 0.34, - "learning_rate": 0.001999082511823396, - "loss": 3.8813, - "step": 15 - }, - { - "epoch": 0.45, - "learning_rate": 0.0019983691039261358, - "loss": 3.7188, - "step": 20 - }, - { - "epoch": 0.57, - "learning_rate": 0.0019974521146102534, - "loss": 3.6695, - "step": 25 - }, - { - "epoch": 0.68, - "learning_rate": 0.001996331730862691, - "loss": 3.7078, - "step": 30 - }, - { - "epoch": 0.8, - "learning_rate": 0.0019950081811453595, - "loss": 3.6844, - "step": 35 - }, - { - "epoch": 0.91, - "learning_rate": 0.0019934817353485504, - "loss": 3.6961, - "step": 40 - }, - { - "epoch": 1.02, - "learning_rate": 0.0019917527047359027, - "loss": 3.5758, - "step": 45 - }, - { - "epoch": 1.14, - "learning_rate": 0.001989821441880933, - "loss": 3.4102, - "step": 50 - }, - { - "epoch": 1.25, - "learning_rate": 0.0019876883405951376, - "loss": 3.3984, - "step": 55 - }, - { - "epoch": 1.36, - "learning_rate": 0.001985353835847693, - "loss": 3.3602, - "step": 60 - }, - { - "epoch": 1.48, - "learning_rate": 0.0019828184036767556, - "loss": 3.4461, - "step": 65 - }, - { - "epoch": 1.59, - "learning_rate": 0.0019800825610923932, - "loss": 3.3461, - "step": 70 - }, - { - "epoch": 1.7, - "learning_rate": 0.0019771468659711597, - "loss": 3.4172, - "step": 75 - }, - { - "epoch": 1.82, - "learning_rate": 0.0019740119169423336, - "loss": 3.4359, - "step": 80 - }, - { - "epoch": 1.93, - "learning_rate": 0.0019706783532658523, - "loss": 3.5141, - "step": 85 - }, - { - "epoch": 2.05, - "learning_rate": 0.001967146854701957, - "loss": 3.2242, - "step": 90 - }, - { - "epoch": 2.16, - "learning_rate": 0.0019634181413725788, - "loss": 3.0227, - "step": 95 - }, - { - "epoch": 2.27, - "learning_rate": 0.0019594929736144974, - "loss": 2.8984, - "step": 100 - }, - { - "epoch": 2.39, - "learning_rate": 0.001955372151824297, - "loss": 3.0781, - "step": 105 - }, - { - "epoch": 2.5, - "learning_rate": 0.0019510565162951536, - "loss": 3.1203, - "step": 110 - }, - { - "epoch": 2.61, - "learning_rate": 0.00194654694704549, - "loss": 3.1828, - "step": 115 - }, - { - "epoch": 2.73, - "learning_rate": 0.0019418443636395248, - "loss": 3.0531, - "step": 120 - }, - { - "epoch": 2.84, - "learning_rate": 0.001936949724999762, - "loss": 3.1523, - "step": 125 - }, - { - "epoch": 2.95, - "learning_rate": 0.0019318640292114524, - "loss": 3.1156, - "step": 130 - }, - { - "epoch": 3.07, - "learning_rate": 0.0019265883133190713, - "loss": 2.7844, - "step": 135 - }, - { - "epoch": 3.18, - "learning_rate": 0.0019211236531148502, - "loss": 2.6711, - "step": 140 - }, - { - "epoch": 3.3, - "learning_rate": 0.0019154711629194062, - "loss": 2.6609, - "step": 145 - }, - { - "epoch": 3.41, - "learning_rate": 0.0019096319953545184, - "loss": 2.7531, - "step": 150 - }, - { - "epoch": 3.52, - "learning_rate": 0.0019036073411080917, - "loss": 2.7977, - "step": 155 - }, - { - "epoch": 3.64, - "learning_rate": 0.0018973984286913585, - "loss": 2.7914, - "step": 160 - }, - { - "epoch": 3.75, - "learning_rate": 0.0018910065241883678, - "loss": 2.8188, - "step": 165 - }, - { - "epoch": 3.86, - "learning_rate": 0.0018844329309978143, - "loss": 2.8945, - "step": 170 - }, - { - "epoch": 3.98, - "learning_rate": 0.0018776789895672556, - "loss": 2.8883, - "step": 175 - }, - { - "epoch": 4.09, - "learning_rate": 0.0018707460771197773, - "loss": 2.4617, - "step": 180 - }, - { - 
"epoch": 4.2, - "learning_rate": 0.001863635607373157, - "loss": 2.4633, - "step": 185 - }, - { - "epoch": 4.32, - "learning_rate": 0.001856349030251589, - "loss": 2.5094, - "step": 190 - }, - { - "epoch": 4.43, - "learning_rate": 0.0018488878315900226, - "loss": 2.432, - "step": 195 - }, - { - "epoch": 4.55, - "learning_rate": 0.0018412535328311812, - "loss": 2.5648, - "step": 200 - }, - { - "epoch": 4.66, - "learning_rate": 0.0018334476907153176, - "loss": 2.4836, - "step": 205 - }, - { - "epoch": 4.77, - "learning_rate": 0.001825471896962774, - "loss": 2.6617, - "step": 210 - }, - { - "epoch": 4.89, - "learning_rate": 0.0018173277779494068, - "loss": 2.6734, - "step": 215 - }, - { - "epoch": 5.0, - "learning_rate": 0.0018090169943749475, - "loss": 2.6742, - "step": 220 - }, - { - "epoch": 5.11, - "learning_rate": 0.0018005412409243604, - "loss": 2.1379, - "step": 225 - }, - { - "epoch": 5.23, - "learning_rate": 0.0017919022459222751, - "loss": 2.1508, - "step": 230 - }, - { - "epoch": 5.34, - "learning_rate": 0.0017831017709805555, - "loss": 2.2582, - "step": 235 - }, - { - "epoch": 5.45, - "learning_rate": 0.0017741416106390826, - "loss": 2.2367, - "step": 240 - }, - { - "epoch": 5.57, - "learning_rate": 0.0017650235919998232, - "loss": 2.325, - "step": 245 - }, - { - "epoch": 5.68, - "learning_rate": 0.0017557495743542584, - "loss": 2.2703, - "step": 250 - }, - { - "epoch": 5.8, - "learning_rate": 0.0017463214488042471, - "loss": 2.3703, - "step": 255 - }, - { - "epoch": 5.91, - "learning_rate": 0.001736741137876405, - "loss": 2.4648, - "step": 260 - }, - { - "epoch": 6.02, - "learning_rate": 0.0017270105951300739, - "loss": 2.2734, - "step": 265 - }, - { - "epoch": 6.14, - "learning_rate": 0.0017171318047589637, - "loss": 1.9898, - "step": 270 - }, - { - "epoch": 6.25, - "learning_rate": 0.0017071067811865474, - "loss": 1.9816, - "step": 275 - }, - { - "epoch": 6.36, - "learning_rate": 0.0016969375686552938, - "loss": 1.9648, - "step": 280 - }, - { - "epoch": 6.48, - "learning_rate": 0.0016866262408098134, - "loss": 2.1672, - "step": 285 - }, - { - "epoch": 6.59, - "learning_rate": 0.0016761749002740195, - "loss": 2.0074, - "step": 290 - }, - { - "epoch": 6.7, - "learning_rate": 0.0016655856782223683, - "loss": 2.1598, - "step": 295 - }, - { - "epoch": 6.82, - "learning_rate": 0.0016548607339452852, - "loss": 2.0996, - "step": 300 - }, - { - "epoch": 6.93, - "learning_rate": 0.0016440022544088554, - "loss": 2.1434, - "step": 305 - }, - { - "epoch": 7.05, - "learning_rate": 0.0016330124538088703, - "loss": 2.0699, - "step": 310 - }, - { - "epoch": 7.16, - "learning_rate": 0.0016218935731193223, - "loss": 1.7312, - "step": 315 - }, - { - "epoch": 7.27, - "learning_rate": 0.0016106478796354383, - "loss": 1.7799, - "step": 320 - }, - { - "epoch": 7.39, - "learning_rate": 0.0015992776665113468, - "loss": 1.7008, - "step": 325 - }, - { - "epoch": 7.5, - "learning_rate": 0.0015877852522924731, - "loss": 1.8969, - "step": 330 - }, - { - "epoch": 7.61, - "learning_rate": 0.0015761729804427528, - "loss": 1.8156, - "step": 335 - }, - { - "epoch": 7.73, - "learning_rate": 0.0015644432188667695, - "loss": 1.9336, - "step": 340 - }, - { - "epoch": 7.84, - "learning_rate": 0.0015525983594269026, - "loss": 1.9918, - "step": 345 - }, - { - "epoch": 7.95, - "learning_rate": 0.0015406408174555976, - "loss": 2.0055, - "step": 350 - }, - { - "epoch": 8.07, - "learning_rate": 0.0015285730312628418, - "loss": 1.7168, - "step": 355 - }, - { - "epoch": 8.18, - "learning_rate": 0.001516397461638962, - "loss": 
1.5531, - "step": 360 - }, - { - "epoch": 8.3, - "learning_rate": 0.001504116591352832, - "loss": 1.5922, - "step": 365 - }, - { - "epoch": 8.41, - "learning_rate": 0.001491732924645604, - "loss": 1.618, - "step": 370 - }, - { - "epoch": 8.52, - "learning_rate": 0.0014792489867200569, - "loss": 1.6738, - "step": 375 - }, - { - "epoch": 8.64, - "learning_rate": 0.0014666673232256737, - "loss": 1.7461, - "step": 380 - }, - { - "epoch": 8.75, - "learning_rate": 0.0014539904997395467, - "loss": 1.6746, - "step": 385 - }, - { - "epoch": 8.86, - "learning_rate": 0.0014412211012432212, - "loss": 1.7711, - "step": 390 - }, - { - "epoch": 8.98, - "learning_rate": 0.0014283617315955814, - "loss": 1.8387, - "step": 395 - }, - { - "epoch": 9.09, - "learning_rate": 0.0014154150130018866, - "loss": 1.475, - "step": 400 - }, - { - "epoch": 9.2, - "learning_rate": 0.001402383585479068, - "loss": 1.4523, - "step": 405 - }, - { - "epoch": 9.32, - "learning_rate": 0.0013892701063173917, - "loss": 1.4812, - "step": 410 - }, - { - "epoch": 9.43, - "learning_rate": 0.0013760772495385997, - "loss": 1.525, - "step": 415 - }, - { - "epoch": 9.55, - "learning_rate": 0.001362807705350641, - "loss": 1.398, - "step": 420 - }, - { - "epoch": 9.66, - "learning_rate": 0.0013494641795990985, - "loss": 1.4477, - "step": 425 - }, - { - "epoch": 9.77, - "learning_rate": 0.00133604939321543, - "loss": 1.5801, - "step": 430 - }, - { - "epoch": 9.89, - "learning_rate": 0.0013225660816621341, - "loss": 1.6422, - "step": 435 - }, - { - "epoch": 10.0, - "learning_rate": 0.0013090169943749475, - "loss": 1.5535, - "step": 440 - }, - { - "epoch": 10.11, - "learning_rate": 0.0012954048942022001, - "loss": 1.2324, - "step": 445 - }, - { - "epoch": 10.23, - "learning_rate": 0.0012817325568414298, - "loss": 1.2613, - "step": 450 - }, - { - "epoch": 10.34, - "learning_rate": 0.001268002770273379, - "loss": 1.3293, - "step": 455 - }, - { - "epoch": 10.45, - "learning_rate": 0.0012542183341934872, - "loss": 1.2852, - "step": 460 - }, - { - "epoch": 10.57, - "learning_rate": 0.0012403820594409924, - "loss": 1.3295, - "step": 465 - }, - { - "epoch": 10.68, - "learning_rate": 0.0012264967674257645, - "loss": 1.3287, - "step": 470 - }, - { - "epoch": 10.8, - "learning_rate": 0.0012125652895529767, - "loss": 1.3566, - "step": 475 - }, - { - "epoch": 10.91, - "learning_rate": 0.0011985904666457455, - "loss": 1.4414, - "step": 480 - }, - { - "epoch": 11.02, - "learning_rate": 0.0011845751483658454, - "loss": 1.3695, - "step": 485 - }, - { - "epoch": 11.14, - "learning_rate": 0.0011705221926326238, - "loss": 1.1363, - "step": 490 - }, - { - "epoch": 11.25, - "learning_rate": 0.001156434465040231, - "loss": 1.1354, - "step": 495 - }, - { - "epoch": 11.36, - "learning_rate": 0.0011423148382732854, - "loss": 1.0725, - "step": 500 - }, - { - "epoch": 11.48, - "learning_rate": 0.001128166191521093, - "loss": 1.1754, - "step": 505 - }, - { - "epoch": 11.59, - "learning_rate": 0.0011139914098905405, - "loss": 1.1848, - "step": 510 - }, - { - "epoch": 11.7, - "learning_rate": 0.0010997933838177826, - "loss": 1.2354, - "step": 515 - }, - { - "epoch": 11.82, - "learning_rate": 0.0010855750084788399, - "loss": 1.1984, - "step": 520 - }, - { - "epoch": 11.93, - "learning_rate": 0.0010713391831992322, - "loss": 1.2666, - "step": 525 - }, - { - "epoch": 12.05, - "learning_rate": 0.001057088810862768, - "loss": 1.1408, - "step": 530 - }, - { - "epoch": 12.16, - "learning_rate": 0.0010428267973196027, - "loss": 0.9385, - "step": 535 - }, - { - "epoch": 12.27, - 
"learning_rate": 0.0010285560507936962, - "loss": 1.0158, - "step": 540 - }, - { - "epoch": 12.39, - "learning_rate": 0.0010142794812897874, - "loss": 0.9936, - "step": 545 - }, - { - "epoch": 12.5, - "learning_rate": 0.001, - "loss": 0.9891, - "step": 550 - }, - { - "epoch": 12.61, - "learning_rate": 0.000985720518710213, - "loss": 1.0684, - "step": 555 - }, - { - "epoch": 12.73, - "learning_rate": 0.0009714439492063038, - "loss": 1.076, - "step": 560 - }, - { - "epoch": 12.84, - "learning_rate": 0.0009571732026803976, - "loss": 1.0609, - "step": 565 - }, - { - "epoch": 12.95, - "learning_rate": 0.000942911189137232, - "loss": 1.1297, - "step": 570 - }, - { - "epoch": 13.07, - "learning_rate": 0.0009286608168007677, - "loss": 0.9342, - "step": 575 - }, - { - "epoch": 13.18, - "learning_rate": 0.0009144249915211606, - "loss": 0.8511, - "step": 580 - }, - { - "epoch": 13.3, - "learning_rate": 0.0009002066161822172, - "loss": 0.8336, - "step": 585 - }, - { - "epoch": 13.41, - "learning_rate": 0.0008860085901094594, - "loss": 0.8652, - "step": 590 - }, - { - "epoch": 13.52, - "learning_rate": 0.0008718338084789072, - "loss": 0.9744, - "step": 595 - }, - { - "epoch": 13.64, - "learning_rate": 0.000857685161726715, - "loss": 0.9006, - "step": 600 - }, - { - "epoch": 13.75, - "learning_rate": 0.000843565534959769, - "loss": 0.9619, - "step": 605 - }, - { - "epoch": 13.86, - "learning_rate": 0.0008294778073673762, - "loss": 0.9123, - "step": 610 - }, - { - "epoch": 13.98, - "learning_rate": 0.0008154248516341547, - "loss": 0.9959, - "step": 615 - }, - { - "epoch": 14.09, - "learning_rate": 0.0008014095333542549, - "loss": 0.7503, - "step": 620 - }, - { - "epoch": 14.2, - "learning_rate": 0.0007874347104470233, - "loss": 0.7357, - "step": 625 - }, - { - "epoch": 14.32, - "learning_rate": 0.0007735032325742355, - "loss": 0.7477, - "step": 630 - }, - { - "epoch": 14.43, - "learning_rate": 0.0007596179405590076, - "loss": 0.8088, - "step": 635 - }, - { - "epoch": 14.55, - "learning_rate": 0.0007457816658065133, - "loss": 0.7652, - "step": 640 - }, - { - "epoch": 14.66, - "learning_rate": 0.0007319972297266214, - "loss": 0.7847, - "step": 645 - }, - { - "epoch": 14.77, - "learning_rate": 0.0007182674431585703, - "loss": 0.7984, - "step": 650 - }, - { - "epoch": 14.89, - "learning_rate": 0.0007045951057978, - "loss": 0.8732, - "step": 655 - }, - { - "epoch": 15.0, - "learning_rate": 0.0006909830056250527, - "loss": 0.8258, - "step": 660 - }, - { - "epoch": 15.11, - "learning_rate": 0.0006774339183378663, - "loss": 0.6311, - "step": 665 - }, - { - "epoch": 15.23, - "learning_rate": 0.0006639506067845697, - "loss": 0.6543, - "step": 670 - }, - { - "epoch": 15.34, - "learning_rate": 0.0006505358204009018, - "loss": 0.6421, - "step": 675 - }, - { - "epoch": 15.45, - "learning_rate": 0.0006371922946493591, - "loss": 0.6937, - "step": 680 - }, - { - "epoch": 15.57, - "learning_rate": 0.0006239227504614003, - "loss": 0.6887, - "step": 685 - }, - { - "epoch": 15.68, - "learning_rate": 0.0006107298936826086, - "loss": 0.7097, - "step": 690 - }, - { - "epoch": 15.8, - "learning_rate": 0.0005976164145209322, - "loss": 0.6778, - "step": 695 - }, - { - "epoch": 15.91, - "learning_rate": 0.0005845849869981136, - "loss": 0.7124, - "step": 700 - } - ], - "logging_steps": 5, - "max_steps": 1100, - "num_input_tokens_seen": 0, - "num_train_epochs": 25, - "save_steps": 100, - "total_flos": 3.56150844862464e+17, - "train_batch_size": 4, - "trial_name": null, - "trial_params": null -} diff --git 
a/checkpoint-700/training_args.bin b/checkpoint-700/training_args.bin deleted file mode 100644 index ff8dbcdca96337fe706e3b8a5e49365cea791f82..0000000000000000000000000000000000000000 --- a/checkpoint-700/training_args.bin +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:fef6a3ae006ec4c51dbcf0a3e569288ca5ab1bbc97f41768934c32153b03277c -size 4920 diff --git a/checkpoint-800/README.md b/checkpoint-800/README.md deleted file mode 100644 index 0a4640bc0bab946c21e07f36639d991fc5d9f684..0000000000000000000000000000000000000000 --- a/checkpoint-800/README.md +++ /dev/null @@ -1,204 +0,0 @@ ---- -library_name: peft -base_model: /root/chatglm3-6b ---- - -# Model Card for Model ID - - - - - -## Model Details - -### Model Description - - - - - -- **Developed by:** [More Information Needed] -- **Funded by [optional]:** [More Information Needed] -- **Shared by [optional]:** [More Information Needed] -- **Model type:** [More Information Needed] -- **Language(s) (NLP):** [More Information Needed] -- **License:** [More Information Needed] -- **Finetuned from model [optional]:** [More Information Needed] - -### Model Sources [optional] - - - -- **Repository:** [More Information Needed] -- **Paper [optional]:** [More Information Needed] -- **Demo [optional]:** [More Information Needed] - -## Uses - - - -### Direct Use - - - -[More Information Needed] - -### Downstream Use [optional] - - - -[More Information Needed] - -### Out-of-Scope Use - - - -[More Information Needed] - -## Bias, Risks, and Limitations - - - -[More Information Needed] - -### Recommendations - - - -Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. - -## How to Get Started with the Model - -Use the code below to get started with the model. - -[More Information Needed] - -## Training Details - -### Training Data - - - -[More Information Needed] - -### Training Procedure - - - -#### Preprocessing [optional] - -[More Information Needed] - - -#### Training Hyperparameters - -- **Training regime:** [More Information Needed] - -#### Speeds, Sizes, Times [optional] - - - -[More Information Needed] - -## Evaluation - - - -### Testing Data, Factors & Metrics - -#### Testing Data - - - -[More Information Needed] - -#### Factors - - - -[More Information Needed] - -#### Metrics - - - -[More Information Needed] - -### Results - -[More Information Needed] - -#### Summary - - - -## Model Examination [optional] - - - -[More Information Needed] - -## Environmental Impact - - - -Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). 
- -- **Hardware Type:** [More Information Needed] -- **Hours used:** [More Information Needed] -- **Cloud Provider:** [More Information Needed] -- **Compute Region:** [More Information Needed] -- **Carbon Emitted:** [More Information Needed] - -## Technical Specifications [optional] - -### Model Architecture and Objective - -[More Information Needed] - -### Compute Infrastructure - -[More Information Needed] - -#### Hardware - -[More Information Needed] - -#### Software - -[More Information Needed] - -## Citation [optional] - - - -**BibTeX:** - -[More Information Needed] - -**APA:** - -[More Information Needed] - -## Glossary [optional] - - - -[More Information Needed] - -## More Information [optional] - -[More Information Needed] - -## Model Card Authors [optional] - -[More Information Needed] - -## Model Card Contact - -[More Information Needed] - - -### Framework versions - -- PEFT 0.7.1 \ No newline at end of file diff --git a/checkpoint-800/adapter_config.json b/checkpoint-800/adapter_config.json deleted file mode 100644 index e437b533e257864a38c04ed024f90cab5eebcd8d..0000000000000000000000000000000000000000 --- a/checkpoint-800/adapter_config.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "alpha_pattern": {}, - "auto_mapping": null, - "base_model_name_or_path": "/root/chatglm3-6b", - "bias": "none", - "fan_in_fan_out": false, - "inference_mode": true, - "init_lora_weights": true, - "layers_pattern": null, - "layers_to_transform": null, - "loftq_config": {}, - "lora_alpha": 64.0, - "lora_dropout": 0.1, - "megatron_config": null, - "megatron_core": "megatron.core", - "modules_to_save": null, - "peft_type": "LORA", - "r": 32, - "rank_pattern": {}, - "revision": null, - "target_modules": [ - "query_key_value" - ], - "task_type": "CAUSAL_LM" -} \ No newline at end of file diff --git a/checkpoint-800/adapter_model.safetensors b/checkpoint-800/adapter_model.safetensors deleted file mode 100644 index 089800c9e88069633551e1a3bf5c91e95ff64428..0000000000000000000000000000000000000000 --- a/checkpoint-800/adapter_model.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:54c6dd53d5326506ece69a9bd54a9eadb264d1e8d1c423195a0e633060f3be5e -size 31204248 diff --git a/checkpoint-800/optimizer.pt b/checkpoint-800/optimizer.pt deleted file mode 100644 index 7d8828678b2e988b4532aebc0d1478274424e4fa..0000000000000000000000000000000000000000 --- a/checkpoint-800/optimizer.pt +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e689221fb9dfdd617b5e0d8a6f5ca10763fb1da5e6ecec7d8290ccfb4a1ee339 -size 62437882 diff --git a/checkpoint-800/rng_state.pth b/checkpoint-800/rng_state.pth deleted file mode 100644 index 57e84c10055685a7d471cfe72bbb1dbcaf00992a..0000000000000000000000000000000000000000 --- a/checkpoint-800/rng_state.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:b4f906f32e0cd50ee989e776447fc7f92a946657e5aecdb6935a8e559c806dbb -size 14244 diff --git a/checkpoint-800/scheduler.pt b/checkpoint-800/scheduler.pt deleted file mode 100644 index 837e7bfac73063ef71e537cd0c9e889e0dc86f98..0000000000000000000000000000000000000000 --- a/checkpoint-800/scheduler.pt +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:5d2864f4c40324dd6b958b5301dad812f50da0efe5f184e1572889a4e267b23b -size 1064 diff --git a/checkpoint-800/special_tokens_map.json b/checkpoint-800/special_tokens_map.json deleted file mode 100644 index 
dd02cd16ef3e1cfed3ce0f8cd09b983412317a48..0000000000000000000000000000000000000000 --- a/checkpoint-800/special_tokens_map.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "additional_special_tokens": [ - { - "content": "<|user|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false - }, - { - "content": "<|observation|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false - } - ] -} diff --git a/checkpoint-800/tokenization_chatglm.py b/checkpoint-800/tokenization_chatglm.py deleted file mode 100644 index 862e8f9a75bc874741cababc3b352cbbfe3611ad..0000000000000000000000000000000000000000 --- a/checkpoint-800/tokenization_chatglm.py +++ /dev/null @@ -1,300 +0,0 @@ -import json -import os -import re -from typing import List, Optional, Union, Dict -from sentencepiece import SentencePieceProcessor -from transformers import PreTrainedTokenizer -from transformers.utils import logging, PaddingStrategy -from transformers.tokenization_utils_base import EncodedInput, BatchEncoding - - -class SPTokenizer: - def __init__(self, model_path: str): - # reload tokenizer - assert os.path.isfile(model_path), model_path - self.sp_model = SentencePieceProcessor(model_file=model_path) - - # BOS / EOS token IDs - self.n_words: int = self.sp_model.vocab_size() - self.bos_id: int = self.sp_model.bos_id() - self.eos_id: int = self.sp_model.eos_id() - self.pad_id: int = self.sp_model.unk_id() - assert self.sp_model.vocab_size() == self.sp_model.get_piece_size() - - role_special_tokens = ["<|system|>", "<|user|>", "<|assistant|>", "<|observation|>"] - special_tokens = ["[MASK]", "[gMASK]", "[sMASK]", "sop", "eop"] + role_special_tokens - self.special_tokens = {} - self.index_special_tokens = {} - for token in special_tokens: - self.special_tokens[token] = self.n_words - self.index_special_tokens[self.n_words] = token - self.n_words += 1 - self.role_special_token_expression = "|".join([re.escape(token) for token in role_special_tokens]) - - def tokenize(self, s: str, encode_special_tokens=False): - if encode_special_tokens: - last_index = 0 - t = [] - for match in re.finditer(self.role_special_token_expression, s): - if last_index < match.start(): - t.extend(self.sp_model.EncodeAsPieces(s[last_index:match.start()])) - t.append(s[match.start():match.end()]) - last_index = match.end() - if last_index < len(s): - t.extend(self.sp_model.EncodeAsPieces(s[last_index:])) - return t - else: - return self.sp_model.EncodeAsPieces(s) - - def encode(self, s: str, bos: bool = False, eos: bool = False) -> List[int]: - assert type(s) is str - t = self.sp_model.encode(s) - if bos: - t = [self.bos_id] + t - if eos: - t = t + [self.eos_id] - return t - - def decode(self, t: List[int]) -> str: - text, buffer = "", [] - for token in t: - if token in self.index_special_tokens: - if buffer: - text += self.sp_model.decode(buffer) - buffer = [] - text += self.index_special_tokens[token] - else: - buffer.append(token) - if buffer: - text += self.sp_model.decode(buffer) - return text - - def decode_tokens(self, tokens: List[str]) -> str: - text = self.sp_model.DecodePieces(tokens) - return text - - def convert_token_to_id(self, token): - """ Converts a token (str) in an id using the vocab. 
""" - if token in self.special_tokens: - return self.special_tokens[token] - return self.sp_model.PieceToId(token) - - def convert_id_to_token(self, index): - """Converts an index (integer) in a token (str) using the vocab.""" - if index in self.index_special_tokens: - return self.index_special_tokens[index] - if index in [self.eos_id, self.bos_id, self.pad_id] or index < 0 or index > self.sp_model.vocab_size(): - return "" - return self.sp_model.IdToPiece(index) - - -class ChatGLMTokenizer(PreTrainedTokenizer): - vocab_files_names = {"vocab_file": "tokenizer.model"} - - model_input_names = ["input_ids", "attention_mask", "position_ids"] - - def __init__(self, vocab_file, padding_side="left", clean_up_tokenization_spaces=False, encode_special_tokens=False, - **kwargs): - self.name = "GLMTokenizer" - - self.vocab_file = vocab_file - self.tokenizer = SPTokenizer(vocab_file) - self.special_tokens = { - "": self.tokenizer.bos_id, - "": self.tokenizer.eos_id, - "": self.tokenizer.pad_id - } - self.encode_special_tokens = encode_special_tokens - super().__init__(padding_side=padding_side, clean_up_tokenization_spaces=clean_up_tokenization_spaces, - encode_special_tokens=encode_special_tokens, - **kwargs) - - def get_command(self, token): - if token in self.special_tokens: - return self.special_tokens[token] - assert token in self.tokenizer.special_tokens, f"{token} is not a special token for {self.name}" - return self.tokenizer.special_tokens[token] - - @property - def unk_token(self) -> str: - return "" - - @property - def pad_token(self) -> str: - return "" - - @property - def pad_token_id(self): - return self.get_command("") - - @property - def eos_token(self) -> str: - return "" - - @property - def eos_token_id(self): - return self.get_command("") - - @property - def vocab_size(self): - return self.tokenizer.n_words - - def get_vocab(self): - """ Returns vocab as a dict """ - vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)} - vocab.update(self.added_tokens_encoder) - return vocab - - def _tokenize(self, text, **kwargs): - return self.tokenizer.tokenize(text, encode_special_tokens=self.encode_special_tokens) - - def _convert_token_to_id(self, token): - """ Converts a token (str) in an id using the vocab. """ - return self.tokenizer.convert_token_to_id(token) - - def _convert_id_to_token(self, index): - """Converts an index (integer) in a token (str) using the vocab.""" - return self.tokenizer.convert_id_to_token(index) - - def convert_tokens_to_string(self, tokens: List[str]) -> str: - return self.tokenizer.decode_tokens(tokens) - - def save_vocabulary(self, save_directory, filename_prefix=None): - """ - Save the vocabulary and special tokens file to a directory. - - Args: - save_directory (`str`): - The directory in which to save the vocabulary. - filename_prefix (`str`, *optional*): - An optional prefix to add to the named of the saved files. - - Returns: - `Tuple(str)`: Paths to the files saved. 
- """ - if os.path.isdir(save_directory): - vocab_file = os.path.join( - save_directory, self.vocab_files_names["vocab_file"] - ) - else: - vocab_file = save_directory - - with open(self.vocab_file, 'rb') as fin: - proto_str = fin.read() - - with open(vocab_file, "wb") as writer: - writer.write(proto_str) - - return (vocab_file,) - - def get_prefix_tokens(self): - prefix_tokens = [self.get_command("[gMASK]"), self.get_command("sop")] - return prefix_tokens - - def build_single_message(self, role, metadata, message): - assert role in ["system", "user", "assistant", "observation"], role - role_tokens = [self.get_command(f"<|{role}|>")] + self.tokenizer.encode(f"{metadata}\n") - message_tokens = self.tokenizer.encode(message) - tokens = role_tokens + message_tokens - return tokens - - def build_chat_input(self, query, history=None, role="user"): - if history is None: - history = [] - input_ids = [] - for item in history: - content = item["content"] - if item["role"] == "system" and "tools" in item: - content = content + "\n" + json.dumps(item["tools"], indent=4, ensure_ascii=False) - input_ids.extend(self.build_single_message(item["role"], item.get("metadata", ""), content)) - input_ids.extend(self.build_single_message(role, "", query)) - input_ids.extend([self.get_command("<|assistant|>")]) - return self.batch_encode_plus([input_ids], return_tensors="pt", is_split_into_words=True) - - def build_inputs_with_special_tokens( - self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None - ) -> List[int]: - """ - Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and - adding special tokens. A BERT sequence has the following format: - - - single sequence: `[CLS] X [SEP]` - - pair of sequences: `[CLS] A [SEP] B [SEP]` - - Args: - token_ids_0 (`List[int]`): - List of IDs to which the special tokens will be added. - token_ids_1 (`List[int]`, *optional*): - Optional second list of IDs for sequence pairs. - - Returns: - `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. - """ - prefix_tokens = self.get_prefix_tokens() - token_ids_0 = prefix_tokens + token_ids_0 - if token_ids_1 is not None: - token_ids_0 = token_ids_0 + token_ids_1 + [self.get_command("")] - return token_ids_0 - - def _pad( - self, - encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], - max_length: Optional[int] = None, - padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, - pad_to_multiple_of: Optional[int] = None, - return_attention_mask: Optional[bool] = None, - ) -> dict: - """ - Pad encoded inputs (on left/right and up to predefined length or max length in the batch) - - Args: - encoded_inputs: - Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`). - max_length: maximum length of the returned list and optionally padding length (see below). - Will truncate by taking into account the special tokens. - padding_strategy: PaddingStrategy to use for padding. - - - PaddingStrategy.LONGEST Pad to the longest sequence in the batch - - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) - - PaddingStrategy.DO_NOT_PAD: Do not pad - The tokenizer padding sides are defined in self.padding_side: - - - 'left': pads on the left of the sequences - - 'right': pads on the right of the sequences - pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. 
- This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability - `>= 7.5` (Volta). - return_attention_mask: - (optional) Set to False to avoid returning attention mask (default: set to model specifics) - """ - # Load from model defaults - assert self.padding_side == "left" - - required_input = encoded_inputs[self.model_input_names[0]] - seq_length = len(required_input) - - if padding_strategy == PaddingStrategy.LONGEST: - max_length = len(required_input) - - if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): - max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of - - needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length - - # Initialize attention mask if not present. - if "attention_mask" not in encoded_inputs: - encoded_inputs["attention_mask"] = [1] * seq_length - - if "position_ids" not in encoded_inputs: - encoded_inputs["position_ids"] = list(range(seq_length)) - - if needs_to_be_padded: - difference = max_length - len(required_input) - - if "attention_mask" in encoded_inputs: - encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"] - if "position_ids" in encoded_inputs: - encoded_inputs["position_ids"] = [0] * difference + encoded_inputs["position_ids"] - encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input - - return encoded_inputs diff --git a/checkpoint-800/tokenizer.model b/checkpoint-800/tokenizer.model deleted file mode 100644 index 8a8007697b7cc3d3868dcffbbebf8c1f2bd690ba..0000000000000000000000000000000000000000 --- a/checkpoint-800/tokenizer.model +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e7dc4c393423b76e4373e5157ddc34803a0189ba96b21ddbb40269d31468a6f2 -size 1018370 diff --git a/checkpoint-800/tokenizer_config.json b/checkpoint-800/tokenizer_config.json deleted file mode 100644 index f0e543dcb5c184576e9e88e2c48b586290d71953..0000000000000000000000000000000000000000 --- a/checkpoint-800/tokenizer_config.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "added_tokens_decoder": { - "64795": { - "content": "<|user|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": true - }, - "64797": { - "content": "<|observation|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": true - } - }, - "additional_special_tokens": [ - "<|user|>", - "<|observation|>" - ], - "auto_map": { - "AutoTokenizer": [ - "tokenization_chatglm.ChatGLMTokenizer", - null - ] - }, - "clean_up_tokenization_spaces": false, - "do_lower_case": false, - "encode_special_tokens": false, - "eos_token": "", - "model_max_length": 1000000000000000019884624838656, - "pad_token": "", - "padding_side": "right", - "remove_space": false, - "split_special_tokens": false, - "tokenizer_class": "ChatGLMTokenizer", - "unk_token": "" -} diff --git a/checkpoint-800/trainer_state.json b/checkpoint-800/trainer_state.json deleted file mode 100644 index ad79a35be68ecf98916bc1c4cb598985aa6d081e..0000000000000000000000000000000000000000 --- a/checkpoint-800/trainer_state.json +++ /dev/null @@ -1,981 +0,0 @@ -{ - "best_metric": null, - "best_model_checkpoint": null, - "epoch": 18.181818181818183, - "eval_steps": 500, - "global_step": 800, - "is_hyper_param_search": false, - "is_local_process_zero": true, - "is_world_process_zero": true, - "log_history": [ - { - "epoch": 0.11, 
- "learning_rate": 0.001999898043009433, - "loss": 4.5094, - "step": 5 - }, - { - "epoch": 0.23, - "learning_rate": 0.0019995921928281893, - "loss": 3.8047, - "step": 10 - }, - { - "epoch": 0.34, - "learning_rate": 0.001999082511823396, - "loss": 3.8813, - "step": 15 - }, - { - "epoch": 0.45, - "learning_rate": 0.0019983691039261358, - "loss": 3.7188, - "step": 20 - }, - { - "epoch": 0.57, - "learning_rate": 0.0019974521146102534, - "loss": 3.6695, - "step": 25 - }, - { - "epoch": 0.68, - "learning_rate": 0.001996331730862691, - "loss": 3.7078, - "step": 30 - }, - { - "epoch": 0.8, - "learning_rate": 0.0019950081811453595, - "loss": 3.6844, - "step": 35 - }, - { - "epoch": 0.91, - "learning_rate": 0.0019934817353485504, - "loss": 3.6961, - "step": 40 - }, - { - "epoch": 1.02, - "learning_rate": 0.0019917527047359027, - "loss": 3.5758, - "step": 45 - }, - { - "epoch": 1.14, - "learning_rate": 0.001989821441880933, - "loss": 3.4102, - "step": 50 - }, - { - "epoch": 1.25, - "learning_rate": 0.0019876883405951376, - "loss": 3.3984, - "step": 55 - }, - { - "epoch": 1.36, - "learning_rate": 0.001985353835847693, - "loss": 3.3602, - "step": 60 - }, - { - "epoch": 1.48, - "learning_rate": 0.0019828184036767556, - "loss": 3.4461, - "step": 65 - }, - { - "epoch": 1.59, - "learning_rate": 0.0019800825610923932, - "loss": 3.3461, - "step": 70 - }, - { - "epoch": 1.7, - "learning_rate": 0.0019771468659711597, - "loss": 3.4172, - "step": 75 - }, - { - "epoch": 1.82, - "learning_rate": 0.0019740119169423336, - "loss": 3.4359, - "step": 80 - }, - { - "epoch": 1.93, - "learning_rate": 0.0019706783532658523, - "loss": 3.5141, - "step": 85 - }, - { - "epoch": 2.05, - "learning_rate": 0.001967146854701957, - "loss": 3.2242, - "step": 90 - }, - { - "epoch": 2.16, - "learning_rate": 0.0019634181413725788, - "loss": 3.0227, - "step": 95 - }, - { - "epoch": 2.27, - "learning_rate": 0.0019594929736144974, - "loss": 2.8984, - "step": 100 - }, - { - "epoch": 2.39, - "learning_rate": 0.001955372151824297, - "loss": 3.0781, - "step": 105 - }, - { - "epoch": 2.5, - "learning_rate": 0.0019510565162951536, - "loss": 3.1203, - "step": 110 - }, - { - "epoch": 2.61, - "learning_rate": 0.00194654694704549, - "loss": 3.1828, - "step": 115 - }, - { - "epoch": 2.73, - "learning_rate": 0.0019418443636395248, - "loss": 3.0531, - "step": 120 - }, - { - "epoch": 2.84, - "learning_rate": 0.001936949724999762, - "loss": 3.1523, - "step": 125 - }, - { - "epoch": 2.95, - "learning_rate": 0.0019318640292114524, - "loss": 3.1156, - "step": 130 - }, - { - "epoch": 3.07, - "learning_rate": 0.0019265883133190713, - "loss": 2.7844, - "step": 135 - }, - { - "epoch": 3.18, - "learning_rate": 0.0019211236531148502, - "loss": 2.6711, - "step": 140 - }, - { - "epoch": 3.3, - "learning_rate": 0.0019154711629194062, - "loss": 2.6609, - "step": 145 - }, - { - "epoch": 3.41, - "learning_rate": 0.0019096319953545184, - "loss": 2.7531, - "step": 150 - }, - { - "epoch": 3.52, - "learning_rate": 0.0019036073411080917, - "loss": 2.7977, - "step": 155 - }, - { - "epoch": 3.64, - "learning_rate": 0.0018973984286913585, - "loss": 2.7914, - "step": 160 - }, - { - "epoch": 3.75, - "learning_rate": 0.0018910065241883678, - "loss": 2.8188, - "step": 165 - }, - { - "epoch": 3.86, - "learning_rate": 0.0018844329309978143, - "loss": 2.8945, - "step": 170 - }, - { - "epoch": 3.98, - "learning_rate": 0.0018776789895672556, - "loss": 2.8883, - "step": 175 - }, - { - "epoch": 4.09, - "learning_rate": 0.0018707460771197773, - "loss": 2.4617, - "step": 180 - }, - { - 
"epoch": 4.2, - "learning_rate": 0.001863635607373157, - "loss": 2.4633, - "step": 185 - }, - { - "epoch": 4.32, - "learning_rate": 0.001856349030251589, - "loss": 2.5094, - "step": 190 - }, - { - "epoch": 4.43, - "learning_rate": 0.0018488878315900226, - "loss": 2.432, - "step": 195 - }, - { - "epoch": 4.55, - "learning_rate": 0.0018412535328311812, - "loss": 2.5648, - "step": 200 - }, - { - "epoch": 4.66, - "learning_rate": 0.0018334476907153176, - "loss": 2.4836, - "step": 205 - }, - { - "epoch": 4.77, - "learning_rate": 0.001825471896962774, - "loss": 2.6617, - "step": 210 - }, - { - "epoch": 4.89, - "learning_rate": 0.0018173277779494068, - "loss": 2.6734, - "step": 215 - }, - { - "epoch": 5.0, - "learning_rate": 0.0018090169943749475, - "loss": 2.6742, - "step": 220 - }, - { - "epoch": 5.11, - "learning_rate": 0.0018005412409243604, - "loss": 2.1379, - "step": 225 - }, - { - "epoch": 5.23, - "learning_rate": 0.0017919022459222751, - "loss": 2.1508, - "step": 230 - }, - { - "epoch": 5.34, - "learning_rate": 0.0017831017709805555, - "loss": 2.2582, - "step": 235 - }, - { - "epoch": 5.45, - "learning_rate": 0.0017741416106390826, - "loss": 2.2367, - "step": 240 - }, - { - "epoch": 5.57, - "learning_rate": 0.0017650235919998232, - "loss": 2.325, - "step": 245 - }, - { - "epoch": 5.68, - "learning_rate": 0.0017557495743542584, - "loss": 2.2703, - "step": 250 - }, - { - "epoch": 5.8, - "learning_rate": 0.0017463214488042471, - "loss": 2.3703, - "step": 255 - }, - { - "epoch": 5.91, - "learning_rate": 0.001736741137876405, - "loss": 2.4648, - "step": 260 - }, - { - "epoch": 6.02, - "learning_rate": 0.0017270105951300739, - "loss": 2.2734, - "step": 265 - }, - { - "epoch": 6.14, - "learning_rate": 0.0017171318047589637, - "loss": 1.9898, - "step": 270 - }, - { - "epoch": 6.25, - "learning_rate": 0.0017071067811865474, - "loss": 1.9816, - "step": 275 - }, - { - "epoch": 6.36, - "learning_rate": 0.0016969375686552938, - "loss": 1.9648, - "step": 280 - }, - { - "epoch": 6.48, - "learning_rate": 0.0016866262408098134, - "loss": 2.1672, - "step": 285 - }, - { - "epoch": 6.59, - "learning_rate": 0.0016761749002740195, - "loss": 2.0074, - "step": 290 - }, - { - "epoch": 6.7, - "learning_rate": 0.0016655856782223683, - "loss": 2.1598, - "step": 295 - }, - { - "epoch": 6.82, - "learning_rate": 0.0016548607339452852, - "loss": 2.0996, - "step": 300 - }, - { - "epoch": 6.93, - "learning_rate": 0.0016440022544088554, - "loss": 2.1434, - "step": 305 - }, - { - "epoch": 7.05, - "learning_rate": 0.0016330124538088703, - "loss": 2.0699, - "step": 310 - }, - { - "epoch": 7.16, - "learning_rate": 0.0016218935731193223, - "loss": 1.7312, - "step": 315 - }, - { - "epoch": 7.27, - "learning_rate": 0.0016106478796354383, - "loss": 1.7799, - "step": 320 - }, - { - "epoch": 7.39, - "learning_rate": 0.0015992776665113468, - "loss": 1.7008, - "step": 325 - }, - { - "epoch": 7.5, - "learning_rate": 0.0015877852522924731, - "loss": 1.8969, - "step": 330 - }, - { - "epoch": 7.61, - "learning_rate": 0.0015761729804427528, - "loss": 1.8156, - "step": 335 - }, - { - "epoch": 7.73, - "learning_rate": 0.0015644432188667695, - "loss": 1.9336, - "step": 340 - }, - { - "epoch": 7.84, - "learning_rate": 0.0015525983594269026, - "loss": 1.9918, - "step": 345 - }, - { - "epoch": 7.95, - "learning_rate": 0.0015406408174555976, - "loss": 2.0055, - "step": 350 - }, - { - "epoch": 8.07, - "learning_rate": 0.0015285730312628418, - "loss": 1.7168, - "step": 355 - }, - { - "epoch": 8.18, - "learning_rate": 0.001516397461638962, - "loss": 
1.5531, - "step": 360 - }, - { - "epoch": 8.3, - "learning_rate": 0.001504116591352832, - "loss": 1.5922, - "step": 365 - }, - { - "epoch": 8.41, - "learning_rate": 0.001491732924645604, - "loss": 1.618, - "step": 370 - }, - { - "epoch": 8.52, - "learning_rate": 0.0014792489867200569, - "loss": 1.6738, - "step": 375 - }, - { - "epoch": 8.64, - "learning_rate": 0.0014666673232256737, - "loss": 1.7461, - "step": 380 - }, - { - "epoch": 8.75, - "learning_rate": 0.0014539904997395467, - "loss": 1.6746, - "step": 385 - }, - { - "epoch": 8.86, - "learning_rate": 0.0014412211012432212, - "loss": 1.7711, - "step": 390 - }, - { - "epoch": 8.98, - "learning_rate": 0.0014283617315955814, - "loss": 1.8387, - "step": 395 - }, - { - "epoch": 9.09, - "learning_rate": 0.0014154150130018866, - "loss": 1.475, - "step": 400 - }, - { - "epoch": 9.2, - "learning_rate": 0.001402383585479068, - "loss": 1.4523, - "step": 405 - }, - { - "epoch": 9.32, - "learning_rate": 0.0013892701063173917, - "loss": 1.4812, - "step": 410 - }, - { - "epoch": 9.43, - "learning_rate": 0.0013760772495385997, - "loss": 1.525, - "step": 415 - }, - { - "epoch": 9.55, - "learning_rate": 0.001362807705350641, - "loss": 1.398, - "step": 420 - }, - { - "epoch": 9.66, - "learning_rate": 0.0013494641795990985, - "loss": 1.4477, - "step": 425 - }, - { - "epoch": 9.77, - "learning_rate": 0.00133604939321543, - "loss": 1.5801, - "step": 430 - }, - { - "epoch": 9.89, - "learning_rate": 0.0013225660816621341, - "loss": 1.6422, - "step": 435 - }, - { - "epoch": 10.0, - "learning_rate": 0.0013090169943749475, - "loss": 1.5535, - "step": 440 - }, - { - "epoch": 10.11, - "learning_rate": 0.0012954048942022001, - "loss": 1.2324, - "step": 445 - }, - { - "epoch": 10.23, - "learning_rate": 0.0012817325568414298, - "loss": 1.2613, - "step": 450 - }, - { - "epoch": 10.34, - "learning_rate": 0.001268002770273379, - "loss": 1.3293, - "step": 455 - }, - { - "epoch": 10.45, - "learning_rate": 0.0012542183341934872, - "loss": 1.2852, - "step": 460 - }, - { - "epoch": 10.57, - "learning_rate": 0.0012403820594409924, - "loss": 1.3295, - "step": 465 - }, - { - "epoch": 10.68, - "learning_rate": 0.0012264967674257645, - "loss": 1.3287, - "step": 470 - }, - { - "epoch": 10.8, - "learning_rate": 0.0012125652895529767, - "loss": 1.3566, - "step": 475 - }, - { - "epoch": 10.91, - "learning_rate": 0.0011985904666457455, - "loss": 1.4414, - "step": 480 - }, - { - "epoch": 11.02, - "learning_rate": 0.0011845751483658454, - "loss": 1.3695, - "step": 485 - }, - { - "epoch": 11.14, - "learning_rate": 0.0011705221926326238, - "loss": 1.1363, - "step": 490 - }, - { - "epoch": 11.25, - "learning_rate": 0.001156434465040231, - "loss": 1.1354, - "step": 495 - }, - { - "epoch": 11.36, - "learning_rate": 0.0011423148382732854, - "loss": 1.0725, - "step": 500 - }, - { - "epoch": 11.48, - "learning_rate": 0.001128166191521093, - "loss": 1.1754, - "step": 505 - }, - { - "epoch": 11.59, - "learning_rate": 0.0011139914098905405, - "loss": 1.1848, - "step": 510 - }, - { - "epoch": 11.7, - "learning_rate": 0.0010997933838177826, - "loss": 1.2354, - "step": 515 - }, - { - "epoch": 11.82, - "learning_rate": 0.0010855750084788399, - "loss": 1.1984, - "step": 520 - }, - { - "epoch": 11.93, - "learning_rate": 0.0010713391831992322, - "loss": 1.2666, - "step": 525 - }, - { - "epoch": 12.05, - "learning_rate": 0.001057088810862768, - "loss": 1.1408, - "step": 530 - }, - { - "epoch": 12.16, - "learning_rate": 0.0010428267973196027, - "loss": 0.9385, - "step": 535 - }, - { - "epoch": 12.27, - 
"learning_rate": 0.0010285560507936962, - "loss": 1.0158, - "step": 540 - }, - { - "epoch": 12.39, - "learning_rate": 0.0010142794812897874, - "loss": 0.9936, - "step": 545 - }, - { - "epoch": 12.5, - "learning_rate": 0.001, - "loss": 0.9891, - "step": 550 - }, - { - "epoch": 12.61, - "learning_rate": 0.000985720518710213, - "loss": 1.0684, - "step": 555 - }, - { - "epoch": 12.73, - "learning_rate": 0.0009714439492063038, - "loss": 1.076, - "step": 560 - }, - { - "epoch": 12.84, - "learning_rate": 0.0009571732026803976, - "loss": 1.0609, - "step": 565 - }, - { - "epoch": 12.95, - "learning_rate": 0.000942911189137232, - "loss": 1.1297, - "step": 570 - }, - { - "epoch": 13.07, - "learning_rate": 0.0009286608168007677, - "loss": 0.9342, - "step": 575 - }, - { - "epoch": 13.18, - "learning_rate": 0.0009144249915211606, - "loss": 0.8511, - "step": 580 - }, - { - "epoch": 13.3, - "learning_rate": 0.0009002066161822172, - "loss": 0.8336, - "step": 585 - }, - { - "epoch": 13.41, - "learning_rate": 0.0008860085901094594, - "loss": 0.8652, - "step": 590 - }, - { - "epoch": 13.52, - "learning_rate": 0.0008718338084789072, - "loss": 0.9744, - "step": 595 - }, - { - "epoch": 13.64, - "learning_rate": 0.000857685161726715, - "loss": 0.9006, - "step": 600 - }, - { - "epoch": 13.75, - "learning_rate": 0.000843565534959769, - "loss": 0.9619, - "step": 605 - }, - { - "epoch": 13.86, - "learning_rate": 0.0008294778073673762, - "loss": 0.9123, - "step": 610 - }, - { - "epoch": 13.98, - "learning_rate": 0.0008154248516341547, - "loss": 0.9959, - "step": 615 - }, - { - "epoch": 14.09, - "learning_rate": 0.0008014095333542549, - "loss": 0.7503, - "step": 620 - }, - { - "epoch": 14.2, - "learning_rate": 0.0007874347104470233, - "loss": 0.7357, - "step": 625 - }, - { - "epoch": 14.32, - "learning_rate": 0.0007735032325742355, - "loss": 0.7477, - "step": 630 - }, - { - "epoch": 14.43, - "learning_rate": 0.0007596179405590076, - "loss": 0.8088, - "step": 635 - }, - { - "epoch": 14.55, - "learning_rate": 0.0007457816658065133, - "loss": 0.7652, - "step": 640 - }, - { - "epoch": 14.66, - "learning_rate": 0.0007319972297266214, - "loss": 0.7847, - "step": 645 - }, - { - "epoch": 14.77, - "learning_rate": 0.0007182674431585703, - "loss": 0.7984, - "step": 650 - }, - { - "epoch": 14.89, - "learning_rate": 0.0007045951057978, - "loss": 0.8732, - "step": 655 - }, - { - "epoch": 15.0, - "learning_rate": 0.0006909830056250527, - "loss": 0.8258, - "step": 660 - }, - { - "epoch": 15.11, - "learning_rate": 0.0006774339183378663, - "loss": 0.6311, - "step": 665 - }, - { - "epoch": 15.23, - "learning_rate": 0.0006639506067845697, - "loss": 0.6543, - "step": 670 - }, - { - "epoch": 15.34, - "learning_rate": 0.0006505358204009018, - "loss": 0.6421, - "step": 675 - }, - { - "epoch": 15.45, - "learning_rate": 0.0006371922946493591, - "loss": 0.6937, - "step": 680 - }, - { - "epoch": 15.57, - "learning_rate": 0.0006239227504614003, - "loss": 0.6887, - "step": 685 - }, - { - "epoch": 15.68, - "learning_rate": 0.0006107298936826086, - "loss": 0.7097, - "step": 690 - }, - { - "epoch": 15.8, - "learning_rate": 0.0005976164145209322, - "loss": 0.6778, - "step": 695 - }, - { - "epoch": 15.91, - "learning_rate": 0.0005845849869981136, - "loss": 0.7124, - "step": 700 - }, - { - "epoch": 16.02, - "learning_rate": 0.000571638268404419, - "loss": 0.7053, - "step": 705 - }, - { - "epoch": 16.14, - "learning_rate": 0.0005587788987567784, - "loss": 0.5863, - "step": 710 - }, - { - "epoch": 16.25, - "learning_rate": 0.0005460095002604533, - "loss": 
0.5588, - "step": 715 - }, - { - "epoch": 16.36, - "learning_rate": 0.0005333326767743263, - "loss": 0.5363, - "step": 720 - }, - { - "epoch": 16.48, - "learning_rate": 0.0005207510132799435, - "loss": 0.6137, - "step": 725 - }, - { - "epoch": 16.59, - "learning_rate": 0.0005082670753543961, - "loss": 0.5606, - "step": 730 - }, - { - "epoch": 16.7, - "learning_rate": 0.0004958834086471683, - "loss": 0.629, - "step": 735 - }, - { - "epoch": 16.82, - "learning_rate": 0.00048360253836103817, - "loss": 0.5754, - "step": 740 - }, - { - "epoch": 16.93, - "learning_rate": 0.0004714269687371581, - "loss": 0.6239, - "step": 745 - }, - { - "epoch": 17.05, - "learning_rate": 0.0004593591825444028, - "loss": 0.5807, - "step": 750 - }, - { - "epoch": 17.16, - "learning_rate": 0.0004474016405730973, - "loss": 0.465, - "step": 755 - }, - { - "epoch": 17.27, - "learning_rate": 0.00043555678113323104, - "loss": 0.4871, - "step": 760 - }, - { - "epoch": 17.39, - "learning_rate": 0.00042382701955724725, - "loss": 0.4623, - "step": 765 - }, - { - "epoch": 17.5, - "learning_rate": 0.00041221474770752696, - "loss": 0.5059, - "step": 770 - }, - { - "epoch": 17.61, - "learning_rate": 0.00040072233348865304, - "loss": 0.5021, - "step": 775 - }, - { - "epoch": 17.73, - "learning_rate": 0.0003893521203645618, - "loss": 0.5138, - "step": 780 - }, - { - "epoch": 17.84, - "learning_rate": 0.00037810642688067796, - "loss": 0.5212, - "step": 785 - }, - { - "epoch": 17.95, - "learning_rate": 0.00036698754619112975, - "loss": 0.5611, - "step": 790 - }, - { - "epoch": 18.07, - "learning_rate": 0.00035599774559114475, - "loss": 0.4956, - "step": 795 - }, - { - "epoch": 18.18, - "learning_rate": 0.000345139266054715, - "loss": 0.4243, - "step": 800 - } - ], - "logging_steps": 5, - "max_steps": 1100, - "num_input_tokens_seen": 0, - "num_train_epochs": 25, - "save_steps": 100, - "total_flos": 4.074154800139469e+17, - "train_batch_size": 4, - "trial_name": null, - "trial_params": null -} diff --git a/checkpoint-800/training_args.bin b/checkpoint-800/training_args.bin deleted file mode 100644 index ff8dbcdca96337fe706e3b8a5e49365cea791f82..0000000000000000000000000000000000000000 --- a/checkpoint-800/training_args.bin +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:fef6a3ae006ec4c51dbcf0a3e569288ca5ab1bbc97f41768934c32153b03277c -size 4920 diff --git a/checkpoint-900/README.md b/checkpoint-900/README.md deleted file mode 100644 index 0a4640bc0bab946c21e07f36639d991fc5d9f684..0000000000000000000000000000000000000000 --- a/checkpoint-900/README.md +++ /dev/null @@ -1,204 +0,0 @@ ---- -library_name: peft -base_model: /root/chatglm3-6b ---- - -# Model Card for Model ID - - - - - -## Model Details - -### Model Description - - - - - -- **Developed by:** [More Information Needed] -- **Funded by [optional]:** [More Information Needed] -- **Shared by [optional]:** [More Information Needed] -- **Model type:** [More Information Needed] -- **Language(s) (NLP):** [More Information Needed] -- **License:** [More Information Needed] -- **Finetuned from model [optional]:** [More Information Needed] - -### Model Sources [optional] - - - -- **Repository:** [More Information Needed] -- **Paper [optional]:** [More Information Needed] -- **Demo [optional]:** [More Information Needed] - -## Uses - - - -### Direct Use - - - -[More Information Needed] - -### Downstream Use [optional] - - - -[More Information Needed] - -### Out-of-Scope Use - - - -[More Information Needed] - -## Bias, Risks, and Limitations - - - 
-[More Information Needed] - -### Recommendations - - - -Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. - -## How to Get Started with the Model - -Use the code below to get started with the model. - -[More Information Needed] - -## Training Details - -### Training Data - - - -[More Information Needed] - -### Training Procedure - - - -#### Preprocessing [optional] - -[More Information Needed] - - -#### Training Hyperparameters - -- **Training regime:** [More Information Needed] - -#### Speeds, Sizes, Times [optional] - - - -[More Information Needed] - -## Evaluation - - - -### Testing Data, Factors & Metrics - -#### Testing Data - - - -[More Information Needed] - -#### Factors - - - -[More Information Needed] - -#### Metrics - - - -[More Information Needed] - -### Results - -[More Information Needed] - -#### Summary - - - -## Model Examination [optional] - - - -[More Information Needed] - -## Environmental Impact - - - -Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - -- **Hardware Type:** [More Information Needed] -- **Hours used:** [More Information Needed] -- **Cloud Provider:** [More Information Needed] -- **Compute Region:** [More Information Needed] -- **Carbon Emitted:** [More Information Needed] - -## Technical Specifications [optional] - -### Model Architecture and Objective - -[More Information Needed] - -### Compute Infrastructure - -[More Information Needed] - -#### Hardware - -[More Information Needed] - -#### Software - -[More Information Needed] - -## Citation [optional] - - - -**BibTeX:** - -[More Information Needed] - -**APA:** - -[More Information Needed] - -## Glossary [optional] - - - -[More Information Needed] - -## More Information [optional] - -[More Information Needed] - -## Model Card Authors [optional] - -[More Information Needed] - -## Model Card Contact - -[More Information Needed] - - -### Framework versions - -- PEFT 0.7.1 \ No newline at end of file diff --git a/checkpoint-900/adapter_config.json b/checkpoint-900/adapter_config.json deleted file mode 100644 index e437b533e257864a38c04ed024f90cab5eebcd8d..0000000000000000000000000000000000000000 --- a/checkpoint-900/adapter_config.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "alpha_pattern": {}, - "auto_mapping": null, - "base_model_name_or_path": "/root/chatglm3-6b", - "bias": "none", - "fan_in_fan_out": false, - "inference_mode": true, - "init_lora_weights": true, - "layers_pattern": null, - "layers_to_transform": null, - "loftq_config": {}, - "lora_alpha": 64.0, - "lora_dropout": 0.1, - "megatron_config": null, - "megatron_core": "megatron.core", - "modules_to_save": null, - "peft_type": "LORA", - "r": 32, - "rank_pattern": {}, - "revision": null, - "target_modules": [ - "query_key_value" - ], - "task_type": "CAUSAL_LM" -} \ No newline at end of file diff --git a/checkpoint-900/adapter_model.safetensors b/checkpoint-900/adapter_model.safetensors deleted file mode 100644 index 33e5a787630d1ae5a1bb574f3af127e2d85d5dbe..0000000000000000000000000000000000000000 --- a/checkpoint-900/adapter_model.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:d76fc7df89c1372ba69b5ea09d4556926ab8898e1ec1309a212a9c093f148066 -size 31204248 diff --git a/checkpoint-900/optimizer.pt b/checkpoint-900/optimizer.pt deleted file mode 
100644 index d5323b5d71980670a680724e495ebdc170e0383e..0000000000000000000000000000000000000000 --- a/checkpoint-900/optimizer.pt +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:19221d4f40b031762d83f752f44f9806a0eff4370bb1d088a3e90d5130de401d -size 62437882 diff --git a/checkpoint-900/rng_state.pth b/checkpoint-900/rng_state.pth deleted file mode 100644 index f0cfdc7b516bfceed6ea16757f9be14b76258fca..0000000000000000000000000000000000000000 --- a/checkpoint-900/rng_state.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:b52d148e0bdcfae756cc5d1bed2f078908b8ca30fa4898562ed9c81aba81cf6c -size 14244 diff --git a/checkpoint-900/scheduler.pt b/checkpoint-900/scheduler.pt deleted file mode 100644 index 4a8fe22ce3f23364bc2f1add52716d45d01ec762..0000000000000000000000000000000000000000 --- a/checkpoint-900/scheduler.pt +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:91ad790c8ce464cf3a5f4d7efae5aed7c0aca618d8bf7bd220d6b628d8fbd816 -size 1064 diff --git a/checkpoint-900/special_tokens_map.json b/checkpoint-900/special_tokens_map.json deleted file mode 100644 index dd02cd16ef3e1cfed3ce0f8cd09b983412317a48..0000000000000000000000000000000000000000 --- a/checkpoint-900/special_tokens_map.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "additional_special_tokens": [ - { - "content": "<|user|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false - }, - { - "content": "<|observation|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false - } - ] -} diff --git a/checkpoint-900/tokenization_chatglm.py b/checkpoint-900/tokenization_chatglm.py deleted file mode 100644 index 862e8f9a75bc874741cababc3b352cbbfe3611ad..0000000000000000000000000000000000000000 --- a/checkpoint-900/tokenization_chatglm.py +++ /dev/null @@ -1,300 +0,0 @@ -import json -import os -import re -from typing import List, Optional, Union, Dict -from sentencepiece import SentencePieceProcessor -from transformers import PreTrainedTokenizer -from transformers.utils import logging, PaddingStrategy -from transformers.tokenization_utils_base import EncodedInput, BatchEncoding - - -class SPTokenizer: - def __init__(self, model_path: str): - # reload tokenizer - assert os.path.isfile(model_path), model_path - self.sp_model = SentencePieceProcessor(model_file=model_path) - - # BOS / EOS token IDs - self.n_words: int = self.sp_model.vocab_size() - self.bos_id: int = self.sp_model.bos_id() - self.eos_id: int = self.sp_model.eos_id() - self.pad_id: int = self.sp_model.unk_id() - assert self.sp_model.vocab_size() == self.sp_model.get_piece_size() - - role_special_tokens = ["<|system|>", "<|user|>", "<|assistant|>", "<|observation|>"] - special_tokens = ["[MASK]", "[gMASK]", "[sMASK]", "sop", "eop"] + role_special_tokens - self.special_tokens = {} - self.index_special_tokens = {} - for token in special_tokens: - self.special_tokens[token] = self.n_words - self.index_special_tokens[self.n_words] = token - self.n_words += 1 - self.role_special_token_expression = "|".join([re.escape(token) for token in role_special_tokens]) - - def tokenize(self, s: str, encode_special_tokens=False): - if encode_special_tokens: - last_index = 0 - t = [] - for match in re.finditer(self.role_special_token_expression, s): - if last_index < match.start(): - t.extend(self.sp_model.EncodeAsPieces(s[last_index:match.start()])) - t.append(s[match.start():match.end()]) - last_index = match.end() - if last_index < 
len(s): - t.extend(self.sp_model.EncodeAsPieces(s[last_index:])) - return t - else: - return self.sp_model.EncodeAsPieces(s) - - def encode(self, s: str, bos: bool = False, eos: bool = False) -> List[int]: - assert type(s) is str - t = self.sp_model.encode(s) - if bos: - t = [self.bos_id] + t - if eos: - t = t + [self.eos_id] - return t - - def decode(self, t: List[int]) -> str: - text, buffer = "", [] - for token in t: - if token in self.index_special_tokens: - if buffer: - text += self.sp_model.decode(buffer) - buffer = [] - text += self.index_special_tokens[token] - else: - buffer.append(token) - if buffer: - text += self.sp_model.decode(buffer) - return text - - def decode_tokens(self, tokens: List[str]) -> str: - text = self.sp_model.DecodePieces(tokens) - return text - - def convert_token_to_id(self, token): - """ Converts a token (str) in an id using the vocab. """ - if token in self.special_tokens: - return self.special_tokens[token] - return self.sp_model.PieceToId(token) - - def convert_id_to_token(self, index): - """Converts an index (integer) in a token (str) using the vocab.""" - if index in self.index_special_tokens: - return self.index_special_tokens[index] - if index in [self.eos_id, self.bos_id, self.pad_id] or index < 0 or index > self.sp_model.vocab_size(): - return "" - return self.sp_model.IdToPiece(index) - - -class ChatGLMTokenizer(PreTrainedTokenizer): - vocab_files_names = {"vocab_file": "tokenizer.model"} - - model_input_names = ["input_ids", "attention_mask", "position_ids"] - - def __init__(self, vocab_file, padding_side="left", clean_up_tokenization_spaces=False, encode_special_tokens=False, - **kwargs): - self.name = "GLMTokenizer" - - self.vocab_file = vocab_file - self.tokenizer = SPTokenizer(vocab_file) - self.special_tokens = { - "<bos>": self.tokenizer.bos_id, - "<eos>": self.tokenizer.eos_id, - "<pad>": self.tokenizer.pad_id - } - self.encode_special_tokens = encode_special_tokens - super().__init__(padding_side=padding_side, clean_up_tokenization_spaces=clean_up_tokenization_spaces, - encode_special_tokens=encode_special_tokens, - **kwargs) - - def get_command(self, token): - if token in self.special_tokens: - return self.special_tokens[token] - assert token in self.tokenizer.special_tokens, f"{token} is not a special token for {self.name}" - return self.tokenizer.special_tokens[token] - - @property - def unk_token(self) -> str: - return "<unk>" - - @property - def pad_token(self) -> str: - return "<unk>" - - @property - def pad_token_id(self): - return self.get_command("<pad>") - - @property - def eos_token(self) -> str: - return "</s>" - - @property - def eos_token_id(self): - return self.get_command("<eos>") - - @property - def vocab_size(self): - return self.tokenizer.n_words - - def get_vocab(self): - """ Returns vocab as a dict """ - vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)} - vocab.update(self.added_tokens_encoder) - return vocab - - def _tokenize(self, text, **kwargs): - return self.tokenizer.tokenize(text, encode_special_tokens=self.encode_special_tokens) - - def _convert_token_to_id(self, token): - """ Converts a token (str) in an id using the vocab.
""" - return self.tokenizer.convert_token_to_id(token) - - def _convert_id_to_token(self, index): - """Converts an index (integer) in a token (str) using the vocab.""" - return self.tokenizer.convert_id_to_token(index) - - def convert_tokens_to_string(self, tokens: List[str]) -> str: - return self.tokenizer.decode_tokens(tokens) - - def save_vocabulary(self, save_directory, filename_prefix=None): - """ - Save the vocabulary and special tokens file to a directory. - - Args: - save_directory (`str`): - The directory in which to save the vocabulary. - filename_prefix (`str`, *optional*): - An optional prefix to add to the named of the saved files. - - Returns: - `Tuple(str)`: Paths to the files saved. - """ - if os.path.isdir(save_directory): - vocab_file = os.path.join( - save_directory, self.vocab_files_names["vocab_file"] - ) - else: - vocab_file = save_directory - - with open(self.vocab_file, 'rb') as fin: - proto_str = fin.read() - - with open(vocab_file, "wb") as writer: - writer.write(proto_str) - - return (vocab_file,) - - def get_prefix_tokens(self): - prefix_tokens = [self.get_command("[gMASK]"), self.get_command("sop")] - return prefix_tokens - - def build_single_message(self, role, metadata, message): - assert role in ["system", "user", "assistant", "observation"], role - role_tokens = [self.get_command(f"<|{role}|>")] + self.tokenizer.encode(f"{metadata}\n") - message_tokens = self.tokenizer.encode(message) - tokens = role_tokens + message_tokens - return tokens - - def build_chat_input(self, query, history=None, role="user"): - if history is None: - history = [] - input_ids = [] - for item in history: - content = item["content"] - if item["role"] == "system" and "tools" in item: - content = content + "\n" + json.dumps(item["tools"], indent=4, ensure_ascii=False) - input_ids.extend(self.build_single_message(item["role"], item.get("metadata", ""), content)) - input_ids.extend(self.build_single_message(role, "", query)) - input_ids.extend([self.get_command("<|assistant|>")]) - return self.batch_encode_plus([input_ids], return_tensors="pt", is_split_into_words=True) - - def build_inputs_with_special_tokens( - self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None - ) -> List[int]: - """ - Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and - adding special tokens. A BERT sequence has the following format: - - - single sequence: `[CLS] X [SEP]` - - pair of sequences: `[CLS] A [SEP] B [SEP]` - - Args: - token_ids_0 (`List[int]`): - List of IDs to which the special tokens will be added. - token_ids_1 (`List[int]`, *optional*): - Optional second list of IDs for sequence pairs. - - Returns: - `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. - """ - prefix_tokens = self.get_prefix_tokens() - token_ids_0 = prefix_tokens + token_ids_0 - if token_ids_1 is not None: - token_ids_0 = token_ids_0 + token_ids_1 + [self.get_command("")] - return token_ids_0 - - def _pad( - self, - encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], - max_length: Optional[int] = None, - padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, - pad_to_multiple_of: Optional[int] = None, - return_attention_mask: Optional[bool] = None, - ) -> dict: - """ - Pad encoded inputs (on left/right and up to predefined length or max length in the batch) - - Args: - encoded_inputs: - Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`). 
- max_length: maximum length of the returned list and optionally padding length (see below). - Will truncate by taking into account the special tokens. - padding_strategy: PaddingStrategy to use for padding. - - - PaddingStrategy.LONGEST Pad to the longest sequence in the batch - - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) - - PaddingStrategy.DO_NOT_PAD: Do not pad - The tokenizer padding sides are defined in self.padding_side: - - - 'left': pads on the left of the sequences - - 'right': pads on the right of the sequences - pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. - This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability - `>= 7.5` (Volta). - return_attention_mask: - (optional) Set to False to avoid returning attention mask (default: set to model specifics) - """ - # Load from model defaults - assert self.padding_side == "left" - - required_input = encoded_inputs[self.model_input_names[0]] - seq_length = len(required_input) - - if padding_strategy == PaddingStrategy.LONGEST: - max_length = len(required_input) - - if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): - max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of - - needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length - - # Initialize attention mask if not present. - if "attention_mask" not in encoded_inputs: - encoded_inputs["attention_mask"] = [1] * seq_length - - if "position_ids" not in encoded_inputs: - encoded_inputs["position_ids"] = list(range(seq_length)) - - if needs_to_be_padded: - difference = max_length - len(required_input) - - if "attention_mask" in encoded_inputs: - encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"] - if "position_ids" in encoded_inputs: - encoded_inputs["position_ids"] = [0] * difference + encoded_inputs["position_ids"] - encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input - - return encoded_inputs diff --git a/checkpoint-900/tokenizer.model b/checkpoint-900/tokenizer.model deleted file mode 100644 index 8a8007697b7cc3d3868dcffbbebf8c1f2bd690ba..0000000000000000000000000000000000000000 --- a/checkpoint-900/tokenizer.model +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e7dc4c393423b76e4373e5157ddc34803a0189ba96b21ddbb40269d31468a6f2 -size 1018370 diff --git a/checkpoint-900/tokenizer_config.json b/checkpoint-900/tokenizer_config.json deleted file mode 100644 index f0e543dcb5c184576e9e88e2c48b586290d71953..0000000000000000000000000000000000000000 --- a/checkpoint-900/tokenizer_config.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "added_tokens_decoder": { - "64795": { - "content": "<|user|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": true - }, - "64797": { - "content": "<|observation|>", - "lstrip": false, - "normalized": false, - "rstrip": false, - "single_word": false, - "special": true - } - }, - "additional_special_tokens": [ - "<|user|>", - "<|observation|>" - ], - "auto_map": { - "AutoTokenizer": [ - "tokenization_chatglm.ChatGLMTokenizer", - null - ] - }, - "clean_up_tokenization_spaces": false, - "do_lower_case": false, - "encode_special_tokens": false, - "eos_token": "", - "model_max_length": 1000000000000000019884624838656, - "pad_token": "", - "padding_side": "right", - 
"remove_space": false, - "split_special_tokens": false, - "tokenizer_class": "ChatGLMTokenizer", - "unk_token": "" -} diff --git a/checkpoint-900/trainer_state.json b/checkpoint-900/trainer_state.json deleted file mode 100644 index 0b463bff7fe978a33b0ad47b8bf8fc159d62873d..0000000000000000000000000000000000000000 --- a/checkpoint-900/trainer_state.json +++ /dev/null @@ -1,1101 +0,0 @@ -{ - "best_metric": null, - "best_model_checkpoint": null, - "epoch": 20.454545454545453, - "eval_steps": 500, - "global_step": 900, - "is_hyper_param_search": false, - "is_local_process_zero": true, - "is_world_process_zero": true, - "log_history": [ - { - "epoch": 0.11, - "learning_rate": 0.001999898043009433, - "loss": 4.5094, - "step": 5 - }, - { - "epoch": 0.23, - "learning_rate": 0.0019995921928281893, - "loss": 3.8047, - "step": 10 - }, - { - "epoch": 0.34, - "learning_rate": 0.001999082511823396, - "loss": 3.8813, - "step": 15 - }, - { - "epoch": 0.45, - "learning_rate": 0.0019983691039261358, - "loss": 3.7188, - "step": 20 - }, - { - "epoch": 0.57, - "learning_rate": 0.0019974521146102534, - "loss": 3.6695, - "step": 25 - }, - { - "epoch": 0.68, - "learning_rate": 0.001996331730862691, - "loss": 3.7078, - "step": 30 - }, - { - "epoch": 0.8, - "learning_rate": 0.0019950081811453595, - "loss": 3.6844, - "step": 35 - }, - { - "epoch": 0.91, - "learning_rate": 0.0019934817353485504, - "loss": 3.6961, - "step": 40 - }, - { - "epoch": 1.02, - "learning_rate": 0.0019917527047359027, - "loss": 3.5758, - "step": 45 - }, - { - "epoch": 1.14, - "learning_rate": 0.001989821441880933, - "loss": 3.4102, - "step": 50 - }, - { - "epoch": 1.25, - "learning_rate": 0.0019876883405951376, - "loss": 3.3984, - "step": 55 - }, - { - "epoch": 1.36, - "learning_rate": 0.001985353835847693, - "loss": 3.3602, - "step": 60 - }, - { - "epoch": 1.48, - "learning_rate": 0.0019828184036767556, - "loss": 3.4461, - "step": 65 - }, - { - "epoch": 1.59, - "learning_rate": 0.0019800825610923932, - "loss": 3.3461, - "step": 70 - }, - { - "epoch": 1.7, - "learning_rate": 0.0019771468659711597, - "loss": 3.4172, - "step": 75 - }, - { - "epoch": 1.82, - "learning_rate": 0.0019740119169423336, - "loss": 3.4359, - "step": 80 - }, - { - "epoch": 1.93, - "learning_rate": 0.0019706783532658523, - "loss": 3.5141, - "step": 85 - }, - { - "epoch": 2.05, - "learning_rate": 0.001967146854701957, - "loss": 3.2242, - "step": 90 - }, - { - "epoch": 2.16, - "learning_rate": 0.0019634181413725788, - "loss": 3.0227, - "step": 95 - }, - { - "epoch": 2.27, - "learning_rate": 0.0019594929736144974, - "loss": 2.8984, - "step": 100 - }, - { - "epoch": 2.39, - "learning_rate": 0.001955372151824297, - "loss": 3.0781, - "step": 105 - }, - { - "epoch": 2.5, - "learning_rate": 0.0019510565162951536, - "loss": 3.1203, - "step": 110 - }, - { - "epoch": 2.61, - "learning_rate": 0.00194654694704549, - "loss": 3.1828, - "step": 115 - }, - { - "epoch": 2.73, - "learning_rate": 0.0019418443636395248, - "loss": 3.0531, - "step": 120 - }, - { - "epoch": 2.84, - "learning_rate": 0.001936949724999762, - "loss": 3.1523, - "step": 125 - }, - { - "epoch": 2.95, - "learning_rate": 0.0019318640292114524, - "loss": 3.1156, - "step": 130 - }, - { - "epoch": 3.07, - "learning_rate": 0.0019265883133190713, - "loss": 2.7844, - "step": 135 - }, - { - "epoch": 3.18, - "learning_rate": 0.0019211236531148502, - "loss": 2.6711, - "step": 140 - }, - { - "epoch": 3.3, - "learning_rate": 0.0019154711629194062, - "loss": 2.6609, - "step": 145 - }, - { - "epoch": 3.41, - "learning_rate": 
0.0019096319953545184, - "loss": 2.7531, - "step": 150 - }, - { - "epoch": 3.52, - "learning_rate": 0.0019036073411080917, - "loss": 2.7977, - "step": 155 - }, - { - "epoch": 3.64, - "learning_rate": 0.0018973984286913585, - "loss": 2.7914, - "step": 160 - }, - { - "epoch": 3.75, - "learning_rate": 0.0018910065241883678, - "loss": 2.8188, - "step": 165 - }, - { - "epoch": 3.86, - "learning_rate": 0.0018844329309978143, - "loss": 2.8945, - "step": 170 - }, - { - "epoch": 3.98, - "learning_rate": 0.0018776789895672556, - "loss": 2.8883, - "step": 175 - }, - { - "epoch": 4.09, - "learning_rate": 0.0018707460771197773, - "loss": 2.4617, - "step": 180 - }, - { - "epoch": 4.2, - "learning_rate": 0.001863635607373157, - "loss": 2.4633, - "step": 185 - }, - { - "epoch": 4.32, - "learning_rate": 0.001856349030251589, - "loss": 2.5094, - "step": 190 - }, - { - "epoch": 4.43, - "learning_rate": 0.0018488878315900226, - "loss": 2.432, - "step": 195 - }, - { - "epoch": 4.55, - "learning_rate": 0.0018412535328311812, - "loss": 2.5648, - "step": 200 - }, - { - "epoch": 4.66, - "learning_rate": 0.0018334476907153176, - "loss": 2.4836, - "step": 205 - }, - { - "epoch": 4.77, - "learning_rate": 0.001825471896962774, - "loss": 2.6617, - "step": 210 - }, - { - "epoch": 4.89, - "learning_rate": 0.0018173277779494068, - "loss": 2.6734, - "step": 215 - }, - { - "epoch": 5.0, - "learning_rate": 0.0018090169943749475, - "loss": 2.6742, - "step": 220 - }, - { - "epoch": 5.11, - "learning_rate": 0.0018005412409243604, - "loss": 2.1379, - "step": 225 - }, - { - "epoch": 5.23, - "learning_rate": 0.0017919022459222751, - "loss": 2.1508, - "step": 230 - }, - { - "epoch": 5.34, - "learning_rate": 0.0017831017709805555, - "loss": 2.2582, - "step": 235 - }, - { - "epoch": 5.45, - "learning_rate": 0.0017741416106390826, - "loss": 2.2367, - "step": 240 - }, - { - "epoch": 5.57, - "learning_rate": 0.0017650235919998232, - "loss": 2.325, - "step": 245 - }, - { - "epoch": 5.68, - "learning_rate": 0.0017557495743542584, - "loss": 2.2703, - "step": 250 - }, - { - "epoch": 5.8, - "learning_rate": 0.0017463214488042471, - "loss": 2.3703, - "step": 255 - }, - { - "epoch": 5.91, - "learning_rate": 0.001736741137876405, - "loss": 2.4648, - "step": 260 - }, - { - "epoch": 6.02, - "learning_rate": 0.0017270105951300739, - "loss": 2.2734, - "step": 265 - }, - { - "epoch": 6.14, - "learning_rate": 0.0017171318047589637, - "loss": 1.9898, - "step": 270 - }, - { - "epoch": 6.25, - "learning_rate": 0.0017071067811865474, - "loss": 1.9816, - "step": 275 - }, - { - "epoch": 6.36, - "learning_rate": 0.0016969375686552938, - "loss": 1.9648, - "step": 280 - }, - { - "epoch": 6.48, - "learning_rate": 0.0016866262408098134, - "loss": 2.1672, - "step": 285 - }, - { - "epoch": 6.59, - "learning_rate": 0.0016761749002740195, - "loss": 2.0074, - "step": 290 - }, - { - "epoch": 6.7, - "learning_rate": 0.0016655856782223683, - "loss": 2.1598, - "step": 295 - }, - { - "epoch": 6.82, - "learning_rate": 0.0016548607339452852, - "loss": 2.0996, - "step": 300 - }, - { - "epoch": 6.93, - "learning_rate": 0.0016440022544088554, - "loss": 2.1434, - "step": 305 - }, - { - "epoch": 7.05, - "learning_rate": 0.0016330124538088703, - "loss": 2.0699, - "step": 310 - }, - { - "epoch": 7.16, - "learning_rate": 0.0016218935731193223, - "loss": 1.7312, - "step": 315 - }, - { - "epoch": 7.27, - "learning_rate": 0.0016106478796354383, - "loss": 1.7799, - "step": 320 - }, - { - "epoch": 7.39, - "learning_rate": 0.0015992776665113468, - "loss": 1.7008, - "step": 325 - }, - { 
- "epoch": 7.5, - "learning_rate": 0.0015877852522924731, - "loss": 1.8969, - "step": 330 - }, - { - "epoch": 7.61, - "learning_rate": 0.0015761729804427528, - "loss": 1.8156, - "step": 335 - }, - { - "epoch": 7.73, - "learning_rate": 0.0015644432188667695, - "loss": 1.9336, - "step": 340 - }, - { - "epoch": 7.84, - "learning_rate": 0.0015525983594269026, - "loss": 1.9918, - "step": 345 - }, - { - "epoch": 7.95, - "learning_rate": 0.0015406408174555976, - "loss": 2.0055, - "step": 350 - }, - { - "epoch": 8.07, - "learning_rate": 0.0015285730312628418, - "loss": 1.7168, - "step": 355 - }, - { - "epoch": 8.18, - "learning_rate": 0.001516397461638962, - "loss": 1.5531, - "step": 360 - }, - { - "epoch": 8.3, - "learning_rate": 0.001504116591352832, - "loss": 1.5922, - "step": 365 - }, - { - "epoch": 8.41, - "learning_rate": 0.001491732924645604, - "loss": 1.618, - "step": 370 - }, - { - "epoch": 8.52, - "learning_rate": 0.0014792489867200569, - "loss": 1.6738, - "step": 375 - }, - { - "epoch": 8.64, - "learning_rate": 0.0014666673232256737, - "loss": 1.7461, - "step": 380 - }, - { - "epoch": 8.75, - "learning_rate": 0.0014539904997395467, - "loss": 1.6746, - "step": 385 - }, - { - "epoch": 8.86, - "learning_rate": 0.0014412211012432212, - "loss": 1.7711, - "step": 390 - }, - { - "epoch": 8.98, - "learning_rate": 0.0014283617315955814, - "loss": 1.8387, - "step": 395 - }, - { - "epoch": 9.09, - "learning_rate": 0.0014154150130018866, - "loss": 1.475, - "step": 400 - }, - { - "epoch": 9.2, - "learning_rate": 0.001402383585479068, - "loss": 1.4523, - "step": 405 - }, - { - "epoch": 9.32, - "learning_rate": 0.0013892701063173917, - "loss": 1.4812, - "step": 410 - }, - { - "epoch": 9.43, - "learning_rate": 0.0013760772495385997, - "loss": 1.525, - "step": 415 - }, - { - "epoch": 9.55, - "learning_rate": 0.001362807705350641, - "loss": 1.398, - "step": 420 - }, - { - "epoch": 9.66, - "learning_rate": 0.0013494641795990985, - "loss": 1.4477, - "step": 425 - }, - { - "epoch": 9.77, - "learning_rate": 0.00133604939321543, - "loss": 1.5801, - "step": 430 - }, - { - "epoch": 9.89, - "learning_rate": 0.0013225660816621341, - "loss": 1.6422, - "step": 435 - }, - { - "epoch": 10.0, - "learning_rate": 0.0013090169943749475, - "loss": 1.5535, - "step": 440 - }, - { - "epoch": 10.11, - "learning_rate": 0.0012954048942022001, - "loss": 1.2324, - "step": 445 - }, - { - "epoch": 10.23, - "learning_rate": 0.0012817325568414298, - "loss": 1.2613, - "step": 450 - }, - { - "epoch": 10.34, - "learning_rate": 0.001268002770273379, - "loss": 1.3293, - "step": 455 - }, - { - "epoch": 10.45, - "learning_rate": 0.0012542183341934872, - "loss": 1.2852, - "step": 460 - }, - { - "epoch": 10.57, - "learning_rate": 0.0012403820594409924, - "loss": 1.3295, - "step": 465 - }, - { - "epoch": 10.68, - "learning_rate": 0.0012264967674257645, - "loss": 1.3287, - "step": 470 - }, - { - "epoch": 10.8, - "learning_rate": 0.0012125652895529767, - "loss": 1.3566, - "step": 475 - }, - { - "epoch": 10.91, - "learning_rate": 0.0011985904666457455, - "loss": 1.4414, - "step": 480 - }, - { - "epoch": 11.02, - "learning_rate": 0.0011845751483658454, - "loss": 1.3695, - "step": 485 - }, - { - "epoch": 11.14, - "learning_rate": 0.0011705221926326238, - "loss": 1.1363, - "step": 490 - }, - { - "epoch": 11.25, - "learning_rate": 0.001156434465040231, - "loss": 1.1354, - "step": 495 - }, - { - "epoch": 11.36, - "learning_rate": 0.0011423148382732854, - "loss": 1.0725, - "step": 500 - }, - { - "epoch": 11.48, - "learning_rate": 0.001128166191521093, 
- "loss": 1.1754, - "step": 505 - }, - { - "epoch": 11.59, - "learning_rate": 0.0011139914098905405, - "loss": 1.1848, - "step": 510 - }, - { - "epoch": 11.7, - "learning_rate": 0.0010997933838177826, - "loss": 1.2354, - "step": 515 - }, - { - "epoch": 11.82, - "learning_rate": 0.0010855750084788399, - "loss": 1.1984, - "step": 520 - }, - { - "epoch": 11.93, - "learning_rate": 0.0010713391831992322, - "loss": 1.2666, - "step": 525 - }, - { - "epoch": 12.05, - "learning_rate": 0.001057088810862768, - "loss": 1.1408, - "step": 530 - }, - { - "epoch": 12.16, - "learning_rate": 0.0010428267973196027, - "loss": 0.9385, - "step": 535 - }, - { - "epoch": 12.27, - "learning_rate": 0.0010285560507936962, - "loss": 1.0158, - "step": 540 - }, - { - "epoch": 12.39, - "learning_rate": 0.0010142794812897874, - "loss": 0.9936, - "step": 545 - }, - { - "epoch": 12.5, - "learning_rate": 0.001, - "loss": 0.9891, - "step": 550 - }, - { - "epoch": 12.61, - "learning_rate": 0.000985720518710213, - "loss": 1.0684, - "step": 555 - }, - { - "epoch": 12.73, - "learning_rate": 0.0009714439492063038, - "loss": 1.076, - "step": 560 - }, - { - "epoch": 12.84, - "learning_rate": 0.0009571732026803976, - "loss": 1.0609, - "step": 565 - }, - { - "epoch": 12.95, - "learning_rate": 0.000942911189137232, - "loss": 1.1297, - "step": 570 - }, - { - "epoch": 13.07, - "learning_rate": 0.0009286608168007677, - "loss": 0.9342, - "step": 575 - }, - { - "epoch": 13.18, - "learning_rate": 0.0009144249915211606, - "loss": 0.8511, - "step": 580 - }, - { - "epoch": 13.3, - "learning_rate": 0.0009002066161822172, - "loss": 0.8336, - "step": 585 - }, - { - "epoch": 13.41, - "learning_rate": 0.0008860085901094594, - "loss": 0.8652, - "step": 590 - }, - { - "epoch": 13.52, - "learning_rate": 0.0008718338084789072, - "loss": 0.9744, - "step": 595 - }, - { - "epoch": 13.64, - "learning_rate": 0.000857685161726715, - "loss": 0.9006, - "step": 600 - }, - { - "epoch": 13.75, - "learning_rate": 0.000843565534959769, - "loss": 0.9619, - "step": 605 - }, - { - "epoch": 13.86, - "learning_rate": 0.0008294778073673762, - "loss": 0.9123, - "step": 610 - }, - { - "epoch": 13.98, - "learning_rate": 0.0008154248516341547, - "loss": 0.9959, - "step": 615 - }, - { - "epoch": 14.09, - "learning_rate": 0.0008014095333542549, - "loss": 0.7503, - "step": 620 - }, - { - "epoch": 14.2, - "learning_rate": 0.0007874347104470233, - "loss": 0.7357, - "step": 625 - }, - { - "epoch": 14.32, - "learning_rate": 0.0007735032325742355, - "loss": 0.7477, - "step": 630 - }, - { - "epoch": 14.43, - "learning_rate": 0.0007596179405590076, - "loss": 0.8088, - "step": 635 - }, - { - "epoch": 14.55, - "learning_rate": 0.0007457816658065133, - "loss": 0.7652, - "step": 640 - }, - { - "epoch": 14.66, - "learning_rate": 0.0007319972297266214, - "loss": 0.7847, - "step": 645 - }, - { - "epoch": 14.77, - "learning_rate": 0.0007182674431585703, - "loss": 0.7984, - "step": 650 - }, - { - "epoch": 14.89, - "learning_rate": 0.0007045951057978, - "loss": 0.8732, - "step": 655 - }, - { - "epoch": 15.0, - "learning_rate": 0.0006909830056250527, - "loss": 0.8258, - "step": 660 - }, - { - "epoch": 15.11, - "learning_rate": 0.0006774339183378663, - "loss": 0.6311, - "step": 665 - }, - { - "epoch": 15.23, - "learning_rate": 0.0006639506067845697, - "loss": 0.6543, - "step": 670 - }, - { - "epoch": 15.34, - "learning_rate": 0.0006505358204009018, - "loss": 0.6421, - "step": 675 - }, - { - "epoch": 15.45, - "learning_rate": 0.0006371922946493591, - "loss": 0.6937, - "step": 680 - }, - { - 
"epoch": 15.57, - "learning_rate": 0.0006239227504614003, - "loss": 0.6887, - "step": 685 - }, - { - "epoch": 15.68, - "learning_rate": 0.0006107298936826086, - "loss": 0.7097, - "step": 690 - }, - { - "epoch": 15.8, - "learning_rate": 0.0005976164145209322, - "loss": 0.6778, - "step": 695 - }, - { - "epoch": 15.91, - "learning_rate": 0.0005845849869981136, - "loss": 0.7124, - "step": 700 - }, - { - "epoch": 16.02, - "learning_rate": 0.000571638268404419, - "loss": 0.7053, - "step": 705 - }, - { - "epoch": 16.14, - "learning_rate": 0.0005587788987567784, - "loss": 0.5863, - "step": 710 - }, - { - "epoch": 16.25, - "learning_rate": 0.0005460095002604533, - "loss": 0.5588, - "step": 715 - }, - { - "epoch": 16.36, - "learning_rate": 0.0005333326767743263, - "loss": 0.5363, - "step": 720 - }, - { - "epoch": 16.48, - "learning_rate": 0.0005207510132799435, - "loss": 0.6137, - "step": 725 - }, - { - "epoch": 16.59, - "learning_rate": 0.0005082670753543961, - "loss": 0.5606, - "step": 730 - }, - { - "epoch": 16.7, - "learning_rate": 0.0004958834086471683, - "loss": 0.629, - "step": 735 - }, - { - "epoch": 16.82, - "learning_rate": 0.00048360253836103817, - "loss": 0.5754, - "step": 740 - }, - { - "epoch": 16.93, - "learning_rate": 0.0004714269687371581, - "loss": 0.6239, - "step": 745 - }, - { - "epoch": 17.05, - "learning_rate": 0.0004593591825444028, - "loss": 0.5807, - "step": 750 - }, - { - "epoch": 17.16, - "learning_rate": 0.0004474016405730973, - "loss": 0.465, - "step": 755 - }, - { - "epoch": 17.27, - "learning_rate": 0.00043555678113323104, - "loss": 0.4871, - "step": 760 - }, - { - "epoch": 17.39, - "learning_rate": 0.00042382701955724725, - "loss": 0.4623, - "step": 765 - }, - { - "epoch": 17.5, - "learning_rate": 0.00041221474770752696, - "loss": 0.5059, - "step": 770 - }, - { - "epoch": 17.61, - "learning_rate": 0.00040072233348865304, - "loss": 0.5021, - "step": 775 - }, - { - "epoch": 17.73, - "learning_rate": 0.0003893521203645618, - "loss": 0.5138, - "step": 780 - }, - { - "epoch": 17.84, - "learning_rate": 0.00037810642688067796, - "loss": 0.5212, - "step": 785 - }, - { - "epoch": 17.95, - "learning_rate": 0.00036698754619112975, - "loss": 0.5611, - "step": 790 - }, - { - "epoch": 18.07, - "learning_rate": 0.00035599774559114475, - "loss": 0.4956, - "step": 795 - }, - { - "epoch": 18.18, - "learning_rate": 0.000345139266054715, - "loss": 0.4243, - "step": 800 - }, - { - "epoch": 18.3, - "learning_rate": 0.0003344143217776319, - "loss": 0.4391, - "step": 805 - }, - { - "epoch": 18.41, - "learning_rate": 0.00032382509972598086, - "loss": 0.4627, - "step": 810 - }, - { - "epoch": 18.52, - "learning_rate": 0.0003133737591901864, - "loss": 0.4208, - "step": 815 - }, - { - "epoch": 18.64, - "learning_rate": 0.0003030624313447067, - "loss": 0.45, - "step": 820 - }, - { - "epoch": 18.75, - "learning_rate": 0.00029289321881345256, - "loss": 0.44, - "step": 825 - }, - { - "epoch": 18.86, - "learning_rate": 0.0002828681952410366, - "loss": 0.4451, - "step": 830 - }, - { - "epoch": 18.98, - "learning_rate": 0.0002729894048699265, - "loss": 0.4494, - "step": 835 - }, - { - "epoch": 19.09, - "learning_rate": 0.00026325886212359495, - "loss": 0.3839, - "step": 840 - }, - { - "epoch": 19.2, - "learning_rate": 0.0002536785511957531, - "loss": 0.3728, - "step": 845 - }, - { - "epoch": 19.32, - "learning_rate": 0.00024425042564574185, - "loss": 0.4126, - "step": 850 - }, - { - "epoch": 19.43, - "learning_rate": 0.00023497640800017682, - "loss": 0.4183, - "step": 855 - }, - { - "epoch": 19.55, - 
"learning_rate": 0.0002258583893609175, - "loss": 0.3778, - "step": 860 - }, - { - "epoch": 19.66, - "learning_rate": 0.00021689822901944456, - "loss": 0.3758, - "step": 865 - }, - { - "epoch": 19.77, - "learning_rate": 0.000208097754077725, - "loss": 0.4034, - "step": 870 - }, - { - "epoch": 19.89, - "learning_rate": 0.0001994587590756397, - "loss": 0.4085, - "step": 875 - }, - { - "epoch": 20.0, - "learning_rate": 0.00019098300562505265, - "loss": 0.3673, - "step": 880 - }, - { - "epoch": 20.11, - "learning_rate": 0.0001826722220505931, - "loss": 0.363, - "step": 885 - }, - { - "epoch": 20.23, - "learning_rate": 0.000174528103037226, - "loss": 0.3707, - "step": 890 - }, - { - "epoch": 20.34, - "learning_rate": 0.00016655230928468257, - "loss": 0.369, - "step": 895 - }, - { - "epoch": 20.45, - "learning_rate": 0.00015874646716881869, - "loss": 0.3528, - "step": 900 - } - ], - "logging_steps": 5, - "max_steps": 1100, - "num_input_tokens_seen": 0, - "num_train_epochs": 25, - "save_steps": 100, - "total_flos": 4.587283785641165e+17, - "train_batch_size": 4, - "trial_name": null, - "trial_params": null -} diff --git a/checkpoint-900/training_args.bin b/checkpoint-900/training_args.bin deleted file mode 100644 index ff8dbcdca96337fe706e3b8a5e49365cea791f82..0000000000000000000000000000000000000000 --- a/checkpoint-900/training_args.bin +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:fef6a3ae006ec4c51dbcf0a3e569288ca5ab1bbc97f41768934c32153b03277c -size 4920