eval with merged model
competition/12_InterLM_Merged_Model_M3.ipynb
ADDED
@@ -0,0 +1 @@
+
{"cells":[{"cell_type":"code","execution_count":1,"metadata":{"executionInfo":{"elapsed":476,"status":"ok","timestamp":1720679526275,"user":{"displayName":"HUANG DONGHAO _","userId":"00977795705617022768"},"user_tz":-480},"id":"uWKRSV6eZsCn"},"outputs":[],"source":["%load_ext autoreload\n","%autoreload 2"]},{"cell_type":"code","execution_count":2,"metadata":{"application/vnd.databricks.v1+cell":{"cellMetadata":{"byteLimit":2048000,"rowLimit":10000},"inputWidgets":{},"nuid":"eb33b19f-1206-41ee-84e2-e6258a12eef7","showTitle":false,"title":""},"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":2534,"status":"ok","timestamp":1720679529344,"user":{"displayName":"HUANG DONGHAO _","userId":"00977795705617022768"},"user_tz":-480},"id":"xwFh14uiZBrI","outputId":"d767799c-34c2-46a5-f052-378146a55321"},"outputs":[],"source":["from pathlib import Path\n","\n","try:\n"," from google.colab import drive\n","\n"," drive.mount(\"/content/drive\")\n"," workding_dir = \"/content/drive/MyDrive/logical-reasoning/\"\n","except ModuleNotFoundError:\n"," workding_dir = str(Path.cwd().parent)"]},{"cell_type":"code","execution_count":3,"metadata":{"application/vnd.databricks.v1+cell":{"cellMetadata":{"byteLimit":2048000,"rowLimit":10000},"inputWidgets":{},"nuid":"6d394937-6c99-4a7c-9d32-7600a280032f","showTitle":false,"title":""},"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":5,"status":"ok","timestamp":1720679529345,"user":{"displayName":"HUANG DONGHAO _","userId":"00977795705617022768"},"user_tz":-480},"id":"G5pNu3zgZBrL","outputId":"160a554f-fb08-4aa0-bc00-0422fb7c1fac"},"outputs":[{"name":"stdout","output_type":"stream","text":["workding dir: /Users/inflaton/code/engd/projects/logical-reasoning\n"]}],"source":["import os\n","import sys\n","from pathlib import Path\n","\n","os.chdir(workding_dir)\n","sys.path.append(workding_dir)\n","print(\"workding dir:\", workding_dir)"]},{"cell_type":"code","execution_count":4,"metadata":{"application/vnd.databricks.v1+cell":{"cellMetadata":{"byteLimit":2048000,"rowLimit":10000},"inputWidgets":{},"nuid":"9f67ec60-2f24-411c-84eb-0dd664b44775","showTitle":false,"title":""},"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":3,"status":"ok","timestamp":1720679529345,"user":{"displayName":"HUANG DONGHAO _","userId":"00977795705617022768"},"user_tz":-480},"id":"hPCC-6m7ZBrM","outputId":"c7aa2c96-5e99-440a-c148-201d79465ff9"},"outputs":[{"name":"stdout","output_type":"stream","text":["loading env vars from: /Users/inflaton/code/engd/projects/logical-reasoning/.env\n"]},{"data":{"text/plain":["True"]},"execution_count":4,"metadata":{},"output_type":"execute_result"}],"source":["from dotenv import find_dotenv, load_dotenv\n","\n","found_dotenv = find_dotenv(\".env\")\n","\n","if len(found_dotenv) == 0:\n"," found_dotenv = find_dotenv(\".env.example\")\n","print(f\"loading env vars from: {found_dotenv}\")\n","load_dotenv(found_dotenv, override=True)"]},{"cell_type":"code","execution_count":5,"metadata":{"application/vnd.databricks.v1+cell":{"cellMetadata":{"byteLimit":2048000,"rowLimit":10000},"inputWidgets":{},"nuid":"f1597656-8042-4878-9d3b-9ebfb8dd86dc","showTitle":false,"title":""},"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":3,"status":"ok","timestamp":1720679529345,"user":{"displayName":"HUANG DONGHAO 
_","userId":"00977795705617022768"},"user_tz":-480},"id":"1M3IraVtZBrM","outputId":"29ab35f6-2970-4ade-d85d-3174acf8cda0"},"outputs":[{"name":"stdout","output_type":"stream","text":["llama-factory/merged_models/internlm2_5-7b-chat-1m_sft_bf16_p2_full None False datasets/mgtv results/mgtv-results_merged_model.csv\n"]}],"source":["import os\n","\n","model_name = os.getenv(\"MODEL_NAME\")\n","adapter_name_or_path = os.getenv(\"ADAPTER_NAME_OR_PATH\")\n","load_in_4bit = os.getenv(\"LOAD_IN_4BIT\") == \"true\"\n","data_path = os.getenv(\"LOGICAL_REASONING_DATA_PATH\")\n","results_path = os.getenv(\"LOGICAL_REASONING_RESULTS_PATH\")\n","use_english_datasets = os.getenv(\"USE_ENGLISH_DATASETS\") == \"true\"\n","\n","\n","print(model_name, adapter_name_or_path, load_in_4bit, data_path, results_path)"]},{"cell_type":"code","execution_count":6,"metadata":{"application/vnd.databricks.v1+cell":{"cellMetadata":{"byteLimit":2048000,"rowLimit":10000},"inputWidgets":{},"nuid":"b2a43943-9324-4839-9a47-cfa72de2244b","showTitle":false,"title":""},"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":564,"status":"ok","timestamp":1720679529907,"user":{"displayName":"HUANG DONGHAO _","userId":"00977795705617022768"},"user_tz":-480},"id":"UgMvt6dIZBrM","outputId":"ce37581c-fd26-46c2-ad87-d933d99f68f7"},"outputs":[{"name":"stdout","output_type":"stream","text":["Python 3.11.9\n","\u001b[33mWARNING: Package(s) not found: flash-attn\u001b[0m\u001b[33m\n","\u001b[0mCPU times: user 3.5 ms, sys: 6.71 ms, total: 10.2 ms\n","Wall time: 634 ms\n"]}],"source":["%%time\n","!python --version\n","!pip show flash-attn"]},{"cell_type":"code","execution_count":7,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":1685,"status":"ok","timestamp":1720679531591,"user":{"displayName":"HUANG DONGHAO _","userId":"00977795705617022768"},"user_tz":-480},"id":"ZuS_FsLyZBrN","outputId":"2cba0105-c505-4395-afbd-2f2fee6581d0"},"outputs":[{"name":"stdout","output_type":"stream","text":["loading /Users/inflaton/code/engd/projects/logical-reasoning/llm_toolkit/logical_reasoning_utils.py\n","MPS is available\n"]}],"source":["from llm_toolkit.llm_utils import *\n","from llm_toolkit.logical_reasoning_utils import *\n","\n","device = check_gpu()"]},{"cell_type":"code","execution_count":8,"metadata":{},"outputs":[{"name":"stderr","output_type":"stream","text":["Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.\n"]},{"name":"stdout","output_type":"stream","text":["loading model: llama-factory/merged_models/internlm2_5-7b-chat-1m_sft_bf16_p2_full\n"]},{"data":{"application/vnd.jupyter.widget-view+json":{"model_id":"6f6cf944680d466d817f8f57fd56cd5b","version_major":2,"version_minor":0},"text/plain":["Loading checkpoint shards: 0%| | 0/8 [00:00<?, ?it/s]"]},"metadata":{},"output_type":"display_data"},{"name":"stdout","output_type":"stream","text":["CPU times: user 782 ms, sys: 3.73 s, total: 4.51 s\n","Wall time: 30.3 s\n"]}],"source":["%%time\n","\n","model, tokenizer = load_model(model_name)"]},{"cell_type":"code","execution_count":9,"metadata":{},"outputs":[{"name":"stdout","output_type":"stream","text":["loading train/test data files\n","DatasetDict({\n"," train: Dataset({\n"," features: ['text', 'label', 'answer', 'title', 'puzzle', 'truth', 'train_text', 'prompt'],\n"," num_rows: 25000\n"," })\n"," test: Dataset({\n"," features: ['text', 'label', 'answer', 'title', 'puzzle', 'truth', 'train_text', 'prompt'],\n"," num_rows: 
3000\n"," })\n","})\n"]}],"source":["datasets = load_logical_reasoning_dataset(\n"," data_path,\n"," tokenizer=tokenizer,\n"," chinese_prompt=not use_english_datasets,\n"," using_p1=False,\n",")"]},{"cell_type":"code","execution_count":10,"metadata":{},"outputs":[{"name":"stdout","output_type":"stream","text":["<s><|im_start|>system\n","You are an expert in logical reasoning.<|im_end|>\n","<|im_start|>user\n","你是一个情景猜谜游戏的主持人。游戏规则如下:\n","\n","1. 参与者会得到一个谜面,谜面会描述一个简单又难以理解的事件。\n","2. 主持人知道谜底,谜底是谜面的答案。\n","3. 参与者可以询问任何封闭式问题来找寻事件的真相。\n","4. 对于每个问题,主持人将根据实际情况回答以下五个选项之一:是、不是、不重要、回答正确、问法错误。各回答的判断标准如下:\n"," - 若谜面和谜底能找到问题的答案,回答:是或者不是\n"," - 若谜面和谜底不能直接或者间接推断出问题的答案,回答:不重要\n"," - 若参与者提问不是一个封闭式问题或者问题难以理解,回答:问法错误\n"," - 若参与者提问基本还原了谜底真相,回答:回答正确\n","5. 回答中不能添加任何其它信息,也不能省略选项中的任何一个字。例如,不可以把“不是”省略成“不”。\n","\n","请严格按照这些规则回答参与者提出的问题。\n","\n","**谜面:** 在一个安静的夜晚,小村庄的湖边突然传来了阵阵哭泣声。第二天早晨,村长甄锐发现湖边的石头上放着一顶破旧的帽子,但没有人知道这顶帽子是从哪里来的,哭泣声又是为何。请还原故事真相。\n","\n","**谜底:** 原来,这顶破旧的帽子属于一个小男孩,他小时候与爷爷在湖边生活。爷爷教他钓鱼、游泳,还告诉他湖中的海龟是他们的朋友。后来,小男孩随父母去了城市生活,但每年夏天都会回到村子探望爷爷。然而,去年夏天,爷爷因病去世,小男孩伤心欲绝。今年夏天,他回到村子,来到湖边,想起和爷爷的美好回忆,忍不住哭泣。他将爷爷的帽子放在湖边的石头上,希望能让爷爷的在天之灵得到安慰。那晚的哭泣声正是小男孩在祭莫他亲爱的爷爷。\n","\n","**参与者提出的问题:** 哭泣和村庄有关系吗\n","<|im_end|>\n","<|im_start|>assistant\n","\n","----------------------------------------\n","<s><s><|im_start|>system\n","You are an expert in logical reasoning.<|im_end|>\n","<|im_start|>user\n","你是一个情景猜谜游戏的主持人。游戏规则如下:\n","\n","1. 参与者会得到一个谜面,谜面会描述一个简单又难以理解的事件。\n","2. 主持人知道谜底,谜底是谜面的答案。\n","3. 参与者可以询问任何封闭式问题来找寻事件的真相。\n","4. 对于每个问题,主持人将根据实际情况回答以下五个选项之一:是、不是、不重要、回答正确、问法错误。各回答的判断标准如下:\n"," - 若谜面和谜底能找到问题的答案,回答:是或者不是\n"," - 若谜���和谜底不能直接或者间接推断出问题的答案,回答:不重要\n"," - 若参与者提问不是一个封闭式问题或者问题难以理解,回答:问法错误\n"," - 若参与者提问基本还原了谜底真相,回答:回答正确\n","5. 回答中不能添加任何其它信息,也不能省略选项中的任何一个字。例如,不可以把“不是”省略成“不”。\n","\n","请严格按照这些规则回答参与者提出的问题。\n","\n","**谜面:** 在一个安静的夜晚,小村庄的湖边突然传来了阵阵哭泣声。第二天早晨,村长甄锐发现湖边的石头上放着一顶破旧的帽子,但没有人知道这顶帽子是从哪里来的,哭泣声又是为何。请还原故事真相。\n","\n","**谜底:** 原来,这顶破旧的帽子属于一个小男孩,他小时候与爷爷在湖边生活。爷爷教他钓鱼、游泳,还告诉他湖中的海龟是他们的朋友。后来,小男孩随父母去了城市生活,但每年夏天都会回到村子探望爷爷。然而,去年夏天,爷爷因病去世,小男孩伤心欲绝。今年夏天,他回到村子,来到湖边,想起和爷爷的美好回忆,忍不住哭泣。他将爷爷的帽子放在湖边的石头上,希望能让爷爷的在天之灵得到安慰。那晚的哭泣声正是小男孩在祭莫他亲爱的爷爷。\n","\n","**参与者提出的问题:** 哭泣和村庄有关系吗\n","<|im_end|>\n","<|im_start|>assistant\n","是</s>\n","CPU times: user 624 ms, sys: 865 ms, total: 1.49 s\n","Wall time: 8.11 s\n"]}],"source":["%%time\n","\n","prompt1 = datasets[\"test\"][\"prompt\"][1000]\n","print(prompt1)\n","print(\"--\" * 20)\n","test_model(model, tokenizer, prompt1, device=device)"]},{"cell_type":"code","execution_count":11,"metadata":{},"outputs":[{"name":"stderr","output_type":"stream","text":[" 12%|█▎ | 1/8 [00:02<00:18, 2.59s/it]"]},{"name":"stdout","output_type":"stream","text":["--------\n","step 1: 不是</s>\n","--------\n","step 2: 不是\n","--------\n","step 3: 不是\n","--------\n","step 4: 不是\n","--------\n","step 5: 不是\n"]},{"name":"stderr","output_type":"stream","text":["100%|██████████| 8/8 [00:17<00:00, 2.17s/it]"]},{"name":"stdout","output_type":"stream","text":["CPU times: user 2.59 s, sys: 483 ms, total: 3.07 s\n","Wall time: 17.4 s\n"]},{"name":"stderr","output_type":"stream","text":["\n"]},{"data":{"text/plain":["['不是', '是', '是', '是', '不是', '是', '是', '不是']"]},"execution_count":11,"metadata":{},"output_type":"execute_result"}],"source":["%%time\n","\n","eval_model(model, tokenizer, datasets[\"test\"][:10], device=device)"]},{"cell_type":"code","execution_count":14,"metadata":{"id":"L370pvGTZBrN"},"outputs":[],"source":["def evaluate_model(model, tokenizer, model_name, dataset):\n"," print(\"Evaluating model: \" + model_name)\n"," predictions = 
eval_model(model, tokenizer, dataset, device=device)\n","\n"," model_name += \"_M3\"\n","\n"," save_results(\n"," model_name,\n"," results_path,\n"," dataset,\n"," predictions,\n"," debug=True,\n"," )\n","\n"," metrics = calc_metrics(dataset[\"label\"], predictions, debug=True)\n"," print(metrics)"]},{"cell_type":"code","execution_count":16,"metadata":{"id":"WUFjhxmiZBrN"},"outputs":[{"name":"stdout","output_type":"stream","text":["Evaluating model: llama-factory/merged_models/internlm2_5-7b-chat-1m_sft_bf16_p2_full\n"]},{"name":"stderr","output_type":"stream","text":[" 0%| | 1/3000 [00:02<1:55:37, 2.31s/it]"]},{"name":"stdout","output_type":"stream","text":["--------\n","step 1: 不是</s>\n","--------\n","step 2: 不是\n","--------\n","step 3: 不是\n","--------\n","step 4: 不是\n","--------\n","step 5: 不是\n"]},{"name":"stderr","output_type":"stream","text":[" 5%|▍ | 148/3000 [09:35<2:17:04, 2.88s/it]"]}],"source":["%%time\n","\n","evaluate_model(model, tokenizer, model_name, datasets[\"test\"])"]}],"metadata":{"accelerator":"GPU","application/vnd.databricks.v1+notebook":{"dashboards":[],"environmentMetadata":null,"language":"python","notebookMetadata":{"mostRecentlyExecutedCommandWithImplicitDF":{"commandId":-1,"dataframes":["_sqldf"]},"pythonIndentUnit":4},"notebookName":"10_eval-lf-medium-py3.11","widgets":{}},"colab":{"gpuType":"L4","provenance":[]},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.11.9"}},"nbformat":4,"nbformat_minor":0}
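Condensed, the added notebook boils down to the evaluation flow below. This is a sketch assembled from the cells above, not a verbatim cell; it assumes it is run from the repository root, and the .env values quoted in the comments are the ones the notebook itself prints.

# Sketch of the notebook's end-to-end evaluation flow (not a verbatim cell).
# The notebook reads its configuration from .env; the values it prints are:
#   MODEL_NAME=llama-factory/merged_models/internlm2_5-7b-chat-1m_sft_bf16_p2_full
#   LOGICAL_REASONING_DATA_PATH=datasets/mgtv
#   LOGICAL_REASONING_RESULTS_PATH=results/mgtv-results_merged_model.csv
import os

from dotenv import find_dotenv, load_dotenv

load_dotenv(find_dotenv(".env"), override=True)

# wildcard imports, exactly as in the notebook cells above
from llm_toolkit.llm_utils import *
from llm_toolkit.logical_reasoning_utils import *

model_name = os.getenv("MODEL_NAME")
data_path = os.getenv("LOGICAL_REASONING_DATA_PATH")
results_path = os.getenv("LOGICAL_REASONING_RESULTS_PATH")
use_english_datasets = os.getenv("USE_ENGLISH_DATASETS") == "true"

device = check_gpu()                       # CUDA, MPS, or CPU (see llm_utils.py below)
model, tokenizer = load_model(model_name)  # merged checkpoint; no adapter path is set

datasets = load_logical_reasoning_dataset(
    data_path,
    tokenizer=tokenizer,
    chinese_prompt=not use_english_datasets,
    using_p1=False,
)

predictions = eval_model(model, tokenizer, datasets["test"], device=device)
save_results(model_name + "_M3", results_path, datasets["test"], predictions, debug=True)
print(calc_metrics(datasets["test"]["label"], predictions, debug=True))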
llm_toolkit/llm_utils.py
CHANGED
@@ -1,14 +1,13 @@
 import os
 import re
-import sys
 import torch
-from llamafactory.chat import ChatModel
 from transformers import (
     AutoModelForCausalLM,
     AutoTokenizer,
     BitsAndBytesConfig,
     TextStreamer,
 )
+from tqdm import tqdm


 def load_model(
@@ -21,6 +20,8 @@ def load_model(
     print(f"loading model: {model_name}")

     if adapter_name_or_path:
+        from llamafactory.chat import ChatModel
+
         template = "llama3" if "llama-3" in model_name.lower() else "chatml"

         args = dict(
@@ -32,7 +33,9 @@ def load_model(
         )
         chat_model = ChatModel(args)
         if os.getenv("RESIZE_TOKEN_EMBEDDINGS") == "true":
-            chat_model.engine.model.resize_token_embeddings(
+            chat_model.engine.model.resize_token_embeddings(
+                len(chat_model.engine.tokenizer), pad_to_multiple_of=32
+            )
         return chat_model.engine.model, chat_model.engine.tokenizer

     tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
@@ -63,11 +66,28 @@ def load_model(
     return model, tokenizer


-def test_model(model, tokenizer, prompt):
+def check_gpu():
+    # torch.cuda.is_available() checks and returns a Boolean True if a GPU is available, else it'll return False
+    is_cuda = torch.cuda.is_available()
+
+    # If we have a GPU available, we'll set our device to GPU. We'll use this device variable later in our code.
+    if is_cuda:
+        device = torch.device("cuda")
+        print("GPU is available")
+    elif torch.backends.mps.is_available():
+        device = torch.device("mps")
+        print("MPS is available")
+    else:
+        device = torch.device("cpu")
+        print("GPU/MPS not available, CPU used")
+    return device
+
+
+def test_model(model, tokenizer, prompt, device="cuda"):
     inputs = tokenizer(
         [prompt],
         return_tensors="pt",
-    ).to("cuda")
+    ).to(device)

     text_streamer = TextStreamer(tokenizer)

@@ -114,14 +134,14 @@ def extract_answer(text, debug=False):
     return text


-def eval_model(model, tokenizer, eval_dataset):
+def eval_model(model, tokenizer, eval_dataset, device="cuda"):
     total = len(eval_dataset)
     predictions = []
     for i in tqdm(range(total)):
         inputs = tokenizer(
             eval_dataset["prompt"][i : i + 1],
             return_tensors="pt",
-        ).to("cuda")
+        ).to(device)

         outputs = model.generate(**inputs, max_new_tokens=4096, use_cache=False)
         decoded_output = tokenizer.batch_decode(outputs)
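Two points worth noting about this change: the llamafactory import now happens lazily inside the adapter branch, so evaluating an already-merged model (as the notebook above does, with no ADAPTER_NAME_OR_PATH set) no longer requires LLaMA-Factory to be importable; and test_model / eval_model take an explicit device instead of hard-coding "cuda", with check_gpu() falling back from CUDA to MPS to CPU. A minimal sketch of the device plumbing follows; the dummy token-id tensor is illustrative only.

import torch

from llm_toolkit.llm_utils import check_gpu

device = check_gpu()  # prints "GPU is available", "MPS is available", or the CPU fallback
dummy_input_ids = torch.randint(0, 100, (1, 8)).to(device)  # tokenized prompts are moved the same way
print(dummy_input_ids.device)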
llm_toolkit/logical_reasoning_utils.py
CHANGED
@@ -4,7 +4,6 @@ import pandas as pd
 import seaborn as sns
 import matplotlib.pyplot as plt
 from datasets import load_dataset
-from tqdm import tqdm

 print(f"loading {__file__}")

@@ -203,87 +202,6 @@ Please strictly follow these rules when answering the participant's questions.
     return datasets


-def eval_model(model, tokenizer, eval_dataset):
-    total = len(eval_dataset)
-    predictions = []
-    for i in tqdm(range(total)):
-        inputs = tokenizer(
-            eval_dataset["prompt"][i : i + 1],
-            return_tensors="pt",
-        ).to("cuda")
-
-        outputs = model.generate(**inputs, max_new_tokens=4096, use_cache=False)
-        decoded_output = tokenizer.batch_decode(outputs)
-        debug = i == 0
-        decoded_output = [
-            extract_answer(output, debug=debug) for output in decoded_output
-        ]
-        predictions.extend(decoded_output)
-
-    return predictions
-
-
-def save_model(
-    model,
-    tokenizer,
-    include_gguf=True,
-    include_merged=True,
-    publish=True,
-):
-    try:
-        token = os.getenv("HF_TOKEN") or None
-        model_name = os.getenv("MODEL_NAME")
-
-        save_method = "lora"
-        quantization_method = "q5_k_m"
-
-        model_names = get_model_names(
-            model_name, save_method=save_method, quantization_method=quantization_method
-        )
-
-        model.save_pretrained(model_names["local"])
-        tokenizer.save_pretrained(model_names["local"])
-
-        if publish:
-            model.push_to_hub(
-                model_names["hub"],
-                token=token,
-            )
-            tokenizer.push_to_hub(
-                model_names["hub"],
-                token=token,
-            )
-
-        if include_merged:
-            model.save_pretrained_merged(
-                model_names["local"] + "-merged", tokenizer, save_method=save_method
-            )
-            if publish:
-                model.push_to_hub_merged(
-                    model_names["hub"] + "-merged",
-                    tokenizer,
-                    save_method="lora",
-                    token="",
-                )
-
-        if include_gguf:
-            model.save_pretrained_gguf(
-                model_names["local-gguf"],
-                tokenizer,
-                quantization_method=quantization_method,
-            )
-
-            if publish:
-                model.push_to_hub_gguf(
-                    model_names["hub-gguf"],
-                    tokenizer,
-                    quantization_method=quantization_method,
-                    token=token,
-                )
-    except Exception as e:
-        print(e)
-
-
 def get_metrics(df):
     metrics_df = pd.DataFrame(df.columns.T)[2:]
     metrics_df.rename(columns={0: "model"}, inplace=True)
requirements.txt
CHANGED
@@ -12,5 +12,7 @@ ipywidgets
 packaging
 langchain_openai==0.1.13
 wandb==0.17.4
-
-
+transformers==4.41.2
+sentencepiece==0.2.0
+einops==0.8.0
+accelerate==0.32.1
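The new pins are presumably what lets the merged InternLM checkpoint load through plain transformers (the trust_remote_code path typically needs einops and sentencepiece) without installing LLaMA-Factory. A quick sanity check that the pinned packages resolve in the active environment; a minimal sketch, assuming the dependencies were installed from this file.

# verify the newly pinned dependencies are importable and report their installed versions
from importlib.metadata import version

for pkg in ("transformers", "sentencepiece", "einops", "accelerate"):
    print(f"{pkg}=={version(pkg)}")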