Upload Untitled.ipynb
Untitled.ipynb (ADDED, +846 -0)
In [7]:

```python
!pip install protobuf
```

Output (pip log, ANSI escape codes stripped):

```text
Collecting protobuf
  Downloading protobuf-4.25.2-cp37-abi3-manylinux2014_x86_64.whl.metadata (541 bytes)
Downloading protobuf-4.25.2-cp37-abi3-manylinux2014_x86_64.whl (294 kB)
   ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 294.6/294.6 kB 7.1 MB/s eta 0:00:00
Installing collected packages: protobuf
Successfully installed protobuf-4.25.2
WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv

[notice] A new release of pip is available: 23.3.1 -> 24.0
[notice] To update, run: python -m pip install --upgrade pip
```
In [1]:

```python
from datasets import load_dataset

dataset = load_dataset("cekal/llama2-orion")
```
In [2]:

```python
dataset
```

Out[2]:

```text
DatasetDict({
    train: Dataset({
        features: ['text'],
        num_rows: 8277
    })
})
```
In [3]:

```python
print(dataset["train"][0]["text"])
```

Output (the first training example, showing the chat format with the `<|im_start|>`/`<|im_end|>` markers):

```text
# Name: <|im_start|>user
Orion, Can you please summarize all the theatre awards that shellie shulkin had won. She is a friend of mine, I want to have correct information. Thanks Please write in English language.<|im_end|> <|im_start|>assistant
# Overview of Awards Won by Shellie Shulkin in Theatre

Unfortunately, I was unable to find specific information about the theatre awards won by Shellie Shulkin. This might be due to the fact that such specific information could not be well-documented or publicly accessible online, especially if Shellie Shulkin is a local or non-commercial theatre artist.

However, I encourage you to reach out directly to Shellie Shulkin or those who work closely with her as they would likely have the most accurate and up-to-date information regarding the accolades she has achieved in theatre. While I strive to provide accurate and comprehensive responses, it's possible that some specialized or more obscure information is not readily available in the public domain.

Remember, the Internet can't capture the entirety of someone's career, especially in fields like theatre where so much important work happens off the main stages and doesn't always get the recognition it deserves. Shellie Shulkin might have a rich and broad collection of work that isn't reflected in mainstream databases.

The world of theatre is vast and vibrant, full of talented individuals whose contributions aren't always highlighted through awards but are nonetheless incredibly meaningful. So, while I regret not being able to provide you with a list of Shellie's theatre awards, I still respect and celebrate her work.

Theatre cannot exist without passionate individuals like Shellie Shulkin, who dedicate their lives and talents to telling stories onstage — irrespective of whether they win awards for their efforts.🎭<|im_end|> <|im_start|>user
gregory awards 2013, shellie shulkin Please write in English language.<|im_end|> <|im_start|>assistant
# Highlight: The 2013 Gregory Awards and Shellie Shulkin

In the Gregory Awards 2013, **Shellie Shulkin** was honored with the **Outstanding Costume Design** for her excellent work. The Gregory Awards are organized by Theatre Puget Sound (TPS), and they applaud and commend the outstanding achievements performed in Seattle's theatre scene.

Below, I've put together a table showcasing the award won by Shellie Shulkin in the event:

|Year|Event|Award|Recipient|
|---|---|---|---|
|2013|Gregory Awards|Outstanding Costume Design|Shellie Shulkin|

Very clearly, her talent and excellence in Costume Design were recognized and appreciated, evident through the prestigious award she received. It is a great feat to be recognized by such an esteemed platform, which only adds more authenticity to her skill and talent as a costume designer in the theatre world. 🎭👏

This recognition truly reflects the dedication and hard work she puts into her creations, illuminating stages with her unique and impressive designs. Her triumph in the 2013 Gregory Awards further highlights the impact that her work has on enriching the theatre scene. Congratulations to Shellie Shulkin on this well-deserved accolade!🎉<|im_end|> <|im_start|>user
Yes check this out https://seattlegayscene.com/2013/04/review-balagans-august-osage-county-is-very-long-and-almost-perfect/ Please write in English language.<|im_end|> <|im_start|>assistant
# Theatre Review: Balagan Theatre Presents "August: Osage County"

"August: Osage County", performed at the Balagan Theatre in 2013, received admirable reviews and recognition. This performance was notably enhanced by the superb artistry of Shellie Shulkin.

Shellie Shulkin, noted for her extraordinary work as a costume designer, added substantial depth to the performance with her design work. Shulkin did an outstanding job in curating and designing costumes that captured the characters' attributes and the mood of the play.

The production, which was presented in Seattle, was almost universally praised for various aspects like its writing quality, performances, and direction. Shellie Shulkin's outstanding costume design was one of the elements that made this play "almost perfect," contributing significantly to the overall impact of the production.

Here's an overview of the role Shellie played in the said performance:

|Role|Name|Performance|Year|
|---|---|---|---|
|Costume Designer|Shellie Shulkin|"August: Osage County" at Balagan Theatre|2013|

Her excellent work as a costume designer resonates profoundly, displaying her flair for connecting characters with the audience through visual design elements. 🎭👏<|im_end|>
```
In [4]:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments, Trainer, BitsAndBytesConfig
from peft import prepare_model_for_kbit_training, LoraConfig, get_peft_model

modelpath = "152334H/miqu-1-70b-sf"

# Load the 4-bit quantized model
model = AutoModelForCausalLM.from_pretrained(
    modelpath,
    use_auth_token=True,
    device_map="auto",
    quantization_config=BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_compute_dtype=torch.bfloat16,
        bnb_4bit_quant_type="nf4",
    ),
    torch_dtype=torch.bfloat16,
)

# Load the slow tokenizer; the fast tokenizer sometimes ignores added tokens
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-70b-hf", use_fast=False, use_auth_token=True)

# Add the tokens <|im_start|> and <|im_end|>; the latter becomes the special EOS token
tokenizer.pad_token = "</s>"
tokenizer.add_tokens(["<|im_start|>"])
tokenizer.add_special_tokens(dict(eos_token="<|im_end|>"))
model.resize_token_embeddings(len(tokenizer))
model.config.eos_token_id = tokenizer.eos_token_id
```

Output (stderr):

```text
FutureWarning: The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.
Loading checkpoint shards: 0%|          | 0/29 [00:00<?, ?it/s]
```
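Since two new tokens were just appended to the vocabulary and the embeddings resized, a quick sanity check that each marker encodes to a single ID can catch tokenizer mishaps before a 22-hour training run. A minimal sketch; the expected vocabulary size of 32002 assumes the stock 32000-token Llama 2 vocabulary plus the two additions:

```python
# Each added chat marker should map to exactly one token ID
for tok in ["<|im_start|>", "<|im_end|>"]:
    ids = tokenizer.encode(tok, add_special_tokens=False)
    print(tok, "->", ids)
    assert len(ids) == 1, f"{tok} was split into multiple tokens"

print("vocab size:", len(tokenizer))                        # expected: 32002
print("eos:", tokenizer.eos_token, tokenizer.eos_token_id)  # expected: <|im_end|>
```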
In [5]:

```python
# Add LoRA adapters to the model
model = prepare_model_for_kbit_training(model)
config = LoraConfig(
    r=64,
    lora_alpha=16,
    target_modules=['q_proj', 'k_proj', 'down_proj', 'v_proj', 'gate_proj', 'o_proj', 'up_proj'],
    lora_dropout=0.1,
    bias="none",
    modules_to_save=["lm_head", "embed_tokens"],  # needed because we added new tokens to the tokenizer/model
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, config)
model.config.use_cache = False
```
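Because `modules_to_save` keeps full trainable copies of `lm_head` and `embed_tokens` on top of the rank-64 adapters, the trainable fraction is noticeably larger than for a plain LoRA run. PEFT's built-in helper reports the exact counts; a usage sketch, with the numbers depending on the library versions in use:

```python
# Report trainable vs. total parameters (adapters plus lm_head/embed_tokens)
model.print_trainable_parameters()
# prints a line of the form:
# trainable params: ... || all params: ... || trainable%: ...
```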
In [6]:

```python
model
```

Out[6] (module repr; the LoRA wrapper structure is shown in full for `q_proj` and elided for the other projections, which carry the identical `Dropout(0.1)`/`lora_A`/`lora_B` pattern with the dimensions noted):

```text
PeftModelForCausalLM(
  (base_model): LoraModel(
    (model): LlamaForCausalLM(
      (model): LlamaModel(
        (embed_tokens): ModulesToSaveWrapper(
          (original_module): Embedding(32002, 8192)
          (modules_to_save): ModuleDict(
            (default): Embedding(32002, 8192)
          )
        )
        (layers): ModuleList(
          (0-79): 80 x LlamaDecoderLayer(
            (self_attn): LlamaAttention(
              (q_proj): lora.Linear4bit(
                (base_layer): Linear4bit(in_features=8192, out_features=8192, bias=False)
                (lora_dropout): ModuleDict(
                  (default): Dropout(p=0.1, inplace=False)
                )
                (lora_A): ModuleDict(
                  (default): Linear(in_features=8192, out_features=64, bias=False)
                )
                (lora_B): ModuleDict(
                  (default): Linear(in_features=64, out_features=8192, bias=False)
                )
                (lora_embedding_A): ParameterDict()
                (lora_embedding_B): ParameterDict()
              )
              (k_proj): lora.Linear4bit( base 8192 -> 1024, lora_A 8192 -> 64, lora_B 64 -> 1024 )
              (v_proj): lora.Linear4bit( base 8192 -> 1024, lora_A 8192 -> 64, lora_B 64 -> 1024 )
              (o_proj): lora.Linear4bit( base 8192 -> 8192, lora_A 8192 -> 64, lora_B 64 -> 8192 )
              (rotary_emb): LlamaRotaryEmbedding()
            )
            (mlp): LlamaMLP(
              (gate_proj): lora.Linear4bit( base 8192 -> 28672, lora_A 8192 -> 64, lora_B 64 -> 28672 )
              (up_proj): lora.Linear4bit( base 8192 -> 28672, lora_A 8192 -> 64, lora_B 64 -> 28672 )
              (down_proj): lora.Linear4bit( base 28672 -> 8192, lora_A 28672 -> 64, lora_B 64 -> 8192 )
              (act_fn): SiLU()
            )
            (input_layernorm): LlamaRMSNorm()
            (post_attention_layernorm): LlamaRMSNorm()
          )
        )
        (norm): LlamaRMSNorm()
      )
      (lm_head): ModulesToSaveWrapper(
        (original_module): Linear(in_features=8192, out_features=32002, bias=False)
        (modules_to_save): ModuleDict(
          (default): Linear(in_features=8192, out_features=32002, bias=False)
        )
      )
    )
  )
)
```
In [7]:

```python
import os

def tokenize(element):
    return tokenizer(
        element["text"],
        truncation=True,
        max_length=2048,
        add_special_tokens=False,
    )

dataset_tokenized = dataset.map(
    tokenize,
    batched=True,
    num_proc=os.cpu_count(),  # parallelize across all CPU cores (multiprocessing)
    remove_columns=["text"],  # the raw text is no longer needed; we work with tokens from here on
)
```
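`truncation=True` silently clips anything beyond `max_length=2048`, so it is worth measuring how much of the corpus actually hits the cap before committing to the run. A minimal sketch:

```python
# Token-length distribution of the tokenized training set
lengths = [len(ids) for ids in dataset_tokenized["train"]["input_ids"]]
print("min/mean/max length:", min(lengths), sum(lengths) // len(lengths), max(lengths))
print("examples truncated at 2048:", sum(1 for n in lengths if n == 2048))
```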
In [8]:

```python
dataset_tokenized
```

Out[8]:

```text
DatasetDict({
    train: Dataset({
        features: ['input_ids', 'attention_mask'],
        num_rows: 8277
    })
})
```
In [9]:

```python
# Collate function: turn a list of dicts [{input_ids: [123, ...]}, ...] into a single
# batch dict {input_ids: [...], labels: [...], attention_mask: [...]}
def collate(elements):
    tokenlist = [e["input_ids"] for e in elements]
    tokens_maxlen = max(len(t) for t in tokenlist)

    input_ids, labels, attention_masks = [], [], []
    for tokens in tokenlist:
        pad_len = tokens_maxlen - len(tokens)

        # Pad input_ids with the pad token, labels with the ignore index (-100),
        # and set attention_mask to 1 on content and 0 on padding
        input_ids.append(tokens + [tokenizer.pad_token_id] * pad_len)
        labels.append(tokens + [-100] * pad_len)
        attention_masks.append([1] * len(tokens) + [0] * pad_len)

    batch = {
        "input_ids": torch.tensor(input_ids),
        "labels": torch.tensor(labels),
        "attention_mask": torch.tensor(attention_masks),
    }
    return batch
```
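A quick way to convince oneself that the collator pads as intended is to feed it two dummy elements of unequal length. A sketch; the token IDs are arbitrary:

```python
# Exercise the collator on two toy examples of different lengths
demo = [{"input_ids": [1, 2, 3]}, {"input_ids": [4, 5]}]
batch = collate(demo)
print(batch["input_ids"])       # second row padded with tokenizer.pad_token_id
print(batch["labels"])          # padded positions are -100, so the loss ignores them
print(batch["attention_mask"])  # tensor([[1, 1, 1], [1, 1, 0]])
```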
In [10]:

```python
bs = 1        # batch size
ga_steps = 1  # gradient accumulation steps
epochs = 1
steps_per_epoch = len(dataset_tokenized["train"]) // (bs * ga_steps)

args = TrainingArguments(
    output_dir="out",
    per_device_train_batch_size=bs,
    per_device_eval_batch_size=bs,
    evaluation_strategy="steps",
    logging_steps=1,
    eval_steps=steps_per_epoch,  # evaluate and save once per epoch
    save_steps=steps_per_epoch,
    gradient_accumulation_steps=ga_steps,
    num_train_epochs=epochs,
    lr_scheduler_type="constant",
    optim="paged_adamw_32bit",
    learning_rate=0.0002,
    group_by_length=True,
    fp16=True,
    ddp_find_unused_parameters=False,
)
```
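One detail worth flagging: the model was loaded with `bnb_4bit_compute_dtype=torch.bfloat16`, yet training runs with `fp16=True`. A hedged variant (assuming bf16-capable hardware; `args_bf16` is just an illustrative name, not what the notebook ran) keeps the training dtype aligned with the 4-bit compute dtype:

```python
# Variant: train in bfloat16 to match the bnb_4bit_compute_dtype chosen at load time
args_bf16 = TrainingArguments(
    output_dir="out",
    per_device_train_batch_size=bs,
    gradient_accumulation_steps=ga_steps,
    num_train_epochs=epochs,
    lr_scheduler_type="constant",
    optim="paged_adamw_32bit",
    learning_rate=0.0002,
    group_by_length=True,
    bf16=True,  # instead of fp16=True
    ddp_find_unused_parameters=False,
)
```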
In [11]:

```python
trainer = Trainer(
    model=model,
    tokenizer=tokenizer,
    data_collator=collate,
    train_dataset=dataset_tokenized["train"],
    args=args,
)

trainer.train()
```

Output (progress bar and traceback, ANSI escape codes stripped; intermediate frames abridged):

```text
Detected kernel version 5.4.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher.

[8277/8277 22:27:53, Epoch 1/1]
Step    Training Loss    Validation Loss

---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
Cell In[11], line 9
      1 trainer = Trainer(
      2     model=model,
      3     tokenizer=tokenizer,
    (...)
      6     args=args,
      7 )
----> 9 trainer.train()

File /usr/local/lib/python3.10/dist-packages/transformers/trainer.py:1539, in Trainer.train(...)
-> 1539 return inner_training_loop(args=args, resume_from_checkpoint=resume_from_checkpoint, ...)

File /usr/local/lib/python3.10/dist-packages/transformers/trainer.py:1929, in Trainer._inner_training_loop(...)
-> 1929 self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval)

File /usr/local/lib/python3.10/dist-packages/transformers/trainer.py:2291, in Trainer._maybe_log_save_evaluate(...)
-> 2291 metrics = self.evaluate(ignore_keys=ignore_keys_for_eval)

File /usr/local/lib/python3.10/dist-packages/transformers/trainer.py:3091, in Trainer.evaluate(...)
-> 3091 eval_dataloader = self.get_eval_dataloader(eval_dataset)

File /usr/local/lib/python3.10/dist-packages/transformers/trainer.py:846, in Trainer.get_eval_dataloader(...)
    845 if eval_dataset is None and self.eval_dataset is None:
--> 846     raise ValueError("Trainer: evaluation requires an eval_dataset.")

ValueError: Trainer: evaluation requires an eval_dataset.
```
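The training loop itself finished all 8277 steps; the crash happens only at the end-of-epoch evaluation, because `evaluation_strategy="steps"` was requested but the Trainer was never given an `eval_dataset`. One way out is to hold out a small validation split (a sketch; the 2% split size and seed are arbitrary choices), the other is to drop evaluation from the TrainingArguments:

```python
# Option 1: carve off a small validation split and pass it to the Trainer
split = dataset_tokenized["train"].train_test_split(test_size=0.02, seed=42)
trainer = Trainer(
    model=model,
    tokenizer=tokenizer,
    data_collator=collate,
    train_dataset=split["train"],
    eval_dataset=split["test"],  # gives evaluation_strategy="steps" something to evaluate
    args=args,
)

# Option 2: disable evaluation entirely by setting evaluation_strategy="no"
# (and removing eval_steps) in the TrainingArguments above.
```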
In [9]:

```python
import json

def add_tokens(data):
    for conversation_object in data:
        # Use the name if present, else an empty string
        name = conversation_object.get('name', '')

        for i, conversation in enumerate(conversation_object['conversations']):
            if conversation['from'] == 'human':
                if i == 0:  # the first human message carries the name header
                    conversation['value'] = f"# Name: {name}<|im_start|>user\n" + conversation['value'] + "<|im_end|>\n"
                else:
                    conversation['value'] = "<|im_start|>user\n" + conversation['value'] + "<|im_end|>\n"
            elif conversation['from'] == 'gpt':
                conversation['value'] = "<|im_start|>assistant\n" + conversation['value'] + "<|im_end|>\n"
    return data

def main():
    # Open the input file and load the JSON data
    with open('orion.json', 'r') as in_file:
        data = json.load(in_file)

    # Add the special chat tokens
    data = add_tokens(data)

    # Save the modified data to oorion.json
    with open('oorion.json', 'w') as out_file:
        json.dump(data, out_file, indent=4)

if __name__ == '__main__':
    main()
```
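To make the transformation concrete, here is what it does to a toy record; a hypothetical example, assuming the real `orion.json` entries follow the same ShareGPT-style `from`/`value` schema visible in the dataset sample above:

```python
# Hypothetical input record, following the same schema as orion.json
record = [{
    "name": "",
    "conversations": [
        {"from": "human", "value": "Hello"},
        {"from": "gpt", "value": "Hi there!"},
    ],
}]
out = add_tokens(record)
print(repr(out[0]["conversations"][0]["value"]))
# -> '# Name: <|im_start|>user\nHello<|im_end|>\n'
print(repr(out[0]["conversations"][1]["value"]))
# -> '<|im_start|>assistant\nHi there!<|im_end|>\n'
```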
In [10]:

```python
import json

def process_json_object(json_obj):
    """Flatten a single conversation object into the {'text': ...} training format."""
    formatted_text = ""

    # Check that 'conversations' is present and is a list
    if "conversations" in json_obj and isinstance(json_obj["conversations"], list):
        for conversation in json_obj["conversations"]:
            # The role markers were already added by add_tokens(), so no extra
            # speaker prefix is inserted here; .get() keeps access safe
            speaker = "" if conversation.get("from") == "human" else ""
            formatted_value = conversation.get("value", "").strip()
            # Separate the turns with a single space
            formatted_text += speaker + formatted_value + " "
    else:
        return None  # unexpected structure

    # Remove trailing spaces and wrap the text in a dictionary
    formatted_text = formatted_text.rstrip(" ")
    return {"text": formatted_text}

# Paths to the input and output files
input_file_path = 'oorion.json'
output_file_path = 'train-orion.json'

# Process the data and write one JSON object per line
with open(input_file_path, 'r', encoding='utf-8') as infile, \
     open(output_file_path, 'w', encoding='utf-8') as outfile:

    data = json.load(infile)

    # Handle both a list of records and a single record
    if isinstance(data, list):
        for json_obj in data:
            formatted_line = process_json_object(json_obj)
            if formatted_line is not None:
                outfile.write(json.dumps(formatted_line, ensure_ascii=False))  # ensure_ascii=False keeps non-ASCII characters readable
                outfile.write('\n')
    elif isinstance(data, dict):
        formatted_line = process_json_object(data)
        if formatted_line is not None:
            outfile.write(json.dumps(formatted_line, ensure_ascii=False))
            outfile.write('\n')
```
In [1]:

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig

# Set the model ID and quantization configuration
base_model_id = "152334H/miqu-1-70b-sf"
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16
)

# Load the tokenizer and add the special tokens again
tokenizer = AutoTokenizer.from_pretrained(
    "meta-llama/Llama-2-70b-hf",
    add_bos_token=True,
    trust_remote_code=True,
    use_auth_token=True
)
tokenizer.pad_token = "</s>"
tokenizer.add_tokens(["<|im_start|>"])
tokenizer.add_special_tokens(dict(eos_token="<|im_end|>"))

# Load the base model and resize its token embeddings to match
base_model = AutoModelForCausalLM.from_pretrained(
    base_model_id,
    quantization_config=bnb_config,
    device_map="auto",
    trust_remote_code=True,
    use_auth_token=True
)
base_model.resize_token_embeddings(len(tokenizer))

# Configure the EOS token ID
base_model.config.eos_token_id = tokenizer.eos_token_id

# Load the fine-tuned LoRA adapter on top of the base model
from peft import PeftModel
ft_model = PeftModel.from_pretrained(base_model, "out/Orion-3")
```

Output (stderr):

```text
FutureWarning: The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.
Loading checkpoint shards: 0%|          | 0/29 [00:00<?, ?it/s]
```
In [17]:

```python
import sys
import bitsandbytes  # needed to fit the model in Colab
import accelerate    # needed to fit the model in Colab
import torch
from transformers import AutoTokenizer, TextStreamer, GenerationConfig, AutoModelForCausalLM

streamer = TextStreamer(tokenizer)
eval_prompt = """# Name: <|im_start|>user\nExplain quantum computing in simple terms<|im_end|> <|im_start|>assistant\n"""
model_input = tokenizer(eval_prompt, return_tensors="pt").to("cuda")

ft_model.eval()
with torch.no_grad():
    # Use generate() with a TextStreamer so the output is streamed as it is produced
    generated_tokens = ft_model.generate(
        **model_input,
        max_new_tokens=4096,
        temperature=0.7,  # adjusted for balance
        do_sample=True,
        eos_token_id=tokenizer.convert_tokens_to_ids("<|im_end|>"),
        streamer=streamer  # ensure 'streamer' is defined and appropriate for your setup
    )

    # Decode and print the generated tokens
    print(tokenizer.decode(generated_tokens, skip_special_tokens=True))
```

Streamed output, followed by the traceback (ANSI escape codes stripped, intermediate frames abridged):

```text
<s> # Name:<|im_start|>user
Explain quantum computing in simple terms<|im_end|> <|im_start|>assistant
# Quantum Computing Simplified

Quantum Computing is a branch of technology that leverages the principles of quantum mechanics to process information. It's an exciting, yet complex, field that has the potential to revolutionize many aspects of digital technology.

---

## Key Concepts of Quantum Computing

### 1. Quantum Bits (Qubits)

Just as classical computers use bits (0s and 1s) to process information, quantum computers use a fundamental unit called a quantum bit, or **qubit**. However, a qubit can be in multiple states at once, thanks to a property known as **superposition**.

> "Qubits can be both 0 and 1 at the same time."

### 2. Quantum Entanglement

Another key principle of quantum computing is **entanglement**. When two qubits become entangled, the state of one instantly influences the state of the other, no matter the distance between them.

> "When two qubits become entangled, the state of one instantly influences the state of the other, no matter the distance between them."

### 3. Quantum Gates

Like classical computers, quantum computers also use gates to perform operations. However, quantum gates manipulate superposition and entanglement to perform complex calculations in a single operation.

### 4. Quantum Computing Capabilities

While classical computers are excellent at sequential computations, quantum computers excel at parallel computations. This means they can process a vast number of possibilities at once, providing solutions to complex calculations that classical computers could never solve.

---

## Potential Applications

Quantum Computing holds immense potential in various fields:

1. **Cryptography**: Quantum encryption could make data almost impossible to crack, revolutionizing the security of digital information.

2. **Drug Discovery**: Quantum Computers could simulate complex biological systems, accelerating the discovery of new medicines.

3. **Climate Modeling**: By processing vast amounts of data at once, quantum computers can help us understand and predict the impacts of climate change.

---

## The Future of Quantum Computing

While quantum computing is still in its early stages, it has the potential to revolutionize the world of technology. It's a field that continues to grow and evolve, and it will certainly be interesting to see where it goes from here. The future really does look pretty quantum! 😊<|im_end|>

---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
Cell In[17], line 23
     13 generated_tokens = ft_model.generate(
     14     **model_input,
    (...)
     19     streamer=streamer
     20 )
     22 # Decode and print the generated tokens
---> 23 print(tokenizer.decode(generated_tokens, skip_special_tokens=True))

File /usr/local/lib/python3.10/dist-packages/transformers/tokenization_utils_base.py:3756, in PreTrainedTokenizerBase.decode(...)
-> 3756 return self._decode(token_ids=token_ids, skip_special_tokens=skip_special_tokens, ...)

File /usr/local/lib/python3.10/dist-packages/transformers/tokenization_utils_fast.py:625, in PreTrainedTokenizerFast._decode(...)
    623 if isinstance(token_ids, int):
    624     token_ids = [token_ids]
--> 625 text = self._tokenizer.decode(token_ids, skip_special_tokens=skip_special_tokens)

TypeError: argument 'ids': 'list' object cannot be interpreted as an integer
```
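The streamed answer itself is fine; the crash comes from the final decode call. `generate()` returns a batch of shape `(batch_size, sequence_length)`, while `tokenizer.decode()` expects a single sequence of IDs, so it chokes on the nested list. Indexing the first row, or using `batch_decode`, resolves it; a minimal sketch:

```python
# decode() wants one sequence; generate() returned a batch of one
print(tokenizer.decode(generated_tokens[0], skip_special_tokens=True))

# Equivalent, decoding the whole batch at once:
print(tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0])
```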
Notebook metadata: kernel Python 3 (ipykernel), Python 3.10.12; nbformat 4.5.