import os
import json
import shutil

import torch
from torch.utils.data import Dataset, DataLoader
from PIL import Image
from tqdm import tqdm

# Point the Hugging Face client at the mirror before importing the model code.
os.environ['HF_ENDPOINT'] = "https://hf-mirror.com"

from qwenva import tokenizer, processor, qwenva

images_file_path = "/root/autodl-tmp/images"

# Load the multimodal conversation data (one record per image).
with open('/root/autodl-tmp/chat.json', 'r', encoding='utf-8') as f:
    chat_data = json.load(f)

# Token id of the <image> placeholder and the pad token id.
image_token = tokenizer.encode('<image>')[0]
pad_token = tokenizer.pad_token_id
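# Each entry of chat.json is assumed to look roughly like the following
# (hypothetical sample; only the fields this script actually reads are shown:
# 'image', 'conversations', 'from' in {'human', 'gpt'} and 'value'):
#
# {
#   "image": "000000001.jpg",
#   "conversations": [
#     {"from": "human", "value": "<image>\nWhat is shown in this picture?"},
#     {"from": "gpt", "value": "A cat sleeping on a sofa."}
#   ]
# }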
def process_data(sample, max_len=8012):
    """Tokenize one conversation into input_ids, labels and attention_mask.

    User turns are masked with -100 so the loss is only computed on the
    assistant replies; the position of the <image> token is returned as
    image_idx so the model knows where to splice in the image features.
    """
    conversations = sample['conversations']
    input_ids = []
    labels = []
    messages = []
    flag = 0  # index of the conversation turn in which to look for the <image> token
    for index, item in enumerate(conversations):
        if item['from'] == 'human':
            old_input_ids = input_ids
            messages.append({'role': 'user', 'content': item['value']})
            input_ids = tokenizer.apply_chat_template(
                messages,
                add_generation_prompt=True
            )
            # Mask the user turn (and the generation prompt) in the labels.
            labels += [-100] * (len(input_ids) - len(old_input_ids))
            if index == flag:
                try:
                    image_index = input_ids.index(image_token)
                    labels[image_index] = image_token
                except ValueError:
                    print("image token not found")
                    flag = index + 1
                    continue
        elif item['from'] == 'gpt':
            old_input_ids = input_ids
            messages.append({'role': 'assistant', 'content': item['value']})
            input_ids = tokenizer.apply_chat_template(messages)
            flag = index + 1
            # The assistant turn is supervised: copy its token ids into the labels.
            labels += input_ids[len(old_input_ids):]
    # Pad or truncate so every sample has the same length.
    if len(input_ids) > max_len:
        input_ids = input_ids[:max_len]
        labels = labels[:max_len]
        attention_mask = [1] * len(input_ids)
    else:
        attention_mask = [1] * len(input_ids) + [0] * (max_len - len(input_ids))
        input_ids += [pad_token] * (max_len - len(input_ids))
        labels += [-100] * (max_len - len(labels))
    # Convert to tensors. Note: this assumes each sample contains an <image>
    # token in one of its user turns; otherwise image_index is undefined here.
    input_ids = torch.tensor(input_ids)
    attention_mask = torch.tensor(attention_mask)
    labels = torch.tensor(labels)
    image_index = torch.tensor(image_index)
    return {
        'input_ids': input_ids,
        'attention_mask': attention_mask,
        'labels': labels,
        'image_idx': image_index
    }
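
# Quick sanity check (with a sample shaped like the hypothetical record above):
#
#   out = process_data(chat_data[0], max_len=360)
#   out['input_ids'].shape   # torch.Size([360])
#   out['labels'].shape      # torch.Size([360])
#   out['image_idx']         # scalar tensor: position of the <image> token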

         
class MyDataset(Dataset):
    """Pairs each conversation sample with the pixel values of its image."""
    def __init__(self, images_file_path, data, max_len=1024):
        self.images_file_path = images_file_path
        self.data = data
        self.max_len = max_len

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        output_ = process_data(self.data[index], max_len=self.max_len)
        img_path = os.path.join(self.images_file_path, self.data[index]['image'])
        # Convert to RGB so grayscale/RGBA images also yield 3-channel inputs.
        img = Image.open(img_path).convert('RGB')
        input_pixel = processor(images=img, return_tensors="pt")
        output_['input_pixel'] = input_pixel['pixel_values'].squeeze()
        return output_
dataset = MyDataset(images_file_path, chat_data, max_len=360)
train_loader = DataLoader(dataset, batch_size=8, shuffle=True)
import deepspeed
import argparse

# Select the device.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
qwenva = qwenva.to(device)

# Wrap the model in a DeepSpeed engine. No optimizer object is passed, so the
# optimizer is expected to be defined in the JSON config.
model_engine, optimizer, _, _ = deepspeed.initialize(
    model=qwenva,
    args=argparse.Namespace(),
    model_parameters=qwenva.parameters(),
    config_params="./deepspeed_config.json"
)
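# "./deepspeed_config.json" is not included in this file. A minimal config that
# would be consistent with this script (micro-batch size 8 from the DataLoader
# above, optimizer defined in the config) might look like the sketch below;
# treat it as an assumption, not the actual file:
#
# {
#   "train_micro_batch_size_per_gpu": 8,
#   "gradient_accumulation_steps": 1,
#   "optimizer": {"type": "Adam", "params": {"lr": 1e-4}},
#   "fp16": {"enabled": true}
# }
#
# (Gradient accumulation is done manually in the training loop below via
# `accumulation_steps`, so it is left at 1 in this sketch.)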
# To resume from an earlier run, uncomment the following two lines:
# checkpoint_path = "/root/autodl-tmp/best_model_2"
# model_engine.load_checkpoint(checkpoint_path)
import torch.nn as nn

# CrossEntropyLoss ignores targets equal to -100 by default (ignore_index=-100),
# so the masked user turns and the padding are excluded from the loss.
loss_fn = nn.CrossEntropyLoss()
accumulation_steps = 2  # accumulate gradients over this many batches before stepping
# Training loop.
def train(model_engine, train_dataloader, optimizer, loss_fn, device, epochs):
    model_engine.train()
    for epoch in range(epochs):
        # Show a tqdm progress bar over the batches of one epoch.
        with tqdm(total=len(train_dataloader), desc=f'Epoch {epoch + 1}/{epochs}', unit='batch') as pbar:
            optimizer.zero_grad()
            for batch_idx, batch in enumerate(train_dataloader):
                # Move the batch to the GPU.
                input_ids = batch['input_ids'].to(device)
                attention_mask = batch['attention_mask'].to(device)
                input_pixel = batch['input_pixel'].to(device)
                labels = batch['labels'].to(device)
                image_idx = batch['image_idx'].to(device)
                logits = model_engine(input_ids, attention_mask, input_pixel, image_idx)
                # Shifted language-modelling loss: position t predicts token t+1.
                loss = loss_fn(logits[:, :-1, :].reshape(-1, logits.shape[-1]),
                               labels[:, 1:].reshape(-1).clone())
                # Backward pass through the DeepSpeed engine.
                model_engine.backward(loss)
                # Manual gradient accumulation: step only every `accumulation_steps` batches.
                if (batch_idx + 1) % accumulation_steps == 0:
                    model_engine.step()
                pbar.update(1)
                pbar.set_postfix(loss=loss.item())  # show the current loss
                # Periodically save a checkpoint (every 24807 batches here).
                if (batch_idx + 1) % 24807 == 0:
                    # If the checkpoint folder exists, delete and recreate it
                    # so only the latest checkpoint is kept.
                    if os.path.exists("/root/autodl-tmp/best_model_instruct"):
                        shutil.rmtree("/root/autodl-tmp/best_model_instruct")
                        os.makedirs("/root/autodl-tmp/best_model_instruct")
                    model_engine.save_checkpoint("/root/autodl-tmp/best_model_instruct")
                    print(f"Model checkpoint saved at batch {batch_idx + 1}")


train(model_engine, train_loader, optimizer, loss_fn, device, epochs=1)
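
# This script is normally launched with the DeepSpeed launcher rather than plain
# `python`, e.g. (the script name below is hypothetical):
#
#   deepspeed train_qwenva_instruct.py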