csuhan committed on
Commit 9d46ee0 · 1 Parent(s): b11d339

Delete gpt4_eval.py

Files changed (1)
  1. gpt4_eval.py +0 -170
gpt4_eval.py DELETED
@@ -1,170 +0,0 @@
- import json
- import os
- import glob
- import sys
- import time
- from pathlib import Path
- from typing import Tuple
-
- import shortuuid
- # from huggingface_hub import hf_hub_download
- from PIL import Image
- import gradio as gr
- import torch
- from fairscale.nn.model_parallel.initialize import initialize_model_parallel
-
- from llama import LLaMA, ModelArgs, Tokenizer, Transformer, VisionModel
-
- os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
-
- PROMPT_DICT = {
-     "prompt_input": (
-         "Below is an instruction that describes a task, paired with an input that provides further context. "
-         "Write a response that appropriately completes the request.\n\n"
-         "### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:"
-     ),
-     "prompt_no_input": (
-         "Below is an instruction that describes a task. "
-         "Write a response that appropriately completes the request.\n\n"
-         "### Instruction:\n{instruction}\n\n### Response:"
-     ),
- }
-
-
- def setup_model_parallel() -> Tuple[int, int]:
-     os.environ['RANK'] = '0'
-     os.environ['WORLD_SIZE'] = '1'
-     os.environ['MP'] = '1'
-     os.environ['MASTER_ADDR'] = '127.0.0.1'
-     os.environ['MASTER_PORT'] = '2223'
-     local_rank = int(os.environ.get("LOCAL_RANK", -1))
-     world_size = int(os.environ.get("WORLD_SIZE", -1))
-
-     torch.distributed.init_process_group("nccl")
-     initialize_model_parallel(world_size)
-     torch.cuda.set_device(local_rank)
-
-     # seed must be the same in all processes
-     torch.manual_seed(1)
-     return local_rank, world_size
-
-
- def load(
-     ckpt_path: str,
-     param_path: str,
-     tokenizer_path: str,
-     instruct_adapter_path: str,
-     caption_adapter_path: str,
-     local_rank: int,
-     world_size: int,
-     max_seq_len: int,
-     max_batch_size: int,
- ) -> LLaMA:
-     start_time = time.time()
-     print("Loading")
-     instruct_adapter_checkpoint = torch.load(
-         instruct_adapter_path, map_location="cpu")
-     caption_adapter_checkpoint = torch.load(
-         caption_adapter_path, map_location="cpu")
-     with open(param_path, "r") as f:
-         params = json.loads(f.read())
-
-     model_args: ModelArgs = ModelArgs(
-         max_seq_len=max_seq_len, max_batch_size=max_batch_size, **params
-     )
-     model_args.adapter_layer = int(
-         instruct_adapter_checkpoint['adapter_query.weight'].shape[0] / model_args.adapter_len)
-     model_args.cap_adapter_layer = int(
-         caption_adapter_checkpoint['cap_adapter_query.weight'].shape[0] / model_args.cap_adapter_len)
-
-     tokenizer = Tokenizer(model_path=tokenizer_path)
-     model_args.vocab_size = tokenizer.n_words
-     torch.set_default_tensor_type(torch.cuda.HalfTensor)
-     model = Transformer(model_args)
-
-     ckpt = torch.load(ckpt_path, map_location='cuda')
-     model.load_state_dict(ckpt, strict=False)
-
-     vision_model = VisionModel(model_args)
-
-     torch.set_default_tensor_type(torch.FloatTensor)
-     model.load_state_dict(instruct_adapter_checkpoint, strict=False)
-     model.load_state_dict(caption_adapter_checkpoint, strict=False)
-     vision_model.load_state_dict(caption_adapter_checkpoint, strict=False)
-
-     generator = LLaMA(model, tokenizer, vision_model)
-     print(f"Loaded in {time.time() - start_time:.2f} seconds")
-     return generator
-
-
- def instruct_generate(
-     instruct: str,
-     input: str = 'none',
-     max_gen_len=512,
-     temperature: float = 0.1,
-     top_p: float = 0.75,
- ):
-     if input == 'none':
-         prompt = PROMPT_DICT['prompt_no_input'].format_map(
-             {'instruction': instruct, 'input': ''})
-     else:
-         prompt = PROMPT_DICT['prompt_input'].format_map(
-             {'instruction': instruct, 'input': input})
-
-     results = generator.generate(
-         [prompt], max_gen_len=max_gen_len, temperature=temperature, top_p=top_p
-     )
-     result = results[0].strip()
-     # print(result)
-     return result
-
-
- ckpt_path = "/data1/llma/7B/consolidated.00.pth"
- param_path = "/data1/llma/7B/params.json"
- tokenizer_path = "/data1/llma/tokenizer.model"
- instruct_adapter_path = "llama_adapter_len10_layer30_release.pth"
- caption_adapter_path = "llama_adapter_len10_layer30_caption_vit_l.pth"
- max_seq_len = 512
- max_batch_size = 32
-
-
- local_rank, world_size = setup_model_parallel()
- if local_rank > 0:
-     sys.stdout = open(os.devnull, "w")
-
- generator = load(
-     ckpt_path, param_path, tokenizer_path, instruct_adapter_path, caption_adapter_path, local_rank, world_size, max_seq_len, max_batch_size
- )
-
- answer_data = []
- for line in open('question.jsonl').readlines():
-     line = json.loads(line)
-     question_text = line["text"]
-     answer = {
-         "answer_id": shortuuid.uuid(),
-         "model_id": "LLaMA-Adapter",
-         "question_id": line["question_id"],
-         "question_text": question_text,
-         "text": '',
-         "metadata": {}
-     }
-     answer_data.append(answer)
-
- prompts = [PROMPT_DICT['prompt_no_input'].format_map({'instruction': x['question_text']}) for x in answer_data]
-
- results = []
- result = generator.generate(prompts[:32], max_gen_len=512, temperature=0.1, top_p=0.75)
- results.extend(result)
- result = generator.generate(prompts[32:64], max_gen_len=512, temperature=0.1, top_p=0.75)
- results.extend(result)
- result = generator.generate(prompts[64:], max_gen_len=512, temperature=0.1, top_p=0.75)
- results.extend(result)
-
- for i in range(len(answer_data)):
-     answer_i = answer_data[i]
-     answer_i['text'] = results[i].strip()
-     del answer_i['question_text']
-     answer_data[i] = answer_i
-
- with open('llama_adapter_7b.json', 'w') as f:
-     f.write("\n".join([json.dumps(x) for x in answer_data]))