"""
This script provides an example to wrap TencentPretrain for C3 (a multiple choice dataset) inference.
"""
import sys
import os
import json
import argparse

import torch
import torch.nn as nn

tencentpretrain_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
sys.path.append(tencentpretrain_dir)

from tencentpretrain.utils.constants import *
from tencentpretrain.utils import *
from tencentpretrain.utils.config import load_hyperparam
from tencentpretrain.model_loader import load_model
from tencentpretrain.opts import infer_opts, tokenizer_opts
from finetune.run_classifier import batch_loader
from finetune.run_c3 import MultipleChoice, read_dataset


def main():
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    infer_opts(parser)

    parser.add_argument("--max_choices_num", default=4, type=int,
                        help="The maximum number of candidate answers; instances with fewer candidates are padded to this number.")

    tokenizer_opts(parser)

    args = parser.parse_args()

    # Load the hyperparameters from the config file.
    args = load_hyperparam(args)

    # Build tokenizer.
    args.tokenizer = str2tokenizer[args.tokenizer](args)

    # Build classification model and load parameters.
    model = MultipleChoice(args)
    model = load_model(model, args.load_model_path)

    # For simplicity, we use the DataParallel wrapper to use multiple GPUs.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)
    if torch.cuda.device_count() > 1:
        print("{} GPUs are available. Let's use them.".format(torch.cuda.device_count()))
        model = torch.nn.DataParallel(model)

    dataset = read_dataset(args, args.test_path)

    # Each example produced by read_dataset is (token ids, label, segment ids),
    # with one token/segment sequence per candidate answer.
    src = torch.LongTensor([example[0] for example in dataset])
    tgt = torch.LongTensor([example[1] for example in dataset])
    seg = torch.LongTensor([example[2] for example in dataset])

    batch_size = args.batch_size
    instances_num = src.size()[0]

    print("The number of prediction instances: ", instances_num)

    model.eval()
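
    # Re-read the raw test file to collect question ids, so that each
    # prediction can be matched back to its question.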
    with open(args.test_path) as f:
        data = json.load(f)

    question_ids = []
    for i in range(len(data)):
        questions = data[i][1]
        for question in questions:
            question_ids.append(question["id"])
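
    # Write one JSON object per question (of the form {"id": ..., "label": ...}),
    # one object per line.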
    index = 0
    with open(args.prediction_path, "w") as f:
        for i, (src_batch, _, seg_batch, _) in enumerate(batch_loader(batch_size, src, tgt, seg)):
            src_batch = src_batch.to(device)
            seg_batch = seg_batch.to(device)
            with torch.no_grad():
                # The target is None at inference time; the model returns (loss, logits).
                _, logits = model(src_batch, None, seg_batch)
            pred = torch.argmax(logits, dim=1).cpu().numpy().tolist()
            for j in range(len(pred)):
                output = {}
                output["id"] = question_ids[index]
                index += 1
                output["label"] = int(pred[j])
                f.write(json.dumps(output))
                f.write("\n")


if __name__ == "__main__":
    main()