Competition page: [NLP] Medical Search Query Relevance Judgment
https://tianchi.aliyun.com/competition/entrance/532001/introduction?spm=a2c22.12281925.0.0.684a71371ZOgK4

V1: score 0.8258. Approach: general-purpose BERT. Pretrained model: bert-base-chinese. Training result: Val P: 0.758, Val R: 76.6730%, Val F1: 0.7595, Val Acc: 81.7135%, Time: 0:01:28
V2: score 0.8534. Approach: domain-specific BERT. Pretrained model: ernie-health-chinese. Training result: loss: 0.50159, accu: 0.82927
V3: reuses the V2 code; only the model name in the config is changed to ernie-3.0-xbase-zh. Approach: a larger BERT. Pretrained model: ernie-3.0-xbase-zh. Training result: loss: 0.4193, accu: 0.85303
V4: score 0.8703, rank 60 (this round) / 23398 (teams or individuals). Approach: multi-model BERT ensemble. Each model is assigned a weight, and the final prediction is the sum over models of (model output * weight); a minimal sketch of this fusion rule follows the references below.

Run script:
python train_main.py

# Install paddlenlp (Windows version: paddlenlp==2.6.0)
python -m pip install paddlenlp==2.6.0 -f https://www.paddlepaddle.org.cn/whl/paddlenlp.html
# Install paddle (Windows version: paddlepaddle==2.6.1); the GPU build is the commented alternative below
python -m pip install paddlepaddle==2.6.1 -i https://pypi.tuna.tsinghua.edu.cn/simple
# python -m pip install paddlepaddle-gpu==2.6.1 -i https://pypi.tuna.tsinghua.edu.cn/simple

V1+V2 source code:
NLP Medical Search Query Relevance Judgment [Aliyun Tianchi competition], code resources covering the three versions V1+V2+V3 - CSDN downloads
V2 core code. Reference: Medical Search Query Relevance Judgment [Baseline] - 飞桨AI Studio星河社区 (PaddlePaddle AI Studio)
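As context for the V4 ensemble, here is a minimal sketch (not from the original code) of the weighted-logits fusion rule; the model names and weights simply mirror the configuration used further below.

import numpy as np

# Illustrative logits for one query pair (3 classes) from the three fine-tuned models
logits_per_model = [
    np.array([2.1, 0.3, -1.0]),  # e.g. ernie-3.0-xbase-zh
    np.array([1.5, 0.9, -0.2]),  # e.g. ernie-health-chinese
    np.array([0.8, 1.1, 0.1]),   # e.g. roberta-wwm-ext-large
]
weights = [0.50, 0.11, 0.15]     # per-model weights, as in weight_list below

# Final prediction = argmax over the weighted sum of the models' logits
fused = sum(w * l for w, l in zip(weights, logits_per_model))
print(fused, int(np.argmax(fused)))

Note that scaling all weights by the same positive constant does not change the argmax, so only their relative sizes matter.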
Multi-model ensemble:
# Model definitions
import copy
import functools

import paddle
import paddle.nn.functional as F
from paddle.io import DataLoader, BatchSampler
from paddlenlp.data import DataCollatorWithPadding
from paddlenlp.transformers import AutoModelForSequenceClassification, AutoTokenizer

# test_data and preprocess_function come from the data-processing section below.
# Load the models directly from the fine-tuned checkpoints
params_paths = [
    '/home/aistudio/data/data224188/model/ernie-3.0-xbase-zh_7_260',
    '/home/aistudio/data/data224188/model/ernie-health-chinese_1',
    '/home/aistudio/data/data224188/model/roberta-wwm-ext-large_3',
]
aotu_model = [True, True, True]
model_list = []
tokenizer_list = []
for i, params_path in enumerate(params_paths):
    if aotu_model[i]:
        model = AutoModelForSequenceClassification.from_pretrained(params_path, num_classes=3)
    else:
        model = CustomModel(params_path, num_classes=3)
    tokenizer = AutoTokenizer.from_pretrained(params_path, padding=True, max_length=128, truncation=True)
    model_list.append(model)
    tokenizer_list.append(tokenizer)

# Test set processing
test_data_loader_list = []
for tokenizer in tokenizer_list:
    # Preprocess the test set: the tokenizer converts text into integer id sequences
    new_dataset = copy.deepcopy(test_data)
    trans_func_test = functools.partial(preprocess_function, tokenizer=tokenizer, max_seq_length=128, is_test=True)
    test_ds_trans = new_dataset.map(trans_func_test)
    # Sample and group into batches
    collate_fn_test = DataCollatorWithPadding(tokenizer)
    test_batch_sampler = BatchSampler(test_ds_trans, batch_size=8, shuffle=False)
    test_data_loader = DataLoader(dataset=test_ds_trans, batch_sampler=test_batch_sampler, collate_fn=collate_fn_test)
    test_data_loader_list.append(test_data_loader)

# Model prediction
# Per-model weights
weight_list = [0.50, 0.11, 0.15]
results = []
for model in model_list:
    model.eval()
for batch_id, all_batches in enumerate(zip(*test_data_loader_list)):
    for model_id, batch in enumerate(all_batches):
        if model_id == 0:
            logits = model_list[model_id](batch['input_ids'], batch['token_type_ids'])
            logits *= weight_list[0]
        else:
            this_logits = model_list[model_id](batch['input_ids'], batch['token_type_ids'])
            logits += this_logits * weight_list[model_id]
    probs = F.softmax(logits, axis=-1)
    idx = paddle.argmax(probs, axis=1).numpy()
    idx = idx.tolist()
    preds = [str(i) for i in idx]
    results.extend(preds)
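For completeness, here is a small sketch (not part of the original post) of turning the fused predictions in results into a submission file with the same id/query1/query2/label fields that Trainer.test() writes; it assumes results follows the record order of data/KUAKE-QQR_test.json.

import json

# Hypothetical post-processing: attach the fused predictions to the raw test records
with open('data/KUAKE-QQR_test.json', 'r', encoding='utf-8') as f:
    test_records = json.load(f)
assert len(test_records) == len(results)
for record, pred in zip(test_records, results):
    record['label'] = pred  # predictions are already the strings '0' / '1' / '2'
with open('data/output_data/predict.json', 'w', encoding='utf-8') as f:
    f.write(json.dumps(test_records, ensure_ascii=False))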
Configuration and parameters:
import os
from paddlenlp.transformers import AutoTokenizer

# Directory containing the current script
current_path = os.path.dirname(os.path.abspath(__file__))
# Project root directory
root_path = os.path.dirname(current_path)
print("Project root:", root_path)
run_env = os.getenv('ENV', 'NULL')
print("Current environment:", run_env)
model_root_dir = '/data/nfs/baozhi/models'
data_root_dir = 'data/'


class Config(object):
    """Configuration parameters"""

    def __init__(self):
        self.hidden_size = 768
        self.batch_size = 64  # used during training
        # ***** number of epochs *****
        self.train_epochs = 10
        self.seed = 123
        self.lr = 5e-5
        self.other_lr = 3e-4
        self.early_stop = 20  # stop training early if the loss has not improved after this many evaluations
        # Hyperparameters
        self.log_steps = 10 * int(128 / self.batch_size)   # print training metrics every N steps
        self.eval_steps = 10 * int(128 / self.batch_size)  # evaluate (and save) the model every N steps
        # Number of classification labels
        self.num_tags = 3
        self.dataloader_num_workers = 4
        self.max_length = 128
        self.dropout = 0.1
        # If this directory does not exist, unpack the company.rar archive first
        self.data_dir = data_root_dir
        self.log_dir = data_root_dir + "logs/"
        self.output_dir = data_root_dir + "/output_data/"
        self.output_redict = data_root_dir + "/output_data/predict.json"
        # Local environment
        if run_env == 'local':
            self.model_path = "ernie-3.0-xbase-zh"
        else:
            self.model_path = "ernie-3.0-xbase-zh"
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_path, padding=True, max_length=self.max_length,
                                                       truncation=True)
        print('confs end...')
        print(self.tokenizer)
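A small usage sketch (assuming the class above lives in confs.py, as the later imports suggest): with the default batch_size of 64, log_steps and eval_steps both evaluate to 10 * int(128 / 64) = 20, so metrics are logged and the model evaluated every 20 optimizer steps.

from confs import Config

cfg = Config()  # constructing the config also loads the ernie-3.0-xbase-zh tokenizer
print(cfg.batch_size)                 # 64
print(cfg.log_steps, cfg.eval_steps)  # 20 20
print(cfg.output_redict)              # data//output_data/predict.json (note the doubled slash)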
Training:
import logging

import torch
from sklearn.metrics import accuracy_score, f1_score, classification_report

from eval import evaluate

logger = logging.getLogger(__name__)
import torch.nn.functional as F
import paddle
import os
from visualdl import LogWriter
import json
from confs import Config
# Start training
import time
import paddle.nn.functional as F
from paddlenlp.transformers import AutoModelForSequenceClassification


# Sort class indices by probability
def sort_list(data_list, min=0.5, size=3):
    ids = []
    data_dict = {}
    if not data_list or len(data_list) < 1:
        return ids
    for index, item in enumerate(data_list[0]):
        if item and type(item) == float:
            data_dict[index] = item
    sort_result = sorted(data_dict.items(), key=lambda kv: (kv[1], kv[0]), reverse=True)
    for index, value in sort_result:
        if len(ids) >= size:
            break
        if value > min:
            ids.append(index)
    return ids


class Trainer:
    def __init__(self, args: Config, train_loader, dev_loader, test_loader):
        self.checkpoint = None
        self.args = args
        self.device_type = 'cpu'
        # self.device_type2 = 'gpu' if torch.cuda.is_available() else 'cpu'
        self.device = torch.device(self.device_type)
        self.model = AutoModelForSequenceClassification.from_pretrained(args.model_path, num_classes=args.num_tags, dropout=args.dropout)
        # AdamW optimizer, cross-entropy loss, accuracy metric
        self.optimizer = paddle.optimizer.AdamW(learning_rate=self.args.lr, parameters=self.model.parameters())
        self.criterion = paddle.nn.loss.CrossEntropyLoss()
        # Per-class loss weights
        self.class_weight = paddle.to_tensor([1.3, 1.0, 1.0], dtype='float32')
        self.criterion = paddle.nn.loss.CrossEntropyLoss(weight=self.class_weight)
        self.train_loader = train_loader
        self.dev_loader = dev_loader
        self.test_loader = test_loader
        # self.model.to(self.device)

    def get_params(self):
        checkpoint = self.checkpoint
        return self.model, self.optimizer, checkpoint['epoch'], checkpoint['loss']

    def load_ckp(self, model, optimizer, check_point_path):
        if self.device.type == 'cpu':
            checkpoint = torch.load(check_point_path, map_location=torch.device('cpu'))
            print('checkpoint use cpu')
        else:
            checkpoint = torch.load(check_point_path)
            print('checkpoint use gpu')
        self.checkpoint = checkpoint
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        epoch = checkpoint['epoch']
        loss = checkpoint['loss']
        return model, optimizer, epoch, loss

    def save_ckp(self, state, checkpoint_path):
        torch.save(state, checkpoint_path)

    """
    def save_ckp(self, state, is_best, checkpoint_path, best_model_path):
        tmp_checkpoint_path = checkpoint_path
        torch.save(state, tmp_checkpoint_path)
        if is_best:
            tmp_best_model_path = best_model_path
            shutil.copyfile(tmp_checkpoint_path, tmp_best_model_path)
    """

    # A torch/sklearn-based evaluate kept from an earlier version (unused):
    # def evaluate(model, class_list, data_iter, test=False):
    #     model.eval()
    #     loss_total = 0
    #     predict_all = np.array([], dtype=int)
    #     labels_all = np.array([], dtype=int)
    #     with torch.no_grad():
    #         for texts, labels in data_iter:
    #             outputs = model(texts)
    #             loss = F.cross_entropy(outputs, labels)
    #             loss_total += loss
    #             labels = labels.data.cpu().numpy()
    #             predic = torch.max(outputs.data, 1)[1].cpu().numpy()
    #             labels_all = np.append(labels_all, labels)
    #             predict_all = np.append(predict_all, predic)
    #     p = metrics.precision_score(labels_all, predict_all, average='macro')
    #     r = metrics.recall_score(labels_all, predict_all, average='macro')
    #     f1 = metrics.f1_score(labels_all, predict_all, average='macro')
    #     acc = metrics.accuracy_score(labels_all, predict_all)
    #     if test:
    #         report = metrics.classification_report(labels_all, predict_all, target_names=class_list, digits=4)
    #         confusion = metrics.confusion_matrix(labels_all, predict_all)
    #         return p, r, f1, acc, loss_total / len(data_iter), report, confusion, predict_all
    #     return p, r, f1, acc, loss_total / len(data_iter)

    def train(self):
        # Freeze the backbone; only the classifier head and pooler stay trainable
        # ernie.pooler
        name_list = []
        for name, param in self.model.named_parameters():
            name_list.append(name)
            if not name.startswith('classifier') and not name.startswith('roberta.pooler'):
                param.requires_grad = False
            else:
                param.requires_grad = True
        print(name_list[-10:])

        # Hyperparameters
        lr = self.args.lr                  # learning rate
        epochs = self.args.train_epochs    # number of training epochs
        early_stop = self.args.early_stop  # stop early if the eval loss has not improved after this many evaluations
        save_dir = self.args.output_dir    # directory for model parameters saved during training
        log_dir = self.args.log_dir        # VisualDL log directory
        twriter = LogWriter(log_dir)

        metric = paddle.metric.Accuracy()
        # rdrop_loss = paddlenlp.losses.RDropLoss()  # R-Drop data augmentation

        stop_trainning = False
        best_train_dev_error = 1
        eval_times = 0
        best_loss = 1000.
        best_acc = 0
        best_step = 0
        global_step = 0  # number of iterations
        tic_train = time.time()

        # Print the full model structure
        print(self.model)

        for epoch in range(self.args.train_epochs):
            if stop_trainning:
                break
            for step, batch in enumerate(self.train_loader):
                self.model.train()
                print(step)
                # input_ids, token_type_ids, labels, attention_mask = batch['input_ids'], batch['token_type_ids'], batch['labels'], batch['attention_mask']
                input_ids, token_type_ids, labels = batch['input_ids'], batch['token_type_ids'], batch['labels']

                # Forward pass: logits, loss, class probabilities, accuracy
                logits = self.model(input_ids, token_type_ids)
                # logits = self.model(input_ids, attention_mask, token_type_ids)
                loss = self.criterion(logits, labels)
                probs = F.softmax(logits, axis=1)
                correct = metric.compute(probs, labels)
                metric.update(correct)
                acc = metric.accumulate()

                # # R-Drop data augmentation
                # logits_2 = model(input_ids, token_type_ids)  # dropout makes every forward pass slightly different
                # kl_loss = rdrop_loss(logits, logits_2)
                # loss = loss + kl_loss

                # Every log_steps iterations, print loss, accuracy and speed
                global_step += 1
                if global_step % self.args.log_steps == 0:
                    print("global step %d, epoch: %d, batch: %d, loss: %.5f, accu: %.5f, speed: %.2f step/s"
                          % (global_step, epoch, step, loss, acc, 10 / (time.time() - tic_train)))
                    tic_train = time.time()
                    # VisualDL logging
                    twriter.add_scalar('loss', loss, global_step)
                    twriter.add_scalar('train acc', acc, global_step)

                # Backward pass and parameter update
                loss.backward()
                self.optimizer.step()
                self.optimizer.clear_grad()

                # Evaluate the current model; save the best parameters and the tokenizer vocabulary
                if global_step % self.args.eval_steps == 0:
                    if not os.path.exists(save_dir):
                        os.makedirs(save_dir)
                    print("global step", global_step, end=' ')
                    acc_eval, loss_eval = evaluate(self.model, self.criterion, metric, self.dev_loader)
                    # VisualDL logging
                    twriter.add_scalar('eval acc', acc_eval, global_step)
                    twriter.add_scalar('eval loss', loss_eval, global_step)

                    eval_times += 1
                    if eval_times > early_stop:
                        print('-----------------Early Stopping-----------------')
                        stop_trainning = True
                        break

                    # Save the model
                    if acc_eval > best_acc and loss_eval < best_loss:
                        best_acc = acc_eval
                        best_loss = loss_eval
                        best_step = global_step
                        self.model.save_pretrained(save_dir)
                        self.args.tokenizer.save_pretrained(save_dir)
                        print('save model to {}'.format(save_dir))
                        eval_times = 0

                    # Also keep checkpoints where loss and acc did not both improve,
                    # but both train acc and eval acc look reasonable
                    if acc_eval > 0.84 and acc > 0.90:
                        best_train_dev_error = abs(acc - acc_eval)
                        new_save_dir = save_dir[:-1] + '_' + str(global_step) + '/'
                        if not os.path.exists(new_save_dir):
                            os.makedirs(new_save_dir)
                        print('new save model to {}'.format(new_save_dir))
                        self.model.save_pretrained(new_save_dir)
                        self.args.tokenizer.save_pretrained(new_save_dir)

    def dev(self):
        self.model.eval()
        total_loss = 0.0
        dev_outputs = []
        dev_targets = []
        with torch.no_grad():
            for dev_step, dev_data in enumerate(self.dev_loader):
                token_ids = dev_data['input_ids'].to(self.device)
                attention_masks = dev_data['attention_mask'].to(self.device)
                token_type_ids = dev_data['token_type_ids'].to(self.device)
                labels = dev_data['labels'].to(self.device)
                outputs = self.model(token_ids, attention_masks, token_type_ids)
                self.model.zero_grad()
                loss = F.cross_entropy(outputs, labels)
                # val_loss = val_loss + ((1 / (dev_step + 1))) * (loss.item() - val_loss)
                total_loss += loss.item()
                outputs1 = torch.max(outputs, 1)[1]
                dev_outputs.extend(outputs1.tolist())
                dev_targets.extend(labels.cpu().detach().numpy().tolist())
        return total_loss, dev_outputs, dev_targets

    def test(self, checkpoint_path):
        model = self.model
        optimizer = self.optimizer
        model, optimizer, epoch, loss = self.load_ckp(model, optimizer, checkpoint_path)
        model.eval()
        model.to(self.device)
        total_loss = 0.0
        test_outputs = []
        test_targets = []
        index = 0
        test_examples = self.test_loader.dataset.examples
        result_examples = []
        with torch.no_grad():
            for test_step, test_data in enumerate(self.test_loader):
                token_ids = test_data['input_ids'].to(self.device)
                attention_masks = test_data['attention_mask'].to(self.device)
                token_type_ids = test_data['token_type_ids'].to(self.device)
                outputs = model(token_ids, attention_masks, token_type_ids)
                self.model.zero_grad()
                outputs1 = torch.max(outputs, 1)[1].cpu()
                outputs1 = outputs1.cpu().detach().numpy().tolist()
                test_outputs.extend(outputs1)
                # Write each prediction back onto its test example
                for item in outputs1:
                    example = test_examples[index]
                    example.label = str(item)
                    index += 1
        # Collect all examples (now carrying predicted labels) into the submission structure
        for example in test_examples:
            result_examples.append({"id": example.guid, "query1": example.text_a,
                                    "query2": example.text_b, "label": example.label})
        with open(self.args.output_redict, 'w', encoding='utf-8') as f:
            f.write(json.dumps(result_examples, ensure_ascii=False))
        assert len(test_outputs) == len(test_examples)
        print(len(test_examples))

    def predict(self, tokenizer, text, id2label, max_seq_len):
        model, optimizer, epoch, loss = self.get_params()
        model.eval()
        model.to(self.device)
        with torch.no_grad():
            inputs = tokenizer.encode_plus(text=text,
                                           add_special_tokens=True,
                                           max_length=max_seq_len,
                                           truncation='longest_first',
                                           padding="max_length",
                                           return_token_type_ids=True,
                                           return_attention_mask=True,
                                           return_tensors='pt')
            token_ids = inputs['input_ids'].to(self.device)
            attention_masks = inputs['attention_mask'].to(self.device)
            token_type_ids = inputs['token_type_ids'].to(self.device)
            outputs = model(token_ids, attention_masks, token_type_ids)
            self.model.zero_grad()
            outputs1 = torch.max(outputs, 1)[1]
            if len(outputs1) != 0:
                return outputs1[0]
            else:
                return None

    def get_metrics(self, outputs, targets):
        accuracy = accuracy_score(targets, outputs)
        micro_f1 = f1_score(targets, outputs, average='micro')
        macro_f1 = f1_score(targets, outputs, average='macro')
        return accuracy, micro_f1, macro_f1

    def get_classification_report(self, outputs, targets, labels):
        # confusion_matrix = multilabel_confusion_matrix(targets, outputs)
        report = classification_report(targets, outputs, target_names=labels)
        return report
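One detail worth calling out from Trainer.__init__ is the class-weighted cross-entropy: samples whose gold label is class 0 contribute 1.3 times as much to the loss as samples from the other classes. A minimal illustration (not from the original code; reduction='none' is used here so the per-sample scaling is visible):

import paddle
import paddle.nn.functional as F

logits = paddle.to_tensor([[0.2, 1.0, -0.5]])  # one sample, 3 classes
label = paddle.to_tensor([0])                   # gold label is class 0
weight = paddle.to_tensor([1.3, 1.0, 1.0], dtype='float32')

plain = F.cross_entropy(logits, label, reduction='none')
weighted = F.cross_entropy(logits, label, weight=weight, reduction='none')
print(float(plain), float(weighted))  # the class-0 sample is scaled up by its weight (1.3)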
Data processing:
# Tokenization and batching
import functools
import numpy as np
from paddle.io import DataLoader, BatchSampler
from paddlenlp.data import DataCollatorWithPadding

# Dataset definition
from paddlenlp.datasets import MapDataset
from PIL import Image
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import numpy as np
import sys
import matplotlib.pyplot as plt
import os
import pickle
from visualdl import LogWriter
from tqdm import tqdm
import json
import copy


class MyDataset(paddle.io.Dataset):
    def __init__(self, mode='train'):
        super(MyDataset, self).__init__()
        self.mode = mode
        if mode == 'train':
            self.data_path = 'data/KUAKE-QQR_train.json'
        if mode == 'dev':
            self.data_path = 'data/KUAKE-QQR_dev.json'
        if mode == 'test':
            self.data_path = 'data/KUAKE-QQR_test.json'

        with open(self.data_path, 'r', encoding='utf-8') as input_data:
            self.json_content = json.load(input_data)
        self.data = []
        for block in self.json_content:
            sample = {}
            sample['record_id'] = block['id']
            sample['query1'] = block['query1']
            sample['query2'] = block['query2']
            sample['label'] = block['label']
            # Skip samples without a valid label
            if sample['label'] == 'NA':
                continue
            self.data.append(sample)

    def __getitem__(self, idx):
        query1 = self.data[idx]['query1']
        query2 = self.data[idx]['query2']
        label = self.data[idx]['label']
        return self.data[idx]

    def __len__(self):
        return len(self.data)

    def label_num(self):
        # Count the number of samples per label in this dataset
        label_0, label_1, label_2 = 0, 0, 0
        for sample in self.data:
            if sample['label'] == '0':
                label_0 += 1
            if sample['label'] == '1':
                label_1 += 1
            if sample['label'] == '2':
                label_2 += 1
        return (label_0, label_1, label_2)


# Preprocessing function: the tokenizer converts text into integer id sequences
def preprocess_function(examples, tokenizer, max_seq_length, is_test=False):
    result = tokenizer(text=examples["query1"], text_pair=examples["query2"], max_length=max_seq_length)
    if not is_test:
        if examples["label"] == '0':
            result["labels"] = 0
        elif examples["label"] == '1':
            result["labels"] = 1
        elif examples["label"] == '2':
            result["labels"] = 2
        else:
            print(examples)
        # result["labels"] = int(examples["label"])
    else:
        result['id'] = int(examples["record_id"][1:])  # record_id looks like 's1'
    return result


def gen_data_load(train_data: MapDataset, tokenizer, batch_size: int, max_length: int, shuffle=False):
    trans_func = functools.partial(preprocess_function, tokenizer=tokenizer, max_seq_length=max_length)
    train_data_cp = copy.deepcopy(train_data).map(trans_func)
    print(train_data_cp[0])
    # Build the collate_fn: pad every sequence to the batch maximum length, then stack
    collate_fn = DataCollatorWithPadding(tokenizer, padding=True)
    # Define the BatchSampler (batch size, whether to shuffle), then the DataLoader
    train_batch_sampler = BatchSampler(train_data_cp, batch_size=batch_size, shuffle=shuffle)
    train_data_loader = DataLoader(dataset=train_data_cp, batch_sampler=train_batch_sampler, collate_fn=collate_fn)
    for _, data in enumerate(train_data_loader, start=1):
        print("gen_data_load:pp")
        print(data)
        break
    print("gen_data_load end...")
    return train_data_loader
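A quick illustrative check (with a made-up query pair) of what preprocess_function produces before batching; the tokenizer name matches the V3 configuration:

from paddlenlp.transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("ernie-3.0-xbase-zh")
example = {"record_id": "s1", "query1": "小儿感冒发烧怎么办", "query2": "儿童感冒发热如何处理", "label": "2"}
encoded = preprocess_function(example, tokenizer, max_seq_length=128)
print(encoded["input_ids"])       # token ids of "[CLS] query1 [SEP] query2 [SEP]"
print(encoded["token_type_ids"])  # 0 for the first query, 1 for the second
print(encoded["labels"])          # 2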
Evaluation:
import numpy as np
def evaluate(model, criterion, metric, data_loader, phase="dev"):
    model.eval()
    metric.reset()
    losses = []
    for batch in data_loader:
        input_ids, token_type_ids, labels = batch['input_ids'], batch['token_type_ids'], batch['labels']
        logits = model(input_ids=input_ids, token_type_ids=token_type_ids)
        loss = criterion(logits, labels)
        losses.append(loss.numpy())
        correct = metric.compute(logits, labels)
        metric.update(correct)
    accu = metric.accumulate()
    print("eval {} loss: {:.5}, accu: {:.5}".format(phase, np.mean(losses), accu))
    model.train()
    metric.reset()
    return accu, np.mean(losses)
Program entry point:
import logging

from bert_trainer import Trainer
from confs import Config

logger = logging.getLogger(__name__)

import os
# Number of training samples
# Dataset definition
from paddlenlp.datasets import MapDataset
from preprocess import MyDataset, gen_data_load

if __name__ == '__main__':
    label2id = {"0": 0, "1": 1, "2": 2}
    id2label = {0: "0", 1: "1", 2: "2"}
    config = Config()
    config.num_tags = len(label2id.keys())
    print(config)

    train_flag = True
    test_flag = True
    predict_flag = False

    # initialize tokenizer
    tokenizer = config.tokenizer

    # Training and validation data
    print('initialize dataloader')
    train_data = MyDataset()
    test_data = MyDataset('test')
    dev_data = MyDataset('dev')
    # Convert to the PaddleNLP MapDataset format
    train_data = MapDataset(train_data)
    test_data = MapDataset(test_data)
    dev_data = MapDataset(dev_data)

    print("gen_data_load start...")
    train_loader = gen_data_load(train_data, config.tokenizer, config.batch_size, config.max_length, shuffle=True)
    dev_loader = gen_data_load(dev_data, config.tokenizer, config.batch_size, config.max_length, shuffle=False)
    test_loader = gen_data_load(test_data, config.tokenizer, config.batch_size, config.max_length, shuffle=False)

    if train_flag:
        print("train...")
        trainer = Trainer(config, train_loader, dev_loader, dev_loader)
        trainer.train()

    if test_flag:
        # Testing
        print('======== run test ========')
        trainer = Trainer(config, None, None, test_loader)
        checkpoint_path = os.path.join(config.output_dir, 'best.pt')
        trainer.test(checkpoint_path)
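Putting the pieces together, the imports and paths above imply roughly the following project layout (file names inferred from the import statements; adjust to your own setup):

train_main.py    # program entry point (the block above)
confs.py         # Config
bert_trainer.py  # Trainer
preprocess.py    # MyDataset, preprocess_function, gen_data_load
eval.py          # evaluate
data/
    KUAKE-QQR_train.json
    KUAKE-QQR_dev.json
    KUAKE-QQR_test.json
    logs/         # VisualDL logs
    output_data/  # saved models and predict.json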