HuggingFace Transformers

from transformers import BertTokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
example_text = 'I will watch Memento tonight'
bert_input = tokenizer(example_text,
                       padding='max_length',
                       max_length=10,
                       truncation=True,
                       return_tensors="pt")
# ------- bert_input ------
print(bert_input['input_ids'])
print(bert_input['token_type_ids'])
print(bert_input['attention_mask'])

tensor([[  101,   146,  1209,  2824,  2508,
         26173,  3568,   102,     0,     0]])
tensor([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
tensor([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0]])

  • padding: pad every sequence to the specified maximum length.
  • max_length: the maximum length of each sequence. We use 10 in this example, but for the actual dataset in this post we will use 512, the longest sequence length BERT allows.
  • truncation: if True, tokens beyond the maximum length are cut from each sequence.
  • return_tensors: the type of tensors to return. Since we are using PyTorch we pass pt; if you use TensorFlow, pass tf instead.
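
As a quick check (a minimal sketch reusing the tokenizer and bert_input from above), mapping the padded ids back to tokens makes the [CLS]/[SEP] markers and the [PAD] positions that the attention mask zeroes out visible:

print(tokenizer.convert_ids_to_tokens(bert_input['input_ids'][0]))
# e.g. ['[CLS]', 'I', 'will', 'watch', ..., 'tonight', '[SEP]', '[PAD]', '[PAD]']
# ('Memento' is split into two word pieces, which is why there are 8 non-pad tokens)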

# Fine-tuning with the built-in Trainer

from transformers import BertForSequenceClassification, BertTokenizerFast

# args is an argparse namespace defined elsewhere; args.pretrain holds the name
# or local path of the pretrained checkpoint.
tokenizer = BertTokenizerFast.from_pretrained(args.pretrain)
model = BertForSequenceClassification.from_pretrained(args.pretrain, num_labels=2, output_hidden_states=False)


from transformers import Trainer, TrainingArguments, EarlyStoppingCallback
from sklearn.metrics import classification_report, precision_score, \
recall_score, f1_score, accuracy_score, precision_recall_fscore_support


from torch.utils.data import Dataset


# The key part is the __getitem__ method: it must return a dict containing the four
# keys input_ids, token_type_ids, attention_mask and labels (taking BERT as the
# example; other models may differ slightly). Note that even though each item is a
# single sample, the label key is called "labels". Return that dict (the `item`
# object in the code below).

# The data format is "text\tlabel": text and label separated by a tab, one sample
# per line, e.g.:
#
# 我的梦想是星辰大海 1
#
# 雄心万丈躺在床上 0
#
# In __init__ we read the data and truncate each text to the configured maximum
# length max_len (the tokenizer automatically adds [CLS] and [SEP], so we subtract
# 2 from the maximum length). After splitting each line into text and label,
# calling tokenizer(texts, padding=True, truncation=True, return_tensors='pt')
# produces the input encoding BERT needs (a transformers.BatchEncoding object whose
# .data holds the three tensors 'input_ids', 'token_type_ids' and 'attention_mask';
# for fine-tuning no extra processing is usually needed). Converting the labels to
# a tensor then completes the dataset initialization.

import os
import torch

class MyDataset(Dataset):
    def __init__(self, file, tokenizer, max_len=512):
        assert os.path.exists(file)
        data = open(file, 'r', encoding='utf-8').read().strip().split('\n')
        texts = [x.split('\t')[0][:max_len - 2] for x in data]
        labels = [int(x.split('\t')[1]) for x in data]
        self.encodings = tokenizer(texts, padding=True, truncation=True, return_tensors='pt')
        self.labels = torch.tensor(labels)

    def __getitem__(self, idx):
        item = {key: val[idx] for key, val in self.encodings.items()}
        item['labels'] = self.labels[idx]
        return item

    def __len__(self):
        return len(self.labels)
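
# A minimal sketch of constructing the datasets consumed by the Trainer below;
# the file names train.tsv and valid.tsv are placeholders for your own
# tab-separated "text\tlabel" files.
train_set = MyDataset('train.tsv', tokenizer)
valid_set = MyDataset('valid.tsv', tokenizer)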

def compute_metrics(pred):
    labels = pred.label_ids
    preds = pred.predictions.argmax(-1)
    precision, recall, f1, _ = precision_recall_fscore_support(labels, preds, average='binary')
    acc = accuracy_score(labels, preds)
    return {
        'accuracy': acc,
        'f1': f1,
        'precision': precision,
        'recall': recall
    }

training_args = TrainingArguments(
    output_dir=args.save_path,          # directory for results and checkpoints
    overwrite_output_dir=True,
    num_train_epochs=args.epoch,
    per_device_train_batch_size=args.batch_size,
    per_device_eval_batch_size=args.batch_size,
    learning_rate=1e-5,
    eval_steps=500,
    load_best_model_at_end=True,
    metric_for_best_model="precision",  # criterion for the best model loaded at the end; here the checkpoint with the highest precision
    weight_decay=0.01,
    warmup_steps=500,
    evaluation_strategy="steps",        # evaluate every eval_steps (500) steps; can also be "epoch", i.e. once per epoch
    logging_strategy="steps",
    save_strategy='steps',
    logging_steps=100,
    save_total_limit=3,
    seed=2021,
    logging_dir=args.logging_dir        # directory for logs
)

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_set,
    eval_dataset=valid_set,
    tokenizer=tokenizer,
    compute_metrics=compute_metrics,
    callbacks=[EarlyStoppingCallback(early_stopping_patience=3)],  # early-stopping callback
)

trainer.train()
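
# Once training finishes, the best checkpoint (highest precision, per
# metric_for_best_model) is already loaded, so we can evaluate or predict
# directly; a minimal sketch:
print(trainer.evaluate())              # metrics on eval_dataset
pred_out = trainer.predict(valid_set)  # predictions, label_ids and metrics
print(pred_out.metrics)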

########################################################################

import torch
import transformers

tokenizer = transformers.BertTokenizer.from_pretrained('nghuyong/ernie-1.0-base-zh')
model = transformers.ErnieForMaskedLM.from_pretrained('nghuyong/ernie-1.0-base-zh')
input_ids = torch.tensor([tokenizer.encode(text="[MASK][MASK][MASK]是中国神魔小说的经典之作,与《三国演义》《水浒传》《红楼梦》并称为中国古典四大名著。",
                                           add_special_tokens=True)])
model.eval()
with torch.no_grad():
    predictions = model(input_ids)[0][0]
predicted_index = [torch.argmax(predictions[i]).item() for i in range(predictions.shape[0])]
predicted_token = [tokenizer._convert_id_to_token(predicted_index[i]) for i in range(1, (predictions.shape[0] - 1))]
print('predict result:\t', predicted_token)




import torch
import torch.nn as nn
from transformers import AutoTokenizer, AutoModel

class Config(object):

    def __init__(self):
        self.pre_bert_path = "nghuyong/ernie-1.0"
        self.train_path = 'data/dataset_train.csv'  # training set
        self.dev_path = 'data/dataset_valid.csv'  # validation set
        self.test_path = 'data/test.csv'  # test set
        self.class_path = 'data/class.json'  # class-name list
        self.save_path = 'mymodel/ernie.pth'        # where the trained model is saved
        self.num_classes = 10
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')   # device

        self.epochs = 10  # number of epochs
        self.batch_size = 128  # mini-batch size
        self.maxlen = 32  # every text is processed to this length (short ones padded, long ones cut)
        self.learning_rate = 5e-4                                       # learning rate
        self.hidden_size = 768
        self.tokenizer = AutoTokenizer.from_pretrained(self.pre_bert_path)

class Model(nn.Module):
    def __init__(self, config):
        super(Model, self).__init__()
        self.ernie = AutoModel.from_pretrained(config.pre_bert_path)
        # freeze the pretrained parameters so they are not updated
        for param in self.ernie.parameters():
            param.requires_grad = False
        self.fc = nn.Linear(config.hidden_size, config.num_classes)

    def forward(self, input):
        out = self.ernie(input_ids=input['input_ids'], attention_mask=input['attention_mask'], token_type_ids=input['token_type_ids'])
        # use only the last layer's output corresponding to [CLS]
        out = self.fc(out.pooler_output)
        return out
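
A quick forward-pass check of this classifier (a minimal sketch; the sample sentence is a hypothetical placeholder):

config = Config()
model = Model(config).to(config.device)
inputs = config.tokenizer(['这家店的菜很好吃'], padding=True, truncation=True,
                          max_length=config.maxlen, return_tensors='pt')
inputs = {k: v.to(config.device) for k, v in inputs.items()}
print(model(inputs).shape)  # torch.Size([1, 10])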

train_evl.py

import json
from mymodel import myBert,myAlbertl,myERNIE
import mydataset
import torch
import pandas as pd
from torch import nn,optim
from torch.utils.data import DataLoader

config = myERNIE.Config()

label_dict = json.load(open(config.class_path, 'r', encoding='utf-8'))
# load the training, validation and test sets
train_df = pd.read_csv(config.train_path)
# the string labels are mapped to integer ids here
train_ds = mydataset.GetLoader(train_df['review'], [label_dict[i] for i in train_df['cat']])
train_dl = DataLoader(train_ds, batch_size=config.batch_size, shuffle=True)
valid_df = pd.read_csv(config.dev_path)
valid_ds = mydataset.GetLoader(valid_df['review'], [label_dict[i] for i in valid_df['cat']])
valid_dl = DataLoader(valid_ds, batch_size=config.batch_size, shuffle=True)
test_df = pd.read_csv(config.test_path)
test_ds = mydataset.GetLoader(test_df['review'], [label_dict[i] for i in test_df['cat']])
test_dl = DataLoader(test_ds, batch_size=config.batch_size, shuffle=True)

# compute the number of correct predictions and the batch size
def accuracys(pre, label):
    pre = torch.max(pre.data, 1)[1]
    accuracy = pre.eq(label.data.view_as(pre)).sum()
    return accuracy, len(label)

# build the network
model = myERNIE.Model(config).to(config.device)

# training
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=config.learning_rate)
best_loss = float('inf')
for epoch in range(config.epochs):
    train_acc = []
    for batch_idx, (data, target) in enumerate(train_dl):
        inputs = config.tokenizer(list(data), truncation=True, return_tensors="pt", padding=True, max_length=config.maxlen)
        inputs = {k: v.to(config.device) for k, v in inputs.items()}  # move the batch to the model's device
        target = target.to(config.device)
        model.train()
        out = model(inputs)
        loss = criterion(out, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_acc.append(accuracys(out, target))
        train_r = (sum(tup[0] for tup in train_acc), sum(tup[1] for tup in train_acc))
        print('epoch: {}\t[{}/{}] {:.0f}%\tloss: {:.6f}\ttrain acc: {:.2f}%\t'.format(
            epoch, batch_idx, len(train_dl), 100. * batch_idx / len(train_dl), loss.data,
            100. * train_r[0].cpu().numpy() / train_r[1]
        ))
        # run validation every 100 batches
        if batch_idx % 100 == 0 and batch_idx != 0:
            model.eval()
            val_acc = []
            loss_total = 0
            with torch.no_grad():
                for (data, target) in valid_dl:
                    inputs = config.tokenizer(list(data), truncation=True, return_tensors="pt", padding=True,
                                              max_length=config.maxlen)
                    inputs = {k: v.to(config.device) for k, v in inputs.items()}
                    target = target.to(config.device)
                    out = model(inputs)
                    loss_total = criterion(out, target).data + loss_total
                    val_acc.append(accuracys(out, target))
            val_r = (sum(tup[0] for tup in val_acc), sum(tup[1] for tup in val_acc))
            print('loss: {:.6f}\tval acc: {:.2f}%\t'.format(loss_total / len(valid_dl), 100. * val_r[0].cpu().numpy() / val_r[1]))
            # save the model whenever the validation loss improves on the best so far
            if loss_total < best_loss:
                best_loss = loss_total
                torch.save(model.state_dict(), config.save_path)

# testing
model.load_state_dict(torch.load(config.save_path))
model.eval()
test_acc = []
with torch.no_grad():
    for (data, target) in test_dl:
        inputs = config.tokenizer(list(data), truncation=True, return_tensors="pt", padding=True, max_length=config.maxlen)
        inputs = {k: v.to(config.device) for k, v in inputs.items()}
        target = target.to(config.device)
        out = model(inputs)
        test_acc.append(accuracys(out, target))
test_r = (sum(tup[0] for tup in test_acc), sum(tup[1] for tup in test_acc))

print('test accuracy: {:.2f}%\t'.format(100. * test_r[0].cpu().numpy() / test_r[1]))
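
With the best checkpoint reloaded, single-text inference is straightforward; a minimal sketch (the sample review and the inverted id2label lookup are illustrative, with class names coming from config.class_path):

id2label = {v: k for k, v in label_dict.items()}
text = '这家餐厅的菜品非常好吃'
inputs = config.tokenizer([text], truncation=True, return_tensors='pt', padding=True, max_length=config.maxlen)
inputs = {k: v.to(config.device) for k, v in inputs.items()}
with torch.no_grad():
    pred = model(inputs).argmax(dim=1).item()
print(text, '->', id2label[pred])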





HAN network:

train.py

# coding=utf-8
import torch
import torchkeras
import torch.nn.functional as F

from matplotlib import pyplot as plt
import copy
import datetime
import pandas as pd
from sklearn.metrics import accuracy_score
import math
import time
import sys

sys.path.append('/home/xxx/document-level-classification/')
from han.config import *
from han.prepare_data import load_data

ngpu = 4

use_cuda = torch.cuda.is_available() # check whether a GPU is available
device = torch.device("cuda:0" if (use_cuda and ngpu>0) else "cpu")
print('*'*8, 'device:', device)

# loss function and evaluation metric
loss_func = torch.nn.CrossEntropyLoss()
metric_func = lambda y_pred, y_true: accuracy_score(y_true, y_pred)
metric_name = 'acc'
df_history = pd.DataFrame(columns=["epoch", "loss", metric_name, "val_loss", "val_"+metric_name])

# print a timestamped separator
def printbar():
    nowtime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    print('\n' + "=========="*8 + '%s'%nowtime)

class MyHAN(torch.nn.Module):
    def __init__(self, max_word_num, max_sents_num, vocab_size, hidden_size, num_classes, embedding_dim, embedding_matrix=None, dropout_p=0.5):
        super(MyHAN, self).__init__()

        self.max_word_num = max_word_num  # 15: maximum number of words per sentence
        self.max_sents_num = max_sents_num  # 60: maximum number of sentences per document

        self.embedding_dim = embedding_dim
        self.hidden_size = hidden_size
        self.num_classes = num_classes
        self.dropout_p = dropout_p

        self.embedding = torch.nn.Embedding(vocab_size, self.embedding_dim, padding_idx=pad_id)
        if embedding_matrix is not None:
            self.embedding.weight.data.copy_(torch.from_numpy(embedding_matrix))
            for p in self.embedding.parameters():
                p.requires_grad = False

        self.dropout0 = torch.nn.Dropout(dropout_p)

        # self.layernorm1 = torch.nn.LayerNorm(normalized_shape=(sent_maxlen, embedding_dim), eps=1e-6)
        # self.layernorm2 = torch.nn.LayerNorm(normalized_shape=2*hidden_size, eps=1e-6)

        self.bi_rnn1 = torch.nn.GRU(self.embedding_dim, self.hidden_size, bidirectional=True, batch_first=True, dropout=0.2)
        self.word_attn = torch.nn.Linear(self.hidden_size * 2, self.hidden_size)
        self.word_ctx = torch.nn.Linear(self.hidden_size, 1, bias=False)

        self.bi_rnn2 = torch.nn.GRU(2 * self.hidden_size, self.hidden_size, bidirectional=True, batch_first=True, dropout=0.2)
        self.sent_attn = torch.nn.Linear(self.hidden_size * 2, self.hidden_size)
        self.sent_ctx = torch.nn.Linear(self.hidden_size, 1, bias=False)

        self.dropout = torch.nn.Dropout(dropout_p)
        self.out = torch.nn.Linear(self.hidden_size * 2, self.num_classes)

    def forward(self, inputs, hidden1=None, hidden2=None):  # [b, 60, 15]
        embedded = self.dropout0(self.embedding(inputs))  # =>[b, 60, 15, 100]

        word_inputs = embedded.view(-1, embedded.size()[-2], embedded.size()[-1])  # =>[b*60, 15, embedding_dim]
        # word_inputs = self.layernorm1(word_inputs)
        self.bi_rnn1.flatten_parameters()
        """
        为了提高内存的利用率和效率,调用flatten_parameters让parameter的数据存放成contiguous chunk(连续的块)。
        类似我们调用tensor.contiguous
        """
        word_encoder_output, hidden1 = self.bi_rnn1(word_inputs,
                                                    hidden1)  # =>[b*60,15,2*hidden_size], hidden: [2,b*60,hidden_size]
        word_attn = self.word_attn(word_encoder_output).tanh()  # =>[b*60,15,hidden_size]
        word_attn_energy = self.word_ctx(word_attn)  # =>[b*60,15,1]
        word_attn_weights = F.softmax(word_attn_energy, dim=1).transpose(1, 2)  # =>[b*60,15,1]=>[b*60,1,15]
        word_att_level_output = torch.bmm(word_attn_weights, word_encoder_output)  # =>[b*60,1,2*hidden_size]

        sent_inputs = word_att_level_output.squeeze(1).view(-1, self.max_sents_num,
                                                            2 * self.hidden_size)  # =>[b*60,2*hidden_size]=>[b,60,2*hidden_size]
        self.bi_rnn2.flatten_parameters()
        sent_encoder_output, hidden2 = self.bi_rnn2(sent_inputs, hidden2)  # =>[b,60,2*hidden_size], hidden: [2,b,hidden_size]
        sent_attn = self.sent_attn(sent_encoder_output).tanh()  # =>[b,60,hidden_size]
        sent_attn_energy = self.sent_ctx(sent_attn)  # =>[b,60,1]
        sent_attn_weights = F.softmax(sent_attn_energy, dim=1).transpose(1, 2)  # =>[b,60,1]=>[b,1,60]
        sent_att_level_output = torch.bmm(sent_attn_weights, sent_encoder_output)  # =>[b,1,2*hidden_size]

        # logits = self.out(self.dropout(self.layernorm2(sent_att_level_output.squeeze(1))))  # =>[b,2*hidden_size]=>[b,num_classes]
        logits = self.out(self.dropout(sent_att_level_output.squeeze(1)))  # =>[b,2*hidden_size]=>[b,num_classes]
        return logits  # [b,num_classes]
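
# Shape sanity check (a hypothetical example, not part of the original script): with
# vocab_size=1000, hidden_size=64, num_classes=10 and embedding_dim=100, the call
#   MyHAN(15, 60, 1000, 64, 10, 100)(torch.randint(0, 1000, (2, 60, 15))).shape
# evaluates to torch.Size([2, 10]): one logit vector per document in the batch.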

def train_step(model, inps, labs, optimizer):
    inps = inps.to(device)
    labs = labs.to(device)

    model.train()  # switch to train mode
    optimizer.zero_grad()  # clear gradients

    # forward
    logits = model(inps)
    loss = loss_func(logits, labs)

    pred = torch.argmax(logits, dim=-1)
    metric = metric_func(pred.cpu().numpy(), labs.cpu().numpy())  # accuracy_score returns a numpy scalar, so .item() below works
    # print('*'*8, metric)

    # backward
    loss.backward()  # backpropagate to compute gradients
    optimizer.step()  # update the parameters

    return loss.item(), metric.item()

@torch.no_grad()
def validate_step(model, inps, labs):
    inps = inps.to(device)
    labs = labs.to(device)
    model.eval()  # switch to eval mode

    # forward
    logits = model(inps)
    loss = loss_func(logits, labs)

    pred = torch.argmax(logits, dim=-1)
    metric = metric_func(pred.cpu().numpy(), labs.cpu().numpy())  # numpy scalar, as in train_step

    return loss.item(), metric.item()

def train_model(model, train_dloader, val_dloader, optimizer, scheduler_1r=None, num_epochs=10, print_every=150):
    starttime = time.time()
    print('*' * 27, 'start training...')
    printbar()

    best_metric = 0.
    for epoch in range(1, num_epochs+1):
        # training
        loss_sum, metric_sum = 0., 0.
        for step, (inps, labs) in enumerate(train_dloader, start=1):
            loss, metric = train_step(model, inps, labs, optimizer)
            loss_sum += loss
            metric_sum += metric

            # batch-level logging
            if step % print_every == 0:
                print('*'*27, f'[step = {step}] loss: {loss_sum/step:.3f}, {metric_name}: {metric_sum/step:.3f}')

        # validation: run once after each training epoch
        val_loss_sum, val_metric_sum = 0., 0.
        for val_step, (inps, labs) in enumerate(val_dloader, start=1):
            val_loss, val_metric = validate_step(model, inps, labs)
            val_loss_sum += val_loss
            val_metric_sum += val_metric

        if scheduler_1r:
            scheduler_1r.step()

        # record and collect this epoch's training and validation stats
        # columns=['epoch', 'loss', metric_name, 'val_loss', 'val_'+metric_name]
        record = (epoch, loss_sum/step, metric_sum/step, val_loss_sum/val_step, val_metric_sum/val_step)
        df_history.loc[epoch - 1] = record

        # epoch-level logging
        print('EPOCH = {} loss: {:.3f}, {}: {:.3f}, val_loss: {:.3f}, val_{}: {:.3f}'.format(
               record[0], record[1], metric_name, record[2], record[3], metric_name, record[4]))
        printbar()

        # save the best model parameters

        current_metric_avg = val_metric_sum/val_step
        if current_metric_avg > best_metric:
            best_metric = current_metric_avg
            checkpoint = save_dir + f'epoch{epoch:03d}_valacc{current_metric_avg:.3f}_ckpt.tar'
            if device.type == 'cuda' and ngpu > 1:
                model_sd = copy.deepcopy(model.module.state_dict())
            else:
                model_sd = copy.deepcopy(model.state_dict())
            # save the checkpoint
            torch.save({
                'loss': loss_sum / step,
                'epoch': epoch,
                'net': model_sd,
                'opt': optimizer.state_dict(),
            }, checkpoint)


    endtime = time.time()
    time_elapsed = endtime - starttime
    print('*' * 27, 'training finished...')
    print('*' * 27, 'and it costs {} h {} min {:.2f} s'.format(int(time_elapsed // 3600),
                                                               int((time_elapsed % 3600) // 60),
                                                               (time_elapsed % 3600) % 60))

    print('Best val Acc: {:4f}'.format(best_metric))
    return df_history

# plot training curves
def plot_metric(df_history, metric):
    plt.figure()

    train_metrics = df_history[metric]
    val_metrics = df_history['val_' + metric]  #

    epochs = range(1, len(train_metrics) + 1)

    plt.plot(epochs, train_metrics, 'bo--')
    plt.plot(epochs, val_metrics, 'ro-')  #

    plt.title('Training and validation ' + metric)
    plt.xlabel("Epochs")
    plt.ylabel(metric)
    plt.legend(["train_" + metric, 'val_' + metric])

    plt.savefig(imgs_dir + 'han_' + metric + '.png')  # save the figure
    plt.show()


if __name__=='__main__':
    train_dloader = load_data(data_base_dir + 'cnews.train.txt', traindata=True, shuffle=True)
    val_dloader = load_data(data_base_dir + 'cnews.val.txt', traindata=False, shuffle=False)

    print('*' * 27, '%d steps per epoch' % len(train_dloader))  # 1000 steps/batches
    sample_batch = next(iter(train_dloader))
    print('*'*27, 'sample_batch:', len(sample_batch), sample_batch[0].size(), sample_batch[0].dtype,
          sample_batch[1].size(), sample_batch[1].dtype)  # 4   [b, doc_maxlen] int64


    model = MyHAN(sent_maxlen, doc_maxlen, total_words+2, hidden_size, num_classes, embedding_dim)

    torchkeras.summary(model, input_shape=(doc_maxlen, sent_maxlen), input_dtype=torch.int64)

    model = model.to(device)
    if ngpu > 1:
        model = torch.nn.DataParallel(model, device_ids=list(range(ngpu)))  # data-parallel execution, e.g. device_ids=[0,1]

    model.eval()
    sample_out = model(sample_batch[0])
    print('*' * 10, 'sample_out:', sample_out.shape)  # [b, 10]

    params_to_update = []
    for name, param in model.named_parameters():
        if param.requires_grad:
            params_to_update.append(param)

    optimizer = torch.optim.AdamW(params_to_update, lr=LR, weight_decay=1e-4)
    scheduler_1r = torch.optim.lr_scheduler.LambdaLR(optimizer,
                                                     lr_lambda=lambda epoch: 0.1 if epoch>EPOCHS*0.6 else 0.5 if epoch>EPOCHS*0.3 else 1)
    # optimizer = torch.optim.Adam(model.parameters(), lr=LR)
    train_model(model, train_dloader, val_dloader, optimizer, scheduler_1r,
                num_epochs=EPOCHS, print_every=50)

    plot_metric(df_history, 'loss')
    plot_metric(df_history, metric_name)

eval.py

# coding=utf-8
import torch
from tqdm import tqdm
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, \
            confusion_matrix, classification_report
import time
import sys

sys.path.append('/home/xx/document-level-classification/')
from han.config import *
from han.prepare_data import load_data
from han.train import MyHAN, printbar

ngpu = 4

use_cuda = torch.cuda.is_available() # check whether a GPU is available
device = torch.device("cuda:0" if (use_cuda and ngpu>0) else "cpu")
print('*'*8, 'device:', device)

checkpoint = save_dir + 'epoch007_valacc0.947_ckpt.tar'

@torch.no_grad()
def eval_step(model, inps, labs):
    inps = inps.to(device)
    labs = labs.to(device)

    model.eval()  # switch to eval mode

    # forward
    logits = model(inps)
    pred = torch.argmax(logits, dim=-1)

    return pred, labs

def evaluate(model, test_dloader):
    starttime = time.time()
    print('*' * 27, 'start evaluating...')
    printbar()
    preds, labels = [], []
    for step, (inps, labs) in enumerate(tqdm(test_dloader), start=1):
        pred, labs = eval_step(model, inps, labs)
        preds.append(pred)
        labels.append(labs)

    y_true = torch.cat(labels, dim=0)
    y_pred = torch.cat(preds, dim=0)
    endtime = time.time()
    print('evaluating costs: {:.2f}s'.format(endtime - starttime))
    return y_true.cpu(), y_pred.cpu()

def get_metrics(y_true, y_pred):
    if num_classes == 2:
        print('*'*27, 'precision_score:', precision_score(y_true, y_pred, pos_label=1))
        print('*'*27, 'recall_score:', recall_score(y_true, y_pred, pos_label=1))
        print('*'*27, 'f1_score:', f1_score(y_true, y_pred, pos_label=1))
    else:
        average = 'weighted'
        print('*'*27, average+'_precision_score:{:.3f}'.format(precision_score(y_true, y_pred, average=average)))
        print('*'*27, average+'_recall_score:{:.3}'.format(recall_score(y_true, y_pred, average=average)))
        print('*'*27, average+'_f1_score:{:.3f}'.format(f1_score(y_true, y_pred, average=average)))

    print('*'*27, 'accuracy:{:.3f}'.format(accuracy_score(y_true, y_pred)))
    print('*'*27, 'confusion_matrix:\n', confusion_matrix(y_true, y_pred))
    print('*'*27, 'classification_report:\n', classification_report(y_true, y_pred))

if __name__ == '__main__':
    test_dloader = load_data(data_base_dir + 'cnews.test.txt', traindata=False, shuffle=False)

    sample_batch = next(iter(test_dloader))
    print('*' * 27, 'sample_batch:', len(sample_batch), sample_batch[0].size(), sample_batch[0].dtype,
          sample_batch[1].size(), sample_batch[1].dtype)

    model = MyHAN(sent_maxlen, doc_maxlen, total_words + 2, hidden_size, num_classes, embedding_dim)
    model = model.to(device)
    if ngpu > 1:
        model = torch.nn.DataParallel(model, device_ids=list(range(ngpu)))  # data-parallel execution, e.g. device_ids=[0,1,2,3]

    print('*' * 27, 'Loading model weights...')
    # ckpt = torch.load(checkpoint, map_location=device)  # dict; use when the checkpoint was saved on CPU and is loaded onto GPU
    ckpt = torch.load(checkpoint)  # dict; saved on GPU and loaded onto GPU here
    model_sd = ckpt['net']
    if device.type == 'cuda' and ngpu > 1:
        model.module.load_state_dict(model_sd)
    else:
        model.load_state_dict(model_sd)
    print('*' * 27, 'Model loaded successfully!')

    y_true, y_pred = evaluate(model, test_dloader)
    get_metrics(y_true, y_pred)

prepare_data.py

# coding=utf-8
import pandas as pd
import re
import jieba
from collections import Counter
from tqdm import tqdm
import pickle
import os
import numpy as np
import torch
import torchtext
import sys
sys.path.append('/home/xx/document-level-classification/')
from han.config import *

df_stopwords = pd.read_csv(stopwords_file, index_col=False, quoting=3, sep="\t", names=['stopword'], encoding='utf-8')
STOPWORDS_SET = set(df_stopwords['stopword'].values)

# read the data; each line is: label<TAB>content
def read_data(filepath):
    df_data = pd.read_csv(filepath, encoding='UTF-8', sep='\t', names=['label', 'content'], index_col=False)
    df_data = df_data.dropna()
    print(df_data.head())

    # x_data, y_data = df_data['content'][:100], df_data['label'][:100]  # small slice for quick functional testing
    x_data, y_data = df_data['content'], df_data['label']
    print('*'*27, x_data.shape, len(x_data[0]), y_data.shape)  # (50000,) 746 (50000,)
    print(label2id)
    y_data = [label2id[str(y)] for y in y_data]
    # y_data = torch.tensor(y_data, dtype=torch.long)

    return x_data, y_data

# keep Chinese characters, digits, English letters, hyphens and common punctuation
def clear_text(text):
    p = re.compile(r"[^\u4e00-\u9fa5^0-9^a-z^A-Z\-、,。!?:;()《》【】,!\?:;[\]()]")  # matches every character that is NOT Chinese, a digit, a letter, a hyphen or one of the listed punctuation marks
    return p.sub('', text)  # replace the matched characters with the empty string

# word segmentation
def tokenize(text):
    text = clear_text(text)
    segs = jieba.lcut(text.strip(), cut_all=False)  # cut_all=False is precise mode, True is full mode; the default is False; returns the token list
    segs = filter(lambda x: len(x.strip()) > 1, segs)  # keep only tokens longer than one character; punctuation is not kept

    global STOPWORDS_SET
    segs = filter(lambda x: x not in STOPWORDS_SET, segs) # remove stopwords; segs is a filter object
    return list(segs)

# sentence splitting only
def do_seg_sentences(doc):
    # sents = re.split(r',|。|!|?|:|;|,|!|\?|:|;', doc)
    sents = re.split(r',|。|!|?|,|!|\?', doc)
    sentences = [s for s in sents if len(s.strip()) != 0]
    return sentences
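
# Hypothetical usage example:
#   do_seg_sentences('今天天气不错,我们去跑步!好的。')
#   -> ['今天天气不错', '我们去跑步', '好的']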

# filter out low-frequency words
def filter_lowfreq_words(arr, vocab):
    # arr is one batch given as a list of length batch_size; each element is a sentence of
    # length MAX_LEN, already tokenized, with every word mapped to its index
    arr = [[x if x < total_words else 0 for x in example] for example in arr]  # word ids are sorted by descending frequency; out-of-range ids become <unk>=0
    return arr
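
# Hypothetical usage example (with total_words = 6000 from config.py):
#   filter_lowfreq_words([[3, 7421, 12]], None)
#   -> [[3, 0, 12]]  (the out-of-vocabulary id 7421 is mapped to <unk> = 0)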


# pipeline order: tokenize, preprocessing, build the vocabulary, batch (padding & truncation to maxlen), postprocessing
NESTED = torchtext.data.Field(tokenize=tokenize,
                              sequential=True,
                              fix_length=sent_maxlen,
                              postprocessing=filter_lowfreq_words) # after numericalizing but before the numbers are turned into a Tensor)
TEXT = torchtext.data.NestedField(NESTED,
                            fix_length=doc_maxlen,
                            tokenize=do_seg_sentences,
                            )
LABEL = torchtext.data.Field(sequential=False,
                             use_vocab=False
                             )

def get_dataset(inp, lab):
    fields = [('inp', TEXT), ('lab', LABEL)]  # field info: a list of (name, Field) pairs
    examples = []  # list of Example
    for inp, lab in tqdm(zip(inp, lab)):  # progress bar
        # constructing an Example calls each field's preprocess method
        examples.append(torchtext.data.Example.fromlist([inp, lab], fields))
    return examples, fields

class DataLoader:
    def __init__(self, data_iter):
        self.data_iter = data_iter
        self.length = len(data_iter)  # total number of batches

    def __len__(self):
        return self.length

    def __iter__(self):
        # note: the text is yielded batch-first here, and the label's shape and dtype are adjusted
        for batch in self.data_iter:
            yield (batch.inp, batch.lab.long())  # label -> long

def load_data(data_path, traindata=False, shuffle=False):
    x_data, y_data = read_data(data_path)

    ds = torchtext.data.Dataset(*get_dataset(x_data, y_data))
    # inspect one sample
    print('*'*27, len(ds[0].inp), len(ds[1].inp), ds[0].inp, ds[0].lab)  # still raw words at this point, not yet converted to ids

    if os.path.exists(vocab_path):
        print('Vocabulary file found!')
        with open(vocab_path, 'rb') as handle:
            c = pickle.load(handle)
        TEXT.vocab = torchtext.vocab.Vocab(c, max_size=total_words)
        NESTED.vocab = torchtext.vocab.Vocab(c, max_size=total_words)
    else:
        print('Vocabulary file not found!')
        TEXT.build_vocab(ds, max_size=total_words)
        with open(vocab_path, 'wb') as handle:  # reusable at inference time
            pickle.dump(TEXT.vocab.freqs, handle)
    print('*' * 27, 'vocab size:', len(TEXT.vocab))
    print('*' * 27, TEXT.vocab.itos[0])  # <unk>
    print('*' * 27, TEXT.vocab.itos[1])  # <pad>
    print(ds.fields['inp'].vocab.itos[0])
    print(ds.fields['inp'].vocab.itos[1])

    ds_iter = torchtext.data.Iterator(ds,
                                      batch_size,
                                      # sort_key=lambda x: len(x.inp),
                                      # device=,
                                      train=traindata,
                                      # repeat=,
                                      shuffle=shuffle,
                                      sort=False,
                                      # sort_within_batch=,
                                      )
    data_loader = DataLoader(ds_iter)
    return data_loader

if __name__=='__main__':
    train_dataloader = load_data(data_base_dir + 'cnews.train.txt', traindata=True, shuffle=True)
    val_dataloader = load_data(data_base_dir + 'cnews.val.txt', traindata=False, shuffle=False)

    print('*' * 27, 'len(train_dataloader):', len(train_dataloader))  # 1000 steps/batches
    for batch_text, batch_label in train_dataloader:
        print(batch_text.shape, batch_label.shape)  # [b,100,10], [b]
        # print(batch_text[0])
        print(batch_label[0], batch_label[0].dtype)  # tensor(5) torch.int64
        break

config.py

# coding=utf-8
project_dir = '/home/xijian/pycharm_projects/document-level-classification/'
data_base_dir = project_dir + 'data/thucnews/'

save_dir = './save/20210316/'
imgs_dir = './imgs/20210316/'
stopwords_file = project_dir + 'data/zh_data/stopwords.txt'
vocab_path = 'tokenizer/vocab.pkl'

labels = ['体育', '娱乐', '家居', '房产', '教育', '时尚', '时政', '游戏', '科技', '财经']
label2id = {l:i for i,l in enumerate(labels)}
id2label = {i:l for i,l in enumerate(labels)}

LR = 1e-2
EPOCHS = 15

total_words = 6000 # keep only the 6000 most frequent words
doc_maxlen = 60 # maximum number of sentences per document
sent_maxlen = 15 # maximum number of words per sentence
embedding_dim = 100

num_classes = len(labels)
hidden_size = 64

pad_token = '<pad>'
pad_id = 1

batch_size = 512