simpletransformers的 single sentence classification和sentence pair classification

1. 导入相关模块

import warnings
warnings.simplefilter('ignore')  # silence all warnings for cleaner notebook output

import gc
import os

import numpy as np
import pandas as pd

from sklearn.model_selection import StratifiedKFold

from simpletransformers.classification import ClassificationModel, ClassificationArgs

# Enumerate GPUs in PCI bus order and expose only physical GPU 3 to this process.
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ['CUDA_VISIBLE_DEVICES'] = '3'

2. 读取数据,并处理空值


# Load train/test splits; fill missing text with the empty string so the
# tokenizer never receives NaN.
train = pd.read_csv('data/train.csv')
test = pd.read_csv('data/test.csv')

# FIX: `df['col'].fillna(..., inplace=True)` operates on a column view
# (chained assignment, warned/removed in modern pandas); assign back instead.
train['content'] = train['content'].fillna('')
test['content'] = test['content'].fillna('')

3. 设置模型的参数

ClassificationArgs 对象包含许多属性,通过设置这些属性可以控制模型训练的各项超参数。

def get_model_args():
    """Build the simpletransformers hyper-parameter object used for training.

    Returns:
        ClassificationArgs: configuration with short sequences (32 tokens),
        1 epoch, sliding-window truncation, in-training evaluation, and
        checkpoint saving disabled to keep disk usage down.
    """
    model_args = ClassificationArgs()
    model_args.max_seq_length = 32
    model_args.train_batch_size = 16
    model_args.num_train_epochs = 1
    model_args.sliding_window = True
    model_args.evaluate_during_training = True
    model_args.evaluate_during_training_verbose = True
    model_args.fp16 = False
    # No checkpoints during cross-validation; -1 disables step-based saving.
    model_args.no_save = True
    model_args.save_steps = -1
    model_args.overwrite_output_dir = True
    # BUG FIX: the original assigned the *builtin* `dir` function here;
    # output_dir must be a filesystem path string.
    model_args.output_dir = 'outputs/'
    return model_args

4. single sentence classification 交叉验证训练模型

# Example: load a single-sentence classifier from a HuggingFace Hub checkpoint.
model = ClassificationModel(
    "roberta", "roberta-base"
)
# Example: any BERT-family checkpoint name from the Hub works the same way.
model = ClassificationModel(
    "bert", "KB/bert-base-swedish-cased"
)

outputs/best_model为本地保存模型的路径。

# Example: load a previously fine-tuned model from a local directory.
model = ClassificationModel(
    "bert", "outputs/best_model"
)
oof = []  # out-of-fold prediction frames, one per fold
# FIX: take an explicit copy so the new column does not write into a view
# of `test` (SettingWithCopyWarning in the original).
prediction = test[['id']].copy()
prediction['bert_pred'] = 0

n_folds = 3
kfold = StratifiedKFold(n_splits=n_folds, shuffle=True, random_state=2021)
for fold_id, (trn_idx, val_idx) in enumerate(kfold.split(train, train['label'])):
    # Copies avoid chained-assignment warnings when renaming columns below.
    train_df = train.iloc[trn_idx][['content', 'label']].copy()
    valid_df = train.iloc[val_idx][['content', 'label']].copy()
    train_df.columns = ['text', 'label']
    valid_df.columns = ['text', 'label']

    model_args = get_model_args()
    model = ClassificationModel('bert',
                                'hfl/chinese-roberta-wwm-ext',
                                args=model_args)
    model.train_model(train_df, eval_df=valid_df)

    # eval_model returns (result, model_outputs, wrong_predictions);
    # model_outputs holds the raw per-class scores.
    _, valid_outputs, _ = model.eval_model(valid_df)

    df_oof = train.iloc[val_idx][['id', 'label']].copy()
    df_oof['bert_pred'] = valid_outputs[:, 1]  # positive-class score
    oof.append(df_oof)

    print('predict')
    _, test_outputs = model.predict([text for text in test['content']])
    # Average the positive-class score over all folds.
    prediction['bert_pred'] += test_outputs[:, 1] / kfold.n_splits

    # Release GPU/host memory before the next fold.
    del model, train_df, valid_df, valid_outputs, test_outputs
    gc.collect()

不同任务所对应的模型(simpletransformers 支持的任务列表):

Binary and multi-class text classification

Conversational AI (chatbot training)

Language generation

Language model training/fine-tuning

Multi-label text classification

Multi-modal classification (text and image data combined)

Named entity recognition

Question answering

Regression

Sentence-pair classification

Text Representation Generation

Document Retrieval

# Concatenate per-fold out-of-fold frames, sort by sample id, and persist
# both the OOF predictions and the fold-averaged test predictions.
df_oof = pd.concat(oof)
df_oof = df_oof.sort_values(by='id')
df_oof.head(10)
df_oof[['id', 'bert_pred']].to_csv('roberta_pred_oof.csv', index=False)
prediction[['id', 'bert_pred']].to_csv('roberta_pred_test.csv', index=False)

5. sentence pair classification 交叉验证训练模型

def get_model_args():
    """Build the simpletransformers hyper-parameter object used for training.

    Returns:
        ClassificationArgs: configuration with short sequences (32 tokens),
        1 epoch, sliding-window truncation, in-training evaluation, and
        checkpoint saving disabled to keep disk usage down.
    """
    model_args = ClassificationArgs()
    model_args.max_seq_length = 32
    model_args.train_batch_size = 16
    model_args.num_train_epochs = 1
    model_args.sliding_window = True
    model_args.evaluate_during_training = True
    model_args.evaluate_during_training_verbose = True
    model_args.fp16 = False
    # No checkpoints during cross-validation; -1 disables step-based saving.
    model_args.no_save = True
    model_args.save_steps = -1
    model_args.overwrite_output_dir = True
    # BUG FIX: the original assigned the *builtin* `dir` function here;
    # output_dir must be a filesystem path string.
    model_args.output_dir = 'outputs/'
    return model_args
oof = []  # out-of-fold prediction frames, one per fold
# FIX: explicit copy so the new column does not write into a view of `test`
# (SettingWithCopyWarning in the original).
prediction = test[['id']].copy()
prediction['bert_pred'] = 0

n_folds = 3
kfold = StratifiedKFold(n_splits=n_folds, shuffle=True, random_state=2021)
for fold_id, (trn_idx, val_idx) in enumerate(kfold.split(train, train['label'])):
    # Sentence-pair input: 'level_4' is text_a, 'content' is text_b.
    # Copies avoid chained-assignment warnings when renaming columns below.
    train_df = train.iloc[trn_idx][['level_4', 'content', 'label']].copy()
    valid_df = train.iloc[val_idx][['level_4', 'content', 'label']].copy()
    train_df.columns = ['text_a', 'text_b', 'label']
    valid_df.columns = ['text_a', 'text_b', 'label']

    model_args = get_model_args()
    model = ClassificationModel('bert',
                                'hfl/chinese-roberta-wwm-ext',
                                num_labels=2,
                                args=model_args)
    model.train_model(train_df, eval_df=valid_df)

    # eval_model returns (result, model_outputs, wrong_predictions).
    _, valid_outputs, _ = model.eval_model(valid_df)

    df_oof = train.iloc[val_idx][['id', 'label']].copy()
    df_oof['bert_pred'] = valid_outputs[:, 1]  # positive-class score
    oof.append(df_oof)

    print('predict')
    # Sentence-pair predict expects a list of [text_a, text_b] pairs.
    _, test_outputs = model.predict([list(text) for text in test[['level_4', 'content']].values])
    prediction['bert_pred'] += test_outputs[:, 1] / kfold.n_splits

    # Release GPU/host memory before the next fold.
    del model, train_df, valid_df, valid_outputs, test_outputs
    gc.collect()
df_oof = pd.concat(oof)
df_oof = df_oof.sort_values(by='id')
df_oof.head(10)
# NOTE(review): these filenames are identical to the single-sentence section
# above, so running both sections overwrites the earlier results — confirm
# whether distinct filenames are intended.
df_oof[['id', 'bert_pred']].to_csv('roberta_pred_oof.csv', index=False)
prediction[['id', 'bert_pred']].to_csv('roberta_pred_test.csv', index=False)

6. sentence-transformers

获取文本相关性

  • 直接使用预训练模型,获取文本相关性
  • 使用训练样本微调之后,获取文本相关性
# Imports for the sentence-transformers similarity section below.
import numpy as np
import torch
from sentence_transformers import SentenceTransformer, util

Original: https://blog.csdn.net/qq_30129009/article/details/121530992
Author: junjian Li
Title: simpletransformers的 single sentence classification和sentence pair classification

原创文章受到原创版权保护。转载请注明出处:https://www.johngo689.com/531622/

转载文章受原作者版权保护。转载请注明原作者出处!

(0)

大家都在看

亲爱的 Coder【最近整理,可免费获取】👉 最新必读书单  | 👏 面试题下载  | 🌎 免费的AI知识星球