

Fine-Tuning a Pretrained BERT Model (2)


15.2 Fine-Tuning with the Transformers Trainer

As in the previous post, we load the NLTK movie_reviews corpus, convert the pos/neg labels to 1/0, and split the data into train and test sets before fine-tuning bert-base-uncased with the Hugging Face Trainer.

import nltk
from nltk.corpus import movie_reviews
from sklearn.model_selection import train_test_split  # use sklearn's train/test split function
import numpy as np

nltk.download('movie_reviews')
fileids = movie_reviews.fileids()  # get the file ids from the movie review corpus
reviews = [movie_reviews.raw(fileid) for fileid in fileids]  # load each raw review text by file id
categories = [movie_reviews.categories(fileid)[0] for fileid in fileids]  # sentiment category of each review

# map the text labels to 0 (neg) and 1 (pos)
label_dict = {'pos':1, 'neg':0}
y = [label_dict[c] for c in categories]

X_train, X_test, y_train, y_test = train_test_split(reviews, y, test_size=0.2, random_state=7)

print('Train set count: ', len(X_train))
print('Test set count: ', len(X_test))


"""
Train set count:  1600
Test set count:  400
[nltk_data] Downloading package movie_reviews to /root/nltk_data...
[nltk_data]   Package movie_reviews is already up-to-date!
"""
from transformers import BertTokenizerFast, BertForSequenceClassification

tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')
model = BertForSequenceClassification.from_pretrained("bert-base-uncased")  # adds a 2-class classification head (num_labels defaults to 2)

train_input = tokenizer(X_train, truncation=True, padding=True, return_tensors="pt")  # truncate to the model's max length and pad to the longest sequence
test_input = tokenizer(X_test, truncation=True, padding=True, return_tensors="pt")


"""
Some weights of BertForSequenceClassification were not initialized from the model checkpoint at bert-base-uncased and are newly initialized: ['classifier.bias', 'classifier.weight']
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
"""
import torch

class OurDataset(torch.utils.data.Dataset):
    def __init__(self, inputs, labels):
        self.inputs = inputs
        self.labels = labels

    def __getitem__(self, idx):
        # the encodings are already tensors (return_tensors="pt"), so slice and clone
        # rather than calling torch.tensor() on a tensor, which triggers a UserWarning
        item = {key: val[idx].clone().detach() for key, val in self.inputs.items()}
        item['labels'] = torch.tensor(self.labels[idx])
        return item

    def __len__(self):
        return len(self.labels)

train_dataset = OurDataset(train_input, y_train)
test_dataset = OurDataset(test_input, y_test)
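Indexing into the dataset (a quick illustrative check) shows the dict of tensors that the Trainer will batch and pass to the model's forward():

# each item is a dict whose keys match BertForSequenceClassification's forward() arguments
sample = train_dataset[0]
print({k: v.shape for k, v in sample.items()})  # input_ids/token_type_ids/attention_mask plus a scalar 'labels'
print(len(train_dataset))  # 1600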
!pip install datasets
from datasets import load_metric

# load_metric is deprecated in recent datasets releases; evaluate.load("accuracy")
# from the evaluate library is the current equivalent
metric = load_metric("accuracy")

def compute_metrics(eval_pred):
    logits, labels = eval_pred
    predictions = np.argmax(logits, axis=-1)
    return metric.compute(predictions=predictions, references=labels)
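compute_metrics receives a (logits, labels) tuple from the Trainer; a tiny standalone check of the same logic (dummy values, for illustration only):

# simulate what the Trainer passes in: raw logits and the true labels
dummy_logits = np.array([[0.1, 0.9], [2.0, -1.0]])  # argmax predicts class 1, then class 0
dummy_labels = np.array([1, 1])
print(compute_metrics((dummy_logits, dummy_labels)))  # {'accuracy': 0.5}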
from transformers import Trainer, TrainingArguments

# hyperparameters for the Trainer
training_args = TrainingArguments(
    output_dir='./results',          # output folder for checkpoints and predictions; required
    num_train_epochs=2,              # number of training epochs
    per_device_train_batch_size=8,   # batch size for training
    per_device_eval_batch_size=16,   # batch size for evaluation
)

trainer = Trainer(
    model=model,                     # the model to fine-tune
    args=training_args,              # the training arguments defined above
    train_dataset=train_dataset,     # training dataset
    compute_metrics=compute_metrics, # metric function for evaluation
)

# run the fine-tuning
trainer.train()

# evaluate the fine-tuned model on the held-out test set
trainer.evaluate(eval_dataset=test_dataset)

"""
{'eval_loss': 0.3673644959926605,
 'eval_accuracy': 0.87,
 'eval_runtime': 6.8058,
 'eval_samples_per_second': 58.774,
 'eval_steps_per_second': 3.673,
 'epoch': 2.0}
"""
※ This post is a summary of my own study based on <파이썬 텍스트 마이닝 완벽 가이드> (Python Text Mining Complete Guide).
