import torch
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from transformers import BertTokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
example_text = 'I will watch Memento tonight'
bert_input = tokenizer(example_text, padding='max_length', max_length=10,
                       truncation=True, return_tensors="pt")
print(bert_input['input_ids'])
print(bert_input['token_type_ids'])
print(bert_input['attention_mask'])
tensor([[ 101, 146, 1209, 2824, 2508, 26173, 3568, 102, 0, 0]])
tensor([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
tensor([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0]])
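The first tensor holds the token IDs, where 101 and 102 are BERT's [CLS] and [SEP] markers and the trailing zeros are [PAD]; the second holds the segment IDs (all zeros for a single sentence); the third is the attention mask that tells the model to ignore the padding. As a quick sanity check, the IDs can be decoded back:

# Decode the IDs to make the special tokens and padding visible
print(tokenizer.decode(bert_input['input_ids'][0]))
# Expected roughly: [CLS] I will watch Memento tonight [SEP] [PAD] [PAD]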
df = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
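One caveat worth handling before the split: nn.CrossEntropyLoss, used in the training loop below, expects class indices in 0..4 to match the 5-way output head. If the rating column stores 1-5 stars (an assumption about these CSVs), shift it down by one first:

# Assumption: ratings are stored as 1..5; CrossEntropyLoss needs labels 0..4
df['rating'] = df['rating'] - 1
test['rating'] = test['rating'] - 1   # only if test.csv also carries labels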
np.random.seed(112)
df_train, df_val = train_test_split(df, test_size=0.2)
np_val = np.array(df_val['rating'])
print(np_val.shape)
print(len(df_train))
print(len(df_val))
print(len(test))
# dataset class
tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
class Dataset(torch.utils.data.Dataset):
    def __init__(self, df):
        self.labels = df['rating'].tolist()
        self.texts = [tokenizer(text,
                                padding='max_length', max_length=512, truncation=True,
                                return_tensors="pt") for text in df['sentences']]

    def classes(self):
        return self.labels

    def __len__(self):
        return len(self.labels)

    def get_batch_labels(self, idx):
        # Fetch a batch of labels
        return np.array(self.labels[idx])

    def get_batch_texts(self, idx):
        # Fetch a batch of inputs
        return self.texts[idx]

    def __getitem__(self, idx):
        batch_texts = self.get_batch_texts(idx)
        batch_y = self.get_batch_labels(idx)
        return batch_texts, batch_y
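A minimal smoke test of the class, using a toy DataFrame with the 'sentences' and 'rating' columns the code expects:

# Hypothetical two-row DataFrame matching the expected column layout
toy = pd.DataFrame({'sentences': ['great movie', 'terrible plot'], 'rating': [4, 0]})
ds = Dataset(toy)
x, y = ds[0]
print(x['input_ids'].shape, y)   # torch.Size([1, 512]) 4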
# Model building
from torch import nn
from transformers import BertModel
class BertClassifier(nn.Module):
    def __init__(self, dropout=0.5):
        super(BertClassifier, self).__init__()
        self.bert = BertModel.from_pretrained('bert-base-cased')
        self.dropout = nn.Dropout(dropout)
        self.linear = nn.Linear(768, 5)   # bert-base hidden size -> 5 rating classes
        self.relu = nn.ReLU()

    def forward(self, input_id, mask):
        # With return_dict=False, BertModel returns (sequence_output, pooled_output)
        _, pooled_output = self.bert(input_ids=input_id, attention_mask=mask, return_dict=False)
        dropout_output = self.dropout(pooled_output)
        linear_output = self.linear(dropout_output)
        final_layer = self.relu(linear_output)
        return final_layer
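A quick shape check of the untrained classifier on the example encoding from earlier (this loads the pretrained weights again):

# Forward one example through the classifier as a shape check
clf = BertClassifier()
with torch.no_grad():
    logits = clf(bert_input['input_ids'], bert_input['attention_mask'])
print(logits.shape)   # torch.Size([1, 5]), one score per rating class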
# Training loop
from torch.optim import Adam
from tqdm import tqdm
from sklearn.metrics import f1_score   # used for the validation F1 below
def train(model, train_data, val_data, learning_rate, epochs):
    train, val = Dataset(train_data), Dataset(val_data)
    train_dataloader = torch.utils.data.DataLoader(train, batch_size=2, shuffle=True)
    val_dataloader = torch.utils.data.DataLoader(val, batch_size=2)

    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")

    criterion = nn.CrossEntropyLoss()
    optimizer = Adam(model.parameters(), lr=learning_rate)

    if use_cuda:
        model = model.cuda()
        criterion = criterion.cuda()

    for epoch_num in range(epochs):
        model.train()
        total_acc_train = 0
        total_loss_train = 0
        for train_input, train_label in tqdm(train_dataloader):
            train_label = train_label.to(device)
            mask = train_input['attention_mask'].to(device)
            input_id = train_input['input_ids'].squeeze(1).to(device)

            output = model(input_id, mask)
            # Compare against the training labels, not val_label
            batch_loss = criterion(output, train_label)
            total_loss_train += batch_loss.item()

            acc = (output.argmax(dim=1) == train_label).sum().item()
            total_acc_train += acc

            model.zero_grad()
            batch_loss.backward()
            optimizer.step()

        model.eval()   # disable dropout for validation
        total_acc_val = 0
        total_loss_val = 0
        pred_list = []   # validation predictions, collected for the F1 score
        with torch.no_grad():
            for val_input, val_label in val_dataloader:
                val_label = val_label.to(device)
                mask = val_input['attention_mask'].to(device)
                input_id = val_input['input_ids'].squeeze(1).to(device)

                output = model(input_id, mask)
                pred_list += output.argmax(dim=1).tolist()

                batch_loss = criterion(output, val_label)
                total_loss_val += batch_loss.item()

                acc = (output.argmax(dim=1) == val_label).sum().item()
                total_acc_val += acc

        # The val loader is not shuffled, so predictions line up with np_val
        pred_np = np.array(pred_list)
        f1_val = f1_score(np_val, pred_np, average='weighted')

        print(f'Epochs: {epoch_num + 1} | Train Loss: {total_loss_train / len(train_data): .3f} \
              | Train Accuracy: {total_acc_train / len(train_data): .3f} \
              | Val Loss: {total_loss_val / len(val_data): .3f} \
              | Val Accuracy: {total_acc_val / len(val_data): .3f} \
              | Val F1: {f1_val: .3f}')
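For completeness, the held-out test split can be scored with the same pattern. A minimal sketch, assuming test.csv shares the 'sentences'/'rating' layout:

# Sketch: score the test split with the same Dataset/DataLoader pattern
def evaluate(model, test_data):
    test_set = Dataset(test_data)
    test_dataloader = torch.utils.data.DataLoader(test_set, batch_size=2)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)
    model.eval()
    total_acc = 0
    with torch.no_grad():
        for test_input, test_label in test_dataloader:
            test_label = test_label.to(device)
            mask = test_input['attention_mask'].to(device)
            input_id = test_input['input_ids'].squeeze(1).to(device)
            output = model(input_id, mask)
            total_acc += (output.argmax(dim=1) == test_label).sum().item()
    print(f'Test Accuracy: {total_acc / len(test_data): .3f}')

# evaluate(model, test)   # run once training completes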
EPOCHS = 5
LR = 1e-6
model = BertClassifier()
train(model, df_train, df_val, LR, EPOCHS)
Some weights of the model checkpoint at bert-base-cased were not used when initializing BertModel: ['cls.predictions.bias', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.transform.dense.bias', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight', 'cls.predictions.transform.dense.weight', 'cls.predictions.decoder.weight', 'cls.predictions.transform.LayerNorm.weight']
– This IS expected if you are initializing BertModel from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
– This IS NOT expected if you are initializing BertModel from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
E:\Software\ANACONDA\lib\site-packages\pandas\core\indexes\base.py in get_loc(self, key, method, tolerance)
2894 try:
-> 2895 return self._engine.get_loc(casted_key)
2896 except KeyError as err:
pandas\_libs\index.pyx in pandas._libs.index.IndexEngine.get_loc()
pandas\_libs\index.pyx in pandas._libs.index.IndexEngine.get_loc()
pandas\_libs\hashtable_class_helper.pxi in pandas._libs.hashtable.PyObjectHashTable.get_item()
pandas\_libs\hashtable_class_helper.pxi in pandas._libs.hashtable.PyObjectHashTable.get_item()
KeyError: 'sentences'
The above exception was the direct cause of the following exception:
KeyError Traceback (most recent call last)
77 LR = 1e-6
---> 79 train(model, df_train, df_val, LR, EPOCHS)
4 def train(model, train_data, val_data, learning_rate, epochs):
----> 6 train, val = Dataset(train_data), Dataset(val_data)
8 train_dataloader = torch.utils.data.DataLoader(train, batch_size=2, shuffle=True)
8 self.texts = [tokenizer(text,
9 padding='max_length', max_length = 512, truncation=True,
---> 10 return_tensors="pt") for text in df['sentences']]
12 def classes(self):
E:\Software\ANACONDA\lib\site-packages\pandas\core\frame.py in __getitem__(self, key)
2900 if self.columns.nlevels > 1:
2901 return self._getitem_multilevel(key)
-> 2902 indexer = self.columns.get_loc(key)
2903 if is_integer(indexer):
2904 indexer = [indexer]
E:\Software\ANACONDA\lib\site-packages\pandas\core\indexes\base.py in get_loc(self, key, method, tolerance)
2895 return self._engine.get_loc(casted_key)
2896 except KeyError as err:
-> 2897 raise KeyError(key) from err
2899 if tolerance is not None:
KeyError: 'sentences'
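The bottom of the trace pinpoints the problem: the DataFrame handed to Dataset has no column named 'sentences', so df['sentences'] in __init__ raises the KeyError. Printing the actual column names of train.csv shows what the text column is really called; either rename that column or change the reference in Dataset.__init__ to match:

# Inspect the real column names; 'sentences' evidently is not among them
print(df.columns.tolist())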