RNN with LSTM¶
In [1]:
import torch
import torch.nn.functional as F
from torchtext import data
from torchtext import datasets
import time
import random
import pandas as pd
torch.backends.cudnn.deterministic = True
General Settings¶
In [2]:
RANDOM_SEED = 123
torch.manual_seed(RANDOM_SEED)
VOCABULARY_SIZE = 20000
LEARNING_RATE = 1e-3 # <-- changes
BATCH_SIZE = 128
NUM_EPOCHS = 10
DEVICE = torch.device('cuda:3' if torch.cuda.is_available() else 'cpu')
EMBEDDING_DIM = 32 # <-- changes
HIDDEN_DIM = 64 # <-- changes
OUTPUT_DIM = 1
Dataset¶
The following cells prepare the IMDB movie review dataset (http://ai.stanford.edu/~amaas/data/sentiment/) for positive-negative sentiment classification as a CSV-formatted file:
In [ ]:
!gunzip -f movie_data.csv.gz
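If movie_data.csv.gz is not in the working directory yet, one way to fetch it is sketched below; the URL is a placeholder (an assumption, not from the original notebook) and needs to be replaced with wherever the gzipped CSV is actually hosted:

import urllib.request

# Placeholder URL (assumption) -- point this at the actual location of movie_data.csv.gz
CSV_GZ_URL = 'https://example.com/movie_data.csv.gz'
urllib.request.urlretrieve(CSV_GZ_URL, 'movie_data.csv.gz')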
Check that the dataset looks okay:
In [ ]:
df = pd.read_csv('movie_data.csv')
df.head()
In [ ]:
del df
Define the Label and Text field formatters:
In [ ]:
TEXT = data.Field(sequential=True,
                  tokenize='spacy',
                  include_lengths=True)  # necessary for packed_padded_sequence
LABEL = data.LabelField(dtype=torch.float)
Process the dataset:
In [ ]:
fields = [('review', TEXT), ('sentiment', LABEL)]
dataset = data.TabularDataset(
    path="movie_data.csv", format='csv',
    skip_header=True, fields=fields)
Split the dataset into training, validation, and test partitions:
In [ ]:
train_data, valid_data, test_data = dataset.split(
    split_ratio=[0.75, 0.05, 0.2],
    random_state=random.seed(RANDOM_SEED))
# One may want to vary the train/test split percentages
print(f'Num Train: {len(train_data)}')
print(f'Num Valid: {len(valid_data)}')
print(f'Num Test: {len(test_data)}')
Build the vocabulary based on the top "VOCABULARY_SIZE" words:
In [ ]:
TEXT.build_vocab(train_data, max_size=VOCABULARY_SIZE)
LABEL.build_vocab(train_data)
print(f'Vocabulary size: {len(TEXT.vocab)}')
print(f'Number of classes: {len(LABEL.vocab)}')
In [ ]:
LABEL.vocab.freqs
The TEXT.vocab dictionary contains the word counts and indices. The reason why the number of words is VOCABULARY_SIZE + 2 is that it contains two special tokens, one for padding and one for unknown words.
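As a quick sanity check (not part of the original lab), the special tokens and a few mappings can be inspected directly:

print(TEXT.vocab.itos[:5])               # first entries are '<unk>' and '<pad>', then the most frequent words
print(TEXT.vocab.stoi['the'])            # index assigned to a frequent word
print(TEXT.vocab.freqs.most_common(5))   # top-5 word counts from the training data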
Make dataset iterators:
In [ ]:
train_loader, valid_loader, test_loader = data.BucketIterator.splits(
    (train_data, valid_data, test_data),
    batch_size=BATCH_SIZE,
    sort_within_batch=True,  # necessary for packed_padded_sequence
    sort_key=lambda x: len(x.review),
    device=DEVICE)
Testing the iterators (note that the number of rows depends on the longest document in the respective batch):
In [ ]:
print('Train')
for batch in train_loader:
    print(f'Text matrix size: {batch.review[0].size()}')
    print(f'Target vector size: {batch.sentiment.size()}')
    break

print('\nValid:')
for batch in valid_loader:
    print(f'Text matrix size: {batch.review[0].size()}')
    print(f'Target vector size: {batch.sentiment.size()}')
    break

print('\nTest:')
for batch in test_loader:
    print(f'Text matrix size: {batch.review[0].size()}')
    print(f'Target vector size: {batch.sentiment.size()}')
    break
Model¶
In [ ]:
import torch.nn as nn


class RNN(nn.Module):

    def __init__(self, input_dim, embedding_dim, hidden_dim, output_dim):
        super().__init__()
        # Here is a preliminary model using an LSTM cell.
        # The primary goal of this lab is to vary the dimensions of the embeddings and see the results.
        # The second task is to use another RNN cell, such as a GRU, perform parameter tuning, and report the results.
        self.embedding = nn.Embedding(input_dim, embedding_dim)
        self.rnn = nn.GRU(embedding_dim, hidden_dim)  # <-- changes here
        self.fc = nn.Linear(hidden_dim, output_dim)

    def forward(self, text, text_length):
        # [sentence len, batch size] => [sentence len, batch size, embedding size]
        embedded = self.embedding(text)
        packed = torch.nn.utils.rnn.pack_padded_sequence(embedded, text_length)
        # [sentence len, batch size, embedding size] =>
        #   output: [sentence len, batch size, hidden size]
        #   hidden: [1, batch size, hidden size]
        packed_output, hidden = self.rnn(packed)  # <-- changes here
        return self.fc(hidden.squeeze(0)).view(-1)
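The section title mentions an LSTM, while the cell above uses nn.GRU. If you switch back to an LSTM for the comparison task, note that nn.LSTM returns a (hidden, cell) tuple instead of a single hidden state, so the forward pass changes slightly. A minimal sketch under that assumption (the class name RNNLSTM is ours):

class RNNLSTM(nn.Module):
    # Same architecture as above, but with an LSTM cell instead of a GRU
    def __init__(self, input_dim, embedding_dim, hidden_dim, output_dim):
        super().__init__()
        self.embedding = nn.Embedding(input_dim, embedding_dim)
        self.rnn = nn.LSTM(embedding_dim, hidden_dim)
        self.fc = nn.Linear(hidden_dim, output_dim)

    def forward(self, text, text_length):
        embedded = self.embedding(text)
        packed = torch.nn.utils.rnn.pack_padded_sequence(embedded, text_length)
        # the LSTM returns both the hidden state and the cell state
        packed_output, (hidden, cell) = self.rnn(packed)
        return self.fc(hidden.squeeze(0)).view(-1)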
In [ ]:
INPUT_DIM = len(TEXT.vocab)
torch.manual_seed(RANDOM_SEED)
model = RNN(INPUT_DIM, EMBEDDING_DIM, HIDDEN_DIM, OUTPUT_DIM)
model = model.to(DEVICE)
optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
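Since the lab asks you to vary EMBEDDING_DIM and HIDDEN_DIM, it can be useful to check how many trainable parameters each configuration has; a small helper (not part of the original notebook):

def count_parameters(model):
    # count only parameters that will be updated by the optimizer
    return sum(p.numel() for p in model.parameters() if p.requires_grad)

print(f'Trainable parameters: {count_parameters(model)}')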
Training¶
In [ ]:
def compute_binary_accuracy(model, data_loader, device):
    model.eval()
    correct_pred, num_examples = 0, 0
    with torch.no_grad():
        for batch_idx, batch_data in enumerate(data_loader):
            text, text_lengths = batch_data.review
            logits = model(text, text_lengths)
            predicted_labels = (torch.sigmoid(logits) > 0.5).long()
            num_examples += batch_data.sentiment.size(0)
            correct_pred += (predicted_labels.long() == batch_data.sentiment.long()).sum()
        return correct_pred.float()/num_examples * 100
In [ ]:
start_time = time.time()

for epoch in range(NUM_EPOCHS):
    model.train()
    for batch_idx, batch_data in enumerate(train_loader):
        text, text_lengths = batch_data.review

        ### FORWARD AND BACK PROP
        logits = model(text, text_lengths)
        cost = F.binary_cross_entropy_with_logits(logits, batch_data.sentiment)
        optimizer.zero_grad()
        cost.backward()

        ### UPDATE MODEL PARAMETERS
        optimizer.step()

        ### LOGGING
        if not batch_idx % 50:
            print(f'Epoch: {epoch+1:03d}/{NUM_EPOCHS:03d} | '
                  f'Batch {batch_idx:03d}/{len(train_loader):03d} | '
                  f'Cost: {cost:.4f}')

    with torch.set_grad_enabled(False):
        print(f'training accuracy: '
              f'{compute_binary_accuracy(model, train_loader, DEVICE):.2f}%'
              f'\nvalid accuracy: '
              f'{compute_binary_accuracy(model, valid_loader, DEVICE):.2f}%')

    print(f'Time elapsed: {(time.time() - start_time)/60:.2f} min')

print(f'Total Training Time: {(time.time() - start_time)/60:.2f} min')
print(f'Test accuracy: {compute_binary_accuracy(model, test_loader, DEVICE):.2f}%')
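Optionally, the trained weights can be saved and reloaded between the LSTM/GRU comparison runs; a standard PyTorch pattern (the file name below is our choice):

torch.save(model.state_dict(), 'imdb_rnn.pt')   # save the learned weights
# later: restore them into a freshly constructed model with matching dimensions
model.load_state_dict(torch.load('imdb_rnn.pt', map_location=DEVICE))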
In [ ]:
import spacy
nlp = spacy.load('en')


def predict_sentiment(model, sentence):
    # based on:
    # https://github.com/bentrevett/pytorch-sentiment-analysis/blob/
    # master/2%20-%20Upgraded%20Sentiment%20Analysis.ipynb
    model.eval()
    tokenized = [tok.text for tok in nlp.tokenizer(sentence)]
    indexed = [TEXT.vocab.stoi[t] for t in tokenized]
    length = [len(indexed)]
    tensor = torch.LongTensor(indexed).to(DEVICE)
    tensor = tensor.unsqueeze(1)
    length_tensor = torch.LongTensor(length)
    prediction = torch.sigmoid(model(tensor, length_tensor))
    return prediction.item()
In [ ]:
print('Probability positive:')
1-predict_sentiment(model, "This is such an awesome movie, I really love it!")
In [ ]:
print('Probability negative:')
predict_sentiment(model, "I really hate this movie. It is really bad and sucks!")