PyTorch Tutorial
PyTorch is a deep learning framework, one of the two main frameworks alongside TensorFlow.
import torch
import torch.nn as nn
Tensors
Tensors are PyTorch's most basic building block. Each tensor is a multi-dimensional matrix; for example, a \(256 \times 256\) square image might be represented by a \(3 \times 256 \times 256\) tensor, where the first dimension represents the color channels. Here's how to create a tensor:
# Initializing a tensor
data = torch.tensor([
[0, 1],
[2, 3],
[4, 5]
])
print(data)
'''
tensor([[0, 1],
        [2, 3],
        [4, 5]])
'''
Each tensor has a data type: the major data types you'll need to worry about are floats (torch.float32) and integers (torch.int). You can specify the data type explicitly when you create the tensor:
# Initializing a tensor with an explicit data type
# Notice the dots after the numbers, which specify that they're floats
data = torch.tensor([
[0, 1],
[2, 3],
[4, 5]
], dtype=torch.float32)
print(data)
'''
tensor([[0., 1.],
        [2., 3.],
        [4., 5.]])
'''
# Initializing a tensor without an explicit data type
# But because there are floats in the list, PyTorch implicitly treats dtype as float32
data = torch.tensor([
[0.11111111, 1],
[2, 3],
[4, 5]
])
print(data)
'''
tensor([[0.1111, 1.0000],
[2.0000, 3.0000],
[4.0000, 5.0000]])
'''
Utility functions also exist to create tensors with given shapes and contents:
# a tensor of all zeros
zeros = torch.zeros(2, 5)
print(zeros)
'''
tensor([[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]])
'''
# a tensor of all ones
ones = torch.ones(3, 4)
print(ones)
'''
tensor([[1., 1., 1., 1.],
[1., 1., 1., 1.],
[1., 1., 1., 1.]])
'''
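A few other creation helpers you will see often are torch.randn, torch.zeros_like, and torch.full; here is a quick sketch:
# a 2 x 3 tensor of values drawn from a standard normal distribution
noise = torch.randn(2, 3)
# a tensor of zeros with the same shape (and dtype) as an existing tensor
same_shape_zeros = torch.zeros_like(noise)
# a 2 x 3 tensor filled with a constant value
sevens = torch.full((2, 3), 7.0)
print(sevens.shape) # torch.Size([2, 3])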
# range from [1, 10)
rr = torch.arange(1, 10)
print(rr) # tensor([1, 2, 3, 4, 5, 6, 7, 8, 9])
rr + 2 # tensor([ 3, 4, 5, 6, 7, 8, 9, 10, 11])
rr * 2 # tensor([ 2, 4, 6, 8, 10, 12, 14, 16, 18])
Matrix multiplication also works as you'd expect:
a = torch.tensor([[1, 2], [2, 3], [4, 5]])
b = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]])
print("A is", a)
print("B is", b)
print("The product is", a.matmul(b))
print("The other product is", a @ b) # +, -, *, @
'''
A is tensor([[1, 2],
[2, 3],
[4, 5]])
B is tensor([[1, 2, 3, 4],
[5, 6, 7, 8]])
The product is tensor([[11, 14, 17, 20],
[17, 22, 27, 32],
[29, 38, 47, 56]])
The other product is tensor([[11, 14, 17, 20],
[17, 22, 27, 32],
[29, 38, 47, 56]])
'''
Matrix-vector products work the same way:
v = torch.tensor([1, 2, 3])
v.shape # torch.Size([3])
torch.tensor([[1, 2, 3], [4, 5, 6]]) @ v # tensor([14, 32])
The shape of a matrix (which can be accessed by .shape) is defined as the dimensions of the matrix. Here are some examples:
matr_2d = torch.tensor([[1, 2, 3], [4, 5, 6]])
print(matr_2d.shape)
print(matr_2d)
'''
torch.Size([2, 3])
tensor([[1, 2, 3],
[4, 5, 6]])
'''
matr_3d = torch.tensor([[[1, 2, 3, 4], [-2, 5, 6, 9]], [[5, 6, 7, 2], [8, 9, 10, 4]], [[-3, 2, 2, 1], [4, 6, 5, 9]]])
print(matr_3d)
print(matr_3d.shape)
'''
tensor([[[ 1, 2, 3, 4],
[-2, 5, 6, 9]],
[[ 5, 6, 7, 2],
[ 8, 9, 10, 4]],
[[-3, 2, 2, 1],
[ 4, 6, 5, 9]]])
torch.Size([3, 2, 4])
'''
Reshaping tensors can be used to make batch operations easier (more on that later), but be careful that the data is reshaped in the order you expect:
rr = torch.arange(1, 16)
print("The shape is currently", rr.shape)
print("The contents are currently", rr)
print()
rr = rr.view(5, 3)
print("After reshaping, the shape is currently", rr.shape)
print("The contents are currently", rr)
'''
The shape is currently torch.Size([15])
The contents are currently tensor([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
After reshaping, the shape is currently torch.Size([5, 3])
The contents are currently tensor([[ 1, 2, 3],
[ 4, 5, 6],
[ 7, 8, 9],
[10, 11, 12],
[13, 14, 15]])
'''
Finally, you can also inter-convert tensors with NumPy arrays:
import numpy as np
# numpy.ndarray --> torch.Tensor:
arr = np.array([[1, 0, 5]])
data = torch.tensor(arr)
print("This is a torch.tensor", data) # This is a torch.tensor tensor([[1, 0, 5]])
# torch.Tensor --> numpy.ndarray:
new_arr = data.numpy()
print("This is a np.ndarray", new_arr) # This is a np.ndarray [[1 0 5]]
One of the reasons why we use tensors is vectorized operations: operations that can be conducted in parallel over a particular dimension of a tensor.
data = torch.arange(1, 36, dtype=torch.float32).reshape(5, 7)
print("Data is:", data)
'''
Data is: tensor([[ 1., 2., 3., 4., 5., 6., 7.],
[ 8., 9., 10., 11., 12., 13., 14.],
[15., 16., 17., 18., 19., 20., 21.],
[22., 23., 24., 25., 26., 27., 28.],
[29., 30., 31., 32., 33., 34., 35.]])
'''
# We can perform operations like *sum* over each column...
print(data.sum(dim=0)) # tensor([ 75., 80., 85., 90., 95., 100., 105.])
# or over rows.
print(data.sum(dim=1)) # tensor([ 28., 77., 126., 175., 224.])
# Other operations are available:
print("Taking the stdev over rows:")
print(data.std(dim=1))
'''
Taking the stdev over rows:
tensor([2.1602, 2.1602, 2.1602, 2.1602, 2.1602])
'''
data.sum() # tensor(630.)
Indexing
You can access arbitrary elements of a tensor using the [] operator.
matr = torch.arange(1, 16).view(5, 3)
print(matr)
'''
tensor([[ 1, 2, 3],
[ 4, 5, 6],
[ 7, 8, 9],
[10, 11, 12],
[13, 14, 15]])
'''
matr[0] # tensor([1, 2, 3])
matr[0, :] # tensor([1, 2, 3])
matr[:, 0] # tensor([ 1, 4, 7, 10, 13])
matr[0:3]
'''
tensor([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
'''
matr[:, 0:2]
'''
tensor([[ 1, 2],
[ 4, 5],
[ 7, 8],
[10, 11],
[13, 14]])
'''
matr[0:3, 0:2]
'''
tensor([[1, 2],
[4, 5],
[7, 8]])
'''
matr[0][2] # tensor(3)
matr[0:3, 2] # tensor([3, 6, 9])
matr[0:3][2] # tensor([7, 8, 9])
matr[[0, 2, 4]]
'''
tensor([[ 1, 2, 3],
[ 7, 8, 9],
[13, 14, 15]])
'''
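Boolean masks can also be used to index; selecting with a mask returns a 1-D tensor of the elements where the mask is True:
# Select all the entries greater than 10
matr[matr > 10] # tensor([11, 12, 13, 14, 15])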
We can also index into multiple dimensions with :.
# Initialize an example tensor
x = torch.Tensor([
[[1, 2], [3, 4]],
[[5, 6], [7, 8]],
[[9, 10], [11, 12]]
])
x.shape # torch.Size([3, 2, 2])
# Access the 0th element along the first dimension (the first 2 x 2 matrix)
x[0] # Equivalent to x[0, :]
'''
tensor([[1., 2.],
[3., 4.]])
'''
x[:, 0]
'''
tensor([[ 1., 2.],
[ 5., 6.],
[ 9., 10.]])
'''
# Get the top left element of each element in our tensor
x[:, 0, 0] # tensor([1., 5., 9.])
# Let's access the 0th and 1st elements, each twice
i = torch.tensor([0, 0, 1, 1])
x[i]
'''
tensor([[[1., 2.],
[3., 4.]],
[[1., 2.],
[3., 4.]],
[[5., 6.],
[7., 8.]],
[[5., 6.],
[7., 8.]]])
'''
# Let's access the 0th elements of the 1st and 2nd elements
i = torch.tensor([1, 2])
j = torch.tensor([0])
x[i, j]
'''
tensor([[ 5., 6.],
[ 9., 10.]])
'''
We can get a Python scalar value from a tensor with item().
x[0, 0, 0] # tensor(1.)
x[0, 0, 0].item() # 1.0
Autograd
PyTorch is well-known for its automatic differentiation feature. We can call the backward() method to ask PyTorch to calculate the gradients, which are then stored in the grad attribute.
# Create an example tensor
# requires_grad parameter tells PyTorch to store gradients
x = torch.tensor([2.], requires_grad=True)
# We will use pretty-print to inspect values in this section
import pprint
pp = pprint.PrettyPrinter()
# Print the gradient if it is calculated
# Currently None since no gradients have been computed yet
pp.pprint(x.grad) # None
# Calculating the gradient of y with respect to x
y = x * x * 3 # 3x^2
y.backward() # d(y)/d(x) = d(3x^2)/d(x) = 6x = 12
pp.pprint(x.grad) # tensor([12.])
Let's run backprop from a different tensor again to see what happens.
z = x * x * 3 # 3x^2
z.backward()
pp.pprint(x.grad) # tensor([24.])
We can see that x.grad is updated to be the sum of the gradients calculated so far. When we run backprop in a neural network, we sum up all the gradients for a particular neuron before making an update. This is exactly what is happening here! This is also the reason why we need to run zero_grad() in every training iteration (more on this later). Otherwise our gradients would keep building up from one training iteration to the next, which would cause our updates to be wrong.
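As a quick sketch of what clearing the gradient looks like (later we will let the optimizer's zero_grad() handle this for us), we can zero the .grad tensor in place:
# Zero out the accumulated gradient in place
x.grad.zero_()
pp.pprint(x.grad) # tensor([0.])
# Running backprop again now gives 12, not 36
w = x * x * 3 # 3x^2
w.backward()
pp.pprint(x.grad) # tensor([12.])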
Neural Network Module
So far we have looked at tensors, their properties, and basic operations on tensors. These are especially useful to get familiar with if we are building the layers of our network from scratch. We will utilize these in Assignment 3, but moving forward, we will use predefined blocks in the torch.nn module of PyTorch. We will then put together these blocks to create complex networks. Let's start by importing this module with an alias so that we don't have to type torch.nn every time we use it.
import torch.nn as nn
Linear Layer
We can use nn.Linear(H_in, H_out) to create a linear layer. This will take a matrix of (N, *, H_in) dimensions and output a matrix of (N, *, H_out) dimensions. The * denotes that there could be an arbitrary number of dimensions in between. The linear layer performs the operation Ax+b, where A and b are initialized randomly. If we don't want the linear layer to learn the bias parameters, we can initialize our layer with bias=False.
# Create the inputs
input = torch.ones(2,3,4)
# (N, *, H_in) -> (N, *, H_out)
# Make a linear layer transforming (N, *, H_in) dimensional inputs
# to (N, *, H_out) dimensional outputs
linear = nn.Linear(4, 2)
linear_output = linear(input)
'''
tensor([[[0.1659, 0.8844],
[0.1659, 0.8844],
[0.1659, 0.8844]],
[[0.1659, 0.8844],
[0.1659, 0.8844],
[0.1659, 0.8844]]], grad_fn=<ViewBackward0>)
'''
list(linear.parameters()) # Ax + b
'''
[Parameter containing:
tensor([[-0.1401, -0.0146, 0.3152, -0.3624],
[ 0.4872, 0.0768, -0.0075, 0.1984]], requires_grad=True),
Parameter containing:
tensor([0.3678, 0.1296], requires_grad=True)]
'''
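If we create the layer with bias=False as mentioned above, only the weight matrix A is learned; a quick sketch:
# A linear layer without a bias term only has a weight parameter
linear_no_bias = nn.Linear(4, 2, bias=False)
list(linear_no_bias.parameters()) # a single 2 x 4 weight tensor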
Other Module Layers
There are several other preconfigured layers in the nn module. Some commonly used examples are nn.Conv2d, nn.ConvTranspose2d, nn.BatchNorm1d, nn.BatchNorm2d, nn.Upsample and nn.MaxPool2d, among many others. We will learn more about these as we progress in the course. For now, the only important thing to remember is that we can treat each of these layers as plug-and-play components: we provide the required dimensions and PyTorch takes care of setting them up.
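As a rough sketch of the plug-and-play idea (the channel and kernel sizes here are made up purely for illustration), here is how a convolution and a pooling layer transform a batch of fake images:
# A batch of 8 fake RGB images: (batch, channels, height, width)
images = torch.randn(8, 3, 32, 32)
# A 2D convolution mapping 3 input channels to 16 output channels
conv = nn.Conv2d(in_channels=3, out_channels=16, kernel_size=3, padding=1)
print(conv(images).shape) # torch.Size([8, 16, 32, 32])
# Max pooling halves the spatial dimensions
pool = nn.MaxPool2d(kernel_size=2)
print(pool(conv(images)).shape) # torch.Size([8, 16, 16, 16])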
Activation Function Layer
We can also use the nn module to apply activation functions to our tensors. Activation functions are used to add non-linearity to our network. Some examples of activation functions are nn.ReLU(), nn.Sigmoid() and nn.LeakyReLU(). Activation functions operate on each element separately, so the shape of the tensor we get as an output is the same as the one we pass in.
sigmoid = nn.Sigmoid()
output = sigmoid(linear_output)
'''
tensor([[[0.5414, 0.7077],
[0.5414, 0.7077],
[0.5414, 0.7077]],
[[0.5414, 0.7077],
[0.5414, 0.7077],
[0.5414, 0.7077]]], grad_fn=<SigmoidBackward0>)
'''
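The same applies to the other activations; for example, nn.ReLU() zeroes out the negative entries while keeping the shape unchanged:
relu = nn.ReLU()
relu(torch.tensor([[-1., 2.], [3., -4.]]))
'''
tensor([[0., 2.],
        [3., 0.]])
'''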
Putting the Layers Together
So far we have seen that we can create layers and pass the output of one as the input of the next. Instead of creating intermediate tensors and passing them around, we can use nn.Sequential, which does exactly that.
block = nn.Sequential(
nn.Linear(4, 2),
nn.Sigmoid()
)
input = torch.ones(2,3,4)
output = block(input)
'''
tensor([[[0.5967, 0.2129],
[0.5967, 0.2129],
[0.5967, 0.2129]],
[[0.5967, 0.2129],
[0.5967, 0.2129],
[0.5967, 0.2129]]], grad_fn=<SigmoidBackward0>)
'''
Custom Modules
Instead of using the predefined modules, we can also build our own by extending the nn.Module class. For example, we can build nn.Linear (which also extends nn.Module) on our own using the tensors introduced earlier! We can also build new, more complex modules, such as a custom neural network. You will be practicing these in a later assignment.
To create a custom module, the first thing we have to do is extend nn.Module. We can then initialize our parameters in the __init__ function, starting with a call to the __init__ function of the super class. All the class attributes we define which are nn module objects are treated as parameters, which can be learned during training. Tensors are not parameters, but they can be turned into parameters if they are wrapped in the nn.Parameter class.
All classes extending nn.Module are also expected to implement a forward(x) function, where x is a tensor. This is the function that is called when an input is passed to our module, such as in model(x).
class MultilayerPerceptron(nn.Module):

    def __init__(self, input_size, hidden_size):
        # Call to the __init__ function of the super class
        super(MultilayerPerceptron, self).__init__()

        # Bookkeeping: Saving the initialization parameters
        self.input_size = input_size
        self.hidden_size = hidden_size

        # Defining of our model
        # There isn't anything specific about the naming of `self.model`. It could
        # be something arbitrary.
        self.model = nn.Sequential(
            nn.Linear(self.input_size, self.hidden_size),
            nn.ReLU(),
            nn.Linear(self.hidden_size, self.input_size),
            nn.Sigmoid()
        )

    def forward(self, x):
        output = self.model(x)
        return output
Here is an alternative way to define the same class. You can see that we can replace nn.Sequential by defining the individual layers in the __init__ method and connecting them in the forward method.
class MultilayerPerceptron(nn.Module):

    def __init__(self, input_size, hidden_size):
        # Call to the __init__ function of the super class
        super(MultilayerPerceptron, self).__init__()

        # Bookkeeping: Saving the initialization parameters
        self.input_size = input_size
        self.hidden_size = hidden_size

        # Defining of our layers
        self.linear = nn.Linear(self.input_size, self.hidden_size)
        self.relu = nn.ReLU()
        self.linear2 = nn.Linear(self.hidden_size, self.input_size)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        linear = self.linear(x)
        relu = self.relu(linear)
        linear2 = self.linear2(relu)
        output = self.sigmoid(linear2)
        return output
Now that we have defined our class, we can instantiate it and see what it does.
# Make a sample input
input = torch.randn(2, 5)
# Create our model
model = MultilayerPerceptron(5, 3)
# Pass our input through our model
model(input)
'''
tensor([[0.4361, 0.4140, 0.5664, 0.5579, 0.5824],
[0.3058, 0.4544, 0.5923, 0.4600, 0.7283]], grad_fn=<SigmoidBackward0>)
'''
We can inspect the parameters of our model with the named_parameters() and parameters() methods.
list(model.named_parameters())
'''
[('model.0.weight',
Parameter containing:
tensor([[ 0.3175, -0.3833, 0.0397, 0.2366, 0.2105],
[ 0.1959, 0.3521, 0.4309, 0.0044, 0.1123],
[-0.2488, 0.1220, -0.3528, 0.0298, -0.3953]], requires_grad=True)),
('model.0.bias',
Parameter containing:
tensor([ 0.1331, -0.3920, 0.2241], requires_grad=True)),
('model.2.weight',
Parameter containing:
tensor([[-0.0850, 0.3045, -0.3813],
[-0.1728, 0.1982, 0.1539],
[-0.4842, 0.4425, 0.1310],
[-0.5038, 0.0357, -0.3473],
[-0.5223, 0.4909, 0.5637]], requires_grad=True)),
('model.2.bias',
Parameter containing:
tensor([-0.3302, -0.3806, 0.2055, 0.2857, 0.2624], requires_grad=True))]
'''
Optimization
We have shown how gradients are calculated with the backward() function. Having the gradients isn't enough for our models to learn. We also need to know how to update the parameters of our models. This is where the optimizers come in. The torch.optim module contains several optimizers that we can use. Some popular examples are optim.SGD and optim.Adam. When initializing an optimizer, we pass our model parameters, which can be accessed with model.parameters(), telling the optimizer which values it will be optimizing. Optimizers also have a learning rate (lr) parameter, which determines how big of an update will be made in every step. Different optimizers have different hyperparameters as well.
import torch.optim as optim
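For example, creating the two optimizers mentioned above might look roughly like this (the momentum and betas values below are just illustrative, not tuned):
# SGD with momentum (momentum is an SGD-specific hyperparameter)
sgd_optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
# Adam with its own hyperparameters controlling the moment estimates
adam_optimizer = optim.Adam(model.parameters(), lr=0.01, betas=(0.9, 0.999))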
After we have our optimizer, we can define a loss that we want to optimize for. We can either define the loss ourselves, or use one of the predefined loss functions in PyTorch, such as nn.BCELoss(). Let's put everything together now! We will start by creating some dummy data.
# Create the y data
y = torch.ones(10, 5)
# Add some noise to our goal y to generate our x
# We want our model to predict our original data, despite the noise
x = y + torch.randn_like(y)
'''
tensor([[ 1.6883, 2.4363, 1.0077, 1.5601, 1.0519],
[ 2.0385, 1.5589, 1.5018, 0.7759, 1.2133],
[ 0.2063, 2.3570, -0.4019, 0.9125, 1.3907],
[ 1.1276, 1.4252, 0.7107, 0.9918, 1.7002],
[ 0.2084, 0.5709, -0.5708, 1.0059, 2.7542],
[ 2.0215, 1.8618, 0.9618, 0.3904, 0.8664],
[ 0.5091, 1.8797, -1.1590, 0.0373, 1.7211],
[ 1.5021, -0.5343, 0.6154, 0.5425, 1.9724],
[ 2.0551, 3.1374, 2.6641, 1.5988, -0.1447],
[ 1.3315, 0.6051, 0.5282, 1.8110, 1.0074]])
'''
Now, we can define our model, optimizer and the loss function.
# Instantiate the model
model = MultilayerPerceptron(5, 3)
# Define the optimizer
adam = optim.Adam(model.parameters(), lr=1e-1)
# Define loss using a predefined loss function
loss_function = nn.BCELoss()
# Calculate how our model is doing now
y_pred = model(x)
loss_function(y_pred, y).item() # 0.7979347109794617
Let's see if we can have our model achieve a smaller loss. Now that we have everything we need, we can set up our training loop.
# Set the number of epochs, which determines the number of training iterations
n_epoch = 10

for epoch in range(n_epoch):
    # Set the gradients to 0
    adam.zero_grad()

    # Get the model predictions
    y_pred = model(x)

    # Get the loss
    loss = loss_function(y_pred, y)

    # Print stats
    print(f"Epoch {epoch}: training loss: {loss}")

    # Compute the gradients
    loss.backward()

    # Take a step to optimize the weights
    adam.step()
'''
Epoch 0: training loss: 0.7979347109794617
Epoch 1: training loss: 0.7053328156471252
Epoch 2: training loss: 0.5688669085502625
Epoch 3: training loss: 0.39817267656326294
Epoch 4: training loss: 0.24695901572704315
Epoch 5: training loss: 0.13362935185432434
Epoch 6: training loss: 0.06120799481868744
Epoch 7: training loss: 0.024198632687330246
Epoch 8: training loss: 0.008802199736237526
Epoch 9: training loss: 0.003131276462227106
'''
list(model.parameters())
'''
[Parameter containing:
tensor([[ 0.8050, 0.5290, 0.6128, 0.5318, 0.5301],
[ 0.6621, 1.0525, 0.0742, 0.7920, 0.9096],
[-0.5899, -0.8121, -0.2686, -0.1143, -0.6714]], requires_grad=True),
Parameter containing:
tensor([ 0.7624, 0.6068, -0.1993], requires_grad=True),
Parameter containing:
tensor([[ 1.0197, 1.3931, 0.1966],
[ 0.4320, 1.2615, -0.0090],
[ 0.4649, 0.8703, 0.4081],
[ 1.1602, 1.2160, 0.3957],
[ 0.7272, 0.5396, 0.4657]], requires_grad=True),
Parameter containing:
tensor([0.7430, 0.9329, 0.5573, 0.3067, 0.5419], requires_grad=True)]
'''
You can see that our loss is decreasing. Let's check the predictions of our model now and see if they are close to our original y, which was all 1s.
# See how our model performs on the training data
y_pred = model(x)
y_pred
'''
tensor([[1.0000, 1.0000, 0.9998, 1.0000, 0.9997],
[1.0000, 1.0000, 0.9995, 1.0000, 0.9993],
[1.0000, 0.9999, 0.9985, 1.0000, 0.9964],
[1.0000, 0.9999, 0.9992, 1.0000, 0.9985],
[1.0000, 0.9997, 0.9973, 0.9999, 0.9941],
[1.0000, 0.9999, 0.9992, 1.0000, 0.9987],
[0.9999, 0.9995, 0.9960, 0.9998, 0.9907],
[0.9999, 0.9986, 0.9934, 0.9997, 0.9918],
[1.0000, 1.0000, 0.9999, 1.0000, 0.9999],
[1.0000, 0.9998, 0.9982, 1.0000, 0.9972]], grad_fn=<SigmoidBackward0>)
'''
# Create test data and check how our model performs on it
x2 = y + torch.randn_like(y)
y_pred = model(x2)
y_pred
'''
tensor([[1.0000, 0.9998, 0.9985, 1.0000, 0.9976],
[1.0000, 0.9998, 0.9985, 1.0000, 0.9977],
[1.0000, 1.0000, 0.9999, 1.0000, 0.9998],
[1.0000, 0.9998, 0.9983, 1.0000, 0.9975],
[1.0000, 0.9999, 0.9986, 1.0000, 0.9967],
[1.0000, 0.9999, 0.9992, 1.0000, 0.9981],
[1.0000, 0.9999, 0.9988, 1.0000, 0.9975],
[1.0000, 0.9997, 0.9982, 1.0000, 0.9979],
[1.0000, 0.9999, 0.9991, 1.0000, 0.9988],
[1.0000, 1.0000, 0.9997, 1.0000, 0.9995]], grad_fn=<SigmoidBackward0>)
'''
Great! Looks like our model almost perfectly learned to filter out the noise from the x that we passed in!
Demo: Word Window Classification
Until this part of the notebook, we have learned the fundamentals of PyTorch and built a basic network solving a toy task. Now we will attempt to solve an example NLP task. Here are the things we will learn:
- Data: Creating a Dataset of Batched Tensors
- Modeling
- Training
- Prediction
In this section, our goal will be to train a model that will find the words in a sentence corresponding to a LOCATION, which will always be of span 1 (meaning that San Francisco won't be recognized as a LOCATION). Our task is called Word Window Classification for a reason. Instead of letting our model only look at one word in each forward pass, we would like it to be able to consider the context of the word in question. That is, for each word, we want our model to be aware of the surrounding words. Let's dive in!
Data
The very first task of any machine learning project is to set up our training set. Usually, there will be a training corpus we will be utilizing. In NLP tasks, the corpus would generally be a .txt or .csv file where each row corresponds to a sentence or a tabular datapoint. In our toy task, we will assume that we have already read our data and the corresponding labels into a Python list.
# Our raw data, which consists of sentences
corpus = [
"We always come to Paris",
"The professor is from Australia",
"I live in Stanford",
"He comes from Taiwan",
"The capital of Turkey is Ankara"
]
Preprocessing
To make it easier for our models to learn, we usually apply a few preprocessing steps to our data. This is especially important when dealing with text data. Here are some examples of text preprocessing:
- Tokenization: Tokenizing the sentences into words.
- Lowercasing: Changing all the letters to be lowercase.
- Noise removal: Removing special characters (such as punctuation).
- Stop words removal: Removing commonly used words.
Which preprocessing steps are necessary is determined by the task at hand. For example, although it is useful to remove special characters in some tasks, for others they may be important (for example, if we are dealing with multiple languages). For our task, we will lowercase our words and tokenize.
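For reference, the noise-removal and stop-word-removal steps could look roughly like the sketch below; we won't use them for our task, and the stop-word list here is a tiny made-up one.
import re

# A tiny, made-up stop word list; real lists are much longer
STOP_WORDS = {"the", "is", "of"}

def remove_noise(sentence):
    # Drop everything that is not a word character or whitespace
    return re.sub(r"[^\w\s]", "", sentence)

def remove_stop_words(tokens):
    # Filter out commonly used words
    return [token for token in tokens if token not in STOP_WORDS]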
# The preprocessing function we will use to generate our training examples
# Our function is a simple one, we lowercase the letters
# and then tokenize the words.
def preprocess_sentence(sentence):
    return sentence.lower().split()
# Create our training set
train_sentences = [preprocess_sentence(sent) for sent in corpus]
'''
[['we', 'always', 'come', 'to', 'paris'],
['the', 'professor', 'is', 'from', 'australia'],
['i', 'live', 'in', 'stanford'],
['he', 'comes', 'from', 'taiwan'],
['the', 'capital', 'of', 'turkey', 'is', 'ankara']]
'''
For each training example we have, we should also have a corresponding label. Recall that the goal of our model was to determine which words correspond to a LOCATION. That is, we want our model to output 0 for all the words that are not LOCATIONs and 1 for the ones that are LOCATIONs.
# Set of locations that appear in our corpus
locations = set(["australia", "ankara", "paris", "stanford", "taiwan", "turkey"])
# Our train labels
train_labels = [[1 if word in locations else 0 for word in sent] for sent in train_sentences]
'''
[[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 1],
[0, 0, 0, 1],
[0, 0, 0, 1, 0, 1]]
'''
Converting Words to Embeddings
Let's look at our training data a little more closely. Each datapoint we have is a sequence of words. On the other hand, we know that machine learning models work with numbers in vectors. How are we going to turn words into numbers? You may be thinking embeddings, and you are right!
Imagine that we have an embedding lookup table E, where each row corresponds to an embedding. That is, each word in our vocabulary would have a corresponding embedding row i in this table. Whenever we want to find an embedding for a word, we will follow these steps:
- Find the corresponding index i of the word in the embedding table: word -> index.
- Index into the embedding table and get the embedding: index -> embedding.
Let's look at the first step. We should assign all the words in our vocabulary to a corresponding index. We can do it as follows:
- Find all the unique words in our corpus.
- Assign an index to each.
# Find all the unique words in our corpus
vocabulary = set(w for s in train_sentences for w in s)
'''
{'always',
'ankara',
'australia',
'capital',
'come',
'comes',
'from',
'he',
'i',
'in',
'is',
'live',
'of',
'paris',
'professor',
'stanford',
'taiwan',
'the',
'to',
'turkey',
'we'}
'''
vocabulary now contains all the words in our corpus. On the other hand, at test time we may see words that are not contained in our vocabulary. If we can figure out a way to represent the unknown words, our model can still reason about whether they are a LOCATION or not, since we are also looking at the neighboring words for each prediction.
We introduce a special token, <unk>, to tackle the words that are out of vocabulary. We could pick another string for our unknown token if we wanted. The only requirement here is that our token should be unique: we should only be using this token for unknown words. We will also add this special token to our vocabulary.
# Add the unknown token to our vocabulary
vocabulary.add("<unk>")
Earlier we mentioned that our task is called Word Window Classification because our model looks at the surrounding words in addition to the given word when it needs to make a prediction.
For example, let's take the sentence "We always come to Paris". The corresponding training label for this sentence is 0, 0, 0, 0, 1 since only Paris, the last word, is a LOCATION. In one pass (meaning a call to forward()), our model will try to generate the correct label for one word. Let's say our model is trying to generate the correct label 1 for Paris. If we only allow our model to see Paris, but nothing else, we will miss out on the important information that the word to often appears with LOCATIONs.
Word windows allow our model to consider the surrounding +N or -N words of each word when making a prediction. In our earlier example for Paris, if we have a window size of 1, that means our model will look at the words that come immediately before and after Paris, which are to, and, well, nothing. Now, this raises another issue. Paris is at the end of our sentence, so there isn't another word following it. Remember that we define the input dimensions of our PyTorch models when we are initializing them. If we set the window size to 1, it means that our model will be accepting 3 words in every pass. We cannot have our model expect 2 words from time to time.
The solution is to introduce a special token, such as <pad>, that will be added to our sentences to make sure that every word has a valid window around it. Similar to the <unk> token, we could pick another string for our pad token if we wanted, as long as we make sure it is used for a unique purpose.
# Add the <pad> token to our vocabulary
vocabulary.add("<pad>")
# Function that pads the given sentence
# We are introducing this function here as an example
# We will be utilizing it later in the tutorial
def pad_window(sentence, window_size, pad_token="<pad>"):
    window = [pad_token] * window_size
    return window + sentence + window
# Show padding example
window_size = 2
pad_window(train_sentences[0], window_size=window_size)
# ['<pad>', '<pad>', 'we', 'always', 'come', 'to', 'paris', '<pad>', '<pad>']
Now that our vocabulary is ready, let's assign an index to each of our words.
# We are just converting our vocabulary to a list to be able to index into it
# Sorting is not necessary; we sort to show an ordered word_to_ix dictionary
# That being said, we will see that having the index for the padding token
# be 0 is convenient as some PyTorch functions use it as a default value
# such as nn.utils.rnn.pad_sequence, which we will cover in a bit
ix_to_word = sorted(list(vocabulary))
# Creating a dictionary to find the index of a given word
word_to_ix = {word: ind for ind, word in enumerate(ix_to_word)}
'''
{'<pad>': 0,
'<unk>': 1,
'always': 2,
'ankara': 3,
'australia': 4,
'capital': 5,
'come': 6,
'comes': 7,
'from': 8,
'he': 9,
'i': 10,
'in': 11,
'is': 12,
'live': 13,
'of': 14,
'paris': 15,
'professor': 16,
'stanford': 17,
'taiwan': 18,
'the': 19,
'to': 20,
'turkey': 21,
'we': 22}
'''
Great! We are ready to convert our training sentences into a sequence of indices corresponding to each token.
# Given a sentence of tokens, return the corresponding indices
def convert_token_to_indices(sentence, word_to_ix):
    indices = []
    for token in sentence:
        # Check if the token is in our vocabulary. If it is, get its index.
        # If not, get the index for the unknown token.
        if token in word_to_ix:
            index = word_to_ix[token]
        else:
            index = word_to_ix["<unk>"]
        indices.append(index)
    return indices

# More compact version of the same function
def _convert_token_to_indices(sentence, word_to_ix):
    return [word_to_ix.get(token, word_to_ix["<unk>"]) for token in sentence]
# Show an example
example_sentence = ["we", "always", "come", "to", "kuwait"]
example_indices = convert_token_to_indices(example_sentence, word_to_ix)
restored_example = [ix_to_word[ind] for ind in example_indices]
print(f"Original sentence is: {example_sentence}")
print(f"Going from words to indices: {example_indices}")
print(f"Going from indices to words: {restored_example}")
'''
Original sentence is: ['we', 'always', 'come', 'to', 'kuwait']
Going from words to indices: [22, 2, 6, 20, 1]
Going from indices to words: ['we', 'always', 'come', 'to', '<unk>']
'''
In the example above, kuwait shows up as <unk>, because it is not included in our vocabulary. Let's convert our train_sentences to example_padded_indices.
# Converting our sentences to indices
example_padded_indices = [convert_token_to_indices(s, word_to_ix) for s in train_sentences]
'''
[[22, 2, 6, 20, 15],
[19, 16, 12, 8, 4],
[10, 13, 11, 17],
[9, 7, 8, 18],
[19, 5, 14, 21, 12, 3]]
'''
Now that we have an index for each word in our vocabulary, we can create an embedding table with the nn.Embedding class in PyTorch. It is called as follows: nn.Embedding(num_words, embedding_dimension), where num_words is the number of words in our vocabulary and embedding_dimension is the dimension of the embeddings we want to have. There is nothing fancy about nn.Embedding: it is just a wrapper class around a trainable NxE dimensional tensor, where N is the number of words in our vocabulary and E is the number of embedding dimensions. This table is initially random, but it will change over time. As we train our network, the gradients will be backpropagated all the way to the embedding layer, and hence our word embeddings will be updated. We will initialize the embedding layer we use for our model inside the model itself, but we are showing a standalone example here.
# Creating an embedding table for our words
embedding_dim = 5
embeds = nn.Embedding(len(vocabulary), embedding_dim)
# Printing the parameters in our embedding table
list(embeds.parameters())
'''
[Parameter containing:
tensor([[ 0.1718, 0.3165, -0.4836, -0.6429, 1.3020],
[ 0.0453, 0.8202, 0.6442, -1.8969, 0.3662],
[ 0.8688, 0.4435, -1.7014, 1.5562, 0.0156],
[ 0.1211, -1.5682, 1.0828, 0.9704, -0.0135],
[-0.3049, -0.3976, 0.7114, 0.8750, -0.7537],
[ 0.0366, 1.5825, 1.8194, -2.5429, -0.1560],
[-0.2402, 0.9947, 0.0371, -0.3243, 1.1177],
[ 0.9853, 0.4371, 0.1822, 0.5456, -0.0931],
[ 0.4316, -0.3532, 1.9703, 1.4578, 0.2749],
[-0.2652, 0.0584, 2.4948, 1.1073, 0.9632],
[ 1.2871, -0.3748, 0.1025, -0.6221, 0.3312],
[-0.1708, 0.3517, 0.3932, 2.4081, -1.3188],
[-1.5728, -1.3237, 0.2663, 0.7961, 1.1243],
[-0.7672, 0.8067, -0.3083, -1.3566, -1.0609],
[ 0.1682, -0.0464, -0.9953, 0.9492, -0.0099],
[-0.0984, -0.7362, -1.0464, -1.7572, 1.2555],
[ 1.7908, -0.4643, -0.0625, -0.6261, -0.2631],
[-1.5049, -1.6065, 0.5007, 1.0853, 2.0203],
[ 1.0128, 0.2351, -0.6392, 1.1595, 1.3439],
[-0.2066, -1.6737, 0.5103, -1.0767, 0.7366],
[ 1.4603, 1.2867, 0.0709, -0.6476, -0.7879],
[ 0.3139, -0.7187, -0.9319, 0.1240, -0.4636],
[ 0.1071, 0.1757, -1.1584, -0.0905, 0.0190]], requires_grad=True)]
'''
To get the word embedding for a word in our vocabulary, all we need to do is to create a lookup tensor. The lookup tensor is just a tensor containing the index we want to look up. The nn.Embedding class expects an index tensor of type LongTensor, so we should create our tensor accordingly.
# Get the embedding for the word Paris
index = word_to_ix["paris"]
index_tensor = torch.tensor(index, dtype=torch.long)
paris_embed = embeds(index_tensor)
'''
tensor([-0.0984, -0.7362, -1.0464, -1.7572, 1.2555],
grad_fn=<EmbeddingBackward0>)
'''
# We can also get multiple embeddings at once
index_paris = word_to_ix["paris"]
index_ankara = word_to_ix["ankara"]
indices = [index_paris, index_ankara]
indices_tensor = torch.tensor(indices, dtype=torch.long)
embeddings = embeds(indices_tensor)
'''
tensor([[-0.0984, -0.7362, -1.0464, -1.7572, 1.2555],
[ 0.1211, -1.5682, 1.0828, 0.9704, -0.0135]],
grad_fn=<EmbeddingBackward0>)
'''
Usually, we define the embedding layer as part of our model, which you will see in the later sections of our notebook.
Batching Sentences
We have learned about batches in class. Waiting for our whole training corpus to be processed before making an update is costly. On the other hand, updating the parameters after every training example causes the loss to be less stable between updates. To combat these issues, we instead update our parameters after training on a batch of data. This allows us to get a better estimate of the gradient of the global loss. In this section, we will learn how to structure our data into batches using the torch.utils.data.DataLoader class.
We will be calling the DataLoader class as follows: DataLoader(data, batch_size=batch_size, shuffle=True, collate_fn=collate_fn). The batch_size parameter determines the number of examples per batch. In every epoch, we will be iterating over all the batches using the DataLoader. The order of batches is deterministic by default, but we can ask the DataLoader to shuffle the examples by setting the shuffle parameter to True. This way we ensure that we don't encounter a bad batch multiple times.
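As a minimal illustration of batching on its own (before we add a collate_fn), here is roughly what a DataLoader over a plain list looks like:
from torch.utils.data import DataLoader

# A toy dataset of 6 numbers, batched 2 at a time
toy_loader = DataLoader([1, 2, 3, 4, 5, 6], batch_size=2, shuffle=False)
for batch in toy_loader:
    print(batch)
'''
tensor([1, 2])
tensor([3, 4])
tensor([5, 6])
'''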
If provided, DataLoader passes the batches it prepares to the collate_fn. We can write a custom function to pass to the collate_fn parameter in order to print stats about our batch or perform extra processing. In our case, we will use the collate_fn to:
- Window pad our train sentences.
- Convert the words in the training examples to indices.
- Pad the training examples so that all the sentences and labels have the same length. Similarly, we also need to pad the labels. This creates an issue because when calculating the loss, we need to know the actual number of words in a given example. We will also keep track of this number in the function we pass to the collate_fn parameter.
Because our version of the collate_fn function needs access to our word_to_ix dictionary (so that it can turn words into indices), we will make use of the partial function in Python, which lets us fix some of the arguments of the function we pass it.
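If partial is new to you, here is a tiny, unrelated sketch of what it does: it returns a new function with some arguments already filled in.
from functools import partial

def scale(value, factor):
    return value * factor

double = partial(scale, factor=2) # fixes factor=2
double(10) # 20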
from torch.utils.data import DataLoader
from functools import partial
def custom_collate_fn(batch, window_size, word_to_ix):
    # Break our batch into the training examples (x) and labels (y)
    # We are turning our x and y into tensors because the nn.utils.rnn.pad_sequence
    # method expects tensors. This is also useful since our model will be
    # expecting tensor inputs.
    x, y = zip(*batch)

    # Now we need to window pad our training examples. We have already defined a
    # function to handle window padding. We are including it here again so that
    # everything is in one place.
    def pad_window(sentence, window_size, pad_token="<pad>"):
        window = [pad_token] * window_size
        return window + sentence + window

    # Pad the train examples.
    x = [pad_window(s, window_size=window_size) for s in x]

    # Now we need to turn words in our training examples to indices. We are
    # copying the function defined earlier for the same reason as above.
    def convert_tokens_to_indices(sentence, word_to_ix):
        return [word_to_ix.get(token, word_to_ix["<unk>"]) for token in sentence]

    # Convert the train examples into indices.
    x = [convert_tokens_to_indices(s, word_to_ix) for s in x]

    # We will now pad the examples so that the lengths of all the examples in
    # one batch are the same, making it possible to do matrix operations.
    # We set the batch_first parameter to True so that the returned matrix has
    # the batch as the first dimension.
    pad_token_ix = word_to_ix["<pad>"]

    # pad_sequence function expects the input to be a tensor, so we turn x into one
    x = [torch.LongTensor(x_i) for x_i in x]
    x_padded = nn.utils.rnn.pad_sequence(x, batch_first=True, padding_value=pad_token_ix)

    # We will also pad the labels. Before doing so, we will record the number
    # of labels so that we know how many words existed in each example.
    lengths = [len(label) for label in y]
    lengths = torch.LongTensor(lengths)

    y = [torch.LongTensor(y_i) for y_i in y]
    y_padded = nn.utils.rnn.pad_sequence(y, batch_first=True, padding_value=0)

    # We are now ready to return our variables. The order we return our variables
    # here will match the order we read them in our training loop.
    return x_padded, y_padded, lengths
This function seems long, but it really doesn't have to be. Check out the alternative version below where we remove the extra function declarations and comments.
def _custom_collate_fn(batch, window_size, word_to_ix):
    # Prepare the datapoints
    x, y = zip(*batch)
    x = [pad_window(s, window_size=window_size) for s in x]
    x = [convert_token_to_indices(s, word_to_ix) for s in x]

    # Pad x so that all the examples in the batch have the same size
    pad_token_ix = word_to_ix["<pad>"]
    x = [torch.LongTensor(x_i) for x_i in x]
    x_padded = nn.utils.rnn.pad_sequence(x, batch_first=True, padding_value=pad_token_ix)

    # Pad y and record the length
    lengths = [len(label) for label in y]
    lengths = torch.LongTensor(lengths)
    y = [torch.LongTensor(y_i) for y_i in y]
    y_padded = nn.utils.rnn.pad_sequence(y, batch_first=True, padding_value=0)

    return x_padded, y_padded, lengths
Now, we can see the DataLoader in action.
# Parameters to be passed to the DataLoader
data = list(zip(train_sentences, train_labels))
batch_size = 2
shuffle = True
window_size = 2
collate_fn = partial(custom_collate_fn, window_size=window_size, word_to_ix=word_to_ix)
# Instantiate the DataLoader
loader = DataLoader(data, batch_size=batch_size, shuffle=shuffle, collate_fn=collate_fn)
# Go through one loop
counter = 0
for batched_x, batched_y, batched_lengths in loader:
    print(f"Iteration {counter}")
    print("Batched Input:")
    print(batched_x)
    print("Batched Labels:")
    print(batched_y)
    print("Batched Lengths:")
    print(batched_lengths)
    print("")
    counter += 1
'''
Iteration 0
Batched Input:
tensor([[ 0, 0, 19, 5, 14, 21, 12, 3, 0, 0],
[ 0, 0, 9, 7, 8, 18, 0, 0, 0, 0]])
Batched Labels:
tensor([[0, 0, 0, 1, 0, 1],
[0, 0, 0, 1, 0, 0]])
Batched Lengths:
tensor([6, 4])
Iteration 1
Batched Input:
tensor([[ 0, 0, 19, 16, 12, 8, 4, 0, 0],
[ 0, 0, 22, 2, 6, 20, 15, 0, 0]])
Batched Labels:
tensor([[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1]])
Batched Lengths:
tensor([5, 5])
Iteration 2
Batched Input:
tensor([[ 0, 0, 10, 13, 11, 17, 0, 0]])
Batched Labels:
tensor([[0, 0, 0, 1]])
Batched Lengths:
tensor([4])
'''
The batched input tensors you see above will be passed into our model. On the other hand, we started off saying that our model will be a window classifier. The way our input tensors are currently formatted, we have all the words in a sentence in one datapoint. When we pass this input to our model, it needs to create the windows for each word, make a prediction as to whether the center word is a LOCATION or not for each window, put the predictions together, and return them.
We could avoid this problem if we formatted our data by breaking it into windows beforehand. In this example, we will instead have our model take care of the formatting.
Given that our window_size is N, we want our model to make a prediction on every 2N+1 tokens. That is, if we have an input with 9 tokens and a window_size of 2, we want our model to return 5 predictions. This makes sense because before we padded it with 2 tokens on each side, our input also had 5 tokens in it!
We can create these windows by using for loops, but there is a faster PyTorch alternative, which is the unfold(dimension, size, step) method. We can create the windows we need using this method as follows:
# Print the original tensor
print(f"Original Tensor: ")
print(batched_x)
print("")
# Create the 2 * 2 + 1 chunks
chunk = batched_x.unfold(1, window_size*2 + 1, 1)
print(f"Windows: ")
print(chunk)
'''
Original Tensor:
tensor([[ 0, 0, 10, 13, 11, 17, 0, 0]])
Windows:
tensor([[[ 0, 0, 10, 13, 11],
[ 0, 10, 13, 11, 17],
[10, 13, 11, 17, 0],
[13, 11, 17, 0, 0]]])
'''
Model
Now that we have prepared our data, we are ready to build our model. We have learned how to write custom nn.Module classes. We will do the same here and put everything we have learned so far together.
class WordWindowClassifier(nn.Module):

    def __init__(self, hyperparameters, vocab_size, pad_ix=0):
        super(WordWindowClassifier, self).__init__()

        """ Instance variables """
        self.window_size = hyperparameters["window_size"]
        self.embed_dim = hyperparameters["embed_dim"]
        self.hidden_dim = hyperparameters["hidden_dim"]
        self.freeze_embeddings = hyperparameters["freeze_embeddings"]

        """ Embedding Layer
        Takes in a tensor containing embedding indices, and returns the
        corresponding embeddings. The output is of dim
        (number_of_indices * embedding_dim).

        If freeze_embeddings is True, set the embedding layer parameters to be
        non-trainable. This is useful if we only want the parameters other than the
        embeddings parameters to change.
        """
        self.embeds = nn.Embedding(vocab_size, self.embed_dim, padding_idx=pad_ix)
        if self.freeze_embeddings:
            self.embeds.weight.requires_grad = False

        """ Hidden Layer
        """
        full_window_size = 2 * self.window_size + 1
        self.hidden_layer = nn.Sequential(
            nn.Linear(full_window_size * self.embed_dim, self.hidden_dim),
            nn.Tanh()
        )

        """ Output Layer
        """
        self.output_layer = nn.Linear(self.hidden_dim, 1)

        """ Probabilities
        """
        self.probabilities = nn.Sigmoid()

    def forward(self, inputs):
        """
        Let B := batch_size
            L := window-padded sentence length
            D := self.embed_dim
            S := self.window_size
            H := self.hidden_dim

        inputs: a (B, L) tensor of token indices
        """
        B, L = inputs.size()

        """
        Reshaping.
        Takes in a (B, L) LongTensor
        Outputs a (B, L~, S) LongTensor
        """
        # First, get our word windows for each word in our input.
        token_windows = inputs.unfold(1, 2 * self.window_size + 1, 1)
        _, adjusted_length, _ = token_windows.size()

        # Good idea to do internal tensor-size sanity checks, at the least in comments!
        assert token_windows.size() == (B, adjusted_length, 2 * self.window_size + 1)

        """
        Embedding.
        Takes in a torch.LongTensor of size (B, L~, S)
        Outputs a (B, L~, S, D) FloatTensor.
        """
        embedded_windows = self.embeds(token_windows)

        """
        Reshaping.
        Takes in a (B, L~, S, D) FloatTensor.
        Resizes it into a (B, L~, S*D) FloatTensor.
        The -1 argument "infers" what the last dimension should be based on leftover axes.
        """
        embedded_windows = embedded_windows.view(B, adjusted_length, -1)

        """
        Layer 1.
        Takes in a (B, L~, S*D) FloatTensor.
        Resizes it into a (B, L~, H) FloatTensor.
        """
        layer_1 = self.hidden_layer(embedded_windows)

        """
        Layer 2.
        Takes in a (B, L~, H) FloatTensor.
        Resizes it into a (B, L~, 1) FloatTensor.
        """
        output = self.output_layer(layer_1)

        """
        Sigmoid.
        Takes in a (B, L~, 1) FloatTensor of unnormalized scores.
        Outputs a (B, L~, 1) FloatTensor of probabilities.
        """
        output = self.probabilities(output)
        output = output.view(B, -1)

        return output
Training
We are now ready to put everything together. Let's start with preparing our data and initializing our model. We can then initialize our optimizer and define our loss function. This time, instead of using one of the predefined loss functions as we did before, we will define our own loss function.
# Prepare the data
data = list(zip(train_sentences, train_labels))
batch_size = 2
shuffle = True
window_size = 2
collate_fn = partial(custom_collate_fn, window_size=window_size, word_to_ix=word_to_ix)
# Instantiate a DataLoader
loader = DataLoader(data, batch_size=batch_size, shuffle=shuffle, collate_fn=collate_fn)
# Initialize a model
# It is useful to put all the model hyperparameters in a dictionary
model_hyperparameters = {
"batch_size": 4,
"window_size": 2,
"embed_dim": 25,
"hidden_dim": 25,
"freeze_embeddings": False,
}
vocab_size = len(word_to_ix)
model = WordWindowClassifier(model_hyperparameters, vocab_size)
# Define an optimizer
learning_rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
# Define a loss function, which computes the binary cross entropy loss
def loss_function(batch_outputs, batch_labels, batch_lengths):
    # Calculate the loss for the whole batch
    bceloss = nn.BCELoss()
    loss = bceloss(batch_outputs, batch_labels.float())

    # Rescale the loss. Remember that we have used lengths to store the
    # number of words in each training example
    loss = loss / batch_lengths.sum().float()

    return loss
Unlike our earlier example, this time instead of passing all of our training data to the model at once in each epoch, we will be utilizing batches. Hence, in each training epoch iteration, we also iterate over the batches.
# Function that will be called in every epoch
def train_epoch(loss_function, optimizer, model, loader):

    # Keep track of the total loss for the batch
    total_loss = 0
    for batch_inputs, batch_labels, batch_lengths in loader:
        # Clear the gradients
        optimizer.zero_grad()
        # Run a forward pass
        outputs = model.forward(batch_inputs)
        # Compute the batch loss
        loss = loss_function(outputs, batch_labels, batch_lengths)
        # Calculate the gradients
        loss.backward()
        # Update the parameters
        optimizer.step()
        total_loss += loss.item()

    return total_loss

# Function containing our main training loop
def train(loss_function, optimizer, model, loader, num_epochs=10000):

    # Iterate through each epoch and call our train_epoch function
    for epoch in range(num_epochs):
        epoch_loss = train_epoch(loss_function, optimizer, model, loader)
        if epoch % 100 == 0: print(epoch_loss)
Let's start training!
num_epochs = 1000
train(loss_function, optimizer, model, loader, num_epochs=num_epochs)
'''
0.27049100399017334
0.19846535474061966
0.16969897970557213
0.14401804842054844
0.09773657470941544
0.08393518626689911
0.07200734317302704
0.0557590126991272
0.0521390987560153
0.04269423242658377
'''
Prediction
Let's see how well our model is at making predictions. We can start by creating our test data.
# Create test sentences
test_corpus = ["She comes from Paris"]
test_sentences = [s.lower().split() for s in test_corpus]
test_labels = [[0, 0, 0, 1]]
# Create a test loader
test_data = list(zip(test_sentences, test_labels))
batch_size = 1
shuffle = False
window_size = 2
collate_fn = partial(custom_collate_fn, window_size=2, word_to_ix=word_to_ix)
test_loader = torch.utils.data.DataLoader(test_data,
                                          batch_size=1,
                                          shuffle=False,
                                          collate_fn=collate_fn)
Let's loop over our test examples to see how well we are doing.
for test_instance, labels, _ in test_loader:
    outputs = model.forward(test_instance)
    print(labels)
    print(outputs)
'''
tensor([[0, 0, 0, 1]])
tensor([[0.3131, 0.0581, 0.0696, 0.7416]], grad_fn=<ViewBackward0>)
'''