@aigoncharov · Created March 9, 2025 17:13
Unit tests for HW2 P2T1 for the ML course at Skoltech

import torch
import torch.nn as nn

# The layers under test (MaxPooling, FlattenLayer, Softmax, DenseLayer) come
# from the homework itself; a minimal sketch of each is included before its
# tests so the file is self-contained.
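# A minimal sketch of the MaxPooling interface these tests assume (a
# hypothetical reference, not the graded implementation): forward slides a
# kernel_size x kernel_size window with the given stride and takes the max;
# backward routes each upstream gradient to the argmax position of its window,
# accumulating where windows overlap.
class MaxPooling:
    def __init__(self, kernel_size, stride):
        self.kernel_size = kernel_size
        self.stride = stride

    def forward(self, x):
        self.x = x  # cache the input for the backward pass
        n, c, h, w = x.shape
        k, s = self.kernel_size, self.stride
        h_out, w_out = (h - k) // s + 1, (w - k) // s + 1
        out = x.new_zeros((n, c, h_out, w_out))
        for i in range(h_out):
            for j in range(w_out):
                window = x[:, :, i * s:i * s + k, j * s:j * s + k]
                out[:, :, i, j] = window.reshape(n, c, -1).max(dim=-1).values
        return out

    def backward(self, grad_output):
        n, c, _, _ = self.x.shape
        k, s = self.kernel_size, self.stride
        grad_input = torch.zeros_like(self.x)
        _, _, h_out, w_out = grad_output.shape
        for i in range(h_out):
            for j in range(w_out):
                window = self.x[:, :, i * s:i * s + k, j * s:j * s + k]
                flat_idx = window.reshape(n, c, -1).argmax(dim=-1)
                rows = i * s + flat_idx // k  # window-local flat index -> global row
                cols = j * s + flat_idx % k   # ... and global column
                for b in range(n):
                    for ch in range(c):
                        grad_input[b, ch, rows[b, ch], cols[b, ch]] += grad_output[b, ch, i, j]
        return grad_input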
# MaxPooling with non-overlapping windows: each 2x2 block contributes one max.
max_pooling = MaxPooling(kernel_size=2, stride=2)
t_in = torch.tensor([[[[6, 2, 1, 3], [4, 8, 5, 7], [3, 1, 9, 2], [6, 5, 4, 0]]]])
t_expected = torch.tensor([[[[8, 7], [6, 9]]]])
assert max_pooling.forward(t_in).equal(t_expected)
b_in = torch.tensor([[[[1, 2], [3, 4]]]])
b_expected = torch.tensor([[[[0, 0, 0, 0], [0, 1, 0, 2], [0, 0, 4, 0], [3, 0, 0, 0]]]])
assert max_pooling.backward(b_in).equal(b_expected)
# MaxPooling with overlapping windows (stride 1): in backward, gradients from
# all windows that share the same max position accumulate there.
max_pooling = MaxPooling(kernel_size=2, stride=1)
t_in = torch.tensor([[[[6, 2, 1, 3], [4, 8, 5, 7], [3, 1, 9, 2], [6, 5, 4, 0]]]])
t_expected = torch.tensor([[[[8, 8, 7], [8, 9, 9], [6, 9, 9]]]])
assert max_pooling.forward(t_in).equal(t_expected)
b_in = torch.tensor([[[[1, 2, 3], [4, 5, 6], [7, 8, 9]]]])
b_expected = torch.tensor([[[[0, 0, 0, 0], [0, 7, 0, 3], [0, 0, 28, 0], [7, 0, 0, 0]]]])
assert max_pooling.backward(b_in).equal(b_expected)
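# A minimal sketch of FlattenLayer (hypothetical reference, assuming the
# interface the tests use): forward collapses (N, C, H, W) into (N, C*H*W);
# backward simply restores the cached input shape.
class FlattenLayer:
    def forward(self, x):
        self.input_shape = x.shape  # remember the shape for backward
        return x.reshape(x.shape[0], -1)

    def backward(self, grad_output):
        return grad_output.reshape(self.input_shape)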
# FlattenLayer: forward and backward must be exact inverses of each other.
flatten_layer = FlattenLayer()
t_in = torch.tensor([[[[1, 2], [3, 4]]], [[[1, 2], [3, 4]]]])
t_expected = torch.tensor([[1, 2, 3, 4], [1, 2, 3, 4]])
assert flatten_layer.forward(t_in).equal(t_expected)
assert flatten_layer.backward(t_expected).equal(t_in)
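# A minimal sketch of Softmax (hypothetical reference). Forward uses the
# numerically stable form e^{x - x_max} / sum(e^{x - x_max}); backward applies
# the Jacobian-vector product dL/dx_i = y_i * (g_i - sum_j g_j * y_j) with
# y = softmax(x) and g = dL/dy, assuming softmax is taken over dim=1.
class Softmax:
    def forward(self, x):
        shifted = x - x.max(dim=1, keepdim=True).values  # subtract the row max for stability
        exp = shifted.exp()
        self.out = exp / exp.sum(dim=1, keepdim=True)  # cache y for backward
        return self.out

    def backward(self, grad_output):
        y = self.out
        dot = (grad_output * y).sum(dim=1, keepdim=True)  # sum_j g_j * y_j per row
        return y * (grad_output - dot)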
# Softmax: check forward against torch.nn.functional.softmax and backward
# against autograd.
softmax = Softmax()
t_in = torch.tensor([[1.0, 2.0], [1.0, 10.0]], requires_grad=True)
t_out = torch.nn.functional.softmax(t_in, dim=1)
assert softmax.forward(t_in).allclose(t_out)
b_in = torch.tensor([[0.5, 0.2], [0.1, 0.6]])
t_out.backward(b_in)
b_out = t_in.grad
# The custom softmax is expected to use the numerically stable form
# e^{x - x_max} / sum(e^{x - x_max}); subtracting the max leaves the output
# (and hence the gradient) unchanged.
assert softmax.backward(b_in).allclose(b_out)
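# A minimal sketch of DenseLayer (hypothetical reference): y = x @ W + b with
# W stored as (in_features, out_features), which is why the test below
# transposes it when copying into nn.Linear. backward returns dL/dx and stashes
# the parameter gradients.
class DenseLayer:
    def __init__(self, in_features, out_features):
        self.weights = torch.randn(in_features, out_features) * 0.1
        self.bias = torch.zeros(out_features)

    def forward(self, x):
        self.x = x  # cache the input for the weight gradient
        return x @ self.weights + self.bias

    def backward(self, grad_output):
        self.grad_weights = self.x.t() @ grad_output  # dL/dW
        self.grad_bias = grad_output.sum(dim=0)       # dL/db
        return grad_output @ self.weights.t()         # dL/dx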
# DenseLayer: compare forward and backward against nn.Linear.
batch_size = 2
in_features = 3
out_features = 4
dense_layer = DenseLayer(in_features, out_features)
torch.manual_seed(42)
dense_layer.weights.data = torch.randn(in_features, out_features) * 0.1
dense_layer.bias.data = torch.zeros(out_features)
x = torch.randn(batch_size, in_features, requires_grad=True)
output = dense_layer.forward(x)
torch_linear = nn.Linear(in_features, out_features)
torch_linear.weight.data = dense_layer.weights.data.t() # PyTorch uses transposed weights internally
torch_linear.bias.data = dense_layer.bias.data
torch_output = torch_linear(x)
assert output.allclose(torch_output)
grad_output = torch.randn(batch_size, out_features)
torch_output.backward(grad_output)
assert x.grad.allclose(dense_layer.backward(grad_output))