import torch
from torch import nn
from torch.autograd import Variable  # kept for the tutorial's PyTorch < 0.4 style; plain tensors work in newer versions
import torchvision                   # needed below: the dataset is created via torchvision.datasets.MNIST
import torchvision.datasets as dsets
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
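# The code below references EPOCH, LR and DOWNLOAD_MNIST, which the excerpt never
# defines. These are illustrative placeholder values, assumed from context rather
# than the tutorial's canonical settings.
EPOCH = 1              # run over the training set this many times; 1 keeps the demo fast
BATCH_SIZE = 64        # mini-batch size for the DataLoader
TIME_STEP = 28         # RNN time steps = image height (one image row per step)
INPUT_SIZE = 28        # RNN input size = image width (pixels per row)
LR = 0.01              # learning rate for Adam
DOWNLOAD_MNIST = True  # set to False once ./mnist/ already holds the data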
# MNIST handwritten digits
train_data = torchvision.datasets.MNIST(
    root='./mnist/',                              # where to save / load the data
    train=True,                                   # this is the training split
    transform=torchvision.transforms.ToTensor(),  # convert PIL.Image or numpy.ndarray to
                                                  # torch.FloatTensor (C x H x W), normalized
                                                  # to the [0.0, 1.0] range for training
    download=DOWNLOAD_MNIST,                      # download if not yet downloaded
)
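# The training loop further down uses `train_loader` and `rnn`, neither of which
# appears in this excerpt. A minimal sketch follows, assuming a DataLoader over
# train_data and a single-layer LSTM classifier that reads each 28x28 image row
# by row; the hidden size and last-step readout are assumptions, not necessarily
# the tutorial's exact model.
import torch.utils.data as Data

train_loader = Data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)

class RNN(nn.Module):
    def __init__(self):
        super(RNN, self).__init__()
        self.rnn = nn.LSTM(           # an LSTM; a plain nn.RNN struggles on this task
            input_size=INPUT_SIZE,    # 28 pixels per row fed in at each time step
            hidden_size=64,           # number of hidden units (assumed value)
            num_layers=1,             # a single recurrent layer
            batch_first=True,         # tensors are shaped (batch, time_step, input_size)
        )
        self.out = nn.Linear(64, 10)  # map the final hidden state to the 10 digit classes

    def forward(self, x):
        # r_out: (batch, time_step, hidden_size); None = zero-initialized hidden state
        r_out, (h_n, h_c) = self.rnn(x, None)
        # classify from the output at the last time step
        return self.out(r_out[:, -1, :])

rnn = RNN()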
optimizer = torch.optim.Adam(rnn.parameters(), lr=LR)  # optimize all parameters
loss_func = nn.CrossEntropyLoss()                      # the target label is not one-hotted
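# Why "not one-hotted": nn.CrossEntropyLoss applies log-softmax internally, so it
# expects raw logits of shape (batch, classes) and integer class indices as the
# target. A quick standalone check with hypothetical toy values:
logits = torch.randn(4, 10)                    # raw, unnormalized scores for 4 samples
targets = torch.tensor([3, 7, 0, 9])           # class indices, NOT one-hot vectors
print(nn.CrossEntropyLoss()(logits, targets))  # a scalar loss tensor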
# training and testing
for epoch in range(EPOCH):
    for step, (x, y) in enumerate(train_loader):  # gives batch data
        b_x = Variable(x.view(-1, 28, 28))        # reshape x to (batch, time_step, input_size)
        b_y = Variable(y)                         # batch y

        output = rnn(b_x)                         # rnn output
        loss = loss_func(output, b_y)             # cross entropy loss
        optimizer.zero_grad()                     # clear gradients for this training step
        loss.backward()                           # backpropagation, compute gradients
        optimizer.step()                          # apply gradients

"""
...
Epoch: 0 | train loss: 0.0945 | test accuracy: 0.94
Epoch: 0 | train loss: 0.0984 | test accuracy: 0.94
Epoch: 0 | train loss: 0.0332 | test accuracy: 0.95
Epoch: 0 | train loss: 0.1868 | test accuracy: 0.96
"""
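# The log above reports a test accuracy that nothing in this excerpt computes.
# A plausible reconstruction: score a held-out slice of the test split
# periodically during training. The 2000-sample slice and the every-50-steps
# cadence are assumptions, not confirmed by the excerpt.
test_data = torchvision.datasets.MNIST(root='./mnist/', train=False,
                                       transform=torchvision.transforms.ToTensor())
test_x = test_data.data[:2000].type(torch.FloatTensor) / 255.  # scale to [0, 1] like ToTensor
test_y = test_data.targets[:2000]

# inside the inner training loop, e.g. every 50 steps:
#     test_output = rnn(test_x.view(-1, 28, 28))        # (samples, time_step, input_size)
#     pred_y = torch.max(test_output, 1)[1]             # predicted class indices
#     accuracy = (pred_y == test_y).float().mean()
#     print('Epoch:', epoch, '| train loss: %.4f' % loss.item(),
#           '| test accuracy: %.2f' % accuracy)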