parent 3a12b95ce7
commit 826e6fe89a
@@ -0,0 +1,61 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable  # deprecated since PyTorch 0.4; kept because the code below uses it


class LeNet5(nn.Module):
    def __init__(self):
        super(LeNet5, self).__init__()
        # Convolutional feature extractor; MNIST is grayscale, so the first
        # conv must take 1 input channel.
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 16, 5, 1),   # 32x32 -> 28x28
            nn.ReLU(),
            nn.MaxPool2d(2, 2),       # 28x28 -> 14x14
            nn.Conv2d(16, 32, 5, 1),  # 14x14 -> 10x10
            nn.ReLU(),
            nn.MaxPool2d(2, 2)        # 10x10 -> 5x5
        )
        # Classifier head. Note there are no activations between these
        # linear layers, so they compose into a single affine map.
        self.fc = nn.Sequential(
            nn.Linear(32 * 5 * 5, 120),
            nn.Linear(120, 84),
            nn.Linear(84, 10)
        )

    def forward(self, x):
        x = self.layer1(x)
        x = x.view(-1, 32 * 5 * 5)  # flatten the 32 feature maps of 5x5 (800 values)
        x = self.fc(x)
        return x
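

# Quick shape sanity check (illustrative; not part of the original commit).
# Assumes the 28x28 MNIST images are padded to 32x32 by the transform below:
# conv5: 32->28, pool2: 28->14, conv5: 14->10, pool2: 10->5, so 32*5*5 = 800.
if __name__ == '__main__':
    _dummy = torch.zeros(1, 1, 32, 32)
    assert LeNet5()(_dummy).shape == (1, 10)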


# Hyperparameters
lr = 0.01
momentum = 0.5

log_interval = 10  # batches between progress logs
epochs = 10
batch_size = 64
test_batch_size = 1000
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = LeNet5().to(device)  # keep the model on the same device as the data
optimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum)

train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=True, download=True,
                   transform=transforms.Compose([
                       transforms.Pad(2),  # pad 28x28 MNIST to the 32x32 LeNet-5 expects
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=batch_size, shuffle=True)
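
# A matching test loader isn't shown in this hunk (test_batch_size above is
# otherwise unused); a minimal sketch, assuming the standard MNIST test split:
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=False,
                   transform=transforms.Compose([
                       transforms.Pad(2),
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=test_batch_size, shuffle=False)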

def train(epoch):  # training details for one epoch
    model.train()  # switch to training mode
    for batch_idx, (data, target) in enumerate(train_loader):
        data = data.to(device)
        target = target.to(device)
        data, target = Variable(data), Variable(target)  # no-op since PyTorch 0.4
        optimizer.zero_grad()  # reset gradients accumulated by the optimizer
        output = model(data)  # forward pass: feed the batch through the network
        loss = F.cross_entropy(output, target)  # cross-entropy loss
        loss.backward()  # back-propagate gradients
        optimizer.step()  # update parameters after one forward + backward pass
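        # log_interval above is otherwise unused; a hedged sketch of the usual
        # progress logging (an assumption, not shown in this hunk):
        if batch_idx % log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))


# An evaluation pass isn't shown in this hunk either; a hedged sketch using
# the test_loader defined above:
def test():
    model.eval()  # switch to evaluation mode
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            pred = model(data).argmax(dim=1)
            correct += pred.eq(target).sum().item()
    print('Test accuracy: {}/{}'.format(correct, len(test_loader.dataset)))


# The hunk is cut off here; a minimal sketch of the usual driver loop,
# assuming train() is called once per epoch:
for epoch in range(1, epochs + 1):
    train(epoch)
    test()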