学习资料
- 《动手学深度学习》（Dive into Deep Learning）
- zhihu
- 什么是卷积神经网络
基础的例子（一个用 PyTorch 实现的简单两层 MLP 回归示例）
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
from torch.utils.tensorboard import SummaryWriter
# Toy 1-D regression dataset: four (x, y) pairs, roughly y ≈ 2x.
# Built directly as float32 tensors; values match the original numpy-based setup.
x_train = torch.tensor([[1.0], [2.0], [3.0], [4.0]], dtype=torch.float32)
y_train = torch.tensor([[2.0], [4.0], [7.0], [9.0]], dtype=torch.float32)
class Net(nn.Module):
    """A small two-layer MLP (1 -> 200 -> 1) for one-dimensional regression.

    Fix: the original block had its indentation stripped (class body at
    column 0), which is a SyntaxError; structure restored here.
    """

    def __init__(self):
        super().__init__()  # modern zero-arg super() (Python 3)
        self.fc = nn.Linear(1, 200, bias=True)   # input layer: 1 feature -> 200 hidden units
        self.relu = nn.ReLU()                    # elementwise non-linearity
        self.fc1 = nn.Linear(200, 1, bias=True)  # output layer: 200 hidden units -> 1 value

    def forward(self, x):
        """Forward pass: linear -> ReLU -> linear.

        Args:
            x: input tensor; last dimension must be 1 to match ``self.fc``.

        Returns:
            Tensor with the same leading dimensions and last dimension 1.
        """
        x = self.fc(x)
        x = self.relu(x)
        x = self.fc1(x)
        return x
# Training setup: MSE regression on the toy dataset with plain SGD,
# logging the loss to TensorBoard each epoch.
net = Net()
criterion = nn.MSELoss()
optimizer = optim.SGD(net.parameters(), lr=0.01)
writer = SummaryWriter()

num_epochs = 10000
for epoch in range(num_epochs):
    y_pred = net(x_train)             # forward pass on the full (tiny) training set
    loss = criterion(y_pred, y_train)
    optimizer.zero_grad()             # clear gradients from the previous step
    loss.backward()
    optimizer.step()
    writer.add_scalar('Loss/train', loss.item(), epoch)
    if (epoch + 1) % 100 == 0:
        # Bug fix: the original hard-coded 1000 as the epoch total
        # while the loop actually runs 10000 epochs.
        print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch + 1, num_epochs, loss.item()))
# Inference on an unseen point, then inspect the trained parameters
# and close the TensorBoard writer.
x_test = torch.tensor([[7.0]])
y_test = net(x_test)

# Trainable parameters and their shapes, as seen by the optimizer.
for name, param in net.named_parameters():
    print(f'named_parameters: {name}: {param.shape}')

# Full parameter values via the serializable state_dict.
for name, param in net.state_dict().items():
    print(f'state_dict: {name}: {param}')

print(f'预测结果: {y_test.item()}')
writer.close()