学习资料

  1. 《动手学深度学习》（Dive into Deep Learning）
  2. 知乎上的相关专栏文章
  3. 什么是卷积神经网络

基础的例子


import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
from torch.utils.tensorboard import SummaryWriter

# Toy regression dataset: four (x, y) pairs, created directly as
# float32 tensors (torch.tensor defaults to float32 for float literals).
x_train = torch.tensor([[1.0], [2.0], [3.0], [4.0]])
y_train = torch.tensor([[2.0], [4.0], [7.0], [9.0]])

class Net(nn.Module):
    """A one-hidden-layer MLP for scalar regression: 1 -> 200 -> ReLU -> 1.

    Attribute names (fc, relu, fc1) are part of the observable interface:
    they appear in named_parameters() / state_dict() output.
    """

    def __init__(self):
        super().__init__()
        # Layers are created in the same order as before so that the
        # random weight initialization consumes the RNG identically.
        self.fc = nn.Linear(1, 200, bias=True)
        self.relu = nn.ReLU()
        self.fc1 = nn.Linear(200, 1, bias=True)

    def forward(self, x):
        """Map input of shape (batch, 1) to output of shape (batch, 1)."""
        hidden = self.relu(self.fc(x))
        return self.fc1(hidden)

# Instantiate the model defined above.
net = Net()

# Loss: mean squared error; optimizer: plain SGD over all model parameters.
criterion = nn.MSELoss()
optimizer = optim.SGD(net.parameters(), lr=0.01)

# TensorBoard writer; with no arguments it logs to the default ./runs/ directory.
writer = SummaryWriter()

# Train the model with full-batch gradient descent.
EPOCHS = 10000  # single source of truth for the epoch count

for epoch in range(EPOCHS):
    # Forward pass: predict on the entire (tiny) training set at once.
    y_pred = net(x_train)

    # Mean-squared-error loss against the targets.
    loss = criterion(y_pred, y_train)

    # Backward pass and parameter update.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    # Log the scalar training loss for TensorBoard.
    writer.add_scalar('Loss/train', loss.item(), epoch)

    # Progress report every 100 epochs. Bug fix: the original printed a
    # hard-coded total of 1000 while the loop actually ran 10000 epochs.
    if (epoch + 1) % 100 == 0:
        print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch + 1, EPOCHS, loss.item()))

# Run the trained network on a held-out input.
# NOTE(review): this runs without torch.no_grad(), so autograd still tracks
# the forward pass; harmless here but wasteful for pure inference.
x_test = torch.tensor([[7.0]])
y_test = net(x_test)

# Print each learnable parameter's name and shape.
for name, param in net.named_parameters():
    print('named_parameters: {}: {}'.format(name, param.shape))

# Print the full state dict (parameter names and their tensor values).
for name, param in net.state_dict().items():
    print('state_dict: {}: {}'.format(name, param))
print('预测结果: {}'.format(y_test.item()))

# Flush pending events and close the TensorBoard writer.
writer.close()