LLM Study Notes - 5

Summary: today I want to tidy up the most common PyTorch operations so I can review them later (as if I will).

1. Implementing a multi-layer neural network

This is a very common pattern that you should know by heart.

import torch

class NeuralNetwork(torch.nn.Module):
    def __init__(self, num_inputs, num_outputs):
        super().__init__()

        self.layers = torch.nn.Sequential(
                
            # first hidden layer
            torch.nn.Linear(num_inputs, 30),
            torch.nn.ReLU(),

            # second hidden layer
            torch.nn.Linear(30, 20),
            torch.nn.ReLU(),

            # output layer
            torch.nn.Linear(20, num_outputs),
        )

    def forward(self, x):
        logits = self.layers(x)
        return logits
        
model = NeuralNetwork(50, 3)
print(model)

NeuralNetwork(
  (layers): Sequential(
    (0): Linear(in_features=50, out_features=30, bias=True)
    (1): ReLU()
    (2): Linear(in_features=30, out_features=20, bias=True)
    (3): ReLU()
    (4): Linear(in_features=20, out_features=3, bias=True)
  )
)
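
As a quick sanity check, you can push a random batch through the model and count its trainable parameters (a hypothetical snippet, not from the original notes):

X = torch.rand(4, 50)       # a random batch: 4 samples, 50 features each
out = model(X)
print(out.shape)            # torch.Size([4, 3]) -- one logit per class

num_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(num_params)           # 2213 trainable parameters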

2. A training-loop example

Note: this loop assumes a train_loader has already been built; section 5 below shows the full setup.

import torch.nn.functional as F


torch.manual_seed(123)
model = NeuralNetwork(num_inputs=2, num_outputs=2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.5)

num_epochs = 3

for epoch in range(num_epochs):
    
    model.train()
    for batch_idx, (features, labels) in enumerate(train_loader):

        logits = model(features)
        
        loss = F.cross_entropy(logits, labels)  # loss function
        
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    
        ### logging
        print(f"Epoch: {epoch+1:03d}/{num_epochs:03d}"
              f" | Batch {batch_idx:03d}/{len(train_loader):03d}"
              f" | Train/Val Loss: {loss:.2f}")

    model.eval()
    # optional model-evaluation step

Epoch: 001/003 | Batch 000/002 | Train Loss: 0.75
Epoch: 001/003 | Batch 001/002 | Train Loss: 0.65
Epoch: 002/003 | Batch 000/002 | Train Loss: 0.44
Epoch: 002/003 | Batch 001/002 | Train Loss: 0.13
Epoch: 003/003 | Batch 000/002 | Train Loss: 0.03
Epoch: 003/003 | Batch 001/002 | Train Loss: 0.00
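
After training, the raw logits can be converted into class probabilities with softmax. A minimal inference sketch (the 2-feature input tensor here is hypothetical):

model.eval()
with torch.no_grad():
    logits = model(torch.tensor([[1.0, 0.5]]))   # hypothetical 2-feature input
    probas = torch.softmax(logits, dim=1)        # logits -> class probabilities
    prediction = torch.argmax(probas, dim=1)     # most likely class index
print(probas, prediction)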

3. Saving and loading a model

Saving comes down to a single line:

torch.save(model.state_dict(), "model.pth")
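
Loading it back takes two steps: recreate a model with the same architecture, then load the saved weights into it (a minimal sketch, assuming the NeuralNetwork class from section 1 is in scope):

model = NeuralNetwork(50, 3)                     # must match the saved architecture
model.load_state_dict(torch.load("model.pth"))
model.eval()                                     # switch to inference mode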

4. Using the GPU to speed up training

The "CUDA" we so often talk about is NVIDIA's GPU computing platform; in PyTorch it is what lets us train on the GPU.

import torch
# check whether PyTorch can see a GPU
print(torch.cuda.is_available())

If this prints True, a GPU is available; otherwise you fall back to the CPU.

# pick the device based on availability
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
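
The device object alone changes nothing: the model and every batch of tensors must be moved onto it explicitly with .to(). A minimal sketch (model, features, and labels are assumed to exist):

model = model.to(device)            # move the parameters once
features = features.to(device)      # move every batch, every step
labels = labels.to(device)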

5. Using everything above for a complete training run

Here we build a simple neural network for a binary-classification problem, use PyTorch's Dataset and DataLoader classes to load the data in batches, and define a helper function that computes the model's accuracy.

import torch
X_train = torch.tensor([
    [-1.2, 3.1],
    [-0.9, 2.9],
    [-0.5, 2.6],
    [2.3, -1.1],
    [2.7, -1.5]
])
y_train = torch.tensor([0, 0, 0, 1, 1])
X_test = torch.tensor([
    [-0.8, 2.8],
    [2.6, -1.6],
])
y_test = torch.tensor([0, 1])



from torch.utils.data import Dataset

class ToyDataset(Dataset):
    def __init__(self, X, y):
        self.features = X
        self.labels = y

    def __getitem__(self, index):
        # return one (features, label) pair
        one_x = self.features[index]
        one_y = self.labels[index]
        return one_x, one_y

    def __len__(self):
        # total number of samples
        return self.labels.shape[0]

train_ds = ToyDataset(X_train, y_train)
test_ds = ToyDataset(X_test, y_test)
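
With __getitem__ and __len__ defined, indexing and len() work as expected. A quick hypothetical check:

print(len(train_ds))   # 5
print(train_ds[0])     # (tensor([-1.2000,  3.1000]), tensor(0))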



from torch.utils.data import DataLoader

torch.manual_seed(123)

train_loader = DataLoader(
    dataset=train_ds,
    batch_size=2,
    shuffle=True,      # reshuffle the training data every epoch
    num_workers=1,     # background worker processes for loading
    drop_last=True     # drop the last incomplete batch
)
test_loader = DataLoader(
    dataset=test_ds,
    batch_size=2,
    shuffle=False,     # no need to shuffle for evaluation
    num_workers=1
)
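
Iterating over a loader yields (features, labels) batches; with batch_size=2 and drop_last=True, the 5 training samples become 2 full batches. A hypothetical inspection loop:

for idx, (x, y) in enumerate(train_loader):
    print(f"Batch {idx}: features {x.shape}, labels {y.shape}")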



class NeuralNetwork(torch.nn.Module):
    def __init__(self, num_inputs, num_outputs):
        super().__init__()
        self.layers = torch.nn.Sequential(
            # first hidden layer
            torch.nn.Linear(num_inputs, 30),
            torch.nn.ReLU(),
            # second hidden layer
            torch.nn.Linear(30, 20),
            torch.nn.ReLU(),
            # output layer
            torch.nn.Linear(20, num_outputs),
        )
    def forward(self, x):
        logits = self.layers(x)
        return logits



# use accuracy as the evaluation metric
def compute_accuracy(model, dataloader, device):
    model = model.eval()
    correct = 0.0
    total_examples = 0
    for idx, (features, labels) in enumerate(dataloader):
        # move the batch to the target device
        features, labels = features.to(device), labels.to(device)
        with torch.no_grad():
            logits = model(features)
        # take the highest-scoring class as the prediction and count the hits
        predictions = torch.argmax(logits, dim=1)
        compare = labels == predictions
        correct += torch.sum(compare)
        total_examples += len(compare)
    # return the fraction of correct predictions
    return (correct / total_examples).item()




import torch.nn.functional as F

# set the random seed for reproducibility
torch.manual_seed(123)
# create the model
model = NeuralNetwork(num_inputs=2, num_outputs=2)
# pick the device based on availability
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# move the model to the chosen device
model = model.to(device)
# optimizer: stochastic gradient descent (SGD)
optimizer = torch.optim.SGD(model.parameters(), lr=0.5)
# number of training epochs
num_epochs = 3

for epoch in range(num_epochs):
    model.train()
    for batch_idx, (features, labels) in enumerate(train_loader):
        # move the batch to the same device as the model
        features, labels = features.to(device), labels.to(device)
        logits = model(features)
        loss = F.cross_entropy(logits, labels)  # loss function
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        ### training log
        print(f"Epoch: {epoch+1:03d}/{num_epochs:03d}"
              f" | Batch {batch_idx:03d}/{len(train_loader):03d}"
              f" | Train Loss: {loss:.2f}")
    model.eval()

print(f"accuracy: {compute_accuracy(model, train_loader, device=device)}")

Epoch: 001/003 | Batch 000/002 | Train Loss: 0.75
Epoch: 001/003 | Batch 001/002 | Train Loss: 0.65
Epoch: 002/003 | Batch 000/002 | Train Loss: 0.44
Epoch: 002/003 | Batch 001/002 | Train Loss: 0.13
Epoch: 003/003 | Batch 000/002 | Train Loss: 0.03
Epoch: 003/003 | Batch 001/002 | Train Loss: 0.00
accuracy: 1.0
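
The test_loader built earlier was never used; the same helper can score the held-out test set (a follow-up sketch; no output for it appears in the original run):

print(f"test accuracy: {compute_accuracy(model, test_loader, device=device)}")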
