来源:投稿 作者:Fairy
编辑:学姐
PyTorch,作为一款开源的机器学习库,以其灵活性和易用性在深度学习领域迅速崛起。无论你是初学者还是经验丰富的开发者,掌握PyTorch的常用操作都是提升深度学习技能的关键。本文将详细介绍PyTorch的24个常用操作,帮助你更好地理解和应用这一强大的工具。
一、基础张量操作
-
import torch

# Tensor built directly from a Python list (integer values).
tensor_from_list = torch.tensor([1, 2, 3, 4])
print(tensor_from_list)

# 2x3 tensor filled with zeros (default floating-point dtype).
zeros_tensor = torch.zeros((2, 3))
print(zeros_tensor)

# 2x3 tensor filled with ones.
ones_tensor = torch.ones((2, 3))
print(ones_tensor)

# 2x3 tensor of uniform random samples drawn from [0, 1).
rand_tensor = torch.rand((2, 3))
print(rand_tensor)
-
# Reinterpret the 2x3 random tensor as 3x2 (same 6 elements).
reshaped_tensor = torch.reshape(rand_tensor, (3, 2))
print(reshaped_tensor)

# Prepend a new length-1 axis: shape goes from (2, 3) to (1, 2, 3).
unsqueeze_tensor = rand_tensor.unsqueeze(dim=0)
print(unsqueeze_tensor.shape)

# Collapse every dimension into a single 1-D tensor of 6 elements.
flatten_tensor = rand_tensor.reshape(-1)
print(flatten_tensor)
-
# 沿指定维度拼接张量
tensor1 = torch.tensor([[1, 2], [3, 4]])
tensor2 = torch.tensor([[5, 6], [7, 8]])
concatenated_tensor = torch.cat((tensor1, tensor2), dim=0)
print(concatenated_tensor)
# 在新的维度上堆叠张量
stacked_tensor = torch.stack((tensor1, tensor2), dim=0)
print(stacked_tensor)
-
# Fetch a single element by row/column index (row 0, column 1).
element = tensor1[0][1]
print(element)

# Slice: every row, columns from index 1 onward.
sliced_tensor = tensor1[..., 1:]
print(sliced_tensor)
-
# Elementwise addition of two same-shaped tensors.
sum_tensor = tensor1 + tensor2
print(sum_tensor)

# Elementwise (Hadamard) product — NOT matrix multiplication.
prod_tensor = tensor1 * tensor2
print(prod_tensor)

# Reductions: sum, mean, max, min.
# torch.mean only supports floating-point (or complex) dtypes, so the
# integer tensor must be cast first — without .float() this raises
# RuntimeError on the int64 tensor created above.
tensor_sum = torch.sum(tensor1)
tensor_mean = torch.mean(tensor1.float())
tensor_max = torch.max(tensor1)
tensor_min = torch.min(tensor1)
print(tensor_sum, tensor_mean, tensor_max, tensor_min)
二、高级张量操作
-
# Swap the first two axes (for a 2-D tensor this is the matrix transpose).
transposed_tensor = torch.transpose(tensor1, 0, 1)
print(transposed_tensor)
-
# Insert a length-1 axis at position 1: shape (2, 2) becomes (2, 1, 2).
expanded_tensor = tensor1.unsqueeze(dim=1)
print(expanded_tensor.shape)
-
# Gather rows 0 and 1 along dim 0 (here this selects the whole tensor).
selected_tensor = tensor1.index_select(0, torch.tensor([0, 1]))
print(selected_tensor)
-
# Boolean mask marking elements strictly greater than 2.
mask = tensor1 > 2
# Boolean indexing returns the selected elements as a flat 1-D tensor,
# identically to torch.masked_select.
masked_tensor = tensor1[mask]
print(masked_tensor)
-
# Split along dim 0 into pieces of (at most) 2 rows each.
# NOTE: the original loop body was not indented, which is a syntax
# error in Python; the print must sit inside the for loop.
split_tensors = torch.split(tensor1, split_size_or_sections=2, dim=0)
for part in split_tensors:
    print(part)
-
# Divide the tensor into 2 chunks along dim 0.
# NOTE: the original loop body was not indented, which is a syntax
# error in Python; the print must sit inside the for loop.
chunked_tensors = torch.chunk(tensor1, chunks=2, dim=0)
for part in chunked_tensors:
    print(part)
-
# Largest k=2 values per row (dim=1), returned in descending order
# together with their column indices.
topk_tensor, topk_indices = tensor1.topk(k=2, dim=1, largest=True, sorted=True)
print(topk_tensor, topk_indices)
三、神经网络组件
-
import torch.nn as nn

# Fully connected (affine) layer mapping 10 input features to 5 outputs.
input_size = 10
output_size = 5
linear_layer = nn.Linear(in_features=input_size, out_features=output_size)
print(linear_layer)
-
# 2-D convolution: 3 input channels -> 16 output channels,
# 3x3 kernel, stride 1, padding 1 (spatial size preserved).
conv_layer = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1)
print(conv_layer)
-
# ReLU: clamps negative values to zero, passes positives through.
relu_layer = nn.ReLU()
relu_input = torch.tensor([-1.0, 0.0, 1.0, 2.0])
output = relu_layer(relu_input)
print(output)

# Sigmoid: squashes every value into the open interval (0, 1).
sigmoid_layer = nn.Sigmoid()
sigmoid_input = torch.tensor([-1.0, 0.0, 1.0])
output = sigmoid_layer(sigmoid_input)
print(output)
-
# Cross-entropy loss for multi-class classification: expects raw
# (unnormalized) scores of shape (batch, classes) plus integer labels.
criterion = nn.CrossEntropyLoss()
# Renamed from `input` — that name shadows the Python built-in input().
logits = torch.randn(3, 5, requires_grad=True)
# Random class labels in [0, 5) for a batch of 3 samples.
target = torch.empty(3, dtype=torch.long).random_(5)
loss = criterion(logits, target)
print(loss)
-
# Adam optimizer over the linear layer's parameters, learning rate 1e-3.
optimizer = torch.optim.Adam(linear_layer.parameters(), lr=1e-3)
print(optimizer)
四、数据处理与加载
-
from torchvision import transforms

# Resize every image to 128x128 pixels.
resize_transform = transforms.Resize((128, 128))

# Per-channel normalization using the standard ImageNet statistics.
imagenet_mean = [0.485, 0.456, 0.406]
imagenet_std = [0.229, 0.224, 0.225]
normalize_transform = transforms.Normalize(mean=imagenet_mean, std=imagenet_std)
-
from torchvision.datasets import CIFAR10

# Load the CIFAR-10 training split, downloading it on first use.
# ToTensor() must sit between Resize and Normalize: Resize operates on
# the PIL image, while Normalize requires a tensor — the original
# pipeline (Resize -> Normalize) would fail on the PIL input.
pipeline = transforms.Compose([
    resize_transform,
    transforms.ToTensor(),
    normalize_transform,
])
dataset = CIFAR10(root='./data', train=True, download=True, transform=pipeline)
print(dataset)
-
from torch.utils.data import DataLoader

# Batch and shuffle the dataset; num_workers=2 loads batches in
# subprocesses (on spawn-based platforms this should run under an
# `if __name__ == "__main__":` guard).
# NOTE: the original loop body was not indented, which is a syntax
# error in Python; print and break must sit inside the for loop.
dataloader = DataLoader(dataset, batch_size=4, shuffle=True, num_workers=2)
for images, labels in dataloader:
    print(images.shape, labels.shape)
    break  # one batch is enough to demonstrate the shapes
五、模型训练与评估
-
# Run one forward pass through the fully connected layer
# with a single random sample of input_size features.
input_data = torch.randn((1, input_size))
output_data = linear_layer(input_data)
print(output_data)
-
# Mean-squared-error loss between the layer's output and a
# random regression target of matching shape.
target = torch.randn(1, output_size)
loss_fn = nn.MSELoss()
loss = loss_fn(output_data, target)

# Clear any previously accumulated gradients, then backpropagate
# from the scalar loss.
optimizer.zero_grad()
loss.backward()

# backward() populated .grad on each parameter, including the weights.
print(linear_layer.weight.grad)
-
# Apply one optimization step: updates the model parameters in place
# using the gradients computed by the preceding loss.backward() call.
optimizer.step()
-