Basic Configuration
Imports and version checks
- Print the torch, CUDA, and cuDNN versions and the GPU name:

```python
import torch
import torch.nn as nn
import torchvision

print(torch.__version__)               # PyTorch version
print(torch.version.cuda)              # CUDA version
print(torch.backends.cudnn.version())  # cuDNN version
print(torch.cuda.get_device_name(0))   # name of GPU 0
```
Reproducibility
- Fix the random seeds of torch and numpy:

```python
import numpy as np
import torch

np.random.seed(0)
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
```
Configuring GPUs
- Single GPU:

```python
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
```
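Once `device` is set, the usual pattern is to move both the model and the data onto it. A minimal sketch:

```python
import torch
import torch.nn as nn

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = nn.Linear(10, 2).to(device)         # move the parameters to the device
inputs = torch.randn(8, 10, device=device)  # create the data directly on the device
outputs = model(inputs)
```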
- Multiple GPUs:

```python
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'  # must be set before the first CUDA call
```
- Clear GPU memory:

```python
# Releases cached blocks held by PyTorch's allocator;
# tensors that are still referenced are not freed.
torch.cuda.empty_cache()
```
Tensor Processing
Tensor data types
- PyTorch has 9 tensor types, each with a CPU and a GPU variant.
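For reference, a sketch listing the 9 dtypes and the tensor classes they map to:

```python
import torch

# dtype          CPU tensor class    GPU tensor class
# torch.float32  torch.FloatTensor   torch.cuda.FloatTensor
# torch.float64  torch.DoubleTensor  torch.cuda.DoubleTensor
# torch.float16  torch.HalfTensor    torch.cuda.HalfTensor
# torch.uint8    torch.ByteTensor    torch.cuda.ByteTensor
# torch.int8     torch.CharTensor    torch.cuda.CharTensor
# torch.int16    torch.ShortTensor   torch.cuda.ShortTensor
# torch.int32    torch.IntTensor     torch.cuda.IntTensor
# torch.int64    torch.LongTensor    torch.cuda.LongTensor
# torch.bool     torch.BoolTensor    torch.cuda.BoolTensor

x = torch.zeros(2, 2, dtype=torch.float16)
print(x.dtype)   # torch.float16
print(x.type())  # 'torch.HalfTensor'
```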
Basic tensor information
- Type, data type, shape, and number of dimensions of a tensor:

```python
tensor = torch.randn(3, 4, 5)
print(tensor.type())                # tensor class, e.g. 'torch.FloatTensor'
print(tensor.dtype)                 # data type, e.g. torch.float32
print(tensor.size(), tensor.shape)  # shape, two equivalent ways
print(tensor.dim())                 # number of dimensions
```
- Named tensors: give each dimension a name to prevent indexing mistakes:

```python
# Before PyTorch 1.3, the dimension layout could only be documented in comments:
# Tensor[N, C, H, W]
images = torch.randn(32, 3, 56, 56)
images.sum(dim=1)
images.select(dim=1, index=0)

# Since PyTorch 1.3, dimensions can be named explicitly:
NCHW = ['N', 'C', 'H', 'W']
images = torch.randn(32, 3, 56, 56, names=NCHW)
images.sum('C')
images.select('C', index=0)

# Names can also be assigned at creation:
tensor = torch.rand(3, 4, 1, 2, names=('C', 'N', 'H', 'W'))

# align_to conveniently reorders the dimensions:
tensor = tensor.align_to('N', 'C', 'H', 'W')
```
Data type conversion
- Default type and type conversions:

```python
# Set the default tensor type. In PyTorch, FloatTensor is much faster than DoubleTensor.
torch.set_default_tensor_type(torch.FloatTensor)

# Type conversions
tensor = tensor.cuda()
tensor = tensor.cpu()
tensor = tensor.float()
tensor = tensor.long()
```
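On recent PyTorch releases, `torch.set_default_tensor_type` is deprecated; setting the default floating-point dtype is the recommended replacement:

```python
import torch

torch.set_default_dtype(torch.float32)  # new floating-point tensors default to float32
print(torch.empty(1).dtype)             # torch.float32
```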
- Conversion between torch.Tensor and np.ndarray:

```python
ndarray = tensor.cpu().numpy()
tensor = torch.from_numpy(ndarray).float()
tensor = torch.from_numpy(ndarray.copy()).float()  # If ndarray has negative stride.
```
Tensor shape manipulation
- Reshaping:

```python
# Reshaping is typically needed when feeding the output of a convolutional layer
# into a fully connected layer. Compared with torch.view, torch.reshape also
# handles non-contiguous input tensors automatically.
tensor = torch.rand(2, 3, 4)
shape = (6, 4)
tensor = torch.reshape(tensor, shape)
```
- Shuffling:

```python
tensor = tensor[torch.randperm(tensor.size(0))]  # shuffle along the first dimension
```
- Horizontal flip:

```python
# PyTorch does not support negative-stride slicing such as tensor[::-1];
# a horizontal flip can be implemented with index selection instead.
# Assume the tensor has shape [N, D, H, W].
tensor = tensor[:, :, :, torch.arange(tensor.size(3) - 1, -1, -1).long()]
```
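`torch.flip` (available since PyTorch 0.4.1) achieves the same result more concisely:

```python
import torch

tensor = torch.rand(32, 3, 56, 56)
flipped = torch.flip(tensor, dims=[3])  # flip along the last (width) dimension
```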
- Copying tensors:

| Operation | New/Shared memory | Still in computation graph |
| --- | --- | --- |
| tensor.clone() | New | Yes |
| tensor.detach() | Shared | No |
| tensor.detach().clone() | New | No |
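A minimal sketch verifying the behavior summarized in the table:

```python
import torch

x = torch.ones(3, requires_grad=True)
y = x.clone()    # new memory; still part of the graph
z = x.detach()   # shared memory; removed from the graph

y.sum().backward()
print(x.grad)           # tensor([1., 1., 1.]) -- gradients flowed through clone()
print(z.requires_grad)  # False -- detach() cut the graph

z[0] = 42.0             # writes through to x because the storage is shared
print(x[0].item())      # 42.0
```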
- Concatenation:

```python
'''
Note the difference between torch.cat and torch.stack: torch.cat concatenates
along an existing dimension, while torch.stack inserts a new dimension.
For example, given three 10x5 tensors as input, torch.cat produces a 30x5
tensor, while torch.stack produces a 3x10x5 tensor.
'''
tensor = torch.cat(list_of_tensors, dim=0)
tensor = torch.stack(list_of_tensors, dim=0)
```
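A quick check of the shapes described above:

```python
import torch

list_of_tensors = [torch.rand(10, 5) for _ in range(3)]
print(torch.cat(list_of_tensors, dim=0).shape)    # torch.Size([30, 5])
print(torch.stack(list_of_tensors, dim=0).shape)  # torch.Size([3, 10, 5])
```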
- Expanding:

```python
# Expand a tensor of shape 64*512 to shape 64*512*7*7.
tensor = torch.rand(64, 512)
tensor = torch.reshape(tensor, (64, 512, 1, 1)).expand(64, 512, 7, 7)
```
Tensor computation
- Extract the value of a single-element tensor:

```python
value = torch.rand(1).item()
```
- Find non-zero elements:

```python
torch.nonzero(tensor)               # indices of non-zero elements
torch.nonzero(tensor == 0)          # indices of zero elements
torch.nonzero(tensor).size(0)       # number of non-zero elements
torch.nonzero(tensor == 0).size(0)  # number of zero elements
```
- Convert integer labels to one-hot encoding:

```python
# PyTorch labels start from 0 by default.
tensor = torch.tensor([0, 2, 1, 3])
N = tensor.size(0)
num_classes = 4
one_hot = torch.zeros(N, num_classes).long()
one_hot.scatter_(dim=1,
                 index=torch.unsqueeze(tensor, dim=1),
                 src=torch.ones(N, num_classes).long())
```
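Recent PyTorch versions also provide `torch.nn.functional.one_hot`, which does this in one call:

```python
import torch
import torch.nn.functional as F

tensor = torch.tensor([0, 2, 1, 3])
one_hot = F.one_hot(tensor, num_classes=4)  # shape (4, 4), dtype torch.int64
```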
- Test whether two tensors are equal:

```python
torch.allclose(tensor1, tensor2)  # float tensors, equal within a tolerance
torch.equal(tensor1, tensor2)     # int tensors, exactly equal
```
- Matrix multiplication:

```python
# Matrix multiplication: (m*n) * (n*p) -> (m*p).
result = torch.mm(tensor1, tensor2)

# Batch matrix multiplication: (b*m*n) * (b*n*p) -> (b*m*p).
result = torch.bmm(tensor1, tensor2)

# Element-wise multiplication.
result = tensor1 * tensor2
```
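`torch.matmul` generalizes both `torch.mm` and `torch.bmm` by broadcasting over batch dimensions:

```python
import torch

a = torch.randn(4, 3, 5)
b = torch.randn(5, 2)
print(torch.matmul(a, b).shape)  # torch.Size([4, 3, 2]); b is broadcast over the batch
```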
- Compute pairwise Euclidean distances between two sets of points:

```python
# X1 has shape (m, d) and X2 has shape (n, d); dist has shape (m, n).
dist = torch.sqrt(torch.sum((X1[:, None, :] - X2) ** 2, dim=2))
```
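The built-in `torch.cdist` computes the same matrix and is usually preferable:

```python
import torch

X1 = torch.randn(100, 64)
X2 = torch.randn(200, 64)
dist = torch.cdist(X1, X2, p=2)  # shape (100, 200)
expected = torch.sqrt(torch.sum((X1[:, None, :] - X2) ** 2, dim=2))
print(torch.allclose(dist, expected, atol=1e-5))  # True
```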