Notes on Common PyTorch Operations

Published: 2020-01-25 · Category: Deep Learning
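
The snippets below assume the standard imports, which are omitted from the individual blocks:

import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np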

Counting total model parameters in PyTorch

def get_parameter_number(net):
    # total number of parameters
    total_num = sum(p.numel() for p in net.parameters())
    # only the parameters the optimizer will actually update
    trainable_num = sum(p.numel() for p in net.parameters() if p.requires_grad)
    return {'Total': total_num, 'Trainable': trainable_num}
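
For example, on a small throwaway model (the architecture here is arbitrary, purely for illustration):

net = nn.Sequential(nn.Linear(10, 20), nn.ReLU(), nn.Linear(20, 2))
print(get_parameter_number(net))
# {'Total': 262, 'Trainable': 262}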

CReLU

From the official documentation:

An implementation of CReLU - https://arxiv.org/abs/1603.05201

>>> m = nn.ReLU()
>>> input = torch.randn(2).unsqueeze(0)
>>> output = torch.cat((m(input),m(-input)))

An unofficial implementation:

class CReLU(nn.Module):
    def __init__(self):
        super(CReLU, self).__init__()

    def forward(self, x):
        # concatenate ReLU(x) and ReLU(-x) along the channel dimension
        return torch.cat((F.relu(x), F.relu(-x)), dim=1)
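
A quick shape check: CReLU doubles the channel dimension (the input shape below is arbitrary):

x = torch.randn(4, 16, 8, 8)    # (N, C, H, W)
print(CReLU()(x).shape)         # torch.Size([4, 32, 8, 8]), channels doubled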

cross_entropy2d

def cross_entropy2d(input, target, weight=None, size_average=True):
    # input: (n, c, h, w), target: (n, h, w)
    n, c, h, w = input.size()
    # log_p: (n, c, h, w)
    log_p = F.log_softmax(input, dim=1)
    # reshape to (n*h*w, c), keeping only pixels whose label is >= 0
    log_p = log_p.transpose(1, 2).transpose(2, 3).contiguous()
    log_p = log_p[target.view(n, h, w, 1).repeat(1, 1, 1, c) >= 0]
    log_p = log_p.view(-1, c)
    # target: (n*h*w,); negative labels are treated as "ignore"
    mask = target >= 0
    target = target[mask]
    loss = F.nll_loss(log_p, target, weight=weight, reduction='sum')
    if size_average:
        loss /= mask.sum()
    return loss
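
A minimal sanity check, assuming 21 classes and using negative labels as "ignore" pixels (that is what the target >= 0 mask filters out):

logits = torch.randn(2, 21, 8, 8)           # (n, c, h, w)
target = torch.randint(0, 21, (2, 8, 8))    # (n, h, w)
target[0, 0, 0] = -1                        # pixels with a negative label are ignored
print(cross_entropy2d(logits, target).item())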

bilinear_kernel

def bilinear_kernel(in_channels, out_channels, kernel_size):
    factor = (kernel_size + 1) // 2
    if kernel_size % 2 == 1:
        center = factor - 1
    else:
        center = factor - 0.5
    og = np.ogrid[:kernel_size, :kernel_size]
    filt = (1 - abs(og[0] - center) / factor) * \
           (1 - abs(og[1] - center) / factor)
    weight = np.zeros((in_channels, out_channels, kernel_size, kernel_size),
                      dtype='float32')
    # fill the (i, i) channel pairs with the bilinear filter;
    # as used here this assumes in_channels == out_channels
    weight[range(in_channels), range(out_channels), :, :] = filt
    return torch.tensor(weight)
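
Typical use: initialize a transposed convolution so that, before any training, it performs plain bilinear upsampling (the channel count and input size below are arbitrary):

conv_trans = nn.ConvTranspose2d(3, 3, kernel_size=4, padding=1, stride=2, bias=False)
conv_trans.weight.data.copy_(bilinear_kernel(3, 3, 4))

x = torch.rand(1, 3, 32, 32)
print(conv_trans(x).shape)   # torch.Size([1, 3, 64, 64]), i.e. 2x upsampling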

FlattenLayer

class FlattenLayer(nn.Module):
    def __init__(self):
        super(FlattenLayer, self).__init__()

    def forward(self, X):
        return X.view(X.shape[0], -1)
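
Usage: it flattens everything after the batch dimension; more recent PyTorch releases ship an equivalent built-in, nn.Flatten.

X = torch.randn(4, 3, 8, 8)
print(FlattenLayer()(X).shape)   # torch.Size([4, 192]), since 3*8*8 = 192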

GlobalAvgPool2d

class GlobalAvgPool2d(nn.Module):
    def __init__(self):
        super(GlobalAvgPool2d, self).__init__()

    def forward(self, x):
        return F.avg_pool2d(x, kernel_size=x.size()[2:])
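
Usage: pooling with a window equal to the full spatial size collapses (n, c, h, w) to (n, c, 1, 1); the built-in nn.AdaptiveAvgPool2d(1) is equivalent.

x = torch.randn(4, 3, 8, 8)
print(GlobalAvgPool2d()(x).shape)   # torch.Size([4, 3, 1, 1])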

Custom parameter initialization for specific layers

# 'net' is assumed to be an nn.Sequential backbone (e.g. a truncated ResNet);
# append a 1x1 conv plus a transposed conv as the segmentation head
net.add_module("LinearTranspose", nn.Conv2d(512, num_classes, kernel_size=1))
net.add_module("ConvTranspose2d",
               nn.ConvTranspose2d(num_classes, num_classes, kernel_size=64, padding=16, stride=32))
# replace the default initialization of the last two layers
net[-1].weight = nn.Parameter(bilinear_kernel(num_classes, num_classes, 64), requires_grad=True)
nn.init.xavier_uniform_(net[-2].weight)  # initializes in place; no reassignment needed

The bilinear_kernel helper used here is the same one defined in the bilinear_kernel section above.
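
A quick end-to-end check of such an initialized head, with num_classes and the feature-map size as placeholder values:

num_classes = 21
head = nn.Sequential(
    nn.Conv2d(512, num_classes, kernel_size=1),
    nn.ConvTranspose2d(num_classes, num_classes, kernel_size=64, padding=16, stride=32),
)
head[-1].weight = nn.Parameter(bilinear_kernel(num_classes, num_classes, 64), requires_grad=True)
nn.init.xavier_uniform_(head[0].weight)

x = torch.rand(1, 512, 10, 10)   # stand-in for a backbone feature map
print(head(x).shape)             # torch.Size([1, 21, 320, 320]), 32x upsampling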
Author: HeoLis
Original link: https://ishero.net/PyTorch%20%E5%B8%B8%E7%94%A8%E6%93%8D%E4%BD%9C%E8%AE%B0%E5%BD%95.html
License: Unless otherwise noted, all posts on this blog are licensed under CC BY-NC-SA 4.0. Please credit the source when reposting!
