BatchNormalization

1. Introduction

Batch Norm (Batch Normalization) normalizes data in units of the mini-batches used during training, transforming each mini-batch so that its distribution has mean 0 and variance 1. Inserting this operation before (or after) the activation function reduces the skew in the data distribution. A rough sketch of this placement is given below.
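
A minimal NumPy sketch of the placement just described, not the layer class given in Section 3; the batch, weights, and the choice of ReLU as the activation are made up purely for illustration:

import numpy as np

# Sketch of "affine -> Batch Norm -> ReLU" for one mini-batch.
# x, W, b, gamma, beta are made-up example values.
rng = np.random.default_rng(0)
x = rng.normal(loc=3.0, scale=2.0, size=(8, 4))   # mini-batch of 8 samples, 4 features
W, b = rng.normal(size=(4, 4)), np.zeros(4)
gamma, beta = np.ones(4), np.zeros(4)

a = x @ W + b                                                  # affine transform
a_hat = (a - a.mean(axis=0)) / np.sqrt(a.var(axis=0) + 1e-7)   # normalize per feature
out = np.maximum(0, gamma * a_hat + beta)                      # scale/shift, then ReLU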

2. Implementation

Consider the $m$ input samples $\{x_1, x_2, \cdots, x_m\}$ of a mini-batch.

Input: Values of $x$ over a mini-batch: $\mathcal{B} = \{x_1, \cdots, x_m\}$
    Parameters to be learned: $\gamma, \beta$

Output: $\{y_i = \mathrm{BN}_{\gamma,\beta}(x_i)\}$

$$
\begin{aligned}
\mu_B &\leftarrow \frac{1}{m} \sum_{i=1}^{m} x_i \\
\sigma_B^2 &\leftarrow \frac{1}{m} \sum_{i=1}^{m} (x_i - \mu_B)^2 \\
\hat{x_i} &\leftarrow \frac{x_i - \mu_B}{\sqrt{\sigma_B^2 + \varepsilon}} \\
y_i &\leftarrow \gamma \hat{x_i} + \beta \equiv \mathrm{BN}_{\gamma,\beta}(x_i)
\end{aligned}
$$

Here, $\varepsilon$ is a tiny constant (e.g. $10^{-7}$) added to avoid division by zero when $\sigma_B = 0$; the last line scales and shifts $\hat{x_i}$ by the learned parameters $\gamma$ and $\beta$.
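
A minimal sketch of these four equations in NumPy (the mini-batch below is made up for illustration; the prints at the end only confirm that $\hat{x}$ has mean 0 and variance 1 per feature):

import numpy as np

# Forward pass of Batch Norm for one made-up mini-batch (m=6 samples, 3 features).
x = np.array([[1.0, 2.0, 0.5],
              [0.0, 4.0, 1.5],
              [2.0, 6.0, 2.5],
              [3.0, 8.0, 3.5],
              [4.0, 1.0, 4.5],
              [5.0, 3.0, 5.5]])
gamma, beta, eps = np.ones(3), np.zeros(3), 1e-7

mu = x.mean(axis=0)                      # mu_B
var = np.mean((x - mu) ** 2, axis=0)     # sigma_B^2
x_hat = (x - mu) / np.sqrt(var + eps)    # normalized x_i
y = gamma * x_hat + beta                 # BN_{gamma,beta}(x_i)

print(x_hat.mean(axis=0))                # ~0 per feature
print(x_hat.var(axis=0))                 # ~1 per feature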

  • With Batch Norm, a network learns faster and becomes robust to the weight initialization ("robust to the initial values" means it depends less on them).

3. Python Code

import numpy as np


# Batch Norm layer
class BatchNorm:
    def __init__(self, gamma, beta, momentum=0.9, running_mean=None, running_var=None):
        self.gamma = gamma
        self.beta = beta
        self.momentum = momentum
        self.input_shape = None

        # moving averages of mean and variance, used at test time
        self.running_mean = running_mean
        self.running_var = running_var

        # intermediate values reused in backward
        self.batch_size = None
        self.xc = None
        self.xn = None
        self.std = None
        self.dgamma = None
        self.dbeta = None

    def forward(self, x, train_flg=True):
        self.input_shape = x.shape
        if x.ndim != 2:
            # flatten conv feature maps (N, C, H, W) to (N, C*H*W)
            N, C, H, W = x.shape
            x = x.reshape(N, -1)
        out = self.__forward(x, train_flg)
        return out.reshape(*self.input_shape)

    def __forward(self, x, train_flg):
        if self.running_mean is None:
            N, D = x.shape
            self.running_mean = np.zeros(D)
            self.running_var = np.zeros(D)

        if train_flg:
            mu = x.mean(axis=0)
            xc = x - mu
            var = np.mean(xc**2, axis=0)
            std = np.sqrt(var + 10e-7)
            xn = xc / std

            self.batch_size = x.shape[0]
            self.xc = xc
            self.xn = xn
            self.std = std
            self.running_mean = self.momentum * self.running_mean + (1 - self.momentum) * mu
            self.running_var = self.momentum * self.running_var + (1 - self.momentum) * var
        else:
            # at test time, normalize with the moving averages instead of batch statistics
            xc = x - self.running_mean
            xn = xc / np.sqrt(self.running_var + 10e-7)

        out = self.gamma * xn + self.beta
        return out

    def backward(self, dout):
        if dout.ndim != 2:
            N, C, H, W = dout.shape
            dout = dout.reshape(N, -1)
        dx = self.__backward(dout)
        dx = dx.reshape(*self.input_shape)
        return dx

    def __backward(self, dout):
        dbeta = dout.sum(axis=0)
        dgamma = np.sum(self.xn * dout, axis=0)
        dxn = self.gamma * dout
        dxc = dxn / self.std
        dstd = -np.sum((dxn * self.xc) / (self.std * self.std), axis=0)
        dvar = 0.5 * dstd / self.std
        dxc += (2.0 / self.batch_size) * self.xc * dvar
        dmu = np.sum(dxc, axis=0)
        dx = dxc - dmu / self.batch_size

        self.dgamma = dgamma
        self.dbeta = dbeta

        return dx
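
A minimal usage sketch of the class above (the feature count and the input batch are made up for illustration): forward in training mode normalizes with batch statistics and updates the moving averages, forward with train_flg=False uses the moving averages instead, and backward returns the gradient with respect to the input while storing dgamma and dbeta on the layer.

D = 4                                         # number of features
layer = BatchNorm(gamma=np.ones(D), beta=np.zeros(D))

x = np.random.randn(16, D) * 3.0 + 2.0        # made-up mini-batch of 16 samples
out = layer.forward(x, train_flg=True)        # training: uses batch statistics
print(out.mean(axis=0), out.var(axis=0))      # ~0 and ~1 per feature

dx = layer.backward(np.ones_like(out))        # gradient w.r.t. the input
print(layer.dgamma.shape, layer.dbeta.shape)  # (4,), (4,)

out_test = layer.forward(x, train_flg=False)  # inference: uses running_mean/running_var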
