Table of contents: Computing Z (matrix) · 1.1 General parametric form · 1.2 Simplified form · 1.3 Matrix form · 1.3.2 Z · 2. Viterbi algorithm · 3. Forward algorithm · 4. Backward algorithm · 5. Probability computation with forward-backward · 6. Expectation computation · 7. Parameter estimation (learning) · 7.1 Gradient ascent · References

A CRF is an undirected graphical model. It is a discriminative model that models the dependency between each state and the entire observation sequence.
https://www.unclewang.info/learn/machine-learning/756/ https://blog.csdn.net/u012421852/article/details/80287567
Computing Z (matrix)
```python
import numpy as np

y0 = 1  # start
y4 = 1  # stop
```

The normalization factor Z over all paths from start to stop is simply the sum of the unnormalized probabilities of all those paths.
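Before the matrix-product implementation below, a brute-force sanity check makes this claim concrete: enumerate all 2^3 = 8 label paths, multiply the corresponding matrix entries, and sum. A minimal sketch (using the same M1-M4 values as in `CCRF_manual` below); it should print 1.0, matching the matrix-product result:

```python
import itertools
import numpy as np

# The four matrices from CCRF_manual below
Ms = [np.array([[0.5, 0.5], [0.0, 0.0]]),
      np.array([[0.3, 0.7], [0.7, 0.3]]),
      np.array([[0.5, 0.5], [0.6, 0.4]]),
      np.array([[1.0, 0.0], [1.0, 0.0]])]

z = 0.0
for ys in itertools.product(range(2), repeat=3):   # all 8 label sequences
    path = [0] + list(ys) + [0]                    # pad with start/stop index 0
    p = 1.0
    for i in range(len(path) - 1):
        p *= Ms[i][path[i], path[i+1]]
    z += p
print(z)  # 1.0
```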
```python
class CCRF(object):
    """Matrix representation of a linear-chain CRF."""
    def __init__(self, M):
        self.M = M        # the CRF in matrix form
        self.Z = None     # normalization factor
        self.MP = []      # running matrix product
        self.work()

    def work(self):
        print('work......')
        self.MP = np.full(shape=np.shape(self.M[0]), fill_value=1.0)
        for i in range(np.shape(self.M)[0]):  # four matrices, so four iterations
            print('\nML\n', self.MP)
            print('M%d\n' % i, self.M[i])
            self.MP = np.dot(self.MP, self.M[i])  # matrix multiplication
            print('dot\n', self.MP)

    def ZValue(self):
        return self.MP[0, 0]


def CCRF_manual():
    M1 = np.array([[0.5, 0.5], [0, 0]])        # a01, a02 -> A
    M2 = np.array([[0.3, 0.7], [0.7, 0.3]])    # b11, b12, b21, b22 -> B
    M3 = np.array([[0.5, 0.5], [0.6, 0.4]])
    M4 = np.array([[1, 0], [1, 0]])
    M = np.array([M1, M2, M3, M4])
    print('CRF matrices\n', M)
    crf = CCRF(M)
    ret = crf.ZValue()
    print('Normalization factor Z from start to stop:', ret)


if __name__ == '__main__':
    CCRF_manual()
```
```
CRF matrices
 [[[0.5 0.5]
  [0.  0. ]]

 [[0.3 0.7]
  [0.7 0.3]]

 [[0.5 0.5]
  [0.6 0.4]]

 [[1.  0. ]
  [1.  0. ]]]
work......

ML
 [[1. 1.]
 [1. 1.]]
M0
 [[0.5 0.5]
 [0.  0. ]]
dot
 [[0.5 0.5]
 [0.5 0.5]]

ML
 [[0.5 0.5]
 [0.5 0.5]]
M1
 [[0.3 0.7]
 [0.7 0.3]]
dot
 [[0.5 0.5]
 [0.5 0.5]]

ML
 [[0.5 0.5]
 [0.5 0.5]]
M2
 [[0.5 0.5]
 [0.6 0.4]]
dot
 [[0.55 0.45]
 [0.55 0.45]]

ML
 [[0.55 0.45]
 [0.55 0.45]]
M3
 [[1. 0.]
 [1. 0.]]
dot
 [[1. 0.]
 [1. 0.]]
Normalization factor Z from start to stop: 1.0
```

1.1 General parametric form

```python
import torch
import torch.nn as nn

sequence_len = 3
y_size = 2
k = 5  # number of transition features
l = 4  # number of emission features

# Transition features: for each of the k features, each position has a 2x2
# table over (y_i, y_{i+1}); the positions are y1->y2 and y2->y3 (sequence
# length 3), indexed as t[k, i, y_i, y_{i+1}]
t = torch.tensor(
    [[[[0,1],[0,0]],[[0,1],[0,0]]],
     [[[1,0],[0,0]],[[0,0],[0,0]]],
     [[[0,0],[0,0]],[[0,0],[1,0]]],
     [[[0,0],[1,0]],[[0,0],[0,0]]],
     [[[0,0],[0,0]],[[0,0],[0,1]]]], dtype=float)
lamb = torch.tensor([1, 0.5, 1, 1, 0.2])

# Emission features: sequence length 3, recording which (y, x) pairs fire,
# indexed as s[l, position, state]
s = torch.tensor(
    [[[1,0],[0,0],[0,0]],
     [[0,1],[0,1],[0,0]],
     [[0,0],[1,0],[1,0]],
     [[0,0],[0,0],[0,1]]], dtype=float)
mu = torch.tensor([1, 0.5, 0.8, 0.5])

# (the forms below additionally introduce a start y0 and a stop y4)
def P_y_x_condition(y):
    # general parametric form: weighted sum of transition and emission features
    sumt = 0
    sums = 0
    for i in range(k):
        for j in range(len(y) - 1):
            sumt += lamb[i] * t[i, j, y[j], y[j+1]]
    for i in range(l):
        for j in range(len(y)):
            sums += mu[i] * s[i, j, y[j]]
    print(sums + sumt)
    return torch.exp(sums + sumt)

y = [0, 1, 1]
print('p(y|x)=p(y1=1,y2=2,y3=2|x)', P_y_x_condition(y))
```

```
tensor(3.2000, dtype=torch.float64)
p(y|x)=p(y1=1,y2=2,y3=2|x) tensor(24.5325, dtype=torch.float64)
```
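Note that the value above is still unnormalized: exp(3.2) ≈ 24.53 is the score of this one label sequence, not a probability. A minimal sketch of the normalization (assuming the `t`, `s`, `lamb`, `mu` tensors above): enumerate all 2^3 label sequences, sum their scores to get Z, then divide. The resulting Z should match the ≈253.9492 computed via the matrix form in section 1.3.

```python
import itertools

# Z for the parametric form: sum of unnormalized scores over all label sequences
Z_param = sum(P_y_x_condition(list(ys))
              for ys in itertools.product(range(y_size), repeat=sequence_len))
print(Z_param)                                 # should be about 253.9492
print(P_y_x_condition([0, 1, 1]) / Z_param)    # normalized p(y|x), about 0.0966
```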
1.2 Simplified form

A start state is introduced here as well; the code below introduces both a start and a stop state, although only the start is strictly needed.

```python
# Unified feature tensor: k transition + l emission features over 4 transition
# positions (start->y1, y1->y2, y2->y3, y3->stop)
f = torch.tensor(
    [[[[0,0],[0,0]],[[0,1],[0,0]],[[0,1],[0,0]],[[0,0],[0,0]]],
     [[[0,0],[0,0]],[[1,0],[0,0]],[[0,0],[0,0]],[[0,0],[0,0]]],
     [[[0,0],[0,0]],[[0,0],[0,0]],[[0,0],[1,0]],[[0,0],[0,0]]],
     [[[0,0],[0,0]],[[0,0],[1,0]],[[0,0],[0,0]],[[0,0],[0,0]]],
     [[[0,0],[0,0]],[[0,0],[0,0]],[[0,0],[0,1]],[[0,0],[0,0]]],
     [[[1,0],[1,0]],[[0,0],[0,0]],[[0,0],[0,0]],[[0,0],[0,0]]],
     [[[0,1],[0,1]],[[0,1],[0,1]],[[0,0],[0,0]],[[0,0],[0,0]]],
     [[[0,0],[0,0]],[[1,0],[1,0]],[[1,0],[1,0]],[[0,0],[0,0]]],
     [[[0,0],[0,0]],[[0,0],[0,0]],[[0,1],[0,1]],[[0,0],[0,0]]]], dtype=float)

w = torch.tensor([1, 0.5, 1, 1, 0.2, 1, 0.5, 0.8, 0.5])

def P_y_x_condition_with_f(y):
    # simplified form: one weight vector w over the merged feature tensor f
    total = 0
    for i in range(k + l):
        for j in range(len(y) - 1):
            total += w[i] * f[i, j, y[j], y[j+1]]
    print(total)
    return torch.exp(total)

p_y_x_con = P_y_x_condition_with_f([0, 0, 1, 1, 0])  # start + (y1,y2,y3) + stop
print('p(y|x)=p(y1=1,y2=2,y3=2|x)', p_y_x_con)
```

```
tensor(3.2000, dtype=torch.float64)
p(y|x)=p(y1=1,y2=2,y3=2|x) tensor(24.5325, dtype=torch.float64)
```

1.3 Matrix form

Here a01 denotes the probability of going from start (y0 = 1) to y1 = 1, and b21 the probability of going from y1 = 2 to y2 = 1.
```python
w = torch.tensor([1, 0.5, 1, 1, 0.2, 1, 0.5, 0.8, 0.5])
M = f.clone()  # clone so we do not overwrite the feature tensor f in place
for i in range(k + l):
    M[i] = w[i] * f[i]
print(torch.sum(M, axis=0))
M = torch.exp(torch.sum(M, axis=0))
print('M(i, y_i-1, y_i):\n', M)
# Since y0 = 0 and y_{n+1} = 0,
# we could set M[0,1,0] = M[0,1,1] = M[3,0,1] = M[3,1,1] = 0
# M[0,1,0] = M[0,1,1] = M[3,0,1] = M[3,1,1] = 0
# print('M(i, y_i-1, y_i):\n', M)  # matches the figure above
```

```
tensor([[[1.0000, 0.5000],
         [1.0000, 0.5000]],
        [[1.3000, 1.5000],
         [1.8000, 0.5000]],
        [[0.8000, 1.5000],
         [1.8000, 0.7000]],
        [[0.0000, 0.0000],
         [0.0000, 0.0000]]], dtype=torch.float64)
M(i, y_i-1, y_i):
 tensor([[[2.7183, 1.6487],
         [2.7183, 1.6487]],
        [[3.6693, 4.4817],
         [6.0496, 1.6487]],
        [[2.2255, 4.4817],
         [6.0496, 2.0138]],
        [[1.0000, 1.0000],
         [1.0000, 1.0000]]], dtype=torch.float64)
```

1.3.2 Z
The normalization factor is the (start, stop) entry of the product of the transition matrices:

$$Z(x)=\big(M_1(x)M_2(x)\cdots M_{n+1}(x)\big)_{\text{start},\text{stop}}$$

The paths from start to stop correspond to the label sequences y=(1,1,1), y=(1,1,2), ..., y=(2,2,2); their unnormalized probabilities are

a01·b11·c11, a01·b11·c12, a01·b12·c21, a01·b12·c22, a02·b21·c11, a02·b21·c12, a02·b22·c21, a02·b22·c22.

Computing the normalization factor according to Eq. (11.12): the first-row, first-column element of the matrix product M1(x)M2(x)M3(x)M4(x) is exactly the sum of these eight terms, i.e., the sum of the unnormalized probabilities of all paths from start to stop, which is the normalization factor Z(x).
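This claim is easy to verify by enumeration. A short sketch (using the `M` tensor just built): sum the products of M entries over all eight start-to-stop paths and compare with the matrix product computed by `Z_M` below.

```python
import itertools

Z_bf = 0.0
for ys in itertools.product(range(y_size), repeat=sequence_len):
    path = [0] + list(ys) + [0]              # start and stop both sit at index 0
    p = 1.0
    for i in range(len(path) - 1):
        p = p * M[i, path[i], path[i+1]]
    Z_bf += p
print(Z_bf)  # should match Z_M(M) below, about 253.9492
```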
```python
def Z_M(M):
    z = M[0]
    for i in range(1, sequence_len + 1):
        z = torch.matmul(z, M[i])
    return z[0, 0]

print(Z_M(M))
```

```
tensor(253.9492, dtype=torch.float64)
```

```python
def P_y_x_condition_with_M(y):
    p = 1
    for i in range(len(y) - 1):
        p *= M[i, y[i], y[i+1]]
    print(p)  # unnormalized path score
    return p / Z_M(M)

p_y_x_con = P_y_x_condition_with_M([0, 0, 1, 1, 0])
print('p(y|x)=p(y1=1,y2=2,y3=2|x)', p_y_x_con)
```

```
tensor(24.5325, dtype=torch.float64)
p(y|x)=p(y1=1,y2=2,y3=2|x) tensor(0.0966, dtype=torch.float64)
```

2. Viterbi algorithm

```python
print(torch.log(M))
```

```
tensor([[[1.0000, 0.5000],
         [1.0000, 0.5000]],
        [[1.3000, 1.5000],
         [1.8000, 0.5000]],
        [[0.8000, 1.5000],
         [1.8000, 0.7000]],
        [[0.0000, 0.0000],
         [0.0000, 0.0000]]], dtype=torch.float64)
```

```python
def Viterbi_M():
    delta = torch.zeros(sequence_len, y_size)
    logM = torch.log(M)
    delta[0] = logM[0, 0]          # start from the `start` row of log M1
    indices = []
    for i in range(1, sequence_len):
        print(delta[i-1].reshape(y_size, 1) + logM[i])
        delta[i], indice = torch.max(delta[i-1].reshape(y_size, 1) + logM[i], axis=0)
        indices.append(indice)
    print(delta)
    # backtrack the best path
    path = torch.zeros(sequence_len, dtype=torch.int)
    path[sequence_len-1] = torch.argmax(delta[sequence_len-1])
    for i in range(sequence_len-2, -1, -1):
        path[i] = indices[i][path[i+1]]
    return path

Viterbi_M()
```

```
tensor([[2.3000, 2.5000],
        [2.3000, 1.0000]], dtype=torch.float64)
tensor([[3.1000, 3.8000],
        [4.3000, 3.2000]], dtype=torch.float64)
tensor([[1.0000, 0.5000],
        [2.3000, 2.5000],
        [4.3000, 3.8000]])
tensor([0, 1, 0], dtype=torch.int32)
```
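Since the state space is tiny, the Viterbi result can be cross-checked by brute force: score every label sequence with the same log-potentials and take the argmax. A minimal sketch (assuming `M`, `y_size`, `sequence_len` from above); it should also yield (0, 1, 0) with log-score 4.3:

```python
import itertools

logM = torch.log(M)
best_path, best_score = None, float('-inf')
for ys in itertools.product(range(y_size), repeat=sequence_len):
    path = [0] + list(ys)                    # prepend the start state
    score = sum(logM[i, path[i], path[i+1]] for i in range(len(path) - 1))
    if score > best_score:
        best_path, best_score = ys, score
print(best_path, best_score)  # expected: (0, 1, 0), tensor(4.3000, ...)
```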
3. Forward algorithm

General form:

$$\alpha_0(y_0|x)=\begin{cases}1, & y_0=\text{start}\\0, & y_0\neq\text{start}\end{cases}$$

This is a single value (a scalar). The recursion is

$$\alpha_{i+1}(y_{i+1}|x)=\alpha_i(y_i|x)\,M_{i+1}(y_i,y_{i+1}|x)$$
Matrix form: here $\alpha_i(x)$ is a vector,

$$\alpha_i(x)=\big(\alpha_i(y_i=1|x),\ \alpha_i(y_i=2|x),\ \dots,\ \alpha_i(y_i=m|x)\big)^T$$

$$\alpha_{i+1}^T(x)=\alpha_i^T(x)\,M_{i+1}(x)$$

In the code, $M_1=M[0]$, $M_2=M[1]$, $M_3=M[2]$, $M_4=M[3]$.
```python
# y0 = 0 and y_{n+1} = 0, so zero out the impossible start/stop transitions
M[0,1,0] = M[0,1,1] = M[3,0,1] = M[3,1,1] = 0
```
```python
def alpha():
    alpha = torch.zeros(sequence_len + 2, y_size, dtype=float)
    alpha[0, 0] = 1                      # alpha_0: all mass on the start state
    for i in range(sequence_len + 1):
        alpha[i+1] = torch.matmul(alpha[i].reshape(1, y_size), M[i])
    print(alpha)
    return alpha

alpha = alpha()
```

```
tensor([[  1.0000,   0.0000],
        [  2.7183,   1.6487],
        [ 19.9484,  14.9008],
        [134.5403, 119.4088],
        [253.9492,   0.0000]], dtype=torch.float64)
```

4. Backward algorithm
$$\beta_i(y_i|x)=M_{i+1}(y_i,y_{i+1}|x)\,\beta_{i+1}(y_{i+1}|x)$$

$$\beta_{n+1}(y_{n+1}|x)=\begin{cases}1, & y_{n+1}=\text{stop}\\0, & y_{n+1}\neq\text{stop}\end{cases}$$

Vector form:

$$\beta_i(x)=M_{i+1}(x)\,\beta_{i+1}(x)$$

$$Z(x)=\alpha_n^T(x)\cdot\mathbf{1}=\mathbf{1}^T\cdot\beta_1(x)$$
```python
def beta():
    beta = torch.zeros(sequence_len + 2, y_size, dtype=float)
    beta[sequence_len + 1, 0] = 1        # beta_{n+1}: all mass on the stop state
    for i in range(sequence_len, -1, -1):
        beta[i] = torch.matmul(M[i], beta[i+1].reshape(y_size, 1)).reshape(y_size)
    print(beta)
    return beta

beta = beta()
```

```
tensor([[253.9492,   0.0000],
        [ 60.7485,  53.8707],
        [  6.7072,   8.0634],
        [  1.0000,   1.0000],
        [  1.0000,   0.0000]], dtype=torch.float64)
```

```python
def Z_alpha(alpha):
    return torch.sum(alpha[sequence_len + 1])

print(Z_alpha(alpha))
```

```
tensor(253.9492, dtype=torch.float64)
```

```python
def Z_beta(beta):
    return torch.sum(beta[0])

print(Z_beta(beta))
```

```
tensor(253.9492, dtype=torch.float64)
```

5. Probability computation with forward-backward
These are single values (scalars):

$$p(y_i|x)=\frac{\alpha_i^T(y_i|x)\,\beta_i(y_i|x)}{Z(x)}$$

$$p(y_{i-1},y_i|x)=\frac{\alpha_{i-1}^T(y_{i-1}|x)\,M_i(y_{i-1},y_i|x)\,\beta_i(y_i|x)}{Z(x)}$$
Derivation:

$$
\begin{aligned}
p(y_t=i|x) &= \sum_{y_1,\dots,y_{t-1},y_{t+1},\dots,y_T} p(y|x)\\
&= \sum_{y_1,\dots,y_{t-1}}\ \sum_{y_{t+1},\dots,y_T} \frac{1}{Z}\prod_{t'=1}^{T}\Phi_{t'}(y_{t'-1},y_{t'},x)\\
&= \frac{1}{Z}\Big(\sum_{y_1,\dots,y_{t-1}}\prod_{t'=1}^{t}\Phi_{t'}(y_{t'-1},y_{t'},x)\Big)\Big(\sum_{y_{t+1},\dots,y_T}\prod_{t'=t+1}^{T}\Phi_{t'}(y_{t'-1},y_{t'},x)\Big)\\
&= \frac{1}{Z}\,\Delta_{\text{left}}\,\Delta_{\text{right}}
\end{aligned}
$$

$$
\begin{aligned}
\alpha_t(i)=\Delta_{\text{left}} &= \sum_{y_1,\dots,y_{t-1}}\prod_{t'=1}^{t}\Phi_{t'}(y_{t'-1},y_{t'},x)\\
&= \sum_{y_1,\dots,y_{t-1}}\Phi_1(y_0,y_1,x)\,\Phi_2(y_1,y_2,x)\cdots\Phi_{t-1}(y_{t-2},y_{t-1},x)\,\Phi_t(y_{t-1},y_t=i,x)\\
&= \sum_{y_{t-1}}\Phi_t(y_{t-1},y_t,x)\sum_{y_{t-2}}\Phi_{t-1}(y_{t-2},y_{t-1},x)\cdots\sum_{y_1}\Phi_2(y_1,y_2,x)\sum_{y_0}\Phi_1(y_0,y_1,x)
\end{aligned}
$$

$$
\beta_t(i)=\Delta_{\text{right}}=\sum_{y_{t+1},\dots,y_T}\prod_{t'=t+1}^{T}\Phi_{t'}(y_{t'-1},y_{t'},x)
$$
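Before the vectorized alpha-beta computation below, the node marginal can also be brute-forced straight from the definition, which is a useful cross-check. A sketch (assuming the `M` tensor from section 1.3): sum the path products over all sequences, bucketed by the state at position t, then normalize; for t = 2 it should match row 2 of the alpha-beta table printed below (about [0.5269, 0.4731]).

```python
import itertools

def marginal_bruteforce(t_pos):
    # p(y_t = state | x) by enumerating all label sequences (t_pos is 1-based)
    probs = torch.zeros(y_size, dtype=float)
    for ys in itertools.product(range(y_size), repeat=sequence_len):
        path = [0] + list(ys) + [0]          # pad with start/stop index 0
        p = 1.0
        for i in range(len(path) - 1):
            p = p * M[i, path[i], path[i+1]]
        probs[ys[t_pos - 1]] += p
    return probs / probs.sum()

print(marginal_bruteforce(2))  # about tensor([0.5269, 0.4731])
```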
```python
def p_y_x_condition_alpha_beta(alpha, beta):
    # node marginals p(y_i|x) for every position and state at once
    p_y_x = alpha * beta / Z_alpha(alpha)
    return p_y_x

y = [0, 1, 1]
p_y_x_condition_alpha_beta(alpha, beta)
```

```
tensor([[1.0000, 0.0000],
        [0.6503, 0.3497],
        [0.5269, 0.4731],
        [0.5298, 0.4702],
        [1.0000, 0.0000]], dtype=torch.float64)
```

```python
def p_y12_x_condition_alpha_beta(alpha, beta):
    # edge marginals p(y_{i-1}, y_i | x)
    p = M.clone().detach()
    for i in range(sequence_len + 1):
        p[i] = alpha[i].reshape(y_size, 1) * p[i] * beta[i+1]
    return p / Z_alpha(alpha)

p_y12_x_condition_alpha_beta(alpha, beta)
```

```
tensor([[[0.6503, 0.3497],
         [0.0000, 0.0000]],
        [[0.2634, 0.3868],
         [0.2634, 0.0863]],
        [[0.1748, 0.3520],
         [0.3550, 0.1182]],
        [[0.5298, 0.0000],
         [0.4702, 0.0000]]], dtype=torch.float64)
```

6. Expectation computation

```python
def E_fk_py_x(k, alpha, beta):
    # E_{p(y|x)}[f_k]: expected count of feature k under the model
    return torch.sum(f[k] * p_y12_x_condition_alpha_beta(alpha, beta))

E_fk_py_x(1, alpha, beta)
```

```
tensor(0.1317, dtype=torch.float64)
```
7. Parameter estimation (learning)

$$\hat{\theta}=\arg\max\prod_{i=1}^{N}p(y^{(i)}|x^{(i)})$$

$$\hat{\lambda},\hat{\eta}=\arg\max_{\lambda,\eta}\prod_{i=1}^{N}p(y^{(i)}|x^{(i)})=\arg\max_{\lambda,\eta}\sum_{i=1}^{N}\log p(y^{(i)}|x^{(i)})$$

$$L=\sum_{i=1}^{N}\log p(y^{(i)}|x^{(i)})=\sum_{i=1}^{N}\Big(-\log Z+\sum_{t=1}^{T}\big(\lambda^{T}f(y_{t-1},y_t,x)+\eta^{T}g(y_t,x)\big)\Big)$$

$$\frac{\partial L}{\partial\lambda}=\sum_{i=1}^{N}\Big(-\frac{\partial}{\partial\lambda}\log Z+\sum_{t=1}^{T}f(y_{t-1},y_t,x)\Big)$$

For the log-partition function, the gradient is an expectation (the sum over y is exactly an expectation):

$$
\begin{aligned}
\frac{\partial}{\partial\lambda}\log Z
&= E\Big[\sum_{t=1}^{T}f(y_{t-1},y_t,x^{(i)})\Big]\\
&= \sum_{y}P(y|x^{(i)})\sum_{t=1}^{T}f(y_{t-1},y_t,x^{(i)})\\
&= \sum_{t=1}^{T}\sum_{y}P(y|x^{(i)})\,f(y_{t-1},y_t,x^{(i)})\\
&= \sum_{t=1}^{T}\sum_{y_1,\dots,y_{t-2}}\sum_{y_{t-1}}\sum_{y_t}\sum_{y_{t+1},\dots,y_T}P(y|x^{(i)})\,f(y_{t-1},y_t,x^{(i)})\\
&= \sum_{t=1}^{T}\sum_{y_{t-1}}\sum_{y_t}\Big(\sum_{y_1,\dots,y_{t-2}}\sum_{y_{t+1},\dots,y_T}P(y|x^{(i)})\Big)f(y_{t-1},y_t,x^{(i)})\\
&= \sum_{t=1}^{T}\sum_{y_{t-1}}\sum_{y_t}P(y_{t-1},y_t|x^{(i)})\,f(y_{t-1},y_t,x^{(i)})
\end{aligned}
$$

and the pairwise marginal is exactly what the forward-backward pass provides:

$$p(y_{i-1},y_i|x)=\frac{\alpha_{i-1}^{T}(y_{i-1}|x)\,M_i(y_{i-1},y_i|x)\,\beta_i(y_i|x)}{Z(x)}$$
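For the toy example, the gradient derived above is simply "empirical feature counts along the observed path minus expected counts under the model". A minimal sketch (assuming the observed sequence y = (1, 2, 2) as before, and the `f`, `alpha`, `beta`, and `p_y12_x_condition_alpha_beta` from the earlier sections):

```python
y_obs = [0, 1, 1]
path = [0] + y_obs + [0]                     # pad with start/stop index 0

# empirical counts: feature values summed along the observed transitions
emp = torch.stack([sum(f[i, j, path[j], path[j+1]]
                       for j in range(len(path) - 1))
                   for i in range(k + l)])

# expected counts: sum_t sum_{y_{t-1}, y_t} p(y_{t-1}, y_t | x) f_i(y_{t-1}, y_t, x)
exp_cnt = torch.sum(f * p_y12_x_condition_alpha_beta(alpha, beta), axis=(1, 2, 3))

grad = emp - exp_cnt                         # direction of steepest ascent in w
print(grad)
```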
7.1 Gradient ascent
$$\lambda^{(t+1)}=\lambda^{(t)}+\text{step}\cdot\frac{\partial L}{\partial\lambda},\qquad
\eta^{(t+1)}=\eta^{(t)}+\text{step}\cdot\frac{\partial L}{\partial\eta}$$
These methods belong to a fuller CRF class; its `get_ts` and `f2M` helpers, which build the feature tensor and the M matrices for a given input `x`, are not shown here.

```python
def delta_log_L(self, alpha, beta, y):
    # gradient of the log-likelihood: empirical feature counts along the
    # observed path (padded with the start/stop state 0) minus expected
    # counts under the model
    path_prev = [0] + y
    path_next = y + [0]
    delta = torch.sum(self.f[:, torch.arange(len(y) + 1), path_prev, path_next],
                      axis=1) \
          - torch.sum(self.f * self.p_y12_x_condition_alpha_beta(alpha, beta),
                      axis=(1, 2, 3))
    return delta

def predict(self, x):
    self.sequence_len = len(x)
    self.get_ts(x)           # build the feature tensor for this input
    self.M = self.f2M()      # turn the weighted features into the M matrices
    return self.Viterbi_M()

def train(self, traindata):
    batch_size = 100
    num_batch = int(len(traindata[0]) / batch_size)
    for e in range(num_batch):
        for i in range(batch_size):
            x = traindata[0][e * batch_size + i]
            y = traindata[1][e * batch_size + i]
            self.sequence_len = len(x)
            self.get_ts(x)
            self.M = self.f2M()
            alpha = self.alpha()
            beta = self.beta()
            delta = self.delta_log_L(alpha, beta, y)
            print(delta)
            print(self.Viterbi_M())
            print(y)
            self.w = self.w + 0.0001 * delta   # gradient-ascent step
```

In practice, gradient ascent converges very slowly. Alternative choices:

- conjugate gradient methods
- limited-memory quasi-Newton methods (a sketch follows below)

The current implementation is painfully slow... will improve it later.
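As a rough illustration of the quasi-Newton alternative, here is a minimal sketch that fits the toy example's weights with `torch.optim.LBFGS` (assuming the `f` tensor and `sequence_len` from the earlier sections; the negative log-likelihood rebuilds M from the weights exactly as in section 1.3 — this is only an illustration, not the class implementation above):

```python
path = [0, 0, 1, 1, 0]  # start + (y1, y2, y3) + stop, as in section 1.2
w_var = torch.tensor([1, 0.5, 1, 1, 0.2, 1, 0.5, 0.8, 0.5],
                     dtype=torch.float64, requires_grad=True)

def neg_log_likelihood(w_var):
    # rebuild M from the weights, as in section 1.3
    M_ = torch.exp(torch.sum(w_var.reshape(-1, 1, 1, 1) * f, axis=0))
    # unnormalized log-score of the observed path
    score = sum(torch.log(M_[i, path[i], path[i+1]]) for i in range(len(path) - 1))
    # log Z via the matrix product
    z = M_[0]
    for i in range(1, sequence_len + 1):
        z = torch.matmul(z, M_[i])
    return torch.log(z[0, 0]) - score

opt = torch.optim.LBFGS([w_var], lr=0.1)

def closure():
    opt.zero_grad()
    loss = neg_log_likelihood(w_var)
    loss.backward()
    return loss

for _ in range(10):
    opt.step(closure)
print(w_var, neg_log_likelihood(w_var))
```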
References
- UCAS (国科大) PRML course
- UCAS (国科大) NLP course
- 条件随机场CRF(一) 从随机场到线性链条件随机场 ("CRF (1): from random fields to linear-chain CRFs")
- Li Hang (李航), 统计学习方法 (Statistical Learning Methods)
- 白板推导 ("whiteboard derivations") CRF lectures
- A CRF implementation whose feature functions are used in this post