
The EM Algorithm and Extensions (Geoffrey J. McLachlan and Thriyambakam Krishnan)

Posted at 2019-09-04

#p.17

g=9   # number of mixture components
n=10  # number of observations
mu=sample(1:9,g,replace=T)          # component means (held fixed here; only pi and z are updated)
y=sample(seq(1,9,0.1),n,replace=T)  # observed data
z=array(1/g,dim=c(g,n))             # initial responsibilities; each column sums to 1
ite=100

for(l in 1:ite){
  # M-step: update the mixing proportions
  pi=apply(z,1,sum)/n
  # component densities f_ij = phi(y_j; mu_i, 1)
  fij=array(0,dim=c(g,n))
  for(i in 1:g){
    fij[i,]=dnorm(y,mu[i])
  }
  # expected complete-data log-likelihood: sum_ij z_ij (log pi_i + log f_ij)
  loglik=sum(z*c(log(pi)))+sum(z*log(fij))
  print(loglik)
  # E-step: z_ij = pi_i f_ij / sum_h pi_h f_hj
  z=t(t(fij*c(pi))/c(t(fij)%*%pi))
}
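# sanity check (my addition, not from the book): the E-step should return
# proper posterior probabilities, so every column of z must sum to 1
stopifnot(all(abs(apply(z,2,sum)-1)<1e-8))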

#EM algorithm

#Healy-Westmacott Procedure as an EM algorithm p.55

data=data.frame(num=1:20,y=c(0.20, 0.10, 0.49, 0.26, 0.92, 0.95, 0.18, 0.19, 0.44, 0.79, 0.61, 0.41, 0.49, 0.34, 0.62, 0.99, 0.38, 0.22,0.71,0.40),x1=c(84,87,86,85,82,87,92,94,88,84.9,78,90,88,87,82,84,86,83,85.2,82),x2=c(61,55.5,57,57,50,50,66.5,65,60.5,49.5,49.5,61,59.5,58.4,53.5,54,60,58.8,54,56),x3=c(24.5,24,23,22.5,22,24,25,26,25,24,23,21,26,25.5,24,23,24,24,24,22))

X=as.matrix(data[,colnames(data) %in% c("x1","x2","x3")])
Y=data$y
n=nrow(X);m=18   # the first m responses are observed, the last n-m are treated as missing
Y=Y[1:m]
beta=rep(1,ncol(X))   # initial regression coefficients
sigma=1               # initial residual variance (sigma holds the variance, not the sd)
ite=100

for(l in 1:ite){
  # E-step: fill in the missing responses with their current fitted values
  y_sub=X[(m+1):n,]%*%beta
  # M-step: OLS on the completed data
  beta=solve(t(X)%*%X)%*%t(X)%*%c(Y,y_sub)
  # variance update: observed RSS plus (n-m)*sigma carried over for the imputed cases
  sigma=(sum((Y-X[1:m,]%*%beta)^2)+(n-m)*sigma)/n
  print(sigma)
}
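# check (my addition): with only responses missing, the EM fixed point for beta
# coincides with OLS on the m complete cases, so the two fits should agree
fit=lm(Y~X[1:m,]-1)
print(cbind(EM=c(beta),OLS=coef(fit)))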


#Buck's method p.51

data=data.frame(num=1:20,y=c(0.20, 0.10, 0.49, 0.26, 0.92, 0.95, 0.18, 0.19, 0.44, 0.79, 0.61, 0.41, 0.49, 0.34, 0.62, 0.99, 0.38, 0.22,0.71,0.40),x1=c(84,87,86,85,82,87,92,94,88,84.9,78,90,88,87,82,84,86,83,85.2,82),x2=c(61,55.5,57,57,50,50,66.5,65,60.5,49.5,49.5,61,59.5,58.4,53.5,54,60,58.8,54,56),x3=c(24.5,24,23,22.5,22,24,25,26,25,24,23,21,26,25.5,24,23,24,24,24,22))

X=as.matrix(data[,colnames(data) %in% c("x1","x2","x3")])
Y=data$y
n=nrow(X)
nums=20;W=X;

# knock out nums randomly chosen cells (row/column pairs) of the covariate matrix
n1=sample(1:dim(X)[1],nums,replace=T)
n2=sample(1:dim(X)[2],nums,replace=T)
W[cbind(n1,n2)]=NA;W_sub=W

beta=c(1,1,1)

# column means over the observed entries only
W_ave=apply(W,2,function(vec)mean(vec,na.rm=TRUE))

ite=100

for(s in 1:ite){
  W_pre=W
  W=W_sub
  for(j in 1:nrow(W)){
    for(i in 1:ncol(W)){
      if(is.na(W[j,i])){
        # regression-style fill-in: column mean plus a correction built from the
        # observed deviations in row j (a simplified form of Buck's imputation)
        W_vec=W[j,]-W_ave;W_vec[is.na(W_vec)]=0
        W[j,i]=W_ave[i]+sum(beta*W_vec)
      }
    }
  }
  beta=solve(t(W)%*%W)%*%t(W)%*%Y
  print(sum(abs(W_pre-W),na.rm=TRUE))
  W_ave=apply(W,2,mean)
}
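# check (my addition): compare the imputed cells with the original values they replaced
miss=which(is.na(W_sub))
print(data.frame(true=X[miss],imputed=W[miss]))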



#Book: The EM Algorithm and Extensions

#p.60 Poisson model (Z is the incomplete data, Z ~ Pois(lambda*P))

#problem dimensions
d=10;n=20

#create sample data

#conditional probabilities
P=array(0,dim=c(n,d))

#realized values (playing the role of the response)
Z=array(0,dim=c(n,d))

for(i in 1:nrow(P)){
  for(j in 1:ncol(P)){
    P[i,j]=rpois(1,sample(10:100,1))
    Z[i,j]=rpois(1,sample(10:100,1))
  }
}

#t(P)%*%lambda should end up close to the observed totals Y
Y=apply(Z,2,sum)

#row-normalize the conditional probabilities that are given as data
P=(P)/apply(P,1,sum)

#q_i of (2.29) on p.62
Q=apply(P,1,sum)

#initial parameter values
lambda=sample(1:10,n,replace = T)

Z_pre=Z
Z=array(0,dim=c(n,d))
for(i in 1:nrow(P)){
  for(j in 1:ncol(P)){
    Z[i,j]=rpois(1,sample(10:100,1))
  }
}



#iterations
ite=100000

#to monitor how much the parameters move per update
diff_Z=c();diff_lambda=c()

for(k in 1:ite){
  lambda_pre=lambda;Z_pre2=Z
  #E-step (2.28): conditional expectation of the unobserved z_ij
  for(i in 1:n){
    for(j in 1:d){
      Z[i,j]=(Y[j]*lambda[i]*P[i,j]/sum(lambda*P[,j]))
    }
  }
  #M-step (2.29)
  for(i in 1:n){
    lambda[i]=lambda[i]*(Q[i]^(-1))*sum(Y*P[i,]/(t(P)%*%lambda))
  }
  diff_lambda=abs(lambda-lambda_pre)
  diff_Z=sum(abs(Z-Z_pre2))
  print(sum(diff_lambda+diff_Z))
  #log-likelihood (2.26); lgamma(z+1) replaces the explicit sum of log-factorials
  loglikelihood=c()
  for(j in 1:d){
    for(i in 1:n){
      loglikelihood=c(loglikelihood,-lambda[i]*P[i,j]+Z[i,j]*log(lambda[i]*P[i,j])-lgamma(Z[i,j]+1))
    }
  }
  loglikelihood=sum(loglikelihood)
  #print(loglikelihood)
}
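# check (my addition): at convergence the fitted column totals t(P)%*%lambda
# should track the observed totals Y
print(cbind(Y=Y,fitted=c(t(P)%*%lambda)))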



#p.63 ML estimation for the multivariate t distribution with known df v

v=3;p=10;n=20
W=array(0,dim=c(n,p))
for(j in 1:nrow(W)){
  W[j,]=rpois(p,sample(10:100,1))
}
Y=W
U=rep(1,n)   # E-step weights; U=1 gives the usual normal-theory start
ite=100
mu=rep(1,p);sigma=matrix(0,p,p)

for(j in 1:ite){
  mu_pre=mu;sigma_pre=sigma;U_pre=U
  if(j>1){
    # E-step: U_i = (v+p)/(v + (W_i-mu)' sigma^{-1} (W_i-mu))
    U=c()
    for(i in 1:n){
      U=c(U,(v+p)/(v+t(W[i,]-mu)%*%solve(sigma)%*%(W[i,]-mu)))
    }
  }
  # M-step: weighted mean and weighted scatter matrix
  mu=t(Y)%*%U/sum(U)
  sigma=matrix(0,p,p)
  for(i in 1:n){
    sigma=sigma+U[i]*t(t(c(W[i,]-mu)))%*%t(W[i,]-mu)
  }
  sigma=sigma/n
  #print(sum(abs(mu-mu_pre)))
  #print(sum(abs(sigma-sigma_pre)))
  #print(sum(abs(U-U_pre)))
  # observed-data log-likelihood of the multivariate t
  log_lik=-n*p*log(pi*v)/2+n*(lgamma((v+p)/2)-lgamma(v/2))-n*log(det(sigma))/2+n*(v+p)*log(v)/2
  for(i in 1:n){
    log_lik=log_lik-(v+p)*log(v+t(W[i,]-mu)%*%solve(sigma)%*%(W[i,]-mu))/2
  }
  print(log_lik)
}
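# check (my addition): the robust t-based location estimate should stay close
# to the ordinary sample mean on this clean simulated data
print(cbind(t_mu=c(mu),sample_mean=colMeans(W)))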


#p.125 Grouped Data from an Exponential Distribution

mu=4;
n=2000
values=rexp(n,1/mu);d=1   # true mean 4, bin width d
W=seq(1,10,d)             # bin boundaries (values outside [1,10) are discarded)
v=length(W)-1             # number of bins

W_data=data.frame(value1=0,value2=0,n=0)
for(j in 2:length(W)){
  W_data_sub=data.frame(value1=W[j-1],value2=W[j],n=length(values[values<W[j] & values>=W[j-1]]))
  W_data=rbind(W_data,W_data_sub)
}
W_data=tail(W_data,nrow(W_data)-1)

#iterations

#initial value
mu=1
ite=100

for(k in 1:ite){
  a_j=c();Y=W_data$n
  for(j in 1:v){
    # within-bin conditional mean: E[X | bin j] = mu + a_j,
    # with a_j = x_{j-1} - d/(exp(d/mu)-1)
    a_val=W_data$value1[j]-d/(exp(d/mu)-1)
    a_j=c(a_j,a_val)
  }
  # EM update of the mean
  mu=mu+sum(W_data$n*a_j)/n
  a_val2=sum(W_data$n*a_j)/n
  # rate of convergence h and approximate information I for var(mu)
  h=(a_val2*mu^2)/(sum(W_data$n*a_j^2)/n-a_val2^2)
  I=(n/(mu^4))*sum(W_data$n*a_j^2)/n
  print(paste0("average:",mu))
  print(paste0("S(y,mu):",sum(W_data$n*a_j)/(mu^2)))
  print(paste0("h:",h));print(paste0("var_mu:",1/I))
}

print(paste0("error:",abs(mean(values)-mu)));print(paste0("average:",mu))
print(paste0("S(y,mu):",sum(W_data$n*a_j)/(mu^2)))
print(paste0("h:",h));print(paste0("var_mu:",1/I))
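# sanity check (my addition): verify the within-bin mean formula E[X|bin]=mu+a_j
# by Monte Carlo on the first bin [1,2), using the fitted mu
x=rexp(1e5,1/mu);x=x[x>=1 & x<2]
print(c(mc=mean(x),formula=mu+1-d/(exp(d/mu)-1)))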




#p.143 Example 4.7: Geometric Mixture

n=100;v=5   # n observations, v mixture components
Y=sample(10:30,n,replace = T)

#initial conditions: random hard assignments of observations to components
Z=array(0,dim=c(v,n))
for(j in 1:v){
  vec=rpois(2,sample(10:20,1))
  prob=vec/sum(vec)
  Z[j,]=c(sample(0:1,n,prob=prob,replace=T))
}

#iterations
ite=20

for(j in 1:ite){
  Z_pre=Z
  # M-step: mixing proportions and geometric parameters p_k = 1/(component mean)
  n_vec=apply(Z,1,sum)
  pis=n_vec/n
  ps=1/(c(Z%*%Y)/c(n_vec))
  # E-step: unnormalized responsibilities
  for(k in 1:v){
    Z[k,]=(pis[k]*ps[k]*(1-ps[k])^(Y-1))
  }
  # normalize each column so every observation's responsibilities sum to 1
  Z=t(t(Z)/c(apply(Z,2,sum)))
  #print(sum(abs(Z-Z_pre)))
  print(paste0("log_lik:",sum(n_vec*(log(pis)+log(ps))+(c(Z%*%Y)-c(n_vec))*log(1-ps))))
}
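# my addition: hard cluster sizes implied by the final responsibilities
print(table(apply(Z,2,which.max)))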


#p.75 normal distribution fitted to grouped (binned) data

n=1000;G=2
mu0=100;sigma0=500   # initial mean and variance (sigma0 is a variance throughout)
W=rnorm(n,4,8)
val=seq(min(W),max(W),(max(W)-min(W))/G)   # G+1 bin boundaries spanning the data
ite=200

for(i in 1:ite){
  mu0_pre=mu0;sigma0_pre=sigma0
  # integrands x*phi(x) and (x-mu0)^2*phi(x) under the current N(mu0,sigma0) fit
  func1=function(x){
    return(x*exp(-(x-mu0)^2/(2*sigma0))/sqrt(2*pi*sigma0))
  }
  func2=function(x){
    return(((x-mu0)^2)*exp(-(x-mu0)^2/(2*sigma0))/sqrt(2*pi*sigma0))
  }
  values1=c();values2=c()
  for(j in 1:G){
    values1=c(values1,integrate(func1,val[j],val[j+1])$value)
    values2=c(values2,integrate(func2,val[j],val[j+1])$value)
    #values1=c(values1,mean(W[W>=val[j] & W<val[j+1]]))
    #values2=c(values2,mean((W[W>=val[j] & W<val[j+1]]-mu0)^2))
  }
  # expected bin counts under the current fit; pnorm takes the sd, hence sqrt(sigma0)
  n_vec=c()
  for(j in 1:G){
    n_vec=c(n_vec,n*(pnorm(val[j+1],mu0,sqrt(sigma0))-pnorm(val[j],mu0,sqrt(sigma0)))/(pnorm(max(val),mu0,sqrt(sigma0))-pnorm(min(val),mu0,sqrt(sigma0))))
  }
  mu0=sum(n_vec*values1)/sum(n_vec)
  sigma0=sum(n_vec*values2)/sum(n_vec)
  print(abs(sigma0-sigma0_pre))
  #plot(n_vec)
}
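# check (my addition): compare the fitted parameters with the sample moments of W
print(c(mu0=mu0,mean_W=mean(W),sigma0=sigma0,var_W=var(W)))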


#p.191 EM for the linear mixed model y_i = X_i beta + Z_i b_i + e_i

library(dplyr)

m=50;n=sample(20:100,m,replace=T)   # m groups with random sizes
p=5;q=3                             # p fixed-effect and q random-effect covariates
X_vec=array(0,dim=c(1,p));Z_vec=array(0,dim=c(1,q))
colnames(X_vec)=paste0("X.",c(1:p))
colnames(Z_vec)=paste0("Z.",c(1:q))
data=data.frame(class=0,y=0,X_vec,Z_vec)

for(j in 1:m){
  X_mat=array(0,dim=c(n[j],p))
  Z_mat=array(0,dim=c(n[j],q))
  for(i in 1:p){
    X_mat[,i]=rpois(n[j],sample(1:30,1))
  }
  for(i in 1:q){
    Z_mat[,i]=rpois(n[j],sample(30:60,1))
  }
  colnames(X_mat)=paste0("X.",c(1:p))
  colnames(Z_mat)=paste0("Z.",c(1:q))
  y_vec=rpois(n[j],sample(1:10,1))
  data_sub=data.frame(class=j,y=y_vec,X_mat,Z_mat)
  data=rbind(data,data_sub)
}
data=tail(data,nrow(data)-1)

D=diag(1,q)          # random-effects covariance
beta=rep(1,p)        # fixed effects
sigma=1              # residual sd
ite=100
b_box=array(1,dim=c(q,m))

for(j in 1:ite){
  beta_pre=beta
  D_pre=diag(0,q)
  for(i in 1:m){
    dat=data %>% filter(class==i)
    y=dat$y;Z=as.matrix(dat[,grepl("^Z\\.",colnames(dat))]);X=as.matrix(dat[,grepl("^X\\.",colnames(dat))])
    # E-step: BLUP of the random effects b_i given the current parameters
    b=solve(t(Z)%*%Z+(sigma^2)*solve(D))%*%t(Z)%*%(y-X%*%beta)
    b_box[,i]=b
    D_pre=D_pre+(sigma^2)*solve((sigma^(-2))*t(Z)%*%Z+solve(D))+b%*%t(b)
  }
  # M-step for the random-effects covariance
  D=D_pre/m
  sigma_pre=0
  beta_pre1=diag(0,p);beta_pre2=rep(0,p)
  for(i in 1:m){
    dat=data %>% filter(class==i)
    y=dat$y;Z=as.matrix(dat[,grepl("^Z\\.",colnames(dat))]);X=as.matrix(dat[,grepl("^X\\.",colnames(dat))])
    b=b_box[,i]
    R=diag(1,n[i])
    e=y-X%*%beta-Z%*%b
    sigma_pre=sigma_pre+sum(diag((t(Z)%*%solve(R)%*%Z)%*%solve((sigma^2)*t(Z)%*%solve(R)%*%Z+solve(D))))+as.numeric(t(e)%*%solve(R)%*%e)
    # GLS accumulation for the fixed effects
    sigma11=Z%*%D%*%t(Z)+(sigma^2)*R
    beta_pre1=beta_pre1+(t(X)%*%solve(sigma11)%*%X)
    beta_pre2=beta_pre2+c(t(X)%*%solve(sigma11)%*%(y))
  }
  sigma=sqrt(sigma_pre/m)
  beta=solve(beta_pre1)%*%beta_pre2
  print(sum(abs(beta-beta_pre)))
}
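# check (my addition): the EM fixed effects should be broadly comparable to pooled OLS
X_all=as.matrix(data[,grepl("^X\\.",colnames(data))])
print(cbind(EM=c(beta),OLS=c(solve(t(X_all)%*%X_all)%*%t(X_all)%*%data$y)))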



#p.210 additive Poisson model (Z is the incomplete data)

#one-step-late (OSL) algorithm for the penalized likelihood
d=10;n=20
lambda=sample(1:10,n,replace = T)
P=array(0,dim=c(n,d))
xi=0.1   # penalty weight
Z=array(0,dim=c(n,d))
for(i in 1:nrow(P)){
  for(j in 1:ncol(P)){
    P[i,j]=rpois(1,sample(10:100,1))
    Z[i,j]=rpois(1,sample(10:100,1))
  }
}
P=(P)/apply(P,1,sum);Q=apply(P,1,sum)

Y=apply(Z,2,sum)
Z_pre=Z
Z=array(0,dim=c(n,d))
for(i in 1:nrow(P)){
  for(j in 1:ncol(P)){
    Z[i,j]=rpois(1,sample(10:100,1))
  }
}

# penalty kernel K(lambda); its derivative enters the OSL denominator
kernel=function(x){
  return(exp(-x))
}

#iterations
ite=10000
diff_lambda=c()
diff_Z=c()

for(k in 1:ite){
  lambda_pre=lambda;Z_pre2=Z
  # E-step (as in the unpenalized model on p.60)
  for(i in 1:n){
    for(j in 1:d){
      Z[i,j]=Y[j]*lambda[i]*P[i,j]/sum(lambda*P[,j])
    }
  }
  # OSL M-step: the penalty derivative is evaluated at the current lambda
  # (approximated here by a forward difference with step h)
  h=0.01
  for(i in 1:n){
    lambda[i]=sum(Z[i,])/(sum(P[i,])+xi*(kernel(lambda[i]+h)-kernel(lambda[i]))/h)
  }
  #print(paste0("times:",k))
  print(sum(abs(lambda-lambda_pre)))
  diff_lambda=c(diff_lambda,sum(abs(lambda-lambda_pre)))
  diff_Z=c(diff_Z,sum(abs(Z-Z_pre2)))
}
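# my addition: inspect how the OSL iterations settle down
plot(diff_lambda,type="l",xlab="iteration",ylab="sum |lambda - lambda_pre|")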


