機械学習のための確率過程入門 (Introduction to Stochastic Processes for Machine Learning), 内山祐介 (Yusuke Uchiyama), オーム社 (Ohmsha)

Posted at 2024-03-07

#p.99 Gaussian process regression

R code for the Gaussian process regression example on p.99, applied to the airquality dataset: Ozone is regressed on Solar.R, Wind and Temp, and the mean coefficients and kernel hyperparameters are fitted by gradient ascent.

library(mvtnorm)
data("airquality")
data=airquality
data=na.omit(data)              # drop rows with missing values
test_dat=data[101:111,]         # hold out 11 rows as test data
data=data[-c(101:111),]
Y=as.numeric(data$Ozone)        # target: ozone concentration
X=data[,-c(1,5,6)]              # predictors: Solar.R, Wind, Temp
X=matrix(as.numeric(unlist(X)),nrow=nrow(X),ncol=ncol(X))

# pairwise squared Euclidean distances between rows of X
diff=array(0,dim=c(nrow(X),nrow(X)))
for(j in 1:nrow(X)){
diff[j,]=apply((t(X)-X[j,])^2,2,sum)
}

# kernel: a*exp(-L*d), with d the Euclidean distance (diff holds squared distances)
kernel=function(alpha,Ls){
return(alpha*exp(-Ls*sqrt(diff)))
}

# linear mean function mu(theta) = X %*% theta
mu=function(sitas){
return((X%*%sitas))
}

ite=10^4        # number of gradient steps
ave=apply(X,2,mean)
a=1             # kernel amplitude
L=1             # kernel decay parameter (inverse length scale)
# initial mean coefficients from a kernel-weighted least-squares fit
x=c(solve(t(X)%*%kernel(a,L)%*%X)%*%t(X)%*%kernel(a,L)%*%Y)
eta=0.1         # learning rate
ep=0            # jitter added to the diagonal of K

# gradient ascent on the log marginal likelihood w.r.t. theta (x), a and L
for(l in 1:ite){
pred=mu(x)
K=kernel(a,L)
invK=solve(K+diag(ep,nrow(K)))
dmu=c(Y-pred)%*%invK%*%X                          # gradient w.r.t. the mean coefficients
trKa=sum(diag(invK%*%kernel(1,L)))                # tr(K^-1 dK/da), with dK/da = K/a
trKL=sum(diag(-invK%*%(sqrt(diff)*kernel(a,L))))  # tr(K^-1 dK/dL), with dK/dL = -d*K
invKy=invK%*%(Y-pred)
da=as.numeric(-trKa/2+c(invKy)%*%kernel(1,L)%*%c(invKy)/2)
dL=as.numeric(-trKL/2-c(invKy)%*%(sqrt(diff)*kernel(a,L))%*%c(invKy)/2)
x_pre=x;a_pre=a
x=c(x+eta*dmu)
a=a+eta*da
L=L+eta*dL
loglik=as.numeric(c(Y-pred)%*%invK%*%c(Y-pred))   # quadratic form (y-mu)'K^-1(y-mu), printed for monitoring
print(loglik)
}
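
The printed loglik is only the quadratic form (y - mu)' K^-1 (y - mu). As an optional check that is not in the original code, the full Gaussian log marginal likelihood can be evaluated once after training:

# full Gaussian log marginal likelihood of the fitted model (optional check)
Kj=kernel(a,L)+diag(ep,nrow(X))
full_loglik=-as.numeric(determinant(Kj,logarithm=TRUE)$modulus)/2-
  as.numeric(c(Y-mu(x))%*%solve(Kj)%*%c(Y-mu(x)))/2-length(Y)*log(2*pi)/2
print(full_loglik)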



# Random draws of the predictions

# predictive variance at a new input xx: k(xx,xx) - k*' K^-1 k*
pred_kernel=function(xx){
return(as.numeric(a*exp(0)-c(a*exp(-L*sqrt(apply((t(X)-xx)^2,2,sum))))%*%solve(kernel(a,L))%*%c(a*exp(-L*sqrt(apply((t(X)-xx)^2,2,sum))))))
}

# predictive mean at a new input xx: m(xx) + k*' K^-1 (y - m(X))
pred_mu=function(xx){
return(sum(xx*x)+c(a*exp(-L*sqrt(apply((t(X)-xx)^2,2,sum))))%*%solve(kernel(a,L))%*%(Y-mu(x)))
}

library(mvtnorm)

# Check the fit on the training data
mu_vec=c()
kstar=c()
for(j in 1:nrow(X)){
vec=X[j,]  
mus=c(mu(x),pred_mu(vec))
mu_vec=c(mu_vec,pred_mu(vec))
kernels=cbind(kernel(a,L),c(a*exp(-L*sqrt(apply((t(X)-vec)^2,2,sum)))))
kernels=rbind(kernels,c(a*exp(-L*sqrt(apply((t(X)-vec)^2,2,sum))),a*exp(0)))
kstar=c(kstar,pred_kernel(vec))
}
train_mu_vec=mu_vec
train_k_star=kstar
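
As a quick visual check on the training rows (my own addition, not from the book), the predictive means can be plotted against the observed ozone values:

plot(Y,train_mu_vec,xlab="observed Ozone",ylab="GP predictive mean")   # points near the line y = x indicate a good fit
abline(0,1)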

# Check on the held-out test data
testY=test_dat$Ozone
mu_vec=c()
kstar=c()
rvar=c()
for(j in 1:nrow(test_dat)){
vec=c(unlist(test_dat[j,2:4]))                # Solar.R, Wind, Temp of the test row
mus=c(mu(x),pred_mu(vec))                     # joint mean of the training outputs and the test point
mu_vec=c(mu_vec,pred_mu(vec))
kernels=cbind(kernel(a,L),c(a*exp(-L*sqrt(apply((t(X)-vec)^2,2,sum)))))
kernels=rbind(kernels,c(a*exp(-L*sqrt(apply((t(X)-vec)^2,2,sum))),a*exp(0)))   # joint covariance
kstar=c(kstar,pred_kernel(vec))               # predictive variance at the test point
rv=rmvnorm(1, mean = mus, sigma = kernels)    # one draw from the joint Gaussian
rvar=c(rvar,rv[length(rv)])                   # keep the sampled value at the test point
}
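
A rough accuracy check on the held-out rows, again my own addition rather than part of the book code:

print(sqrt(mean((testY-mu_vec)^2)))   # RMSE of the predictive mean on the test rows
print(cbind(observed=testY,pred_mean=mu_vec,pred_var=kstar,sample=rvar))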


#p.99 Gaussian process regression with gamma-distribution regularization

The same model as above, with the gradients of gamma log-priors on a and L added to the updates as a regularizer.

library(mvtnorm)
data("airquality")
data=airquality
data=na.omit(data)
test_dat=data[101:111,]
data=data[-c(101:111),]
Y=as.numeric(data$Ozone)
X=data[,-c(1,5,6)]
X=matrix((as.numeric(unlist(X))),nrow=nrow(X),ncol=ncol(X))

diff=array(0,dim=c(nrow(X),nrow(X)))
for(j in 1:nrow(X)){
diff[j,]=apply((t(X)-X[j,])^2,2,sum)  
}

# kernel: a*exp(-L*d), with d the Euclidean distance
kernel=function(alpha,Ls){
return(alpha*exp(-Ls*sqrt(diff)))  
}

mu=function(sitas){
return((X%*%sitas))  
}

ite=1000
ave=apply(X,2,mean)
a=1
L=0.5
x=c(solve(t(X)%*%kernel(a,L)%*%X)%*%t(X)%*%kernel(a,L)%*%Y)
#x=rep(1,3)
eta=0.1
ep=0.1

# gamma-distribution regularization terms
# for the amplitude a
scale_a=100
shape_a=1

# for the decay parameter L
scale_L=5
shape_L=4
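
For reference, a sketch of where the regularization terms in the loop below come from: they are the derivatives (shape-1)/x - 1/scale of gamma log-priors on a and L (log_gamma_prior is my own helper name, not used elsewhere in the code).

# log-density of a Gamma(shape, scale) prior, dropping constants;
# its derivative (shape-1)/z - 1/scale is what gets added to da and dL below
log_gamma_prior=function(z,shape,scale){
return((shape-1)*log(z)-z/scale)
}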

for(l in 1:ite){
pred=mu(x)
K=kernel(a,L)
invK=solve(K+diag(ep,nrow(K)))
dmu=c(Y-pred)%*%invK%*%X
trKa=sum(diag(invK%*%kernel(1,L)))                # tr(K^-1 dK/da)
trKL=sum(diag(-invK%*%(sqrt(diff)*kernel(a,L))))  # tr(K^-1 dK/dL), with dK/dL = -d*K
invKy=invK%*%(Y-pred)
da=as.numeric(-trKa/2+c(invKy)%*%kernel(1,L)%*%c(invKy)/2)
dL=as.numeric(-trKL/2-c(invKy)%*%(sqrt(diff)*kernel(a,L))%*%c(invKy)/2)

# regularization: gradients of the gamma log-priors, (shape-1)/x - 1/scale
da=da+((shape_a-1)/a-1/scale_a)
dL=dL+((shape_L-1)/L-1/scale_L)

x=c(x+eta*dmu)
a=a+eta*da
L=L+eta*dL
loglik=as.numeric(c(Y-pred)%*%invK%*%c(Y-pred))   # quadratic form, printed for monitoring
print(loglik)
}
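
After training it is worth printing the learned hyperparameters (my addition): the gamma prior on L has its mode at (shape_L-1)*scale_L = 15, so L is pulled toward values of that order, while the shape_a=1 prior on a only adds a mild -1/scale_a shrinkage.

print(c(amplitude=a,decay=L))   # kernel hyperparameters after the regularized fit
print(x)                        # fitted mean coefficients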



# Random draws of the predictions
pred_kernel=function(xx){
return(as.numeric(a*exp(0)-c(a*exp(-L*sqrt(apply((t(X)-xx)^2,2,sum))))%*%solve(kernel(a,L))%*%c(a*exp(-L*sqrt(apply((t(X)-xx)^2,2,sum))))))  
}

pred_mu=function(xx){
return(sum(xx*x)+c(a*exp(-L*sqrt(apply((t(X)-xx)^2,2,sum))))%*%solve(kernel(a,L))%*%(Y-mu(x)))}  

library(mvtnorm)

# Check the fit on the training data
mu_vec=c()
kstar=c()
for(j in 1:nrow(X)){
vec=X[j,]  
mus=c(mu(x),pred_mu(vec))
mu_vec=c(mu_vec,pred_mu(vec))
kernels=cbind(kernel(a,L),c(a*exp(-L*sqrt(apply((t(X)-vec)^2,2,sum)))))
kernels=rbind(kernels,c(a*exp(-L*sqrt(apply((t(X)-vec)^2,2,sum))),a*exp(0)))
kstar=c(kstar,pred_kernel(vec))
}
train_mu_vec=mu_vec
train_k_star=kstar

# Check on the held-out test data
testY=test_dat$Ozone
mu_vec=c()
kstar=c()
rvar=c()
for(j in 1:nrow(test_dat)){
vec=c(unlist(test_dat[j,2:4]))
mus=c(mu(x),pred_mu(vec))
mu_vec=c(mu_vec,pred_mu(vec))
kernels=cbind(kernel(a,L),c(a*exp(-L*sqrt(apply((t(X)-vec)^2,2,sum)))))
kernels=rbind(kernels,c(a*exp(-L*sqrt(apply((t(X)-vec)^2,2,sum))),a*exp(0)))
kstar=c(kstar,pred_kernel(vec))
rv=rmvnorm(1, mean = mus, sigma = kernels)
rvar=c(rvar,rv[length(rv)])
}


#p.109 Student-t process regression

The Gaussian process is replaced by a Student-t process: the marginal likelihood is a multivariate t distribution with degrees of freedom v, which is also fitted by gradient ascent.

library(mvtnorm)
data("airquality")
data=airquality
data=na.omit(data)
test_dat=data[101:111,]
data=data[-c(101:111),]
Y=as.numeric(data$Ozone)
X=data[,-c(1,5,6)]
X=matrix((as.numeric(unlist(X))),nrow=nrow(X),ncol=ncol(X))
N=length(Y)

diff=array(0,dim=c(nrow(X),nrow(X)))
for(j in 1:nrow(X)){
diff[j,]=apply((t(X)-X[j,])^2,2,sum)  
}

# kernel: a*exp(-L*d^2), a Gaussian (RBF) function of the squared distance
kernel=function(alpha,Ls){
return(alpha*exp(-Ls*diff))  
}

mu=function(sitas){
return((X%*%sitas))  
}

ite=2*10^4      # number of gradient steps
v=1             # degrees of freedom of the t-process
a=1             # kernel amplitude
L=1             # kernel decay parameter
x=rep(1,3)      # initial mean coefficients
eta=10^(-6)     # learning rate
ep=0            # jitter added to the diagonal of K

# gradient ascent on the multivariate-t log likelihood w.r.t. theta (x), a, L and v
for(l in 1:ite){
pred=mu(x)
K=kernel(a,L)
invK=solve(K+diag(ep,nrow(K)))
d=as.numeric(c(Y-pred)%*%invK%*%c(Y-pred))        # Mahalanobis term (y-mu)'K^-1(y-mu)
dmu=((v+N)/(1+d/v))*c(Y-pred)%*%invK%*%X
trKa=sum(diag(invK%*%kernel(1,L)))                # tr(K^-1 dK/da)
trKL=sum(diag(-invK%*%(diff*kernel(a,L))))        # tr(K^-1 dK/dL), with dK/dL = -diff*K
invKy=invK%*%(Y-pred)
da=as.numeric(-trKa/2+(v+N)/(2*(1+d/v))*c(invKy)%*%kernel(1,L)%*%c(invKy))
dL=as.numeric(-trKL/2-(v+N)/(2*(1+d/v))*c(invKy)%*%(diff*kernel(a,L))%*%c(invKy))
dv=-N/(2*v)+digamma((v+N)/2)/2-digamma(v/2)/2-log(1+d/v)/2+(d/(1+d/v))*(v+N)/(2*v^2)   # gradient w.r.t. the degrees of freedom
x_pre=x;a_pre=a
x=c(x+eta*dmu)
a=a+eta*da
v=v+eta*dv
L=L+eta*dL

# multivariate-t log likelihood, printed to monitor convergence
loglik=-N*log(pi*v)/2-as.numeric(determinant(K,logarithm=TRUE)$modulus)/2+lgamma((v+N)/2)-lgamma(v/2)-(v+N)*log(1+d/v)/2
print(loglik)
}
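
Printing the fitted parameters (my addition) shows how heavy-tailed the learned process is; a small degrees-of-freedom value v means much heavier tails than the Gaussian fits above.

print(c(amplitude=a,decay=L,df=v))   # kernel hyperparameters and degrees of freedom
print(x)                             # fitted mean coefficients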


# Random draws of the predictions
pred_kernel=function(xx){
return(as.numeric(a*exp(0)-c(a*exp(-L*apply((t(X)-xx)^2,2,sum)))%*%solve(kernel(a,L))%*%c(a*exp(-L*apply((t(X)-xx)^2,2,sum)))))  
}

pred_mu=function(xx){
return(sum(xx*x)+c(a*exp(-L*apply((t(X)-xx)^2,2,sum)))%*%solve(kernel(a,L))%*%(Y-mu(x)))}  

library(mvnfast)
library(LaplacesDemon)

# Check the fit on the training data
mu_vec=c()
kstar=c()
for(j in 1:nrow(X)){
vec=X[j,]  
mus=c(mu(x),pred_mu(vec))
mu_vec=c(mu_vec,pred_mu(vec))
kernels=cbind(kernel(a,L),c(a*exp(-L*apply((t(X)-vec)^2,2,sum))))
kernels=rbind(kernels,c(a*exp(-L*apply((t(X)-vec)^2,2,sum)),a*exp(0)))
kstar=c(kstar,pred_kernel(vec))
}
train_mu_vec=mu_vec
train_k_star=kstar

# Check on the held-out test data
testY=test_dat$Ozone
mu_vec=c()
kstar=c()
rvar=c()
for(j in 1:nrow(test_dat)){
vec=c(unlist(test_dat[j,2:4]))
mus=c(mu(x),pred_mu(vec))
mu_vec=c(mu_vec,pred_mu(vec))
kernels=cbind(kernel(a,L),c(a*exp(-L*apply((t(X)-vec)^2,2,sum))))
kernels=rbind(kernels,c(a*exp(-L*apply((t(X)-vec)^2,2,sum)),a*exp(0)))
kstar=c(kstar,pred_kernel(vec))
rv=mvnfast::rmvt(1, mu = mus, sigma = kernels, df = v)   # one draw from the joint multivariate t
rvar=c(rvar,rv[length(rv)])                              # keep the sampled value at the test point
}
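
The same kind of held-out comparison as before, again my own addition:

print(sqrt(mean((testY-mu_vec)^2)))   # RMSE of the t-process predictive mean
print(cbind(observed=testY,pred_mean=mu_vec,sample=rvar))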
