# MCMC in two-component normal mixture, common variance
# in this version the prior on the means is U(.|-5,5)
# for both mu0 and mu1, but then conditions on mu0 < mu1

# simulate a data set from the mixture
# NOTE: the original simulation settings were garbled in this copy --
# the values of n, mux0, mux1, sigmax and probx below are assumed
# placeholders (chosen inside the prior ranges), not the original ones
n <- 100                          # sample size (even, for the z start below)
mux0 <- -2; mux1 <- 2             # true component means
sigmax <- 1                       # true common standard deviation
probx <- .5                       # true weight on component 0
zx <- rbinom(n, 1, 1-probx)       # true indicators (zx=1 => component 1)
y <- sort(rnorm(n, (1-zx)*mux0 + zx*mux1, sigmax))

# Gibbs sampling for inference in above problem
# conditional priors U(mu0|-5,mu1), U(mu1|mu0,5) on the means,
# U(0,1) on pi and U(0,10) for sigma

# MC sample size, burn-in and starting values
nit <- 200
nmc <- 5000
mu0 <- -3; mu1 <- 3; sigma <- 2; prob <- .5
mu0.s <- mu1.s <- prob.s <- sigma.s <- NULL
z <- c(rep(0, n/2), rep(1, n/2))  # start with half the data in each component
t1 <- sum(z)
t0 <- n - t1

# MCMC iterations
for (i in 1:(nit+nmc)) {
  # conditional posterior means and sds of mu0, mu1 given z
  m1 <- sum(z*y)/t1
  m0 <- sum((1-z)*y)/t0
  v1 <- sigma/sqrt(t1)
  v0 <- sigma/sqrt(t0)
  # sample mus subject to -5 < mu0 < mu1 < 5
  # .. conditionally mu1|mu0 then mu0|mu1, by inverting the normal cdf
  a <- pnorm((mu0-m1)/v1)
  b <- pnorm((  5-m1)/v1)
  mu1 <- m1 + v1*qnorm(a + runif(1)*(b-a))
  a <- pnorm(( -5-m0)/v0)
  b <- pnorm((mu1-m0)/v0)
  mu0 <- m0 + v0*qnorm(a + runif(1)*(b-a))
  mu0.s <- c(mu0.s, mu0)
  mu1.s <- c(mu1.s, mu1)
  # sample sigma -- use inverse of truncated gamma cdf:
  # (q/2)/sigma^2 ~ Gamma((n-1)/2), truncated so that sigma < 10
  e <- y - ((1-z)*mu0 + z*mu1); q <- sum(e*e)
  a <- pgamma((1/100)*(q/2), (n-1)/2)
  sigma <- 1/sqrt(qgamma(a + runif(1)*(1-a), (n-1)/2)/(q/2))
  sigma.s <- c(sigma.s, sigma)
  # sample prob, the weight on component 0 (uniform prior => beta posterior)
  prob <- rbeta(1, t0+1, t1+1)
  prob.s <- c(prob.s, prob)
  # then sample new component indicators
  q <- (y-mu0)^2 - (y-mu1)^2
  qz <- (1-prob)/(1-prob + prob*exp(-q/(2*sigma^2)))  # P(z=1 | y, parameters)
  z <- runif(n) <= qz
  t1 <- sum(z); t0 <- n - t1
  # check that each component has some data, or reassign if not ...
  while (t0 == 0 || t1 == 0) { z <- runif(n) <= qz; t1 <- sum(z); t0 <- n - t1 }
}

# remove initial MCMC iterations
mu0.s <- mu0.s[-(1:nit)]
mu1.s <- mu1.s[-(1:nit)]
prob.s <- prob.s[-(1:nit)]
sigma.s <- sigma.s[-(1:nit)]

# plot MCMC iterations at start to check convergence
# postscript(file="mix2.ps",horizontal=T)
postscript(file="a.ps", horizontal=TRUE)
a <- range(c(mu0.s, mu1.s))

# histograms of posterior samples
par(mfrow=c(2,2), bty='n')
hist(y, nclass=20, probability=TRUE, xlab='data y')
x <- seq(mux0 - 3*sigmax, mux1 + 3*sigmax, length=100)
lines(x, probx*dnorm(x, mux0, sigmax) +
         (1-probx)*dnorm(x, mux1, sigmax), lwd=2.5)
hist(mu0.s, nclass=20, xlim=a, probability=TRUE, xlab='mu0')
hist(mu1.s, nclass=20, xlim=a, probability=TRUE, xlab='mu1')
hist(prob.s, nclass=20, probability=TRUE, xlab='pi')

# bivariate summaries of (mu0,mu1)
# hist2d() here is from the gplots package, standing in for the
# hist2d used by the original script
library(gplots)
par(mfrow=c(2,2), bty='n', pty='s', oma=c(0,0,0,0), mar=c(4,4,4,4))
h <- hist2d(mu0.s, mu1.s, nbins=c(17,17), show=FALSE)
persp(h$x, h$y, h$counts, xlab='mu0', ylab='mu1', zlab='')
contour(h$x, h$y, h$counts, xlab='mu0', ylab='mu1', nlevels=10)
plot(mu0.s, mu1.s, xlab='mu0', ylab='mu1')
par(mfrow=c(1,1), bty='n', oma=c(4,4,4,4), pty='s')

# plot all saved MCMC iterations to check convergence
par(mfrow=c(3,1), bty='n', pty='r', oma=c(1,1,1,1))
plot(mu0.s, type='l', xlab="MCMC iteration", ylab="mu0")
plot(mu1.s, type='l', xlab="MCMC iteration", ylab="mu1")
plot(prob.s, type='l', xlab="MCMC iteration", ylab="pi")

# now sample and plot the predictive distribution ...
# (the source is truncated here, after the fragment "z_prob.s")
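# A minimal sketch of the announced predictive step, since the original
# code breaks off above: composition sampling, drawing one future
# observation per saved posterior draw.  This is an assumed
# reconstruction, not the original code.
z.pred <- rbinom(nmc, 1, 1-prob.s)   # indicator: P(z=1) = 1-pi, per draw
y.pred <- rnorm(nmc, (1-z.pred)*mu0.s + z.pred*mu1.s, sigma.s)
par(mfrow=c(1,1), bty='n')
hist(y.pred, nclass=30, probability=TRUE, xlab='predictive y')
dev.off()                            # close the postscript device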
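# For reference: the mu draws in the loop use the inverse-cdf method for
# truncated normal sampling.  A standalone version of the same trick (the
# helper name rtnorm1 is introduced here for illustration; it is not part
# of the original script):
rtnorm1 <- function(m, v, lo, hi) {
  # one draw from N(m, v^2) restricted to (lo, hi): map a uniform draw
  # through the interval of cdf values covered by (lo, hi)
  a <- pnorm((lo - m)/v)
  b <- pnorm((hi - m)/v)
  m + v*qnorm(a + runif(1)*(b - a))
}
# e.g. the mu1 update above is equivalent to rtnorm1(m1, sigma/sqrt(t1), mu0, 5);
# the sigma update applies the same idea with pgamma/qgamma in place of
# pnorm/qnorm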