# Rat tumors example from Section 5.3 of Gelman et al

# Rat tumors example from Section 5.3 of Gelman et al

# (Hierarchical model with Binomial likelihood)

# Start from a clean workspace and make the simulation reproducible.
# boot provides logit() and inv.logit(), used throughout this script.
rm(list = ls())
library(boot)
set.seed(20210204)

# Read in data (you can download Rats.txt off Courseworks)

# ---- Read in data (you can download Rats.txt off Courseworks) ----
Data <- read.table("~/Bayes/Data/Rats.txt", header = TRUE)
y <- Data$y   # tumor counts, one per experiment
n <- Data$n   # number of rats, one per experiment
rm(Data)

Sim.size <- 1000  # number of posterior draws taken later

# Method-of-moments estimates of (mu, psi), where mu = alpha/(alpha+beta)
# is the prior mean and psi = alpha+beta the prior "sample size".
# Used only to locate the region where the posterior has mass.
mu <- mean(y / n)
psi <- mu * (1 - mu) / var(y / n) - 1
logit(mu)   # rough center for the logit(mu) grid
log(psi)    # rough center for the log(psi) grid
rm(mu)
rm(psi)

# Take 5 orders of magnitude in each direction
logit.mu <- seq(-12, 8, .20)
length(logit.mu)
log.psi <- seq(-10, 14, .20)
length(log.psi)

# Compute log.posterior for logit.mu in [-12,8], log.psi in [-10,14].
# That should be plenty.

# Log-posterior log p(alpha, beta | y), up to an additive constant,
# under the prior p(alpha, beta) propto (alpha + beta)^(-5/2)
# (Gelman et al., Section 5.3, Binomial likelihood with Beta prior
# on the theta_j integrated out).
#   alpha, beta : Beta hyperparameters (positive scalars)
#   y, n        : per-experiment tumor counts and group sizes
# Returns an unnormalized scalar log density.
log.post.fun <- function(alpha, beta, y, n) {
  J <- length(y)
  lpost <- -2.5 * log(alpha + beta)
  lpost <- lpost - J * lbeta(alpha, beta)
  lpost <- lpost + sum(lbeta(alpha + y, beta + n - y))
  lpost
}

# Evaluate the log-posterior over the (logit.mu, log.psi) grid.
# log(alpha) + log(beta) is the log-Jacobian of the change of variables
# from (alpha, beta) to (logit(mu), log(psi)).
I <- length(logit.mu)
J <- length(log.psi)
log.post <- matrix(NA, I, J)
for (i in seq_len(I)) {
  for (j in seq_len(J)) {
    mu <- inv.logit(logit.mu[i])
    psi <- exp(log.psi[j])
    alpha <- mu * psi
    beta <- (1 - mu) * psi
    log.post[i, j] <- log(alpha) + log(beta) + log.post.fun(alpha, beta, y, n)
  }
}

# Subtract the maximum before exponentiating to avoid underflow.
maxie <- max(log.post)
maxie
log.post <- log.post - maxie
rm(maxie)
post <- exp(log.post)
rm(log.post)

# Contour plot of the (unnormalized) posterior
contours <- c(.001, .01, seq(.05, .95, .10))
contour(logit.mu, log.psi, post, levels = contours, drawlabels = FALSE,
        xlab = "logit(mu)", ylab = "log(psi)")

# Clearly we are working in way too big a region, so let's shrink it down.
# Lots of trial and error not shown here.
# Ultimately get it to logit.mu in [-2.4,-1.2], log.psi in [1.0,5.0]
logit.mu <- seq(-2.4, -1.2, .01)
I <- length(logit.mu)
I
log.psi <- seq(1, 5, .02)
J <- length(log.psi)
J

# Recompute the log-posterior on the refined grid (same change of
# variables and Jacobian term log(alpha) + log(beta) as before).
log.post <- matrix(NA, I, J)
for (i in seq_len(I)) {
  for (j in seq_len(J)) {
    mu <- inv.logit(logit.mu[i])
    psi <- exp(log.psi[j])
    alpha <- mu * psi
    beta <- (1 - mu) * psi
    log.post[i, j] <- log(alpha) + log(beta) + log.post.fun(alpha, beta, y, n)
  }
}
maxie <- max(log.post)
maxie
log.post <- log.post - maxie
rm(maxie)
post <- exp(log.post)
rm(log.post)

# Contour on the refined grid
contour(logit.mu, log.psi, post, levels = contours, drawlabels = FALSE,
        xlab = "logit(mu)", ylab = "log(psi)")

# ---- Sampling from the posterior ----
# Draw a grid cell (first from the logit.mu margin, then log.psi given
# that column), then jitter uniformly within the cell so the draws are
# continuous rather than confined to grid points.
delta <- (logit.mu[2] - logit.mu[1]) / 2
epsilon <- (log.psi[2] - log.psi[1]) / 2
post.lm <- rowSums(post)  # marginal posterior of logit.mu
S <- Sim.size
logit.mu.sim <- rep(NA, S)
log.psi.sim <- rep(NA, S)
for (s in seq_len(S)) {
  i <- sample(I, 1, prob = post.lm)
  j <- sample(J, 1, prob = post[i, ])
  logit.mu.sim[s] <- logit.mu[i] + runif(1, -delta, delta)
  log.psi.sim[s] <- log.psi[j] + runif(1, -epsilon, epsilon)
}
plot(logit.mu.sim, log.psi.sim)

# Put contours and scatterplot in same display
contour(logit.mu, log.psi, post, levels = contours, drawlabels = FALSE,
        xlab = "log(alpha/beta)", ylab = "log(alpha+beta)")
points(logit.mu.sim, log.psi.sim, cex = .5, pch = 19)

# ---- Posterior inference for tumor probabilities: the theta_j ----
# Given (alpha, beta), theta_j | y ~ Beta(alpha + y_j, beta + n_j - y_j);
# draw one theta vector for each simulated (alpha, beta) pair.
J <- length(y)  # NOTE: J now counts experiments, not grid columns
J
mu.sim <- inv.logit(logit.mu.sim)
psi.sim <- exp(log.psi.sim)
alpha.sim <- mu.sim * psi.sim
beta.sim <- (1 - mu.sim) * psi.sim
theta.sim <- rbeta(J * S,
                   shape1 = outer(y, alpha.sim, "+"),
                   shape2 = outer(n - y, beta.sim, "+"))
theta.sim <- matrix(theta.sim, J, S)  # rows = experiments, cols = draws

# Posterior medians and 90% intervals against the observed rates
meds <- apply(theta.sim, 1, median)
ints <- apply(theta.sim, 1, quantile, prob = c(.05, .95))
obs <- jitter(y / n, factor = 50)  # jitter x to reduce overplotting
plot(obs, meds, xlim = range(ints), ylim = range(ints), pch = 19, cex = .50,
     xlab = "Observed rate", ylab = "90% posterior interval")
abline(0, 1, lty = 2)
abline(h = sum(y) / sum(n), lty = 2)  # pooled overall tumor rate
for (j in seq_len(J)) {
  lines(rep(obs[j], 2), ints[, j])
}
# Figure 5.4 on page 113 (but with 90% intervals)