R : Copyright 2004, The R Foundation for Statistical Computing
Version 1.9.1 (2004-06-21), ISBN 3-900051-00-3

R is free software and comes with ABSOLUTELY NO WARRANTY.
You are welcome to redistribute it under certain conditions.
Type 'license()' or 'licence()' for distribution details.

R is a collaborative project with many contributors.
Type 'contributors()' for more information and
'citation()' on how to cite R in publications.

Type 'demo()' for some demos, 'help()' for on-line help, or
'help.start()' for a HTML browser interface to help.
Type 'q()' to quit R.

> # ----------------------------------------------------------------------
> #
> # PURPOSE: Compute the relative efficiency for overdispersed count data
> #
> # AUTHOR:  P. Heagerty/D. Gillen
> #
> # DATE:    27Jan2002
> #          02Feb2007
> #
> # ----------------------------------------------------------------------
> #
> ##### Function to sample data
> #
> getData <- function( beta ){
+     nobs <- 200
+     #
+     ##### a linear predictor
+     #
+     x1 <- (c(1:nobs)-nobs/2)/(nobs/2)
+     #
+     ##### a binary predictor
+     #
+     x2 <- rep( c(0,1), c(nobs,nobs)/2 )
+     #
+     X <- cbind( 1, x1, x2 )
+     mu <- exp( X %*% beta )
+     data <- list( X, mu )
+     names( data ) <- c( "X", "mu" )
+     data
+ }
> #
> #####
> ##### Part (a): ARE when true model is N-B
> #####
> #
> ##### Function to calculate ARE when true model is N-B
> #
> areNB <- function( beta, alpha ){
+     data <- getData( beta )
+     X  <- as.matrix( data$X )
+     mu <- as.vector( data$mu )
+     A.poisson <- t(X) %*% diag( mu ) %*% X
+     B.true    <- t(X) %*% diag( mu*(1+alpha*mu) ) %*% X
+     A.negbin  <- t(X) %*% diag( mu/(1+alpha*mu) ) %*% X
+     #
+     se.poisson <- sqrt( diag( solve(A.poisson) %*% B.true %*% solve(A.poisson) ) )
+     se.optimal <- sqrt( diag( solve(A.negbin) ) )
+     #
+     cbind( se.poisson, se.optimal, (se.optimal)^2 / (se.poisson)^2 )
+ }
> #
> areNB( beta=c( 1, 1, .5 ), alpha=.25 )
   se.poisson se.optimal
    0.1224201  0.1209440 0.9760290
x1  0.1892193  0.1800327 0.9052574
x2  0.2122920  0.2070426 0.9511566
> areNB( beta=c( 1, 1, .5 ), alpha=.50 )
   se.poisson se.optimal
    0.1480222  0.1447846 0.9567334
x1  0.2414323  0.2216376 0.8427451
x2  0.2640731  0.2527474 0.9160622
> areNB( beta=c( 1, 1, .5 ), alpha=1.0 )
   se.poisson se.optimal
    0.1890987  0.1829376 0.9358984
x1  0.3213422  0.2841420 0.7818714
x2  0.3450624  0.3237958 0.8805365
> #
> #####
> ##### Part (b): Effect of changing beta
> #####
> #
> areNB( beta=c( 1, 0.0, .5 ), alpha=1.0 )
   se.poisson se.optimal
    0.1826141  0.1807405 0.9795854
x1  0.2833264  0.2783800 0.9653881
x2  0.3258588  0.3215673 0.9738338
> areNB( beta=c( 1, 0.5, .5 ), alpha=1.0 )
   se.poisson se.optimal
    0.1856275  0.1815567 0.9566209
x1  0.2987967  0.2803417 0.8802866
x2  0.3338151  0.3224053 0.9328081
> areNB( beta=c( 1, 1.0, .5 ), alpha=1.0 )
   se.poisson se.optimal
    0.1890987  0.1829376 0.9358984
x1  0.3213422  0.2841420 0.7818714
x2  0.3450624  0.3237958 0.8805365
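In each table above, the unlabelled third column is the relative efficiency (se.optimal/se.poisson)^2, i.e., how much asymptotic precision the Poisson fit retains relative to the optimal negative-binomial weighting. As an illustrative aside (not part of the original session), areNB() can be wrapped to trace this efficiency over a grid of alpha values; the helper name areSweep is hypothetical.

    ## Illustrative sketch only: sweep the overdispersion parameter and collect
    ## the relative efficiency (third column of the areNB output) for each coefficient.
    areSweep <- function( beta, alphas ){
        eff <- sapply( alphas, function( a ) areNB( beta, a )[ , 3 ] )
        rownames( eff ) <- c( "(Intercept)", "x1", "x2" )
        colnames( eff ) <- paste( "alpha=", alphas, sep="" )
        eff
    }
    ## e.g. areSweep( beta=c(1,1,.5), alphas=c(.25,.5,1) ) reproduces the third
    ## columns of the three areNB() calls in Part (a).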
> #
> #####
> ##### Part (c): What if really scale and we fit negbin?
> #####
> #
> ##### Function to calculate ARE when true model is scale, but N-B is fit
> #
> areScale <- function( beta, scale ){
+     data <- getData( beta )
+     X  <- as.matrix( data$X )
+     mu <- as.vector( data$mu )
+     A.poisson <- t(X) %*% diag( mu ) %*% X
+     #
+     alpha <- mean( (scale-1)/mu )
+     alpha <- (scale-1)/mean( mu )
+     A.negbin <- t(X) %*% diag( mu/(1+alpha*mu) ) %*% X
+     B.true   <- t(X) %*% diag( scale*mu/(1+alpha*mu)^2 ) %*% X
+     se.negbin  <- sqrt( diag( solve(A.negbin) %*% B.true %*% solve(A.negbin) ) )
+     se.optimal <- sqrt( diag( scale * solve(A.poisson) ) )
+     #
+     cbind( se.negbin, se.optimal, (se.optimal)^2 / (se.negbin)^2 )
+ }
> #
> areScale( beta=c( 1, 1, .5 ), scale=2.0 )
   se.negbin se.optimal
   0.1280890  0.1269867 0.9828624
x1 0.1717002  0.1632073 0.9035200
x2 0.2063138  0.2019959 0.9585809
> areScale( beta=c( 1, 1, .5 ), scale=3.0 )
   se.negbin se.optimal
   0.1581635  0.1555263 0.9669295
x1 0.2204717  0.1998874 0.8219871
x2 0.2577708  0.2473935 0.9211052
> areScale( beta=c( 1, 1, .5 ), scale=4.0 )
   se.negbin se.optimal
   0.1837370  0.1795862 0.9553294
x1 0.2630849  0.2308100 0.7696932
x2 0.3019721  0.2856654 0.8949144
> #
> #####
> ##### Part (d): Verification via simulation
> #####
> #
> ####### (1) alpha = 1.0
> #
> nobs <- 200
> #
> ##### a linear predictor
> #
> x1 <- (c(1:nobs)-nobs/2)/(nobs/2)
> #
> ##### a binary predictor
> #
> x2 <- rep( c(0,1), c(nobs,nobs)/2 )
> #
> mu <- exp( 1.0 + 1.0*x1 + 0.5*x2 )
> alpha <- 1.0
> #
> Nsim <- 1000
> beta0 <- matrix( NA, Nsim, 3 )
> beta1 <- matrix( NA, Nsim, 3 )
> beta2 <- matrix( NA, Nsim, 3 )
> #
> library(MASS)
> for( j in 1:Nsim ){
+     #
+     ##### generate data
+     #
+     bbb <- rgamma( rep(1,nobs), shape=1/alpha ) * alpha
+     y <- rpois( rep(1,nobs), lambda=mu*bbb )
+     #
+     fit0 <- glm( y ~ x1 + x2, family=poisson )
+     beta0[j,] <- fit0$coef
+     #
+     ####
+     #### Using GLM and weights
+     ####
+     #
+     maxits <- 10
+     converge <- F
+     fit <- fit0
+     count <- 0
+     while( !converge & count
> #
> s0 <- sqrt( diag( var( beta0 ) ) )
> s1 <- sqrt( diag( var( beta1 ) ) )
> s2 <- sqrt( diag( var( beta2 ) ) )
> print( cbind( s0, s1, s2, (s1/s0)^2, (s2/s0)^2 ) )
            s0        s1        s2
[1,] 0.2044217 0.1977598 0.1978523 0.9358840 0.9367593
[2,] 0.3343063 0.2909473 0.2911912 0.7574249 0.7586952
[3,] 0.3745006 0.3493469 0.3495659 0.8701796 0.8712710
> #
> ####### (2) scale = 4.0
> #
> scale <- 4.0
> #
> Nsim <- 1000
> beta0 <- matrix( NA, Nsim, 3 )
> beta1 <- matrix( NA, Nsim, 3 )
> beta2 <- matrix( NA, Nsim, 3 )
> #
> for( j in 1:Nsim ){
+     #
+     ##### generate data
+     #
+     bbb <- rgamma( rep(1,nobs), shape=mu/(scale-1) ) * ( (scale-1)/mu )
+     y <- rpois( rep(1,nobs), lambda=mu*bbb )
+     #
+     fit0 <- glm( y ~ x1 + x2, family=poisson )
+     mu0 <- fitted(fit0)
+     beta0[j,] <- fit0$coef
+     #
+     ####
+     #### Using GLM and weights
+     ####
+     #
+     maxits <- 10
+     converge <- F
+     fit <- fit0
+     count <- 0
+     while( !converge & count
> #
> s0 <- sqrt( diag( var( beta0 ) ) )
> s1 <- sqrt( diag( var( beta1 ) ) )
> s2 <- sqrt( diag( var( beta2 ) ) )
> print( cbind( s0, s1, s2, (s0/s1)^2, (s0/s2)^2 ) )
            s0        s1        s2
[1,] 0.1776494 0.1854413 0.1835163 0.9177292 0.9370836
[2,] 0.2360241 0.2840994 0.2707311 0.6901954 0.7600402
[3,] 0.2832774 0.3108544 0.3035469 0.8304432 0.8709080
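Note that the inner fitting loop in each simulation is cut off in this transcript after `while( !converge & count`, so the loop body that produces beta1 and beta2 is not shown (the same truncation occurs in Part (e) below). The following is only a hedged sketch of one way such a loop could be completed: it assumes a moment estimator for alpha, a weighted Poisson GLM with weights 1/(1 + alpha*mu) for beta1, and glm.nb() from MASS (loaded above) for beta2, and it reuses y, x1, x2, fit0, maxits, converge, fit, count, beta1, beta2, and j from the surrounding loop.

    ## Hedged sketch only -- not the original code; assumptions as noted above.
    while( !converge & count < maxits ){
        mu.hat <- fitted( fit )
        ## moment estimate of alpha from Var(Y) = mu*(1 + alpha*mu),
        ## guarded against negative values
        alpha.hat <- mean( ( (y - mu.hat)^2 - mu.hat ) / mu.hat^2 )
        alpha.hat <- max( alpha.hat, 0 )
        ## re-solve the Poisson score with negative-binomial weights 1/(1 + alpha*mu)
        fit.new <- glm( y ~ x1 + x2, family=poisson,
                        weights=1/(1 + alpha.hat*mu.hat) )
        converge <- max( abs( coef(fit.new) - coef(fit) ) ) < 1e-6
        fit <- fit.new
        count <- count + 1
    }
    beta1[j,] <- coef( fit )                    # iteratively reweighted estimate
    beta2[j,] <- coef( glm.nb( y ~ x1 + x2 ) )  # direct negative-binomial fit (MASS)

In Part (e), the moment estimate aaa <- mean( (r2 - 1)/mu0 ) computed before the loop presumably serves as the starting value for alpha in the same kind of iteration.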
> #
> #####
> ##### Part (e): Effect of estimation of alpha
> #####
> #
> alpha <- 1.0
> #
> Nsim <- 1000
> beta0 <- matrix( NA, Nsim, 3 )
> scale0 <- rep( NA, Nsim )
> beta1 <- matrix( NA, Nsim, 3 )
> alpha1 <- rep( NA, Nsim )
> beta2 <- matrix( NA, Nsim, 3 )
> alpha2 <- rep( NA, Nsim )
> #
> for( j in 1:Nsim ){
+     #
+     ##### generate data
+     #
+     bbb <- rgamma( rep(1,nobs), shape=1/alpha ) * alpha
+     y <- rpois( rep(1,nobs), lambda=mu*bbb )
+     #
+     fit0 <- glm( y ~ x1 + x2, family=poisson )
+     mu0 <- fitted(fit0)
+     beta0[j,] <- fit0$coef
+     r2 <- (y-mu0)^2 / mu0
+     scale0[j] <- mean(r2)
+     #
+     ####
+     #### Using GLM and weights
+     ####
+     #
+     maxits <- 10
+     converge <- F
+     aaa <- mean( (r2 - 1)/mu0 )
+     fit <- fit0
+     count <- 0
+     while( !converge & count
> #
> s0 <- sqrt( diag( var( beta0 ) ) )
> s1 <- sqrt( diag( var( beta1 ) ) )
> s2 <- sqrt( diag( var( beta2 ) ) )
> print( cbind( s0, s1, s2, (s1/s0)^2, (s2/s0)^2 ) )
            s0        s1        s2
[1,] 0.1863679 0.1797364 0.1797289 0.9301003 0.9300222
[2,] 0.3278402 0.2910926 0.2912822 0.7883839 0.7894115
[3,] 0.3454551 0.3260808 0.3261542 0.8909786 0.8913799
> #
> #
> # end-of-file...
>
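As a final check (an addition, not part of the original transcript), the simulated efficiencies from Part (e) can be set beside the analytic values from Part (a):

    ## Hedged add-on: compare Monte Carlo efficiencies with the analytic ARE.
    ## areNB() was defined at the top of the session; s0 and s1 are the Part (e)
    ## empirical standard deviations.
    analytic  <- areNB( beta=c( 1, 1, .5 ), alpha=1.0 )[ , 3 ]
    simulated <- (s1/s0)^2
    print( cbind( analytic, simulated ) )

From the output already shown, the analytic efficiencies are roughly (0.936, 0.782, 0.881) and the Part (e) simulated ratios are (0.930, 0.788, 0.891), so the asymptotic calculations and the simulation agree closely even when alpha is estimated.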