extract_mcmc(object)
Output from tmbstan::tmbstan() run on the tmb_obj element of an sdmTMB() model. E.g., tmbstan(your_model$tmb_obj).
Returns a matrix of parameter samples. Rows correspond to the order of your_model$tmb_obj$env$last.par.best and columns correspond to posterior samples. The matrix is used internally by predict.sdmTMB() to make fully Bayesian predictions; see the tmbstan_model argument in predict.sdmTMB().
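As a condensed sketch of these steps (your_model, samples, and your_grid are hypothetical placeholder names; the full worked example below is the definitive reference):

# samples <- tmbstan::tmbstan(your_model$tmb_obj)
# post <- extract_mcmc(samples) # rows: parameters, columns: posterior draws
# p <- predict(your_model, newdata = your_grid, tmbstan_model = samples)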
# \donttest{
mesh <- make_mesh(pcod_2011, c("X", "Y"), cutoff = 35) # quite coarse
# Fit with marginal maximum likelihood first:
fit <- sdmTMB(
  density ~ 0 + as.factor(year),
  data = pcod_2011, mesh = mesh,
  family = tweedie(link = "log"),
  priors = sdmTMBpriors(
    matern_s = pc_matern(range_gt = 10, sigma_lt = 5),
    matern_st = pc_matern(range_gt = 10, sigma_lt = 5),
    b = normal(rep(0, 4), scale = rep(10, 4)) # 4 main effects
  ),
  time = "year"
)
fit
#> Spatiotemporal model fit by ML ['sdmTMB']
#> Formula: density ~ 0 + as.factor(year)
#> Mesh: mesh
#> Time column: year
#> Data: pcod_2011
#> Family: tweedie(link = 'log')
#>
#>                     coef.est coef.se
#> as.factor(year)2011     3.17    0.38
#> as.factor(year)2013     3.20    0.40
#> as.factor(year)2015     3.23    0.40
#> as.factor(year)2017     2.52    0.41
#>
#> Dispersion parameter: 15.89
#> Tweedie p: 1.59
#> Matern range: 21.80
#> Spatial SD: 2.56
#> Spatiotemporal SD: 1.35
#> ML criterion at convergence: 3057.095
#>
#> See ?tidy.sdmTMB to extract these values as a data frame.
# Create a 'map' vector for TMB to 'fix' kappa at the MLE value
# to improve speed of convergence.
# Factor NA values cause TMB to fix or map the parameter
# at the starting value.
pars <- sdmTMB::get_pars(fit)
kappa_map <- factor(rep(NA, length(pars$ln_kappa)))
# Rebuild model updating some elements:
fit_mle <- update(
  fit,
  control = sdmTMBcontrol(
    start = list(
      ln_kappa = pars$ln_kappa
    ),
    map = list(
      ln_kappa = kappa_map
    )
  ),
  do_fit = FALSE # no need to actually fit
)
#> ℹ Initiating `ln_kappa` at specified starting value(s) of:
#> -2.042, -2.042
#> ℹ Fixing (mapping) `ln_kappa` at specified starting value(s) of:
#> -2.042, -2.042
# Will take a few minutes:
library(tmbstan)
m_stan <- tmbstan(fit_mle$tmb_obj, iter = 100, chains = 1)
#>
#> SAMPLING FOR MODEL 'tmb_generic' NOW (CHAIN 1).
#> Chain 1:
#> Chain 1: Gradient evaluation took 0.005362 seconds
#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 53.62 seconds.
#> Chain 1: Adjust your expectations accordingly!
#> Chain 1:
#> Chain 1:
#> Chain 1: WARNING: There aren't enough warmup iterations to fit the
#> Chain 1: three stages of adaptation as currently configured.
#> Chain 1: Reducing each adaptation stage to 15%/75%/10% of
#> Chain 1: the given number of warmup iterations:
#> Chain 1: init_buffer = 7
#> Chain 1: adapt_window = 38
#> Chain 1: term_buffer = 5
#> Chain 1:
#> Chain 1: Iteration: 1 / 100 [ 1%] (Warmup)
#> Chain 1: Iteration: 10 / 100 [ 10%] (Warmup)
#> Chain 1: Iteration: 20 / 100 [ 20%] (Warmup)
#> Chain 1: Iteration: 30 / 100 [ 30%] (Warmup)
#> Chain 1: Iteration: 40 / 100 [ 40%] (Warmup)
#> Chain 1: Iteration: 50 / 100 [ 50%] (Warmup)
#> Chain 1: Iteration: 51 / 100 [ 51%] (Sampling)
#> Chain 1: Iteration: 60 / 100 [ 60%] (Sampling)
#> Chain 1: Iteration: 70 / 100 [ 70%] (Sampling)
#> Chain 1: Iteration: 80 / 100 [ 80%] (Sampling)
#> Chain 1: Iteration: 90 / 100 [ 90%] (Sampling)
#> Chain 1: Iteration: 100 / 100 [100%] (Sampling)
#> Chain 1:
#> Chain 1:  Elapsed Time: 25.0524 seconds (Warm-up)
#> Chain 1:                22.5095 seconds (Sampling)
#> Chain 1:                47.5619 seconds (Total)
#> Chain 1:
#> Warning: The largest R-hat is 1.26, indicating chains have not mixed.
#> Running the chains for more iterations may help. See
#> https://mc-stan.org/misc/warnings.html#r-hat
#> Warning: Bulk Effective Samples Size (ESS) is too low, indicating posterior means and medians may be unreliable.
#> Running the chains for more iterations may help. See
#> https://mc-stan.org/misc/warnings.html#bulk-ess
#> Warning: Tail Effective Samples Size (ESS) is too low, indicating posterior variances and tail quantiles may be unreliable.
#> Running the chains for more iterations may help. See
#> https://mc-stan.org/misc/warnings.html#tail-ess
print(
  m_stan,
  pars = c("b_j", "thetaf", "ln_phi", "omega_s[1]", "epsilon_st[1]")
)
#> Inference for Stan model: sdmTMB.
#> 1 chains, each with iter=100; warmup=50; thin=1;
#> post-warmup draws per chain=50, total post-warmup draws=50.
#>
#>                 mean se_mean    sd  2.5%   25%   50%   75% 97.5% n_eff Rhat
#> b_j[1]          3.19    0.04  0.32  2.64  3.02  3.25  3.42  3.86    51 1.00
#> b_j[2]          3.19    0.05  0.33  2.53  3.03  3.20  3.40  3.86    37 1.03
#> b_j[3]          3.26    0.04  0.32  2.66  3.04  3.28  3.50  3.65    65 1.00
#> b_j[4]          2.59    0.08  0.37  1.82  2.45  2.61  2.84  3.18    22 1.03
#> thetaf          0.37    0.01  0.07  0.26  0.32  0.37  0.42  0.49    44 1.00
#> ln_phi          2.76    0.01  0.04  2.69  2.74  2.77  2.79  2.85    37 0.98
#> omega_s[1]     -0.53    0.07  0.60 -1.50 -0.96 -0.51 -0.12  0.64    85 0.98
#> epsilon_st[1]   0.36    0.07  0.61 -0.66 -0.10  0.47  0.79  1.43    85 0.98
#>
#> Samples were drawn using NUTS(diag_e) at Sat Jun 25 01:39:30 2022.
#> For each parameter, n_eff is a crude measure of effective sample size,
#> and Rhat is the potential scale reduction factor on split chains (at
#> convergence, Rhat=1).
post <- extract_mcmc(m_stan)
dim(post)
#> [1] 223 50
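# As a rough check of the row ordering described above (this step is an
# illustration added here, not part of the original example; it assumes the
# usual named TMB parameter vector):
par_names <- names(fit_mle$tmb_obj$env$last.par.best)
length(par_names) # matches nrow(post)
unique(par_names) # parameter blocks, in the row order of post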
nd <- subset(qcs_grid, year >= 2011)
p <- predict(fit_mle, newdata = nd, tmbstan_model = m_stan)
p_last <- p[nd$year == max(nd$year), ] # just plot last year
pred <- qcs_grid[qcs_grid$year == max(qcs_grid$year), ]
pred$est <- apply(exp(p_last), 1, median)
pred$lwr <- apply(exp(p_last), 1, quantile, probs = 0.1)
pred$upr <- apply(exp(p_last), 1, quantile, probs = 0.9)
pred$cv <- apply(exp(p_last), 1, function(x) sd(x) / mean(x))
library(ggplot2)
ggplot(pred, aes(X, Y, fill = est)) + geom_raster() +
  scale_fill_viridis_c(trans = "log")
ggplot(pred, aes(X, Y, fill = cv)) + geom_raster() +
  scale_fill_viridis_c(trans = "log")
index_quantiles <- get_index_sims(p)
ggplot(index_quantiles, aes(year, est, ymin = lwr, ymax = upr)) +
  geom_line() + geom_ribbon(alpha = 0.5)
index_samples <- get_index_sims(p, return_sims = TRUE)
ggplot(index_samples, aes(as.factor(year), .value)) +
  geom_violin()
# }