[r-cran-mcmcpack] 15/90: Imported Upstream version 0.7-3
From: Andreas Tille <tille at debian.org>
Date: Fri Dec 16 09:07:34 UTC 2016
tille pushed a commit to branch master
in repository r-cran-mcmcpack.
commit 352ebd380af6a3c3c109e73e08f3ed7f1647ff94
Author: Andreas Tille <tille at debian.org>
Date: Fri Dec 16 08:07:06 2016 +0100
Imported Upstream version 0.7-3
---
DESCRIPTION | 6 +--
HISTORY | 17 +++++++++
R/MCMCmetrop1R.R | 104 +++++++++++++++++++++++++++++++--------------------
R/MCMCpoisson.R | 2 +-
R/MCMCregress.R | 3 +-
R/automate.R | 8 +++-
R/hidden.R | 1 -
R/tomog.R | 29 ++++++++------
man/MCMCmetrop1R.Rd | 48 +++++++++++++++++-------
src/MCMCdynamicEI.cc | 2 +-
src/MCMChierEI.cc | 7 +++-
src/MCMCirtKdRob.cc | 4 +-
src/MCMCmnlslice.cc | 2 +-
13 files changed, 153 insertions(+), 80 deletions(-)
diff --git a/DESCRIPTION b/DESCRIPTION
index 7c7f3c7..ea98c73 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,6 +1,6 @@
Package: MCMCpack
-Version: 0.7-2
-Date: 2006-5-29
+Version: 0.7-3
+Date: 2006-9-15
Title: Markov chain Monte Carlo (MCMC) Package
Author: Andrew D. Martin <admartin at wustl.edu>, and
Kevin M. Quinn <kevin_quinn at harvard.edu>
@@ -17,4 +17,4 @@ Description: This package contains functions to perform Bayesian
sampling algorithm, and tools for visualization.
License: GPL version 2
URL: http://mcmcpack.wustl.edu
-Packaged: Mon May 29 13:11:44 2006; adm
+Packaged: Fri Sep 15 15:17:51 2006; adm
diff --git a/HISTORY b/HISTORY
index 437b51c..f4ef523 100644
--- a/HISTORY
+++ b/HISTORY
@@ -2,6 +2,23 @@
// Changes and Bug Fixes
//
+0.7-2 to 0.7-3
+ * following posting by Radford Neal at: http://www.stat.columbia.edu/~cook/movabletype/archives/2006/03/adaptive_metrop.html
+ switched a < to a <= in the shrinkage procedures used in the various slice
+ sampling implementations.
+ * modified tomogplot() and dtomogplot() to handle situations in which
+ r1[i] == 0. [thanks to David Hugh-Jones for making this suggestion].
+ * allowed for more user control of the initial call to optim in
+ MCMCmetrop1R [thanks to Luca La Rocca for this suggestion].
+ * allowed users to pass the variance-covariance matrix of the Gaussian
+ proposal directly without resorting to a call to optim
+ [thanks to Luca La Rocca for this suggestion].
+ * fixed minor bug in developer mode in automate.R [thanks to Ben Goodrich].
+ * fixed a minor bug in the developer mode documentation template.
+ * fixed minor bug in nonconst call in MCMCregress.
+
+
+
0.7-1 to 0.7-2
* added procrustes() to NAMESPACE so that it can be seen (function and
documentation already there).
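
The first changelog entry above refers to the shrinkage step of the slice
samplers in src/ (the "z < logfun(...)" comparisons changed to "z <=" later in
this commit). A minimal R sketch of that step -- not the package's C++ code,
and omitting the Accept() check used together with the doubling procedure --
shows where the comparison sits:

    ## minimal sketch, assuming logfun() is the log-density, x0 the current
    ## point, z the auxiliary level and (L, R) the bracketing interval
    shrink <- function(logfun, x0, z, L, R) {
      Lbar <- L; Rbar <- R
      repeat {
        x1 <- runif(1, Lbar, Rbar)
        if (z <= logfun(x1)) return(x1)          # 0.7-2 used a strict < here
        if (x1 < x0) Lbar <- x1 else Rbar <- x1  # shrink toward x0 and retry
      }
    }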
diff --git a/R/MCMCmetrop1R.R b/R/MCMCmetrop1R.R
index 11d1dda..267f10b 100644
--- a/R/MCMCmetrop1R.R
+++ b/R/MCMCmetrop1R.R
@@ -1,4 +1,4 @@
-## samples from a user-written posterior code in R using a
+## samples from a user-written posterior coded in R using a
## random walk Metropolis algorithm
##
## KQ 6/24/2004
@@ -8,12 +8,19 @@
## changed the method used to pass additional arguments to the user-defined
## function KQ 8/15/2005
##
+## changed to allow more user control of optim KQ 6/18/2006
+##
"MCMCmetrop1R" <- function(fun, theta.init,
burnin=500, mcmc=20000, thin=1,
tune=1, verbose=0, seed=NA, logfun=TRUE,
- force.samp=FALSE, optim.trace=0, optim.REPORT=10,
- optim.maxit=500, ...){
+ force.samp=FALSE, V=NULL,
+ optim.method="BFGS",
+ optim.lower= -Inf,
+ optim.upper= Inf,
+ optim.control=list(fnscale=-1, trace=0,
+ REPORT=10, maxit=500),
+ ...){
## error checking here
check.offset(list(...))
@@ -46,52 +53,67 @@
stop("Respecifiy and call MCMCmetrop1R() again. \n",
call.=FALSE)
}
-
- ## find approx mode and Hessian using optim()
- opt.out <- optim(theta.init, maxfun,
- control=list(fnscale=-1, trace=optim.trace,
- REPORT=optim.REPORT, maxit=optim.maxit),
- method="BFGS", hessian=TRUE, ...)
- if(opt.out$convergence!=0){
- warning("Mode and Hessian were not found with call to optim().\nSampling proceeded anyway. \n")
- }
-
- CC <- NULL
- try(CC <- chol(-1*opt.out$hessian), silent=TRUE)
- hess.new <- opt.out$hessian
- hess.flag <- 0
- if (force.samp==TRUE){
- if (max(diag(optim.out$hessian)==0)){
- for (i in 1:nrow(hess.new)){
- if (hess.new[i,i] == 0){
- hess.new[i,i] <- -1e-6
+ if (is.null(V)){
+ ## find approx mode and Hessian using optim()
+ opt.out <- optim(theta.init, maxfun,
+ control=optim.control,
+ lower=optim.lower, upper=optim.upper,
+ method=optim.method, hessian=TRUE, ...)
+ if(opt.out$convergence!=0){
+ warning("Mode and Hessian were not found with call to optim().\nSampling proceeded anyway. \n")
+ }
+
+
+ CC <- NULL
+ try(CC <- chol(-1*opt.out$hessian), silent=TRUE)
+ hess.new <- opt.out$hessian
+ hess.flag <- 0
+ if (force.samp==TRUE){
+ if (max(diag(optim.out$hessian)==0)){
+ for (i in 1:nrow(hess.new)){
+ if (hess.new[i,i] == 0){
+ hess.new[i,i] <- -1e-6
+ }
}
}
+ while (is.null(CC)){
+ hess.flag <- 1
+ hess.new <- hess.new - diag(diag(0.01 * abs(opt.out$hessian)))
+ try(CC <- chol(-1*hess.new), silent=TRUE)
+ }
+ }
+ else{
+ if (is.null(CC)){
+ hess.flag <- 2
+ }
+ }
+ if (hess.flag==1){
+ warning("Hessian from call to optim() not negative definite.\nSampling proceeded after enforcing negative definiteness. \n")
}
- while (is.null(CC)){
- hess.flag <- 1
- hess.new <- hess.new - diag(diag(0.01 * abs(opt.out$hessian)))
- try(CC <- chol(-1*hess.new), silent=TRUE)
+ if (hess.flag==2){
+ cat("Hessian from call to optim() not negative definite.\n")
+ cat("Sampling (as specified) cannot proceed.\n")
+ stop("Check data and fun() and call MCMCmetrop1R() again. \n",
+ call.=FALSE)
}
+
+ V <- tune %*% solve(-1*hess.new) %*% tune
}
- else{
+ else{ ## V is non NULL
+ if (nrow(V) != ncol(V) || nrow(V) != length(theta.init)){
+ cat("V not of appropriate dimension.\n")
+ stop("Check V and theta.init and call MCMCmetrop1R() again. \n",
+ call.=FALSE)
+ }
+ CC <- NULL
+ try(CC <- chol(V), silent=TRUE)
if (is.null(CC)){
- hess.flag <- 2
+ cat("V not positive definite.\n")
+ stop("Check V and call MCMCmetrop1R() again. \n",
+ call.=FALSE)
}
}
- if (hess.flag==1){
- warning("Hessian from call to optim() not negative definite.\nSampling proceeded after enforcing negative definiteness. \n")
- }
- if (hess.flag==2){
- cat("Hessian from call to optim() not negative definite.\n")
- cat("Sampling (as specified) cannot proceed.\n")
- stop("Check data and fun() and call MCMCmetrop1R() again. \n",
- call.=FALSE)
- }
-
- propvar <- tune %*% solve(-1*hess.new) %*% tune
-
## Call the C++ function to do the MCMC sampling
sample <- .Call("MCMCmetrop1R_cc", userfun, as.double(theta.init),
@@ -102,7 +124,7 @@
seedarray=as.integer(seed.array),
lecuyerstream=as.integer(lecuyer.stream),
as.logical(logfun),
- as.matrix(propvar),
+ as.matrix(V),
PACKAGE="MCMCpack")
## turn sample into an mcmc object
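
A usage sketch of the interface change above (the log-posterior and data here
are invented for illustration): with the default V = NULL the proposal
covariance is still built from tune and an optim() fit, while a user-supplied
V now bypasses optim() entirely.

    ## hypothetical log-posterior; extra arguments are passed through ...
    logpost <- function(theta, y) {
      sum(dnorm(y, mean = theta[1], sd = exp(theta[2]), log = TRUE))
    }
    y <- rnorm(100)

    ## default: mode and Hessian located by optim(), V built from tune and H
    out1 <- MCMCmetrop1R(logpost, theta.init = c(0, 0), y = y)

    ## new in 0.7-3: supply the proposal variance-covariance matrix directly
    out2 <- MCMCmetrop1R(logpost, theta.init = c(0, 0), y = y,
                         V = diag(0.05, 2))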
diff --git a/R/MCMCpoisson.R b/R/MCMCpoisson.R
index 16253e7..d7885c0 100644
--- a/R/MCMCpoisson.R
+++ b/R/MCMCpoisson.R
@@ -94,7 +94,7 @@
b0=b0, B0=B0, V=V)
## put together matrix and build MCMC object to return
- output <- form.mcmc.object(posterior, names=xnames,
+ output <- form.mcmc.object(sample, names=xnames,
title="MCMCpoisson Posterior Sample",
y=Y, call=cl, logmarglike=logmarglike)
return(output)
diff --git a/R/MCMCregress.R b/R/MCMCregress.R
index c5a084a..511d435 100644
--- a/R/MCMCregress.R
+++ b/R/MCMCregress.R
@@ -60,7 +60,8 @@
## call C++ code to draw sample
auto.Scythe.call(output.object="posterior", cc.fun.name="MCMCregress",
- sample=sample, Y=Y, X=X, burnin=as.integer(burnin),
+ sample.nonconst=sample, Y=Y, X=X,
+ burnin=as.integer(burnin),
mcmc=as.integer(mcmc), thin=as.integer(thin),
lecuyer=as.integer(lecuyer),
seedarray=as.integer(seed.array),
diff --git a/R/automate.R b/R/automate.R
index 633953f..0d9ae9a 100644
--- a/R/automate.R
+++ b/R/automate.R
@@ -21,6 +21,7 @@
##
## R.file: the file used to store the clean R template
## (string, output to the screen if "")
+## this is just the function call indented nicely
##
## ...: list of objects passed to C++
## NOTE: this will only take integers (which have
@@ -100,7 +101,9 @@
## write out a template R help file
callfun <- strsplit(toString(sys.call(which=-1)),",")[[1]][1]
if (help.file){
- prompt(callfun,file=paste(callfun, ".template.Rd", sep=""))
+ prompt.call <- paste("prompt(", callfun, ", file =\"",
+ paste(callfun, ".template.Rd\"", sep=""), ")")
+ eval(parse(text=prompt.call))
}
##
@@ -188,11 +191,12 @@
c.names[[k]], "data);\n", sep="")
together.call <- paste(together.call, scythe, sep="")
- }
+
holder.row <- paste("const int *", c.names[[k]], "row,", sep="")
holder.col <- paste("const int *", c.names[[k]], "col,", sep="")
c.middle <- c(c.middle, holder.data, holder.row, holder.col)
}
+ }
# clean up and print C++ function call
c.middle <- paste(c.middle, sep=" ", collapse=" ")
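
The prompt() fix above constructs the call as text: callfun holds the name of
the calling function as a string, and evaluating the parsed call lets that
name resolve to the function object before prompt() writes the Rd template.
A stand-alone sketch of the same pattern (make_template() is illustrative,
not a package function):

    make_template <- function(callfun) {
      prompt.call <- paste("prompt(", callfun, ', file = "',
                           callfun, '.template.Rd")', sep = "")
      eval(parse(text = prompt.call))   # e.g. prompt(mean, file = "mean.template.Rd")
    }
    ## make_template("mean") would write mean.template.Rd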
diff --git a/R/hidden.R b/R/hidden.R
index b7602f5..7543a70 100644
--- a/R/hidden.R
+++ b/R/hidden.R
@@ -557,7 +557,6 @@
posterior.object$samplerow,
posterior.object$samplecol,
byrow=TRUE)
-
output <- mcmc(data=holder, start=(posterior.object$burnin+1),
end=(posterior.object$burnin+posterior.object$mcmc),
diff --git a/R/tomog.R b/R/tomog.R
index 7a65166..cb4a7d0 100644
--- a/R/tomog.R
+++ b/R/tomog.R
@@ -1,9 +1,11 @@
########## Tomography Plots for Ecological Inference ##########
-# produces tomography plots (see King, 1997, A Solution to the
-# Ecological Inference Problem, Princeton University Press)
-#
-# KQ 11/9/2002
+## produces tomography plots (see King, 1997, A Solution to the
+## Ecological Inference Problem, Princeton University Press)
+##
+## KQ 11/9/2002
+##
+## Modification added suggested by David Hugh-Jones 6/10/2006
"tomogplot" <-
function(r0, r1, c0, c1,
@@ -29,7 +31,9 @@
rect(0, 0, 1, 1, col=bgcol, lty=0)
for (year in 1:N) {
- abline(intercept[year], slope[year])
+ if (is.finite(intercept[year]) & is.finite(slope[year]))
+ abline(intercept[year], slope[year])
+ else abline(v=c0[year]/(c0[year]+c1[year]))
}
rect(-0.05, -0.05, 1.05, 0, col="white", lty=0)
@@ -41,11 +45,12 @@
return(0)
}
-# produces temporally organized tomography plots
-# (see King, 1997, A Solution to the Ecological Inference
-# Problem, Princeton University Press)
-#
-# KQ 11/9/2002
+## produces temporally organized tomography plots
+## (see King, 1997, A Solution to the Ecological Inference
+## Problem, Princeton University Press)
+##
+## KQ 11/9/2002
+## Modification added suggested by David Hugh-Jones 6/10/2006
"dtomogplot" <-
function(r0, r1, c0, c1, time.vec=NA, delay=0,
@@ -102,7 +107,9 @@
while ( (time.next - time.last) < delay ){
time.next <- proc.time()[3]
}
- abline(intercept[year], slope[year], col=col.vec[year])
+ if (is.finite(intercept[year]) & is.finite(slope[year]))
+ abline(intercept[year], slope[year], col=col.vec[year])
+ else abline(v=c0[year]/(c0[year]+c1[year]), col=col.vec[year])
}
rect(-0.05, -0.05, 1.05, 0, col="white", lty=0)
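
The fallback branches added above handle the degenerate tomography line. A
short sketch of the algebra (notation follows the tomogplot() arguments; this
is not the package code): the accounting identity p0*r0 + p1*r1 = c0 gives
the line p1 = c0/r1 - (r0/r1)*p0, whose intercept and slope are non-finite
when r1 = 0; in that case r0 = c0 + c1, so the identity pins p0 at
c0/(c0 + c1), which is exactly the vertical line drawn by the new code.

    tomog_line <- function(r0, r1, c0, c1) {
      if (r1 > 0) c(intercept = c0 / r1, slope = -r0 / r1)
      else        c(vertical.at = c0 / (c0 + c1))
    }
    tomog_line(r0 = 80,  r1 = 20, c0 = 50, c1 = 50)  # ordinary line
    tomog_line(r0 = 100, r1 = 0,  c0 = 60, c1 = 40)  # degenerate: vertical at 0.6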
diff --git a/man/MCMCmetrop1R.Rd b/man/MCMCmetrop1R.Rd
index 6e88a8c..b7f0d99 100644
--- a/man/MCMCmetrop1R.Rd
+++ b/man/MCMCmetrop1R.Rd
@@ -8,8 +8,10 @@
\usage{
MCMCmetrop1R(fun, theta.init, burnin = 500, mcmc = 20000, thin = 1,
tune = 1, verbose = 0, seed=NA, logfun = TRUE,
- force.samp=FALSE, optim.trace = 0, optim.REPORT = 10,
- optim.maxit = 500, ...)
+ force.samp = FALSE, V = NULL, optim.method = "BFGS",
+ optim.lower = -Inf, optim.upper = Inf,
+ optim.control = list(fnscale = -1, trace = 0, REPORT = 10,
+ maxit=500), ...)
}
\arguments{
\item{fun}{The unnormalized (log)density of the distribution from
@@ -74,15 +76,31 @@ MCMCmetrop1R(fun, theta.init, burnin = 500, mcmc = 20000, thin = 1,
density. Thus, \code{force.samp} should only be set equal to
\code{TRUE} with great caution.}
}
-
- \item{optim.trace}{The value of the \code{trace} parameter sent to
- \code{optim} during an initial maximization of \code{fun}. }
- \item{optim.REPORT}{The value of the \code{REPORT} parameter sent to
- \code{optim} during an initial maximization of \code{fun}.}
+ \item{V}{The variance-covariance matrix for the Gaussian proposal
+ distribution. Must be a square matrix or \code{NULL}. If a square
+ matrix, \code{V} must have dimension equal to the length of
+ \code{theta.init}. If \code{NULL}, \code{V} is calculated from
+ \code{tune} and an initial call to \code{optim}. See the Details
+ section below for more information. Unless the log-posterior is
+ expensive to compute it will typically be best to use the default
+ \code{V = NULL}.}
+
+ \item{optim.method}{The value of the \code{method} parameter sent to
+ \code{optim} during an initial maximization of \code{fun}. See
+ \code{optim} for more details.}
+
+ \item{optim.lower}{The value of the \code{lower} parameter sent to
+ \code{optim} during an initial maximization of \code{fun}. See
+ \code{optim} for more details.}
- \item{optim.maxit}{The value of the \code{maxit} parameter sent to
- \code{optim} during an initial maximization of \code{fun}.}
+ \item{optim.upper}{The value of the \code{upper} parameter sent to
+ \code{optim} during an initial maximization of \code{fun}. See
+ \code{optim} for more details.}
+
+ \item{optim.control}{The value of the \code{control} parameter sent to
+ \code{optim} during an initial maximization of \code{fun}. See
+ \code{optim} for more details.}
\item{\dots}{Additional arguments.}
}
@@ -93,15 +111,17 @@ distribution. See Gelman et al. (2003) and Robert & Casella (2004) for
details of the random walk Metropolis algorithm.
The proposal distribution is centered at the current value of
-\eqn{\theta}{theta} and has variance-covariance \eqn{V = T
+\eqn{\theta}{theta} and has variance-covariance \eqn{V}{V}. If
+\eqn{V}{V} is specified by the user to be \code{NULL} then \eqn{V}{V} is
+calculated as: \eqn{V = T
(-1\cdot H)^{-1} T }{V = T (-1*H)^{-1} T}, where \eqn{T}{T} is a the
diagonal positive definite matrix formed from the \code{tune} and
-\eqn{H}{H} is the approximate Hessian of \code{fun} evaluated at it's
+\eqn{H}{H} is the approximate Hessian of \code{fun} evaluated at its
mode. This last calculation is done via an initial call to
\code{optim}.
}
\value{
- An mcmc object that contains the posterior density sample. This
+ An mcmc object that contains the posterior sample. This
object can be summarized by functions provided by the coda package.
}
\examples{
@@ -126,7 +146,7 @@ mode. This last calculation is done via an initial call to
X=Xdata, y=yvector,
thin=1, mcmc=40000, burnin=500,
tune=c(1.5, 1.5, 1.5),
- verbose=500, logfun=TRUE, optim.maxit=100)
+ verbose=500, logfun=TRUE)
raftery.diag(post.samp)
plot(post.samp)
@@ -158,7 +178,7 @@ mode. This last calculation is done via an initial call to
post.samp <- MCMCmetrop1R(negbinfun, theta.init=c(0,0,0,0), y=yy, X=XX,
thin=1, mcmc=35000, burnin=1000,
tune=1.5, verbose=500, logfun=TRUE,
- optim.maxit=500, seed=list(NA,1))
+ seed=list(NA,1))
raftery.diag(post.samp)
plot(post.samp)
summary(post.samp)
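
The default proposal covariance described in the Details hunk mirrors the R
code earlier in this commit; a small sketch of that computation (build_V() is
illustrative only; tune may be a scalar or a vector of length(theta.init),
and H is the Hessian of the log-posterior at its mode):

    build_V <- function(tune, H) {
      Tmat <- diag(tune, nrow(H))     # the diagonal tuning matrix T
      Tmat %*% solve(-H) %*% Tmat     # V = T (-H)^{-1} T
    }
    ## e.g. build_V(tune = c(1.5, 1.5), H = -diag(2)) gives 2.25 * diag(2)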
diff --git a/src/MCMCdynamicEI.cc b/src/MCMCdynamicEI.cc
index c85fac5..adb4bc7 100644
--- a/src/MCMCdynamicEI.cc
+++ b/src/MCMCdynamicEI.cc
@@ -161,7 +161,7 @@ static double shrinkage(double (*logfun)(double[], const double&,
const double U = stream->runif();
const double x1 = Lbar + U*(Rbar - Lbar);
theta_x1[index] = x1;
- if (z < logfun(theta_x1, r0, r1, c0, mu0, mu1, sigma0, sigma1) &&
+ if (z <= logfun(theta_x1, r0, r1, c0, mu0, mu1, sigma0, sigma1) &&
Accept(logfun, theta_x1, index, x0, z, w,
r0, r1, c0, mu0, mu1,
sigma0, sigma1, L, R)){
diff --git a/src/MCMChierEI.cc b/src/MCMChierEI.cc
index 305ecd5..16b5963 100644
--- a/src/MCMChierEI.cc
+++ b/src/MCMChierEI.cc
@@ -8,6 +8,8 @@
// ADM 7/24/2004 [updated to new Scythe version]
// KQ 8/10/2004 bug fix and major overhaul of sampling scheme
+#include <iostream>
+
#include "matrix.h"
#include "distributions.h"
#include "stat.h"
@@ -159,7 +161,7 @@ static double shrinkage(double (*logfun)(double[], const double&,
const double U = stream->runif();
const double x1 = Lbar + U*(Rbar - Lbar);
theta_x1[index] = x1;
- if (z < logfun(theta_x1, r0, r1, c0, mu0, mu1, sigma0, sigma1) &&
+ if (z <= logfun(theta_x1, r0, r1, c0, mu0, mu1, sigma0, sigma1) &&
Accept(logfun, theta_x1, index, x0, z, w,
r0, r1, c0, mu0, mu1,
sigma0, sigma1, L, R)){
@@ -331,12 +333,13 @@ extern "C"{
// sampling constants
- const double w = mean(widthmat);
+ double w = mean(widthmat);
int p_temp = 2;
while ((w * pow(2.0, p_temp) ) < max(widthmat)){
++p_temp;
}
const int p = p_temp + 1;
+
// @@@@@@@@@@@@@@ The real sampling @@@@@@@@@@@@@@@
for (int iter=0; iter<tot_iter; ++iter){
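
For reference, the sampling-constant setup in the hunk above (now with w
non-const) picks the number of doublings p so that w * 2^p comfortably
exceeds the largest requested slice width. An R transliteration, for
illustration only:

    choose_doubling <- function(widthmat) {
      w <- mean(widthmat)
      p <- 2
      while (w * 2^p < max(widthmat)) p <- p + 1
      list(w = w, p = p + 1)
    }
    choose_doubling(c(0.5, 1, 4))   # w = 1.83..., p = 3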
diff --git a/src/MCMCirtKdRob.cc b/src/MCMCirtKdRob.cc
index 946bf09..05eb0ef 100644
--- a/src/MCMCirtKdRob.cc
+++ b/src/MCMCirtKdRob.cc
@@ -616,7 +616,7 @@ static double shrinkageDoubling(double (*logfun)(const double&,
for (;;){
const double U = stream->runif();
const double x1 = Lbar + U*(Rbar - Lbar);
- if (z < logfun(x1, X, Lambda, theta, delta0, delta1, Lambda_prior_mean,
+ if (z <= logfun(x1, X, Lambda, theta, delta0, delta1, Lambda_prior_mean,
Lambda_prior_prec, Lambda_ineq, theta_ineq,
k0, k1, c0, d0, c1, d1, rowindex, colindex) &&
Accept(logfun, X, Lambda, theta, delta0, delta1, Lambda_prior_mean,
@@ -700,7 +700,7 @@ static double shrinkageStep(double (*logfun)(const double&,
for (;;){
const double U = stream->runif();
const double x1 = Lbar + U*(Rbar - Lbar);
- if (z < logfun(x1, X, Lambda, theta, delta0, delta1, Lambda_prior_mean,
+ if (z <= logfun(x1, X, Lambda, theta, delta0, delta1, Lambda_prior_mean,
Lambda_prior_prec, Lambda_ineq, theta_ineq,
k0, k1, c0, d0, c1, d1, rowindex, colindex) ){
return(x1);
diff --git a/src/MCMCmnlslice.cc b/src/MCMCmnlslice.cc
index 4f1fe5e..9b5f70e 100644
--- a/src/MCMCmnlslice.cc
+++ b/src/MCMCmnlslice.cc
@@ -185,7 +185,7 @@ static double shrinkage(double (*logfun)(const Matrix<double>&,
const double U = stream->runif();
const double x1 = Lbar + U*(Rbar - Lbar);
beta_x1[index] = x1;
- if (z < logfun(Y, X, beta_x1, beta_prior_mean, beta_prior_prec) &&
+ if (z <= logfun(Y, X, beta_x1, beta_prior_mean, beta_prior_prec) &&
Accept(logfun, beta_x1, index, x0, z, w,
Y, X, beta_prior_mean, beta_prior_prec, L, R)){
return(x1);