[r-cran-mnp] 04/51: Import Upstream version 2.1-2

Andreas Tille tille at debian.org
Fri Sep 8 14:14:44 UTC 2017


This is an automated email from the git hooks/post-receive script.

tille pushed a commit to branch master
in repository r-cran-mnp.

commit 3bf98dc20f4d00b0e2009fe7dbcbb9bf1f3dfe66
Author: Andreas Tille <tille at debian.org>
Date:   Fri Sep 8 15:54:41 2017 +0200

    Import Upstream version 2.1-2
---
 DESCRIPTION           |  30 ++++++++------
 NAMESPACE             |   1 +
 R/mnp.R               |  16 ++++----
 R/onLoad.R            |   2 +-
 R/predict.mnp.R       |  96 +++++++++++++++++++++++++++++++++++++++++++++
 R/print.summary.mnp.R |   3 +-
 R/summary.mnp.R       |   2 +-
 data/detergent.txt    |   2 +-
 man/detergent.Rd      |  14 +++----
 man/japan.Rd          |  15 +++----
 man/mnp.Rd            |  50 ++++++++++++++----------
 man/predict.mnp.Rd    | 106 ++++++++++++++++++++++++++++++++++++++++++++++++++
 man/summary.mnp.Rd    |   1 +
 src/MNP.c             |  36 ++++++++---------
 14 files changed, 297 insertions(+), 77 deletions(-)

diff --git a/DESCRIPTION b/DESCRIPTION
index 8d65dc7..ca79af7 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,19 +1,25 @@
 Package: MNP
-Version: 1.4-1
-Date: 2004-12-16
-Title: MNP: R Package for Fitting Multinomial Probit Models
+Version: 2.1-2
+Date: 2005-03-22
+Title: R Package for Fitting the Multinomial Probit Model
 Author: Kosuke Imai <kimai at princeton.edu>, 
-        Jordan Vance<jvance at princeton.edu>, 
         David A. van Dyk <dvd at uci.edu>. 
 Maintainer: Kosuke Imai <kimai at princeton.edu>
-Depends: R (>= 2.0.0)
-Description: MNP is a publicly available R package that fits 
-  Bayesian Multinomial Probit models via Markov chain Monte Carlo. Along
-  with the standard Multinomial Probit model, it can also fit models
-  with different choice sets for each observation and complete or
-  partial ordering of all the available alternatives. The estimation
+Depends: R (>= 2.0.0), MASS
+Description: MNP is a publicly available R package that fits the Bayesian
+  multinomial probit model via Markov chain Monte Carlo. The
+  multinomial probit model is often used to analyze the discrete
+  choices made by individuals recorded in survey data. Examples where
+  the multinomial probit model may be useful include the analysis of
+  product choice by consumers in market research and the analysis of
+  candidate or party choice by voters in electoral studies.  The MNP
+  software can also fit the model with different choice sets for each
+  individual, and complete or partial individual choice orderings of
+  the available alternatives from the choice set. The estimation
   is based on the efficient marginal data augmentation algorithm that
-  is developed by Imai and van Dyk (2005).
+  is developed by Imai and van Dyk (2005), ``A Bayesian Analysis of
+  the Multinomial Probit Model Using Marginal Data Augmentation,''
+  Journal of Econometrics, Vol. 124, No. 2 (February), pp. 311-334.
 License: GPL (version 2 or later)
 URL: http://www.princeton.edu/~kimai/research/MNP.html
-Packaged: Thu Dec 16 15:16:36 2004; kimai
+Packaged: Tue Mar 22 19:38:10 2005; kimai
diff --git a/NAMESPACE b/NAMESPACE
index d8df404..d0a80a1 100644
--- a/NAMESPACE
+++ b/NAMESPACE
@@ -3,5 +3,6 @@ useDynLib(MNP)
 export(mnp, summary.mnp, print.summary.mnp)
 
 S3method(summary, mnp)
+S3method(predict, mnp)
 S3method(print, mnp)
 S3method(print, summary.mnp)
diff --git a/R/mnp.R b/R/mnp.R
index b0defd3..fb9da2b 100644
--- a/R/mnp.R
+++ b/R/mnp.R
@@ -122,7 +122,8 @@ mnp <- function(formula, data = parent.frame(), choiceX = NULL,
   if(verbose)
     cat("Starting Gibbs sampler...\n")
   # recoding NA into -1
-  Y[is.na(Y)] <- -1 
+  Y[is.na(Y)] <- -1
+
   param <- .C("cMNPgibbs", as.integer(n.dim),
               as.integer(n.cov), as.integer(n.obs), as.integer(n.draws),
               as.double(p.mean), as.double(p.prec), as.integer(p.df),
@@ -139,18 +140,19 @@ mnp <- function(formula, data = parent.frame(), choiceX = NULL,
                dim = c(n.dim, n.obs, floor((n.draws-burnin)/keep)),
                dimnames = list(lev[-1], rownames(Y), NULL))
     param <- param[,1:(n.par-n.dim*n.obs)]
-  }
+    }
   else
     W <- NULL
   colnames(param) <- c(coefnames, Signames)
-
+    
   ##recoding -1 back into NA
   Y[Y==-1] <- NA
+
   ## returning the object
-  res <- list(param = param, x = X, y = Y, W = W, call = call, n.alt = p,
-              p.mean = if(p.imp) NULL else p.mean, p.var = p.var,
-              p.df = p.df, p.scale = p.scale, burnin = burnin, thin =
-              thin) 
+  res <- list(param = param, x = X, y = Y, w = W, call = call, alt = lev,
+              n.alt = p, base = base, p.mean = if(p.imp) NULL else p.mean,
+              p.var = p.var,
+              p.df = p.df, p.scale = p.scale, burnin = burnin, thin = thin) 
   class(res) <- "mnp"
   return(res)
 }
diff --git a/R/onLoad.R b/R/onLoad.R
index d4123a0..39784cf 100644
--- a/R/onLoad.R
+++ b/R/onLoad.R
@@ -1,6 +1,6 @@
 ".onLoad" <- function(lib, pkg) {
   mylib <- dirname(system.file(package = "MNP"))
-  title <- packageDescription("MNP", lib = mylib)$Title
+  title <- paste("MNP:", packageDescription("MNP", lib = mylib)$Title)
   ver <- packageDescription("MNP", lib = mylib)$Version
   url <- packageDescription("MNP", lib = mylib)$URL
   cat(title, "\nVersion:", ver, "\nURL:", url, "\n")
diff --git a/R/predict.mnp.R b/R/predict.mnp.R
new file mode 100644
index 0000000..958b4a8
--- /dev/null
+++ b/R/predict.mnp.R
@@ -0,0 +1,96 @@
+predict.mnp <- function(object, newdata = NULL, newdraw = NULL,
+                        type = c("prob", "choice", "order", "latent"),
+                        verbose = FALSE, ...){
+
+  if (NA %in% match(type, c("prob", "choice", "order", "latent")))
+    stop("Invalid input for `type'.")
+      
+  p <- object$n.alt
+  if (is.null(newdraw))
+    param <- object$param
+  else
+    param <- newdraw
+  n.cov <- ncol(param) - p*(p-1)/2
+  n.draws <- nrow(param)
+  coef <- param[,1:n.cov]
+  cov <- param[,(n.cov+1):ncol(param)]
+  
+  ## get X matrix ready
+  if (is.null(newdata)) 
+    x <- object$x
+  else {
+    call <- object$call
+    x <- xmatrix.mnp(as.formula(call$formula), data = newdata,
+                     choiceX = call$choiceX,
+                     cXnames = eval(call$cXnames),
+                     base = object$base, n.dim = p-1,
+                     lev = object$alt, MoP = is.matrix(object$y),
+                     verbose = FALSE, extra = FALSE)    
+  }
+
+  n.obs <- nrow(x)
+  if (verbose)
+    cat("There are", n.obs, "observations to predict. Please wait...\n")
+
+  alt <- object$alt
+  if (object$base != alt[1]) 
+    alt <- c(object$base, alt[1:(length(alt)-1)])
+  
+  ## computing W
+  W <- array(NA, dim=c(p-1, n.obs, n.draws), dimnames=list(alt[2:p],
+                                               NULL, 1:n.draws))
+  tmp <- floor(n.draws/10)
+  inc <- 1
+  for (i in 1:n.draws) {
+    Sigma <- matrix(0, p-1, p-1)
+    count <- 1
+    for (j in 1:(p-1)) {
+      Sigma[j,j:(p-1)] <- cov[i,count:(count+p-j-1)]
+      count <- count + p - j
+    }
+    diag(Sigma) <- diag(Sigma)/2
+    Sigma <- Sigma + t(Sigma)
+    for (j in 1:n.obs) 
+      W[,j,i] <- matrix(x[j,], ncol=n.cov) %*% matrix(coef[i,]) +
+        mvrnorm(1, mu = rep(0, p-1), Sigma = Sigma)
+    if (i == inc*tmp & verbose) {
+      cat("", inc*10, "percent done.\n")
+      inc <- inc + 1
+    }
+  }
+  ans <- list()
+  if ("latent" %in% type)
+    ans$w <- W
+  else
+    ans$w <- NULL
+
+  ## computing Y
+  Y <- matrix(NA, nrow = n.obs, ncol = n.draws, dimnames=list(NULL, 1:n.draws))
+  O <- array(NA, dim=c(p, n.obs, n.draws), dimnames=list(alt, NULL, 1:n.draws))
+  for (i in 1:n.obs) 
+    for (j in 1:n.draws) {
+      Y[i,j] <- alt[match(max(c(0, W[,i,j])), c(0,W[,i,j]))]
+      O[,i,j] <- rank(c(0, -W[,i,j]))
+    }
+  if ("choice" %in% type)
+    ans$y <- Y
+  else
+    ans$y <- NULL
+  if ("order" %in% type)
+    ans$o <- O
+  else
+    ans$o <- NULL
+
+  ## computing P
+  if ("prob" %in% type) {
+    P <- matrix(NA, nrow = n.obs, ncol = p)
+    colnames(P) <- alt
+    for (i in 1:p)
+      P[,i] <- apply(Y==alt[i], 1, mean) 
+    ans$p <- P
+  }
+  else
+    ans$p <- NULL
+
+  return(ans)
+}
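
The new predict method above stores each draw of the covariance matrix as a
packed row of the p*(p-1)/2 upper-triangular entries (diagonal included) and
rebuilds a symmetric Sigma before sampling the latent vector. A minimal
standalone sketch of that unpacking step, with hypothetical numbers and p = 4
alternatives:

  ## Rebuild a symmetric (p-1) x (p-1) covariance matrix from one packed row
  ## of draws; values and p are illustrative, the layout mirrors predict.mnp.R.
  p <- 4
  packed <- c(1.0, 0.3, 0.2,   # Sigma[1, 1:3]
                   1.5, 0.4,   # Sigma[2, 2:3]
                        2.0)   # Sigma[3, 3]
  Sigma <- matrix(0, p - 1, p - 1)
  count <- 1
  for (j in 1:(p - 1)) {
    Sigma[j, j:(p - 1)] <- packed[count:(count + p - j - 1)]
    count <- count + p - j
  }
  diag(Sigma) <- diag(Sigma) / 2   # halve the diagonal so that adding
  Sigma <- Sigma + t(Sigma)        # the transpose restores it exactly once
  Sigma
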
diff --git a/R/print.summary.mnp.R b/R/print.summary.mnp.R
index 086e07b..1c7cafc 100644
--- a/R/print.summary.mnp.R
+++ b/R/print.summary.mnp.R
@@ -10,9 +10,10 @@ print.summary.mnp <- function(x, digits = max(3, getOption("digits") - 3), ...)
   cat("\nCovariances:\n")
   printCoefmat(x$cov.table, digits = digits, na.print = "NA", ...)
   
+  cat("\nBase category:", x$base)  
   cat("\nNumber of alternatives:", x$n.alt)
   cat("\nNumber of observations:", x$n.obs)
-  cat("\nNumber of Gibbs draws:", x$n.draws)
+  cat("\nNumber of stored MCMC draws:", x$n.draws)
   cat("\n\n")
   invisible(x)
 }
diff --git a/R/summary.mnp.R b/R/summary.mnp.R
index abf3a74..4a8d5d0 100644
--- a/R/summary.mnp.R
+++ b/R/summary.mnp.R
@@ -10,7 +10,7 @@ summary.mnp <- function(object, CI=c(2.5, 97.5),...){
   colnames(param.table) <- c("mean", "std.dev.", paste(min(CI), "%", sep=""),
                              paste(max(CI), "%", sep=""))
   
-  ans <- list(call=object$call, n.alt=p, n.obs=if(is.matrix(object$y))
+  ans <- list(call=object$call, base = object$base, n.alt=p, n.obs=if(is.matrix(object$y))
               nrow(object$y) else length(object$y), n.draws = n.draws,
               coef.table=param.table[1:n.cov,],
               cov.table=param.table[(n.cov+1):ncol(param),])  
diff --git a/data/detergent.txt b/data/detergent.txt
index 5852645..71f77f0 100644
--- a/data/detergent.txt
+++ b/data/detergent.txt
@@ -1,4 +1,4 @@
-"choice" "Tide" "Wisk" "EraPlus" "Surf" "Solo" "All"
+"choice" "TidePrice" "WiskPrice" "EraPlusPrice" "SurfPrice" "SoloPrice" "AllPrice"
 Wisk 0.060625000 0.0548625200 0.05867188 0.038906250 0.05562500 0.03890625
 All 0.058408200 0.0450000000 0.06453125 0.063016830 0.06453125 0.03890625
 Wisk 0.058671880 0.0467187500 0.06453125 0.064531250 0.05867188 0.03890625
diff --git a/man/detergent.Rd b/man/detergent.Rd
index 849ffd9..7c260a9 100644
--- a/man/detergent.Rd
+++ b/man/detergent.Rd
@@ -8,16 +8,16 @@
 }
 \usage{data(detergent)}
 
-\format{A matrix containing the following 7 variables and 2657 observations.
+\format{A data frame containing the following 7 variables and 2657 observations.
 
   \tabular{lll}{
     choice \tab factor \tab a brand chosen by each household\cr
-    Tide \tab numeric \tab log price of Tide\cr
-    Wisk \tab numeric \tab log price of Wisk\cr
-    EraPlus \tab numeric \tab log price of EraPlus\cr
-    Surf \tab numeric \tab log price of Surf\cr
-    Solo \tab numeric \tab log price of Solo\cr
-    All \tab numeric \tab log price of All\cr
+    TidePrice \tab numeric \tab log price of Tide\cr
+    WiskPrice \tab numeric \tab log price of Wisk\cr
+    EraPlusPrice \tab numeric \tab log price of EraPlus\cr
+    SurfPrice \tab numeric \tab log price of Surf\cr
+    SoloPrice \tab numeric \tab log price of Solo\cr
+    AllPrice \tab numeric \tab log price of All\cr
   }
 
 }
diff --git a/man/japan.Rd b/man/japan.Rd
index 31c0ad5..fbd1194 100644
--- a/man/japan.Rd
+++ b/man/japan.Rd
@@ -3,17 +3,18 @@
 \alias{japan}
 \title{Voters' Preferences of Political Parties in Japan (1995)}
 \description{
-  This dataset gives voters' preferences of political parties (Liberal
-  Democratic Party (LDP), New Frontier Party (NFP), Sakigake (SKG), and
-  Japanese Communist Party (JCP)) in Japan on the 0 (least preferred) - 100
-  (most preferred) scale. It is based on the 1995 survey data of 418
-  individual voters. The data also include the sex, education level, and age of
-  the voters. 
+  This dataset gives voters' preferences of political parties
+  in Japan on the 0 (least preferred) - 100 (most preferred) scale.
+  It is based on the 1995 survey data of 418 individual voters.
+  The data also include the sex, education level, and age of
+  the voters. The survey allowed voters to choose among four parties:
+  Liberal Democratic Party (LDP), New Frontier Party (NFP), Sakigake
+  (SKG), and Japanese Communist Party (JCP).
 }
 
 \usage{data(japan)}
 
-\format{A matrix containing the following 7 variables for 418 observations.
+\format{A data frame containing the following 7 variables for 418 observations.
   
   \tabular{lll}{
     LDP \tab preference for Liberal Democratic Party \tab 0 - 100 \cr
diff --git a/man/mnp.Rd b/man/mnp.Rd
index 9270ab1..4f6de23 100644
--- a/man/mnp.Rd
+++ b/man/mnp.Rd
@@ -3,16 +3,15 @@
 \alias{mnp}
 \alias{MNP}
 
-\title{Fitting Multinomial Probit Models via Markov chain Monte Carlo} 
+\title{Fitting the Multinomial Probit Model via Markov chain Monte Carlo} 
 
 \description{
   \code{mnp} is used to fit (Bayesian) multinomial probit
-  models via Markov chain Monte Carlo. Along with the standard
-  multinomial probit model, \code{mnp} can also fit models with
-  different choice sets for each observation, and complete or partial
-  ordering of all the available alternatives. The computation uses the
-  efficient marginal data augmentation algorithm that is developed by
-  Imai and van Dyk (2005).  }
+  model via Markov chain Monte Carlo.  \code{mnp} can also fit the model
+  with different choice sets for each observation, and complete or
+  partial ordering of all the available alternatives. The computation
+  uses the efficient marginal data augmentation algorithm that is
+  developed by Imai and van Dyk (2005).  }
 
 \usage{
 mnp(formula, data = parent.frame(), choiceX = NULL, cXnames = NULL,
@@ -41,9 +40,9 @@ mnp(formula, data = parent.frame(), choiceX = NULL, cXnames = NULL,
   }
   \item{base}{The name of the base category. For the standard
     multinomial probit model, the default is the lowest level of the
-    response variable. For the multinomial ordered probit model, the
-    default base category is the last column in the matrix of
-    response variables.
+    response variable. For the multinomial probit model with ordered
+    preferences, the default base category is the last column in the
+    matrix of response variables.
   }
   \item{latent}{logical. If \code{TRUE}, then the latent variable W will
     be returned. See Imai and van Dyk (2005) for the notation. The
@@ -97,14 +96,14 @@ mnp(formula, data = parent.frame(), choiceX = NULL, cXnames = NULL,
 }
 
 \details{
-  For \strong{the standard multinomial probit model} where only the most
+  To fit the multinomial probit model when only the most
   preferred choice is observed, use the syntax for the formula, \code{y
   ~ x1 + x2}, where \code{y} is a factor variable indicating the most
   preferred choice and \code{x1} and \code{x2} are individual-specific
   covariates. The interactions of individual-specific variables with each
   of the choice indicator variables will be fit.
   
-  To specify \strong{choice-specific covariates}, use the syntax,
+  To specify choice-specific covariates, use the syntax,
   \code{choiceX=list(A=cbind(z1, z2), B=cbind(z3, z4), C=cbind(z5,
   z6))}, where \code{A}, \code{B}, and \code{C} represent the choice
   names of the response variable, and \code{z1} and \code{z2} are each
@@ -116,7 +115,7 @@ mnp(formula, data = parent.frame(), choiceX = NULL, cXnames = NULL,
   name for \code{z1}, \code{z3}, and \code{z5}, and \code{quantity}
   refers to that for \code{z2}, \code{z4}, and \code{z6}.
 
-  If \strong{the choice set} varies from one observation to another, use the
+  If the choice set varies from one observation to another, use the
   syntax, \code{cbind(y1, y2, y3) ~ x1 + x2}, in the case of a
   three choice problem, and indicate unavailable alternatives by
   \code{NA}. If only the most preferred choice is observed, \code{y1},
@@ -126,9 +125,9 @@ mnp(formula, data = parent.frame(), choiceX = NULL, cXnames = NULL,
    response matrix, \code{y3} in this particular example syntax, is
    used as the base category.
   
-  For \strong{the multinomial ordered probit model} where the complete
-  or partial ordering of the available alternatives is recorded, use the
-  same syntax as when the choice set varies (i.e., \code{cbind(y1, y2,
+  To fit the multinomial probit model when the complete
+  or partial ordering of the available alternatives is recorded, use
+  the same syntax as when the choice set varies (i.e., \code{cbind(y1, y2,
   y3, y4) ~ x1 + x2}). For each observation, all the available
   alternatives in the response variables should be numerically ordered
   in terms of preferences such as \code{1 2 2 3}. Ties are allowed. The
@@ -144,18 +143,25 @@ mnp(formula, data = parent.frame(), choiceX = NULL, cXnames = NULL,
 ## load the detergent data
 data(detergent)
 ## run the standard multinomial probit model with intercepts and the price
-res1 <- mnp(choice ~ 1, choiceX = list(Surf=Surf, Tide=Tide, Wisk=Wisk,
-                                       EraPlus=EraPlus, Solo=Solo, All=All),
+res1 <- mnp(choice ~ 1, choiceX = list(Surf=SurfPrice, Tide=TidePrice,
+                                       Wisk=WiskPrice, EraPlus=EraPlusPrice,
+                                       Solo=SoloPrice, All=AllPrice),
             cXnames = "price", data = detergent, n.draws = 500, burnin = 100,
-            thin = 3, verbose = TRUE) 
+            thin = 3, verbose = TRUE)
+## summarize the results
 summary(res1)
+## calculate the predicted probabilities for the first 3 observations
+predict(res1, newdata = detergent[1:3,], type="prob", verbose = TRUE)
 
 ## load the Japanese election data
 data(japan)
-## run the multinomial ordered probit model
+## run the multinomial probit model with ordered preferences
 res2 <- mnp(cbind(LDP, NFP, SKG, JCP) ~ sex + education + age, data = japan,
             verbose = TRUE)
+## summarize the results
 summary(res2)
+## calculate the predicted probabilities for the 10th observation
+predict(res2, newdata = japan[10,], type = "prob")
 }
 
 \value{
@@ -171,8 +177,10 @@ summary(res2)
     first dimension represents the alternatives, and the second
     dimension indexes the observations. The third dimension represents
     the Gibbs draws. Note that the latent variable for the base category
-    is set to 0, and therefore omitted from the output.} 
+    is set to 0, and therefore omitted from the output.}
+  \item{alt}{The names of alternatives.}
   \item{n.alt}{The total number of alternatives.}
+  \item{base}{The base category used for fitting.}
   \item{p.var}{The prior variance for the coefficients.}
   \item{p.df}{The prior degrees of freedom parameter for the covariance matrix.}
   \item{p.scale}{The prior scale matrix for the covariance matrix.}
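
The details section in the man/mnp.Rd diff above describes a single cbind()
response syntax covering both varying choice sets and complete or partial
orderings. A small toy encoding (hypothetical column names and values; NA
marks an unavailable alternative, and, by analogy with the japan example
where 0-100 thermometer scores are supplied directly, larger values are read
as more preferred):

  ## Toy ordered-preference responses for a three-alternative problem;
  ## this data frame would be supplied to mnp() as cbind(y1, y2, y3) ~ x1.
  prefs <- data.frame(y1 = c(1, 2, NA),   # obs 3: alternative y1 unavailable
                      y2 = c(2, 2, 1),    # obs 2: y1 and y2 are tied
                      y3 = c(3, 1, 2),
                      x1 = c(0.5, -1.2, 0.3))
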
diff --git a/man/predict.mnp.Rd b/man/predict.mnp.Rd
new file mode 100644
index 0000000..96e020b
--- /dev/null
+++ b/man/predict.mnp.Rd
@@ -0,0 +1,106 @@
+\name{predict.mnp}
+
+\alias{predict.mnp}
+
+\title{Posterior Prediction under the Bayesian Multinomial Probit Model}
+
+\description{
+  Obtains posterior predictions under a fitted (Bayesian) multinomial
+  probit model. \code{predict} method for class \code{mnp}.
+}
+
+\usage{
+  \method{predict}{mnp}(object, newdata = NULL, newdraw = NULL,
+          type = c("prob", "choice", "order", "latent"), verbose = FALSE, ...)
+}
+
+\arguments{
+  \item{object}{An output object from \code{mnp}.}
+  \item{newdata}{An optional data frame containing the values of the
+    predictor variables. Predictions for multiple values of the
+    predictor variables can be made simultaneously if \code{newdata} has
+    multiple rows. The default is the original data frame used
+    for fitting the model.
+  }
+  \item{newdraw}{An optional matrix of MCMC draws to be used for
+    posterior predictions. The default is the original MCMC draws stored
+    in \code{object}.
+  }
+  \item{type}{The type of posterior predictions required. There are
+    four options:
+    \code{type = "prob"} returns the predictive probabilities of being
+    the most preferred choice among the choice set.
+    \code{type = "choice"} returns the Monte Carlo sample of the 
+    most preferred choice,
+    \code{type = "order"} returns the Monte Carlo sample of the
+    ordered preferences,
+    and \code{type = "latent"} returns the Monte Carlo sample of the
+    predictive values of the latent variable. The default is to return
+    all four types of posterior predictions.
+  }
+  \item{verbose}{logical. If \code{TRUE}, helpful messages along with a
+    progress report on the Monte Carlo sampling from the posterior 
+    predictive distributions are printed on the screen. The default is
+    \code{FALSE}.
+  }
+  \item{...}{further arguments passed to or from other methods.}
+}
+
+\details{The posterior predictive values are computed using the
+  Monte Carlo sample stored in the \code{mnp} output (or other sample if
+  \code{newdraw} is specified). Given each Monte Carlo sample of the
+  parameters and each vector of predictor variables, we sample the
+  vector-valued latent variable from the appropriate multivariate Normal
+  distribution. Then, using the sampled predictive values of the latent
+  variable, we construct the most preferred choice as well as the
+  ordered preferences. Averaging over the Monte Carlo sample
+  of the preferred choice, we obtain the predictive probabilities of
+  each choice being most preferred given the values of the predictor 
+  variables.  Since the predictive values are computed via Monte Carlo
+  simulations, each run may produce somewhat different values. The
+  computation may be slow if predictions with many values of the
+  predictor variables are required and/or if a large Monte Carlo
+  sample of the model parameters is used. In either case, setting
+  \code{verbose = TRUE} may be helpful in monitoring the progress of the
+  code.
+}
+
+\value{
+  \code{predict.mnp} yields a list containing at least one of the
+  following elements:
+  \item{o}{A three dimensional array of the Monte Carlo sample from the
+    posterior predictive distribution of the ordered preferences. The
+    first dimension corresponds to the alternatives in the choice set,
+    the second dimension corresponds to the rows of \code{newdata} (or
+    the original data set if \code{newdata} is left unspecified), and
+    the third dimension indexes the Monte Carlo sample.}
+  \item{p}{A matrix of the posterior predictive probabilities for each
+  alternative in the choice set being most preferred. The
+  rows correspond to the rows of \code{newdata} (or the original data
+  set if \code{newdata} is left unspecified) and the columns correspond
+  to the alternatives in the choice set.
+  }
+  \item{y}{A matrix of the Monte Carlo sample from the posterior predictive
+   distribution of the most preferred choice. The rows
+   correspond to the rows of \code{newdata} (or the original data set if
+   \code{newdata} is left unspecified) and the columns index the Monte
+   Carlo sample.
+  }
+  \item{w}{A three dimensional array of the Monte Carlo sample from the 
+    posterior predictive distribution of the latent
+    variable. The first dimension corresponds to the alternatives in the
+     choice set, the second dimension corresponds to the rows of
+     \code{newdata} (or the original data set if \code{newdata} is left
+     unspecified), and the third dimension indexes the Monte Carlo sample.
+  }
+}
+
+\seealso{\code{mnp}; MNP home page at
+  \url{http://www.princeton.edu/~kimai/research/MNP.html}}
+
+\author{
+  Kosuke Imai, Department of Politics, Princeton University
+  \email{kimai at Princeton.Edu}
+}
+
+\keyword{methods}
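
The details section of the new man/predict.mnp.Rd spells out the posterior
predictive algorithm in prose. A minimal, self-contained sketch of the same
idea with simulated draws and hypothetical names (the real method pulls the
coefficient and covariance draws from the fitted mnp object instead, and the
covariance is held fixed here only for brevity):

  ## Sample the (p-1)-dimensional latent vector for each draw, record which
  ## alternative is most preferred (the base category's latent value is 0),
  ## and average over draws to obtain predictive probabilities.
  library(MASS)                                  # mvrnorm() for the MVN draw
  set.seed(1)
  alt     <- c("A", "B", "C")                    # p = 3 alternatives, "A" = base
  n.draws <- 2000
  Xi      <- rbind(c(1, 0, 0.5),                 # (p-1) x n.cov design matrix
                   c(0, 1, 0.5))                 # for a single observation
  beta    <- matrix(rnorm(n.draws * 3, sd = 0.3),# pretend coefficient draws,
                    nrow = n.draws)              # one row per MCMC draw
  Sigma   <- matrix(c(1, 0.3, 0.3, 1), 2, 2)     # covariance held fixed here
  pick    <- character(n.draws)
  for (i in 1:n.draws) {
    W       <- as.vector(Xi %*% beta[i, ]) + mvrnorm(1, rep(0, 2), Sigma)
    pick[i] <- alt[which.max(c(0, W))]           # base corresponds to latent 0
  }
  prob <- table(factor(pick, levels = alt)) / n.draws
  prob                                           # predictive probabilities
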
diff --git a/man/summary.mnp.Rd b/man/summary.mnp.Rd
index 2445151..1f31b16 100644
--- a/man/summary.mnp.Rd
+++ b/man/summary.mnp.Rd
@@ -31,6 +31,7 @@
   containing the following elements:
   \item{call}{The call from \code{mnp}.}
   \item{n.alt}{The total number of alternatives.}
+  \item{base}{The base category used for fitting.}
   \item{n.obs}{The number of observations.}
   \item{n.draws}{The number of Gibbs draws used for the summary.}
   \item{coef.table}{The summary of the posterior distribution of the
diff --git a/src/MNP.c b/src/MNP.c
index 47de465..8951b1b 100644
--- a/src/MNP.c
+++ b/src/MNP.c
@@ -1,7 +1,6 @@
 /******************************************************************
   This file is a part of MNP: R Package for Estimating the 
-  Multinomial Probit Models by Kosuke Imai, Jordan R. Vance, and 
-  David A. van Dyk.
+  Multinomial Probit Models by Kosuke Imai and David A. van Dyk.
   Copyright: GPL version 2 or later.
 *******************************************************************/
 
@@ -148,17 +147,15 @@ void cMNPgibbs(int *piNDim, int *piNCov, int *piNSamp, int *piNGen,
     }
   }
 
-  /** initialize W **/
+  /** starting values for W **/
   for(i=0;i<n_samp;i++) {
-    for(j=0;j<n_dim;j++) {
-      if(*piMoP) 
-	W[i][j]=(double)(Y[i][j]-Y[i][n_dim])/10;
-      else {
-	if(y[i]==(j+1))
-	  W[i][j]=0.1;
-	else
-	  W[i][j]=-0.1;
-      }
+    for(j=0;j<n_dim;j++){
+      Xbeta[i][j]=0;
+      for (k=0;k<n_cov;k++) Xbeta[i][j] += X[i*n_dim+j][k]*beta[k];
+      if(*piMoP) /* you DO need to worry about ordering for this */
+	W[i][j] = (double)(Y[i][j]-Y[i][n_dim])/(double) n_dim;
+      else /* you DON'T need to worry about ordering for this */
+	W[i][j] = Xbeta[i][j] + norm_rand();
     }
     W[i][n_dim]=0.0;
   }
@@ -203,7 +200,7 @@ void cMNPgibbs(int *piNDim, int *piNCov, int *piNSamp, int *piNGen,
     }
 
     /** Truncated Multinomial Normal Sampling for W **/
-    if(*piMoP) { /* Multinomial ordered Probit */
+    if(*piMoP) { /* Multinomial Probit with Ordered Preferences */
       for(i=0;i<n_samp;i++){
 	for(j=0;j<n_dim;j++){
 	  Xbeta[i][j]=0;
@@ -248,8 +245,8 @@ void cMNPgibbs(int *piNDim, int *piNCov, int *piNSamp, int *piNGen,
 	  if(Y[i][j]==-1) 
 	    W[i][j] = cmean + norm_rand()*sqrt(cvar);
 	  else {
-	    if(Y[i][j]==0) minw = cmean - 100*sqrt(cvar);
-	    if(Y[i][j]==maxy[i]) maxw = cmean + 100*sqrt(cvar);
+	    if(Y[i][j]==0) minw = cmean - 1000*sqrt(cvar);
+	    if(Y[i][j]==maxy[i]) maxw = cmean + 1000*sqrt(cvar);
 	    W[i][j]=TruncNorm(minw,maxw,cmean,cvar); 
 	  }
 	  /* printf("%14g\n", W[i][j]); */
@@ -258,7 +255,7 @@ void cMNPgibbs(int *piNDim, int *piNCov, int *piNSamp, int *piNGen,
 	}
       }
     }
-    else { /* standard MNP */
+    else { /* standard multinomial probit */
       for(i=0;i<n_samp;i++){
 	for(j=0;j<n_dim;j++) {
 	  Xbeta[i][j]=0;
@@ -268,7 +265,8 @@ void cMNPgibbs(int *piNDim, int *piNCov, int *piNSamp, int *piNGen,
 	  maxw=0.0; itemp=0; 
 	  for(k=0;k<n_dim;k++) {	  		  		  
 	    if(j!=k) {
-	      if(maxw<W[i][k]) maxw=W[i][k];
+	      maxw = fmax2(maxw, W[i][k]);
+	      /* if(maxw<W[i][k]) maxw=W[i][k]; */
 	      vtemp[itemp++]=W[i][k]-Xbeta[i][k];
 	    }
 	  }
@@ -281,9 +279,9 @@ void cMNPgibbs(int *piNDim, int *piNCov, int *piNSamp, int *piNGen,
 	  if(y[i]==-1)
 	    W[i][j]=cmean+norm_rand()*sqrt(cvar);
 	  else if(y[i]==(j+1)) 
-	    W[i][j]=TruncNorm(maxw,cmean+100*sqrt(cvar),cmean,cvar); 
+	    W[i][j]=TruncNorm(maxw,cmean+1000*sqrt(cvar),cmean,cvar); 
 	  else
-	    W[i][j]=TruncNorm(cmean-100*sqrt(cvar),maxw,cmean,cvar);
+	    W[i][j]=TruncNorm(cmean-1000*sqrt(cvar),maxw,cmean,cvar);
 	  X[i*n_dim+j][n_cov]=W[i][j];
 	  X[i*n_dim+j][n_cov]*=sqrt(alpha2);
 	}
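
The widened bounds in the hunks above (100 -> 1000 standard deviations around
the conditional mean) feed into TruncNorm(), whose implementation is not part
of this diff. As a rough illustration only, an inverse-CDF truncated normal
draw in R behaves as follows; the C routine may well use a different
algorithm:

  ## Draw from N(mean, var) restricted to (lower, upper) via the inverse CDF.
  rtnorm1 <- function(lower, upper, mean, var) {
    sd <- sqrt(var)
    u  <- runif(1, pnorm(lower, mean, sd), pnorm(upper, mean, sd))
    qnorm(u, mean, sd)
  }
  set.seed(1)
  ## With the upper bound at mean + 1000*sd the truncation there is
  ## effectively inactive, so only the lower bound binds in this call.
  rtnorm1(0.2, 5 + 1000 * 1, mean = 5, var = 1)
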

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/r-cran-mnp.git


