[r-cran-bayesm] 01/44: Import Upstream version 0.0-2

Andreas Tille tille at debian.org
Thu Sep 7 11:16:19 UTC 2017


This is an automated email from the git hooks/post-receive script.

tille pushed a commit to branch master
in repository r-cran-bayesm.

commit 7c84b4fa93ed895fbe0680bb5f1afef66bdd3b82
Author: Andreas Tille <tille at debian.org>
Date:   Thu Sep 7 13:08:04 2017 +0200

    Import Upstream version 0.0-2
---
 DESCRIPTION                         |  28 +++
 NAMESPACE                           |  14 ++
 R/breg.R                            |  25 +++
 R/cgetC.R                           |  14 ++
 R/condMom.R                         |  27 +++
 R/createX.R                         |  72 ++++++
 R/eMixMargDen.R                     |  22 ++
 R/fsh.R                             |  13 ++
 R/ghkvec.R                          |  12 +
 R/init.rmultiregfp.R                |  28 +++
 R/llmnl.R                           |  25 +++
 R/llmnp.R                           |  70 ++++++
 R/llnhlogit.R                       |  39 ++++
 R/lndIChisq.R                       |  11 +
 R/lndIWishart.R                     |  31 +++
 R/lndMvn.R                          |  13 ++
 R/lndMvst.R                         |  12 +
 R/logMargDenNR.R                    |  17 ++
 R/mixDen.R                          |  51 +++++
 R/mnlHess.R                         |  33 +++
 R/momMix.R                          |  67 ++++++
 R/nmat.R                            |  12 +
 R/numEff.R                          |  25 +++
 R/rScaleUsage.R                     | 427 ++++++++++++++++++++++++++++++++++++
 R/rbprobitGibbs.R                   | 152 +++++++++++++
 R/rdirichlet.R                      |  12 +
 R/rhierLinearModel.R                | 260 ++++++++++++++++++++++
 R/rhierMnlRwMixture.R               | 364 ++++++++++++++++++++++++++++++
 R/rivGibbs.R                        | 210 ++++++++++++++++++
 R/rmixGibbs.R                       |  84 +++++++
 R/rmixture.R                        |  36 +++
 R/rmnlIndepMetrop.R                 | 153 +++++++++++++
 R/rmnpGibbs.R                       | 197 +++++++++++++++++
 R/rmultireg.R                       |  56 +++++
 R/rmultiregfp.R                     |  53 +++++
 R/rmvpGibbs.R                       | 193 ++++++++++++++++
 R/rmvst.R                           |   8 +
 R/rnmixGibbs.R                      | 160 ++++++++++++++
 R/rtrun.R                           |  11 +
 R/runireg.R                         |  47 ++++
 R/runiregGibbs.R                    | 143 ++++++++++++
 R/rwishart.R                        |  30 +++
 R/simmnl.R                          |  51 +++++
 R/simmnlwX.R                        |  41 ++++
 R/simmnp.R                          |  27 +++
 R/simmvp.R                          |  18 ++
 R/simnhlogit.R                      |  51 +++++
 inst/doc/Some_Useful_R_Pointers.pdf | Bin 0 -> 598836 bytes
 inst/doc/Tips_On_Using_bayesm.pdf   | Bin 0 -> 54746 bytes
 man/breg.Rd                         |  68 ++++++
 man/cgetC.Rd                        |  39 ++++
 man/condMom.Rd                      |  54 +++++
 man/createX.Rd                      |  58 +++++
 man/eMixMargDen.Rd                  |  53 +++++
 man/fsh.Rd                          |  21 ++
 man/ghkvec.Rd                       |  48 ++++
 man/init.rmultiregfp.Rd             |  46 ++++
 man/llmnl.Rd                        |  50 +++++
 man/llmnp.Rd                        |  66 ++++++
 man/llnhlogit.Rd                    |  55 +++++
 man/lndIChisq.Rd                    |  44 ++++
 man/lndIWishart.Rd                  |  46 ++++
 man/lndMvn.Rd                       |  48 ++++
 man/lndMvst.Rd                      |  49 +++++
 man/logMargDenNR.Rd                 |  35 +++
 man/mixDen.Rd                       |  46 ++++
 man/mnlHess.Rd                      |  44 ++++
 man/momMix.Rd                       |  51 +++++
 man/nmat.Rd                         |  37 ++++
 man/numEff.Rd                       |  44 ++++
 man/rbprobitGibbs.Rd                |  79 +++++++
 man/rdirichlet.Rd                   |  40 ++++
 man/rhierLinearModel.Rd             | 109 +++++++++
 man/rhierMnlRwMixture.Rd            | 211 ++++++++++++++++++
 man/rivGibbs.Rd                     |  87 ++++++++
 man/rmixGibbs.Rd                    |  47 ++++
 man/rmixture.Rd                     |  39 ++++
 man/rmnlIndepMetrop.Rd              |  62 ++++++
 man/rmnpGibbs.Rd                    | 101 +++++++++
 man/rmultireg.Rd                    |  83 +++++++
 man/rmultiregfp.Rd                  |  48 ++++
 man/rmvpGibbs.Rd                    | 104 +++++++++
 man/rmvst.Rd                        |  42 ++++
 man/rnmixGibbs.Rd                   | 114 ++++++++++
 man/rscaleUsage.Rd                  |  61 ++++++
 man/rtrun.Rd                        |  44 ++++
 man/runireg.Rd                      |  71 ++++++
 man/runiregGibbs.Rd                 |  70 ++++++
 man/rwishart.Rd                     |  49 +++++
 man/simmnl.Rd                       |  42 ++++
 man/simmnlwX.Rd                     |  36 +++
 man/simmnp.Rd                       |  44 ++++
 man/simmvp.Rd                       |  43 ++++
 man/simnhlogit.Rd                   |  45 ++++
 src/bayesmc.c                       | 347 +++++++++++++++++++++++++++++
 src/bayesmcpp.cpp                   |  80 +++++++
 96 files changed, 6545 insertions(+)

diff --git a/DESCRIPTION b/DESCRIPTION
new file mode 100755
index 0000000..7ac5454
--- /dev/null
+++ b/DESCRIPTION
@@ -0,0 +1,28 @@
+Package: bayesm
+Version: 0.0-2
+Date: 2005-04-26
+Title: Bayesian Inference for Marketing/Micro-econometrics
+Author: Peter Rossi <peter.rossi at ChicagoGsb.edu>, 
+        Rob McCulloch <robert.mcculloch at ChicagoGsb.edu>.
+Maintainer: Peter Rossi <peter.rossi at chicagosb.edu>
+Depends: R (>= 1.8.1)
+Description: bayesm covers many important models used
+  in marketing and micro-econometrics applications. 
+  The package includes:
+  Bayes Regression (univariate or multivariate dep var),
+  Multinomial Logit (MNL) and Multinomial Probit (MNP),
+  Multivariate Probit,
+  Multivariate Mixtures of Normals,
+  Hierarchical Linear Models with normal prior and covariates,
+  Hierarchical Multinomial Logits with mixture of normals prior
+     and covariates,
+  Bayesian analysis of choice-based conjoint data,
+  Bayesian treatment of linear instrumental variables models,
+  and
+  Analysis of Multivariate Ordinal survey data with scale
+     usage heterogeneity (as in Rossi et al, JASA (01)).
+  For further reference, consult our book, Bayesian Statistics and
+  Marketing by Allenby, McCulloch and Rossi. 
+License: GPL (version 2 or later)
+URL: http://gsbwww.uchicago.edu/fac/peter.rossi/research/bsm.html
+Packaged: Tue Apr 26 16:13:16 2005; per
diff --git a/NAMESPACE b/NAMESPACE
new file mode 100755
index 0000000..7b2ac6e
--- /dev/null
+++ b/NAMESPACE
@@ -0,0 +1,14 @@
+useDynLib(bayesm)
+
+export(breg,cgetC,createX,eMixMargDen,mixDen,fsh,llmnl,llmnp,llnhlogit,
+lndIChisq,lndIWishart,lndMvn,lndMvst,mnlHess,momMix,nmat,numEff,rdirichlet,
+rmixture,rmultireg,rmultiregfp,rwishart,rmvst,rtrun,rbprobitGibbs,runireg,
+runiregGibbs,simmnl,simmnp,simmvp,simnhlogit,rmnpGibbs,rmixGibbs,rnmixGibbs,
+rmvpGibbs,rhierLinearModel,rhierMnlRwMixture,simmnlwX,rivGibbs,
+rmnlIndepMetrop,rscaleUsage,ghkvec,condMom,logMargDenNR,init.rmultiregfp)
+
+
+
+
+
+
diff --git a/R/breg.R b/R/breg.R
new file mode 100755
index 0000000..8ccb857
--- /dev/null
+++ b/R/breg.R
@@ -0,0 +1,25 @@
+breg=
+function(y,X,betabar,A) 
+{
+#
+# P.Rossi 12/04
+#  revision history:
+#    P. Rossi 3/27/05 -- changed to augment strategy
+#
+# Purpose: draw from posterior for linear regression, sigmasq=1.0
+#
+# Output:  draw from posterior
+# 
+# Model: y = Xbeta + e  e ~ N(0,I)
+#
+# Prior:  beta ~ N(betabar,A^-1)
+#
+k=length(betabar)
+RA=chol(A)
+W=rbind(X,RA)
+z=c(y,as.vector(RA%*%betabar))
+IR=backsolve(chol(crossprod(W)),diag(k))
+#      W'W=R'R ;  (W'W)^-1 = IR IR'  -- this is UL decomp
+return(crossprod(t(IR))%*%crossprod(W,z)+IR%*%rnorm(k))
+
+}
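
A minimal usage sketch for breg on simulated data (illustrative values only, not from the upstream sources; note the error variance is fixed at 1.0 in this model):

    set.seed(66)
    n=100; k=3
    X=cbind(rep(1,n),matrix(rnorm(n*(k-1)),ncol=k-1))
    y=X%*%c(1,-1,.5)+rnorm(n)
    betabar=rep(0,k); A=.01*diag(k)
    betadraw=breg(y,X,betabar,A)   # one draw of beta from its posterior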
diff --git a/R/cgetC.R b/R/cgetC.R
new file mode 100755
index 0000000..fdd5f39
--- /dev/null
+++ b/R/cgetC.R
@@ -0,0 +1,14 @@
+cgetC = function(e,k) 
+{
+# purpose: get a list of cutoffs for use with scale usage problems
+#
+# arguments:
+#   e: the "e" parameter from the paper
+#   k: the point scale, eg. items are rated from 1,2,...k
+# output:
+#   vector of grid points
+temp = (1:(k-1))+.5
+m1 = sum(temp)
+m2 = sum(temp^2)
+return(.C('getC',as.double(e),as.integer(k),as.double(m1),as.double(m2),cc=double(k+1))$cc)
+}
diff --git a/R/condMom.R b/R/condMom.R
new file mode 100755
index 0000000..2759bfb
--- /dev/null
+++ b/R/condMom.R
@@ -0,0 +1,27 @@
+condMom=
+function(x,mu,sigi,i)
+{
+#
+# revision history:
+#    rossi modified allenby code 4/05
+#
+# purpose:compute moments of conditional distribution of ith element of normal given
+# all others
+#
+# arguments:
+#   x: vector of values to condition on
+#   mu: mean vector of length(x)-dim MVN
+#   sigi: inverse of covariance matrix
+#   i: element to condition on
+# 
+# output:
+#   list with conditional mean and variance
+#
+# Model: x ~MVN(mu,Sigma)
+#   computes moments of x_i given x_{-i}
+#
+sig=1./sigi[i,i]
+m=mu[i] - as.vector(x[-i]-mu[-i])%*%as.vector(sigi[-i,i])*sig
+return(list(cmean=as.vector(m),cvar=sig))
+}
+
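
A small sketch of a condMom call (illustrative numbers; sigi is the inverse of the covariance matrix, as the comments above describe):

    Sigma=matrix(c(1,.5,.5,1),ncol=2)
    sigi=chol2inv(chol(Sigma))
    condMom(x=c(.1,.3),mu=c(0,0),sigi=sigi,i=2)   # $cmean and $cvar of x_2 given x_1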
diff --git a/R/createX.R b/R/createX.R
new file mode 100755
index 0000000..4d22773
--- /dev/null
+++ b/R/createX.R
@@ -0,0 +1,72 @@
+createX=
+function(p,na,nd,Xa,Xd,INT=TRUE,DIFF=FALSE,base=p)
+{
+#
+# Revision History:
+#   P. Rossi 3/05
+#
+# purpose:
+# function to create X array in the format needed by MNL and MNP routines
+#
+# Arguments:
+#  p is number of choices
+#  na is number of choice attribute variables (choice-specific characteristics)
+#  nd is number of "demo" variables or characteristics of choosers
+#  Xa is an n x (na*p) matrix of choice attributes.  First p cols are 
+#     values of attribute #1 for each of p choices, second p for attribute
+#     # 2 ...
+#  Xd is an n x nd matrix of values of "demo" variables
+#  INT is a logical flag for intercepts 
+#  DIFF is a logical flag for differencing wrt to base alternative
+#     (required for MNP)
+#  base is base alternative (default is p)
+#
+#  note: if you don't have any attributes or "demos", set the
+#        corresponding na,Xa or nd,Xd to NULL
+#        you must specify p,na,nd,Xa,Xd for the function to work
+#
+# Output:
+#  modified X matrix with n*p rows and INT*(p-1)+nd*(p-1) + na cols
+#
+#
+# check arguments
+#
+pandterm=function(message) {stop(message,call.=FALSE)}
+if(missing(p)) pandterm("requires p (# choice alternatives)")
+if(missing(na)) pandterm("requires na arg (use na=NULL if none)")
+if(missing(nd)) pandterm("requires nd arg (use nd=NULL if none)")
+if(missing(Xa)) pandterm("requires Xa arg (use Xa=NULL if none)")
+if(missing(Xd)) pandterm("requires Xd arg (use Xd=NULL if none)")
+if(is.null(Xa) && is.null(Xd)) pandterm("both Xa and Xd NULL -- requires one non-null")
+if(!is.null(na)  && !is.null(Xa)) 
+   {if(ncol(Xa) != p*na) pandterm(paste("bad Xa dim, dim=",dim(Xa)))}
+if(!is.null(nd) && !is.null(Xd))
+   {if(ncol(Xd) != nd) pandterm(paste("ncol(Xd) ne nd, ncol(Xd)=",ncol(Xd)))}
+if(!is.null(Xa) && !is.null(Xd)) 
+   {if(nrow(Xa) != nrow(Xd)) 
+       {pandterm(paste("nrow(Xa) ne nrow(Xd),nrow(Xa)= ",nrow(Xa)," nrow(Xd)= ",nrow(Xd)))}} 
+if(is.null(Xa)) {n=nrow(Xd)} else {n=nrow(Xa)}
+
+if(INT)  {Xd=cbind(c(rep(1,n)),Xd)}
+if(DIFF) {Imod=diag(p-1)} else {Imod=matrix(0,p,p-1); Imod[-base,]=diag(p-1)}
+if(!is.null(Xd)) Xone=Xd %x%Imod else Xone=NULL
+
+Xtwo=NULL
+if(!is.null(Xa))
+   {if(DIFF) 
+      {tXa=matrix(t(Xa),nrow=p)
+       Idiff=diag(p); Idiff[,base]=c(rep(-1,p));Idiff=Idiff[-base,] 
+       tXa=Idiff%*%tXa
+       Xa=matrix(as.vector(tXa),ncol=(p-1)*na,byrow=TRUE)
+       for (i in 1:na) 
+           {Xext=Xa[,((i-1)*(p-1)+1):((i-1)*(p-1)+p-1)] 
+            Xtwo=cbind(Xtwo,as.vector(t(Xext)))}
+       }
+    else
+      { for (i in 1:na) 
+            { Xext=Xa[,((i-1)*p+1):((i-1)*p+p)] 
+              Xtwo=cbind(Xtwo,as.vector(t(Xext)))}
+      }
+    }
+return(cbind(Xone,Xtwo))
+}
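
An illustrative createX call (made-up dimensions): three alternatives, one attribute variable and one "demo" variable, giving an n*p row design matrix with INT*(p-1)+nd*(p-1)+na = 5 columns:

    n=5; p=3
    Xa=matrix(runif(n*p),ncol=p)            # e.g. prices of the p alternatives
    Xd=matrix(rnorm(n),ncol=1)              # one chooser characteristic
    X=createX(p=p,na=1,nd=1,Xa=Xa,Xd=Xd)    # 15 x 5 matrix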
diff --git a/R/eMixMargDen.R b/R/eMixMargDen.R
new file mode 100755
index 0000000..5269c3e
--- /dev/null
+++ b/R/eMixMargDen.R
@@ -0,0 +1,22 @@
+eMixMargDen=
+function(grid,probdraw,compdraw) 
+{
+#
+# Revision History:
+#   R. McCulloch 11/04
+#
+# purpose: compute the marginal density of a normal mixture averaged over MCMC draws
+#
+# arguments:
+#    grid -- array of grid points, grid[,i] are ordinates for ith component
+#    probdraw -- ith row is ith draw of probabilities of mixture comp
+#    compdraw -- list of lists of draws of mixture comp moments (each sublist is from mixgibbs)
+#
+# output:
+#    array of same dim as grid with density values
+#
+#
+den=matrix(0,nrow(grid),ncol(grid))
+for(i in 1:length(compdraw)) den=den+mixDen(grid,probdraw[i,],compdraw[[i]])
+return(den/length(compdraw))
+}
diff --git a/R/fsh.R b/R/fsh.R
new file mode 100755
index 0000000..4fe20dd
--- /dev/null
+++ b/R/fsh.R
@@ -0,0 +1,13 @@
+fsh=function() 
+{
+# 
+# P. Rossi
+# revision history: 3/27/05
+#
+# Purpose:
+#  function to flush console (needed only under windows)
+#
+if (Sys.info()[1] == "Windows") flush.console()
+return()
+}
+
diff --git a/R/ghkvec.R b/R/ghkvec.R
new file mode 100755
index 0000000..6ffe94f
--- /dev/null
+++ b/R/ghkvec.R
@@ -0,0 +1,12 @@
+ghkvec = 
+function(L,trunpt,above,r){
+#
+# R interface to GHK code -- allows for a vector of truncation points
+# revision history-
+#    P. Rossi 4/05
+#
+   dim=length(above)
+   n=length(trunpt)/dim
+return(.C('ghk_vec',as.integer(n),as.double(L),as.double(trunpt),
+   as.integer(above),as.integer(dim), as.integer(r),res=double(n))$res)
+}
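
An illustrative ghkvec call (requires the package's compiled ghk_vec routine; the interpretation of the arguments below follows the usage in llmnp, where above=0 marks a lower truncation):

    L=t(chol(diag(2)))                           # lower triangular root, as the C code expects
    ghkvec(L,trunpt=c(0,0),above=c(0,0),r=100)   # approx P(z1>0,z2>0)=.25 for z~N(0,I)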
diff --git a/R/init.rmultiregfp.R b/R/init.rmultiregfp.R
new file mode 100755
index 0000000..2a61535
--- /dev/null
+++ b/R/init.rmultiregfp.R
@@ -0,0 +1,28 @@
+init.rmultiregfp=
+function(X,A,Bbar,nu,V) 
+{
+#
+#       revision history:
+#       revised 1/11/05 by P. Rossi
+# purpose: 
+#    prepare parameters for rmultiregfp  
+#        rmultiregfp is designed to take advantage of the fact that posterior calculations
+#        don't change X or prior parms even when the "data" or Y is changing
+#
+#        use to create list for call of rmultiregfp as in:
+#               Fparm=init.rmultiregfp(X,A,Bbar,nu,V)
+#	        rmultiregfp(Y,X,Fparm)
+#
+# 
+# for multivariate regression model Y = XB + U  Y is n x m, X is n x k, B is k x m
+#			ith row of U is N(0,Sigma)
+#
+#	 prior parms:  vec(B)| Sigma ~ N(vec(Bbar),Sigma (x) A^-1)  
+#                      Sigma ~ IW(nu,V)
+#       
+k=ncol(X)
+IR=backsolve(chol(crossprod(X)+A),diag(k))
+RA=chol(A)
+RABbar=RA%*%Bbar
+return(list(IR=IR,RA=RA,RABbar=RABbar,nu=nu,V=V))
+}
diff --git a/R/llmnl.R b/R/llmnl.R
new file mode 100755
index 0000000..ef49256
--- /dev/null
+++ b/R/llmnl.R
@@ -0,0 +1,25 @@
+llmnl= 
+function(y,X,beta) 
+{
+#    p. rossi 2004
+#
+# Purpose:evaluate log-like for MNL
+#
+# Arguments:
+#   y is n vector with element = 1,...,j indicating which alt chosen
+#   X is nj x k matrix of xvalues for each of j alt on each of n occasions
+#   beta is k vector of coefs
+# 
+# Output: value of loglike
+#
+n=length(y)
+j=nrow(X)/n
+Xbeta=X%*%beta
+Xbeta=matrix(Xbeta,byrow=T,ncol=j)
+ind=cbind(c(1:n),y)
+xby=Xbeta[ind]
+Xbeta=exp(Xbeta)
+iota=c(rep(1,j))
+denom=log(Xbeta%*%iota)
+return(sum(xby-denom))
+}
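
A quick sanity check of llmnl on simulated data (illustrative; with beta=0 every alternative has probability 1/p, so the log-likelihood equals n*log(1/p)):

    set.seed(66)
    n=100; p=3
    X=createX(p,na=1,nd=1,Xa=matrix(runif(n*p),ncol=p),Xd=matrix(rnorm(n),ncol=1))
    y=sample(1:p,n,replace=TRUE)
    llmnl(y,X,rep(0,ncol(X)))      # equals 100*log(1/3)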
diff --git a/R/llmnp.R b/R/llmnp.R
new file mode 100755
index 0000000..4c1daa0
--- /dev/null
+++ b/R/llmnp.R
@@ -0,0 +1,70 @@
+llmnp=
+function(X,y,beta,Sigma,r) 
+{
+#
+# revision history:
+#   edited by rossi 2/8/05
+#
+# purpose:
+#   function to evaluate MNP likelihood using GHK
+#
+# arguments: 
+#   X is n*(p-1) x k array of covariates (including intercepts)
+#      note: X is from the "differenced" system
+#   y is vector of n indicators of multinomial response
+#   beta is k x 1  with k = ncol(X)
+#   Sigma is p-1 x p-1 
+#   r is the number of random draws to use in GHK
+#
+# output -- value of log-likelihood
+# for each observation w = Xbeta + e   e ~N(0,Sigma)
+#   if y=j (j<p)
+#      w_j > max(w_-j) and w_j >0
+#   if y=p,  w < 0
+#
+# to use GHK we must transform so that these are rectangular regions
+# e.g.  if y=1,  w_1 > 0 and w_1 - w_-1 > 0 
+#
+# define Aj such that if j=1,..,p-1, Ajw = Ajmu + Aje > 0 is equivalent to y=j
+#   implies Aje > -Ajmu
+#   lower truncation is -Ajmu and cov = AjSigma t(Aj)
+#
+# for p, e < - mu
+#
+#
+# define functions needed
+#
+ghkvec = function(L,trunpt,above,r){
+   dim=length(above)
+   n=length(trunpt)/dim
+   .C('ghk_vec',as.integer(n),as.double(L),as.double(trunpt),as.integer(above),as.integer(dim),
+   as.integer(r),res=double(n))$res}
+#   
+# compute means for each observation
+#
+pm1=ncol(Sigma)
+k=length(beta)
+mu=matrix(X%*%beta,nrow=pm1)
+logl=0.0
+above=rep(0,pm1)
+for (j in 1:pm1) {
+   muj=mu[,y==j]
+   Aj=diag(rep(-1,pm1))
+   Aj[,j]=rep(1,pm1)
+   trunpt=as.vector(-Aj%*%muj)
+   Lj=t(chol(Aj%*%Sigma%*%t(Aj)))
+#     note: rob's routine expects lower triangular root
+   logl=logl + sum(log(ghkvec(Lj,trunpt,above,r)))
+#     note:  ghkvec does an entire vector of n probs each with different truncation points but the
+#            same cov matrix.  
+}
+#
+# now do obs for y=p
+#
+trunpt=as.vector(-mu[,y==(pm1+1)])
+Lj=t(chol(Sigma))
+above=rep(1,pm1)
+logl=logl+sum(log(ghkvec(Lj,trunpt,above,r)))
+return(logl)
+
+}
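
An illustrative llmnp evaluation (requires the compiled GHK code; X must come from the differenced system, e.g. createX with DIFF=TRUE, and the values below are made up):

    set.seed(66)
    n=50; p=3
    X=createX(p,na=1,nd=NULL,Xa=matrix(runif(n*p),ncol=p),Xd=NULL,DIFF=TRUE,base=p)
    y=sample(1:p,n,replace=TRUE)
    llmnp(X,y,beta=rep(0,ncol(X)),Sigma=diag(p-1),r=100)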
diff --git a/R/llnhlogit.R b/R/llnhlogit.R
new file mode 100755
index 0000000..cdf2f08
--- /dev/null
+++ b/R/llnhlogit.R
@@ -0,0 +1,39 @@
+llnhlogit=function(theta,choice,lnprices,Xexpend) 
+{
+#   function to evaluate non-homothetic logit likelihood
+#       choice is a n x 1 vector with indicator of choice (1,...,m)
+#	lnprices is n x m array of log-prices faced
+#       Xexpend is n x d array of variables predicting expenditure
+#
+#       non-homothetic model specifies ln(psi_i(u))= alpha_i - exp(k_i)u
+#
+#       structure of theta vector:
+#           alpha  (m x 1)
+#           k      (m x 1)
+#           gamma  (k x 1)   expenditure function coefficients
+#           tau   scaling of v
+#	    
+root=function(c1,c2,tol,iterlim) {
+   u=double(length(c1))
+   .C("callroot",as.integer(length(c1)),as.double(c1),as.double(c2),as.double(tol),
+       as.integer(iterlim),r=as.double(u))$r}
+
+   m=ncol(lnprices)
+   n=length(choice)
+   d=ncol(Xexpend)
+   alpha=theta[1:m]
+   k=theta[(m+1):(2*m)]
+   gamma=theta[(2*m+1):(2*m+d)]
+   tau=theta[length(theta)]
+   iotam=c(rep(1,m))
+   c1=as.vector(Xexpend%*%gamma)%x%iotam-as.vector(t(lnprices))+alpha
+   c2=c(rep(exp(k),n))   
+   u=root(c1,c2,.0000001,20)
+   v=alpha - u*exp(k)-as.vector(t(lnprices))
+   vmat=matrix(v,ncol=m,byrow=TRUE)
+   vmat=tau*vmat
+   ind=seq(1,n)
+   vchosen=vmat[cbind(ind,choice)]
+   lnprob=vchosen-log((exp(vmat))%*%iotam)
+   return(sum(lnprob))
+}
diff --git a/R/lndIChisq.R b/R/lndIChisq.R
new file mode 100755
index 0000000..51bf2c6
--- /dev/null
+++ b/R/lndIChisq.R
@@ -0,0 +1,11 @@
+lndIChisq=
+function(nu,ssq,x)
+{
+#
+# P. Rossi 12/04
+#
+# Purpose: evaluate log-density of scaled Inverse Chi-sq
+#  density of r.var. Z=nu*ssq/chisq(nu)
+#
+return(-lgamma(nu/2)+(nu/2)*log((nu*ssq)/2)-((nu/2)+1)*log(x)-(nu*ssq)/(2*x))
+}
diff --git a/R/lndIWishart.R b/R/lndIWishart.R
new file mode 100755
index 0000000..197e23a
--- /dev/null
+++ b/R/lndIWishart.R
@@ -0,0 +1,31 @@
+lndIWishart=
+function(nu,S,IW)
+{
+# 
+# P. Rossi 12/04
+#
+# purpose: evaluate log-density of inverted Wishart
+#    includes normalizing constant
+#
+# arguments:
+#   nu is d. f. parm
+#   S is location matrix
+#   IW is the value at which the density should be evaluated
+#
+# output:
+#   value of log density
+#
+# note: in this parameterization, E[IW]=S/(nu-k-1)
+#
+k=ncol(S)
+Uiw=chol(IW)
+lndetSd2=sum(log(diag(chol(S))))
+lndetIWd2=sum(log(diag(Uiw)))
+#
+# first evaluate constant
+#
+const=((nu*k)/2)*log(2)+((k*(k-1))/4)*log(pi)
+arg=(nu+1-c(1:k))/2
+const=const+sum(lgamma(arg))
+return(-const+nu*lndetSd2-(nu+k+1)*lndetIWd2-.5*sum(diag(S%*%chol2inv(Uiw))))
+}
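
A small check of lndIWishart (illustrative): evaluate the density at the prior mean E[IW]=S/(nu-k-1) for a 2 x 2 case:

    nu=6; S=diag(2)
    lndIWishart(nu,S,S/(nu-3))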
diff --git a/R/lndMvn.R b/R/lndMvn.R
new file mode 100755
index 0000000..bafc680
--- /dev/null
+++ b/R/lndMvn.R
@@ -0,0 +1,13 @@
+lndMvn=
+function(x,mu,rooti)
+{
+#
+# function to evaluate log of MV Normal density with mean mu, var Sigma
+# and with Sigma^-1=rooti%*%t(rooti)
+# rooti is the inverse of the upper triangular chol root of Sigma
+#          note: this is the UL decomp of sigmai not LU!
+#                Sigma=root'root   root=inv(rooti)
+#
+z=as.vector(t(rooti)%*%(x-mu))
+return(-.5*(z%*%z) + sum(log(diag(rooti))))
+}
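
An illustrative lndMvn call; rooti is built as the inverse of the upper triangular Cholesky root of Sigma, matching the parameterization described above:

    Sigma=matrix(c(1,.5,.5,2),ncol=2)
    rooti=backsolve(chol(Sigma),diag(2))
    lndMvn(x=c(0,0),mu=c(1,1),rooti=rooti)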
diff --git a/R/lndMvst.R b/R/lndMvst.R
new file mode 100755
index 0000000..ad756be
--- /dev/null
+++ b/R/lndMvst.R
@@ -0,0 +1,12 @@
+lndMvst=
+function(x,nu,mu,rooti)
+{
+#
+# function to evaluate log of MVstudent t density with nu df, mean mu,
+# and with sigmai=rooti%*%t(rooti)   note: this is the UL decomp of sigmai not LU!
+# rooti is the inverse of the upper triangular chol root of Sigma
+# or Sigma=root'root   root=inv(rooti)
+#
+z=as.vector(t(rooti)%*%(x-mu))
+return(-((length(x)+nu)/2)*log(nu+z%*%z)+sum(log(diag(rooti))))
+}
diff --git a/R/logMargDenNR.R b/R/logMargDenNR.R
new file mode 100755
index 0000000..af8fe54
--- /dev/null
+++ b/R/logMargDenNR.R
@@ -0,0 +1,17 @@
+logMargDenNR = function(ll) {
+#
+# purpose:  compute log marginal density using Newton-Raftery
+#  importance sampling estimator: 1/ (1/g sum_g exp(-log like) )
+#    where log like is the likelihood of the model evaluated as the 
+#    posterior draws (x).
+#
+# arguments:
+#  ll -- vector of log-likelihood values evaluated at posterior draws
+#
+# output:
+#  estimated log-marginal density
+
+med=median(ll)
+return(med-log(mean(exp(-ll+med))))
+}
+
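
An illustrative logMargDenNR call (the log-likelihood values below are simulated stand-ins, not output from any of this package's samplers):

    ll=rnorm(5000,mean=-520,sd=2)   # pretend log-likelihoods at posterior draws
    logMargDenNR(ll)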
diff --git a/R/mixDen.R b/R/mixDen.R
new file mode 100755
index 0000000..3a63660
--- /dev/null
+++ b/R/mixDen.R
@@ -0,0 +1,51 @@
+mixDen=
+function(x,p,comps) 
+{
+# Revision History:
+#   R. McCulloch 11/04
+#   P. Rossi 3/05 -- put in backsolve
+#
+# purpose: compute marginal densities for multivariate mixture of normals (given by p and comps) at x
+#
+# arguments:
+#     x: ith columns gives evaluations for density of ith variable
+#     p: prior probabilities of normal components
+#     comps: list, each member is a list comp with ith normal component ~ N(comp[[1]],Sigma), 
+#            Sigma = t(R)%*%R, R^{-1} = comp[[2]]
+# Output:
+#     matrix with same shape as input, x, ith column gives marginal density of ith variable
+#
+# ---------------------------------------------------------------------------------------------
+# define function needed
+#
+ums=function(comps)
+{
+# purpose: obtain marginal means and standard deviations from list of normal components
+# arguments:
+#     comps: list, each member is a list comp with ith normal component ~N(comp[[1]],Sigma), 
+#            Sigma = t(R)%*%R, R^{-1} = comp[[2]]
+# returns:
+#  a list with [[1]]=$mu a matrix whose ith row is the means for ith component
+#  [[2]]=$sigma a matrix whose ith row is the standard deviations for the ith component
+#
+nc = length(comps)
+dim = length(comps[[1]][[1]])
+mu = matrix(0.0,nc,dim)
+sigma = matrix(0.0,nc,dim)
+for(i in 1:nc) {
+   mu[i,] = comps[[i]][[1]]
+#   root = solve(comps[[i]][[2]])
+   root= backsolve(comps[[i]][[2]],diag(rep(1,dim)))
+   sigma[i,] = sqrt(diag(t(root)%*%root))
+}
+return(list(mu=mu,sigma=sigma))
+}
+# ----------------------------------------------------------------------------------------------
+nc = length(comps)
+mars = ums(comps)
+den = matrix(0.0,nrow(x),ncol(x))
+for(i in 1:ncol(x)) {
+   for(j in 1:nc) den[,i] = den[,i] + dnorm(x[,i],mean = mars$mu[j,i],sd=mars$sigma[j,i])*p[j]
+}
+return(den)
+}
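
An illustrative mixDen call for a two-component, two-dimensional mixture (made-up parameters; each component list holds the mean and the inverse of the upper triangular Cholesky root, as described above):

    sig1=diag(2); sig2=matrix(c(1,.7,.7,1),ncol=2)
    comps=list(list(c(0,0),backsolve(chol(sig1),diag(2))),
               list(c(2,2),backsolve(chol(sig2),diag(2))))
    x=cbind(seq(-4,6,length=50),seq(-4,6,length=50))
    den=mixDen(x,p=c(.5,.5),comps=comps)    # 50 x 2 matrix of marginal densities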
diff --git a/R/mnlHess.R b/R/mnlHess.R
new file mode 100755
index 0000000..a3cfb2c
--- /dev/null
+++ b/R/mnlHess.R
@@ -0,0 +1,33 @@
+mnlHess =
+function(y,X,beta) 
+{
+#   p.rossi 2004
+#
+# Purpose: compute mnl -Expected[Hessian]  
+#
+# Arguments:
+#   y is n vector with element = 1,...,j indicating which alt chosen
+#   X is nj x k matrix of xvalues for each of j alt on each of n occasions
+#   beta is k vector of coefs
+#
+# Output:  -Hess evaluated at beta
+#
+n=length(y)
+j=nrow(X)/n
+k=ncol(X)
+Xbeta=X%*%beta
+Xbeta=matrix(Xbeta,byrow=T,ncol=j)
+Xbeta=exp(Xbeta)
+iota=c(rep(1,j))
+denom=Xbeta%*%iota
+Prob=Xbeta/as.vector(denom)
+
+Hess=matrix(double(k*k),ncol=k)
+for (i in 1:n) {
+        p=as.vector(Prob[i,])
+        A=diag(p)-outer(p,p)
+        Xt=X[(j*(i-1)+1):(j*i),]
+        Hess=Hess+crossprod(Xt,A)%*%Xt
+}
+return(Hess)
+}
diff --git a/R/momMix.R b/R/momMix.R
new file mode 100755
index 0000000..72b1180
--- /dev/null
+++ b/R/momMix.R
@@ -0,0 +1,67 @@
+momMix=
+function(probdraw,compdraw) 
+{
+#
+# Revision History:
+#   R. McCulloch 11/04
+#   P. Rossi 3/05  put in backsolve, fixed documentation
+#
+# purpose: compute moments of normal mixture averaged over MCMC draws
+#
+# arguments:
+#    probdraw -- ith row is ith draw of probabilities of mixture comp
+#    compdraw -- list of lists of draws of mixture comp moments (each sublist is from mixgibbs)
+#
+# output:
+#   a list with the mean vector, covar matrix, vector of std devs, and corr matrix
+#
+# ----------------------------------------------------------------------------------
+# define function needed
+mom=function(prob,comps){
+# purpose: obtain mu and cov from list of normal components
+#
+# arguments:
+#     prob: vector of mixture probs
+#     comps: list, each member is a list comp with ith normal component ~N(comp[[1]],Sigma), 
+#            Sigma = t(R)%*%R, R^{-1} = comp[[2]]
+# returns:
+#  a list with [[1]]=$mu a vector
+#  [[2]]=$sigma a matrix 
+#
+nc = length(comps)
+dim = length(comps[[1]][[1]])
+mu = double(dim)
+sigma = matrix(0.0,dim,dim)
+for(i in 1:nc) {
+   mu = mu+ prob[i]*comps[[i]][[1]]
+}
+var=matrix(double(dim*dim),ncol=dim)
+for(i in 1:nc) {
+   mui=comps[[i]][[1]]
+#   root = solve(comps[[i]][[2]])
+   root=backsolve(comps[[i]][[2]],diag(rep(1,dim)))
+   sigma=t(root)%*%root
+   var=var+prob[i]*sigma+prob[i]*(mui-mu)%o%(mui-mu)
+}
+list(mu=mu,sigma=sigma)
+}
+#---------------------------------------------------------------------------------------
+dim=length(compdraw[[1]][[1]][[1]])
+mu=double(dim)
+sigma=matrix(double(dim*dim),ncol=dim)
+sd=double(dim)
+corr=matrix(double(dim*dim),ncol=dim)
+for(i in 1:length(compdraw)) 
+{
+   out=mom(probdraw[i,],compdraw[[i]])
+   sd=sd+sqrt(diag(out$sigma))
+   corr=corr+matrix(nmat(out$sigma),ncol=dim)
+   mu=mu+out$mu
+   sigma=sigma+out$sigma
+}
+mu=mu/length(compdraw)
+sigma=sigma/length(compdraw)
+sd=sd/length(compdraw)
+corr=corr/length(compdraw)
+return(list(mu=mu,sigma=sigma,sd=sd,corr=corr))
+}
diff --git a/R/nmat.R b/R/nmat.R
new file mode 100755
index 0000000..144c768
--- /dev/null
+++ b/R/nmat.R
@@ -0,0 +1,12 @@
+nmat=function(vec) 
+{
+#
+#    function to take var-cov matrix in vector form and create correlation matrix 
+#       and store in vector form
+#
+p=as.integer(sqrt(length(vec)))
+sigma=matrix(vec,ncol=p)
+nsig=1/sqrt(diag(sigma))
+return(as.vector(nsig*(t(nsig*sigma))))
+}
+
diff --git a/R/numEff.R b/R/numEff.R
new file mode 100755
index 0000000..f3d43a3
--- /dev/null
+++ b/R/numEff.R
@@ -0,0 +1,25 @@
+numEff= 
+function(x,m=min(length(x),(100/sqrt(5000))*sqrt(length(x))))
+{
+#
+# P. Rossi
+# revision history: 3/27/05
+#
+# purpose:
+#  compute N-W std error and relative numerical efficiency
+# 
+# Arguments:
+#  x is vector of draws
+#  m is number of lags to truncate acf
+#    def is such that m=100 if length(x)= 5000 and grows with sqrt(length)
+#
+# Output:
+#  list with numerical std error and variance multiple (f)
+#
+wgt=as.vector(seq(m,1,-1))/(m+1)
+z=acf(x,lag.max=m,plot=FALSE)
+f=1+2*wgt%*%as.vector(z$acf[-1])
+stderr=sqrt(var(x)*f/length(x))
+list(stderr=stderr,f=f)
+}
+
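
An illustrative numEff call on an autocorrelated sequence (simulated; with 5000 draws the default lag truncation works out to about 100):

    draws=as.vector(arima.sim(list(ar=.8),5000))
    numEff(draws)      # $stderr and the variance inflation factor $f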
diff --git a/R/rScaleUsage.R b/R/rScaleUsage.R
new file mode 100755
index 0000000..cf5be38
--- /dev/null
+++ b/R/rScaleUsage.R
@@ -0,0 +1,427 @@
+rscaleUsage=
+function(Data,Prior,Mcmc) 
+{
+#
+# purpose: run scale-usage mcmc
+#    draws y,Sigma,mu,tau,sigma,Lambda,e
+#                                R. McCulloch 12/28/04
+# 
+# arguments:
+#    Data:
+#     all components are required:
+#       k:  integer giving the scale of the responses, each observation is an integer from 1,2,...k
+#       x:  data, num rows=number of respondents, num columns = number of questions
+#    Prior:
+#     all components are optional
+#       nu,V: Sigma ~ IW(nu,V)
+#       mubar,Am: mu ~N(mubar,Am^{-1})
+#       gsigma: grid for sigma
+#       gl11,gl22,gl12: grids for the ij elements of Lambda
+#       Lambdanu,LambdaV: Lambda ~ IW(Lambdanu,LambdaV)
+#       ge: grid for e
+#    Mcmc:
+#     all components are optional (but you would typically want to specify R= number of draws)
+#       R: number of mcmc iterations
+#       keep: frequency with which draw is kept
+#       ndghk: number of draws for ghk
+#       printevery: how often to print out how many draws are done
+#       e,y,mu,Sigma,sigma,tau,Lambda: initial values for the state
+#       doe, ...doLambda: indicates whether draw should be made
+# output:
+#    List with draws of each of Sigma,mu,tau,sigma,Lambda,e
+#    eg. result$Sigma is the draws of Sigma
+#    Each component is a matrix except e, which is a vector
+#    for the matrices Sigma and Lambda each row is the transpose of the vec
+#    eg. result$Lambda has rows (Lambda11,Lambda21,Lambda12,Lambda22)
+
+#
+# define functions needed
+#
+# -----------------------------------------------------------------------------------
+rlpx = function(x,e,k,mu,tau,Sigma,sigma,nd=500) {
+n=nrow(x); p = ncol(x)
+cc = cgetC(e,k)
+L=t(chol(Sigma))
+lpv = rep(0,n)
+offset = p*log(k)
+for(i in 1:n) {
+   Li = sigma[i]*L
+   a = cc[x[i,]]-mu-tau[i]; b = cc[x[i,]+1]-mu-tau[i]
+   ghkres = rghk(Li,a,b,nd)
+   lghkres = log(ghkres)
+   if(is.nan(lghkres)) {
+      #print("nan in ghk:")
+      #print(paste('ghkres: ',ghkres))
+      lghkres = log(1e-320)
+   }
+   if(is.infinite(lghkres)) {
+      #print("infinite in ghk:")
+      #print(paste('ghkres: ',ghkres))
+      lghkres = log(1e-320)
+   }
+   lpv[i] = lghkres + offset
+}
+sum(lpv)
+}
+rghk = function(L,a,b,nd) {
+.C('ghk',as.double(L),as.double(a),as.double(b),as.integer(nrow(L)),
+          as.integer(nd),res=double(1))$res
+}
+condd = function(Sigma) {
+p = nrow(Sigma)
+Si = solve(Sigma)
+cbeta = matrix(0,p-1,p)
+for(i in 1:p) {
+ind = (1:p)[-i]
+cbeta[,i] = -Si[ind,i]/Si[i,i]
+}
+list(beta=cbeta,s=sqrt(1/diag(Si)))
+}
+pandterm = function(message) { stop(paste("in rscaleUsage: ",message),call.=FALSE) }
+myin = function(i,ind) {i %in% ind}
+getS = function(Lam,n,moms) {
+S=matrix(0.0,2,2)
+S[1,1] = (n-1)*moms[3] + n*moms[1]^2
+S[1,2] = (n-1)*moms[4] + n*moms[1]*(moms[2]-Lam[2,2])
+S[2,1] = S[1,2]
+S[2,2] = (n-1)*moms[5] + n*(moms[2]-Lam[2,2])^2
+S
+}
+llL = function(Lam,n,S,V,nu) {
+dlam = Lam[1,1]*Lam[2,2]-Lam[1,2]^2
+M = (S+V) %*%  chol2inv(chol(Lam))
+ll = -.5*(n+nu+3)*log(dlam) -.5*sum(diag(M))
+}
+ispd = function(mat,d=nrow(mat)) {
+if(!is.matrix(mat)) {
+res = FALSE
+} else if(!((nrow(mat)==d) & (ncol(mat)==d))) {
+res = FALSE
+} else {
+diff = (t(mat)+mat)/2 - mat
+perdiff = sum(diff^2)/sum(mat^2)
+res = ((det(mat)>0) & (perdiff < 1e-10))
+}
+res
+}
+#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+# print out components of inputs ----------------------------------------------
+cat('\nIn function rscaleUsage\n\n')
+if(!missing(Data)) {
+cat('   Data has components: ')
+cat(paste(names(Data),collapse=' ')[1],'\n')
+}
+if(!missing(Prior)) {
+cat('   Prior has components: ')
+cat(paste(names(Prior),collapse=' ')[1],'\n')
+}
+if(!missing(Mcmc)) {
+cat('   Mcmc has components: ')
+cat(paste(names(Mcmc),collapse=' ')[1],'\n')
+}
+cat('\n')
+# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+# process Data argument --------------------------
+if(missing(Data)) {pandterm("Requires Data argument - list of k=question scale and x = data")}
+if(is.null(Data$k)) {
+   pandterm("k not specified")
+} else {
+   k = as.integer(Data$k)
+   if(!((k>0) & (k<50))) {pandterm("Data$k must be integer between 1 and 50")}
+}
+if(is.null(Data$x)) {
+   pandterm('x (the data), not specified')
+} else {
+   if(!is.matrix(Data$x)) {pandterm('Data$x must be a matrix')}
+   x = matrix(as.integer(Data$x),nrow=nrow(Data$x))
+   checkx = sum(sapply(as.vector(x),myin,1:k))
+   if(!(checkx == nrow(x)*ncol(x))) {pandterm('each element of Data$x must be in 1,2...k')}
+   p = ncol(x)
+   n = nrow(x)
+   if((p<2) | (n<1)) {pandterm(paste('invalid dimensions for x: nrow,ncol: ',n,p))}
+}
+# ++++++++++++++++++++++++++++++++++++++++++++++++
+
+# process Mcmc argument ---------------------
+
+#run mcmc
+R = as.integer(1000)
+keep = as.integer(1)
+ndghk= as.integer(100)
+printevery = as.integer(10)
+if(!missing(Mcmc)) {
+if(!is.null(Mcmc$R))              { R = as.integer(Mcmc$R) }
+if(!is.null(Mcmc$keep))           { keep = as.integer(Mcmc$keep) }
+if(!is.null(Mcmc$ndghk))          { ndghk = as.integer(Mcmc$ndghk) }
+if(!is.null(Mcmc$printevery))     { printevery = as.integer(Mcmc$printevery) }
+}
+if(R<1) { pandterm('R must be positive')}
+if(keep<1) { pandterm('keep must be positive') }
+if(ndghk<1) { pandterm('ndghk must be positive') }
+if(printevery<1) { pandterm('printevery must be positive') }
+
+
+#state
+y = matrix(as.double(x),nrow=nrow(x))
+mu = apply(y,2,mean)
+Sigma = var(y)
+tau = rep(0,n)
+sigma = rep(1,n)
+#Lamda = matrix(c(3.7,-.22,-.22,.32),ncol=2)
+#Lamda = matrix(c((k/4)^2,(k/4)*.5*(-.2),0,.25),nrow=2); Lamda[1,2]=Lamda[2,1]
+Lamda = matrix(c(4,0,0,.5),ncol=2)
+e=0
+if(!missing(Mcmc)) {
+if(!is.null(Mcmc$y))         { y = Mcmc$y }
+if(!is.null(Mcmc$mu))        { mu = Mcmc$mu }
+if(!is.null(Mcmc$Sigma))     { Sigma = Mcmc$Sigma }
+if(!is.null(Mcmc$tau))       { tau = Mcmc$tau }
+if(!is.null(Mcmc$sigma))     { sigma = Mcmc$sigma }
+if(!is.null(Mcmc$Lambda))    { Lamda = Mcmc$Lambda }
+if(!is.null(Mcmc$e))         { e = Mcmc$e }
+}
+if(!ispd(Sigma,p)) { pandterm(paste('Sigma must be positive definite with dimension ',p)) }
+if(!ispd(Lamda,2)) { pandterm(paste('Lambda must be positive definite with dimension ',2)) }
+if(!is.vector(mu)) { pandterm('mu must be a vector') }
+if(length(mu) != p) { pandterm(paste('mu must have length ',p)) }
+if(!is.vector(tau)) { pandterm('tau must be a vector') }
+if(length(tau) != n) { pandterm(paste('tau must have length ',n)) }
+if(!is.vector(sigma)) { pandterm('sigma must be a vector') }
+if(length(sigma) != n) { pandterm(paste('sigma must have length ',n)) }
+if(!is.matrix(y)) { pandterm('y must be a matrix') }
+if(nrow(y) != n) { pandterm(paste('y must have',n,'rows')) }
+if(ncol(y) != p) { pandterm(paste('y must have',p,'columns')) }
+
+#do draws
+domu=TRUE
+doSigma=TRUE
+dosigma=TRUE
+dotau=TRUE
+doLamda=TRUE
+doe=TRUE
+if(!missing(Mcmc)) {
+if(!is.null(Mcmc$domu))        { domu = Mcmc$domu }
+if(!is.null(Mcmc$doSigma))     { doSigma = Mcmc$doSigma }
+if(!is.null(Mcmc$dotau))       { dotau = Mcmc$dotau }
+if(!is.null(Mcmc$dosigma))     { dosigma = Mcmc$dosigma }
+if(!is.null(Mcmc$doLambda))    { doLamda = Mcmc$doLambda }
+if(!is.null(Mcmc$doe))         { doe = Mcmc$doe }
+}
+
+
+#++++++++++++++++++++++++++++++++++++++
+
+#process Prior argument ----------------------------------
+nu = p+3
+V= nu*diag(p)
+mubar = matrix(rep(k/2,p),ncol=1)
+Am = .0001*diag(p)
+gs = 500
+gsigma = 6*(1:gs)/gs
+gl11 = .1 + 5.9*(1:gs)/gs
+gl22 = .1 + 2.0*(1:gs)/gs
+#gl12 = -.8 + 1.6*(1:gs)/gs
+gl12 = -2.0 + 4*(1:gs)/gs
+nuL=20
+VL = (nuL-3)*Lamda
+ge = -.1+.2*(0:gs)/gs
+
+if(!missing(Prior)) {
+if(!is.null(Prior$nu))       { nu = Prior$nu; V = nu*diag(p) }
+if(!is.null(Prior$V))        { V = Prior$V }
+if(!is.null(Prior$mubar))    { mubar = matrix(Prior$mubar,ncol=1) }
+if(!is.null(Prior$Am))       { Am = Prior$Am }
+if(!is.null(Prior$gsigma))   { gsigma = Prior$gsigma }
+if(!is.null(Prior$gl11))     { gl11 = Prior$gl11 }
+if(!is.null(Prior$gl22))     { gl22 = Prior$gl22 }
+if(!is.null(Prior$gl12))     { gl12 = Prior$gl12 }
+if(!is.null(Prior$Lambdanu)) { nuL = Prior$Lambdanu; VL = (nuL-3)*Lamda }
+if(!is.null(Prior$LambdaV))  { VL = Prior$LambdaV }
+if(!is.null(Prior$ge))       { ge = Prior$ge }
+}
+if(!ispd(V,p)) { pandterm(paste('V must be positive definite with dimension ',p)) }
+if(!ispd(Am,p)) { pandterm(paste('Am must be positive definite with dimension ',p)) }
+if(!ispd(VL,2)) { pandterm(paste('VL must be positive definite with dimension ',2)) }
+if(nrow(mubar) != p) { pandterm(paste('mubar must have length',p)) }
+#++++++++++++++++++++++++++++++++++++++++
+
+#print out run info -------------------------
+cat('   n,p,k: ', n,p,k,'\n')
+cat('   R,keep,ndghk,printevery: ', R,keep,ndghk,printevery,'\n')
+cat('\n')
+cat('   Data:\n')
+cat('      x11,n1,1p,np: ',x[1,1],x[n,1],x[1,p],x[n,p],'\n\n')
+cat('   Prior:\n')
+cat('      ','nu: ',nu,'\n')
+cat('      ','V11,pp/nu: ',V[1,1]/nu,V[p,p]/nu,'\n')
+cat('      ','mubar1,p: ',mubar[1],mubar[p],'\n')
+cat('      ','Am11,pp: ',Am[1,1],Am[p,p],'\n')
+cat('      ','Lambdanu: ',nuL,'\n')
+cat('      ','LambdaV11,22/(Lambdanu-3): ',VL[1,1]/(nuL-3),VL[2,2]/(nuL-3),'\n')
+cat('      ','mubar1,p: ',mubar[1],mubar[p],'\n')
+cat('      ','sigma grid, 1,',length(gsigma),': ',gsigma[1],', ',gsigma[length(gsigma)],'\n')
+cat('      ','Lambda11 grid, 1,',length(gl11),': ',gl11[1],', ',gl11[length(gl11)],'\n')
+cat('      ','Lambda12 grid, 1,',length(gl12),': ',gl12[1],', ',gl12[length(gl12)],'\n')
+cat('      ','Lambda22 grid, 1,',length(gl22),': ',gl22[1],', ',gl22[length(gl22)],'\n')
+cat('      ','e grid, 1,',length(ge),': ',ge[1],', ',ge[length(ge)],'\n')
+cat('      ','draw e: ',doe,'\n')
+cat('      ','draw Lambda: ',doLamda,'\n')
+#++++++++++++++++++++++++++++++++++++++++++++
+
+nk = floor(R/keep)
+ndpost = nk*keep
+drSigma=matrix(0.0,nk,p^2)
+drmu = matrix(0.0,nk,p)
+drtau = matrix(0.0,nk,n)
+drsigma = matrix(0.0,nk,n)
+drLamda = matrix(0.0,nk,4)
+dre = rep(0,nk)
+
+itime = proc.time()[3]
+cat("Mcmc Iteration (est time to end - min)",'\n')
+for(rep in 1:ndpost) {
+   if(1) { # y
+      cc = cgetC(e,k)
+      bs = condd(Sigma)
+      y = matrix(.C('dy',as.integer(p),as.integer(n),y=as.double(t(y)),as.integer(t(x)),as.double(cc),as.double(mu),as.double(bs$beta),as.double(bs$s),
+	                                        as.double(tau),as.double(sigma))$y,ncol=p,byrow=TRUE)
+   }
+   if(doSigma) { #Sigma
+      Res = (t(t(y)-mu)-tau)/sigma
+      S = crossprod(Res)
+      Sigma = rwishart(nu+n,chol2inv(chol(V+S)))$IW
+   }
+   if(domu) { #mu
+      yd = y-tau
+      Si = chol2inv(chol(Sigma))
+      Vmi = sum(1/sigma^2)*Si + Am
+      R = chol(Vmi)
+      Ri = backsolve(R,diag(rep(1,p)))
+      Vm = chol2inv(chol(Vmi))
+      mm = Vm %*% (Si %*% (t(yd) %*% matrix(1/sigma^2,ncol=1)) + Am %*% mubar)
+      mu = as.vector(mm + Ri %*% matrix(rnorm(p),ncol=1))
+   }
+   if(dotau) { #tau
+      Ai = Lamda[1,1] - (Lamda[1,2]^2)/Lamda[2,2]
+      A = 1.0/Ai
+      onev = matrix(1.0,p,1)
+      R = chol(Sigma)
+      xx = backsolve(R,onev,transpose=TRUE)
+      yy = backsolve(R,t(y)-mu,transpose=TRUE)
+      xtx = sum(xx^2)
+      xty = as.vector(t(xx) %*% yy)
+      beta = A*Lamda[1,2]/Lamda[2,2]
+      for(j in 1:n) {
+	 s2 = xtx/sigma[j]^2   + A
+         s2 = 1.0/s2 
+	 m = s2*((xty[j]/sigma[j]^2) + beta*(log(sigma[j])-Lamda[2,2]))
+	 tau[j] = m + sqrt(s2)*rnorm(1)
+      }
+   }
+   if(dosigma) { #sigma
+      R = chol(Sigma)
+      eps = backsolve(R,t(y-tau)-mu,transpose=TRUE)
+      ete = as.vector(matrix(rep(1,p),nrow=1) %*% eps^2)
+      a= Lamda[2,2]
+      b= Lamda[1,2]/Lamda[1,1]
+      s=sqrt(Lamda[2,2]-(Lamda[1,2]^2/Lamda[1,1]))
+      for(j in 1:n) {
+	 pv = -(p+1)*log(gsigma) -.5*ete[j]/gsigma^2 -.5*((log(gsigma)-(a+b*tau[j]))/s)^2
+	 pv = exp(pv-max(pv))
+	 pv = pv/sum(pv)
+	 sigma[j] = sample(gsigma,size=1,prob=pv)
+      }
+   }
+   
+   if(doLamda) { # Lamda
+      h=log(sigma)
+      dat = cbind(tau,h)
+      temp = var(dat)
+      moms = c(mean(tau),mean(h),temp[1,1],temp[1,2],temp[2,2])
+
+      SS = getS(Lamda,n,moms)
+      rgl11 = gl11[gl11 > (Lamda[1,2]^2/Lamda[2,2])]
+      ng = length(rgl11)
+      pv = rep(0,ng)
+      for(j in 1:ng) {
+         Lamda[1,1] = rgl11[j]
+         pv[j] = llL(Lamda,n,SS,VL,nuL)
+      }
+      pv = exp(pv-max(pv)); pv = pv/sum(pv)
+      Lamda[1,1] = sample(rgl11,size=1,prob=pv)
+
+      rgl12 = gl12[(gl12<sqrt(Lamda[1,1]*Lamda[2,2])) & (gl12>-sqrt(Lamda[1,1]*Lamda[2,2]))]
+      ng = length(rgl12)
+      pv = rep(0,ng)
+      for(j in 1:ng) {
+         Lamda[1,2] = rgl12[j]; Lamda[2,1]=Lamda[1,2]
+         pv[j] = llL(Lamda,n,SS,VL,nuL)
+      }
+      pv = exp(pv-max(pv)); pv = pv/sum(pv)
+      Lamda[1,2] = sample(rgl12,size=1,prob=pv)
+      Lamda[2,1]=Lamda[1,2]
+
+      rgl22 = gl22[gl22 > (Lamda[1,2]^2/Lamda[1,1])]
+      ng = length(rgl22)
+      pv = rep(0,ng)
+      for(j in 1:ng) {
+         Lamda[2,2] = rgl22[j]
+	 SS = getS(Lamda,n,moms)
+         pv[j] = llL(Lamda,n,SS,VL,nuL)
+      }
+      pv = exp(pv-max(pv)); pv = pv/sum(pv)
+      Lamda[2,2] = sample(rgl22,size=1,prob=pv)
+   }
+
+   if(doe) { # e
+      ng = length(ge)
+      ei = which.min(abs(e-ge))
+      if(ei==1) {
+         pi =2
+         qr = .5
+      } else if(ei==ng) {
+         pi = ng-1
+         qr = .5
+      } else {
+         pi = ei + rbinom(1,1,.5)*2-1
+         qr = 1
+      }
+      eold = ge[ei]
+      eprop = ge[pi]
+      llold = rlpx(x,eold,k,mu,tau,Sigma,sigma,ndghk)
+      llprop = rlpx(x,eprop,k,mu,tau,Sigma,sigma,ndghk)
+      lrat = llprop - llold + log(qr)
+      if(lrat>0) {
+         e = eprop
+      } else {
+         paccept = min(1,exp(lrat))
+         e = ifelse(rbinom(1,1,paccept),eprop,eold)
+      }
+   }
+   mkeep = rep/keep
+   if(mkeep == floor(mkeep)) {
+      drSigma[mkeep,] = Sigma
+      drmu[mkeep,] = mu
+      drtau[mkeep,] = tau
+      drsigma[mkeep,] = sigma 
+      drLamda[mkeep,] = Lamda
+      dre[mkeep] = e
+   }
+   if((rep/printevery)==floor(rep/printevery)) {
+      ctime = proc.time()[3]
+      timetoend = ((ctime-itime)/rep)*(ndpost-rep)
+      cat(rep,' (', round(timetoend/60,1), ') \n')
+      fsh()
+   }
+}
+ctime = proc.time()[3]
+cat('  Total Time Elapsed: ',round((ctime-itime)/60,2),'\n')
+return(list(Sigmadraw=drSigma,mudraw=drmu,taudraw = drtau,
+ sigmadraw=drsigma,Lambdadraw=drLamda,edraw=dre))
+}
+
+
+
+
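
A smoke-test style sketch of rscaleUsage on simulated ratings data (illustrative only, R is far too small for real inference; requires the package's compiled code):

    set.seed(66)
    x=matrix(sample(1:5,100*6,replace=TRUE),ncol=6)   # 100 respondents, 6 questions, 5 point scale
    out=rscaleUsage(Data=list(k=5,x=x),Mcmc=list(R=50,printevery=10))
    dim(out$Sigmadraw)    # 50 x 36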
diff --git a/R/rbprobitGibbs.R b/R/rbprobitGibbs.R
new file mode 100755
index 0000000..c951916
--- /dev/null
+++ b/R/rbprobitGibbs.R
@@ -0,0 +1,152 @@
+rbprobitGibbs=
+function(Data,Prior,Mcmc)
+{
+#
+# revision history:
+#   p. rossi 1/05
+#
+# purpose: 
+#   draw from posterior for binary probit using Gibbs Sampler
+#
+# Arguments:
+#   Data - list of X,y  
+#     X is nobs x nvar, y is nobs vector of 0,1
+#   Prior - list of A, betabar
+#     A is nvar x nvar prior preci matrix
+#     betabar is nvar x 1 prior mean
+#   Mcmc
+#     R is number of draws
+#     keep is thinning parameter
+#
+# Output:
+#   list of betadraws
+#
+# Model:   y = 1 if  w=Xbeta + e   > 0  e ~N(0,1)
+#
+# Prior:   beta ~ N(betabar,A^-1)
+#
+#
+# ----------------------------------------------------------------------
+# define functions needed
+#
+breg1=
+function(root,X,y,Abetabar) 
+{
+#
+#     p.rossi 12/04
+#
+# Purpose: draw from posterior for linear regression, sigmasq=1.0
+# 
+# Arguments:
+#  root is chol((X'X+A)^-1)
+#  Abetabar = A*betabar
+#
+# Output:  draw from posterior
+# 
+# Model: y = Xbeta + e  e ~ N(0,I)
+#
+# Prior:  beta ~ N(betabar,A^-1)
+#
+cov=crossprod(root,root)
+betatilde=cov%*%(crossprod(X,y)+Abetabar)
+betatilde+t(root)%*%rnorm(length(betatilde))
+}
+
+pandterm=function(message) {stop(message,call.=FALSE)}
+#
+# ----------------------------------------------------------------------
+#
+# check arguments
+#
+if(missing(Data)) {pandterm("Requires Data argument -- list of y and X")}
+    if(is.null(Data$X)) {pandterm("Requires Data element X")}
+    X=Data$X
+    if(is.null(Data$y)) {pandterm("Requires Data element y")}
+    y=Data$y
+nvar=ncol(X)
+nobs=length(y)
+#
+# check data for validity
+#
+if(length(y) != nrow(X) ) {pandterm("y and X not of same row dim")}
+#
+# check for Prior
+#
+if(missing(Prior))
+   { betabar=c(rep(0,nvar)); A=.01*diag(nvar)}
+else
+   {
+    if(is.null(Prior$betabar)) {betabar=c(rep(0,nvar))}
+       else {betabar=Prior$betabar}
+    if(is.null(Prior$A)) {A=.01*diag(nvar)} 
+       else {A=Prior$A}
+   }
+#
+# check dimensions of Priors
+#
+if(ncol(A) != nrow(A) || ncol(A) != nvar || nrow(A) != nvar) 
+   {pandterm(paste("bad dimensions for A",dim(A)))}
+if(length(betabar) != nvar)
+   {pandterm(paste("betabar wrong length, length= ",length(betabar)))}
+#
+# check MCMC argument
+#
+if(missing(Mcmc)) {pandterm("requires Mcmc argument")}
+else
+   {
+    if(is.null(Mcmc$R)) 
+       {pandterm("requires Mcmc element R")} else {R=Mcmc$R}
+    if(is.null(Mcmc$keep)) {keep=1} else {keep=Mcmc$keep}
+    }
+#
+# print out problem
+#
+cat(" ", fill=TRUE)
+cat("Starting Gibbs Sampler for Binary Probit Model",fill=TRUE)
+cat(" ", fill=TRUE)
+cat("Prior Parms: ",fill=TRUE)
+cat("betabar",fill=TRUE)
+print(betabar)
+cat("A",fill=TRUE)
+print(A)
+cat(" ", fill=TRUE)
+cat("MCMC parms: ",fill=TRUE)
+cat("R= ",R," keep= ",keep,fill=TRUE)
+cat(" ",fill=TRUE)
+
+betadraw=matrix(double(floor(R/keep)*nvar),ncol=nvar)
+beta=c(rep(0,nvar))
+sigma=c(rep(1,nrow(X)))
+root=chol(chol2inv(chol((crossprod(X,X)+A))))
+Abetabar=crossprod(A,betabar)
+        a=ifelse(y == 0,-100, 0)
+        b=ifelse(y == 0, 0, 100)
+#
+#	start main iteration loop
+#
+itime=proc.time()[3]
+cat("MCMC Iteration (est time to end - min) ",fill=TRUE)
+fsh()
+
+for (rep in 1:R) 
+{
+  # draw z given beta(i-1)
+  mu=X%*%beta
+  z=rtrun(mu,sigma,a,b)
+  beta=breg1(root,X,z,Abetabar)
+#
+#       print time to completion and draw # every 100th draw
+#
+  if(rep%%100 == 0)
+    {ctime=proc.time()[3]
+    timetoend=((ctime-itime)/rep)*(R-rep)
+    cat(" ",rep," (",round(timetoend/60,1),")",fill=TRUE)
+    fsh()}
+
+  if(rep%%keep == 0) 
+    {mkeep=rep/keep; betadraw[mkeep,]=beta}
+}
+ctime = proc.time()[3]
+cat('  Total Time Elapsed: ',round((ctime-itime)/60,2),'\n')
+return(list(betadraw=betadraw))
+}
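
An illustrative rbprobitGibbs run on simulated probit data (made-up values; default prior, short chain):

    set.seed(66)
    n=200; X=cbind(rep(1,n),rnorm(n)); beta=c(-.5,1)
    y=as.numeric((X%*%beta+rnorm(n))>0)
    out=rbprobitGibbs(Data=list(X=X,y=y),Mcmc=list(R=500))
    apply(out$betadraw,2,mean)      # roughly recovers c(-.5,1)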
diff --git a/R/rdirichlet.R b/R/rdirichlet.R
new file mode 100755
index 0000000..5262611
--- /dev/null
+++ b/R/rdirichlet.R
@@ -0,0 +1,12 @@
+rdirichlet = 
+function(alpha)
+{
+#
+# Purpose:
+# draw from Dirichlet(alpha)
+#
+dim = length(alpha)
+y=rep(0,dim)
+for(i in 1:dim) y[i] = rgamma(1,alpha[i])
+return(y/sum(y))
+}
diff --git a/R/rhierLinearModel.R b/R/rhierLinearModel.R
new file mode 100755
index 0000000..e6c529c
--- /dev/null
+++ b/R/rhierLinearModel.R
@@ -0,0 +1,260 @@
+rhierLinearModel=
+function(Data,Prior,Mcmc)
+{
+#
+# Revision History
+#     1/17/05  P. Rossi
+#
+# Purpose:
+#   run hierarchical regression model
+#
+# Arguments:
+#   Data list of regdata,Z 
+#     regdata is a list of lists each list with members y, X
+#        e.g. regdata[[i]]=list(y=y,X=X)
+#     X has nvar columns
+#     Z is nreg=length(regdata) x nz
+#   Prior list of prior hyperparameters
+#     Deltabar,A, nu.e,ssq,nu,V
+#          note: ssq is a nreg x 1 vector!
+#   Mcmc
+#     list of Mcmc parameters
+#     R is number of draws
+#     keep is thinning parameter -- keep every keepth draw
+#
+# Output: 
+#   list of 
+#   betadraw -- nreg x nvar x R/keep array of individual regression betas
+#   taudraw -- R/keep x nreg  array of error variances for each regression
+#   Deltadraw -- R/keep x nz x nvar array of Delta draws
+#   Vbetadraw -- R/keep x nvar*nvar array of Vbeta draws
+#
+# Model:
+# nreg regression equations 
+#        y_i = X_ibeta_i + epsilon_i  
+#        epsilon_i ~ N(0,tau_i)
+#             nvar X vars in each equation
+#
+# Priors:
+#        tau_i ~ nu.e*ssq_i/chisq(nu.e)  tau_i is the variance of epsilon_i
+#        beta_i ~ N(ZDelta[i,],V_beta)
+#               Note:  ZDelta is the matrix Z * Delta; [i,] refers to ith row of this product!
+#
+#          vec(Delta) | V_beta ~ N(vec(Deltabar),Vbeta (x) A^-1)
+#          V_beta ~ IW(nu,V)  or V_beta^-1 ~ W(nu,V^-1)
+#              Delta, Deltabar are nz x nvar
+#              A is nz x nz
+#              Vbeta is nvar x nvar
+#        
+#          NOTE: if you don't have any z vars, set Z=iota (nreg x 1) 
+#
+#
+#  create needed functions
+#
+#------------------------------------------------------------------------------
+append=function(l) { l=c(l,list(XpX=crossprod(l$X),Xpy=crossprod(l$X,l$y)))}
+#
+getvar=function(l) { var(l$y)}
+#
+runiregG=
+function(y,X,XpX,Xpy,sigmasq,A,betabar,nu,ssq){
+# 
+# Purpose:
+#   perform one Gibbs iteration for Univ Regression Model
+#   only does one iteration so can be used in rhierLinearModel
+#
+# Model:
+#   y = Xbeta + e  e ~N(0,sigmasq)
+#          y is n x 1
+#          X is n x k
+#          beta is k x 1 vector of coefficients
+#
+# Priors:  beta ~ N(betabar,A^-1)
+#          sigmasq ~ (nu*ssq)/chisq_nu
+# 
+n=length(y)
+k=ncol(XpX)
+sigmasq=as.vector(sigmasq)
+#
+#     first draw beta | sigmasq
+#
+  IR=backsolve(chol(XpX/sigmasq+A),diag(k))
+  btilde=crossprod(t(IR))%*%(Xpy/sigmasq+A%*%betabar)
+  beta = btilde + IR%*%rnorm(k)
+#
+#    now draw sigmasq | beta
+#
+  res=y-X%*%beta
+  s=t(res)%*%res
+  sigmasq=(nu*ssq + s)/rchisq(1,nu+n)
+
+list(betadraw=beta,sigmasqdraw=sigmasq)
+}
+
+#------------------------------------------------------------------------------
+#
+
+#
+# check arguments
+#
+pandterm=function(message) {stop(message,call.=FALSE)}
+if(missing(Data)) {pandterm("Requires Data argument -- list of regdata and Z")}
+    if(is.null(Data$regdata)) {pandterm("Requires Data element regdata")}
+    regdata=Data$regdata
+    if(is.null(Data$Z)) {pandterm("Requires Data element Z")}
+    Z=Data$Z
+nz=ncol(Z)
+nreg=length(regdata)
+#
+# check data for validity
+#
+dimfun=function(l) {c(length(l$y),dim(l$X))}
+dims=sapply(regdata,dimfun)
+dims=t(dims)
+nvar=quantile(dims[,3],prob=.5)
+
+for (i in 1:nreg) 
+{
+   if(dims[i,1] != dims[i,2]  || dims[i,3] !=nvar) 
+      {pandterm(paste("Bad Data dimensions for unit ",i," dims(y,X) =",dims[i,]))}
+}
+#
+# check for Prior
+#
+if(missing(Prior))
+   { Deltabar=matrix(rep(0,nz*nvar),ncol=nvar); A=.01*diag(nz);
+     nu.e=3; ssq=sapply(regdata,getvar) ; nu=nvar+3 ; V= nu*diag(nvar)}
+else
+   {
+    if(is.null(Prior$Deltabar)) {Deltabar=matrix(rep(0,nz*nvar),ncol=nvar)} 
+       else {Deltabar=Prior$Deltabar}
+    if(is.null(Prior$A)) {A=.01*diag(nz)} 
+       else {A=Prior$A}
+    if(is.null(Prior$nu.e)) {nu.e=3} 
+       else {nu.e=Prior$nu.e}
+    if(is.null(Prior$ssq)) {ssq=sapply(regdata,getvar)} 
+       else {ssq=Prior$ssq}
+    if(is.null(Prior$nu)) {nu=nvar+3} 
+       else {nu=Prior$nu}
+    if(is.null(Prior$V)) {V=nu*diag(nvar)} 
+       else {V=Prior$V}
+   }
+#
+# check dimensions of Priors
+#
+if(ncol(A) != nrow(A) || ncol(A) != nz || nrow(A) != nz) 
+   {pandterm(paste("bad dimensions for A",dim(A)))}
+if(nrow(Deltabar) != nz || ncol(Deltabar) != nvar)
+   {pandterm(paste("bad dimensions for Deltabar ",dim(Deltabar)))}
+if(length(ssq) != nreg) {pandterm(paste("bad length for ssq ",length(ssq)))}
+if(ncol(V) != nvar || nrow(V) != nvar) {pandterm(paste("bad dimensions for V ",dim(V)))}
+#
+# check MCMC argument
+#
+if(missing(Mcmc)) {pandterm("requires Mcmc argument")}
+else
+   {
+    if(is.null(Mcmc$R)) 
+       {pandterm("requires Mcmc element R")} else {R=Mcmc$R}
+    if(is.null(Mcmc$keep)) {keep=1} else {keep=Mcmc$keep}
+    }
+#
+# print out problem
+#
+cat(" ", fill=TRUE)
+cat("Starting Gibbs Sampler for Linear Hierarchical Model",fill=TRUE)
+cat("   ",nreg," Regressions",fill=TRUE)
+cat(" ", fill=TRUE)
+cat("Prior Parms: ",fill=TRUE)
+cat("Deltabar",fill=TRUE)
+print(Deltabar)
+cat("A",fill=TRUE)
+print(A)
+cat("nu.e (d.f. parm for regression error variances)= ",nu.e,fill=TRUE)
+cat("Vbeta ~ IW(nu,V)",fill=TRUE)
+cat("nu = ",nu,fill=TRUE)
+cat("V ",fill=TRUE)
+print(V)
+cat(" ", fill=TRUE)
+cat("MCMC parms: ",fill=TRUE)
+cat("R= ",R," keep= ",keep,fill=TRUE)
+cat(" ",fill=TRUE)
+#
+#  allocate space for the draws and set initial values of Vbeta and Delta
+#
+Vbetadraw=matrix(double(floor(R/keep)*nvar*nvar),ncol=nvar*nvar)
+Deltadraw=matrix(double(floor(R/keep)*nz*nvar),ncol=nz*nvar)
+taudraw=matrix(double(floor(R/keep)*nreg),ncol=nreg)
+betadraw=array(double(floor(R/keep)*nreg*nvar),dim=c(nreg,nvar,floor(R/keep)))
+
+tau=double(nreg)
+Delta=c(rep(0,nz*nvar))
+Vbeta=as.vector(diag(nvar))
+betas=matrix(double(nreg*nvar),ncol=nvar)
+#
+#  set up fixed parms for the draw of Vbeta,Delta
+#
+#  note: in the notation of the MVR  Y =    X      B  
+#                                  n x m  n x k  k x m
+#                           "n" = nreg
+#                           "m" = nvar
+#                           "k" = nz
+#			general model: Beta = Z Delta + U
+#
+Fparm=init.rmultiregfp(Z,A,Deltabar,nu,V)
+#
+#       Create XpX elements of regdata and initialize tau
+#
+regdata=lapply(regdata,append)
+
+tau=sapply(regdata,getvar)
+#
+#	start main iteration loop
+#
+itime=proc.time()[3]
+cat("MCMC Iteration (est time to end - min) ",fill=TRUE)
+fsh()
+
+for(rep in 1:R)
+{
+   Abeta=chol2inv(chol(matrix(Vbeta,ncol=nvar)))
+   betabar=Z%*%matrix(Delta,ncol=nvar)
+#
+#       loop over all regressions
+#
+   for (reg in 1:nreg) 
+   {
+      regout=runiregG(regdata[[reg]]$y,regdata[[reg]]$X,regdata[[reg]]$XpX,
+                regdata[[reg]]$Xpy,tau[reg],Abeta,betabar[reg,],nu.e,ssq[reg])
+      betas[reg,]=regout$betadraw
+      tau[reg]=regout$sigmasqdraw
+   }
+#
+#          draw Vbeta, Delta | {beta_i}
+#
+   rmregout=rmultiregfp(betas,Z,Fparm)
+   Vbeta=as.vector(rmregout$Sigma)
+   Delta=as.vector(rmregout$B)
+#
+#       print time to completion and draw # every 100th draw
+#
+  if(rep%%100 == 0)
+    {ctime=proc.time()[3]
+    timetoend=((ctime-itime)/rep)*(R-rep)
+    cat(" ",rep," (",round(timetoend/60,1),")",fill=TRUE)
+    fsh()}
+
+
+  if(rep%%keep == 0) 
+    {mkeep=rep/keep
+     Vbetadraw[mkeep,]=Vbeta
+     Deltadraw[mkeep,]=Delta
+     taudraw[mkeep,]=tau
+     betadraw[,,mkeep]=betas}
+
+}
+ctime = proc.time()[3]
+cat('  Total Time Elapsed: ',round((ctime-itime)/60,2),'\n')
+
+return(list(Vbetadraw=Vbetadraw,Deltadraw=Deltadraw,betadraw=betadraw,taudraw=taudraw))
+}
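
An illustrative rhierLinearModel run (simulated units; no real Z variables, so Z is a column of ones as the comments above suggest; the chain length is only a smoke test):

    set.seed(66)
    nreg=30; nobs=20; nvar=3
    Z=matrix(rep(1,nreg),ncol=1)
    regdata=NULL
    for (i in 1:nreg) {
       X=cbind(rep(1,nobs),matrix(rnorm(nobs*(nvar-1)),ncol=nvar-1))
       beta=c(1,-1,.5)+.2*rnorm(nvar)
       regdata[[i]]=list(y=X%*%beta+rnorm(nobs),X=X)
    }
    out=rhierLinearModel(Data=list(regdata=regdata,Z=Z),Mcmc=list(R=200))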
diff --git a/R/rhierMnlRwMixture.R b/R/rhierMnlRwMixture.R
new file mode 100755
index 0000000..1d6f524
--- /dev/null
+++ b/R/rhierMnlRwMixture.R
@@ -0,0 +1,364 @@
+rhierMnlRwMixture=
+function(Data,Prior,Mcmc)
+{
+#
+# revision history:
+#   changed 12/17/04 by rossi to fix bug in drawdelta when there is zero/one unit
+#   in a mixture component
+#
+# purpose: run hierarchical mnl logit model with mixture of normals 
+#   using RW and cov(RW inc) = (hess_i + Vbeta^-1)^-1
+#   uses normal approximation to pooled likelihood
+#
+# Arguments:
+#   Data contains a list of (m,lgtdata, and possibly Z)
+#      m is number of choice alternatives
+#      lgtdata is a list of lists (one list per unit)
+#          lgtdata[[i]]=list(y,X)
+#             y is a vector indicating alternative chosen
+#               integers 1:m indicate alternative
+#             X is a length(y)*m x nvar matrix of values of
+#               X vars including intercepts
+#             Z is an length(lgtdata) x nz matrix of values of variables
+#               note: Z should NOT contain an intercept
+#   Prior contains a list of (deltabar,Ad,mubar,Amu,nu,V,ncomp) 
+#      ncomp is the number of components in normal mixture
+#           if elements of Prior (other than ncomp) do not exist, defaults are used
+#   Mcmc contains a list of (s,c,R,keep)
+#
+# Output:  as list containing
+#   Deltadraw R/keep  x nz*nvar matrix of draws of Delta, first row is initial value
+#   betadraw is nlgt x nvar x R/keep array of draws of betas
+#   probdraw is R/keep x ncomp matrix of draws of probs of mixture components
+#   compdraw is a list of list of lists (length R/keep)
+#      compdraw[[rep]] is the repth draw of components for mixtures
+#
+# Priors:
+#    beta_i = D %*% z[i,] + u_i
+#       u_i ~ N(mu_ind[i],Sigma_ind[i])
+#       ind[i] ~multinomial(p)
+#       p ~ dirichlet (a)
+#       D is a k x nz array
+#          delta= vec(D) ~ N(deltabar,A_d^-1)
+#    mu_j ~ N(mubar,A_mu^-1(x)Sigma_j)
+#    Sigma_j ~ IW(nu,V^-1)
+#    ncomp is number of components
+#
+# MCMC parameters
+#   s is the scaling parameter for the RW inc covariance matrix; s^2 Var is inc cov
+#      matrix
+#   c is parameter for weighting function in fractional likelihood
+#      weight= n_i/(cN)
+#   R is number of draws
+#   keep is thinning parameter, keep every keepth draw
+#
+#  check arguments
+#
+pandterm=function(message) { stop(message,call.=FALSE) }
+if(missing(Data)) {pandterm("Requires Data argument -- list of m,lgtdata, and (possibly) Z")}
+  if(is.null(Data$m)) {pandterm("Requires Data element m (# choice alternatives)") }
+  m=Data$m
+  if(is.null(Data$lgtdata)) {pandterm("Requires Data element lgtdata (list of data for each unit)")}
+  lgtdata=Data$lgtdata
+  nlgt=length(lgtdata)
+  drawdelta=TRUE
+if(is.null(Data$Z)) { cat("Z not specified",fill=TRUE); fsh() ; drawdelta=FALSE}
+  else {if (nrow(Data$Z) != nlgt) {pandterm(paste("Nrow(Z) ",nrow(Data$Z)," ne number logits ",nlgt))}
+      else {Z=Data$Z}}
+  if(drawdelta) nz=ncol(Z)
+#
+# check lgtdata for validity
+#
+ypooled=NULL
+Xpooled=NULL
+if(!is.null(lgtdata[[1]]$X)) {oldncol=ncol(lgtdata[[1]]$X)}
+for (i in 1:nlgt) 
+{
+    if(is.null(lgtdata[[i]]$y)) {pandterm(paste("Requires element y of lgtdata[[",i,"]]"))}
+    if(is.null(lgtdata[[i]]$X)) {pandterm(paste("Requires element X of lgtdata[[",i,"]]"))}
+    ypooled=c(ypooled,lgtdata[[i]]$y)
+    nrowX=nrow(lgtdata[[i]]$X)
+    if((nrowX/m) !=length(lgtdata[[i]]$y)) {pandterm(paste("nrow(X) ne m*length(yi); exception at unit",i))}
+    newncol=ncol(lgtdata[[i]]$X)
+    if(newncol != oldncol) {pandterm(paste("All X elements must have same # of cols; exception at unit",i))}
+    Xpooled=rbind(Xpooled,lgtdata[[i]]$X)
+    oldncol=newncol
+}
+nvar=ncol(Xpooled)
+levely=as.numeric(levels(as.factor(ypooled)))
+if(length(levely) != m) {pandterm(paste("y takes on ",length(levely)," values -- must be = m"))}
+bady=FALSE
+for (i in 1:m )
+{
+    if(levely[i] != i) bady=TRUE
+}
+cat("Table of Y values pooled over all units",fill=TRUE)
+print(table(ypooled))
+if (bady) 
+  {pandterm("Invalid Y")}
+#
+# check on prior
+#
+if(missing(Prior)) 
+{pandterm("Requires Prior list argument (at least ncomp)")} 
+if(is.null(Prior$ncomp)) {pandterm("Requires Prior element ncomp (num of mixture components)")} else {ncomp=Prior$ncomp}
+if(is.null(Prior$mubar)) {mubar=matrix(rep(0,nvar),nrow=1)} else { mubar=matrix(Prior$mubar,nrow=1)}
+  if(ncol(mubar) != nvar) {pandterm(paste("mubar must have nvar cols, ncol(mubar)= ",ncol(mubar)))}
+if(is.null(Prior$Amu)) {Amu=matrix(.01,ncol=1)} else {Amu=matrix(Prior$Amu,ncol=1)}
+  if(ncol(Amu) != 1 | nrow(Amu) != 1) {pandterm("Amu must be a 1 x 1 array")}
+if(is.null(Prior$nu)) {nu=nvar+3}  else {nu=Prior$nu}
+  if(nu < 1) {pandterm("invalid nu value")}
+if(is.null(Prior$V)) {V=nu*diag(rep(1,nvar))} else {V=Prior$V}
+  if(sum(dim(V)==c(nvar,nvar)) !=2) pandterm("Invalid V in prior")
+if(is.null(Prior$Ad) & drawdelta) {Ad=.01*diag(rep(1,nvar*nz))} else {Ad=Prior$Ad}
+if(drawdelta) {if(ncol(Ad) != nvar*nz | nrow(Ad) != nvar*nz) {pandterm("Ad must be nvar*nz x nvar*nz")}}
+if(is.null(Prior$deltabar)& drawdelta) {deltabar=rep(0,nz*nvar)} else {deltabar=Prior$deltabar}
+  if(drawdelta) {if(length(deltabar) != nz*nvar) {pandterm("deltabar must be of length nvar*nz")}}
+if(is.null(Prior$a)) { a=rep(5,ncomp)} else {a=Prior$a}
+if(length(a) != ncomp) {pandterm("Requires dim(a)= ncomp (no of components)")}
+bada=FALSE
+   for(i in 1:ncomp) { if(a[i] < 1) bada=TRUE}
+  if(bada) pandterm("invalid values in a vector")
+#
+# check on Mcmc
+#
+if(missing(Mcmc)) 
+  {pandterm("Requires Mcmc list argument")}
+else 
+   { 
+    if(is.null(Mcmc$s)) {s=2.93/sqrt(nvar)} else {s=Mcmc$s}
+    if(is.null(Mcmc$c)) {c=2}  else {c=Mcmc$c}
+    if(is.null(Mcmc$keep)) {keep=1} else {keep=Mcmc$keep}
+    if(is.null(Mcmc$R)) {pandterm("Requires R argument in Mcmc list")} else {R=Mcmc$R}
+    }
+#
+# print out problem
+#
+cat(" ",fill=TRUE)
+cat("Attempting MCMC Inference for Hierarchical Logit:",fill=TRUE)
+cat("   Normal Mixture with",ncomp,"components for first stage prior",fill=TRUE)
+cat(paste("  ",m," alternatives; ",nvar," variables in X"),fill=TRUE)
+cat(paste("   for ",nlgt," cross-sectional units"),fill=TRUE)
+cat(" ",fill=TRUE)
+cat("Prior Parms: ",fill=TRUE)
+cat("nu =",nu,fill=TRUE)
+cat("V ",fill=TRUE)
+print(V)
+cat("mubar ",fill=TRUE)
+print(mubar)
+cat("Amu ", fill=TRUE)
+print(Amu)
+cat("a ",fill=TRUE)
+print(a)
+if(drawdelta) 
+{
+   cat("deltabar",fill=TRUE)
+   print(deltabar)
+   cat("Ad",fill=TRUE)
+   print(Ad)
+}
+cat(" ",fill=TRUE)
+cat("MCMC Parms: ",fill=TRUE)
+cat(paste("s=",round(s,3)," c= ",c," R= ",R," keep= ",keep),fill=TRUE)
+cat("",fill=TRUE)
+#
+# allocate space for draws
+#
+if(drawdelta) Deltadraw=matrix(double((floor(R/keep))*nz*nvar),ncol=nz*nvar)
+betadraw=array(double((floor(R/keep))*nlgt*nvar),dim=c(nlgt,nvar,floor(R/keep)))
+probdraw=matrix(double((floor(R/keep))*ncomp),ncol=ncomp)
+olddelta=double(nz*nvar)
+oldbetas=matrix(double(nlgt*nvar),ncol=nvar)
+oldll=double(nlgt)
+oldcomp=NULL
+compdraw=NULL
+#--------------------------------------------------------------------------------------------------
+#
+#  create functions needed
+#
+llmnlFract=
+function(beta,y,X,betapooled,rootH,w){
+z=as.vector(rootH%*%(beta-betapooled))
+llmnl(y,X,beta)+w*(-.5*(z%*%z))
+}
+
+mnlRwMetropOnce=
+function(y,X,oldbeta,oldll,s,inc.root,betabar,rootpi){ 
+#
+# function to execute rw metropolis for the MNL
+# y is n vector with element = 1,...,j indicating which alt chosen
+# X is nj x k matrix of xvalues for each of j alt on each of n occasions
+# RW increments are N(0,s^2*t(inc.root)%*%inc.root)
+# prior on beta is N(betabar,Sigma)  Sigma^-1=rootpi*t(rootpi)
+#	inc.root, rootpi are upper triangular
+#	this means that we are using the UL decomp of Sigma^-1 for prior 
+# oldbeta is the current value of beta
+     stay=0
+     betac=oldbeta + s*t(inc.root)%*%(matrix(rnorm(ncol(X)),ncol=1))
+     cll=llmnl(y,X,betac)
+     clpost=cll+lndMvn(betac,betabar,rootpi)
+     ldiff=clpost-oldll-lndMvn(oldbeta,betabar,rootpi)
+     alpha=min(1,exp(ldiff))
+     if(alpha < 1) {unif=runif(1)} else {unif=0}
+     if (unif <= alpha)
+             {betadraw=betac; oldll=cll}
+           else
+             {betadraw=oldbeta; stay=1}
+list(betadraw=betadraw,stay=stay,oldll=oldll)             
+}
+drawDelta=
+function(x,y,z,comps,deltabar,Ad){
+# delta = vec(D)
+#  given z and comps (z[i] gives component indicator for the ith observation, 
+#   comps is a list of mu and rooti)
+#y is n x p
+#x is n x k
+#y = xD' + U , rows of U are indep with covs Sigma_i given by z and comps
+p=ncol(y)
+k=ncol(x)
+xtx = matrix(0.0,k*p,k*p)
+xty = matrix(0.0,p,k) #this is the unvecced version, have to vec after sum
+for(i in 1:length(comps)) {
+   nobs=sum(z==i)
+   if(nobs > 0) {
+      if(nobs == 1) 
+        { yi = matrix(y[z==i,],ncol=p); xi = matrix(x[z==i,],ncol=k)}
+      else
+        { yi = y[z==i,]; xi = x[z==i,]}
+          
+      yi = t(t(yi)-comps[[i]][[1]])
+      sigi = crossprod(t(comps[[i]][[2]]))
+      xtx = xtx + crossprod(xi) %x% sigi
+      xty = xty + (sigi %*% crossprod(yi,xi))
+      }
+}
+xty = matrix(xty,ncol=1)
+
+# then vec(t(D)) ~ N(V^{-1}(xty + Ad*deltabar),V^{-1}) V = (xtx+Ad)
+cov=chol2inv(chol(xtx+Ad))
+cov%*%(xty+Ad%*%deltabar) + t(chol(cov))%*%rnorm(length(deltabar))
+}
+#-------------------------------------------------------------------------------------------------------
+#
+# initialize priors and compute quantities for Metropolis
+#
+cat("initializing priors and Metropolis candidate densities ...",fill=TRUE)
+fsh()
+#
+#  now go through and compute fractional likelihood estimates and Hessians
+#
+#       Lbar_i=L_i x L^w
+#
+betainit=c(rep(0,nvar))
+#
+#  compute pooled optimum
+#
+out=optim(betainit,llmnl,method="BFGS",control=list( fnscale=-1,trace=0,reltol=1e-6), 
+     X=Xpooled,y=ypooled)
+betapooled=out$par
+H=mnlHess(ypooled,Xpooled,betapooled)
+rootH=chol(H)
+for (i in 1:nlgt) 
+{
+   w=length(lgtdata[[i]]$y)/(c*length(ypooled))
+   out=optim(betapooled,llmnlFract,method="BFGS",control=list( fnscale=-1,trace=0,reltol=1e-4), 
+   X=lgtdata[[i]]$X,y=lgtdata[[i]]$y,betapooled=betapooled,rootH=rootH,w=w)
+   if(out$convergence == 0) 
+     { hess=mnlHess(lgtdata[[i]]$y,lgtdata[[i]]$X,out$par)
+       lgtdata[[i]]=c(lgtdata[[i]],list(converge=1,betafmle=out$par,hess=hess)) }
+   else
+     { lgtdata[[i]]=c(lgtdata[[i]],list(converge=0,betafmle=c(rep(0,nvar)),
+        hess=diag(rep(0,nvar)))) }
+   oldbetas[i,]=lgtdata[[i]]$betafmle
+}
+#
+#  initialize values
+#
+# set initial values for the indicators
+#     ind is of length(nlgt) and indicates which mixture component this obs
+#     belongs to.
+#
+ind=NULL
+ninc=floor(nlgt/ncomp)
+for (i in 1:(ncomp-1)) {ind=c(ind,rep(i,ninc))}
+if(ncomp != 1) {ind = c(ind,rep(ncomp,nlgt-length(ind)))} else {ind=rep(1,nlgt)}
+#
+# initialize delta
+#
+olddelta=rep(0,nz*nvar)
+#
+# initialize probs
+#
+oldprob=rep(1/ncomp,ncomp)
+#
+# initialize comps
+#
+tcomp=list(list(mu=rep(0,nvar),rooti=diag(rep(1,nvar))))
+oldcomp=rep(tcomp,ncomp)
+#
+#
+#	start main iteration loop
+#
+itime=proc.time()[3]
+cat("MCMC Iteration (est time to end - min) ",fill=TRUE)
+fsh()
+for(rep in 1:R)
+{
+   # first draw comps,ind,p | {beta_i}, delta
+   #        ind,p need initialization; comps is drawn first in the sub-Gibbs
+   if(drawdelta) 
+      {mgout=rmixGibbs(oldbetas-Z%*%t(matrix(olddelta,ncol=nz)),
+      mubar,Amu,nu,V,a,oldprob,ind,oldcomp)}
+   else
+      {mgout=rmixGibbs(oldbetas,
+      mubar,Amu,nu,V,a,oldprob,ind,oldcomp)}
+   oldprob=mgout[[1]]
+   oldcomp=mgout[[3]]
+   ind=mgout[[2]]
+   # now draw delta | {beta_i}, ind, comps
+   if(drawdelta) {olddelta=drawDelta(Z,oldbetas,ind,oldcomp,deltabar,Ad)}
+   #
+   #  loop over all lgt equations drawing beta_i | ind[i],z[i,],mu[ind[i]],rooti[ind[i]]
+   #
+      for (lgt in 1:nlgt) 
+      {
+         rootpi=oldcomp[[ind[lgt]]]$rooti
+         #  note: beta_i = Delta*z_i + u_i  Delta is nvar x nz
+         if(drawdelta) {
+            betabar=oldcomp[[ind[lgt]]]$mu+matrix(olddelta,ncol=nz)%*%as.vector(Z[lgt,])}
+         else {
+            betabar=oldcomp[[ind[lgt]]]$mu }
+         if (rep == 1) 
+            { oldll[lgt]=llmnl(lgtdata[[lgt]]$y,lgtdata[[lgt]]$X,oldbetas[lgt,])}  
+         #   compute inc.root
+         inc.root=chol(chol2inv(chol(lgtdata[[lgt]]$hess+rootpi%*%t(rootpi))))
+         metropout=mnlRwMetropOnce(lgtdata[[lgt]]$y,lgtdata[[lgt]]$X,oldbetas[lgt,],
+                                   oldll[lgt],s,inc.root,betabar,rootpi)      
+         oldbetas[lgt,]=metropout$betadraw
+         oldll[lgt]=metropout$oldll
+      }
+   #
+   #
+   #       print time to completion and draw # every 100th draw
+   #
+   if(rep%%100 == 0)
+     {ctime=proc.time()[3]
+      timetoend=((ctime-itime)/rep)*(R+1-rep)
+      cat(" ",rep," (",round(timetoend/60,1),")",fill=TRUE)
+      fsh()}
+   #
+   #       save every keepth draw
+   #
+   mkeep=rep/keep
+   if(rep%%keep == 0)
+      { betadraw[,,mkeep]=oldbetas 
+        probdraw[mkeep,]=oldprob
+        if(drawdelta) Deltadraw[mkeep,]=olddelta
+        compdraw[[mkeep]]=oldcomp }
+        
+}
+ctime=proc.time()[3]
+cat(" Total Time Elapsed: ",round((ctime-itime)/60,2),fill=TRUE)
+return(if(drawdelta) {list(Deltadraw=Deltadraw,betadraw=betadraw,probdraw=probdraw,compdraw=compdraw)} 
+else {list(betadraw=betadraw,probdraw=probdraw,compdraw=compdraw)})
+}
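+#
+# illustrative usage sketch (hypothetical simulated panel; values are assumptions, not run):
+#   set.seed(66)
+#   nlgt=50; m=3; nobs=10; lgtdata=NULL
+#   for (i in 1:nlgt) {
+#      betai=c(-1,1,.5,-.5)+.2*rnorm(4)        # unit-level coefficients
+#      sim=simmnl(m,nobs,betai)
+#      lgtdata[[i]]=list(y=sim$y,X=sim$X)
+#   }
+#   out=rhierMnlRwMixture(Data=list(m=m,lgtdata=lgtdata),
+#                         Prior=list(ncomp=1),Mcmc=list(R=2000,keep=5))
+#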
diff --git a/R/rivGibbs.R b/R/rivGibbs.R
new file mode 100755
index 0000000..52e05bb
--- /dev/null
+++ b/R/rivGibbs.R
@@ -0,0 +1,210 @@
+rivGibbs=
+function(Data,Prior,Mcmc) 
+{
+#
+# revision history:
+#    R. McCulloch original version 2/05 
+#    p. rossi 3/05 
+#
+# purpose: 
+#   draw from posterior for linear I.V. model
+#
+# Arguments:
+#   Data -- list of z,w,x,y
+#        y is vector of obs on lhs var in structural equation
+#        x is "endogenous" var in structural eqn
+#        w is matrix of obs on "exogenous" vars in the structural eqn
+#        z is matrix of obs on instruments
+#   Prior -- list of md,Ad,mbg,Abg,nu,V
+#        md is prior mean of delta
+#        Ad is prior prec
+#        mbg is prior mean vector for beta,gamma
+#        Abg is prior prec of same
+#        nu,V parms for IW on Sigma
+#
+#   Mcmc -- list of R,keep 
+#        R is number of draws
+#        keep is thinning parameter
+#
+#   Output: 
+#      list of draws of delta,beta,gamma and Sigma
+# 
+#   Model:
+#
+#    x=z'delta + e1
+#    y=beta*x + w'gamma + e2
+#        e1,e2 ~ N(0,Sigma)
+#
+#   Priors
+#   delta ~ N(md,Ad^-1)
+#   vec(beta,gamma) ~ N(mbg,Abg^-1)
+#   Sigma ~ IW(nu,V)
+#
+#   check arguments
+#
+pandterm=function(message) {stop(message,call.=FALSE)}
+if(missing(Data)) {pandterm("Requires Data argument -- list of z,w,x,y")}
+    if(is.null(Data$z)) {pandterm("Requires Data element z")}
+    z=Data$z
+    if(is.null(Data$w)) {pandterm("Requires Data element w")}
+    w=Data$w
+    if(is.null(Data$x)) {pandterm("Requires Data element x")}
+    x=Data$x
+    if(is.null(Data$y)) {pandterm("Requires Data element y")}
+    y=Data$y
+
+#
+# check data for validity
+#
+if(!is.vector(x)) {pandterm("x must be a vector")}
+if(!is.vector(y)) {pandterm("y must be a vector")}
+n=length(y)
+if(!is.matrix(w)) {pandterm("w is not a matrix")}
+if(!is.matrix(z)) {pandterm("z is not a matrix")}
+dimd=ncol(z)
+dimg=ncol(w)
+if(n != length(x) ) {pandterm("length(y) ne length(x)")}
+if(n != nrow(w) ) {pandterm("length(y) ne nrow(w)")}
+if(n != nrow(z) ) {pandterm("length(y) ne nrow(z)")}
+#
+# check for Prior
+#
+if(missing(Prior))
+   { md=c(rep(0,dimd));Ad=.01*diag(dimd); 
+     mbg=c(rep(0,(1+dimg))); Abg=.01*diag((1+dimg)); nu=3; V=nu*diag(2)}
+else
+   {
+    if(is.null(Prior$md)) {md=c(rep(0,dimd))} 
+       else {md=Prior$md}
+    if(is.null(Prior$Ad)) {Ad=.01*diag(dimd)} 
+       else {Ad=Prior$Ad}
+    if(is.null(Prior$mbg)) {mbg=c(rep(0,(1+dimg)))} 
+       else {mbg=Prior$mbg}
+    if(is.null(Prior$Abg)) {Abg=.01*diag((1+dimg))} 
+       else {Abg=Prior$Abg}
+    if(is.null(Prior$nu)) {nu=3}
+       else {nu=Prior$nu}
+    if(is.null(Prior$V)) {V=nu*diag(2)}
+       else {V=Prior$V}
+   }
+#
+# check dimensions of Priors
+#
+if(ncol(Ad) != nrow(Ad) || ncol(Ad) != dimd || nrow(Ad) != dimd) 
+   {pandterm(paste("bad dimensions for Ad",dim(Ad)))}
+if(length(md) != dimd)
+   {pandterm(paste("md wrong length, length= ",length(md)))}
+if(ncol(Abg) != nrow(Abg) || ncol(Abg) != (1+dimg) || nrow(Abg) != (1+dimg)) 
+   {pandterm(paste("bad dimensions for Abg",dim(Abg)))}
+if(length(mbg) != (1+dimg))
+   {pandterm(paste("mbg wrong length, length= ",length(mbg)))}
+#
+# check MCMC argument
+#
+if(missing(Mcmc)) {pandterm("requires Mcmc argument")}
+else
+   {
+    if(is.null(Mcmc$R)) 
+       {pandterm("requires Mcmc element R")} else {R=Mcmc$R}
+    if(is.null(Mcmc$keep)) {keep=1} else {keep=Mcmc$keep}
+   }
+
+#
+# print out model
+#
+cat(" ",fill=TRUE)
+cat("Starting Gibbs Sampler for Linear IV Model",fill=TRUE)
+cat(" ",fill=TRUE)
+cat(" nobs= ",n,"; ",ncol(z)," instruments; ",ncol(w)," included exog vars",fill=TRUE)
+cat(" ",fill=TRUE)
+cat("Prior Parms: ",fill=TRUE)
+cat("mean of delta ",fill=TRUE)
+print(md)
+cat("Adelta",fill=TRUE)
+print(Ad)
+cat("mean of beta/gamma",fill=TRUE)
+print(mbg)
+cat("Abeta/gamma",fill=TRUE)
+print(Abg)
+cat("Sigma Prior Parms",fill=TRUE)
+cat("nu= ",nu," V=",fill=TRUE)
+print(V)
+cat(" ",fill=TRUE)
+cat("MCMC parms: R= ",R," keep= ",keep,fill=TRUE)
+cat(" ",fill=TRUE)
+
+deltadraw = matrix(double(floor(R/keep)*dimd),ncol=dimd)
+betadraw = rep(0.0,floor(R/keep))
+gammadraw = matrix(double(floor(R/keep)*dimg),ncol=dimg)
+Sigmadraw = matrix(double(floor(R/keep)*4),ncol=4)
+
+#set initial values
+Sigma=diag(2)
+delta=c(rep(.1,dimd))
+
+#
+# start main iteration loop
+#
+itime=proc.time()[3]
+cat("MCMC Iteration (est time to end -min) ",fill=TRUE)
+fsh()
+xtd=matrix(nrow=2*n,ncol=dimd)
+ind=seq(1,(2*n-1),by=2)
+zvec=as.vector(t(z))
+
+for(rep in 1:R) {
+
+    # draw beta,gamma
+      e1 = as.vector(x-z%*%delta)
+      ee2 = (Sigma[1,2]/Sigma[1,1])*e1
+      sig = sqrt(Sigma[2,2]-(Sigma[1,2]^2/Sigma[1,1]))
+      yt = (y-ee2)/sig
+      xt = cbind(x,w)/sig
+      bg = breg(yt,xt,mbg,Abg)  
+      beta = bg[1]
+      gamma = bg[2:length(bg)]
+
+    # draw delta
+      C = matrix(c(1,beta,0,1),nrow=2)
+      B = C%*%Sigma%*%t(C)
+      L = t(chol(B))
+      Li=backsolve(L,diag(2),upper.tri=FALSE)
+      u = as.vector((y-w%*%gamma))
+      yt = as.vector(Li %*% rbind(x,u))
+
+      z2=rbind(zvec,beta*zvec)
+      z2=Li%*%z2
+      zt1=z2[1,]
+      zt2=z2[2,]
+      dim(zt1)=c(dimd,n)
+      zt1=t(zt1)
+      dim(zt2)=c(dimd,n)
+      zt2=t(zt2)
+      xtd[ind,]=zt1
+      xtd[-ind,]=zt2
+      delta = breg(yt,xtd,md,Ad)
+
+    # draw Sigma
+      Res = cbind(x-z%*%delta,y-beta*x-w%*%gamma)
+      S = crossprod(Res)
+      Sigma = rwishart(nu+n,chol2inv(chol(V+S)))$IW
+  
+   if(rep%%100==0)
+     {
+      ctime=proc.time()[3]
+      timetoend=((ctime-itime)/rep)*(R-rep)
+      cat(" ",rep," (",round(timetoend/60,1),")",fill=TRUE)
+      fsh()
+      }
+   if(rep%%keep ==0)
+     {
+      mkeep=rep/keep
+      deltadraw[mkeep,]=delta
+      betadraw[mkeep]=beta
+      gammadraw[mkeep,]=gamma
+      Sigmadraw[mkeep,]=Sigma
+      }
+}
+
+return(list(deltadraw=deltadraw,betadraw=betadraw,gammadraw=gammadraw,Sigmadraw=Sigmadraw))
+}
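+#
+# illustrative usage sketch (hypothetical simulated data with correlated errors; not run):
+#   set.seed(66)
+#   n=500; delta=c(1,1)
+#   z=matrix(rnorm(n*2),ncol=2)
+#   e=matrix(rnorm(n*2),ncol=2)%*%chol(matrix(c(1,.8,.8,1),ncol=2))
+#   x=as.vector(z%*%delta)+e[,1]
+#   w=matrix(rep(1,n),ncol=1)
+#   y=.5*x+2*as.vector(w)+e[,2]
+#   out=rivGibbs(Data=list(z=z,w=w,x=x,y=y),Prior=list(),Mcmc=list(R=2000))
+#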
diff --git a/R/rmixGibbs.R b/R/rmixGibbs.R
new file mode 100755
index 0000000..af4f416
--- /dev/null
+++ b/R/rmixGibbs.R
@@ -0,0 +1,84 @@
+rmixGibbs=
+function(y,Bbar,A,nu,V,a,p,z,comps)
+{
+#
+# Revision History: 
+#   R. McCulloch 11/04
+#   P. Rossi 3/05 put in backsolve and improved documentation
+#
+# purpose: do gibbs sampling inference for a mixture of multivariate normals
+#
+# arguments:
+#     y: data, rows are observations, assumed to be iid draws from normal mixture
+#     Bbar,A,nu,V: common prior for mean and variance of each normal component
+#        beta ~ N(betabar,Sigma (x) A^-1)
+#                   betabar=vec(Bbar)
+#          Sigma ~ IW(nu,V) or Sigma^-1 ~ W(nu, V^-1)
+#              note: if you want Sigma concentrated near A, use nu big and rwishart(nu,(nu*A)^{-1})$IW
+#     a: Dirichlet parameters for prior on p
+#     p: prior probabilities of normal components
+#     z: component identities for each observation 
+#        (vector of integers, each in {1,2,...,number of components})
+#     comps: list, each member is a list comp with ith normal component
+#                        ~N(comp[[1]],Sigma), Sigma = t(R)%*%R, R^{-1} = comp[[2]]
+# Output:
+#  list with elements [[1]]=$p, [[2]]=$z, and [[3]]=$comps, containing the updated values
+#
+#------------------------------------------------------------------------------------
+#  define functions needed
+#
+rcompsC = function(x,p,comps) {
+# purpose:
+#  draws class membership of rows of x, given x rows are iid draws from 
+#  mixture of multivariate normals
+# arguments:
+#     x: observations (number of observations x dimension)
+#     p: prior probabilities of mixture components
+#     comps: list, each member is a list with mean and R^{-1}, Sigma = t(R)%*%R 
+dim = ncol(x)
+nob = nrow(x)
+nc = length(comps)
+mumat = matrix(0.0,dim,nc)
+rivmat = matrix(0.0,dim*(dim+1)/2,nc)
+for(i in 1:nc) {
+   mumat[,i] = comps[[i]][[1]]
+   rivmat[,i] = uttovC(comps[[i]][[2]])
+}
+xx=t(x)
+.C('crcomps',as.double(xx),as.double(mumat),as.double(rivmat),as.double(p),
+    as.integer(dim),as.integer(nc),as.integer(nob),res=integer(nob))$res
+}
+
+uttovC = function(rooti) {
+# returns the vectorized form of the square upper triangular matrix rooti, going down columns and dropping the zeros
+dim = nrow(rooti)
+n = dim*(dim+1)/2
+.C('cuttov',as.double(rooti),res = double(n),as.integer(dim))$res
+}
+#-----------------------------------------------------------------------------------------
+nmix = length(a)
+#draw comps
+for(i in 1:nmix) {
+   nobincomp = sum(z==i)         # get number of observations "in" component i
+   if(nobincomp>0) {             # if at least one obs in this component, draw from posterior
+      yi=y[z==i,]
+      dim(yi)=c(nobincomp,ncol(y))
+          #  worry about case where y has only one col (univ mixtures) or only one row
+          #  then yi gets converted to a vector
+      temp = rmultireg(yi,matrix(rep(1,nobincomp),ncol=1),Bbar,A,nu,V)
+      comps[[i]] = list(mu = as.vector(temp$B),
+                        rooti=backsolve(chol(temp$Sigma),diag(rep(1,nrow(temp$Sigma)))))
+   } 
+   else { # else draw from the prior
+      rw=rwishart(nu,chol2inv(chol(V)))
+      comps[[i]] = list(mu = as.vector(t(Bbar) + (rw$CI %*% rnorm(length(Bbar)))/sqrt(A[1,1])),
+                        rooti=backsolve(chol(rw$IW),diag(rep(1,nrow(V)))))
+   }
+}
+#draw z
+z=rcompsC(y,p,comps)
+#draw p
+for(i in 1:length(a)) a[i] = a[i] + sum(z==i)
+p = rdirichlet(a)
+return(list(p=p,z=z,comps=comps))
+}
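+#
+# illustrative sketch of one Gibbs step (hypothetical starting values; not run):
+#   set.seed(66)
+#   y=rbind(matrix(rnorm(100),ncol=2),matrix(rnorm(100,mean=3),ncol=2))
+#   z=rep(1:2,each=50); p=c(.5,.5)
+#   comps=list(list(mu=c(0,0),rooti=diag(2)),list(mu=c(3,3),rooti=diag(2)))
+#   out=rmixGibbs(y,Bbar=matrix(0,nrow=1,ncol=2),A=matrix(.01),nu=4,V=4*diag(2),
+#                 a=c(5,5),p=p,z=z,comps=comps)
+#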
diff --git a/R/rmixture.R b/R/rmixture.R
new file mode 100755
index 0000000..2ffb7dc
--- /dev/null
+++ b/R/rmixture.R
@@ -0,0 +1,36 @@
+rmixture=
+function(n,p,comps)
+{
+#
+# R. McCulloch 12/04
+# revision history:
+#   commented by rossi 3/05
+#
+# purpose: iid draws from mixture of multivariate normals
+# arguments:
+#     n: number of draws
+#     p: prior probabilities of normal components
+#     comps: list, each member is a list comp with ith normal component
+#                     ~N(comp[[1]],Sigma), Sigma = t(R)%*%R, R^{-1} = comp[[2]]
+# output:
+#  list of x (n by length(comp[[1]]) matrix of draws) and z latent indicators of
+#  component
+#
+#----------------------------------------------------------------------------------
+# define function needed
+#
+rcomp=function(comp) {
+# purpose: draw multivariate normal with mean and variance given by comp 
+# arguments:
+#     comp is a list of length 2,
+#     comp[[1]] is the mean and comp[[2]] is R^{-1}, where Sigma = t(R)%*%R
+invUT = function(ut) {
+backsolve(ut,diag(rep(1,nrow(ut))))
+}
+as.vector(comp[[1]] + t(invUT(comp[[2]]))%*%rnorm(length(comp[[1]])))
+}
+#----------------------------------------------------------------------------------
+#
+z = sample(1:length(p), n, replace = TRUE, prob = p)
+return(list(x = t(sapply(comps[z],rcomp)),z=z))
+}
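+#
+# illustrative usage sketch (hypothetical two-component mixture; not run):
+#   comps=list(list(mu=c(0,0),rooti=diag(2)),list(mu=c(4,4),rooti=diag(2)))
+#   draws=rmixture(n=100,p=c(.5,.5),comps=comps)
+#   # draws$x is 100 x 2; draws$z gives the component of each row
+#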
diff --git a/R/rmnlIndepMetrop.R b/R/rmnlIndepMetrop.R
new file mode 100755
index 0000000..e473162
--- /dev/null
+++ b/R/rmnlIndepMetrop.R
@@ -0,0 +1,153 @@
+rmnlIndepMetrop=
+function(Data,Prior,Mcmc)
+{
+#
+# revision history:
+#   p. rossi 1/05
+#   2/9/05 fixed error in Metrop eval
+#
+# purpose: 
+#   draw from posterior for MNL using Independence Metropolis
+#
+# Arguments:
+#   Data - list of m,X,y  
+#     m is number of alternatives
+#     X is nobs*m x nvar matrix
+#     y is nobs vector of values from 1 to m
+#   Prior - list of A, betabar
+#     A is nvar x nvar prior preci matrix
+#     betabar is nvar x 1 prior mean
+#   Mcmc
+#     R is number of draws
+#     keep is thinning parameter
+#     nu degrees of freedom parameter for independence 
+#        sampling density
+#
+# Output:
+#   list of betadraws
+#
+# Model:   Pr(y=j) = exp(x_j'beta)/sum_k(exp(x_k'beta))
+#
+# Prior:   beta ~ N(betabar,A^-1)
+#
+# check arguments
+#
+pandterm=function(message) {stop(message,call.=FALSE)}
+if(missing(Data)) {pandterm("Requires Data argument -- list of y and X")}
+    if(is.null(Data$X)) {pandterm("Requires Data element X")}
+    X=Data$X
+    if(is.null(Data$y)) {pandterm("Requires Data element y")}
+    y=Data$y
+    if(is.null(Data$m)) {pandterm("Requires Data element m")}
+    m=Data$m
+nvar=ncol(X)
+nobs=length(y)
+#
+# check data for validity
+#
+if(length(y) != (nrow(X)/m) ) {pandterm("length(y) ne nrow(X)/m")}
+#
+# check for Prior
+#
+if(missing(Prior))
+   { betabar=c(rep(0,nvar)); A=.01*diag(nvar)}
+else
+   {
+    if(is.null(Prior$betabar)) {betabar=c(rep(0,nvar))} 
+       else {betabar=Prior$betabar}
+    if(is.null(Prior$A)) {A=.01*diag(nvar)} 
+       else {A=Prior$A}
+   }
+#
+# check dimensions of Priors
+#
+if(ncol(A) != nrow(A) || ncol(A) != nvar || nrow(A) != nvar) 
+   {pandterm(paste("bad dimensions for A",dim(A)))}
+if(length(betabar) != nvar)
+   {pandterm(paste("betabar wrong length, length= ",length(betabar)))}
+#
+# check MCMC argument
+#
+if(missing(Mcmc)) {pandterm("requires Mcmc argument")}
+else
+   {
+    if(is.null(Mcmc$R)) 
+       {pandterm("requires Mcmc element R")} else {R=Mcmc$R}
+    if(is.null(Mcmc$keep)) {keep=1} else {keep=Mcmc$keep}
+    if(is.null(Mcmc$nu)) {nu=6} else {nu=Mcmc$nu}
+   }
+#
+# print out problem
+#
+cat(" ", fill=TRUE)
+cat("Starting Gibbs Sampler for Multinomial Logit Model",fill=TRUE)
+cat("  with ",m," alternatives",fill=TRUE)
+cat(" ", fill=TRUE)
+cat("Prior Parms: ",fill=TRUE)
+cat("betabar",fill=TRUE)
+print(betabar)
+cat("A",fill=TRUE)
+print(A)
+cat(" ", fill=TRUE)
+cat("MCMC parms: ",fill=TRUE)
+cat("R= ",R," keep= ",keep," nu (df for st candidates) = ",nu,fill=TRUE)
+cat(" ",fill=TRUE)
+
+betadraw=matrix(double(floor(R/keep)*nvar),ncol=nvar)
+#
+# compute required quantities for indep candidates
+#
+beta=c(rep(0,nvar))
+mle=optim(beta,llmnl,X=X,y=y,method="BFGS",hessian=TRUE,control=list(fnscale=-1))
+beta=mle$par
+betastar=mle$par
+mhess=mnlHess(y,X,beta)
+candcov=chol2inv(chol(mhess))
+root=chol(candcov)
+rooti=backsolve(root,diag(nvar))
+priorcov=chol2inv(chol(A))
+rootp=chol(priorcov)
+rootpi=backsolve(rootp,diag(nvar))
+
+#
+#	start main iteration loop
+#
+itime=proc.time()[3]
+cat("MCMC Iteration (est time to end - min) ",fill=TRUE)
+fsh()
+
+oldlpost=llmnl(y,X,beta)+lndMvn(beta,betabar,rootpi)
+oldlimp=lndMvst(beta,nu,betastar,rooti)
+#       note: we don't need the determinants as they cancel in
+#       computation of acceptance prob
+naccept=0
+
+for (rep in 1:R) 
+{
+   betac=rmvst(nu,betastar,root)
+   clpost=llmnl(y,X,betac)+lndMvn(betac,betabar,rootpi)
+   climp=lndMvst(betac,nu,betastar,rooti)
+   ldiff=clpost+oldlimp-oldlpost-climp
+   alpha=min(1,exp(ldiff))
+   if(alpha < 1) {unif=runif(1)} else {unif=0}
+   if (unif <= alpha)
+      { beta=betac
+        oldlpost=clpost
+        oldlimp=climp
+        naccept=naccept+1}
+#
+#       print time to completion and draw # every 100th draw
+#
+  if(rep%%100 == 0)
+    {ctime=proc.time()[3]
+    timetoend=((ctime-itime)/rep)*(R-rep)
+    cat(" ",rep," (",round(timetoend/60,1),")",fill=TRUE)
+    fsh()}
+
+  if(rep%%keep == 0) 
+    {mkeep=rep/keep; betadraw[mkeep,]=beta}
+}
+ctime = proc.time()[3]
+cat('  Total Time Elapsed: ',round((ctime-itime)/60,2),'\n')
+return(list(betadraw=betadraw,acceptr=naccept/R))
+}
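+#
+# illustrative usage sketch (hypothetical simulated MNL data; not run):
+#   set.seed(66)
+#   sim=simmnl(j=3,n=300,beta=c(-1,1,.5,-.5))
+#   out=rmnlIndepMetrop(Data=list(m=3,X=sim$X,y=sim$y),Mcmc=list(R=2000))
+#   # out$betadraw holds the kept draws; out$acceptr is the acceptance rate
+#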
diff --git a/R/rmnpGibbs.R b/R/rmnpGibbs.R
new file mode 100755
index 0000000..e687257
--- /dev/null
+++ b/R/rmnpGibbs.R
@@ -0,0 +1,197 @@
+rmnpGibbs=
+function(Data,Prior,Mcmc) 
+{
+#
+# Revision History:
+#   modified by rossi 12/18/04 to include error checking
+#
+# purpose:  Gibbs MNP model with full covariance matrix
+#
+# Arguments:
+#   Data contains 
+#      p the number of choice alternatives
+#      y -- a vector of length n with choices (takes on values from 1, .., p)
+#      X -- n(p-1) x k matrix of covariates (including intercepts)
+#           note: X is the differenced matrix unlike MNL X=stack(X_1,..,X_n) 
+#                 each X_i is (p-1) x nvar
+#
+#   Prior contains a list of (betabar, A, nu, V)
+#      if elements of prior do not exist, defaults are used
+#
+#   Mcmc is a list of (beta0,sigma0,R,keep)  
+#     beta0,sigma0 are initial values; if not supplied, defaults are used
+#     R is number of draws
+#     keep is thinning parm, keep every keepth draw
+#
+# Output: a list of every keepth betadraw and sigmadraw
+#
+#  model: 
+#    w_i = X_ibeta + e    e~N(0,Sigma)     note w_i,e are (p-1) x 1
+#    y_i = j  if w_ij > w_i-j  j=1,...,p-1
+#    y_i = p  if all w_i < 0
+#  
+#  priors:
+#    beta ~ N(betabar,A^-1)
+#    Sigma ~ IW(nu,V)
+#
+#  Check arguments
+#
+pandterm=function(message) {stop(message,call.=FALSE)}
+if(missing(Data)) {pandterm("Requires Data argument -- list of p, y, X")}
+  if(is.null(Data$p)) {pandterm("Requires Data element p -- number of alternatives")}
+  p=Data$p
+  if(is.null(Data$y)) {pandterm("Requires Data element y -- vector of choices")}
+  y=Data$y
+  if(is.null(Data$X)) {pandterm("Requires Data element X -- matrix of covariates")}
+  X=Data$X
+#
+# check data for validity
+#
+levely=as.numeric(levels(as.factor(y)))
+if(length(levely) != p) {pandterm(paste("y takes on ",length(levely),
+  " values -- must be ",p))}
+  bady=FALSE
+  for (i in 1:p) 
+  {
+      if(levely[i] != i) bady=TRUE
+  }
+cat("Table of y values",fill=TRUE)
+print(table(y))
+if (bady) {pandterm("Invalid y")}
+n=length(y)
+k=ncol(X)
+pm1=p-1
+if(nrow(X)/n != pm1) {pandterm(paste("X has ",nrow(X)," rows; must be = (p-1)n"))}
+#
+# check for prior elements
+#
+if(missing(Prior)) 
+  { betabar=rep(0,k) ; A=.01*diag(k) ; nu=pm1+3; V=nu*diag(pm1)}
+else 
+  {if(is.null(Prior$betabar)) {betabar=rep(0,k)} else {betabar=Prior$betabar}
+   if(is.null(Prior$A)) {A=.01*diag(k)} else {A=Prior$A}
+   if(is.null(Prior$nu)) {nu=pm1+3} else {nu=Prior$nu}
+   if(is.null(Prior$V)) {V=nu*diag(pm1)} else {V=Prior$V}}
+if(length(betabar) != k) pandterm("length betabar ne k")
+if(sum(dim(A)==c(k,k)) != 2) pandterm("A is of incorrect dimension")
+if(nu < 1) pandterm("invalid nu value")
+if(sum(dim(V)==c(pm1,pm1)) != 2) pandterm("V is of incorrect dimension")
+#
+# check for Mcmc 
+#
+if(missing(Mcmc)) pandterm("Requires Mcmc argument -- at least R must be included")
+if(is.null(Mcmc$R)) {pandterm("Requires element R of Mcmc")} else {R=Mcmc$R}
+if(is.null(Mcmc$beta0)) {beta0=rep(0,k)} else {beta0=Mcmc$beta0}
+if(is.null(Mcmc$sigma0)) {sigma0=diag(pm1)} else {sigma0=Mcmc$sigma0}
+if(length(beta0) != k) pandterm("beta0 is not of length k")
+if(sum(dim(sigma0) == c(pm1,pm1)) != 2) pandterm("sigma0 is of incorrect dimension")
+if(is.null(Mcmc$keep)) {keep=1} else {keep=Mcmc$keep}
+#
+# print out problem
+#
+cat(" ",fill=TRUE)
+cat("Starting Gibbs Sampler for MNP",fill=TRUE)
+cat("  ",n," obs; ",p," choice alternatives; ",k," indep vars (including intercepts)",fill=TRUE)
+cat("  ",R," reps; keeping every ",keep,"th draw",fill=TRUE)
+cat(" ",fill=TRUE)
+cat("Prior Parms:",fill=TRUE)
+cat("betabar",fill=TRUE)
+print(betabar)
+cat("A",fill=TRUE)
+print(A)
+cat("nu",fill=TRUE)
+print(nu)
+cat("V",fill=TRUE)
+print(V)
+cat(" ",fill=TRUE)
+cat("MCMC Parms:",fill=TRUE)
+cat("R= ",R,fill=TRUE)
+cat("initial beta= ",beta0,fill=TRUE)
+cat("initial sigma= ",sigma0,fill=TRUE)
+cat(" ",fill=TRUE)
+#
+# allocate space for draws
+#
+sigmadraw=matrix(double(floor(R/keep)*pm1*pm1),ncol=pm1*pm1)
+betadraw=matrix(double(floor(R/keep)*k),ncol=k)
+wnew=double(nrow(X))
+betanew=double(k)
+
+#
+#  set initial values of w,beta, sigma (or root of inv)
+#
+wold=c(rep(0,nrow(X)))
+betaold=beta0
+C=chol(solve(sigma0))
+#
+#  C is upper triangular root of sigma^-1 (G) = C'C
+#
+#  create functions needed
+#
+drawwc=function(w,mu,y,sigi) {
+      .C("draww",w=as.double(w),as.double(mu),as.double(sigi),
+        as.integer(length(y)),as.integer(ncol(sigi)),as.integer(y))$w}
+
+draww=
+function(w,X,y,beta,sigmai){
+#
+#   draw latent vector
+#
+#       w is n(p-1) x 1 vector
+#       X is n(p-1) x k  matrix
+#       y is multinomial 1,..., p
+#       beta is k x 1 vector
+#       sigmai is (p-1) x (p-1) 
+#
+
+Xbeta=as.vector(X%*%beta)
+drawwc(w,Xbeta,y,sigmai)
+}
+
+itime=proc.time()[3]
+cat("MCMC Iteration (est time to end - min) ",fill=TRUE)
+for (rep in 1:R) 
+   {
+   #
+   # draw w given beta(rep-1),sigma(rep-1)
+   #
+   sigmai=crossprod(C)
+   wnew=draww(wold,X,y,betaold,sigmai)
+   #
+   # draw beta given w(rep) and sigma(rep-1)
+   #
+   #  note:  if Sigma^-1 (G) = C'C then Var(Ce)=CSigmaC' = I
+   #  first, transform w_i = X_ibeta + e_i by premultiply by C
+   #
+   zmat=matrix(cbind(wnew,X),nrow=pm1)
+   zmat=C%*%zmat
+   zmat=matrix(zmat,nrow=nrow(X))
+   betanew=breg(zmat[,1],zmat[,2:(k+1)],betabar,A)
+   #
+   # draw sigmai given w and beta
+   #
+   epsilon=matrix((wnew-X%*%betanew),nrow=pm1)
+   S=crossprod(t(epsilon))
+   W=rwishart(nu+n,chol2inv(chol(V+S)))
+   C=W$C
+   #
+   #       print time to completion and draw # every 100th draw
+   #
+   if(rep%%100 == 0)
+     {ctime=proc.time()[3]
+      timetoend=((ctime-itime)/rep)*(R+1-rep)
+      cat(" ",rep," (",round(timetoend/60,1),")",fill=TRUE)
+      fsh()}
+   #
+   #       save every keepth draw
+   #
+   if(rep%%keep ==0)
+      {mkeep=rep/keep
+      betadraw[mkeep,]=betanew
+      sigmadraw[mkeep,]=as.vector(W$IW)}
+   wold=wnew
+   betaold=betanew
+   }
+
+list(betadraw=betadraw,sigmadraw=sigmadraw)
+}
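+#
+# illustrative usage sketch (hypothetical differenced design; values are assumptions, not run):
+#   set.seed(66)
+#   p=3; n=500; k=3
+#   X=matrix(runif(n*(p-1)*k,-1,1),ncol=k)
+#   sim=simmnp(X,p,n,beta=c(1,-1,.5),sigma=diag(p-1))
+#   out=rmnpGibbs(Data=list(p=p,y=sim$y,X=X),Mcmc=list(R=2000,keep=1))
+#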
diff --git a/R/rmultireg.R b/R/rmultireg.R
new file mode 100755
index 0000000..2064b47
--- /dev/null
+++ b/R/rmultireg.R
@@ -0,0 +1,56 @@
+rmultireg=
+function(Y,X,Bbar,A,nu,V)
+{
+#
+# revision history:
+#    changed 1/11/05 by P. Rossi to fix sum of squares error
+#
+# purpose:
+#    draw from posterior for Multivariate Regression Model with
+#    natural conjugate prior
+# arguments:
+#    Y is n x m matrix
+#    X is n x k
+#    Bbar is the prior mean of regression coefficients  (k x m)
+#    A is prior precision matrix
+#    nu, V are parameters for prior on Sigma
+# output:
+#    list of B, Sigma draws of matrix of coefficients and Sigma matrix
+# model:
+#    Y=XB+U  cov(u_i) = Sigma
+#    B is k x m matrix of coefficients
+# priors:  beta|Sigma  ~ N(betabar,Sigma (x) A^-1)
+#                   betabar=vec(Bbar)
+#                   beta = vec(B) 
+#          Sigma ~ IW(nu,V) or Sigma^-1 ~ W(nu, V^-1)
+n=nrow(Y)
+m=ncol(Y)
+k=ncol(X)
+#
+# first draw Sigma
+#
+RA=chol(A)
+W=rbind(X,RA)
+Z=rbind(Y,RA%*%Bbar)
+#   note:  Y,X,A,Bbar must be matrices!
+IR=backsolve(chol(crossprod(W)),diag(k))
+#                      W'W = R'R  &  (W'W)^-1 = IRIR'  -- this is the UL decomp!
+Btilde=crossprod(t(IR))%*%crossprod(W,Z)   
+#                      IRIR'(W'Z) = (X'X+A)^-1(X'Y + ABbar)
+S=crossprod(Z-W%*%Btilde)
+#                      E'E
+rwout=rwishart(nu+n,chol2inv(chol(V+S)))
+#
+# now draw B given Sigma
+#   note beta ~ N(vec(Btilde),Sigma (x) Covxxa)
+#       Cov=(X'X + A)^-1  = IR t(IR)  
+#       Sigma=CICI'    
+#       therefore, cov(beta)= Omega = CICI' (x) IR IR' = (CI (x) IR) (CI (x) IR)'
+#	so to draw beta we do beta= vec(Btilde) +(CI (x) IR)vec(Z_mk)  
+#			Z_mk is m x k matrix of N(0,1)
+#	since vec(ABC) = (C' (x) A)vec(B), we have 
+#		B = Btilde + IR Z_mk CI'
+#
+B = Btilde + IR%*%matrix(rnorm(m*k),ncol=m)%*%t(rwout$CI)
+return(list(B=B,Sigma=rwout$IW))
+}
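+#
+# illustrative usage sketch (hypothetical simulated data; not run):
+#   set.seed(66)
+#   n=100; X=cbind(rep(1,n),rnorm(n)); B=matrix(c(1,2,-1,.5),ncol=2)
+#   Y=X%*%B+matrix(rnorm(2*n),ncol=2)
+#   out=rmultireg(Y,X,Bbar=matrix(0,nrow=2,ncol=2),A=.01*diag(2),nu=5,V=5*diag(2))
+#   # out$B is one 2 x 2 coefficient draw; out$Sigma is one 2 x 2 covariance draw
+#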
diff --git a/R/rmultiregfp.R b/R/rmultiregfp.R
new file mode 100755
index 0000000..d7fb186
--- /dev/null
+++ b/R/rmultiregfp.R
@@ -0,0 +1,53 @@
+rmultiregfp=
+function(Y,X,Fparm)
+{
+#
+# revision history:
+#    changed 1/11/05 by P. Rossi to fix sum of squares error
+#
+# purpose:
+#    draw from posterior for Multivariate Regression Model
+# arguments:
+#    Y is n x m matrix
+#    X is n x k
+#    Fparm is a list of fixed parameters created by init.rmultiregfp    
+# output:
+#    list of B, Sigma draws of matrix of coefficients and Sigma matrix
+# model:
+#    Y=XB+U  cov(u_i) = Sigma
+#    B is k x m matrix of coefficients
+# priors:  beta|Sigma  ~ N(betabar,Sigma (x) A^-1)
+#                   betabar=vec(Bbar)
+#                   beta = vec(B) 
+#          Sigma ~ IW(nu,V) or Sigma^-1 ~ W(nu, V^-1)
+n=nrow(Y)
+m=ncol(Y)
+k=ncol(X)
+#
+# first draw Sigma
+#
+W=rbind(X,Fparm$RA)
+Z=rbind(Y,Fparm$RABbar)
+#   note:  Y,X,A,Bbar must be matrices!
+IR=Fparm$IR
+#                      W'W = R'R  &  (W'W)^-1 = IRIR'  -- this is the UL decomp!
+Btilde=crossprod(t(IR))%*%crossprod(W,Z)   
+#                      IRIR'(W'Z) = (X'X+A)^-1(X'Y + ABbar)
+S=crossprod(Z-W%*%Btilde)
+#                      E'E
+rwout=rwishart(Fparm$nu+n,chol2inv(chol(Fparm$V+S)))
+#
+# now draw B given Sigma
+#   note beta ~ N(vec(Btilde),Sigma (x) Covxxa)
+#       Cov=(X'X + A)^-1  = IR t(IR)  
+#       Sigma=CICI'    
+#       therefore, cov(beta)= Omega = CICI' (x) IR IR' = (CI (x) IR) (CI (x) IR)'
+#	so to draw beta we do beta= vec(Btilde) +(CI (x) IR)vec(Z_mk)  
+#			Z_mk is m x k matrix of N(0,1)
+#	since vec(ABC) = (C' (x) A)vec(B), we have 
+#		B = Btilde + IR Z_mk CI'
+#
+B = Btilde + IR%*%matrix(rnorm(m*k),ncol=m)%*%t(rwout$CI)
+return(list(B=B,Sigma=rwout$IW))
+}
+
diff --git a/R/rmvpGibbs.R b/R/rmvpGibbs.R
new file mode 100755
index 0000000..2d15699
--- /dev/null
+++ b/R/rmvpGibbs.R
@@ -0,0 +1,193 @@
+rmvpGibbs=
+function(Data,Prior,Mcmc) 
+{
+#
+# Revision History:
+#   modified by rossi 12/18/04 to include error checking
+#
+# purpose:  Gibbs MVP model with full covariance matrix
+#
+# Arguments:
+#   Data contains 
+#      p the number of alternatives (could be time or could be from pick j of p survey)
+#      y -- a vector of length n*p of indicators (1 if "chosen", 0 if not)
+#      X -- np x k matrix of covariates (including intercepts)
+#                 each X_i is p x nvar
+#
+#   Prior contains a list of (betabar, A, nu, V)
+#      if elements of prior do not exist, defaults are used
+#
+#   Mcmc is a list of (beta0,sigma0,R,keep)  
+#     beta0,sigma0 are initial values; if not supplied, defaults are used
+#     R is number of draws
+#     keep is thinning parm, keep every keepth draw
+#
+# Output: a list of every keepth betadraw and sigmadraw
+#
+#  model: 
+#    w_i = X_ibeta + e    e~N(0,Sigma)     note w_i,e are p x 1
+#    y_ij = 1 if w_ij > 0 else y_ij = 0  
+#  
+#  priors:
+#    beta ~ N(betabar,A^-1) in prior
+#    Sigma ~ IW(nu,V)
+#
+#  Check arguments
+#
+pandterm=function(message) {stop(message,call.=FALSE)}
+if(missing(Data)) {pandterm("Requires Data argument -- list of p, y, X")}
+  if(is.null(Data$p)) {pandterm("Requires Data element p -- number of binary indicators")}
+  p=Data$p
+  if(is.null(Data$y)) {pandterm("Requires Data element y -- values of binary indicators")}
+  y=Data$y
+  if(is.null(Data$X)) {pandterm("Requires Data element X -- matrix of covariates")}
+  X=Data$X
+#
+# check data for validity
+#
+levely=as.numeric(levels(as.factor(y)))
+  bady=FALSE
+  for (i in 0:1) 
+  { if(levely[i+1] != i) {bady=TRUE} }
+cat("Table of y values",fill=TRUE)
+print(table(y))
+if (bady) {pandterm("Invalid y")}
+if (length(y)%%p !=0) {pandterm("length of y is not a multiple of p")}
+n=length(y)/p
+k=ncol(X)
+if(nrow(X) != (n*p)) {pandterm(paste("X has ",nrow(X)," rows; must be = p*n"))}
+#
+# check for prior elements
+#
+if(missing(Prior)) 
+  { betabar=rep(0,k) ; A=.01*diag(k) ; nu=p+3; V=nu*diag(p)}
+else 
+  {if(is.null(Prior$betabar)) {betabar=rep(0,k)} else {betabar=Prior$betabar}
+   if(is.null(Prior$A)) {A=.01*diag(k)} else {A=Prior$A}
+   if(is.null(Prior$nu)) {nu=p+3} else {nu=Prior$nu}
+   if(is.null(Prior$V)) {V=nu*diag(p)} else {V=Prior$V}}
+if(length(betabar) != k) pandterm("length betabar ne k")
+if(sum(dim(A)==c(k,k)) != 2) pandterm("A is of incorrect dimension")
+if(nu < 1) pandterm("invalid nu value")
+if(sum(dim(V)==c(p,p)) != 2) pandterm("V is of incorrect dimension")
+#
+# check for Mcmc 
+#
+if(missing(Mcmc)) pandterm("Requires Mcmc argument -- at least R must be included")
+if(is.null(Mcmc$R)) {pandterm("Requires element R of Mcmc")} else {R=Mcmc$R}
+if(is.null(Mcmc$beta0)) {beta0=rep(0,k)} else {beta0=Mcmc$beta0}
+if(is.null(Mcmc$sigma0)) {sigma0=diag(p)} else {sigma0=Mcmc$sigma0}
+if(length(beta0) != k) pandterm("beta0 is not of length k")
+if(sum(dim(sigma0) == c(p,p)) != 2) pandterm("sigma0 is of incorrect dimension")
+if(is.null(Mcmc$keep)) {keep=1} else {keep=Mcmc$keep}
+#
+# print out problem
+#
+cat(" ",fill=TRUE)
+cat("Starting Gibbs Sampler for MVP",fill=TRUE)
+cat("  ",n," obs of ",p," binary indicators; ",k," indep vars (including intercepts)",fill=TRUE)
+cat("  ",R," reps; keeping every ",keep,"th draw",fill=TRUE)
+cat(" ",fill=TRUE)
+cat("Prior Parms:",fill=TRUE)
+cat("betabar",fill=TRUE)
+print(betabar)
+cat("A",fill=TRUE)
+print(A)
+cat("nu",fill=TRUE)
+print(nu)
+cat("V",fill=TRUE)
+print(V)
+cat(" ",fill=TRUE)
+cat("MCMC Parms:",fill=TRUE)
+cat("R= ",R,fill=TRUE)
+cat("initial beta= ",beta0,fill=TRUE)
+cat("initial sigma= ",sigma0,fill=TRUE)
+cat(" ",fill=TRUE)
+
+#
+# allocate space for draws
+#
+sigmadraw=matrix(double(floor(R/keep)*p*p),ncol=p*p)
+betadraw=matrix(double(floor(R/keep)*k),ncol=k)
+wnew=double(nrow(X))
+betanew=double(k)
+
+#
+#  set initial values of w,beta, sigma (or root of inv)
+#
+wold=c(rep(0,nrow(X)))
+betaold=beta0
+C=chol(solve(sigma0))
+#
+#  C is upper triangular root of sigma^-1 (G) = C'C
+#
+#  create functions needed
+#
+drawwMvpC=function(w,mu,y,sigi) {
+	p=ncol(sigi)
+      .C("draww_mvp",w=as.double(w),as.double(mu),as.double(sigi),
+        as.integer(length(w)/p),as.integer(p),as.integer(y))$w}
+
+drawwMvp=
+function(w,X,y,beta,sigmai){
+#
+#   draw latent vector
+#
+#       w is n*p x 1 vector
+#       X is n*p x k  matrix
+#       y is n*p x 1 vector of binary (0,1) outcomes 
+#       beta is k x 1 vector
+#       sigmai is p x p 
+#
+
+Xbeta=as.vector(X%*%beta)
+drawwMvpC(w,Xbeta,y,sigmai)
+}
+
+itime=proc.time()[3]
+cat("MCMC Iteration (est time to end - min) ",fill=TRUE)
+for (rep in 1:R) 
+   {
+   #
+   # draw w given beta(rep-1),sigma(rep-1)
+   #
+   sigmai=crossprod(C)
+   wnew=drawwMvp(wold,X,y,betaold,sigmai)
+   #
+   # draw beta given w(rep) and sigma(rep-1)
+   #
+   #  note:  if Sigma^-1 (G) = C'C then Var(Ce)=CSigmaC' = I
+   #  first, transform w_i = X_ibeta + e_i by premultiply by C
+   #
+   zmat=matrix(cbind(wnew,X),nrow=p)
+   zmat=C%*%zmat
+   zmat=matrix(zmat,nrow=nrow(X))
+   betanew=breg(zmat[,1],zmat[,2:(k+1)],betabar,A)
+   #
+   # draw sigmai given w and beta
+   #
+   epsilon=matrix((wnew-X%*%betanew),nrow=p)
+   S=crossprod(t(epsilon))
+   W=rwishart(nu+n,chol2inv(chol(V+S)))
+   C=W$C
+   #
+   #       print time to completion and draw # every 100th draw
+   #
+   if(rep%%100 == 0)
+     {ctime=proc.time()[3]
+      timetoend=((ctime-itime)/rep)*(R+1-rep)
+      cat(" ",rep," (",round(timetoend/60,1),")",fill=TRUE)
+      fsh()}
+   #
+   #       save every keepth draw
+   #
+   if(rep%%keep ==0)
+      {mkeep=rep/keep
+      betadraw[mkeep,]=betanew
+      sigmadraw[mkeep,]=as.vector(W$IW)}
+   wold=wnew
+   betaold=betanew
+   }
+
+return(list(betadraw=betadraw,sigmadraw=sigmadraw))
+}
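+#
+# illustrative usage sketch (hypothetical simulated data; not run):
+#   set.seed(66)
+#   p=3; n=300; k=2
+#   X=matrix(runif(n*p*k,-1,1),ncol=k)
+#   sim=simmvp(X,p,n,beta=c(1,-1),sigma=diag(p))
+#   out=rmvpGibbs(Data=list(p=p,y=sim$y,X=X),Mcmc=list(R=2000))
+#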
diff --git a/R/rmvst.R b/R/rmvst.R
new file mode 100755
index 0000000..674cab4
--- /dev/null
+++ b/R/rmvst.R
@@ -0,0 +1,8 @@
+rmvst=
+function(nu,mu,root){
+#
+# function to draw from MV s-t  with nu df, mean mu, Sigma=t(root)%*%root
+#      root is upper triangular cholesky root
+nvec=t(root)%*%rnorm(length(mu))
+return(nvec/sqrt(rchisq(1,nu)/nu) + mu)
+}
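+#
+# illustrative usage sketch (hypothetical values; not run):
+#   Sigma=matrix(c(1,.5,.5,1),ncol=2)
+#   rmvst(nu=5,mu=c(0,0),root=chol(Sigma))
+#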
diff --git a/R/rnmixGibbs.R b/R/rnmixGibbs.R
new file mode 100755
index 0000000..5d464d6
--- /dev/null
+++ b/R/rnmixGibbs.R
@@ -0,0 +1,160 @@
+rnmixGibbs= 
+function(Data,Prior,Mcmc)
+{
+#
+# Revision History: 
+#   P. Rossi 3/05
+#
+# purpose: do Gibbs sampling inference for a mixture of multivariate normals
+#
+# arguments:
+#     Data is a list of y which is an n x k matrix of data -- each row
+#       is an iid draw from the normal mixture
+#     Prior is a list of (Mubar,A,nu,V,a,ncomp)
+#       ncomp is required
+#       if elements of the prior don't exist, defaults are assumed
+#     Mcmc is a list of R and keep (thinning parameter)
+# Output:
+#     list with elements
+#     pdraw -- R/keep x ncomp array of mixture prob draws
+#     zdraw -- R/keep x nobs array of indicators of mixture comp identity for each obs
+#     compsdraw -- list of R/keep lists of lists of comp parm draws
+#        e.g. compsdraw[[i]] is ith draw -- list of ncomp lists
+#             compsdraw[[i]][[j]] is list of parms for jth normal component
+#             if jcomp=compsdraw[[i]][[j]]
+#                        ~N(jcomp[[1]],Sigma), Sigma = t(R)%*%R, R^{-1} = jcomp[[2]]
+#
+# Model:
+#        y_i ~ N(mu_ind,Sigma_ind)
+#        ind ~ iid multinomial(p)  p is a 1x ncomp vector of probs
+# Priors:
+#        mu_j ~ N(mubar,Sigma (x) A^-1)
+#        mubar=vec(Mubar)
+#        Sigma_j ~ IW(nu,V)
+#        note: this is the natural conjugate prior -- a special case of multivariate 
+#              regression
+#        p ~ Dirichlet(a)
+#
+#  check arguments
+#
+pandterm=function(message) {stop(message,call.=FALSE)}
+if(missing(Data)) {pandterm("Requires Data argument -- list of y")}
+    if(is.null(Data$y)) {pandterm("Requires Data element y")}
+    y=Data$y
+#
+# check data for validity
+#
+if(!is.matrix(y)) {pandterm("y must be a matrix")}
+nobs=nrow(y)
+dimy=ncol(y)
+#
+# check for Prior
+#
+if(missing(Prior)) {pandterm("requires Prior argument <min of Prior$ncomp>")}
+else
+   {
+    if(is.null(Prior$ncomp)) {pandterm("requires number of mix comps -- Prior$ncomp")}
+       else {ncomp=Prior$ncomp}
+    if(is.null(Prior$Mubar)) {Mubar=matrix(rep(0,dimy),nrow=1)} 
+       else {Mubar=Prior$Mubar}
+    if(is.null(Prior$A)) {A=matrix(c(.01),ncol=1)} 
+       else {A=Prior$A}
+    if(is.null(Prior$nu)) {nu=dimy+2} 
+       else {nu=Prior$nu}
+    if(is.null(Prior$V)) {V=nu*diag(dimy)} 
+       else {V=Prior$V}
+    if(is.null(Prior$a)) {a=c(rep(5,ncomp))}
+       else {a=Prior$a}
+   }
+#
+# check dimensions of Priors
+#
+if(ncol(A) != nrow(A) || ncol(A) != 1)
+   {pandterm(paste("bad dimensions for A",dim(A)))}
+if(!is.matrix(Mubar))
+   {pandterm("Mubar must be a matrix")}
+if(nrow(Mubar) != 1 || ncol(Mubar) != dimy) 
+   {pandterm(paste("bad dimensions for Mubar",dim(Mubar)))}
+if(ncol(V) != nrow(V) || ncol(V) != dimy)
+   {pandterm(paste("bad dimensions for V",dim(V)))}
+if(length(a) != ncomp)
+   {pandterm(paste("a wrong length, length= ",length(a)))}
+#
+# check MCMC argument
+#
+if(missing(Mcmc)) {pandterm("requires Mcmc argument")}
+else
+   {
+    if(is.null(Mcmc$R)) 
+       {pandterm("requires Mcmc element R")} else {R=Mcmc$R}
+    if(is.null(Mcmc$keep)) {keep=1} else {keep=Mcmc$keep}
+   }
+
+#
+# print out the problem
+#
+cat(" Starting Gibbs Sampler for Mixture of Normals",fill=TRUE)
+cat(" ",nobs," observations on ",dimy," dimensional data",fill=TRUE)
+cat("     using ",ncomp," mixture components",fill=TRUE)
+cat(" ",fill=TRUE)
+cat(" Prior Parms: ",fill=TRUE)
+cat("  mu_j ~ N(mubar,Sigma (x) A^-1)",fill=TRUE)
+cat("  mubar = ",fill=TRUE)
+print(Mubar)
+cat("  precision parm for prior variance of mu vectors (A)= ",A,fill=TRUE)
+cat("  Sigma_j ~ IW(nu,V) nu= ",nu,fill=TRUE)
+cat("  V =",fill=TRUE)
+print(V)
+cat("  Dirichlet parameters ",fill=TRUE)
+print(a)
+cat(" ",fill=TRUE)
+cat(" Mcmc Parms: R= ",R," keep= ",keep,fill=TRUE)
+
+pdraw=matrix(double(floor(R/keep)*ncomp),ncol=ncomp)
+zdraw=matrix(double(floor(R/keep)*nobs),ncol=nobs)
+compdraw=list()
+compsd=list()
+
+#
+# set initial values of z
+#
+ninc = floor(nobs/ncomp)
+z = c()
+for(i in 1:(ncomp-1)) z = c(z,rep(i,ninc))
+z = c(z,rep(ncomp,nobs-length(z)))
+cat(" ",fill=TRUE)
+cat("starting value for z",fill=TRUE)
+print(table(z))
+cat(" ",fill=TRUE)
+p=c(rep(1,ncomp))/ncomp # note this is not used
+
+
+#
+# start main iteration loop
+#
+itime=proc.time()[3]
+cat("MCMC Iteration (est time to end -min) ",fill=TRUE)
+fsh()
+for(rep in 1:R)
+{
+   out = rmixGibbs(y,Mubar,A,nu,V,a,p,z,compsd)
+   compsd=out$comps
+   p=out$p
+   z=out$z
+   if(rep%%100==0)
+     {
+      ctime=proc.time()[3]
+      timetoend=((ctime-itime)/rep)*(R-rep)
+      cat(" ",rep," (",round(timetoend/60,1),")",fill=TRUE)
+      fsh()
+      }
+   if(rep%%keep ==0)
+     {
+      mkeep=rep/keep
+      pdraw[mkeep,]=p
+      zdraw[mkeep,]=z
+      compdraw[[mkeep]]=compsd
+      }
+}
+return(list(probdraw=pdraw,zdraw=zdraw,compdraw=compdraw))
+}
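+#
+# illustrative usage sketch (hypothetical two-component data; not run):
+#   set.seed(66)
+#   y=rbind(matrix(rnorm(400),ncol=2),matrix(rnorm(400,mean=4),ncol=2))
+#   out=rnmixGibbs(Data=list(y=y),Prior=list(ncomp=2),Mcmc=list(R=2000))
+#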
diff --git a/R/rtrun.R b/R/rtrun.R
new file mode 100755
index 0000000..65aca38
--- /dev/null
+++ b/R/rtrun.R
@@ -0,0 +1,11 @@
+rtrun=
+function(mu,sigma,a,b){
+#
+# function to draw from univariate truncated norm
+# a is vector of lower bounds for truncation
+# b is vector of upper bounds for truncation
+#
+FA=pnorm(((a-mu)/sigma))
+FB=pnorm(((b-mu)/sigma))
+return(mu+sigma*qnorm(runif(length(mu))*(FB-FA)+FA))
+}
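+#
+# illustrative usage sketch (hypothetical values; not run):
+#   rtrun(mu=rep(0,5),sigma=rep(1,5),a=rep(0,5),b=rep(Inf,5))  # 5 draws from N(0,1) truncated to (0,Inf)
+#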
diff --git a/R/runireg.R b/R/runireg.R
new file mode 100755
index 0000000..8651ff5
--- /dev/null
+++ b/R/runireg.R
@@ -0,0 +1,47 @@
+runireg=
+function(y,X,betabar,A,nu,ssq){
+#   
+#    revision history:
+#      p. rossi 1/11/05 -- fix error in sum of squares
+#  
+# purpose: 
+#          draw from posterior for a univariate regression model
+#          with natural conjugate prior
+#
+# arguments: 
+#      y is n x 1
+#      X is n x k
+#      betabar is prior mean 
+#      A is prior precision
+#      nu, ssq are parameters of prior on sigmasq
+# output:
+#      list of beta, sigmasq draws
+#         beta is k x 1 vector of coefficients
+# model:
+#      Y=Xbeta+e  var(e_i) = sigmasq
+#      priors:  beta| sigmasq ~ N(betabar,sigmasq*A^-1)
+#                sigmasq ~ (nu*ssq)/chisq_nu
+n=length(y)
+k=ncol(X)
+#
+# first draw Sigma
+#
+RA=chol(A)
+W=rbind(X,RA)
+z=c(y,as.vector(RA%*%betabar))
+IR=backsolve(chol(crossprod(W)),diag(k))
+#      W'W=R'R ;  (W'W)^-1 = IR IR'  -- this is UL decomp
+btilde=crossprod(t(IR))%*%crossprod(W,z)
+res=z-W%*%btilde
+s=t(res)%*%res
+#
+# now draw sigmasq
+#
+sigmasq=(nu*ssq + s)/rchisq(1,nu+n)
+#
+# now draw beta given Sigma
+#	
+beta = btilde + as.vector(sqrt(sigmasq))*IR%*%rnorm(k)
+list(beta=beta,sigmasq=sigmasq)
+}
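+#
+# illustrative usage sketch (hypothetical simulated data; not run):
+#   set.seed(66)
+#   n=200; X=cbind(rep(1,n),runif(n)); beta=c(1,2)
+#   y=as.vector(X%*%beta+rnorm(n))
+#   out=runireg(y,X,betabar=c(0,0),A=.01*diag(2),nu=3,ssq=var(y))
+#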
diff --git a/R/runiregGibbs.R b/R/runiregGibbs.R
new file mode 100755
index 0000000..5bf6e94
--- /dev/null
+++ b/R/runiregGibbs.R
@@ -0,0 +1,143 @@
+runiregGibbs=
+function(Data,Prior,Mcmc)
+{
+# 
+# revision history:
+#          P. Rossi 1/17/05
+# Purpose:
+#   perform Gibbs iterations for Univ Regression Model using
+#     prior with beta, sigma-sq indep
+# 
+# Arguments:
+#   Data -- list of data 
+#           y,X
+#   Prior -- list of prior hyperparameters
+#     betabar,A      prior mean, prior precision
+#     nu, ssq        prior on sigmasq
+#   Mcmc -- list of MCMC parms
+#     sigmasq=initial value for sigmasq
+#     R number of draws
+#     keep -- thinning parameter
+# 
+# Output: 
+#   list of beta, sigmasq
+#
+# Model:
+#   y = Xbeta + e  e ~N(0,sigmasq)
+#          y is n x 1
+#          X is n x k
+#          beta is k x 1 vector of coefficients
+#
+# Priors:  beta ~ N(betabar,A^-1)
+#          sigmasq ~ (nu*ssq)/chisq_nu
+# 
+#
+# check arguments
+#
+pandterm=function(message) {stop(message,call.=FALSE)}
+if(missing(Data)) {pandterm("Requires Data argument -- list of y and X")}
+    if(is.null(Data$X)) {pandterm("Requires Data element X")}
+    X=Data$X
+    if(is.null(Data$y)) {pandterm("Requires Data element y")}
+    y=Data$y
+nvar=ncol(X)
+nobs=length(y)
+#
+# check data for validity
+#
+if(nobs != nrow(X) ) {pandterm("length(y) ne nrow(X)")}
+#
+# check for Prior
+#
+if(missing(Prior))
+   { betabar=c(rep(0,nvar)); A=.01*diag(nvar); nu=3; ssq=var(y)}
+else
+   {
+    if(is.null(Prior$betabar)) {betabar=c(rep(0,nvar))} 
+       else {betabar=Prior$betabar}
+    if(is.null(Prior$A)) {A=.01*diag(nvar)} 
+       else {A=Prior$A}
+    if(is.null(Prior$nu)) {nu=3}
+       else {nu=Prior$nu}
+    if(is.null(Prior$ssq)) {ssq=var(y)}
+       else {ssq=Prior$ssq}
+   }
+#
+# check dimensions of Priors
+#
+if(ncol(A) != nrow(A) || ncol(A) != nvar || nrow(A) != nvar) 
+   {pandterm(paste("bad dimensions for A",dim(A)))}
+if(length(betabar) != nvar)
+   {pandterm(paste("betabar wrong length, length= ",length(betabar)))}
+#
+# check MCMC argument
+#
+if(missing(Mcmc)) {pandterm("requires Mcmc argument")}
+else
+   {
+    if(is.null(Mcmc$R)) 
+       {pandterm("requires Mcmc element R")} else {R=Mcmc$R}
+    if(is.null(Mcmc$keep)) {keep=1} else {keep=Mcmc$keep}
+    if(is.null(Mcmc$sigmasq)) {sigmasq=var(y)} else {sigmasq=Mcmc$sigmasq}
+   }
+#
+# print out problem
+#
+cat(" ", fill=TRUE)
+cat("Starting Gibbs Sampler for Univariate Regression Model",fill=TRUE)
+cat("  with ",nobs," observations",fill=TRUE)
+cat(" ", fill=TRUE)
+cat("Prior Parms: ",fill=TRUE)
+cat("betabar",fill=TRUE)
+print(betabar)
+cat("A",fill=TRUE)
+print(A)
+cat("nu = ",nu," ssq= ",ssq,fill=TRUE)
+cat(" ", fill=TRUE)
+cat("MCMC parms: ",fill=TRUE)
+cat("R= ",R," keep= ",keep,fill=TRUE)
+cat(" ",fill=TRUE)
+
+sigmasqdraw=double(floor(Mcmc$R/keep))
+betadraw=matrix(double(floor(Mcmc$R*nvar/keep)),ncol=nvar)
+XpX=crossprod(X)
+Xpy=crossprod(X,y)
+sigmasq=as.vector(sigmasq)
+
+itime=proc.time()[3]
+cat("MCMC Iteration (est time to end - min) ",fill=TRUE)
+fsh()
+
+for (rep in 1:Mcmc$R)
+{
+#
+#     first draw beta | sigmasq
+#
+  IR=backsolve(chol(XpX/sigmasq+A),diag(nvar))
+  btilde=crossprod(t(IR))%*%(Xpy/sigmasq+A%*%betabar)
+  beta = btilde + IR%*%rnorm(nvar)
+#
+#    now draw sigmasq | beta
+#
+  res=y-X%*%beta
+  s=t(res)%*%res
+  sigmasq=(nu*ssq + s)/rchisq(1,nu+nobs)
+  sigmasq=as.vector(sigmasq)
+#
+#       print time to completion and draw # every 100th draw
+#
+  if(rep%%100 == 0)
+    {ctime=proc.time()[3]
+    timetoend=((ctime-itime)/rep)*(R-rep)
+    cat(" ",rep," (",round(timetoend/60,1),")",fill=TRUE)
+    fsh()}
+
+  if(rep%%keep == 0) 
+    {mkeep=rep/keep; betadraw[mkeep,]=beta; sigmasqdraw[mkeep]=sigmasq}
+}
+ctime = proc.time()[3]
+cat('  Total Time Elapsed: ',round((ctime-itime)/60,2),'\n')
+
+
+return(list(betadraw=betadraw,sigmasqdraw=sigmasqdraw))
+}
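+#
+# illustrative usage sketch (hypothetical simulated data; not run):
+#   set.seed(66)
+#   n=200; X=cbind(rep(1,n),runif(n))
+#   y=as.vector(X%*%c(1,2))+rnorm(n)
+#   out=runiregGibbs(Data=list(y=y,X=X),Prior=list(nu=3,ssq=var(y)),
+#                    Mcmc=list(R=2000,keep=1))
+#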
diff --git a/R/rwishart.R b/R/rwishart.R
new file mode 100755
index 0000000..cc372e9
--- /dev/null
+++ b/R/rwishart.R
@@ -0,0 +1,30 @@
+rwishart=
+function(nu,V){
+#
+# function to draw from Wishart (nu,V) and IW
+# 
+# W ~ W(nu,V)
+# E[W]=nuV
+#
+# WI=W^-1
+# E[WI]=V^-1/(nu-m-1)
+# 
+#
+m=nrow(V)
+df=(nu+nu-m+1)-(nu-m+1):nu
+if(m >1) {
+T=diag(sqrt(rchisq(c(rep(1,m)),df)))
+T[lower.tri(T)]=rnorm((m*(m+1)/2-m))}
+else
+{T=sqrt(rchisq(1,df))}
+U=chol(V)
+C=t(T)%*%U
+CI=backsolve(C,diag(m))
+#
+#   C is the upper triangular root of Wishart
+#      therefore, W=C'C  this is the LU decomposition 
+#      Inv(W) = CICI'  Note:  this is the UL decomp not LU!
+#
+return(list(W=crossprod(C),IW=crossprod(t(CI)),C=C,CI=CI))
+#  W is Wishart draw,  IW is W^-1
+}
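+#
+# illustrative usage sketch (hypothetical values; not run):
+#   draw=rwishart(nu=10,V=diag(3))
+#   # draw$W is a Wishart(10,I) draw with E[W]=10*I; draw$IW is its inverse
+#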
diff --git a/R/simmnl.R b/R/simmnl.R
new file mode 100755
index 0000000..a285545
--- /dev/null
+++ b/R/simmnl.R
@@ -0,0 +1,51 @@
+simmnl=
+function(j,n,beta) 
+{
+#
+#   p. rossi 2004
+#
+# Purpose: simulate from MNL (including X values)
+#
+# Arguments:
+#   j is number of alternatives
+#   n is number of obs
+#   beta is true parm value
+#
+# Output:
+#   list of X  (note: we include full set of intercepts and 2 unif(-1,1) X vars)
+#   y  (indicator of choice -- 1,...,j)
+#   prob is a n x j matrix of choice probs
+#
+#   note: first choice alternative has intercept set to zero
+#
+k=length(beta)
+x1=runif(n*j,min=-1,max=1)
+x2=runif(n*j,min=-1,max=1)
+I2=diag(rep(1,j-1))
+zero=rep(0,j-1)
+xadd=rbind(zero,I2)
+for(i in 2:n) {
+        xadd=rbind(xadd,zero,I2)
+}
+
+X=cbind(xadd,x1,x2)
+
+# now construct probabilities
+Xbeta=X%*%beta
+j=nrow(Xbeta)/n
+Xbeta=matrix(Xbeta,byrow=T,ncol=j)
+Prob=exp(Xbeta)
+iota=c(rep(1,j))
+denom=Prob%*%iota
+Prob=Prob/as.vector(denom)
+# draw y
+y=vector("double",n)
+ind=1:j
+for (i in 1:n) 
+        {
+        yvec=rmultinom(1,1,Prob[i,])
+        y[i]=ind%*%yvec
+        }
+
+return(list(y=y,X=X,beta=beta,prob=Prob))
+}
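+#
+# illustrative usage sketch (hypothetical parameter values; not run):
+#   sim=simmnl(j=3,n=100,beta=c(1,-1,.5,2))
+#   # sim$X is 300 x 4 (2 intercepts + 2 covariates); sim$y takes values in 1:3
+#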
diff --git a/R/simmnlwX.R b/R/simmnlwX.R
new file mode 100755
index 0000000..4995c26
--- /dev/null
+++ b/R/simmnlwX.R
@@ -0,0 +1,41 @@
+simmnlwX=
+function(n,X,beta) 
+{
+#
+#     p. rossi 2004
+#
+# Purpose:  simulate from MNL model conditional on X matrix
+#
+# Arguments:
+#   n is number of obs
+#   X is the full design matrix (n*j rows), including intercepts
+#   beta is true parm value
+#
+# Output:
+#   list of X  (the design matrix passed in)
+#   y  (indicator of choice -- 1,...,j)
+#   prob is a n x j matrix of choice probs
+#
+#   note: first choice alternative has intercept set to zero
+#
+#
+k=length(beta)
+# now construct probabilities
+Xbeta=X%*%beta
+j=nrow(Xbeta)/n
+Xbeta=matrix(Xbeta,byrow=T,ncol=j)
+Prob=exp(Xbeta)
+iota=c(rep(1,j))
+denom=Prob%*%iota
+Prob=Prob/as.vector(denom)
+# draw y
+y=vector("double",n)
+ind=1:j
+for (i in 1:n) 
+        {
+        yvec=rmultinom(1,1,Prob[i,])
+        y[i]=ind%*%yvec
+        }
+
+list(y=y,X=X,beta=beta,prob=Prob)
+}
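+#
+# illustrative usage sketch (reuses a design matrix built by simmnl; not run):
+#   X=simmnl(j=3,n=100,beta=c(1,-1,.5,2))$X
+#   sim=simmnlwX(n=100,X=X,beta=c(1,-1,.5,2))
+#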
diff --git a/R/simmnp.R b/R/simmnp.R
new file mode 100755
index 0000000..d9188b9
--- /dev/null
+++ b/R/simmnp.R
@@ -0,0 +1,27 @@
+simmnp=
+function(X,p,n,beta,sigma) {
+#
+# simulate from MNP:  w_i=X_i*beta + e   e ~ MVN(0,Sigma)
+#                     y_i = j  if max(w_i) = w_ij
+#			       if max(w_i) < 0  y_i = p
+#	Sigma is p-1 x p-1
+#       X is n(p-1) x k
+#       beta is k x 1
+#
+#   create functions needed
+#
+indmax=function(x) {
+ind=1:length(x)
+ind[x==max(x)]
+}
+
+Xbeta=X%*%beta
+w=as.vector(crossprod(chol(sigma),matrix(rnorm((p-1)*n),ncol=n)))+ Xbeta
+w=matrix(w,ncol=(p-1),byrow=T)
+maxw=apply(w,1,max)
+y=apply(w,1,indmax)
+y=ifelse(maxw < 0,p,y)
+
+return(list(y=y,X=X,beta=beta,sigma=sigma))
+}
+
diff --git a/R/simmvp.R b/R/simmvp.R
new file mode 100755
index 0000000..ef2915a
--- /dev/null
+++ b/R/simmvp.R
@@ -0,0 +1,18 @@
+simmvp=
+function(X,p,n,beta,sigma)
+{
+#
+# simulate from MVP:  w_i=X_i*beta + e_i   e ~ MVN(0,Sigma)
+#                     y_ij = 1  if w_ij > 0
+#
+#	Sigma is p x p
+#       X is np x k
+#       beta is k x 1
+
+#
+Xbeta=X%*%beta
+w=as.vector(crossprod(chol(sigma),matrix(rnorm(p*n),ncol=n)))+ Xbeta
+y=ifelse(w<0,0,1)
+
+return(list(y=y,X=X,beta=beta,sigma=sigma))
+}
diff --git a/R/simnhlogit.R b/R/simnhlogit.R
new file mode 100755
index 0000000..52c7977
--- /dev/null
+++ b/R/simnhlogit.R
@@ -0,0 +1,51 @@
+simnhlogit=
+function(theta,lnprices,Xexpend) 
+{
+#   function to simulate non-homothetic logit model
+#       creates y  a n x 1 vector with indicator of choice (1,...,m)
+#	lnprices is n x m array of log-prices faced
+#       Xexpend is n x d array of variables predicting expenditure
+#
+#       non-homothetic model specifies ln(psi_i(u))= alpha_i - exp(k_i)u
+#
+#       structure of theta vector:
+#           alpha  (m x 1)
+#           k      (m x 1)
+#           gamma  (d x 1)   expenditure function coefficients (d=ncol(Xexpend))
+#           tau   -- scaling of v
+#	    
+root=function(c1,c2,tol,iterlim) {
+   u=double(length(c1))
+   .C("callroot",as.integer(length(c1)),as.double(c1),as.double(c2),as.double(tol),
+       as.integer(iterlim),r=as.double(u))$r}
+
+   m=ncol(lnprices)
+   n=nrow(lnprices)
+   d=ncol(Xexpend)
+   alpha=theta[1:m]
+   k=theta[(m+1):(2*m)]
+   gamma=theta[(2*m+1):(2*m+d)]
+   tau=theta[length(theta)]
+   iotam=c(rep(1,m))
+   c1=as.vector(Xexpend%*%gamma)%x%iotam-as.vector(t(lnprices))+alpha
+   c2=c(rep(exp(k),n))   
+   u=root(c1,c2,.0000001,20)
+   v=alpha - u*exp(k)-as.vector(t(lnprices))
+   vmat=matrix(v,ncol=m,byrow=TRUE)
+   vmat=tau*vmat
+   Prob=exp(vmat)
+   denom=Prob%*%iotam
+   Prob=Prob/as.vector(denom)
+# draw y
+   y=vector("double",n)
+   ind=1:m
+   for (i in 1:n) 
+        {
+        yvec=rmultinom(1,1,Prob[i,])
+        y[i]=ind%*%yvec
+        }
+
+return(list(y=y,Xexpend=Xexpend,lnprices=lnprices,theta=theta,prob=Prob))
+
+}
+
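
A usage sketch for simnhlogit (this one calls compiled code via .C("callroot"), so the installed bayesm package is assumed; all parameter values below are arbitrary illustrations):

library(bayesm)
set.seed(66)
p=3; n=100
theta=c(rep(1,p),     # alpha
        rep(0,p),     # k  (so exp(k)=1)
        c(2,1),       # gamma, one coef per column of Xexpend
        1)            # tau
lnprices=matrix(log(runif(n*p,min=1,max=2)),ncol=p)
Xexpend=cbind(rep(1,n),runif(n))
out=simnhlogit(theta,lnprices,Xexpend)
table(out$y)
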
diff --git a/inst/doc/Some_Useful_R_Pointers.pdf b/inst/doc/Some_Useful_R_Pointers.pdf
new file mode 100755
index 0000000..d47520d
Binary files /dev/null and b/inst/doc/Some_Useful_R_Pointers.pdf differ
diff --git a/inst/doc/Tips_On_Using_bayesm.pdf b/inst/doc/Tips_On_Using_bayesm.pdf
new file mode 100755
index 0000000..b1bfe71
Binary files /dev/null and b/inst/doc/Tips_On_Using_bayesm.pdf differ
diff --git a/man/breg.Rd b/man/breg.Rd
new file mode 100755
index 0000000..9fbebf0
--- /dev/null
+++ b/man/breg.Rd
@@ -0,0 +1,68 @@
+\name{breg}
+\alias{breg}
+\concept{bayes}
+\concept{regression}
+
+\title{Posterior Draws from a Univariate Regression with Unit Error Variance}
+\description{
+  \code{breg} makes one draw from the posterior of a univariate regression
+  (scalar dependent variable) given the error variance = 1.0.
+  A natural conjugate, normal prior is used.
+}
+\usage{
+breg(y, X, betabar, A)
+}
+
+\arguments{
+  \item{y}{ vector of values of dep variable. }
+  \item{X}{ n (length(y)) x k Design matrix. }
+  \item{betabar}{ k x 1 vector. Prior mean of regression coefficients. }
+  \item{A}{ Prior precision matrix. }
+}
+
+\details{
+  model: \eqn{y=x'\beta + e}.   \eqn{e} \eqn{\sim}{~} \eqn{N(0,1)}.  \cr
+
+  prior: \eqn{\beta} \eqn{\sim}{~} \eqn{N(betabar,A^{-1})}.
+}
+
+\value{
+  k x 1 vector containing a draw from the posterior distribution.
+}
+
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Allenby, McCulloch, and Rossi. \cr
+  \url{http://gsbwww.uchicago.edu/fac/peter.rossi/research/bsm.html}
+}
+
+\author{ Peter Rossi, Graduate School of Business, University of Chicago,
+  \email{Peter.Rossi at ChicagoGsb.edu}.
+}
+
+\section{Warning}{
+  This routine is a utility routine that does \strong{not} check the
+  input arguments for proper dimensions and type.
+}
+
+\examples{
+## simulate data
+set.seed(66)
+n=100
+X=cbind(rep(1,n),runif(n)); beta=c(1,2)
+y=X\%*\%beta+rnorm(n)
+##
+## set prior
+A=diag(c(.05,.05)); betabar=c(0,0)
+##
+## make draws from posterior
+R=1000
+betadraw=matrix(double(R*2),ncol=2)
+for (rep in 1:R) {betadraw[rep,]=breg(y,X,betabar,A)}
+##
+## summarize draws
+mat=apply(betadraw,2,quantile,probs=c(.01,.05,.5,.95,.99))
+mat=rbind(beta,mat); rownames(mat)[1]="beta"; print(mat)
+}
+\keyword{models}
+\keyword{regression}
+\keyword{distribution}
diff --git a/man/cgetC.Rd b/man/cgetC.Rd
new file mode 100755
index 0000000..a6f783b
--- /dev/null
+++ b/man/cgetC.Rd
@@ -0,0 +1,39 @@
+\name{cgetC}
+\alias{cgetC}
+
+\title{ Obtain A List of Cut-offs for Scale Usage Problems }
+\description{
+  \code{cgetC} obtains a list of censoring points, or cut-offs, used
+  in the ordinal multivariate probit model of Rossi et al (2001). 
+  This approach uses a quadratic parameterization of the cut-offs.
+  The model is useful for modeling correlated ordinal data on a
+  scale from 1, ..., k with different scale usage patterns. 
+}
+\usage{
+cgetC(e, k)
+}
+
+\arguments{
+  \item{e}{ quadratic parameter (>0 and less than 1) }
+  \item{k}{ items are on a scale from 1, \ldots, k }
+}
+\section{Warning}{
+  This is a utility function which implements \strong{no} error-checking.
+}
+\value{
+  A vector of k+1 cut-offs.
+}
+\references{
+ Rossi et al (2001), \dQuote{Overcoming Scale Usage Heterogeneity,} \emph{JASA}.
+}
+
+\author{ Rob McCulloch and Peter Rossi, Graduate School of Business, University of Chicago.
+  \email{Peter.Rossi at ChicagoGsb.edu}.
+}
+
+\seealso{ \code{\link{rscaleUsage}} }
+\examples{
+##
+cgetC(.1,10)
+}
+\keyword{ utilities }
diff --git a/man/condMom.Rd b/man/condMom.Rd
new file mode 100755
index 0000000..a3f3328
--- /dev/null
+++ b/man/condMom.Rd
@@ -0,0 +1,54 @@
+\name{condMom}
+\alias{condMom}
+\concept{normal distribution}
+\concept{conditional distribution}
+
+\title{ Computes Conditional Mean/Var of One Element of MVN given All Others }
+\description{
+  \code{condMom} computes the moments of the conditional distribution of the ith element of a
+  multivariate normal given all of the others.
+}
+\usage{
+condMom(x, mu, sigi, i)
+}
+\arguments{
+  \item{x}{ vector of values to condition on }
+  \item{mu}{ length(x) mean vector }
+  \item{sigi}{ inverse of length(x)-dim covariance matrix }
+  \item{i}{ conditional distribution of ith element }
+}
+\details{
+  \eqn{x} \eqn{\sim}{~} \eqn{MVN(mu,Sigma)}.
+
+  \code{condMom} computes the moments of \eqn{x_i} given \eqn{x_{-i}}.
+}
+\value{
+  a list containing:
+
+  \item{cmean }{ cond mean }
+  \item{cvar }{ cond variance}
+}
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Allenby, McCulloch, and Rossi. \cr
+  \url{http://gsbwww.uchicago.edu/fac/peter.rossi/research/bsm.html}
+}
+
+\author{ Peter Rossi, Graduate School of Business, University of Chicago,
+  \email{Peter.Rossi at ChicagoGsb.edu}.
+}
+
+\section{Warning}{
+  This routine is a utility routine that does \strong{not} check the
+  input arguments for proper dimensions and type.
+}
+
+\examples{
+##
+sig=matrix(c(1,.5,.5,.5,1,.5,.5,.5,1),ncol=3)
+sigi=chol2inv(chol(sig))
+mu=c(1,2,3)
+x=c(1,1,1)
+condMom(x,mu,sigi,2)
+
+}
+\keyword{ distribution }
diff --git a/man/createX.Rd b/man/createX.Rd
new file mode 100755
index 0000000..4222b47
--- /dev/null
+++ b/man/createX.Rd
@@ -0,0 +1,58 @@
+\name{createX}
+\alias{createX}
+\concept{multinomial logit}
+\concept{multinomial probit}
+
+\title{ Create X Matrix for Use in Multinomial Logit and Probit Routines }
+\description{
+  \code{createX} makes up an X matrix in the form expected by Multinomial
+  Logit (\code{\link{rmnlIndepMetrop}}) and Multinomial Probit (\code{\link{rmnpGibbs}})
+  routines.  Requires an array of alternative specific variables and/or an
+  array of "demographics" or variables constant across alternatives which
+  may vary across choice occasions. 
+}
+\usage{
+createX(p, na, nd, Xa, Xd, INT = TRUE, DIFF = FALSE, base = p)
+}
+
+\arguments{
+  \item{p}{ integer - number of choice alternatives }
+  \item{na}{ integer - number of alternative-specific vars in Xa  }
+  \item{nd}{ integer - number of "demographics" vars }
+  \item{Xa}{ n x p*na matrix of alternative-specific vars }
+  \item{Xd}{ n x nd matrix of "demographic" vars }
+  \item{INT}{ logical flag for inclusion of intercepts }
+  \item{DIFF}{ logical flag for differencing wrt to base alternative }
+  \item{base}{ integer - index of base choice alternative }
+  note: na,nd,Xa,Xd can be NULL to indicate lack of Xa or Xd variables.
+}
+
+\value{
+  X matrix -- n*(p-DIFF) x [(INT+nd)*(p-1) + na] matrix.
+}
+
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Allenby, McCulloch, and Rossi. \cr
+  \url{http://gsbwww.uchicago.edu/fac/peter.rossi/research/bsm.html}
+}
+
+\author{ Peter Rossi, Graduate School of Business, University of Chicago,
+  \email{Peter.Rossi at ChicagoGsb.edu}.
+}
+
+\seealso{ \code{\link{rmnlIndepMetrop}}, \code{\link{rmnpGibbs}} }
+\examples{
+na=2; nd=1; p=3
+vec=c(1,1.5,.5,2,3,1,3,4.5,1.5)
+Xa=matrix(vec,byrow=TRUE,ncol=3)
+Xa=cbind(Xa,-Xa)
+Xd=matrix(c(-1,-2,-3),ncol=1)
+createX(p=p,na=na,nd=nd,Xa=Xa,Xd=Xd)
+createX(p=p,na=na,nd=nd,Xa=Xa,Xd=Xd,base=1)
+createX(p=p,na=na,nd=nd,Xa=Xa,Xd=Xd,DIFF=TRUE)
+createX(p=p,na=na,nd=nd,Xa=Xa,Xd=Xd,DIFF=TRUE,base=2)
+createX(p=p,na=na,nd=NULL,Xa=Xa,Xd=NULL)
+createX(p=p,na=NULL,nd=nd,Xa=NULL,Xd=Xd)
+}
+\keyword{ array }
+\keyword{ utilities }
diff --git a/man/eMixMargDen.Rd b/man/eMixMargDen.Rd
new file mode 100755
index 0000000..d49c819
--- /dev/null
+++ b/man/eMixMargDen.Rd
@@ -0,0 +1,53 @@
+\name{eMixMargDen}
+\alias{eMixMargDen}
+\concept{normal mixtures}
+\concept{bayes}
+\concept{MCMC}
+
+\title{ Compute Marginal Densities of A Normal Mixture Averaged over MCMC Draws }
+\description{
+  \code{eMixMargDen} assumes that a multivariate mixture of normals has been fitted
+  via MCMC (using \code{rnmixGibbs}).  For each MCMC draw, the marginal densities
+  for each component in the multivariate mixture are computed on a user-supplied
+  grid and then averaged over draws. 
+}
+
+\usage{
+eMixMargDen(grid, probdraw, compdraw)
+}
+
+\arguments{
+  \item{grid}{  array of grid points, grid[,i] are ordinates for ith component }
+  \item{probdraw}{ array - each row of which contains a draw of probabilities of mixture comp }
+  \item{compdraw}{ list of lists of draws of mixture comp moments }
+}
+
+\details{
+  length(compdraw) is number of MCMC draws. \cr
+  compdraw[[i]] is a list of draws of mu and the inverse Chol root for each of the mixture components. \cr
+  compdraw[[i]][[j]] is jth component.  compdraw[[i]][[j]]\$mu is mean vector; compdraw[[i]][[j]]\$rooti
+  is the UL decomp of \eqn{Sigma^{-1}}.
+}
+
+\value{
+  an array of the same dimension as grid with density values.
+}
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Allenby, McCulloch, and Rossi. \cr
+  \url{http://gsbwww.uchicago.edu/fac/peter.rossi/research/bsm.html}
+}
+
+\author{ Peter Rossi, Graduate School of Business, University of Chicago,
+  \email{Peter.Rossi at ChicagoGsb.edu}.
+}
+
+\section{Warning}{
+  This routine is a utility routine that does \strong{not} check the
+  input arguments for proper dimensions and type. To avoid errors, call with
+  output from \code{\link{rnmixGibbs}}.
+}
+
+\seealso{ \code{\link{rnmixGibbs}}  }
+
+\keyword{ models }
+\keyword{ multivariate }
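
A hedged sketch of calling eMixMargDen directly, following the compdraw structure described in the \details section above (assumes bayesm is loaded; the two "draws" are identical stand-ins for real MCMC output from rnmixGibbs):

library(bayesm)
p=c(.5,.5)
comps=list(list(mu=c(0,0),rooti=diag(2)),
           list(mu=c(3,3),rooti=diag(2)))
grid=cbind(seq(-3,6,length=50),seq(-3,6,length=50))
probdraw=rbind(p,p)            # two (identical) pseudo-MCMC draws of the mixture probs
compdraw=list(comps,comps)     # matching draws of the component parameters
den=eMixMargDen(grid,probdraw,compdraw)
matplot(grid,den,type="l")
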
diff --git a/man/fsh.Rd b/man/fsh.Rd
new file mode 100755
index 0000000..87be325
--- /dev/null
+++ b/man/fsh.Rd
@@ -0,0 +1,21 @@
+\name{fsh}
+\alias{fsh}
+
+\title{ Flush Console Buffer }
+\description{
+  Flush contents of console buffer.  This function only has an effect on
+  the Windows GUI. 
+}
+\usage{
+fsh()
+}
+\value{
+  No value is returned.
+}
+
+\author{ Peter Rossi, Graduate School of Business, University of Chicago, 
+  \email{Peter.Rossi at ChicagoGsb.edu}.
+}
+
+\keyword{ utilities }
diff --git a/man/ghkvec.Rd b/man/ghkvec.Rd
new file mode 100755
index 0000000..651a09e
--- /dev/null
+++ b/man/ghkvec.Rd
@@ -0,0 +1,48 @@
+\name{ghkvec}
+\alias{ghkvec}
+\concept{multivariate normal distribution}
+\concept{integral}
+
+\title{ Compute GHK approximation to Multivariate Normal Integrals }
+\description{
+  \code{ghkvec} computes the GHK approximation to the integral of a
+  multivariate normal density over a half plane defined by a set
+  of truncation points.  
+}
+\usage{
+ghkvec(L, trunpt, above, r)
+}
+\arguments{
+  \item{L}{ lower triangular Cholesky root of Covariance matrix }
+  \item{trunpt}{ vector of truncation points}
+  \item{above}{ vector of indicators for truncation above(1) or below(0) }
+  \item{r}{ number of draws to use in GHK }
+}
+\value{
+  approximation to integral
+}
+\note{
+  \code{ghkvec} can accept a vector of truncation points and compute more than one
+  integral; it computes length(trunpt)/length(above) integrals, each with the same
+  Sigma and mean 0 but different truncation points.  The example below evaluates
+  two integrals at different truncation points.
+}
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Allenby, McCulloch, and Rossi, Chapter 3. \cr
+  \url{http://gsbwww.uchicago.edu/fac/peter.rossi/research/bsm.html}
+}
+
+\author{ Peter Rossi, Graduate School of Business, University of Chicago,
+  \email{Peter.Rossi at ChicagoGsb.edu}.
+}
+
+\examples{
+##
+
+Sigma=matrix(c(1,.5,.5,1),ncol=2)
+L=t(chol(Sigma))
+trunpt=c(0,0,1,1)
+above=c(1,1)
+ghkvec(L,trunpt,above,100)
+}
+\keyword{ distribution }
diff --git a/man/init.rmultiregfp.Rd b/man/init.rmultiregfp.Rd
new file mode 100755
index 0000000..c51bc7d
--- /dev/null
+++ b/man/init.rmultiregfp.Rd
@@ -0,0 +1,46 @@
+\name{init.rmultiregfp}
+\alias{init.rmultiregfp}
+
+\title{ Initialize Variables for Multivariate Regression Draw }
+\description{
+  \code{init.rmultiregfp} initializes variables which can be pre-computed
+  for draws from the posterior of a multivariate regression model.  
+  \code{init.rmultiregfp} should be called prior to use of \code{rmultiregfp}
+}
+\usage{
+init.rmultiregfp(X, A, Bbar, nu, V)
+}
+\arguments{
+  \item{X}{ Design matrix }
+  \item{A}{ k x k Prior precision matrix }
+  \item{Bbar}{ k x m Prior mean matrix }
+  \item{nu}{ degrees of freedom parameter for Sigma prior }
+  \item{V}{ location parameter for Sigma prior }
+}
+\details{
+  model: \eqn{Y = XB + U}. \eqn{u_i} \eqn{\sim}{~} \eqn{N(0,\Sigma)}.  \eqn{u_i} is the ith row of U.  \eqn{Y} is n x m. \eqn{X} is n x k. \eqn{B} is k x m. 
+
+  priors: \eqn{vec(B)} \eqn{\sim}{~} \eqn{N(vec(Bbar),\Sigma (x) A^{-1})} \cr
+          \eqn{\Sigma} \eqn{\sim}{~} \eqn{IW(nu,V)}.
+}
+\value{
+  A list containing \ldots
+  \item{IR }{Inverse of Cholesky Root of \eqn{(X'X + A)}}
+  \item{RA }{ Cholesky root of A }
+  \item{RABbar}{RA \%*\% Bbar}
+  \item{nu}{d.f. parm for IWishart prior}
+  \item{V}{location matrix for IWishart prior}
+}
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Allenby, McCulloch, and Rossi, Chapter 2. \cr
+  \url{http://gsbwww.uchicago.edu/fac/peter.rossi/research/bsm.html}
+}
+
+\author{ Peter Rossi, Graduate School of Business, University of Chicago,
+  \email{Peter.Rossi at ChicagoGsb.edu}.
+}
+
+\seealso{ \code{\link{rmultiregfp}}}
+
+\keyword{ utilities }
+\keyword{ regression }
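
A hedged sketch of a call to init.rmultiregfp with dimensions consistent with the model above (assumes bayesm is loaded; the data and prior settings are arbitrary, and the returned list would subsequently be passed to rmultiregfp):

library(bayesm)
set.seed(66)
n=100; m=2; k=3
X=cbind(rep(1,n),matrix(runif(n*(k-1)),ncol=k-1))
Bbar=matrix(0,k,m); A=.01*diag(k)
nu=m+3; V=nu*diag(m)
Fparm=init.rmultiregfp(X,A,Bbar,nu,V)   # pre-computed quantities for rmultiregfp
names(Fparm)
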
diff --git a/man/llmnl.Rd b/man/llmnl.Rd
new file mode 100755
index 0000000..f278033
--- /dev/null
+++ b/man/llmnl.Rd
@@ -0,0 +1,50 @@
+\name{llmnl}
+\alias{llmnl}
+\concept{multinomial logit}
+\concept{likelihood}
+
+\title{ Evaluate Log Likelihood for Multinomial Logit Model  }
+\description{
+  \code{llmnl} evaluates log-likelihood for the multinomial logit model.
+}
+\usage{
+llmnl(y, X, beta)
+}
+
+\arguments{
+  \item{y}{ n x 1 vector of obs on y (1,\ldots, p) }
+  \item{X}{ n*p x k Design matrix (use \code{createX} to make) }
+  \item{beta}{ k x 1 coefficient vector }
+}
+\details{
+  Let \eqn{mu_i=X_i}\%*\%\eqn{\beta}, then \eqn{Pr(y_i=j) = exp(mu_{ij})/\sum_k exp(mu_{ik})}.\cr
+  \eqn{X_i} is the submatrix of X corresponding to the
+  ith observation.  X has n*p rows.  
+  
+  Use \code{\link{createX}} to create X. 
+}
+\value{
+  value of log-likelihood (sum of log prob of observed multinomial outcomes).
+}
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Allenby, McCulloch, and Rossi. \cr
+  \url{http://gsbwww.uchicago.edu/fac/peter.rossi/research/bsm.html}
+}
+
+\author{ Peter Rossi, Graduate School of Business, University of Chicago,
+  \email{Peter.Rossi at ChicagoGsb.edu}.
+}
+
+\section{Warning}{
+  This routine is a utility routine that does \strong{not} check the
+  input arguments for proper dimensions and type.
+}
+
+\seealso{ \code{\link{createX}}, \code{\link{rmnlIndepMetrop}} }
+
+\examples{
+##
+\dontrun{ll=llmnl(y,X,beta)}
+}
+
+\keyword{ models }
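
A runnable sketch in place of the \dontrun example above (assumes bayesm is loaded; simmnl provides a compatible y and X, and the beta values are arbitrary):

library(bayesm)
set.seed(66)
beta=c(1,-1,0.5,1)
simout=simmnl(3,200,beta)          # 3 alternatives, 200 obs
llmnl(simout$y,simout$X,beta)      # log-likelihood at the true beta
llmnl(simout$y,simout$X,rep(0,4))  # compare with an all-zero coefficient vector
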
diff --git a/man/llmnp.Rd b/man/llmnp.Rd
new file mode 100755
index 0000000..abc7f4a
--- /dev/null
+++ b/man/llmnp.Rd
@@ -0,0 +1,66 @@
+\name{llmnp}
+\alias{llmnp}
+\concept{multinomial probit}
+\concept{likelihood}
+
+\title{ Evaluate Log Likelihood for Multinomial Probit Model  }
+\description{
+  \code{llmnp} evaluates the log-likelihood for the multinomial probit model.
+}
+
+\usage{
+llmnp(X, y, beta, Sigma, r)
+}
+
+\arguments{
+  \item{X}{ X is n*(p-1) x k array. X is from differenced system. }
+  \item{y}{ y is vector of n indicators of multinomial response (1, \ldots, p). }
+  \item{beta}{ k x 1 vector of coefficients }
+  \item{Sigma}{ (p-1) x (p-1) Covariance matrix of errors }
+  \item{r}{ number of draws used in GHK }
+}
+
+\details{
+  X is (p-1)*n x k matrix.  Use \code{\link{createX}} with \code{DIFF=TRUE} to create X. \cr
+
+  Model for each obs:  \eqn{w = Xbeta + e}.   \eqn{e} \eqn{\sim}{~} \eqn{N(0,Sigma)}. 
+
+   censoring mechanism: 
+
+      if \eqn{y=j (j<p),  w_j > max(w_{-j})} and \eqn{w_j >0}    \cr
+      if \eqn{y=p,  w < 0}                   \cr
+
+   To use GHK, we must transform so that these are rectangular regions,
+   e.g. if \eqn{y=1}, \eqn{w_1 > 0} and \eqn{w_1 - w_{-1} > 0}.
+
+   Define \eqn{A_j} such that, for j=1,\ldots,p-1, \eqn{A_j w = A_j mu + A_j e > 0} is equivalent to \eqn{y=j}.
+   Thus, if \eqn{y=j}, we have \eqn{A_j e > -A_j mu}.  The lower truncation point is \eqn{-A_j mu} and the covariance is \eqn{A_j Sigma t(A_j)}.
+   For \eqn{y=p}, \eqn{e < -mu}.
+
+
+}
+\value{
+  value of log-likelihood (sum of log prob of observed multinomial outcomes).
+}
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Allenby, McCulloch, and Rossi, Chapters 2 and 4. \cr
+  \url{http://gsbwww.uchicago.edu/fac/peter.rossi/research/bsm.html}
+}
+
+\author{ Peter Rossi, Graduate School of Business, University of Chicago,
+  \email{Peter.Rossi at ChicagoGsb.edu}.
+}
+
+\section{Warning}{
+  This routine is a utility routine that does \strong{not} check the
+  input arguments for proper dimensions and type.
+}
+
+\seealso{ \code{\link{createX}}, \code{\link{rmnpGibbs}} }
+
+\examples{
+##
+\dontrun{ll=llmnp(X,y,beta,Sigma,r)}
+}
+
+\keyword{ models }
diff --git a/man/llnhlogit.Rd b/man/llnhlogit.Rd
new file mode 100755
index 0000000..6d1b0aa
--- /dev/null
+++ b/man/llnhlogit.Rd
@@ -0,0 +1,55 @@
+\name{llnhlogit}
+\alias{llnhlogit}
+\concept{multinomial logit}
+\concept{non-homothetic utility}
+
+\title{ Evaluate Log Likelihood for non-homothetic Logit Model  }
+\description{
+  \code{llnhlogit} evaluates the log-likelihood for the non-homothetic logit model.
+}
+
+\usage{
+llnhlogit(theta, choice, lnprices, Xexpend)
+}
+
+\arguments{
+  \item{theta}{ parameter vector (see details section) }
+  \item{choice}{ n x 1 vector of choice (1, \ldots, p) }
+  \item{lnprices}{ n x p  array of log-prices}
+  \item{Xexpend}{ n x d array of vars predicting expenditure }
+}
+
+\details{
+  Non-homothetic logit model with: \eqn{ln(psi_i(U)) = alpha_i - e^{k_i}U} \cr
+
+  Structure of theta vector \cr
+  alpha: (p x 1) vector of utility intercepts.\cr
+  k: (p x 1) vector of utility rotation parms. \cr
+  gamma: (d x 1) -- expenditure variable coefs (d=ncol(Xexpend)).\cr
+  tau: (1 x 1) -- logit scale parameter.\cr
+}
+
+\value{
+  value of log-likelihood (sum of log prob of observed multinomial outcomes).
+}
+
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Allenby, McCulloch, and Rossi,Chapter 4. \cr
+  \url{http://gsbwww.uchicago.edu/fac/peter.rossi/research/bsm.html}
+}
+
+\author{ Peter Rossi, Graduate School of Business, University of Chicago
+  \email{Peter.Rossi at ChicagoGsb.edu}.
+}
+
+\section{Warning}{
+  This routine is a utility routine that does \strong{not} check the
+  input arguments for proper dimensions and type.
+}
+
+\seealso{ \code{\link{simnhlogit}} }
+\examples{
+##
+\dontrun{ll=llnhlogit(theta,choice,lnprices,Xexpend)}
+}
+\keyword{ models }
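
A runnable sketch in place of the \dontrun example above (assumes the installed bayesm package, since simnhlogit calls compiled code; the theta values are arbitrary illustrations):

library(bayesm)
set.seed(66)
p=3; n=100
theta=c(rep(1,p),rep(0,p),c(2,1),1)             # alpha, k, gamma, tau
lnprices=matrix(log(runif(n*p,min=1,max=2)),ncol=p)
Xexpend=cbind(rep(1,n),runif(n))
simout=simnhlogit(theta,lnprices,Xexpend)
llnhlogit(theta,simout$y,lnprices,Xexpend)      # log-likelihood at the true theta
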
diff --git a/man/lndIChisq.Rd b/man/lndIChisq.Rd
new file mode 100755
index 0000000..e60be72
--- /dev/null
+++ b/man/lndIChisq.Rd
@@ -0,0 +1,44 @@
+\name{lndIChisq}
+\alias{lndIChisq}
+\concept{Inverted Chi-squared Distribution}
+\concept{density}
+
+\title{ Compute Log of Inverted Chi-Squared Density }
+\description{
+  \code{lndIChisq} computes the log of an Inverted Chi-Squared Density.
+}
+\usage{
+lndIChisq(nu, ssq, x)
+}
+\arguments{
+  \item{nu}{ d.f. parameter }
+  \item{ssq}{ scale parameter }
+  \item{x}{ ordinate for density evaluation }
+}
+\details{
+  \eqn{Z= \nu*ssq/\chi^2_{\nu}}, \eqn{Z} \eqn{\sim}{~} Inverted Chi-Squared.  \cr
+  \code{lndIChisq} computes the complete log-density, including normalizing constants.
+}
+\value{
+  log density value
+}
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Allenby, McCulloch, and Rossi, Chapter 2. \cr
+  \url{http://gsbwww.uchicago.edu/fac/peter.rossi/research/bsm.html}
+}
+
+\author{ Peter Rossi, Graduate School of Business, University of Chicago,
+  \email{Peter.Rossi at ChicagoGsb.edu}.
+}
+
+\section{Warning}{
+  This routine is a utility routine that does \strong{not} check the
+  input arguments for proper dimensions and type.
+}
+
+\seealso{ \code{\link{dchisq}} }
+\examples{
+##
+lndIChisq(3,1,2)
+}
+\keyword{ distribution }
diff --git a/man/lndIWishart.Rd b/man/lndIWishart.Rd
new file mode 100755
index 0000000..7461505
--- /dev/null
+++ b/man/lndIWishart.Rd
@@ -0,0 +1,46 @@
+\name{lndIWishart}
+\alias{lndIWishart}
+\concept{Inverted Wishart distribution}
+\concept{density}
+
+\title{ Compute Log of Inverted Wishart Density }
+\description{
+  \code{lndIWishart} computes the log of an Inverted Wishart density.
+}
+\usage{
+lndIWishart(nu, S, IW)
+}
+\arguments{
+  \item{nu}{ d.f. parameter }
+  \item{S}{ "location" parameter  }
+  \item{IW}{ ordinate for density evaluation }
+}
+\details{
+  If \eqn{W} \eqn{\sim}{~} Wishart(nu,V^{-1}), then \eqn{Z=W^{-1}} \eqn{\sim}{~} Inverted Wishart(nu,V).    \cr
+  \code{lndIWishart} computes the complete log-density, including normalizing constants.
+}
+\value{
+  log density value
+}
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Allenby, McCulloch, and Rossi, Chapter 2. \cr
+  \url{http://gsbwww.uchicago.edu/fac/peter.rossi/research/bsm.html}
+}
+
+\author{ Peter Rossi, Graduate School of Business, University of Chicago,
+  \email{Peter.Rossi at ChicagoGsb.edu}.
+}
+
+\section{Warning}{
+  This routine is a utility routine that does \strong{not} check the
+  input arguments for proper dimensions and type.
+}
+
+\seealso{ \code{\link{rwishart}} }
+\examples{
+##
+lndIWishart(5,diag(3),(diag(3)+.5))
+}
+\keyword{ distribution }
+
+
diff --git a/man/lndMvn.Rd b/man/lndMvn.Rd
new file mode 100755
index 0000000..d74c40c
--- /dev/null
+++ b/man/lndMvn.Rd
@@ -0,0 +1,48 @@
+\name{lndMvn}
+\alias{lndMvn}
+\concept{multivariate normal distribution}
+\concept{density}
+
+\title{ Compute Log of Multivariate Normal Density }
+\description{
+  \code{lndMvn} computes the log of a Multivariate Normal Density.
+
+}
+
+\usage{
+lndMvn(x, mu, rooti)
+}
+
+\arguments{
+  \item{x}{ density ordinate }
+  \item{mu}{ mu vector }
+  \item{rooti}{ inv of Cholesky root of Sigma }
+}
+\details{
+  \eqn{z} \eqn{\sim}{~} \eqn{N(mu,\Sigma)}
+  note: does not include full normalizing constant 
+}
+\value{
+  log density value
+}
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Allenby, McCulloch, and Rossi, Chapter 2. \cr
+  \url{http://gsbwww.uchicago.edu/fac/peter.rossi/research/bsm.html}
+}
+
+\author{ Peter Rossi, Graduate School of Business, University of Chicago,
+  \email{Peter.Rossi at ChicagoGsb.edu}.
+}
+
+\section{Warning}{
+  This routine is a utility routine that does \strong{not} check the
+  input arguments for proper dimensions and type.
+}
+
+\seealso{ \code{\link{lndMvst}} }
+\examples{
+##
+Sigma=matrix(c(1,.5,.5,1),ncol=2)
+lndMvn(x=c(rep(0,2)),mu=c(rep(0,2)),rooti=backsolve(chol(Sigma),diag(2)))
+}
+\keyword{ distribution }
diff --git a/man/lndMvst.Rd b/man/lndMvst.Rd
new file mode 100755
index 0000000..4fe2feb
--- /dev/null
+++ b/man/lndMvst.Rd
@@ -0,0 +1,49 @@
+\name{lndMvst}
+\alias{lndMvst}
+\concept{multivariate t distribution}
+\concept{student-t distribution}
+\concept{density}
+
+\title{ Compute Log of Multivariate Student-t Density }
+\description{
+  \code{lndMvst} computes the log of a Multivariate Student-t Density.
+}
+\usage{
+lndMvst(x, nu, mu, rooti)
+}
+
+\arguments{
+  \item{x}{ density ordinate }
+  \item{nu}{ d.f. parameter }
+  \item{mu}{ mu vector }
+  \item{rooti}{ inv of Cholesky root of Sigma }
+}
+
+\details{
+  \eqn{z} \eqn{\sim}{~} \eqn{MVst(mu,nu,\Sigma)}
+  note: does not include full normalizing constant 
+}
+\value{
+  log density value
+}
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Allenby, McCulloch, and Rossi, Chapter 2. \cr
+  \url{http://gsbwww.uchicago.edu/fac/peter.rossi/research/bsm.html}
+}
+
+\author{ Peter Rossi, Graduate School of Business, University of Chicago,
+  \email{Peter.Rossi at ChicagoGsb.edu}.
+}
+
+\section{Warning}{
+  This routine is a utility routine that does \strong{not} check the
+  input arguments for proper dimensions and type.
+}
+
+\seealso{ \code{\link{lndMvn}} }
+\examples{
+##
+Sigma=matrix(c(1,.5,.5,1),ncol=2)
+lndMvst(x=c(rep(0,2)),nu=4,mu=c(rep(0,2)),rooti=backsolve(chol(Sigma),diag(2)))
+}
+\keyword{ distribution }
diff --git a/man/logMargDenNR.Rd b/man/logMargDenNR.Rd
new file mode 100755
index 0000000..5d3893c
--- /dev/null
+++ b/man/logMargDenNR.Rd
@@ -0,0 +1,35 @@
+\name{logMargDenNR}
+\alias{logMargDenNR}
+\concept{Newton-Raftery approximation}
+\concept{bayes}
+\concept{marginal likelihood}
+\concept{density}
+
+\title{ Compute Log Marginal Density Using Newton-Raftery Approx }
+\description{
+  \code{logMargDenNR} computes log marginal density using the Newton-Raftery approximation.\cr
+  Note: this approximation can be influenced by outliers in the vector of log-likelihoods. Use 
+  with \strong{care} . 
+}
+\usage{
+logMargDenNR(ll)
+}
+\arguments{
+  \item{ll}{ vector of log-likelihoods evaluated at length(ll) MCMC draws }
+}
+\value{
+  approximation to log marginal density value.
+}
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Allenby, McCulloch, and Rossi, Chapter 6. \cr
+  \url{http://gsbwww.uchicago.edu/fac/peter.rossi/research/bsm.html}
+}
+\author{ Peter Rossi, Graduate School of Business, University of Chicago,
+  \email{Peter.Rossi at ChicagoGsb.edu}.
+}
+\section{Warning}{
+  This routine is a utility routine that does \strong{not} check the
+  input arguments for proper dimensions and type.
+}
+
+\keyword{ distribution }
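
A toy sketch of logMargDenNR (assumes bayesm is loaded): log-likelihoods of iid N(mu,1) data are evaluated at approximate posterior draws of mu and passed to the function.

library(bayesm)
set.seed(66)
y=rnorm(100,mean=1)                                    # iid N(mu,1) data
mudraw=rnorm(500,mean=mean(y),sd=1/sqrt(length(y)))    # approximate posterior draws of mu
ll=sapply(mudraw,function(mu) sum(dnorm(y,mean=mu,log=TRUE)))
logMargDenNR(ll)
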
diff --git a/man/mixDen.Rd b/man/mixDen.Rd
new file mode 100755
index 0000000..73567ab
--- /dev/null
+++ b/man/mixDen.Rd
@@ -0,0 +1,46 @@
+\name{mixDen}
+\alias{mixDen}
+\concept{normal mixture}
+\concept{marginal distribution}
+\concept{density}
+
+\title{ Compute Marginal Density for Multivariate Normal Mixture }
+\description{
+  \code{mixDen} computes the marginal density for each component of
+  a normal mixture at each of the points on a user-specified grid.
+}
+\usage{
+mixDen(x, p, comps)
+}
+\arguments{
+  \item{x}{ array - ith column gives grid points for ith variable }
+  \item{p}{ vector of mixture component probabilities }
+  \item{comps}{ list of lists of components for normal mixture }
+}
+\details{
+  length(comps) is the number of mixture components.  comps[[j]] is a list of
+  parameters of the jth component. comps[[j]]\$mu is mean vector; comps[[j]]\$rooti
+  is the UL decomp of \eqn{Sigma^{-1}}.
+}
+
+\value{
+  an array of the same dimension as grid with density values.
+}
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Allenby, McCulloch, and Rossi, Chapter 5. \cr
+  \url{http://gsbwww.uchicago.edu/fac/peter.rossi/research/bsm.html}
+}
+
+\author{ Peter Rossi, Graduate School of Business, University of Chicago
+  \email{Peter.Rossi at ChicagoGsb.edu}.
+}
+
+\section{Warning}{
+  This routine is a utility routine that does \strong{not} check the
+  input arguments for proper dimensions and type.
+}
+
+\seealso{ \code{\link{rnmixGibbs}}  }
+
+\keyword{ models }
+\keyword{ multivariate }
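
A hedged sketch of calling mixDen directly, following the comps structure described above (assumes bayesm is loaded; the mixture parameters are arbitrary):

library(bayesm)
p=c(.4,.6)
comps=list(list(mu=c(0,0),rooti=diag(2)),
           list(mu=c(3,3),rooti=diag(2)))
x=cbind(seq(-3,6,length=50),seq(-3,6,length=50))
den=mixDen(x,p,comps)
matplot(x,den,type="l")
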
diff --git a/man/mnlHess.Rd b/man/mnlHess.Rd
new file mode 100755
index 0000000..84674d0
--- /dev/null
+++ b/man/mnlHess.Rd
@@ -0,0 +1,44 @@
+\name{mnlHess}
+\alias{mnlHess}
+\concept{multinomial logit}
+\concept{hessian}
+
+
+\title{ Computes -Expected Hessian for Multinomial Logit}
+\description{
+  \code{mnlHess} computes -Expected[Hessian] for Multinomial Logit Model
+}
+\usage{
+mnlHess(y, X, beta)
+}
+\arguments{
+  \item{y}{ n x 1 vector of choices, (1, \ldots,p) }
+  \item{X}{ n*p x k Design matrix }
+  \item{beta}{ k x 1 vector of coefficients }
+}
+\details{
+  See \code{\link{llmnl}} for information on structure of X array.  Use \code{\link{createX}} to make X.
+}
+\value{
+  k x k matrix
+}
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Allenby, McCulloch, and Rossi, Chapter 3. \cr
+  \url{http://gsbwww.uchicago.edu/fac/peter.rossi/research/bsm.html}
+}
+
+\author{ Peter Rossi, Graduate School of Business, University of Chicago,
+  \email{Peter.Rossi at ChicagoGsb.edu}.
+}
+
+\section{Warning}{
+  This routine is a utility routine that does \strong{not} check the
+  input arguments for proper dimensions and type.
+}
+
+\seealso{ \code{\link{llmnl}}, \code{\link{createX}}, \code{\link{rmnlIndepMetrop}} }
+\examples{
+##
+\dontrun{mnlHess(y,X,beta)}
+}
+\keyword{ models }
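
A runnable sketch in place of the \dontrun example above (assumes bayesm is loaded; y and X come from simmnl and the beta values are arbitrary):

library(bayesm)
set.seed(66)
beta=c(1,-1,0.5,1)
simout=simmnl(3,200,beta)
H=mnlHess(simout$y,simout$X,beta)   # 4 x 4 -Expected[Hessian]
chol2inv(chol(H))                   # large-sample covariance approximation for the MLE
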
diff --git a/man/momMix.Rd b/man/momMix.Rd
new file mode 100755
index 0000000..e8f382d
--- /dev/null
+++ b/man/momMix.Rd
@@ -0,0 +1,51 @@
+\name{momMix}
+\alias{momMix}
+\concept{mcmc}
+\concept{normal mixture}
+\concept{posterior moments}
+
+\title{ Compute Posterior Expectation of Normal Mixture Model Moments }
+\description{
+  \code{momMix} averages the moments of a normal mixture model over MCMC draws.
+}
+\usage{
+momMix(probdraw, compdraw)
+}
+
+\arguments{
+  \item{probdraw}{ R x ncomp array of draws of mixture probs }
+  \item{compdraw}{ list of length R of draws of mixture component moments }
+}
+\details{
+  R is the number of MCMC draws in argument list above. \cr
+  ncomp is the number of mixture components fitted.\cr
+  compdraw is a list of lists of lists with mixture components.  \cr
+  compdraw[[i]] is ith draw. \cr
+  compdraw[[i]][[j]][[1]] is the mean parameter vector for the jth component, ith MCMC draw. \cr
+  compdraw[[i]][[j]][[2]] is the UL decomposition of \eqn{Sigma^{-1}} for the jth component, ith MCMC draw. 
+ 
+}
+\value{
+  a list of the following items \dots
+  \item{mu }{Posterior Expectation of Mean}
+  \item{sigma }{Posterior Expectation of Covariance Matrix}
+  \item{sd }{Posterior Expectation of Vector of Standard Deviations}
+  \item{corr }{Posterior Expectation of Correlation Matrix}
+}
+
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Allenby, McCulloch, and Rossi, Chapter 5. \cr
+  \url{http://gsbwww.uchicago.edu/fac/peter.rossi/research/bsm.html}
+}
+
+\author{ Peter Rossi, Graduate School of Business, University of Chicago,
+  \email{Peter.Rossi at ChicagoGsb.edu}.
+}
+
+\section{Warning}{
+  This routine is a utility routine that does \strong{not} check the
+  input arguments for proper dimensions and type.
+}
+\seealso{ \code{\link{rmixGibbs}}}
+
+\keyword{ multivariate }
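
A hedged sketch of momMix on a single "draw" of a mixture, following the structure described above (assumes bayesm is loaded; this mirrors the use of momMix in the rhierMnlRwMixture example later in this import):

library(bayesm)
p=c(.4,.6)
comps=list(list(mu=c(0,0),rooti=diag(2)),
           list(mu=c(3,3),rooti=diag(2)))
momMix(matrix(p,nrow=1),list(comps))   # treat one draw of the mixture as the whole MCMC output
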
diff --git a/man/nmat.Rd b/man/nmat.Rd
new file mode 100755
index 0000000..2d19b1e
--- /dev/null
+++ b/man/nmat.Rd
@@ -0,0 +1,37 @@
+\name{nmat}
+\alias{nmat}
+\title{ Convert Covariance Matrix to a Correlation Matrix }
+\description{
+  \code{nmat} converts a covariance matrix (stored as a vector, col by col) to a correlation matrix (also stored
+   as a vector).
+}
+\usage{
+nmat(vec)
+}
+\arguments{
+  \item{vec}{ k x k Cov matrix stored as a k*k x 1 vector (col by col) }
+}
+\details{
+  This routine is often used with apply to convert an R x (k*k) array of covariance MCMC draws to correlations, as in \code{corrdraws=apply(vardraws,1,nmat)}.
+}
+\value{
+  k*k x 1 vector with correlation matrix
+}
+\author{ Peter Rossi, Graduate School of Business, University of Chicago,
+  \email{Peter.Rossi at ChicagoGsb.edu}.
+}
+
+\section{Warning}{
+  This routine is a utility routine that does \strong{not} check the
+  input arguments for proper dimensions and type.
+}
+\examples{
+##
+set.seed(66)
+X=matrix(rnorm(200,4),ncol=2)
+Varmat=var(X)
+nmat(as.vector(Varmat))
+}
+\keyword{ utilities }
+\keyword{ array }
+
diff --git a/man/numEff.Rd b/man/numEff.Rd
new file mode 100755
index 0000000..0ff4a51
--- /dev/null
+++ b/man/numEff.Rd
@@ -0,0 +1,44 @@
+\name{numEff}
+\alias{numEff}
+\concept{numerical efficiency}
+
+\title{ Compute Numerical Standard Error and Relative Numerical Efficiency }
+\description{
+  \code{numEff} computes the numerical standard error for the mean of a vector of draws as well as the relative
+  numerical efficiency (ratio of variance of mean of this time series process relative to iid sequence).
+}
+
+\usage{
+numEff(x, m = max(length(x), (100/sqrt(5000)) * sqrt(length(x))))
+}
+
+\arguments{
+  \item{x}{ R x 1 vector of draws }
+  \item{m}{ number of lags for autocorrelations }
+}
+
+\details{
+  The default number of lags is chosen so that m = 100 when R = 5000 and increases as sqrt(R).
+}
+\value{
+  \item{stderr }{standard error of the mean of x}
+  \item{f }{ variance ratio (relative numerical efficiency) }
+}
+
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Allenby, McCulloch, and Rossi, Chapter 3. \cr
+  \url{http://gsbwww.uchicago.edu/fac/peter.rossi/research/bsm.html}
+}
+
+\author{ Peter Rossi, Graduate School of Business, University of Chicago,
+  \email{Peter.Rossi at ChicagoGsb.edu}.
+}
+
+\section{Warning}{
+  This routine is a utility routine that does \strong{not} check the
+  input arguments for proper dimensions and type.
+}
+
+\keyword{ ts }
+\keyword{ utilities }
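
A small sketch of numEff on an autocorrelated series (assumes bayesm is loaded; the AR(1) series stands in for correlated MCMC draws, and m is passed explicitly):

library(bayesm)
set.seed(66)
R=5000; rho=.9
x=double(R)
for(i in 2:R) x[i]=rho*x[i-1]+rnorm(1)   # AR(1) series mimicking autocorrelated MCMC draws
numEff(x,m=100)
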
diff --git a/man/rbprobitGibbs.Rd b/man/rbprobitGibbs.Rd
new file mode 100755
index 0000000..b4ce26b
--- /dev/null
+++ b/man/rbprobitGibbs.Rd
@@ -0,0 +1,79 @@
+\name{rbprobitGibbs}
+\alias{rbprobitGibbs}
+\concept{bayes}
+\concept{MCMC}
+\concept{probit}
+\concept{Gibbs Sampling}
+
+\title{ Gibbs Sampler (Albert and Chib) for Binary Probit }
+\description{
+  \code{rbprobitGibbs} implements the Albert and Chib Gibbs Sampler for the binary probit model.
+
+}
+\usage{
+rbprobitGibbs(Data, Prior, Mcmc)
+}
+
+\arguments{
+  \item{Data}{ list(X,y)}
+  \item{Prior}{ list(betabar,A)}
+  \item{Mcmc}{ list(R,keep)  }
+}
+
+\details{
+  Model: \eqn{y = X\beta + e}.  \eqn{e} \eqn{\sim}{~} \eqn{N(0,I)}. 
+
+  Prior:  \eqn{\beta} \eqn{\sim}{~} \eqn{N(betabar,A^{-1})}. 
+
+  List arguments contain  
+  \describe{
+    \item{\code{X}}{Design Matrix}
+    \item{\code{y}}{n x 1 vector of observations, (0 or 1)} 
+    \item{\code{betabar}}{k x 1 prior mean (def: 0)}
+    \item{\code{A}}{k x k prior precision matrix (def: .01I)} 
+    \item{\code{R}}{ number of MCMC draws }
+    \item{\code{keep}}{ thinning parameter - keep every keepth draw (def: 1)}
+  }
+}
+
+\value{
+  \item{betadraw }{R/keep x k array of betadraws}
+}
+
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Allenby, McCulloch, and Rossi, Chapter 3. \cr
+  \url{http://gsbwww.uchicago.edu/fac/peter.rossi/research/bsm.html}
+}
+
+\author{ Peter Rossi, Graduate School of Business, University of Chicago,
+  \email{Peter.Rossi at ChicagoGsb.edu}.
+}
+
+\seealso{ \code{\link{rmnpGibbs}} }
+\examples{
+## rbprobitGibbs example
+##
+set.seed(66)
+simbprobit=
+function(X,beta) {
+##  function to simulate from binary probit including x variable
+y=ifelse((X\%*\%beta+rnorm(nrow(X)))<0,0,1)
+list(X=X,y=y,beta=beta)
+}
+
+nobs=200
+X=cbind(rep(1,nobs),runif(nobs),runif(nobs))
+beta=c(0,1,-1)
+nvar=ncol(X)
+simout=simbprobit(X,beta)
+
+Data=list(X=simout$X,y=simout$y)
+Mcmc=list(R=2000,keep=1)
+
+out=rbprobitGibbs(Data=Data,Mcmc=Mcmc)
+
+cat(" Betadraws ",fill=TRUE)
+mat=apply(out$betadraw,2,quantile,probs=c(.01,.05,.5,.95,.99))
+mat=rbind(beta,mat); rownames(mat)[1]="beta"; print(mat)
+}
+\keyword{ models }
diff --git a/man/rdirichlet.Rd b/man/rdirichlet.Rd
new file mode 100755
index 0000000..c2b2e8b
--- /dev/null
+++ b/man/rdirichlet.Rd
@@ -0,0 +1,40 @@
+\name{rdirichlet}
+\alias{rdirichlet}
+\concept{dirichlet distribution}
+\concept{simulation}
+
+\title{ Draw From Dirichlet Distribution }
+\description{
+  \code{rdirichlet} draws from Dirichlet
+}
+\usage{
+rdirichlet(alpha)
+}
+\arguments{
+  \item{alpha}{ vector of Dirichlet parms (must be > 0)}
+}
+
+\value{
+  Vector of draws from Dirichlet
+}
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Allenby, McCulloch, and Rossi. \cr
+  \url{http://gsbwww.uchicago.edu/fac/peter.rossi/research/bsm.html}
+}
+
+\author{ Peter Rossi, Graduate School of Business, University of Chicago,
+  \email{Peter.Rossi at ChicagoGsb.edu}.
+}
+
+\section{Warning}{
+  This routine is a utility routine that does \strong{not} check the
+  input arguments for proper dimensions and type.
+}
+
+\examples{
+##
+set.seed(66)
+rdirichlet(c(rep(3,5)))
+}
+\keyword{ distribution }
+
diff --git a/man/rhierLinearModel.Rd b/man/rhierLinearModel.Rd
new file mode 100755
index 0000000..3e0d77b
--- /dev/null
+++ b/man/rhierLinearModel.Rd
@@ -0,0 +1,109 @@
+\name{rhierLinearModel}
+\alias{rhierLinearModel}
+\concept{bayes}
+\concept{MCMC}
+\concept{Gibbs Sampling}
+\concept{hierarchical models}
+\concept{linear model}
+
+\title{ Gibbs Sampler for Hierarchical Linear Model }
+\description{
+  \code{rhierLinearModel} implements a Gibbs Sampler for hierarchical linear models.
+}
+\usage{
+rhierLinearModel(Data, Prior, Mcmc)
+}
+\arguments{
+  \item{Data}{ list(regdata,Z)}
+  \item{Prior}{ list(Deltabar,A,nu.e,ssq,nu,V)}
+  \item{Mcmc}{ list(R,keep)}
+}
+\details{
+  Model: length(regdata) regression equations. \cr
+        \eqn{y_i = X_ibeta_i + e_i}. \eqn{e_i} \eqn{\sim}{~} \eqn{N(0,tau_i)}.  nvar X vars in each equation. 
+
+ Priors:\cr
+        \eqn{tau_i} \eqn{\sim}{~} nu.e*\eqn{ssq_i/\chi^2_{nu.e}}.  \eqn{tau_i} is the variance of \eqn{e_i}.\cr
+        \eqn{beta_i} \eqn{\sim}{~} N(ZDelta[i,],\eqn{V_{beta}}). \cr
+               Note:  ZDelta is the matrix Z * Delta; [i,] refers to ith row of this product.
+
+          \eqn{vec(Delta)} given \eqn{V_{beta}} \eqn{\sim}{~} \eqn{N(vec(Deltabar),V_{beta} (x) A^{-1})}.\cr
+          \eqn{V_{beta}} \eqn{\sim}{~} \eqn{IW(nu,V)}. \cr
+              \eqn{Delta, Deltabar} are nz x nvar.  \eqn{A} is nz x nz.  \eqn{V_{beta}} is nvar x nvar.
+        
+          Note: if you don't have any z vars, set Z=iota (nreg x 1).
+
+  List arguments contain:
+  \itemize{
+    \item{\code{regdata}}{ list of lists with X,y matrices for each of length(regdata) regressions}
+    \item{\code{regdata[[i]]$X}}{ X matrix for equation i }
+    \item{\code{regdata[[i]]$y}}{ y vector for equation i }
+    \item{\code{Deltabar}}{ nz x nvar matrix of prior means (def: 0)}
+    \item{\code{A}}{ nz x nz matrix for prior precision (def: .01I)}
+    \item{\code{nu.e}}{ d.f. parm for regression error variance prior (def: 3)}
+    \item{\code{ssq}}{ scale parm for regression error var prior (def: var(\eqn{y_i}))}
+    \item{\code{nu}}{ d.f. parm for Vbeta prior (def: nvar+3)}
+    \item{\code{V}}{ Scale location matrix for Vbeta prior (def: nu*I)}
+    \item{\code{R}}{ number of MCMC draws}
+    \item{\code{keep}}{ MCMC thinning parm: keep every keepth draw (def: 1)}
+   }
+}
+\value{
+  a list containing
+  \item{betadraw}{nreg x nvar x R/keep array of individual regression coef draws}
+  \item{taudraw}{R/keep x nreg array of error variance draws}
+  \item{Deltadraw}{R/keep x nz x nvar array of Deltadraws}
+  \item{Vbetadraw}{R/keep x nvar*nvar array of Vbeta draws}
+}
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Allenby, McCulloch, and Rossi, Chapter 3. \cr
+  \url{http://gsbwww.uchicago.edu/fac/peter.rossi/research/bsm.html}
+}
+
+\author{ Peter Rossi, Graduate School of Business, University of Chicago,
+  \email{Peter.Rossi at ChicagoGsb.edu}.
+}
+\examples{
+##
+if(nchar(Sys.getenv("LONG_TEST")) != 0) # set env var LONG_TEST to run
+{
+
+nreg=100; nobs=100; nvar=3
+Vbeta=matrix(c(1,.5,0,.5,2,.7,0,.7,1),ncol=3)
+Z=cbind(c(rep(1,nreg)),3*runif(nreg)); Z[,2]=Z[,2]-mean(Z[,2])
+nz=ncol(Z)
+Delta=matrix(c(1,-1,2,0,1,0),ncol=2)
+Delta=t(Delta) # first row of Delta is means of betas
+Beta=matrix(rnorm(nreg*nvar),nrow=nreg)\%*\%chol(Vbeta)+Z\%*\%Delta
+tau=.1
+iota=c(rep(1,nobs))
+regdata=NULL
+for (reg in 1:nreg) { X=cbind(iota,matrix(runif(nobs*(nvar-1)),ncol=(nvar-1)))
+	y=X\%*\%Beta[reg,]+sqrt(tau)*rnorm(nobs); regdata[[reg]]=list(y=y,X=X) }
+
+nu.e=3
+ssq=NULL
+for(reg in 1:nreg) {ssq[reg]=var(regdata[[reg]]$y)}
+nu=nvar+3
+V=nu*diag(c(rep(1,nvar)))
+A=diag(c(rep(.01,nz)),ncol=nz)
+Deltabar=matrix(c(rep(0,nz*nvar)),nrow=nz)
+
+R=1000
+
+Data=list(regdata=regdata,Z=Z)
+Prior=list(Deltabar=Deltabar,A=A,nu.e=nu.e,ssq=ssq,nu=nu,V=V)
+Mcmc=list(R=R,keep=1)
+out=rhierLinearModel(Data=Data,Mcmc=Mcmc)
+
+cat(" Deltadraws ",fill=TRUE)
+mat=apply(out$Deltadraw,2,quantile,probs=c(.01,.05,.5,.95,.99))
+mat=rbind(as.vector(Delta),mat); rownames(mat)[1]="delta"; print(mat)
+cat(" Vbetadraws ",fill=TRUE)
+mat=apply(out$Vbetadraw,2,quantile,probs=c(.01,.05,.5,.95,.99))
+mat=rbind(as.vector(Vbeta),mat); rownames(mat)[1]="Vbeta"; print(mat)
+
+}
+
+}
+\keyword{ regression }
diff --git a/man/rhierMnlRwMixture.Rd b/man/rhierMnlRwMixture.Rd
new file mode 100755
index 0000000..215f5d1
--- /dev/null
+++ b/man/rhierMnlRwMixture.Rd
@@ -0,0 +1,211 @@
+\name{rhierMnlRwMixture}
+\alias{rhierMnlRwMixture}
+\concept{bayes}
+\concept{MCMC}
+\concept{Multinomial Logit}
+\concept{mixture of normals}
+\concept{normal mixture}
+\concept{heterogeneity}
+\concept{hierarchical models}
+
+\title{ MCMC Algorithm for Hierarchical Multinomial Logit with Mixture of Normals Heterogeneity}
+\description{
+  \code{rhierMnlRwMixture} is a MCMC algorithm for a hierarchical multinomial logit with a mixture of normals 
+  heterogeneity distribution.  This is a hybrid Gibbs Sampler with a RW Metropolis step for the MNL 
+  coefficients for each panel unit.
+}
+\usage{
+rhierMnlRwMixture(Data, Prior, Mcmc)
+}
+\arguments{
+  \item{Data}{ list(m,lgtdata,Z) (note: Z is optional) }
+  \item{Prior}{ list(deltabar,Ad,mubar,Amu,nu,V,ncomp) (note: all but ncomp are optional)}
+  \item{Mcmc}{ list(s,c,R,keep) (note: all but R are optional)}
+}
+\details{
+  Model: \cr
+  \eqn{y_i} \eqn{\sim}{~} \eqn{MNL(X_i,theta_i)}.  i=1,\ldots, length(lgtdata). \eqn{theta_i} is nvar x 1.
+
+  \eqn{theta_i}= ZD[i,] + \eqn{u_i}. \cr
+  Note: here ZD refers to Z\%*\%D; ZD[i,] is the ith row of this product.\cr
+  D is an nvar x nz array. 
+
+  \eqn{u_i} \eqn{\sim}{~} \eqn{N(mu_{ind},Sigma_{ind})}. \eqn{ind} \eqn{\sim}{~} multinomial(p). \cr
+
+  Priors: \cr
+  \eqn{p} \eqn{\sim}{~} dirichlet (a)\cr
+  \eqn{delta= vec(D)} \eqn{\sim}{~} \eqn{N(deltabar,A_d^{-1})}\cr
+  \eqn{mu_j} \eqn{\sim}{~} \eqn{N(mubar,Amu^{-1}(x)Sigma_j)}\cr
+  \eqn{Sigma_j} \eqn{\sim}{~} IW(nu,V) \cr
+
+  Lists contain:
+  \itemize{
+    \item{\code{m}}{ m is number of choice alternatives}
+    \item{\code{lgtdata}}{list of lists with each cross-section unit MNL data}
+    \item{\code{lgtdata[[i]]$y}}{ \eqn{n_i} vector of multinomial outcomes (1,\ldots,m)}
+    \item{\code{lgtdata[[i]]$X}}{ \eqn{n_i} by nvar design matrix for ith unit}
+    \item{\code{deltabar}}{nz*nvar vector of prior means (def: 0)}
+    \item{\code{Ad}}{ prior prec matrix for vec(D) (def: .01I)}
+    \item{\code{mubar}}{ nvar x 1 prior mean vector for normal comp mean (def: 0)}
+    \item{\code{Amu}}{ prior precision for normal comp mean (def: .01I)}
+    \item{\code{nu}}{ d.f. parm for IW prior on norm comp Sigma (def: nvar+3)}
+    \item{\code{V}}{ pds location parm for IW prior on norm comp Sigma (def: nuI)}
+    \item{\code{ncomp}}{ number of components used in normal mixture }
+    \item{\code{s}}{ scaling parm for RW Metropolis (def: 2.93/sqrt(nvar))}
+    \item{\code{c}}{ fraction likelihood weighting parm (def: 2)}
+    \item{\code{R}}{ number of MCMC draws}
+    \item{\code{keep}}{ MCMC thinning parm: keep every keepth draw (def: 1)}
+  }
+}
+\value{
+  a list containing:
+  \item{Deltadraw}{R/keep  x nz*nvar matrix of draws of Delta, first row is initial value}
+  \item{betadraw}{ nlgt x nvar x R/keep array of draws of betas}
+  \item{probdraw}{ R/keep x ncomp matrix of draws of probs of mixture components}
+  \item{compdraw}{ list of list of lists (length R/keep)}
+}
+\note{
+  More on \code{compdraw} component of return value list: \cr
+  \itemize{
+  \item{compdraw[[i]]}{ the ith draw of components for mixtures}
+  \item{compdraw[[i]][[j]]}{ ith draw of the jth normal mixture comp}
+  \item{compdraw[[i]][[j]][[1]]}{ ith draw of jth normal mixture comp mean vector}
+  \item{compdraw[[i]][[j]][[2]]}{ ith draw of jth normal mixture cov parm (rooti) }
+  }
+
+  Note: Z does \strong{not} include an intercept and is centered for ease of interpretation.\cr
+  
+  Be careful in assessing the prior parameter, Amu.  .01 is too small for many applications. See
+  Allenby, McCulloch, and Rossi, Chapter 5 for a full discussion.\cr
+
+  R=5000 is used in the example below.  This is too short for reliable results. Use R > 10000 in practice.
+} 
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Allenby, McCulloch, and Rossi, Chapter 5. \cr
+  \url{http://gsbwww.uchicago.edu/fac/peter.rossi/research/bsm.html}
+}
+
+\author{ Peter Rossi, Graduate School of Business, University of Chicago,
+  \email{Peter.Rossi at ChicagoGsb.edu}.
+}
+  
+\seealso{ \code{\link{rmnlIndepMetrop}} }
+\examples{
+##
+if(nchar(Sys.getenv("LONG_TEST")) != 0) # set env var LONG_TEST to run
+{
+
+set.seed(66)
+m=3                                # num of choice alterns
+ncoef=3  
+nlgt=300                           # num of cross sectional units
+nz=2
+Z=matrix(runif(nz*nlgt),ncol=nz)
+Z=t(t(Z)-apply(Z,2,mean))          # demean Z
+ncomp=3                                # no of mixture components
+Delta=matrix(c(1,0,1,0,1,2),ncol=2)
+comps=NULL
+comps[[1]]=list(mu=c(0,-1,-2),rooti=diag(rep(1,3)))
+comps[[2]]=list(mu=c(0,-1,-2)*2,rooti=diag(rep(1,3)))
+comps[[3]]=list(mu=c(0,-1,-2)*4,rooti=diag(rep(1,3)))
+p=c(.4,.2,.4)
+
+## simulate data
+simlgtdata=NULL
+ni=rep(50,300)
+for (i in 1:nlgt) 
+{  betai=Delta\%*\%Z[i,]+as.vector(rmixture(1,p,comps)$x)
+   X=NULL
+   for(j in 1:ni[i]) 
+      { Xone=cbind(rbind(c(rep(0,m-1)),diag(m-1)),runif(m,min=-1.5,max=0))
+        X=rbind(X,Xone) }
+   outa=simmnlwX(ni[i],X,betai)
+   simlgtdata[[i]]=list(y=outa$y,X=X,beta=betai)
+}
+
+## plot betas
+if(0){
+## set if(1) above to produce plots
+bmat=matrix(0,nlgt,ncoef)
+for(i in 1:nlgt) {bmat[i,]=simlgtdata[[i]]$beta}
+par(mfrow=c(ncoef,1))
+for(i in 1:ncoef) hist(bmat[,i],breaks=30,col="magenta")
+}
+
+##   set parms for priors and Z
+nu=ncoef+3
+V=nu*diag(rep(1,ncoef))
+Ad=.01*(diag(rep(1,nz*ncoef)))
+mubar=matrix(rep(0,ncoef),nrow=1)
+deltabar=rep(0,ncoef*nz)
+Amu=matrix(.01,ncol=1)
+a=rep(5,ncoef)
+
+R=5000 
+keep=5
+c=2
+s=2.93/sqrt(ncoef)
+Data1=list(m=m,lgtdata=simlgtdata,Z=Z)
+Prior1=list(ncomp=ncomp,nu=nu,V=V,Amu=Amu,mubar=mubar,a=a,Ad=Ad,deltabar=deltabar)
+Mcmc1=list(s=s,c=c,R=R,keep=keep)
+out=rhierMnlRwMixture(Data=Data1,Prior=Prior1,Mcmc=Mcmc1)
+
+begin=1000/keep; end=R/keep  
+cat(" Deltadraws ",fill=TRUE)
+mat=apply(out$Deltadraw[begin:end,],2,quantile,probs=c(.01,.05,.5,.95,.99))
+mat=rbind(as.vector(Delta),mat); rownames(mat)[1]="delta"; print(mat)
+
+tmom=momMix(matrix(p,nrow=1),list(comps))
+pmom=momMix(out$probdraw[begin:end,],out$compdraw[begin:end])
+mat=rbind(tmom$mu,pmom$mu)
+rownames(mat)=c("true","post expect")
+cat(" mu and posterior expectation of mu",fill=TRUE)
+print(mat)
+mat=rbind(tmom$sd,pmom$sd)
+rownames(mat)=c("true","post expect")
+cat(" std dev and posterior expectation of sd",fill=TRUE)
+print(mat)
+mat=rbind(as.vector(tmom$corr),as.vector(pmom$corr))
+rownames(mat)=c("true","post expect")
+cat(" corr and posterior expectation of corr",fill=TRUE)
+print(t(mat)) 
+
+if(0) {
+## set if(1) to produce plots
+par(mfrow=c(4,1))
+plot(out$betadraw[1,1,])
+abline(h=simlgtdata[[1]]$beta[1])
+plot(out$betadraw[2,1,])
+abline(h=simlgtdata[[2]]$beta[1])
+plot(out$betadraw[100,3,])
+abline(h=simlgtdata[[100]]$beta[3])
+plot(out$betadraw[101,3,])
+abline(h=simlgtdata[[101]]$beta[3])
+par(mfrow=c(4,1))
+plot(out$Deltadraw[,1])
+abline(h=Delta[1,1])
+plot(out$Deltadraw[,2])
+abline(h=Delta[2,1])
+plot(out$Deltadraw[,3])
+abline(h=Delta[3,1])
+plot(out$Deltadraw[,6])
+abline(h=Delta[3,2])
+begin=100
+end=1000
+ngrid=50
+grid=matrix(0,ngrid,ncoef)
+rgm=matrix(c(-3,-7,-10,3,1,0),ncol=2)
+for(i in 1:ncoef) {rg=rgm[i,]; grid[,i]=rg[1] + ((1:ngrid)/ngrid)*(rg[2]-rg[1])}
+mden=eMixMargDen(grid,out$probdraw[begin:end,],out$compdraw[begin:end])
+par(mfrow=c(2,ncoef))
+for(i in 1:ncoef)
+{plot(grid[,i],mden[,i],type="l")}
+tden=mixDen(grid,p,comps)
+for(i in 1:ncoef)
+{plot(grid[,i],tden[,i],type="l")}
+}
+
+}
+}
+\keyword{ models }
diff --git a/man/rivGibbs.Rd b/man/rivGibbs.Rd
new file mode 100755
index 0000000..87b367b
--- /dev/null
+++ b/man/rivGibbs.Rd
@@ -0,0 +1,87 @@
+\name{rivGibbs}
+\alias{rivGibbs}
+\concept{Instrumental Variables}
+\concept{Gibbs Sampler}
+\concept{bayes}
+\concept{endogeneity}
+\concept{simultaneity}
+\concept{MCMC}
+
+\title{ Gibbs Sampler for Linear "IV" Model}
+\description{
+  \code{rivGibbs} is a Gibbs Sampler for a linear structural equation with an arbitrary number of instruments.
+}
+\usage{
+rivGibbs(Data, Prior, Mcmc)
+}
+\arguments{
+  \item{Data}{ list(z,w,x,y) }
+  \item{Prior}{ list(md,Ad,mbg,Abg,nu,V) (optional)}
+  \item{Mcmc}{ list(R,keep)}
+}
+\details{
+  Model:\cr
+  \eqn{x=z'delta + e1}. \cr
+  \eqn{y=beta*x + w'gamma + e2}. \cr
+  \eqn{e1,e2} \eqn{\sim}{~} \eqn{N(0,Sigma)}. 
+
+  Priors:\cr
+  \eqn{delta} \eqn{\sim}{~} \eqn{N(md,Ad^{-1})}.  \eqn{vec(beta,gamma)} \eqn{\sim}{~} \eqn{N(mbg,Abg^{-1})} \cr
+  \eqn{Sigma} \eqn{\sim}{~} IW(nu,V)
+
+  List arguments contain:
+  \itemize{
+    \item{\code{z}}{ matrix of obs on instruments}
+    \item{\code{y}}{ vector of obs on lhs var in structural equation}
+    \item{\code{x}}{ "endogenous" var in structural eqn}
+    \item{\code{w}}{ matrix of obs on "exogenous" vars in the structural eqn}
+    \item{\code{md}}{ prior mean of delta (def: 0)}
+    \item{\code{Ad}}{ pds prior prec for prior on delta (def: .01I)}
+    \item{\code{mbg}}{ prior mean vector for prior on beta,gamma (def: 0)}
+    \item{\code{Abg}}{ pds prior prec  for prior on beta,gamma (def: .01I)}
+    \item{\code{nu}}{ d.f. parm for IW prior on Sigma (def: 5)}
+    \item{\code{V}}{ pds location matrix for IW prior on Sigma (def: nuI)}
+    \item{\code{R}}{ number of MCMC draws}
+    \item{\code{keep}}{ MCMC thinning parm: keep every keepth draw (def: 1)} 
+  }
+}
+\value{
+  a list containing:
+  \item{deltadraw}{R/keep x dim(delta) array of delta draws}
+  \item{betadraw}{R/keep x 1 vector of beta draws}
+  \item{gammadraw}{R/keep x dim(gamma) array of gamma draws }
+  \item{Sigmadraw}{R/keep x 4 array of Sigma draws}
+}
+\examples{
+##
+set.seed(66)
+simIV = function(delta,beta,Sigma,n,z,w,gamma) {
+eps = matrix(rnorm(2*n),ncol=2) \%*\% chol(Sigma)
+x = z \%*\% delta + eps[,1]; y = beta*x +  eps[,2] + w\%*\%gamma
+list(x=as.vector(x),y=as.vector(y)) }
+n = 200 ; p=1 # number of instruments
+z = cbind(rep(1,n),matrix(runif(n*p),ncol=p))
+w = matrix(1,n,1)
+rho=.8
+Sigma = matrix(c(1,rho,rho,1),ncol=2)
+delta = c(1,4); beta = .5; gamma = c(1)
+simiv = simIV(delta,beta,Sigma,n,z,w,gamma)
+
+Mcmc=list(); Prior=list(); Data = list()
+Data$z = z; Data$w=w; Data$x=simiv$x; Data$y=simiv$y
+Mcmc$R = 5000
+Mcmc$keep=1
+out=rivGibbs(Data=Data,Prior=Prior,Mcmc=Mcmc)
+
+cat(" deltadraws ",fill=TRUE)
+mat=apply(out$deltadraw,2,quantile,probs=c(.01,.05,.5,.95,.99))
+mat=rbind(delta,mat); rownames(mat)[1]="delta"; print(mat)
+cat(" betadraws ",fill=TRUE)
+qout=quantile(out$betadraw,probs=c(.01,.05,.5,.95,.99))
+mat=matrix(qout,ncol=1)
+mat=rbind(beta,mat); rownames(mat)=c("beta",names(qout)); print(mat)
+cat(" Sigma draws",fill=TRUE)
+mat=apply(out$Sigmadraw,2,quantile,probs=c(.01,.05,.5,.95,.99))
+mat=rbind(as.vector(Sigma),mat); rownames(mat)[1]="Sigma"; print(mat)
+}
+\keyword{ models }
diff --git a/man/rmixGibbs.Rd b/man/rmixGibbs.Rd
new file mode 100755
index 0000000..3398f8f
--- /dev/null
+++ b/man/rmixGibbs.Rd
@@ -0,0 +1,47 @@
+\name{rmixGibbs}
+\alias{rmixGibbs}
+\title{ Gibbs Sampler for Normal Mixtures w/o Error Checking}
+\description{
+  \code{rmixGibbs} makes one draw using the Gibbs Sampler for a mixture of multivariate normals.
+}
+\usage{
+rmixGibbs(y, Bbar, A, nu, V, a, p, z, comps)
+}
+\arguments{
+  \item{y}{ data array - rows are obs }
+  \item{Bbar}{ prior mean for mean vector of each norm comp }
+  \item{A}{ prior precision parameter}
+  \item{nu}{ prior d.f. parm }
+  \item{V}{ prior location matrix for the covariance prior }
+  \item{a}{ Dirichlet prior parms }
+  \item{p}{ prior prob of each mixture component }
+  \item{z}{ component identities for each observation -- "indicators"}
+  \item{comps}{ list of components for the normal mixture   }
+}
+\details{
+  \code{rmixGibbs} is not designed to be called directly. Instead, use \code{rnmixGibbs} wrapper function.
+}
+\value{
+  a list containing:
+  \item{p}{draw of mixture probabilities }
+  \item{z}{draw of indicators of each component}
+  \item{comps}{new draw of normal component parameters }
+}
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Allenby, McCulloch, and Rossi, Chapter 5. \cr
+  \url{http://gsbwww.uchicago.edu/fac/peter.rossi/research/bsm.html}
+}
+
+\author{ Rob McCulloch and Peter Rossi, Graduate School of Business, University of Chicago,
+  \email{Peter.Rossi at ChicagoGsb.edu}.
+}
+
+\section{Warning}{
+  This routine is a utility routine that does \strong{not} check the
+  input arguments for proper dimensions and type.
+}
+
+\seealso{ \code{\link{rnmixGibbs}} }
+
+\keyword{ multivariate } 
+
diff --git a/man/rmixture.Rd b/man/rmixture.Rd
new file mode 100755
index 0000000..f8c405c
--- /dev/null
+++ b/man/rmixture.Rd
@@ -0,0 +1,39 @@
+\name{rmixture}
+\alias{rmixture}
+\concept{mixture of normals}
+\concept{simulation}
+
+\title{ Draw from Mixture of Normals }
+\description{
+  \code{rmixture} simulates iid draws from a Multivariate Mixture of Normals
+}
+\usage{
+rmixture(n, p, comps)
+}
+\arguments{
+  \item{n}{ number of observations }
+  \item{p}{ ncomp x 1 vector of prior probabilities for each mixture component }
+  \item{comps}{ list of mixture component parameters }
+}
+\details{
+  comps is a list of length ncomp = length(p).  comps[[j]][[1]] is the mean vector for the jth component.
+  comps[[j]][[2]] is the inverse of the Cholesky root of Sigma for that component.
+}
+\value{
+  A list containing \ldots
+  \item{x}{ An n x length(comps[[1]][[1]]) array of iid draws }
+  \item{z}{ A n x 1 vector of indicators of which component each draw is taken from }
+}
+
+\author{ Peter Rossi, Graduate School of Business, University of Chicago,
+  \email{Peter.Rossi at ChicagoGsb.edu}.
+}
+
+\section{Warning}{
+  This routine is a utility routine that does \strong{not} check the
+  input arguments for proper dimensions and type.
+}
+
+\seealso{ \code{\link{rnmixGibbs}} }
+\keyword{ distribution }
+\keyword{ multivariate }
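
A hedged sketch of rmixture with a two-component mixture built per the structure described above (assumes bayesm is loaded; the component parameters are arbitrary):

library(bayesm)
set.seed(66)
p=c(.3,.7)
comps=list(list(mu=c(0,0),rooti=diag(2)),
           list(mu=c(4,4),rooti=diag(2)))
draws=rmixture(1000,p,comps)
table(draws$z)                 # how many draws came from each component
plot(draws$x,col=draws$z)
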
diff --git a/man/rmnlIndepMetrop.Rd b/man/rmnlIndepMetrop.Rd
new file mode 100755
index 0000000..e30fc5c
--- /dev/null
+++ b/man/rmnlIndepMetrop.Rd
@@ -0,0 +1,62 @@
+\name{rmnlIndepMetrop}
+\alias{rmnlIndepMetrop}
+\concept{MCMC}
+\concept{multinomial logit}
+\concept{Metropolis algorithm}
+\concept{bayes}
+\title{ MCMC Algorithm for Multinomial Logit Model }
+\description{
+  \code{rmnlIndepMetrop} implements Independence Metropolis for the MNL.
+}
+\usage{
+rmnlIndepMetrop(Data, Prior, Mcmc)
+}
+\arguments{
+  \item{Data}{ list(m,X,y)}
+  \item{Prior}{ list(A,betabar)  optional}
+  \item{Mcmc}{ list(R,keep,nu) }
+}
+\details{
+  Model:   \eqn{Pr(y=j) = exp(x_j'beta)/\sum_k exp(x_k'beta)}. \cr
+
+  Prior:   \eqn{beta} \eqn{\sim}{~} \eqn{N(betabar,A^{-1})} \cr
+
+  list arguments contain:
+  \itemize{
+    \item{\code{m}}{number of alternatives}
+    \item{\code{X}}{nobs*m x nvar matrix}
+    \item{\code{y}}{ nobs vector of multinomial outcomes (1,\ldots, m)}
+    \item{\code{A}}{ nvar x nvar pds prior prec matrix (def: .01I)}
+    \item{\code{betabar}}{ nvar x 1 prior mean (def: 0)}
+    \item{\code{R}}{ number of MCMC draws}
+    \item{\code{keep}}{ MCMC thinning parm: keep every keepth draw (def: 1)}
+    \item{\code{nu}}{ degrees of freedom parameter for independence t density (def: 6) }
+  }
+}
+\value{
+  a list containing: 
+  \item{betadraw}{R/keep x nvar array of beta draws}
+  \item{acceptr}{acceptance rate of Metropolis draws}
+}
+
+\seealso{ \code{\link{rhierMnlRwMixture}} }
+\examples{
+##
+if(nchar(Sys.getenv("LONG_TEST")) != 0) # set env var LONG_TEST to run
+{
+
+set.seed(66)
+n=200; m=3; beta=c(1,-1,1.5,.5)
+simout=simmnl(m,n,beta)
+A=diag(c(rep(.01,length(beta)))); betabar=rep(0,length(beta))
+
+R=2000
+Data=list(y=simout$y,X=simout$X,m=m); Mcmc=list(R=R,keep=1) ; Prior=list(A=A,betabar=betabar)
+out=rmnlIndepMetrop(Data=Data,Prior=Prior,Mcmc=Mcmc)
+cat(" Betadraws ",fill=TRUE)
+mat=apply(out$betadraw,2,quantile,probs=c(.01,.05,.5,.95,.99))
+mat=rbind(beta,mat); rownames(mat)[1]="beta"; print(mat)
+}
+
+}
+\keyword{ models }
diff --git a/man/rmnpGibbs.Rd b/man/rmnpGibbs.Rd
new file mode 100755
index 0000000..ad1a999
--- /dev/null
+++ b/man/rmnpGibbs.Rd
@@ -0,0 +1,101 @@
+\name{rmnpGibbs}
+\alias{rmnpGibbs}
+\concept{bayes}
+\concept{multinomial probit}
+\concept{MCMC}
+\concept{Gibbs Sampling}
+
+\title{ Gibbs Sampler for Multinomial Probit }
+\description{
+  \code{rmnpGibbs} implements the McCulloch/Rossi Gibbs Sampler for the multinomial probit model.
+}
+
+\usage{
+rmnpGibbs(Data, Prior, Mcmc)
+}
+
+\arguments{
+  \item{Data}{ list(p, y, X)}
+  \item{Prior}{ list(betabar,A,nu,V)}
+  \item{Mcmc}{ list(beta0,sigma0,R,keep)  }
+}
+
+\details{
+  model:  \cr
+    \eqn{w_i = X_i\beta + e}.    \eqn{e} \eqn{\sim}{~} \eqn{N(0,Sigma)}.     note: \eqn{w_i, e} are (p-1) x 1.\cr
+    \eqn{y_i = j},  if \eqn{w_{ij} > max(0,w_{i,-j})}  j=1,\ldots,p-1.  \eqn{w_{i,-j}} means elements of \eqn{w_i}
+     other than the jth. \cr
+    \eqn{y_i = p},  if all elements of \eqn{w_i} are < 0.\cr
+  
+  priors:\cr
+    \eqn{beta} \eqn{\sim}{~} \eqn{N(betabar,A^{-1})} \cr
+    \eqn{Sigma} \eqn{\sim}{~} IW(nu,V)\cr
+
+  to make up X matrix use \code{\link{createX}} with \code{DIFF=TRUE}.
+
+  List arguments contain  
+  \itemize{
+    \item{\code{p}}{number of choices or possible multinomial outcomes}
+    \item{\code{y}}{n x 1 vector of multinomial outcomes}
+    \item{\code{X}}{n*(p-1) x k Design Matrix}
+    \item{\code{betabar}}{k x 1 prior mean (def: 0)}
+    \item{\code{A}}{k x k prior precision matrix (def: .01I)} 
+    \item{\code{nu}}{ d.f. parm for IWishart prior (def: (p-1) + 3)}
+    \item{\code{V}}{ pds location parm for IWishart prior (def: nu*I)}
+    \item{\code{beta0}}{ initial value for beta}
+    \item{\code{sigma0}}{ initial value for sigma }
+    \item{\code{R}}{ number of MCMC draws }
+    \item{\code{keep}}{ thinning parameter - keep every keepth draw (def: 1)}
+  }
+}
+
+\value{
+  a list containing: 
+  \item{betadraw }{R/keep x k array of betadraws}
+  \item{sigmadraw}{R/keep x (p-1)*(p-1) array of sigma draws -- each row is in vector form}
+}
+\note{
+  beta is not identified.  beta/sqrt(\eqn{sigma_{11}}) and Sigma/\eqn{sigma_{11}} are.  See Allenby et al. or the
+  example below for details.
+}
+
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Allenby, McCulloch, and Rossi, Chapter 4. \cr
+  \url{http://gsbwww.uchicago.edu/fac/peter.rossi/research/bsm.html}
+}
+
+\author{ Peter Rossi, Graduate School of Business, University of Chicago,
+  \email{Peter.Rossi at ChicagoGsb.edu}.
+}
+
+\seealso{ \code{\link{rmvpGibbs}} }
+\examples{
+##
+
+set.seed(66)
+p=3
+n=500
+beta=c(-1,1,1,2)
+Sigma=matrix(c(1,.5,.5,1),ncol=2)
+k=length(beta)
+x1=runif(n*(p-1),min=-1,max=1); x2=runif(n*(p-1),min=-1,max=1)
+I2=diag(rep(1,p-1)); xadd=rbind(I2)
+for(i in 2:n) { xadd=rbind(xadd,I2)}
+X=cbind(xadd,x1,x2)
+simout=simmnp(X,p,500,beta,Sigma)
+
+R=2000
+Data=list(p=p,y=simout$y,X=simout$X)
+Mcmc=list(R=R,keep=1)
+
+out=rmnpGibbs(Mcmc=Mcmc,Data=Data)
+
+cat(" Betadraws ",fill=TRUE)
+mat=apply(out$betadraw/sqrt(out$sigmadraw[,1]),2,quantile,probs=c(.01,.05,.5,.95,.99))
+mat=rbind(beta,mat); rownames(mat)[1]="beta"; print(mat)
+cat(" Sigmadraws ",fill=TRUE)
+mat=apply(out$sigmadraw/out$sigmadraw[,1],2,quantile,probs=c(.01,.05,.5,.95,.99))
+mat=rbind(as.vector(Sigma),mat); rownames(mat)[1]="sigma"; print(mat)
+
+}
+\keyword{ models }
diff --git a/man/rmultireg.Rd b/man/rmultireg.Rd
new file mode 100755
index 0000000..cb62ec5
--- /dev/null
+++ b/man/rmultireg.Rd
@@ -0,0 +1,83 @@
+\name{rmultireg}
+\alias{rmultireg}
+\concept{bayes}
+\concept{multivariate regression}
+\concept{simulation}
+
+
+\title{ Draw from the Posterior of a Multivariate Regression }
+\description{
+  \code{ rmultireg} draws from the posterior of a Multivariate Regression model with a natural conjugate
+  prior.
+}
+\usage{
+rmultireg(Y, X, Bbar, A, nu, V)
+}
+
+\arguments{
+  \item{Y}{ n x m matrix of observations on m dep vars }
+  \item{X}{ n x k matrix of observations on indep vars (supply intercept) }
+  \item{Bbar}{ k x m matrix of prior mean of regression coefficients }
+  \item{A}{ k x k Prior precision matrix }
+  \item{nu}{ d.f. parameter for Sigma }
+  \item{V}{ m x m pds location parameter for prior on Sigma }
+}
+\details{
+  Model: \eqn{Y=XB+U}.  \eqn{cov(u_i) = Sigma}.  \eqn{B} is k x m matrix of coefficients. \eqn{Sigma} is m x m covariance.
+
+  Priors:  \eqn{beta} given \eqn{Sigma}  \eqn{\sim}{~} \eqn{N(betabar,Sigma (x) A^{-1})}. 
+  \eqn{betabar=vec(Bbar)};  \eqn{beta = vec(B)} \cr
+          \eqn{Sigma} \eqn{\sim}{~} IW(nu,V). 
+}
+\value{
+  A list of the components of a draw from the posterior
+  \item{B }{ draw of regression coefficient matrix }
+  \item{Sigma }{ draw of Sigma }
+}
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Allenby, McCulloch, and Rossi. \cr
+  \url{http://gsbwww.uchicago.edu/fac/peter.rossi/research/bsm.html}
+}
+
+\author{ Peter Rossi, Graduate School of Business, University of Chicago,
+  \email{Peter.Rossi at ChicagoGsb.edu}.
+}
+
+\section{Warning}{
+  This routine is a utility routine that does \strong{not} check the
+  input arguments for proper dimensions and type.
+}
+
+\seealso{ \code{\link{rmultiregfp}},\code{\link{init.rmultiregfp}}}
+\examples{
+##
+set.seed(66)
+n=200
+m=2
+X=cbind(rep(1,n),runif(n))
+k=ncol(X)
+B=matrix(c(1,2,-1,3),ncol=m)
+Sigma=matrix(c(1,.5,.5,1),ncol=m); RSigma=chol(Sigma)
+Y=X\%*\%B+matrix(rnorm(m*n),ncol=m)\%*\%RSigma
+
+betabar=rep(0,k*m);Bbar=matrix(betabar,ncol=m)
+A=diag(rep(.01,k))
+nu=3; V=nu*diag(m)
+
+R=1000
+betadraw=matrix(double(R*k*m),ncol=k*m)
+Sigmadraw=matrix(double(R*m*m),ncol=m*m)
+for (rep in 1:R) 
+   {out=rmultireg(Y,X,Bbar,A,nu,V);betadraw[rep,]=out$B
+    Sigmadraw[rep,]=out$Sigma}
+
+cat(" Betadraws ",fill=TRUE)
+mat=apply(betadraw,2,quantile,probs=c(.01,.05,.5,.95,.99))
+mat=rbind(as.vector(B),mat); rownames(mat)[1]="beta"
+print(mat)
+cat(" Sigma draws",fill=TRUE)
+mat=apply(Sigmadraw,2,quantile,probs=c(.01,.05,.5,.95,.99))
+mat=rbind(as.vector(Sigma),mat); rownames(mat)[1]="Sigma"
+print(mat)
+}
+\keyword{ regression }
diff --git a/man/rmultiregfp.Rd b/man/rmultiregfp.Rd
new file mode 100755
index 0000000..5fc5ea5
--- /dev/null
+++ b/man/rmultiregfp.Rd
@@ -0,0 +1,48 @@
+\name{rmultiregfp}
+\alias{rmultiregfp}
+
+\title{ Draw from the Posterior of a Multivariate Regression }
+\description{
+  \code{ rmultiregfp} draws from the posterior of a Multivariate Regression model with a natural conjugate
+  prior.
+}
+\usage{
+rmultiregfp(Y, X, Fparm)
+}
+
+\arguments{
+  \item{Y}{ n x m matrix of observations on m dep vars }
+  \item{X}{ n x k matrix of observations on indep vars (supply intercept) }
+  \item{Fparm}{ a list of prior parameters prepared by \code{init.rmultiregfp}}
+}
+\details{
+  Model: \eqn{Y=XB+U}.  \eqn{cov(u_i) = Sigma}.  \eqn{B} is k x m matrix of coefficients. 
+         \eqn{Sigma} is an m x m covariance matrix.
+
+  Priors:  \eqn{beta} given \eqn{Sigma} \eqn{\sim}{~} \eqn{N(betabar,Sigma (x) A^{-1})}. 
+  \eqn{betabar=vec(Bbar)};  \eqn{beta = vec(B)}. \cr
+  \eqn{Sigma} \eqn{\sim}{~} IW(nu,V). 
+
+  Prepare Fparm by calling \code{\link{init.rmultiregfp}}.
+}
+\value{
+  A list of the components of a draw from the posterior
+  \item{B }{ draw of regression coefficient matrix }
+  \item{Sigma }{ draw of \eqn{Sigma} }
+}
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Allenby, McCulloch, and Rossi. \cr
+  \url{http://gsbwww.uchicago.edu/fac/peter.rossi/research/bsm.html}
+}
+
+\author{ Peter Rossi, Graduate School of Business, University of Chicago,
+  \email{Peter.Rossi at ChicagoGsb.edu}.
+}
+
+\section{Warning}{
+  This routine is a utility routine that does \strong{not} check the
+  input arguments for proper dimensions and type.
+}
+
+\seealso{ \code{\link{rmultireg}},\code{\link{init.rmultiregfp}}}
+\keyword{ regression }
diff --git a/man/rmvpGibbs.Rd b/man/rmvpGibbs.Rd
new file mode 100755
index 0000000..c7c829d
--- /dev/null
+++ b/man/rmvpGibbs.Rd
@@ -0,0 +1,104 @@
+\name{rmvpGibbs}
+\alias{rmvpGibbs}
+
+\concept{bayes}
+\concept{multivariate probit}
+\concept{MCMC}
+\concept{Gibbs Sampling}
+
+\title{ Gibbs Sampler for Multivariate Probit }
+\description{
+  \code{rmvpGibbs} implements the Edwards/Allenby Gibbs Sampler for the multivariate probit model.
+}
+\usage{
+rmvpGibbs(Data, Prior, Mcmc)
+}
+\arguments{
+  \item{Data}{ list(p,y,X)}
+  \item{Prior}{ list(betabar,A,nu,V)}
+  \item{Mcmc}{ list(beta0,sigma0,R,keep)  }
+}
+
+\details{
+  model:  \cr
+    \eqn{w_i = X_i beta + e}.    \eqn{e} \eqn{\sim}{~} N(0,Sigma).     note: \eqn{w_i} is p x 1.\cr
+    \eqn{y_{ij} = 1},  if \eqn{w_{ij} > 0}, else \eqn{y_{ij}=0}.  j=1,\ldots,p.   \cr
+  
+  priors:\cr
+    \eqn{beta} \eqn{\sim}{~} \eqn{N(betabar,A^{-1})}\cr
+    \eqn{Sigma} \eqn{\sim}{~} IW(nu,V)\cr
+
+  to make up X matrix use \code{createX}
+
+  List arguments contain  
+  \itemize{
+    \item{\code{p}}{dimension of multivariate probit}
+    \item{\code{X}}{n*p x k Design Matrix}
+    \item{\code{y}}{n x 1 vector of multinomial outcomes}
+    \item{\code{betabar}}{k x 1 prior mean (def: 0)}
+    \item{\code{A}}{k x k prior precision matrix (def: .01I)} 
+    \item{\code{nu}}{ d.f. parm for IWishart prior (def: (p-1) + 3)}
+    \item{\code{V}}{ pds location parm for IWishart prior (def: nu*I)}
+    \item{\code{beta0}}{ initial value for beta}
+    \item{\code{sigma0}}{ initial value for sigma }
+    \item{\code{R}}{ number of MCMC draws }
+    \item{\code{keep}}{ thinning parameter - keep every keepth draw (def: 1)}
+  }
+}
+
+\value{
+  a list containing: 
+  \item{betadraw }{R/keep x k array of betadraws}
+  \item{sigmadraw}{R/keep x p*p array of sigma draws -- each row is in vector form}
+}
+
+\note{
+  beta and Sigma are not identified.  The correlation matrix and the betas divided by the
+  appropriate standard deviation are.  See Allenby et al. for details or the example below.
+} 
+
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Allenby, McCulloch, and Rossi, Chapter 4. \cr
+  \url{http://gsbwww.uchicago.edu/fac/peter.rossi/research/bsm.html}
+}
+
+\author{ Peter Rossi, Graduate School of Business, University of Chicago,
+  \email{Peter.Rossi at ChicagoGsb.edu}.
+}
+
+\seealso{ \code{\link{rmnpGibbs}} }
+\examples{
+##
+
+set.seed(66)
+p=3
+n=500
+beta=c(-2,0,2)
+Sigma=matrix(c(1,.5,.5,.5,1,.5,.5,.5,1),ncol=3)
+k=length(beta)
+I2=diag(rep(1,p)); xadd=rbind(I2)
+for(i in 2:n) { xadd=rbind(xadd,I2)}; X=xadd
+simout=simmvp(X,p,500,beta,Sigma)
+
+Data=list(p=p,y=simout$y,X=simout$X)
+R=2000
+Mcmc=list(R=R,keep=1)
+out=rmvpGibbs(Data=Data,Mcmc=Mcmc)
+
+ind=seq(from=0,by=p,length=k)
+inda=1:3
+ind=ind+inda
+cat(" Betadraws ",fill=TRUE)
+mat=apply(out$betadraw/sqrt(out$sigmadraw[,ind]),2,quantile,probs=c(.01,.05,.5,.95,.99))
+mat=rbind(beta,mat); rownames(mat)[1]="beta"; print(mat)
+rdraw=matrix(double((R)*p*p),ncol=p*p)
+rdraw=t(apply(out$sigmadraw,1,nmat))
+cat(" Sigmadraws ",fill=TRUE)
+mat=apply(rdraw,2,quantile,probs=c(.01,.05,.5,.95,.99))
+mat=rbind(as.vector(Sigma),mat); rownames(mat)[1]="Sigma"; print(mat)
+
+}
+\keyword{ models }
+\keyword{ multivariate }
diff --git a/man/rmvst.Rd b/man/rmvst.Rd
new file mode 100755
index 0000000..ce0b788
--- /dev/null
+++ b/man/rmvst.Rd
@@ -0,0 +1,42 @@
+\name{rmvst}
+\alias{rmvst}
+\concept{multivariate t distribution}
+\concept{student-t}
+\concept{simulation}
+
+\title{ Draw from Multivariate Student-t }
+\description{
+  \code{rmvst} draws from a Multivariate student-t distribution.
+}
+\usage{
+rmvst(nu, mu, root)
+}
+\arguments{
+  \item{nu}{ d.f. parameter }
+  \item{mu}{ mean vector }
+  \item{root}{ Upper Tri Cholesky Root of Sigma }
+}
+\value{
+  length(mu) draw vector
+}
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Allenby, McCulloch, and Rossi. \cr
+  \url{http://gsbwww.uchicago.edu/fac/peter.rossi/research/bsm.html}
+}
+
+\author{ Peter Rossi, Graduate School of Business, University of Chicago,
+  \email{Peter.Rossi at ChicagoGsb.edu}.
+}
+
+\section{Warning}{
+  This routine is a utility routine that does \strong{not} check the
+  input arguments for proper dimensions and type.
+}
+
+\seealso{ \code{\link{lndMvst}}}
+\examples{
+##
+set.seed(66)
+rmvst(nu=5,mu=c(rep(0,2)),root=chol(matrix(c(2,1,1,2),ncol=2)))
+}
+\keyword{ distribution }
diff --git a/man/rnmixGibbs.Rd b/man/rnmixGibbs.Rd
new file mode 100755
index 0000000..e9f6fb9
--- /dev/null
+++ b/man/rnmixGibbs.Rd
@@ -0,0 +1,114 @@
+\name{rnmixGibbs}
+\alias{rnmixGibbs}
+\concept{bayes}
+\concept{MCMC}
+\concept{normal mixtures}
+\concept{Gibbs Sampling}
+
+\title{ Gibbs Sampler for Normal Mixtures}
+\description{
+  \code{rnmixGibbs} implements a Gibbs Sampler for normal mixtures.
+}
+\usage{
+rnmixGibbs(Data, Prior, Mcmc)
+}
+\arguments{
+  \item{Data}{ list(y) }
+  \item{Prior}{ list(Mubar,A,nu,V,a,ncomp)}
+  \item{Mcmc}{ list(R,keep) }
+}
+\details{
+  Model: \cr
+        \eqn{y_i} \eqn{\sim}{~} \eqn{N(mu_ind,Sigma_ind)}. \cr
+        ind \eqn{\sim}{~} iid multinomial(p).  p is a ncomp x 1 vector of probs. \cr
+  Priors:\cr
+        \eqn{mu_j} \eqn{\sim}{~} \eqn{N(mubar,Sigma (x) A^{-1})}. \eqn{mubar=vec(Mubar)}. \cr
+        \eqn{Sigma_j} \eqn{\sim}{~} IW(nu,V).\cr
+        note: this is the natural conjugate prior -- a special case of multivariate 
+             regression.\cr
+        \eqn{p} \eqn{\sim}{~} Dirichlet(a).
+
+  Output of the components is in the form of a list of lists. \cr
+  compsdraw[[i]] is ith draw -- list of ncomp lists. \cr
+  compsdraw[[i]][[j]] is list of parms for jth normal component. \cr
+  jcomp=compsdraw[[i]][[j]]. Then jth comp \eqn{\sim}{~} \eqn{N(jcomp[[1]],Sigma)}, 
+  \eqn{Sigma} = t(R)\%*\%R, \eqn{R^{-1}} = jcomp[[2]].
+
+  List arguments contain:
+  \itemize{
+    \item{y}{ n x k array of data (rows are obs) }
+    \item{Mubar}{ k x 1 array with prior mean of normal comp means (def: 0)}
+    \item{A}{ 1 x 1 precision parameter for prior on mean of normal comp (def: .01)}
+    \item{nu}{ d.f. parameter for prior on Sigma (normal comp cov matrix) (def: k+3)}
+    \item{V}{ k x k location matrix of IW prior on Sigma (def: nuI)}
+    \item{a}{ ncomp x 1 vector of Dirichlet prior parms (def: rep(5,ncomp))}
+    \item{ncomp}{ number of normal components to be included }
+    \item{R}{ number of MCMC draws }
+    \item{keep}{ MCMC thinning parm: keep every keepth draw (def: 1)}
+  }
+}
+\value{
+  a list containing:
+  \item{probdraw}{R/keep x ncomp array of mixture prob draws}
+  \item{zdraw}{R/keep x nobs array of indicators of mixture comp identity for each obs}
+  \item{compsdraw}{R/keep lists of lists of comp parm draws}
+}
+
+\note{
+  In this model, the component normal parameters are not identified due to label-switching.
+  However, the fitted mixture of normals density is identified as it is invariant to label-switching. 
+  See Allenby et al., Chapter 5 for details. Use \code{eMixMargDen} or \code{momMix} to compute
+  posterior expectation or distribution of various identified parameters.
+}
+
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Allenby, McCulloch, and Rossi, Chapter 5. \cr
+  \url{http://gsbwww.uchicago.edu/fac/peter.rossi/research/bsm.html}
+}
+
+\author{ Peter Rossi, Graduate School of Business, University of Chicago,
+  \email{Peter.Rossi at ChicagoGsb.edu}.
+}
+
+\seealso{ \code{\link{rmixture}}, \code{\link{rmixGibbs}} ,\code{\link{eMixMargDen}}, \code{\link{momMix}}}
+\examples{
+##
+if(nchar(Sys.getenv("LONG_TEST")) != 0) # set env var LONG_TEST to run
+{
+
+set.seed(66)
+dim=5;  k=3   # dimension of simulated data and number of "true" components
+sigma = matrix(rep(0.5,dim^2),nrow=dim);diag(sigma)=1
+sigfac = c(1,1,1);mufac=c(1,2,3); compsmv=list()
+for(i in 1:k) compsmv[[i]] = list(mu=mufac[i]*1:dim,sigma=sigfac[i]*sigma)
+comps = list() # change to "rooti" scale
+for(i in 1:k) comps[[i]] = list(mu=compsmv[[i]][[1]],rooti=solve(chol(compsmv[[i]][[2]])))
+p=(1:k)/sum(1:k)
+
+nobs=5000
+dm = rmixture(nobs,p,comps)
+
+Data=list(y=dm$x)
+ncomp=9
+Prior=list(ncomp=ncomp,a=c(rep(1,ncomp)))
+Mcmc=list(R=2000,keep=1)
+out=rnmixGibbs(Data=Data,Prior=Prior,Mcmc=Mcmc)
+
+tmom=momMix(matrix(p,nrow=1),list(comps))
+pmom=momMix(out$probdraw[500:2000,],out$compdraw[500:2000])
+mat=rbind(tmom$mu,pmom$mu)
+rownames(mat)=c("true","post expect")
+cat(" mu and posterior expectation of mu",fill=TRUE)
+print(mat)
+mat=rbind(tmom$sd,pmom$sd)
+rownames(mat)=c("true","post expect")
+cat(" std dev and posterior expectation of sd",fill=TRUE)
+print(mat)
+mat=rbind(as.vector(tmom$corr),as.vector(pmom$corr))
+rownames(mat)=c("true","post expect")
+cat(" corr and posterior expectation of corr",fill=TRUE)
+print(t(mat))
+
+}
+}
+\keyword{ multivariate }
diff --git a/man/rscaleUsage.Rd b/man/rscaleUsage.Rd
new file mode 100755
index 0000000..60bdafb
--- /dev/null
+++ b/man/rscaleUsage.Rd
@@ -0,0 +1,61 @@
+\name{rscaleUsage}
+\alias{rscaleUsage}
+\concept{MCMC}
+\concept{bayes}
+\concept{ordinal data}
+\concept{scale usage}
+\concept{hierarchical models}
+
+\title{ MCMC Algorithm for Multivariate Ordinal Data with Scale Usage Heterogeneity.}
+\description{
+  \code{rscaleUsage} implements an MCMC algorithm for multivariate ordinal data with scale usage heterogeneity.  
+}
+\usage{
+rscaleUsage(Data,Prior, Mcmc)
+}
+\arguments{
+  \item{Data}{ list(k,x)}
+  \item{Prior}{ list(nu,V,mubar,Am,gsigma,gl11,gl22,gl12,Lambdanu,LambdaV,ge) optional }
+  \item{Mcmc}{ list(R,keep,ndghk,printevery,e,y,mu,Sigma,sigma,tau,Lambda) optional }
+}
+\details{
+  Model: n=nrow(x) individuals respond to m=ncol(x) questions. All questions are on a scale 1, \ldots, k.\cr
+
+  for respondent i and question j, \cr
+  \eqn{x_{ij} = d}, if \eqn{c_{d-1} \le y_{ij} \le c_d}. \cr
+  d=1,\ldots,k. \eqn{c_d = a + b*d + e*d^2}. \cr
+
+  \eqn{y_i = mu + tau_i*iota + sigma_i*z_i}.  \eqn{z_i} \eqn{\sim}{~} \eqn{N(0,Sigma)}. \cr
+
+  \eqn{(tau_i,ln(sigma_i))} \eqn{\sim}{~} \eqn{N(phi,Lambda)}.  \eqn{phi=(0,lambda_{22})}. \cr
+
+  Priors:\cr
+  mu \eqn{\sim}{~} \eqn{N(mubar, Am^{-1})}.\cr
+  Sigma \eqn{\sim}{~} IW(nu,V).\cr
+  Lambda \eqn{\sim}{~} IW(Lambdanu,LambdaV).\cr
+  e \eqn{\sim}{~} unif on a grid. \cr
+}
+\value{
+  a list containing:
+  \item{drawSigma}{R/keep x m*m array of Sigma draws}
+  \item{mudraw}{R/keep x m array of mu draws}
+  \item{taudraw}{R/keep x n array of tau draws}
+  \item{sigmadraw}{R/keep x n array of sigma draws}
+  \item{Lambdadraw}{R/keep x 4 array of Lambda draws}
+  \item{edraw}{R/keep x  1 array of  e draws}
+}
+\note{
+  It is \strong{highly} recommended that the user choose the default settings. This means supplying only
+  \code{Data} and \code{R} in \code{Mcmc}, and not specifying the argument \code{Prior}.  If you wish to change
+  prior settings and/or the grids used, please read the case study in Allenby et al. carefully.
+}
+
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Allenby, McCulloch, and Rossi, Case Study on Scale Usage Heterogeneity. \cr
+  \url{http://gsbwww.uchicago.edu/fac/peter.rossi/research/bsm.html}
+}
+
+\author{ Rob McCulloch and Peter Rossi, Graduate School of Business, University of Chicago,
+  \email{Peter.Rossi at ChicagoGsb.edu}.
+}
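+\examples{
+##
+if(nchar(Sys.getenv("LONG_TEST")) != 0) # set env var LONG_TEST to run
+{
+## illustrative sketch only: iid uniform ordinal responses on a 1 to 10 scale
+## stand in for real survey data; x should be the n x m matrix of responses
+set.seed(66)
+n=100; m=5; k=10
+x=matrix(sample(1:k,n*m,replace=TRUE),ncol=m)
+Data=list(k=k,x=x)
+Mcmc=list(R=1000,keep=1)
+out=rscaleUsage(Data=Data,Mcmc=Mcmc)
+cat(" posterior means of mu ",fill=TRUE)
+print(apply(out$mudraw,2,mean))
+}
+}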
+\keyword{ models }
diff --git a/man/rtrun.Rd b/man/rtrun.Rd
new file mode 100755
index 0000000..4ac24ad
--- /dev/null
+++ b/man/rtrun.Rd
@@ -0,0 +1,44 @@
+\name{rtrun}
+\alias{rtrun}
+\concept{truncated normal}
+\concept{simulation}
+
+\title{ Draw from Truncated Univariate Normal }
+\description{
+  \code{rtrun} draws from a truncated univariate normal distribution
+}
+\usage{
+rtrun(mu, sigma, a, b)
+}
+\arguments{
+  \item{mu}{ mean }
+  \item{sigma}{ sd }
+  \item{a}{ lower bound }
+  \item{b}{ upper bound }
+}
+\details{
+  Note that due to the vectorization of the pnorm, qnorm, and runif commands in R, all arguments can be vectors of 
+  equal length.  This makes the inverse CDF method the most efficient to use in R.
+}
+\value{
+  draw (possibly a vector)
+}
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Allenby, McCulloch, and Rossi, Chapter 2. \cr
+  \url{http://gsbwww.uchicago.edu/fac/peter.rossi/research/bsm.html}
+}
+
+\author{ Peter Rossi, Graduate School of Business, University of Chicago,
+  \email{Peter.Rossi at ChicagoGsb.edu}.
+}
+
+\section{Warning}{
+  This routine is a utility routine that does \strong{not} check the
+  input arguments for proper dimensions and type.
+}
+\examples{
+##
+set.seed(66)
+rtrun(mu=c(rep(0,10)),sigma=c(rep(1,10)),a=c(rep(0,10)),b=c(rep(2,10)))
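+## for illustration (a sketch, not part of the original example): the inverse
+## CDF method described in Details, written out directly with pnorm/qnorm/runif
+mu=rep(0,10); sigma=rep(1,10); a=rep(0,10); b=rep(2,10)
+pa=pnorm((a-mu)/sigma); pb=pnorm((b-mu)/sigma)
+mu + sigma*qnorm(pa + runif(10)*(pb-pa))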
+}
+\keyword{ distribution }
diff --git a/man/runireg.Rd b/man/runireg.Rd
new file mode 100755
index 0000000..16c5889
--- /dev/null
+++ b/man/runireg.Rd
@@ -0,0 +1,71 @@
+\name{runireg}
+\alias{runireg}
+\concept{bayes}
+\concept{regression}
+\concept{simulation}
+
+\title{ Draw from Posterior for Univariate Regression }
+\description{
+ \code{runireg} draws from posterior for univariate regression with a natural conjugate prior.
+}
+\usage{
+runireg(y, X, betabar, A, nu, ssq)
+}
+\arguments{
+  \item{y}{ n x 1 dep var  }
+  \item{X}{ n x k Design matrix }
+  \item{betabar}{ prior mean }
+  \item{A}{ k x k pds prior precision matrix }
+  \item{nu}{ d.f. parameter for sigma-sq prior }
+  \item{ssq}{ scale parameter for sigma-sq prior }
+}
+\details{
+  Model: \eqn{y = Xbeta + e}.  \eqn{e} \eqn{\sim}{~} \eqn{N(0,sigmasq)}. \cr
+
+  Priors: beta given sigmasq \eqn{\sim}{~} \eqn{N(betabar,sigmasq*A^{-1})}.\cr
+  \eqn{sigmasq} \eqn{\sim}{~} \eqn{(nu*ssq)/\chi^2_{nu}}.
+}
+\value{
+  a list with one draw from posterior
+  \item{beta }{beta draw}
+  \item{sigmasq }{sigmasq draw}
+}
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Allenby, McCulloch, and Rossi, Chapter 2. \cr 
+  \url{http://gsbwww.uchicago.edu/fac/peter.rossi/research/bsm.html}
+}
+
+\author{ Peter Rossi, Graduate School of Business, University of Chicago,
+  \email{Peter.Rossi at ChicagoGsb.edu}.
+}
+
+\section{Warning}{
+  This routine is a utility routine that does \strong{not} check the
+  input arguments for proper dimensions and type.
+}
+
+\seealso{ \code{\link{runiregGibbs}} }
+\examples{
+set.seed(66)
+n=100
+X=cbind(rep(1,n),runif(n)); beta=c(1,2); sigsq=.25
+y=X\%*\%beta+rnorm(n,sd=sqrt(sigsq))
+
+A=diag(c(.05,.05)); betabar=c(0,0)
+nu=3; ssq=1.0
+
+R=1000
+betadraw=matrix(double(R*2),ncol=2)
+sigsqdraw=double(R)
+for (rep in 1:R) 
+   {out=runireg(y,X,betabar,A,nu,ssq);betadraw[rep,]=out$beta
+    sigsqdraw[rep]=out$sigmasq}
+
+cat(" Betadraws ",fill=TRUE)
+mat=apply(betadraw,2,quantile,probs=c(.01,.05,.5,.95,.99))
+mat=rbind(beta,mat); rownames(mat)[1]="beta"; print(mat)
+cat(" Sigma-sq draws",fill=TRUE)
+cat(" sigma-sq= ",sigsq,fill=TRUE)
+print(quantile(sigsqdraw,probs=c(.01,.05,.5,.95,.99)))
+}
+\keyword{ regression }
diff --git a/man/runiregGibbs.Rd b/man/runiregGibbs.Rd
new file mode 100755
index 0000000..c05b03f
--- /dev/null
+++ b/man/runiregGibbs.Rd
@@ -0,0 +1,70 @@
+\name{runiregGibbs}
+\alias{runiregGibbs}
+\concept{bayes}
+\concept{Gibbs Sampler}
+\concept{regression}
+\concept{MCMC}
+
+\title{ Gibbs Sampler for Univariate Regression }
+\description{
+ \code{runiregGibbs} implements a Gibbs Sampler to draw from posterior for univariate regression with a conditionally conjugate prior.
+}
+\usage{
+runiregGibbs(Data, Prior, Mcmc)
+}
+\arguments{
+  \item{Data}{ list(y,X)}
+  \item{Prior}{ list(betabar,A, nu, ssq) }
+  \item{Mcmc}{ list(sigmasq,R,keep)}
+}
+\details{
+  Model: \eqn{y = Xbeta + e}.  \eqn{e} \eqn{\sim}{~} \eqn{N(0,sigmasq)}. \cr
+
+  Priors: \eqn{beta} \eqn{\sim}{~} \eqn{N(betabar,A^{-1})}.  \eqn{sigmasq} \eqn{\sim}{~} \eqn{(nu*ssq)/chisq_{nu}}.
+  List arguments contain  
+  \itemize{
+    \item{\code{X}}{Design Matrix}
+    \item{\code{y}}{n x 1 vector of observations} 
+    \item{\code{betabar}}{k x 1 prior mean (def: 0)}
+    \item{\code{A}}{k x k prior precision matrix (def: .01I)} 
+    \item{\code{nu}}{ d.f. parm for Inverted Chi-square prior}
+    \item{\code{ssq}}{ scale parm for Inverted Chi-square prior}
+    \item{\code{R}}{ number of MCMC draws }
+    \item{\code{keep}}{ thinning parameter - keep every keepth draw }
+  }
+}
+\value{
+  list of MCMC draws
+  \item{betadraw }{ R/keep x k array of betadraws }
+  \item{sigmasqdraw }{ R/keep vector of sigma-sq draws}
+}
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Allenby, McCulloch, and Rossi, Chapter 3. \cr
+  \url{http://gsbwww.uchicago.edu/fac/peter.rossi/research/bsm.html}
+}
+
+\author{ Peter Rossi, Graduate School of Business, University of Chicago,
+  \email{Peter.Rossi at ChicagoGsb.edu}.
+}
+
+\seealso{ \code{\link{runireg}} }
+\examples{
+set.seed(66)
+n=100
+X=cbind(rep(1,n),runif(n)); beta=c(1,2); sigsq=.25
+y=X\%*\%beta+rnorm(n,sd=sqrt(sigsq))
+
+A=diag(c(.05,.05)); betabar=c(0,0)
+nu=3; ssq=1.0
+
+R=1000
+Data=list(y=y,X=X); Mcmc=list(R=R,keep=1) ; Prior=list(A=A,betabar=betabar,nu=nu,ssq=ssq)
+out=runiregGibbs(Data=Data,Prior=Prior,Mcmc=Mcmc)
+cat(" Betadraws ",fill=TRUE)
+mat=apply(out$betadraw,2,quantile,probs=c(.01,.05,.5,.95,.99))
+mat=rbind(beta,mat); rownames(mat)[1]="beta"; print(mat)
+cat(" Sigma-sq draws",fill=TRUE)
+cat(" sigma-sq= ",sigsq,fill=TRUE)
+print(quantile(out$sigmasqdraw,probs=c(.01,.05,.5,.95,.99)))
+}
+\keyword{ regression }
diff --git a/man/rwishart.Rd b/man/rwishart.Rd
new file mode 100755
index 0000000..f87b7d0
--- /dev/null
+++ b/man/rwishart.Rd
@@ -0,0 +1,49 @@
+\name{rwishart}
+\alias{rwishart}
+\concept{Wishart distribution}
+\concept{Inverted Wishart}
+\concept{simulation}
+
+\title{ Draw from Wishart and Inverted Wishart Distribution  }
+\description{
+  \code{rwishart} draws from the Wishart and Inverted Wishart distributions.
+}
+\usage{
+rwishart(nu, V)
+}
+\arguments{
+  \item{nu}{ d.f. parameter}
+  \item{V}{ pds location matrix}
+}
+\details{
+  In the parameterization used here, \eqn{W} \eqn{\sim}{~} \eqn{W(nu,V)}, \eqn{E[W]=nuV}.  \cr
+
+  If you want to use an Inverted Wishart prior, you \emph{must invert the location matrix} 
+  before calling \code{rwishart}, e.g. \cr
+  \eqn{Sigma} \eqn{\sim}{~} IW(nu,V);  \eqn{Sigma^{-1}} \eqn{\sim}{~} \eqn{W(nu,V^{-1})}.
+}
+\value{
+  \item{W}{ Wishart draw }
+  \item{IW }{Inverted Wishart draw}
+  \item{C }{ Upper tri root of W}
+  \item{CI }{ inv(C), \eqn{W^{-1}} = CICI'}
+}
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Allenby, McCulloch, and Rossi, Chapter 2. \cr
+  \url{http://gsbwww.uchicago.edu/fac/peter.rossi/research/bsm.html}
+}
+
+\author{ Peter Rossi, Graduate School of Business, University of Chicago,
+  \email{Peter.Rossi at ChicagoGsb.edu}.
+}
+
+\section{Warning}{
+  This routine is a utility routine that does \strong{not} check the
+  input arguments for proper dimensions and type.
+}
+\examples{
+##
+set.seed(66)
+rwishart(5,diag(3))$IW
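+## a short sketch of the identity noted in Details: to draw Sigma ~ IW(nu,V)
+## for a general location matrix V, pass the inverse of V to rwishart
+V=2*diag(3)
+Sigma=rwishart(5,solve(V))$IW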
+}
+\keyword{ multivariate }
diff --git a/man/simmnl.Rd b/man/simmnl.Rd
new file mode 100755
index 0000000..dc6ce63
--- /dev/null
+++ b/man/simmnl.Rd
@@ -0,0 +1,42 @@
+\name{simmnl}
+\alias{simmnl}
+\concept{logit}
+\concept{mnl}
+\title{ Simulate from Multinomial Logit Model }
+\description{
+  \code{simmnl} simulates from the MNL model.
+}
+\usage{
+simmnl(j, n, beta)
+}
+\arguments{
+  \item{j}{ number of choice alternatives }
+  \item{n}{ number of observations }
+  \item{beta}{ MNL coefficient vector }
+}
+\details{
+  \code{simmnl} will simulate two uniformly distributed X vars and add intercepts.
+}
+
+\value{
+  \item{y}{ n x 1 vector of multinomial outcomes (1, \ldots, j)}
+  \item{X}{ n x k design matrix }
+  \item{beta}{beta vector }
+  \item{prob}{n x j array of choice probabilities }
+}
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Allenby, McCulloch, and Rossi. \cr
+  \url{http://gsbwww.uchicago.edu/fac/peter.rossi/research/bsm.html}
+}
+
+\author{ Peter Rossi, Graduate School of Business, University of Chicago,
+  \email{Peter.Rossi at ChicagoGsb.edu}.
+}
+\section{Warning}{
+  This routine is a utility routine that does \strong{not} check the
+  input arguments for proper dimensions and type.
+}
+
+\seealso{ \code{\link{llmnl}}, \code{\link{rmnlIndepMetrop}} }
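+\examples{
+##
+## a minimal sketch adapted from the rmnlIndepMetrop example in this package
+set.seed(66)
+simout=simmnl(3,200,c(1,-1,1.5,.5))
+table(simout$y)
+}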
+\keyword{ models }
diff --git a/man/simmnlwX.Rd b/man/simmnlwX.Rd
new file mode 100755
index 0000000..be71413
--- /dev/null
+++ b/man/simmnlwX.Rd
@@ -0,0 +1,36 @@
+\name{simmnlwX}
+\alias{simmnlwX}
+\title{ Simulate from MNL given X Matrix }
+\description{
+  \code{simmnlwX} simulates from the MNL model given an X matrix. 
+}
+\usage{
+simmnlwX(n, X, beta)
+}
+\arguments{
+  \item{n}{ number of obs }
+  \item{X}{ n*p x k Design matrix (p is number of choice alts)} 
+  \item{beta}{ k x 1 coefficient vector }
+}
+\value{
+  a list containing:
+  \item{y}{ n x 1 vector of multinomial outcomes (1, \ldots, nrow(X)/n)}
+  \item{X}{ Design matrix }
+  \item{beta}{ coefficient vector }
+  \item{prob}{ n x p array of choice probs }
+}
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Allenby, McCulloch, and Rossi. \cr
+  \url{http://gsbwww.uchicago.edu/fac/peter.rossi/research/bsm.html}
+}
+
+\author{ Peter Rossi, Graduate School of Business, University of Chicago,
+  \email{Peter.Rossi at ChicagoGsb.edu}.
+}
+
+\section{Warning}{
+  This routine is a utility routine that does \strong{not} check the
+  input arguments for proper dimensions and type.
+}
+\seealso{ \code{\link{simmnl}} }
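+\examples{
+##
+## a minimal sketch (not from the original docs): an arbitrary design matrix
+## with p=3 alternatives, k=2 covariates, and n=100 observations
+set.seed(66)
+n=100; p=3; k=2
+X=matrix(runif(n*p*k),ncol=k)
+simout=simmnlwX(n,X,c(1,-1))
+table(simout$y)
+}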
+\keyword{ models }
diff --git a/man/simmnp.Rd b/man/simmnp.Rd
new file mode 100755
index 0000000..c7fb500
--- /dev/null
+++ b/man/simmnp.Rd
@@ -0,0 +1,44 @@
+\name{simmnp}
+\alias{simmnp}
+\concept{simulation}
+\concept{probit}
+\concept{mnp}
+\title{ Simulate from Multinomial Probit Model }
+\description{
+  \code{simmnp} simulates from the multinomial probit model.
+}
+\usage{
+simmnp(X, p, n, beta, sigma)
+}
+\arguments{
+  \item{X}{ n*(p-1) x length(beta) Design matrix }
+  \item{p}{ number of choice alternatives }
+  \item{n}{ number of observations }
+  \item{beta}{ coefficient vector }
+  \item{sigma}{ (p-1) x (p-1) covariance matrix }
+}
+\value{
+  a list of 
+  \item{y}{n vector of multinomial (1, \ldots,p) outcomes}
+  \item{X}{Design matrix}
+  \item{beta}{ coefficients}
+  \item{sigma}{ covariance matrix }
+}
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Allenby, McCulloch, and Rossi, Chapter 4. \cr
+  \url{http://gsbwww.uchicago.edu/fac/peter.rossi/research/bsm.html}
+}
+
+\author{ Peter Rossi, Graduate School of Business, University of Chicago,
+  \email{Peter.Rossi at ChicagoGsb.edu}.
+}
+
+\section{Warning}{
+  This routine is a utility routine that does \strong{not} check the
+  input arguments for proper dimensions and type.
+}
+
+\seealso{ \code{\link{rmnpGibbs}} }
+
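+\examples{
+##
+## a short sketch adapted from the rmnpGibbs example in this package
+set.seed(66)
+p=3; n=500; beta=c(-1,1,1,2)
+Sigma=matrix(c(1,.5,.5,1),ncol=2)
+x1=runif(n*(p-1),min=-1,max=1); x2=runif(n*(p-1),min=-1,max=1)
+I2=diag(rep(1,p-1)); xadd=rbind(I2)
+for(i in 2:n) { xadd=rbind(xadd,I2)}
+X=cbind(xadd,x1,x2)
+simout=simmnp(X,p,n,beta,Sigma)
+table(simout$y)
+}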
+\keyword{ models }
diff --git a/man/simmvp.Rd b/man/simmvp.Rd
new file mode 100755
index 0000000..a3d19ed
--- /dev/null
+++ b/man/simmvp.Rd
@@ -0,0 +1,43 @@
+\name{simmvp}
+\alias{simmvp}
+\concept{probit}
+\concept{mvp}
+\title{ Simulate from Multivariate Probit Model }
+\description{
+  \code{simmvp} simulates from the multivariate probit model.
+}
+\usage{
+simmvp(X, p, n, beta, sigma)
+}
+\arguments{
+  \item{X}{ n*p x length(beta) Design matrix }
+  \item{p}{ dimension of the MVP}
+  \item{n}{ number of observations }
+  \item{beta}{ coefficient vector }
+  \item{sigma}{ p x p covariance matrix }
+}
+\value{
+  a list of 
+  \item{y}{p*n vector of 0/1 binary outcomes}
+  \item{X}{Design matrix}
+  \item{beta}{ coefficients}
+  \item{sigma}{ covariance matrix }
+}
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Allenby, McCulloch, and Rossi. \cr
+  \url{http://gsbwww.uchicago.edu/fac/peter.rossi/research/bsm.html}
+}
+
+\author{ Peter Rossi, Graduate School of Business, University of Chicago,
+  \email{Peter.Rossi at ChicagoGsb.edu}.
+}
+
+\section{Warning}{
+  This routine is a utility routine that does \strong{not} check the
+  input arguments for proper dimensions and type.
+}
+
+\seealso{ \code{\link{rmvpGibbs}} }
+
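+\examples{
+##
+## a short sketch adapted from the rmvpGibbs example in this package
+set.seed(66)
+p=3; n=500; beta=c(-2,0,2)
+Sigma=matrix(c(1,.5,.5,.5,1,.5,.5,.5,1),ncol=3)
+I2=diag(rep(1,p)); xadd=rbind(I2)
+for(i in 2:n) { xadd=rbind(xadd,I2)}; X=xadd
+simout=simmvp(X,p,n,beta,Sigma)
+table(simout$y)
+}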
+\keyword{ models }
diff --git a/man/simnhlogit.Rd b/man/simnhlogit.Rd
new file mode 100755
index 0000000..d0e13eb
--- /dev/null
+++ b/man/simnhlogit.Rd
@@ -0,0 +1,45 @@
+\name{simnhlogit}
+\alias{simnhlogit}
+\concept{logit}
+\concept{non-homothetic}
+\title{ Simulate from Non-homothetic Logit Model }
+\description{
+  \code{simnhlogit} simulates from the non-homothetic logit model
+}
+\usage{
+simnhlogit(theta, lnprices, Xexpend)
+}
+\arguments{
+  \item{theta}{ coefficient vector }
+  \item{lnprices}{ n x p array of prices }
+  \item{Xexpend}{ n x k array of values of expenditure variables}
+}
+\details{
+  For details on the parameterization, see \code{\link{llnhlogit}}.
+}
+\value{
+  a list containing: 
+  \item{y}{n x 1 vector of multinomial outcomes (1, \ldots, p)}
+  \item{Xexpend}{expenditure variables}
+  \item{lnprices}{ price array }
+  \item{theta}{coefficients}
+  \item{prob}{n x p array of choice probabilities}
+}
+
+\references{ For further discussion, see \emph{Bayesian Statistics and Marketing}
+  by Allenby, McCulloch, and Rossi. \cr
+  \url{http://gsbwww.uchicago.edu/fac/peter.rossi/research/bsm.html}
+}
+
+\author{ Peter Rossi, Graduate School of Business, University of Chicago,
+  \email{Peter.Rossi at ChicagoGsb.edu}.
+}
+
+\section{Warning}{
+  This routine is a utility routine that does \strong{not} check the
+  input arguments for proper dimensions and type.
+}
+
+\seealso{ \code{\link{llnhlogit}} }
+
+\keyword{ models }
diff --git a/src/bayesmc.c b/src/bayesmc.c
new file mode 100755
index 0000000..6d44ce9
--- /dev/null
+++ b/src/bayesmc.c
@@ -0,0 +1,347 @@
+#include <R.h>
+#include <Rmath.h>
+#include <math.h>
+
+void condmom(double *x, double *mu, double *sigi, int p, int j, double *m, double *csig)
+{
+/*	function to compute moments of x[j] | x[-j]  */
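+/*	descriptive note: with precision matrix sigi = Sigma^{-1} (stored column-major),
+	the conditional distribution of x[j] given x[-j] is normal with
+	mean  m    = mu[j] - (1/sigi[j,j]) * sum over i != j of sigi[i,j]*(x[i]-mu[i])
+	s.d.  csig = sqrt(1/sigi[j,j]),  which is what the loop below computes          */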
+
+	int ind,i,jm1;
+	double csigsq;
+	jm1=j-1;
+	ind = p*jm1;
+	csigsq = 1./sigi[ind+jm1];
+
+	*m = 0.0;
+	for (i=0 ; i < p; ++i)
+		{
+		if (i != jm1) 
+			{*m +=  - csigsq*sigi[ind+i]*(x[i]-mu[i]);}
+		}
+	*m=mu[jm1]+*m ;
+	*csig=sqrt(csigsq);
+}
+
+double rtrun(double mu, double sigma,double trunpt, int above) 
+{
+/*	function to draw truncated normal
+		above=1 means from above b=trunpt, a=-inf
+		above=0 means from below a=trunpt, b= +inf   
+*/
+	double FA,FB,rnd,result ;
+	if (above) {
+		FA=0.0; FB=pnorm(((trunpt-mu)/(sigma)),0.0,1.0,1,0);
+			}
+	else {
+		FB=1.0; FA=pnorm(((trunpt-mu)/(sigma)),0.0,1.0,1,0);
+		}
+	
+	GetRNGstate();
+	rnd=unif_rand();
+	result = mu + sigma*qnorm((rnd*(FB-FA)+FA),0.0,1.0,1,0);
+	PutRNGstate();
+	return result;
+}
+
+
+void drawwi(double *w, double *mu, double *sigmai,int *p, int *y)
+{
+/*	function to draw w_i by Gibbs sampling through the p vector   */
+
+	int i,j,above;
+	double bound;
+	double mean, csig;
+
+		for (i=0; i < *p; ++i) 
+		{	
+			bound=0.0;
+		    	for (j=0; j < *p ; ++j) 
+				{ if (j != i) {bound=fmax2(bound,w[j]); }}
+			if (*y == i+1) 	
+				above = 0;
+			else 
+				above = 1;
+
+		condmom(w,mu,sigmai,*p,(i+1),&mean,&csig);
+		w[i]=rtrun(mean,csig,bound,above);
+
+		}
+}
+
+void draww(double *w, double *mu, double *sigmai, int *n, int *p, int *y) 
+{
+/*	function to gibbs down entire w vector for all n obs  */
+	int i, ind;
+	for (i=0; i < *n; ++i)
+	{
+		ind= *p * i;
+		drawwi(w+ind,mu+ind,sigmai,p,y+i);
+	}
+}
+
+
+void drawwi_mvp(double *w, double *mu, double *sigmai,int *p, int *y)
+{
+/*	function to draw w_i for Multivariate Probit  */
+
+	int i,above;
+	double mean, csig;
+
+		for (i=0; i < *p; ++i) 
+		{	
+			if (y[i]) 	
+				above = 0;
+			else 
+				above = 1;
+
+		condmom(w,mu,sigmai,*p,(i+1),&mean,&csig);
+		w[i]=rtrun(mean,csig,0.0,above);
+
+		}
+}
+
+void draww_mvp(double *w, double *mu, double *sigmai, int *n, int *p, int *y) 
+{
+/*	function to gibbs down entire w vector for all n obs  */
+	int i, ind;
+	for (i=0; i < *n; ++i)
+	{
+		ind= *p * i;
+		drawwi_mvp(w+ind,mu+ind,sigmai,p,y+ind);
+	}
+}
+
+double root(double c1, double  c2, double *tol,int *iterlim)
+{
+/*	function to find root of c1 - c2u = lnu */
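+/*	descriptive note: this is a Newton iteration on f(u) = c1 - c2*u - log(u);
+	since f'(u) = -c2 - 1/u, the Newton update u - f(u)/f'(u) equals
+	u + u*(c1 - c2*u - log(u))/(1 + c2*u), as coded in the loop below        */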
+   int iter;
+   double uold, unew;
+   uold=1.;
+   unew=0.00001;
+   iter=0;
+   while (iter <= *iterlim && fabs(uold-unew) > *tol )
+      {
+      uold=unew;
+      unew=uold + (uold*(c1 -c2*uold -  log(uold)))/(1. + c2*uold); 
+      if(unew < 1.0e-50) unew=1.0e-50;
+      iter=iter+1;
+      }
+   return unew;
+}
+   
+void callroot(int *n,double *c1, double *c2, double *tol, int *iterlim,double *u)
+{
+   int i;
+   for (i=0;i < *n; ++i)
+   {
+	u[i]=root(c1[i],c2[i],tol,iterlim);
+   }
+}
+
+
+
+void ghk_oneside(double *L, double* trunpt, int *above, int *dim, int *n, double *res)
+/*	routine to implement ghk with a region defined by truncation only on one side
+ 						r mcculloch 8/04
+        if above=1, then we truncate component i from above at point trunpt[i-1]
+        L is lower triangular root of Sigma
+	random vector is assumed to have zero mean
+    	n is number of draws to use in GHK	
+*/
+{
+   int i,j,k;
+   double mu,tpz,u,prod,pa,pb;
+   double *z;
+   z = (double *)R_alloc(*dim,sizeof(double));
+   GetRNGstate();
+   *res = 0.0;
+   for(i=0;i<*n;i++) {
+      prod=1.0;
+      for(j=0;j<*dim;j++) {
+         mu=0.0; for(k=0;k<j;k++) mu += L[k*(*dim)+j]*z[k];
+	 tpz = (trunpt[j]-mu)/L[j*(*dim)+j];
+	 if(above[j]) {
+	    pa=0.0; pb = pnorm(tpz,0.0,1.0,1,0);   
+	 }
+	 else {
+	    pb=1.0; pa = pnorm(tpz,0.0,1.0,1,0);
+	 }
+	 prod *= pb-pa;
+	 u = unif_rand();
+	 z[j] = qnorm(u*pb + (1-u)*pa,0.0,1.0,1,0);
+      }
+      *res += prod;
+   }
+   *res /= (double)(*n);
+   PutRNGstate();
+}
+void ghk(double *L, double* a, double *b, int *dim, int *n, double *res)
+/*	routine to implement ghk with a region : a[i-1] <= x_i <= b[i-1]
+ 						r mcculloch 8/04
+        L is lower triangular root of Sigma
+	random vector is assumed to have zero mean
+    	n is number of draws to use in GHK	
+*/
+{
+   int i,j,k;
+   double aa,bb,pa,pb,u,prod,mu;
+   double *z;
+   z = (double *)R_alloc(*dim,sizeof(double));
+   GetRNGstate();
+   *res=0.0;
+   for(i=0;i<*n;i++) {
+      prod = 1.0;
+      for(j=0;j<*dim;j++) {
+         mu=0.0; for(k=0;k<j;k++) mu += L[k*(*dim)+j]*z[k];
+	 aa=(a[j]-mu)/L[j*(*dim)+j]; bb = (b[j]-mu)/L[j*(*dim)+j];
+	 pa = pnorm(aa,0.0,1.0,1,0); pb = pnorm(bb,0.0,1.0,1,0);
+	 prod *= pb-pa;
+	 u = unif_rand();
+	 z[j] = qnorm(u*pb + (1-u)*pa,0.0,1.0,1,0);
+      }
+      *res += prod;
+   }
+   *res /= (double)(*n);
+   PutRNGstate();
+}
+
+void ghk_vec(int *n,double *L, double *trunpt,int *above, int *dim, int *r, double *res)
+{
+/* routine to call ghk_oneside for n different truncation points stacked in to the
+   vector trunpt  -- puts n results in vector res
+                                          p rossi 12/04
+*/
+	int i, ind;
+	for (i=0; i < *n; ++i)
+	{
+		ind = *dim * i;
+		ghk_oneside(L,trunpt + ind,above,dim,r,res+i);
+	}
+}
+
+void cuttov(double *ut,double *v, int *dim)
+/*
+purpose: write upper triangular (ut) to vector (v), goes down columns, omitting zeros
+arguments:
+   ut: upper triangular matrix, stored as series of columns (including the zeros)
+   v: vector ut is copied to, on input must have correct length
+   dim: ut is dim x dim, v is dim*(dim+1)/2
+*/
+{
+   int ind=0;
+   int i,j;
+   for(i=0;i<(*dim);i++) {
+      for(j=0;j<=i;j++) {
+         v[ind] = ut[i*(*dim)+j];
+         ind += 1;
+      }
+   }
+}
+void cvtout(double *v, double *ut, int *dim)
+/*
+purpose: write vector (v) to upper triangular (inverse of cuttov above)
+arguments:
+   v: vector
+   ut: upper triangular matrix, columns stacked, zeros included
+   dim: ut is dim x dim, v is dim*(dim+1)/2
+*/
+{
+   int ind=0;
+   int i,j;
+   for(i=0;i<(*dim);i++) {
+      for(j=(i+1);j<(*dim);j++) ut[i*(*dim)+j]=0.0;
+      for(j=0;j<=i;j++) {
+         ut[i*(*dim)+j] = v[ind];
+         ind += 1;
+      }
+   }
+}
+void clmvn(double *x, double *mu, double *riv, int *dim, double *res)
+/*
+purpose:
+   calculate log of multivariate normal density 
+   evaluated at x
+   mean is mu, covariance matrix is t(R)%*%R, and riv is the vector version of the inverse of R
+arguments:
+   x: compute log(f(x))
+   mu, riv: x~N(mu,t(R)%*%R), riv is vector version of R^{-1}
+   dim: dimension of x
+   res: place to put result
+*/
+{
+   int i,j;
+   double sum = 0.0;
+   double prod = 1.0;
+   double z;
+   int ind = 0;
+   for(i=0;i<(*dim);i++) {
+      z = 0.0;
+      for(j=0;j<=i;j++) {z += riv[ind]*(x[j]-mu[j]); ind += 1;}
+      sum += z*z;
+      prod *= riv[ind-1];
+   }
+   *res = log(prod) -.5*sum;
+}
+void crdisc(double *p, int *res)
+/*
+purpose: draw from a discrete distribution
+arguments:
+   p: vector of probabilities
+   res: draw is in {1,2,...length(p)}, giving the draw's category
+*/
+{
+   double u,sum;
+   GetRNGstate();
+   u = unif_rand();
+   *res = 1;
+   sum = p[*res -1];
+   while(sum<u) {sum += p[*res];(*res) +=1;}
+   PutRNGstate();
+}
+void crcomp(double *x, double *mu, double *riv, double *p, int *dim, int *nc, int *res)
+/*
+purpose: draw component of x, where x is drawn from a mixture of multivariate normal components
+arguments:
+   x: observed vector x
+   mu: matrix of class means, each column gives a mean vector
+   riv: matrix of class covariances, each column gives a vector version of R^{-1}, Sigma = t(R)%*%R
+   p: prior class probabilities
+   dim: dimension of x (and mu, and Sigma)
+   nc: number of classes
+   res: result
+note:
+   mu is column stacked version of a dim x nc matrix
+   riv is column stacked version of a dim*(dim+1)/2 x nc matrix
+*/
+{
+   double *post;
+   double max,sum;
+   int dim_riv = (*dim)*((*dim)+1)/2;
+   int i;
+   post = (double *)R_alloc(*nc,sizeof(double));
+   clmvn(x,mu,riv,dim,post);
+   max = *post;
+   for(i=1;i<(*nc);i++) {
+      clmvn(x,mu+i*(*dim),riv+i*dim_riv,dim,post+i);
+      if(*(post+i) > max) max = *(post+i);
+   }
+   sum = 0.0;
+   for(i=0;i<(*nc);i++) { post[i] = exp(post[i]-max)*p[i]; sum += post[i];}
+   for(i=0;i<(*nc);i++) post[i] /= sum;
+   crdisc(post,res);
+}
+void crcomps(double *x, double *mu, double *riv, double *p, int *dim, int *nc, int *nob, int *res)
+/*
+purpose: x represents a matrix whose columns are draws from a normal mixture; draw component membership for each column
+arguments:
+   all the same as crcomp, except x is now column stacked version of dim x nob matrix
+   and nob is the number of observations
+   res is now of length nob
+*/
+{
+   int i;
+   for(i=0;i<(*nob);i++) {
+      crcomp(x+i*(*dim),mu,riv,p,dim,nc,res+i);
+   }
+}
+
diff --git a/src/bayesmcpp.cpp b/src/bayesmcpp.cpp
new file mode 100755
index 0000000..37d769a
--- /dev/null
+++ b/src/bayesmcpp.cpp
@@ -0,0 +1,80 @@
+// R.McCulloch, 12/04  code for scale usage R function (rScaleUsage)
+#include <iostream>
+#include <algorithm>
+
+extern "C" {
+#include <R.h>
+#include <Rmath.h>
+};
+
+extern "C" void getC(double *ep,int *kp, double *m1p, double *m2p,  double *c);
+extern "C" void dy(int *p, int *nob,  double *y, int *x, double *c, double *mu, double *beta, double *s, double *tau, double *sigma);
+
+void getC(double *ep,int *kp, double *m1p, double *m2p, double *c)
+{
+   double e = *ep;
+   int k = *kp;
+   double m1 = *m1p;
+   double m2 = *m2p;
+
+   //first sum to get s's; this is a bit wasteful since it only needs to be done
+   //once, but I don't want to see these things anywhere else and it should take no time
+   double s0 = (double)(k-1);
+   double s1=0.0,s2=0.0,s3=0.0,s4=0.0;
+   for(int i=1;i<k;i++) {s1+=i; s2+=i*i; s3+= i*i*i; s4+=i*i*i*i;}
+
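+   // descriptive note: given e, the quadratic below chooses a and b so that the
+   // interior cutoffs c_d = a + b*d + e*d*d, d=1,...,k-1, satisfy
+   // sum(c_d) = m1 and sum(c_d^2) = m2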
+   // now make quadratic for b (just as in Peter's code)
+   double aq = s0*s2-s1*s1;
+   double bq = 2*e*s0*s3-2*e*s1*s2;
+   double cq = m1*m1 - m2*s0 + e*e*s0*s4 - e*e*s2*s2;
+
+   //get a and b
+   double det = bq*bq - 4*aq*cq;
+   if(det<0) std::cout << "error: no solution for c's given e and m1, m2" << std::endl;
+   double b=(-bq+sqrt(det))/(2.0*aq);
+   double a=(m1-b*s1-e*s2)/s0;
+
+   //make c
+   c[0]= -1000.0;
+   c[k]= 1000.0;
+   for(int i=1;i<k;i++) c[i] = a+b*i+e*i*i;
+   
+   std::sort(c,c+k+1);
+
+}
+
+
+
+void d1y(int p, double *y, int *x, double *c, double *mu, double *beta, double *s, double tau, double sigma)
+{
+   //std::cout << "int main of d1y" << std::endl;
+
+   GetRNGstate();
+   double cm,cs; //cm = conditional mean, cs = conditional standard deviation
+   double u;    // uniform for truncated normal draw
+   double a,b;  // standardized truncation points
+   double pa,pb; // cdf at truncation points
+
+   //loop over coordinates of y
+   for(int i=0;i<p;i++) {
+      //compute conditional mean and standard deviation
+      cs = s[i]*sigma;
+      cm = mu[i]+tau;
+      for(int j=0;j<i;j++) cm += (*(beta+i*(p-1)+j))*(y[j]-mu[j]-tau);
+      for(int j=(i+1);j<p;j++) cm += (*(beta+i*(p-1)+j-1))*(y[j]-mu[j]-tau);
+      //draw truncated normal
+      // y ~ N(cm,cs^2) truncated to the interval [ c[x[i]-1], c[x[i]] )
+      a = (c[x[i]-1]-cm)/cs;  b = (c[x[i]]-cm)/cs;
+      pa = pnorm(a,0.0,1.0,1,0); pb = pnorm(b,0.0,1.0,1,0);
+      u = unif_rand();
+      y[i] = cm + cs*qnorm(u*pb + (1-u)*pa,0.0,1.0,1,0);
+   }
+   PutRNGstate();
+}
+
+void dy(int *p, int *nob, double *y, int *x, double *c, double *mu, double *beta, double *s, double *tau, double *sigma)
+{
+   for(int i=0;i<(*nob);i++) {
+      d1y(*p,y+i*(*p),x+i*(*p),c,mu,beta,s,*(tau+i),*(sigma+i));
+   }
+}

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/r-cran-bayesm.git



More information about the debian-science-commits mailing list