[r-cran-mcmcpack] 71/90: Imported Upstream version 1.3-2

Andreas Tille tille at debian.org
Fri Dec 16 09:07:50 UTC 2016


This is an automated email from the git hooks/post-receive script.

tille pushed a commit to branch master
in repository r-cran-mcmcpack.

commit a6c3f2d7866cef6f98dd9dd9235a2ec0d5699fea
Author: Andreas Tille <tille at debian.org>
Date:   Fri Dec 16 08:07:24 2016 +0100

    Imported Upstream version 1.3-2
---
 DESCRIPTION                      |   12 +-
 HISTORY                          |   22 +-
 LICENSE.note                     |   33 -
 MD5                              |   66 +-
 NAMESPACE                        |    3 +
 R/MCMCintervention.R             |  231 ++++++
 R/MCMCpoissonChange.R            |   50 +-
 R/MCMCregress.R                  |   20 +-
 R/MCMCregressChange.R            |  170 ++++
 R/btsutil.R                      |  105 +++
 README                           |   11 +-
 man/MCMCintervention.Rd          |  209 +++++
 man/MCMCoprobitChange.Rd         |    2 +
 man/MCMCpoissonChange.Rd         |   15 +-
 man/MCMCregress.Rd               |   20 +-
 man/MCMCregressChange.Rd         |  205 +++++
 man/plotIntervention.Rd          |   37 +
 src/HMMmultivariateGaussian.cc   |   16 +-
 src/HMMpanelFE.cc                |   14 +-
 src/HMMpanelRE.cc                |   18 +-
 src/MCMCbinaryChange.cc          |   11 +-
 src/MCMCdynamicIRT1d-b.cc        |    4 +-
 src/MCMCdynamicIRT1d.cc          |    4 +-
 src/MCMChierBetaBinom.cc         |   10 +-
 src/MCMChlogit.cc                |    2 +-
 src/MCMCintervention.cc          | 1664 ++++++++++++++++++++++++++++++++++++++
 src/MCMCirtKdRob.cc              |    6 +-
 src/MCMCoprobit.cc               |    8 +-
 src/MCMCoprobitChange.cc         |   12 +-
 src/MCMCpoissonChange.cc         |   13 +-
 src/MCMCprobitChange.cc          |   16 +-
 src/MCMCregress.cc               |    4 +-
 src/MCMCregressChange.cc         |  513 ++++++++++++
 src/MCMCresidualBreakAnalysis.cc |   12 +-
 src/distributions.h              |  308 +++----
 src/matrix.h                     |  344 ++++----
 src/rng.h                        |  274 +++----
 src/stat.h                       |    2 +-
 38 files changed, 3829 insertions(+), 637 deletions(-)

diff --git a/DESCRIPTION b/DESCRIPTION
index 547507e..c36d366 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,9 +1,9 @@
 Package: MCMCpack
-Version: 1.2-4.1
-Date: 2012-6-13
+Version: 1.3-2
+Date: 2013-4-19
 Title: Markov chain Monte Carlo (MCMC) Package
 Author: Andrew D. Martin, Kevin M. Quinn, and Jong Hee Park
-Maintainer: Jong Hee Park <jhp at uchicago.edu>
+Maintainer: Jong Hee Park <jongheepark at snu.ac.kr>
 Depends: R (>= 2.10.0), coda (>= 0.11-3), MASS, stats
 Description: This package contains functions to perform Bayesian
         inference using posterior simulation for a number of
@@ -18,7 +18,7 @@ Description: This package contains functions to perform Bayesian
 License: GPL-3
 SystemRequirements: gcc (>= 4.0)
 URL: http://mcmcpack.wustl.edu
-Packaged: 2013-04-06 17:27:35 UTC; ripley
-Repository: CRAN
-Date/Publication: 2013-04-07 00:05:40
+Packaged: 2013-04-19 15:12:42 UTC; jongheepark
 NeedsCompilation: yes
+Repository: CRAN
+Date/Publication: 2013-04-19 19:53:47
diff --git a/HISTORY b/HISTORY
index a8f4609..35de0b8 100644
--- a/HISTORY
+++ b/HISTORY
@@ -1,9 +1,27 @@
 //
 // Changes and Bug Fixes
 //
+
+1.3-1 to 1.3-2
+  * fixed two mistakes in marginal likelihood computation in MCMCregress
+    (thanks to Sid Chib). In computing log_sigma2, 
+      - exp(digamma) -> digamma
+      - averaging over tot_iter -> averaging over nstore
+  * added MCMCintervention(), MCMCregressChange(), and plotIntervention()
+  * sigma.mu and sigma.var are added to MCMCregress()
+  * lambda.mu and lambda.var are added to MCMCpoissonChange()
+  * fixed minor mistakes and changed expressions for a recent version of g++
+    (thanks to Jeffrey Oldham)
+
+1.2-4 to 1.3-1
+  * fixed many C++ issues related to clang and Solaris (many thanks to
+     Brian Ripley for his significant contributions)
+
 1.2-3 to 1.2-4
-  * fixed a bug in HMMmultivariateGaussian.cc (comparison of unsigned and singed integers)
-  * cleaned up g++ 4.7 compatibility issues (thanks to Dan Pemstein and Chris Lawrence)
+  * fixed a bug in HMMmultivariateGaussian.cc (comparison of unsigned and
+    signed integers)
+  * cleaned up g++ 4.7 compatibility issues (thanks to Dan Pemstein and
+    Chris Lawrence)
 
 1.2-2 to 1.2-3
   * fixed extraneous warnings in hidden.R (thanks to Jeffrey Arnold)
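
A note on the log_sigma2 fix above: when the Chib (1995) marginal likelihood needs
a quantity like E[log sigma^2] under an inverse-Gamma(c1/2, d1/2) conditional, the
digamma term enters on the log scale (no exp()), and the average runs over the
nstore retained draws rather than over all burnin+mcmc iterations.  A minimal R
sketch with hypothetical variable names (not the actual MCMCregress.cc code):

## c1, d1: hypothetical length-nstore vectors holding the conditional
## inverse-Gamma parameters saved at each retained draw
logsigma2.hat <- function(c1, d1) {
  ## E[log X] for X ~ inverse-Gamma(shape = a, scale = b) is log(b) - digamma(a)
  mean(log(d1 / 2) - digamma(c1 / 2))   # average over nstore, not tot_iter
}
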
diff --git a/LICENSE.note b/LICENSE.note
deleted file mode 100644
index 0ea20ef..0000000
--- a/LICENSE.note
+++ /dev/null
@@ -1,33 +0,0 @@
-Markov chain Monte Carlo Package (MCMCpack)
-Copyright (C) 2003-2007 Andrew D. Martin and Kevin M. Quinn
-Copyright (C) 2007-present Andrew D. Martin, Kevin M. Quinn,
-   and Jong Hee Park
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program; if not, write to the Free Software
-Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-Please contact:
-
-Andrew D. Martin, Ph.D.
-Professor and CERL Director, School of Law
-Professor, Political Science, Arts & Sciences
-Washington University in St. Louis
-
-(314) 935-5863 (Office)
-(314) 935-5856 (Fax)
-
-Email: admartin at wustl.edu
-WWW: http://adm.wustl.edu
-
-With any problems or questions.
diff --git a/MD5 b/MD5
index 69341c3..15851e5 100644
--- a/MD5
+++ b/MD5
@@ -1,7 +1,6 @@
-98e2f9cc509c381facf4e84506abde9c *DESCRIPTION
-15bdf8ae0bbca2e2fb31ab9a8f202a0e *HISTORY
-45b74f9b9cce80e9215266776a1be85c *LICENSE.note
-4851e0ecf23d44b58fcd53e597210689 *NAMESPACE
+27f74cbcc96af526b73ea978bdce8cb2 *DESCRIPTION
+c726ff27322432e935244da14dcd1c1c *HISTORY
+b1382cb26ee38f7a860c379826362e50 *NAMESPACE
 1eaf24e8d468ac0b624a9aa90cded306 *R/BayesFactors.R
 9c52f72ee21a71bfbfd312cb56233125 *R/HMMpanelFE.R
 ca0b3fff125085e13069fae3f237e5ed *R/HMMpanelRE.R
@@ -16,6 +15,7 @@ dcd83d013877c39ace4ca33c4357c779 *R/MCMChierBetaBinom.R
 4b95a74aa87e670a88106d4661b6e833 *R/MCMChlogit.R
 e2b64d567aa2e22410251a753f0b55c6 *R/MCMChpoisson.R
 0a759c18d9f3b280dbe6090c92e20e35 *R/MCMChregress.R
+e4ece8084610294f593ca5fc632dd432 *R/MCMCintervention.R
 a4db065464be685b498d341ecc3c2109 *R/MCMCirt1d.R
 78b6d6361c4e9b3056226d66c50a3479 *R/MCMCirtHier1d.R
 4b433d0cace650f7304ff9a94e69ad4a *R/MCMCirtKd.R
@@ -29,18 +29,19 @@ a6508dd38804705e750995ca27070cde *R/MCMCmnl.R
 6b63259803b2890eae6d5778592ed7dd *R/MCMCoprobitChange.R
 be0928d3cd74e5485b1e26a9f0a15c59 *R/MCMCordfactanal.R
 d438fc9dabd9993da5d4a45a3df68c39 *R/MCMCpoisson.R
-5bf87e428e3835f6f73d13fc432b36e5 *R/MCMCpoissonChange.R
+017cf7ca0c8f9fb15b62d6b371070df6 *R/MCMCpoissonChange.R
 b98bdefca76d3316c664bcb2ba2bcc38 *R/MCMCprobit.R
 31fdcb514ab53ba258a581536fcbda60 *R/MCMCprobitChange.R
 e27dcf535f23c25018bb778b61640f22 *R/MCMCquantreg.R
-4b0822eb63ca0687af1c99ddc8f9c01d *R/MCMCregress.R
+fe5ffab10fce83348751c3bd7abaa83a *R/MCMCregress.R
+868cfbf83aef7ee5009e78e5c0bc36f7 *R/MCMCregressChange.R
 510f86a50f94be5b4bbd76cd361025f4 *R/MCMCresidualBreakAnalysis.R
 720834bbb59d6c4ce7c610acd22558be *R/MCMCtobit.R
 4d2218bcdb4715ac9d791f26d5469f9b *R/MCmodels.R
 3a98b4d919a265b46c29d14310bb90d4 *R/SSVSquantreg.R
 555c87acdcc2f8572b14151e29022e20 *R/SSVSquantregsummary.R
 fdfcfe9909cadaf15c47bbb604be2257 *R/automate.R
-ad65af39e4dc71e5a2facf7d5ad62317 *R/btsutil.R
+49a5845b1a235ed9e931f8f57800eb71 *R/btsutil.R
 56829ce5340d26f9488b8b9894ff5cd0 *R/distn.R
 18daa9fee800c7d8d5827405738531e2 *R/hidden-hmodels.R
 4dc19c1cb7809d9be5e7353929358aa1 *R/hidden.R
@@ -52,7 +53,7 @@ f799711e5d3fb7140cbc0995360339b3 *R/procrust.R
 c67ac5238de55cdc8c49d017e0226ed9 *R/tomog.R
 6377da3453982f578df4a886f673122c *R/utility.R
 7e2021def8a88edb41fe78d0a0ae642c *R/zzz.R
-e42f762c27e26e0ace372c03c47ac4e5 *README
+7bf94529cc64482070ef07fa6805012c *README
 46e955baeb55718a380a11fb34d2ea67 *cleanup
 ce8d6bc2b702996ba91799bc33b82840 *configure
 21e1a006fb388ed39daf16e6d2ec9549 *configure.ac
@@ -74,6 +75,7 @@ c3e38d2c8a1adee2ff096056416ca6ec *man/HMMpanelFE.Rd
 00a29263d43f0e2006de60d4800f5f7f *man/MCMChlogit.Rd
 b894aa07a0e68f9ffdeb3b09bab0aee2 *man/MCMChpoisson.Rd
 8d9c22e8d965d60581ec837803790c80 *man/MCMChregress.Rd
+9afab254ca5f275f437f7b22ab798b5a *man/MCMCintervention.Rd
 52f2cb9bc1cde5e38d35aa5d9858be4a *man/MCMCirt1d.Rd
 336682b062ff43261f0e75a4b51a34d8 *man/MCMCirtHier1d.Rd
 0d6b55f1f6062483b894fd4f4e9cecfd *man/MCMCirtKd.Rd
@@ -84,14 +86,15 @@ b894aa07a0e68f9ffdeb3b09bab0aee2 *man/MCMChpoisson.Rd
 751e56edabc12da2d8246f7c34cd7613 *man/MCMCmixfactanal.Rd
 b7dfcf274126008379b0ee903f134b25 *man/MCMCmnl.Rd
 cf07e2c9f307215c8fe841f5381b62f8 *man/MCMCoprobit.Rd
-1556620313c5a4ca175fbe0b68f84161 *man/MCMCoprobitChange.Rd
+0ac0dc970781b60cecc8a7a99ce8e6ad *man/MCMCoprobitChange.Rd
 a491294e97dd0b7aa5d0de525542ea8f *man/MCMCordfactanal.Rd
 03f2e93e6876a4f0e87b3469bfe38d2f *man/MCMCpoisson.Rd
-c80d0b7b63dbaf0df8e23d78dd9e65cc *man/MCMCpoissonChange.Rd
+7da98cdb1e3ec5ed459db403783309f1 *man/MCMCpoissonChange.Rd
 1917e296040b80c92525e5cb8ad02e71 *man/MCMCprobit.Rd
 c3631c612ccc2d8975a7e164c8506331 *man/MCMCprobitChange.Rd
 2f9caf8983e405f912eb5b25f29784eb *man/MCMCquantreg.Rd
-e1852779dbd3598219f162860d60e8f5 *man/MCMCregress.Rd
+94f027a39b5a875cf5c54757a093f144 *man/MCMCregress.Rd
+65de16850240fd21bfe6e62c63fc5c10 *man/MCMCregressChange.Rd
 02144cc6cca72312f4eafb013cfe23c3 *man/MCMCresidualBreakAnalysis.Rd
 c314e36afdd60449cf4f8ec08e00f80d *man/MCMCtobit.Rd
 a96eacae6b64827e40505661fb636cd4 *man/MCbinomialbeta.Rd
@@ -116,6 +119,7 @@ a5378f49cfb5f7ae58439b828e10fb84 *man/make.breaklist.Rd
 629023d17bc9299449eec0b74a73340d *man/mptable.Rd
 96ab1db8e6a5fb7c6640f06dd95a162c *man/noncenhypergeom.Rd
 170bc2168de62e08f7157a8cbc635e06 *man/plotChangepoint.Rd
+34999350c3ac48a2b5dbbb7766e5d463 *man/plotIntervention.Rd
 c64abc923f2c6f56a167a815c881ffb1 *man/plotState.Rd
 6a88d877e88b5abfd7a66d659c9b4e6a *man/procrust.Rd
 41feb17dda3b00cffa2f6176ba584b74 *man/readscythe.Rd
@@ -127,25 +131,26 @@ a4c560360ba912bb342a429e434bcc96 *man/vech.Rd
 8699162f8e39a03d96dd633866f8c4ee *man/wishart.Rd
 9d56915bf90694106df6586cec1bada0 *man/writescythe.Rd
 ee8f580a79a520bbdf458410b49aae2a *man/xpnd.Rd
-0885994bdcc29ac01c2945170660ee5a *src/HMMmultivariateGaussian.cc
-16ec2efa868e3f0339d32bfc4917b0cd *src/HMMpanelFE.cc
-37113b38bf4693823d6a05cc8e3d2e99 *src/HMMpanelRE.cc
+40b362dda564acce80bffa4748808bf3 *src/HMMmultivariateGaussian.cc
+9c0015db15e56f1028564a23b3a2b3ee *src/HMMpanelFE.cc
+46a172ae3349a7724c02a2d5ab001e74 *src/HMMpanelRE.cc
 1c68060796310df44a049e66f10b14f7 *src/MCMCSVDreg.cc
-6474ba3567cea0fc10d61bbdc35c649a *src/MCMCbinaryChange.cc
+659155a4036b2f0e6da9e07440d1cd3e *src/MCMCbinaryChange.cc
 c6353855b63e402d7e60d248b22e6f50 *src/MCMCdynamicEI.cc
-583345a23401484a6ba5f0b0b206c408 *src/MCMCdynamicIRT1d-b.cc
-5caf9c1724b4e023483fe45ad247f2f2 *src/MCMCdynamicIRT1d.cc
+35b5469f5e52a965f78cad71c21582c3 *src/MCMCdynamicIRT1d-b.cc
+5fa9b4a2d06646a79042296a62431d3d *src/MCMCdynamicIRT1d.cc
 b3323f678776ada7c1fdf0f400be12e0 *src/MCMCfactanal.cc
 2745b72494da22ef8e0f62553011bbc9 *src/MCMCfcds.h
-8e5750dd27556aa3cc6a5ba4858889ca *src/MCMChierBetaBinom.cc
+db0074b59394f540436a57cbe7d03544 *src/MCMChierBetaBinom.cc
 d4f36358e358d81d2d432c8cf23cc33d *src/MCMChierEI.cc
-4221e7cbdb9fbd6a5b6ce16955e88fb5 *src/MCMChlogit.cc
+c938af28648eb358458f111d52119052 *src/MCMChlogit.cc
 ab7fc8d513b7c66d9b777af3fda19133 *src/MCMChpoisson.cc
 ea2d9f60541c419765dde67824ca9139 *src/MCMChregress.cc
+d0c3136bef3a5243dd984dab7f2df5ab *src/MCMCintervention.cc
 fa7404ccc4ce053d13b2e0b9a86819b8 *src/MCMCirt1d.cc
 abdf98660af4bed11d89e04074d9f1d1 *src/MCMCirtHier1d.cc
 ac9b6d3f588b46cf009816bf028ee5ed *src/MCMCirtKdHet.cc
-b6c0ff1884472c0e297b7d6770d61ca0 *src/MCMCirtKdRob.cc
+128b3fffee2679d9ae85e844ed9b522c *src/MCMCirtKdRob.cc
 82710c136632704b70c71cc8ee5dca5c *src/MCMClogit.cc
 8e8ab807995eedcd108f11ad639475a9 *src/MCMClogituserprior.cc
 d886c33fcacd18ba4435f4171159824f *src/MCMCmetrop1R.cc
@@ -153,17 +158,18 @@ af5765bb03517864b5d29b8c4b594ad2 *src/MCMCmixfactanal.cc
 eaab5804f229b660e958f6aa2cf728ba *src/MCMCmnl.h
 97c3e9b9e2d034ff9bd12fd1e1c81466 *src/MCMCmnlMH.cc
 9f0c3eea4d8ecafe8ab931c12d1616f9 *src/MCMCmnlslice.cc
-a78375184abf7c97bf7fbf3f7614cad3 *src/MCMCoprobit.cc
-387f517e5df0bde2b483c8986ce10983 *src/MCMCoprobitChange.cc
+0d7b2c13371662f7cf901ef5a7ec00a3 *src/MCMCoprobit.cc
+bdd18ead0856bbff8587b11639945699 *src/MCMCoprobitChange.cc
 6addbe81efb51cb4b8401e9a6931f782 *src/MCMCordfactanal.cc
 6bfed80bb7c109537461e6381837b6c4 *src/MCMCpoisson.cc
-73fb68d449004927f02a4bbf764258f6 *src/MCMCpoissonChange.cc
+b93e764e46e07581bd576b75597beecc *src/MCMCpoissonChange.cc
 84b94f6e67b026a915a23fb85bdaf00b *src/MCMCprobit.cc
-8a2f4a79c6bc951310074f5757041424 *src/MCMCprobitChange.cc
+e2239f7d7503dc4eca812034f4bc1da8 *src/MCMCprobitChange.cc
 a6a01c3906a8b92b9f1a12cfa92a1a63 *src/MCMCprobitres.cc
 806af936660056856d4b908f8fb0cfa4 *src/MCMCquantreg.cc
-aedab719c419ce9d80f534e967d2648d *src/MCMCregress.cc
-1cab26aaeea8a8b701325f41aadff24c *src/MCMCresidualBreakAnalysis.cc
+c89523a7da37782992fe91d8364b1cd3 *src/MCMCregress.cc
+3fa981b1ba947770ff834c45237be2b1 *src/MCMCregressChange.cc
+cfbd489b6fd8886e262a57ca9629c6da *src/MCMCresidualBreakAnalysis.cc
 ef5566f824887d9bbef5b76f1b64d839 *src/MCMCrng.h
 f28dc81e54a6ca54a2202a0d6f52ed40 *src/MCMCtobit.cc
 b3e224bb1a8452c3da347348b8e510cc *src/Makevars
@@ -172,21 +178,21 @@ b3e224bb1a8452c3da347348b8e510cc *src/Makevars
 5362ed41a42ce34cf7817f6bfd988935 *src/algorithm.h
 80eab6b8072ae34de6c192af41324093 *src/datablock.h
 5da8702c23b61c037f61dccebec10838 *src/defs.h
-457857685244c7b4993075b5a8e776eb *src/distributions.h
+62288e8cb29f90983d0ab28852cf5ba7 *src/distributions.h
 b3e55dce9080169d7161c8a182f0c70a *src/error.h
 0b1465cd303a39f5bd6ccb327e1e4b1f *src/ide.h
 822c244edb53be2f4f8b1e831abe16fb *src/la.h
 41a6caea698666bf78dda6d4d8b923e8 *src/lapack.h
 cfc7f8d03a981a3453f81f2bd67d02c5 *src/lecuyer.cc
 53cd167ad58abffbb0f155da45e6385a *src/lecuyer.h
-45d0ce718fa5b6071a8abd2e9d97958b *src/matrix.h
+0e84b5d3b0cd6c3d61f9ae9ab4bf3f64 *src/matrix.h
 7ab16dbb6757a5de37dc7de9867a3c10 *src/matrix_bidirectional_iterator.h
 f924b218cd6eaed80846ed0cba534b03 *src/matrix_forward_iterator.h
 3dc8b8e303daa7ffdf3e2d29b2fa43d9 *src/matrix_random_access_iterator.h
 b0f0c5a5c5197df32d18b77af2b85957 *src/mersenne.h
 a111bb9e9e66be7cb044ad27ea675124 *src/optimize.h
-72aba83ca5fb6450350a439fd18f139d *src/rng.h
+e6ce586d7974b548aead641795ad9ca4 *src/rng.h
 b48baf77b93da895a04154ef15c8683f *src/rtmvnorm.h
 9a1656760820e50605248198302c308c *src/smath.h
-012f34388c3a3b9f51b23462e51d2dd0 *src/stat.h
+a5574b5829f3cab30cd73eb26725ae68 *src/stat.h
 ca47c74890139220fcf9dddd01d33c3c *src/wrapped_generator.h
diff --git a/NAMESPACE b/NAMESPACE
index 2910a27..0eb1506 100644
--- a/NAMESPACE
+++ b/NAMESPACE
@@ -26,6 +26,7 @@ export(
        MCMChlogit,
        MCMChpoisson,
        MCMChregress,
+       MCMCintervention,
        MCMCirt1d,
        MCMCirtHier1d,
        MCMCirtKd,
@@ -44,12 +45,14 @@ export(
        MCMCprobitChange,
        MCMCquantreg,
        MCMCregress,
+       MCMCregressChange,
        MCMCSVDreg,
        MCMCtobit,
        make.breaklist,
        mptable,
        plotState,
        plotChangepoint,
+       plotIntervention,
        PostProbMod,
        procrustes,
        rdirichlet,
diff --git a/R/MCMCintervention.R b/R/MCMCintervention.R
new file mode 100644
index 0000000..64e66cd
--- /dev/null
+++ b/R/MCMCintervention.R
@@ -0,0 +1,231 @@
+#########################################################
+## Intervention Analysis using Changepoint Model
+#########################################################
+
+"MCMCintervention"<-
+  function(y, data=parent.frame(),  m = 1, intervention = 1,
+           prediction.type=c("trend", "ar"),
+           change.type = c("fixed", "random", "all"),
+           b0 = 0, B0 = 0, c0 = 0.001, d0 = 0.001, sigma.mu = NA, sigma.var = NA,
+           a = NULL, b = NULL,
+           mcmc = 1000, burnin = 1000,  thin = 1, verbose = 0, 
+           seed = NA, beta.start = NA, P.start = NA,
+           marginal.likelihood = c("none", "Chib95"), ...){
+    
+    ## form response and model matrices
+    y <- as.vector(y)
+    n <- length(y)
+    ns <- m + 1 # number of states
+    if (prediction.type == "trend"){
+      X <- matrix(cbind(1, c(1:n)), n, 2)
+      xnames <- c("constant", "trend") 
+    }
+    else if (prediction.type == "ar"){
+      y1 <- y[1:(n/2)]
+      ar1 <- arima(y1, c(1,0,0))
+      muy0 <- as.numeric(ar1$coef[2])
+      sigmay0 <- sqrt(as.numeric(as.numeric(ar1[2])/(1 - ar1$coef[1]^2)))
+      y0 <- rnorm(1, muy0, sigmay0)
+      X <- matrix(cbind(1, c(y0, y[-n])), n, 2)
+      xnames <- c("constant", "lag.y") 
+    }
+    else {
+      X <- matrix(1, n, 1)
+      xnames <- c("constant") 
+    }
+    k <- ncol(X)    # number of covariates
+    
+    ## check iteration parameters
+    check.mcmc.parameters(burnin, mcmc, thin)
+    totiter <- mcmc + burnin
+    nstore <- mcmc/thin    
+    cl <- match.call()
+
+    ## seeds
+    seeds <- form.seeds(seed)
+    lecuyer <- seeds[[1]]
+    seed.array <- seeds[[2]]
+    lecuyer.stream <- seeds[[3]]
+    if(!is.na(seed)) set.seed(seed)
+  
+    ## prior
+    mvn.prior <- form.mvn.prior(b0, B0, k)
+    b0 <- mvn.prior[[1]]
+    B0 <- mvn.prior[[2]]
+    if (prediction.type == "ar"){
+      if (b0[2] > 1|b0[2] < -1){
+        stop("The prior of AR coefficient ",b0[2], " is outside the stationary region! \n") 
+      }
+    }
+    if (is.na(sigma.mu)|is.na(sigma.var)) {
+      check.ig.prior(c0, d0)
+    }
+    else {
+      d0 <- 2*(sigma.mu + sigma.mu^3/sigma.var)
+      c0 <- 2*(1 + (d0/2)/sigma.mu)
+    }
+  
+    ## get marginal likelihood argument
+    marginal.likelihood  <- match.arg(marginal.likelihood)
+    
+    ## following MCMCregress, set chib as binary
+    logmarglike <- loglik <- NULL
+    chib <- 0
+    if (marginal.likelihood == "Chib95"){
+      chib <- 1
+    }
+    
+    ## initial values
+    Y <- matrix(y, n, 1)
+    if (m == 0){
+      output <- MCMCregress(formula = Y ~ X-1, mcmc=mcmc,
+                            burnin=burnin, verbose=verbose, thin=thin,
+                            b0 = b0, B0 = solve(B0), c0 = c0, d0 = d0, 
+                            marginal.likelihood = marginal.likelihood)
+      attr(output, "y")       <- y
+      attr(output, "intervention")  <- intervention
+      yhatout <- output[, 1:2]%*%t(X)
+      attr(output, "yhat") <- matrix(yhatout, nstore, n) ## X_j*beta_j
+      attr(output, "yforepred") <- matrix(yhatout, nstore, n) ## yt|Y_t-1, theta
+      attr(output, "ybackpred") <- matrix(yhatout, nstore, n) ## yt|Y_t+1, theta
+
+    }
+    else{ ## if m > 0
+      A0 <- trans.mat.prior(m=m, n=n, a=a, b=b)  
+      Pstart  <-  check.P(P.start, m, a=a, b=b)
+      ols <- lm(y~X-1)
+      bols <- coef(ols)
+      betastart  <- matrix(rep(bols, ns), ns, k, byrow = TRUE)
+      Sigmastart <- rep(summary(ols)$sigma^2, ns)
+      statestart <- sort(sample(1:ns, n, replace=T))
+      AR <- 0
+      if (prediction.type == "ar"){
+        AR <- 1
+      }
+      change <- 0
+      betaout.N <- nstore*ns*k
+      Sigmaout.N <- nstore*ns
+      if (change.type == "fixed"){
+        change <- 1
+        betaout.N <- nstore*ns*k
+        Sigmaout.N <- nstore
+     }
+      if (change.type == "random"){
+        change <- 2
+        betaout.N <- nstore*k
+        Sigmaout.N <- nstore*ns
+      }
+      
+      ## call C++ code to draw sample
+      posterior <- .C("MCMCintervention",
+                      accept = as.double(0.0),
+                      betaout = as.double(rep(0.0, betaout.N)), 
+                      Sigmaout = as.double(rep(0.0, Sigmaout.N)), 
+                      Pout = as.double(rep(0.0, nstore*ns*ns)), 
+                      psout = as.double(rep(0.0, n*ns)),
+                      sout = as.double(rep(0.0, nstore*n)),
+                      
+                      yhatout = as.double(rep(0.0, nstore*n)),
+                      yerrorout = as.double(rep(0.0, nstore*n)),
+                      yforepredout = as.double(rep(0.0, nstore*n)),
+                      ybackpredout = as.double(rep(0.0, nstore*n)),
+                      
+                      Ydata = as.double(Y),
+                      Yrow = as.integer(nrow(Y)),
+                      Ycol = as.integer(ncol(Y)),
+                      Xdata = as.double(X),
+                      Xrow = as.integer(nrow(X)),
+                      Xcol = as.integer(ncol(X)),
+                      
+                      m = as.integer(m),
+                      intervention = as.integer(intervention), 
+                      burnin = as.integer(burnin),           
+                      mcmc = as.integer(mcmc), 
+                      thin = as.integer(thin),
+                      verbose = as.integer(verbose),
+                      
+                      lecuyer=as.integer(lecuyer), 
+                      seedarray=as.integer(seed.array),
+                      lecuyerstream=as.integer(lecuyer.stream),
+                      
+                      betastart = as.double(betastart),  
+                      Sigmastart = as.double(Sigmastart),  
+                      Pstart = as.double(Pstart),
+                      statestart = as.integer(statestart),    
+                      
+                      a = as.double(a),
+                      b = as.double(b),
+                      b0data = as.double(b0),
+                      B0data = as.double(B0), 
+                      c0 = as.double(c0),
+                      d0 = as.double(d0),
+                      A0data = as.double(A0), 
+                      logmarglikeholder = as.double(0.0),
+                      loglikeholder = as.double(0.0),
+                      ar = as.integer(AR),
+                      change = as.integer(change), 
+                      chib = as.integer(chib))                
+      
+      ## get marginal likelihood if Chib95
+      if (marginal.likelihood == "Chib95"){
+        logmarglike <- posterior$logmarglikeholder
+        loglik <- posterior$loglikeholder  ## stored in the "loglik" attribute below
+      }
+      ## pull together matrix and build MCMC object to return
+      beta.holder <- matrix(posterior$betaout, nstore, )
+      Sigma.holder <- matrix(posterior$Sigmaout, nstore, )
+      P.holder    <- matrix(posterior$Pout, nstore, )
+      s.holder    <- matrix(posterior$sout, nstore, )
+      ps.holder   <- matrix(posterior$psout, n, )
+      
+      output1 <- mcmc(data=beta.holder, start=burnin+1, end=burnin + mcmc, thin=thin)
+      if (change == 0){
+        varnames(output1)  <- sapply(c(1:ns),
+                                     function(i){
+                                       paste(c(xnames), "_regime", i, sep = "")
+                                     })
+        output2 <- mcmc(data=Sigma.holder, start=burnin+1, end=burnin + mcmc, thin=thin)
+        varnames(output2)  <- sapply(c(1:ns),
+                                     function(i){
+                                       paste(c("sigma2"), "_regime", i, sep = "")
+                                     }) 
+      }
+      else if (change == 1){
+        varnames(output1)  <- sapply(c(1:ns),
+                                     function(i){
+                                       paste(c(xnames), "_regime", i, sep = "")
+                                     })
+        output2 <- mcmc(data=Sigma.holder, start=burnin+1, end=burnin + mcmc, thin=thin)
+        names(output2)  <- c("sigma2") 
+      }
+      else{
+        output2 <- mcmc(data=Sigma.holder, start=burnin+1, end=burnin + mcmc, thin=thin)
+        varnames(output2)  <- sapply(c(1:ns),
+                                     function(i){
+                                       paste(c("sigma2"), "_regime", i, sep = "")
+                                     }) 
+      }
+      ## To check the acceptance rate (if AR == 1)
+      accept <- posterior$accept
+
+      output <- as.mcmc(cbind(output1, output2))
+      attr(output, "title") <- "MCMCintervention Posterior Sample"
+      attr(output, "intervention")  <- intervention
+      attr(output, "accept")  <- accept
+      attr(output, "y")       <- y
+      attr(output, "X")       <- X
+      attr(output, "m")       <- m
+      attr(output, "call")    <- cl
+      attr(output, "logmarglike") <- logmarglike
+      attr(output, "loglik") <- loglik
+      attr(output, "prob.state") <- ps.holder/nstore
+      attr(output, "s.store") <- s.holder
+      attr(output, "yhat") <- matrix(posterior$yhatout, nstore, n)## X_j*beta_j
+      attr(output, "yerror") <- matrix(posterior$yerrorout, nstore, n)## y_j - X_j*beta_j
+      attr(output, "yforepred") <- matrix(posterior$yforepredout, nstore, n)## yt|Y_t-1, theta
+      attr(output, "ybackpred") <- matrix(posterior$ybackpredout, nstore, n)## yt|Y_t+1, theta
+    }
+    return(output)
+    
+}## end of MCMC function
+
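
For reference, the (sigma.mu, sigma.var) -> (c0, d0) conversion in the new
MCMCintervention() above is the same moment match used in MCMCregress() and
MCMCregressChange() below, only written in a different algebraic form.  A quick
check (illustrative, not package code):

sigma.mu <- 5; sigma.var <- 25
## form used in MCMCintervention()
d0.a <- 2 * (sigma.mu + sigma.mu^3 / sigma.var)
c0.a <- 2 * (1 + (d0.a / 2) / sigma.mu)
## form used in MCMCregress() and MCMCregressChange()
c0.b <- 4 + 2 * (sigma.mu^2 / sigma.var)
d0.b <- 2 * sigma.mu * (c0.b / 2 - 1)
all.equal(c(c0.a, d0.a), c(c0.b, d0.b))              # TRUE
## inverse-Gamma(c0/2, d0/2) moments recover sigma.mu and sigma.var:
(d0.b / 2) / (c0.b / 2 - 1)                          # 5  = sigma.mu
(d0.b / 2)^2 / ((c0.b / 2 - 1)^2 * (c0.b / 2 - 2))   # 25 = sigma.var
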
diff --git a/R/MCMCpoissonChange.R b/R/MCMCpoissonChange.R
index 15e28a0..bee8f43 100644
--- a/R/MCMCpoissonChange.R
+++ b/R/MCMCpoissonChange.R
@@ -6,9 +6,10 @@
 
 "MCMCpoissonChange"<-
   function(formula, data = parent.frame(), m = 1,
-           b0 = 0, B0 = 1, a = NULL, b = NULL, c0 = NA, d0 = NA, 
+           b0 = 0, B0 = 1, a = NULL, b = NULL, c0 = NA, d0 = NA,
+           lambda.mu = NA, lambda.var = NA, 
            burnin = 1000, mcmc = 1000, thin = 1, verbose = 0, 
-           seed = NA, beta.start = NA, P.start = NA,   
+           seed = NA, beta.start = NA, P.start = NA, offset = NA,   
            marginal.likelihood = c("none", "Chib95"), ...) {
     
     ## form response and model matrices
@@ -35,10 +36,15 @@
     seed.array <- seeds[[2]]
     lecuyer.stream <- seeds[[3]]
     if(!is.na(seed)) set.seed(seed)
-    
+        
     if (k==1){
-      if (is.na(c0)||is.na(d0))
-        stop("You have to prior for lambda (c0 and d0) when there is no covariate.\n")
+      if (!is.na(lambda.mu) && !is.na(lambda.var)) {
+        c0 <- lambda.mu^2/lambda.var
+        d0 <- lambda.mu/lambda.var
+      }
+      if ((is.na(c0)||is.na(d0))&&((is.na(lambda.mu)||is.na(lambda.var)))){
+        stop("You have to provide a prior for lambda (c0 and d0 or lambda.mu and lambda.var) when there is no covariate.\n")
+      }
     }
     else{
       c0 <- d0 <- 0
@@ -47,6 +53,8 @@
       B0 <- mvn.prior[[2]]
     }
     
+    
+    
     ## get marginal likelihood argument
     marginal.likelihood  <- match.arg(marginal.likelihood)
     logmarglike <- NULL
@@ -56,11 +64,15 @@
     }
     if (m == 0){
       if (marginal.likelihood == "Chib95"){
-        output <- MCMCpoisson(formula, burnin = burnin, mcmc = mcmc,
-                              thin = thin, verbose = verbose,
-                              b0 = b0, B0 = B0,
-                              marginal.likelihood = "Laplace")
-        cat("\n Chib95 method is not yet available for m = 0. Laplace method is used instead.")
+        if (is.na(b0)||is.na(B0))
+          stop("You have to have a prior for beta (b0 and B0) when m = 0.\n")
+        else{
+          output <- MCMCpoisson(formula, burnin = burnin, mcmc = mcmc,
+                                thin = thin, verbose = verbose,
+                                b0 = b0, B0 = B0,
+                                marginal.likelihood = "Laplace")
+          cat("\n Chib95 method is not yet available for m = 0. Laplace method is used instead.")
+        }
       }
       else {
         output <- MCMCpoisson(formula, burnin = burnin, mcmc = mcmc,
@@ -80,6 +92,18 @@
       }
       taustart <- tau.initial(y, tot.comp)
       componentstart  <-  round(runif(tot.comp, 1, 5))
+      if(is.na(offset)){
+        logoffset <- rep(0, length(y))
+      }
+      else{
+        if(length(offset) == length(y)){
+          logoffset <- log(offset)
+        }
+        else{
+          stop("\n The length of offset is not matched with y.")
+        }
+      }
+      print(offset)
       
       ## normal mixture weights
       wr  <-  c(0.2294, 0.2590, 0.2480, 0.1525, 0.0472)
@@ -98,6 +122,7 @@
                       Xdata = as.double(X),
                       Xrow = as.integer(nrow(X)),
                       Xcol = as.integer(ncol(X)),
+                      logoffset = as.double(logoffset), 
                       m = as.integer(m), 
                       burnin = as.integer(burnin),           
                       mcmc = as.integer(mcmc), 
@@ -131,6 +156,11 @@
         logmarglike <- posterior$logmarglikeholder
         loglike <- posterior$loglikeholder
       }
+      else {
+        logmarglike <- 0
+        loglike <- 0
+      }
+ 
       
       ## pull together matrix and build MCMC object to return
       beta.holder <- matrix(posterior$betaout, nstore, )
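
Similarly, the new lambda.mu/lambda.var shortcut above matches the mean and
variance of a Gamma prior on lambda with shape c0 and rate d0.  A minimal check
(illustrative only):

lambda.mu <- 2; lambda.var <- 4        # hypothetical prior mean and variance
c0 <- lambda.mu^2 / lambda.var         # shape
d0 <- lambda.mu / lambda.var           # rate
c(mean = c0 / d0, var = c0 / d0^2)     # recovers lambda.mu and lambda.var
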
diff --git a/R/MCMCregress.R b/R/MCMCregress.R
index e6537da..e1a677c 100644
--- a/R/MCMCregress.R
+++ b/R/MCMCregress.R
@@ -20,18 +20,16 @@
 "MCMCregress" <-
   function(formula, data=NULL, burnin = 1000, mcmc = 10000,
            thin=1, verbose = 0, seed = NA, beta.start = NA,
-           b0 = 0, B0 = 0, c0 = 0.001, d0 = 0.001,
+           b0 = 0, B0 = 0, c0 = 0.001, d0 = 0.001, sigma.mu = NA, sigma.var = NA, 
            marginal.likelihood = c("none", "Laplace", "Chib95"),
            ...) {
     
     ## checks
     check.offset(list(...))
     check.mcmc.parameters(burnin, mcmc, thin)
-
-    cl <- match.call()
-    
-
     
+    cl <- match.call()
+        
     ## seeds
     seeds <- form.seeds(seed) 
     lecuyer <- seeds[[1]]
@@ -50,9 +48,15 @@
     mvn.prior <- form.mvn.prior(b0, B0, K)
     b0 <- mvn.prior[[1]]
     B0 <- mvn.prior[[2]]
-    check.ig.prior(c0, d0)
-
-
+    
+    if (is.na(sigma.mu)|is.na(sigma.var)) {
+      check.ig.prior(c0, d0)
+    }
+    else {
+      c0 <- 4 + 2 *(sigma.mu^2/sigma.var)
+      d0 <- 2*sigma.mu *(c0/2 - 1)
+    }
+      
     ## get marginal likelihood argument
     marginal.likelihood  <- match.arg(marginal.likelihood)
     B0.eigenvalues <- eigen(B0)$values
diff --git a/R/MCMCregressChange.R b/R/MCMCregressChange.R
new file mode 100644
index 0000000..7c73e8c
--- /dev/null
+++ b/R/MCMCregressChange.R
@@ -0,0 +1,170 @@
+#########################################################
+## sample from the posterior distribution
+## of a linear Gaussian model with multiple changepoints
+## using linked C++ code in Scythe
+##
+## JHP 07/01/2007
+## JHP 03/03/2009
+#########################################################
+
+"MCMCregressChange"<-
+  function(formula, data=parent.frame(), m = 1, 
+           b0 = 0, B0 = 0, c0 = 0.001, d0 = 0.001, sigma.mu = NA, sigma.var = NA, 
+           a = NULL, b = NULL,
+           mcmc = 1000, burnin = 1000,  thin = 1, verbose = 0, 
+           seed = NA, beta.start = NA, P.start = NA,
+           marginal.likelihood = c("none", "Chib95"), ...){
+    
+    ## form response and model matrices
+    holder <- parse.formula(formula, data)
+    y <- holder[[1]]
+    X <- holder[[2]]
+    xnames <- holder[[3]]
+    k <- ncol(X)    # number of covariates
+    n <- length(y)
+    ns <- m + 1 # number of states
+
+    ## check iteration parameters
+    check.mcmc.parameters(burnin, mcmc, thin)
+    totiter <- mcmc + burnin
+    cl <- match.call()
+    nstore <- mcmc/thin    
+    
+    ## seeds
+    seeds <- form.seeds(seed)
+    lecuyer <- seeds[[1]]
+    seed.array <- seeds[[2]]
+    lecuyer.stream <- seeds[[3]]
+    if(!is.na(seed)) set.seed(seed)
+
+    ## prior
+    mvn.prior <- form.mvn.prior(b0, B0, k)
+    b0 <- mvn.prior[[1]]
+    B0 <- mvn.prior[[2]]
+    if (is.na(sigma.mu)|is.na(sigma.var)) {
+      check.ig.prior(c0, d0)
+    }
+    else {
+      c0 <- 4 + 2 *(sigma.mu^2/sigma.var)
+      d0 <- 2*sigma.mu *(c0/2 - 1)
+    }
+   
+    ## get marginal likelihood argument
+    marginal.likelihood  <- match.arg(marginal.likelihood)
+       
+    ## following MCMCregress, set chib as binary
+    logmarglike <- loglik <- NULL
+    chib <- 0
+    if (marginal.likelihood == "Chib95"){
+      chib <- 1
+    }
+
+    if (m == 0){
+      output <- MCMCregress(formula, burnin = burnin, mcmc = mcmc,
+                            thin = thin, verbose = verbose,
+                            b0 = b0, B0 = B0, c0 =c0, d0=d0,
+                            marginal.likelihood = "Chib95")
+      attr(output, "y") <- y
+      attr(output, "m") <- m
+    }
+    else{
+      if(k == 1){
+        output <- MCMCresidualBreakAnalysis(y, data=data,  m = m, 
+                                            b0 = b0, B0 = B0, c0 = c0, d0 = d0,
+                                            a = a, b = b,
+                                            burnin = burnin, mcmc = mcmc, thin = thin, verbose = verbose,
+                                            seed = seed, beta.start = beta.start, P.start = P.start,
+                                            marginal.likelihood = marginal.likelihood)
+      }
+      else{
+        ## initial values
+        Pstart  <-  check.P(P.start, m, a=a, b=b)
+        A0 <- trans.mat.prior(m=m, n=n, a=a, b=b)        
+        betastart  <- beta.change.start(beta.start, ns, k, formula, family=gaussian, data)
+        ols <- lm(y~X-1)
+        Sigmastart <- rep(summary(ols)$sigma^2, ns)
+        statestart <- sort(sample(1:ns, n, replace=T))
+        
+        ## call C++ code to draw sample
+        posterior <- .C("MCMCregressChange", 
+                        betaout = as.double(rep(0.0, nstore*ns*k)), 
+                        Sigmaout = as.double(rep(0.0, nstore*ns)), 
+                        Pout = as.double(rep(0.0, nstore*ns*ns)), 
+                        psout = as.double(rep(0.0, n*ns)),
+                        sout = as.double(rep(0.0, nstore*n)),
+                        
+                        Ydata = as.double(y),
+                        Yrow = as.integer(nrow(y)),
+                        Ycol = as.integer(ncol(y)),
+                        Xdata = as.double(X),
+                        Xrow = as.integer(nrow(X)),
+                        Xcol = as.integer(ncol(X)),
+                        
+                        m = as.integer(m),
+                        burnin = as.integer(burnin),           
+                        mcmc = as.integer(mcmc), 
+                        thin = as.integer(thin),
+                        verbose = as.integer(verbose),
+                        
+                        lecuyer=as.integer(lecuyer), 
+                        seedarray=as.integer(seed.array),
+                        lecuyerstream=as.integer(lecuyer.stream),
+                        
+                        betastart = as.double(betastart),  
+                        Sigmastart = as.double(Sigmastart),  
+                        Pstart = as.double(Pstart),
+                        statestart = as.integer(statestart),    
+                        
+                        a = as.double(a),
+                        b = as.double(b),
+                        b0data = as.double(b0),
+                        B0data = as.double(B0), 
+                        c0 = as.double(c0),
+                        d0 = as.double(d0),
+                        A0data = as.double(A0), 
+                        logmarglikeholder = as.double(0.0),
+                        loglikeholder = as.double(0.0),
+                        chib = as.integer(chib))                
+        
+        ## get marginal likelihood if Chib95
+        if (marginal.likelihood == "Chib95"){
+          logmarglike <- posterior$logmarglikeholder
+          loglik <- posterior$loglikeholder  ## stored in the "loglik" attribute below
+        }
+        
+        ## pull together matrix and build MCMC object to return
+        beta.holder <- matrix(posterior$betaout, nstore, ns*k)
+        Sigma.holder <- matrix(posterior$Sigmaout, nstore, ns)
+        P.holder    <- matrix(posterior$Pout, nstore, )
+        s.holder    <- matrix(posterior$sout, nstore, )
+        ps.holder   <- matrix(posterior$psout, n, )
+        
+        output1 <- mcmc(data=beta.holder, start=burnin+1, end=burnin + mcmc, thin=thin)
+        varnames(output1)  <- sapply(c(1:ns),
+                                     function(i){
+                                       paste(c(xnames), "_regime", i, sep = "")
+                                     })
+        output2 <- mcmc(data=Sigma.holder, start=burnin+1, end=burnin + mcmc, thin=thin)
+        varnames(output2)  <- sapply(c(1:ns),
+                                     function(i){
+                                       paste(c("sigma2"), "_regime", i, sep = "")
+                                     })
+        output <- as.mcmc(cbind(output1, output2))
+        
+        attr(output, "title") <- "MCMCregressChange Posterior Sample"
+        attr(output, "formula") <- formula
+        attr(output, "y")       <- y
+        attr(output, "X")       <- X
+        attr(output, "m")       <- m
+        attr(output, "call")    <- cl
+        attr(output, "logmarglike") <- logmarglike
+        attr(output, "loglik") <- loglik
+        attr(output, "prob.state") <- ps.holder/nstore
+        attr(output, "s.store") <- s.holder
+      }
+    }
+    return(output)
+      
+  }## end of MCMC function
+    
+    
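
A minimal usage sketch for the new MCMCregressChange() (not taken from the
package documentation; the simulated data and prior values are illustrative):

library(MCMCpack)
set.seed(1973)
x <- rnorm(200)
y <- c(1 + 1.0 * x[1:100], -1 + 0.5 * x[101:200]) + rnorm(200, sd = 0.5)
fit1 <- MCMCregressChange(y ~ x, m = 1, b0 = 0, B0 = 0.1,
                          sigma.mu = 0.5, sigma.var = 0.25,
                          mcmc = 1000, burnin = 1000,
                          marginal.likelihood = "Chib95")
plotState(fit1)          # posterior regime probabilities (prob.state attribute)
plotChangepoint(fit1)    # posterior of the break location
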
diff --git a/R/btsutil.R b/R/btsutil.R
index 03bd069..1d38b62 100644
--- a/R/btsutil.R
+++ b/R/btsutil.R
@@ -248,4 +248,109 @@
 }
 
 
+## draw predicted outcomes of intervention analysis
+plotIntervention <- function(mcmcout, forward = TRUE, start = 1,
+                             alpha = 0.05, col = "red", main="", ylab="", xlab=""){
+  ## pull inputs
+  y <- ts(attr(mcmcout, "y"), start=start)
+  inter <- attr(mcmcout, "intervention") 
+  N <- length(y)
+  
+  ## draw a plot
+  plot(y, main=main, ylab=ylab, xlab=xlab)
+  abline(v = time(y)[inter], lty=2)
+  if (forward == TRUE){
+    yfore <- attr(mcmcout, "yforepred")
+    yfore.mu <- ts(apply(yfore, 2, mean), start=start); yfore.mu[1:(inter-1)] <- NA
+    yfore.upper <- ts(apply(yfore, 2, quantile, probs=(1-alpha/2)), start=start); yfore.upper[1:(inter-1)] <- NA
+    yfore.lower <- ts(apply(yfore, 2, quantile, probs=(alpha/2)), start=start); yfore.lower[1:(inter-1)] <- NA
+    lines(yfore.mu, col=col, lwd=2)
+    lines(yfore.upper, col=col, lty=3)
+    lines(yfore.lower, col=col, lty=3)
+  }
+  else {
+    yback <- attr(mcmcout, "ybackpred")
+    yback.mu <- ts(apply(yback, 2, mean), start=start);  yback.mu[(inter+1):N] <- NA
+    yback.upper <- ts(apply(yback, 2, quantile, probs=(1-alpha/2)), start=start); yback.upper[(inter+1):N] <- NA
+    yback.lower <- ts(apply(yback, 2, quantile, probs=(alpha/2)), start=start); yback.lower[(inter+1):N] <- NA
+    lines(yback.mu, col=col, lwd=2)
+    lines(yback.upper, col=col, lty=3)
+    lines(yback.lower, col=col, lty=3)
+  }
+}
+## Example
+## pdf(file="Nile_MCMCinter.pdf", width=12, height=4)
+## par(mfrow=c(1,3))
+## plotState(ar1, start=1871, main="Hidden Regime Change")
+## plotIntervention(ar1, start=1871, main="Forward Analysis", alpha= 0.5, ylab="Nile River flow", xlab="Year")
+## plotIntervention(ar1, forward=FALSE, start=1871, main="Backward Analysis", alpha= 0.5, ylab="Nile River flow", xlab="Year")
+## dev.off()
+
+## when we compare models in a list
+BayesFactorList <- function (model.list){
+  oldM <- length(model.list)
+  zero.marg <- rep(NA, oldM)
+  for (j in 1:oldM) {
+    zero.marg[j] <- ifelse(attr(model.list[[j]], "logmarglike") == 0, 1, 0)
+  }
+  new.model.list <- model.list[c(which(zero.marg == 0))]
+  M <- length(new.model.list)
+  out <- matrix(NA, M, 2)
+  BF <- rep(NA, M)
+  for (j in 1:M) {
+    BF[j] <- attr(new.model.list[[j]], "logmarglike")
+    out[j, 1] <- BF[j]
+  }
+  if (sum(exp(BF) == 0)){
+    ## if log like is too small, add some constants
+    BF <- BF + abs(BF[1])
+  }
+  prob <- exp(BF)/sum(exp(BF))
+  out[, 2] <- prob
+  marker <- which(zero.marg == 0)
+  rownames(out) <- names(model.list)[marker]
+  return(out)
+}
 
+## consecutive geweke diag test
+geweke.test <- function(output.list, z.value=1.96){
+  n <- length(output.list)
+  result <- rep(NA, n)
+  cat("\n --------------------------------- ")
+  for (i in 1:n){
+    if(sum(abs(geweke.diag(output.list[[i]])$z) > z.value)>0){
+      cat("\n Non-convergence for model ", i)
+      result[i] <- "Fail"
+    }
+    else {
+      result[i] <- "Pass"
+    }
+  }
+  cat("\n --------------------------------- \n")
+  return(result)
+}
+## outputlist <- list(ar0, ar1a, ar2a, ar1f, ar2f, ar1r, ar2r, tr0, tr1a, tr2a, tr1f, tr2f, tr1r, tr2r)
+## conv <- geweke.test(outputlist)
+
+## consecutive Heidelberger and Welch convergence diagnostic test
+heidel.test <- function(output.list, p.value=0.05){
+  n <- length(output.list)
+  result <- rep(NA, n)
+  cat("\n --------------------------------- ")
+  for (i in 1:n){
+    print(i)
+    plist1 <- heidel.diag(output.list[[i]], pvalue=p.value)[,1]
+    plist2 <- heidel.diag(output.list[[i]], pvalue=p.value)[,4]
+    if(sum(c(plist1, plist2) == 0)>0){
+      cat("\n Non-convergence for model ", i)
+      result[i] <- "Fail"
+    }
+    else {
+      result[i] <- "Pass"
+    }
+  }
+  cat("\n --------------------------------- \n")
+  return(result)
+}
+## outputlist <- list(ar0, ar1a, ar2a, ar1f, ar2f, ar1r, ar2r, tr0, tr1a, tr2a, tr1f, tr2f, tr1r, tr2r)
+## conv <- heidel.test(outputlist)
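
A minimal sketch of the new BayesFactorList() helper (illustrative only; the
stand-in objects below merely mimic fitted models that carry a logmarglike
attribute, e.g. anything estimated with marginal.likelihood = "Chib95"):

m1 <- structure(1, logmarglike = -100.2)
m2 <- structure(1, logmarglike =  -98.7)
BayesFactorList(list(model1 = m1, model2 = m2))
## column 1: log marginal likelihood, column 2: posterior model probability
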
diff --git a/README b/README
index 6769194..b083a3c 100644
--- a/README
+++ b/README
@@ -16,7 +16,9 @@ current package has been tested using GCC 4.0 on Linux and MacOS X.
 
 Many thanks to Dan Pemstein for helping with all sorts of C++ issues,
 and to Kurt Hornik and Fritz Leisch for their help with debugging as
-well as their service to the R community.
+well as their service to the R community.  We are also very grateful to
+Brian Ripley who provided C++ patches to fix a number of clang and Solaris
+issues. 
 
 // Acknowledgments
 
@@ -42,12 +44,13 @@ you have any problems or questions.
 
 --
 Andrew D. Martin, Ph.D.
-Professor and Chair, Political Science, Arts & Sciences
-Professor and CERL Director, School of Law
+Charles Nagel Chair of Constitutional Law and Political Science	
+Vice Dean and CERL Director, School of Law	
 Washington University in St. Louis
 
 (314) 935-5863 (Office)
-(314) 935-5856 (Fax)
+(314) 935-3836 (Fax)
 
 Email: admartin at wustl.edu
 WWW: http://adm.wustl.edu
+
diff --git a/man/MCMCintervention.Rd b/man/MCMCintervention.Rd
new file mode 100644
index 0000000..0cc4d58
--- /dev/null
+++ b/man/MCMCintervention.Rd
@@ -0,0 +1,209 @@
+\name{MCMCintervention}
+\alias{MCMCintervention}
+
+\title{Markov Chain Monte Carlo for a linear Gaussian Multiple Changepoint Model}
+\description{
+  This function generates a sample from the posterior distribution
+  of a linear Gaussian model with multiple changepoints. The function uses
+  the Markov chain Monte Carlo method of Chib (1998).
+  The user supplies data and priors, and
+  a sample from the posterior distribution is returned as an mcmc
+  object, which can be subsequently analyzed with functions
+  provided in the coda package.
+}
+
+\usage{MCMCintervention(y, data=parent.frame(),  m = 1, intervention,
+        prediction.type=c("trend","ar"), change.type=c("fixed", "random", "all"),		   
+        b0 = 0, B0 = 0, c0 = 0.001, d0 = 0.001, sigma.mu = NA, sigma.var = NA,
+        a = NULL, b = NULL, mcmc = 1000, burnin = 1000, thin = 1, verbose = 0,
+        seed = NA, beta.start = NA, P.start = NA,
+        marginal.likelihood = c("none", "Chib95"), ...)}
+
+\arguments{
+    \item{y}{The response variable, given as a numeric vector or time series.}
+
+    \item{data}{Data frame.}
+
+    \item{m}{The number of changepoints.}
+    
+    \item{intervention}{The timing of the intervention, measured by its position in the response vector. 
+    It should be larger than 0 and smaller than the length of the response vector. No default value is provided.}
+    
+    \item{prediction.type}{The type of local process. "trend" denotes the linear trend model and "ar" 
+    denotes an AR(1) process. By default, MCMCintervention uses the linear trend model.}
+    
+    \item{change.type}{The type of parametric breaks. "all" denotes that all parameters have breaks, 
+    "fixed" denotes that only the intercept and the slope have breaks, and "random" denotes that
+    only the variance has breaks. By default, MCMCintervention assumes that all parameters have breaks.}
+
+    \item{b0}{The prior mean of \eqn{\beta}{beta}.  This can either be a
+    scalar or a
+    column vector with dimension equal to the number of betas. If this
+    takes a scalar  value, then that value will serve as the prior
+    mean for all of the betas.}
+
+    \item{B0}{The prior precision of \eqn{\beta}{beta}.  This can either be a
+    scalar or a square matrix with dimensions equal to the number of betas.
+    If this
+    takes a scalar value, then that value times an identity matrix serves
+    as the prior precision of beta. Default value of 0 is equivalent to
+    an improper uniform prior for beta.}
+
+    \item{c0}{\eqn{c_0/2}{c0/2} is the shape parameter for the inverse
+    Gamma prior on \eqn{\sigma^2}{sigma^2} (the variance of the
+    disturbances). The amount of information in the inverse Gamma prior
+    is something like that from \eqn{c_0}{c0} pseudo-observations.}
+
+    \item{d0}{\eqn{d_0/2}{d0/2} is the scale parameter for the
+    inverse Gamma prior on \eqn{\sigma^2}{sigma^2} (the variance of the
+    disturbances). In constructing the inverse Gamma prior,
+    \eqn{d_0}{d0} acts like the sum of squared errors from the
+    \eqn{c_0}{c0} pseudo-observations.}
+
+  	\item{sigma.mu}{The mean of the
+    inverse Gamma prior on \eqn{\sigma^2}{sigma^2}. 
+    \eqn{sigma.mu}{sigma.mu} and \eqn{sigma.var}{sigma.var} allow users to choose the inverse Gamma prior
+    by choosing its mean and variance. }
+  
+    \item{sigma.var}{The variance of the
+    inverse Gamma prior on \eqn{\sigma^2}{sigma^2}.
+    \eqn{sigma.mu}{sigma.mu} and \eqn{sigma.var}{sigma.var} allow users to choose the inverse Gamma prior
+    by choosing its mean and variance. }
+
+    \item{a}{\eqn{a}{a} is the shape1 beta prior for transition probabilities. By default,
+    the expected duration is computed and corresponding a and b values are assigned. The expected
+    duration is the sample period divided by the number of states.}
+
+    \item{b}{\eqn{b}{b} is the shape2 beta prior for transition probabilities. By default,
+    the expected duration is computed and corresponding a and b values are assigned. The expected
+    duration is the sample period divided by the number of states.}
+
+    \item{burnin}{The number of burn-in iterations for the sampler.}
+
+    \item{mcmc}{The number of MCMC iterations after burnin.}
+
+    \item{thin}{The thinning interval used in the simulation.  The number of
+      MCMC iterations must be divisible by this value.}
+
+    \item{verbose}{A switch which determines whether or not the progress of
+    the sampler is printed to the screen.  If \code{verbose} is greater
+    than 0 the iteration number, the
+    \eqn{\beta}{beta} vector, and the error variance are printed to
+    the screen every \code{verbose}th iteration.}
+
+    \item{seed}{The seed for the random number generator.  If NA, the Mersenne
+    Twister generator is used with default seed 12345; if an integer is
+    passed it is used to seed the Mersenne twister.  The user can also
+    pass a list of length two to use the L'Ecuyer random number generator,
+    which is suitable for parallel computation.  The first element of the
+    list is the L'Ecuyer seed, which is a vector of length six or NA (if NA
+    a default seed of \code{rep(12345,6)} is used).  The second element of
+    list is a positive substream number. See the MCMCpack
+    specification for more details.}
+
+    \item{beta.start}{The starting values for the \eqn{\beta}{beta} vector.
+    This can either be a scalar or a
+    column vector with dimension equal to the number of betas.
+    The default value of NA will use the MLE
+    estimate of \eqn{\beta}{beta} as the starting value.  If this is a
+    scalar,  that value will serve as the starting value
+    mean for all of the betas.}
+    
+    \item{P.start}{The starting values for the transition matrix. 
+    A user should provide a square matrix with dimension equal to the number of states. 
+    By default, draws from the \code{Beta(0.9, 0.1)} distribution are used to construct 
+    a proper transition matrix for each row except the last row.}
+
+    \item{marginal.likelihood}{How should the marginal likelihood be
+    calculated? Options are: \code{none} in which case the marginal
+    likelihood will not be calculated, and
+    \code{Chib95} in which case the method of Chib (1995) is used.}
+
+    \item{...}{further arguments to be passed}
+}
+
+\value{
+   An mcmc object that contains the posterior sample.  This
+   object can be summarized by functions provided by the coda package.
+   The object contains a \code{prob.state} attribute, a storage matrix that contains the probability of \eqn{state_i}{state_i} for each period, the
+   log-likelihood of the model (\code{loglike}), and
+   the log-marginal likelihood of the model (\code{logmarglike}).
+}
+
+\details{
+  \code{MCMCintervention} simulates from the posterior distribution of
+  a linear Gaussian model with multiple changepoints.
+
+  The model takes the following form:
+  \deqn{y_t = x_t ' \beta_i + I(s_t = i)\varepsilon_{t},\;\; i = 1, \ldots, k}{y_t = x_t'beta_i + I(s_t = i)epsilon_t, i = 1,...,k.}
+  Where \eqn{k}{k} is the number of states and \eqn{I(s_t = i)}{I(s_t = i)} is an indicator function
+  that becomes 1 when a state at \eqn{t}{t} is \eqn{i}{i} and otherwise 0.
+
+  The errors are assumed to be Gaussian in each regime:
+  \deqn{I(s_t = i)\varepsilon_{t} \sim \mathcal{N}(0, \sigma^2_i)}{I(s_t = i)epsilon_t ~ N(0,
+    sigma^2_i)}
+
+  We assume standard, semi-conjugate priors:
+  \deqn{\beta_i \sim \mathcal{N}(b_0,B_0^{-1}),\;\; i = 1, \ldots, k}{beta_i ~ N(b0,B0^(-1)), i = 1,...,k.}
+  And:
+  \deqn{\sigma^{-2}_i \sim \mathcal{G}amma(c_0/2, d_0/2),\;\; i = 1, \ldots, k}{sigma^(-2)_i ~
+    Gamma(c0/2, d0/2), i = 1,...,k.}
+  Where \eqn{\beta_i}{beta_i} and \eqn{\sigma^{-2}_i}{sigma^(-2)_i} are assumed
+  \emph{a priori} independent.
+
+  The simulation proper is done in compiled C++ code to maximize efficiency.   }
+
+\references{
+ Jong Hee Park. 2012. "A Change-point Approach to Intervention Analysis Using Bayesian Inference"
+ Presented at the 2012 Annual Meeting of Korean Statistical Society.    
+
+ Siddhartha Chib. 1998. "Estimation and comparison of multiple change-point models."
+   \emph{Journal of Econometrics}. 86: 221-241.
+}
+
+\examples{
+    \dontrun{
+    Nile.std <- (Nile - mean(Nile))/sd(Nile)
+    set.seed(1973)
+    b0 <- matrix(c(0, 0), 2, 1);
+    B0 <- diag(rep(0.25, 2))
+    c0 = 2; d0 = 1
+    
+    ## Model Comparison
+    ar0 <- MCMCintervention(Nile.std, m=0, prediction.type="ar", change.type = "all",
+                        b0=b0, B0=B0, c0=c0, d0=d0, mcmc = 1000, burnin = 1000, verbose = 500,
+                        intervention = 29, marginal.likelihood = "Chib95")
+    ar1all <- MCMCintervention(Nile.std, m=1, prediction.type="ar", change.type = "all",
+                         b0=b0, B0=B0, c0=c0, d0=d0, mcmc = 1000, burnin = 1000, verbose = 500,
+                         intervention = 29, marginal.likelihood = "Chib95")
+    ar1fixed <- MCMCintervention(Nile.std, m=1, prediction.type="ar", change.type = "fixed",
+                         b0=b0, B0=B0, c0=c0, d0=d0, mcmc = 1000, burnin = 1000, verbose = 500,
+                         intervention = 29, marginal.likelihood = "Chib95")
+    ar1random <- MCMCintervention(Nile.std, m=1, prediction.type="ar", change.type = "random",
+                         b0=b0, B0=B0, c0=c0, d0=d0, mcmc = 1000, burnin = 1000, verbose = 500,
+                         intervention = 29, marginal.likelihood = "Chib95")
+    tr0 <- MCMCintervention(Nile.std, m=0, prediction.type="trend", change.type = "all",
+                        b0=b0, B0=B0, c0=c0, d0=d0, mcmc = 1000, burnin = 1000, verbose = 500,
+                        intervention = 29, marginal.likelihood = "Chib95")
+    tr1all <- MCMCintervention(Nile.std, m=1, prediction.type="trend", change.type = "all",
+                         b0=b0, B0=B0, c0=c0, d0=d0, mcmc = 1000, burnin = 1000, verbose = 500,
+                         intervention = 29, marginal.likelihood = "Chib95")
+    tr1fixed <- MCMCintervention(Nile.std, m=1, prediction.type="trend", change.type = "fixed",
+                         b0=b0, B0=B0, c0=c0, d0=d0, mcmc = 1000, burnin = 1000, verbose = 500,
+                         intervention = 29, marginal.likelihood = "Chib95")
+    tr1random <- MCMCintervention(Nile.std, m=1, prediction.type="trend", change.type = "random",
+                         b0=b0, B0=B0, c0=c0, d0=d0, mcmc = 1000, burnin = 1000, verbose = 500,
+                         intervention = 29, marginal.likelihood = "Chib95")
+    
+    BayesFactor(ar0, ar1all, ar1fixed, ar1random, tr0, tr1all, tr1fixed, tr1random)
+
+    par(mfrow=c(1,3))
+    plotState(ar1fixed, start=1871, main="Hidden Regime Change")
+    plotIntervention(ar1fixed, start=1871, main="Forward Analysis", alpha= 0.5, ylab="Nile River flow", xlab="Year")
+    plotIntervention(ar1fixed, forward=FALSE, start=1871, main="Backward Analysis", alpha= 0.5, ylab="Nile River flow", xlab="Year")
+    }
+}
+
+\keyword{models}
+
+\seealso{\code{\link{plotIntervention}}}
diff --git a/man/MCMCoprobitChange.Rd b/man/MCMCoprobitChange.Rd
index 55bc1ae..8800b28 100644
--- a/man/MCMCoprobitChange.Rd
+++ b/man/MCMCoprobitChange.Rd
@@ -153,6 +153,7 @@
 }
 
 \examples{
+\dontrun{
 set.seed(1909)
 N <- 200
 x1 <- rnorm(N, 1, .5);
@@ -189,6 +190,7 @@ BayesFactor(out1, out2, out3)
 plotState(out1)
 plotChangepoint(out1)
 }
+}
 
 \keyword{models}
 
diff --git a/man/MCMCpoissonChange.Rd b/man/MCMCpoissonChange.Rd
index fea7a03..3fab155 100644
--- a/man/MCMCpoissonChange.Rd
+++ b/man/MCMCpoissonChange.Rd
@@ -15,8 +15,9 @@
 \usage{MCMCpoissonChange(
 		formula, data = parent.frame(), m = 1,
            	b0 = 0, B0 = 1, a = NULL, b = NULL, c0 = NA, d0 = NA, 
+            lambda.mu = NA, lambda.var = NA, 
            	burnin = 1000, mcmc = 1000, thin = 1, verbose = 0, 
-           	seed = NA, beta.start = NA, P.start = NA,   
+           	seed = NA, beta.start = NA, P.start = NA, offset = NA,     
            	marginal.likelihood = c("none", "Chib95"), ...)}
 
 \arguments{
@@ -53,6 +54,16 @@
     \item{d0}{\eqn{d_0}{d0} is the scale parameter for Gamma prior on \eqn{\lambda}{lambda}
     (the mean). When there is no covariate, this should be provided by users. No default value is provided.}
 
+  \item{lambda.mu}{The mean of the
+    Gamma prior on \eqn{\lambda}{lambda}. 
+    \eqn{lambda.mu}{lambda.mu} and \eqn{lambda.var}{lambda.var} allow users to choose the Gamma prior
+    by choosing its mean and variance. }
+  
+  \item{lambda.var}{The variance of the
+    Gamma prior on \eqn{\lambda}{lambda}.
+    \eqn{lambda.mu}{lambda.mu} and \eqn{lambda.var}{lambda.var} allow users to choose the Gamma prior
+    by choosing its mean and variance. }
+ 
     \item{burnin}{The number of burn-in iterations for the sampler.}
 
     \item{mcmc}{The number of MCMC iterations after burn-in.}
@@ -70,6 +81,8 @@
 	
    \item{P.start}{The starting values for the transition matrix. A user should provide a square matrix with dimension equal to the number of states. By default, draws from the \code{Beta(0.9, 0.1)} distribution are used to construct a proper transition matrix for each row except the last row.}
 
+    \item{offset}{Offset or exposure variable. Supply the raw values; the logarithm is taken internally.}
+
     \item{marginal.likelihood}{How should the marginal likelihood be
     calculated? Options are: \code{none} in which case the marginal
     likelihood will not be calculated, and
diff --git a/man/MCMCregress.Rd b/man/MCMCregress.Rd
index 022c69c..42b9f36 100644
--- a/man/MCMCregress.Rd
+++ b/man/MCMCregress.Rd
@@ -15,7 +15,7 @@
 \usage{
 MCMCregress(formula, data = NULL, burnin = 1000, mcmc = 10000,
    thin = 1, verbose = 0, seed = NA, beta.start = NA, 
-   b0 = 0, B0 = 0, c0 = 0.001, d0 = 0.001,
+   b0 = 0, B0 = 0, c0 = 0.001, d0 = 0.001, sigma.mu = NA, sigma.var = NA, 
    marginal.likelihood = c("none", "Laplace", "Chib95"), ...) }
 
 \arguments{
@@ -67,7 +67,7 @@ MCMCregress(formula, data = NULL, burnin = 1000, mcmc = 10000,
     as the prior precision of beta. Default value of 0 is equivalent to
     an improper uniform prior for beta.}
     
-  \item{c0}{\eqn{c_0/2}{c0/2} is the shape parameter for the inverse
+  	\item{c0}{\eqn{c_0/2}{c0/2} is the shape parameter for the inverse
     Gamma prior on \eqn{\sigma^2}{sigma^2} (the variance of the
     disturbances). The amount of information in the inverse Gamma prior
     is something like that from \eqn{c_0}{c0} pseudo-observations.} 
@@ -77,8 +77,18 @@ MCMCregress(formula, data = NULL, burnin = 1000, mcmc = 10000,
     disturbances). In constructing the inverse Gamma prior,
     \eqn{d_0}{d0} acts like the sum of squared errors from the
     \eqn{c_0}{c0} pseudo-observations.}
-
-  \item{marginal.likelihood}{How should the marginal likelihood be
+    
+  \item{sigma.mu}{The mean of the
+    inverse Gamma prior on \eqn{\sigma^2}{sigma^2}. 
+    \code{sigma.mu} and \code{sigma.var} allow users to specify the inverse Gamma prior
+    by setting its mean and variance (see the sketch below). }
+  
+  \item{sigma.var}{The variance of the
+    inverse Gamma prior on \eqn{\sigma^2}{sigma^2}.
+    \code{sigma.mu} and \code{sigma.var} allow users to specify the inverse Gamma prior
+    by setting its mean and variance. }
+ 
+   \item{marginal.likelihood}{How should the marginal likelihood be
     calculated? Options are: \code{none} in which case the marginal
     likelihood will not be calculated, \code{Laplace} in which case the
     Laplace approximation (see Kass and Raftery, 1995) is used, and
@@ -142,7 +152,7 @@ MCMCregress(formula, data = NULL, burnin = 1000, mcmc = 10000,
 \examples{
 \dontrun{
 line   <- list(X = c(-2,-1,0,1,2), Y = c(1,3,3,3,5))
-posterior  <- MCMCregress(Y~X, data=line, verbose=1000)
+posterior  <- MCMCregress(Y~X, b0=0, B0 = 0.1, sigma.mu = 5, sigma.var = 25, data=line, verbose=1000)
 plot(posterior)
 raftery.diag(posterior)
 summary(posterior)
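
The same idea applies to sigma.mu and sigma.var for the inverse Gamma(c0/2, d0/2) prior
documented above: a chosen prior mean and variance of sigma^2 determine c0 and d0. The
snippet is plain algebra offered as an illustration, not an excerpt of the MCMCregress
internals (values match the example above: sigma.mu = 5, sigma.var = 25).

    ## sketch only: back out (c0, d0) from a chosen mean/variance of sigma^2,
    ## where sigma^2 ~ inverse Gamma(c0/2, d0/2)
    sigma.mu  <- 5                       # chosen prior mean of sigma^2
    sigma.var <- 25                      # chosen prior variance of sigma^2
    a  <- sigma.mu^2 / sigma.var + 2     # inverse Gamma shape c0/2
    b  <- sigma.mu * (a - 1)             # inverse Gamma scale d0/2
    c0 <- 2 * a                          # here 6
    d0 <- 2 * b                          # here 20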
diff --git a/man/MCMCregressChange.Rd b/man/MCMCregressChange.Rd
new file mode 100644
index 0000000..b919fb2
--- /dev/null
+++ b/man/MCMCregressChange.Rd
@@ -0,0 +1,205 @@
+\name{MCMCregressChange}
+\alias{MCMCregressChange}
+
+\title{Markov Chain Monte Carlo for a linear Gaussian Multiple Changepoint Model}
+\description{
+  This function generates a sample from the posterior distribution
+  of a linear Gaussian model with multiple changepoints. The function uses
+  the Markov chain Monte Carlo method of Chib (1998).
+  The user supplies data and priors, and
+  a sample from the posterior distribution is returned as an mcmc
+  object, which can be subsequently analyzed with functions
+  provided in the coda package.
+}
+
+\usage{MCMCregressChange(formula, data=parent.frame(),  m=1,
+        b0=0, B0=0, c0=0.001, d0=0.001, sigma.mu=NA, sigma.var=NA, 
+        a=NULL, b=NULL, mcmc=1000, burnin=1000, thin=1, verbose=0,
+        seed=NA, beta.start=NA, P.start=NA,
+        marginal.likelihood=c("none", "Chib95"), ...)}
+
+\arguments{
+   \item{formula}{Model formula.}
+
+    \item{data}{Data frame.}
+
+    \item{m}{The number of changepoints.}
+
+     \item{b0}{The prior mean of \eqn{\beta}{beta}.  This can either be a
+    scalar or a
+    column vector with dimension equal to the number of betas. If this
+    takes a scalar  value, then that value will serve as the prior
+    mean for all of the betas.}
+
+    \item{B0}{The prior precision of \eqn{\beta}{beta}.  This can either be a
+    scalar or a square matrix with dimensions equal to the number of betas.
+    If this
+    takes a scalar value, then that value times an identity matrix serves
+    as the prior precision of beta. Default value of 0 is equivalent to
+    an improper uniform prior for beta.}
+
+    \item{c0}{\eqn{c_0/2}{c0/2} is the shape parameter for the inverse
+    Gamma prior on \eqn{\sigma^2}{sigma^2} (the variance of the
+    disturbances). The amount of information in the inverse Gamma prior
+    is something like that from \eqn{c_0}{c0} pseudo-observations.}
+
+    \item{d0}{\eqn{d_0/2}{d0/2} is the scale parameter for the
+    inverse Gamma prior on \eqn{\sigma^2}{sigma^2} (the variance of the
+    disturbances). In constructing the inverse Gamma prior,
+    \eqn{d_0}{d0} acts like the sum of squared errors from the
+    \eqn{c_0}{c0} pseudo-observations.}
+	
+    \item{sigma.mu}{The mean of the
+    inverse Gamma prior on \eqn{\sigma^2}{sigma^2}. 
+    \code{sigma.mu} and \code{sigma.var} allow users to specify the inverse Gamma prior
+    by setting its mean and variance, as in \code{\link{MCMCregress}}. }
+  
+    \item{sigma.var}{The variance of the
+    inverse Gamma prior on \eqn{\sigma^2}{sigma^2}.
+    \code{sigma.mu} and \code{sigma.var} allow users to specify the inverse Gamma prior
+    by setting its mean and variance. }
+
+    \item{a}{\eqn{a}{a} is the shape1 beta prior for transition probabilities. By default,
+    the expected duration is computed and corresponding a and b values are assigned. The expected
+    duration is the sample period divided by the number of states.}
+
+    \item{b}{\eqn{b}{b} is the shape2 beta prior for transition probabilities. By default,
+    the expected duration is computed and corresponding a and b values are assigned. The expected
+    duration is the sample period divided by the number of states.}
+
+    \item{burnin}{The number of burn-in iterations for the sampler.}
+
+    \item{mcmc}{The number of MCMC iterations after burn-in.}
+
+    \item{thin}{The thinning interval used in the simulation.  The number of
+      MCMC iterations must be divisible by this value.}
+
+    \item{verbose}{A switch which determines whether or not the progress of
+    the sampler is printed to the screen.  If \code{verbose} is greater
+    than 0 the iteration number, the
+    \eqn{\beta}{beta} vector, and the error variance are printed to
+    the screen every \code{verbose}th iteration.}
+
+    \item{seed}{The seed for the random number generator.  If NA, the Mersenne
+    Twister generator is used with default seed 12345; if an integer is
+    passed it is used to seed the Mersenne twister.  The user can also
+    pass a list of length two to use the L'Ecuyer random number generator,
+    which is suitable for parallel computation.  The first element of the
+    list is the L'Ecuyer seed, which is a vector of length six or NA (if NA
+    a default seed of \code{rep(12345,6)} is used).  The second element of
+    list is a positive substream number. See the MCMCpack
+    specification for more details.}
+
+    \item{beta.start}{The starting values for the \eqn{\beta}{beta} vector.
+    This can either be a scalar or a
+    column vector with dimension equal to the number of betas.
+    The default value of NA will use the MLE
+    estimate of \eqn{\beta}{beta} as the starting value.  If this is a
+    scalar, that value will serve as the starting value
+    for all of the betas.}
+    
+    \item{P.start}{The starting values for the transition matrix. 
+    A user should provide a square matrix with dimension equal to the number of states. 
+    By default, draws from the \code{Beta(0.9, 0.1)} distribution
+    are used to construct a proper transition matrix for each row except the last row.}
+
+
+
+    \item{marginal.likelihood}{How should the marginal likelihood be
+    calculated? Options are: \code{none} in which case the marginal
+    likelihood will not be calculated, and
+    \code{Chib95} in which case the method of Chib (1995) is used.}
+
+    \item{...}{further arguments to be passed}
+}
+
+\value{
+   An mcmc object that contains the posterior sample.  This
+   object can be summarized by functions provided by the coda package.
+   The object contains an attribute \code{prob.state} storage matrix that contains the probability of \eqn{state_i}{state_i} for each period, the
+   log-likelihood of the model (\code{loglike}), and
+   the log-marginal likelihood of the model (\code{logmarglike}).
+}
+
+\details{
+  \code{MCMCregressChange} simulates from the posterior distribution of
+  a linear Gaussian model with multiple changepoints.
+
+  The model takes the following form:
+  \deqn{y_t=x_t ' \beta_i + I(s_t=i)\varepsilon_{t},\;\; i=1, \ldots, k}{y_t=x_t'beta_i + I(s_t=i)epsilon_t, i=1,...,k.}
+  Where \eqn{k}{k} is the number of states and \eqn{I(s_t=i)}{I(s_t=i)} is an indicator function
+  that becomes 1 when a state at \eqn{t}{t} is \eqn{i}{i} and otherwise 0.
+
+  The errors are assumed to be Gaussian in each regime:
+  \deqn{I(s_t=i)\varepsilon_{t} \sim \mathcal{N}(0, \sigma^2_i)}{I(s_t=i)epsilon_t ~ N(0,
+    sigma^2_i)}
+
+  We assume standard, semi-conjugate priors:
+  \deqn{\beta_i \sim \mathcal{N}(b_0,B_0^{-1}),\;\; i=1, \ldots, k}{beta_i ~ N(b0,B0^(-1)), i=1,...,k.}
+  And:
+  \deqn{\sigma^{-2}_i \sim \mathcal{G}amma(c_0/2, d_0/2),\;\; i=1, \ldots, k}{sigma^(-2)_i ~
+    Gamma(c0/2, d0/2), i=1,...,k.}
+  Where \eqn{\beta_i}{beta_i} and \eqn{\sigma^{-2}_i}{sigma^(-2)_i} are assumed
+  \emph{a priori} independent.
+
+  The simulation proper is done in compiled C++ code to maximize efficiency.   }
+
+\references{
+ Jong Hee Park. 2012. "A Change-point Approach to Intervention Analysis Using Bayesian Inference."
+ Presented at the 2012 Annual Meeting of the Korean Statistical Society.
+
+ Siddhartha Chib. 1995. "Marginal Likelihood from the Gibbs Output."
+   \emph{Journal of the American Statistical Association}. 90:
+   1313-1321.
+
+ Siddhartha Chib. 1998. "Estimation and comparison of multiple change-point models."
+   \emph{Journal of Econometrics}. 86: 221-241.
+
+
+ Martyn Plummer, Nicky Best, Kate Cowles, and Karen Vines. 2002.
+   \emph{Output Analysis and Diagnostics for MCMC (CODA)}.
+   \url{http://www-fis.iarc.fr/coda/}.
+}
+
+\examples{
+\dontrun{
+set.seed(1119)
+n <- 100
+x1 <- runif(n)
+true.beta1 <- c(2, -2)
+true.beta2 <- c(0,  2)
+true.Sigma <- c(1, 2)
+true.s <- rep(1:2, each=n/2)
+
+mu1 <- cbind(1, x1[true.s==1])%*%true.beta1
+mu2 <- cbind(1, x1[true.s==2])%*%true.beta2
+
+y <- as.ts(c(rnorm(n/2, mu1, sd=sqrt(true.Sigma[1])), rnorm(n/2, mu2, sd=sqrt(true.Sigma[2]))))
+formula=y ~ x1
+
+ols1 <- lm(y[true.s==1] ~x1[true.s==1])
+ols2 <- lm(y[true.s==2] ~x1[true.s==2])
+    
+## prior
+b0 <- 0
+B0 <- 1
+sigma.mu=sd(y)
+sigma.var=var(y)
+		
+## models
+model1 <-  MCMCregressChange(formula, m=1, b0=b0, B0=B0, sigma.mu=sigma.mu, sigma.var=sigma.var, marginal.likelihood="Chib95")
+model2 <-  MCMCregressChange(formula, m=2, b0=b0, B0=B0, sigma.mu=sigma.mu, sigma.var=sigma.var, marginal.likelihood="Chib95")
+model3 <-  MCMCregressChange(formula, m=3, b0=b0, B0=B0, sigma.mu=sigma.mu, sigma.var=sigma.var, marginal.likelihood="Chib95")
+model4 <-  MCMCregressChange(formula, m=4, b0=b0, B0=B0, sigma.mu=sigma.mu, sigma.var=sigma.var, marginal.likelihood="Chib95")
+model5 <-  MCMCregressChange(formula, m=5, b0=b0, B0=B0, sigma.mu=sigma.mu, sigma.var=sigma.var, marginal.likelihood="Chib95")
+
+print(BayesFactor(model1, model2, model3, model4, model5))
+plotState(model1)
+plotChangepoint(model1)
+
+}
+}
+
+\keyword{models}
+
+\seealso{\code{\link{plotState}}, \code{\link{plotChangepoint}}}
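
Assuming a fitted object such as model1 from the example above, the quantities described
in the Value section can be retrieved with attr(); a minimal sketch:

    ## minimal sketch, assuming model1 has been fit as in the example above
    attr(model1, "logmarglike")          # log marginal likelihood (used by BayesFactor)
    attr(model1, "loglike")              # log likelihood of the model
    head(attr(model1, "prob.state"))     # per-period state probabilities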
diff --git a/man/plotIntervention.Rd b/man/plotIntervention.Rd
new file mode 100644
index 0000000..18c1582
--- /dev/null
+++ b/man/plotIntervention.Rd
@@ -0,0 +1,37 @@
+\name{plotIntervention}
+\alias{plotIntervention}
+\title{Plot of intervention analysis}
+\description{Plot the results of a changepoint intervention analysis.}
+
+\usage{
+    plotIntervention(mcmcout, forward = TRUE, start = 1,
+    alpha = 0.05, col = "red", main="", ylab="", xlab="")
+}
+
+\arguments{
+
+\item{mcmcout}{The output object of \code{MCMCintervention}, containing the posterior sample from a changepoint intervention analysis.}
+
+\item{forward}{If TRUE, draw the result of forward prediction. If FALSE, 
+draw the result of backward prediction.} 
+
+\item{start}{The time of the first observation to be shown in the time series plot.}
+
+\item{alpha}{The size of the prediction error band in the plot. The default of alpha = 0.05 corresponds to a 95 percent Bayesian credible interval.}
+
+\item{col}{The color of predicted data.}
+
+\item{main}{Title of the plot.}
+
+\item{ylab}{Label for the y-axis.}
+
+\item{xlab}{Label for the x-axis.}
+
+}
+
+\references{
+ Jong Hee Park. 2012. "A Change-point Approach to Intervention Analysis Using Bayesian Inference."
+ Presented at the 2012 Annual Meeting of the Korean Statistical Society.
+}
+
+\seealso{\code{\link{MCMCintervention}}}
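
A minimal usage sketch, assuming ar1fixed is a fit returned by MCMCintervention with the
Nile data used in that help page's example (intervention at observation 29, series
starting in 1871):

    par(mfrow = c(1, 2))
    plotIntervention(ar1fixed, start = 1871, alpha = 0.05,
                     main = "Forward Analysis",
                     ylab = "Nile River flow", xlab = "Year")
    plotIntervention(ar1fixed, forward = FALSE, start = 1871, alpha = 0.05,
                     main = "Backward Analysis",
                     ylab = "Nile River flow", xlab = "Year")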
diff --git a/src/HMMmultivariateGaussian.cc b/src/HMMmultivariateGaussian.cc
index cd582e0..79e867f 100644
--- a/src/HMMmultivariateGaussian.cc
+++ b/src/HMMmultivariateGaussian.cc
@@ -1,15 +1,17 @@
 //////////////////////////////////////////////////////////////////////////
-//HMMmultivariateGaussian.cc is C++ code to estimate a Gaussian panel model with a structural break
+// HMMmultivariateGaussian.cc is C++ code to estimate a Gaussian panel model with a structural break
 // y_{it} = \x'_{it}\b + \varepsilon_{it}
 // \varepsilon_{it} \sim \normdist{0}{\sigma^2}
 // Parameters = {beta, sigma2, P_i}// static + changing
 // 
-// Written by Jong Hee Park
-// 11/19/2008
-// Modified by JHP
-// 07/04/2011
+// Jong Hee Park
+// Department of Political Science and International Relations
+// Seoul National University
+// jongheepark at snu.ac.kr
+
+// Written 11/19/2008
+// Modified 07/04/2011
 //
-// Copyright (C) 2008-present Jong Hee Park
 //////////////////////////////////////////////////////////////////////////
 #ifndef HMMMULTIVARIATEGAUSSIAN_CC
 #define HMMMULTIVARIATEGAUSSIAN_CC
@@ -225,7 +227,6 @@ void MultivariateGaussian_impl(rng<RNGTYPE>& stream,
     for (unsigned int iter = 0; iter<nstore; ++iter){
       double SSE = 0;   
       for(unsigned int s=0;s<nsubj; ++s){
-	int ntime_s = subject_groupinfo(s, 2);
 	Matrix<> e = t(Yk_arr[s]-Xk_arr[s]*beta_st)*(Yk_arr[s] - Xk_arr[s]*beta_st);
 	SSE = SSE + e[0];
       }
@@ -826,7 +827,6 @@ void HMMmultivariateGaussian_impl(rng<RNGTYPE>& stream,
     Matrix<> pstyt1(ns, 1);
     for (unsigned int tt=0; tt<ntime ; ++tt) {
       for (int j=0; j<ns;++j){
-	int nsubj_s = time_groupinfo(tt, 2);
 	Matrix<> Sigma = eye(nsubj)*sigma2_st[j];
 	Matrix<> Mu = Xt_arr[tt]*::t(beta_st(j,_));
 	py[j]  =  ::exp(lndmvn(Yt_arr[tt], Mu, Sigma));
diff --git a/src/HMMpanelFE.cc b/src/HMMpanelFE.cc
index c48f889..5cb50ae 100644
--- a/src/HMMpanelFE.cc
+++ b/src/HMMpanelFE.cc
@@ -1,10 +1,13 @@
 //////////////////////////////////////////////////////////////////////////
 // HMMpanelFE.cc is C++ code 
 // 
-// Written by Jong Hee Park
-// 11/19/2008
-// 09/20/2009
-// Copyright (C) 2008-present Jong Hee Park
+// Jong Hee Park
+// Department of Political Science and International Relations
+// Seoul National University
+// jongheepark at snu.ac.kr
+
+// Written 11/19/2008
+// Modified 09/20/2009
 //////////////////////////////////////////////////////////////////////////
 #ifndef HMMPANELFE_CC
 #define HMMPANELFE_CC
@@ -36,7 +39,6 @@ using namespace scythe;
 #define M(ROW,COL,NROWS) (COL*NROWS+ROW)
 
 template <typename RNGTYPE>
-// HMM_Gaussian_Fixed_state_sampler(stream, mvector[s], ntime_s, Yres[s], Zarr[s], delta, P);
 // For better identification, first two and last two states are constrained to be 1 and ns
 // 10/31/2011 JHP
 Matrix<int> hetero_state_sampler(rng<RNGTYPE>& stream, 
@@ -130,8 +132,6 @@ void HMMpanelFE_impl(rng<RNGTYPE>& stream,
   const int NOBS = Y.rows();
 
   const int tot_iter = burnin + mcmc;
-  const int nstore = mcmc / thin; // number of draws to store
-  
   vector< vector <double> > P0_holder;
   vector< vector <double> > P_holder;
   int count = 0;
diff --git a/src/HMMpanelRE.cc b/src/HMMpanelRE.cc
index 22872d6..41c423d 100644
--- a/src/HMMpanelRE.cc
+++ b/src/HMMpanelRE.cc
@@ -5,10 +5,13 @@
 // \bi_i \sim \normdist{0}{\D}
 // Parameters = {beta, bi, D, sigma2, || {s_it}, delta, P_i}// static + changing
 // 
-// First written by Jong Hee Park on 11/19/2008
-// Modified by JHP (consistent with MCMChregress by Ghislain Vieilledent) on 07/04/2011
-// Thanks for letting know the Woodbury matrix identity!
-// Included in MCMCpack b JHP on 09/2011
+// Jong Hee Park
+// Department of Political Science and International Relations
+// Seoul National University
+// jongheepark at snu.ac.kr
+
+// Modified (consistent with MCMChregress by Ghislain Vieilledent) on 07/04/2011
+// Included in MCMCpack on 09/2011
 //////////////////////////////////////////////////////////////////////////
 #ifndef HMMPANELRE_CC
 #define HMMPANELRE_CC
@@ -47,7 +50,7 @@ static double lndwish (const Matrix<>& W,
     double gammain = (v - i)/2; 
     gammapart = gammapart * gammafn(gammain);// since i starts from 0!
   }
-  double logdenom = log(gammapart) + (v * K/2)*log(2) + (K * (K - 1)/4)*log(M_PI);
+  double logdenom = log(gammapart) + (v * K/2)*log(2.0) + (K * (K - 1)/4)*log(M_PI);
   double detS = 0;
   double detW = 0;
   if(K==1){
@@ -209,7 +212,6 @@ void GaussianPanelRE_impl(rng<RNGTYPE>& stream,
     /////////////////////////////////////////////////
     double SSE = 0;   
     for(int s=0;s<nsubj; ++s){
-      int ntime_s = subject_groupinfo(s, 2);
       Matrix<> e = t(Yk_arr[s]-Xk_arr[s]*beta - Wk_arr[s]*bi(_,s))*
 	(Yk_arr[s] - Xk_arr[s]*beta - Wk_arr[s]*bi(_,s));
       SSE = SSE + e[0];
@@ -319,7 +321,6 @@ void GaussianPanelRE_impl(rng<RNGTYPE>& stream,
        /////////////////////////////////////////////////
        double SSE = 0;   
        for(int s=0;s<nsubj; ++s){
-	 int ntime_s = subject_groupinfo(s, 2);
 	 Matrix<> e = t(Yk_arr[s]-Xk_arr[s]*beta_st - Wk_arr[s]*bi(_,s))*
 	   (Yk_arr[s] - Xk_arr[s]*beta_st - Wk_arr[s]*bi(_,s));
 	 SSE = SSE + e[0];
@@ -361,7 +362,6 @@ void GaussianPanelRE_impl(rng<RNGTYPE>& stream,
        /////////////////////////////////////////////////
        double SSE = 0;   
        for(int s=0;s<nsubj; ++s){
-	 int ntime_s = subject_groupinfo(s, 2);
 	 Matrix<> e = t(Yk_arr[s]-Xk_arr[s]*beta_st - Wk_arr[s]*bi(_,s))*
 	   (Yk_arr[s] - Xk_arr[s]*beta_st - Wk_arr[s]*bi(_,s));
 	SSE = SSE + e[0];
@@ -667,7 +667,6 @@ void HMMpanelRE_impl(rng<RNGTYPE>& stream,
       Matrix<> XVX(K, K);
       Matrix<> XVY(K, 1);
       for(int tt = (beta_count - nstate[j]); tt<beta_count; ++tt) { 
-	int nsubj_s = time_groupinfo(tt, 2); 
 	XVX += (1/sigma2[j])*cpXt_arr[tt]-pow(1/sigma2[j],2)*tXWt_arr[tt]*invpd(Dinv[j] + cpWt_arr[tt]*(1/sigma2[j]))*tWXt_arr[tt];      
 	XVY += (1/sigma2[j])*tXYt_arr[tt]-pow(1/sigma2[j],2)*tXWt_arr[tt]*invpd(Dinv[j] + cpWt_arr[tt]*(1/sigma2[j]))*tWYt_arr[tt];
       }
@@ -854,7 +853,6 @@ void HMMpanelRE_impl(rng<RNGTYPE>& stream,
 	Matrix<> XVX(K, K);
 	Matrix<> XVY(K, 1);
 	for(int tt = (beta_count - nstate[j]); tt<beta_count; ++tt) { 
-	  int nsubj_s = time_groupinfo(tt, 2);   
 	  XVX += (1/sigma_store(iter, j))*cpXt_arr[tt]-pow(1/sigma_store(iter, j),2)*tXWt_arr[tt]*invpd(Dinv_j + cpWt_arr[tt]*(1/sigma_store(iter, j)))*tWXt_arr[tt];  
 	  XVY += (1/sigma_store(iter, j))*tXYt_arr[tt]-pow(1/sigma_store(iter, j),2)*tXWt_arr[tt]*invpd(Dinv_j + cpWt_arr[tt]*(1/sigma_store(iter, j)))*tWYt_arr[tt];
 	}
diff --git a/src/MCMCbinaryChange.cc b/src/MCMCbinaryChange.cc
index 0689688..e5b91e9 100644
--- a/src/MCMCbinaryChange.cc
+++ b/src/MCMCbinaryChange.cc
@@ -1,12 +1,14 @@
+////////////////////////////////////////////////////////////////////
 // MCMCbinaryChange.cc is C++ code to estimate a binary changepoint model
 // with a beta prior
 //
 // Jong Hee Park
-// Dept. of Political Science
-// University of Chicago
-// jhp at uchicago.edu
+// Department of Political Science and International Relations
+// Seoul National University
+// jongheepark at snu.ac.kr
 //
-// 03/03/2009 
+// 03/03/2009 Written
+////////////////////////////////////////////////////////////////////
 
 #ifndef MCMCBINARYCHANGE_CC
 #define MCMCBINARYCHANGE_CC
@@ -322,7 +324,6 @@ extern "C"{
     
     // pull together Matrix objects
     const Matrix <> Y(*Yrow, *Ycol, Ydata);  
-    const unsigned int tot_iter = *burnin + *mcmc; 
     const unsigned int nstore = *mcmc / *thin; 
     const int n = Y.rows();
     const int ns = *m + 1;                 
diff --git a/src/MCMCdynamicIRT1d-b.cc b/src/MCMCdynamicIRT1d-b.cc
index 6f2be70..e7c1b31 100644
--- a/src/MCMCdynamicIRT1d-b.cc
+++ b/src/MCMCdynamicIRT1d-b.cc
@@ -90,7 +90,7 @@ void MCMCdynamicIRT1d_b_impl(rng<RNGTYPE>& stream,
 
   
   const int tot_iter = *burnin + *mcmc;
-  const int nsamp = *mcmc / *thin;
+// JHP  const int nsamp = *mcmc / *thin;
   //sparse matrix of latent outcome variables
   double** Z;
   Z = new double*[*nsubj];
@@ -103,7 +103,7 @@ void MCMCdynamicIRT1d_b_impl(rng<RNGTYPE>& stream,
   for (int j=0; j<*nrowYdata; ++j){
     const int s = Ydata[M(j, 0, *nrowYdata)]; // subject id
     const int i = Ydata[M(j, 1, *nrowYdata)]; // item id
-    const int y = Ydata[M(j, 2, *nrowYdata)]; // y value
+// JHP    const int y = Ydata[M(j, 2, *nrowYdata)]; // y value
     Z[s][i] = 0.0;
   }
   
diff --git a/src/MCMCdynamicIRT1d.cc b/src/MCMCdynamicIRT1d.cc
index bd000ef..901c340 100644
--- a/src/MCMCdynamicIRT1d.cc
+++ b/src/MCMCdynamicIRT1d.cc
@@ -90,7 +90,7 @@ void MCMCdynamicIRT1d_impl(rng<RNGTYPE>& stream,
 
   
   const int tot_iter = *burnin + *mcmc;
-  const int nsamp = *mcmc / *thin;
+// JHP  const int nsamp = *mcmc / *thin;
   //sparse matrix of latent outcome variables
   double** Z;
   Z = new double*[*nsubj];
@@ -103,7 +103,7 @@ void MCMCdynamicIRT1d_impl(rng<RNGTYPE>& stream,
   for (int j=0; j<*nrowYdata; ++j){
     const int s = Ydata[M(j, 0, *nrowYdata)]; // subject id
     const int i = Ydata[M(j, 1, *nrowYdata)]; // item id
-    const int y = Ydata[M(j, 2, *nrowYdata)]; // y value
+// JHP    const int y = Ydata[M(j, 2, *nrowYdata)]; // y value
     Z[s][i] = 0.0;
   }
   
diff --git a/src/MCMChierBetaBinom.cc b/src/MCMChierBetaBinom.cc
index 6a16f25..5d53542 100644
--- a/src/MCMChierBetaBinom.cc
+++ b/src/MCMChierBetaBinom.cc
@@ -86,7 +86,7 @@ double logABfcd(const double& alpha, const double& beta,
     }      
   }
   else{
-    term1 + -numeric_limits<double>::infinity();
+    term1 = -numeric_limits<double>::infinity();
   }
 
   // a and/or b <= 0 is treated as improper uniform prior 
@@ -162,7 +162,7 @@ void hierBetaBinom_impl(rng<RNGTYPE>& stream,
 			const double* base_sigma){
   
   const int tot_iter = burnin + mcmc;
-  const int nstore = mcmc/thin;
+// JHP  const int nstore = mcmc/thin;
 
   // these probably should not be hard coded
   const double mixfrac = 0.05;
@@ -251,9 +251,9 @@ void hierBetaBinom_impl(rng<RNGTYPE>& stream,
 
     // sample [alpha_j, beta_j | theta, y, s, a, b]
     for (int j=0; j<nj; ++j){
-      const int len_thetaj = js_thetas_ptr[j].size();
-      double logfcd_cur = logABfcd(alpha[j], beta[j], 
-				      js_thetas_ptr[j], a, b);
+// JHP     const int len_thetaj = js_thetas_ptr[j].size();
+// JHP      double logfcd_cur = logABfcd(alpha[j], beta[j], 
+// JHP			      js_thetas_ptr[j], a, b);
       if (iter < 10){
 	double alpha_can = stream.rnorm(alpha[j], base_sigma[j]);
 	double beta_can = stream.rnorm(beta[j], base_sigma[j]);
diff --git a/src/MCMChlogit.cc b/src/MCMChlogit.cc
index 250f0a3..41a0b8f 100644
--- a/src/MCMChlogit.cc
+++ b/src/MCMChlogit.cc
@@ -391,7 +391,7 @@ extern "C"{
 	    	for (int k=0;k<NGROUP;k++){
 	    	    for (int m=0; m<nobsk[k]; m++) {
 	    		int w=posk_arr[k][m];
-			const double C=pow(16*sqrt(3)/(15*M_PI),2);
+			const double C=pow(16*sqrt(3.0)/(15*M_PI),2);
 			Matrix<double> logit_theta_hat=Xk_arr[k](m,_)*beta_run+Wk_arr[k](m,_)*bk_run[k];
 	    		theta_pred[w]+=invlogit(logit_theta_hat(0)/sqrt(1+C*V_run))/(NSAMP); // We compute the mean of NSAMP values
 	    	    }
diff --git a/src/MCMCintervention.cc b/src/MCMCintervention.cc
new file mode 100644
index 0000000..4d316b4
--- /dev/null
+++ b/src/MCMCintervention.cc
@@ -0,0 +1,1664 @@
+////////////////////////////////////////////////////////////////////
+// MCMCintervention.cc is C++ code to estimate a
+// linear regression changepoint model for intervention analysis
+//
+// Jong Hee Park
+// Department of Political Science and International Relations
+// Seoul National University
+// jongheepark at snu.ac.kr
+//
+// Written 03/03/2009
+////////////////////////////////////////////////////////////////////
+
+#ifndef MCMCINTERVENTION_CC
+#define MCMCINTERVENTION_CC
+
+#include "MCMCrng.h"
+#include "MCMCfcds.h"
+#include "matrix.h"
+#include "distributions.h"
+#include "stat.h"
+#include "la.h"
+#include "ide.h"
+#include "smath.h"
+#include "rng.h"
+#include "mersenne.h"
+#include "lecuyer.h"
+
+#include <R.h>           // needed to use Rprintf()
+#include <R_ext/Utils.h> // needed to allow user interrupts
+
+
+using namespace std;
+using namespace scythe;
+
+
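+// Local helper: log density of an inverse Gamma(shape, scale) distribution
+// evaluated at x; used below in the Chib marginal likelihood computations.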
+double lndinvgamma_pjh (const double x, const double shape, const double scale){
+  double log_density = shape *::log(scale) - lngammafn(shape) - (shape + 1) * ::log(x) - (scale/x);
+  return (log_density);
+}
+
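+// Forward-filtering backward-sampling draw of the hidden states when a single
+// coefficient vector beta is shared across regimes (only the variances differ).
+// Column 0 of the returned matrix holds the sampled states; the remaining
+// columns hold the corresponding state probabilities.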
+template <typename RNGTYPE>
+Matrix<double> gaussian_state_fixedBeta_sampler(rng<RNGTYPE>& stream, 
+						 const int m, 
+						 const Matrix<double>& Y,
+						 const Matrix<double>& X,
+						 const Matrix<double>& beta,
+						 const Matrix<double>& Sigma,
+						 const Matrix<double>& P){
+  
+  const int ns = m + 1;
+  const int n = Y.rows();
+  Matrix<double> F(n, ns);
+  Matrix<double> pr1(ns, 1);
+  pr1[0] = 1;
+  Matrix<double> py(ns, 1);
+  Matrix<double> pstyt1(ns, 1);
+  Matrix<int> s(n, 1);                        // holder for state variables
+  Matrix<double> ps = Matrix<double>(n, ns);  // holder for state probabilities
+  for (int tt=0; tt<n ; ++tt){
+    Matrix<double> mu = X(tt,_)*beta; //k by 1 vector
+    for (int j = 0; j< ns; ++j){
+      py[j]  =  dnorm(Y[tt], mu[0], sqrt(Sigma[j]));
+    }
+    if (tt==0) pstyt1 = pr1;
+    else {
+      pstyt1 =  ::t(F(tt-1,_)*P); // make it an ns by 1 matrix
+    }
+    Matrix<double> unnorm_pstyt = pstyt1%py;
+    const Matrix<double> pstyt = unnorm_pstyt/sum(unnorm_pstyt); // pstyt = Pr(st|Yt)
+    for (int j=0; j<ns ; ++j) F(tt,j) = pstyt(j);
+    
+  }// end of F matrix filtering
+  ps(n-1,_) = F(n-1,_);                     
+  s(n-1) = ns;                                
+  Matrix<double> pstyn = Matrix<double>(ns, 1);
+  double pone = 0.0;
+  int tt = n-2;
+  while (tt >= 0){
+    int st = s(tt+1);
+    Matrix<double> Pst_1 = ::t(P(_,st-1)); 
+    Matrix<double> unnorm_pstyn = F(tt,_)%Pst_1;
+    pstyn = unnorm_pstyn/sum(unnorm_pstyn); 
+    if (st==1)   s(tt) = 1;                  
+    else{
+      pone = pstyn(st-2);
+      if(stream.runif() < pone) s(tt) = st-1;
+      else s(tt) = st;
+    }
+    ps(tt,_) = pstyn;
+    --tt;
+  }// end of while loop
+  Matrix<double> Sout(n, ns+1); 
+  Sout(_, 0) = s(_,0);
+  for (int j = 0; j<ns; ++j){
+    Sout(_,j+1) = ps(_, j);
+  }
+  return Sout;
+} // end of state sampler
+
+
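+// Forward-filtering backward-sampling draw of the hidden states for the
+// regime-specific coefficient case (beta is ns by k, one row per regime);
+// same output layout as gaussian_state_fixedBeta_sampler above.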
+template <typename RNGTYPE>
+Matrix<double> gaussian_state_sampler(rng<RNGTYPE>& stream, 
+				      const int m, 
+				      const Matrix<double>& Y,
+				      const Matrix<double>& X,
+				      const Matrix<double>& beta,
+				      const Matrix<double>& Sigma,
+				      const Matrix<double>& P){
+  
+  const int ns = m + 1;
+  const int n = Y.rows();
+  
+  // P is a (m+1 by m+1) transition matrix
+  // F matrix contains all the information of Pr(st|Yt)
+  Matrix<double> F(n, ns);
+  Matrix<double> pr1(ns, 1);
+  pr1[0] = 1;
+  Matrix<double> py(ns, 1);
+  Matrix<double> pstyt1(ns, 1);
+  Matrix<int> s(n, 1);                        // holder for state variables
+  Matrix<double> ps = Matrix<double>(n, ns);  // holder for state probabilities
+  
+  //
+  // Forward sampling: update F matrix
+  //
+  for (int tt=0; tt<n ; ++tt){
+    Matrix<double> mu = X(tt,_)*::t(beta); //k by 1 vector
+    for (int j = 0; j< ns; ++j){
+      py[j]  =  dnorm(Y[tt], mu[j], sqrt(Sigma[j]));
+    }
+    if (tt==0) pstyt1 = pr1;
+    else {
+      pstyt1 =  ::t(F(tt-1,_)*P); // make it an ns by 1 matrix
+    }
+    Matrix<double> unnorm_pstyt = pstyt1%py;
+    const Matrix<double> pstyt = unnorm_pstyt/sum(unnorm_pstyt); // pstyt = Pr(st|Yt)
+    for (int j=0; j<ns ; ++j) F(tt,j) = pstyt(j);
+    
+  }// end of F matrix filtering
+  
+  //
+  // Backward recursions: sample s using F and a transition matrix P
+  //
+  ps(n-1,_) = F(n-1,_);                       // we know last elements of ps and s
+  s(n-1) = ns;                                // the last period is s(*n-1) but one down in C location
+  
+  Matrix<double> pstyn = Matrix<double>(ns, 1);
+  double pone = 0.0;
+  int tt = n-2;
+  while (tt >= 0){
+    int st = s(tt+1);
+    Matrix<double> Pst_1 = ::t(P(_,st-1)); // prob of being at a previous state
+    Matrix<double> unnorm_pstyn = F(tt,_)%Pst_1;
+    pstyn = unnorm_pstyn/sum(unnorm_pstyn); // normalize into a prob. density
+    if (st==1)   s(tt) = 1;                  // If this is the first period, state should be 1.
+    // Otherwise, draw a state from a discrete prob. distribution("pstyn")
+    // using the inverse CDF method.
+    else{
+      pone = pstyn(st-2);
+      if(stream.runif() < pone) s(tt) = st-1;
+      else s(tt) = st;
+    }
+    ps(tt,_) = pstyn;
+    --tt;
+  }// end of while loop
+  
+  // name and report outputs
+  Matrix<double> Sout(n, ns+1); 
+  Sout(_, 0) = s(_,0);
+  for (int j = 0; j<ns; ++j){
+    Sout(_,j+1) = ps(_, j);
+  }
+  
+  return Sout;
+  
+} // end of state sampler
+
+//////////////////////////////////////////////////////////////////////
+// MCMCintervention, "random effect changes only": the error variance
+// differs across regimes while a single coefficient vector beta is
+// shared by all regimes.
+//////////////////////////////////////////////////////////////////////
+template <typename RNGTYPE>
+void MCMCintervention_random_impl(rng<RNGTYPE>& stream, 
+				  const double m, const int intervention, 				 
+				  const Matrix<>& Y, const Matrix<>& X,
+				  Matrix<>& beta, Matrix<>& Sigma, Matrix<>& P, Matrix<int>& s, 
+				  Matrix<>& b0, Matrix<>& B0,   
+				  const double c0, const double d0,
+				  const Matrix<>& A0, 
+				  unsigned int burnin, unsigned int mcmc, unsigned int thin,
+				  unsigned int verbose, bool chib, bool ar, 
+				  Matrix<>& beta_store, Matrix<>& Sigma_store, 
+				  Matrix<>& P_store, Matrix<>& ps_store, Matrix<int>& s_store, 
+				  double& logmarglike, 
+				  Matrix<>& yhat_mat, 
+				  Matrix<>& yerror_mat, 
+				  Matrix<>& yfore_pred_mat, 
+				  Matrix<>& yback_pred_mat)
+{
+  // define constants and form cross-product matrices
+  const int tot_iter = burnin + mcmc; //total iterations
+  const int nstore = mcmc / thin; // number of draws to store
+  const int n = Y.rows();
+  const int ns = m + 1;                 // number of states
+  const int k = X.cols();
+  const Matrix<> B0inv = invpd(B0);
+  Matrix<> sigma(ns, 1);
+  
+  //MCMC loop
+  unsigned int count = 0;  
+  unsigned int reject = 0;  
+  double rejectionrate = 0; 
+  Matrix <> sum_e(ns, 1);
+
+  for (int iter = 0; iter < tot_iter; ++iter){
+    //////////////////////
+    // 1. Sample beta and Sigma
+    //////////////////////
+    int beta_count = 0;
+    Matrix<int> nstate(ns, 1); 
+    Matrix<double> XtX(k, k);   
+    Matrix<double> XtY(k, 1);   
+    for (int j = 0; j<ns; ++j){
+      for (int i = 0; i<n; ++i){
+	if (s[i] == j + 1) { // since j starts from 0
+	  nstate[j] = nstate[j] + 1;
+	}// end of if
+      }// end of int i<n
+      beta_count = beta_count + nstate[j];        
+ 
+      Matrix<double> yj = Y((beta_count - nstate[j]), 0, (beta_count - 1), 0);
+      Matrix<double> Xj = X((beta_count - nstate[j]), 0, (beta_count - 1), k-1);
+
+      // SIGMA UPDATE
+      double shape = (c0 + (double)nstate[j])/2;
+      Matrix<> yhat_j = Xj*beta; 
+      Matrix<double> ej = yj - yhat_j;
+      Matrix<double> sum_ej = t(ej)*ej;
+      double scale =(d0 + sum_ej[0])/2;
+      
+      Sigma[j] = 1/stream.rgamma(shape, scale);
+      sigma[j] = sqrt(Sigma[j]);
+      
+      if (iter >= burnin && ((iter % thin)==0)){
+	yhat_mat(count, (beta_count - nstate[j]), count, (beta_count - 1)) = yhat_j(_,0);
+	yerror_mat(count, (beta_count - nstate[j]), count, (beta_count - 1)) = ej(_,0);
+      }    
+      
+      // CONSTANT BETA UPDATE
+      XtX = XtX + (crossprod(Xj))/Sigma[j];
+      XtY = XtY + ::t(Xj)*yj/Sigma[j];
+    }
+    Matrix<double> Bn = invpd(B0 + XtX);
+    Matrix<double> bn = Bn*(B0*b0 + XtY);
+    if (ar == 1){
+      Matrix<double> beta_can = stream.rmvnorm(bn, Bn);
+      if (beta_can(1) > 1 || beta_can(1) < -1){
+	// Rprintf("\n AR coefficient %10.5f is outside the stationary region! \n", beta_can(1));  
+	++reject;
+      }
+      else{
+	beta = beta_can;
+      }
+    }
+    else{
+      beta = stream.rmvnorm(bn, Bn);
+    }
+    //////////////////////
+    // 2. Sample P
+    //////////////////////        
+    double shape1 = 0;
+    double shape2 = 0;    
+    P(ns-1, ns-1) = 1; //no jump at the last state
+    
+    for (int j =0; j<(ns-1); ++j){
+      shape1 =  A0(j,j) + (double)nstate[j] - 1;  
+      shape2 =  A0(j,j+1) + 1; // SS(j,j+1);        
+      P(j,j) = stream.rbeta(shape1, shape2);
+      P(j,j+1) = 1 - P(j,j);
+    }
+    
+    //////////////////////
+    // 3. Sample s
+    //////////////////////
+    Matrix<double> F(n, ns);
+    Matrix<double> pr1(ns, 1);
+    pr1[0] = 1;
+    Matrix<double> py(ns, 1);
+    Matrix<double> pstyt1(ns, 1);
+    Matrix<double> ps = Matrix<double>(n, ns);  // holder for state probabilities
+    
+    //
+    // Forward sampling: update F matrix
+    //
+    for (int tt=0; tt<n ; ++tt){
+      Matrix<double> mu = X(tt,_)*beta; //k by 1 vector
+      for (int j = 0; j< ns; ++j){
+	py[j]  =  dnorm(Y[tt], mu[0], sigma[j]);
+      }
+      if (tt==0) pstyt1 = pr1;
+      else {
+	pstyt1 =  ::t(F(tt-1,_)*P); // make it an ns by 1 matrix
+      }
+      Matrix<double> unnorm_pstyt = pstyt1%py;
+
+      /////////////////////////////////////////////////////////////////////
+      // Prediction of future outcomes based on pre-intervention state
+      /////////////////////////////////////////////////////////////////////
+      if (tt==(intervention - 1)&&iter >= burnin && ((iter % thin)==0)){
+	// Forward prediction
+	Matrix <> yfore_pred(1, n);
+	for (int ttt=tt; ttt<n ; ++ttt){
+	  int ss = s(tt-1); // 1 unit before the intervention
+	  mu = X(ttt,_)*beta; //k by 1 vector
+	  yfore_pred(ttt) = stream.rnorm(mu(0), sigma[ss-1]);
+	}
+	yfore_pred_mat(count, _) = yfore_pred(0, _);
+	// backward prediction
+	Matrix <> yback_pred(1, n);
+	for (int ttt=tt; ttt>=0 ; --ttt){
+	  int ss = s(tt+1);
+	  mu = X(ttt,_)*beta; //k by 1 vector
+	  yback_pred(ttt) = stream.rnorm(mu(0), sigma[ss-1]);
+	}
+	yback_pred_mat(count, _) = yback_pred(0, _);
+      }	
+    
+      const Matrix<double> pstyt = unnorm_pstyt/sum(unnorm_pstyt); // pstyt = Pr(st|Yt)
+      for (int j=0; j<ns ; ++j) F(tt,j) = pstyt(j);
+      
+    }// end of F matrix filtering
+    
+    //
+    // Backward recursions: sample s using F and a transition matrix P
+    //
+    ps(n-1,_) = F(n-1,_);                       // we know last elements of ps and s
+    s(n-1) = ns;                                // the last period is s(*n-1) but one down in C location
+    
+    Matrix<double> pstyn = Matrix<double>(ns, 1);
+    double pone = 0.0;
+    int tt = n-2;
+    while (tt >= 0){
+      int st = s(tt+1);
+      Matrix<double> Pst_1 = ::t(P(_,st-1)); // prob of being at a previous state
+      Matrix<double> unnorm_pstyn = F(tt,_)%Pst_1;
+      pstyn = unnorm_pstyn/sum(unnorm_pstyn); // normalize into a prob. density
+      if (st==1)   s(tt) = 1;                  // If this is the first period, state should be 1.
+      // Otherwise, draw a state from a discrete prob. distribution("pstyn")
+      // using the inverse CDF method.
+      else{
+	pone = pstyn(st-2);
+	if(stream.runif() < pone) s(tt) = st-1;// jump from tt-1 to tt 
+	else s(tt) = st;// stay 
+      }
+      ps(tt,_) = pstyn;
+      --tt;
+    }// end of while loop
+    
+
+    // load draws into sample array
+    if (iter >= burnin && ((iter % thin)==0)){
+      for (int i=0; i<k; ++i)
+	beta_store(count,i) = beta[i];// stored by the order of (11, 12, 13, 21, 22, 23)
+      for (int i=0; i<ns; ++i)
+	Sigma_store(count, i) = Sigma[i]; 
+      for (int j=0; j<ns*ns; ++j)    
+	P_store(count,j)= P[j];
+      for (int l=0; l<n ; ++l)           
+	ps_store(l,_) = ps_store(l,_) + ps(l,_);           // add two matrices
+      s_store(count,_) = s;
+      
+      ++count; // count when (iter % *thin)==0
+      
+    }   // end of if(iter...)
+    
+    
+    // print output to stdout
+    if(verbose > 0 && iter % verbose == 0){
+      Rprintf("\nMCMCintervention iteration %i of %i \n", (iter+1), tot_iter);
+      if (ar == 1 ){
+	double rejectionrate = (double)reject/(double)(iter+1);
+	Rprintf("\n\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n");
+	Rprintf("The acceptance rate was %3.5f", 1 - rejectionrate);
+	Rprintf("\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n");
+       }
+      for (int j = 0;j<ns; ++j){
+	Rprintf("\n nstate[j] %10.5f", static_cast<double>(nstate[j]));     
+      }
+      Rprintf("\n beta \n");
+      for (int i = 0; i<k; ++i){
+	Rprintf("%10.5f\n", beta(i)); 
+      }
+      Rprintf("\n sigma^2 \n");
+      for (int i = 0; i<ns; ++i){
+	Rprintf("%10.5f\n", Sigma(i)); 
+      }
+    }
+    
+  }// end MCMC loop 
+  
+
+  if(chib ==1){
+    double rejectionrate = (double)reject/(double)nstore;    
+    if(rejectionrate > .05){
+      Rprintf("The acceptance rate is too low.\n"); 
+    }
+    else {
+      Matrix<double> beta_st(k, 1);
+      Matrix<double> betast = meanc(beta_store); //meanc(beta_store)=(11, 12, 13, 21, 22, 23) 
+      for (int i = 0; i<k ; ++i){
+	beta_st(i) = betast(i);
+	//Rprintf("beta_st(i) %10.5f\n", beta_st(i)); 
+      }
+      Matrix<double> Sigma_st = meanc(Sigma_store);
+      Matrix<double> sigma_st(ns, 1);
+      for (int j = 0; j<ns ; ++j){
+	sigma_st(j) = sqrt(Sigma_st(j));
+  	//Rprintf("sigma_st(j) %10.5f\n", sigma_st(j)); 
+      }
+      
+      Matrix<double> P_vec_st = meanc(P_store);
+      const Matrix<double> P_st(ns, ns);
+      for (int j = 0; j< ns*ns; ++j){  
+	P_st[j] = P_vec_st[j]; 
+      }    
+      
+      //////////////////////
+      // 1. pdf.beta
+      //////////////////////      
+      Matrix<double> density_beta(nstore, 1);      
+      for (int iter = 0; iter<nstore; ++iter){  
+	Matrix<double> XtX(k, k);   
+	Matrix<double> XtY(k, 1);   
+	Matrix<int> nstate(ns, 1); // contains total numbers of each state
+	int beta_count = 0;
+	
+	for (int j = 0; j<ns ; ++j){
+	  for (int i = 0; i<n; ++i){
+	    if (s_store(iter, i) == (j+1)) { // since j starts from 0
+	      nstate[j] = nstate[j] + 1;
+	    }// end of if
+	  }// end of int i<n
+	  beta_count = beta_count + nstate[j];     
+	  
+	  const Matrix<double> yj = Y((beta_count - nstate[j]), 0, (beta_count - 1), 0);
+	  const Matrix<double> Xj = X((beta_count - nstate[j]), 0, (beta_count - 1), k-1);
+	  const double precision = 1.0/Sigma_store(iter, j);
+	  XtX = XtX + (crossprod(Xj))*precision;
+	  XtY = XtY + ::t(Xj)*yj*precision;
+	}
+	Matrix<double> Bn = invpd(B0 + XtX);
+	Matrix<double> bn = Bn*(B0*b0 + XtY);
+	if (k == 1){
+	  density_beta(iter) = dnorm(beta_st(0), bn(0), sqrt(Bn(0)));
+	}
+	else{
+	  density_beta(iter) = ::exp(lndmvn(beta_st, bn, Bn));
+	}
+      } 
+      
+      double pdf_beta = log(prod(meanc(density_beta)));     
+      
+      ////////////////////// ////////////////////// //////////////////////
+      // 2. pdf.Sigma|beta_st, S, P
+      //////////////////////   ////////////////////// //////////////////////    
+      Matrix<double> density_Sigma(nstore, ns);
+      for (int iter = 0; iter<nstore; ++iter){   
+	// STEP 2.1 S|y, beta.st, Sigma, P     
+	Matrix <double> Sout = gaussian_state_fixedBeta_sampler(stream, m, Y, X, beta_st, Sigma, P);
+	Matrix <double> s = Sout(_, 0);
+	
+	// STEP 2.2 Sigma|y, beta.st, S, P
+	int beta_count = 0;
+	Matrix<int> nstate(ns, 1); // contains total numbers of each state
+	
+	for (int j = 0; j <ns ; ++j){
+	  for (int i = 0; i<n; ++i){
+	    if (s[i] == (j+1)) { // since j starts from 0
+	      nstate[j] = nstate[j] + 1;
+	    }// end of if
+	  }// end of int i<n
+	  beta_count = beta_count + nstate[j];        
+
+	  Matrix<double> yj = Y((beta_count - nstate[j]), 0, (beta_count - 1), 0);
+	  Matrix<double> Xj = X((beta_count - nstate[j]), 0, (beta_count - 1), k-1);  
+	  Matrix<double> ej = yj - Xj*beta_st;
+	  Matrix<double> sum_ej = ::t(ej)*ej;
+	  double shape = (c0 + (double)nstate[j])/2;
+	  double scale =(d0 + sum_ej[0])/2;
+	  
+	  Sigma[j] = stream.rigamma(shape, scale);
+	  density_Sigma(iter, j) = ::exp(lndinvgamma_pjh(Sigma_st[j], shape, scale));
+	}// end of sampling beta and Sigma
+
+	// STEP 2.3 P|S
+	double shape1 = 0;
+	double shape2 = 0;    
+	P(ns-1, ns-1) = 1; //no jump at the last state
+	
+	for (int j =0; j< (ns-1); ++j){
+	  shape1 =  A0(j,j) + (double)nstate[j] - 1;  
+	  shape2 =  A0(j,j+1) + 1; //       
+	  P(j,j) =  stream.rbeta(shape1, shape2);
+	  P(j,j+1) = 1 - P(j,j);
+	}  	
+      }// end of pdf.Sigma  
+      
+      double pdf_Sigma = log(prod(meanc(density_Sigma)));   
+      
+      //////////////////////
+      // 3. pdf.P|beta_st, Sigma_st, S
+      //////////////////////
+      Matrix<double> density_P(nstore, ns);
+      for (int iter = 0; iter < nstore; ++iter){
+	
+	// STEP 2.1 S|y, beta.st, Sigma, P     
+	Matrix <double> Sout = gaussian_state_fixedBeta_sampler(stream, m, Y, X, beta_st, Sigma_st, P);
+	Matrix <double> s = Sout(_, 0);
+	
+	double shape1 = 0;
+	double shape2 = 0;    
+	P(ns-1, ns-1) = 1; //no jump at the last state
+	
+	// compute addN
+	Matrix <double> P_addN(ns, 1); 
+	for (int j = 0; j<ns; ++j){
+	  for (int i = 0; i<n; ++i){
+	    if (s(i) == (j+1)) { // since j starts from 0
+	      P_addN[j] = P_addN[j] + 1;            
+	    }// end of if
+	  } // end of for i
+	} // end of for j        
+	
+	// evaluate P
+	for (int j =0; j< (ns-1); ++j){
+	  shape1 =  A0(j,j) + P_addN[j] - 1;  
+	  shape2 =  A0(j,j+1) + 1; //         
+	  P(j,j) = stream.rbeta(shape1, shape2);
+	  P(j,j+1) = 1 - P(j,j);
+	  density_P(iter, j) = dbeta(P_st(j,j), shape1, shape2); 
+	} // end of for j
+	density_P(iter, ns-1) = 1; //
+	
+      }// end of pdf.P MCMC run            
+      double pdf_P = log(prod(meanc(density_P)));
+      
+      //////////////////////
+      // likelihood
+      //////////////////////       
+      Matrix<double> F = Matrix<double>(n, ns);
+      Matrix<double> like(n, 1);
+      Matrix<double> pr1 = Matrix<double>(ns, 1);
+      pr1[0] = 1;
+      Matrix<double> py(ns, 1);
+      Matrix<double> pstyt1(ns, 1);
+      
+      for (int t=0; t<n ; ++t){
+	int yt = (int) Y[t];
+	Matrix<double> mu = X(t,_)*beta_st; 
+	for (int j = 0; j< ns; ++j){
+	  py[j]  =  dnorm(Y[t], mu[0], sigma_st[j]);
+	}
+	if (t==0) pstyt1 = pr1;
+	else {
+	  pstyt1 =  ::t(F(t-1,_)*P_st); 
+	}
+	Matrix<double> unnorm_pstyt = pstyt1%py;
+	Matrix<double> pstyt = unnorm_pstyt/sum(unnorm_pstyt);
+	for (int j=0; j<ns ; ++j){
+	  F(t,j) = pstyt(j);
+	}
+	like[t] = sum(unnorm_pstyt);
+      }// end of likelihood computation
+      
+      const double loglike = sum(log(like));
+      
+      //////////////////////
+      // log prior ordinate 
+      //////////////////////
+      double density_beta_prior = 0.0;
+      Matrix<double> density_Sigma_prior(ns, 1);
+      Matrix<double> density_P_prior(ns, 1);
+      density_P[ns-1] = 1; //
+      if (k == 1){
+	density_beta_prior = log(dnorm(beta_st(0), b0(0), sqrt(B0inv(0)))); 
+      }
+      else{
+	density_beta_prior = lndmvn(beta_st, b0, B0inv); 
+      }
+      for (int j=0; j<ns ; ++j){
+	density_Sigma_prior[j] = lndinvgamma_pjh(Sigma_st[j], c0/2, d0/2); 
+      }   
+      
+      for (int j =0; j< (ns-1); ++j){
+	density_P_prior[j] = log(dbeta(P_st(j,j), A0(j,j), A0(j,j+1))); 
+      }        
+      
+      // compute marginal likelihood
+      double logprior = density_beta_prior + sum(density_Sigma_prior) + sum(density_P_prior);
+      logmarglike = (loglike + logprior) - (pdf_beta + pdf_Sigma + pdf_P);
+      if(verbose > 0){
+	Rprintf("logmarglike = %10.5f\n", logmarglike);
+	Rprintf("loglike = %10.5f\n", loglike);
+	Rprintf("logprior = %10.5f\n", logprior);
+	Rprintf("pdf_beta = %10.5f\n", pdf_beta);
+	Rprintf("pdf_Sigma = %10.5f\n", pdf_Sigma);
+	Rprintf("pdf_P = %10.5f\n", pdf_P);
+      }
+    }
+  }// end of marginal likelihood computation
+}
+
+
+//////////////////////////////////////////////////////////////////////
+// MCMCintervention, "fixed effect changes only": the coefficient vector
+// beta differs across regimes while a single error variance is shared
+// by all regimes.
+//////////////////////////////////////////////////////////////////////
+template <typename RNGTYPE>
+void MCMCintervention_fixed_impl(rng<RNGTYPE>& stream, 
+				 const double m, const int intervention, 				 
+				 const Matrix<>& Y, const Matrix<>& X,
+				 Matrix<>& beta, Matrix<>& Sigma, Matrix<>& P, Matrix<int>& s, 
+				 Matrix<>& b0, Matrix<>& B0,   
+				 const double c0, const double d0,
+				 const Matrix<>& A0, 
+				 unsigned int burnin, unsigned int mcmc, unsigned int thin,
+				 unsigned int verbose, bool chib, bool ar, 
+				 Matrix<>& beta_store, Matrix<>& Sigma_store, 
+				 Matrix<>& P_store, Matrix<>& ps_store, Matrix<int>& s_store, 
+				 double& logmarglike, 
+				 Matrix<>& yhat_mat, 
+				 Matrix<>& yerror_mat, 
+				 Matrix<>& yfore_pred_mat, 
+				 Matrix<>& yback_pred_mat, 
+				 double acceptance)
+{
+  // define constants and form cross-product matrices
+  const int tot_iter = burnin + mcmc; //total iterations
+  const int nstore = mcmc / thin; // number of draws to store
+  const int n = Y.rows();
+  const int ns = m + 1;                 // number of states
+  const int k = X.cols();
+  const Matrix<> B0inv = invpd(B0);
+  double sigma2 = Sigma(0);
+  double sigma = sqrt(sigma2);
+  
+  //MCMC loop
+  unsigned int count = 0;  
+  unsigned int reject = 0;  
+  Matrix <> sum_e(ns, 1);
+
+  for (int iter = 0; iter < tot_iter; ++iter){
+    //////////////////////
+    // 1. Sample beta and Sigma
+    //////////////////////
+    int beta_count = 0;
+    Matrix<int> nstate(ns, 1); // contains total numbers of each state    
+    for (int j = 0; j<ns; ++j){
+      for (int i = 0; i<n; ++i){
+	if (s[i] == j + 1) { // since j starts from 0
+	  nstate[j] = nstate[j] + 1;
+	}// end of if
+      }// end of int i<n
+      beta_count = beta_count + nstate[j];        
+ 
+      // BETA UPDATE
+      Matrix<double> yj = Y((beta_count - nstate[j]), 0, (beta_count - 1), 0);
+      Matrix<double> Xj = X((beta_count - nstate[j]), 0, (beta_count - 1), k-1);
+      Matrix<double> tXj = ::t(Xj);
+      Matrix<double> Bn = invpd(B0 + crossprod(Xj)/sigma2);
+      Matrix<double> bn = Bn*(B0*b0 + tXj*yj/sigma2);
+      if (ar == 1){
+	Matrix<double> beta_can = stream.rmvnorm(bn, Bn);
+	if (beta_can(1) > 1 || beta_can(1) < -1){
+	  // Rprintf("\n AR coefficient %10.5f is outside the stationary region! \n", beta_can(1));  
+	  ++reject;
+	}
+	else{
+	  for (int kk = 0; kk<k; ++kk){
+	    beta(j,kk) = beta_can(kk);
+	  }
+	}
+      }
+      else{
+	beta(j,_) = stream.rmvnorm(bn, Bn);
+      }
+      Matrix<> yhat_j = Xj*::t(beta(j,_)); 
+      Matrix<double> ej = yj - yhat_j;
+      Matrix<double> sum_ej = t(ej)*ej;
+      sum_e(j) = sum_ej[0];
+      if (iter >= burnin && ((iter % thin)==0)){
+	yhat_mat(count, (beta_count - nstate[j]), count, (beta_count - 1)) = yhat_j(_,0);
+	yerror_mat(count, (beta_count - nstate[j]), count, (beta_count - 1)) = ej(_,0);
+      }
+      
+    }// end of sampling beta and Sigma
+    
+    // SIGMA UPDATE
+    double shape = (c0 + (double)n)/2;
+    double scale =(d0 + sum(sum_e))/2;
+    
+    sigma2 = 1/stream.rgamma(shape, scale);
+    sigma = sqrt(sigma2);
+     
+    //////////////////////
+    // 2. Sample P
+    //////////////////////        
+    double shape1 = 0;
+    double shape2 = 0;    
+    P(ns-1, ns-1) = 1; //no jump at the last state
+    
+    for (int j =0; j<(ns-1); ++j){
+      shape1 =  A0(j,j) + (double)nstate[j] - 1;  
+      shape2 =  A0(j,j+1) + 1; // SS(j,j+1);        
+      P(j,j) = stream.rbeta(shape1, shape2);
+      P(j,j+1) = 1 - P(j,j);
+    }
+    
+    //////////////////////
+    // 3. Sample s
+    //////////////////////
+    Matrix<double> F(n, ns);
+    Matrix<double> pr1(ns, 1);
+    pr1[0] = 1;
+    Matrix<double> py(ns, 1);
+    Matrix<double> pstyt1(ns, 1);
+    Matrix<double> ps = Matrix<double>(n, ns);  // holder for state probabilities
+    
+    //
+    // Forward sampling: update F matrix
+    //
+    for (int tt=0; tt<n ; ++tt){
+      Matrix<double> mu = X(tt,_)*::t(beta); //k by 1 vector
+      for (int j = 0; j< ns; ++j){
+	py[j]  =  dnorm(Y[tt], mu[j], sigma);
+      }
+      if (tt==0) pstyt1 = pr1;
+      else {
+	pstyt1 =  ::t(F(tt-1,_)*P); // make it an ns by 1 matrix
+      }
+      Matrix<double> unnorm_pstyt = pstyt1%py;
+
+      /////////////////////////////////////////////////////////////////////
+      // Prediction of future outcomes based on pre-intervention state
+      /////////////////////////////////////////////////////////////////////
+      if (tt==(intervention - 1)&&iter >= burnin && ((iter % thin)==0)){
+	// Forward prediction
+	Matrix <> yfore_pred(1, n);
+	for (int ttt=tt; ttt<n ; ++ttt){
+	  int ss = s(tt-1); // 1 unit before the intervention
+	  mu = X(ttt,_)*::t(beta); //k by 1 vector
+	  yfore_pred(ttt) = stream.rnorm(mu[ss-1], sigma);
+	}
+	yfore_pred_mat(count, _) = yfore_pred(0, _);
+	// backward prediction
+	Matrix <> yback_pred(1, n);
+	for (int ttt=tt; ttt>=0 ; --ttt){
+	  int ss = s(tt+1);
+	  mu = X(ttt,_)*::t(beta); //k by 1 vector
+	  yback_pred(ttt) = stream.rnorm(mu[ss-1], sigma);
+	}
+	yback_pred_mat(count, _) = yback_pred(0, _);
+      }	
+    
+      const Matrix<double> pstyt = unnorm_pstyt/sum(unnorm_pstyt); // pstyt = Pr(st|Yt)
+      for (int j=0; j<ns ; ++j) F(tt,j) = pstyt(j);
+      
+    }// end of F matrix filtering
+    
+    //
+    // Backward recursions: sample s using F and a transition matrix P
+    //
+    ps(n-1,_) = F(n-1,_);                       // we know last elements of ps and s
+    s(n-1) = ns;                                // the last period is s(*n-1) but one down in C location
+    
+    Matrix<double> pstyn = Matrix<double>(ns, 1);
+    double pone = 0.0;
+    int tt = n-2;
+    while (tt >= 0){
+      int st = s(tt+1);
+      Matrix<double> Pst_1 = ::t(P(_,st-1)); // prob of being at a previous state
+      Matrix<double> unnorm_pstyn = F(tt,_)%Pst_1;
+      pstyn = unnorm_pstyn/sum(unnorm_pstyn); // normalize into a prob. density
+      if (st==1)   s(tt) = 1;                  // If this is the first period, state should be 1.
+      // Otherwise, draw a state from a discrete prob. distribution("pstyn")
+      // using the inverse CDF method.
+      else{
+	pone = pstyn(st-2);
+	if(stream.runif() < pone) s(tt) = st-1;// jump from tt-1 to tt 
+	else s(tt) = st;// stay 
+      }
+      ps(tt,_) = pstyn;
+      --tt;
+    }// end of while loop
+    
+
+    // load draws into sample array
+    if (iter >= burnin && ((iter % thin)==0)){
+      Matrix<double> tbeta = ::t(beta); //transpose beta for R output 
+      for (int i=0; i<(ns*k); ++i)
+	beta_store(count,i) = tbeta[i];// stored by the order of (11, 12, 13, 21, 22, 23)
+      Sigma_store(count) = sigma2; 
+      for (int j=0; j<ns*ns; ++j)    
+	P_store(count,j)= P[j];
+      for (int l=0; l<n ; ++l)           
+	ps_store(l,_) = ps_store(l,_) + ps(l,_);           // add two matrices
+      s_store(count,_) = s;
+      
+      ++count; // count when (iter % *thin)==0
+      
+    }   // end of if(iter...)
+    
+    
+    // print output to stdout
+    if(verbose > 0 && iter % verbose == 0){
+      Rprintf("\nMCMCintervention iteration %i of %i \n", (iter+1), tot_iter);
+      if (ar == 1 ){
+	double rejectionrate = (double)reject/(double)(iter+1);
+	Rprintf("\n\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n");
+	Rprintf("The acceptance rate was %3.5f", 1 - rejectionrate);
+	Rprintf("\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n");
+      }
+      for (int j = 0;j<ns; ++j){
+	Rprintf("\n nstate[j] %10.5f", static_cast<double>(nstate[j]));     
+      }
+      Rprintf("\n beta \n");
+      for (int i = 0; i<ns; ++i){
+	for (int j = 0; j<k; ++j){
+	  Rprintf("%10.5f\n", beta(i, j)); 
+	}
+      }
+      Rprintf("\n sigma^2 \n");
+      Rprintf("%10.5f\n", sigma2); 
+    }
+    
+  }// end MCMC loop 
+  
+  acceptance = 1 - (double)reject/(double)nstore;    
+  
+  if(chib ==1){
+    // An acceptance rate below 0.95 (i.e. a rejection rate above 0.05) is used as an
+    // arbitrary threshold to skip the marginal likelihood computation.
+    // Later, the threshold should be settable by users.
+    if(acceptance < .95){
+      Rprintf("The acceptance rate is too low.\n"); 
+    }
+    else{
+      Matrix<double> betast = meanc(beta_store); //meanc(beta_store)=(11, 12, 13, 21, 22, 23) 
+      Matrix<double, Row> beta_st(ns, k);
+      for (int j = 0; j<ns*k; ++j){
+	beta_st[j] = betast[j];
+      }
+      
+      Matrix<double> Sigma_st(ns, 1);
+      for (int j = 0; j<ns ; ++j){
+	Sigma_st(j) = mean(Sigma_store);
+      }
+      Matrix<double> P_vec_st = meanc(P_store);
+      const Matrix<double> P_st(ns, ns);
+      for (int j = 0; j< ns*ns; ++j){  
+	P_st[j] = P_vec_st[j]; 
+      }    
+      
+      //////////////////////
+      // 1. pdf.beta
+      //////////////////////      
+      Matrix<double> density_beta(nstore, ns);      
+      for (int iter = 0; iter<nstore; ++iter){  
+	
+	Matrix<int> nstate(ns, 1); // contains total numbers of each state
+	int beta_count = 0;
+	
+	for (int j = 0; j<ns ; ++j){
+	  for (int i = 0; i<n; ++i){
+	    if (s_store(iter, i) == (j+1)) { // since j starts from 0
+	      nstate[j] = nstate[j] + 1;
+	    }// end of if
+	  }// end of int i<n
+	  beta_count = beta_count + nstate[j];     
+	  
+	  // BETA UPDATE
+	  const Matrix<double> yj = Y((beta_count - nstate[j]), 0, (beta_count - 1), 0);
+	  const Matrix<double> Xj = X((beta_count - nstate[j]), 0, (beta_count - 1), k-1);
+	  const double precision = 1.0/Sigma_store(iter);
+	  const Matrix<double> XpX = crossprod(Xj);
+	  const Matrix<double> XpY = (::t(Xj)*yj);
+	  const Matrix<double> Bn = invpd(B0 + XpX*precision);
+	  const Matrix<double> bn = Bn*gaxpy(B0, b0, XpY*precision);
+	  if (k == 1){
+	    density_beta(iter, j) = dnorm(beta_st(j), bn(0), sqrt(Bn(0)));
+	  }
+	  else{
+	    density_beta(iter, j) = ::exp(lndmvn(::t(beta_st(j,_)), bn, Bn));
+	  }
+	}// end of sampling beta and Sigma
+	
+      }// end of pdf.beta   
+      
+      double pdf_beta = log(prod(meanc(density_beta)));     
+    
+      //////////////////////
+      // 2. pdf.Sigma|beta_st, S, P
+      //////////////////////      
+      Matrix<double> density_Sigma(nstore, 1);
+      for (int iter = 0; iter<nstore; ++iter){   
+	
+	// STEP 2.1 S|y, beta.st, Sigma, P     
+	Matrix <double> Sout = gaussian_state_sampler(stream, m, Y, X, beta_st, Sigma, P);
+	Matrix <double> s = Sout(_, 0);
+	
+	// STEP 2.2 Sigma|y, beta.st, S, P
+	int beta_count = 0;
+	Matrix<int> nstate(ns, 1); 
+	Matrix<> sum_e(ns, 1);
+	
+	for (int j = 0; j <ns ; ++j){
+	  for (int i = 0; i<n; ++i){
+	    if (s[i] == (j+1)) { // since j starts from 0
+	      nstate[j] = nstate[j] + 1;
+	    }// end of if
+	  }// end of int i<n
+	  beta_count = beta_count + nstate[j];        
+	  
+	  Matrix<double> yj = Y((beta_count - nstate[j]), 0, (beta_count - 1), 0);
+	  Matrix<double> Xj = X((beta_count - nstate[j]), 0, (beta_count - 1), k-1);  
+	  Matrix<double> ej = yj - Xj*::t(beta_st(j,_));
+	  Matrix<double> sum_ej = ::t(ej)*ej;
+	  sum_e(j) = sum_ej(0);
+	}// end of sampling beta and Sigma
+	double shape = (c0 + (double)n)/2;
+	double scale =(d0 + sum(sum_e))/2;
+	
+	sigma2 = stream.rigamma(shape, scale);
+	density_Sigma(iter) = ::exp(lndinvgamma_pjh(Sigma_st(0), shape, scale));
+
+	for (int j = 0; j <ns ; ++j){
+	  Sigma[j] = sigma2;// to use gaussian_state_sampler
+	}
+
+	// STEP 2.3 P|S
+	double shape1 = 0;
+	double shape2 = 0;    
+	P(ns-1, ns-1) = 1; //no jump at the last state
+	
+	for (int j =0; j< (ns-1); ++j){
+	  shape1 =  A0(j,j) + (double)nstate[j] - 1;  
+	  shape2 =  A0(j,j+1) + 1; //       
+	  P(j,j) =  stream.rbeta(shape1, shape2);
+	  P(j,j+1) = 1 - P(j,j);
+
+	  // not to change gaussian_state_sampler, I use Sigma matrix
+	  Sigma[j] = sigma2;
+	}  
+	
+      }// end of pdf.Sigma  
+      
+      double pdf_Sigma = log(prod(meanc(density_Sigma)));   
+      
+      //////////////////////
+      // 3. pdf.P|beta_st, Sigma_st, S
+      //////////////////////
+      Matrix<double> density_P(nstore, ns);
+      for (int iter = 0; iter < nstore; ++iter){
+	
+	// STEP 2.1 S|y, beta.st, Sigma, P     
+	Matrix <double> Sout = gaussian_state_sampler(stream, m, Y, X, beta_st, Sigma_st, P);
+	Matrix <double> s = Sout(_, 0);
+	
+	double shape1 = 0;
+	double shape2 = 0;    
+	P(ns-1, ns-1) = 1; //no jump at the last state
+	
+	// compute addN
+	Matrix <double> P_addN(ns, 1); 
+	for (int j = 0; j<ns; ++j){
+	  for (int i = 0; i<n; ++i){
+	    if (s(i) == (j+1)) { // since j starts from 0
+	      P_addN[j] = P_addN[j] + 1;            
+	    }// end of if
+	  } // end of for i
+	} // end of for j        
+	
+	// evaluate P
+	for (int j =0; j< (ns-1); ++j){
+	  shape1 =  A0(j,j) + P_addN[j] - 1;  
+	  shape2 =  A0(j,j+1) + 1; //         
+	  P(j,j) = stream.rbeta(shape1, shape2);
+	  P(j,j+1) = 1 - P(j,j);
+	  density_P(iter, j) = dbeta(P_st(j,j), shape1, shape2); 
+	} // end of for j
+	density_P(iter, ns-1) = 1; //
+	
+      }// end of pdf.P MCMC run            
+      double pdf_P = log(prod(meanc(density_P)));
+      
+      //////////////////////
+      // likelihood
+      //////////////////////       
+      Matrix<double> F = Matrix<double>(n, ns);
+      Matrix<double> like(n, 1);
+      Matrix<double> pr1 = Matrix<double>(ns, 1);
+      pr1[0] = 1;
+      Matrix<double> py(ns, 1);
+      Matrix<double> pstyt1(ns, 1);
+      
+      for (int t=0; t<n ; ++t){
+	Matrix<double> mu = X(t,_)*::t(beta_st); //k by 1 vector
+	for (int j = 0; j< ns; ++j){
+	  py[j]  =  dnorm(Y[t], mu[j], sqrt(Sigma_st[0]));
+	}
+	if (t==0) pstyt1 = pr1;
+	else {
+	  pstyt1 =  ::t(F(t-1,_)*P_st); // make it an ns by 1 matrix
+	}
+	Matrix<double> unnorm_pstyt = pstyt1%py;
+	Matrix<double> pstyt = unnorm_pstyt/sum(unnorm_pstyt); // pstyt = Pr(st|Yt)
+	for (int j=0; j<ns ; ++j){
+	  F(t,j) = pstyt(j);
+	}
+	like[t] = sum(unnorm_pstyt);
+      }// end of likelihood computation
+      
+      const double loglike = sum(log(like));
+      
+      //////////////////////
+      // log prior ordinate 
+      //////////////////////
+      Matrix<double> density_beta_prior(ns, 1);
+      double density_Sigma_prior = 0.0;
+      Matrix<double> density_P_prior(ns, 1);
+      density_P_prior[ns-1] = 0; // log(1): the absorbing state has no free transition parameter
+      if (k == 1){
+	for (int j=0; j<ns ; ++j){
+	  density_beta_prior[j] = log(dnorm(beta_st(j), b0(0), sqrt(B0inv(0)))); 
+	}   
+      }
+      else{
+	for (int j=0; j<ns ; ++j){
+	  density_beta_prior[j] = lndmvn(::t(beta_st(j,_)), b0, B0inv); 
+	}   
+      }
+      
+      density_Sigma_prior = lndinvgamma_pjh(Sigma_st(0), c0/2, d0/2); 
+      
+      for (int j =0; j< (ns-1); ++j){
+	density_P_prior[j] = log(dbeta(P_st(j,j), A0(j,j), A0(j,j+1))); 
+      }        
+      
+      // compute marginal likelihood
+      double logprior = sum(density_beta_prior) + density_Sigma_prior + sum(density_P_prior);
+      logmarglike = (loglike + logprior) - (pdf_beta + pdf_Sigma + pdf_P);
+      if(verbose > 0){
+	Rprintf("logmarglike = %10.5f\n", logmarglike);
+	Rprintf("loglike = %10.5f\n", loglike);
+	Rprintf("logprior = %10.5f\n", logprior);
+	Rprintf("pdf_beta = %10.5f\n", pdf_beta);
+	Rprintf("pdf_Sigma = %10.5f\n", pdf_Sigma);
+	Rprintf("pdf_P = %10.5f\n", pdf_P);
+      }
+    }
+  }// end of marginal likelihood computation
+}
+
+template <typename RNGTYPE>
+void MCMCintervention_impl(rng<RNGTYPE>& stream, 
+			   const double m, const int intervention, 				 
+			   const Matrix<>& Y, const Matrix<>& X,
+			   Matrix<>& beta, Matrix<>& Sigma, Matrix<>& P, Matrix<int>& s, 
+			   Matrix<>& b0, Matrix<>& B0,   
+			   const double c0, const double d0,
+			   const Matrix<>& A0, 
+			   unsigned int burnin, unsigned int mcmc, unsigned int thin,
+			   unsigned int verbose, bool chib, bool ar, 
+			   Matrix<>& beta_store, Matrix<>& Sigma_store, 
+			   Matrix<>& P_store, Matrix<>& ps_store, Matrix<int>& s_store, 
+			   double& logmarglike, 
+			   Matrix<>& yhat_mat, 
+			   Matrix<>& yerror_mat, 
+			   Matrix<>& yfore_pred_mat, 
+			   Matrix<>& yback_pred_mat, 
+			   double& acceptance)
+{
+  // define constants and form cross-product matrices
+  const int tot_iter = burnin + mcmc; //total iterations
+  const int nstore = mcmc / thin; // number of draws to store
+  const int n = Y.rows();
+  const int ns = m + 1;                 // number of states
+  const int k = X.cols();
+  const Matrix<> B0inv = invpd(B0);
+  Matrix<double> sigma(ns, 1);
+
+  //MCMC loop
+  unsigned int count = 0;  
+  int reject = 0;  
+  for (int iter = 0; iter < tot_iter; ++iter){
+    //////////////////////
+    // 1. Sample beta and Sigma
+    //////////////////////
+    int beta_count = 0;
+    Matrix<int> nstate(ns, 1); // contains total numbers of each state    
+    for (int j = 0; j<ns; ++j){
+      for (int i = 0; i<n; ++i){
+	if (s[i] == j + 1) { // since j starts from 0
+	  nstate[j] = nstate[j] + 1;
+	}// end of if
+      }// end of int i<n
+      beta_count = beta_count + nstate[j];        
+ 
+      // BETA UPDATE
+      Matrix<double> yj = Y((beta_count - nstate[j]), 0, (beta_count - 1), 0);
+      Matrix<double> Xj = X((beta_count - nstate[j]), 0, (beta_count - 1), k-1);
+      Matrix<double> tXj = ::t(Xj);
+      Matrix<double> Bn = invpd(B0 + crossprod(Xj)/Sigma[j]);
+      Matrix<double> bn = Bn*(B0*b0 + tXj*yj/Sigma[j]);
+      if (ar == 1){
+	Matrix<double> beta_can = stream.rmvnorm(bn, Bn);
+	if (beta_can(1) > 1 || beta_can(1) < -1){
+	  // Rprintf("\n AR coefficient %10.5f is outside the stationary region! \n", beta_can(1));  
+	  ++reject;
+	}
+	else{
+	  for (int kk = 0; kk<k; ++kk){
+	    beta(j,kk) = beta_can(kk);
+	  }
+	}
+      }
+      else{
+	beta(j,_) = stream.rmvnorm(bn, Bn);
+      }
+      
+      // SIGMA UPDATE
+      double shape = (c0 + (double)nstate[j])/2;
+      Matrix<> yhat_j = Xj*::t(beta(j,_)); 
+      Matrix<double> ej = yj - yhat_j;
+      Matrix<double> sum_ej = t(ej)*ej;
+      double scale =(d0 + sum_ej[0])/2;
+      
+      Sigma[j] = 1/stream.rgamma(shape, scale);
+      sigma(j) = sqrt(Sigma(j));
+      
+
+      if (iter >= burnin && ((iter % thin)==0)){
+	yhat_mat(count, (beta_count - nstate[j]), count, (beta_count - 1)) = yhat_j(_,0);
+	yerror_mat(count, (beta_count - nstate[j]), count, (beta_count - 1)) = ej(_,0);
+      }
+      
+    }// end of sampling beta and Sigma
+    
+    //////////////////////
+    // 2. Sample P
+    //////////////////////        
+    double shape1 = 0;
+    double shape2 = 0;    
+    P(ns-1, ns-1) = 1; //no jump at the last state
+    
+    for (int j =0; j<(ns-1); ++j){
+      shape1 =  A0(j,j) + (double)nstate[j] - 1;  
+      shape2 =  A0(j,j+1) + 1; // SS(j,j+1);        
+      P(j,j) = stream.rbeta(shape1, shape2);
+      P(j,j+1) = 1 - P(j,j);
+    }
+    
+    //////////////////////
+    // 3. Sample s
+    //////////////////////
+    Matrix<double> F(n, ns);
+    Matrix<double> pr1(ns, 1);
+    pr1[0] = 1;
+    Matrix<double> py(ns, 1);
+    Matrix<double> pstyt1(ns, 1);
+    Matrix<double> ps = Matrix<double>(n, ns);  // holder for state probabilities
+    
+    //
+    // Forward sampling: update F matrix
+    //
+    for (int tt=0; tt<n ; ++tt){
+      Matrix<double> mu = X(tt,_)*::t(beta); //k by 1 vector
+      for (int j = 0; j< ns; ++j){
+	py[j]  =  dnorm(Y[tt], mu[j], sigma[j]);
+      }
+      if (tt==0) pstyt1 = pr1;
+      else {
+	pstyt1 =  ::t(F(tt-1,_)*P); // make it an ns by 1 matrix
+      }
+      Matrix<double> unnorm_pstyt = pstyt1%py;
+
+      /////////////////////////////////////////////////////////////////////
+      // Prediction of future outcomes based on pre-intervention state
+      /////////////////////////////////////////////////////////////////////
+      if (tt==(intervention - 1)&&iter >= burnin && ((iter % thin)==0)){
+	// Forward prediction
+	Matrix <> yfore_pred(1, n);
+	for (int ttt=tt; ttt<n ; ++ttt){
+	  int ss = s(tt-1); // 1 unit before the intervention
+	  mu = X(ttt,_)*::t(beta); //k by 1 vector
+	  yfore_pred(ttt) = stream.rnorm(mu[ss-1], sigma[ss-1]);
+	}
+	yfore_pred_mat(count, _) = yfore_pred(0, _);
+	// backward prediction
+	Matrix <> yback_pred(1, n);
+	for (int ttt=tt; ttt>=0 ; --ttt){
+	  int ss = s(tt+1);
+	  mu = X(ttt,_)*::t(beta); //k by 1 vector
+	  yback_pred(ttt) = stream.rnorm(mu[ss-1], sigma[ss-1]);
+	}
+	yback_pred_mat(count, _) = yback_pred(0, _);
+      }	
+    
+      const Matrix<double> pstyt = unnorm_pstyt/sum(unnorm_pstyt); // pstyt = Pr(st|Yt)
+      for (int j=0; j<ns ; ++j) F(tt,j) = pstyt(j);
+      
+    }// end of F matrix filtering
+    
+    //
+    // Backward recursions: sample s using F and a transition matrix P
+    //
+    ps(n-1,_) = F(n-1,_);                       // we know last elements of ps and s
+    s(n-1) = ns;                                // the last observation is in state ns (stored at index n-1 under 0-based indexing)
+    
+    Matrix<double> pstyn = Matrix<double>(ns, 1);
+    double pone = 0.0;
+    int tt = n-2;
+    while (tt >= 0){
+      int st = s(tt+1);
+      Matrix<double> Pst_1 = ::t(P(_,st-1)); // prob of being at a previous state
+      Matrix<double> unnorm_pstyn = F(tt,_)%Pst_1;
+      pstyn = unnorm_pstyn/sum(unnorm_pstyn); // normalize into a prob. density
+      if (st==1)   s(tt) = 1;                  // If this is the first period, state should be 1.
+      // Otherwise, draw a state from a discrete prob. distribution("pstyn")
+      // using the inverse CDF method.
+      else{
+	pone = pstyn(st-2);
+	if(stream.runif() < pone) s(tt) = st-1;// a regime change occurs between tt and tt+1
+	else s(tt) = st;// stay in the same regime
+      }
+      ps(tt,_) = pstyn;
+      --tt;
+    }// end of while loop
+    
+
+    // load draws into sample array
+    if (iter >= burnin && ((iter % thin)==0)){
+      Matrix<double> tbeta = ::t(beta); //transpose beta for R output 
+      for (int i=0; i<(ns*k); ++i)
+	beta_store(count,i) = tbeta[i];// stored by the order of (11, 12, 13, 21, 22, 23)
+      for (int i=0; i<ns; ++i)
+	Sigma_store(count,i) = Sigma[i]; 
+      for (int j=0; j<ns*ns; ++j)    
+	P_store(count,j)= P[j];
+      for (int l=0; l<n ; ++l)           
+	ps_store(l,_) = ps_store(l,_) + ps(l,_);           // add two matrices
+      s_store(count,_) = s;
+      
+      ++count; // count when (iter % *thin)==0
+      
+    }   // end of if(iter...)
+    
+    
+    // print output to stdout
+    if(verbose > 0 && iter % verbose == 0){
+      Rprintf("\nMCMCintervention iteration %i of %i \n", (iter+1), tot_iter);
+      if (ar == 1 ){
+	double rejectionrate = (double)reject/(double)(iter+1);
+	Rprintf("\n\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n");
+	Rprintf("The acceptance rate was %3.5f", 1 - rejectionrate);
+	Rprintf("\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n");
+      }
+      for (int j = 0;j<ns; ++j){
+	Rprintf("\n nstate[j] %10.5f", static_cast<double>(nstate[j]));     
+      }
+      Rprintf("\n beta \n");
+      for (int i = 0; i<ns; ++i){
+	for (int j = 0; j<k; ++j){
+	  Rprintf("%10.5f\n", beta(i, j)); 
+	}
+      }
+      Rprintf("\n sigma^2 \n");
+      for (int i = 0; i<ns; ++i){
+	Rprintf("%10.5f\n", Sigma(i)); 
+      }
+    }
+    
+  }// end MCMC loop 
+ 
+  acceptance = 1 - (double)reject/(double)nstore;    
+  
+  if(chib ==1){
+    // an acceptance rate below 0.95 (i.e. a rejection rate above 0.05) is an arbitrary
+    // threshold for skipping the marginal likelihood computation; it should eventually be set by users
+    if(acceptance < .95){
+      Rprintf("The acceptance rate is too low.\n"); 
+    }
+    else{
+      Matrix<double> betast = meanc(beta_store); 
+      Matrix<double, Row> beta_st(ns, k);
+      for (int j = 0; j<ns*k; ++j){
+	beta_st[j] = betast[j];
+      }
+      
+      Matrix<double> Sigma_st = meanc(Sigma_store);
+      Matrix<double> sigma_st(ns, 1);
+      for (int j = 0; j<ns ; ++j){
+	sigma_st(j) = sqrt(Sigma_st(j));
+      }
+      
+      Matrix<double> P_vec_st = meanc(P_store);
+      const Matrix<double> P_st(ns, ns);
+      for (int j = 0; j< ns*ns; ++j){  
+	P_st[j] = P_vec_st[j]; 
+      }    
+      
+      //////////////////////
+      // 1. pdf.beta
+      //////////////////////      
+      Matrix<double> density_beta(nstore, ns);      
+      for (int iter = 0; iter<nstore; ++iter){  
+	
+	Matrix<int> nstate(ns, 1); 
+	int beta_count = 0;
+	
+	for (int j = 0; j<ns ; ++j){
+	  for (int i = 0; i<n; ++i){
+	    if (s_store(iter, i) == (j+1)) { 
+	      nstate[j] = nstate[j] + 1;
+	    }// end of if
+	  }// end of int i<n
+	  beta_count = beta_count + nstate[j];     
+	  
+	  // BETA UPDATE
+	  const Matrix<double> yj = Y((beta_count - nstate[j]), 0, (beta_count - 1), 0);
+	  const Matrix<double> Xj = X((beta_count - nstate[j]), 0, (beta_count - 1), k-1);
+	  const double precision = 1.0/Sigma_store(iter, j);
+	  const Matrix<double> XpX = crossprod(Xj);
+	  const Matrix<double> XpY = (::t(Xj)*yj);
+	  const Matrix<double> Bn = invpd(B0 + XpX*precision);
+	  const Matrix<double> bn = Bn*gaxpy(B0, b0, XpY*precision);
+	  if (k == 1){
+	    density_beta(iter, j) = log(dnorm(beta_st(j), bn(0), sqrt(Bn(0))));
+	  }
+	  else{
+	    density_beta(iter, j) = lndmvn(::t(beta_st(j,_)), bn, Bn);
+	  }
+	}// end of sampling beta and Sigma
+	
+      }// end of pdf.beta   
+      
+      double pdf_beta = sum(meanc(density_beta));     
+    
+      //////////////////////
+      // 2. pdf.Sigma|beta_st, S, P
+      //////////////////////      
+      Matrix<double> density_Sigma(nstore, ns);
+      for (int iter = 0; iter<nstore; ++iter){   
+	
+	// STEP 2.1 S|y, beta.st, Sigma, P     
+	Matrix <double> Sout = gaussian_state_sampler(stream, m, Y, X, beta_st, Sigma, P);
+	Matrix <double> s = Sout(_, 0);
+	
+	// STEP 2.2 Sigma|y, beta.st, S, P
+	int beta_count = 0;
+	Matrix<int> nstate(ns, 1); // contains total numbers of each state
+	
+	for (int j = 0; j <ns ; ++j){
+	  for (int i = 0; i<n; ++i){
+	    if (s[i] == (j+1)) { // since j starts from 0
+	      nstate[j] = nstate[j] + 1;
+	    }// end of if
+	  }// end of int i<n
+	  beta_count = beta_count + nstate[j];        
+	  
+	  Matrix<double> yj = Y((beta_count - nstate[j]), 0, (beta_count - 1), 0);
+	  Matrix<double> Xj = X((beta_count - nstate[j]), 0, (beta_count - 1), k-1);  
+	  Matrix<double> ej = yj - Xj*::t(beta_st(j,_));
+	  Matrix<double> sum_ej = ::t(ej)*ej;
+	  double shape = (c0 + (double)nstate[j])/2;
+	  double scale =(d0 + sum_ej[0])/2;
+	  
+	  Sigma[j] = stream.rigamma(shape, scale);
+	  density_Sigma(iter, j) = ::exp(lndinvgamma_pjh(Sigma_st[j], shape, scale));
+	  
+	}// end of sampling beta and Sigma
+	
+	// STEP 2.3 P|S
+	double shape1 = 0;
+	double shape2 = 0;    
+	P(ns-1, ns-1) = 1; //no jump at the last state
+	
+	for (int j =0; j< (ns-1); ++j){
+	  shape1 =  A0(j,j) + (double)nstate[j] - 1;  
+	  shape2 =  A0(j,j+1) + 1; //       
+	  P(j,j) =  stream.rbeta(shape1, shape2);
+	  P(j,j+1) = 1 - P(j,j);
+	}  
+	
+      }// end of pdf.Sigma  
+      
+      double pdf_Sigma = log(prod(meanc(density_Sigma)));   
+      
+      //////////////////////
+      // 3. pdf.P|beta_st, Sigma_st, S
+      //////////////////////
+      Matrix<double> density_P(nstore, ns);
+      for (int iter = 0; iter < nstore; ++iter){
+	
+	// STEP 2.1 S|y, beta.st, Sigma, P     
+	Matrix <double> Sout = gaussian_state_sampler(stream, m, Y, X, beta_st, Sigma_st, P);
+	Matrix <double> s = Sout(_, 0);
+	
+	double shape1 = 0;
+	double shape2 = 0;    
+	P(ns-1, ns-1) = 1;
+	
+	// compute addN
+	Matrix <double> P_addN(ns, 1); 
+	for (int j = 0; j<ns; ++j){
+	  for (int i = 0; i<n; ++i){
+	    if (s(i) == (j+1)) { 
+	      P_addN[j] = P_addN[j] + 1;            
+	    }// end of if
+	  } // end of for i
+	} // end of for j        
+	
+	// evaluate P
+	for (int j =0; j< (ns-1); ++j){
+	  shape1 =  A0(j,j) + P_addN[j] - 1;  
+	  shape2 =  A0(j,j+1) + 1; //         
+	  P(j,j) = stream.rbeta(shape1, shape2);
+	  P(j,j+1) = 1 - P(j,j);
+	  density_P(iter, j) = dbeta(P_st(j,j), shape1, shape2); 
+	} // end of for j
+	density_P(iter, ns-1) = 1; //
+	
+      }// end of pdf.P MCMC run            
+      double pdf_P = log(prod(meanc(density_P)));
+      
+      //////////////////////
+      // likelihood
+      //////////////////////       
+      Matrix<double> F = Matrix<double>(n, ns);
+      Matrix<double> like(n, 1);
+      Matrix<double> pr1 = Matrix<double>(ns, 1);
+      pr1[0] = 1;
+      Matrix<double> py(ns, 1);
+      Matrix<double> pstyt1(ns, 1);
+      
+      for (int t=0; t<n ; ++t){
+	Matrix<double> mu = X(t,_)*::t(beta_st); 
+	for (int j = 0; j< ns; ++j){
+	  py[j]  =  dnorm(Y[t], mu[j], sigma_st[j]);
+	}
+	if (t==0) pstyt1 = pr1;
+	else {
+	  pstyt1 =  ::t(F(t-1,_)*P_st); 
+	}
+	Matrix<double> unnorm_pstyt = pstyt1%py;
+	Matrix<double> pstyt = unnorm_pstyt/sum(unnorm_pstyt); 
+	for (int j=0; j<ns ; ++j){
+	  F(t,j) = pstyt(j);
+	}
+	like[t] = sum(unnorm_pstyt);
+      }// end of likelihood computation
+      
+      const double loglike = sum(log(like));
+      
+      //////////////////////
+      // log prior ordinate 
+      //////////////////////
+      Matrix<double> density_beta_prior(ns, 1);
+      Matrix<double> density_Sigma_prior(ns, 1);
+      Matrix<double> density_P_prior(ns, 1);
+      density_P_prior[ns-1] = 0; // log(1): the absorbing state has no free transition parameter
+      if (k == 1){
+	for (int j=0; j<ns ; ++j){
+	  density_beta_prior[j] = log(dnorm(beta_st(j), b0(0), sqrt(B0inv(0)))); 
+	}   
+      }
+      else{
+	for (int j=0; j<ns ; ++j){
+	  density_beta_prior[j] = lndmvn(::t(beta_st(j,_)), b0, B0inv); 
+	}   
+      }
+      
+      for (int j=0; j<ns ; ++j){
+	density_Sigma_prior[j] = lndinvgamma_pjh(Sigma_st[j], c0/2, d0/2); 
+      }   
+      
+      for (int j =0; j< (ns-1); ++j){
+	density_P_prior[j] = log(dbeta(P_st(j,j), A0(j,j), A0(j,j+1))); 
+      }        
+      
+      // compute marginal likelihood
+      double logprior = sum(density_beta_prior) + sum(density_Sigma_prior) + sum(density_P_prior);
+      logmarglike = (loglike + logprior) - (pdf_beta + pdf_Sigma + pdf_P);
+      if(verbose > 0){
+	Rprintf("logmarglike = %10.5f\n", logmarglike);
+	Rprintf("loglike = %10.5f\n", loglike);
+	Rprintf("logprior = %10.5f\n", logprior);
+	Rprintf("pdf_beta = %10.5f\n", pdf_beta);
+	Rprintf("pdf_Sigma = %10.5f\n", pdf_Sigma);
+	Rprintf("pdf_P = %10.5f\n", pdf_P);
+      }
+    }// end of marginal likelihood computation
+  }
+}
+
+//////////////////////////////////////////// 
+// Start MCMCintervention function
+///////////////////////////////////////////
+extern "C"{
+  void MCMCintervention(double *accept, double *betaout, double *Sigmaout, double *Pout, 
+			double *psout, double *sout,  
+			double *yhatout, double *yerrorout, double *yforepredout, double *ybackpredout,
+			const double *Ydata, const int *Yrow, const int *Ycol, 
+			const double *Xdata, const int *Xrow, const int *Xcol, 
+			const int *m, const int *intervention, 
+			const int *burnin, const int *mcmc, const int *thin, const int *verbose, 
+			const int *uselecuyer, const int *seedarray, const int *lecuyerstream, 
+			const double *betastart, const double *Sigmastart, const double *Pstart, 
+			const int *statestart, 
+			const double *a, const double *b, 
+			const double *b0data, const double *B0data, 
+			const double *c0, const double *d0, 
+			const double *A0data, 
+			double *logmarglikeholder, double *loglikeholder, 
+			const int *ar, const int *change, const int *chib){
+    
+    // pull together Matrix objects
+    const Matrix <double> Y(*Yrow, *Ycol, Ydata); 
+    const Matrix <double> X(*Xrow, *Xcol, Xdata); 
+    const unsigned int nstore = *mcmc / *thin; // number of draws to store
+    const int n = Y.rows();
+    const int k = X.cols();
+    const int ns = *m + 1;                 // number of states
+    
+    // generate starting values
+    Matrix <> Sigma(ns, 1, Sigmastart);
+    Matrix <> P(ns, ns, Pstart);    
+    Matrix <int> s(n, 1, statestart); 
+    Matrix <> b0(k, 1, b0data);
+    Matrix <> B0(k, k, B0data);
+    const Matrix <> A0(ns, ns, A0data);
+    double logmarglike = 0.0;
+    double acceptance = 0.0;
+    
+    // storage matrices
+    Matrix<> P_store(nstore, ns*ns);
+    Matrix<> ps_store(n, ns);
+    Matrix<int> s_store(nstore, n);
+    
+    Matrix<> yhat_mat(nstore, n);
+    Matrix<> yerror_mat(nstore, n);
+    Matrix<> yfore_pred_mat(nstore, n);
+    Matrix<> yback_pred_mat(nstore, n);
+
+    if (*change == 1){    
+      // fixed effects change only
+      Matrix <> beta(ns, k, betastart);
+      Matrix<> beta_store(nstore, ns*k);
+      Matrix<> Sigma_store(nstore, 1);
+      MCMCPACK_PASSRNG2MODEL(MCMCintervention_fixed_impl, 
+			     *m, 
+			     *intervention, 
+			     Y, X, 
+			     beta, Sigma, P, s, b0, B0,   
+			     *c0, *d0, A0,
+			     *burnin, *mcmc, *thin, *verbose, 
+			     *chib, *ar, 
+			     beta_store, Sigma_store, 
+			     P_store, ps_store, s_store, 
+			     logmarglike, 
+			     yhat_mat, 
+			     yerror_mat, 
+			     yfore_pred_mat, 
+			     yback_pred_mat, 
+			     acceptance);	
+      for (int i = 0; i<(nstore*ns*k); ++i){
+	betaout[i] = beta_store[i]; 
+      }
+      for (int i = 0; i<(nstore); ++i){
+	Sigmaout[i] = Sigma_store[i]; 
+      }  
+    }
+    else if (*change == 2){
+      // random effects change only
+      Matrix<> beta(k, 1, betastart);
+      Matrix<> beta_store(nstore, k);
+      Matrix<> Sigma_store(nstore, ns);
+      MCMCPACK_PASSRNG2MODEL(MCMCintervention_random_impl, 
+			     *m, 
+			     *intervention, 
+			     Y, X, 
+			     beta, Sigma, P, s, b0, B0,   
+			     *c0, *d0, A0,
+			     *burnin, *mcmc, *thin, *verbose, 
+			     *chib, *ar, 
+			     beta_store, Sigma_store, 
+			     P_store, ps_store, s_store, 
+			     logmarglike, 
+			     yhat_mat, 
+			     yerror_mat, 
+			     yfore_pred_mat, 
+			     yback_pred_mat);
+      for (int i = 0; i<(nstore*k); ++i){
+	betaout[i] = beta_store[i]; 
+      }
+      for (int i = 0; i<(nstore*ns); ++i){
+	Sigmaout[i] = Sigma_store[i]; 
+      }  
+      acceptance = 1;
+    }
+    else {
+      Matrix <> beta(ns, k, betastart);
+      Matrix<> beta_store(nstore, ns*k);
+      Matrix<> Sigma_store(nstore, ns);
+      MCMCPACK_PASSRNG2MODEL(MCMCintervention_impl, 
+			     *m, 
+			     *intervention, 
+			     Y, X, 
+			     beta, Sigma, P, s, b0, B0,   
+			     *c0, *d0, A0,
+			     *burnin, *mcmc, *thin, *verbose, 
+			     *chib, *ar, 
+			     beta_store, Sigma_store, 
+			     P_store, ps_store, s_store, 
+			     logmarglike, 
+			     yhat_mat, 
+			     yerror_mat, 
+			     yfore_pred_mat, 
+			     yback_pred_mat, 
+			     acceptance);
+      // return output
+      for (int i = 0; i<(nstore*ns*k); ++i){
+	betaout[i] = beta_store[i]; 
+      }
+      for (int i = 0; i<(nstore*ns); ++i){
+	Sigmaout[i] = Sigma_store[i]; 
+      }
+    }
+    
+    logmarglikeholder[0] = logmarglike;
+    accept[0] = acceptance;
+
+    for (int i = 0; i<(nstore*ns*ns); ++i){
+      Pout[i] = P_store[i]; 
+    }
+    for (int i = 0; i<(n*ns); ++i){
+      psout[i] = ps_store[i]; 
+    }	   
+    for (int i = 0; i<(nstore*n); ++i){
+      sout[i] = s_store[i];
+      yhatout[i] = yhat_mat[i];
+      yerrorout[i] = yerror_mat[i];
+      yforepredout[i] = yfore_pred_mat[i];
+      ybackpredout[i] = yback_pred_mat[i];
+    }
+    
+  }// end of MCMCintervention
+}//end extern "C"
+
+#endif
+
+
+
+  
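A note on the chib == 1 branches in this new file: the value written to logmarglike has the form of Chib's (1995) basic marginal likelihood identity evaluated at the posterior means theta* = (beta*, Sigma*, P*). In generic notation (a sketch for orientation only; none of these symbols appear in the source),

    \log m(y) = \log f(y \mid \theta^*) + \log \pi(\theta^*) - \log \pi(\theta^* \mid y),
    \qquad \log \pi(\theta^* \mid y) \approx \mathrm{pdf\_beta} + \mathrm{pdf\_Sigma} + \mathrm{pdf\_P},

where each posterior ordinate is estimated from a reduced Gibbs run that holds the previously evaluated blocks at their starred values.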
diff --git a/src/MCMCirtKdRob.cc b/src/MCMCirtKdRob.cc
index 2bbfd61..ac012d0 100644
--- a/src/MCMCirtKdRob.cc
+++ b/src/MCMCirtKdRob.cc
@@ -375,7 +375,7 @@ static void doubling(double (*logfun)(const double&,
 		     const int& param){
   
   const double U = stream.runif();
-  double x0;
+  double x0 = 0.0;
   if (param==0){ // Lambda
     x0 = Lambda(rowindex, colindex);
   }
@@ -400,7 +400,7 @@ static void doubling(double (*logfun)(const double&,
   while (K > 0 && 
 	 (z < logfun(L, X, Lambda, theta, delta0, delta1, Lambda_prior_mean,
 		     Lambda_prior_prec, Lambda_ineq, theta_ineq, 
-		     k0, k1, c0, d0, c1, d1, rowindex, colindex) |
+		     k0, k1, c0, d0, c1, d1, rowindex, colindex) ||
 	  z < logfun(R, X, Lambda, theta, delta0, delta1, Lambda_prior_mean,
 		     Lambda_prior_prec, Lambda_ineq, theta_ineq, 
 		     k0, k1, c0, d0, c1, d1, rowindex, colindex))){
@@ -460,7 +460,7 @@ static void StepOut(double (*logfun)(const double&,
 		    const int& param){
   
   const double U = stream.runif();
-  double x0;
+  double x0 = 0.0;
   if (param==0){ // Lambda
     x0 = Lambda(rowindex, colindex);
   }
diff --git a/src/MCMCoprobit.cc b/src/MCMCoprobit.cc
index 8fa9b7c..e58dc1f 100644
--- a/src/MCMCoprobit.cc
+++ b/src/MCMCoprobit.cc
@@ -228,10 +228,10 @@ void MCMCoprobit_impl (rng<RNGTYPE>& stream, const int * Y,
   Matrix<> alpha_hat = alpha;
   
   // initialize current value stuff
-  Matrix<> propV = tune * alpha_prior_var * tune;
-  Matrix<> propCinvT = t(cholesky(invpd(propV)));
-  double logpost_cur = oprobit_logpost(nY, X, alpha, alpha_prior_mean, alpha_prior_var, beta);
-  double logjump_cur = lnmulttdens(alpha_prior_mean, alpha_hat, propCinvT, tdf);
+// JHP  Matrix<> propV = tune * alpha_prior_var * tune;
+// JHP  Matrix<> propCinvT = t(cholesky(invpd(propV)));
+// JHP  double logpost_cur = oprobit_logpost(nY, X, alpha, alpha_prior_mean, alpha_prior_var, beta);
+// JHP  double logjump_cur = lnmulttdens(alpha_prior_mean, alpha_hat, propCinvT, tdf);
   
   double tune_double = tune[0];
   for (unsigned int iter = 0; iter < tot_iter; ++iter) {		
diff --git a/src/MCMCoprobitChange.cc b/src/MCMCoprobitChange.cc
index 7784fe4..81af3b5 100644
--- a/src/MCMCoprobitChange.cc
+++ b/src/MCMCoprobitChange.cc
@@ -1,13 +1,15 @@
+////////////////////////////////////////////////////////////////////
 // MCMCoprobitChange.cc is C++ code to estimate a oprobit changepoint model
 // with linear approximation
 //
 // Jong Hee Park
-// Dept. of Political Science
-// University of Chicago
-// jhp at uchicago.edu
+// Department of Political Science and International Relations
+// Seoul National University
+// jongheepark at snu.ac.kr
 //
-// 07/06/2007 
+// 07/06/2007 Written
 // 11/02/2009 Modified
+////////////////////////////////////////////////////////////////////
 
 #ifndef MCMCOPROBITCHANGE_CC
 #define MCMCOPROBITCHANGE_CC
@@ -38,7 +40,7 @@ static double dtnormLX(const double x,
 		     const double lower, 
 		     const double upper){
   double out = 0.0;
-  if (x>lower&x<upper){
+  if (x>lower && x<upper){
     double numer = dnorm(x, mean, sd);
     double denom = pnorm(upper, mean, sd) - pnorm(lower, mean, sd);
     out = numer/denom;
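Written out, dtnormLX as shown evaluates a normal density truncated to (lower, upper). With phi and Phi denoting the standard normal pdf and cdf (generic notation, a sketch only),

    f(x) = \frac{\phi\big((x-\mathrm{mean})/\mathrm{sd}\big)/\mathrm{sd}}{\Phi\big((\mathrm{upper}-\mathrm{mean})/\mathrm{sd}\big) - \Phi\big((\mathrm{lower}-\mathrm{mean})/\mathrm{sd}\big)}, \qquad \mathrm{lower} < x < \mathrm{upper},

and f(x) = 0 otherwise, which is exactly the case the corrected range test above guards.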
diff --git a/src/MCMCpoissonChange.cc b/src/MCMCpoissonChange.cc
index 795bfd8..8c76d5e 100644
--- a/src/MCMCpoissonChange.cc
+++ b/src/MCMCpoissonChange.cc
@@ -1,13 +1,15 @@
+//////////////////////////////////////////////////////////////////////////
 // MCMCpoissonRegChange.cc is C++ code to estimate a Poisson changepoint model
 //
 // Jong Hee Park
-// Dept. of Political Science
-// University of Chicago
-// jhp at uchicago.edu
+// Department of Political Science and International Relations
+// Seoul National University
+// jongheepark at snu.ac.kr
 //
 // 07/06/2007
 // 12/20/2007 included in MCMCpack
 // 03/09/2012 fixed a bug in a random uniform draw (thanks to Matt Blackwell)
+//////////////////////////////////////////////////////////////////////////
 
 #ifndef MCMCPOISSONCHANGE_CC
 #define MCMCPOISSONCHANGE_CC
@@ -47,7 +49,6 @@ Matrix<> tau_comp_sampler(rng<RNGTYPE>& stream,
 				const Matrix<>& s){
   
   // itialize
-  const int ns = m + 1;    
   const int n = Y.rows();
   const int k = X.cols();
   Matrix<int> component(totcomp, 1);
@@ -89,10 +90,10 @@ Matrix<> tau_comp_sampler(rng<RNGTYPE>& stream,
       Matrix<> ut = stream.runif(yt, 1);    
       // redraw the uniform if there are any repeats
       // thanks to Matt Blackwell 
-      while (::unique(ut).size() != ut.size()) {
+      while (unique(ut).size() != ut.size()) {
 	ut = stream.runif(yt, 1);
       }                   
-      Matrix<> sort_ut = ::sort(ut);
+      Matrix<> sort_ut = sort(ut);
       Matrix<> tau_tj(yt, 1);
       for(int i=1; i<yt; ++i){
 	tau_tj[i] = sort_ut[i] - sort_ut[i-1];
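As a reading of the redraw loop above: with yt events in a unit interval, the sorted uniforms u_(1) < ... < u_(yt) serve as arrival points and the sampler needs the spacings

    \tau_j = u_{(j)} - u_{(j-1)}, \qquad j = 2, \dots, y_t,

to be strictly positive. A repeated draw would give a zero spacing, hence the while loop that redraws ut until unique(ut) has the same length as ut.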
diff --git a/src/MCMCprobitChange.cc b/src/MCMCprobitChange.cc
index 7cbf7f1..ad158ea 100644
--- a/src/MCMCprobitChange.cc
+++ b/src/MCMCprobitChange.cc
@@ -1,16 +1,15 @@
+////////////////////////////////////////////////////////////////////
 // MCMCprobitChange.cc is C++ code to estimate a probit changepoint model
 // with a beta prior
 //
 // Jong Hee Park
-// Dept. of Political Science
-// University of Chicago
-// jhp at uchicago.edu
+// Department of Political Science and International Relations
+// Seoul National University
+// jongheepark at snu.ac.kr
 //
-// Written by JHP 07/06/2007 
-// Modifieed by JHP 11/02/2009 
-// Included by JHP 1/21/2011
-// Copyright (C) 2007-present Andrew D. Martin, Kevin M. Quinn,
-//    and Jong Hee Park
+// Written 07/06/2007 
+// Modified 11/02/2009
+// Included 1/21/2011
 //////////////////////////////////////////////////////////////////////////
 
 #ifndef MCMCPROBITCHANGE_CC
@@ -363,7 +362,6 @@ extern "C"{
     // pull together Matrix objects
     const Matrix <> Y(*Yrow, *Ycol, Ydata); 
     const Matrix <> X(*Xrow, *Xcol, Xdata); 
-    const unsigned int tot_iter = *burnin + *mcmc; 
     const unsigned int nstore = *mcmc / *thin; 
     const int n = Y.rows();
     const int k = X.cols();
diff --git a/src/MCMCregress.cc b/src/MCMCregress.cc
index 0f3438d..8dbbee6 100644
--- a/src/MCMCregress.cc
+++ b/src/MCMCregress.cc
@@ -118,7 +118,7 @@ void MCMCregress_impl (rng<RNGTYPE>& stream, const Matrix<>& Y,
      const Matrix<> betastar = t(meanc(t(betamatrix)));
      
      // step 1
-     Matrix<> sigma2_density(tot_iter, 1);
+     Matrix<> sigma2_density(nstore, 1);
      
      // second set of Gibbs scans
      for (unsigned int iter = 0; iter < nstore; ++iter) {
@@ -129,7 +129,7 @@ void MCMCregress_impl (rng<RNGTYPE>& stream, const Matrix<>& Y,
        const Matrix<> SSE = crossprod (e); 
        const double c_post = (c0 + X.rows ()) * 0.5;
        const double d_post = (d0 + SSE[0]) * 0.5;
-       sigma2_density(iter) = ::exp(digamma(sigma2star, c_post, d_post));
+       sigma2_density(iter) = digamma(sigma2star, c_post, d_post);
        R_CheckUserInterrupt();
      } // end MCMC loop
      double pdf_sigma2 = log(mean(sigma2_density));
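The reduced run above has the shape of the usual Chib-style estimate of the sigma^2 posterior ordinate: with sigma2star held fixed, each stored scan g yields an inverse-gamma full conditional with parameters c_post and d_post^(g), and (generic notation, a sketch only)

    \hat{\pi}(\sigma^{2*} \mid y) = \frac{1}{G} \sum_{g=1}^{G} p_{IG}\big(\sigma^{2*} \mid c_{\mathrm{post}}, d_{\mathrm{post}}^{(g)}\big), \qquad \mathrm{pdf\_sigma2} = \log \hat{\pi}(\sigma^{2*} \mid y),

so the conditional density values are averaged on the natural scale across the stored scans before the single log is taken.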
diff --git a/src/MCMCregressChange.cc b/src/MCMCregressChange.cc
new file mode 100755
index 0000000..6174ef2
--- /dev/null
+++ b/src/MCMCregressChange.cc
@@ -0,0 +1,513 @@
+////////////////////////////////////////////////////////////////////
+// MCMCregressChange.cc is C++ code to estimate a
+// linear regression changepoint model
+//
+// Jong Hee Park
+// Department of Political Science and International Relations
+// Seoul National University
+// jongheepark at snu.ac.kr
+//
+// 03/03/2009 Written
+////////////////////////////////////////////////////////////////////
+
+#ifndef MCMCREGRESSCHANGE_CC
+#define MCMCREGRESSCHANGE_CC
+
+#include "MCMCrng.h"
+#include "MCMCfcds.h"
+#include "matrix.h"
+#include "distributions.h"
+#include "stat.h"
+#include "la.h"
+#include "ide.h"
+#include "smath.h"
+#include "rng.h"
+#include "mersenne.h"
+#include "lecuyer.h"
+
+#include <R.h>           // needed to use Rprintf()
+#include <R_ext/Utils.h> // needed to allow user interrupts
+
+
+using namespace std;
+using namespace scythe;
+
+double lndinvgamma_jhp (const double x, const double shape, const double scale){
+  double log_density = shape *::log(scale) - lngammafn(shape) - (shape + 1) * ::log(x) - (scale/x);
+  return (log_density);
+}
+
+template <typename RNGTYPE>
+Matrix<double> gaussian_state_sampler(rng<RNGTYPE>& stream, 
+				      const int m, 
+				      const Matrix<double>& Y,
+				      const Matrix<double>& X,
+				      const Matrix<double>& beta,
+				      const Matrix<double>& Sigma,
+				      const Matrix<double>& P){
+  
+  const int ns = m + 1;
+  const int n = Y.rows();
+  
+  Matrix<double> F(n, ns);
+  Matrix<double> pr1(ns, 1);
+  pr1[0] = 1;
+  Matrix<double> py(ns, 1);
+  Matrix<double> pstyt1(ns, 1);
+  Matrix<int> s(n, 1);                        
+  Matrix<double> ps = Matrix<double>(n, ns);  
+  
+  for (int tt=0; tt<n ; ++tt){
+    Matrix<double> mu = X(tt,_)*::t(beta); 
+    for (int j = 0; j< ns; ++j){
+      py[j]  =  dnorm(Y[tt], mu[j], sqrt(Sigma[j]));
+    }
+    if (tt==0) pstyt1 = pr1;
+    else {
+      pstyt1 =  ::t(F(tt-1,_)*P); 
+    }
+    Matrix<double> unnorm_pstyt = pstyt1%py;
+    const Matrix<double> pstyt = unnorm_pstyt/sum(unnorm_pstyt); 
+    for (int j=0; j<ns ; ++j) F(tt,j) = pstyt(j);
+    
+  }
+  
+  ps(n-1,_) = F(n-1,_);                      
+  s(n-1) = ns;                                
+  
+  Matrix<double> pstyn = Matrix<double>(ns, 1);
+  double pone = 0.0;
+  int tt = n-2;
+  while (tt >= 0){
+    int st = s(tt+1);
+    Matrix<double> Pst_1 = ::t(P(_,st-1));
+    Matrix<double> unnorm_pstyn = F(tt,_)%Pst_1;
+    pstyn = unnorm_pstyn/sum(unnorm_pstyn); 
+    if (st==1)   s(tt) = 1;                  
+    else{
+      pone = pstyn(st-2);
+      if(stream.runif() < pone) s(tt) = st-1;
+      else s(tt) = st;
+    }
+    ps(tt,_) = pstyn;
+    --tt;
+  }// end of while loop
+  
+  Matrix<double> Sout(n, ns+1); 
+  Sout(_, 0) = s(_,0);
+  for (int j = 0; j<ns; ++j){
+    Sout(_,j+1) = ps(_, j);
+  }
+  
+  return Sout;
+  
+} // end of state sampler
+
+//////////////////////////////////////////// 
+// MCMCregressChange implementation.  
+//////////////////////////////////////////// 
+template <typename RNGTYPE>
+void MCMCregressChange_impl(rng<RNGTYPE>& stream, 
+			    const double m, 				 
+			    const Matrix<>& Y, const Matrix<>& X,
+			    Matrix<>& beta, Matrix<>& Sigma, Matrix<>& P, Matrix<int>& s, 
+			    Matrix<>& b0, Matrix<>& B0,   
+			    const double c0, const double d0,
+			    const Matrix<>& A0, 
+			    unsigned int burnin, unsigned int mcmc, unsigned int thin,
+			    unsigned int verbose, bool chib, 
+			    Matrix<>& beta_store, Matrix<>& Sigma_store, 
+			    Matrix<>& P_store, Matrix<>& ps_store, Matrix<int>& s_store, 
+			    double& logmarglike)
+{
+  // define constants and form cross-product matrices
+  const int tot_iter = burnin + mcmc; //total iterations
+  const int nstore = mcmc / thin; // number of draws to store
+  const int n = Y.rows();
+  const int ns = m + 1;                 // number of states
+  const int k = X.cols();
+  const Matrix<> B0inv = invpd(B0);
+  Matrix <> sigma(ns, 1);
+
+  //MCMC loop
+  unsigned int count = 0;    
+  for (int iter = 0; iter < tot_iter; ++iter){
+    //////////////////////
+    // 1. Sample beta and Sigma
+    //////////////////////
+    int beta_count = 0;
+    Matrix<int> nstate(ns, 1); 
+    
+    for (int j = 0; j<ns; ++j){
+      for (int i = 0; i<n; ++i){
+	if (s[i] == j + 1) { 
+	  nstate[j] = nstate[j] + 1;
+	}// end of if
+      }// end of int i<n
+      beta_count = beta_count + nstate[j];        
+
+      // BETA UPDATE
+      Matrix<double> yj = Y((beta_count - nstate[j]), 0, (beta_count - 1), 0);
+      Matrix<double> Xj = X((beta_count - nstate[j]), 0, (beta_count - 1), k-1);
+      Matrix<double> Bn = invpd(B0 + t(Xj)*Xj/Sigma[j]);
+      Matrix<double> bn = Bn*(B0*b0 + t(Xj)*yj/Sigma[j]);
+      beta(j,_) = stream.rmvnorm(bn, Bn);
+
+      // SIGMA UPDATE
+      double shape = (c0 + (double)nstate[j])/2;
+      Matrix<> ysimul_j = Xj*::t(beta(j,_)); // 
+      Matrix<double> ej = yj - ysimul_j;
+      Matrix<double> sum_ej = t(ej)*ej;
+      double scale =(d0 + sum_ej[0])/2;
+      
+      Sigma[j] = 1/stream.rgamma(shape, scale);   
+      sigma[j] = sqrt(Sigma[j]);   
+    }// end of sampling beta and Sigma
+    
+    //////////////////////
+    // 2. Sample P
+    //////////////////////        
+    double shape1 = 0;
+    double shape2 = 0;    
+    P(ns-1, ns-1) = 1; 
+    
+    for (int j =0; j<(ns-1); ++j){
+      shape1 =  A0(j,j) + (double)nstate[j] - 1;  
+      shape2 =  A0(j,j+1) + 1; // SS(j,j+1);        
+      P(j,j) = stream.rbeta(shape1, shape2);
+      P(j,j+1) = 1 - P(j,j);
+    }
+
+    //////////////////////
+    // 3. Sample s
+    //////////////////////
+    Matrix<double> Sout = gaussian_state_sampler(stream, m, Y, X, beta, Sigma, P);
+    Matrix<double> s = Sout(_, 0);
+    Matrix<> ps(n, ns); 
+    for (int j = 0; j<ns; ++j){
+      ps(_,j) = Sout(_,j+1);
+    }
+
+    // load draws into sample array
+    if (iter >= burnin && ((iter % thin)==0)){
+      Matrix<double> tbeta = ::t(beta); //transpose beta for R output 
+      for (int i=0; i<(ns*k); ++i)
+	beta_store(count,i) = tbeta[i];// stored by the order of (11, 12, 13, 21, 22, 23)
+      for (int i=0; i<ns; ++i)
+	Sigma_store(count,i) = Sigma[i]; 
+      for (int j=0; j<ns*ns; ++j)    
+	P_store(count,j)= P[j];
+      for (int l=0; l<n ; ++l)           
+	ps_store(l,_) = ps_store(l,_) + ps(l,_);           // add two matrices
+      s_store(count,_) = s;
+      
+      ++count; // count when (iter % *thin)==0
+      
+    }   // end of if(iter...)
+    
+    
+    // print output to stdout
+    if(verbose > 0 && iter % verbose == 0){
+      Rprintf("\nMCMCregressChange iteration %i of %i \n", (iter+1), tot_iter);
+      for (int j = 0;j<ns; ++j){
+	Rprintf("\n nstate[j] %10.5f", static_cast<double>(nstate[j]));     
+      }
+      Rprintf("\n beta \n");
+      for (int i = 0; i<ns; ++i){
+	for (int j = 0; j<k; ++j){
+	  Rprintf("%10.5f\n", beta(i, j)); 
+	}
+      }
+      Rprintf("\n sigma^2 \n");
+      for (int i = 0; i<ns; ++i){
+	Rprintf("%10.5f\n", Sigma(i)); 
+      }
+    }
+    
+  }// end MCMC loop 
+  
+  if(chib ==1){
+    
+    Matrix<double> betast = meanc(beta_store); //meanc(beta_store)=(11, 12, 13, 21, 22, 23) 
+    Matrix<double, Row> beta_st(ns, k);
+    for (int j = 0; j<ns*k; ++j){
+      beta_st[j] = betast[j];
+    }
+    
+    Matrix<double> Sigma_st = meanc(Sigma_store);
+    Matrix<double> P_vec_st = meanc(P_store);
+    const Matrix<double> P_st(ns, ns);
+    for (int j = 0; j< ns*ns; ++j){  
+      P_st[j] = P_vec_st[j]; 
+    }    
+
+    //////////////////////
+    // 1. pdf.beta
+    //////////////////////      
+    Matrix<double> density_beta(nstore, ns);      
+    for (int iter = 0; iter<nstore; ++iter){  
+      
+      Matrix<int> nstate(ns, 1); // contains total numbers of each state
+      int beta_count = 0;
+      
+      for (int j = 0; j<ns ; ++j){
+	for (int i = 0; i<n; ++i){
+	  if (s_store(iter, i) == (j+1)) { // since j starts from 0
+	    nstate[j] = nstate[j] + 1;
+	  }// end of if
+	}// end of int i<n
+	beta_count = beta_count + nstate[j];     
+	
+	// BETA UPDATE
+	const Matrix<double> yj = Y((beta_count - nstate[j]), 0, (beta_count - 1), 0);
+	const Matrix<double> Xj = X((beta_count - nstate[j]), 0, (beta_count - 1), k-1);
+	const double precision = 1.0/Sigma_store(iter, j);
+	const Matrix<double> XpX = (::t(Xj)*Xj);
+	const Matrix<double> XpY = (::t(Xj)*yj);
+	const Matrix<double> Bn = invpd(B0 + XpX*precision);
+	const Matrix<double> bn = Bn*gaxpy(B0, b0, XpY*precision);
+	if (k == 1){
+	  density_beta(iter, j) = dnorm(beta_st(j), bn(0), sqrt(Bn(0)));
+	}
+	else{
+	  density_beta(iter, j) = ::exp(lndmvn(::t(beta_st(j,_)), bn, Bn));
+	}
+      }
+      
+    }
+    
+    double pdf_beta = log(prod(meanc(density_beta)));     
+    
+    //////////////////////
+    // 2. pdf.Sigma
+    //////////////////////      
+    Matrix<double> density_Sigma(nstore, ns);
+    for (int iter = 0; iter<nstore; ++iter){      
+      Matrix <double> Sout = gaussian_state_sampler(stream, m, Y, X, beta_st, Sigma, P);
+      Matrix <double> s = Sout(_, 0);
+      int beta_count = 0;
+      Matrix<int> nstate(ns, 1); 
+      for (int j = 0; j <ns ; ++j){
+	for (int i = 0; i<n; ++i){
+	  if (s[i] == (j+1)) { 
+	    nstate[j] = nstate[j] + 1;
+	  }// end of if
+	}// end of int i<n
+	beta_count = beta_count + nstate[j];        
+	Matrix<double> yj = Y((beta_count - nstate[j]), 0, (beta_count - 1), 0);
+	Matrix<double> Xj = X((beta_count - nstate[j]), 0, (beta_count - 1), k-1);  
+	Matrix<double> ej = yj - Xj*::t(beta_st(j,_));
+	Matrix<double> sum_ej = ::t(ej)*ej;
+	double shape = (c0 + (double)nstate[j])/2;
+	double scale =(d0 + sum_ej[0])/2;
+	
+	Sigma[j] = stream.rigamma(shape, scale);
+	density_Sigma(iter, j) = ::exp(lndinvgamma_jhp(Sigma_st[j], shape, scale));
+	
+      }// end of sampling beta and Sigma
+      
+      // STEP 2.3 P|S
+      double shape1 = 0;
+      double shape2 = 0;    
+      P(ns-1, ns-1) = 1; //no jump at the last state
+      
+      for (int j =0; j< (ns-1); ++j){
+	shape1 =  A0(j,j) + (double)nstate[j] - 1;  
+	shape2 =  A0(j,j+1) + 1; //       
+	P(j,j) =  stream.rbeta(shape1, shape2);
+	P(j,j+1) = 1 - P(j,j);
+      }  
+      
+    }// end of pdf.Sigma  
+    
+    double pdf_Sigma = log(prod(meanc(density_Sigma)));   
+    
+    //////////////////////
+    // 3. pdf.P|beta_st, Sigma_st, S
+    //////////////////////
+    Matrix<double> density_P(nstore, ns);
+    for (int iter = 0; iter < nstore; ++iter){
+      
+     // STEP 2.1 S|y, beta.st, Sigma, P     
+      Matrix <double> Sout = gaussian_state_sampler(stream, m, Y, X, beta_st, Sigma_st, P);
+      Matrix <double> s = Sout(_, 0);
+ 
+      double shape1 = 0;
+      double shape2 = 0;    
+      P(ns-1, ns-1) = 1; //no jump at the last state
+      
+      // compute addN
+      Matrix <double> P_addN(ns, 1); 
+      for (int j = 0; j<ns; ++j){
+	for (int i = 0; i<n; ++i){
+	  if (s(i) == (j+1)) { // since j starts from 0
+	    P_addN[j] = P_addN[j] + 1;            
+	  }// end of if
+	} // end of for i
+      } // end of for j        
+      
+      // evaluate P
+      for (int j =0; j< (ns-1); ++j){
+	shape1 =  A0(j,j) + P_addN[j] - 1;  
+	shape2 =  A0(j,j+1) + 1; //         
+	P(j,j) = stream.rbeta(shape1, shape2);
+	P(j,j+1) = 1 - P(j,j);
+	density_P(iter, j) = dbeta(P_st(j,j), shape1, shape2); 
+      } // end of for j
+      density_P(iter, ns-1) = 1; // the absorbing state contributes a factor of 1; leaving it at 0 would zero out prod() below
+      
+    }// end of pdf.P MCMC run            
+    double pdf_P = log(prod(meanc(density_P)));
+       
+    //////////////////////
+    // likelihood
+    //////////////////////       
+    Matrix<double> F = Matrix<double>(n, ns);
+    Matrix<double> like(n, 1);
+    Matrix<double> pr1 = Matrix<double>(ns, 1);
+    pr1[0] = 1;
+    Matrix<double> py(ns, 1);
+    Matrix<double> pstyt1(ns, 1);
+    
+    for (int t=0; t<n ; ++t){
+      Matrix<double> mu = X(t,_)*::t(beta_st); //k by 1 vector
+      for (int j = 0; j< ns; ++j){
+	py[j]  =  dnorm(Y[t], mu[j], sqrt(Sigma_st[j]));
+      }
+      if (t==0) pstyt1 = pr1;
+      else {
+	pstyt1 =  ::t(F(t-1,_)*P_st); // make it an ns by 1 matrix
+      }
+      Matrix<double> unnorm_pstyt = pstyt1%py;
+      Matrix<double> pstyt = unnorm_pstyt/sum(unnorm_pstyt); // pstyt = Pr(st|Yt)
+      for (int j=0; j<ns ; ++j){
+	F(t,j) = pstyt(j);
+      }
+      like[t] = sum(unnorm_pstyt);
+    }// end of likelihood computation
+    
+    const double loglike = sum(log(like));
+    
+    //////////////////////
+    // log prior ordinate 
+    //////////////////////
+    Matrix<double> density_beta_prior(ns, 1);
+    Matrix<double> density_Sigma_prior(ns, 1);
+    Matrix<double> density_P_prior(ns, 1);
+    density_P_prior[ns-1] = 0; // log(1): the absorbing state has no free transition parameter
+    if (k == 1){
+      for (int j=0; j<ns ; ++j){
+	density_beta_prior[j] = log(dnorm(beta_st(j), b0(0), sqrt(B0inv(0)))); 
+      }   
+    }
+    else{
+      for (int j=0; j<ns ; ++j){
+	density_beta_prior[j] = lndmvn(::t(beta_st(j,_)), b0, B0inv); 
+      }   
+    }
+    
+    for (int j=0; j<ns ; ++j){
+      density_Sigma_prior[j] = lndinvgamma_jhp(Sigma_st[j], c0/2, d0/2); 
+    }   
+    
+    for (int j =0; j< (ns-1); ++j){
+      density_P_prior[j] = log(dbeta(P_st(j,j), A0(j,j), A0(j,j+1))); 
+    }        
+    
+    // compute marginal likelihood
+    double logprior = sum(density_beta_prior) + sum(density_Sigma_prior) + sum(density_P_prior);
+    logmarglike = (loglike + logprior) - (pdf_beta + pdf_Sigma + pdf_P);
+    if(verbose > 0){
+      Rprintf("logmarglike = %10.5f\n", logmarglike);
+      Rprintf("loglike = %10.5f\n", loglike);
+      Rprintf("logprior = %10.5f\n", logprior);
+      Rprintf("pdf_beta = %10.5f\n", pdf_beta);
+      Rprintf("pdf_Sigma = %10.5f\n", pdf_Sigma);
+      Rprintf("pdf_P = %10.5f\n", pdf_P);
+    }
+  }// end of marginal likelihood computation
+  
+}
+  
+//////////////////////////////////////////// 
+// Start MCMCregressChangepoint function
+///////////////////////////////////////////
+extern "C"{
+  void MCMCregressChange(double *betaout, double *Sigmaout, double *Pout, double *psout, double *sout,  
+			 const double *Ydata, const int *Yrow, const int *Ycol, 
+			 const double *Xdata, const int *Xrow, const int *Xcol, 
+			 const int *m, 
+			 const int *burnin, const int *mcmc, const int *thin, const int *verbose, 
+			 const int *uselecuyer, const int *seedarray, const int *lecuyerstream, 
+			 const double *betastart, const double *Sigmastart, const double *Pstart, 
+			 const int *statestart, 
+			 const double *a, const double *b, 
+			 const double *b0data, const double *B0data, 
+			 const double *c0, const double *d0, 
+			 const double *A0data, 
+			 double *logmarglikeholder, double *loglikeholder, 
+			 const int *chib){
+    
+    // pull together Matrix objects
+    const Matrix <double> Y(*Yrow, *Ycol, Ydata); 
+    const Matrix <double> X(*Xrow, *Xcol, Xdata); 
+    const unsigned int nstore = *mcmc / *thin; // number of draws to store
+    const int n = Y.rows();
+    const int k = X.cols();
+    const int ns = *m + 1;                 // number of states
+    
+    // generate starting values
+    Matrix <> beta(ns, k, betastart);
+    Matrix <> Sigma(ns, 1, Sigmastart);
+    Matrix <> P(ns, ns, Pstart);    
+    Matrix <int> s(n, 1, statestart); 
+    Matrix <> b0(k, 1, b0data);
+    Matrix <> B0(k, k, B0data);
+    const Matrix <> A0(ns, ns, A0data);
+    double logmarglike = 0.0;
+
+    // storage matrices
+    Matrix<> beta_store(nstore, ns*k);
+    Matrix<> Sigma_store(nstore, ns);
+    Matrix<> P_store(nstore, ns*ns);
+    Matrix<> ps_store(n, ns);
+    Matrix<int> s_store(nstore, n);
+    MCMCPACK_PASSRNG2MODEL(MCMCregressChange_impl, *m,
+			   Y, X, 
+			   beta, Sigma, P, s, b0, B0,   
+			   *c0, *d0, A0,
+			   *burnin, *mcmc, *thin, *verbose, 
+			   *chib, 
+			   beta_store, Sigma_store, 
+			   P_store, ps_store, s_store, 
+			   logmarglike);
+    logmarglikeholder[0] = logmarglike;
+    
+    
+    // return output
+    for (int i = 0; i<(nstore*ns*k); ++i){
+      betaout[i] = beta_store[i]; 
+    }
+    for (int i = 0; i<(nstore*ns); ++i){
+      Sigmaout[i] = Sigma_store[i]; 
+    }
+    for (int i = 0; i<(nstore*ns*ns); ++i){
+      Pout[i] = P_store[i]; 
+    }
+    for (int i = 0; i<(n*ns); ++i){
+      psout[i] = ps_store[i]; 
+    }
+    for (int i = 0; i<(nstore*n); ++i){
+      sout[i] = s_store[i];
+    }
+    
+  }// end of MCMCregressChange
+}//end extern "C"
+
+#endif
+
+
+
+  
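The gaussian_state_sampler added in this file is a forward-filter / backward-sample pass for a left-to-right regime sequence. In generic notation (a sketch for orientation; states are labelled 1..ns as in the comments, not by their C indices),

    forward:   F_t(j) \propto \Big[\sum_i F_{t-1}(i)\,P(i,j)\Big]\; N\big(y_t \mid x_t'\beta_j, \Sigma_j\big), \quad \text{with } F_1 \text{ anchored at state } 1,
    backward:  s_n = ns, \qquad \Pr(s_t = j \mid s_{t+1} = l, y_{1:t}) \propto F_t(j)\,P(j,l).

Because P only allows staying put or moving one state forward, the backward draw collapses to the two-point choice (s_t = l versus s_t = l-1) implemented in the while loop.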
diff --git a/src/MCMCresidualBreakAnalysis.cc b/src/MCMCresidualBreakAnalysis.cc
index e1003bc..f9a144e 100644
--- a/src/MCMCresidualBreakAnalysis.cc
+++ b/src/MCMCresidualBreakAnalysis.cc
@@ -1,11 +1,14 @@
+////////////////////////////////////////////////////////////////////
 // MCMCresidualBreakAnalysis.cc 
 //
 // Jong Hee Park
-// Department of Political Science
-// University of Chicago
-// jhp at uchicago.edu
+// Department of Political Science and International Relations
+// Seoul National University
+// jongheepark at snu.ac.kr
 //
-// 03/03/2009
+// Written 03/03/2009
+//
+////////////////////////////////////////////////////////////////////
 
 #ifndef MCMCRESIDUALBREAKANALYSIS_CC
 #define MCMCRESIDUALBREAKANALYSIS_CC
@@ -465,7 +468,6 @@ extern "C"{
     
     // pull together Matrix objects
     const Matrix <double> Y(*Yrow, *Ycol, Ydata); 
-    const unsigned int tot_iter = *burnin + *mcmc; 
     const unsigned int nstore = *mcmc / *thin; 
     const int n = Y.rows();
     const int ns = *m + 1;                 
diff --git a/src/distributions.h b/src/distributions.h
index 3cd4af2..baa9868 100644
--- a/src/distributions.h
+++ b/src/distributions.h
@@ -1,4 +1,4 @@
-/* 
+/*
  * Scythe Statistical Library Copyright (C) 2000-2002 Andrew D. Martin
  * and Kevin M. Quinn; 2002-present Andrew D. Martin, Kevin M. Quinn,
  * and Daniel Pemstein.  All Rights Reserved.
@@ -63,7 +63,7 @@
  * end of this document that are not, in fact, declared in
  * distributions.h.  Again, this error is a result of Doxygen's macro
  * parsing capabilities.
- * 
+ *
  */
 
 /* TODO: Figure out how to get doxygen to stop listing erroneous
@@ -117,7 +117,7 @@
 /*! @cond */
 inline double trunc(double x) throw ()
 {
-    if (x >= 0) 
+    if (x >= 0)
       return std::floor(x);
     else
       return std::ceil(x);
@@ -136,9 +136,9 @@ inline double trunc(double x) throw ()
  */
 
 namespace scythe {
-  
+
   /*! @cond */
-  
+
   /* Forward declarations */
   double gammafn (double);
   double lngammafn (double);
@@ -160,21 +160,21 @@ namespace scythe {
     {
       SCYTHE_CHECK_10(n < 1 || n > 1000, scythe_invalid_arg,
           "n not on [1, 1000]");
-  
+
       SCYTHE_CHECK_10(x < -1.1 || x > 1.1, scythe_invalid_arg,
           "x not on [-1.1, 1.1]");
-      
+
       double b0, b1, b2;
       b0 = b1 = b2 = 0;
-  
+
       double twox = x * 2;
-  
+
       for (int i = 1; i <= n; ++i) {
         b2 = b1;
         b1 = b0;
         b0 = twox * b1 - b2 + a[n - i];
       }
-  
+
       return (b0 - b2) * 0.5;
     }
 
@@ -189,17 +189,17 @@ namespace scythe {
         -.1809129475572494194263306266719e-10,
         +.6221098041892605227126015543416e-13,
       };
-    
+
       SCYTHE_CHECK_10(x < 10, scythe_invalid_arg,
-          "This function requires x >= 10");  
+          "This function requires x >= 10");
       SCYTHE_CHECK_10(x >= 3.745194030963158e306, scythe_range_error,
           "Underflow");
-      
+
       if (x < 94906265.62425156) {
         double tmp = 10 / x;
         return chebyshev_eval(tmp * tmp * 2 - 1, algmcs, 5) / x;
       }
-      
+
       return 1 / (x * 12);
     }
 
@@ -207,7 +207,7 @@ namespace scythe {
     double
     bd0(double x, double np)
     {
-      
+
       if(std::fabs(x - np) < 0.1 * (x + np)) {
         double v = (x - np) / (x + np);
         double s = (x - np) * v;
@@ -221,10 +221,10 @@ namespace scythe {
           s = s1;
         }
       }
-      
+
       return x * std::log(x / np) + np - x;
     }
-  
+
     /* Computes the log of the error term in Stirling's formula */
     double
     stirlerr(double n)
@@ -234,7 +234,7 @@ namespace scythe {
 #define S2 0.00079365079365079365079365  /* 1/1260 */
 #define S3 0.000595238095238095238095238 /* 1/1680 */
 #define S4 0.0008417508417508417508417508/* 1/1188 */
-      
+
       /* error for 0, 0.5, 1.0, 1.5, ..., 14.5, 15.0 */
       const double sferr_halves[31] = {
         0.0, /* n=0 - wrong, place holder only */
@@ -270,7 +270,7 @@ namespace scythe {
         0.005554733551962801371038690  /* 15.0 */
       };
       double nn;
-      
+
       if (n <= 15.0) {
         nn = n + n;
         if (nn == (int)nn)
@@ -278,7 +278,7 @@ namespace scythe {
         return (scythe::lngammafn(n + 1.) - (n + 0.5) * std::log(n) + n -
             std::log(std::sqrt(2 * M_PI)));
       }
-      
+
       nn = n*n;
       if (n > 500)
         return((S0 - S1 / nn) / n);
@@ -312,19 +312,19 @@ namespace scythe {
         / std::sqrt(2 * M_PI * x);
     }
 
-  
+
     /* helper for pbeta */
     double
     pbeta_raw(double x, double pin, double qin)
     {
       double ans, c, finsum, p, ps, p1, q, term, xb, xi, y;
       int n, i, ib, swap_tail;
-      
+
       const double eps = .5 * DBL_EPSILON;
       const double sml = DBL_MIN;
       const double lneps = std::log(eps);
       const double lnsml = std::log(eps);
-      
+
       if (pin / (pin + qin) < x) {
         swap_tail = 1;
         y = 1 - x;
@@ -336,7 +336,7 @@ namespace scythe {
         p = pin;
         q = qin;
       }
-      
+
       if ((p + q) * y / (p + 1) < eps) {
         ans = 0;
         xb = p * std::log(std::max(y,sml)) - std::log(p) - lnbetafn(p,q);
@@ -369,7 +369,7 @@ namespace scythe {
           term = std::exp(xb - ib * lnsml);
           c = 1 / (1 - y);
           p1 = q * c / (p + q - 1);
-              
+
           finsum = 0;
           n = (int) q;
           if(q == n)
@@ -388,18 +388,18 @@ namespace scythe {
           }
           ans += finsum;
         }
-        
+
         if(swap_tail)
           ans = 1-ans;
         ans = std::max(std::min(ans,1.),0.);
       }
       return ans;
     }
-  
+
    /* Helper for dbinom */
     double
     dbinom_raw (double x, double n, double p, double q)
-    { 
+    {
       double f, lc;
 
       if (p == 0)
@@ -407,14 +407,14 @@ namespace scythe {
       if (q == 0)
         return((x == n) ? 1.0 : 0.0);
 
-      if (x == 0) { 
+      if (x == 0) {
         if(n == 0)
           return 1.0;
-        
+
         lc = (p < 0.1) ? -bd0(n, n * q) - n * p : n * std::log(q);
         return(std::exp(lc));
       }
-      if (x == n) { 
+      if (x == n) {
         lc = (q < 0.1) ? -bd0(n,n * p) - n * q : n * std::log(p);
         return(std::exp(lc));
       }
@@ -424,7 +424,7 @@ namespace scythe {
 
       lc = stirlerr(n) - stirlerr(x) - stirlerr(n-x) - bd0(x,n*p) -
         bd0(n - x, n * q);
-      
+
       f = (M_2PI * x * (n-x)) / n;
 
       return (std::exp(lc) / std::sqrt(f));
@@ -505,7 +505,7 @@ namespace scythe {
         0.00378239633202758244,
         7.29751555083966205e-5
       };
-      
+
       double xden, xnum, temp, del, eps, xsq, y;
       int i, lower, upper;
 
@@ -528,7 +528,7 @@ namespace scythe {
             xden = (xden + b[i]) * xsq;
           }
         } else xnum = xden = 0.0;
-        
+
         temp = x * (xnum + a[3]) / (xden + b[3]);
         if(lower)  *cum = 0.5 + temp;
         if(upper) *ccum = 0.5 - temp;
@@ -537,7 +537,7 @@ namespace scythe {
           if(upper) *ccum = std::log(*ccum);
         }
       } else if (y <= M_SQRT_32) {
-        /* Evaluate pnorm for 0.674.. = qnorm(3/4) < |x| <= sqrt(32) 
+        /* Evaluate pnorm for 0.674.. = qnorm(3/4) < |x| <= sqrt(32)
          * ~= 5.657 */
 
         xnum = c[8] * y;
@@ -602,7 +602,7 @@ namespace scythe {
   /*************
    * Functions *
    *************/
-  
+
   /* The gamma function */
 
   /*! \brief The gamma function.
@@ -619,7 +619,7 @@ namespace scythe {
    * \throw scythe_range_error (Level 1)
    * \throw scythe_precision_error (Level 1)
    */
-  inline double 
+  inline double
   gammafn (double x)
   {
     const double gamcs[22] = {
@@ -659,11 +659,11 @@ namespace scythe {
       int n = (int) x;
       if (x < 0)
         --n;
-      
+
       y = x - n;/* n = floor(x)  ==>  y in [ 0, 1 ) */
       --n;
       double value = chebyshev_eval(y * 2 - 1, gamcs, 22) + .9375;
-      
+
       if (n == 0)
         return value;/* x = 1.dddd = 1+y */
 
@@ -677,7 +677,7 @@ namespace scythe {
 
         /* The answer is less than half precision */
         /* because x too near a negative integer. */
-        SCYTHE_CHECK_10(x < -0.5 && 
+        SCYTHE_CHECK_10(x < -0.5 &&
             std::fabs(x - (int)(x - 0.5) / x) < 67108864.0,
             scythe_precision_error,
             "Answer < 1/2 precision because x is too near" <<
@@ -692,7 +692,7 @@ namespace scythe {
 
         for (int i = 0; i < n; i++)
           value /= (x + i);
-        
+
         return value;
       } else {
         /* gamma(x) for 2 <= x <= 10 */
@@ -706,21 +706,21 @@ namespace scythe {
       /* gamma(x) for   y = |x| > 10. */
 
       /* Overflow */
-      SCYTHE_CHECK_10(x > 171.61447887182298, 
+      SCYTHE_CHECK_10(x > 171.61447887182298,
           scythe_range_error,"Overflow");
 
       /* Underflow */
       SCYTHE_CHECK_10(x < -170.5674972726612,
           scythe_range_error, "Underflow");
 
-      double value = std::exp((y - 0.5) * std::log(y) - y 
+      double value = std::exp((y - 0.5) * std::log(y) - y
           + M_LN_SQRT_2PI + lngammacor(y));
 
       if (x > 0)
         return value;
 
       SCYTHE_CHECK_10(std::fabs((x - (int)(x - 0.5))/x) < 67108864.0,
-          scythe_precision_error, 
+          scythe_precision_error,
           "Answer < 1/2 precision because x is " <<
             "too near a negative integer");
 
@@ -734,10 +734,10 @@ namespace scythe {
   }
 
   /* The natural log of the absolute value of the gamma function */
-  /*! \brief The natural log of the absolute value of the gamma 
+  /*! \brief The natural log of the absolute value of the gamma
    * function.
    *
-   * Computes the natural log of the absolute value of the gamma 
+   * Computes the natural log of the absolute value of the gamma
    * function, evaluated at \a x.
    *
    * \param x The value to compute log(abs(gamma())) at.
@@ -767,7 +767,7 @@ namespace scythe {
     if (x > 0) /* i.e. y = x > 10 */
       return M_LN_SQRT_2PI + (x - 0.5) * std::log(x) - x
         + lngammacor(x);
-    
+
     /* else: x < -10; y = -x */
     double sinpiy = std::fabs(std::sin(M_PI * y));
 
@@ -779,11 +779,11 @@ namespace scythe {
     double ans = M_LN_SQRT_PId2 + (x - 0.5) * std::log(y) - x - std::log(sinpiy)
       - lngammacor(y);
 
-    SCYTHE_CHECK_10(std::fabs((x - (int)(x - 0.5)) * ans / x) 
-        < 1.490116119384765696e-8, scythe_precision_error, 
-        "Answer < 1/2 precision because x is " 
+    SCYTHE_CHECK_10(std::fabs((x - (int)(x - 0.5)) * ans / x)
+        < 1.490116119384765696e-8, scythe_precision_error,
+        "Answer < 1/2 precision because x is "
         << "too near a negative integer");
-    
+
     return ans;
   }
 
@@ -815,14 +815,14 @@ namespace scythe {
     double val = lnbetafn(a, b);
     SCYTHE_CHECK_10(val < -708.39641853226412, scythe_range_error,
         "Underflow");
-    
+
     return std::exp(val);
   }
 
   /* The natural log of the beta function */
   /*! \brief The natural log of the beta function.
    *
-   * Computes the natural log of the beta function, 
+   * Computes the natural log of the beta function,
    * evaluated at (\a a, \a b).
    *
    * \param a The first parameter.
@@ -859,7 +859,7 @@ namespace scythe {
       return lngammafn(p) + corr + p - p * std::log(p + q)
         + (q - 0.5) * std::log(1 + (-p / (p + q)));
     }
-    
+
     /* p and q are small: p <= q > 10. */
     return std::log(gammafn(p) * (gammafn(q) / gammafn(p + q)));
   }
@@ -914,13 +914,13 @@ namespace scythe {
   }
 
   /*********************************
-   * Fully Specified Distributions * 
+   * Fully Specified Distributions *
    *********************************/
 
   /* These macros provide a nice shorthand for the matrix versions of
    * the pdf and cdf functions.
    */
- 
+
 #define SCYTHE_ARGSET(...) __VA_ARGS__
 
 #define SCYTHE_DISTFUN_MATRIX(NAME, XTYPE, ARGNAMES, ...)             \
@@ -985,12 +985,12 @@ namespace scythe {
   pbeta(double x, double a, double b)
   {
     SCYTHE_CHECK_10(a <= 0 || b <= 0,scythe_invalid_arg, "a or b <= 0");
-    
+
     if (x <= 0)
       return 0.;
     if (x >= 1)
       return 1.;
-    
+
     return pbeta_raw(x,a,b);
   }
 
@@ -1072,7 +1072,7 @@ namespace scythe {
    */
   inline double
   lndbeta1(double x, double a, double b)
-  { 
+  {
     SCYTHE_CHECK_10((x < 0.0) || (x > 1.0), scythe_invalid_arg,
         "x not in [0,1]");
     SCYTHE_CHECK_10(a < 0.0, scythe_invalid_arg, "a < 0");
@@ -1118,16 +1118,16 @@ namespace scythe {
   inline double
   pbinom(double x, unsigned int n, double p)
   {
-      
+
     SCYTHE_CHECK_10(p < 0 || p > 1, scythe_invalid_arg, "p not in [0,1]");
     double X = std::floor(x);
-      
+
     if (X < 0.0)
       return 0;
-    
+
     if (n <= X)
       return 1;
-      
+
     return pbeta(1 - p, n - X, X + 1);
   }
 
@@ -1171,7 +1171,7 @@ namespace scythe {
   SCYTHE_DISTFUN_MATRIX(dbinom, double, SCYTHE_ARGSET(n, p), unsigned int n, double p)
 
   /**** The Chi Squared Distribution ****/
-  
+
   /* CDFs */
   /*! \brief The \f$\chi^2\f$ distribution function.
    *
@@ -1274,10 +1274,10 @@ namespace scythe {
   pexp(double x, double scale)
   {
     SCYTHE_CHECK_10(scale <= 0, scythe_invalid_arg, "scale <= 0");
-    
+
     if (x <= 0)
       return 0;
-    
+
     return (1 - std::exp(-x*scale));
   }
 
@@ -1311,10 +1311,10 @@ namespace scythe {
   dexp(double x, double scale)
   {
     SCYTHE_CHECK_10(scale <= 0, scythe_invalid_arg, "scale <= 0");
-    
+
     if (x < 0)
       return 0;
-      
+
     return std::exp(-x * scale) * scale;
   }
 
@@ -1347,7 +1347,7 @@ namespace scythe {
    *
    * \see df(double x, double df1, double df2)
    * \see rng::rf(double df1, double df2)
-   * 
+   *
    * \throw scythe_invalid_arg (Level 1)
    * \throw scythe_range_error (Level 1)
    * \throw scythe_precision_error (Level 1)
@@ -1356,17 +1356,17 @@ namespace scythe {
   inline double
   pf(double x, double df1, double df2)
   {
-    SCYTHE_CHECK_10(df1 <= 0 || df2 <= 0, scythe_invalid_arg, 
+    SCYTHE_CHECK_10(df1 <= 0 || df2 <= 0, scythe_invalid_arg,
         "df1 or df2 <= 0");
-  
+
     if (x <= 0)
       return 0;
-    
+
     if (df2 > 4e5)
       return pchisq(x*df1,df1);
     if (df1 > 4e5)
       return 1-pchisq(df2/x,df2);
-    
+
     return (1-pbeta(df2 / (df2 + df1 * x), df2 / 2.0, df1 / 2.0));
   }
 
@@ -1397,7 +1397,7 @@ namespace scythe {
    *
    * \see df(double x, double df1, double df2)
    * \see rng::rf(double df1, double df2)
-   * 
+   *
    * \throw scythe_invalid_arg (Level 1)
    * \throw scythe_range_error (Level 1)
    * \throw scythe_precision_error (Level 1)
@@ -1407,17 +1407,17 @@ namespace scythe {
   df(double x, double df1, double df2)
   {
     double dens;
-    
-    SCYTHE_CHECK_10(df1 <= 0 || df2 <= 0, scythe_invalid_arg, 
+
+    SCYTHE_CHECK_10(df1 <= 0 || df2 <= 0, scythe_invalid_arg,
         "df1 or df2 <= 0");
-    
+
     if (x <= 0)
       return 0;
-      
+
     double f = 1 / (df2 + x * df1);
     double q = df2 * f;
     double p = x * df1 * f;
-    
+
     if (df1 >= 2) {
       f = df1 * q / 2;
       dens = dbinom_raw((df1 - 2) / 2,(df1 + df2 - 2) / 2, p, q);
@@ -1425,7 +1425,7 @@ namespace scythe {
       f = (df1 * df1 * q) /(2 * p * (df1 + df2));
       dens = dbinom_raw(df1 / 2,(df1 + df2)/ 2, p, q);
     }
-    
+
     return f*dens;
   }
 
@@ -1466,9 +1466,9 @@ namespace scythe {
   inline double
   pgamma (double x, double shape, double scale)
   {
-    const double xbig = 1.0e+8, xlarge = 1.0e+37, 
+    const double xbig = 1.0e+8, xlarge = 1.0e+37,
       alphlimit = 1000.;/* normal approx. for shape > alphlimit */
-      
+
     int lower_tail = 1;
 
     double pn1, pn2, pn3, pn4, pn5, pn6, arg, a, b, c, an, osum, sum;
@@ -1481,7 +1481,7 @@ namespace scythe {
         "shape or scale <= 0");
 
     x /= scale;
-    
+
     if (x <= 0.)
       return 0.0;
 
@@ -1595,22 +1595,22 @@ namespace scythe {
 
     if (x < 0)
       return 0.0;
-    
+
     if (x == 0) {
-      SCYTHE_CHECK_10(shape < 1,scythe_invalid_arg, 
+      SCYTHE_CHECK_10(shape < 1,scythe_invalid_arg,
           "x == 0 and shape < 1");
-      
+
       if (shape > 1)
         return 0.0;
-      
+
       return 1 / scale;
     }
-    
-    if (shape < 1) { 
+
+    if (shape < 1) {
       double pr = dpois_raw(shape, x/scale);
       return pr * shape / x;
     }
-    
+
     /* else  shape >= 1 */
     double pr = dpois_raw(shape - 1, x / scale);
     return pr / scale;
@@ -1642,18 +1642,18 @@ namespace scythe {
    *
    * \see dlogis(double x, double location, double scale)
    * \see rng::rlogis(double location, double scale)
-   * 
+   *
    * \throw scythe_invalid_arg (Level 1)
    */
   inline double
   plogis (double x, double location, double scale)
   {
     SCYTHE_CHECK_10(scale <= 0.0, scythe_invalid_arg, "scale <= 0");
-    
+
     double X = (x-location) / scale;
-      
+
     X = std::exp(-X);
-      
+
     return 1 / (1+X);
   }
 
@@ -1681,18 +1681,18 @@ namespace scythe {
    *
    * \see plogis(double x, double location, double scale)
    * \see rng::rlogis(double location, double scale)
-   * 
+   *
    * \throw scythe_invalid_arg (Level 1)
    */
   inline double
   dlogis(double x, double location, double scale)
   {
     SCYTHE_CHECK_10(scale <= 0.0, scythe_invalid_arg, "scale <= 0");
-    
+
     double X = (x - location) / scale;
     double e = std::exp(-X);
     double f = 1.0 + e;
-      
+
     return e / (scale * f * f);
   }
 
@@ -1731,10 +1731,10 @@ namespace scythe {
   plnorm (double x, double logmean, double logsd)
   {
     SCYTHE_CHECK_10(logsd <= 0, scythe_invalid_arg, "logsd <= 0");
-    
+
     if (x > 0)
       return pnorm(std::log(x), logmean, logsd);
-    
+
     return 0;
   }
 
@@ -1770,12 +1770,12 @@ namespace scythe {
   dlnorm(double x, double logmean, double logsd)
   {
     SCYTHE_CHECK_10(logsd <= 0, scythe_invalid_arg, "logsd <= 0");
-    
+
     if (x == 0)
       return 0;
-    
+
     double y = (std::log(x) - logmean) / logsd;
-    
+
     return (1 / (std::sqrt(2 * M_PI))) * std::exp(-0.5 * y * y) / (x * logsd);
   }
 
@@ -1817,7 +1817,7 @@ namespace scythe {
   {
     SCYTHE_CHECK_10(n == 0 || p <= 0 || p >= 1, scythe_invalid_arg,
         "n == 0 or p not in (0,1)");
-    
+
     return pbeta(p, n, x + 1);
   }
 
@@ -1857,17 +1857,17 @@ namespace scythe {
   {
     SCYTHE_CHECK_10(n == 0 || p <= 0 || p >= 1, scythe_invalid_arg,
         "n == 0 or p not in (0,1)");
-    
+
     double prob = dbinom_raw(n, x + n, p, 1 - p);
     double P = (double) n / (n + x);
-    
+
     return P * prob;
   }
 
   SCYTHE_DISTFUN_MATRIX(dnbinom, unsigned int, SCYTHE_ARGSET(n, p), double n, double p)
 
   /**** The Normal Distribution ****/
-  
+
   /* CDFs */
   /*! \brief The normal distribution function.
    *
@@ -1896,7 +1896,7 @@ namespace scythe {
    */
   inline double
   pnorm (double x, double mean, double sd)
-  
+
   {
     SCYTHE_CHECK_10(sd <= 0, scythe_invalid_arg,
         "negative standard deviation");
@@ -1905,7 +1905,7 @@ namespace scythe {
   }
 
   SCYTHE_DISTFUN_MATRIX(pnorm, double, SCYTHE_ARGSET(mean, sd), double mean, double sd)
-  
+
 
   /* PDFs */
   /*! \brief The normal density function.
@@ -1937,14 +1937,14 @@ namespace scythe {
   {
     SCYTHE_CHECK_10(sd <= 0, scythe_invalid_arg,
         "negative standard deviation");
-    
+
     double X = (x - mean) / sd;
-    
+
     return (M_1_SQRT_2PI * std::exp(-0.5 * X * X) / sd);
   }
 
   SCYTHE_DISTFUN_MATRIX(dnorm, double, SCYTHE_ARGSET(mean, sd), double mean, double sd)
- 
+
 
   /* Return the natural log of the normal PDF */
   /*! \brief The natural log of normal density function.
@@ -1977,9 +1977,9 @@ namespace scythe {
   {
     SCYTHE_CHECK_10(sd <= 0, scythe_invalid_arg,
         "negative standard deviation");
-    
+
     double X = (x - mean) / sd;
-    
+
     return -(M_LN_SQRT_2PI  +  0.5 * X * X + std::log(sd));
   }
 
@@ -2023,23 +2023,23 @@ namespace scythe {
     double q4 = 0.38560700634e-2;
     double xp = 0.0;
     double p = in_p;
-      
+
     if (p > 0.5)
       p = 1 - p;
-        
+
     SCYTHE_CHECK_10(p < 10e-20, scythe_range_error,
         "p outside accuracy limit");
-      
+
     if (p == 0.5)
       return xp;
-      
+
     double y = std::sqrt (std::log (1.0 / std::pow (p, 2)));
     xp = y + ((((y * p4 + p3) * y + p2) * y + p1) * y + p0) /
       ((((y * q4 + q3) * y + q2) * y + q1) * y + q0);
-      
+
     if (in_p < 0.5)
       xp = -1 * xp;
-    
+
     return xp;
   }
 
@@ -2078,10 +2078,10 @@ namespace scythe {
   ppois(unsigned int x, double lambda)
   {
     SCYTHE_CHECK_10(lambda<=0.0, scythe_invalid_arg, "lambda <= 0");
-    
+
     if (lambda == 1)
       return 1;
-    
+
     return 1 - pgamma(lambda, x + 1, 1.0);
   }
 
@@ -2115,7 +2115,7 @@ namespace scythe {
   dpois(unsigned int x, double lambda)
   {
     SCYTHE_CHECK_10(lambda<=0.0, scythe_invalid_arg, "lambda <= 0");
-    
+
     // compute log(x!)
     double xx = x+1;
     double cof[6] = {
@@ -2130,7 +2130,7 @@ namespace scythe {
       ser += (cof[j] / ++y);
     }
     double lnfactx = std::log(2.5066282746310005 * ser / xx) - tmp;
-      
+
     return (std::exp( -1*lnfactx + x * std::log(lambda) - lambda));
   }
 
@@ -2159,7 +2159,7 @@ namespace scythe {
    *
    * \see dt(double x, bool b1, bool b2)
    * \see rng::rt1(double mu, double sigma2, double nu)
-   * 
+   *
    * \throw scythe_invalid_arg (Level 1)
    * \throw scythe_convergence_error (Level 1)
    * \throw scythe_range_error (Level 1)
@@ -2169,19 +2169,19 @@ namespace scythe {
   pt(double x, double n)
   {
     double val;
-    
+
     SCYTHE_CHECK_10(n <= 0, scythe_invalid_arg, "n <= 0");
-    
+
     if (n > 4e5) {
       val = 1/(4*n);
-      return pnorm1(x * (1 - val) / ::sqrt(1 + x * x * 2. * val), 
+      return pnorm1(x * (1 - val) / ::sqrt(1 + x * x * 2. * val),
           true, false);
     }
-    
+
     val = pbeta(n / (n + x * x), n / 2.0, 0.5);
-    
+
     val /= 2;
-    
+
     if (x <= 0)
       return val;
     else
@@ -2189,7 +2189,7 @@ namespace scythe {
   }
 
   SCYTHE_DISTFUN_MATRIX(pt, double, n, double n)
-  
+
   /* PDFs */
   /*! \brief The Student t distribution function.
    *
@@ -2211,7 +2211,7 @@ namespace scythe {
    *
    * \see pt(double x, bool b1, bool b2)
    * \see rng::rt1(double mu, double sigma2, double nu)
-   * 
+   *
    * \throw scythe_invalid_arg (Level 1)
    * \throw scythe_range_error (Level 1)
    * \throw scythe_precision_error (Level 1)
@@ -2222,7 +2222,7 @@ namespace scythe {
     double u;
 
     SCYTHE_CHECK_10(n <= 0, scythe_invalid_arg, "n <= 0");
-    
+
     double t = -bd0(n/2., (n + 1) / 2.)
       + stirlerr((n + 1) / 2.)
       - stirlerr(n / 2.);
@@ -2230,14 +2230,14 @@ namespace scythe {
       u = std::log(1+x*x/n)*n/2;
     else
       u = -bd0(n/2., (n+x*x)/2.) + x*x/2;
-    
+
     return std::exp(t-u)/std::sqrt(2*M_PI*(1+x*x/n));
   }
 
   SCYTHE_DISTFUN_MATRIX(dt, double, n, double n)
-  
-  /* Returns the univariate Student-t density evaluated at x 
-   * with mean mu, scale sigma^2, and nu degrees of freedom.  
+
+  /* Returns the univariate Student-t density evaluated at x
+   * with mean mu, scale sigma^2, and nu degrees of freedom.
    *
    * TODO:  Do we want a pt for this distribution?
    */
@@ -2278,17 +2278,17 @@ namespace scythe {
       - lngammafn(nu / 2.0) - std::log(std::sqrt(sigma2))
       - (nu + 1.0) / 2.0 * std::log(1.0 + (std::pow((x - mu), 2.0))
             / (nu * sigma2));
-    
+
     return(std::exp(logdens));
   }
 
   SCYTHE_DISTFUN_MATRIX(dt1, double, SCYTHE_ARGSET(mu, sigma2, nu), double mu, double sigma2, double nu)
 
-  /* Returns the natural log of the univariate Student-t density 
-   * evaluated at x with mean mu, scale sigma^2, and nu 
+  /* Returns the natural log of the univariate Student-t density
+   * evaluated at x with mean mu, scale sigma^2, and nu
    * degrees of freedom
    */
-   
+
   /*! \brief The natural log of the univariate Student t density
    * function.
    *
@@ -2318,7 +2318,7 @@ namespace scythe {
    * \throw scythe_range_error (Level 1)
    * \throw scythe_precision_error (Level 1)
    */
-  inline double 
+  inline double
   lndt1(double x, double mu, double sigma2, double nu)
   {
     double logdens = lngammafn((nu+1.0)/2.0)
@@ -2326,7 +2326,7 @@ namespace scythe {
       - lngammafn(nu/2.0) - std::log(std::sqrt(sigma2))
       - (nu+1.0)/2.0 * std::log(1.0 + (std::pow((x-mu),2.0))
         /(nu * sigma2));
-    
+
     return(logdens);
   }
 
@@ -2363,13 +2363,13 @@ namespace scythe {
   punif(double x, double a, double b)
   {
     SCYTHE_CHECK_10(b <= a, scythe_invalid_arg, "b <= a");
-      
+
     if (x <= a)
       return 0.0;
-        
+
     if (x >= b)
       return 1.0;
-      
+
     return (x - a) / (b - a);
   }
 
@@ -2404,10 +2404,10 @@ namespace scythe {
   dunif(double x, double a, double b)
   {
     SCYTHE_CHECK_10(b <= a, scythe_invalid_arg, "b <= a");
-    
+
     if (a <= x && x <= b)
       return 1.0 / (b - a);
-    
+
     return 0.0;
   }
 
@@ -2445,10 +2445,10 @@ namespace scythe {
   {
     SCYTHE_CHECK_10(shape <= 0 || scale <= 0, scythe_invalid_arg,
         "shape or scale <= 0");
-    
+
     if (x <= 0)
       return 0.0;
-    
+
     return 1 - std::exp(-std::pow(x / scale, shape));
   }
 
@@ -2487,10 +2487,10 @@ namespace scythe {
 
     if (x < 0)
       return 0.;
-      
+
     double tmp1 = std::pow(x / scale, shape - 1);
     double tmp2 = tmp1*(x / scale);
-      
+
     return shape * tmp1 * std::exp(-tmp2) / scale;
   }
 
@@ -2531,11 +2531,11 @@ namespace scythe {
         "mu is not a column vector");
     SCYTHE_CHECK_10(! Sigma.isSquare(), scythe_dimension_error,
         "Sigma is not square");
-    SCYTHE_CHECK_10(mu.rows()!=Sigma.rows() || x.rows()!=Sigma.rows(), 
+    SCYTHE_CHECK_10(mu.rows()!=Sigma.rows() || x.rows()!=Sigma.rows(),
                     scythe_conformation_error,
                     "mu, x and Sigma have mismatched row lengths")
     int k = (int) mu.rows();
-    return ( (-k/2.0)*std::log(2*M_PI) -0.5 * std::log(det(Sigma)) 
+    return ( (-k/2.0)*std::log(2*M_PI) -0.5 * std::log(det(Sigma))
        -0.5 * (t(x - mu)) * invpd(Sigma) * (x-mu) )[0];
   }
 
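The distributions.h hunks above are whitespace-only, so the scythe API they touch is unchanged. For orientation while reviewing, here is a minimal sketch of how the functions whose signatures appear in these hunks (pnorm, dnorm, pbeta) are typically called; the include path is an assumption for illustration and is not part of this commit.

    #include <iostream>
    #include "distributions.h"   // header shipped in src/; exact include path assumed

    int main () {
      // Standard-normal CDF and density at x = 1.96, via the
      // pnorm(x, mean, sd) and dnorm(x, mean, sd) signatures shown above.
      double p = scythe::pnorm(1.96, 0.0, 1.0);
      double d = scythe::dnorm(1.96, 0.0, 1.0);

      // Beta(2, 3) CDF at x = 0.5, via pbeta(x, a, b).
      double b = scythe::pbeta(0.5, 2.0, 3.0);

      std::cout << p << " " << d << " " << b << std::endl;
      return 0;
    }
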
diff --git a/src/matrix.h b/src/matrix.h
index 1602f50..b9a98cb 100644
--- a/src/matrix.h
+++ b/src/matrix.h
@@ -1,4 +1,4 @@
-/* 
+/*
  * Scythe Statistical Library Copyright (C) 2000-2002 Andrew D. Martin
  * and Kevin M. Quinn; 2002-present Andrew D. Martin, Kevin M. Quinn,
  * and Daniel Pemstein.  All Rights Reserved.
@@ -26,7 +26,7 @@
  * Many of the arithmetic and logical operators in this file are
  * implemented in terms of overloaded template definitions to provide
  * both generic and default versions of each operation.  Generic
- * templates allow (and force) the user to fully specify the 
+ * templates allow (and force) the user to fully specify the
  * template type of the returned matrix object (row or column order,
  * concrete or view) while default templates automatically return
  * concrete matrices with the ordering of the first or only Matrix
@@ -109,7 +109,7 @@ namespace scythe {
 	template <typename T_type, matrix_order ORDER, matrix_style STYLE>
 	class Matrix;
 
-  /*!  \brief A helper class for list-wise initialization.  
+  /*!  \brief A helper class for list-wise initialization.
    *
    * This class gets used behind the scenes to provide listwise
    * initialization for Matrix objects.  This documentation is mostly
@@ -149,15 +149,15 @@ namespace scythe {
    * GNU GPL.
    */
 
-  template<typename T_elem, typename T_iter, 
+  template<typename T_elem, typename T_iter,
            matrix_order O, matrix_style S>
   class ListInitializer {
     // An unbound friend
     template <class T, matrix_order OO, matrix_style SS>
     friend class Matrix;
-    
+
     public:
-      ListInitializer (T_elem val, T_iter begin, T_iter end, 
+      ListInitializer (T_elem val, T_iter begin, T_iter end,
                        Matrix<T_elem,O,S>* matrix)
         : vals_ (),
           iter_ (begin),
@@ -201,7 +201,7 @@ namespace scythe {
       Matrix<T_elem, O, S>* matrix_;
       bool populated_;
   };
-	
+
   /*! \brief Matrix superclass.
    *
    * The Matrix_base class handles Matrix functionality that doesn't
@@ -246,7 +246,7 @@ namespace scythe {
         }
       }
 
-      /* Copy constructors 
+      /* Copy constructors
        *
        * The first version handles matrices of the same order and
        * style.  The second handles matrices with different
@@ -306,7 +306,7 @@ namespace scythe {
          * run-time.  Of course, we should never get this far.
          */
         if (STYLE == Concrete) {
-          SCYTHE_THROW(scythe_style_error, 
+          SCYTHE_THROW(scythe_style_error,
               "Tried to construct a concrete submatrix (Matrix_base)!");
         }
       }
@@ -373,7 +373,7 @@ namespace scythe {
 
         storeorder_ = ORDER;
 			}
-			
+
 		public:
 			/**** ACCESSORS ****/
 
@@ -433,11 +433,11 @@ namespace scythe {
       }
 
       /*! \brief Returns the storage order of the underlying
-       * DataBlock.  
+       * DataBlock.
        *
        * In view matrices, the storage order of the data may not be the
        * same as the template ORDER.
-       * 
+       *
        *
        * \see rowstride()
        * \see colstride()
@@ -459,7 +459,7 @@ namespace scythe {
 			{
 				return rowstride_;
 			}
-			
+
       /*! \brief Returns the in-memory distance between elements in
        * successive columns of the matrix.
        *
@@ -501,7 +501,7 @@ namespace scythe {
 			{
 				return (rows() == 1);
 			}
-			
+
 			/*! \brief Returns true if this Matrix is nx1.
        *
        * \see isRowVector()
@@ -521,7 +521,7 @@ namespace scythe {
 			{
 				return (cols() == 1 || rows() == 1);
 			}
-			
+
 			/*! \brief Returns true if this Matrix is nxn.
        *
        * Null and scalar matrices are both considered square.
@@ -535,7 +535,7 @@ namespace scythe {
 			}
 
       /*! \brief Returns true if this Matrix has 0 elements.
-       *  
+       *
        *  \see empty()
        *  \see isScalar()
        */
@@ -555,7 +555,7 @@ namespace scythe {
 			{
 				return (rows() == 0);
 			}
-			
+
 
 			/**** HELPERS ****/
 
@@ -610,7 +610,7 @@ namespace scythe {
        * so great for views.  Of course, the type checks are done at
        * compile time.
 			 */
-			
+
 			/* Turn an index into a true offset into the data. */
 			inline uint index (uint i) const
       {
@@ -644,7 +644,7 @@ namespace scythe {
         }
       }
 
-    
+
     /**** INSTANCE VARIABLES ****/
     protected:
       uint rows_;   // # of rows
@@ -685,7 +685,7 @@ namespace scythe {
    * manipulating matrices.  Most notably, la.h provides definitions
    * of common linear algebra routines and ide.h defines functions
    * that perform inversion and decomposition.
-   * 
+   *
    * This Matrix data structure sits at the core of the library.  In
    * addition to standard matrix operations, this class allows
    * multiple matrices to view and modify the same underlying data.
@@ -732,7 +732,7 @@ namespace scythe {
    * There are also two possible styles of Matrix template, concrete
    * and view.  These two types of matrix provide distinct ways in
    * which to interact with an underlying block of data.
-   * 
+   *
    * Concrete matrices behave like matrices in previous
    * Scythe releases.  They directly encapsulate a block of data and
    * always process it in the same order as it is stored (their
@@ -742,7 +742,7 @@ namespace scythe {
    * the reference() method to make a concrete Matrix a view of
    * another Matrix.  Furthermore, concrete matrices are guaranteed to
    * have unit stride (That is, adjacent Matrix elements are stored
-   * adjacently in memory).  
+   * adjacently in memory).
    *
    * Views, on the other hand, provide references to data blocks.
    * More than one view can look at the same underlying block of data,
@@ -751,7 +751,7 @@ namespace scythe {
    * matrix, perhaps accessing a single row vector or a small
    * submatrix of a larger matrix.  When you copy construct
    * a view a deep copy is not made, rather the view simply provides
-   * access to the extant block of data underlying the copied object.  
+   * access to the extant block of data underlying the copied object.
    * Furthermore, when
    * you assign to a view, you overwrite the data the view is
    * currently pointing to, rather than generating a new data block.
@@ -788,7 +788,7 @@ namespace scythe {
    * objects.
    */
 
-	template <typename T_type = double, matrix_order ORDER = Col, 
+	template <typename T_type = double, matrix_order ORDER = Col,
             matrix_style STYLE = Concrete>
 	class Matrix : public Matrix_base<ORDER, STYLE>,
 								 public DataBlockReference<T_type>
@@ -858,8 +858,8 @@ namespace scythe {
        * \see reverse_bidirectional_iterator
        * \see const_reverse_bidirectional_iterator
        */
-			typedef typename 
-        std::reverse_iterator<matrix_random_access_iterator<T_type, 
+			typedef typename
+        std::reverse_iterator<matrix_random_access_iterator<T_type,
                               ORDER, ORDER, STYLE> > reverse_iterator;
 
       /*! \brief Reverse Const Random Access Iterator type.
@@ -956,7 +956,7 @@ namespace scythe {
        * a convenient shorthand for a compromise (with speed and
        * flexibility between const_matrix_random_access_iterator and
        * const_matrix_forward_iterator) const Matrix iterator type.
-       * 
+       *
        * \see iterator
        * \see const_iterator
        * \see reverse_iterator
@@ -990,8 +990,8 @@ namespace scythe {
        * \see const_bidirectional_iterator
        * \see const_reverse_bidirectional_iterator
        */
-			typedef typename 
-        std::reverse_iterator<matrix_bidirectional_iterator<T_type, 
+			typedef typename
+        std::reverse_iterator<matrix_bidirectional_iterator<T_type,
                 ORDER, ORDER, STYLE> > reverse_bidirectional_iterator;
 
       /*! \brief Reverse Const Bidirectional Iterator type.
@@ -1023,12 +1023,12 @@ namespace scythe {
        * Matrix.
        */
       typedef T_type ttype;
-		
+
 		private:
 			/* Some convenience typedefs */
 			typedef DataBlockReference<T_type> DBRef;
 			typedef Matrix_base<ORDER, STYLE> Base;
-			
+
 		public:
 			/**** CONSTRUCTORS ****/
 
@@ -1089,7 +1089,7 @@ namespace scythe {
        * The standard constructor creates a rowsXcols Matrix, filled
        * with zeros by default.  Optionally, you can leave the Matrix
        * uninitialized, or choose a different fill value.
-       * 
+       *
        * \param rows The number of rows in the Matrix.
        * \param cols The number of columns in the Matrix.
        * \param fill Indicates whether or not the Matrix should be
@@ -1203,7 +1203,7 @@ namespace scythe {
           uint rows, cols;
           file >> rows >> cols;
           resize(rows, cols);
-          std::copy(std::istream_iterator<T_type> (file), 
+          std::copy(std::istream_iterator<T_type> (file),
                     std::istream_iterator<T_type>(), begin_f<Row>());
         } else {
           std::string row;
@@ -1342,8 +1342,8 @@ namespace scythe {
        * matrix, even if the constructed matrix is a view. It is
        * impossible for a matrix view with one element type to
        * reference the data block of a matrix containing elements of a
-       * different type. 
-       * 
+       * different type.
+       *
        * \param M The Matrix to copy.
        *
        * \see Matrix()
@@ -1422,7 +1422,7 @@ namespace scythe {
          * run-time.
          */
         if (STYLE == Concrete) {
-          SCYTHE_THROW(scythe_style_error, 
+          SCYTHE_THROW(scythe_style_error,
               "Tried to construct a concrete submatrix (Matrix)!");
         }
       }
@@ -1430,7 +1430,7 @@ namespace scythe {
     public:
       /**** DESTRUCTOR ****/
 
-      /*!\brief Destructor. 
+      /*!\brief Destructor.
        */
       ~Matrix() {}
 
@@ -1448,7 +1448,7 @@ namespace scythe {
        * This modifier makes this matrix a view of another's data.
        * The action detaches the Matrix from its current view; if no
        * other Matrix views the detached DataBlock, it will be
-       * deallocated.  
+       * deallocated.
        *
        * Concrete matrices cannot convert into views at
        * run time.  Therefore, it is an error to invoke this method on
@@ -1471,7 +1471,7 @@ namespace scythe {
 			inline void reference (const Matrix<T_type, O, S> &M)
 			{
         if (STYLE == Concrete) {
-          SCYTHE_THROW(scythe_style_error, 
+          SCYTHE_THROW(scythe_style_error,
               "Concrete matrices cannot reference other matrices");
         } else {
           this->referenceOther(M);
@@ -1539,7 +1539,7 @@ namespace scythe {
       }
 
 			/**** INDEXING OPERATORS ****/
-			
+
       /*! \brief Access or modify an element in this Matrix.
        *
        * This indexing operator allows the caller to access or modify
@@ -1554,7 +1554,7 @@ namespace scythe {
        * \see operator()(uint) const
        * \see operator()(uint, uint)
        * \see operator()(uint, uint) const
-       * 
+       *
        * \throw scythe_bounds_error (Level 3)
        */
 			inline T_type& operator[] (uint i)
@@ -1564,10 +1564,10 @@ namespace scythe {
 
 				return data_[Base::index(i)];
 			}
-			
+
       /*! \brief Access an element in this Matrix.
        *
-       * This indexing operator allows the caller to access 
+       * This indexing operator allows the caller to access
        * the ith (indexed in this Matrix's matrix_order) element of
        * this Matrix, indexed from 0 to n - 1, where n is the number
        * of elements in the Matrix.
@@ -1579,7 +1579,7 @@ namespace scythe {
        * \see operator()(uint) const
        * \see operator()(uint, uint)
        * \see operator()(uint, uint) const
-       * 
+       *
        * \throw scythe_bounds_error (Level 3)
        */
 			inline T_type& operator[] (uint i) const
@@ -1604,7 +1604,7 @@ namespace scythe {
        * \see operator()(uint) const
        * \see operator()(uint, uint)
        * \see operator()(uint, uint) const
-       * 
+       *
        * \throw scythe_bounds_error (Level 3)
        */
 			inline T_type& operator() (uint i)
@@ -1614,10 +1614,10 @@ namespace scythe {
 
 				return data_[Base::index(i)];
 			}
-			
+
       /*! \brief Access an element in this Matrix.
        *
-       * This indexing operator allows the caller to access 
+       * This indexing operator allows the caller to access
        * the ith (indexed in this Matrix's matrix_order) element of
        * this Matrix, indexed from 0 to n - 1, where n is the number
        * of elements in the Matrix.
@@ -1629,7 +1629,7 @@ namespace scythe {
        * \see operator()(uint)
        * \see operator()(uint, uint)
        * \see operator()(uint, uint) const
-       * 
+       *
        * \throw scythe_bounds_error (Level 3)
        */
 			inline T_type& operator() (uint i) const
@@ -1655,7 +1655,7 @@ namespace scythe {
        * \see operator()(uint)
        * \see operator()(uint) const
        * \see operator()(uint, uint) const
-       * 
+       *
        * \throw scythe_bounds_error (Level 3)
        */
 			inline T_type& operator() (uint i, uint j)
@@ -1665,10 +1665,10 @@ namespace scythe {
 
 				return data_[Base::index(i, j)];
 			}
-				
+
       /*! \brief Access an element in this Matrix.
        *
-       * This indexing operator allows the caller to access 
+       * This indexing operator allows the caller to access
        * the (i, j)th element of
        * this Matrix, where i is an element of 0, 1, ..., rows - 1 and
        * j is an element of 0, 1, ..., columns - 1.
@@ -1681,7 +1681,7 @@ namespace scythe {
        * \see operator()(uint)
        * \see operator()(uint) const
        * \see operator() (uint, uint)
-       * 
+       *
        * \throw scythe_bounds_error (Level 3)
        */
 			inline T_type& operator() (uint i, uint j) const
@@ -1715,7 +1715,7 @@ namespace scythe {
        * is such complete gibberish that I don't think this is worth
        * the slight optimization.
        */
-      
+
       /*! \brief Returns a view of a submatrix.
        *
        * This operator returns a rectangular submatrix view of this
@@ -1738,19 +1738,19 @@ namespace scythe {
        * \b Example:
        * \include example.matrix.submatrix.cc
        */
-      inline Matrix<T_type, ORDER, View> 
+      inline Matrix<T_type, ORDER, View>
 			operator() (uint x1, uint y1, uint x2, uint y2)
 			{
-				SCYTHE_CHECK_20 (! Base::inRange(x1, y1) 
+				SCYTHE_CHECK_20 (! Base::inRange(x1, y1)
             || ! Base::inRange(x2, y2)
 						|| x1 > x2 || y1 > y2,
 						scythe_bounds_error,
 						"Submatrix (" << x1 << ", " << y1 << ") ; ("
 						<< x2 << ", " << y2 << ") out of range or ill-formed");
-				
+
 				return (Matrix<T_type, ORDER, View>(*this, x1, y1, x2, y2));
 			}
-			
+
       /*! \brief Returns a view of a submatrix.
        *
        * This operator returns a rectangular submatrix view of this
@@ -1770,10 +1770,10 @@ namespace scythe {
        *
        * \throw scythe_bounds_error (Level 2)
        */
-      inline Matrix<T_type, ORDER, View> 
+      inline Matrix<T_type, ORDER, View>
       operator() (uint x1, uint y1, uint x2, uint y2) const
 			{
-				SCYTHE_CHECK_20 (! Base::inRange(x1, y1) 
+				SCYTHE_CHECK_20 (! Base::inRange(x1, y1)
             || ! Base::inRange(x2, y2)
 						|| x1 > x2 || y1 > y2,
 						scythe_bounds_error,
@@ -1801,7 +1801,7 @@ namespace scythe {
        * \b Example:
        * \include example.matrix.vector.cc
        */
-      inline Matrix<T_type, ORDER, View> 
+      inline Matrix<T_type, ORDER, View>
 			operator() (const all_elements a, uint j)
 			{
 				SCYTHE_CHECK_20 (j >= Base::cols(), scythe_bounds_error,
@@ -1810,7 +1810,7 @@ namespace scythe {
 				return (Matrix<T_type, ORDER, View>
            (*this, 0, j, Base::rows() - 1, j));
 			}
-			
+
       /*! \brief Returns a view of a column vector.
        *
        * This operator returns a vector view of column j in this Matrix.
@@ -1826,7 +1826,7 @@ namespace scythe {
        *
        * \throw scythe_bounds_error (Level 2)
        */
-      inline Matrix<T_type, ORDER, View> 
+      inline Matrix<T_type, ORDER, View>
 			operator() (const all_elements a, uint j) const
 			{
 				SCYTHE_CHECK_20 (j >= Base::cols(), scythe_bounds_error,
@@ -1854,7 +1854,7 @@ namespace scythe {
        * \b Example:
        * \include example.matrix.vector.cc
        */
-      inline Matrix<T_type, ORDER, View> 
+      inline Matrix<T_type, ORDER, View>
 			operator() (uint i, const all_elements b)
 			{
 				SCYTHE_CHECK_20 (i >= Base::rows(), scythe_bounds_error,
@@ -1863,7 +1863,7 @@ namespace scythe {
 				return (Matrix<T_type, ORDER, View>
             (*this, i, 0, i, Base::cols() - 1));
 			}
-			
+
       /*! \brief Returns a view of a row vector.
        *
        * This operator returns a vector view of row i in this Matrix.
@@ -1879,14 +1879,14 @@ namespace scythe {
        *
        * \throw scythe_bounds_error (Level 2)
        */
-      inline Matrix<T_type, ORDER, View> 
+      inline Matrix<T_type, ORDER, View>
 			operator() (uint i, const all_elements b) const
 			{
 				SCYTHE_CHECK_20 (i >= Base::rows(), scythe_bounds_error,
 						"Row vector index " << i << " out of range");
 				return (Matrix<T_type, ORDER, View>
             (*this, i, 0, i, Base::cols() - 1));
-			}	
+			}
 
      /*! \brief Returns single element in matrix as scalar type
       *
@@ -1976,7 +1976,7 @@ namespace scythe {
 #ifndef SCYTHE_VIEW_ASSIGNMENT_RECYCLE
           SCYTHE_CHECK_10 (Base::size() != M.size(),
               scythe_conformation_error,
-              "LHS has dimensions (" << Base::rows() 
+              "LHS has dimensions (" << Base::rows()
               << ", " << Base::cols()
               << ") while RHS has dimensions (" << M.rows() << ", "
               << M.cols() << ")");
@@ -1989,7 +1989,7 @@ namespace scythe {
 
         return *this;
       }
-      
+
       /*! \brief Assign the contents of one Matrix to another.
        *
        * Like copy construction, assignment works differently for
@@ -2050,7 +2050,7 @@ namespace scythe {
 #ifndef SCYTHE_VIEW_ASSIGNMENT_RECYCLE
           SCYTHE_CHECK_10 (Base::size() != M.size(),
               scythe_conformation_error,
-              "LHS has dimensions (" << Base::rows() 
+              "LHS has dimensions (" << Base::rows()
               << ", " << Base::cols()
               << ") while RHS has dimensions (" << M.rows() << ", "
               << M.cols() << ")");
@@ -2063,7 +2063,7 @@ namespace scythe {
 
         return *this;
       }
-      
+
       /* List-wise initialization behavior is a touch complicated.
        * List needs to be less than or equal to matrix in size and it
        * is copied into the matrix with R-style recycling.
@@ -2111,10 +2111,10 @@ namespace scythe {
        * \b Example:
        * \include example.matrix.operator.assignment.cc
        */
-			ListInitializer<T_type, iterator, ORDER, STYLE> 
+			ListInitializer<T_type, iterator, ORDER, STYLE>
       operator=(T_type x)
 			{
-				return (ListInitializer<T_type, iterator, ORDER, STYLE> 
+				return (ListInitializer<T_type, iterator, ORDER, STYLE>
           (x, begin(),end(), this));
 			}
 
@@ -2162,17 +2162,17 @@ namespace scythe {
 			 */
 			template <typename OP, matrix_order O, matrix_style S>
 			inline Matrix&
-			elementWiseOperatorAssignment (const Matrix<T_type, O, S>& M, 
+			elementWiseOperatorAssignment (const Matrix<T_type, O, S>& M,
                                      OP op)
 			{
-				SCYTHE_CHECK_10 (Base::size() != 1 && M.size() != 1 && 
+				SCYTHE_CHECK_10 (Base::size() != 1 && M.size() != 1 &&
 						(Base::rows () != M.rows() || Base::cols() != M.cols()),
 						scythe_conformation_error,
-						"Matrices with dimensions (" << Base::rows() 
+						"Matrices with dimensions (" << Base::rows()
             << ", " << Base::cols()
 						<< ") and (" << M.rows() << ", " << M.cols()
 						<< ") are not conformable");
-				
+
 				if (Base::size() == 1) { // 1x1 += nXm
 					T_type tmp = (*this)(0);
 					resize2Match(M);
@@ -2233,10 +2233,10 @@ namespace scythe {
        */
       inline Matrix& operator+= (T_type x)
       {
-        return elementWiseOperatorAssignment(Matrix(x), 
+        return elementWiseOperatorAssignment(Matrix(x),
             std::plus<T_type> ());
       }
-			
+
       /*! \brief Subtract another Matrix from this Matrix.
        *
        * This operator subtracts another Matrix from this one and
@@ -2280,10 +2280,10 @@ namespace scythe {
        */
       inline Matrix& operator-= (T_type x)
       {
-        return elementWiseOperatorAssignment(Matrix(x), 
+        return elementWiseOperatorAssignment(Matrix(x),
             std::minus<T_type> ());
       }
-			
+
       /*! \brief Multiply the elements of this Matrix with another's.
        *
        * This operator multiplies the elements of this Matrix with
@@ -2311,7 +2311,7 @@ namespace scythe {
       template <matrix_order O, matrix_style S>
 			inline Matrix& operator%= (const Matrix<T_type, O, S> &M)
 			{
-				return elementWiseOperatorAssignment(M, 
+				return elementWiseOperatorAssignment(M,
             std::multiplies<T_type> ());
 			}
 
@@ -2333,10 +2333,10 @@ namespace scythe {
        */
       inline Matrix& operator%= (T_type x)
       {
-        return elementWiseOperatorAssignment(Matrix(x), 
+        return elementWiseOperatorAssignment(Matrix(x),
             std::multiplies<T_type> ());
       }
-			
+
       /*! \brief Divide the elements of this Matrix by another's.
        *
        * This operator divides the elements of this Matrix by
@@ -2381,7 +2381,7 @@ namespace scythe {
        */
       inline Matrix& operator/= (T_type x)
       {
-        return elementWiseOperatorAssignment(Matrix(x), 
+        return elementWiseOperatorAssignment(Matrix(x),
             std::divides<T_type> ());
       }
 
@@ -2408,7 +2408,7 @@ namespace scythe {
       template <matrix_order O, matrix_style S>
 			inline Matrix& operator^= (const Matrix<T_type, O, S> &M)
 			{
-				return elementWiseOperatorAssignment(M, 
+				return elementWiseOperatorAssignment(M,
             exponentiate<T_type>());
 			}
 
@@ -2430,7 +2430,7 @@ namespace scythe {
        */
       inline Matrix& operator^= (T_type x)
       {
-        return elementWiseOperatorAssignment(Matrix(x), 
+        return elementWiseOperatorAssignment(Matrix(x),
             exponentiate<T_type> ());
       }
 
@@ -2477,8 +2477,8 @@ namespace scythe {
          * always have to create a new matrix here, so there is no
          * speed-up from using *=.
          */
-        
-        /* This saves a copy over 
+
+        /* This saves a copy over
          * *this = (*this) * M;
          * if we're concrete
          */
@@ -2514,7 +2514,7 @@ namespace scythe {
        */
       inline Matrix& operator*= (T_type x)
       {
-        return elementWiseOperatorAssignment(Matrix(x), 
+        return elementWiseOperatorAssignment(Matrix(x),
             std::multiplies<T_type> ());
       }
 
@@ -2572,13 +2572,13 @@ namespace scythe {
             }
           }
         }
-       
+
         this->referenceOther(res);
         this->mimic(res);
 
         return *this;
       }
-        
+
       /*! \brief Kronecker multiply this Matrix by a scalar.
        *
        * This method Kronecker multiplies this Matrix with some scalar,
@@ -2602,7 +2602,7 @@ namespace scythe {
        */
       inline Matrix& kronecker (T_type x)
       {
-        return elementWiseOperatorAssignment(Matrix(x), 
+        return elementWiseOperatorAssignment(Matrix(x),
             std::multiplies<T_type> ());
       }
 
@@ -2630,7 +2630,7 @@ namespace scythe {
       template <matrix_order O, matrix_style S>
 			inline Matrix& operator&= (const Matrix<T_type, O, S> &M)
 			{
-				return elementWiseOperatorAssignment(M, 
+				return elementWiseOperatorAssignment(M,
             std::logical_and<T_type>());
 			}
 
@@ -2652,7 +2652,7 @@ namespace scythe {
        */
       inline Matrix& operator&= (T_type x)
       {
-        return elementWiseOperatorAssignment(Matrix(x), 
+        return elementWiseOperatorAssignment(Matrix(x),
             std::logical_and<T_type> ());
       }
 
@@ -2678,7 +2678,7 @@ namespace scythe {
       template <matrix_order O, matrix_style S>
 			inline Matrix& operator|= (const Matrix<T_type, O, S> &M)
 			{
-				return elementWiseOperatorAssignment(M, 
+				return elementWiseOperatorAssignment(M,
             std::logical_or<T_type>());
 			}
 
@@ -2700,7 +2700,7 @@ namespace scythe {
        */
       inline Matrix& operator|= (T_type x)
       {
-        return elementWiseOperatorAssignment(Matrix(x), 
+        return elementWiseOperatorAssignment(Matrix(x),
             std::logical_or<T_type> ());
       }
 
@@ -2723,7 +2723,7 @@ namespace scythe {
 			 * even if the dimensions passed to resize are the same as the
 			 * current Matrix's dimensions.  Resized matrices point to new,
 			 * uninitialized data blocks (technically, the Matrix might
-			 * recycle its current block if it is the only Matrix viewing 
+			 * recycle its current block if it is the only Matrix viewing
 			 * the block, but callers cannot rely on this).  It is important
        * to realize that concrete matrices behave just like views in
        * this respect.  Any views to a concrete Matrix will be
@@ -2822,7 +2822,7 @@ namespace scythe {
        * Matrix<double, Col, View> v1 = A(_, 1);
        * A.swap(B);
        * Matrix<double, Col, View> v2 = B(_, 1);
-       * 
+       *
        * v1 == v2; // evaluates to true
        *
        */
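The documentation block that closes just above walks through what happens to an outstanding view when two matrices are swapped. Spelled out as a self-contained snippet, using only constructs visible in the surrounding context (the default Matrix<> template parameters, the `_` all-elements selector, the operator()(_, j) column view, and swap()); the include path is assumed for illustration and is not part of this commit.

    #include "matrix.h"   // exact include path within src/ assumed

    using namespace scythe;

    int main () {
      Matrix<> A(3, 3);   // defaults: <double, Col, Concrete>, zero-filled
      Matrix<> B(3, 3);

      // v1 is a view of column 1 of A's underlying data block.
      Matrix<double, Col, View> v1 = A(_, 1);

      // swap() trades the underlying data blocks; v1 keeps referring to
      // the block it was constructed on, which B now owns.
      A.swap(B);
      Matrix<double, Col, View> v2 = B(_, 1);

      // Per the comment above, v1 and v2 now view the same column.
      return 0;
    }
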
@@ -2830,7 +2830,7 @@ namespace scythe {
       /*! \brief Swap this Matrix with another.
        *
        * This modifier is much like a dual copy constructor and is
-       * part of the Standard Template Library (STL) 
+       * part of the Standard Template Library (STL)
        * interface for container objects.  It is only possible to swap
        * two matrices of the same matrix_order and matrix_style.  When
        * two matrices are swapped, they trade their underlying
@@ -2880,7 +2880,7 @@ namespace scythe {
       inline bool isZero () const
       {
         const_forward_iterator last = end_f();
-        return (last == std::find_if(begin_f(), last, 
+        return (last == std::find_if(begin_f(), last,
           std::bind1st(std::not_equal_to<T_type> (), 0)));
       }
 
@@ -3102,7 +3102,7 @@ namespace scythe {
       {
         if (data_ == M.getArray() && STYLE == Concrete && S == Concrete)
           return true;
-        else if (data_ == M.getArray() && Base::rows() == M.rows() 
+        else if (data_ == M.getArray() && Base::rows() == M.rows()
                  && Base::cols() == M.cols()) {
           return true;
         } else if (this->isNull() && M.isNull())
@@ -3128,7 +3128,7 @@ namespace scythe {
       equals(T_type x) const
       {
         const_forward_iterator last = end_f();
-        return (last == std::find_if(begin_f(), last, 
+        return (last == std::find_if(begin_f(), last,
           std::bind1st(std::not_equal_to<T_type> (), x)));
       }
 
@@ -3140,7 +3140,7 @@ namespace scythe {
        *
        * This method returns a pointer to the internal data array
        * contained within the DataBlock that this Matrix references.
-       * 
+       *
        * \warning It is generally a bad idea to use this method.  We
        * provide it only for convenience.  Please note that, when
        * working with views, the internal data array may not even be
@@ -3201,8 +3201,8 @@ namespace scythe {
           SCYTHE_THROW(scythe_invalid_arg, "Invalid flag: " << flag);
 
         if (! out)
-          SCYTHE_THROW(scythe_file_error, 
-              "Could not open file " << path); 
+          SCYTHE_THROW(scythe_file_error,
+              "Could not open file " << path);
 
         if (header) {
           out << Base::rows() << " " << Base::cols();
@@ -3227,7 +3227,7 @@ namespace scythe {
        */
 
       /* Random Access Iterator Factories */
-      
+
       /* Generalized versions */
 
       /*! \brief Get an iterator pointing to the start of a Matrix.
@@ -3240,14 +3240,14 @@ namespace scythe {
        * in any order through an explicit template instantiation.
        */
       template <matrix_order I_ORDER>
-      inline 
+      inline
       matrix_random_access_iterator<T_type, I_ORDER, ORDER, STYLE>
       begin ()
       {
         return matrix_random_access_iterator<T_type, I_ORDER, ORDER,
                                              STYLE>(*this);
       }
-      
+
       /*! \brief Get an iterator pointing to the start of a Matrix.
        *
        * This is a factory that returns a
@@ -3279,13 +3279,13 @@ namespace scythe {
        * in any order through an explicit template instantiation.
        */
       template <matrix_order I_ORDER>
-      inline 
+      inline
       matrix_random_access_iterator<T_type, I_ORDER, ORDER, STYLE>
       end ()
       {
         return (begin<I_ORDER>() + Base::size());
       }
-      
+
       /*! \brief Get an iterator pointing to the end of a Matrix.
        *
        * This is a factory that returns an
@@ -3297,7 +3297,7 @@ namespace scythe {
        * in any order through an explicit template instantiation.
        */
       template <matrix_order I_ORDER>
-      inline 
+      inline
       const_matrix_random_access_iterator<T_type, I_ORDER, ORDER, STYLE>
       end () const
       {
@@ -3320,7 +3320,7 @@ namespace scythe {
       rbegin()
       {
         return std::reverse_iterator<matrix_random_access_iterator
-                                     <T_type, I_ORDER, ORDER, STYLE> > 
+                                     <T_type, I_ORDER, ORDER, STYLE> >
                (end<I_ORDER>());
       }
 
@@ -3335,13 +3335,13 @@ namespace scythe {
        * in any order through an explicit template instantiation.
        */
       template <matrix_order I_ORDER>
-      inline 
+      inline
       std::reverse_iterator<const_matrix_random_access_iterator
-                            <T_type, I_ORDER, ORDER, STYLE> > 
+                            <T_type, I_ORDER, ORDER, STYLE> >
       rbegin() const
       {
         return std::reverse_iterator<const_matrix_random_access_iterator
-                                     <T_type, I_ORDER, ORDER, STYLE> > 
+                                     <T_type, I_ORDER, ORDER, STYLE> >
         (end<I_ORDER>());
       }
 
@@ -3362,7 +3362,7 @@ namespace scythe {
       rend()
       {
         return std::reverse_iterator<matrix_random_access_iterator
-                                     <T_type, I_ORDER, ORDER, STYLE> > 
+                                     <T_type, I_ORDER, ORDER, STYLE> >
                (begin<I_ORDER>());
       }
 
@@ -3377,20 +3377,20 @@ namespace scythe {
        * in any order through an explicit template instantiation.
        */
       template <matrix_order I_ORDER>
-      inline 
+      inline
       std::reverse_iterator<const_matrix_random_access_iterator
-                            <T_type, I_ORDER, ORDER, STYLE> > 
+                            <T_type, I_ORDER, ORDER, STYLE> >
       rend() const
       {
         return std::reverse_iterator<const_matrix_random_access_iterator
-                                     <T_type, I_ORDER, ORDER, STYLE> > 
+                                     <T_type, I_ORDER, ORDER, STYLE> >
           (begin<I_ORDER>());
       }
 
       /* Specific versions --- the generalized versions force you
        * choose the ordering explicitly.  These definitions set up
        * in-order iteration as a default */
-      
+
       /*! \brief Get an iterator pointing to the start of a Matrix.
        *
        * This is a factory that returns a Matrix::iterator that
@@ -3406,7 +3406,7 @@ namespace scythe {
       {
         return iterator(*this);
       }
-      
+
       /*! \brief Get an iterator pointing to the start of a Matrix.
        *
        * This is a factory that returns a Matrix::const_iterator that
@@ -3438,7 +3438,7 @@ namespace scythe {
       {
         return (begin() + Base::size());
       }
-      
+
       /*! \brief Get an iterator pointing to the end of a Matrix.
        *
        * This is a factory that returns an Matrix::const_iterator that
@@ -3450,7 +3450,7 @@ namespace scythe {
        * returned by this function always iterates in the same order
        * as the given Matrix' matrix_order.
        */
-      inline 
+      inline
       const_iterator end () const
       {
         return (begin() + Base::size());
@@ -3537,14 +3537,14 @@ namespace scythe {
        * in any order through an explicit template instantiation.
        */
       template <matrix_order I_ORDER>
-      inline 
+      inline
       matrix_forward_iterator<T_type, I_ORDER, ORDER, STYLE>
       begin_f ()
       {
         return matrix_forward_iterator<T_type, I_ORDER, ORDER,
                                              STYLE>(*this);
       }
-      
+
       /*! \brief Get an iterator pointing to the start of a Matrix.
        *
        * This is a factory that returns a
@@ -3575,13 +3575,13 @@ namespace scythe {
        * in any order through an explicit template instantiation.
        */
       template <matrix_order I_ORDER>
-      inline 
+      inline
       matrix_forward_iterator<T_type, I_ORDER, ORDER, STYLE>
       end_f ()
       {
         return (begin_f<I_ORDER>().set_end());
       }
-      
+
       /*! \brief Get an iterator pointing to the end of a Matrix.
        *
        * This is a factory that returns an
@@ -3593,7 +3593,7 @@ namespace scythe {
        * in any order through an explicit template instantiation.
        */
       template <matrix_order I_ORDER>
-      inline 
+      inline
       const_matrix_forward_iterator<T_type, I_ORDER, ORDER, STYLE>
       end_f () const
       {
@@ -3601,7 +3601,7 @@ namespace scythe {
       }
 
       /* Default Versions */
-      
+
       /*! \brief Get an iterator pointing to the start of a Matrix.
        *
        * This is a factory that returns a Matrix::forward_iterator that
@@ -3617,7 +3617,7 @@ namespace scythe {
       {
         return forward_iterator(*this);
       }
-      
+
       /*! \brief Get an iterator pointing to the start of a Matrix.
        *
        * This is a factory that returns a
@@ -3650,7 +3650,7 @@ namespace scythe {
       {
         return (begin_f().set_end());
       }
-      
+
       /*! \brief Get an iterator pointing to the end of a Matrix.
        *
        * This is a factory that returns an
@@ -3663,7 +3663,7 @@ namespace scythe {
        * returned by this function always iterates in the same order
        * as the given Matrix' matrix_order.
        */
-      inline 
+      inline
       const_forward_iterator end_f () const
       {
         return (begin_f().set_end());
@@ -3684,14 +3684,14 @@ namespace scythe {
        * in any order through an explicit template instantiation.
        */
       template <matrix_order I_ORDER>
-      inline 
+      inline
       matrix_bidirectional_iterator<T_type, I_ORDER, ORDER, STYLE>
       begin_bd ()
       {
         return matrix_bidirectional_iterator<T_type, I_ORDER, ORDER,
                                              STYLE>(*this);
       }
-      
+
       /*! \brief Get an iterator pointing to the start of a Matrix.
        *
        * This is a factory that returns a
@@ -3723,13 +3723,13 @@ namespace scythe {
        * in any order through an explicit template instantiation.
        */
       template <matrix_order I_ORDER>
-      inline 
+      inline
       matrix_bidirectional_iterator<T_type, I_ORDER, ORDER, STYLE>
       end_bd ()
       {
         return (begin_bd<I_ORDER>().set_end());
       }
-      
+
       /*! \brief Get an iterator pointing to the end of a Matrix.
        *
        * This is a factory that returns an
@@ -3741,7 +3741,7 @@ namespace scythe {
        * in any order through an explicit template instantiation.
        */
       template <matrix_order I_ORDER>
-      inline 
+      inline
       const_matrix_bidirectional_iterator<T_type, I_ORDER, ORDER, STYLE>
       end_bd () const
       {
@@ -3764,7 +3764,7 @@ namespace scythe {
       rbegin_bd ()
       {
         return std::reverse_iterator<matrix_bidirectional_iterator
-                                     <T_type, I_ORDER, ORDER, STYLE> > 
+                                     <T_type, I_ORDER, ORDER, STYLE> >
                (end_bd<I_ORDER>());
       }
 
@@ -3779,13 +3779,13 @@ namespace scythe {
        * in any order through an explicit template instantiation.
        */
       template <matrix_order I_ORDER>
-      inline 
+      inline
       std::reverse_iterator<const_matrix_bidirectional_iterator
-                            <T_type, I_ORDER, ORDER, STYLE> > 
+                            <T_type, I_ORDER, ORDER, STYLE> >
       rbegin_bd () const
       {
         return std::reverse_iterator<const_matrix_bidirectional_iterator
-                                     <T_type, I_ORDER, ORDER, STYLE> > 
+                                     <T_type, I_ORDER, ORDER, STYLE> >
         (end_bd<I_ORDER>());
       }
 
@@ -3806,7 +3806,7 @@ namespace scythe {
       rend_bd ()
       {
         return std::reverse_iterator<matrix_bidirectional_iterator
-                                     <T_type, I_ORDER, ORDER, STYLE> > 
+                                     <T_type, I_ORDER, ORDER, STYLE> >
                (begin_bd<I_ORDER>());
       }
 
@@ -3821,20 +3821,20 @@ namespace scythe {
        * in any order through an explicit template instantiation.
        */
       template <matrix_order I_ORDER>
-      inline 
+      inline
       std::reverse_iterator<const_matrix_bidirectional_iterator
-                            <T_type, I_ORDER, ORDER, STYLE> > 
+                            <T_type, I_ORDER, ORDER, STYLE> >
       rend_bd () const
       {
         return std::reverse_iterator<const_matrix_bidirectional_iterator
-                                     <T_type, I_ORDER, ORDER, STYLE> > 
+                                     <T_type, I_ORDER, ORDER, STYLE> >
           (begin_bd<I_ORDER>());
       }
 
       /* Specific versions --- the generalized versions force you
        * choose the ordering explicitly.  These definitions set up
        * in-order iteration as a default */
-      
+
       /*! \brief Get an iterator pointing to the start of a Matrix.
        *
        * This is a factory that returns a
@@ -3851,7 +3851,7 @@ namespace scythe {
       {
         return bidirectional_iterator(*this);
       }
-      
+
       /*! \brief Get an iterator pointing to the start of a Matrix.
        *
        * This is a factory that returns a
@@ -3885,7 +3885,7 @@ namespace scythe {
       {
         return (begin_bd().set_end());
       }
-      
+
       /*! \brief Get an iterator pointing to the end of a Matrix.
        *
        * This is a factory that returns an Matrix::const_bidirectional
@@ -3898,7 +3898,7 @@ namespace scythe {
        * returned by this function always iterates in the same order
        * as the given Matrix' matrix_order.
        */
-      inline 
+      inline
       const_bidirectional_iterator end_bd () const
       {
         return (begin_bd().set_end());
@@ -3979,7 +3979,7 @@ namespace scythe {
 			 * more code than should be necessary but "using" inherited ivs
        * is just stupid.
 			 */
-			using DBRef::data_;  // refer to inherited data pointer directly 
+			using DBRef::data_;  // refer to inherited data pointer directly
 			using Base::rows_;   // " # of rows directly
 			using Base::cols_;   // " # of cols directly
 
@@ -4075,7 +4075,7 @@ namespace scythe {
   {                                                                   \
     return (OP <T_type, ORDER, Concrete> (lhs, rhs));                 \
   }
-  
+
 #define SCYTHE_BINARY_OPERATOR_GSM(OP)                                \
   template <typename T_type, matrix_order ORDER, matrix_style STYLE,  \
             matrix_order R_ORDER, matrix_style R_STYLE>               \
@@ -4105,10 +4105,10 @@ namespace scythe {
   SCYTHE_BINARY_OPERATOR_DSM(OP)
 
   /* Matrix multiplication */
-  
+
   /* General template version. Must be called with operator*<> syntax
    */
- 
+
   /* We provide two symmetric algorithms for matrix multiplication,
    * one for col-major and the other for row-major matrices.  They are
    * designed to minimize cache misses.The decision is based on the
@@ -4164,7 +4164,7 @@ namespace scythe {
    * \throw scythe_conformation_error (Level 1)
    * \throw scythe_alloc_error (Level 1)
    */
-   
+
   template <typename T_type, matrix_order ORDER, matrix_style STYLE,
             matrix_order L_ORDER, matrix_style L_STYLE,
             matrix_order R_ORDER, matrix_style R_STYLE>
@@ -4177,7 +4177,7 @@ namespace scythe {
 
     SCYTHE_CHECK_10 (lhs.cols() != rhs.rows(),
         scythe_conformation_error,
-        "Matrices with dimensions (" << lhs.rows() 
+        "Matrices with dimensions (" << lhs.rows()
         << ", " << lhs.cols()
         << ") and (" << rhs.rows() << ", " << rhs.cols()
         << ") are not multiplication-conformable");
@@ -4254,7 +4254,7 @@ namespace scythe {
   /* Macro definition for general return type templates of standard
    * binary operators (this handles, +, -, %, /, but not *)
    */
-    
+
 #define SCYTHE_GENERAL_BINARY_OPERATOR(OP,FUNCTOR)                    \
   template <typename T_type, matrix_order ORDER, matrix_style STYLE,  \
             matrix_order L_ORDER, matrix_style L_STYLE,               \
@@ -4493,13 +4493,13 @@ namespace scythe {
   operator- (const Matrix<T_type, ORDER, STYLE>& M)
   {
     Matrix<T_type, R_ORDER, Concrete> result(M.rows(), M.cols(), false);
-    std::transform(M.template begin_f<ORDER>(), 
-                   M.template end_f<ORDER>(), 
+    std::transform(M.template begin_f<ORDER>(),
+                   M.template end_f<ORDER>(),
                    result.template begin_f<R_ORDER>(),
                    std::negate<T_type> ());
     SCYTHE_VIEW_RETURN(T_type, R_ORDER, R_STYLE, result)
   }
-  
+
   // Default return type version
   template <matrix_order ORDER, matrix_style P_STYLE, typename T_type>
   inline Matrix<T_type, ORDER, Concrete>
@@ -4531,13 +4531,13 @@ namespace scythe {
   operator! (const Matrix<T_type, ORDER, STYLE>& M)
   {
     Matrix<bool, R_ORDER, Concrete> result(M.rows(), M.cols(), false);
-    std::transform(M.template begin_f<ORDER>(), 
-                   M.template end_f<ORDER>(), 
+    std::transform(M.template begin_f<ORDER>(),
+                   M.template end_f<ORDER>(),
                    result.template begin_f<R_ORDER>(),
                    std::logical_not<T_type> ());
     SCYTHE_VIEW_RETURN(T_type, R_ORDER, R_STYLE, result)
   }
-  
+
   // Default return type version
   template <typename T_type, matrix_order ORDER, matrix_style P_STYLE>
   inline Matrix<bool, ORDER, Concrete>
@@ -4621,7 +4621,7 @@ namespace scythe {
   {                                                                   \
     return (OP <ORDER, Concrete> (lhs, rhs));                         \
   }
-  
+
 #define SCYTHE_BINARY_BOOL_OPERATOR_GSM(OP)                           \
   template <matrix_order ORDER, matrix_style STYLE, typename T_type,  \
             matrix_order R_ORDER, matrix_style R_STYLE>               \
@@ -4772,7 +4772,7 @@ namespace scythe {
    *
    * This operator compares the elements of \a lhs and \a rhs and
    * returns a boolean Matrix of true and false values, indicating
-   * whether each of the left-hand side elements is less than 
+   * whether each of the left-hand side elements is less than
    * or equal to its
    * corresponding right hand side element.  This operator is
    * overloaded to provide both Matrix by Matrix inequality testing
@@ -4849,7 +4849,7 @@ namespace scythe {
    *
    * This operator compares the elements of \a lhs and \a rhs and
    * returns a boolean Matrix of true and false values, indicating
-   * whether each of the left-hand side elements is greater than 
+   * whether each of the left-hand side elements is greater than
    * or equal to its
    * corresponding right hand side element.  This operator is
    * overloaded to provide both Matrix by Matrix inequality testing
@@ -5019,7 +5019,7 @@ namespace scythe {
     std::ostringstream oss;
     oss.precision(os.precision());
     oss << std::setiosflags(std::ios::fixed);
-    
+
     typename Matrix<T,O,S>::const_forward_iterator last = M.end_f();
     for (typename Matrix<T,O,S>::const_forward_iterator i = M.begin_f();
         i != last; ++i) {
@@ -5034,22 +5034,22 @@ namespace scythe {
     // Change to a fixed width format.  Users should control precision
     os << std::setiosflags(std::ios::fixed);
 
-    
+
     for (uint i = 0; i < M.rows(); ++i) {
       Matrix<T, O, View> row = M(i, _);
       //for (uint i = 0; i < row.size(); ++i)
       //  os << std::setw(mlen) << row[i] << " ";
-      typename Matrix<T,O,View>::const_forward_iterator row_last 
+      typename Matrix<T,O,View>::const_forward_iterator row_last
         = row.end_f();
-      for (typename 
+      for (typename
           Matrix<T,O,View>::forward_iterator el = row.begin_f();
           el != row_last; ++el) {
         os << std::setw(mlen) << *el << " ";
       }
       os << std::endl;
     }
-    
-    
+
+
     // Restore pre-op flags
     os.flags(preop);
 
@@ -5077,7 +5077,7 @@ namespace scythe {
     SCYTHE_DEBUG_MSG("Using lapack/blas for matrix multiplication");
     SCYTHE_CHECK_10 (lhs.cols() != rhs.rows(),
         scythe_conformation_error,
-        "Matrices with dimensions (" << lhs.rows() 
+        "Matrices with dimensions (" << lhs.rows()
         << ", " << lhs.cols()
         << ") and (" << rhs.rows() << ", " << rhs.cols()
         << ") are not multiplication-conformable");
diff --git a/src/rng.h b/src/rng.h
index ce3a218..d200b80 100644
--- a/src/rng.h
+++ b/src/rng.h
@@ -1,4 +1,4 @@
-/* 
+/*
  * Scythe Statistical Library Copyright (C) 2000-2002 Andrew D. Martin
  * and Kevin M. Quinn; 2002-present Andrew D. Martin, Kevin M. Quinn,
  * and Daniel Pemstein.  All Rights Reserved.
@@ -16,7 +16,7 @@
  * in rng.cc is based on that in the R project, version 1.6.0-1.7.1.
  * This code is available under the terms of the GNU GPL.  Original
  * copyright:
- * 
+ *
  * Copyright (C) 1998      Ross Ihaka
  * Copyright (C) 2000-2002 The R Development Core Team
  * Copyright (C) 2003      The R Foundation
@@ -64,7 +64,7 @@ namespace scythe {
 /* Shorthand for the matrix versions of the various distributions'
  * random number generators.
  */
-  
+
 #define SCYTHE_RNGMETH_MATRIX(NAME, RTYPE, ARGNAMES, ...)             \
   template <matrix_order O, matrix_style S>                           \
   Matrix<RTYPE, O, S>                                                 \
@@ -167,8 +167,8 @@ namespace scythe {
        * is illegal to make template methods virtual
        */
       template <matrix_order O, matrix_style S>
-      Matrix<double,O,S> runif(unsigned int rows, 
-                               unsigned int cols) 
+      Matrix<double,O,S> runif(unsigned int rows,
+                               unsigned int cols)
       {
         Matrix<double, O, S> ret(rows, cols, false);
         typename Matrix<double,O,S>::forward_iterator it;
@@ -179,7 +179,7 @@ namespace scythe {
         return ret;
       }
 
-      Matrix<double,Col,Concrete> runif(unsigned int rows, 
+      Matrix<double,Col,Concrete> runif(unsigned int rows,
                                         unsigned int cols)
       {
         return runif<Col,Concrete>(rows, cols);
@@ -193,7 +193,7 @@ namespace scythe {
        *
        * \param alpha The first positive beta shape parameter.
        * \param beta the second positive beta shape parameter.
-			 * 
+			 *
 			 * \see pbeta(double x, double a, double b)
 			 * \see dbeta(double x, double a, double b)
 			 * \see betafn(double a, double b)
@@ -206,15 +206,15 @@ namespace scythe {
       {
         double report;
         double xalpha, xbeta;
-        
+
         // Check for allowable parameters
         SCYTHE_CHECK_10(alpha <= 0, scythe_invalid_arg, "alpha <= 0");
         SCYTHE_CHECK_10(beta <= 0, scythe_invalid_arg, "beta <= 0");
-        
+
         xalpha = rchisq (2 * alpha);
         xbeta = rchisq (2 * beta);
         report = xalpha / (xalpha + xbeta);
-        
+
         return (report);
       }
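
The rbeta body above relies on the standard identity that if X ~ chi^2(2*alpha) and Y ~ chi^2(2*beta) are independent, then X/(X+Y) ~ Beta(alpha, beta). A self-contained sketch of the same construction, using the C++ <random> facilities rather than scythe::rng (the function name and the std::mt19937 engine are assumptions of the sketch, not library API):

    // Beta draw via two chi-square draws, mirroring rbeta above.
    #include <random>

    double rbeta_via_chisq(double alpha, double beta, std::mt19937& gen)
    {
      std::chi_squared_distribution<double> xa(2.0 * alpha);
      std::chi_squared_distribution<double> xb(2.0 * beta);
      const double x = xa(gen);
      const double y = xb(gen);
      return x / (x + y);   // distributed Beta(alpha, beta)
    }
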
 
@@ -237,7 +237,7 @@ namespace scythe {
        *
        * \throw scythe_convergence_error (Level 0)
        */
-      double 
+      double
       rnchypgeom(double m1, double n1, double n2, double psi,
                  double delta)
       {
@@ -245,7 +245,7 @@ namespace scythe {
         double a = psi - 1;
         double b = -1 * ((n1+m1+2)*psi + n2 - m1);
         double c = psi * (n1+1) * (m1+1);
-        double q = -0.5 * ( b + sgn(b) * 
+        double q = -0.5 * ( b + sgn(b) *
             std::sqrt(std::pow(b,2) - 4*a*c));
         double root1 = c/q;
         double root2 = q/a;
@@ -257,7 +257,7 @@ namespace scythe {
           mode = std::floor(root2);
           exactcheck = 1;
         }
-     
+
 
         int size = static_cast<int>(u+1);
 
@@ -265,7 +265,7 @@ namespace scythe {
         fvec[static_cast<int>(mode)] = 1.0;
         double s;
         // compute the mass function at y
-        if (delta <= 0 || exactcheck==1){  //exact evaluation 
+        if (delta <= 0 || exactcheck==1){  //exact evaluation
           // sum from mode to u
           double f = 1.0;
           s = 1.0;
@@ -275,7 +275,7 @@ namespace scythe {
             s += f;
             fvec[static_cast<int>(i)] = f;
           }
-         
+
           // sum from mode to el
           f = 1.0;
           for (double i=(mode-1); i>=el; --i){
@@ -299,7 +299,7 @@ namespace scythe {
             fvec[static_cast<int>(i)] = f;
             ++i;
           } while(f>=epsilon || r>=5.0/6.0);
-         
+
           // sum from mode to elstar
           f = 1.0;
           i = mode-1;
@@ -310,7 +310,7 @@ namespace scythe {
             s += f;
             fvec[static_cast<int>(i)] = f;
             --i;
-          } while(f>=epsilon || r <=6.0/5.0);         
+          } while(f>=epsilon || r <=6.0/5.0);
         }
 
         double udraw = runif();
@@ -325,7 +325,7 @@ namespace scythe {
           double fu;
           if (lower >= el)
             fl = fvec[static_cast<int>(lower)];
-          else 
+          else
             fl = 0.0;
 
           if (upper <= u)
@@ -345,7 +345,7 @@ namespace scythe {
             ++upper;
           }
         } while(udraw>psum);
-       
+
         delete [] fvec;
         SCYTHE_THROW(scythe_convergence_error,
           "Algorithm did not converge");
@@ -361,7 +361,7 @@ namespace scythe {
 			 * Bernoulli distribution with probability of success \a p.
        *
        * \param p The probability of success on a trial.
-			 * 
+			 *
        * \throw scythe_invalid_arg (Level 1)
        */
       unsigned int
@@ -369,22 +369,22 @@ namespace scythe {
       {
         unsigned int report;
         double unif;
-          
+
         // Check for allowable parameters
         SCYTHE_CHECK_10(p < 0 || p > 1, scythe_invalid_arg,
             "p parameter not in[0,1]");
-        
+
         unif = runif ();
         if (unif < p)
           report = 1;
         else
           report = 0;
-        
+
         return (report);
       }
 
       SCYTHE_RNGMETH_MATRIX(rbern, unsigned int, p, double p);
-      
+
       /*! \brief Generate a binomial distributed random variate.
        *
 			 * This function returns a pseudo-random variate drawn from the
@@ -393,7 +393,7 @@ namespace scythe {
        *
        * \param n The number of trials.
        * \param p The probability of success on each trial.
-			 * 
+			 *
 			 * \see pbinom(double x, unsigned int n, double p)
 			 * \see dbinom(double x, unsigned int n, double p)
 			 *
@@ -405,12 +405,12 @@ namespace scythe {
         unsigned int report;
         unsigned int count = 0;
         double hold;
-          
+
         // Check for allowable parameters
         SCYTHE_CHECK_10(n == 0, scythe_invalid_arg, "n == 0");
-        SCYTHE_CHECK_10(p < 0 || p > 1, scythe_invalid_arg, 
+        SCYTHE_CHECK_10(p < 0 || p > 1, scythe_invalid_arg,
             "p not in [0,1]");
-          
+
         // Loop and count successes
         for (unsigned int i = 0; i < n; i++) {
           hold = runif ();
@@ -418,7 +418,7 @@ namespace scythe {
             ++count;
         }
         report = count;
-        
+
         return (report);
       }
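
rbinom above is the direct sum-of-Bernoulli-trials generator: count how many of n uniform draws fall below p. The same logic stripped of the Scythe scaffolding (names and the <random> engine are illustrative only):

    // Binomial draw as a count of successful Bernoulli trials.
    #include <random>

    unsigned int rbinom_naive(unsigned int n, double p, std::mt19937& gen)
    {
      std::uniform_real_distribution<double> unif(0.0, 1.0);
      unsigned int count = 0;
      for (unsigned int i = 0; i < n; ++i)
        if (unif(gen) < p)
          ++count;
      return count;
    }
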
 
@@ -431,7 +431,7 @@ namespace scythe {
 			 * \f$\chi^2\f$ distribution with \a df degrees of freedom.
        *
        * \param df The degrees of freedom.
-			 * 
+			 *
 			 * \see pchisq(double x, double df)
 			 * \see dchisq(double x, double df)
 			 *
@@ -441,14 +441,14 @@ namespace scythe {
       rchisq (double df)
       {
         double report;
-          
+
         // Check for allowable parameter
         SCYTHE_CHECK_10(df <= 0, scythe_invalid_arg,
             "Degrees of freedom <= 0");
-      
+
         // Return Gamma(nu/2, 1/2) variate
         report = rgamma (df / 2, .5);
-        
+
         return (report);
       }
 
@@ -461,7 +461,7 @@ namespace scythe {
 			 * parameter \a invscale.
        *
        * \param invscale The inverse scale parameter.
-			 * 
+			 *
 			 * \see pexp(double x, double scale)
 			 * \see dexp(double x, double scale)
 			 *
@@ -471,18 +471,18 @@ namespace scythe {
       rexp (double invscale)
       {
         double report;
-        
+
         // Check for allowable parameter
         SCYTHE_CHECK_10(invscale <= 0, scythe_invalid_arg,
             "Inverse scale parameter <= 0");
-        
+
         report = -std::log (runif ()) / invscale;
-        
+
         return (report);
       }
 
       SCYTHE_RNGMETH_MATRIX(rexp, double, invscale, double invscale);
-    
+
       /*! \brief Generate an F distributed random variate.
        *
 			 * This function returns a pseudo-random variate drawn from the
@@ -517,7 +517,7 @@ namespace scythe {
        *
        * \param shape The strictly positive shape of the distribution.
 			 * \param rate The inverse of the strictly positive scale of the distribution.  That is, 1 / scale.
-			 * 
+			 *
 			 * \see pgamma(double x, double shape, double scale)
 			 * \see dgamma(double x, double shape, double scale)
 			 * \see gammafn(double x)
@@ -539,7 +539,7 @@ namespace scythe {
         else if (shape == 1)
           report = -std::log (runif ()) / rate;
         else
-          report = rgamma1 (shape + 1) 
+          report = rgamma1 (shape + 1)
             * std::pow (runif (), 1 / shape) / rate;
 
         return (report);
@@ -556,7 +556,7 @@ namespace scythe {
        *
        * \param location The location of the distribution.
 			 * \param scale The scale of the distribution.
-			 * 
+			 *
 			 * \see plogis(double x, double location, double scale)
 			 * \see dlogis(double x, double location, double scale)
 			 *
@@ -567,17 +567,17 @@ namespace scythe {
       {
         double report;
         double unif;
-          
+
         // Check for allowable parameters
         SCYTHE_CHECK_10(scale <= 0, scythe_invalid_arg, "scale <= 0");
-        
+
         unif = runif ();
         report = location + scale * std::log (unif / (1 - unif));
-        
+
         return (report);
       }
 
-      SCYTHE_RNGMETH_MATRIX(rlogis, double, 
+      SCYTHE_RNGMETH_MATRIX(rlogis, double,
           SCYTHE_ARGSET(location, scale),
           double location, double scale);
 
@@ -590,7 +590,7 @@ namespace scythe {
        * \param logmean The logged mean of the distribution.
 			 * \param logsd The strictly positive logged standard deviation
 			 * of the distribution.
-			 * 
+			 *
 			 * \see plnorm(double x, double logmean, double logsd)
 			 * \see dlnorm(double x, double logmean, double logsd)
 			 *
@@ -605,7 +605,7 @@ namespace scythe {
         return std::exp(rnorm(logmean, logsd));
       }
 
-      SCYTHE_RNGMETH_MATRIX(rlnorm, double, 
+      SCYTHE_RNGMETH_MATRIX(rlnorm, double,
           SCYTHE_ARGSET(logmean, logsd),
           double logmean, double logsd);
 
@@ -619,7 +619,7 @@ namespace scythe {
        * \param n The strictly positive target number of successful
 			 * trials (dispersion parameter).
 			 * \param p The probability of success on each trial.
-			 * 
+			 *
 			 * \see pnbinom(unsigned int x, double n, double p)
 			 * \see dnbinom(unsigned int x, double n, double p)
 			 *
@@ -645,7 +645,7 @@ namespace scythe {
        *
        * \param mean The mean of the distribution.
 			 * \param sd The standard deviation of the distribution.
-			 * 
+			 *
 			 * \see pnorm(double x, double mean, double sd)
 			 * \see dnorm(double x, double mean, double sd)
 			 *
@@ -654,9 +654,9 @@ namespace scythe {
       double
       rnorm (double mean = 0, double sd = 1)
       {
-        SCYTHE_CHECK_10(sd <= 0, scythe_invalid_arg, 
+        SCYTHE_CHECK_10(sd <= 0, scythe_invalid_arg,
             "Negative standard deviation");
-        
+
         return (mean + rnorm1 () * sd);
       }
 
@@ -671,7 +671,7 @@ namespace scythe {
        *
        * \param lambda The strictly positive expected number of
 			 * occurrences.
-			 * 
+			 *
 			 * \see ppois(double x, double lambda)
 			 * \see dpois(double x, double lambda)
 			 *
@@ -682,7 +682,7 @@ namespace scythe {
       {
         SCYTHE_CHECK_10(lambda <= 0, scythe_invalid_arg, "lambda <= 0");
         unsigned int n;
-        
+
         if (lambda < 33) {
           double cutoff = std::exp(-lambda);
           n = -1;
@@ -690,21 +690,21 @@ namespace scythe {
           do {
             ++n;
             t *= runif();
-          } while (t > cutoff);    
+          } while (t > cutoff);
         } else {
           bool accept = false;
           double c = 0.767 - 3.36/lambda;
           double beta = M_PI/std::sqrt(3*lambda);
           double alpha = lambda*beta;
           double k = std::log(c) - lambda - std::log(beta);
-            
+
           while (! accept){
             double u1 = runif();
             double x = (alpha - std::log((1-u1)/u1))/beta;
             while (x <= -0.5){
               u1 = runif();
               x = (alpha - std::log((1-u1)/u1))/beta;
-            } 
+            }
             n = static_cast<int>(x + 0.5);
             double u2 = runif();
             double lhs = alpha - beta*x +
@@ -714,7 +714,7 @@ namespace scythe {
               accept = true;
           }
         }
-        
+
         return n;
       }
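
The small-lambda branch of rpois above is the classic Knuth generator: keep multiplying uniforms until the running product drops below exp(-lambda), and the resulting count is the Poisson draw. A standalone sketch of just that branch, only sensible for modest lambda (the large-lambda rejection sampler shown in the diff is a separate algorithm; names and the <random> engine are assumptions of the sketch):

    // Knuth-style Poisson draw for modest lambda, as in the first
    // branch of rpois above; <random> stands in for scythe::rng.
    #include <cmath>
    #include <random>

    unsigned int rpois_small(double lambda, std::mt19937& gen)
    {
      std::uniform_real_distribution<double> unif(0.0, 1.0);
      const double cutoff = std::exp(-lambda);  // only usable for modest lambda
      unsigned int n = 0;
      double t = unif(gen);
      while (t > cutoff) {
        ++n;
        t *= unif(gen);
      }
      return n;
    }
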
 
@@ -735,7 +735,7 @@ namespace scythe {
        * \param mu The mean of the distribution.
 			 * \param sigma2 The variance of the distribution.
 			 * \param nu The degrees of freedom of the distribution.
-			 * 
+			 *
 			 * \see dt1(double x, double mu, double sigma2, double nu)
 			 *
        * \throw scythe_invalid_arg (Level 1)
@@ -745,18 +745,18 @@ namespace scythe {
       {
         double report;
         double x, z;
-          
+
         // Check for allowable parameters
         SCYTHE_CHECK_10(sigma2 <= 0, scythe_invalid_arg,
             "Variance parameter sigma2 <= 0");
         SCYTHE_CHECK_10(nu <= 0, scythe_invalid_arg,
             "D.O.F parameter nu <= 0");
-        
+
         z = rnorm1 ();
         x = rchisq (nu);
-        report = mu + std::sqrt (sigma2) * z 
+        report = mu + std::sqrt (sigma2) * z
           * std::sqrt (nu) / std::sqrt (x);
-        
+
         return (report);
       }
 
@@ -770,7 +770,7 @@ namespace scythe {
        *
        * \param shape The strictly positive shape of the distribution.
 			 * \param scale The strictly positive scale of the distribution.
-			 * 
+			 *
 			 * \see pweibull(double x, double shape, double scale)
 			 * \see dweibull(double x, double shape, double scale)
 			 *
@@ -805,11 +805,11 @@ namespace scythe {
       richisq (double nu)
       {
         double report;
-          
+
         // Check for allowable parameter
         SCYTHE_CHECK_10(nu <= 0, scythe_invalid_arg,
             "Degrees of freedom <= 0");
-          
+
         // Return Inverse-Gamma(nu/2, 1/2) variate
         report = rigamma (nu / 2, .5);
         return (report);
@@ -824,7 +824,7 @@ namespace scythe {
        *
        * \param shape The strictly positive shape of the distribution.
 			 * \param scale The strictly positive scale of the distribution.
-			 * 
+			 *
 			 * \see rgamma(double alpha, double beta)
 			 *
        * \throw scythe_invalid_arg (Level 1)
@@ -833,7 +833,7 @@ namespace scythe {
       rigamma (double alpha, double beta)
       {
         double report;
-        
+
         // Check for allowable parameters
         SCYTHE_CHECK_10(alpha <= 0, scythe_invalid_arg, "alpha <= 0");
         SCYTHE_CHECK_10(beta <= 0, scythe_invalid_arg, "beta <= 0");
@@ -861,7 +861,7 @@ namespace scythe {
 			 * \param variance The variance of the distribution.
 			 * \param below The lower truncation point of the distribution.
 			 * \param above The upper truncation point of the distribution.
-			 * 
+			 *
 			 * \see rtnorm_combo(double mean, double variance, double below, double above)
 			 * \see rtbnorm_slice(double mean, double variance, double below, unsigned int iter = 10)
 			 * \see rtanorm_slice(double mean, double variance, double above, unsigned int iter = 10)
@@ -871,30 +871,30 @@ namespace scythe {
 			 *
        * \throw scythe_invalid_arg (Level 1)
        */
-      double 
+      double
       rtnorm(double mean, double variance, double below, double above)
-      {  
+      {
         SCYTHE_CHECK_10(below >= above, scythe_invalid_arg,
             "Truncation bound not logically consistent");
         SCYTHE_CHECK_10(variance <= 0, scythe_invalid_arg,
             "Variance <= 0");
-        
+
         double sd = std::sqrt(variance);
         double FA = 0.0;
         double FB = 0.0;
-        if ((std::fabs((above-mean)/sd) < 8.2) 
+        if ((std::fabs((above-mean)/sd) < 8.2)
             && (std::fabs((below-mean)/sd) < 8.2)){
           FA = pnorm1((above-mean)/sd, true, false);
           FB = pnorm1((below-mean)/sd, true, false);
         }
-        if ((((above-mean)/sd) < 8.2)  && (((below-mean)/sd) <= -8.2) ){ 
+        if ((((above-mean)/sd) < 8.2)  && (((below-mean)/sd) <= -8.2) ){
           FA = pnorm1((above-mean)/sd, true, false);
           FB = 0.0;
         }
-        if ( (((above-mean)/sd) >= 8.2)  && (((below-mean)/sd) > -8.2) ){ 
+        if ( (((above-mean)/sd) >= 8.2)  && (((below-mean)/sd) > -8.2) ){
           FA = 1.0;
           FB = pnorm1((below-mean)/sd, true, false);
-        } 
+        }
         if ( (((above-mean)/sd) >= 8.2) && (((below-mean)/sd) <= -8.2)){
           FA = 1.0;
           FB = 0.0;
@@ -909,11 +909,11 @@ namespace scythe {
           draw = above;
         if (draw < below)
           draw = below;
-         
+
         return draw;
       }
 
-      SCYTHE_RNGMETH_MATRIX(rtnorm, double, 
+      SCYTHE_RNGMETH_MATRIX(rtnorm, double,
           SCYTHE_ARGSET(mean, variance, above, below), double mean,
           double variance, double above, double below);
 
@@ -931,7 +931,7 @@ namespace scythe {
 			 * \param variance The variance of the distribution.
 			 * \param below The lower truncation point of the distribution.
 			 * \param above The upper truncation point of the distribution.
-			 * 
+			 *
 			 * \see rtnorm(double mean, double variance, double below, double above)
 			 * \see rtbnorm_slice(double mean, double variance, double below, unsigned int iter = 10)
 			 * \see rtanorm_slice(double mean, double variance, double above, unsigned int iter = 10)
@@ -942,20 +942,20 @@ namespace scythe {
        * \throw scythe_invalid_arg (Level 1)
        */
       double
-      rtnorm_combo(double mean, double variance, double below, 
+      rtnorm_combo(double mean, double variance, double below,
                    double above)
       {
         SCYTHE_CHECK_10(below >= above, scythe_invalid_arg,
             "Truncation bound not logically consistent");
         SCYTHE_CHECK_10(variance <= 0, scythe_invalid_arg,
             "Variance <= 0");
-        
+
         double sd = std::sqrt(variance);
         if ((((above-mean)/sd > 0.5) && ((mean-below)/sd > 0.5))
             ||
             (((above-mean)/sd > 2.0) && ((below-mean)/sd < 0.25))
             ||
-            (((mean-below)/sd > 2.0) && ((above-mean)/sd > -0.25))) { 
+            (((mean-below)/sd > 2.0) && ((above-mean)/sd > -0.25))) {
           double x = rnorm(mean, sd);
           while ((x > above) || (x < below))
             x = rnorm(mean,sd);
@@ -964,19 +964,19 @@ namespace scythe {
           // use the inverse cdf method
           double FA = 0.0;
           double FB = 0.0;
-          if ((std::fabs((above-mean)/sd) < 8.2) 
+          if ((std::fabs((above-mean)/sd) < 8.2)
               && (std::fabs((below-mean)/sd) < 8.2)){
             FA = pnorm1((above-mean)/sd, true, false);
             FB = pnorm1((below-mean)/sd, true, false);
           }
-          if ((((above-mean)/sd) < 8.2)  && (((below-mean)/sd) <= -8.2) ){ 
+          if ((((above-mean)/sd) < 8.2)  && (((below-mean)/sd) <= -8.2) ){
             FA = pnorm1((above-mean)/sd, true, false);
             FB = 0.0;
           }
-          if ( (((above-mean)/sd) >= 8.2)  && (((below-mean)/sd) > -8.2) ){ 
+          if ( (((above-mean)/sd) >= 8.2)  && (((below-mean)/sd) > -8.2) ){
             FA = 1.0;
             FB = pnorm1((below-mean)/sd, true, false);
-          } 
+          }
           if ( (((above-mean)/sd) >= 8.2) && (((below-mean)/sd) <= -8.2)){
             FA = 1.0;
             FB = 0.0;
@@ -992,10 +992,10 @@ namespace scythe {
           if (x < below)
             x = below;
           return x;
-        }    
+        }
       }
 
-      SCYTHE_RNGMETH_MATRIX(rtnorm_combo, double, 
+      SCYTHE_RNGMETH_MATRIX(rtnorm_combo, double,
           SCYTHE_ARGSET(mean, variance, above, below), double mean,
           double variance, double above, double below);
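
Roughly speaking, when the truncation interval [below, above] keeps a reasonable share of the normal mass, rtnorm_combo above simply rejection-samples from N(mean, variance); the inverse-CDF branch is reserved for far-tail intervals. A minimal sketch of the rejection branch alone (the function name and the <random> engine are assumptions of the sketch, not library API):

    // Naive rejection sampler for a doubly truncated normal, matching
    // the first branch of rtnorm_combo above.  Efficient only when
    // [below, above] is not deep in the tails.
    #include <cmath>
    #include <random>

    double rtnorm_reject(double mean, double variance,
                         double below, double above, std::mt19937& gen)
    {
      std::normal_distribution<double> norm(mean, std::sqrt(variance));
      double x = norm(gen);
      while (x < below || x > above)
        x = norm(gen);
      return x;
    }
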
 
@@ -1011,7 +1011,7 @@ namespace scythe {
 			 * \param variance The variance of the distribution.
 			 * \param below The lower truncation point of the distribution.
 			 * \param iter The number of iterations to use.
-			 * 
+			 *
 			 * \see rtnorm(double mean, double variance, double below, double above)
 			 * \see rtnorm_combo(double mean, double variance, double below, double above)
 			 * \see rtanorm_slice(double mean, double variance, double above, unsigned int iter = 10)
@@ -1029,10 +1029,10 @@ namespace scythe {
             "Truncation point < mean");
         SCYTHE_CHECK_10(variance <= 0, scythe_invalid_arg,
             "Variance <= 0");
-         
+
         double z = 0;
         double x = below + .00001;
-         
+
         for (unsigned int i=0; i<iter; ++i){
           z = runif()*std::exp(-1*std::pow((x-mean),2)/(2*variance));
           x = runif()*
@@ -1042,14 +1042,14 @@ namespace scythe {
         if (! R_finite(x)) {
           SCYTHE_WARN("Mean extremely far from truncation point. "
               << "Returning truncation point");
-          return below; 
+          return below;
         }
 
         return x;
       }
 
-      SCYTHE_RNGMETH_MATRIX(rtbnorm_slice, double, 
-          SCYTHE_ARGSET(mean, variance, below, iter), double mean, 
+      SCYTHE_RNGMETH_MATRIX(rtbnorm_slice, double,
+          SCYTHE_ARGSET(mean, variance, below, iter), double mean,
           double variance, double below, unsigned int iter = 10);
 
 			/*! \brief Generate a normally distributed random variate,
@@ -1064,7 +1064,7 @@ namespace scythe {
 			 * \param variance The variance of the distribution.
 			 * \param above The upper truncation point of the distribution.
 			 * \param iter The number of iterations to use.
-			 * 
+			 *
 			 * \see rtnorm(double mean, double variance, double below, double above)
 			 * \see rtnorm_combo(double mean, double variance, double below, double above)
 			 * \see rtbnorm_slice(double mean, double variance, double below, unsigned int iter = 10)
@@ -1075,37 +1075,37 @@ namespace scythe {
        * \throw scythe_invalid_arg (Level 1)
        */
       double
-      rtanorm_slice (double mean, double variance, double above, 
+      rtanorm_slice (double mean, double variance, double above,
           unsigned int iter = 10)
       {
         SCYTHE_CHECK_10(above > mean, scythe_invalid_arg,
             "Truncation point > mean");
         SCYTHE_CHECK_10(variance <= 0, scythe_invalid_arg,
             "Variance <= 0");
-      
+
         double below = -1*above;
         double newmu = -1*mean;
         double z = 0;
         double x = below + .00001;
-         
+
         for (unsigned int i=0; i<iter; ++i){
           z = runif()*std::exp(-1*std::pow((x-newmu),2)
               /(2*variance));
           x = runif()
-            *( (newmu + std::sqrt(-2*variance*std::log(z))) - below) 
+            *( (newmu + std::sqrt(-2*variance*std::log(z))) - below)
             + below;
         }
         if (! R_finite(x)) {
           SCYTHE_WARN("Mean extremely far from truncation point. "
               << "Returning truncation point");
-          return above; 
+          return above;
         }
-        
+
         return -1*x;
       }
 
-      SCYTHE_RNGMETH_MATRIX(rtanorm_slice, double, 
-          SCYTHE_ARGSET(mean, variance, above, iter), double mean, 
+      SCYTHE_RNGMETH_MATRIX(rtanorm_slice, double,
+          SCYTHE_ARGSET(mean, variance, above, iter), double mean,
           double variance, double above, unsigned int iter = 10);
 
 			/*! \brief Generate a normally distributed random
@@ -1123,7 +1123,7 @@ namespace scythe {
 			 * \param below The lower truncation point of the distribution.
 			 * \param iter The number of iterations to run the slice
 			 * sampler.
-			 * 
+			 *
 			 * \see rtnorm(double mean, double variance, double below, double above)
 			 * \see rtnorm_combo(double mean, double variance, double below, double above)
 			 * \see rtbnorm_slice(double mean, double variance, double below, unsigned int iter = 10)
@@ -1134,12 +1134,12 @@ namespace scythe {
        * \throw scythe_invalid_arg (Level 1)
        */
       double
-      rtbnorm_combo (double mean, double variance, double below, 
+      rtbnorm_combo (double mean, double variance, double below,
           unsigned int iter = 10)
       {
         SCYTHE_CHECK_10(variance <= 0, scythe_invalid_arg,
             "Variance <= 0");
-        
+
         double s = std::sqrt(variance);
         // do rejection sampling and return value
         //if (m >= below){
@@ -1147,7 +1147,7 @@ namespace scythe {
           double x = rnorm(mean, s);
           while (x < below)
             x = rnorm(mean,s);
-          return x; 
+          return x;
         } else if ((mean/s - below/s ) > -5.0 ){
           // use the inverse cdf method
           double above =  std::numeric_limits<double>::infinity();
@@ -1160,21 +1160,21 @@ namespace scythe {
           for (unsigned int i=0; i<iter; ++i){
             z = runif() * std::exp(-1 * std::pow((x - mean), 2)
                 / (2 * variance));
-            x = runif() 
-              * ((mean + std::sqrt(-2 * variance * std::log(z))) 
+            x = runif()
+              * ((mean + std::sqrt(-2 * variance * std::log(z)))
                 - below) + below;
           }
           if (! R_finite(x)) {
             SCYTHE_WARN("Mean extremely far from truncation point. "
                 << "Returning truncation point");
-            return below; 
+            return below;
           }
           return x;
         }
       }
 
-      SCYTHE_RNGMETH_MATRIX(rtbnorm_combo, double, 
-          SCYTHE_ARGSET(mean, variance, below, iter), double mean, 
+      SCYTHE_RNGMETH_MATRIX(rtbnorm_combo, double,
+          SCYTHE_ARGSET(mean, variance, below, iter), double mean,
           double variance, double below, unsigned int iter = 10);
 
 			/*! \brief Generate a normally distributed random variate,
@@ -1191,7 +1191,7 @@ namespace scythe {
 			 * \param variance The variance of the distribution.
 			 * \param above The upper truncation point of the distribution.
 			 * \param iter The number of iterations to run the slice sampler.
-			 * 
+			 *
 			 * \see rtnorm(double mean, double variance, double below, double above)
 			 * \see rtnorm_combo(double mean, double variance, double below, double above)
 			 * \see rtbnorm_slice(double mean, double variance, double below, unsigned int iter = 10)
@@ -1202,7 +1202,7 @@ namespace scythe {
        * \throw scythe_invalid_arg (Level 1)
        */
       double
-      rtanorm_combo (double mean, double variance, double above, 
+      rtanorm_combo (double mean, double variance, double above,
           const unsigned int iter = 10)
       {
         SCYTHE_CHECK_10(variance <= 0, scythe_invalid_arg,
@@ -1210,7 +1210,7 @@ namespace scythe {
 
         double s = std::sqrt(variance);
         // do rejection sampling and return value
-        if ((mean/s - above/s ) < 0.5){ 
+        if ((mean/s - above/s ) < 0.5){
           double x = rnorm(mean, s);
           while (x > above)
             x = rnorm(mean,s);
@@ -1226,29 +1226,29 @@ namespace scythe {
           double newmu = -1*mean;
           double z = 0;
           double x = below + .00001;
-             
+
           for (unsigned int i=0; i<iter; ++i){
             z = runif() * std::exp(-1 * std::pow((x-newmu), 2)
                 /(2 * variance));
-            x = runif() 
+            x = runif()
               * ((newmu + std::sqrt(-2 * variance * std::log(z)))
                   - below) + below;
           }
           if (! R_finite(x)) {
             SCYTHE_WARN("Mean extremely far from truncation point. "
                 << "Returning truncation point");
-            return above; 
+            return above;
           }
           return -1*x;
         }
       }
 
-      SCYTHE_RNGMETH_MATRIX(rtanorm_combo, double, 
-          SCYTHE_ARGSET(mean, variance, above, iter), double mean, 
+      SCYTHE_RNGMETH_MATRIX(rtanorm_combo, double,
+          SCYTHE_ARGSET(mean, variance, above, iter), double mean,
           double variance, double above, unsigned int iter = 10);
 
       /* Multivariate Distributions */
-      
+
       /*! \brief Generate a Wishart distributed random variate Matrix.
        *
        * This function returns a pseudo-random matrix-valued variate
@@ -1267,14 +1267,14 @@ namespace scythe {
       {
         SCYTHE_CHECK_10(! Sigma.isSquare(), scythe_dimension_error,
             "Sigma not square");
-        SCYTHE_CHECK_10(v < Sigma.rows(), scythe_invalid_arg, 
+        SCYTHE_CHECK_10(v < Sigma.rows(), scythe_invalid_arg,
             "v < Sigma.rows()");
-          
-        Matrix<double,O,Concrete> 
+
+        Matrix<double,O,Concrete>
           A(Sigma.rows(), Sigma.rows());
         Matrix<double,O,Concrete> C = cholesky<O,Concrete>(Sigma);
         Matrix<double,O,Concrete> alpha;
-          
+
         for (unsigned int i = 0; i < v; ++i) {
           alpha = C * rnorm(Sigma.rows(), 1, 0, 1);
           A += (alpha * (t(alpha)));
@@ -1296,14 +1296,14 @@ namespace scythe {
        */
       template <matrix_order O, matrix_style S>
       Matrix<double, O, Concrete>
-      rdirich(const Matrix<double, O, S>& alpha) 
-      { 
+      rdirich(const Matrix<double, O, S>& alpha)
+      {
         // Check for allowable parameters
         SCYTHE_CHECK_10(std::min(alpha) <= 0, scythe_invalid_arg,
             "alpha has elements < 0");
         SCYTHE_CHECK_10(! alpha.isColVector(), scythe_dimension_error,
             "alpha not column vector");
-     
+
         Matrix<double, O, Concrete> y(alpha.rows(), 1);
         double ysum = 0;
 
@@ -1312,7 +1312,7 @@ namespace scythe {
         const_matrix_forward_iterator<double,O,O,S> ait;
         const_matrix_forward_iterator<double,O,O,S> alast
           = alpha.template end_f();
-        typename Matrix<double,O,Concrete>::forward_iterator yit 
+        typename Matrix<double,O,Concrete>::forward_iterator yit
           = y.begin_f();
         for (ait = alpha.begin_f(); ait != alast; ++ait) {
           *yit = rgamma(*ait, 1);
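
rdirich above uses the gamma-normalization construction: draw y_i ~ Gamma(alpha_i, 1) independently and divide by their sum, so the result lies on the simplex with the desired Dirichlet law. The same recipe in a standalone sketch (names and the <random> engine are illustrative, not part of Scythe):

    // Dirichlet draw by normalizing independent Gamma(alpha_i, 1) draws.
    #include <cstddef>
    #include <random>
    #include <vector>

    std::vector<double> rdirich_sketch(const std::vector<double>& alpha,
                                       std::mt19937& gen)
    {
      std::vector<double> y(alpha.size());
      double ysum = 0.0;
      for (std::size_t i = 0; i < alpha.size(); ++i) {
        std::gamma_distribution<double> g(alpha[i], 1.0);  // shape, scale
        y[i] = g(gen);
        ysum += y[i];
      }
      for (double& yi : y)
        yi /= ysum;   // components now sum to one
      return y;
    }
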
@@ -1341,9 +1341,9 @@ namespace scythe {
       template <matrix_order PO1, matrix_style PS1,
                 matrix_order PO2, matrix_style PS2>
       Matrix<double, PO1, Concrete>
-      rmvnorm(const Matrix<double, PO1, PS1>& mu, 
+      rmvnorm(const Matrix<double, PO1, PS1>& mu,
               const Matrix<double, PO2, PS2>& sigma)
-      {  
+      {
         unsigned int dim = mu.rows();
         SCYTHE_CHECK_10(! mu.isColVector(), scythe_dimension_error,
             "mu not column vector");
@@ -1351,7 +1351,7 @@ namespace scythe {
             "sigma not square");
         SCYTHE_CHECK_10(sigma.rows() != dim, scythe_conformation_error,
             "mu and sigma not conformable");
-        
+
         return(mu + cholesky(sigma) * rnorm(dim, 1, 0, 1));
       }
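
rmvnorm above is the usual Cholesky construction: with sigma = L L^T and z a vector of independent standard normals, mu + L z has the requested multivariate normal distribution. A self-contained sketch with a tiny hand-rolled Cholesky standing in for scythe::cholesky (all names here are invented for the example; no pivoting or error checking):

    // Multivariate normal draw via mu + L z, where sigma = L L^T.
    #include <cmath>
    #include <cstddef>
    #include <random>
    #include <vector>

    std::vector<double> rmvnorm_sketch(
        const std::vector<double>& mu,
        const std::vector<std::vector<double> >& sigma,
        std::mt19937& gen)
    {
      const std::size_t n = mu.size();

      // Cholesky-Banachiewicz factorization of the (assumed SPD) sigma.
      std::vector<std::vector<double> > L(n, std::vector<double>(n, 0.0));
      for (std::size_t i = 0; i < n; ++i)
        for (std::size_t j = 0; j <= i; ++j) {
          double s = sigma[i][j];
          for (std::size_t k = 0; k < j; ++k)
            s -= L[i][k] * L[j][k];
          L[i][j] = (i == j) ? std::sqrt(s) : s / L[j][j];
        }

      // x = mu + L z with z ~ N(0, I).
      std::normal_distribution<double> stdnorm(0.0, 1.0);
      std::vector<double> z(n), x(mu);
      for (std::size_t i = 0; i < n; ++i)
        z[i] = stdnorm(gen);
      for (std::size_t i = 0; i < n; ++i)
        for (std::size_t j = 0; j <= i; ++j)
          x[i] += L[i][j] * z[j];
      return x;
    }
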
 
@@ -1377,7 +1377,7 @@ namespace scythe {
         SCYTHE_CHECK_10(nu <= 0, scythe_invalid_arg,
             "D.O.F parameter nu <= 0");
 
-        result = 
+        result =
           rmvnorm(Matrix<double, O>(sigma.rows(), 1, true, 0), sigma);
         result /= std::sqrt(rchisq(nu) / nu);
         return result;
@@ -1389,7 +1389,7 @@ namespace scythe {
        *
        * Instantiate a random number generator
        */
-      rng() 
+      rng()
         : rnorm_count_ (1) // Initialize the normal counter
       {}
 
@@ -1429,7 +1429,7 @@ namespace scythe {
         } else { // even numbered passes
           rnorm_count_ = 1;
           return x2_;
-        } 
+        }
       }
 
       /* Generate standard gamma variates */
@@ -1469,12 +1469,12 @@ namespace scythe {
             }
           }
         }
-        
+
         return (accept_);
       }
 
   };
 
-  
-} // end namespace scythe    
+
+} // end namespace scythe
 #endif /* RNG_H */
diff --git a/src/stat.h b/src/stat.h
index cec48a7..7a7055c 100644
--- a/src/stat.h
+++ b/src/stat.h
@@ -231,7 +231,7 @@ namespace scythe {
     if (n % 2 == 0)
       return ((temp[n / 2] + temp[n / 2 - 1]) / 2);
     else
-      return temp[n / 2];
+      return temp[(uint) ::floor(n / 2.0)];
   }
 
   /* Calculate the median of each column of a matrix */
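
The stat.h change above only rewrites how the middle index is picked for an odd-length input; the estimator itself is unchanged: sort a copy, average the two middle elements when the count is even, otherwise take the middle element. In standalone form (nonempty input assumed; the function name is illustrative):

    // Sort-and-pick median, matching the logic in stat.h above.
    #include <algorithm>
    #include <cstddef>
    #include <vector>

    double median_sketch(std::vector<double> v)  // deliberately takes a copy
    {
      std::sort(v.begin(), v.end());
      const std::size_t n = v.size();
      if (n % 2 == 0)
        return (v[n / 2] + v[n / 2 - 1]) / 2.0;
      return v[n / 2];  // integer division already floors
    }
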
